SQLAlchemy-0.8.4/

SQLAlchemy-0.8.4/AUTHORS

SQLAlchemy was created by Michael Bayer.

Major contributing authors include:

- Michael Bayer
- Jason Kirtland
- Gaetan de Menten
- Diana Clarke
- Michael Trier
- Philip Jenvey
- Ants Aasma
- Paul Johnston
- Jonathan Ellis

For a larger list of SQLAlchemy contributors over time, see:

http://www.sqlalchemy.org/trac/wiki/Contributors

SQLAlchemy-0.8.4/CHANGES

=====
MOVED
=====

Please see:

    /doc/changelog/index.html

or

    http://www.sqlalchemy.org/docs/latest/changelog/

for an index of all changelogs.

SQLAlchemy-0.8.4/distribute_setup.py

#!python
"""Bootstrap distribute installation

If you want to use setuptools in your package's setup.py, just include this
file in the same directory with it, and add this to the top of your setup.py::

    from distribute_setup import use_setuptools
    use_setuptools()

If you want to require a specific version of setuptools, set a download
mirror, or use an alternate download directory, you can do so by supplying
the appropriate options to ``use_setuptools()``.

This file can also be run as a script to install or upgrade setuptools.
"""
import os
import sys
import time
import fnmatch
import tempfile
import tarfile
from distutils import log

try:
    from site import USER_SITE
except ImportError:
    USER_SITE = None

try:
    import subprocess

    def _python_cmd(*args):
        args = (sys.executable,) + args
        return subprocess.call(args) == 0

except ImportError:
    # will be used for python 2.3
    def _python_cmd(*args):
        args = (sys.executable,) + args
        # quoting arguments if windows
        if sys.platform == 'win32':
            def quote(arg):
                if ' ' in arg:
                    return '"%s"' % arg
                return arg
            args = [quote(arg) for arg in args]
        return os.spawnl(os.P_WAIT, sys.executable, *args) == 0

DEFAULT_VERSION = "0.6.28"
DEFAULT_URL = "http://pypi.python.org/packages/source/d/distribute/"
SETUPTOOLS_FAKED_VERSION = "0.6c11"

SETUPTOOLS_PKG_INFO = """\
Metadata-Version: 1.0
Name: setuptools
Version: %s
Summary: xxxx
Home-page: xxx
Author: xxx
Author-email: xxx
License: xxx
Description: xxx
""" % SETUPTOOLS_FAKED_VERSION


def _install(tarball, install_args=()):
    # extracting the tarball
    tmpdir = tempfile.mkdtemp()
    log.warn('Extracting in %s', tmpdir)
    old_wd = os.getcwd()
    try:
        os.chdir(tmpdir)
        tar = tarfile.open(tarball)
        _extractall(tar)
        tar.close()

        # going in the directory
        subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
        os.chdir(subdir)
        log.warn('Now working in %s', subdir)

        # installing
        log.warn('Installing Distribute')
        if not _python_cmd('setup.py', 'install', *install_args):
            log.warn('Something went wrong during the installation.')
            log.warn('See the error message above.')
    finally:
        os.chdir(old_wd)


def _build_egg(egg, tarball, to_dir):
    # extracting the tarball
    tmpdir = tempfile.mkdtemp()
    log.warn('Extracting in %s', tmpdir)
    old_wd = os.getcwd()
    try:
        os.chdir(tmpdir)
        tar = tarfile.open(tarball)
        _extractall(tar)
        tar.close()

        # going in the directory
        subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
        os.chdir(subdir)
        log.warn('Now working in %s', subdir)

        # building an egg
        log.warn('Building a Distribute egg in %s', to_dir)
        _python_cmd('setup.py', '-q', 'bdist_egg', '--dist-dir', to_dir)
    finally:
        os.chdir(old_wd)
    # returning the result
    log.warn(egg)
    if not os.path.exists(egg):
        raise IOError('Could not build the egg.')


def _do_download(version, download_base, to_dir, download_delay):
    egg = os.path.join(to_dir, 'distribute-%s-py%d.%d.egg'
                       % (version, sys.version_info[0], sys.version_info[1]))
    if not os.path.exists(egg):
        tarball = download_setuptools(version, download_base,
                                      to_dir, download_delay)
        _build_egg(egg, tarball, to_dir)
    sys.path.insert(0, egg)
    import setuptools
    setuptools.bootstrap_install_from = egg


def use_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
                   to_dir=os.curdir, download_delay=15, no_fake=True):
    # making sure we use the absolute path
    to_dir = os.path.abspath(to_dir)
    was_imported = 'pkg_resources' in sys.modules or \
        'setuptools' in sys.modules
    try:
        try:
            import pkg_resources
            if not hasattr(pkg_resources, '_distribute'):
                if not no_fake:
                    _fake_setuptools()
                raise ImportError
        except ImportError:
            return _do_download(version, download_base, to_dir,
                                download_delay)
        try:
            pkg_resources.require("distribute>=" + version)
            return
        except pkg_resources.VersionConflict:
            e = sys.exc_info()[1]
            if was_imported:
                sys.stderr.write(
                    "The required version of distribute (>=%s) is not available,\n"
                    "and can't be installed while this script is running. Please\n"
                    "install a more recent version first, using\n"
                    "'easy_install -U distribute'."
                    "\n\n(Currently using %r)\n" % (version, e.args[0]))
                sys.exit(2)
            else:
                del pkg_resources, sys.modules['pkg_resources']  # reload ok
                return _do_download(version, download_base, to_dir,
                                    download_delay)
        except pkg_resources.DistributionNotFound:
            return _do_download(version, download_base, to_dir,
                                download_delay)
    finally:
        if not no_fake:
            _create_fake_setuptools_pkg_info(to_dir)


def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
                        to_dir=os.curdir, delay=15):
    """Download distribute from a specified location and return its filename

    `version` should be a valid distribute version number that is available
    as an egg for download under the `download_base` URL (which should end
    with a '/'). `to_dir` is the directory where the egg will be downloaded.
    `delay` is the number of seconds to pause before an actual download
    attempt.
    """
    # making sure we use the absolute path
    to_dir = os.path.abspath(to_dir)
    try:
        from urllib.request import urlopen
    except ImportError:
        from urllib2 import urlopen
    tgz_name = "distribute-%s.tar.gz" % version
    url = download_base + tgz_name
    saveto = os.path.join(to_dir, tgz_name)
    src = dst = None
    if not os.path.exists(saveto):  # Avoid repeated downloads
        try:
            log.warn("Downloading %s", url)
            src = urlopen(url)
            # Read/write all in one block, so we don't create a corrupt file
            # if the download is interrupted.
            data = src.read()
            dst = open(saveto, "wb")
            dst.write(data)
        finally:
            if src:
                src.close()
            if dst:
                dst.close()
    return os.path.realpath(saveto)


def _no_sandbox(function):
    def __no_sandbox(*args, **kw):
        try:
            from setuptools.sandbox import DirectorySandbox
            if not hasattr(DirectorySandbox, '_old'):
                def violation(*args):
                    pass
                DirectorySandbox._old = DirectorySandbox._violation
                DirectorySandbox._violation = violation
                patched = True
            else:
                patched = False
        except ImportError:
            patched = False
        try:
            return function(*args, **kw)
        finally:
            if patched:
                DirectorySandbox._violation = DirectorySandbox._old
                del DirectorySandbox._old
    return __no_sandbox


def _patch_file(path, content):
    """Will backup the file then patch it"""
    existing_content = open(path).read()
    if existing_content == content:
        # already patched
        log.warn('Already patched.')
        return False
    log.warn('Patching...')
    _rename_path(path)
    f = open(path, 'w')
    try:
        f.write(content)
    finally:
        f.close()
    return True

_patch_file = _no_sandbox(_patch_file)


def _same_content(path, content):
    return open(path).read() == content


def _rename_path(path):
    new_name = path + '.OLD.%s' % time.time()
    log.warn('Renaming %s into %s', path, new_name)
    os.rename(path, new_name)
    return new_name


def _remove_flat_installation(placeholder):
    if not os.path.isdir(placeholder):
        log.warn('Unkown installation at %s', placeholder)
        return False
    found = False
    for file in os.listdir(placeholder):
        if fnmatch.fnmatch(file, 'setuptools*.egg-info'):
            found = True
            break
    if not found:
        log.warn('Could not locate setuptools*.egg-info')
        return

    log.warn('Removing elements out of the way...')
    pkg_info = os.path.join(placeholder, file)
    if os.path.isdir(pkg_info):
        patched = _patch_egg_dir(pkg_info)
    else:
        patched = _patch_file(pkg_info, SETUPTOOLS_PKG_INFO)

    if not patched:
        log.warn('%s already patched.', pkg_info)
        return False
    # now let's move the files out of the way
    for element in ('setuptools', 'pkg_resources.py', 'site.py'):
        element = os.path.join(placeholder, element)
        if os.path.exists(element):
            _rename_path(element)
        else:
            log.warn('Could not find the %s element of the '
                     'Setuptools distribution', element)
    return True

_remove_flat_installation = _no_sandbox(_remove_flat_installation)


def _after_install(dist):
    log.warn('After install bootstrap.')
    placeholder = dist.get_command_obj('install').install_purelib
    _create_fake_setuptools_pkg_info(placeholder)


def _create_fake_setuptools_pkg_info(placeholder):
    if not placeholder or not os.path.exists(placeholder):
        log.warn('Could not find the install location')
        return
    pyver = '%s.%s' % (sys.version_info[0], sys.version_info[1])
    setuptools_file = 'setuptools-%s-py%s.egg-info' % \
        (SETUPTOOLS_FAKED_VERSION, pyver)
    pkg_info = os.path.join(placeholder, setuptools_file)
    if os.path.exists(pkg_info):
        log.warn('%s already exists', pkg_info)
        return

    if not os.access(pkg_info, os.W_OK):
        log.warn("Don't have permissions to write %s, skipping", pkg_info)

    log.warn('Creating %s', pkg_info)
    f = open(pkg_info, 'w')
    try:
        f.write(SETUPTOOLS_PKG_INFO)
    finally:
        f.close()

    pth_file = os.path.join(placeholder, 'setuptools.pth')
    log.warn('Creating %s', pth_file)
    f = open(pth_file, 'w')
    try:
        f.write(os.path.join(os.curdir, setuptools_file))
    finally:
        f.close()

_create_fake_setuptools_pkg_info = _no_sandbox(
    _create_fake_setuptools_pkg_info
)


def _patch_egg_dir(path):
    # let's check if it's already patched
    pkg_info = os.path.join(path, 'EGG-INFO', 'PKG-INFO')
    if os.path.exists(pkg_info):
        if _same_content(pkg_info, SETUPTOOLS_PKG_INFO):
            log.warn('%s already patched.', pkg_info)
            return False
    _rename_path(path)
    os.mkdir(path)
    os.mkdir(os.path.join(path, 'EGG-INFO'))
    pkg_info = os.path.join(path, 'EGG-INFO', 'PKG-INFO')
    f = open(pkg_info, 'w')
    try:
        f.write(SETUPTOOLS_PKG_INFO)
    finally:
        f.close()
    return True

_patch_egg_dir = _no_sandbox(_patch_egg_dir)


def _before_install():
    log.warn('Before install bootstrap.')
    _fake_setuptools()


def _under_prefix(location):
    if 'install' not in sys.argv:
        return True
    args = sys.argv[sys.argv.index('install') + 1:]
    for index, arg in enumerate(args):
        for option in ('--root', '--prefix'):
            if arg.startswith('%s=' % option):
                top_dir = arg.split('root=')[-1]
                return location.startswith(top_dir)
            elif arg == option:
                if len(args) > index:
                    top_dir = args[index + 1]
                    return location.startswith(top_dir)
        if arg == '--user' and USER_SITE is not None:
            return location.startswith(USER_SITE)
    return True


def _fake_setuptools():
    log.warn('Scanning installed packages')
    try:
        import pkg_resources
    except ImportError:
        # we're cool
        log.warn('Setuptools or Distribute does not seem to be installed.')
        return
    ws = pkg_resources.working_set
    try:
        setuptools_dist = ws.find(
            pkg_resources.Requirement.parse('setuptools', replacement=False)
        )
    except TypeError:
        # old distribute API
        setuptools_dist = ws.find(
            pkg_resources.Requirement.parse('setuptools')
        )

    if setuptools_dist is None:
        log.warn('No setuptools distribution found')
        return
    # detecting if it was already faked
    setuptools_location = setuptools_dist.location
    log.warn('Setuptools installation detected at %s', setuptools_location)

    # if --root or --preix was provided, and if
    # setuptools is not located in them, we don't patch it
    if not _under_prefix(setuptools_location):
        log.warn('Not patching, --root or --prefix is installing Distribute'
                 ' in another location')
        return

    # let's see if its an egg
    if not setuptools_location.endswith('.egg'):
        log.warn('Non-egg installation')
        res = _remove_flat_installation(setuptools_location)
        if not res:
            return
    else:
        log.warn('Egg installation')
        pkg_info = os.path.join(setuptools_location, 'EGG-INFO', 'PKG-INFO')
        if (os.path.exists(pkg_info) and
                _same_content(pkg_info, SETUPTOOLS_PKG_INFO)):
            log.warn('Already patched.')
            return
        log.warn('Patching...')
        # let's create a fake egg replacing setuptools one
        res = _patch_egg_dir(setuptools_location)
        if not res:
            return
    log.warn('Patched done.')
    _relaunch()


def _relaunch():
    log.warn('Relaunching...')
    # we have to relaunch the process
    # pip marker to avoid a relaunch bug
    _cmd = ['-c', 'install', '--single-version-externally-managed']
    if sys.argv[:3] == _cmd:
        sys.argv[0] = 'setup.py'
    args = [sys.executable] + sys.argv
    sys.exit(subprocess.call(args))


def _extractall(self, path=".", members=None):
    """Extract all members from the archive to the current working
       directory and set owner, modification time and permissions on
       directories afterwards. `path' specifies a different directory
       to extract to. `members' is optional and must be a subset of the
       list returned by getmembers().
    """
    import copy
    import operator
    from tarfile import ExtractError
    directories = []
    if members is None:
        members = self
    for tarinfo in members:
        if tarinfo.isdir():
            # Extract directories with a safe mode.
            directories.append(tarinfo)
            tarinfo = copy.copy(tarinfo)
            tarinfo.mode = 448  # decimal for oct 0700
        self.extract(tarinfo, path)

    # Reverse sort directories.
    if sys.version_info < (2, 4):
        def sorter(dir1, dir2):
            return cmp(dir1.name, dir2.name)
        directories.sort(sorter)
        directories.reverse()
    else:
        directories.sort(key=operator.attrgetter('name'), reverse=True)

    # Set correct owner, mtime and filemode on directories.
    for tarinfo in directories:
        dirpath = os.path.join(path, tarinfo.name)
        try:
            self.chown(tarinfo, dirpath)
            self.utime(tarinfo, dirpath)
            self.chmod(tarinfo, dirpath)
        except ExtractError:
            e = sys.exc_info()[1]
            if self.errorlevel > 1:
                raise
            else:
                self._dbg(1, "tarfile: %s" % e)


def _build_install_args(argv):
    install_args = []
    user_install = '--user' in argv
    if user_install and sys.version_info < (2, 6):
        log.warn("--user requires Python 2.6 or later")
        raise SystemExit(1)
    if user_install:
        install_args.append('--user')
    return install_args


def main(argv, version=DEFAULT_VERSION):
    """Install or upgrade setuptools and EasyInstall"""
    tarball = download_setuptools()
    _install(tarball, _build_install_args(argv))


if __name__ == '__main__':
    main(sys.argv[1:])
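The module docstring at the top of distribute_setup.py describes how a project's
``setup.py`` is meant to consume this bootstrap file. A minimal sketch of that usage,
assuming ``distribute_setup.py`` sits next to ``setup.py``; the package name, version
pin, and download directory below are illustrative placeholders, not values taken from
this distribution::

    # setup.py -- hypothetical consumer project, not part of this distribution
    from distribute_setup import use_setuptools

    # Bare call: bootstrap Distribute with the defaults defined in
    # distribute_setup.py (DEFAULT_VERSION / DEFAULT_URL above).
    use_setuptools()

    # Or pin a version and stage downloads elsewhere; the keyword names come
    # from the use_setuptools() signature above, the values are made up:
    # use_setuptools(version="0.6.28", to_dir="/tmp/eggs", download_delay=0)

    from setuptools import setup

    setup(
        name="example-project",   # hypothetical project name
        version="0.1",
        packages=[],
    )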
SQLAlchemy-0.8.4/doc/

SQLAlchemy-0.8.4/doc/_images/

SQLAlchemy-0.8.4/doc/_images/sqla_arch_small.png
    [binary PNG image data omitted]

SQLAlchemy-0.8.4/doc/_images/sqla_engine_arch.png
    [binary PNG image data omitted]

        LOCALTIME/LOCALTIMESTAMP

    .. change::
        :tags:
        :tickets:

        fixed order of ORDER BY/HAVING in compile

.. changelog::
    :version: 0.1.6
    :released: Wed Apr 12 2006

    .. change::
        :tags:
        :tickets:

        support for MS-SQL added courtesy Rick Morrison, Runar Petursson

    .. change::
        :tags:
        :tickets:

        the latest SQLSoup from J. Ellis

    .. change::
        :tags:
        :tickets:

        ActiveMapper has preliminary support for inheritance (Jeff Watkins)

    .. change::
        :tags:
        :tickets:

        added a "mods" system which allows pluggable modules that
        modify/augment core functionality, using the function
        "install_mods(\*modnames)".

    .. change::
        :tags:
        :tickets:

        added the first "mod", SelectResults, which modifies mapper selects to
        return generators that turn ranges into LIMIT/OFFSET queries
        (Jonas Borgström)

    .. change::
        :tags:
        :tickets:

        factored out querying capabilities of Mapper into a separate Query
        object which is Session-centric. this improves the performance of
        mapper.using(session) and makes other things possible.

    .. change::
        :tags:
        :tickets:

        objectstore/Session refactored, the official way to save objects is
        now via the flush() method. The begin/commit functionality of Session
        is factored into LegacySession which is still established as the
        default behavior, until the 0.2 series.

    .. change::
        :tags:
        :tickets:

        types system is bound to an engine at query compile time, not schema
        construction time. this simplifies the types system as well as the
        ProxyEngine.

    .. change::
        :tags:
        :tickets:

        added 'version_id' keyword argument to mapper. this keyword should
        reference a Column object with type Integer, preferably non-nullable,
        which will be used on the mapped table to track version numbers.
        this number is incremented on each save operation and is specifed in
        the UPDATE/DELETE conditions so that it factors into the returned row
        count, which results in a ConcurrencyError if the value received is
        not the expected count.

    .. change::
        :tags:
        :tickets:

        added 'entity_name' keyword argument to mapper. a mapper is now
        associated with a class via the class object as well as an optional
        entity_name parameter, which is a string defaulting to None. any
        number of primary mappers can be created for a class, qualified by
        the entity name. instances of those classes will issue all of their
        load and save operations through their entity_name-qualified mapper,
        and maintain separate a identity in the identity map for an otherwise
        equilvalent object.

    .. change::
        :tags:
        :tickets:

        overhaul to the attributes system. code has been clarified, and also
        fixed to support proper polymorphic behavior on object attributes.

    .. change::
        :tags:
        :tickets:

        added "for_update" flag to Select objects

    .. change::
        :tags:
        :tickets:

        some fixes for backrefs

    .. change::
        :tags:
        :tickets:

        fix for postgres1 DateTime type

    .. change::
        :tags:
        :tickets:

        documentation pages mostly switched over to Markdown syntax

.. changelog::
    :version: 0.1.5
    :released: Mon Mar 27 2006

    .. change::
        :tags:
        :tickets:

        added SQLSession concept to SQLEngine.
this object keeps track of retrieving a connection from the connection pool as well as an in-progress transaction. methods push_session() and pop_session() added to SQLEngine which push/pop a new SQLSession onto the engine, allowing operation upon a second connection "nested" within the previous one, allowing nested transactions. Other tricks are sure to come later regarding SQLSession. .. change:: :tags: :tickets: added nest_on argument to objectstore.Session. This is a single SQLEngine or list of engines for which push_session()/pop_session() will be called each time this Session becomes the active session (via objectstore.push_session() or equivalent). This allows a unit of work Session to take advantage of the nested transaction feature without explicitly calling push_session/pop_session on the engine. .. change:: :tags: :tickets: factored apart objectstore/unitofwork to separate "Session scoping" from "uow commit heavy lifting" .. change:: :tags: :tickets: added populate_instance() method to MapperExtension. allows an extension to modify the population of object attributes. this method can call the populate_instance() method on another mapper to proxy the attribute population from one mapper to another; some row translation logic is also built in to help with this. .. change:: :tags: :tickets: fixed Oracle8-compatibility "use_ansi" flag which converts JOINs to comparisons with the = and (+) operators, passes basic unittests .. change:: :tags: :tickets: tweaks to Oracle LIMIT/OFFSET support .. change:: :tags: :tickets: Oracle reflection uses ALL_** views instead of USER_** to get larger list of stuff to reflect from .. change:: :tags: :tickets: 105 fixes to Oracle foreign key reflection .. change:: :tags: :tickets: objectstore.commit(obj1, obj2,...) adds an extra step to seek out private relations on properties and delete child objects, even though its not a global commit .. change:: :tags: :tickets: lots and lots of fixes to mappers which use inheritance, strengthened the concept of relations on a mapper being made towards the "local" table for that mapper, not the tables it inherits. allows more complex compositional patterns to work with lazy/eager loading. .. change:: :tags: :tickets: added support for mappers to inherit from others based on the same table, just specify the same table as that of both parent/child mapper. .. change:: :tags: :tickets: some minor speed improvements to the attributes system with regards to instantiating and populating new objects. .. change:: :tags: :tickets: fixed MySQL binary unit test .. change:: :tags: :tickets: INSERTs can receive clause elements as VALUES arguments, not just literal values .. change:: :tags: :tickets: support for calling multi-tokened functions, i.e. schema.mypkg.func() .. change:: :tags: :tickets: added J. Ellis' SQLSoup module to extensions package .. change:: :tags: :tickets: added "polymorphic" examples illustrating methods to load multiple object types from one mapper, the second of which uses the new populate_instance() method. small improvements to mapper, UNION construct to help the examples along .. change:: :tags: :tickets: improvements/fixes to session.refresh()/session.expire() (which may have been called "invalidate" earlier..) .. change:: :tags: :tickets: added session.expunge() which totally removes an object from the current session .. change:: :tags: :tickets: added \*args, \**kwargs pass-thru to engine.transaction(func) allowing easier creation of transactionalizing decorator functions .. 
change:: :tags: :tickets: added iterator interface to ResultProxy: "for row in result:..." .. change:: :tags: :tickets: added assertion to tx = session.begin(); tx.rollback(); tx.begin(), i.e. cant use it after a rollback() .. change:: :tags: :tickets: added date conversion on bind parameter fix to SQLite enabling dates to work with pysqlite1 .. change:: :tags: :tickets: 116 improvements to subqueries to more intelligently construct their FROM clauses .. change:: :tags: :tickets: added PickleType to types. .. change:: :tags: :tickets: fixed two bugs with column labels with regards to bind parameters: bind param keynames they are now generated from a column "label" in all relevant cases to take advantage of excess-name-length rules, and checks for a peculiar collision against a column named the same as "tablename_colname" added .. change:: :tags: :tickets: major overhaul to unit of work documentation, other documentation sections. .. change:: :tags: :tickets: fixed attributes bug where if an object is committed, its lazy-loaded list got blown away if it hadnt been loaded .. change:: :tags: :tickets: added unique_connection() method to engine, connection pool to return a connection that is not part of the thread-local context or any current transaction .. change:: :tags: :tickets: added invalidate() function to pooled connection. will remove the connection from the pool. still need work for engines to auto-reconnect to a stale DB though. .. change:: :tags: :tickets: added distinct() function to column elements so you can do func.count(mycol.distinct()) .. change:: :tags: :tickets: added "always_refresh" flag to Mapper, creates a mapper that will always refresh the attributes of objects it gets/selects from the DB, overwriting any changes made. .. changelog:: :version: 0.1.4 :released: Mon Mar 13 2006 .. change:: :tags: :tickets: create_engine() now uses genericized parameters; host/hostname, db/dbname/database, password/passwd, etc. for all engine connections. makes engine URIs much more "universal" .. change:: :tags: :tickets: added support for SELECT statements embedded into a column clause, using the flag "scalar=True" .. change:: :tags: :tickets: another overhaul to EagerLoading when used in conjunction with mappers that inherit; improvements to eager loads figuring out their aliased queries correctly, also relations set up against a mapper with inherited mappers will create joins against the table that is specific to the mapper itself (i.e. and not any tables that are inherited/are further down the inheritance chain), this can be overridden by using custom primary/secondary joins. .. change:: :tags: :tickets: added J.Ellis patch to mapper.py so that selectone() throws an exception if query returns more than one object row, selectfirst() to not throw the exception. also adds selectfirst_by (synonymous with get_by) and selectone_by .. change:: :tags: :tickets: added onupdate parameter to Column, will exec SQL/python upon an update statement.Also adds "for_update=True" to all DefaultGenerator subclasses .. change:: :tags: :tickets: added support for Oracle table reflection contributed by Andrija Zaric; still some bugs to work out regarding composite primary keys/dictionary selection .. change:: :tags: :tickets: checked in an initial Firebird module, awaiting testing. .. change:: :tags: :tickets: added sql.ClauseParameters dictionary object as the result for compiled.get_params(), does late-typeprocessing of bind parameters so that the original values are easier to access .. 
change:: :tags: :tickets: more docs for indexes, column defaults, connection pooling, engine construction .. change:: :tags: :tickets: overhaul to the construction of the types system. uses a simpler inheritance pattern so that any of the generic types can be easily subclassed, with no need for TypeDecorator. .. change:: :tags: :tickets: added "convert_unicode=False" parameter to SQLEngine, will cause all String types to perform unicode encoding/decoding (makes Strings act like Unicodes) .. change:: :tags: :tickets: added 'encoding="utf8"' parameter to engine. the given encoding will be used for all encode/decode calls within Unicode types as well as Strings when convert_unicode=True. .. change:: :tags: :tickets: improved support for mapping against UNIONs, added polymorph.py example to illustrate multi-class mapping against a UNION .. change:: :tags: :tickets: fix to SQLite LIMIT/OFFSET syntax .. change:: :tags: :tickets: fix to Oracle LIMIT syntax .. change:: :tags: :tickets: added backref() function, allows backreferences to have keyword arguments that will be passed to the backref. .. change:: :tags: :tickets: Sequences and ColumnDefault objects can do execute()/scalar() standalone .. change:: :tags: :tickets: SQL functions (i.e. func.foo()) can do execute()/scalar() standalone .. change:: :tags: :tickets: fix to SQL functions so that the ANSI-standard functions, i.e. current_timestamp etc., do not specify parenthesis. all other functions do. .. change:: :tags: :tickets: added settattr_clean and append_clean to SmartProperty, which set attributes without triggering a "dirty" event or any history. used as: myclass.prop1.setattr_clean(myobject, 'hi') .. change:: :tags: :tickets: improved support to column defaults when used by mappers; mappers will pull pre-executed defaults from statement's executed bind parameters (pre-conversion) to populate them into a saved object's attributes; if any PassiveDefaults have fired off, will instead post-fetch the row from the DB to populate the object. .. change:: :tags: :tickets: added 'get_session().invalidate(\*obj)' method to objectstore, instances will refresh() themselves upon the next attribute access. .. change:: :tags: :tickets: improvements to SQL func calls including an "engine" keyword argument so they can be execute()d or scalar()ed standalone, also added func accessor to SQLEngine .. change:: :tags: :tickets: fix to MySQL4 custom table engines, i.e. TYPE instead of ENGINE .. change:: :tags: :tickets: slightly enhanced logging, includes timestamps and a somewhat configurable formatting system, in lieu of a full-blown logging system .. change:: :tags: :tickets: improvements to the ActiveMapper class from the TG gang, including many-to-many relationships .. change:: :tags: :tickets: added Double and TinyInt support to mysql .. changelog:: :version: 0.1.3 :released: Thu Mar 02 2006 .. change:: :tags: :tickets: completed "post_update" feature, will add a second update statement before inserts and after deletes in order to reconcile a relationship without any dependencies being created; used when persisting two rows that are dependent on each other .. change:: :tags: :tickets: completed mapper.using(session) function, localized per-object Session functionality; objects can be declared and manipulated as local to any user-defined Session .. change:: :tags: :tickets: fix to Oracle "row_number over" clause with multiple tables .. 
change:: :tags: :tickets: mapper.get() was not selecting multiple-keyed objects if the mapper's table was a join, such as in an inheritance relationship, this is fixed. .. change:: :tags: :tickets: overhaul to sql/schema packages so that the sql package can run all on its own, producing selects, inserts, etc. without any engine dependencies. builds upon new TableClause/ColumnClause lexical objects. Schema's Table/Column objects are the "physical" subclasses of them. simplifies schema/sql relationship, extensions (like proxyengine), and speeds overall performance by a large margin. removes the entire getattr() behavior that plagued 0.1.1. .. change:: :tags: :tickets: refactoring of how the mapper "synchronizes" data between two objects into a separate module, works better with properties attached to a mapper that has an additional inheritance relationship to one of the related tables, also the same methodology used to synchronize parent/child objects now used by mapper to synchronize between inherited and inheriting mappers. .. change:: :tags: :tickets: made objectstore "check for out-of-identitymap" more aggressive, will perform the check when object attributes are modified or the object is deleted .. change:: :tags: :tickets: Index object fully implemented, can be constructed standalone, or via "index" and "unique" arguments on Columns. .. change:: :tags: :tickets: added "convert_unicode" flag to SQLEngine, will treat all String/CHAR types as Unicode types, with raw-byte/utf-8 translation on the bind parameter and result set side. .. change:: :tags: :tickets: postgres maintains a list of ANSI functions that must have no parenthesis so function calls with no arguments work consistently .. change:: :tags: :tickets: tables can be created with no engine specified. this will default their engine to a module-scoped "default engine" which is a ProxyEngine. this engine can be connected via the function "global_connect". .. change:: :tags: :tickets: added "refresh(\*obj)" method to objectstore / Session to reload the attributes of any set of objects from the database unconditionally .. changelog:: :version: 0.1.2 :released: Fri Feb 24 2006 .. change:: :tags: :tickets: fixed a recursive call in schema that was somehow running 994 times then returning normally. broke nothing, slowed down everything. thanks to jpellerin for finding this. .. changelog:: :version: 0.1.1 :released: Thu Feb 23 2006 .. change:: :tags: :tickets: small fix to Function class so that expressions with a func.foo() use the type of the Function object (i.e. the left side) as the type of the boolean expression, not the other side which is more of a moving target (changeset 1020). .. change:: :tags: :tickets: creating self-referring mappers with backrefs slightly easier (but still not that easy - changeset 1019) .. change:: :tags: :tickets: fixes to one-to-one mappings (changeset 1015) .. change:: :tags: :tickets: psycopg1 date/time issue with None fixed (changeset 1005) .. change:: :tags: :tickets: two issues related to postgres, which doesnt want to give you the "lastrowid" since oids are deprecated: * postgres database-side defaults that are on primary key cols *do* execute explicitly beforehand, even though thats not the idea of a PassiveDefault. this is because sequences on columns get reflected as PassiveDefaults, but need to be explicitly executed on a primary key col so we know what we just inserted. 
* if you did add a row that has a bunch of database-side defaults on it, and the PassiveDefault thing was working the old way, i.e. they just execute on the DB side, the "cant get the row back without an OID" exception that occurred also will not happen unless someone (usually the ORM) explicitly asks for it. .. change:: :tags: :tickets: fixed a glitch with engine.execute_compiled where it was making a second ResultProxy that just got thrown away. .. change:: :tags: :tickets: began to implement newer logic in object properities. you can now say myclass.attr.property, which will give you the PropertyLoader corresponding to that attribute, i.e. myclass.mapper.props['attr'] .. change:: :tags: :tickets: eager loading has been internally overhauled to use aliases at all times. more complicated chains of eager loads can now be created without any need for explicit "use aliases"-type instructions. EagerLoader code is also much simpler now. .. change:: :tags: :tickets: a new somewhat experimental flag "use_update" added to relations, indicates that this relationship should be handled by a second UPDATE statement, either after a primary INSERT or before a primary DELETE. handles circular row dependencies. .. change:: :tags: :tickets: added exceptions module, all raised exceptions (except for some KeyError/AttributeError exceptions) descend from these classes. .. change:: :tags: :tickets: fix to date types with MySQL, returned timedelta converted to datetime.time .. change:: :tags: :tickets: two-phase objectstore.commit operations (i.e. begin/commit) now return a transactional object (SessionTrans), to more clearly indicate transaction boundaries. .. change:: :tags: :tickets: Index object with create/drop support added to schema .. change:: :tags: :tickets: fix to postgres, where it will explicitly pre-execute a PassiveDefault on a table if it is a primary key column, pursuant to the ongoing "we cant get inserted rows back from postgres" issue .. change:: :tags: :tickets: change to information_schema query that gets back postgres table defs, now uses explicit JOIN keyword, since one user had faster performance with 8.1 .. change:: :tags: :tickets: fix to engine.process_defaults so it works correctly with a table that has different column name/column keys (changset 982) .. change:: :tags: :tickets: a column can only be attached to one table - this is now asserted .. change:: :tags: :tickets: postgres time types descend from Time type .. change:: :tags: :tickets: fix to alltests so that it runs types test (now named testtypes) .. change:: :tags: :tickets: fix to Join object so that it correctly exports its foreign keys (cs 973) .. change:: :tags: :tickets: creating relationships against mappers that use inheritance fixed (cs 973) SQLAlchemy-0.8.4/doc/_sources/changelog/changelog_02.txt0000644000076500000240000007516112251147171023553 0ustar classicstaff00000000000000 ============== 0.2 Changelog ============== .. changelog:: :version: 0.2.8 :released: Tue Sep 05 2006 .. change:: :tags: :tickets: cleanup on connection methods + documentation. custom DBAPI arguments specified in query string, 'connect_args' argument to 'create_engine', or custom creation function via 'creator' function to 'create_engine'. .. change:: :tags: :tickets: 274 added "recycle" argument to Pool, is "pool_recycle" on create_engine, defaults to 3600 seconds; connections after this age will be closed and replaced with a new one, to handle db's that automatically close stale connections .. 
change:: :tags: :tickets: 121 changed "invalidate" semantics with pooled connection; will instruct the underlying connection record to reconnect the next time its called. "invalidate" will also automatically be called if any error is thrown in the underlying call to connection.cursor(). this will hopefully allow the connection pool to reconnect to a database that had been stopped and started without restarting the connecting application .. change:: :tags: :tickets: eesh ! the tutorial doctest was broken for quite some time. .. change:: :tags: :tickets: add_property() method on mapper does a "compile all mappers" step in case the given property references a non-compiled mapper (as it did in the case of the tutorial !) .. change:: :tags: :tickets: 277 check for pg sequence already existing before create .. change:: :tags: :tickets: if a contextual session is established via MapperExtension.get_session (as it is using the sessioncontext plugin, etc), a lazy load operation will use that session by default if the parent object is not persistent with a session already. .. change:: :tags: :tickets: lazy loads will not fire off for an object that does not have a database identity (why? see http://www.sqlalchemy.org/trac/wiki/WhyDontForeignKeysLoadData) .. change:: :tags: :tickets: unit-of-work does a better check for "orphaned" objects that are part of a "delete-orphan" cascade, for certain conditions where the parent isnt available to cascade from. .. change:: :tags: :tickets: mappers can tell if one of their objects is an "orphan" based on interactions with the attribute package. this check is based on a status flag maintained for each relationship when objects are attached and detached from each other. .. change:: :tags: :tickets: it is now invalid to declare a self-referential relationship with "delete-orphan" (as the abovementioned check would make them impossible to save) .. change:: :tags: :tickets: improved the check for objects being part of a session when the unit of work seeks to flush() them as part of a relationship.. .. change:: :tags: :tickets: 280 statement execution supports using the same BindParam object more than once in an expression; simplified handling of positional parameters. nice job by Bill Noon figuring out the basic idea. .. change:: :tags: :tickets: 60, 71 postgres reflection moved to use pg_schema tables, can be overridden with use_information_schema=True argument to create_engine. .. change:: :tags: :tickets: 155 added case_sensitive argument to MetaData, Table, Column, determines itself automatically based on if a parent schemaitem has a non-None setting for the flag, or if not, then whether the identifier name is all lower case or not. when set to True, quoting is applied to identifiers with mixed or uppercase identifiers. quoting is also applied automatically in all cases to identifiers that are known to be reserved words or contain other non-standard characters. various database dialects can override all of this behavior, but currently they are all using the default behavior. tested with postgres, mysql, sqlite, oracle. needs more testing with firebird, ms-sql. part of the ongoing work with .. change:: :tags: :tickets: unit tests updated to run without any pysqlite installed; pool test uses a mock DBAPI .. change:: :tags: :tickets: 281 urls support escaped characters in passwords .. change:: :tags: :tickets: added limit/offset to UNION queries (though not yet in oracle) .. change:: :tags: :tickets: added "timezone=True" flag to DateTime and Time types. 
postgres so far will convert this to "TIME[STAMP] (WITH|WITHOUT) TIME ZONE", so that control over timezone presence is more controllable (psycopg2 returns datetimes with tzinfo's if available, which can create confusion against datetimes that dont). .. change:: :tags: :tickets: 287 fix to using query.count() with distinct, \**kwargs with SelectResults count() .. change:: :tags: :tickets: 289 deregister Table from MetaData when autoload fails; .. change:: :tags: :tickets: 293 import of py2.5s sqlite3 .. change:: :tags: :tickets: 296 unicode fix for startswith()/endswith() .. changelog:: :version: 0.2.7 :released: Sat Aug 12 2006 .. change:: :tags: :tickets: quoting facilities set up so that database-specific quoting can be turned on for individual table, schema, and column identifiers when used in all queries/creates/drops. Enabled via "quote=True" in Table or Column, as well as "quote_schema=True" in Table. Thanks to Aaron Spike for the excellent efforts. .. change:: :tags: :tickets: assignmapper was setting is_primary=True, causing all sorts of mayhem by not raising an error when redundant mappers were set up, fixed .. change:: :tags: :tickets: added allow_null_pks option to Mapper, allows rows where some primary key columns are null (i.e. when mapping to outer joins etc) .. change:: :tags: :tickets: modifcation to unitofwork to not maintain ordering within the "new" list or within the UOWTask "objects" list; instead, new objects are tagged with an ordering identifier as they are registered as new with the session, and the INSERT statements are then sorted within the mapper save_obj. the INSERT ordering has basically been pushed all the way to the end of the flush cycle. that way the various sorts and organizations occuring within UOWTask (particularly the circular task sort) dont have to worry about maintaining order (which they werent anyway) .. change:: :tags: :tickets: fixed reflection of foreign keys to autoload the referenced table if it was not loaded already .. change:: :tags: :tickets: 256 - pass URL query string arguments to connect() function .. change:: :tags: :tickets: 257 - oracle boolean type .. change:: :tags: :tickets: custom primary/secondary join conditions in a relation *will* be propagated to backrefs by default. specifying a backref() will override this behavior. .. change:: :tags: :tickets: better check for ambiguous join conditions in sql.Join; propagates to a better error message in PropertyLoader (i.e. relation()/backref()) for when the join condition can't be reasonably determined. .. change:: :tags: :tickets: sqlite creates ForeignKeyConstraint objects properly upon table reflection. .. change:: :tags: :tickets: 224 adjustments to pool stemming from changes made for. overflow counter should only be decremented if the connection actually succeeded. added a test script to attempt testing this. .. change:: :tags: :tickets: fixed mysql reflection of default values to be PassiveDefault .. change:: :tags: :tickets: 263, 264 added reflected 'tinyint', 'mediumint' type to MS-SQL. .. change:: :tags: :tickets: SingletonThreadPool has a size and does a cleanup pass, so that only a given number of thread-local connections stay around (needed for sqlite applications that dispose of threads en masse) .. change:: :tags: :tickets: 267, 265 fixed small pickle bug(s) with lazy loaders .. change:: :tags: :tickets: fixed possible error in mysql reflection where certain versions return an array instead of string for SHOW CREATE TABLE call .. 
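The ``timezone=True`` flag mentioned above is still accepted by the ``DateTime`` and ``Time`` types; a minimal sketch (table and column names are made up for illustration)::

    from sqlalchemy import MetaData, Table, Column, Integer, DateTime, Time

    metadata = MetaData()

    events = Table(
        "events", metadata,
        Column("id", Integer, primary_key=True),
        # renders TIMESTAMP WITH TIME ZONE on PostgreSQL, so tz-aware
        # datetimes round-trip instead of being mixed with naive ones
        Column("created_at", DateTime(timezone=True)),
        Column("start_time", Time(timezone=True)),
    )
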
change:: :tags: :tickets: 1770 fix to lazy loads when mapping to joins .. change:: :tags: :tickets: all create()/drop() calls have a keyword argument of "connectable". "engine" is deprecated. .. change:: :tags: :tickets: fixed ms-sql connect() to work with adodbapi .. change:: :tags: :tickets: added "nowait" flag to Select() .. change:: :tags: :tickets: 271 inheritance check uses issubclass() instead of direct __mro__ check to make sure class A inherits from B, allowing mapper inheritance to more flexibly correspond to class inheritance .. change:: :tags: :tickets: 252 SelectResults will use a subselect, when calling an aggregate (i.e. max, min, etc.) on a SelectResults that has an ORDER BY clause .. change:: :tags: :tickets: 269 fixes to types so that database-specific types more easily used; fixes to mysql text types to work with this methodology .. change:: :tags: :tickets: some fixes to sqlite date type organization .. change:: :tags: :tickets: 263 added MSTinyInteger to MS-SQL .. changelog:: :version: 0.2.6 :released: Thu Jul 20 2006 .. change:: :tags: :tickets: 76 big overhaul to schema to allow truly composite primary and foreign key constraints, via new ForeignKeyConstraint and PrimaryKeyConstraint objects. Existing methods of primary/foreign key creation have not been changed but use these new objects behind the scenes. table creation and reflection is now more table oriented rather than column oriented. .. change:: :tags: :tickets: overhaul to MapperExtension calling scheme, wasnt working very well previously .. change:: :tags: :tickets: tweaks to ActiveMapper, supports self-referential relationships .. change:: :tags: :tickets: slight rearrangement to objectstore (in activemapper/threadlocal) so that the SessionContext is referenced by '.context' instead of subclassed directly. .. change:: :tags: :tickets: activemapper will use threadlocal's objectstore if the mod is activated when activemapper is imported .. change:: :tags: :tickets: small fix to URL regexp to allow filenames with '@' in them .. change:: :tags: :tickets: fixes to Session expunge/update/etc...needs more cleanup. .. change:: :tags: :tickets: select_table mappers *still* werent always compiling .. change:: :tags: :tickets: fixed up Boolean datatype .. change:: :tags: :tickets: added count()/count_by() to list of methods proxied by assignmapper; this also adds them to activemapper .. change:: :tags: :tickets: connection exceptions wrapped in DBAPIError .. change:: :tags: :tickets: ActiveMapper now supports autoloading column definitions from the database if you supply a __autoload__ = True attribute in your mapping inner-class. Currently this does not support reflecting any relationships. .. change:: :tags: :tickets: deferred column load could screw up the connection status in a flush() under some circumstances, this was fixed .. change:: :tags: :tickets: expunge() was not working with cascade, fixed. .. change:: :tags: :tickets: potential endless loop in cascading operations fixed. .. change:: :tags: :tickets: added "synonym()" function, applied to properties to have a propname the same as another, for the purposes of overriding props and allowing the original propname to be accessible in select_by(). .. change:: :tags: :tickets: fix to typing in clause construction which specifically helps type issues with polymorphic_union (CAST/ColumnClause propagates its type to proxy columns) .. 
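The composite ``PrimaryKeyConstraint`` / ``ForeignKeyConstraint`` objects introduced in 0.2.6 above look roughly like this; a sketch with invented table names::

    from sqlalchemy import (MetaData, Table, Column, Integer,
                            PrimaryKeyConstraint, ForeignKeyConstraint)

    metadata = MetaData()

    invoices = Table(
        "invoices", metadata,
        Column("invoice_id", Integer),
        Column("ref_num", Integer),
        PrimaryKeyConstraint("invoice_id", "ref_num"),
    )

    invoice_items = Table(
        "invoice_items", metadata,
        Column("item_id", Integer, primary_key=True),
        Column("invoice_id", Integer),
        Column("ref_num", Integer),
        # composite foreign key referencing both parts of the parent's primary key
        ForeignKeyConstraint(
            ["invoice_id", "ref_num"],
            ["invoices.invoice_id", "invoices.ref_num"],
        ),
    )
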
change:: :tags: :tickets: mapper compilation work ongoing, someday it'll work....moved around the initialization of MapperProperty objects to be after all mappers are created to better handle circular compilations. do_init() method is called on all properties now which are more aware of their "inherited" status if so. .. change:: :tags: :tickets: eager loads explicitly disallowed on self-referential relationships, or relationships to an inheriting mapper (which is also self-referential) .. change:: :tags: :tickets: 244 reduced bind param size in query._get to appease the picky oracle .. change:: :tags: :tickets: 234 added 'checkfirst' argument to table.create()/table.drop(), as well as table.exists() .. change:: :tags: :tickets: 245 some other ongoing fixes to inheritance .. change:: :tags: :tickets: attribute/backref/orphan/history-tracking tweaks as usual... .. changelog:: :version: 0.2.5 :released: Sat Jul 08 2006 .. change:: :tags: :tickets: fixed endless loop bug in select_by(), if the traversal hit two mappers that referenced each other .. change:: :tags: :tickets: upgraded all unittests to insert './lib/' into sys.path, working around new setuptools PYTHONPATH-killing behavior .. change:: :tags: :tickets: further fixes with attributes/dependencies/etc.... .. change:: :tags: :tickets: improved error handling for when DynamicMetaData is not connected .. change:: :tags: :tickets: MS-SQL support largely working (tested with pymssql) .. change:: :tags: :tickets: ordering of UPDATE and DELETE statements within groups is now in order of primary key values, for more deterministic ordering .. change:: :tags: :tickets: after_insert/delete/update mapper extensions now called per object, not per-object-per-table .. change:: :tags: :tickets: further fixes/refactorings to mapper compilation .. changelog:: :version: 0.2.4 :released: Tue Jun 27 2006 .. change:: :tags: :tickets: try/except when the mapper sets init.__name__ on a mapped class, supports python 2.3 .. change:: :tags: :tickets: fixed bug where threadlocal engine would still autocommit despite a transaction in progress .. change:: :tags: :tickets: lazy load and deferred load operations require the parent object to be in a Session to do the operation; whereas before the operation would just return a blank list or None, it now raises an exception. .. change:: :tags: :tickets: Session.update() is slightly more lenient if the session to which the given object was formerly attached to was garbage collected; otherwise still requires you explicitly remove the instance from the previous Session. .. change:: :tags: :tickets: fixes to mapper compilation, checking for more error conditions .. change:: :tags: :tickets: small fix to eager loading combined with ordering/limit/offset .. change:: :tags: :tickets: 206 utterly remarkable: added a single space between 'CREATE TABLE' and '(' since *thats how MySQL indicates a non- reserved word tablename.....* .. change:: :tags: :tickets: more fixes to inheritance, related to many-to-many relations properly saving .. change:: :tags: :tickets: fixed bug when specifying explicit module to mysql dialect .. change:: :tags: :tickets: when QueuePool times out it raises a TimeoutError instead of erroneously making another connection .. change:: :tags: :tickets: Queue.Queue usage in pool has been replaced with a locally modified version (works in py2.3/2.4!) that uses a threading.RLock for a mutex. 
this is to fix a reported case where a ConnectionFairy's __del__() method got called within the Queue's get() method, which then returns its connection to the Queue via the put() method, causing a reentrant hang unless threading.RLock is used. .. change:: :tags: :tickets: postgres will not place SERIAL keyword on a primary key column if it has a foreign key constraint .. change:: :tags: :tickets: 221 cursor() method on ConnectionFairy allows db-specific extension arguments to be propagated .. change:: :tags: :tickets: 225 lazy load bind params properly propagate column type .. change:: :tags: :tickets: new MySQL types: MSEnum, MSTinyText, MSMediumText, MSLongText, etc. more support for MS-specific length/precision params in numeric types patch courtesy Mike Bernson .. change:: :tags: :tickets: 224 some fixes to connection pool invalidate() .. changelog:: :version: 0.2.3 :released: Sat Jun 17 2006 .. change:: :tags: :tickets: overhaul to mapper compilation to be deferred. this allows mappers to be constructed in any order, and their relationships to each other are compiled when the mappers are first used. .. change:: :tags: :tickets: fixed a pretty big speed bottleneck in cascading behavior particularly when backrefs were in use .. change:: :tags: :tickets: the attribute instrumentation module has been completely rewritten; its now a large degree simpler and clearer, slightly faster. the "history" of an attribute is no longer micromanaged with each change and is instead part of a "CommittedState" object created when the instance is first loaded. HistoryArraySet is gone, the behavior of list attributes is now more open ended (i.e. theyre not sets anymore). .. change:: :tags: :tickets: py2.4 "set" construct used internally, falls back to sets.Set when "set" not available/ordering is needed. .. change:: :tags: :tickets: fix to transaction control, so that repeated rollback() calls dont fail (was failing pretty badly when flush() would raise an exception in a larger try/except transaction block) .. change:: :tags: :tickets: 151 "foreignkey" argument to relation() can also be a list. fixed auto-foreignkey detection .. change:: :tags: :tickets: fixed bug where tables with schema names werent getting indexed in the MetaData object properly .. change:: :tags: :tickets: 207 fixed bug where Column with redefined "key" property wasnt getting type conversion happening in the ResultProxy .. change:: :tags: :tickets: fixed 'port' attribute of URL to be an integer if present .. change:: :tags: :tickets: fixed old bug where if a many-to-many table mapped as "secondary" had extra columns, delete operations didnt work .. change:: :tags: :tickets: bugfixes for mapping against UNION queries .. change:: :tags: :tickets: fixed incorrect exception class thrown when no DB driver present .. change:: :tags: :tickets: 138 added NonExistentTable exception thrown when reflecting a table that doesnt exist .. change:: :tags: :tickets: small fix to ActiveMapper regarding one-to-one backrefs, other refactorings .. change:: :tags: :tickets: overridden constructor in mapped classes gets __name__ and __doc__ from the original class .. change:: :tags: :tickets: 200 fixed small bug in selectresult.py regarding mapper extension .. change:: :tags: :tickets: small tweak to cascade_mappers, not very strongly supported function at the moment .. change:: :tags: :tickets: 202 some fixes to between(), column.between() to propagate typing information better .. 
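The ``checkfirst`` argument to ``table.create()`` / ``table.drop()`` noted above is used like this; a minimal sketch against an illustrative SQLite URL::

    from sqlalchemy import create_engine, MetaData, Table, Column, Integer

    engine = create_engine("sqlite:///example.db")   # illustrative URL
    metadata = MetaData()
    users = Table("users", metadata, Column("id", Integer, primary_key=True))

    # emit CREATE TABLE only if the table is not already present
    users.create(engine, checkfirst=True)

    # likewise, emit DROP TABLE only if the table exists
    users.drop(engine, checkfirst=True)
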
change:: :tags: :tickets: 203 if an object fails to be constructed, is not added to the session .. change:: :tags: :tickets: CAST function has been made into its own clause object with its own compilation function in ansicompiler; allows MySQL to silently ignore most CAST calls since MySQL seems to only support the standard CAST syntax with Date types. MySQL-compatible CAST support for strings, ints, etc. a TODO .. changelog:: :version: 0.2.2 :released: Mon Jun 05 2006 .. change:: :tags: :tickets: 190 big improvements to polymorphic inheritance behavior, enabling it to work with adjacency list table structures .. change:: :tags: :tickets: major fixes and refactorings to inheritance relationships overall, more unit tests .. change:: :tags: :tickets: fixed "echo_pool" flag on create_engine() .. change:: :tags: :tickets: fix to docs, removed incorrect info that close() is unsafe to use with threadlocal strategy (its totally safe !) .. change:: :tags: :tickets: 188 create_engine() can take URLs as string or unicode .. change:: :tags: :tickets: firebird support partially completed; thanks to James Ralston and Brad Clements for their efforts. .. change:: :tags: :tickets: Oracle url translation was broken, fixed, will feed host/port/sid into cx_oracle makedsn() if 'database' field is present, else uses straight TNS name from the 'host' field .. change:: :tags: :tickets: fix to using unicode criterion for query.get()/query.load() .. change:: :tags: :tickets: count() function on selectables now uses table primary key or first column instead of "1" for criterion, also uses label "rowcount" instead of "count". .. change:: :tags: :tickets: got rudimental "mapping to multiple tables" functionality cleaned up, more correctly documented .. change:: :tags: :tickets: restored global_connect() function, attaches to a DynamicMetaData instance called "default_metadata". leaving MetaData arg to Table out will use the default metadata. .. change:: :tags: :tickets: fixes to session cascade behavior, entity_name propigation .. change:: :tags: :tickets: reorganized unittests into subdirectories .. change:: :tags: :tickets: more fixes to threadlocal connection nesting patterns .. changelog:: :version: 0.2.1 :released: Mon May 29 2006 .. change:: :tags: :tickets: "pool" argument to create_engine() properly propagates .. change:: :tags: :tickets: fixes to URL, raises exception if not parsed, does not pass blank fields along to the DB connect string (a string such as user:host@/db was breaking on postgres) .. change:: :tags: :tickets: small fixes to Mapper when it inserts and tries to get new primary key values back .. change:: :tags: :tickets: rewrote half of TLEngine, the ComposedSQLEngine used with 'strategy="threadlocal"'. it now properly implements engine.begin()/ engine.commit(), which nest fully with connection.begin()/trans.commit(). added about six unittests. .. change:: :tags: :tickets: major "duh" in pool.Pool, forgot to put back the WeakValueDictionary. unittest which was supposed to check for this was also silently missing it. fixed unittest to ensure that ConnectionFairy properly falls out of scope. .. change:: :tags: :tickets: placeholder dispose() method added to SingletonThreadPool, doesnt do anything yet .. change:: :tags: :tickets: rollback() is automatically called when an exception is raised, but only if theres no transaction in process (i.e. works more like autocommit). .. change:: :tags: :tickets: fixed exception raise in sqlite if no sqlite module present .. 
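The standalone CAST construct described above is the ``cast()`` function; a small sketch, shown with the modern ``select()`` calling form (0.2-era code passed a list of columns instead)::

    from sqlalchemy import MetaData, Table, Column, String, Numeric, cast, select

    metadata = MetaData()
    products = Table("products", metadata, Column("price", String(20)))

    # renders CAST(products.price AS NUMERIC(10, 2)) on most backends
    stmt = select(cast(products.c.price, Numeric(10, 2)))
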
change:: :tags: :tickets: added extra example detail for association object doc .. change:: :tags: :tickets: Connection adds checks for already being closed .. changelog:: :version: 0.2.0 :released: Sat May 27 2006 .. change:: :tags: :tickets: overhaul to Engine system so that what was formerly the SQLEngine is now a ComposedSQLEngine which consists of a variety of components, including a Dialect, ConnectionProvider, etc. This impacted all the db modules as well as Session and Mapper. .. change:: :tags: :tickets: create_engine now takes only RFC-1738-style strings: driver://user:password@host:port/database .. change:: :tags: :tickets: 152 total rewrite of connection-scoping methodology, Connection objects can now execute clause elements directly, added explicit "close" as well as support throughout Engine/ORM to handle closing properly, no longer relying upon __del__ internally to return connections to the pool. .. change:: :tags: :tickets: overhaul to Session interface and scoping. uses hibernate-style methods, including query(class), save(), save_or_update(), etc. no threadlocal scope is installed by default. Provides a binding interface to specific Engines and/or Connections so that underlying Schema objects do not need to be bound to an Engine. Added a basic SessionTransaction object that can simplistically aggregate transactions across multiple engines. .. change:: :tags: :tickets: overhaul to mapper's dependency and "cascade" behavior; dependency logic factored out of properties.py into a separate module "dependency.py". "cascade" behavior is now explicitly controllable, proper implementation of "delete", "delete-orphan", etc. dependency system can now determine at flush time if a child object has a parent or not so that it makes better decisions on how that child should be updated in the DB with regards to deletes. .. change:: :tags: :tickets: overhaul to Schema to build upon MetaData object instead of an Engine. Entire SQL/Schema system can be used with no Engines whatsoever, executed solely by an explicit Connection object. the "bound" methodlogy exists via the BoundMetaData for schema objects. ProxyEngine is generally not needed anymore and is replaced by DynamicMetaData. .. change:: :tags: :tickets: 167 true polymorphic behavior implemented, fixes .. change:: :tags: :tickets: 147 "oid" system has been totally moved into compile-time behavior; if they are used in an order_by where they are not available, the order_by doesnt get compiled, fixes .. change:: :tags: :tickets: overhaul to packaging; "mapping" is now "orm", "objectstore" is now "session", the old "objectstore" namespace gets loaded in via the "threadlocal" mod if used .. change:: :tags: :tickets: mods now called in via "import ". extensions favored over mods as mods are globally-monkeypatching .. change:: :tags: :tickets: 154 fix to add_property so that it propagates properties to inheriting mappers .. change:: :tags: :tickets: backrefs create themselves against primary mapper of its originating property, priamry/secondary join arguments can be specified to override. helps their usage with polymorphic mappers .. change:: :tags: :tickets: 31 "table exists" function has been implemented .. change:: :tags: :tickets: 98 "create_all/drop_all" added to MetaData object .. change:: :tags: :tickets: improvements and fixes to topological sort algorithm, as well as more unit tests .. change:: :tags: :tickets: tutorial page added to docs which also can be run with a custom doctest runner to ensure its properly working. 
docs generally overhauled to deal with new code patterns .. change:: :tags: :tickets: many more fixes, refactorings. .. change:: :tags: :tickets: migration guide is available on the Wiki at http://www.sqlalchemy.org/trac/wiki/02Migration SQLAlchemy-0.8.4/doc/_sources/changelog/changelog_03.txt0000644000076500000240000024212612251147171023551 0ustar classicstaff00000000000000 ============== 0.3 Changelog ============== .. changelog:: :version: 0.3.11 :released: Sun Oct 14 2007 .. change:: :tags: sql :tickets: tweak DISTINCT precedence for clauses like `func.count(t.c.col.distinct())` .. change:: :tags: sql :tickets: 719 Fixed detection of internal '$' characters in :bind$params .. change:: :tags: sql :tickets: 768 dont assume join criterion consists only of column objects .. change:: :tags: sql :tickets: 764 adjusted operator precedence of NOT to match '==' and others, so that ~(x==y) produces NOT (x=y), which is compatible with MySQL < 5.0 (doesn't like "NOT x=y") .. change:: :tags: orm :tickets: 687 added a check for joining from A->B using join(), along two different m2m tables. this raises an error in 0.3 but is possible in 0.4 when aliases are used. .. change:: :tags: orm :tickets: fixed small exception throw bug in Session.merge() .. change:: :tags: orm :tickets: fixed bug where mapper, being linked to a join where one table had no PK columns, would not detect that the joined table had no PK. .. change:: :tags: orm :tickets: 769 fixed bugs in determining proper sync clauses from custom inherit conditions .. change:: :tags: orm :tickets: 813 backref remove object operation doesn't fail if the other-side collection doesn't contain the item, supports noload collections .. change:: :tags: engine :tickets: fixed another occasional race condition which could occur when using pool with threadlocal setting .. change:: :tags: mysql :tickets: fixed specification of YEAR columns when generating schema .. change:: :tags: mssql :tickets: 679 added support for TIME columns (simulated using DATETIME) .. change:: :tags: mssql :tickets: 721 added support for BIGINT, MONEY, SMALLMONEY, UNIQUEIDENTIFIER and SQL_VARIANT .. change:: :tags: mssql :tickets: 684 index names are now quoted when dropping from reflected tables .. change:: :tags: mssql :tickets: can now specify a DSN for PyODBC, using a URI like mssql:///?dsn=bob .. change:: :tags: postgres :tickets: when reflecting tables from alternate schemas, the "default" placed upon the primary key, i.e. usually a sequence name, has the "schema" name unconditionally quoted, so that schema names which need quoting are fine. its slightly unnecessary for schema names which don't need quoting but not harmful. .. change:: :tags: sqlite :tickets: passthrough for stringified dates .. change:: :tags: firebird :tickets: supports_sane_rowcount() set to False due to ticket #370 (right way). .. change:: :tags: firebird :tickets: fixed reflection of Column's nullable property. .. change:: :tags: oracle :tickets: 622, 751 removed LONG_STRING, LONG_BINARY from "binary" types, so type objects don't try to read their values as LOB. .. changelog:: :version: 0.3.10 :released: Fri Jul 20 2007 .. change:: :tags: general :tickets: a new mutex that was added in 0.3.9 causes the pool_timeout feature to fail during a race condition; threads would raise TimeoutError immediately with no delay if many threads push the pool into overflow at the same time. this issue has been fixed. .. change:: :tags: sql :tickets: got connection-bound metadata to work with implicit execution .. 
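The RFC-1738 URL format and engine-free schema definition described in the 0.2.0 notes above still work the same way; a minimal sketch with placeholder credentials (the dialect name was ``postgres`` in this era, ``postgresql`` in current releases)::

    from sqlalchemy import create_engine, MetaData, Table, Column, Integer, String

    # driver://user:password@host:port/database
    engine = create_engine("postgresql://scott:tiger@localhost:5432/mydb")

    # schema objects no longer need an engine at definition time
    metadata = MetaData()
    users = Table(
        "users", metadata,
        Column("id", Integer, primary_key=True),
        Column("name", String(50)),
    )

    # an Engine (or Connection) is supplied only when DDL is emitted
    metadata.create_all(engine)
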
change:: :tags: sql :tickets: 667 foreign key specs can have any chararcter in their identifiers .. change:: :tags: sql :tickets: 664 added commutativity-awareness to binary clause comparisons to each other, improves ORM lazy load optimization .. change:: :tags: orm :tickets: cleanup to connection-bound sessions, SessionTransaction .. change:: :tags: postgres :tickets: 571 fixed max identifier length (63) .. changelog:: :version: 0.3.9 :released: Sun Jul 15 2007 .. change:: :tags: general :tickets: 607 better error message for NoSuchColumnError .. change:: :tags: general :tickets: 428 finally figured out how to get setuptools version in, available as sqlalchemy.__version__ .. change:: :tags: general :tickets: the various "engine" arguments, such as "engine", "connectable", "engine_or_url", "bind_to", etc. are all present, but deprecated. they all get replaced by the single term "bind". you also set the "bind" of MetaData using metadata.bind = .. change:: :tags: ext :tickets: iteration over dict association proxies is now dict-like, not InstrumentedList-like (e.g. over keys instead of values) .. change:: :tags: ext :tickets: 597 association proxies no longer bind tightly to source collections, and are constructed with a thunk instead .. change:: :tags: ext :tickets: added selectone_by() to assignmapper .. change:: :tags: orm :tickets: forwards-compatibility with 0.4: added one(), first(), and all() to Query. almost all Query functionality from 0.4 is present in 0.3.9 for forwards-compat purposes. .. change:: :tags: orm :tickets: reset_joinpoint() really really works this time, promise ! lets you re-join from the root: query.join(['a', 'b']).filter().reset_joinpoint().\ join(['a', 'c']).filter().all() in 0.4 all join() calls start from the "root" .. change:: :tags: orm :tickets: 613 added synchronization to the mapper() construction step, to avoid thread collisions when pre-existing mappers are compiling in a different thread .. change:: :tags: orm :tickets: a warning is issued by Mapper when two primary key columns of the same name are munged into a single attribute. this happens frequently when mapping to joins (or inheritance). .. change:: :tags: orm :tickets: 598 synonym() properties are fully supported by all Query joining/ with_parent operations .. change:: :tags: orm :tickets: fixed very stupid bug when deleting items with many-to-many uselist=False relations .. change:: :tags: orm :tickets: remember all that stuff about polymorphic_union ? for joined table inheritance ? Funny thing... You sort of don't need it for joined table inheritance, you can just string all the tables together via outerjoin(). The UNION still applies if concrete tables are involved, though (since nothing to join them on). .. change:: :tags: orm :tickets: small fix to eager loading to better work with eager loads to polymorphic mappers that are using a straight "outerjoin" clause .. change:: :tags: sql :tickets: ForeignKey to a table in a schema thats not the default schema requires the schema to be explicit; i.e. ForeignKey('alt_schema.users.id') .. change:: :tags: sql :tickets: MetaData can now be constructed with an engine or url as the first argument, just like BoundMetaData .. change:: :tags: sql :tickets: BoundMetaData is now deprecated, and MetaData is a direct substitute. .. change:: :tags: sql :tickets: DynamicMetaData has been renamed to ThreadLocalMetaData. the DynamicMetaData name is deprecated and is an alias for ThreadLocalMetaData or a regular MetaData if threadlocal=False .. 
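The ``one()`` / ``first()`` / ``all()`` methods added to ``Query`` above behave as follows; a self-contained sketch using the later declarative style purely for brevity (it did not exist in 0.3)::

    from sqlalchemy import Column, Integer, String, create_engine
    from sqlalchemy.orm import Session, declarative_base

    Base = declarative_base()

    class User(Base):
        __tablename__ = "users"
        id = Column(Integer, primary_key=True)
        name = Column(String(50))

    engine = create_engine("sqlite://")
    Base.metadata.create_all(engine)
    session = Session(engine)

    users = session.query(User).filter(User.name == "ed").all()    # a list, possibly empty
    first = session.query(User).filter(User.name == "ed").first()  # first result or None
    only = session.query(User).filter(User.id == 5).one()          # raises unless exactly one row matches
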
change:: :tags: sql :tickets: composite primary key is represented as a non-keyed set to allow for composite keys consisting of cols with the same name; occurs within a Join. helps inheritance scenarios formulate correct PK. .. change:: :tags: sql :tickets: 185 improved ability to get the "correct" and most minimal set of primary key columns from a join, equating foreign keys and otherwise equated columns. this is also mostly to help inheritance scenarios formulate the best choice of primary key columns. .. change:: :tags: sql :tickets: added 'bind' argument to Sequence.create()/drop(), ColumnDefault.execute() .. change:: :tags: sql :tickets: 650 columns can be overridden in a reflected table with a "key" attribute different than the column's name, including for primary key columns .. change:: :tags: sql :tickets: 657 fixed "ambiguous column" result detection, when dupe col names exist in a result .. change:: :tags: sql :tickets: some enhancements to "column targeting", the ability to match a column to a "corresponding" column in another selectable. this affects mostly ORM ability to map to complex joins .. change:: :tags: sql :tickets: 619 MetaData and all SchemaItems are safe to use with pickle. slow table reflections can be dumped into a pickled file to be reused later. Just reconnect the engine to the metadata after unpickling. .. change:: :tags: sql :tickets: added a mutex to QueuePool's "overflow" calculation to prevent a race condition that can bypass max_overflow .. change:: :tags: sql :tickets: 623 fixed grouping of compound selects to give correct results. will break on sqlite in some cases, but those cases were producing incorrect results anyway, sqlite doesn't support grouped compound selects .. change:: :tags: sql :tickets: 620 fixed precedence of operators so that parenthesis are correctly applied .. change:: :tags: sql :tickets: 545 calling .in_() (i.e. with no arguments) will return "CASE WHEN ( IS NULL) THEN NULL ELSE 0 END = 1)", so that NULL or False is returned in all cases, rather than throwing an error .. change:: :tags: sql :tickets: fixed "where"/"from" criterion of select() to accept a unicode string in addition to regular string - both convert to text() .. change:: :tags: sql :tickets: 558 added standalone distinct() function in addition to column.distinct() .. change:: :tags: sql :tickets: result.last_inserted_ids() should return a list that is identically sized to the primary key constraint of the table. values that were "passively" created and not available via cursor.lastrowid will be None. .. change:: :tags: sql :tickets: 589 long-identifier detection fixed to use > rather than >= for max ident length .. change:: :tags: sql :tickets: 593 fixed bug where selectable.corresponding_column(selectable.c.col) would not return selectable.c.col, if the selectable is a join of a table and another join involving the same table. messed up ORM decision making .. change:: :tags: sql :tickets: 595 added Interval type to types.py .. change:: :tags: mysql :tickets: 625 fixed catching of some errors that imply a dropped connection .. change:: :tags: mysql :tickets: 624 fixed escaping of the modulo operator .. change:: :tags: mysql :tickets: 590 added 'fields' to reserved words .. change:: :tags: mysql :tickets: various reflection enhancement/fixes .. change:: :tags: oracle :tickets: 604 datetime fixes: got subsecond TIMESTAMP to work, added OracleDate which supports types.Date with only year/month/day .. 
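The "key" override mentioned above maps a database column name to a different attribute key on the Table; a minimal sketch (the same ``key=`` argument applies when overriding columns of a reflected table)::

    from sqlalchemy import Table, Column, Integer, MetaData

    metadata = MetaData()

    # the database column is named "user_id", but Python code addresses it as .id
    users = Table(
        "users", metadata,
        Column("user_id", Integer, key="id", primary_key=True),
    )

    print(users.c.id.name)   # -> "user_id"
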
change:: :tags: oracle :tickets: added dialect flag "auto_convert_lobs", defaults to True; will cause any LOB objects detected in a result set to be forced into OracleBinary so that the LOB is read() automatically, if no typemap was present (i.e., if a textual execute() was issued). .. change:: :tags: oracle :tickets: 624 mod operator '%' produces MOD .. change:: :tags: oracle :tickets: 542 converts cx_oracle datetime objects to Python datetime.datetime when Python 2.3 used .. change:: :tags: oracle :tickets: fixed unicode conversion in Oracle TEXT type .. change:: :tags: postgres :tickets: 624 fixed escaping of the modulo operator .. change:: :tags: postgres :tickets: 570 added support for reflection of domains .. change:: :tags: postgres :tickets: types which are missing during reflection resolve to Null type instead of raising an error .. change:: :tags: postgres :tickets: the fix in "schema" above fixes reflection of foreign keys from an alt-schema table to a public schema table .. change:: :tags: sqlite :tickets: rearranged dialect initialization so it has time to warn about pysqlite1 being too old. .. change:: :tags: sqlite :tickets: sqlite better handles datetime/date/time objects mixed and matched with various Date/Time/DateTime columns .. change:: :tags: sqlite :tickets: 603 string PK column inserts dont get overwritten with OID .. change:: :tags: mssql :tickets: 634 fix port option handling for pyodbc .. change:: :tags: mssql :tickets: now able to reflect start and increment values for identity columns .. change:: :tags: mssql :tickets: preliminary support for using scope_identity() with pyodbc .. changelog:: :version: 0.3.8 :released: Sat Jun 02 2007 .. change:: :tags: engines :tickets: added detach() to Connection, allows underlying DBAPI connection to be detached from its pool, closing on dereference/close() instead of being reused by the pool. .. change:: :tags: engines :tickets: added invalidate() to Connection, immediately invalidates the Connection and its underlying DBAPI connection. .. change:: :tags: sql :tickets: _Label class overrides compare_self to return its ultimate object. meaning, if you say someexpr.label('foo') == 5, it produces the correct "someexpr == 5". .. change:: :tags: sql :tickets: _Label propagates "_hide_froms()" so that scalar selects behave more properly with regards to FROM clause #574 .. change:: :tags: sql :tickets: fix to long name generation when using oid_column as an order by (oids used heavily in mapper queries) .. change:: :tags: sql :tickets: significant speed improvement to ResultProxy, pre-caches TypeEngine dialect implementations and saves on function calls per column .. change:: :tags: sql :tickets: parenthesis are applied to clauses via a new _Grouping construct. uses operator precedence to more intelligently apply parenthesis to clauses, provides cleaner nesting of clauses (doesnt mutate clauses placed in other clauses, i.e. no 'parens' flag) .. change:: :tags: sql :tickets: added 'modifier' keyword, works like func. except does not add parenthesis. e.g. select([modifier.DISTINCT(...)]) etc. .. change:: :tags: sql :tickets: 578 removed "no group by's in a select thats part of a UNION" restriction .. change:: :tags: orm :tickets: added reset_joinpoint() method to Query, moves the "join point" back to the starting mapper. 0.4 will change the behavior of join() to reset the "join point" in all cases so this is an interim method. for forwards compatibility, ensure joins across multiple relations are specified using a single join(), i.e. 
join(['a', 'b', 'c']). .. change:: :tags: orm :tickets: fixed bug in query.instances() that wouldnt handle more than on additional mapper or one additional column. .. change:: :tags: orm :tickets: "delete-orphan" no longer implies "delete". ongoing effort to separate the behavior of these two operations. .. change:: :tags: orm :tickets: many-to-many relationships properly set the type of bind params for delete operations on the association table .. change:: :tags: orm :tickets: many-to-many relationships check that the number of rows deleted from the association table by a delete operation matches the expected results .. change:: :tags: orm :tickets: session.get() and session.load() propagate \**kwargs through to query .. change:: :tags: orm :tickets: 577 fix to polymorphic query which allows the original polymorphic_union to be embedded into a correlated subquery .. change:: :tags: orm :tickets: fix to select_by(=) -style joins in conjunction with many-to-many relationships, bug introduced in r2556 .. change:: :tags: orm :tickets: the "primary_key" argument to mapper() is propagated to the "polymorphic" mapper. primary key columns in this list get normalized to that of the mapper's local table. .. change:: :tags: orm :tickets: restored logging of "lazy loading clause" under sa.orm.strategies logger, got removed in 0.3.7 .. change:: :tags: orm :tickets: improved support for eagerloading of properties off of mappers that are mapped to select() statements; i.e. eagerloader is better at locating the correct selectable with which to attach its LEFT OUTER JOIN. .. change:: :tags: mysql :tickets: Nearly all MySQL column types are now supported for declaration and reflection. Added NCHAR, NVARCHAR, VARBINARY, TINYBLOB, LONGBLOB, YEAR .. change:: :tags: mysql :tickets: The sqltypes.Binary passthrough now always builds a BLOB, avoiding problems with very old database versions .. change:: :tags: mysql :tickets: support for column-level CHARACTER SET and COLLATE declarations, as well as ASCII, UNICODE, NATIONAL and BINARY shorthand. .. change:: :tags: firebird :tickets: set max identifier length to 31 .. change:: :tags: firebird :tickets: supports_sane_rowcount() set to False due to ticket #370. versioned_id_col feature wont work in FB. .. change:: :tags: firebird :tickets: some execution fixes .. change:: :tags: firebird :tickets: new association proxy implementation, implementing complete proxies to list, dict and set-based relation collections .. change:: :tags: firebird :tickets: added orderinglist, a custom list class that synchronizes an object attribute with that object's position in the list .. change:: :tags: firebird :tickets: small fix to SelectResultsExt to not bypass itself during select(). .. change:: :tags: firebird :tickets: added filter(), filter_by() to assignmapper .. changelog:: :version: 0.3.7 :released: Sun Apr 29 2007 .. change:: :tags: engines :tickets: warnings module used for issuing warnings (instead of logging) .. change:: :tags: engines :tickets: 480 cleanup of DBAPI import strategies across all engines .. change:: :tags: engines :tickets: refactoring of engine internals which reduces complexity, number of codepaths; places more state inside of ExecutionContext to allow more dialect control of cursor handling, result sets. ResultProxy totally refactored and also has two versions of "buffered" result sets used for different purposes. .. change:: :tags: engines :tickets: 514 server side cursor support fully functional in postgres. .. 
change:: :tags: engines :tickets: improved framework for auto-invalidation of connections that have lost their underlying database, via dialect-specific detection of exceptions corresponding to that database's disconnect related error messages. Additionally, when a "connection no longer open" condition is detected, the entire connection pool is discarded and replaced with a new instance. #516 .. change:: :tags: engines :tickets: 521 the dialects within sqlalchemy.databases become a setuptools entry points. loading the built-in database dialects works the same as always, but if none found will fall back to trying pkg_resources to load an external module .. change:: :tags: engines :tickets: Engine contains a "url" attribute referencing the url.URL object used by create_engine(). .. change:: :tags: sql :tickets: keys() of result set columns are not lowercased, come back exactly as they're expressed in cursor.description. note this causes colnames to be all caps in oracle. .. change:: :tags: sql :tickets: preliminary support for unicode table names, column names and SQL statements added, for databases which can support them. Works with sqlite and postgres so far. Mysql *mostly* works except the has_table() function does not work. Reflection works too. .. change:: :tags: sql :tickets: 522 the Unicode type is now a direct subclass of String, which now contains all the "convert_unicode" logic. This helps the variety of unicode situations that occur in db's such as MS-SQL to be better handled and allows subclassing of the Unicode datatype. .. change:: :tags: sql :tickets: ClauseElements can be used in in_() clauses now, such as bind parameters, etc. #476 .. change:: :tags: sql :tickets: reverse operators implemented for `CompareMixin` elements, allows expressions like "5 + somecolumn" etc. #474 .. change:: :tags: sql :tickets: the "where" criterion of an update() and delete() now correlates embedded select() statements against the table being updated or deleted. this works the same as nested select() statement correlation, and can be disabled via the correlate=False flag on the embedded select(). .. change:: :tags: sql :tickets: 512 column labels are now generated in the compilation phase, which means their lengths are dialect-dependent. So on oracle a label that gets truncated to 30 chars will go out to 63 characters on postgres. Also, the true labelname is always attached as the accessor on the parent Selectable so theres no need to be aware of the "truncated" label names. .. change:: :tags: sql :tickets: column label and bind param "truncation" also generate deterministic names now, based on their ordering within the full statement being compiled. this means the same statement will produce the same string across application restarts and allowing DB query plan caching to work better. .. change:: :tags: sql :tickets: 513 the "mini" column labels generated when using subqueries, which are to work around glitchy SQLite behavior that doesnt understand "foo.id" as equivalent to "id", are now only generated in the case that those named columns are selected from (part of) .. change:: :tags: sql :tickets: the label() method on ColumnElement will properly propagate the TypeEngine of the base element out to the label, including a label() created from a scalar=True select() statement. .. change:: :tags: sql :tickets: 513 MS-SQL better detects when a query is a subquery and knows not to generate ORDER BY phrases for those .. 
change:: :tags: sql :tickets: 505 fix for fetchmany() "size" argument being positional in most dbapis .. change:: :tags: sql :tickets: sending None as an argument to func. will produce an argument of NULL .. change:: :tags: sql :tickets: query strings in unicode URLs get keys encoded to ascii for \**kwargs compat .. change:: :tags: sql :tickets: 523 slight tweak to raw execute() change to also support tuples for positional parameters, not just lists .. change:: :tags: sql :tickets: fix to case() construct to propagate the type of the first WHEN condition as the return type of the case statement .. change:: :tags: orm :tickets: fixed critical issue when, after options(eagerload()) is used, the mapper would then always apply query "wrapping" behavior for all subsequent LIMIT/OFFSET/DISTINCT queries, even if no eager loading was applied on those subsequent queries. .. change:: :tags: orm :tickets: 541 added query.with_parent(someinstance) method. searches for target instance using lazy join criterion from parent instance. takes optional string "property" to isolate the desired relation. also adds static Query.query_from_parent(instance, property) version. .. change:: :tags: orm :tickets: 554 improved query.XXX_by(someprop=someinstance) querying to use similar methodology to with_parent, i.e. using the "lazy" clause which prevents adding the remote instance's table to the SQL, thereby making more complex conditions possible .. change:: :tags: orm :tickets: added generative versions of aggregates, i.e. sum(), avg(), etc. to query. used via query.apply_max(), apply_sum(), etc. #552 .. change:: :tags: orm :tickets: fix to using distinct() or distinct=True in combination with join() and similar .. change:: :tags: orm :tickets: corresponding to label/bindparam name generation, eager loaders generate deterministic names for the aliases they create using md5 hashes. .. change:: :tags: orm :tickets: improved/fixed custom collection classes when giving it "set"/ "sets.Set" classes or subclasses (was still looking for append() methods on them during lazy loads) .. change:: :tags: orm :tickets: restored old "column_property()" ORM function (used to be called "column()") to force any column expression to be added as a property on a mapper, particularly those that aren't present in the mapped selectable. this allows "scalar expressions" of any kind to be added as relations (though they have issues with eager loads). .. change:: :tags: orm :tickets: 533 fix to many-to-many relationships targeting polymorphic mappers .. change:: :tags: orm :tickets: 543 making progress with session.merge() as well as combining its usage with entity_name .. change:: :tags: orm :tickets: the usual adjustments to relationships between inheriting mappers, in this case establishing relation()s to subclass mappers where the join conditions come from the superclass' table .. change:: :tags: informix :tickets: informix support added ! courtesy James Zhang, who put a ton of effort in. .. change:: :tags: sqlite :tickets: removed silly behavior where sqlite would reflect UNIQUE indexes as part of the primary key (?!) .. change:: :tags: oracle :tickets: small fix to allow successive compiles of the same SELECT object which features LIMIT/OFFSET. oracle dialect needs to modify the object to have ROW_NUMBER OVER and wasn't performing the full series of steps on successive compiles. .. change:: :tags: mysql :tickets: support for SSL arguments given as inline within URL query string, prefixed with "ssl\_", courtesy terjeros@gmail.com. .. 
change:: :tags: , mysql :tickets: mysql uses "DESCRIBE.", catching exceptions if table doesnt exist, in order to determine if a table exists. this supports unicode table names as well as schema names. tested with MySQL5 but should work with 4.1 series as well. (#557) .. change:: :tags: extensions :tickets: big fix to AssociationProxy so that multiple AssociationProxy objects can be associated with a single association collection. .. change:: :tags: extensions :tickets: assign_mapper names methods according to their keys (i.e. __name__) #551 .. change:: :tags: mssql :tickets: pyodbc is now the preferred DB-API for MSSQL, and if no module is specifically requested, will be loaded first on a module probe. .. change:: :tags: mssql :tickets: The @@SCOPE_IDENTITY is now used instead of @@IDENTITY. This behavior may be overridden with the engine_connect "use_scope_identity" keyword parameter, which may also be specified in the dburi. .. changelog:: :version: 0.3.6 :released: Fri Mar 23 2007 .. change:: :tags: sql :tickets: bindparam() names are now repeatable! specify two distinct bindparam()s with the same name in a single statement, and the key will be shared. proper positional/named args translate at compile time. for the old behavior of "aliasing" bind parameters with conflicting names, specify "unique=True" - this option is still used internally for all the auto-genererated (value-based) bind parameters. .. change:: :tags: sql :tickets: slightly better support for bind params as column clauses, either via bindparam() or via literal(), i.e. select([literal('foo')]) .. change:: :tags: sql :tickets: MetaData can bind to an engine either via "url" or "engine" kwargs to constructor, or by using connect() method. BoundMetaData is identical to MetaData except engine_or_url param is required. DynamicMetaData is the same and provides thread-local connections be default. .. change:: :tags: sql :tickets: exists() becomes useable as a standalone selectable, not just in a WHERE clause, i.e. exists([columns], criterion).select() .. change:: :tags: sql :tickets: correlated subqueries work inside of ORDER BY, GROUP BY .. change:: :tags: sql :tickets: fixed function execution with explicit connections, i.e. conn.execute(func.dosomething()) .. change:: :tags: sql :tickets: use_labels flag on select() wont auto-create labels for literal text column elements, since we can make no assumptions about the text. to create labels for literal columns, you can say "somecol AS somelabel", or use literal_column("somecol").label("somelabel") .. change:: :tags: sql :tickets: quoting wont occur for literal columns when they are "proxied" into the column collection for their selectable (is_literal flag is propagated). literal columns are specified via literal_column("somestring"). .. change:: :tags: sql :tickets: added "fold_equivalents" boolean argument to Join.select(), which removes 'duplicate' columns from the resulting column clause that are known to be equivalent based on the join condition. this is of great usage when constructing subqueries of joins which Postgres complains about if duplicate column names are present. .. change:: :tags: sql :tickets: 503 fixed use_alter flag on ForeignKeyConstraint .. change:: :tags: sql :tickets: 506 fixed usage of 2.4-only "reversed" in topological.py .. change:: :tags: sql :tickets: 501 for hackers, refactored the "visitor" system of ClauseElement and SchemaItem so that the traversal of items is controlled by the ClauseVisitor itself, using the method visitor.traverse(item). 
accept_visitor() methods can still be called directly but will not do any traversal of child items. ClauseElement/SchemaItem now have a configurable get_children() method to return the collection of child elements for each parent object. This allows the full traversal of items to be clear and unambiguous (as well as loggable), with an easy method of limiting a traversal (just pass flags which are picked up by appropriate get_children() methods). .. change:: :tags: sql :tickets: the "else\_" parameter to the case statement now properly works when set to zero. .. change:: :tags: orm :tickets: the full featureset of the SelectResults extension has been merged into a new set of methods available off of Query. These methods all provide "generative" behavior, whereby the Query is copied and a new one returned with additional criterion added. The new methods include: * filter() - applies select criterion to the query * filter_by() - applies "by"-style criterion to the query * avg() - return the avg() function on the given column * join() - join to a property (or across a list of properties) * outerjoin() - like join() but uses LEFT OUTER JOIN * limit()/offset() - apply LIMIT/OFFSET range-based access which applies limit/offset: session.query(Foo)[3:5] * distinct() - apply DISTINCT * list() - evaluate the criterion and return results no incompatible changes have been made to Query's API and no methods have been deprecated. Existing methods like select(), select_by(), get(), get_by() all execute the query at once and return results like they always did. join_to()/join_via() are still there although the generative join()/outerjoin() methods are easier to use. .. change:: :tags: orm :tickets: the return value for multiple mappers used with instances() now returns a cartesian product of the requested list of mappers, represented as a list of tuples. this corresponds to the documented behavior. So that instances match up properly, the "uniquing" is disabled when this feature is used. .. change:: :tags: orm :tickets: Query has add_entity() and add_column() generative methods. these will add the given mapper/class or ColumnElement to the query at compile time, and apply them to the instances() method. the user is responsible for constructing reasonable join conditions (otherwise you can get full cartesian products). result set is the list of tuples, non-uniqued. .. change:: :tags: orm :tickets: strings and columns can also be sent to the \*args of instances() where those exact result columns will be part of the result tuples. .. change:: :tags: orm :tickets: a full select() construct can be passed to query.select() (which worked anyway), but also query.selectfirst(), query.selectone() which will be used as is (i.e. no query is compiled). works similarly to sending the results to instances(). .. change:: :tags: orm :tickets: 495 eager loading will not "aliasize" "order by" clauses that were placed in the select statement by something other than the eager loader itself, to fix possibility of dupe columns as illustrated in. however, this means you have to be more careful with the columns placed in the "order by" of Query.select(), that you have explicitly named them in your criterion (i.e. you cant rely on the eager loader adding them in for you) .. change:: :tags: orm :tickets: added a handy multi-use "identity_key()" method to Session, allowing the generation of identity keys for primary key values, instances, and rows, courtesy Daniel Miller .. 
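The generative ``Query`` methods merged in from SelectResults above chain like this; a sketch assuming mapped ``User`` / ``Address`` classes with a ``User.addresses`` relationship (hypothetical names, along the lines of the earlier ORM sketch)::

    q = session.query(User)                  # 'session' as in the earlier sketch

    q = q.filter_by(name="ed")               # each call copies the Query and returns a new one
    q = q.join(User.addresses).filter(Address.email_address.like("%@example.com"))
    q = q.order_by(User.name)

    window = q[3:5]                          # slicing applies LIMIT/OFFSET
    rows = q.limit(10).offset(20).all()
    unique_rows = q.distinct().all()
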
change:: :tags: orm :tickets: 249 many-to-many table will be properly handled even for operations that occur on the "backref" side of the operation .. change:: :tags: orm :tickets: 492 added "refresh-expire" cascade. allows refresh() and expire() calls to propagate along relationships. .. change:: :tags: orm :tickets: 493 more fixes to polymorphic relations, involving proper lazy-clause generation on many-to-one relationships to polymorphic mappers. also fixes to detection of "direction", more specific targeting of columns that belong to the polymorphic union vs. those that dont. .. change:: :tags: orm :tickets: some fixes to relationship calcs when using "viewonly=True" to pull in other tables into the join condition which arent parent of the relationship's parent/child mappings .. change:: :tags: orm :tickets: flush fixes on cyclical-referential relationships that contain references to other instances outside of the cyclical chain, when some of the objects in the cycle are not actually part of the flush .. change:: :tags: orm :tickets: 500 put an aggressive check for "flushing object A with a collection of B's, but you put a C in the collection" error condition - **even if C is a subclass of B**, unless B's mapper loads polymorphically. Otherwise, the collection will later load a "B" which should be a "C" (since its not polymorphic) which breaks in bi-directional relationships (i.e. C has its A, but A's backref will lazyload it as a different instance of type "B") This check is going to bite some of you who do this without issues, so the error message will also document a flag "enable_typechecks=False" to disable this checking. But be aware that bi-directional relationships in particular become fragile without this check. .. change:: :tags: extensions :tickets: 472 options() method on SelectResults now implemented "generatively" like the rest of the SelectResults methods. But you're going to just use Query now anyway. .. change:: :tags: extensions :tickets: query() method is added by assignmapper. this helps with navigating to all the new generative methods on Query. .. change:: :tags: ms-sql :tickets: removed seconds input on DATE column types (probably should remove the time altogether) .. change:: :tags: ms-sql :tickets: null values in float fields no longer raise errors .. change:: :tags: ms-sql :tickets: LIMIT with OFFSET now raises an error (MS-SQL has no OFFSET support) .. change:: :tags: ms-sql :tickets: 509 added an facility to use the MSSQL type VARCHAR(max) instead of TEXT for large unsized string fields. Use the new "text_as_varchar" to turn it on. .. change:: :tags: ms-sql :tickets: ORDER BY clauses without a LIMIT are now stripped in subqueries, as MS-SQL forbids this usage .. change:: :tags: ms-sql :tickets: 480 cleanup of module importing code; specifiable DB-API module; more explicit ordering of module preferences. .. change:: :tags: oracle :tickets: got binary working for any size input ! cx_oracle works fine, it was my fault as BINARY was being passed and not BLOB for setinputsizes (also unit tests werent even setting input sizes). .. change:: :tags: oracle :tickets: also fixed CLOB read/write on a separate changeset. .. change:: :tags: oracle :tickets: auto_setinputsizes defaults to True for Oracle, fixed cases where it improperly propagated bad types. .. change:: :tags: mysql :tickets: added a catchall \**kwargs to MSString, to help reflection of obscure types (like "varchar() binary" in MS 4.0) .. 
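The "refresh-expire" cascade added above is requested on a relationship like this; a self-contained sketch written with ``relationship()`` and the later declarative extension for brevity (the 0.3 spelling was ``relation()`` inside a ``mapper()`` properties dict)::

    from sqlalchemy import Column, ForeignKey, Integer
    from sqlalchemy.orm import declarative_base, relationship

    Base = declarative_base()

    class User(Base):
        __tablename__ = "users"
        id = Column(Integer, primary_key=True)
        addresses = relationship(
            "Address",
            # session.refresh(user) / session.expire(user) now also refresh
            # or expire the items in this collection
            cascade="save-update, merge, refresh-expire",
        )

    class Address(Base):
        __tablename__ = "addresses"
        id = Column(Integer, primary_key=True)
        user_id = Column(Integer, ForeignKey("users.id"))
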
change:: :tags: mysql :tickets: added explicit MSTimeStamp type which takes effect when using types.TIMESTAMP. .. changelog:: :version: 0.3.5 :released: Thu Feb 22 2007 .. change:: :tags: sql :tickets: the value of "case_sensitive" defaults to True now, regardless of the casing of the identifier, unless specifically set to False. this is because the object might be label'ed as something else which does contain mixed case, and propigating "case_sensitive=False" breaks that. Other fixes to quoting when using labels and "fake" column objects .. change:: :tags: sql :tickets: added a "supports_execution()" method to ClauseElement, so that individual kinds of clauses can express if they are appropriate for executing...such as, you can execute a "select", but not a "Table" or a "Join". .. change:: :tags: sql :tickets: fixed argument passing to straight textual execute() on engine, connection. can handle \*args or a list instance for positional, \**kwargs or a dict instance for named args, or a list of list or dicts to invoke executemany() .. change:: :tags: sql :tickets: small fix to BoundMetaData to accept unicode or string URLs .. change:: :tags: sql :tickets: 466 fixed named PrimaryKeyConstraint generation courtesy andrija at gmail .. change:: :tags: sql :tickets: 464 fixed generation of CHECK constraints on columns .. change:: :tags: sql :tickets: fixes to tometadata() operation to propagate Constraints at column and table level .. change:: :tags: oracle :tickets: 436 when returning "rowid" as the ORDER BY column or in use with ROW_NUMBER OVER, oracle dialect checks the selectable its being applied to and will switch to table PK if not applicable, i.e. for a UNION. checking for DISTINCT, GROUP BY (other places that rowid is invalid) still a TODO. allows polymorphic mappings to function. .. change:: :tags: oracle :tickets: sequences on a non-pk column will properly fire off on INSERT .. change:: :tags: oracle :tickets: 435 added PrefetchingResultProxy support to pre-fetch LOB columns when they are known to be present, fixes .. change:: :tags: oracle :tickets: 379 implemented reflection of tables based on synonyms, including across dblinks .. change:: :tags: oracle :tickets: 363 issues a log warning when a related table cant be reflected due to certain permission errors .. change:: :tags: mysql :tickets: fix to reflection on older DB's that might return array() type for "show variables like" statements .. change:: :tags: postgres :tickets: 442 better reflection of sequences for alternate-schema Tables .. change:: :tags: postgres :tickets: sequences on a non-pk column will properly fire off on INSERT .. change:: :tags: postgres :tickets: 460, 444 added PGInterval type, PGInet type .. change:: :tags: mssql :tickets: 419 preliminary support for pyodbc (Yay!) .. change:: :tags: mssql :tickets: 298 better support for NVARCHAR types added .. change:: :tags: mssql :tickets: fix for commit logic on pymssql .. change:: :tags: mssql :tickets: 456 fix for query.get() with schema .. change:: :tags: mssql :tickets: 473 fix for non-integer relationships .. change:: :tags: mssql :tickets: 419 DB-API module now selectable at run-time .. change:: :tags: tickets:422, 481, 415, mssql :tickets: now passes many more unit tests .. change:: :tags: mssql :tickets: 479 better unittest compatibility with ANSI functions .. change:: :tags: mssql :tickets: 415 improved support for implicit sequence PK columns with auto-insert .. change:: :tags: mssql :tickets: 371 fix for blank password in adodbapi .. 
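A sketch of the textual execute() argument styles described above; the SQL and table are illustrative, and the parameter markers must match the underlying DB-API's paramstyle (qmark and named shown)::

    engine.execute("insert into users (name) values (?)", ["ed"])      # positional via a list
    engine.execute("select * from users where name = ?", "ed")         # positional via *args
    engine.execute("insert into users (name) values (?)",
                   [["ed"], ["wendy"]])                                # list of lists -> executemany()
    engine.execute("update users set name=:name where id=:id",
                   {"name": "ed", "id": 1})                            # named args via a dict
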
change:: :tags: mssql :tickets: 481 fixes to get unit tests working with pyodbc .. change:: :tags: mssql :tickets: fix to auto_identity_insert on db-url query .. change:: :tags: mssql :tickets: added query_timeout to db-url query parms. currently works only for pymssql .. change:: :tags: mssql :tickets: tested with pymssql 0.8.0 (which is now LGPL) .. change:: :tags: orm, bugs :tickets: 441, 448, 439 another refactoring to relationship calculation. Allows more accurate ORM behavior with relationships from/to/between mappers, particularly polymorphic mappers, also their usage with Query, SelectResults. tickets include,,. .. change:: :tags: orm, bugs :tickets: removed deprecated method of specifying custom collections on classes; you must now use the "collection_class" option. the old way was beginning to produce conflicts when people used assign_mapper(), which now patches an "options" method, in conjunction with a relationship named "options". (relationships take precedence over monkeypatched assign_mapper methods). .. change:: :tags: orm, bugs :tickets: 454 extension() query option propagates to Mapper._instance() method so that all loading-related methods get called .. change:: :tags: orm, bugs :tickets: eager relation to an inheriting mapper wont fail if no rows returned for the relationship. .. change:: :tags: orm, bugs :tickets: 486 eager relation loading bug fixed for eager relation on multiple descendant classes .. change:: :tags: orm, bugs :tickets: 423 fix for very large topological sorts, courtesy ants.aasma at gmail .. change:: :tags: orm, bugs :tickets: eager loading is slightly more strict about detecting "self-referential" relationships, specifically between polymorphic mappers. this results in an "eager degrade" to lazy loading. .. change:: :tags: orm, bugs :tickets: 449 improved support for complex queries embedded into "where" criterion for query.select() .. change:: :tags: orm, bugs :tickets: 485 mapper options like eagerload(), lazyload(), deferred(), will work for "synonym()" relationships .. change:: :tags: orm, bugs :tickets: 445 fixed bug where cascade operations incorrectly included deleted collection items in the cascade .. change:: :tags: orm, bugs :tickets: 478 fixed relationship deletion error when one-to-many child item is moved to a new parent in a single unit of work .. change:: :tags: orm, bugs :tickets: fixed relationship deletion error where parent/child with a single column as PK/FK on the child would raise a "blank out the primary key" error, if manually deleted or "delete" cascade without "delete-orphan" was used .. change:: :tags: orm, bugs :tickets: fix to deferred so that load operation doesnt mistakenly occur when only PK col attributes are set .. change:: :tags: orm, enhancements :tickets: 385 implemented foreign_keys argument to mapper. use in conjunction with primaryjoin/secondaryjoin arguments to specify/override foreign keys defined on the Table instance. .. change:: :tags: orm, enhancements :tickets: contains_eager('foo') automatically implies eagerload('foo') .. change:: :tags: orm, enhancements :tickets: added "alias" argument to contains_eager(). use it to specify the string name or Alias instance of an alias used in the query for the eagerly loaded child items. easier to use than "decorator" .. change:: :tags: orm, enhancements :tickets: added "contains_alias()" option for result set mapping to an alias of the mapped table .. change:: :tags: orm, enhancements :tickets: 468 added support for py2.5 "with" statement with SessionTransaction .. 
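Assuming the py2.5 "with"-statement support noted above lets a SessionTransaction act as a context manager (committing on success, rolling back on error), usage would presumably look like::

    with session.begin():
        session.save(parent)   # 0.3-era API; flushed when the block commits
        session.save(child)
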
change:: :tags: extensions :tickets: added distinct() method to SelectResults. generally should only make a difference when using count(). .. change:: :tags: extensions :tickets: 472 added options() method to SelectResults, equivalent to query.options() .. change:: :tags: extensions :tickets: 462 added optional __table_opts__ dictionary to ActiveMapper, will send kw options to Table objects .. change:: :tags: extensions :tickets: 467 added selectfirst(), selectfirst_by() to assign_mapper .. changelog:: :version: 0.3.4 :released: Tue Jan 23 2007 .. change:: :tags: general :tickets: global "insure"->"ensure" change. in US english "insure" is actually largely interchangeable with "ensure" (so says the dictionary), so I'm not completely illiterate, but its definitely sub-optimal to "ensure" which is non-ambiguous. .. change:: :tags: sql :tickets: added "fetchmany()" support to ResultProxy .. change:: :tags: sql :tickets: added support for column "key" attribute to be useable in row[]/row. .. change:: :tags: sql :tickets: changed "BooleanExpression" to subclass from "BinaryExpression", so that boolean expressions can also follow column-clause behaviors (i.e. label(), etc). .. change:: :tags: sql :tickets: trailing underscores are trimmed from func. calls, such as func.if_() .. change:: :tags: sql :tickets: fix to correlation of subqueries when the column list of the select statement is constructed with individual calls to append_column(); this fixes an ORM bug whereby nested select statements were not getting correlated with the main select generated by the Query object. .. change:: :tags: sql :tickets: another fix to subquery correlation so that a subquery which has only one FROM element will *not* correlate that single element, since at least one FROM element is required in a query. .. change:: :tags: sql :tickets: 414 default "timezone" setting is now False. this corresponds to Python's datetime behavior as well as Postgres' timestamp/time types (which is the only timezone-sensitive dialect at the moment) .. change:: :tags: sql :tickets: the "op()" function is now treated as an "operation", rather than a "comparison". the difference is, an operation produces a BinaryExpression from which further operations can occur whereas comparison produces the more restrictive BooleanExpression .. change:: :tags: sql :tickets: trying to redefine a reflected primary key column as non-primary key raises an error .. change:: :tags: sql :tickets: type system slightly modified to support TypeDecorators that can be overridden by the dialect (ok, thats not very clear, it allows the mssql tweak below to be possible) .. change:: :tags: mssql :tickets: added an NVarchar type (produces NVARCHAR), also MSUnicode which provides Unicode-translation for the NVarchar regardless of dialect convert_unicode setting. .. change:: :tags: postgres :tickets: 424 fix to the initial checkfirst for tables to take current schema into account .. change:: :tags: postgres :tickets: postgres has an optional "server_side_cursors=True" flag which will utilize server side cursors. these are appropriate for fetching only partial results and are necessary for working with very large unbounded result sets. While we'd like this to be the default behavior, different environments seem to have different results and the causes have not been isolated so we are leaving the feature off by default for now. Uses an apparently undocumented psycopg2 behavior recently discovered on the psycopg mailing list. .. 
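The optional postgres server-side cursor flag mentioned above is set at engine creation time; the URL here is illustrative::

    engine = create_engine('postgres://scott:tiger@localhost/test',
                           server_side_cursors=True)
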
change:: :tags: postgres :tickets: added "BIGSERIAL" support for postgres table with PGBigInteger/autoincrement .. change:: :tags: postgres :tickets: 402 fixes to postgres reflection to better handle when schema names are present; thanks to jason (at) ncsmags.com .. change:: :tags: mysql :tickets: 420 mysql is inconsistent with what kinds of quotes it uses in foreign keys during a SHOW CREATE TABLE, reflection updated to accomodate for all three styles .. change:: :tags: mysql :tickets: 418 mysql table create options work on a generic passthru now, i.e. Table(..., mysql_engine='InnoDB', mysql_collate="latin1_german2_ci", mysql_auto_increment="5", mysql_...), helps .. change:: :tags: firebird :tickets: 408 order of constraint creation puts primary key first before all other constraints; required for firebird, not a bad idea for others .. change:: :tags: firebird :tickets: 409 Firebird fix to autoload multifield foreign keys .. change:: :tags: firebird :tickets: 409 Firebird NUMERIC type properly handles a type without precision .. change:: :tags: oracle :tickets: *slight* support for binary, but still need to figure out how to insert reasonably large values (over 4K). requires auto_setinputsizes=True sent to create_engine(), rows must be fully fetched individually, etc. .. change:: :tags: orm :tickets: poked the first hole in the can of worms: saying query.select_by(somerelationname=someinstance) will create the join of the primary key columns represented by "somerelationname"'s mapper to the actual primary key in "someinstance". .. change:: :tags: orm :tickets: reworked how relations interact with "polymorphic" mappers, i.e. mappers that have a select_table as well as polymorphic flags. better determination of proper join conditions, interaction with user- defined join conditions, and support for self-referential polymorphic mappers. .. change:: :tags: orm :tickets: related to polymorphic mapping relations, some deeper error checking when compiling relations, to detect an ambiguous "primaryjoin" in the case that both sides of the relationship have foreign key references in the primary join condition. also tightened down conditions used to locate "relation direction", associating the "foreignkey" of the relationship with the "primaryjoin" .. change:: :tags: orm :tickets: a little bit of improvement to the concept of a "concrete" inheritance mapping, though that concept is not well fleshed out yet (added test case to support concrete mappers on top of a polymorphic base). .. change:: :tags: orm :tickets: fix to "proxy=True" behavior on synonym() .. change:: :tags: orm :tickets: 427 fixed bug where delete-orphan basically didn't work with many-to-many relationships, backref presence generally hid the symptom .. change:: :tags: orm :tickets: added a mutex to the mapper compilation step. ive been reluctant to add any kind of threading anything to SA but this is one spot that its really needed since mappers are typically "global", and while their state does not change during normal operation, the initial compilation step does modify internal state significantly, and this step usually occurs not at module-level initialization time (unless you call compile()) but at first-request time .. change:: :tags: orm :tickets: basic idea of "session.merge()" actually implemented. needs more testing. .. change:: :tags: orm :tickets: added "compile_mappers()" function as a shortcut to compiling all mappers .. 
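The generic MySQL table-option passthrough shown above takes keyword arguments directly on Table; the table and column names are illustrative::

    from sqlalchemy import Table, Column, Integer, MetaData

    metadata = MetaData()
    t = Table('data', metadata,
              Column('id', Integer, primary_key=True),
              mysql_engine='InnoDB',
              mysql_collate='latin1_german2_ci',
              mysql_auto_increment='5')
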
change:: :tags: orm :tickets: fix to MapperExtension create_instance so that entity_name properly associated with new instance .. change:: :tags: orm :tickets: speed enhancements to ORM object instantiation, eager loading of rows .. change:: :tags: orm :tickets: 406 invalid options sent to 'cascade' string will raise an exception .. change:: :tags: orm :tickets: 407 fixed bug in mapper refresh/expire whereby eager loaders didnt properly re-populate item lists .. change:: :tags: orm :tickets: 413 fix to post_update to ensure rows are updated even for non insert/delete scenarios .. change:: :tags: orm :tickets: 412 added an error message if you actually try to modify primary key values on an entity and then flush it .. change:: :tags: extensions :tickets: 426 added "validate=False" argument to assign_mapper, if True will ensure that only mapped attributes are named .. change:: :tags: extensions :tickets: assign_mapper gets "options", "instances" functions added (i.e. MyClass.instances()) .. changelog:: :version: 0.3.3 :released: Fri Dec 15 2006 .. change:: :tags: :tickets: string-based FROM clauses fixed, i.e. select(..., from_obj=["sometext"]) .. change:: :tags: :tickets: fixes to passive_deletes flag, lazy=None (noload) flag .. change:: :tags: :tickets: added example/docs for dealing with large collections .. change:: :tags: :tickets: added object_session() method to sqlalchemy namespace .. change:: :tags: :tickets: fixed QueuePool bug whereby its better able to reconnect to a database that was not reachable (thanks to Sébastien Lelong), also fixed dispose() method .. change:: :tags: :tickets: 396 patch that makes MySQL rowcount work correctly! .. change:: :tags: :tickets: fix to MySQL catch of 2006/2014 errors to properly re-raise OperationalError exception .. changelog:: :version: 0.3.2 :released: Sun Dec 10 2006 .. change:: :tags: :tickets: 387 major connection pool bug fixed. fixes MySQL out of sync errors, will also prevent transactions getting rolled back accidentally in all DBs .. change:: :tags: :tickets: major speed enhancements vs. 0.3.1, to bring speed back to 0.2.8 levels .. change:: :tags: :tickets: made conditional dozens of debug log calls that were time-intensive to generate log messages .. change:: :tags: :tickets: fixed bug in cascade rules whereby the entire object graph could be unnecessarily cascaded on the save/update cascade .. change:: :tags: :tickets: various speedups in attributes module .. change:: :tags: :tickets: 388 identity map in Session is by default *no longer weak referencing*. to have it be weak referencing, use create_session(weak_identity_map=True) fixes .. change:: :tags: :tickets: MySQL detects errors 2006 (server has gone away) and 2014 (commands out of sync) and invalidates the connection on which it occured. .. change:: :tags: :tickets: 307 MySQL bool type fix: .. change:: :tags: :tickets: 382, 349 postgres reflection fixes: .. change:: :tags: :tickets: 247 added keywords for EXCEPT, INTERSECT, EXCEPT ALL, INTERSECT ALL .. change:: :tags: :tickets: 2110 assign_mapper in assignmapper extension returns the created mapper .. change:: :tags: :tickets: added label() function to Select class, when scalar=True is used to create a scalar subquery i.e. "select x, y, (select max(foo) from table) AS foomax from table" .. change:: :tags: :tickets: added onupdate and ondelete keyword arguments to ForeignKey; propagate to underlying ForeignKeyConstraint if present. (dont propagate in the other direction, however) .. 
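A sketch of the onupdate/ondelete keyword arguments on ForeignKey described above, which propagate to the underlying ForeignKeyConstraint (table names are illustrative, and an existing MetaData is assumed)::

    from sqlalchemy import Table, Column, Integer, ForeignKey

    addresses = Table('addresses', metadata,
        Column('id', Integer, primary_key=True),
        Column('user_id', Integer,
               ForeignKey('users.id', onupdate='CASCADE', ondelete='CASCADE')))
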
change:: :tags: :tickets: fix to session.update() to preserve "dirty" status of incoming object .. change:: :tags: :tickets: sending a selectable to an IN via the in_() function no longer creates a "union" out of multiple selects; only one selectable to a the in_() function is allowed now (make a union yourself if union is needed) .. change:: :tags: :tickets: improved support for disabling save-update cascade via cascade="none" etc. .. change:: :tags: :tickets: added "remote_side" argument to relation(), used only with self-referential mappers to force the direction of the parent/child relationship. replaces the usage of the "foreignkey" parameter for "switching" the direction. "foreignkey" argument is deprecated for all uses and will eventually be replaced by an argument dedicated to ForeignKey specification on mappers. .. changelog:: :version: 0.3.1 :released: Mon Nov 13 2006 .. change:: :tags: engine/pool :tickets: some new Pool utility classes, updated docs .. change:: :tags: engine/pool :tickets: "use_threadlocal" on Pool defaults to False (same as create_engine) .. change:: :tags: engine/pool :tickets: fixed direct execution of Compiled objects .. change:: :tags: engine/pool :tickets: create_engine() reworked to be strict about incoming \**kwargs. all keyword arguments must be consumed by one of the dialect, connection pool, and engine constructors, else a TypeError is thrown which describes the full set of invalid kwargs in relation to the selected dialect/pool/engine configuration. .. change:: :tags: databases/types :tickets: MySQL catches exception on "describe" and reports as NoSuchTableError .. change:: :tags: databases/types :tickets: further fixes to sqlite booleans, weren't working as defaults .. change:: :tags: databases/types :tickets: fix to postgres sequence quoting when using schemas .. change:: :tags: orm :tickets: the "delete" cascade will load in all child objects, if they were not loaded already. this can be turned off (i.e. the old behavior) by setting passive_deletes=True on a relation(). .. change:: :tags: orm :tickets: adjustments to reworked eager query generation to not fail on circular eager-loaded relationships (like backrefs) .. change:: :tags: orm :tickets: fixed bug where eagerload() (nor lazyload()) option didn't properly instruct the Query whether or not to use "nesting" when producing a LIMIT query. .. change:: :tags: orm :tickets: 360 fixed bug in circular dependency sorting at flush time; if object A contained a cyclical many-to-one relationship to object B, and object B was just attached to object A, *but* object B itself wasnt changed, the many-to-one synchronize of B's primary key attribute to A's foreign key attribute wouldnt occur. .. change:: :tags: orm :tickets: 325 implemented from_obj argument for query.count, improves count function on selectresults .. change:: :tags: orm :tickets: added an assertion within the "cascade" step of ORM relationships to check that the class of object attached to a parent object is appropriate (i.e. if A.items stores B objects, raise an error if a C is appended to A.items) .. change:: :tags: orm :tickets: new extension sqlalchemy.ext.associationproxy, provides transparent "association object" mappings. new example examples/association/proxied_association.py illustrates. .. change:: :tags: orm :tickets: improvement to single table inheritance to load full hierarchies beneath the target class .. change:: :tags: orm :tickets: 362 fix to subtle condition in topological sort where a node could appear twice, for .. 
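A sketch of the "remote_side" argument on a self-referential relation() as described above; the Node mapping and its table are assumptions::

    mapper(Node, nodes_table, properties={
        # remote_side names the column on the "remote" end, forcing the
        # many-to-one (child -> parent) direction of the relationship
        'parent': relation(Node, remote_side=[nodes_table.c.id]),
    })
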
change:: :tags: orm :tickets: 365 additional rework to topological sort, refactoring, for .. change:: :tags: orm :tickets: "delete-orphan" for a certain type can be set on more than one parent class; the instance is an "orphan" only if its not attached to *any* of those parents .. changelog:: :version: 0.3.0 :released: Sun Oct 22 2006 .. change:: :tags: general :tickets: logging is now implemented via standard python "logging" module. "echo" keyword parameters are still functional but set/unset log levels for their respective classes/instances. all logging can be controlled directly through the Python API by setting INFO and DEBUG levels for loggers in the "sqlalchemy" namespace. class-level logging is under "sqlalchemy..", instance-level logging under "sqlalchemy...0x..<00-FF>". Test suite includes "--log-info" and "--log-debug" arguments which work independently of --verbose/--quiet. Logging added to orm to allow tracking of mapper configurations, row iteration. .. change:: :tags: general :tickets: the documentation-generation system has been overhauled to be much simpler in design and more integrated with Markdown .. change:: :tags: sqlite :tickets: sqlite boolean datatype converts False/True to 0/1 by default .. change:: :tags: sqlite :tickets: 335 fixes to Date/Time (SLDate/SLTime) types; works as good as postgres now .. change:: :tags: ms-sql :tickets: fixes bug 261 (table reflection broken for MS-SQL case-sensitive databases) .. change:: :tags: ms-sql :tickets: can now specify port for pymssql .. change:: :tags: ms-sql :tickets: introduces new "auto_identity_insert" option for auto-switching between "SET IDENTITY_INSERT" mode when values specified for IDENTITY columns .. change:: :tags: ms-sql :tickets: now supports multi-column foreign keys .. change:: :tags: ms-sql :tickets: fix to reflecting date/datetime columns .. change:: :tags: ms-sql :tickets: NCHAR and NVARCHAR type support added .. change:: :tags: oracle :tickets: Oracle has experimental support for cx_Oracle.TIMESTAMP, which requires a setinputsizes() call on the cursor that is now enabled via the 'auto_setinputsizes' flag to the oracle dialect. .. change:: :tags: firebird :tickets: aliases do not use "AS" .. change:: :tags: firebird :tickets: correctly raises NoSuchTableError when reflecting non-existent table .. change:: :tags: schema :tickets: a fair amount of cleanup to the schema package, removal of ambiguous methods, methods that are no longer needed. slightly more constrained useage, greater emphasis on explicitness .. change:: :tags: schema :tickets: the "primary_key" attribute of Table and other selectables becomes a setlike ColumnCollection object; is ordered but not numerically indexed. a comparison clause between two pks that are derived from the same underlying tables (i.e. such as two Alias objects) can be generated via table1.primary_key==table2.primary_key .. change:: :tags: schema :tickets: ForeignKey(Constraint) supports "use_alter=True", to create/drop a foreign key via ALTER. this allows circular foreign key relationships to be set up. .. change:: :tags: schema :tickets: append_item() methods removed from Table and Column; preferably construct Table/Column/related objects inline, but if needed use append_column(), append_foreign_key(), append_constraint(), etc. .. change:: :tags: schema :tickets: table.create() no longer returns the Table object, instead has no return value. the usual case is that tables are created via metadata, which is preferable since it will handle table dependencies. .. 
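Since all logging now routes through the standard "logging" module as described above, levels can be set on the "sqlalchemy" namespace; the exact child logger names follow the class/instance scheme noted in the entry, so the narrower example below is an assumption of that hierarchy::

    import logging

    logging.basicConfig()
    logging.getLogger('sqlalchemy').setLevel(logging.INFO)
    # or narrow the scope to engine-related loggers only
    logging.getLogger('sqlalchemy.engine').setLevel(logging.DEBUG)
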
change:: :tags: schema :tickets: added UniqueConstraint (goes at Table level), CheckConstraint (goes at Table or Column level). .. change:: :tags: schema :tickets: index=False/unique=True on Column now creates a UniqueConstraint, index=True/unique=False creates a plain Index, index=True/unique=True on Column creates a unique Index. 'index' and 'unique' keyword arguments to column are now boolean only; for explcit names and groupings of indexes or unique constraints, use the UniqueConstraint/Index constructs explicitly. .. change:: :tags: schema :tickets: added autoincrement=True to Column; will disable schema generation of SERIAL/AUTO_INCREMENT/identity seq for postgres/mysql/mssql if explicitly set to False .. change:: :tags: schema :tickets: TypeEngine objects now have methods to deal with copying and comparing values of their specific type. Currently used by the ORM, see below. .. change:: :tags: schema :tickets: fixed condition that occurred during reflection when a primary key column was explciitly overridden, where the PrimaryKeyConstraint would get both the reflected and the programmatic column doubled up .. change:: :tags: schema :tickets: the "foreign_key" attribute on Column and ColumnElement in general is deprecated, in favor of the "foreign_keys" list/set-based attribute, which takes into account multiple foreign keys on one column. "foreign_key" will return the first element in the "foreign_keys" list/set or None if the list is empty. .. change:: :tags: connections/pooling/execution :tickets: connection pool tracks open cursors and automatically closes them if connection is returned to pool with cursors still opened. Can be affected by options which cause it to raise an error instead, or to do nothing. fixes issues with MySQL, others .. change:: :tags: connections/pooling/execution :tickets: fixed bug where Connection wouldnt lose its Transaction after commit/rollback .. change:: :tags: connections/pooling/execution :tickets: added scalar() method to ComposedSQLEngine, ResultProxy .. change:: :tags: connections/pooling/execution :tickets: ResultProxy will close() the underlying cursor when the ResultProxy itself is closed. this will auto-close cursors for ResultProxy objects that have had all their rows fetched (or had scalar() called). .. change:: :tags: connections/pooling/execution :tickets: ResultProxy.fetchall() internally uses DBAPI fetchall() for better efficiency, added to mapper iteration as well (courtesy Michael Twomey) .. change:: :tags: construction, sql :tickets: 292 changed "for_update" parameter to accept False/True/"nowait" and "read", the latter two of which are interpreted only by Oracle and Mysql .. change:: :tags: construction, sql :tickets: added extract() function to sql dialect (SELECT extract(field FROM expr)) .. change:: :tags: construction, sql :tickets: BooleanExpression includes new "negate" argument to specify the appropriate negation operator if one is available. .. change:: :tags: construction, sql :tickets: calling a negation on an "IN" or "IS" clause will result in "NOT IN", "IS NOT" (as opposed to NOT (x IN y)). .. change:: :tags: construction, sql :tickets: 172 Function objects know what to do in a FROM clause now. their behavior should be the same, except now you can also do things like select(['*'], from_obj=[func.my_function()]) to get multiple columns from the result, or even use sql.column() constructs to name the return columns .. 
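The boolean index/unique combinations on Column described above can be summarized with an illustrative table definition (an existing MetaData is assumed)::

    Table('mytable', metadata,
          Column('a', Integer, unique=True),               # -> UniqueConstraint
          Column('b', Integer, index=True),                # -> plain Index
          Column('c', Integer, index=True, unique=True))   # -> unique Index
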
change:: :tags: orm :tickets: attribute tracking modified to be more intelligent about detecting changes, particularly with mutable types. TypeEngine objects now take a greater role in defining how to compare two scalar instances, including the addition of a MutableType mixin which is implemented by PickleType. unit-of-work now tracks the "dirty" list as an expression of all persistent objects where the attribute manager detects changes. The basic issue thats fixed is detecting changes on PickleType objects, but also generalizes type handling and "modified" object checking to be more complete and extensible. .. change:: :tags: orm :tickets: a wide refactoring to "attribute loader" and "options" architectures. ColumnProperty and PropertyLoader define their loading behaivor via switchable "strategies", and MapperOptions no longer use mapper/property copying in order to function; they are instead propagated via QueryContext and SelectionContext objects at query/instances time. All of the internal copying of mappers and properties that was used to handle inheritance as well as options() has been removed; the structure of mappers and properties is much simpler than before and is clearly laid out in the new 'interfaces' module. .. change:: :tags: orm :tickets: related to the mapper/property overhaul, internal refactoring to mapper instances() method to use a SelectionContext object to track state during the operation. SLIGHT API BREAKAGE: the append_result() and populate_instances() methods on MapperExtension have a slightly different method signature now as a result of the change; hoping that these methods are not in widespread use as of yet. .. change:: :tags: orm :tickets: instances() method moved to Query now, backwards-compatible version remains on Mapper. .. change:: :tags: orm :tickets: added contains_eager() MapperOption, used in conjunction with instances() to specify properties that should be eagerly loaded from the result set, using their plain column names by default, or translated given an custom row-translation function. .. change:: :tags: orm :tickets: more rearrangements of unit-of-work commit scheme to better allow dependencies within circular flushes to work properly...updated task traversal/logging implementation .. change:: :tags: orm :tickets: 321 polymorphic mappers (i.e. using inheritance) now produces INSERT statements in order of tables across all inherited classes .. change:: :tags: orm :tickets: added an automatic "row switch" feature to mapping, which will detect a pending instance/deleted instance pair with the same identity key and convert the INSERT/DELETE to a single UPDATE .. change:: :tags: orm :tickets: "association" mappings simplified to take advantage of automatic "row switch" feature .. change:: :tags: orm :tickets: 212 "custom list classes" is now implemented via the "collection_class" keyword argument to relation(). the old way still works but is deprecated .. change:: :tags: orm :tickets: added "viewonly" flag to relation(), allows construction of relations that have no effect on the flush() process. .. change:: :tags: orm :tickets: 292 added "lockmode" argument to base Query select/get functions, including "with_lockmode" function to get a Query copy that has a default locking mode. Will translate "read"/"update" arguments into a for_update argument on the select side. .. 
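A sketch of the lockmode/with_lockmode usage described above; the mapped class is an assumption, and the "update" mode renders FOR UPDATE where the dialect supports it::

    obj = session.query(MyClass).with_lockmode('update').get(5)
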
change:: :tags: orm :tickets: implemented "version check" logic in Query/Mapper, used when version_id_col is in effect and query.with_lockmode() is used to get() an instance thats already loaded .. change:: :tags: orm :tickets: 208 post_update behavior improved; does a better job at not updating too many rows, updates only required columns .. change:: :tags: orm :tickets: 308 adjustments to eager loading so that its "eager chain" is kept separate from the normal mapper setup, thereby preventing conflicts with lazy loader operation, fixes .. change:: :tags: orm :tickets: fix to deferred group loading .. change:: :tags: orm :tickets: 346 session.flush() wont close a connection it opened .. change:: :tags: orm :tickets: added "batch=True" flag to mapper; if False, save_obj will fully save one object at a time including calls to before_XXXX and after_XXXX .. change:: :tags: orm :tickets: added "column_prefix=None" argument to mapper; prepends the given string (typically '_') to column-based attributes automatically set up from the mapper's Table .. change:: :tags: orm :tickets: 315 specifying joins in the from_obj argument of query.select() will replace the main table of the query, if the table is somewhere within the given from_obj. this makes it possible to produce custom joins and outerjoins in queries without the main table getting added twice. .. change:: :tags: orm :tickets: eagerloading is adjusted to more thoughtfully attach its LEFT OUTER JOINs to the given query, looking for custom "FROM" clauses that may have already been set up. .. change:: :tags: orm :tickets: added join_to and outerjoin_to transformative methods to SelectResults, to build up join/outerjoin conditions based on property names. also added select_from to explicitly set from_obj parameter. .. change:: :tags: orm :tickets: removed "is_primary" flag from mapper. SQLAlchemy-0.8.4/doc/_sources/changelog/changelog_04.txt0000644000076500000240000037070212251147171023554 0ustar classicstaff00000000000000 ============== 0.4 Changelog ============== .. changelog:: :version: 0.4.8 :released: Sun Oct 12 2008 .. change:: :tags: orm :tickets: 1039 Fixed bug regarding inherit_condition passed with "A=B" versus "B=A" leading to errors .. change:: :tags: orm :tickets: Changes made to new, dirty and deleted collections in SessionExtension.before_flush() will take effect for that flush. .. change:: :tags: orm :tickets: Added label() method to InstrumentedAttribute to establish forwards compatibility with 0.5. .. change:: :tags: sql :tickets: 1074 column.in_(someselect) can now be used as a columns-clause expression without the subquery bleeding into the FROM clause .. change:: :tags: mysql :tickets: 1146 Added MSMediumInteger type. .. change:: :tags: sqlite :tickets: 968 Supplied a custom strftime() function which handles dates before 1900. .. change:: :tags: sqlite :tickets: String's (and Unicode's, UnicodeText's, etc.) convert_unicode logic disabled in the sqlite dialect, to adjust for pysqlite 2.5.0's new requirement that only Python unicode objects are accepted; http://itsystementwicklung.de/pipermail/list-pysqlite/2008-March/000018.html .. change:: :tags: oracle :tickets: 1155 has_sequence() now takes schema name into account .. change:: :tags: oracle :tickets: 1121 added BFILE to the list of reflected types .. changelog:: :version: 0.4.7p1 :released: Thu Jul 31 2008 .. change:: :tags: orm :tickets: Added "add()" and "add_all()" to scoped_session methods. 
Workaround for 0.4.7:: from sqlalchemy.orm.scoping import ScopedSession, instrument setattr(ScopedSession, "add", instrument("add")) setattr(ScopedSession, "add_all", instrument("add_all")) .. change:: :tags: orm :tickets: Fixed non-2.3 compatible usage of set() and generator expression within relation(). .. changelog:: :version: 0.4.7 :released: Sat Jul 26 2008 .. change:: :tags: orm :tickets: 1058 The contains() operator when used with many-to-many will alias() the secondary (association) table so that multiple contains() calls will not conflict with each other .. change:: :tags: orm :tickets: fixed bug preventing merge() from functioning in conjunction with a comparable_property() .. change:: :tags: orm :tickets: the enable_typechecks=False setting on relation() now only allows subtypes with inheriting mappers. Totally unrelated types, or subtypes not set up with mapper inheritance against the target mapper are still not allowed. .. change:: :tags: orm :tickets: 976 Added is_active flag to Sessions to detect when a transaction is in progress. This flag is always True with a "transactional" (in 0.5 a non-"autocommit") Session. .. change:: :tags: sql :tickets: Fixed bug when calling select([literal('foo')]) or select([bindparam('foo')]). .. change:: :tags: schema :tickets: 571 create_all(), drop_all(), create(), drop() all raise an error if the table name or schema name contains more characters than that dialect's configured character limit. Some DB's can handle too-long table names during usage, and SQLA can handle this as well. But various reflection/ checkfirst-during-create scenarios fail since we are looking for the name within the DB's catalog tables. .. change:: :tags: schema :tickets: 571, 820 The index name generated when you say "index=True" on a Column is truncated to the length appropriate for the dialect. Additionally, an Index with a too- long name cannot be explicitly dropped with Index.drop(), similar to. .. change:: :tags: postgres :tickets: Repaired server_side_cursors to properly detect text() clauses. .. change:: :tags: postgres :tickets: 1092 Added PGCidr type. .. change:: :tags: mysql :tickets: Added 'CALL' to the list of SQL keywords which return result rows. .. change:: :tags: oracle :tickets: Oracle get_default_schema_name() "normalizes" the name before returning, meaning it returns a lower-case name when the identifier is detected as case insensitive. .. change:: :tags: oracle :tickets: 709 creating/dropping tables takes schema name into account when searching for the existing table, so that tables in other owner namespaces with the same name do not conflict .. change:: :tags: oracle :tickets: 1062 Cursors now have "arraysize" set to 50 by default on them, the value of which is configurable using the "arraysize" argument to create_engine() with the Oracle dialect. This to account for cx_oracle's default setting of "1", which has the effect of many round trips being sent to Oracle. This actually works well in conjunction with BLOB/CLOB-bound cursors, of which there are any number available but only for the life of that row request (so BufferedColumnRow is still needed, but less so). .. change:: :tags: oracle :tickets: sqlite - add SLFloat type, which matches the SQLite REAL type affinity. Previously, only SLNumeric was provided which fulfills NUMERIC affinity, but that's not the same as REAL. .. changelog:: :version: 0.4.6 :released: Sat May 10 2008 .. 
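The Oracle cursor "arraysize" setting mentioned above is passed through create_engine(); the URL is illustrative::

    engine = create_engine('oracle://scott:tiger@dsn', arraysize=500)
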
change:: :tags: orm :tickets: Fix to the recent relation() refactoring which fixes exotic viewonly relations which join between local and remote table multiple times, with a common column shared between the joins. .. change:: :tags: orm :tickets: Also re-established viewonly relation() configurations that join across multiple tables. .. change:: :tags: orm :tickets: 610 Added experimental relation() flag to help with primaryjoins across functions, etc., _local_remote_pairs=[tuples]. This complements a complex primaryjoin condition allowing you to provide the individual column pairs which comprise the relation's local and remote sides. Also improved lazy load SQL generation to handle placing bind params inside of functions and other expressions. (partial progress towards) .. change:: :tags: orm :tickets: 1036 repaired single table inheritance such that you can single-table inherit from a joined-table inherting mapper without issue. .. change:: :tags: orm :tickets: 1027 Fixed "concatenate tuple" bug which could occur with Query.order_by() if clause adaption had taken place. .. change:: :tags: orm :tickets: Removed ancient assertion that mapped selectables require "alias names" - the mapper creates its own alias now if none is present. Though in this case you need to use the class, not the mapped selectable, as the source of column attributes - so a warning is still issued. .. change:: :tags: orm :tickets: fixes to the "exists" function involving inheritance (any(), has(), ~contains()); the full target join will be rendered into the EXISTS clause for relations that link to subclasses. .. change:: :tags: orm :tickets: restored usage of append_result() extension method for primary query rows, when the extension is present and only a single- entity result is being returned. .. change:: :tags: orm :tickets: Also re-established viewonly relation() configurations that join across multiple tables. .. change:: :tags: orm :tickets: removed ancient assertion that mapped selectables require "alias names" - the mapper creates its own alias now if none is present. Though in this case you need to use the class, not the mapped selectable, as the source of column attributes - so a warning is still issued. .. change:: :tags: orm :tickets: 1015 refined mapper._save_obj() which was unnecessarily calling __ne__() on scalar values during flush .. change:: :tags: orm :tickets: 1019 added a feature to eager loading whereby subqueries set as column_property() with explicit label names (which is not necessary, btw) will have the label anonymized when the instance is part of the eager join, to prevent conflicts with a subquery or column of the same name on the parent object. .. change:: :tags: orm :tickets: set-based collections \|=, -=, ^= and &= are stricter about their operands and only operate on sets, frozensets or subclasses of the collection type. Previously, they would accept any duck-typed set. .. change:: :tags: orm :tickets: added an example dynamic_dict/dynamic_dict.py, illustrating a simple way to place dictionary behavior on top of a dynamic_loader. .. change:: :tags: declarative, extension :tickets: Joined table inheritance mappers use a slightly relaxed function to create the "inherit condition" to the parent table, so that other foreign keys to not-yet-declared Table objects don't trigger an error. .. change:: :tags: declarative, extension :tickets: fixed reentrant mapper compile hang when a declared attribute is used within ForeignKey, ie. ForeignKey(MyOtherClass.someattribute) .. 
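The declarative fix noted above allows a declared attribute to be used directly inside ForeignKey; a minimal sketch, with illustrative class names::

    from sqlalchemy import Column, Integer, ForeignKey
    from sqlalchemy.ext.declarative import declarative_base

    Base = declarative_base()

    class User(Base):
        __tablename__ = 'users'
        id = Column(Integer, primary_key=True)

    class Address(Base):
        __tablename__ = 'addresses'
        id = Column(Integer, primary_key=True)
        user_id = Column(Integer, ForeignKey(User.id))   # declared attribute in ForeignKey
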
change:: :tags: sql :tickets: Added COLLATE support via the .collate() expression operator and collate(, ) sql function. .. change:: :tags: sql :tickets: Fixed bug with union() when applied to non-Table connected select statements .. change:: :tags: sql :tickets: 1014 improved behavior of text() expressions when used as FROM clauses, such as select().select_from(text("sometext")) .. change:: :tags: sql :tickets: 1021 Column.copy() respects the value of "autoincrement", fixes usage with Migrate .. change:: :tags: engines :tickets: Pool listeners can now be provided as a dictionary of callables or a (possibly partial) duck-type of PoolListener, your choice. .. change:: :tags: engines :tickets: added "rollback_returned" option to Pool which will disable the rollback() issued when connections are returned. This flag is only safe to use with a database which does not support transactions (i.e. MySQL/MyISAM). .. change:: :tags: ext :tickets: set-based association proxies \|=, -=, ^= and &= are stricter about their operands and only operate on sets, frozensets or other association proxies. Previously, they would accept any duck-typed set. .. change:: :tags: mssql :tickets: 1005 Added "odbc_autotranslate" parameter to engine / dburi parameters. Any given string will be passed through to the ODBC connection string as: "AutoTranslate=%s" % odbc_autotranslate .. change:: :tags: mssql :tickets: Added "odbc_options" parameter to engine / dburi parameters. The given string is simply appended to the SQLAlchemy-generated odbc connection string. This should obviate the need of adding a myriad of ODBC options in the future. .. change:: :tags: firebird :tickets: Handle the "SUBSTRING(:string FROM :start FOR :length)" builtin. .. changelog:: :version: 0.4.5 :released: Fri Apr 04 2008 .. change:: :tags: orm :tickets: A small change in behavior to session.merge() - existing objects are checked for based on primary key attributes, not necessarily _instance_key. So the widely requested capability, that: x = MyObject(id=1) x = sess.merge(x) will in fact load MyObject with id #1 from the database if present, is now available. merge() still copies the state of the given object to the persistent one, so an example like the above would typically have copied "None" from all attributes of "x" onto the persistent copy. These can be reverted using session.expire(x). .. change:: :tags: orm :tickets: Also fixed behavior in merge() whereby collection elements present on the destination but not the merged collection were not being removed from the destination. .. change:: :tags: orm :tickets: 995 Added a more aggressive check for "uncompiled mappers", helps particularly with declarative layer .. change:: :tags: orm :tickets: The methodology behind "primaryjoin"/"secondaryjoin" has been refactored. Behavior should be slightly more intelligent, primarily in terms of error messages which have been pared down to be more readable. In a slight number of scenarios it can better resolve the correct foreign key than before. .. change:: :tags: orm :tickets: Added comparable_property(), adds query Comparator behavior to regular, unmanaged Python properties .. change:: :tags: orm, Company.employees.of_type(Engineer), 'machines' :tickets: the functionality of query.with_polymorphic() has been added to mapper() as a configuration option. 
It's set via several forms: with_polymorphic='*' with_polymorphic=[mappers] with_polymorphic=('*', selectable) with_polymorphic=([mappers], selectable) This controls the default polymorphic loading strategy for inherited mappers. When a selectable is not given, outer joins are created for all joined-table inheriting mappers requested. Note that the auto-create of joins is not compatible with concrete table inheritance. The existing select_table flag on mapper() is now deprecated and is synonymous with with_polymorphic('*', select_table). Note that the underlying "guts" of select_table have been completely removed and replaced with the newer, more flexible approach. The new approach also automatically allows eager loads to work for subclasses, if they are present, for example:: sess.query(Company).options( eagerload_all( )) to load Company objects, their employees, and the 'machines' collection of employees who happen to be Engineers. A "with_polymorphic" Query option should be introduced soon as well which would allow per-Query control of with_polymorphic() on relations. .. change:: :tags: orm :tickets: added two "experimental" features to Query, "experimental" in that their specific name/behavior is not carved in stone just yet: _values() and _from_self(). We'd like feedback on these. - _values(\*columns) is given a list of column expressions, and returns a new Query that only returns those columns. When evaluated, the return value is a list of tuples just like when using add_column() or add_entity(), the only difference is that "entity zero", i.e. the mapped class, is not included in the results. This means it finally makes sense to use group_by() and having() on Query, which have been sitting around uselessly until now. A future change to this method may include that its ability to join, filter and allow other options not related to a "resultset" are removed, so the feedback we're looking for is how people want to use _values()...i.e. at the very end, or do people prefer to continue generating after it's called. - _from_self() compiles the SELECT statement for the Query (minus any eager loaders), and returns a new Query that selects from that SELECT. So basically you can query from a Query without needing to extract the SELECT statement manually. This gives meaning to operations like query[3:5]._from_self().filter(some criterion). There's not much controversial here except that you can quickly create highly nested queries that are less efficient, and we want feedback on the naming choice. .. change:: :tags: orm :tickets: query.order_by() and query.group_by() will accept multiple arguments using \*args (like select() already does). .. change:: :tags: orm :tickets: Added some convenience descriptors to Query: query.statement returns the full SELECT construct, query.whereclause returns just the WHERE part of the SELECT construct. .. change:: :tags: orm :tickets: Fixed/covered case when using a False/0 value as a polymorphic discriminator. .. change:: :tags: orm :tickets: Fixed bug which was preventing synonym() attributes from being used with inheritance .. change:: :tags: orm :tickets: 996 Fixed SQL function truncation of trailing underscores .. change:: :tags: orm :tickets: When attributes are expired on a pending instance, an error will not be raised when the "refresh" action is triggered and no result is found. .. change:: :tags: orm :tickets: Session.execute can now find binds from metadata .. 
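A brief sketch of the multi-argument order_by()/group_by() and the new Query descriptors described above; the User class and criterion are assumptions::

    q = session.query(User).filter(User.name == 'ed').order_by(User.id, User.name)
    select_construct = q.statement    # the full SELECT construct
    criterion = q.whereclause         # just the WHERE portion
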
change:: :tags: orm :tickets: Adjusted the definition of "self-referential" to be any two mappers with a common parent (this affects whether or not aliased=True is required when joining with Query). .. change:: :tags: orm :tickets: Made some fixes to the "from_joinpoint" argument to query.join() so that if the previous join was aliased and this one isn't, the join still happens successfully. .. change:: :tags: orm :tickets: 895 Assorted "cascade deletes" fixes: - Fixed "cascade delete" operation of dynamic relations, which had only been implemented for foreign-key nulling behavior in 0.4.2 and not actual cascading deletes - Delete cascade without delete-orphan cascade on a many-to-one will not delete orphans which were disconnected from the parent before session.delete() is called on the parent (one-to-many already had this). - Delete cascade with delete-orphan will delete orphans whether or not it remains attached to its also-deleted parent. - delete-orphan casacde is properly detected on relations that are present on superclasses when using inheritance. .. change:: :tags: orm :tickets: Fixed order_by calculation in Query to properly alias mapper-config'ed order_by when using select_from() .. change:: :tags: orm :tickets: Refactored the diffing logic that kicks in when replacing one collection with another into collections.bulk_replace, useful to anyone building multi-level collections. .. change:: :tags: orm :tickets: Cascade traversal algorithm converted from recursive to iterative to support deep object graphs. .. change:: :tags: sql :tickets: 999 schema-qualified tables now will place the schemaname ahead of the tablename in all column expressions as well as when generating column labels. This prevents cross- schema name collisions in all cases .. change:: :tags: sql :tickets: can now allow selects which correlate all FROM clauses and have no FROM themselves. These are typically used in a scalar context, i.e. SELECT x, (SELECT x WHERE y) FROM table. Requires explicit correlate() call. .. change:: :tags: sql :tickets: 'name' is no longer a required constructor argument for Column(). It (and .key) may now be deferred until the column is added to a Table. .. change:: :tags: sql :tickets: 791, 993 like(), ilike(), contains(), startswith(), endswith() take an optional keyword argument "escape=", which is set as the escape character using the syntax "x LIKE y ESCAPE ''". .. change:: :tags: sql :tickets: random() is now a generic sql function and will compile to the database's random implementation, if any. .. change:: :tags: sql :tickets: update().values() and insert().values() take keyword arguments. .. change:: :tags: sql :tickets: Fixed an issue in select() regarding its generation of FROM clauses, in rare circumstances two clauses could be produced when one was intended to cancel out the other. Some ORM queries with lots of eager loads might have seen this symptom. .. change:: :tags: sql :tickets: The case() function now also takes a dictionary as its whens parameter. It also interprets the "THEN" expressions as values by default, meaning case([(x==y, "foo")]) will interpret "foo" as a bound value, not a SQL expression. use text(expr) for literal SQL expressions in this case. For the criterion itself, these may be literal strings only if the "value" keyword is present, otherwise SA will force explicit usage of either text() or literal(). .. change:: :tags: oracle :tickets: The "owner" keyword on Table is now deprecated, and is exactly synonymous with the "schema" keyword. 
Tables can now be reflected with alternate "owner" attributes, explicitly stated on the Table object or not using "schema". .. change:: :tags: oracle :tickets: All of the "magic" searching for synonyms, DBLINKs etc. during table reflection are disabled by default unless you specify "oracle_resolve_synonyms=True" on the Table object. Resolving synonyms necessarily leads to some messy guessing which we'd rather leave off by default. When the flag is set, tables and related tables will be resolved against synonyms in all cases, meaning if a synonym exists for a particular table, reflection will use it when reflecting related tables. This is stickier behavior than before which is why it's off by default. .. change:: :tags: declarative, extension :tickets: The "synonym" function is now directly usable with "declarative". Pass in the decorated property using the "descriptor" keyword argument, e.g.: somekey = synonym('_somekey', descriptor=property(g, s)) .. change:: :tags: declarative, extension :tickets: The "deferred" function is usable with "declarative". Simplest usage is to declare deferred and Column together, e.g.: data = deferred(Column(Text)) .. change:: :tags: declarative, extension :tickets: Declarative also gained @synonym_for(...) and @comparable_using(...), front-ends for synonym and comparable_property. .. change:: :tags: declarative, extension :tickets: 995 Improvements to mapper compilation when using declarative; already-compiled mappers will still trigger compiles of other uncompiled mappers when used .. change:: :tags: declarative, extension :tickets: Declarative will complete setup for Columns lacking names, allows a more DRY syntax. class Foo(Base): __tablename__ = 'foos' id = Column(Integer, primary_key=True) .. change:: :tags: declarative, extension :tickets: inheritance in declarative can be disabled when sending "inherits=None" to __mapper_args__. .. change:: :tags: declarative, extension :tickets: declarative_base() takes optional kwarg "mapper", which is any callable/class/method that produces a mapper, such as declarative_base(mapper=scopedsession.mapper). This property can also be set on individual declarative classes using the "__mapper_cls__" property. .. change:: :tags: postgres :tickets: 1001 Got PG server side cursors back into shape, added fixed unit tests as part of the default test suite. Added better uniqueness to the cursor ID .. change:: :tags: oracle :tickets: The "owner" keyword on Table is now deprecated, and is exactly synonymous with the "schema" keyword. Tables can now be reflected with alternate "owner" attributes, explicitly stated on the Table object or not using "schema". .. change:: :tags: oracle :tickets: All of the "magic" searching for synonyms, DBLINKs etc. during table reflection are disabled by default unless you specify "oracle_resolve_synonyms=True" on the Table object. Resolving synonyms necessarily leads to some messy guessing which we'd rather leave off by default. When the flag is set, tables and related tables will be resolved against synonyms in all cases, meaning if a synonym exists for a particular table, reflection will use it when reflecting related tables. This is stickier behavior than before which is why it's off by default. .. change:: :tags: mssql :tickets: 979 Reflected tables will now automatically load other tables which are referenced by Foreign keys in the auto-loaded table,. .. change:: :tags: mssql :tickets: 916 Added executemany check to skip identity fetch,. .. 
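A short sketch of the declarative conveniences described above, deferred() wrapping a Column and columns declared without explicit names; Base as produced by an earlier declarative_base() call is assumed::

    from sqlalchemy import Column, Integer, Text
    from sqlalchemy.orm import deferred

    class Foo(Base):
        __tablename__ = 'foos'
        id = Column(Integer, primary_key=True)   # column name 'id' filled in by declarative
        data = deferred(Column(Text))            # loaded only when first accessed
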
change:: :tags: mssql :tickets: 884 Added stubs for small date type. .. change:: :tags: mssql :tickets: Added a new 'driver' keyword parameter for the pyodbc dialect. Will substitute into the ODBC connection string if given, defaults to 'SQL Server'. .. change:: :tags: mssql :tickets: Added a new 'max_identifier_length' keyword parameter for the pyodbc dialect. .. change:: :tags: mssql :tickets: Improvements to pyodbc + Unix. If you couldn't get that combination to work before, please try again. .. change:: :tags: mysql :tickets: The connection.info keys the dialect uses to cache server settings have changed and are now namespaced. .. changelog:: :version: 0.4.4 :released: Wed Mar 12 2008 .. change:: :tags: sql :tickets: 975 Can again create aliases of selects against textual FROM clauses. .. change:: :tags: sql :tickets: The value of a bindparam() can be a callable, in which case it's evaluated at statement execution time to get the value. .. change:: :tags: sql :tickets: 978 Added exception wrapping/reconnect support to result set fetching. Reconnect works for those databases that raise a catchable data error during results (i.e. doesn't work on MySQL) .. change:: :tags: sql :tickets: 936 Implemented two-phase API for "threadlocal" engine, via engine.begin_twophase(), engine.prepare() .. change:: :tags: sql :tickets: 986 Fixed bug which was preventing UNIONS from being cloneable. .. change:: :tags: sql :tickets: Added "bind" keyword argument to insert(), update(), delete() and DDL(). The .bind property is now assignable on those statements as well as on select(). .. change:: :tags: sql :tickets: Insert statements can now be compiled with extra "prefix" words between INSERT and INTO, for vendor extensions like MySQL's INSERT IGNORE INTO table. .. change:: :tags: orm :tickets: any(), has(), contains(), ~contains(), attribute level == and != now work properly with self-referential relations - the clause inside the EXISTS is aliased on the "remote" side to distinguish it from the parent table. This applies to single table self-referential as well as inheritance-based self-referential. .. change:: :tags: orm :tickets: 985 Repaired behavior of == and != operators at the relation() level when compared against NULL for one-to-one relations .. change:: :tags: orm :tickets: Fixed bug whereby session.expire() attributes were not loading on an polymorphically-mapped instance mapped by a select_table mapper. .. change:: :tags: orm :tickets: Added query.with_polymorphic() - specifies a list of classes which descend from the base class, which will be added to the FROM clause of the query. Allows subclasses to be used within filter() criterion as well as eagerly loads the attributes of those subclasses. .. change:: :tags: orm :tickets: Your cries have been heard: removing a pending item from an attribute or collection with delete-orphan expunges the item from the session; no FlushError is raised. Note that if you session.save()'ed the pending item explicitly, the attribute/collection removal still knocks it out. .. change:: :tags: orm :tickets: session.refresh() and session.expire() raise an error when called on instances which are not persistent within the session .. change:: :tags: orm :tickets: Fixed potential generative bug when the same Query was used to generate multiple Query objects using join(). .. 
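A sketch of query.with_polymorphic() as described above; the Employee/Engineer/Manager joined-table hierarchy and the or_() criterion are placeholder assumptions::

    q = session.query(Employee).with_polymorphic([Engineer, Manager])
    q = q.filter(or_(Engineer.engineer_info == 'vlad',
                     Manager.manager_data == 'dilbert'))
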
change:: :tags: orm :tickets: Fixed bug which was introduced in 0.4.3, whereby loading an already-persistent instance mapped with joined table inheritance would trigger a useless "secondary" load from its joined table, when using the default "select" polymorphic_fetch. This was due to attributes being marked as expired during its first load and not getting unmarked from the previous "secondary" load. Attributes are now unexpired based on presence in __dict__ after any load or commit operation succeeds. .. change:: :tags: orm :tickets: Deprecated Query methods apply_sum(), apply_max(), apply_min(), apply_avg(). Better methodologies are coming.... .. change:: :tags: orm :tickets: relation() can accept a callable for its first argument, which returns the class to be related. This is in place to assist declarative packages to define relations without classes yet being in place. .. change:: :tags: orm :tickets: Added a new "higher level" operator called "of_type()": used in join() as well as with any() and has(), qualifies the subclass which will be used in filter criterion, e.g.: query.filter(Company.employees.of_type(Engineer). any(Engineer.name=='foo')) or query.join(Company.employees.of_type(Engineer)). filter(Engineer.name=='foo') .. change:: :tags: orm :tickets: Preventive code against a potential lost-reference bug in flush(). .. change:: :tags: orm :tickets: Expressions used in filter(), filter_by() and others, when they make usage of a clause generated from a relation using the identity of a child object (e.g., filter(Parent.child==)), evaluate the actual primary key value of at execution time so that the autoflush step of the Query can complete, thereby populating the PK value of in the case that was pending. .. change:: :tags: orm :tickets: setting the relation()-level order by to a column in the many-to-many "secondary" table will now work with eager loading, previously the "order by" wasn't aliased against the secondary table's alias. .. change:: :tags: orm :tickets: Synonyms riding on top of existing descriptors are now full proxies to those descriptors. .. change:: :tags: dialects :tickets: Invalid SQLite connection URLs now raise an error. .. change:: :tags: dialects :tickets: 981 postgres TIMESTAMP renders correctly .. change:: :tags: dialects :tickets: postgres PGArray is a "mutable" type by default; when used with the ORM, mutable-style equality/ copy-on-write techniques are used to test for changes. .. change:: :tags: extensions :tickets: a new super-small "declarative" extension has been added, which allows Table and mapper() configuration to take place inline underneath a class declaration. This extension differs from ActiveMapper and Elixir in that it does not redefine any SQLAlchemy semantics at all; literal Column, Table and relation() constructs are used to define the class behavior and table definition. .. changelog:: :version: 0.4.3 :released: Thu Feb 14 2008 .. change:: :tags: sql :tickets: Added "schema.DDL", an executable free-form DDL statement. DDLs can be executed in isolation or attached to Table or MetaData instances and executed automatically when those objects are created and/or dropped. .. change:: :tags: sql :tickets: Table columns and constraints can be overridden on a an existing table (such as a table that was already reflected) using the 'useexisting=True' flag, which now takes into account the arguments passed along with it. .. 
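A brief sketch of the schema.DDL usage described above; the table, index name, and engine are placeholders, and execute_at() is shown as the 0.4-era hook for attaching DDL to create/drop events::

    from sqlalchemy import DDL

    create_index = DDL("CREATE INDEX ix_users_name ON users (name)")

    # attached form: emitted automatically right after CREATE TABLE
    create_index.execute_at('after-create', users_table)

    # standalone form: executed in isolation against a bind
    # engine.execute(create_index)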
change:: :tags: sql :tickets: Added a callable-based DDL events interface, adds hooks before and after Tables and MetaData create and drop. .. change:: :tags: sql :tickets: Added generative where() method to delete() and update() constructs which return a new object with criterion joined to existing criterion via AND, just like select().where(). .. change:: :tags: sql :tickets: 727 Added "ilike()" operator to column operations. Compiles to ILIKE on postgres, lower(x) LIKE lower(y) on all others. .. change:: :tags: sql :tickets: 943 Added "now()" as a generic function; on SQLite, Oracle and MSSQL compiles as "CURRENT_TIMESTAMP"; "now()" on all others. .. change:: :tags: sql :tickets: 962 The startswith(), endswith(), and contains() operators now concatenate the wildcard operator with the given operand in SQL, i.e. "'%' || " in all cases, accept text('something') operands properly .. change:: :tags: sql :tickets: 962 cast() accepts text('something') and other non-literal operands properly .. change:: :tags: sql :tickets: fixed bug in result proxy where anonymously generated column labels would not be accessible using their straight string name .. change:: :tags: sql :tickets: Deferrable constraints can now be defined. .. change:: :tags: sql :tickets: 915 Added "autocommit=True" keyword argument to select() and text(), as well as generative autocommit() method on select(); for statements which modify the database through some user-defined means other than the usual INSERT/UPDATE/ DELETE etc. This flag will enable "autocommit" behavior during execution if no transaction is in progress. .. change:: :tags: sql :tickets: The '.c.' attribute on a selectable now gets an entry for every column expression in its columns clause. Previously, "unnamed" columns like functions and CASE statements weren't getting put there. Now they will, using their full string representation if no 'name' is available. .. change:: :tags: sql :tickets: a CompositeSelect, i.e. any union(), union_all(), intersect(), etc. now asserts that each selectable contains the same number of columns. This conforms to the corresponding SQL requirement. .. change:: :tags: sql :tickets: The anonymous 'label' generated for otherwise unlabeled functions and expressions now propagates outwards at compile time for expressions like select([select([func.foo()])]). .. change:: :tags: sql :tickets: Building on the above ideas, CompositeSelects now build up their ".c." collection based on the names present in the first selectable only; corresponding_column() now works fully for all embedded selectables. .. change:: :tags: sql :tickets: Oracle and others properly encode SQL used for defaults like sequences, etc., even if no unicode idents are used since identifier preparer may return a cached unicode identifier. .. change:: :tags: sql :tickets: Column and clause comparisons to datetime objects on the left hand side of the expression now work (d < table.c.col). (datetimes on the RHS have always worked, the LHS exception is a quirk of the datetime implementation.) .. change:: :tags: orm :tickets: Every Session.begin() must now be accompanied by a corresponding commit() or rollback() unless the session is closed with Session.close(). This also includes the begin() which is implicit to a session created with transactional=True. 
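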
The biggest change introduced here is that when a Session created with transactional=True raises an exception during flush(), you must call Session.rollback() or Session.close() in order for that Session to continue after an exception. .. change:: :tags: orm :tickets: 961 Fixed merge() collection-doubling bug when merging transient entities with backref'ed collections. .. change:: :tags: orm :tickets: merge(dont_load=True) does not accept transient entities; this is in continuation with the fact that merge(dont_load=True) does not accept any "dirty" objects either. .. change:: :tags: orm :tickets: Added standalone "query" class attribute generated by a scoped_session. This provides MyClass.query without using Session.mapper. Use via: MyClass.query = Session.query_property() .. change:: :tags: orm :tickets: The proper error message is raised when trying to access expired instance attributes with no session present .. change:: :tags: orm :tickets: dynamic_loader() / lazy="dynamic" now accepts and uses the order_by parameter in the same way in which it works with relation(). .. change:: :tags: orm :tickets: Added expire_all() method to Session. Calls expire() for all persistent instances. This is handy in conjunction with... .. change:: :tags: orm :tickets: Instances which have been partially or fully expired will have their expired attributes populated during a regular Query operation which affects those objects, preventing a needless second SQL statement for each instance. .. change:: :tags: orm :tickets: 938 Dynamic relations, when referenced, create a strong reference to the parent object so that the query still has a parent to call against even if the parent is only created (and otherwise dereferenced) within the scope of a single expression. .. change:: :tags: orm :tickets: Added a mapper() flag "eager_defaults". When set to True, defaults that are generated during an INSERT or UPDATE operation are post-fetched immediately, instead of being deferred until later. This mimics the old 0.3 behavior. .. change:: :tags: orm :tickets: query.join() can now accept class-mapped attributes as arguments. These can be used in place of or in any combination with strings. In particular this allows construction of joins to subclasses on a polymorphic relation, i.e.: query(Company).join(['employees', Engineer.name]) .. change:: :tags: orm :tickets: query.join() can also accept tuples of attribute name/some selectable as arguments. This allows construction of joins *from* subclasses of a polymorphic relation, i.e.: query(Company).join([('employees', people.join(engineer)), Engineer.name]) .. change:: :tags: orm :tickets: General improvements to the behavior of join() in conjunction with polymorphic mappers, i.e. joining from/to polymorphic mappers and properly applying aliases. .. change:: :tags: orm :tickets: 933 Fixed/improved behavior when a mapper determines the natural "primary key" of a mapped join, it will more effectively reduce columns which are equivalent via foreign key relation. This affects how many arguments need to be sent to query.get(), among other things. .. change:: :tags: orm :tickets: 946 The lazy loader can now handle a join condition where the "bound" column (i.e. the one that gets the parent id sent as a bind parameter) appears more than once in the join condition. Specifically this allows the common task of a relation() which contains a parent-correlated subquery, such as "select only the most recent child item". ..
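A sketch of the standalone "query" class attribute mentioned above; the engine, mapped class, table, and filter column are placeholders, and the classical mapper() configuration is assumed to happen elsewhere::

    from sqlalchemy.orm import scoped_session, sessionmaker

    Session = scoped_session(sessionmaker(bind=engine))

    class MyClass(object):
        pass
    # mapper(MyClass, mytable) configured elsewhere

    MyClass.query = Session.query_property()

    # class-level querying now works without Session.mapper
    flagged = MyClass.query.filter_by(flag=True).all()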
change:: :tags: orm :tickets: Fixed bug in polymorphic inheritance where an incorrect exception is raised when base polymorphic_on column does not correspond to any columns within the local selectable of an inheriting mapper more than one level deep .. change:: :tags: orm :tickets: Fixed bug in polymorphic inheritance which made it difficult to set a working "order_by" on a polymorphic mapper. .. change:: :tags: orm :tickets: Fixed a rather expensive call in Query that was slowing down polymorphic queries. .. change:: :tags: orm :tickets: 954 "Passive defaults" and other "inline" defaults can now be loaded during a flush() call if needed; in particular, this allows constructing relations() where a foreign key column references a server-side-generated, non-primary-key column. .. change:: :tags: orm :tickets: Additional Session transaction fixes/changes: - Fixed bug with session transaction management: parent transactions weren't started on the connection when adding a connection to a nested transaction. - session.transaction now always refers to the innermost active transaction, even when commit/rollback are called directly on the session transaction object. - Two-phase transactions can now be prepared. - When preparing a two-phase transaction fails on one connection, all the connections are rolled back. - session.close() didn't close all transactions when nested transactions were used. - rollback() previously erroneously set the current transaction directly to the parent of the transaction that could be rolled back to. Now it rolls back the next transaction up that can handle it, but sets the current transaction to its parent and inactivates the transactions in between. Inactive transactions can only be rolled back or closed, any other call results in an error. - autoflush for commit() wasn't flushing for simple subtransactions. - unitofwork flush didn't close the failed transaction when the session was not in a transaction and committing the transaction failed. .. change:: :tags: orm :tickets: 964, 940 Miscellaneous tickets: .. change:: :tags: general :tickets: Fixed a variety of hidden and some not-so-hidden compatibility issues for Python 2.3, thanks to new support for running the full test suite on 2.3. .. change:: :tags: general :tickets: Warnings are now issued as type exceptions.SAWarning. .. change:: :tags: dialects :tickets: Better support for schemas in SQLite (linked in by ATTACH DATABASE ... AS name). In some cases in the past, schema names were omitted from generated SQL for SQLite. This is no longer the case. .. change:: :tags: dialects :tickets: table_names on SQLite now picks up temporary tables as well. .. change:: :tags: dialects :tickets: Auto-detect an unspecified MySQL ANSI_QUOTES mode during reflection operations, support for changing the mode midstream. Manual mode setting is still required if no reflection is used. .. change:: :tags: dialects :tickets: Fixed reflection of TIME columns on SQLite. .. change:: :tags: dialects :tickets: 580 Finally added PGMacAddr type to postgres. .. change:: :tags: dialects :tickets: Reflect the sequence associated to a PK field (typically with a BEFORE INSERT trigger) under Firebird .. change:: :tags: dialects :tickets: 941 Oracle assembles the correct columns in the result set column mapping when generating a LIMIT/OFFSET subquery, allows columns to map properly to result sets even if long-name truncation kicks in. ..
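For the SQLite schema support noted above, a minimal sketch of how an attached database maps onto the Table "schema" argument; the file, schema, table, and column names are illustrative, and in practice each pooled connection needs the ATTACH issued against it::

    from sqlalchemy import create_engine, MetaData, Table, Column, Integer, String

    engine = create_engine('sqlite:///main.db')
    engine.execute("ATTACH DATABASE 'archive.db' AS archive")

    meta = MetaData()
    # the attached database is addressed via schema='archive'; generated SQL
    # now includes the schema name, e.g. archive.old_orders
    old_orders = Table('old_orders', meta,
                       Column('id', Integer, primary_key=True),
                       Column('status', String(20)),
                       schema='archive')
    meta.create_all(bind=engine)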
change:: :tags: dialects :tickets: MSSQL now includes EXEC in the _is_select regexp, which should allow row-returning stored procedures to be used. .. change:: :tags: dialects :tickets: MSSQL now includes an experimental implementation of LIMIT/OFFSET using the ANSI SQL row_number() function, so it requires MSSQL-2005 or higher. To enable the feature, add "has_window_funcs" to the keyword arguments for connect, or add "?has_window_funcs=1" to your dburi query arguments. .. change:: :tags: ext :tickets: Changed ext.activemapper to use a non-transactional session for the objectstore. .. change:: :tags: ext :tickets: Fixed output order of "['a'] + obj.proxied" binary operation on association-proxied lists. .. changelog:: :version: 0.4.2p3 :released: Wed Jan 09 2008 .. change:: :tags: general :tickets: sub version numbering scheme changed to suite setuptools version number rules; easy_install -u should now get this version over 0.4.2. .. change:: :tags: sql :tickets: 912 Text type is properly exported now and does not raise a warning on DDL create; String types with no length only raise warnings during CREATE TABLE .. change:: :tags: sql :tickets: new UnicodeText type is added, to specify an encoded, unlengthed Text type .. change:: :tags: sql :tickets: fixed bug in union() so that select() statements which don't derive from FromClause objects can be unioned .. change:: :tags: orm :tickets: fixed bug with session.dirty when using "mutable scalars" (such as PickleTypes) .. change:: :tags: orm :tickets: added a more descriptive error message when flushing on a relation() that has non-locally-mapped columns in its primary or secondary join condition .. change:: :tags: dialects :tickets: Fixed reflection of mysql empty string column defaults. .. change:: :tags: sql :tickets: 912 changed name of TEXT to Text since its a "generic" type; TEXT name is deprecated until 0.5. The "upgrading" behavior of String to Text when no length is present is also deprecated until 0.5; will issue a warning when used for CREATE TABLE statements (String with no length for SQL expression purposes is still fine) .. change:: :tags: sql :tickets: 924 generative select.order_by(None) / group_by(None) was not managing to reset order by/group by criterion, fixed .. change:: :tags: orm :tickets: suppressing *all* errors in InstanceState.__cleanup() now. .. change:: :tags: orm :tickets: 922 fixed an attribute history bug whereby assigning a new collection to a collection-based attribute which already had pending changes would generate incorrect history .. change:: :tags: orm :tickets: 925 fixed delete-orphan cascade bug whereby setting the same object twice to a scalar attribute could log it as an orphan .. change:: :tags: orm :tickets: Fixed cascades on a += assignment to a list-based relation. .. change:: :tags: orm :tickets: 919 synonyms can now be created against props that don't exist yet, which are later added via add_property(). This commonly includes backrefs. (i.e. you can make synonyms for backrefs without worrying about the order of operations) .. change:: :tags: orm :tickets: fixed bug which could occur with polymorphic "union" mapper which falls back to "deferred" loading of inheriting tables .. change:: :tags: orm :tickets: the "columns" collection on a mapper/mapped class (i.e. 'c') is against the mapped table, not the select_table in the case of polymorphic "union" loading (this shouldn't be noticeable). .. change:: :tags: ext :tickets: '+', '*', '+=' and '\*=' support for association proxied lists. .. 
change:: :tags: dialects :tickets: 923 mssql - narrowed down the test for "date"/"datetime" in MSDate/ MSDateTime subclasses so that incoming "datetime" objects don't get mis-interpreted as "date" objects and vice versa. .. change:: :tags: orm :tickets: fixed fairly critical bug whereby the same instance could be listed more than once in the unitofwork.new collection; most typically reproduced when using a combination of inheriting mappers and ScopedSession.mapper, as the multiple __init__ calls per instance could save() the object with distinct _state objects .. change:: :tags: orm :tickets: added very rudimentary yielding iterator behavior to Query. Call query.yield_per() and evaluate the Query in an iterative context; every collection of N rows will be packaged up and yielded. Use this method with extreme caution since it does not attempt to reconcile eagerly loaded collections across result batch boundaries, nor will it behave nicely if the same instance occurs in more than one batch. This means that an eagerly loaded collection will get cleared out if it's referenced in more than one batch, and in all cases attributes will be overwritten on instances that occur in more than one batch. .. change:: :tags: orm :tickets: 920 Fixed in-place set mutation operators for set collections and association proxied sets. .. change:: :tags: dialects :tickets: 913 Fixed the missing call to subtype result processor for the PGArray type. .. changelog:: :version: 0.4.2 :released: Wed Jan 02 2008 .. change:: :tags: sql :tickets: 615 generic functions ! we introduce a database of known SQL functions, such as current_timestamp, coalesce, and create explicit function objects representing them. These objects have constrained argument lists, are type aware, and can compile in a dialect-specific fashion. So saying func.char_length("foo", "bar") raises an error (too many args), func.coalesce(datetime.date(2007, 10, 5), datetime.date(2005, 10, 15)) knows that its return type is a Date. We only have a few functions represented so far but will continue to add to the system .. change:: :tags: sql :tickets: auto-reconnect support improved; a Connection can now automatically reconnect after its underlying connection is invalidated, without needing to connect() again from the engine. This allows an ORM session bound to a single Connection to not need a reconnect. Open transactions on the Connection must be rolled back after an invalidation of the underlying connection else an error is raised. Also fixed bug where disconnect detect was not being called for cursor(), rollback(), or commit(). .. change:: :tags: sql :tickets: added new flag to String and create_engine(), assert_unicode=(True|False|'warn'\|None). Defaults to `False` or `None` on create_engine() and String, `'warn'` on the Unicode type. When `True`, results in all unicode conversion operations raising an exception when a non-unicode bytestring is passed as a bind parameter. 'warn' results in a warning. It is strongly advised that all unicode-aware applications make proper use of Python unicode objects (i.e. u'hello' and not 'hello') so that data round trips accurately. .. change:: :tags: sql :tickets: generation of "unique" bind parameters has been simplified to use the same "unique identifier" mechanisms as everything else. This doesn't affect user code, except any code that might have been hardcoded against the generated names. Generated bind params now have the form "_", whereas before only the second bind of the same name would have this form. .. 
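To illustrate the query.yield_per() entry earlier in this group (a sketch only; the cautions in that entry about eagerly loaded collections still apply, the mapped Account class and process() function are placeholders, and the batch size is arbitrary)::

    # stream a large result set in batches of 100 rows rather than
    # materializing the entire result at once
    for account in session.query(Account).yield_per(100):
        process(account)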
change:: :tags: sql :tickets: select().as_scalar() will raise an exception if the select does not have exactly one expression in its columns clause. .. change:: :tags: sql :tickets: bindparam() objects themselves can be used as keys for execute(), i.e. statement.execute({bind1:'foo', bind2:'bar'}) .. change:: :tags: sql :tickets: added new methods to TypeDecorator, process_bind_param() and process_result_value(), which automatically take advantage of the processing of the underlying type. Ideal for using with Unicode or Pickletype. TypeDecorator should now be the primary way to augment the behavior of any existing type including other TypeDecorator subclasses such as PickleType. .. change:: :tags: sql :tickets: selectables (and others) will issue a warning when two columns in their exported columns collection conflict based on name. .. change:: :tags: sql :tickets: 890 tables with schemas can still be used in sqlite, firebird, schema name just gets dropped .. change:: :tags: sql :tickets: changed the various "literal" generation functions to use an anonymous bind parameter. not much changes here except their labels now look like ":param_1", ":param_2" instead of ":literal" .. change:: :tags: sql :tickets: column labels in the form "tablename.columname", i.e. with a dot, are now supported. .. change:: :tags: sql :tickets: from_obj keyword argument to select() can be a scalar or a list. .. change:: :tags: orm :tickets: 871 a major behavioral change to collection-based backrefs: they no longer trigger lazy loads ! "reverse" adds and removes are queued up and are merged with the collection when it is actually read from and loaded; but do not trigger a load beforehand. For users who have noticed this behavior, this should be much more convenient than using dynamic relations in some cases; for those who have not, you might notice your apps using a lot fewer queries than before in some situations. .. change:: :tags: orm :tickets: mutable primary key support is added. primary key columns can be changed freely, and the identity of the instance will change upon flush. In addition, update cascades of foreign key referents (primary key or not) along relations are supported, either in tandem with the database's ON UPDATE CASCADE (required for DB's like Postgres) or issued directly by the ORM in the form of UPDATE statements, by setting the flag "passive_cascades=False". .. change:: :tags: orm :tickets: 490 inheriting mappers now inherit the MapperExtensions of their parent mapper directly, so that all methods for a particular MapperExtension are called for subclasses as well. As always, any MapperExtension can return either EXT_CONTINUE to continue extension processing or EXT_STOP to stop processing. The order of mapper resolution is: . Note that if you instantiate the same extension class separately and then apply it individually for two mappers in the same inheritance chain, the extension will be applied twice to the inheriting class, and each method will be called twice. To apply a mapper extension explicitly to each inheriting class but have each method called only once per operation, use the same instance of the extension for both mappers. .. change:: :tags: orm :tickets: 907 MapperExtension.before_update() and after_update() are now called symmetrically; previously, an instance that had no modified column attributes (but had a relation() modification) could be called with before_update() but not after_update() .. 
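As a sketch of the TypeDecorator hooks described earlier in this group, here is an illustrative type that stores a Python list as a comma-delimited string; the type itself is hypothetical, but the two method names are the ones introduced by that change::

    from sqlalchemy import types

    class CommaSeparatedList(types.TypeDecorator):
        """Stores a list of strings as a single comma-delimited string."""

        impl = types.Text

        def process_bind_param(self, value, dialect):
            # runs on the way into the database
            if value is None:
                return None
            return ','.join(value)

        def process_result_value(self, value, dialect):
            # runs on the way back out of the database
            if value is None:
                return None
            return value.split(',')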
change:: :tags: orm :tickets: columns which are missing from a Query's select statement now get automatically deferred during load. .. change:: :tags: orm :tickets: 908 mapped classes which extend "object" and do not provide an __init__() method will now raise TypeError if non-empty \*args or \**kwargs are present at instance construction time (and are not consumed by any extensions such as the scoped_session mapper), consistent with the behavior of normal Python classes .. change:: :tags: orm :tickets: 899 fixed Query bug when filter_by() compares a relation against None .. change:: :tags: orm :tickets: improved support for pickling of mapped entities. Per-instance lazy/deferred/expired callables are now serializable so that they serialize and deserialize with _state. .. change:: :tags: orm :tickets: 801 new synonym() behavior: an attribute will be placed on the mapped class, if one does not exist already, in all cases. if a property already exists on the class, the synonym will decorate the property with the appropriate comparison operators so that it can be used in column expressions just like any other mapped attribute (i.e. usable in filter(), etc.) the "proxy=True" flag is deprecated and no longer means anything. Additionally, the flag "map_column=True" will automatically generate a ColumnProperty corresponding to the name of the synonym, i.e.: 'somename':synonym('_somename', map_column=True) will map the column named 'somename' to the attribute '_somename'. See the example in the mapper docs. .. change:: :tags: orm :tickets: Query.select_from() now replaces all existing FROM criterion with the given argument; the previous behavior of constructing a list of FROM clauses was generally not useful as is required filter() calls to create join criterion, and new tables introduced within filter() already add themselves to the FROM clause. The new behavior allows not just joins from the main table, but select statements as well. Filter criterion, order bys, eager load clauses will be "aliased" against the given statement. .. change:: :tags: orm :tickets: this month's refactoring of attribute instrumentation changes the "copy-on-load" behavior we've had since midway through 0.3 with "copy-on-modify" in most cases. This takes a sizable chunk of latency out of load operations and overall does less work as only attributes which are actually modified get their "committed state" copied. Only "mutable scalar" attributes (i.e. a pickled object or other mutable item), the reason for the copy-on-load change in the first place, retain the old behavior. .. change:: :tags: attrname, orm :tickets: a slight behavioral change to attributes is, del'ing an attribute does *not* cause the lazyloader of that attribute to fire off again; the "del" makes the effective value of the attribute "None". To re-trigger the "loader" for an attribute, use session.expire(instance,). .. change:: :tags: orm :tickets: query.filter(SomeClass.somechild == None), when comparing a many-to-one property to None, properly generates "id IS NULL" including that the NULL is on the right side. .. change:: :tags: orm :tickets: query.order_by() takes into account aliased joins, i.e. query.join('orders', aliased=True).order_by(Order.id) .. change:: :tags: orm :tickets: eagerload(), lazyload(), eagerload_all() take an optional second class-or-mapper argument, which will select the mapper to apply the option towards. This can select among other mappers which were added using add_entity(). .. 
change:: :tags: orm :tickets: eagerloading will work with mappers added via add_entity(). .. change:: :tags: orm :tickets: added "cascade delete" behavior to "dynamic" relations just like that of regular relations. if passive_deletes flag (also just added) is not set, a delete of the parent item will trigger a full load of the child items so that they can be deleted or updated accordingly. .. change:: :tags: orm :tickets: also with dynamic, implemented correct count() behavior as well as other helper methods. .. change:: :tags: orm :tickets: fix to cascades on polymorphic relations, such that cascades from an object to a polymorphic collection continue cascading along the set of attributes specific to each element in the collection. .. change:: :tags: orm :tickets: 893 query.get() and query.load() do not take existing filter or other criterion into account; these methods *always* look up the given id in the database or return the current instance from the identity map, disregarding any existing filter, join, group_by or other criterion which has been configured. .. change:: :tags: orm :tickets: 883 added support for version_id_col in conjunction with inheriting mappers. version_id_col is typically set on the base mapper in an inheritance relationship where it takes effect for all inheriting mappers. .. change:: :tags: orm :tickets: relaxed rules on column_property() expressions having labels; any ColumnElement is accepted now, as the compiler auto-labels non-labeled ColumnElements now. a selectable, like a select() statement, still requires conversion to ColumnElement via as_scalar() or label(). .. change:: :tags: orm :tickets: fixed backref bug where you could not del instance.attr if attr was None .. change:: :tags: orm :tickets: several ORM attributes have been removed or made private: mapper.get_attr_by_column(), mapper.set_attr_by_column(), mapper.pks_by_table, mapper.cascade_callable(), MapperProperty.cascade_callable(), mapper.canload(), mapper.save_obj(), mapper.delete_obj(), mapper._mapper_registry, attributes.AttributeManager .. change:: :tags: orm :tickets: Assigning an incompatible collection type to a relation attribute now raises TypeError instead of sqlalchemy's ArgumentError. .. change:: :tags: orm :tickets: 886 Bulk assignment of a MappedCollection now raises an error if a key in the incoming dictionary does not match the key that the collection's keyfunc would use for that value. .. change:: :tags: orm :tickets: Custom collections can now specify a @converter method to translate objects used in "bulk" assignment into a stream of values, as in:: obj.col = [newval1, newval2] # or obj.dictcol = {'foo': newval1, 'bar': newval2} The MappedCollection uses this hook to ensure that incoming key/value pairs are sane from the collection's perspective. .. change:: :tags: orm :tickets: 872 fixed endless loop issue when using lazy="dynamic" on both sides of a bi-directional relationship .. change:: :tags: orm :tickets: 904 more fixes to the LIMIT/OFFSET aliasing applied with Query + eagerloads, in this case when mapped against a select statement .. change:: :tags: orm :tickets: fix to self-referential eager loading such that if the same mapped instance appears in two or more distinct sets of columns in the same result set, its eagerly loaded collection will be populated regardless of whether or not all of the rows contain a set of "eager" columns for that collection. this would also show up as a KeyError when fetching results with join_depth turned on. ..
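A sketch of the @converter hook described in the custom-collection entry above; the TagList collection and Tag class are illustrative, and the collection would be wired up via relation(Tag, collection_class=TagList)::

    from sqlalchemy.orm.collections import collection

    class TagList(list):
        """List collection that also accepts a comma-separated string
        in "bulk" assignment, e.g. obj.tags = "red, blue"."""

        @collection.converter
        def _convert(self, value):
            # translate the assigned value into a stream of Tag objects
            if isinstance(value, basestring):
                return [Tag(name.strip()) for name in value.split(',')]
            return iter(value)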
change:: :tags: orm :tickets: fixed bug where Query would not apply a subquery to the SQL when LIMIT was used in conjunction with an inheriting mapper where the eager loader was only in the parent mapper. .. change:: :tags: orm :tickets: clarified the error message which occurs when you try to update() an instance with the same identity key as an instance already present in the session. .. change:: :tags: orm :tickets: some clarifications and fixes to merge(instance, dont_load=True). fixed bug where lazy loaders were getting disabled on returned instances. Also, we currently do not support merging an instance which has uncommitted changes on it, in the case that dont_load=True is used....this will now raise an error. This is due to complexities in merging the "committed state" of the given instance to correctly correspond to the newly copied instance, as well as other modified state. Since the use case for dont_load=True is caching, the given instances shouldn't have any uncommitted changes on them anyway. We also copy the instances over without using any events now, so that the 'dirty' list on the new session remains unaffected. .. change:: :tags: orm :tickets: fixed bug which could arise when using session.begin_nested() in conjunction with more than one level deep of enclosing session.begin() statements .. change:: :tags: orm :tickets: 914 fixed session.refresh() with instance that has custom entity_name .. change:: :tags: dialects :tickets: sqlite SLDate type will not erroneously render "microseconds" portion of a datetime or time object. .. change:: :tags: dialects :tickets: 902 oracle - added disconnect detection support for Oracle - some cleanup to binary/raw types so that cx_oracle.LOB is detected on an ad-hoc basis .. change:: :tags: dialects :tickets: 824, 839, 842, 901 MSSQL - PyODBC no longer has a global "set nocount on". - Fix non-identity integer PKs on autload - Better support for convert_unicode - Less strict date conversion for pyodbc/adodbapi - Schema-qualified tables / autoload .. change:: :tags: firebird, backend :tickets: 410 does properly reflect domains (partially fixing) and PassiveDefaults .. change:: :tags: 3562, firebird, backend :tickets: reverted to use default poolclass (was set to SingletonThreadPool in 0.4.0 for test purposes) .. change:: :tags: firebird, backend :tickets: map func.length() to 'char_length' (easily overridable with the UDF 'strlen' on old versions of Firebird) .. changelog:: :version: 0.4.1 :released: Sun Nov 18 2007 .. change:: :tags: sql :tickets: the "shortname" keyword parameter on bindparam() has been deprecated. .. change:: :tags: sql :tickets: Added contains operator (generates a "LIKE %%" clause). .. change:: :tags: sql :tickets: anonymous column expressions are automatically labeled. e.g. select([x* 5]) produces "SELECT x * 5 AS anon_1". This allows the labelname to be present in the cursor.description which can then be appropriately matched to result-column processing rules. (we can't reliably use positional tracking for result-column matches since text() expressions may represent multiple columns). .. change:: :tags: sql :tickets: operator overloading is now controlled by TypeEngine objects - the one built-in operator overload so far is String types overloading '+' to be the string concatenation operator. User-defined types can also define their own operator overloading by overriding the adapt_operator(self, op) method. .. 
change:: :tags: sql :tickets: 819 untyped bind parameters on the right side of a binary expression will be assigned the type of the left side of the operation, to better enable the appropriate bind parameter processing to take effect .. change:: :tags: sql :tickets: 833 Removed regular expression step from most statement compilations. Also fixes .. change:: :tags: sql :tickets: Fixed empty (zero column) sqlite inserts, allowing inserts on autoincrementing single column tables. .. change:: :tags: sql :tickets: Fixed expression translation of text() clauses; this repairs various ORM scenarios where literal text is used for SQL expressions .. change:: :tags: sql :tickets: Removed ClauseParameters object; compiled.params returns a regular dictionary now, as well as result.last_inserted_params() / last_updated_params(). .. change:: :tags: sql :tickets: Fixed INSERT statements w.r.t. primary key columns that have SQL-expression based default generators on them; SQL expression executes inline as normal but will not trigger a "postfetch" condition for the column, for those DB's who provide it via cursor.lastrowid .. change:: :tags: sql :tickets: 844 func. objects can be pickled/unpickled .. change:: :tags: sql :tickets: rewrote and simplified the system used to "target" columns across selectable expressions. On the SQL side this is represented by the "corresponding_column()" method. This method is used heavily by the ORM to "adapt" elements of an expression to similar, aliased expressions, as well as to target result set columns originally bound to a table or selectable to an aliased, "corresponding" expression. The new rewrite features completely consistent and accurate behavior. .. change:: :tags: sql :tickets: 573 Added a field ("info") for storing arbitrary data on schema items .. change:: :tags: sql :tickets: The "properties" collection on Connections has been renamed "info" to match schema's writable collections. Access is still available via the "properties" name until 0.5. .. change:: :tags: sql :tickets: fixed the close() method on Transaction when using strategy='threadlocal' .. change:: :tags: sql :tickets: 853 fix to compiled bind parameters to not mistakenly populate None .. change:: :tags: sql :tickets: ._execute_clauseelement becomes a public method Connectable.execute_clauseelement .. change:: :tags: orm :tickets: 843 eager loading with LIMIT/OFFSET applied no longer adds the primary table joined to a limited subquery of itself; the eager loads now join directly to the subquery which also provides the primary table's columns to the result set. This eliminates a JOIN from all eager loads with LIMIT/OFFSET. .. change:: :tags: orm :tickets: 802 session.refresh() and session.expire() now support an additional argument "attribute_names", a list of individual attribute keynames to be refreshed or expired, allowing partial reloads of attributes on an already-loaded instance. .. change:: :tags: orm :tickets: 767 added op() operator to instrumented attributes; i.e. User.name.op('ilike')('%somename%') .. change:: :tags: orm :tickets: 676 Mapped classes may now define __eq__, __hash__, and __nonzero__ methods with arbitrary semantics. The orm now handles all mapped instances on an identity-only basis. (e.g. 'is' vs '==') .. change:: :tags: orm :tickets: the "properties" accessor on Mapper is removed; it now throws an informative exception explaining the usage of mapper.get_property() and mapper.iterate_properties .. 
change:: :tags: orm :tickets: added having() method to Query, applies HAVING to the generated statement in the same way as filter() appends to the WHERE clause. .. change:: :tags: orm :tickets: 777 The behavior of query.options() is now fully based on paths, i.e. an option such as eagerload_all('x.y.z.y.x') will apply eagerloading to only those paths, i.e. and not 'x.y.x'; eagerload('children.children') applies only to exactly two-levels deep, etc. .. change:: :tags: orm :tickets: PickleType will compare using `==` when set up with mutable=False, and not the `is` operator. To use `is` or any other comparator, send in a custom comparison function using PickleType(comparator=my_custom_comparator). .. change:: :tags: orm :tickets: 848 query doesn't throw an error if you use distinct() and an order_by() containing UnaryExpressions (or other) together .. change:: :tags: orm :tickets: 786 order_by() expressions from joined tables are properly added to columns clause when using distinct() .. change:: :tags: orm :tickets: 858 fixed error where Query.add_column() would not accept a class-bound attribute as an argument; Query also raises an error if an invalid argument was sent to add_column() (at instances() time) .. change:: :tags: orm :tickets: added a little more checking for garbage-collection dereferences in InstanceState.__cleanup() to reduce "gc ignored" errors on app shutdown .. change:: :tags: orm :tickets: The session API has been solidified: .. change:: :tags: orm :tickets: 840 It's an error to session.save() an object which is already persistent .. change:: :tags: orm :tickets: It's an error to session.delete() an object which is *not* persistent. .. change:: :tags: orm :tickets: session.update() and session.delete() raise an error when updating or deleting an instance that is already in the session with a different identity. .. change:: :tags: orm :tickets: The session checks more carefully when determining "object X already in another session"; e.g. if you pickle a series of objects and unpickle (i.e. as in a Pylons HTTP session or similar), they can go into a new session without any conflict .. change:: :tags: orm :tickets: merge() includes a keyword argument "dont_load=True". setting this flag will cause the merge operation to not load any data from the database in response to incoming detached objects, and will accept the incoming detached object as though it were already present in that session. Use this to merge detached objects from external caching systems into the session. .. change:: :tags: orm :tickets: Deferred column attributes no longer trigger a load operation when the attribute is assigned to. In those cases, the newly assigned value will be present in the flushes' UPDATE statement unconditionally. .. change:: :tags: orm :tickets: 834 Fixed a truncation error when re-assigning a subset of a collection (obj.relation = obj.relation[1:]) .. change:: :tags: orm :tickets: 832 De-cruftified backref configuration code, backrefs which step on existing properties now raise an error .. change:: :tags: orm :tickets: 831 Improved behavior of add_property() etc., fixed involving synonym/deferred. .. change:: :tags: orm :tickets: Fixed clear_mappers() behavior to better clean up after itself. .. change:: :tags: orm :tickets: 841 Fix to "row switch" behavior, i.e. when an INSERT/DELETE is combined into a single UPDATE; many-to-many relations on the parent object update properly. .. 
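A sketch of the Query.having() addition at the top of this group of changes; the User/Address mapping and session are assumed, and the column-entity query style shown here settled in slightly later 0.4 releases::

    from sqlalchemy import func

    # users owning more than two addresses; having() appends to the
    # HAVING clause just as filter() appends to the WHERE clause
    q = session.query(User.name, func.count(Address.id)).join('addresses')
    q = q.group_by(User.name).having(func.count(Address.id) > 2)
    results = q.all()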
change:: :tags: orm :tickets: Fixed __hash__ for association proxy- these collections are unhashable, just like their mutable Python counterparts. .. change:: :tags: orm :tickets: Added proxying of save_or_update, __contains__ and __iter__ methods for scoped sessions. .. change:: :tags: orm :tickets: 852 fixed very hard-to-reproduce issue where by the FROM clause of Query could get polluted by certain generative calls .. change:: :tags: dialects :tickets: Added experimental support for MaxDB (versions >= 7.6.03.007 only). .. change:: :tags: dialects :tickets: oracle will now reflect "DATE" as an OracleDateTime column, not OracleDate .. change:: :tags: dialects :tickets: 847 added awareness of schema name in oracle table_names() function, fixes metadata.reflect(schema='someschema') .. change:: :tags: dialects :tickets: MSSQL anonymous labels for selection of functions made deterministic .. change:: :tags: dialects :tickets: sqlite will reflect "DECIMAL" as a numeric column. .. change:: :tags: dialects :tickets: 828 Made access dao detection more reliable .. change:: :tags: dialects :tickets: Renamed the Dialect attribute 'preexecute_sequences' to 'preexecute_pk_sequences'. An attribute porxy is in place for out-of-tree dialects using the old name. .. change:: :tags: dialects :tickets: Added test coverage for unknown type reflection. Fixed sqlite/mysql handling of type reflection for unknown types. .. change:: :tags: dialects :tickets: Added REAL for mysql dialect (for folks exploiting the REAL_AS_FLOAT sql mode). .. change:: :tags: dialects :tickets: mysql Float, MSFloat and MSDouble constructed without arguments now produce no-argument DDL, e.g.'FLOAT'. .. change:: :tags: misc :tickets: Removed unused util.hash(). .. changelog:: :version: 0.4.0 :released: Wed Oct 17 2007 .. change:: :tags: :tickets: (see 0.4.0beta1 for the start of major changes against 0.3, as well as http://www.sqlalchemy.org/trac/wiki/WhatsNewIn04 ) .. change:: :tags: :tickets: 785 Added initial Sybase support (mxODBC so far) .. change:: :tags: :tickets: Added partial index support for PostgreSQL. Use the postgres_where keyword on the Index. .. change:: :tags: :tickets: 817 string-based query param parsing/config file parser understands wider range of string values for booleans .. change:: :tags: :tickets: 813 backref remove object operation doesn't fail if the other-side collection doesn't contain the item, supports noload collections .. change:: :tags: :tickets: 818 removed __len__ from "dynamic" collection as it would require issuing a SQL "count()" operation, thus forcing all list evaluations to issue redundant SQL .. change:: :tags: :tickets: 816 inline optimizations added to locate_dirty() which can greatly speed up repeated calls to flush(), as occurs with autoflush=True .. change:: :tags: :tickets: The IdentifierPreprarer's _requires_quotes test is now regex based. Any out-of-tree dialects that provide custom sets of legal_characters or illegal_initial_characters will need to move to regexes or override _requires_quotes. .. change:: :tags: :tickets: Firebird has supports_sane_rowcount and supports_sane_multi_rowcount set to False due to ticket #370 (right way). .. change:: :tags: :tickets: Improvements and fixes on Firebird reflection: * FBDialect now mimics OracleDialect, regarding case-sensitivity of TABLE and COLUMN names (see 'case_sensitive remotion' topic on this current file). * FBDialect.table_names() doesn't bring system tables (ticket:796). * FB now reflects Column's nullable property correctly. .. 
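To illustrate the PostgreSQL partial index support noted in the 0.4.0 entries above (the table and column names are placeholders; the keyword is spelled postgres_where in this era)::

    from sqlalchemy import Index

    # index only the rows that are still active
    Index('ix_users_active_email', users.c.email,
          postgres_where=(users.c.active == True))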
change:: :tags: :tickets: Fixed SQL compiler's awareness of top-level column labels as used in result-set processing; nested selects which contain the same column names don't affect the result or conflict with result-column metadata. .. change:: :tags: :tickets: query.get() and related functions (like many-to-one lazyloading) use compile-time-aliased bind parameter names, to prevent name conflicts with bind parameters that already exist in the mapped selectable. .. change:: :tags: :tickets: 795 Fixed three- and multi-level select and deferred inheritance loading (i.e. abc inheritance with no select_table). .. change:: :tags: :tickets: Ident passed to id_chooser in shard.py always a list. .. change:: :tags: :tickets: The no-arg ResultProxy._row_processor() is now the class attribute `_process_row`. .. change:: :tags: :tickets: 797 Added support for returning values from inserts and updates for PostgreSQL 8.2+. .. change:: :tags: :tickets: PG reflection, upon seeing the default schema name being used explicitly as the "schema" argument in a Table, will assume that this is the user's desired convention, and will explicitly set the "schema" argument in foreign-key-related reflected tables, thus making them match only with Table constructors that also use the explicit "schema" argument (even though its the default schema). In other words, SA assumes the user is being consistent in this usage. .. change:: :tags: :tickets: 808 fixed sqlite reflection of BOOL/BOOLEAN .. change:: :tags: :tickets: Added support for UPDATE with LIMIT on mysql. .. change:: :tags: :tickets: 803 null foreign key on a m2o doesn't trigger a lazyload .. change:: :tags: :tickets: 800 oracle does not implicitly convert to unicode for non-typed result sets (i.e. when no TypeEngine/String/Unicode type is even being used; previously it was detecting DBAPI types and converting regardless). should fix .. change:: :tags: :tickets: 806 fix to anonymous label generation of long table/column names .. change:: :tags: :tickets: Firebird dialect now uses SingletonThreadPool as poolclass. .. change:: :tags: :tickets: Firebird now uses dialect.preparer to format sequences names .. change:: :tags: :tickets: 810 Fixed breakage with postgres and multiple two-phase transactions. Two-phase commits and rollbacks didn't automatically end up with a new transaction as the usual dbapi commits/rollbacks do. .. change:: :tags: :tickets: Added an option to the _ScopedExt mapper extension to not automatically save new objects to session on object initialization. .. change:: :tags: :tickets: fixed Oracle non-ansi join syntax .. change:: :tags: :tickets: PickleType and Interval types (on db not supporting it natively) are now slightly faster. .. change:: :tags: :tickets: Added Float and Time types to Firebird (FBFloat and FBTime). Fixed BLOB SUB_TYPE for TEXT and Binary types. .. change:: :tags: :tickets: Changed the API for the in\_ operator. in_() now accepts a single argument that is a sequence of values or a selectable. The old API of passing in values as varargs still works but is deprecated. .. changelog:: :version: 0.4.0beta6 :released: Thu Sep 27 2007 .. change:: :tags: :tickets: The Session identity map is now *weak referencing* by default, use weak_identity_map=False to use a regular dict. The weak dict we are using is customized to detect instances which are "dirty" and maintain a temporary strong reference to those instances until changes are flushed. .. 
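Referring back to the in_() API change noted just before the 0.4.0beta6 section above, a small sketch of the two call styles; the users table is a placeholder::

    # new style: a single sequence (or selectable) argument
    stmt = users.select(users.c.id.in_([1, 2, 3]))

    # deprecated varargs style from earlier releases
    # stmt = users.select(users.c.id.in_(1, 2, 3))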
change:: :tags: :tickets: 758 Mapper compilation has been reorganized such that most compilation occurs upon mapper construction. This allows us to have fewer calls to mapper.compile() and also to allow class-based properties to force a compilation (i.e. User.addresses == 7 will compile all mappers; this is). The only caveat here is that an inheriting mapper now looks for its inherited mapper upon construction; so mappers within inheritance relationships need to be constructed in inheritance order (which should be the normal case anyway). .. change:: :tags: :tickets: added "FETCH" to the keywords detected by Postgres to indicate a result-row holding statement (i.e. in addition to "SELECT"). .. change:: :tags: :tickets: Added full list of SQLite reserved keywords so that they get escaped properly. .. change:: :tags: :tickets: Tightened up the relationship between the Query's generation of "eager load" aliases, and Query.instances() which actually grabs the eagerly loaded rows. If the aliases were not specifically generated for that statement by EagerLoader, the EagerLoader will not take effect when the rows are fetched. This prevents columns from being grabbed accidentally as being part of an eager load when they were not meant for such, which can happen with textual SQL as well as some inheritance situations. It's particularly important since the "anonymous aliasing" of columns uses simple integer counts now to generate labels. .. change:: :tags: :tickets: Removed "parameters" argument from clauseelement.compile(), replaced with "column_keys". The parameters sent to execute() only interact with the insert/update statement compilation process in terms of the column names present but not the values for those columns. Produces more consistent execute/executemany behavior, simplifies things a bit internally. .. change:: :tags: :tickets: 560 Added 'comparator' keyword argument to PickleType. By default, "mutable" PickleType does a "deep compare" of objects using their dumps() representation. But this doesn't work for dictionaries. Pickled objects which provide an adequate __eq__() implementation can be set up with "PickleType(comparator=operator.eq)" .. change:: :tags: :tickets: Added session.is_modified(obj) method; performs the same "history" comparison operation as occurs within a flush operation; setting include_collections=False gives the same result as is used when the flush determines whether or not to issue an UPDATE for the instance's row. .. change:: :tags: :tickets: 584, 761 Added "schema" argument to Sequence; use this with Postgres /Oracle when the sequence is located in an alternate schema. Implements part of, should fix. .. change:: :tags: :tickets: Fixed reflection of the empty string for mysql enums. .. change:: :tags: :tickets: 794 Changed MySQL dialect to use the older LIMIT , syntax instead of LIMIT OFFSET for folks using 3.23. .. change:: :tags: :tickets: Added 'passive_deletes="all"' flag to relation(), disables all nulling-out of foreign key attributes during a flush where the parent object is deleted. .. change:: :tags: :tickets: Column defaults and onupdates, executing inline, will add parenthesis for subqueries and other parenthesis-requiring expressions .. change:: :tags: :tickets: 793 The behavior of String/Unicode types regarding that they auto-convert to TEXT/CLOB when no length is present now occurs *only* for an exact type of String or Unicode with no arguments. 
If you use VARCHAR or NCHAR (subclasses of String/Unicode) with no length, they will be interpreted by the dialect as VARCHAR/NCHAR; no "magic" conversion happens there. This is less surprising behavior and in particular this helps Oracle keep string-based bind parameters as VARCHARs and not CLOBs. .. change:: :tags: :tickets: 771 Fixes to ShardedSession to work with deferred columns. .. change:: :tags: :tickets: User-defined shard_chooser() function must accept "clause=None" argument; this is the ClauseElement passed to session.execute(statement) and can be used to determine correct shard id (since execute() doesn't take an instance.) .. change:: :tags: :tickets: 764 Adjusted operator precedence of NOT to match '==' and others, so that ~(x y) produces NOT (x y), which is better compatible with older MySQL versions.. This doesn't apply to "~(x==y)" as it does in 0.3 since ~(x==y) compiles to "x != y", but still applies to operators like BETWEEN. .. change:: :tags: :tickets: 757, 768, 779, 728 Other tickets:,,. .. changelog:: :version: 0.4.0beta5 :released: .. change:: :tags: :tickets: 754 Connection pool fixes; the better performance of beta4 remains but fixes "connection overflow" and other bugs which were present (like). .. change:: :tags: :tickets: 769 Fixed bugs in determining proper sync clauses from custom inherit conditions. .. change:: :tags: :tickets: 763 Extended 'engine_from_config' coercion for QueuePool size / overflow. .. change:: :tags: :tickets: 748 mysql views can be reflected again. .. change:: :tags: :tickets: AssociationProxy can now take custom getters and setters. .. change:: :tags: :tickets: Fixed malfunctioning BETWEEN in orm queries. .. change:: :tags: :tickets: 762 Fixed OrderedProperties pickling .. change:: :tags: :tickets: SQL-expression defaults and sequences now execute "inline" for all non-primary key columns during an INSERT or UPDATE, and for all columns during an executemany()-style call. inline=True flag on any insert/update statement also forces the same behavior with a single execute(). result.postfetch_cols() is a collection of columns for which the previous single insert or update statement contained a SQL-side default expression. .. change:: :tags: :tickets: 759 Fixed PG executemany() behavior. .. change:: :tags: :tickets: postgres reflects tables with autoincrement=False for primary key columns which have no defaults. .. change:: :tags: :tickets: postgres no longer wraps executemany() with individual execute() calls, instead favoring performance. "rowcount"/"concurrency" checks with deleted items (which use executemany) are disabled with PG since psycopg2 does not report proper rowcount for executemany(). .. change:: :tags: tickets, fixed :tickets: 742 .. change:: :tags: tickets, fixed :tickets: 748 .. change:: :tags: tickets, fixed :tickets: 760 .. change:: :tags: tickets, fixed :tickets: 762 .. change:: :tags: tickets, fixed :tickets: 763 .. changelog:: :version: 0.4.0beta4 :released: Wed Aug 22 2007 .. change:: :tags: :tickets: Tidied up what ends up in your namespace when you 'from sqlalchemy import \*': .. change:: :tags: :tickets: 'table' and 'column' are no longer imported. They remain available by direct reference (as in 'sql.table' and 'sql.column') or a glob import from the sql package. It was too easy to accidentally use a sql.expressions.table instead of schema.Table when just starting out with SQLAlchemy, likewise column. .. 
change:: :tags: :tickets: Internal-ish classes like ClauseElement, FromClause, NullTypeEngine, etc., are also no longer imported into your namespace .. change:: :tags: :tickets: The 'Smallinteger' compatiblity name (small i!) is no longer imported, but remains in schema.py for now. SmallInteger (big I!) is still imported. .. change:: :tags: :tickets: The connection pool uses a "threadlocal" strategy internally to return the same connection already bound to a thread, for "contextual" connections; these are the connections used when you do a "connectionless" execution like insert().execute(). This is like a "partial" version of the "threadlocal" engine strategy but without the thread-local transaction part of it. We're hoping it reduces connection pool overhead as well as database usage. However, if it proves to impact stability in a negative way, we'll roll it right back. .. change:: :tags: :tickets: Fix to bind param processing such that "False" values (like blank strings) still get processed/encoded. .. change:: :tags: :tickets: 752 Fix to select() "generative" behavior, such that calling column(), select_from(), correlate(), and with_prefix() does not modify the original select object .. change:: :tags: :tickets: Added a "legacy" adapter to types, such that user-defined TypeEngine and TypeDecorator classes which define convert_bind_param() and/or convert_result_value() will continue to function. Also supports calling the super() version of those methods. .. change:: :tags: :tickets: Added session.prune(), trims away instances cached in a session that are no longer referenced elsewhere. (A utility for strong-ref identity maps). .. change:: :tags: :tickets: Added close() method to Transaction. Closes out a transaction using rollback if it's the outermost transaction, otherwise just ends without affecting the outer transaction. .. change:: :tags: :tickets: Transactional and non-transactional Session integrates better with bound connection; a close() will ensure that connection transactional state is the same as that which existed on it before being bound to the Session. .. change:: :tags: :tickets: 735 Modified SQL operator functions to be module-level operators, allowing SQL expressions to be pickleable. .. change:: :tags: :tickets: Small adjustment to mapper class.__init__ to allow for Py2.6 object.__init__() behavior. .. change:: :tags: :tickets: Fixed 'prefix' argument for select() .. change:: :tags: :tickets: Connection.begin() no longer accepts nested=True, this logic is now all in begin_nested(). .. change:: :tags: :tickets: Fixes to new "dynamic" relation loader involving cascades .. change:: :tags: tickets, fixed :tickets: 735 .. change:: :tags: tickets, fixed :tickets: 752 .. changelog:: :version: 0.4.0beta3 :released: Thu Aug 16 2007 .. change:: :tags: :tickets: SQL types optimization: .. change:: :tags: :tickets: New performance tests show a combined mass-insert/mass-select test as having 68% fewer function calls than the same test run against 0.3. .. change:: :tags: :tickets: General performance improvement of result set iteration is around 10-20%. .. change:: :tags: :tickets: In types.AbstractType, convert_bind_param() and convert_result_value() have migrated to callable-returning bind_processor() and result_processor() methods. If no callable is returned, no pre/post processing function is called. .. change:: :tags: :tickets: Hooks added throughout base/sql/defaults to optimize the calling of bind aram/result processors so that method call overhead is minimized. .. 
change:: :tags: :tickets: Support added for executemany() scenarios such that unneeded "last row id" logic doesn't kick in, parameters aren't excessively traversed. .. change:: :tags: :tickets: Added 'inherit_foreign_keys' arg to mapper(). .. change:: :tags: :tickets: Added support for string date passthrough in sqlite. .. change:: :tags: tickets, fixed :tickets: 738 .. change:: :tags: tickets, fixed :tickets: 739 .. change:: :tags: tickets, fixed :tickets: 743 .. change:: :tags: tickets, fixed :tickets: 744 .. changelog:: :version: 0.4.0beta2 :released: Tue Aug 14 2007 .. change:: :tags: oracle, improvements. :tickets: Auto-commit after LOAD DATA INFILE for mysql. .. change:: :tags: oracle, improvements. :tickets: A rudimental SessionExtension class has been added, allowing user-defined functionality to take place at flush(), commit(), and rollback() boundaries. .. change:: :tags: oracle, improvements. :tickets: Added engine_from_config() function for helping to create_engine() from an .ini style config. .. change:: :tags: oracle, improvements. :tickets: base_mapper() becomes a plain attribute. .. change:: :tags: oracle, improvements. :tickets: session.execute() and scalar() can search for a Table with which to bind from using the given ClauseElement. .. change:: :tags: oracle, improvements. :tickets: Session automatically extrapolates tables from mappers with binds, also uses base_mapper so that inheritance hierarchies bind automatically. .. change:: :tags: oracle, improvements. :tickets: Moved ClauseVisitor traversal back to inlined non-recursive. .. change:: :tags: tickets, fixed :tickets: 730 .. change:: :tags: tickets, fixed :tickets: 732 .. change:: :tags: tickets, fixed :tickets: 733 .. change:: :tags: tickets, fixed :tickets: 734 .. changelog:: :version: 0.4.0beta1 :released: Sun Aug 12 2007 .. change:: :tags: orm :tickets: Speed! Along with recent speedups to ResultProxy, total number of function calls significantly reduced for large loads. .. change:: :tags: orm :tickets: test/perf/masseagerload.py reports 0.4 as having the fewest number of function calls across all SA versions (0.1, 0.2, and 0.3). .. change:: :tags: orm :tickets: 213 New collection_class api and implementation. Collections are now instrumented via decorations rather than proxying. You can now have collections that manage their own membership, and your class instance will be directly exposed on the relation property. The changes are transparent for most users. .. change:: :tags: orm :tickets: InstrumentedList (as it was) is removed, and relation properties no longer have 'clear()', '.data', or any other added methods beyond those provided by the collection type. You are free, of course, to add them to a custom class. .. change:: :tags: orm :tickets: __setitem__-like assignments now fire remove events for the existing value, if any. .. change:: :tags: orm :tickets: dict-likes used as collection classes no longer need to change __iter__ semantics- itervalues() is used by default instead. This is a backwards incompatible change. .. change:: :tags: orm :tickets: Subclassing dict for a mapped collection is no longer needed in most cases. orm.collections provides canned implementations that key objects by a specified column or a custom function of your choice. .. change:: :tags: orm :tickets: Collection assignment now requires a compatible type- assigning None to clear a collection or assigning a list to a dict collection will now raise an argument error. .. 
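A short sketch of the canned dict-style collection helpers noted above; the Parent/Child classes and their tables are assumed to exist and use classical mapper()/relation() configuration::

    from sqlalchemy.orm import mapper, relation
    from sqlalchemy.orm.collections import column_mapped_collection

    mapper(Parent, parent_table, properties={
        # children are exposed as a dict keyed on the child's "name" column
        'children': relation(
            Child, collection_class=column_mapped_collection(child_table.c.name)),
    })

..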
change:: :tags: orm :tickets: AttributeExtension moved to interfaces, and .delete is now .remove The event method signature has also been swapped around. .. change:: :tags: orm :tickets: Major overhaul for Query: .. change:: :tags: orm :tickets: All selectXXX methods are deprecated. Generative methods are now the standard way to do things, i.e. filter(), filter_by(), all(), one(), etc. Deprecated methods are docstring'ed with their new replacements. .. change:: :tags: orm :tickets: 643 Class-level properties are now usable as query elements... no more '.c.'! "Class.c.propname" is now superceded by "Class.propname". All clause operators are supported, as well as higher level operators such as Class.prop== for scalar attributes, Class.prop.contains() and Class.prop.any() for collection-based attributes (all are also negatable). Table-based column expressions as well as columns mounted on mapped classes via 'c' are of course still fully available and can be freely mixed with the new attributes. .. change:: :tags: orm :tickets: Removed ancient query.select_by_attributename() capability. .. change:: :tags: orm :tickets: The aliasing logic used by eager loading has been generalized, so that it also adds full automatic aliasing support to Query. It's no longer necessary to create an explicit Alias to join to the same tables multiple times; *even for self-referential relationships*. - join() and outerjoin() take arguments "aliased=True". Yhis causes their joins to be built on aliased tables; subsequent calls to filter() and filter_by() will translate all table expressions (yes, real expressions using the original mapped Table) to be that of the Alias for the duration of that join() (i.e. until reset_joinpoint() or another join() is called). - join() and outerjoin() take arguments "id=". When used with "aliased=True", the id can be referenced by add_entity(cls, id=) so that you can select the joined instances even if they're from an alias. - join() and outerjoin() now work with self-referential relationships! Using "aliased=True", you can join as many levels deep as desired, i.e. query.join(['children', 'children'], aliased=True); filter criterion will be against the rightmost joined table .. change:: :tags: orm :tickets: 660 Added query.populate_existing(), marks the query to reload all attributes and collections of all instances touched in the query, including eagerly-loaded entities. .. change:: :tags: orm :tickets: Added eagerload_all(), allows eagerload_all('x.y.z') to specify eager loading of all properties in the given path. .. change:: :tags: orm :tickets: Major overhaul for Session: .. change:: :tags: orm :tickets: New function which "configures" a session called "sessionmaker()". Send various keyword arguments to this function once, returns a new class which creates a Session against that stereotype. .. change:: :tags: orm :tickets: SessionTransaction removed from "public" API. You now can call begin()/ commit()/rollback() on the Session itself. .. change:: :tags: orm :tickets: Session also supports SAVEPOINT transactions; call begin_nested(). .. change:: :tags: orm :tickets: Session supports two-phase commit behavior when vertically or horizontally partitioning (i.e., using more than one engine). Use twophase=True. .. change:: :tags: orm :tickets: Session flag "transactional=True" produces a session which always places itself into a transaction when first used. Upon commit(), rollback() or close(), the transaction ends; but begins again on the next usage. .. 
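A brief sketch of the class-level attribute criteria described above, assuming hypothetical mapped User/Address classes with a one-to-many "addresses" relation::

    # class-bound attributes replace the old "User.c.name" style
    q = session.query(User).filter(User.name == 'ed')
    # collection attributes support any()/contains(), and are negatable
    q = q.filter(User.addresses.any(Address.email.like('%@example.com')))
    users = q.all()

..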
change:: :tags: orm :tickets: Session supports "autoflush=True". This issues a flush() before each query. Use in conjunction with transactional, and you can just save()/update() and then query, the new objects will be there. Use commit() at the end (or flush() if non-transactional) to flush remaining changes. .. change:: :tags: orm :tickets: New scoped_session() function replaces SessionContext and assignmapper. Builds onto "sessionmaker()" concept to produce a class whos Session() construction returns the thread-local session. Or, call all Session methods as class methods, i.e. Session.save(foo); Session.commit(). just like the old "objectstore" days. .. change:: :tags: orm :tickets: Added new "binds" argument to Session to support configuration of multiple binds with sessionmaker() function. .. change:: :tags: orm :tickets: A rudimental SessionExtension class has been added, allowing user-defined functionality to take place at flush(), commit(), and rollback() boundaries. .. change:: :tags: orm :tickets: Query-based relation()s available with dynamic_loader(). This is a *writable* collection (supporting append() and remove()) which is also a live Query object when accessed for reads. Ideal for dealing with very large collections where only partial loading is desired. .. change:: :tags: orm :tickets: flush()-embedded inline INSERT/UPDATE expressions. Assign any SQL expression, like "sometable.c.column + 1", to an instance's attribute. Upon flush(), the mapper detects the expression and embeds it directly in the INSERT or UPDATE statement; the attribute gets deferred on the instance so it loads the new value the next time you access it. .. change:: :tags: orm :tickets: 618 A rudimental sharding (horizontal scaling) system is introduced. This system uses a modified Session which can distribute read and write operations among multiple databases, based on user-defined functions defining the "sharding strategy". Instances and their dependents can be distributed and queried among multiple databases based on attribute values, round-robin approaches or any other user-defined system. .. change:: :tags: orm :tickets: 659 Eager loading has been enhanced to allow even more joins in more places. It now functions at any arbitrary depth along self-referential and cyclical structures. When loading cyclical structures, specify "join_depth" on relation() indicating how many times you'd like the table to join to itself; each level gets a distinct table alias. The alias names themselves are generated at compile time using a simple counting scheme now and are a lot easier on the eyes, as well as of course completely deterministic. .. change:: :tags: orm :tickets: 211 Added composite column properties. This allows you to create a type which is represented by more than one column, when using the ORM. Objects of the new type are fully functional in query expressions, comparisons, query.get() clauses, etc. and act as though they are regular single-column scalars... except they're not! Use the function composite(cls, \*columns) inside of the mapper's "properties" dict, and instances of cls will be created/mapped to a single attribute, comprised of the values correponding to \*columns. .. change:: :tags: orm :tickets: Improved support for custom column_property() attributes which feature correlated subqueries, works better with eager loading now. .. 
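A condensed sketch of the composite() column property described above; the Vertex class and vertices_table are assumptions for illustration::

    from sqlalchemy.orm import mapper, composite

    class Point(object):
        def __init__(self, x, y):
            self.x, self.y = x, y
        def __composite_values__(self):
            return self.x, self.y

    mapper(Vertex, vertices_table, properties={
        # two columns mapped to a single "start" attribute of type Point
        'start': composite(Point, vertices_table.c.x1, vertices_table.c.y1),
    })

..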
change:: :tags: orm :tickets: 611 Primary key "collapse" behavior; the mapper will analyze all columns in its given selectable for primary key "equivalence", that is, columns which are equivalent via foreign key relationship or via an explicit inherit_condition. primarily for joined-table inheritance scenarios where different named PK columns in inheriting tables should "collapse" into a single-valued (or fewer-valued) primary key. Fixes things like. .. change:: :tags: orm :tickets: Joined-table inheritance will now generate the primary key columns of all inherited classes against the root table of the join only. This implies that each row in the root table is distinct to a single instance. If for some rare reason this is not desireable, explicit primary_key settings on individual mappers will override it. .. change:: :tags: orm :tickets: When "polymorphic" flags are used with joined-table or single-table inheritance, all identity keys are generated against the root class of the inheritance hierarchy; this allows query.get() to work polymorphically using the same caching semantics as a non-polymorphic get. Note that this currently does not work with concrete inheritance. .. change:: :tags: orm :tickets: Secondary inheritance loading: polymorphic mappers can be constructed *without* a select_table argument. inheriting mappers whose tables were not represented in the initial load will issue a second SQL query immediately, once per instance (i.e. not very efficient for large lists), in order to load the remaining columns. .. change:: :tags: orm :tickets: Secondary inheritance loading can also move its second query into a column-level "deferred" load, via the "polymorphic_fetch" argument, which can be set to 'select' or 'deferred' .. change:: :tags: orm :tickets: 696 It's now possible to map only a subset of available selectable columns onto mapper properties, using include_columns/exclude_columns.. .. change:: :tags: orm :tickets: Added undefer_group() MapperOption, sets a set of "deferred" columns joined by a "group" to load as "undeferred". .. change:: :tags: orm :tickets: Rewrite of the "deterministic alias name" logic to be part of the SQL layer, produces much simpler alias and label names more in the style of Hibernate .. change:: :tags: sql :tickets: Speed! Clause compilation as well as the mechanics of SQL constructs have been streamlined and simplified to a signficant degree, for a 20-30% improvement of the statement construction/compilation overhead of 0.3. .. change:: :tags: sql :tickets: All "type" keyword arguments, such as those to bindparam(), column(), Column(), and func.(), renamed to "type\_". Those objects still name their "type" attribute as "type". .. change:: :tags: sql :tickets: case_sensitive=(True|False) setting removed from schema items, since checking this state added a lot of method call overhead and there was no decent reason to ever set it to False. Table and column names which are all lower case will be treated as case-insenstive (yes we adjust for Oracle's UPPERCASE style too). .. change:: :tags: transactions :tickets: Added context manager (with statement) support for transactions. .. change:: :tags: transactions :tickets: Added support for two phase commit, works with mysql and postgres so far. .. change:: :tags: transactions :tickets: Added a subtransaction implementation that uses savepoints. .. change:: :tags: transactions :tickets: Added support for savepoints. .. 
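A minimal sketch of the savepoint support noted above, assuming an existing engine::

    conn = engine.connect()
    trans = conn.begin()
    sp = conn.begin_nested()      # emits a SAVEPOINT
    # ... work that may need to be undone ...
    sp.rollback()                 # rolls back to the savepoint only
    trans.commit()                # the enclosing transaction is unaffected

..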
change:: :tags: metadata :tickets: Tables can be reflected from the database en-masse without declaring them in advance. MetaData(engine, reflect=True) will load all tables present in the database, or use metadata.reflect() for finer control. .. change:: :tags: metadata :tickets: DynamicMetaData has been renamed to ThreadLocalMetaData .. change:: :tags: metadata :tickets: The ThreadLocalMetaData constructor now takes no arguments. .. change:: :tags: metadata :tickets: BoundMetaData has been removed- regular MetaData is equivalent .. change:: :tags: metadata :tickets: 646 Numeric and Float types now have an "asdecimal" flag; defaults to True for Numeric, False for Float. When True, values are returned as decimal.Decimal objects; when False, values are returned as float(). The defaults of True/False are already the behavior for PG and MySQL's DBAPI modules. .. change:: :tags: metadata :tickets: 475 New SQL operator implementation which removes all hardcoded operators from expression structures and moves them into compilation; allows greater flexibility of operator compilation; for example, "+" compiles to "||" when used in a string context, or "concat(a,b)" on MySQL; whereas in a numeric context it compiles to "+". Fixes. .. change:: :tags: metadata :tickets: "Anonymous" alias and label names are now generated at SQL compilation time in a completely deterministic fashion... no more random hex IDs .. change:: :tags: metadata :tickets: Significant architectural overhaul to SQL elements (ClauseElement). All elements share a common "mutability" framework which allows a consistent approach to in-place modifications of elements as well as generative behavior. Improves stability of the ORM which makes heavy usage of mutations to SQL expressions. .. change:: :tags: metadata :tickets: select() and union()'s now have "generative" behavior. Methods like order_by() and group_by() return a *new* instance - the original instance is left unchanged. Non-generative methods remain as well. .. change:: :tags: metadata :tickets: 569, 52 The internals of select/union vastly simplified- all decision making regarding "is subquery" and "correlation" pushed to SQL generation phase. select() elements are now *never* mutated by their enclosing containers or by any dialect's compilation process .. change:: :tags: metadata :tickets: select(scalar=True) argument is deprecated; use select(..).as_scalar(). The resulting object obeys the full "column" interface and plays better within expressions. .. change:: :tags: metadata :tickets: 504 Added select().with_prefix('foo') allowing any set of keywords to be placed before the columns clause of the SELECT .. change:: :tags: metadata :tickets: 686 Added array slice support to row[] .. change:: :tags: metadata :tickets: Result sets make a better attempt at matching the DBAPI types present in cursor.description to the TypeEngine objects defined by the dialect, which are then used for result-processing. Note this only takes effect for textual SQL; constructed SQL statements always have an explicit type map. .. change:: :tags: metadata :tickets: Result sets from CRUD operations close their underlying cursor immediately and will also autoclose the connection if defined for the operation; this allows more efficient usage of connections for successive CRUD operations with less chance of "dangling connections". .. change:: :tags: metadata :tickets: 559 Column defaults and onupdate Python functions (i.e. 
passed to ColumnDefault) may take zero or one arguments; the one argument is the ExecutionContext, from which you can call "context.parameters[someparam]" to access the other bind parameter values affixed to the statement. The connection used for the execution is available as well so that you can pre-execute statements. .. change:: :tags: metadata :tickets: Added "explcit" create/drop/execute support for sequences (i.e. you can pass a "connectable" to each of those methods on Sequence). .. change:: :tags: metadata :tickets: Better quoting of identifiers when manipulating schemas. .. change:: :tags: metadata :tickets: Standardized the behavior for table reflection where types can't be located; NullType is substituted instead, warning is raised. .. change:: :tags: metadata :tickets: 606 ColumnCollection (i.e. the 'c' attribute on tables) follows dictionary semantics for "__contains__" .. change:: :tags: engines :tickets: Speed! The mechanics of result processing and bind parameter processing have been overhauled, streamlined and optimized to issue as little method calls as possible. Bench tests for mass INSERT and mass rowset iteration both show 0.4 to be over twice as fast as 0.3, using 68% fewer function calls. .. change:: :tags: engines :tickets: You can now hook into the pool lifecycle and run SQL statements or other logic at new each DBAPI connection, pool check-out and check-in. .. change:: :tags: engines :tickets: Connections gain a .properties collection, with contents scoped to the lifetime of the underlying DBAPI connection .. change:: :tags: engines :tickets: Removed auto_close_cursors and disallow_open_cursors arguments from Pool; reduces overhead as cursors are normally closed by ResultProxy and Connection. .. change:: :tags: extensions :tickets: proxyengine is temporarily removed, pending an actually working replacement. .. change:: :tags: extensions :tickets: SelectResults has been replaced by Query. SelectResults / SelectResultsExt still exist but just return a slightly modified Query object for backwards-compatibility. join_to() method from SelectResults isn't present anymore, need to use join(). .. change:: :tags: mysql :tickets: Table and column names loaded via reflection are now Unicode. .. change:: :tags: mysql :tickets: All standard column types are now supported, including SET. .. change:: :tags: mysql :tickets: Table reflection can now be performed in as little as one round-trip. .. change:: :tags: mysql :tickets: ANSI and ANSI_QUOTES sql modes are now supported. .. change:: :tags: mysql :tickets: Indexes are now reflected. .. change:: :tags: postgres :tickets: Added PGArray datatype for using postgres array datatypes. .. change:: :tags: oracle :tickets: 507 Very rudimental support for OUT parameters added; use sql.outparam(name, type) to set up an OUT parameter, just like bindparam(); after execution, values are avaiable via result.out_parameters dictionary. SQLAlchemy-0.8.4/doc/_sources/changelog/changelog_05.txt0000644000076500000240000033063712251147171023560 0ustar classicstaff00000000000000 ============== 0.5 Changelog ============== .. changelog:: :version: 0.5.9 :released: .. change:: :tags: sql :tickets: 1661 Fixed erroneous self_group() call in expression package. .. changelog:: :version: 0.5.8 :released: Sat Jan 16 2010 .. change:: :tags: sql :tickets: The copy() method on Column now supports uninitialized, unnamed Column objects. This allows easy creation of declarative helpers which place common columns on multiple subclasses. .. 
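A possible shape for the copy()-based declarative helper mentioned above; the Base declarative class and the model classes are assumptions::

    import datetime
    from sqlalchemy import Column, DateTime, Integer

    # an unnamed, unattached Column can be copy()'d onto several classes
    created_at = Column(DateTime, default=datetime.datetime.utcnow)

    class Order(Base):
        __tablename__ = 'orders'
        id = Column(Integer, primary_key=True)
        created_at = created_at.copy()

    class Invoice(Base):
        __tablename__ = 'invoices'
        id = Column(Integer, primary_key=True)
        created_at = created_at.copy()

..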
change:: :tags: sql :tickets: Default generators like Sequence() translate correctly across a copy() operation. .. change:: :tags: sql :tickets: Sequence() and other DefaultGenerator objects are accepted as the value for the "default" and "onupdate" keyword arguments of Column, in addition to being accepted positionally. .. change:: :tags: sql :tickets: 1568, 1617 Fixed a column arithmetic bug that affected column correspondence for cloned selectables which contain free-standing column expressions. This bug is generally only noticeable when exercising newer ORM behavior only availble in 0.6 via, but is more correct at the SQL expression level as well. .. change:: :tags: postgresql :tickets: 1647 The extract() function, which was slightly improved in 0.5.7, needed a lot more work to generate the correct typecast (the typecasts appear to be necessary in PG's EXTRACT quite a lot of the time). The typecast is now generated using a rule dictionary based on PG's documentation for date/time/interval arithmetic. It also accepts text() constructs again, which was broken in 0.5.7. .. change:: :tags: firebird :tickets: 1646 Recognize more errors as disconnections. .. changelog:: :version: 0.5.7 :released: Sat Dec 26 2009 .. change:: :tags: orm :tickets: 1543 contains_eager() now works with the automatically generated subquery that results when you say "query(Parent).join(Parent.somejoinedsubclass)", i.e. when Parent joins to a joined-table-inheritance subclass. Previously contains_eager() would erroneously add the subclass table to the query separately producing a cartesian product. An example is in the ticket description. .. change:: :tags: orm :tickets: 1553 query.options() now only propagate to loaded objects for potential further sub-loads only for options where such behavior is relevant, keeping various unserializable options like those generated by contains_eager() out of individual instance states. .. change:: :tags: orm :tickets: 1054 Session.execute() now locates table- and mapper-specific binds based on a passed in expression which is an insert()/update()/delete() construct. .. change:: :tags: orm :tickets: Session.merge() now properly overwrites a many-to-one or uselist=False attribute to None if the attribute is also None in the given object to be merged. .. change:: :tags: orm :tickets: 1618 Fixed a needless select which would occur when merging transient objects that contained a null primary key identifier. .. change:: :tags: orm :tickets: 1585 Mutable collection passed to the "extension" attribute of relation(), column_property() etc. will not be mutated or shared among multiple instrumentation calls, preventing duplicate extensions, such as backref populators, from being inserted into the list. .. change:: :tags: orm :tickets: 1504 Fixed the call to get_committed_value() on CompositeProperty. .. change:: :tags: orm :tickets: 1602 Fixed bug where Query would crash if a join() with no clear "left" side were called when a non-mapped column entity appeared in the columns list. .. change:: :tags: orm :tickets: 1616, 1480 Fixed bug whereby composite columns wouldn't load properly when configured on a joined-table subclass, introduced in version 0.5.6 as a result of the fix for. thx to Scott Torborg. .. change:: :tags: orm :tickets: 1556 The "use get" behavior of many-to-one relations, i.e. that a lazy load will fallback to the possibly cached query.get() value, now works across join conditions where the two compared types are not exactly the same class, but share the same "affinity" - i.e. 
Integer and SmallInteger. Also allows combinations of reflected and non-reflected types to work with 0.5 style type reflection, such as PGText/Text (note 0.6 reflects types as their generic versions). .. change:: :tags: orm :tickets: 1436 Fixed bug in query.update() when passing Cls.attribute as keys in the value dict and using synchronize_session='expire' ('fetch' in 0.6). .. change:: :tags: sql :tickets: 1603 Fixed bug in two-phase transaction whereby commit() method didn't set the full state which allows subsequent close() call to succeed. .. change:: :tags: sql :tickets: Fixed the "numeric" paramstyle, which apparently is the default paramstyle used by Informixdb. .. change:: :tags: sql :tickets: 1574 Repeat expressions in the columns clause of a select are deduped based on the identity of each clause element, not the actual string. This allows positional elements to render correctly even if they all render identically, such as "qmark" style bind parameters. .. change:: :tags: sql :tickets: 1632 The cursor associated with connection pool connections (i.e. _CursorFairy) now proxies `__iter__()` to the underlying cursor correctly. .. change:: :tags: sql :tickets: 1556 types now support an "affinity comparison" operation, i.e. that an Integer/SmallInteger are "compatible", or a Text/String, PickleType/Binary, etc. Part of. .. change:: :tags: sql :tickets: 1641 Fixed bug preventing alias() of an alias() from being cloned or adapted (occurs frequently in ORM operations). .. change:: :tags: sqlite :tickets: 1439 sqlite dialect properly generates CREATE INDEX for a table that is in an alternate schema. .. change:: :tags: postgresql :tickets: 1085 Added support for reflecting the DOUBLE PRECISION type, via a new postgres.PGDoublePrecision object. This is postgresql.DOUBLE_PRECISION in 0.6. .. change:: :tags: postgresql :tickets: 460 Added support for reflecting the INTERVAL YEAR TO MONTH and INTERVAL DAY TO SECOND syntaxes of the INTERVAL type. .. change:: :tags: postgresql :tickets: 1576 Corrected the "has_sequence" query to take current schema, or explicit sequence-stated schema, into account. .. change:: :tags: postgresql :tickets: 1611 Fixed the behavior of extract() to apply operator precedence rules to the "::" operator when applying the "timestamp" cast - ensures proper parenthesization. .. change:: :tags: mssql :tickets: 1561 Changed the name of TrustedConnection to Trusted_Connection when constructing pyodbc connect arguments .. change:: :tags: oracle :tickets: 1637 The "table_names" dialect function, used by MetaData .reflect(), omits "index overflow tables", a system table generated by Oracle when "index only tables" with overflow are used. These tables aren't accessible via SQL and can't be reflected. .. change:: :tags: ext :tickets: 1570, 1523 A column can be added to a joined-table declarative superclass after the class has been constructed (i.e. via class-level attribute assignment), and the column will be propagated down to subclasses. This is the reverse situation as that of, fixed in 0.5.6. .. change:: :tags: ext :tickets: 1491 Fixed a slight inaccuracy in the sharding example. Comparing equivalence of columns in the ORM is best accomplished using col1.shares_lineage(col2). .. change:: :tags: ext :tickets: 1606 Removed unused `load()` method from ShardedQuery. .. changelog:: :version: 0.5.6 :released: Sat Sep 12 2009 .. change:: :tags: orm :tickets: 1300 Fixed bug whereby inheritance discriminator part of a composite primary key would fail on updates. Continuation of. .. 
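A short sketch of the query.update() usage covered by the 0.5.7 note above, using mapped attributes as value-dict keys with the 0.5-era synchronize_session='expire'; the session and User class are assumed::

    session.query(User).filter(User.status == 'trial').update(
        {User.status: 'expired'},
        synchronize_session='expire')

..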
change:: :tags: orm :tickets: 1507 Fixed bug which disallowed one side of a many-to-many bidirectional reference to declare itself as "viewonly" .. change:: :tags: orm :tickets: 1526 Added an assertion that prevents a @validates function or other AttributeExtension from loading an unloaded collection such that internal state may be corrupted. .. change:: :tags: orm :tickets: 1519 Fixed bug which prevented two entities from mutually replacing each other's primary key values within a single flush() for some orderings of operations. .. change:: :tags: orm :tickets: 1485 Fixed an obscure issue whereby a joined-table subclass with a self-referential eager load on the base class would populate the related object's "subclass" table with data from the "subclass" table of the parent. .. change:: :tags: orm :tickets: 1477 relations() now have greater ability to be "overridden", meaning a subclass that explicitly specifies a relation() overriding that of the parent class will be honored during a flush. This is currently to support many-to-many relations from concrete inheritance setups. Outside of that use case, YMMV. .. change:: :tags: orm :tickets: 1483 Squeezed a few more unnecessary "lazy loads" out of relation(). When a collection is mutated, many-to-one backrefs on the other side will not fire off to load the "old" value, unless "single_parent=True" is set. A direct assignment of a many-to-one still loads the "old" value in order to update backref collections on that value, which may be present in the session already, thus maintaining the 0.5 behavioral contract. .. change:: :tags: orm :tickets: 1480 Fixed bug whereby a load/refresh of joined table inheritance attributes which were based on column_property() or similar would fail to evaluate. .. change:: :tags: orm :tickets: 1488 Improved support for MapperProperty objects overriding that of an inherited mapper for non-concrete inheritance setups - attribute extensions won't randomly collide with each other. .. change:: :tags: orm :tickets: 1487 UPDATE and DELETE do not support ORDER BY, LIMIT, OFFSET, etc. in standard SQL. Query.update() and Query.delete() now raise an exception if any of limit(), offset(), order_by(), group_by(), or distinct() have been called. .. change:: :tags: orm :tickets: Added AttributeExtension to sqlalchemy.orm.__all__ .. change:: :tags: orm :tickets: 1476 Improved error message when query() is called with a non-SQL /entity expression. .. change:: :tags: orm :tickets: 1440 Using False or 0 as a polymorphic discriminator now works on the base class as well as a subclass. .. change:: :tags: orm :tickets: 1424 Added enable_assertions(False) to Query which disables the usual assertions for expected state - used by Query subclasses to engineer custom state.. See http://www.sqlalchemy.org/trac/wiki/UsageRecipes/PreFilteredQuery for an example. .. change:: :tags: orm :tickets: 1501 Fixed recursion issue which occured if a mapped object's `__len__()` or `__nonzero__()` method resulted in state changes. .. change:: :tags: orm :tickets: 1506 Fixed incorrect exception raise in Weak/StrongIdentityMap.add() .. change:: :tags: orm :tickets: 1522 Fixed the error message for "could not find a FROM clause" in query.join() which would fail to issue correctly if the query was against a pure SQL construct. .. change:: :tags: orm :tickets: 1486 Fixed a somewhat hypothetical issue which would result in the wrong primary key being calculated for a mapper using the old polymorphic_union function - but this is old stuff. .. 
change:: :tags: sql :tickets: 1373 Fixed column.copy() to copy defaults and onupdates. .. change:: :tags: sql :tickets: Fixed a bug in extract() introduced in 0.5.4 whereby the string "field" argument was getting treated as a ClauseElement, causing various errors within more complex SQL transformations. .. change:: :tags: sql :tickets: 1420 Unary expressions such as DISTINCT propagate their type handling to result sets, allowing conversions like unicode and such to take place. .. change:: :tags: sql :tickets: 1482 Fixed bug in Table and Column whereby passing empty dict for "info" argument would raise an exception. .. change:: :tags: oracle :tickets: 1309 Backported 0.6 fix for Oracle alias names not getting truncated. .. change:: :tags: ext :tickets: 1446 The collection proxies produced by associationproxy are now pickleable. A user-defined proxy_factory however is still not pickleable unless it defines __getstate__ and __setstate__. .. change:: :tags: ext :tickets: 1468 Declarative will raise an informative exception if __table_args__ is passed as a tuple with no dict argument. Improved documentation. .. change:: :tags: ext :tickets: 1527 Table objects declared in the MetaData can now be used in string expressions sent to primaryjoin/secondaryjoin/ secondary - the name is pulled from the MetaData of the declarative base. .. change:: :tags: ext :tickets: 1523 A column can be added to a joined-table subclass after the class has been constructed (i.e. via class-level attribute assignment). The column is added to the underlying Table as always, but now the mapper will rebuild its "join" to include the new column, instead of raising an error about "no such column, use column_property() instead". .. change:: :tags: test :tickets: Added examples into the test suite so they get exercised regularly and cleaned up a couple deprecation warnings. .. changelog:: :version: 0.5.5 :released: Mon Jul 13 2009 .. change:: :tags: general :tickets: 970 unit tests have been migrated from unittest to nose. See README.unittests for information on how to run the tests. .. change:: :tags: orm :tickets: The "foreign_keys" argument of relation() will now propagate automatically to the backref in the same way that primaryjoin and secondaryjoin do. For the extremely rare use case where the backref of a relation() has intentionally different "foreign_keys" configured, both sides now need to be configured explicity (if they do in fact require this setting, see the next note...). .. change:: :tags: orm :tickets: ...the only known (and really, really rare) use case where a different foreign_keys setting was used on the forwards/backwards side, a composite foreign key that partially points to its own columns, has been enhanced such that the fk->itself aspect of the relation won't be used to determine relation direction. .. change:: :tags: orm :tickets: Session.mapper is now *deprecated*. Call session.add() if you'd like a free-standing object to be part of your session. Otherwise, a DIY version of Session.mapper is now documented at http://www.sqlalchemy.org/trac/wiki/UsageRecipes/SessionAwareMapper The method will remain deprecated throughout 0.6. .. change:: :tags: orm :tickets: 1431 Fixed Query being able to join() from individual columns of a joined-table subclass entity, i.e. query(SubClass.foo, SubcClass.bar).join(). In most cases, an error "Could not find a FROM clause to join from" would be raised. 
In a few others, the result would be returned in terms of the base class rather than the subclass - so applications which relied on this erroneous result need to be adjusted. .. change:: :tags: orm :tickets: 1461 Fixed a bug involving contains_eager(), which would apply itself to a secondary (i.e. lazy) load in a particular rare case, producing cartesian products. improved the targeting of query.options() on secondary loads overall. .. change:: :tags: orm :tickets: Fixed bug introduced in 0.5.4 whereby Composite types fail when default-holding columns are flushed. .. change:: :tags: orm :tickets: 1426 Fixed another 0.5.4 bug whereby mutable attributes (i.e. PickleType) wouldn't be deserialized correctly when the whole object was serialized. .. change:: :tags: orm :tickets: Fixed bug whereby session.is_modified() would raise an exception if any synonyms were in use. .. change:: :tags: orm :tickets: Fixed potential memory leak whereby previously pickled objects placed back in a session would not be fully garbage collected unless the Session were explicitly closed out. .. change:: :tags: orm :tickets: Fixed bug whereby list-based attributes, like pickletype and PGArray, failed to be merged() properly. .. change:: :tags: orm :tickets: Repaired non-working attributes.set_committed_value function. .. change:: :tags: orm :tickets: Trimmed the pickle format for InstanceState which should further reduce the memory footprint of pickled instances. The format should be backwards compatible with that of 0.5.4 and previous. .. change:: :tags: orm :tickets: 1463 sqlalchemy.orm.join and sqlalchemy.orm.outerjoin are now added to __all__ in sqlalchemy.orm.*. .. change:: :tags: orm :tickets: 1458 Fixed bug where Query exception raise would fail when a too-short composite primary key value were passed to get(). .. change:: :tags: sql :tickets: Removed an obscure feature of execute() (including connection, engine, Session) whereby a bindparam() construct can be sent as a key to the params dictionary. This usage is undocumented and is at the core of an issue whereby the bindparam() object created implicitly by a text() construct may have the same hash value as a string placed in the params dictionary and may result in an inappropriate match when computing the final bind parameters. Internal checks for this condition would add significant latency to the critical task of parameter rendering, so the behavior is removed. This is a backwards incompatible change for any application that may have been using this feature, however the feature has never been documented. .. change:: :tags: engine/pool :tickets: Implemented recreate() for StaticPool. .. changelog:: :version: 0.5.4p2 :released: Tue May 26 2009 .. change:: :tags: sql :tickets: Repaired the printing of SQL exceptions which are not based on parameters or are not executemany() style. .. change:: :tags: postgresql :tickets: Deprecated the hardcoded TIMESTAMP function, which when used as func.TIMESTAMP(value) would render "TIMESTAMP value". This breaks on some platforms as PostgreSQL doesn't allow bind parameters to be used in this context. The hard-coded uppercase is also inappropriate and there's lots of other PG casts that we'd need to support. So instead, use text constructs i.e. select(["timestamp '12/05/09'"]). .. changelog:: :version: 0.5.4p1 :released: Mon May 18 2009 .. change:: :tags: orm :tickets: Fixed an attribute error introduced in 0.5.4 which would occur when merge() was used with an incomplete object. .. 
changelog:: :version: 0.5.4 :released: Sun May 17 2009 .. change:: :tags: orm :tickets: 1398 Significant performance enhancements regarding Sessions/flush() in conjunction with large mapper graphs, large numbers of objects: - Removed all* O(N) scanning behavior from the flush() process, i.e. operations that were scanning the full session, including an extremely expensive one that was erroneously assuming primary key values were changing when this was not the case. * one edge case remains which may invoke a full scan, if an existing primary key attribute is modified to a new value. - The Session's "weak referencing" behavior is now *full* - no strong references whatsoever are made to a mapped object or related items/collections in its __dict__. Backrefs and other cycles in objects no longer affect the Session's ability to lose all references to unmodified objects. Objects with pending changes still are maintained strongly until flush. The implementation also improves performance by moving the "resurrection" process of garbage collected items to only be relevant for mappings that map "mutable" attributes (i.e. PickleType, composite attrs). This removes overhead from the gc process and simplifies internal behavior. If a "mutable" attribute change is the sole change on an object which is then dereferenced, the mapper will not have access to other attribute state when the UPDATE is issued. This may present itself differently to some MapperExtensions. The change also affects the internal attribute API, but not the AttributeExtension interface nor any of the publically documented attribute functions. - The unit of work no longer genererates a graph of "dependency" processors for the full graph of mappers during flush(), instead creating such processors only for those mappers which represent objects with pending changes. This saves a tremendous number of method calls in the context of a large interconnected graph of mappers. - Cached a wasteful "table sort" operation that previously occured multiple times per flush, also removing significant method call count from flush(). - Other redundant behaviors have been simplified in mapper._save_obj(). .. change:: :tags: orm :tickets: Modified query_cls on DynamicAttributeImpl to accept a full mixin version of the AppenderQuery, which allows subclassing the AppenderMixin. .. change:: :tags: orm :tickets: 1300 The "polymorphic discriminator" column may be part of a primary key, and it will be populated with the correct discriminator value. .. change:: :tags: orm :tickets: Fixed the evaluator not being able to evaluate IS NULL clauses. .. change:: :tags: orm :tickets: 1352 Fixed the "set collection" function on "dynamic" relations to initiate events correctly. Previously a collection could only be assigned to a pending parent instance, otherwise modified events would not be fired correctly. Set collection is now compatible with merge(), fixes. .. change:: :tags: orm :tickets: Allowed pickling of PropertyOption objects constructed with instrumented descriptors; previously, pickle errors would occur when pickling an object which was loaded with a descriptor-based option, such as query.options(eagerload(MyClass.foo)). .. change:: :tags: orm :tickets: 1357 Lazy loader will not use get() if the "lazy load" SQL clause matches the clause used by get(), but contains some parameters hardcoded. Previously the lazy strategy would fail with the get(). Ideally get() would be used with the hardcoded parameters but this would require further development. .. 
change:: :tags: orm :tickets: 1391 MapperOptions and other state associated with query.options() is no longer bundled within callables associated with each lazy/deferred-loading attribute during a load. The options are now associated with the instance's state object just once when it's populated. This removes the need in most cases for per-instance/attribute loader objects, improving load speed and memory overhead for individual instances. .. change:: :tags: orm :tickets: 1360 Fixed another location where autoflush was interfering with session.merge(). autoflush is disabled completely for the duration of merge() now. .. change:: :tags: orm :tickets: 1406 Fixed bug which prevented "mutable primary key" dependency logic from functioning properly on a one-to-one relation(). .. change:: :tags: orm :tickets: Fixed bug in relation(), introduced in 0.5.3, whereby a self referential relation from a base class to a joined-table subclass would not configure correctly. .. change:: :tags: orm :tickets: Fixed obscure mapper compilation issue when inheriting mappers are used which would result in un-initialized attributes. .. change:: :tags: orm :tickets: Fixed documentation for session weak_identity_map - the default value is True, indicating a weak referencing map in use. .. change:: :tags: orm :tickets: 1376 Fixed a unit of work issue whereby the foreign key attribute on an item contained within a collection owned by an object being deleted would not be set to None if the relation() was self-referential. .. change:: :tags: orm :tickets: 1378 Fixed Query.update() and Query.delete() failures with eagerloaded relations. .. change:: :tags: orm :tickets: It is now an error to specify both columns of a binary primaryjoin condition in the foreign_keys or remote_side collection. Whereas previously it was just nonsensical, but would succeed in a non-deterministic way. .. change:: :tags: ticket: 594, 1341, schema :tickets: Added a quote_schema() method to the IdentifierPreparer class so that dialects can override how schemas get handled. This enables the MSSQL dialect to treat schemas as multipart identifiers, such as 'database.owner'. .. change:: :tags: sql :tickets: Back-ported the "compiler" extension from SQLA 0.6. This is a standardized interface which allows the creation of custom ClauseElement subclasses and compilers. In particular it's handy as an alternative to text() when you'd like to build a construct that has database-specific compilations. See the extension docs for details. .. change:: :tags: sql :tickets: 1413 Exception messages are truncated when the list of bound parameters is larger than 10, preventing enormous multi-page exceptions from filling up screens and logfiles for large executemany() statements. .. change:: :tags: sql :tickets: ``sqlalchemy.extract()`` is now dialect sensitive and can extract components of timestamps idiomatically across the supported databases, including SQLite. .. change:: :tags: sql :tickets: 1353 Fixed __repr__() and other _get_colspec() methods on ForeignKey constructed from __clause_element__() style construct (i.e. declarative columns). .. change:: :tags: mysql :tickets: 1405 Reflecting a FOREIGN KEY construct will take into account a dotted schema.tablename combination, if the foreign key references a table in a remote schema. .. change:: :tags: mssql :tickets: Modified how savepoint logic works to prevent it from stepping on non-savepoint oriented routines. Savepoint support is still very experimental. .. 
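A minimal sketch of the back-ported compiler extension mentioned above, following the pattern of the documented examples; the construct name and bracket rendering are hypothetical::

    from sqlalchemy.ext.compiler import compiles
    from sqlalchemy.sql.expression import ColumnClause

    class BracketedColumn(ColumnClause):
        """hypothetical construct rendered with bracket quoting"""

    @compiles(BracketedColumn)
    def compile_bracketed(element, compiler, **kw):
        return "[%s]" % element.name

..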
change:: :tags: mssql :tickets: 1310 Added in reserved words for MSSQL that covers version 2008 and all prior versions. .. change:: :tags: mssql :tickets: 1343 Corrected problem with information schema not working with a binary collation based database. Cleaned up information schema since it is only used by mssql now. .. change:: :tags: sqlite :tickets: 1402 Corrected the SLBoolean type so that it properly treats only 1 as True. .. change:: :tags: sqlite :tickets: 1273 Corrected the float type so that it correctly maps to a SLFloat type when being reflected. .. change:: :tags: extensions :tickets: 1379 Fixed adding of deferred or other column properties to a declarative class. .. changelog:: :version: 0.5.3 :released: Tue Mar 24 2009 .. change:: :tags: orm :tickets: 1315 The "objects" argument to session.flush() is deprecated. State which represents the linkage between a parent and child object does not support "flushed" status on one side of the link and not the other, so supporting this operation leads to misleading results. .. change:: :tags: orm :tickets: Query now implements __clause_element__() which produces its selectable, which means a Query instance can be accepted in many SQL expressions, including col.in_(query), union(query1, query2), select([foo]).select_from(query), etc. .. change:: :tags: orm :tickets: 1337 Query.join() can now construct multiple FROM clauses, if needed. Such as, query(A, B).join(A.x).join(B.y) might say SELECT A.*, B.* FROM A JOIN X, B JOIN Y. Eager loading can also tack its joins onto those multiple FROM clauses. .. change:: :tags: orm :tickets: 1347 Fixed bug in dynamic_loader() where append/remove events after construction time were not being propagated to the UOW to pick up on flush(). .. change:: :tags: orm :tickets: Fixed bug where column_prefix wasn't being checked before not mapping an attribute that already had class-level name present. .. change:: :tags: orm :tickets: 1315 a session.expire() on a particular collection attribute will clear any pending backref additions as well, so that the next access correctly returns only what was present in the database. Presents some degree of a workaround for, although we are considering removing the flush([objects]) feature altogether. .. change:: :tags: orm :tickets: Session.scalar() now converts raw SQL strings to text() the same way Session.execute() does and accepts same alternative \**kw args. .. change:: :tags: orm :tickets: improvements to the "determine direction" logic of relation() such that the direction of tricky situations like mapper(A.join(B)) -> relation-> mapper(B) can be determined. .. change:: :tags: orm :tickets: 1306 When flushing partial sets of objects using session.flush([somelist]), pending objects which remain pending after the operation won't inadvertently be added as persistent. .. change:: :tags: orm :tickets: 1314 Added "post_configure_attribute" method to InstrumentationManager, so that the "listen_for_events.py" example works again. .. change:: :tags: orm :tickets: a forward and complementing backwards reference which are both of the same direction, i.e. ONETOMANY or MANYTOONE, is now detected, and an error message is raised. Saves crazy CircularDependencyErrors later on. .. change:: :tags: orm :tickets: Fixed bugs in Query regarding simultaneous selection of multiple joined-table inheritance entities with common base classes: - previously the adaption applied to "B" on "A JOIN B" would be erroneously partially applied to "A". - comparisons on relations (i.e. 
A.related==someb) were not getting adapted when they should. - Other filterings, like query(A).join(A.bs).filter(B.foo=='bar'), were erroneously adapting "B.foo" as though it were an "A". .. change:: :tags: orm :tickets: 1325 Fixed adaptation of EXISTS clauses via any(), has(), etc. in conjunction with an aliased object on the left and of_type() on the right. .. change:: :tags: orm :tickets: Added an attribute helper method ``set_committed_value`` in sqlalchemy.orm.attributes. Given an object, attribute name, and value, will set the value on the object as part of its "committed" state, i.e. state that is understood to have been loaded from the database. Helps with the creation of homegrown collection loaders and such. .. change:: :tags: orm :tickets: Query won't fail with weakref error when a non-mapper/class instrumented descriptor is passed, raises "Invalid column expession". .. change:: :tags: orm :tickets: Query.group_by() properly takes into account aliasing applied to the FROM clause, such as with select_from(), using with_polymorphic(), or using from_self(). .. change:: :tags: sql :tickets: An alias() of a select() will convert to a "scalar subquery" when used in an unambiguously scalar context, i.e. it's used in a comparison operation. This applies to the ORM when using query.subquery() as well. .. change:: :tags: sql :tickets: 1302 Fixed missing _label attribute on Function object, others when used in a select() with use_labels (such as when used in an ORM column_property()). .. change:: :tags: sql :tickets: 1309 anonymous alias names now truncate down to the max length allowed by the dialect. More significant on DBs like Oracle with very small character limits. .. change:: :tags: sql :tickets: the __selectable__() interface has been replaced entirely by __clause_element__(). .. change:: :tags: sql :tickets: 1299 The per-dialect cache used by TypeEngine to cache dialect-specific types is now a WeakKeyDictionary. This to prevent dialect objects from being referenced forever for an application that creates an arbitrarily large number of engines or dialects. There is a small performance penalty which will be resolved in 0.6. .. change:: :tags: sqlite :tickets: Fixed SQLite reflection methods so that non-present cursor.description, which triggers an auto-cursor close, will be detected so that no results doesn't fail on recent versions of pysqlite which raise an error when fetchone() called with no rows present. .. change:: :tags: postgresql :tickets: Index reflection won't fail when an index with multiple expressions is encountered. .. change:: :tags: postgresql :tickets: 1327 Added PGUuid and PGBit types to sqlalchemy.databases.postgres. .. change:: :tags: postgresql :tickets: 1327 Refection of unknown PG types won't crash when those types are specified within a domain. .. change:: :tags: mssql :tickets: Preliminary support for pymssql 1.0.1 .. change:: :tags: mssql :tickets: Corrected issue on mssql where max_identifier_length was not being respected. .. change:: :tags: extensions :tickets: Fixed a recursive pickling issue in serializer, triggered by an EXISTS or other embedded FROM construct. .. change:: :tags: extensions :tickets: Declarative locates the "inherits" class using a search through __bases__, to skip over mixins that are local to subclasses. .. change:: :tags: extensions :tickets: Declarative figures out joined-table inheritance primary join condition even if "inherits" mapper argument is given explicitly. .. 
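A brief sketch of the set_committed_value() helper noted above, in the "homegrown collection loader" spirit it was added for; the parent objects and the pre-fetched mapping are assumptions::

    from sqlalchemy.orm.attributes import set_committed_value

    for parent in parents:
        # attach pre-fetched children as already-loaded ("committed") state,
        # so no lazy load is emitted when parent.children is accessed
        set_committed_value(parent, 'children',
                            children_by_parent_id.get(parent.id, []))

..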
change:: :tags: extensions :tickets: Declarative will properly interpret the "foreign_keys" argument on a backref() if it's a string. .. change:: :tags: extensions :tickets: Declarative will accept a table-bound column as a property when used in conjunction with __table__, if the column is already present in __table__. The column will be remapped to the given key the same way as when added to the mapper() properties dict. .. changelog:: :version: 0.5.2 :released: Sat Jan 24 2009 .. change:: :tags: orm :tickets: Further refined 0.5.1's warning about delete-orphan cascade placed on a many-to-many relation. First, the bad news: the warning will apply to both many-to-many as well as many-to-one relations. This is necessary since in both cases, SQLA does not scan the full set of potential parents when determining "orphan" status - for a persistent object it only detects an in-python de-association event to establish the object as an "orphan". Next, the good news: to support one-to-one via a foreign key or assocation table, or to support one-to-many via an association table, a new flag single_parent=True may be set which indicates objects linked to the relation are only meant to have a single parent. The relation will raise an error if multiple parent-association events occur within Python. .. change:: :tags: orm :tickets: 1292 Adjusted the attribute instrumentation change from 0.5.1 to fully establish instrumentation for subclasses where the mapper was created after the superclass had already been fully instrumented. .. change:: :tags: orm :tickets: Fixed bug in delete-orphan cascade whereby two one-to-one relations from two different parent classes to the same target class would prematurely expunge the instance. .. change:: :tags: orm :tickets: Fixed an eager loading bug whereby self-referential eager loading would prevent other eager loads, self referential or not, from joining to the parent JOIN properly. Thanks to Alex K for creating a great test case. .. change:: :tags: orm :tickets: session.expire() and related methods will not expire() unloaded deferred attributes. This prevents them from being needlessly loaded when the instance is refreshed. .. change:: :tags: orm :tickets: 1293 query.join()/outerjoin() will now properly join an aliased() construct to the existing left side, even if query.from_self() or query.select_from(someselectable) has been called. .. change:: :tags: sql :tickets: 1284 Further fixes to the "percent signs and spaces in column/table names" functionality. .. change:: :tags: mssql :tickets: 1291 Restored convert_unicode handling. Results were being passed on through without conversion. .. change:: :tags: mssql :tickets: 1282 Really fixing the decimal handling this time.. .. change:: :tags: Ticket:1289, mssql :tickets: Modified table reflection code to use only kwargs when constructing tables. .. changelog:: :version: 0.5.1 :released: Sat Jan 17 2009 .. change:: :tags: orm :tickets: Removed an internal join cache which could potentially leak memory when issuing query.join() repeatedly to ad-hoc selectables. .. change:: :tags: orm :tickets: The "clear()", "save()", "update()", "save_or_update()" Session methods have been deprecated, replaced by "expunge_all()" and "add()". "expunge_all()" has also been added to ScopedSession. .. change:: :tags: orm :tickets: Modernized the "no mapped table" exception and added a more explicit __table__/__tablename__ exception to declarative. .. 
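A small sketch of the single_parent=True flag described in the 0.5.2 notes above, on an assumed many-to-one Address-to-User relation configured with classical mappers::

    from sqlalchemy.orm import mapper, relation

    mapper(Address, addresses_table, properties={
        # delete-orphan on the many-to-one side requires single_parent=True
        'user': relation(User, cascade='all, delete-orphan',
                         single_parent=True),
    })

..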
change:: :tags: orm :tickets: 1237 Concrete inheriting mappers now instrument attributes which are inherited from the superclass, but are not defined for the concrete mapper itself, with an InstrumentedAttribute that issues a descriptive error when accessed. .. change:: :tags: orm :tickets: 1237, 781 Added a new `relation()` keyword `back_populates`. This allows configuration of backreferences using explicit relations. This is required when creating bidirectional relations between a hierarchy of concrete mappers and another class. .. change:: :tags: orm :tickets: 1237 Test coverage added for `relation()` objects specified on concrete mappers. .. change:: :tags: orm :tickets: 1276 Query.from_self() as well as query.subquery() both disable the rendering of eager joins inside the subquery produced. The "disable all eager joins" feature is available publicly via a new query.enable_eagerloads() generative. .. change:: :tags: orm :tickets: Added a rudimental series of set operations to Query that receive Query objects as arguments, including union(), union_all(), intersect(), except_(), intersect_all(), except_all(). See the API documentation for Query.union() for examples. .. change:: :tags: orm :tickets: Fixed bug that prevented Query.join() and eagerloads from attaching to a query that selected from a union or aliased union. .. change:: :tags: orm :tickets: 1237 A short documentation example added for bidirectional relations specified on concrete mappers. .. change:: :tags: orm :tickets: 1269 Mappers now instrument class attributes upon construction with the final InstrumentedAttribute object which remains persistent. The `_CompileOnAttr`/`__getattribute__()` methodology has been removed. The net effect is that Column-based mapped class attributes can now be used fully at the class level without invoking a mapper compilation operation, greatly simplifying typical usage patterns within declarative. .. change:: :tags: orm :tickets: ColumnProperty (and front-end helpers such as ``deferred``) no longer ignores unknown \**keyword arguments. .. change:: :tags: orm :tickets: Fixed a bug with the unitofwork's "row switch" mechanism, i.e. the conversion of INSERT/DELETE into an UPDATE, when combined with joined-table inheritance and an object which contained no defined values for the child table, where an UPDATE with no SET clause would be rendered. .. change:: :tags: orm :tickets: 1281 Using delete-orphan on a many-to-many relation is deprecated. This produces misleading or erroneous results since SQLA does not retrieve the full list of "parents" for m2m. To get delete-orphan behavior with an m2m table, use an explicit association class so that the individual association row is treated as a parent. .. change:: :tags: orm :tickets: 1281 delete-orphan cascade always requires delete cascade. Specifying delete-orphan without delete now raises a deprecation warning. .. change:: :tags: sql :tickets: 1256 Improved the methodology for handling percent signs in column names from. Added more tests. MySQL and PostgreSQL dialects still do not issue correct CREATE TABLE statements for identifiers with percent signs in them. .. change:: :tags: schema :tickets: 1214 Index now accepts column-oriented InstrumentedAttributes (i.e. column-based mapped class attributes) as column arguments. .. change:: :tags: schema :tickets: Column with no name (as in declarative) won't raise a NoneType error when its string output is requested (such as in a stack trace).
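A minimal sketch of the back_populates keyword introduced in the notes above, using assumed Parent/Child classes and tables::

    from sqlalchemy.orm import mapper, relation

    mapper(Parent, parent_table, properties={
        'children': relation(Child, back_populates='parent'),
    })
    mapper(Child, child_table, properties={
        'parent': relation(Parent, back_populates='children'),
    })

..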
change:: :tags: schema :tickets: 1278 Fixed bug when overriding a Column with a ForeignKey on a reflected table, where derived columns (i.e. the "virtual" columns of a select, etc.) would inadvertently call upon schema-level cleanup logic intended only for the original column. .. change:: :tags: declarative :tickets: Can now specify Column objects on subclasses which have no table of their own (i.e. use single table inheritance). The columns will be appended to the base table, but only mapped by the subclass. .. change:: :tags: declarative :tickets: For both joined and single inheriting subclasses, the subclass will only map those columns which are already mapped on the superclass and those explicit on the subclass. Other columns that are present on the `Table` will be excluded from the mapping by default, which can be disabled by passing a blank `exclude_properties` collection to the `__mapper_args__`. This is so that single-inheriting classes which define their own columns are the only classes to map those columns. The effect is actually a more organized mapping than you'd normally get with explicit `mapper()` calls unless you set up the `exclude_properties` arguments explicitly. .. change:: :tags: declarative :tickets: It's an error to add new Column objects to a declarative class that specified an existing table using __table__. .. change:: :tags: mysql :tickets: Added the missing keywords from MySQL 4.1 so they get escaped properly. .. change:: :tags: mssql :tickets: 1280 Corrected handling of large decimal values with more robust tests. Removed string manipulation on floats. .. change:: :tags: mssql :tickets: Modified the do_begin handling in mssql to use the Cursor not the Connection so it is DBAPI compatible. .. change:: :tags: mssql :tickets: Corrected SAVEPOINT support on adodbapi by changing the handling of savepoint_release, which is unsupported on mssql. .. changelog:: :version: 0.5.0 :released: Tue Jan 06 2009 .. change:: :tags: general :tickets: Documentation has been converted to Sphinx. In particular, the generated API documentation has been constructed into a full blown "API Reference" section which organizes editorial documentation combined with generated docstrings. Cross linking between sections and API docs are vastly improved, a javascript-powered search feature is provided, and a full index of all classes, functions and members is provided. .. change:: :tags: general :tickets: setup.py now imports setuptools only optionally. If not present, distutils is used. The new "pip" installer is recommended over easy_install as it installs in a more simplified way. .. change:: :tags: general :tickets: added an extremely basic illustration of a PostGIS integration to the examples folder. .. change:: :tags: orm :tickets: Query.with_polymorphic() now accepts a third argument "discriminator" which will replace the value of mapper.polymorphic_on for that query. Mappers themselves no longer require polymorphic_on to be set, even if the mapper has a polymorphic_identity. When not set, the mapper will load non-polymorphically by default. Together, these two features allow a non-polymorphic concrete inheritance setup to use polymorphic loading on a per-query basis, since concrete setups are prone to many issues when used polymorphically in all cases. .. change:: :tags: orm :tickets: dynamic_loader accepts a query_class= to customize the Query classes used for both the dynamic collection and the queries built from it. .. 
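note:: Editorial sketch, not part of the original changelog: it illustrates the declarative single table inheritance behavior described in the 0.5.1 notes above; ``Manager`` has no table of its own, so its ``Column`` is appended to the base table but mapped only on the subclass. The class names are assumptions::

        from sqlalchemy import Column, Integer, String
        from sqlalchemy.ext.declarative import declarative_base

        Base = declarative_base()

        class Employee(Base):
            __tablename__ = 'employee'
            id = Column(Integer, primary_key=True)
            type = Column(String(30))
            __mapper_args__ = {'polymorphic_on': type,
                               'polymorphic_identity': 'employee'}

        class Manager(Employee):
            # no __tablename__/__table__ here - single table inheritance.
            # golf_swing is added to the "employee" table but is only
            # mapped on Manager.
            golf_swing = Column(String(50))
            __mapper_args__ = {'polymorphic_identity': 'manager'}

..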
change:: :tags: orm :tickets: 1079 query.order_by() accepts None which will remove any pending order_by state from the query, as well as cancel out any mapper/relation configured ordering. This is primarily useful for overriding the ordering specified on a dynamic_loader(). .. change:: :tags: sql :tickets: 935 RowProxy objects can be used in place of dictionary arguments sent to connection.execute() and friends. .. change:: :tags: dialect :tickets: Added a new description_encoding attribute on the dialect that is used for encoding the column name when processing the metadata. This usually defaults to utf-8. .. change:: :tags: mssql :tickets: Added in a new MSGenericBinary type. This maps to the Binary type so it can implement the specialized behavior of treating length specified types as fixed-width Binary types and non-length types as an unbound variable length Binary type. .. change:: :tags: mssql :tickets: 1249 Added in new types: MSVarBinary and MSImage. .. change:: :tags: mssql :tickets: Added in the MSReal, MSNText, MSSmallDateTime, MSTime, MSDateTimeOffset, and MSDateTime2 types .. change:: :tags: sqlite :tickets: 1266 Table reflection now stores the actual DefaultClause value for the column. .. change:: :tags: sqlite :tickets: bugfixes, behavioral changes .. change:: :tags: orm :tickets: Exceptions raised during compile_mappers() are now preserved to provide "sticky behavior" - if a hasattr() call on a pre-compiled mapped attribute triggers a failing compile and suppresses the exception, subsequent compilation is blocked and the exception will be reiterated on the next compile() call. This issue occurs frequently when using declarative. .. change:: :tags: orm :tickets: property.of_type() is now recognized on a single-table inheriting target, when used in the context of prop.of_type(..).any()/has(), as well as query.join(prop.of_type(...)). .. change:: :tags: orm :tickets: query.join() raises an error when the target of the join doesn't match the property-based attribute - while it's unlikely anyone is doing this, the SQLAlchemy author was guilty of this particular loosey-goosey behavior. .. change:: :tags: orm :tickets: 1272 Fixed bug when using weak_instance_map=False where modified events would not be intercepted for a flush(). .. change:: :tags: orm :tickets: 1268 Fixed some deep "column correspondence" issues which could impact a Query made against a selectable containing multiple versions of the same table, as well as unions and similar which contained the same table columns in different column positions at different levels. .. change:: :tags: orm :tickets: Custom comparator classes used in conjunction with column_property(), relation() etc. can define new comparison methods on the Comparator, which will become available via __getattr__() on the InstrumentedAttribute. In the case of synonym() or comparable_property(), attributes are resolved first on the user-defined descriptor, then on the user-defined comparator. .. change:: :tags: orm :tickets: 976 Added ScopedSession.is_active accessor. .. change:: :tags: orm :tickets: 1262 Can pass mapped attributes and column objects as keys to query.update({}). .. change:: :tags: orm :tickets: Mapped attributes passed to the values() of an expression level insert() or update() will use the keys of the mapped columns, not that of the mapped attribute. .. change:: :tags: orm :tickets: 1242 Corrected problem with Query.delete() and Query.update() not working properly with bind parameters. .. 
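note:: Editorial sketch, not part of the original changelog: it illustrates passing mapped attributes as keys to ``query.update({})`` as described above. ``User`` and ``session`` are assumed to be an already-mapped class and an active Session::

        session.query(User).filter(User.name == 'jack').update(
            {User.name: 'ed'})
        session.commit()

..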
change:: :tags: orm :tickets: Query.select_from(), from_statement() ensure that the given argument is a FromClause, or Text/Select/Union, respectively. .. change:: :tags: orm :tickets: 1253 Query() can be passed a "composite" attribute as a column expression and it will be expanded. Somewhat related to. .. change:: :tags: orm :tickets: Query() is a little more robust when passed various column expressions such as strings, clauselists, text() constructs (which may mean it just raises an error more nicely). .. change:: :tags: orm :tickets: first() works as expected with Query.from_statement(). .. change:: :tags: orm :tickets: Fixed bug introduced in 0.5rc4 involving eager loading not functioning for properties which were added to a mapper post-compile using add_property() or equivalent. .. change:: :tags: orm :tickets: Fixed bug where many-to-many relation() with viewonly=True would not correctly reference the link between secondary->remote. .. change:: :tags: orm :tickets: 1232 Duplicate items in a list-based collection will be maintained when issuing INSERTs to a "secondary" table in a many-to-many relation. Assuming the m2m table has a unique or primary key constraint on it, this will raise the expected constraint violation instead of silently dropping the duplicate entries. Note that the old behavior remains for a one-to-many relation since collection entries in that case don't result in INSERT statements and SQLA doesn't manually police collections. .. change:: :tags: orm :tickets: Query.add_column() can accept FromClause objects in the same manner as session.query() can. .. change:: :tags: orm :tickets: Comparison of many-to-one relation to NULL is properly converted to IS NOT NULL based on not_(). .. change:: :tags: orm :tickets: 1087 Extra checks added to ensure explicit primaryjoin/secondaryjoin are ClauseElement instances, to prevent more confusing errors later on. .. change:: :tags: orm :tickets: 1236 Improved mapper() check for non-class classes. .. change:: :tags: orm :tickets: 5051 comparator_factory argument is now documented and supported by all MapperProperty types, including column_property(), relation(), backref(), and synonym(). .. change:: :tags: orm :tickets: Changed the name of PropertyLoader to RelationProperty, to be consistent with all the other names. PropertyLoader is still present as a synonym. .. change:: :tags: orm :tickets: 1099, 1228 fixed "double iter()" call causing bus errors in shard API, removed errant result.close() left over from the 0.4 version. .. change:: :tags: orm :tickets: made Session.merge cascades not trigger autoflush. Fixes merged instances getting prematurely inserted with missing values. .. change:: :tags: orm :tickets: Two fixes to help prevent out-of-band columns from being rendered in polymorphic_union inheritance scenarios (which then causes extra tables to be rendered in the FROM clause causing cartesian products): - improvements to "column adaption" for a->b->c inheritance situations to better locate columns that are related to one another via multiple levels of indirection, rather than rendering the non-adapted column. - the "polymorphic discriminator" column is only rendered for the actual mapper being queried against. The column won't be "pulled in" from a subclass or superclass mapper since it's not needed. .. change:: :tags: orm :tickets: 1072 Fixed shard_id argument on ShardedSession.execute(). .. change:: :tags: sql :tickets: 1256 Columns can again contain percent signs within their names. .. 
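note:: Editorial sketch, not part of the original changelog: it illustrates the ``comparator_factory`` argument mentioned above with a case-insensitive equality comparator, following the pattern used in the SQLAlchemy documentation. ``EmailAddress`` and ``addresses_table`` are assumptions, and the exact location of the ``Comparator`` base class may differ between versions::

        from sqlalchemy import func
        from sqlalchemy.orm import mapper, column_property
        from sqlalchemy.orm.properties import ColumnProperty

        class CaseInsensitiveComparator(ColumnProperty.Comparator):
            def __eq__(self, other):
                # compare lower-cased values on both sides
                return func.lower(self.__clause_element__()) == func.lower(other)

        mapper(EmailAddress, addresses_table, properties={
            'email': column_property(addresses_table.c.email,
                                     comparator_factory=CaseInsensitiveComparator)
        })

..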
change:: :tags: sql :tickets: sqlalchemy.sql.expression.Function is now a public class. It can be subclassed to provide user-defined SQL functions in an imperative style, including with pre-established behaviors. The postgis.py example illustrates one usage of this. .. change:: :tags: sql :tickets: PickleType now favors == comparison by default, if the incoming object (such as a dict) implements __eq__(). If the object does not implement __eq__() and mutable=True, a deprecation warning is raised. .. change:: :tags: sql :tickets: 1215 Fixed the import weirdness in sqlalchemy.sql to not export __names__. .. change:: :tags: sql :tickets: 1238 Using the same ForeignKey object repeatedly raises an error instead of silently failing later. .. change:: :tags: sql :tickets: Added NotImplementedError for params() method on Insert/Update/Delete constructs. These items currently don't support this functionality, which also would be a little misleading compared to values(). .. change:: :tags: sql :tickets: 650 Reflected foreign keys will properly locate their referenced column, even if the column was given a "key" attribute different from the reflected name. This is achieved via a new flag on ForeignKey/ForeignKeyConstraint called "link_to_name", if True means the given name is the referred-to column's name, not its assigned key. .. change:: :tags: sql :tickets: 1253 select() can accept a ClauseList as a column in the same way as a Table or other selectable and the interior expressions will be used as column elements. .. change:: :tags: sql :tickets: the "passive" flag on session.is_modified() is correctly propagated to the attribute manager. .. change:: :tags: sql :tickets: union() and union_all() will not whack any order_by() that has been applied to the select()s inside. If you union() a select() with order_by() (presumably to support LIMIT/OFFSET), you should also call self_group() on it to apply parenthesis. .. change:: :tags: engine/pool :tickets: 1246 Connection.invalidate() checks for closed status to avoid attribute errors. .. change:: :tags: engine/pool :tickets: 1094 NullPool supports reconnect on failure behavior. .. change:: :tags: engine/pool :tickets: 799 Added a mutex for the initial pool creation when using pool.manage(dbapi). This prevents a minor case of "dogpile" behavior which would otherwise occur upon a heavy load startup. .. change:: :tags: engine/pool :tickets: _execute_clauseelement() goes back to being a private method. Subclassing Connection is not needed now that ConnectionProxy is available. .. change:: :tags: documentation :tickets: 1149, 1200 Tickets. .. change:: :tags: documentation :tickets: Added note about create_session() defaults. .. change:: :tags: documentation :tickets: Added section about metadata.reflect(). .. change:: :tags: documentation :tickets: Updated `TypeDecorator` section. .. change:: :tags: documentation :tickets: Rewrote the "threadlocal" strategy section of the docs due to recent confusion over this feature. .. change:: :tags: documentation :tickets: Removed badly out of date 'polymorphic_fetch' and 'select_table' docs from inheritance, reworked the second half of "joined table inheritance". .. change:: :tags: documentation :tickets: Documented `comparator_factory` kwarg, added new doc section "Custom Comparators". .. change:: :tags: mssql :tickets: 1254 Refactored the Date/Time types. The ``smalldatetime`` data type no longer truncates to a date only, and will now be mapped to the MSSmallDateTime type. .. 
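note:: Editorial sketch, not part of the original changelog: it illustrates the ``link_to_name`` flag described above. The referenced column is named ``user_id`` in the database but is given a different ``key`` on the ``Table``, so the ForeignKey is told that its string refers to the database name rather than the key; the table layout is hypothetical::

        from sqlalchemy import MetaData, Table, Column, Integer, ForeignKey

        meta = MetaData()

        users = Table('users', meta,
            Column('user_id', Integer, key='id', primary_key=True))

        addresses = Table('addresses', meta,
            Column('id', Integer, primary_key=True),
            Column('user_id', Integer,
                   ForeignKey('users.user_id', link_to_name=True)))

..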
change:: :tags: mssql :tickets: Corrected an issue with Numerics to accept an int. .. change:: :tags: mssql :tickets: Mapped ``char_length`` to the ``LEN()`` function. .. change:: :tags: mssql :tickets: If an ``INSERT`` includes a subselect the ``INSERT`` is converted from an ``INSERT INTO VALUES`` construct to a ``INSERT INTO SELECT`` construct. .. change:: :tags: mssql :tickets: If the column is part of a ``primary_key`` it will be ``NOT NULL`` since MSSQL doesn't allow ``NULL`` in primary_key columns. .. change:: :tags: mssql :tickets: 1249 ``MSBinary`` now returns a ``BINARY`` instead of an ``IMAGE``. This is a backwards incompatible change in that ``BINARY`` is a fixed length data type whereas ``IMAGE`` is a variable length data type. .. change:: :tags: mssql :tickets: 1258 ``get_default_schema_name`` is now reflected from the database based on the user's default schema. This only works with MSSQL 2005 and later. .. change:: :tags: mssql :tickets: 1248 Added collation support through the use of a new collation argument. This is supported on the following types: char, nchar, varchar, nvarchar, text, ntext. .. change:: :tags: mssql :tickets: Changes to the connection string parameters favor DSN as the default specification for pyodbc. See the mssql.py docstring for detailed usage instructions. .. change:: :tags: mssql :tickets: Added experimental support of savepoints. It currently does not work fully with sessions. .. change:: :tags: mssql :tickets: 1243 Support for three levels of column nullability: NULL, NOT NULL, and the database's configured default. The default Column configuration (nullable=True) will now generate NULL in the DDL. Previously no specification was emitted and the database default would take effect (usually NULL, but not always). To explicitly request the database default, configure columns with nullable=None and no specification will be emitted in DDL. This is backwards incompatible behavior. .. change:: :tags: postgres :tickets: 1267 "%" signs in text() constructs are automatically escaped to "%%". Because of the backwards incompatible nature of this change, a warning is emitted if '%%' is detected in the string. .. change:: :tags: postgres :tickets: Calling alias.execute() in conjunction with server_side_cursors won't raise AttributeError. .. change:: :tags: postgres :tickets: 714 Added Index reflection support to PostgreSQL, using a great patch we long neglected, submitted by Ken Kuhlman. .. change:: :tags: oracle :tickets: Adjusted the format of create_xid() to repair two-phase commit. We now have field reports of Oracle two-phase commit working properly with this change. .. change:: :tags: oracle :tickets: 1233 Added OracleNVarchar type, produces NVARCHAR2, and also subclasses Unicode so that convert_unicode=True by default. NVARCHAR2 reflects into this type automatically so these columns pass unicode on a reflected table with no explicit convert_unicode=True flags. .. change:: :tags: oracle :tickets: 1265 Fixed bug which was preventing out params of certain types from being received; thanks a ton to huddlej at wwu.edu ! .. change:: :tags: mysql :tickets: "%" signs in text() constructs are automatically escaped to "%%". Because of the backwards incompatible nature of this change, a warning is emitted if '%%' is detected in the string. .. change:: :tags: mysql :tickets: 1241 Fixed bug in exception raise when FK columns not present during reflection. .. 
change:: :tags: mysql :tickets: Fixed bug involving reflection of a remote-schema table with a foreign key ref to another table in that schema. .. change:: :tags: associationproxy :tickets: The association proxy properties make themselves available at the class level, e.g. MyClass.aproxy. Previously this evaluated to None. .. change:: :tags: declarative :tickets: The full list of arguments accepted as string by backref() includes 'primaryjoin', 'secondaryjoin', 'secondary', 'foreign_keys', 'remote_side', 'order_by'. .. changelog:: :version: 0.5.0rc4 :released: Fri Nov 14 2008 .. change:: :tags: orm :tickets: Query.count() has been enhanced to do the "right thing" in a wider variety of cases. It can now count multiple-entity queries, as well as column-based queries. Note that this means if you say query(A, B).count() without any joining criterion, it's going to count the cartesian product of A*B. Any query which is against column-based entities will automatically issue "SELECT count(1) FROM (SELECT...)" so that the real rowcount is returned, meaning a query such as query(func.count(A.name)).count() will return a value of one, since that query would return one row. .. change:: :tags: orm :tickets: Lots of performance tuning. A rough guesstimate over various ORM operations places it 10% faster over 0.5.0rc3, 25-30% over 0.4.8. .. change:: :tags: orm :tickets: bugfixes and behavioral changes .. change:: :tags: general :tickets: global "propigate"->"propagate" change. .. change:: :tags: orm :tickets: Adjustments to the enhanced garbage collection on InstanceState to better guard against errors due to lost state. .. change:: :tags: orm :tickets: 1220 Query.get() returns a more informative error message when executed against multiple entities. .. change:: :tags: orm :tickets: 1140, 1221 Restored NotImplementedError on Cls.relation.in_() .. change:: :tags: orm :tickets: 1226 Fixed PendingDeprecationWarning involving order_by parameter on relation(). .. change:: :tags: sql :tickets: Removed the 'properties' attribute of the Connection object; Connection.info should be used. .. change:: :tags: sql :tickets: Restored "active rowcount" fetch before ResultProxy autocloses the cursor. This was removed in 0.5rc3. .. change:: :tags: sql :tickets: Rearranged the `load_dialect_impl()` method in `TypeDecorator` such that it will take effect even if the user-defined `TypeDecorator` uses another `TypeDecorator` as its impl. .. change:: :tags: access :tickets: Added support for Currency type. .. change:: :tags: access :tickets: 1017 Functions were not returning their result. .. change:: :tags: access :tickets: 1017 Corrected problem with joins. Access only supports LEFT OUTER or INNER, not just JOIN by itself. .. change:: :tags: mssql :tickets: Lots of cleanup and fixes to correct problems with limit and offset. .. change:: :tags: mssql :tickets: Corrected situation where subqueries as part of a binary expression need to be translated to use the IN and NOT IN syntax. .. change:: :tags: mssql :tickets: 1216 Fixed E Notation issue that prevented the ability to insert decimal values less than 1E-6. .. change:: :tags: mssql :tickets: 1217 Corrected problems with reflection when dealing with schemas, particularly when those schemas are the default schema. .. change:: :tags: mssql :tickets: Corrected problem with casting a zero length item to a varchar. It now correctly adjusts the CAST. .. change:: :tags: ext :tickets: Can now use a custom "inherit_condition" in __mapper_args__ when using declarative. ..
change:: :tags: ext :tickets: fixed string-based "remote_side", "order_by" and others not propagating correctly when used in backref(). .. changelog:: :version: 0.5.0rc3 :released: Fri Nov 07 2008 .. change:: :tags: orm :tickets: Added two new hooks to SessionExtension: after_bulk_delete() and after_bulk_update(). after_bulk_delete() is called after a bulk delete() operation on a query. after_bulk_update() is called after a bulk update() operation on a query. .. change:: :tags: sql :tickets: SQL compiler optimizations and complexity reduction. The call count for compiling a typical select() construct is 20% less versus 0.5.0rc2. .. change:: :tags: sql :tickets: 1211 Dialects can now generate label names of adjustable length. Pass in the argument "label_length=" to create_engine() to adjust how many characters max will be present in dynamically generated column labels, i.e. "somecolumn AS somelabel". Any value less than 6 will result in a label of minimal size, consisting of an underscore and a numeric counter. The compiler uses the value of dialect.max_identifier_length as a default. .. change:: :tags: ext :tickets: Added a new extension sqlalchemy.ext.serializer. Provides Serializer/Deserializer "classes" which mirror Pickle/Unpickle, as well as dumps() and loads(). This serializer implements an "external object" pickler which keeps key context-sensitive objects, including engines, sessions, metadata, Tables/Columns, and mappers, outside of the pickle stream, and can later restore the pickle using any engine/metadata/session provider. This is used not for pickling regular object instances, which are pickleable without any special logic, but for pickling expression objects and full Query objects, such that all mapper/engine/session dependencies can be restored at unpickle time. .. change:: :tags: oracle :tickets: Wrote a docstring for Oracle dialect. Apparently that Ohloh "few source code comments" label is starting to sting :). .. change:: :tags: oracle :tickets: 536 Removed FIRST_ROWS() optimize flag when using LIMIT/OFFSET, can be reenabled with optimize_limits=True create_engine() flag. .. change:: :tags: oracle :tickets: bugfixes and behavioral changes .. change:: :tags: orm :tickets: "not equals" comparisons of simple many-to-one relation to an instance will not drop into an EXISTS clause and will compare foreign key columns instead. .. change:: :tags: orm :tickets: Removed not-really-working use cases of comparing a collection to an iterable. Use contains() to test for collection membership. .. change:: :tags: orm :tickets: 1171 Improved the behavior of aliased() objects such that they more accurately adapt the expressions generated, which helps particularly with self-referential comparisons. .. change:: :tags: orm :tickets: Fixed bug involving primaryjoin/secondaryjoin conditions constructed from class-bound attributes (as often occurs when using declarative), which later would be inappropriately aliased by Query, particularly with the various EXISTS based comparators. .. change:: :tags: orm :tickets: Fixed bug when using multiple query.join() with an aliased-bound descriptor which would lose the left alias. .. change:: :tags: orm :tickets: Improved weakref identity map memory management to no longer require mutexing, resurrects garbage collected instance on a lazy basis for an InstanceState with pending changes. .. change:: :tags: orm :tickets: InstanceState object now removes circular references to itself upon disposal to keep it outside of cyclic garbage collection. .. 
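note:: Editorial sketch, not part of the original changelog: it illustrates the ``sqlalchemy.ext.serializer`` extension described above, adapted from the pattern shown in the SQLAlchemy documentation. ``MyClass``, ``metadata`` and the engine/session setup are assumptions::

        from sqlalchemy.ext.serializer import dumps, loads
        from sqlalchemy.orm import scoped_session, sessionmaker

        Session = scoped_session(sessionmaker())

        query = Session.query(MyClass).filter(MyClass.somedata == 'foo')

        # pickle the query; engines, sessions, metadata and mappers stay
        # outside of the pickle stream
        serialized = dumps(query)

        # later (possibly in another process): restore against a live
        # MetaData and session registry, then execute
        query2 = loads(serialized, metadata, Session)
        results = query2.all()

..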
change:: :tags: orm :tickets: relation() won't hide unrelated ForeignKey errors inside of the "please specify primaryjoin" message when determining join condition. .. change:: :tags: orm :tickets: 1218 Fixed bug in Query involving order_by() in conjunction with multiple aliases of the same class (will add tests in) .. change:: :tags: orm :tickets: When using Query.join() with an explicit clause for the ON clause, the clause will be aliased in terms of the left side of the join, allowing scenarios like query(Source).from_self().join((Dest, Source.id==Dest.source_id)) to work properly. .. change:: :tags: orm :tickets: polymorphic_union() function respects the "key" of each Column if they differ from the column's name. .. change:: :tags: orm :tickets: 1183 Repaired support for "passive-deletes" on a many-to-one relation() with "delete" cascade. .. change:: :tags: orm :tickets: 1213 Fixed bug in composite types which prevented a primary-key composite type from being mutated. .. change:: :tags: orm :tickets: 1202 Added more granularity to internal attribute access, such that cascade and flush operations will not initialize unloaded attributes and collections, leaving them intact for a lazy-load later on. Backref events still initialize attributes and collections for pending instances. .. change:: :tags: sql :tickets: 1212 Simplified the check for ResultProxy "autoclose without results" to be based solely on presence of cursor.description. All the regexp-based guessing about statements returning rows has been removed. .. change:: :tags: sql :tickets: 1194 Direct execution of a union() construct will properly set up result-row processing. .. change:: :tags: sql :tickets: The internal notion of an "OID" or "ROWID" column has been removed. It's basically not used by any dialect, and the possibility of its usage with psycopg2's cursor.lastrowid is basically gone now that INSERT..RETURNING is available. .. change:: :tags: sql :tickets: Removed "default_order_by()" method on all FromClause objects. .. change:: :tags: sql :tickets: Repaired the table.tometadata() method so that a passed-in schema argument is propagated to ForeignKey constructs. .. change:: :tags: sql :tickets: Slightly changed behavior of IN operator for comparing to empty collections. Now results in inequality comparison against self. More portable, but breaks with stored procedures that aren't pure functions. .. change:: :tags: oracle :tickets: Setting the auto_convert_lobs to False on create_engine() will also instruct the OracleBinary type to return the cx_oracle LOB object unchanged. .. change:: :tags: mysql :tickets: Fixed foreign key reflection in the edge case where a Table's explicit schema= is the same as the schema (database) the connection is attached to. .. change:: :tags: mysql :tickets: No longer expects include_columns in table reflection to be lower case. .. change:: :tags: ext :tickets: 1174 Fixed bug preventing declarative-bound "column" objects from being used in column_mapped_collection(). .. change:: :tags: misc :tickets: 1077 util.flatten_iterator() func doesn't interpret strings with __iter__() methods as iterators, such as in pypy. .. changelog:: :version: 0.5.0rc2 :released: Sun Oct 12 2008 .. change:: :tags: orm :tickets: Fixed bug involving read/write relation()s that contain literal or other non-column expressions within their primaryjoin condition equated to a foreign key column. ..
change:: :tags: orm :tickets: "non-batch" mode in mapper(), a feature which allows mapper extension methods to be called as each instance is updated/inserted, now honors the insert order of the objects given. .. change:: :tags: orm :tickets: Fixed RLock-related bug in mapper which could deadlock upon reentrant mapper compile() calls, something that occurs when using declarative constructs inside of ForeignKey objects. .. change:: :tags: orm :tickets: ScopedSession.query_property now accepts a query_cls factory, overriding the session's configured query_cls. .. change:: :tags: orm :tickets: Fixed shared state bug interfering with ScopedSession.mapper's ability to apply default __init__ implementations on object subclasses. .. change:: :tags: orm :tickets: 1177 Fixed up slices on Query (i.e. query[x:y]) to work properly for zero length slices, slices with None on either end. .. change:: :tags: orm :tickets: Added an example illustrating Celko's "nested sets" as a SQLA mapping. .. change:: :tags: orm :tickets: contains_eager() with an alias argument works even when the alias is embedded in a SELECT, as when sent to the Query via query.select_from(). .. change:: :tags: orm :tickets: 1180 contains_eager() usage is now compatible with a Query that also contains a regular eager load and limit/offset, in that the columns are added to the Query-generated subquery. .. change:: :tags: orm :tickets: session.execute() will execute a Sequence object passed to it (regression from 0.4). .. change:: :tags: orm :tickets: Removed the "raiseerror" keyword argument from object_mapper() and class_mapper(). These functions raise in all cases if the given class/instance is not mapped. .. change:: :tags: orm :tickets: Fixed session.transaction.commit() on a autocommit=False session not starting a new transaction. .. change:: :tags: orm :tickets: Some adjustments to Session.identity_map's weak referencing behavior to reduce asynchronous GC side effects. .. change:: :tags: orm :tickets: 1182 Adjustment to Session's post-flush accounting of newly "clean" objects to better protect against operating on objects as they're asynchronously gc'ed. .. change:: :tags: sql :tickets: 1074 column.in_(someselect) can now be used as a columns-clause expression without the subquery bleeding into the FROM clause .. change:: :tags: sqlite :tickets: 968 Overhauled SQLite date/time bind/result processing to use regular expressions and format strings, rather than strptime/strftime, to generically support pre-1900 dates, dates with microseconds. .. change:: :tags: sqlite :tickets: String's (and Unicode's, UnicodeText's, etc.) convert_unicode logic disabled in the sqlite dialect, to adjust for pysqlite 2.5.0's new requirement that only Python unicode objects are accepted; http://itsystementwicklung.de/pipermail/list-pysqlite/2008-March/000018.html .. change:: :tags: mysql :tickets: Temporary tables are now reflectable. .. change:: :tags: oracle :tickets: 1187 Oracle will detect string-based statements which contain comments at the front before a SELECT as SELECT statements. .. changelog:: :version: 0.5.0rc1 :released: Thu Sep 11 2008 .. change:: :tags: orm :tickets: Query now has delete() and update(values) methods. This allows to perform bulk deletes/updates with the Query object. .. change:: :tags: orm :tickets: The RowTuple object returned by Query(\*cols) now features keynames which prefer mapped attribute names over column keys, column keys over column names, i.e. 
Query(Class.foo, Class.bar) will have names "foo" and "bar" even if those are not the names of the underlying Column objects. Direct Column objects such as Query(table.c.col) will return the "key" attribute of the Column. .. change:: :tags: orm :tickets: Added scalar() and value() methods to Query, each return a single scalar value. scalar() takes no arguments and is roughly equivalent to first()[0], value() takes a single column expression and is roughly equivalent to values(expr).next()[0]. .. change:: :tags: orm :tickets: Improved the determination of the FROM clause when placing SQL expressions in the query() list of entities. In particular scalar subqueries should not "leak" their inner FROM objects out into the enclosing query. .. change:: :tags: orm :tickets: Joins along a relation() from a mapped class to a mapped subclass, where the mapped subclass is configured with single table inheritance, will include an IN clause which limits the subtypes of the joined class to those requested, within the ON clause of the join. This takes effect for eager load joins as well as query.join(). Note that in some scenarios the IN clause will appear in the WHERE clause of the query as well since this discrimination has multiple trigger points. .. change:: :tags: orm :tickets: AttributeExtension has been refined such that the event is fired before the mutation actually occurs. Additionally, the append() and set() methods must now return the given value, which is used as the value to be used in the mutation operation. This allows creation of validating AttributeListeners which raise before the action actually occurs, and which can change the given value into something else before its used. .. change:: :tags: orm :tickets: column_property(), composite_property(), and relation() now accept a single or list of AttributeExtensions using the "extension" keyword argument. .. change:: :tags: orm :tickets: query.order_by().get() silently drops the "ORDER BY" from the query issued by GET but does not raise an exception. .. change:: :tags: orm :tickets: Added a Validator AttributeExtension, as well as a @validates decorator which is used in a similar fashion as @reconstructor, and marks a method as validating one or more mapped attributes. .. change:: :tags: orm :tickets: 1140 class.someprop.in_() raises NotImplementedError pending the implementation of "in\_" for relation .. change:: :tags: orm :tickets: 1127 Fixed primary key update for many-to-many collections where the collection had not been loaded yet .. change:: :tags: orm :tickets: Fixed bug whereby deferred() columns with a group in conjunction with an otherwise unrelated synonym() would produce an AttributeError during deferred load. .. change:: :tags: orm :tickets: 1128 The before_flush() hook on SessionExtension takes place before the list of new/dirty/deleted is calculated for the final time, allowing routines within before_flush() to further change the state of the Session before the flush proceeds. .. change:: :tags: orm :tickets: The "extension" argument to Session and others can now optionally be a list, supporting events sent to multiple SessionExtension instances. Session places SessionExtensions in Session.extensions. .. change:: :tags: orm :tickets: Reentrant calls to flush() raise an error. This also serves as a rudimentary, but not foolproof, check against concurrent calls to Session.flush(). .. 
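note:: Editorial sketch, not part of the original changelog: it illustrates the ``@validates`` decorator described above with a hypothetical declarative ``EmailAddress`` class. The validator fires before the attribute mutation takes place, and its return value is what actually gets set::

        from sqlalchemy import Column, Integer, String
        from sqlalchemy.ext.declarative import declarative_base
        from sqlalchemy.orm import validates

        Base = declarative_base()

        class EmailAddress(Base):
            __tablename__ = 'address'
            id = Column(Integer, primary_key=True)
            email = Column(String(100))

            @validates('email')
            def validate_email(self, key, value):
                # reject obviously malformed addresses before assignment
                assert '@' in value
                return value

..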
change:: :tags: orm :tickets: Improved the behavior of query.join() when joining to joined-table inheritance subclasses, using explicit join criteria (i.e. not on a relation). .. change:: :tags: orm :tickets: @orm.attributes.reconstitute and MapperExtension.reconstitute have been renamed to @orm.reconstructor and MapperExtension.reconstruct_instance. .. change:: :tags: orm :tickets: 1129 Fixed @reconstructor hook for subclasses which inherit from a base class. .. change:: :tags: orm :tickets: 1132 The composite() property type now supports a __set_composite_values__() method on the composite class which is required if the class represents state using attribute names other than the column's keynames; default-generated values now get populated properly upon flush. Also, composites with attributes set to None compare correctly. .. change:: :tags: orm :tickets: The 3-tuple of iterables returned by attributes.get_history() may now be a mix of lists and tuples. (Previously members were always lists.) .. change:: :tags: orm :tickets: 1151 Fixed bug whereby changing a primary key attribute on an entity where the attribute's previous value had been expired would produce an error upon flush(). .. change:: :tags: orm :tickets: Fixed custom instrumentation bug whereby get_instance_dict() was not called for newly constructed instances not loaded by the ORM. .. change:: :tags: orm :tickets: 1150 Session.delete() adds the given object to the session if not already present. This was a regression bug from 0.4. .. change:: :tags: orm :tickets: The `echo_uow` flag on `Session` is deprecated, and unit-of-work logging is now application-level only, not per-session level. .. change:: :tags: orm :tickets: 1153 Removed conflicting `contains()` operator from `InstrumentedAttribute` which didn't accept `escape` kwarg. .. change:: :tags: declarative :tickets: 1161 Fixed bug whereby mapper couldn't initialize if a composite primary key referenced another table that was not defined yet. .. change:: :tags: declarative :tickets: Fixed exception throw which would occur when string-based primaryjoin condition was used in conjunction with backref. .. change:: :tags: schema :tickets: 1033 Added "sorted_tables" accessor to MetaData, which returns Table objects sorted in order of dependency as a list. This deprecates the MetaData.table_iterator() method. The "reverse=False" keyword argument has also been removed from util.sort_tables(); use the Python 'reversed' function to reverse the results. .. change:: :tags: schema :tickets: The 'length' argument to all Numeric types has been renamed to 'scale'. 'length' is deprecated and is still accepted with a warning. .. change:: :tags: schema :tickets: Dropped 0.3-compatibility for user defined types (convert_result_value, convert_bind_param). .. change:: :tags: sql :tickets: 1068 Temporarily rolled back the "ORDER BY" enhancement. This feature is on hold pending further development. .. change:: :tags: sql :tickets: The exists() construct won't "export" its contained list of elements as FROM clauses, allowing them to be used more effectively in the columns clause of a SELECT. .. change:: :tags: sql :tickets: 798 and_() and or_() now generate a ColumnElement, allowing boolean expressions as result columns, i.e. select([and_(1, 0)]). .. change:: :tags: sql :tickets: Bind params now subclass ColumnElement which allows them to be selectable by orm.query (they already had most ColumnElement semantics). ..
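note:: Editorial sketch, not part of the original changelog: it illustrates the ``MetaData.sorted_tables`` accessor described above; tables come back in dependency order, parents before dependents, and the list can be reversed for drop order. The table layout is hypothetical::

        from sqlalchemy import MetaData, Table, Column, Integer, ForeignKey

        metadata = MetaData()

        parent = Table('parent', metadata,
            Column('id', Integer, primary_key=True))
        child = Table('child', metadata,
            Column('id', Integer, primary_key=True),
            Column('parent_id', Integer, ForeignKey('parent.id')))

        # yields ['parent', 'child']; use reversed() for drop order
        names = [t.name for t in metadata.sorted_tables]

..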
change:: :tags: sql :tickets: Added select_from() method to exists() construct, which becomes more and more compatible with a regular select(). .. change:: :tags: sql :tickets: 1160 Added func.min(), func.max(), func.sum() as "generic functions", which basically allows for their return type to be determined automatically. Helps with dates on SQLite, decimal types, others. .. change:: :tags: sql :tickets: added decimal.Decimal as an "auto-detect" type; bind parameters and generic functions will set their type to Numeric when a Decimal is used. .. change:: :tags: mysql :tickets: The 'length' argument to MSInteger, MSBigInteger, MSTinyInteger, MSSmallInteger and MSYear has been renamed to 'display_width'. .. change:: :tags: mysql :tickets: 1146 Added MSMediumInteger type. .. change:: :tags: mysql :tickets: the function func.utc_timestamp() compiles to UTC_TIMESTAMP, without the parenthesis, which seem to get in the way when using in conjunction with executemany(). .. change:: :tags: oracle :tickets: 536 limit/offset no longer uses ROW NUMBER OVER to limit rows, and instead uses subqueries in conjunction with a special Oracle optimization comment. Allows LIMIT/OFFSET to work in conjunction with DISTINCT. .. change:: :tags: oracle :tickets: 1155 has_sequence() now takes the current "schema" argument into account .. change:: :tags: oracle :tickets: 1121 added BFILE to reflected type names .. changelog:: :version: 0.5.0beta3 :released: Mon Aug 04 2008 .. change:: :tags: orm :tickets: The "entity_name" feature of SQLAlchemy mappers has been removed. For rationale, see http://tinyurl.com/6nm2ne .. change:: :tags: orm :tickets: the "autoexpire" flag on Session, sessionmaker(), and scoped_session() has been renamed to "expire_on_commit". It does not affect the expiration behavior of rollback(). .. change:: :tags: orm :tickets: fixed endless loop bug which could occur within a mapper's deferred load of inherited attributes. .. change:: :tags: orm :tickets: a legacy-support flag "_enable_transaction_accounting" flag added to Session which when False, disables all transaction-level object accounting, including expire on rollback, expire on commit, new/deleted list maintenance, and autoflush on begin. .. change:: :tags: orm :tickets: The 'cascade' parameter to relation() accepts None as a value, which is equivalent to no cascades. .. change:: :tags: orm :tickets: A critical fix to dynamic relations allows the "modified" history to be properly cleared after a flush(). .. change:: :tags: orm :tickets: user-defined @properties on a class are detected and left in place during mapper initialization. This means that a table-bound column of the same name will not be mapped at all if a @property is in the way (and the column is not remapped to a different name), nor will an instrumented attribute from an inherited class be applied. The same rules apply for names excluded using the include_properties/exclude_properties collections. .. change:: :tags: orm :tickets: Added a new SessionExtension hook called after_attach(). This is called at the point of attachment for objects via add(), add_all(), delete(), and merge(). .. change:: :tags: orm :tickets: 1111 A mapper which inherits from another, when inheriting the columns of its inherited mapper, will use any reassigned property names specified in that inheriting mapper. Previously, if "Base" had reassigned "base_id" to the name "id", "SubBase(Base)" would still get an attribute called "base_id". 
This could be worked around by explicitly stating the column in each submapper as well but this is fairly unworkable and also impossible when using declarative. .. change:: :tags: orm :tickets: Fixed a series of potential race conditions in Session whereby asynchronous GC could remove unmodified, no longer referenced items from the session as they were present in a list of items to be processed, typically during session.expunge_all() and dependent methods. .. change:: :tags: orm :tickets: Some improvements to the _CompileOnAttr mechanism which should reduce the probability of "Attribute x was not replaced during compile" warnings. (this generally applies to SQLA hackers, like Elixir devs). .. change:: :tags: orm :tickets: Fixed bug whereby the "unsaved, pending instance" FlushError raised for a pending orphan would not take superclass mappers into account when generating the list of relations responsible for the error. .. change:: :tags: sql :tickets: func.count() with no arguments renders as COUNT(*), equivalent to func.count(text('*')). .. change:: :tags: sql :tickets: 1068 simple label names in ORDER BY expressions render as themselves, and not as a re-statement of their corresponding expression. This feature is currently enabled only for SQLite, MySQL, and PostgreSQL. It can be enabled on other dialects as each is shown to support this behavior. .. change:: :tags: ext :tickets: Class-bound attributes sent as arguments to relation()'s remote_side and foreign_keys parameters are now accepted, allowing them to be used with declarative. Additionally fixed bugs involving order_by being specified as a class-bound attribute in conjunction with eager loading. .. change:: :tags: ext :tickets: declarative initialization of Columns adjusted so that non-renamed columns initialize in the same way as a non declarative mapper. This allows an inheriting mapper to set up its same-named "id" columns in particular such that the parent "id" column is favored over the child column, reducing database round trips when this value is requested. .. change:: :tags: mysql :tickets: 1110 Quoting of MSEnum values for use in CREATE TABLE is now optional & will be quoted on demand as required. (Quoting was always optional for use with existing tables.) .. changelog:: :version: 0.5.0beta2 :released: Mon Jul 14 2008 .. change:: :tags: orm :tickets: 870 In addition to expired attributes, deferred attributes also load if their data is present in the result set. .. change:: :tags: orm :tickets: session.refresh() raises an informative error message if the list of attributes does not include any column-based attributes. .. change:: :tags: orm :tickets: query() raises an informative error message if no columns or mappers are specified. .. change:: :tags: orm :tickets: lazy loaders now trigger autoflush before proceeding. This allows expire() of a collection or scalar relation to function properly in the context of autoflush. .. change:: :tags: orm :tickets: 887 column_property() attributes which represent SQL expressions or columns that are not present in the mapped tables (such as those from views) are automatically expired after an INSERT or UPDATE, assuming they have not been locally modified, so that they are refreshed with the most recent data upon access. .. change:: :tags: orm :tickets: 1082 Fixed explicit, self-referential joins between two joined-table inheritance mappers when using query.join(cls, aliased=True). .. 
change:: :tags: orm :tickets: Fixed query.join() when used in conjunction with a columns-only clause and an SQL-expression ON clause in the join. .. change:: :tags: orm :tickets: The "allow_column_override" flag from mapper() has been removed. This flag is virtually always misunderstood. Its specific functionality is available via the include_properties/exclude_properties mapper arguments. .. change:: :tags: orm :tickets: 1066 Repaired `__str__()` method on Query. .. change:: :tags: orm :tickets: Session.bind gets used as a default even when table/mapper specific binds are defined. .. change:: :tags: schema :tickets: 1075 Added prefixes option to `Table` that accepts a list of strings to insert after CREATE in the CREATE TABLE statement. .. change:: :tags: schema :tickets: Unicode, UnicodeText types now set "assert_unicode" and "convert_unicode" by default, but accept overriding \**kwargs for these values. .. change:: :tags: sql :tickets: Added new match() operator that performs a full-text search. Supported on PostgreSQL, SQLite, MySQL, MS-SQL, and Oracle backends. .. change:: :tags: sqlite :tickets: 1090 Modified SQLite's representation of "microseconds" to match the output of str(somedatetime), i.e. in that the microseconds are represented as fractional seconds in string format. This makes SQLA's SQLite date type compatible with datetimes that were saved directly using Pysqlite (which just calls str()). Note that this is incompatible with the existing microseconds values in a SQLA 0.4 generated SQLite database file. To get the old behavior globally: from sqlalchemy.databases.sqlite import DateTimeMixin DateTimeMixin.__legacy_microseconds__ = True To get the behavior on individual DateTime types: t = sqlite.SLDateTime() t.__legacy_microseconds__ = True Then use "t" as the type on the Column. .. change:: :tags: sqlite :tickets: SQLite Date, DateTime, and Time types only accept Python datetime objects now, not strings. If you'd like to format dates as strings yourself with SQLite, use a String type. If you'd like them to return datetime objects anyway despite their accepting strings as input, make a TypeDecorator around String - SQLA doesn't encourage this pattern. .. change:: :tags: extensions :tickets: 1096 Declarative supports a __table_args__ class variable, which is either a dictionary, or tuple of the form (arg1, arg2, ..., {kwarg1:value, ...}) which contains positional + kw arguments to be passed to the Table constructor. .. changelog:: :version: 0.5.0beta1 :released: Thu Jun 12 2008 .. change:: :tags: :tickets: The "__init__" trigger/decorator added by mapper now attempts to exactly mirror the argument signature of the original __init__. The pass-through for '_sa_session' is no longer implicit- you must allow for this keyword argument in your constructor. .. change:: :tags: :tickets: ClassState is renamed to ClassManager. .. change:: :tags: :tickets: Classes may supply their own InstrumentationManager by providing a __sa_instrumentation_manager__ property. .. change:: :tags: :tickets: Custom instrumentation may use any mechanism to associate a ClassManager with a class and an InstanceState with an instance. Attributes on those objects are still the default association mechanism used by SQLAlchemy's native instrumentation. .. change:: :tags: :tickets: Moved entity_name, _sa_session_id, and _instance_key from the instance object to the instance state. These values are still available in the old way, which is now deprecated, using descriptors attached to the class. 
A deprecation warning will be issued when accessed. .. change:: :tags: :tickets: The _prepare_instrumentation alias for prepare_instrumentation has been removed. .. change:: :tags: :tickets: sqlalchemy.exceptions has been renamed to sqlalchemy.exc. The module may be imported under either name. .. change:: :tags: :tickets: ORM-related exceptions are now defined in sqlalchemy.orm.exc. ConcurrentModificationError, FlushError, and UnmappedColumnError compatibility aliases are installed in sqlalchemy.exc during the import of sqlalchemy.orm. .. change:: :tags: :tickets: sqlalchemy.logging has been renamed to sqlalchemy.log. .. change:: :tags: :tickets: The transitional sqlalchemy.log.SADeprecationWarning alias for the warning's definition in sqlalchemy.exc has been removed. .. change:: :tags: :tickets: exc.AssertionError has been removed and usage replaced with Python's built-in AssertionError. .. change:: :tags: :tickets: The behavior of MapperExtensions attached to multiple, entity_name= primary mappers for a single class has been altered. The first mapper() defined for a class is the only mapper eligible for the MapperExtension 'instrument_class', 'init_instance' and 'init_failed' events. This is backwards incompatible; previously the extensions of last mapper defined would receive these events. .. change:: :tags: firebird :tickets: Added support for returning values from inserts (2.0+ only), updates and deletes (2.1+ only). .. change:: :tags: general :tickets: global "propigate"->"propagate" change. .. change:: :tags: orm :tickets: polymorphic_union() function respects the "key" of each Column if they differ from the column's name. .. change:: :tags: orm :tickets: 1199 Fixed 0.4-only bug preventing composite columns from working properly with inheriting mappers .. change:: :tags: orm :tickets: Fixed RLock-related bug in mapper which could deadlock upon reentrant mapper compile() calls, something that occurs when using declarative constructs inside of ForeignKey objects. Ported from 0.5. .. change:: :tags: orm :tickets: 1213 Fixed bug in composite types which prevented a primary-key composite type from being mutated. .. change:: :tags: orm :tickets: 976 Added ScopedSession.is_active accessor. .. change:: :tags: orm :tickets: 939 Class-bound accessor can be used as the argument to relation() order_by. .. change:: :tags: orm :tickets: 1072 Fixed shard_id argument on ShardedSession.execute(). .. change:: :tags: sql :tickets: 1246 Connection.invalidate() checks for closed status to avoid attribute errors. .. change:: :tags: sql :tickets: 1094 NullPool supports reconnect on failure behavior. .. change:: :tags: sql :tickets: 1299 The per-dialect cache used by TypeEngine to cache dialect-specific types is now a WeakKeyDictionary. This to prevent dialect objects from being referenced forever for an application that creates an arbitrarily large number of engines or dialects. There is a small performance penalty which will be resolved in 0.6. .. change:: :tags: sql :tickets: Fixed SQLite reflection methods so that non-present cursor.description, which triggers an auto-cursor close, will be detected so that no results doesn't fail on recent versions of pysqlite which raise an error when fetchone() called with no rows present. .. change:: :tags: postgres :tickets: 714 Added Index reflection support to Postgres, using a great patch we long neglected, submitted by Ken Kuhlman. .. change:: :tags: mysql :tickets: 1241 Fixed bug in exception raise when FK columns not present during reflection. .. 
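note:: Editorial sketch, not part of the original changelog: it illustrates the declarative ``__table_args__`` class variable described in the 0.5.0beta2 notes above, using the tuple form whose last element is a dict of keyword arguments for the ``Table`` constructor. The class, constraint and ``mysql_engine`` option are assumptions::

        from sqlalchemy import Column, Integer, String, UniqueConstraint
        from sqlalchemy.ext.declarative import declarative_base

        Base = declarative_base()

        class Account(Base):
            __tablename__ = 'account'
            # positional Table arguments first, keyword arguments last
            __table_args__ = (
                UniqueConstraint('owner', 'slug'),
                {'mysql_engine': 'InnoDB'},
            )
            id = Column(Integer, primary_key=True)
            owner = Column(String(50))
            slug = Column(String(50))

..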
change:: :tags: oracle :tickets: 1265 Fixed bug which was preventing out params of certain types from being received; thanks a ton to huddlej at wwu.edu ! SQLAlchemy-0.8.4/doc/_sources/changelog/changelog_06.txt0000644000076500000240000051414512251147171023557 0ustar classicstaff00000000000000 ============== 0.6 Changelog ============== .. changelog:: :version: 0.6.9 :released: Sat May 05 2012 .. change:: :tags: general :tickets: 2279 Adjusted the "importlater" mechanism, which is used internally to resolve import cycles, such that the usage of __import__ is completed when the import of sqlalchemy or sqlalchemy.orm is done, thereby avoiding any usage of __import__ after the application starts new threads, fixes. .. change:: :tags: orm :tickets: 2197 Fixed bug whereby the source clause used by query.join() would be inconsistent if against a column expression that combined multiple entities together. .. change:: :tags: orm, bug :tickets: 2310 fixed inappropriate evaluation of user-mapped object in a boolean context within query.get(). .. change:: :tags: orm :tickets: 2228 Fixed bug apparent only in Python 3 whereby sorting of persistent + pending objects during flush would produce an illegal comparison, if the persistent object primary key is not a single integer. .. change:: :tags: orm :tickets: 2234 Fixed bug where query.join() + aliased=True from a joined-inh structure to itself on relationship() with join condition on the child table would convert the lead entity into the joined one inappropriately. .. change:: :tags: orm :tickets: 2287 Fixed bug whereby mapper.order_by attribute would be ignored in the "inner" query within a subquery eager load. . .. change:: :tags: orm :tickets: 2215 Fixed bug whereby if a mapped class redefined __hash__() or __eq__() to something non-standard, which is a supported use case as SQLA should never consult these, the methods would be consulted if the class was part of a "composite" (i.e. non-single-entity) result set. .. change:: :tags: orm :tickets: 2188 Fixed subtle bug that caused SQL to blow up if: column_property() against subquery + joinedload + LIMIT + order by the column property() occurred. . .. change:: :tags: orm :tickets: 2207 The join condition produced by with_parent as well as when using a "dynamic" relationship against a parent will generate unique bindparams, rather than incorrectly repeating the same bindparam. . .. change:: :tags: orm :tickets: 2199 Repaired the "no statement condition" assertion in Query which would attempt to raise if a generative method were called after from_statement() were called.. .. change:: :tags: orm :tickets: 1776 Cls.column.collate("some collation") now works. .. change:: :tags: orm, bug :tickets: 2297 Fixed the error formatting raised when a tuple is inadvertently passed to session.query(). .. change:: :tags: engine :tickets: 2317 Backported the fix for introduced in 0.7.4, which ensures that the connection is in a valid state before attempting to call rollback()/prepare()/release() on savepoint and two-phase transactions. .. change:: :tags: sql :tickets: 2188 Fixed two subtle bugs involving column correspondence in a selectable, one with the same labeled subquery repeated, the other when the label has been "grouped" and loses itself. Affects. .. change:: :tags: sql :tickets: Fixed bug whereby "warn on unicode" flag would get set for the String type when used with certain dialects. This bug is not in 0.7. .. 
change:: :tags: sql :tickets: 2270 Fixed bug whereby with_only_columns() method of Select would fail if a selectable were passed.. However, the FROM behavior is still incorrect here, so you need 0.7 in any case for this use case to be usable. .. change:: :tags: schema :tickets: Added an informative error message when ForeignKeyConstraint refers to a column name in the parent that is not found. .. change:: :tags: postgresql :tickets: 2291, 2141 Fixed bug related to whereby the same modified index behavior in PG 9 affected primary key reflection on a renamed column.. .. change:: :tags: mysql :tickets: 2186 Fixed OurSQL dialect to use ansi-neutral quote symbol "'" for XA commands instead of '"'. . .. change:: :tags: mysql :tickets: 2225 a CREATE TABLE will put the COLLATE option after CHARSET, which appears to be part of MySQL's arbitrary rules regarding if it will actually work or not. .. change:: :tags: mssql, bug :tickets: 2269 Decode incoming values when retrieving list of index names and the names of columns within those indexes. .. change:: :tags: oracle :tickets: 2200 Added ORA-00028 to disconnect codes, use cx_oracle _Error.code to get at the code,. .. change:: :tags: oracle :tickets: 2220 repaired the oracle.RAW type which did not generate the correct DDL. .. change:: :tags: oracle :tickets: 2212 added CURRENT to reserved word list. .. change:: :tags: examples :tickets: 2266 Adjusted dictlike-polymorphic.py example to apply the CAST such that it works on PG, other databases. .. changelog:: :version: 0.6.8 :released: Sun Jun 05 2011 .. change:: :tags: orm :tickets: 2144 Calling query.get() against a column-based entity is invalid, this condition now raises a deprecation warning. .. change:: :tags: orm :tickets: 2151 a non_primary mapper will inherit the _identity_class of the primary mapper. This so that a non_primary established against a class that's normally in an inheritance mapping will produce results that are identity-map compatible with that of the primary mapper .. change:: :tags: orm :tickets: 2148 Backported 0.7's identity map implementation, which does not use a mutex around removal. This as some users were still getting deadlocks despite the adjustments in 0.6.7; the 0.7 approach that doesn't use a mutex does not appear to produce "dictionary changed size" issues, the original rationale for the mutex. .. change:: :tags: orm :tickets: 2163 Fixed the error message emitted for "can't execute syncrule for destination column 'q'; mapper 'X' does not map this column" to reference the correct mapper. . .. change:: :tags: orm :tickets: 2149 Fixed bug where determination of "self referential" relationship would fail with no workaround for joined-inh subclass related to itself, or joined-inh subclass related to a subclass of that with no cols in the sub-sub class in the join condition. .. change:: :tags: orm :tickets: 2153 mapper() will ignore non-configured foreign keys to unrelated tables when determining inherit condition between parent and child class. This is equivalent to behavior already applied to declarative. Note that 0.7 has a more comprehensive solution to this, altering how join() itself determines an FK error. .. change:: :tags: orm :tickets: 2171 Fixed bug whereby mapper mapped to an anonymous alias would fail if logging were used, due to unescaped % sign in the alias name. .. 
    .. change::
        :tags: orm
        :tickets: 2170

        Modified the text of the message which occurs when the "identity" key isn't detected on flush, to include the common cause that the Column isn't set up to detect auto-increment correctly.

    .. change::
        :tags: orm
        :tickets: 2182

        Fixed bug where the transaction-level "deleted" collection wouldn't be cleared of expunged states, raising an error if they later became transient.

    .. change::
        :tags: sql
        :tickets: 2147

        Fixed bug whereby if FetchedValue was passed to column server_onupdate, it would not have its parent "column" assigned; added test coverage for all column default assignment patterns.

    .. change::
        :tags: sql
        :tickets: 2167

        Fixed bug whereby nesting a label of a select() with another label in it would produce incorrect exported columns. Among other things this would break an ORM column_property() mapping against another column_property().

    .. change::
        :tags: engine
        :tickets: 2178

        Adjusted the __contains__() method of a RowProxy result row such that no exception throw is generated internally; NoSuchColumnError() also will generate its message regardless of whether or not the column construct can be coerced to a string.

    .. change::
        :tags: postgresql
        :tickets: 2141

        Fixed bug affecting PG 9 whereby index reflection would fail if against a column whose name had changed.

    .. change::
        :tags: postgresql
        :tickets: 2175

        Some unit test fixes regarding numeric arrays and the MATCH operator. A potential floating-point inaccuracy issue was fixed, and certain tests of the MATCH operator only execute within an EN-oriented locale for now.

    .. change::
        :tags: mssql
        :tickets: 2169

        Fixed bug in MSSQL dialect whereby the aliasing applied to a schema-qualified table would leak into enclosing select statements.

    .. change::
        :tags: mssql
        :tickets: 2159

        Fixed bug whereby the DATETIME2 type would fail on the "adapt" step when used in result sets or bound parameters. This issue is not in 0.7.

.. changelog::
    :version: 0.6.7
    :released: Wed Apr 13 2011

    .. change::
        :tags: orm
        :tickets: 2087

        Tightened the iterate vs. remove mutex around the identity map iteration, attempting to reduce the chance of an (extremely rare) reentrant gc operation causing a deadlock. Might remove the mutex in 0.7.

    .. change::
        :tags: orm
        :tickets: 2030

        Added a `name` argument to `Query.subquery()`, to allow a fixed name to be assigned to the alias object.

    .. change::
        :tags: orm
        :tickets: 2019

        A warning is emitted when a joined-table inheriting mapper has no primary keys on the locally mapped table (but has pks on the superclass table).

    .. change::
        :tags: orm
        :tickets: 2038

        Fixed bug where a "middle" class in a polymorphic hierarchy would have no 'polymorphic_on' column if it didn't also specify a 'polymorphic_identity', leading to strange errors upon refresh and the wrong class loaded when querying from that target. Also emits the correct WHERE criterion when using single table inheritance.

    .. change::
        :tags: orm
        :tickets: 1995

        Fixed bug where a column with a SQL or server side default that was excluded from a mapping with include_properties or exclude_properties would result in UnmappedColumnError.

    .. change::
        :tags: orm
        :tickets: 2046

        A warning is emitted in the unusual case that an append or similar event on a collection occurs after the parent object has been dereferenced, which prevents the parent from being marked as "dirty" in the session. This will be an exception in 0.7.

    ..
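To illustrate the new ``name`` argument to ``Query.subquery()`` mentioned in the 0.6.7 entries above, a minimal sketch follows; the ``User`` mapping, its hypothetical ``active`` column, and the ``session`` are assumed to exist already::

    # the alias will be named "active_users" in the rendered SQL,
    # rather than receiving an anonymous name
    active = (
        session.query(User.id)
        .filter(User.active == True)
        .subquery(name="active_users")
    )

    # e.g. SELECT ... FROM users JOIN active_users ON users.id = active_users.id
    q = session.query(User).join(active, User.id == active.c.id)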
change:: :tags: orm :tickets: 2098 Fixed bug in query.options() whereby a path applied to a lazyload using string keys could overlap a same named attribute on the wrong entity. Note 0.7 has an updated version of this fix. .. change:: :tags: orm :tickets: 2063 Reworded the exception raised when a flush is attempted of a subclass that is not polymorphic against the supertype. .. change:: :tags: orm :tickets: 2123 Some fixes to the state handling regarding backrefs, typically when autoflush=False, where the back-referenced collection wouldn't properly handle add/removes with no net change. Thanks to Richard Murri for the test case + patch. .. change:: :tags: orm :tickets: 2130 a "having" clause would be copied from the inside to the outside query if from_self() were used.. .. change:: :tags: sql :tickets: 2028 Column.copy(), as used in table.tometadata(), copies the 'doc' attribute. .. change:: :tags: sql :tickets: 2023 Added some defs to the resultproxy.c extension so that the extension compiles and runs on Python 2.4. .. change:: :tags: sql :tickets: 2042 The compiler extension now supports overriding the default compilation of expression._BindParamClause including that the auto-generated binds within the VALUES/SET clause of an insert()/update() statement will also use the new compilation rules. .. change:: :tags: sql :tickets: 2089 Added accessors to ResultProxy "returns_rows", "is_insert" .. change:: :tags: sql :tickets: 2116 The limit/offset keywords to select() as well as the value passed to select.limit()/offset() will be coerced to integer. .. change:: :tags: engine :tickets: 2102 Fixed bug in QueuePool, SingletonThreadPool whereby connections that were discarded via overflow or periodic cleanup() were not explicitly closed, leaving garbage collection to the task instead. This generally only affects non-reference-counting backends like Jython and Pypy. Thanks to Jaimy Azle for spotting this. .. change:: :tags: sqlite :tickets: 2115 Fixed bug where reflection of foreign key created as "REFERENCES " without col name would fail. .. change:: :tags: postgresql :tickets: 1083 When explicit sequence execution derives the name of the auto-generated sequence of a SERIAL column, which currently only occurs if implicit_returning=False, now accommodates if the table + column name is greater than 63 characters using the same logic Postgresql uses. .. change:: :tags: postgresql :tickets: 2044 Added an additional libpq message to the list of "disconnect" exceptions, "could not receive data from server" .. change:: :tags: postgresql :tickets: 2092 Added RESERVED_WORDS for postgresql dialect. .. change:: :tags: postgresql :tickets: 2073 Fixed the BIT type to allow a "length" parameter, "varying" parameter. Reflection also fixed. .. change:: :tags: informix :tickets: 2092 Added RESERVED_WORDS informix dialect. .. change:: :tags: mssql :tickets: 2071 Rewrote the query used to get the definition of a view, typically when using the Inspector interface, to use sys.sql_modules instead of the information schema, thereby allowing views definitions longer than 4000 characters to be fully returned. .. change:: :tags: mysql :tickets: 2047 oursql dialect accepts the same "ssl" arguments in create_engine() as that of MySQLdb. .. change:: :tags: firebird :tickets: 2083 The "implicit_returning" flag on create_engine() is honored if set to False. .. 
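The ``returns_rows`` and ``is_insert`` accessors added to ``ResultProxy`` in the 0.6.7 entries above can be used to introspect what a statement execution produced; a minimal sketch, assuming a ``users`` table and an ``engine`` already exist::

    conn = engine.connect()

    result = conn.execute(users.insert().values(name="ed"))
    print(result.is_insert)       # True
    print(result.returns_rows)    # False - this INSERT returns no rows

    result = conn.execute(users.select())
    print(result.returns_rows)    # True
    rows = result.fetchall()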
change:: :tags: oracle :tickets: 2100 Using column names that would require quotes for the column itself or for a name-generated bind parameter, such as names with special characters, underscores, non-ascii characters, now properly translate bind parameter keys when talking to cx_oracle. .. change:: :tags: oracle :tickets: 2116 Oracle dialect adds use_binds_for_limits=False create_engine() flag, will render the LIMIT/OFFSET values inline instead of as binds, reported to modify the execution plan used by Oracle. .. change:: :tags: ext :tickets: 2090 The horizontal_shard ShardedSession class accepts the common Session argument "query_cls" as a constructor argument, to enable further subclassing of ShardedQuery. .. change:: :tags: declarative :tickets: 2050 Added an explicit check for the case that the name 'metadata' is used for a column attribute on a declarative class. .. change:: :tags: declarative :tickets: 2061 Fix error message referencing old @classproperty name to reference @declared_attr .. change:: :tags: declarative :tickets: 2091 Arguments in __mapper_args__ that aren't "hashable" aren't mistaken for always-hashable, possibly-column arguments. .. change:: :tags: documentation :tickets: 2029 Documented SQLite DATE/TIME/DATETIME types. .. change:: :tags: examples :tickets: 2090 The Beaker caching example allows a "query_cls" argument to the query_callable() function. .. changelog:: :version: 0.6.6 :released: Sat Jan 08 2011 .. change:: :tags: orm :tickets: Fixed bug whereby a non-"mutable" attribute modified event which occurred on an object that was clean except for preceding mutable attribute changes would fail to strongly reference itself in the identity map. This would cause the object to be garbage collected, losing track of any changes that weren't previously saved in the "mutable changes" dictionary. .. change:: :tags: orm :tickets: 2013 Fixed bug whereby "passive_deletes='all'" wasn't passing the correct symbols to lazy loaders during flush, thereby causing an unwarranted load. .. change:: :tags: orm :tickets: 1997 Fixed bug which prevented composite mapped attributes from being used on a mapped select statement.. Note the workings of composite are slated to change significantly in 0.7. .. change:: :tags: orm :tickets: 1976 active_history flag also added to composite(). The flag has no effect in 0.6, but is instead a placeholder flag for forwards compatibility, as it applies in 0.7 for composites. .. change:: :tags: orm :tickets: 2002 Fixed uow bug whereby expired objects passed to Session.delete() would not have unloaded references or collections taken into account when deleting objects, despite passive_deletes remaining at its default of False. .. change:: :tags: orm :tickets: 1987 A warning is emitted when version_id_col is specified on an inheriting mapper when the inherited mapper already has one, if those column expressions are not the same. .. change:: :tags: orm :tickets: 1954 "innerjoin" flag doesn't take effect along the chain of joinedload() joins if a previous join in that chain is an outer join, thus allowing primary rows without a referenced child row to be correctly returned in results. .. change:: :tags: orm :tickets: 1964 Fixed bug regarding "subqueryload" strategy whereby strategy would fail if the entity was an aliased() construct. .. change:: :tags: orm :tickets: 2014 Fixed bug regarding "subqueryload" strategy whereby the join would fail if using a multi-level load of the form from A->joined-subclass->C .. 
change:: :tags: orm :tickets: 1968 Fixed indexing of Query objects by -1. It was erroneously transformed to the empty slice -1:0 that resulted in IndexError. .. change:: :tags: orm :tickets: 1971 The mapper argument "primary_key" can be passed as a single column as well as a list or tuple. The documentation examples that illustrated it as a scalar value have been changed to lists. .. change:: :tags: orm :tickets: 1961 Added active_history flag to relationship() and column_property(), forces attribute events to always load the "old" value, so that it's available to attributes.get_history(). .. change:: :tags: orm :tickets: 1977 Query.get() will raise if the number of params in a composite key is too large, as well as too small. .. change:: :tags: orm :tickets: 1992 Backport of "optimized get" fix from 0.7, improves the generation of joined-inheritance "load expired row" behavior. .. change:: :tags: orm :tickets: A little more verbiage to the "primaryjoin" error, in an unusual condition that the join condition "works" for viewonly but doesn't work for non-viewonly, and foreign_keys wasn't used - adds "foreign_keys" to the suggestion. Also add "foreign_keys" to the suggestion for the generic "direction" error. .. change:: :tags: sql :tickets: 1984 Fixed operator precedence rules for multiple chains of a single non-associative operator. I.e. "x - (y - z)" will compile as "x - (y - z)" and not "x - y - z". Also works with labels, i.e. "x - (y - z).label('foo')" .. change:: :tags: sql :tickets: 1967 The 'info' attribute of Column is copied during Column.copy(), i.e. as occurs when using columns in declarative mixins. .. change:: :tags: sql :tickets: Added a bind processor for booleans which coerces to int, for DBAPIs such as pymssql that naively call str() on values. .. change:: :tags: sql :tickets: 2000 CheckConstraint will copy its 'initially', 'deferrable', and '_create_rule' attributes within a copy()/tometadata() .. change:: :tags: engine :tickets: The "unicode warning" against non-unicode bind data is now raised only when the Unicode type is used explictly; not when convert_unicode=True is used on the engine or String type. .. change:: :tags: engine :tickets: 1978 Fixed memory leak in C version of Decimal result processor. .. change:: :tags: engine :tickets: 1871 Implemented sequence check capability for the C version of RowProxy, as well as 2.7 style "collections.Sequence" registration for RowProxy. .. change:: :tags: engine :tickets: 1998 Threadlocal engine methods rollback(), commit(), prepare() won't raise if no transaction is in progress; this was a regression introduced in 0.6. .. change:: :tags: engine :tickets: 2004 Threadlocal engine returns itself upon begin(), begin_nested(); engine then implements contextmanager methods to allow the "with" statement. .. change:: :tags: postgresql :tickets: 1984 Single element tuple expressions inside an IN clause parenthesize correctly, also from .. change:: :tags: postgresql :tickets: 1955 Ensured every numeric, float, int code, scalar + array, are recognized by psycopg2 and pg8000's "numeric" base type. .. change:: :tags: postgresql :tickets: 1956 Added as_uuid=True flag to the UUID type, will receive and return values as Python UUID() objects rather than strings. Currently, the UUID type is only known to work with psycopg2. .. change:: :tags: postgresql :tickets: 1989 Fixed bug whereby KeyError would occur with non-ENUM supported PG versions after a pool dispose+recreate would occur. .. 
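As a sketch of the ``as_uuid=True`` flag added to the Postgresql UUID type in the 0.6.6 entries above; the table and column names here are hypothetical, and psycopg2 is the DBAPI the entry describes as supported::

    import uuid
    from sqlalchemy import Table, Column, MetaData
    from sqlalchemy.dialects.postgresql import UUID

    metadata = MetaData()
    tokens = Table("tokens", metadata,
        # values are sent and received as Python uuid.UUID() objects
        Column("id", UUID(as_uuid=True), primary_key=True),
    )

    ins = tokens.insert().values(id=uuid.uuid4())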
change:: :tags: mysql :tickets: 1960 Fixed error handling for Jython + zxjdbc, such that has_table() property works again. Regression from 0.6.3 (we don't have a Jython buildbot, sorry) .. change:: :tags: sqlite :tickets: 1851 The REFERENCES clause in a CREATE TABLE that includes a remote schema to another table with the same schema name now renders the remote name without the schema clause, as required by SQLite. .. change:: :tags: sqlite :tickets: On the same theme, the REFERENCES clause in a CREATE TABLE that includes a remote schema to a *different* schema than that of the parent table doesn't render at all, as cross-schema references do not appear to be supported. .. change:: :tags: mssql :tickets: 1770 The rewrite of index reflection in was unfortunately not tested correctly, and returned incorrect results. This regression is now fixed. .. change:: :tags: oracle :tickets: 1953 The cx_oracle "decimal detection" logic, which takes place for result set columns with ambiguous numeric characteristics, now uses the decimal point character determined by the locale/ NLS_LANG setting, using an on-first-connect detection of this character. cx_oracle 5.0.3 or greater is also required when using a non-period-decimal-point NLS_LANG setting.. .. change:: :tags: firebird :tickets: 2012 Firebird numeric type now checks for Decimal explicitly, lets float() pass right through, thereby allowing special values such as float('inf'). .. change:: :tags: declarative :tickets: 1972 An error is raised if __table_args__ is not in tuple or dict format, and is not None. .. change:: :tags: sqlsoup :tickets: 1975 Added "map_to()" method to SqlSoup, which is a "master" method which accepts explicit arguments for each aspect of the selectable and mapping, including a base class per mapping. .. change:: :tags: sqlsoup :tickets: Mapped selectables used with the map(), with_labels(), join() methods no longer put the given argument into the internal "cache" dictionary. Particularly since the join() and select() objects are created in the method itself this was pretty much a pure memory leaking behavior. .. change:: :tags: examples :tickets: The versioning example now supports detection of changes in an associated relationship(). .. changelog:: :version: 0.6.5 :released: Sun Oct 24 2010 .. change:: :tags: orm :tickets: 1914 Added a new "lazyload" option "immediateload". Issues the usual "lazy" load operation automatically as the object is populated. The use case here is when loading objects to be placed in an offline cache, or otherwise used after the session isn't available, and straight 'select' loading, not 'joined' or 'subquery', is desired. .. change:: :tags: orm :tickets: 1920 New Query methods: query.label(name), query.as_scalar(), return the query's statement as a scalar subquery with /without label; query.with_entities(\*ent), replaces the SELECT list of the query with new entities. Roughly equivalent to a generative form of query.values() which accepts mapped entities as well as column expressions. .. change:: :tags: orm :tickets: Fixed recursion bug which could occur when moving an object from one reference to another, with backrefs involved, where the initiating parent was a subclass (with its own mapper) of the previous parent. .. change:: :tags: orm :tickets: 1918 Fixed a regression in 0.6.4 which occurred if you passed an empty list to "include_properties" on mapper() .. 
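A minimal sketch of the ``Query.with_entities()`` method described in the 0.6.5 entries above, which replaces the SELECT list of an existing query; the ``User`` mapping and ``session`` are assumed::

    q = session.query(User).filter(User.name.like("e%"))

    # same FROM/WHERE criteria, but select only two columns
    id_and_name = q.with_entities(User.id, User.name).all()

    # query.as_scalar(), from the same changelog entry, turns a query
    # into a scalar subquery usable inside another expression
    name_subq = session.query(User.name).filter(User.id == 5).as_scalar()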
change:: :tags: orm :tickets: Fixed labeling bug in Query whereby the NamedTuple would mis-apply labels if any of the column expressions were un-labeled. .. change:: :tags: orm :tickets: 1925 Patched a case where query.join() would adapt the right side to the right side of the left's join inappropriately .. change:: :tags: orm :tickets: Query.select_from() has been beefed up to help ensure that a subsequent call to query.join() will use the select_from() entity, assuming it's a mapped entity and not a plain selectable, as the default "left" side, not the first entity in the Query object's list of entities. .. change:: :tags: orm :tickets: The exception raised by Session when it is used subsequent to a subtransaction rollback (which is what happens when a flush fails in autocommit=False mode) has now been reworded (this is the "inactive due to a rollback in a subtransaction" message). In particular, if the rollback was due to an exception during flush(), the message states this is the case, and reiterates the string form of the original exception that occurred during flush. If the session is closed due to explicit usage of subtransactions (not very common), the message just states this is the case. .. change:: :tags: orm :tickets: The exception raised by Mapper when repeated requests to its initialization are made after initialization already failed no longer assumes the "hasattr" case, since there's other scenarios in which this message gets emitted, and the message also does not compound onto itself multiple times - you get the same message for each attempt at usage. The misnomer "compiles" is being traded out for "initialize". .. change:: :tags: orm :tickets: 1935 Fixed bug in query.update() where 'evaluate' or 'fetch' expiration would fail if the column expression key was a class attribute with a different keyname as the actual column name. .. change:: :tags: orm :tickets: Added an assertion during flush which ensures that no NULL-holding identity keys were generated on "newly persistent" objects. This can occur when user defined code inadvertently triggers flushes on not-fully-loaded objects. .. change:: :tags: orm :tickets: 1910 lazy loads for relationship attributes now use the current state, not the "committed" state, of foreign and primary key attributes when issuing SQL, if a flush is not in process. Previously, only the database-committed state would be used. In particular, this would cause a many-to-one get()-on-lazyload operation to fail, as autoflush is not triggered on these loads when the attributes are determined and the "committed" state may not be available. .. change:: :tags: orm :tickets: A new flag on relationship(), load_on_pending, allows the lazy loader to fire off on pending objects without a flush taking place, as well as a transient object that's been manually "attached" to the session. Note that this flag blocks attribute events from taking place when an object is loaded, so backrefs aren't available until after a flush. The flag is only intended for very specific use cases. .. change:: :tags: orm :tickets: Another new flag on relationship(), cascade_backrefs, disables the "save-update" cascade when the event was initiated on the "reverse" side of a bidirectional relationship. This is a cleaner behavior so that many-to-ones can be set on a transient object without it getting sucked into the child object's session, while still allowing the forward collection to cascade. We *might* default this to False in 0.7. .. 
change:: :tags: orm :tickets: Slight improvement to the behavior of "passive_updates=False" when placed only on the many-to-one side of a relationship; documentation has been clarified that passive_updates=False should really be on the one-to-many side. .. change:: :tags: orm :tickets: Placing passive_deletes=True on a many-to-one emits a warning, since you probably intended to put it on the one-to-many side. .. change:: :tags: orm :tickets: Fixed bug that would prevent "subqueryload" from working correctly with single table inheritance for a relationship from a subclass - the "where type in (x, y, z)" only gets placed on the inside, instead of repeatedly. .. change:: :tags: orm :tickets: When using from_self() with single table inheritance, the "where type in (x, y, z)" is placed on the outside of the query only, instead of repeatedly. May make some more adjustments to this. .. change:: :tags: orm :tickets: 1924 scoped_session emits a warning when configure() is called if a Session is already present (checks only the current thread) .. change:: :tags: orm :tickets: 1932 reworked the internals of mapper.cascade_iterator() to cut down method calls by about 9% in some circumstances. .. change:: :tags: sql :tickets: Fixed bug in TypeDecorator whereby the dialect-specific type was getting pulled in to generate the DDL for a given type, which didn't always return the correct result. .. change:: :tags: sql :tickets: TypeDecorator can now have a fully constructed type specified as its "impl", in addition to a type class. .. change:: :tags: sql :tickets: TypeDecorator will now place itself as the resulting type for a binary expression where the type coercion rules would normally return its impl type - previously, a copy of the impl type would be returned which would have the TypeDecorator embedded into it as the "dialect" impl, this was probably an unintentional way of achieving the desired effect. .. change:: :tags: sql :tickets: TypeDecorator.load_dialect_impl() returns "self.impl" by default, i.e. not the dialect implementation type of "self.impl". This to support compilation correctly. Behavior can be user-overridden in exactly the same way as before to the same effect. .. change:: :tags: sql :tickets: Added type_coerce(expr, type\_) expression element. Treats the given expression as the given type when evaluating expressions and processing result rows, but does not affect the generation of SQL, other than an anonymous label. .. change:: :tags: sql :tickets: Table.tometadata() now copies Index objects associated with the Table as well. .. change:: :tags: sql :tickets: Table.tometadata() issues a warning if the given Table is already present in the target MetaData - the existing Table object is returned. .. change:: :tags: sql :tickets: An informative error message is raised if a Column which has not yet been assigned a name, i.e. as in declarative, is used in a context where it is exported to the columns collection of an enclosing select() construct, or if any construct involving that column is compiled before its name is assigned. .. change:: :tags: sql :tickets: 1862 as_scalar(), label() can be called on a selectable which contains a Column that is not yet named. .. change:: :tags: sql :tickets: 1907 Fixed recursion overflow which could occur when operating with two expressions both of type "NullType", but not the singleton NULLTYPE instance. .. 
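To sketch the ``type_coerce()`` construct noted in the 0.6.5 SQL entries above: it applies a type's Python-side bind/result processing to an expression without rendering a CAST. The table below is hypothetical, and whether the string-to-date conversion actually happens on the result side depends on the dialect's Date handling (SQLite, for example, parses date strings)::

    from sqlalchemy import MetaData, Table, Column, Integer, String, Date, select
    from sqlalchemy.sql.expression import type_coerce

    metadata = MetaData()
    events = Table("events", metadata,
        Column("id", Integer, primary_key=True),
        Column("happened_on", String(10)),   # dates stored as plain strings
    )

    # no CAST is emitted, only an anonymous label; result rows for this
    # column are processed using the Date type
    stmt = select([events.c.id, type_coerce(events.c.happened_on, Date)])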
change:: :tags: declarative :tickets: 1922 @classproperty (soon/now @declared_attr) takes effect for __mapper_args__, __table_args__, __tablename__ on a base class that is not a mixin, as well as mixins. .. change:: :tags: declarative :tickets: 1915 @classproperty 's official name/location for usage with declarative is sqlalchemy.ext.declarative.declared_attr. Same thing, but moving there since it is more of a "marker" that's specific to declararative, not just an attribute technique. .. change:: :tags: declarative :tickets: 1931, 1930 Fixed bug whereby columns on a mixin wouldn't propagate correctly to a single-table, or joined-table, inheritance scheme where the attribute name is different than that of the column.,. .. change:: :tags: declarative :tickets: A mixin can now specify a column that overrides a column of the same name associated with a superclass. Thanks to Oystein Haaland. .. change:: :tags: engine :tickets: Fixed a regression in 0.6.4 whereby the change that allowed cursor errors to be raised consistently broke the result.lastrowid accessor. Test coverage has been added for result.lastrowid. Note that lastrowid is only supported by Pysqlite and some MySQL drivers, so isn't super-useful in the general case. .. change:: :tags: engine :tickets: the logging message emitted by the engine when a connection is first used is now "BEGIN (implicit)" to emphasize that DBAPI has no explicit begin(). .. change:: :tags: engine :tickets: 1936 added "views=True" option to metadata.reflect(), will add the list of available views to those being reflected. .. change:: :tags: engine :tickets: 1899 engine_from_config() now accepts 'debug' for 'echo', 'echo_pool', 'force' for 'convert_unicode', boolean values for 'use_native_unicode'. .. change:: :tags: postgresql :tickets: Added "as_tuple" flag to ARRAY type, returns results as tuples instead of lists to allow hashing. .. change:: :tags: postgresql :tickets: 1933 Fixed bug which prevented "domain" built from a custom type such as "enum" from being reflected. .. change:: :tags: mysql :tickets: 1940 Fixed bug involving reflection of CURRENT_TIMESTAMP default used with ON UPDATE clause, thanks to Taavi Burns .. change:: :tags: oracle :tickets: 1878 The implicit_retunring argument to create_engine() is now honored regardless of detected version of Oracle. Previously, the flag would be forced to False if server version info was < 10. .. change:: :tags: mssql :tickets: 1946 Fixed reflection bug which did not properly handle reflection of unknown types. .. change:: :tags: mssql :tickets: 1943 Fixed bug where aliasing of tables with "schema" would fail to compile properly. .. change:: :tags: mssql :tickets: 1770 Rewrote the reflection of indexes to use sys. catalogs, so that column names of any configuration (spaces, embedded commas, etc.) can be reflected. Note that reflection of indexes requires SQL Server 2005 or greater. .. change:: :tags: mssql :tickets: 1952 mssql+pymssql dialect now honors the "port" portion of the URL instead of discarding it. .. change:: :tags: informix :tickets: 1906 *Major* cleanup / modernization of the Informix dialect for 0.6, courtesy Florian Apolloner. .. change:: :tags: tests :tickets: the NoseSQLAlchemyPlugin has been moved to a new package "sqlalchemy_nose" which installs along with "sqlalchemy". This so that the "nosetests" script works as always but also allows the --with-coverage option to turn on coverage before SQLAlchemy modules are imported, allowing coverage to work correctly. .. 
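A short sketch of the ``views=True`` option to ``metadata.reflect()`` from the 0.6.5 engine entries above, assuming an ``engine`` pointing at a database that contains views::

    from sqlalchemy import MetaData

    metadata = MetaData()

    # reflect tables as well as available views into the MetaData
    metadata.reflect(bind=engine, views=True)

    for name in metadata.tables:
        print(name)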
change:: :tags: misc :tickets: 1890 CircularDependencyError now has .cycles and .edges members, which are the set of elements involved in one or more cycles, and the set of edges as 2-tuples. .. changelog:: :version: 0.6.4 :released: Tue Sep 07 2010 .. change:: :tags: orm :tickets: The name ConcurrentModificationError has been changed to StaleDataError, and descriptive error messages have been revised to reflect exactly what the issue is. Both names will remain available for the forseeable future for schemes that may be specifying ConcurrentModificationError in an "except:" clause. .. change:: :tags: orm :tickets: 1891 Added a mutex to the identity map which mutexes remove operations against iteration methods, which now pre-buffer before returning an iterable. This because asyncrhonous gc can remove items via the gc thread at any time. .. change:: :tags: orm :tickets: The Session class is now present in sqlalchemy.orm.*. We're moving away from the usage of create_session(), which has non-standard defaults, for those situations where a one-step Session constructor is desired. Most users should stick with sessionmaker() for general use, however. .. change:: :tags: orm :tickets: query.with_parent() now accepts transient objects and will use the non-persistent values of their pk/fk attributes in order to formulate the criterion. Docs are also clarified as to the purpose of with_parent(). .. change:: :tags: orm :tickets: The include_properties and exclude_properties arguments to mapper() now accept Column objects as members in addition to strings. This so that same-named Column objects, such as those within a join(), can be disambiguated. .. change:: :tags: orm :tickets: 1896 A warning is now emitted if a mapper is created against a join or other single selectable that includes multiple columns with the same name in its .c. collection, and those columns aren't explictly named as part of the same or separate attributes (or excluded). In 0.7 this warning will be an exception. Note that this warning is not emitted when the combination occurs as a result of inheritance, so that attributes still allow being overridden naturally.. In 0.7 this will be improved further. .. change:: :tags: orm :tickets: 1896 The primary_key argument to mapper() can now specify a series of columns that are only a subset of the calculated "primary key" columns of the mapped selectable, without an error being raised. This helps for situations where a selectable's effective primary key is simpler than the number of columns in the selectable that are actually marked as "primary_key", such as a join against two tables on their primary key columns. .. change:: :tags: orm :tickets: An object that's been deleted now gets a flag 'deleted', which prohibits the object from being re-add()ed to the session, as previously the object would live in the identity map silently until its attributes were accessed. The make_transient() function now resets this flag along with the "key" flag. .. change:: :tags: orm :tickets: make_transient() can be safely called on an already transient instance. .. change:: :tags: orm :tickets: a warning is emitted in mapper() if the polymorphic_on column is not present either in direct or derived form in the mapped selectable or in the with_polymorphic selectable, instead of silently ignoring it. Look for this to become an exception in 0.7. .. change:: :tags: orm :tickets: Another pass through the series of error messages emitted when relationship() is configured with ambiguous arguments. 
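The rename of ``ConcurrentModificationError`` to ``StaleDataError`` described in the 0.6.4 entries above keeps the old name importable; a minimal sketch of an ``except`` clause written against the new name, with the ``session`` and its flushed objects assumed::

    from sqlalchemy.orm.exc import StaleDataError

    try:
        session.flush()
    except StaleDataError:
        # e.g. a versioned UPDATE or DELETE matched an unexpected
        # number of rows; roll back and retry or report
        session.rollback()
        raise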
The "foreign_keys" setting is no longer mentioned, as it is almost never needed and it is preferable users set up correct ForeignKey metadata, which is now the recommendation. If 'foreign_keys' is used and is incorrect, the message suggests the attribute is probably unnecessary. Docs for the attribute are beefed up. This because all confused relationship() users on the ML appear to be attempting to use foreign_keys due to the message, which only confuses them further since Table metadata is much clearer. .. change:: :tags: orm :tickets: 1877 If the "secondary" table has no ForeignKey metadata and no foreign_keys is set, even though the user is passing screwed up information, it is assumed that primary/secondaryjoin expressions should consider only and all cols in "secondary" to be foreign. It's not possible with "secondary" for the foreign keys to be elsewhere in any case. A warning is now emitted instead of an error, and the mapping succeeds. .. change:: :tags: orm :tickets: 1856 Moving an o2m object from one collection to another, or vice versa changing the referenced object by an m2o, where the foreign key is also a member of the primary key, will now be more carefully checked during flush if the change in value of the foreign key on the "many" side is the result of a change in the primary key of the "one" side, or if the "one" is just a different object. In one case, a cascade-capable DB would have cascaded the value already and we need to look at the "new" PK value to do an UPDATE, in the other we need to continue looking at the "old". We now look at the "old", assuming passive_updates=True, unless we know it was a PK switch that triggered the change. .. change:: :tags: orm :tickets: 1857 The value of version_id_col can be changed manually, and this will result in an UPDATE of the row. Versioned UPDATEs and DELETEs now use the "committed" value of the version_id_col in the WHERE clause and not the pending changed value. The version generator is also bypassed if manual changes are present on the attribute. .. change:: :tags: orm :tickets: Repaired the usage of merge() when used with concrete inheriting mappers. Such mappers frequently have so-called "concrete" attributes, which are subclass attributes that "disable" propagation from the parent - these needed to allow a merge() operation to pass through without effect. .. change:: :tags: orm :tickets: 1863 Specifying a non-column based argument for column_mapped_collection, including string, text() etc., will raise an error message that specifically asks for a column element, no longer misleads with incorrect information about text() or literal(). .. change:: :tags: orm :tickets: Similarly, for relationship(), foreign_keys, remote_side, order_by - all column-based expressions are enforced - lists of strings are explicitly disallowed since this is a very common error .. change:: :tags: orm :tickets: 1864 Dynamic attributes don't support collection population - added an assertion for when set_committed_value() is called, as well as when joinedload() or subqueryload() options are applied to a dynamic attribute, instead of failure / silent failure. .. change:: :tags: orm :tickets: 1852 Fixed bug whereby generating a Query derived from one which had the same column repeated with different label names, typically in some UNION situations, would fail to propagate the inner columns completely to the outer query. .. 
change:: :tags: orm :tickets: 1881 object_session() raises the proper UnmappedInstanceError when presented with an unmapped instance. .. change:: :tags: orm :tickets: Applied further memoizations to calculated Mapper properties, with significant (~90%) runtime mapper.py call count reduction in heavily polymorphic mapping configurations. .. change:: :tags: orm :tickets: mapper _get_col_to_prop private method used by the versioning example is deprecated; now use mapper.get_property_by_column() which will remain the public method for this. .. change:: :tags: orm :tickets: the versioning example works correctly now if versioning on a col that was formerly NULL. .. change:: :tags: sql :tickets: Calling execute() on an alias() construct is pending deprecation for 0.7, as it is not itself an "executable" construct. It currently "proxies" its inner element and is conditionally "executable" but this is not the kind of ambiguity we like these days. .. change:: :tags: sql :tickets: The execute() and scalar() methods of ClauseElement are now moved appropriately to the Executable subclass. ClauseElement.execute()/ scalar() are still present and are pending deprecation in 0.7, but note these would always raise an error anyway if you were not an Executable (unless you were an alias(), see previous note). .. change:: :tags: sql :tickets: Added basic math expression coercion for Numeric->Integer, so that resulting type is Numeric regardless of the direction of the expression. .. change:: :tags: sql :tickets: 1855 Changed the scheme used to generate truncated "auto" index names when using the "index=True" flag on Column. The truncation only takes place with the auto-generated name, not one that is user-defined (an error would be raised instead), and the truncation scheme itself is now based on a fragment of an md5 hash of the identifier name, so that multiple indexes on columns with similar names still have unique names. .. change:: :tags: sql :tickets: 1412 The generated index name also is based on a "max index name length" attribute which is separate from the "max identifier length" - this to appease MySQL who has a max length of 64 for index names, separate from their overall max length of 255. .. change:: :tags: sql :tickets: the text() construct, if placed in a column oriented situation, will at least return NULLTYPE for its type instead of None, allowing it to be used a little more freely for ad-hoc column expressions than before. literal_column() is still the better choice, however. .. change:: :tags: sql :tickets: Added full description of parent table/column, target table/column in error message raised when ForeignKey can't resolve target. .. change:: :tags: sql :tickets: 1865 Fixed bug whereby replacing composite foreign key columns in a reflected table would cause an attempt to remove the reflected constraint from the table a second time, raising a KeyError. .. change:: :tags: sql :tickets: the _Label construct, i.e. the one that is produced whenever you say somecol.label(), now counts itself in its "proxy_set" unioned with that of it's contained column's proxy set, instead of directly returning that of the contained column. This allows column correspondence operations which depend on the identity of the _Labels themselves to return the correct result .. change:: :tags: sql :tickets: 1852 fixes ORM bug. .. 
change:: :tags: engine :tickets: Calling fetchone() or similar on a result that has already been exhausted, has been closed, or is not a result-returning result now raises ResourceClosedError, a subclass of InvalidRequestError, in all cases, regardless of backend. Previously, some DBAPIs would raise ProgrammingError (i.e. pysqlite), others would return None leading to downstream breakages (i.e. MySQL-python). .. change:: :tags: engine :tickets: 1894 Fixed bug in Connection whereby if a "disconnect" event occurred in the "initialize" phase of the first connection pool connect, an AttributeError would be raised when the Connection would attempt to invalidate the DBAPI connection. .. change:: :tags: engine :tickets: Connection, ResultProxy, as well as Session use ResourceClosedError for all "this connection/transaction/result is closed" types of errors. .. change:: :tags: engine :tickets: Connection.invalidate() can be called more than once and subsequent calls do nothing. .. change:: :tags: declarative :tickets: if @classproperty is used with a regular class-bound mapper property attribute, it will be called to get the actual attribute value during initialization. Currently, there's no advantage to using @classproperty on a column or relationship attribute of a declarative class that isn't a mixin - evaluation is at the same time as if @classproperty weren't used. But here we at least allow it to function as expected. .. change:: :tags: declarative :tickets: Fixed bug where "Can't add additional column" message would display the wrong name. .. change:: :tags: postgresql :tickets: Fixed the psycopg2 dialect to use its set_isolation_level() method instead of relying upon the base "SET SESSION ISOLATION" command, as psycopg2 resets the isolation level on each new transaction otherwise. .. change:: :tags: mssql :tickets: Fixed "default schema" query to work with pymssql backend. .. change:: :tags: firebird :tickets: Fixed bug whereby a column default would fail to reflect if the "default" keyword were lower case. .. change:: :tags: oracle :tickets: 1879 Added ROWID type to the Oracle dialect, for those cases where an explicit CAST might be needed. .. change:: :tags: oracle :tickets: 1867 Oracle reflection of indexes has been tuned so that indexes which include some or all primary key columns, but not the same set of columns as that of the primary key, are reflected. Indexes which contain the identical columns as that of the primary key are skipped within reflection, as the index in that case is assumed to be the auto-generated primary key index. Previously, any index with PK columns present would be skipped. Thanks to Kent Bower for the patch. .. change:: :tags: oracle :tickets: 1868 Oracle now reflects the names of primary key constraints - also thanks to Kent Bower. .. change:: :tags: informix :tickets: 1904 Applied patches from to get basic Informix functionality up again. We rely upon end-user testing to ensure that Informix is working to some degree. .. change:: :tags: documentation :tickets: The docs have been reorganized such that the "API Reference" section is gone - all the docstrings from there which were public API are moved into the context of the main doc section that talks about it. Main docs divided into "SQLAlchemy Core" and "SQLAlchemy ORM" sections, mapper/relationship docs have been broken out. Lots of sections rewritten and/or reorganized. .. 
change:: :tags: examples :tickets: The beaker_caching example has been reorgnized such that the Session, cache manager, declarative_base are part of environment, and custom cache code is portable and now within "caching_query.py". This allows the example to be easier to "drop in" to existing projects. .. change:: :tags: examples :tickets: 1887 the history_meta versioning recipe sets "unique=False" when copying columns, so that the versioning table handles multiple rows with repeating values. .. changelog:: :version: 0.6.3 :released: Thu Jul 15 2010 .. change:: :tags: orm :tickets: 1845 Removed errant many-to-many load in unitofwork which triggered unnecessarily on expired/unloaded collections. This load now takes place only if passive_updates is False and the parent primary key has changed, or if passive_deletes is False and a delete of the parent has occurred. .. change:: :tags: orm :tickets: 1853 Column-entities (i.e. query(Foo.id)) copy their state more fully when queries are derived from themselves + a selectable (i.e. from_self(), union(), etc.), so that join() and such have the correct state to work from. .. change:: :tags: orm :tickets: 1853 Fixed bug where Query.join() would fail if querying a non-ORM column then joining without an on clause when a FROM clause is already present, now raises a checked exception the same way it does when the clause is not present. .. change:: :tags: orm :tickets: 1142 Improved the check for an "unmapped class", including the case where the superclass is mapped but the subclass is not. Any attempts to access cls._sa_class_manager.mapper now raise UnmappedClassError(). .. change:: :tags: orm :tickets: Added "column_descriptions" accessor to Query, returns a list of dictionaries containing naming/typing information about the entities the Query will return. Can be helpful for building GUIs on top of ORM queries. .. change:: :tags: mysql :tickets: 1848 The _extract_error_code() method now works correctly with each MySQL dialect ( MySQL-python, OurSQL, MySQL-Connector-Python, PyODBC). Previously, the reconnect logic would fail for OperationalError conditions, however since MySQLdb and OurSQL have their own reconnect feature, there was no symptom for these drivers here unless one watched the logs. .. change:: :tags: oracle :tickets: 1840 More tweaks to cx_oracle Decimal handling. "Ambiguous" numerics with no decimal place are coerced to int at the connection handler level. The advantage here is that ints come back as ints without SQLA type objects being involved and without needless conversion to Decimal first. Unfortunately, some exotic subquery cases can even see different types between individual result rows, so the Numeric handler, when instructed to return Decimal, can't take full advantage of "native decimal" mode and must run isinstance() on every value to check if its Decimal already. Reopen of .. changelog:: :version: 0.6.2 :released: Tue Jul 06 2010 .. change:: :tags: orm :tickets: Query.join() will check for a call of the form query.join(target, clause_expression), i.e. missing the tuple, and raise an informative error message that this is the wrong calling form. .. change:: :tags: orm :tickets: 1824 Fixed bug regarding flushes on self-referential bi-directional many-to-many relationships, where two objects made to mutually reference each other in one flush would fail to insert a row for both sides. Regression from 0.5. .. 
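As a sketch of the ``Query.column_descriptions`` accessor noted in the 0.6.3 entries above, which reports naming/typing information for the entities a query will return; ``User`` and ``session`` are assumed::

    q = session.query(User, User.name)

    for desc in q.column_descriptions:
        # each entry is a dictionary describing one SELECT element,
        # including its name and type
        print(desc["name"], desc["type"])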
change:: :tags: orm :tickets: the post_update feature of relationship() has been reworked architecturally to integrate more closely with the new 0.6 unit of work. The motivation for the change is so that multiple "post update" calls, each affecting different foreign key columns of the same row, are executed in a single UPDATE statement, rather than one UPDATE statement per column per row. Multiple row updates are also batched into executemany()s as possible, while maintaining consistent row ordering. .. change:: :tags: orm :tickets: Query.statement, Query.subquery(), etc. now transfer the values of bind parameters, i.e. those specified by query.params(), into the resulting SQL expression. Previously the values would not be transferred and bind parameters would come out as None. .. change:: :tags: orm :tickets: Subquery-eager-loading now works with Query objects which include params(), as well as get() Queries. .. change:: :tags: orm :tickets: Can now call make_transient() on an instance that is referenced by parent objects via many-to-one, without the parent's foreign key value getting temporarily set to None - this was a function of the "detect primary key switch" flush handler. It now ignores objects that are no longer in the "persistent" state, and the parent's foreign key identifier is left unaffected. .. change:: :tags: orm :tickets: query.order_by() now accepts False, which cancels any existing order_by() state on the Query, allowing subsequent generative methods to be called which do not support ORDER BY. This is not the same as the already existing feature of passing None, which suppresses any existing order_by() settings, including those configured on the mapper. False will make it as though order_by() was never called, while None is an active setting. .. change:: :tags: orm :tickets: An instance which is moved to "transient", has an incomplete or missing set of primary key attributes, and contains expired attributes, will raise an InvalidRequestError if an expired attribute is accessed, instead of getting a recursion overflow. .. change:: :tags: orm :tickets: The make_transient() function is now in the generated documentation. .. change:: :tags: orm :tickets: make_transient() removes all "loader" callables from the state being made transient, removing any "expired" state - all unloaded attributes reset back to undefined, None/empty on access. .. change:: :tags: sql :tickets: 1822 The warning emitted by the Unicode and String types with convert_unicode=True no longer embeds the actual value passed. This so that the Python warning registry does not continue to grow in size, the warning is emitted once as per the warning filter settings, and large string values don't pollute the output. .. change:: :tags: sql :tickets: Fixed bug that would prevent overridden clause compilation from working for "annotated" expression elements, which are often generated by the ORM. .. change:: :tags: sql :tickets: 1400 The argument to "ESCAPE" of a LIKE operator or similar is passed through render_literal_value(), which may implement escaping of backslashes. .. change:: :tags: sql :tickets: Fixed bug in Enum type which blew away native_enum flag when used with TypeDecorators or other adaption scenarios. .. change:: :tags: sql :tickets: Inspector hits bind.connect() when invoked to ensure initialize has been called. the internal name ".conn" is changed to ".bind", since that's what it is. .. 
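To clarify the difference described in the 0.6.2 entries above between passing ``False`` and ``None`` to ``query.order_by()``, a minimal sketch (``User`` and ``session`` assumed)::

    base = session.query(User).order_by(User.name)

    # False: as though order_by() had never been called; a mapper-level
    # order_by, if one is configured, would still apply
    q_reset = base.order_by(False)

    # None: an active "no ordering" setting which also suppresses any
    # order_by configured on the mapper
    q_none = base.order_by(None)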
change:: :tags: sql :tickets: Modified the internals of "column annotation" such that a custom Column subclass can safely override _constructor to return Column, for the purposes of making "configurational" column classes that aren't involved in proxying, etc. .. change:: :tags: sql :tickets: 1829 Column.copy() takes along the "unique" attribute among others, fixes regarding declarative mixins .. change:: :tags: postgresql :tickets: 1400 render_literal_value() is overridden which escapes backslashes, currently applies to the ESCAPE clause of LIKE and similar expressions. Ultimately this will have to detect the value of "standard_conforming_strings" for full behavior. .. change:: :tags: postgresql :tickets: 1836 Won't generate "CREATE TYPE" / "DROP TYPE" if using types.Enum on a PG version prior to 8.3 - the supports_native_enum flag is fully honored. .. change:: :tags: mysql :tickets: 1826 MySQL dialect doesn't emit CAST() for MySQL version detected < 4.0.2. This allows the unicode check on connect to proceed. .. change:: :tags: mysql :tickets: MySQL dialect now detects NO_BACKSLASH_ESCAPES sql mode, in addition to ANSI_QUOTES. .. change:: :tags: mysql :tickets: 1400 render_literal_value() is overridden which escapes backslashes, currently applies to the ESCAPE clause of LIKE and similar expressions. This behavior is derived from detecting the value of NO_BACKSLASH_ESCAPES. .. change:: :tags: oracle :tickets: 1819 Fixed ora-8 compatibility flags such that they don't cache a stale value from before the first database connection actually occurs. .. change:: :tags: oracle :tickets: 1840 Oracle's "native decimal" metadata begins to return ambiguous typing information about numerics when columns are embedded in subqueries as well as when ROWNUM is consulted with subqueries, as we do for limit/offset. We've added these ambiguous conditions to the cx_oracle "convert to Decimal()" handler, so that we receive numerics as Decimal in more cases instead of as floats. These are then converted, if requested, into Integer or Float, or otherwise kept as the lossless Decimal. .. change:: :tags: mssql :tickets: 1825 If server_version_info is outside the usual range of (8, ), (9, ), (10, ), a warning is emitted which suggests checking that the FreeTDS version configuration is using 7.0 or 8.0, not 4.2. .. change:: :tags: firebird :tickets: 1823 Fixed incorrect signature in do_execute(), error introduced in 0.6.1. .. change:: :tags: firebird :tickets: 1813 Firebird dialect adds CHAR, VARCHAR types which accept a "charset" flag, to support Firebird "CHARACTER SET" clause. .. change:: :tags: declarative :tickets: 1805, 1796, 1751 Added support for @classproperty to provide any kind of schema/mapping construct from a declarative mixin, including columns with foreign keys, relationships, column_property, deferred. This solves all such issues on declarative mixins. An error is raised if any MapperProperty subclass is specified on a mixin without using @classproperty. .. change:: :tags: declarative :tickets: 1821 a mixin class can now define a column that matches one which is present on a __table__ defined on a subclass. It cannot, however, define one that is not present in the __table__, and the error message here now works. .. 
change:: :tags: extension, compiler :tickets: 1838 The 'default' compiler is automatically copied over when overriding the compilation of a built in clause construct, so no KeyError is raised if the user-defined compiler is specific to certain backends and compilation for a different backend is invoked. .. change:: :tags: documentation :tickets: 1820 Added documentation for the Inspector. .. change:: :tags: documentation :tickets: 1830 Fixed @memoized_property and @memoized_instancemethod decorators so that Sphinx documentation picks up these attributes and methods, such as ResultProxy.inserted_primary_key. .. changelog:: :version: 0.6.1 :released: Mon May 31 2010 .. change:: :tags: orm :tickets: 1782 Fixed regression introduced in 0.6.0 involving improper history accounting on mutable attributes. .. change:: :tags: orm :tickets: 1807 Fixed regression introduced in 0.6.0 unit of work refactor that broke updates for bi-directional relationship() with post_update=True. .. change:: :tags: orm :tickets: 1789 session.merge() will not expire attributes on the returned instance if that instance is "pending". .. change:: :tags: orm :tickets: 1802 fixed __setstate__ method of CollectionAdapter to not fail during deserialize where parent InstanceState not yet unserialized. .. change:: :tags: orm :tickets: 1797 Added internal warning in case an instance without a full PK happened to be expired and then was asked to refresh. .. change:: :tags: orm :tickets: Added more aggressive caching to the mapper's usage of UPDATE, INSERT, and DELETE expressions. Assuming the statement has no per-object SQL expressions attached, the expression objects are cached by the mapper after the first create, and their compiled form is stored persistently in a cache dictionary for the duration of the related Engine. The cache is an LRUCache for the rare case that a mapper receives an extremely high number of different column patterns as UPDATEs. .. change:: :tags: sql :tickets: 1793 expr.in_() now accepts a text() construct as the argument. Grouping parenthesis are added automatically, i.e. usage is like `col.in_(text("select id from table"))`. .. change:: :tags: sql :tickets: Columns of _Binary type (i.e. LargeBinary, BLOB, etc.) will coerce a "basestring" on the right side into a _Binary as well so that required DBAPI processing takes place. .. change:: :tags: sql :tickets: 1801 Added table.add_is_dependent_on(othertable), allows manual placement of dependency rules between two Table objects for use within create_all(), drop_all(), sorted_tables. .. change:: :tags: sql :tickets: 1778 Fixed bug that prevented implicit RETURNING from functioning properly with composite primary key that contained zeroes. .. change:: :tags: sql :tickets: Fixed errant space character when generating ADD CONSTRAINT for a named UNIQUE constraint. .. change:: :tags: sql :tickets: 1571 Fixed "table" argument on constructor of ForeginKeyConstraint .. change:: :tags: sql :tickets: 1786 Fixed bug in connection pool cursor wrapper whereby if a cursor threw an exception on close(), the logging of the message would fail. .. change:: :tags: sql :tickets: the _make_proxy() method of ColumnClause and Column now use self.__class__ to determine the class of object to be returned instead of hardcoding to ColumnClause/Column, making it slightly easier to produce specific subclasses of these which work in alias/subquery situations. .. change:: :tags: sql :tickets: 1798 func.XXX() doesn't inadvertently resolve to non-Function classes (e.g. fixes func.text()). .. 
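A minimal sketch of the ``expr.in_()`` / ``text()`` combination described in the 0.6.1 SQL entries above; the ``users`` table and the ``premium_members`` query text are hypothetical::

    from sqlalchemy import text

    # grouping parenthesis around the textual SELECT are added automatically
    stmt = users.select().where(
        users.c.id.in_(text("select user_id from premium_members"))
    )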
change:: :tags: engines :tickets: 1781 Fixed building the C extensions on Python 2.4. .. change:: :tags: engines :tickets: Pool classes will reuse the same "pool_logging_name" setting after a dispose() occurs. .. change:: :tags: engines :tickets: Engine gains an "execution_options" argument and update_execution_options() method, which will apply to all connections generated by this engine. .. change:: :tags: mysql :tickets: 1794 func.sysdate() emits "SYSDATE()", i.e. with the ending parenthesis, on MySQL. .. change:: :tags: sqlite :tickets: 1812 Fixed concatenation of constraints when "PRIMARY KEY" constraint gets moved to column level due to SQLite AUTOINCREMENT keyword being rendered. .. change:: :tags: oracle :tickets: 1775 Added a check for cx_oracle versions lower than version 5, in which case the incompatible "output type handler" won't be used. This will impact decimal accuracy and some unicode handling issues. .. change:: :tags: oracle :tickets: 1790 Fixed use_ansi=False mode, which was producing broken WHERE clauses in pretty much all cases. .. change:: :tags: oracle :tickets: 1808 Re-established support for Oracle 8 with cx_oracle, including that use_ansi is set to False automatically, NVARCHAR2 and NCLOB are not rendered for Unicode, "native unicode" check doesn't fail, cx_oracle "native unicode" mode is disabled, VARCHAR() is emitted with bytes count instead of char count. .. change:: :tags: oracle :tickets: 1670 oracle_xe 5 doesn't accept a Python unicode object in its connect string in normal Python 2.x mode - so we coerce to str() directly. non-ascii characters aren't supported in connect strings here since we don't know what encoding we could use. .. change:: :tags: oracle :tickets: 1815 FOR UPDATE is emitted in the syntactically correct position when limit/offset is used, i.e. the ROWNUM subquery. However, Oracle can't really handle FOR UPDATE with ORDER BY or with subqueries, so its still not very usable, but at least SQLA gets the SQL past the Oracle parser. .. change:: :tags: firebird :tickets: 1521 Added a label to the query used within has_table() and has_sequence() to work with older versions of Firebird that don't provide labels for result columns. .. change:: :tags: firebird :tickets: 1779 Added integer coercion to the "type_conv" attribute when passed via query string, so that it is properly interpreted by Kinterbasdb. .. change:: :tags: firebird :tickets: 1646 Added 'connection shutdown' to the list of exception strings which indicate a dropped connection. .. change:: :tags: sqlsoup :tickets: 1783 the SqlSoup constructor accepts a `base` argument which specifies the base class to use for mapped classes, the default being `object`. .. changelog:: :version: 0.6.0 :released: Sun Apr 18 2010 .. change:: :tags: orm :tickets: 1742, 1081 Unit of work internals have been rewritten. Units of work with large numbers of objects interdependent objects can now be flushed without recursion overflows as there is no longer reliance upon recursive calls. The number of internal structures now stays constant for a particular session state, regardless of how many relationships are present on mappings. The flow of events now corresponds to a linear list of steps, generated by the mappers and relationships based on actual work to be done, filtered through a single topological sort for correct ordering. Flush actions are assembled using far fewer steps and less memory. .. 
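A sketch of the engine-wide ``execution_options`` support noted in the 0.6.1 engine entries above; the option values shown are just examples of keys a Connection or dialect may consult::

    from sqlalchemy import create_engine

    engine = create_engine(
        "sqlite://",
        execution_options={"autocommit": False},
    )

    # applies to all connections subsequently produced by this engine
    engine.update_execution_options(autocommit=True)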
change:: :tags: orm :tickets: Along with the UOW rewrite, this also removes an issue introduced in 0.6beta3 regarding topological cycle detection for units of work with long dependency cycles. We now use an algorithm written by Guido (thanks Guido!). .. change:: :tags: orm :tickets: 1764 one-to-many relationships now maintain a list of positive parent-child associations within the flush, preventing previous parents marked as deleted from cascading a delete or NULL foreign key set on those child objects, despite the end-user not removing the child from the old association. .. change:: :tags: orm :tickets: 1495 A collection lazy load will switch off default eagerloading on the reverse many-to-one side, since that loading is by definition unnecessary. .. change:: :tags: orm :tickets: Session.refresh() now does an equivalent expire() on the given instance first, so that the "refresh-expire" cascade is propagated. Previously, refresh() was not affected in any way by the presence of "refresh-expire" cascade. This is a change in behavior versus that of 0.6beta2, where the "lockmode" flag passed to refresh() would cause a version check to occur. Since the instance is first expired, refresh() always upgrades the object to the most recent version. .. change:: :tags: orm :tickets: 1754 The 'refresh-expire' cascade, when reaching a pending object, will expunge the object if the cascade also includes "delete-orphan", or will simply detach it otherwise. .. change:: :tags: orm :tickets: 1756 id(obj) is no longer used internally within topological.py, as the sorting functions now require hashable objects only. .. change:: :tags: orm :tickets: The ORM will set the docstring of all generated descriptors to None by default. This can be overridden using 'doc' (or if using Sphinx, attribute docstrings work too). .. change:: :tags: orm :tickets: Added kw argument 'doc' to all mapper property callables as well as Column(). Will assemble the string 'doc' as the '__doc__' attribute on the descriptor. .. change:: :tags: orm :tickets: 1761 Usage of version_id_col on a backend that supports cursor.rowcount for execute() but not executemany() now works when a delete is issued (already worked for saves, since those don't use executemany()). For a backend that doesn't support cursor.rowcount at all, a warning is emitted the same as with saves. .. change:: :tags: orm :tickets: The ORM now short-term caches the "compiled" form of insert() and update() constructs when flushing lists of objects of all the same class, thereby avoiding redundant compilation per individual INSERT/UPDATE within an individual flush() call. .. change:: :tags: orm :tickets: internal getattr(), setattr(), getcommitted() methods on ColumnProperty, CompositeProperty, RelationshipProperty have been underscored (i.e. are private), signature has changed. .. change:: :tags: engines :tickets: 1757 The C extension now also works with DBAPIs which use custom sequences as row (and not only tuples). .. change:: :tags: sql :tickets: 1755 Restored some bind-labeling logic from 0.5 which ensures that tables with column names that overlap another column of the form "_" won't produce errors if column._label is used as a bind name during an UPDATE. Test coverage which wasn't present in 0.5 has been added. .. change:: :tags: sql :tickets: 1729 somejoin.select(fold_equivalents=True) is no longer deprecated, and will eventually be rolled into a more comprehensive version of the feature for. .. 
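As a small illustration of the 'doc' keyword described above, under a hypothetical declarative mapping (the class and column names are made up)::

    from sqlalchemy import Column, Integer, String
    from sqlalchemy.ext.declarative import declarative_base

    Base = declarative_base()

    class User(Base):
        __tablename__ = 'user'

        # the 'doc' string becomes the __doc__ attribute of the generated
        # descriptor, which help() and Sphinx can then pick up; relationship()
        # and the other mapper property callables accept the same keyword
        id = Column(Integer, primary_key=True, doc="surrogate primary key")
        name = Column(String(50), doc="login name of the user")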
change:: :tags: sql :tickets: 1759 the Numeric type raises an *enormous* warning when expected to convert floats to Decimal from a DBAPI that returns floats. This includes SQLite, Sybase, MS-SQL. .. change:: :tags: sql :tickets: Fixed an error in expression typing which caused an endless loop for expressions with two NULL types. .. change:: :tags: sql :tickets: Fixed bug in execution_options() feature whereby the existing Transaction and other state information from the parent connection would not be propagated to the sub-connection. .. change:: :tags: sql :tickets: Added new 'compiled_cache' execution option. A dictionary where Compiled objects will be cached when the Connection compiles a clause expression into a dialect- and parameter- specific Compiled object. It is the user's responsibility to manage the size of this dictionary, which will have keys corresponding to the dialect, clause element, the column names within the VALUES or SET clause of an INSERT or UPDATE, as well as the "batch" mode for an INSERT or UPDATE statement. .. change:: :tags: sql :tickets: 1769 Added get_pk_constraint() to reflection.Inspector, similar to get_primary_keys() except returns a dict that includes the name of the constraint, for supported backends (PG so far). .. change:: :tags: sql :tickets: 1771 Table.create() and Table.drop() no longer apply metadata- level create/drop events. .. change:: :tags: ext :tickets: the compiler extension now allows @compiles decorators on base classes that extend to child classes, @compiles decorators on child classes that aren't broken by a @compiles decorator on the base class. .. change:: :tags: ext :tickets: Declarative will raise an informative error message if a non-mapped class attribute is referenced in the string-based relationship() arguments. .. change:: :tags: ext :tickets: Further reworked the "mixin" logic in declarative to additionally allow __mapper_args__ as a @classproperty on a mixin, such as to dynamically assign polymorphic_identity. .. change:: :tags: postgresql :tickets: 1071 Postgresql now reflects sequence names associated with SERIAL columns correctly, after the name of the sequence has been changed. Thanks to Kumar McMillan for the patch. .. change:: :tags: postgresql :tickets: Repaired missing import in psycopg2._PGNumeric type when unknown numeric is received. .. change:: :tags: postgresql :tickets: psycopg2/pg8000 dialects now aware of REAL[], FLOAT[], DOUBLE_PRECISION[], NUMERIC[] return types without raising an exception. .. change:: :tags: postgresql :tickets: 1769 Postgresql reflects the name of primary key constraints, if one exists. .. change:: :tags: oracle :tickets: Now using cx_oracle output converters so that the DBAPI returns natively the kinds of values we prefer: .. change:: :tags: oracle :tickets: 1759 NUMBER values with positive precision + scale convert to cx_oracle.STRING and then to Decimal. This allows perfect precision for the Numeric type when using cx_oracle. .. change:: :tags: oracle :tickets: STRING/FIXED_CHAR now convert to unicode natively. SQLAlchemy's String types then don't need to apply any kind of conversions. .. change:: :tags: firebird :tickets: The functionality of result.rowcount can be disabled on a per-engine basis by setting 'enable_rowcount=False' on create_engine(). Normally, cursor.rowcount is called after any UPDATE or DELETE statement unconditionally, because the cursor is then closed and Firebird requires an open cursor in order to get a rowcount. 
This call is slightly expensive, however, so it can be disabled. To re-enable on a per-execution basis, the 'enable_rowcount=True' execution option may be used. .. change:: :tags: examples :tickets: Updated the attribute_shard.py example to use a more robust method of searching a Query for binary expressions which compare columns against literal values. .. changelog:: :version: 0.6beta3 :released: Sun Mar 28 2010 .. change:: :tags: orm :tickets: 1675 Major feature: Added new "subquery" loading capability to relationship(). This is an eager loading option which generates a second SELECT for each collection represented in a query, across all parents at once. The query re-issues the original end-user query wrapped in a subquery, applies joins out to the target collection, and loads all those collections fully in one result, similar to "joined" eager loading but using all inner joins and not re-fetching full parent rows repeatedly (as most DBAPIs seem to do, even if columns are skipped). Subquery loading is available at mapper config level using "lazy='subquery'" and at the query options level using "subqueryload(props..)", "subqueryload_all(props...)". .. change:: :tags: orm :tickets: To accommodate the fact that there are now two kinds of eager loading available, the new names for eagerload() and eagerload_all() are joinedload() and joinedload_all(). The old names will remain as synonyms for the foreseeable future. .. change:: :tags: orm :tickets: The "lazy" flag on the relationship() function now accepts a string argument for all kinds of loading: "select", "joined", "subquery", "noload" and "dynamic", where the default is now "select". The old values of True/False/None still retain their usual meanings and will remain as synonyms for the foreseeable future. .. change:: :tags: orm :tickets: 921 Added with_hint() method to the Query() construct. This calls directly down to select().with_hint() and also accepts entities as well as tables and aliases. See with_hint() in the SQL section below. .. change:: :tags: orm :tickets: Fixed bug in Query whereby calling q.join(prop).from_self(...).join(prop) would fail to render the second join outside the subquery, when joining on the same criterion as was on the inside. .. change:: :tags: orm :tickets: Fixed bug in Query whereby the usage of aliased() constructs would fail if the underlying table (but not the actual alias) were referenced inside the subquery generated by q.from_self() or q.select_from(). .. change:: :tags: orm :tickets: Fixed bug which affected all eagerload() and similar options such that "remote" eager loads, i.e. eagerloads off of a lazy load such as query(A).options(eagerload(A.b, B.c)), wouldn't eagerload anything, but using eagerload("b.c") would work fine. .. change:: :tags: orm :tickets: Query gains an add_columns(\*columns) method which is a multi-column version of add_column(col). add_column(col) is future deprecated. .. change:: :tags: orm :tickets: Query.join() will detect if the end result will be "FROM A JOIN A", and will raise an error if so. .. change:: :tags: orm :tickets: Query.join(Cls.propname, from_joinpoint=True) will check more carefully that "Cls" is compatible with the current joinpoint, and act the same way as Query.join("propname", from_joinpoint=True) in that regard. .. change:: :tags: sql :tickets: 921 Added with_hint() method to the select() construct. Specify a table/alias, hint text, and optional dialect name, and "hints" will be rendered in the appropriate place in the statement. Works for Oracle, Sybase, MySQL.
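A brief sketch of the two eager-loading spellings described above, assuming pre-existing User/Address mappings and a bound engine (all names hypothetical)::

    from sqlalchemy.orm import sessionmaker, subqueryload, joinedload

    Session = sessionmaker(bind=engine)
    session = Session()

    # per-query subquery eager loading: a second SELECT per collection
    users = session.query(User).options(subqueryload(User.addresses)).all()

    # the renamed joined-eager-load option; eagerload() remains a synonym
    users = session.query(User).options(joinedload(User.addresses)).all()

    # the equivalent mapper-level setting would be relationship(Address,
    # lazy='subquery') on the User mapping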
.. change:: :tags: sql :tickets: 1747 Fixed bug introduced in 0.6beta2 where column labels would render inside of column expressions already assigned a label. .. change:: :tags: postgresql :tickets: 877 The psycopg2 dialect will log NOTICE messages via the "sqlalchemy.dialects.postgresql" logger name. .. change:: :tags: postgresql :tickets: 997 the TIME and TIMESTAMP types are now available from the postgresql dialect directly, which add the PG-specific argument 'precision' to both. 'precision' and 'timezone' are correctly reflected for both TIME and TIMESTAMP types. .. change:: :tags: mysql :tickets: 1752 No longer guessing that TINYINT(1) should be BOOLEAN when reflecting - TINYINT(1) is returned. Use Boolean/BOOLEAN in the table definition to get boolean conversion behavior. .. change:: :tags: oracle :tickets: 1744 The Oracle dialect will issue VARCHAR type definitions using character counts, i.e. VARCHAR2(50 CHAR), so that the column is sized in terms of characters and not bytes. Column reflection of character types will also use ALL_TAB_COLUMNS.CHAR_LENGTH instead of ALL_TAB_COLUMNS.DATA_LENGTH. Both of these behaviors take effect when the server version is 9 or higher - for version 8, the old behaviors are used. .. change:: :tags: declarative :tickets: 1746 Using a mixin won't break if the mixin implements an unpredictable __getattribute__(), i.e. Zope interfaces. .. change:: :tags: declarative :tickets: 1749 Using @classdecorator and similar on mixins to define __tablename__, __table_args__, etc. now works if the method references attributes on the ultimate subclass. .. change:: :tags: declarative :tickets: 1751 relationships and columns with foreign keys aren't allowed on declarative mixins, sorry. .. change:: :tags: ext :tickets: The sqlalchemy.orm.shard module now becomes an extension, sqlalchemy.ext.horizontal_shard. The old import works with a deprecation warning. .. changelog:: :version: 0.6beta2 :released: Sat Mar 20 2010 .. change:: :tags: py3k :tickets: Improved the installation/test setup regarding Python 3, now that Distribute runs on Py3k. distribute_setup.py is now included. See README.py3k for Python 3 installation/testing instructions. .. change:: :tags: orm :tickets: 1740 The official name for the relation() function is now relationship(), to eliminate confusion over the relational algebra term. relation() however will remain available in equal capacity for the foreseeable future. .. change:: :tags: orm :tickets: 1692 Added "version_id_generator" argument to Mapper; this is a callable that, given the current value of the "version_id_col", returns the next version number. Can be used for alternate versioning schemes such as uuid, timestamps. .. change:: :tags: orm :tickets: added "lockmode" kw argument to Session.refresh(), will pass through the string value to Query the same as in with_lockmode(), will also do a version check for a version_id_col-enabled mapping. .. change:: :tags: orm :tickets: 1188 Fixed bug whereby calling query(A).join(A.bs).add_entity(B) in a joined inheritance scenario would double-add B as a target and produce an invalid query. .. change:: :tags: orm :tickets: 1674 Fixed bug in session.rollback() which involved not removing formerly "pending" objects from the session before re-integrating "deleted" objects, which typically occurred with natural primary keys. If there was a primary key conflict between them, the attach of the deleted would fail internally. The formerly "pending" objects are now expunged first. .. 
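A possible use of the "version_id_generator" argument mentioned above, here with a uuid-based scheme; the table and class are invented for illustration::

    import uuid
    from sqlalchemy import MetaData, Table, Column, Integer, String
    from sqlalchemy.orm import mapper

    metadata = MetaData()
    widgets = Table('widgets', metadata,
                    Column('id', Integer, primary_key=True),
                    Column('version_uuid', String(36), nullable=False))

    class Widget(object):
        pass

    # the callable receives the current value of the version_id_col and
    # returns the next one; here a fresh uuid replaces the default counter
    mapper(Widget, widgets,
           version_id_col=widgets.c.version_uuid,
           version_id_generator=lambda current: str(uuid.uuid4()))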
change:: :tags: orm :tickets: 1719 Removed a lot of logging that nobody really cares about, logging that remains will respond to live changes in the log level. No significant overhead is added. .. change:: :tags: orm :tickets: Fixed bug in session.merge() which prevented dict-like collections from merging. .. change:: :tags: orm :tickets: session.merge() works with relations that specifically don't include "merge" in their cascade options - the target is ignored completely. .. change:: :tags: orm :tickets: 1681 session.merge() will not expire existing scalar attributes on an existing target if the target has a value for that attribute, even if the incoming merged doesn't have a value for the attribute. This prevents unnecessary loads on existing items. Will still mark the attr as expired if the destination doesn't have the attr, though, which fulfills some contracts of deferred cols. .. change:: :tags: orm :tickets: 1680 The "allow_null_pks" flag is now called "allow_partial_pks", defaults to True, acts like it did in 0.5 again. Except, it also is implemented within merge() such that a SELECT won't be issued for an incoming instance with partially NULL primary key if the flag is False. .. change:: :tags: orm :tickets: 1737 Fixed bug in 0.6-reworked "many-to-one" optimizations such that a many-to-one that is against a non-primary key column on the remote table (i.e. foreign key against a UNIQUE column) will pull the "old" value in from the database during a change, since if it's in the session we will need it for proper history/backref accounting, and we can't pull from the local identity map on a non-primary key column. .. change:: :tags: orm :tickets: 1731 fixed internal error which would occur if calling has() or similar complex expression on a single-table inheritance relation(). .. change:: :tags: orm :tickets: 1688 query.one() no longer applies LIMIT to the query, this to ensure that it fully counts all object identities present in the result, even in the case where joins may conceal multiple identities for two or more rows. As a bonus, one() can now also be called with a query that issued from_statement() to start with since it no longer modifies the query. .. change:: :tags: orm :tickets: 1727 query.get() now returns None if queried for an identifier that is present in the identity map with a different class than the one requested, i.e. when using polymorphic loading. .. change:: :tags: orm :tickets: 1706 A major fix in query.join(), when the "on" clause is an attribute of an aliased() construct, but there is already an existing join made out to a compatible target, query properly joins to the right aliased() construct instead of sticking onto the right side of the existing join. .. change:: :tags: orm :tickets: 1362 Slight improvement to the fix for to not issue needless updates of the primary key column during a so-called "row switch" operation, i.e. add + delete of two objects with the same PK. .. change:: :tags: orm :tickets: Now uses sqlalchemy.orm.exc.DetachedInstanceError when an attribute load or refresh action fails due to object being detached from any Session. UnboundExecutionError is specific to engines bound to sessions and statements. .. change:: :tags: orm :tickets: Query called in the context of an expression will render disambiguating labels in all cases. Note that this does not apply to the existing .statement and .subquery() accessor/method, which still honors the .with_labels() setting that defaults to False. .. 
change:: :tags: orm :tickets: 1676 Query.union() retains disambiguating labels within the returned statement, thus avoiding various SQL composition errors which can result from column name conflicts. .. change:: :tags: orm :tickets: Fixed bug in attribute history that inadvertently invoked __eq__ on mapped instances. .. change:: :tags: orm :tickets: Some internal streamlining of object loading grants a small speedup for large results, estimates are around 10-15%. Gave the "state" internals a good solid cleanup with less complexity, datamembers, method calls, blank dictionary creates. .. change:: :tags: orm :tickets: 1689 Documentation clarification for query.delete() .. change:: :tags: orm :tickets: Fixed cascade bug in many-to-one relation() when attribute was set to None, introduced in r6711 (cascade deleted items into session during add()). .. change:: :tags: orm :tickets: 1736 Calling query.order_by() or query.distinct() before calling query.select_from(), query.with_polymorphic(), or query.from_statement() raises an exception now instead of silently dropping those criterion. .. change:: :tags: orm :tickets: 1735 query.scalar() now raises an exception if more than one row is returned. All other behavior remains the same. .. change:: :tags: orm :tickets: 1692 Fixed bug which caused "row switch" logic, that is an INSERT and DELETE replaced by an UPDATE, to fail when version_id_col was in use. .. change:: :tags: sql :tickets: 1714 join() will now simulate a NATURAL JOIN by default. Meaning, if the left side is a join, it will attempt to join the right side to the rightmost side of the left first, and not raise any exceptions about ambiguous join conditions if successful even if there are further join targets across the rest of the left. .. change:: :tags: sql :tickets: The most common result processors conversion function were moved to the new "processors" module. Dialect authors are encouraged to use those functions whenever they correspond to their needs instead of implementing custom ones. .. change:: :tags: sql :tickets: 1694, 1698 SchemaType and subclasses Boolean, Enum are now serializable, including their ddl listener and other event callables. .. change:: :tags: sql :tickets: Some platforms will now interpret certain literal values as non-bind parameters, rendered literally into the SQL statement. This to support strict SQL-92 rules that are enforced by some platforms including MS-SQL and Sybase. In this model, bind parameters aren't allowed in the columns clause of a SELECT, nor are certain ambiguous expressions like "?=?". When this mode is enabled, the base compiler will render the binds as inline literals, but only across strings and numeric values. Other types such as dates will raise an error, unless the dialect subclass defines a literal rendering function for those. The bind parameter must have an embedded literal value already or an error is raised (i.e. won't work with straight bindparam('x')). Dialects can also expand upon the areas where binds are not accepted, such as within argument lists of functions (which don't work on MS-SQL when native SQL binding is used). .. change:: :tags: sql :tickets: Added "unicode_errors" parameter to String, Unicode, etc. Behaves like the 'errors' keyword argument to the standard library's string.decode() functions. This flag requires that `convert_unicode` is set to `"force"` - otherwise, SQLAlchemy is not guaranteed to handle the task of unicode conversion. 
Note that this flag adds significant performance overhead to row-fetching operations for backends that already return unicode objects natively (which most DBAPIs do). This flag should only be used as an absolute last resort for reading strings from a column with varied or corrupted encodings, which only applies to databases that accept invalid encodings in the first place (i.e. MySQL. *not* PG, Sqlite, etc.) .. change:: :tags: sql :tickets: Added math negation operator support, -x. .. change:: :tags: sql :tickets: FunctionElement subclasses are now directly executable the same way any func.foo() construct is, with automatic SELECT being applied when passed to execute(). .. change:: :tags: sql :tickets: The "type" and "bind" keyword arguments of a func.foo() construct are now local to "func." constructs and are not part of the FunctionElement base class, allowing a "type" to be handled in a custom constructor or class-level variable. .. change:: :tags: sql :tickets: Restored the keys() method to ResultProxy. .. change:: :tags: sql :tickets: 1647, 1683 The type/expression system now does a more complete job of determining the return type from an expression as well as the adaptation of the Python operator into a SQL operator, based on the full left/right/operator of the given expression. In particular the date/time/interval system created for Postgresql EXTRACT in has now been generalized into the type system. The previous behavior which often occured of an expression "column + literal" forcing the type of "literal" to be the same as that of "column" will now usually not occur - the type of "literal" is first derived from the Python type of the literal, assuming standard native Python types + date types, before falling back to that of the known type on the other side of the expression. If the "fallback" type is compatible (i.e. CHAR from String), the literal side will use that. TypeDecorator types override this by default to coerce the "literal" side unconditionally, which can be changed by implementing the coerce_compared_value() method. Also part of. .. change:: :tags: sql :tickets: Made sqlalchemy.sql.expressions.Executable part of public API, used for any expression construct that can be sent to execute(). FunctionElement now inherits Executable so that it gains execution_options(), which are also propagated to the select() that's generated within execute(). Executable in turn subclasses _Generative which marks any ClauseElement that supports the @_generative decorator - these may also become "public" for the benefit of the compiler extension at some point. .. change:: :tags: sql :tickets: 1579 A change to the solution for - an end-user defined bind parameter name that directly conflicts with a column-named bind generated directly from the SET or VALUES clause of an update/insert generates a compile error. This reduces call counts and eliminates some cases where undesirable name conflicts could still occur. .. change:: :tags: sql :tickets: 1705 Column() requires a type if it has no foreign keys (this is not new). An error is now raised if a Column() has no type and no foreign keys. .. change:: :tags: sql :tickets: 1717 the "scale" argument of the Numeric() type is honored when coercing a returned floating point value into a string on its way to Decimal - this allows accuracy to function on SQLite, MySQL. .. change:: :tags: sql :tickets: the copy() method of Column now copies over uninitialized "on table attach" events. Helps with the new declarative "mixin" capability. .. 
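A rough sketch of overriding coerce_compared_value() on a TypeDecorator, as referenced above; the type itself is invented for illustration::

    from sqlalchemy import Integer, String
    from sqlalchemy.types import TypeDecorator

    class FlexibleString(TypeDecorator):
        """String-based type that picks the comparison type from the literal."""

        impl = String

        def coerce_compared_value(self, op, value):
            # return the type to apply to the literal on the other side of
            # the expression, rather than unconditionally coercing it to
            # this type (the TypeDecorator default)
            if isinstance(value, int):
                return Integer()
            return self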
change:: :tags: engines :tickets: Added an optional C extension to speed up the sql layer by reimplementing RowProxy and the most common result processors. The actual speedups will depend heavily on your DBAPI and the mix of datatypes used in your tables, and can vary from a 30% improvement to more than 200%. It also provides a modest (~15-20%) indirect improvement to ORM speed for large queries. Note that it is *not* built/installed by default. See README for installation instructions. .. change:: :tags: engines :tickets: the execution sequence pulls all rowcount/last inserted ID info from the cursor before commit() is called on the DBAPI connection in an "autocommit" scenario. This helps mxodbc with rowcount and is probably a good idea overall. .. change:: :tags: engines :tickets: 1719 Opened up logging a bit such that isEnabledFor() is called more often, so that changes to the log level for engine/pool will be reflected on next connect. This adds a small amount of method call overhead. It's negligible and will make life a lot easier for all those situations when logging just happens to be configured after create_engine() is called. .. change:: :tags: engines :tickets: The assert_unicode flag is deprecated. SQLAlchemy will raise a warning in all cases where it is asked to encode a non-unicode Python string, as well as when a Unicode or UnicodeType type is explicitly passed a bytestring. The String type will do nothing for DBAPIs that already accept Python unicode objects. .. change:: :tags: engines :tickets: Bind parameters are sent as a tuple instead of a list. Some backend drivers will not accept bind parameters as a list. .. change:: :tags: engines :tickets: threadlocal engine wasn't properly closing the connection upon close() - fixed that. .. change:: :tags: engines :tickets: Transaction object doesn't rollback or commit if it isn't "active", allows more accurate nesting of begin/rollback/commit. .. change:: :tags: engines :tickets: Python unicode objects as binds result in the Unicode type, not string, thus eliminating a certain class of unicode errors on drivers that don't support unicode binds. .. change:: :tags: engines :tickets: 1555 Added "logging_name" argument to create_engine(), Pool() constructor as well as "pool_logging_name" argument to create_engine() which filters down to that of Pool. Issues the given string name within the "name" field of logging messages instead of the default hex identifier string. .. change:: :tags: engines :tickets: The visit_pool() method of Dialect is removed, and replaced with on_connect(). This method returns a callable which receives the raw DBAPI connection after each one is created. The callable is assembled into a first_connect/connect pool listener by the connection strategy if non-None. Provides a simpler interface for dialects. .. change:: :tags: engines :tickets: 1728 StaticPool now initializes, disposes and recreates without opening a new connection - the connection is only opened when first requested. dispose() also works on AssertionPool now. .. change:: :tags: ticket: 1673, metadata :tickets: Added the ability to strip schema information when using "tometadata" by passing "schema=None" as an argument. If schema is not specified then the table's schema is retained. .. 
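To illustrate the tometadata()/schema=None behavior noted above (table and schema names are placeholders)::

    from sqlalchemy import MetaData, Table, Column, Integer

    source_meta = MetaData()
    accounts = Table('accounts', source_meta,
                     Column('id', Integer, primary_key=True),
                     schema='finance')

    target_meta = MetaData()

    # schema=None strips the 'finance' schema from the copy; omitting the
    # argument retains the original schema
    accounts_copy = accounts.tometadata(target_meta, schema=None)
    assert accounts_copy.schema is None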
change:: :tags: declarative :tickets: DeclarativeMeta exclusively uses cls.__dict__ (not dict\_) as the source of class information; _as_declarative exclusively uses the dict\_ passed to it as the source of class information (which when using DeclarativeMeta is cls.__dict__). This should in theory make it easier for custom metaclasses to modify the state passed into _as_declarative. .. change:: :tags: declarative :tickets: 1707 declarative now accepts mixin classes directly, as a means to provide common functional and column-based elements on all subclasses, as well as a means to propagate a fixed set of __table_args__ or __mapper_args__ to subclasses. For custom combinations of __table_args__/__mapper_args__ from an inherited mixin to local, descriptors can now be used. New details are all up in the Declarative documentation. Thanks to Chris Withers for putting up with my strife on this. .. change:: :tags: declarative :tickets: 1393 the __mapper_args__ dict is copied when propagating to a subclass, and is taken straight off the class __dict__ to avoid any propagation from the parent. mapper inheritance already propagates the things you want from the parent mapper. .. change:: :tags: declarative :tickets: 1732 An exception is raised when a single-table subclass specifies a column that is already present on the base class. .. change:: :tags: mysql :tickets: 1655 Fixed reflection bug whereby when COLLATE was present, nullable flag and server defaults would not be reflected. .. change:: :tags: mysql :tickets: Fixed reflection of TINYINT(1) "boolean" columns defined with integer flags like UNSIGNED. .. change:: :tags: mysql :tickets: 1668 Further fixes for the mysql-connector dialect. .. change:: :tags: mysql :tickets: 1496 Composite PK table on InnoDB where the "autoincrement" column isn't first will emit an explicit "KEY" phrase within CREATE TABLE thereby avoiding errors. .. change:: :tags: mysql :tickets: 1634 Added reflection/create table support for a wide range of MySQL keywords. .. change:: :tags: mysql :tickets: 1580 Fixed import error which could occur reflecting tables on a Windows host .. change:: :tags: mssql :tickets: Re-established support for the pymssql dialect. .. change:: :tags: mssql :tickets: Various fixes for implicit returning, reflection, etc. - the MS-SQL dialects aren't quite complete in 0.6 yet (but are close) .. change:: :tags: mssql :tickets: 1710 Added basic support for mxODBC. .. change:: :tags: mssql :tickets: Removed the text_as_varchar option. .. change:: :tags: oracle :tickets: "out" parameters require a type that is supported by cx_oracle. An error will be raised if no cx_oracle type can be found. .. change:: :tags: oracle :tickets: Oracle 'DATE' now does not perform any result processing, as the DATE type in Oracle stores full date+time objects, that's what you'll get. Note that the generic types.Date type *will* still call value.date() on incoming values, however. When reflecting a table, the reflected type will be 'DATE'. .. change:: :tags: oracle :tickets: 1670 Added preliminary support for Oracle's WITH_UNICODE mode. At the very least this establishes initial support for cx_Oracle with Python 3. When WITH_UNICODE mode is used in Python 2.xx, a large and scary warning is emitted asking that the user seriously consider the usage of this difficult mode of operation. .. change:: :tags: oracle :tickets: 1712 The except_() method now renders as MINUS on Oracle, which is more or less equivalent on that platform. .. 
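A minimal declarative mixin along the lines described above; names are illustrative only::

    from sqlalchemy import Column, Integer, String
    from sqlalchemy.ext.declarative import declarative_base

    Base = declarative_base()

    class CommonMixin(object):
        # plain columns, __table_args__ and __mapper_args__ on a mixin are
        # propagated to each subclass; columns bearing ForeignKey and
        # relationship() are not allowed here, per the note above
        id = Column(Integer, primary_key=True)
        __table_args__ = {'mysql_engine': 'InnoDB'}

    class Customer(CommonMixin, Base):
        __tablename__ = 'customer'
        name = Column(String(50))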
change:: :tags: oracle :tickets: 651 Added support for rendering and reflecting TIMESTAMP WITH TIME ZONE, i.e. TIMESTAMP(timezone=True). .. change:: :tags: oracle :tickets: Oracle INTERVAL type can now be reflected. .. change:: :tags: sqlite :tickets: 1685 Added "native_datetime=True" flag to create_engine(). This will cause the DATE and TIMESTAMP types to skip all bind parameter and result row processing, under the assumption that PARSE_DECLTYPES has been enabled on the connection. Note that this is not entirely compatible with the "func.current_date()", which will be returned as a string. .. change:: :tags: sybase :tickets: Implemented a preliminary working dialect for Sybase, with sub-implementations for Python-Sybase as well as Pyodbc. Handles table creates/drops and basic round trip functionality. Does not yet include reflection or comprehensive support of unicode/special expressions/etc. .. change:: :tags: examples :tickets: Changed the beaker cache example a bit to have a separate RelationCache option for lazyload caching. This object does a lookup among any number of potential attributes more efficiently by grouping several into a common structure. Both FromCache and RelationCache are simpler individually. .. change:: :tags: documentation :tickets: 1700 Major cleanup work in the docs to link class, function, and method names into the API docs. .. changelog:: :version: 0.6beta1 :released: Wed Feb 03 2010 .. change:: :tags: release, major :tickets: For the full set of feature descriptions, see http://www.sqlalchemy.org/trac/wiki/06Migration . This document is a work in progress. .. change:: :tags: release, major :tickets: All bug fixes and feature enhancements from the most recent 0.5 version and below are also included within 0.6. .. change:: :tags: release, major :tickets: Platforms targeted now include Python 2.4/2.5/2.6, Python 3.1, Jython2.5. .. change:: :tags: orm :tickets: Changes to query.update() and query.delete(): - the 'expire' option on query.update() has been renamed to 'fetch', thus matching that of query.delete(). 'expire' is deprecated and issues a warning. - query.update() and query.delete() both default to 'evaluate' for the synchronize strategy. - the 'synchronize' strategy for update() and delete() raises an error on failure. There is no implicit fallback onto "fetch". Failure of evaluation is based on the structure of criteria, so success/failure is deterministic based on code structure. .. change:: :tags: orm :tickets: 1186, 1492, 1544 Enhancements on many-to-one relations: - many-to-one relations now fire off a lazyload in fewer cases, including in most cases will not fetch the "old" value when a new one is replaced. - many-to-one relation to a joined-table subclass now uses get() for a simple load (known as the "use_get" condition), i.e. Related->Sub(Base), without the need to redefine the primaryjoin condition in terms of the base table. - specifying a foreign key with a declarative column, i.e. ForeignKey(MyRelatedClass.id) doesn't break the "use_get" condition from taking place - relation(), eagerload(), and eagerload_all() now feature an option called "innerjoin". Specify `True` or `False` to control whether an eager join is constructed as an INNER or OUTER join. Default is `False` as always. The mapper options will override whichever setting is specified on relation(). Should generally be set for many-to-one, not nullable foreign key relations to allow improved join performance. 
- the behavior of eagerloading such that the main query is wrapped in a subquery when LIMIT/OFFSET are present now makes an exception for the case when all eager loads are many-to-one joins. In those cases, the eager joins are against the parent table directly along with the limit/offset without the extra overhead of a subquery, since a many-to-one join does not add rows to the result. .. change:: :tags: orm :tickets: Enhancements / Changes on Session.merge(): .. change:: :tags: orm :tickets: the "dont_load=True" flag on Session.merge() is deprecated and is now "load=False". .. change:: :tags: orm :tickets: Session.merge() is performance optimized, using half the call counts for "load=False" mode compared to 0.5 and significantly fewer SQL queries in the case of collections for "load=True" mode. .. change:: :tags: orm :tickets: merge() will not issue a needless merge of attributes if the given instance is the same instance which is already present. .. change:: :tags: orm :tickets: merge() now also merges the "options" associated with a given state, i.e. those passed through query.options() which follow along with an instance, such as options to eagerly- or lazyily- load various attributes. This is essential for the construction of highly integrated caching schemes. This is a subtle behavioral change vs. 0.5. .. change:: :tags: orm :tickets: A bug was fixed regarding the serialization of the "loader path" present on an instance's state, which is also necessary when combining the usage of merge() with serialized state and associated options that should be preserved. .. change:: :tags: orm :tickets: The all new merge() is showcased in a new comprehensive example of how to integrate Beaker with SQLAlchemy. See the notes in the "examples" note below. .. change:: :tags: orm :tickets: 1362 Primary key values can now be changed on a joined-table inheritance object, and ON UPDATE CASCADE will be taken into account when the flush happens. Set the new "passive_updates" flag to False on mapper() when using SQLite or MySQL/MyISAM. .. change:: :tags: orm :tickets: 1671 flush() now detects when a primary key column was updated by an ON UPDATE CASCADE operation from another primary key, and can then locate the row for a subsequent UPDATE on the new PK value. This occurs when a relation() is there to establish the relationship as well as passive_updates=True. .. change:: :tags: orm :tickets: the "save-update" cascade will now cascade the pending *removed* values from a scalar or collection attribute into the new session during an add() operation. This so that the flush() operation will also delete or modify rows of those disconnected items. .. change:: :tags: orm :tickets: 1531 Using a "dynamic" loader with a "secondary" table now produces a query where the "secondary" table is *not* aliased. This allows the secondary Table object to be used in the "order_by" attribute of the relation(), and also allows it to be used in filter criterion against the dynamic relation. .. change:: :tags: orm :tickets: 1643 relation() with uselist=False will emit a warning when an eager or lazy load locates more than one valid value for the row. This may be due to primaryjoin/secondaryjoin conditions which aren't appropriate for an eager LEFT OUTER JOIN or for other conditions. .. change:: :tags: orm :tickets: 1633 an explicit check occurs when a synonym() is used with map_column=True, when a ColumnProperty (deferred or otherwise) exists separately in the properties dictionary sent to mapper with the same keyname. 
Instead of silently replacing the existing property (and possible options on that property), an error is raised. .. change:: :tags: orm :tickets: a "dynamic" loader sets up its query criterion at construction time so that the actual query is returned from non-cloning accessors like "statement". .. change:: :tags: orm :tickets: the "named tuple" objects returned when iterating a Query() are now pickleable. .. change:: :tags: orm :tickets: 1542 mapping to a select() construct now requires that you make an alias() out of it distinctly. This to eliminate confusion over such issues as .. change:: :tags: orm :tickets: 1537 query.join() has been reworked to provide more consistent behavior and more flexibility (includes) .. change:: :tags: orm :tickets: query.select_from() accepts multiple clauses to produce multiple comma separated entries within the FROM clause. Useful when selecting from multiple-homed join() clauses. .. change:: :tags: orm :tickets: query.select_from() also accepts mapped classes, aliased() constructs, and mappers as arguments. In particular this helps when querying from multiple joined-table classes to ensure the full join gets rendered. .. change:: :tags: orm :tickets: 1135 query.get() can be used with a mapping to an outer join where one or more of the primary key values are None. .. change:: :tags: orm :tickets: 1568 query.from_self(), query.union(), others which do a "SELECT * from (SELECT...)" type of nesting will do a better job translating column expressions within the subquery to the columns clause of the outer query. This is potentially backwards incompatible with 0.5, in that this may break queries with literal expressions that do not have labels applied (i.e. literal('foo'), etc.) .. change:: :tags: orm :tickets: 1622 relation primaryjoin and secondaryjoin now check that they are column-expressions, not just clause elements. this prohibits things like FROM expressions being placed there directly. .. change:: :tags: orm :tickets: 1415 `expression.null()` is fully understood the same way None is when comparing an object/collection-referencing attribute within query.filter(), filter_by(), etc. .. change:: :tags: orm :tickets: 1052 added "make_transient()" helper function which transforms a persistent/ detached instance into a transient one (i.e. deletes the instance_key and removes from any session.) .. change:: :tags: orm :tickets: 1339 the allow_null_pks flag on mapper() is deprecated, and the feature is turned "on" by default. This means that a row which has a non-null value for any of its primary key columns will be considered an identity. The need for this scenario typically only occurs when mapping to an outer join. .. change:: :tags: orm :tickets: the mechanics of "backref" have been fully merged into the finer grained "back_populates" system, and take place entirely within the _generate_backref() method of RelationProperty. This makes the initialization procedure of RelationProperty simpler and allows easier propagation of settings (such as from subclasses of RelationProperty) into the reverse reference. The internal BackRef() is gone and backref() returns a plain tuple that is understood by RelationProperty. .. change:: :tags: orm :tickets: 1569 The version_id_col feature on mapper() will raise a warning when used with dialects that don't support "rowcount" adequately. .. change:: :tags: orm :tickets: added "execution_options()" to Query, to so options can be passed to the resulting statement. 
Currently only Select-statements have these options, and the only option used is "stream_results", and the only dialect which knows "stream_results" is psycopg2. .. change:: :tags: orm :tickets: Query.yield_per() will set the "stream_results" statement option automatically. .. change:: :tags: orm :tickets: Deprecated or removed: * 'allow_null_pks' flag on mapper() is deprecated. It does nothing now and the setting is "on" in all cases. * 'transactional' flag on sessionmaker() and others is removed. Use 'autocommit=True' to indicate 'transactional=False'. * 'polymorphic_fetch' argument on mapper() is removed. Loading can be controlled using the 'with_polymorphic' option. * 'select_table' argument on mapper() is removed. Use 'with_polymorphic=("*", )' for this functionality. * 'proxy' argument on synonym() is removed. This flag did nothing throughout 0.5, as the "proxy generation" behavior is now automatic. * Passing a single list of elements to eagerload(), eagerload_all(), contains_eager(), lazyload(), defer(), and undefer() instead of multiple positional \*args is deprecated. * Passing a single list of elements to query.order_by(), query.group_by(), query.join(), or query.outerjoin() instead of multiple positional \*args is deprecated. * query.iterate_instances() is removed. Use query.instances(). * Query.query_from_parent() is removed. Use the sqlalchemy.orm.with_parent() function to produce a "parent" clause, or alternatively query.with_parent(). * query._from_self() is removed, use query.from_self() instead. * the "comparator" argument to composite() is removed. Use "comparator_factory". * RelationProperty._get_join() is removed. * the 'echo_uow' flag on Session is removed. Use logging on the "sqlalchemy.orm.unitofwork" name. * session.clear() is removed. use session.expunge_all(). * session.save(), session.update(), session.save_or_update() are removed. Use session.add() and session.add_all(). * the "objects" flag on session.flush() remains deprecated. * the "dont_load=True" flag on session.merge() is deprecated in favor of "load=False". * ScopedSession.mapper remains deprecated. See the usage recipe at http://www.sqlalchemy.org/trac/wiki/UsageRecipes/SessionAwareMapper * passing an InstanceState (internal SQLAlchemy state object) to attributes.init_collection() or attributes.get_history() is deprecated. These functions are public API and normally expect a regular mapped object instance. * the 'engine' parameter to declarative_base() is removed. Use the 'bind' keyword argument. .. change:: :tags: sql :tickets: the "autocommit" flag on select() and text() as well as select().autocommit() are deprecated - now call .execution_options(autocommit=True) on either of those constructs, also available directly on Connection and orm.Query. .. change:: :tags: sql :tickets: the autoincrement flag on column now indicates the column which should be linked to cursor.lastrowid, if that method is used. See the API docs for details. .. change:: :tags: sql :tickets: 1566 an executemany() now requires that all bound parameter sets require that all keys are present which are present in the first bound parameter set. The structure and behavior of an insert/update statement is very much determined by the first parameter set, including which defaults are going to fire off, and a minimum of guesswork is performed with all the rest so that performance is not impacted. For this reason defaults would otherwise silently "fail" for missing parameters, so this is now guarded against. .. 
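A small example of the executemany() contract described above, where every parameter set must supply the same keys as the first; the table is hypothetical::

    from sqlalchemy import create_engine, MetaData, Table, Column, Integer, String

    engine = create_engine('sqlite://')
    metadata = MetaData()
    notes = Table('notes', metadata,
                  Column('id', Integer, primary_key=True),
                  Column('title', String(50)),
                  Column('body', String(200), default=''))
    metadata.create_all(engine)

    conn = engine.connect()
    # executemany-style execution: each dictionary supplies the same keys
    # as the first; a missing key now raises rather than silently skipping
    # the column's default
    conn.execute(notes.insert(), [
        {'title': 'a', 'body': 'first'},
        {'title': 'b', 'body': 'second'},
    ])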
change:: :tags: sql :tickets: returning() support is native to insert(), update(), delete(). Implementations of varying levels of functionality exist for Postgresql, Firebird, MSSQL and Oracle. returning() can be called explicitly with column expressions which are then returned in the resultset, usually via fetchone() or first(). insert() constructs will also use RETURNING implicitly to get newly generated primary key values, if the database version in use supports it (a version number check is performed). This occurs if no end-user returning() was specified. .. change:: :tags: sql :tickets: 1665 union(), intersect(), except() and other "compound" types of statements have more consistent behavior w.r.t. parenthesizing. Each compound element embedded within another will now be grouped with parenthesis - previously, the first compound element in the list would not be grouped, as SQLite doesn't like a statement to start with parenthesis. However, Postgresql in particular has precedence rules regarding INTERSECT, and it is more consistent for parenthesis to be applied equally to all sub-elements. So now, the workaround for SQLite is also what the workaround for PG was previously - when nesting compound elements, the first one usually needs ".alias().select()" called on it to wrap it inside of a subquery. .. change:: :tags: sql :tickets: 1579 insert() and update() constructs can now embed bindparam() objects using names that match the keys of columns. These bind parameters will circumvent the usual route to those keys showing up in the VALUES or SET clause of the generated SQL. .. change:: :tags: sql :tickets: 1524 the Binary type now returns data as a Python string (or a "bytes" type in Python 3), instead of the built- in "buffer" type. This allows symmetric round trips of binary data. .. change:: :tags: sql :tickets: Added a tuple_() construct, allows sets of expressions to be compared to another set, typically with IN against composite primary keys or similar. Also accepts an IN with multiple columns. The "scalar select can have only one column" error message is removed - will rely upon the database to report problems with col mismatch. .. change:: :tags: sql :tickets: User-defined "default" and "onupdate" callables which accept a context should now call upon "context.current_parameters" to get at the dictionary of bind parameters currently being processed. This dict is available in the same way regardless of single-execute or executemany-style statement execution. .. change:: :tags: sql :tickets: 1428 multi-part schema names, i.e. with dots such as "dbo.master", are now rendered in select() labels with underscores for dots, i.e. "dbo_master_table_column". This is a "friendly" label that behaves better in result sets. .. change:: :tags: sql :tickets: removed needless "counter" behavior with select() labelnames that match a column name in the table, i.e. generates "tablename_id" for "id", instead of "tablename_id_1" in an attempt to avoid naming conflicts, when the table has a column actually named "tablename_id" - this is because the labeling logic is always applied to all columns so a naming conflict will never occur. .. change:: :tags: sql :tickets: 1628 calling expr.in_([]), i.e. with an empty list, emits a warning before issuing the usual "expr != expr" clause. The "expr != expr" can be very expensive, and it's preferred that the user not issue in_() if the list is empty, instead simply not querying, or modifying the criterion as appropriate for more complex situations. .. 
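Short sketches of the tuple_() and returning() constructs described above, against a hypothetical 'orders' table::

    from sqlalchemy import MetaData, Table, Column, Integer, select, tuple_

    metadata = MetaData()
    orders = Table('orders', metadata,
                   Column('customer_id', Integer, primary_key=True),
                   Column('order_id', Integer, primary_key=True),
                   Column('total', Integer))

    # compare a set of columns against a list of composite values,
    # typically a composite primary key (backend support varies)
    stmt = select([orders]).where(
        tuple_(orders.c.customer_id, orders.c.order_id).in_([(1, 1), (2, 5)])
    )

    # explicit RETURNING of values generated by the statement, on backends
    # that implement it (Postgresql, Firebird, MSSQL, Oracle)
    ins = orders.insert().values(
        customer_id=3, order_id=1, total=100).returning(orders.c.order_id)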
change:: :tags: sql :tickets: Added "execution_options()" to select()/text(), which set the default options for the Connection. See the note in "engines". .. change:: :tags: sql :tickets: 1131 Deprecated or removed: * "scalar" flag on select() is removed, use select.as_scalar(). * "shortname" attribute on bindparam() is removed. * postgres_returning, firebird_returning flags on insert(), update(), delete() are deprecated, use the new returning() method. * fold_equivalents flag on join is deprecated (will remain until is implemented) .. change:: :tags: engines :tickets: 443 transaction isolation level may be specified with create_engine(... isolation_level="..."); available on postgresql and sqlite. .. change:: :tags: engines :tickets: Connection has execution_options(), generative method which accepts keywords that affect how the statement is executed w.r.t. the DBAPI. Currently supports "stream_results", causes psycopg2 to use a server side cursor for that statement, as well as "autocommit", which is the new location for the "autocommit" option from select() and text(). select() and text() also have .execution_options() as well as ORM Query(). .. change:: :tags: engines :tickets: 1630 fixed the import for entrypoint-driven dialects to not rely upon silly tb_info trick to determine import error status. .. change:: :tags: engines :tickets: added first() method to ResultProxy, returns first row and closes result set immediately. .. change:: :tags: engines :tickets: RowProxy objects are now pickleable, i.e. the object returned by result.fetchone(), result.fetchall() etc. .. change:: :tags: engines :tickets: RowProxy no longer has a close() method, as the row no longer maintains a reference to the parent. Call close() on the parent ResultProxy instead, or use autoclose. .. change:: :tags: engines :tickets: 1586 ResultProxy internals have been overhauled to greatly reduce method call counts when fetching columns. Can provide a large speed improvement (up to more than 100%) when fetching large result sets. The improvement is larger when fetching columns that have no type-level processing applied and when using results as tuples (instead of as dictionaries). Many thanks to Elixir's Gaëtan de Menten for this dramatic improvement ! .. change:: :tags: engines :tickets: Databases which rely upon postfetch of "last inserted id" to get at a generated sequence value (i.e. MySQL, MS-SQL) now work correctly when there is a composite primary key where the "autoincrement" column is not the first primary key column in the table. .. change:: :tags: engines :tickets: the last_inserted_ids() method has been renamed to the descriptor "inserted_primary_key". .. change:: :tags: engines :tickets: 1554 setting echo=False on create_engine() now sets the loglevel to WARN instead of NOTSET. This so that logging can be disabled for a particular engine even if logging for "sqlalchemy.engine" is enabled overall. Note that the default setting of "echo" is `None`. .. change:: :tags: engines :tickets: ConnectionProxy now has wrapper methods for all transaction lifecycle events, including begin(), rollback(), commit() begin_nested(), begin_prepared(), prepare(), release_savepoint(), etc. .. change:: :tags: engines :tickets: Connection pool logging now uses both INFO and DEBUG log levels for logging. INFO is for major events such as invalidated connections, DEBUG for all the acquire/return logging. `echo_pool` can be False, None, True or "debug" the same way as `echo` works. .. 
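Illustrating the isolation_level argument and per-statement execution options mentioned above; the URL and table name are placeholders::

    from sqlalchemy import create_engine

    # per-engine transaction isolation level (postgresql and sqlite)
    engine = create_engine(
        "postgresql://scott:tiger@localhost/test",
        isolation_level="SERIALIZABLE",
    )

    conn = engine.connect()
    # stream_results asks psycopg2 to use a server-side cursor for this
    # statement; first() returns one row and closes the result immediately
    result = conn.execution_options(stream_results=True).execute(
        "select * from very_large_table")
    row = result.first()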
change:: :tags: engines :tickets: 1621 All pyodbc-dialects now support extra pyodbc-specific kw arguments 'ansi', 'unicode_results', 'autocommit'. .. change:: :tags: engines :tickets: the "threadlocal" engine has been rewritten and simplified and now supports SAVEPOINT operations. .. change:: :tags: engines :tickets: deprecated or removed * result.last_inserted_ids() is deprecated. Use result.inserted_primary_key * dialect.get_default_schema_name(connection) is now public via dialect.default_schema_name. * the "connection" argument from engine.transaction() and engine.run_callable() is removed - Connection itself now has those methods. All four methods accept \*args and \**kwargs which are passed to the given callable, as well as the operating connection. .. change:: :tags: schema :tickets: 1541 the `__contains__()` method of `MetaData` now accepts strings or `Table` objects as arguments. If given a `Table`, the argument is converted to `table.key` first, i.e. "[schemaname.]" .. change:: :tags: schema :tickets: deprecated MetaData.connect() and ThreadLocalMetaData.connect() have been removed - send the "bind" attribute to bind a metadata. .. change:: :tags: schema :tickets: deprecated metadata.table_iterator() method removed (use sorted_tables) .. change:: :tags: schema :tickets: deprecated PassiveDefault - use DefaultClause. .. change:: :tags: schema :tickets: the "metadata" argument is removed from DefaultGenerator and subclasses, but remains locally present on Sequence, which is a standalone construct in DDL. .. change:: :tags: schema :tickets: Removed public mutability from Index and Constraint objects: * ForeignKeyConstraint.append_element() * Index.append_column() * UniqueConstraint.append_column() * PrimaryKeyConstraint.add() * PrimaryKeyConstraint.remove() These should be constructed declaratively (i.e. in one construction). .. change:: :tags: schema :tickets: 1545 The "start" and "increment" attributes on Sequence now generate "START WITH" and "INCREMENT BY" by default, on Oracle and Postgresql. Firebird doesn't support these keywords right now. .. change:: :tags: schema :tickets: UniqueConstraint, Index, PrimaryKeyConstraint all accept lists of column names or column objects as arguments. .. change:: :tags: schema :tickets: Other removed things: - Table.key (no idea what this was for) - Table.primary_key is not assignable - use table.append_constraint(PrimaryKeyConstraint(...)) - Column.bind (get via column.table.bind) - Column.metadata (get via column.table.metadata) - Column.sequence (use column.default) - ForeignKey(constraint=some_parent) (is now private _constraint) .. change:: :tags: schema :tickets: The use_alter flag on ForeignKey is now a shortcut option for operations that can be hand-constructed using the DDL() event system. A side effect of this refactor is that ForeignKeyConstraint objects with use_alter=True will *not* be emitted on SQLite, which does not support ALTER for foreign keys. .. change:: :tags: schema :tickets: 1605 ForeignKey and ForeignKeyConstraint objects now correctly copy() all their public keyword arguments. .. change:: :tags: reflection/inspection :tickets: Table reflection has been expanded and generalized into a new API called "sqlalchemy.engine.reflection.Inspector". The Inspector object provides fine-grained information about a wide variety of schema information, with room for expansion, including table names, column names, view definitions, sequences, indexes, etc. .. 
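A quick illustration of the MetaData.__contains__() behavior and the Sequence start/increment arguments noted above (names are made up)::

    from sqlalchemy import MetaData, Table, Column, Integer, Sequence

    metadata = MetaData()
    invoices = Table('invoices', metadata, Column('id', Integer, primary_key=True))

    # __contains__ accepts a string key or a Table object
    assert 'invoices' in metadata
    assert invoices in metadata

    # emits START WITH 1000 INCREMENT BY 1 on Oracle and Postgresql
    seq = Sequence('invoice_id_seq', start=1000, increment=1)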
change:: :tags: reflection/inspection :tickets: Views are now reflectable as ordinary Table objects. The same Table constructor is used, with the caveat that "effective" primary and foreign key constraints aren't part of the reflection results; these have to be specified explicitly if desired. .. change:: :tags: reflection/inspection :tickets: The existing autoload=True system now uses Inspector underneath so that each dialect need only return "raw" data about tables and other objects - Inspector is the single place that information is compiled into Table objects so that consistency is at a maximum. .. change:: :tags: ddl :tickets: the DDL system has been greatly expanded. the DDL() class now extends the more generic DDLElement(), which forms the basis of many new constructs: - CreateTable() - DropTable() - AddConstraint() - DropConstraint() - CreateIndex() - DropIndex() - CreateSequence() - DropSequence() These support "on" and "execute-at()" just like plain DDL() does. User-defined DDLElement subclasses can be created and linked to a compiler using the sqlalchemy.ext.compiler extension. .. change:: :tags: ddl :tickets: The signature of the "on" callable passed to DDL() and DDLElement() is revised as follows: ddl the DDLElement object itself event the string event name. target previously "schema_item", the Table or MetaData object triggering the event. connection the Connection object in use for the operation. \**kw keyword arguments. In the case of MetaData before/after create/drop, the list of Table objects for which CREATE/DROP DDL is to be issued is passed as the kw argument "tables". This is necessary for metadata-level DDL that is dependent on the presence of specific tables. The "schema_item" attribute of DDL has been renamed to "target". .. change:: :tags: dialect, refactor :tickets: Dialect modules are now broken into database dialects plus DBAPI implementations. Connect URLs are now preferred to be specified using dialect+driver://..., i.e. "mysql+mysqldb://scott:tiger@localhost/test". See the 0.6 documentation for examples. .. change:: :tags: dialect, refactor :tickets: the setuptools entrypoint for external dialects is now called "sqlalchemy.dialects". .. change:: :tags: dialect, refactor :tickets: the "owner" keyword argument is removed from Table. Use "schema" to represent any namespaces to be prepended to the table name. .. change:: :tags: dialect, refactor :tickets: server_version_info becomes a static attribute. .. change:: :tags: dialect, refactor :tickets: dialects receive an initialize() event on initial connection to determine connection properties. .. change:: :tags: dialect, refactor :tickets: dialects receive a visit_pool event have an opportunity to establish pool listeners. .. change:: :tags: dialect, refactor :tickets: cached TypeEngine classes are cached per-dialect class instead of per-dialect. .. change:: :tags: dialect, refactor :tickets: new UserDefinedType should be used as a base class for new types, which preserves the 0.5 behavior of get_col_spec(). .. change:: :tags: dialect, refactor :tickets: The result_processor() method of all type classes now accepts a second argument "coltype", which is the DBAPI type argument from cursor.description. This argument can help some types decide on the most efficient processing of result values. .. change:: :tags: dialect, refactor :tickets: Deprecated Dialect.get_params() removed. .. 
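A sketch of the expanded DDL constructs listed above, compiled for inspection and executed against an in-memory SQLite engine::

    from sqlalchemy import MetaData, Table, Column, Integer, create_engine
    from sqlalchemy.schema import CreateTable, DropTable

    engine = create_engine('sqlite://')
    metadata = MetaData()
    log = Table('log', metadata, Column('id', Integer, primary_key=True))

    # DDL elements can be compiled to strings or executed directly
    print(CreateTable(log).compile(engine))
    engine.execute(CreateTable(log))
    engine.execute(DropTable(log))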
change:: :tags: dialect, refactor :tickets: Dialect.get_rowcount() has been renamed to a descriptor "rowcount", and calls cursor.rowcount directly. Dialects which need to hardwire a rowcount in for certain calls should override the method to provide different behavior. .. change:: :tags: dialect, refactor :tickets: 1566 DefaultRunner and subclasses have been removed. The job of this object has been simplified and moved into ExecutionContext. Dialects which support sequences should add a `fire_sequence()` method to their execution context implementation. .. change:: :tags: dialect, refactor :tickets: Functions and operators generated by the compiler now use (almost) regular dispatch functions of the form "visit_<opname>" and "visit_<funcname>_fn" to provide custom processing. This replaces the need to copy the "functions" and "operators" dictionaries in compiler subclasses with straightforward visitor methods, and also allows compiler subclasses complete control over rendering, as the full _Function or _BinaryExpression object is passed in. .. change:: :tags: postgresql :tickets: New dialects: pg8000, zxjdbc, and pypostgresql on py3k. .. change:: :tags: postgresql :tickets: The "postgres" dialect is now named "postgresql"! Connection strings look like: postgresql://scott:tiger@localhost/test postgresql+pg8000://scott:tiger@localhost/test The "postgres" name remains for backwards compatibility in the following ways: - There is a "postgres.py" dummy dialect which allows old URLs to work, i.e. postgres://scott:tiger@localhost/test - The "postgres" name can be imported from the old "databases" module, i.e. "from sqlalchemy.databases import postgres", as well as from "dialects", i.e. "from sqlalchemy.dialects.postgres import base as pg"; these will send a deprecation warning. - Special expression arguments are now named "postgresql_returning" and "postgresql_where", but the older "postgres_returning" and "postgres_where" names still work with a deprecation warning. .. change:: :tags: postgresql :tickets: "postgresql_where" now accepts SQL expressions which can also include literals, which will be quoted as needed. .. change:: :tags: postgresql :tickets: The psycopg2 dialect now uses psycopg2's "unicode extension" on all new connections, which allows all String/Text/etc. types to skip the need to post-process bytestrings into unicode (an expensive step due to its volume). Other dialects which return unicode natively (pg8000, zxjdbc) also skip unicode post-processing. .. change:: :tags: postgresql :tickets: 1511 Added new ENUM type, which exists as a schema-level construct and extends the generic Enum type. Automatically associates itself with tables and their parent metadata to issue the appropriate CREATE TYPE/DROP TYPE commands as needed, supports unicode labels, supports reflection. .. change:: :tags: postgresql :tickets: INTERVAL supports an optional "precision" argument corresponding to the argument that PG accepts. .. change:: :tags: postgresql :tickets: using new dialect.initialize() feature to set up version-dependent behavior. .. change:: :tags: postgresql :tickets: 1279 somewhat better support for % signs in table/column names; psycopg2 can't handle a bind parameter name of %(foobar)s however and SQLA doesn't want to add overhead just to treat that one non-existent use case. .. change:: :tags: postgresql :tickets: 1516 Inserting NULL into a primary key + foreign key column will allow the "not null constraint" error to raise, not an attempt to execute a nonexistent "col_id_seq" sequence.
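A short sketch of the dialect+driver URL scheme referenced in the "postgres"/"postgresql" entries above; the credentials and database name reuse the placeholder values from those entries::

    from sqlalchemy import create_engine

    # new-style URL: "dialect+driver" explicitly selects the DBAPI in use
    engine = create_engine("postgresql+pg8000://scott:tiger@localhost/test")

    # with no driver portion, the dialect's default DBAPI (psycopg2 here) is chosen
    default_engine = create_engine("postgresql://scott:tiger@localhost/test")
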
.. change:: :tags: postgresql :tickets: autoincrement SELECT statements, i.e. those which select from a procedure that modifies rows, now work with server-side cursor mode (the named cursor isn't used for such statements.) .. change:: :tags: postgresql :tickets: 1636 postgresql dialect can properly detect pg "devel" version strings, i.e. "8.5devel" .. change:: :tags: postgresql :tickets: 1619 The psycopg2 dialect now respects the statement option "stream_results". This option overrides the connection setting "server_side_cursors". If true, server side cursors will be used for the statement. If false, they will not be used, even if "server_side_cursors" is true on the connection. .. change:: :tags: mysql :tickets: New dialects: oursql, a new native dialect, MySQL Connector/Python, a native Python port of MySQLdb, and of course zxjdbc on Jython. .. change:: :tags: mysql :tickets: VARCHAR/NVARCHAR will not render without a length; an error is raised before passing to MySQL. This doesn't impact CAST since VARCHAR is not allowed in MySQL CAST anyway; the dialect renders CHAR/NCHAR in those cases. .. change:: :tags: mysql :tickets: all the _detect_XXX() functions now run once underneath dialect.initialize() .. change:: :tags: mysql :tickets: 1279 somewhat better support for % signs in table/column names; MySQLdb can't handle % signs in SQL when executemany() is used, and SQLA doesn't want to add overhead just to treat that one non-existent use case. .. change:: :tags: mysql :tickets: the BINARY and MSBinary types now generate "BINARY" in all cases. Omitting the "length" parameter will generate "BINARY" with no length. Use BLOB to generate an unlengthed binary column. .. change:: :tags: mysql :tickets: the "quoting='quoted'" argument to MSEnum/ENUM is deprecated. It's best to rely upon the automatic quoting. .. change:: :tags: mysql :tickets: ENUM now subclasses the new generic Enum type, and also handles unicode values implicitly, if the given label names are unicode objects. .. change:: :tags: mysql :tickets: 1539 a column of type TIMESTAMP now defaults to NULL if "nullable=False" is not passed to Column(), and no default is present. This is now consistent with all other types, and in the case of TIMESTAMP explicitly renders "NULL" due to MySQL's "switching" of default nullability for TIMESTAMP columns. .. change:: :tags: oracle :tickets: unit tests pass 100% with cx_oracle! .. change:: :tags: oracle :tickets: support for cx_Oracle's "native unicode" mode which does not require NLS_LANG to be set. Use the latest 5.0.2 or later of cx_oracle. .. change:: :tags: oracle :tickets: an NCLOB type is added to the base types. .. change:: :tags: oracle :tickets: use_ansi=False won't leak into the FROM/WHERE clause of a statement that's selecting from a subquery that also uses JOIN/OUTERJOIN. .. change:: :tags: oracle :tickets: 1467 added native INTERVAL type to the dialect. This supports only the DAY TO SECOND interval type so far due to lack of support in cx_oracle for YEAR TO MONTH. .. change:: :tags: oracle :tickets: usage of the CHAR type results in cx_oracle's FIXED_CHAR dbapi type being bound to statements. .. change:: :tags: oracle :tickets: 885 the Oracle dialect now features NUMBER which intends to act just like Oracle's NUMBER type. It is the primary numeric type returned by table reflection and attempts to return Decimal()/float/int based on the precision/scale parameters. .. change:: :tags: oracle :tickets: func.char_length is a generic function for LENGTH. ..
change:: :tags: oracle :tickets: ForeignKey() which includes onupdate= will emit a warning, and will not emit ON UPDATE CASCADE, which is unsupported by Oracle. .. change:: :tags: oracle :tickets: the keys() method of RowProxy() now returns the result column names *normalized* to be SQLAlchemy case insensitive names. This means they will be lower case for case insensitive names, whereas the DBAPI would normally return them as UPPERCASE names. This allows row keys() to be compatible with further SQLAlchemy operations. .. change:: :tags: oracle :tickets: using new dialect.initialize() feature to set up version-dependent behavior. .. change:: :tags: oracle :tickets: 1125 using types.BigInteger with Oracle will generate NUMBER(19) .. change:: :tags: oracle :tickets: "case sensitivity" feature will detect an all-lowercase case-sensitive column name during reflect and add "quote=True" to the generated Column, so that proper quoting is maintained. .. change:: :tags: firebird :tickets: the keys() method of RowProxy() now returns the result column names *normalized* to be SQLAlchemy case insensitive names. This means they will be lower case for case insensitive names, whereas the DBAPI would normally return them as UPPERCASE names. This allows row keys() to be compatible with further SQLAlchemy operations. .. change:: :tags: firebird :tickets: using new dialect.initialize() feature to set up version-dependent behavior. .. change:: :tags: firebird :tickets: "case sensitivity" feature will detect an all-lowercase case-sensitive column name during reflect and add "quote=True" to the generated Column, so that proper quoting is maintained. .. change:: :tags: mssql :tickets: MSSQL + Pyodbc + FreeTDS now works for the most part, with possible exceptions regarding binary data as well as unicode schema identifiers. .. change:: :tags: mssql :tickets: the "has_window_funcs" flag is removed. LIMIT/OFFSET usage will use ROW_NUMBER() as always, and if on an older version of SQL Server, the operation fails. The behavior is exactly the same except the error is raised by SQL Server instead of the dialect, and no flag setting is required to enable it. .. change:: :tags: mssql :tickets: the "auto_identity_insert" flag is removed. This feature always takes effect when an INSERT statement overrides a column that is known to have a sequence on it. As with "has_window_funcs", if the underlying driver doesn't support this, then you can't do this operation in any case, so there's no point in having a flag. .. change:: :tags: mssql :tickets: using new dialect.initialize() feature to set up version-dependent behavior. .. change:: :tags: mssql :tickets: removed references to sequence, which is no longer used. Implicit identities in mssql work the same as implicit sequences on any other dialects. Explicit sequences are enabled through the use of "default=Sequence()". See the MSSQL dialect documentation for more information. .. change:: :tags: sqlite :tickets: DATE, TIME and DATETIME types can now take optional storage_format and regexp arguments. storage_format can be used to store those types using a custom string format. regexp allows a custom regular expression to be used to match string values from the database. .. change:: :tags: sqlite :tickets: Time and DateTime types now use by default a stricter regular expression to match strings from the database. Use the regexp argument if you are using data stored in a legacy format. ..
change:: :tags: sqlite :tickets: __legacy_microseconds__ on SQLite Time and DateTime types is not supported anymore. You should use the storage_format argument instead. .. change:: :tags: sqlite :tickets: Date, Time and DateTime types are now stricter in what they accept as bind parameters: Date type only accepts date objects (and datetime ones, because they inherit from date), Time only accepts time objects, and DateTime only accepts date and datetime objects. .. change:: :tags: sqlite :tickets: 1016 Table() supports a keyword argument "sqlite_autoincrement", which applies the SQLite keyword "AUTOINCREMENT" to the single integer primary key column when generating DDL. Will prevent generation of a separate PRIMARY KEY constraint. .. change:: :tags: types :tickets: The construction of types within dialects has been totally overhauled. Dialects now define publicly available types as UPPERCASE names exclusively, and internal implementation types using underscore identifiers (i.e. are private). The system by which types are expressed in SQL and DDL has been moved to the compiler system. This has the effect that there are much fewer type objects within most dialects. A detailed document on this architecture for dialect authors is in lib/sqlalchemy/dialects/type_migration_guidelines.txt. .. change:: :tags: types :tickets: Types no longer make any guesses as to default parameters. In particular, Numeric, Float, NUMERIC, FLOAT, DECIMAL don't generate any length or scale unless specified. .. change:: :tags: types :tickets: 1664 types.Binary is renamed to types.LargeBinary; it only produces BLOB, BYTEA, or a similar "long binary" type. New base BINARY and VARBINARY types have been added to access these MySQL/MS-SQL specific types in an agnostic way. .. change:: :tags: types :tickets: String/Text/Unicode types now skip the unicode() check on each result column value if the dialect has detected the DBAPI as returning Python unicode objects natively. This check is issued on first connect using "SELECT CAST 'some text' AS VARCHAR(10)" or equivalent, then checking if the returned object is a Python unicode. This allows vast performance increases for native-unicode DBAPIs, including pysqlite/sqlite3, psycopg2, and pg8000. .. change:: :tags: types :tickets: Most types' result processors have been checked for possible speed improvements. Specifically, the following generic types have been optimized, resulting in varying speed improvements: Unicode, PickleType, Interval, TypeDecorator, Binary. Also the following dbapi-specific implementations have been improved: Time, Date and DateTime on Sqlite, ARRAY on Postgresql, Time on MySQL, Numeric(as_decimal=False) on MySQL, oursql and pypostgresql, DateTime on cx_oracle and LOB-based types on cx_oracle. .. change:: :tags: types :tickets: Reflection of types now returns the exact UPPERCASE type within types.py, or the UPPERCASE type within the dialect itself if the type is not a standard SQL type. This means reflection now returns more accurate information about reflected types. .. change:: :tags: types :tickets: 1511, 1109 Added a new Enum generic type. Enum is a schema-aware object to support databases which require specific DDL in order to use enum or equivalent; in the case of PG it handles the details of `CREATE TYPE`, and on other databases without native enum support will generate VARCHAR + an inline CHECK constraint to enforce the enum.
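A minimal sketch of declaring the generic Enum type described above; the table, column, and label names are hypothetical::

    from sqlalchemy import Column, Enum, Integer, MetaData, Table

    metadata = MetaData()

    # renders a native ENUM (via CREATE TYPE) on Postgresql, and
    # VARCHAR plus an inline CHECK constraint on backends without
    # native enum support
    articles = Table(
        "articles", metadata,
        Column("id", Integer, primary_key=True),
        Column("status", Enum("draft", "published", "archived", name="article_status")),
    )
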
.. change:: :tags: types :tickets: 1467 The Interval type includes a "native" flag which controls if native INTERVAL types (postgresql + oracle) are selected if available, or not. "day_precision" and "second_precision" arguments are also added which propagate appropriately to these native types. Related to. .. change:: :tags: types :tickets: 1589 The Boolean type, when used on a backend that doesn't have native boolean support, will generate a CHECK constraint "col IN (0, 1)" along with the int/smallint-based column type. This can be switched off if desired with create_constraint=False. Note that MySQL has no native boolean *or* CHECK constraint support so this feature isn't available on that platform. .. change:: :tags: types :tickets: PickleType now uses == for comparison of values when mutable=True, unless the "comparator" argument with a comparison function is specified to the type. Objects being pickled will be compared based on identity (which defeats the purpose of mutable=True) if __eq__() is not overridden or a comparison function is not provided. .. change:: :tags: types :tickets: The default "precision" and "scale" arguments of Numeric and Float have been removed and now default to None. NUMERIC and FLOAT will be rendered with no numeric arguments by default unless these values are provided. .. change:: :tags: types :tickets: AbstractType.get_search_list() is removed - the games it was used for are no longer necessary. .. change:: :tags: types :tickets: 1125 Added a generic BigInteger type, compiles to BIGINT or NUMBER(19). .. change:: :tags: types :tickets: sqlsoup has been overhauled to explicitly support a 0.5-style session, using autocommit=False, autoflush=True. Default behavior of SQLSoup now requires the usual usage of commit() and rollback(), which have been added to its interface. An explicit Session or scoped_session can be passed to the constructor, allowing these arguments to be overridden. .. change:: :tags: types :tickets: sqlsoup db.<sometable>.update() and delete() now call query(cls).update() and delete(), respectively. .. change:: :tags: types :tickets: sqlsoup now has execute() and connection(), which call upon the Session methods of those names, ensuring that the bind is in terms of the SqlSoup object's bind. .. change:: :tags: types :tickets: sqlsoup objects no longer have the 'query' attribute - it's not needed for sqlsoup's usage paradigm and it gets in the way of a column that is actually named 'query'. .. change:: :tags: types :tickets: 1259 The signature of the proxy_factory callable passed to association_proxy is now (lazy_collection, creator, value_attr, association_proxy), adding a fourth argument that is the parent AssociationProxy argument. Allows serializability and subclassing of the built-in collections. .. change:: :tags: types :tickets: 1372 association_proxy now has basic comparator methods .any(), .has(), .contains(), ==, !=, thanks to Scott Torborg. SQLAlchemy-0.8.4/doc/_sources/changelog/changelog_07.txt0000644000076500000240000041255412251147171023561 0ustar classicstaff00000000000000 ============== 0.7 Changelog ============== .. changelog:: :version: 0.7.11 .. change:: :tags: bug, engine :tickets: 2851 :versions: 0.8.3, 0.9.0b1 The regexp used by the :func:`~sqlalchemy.engine.url.make_url` function now parses ipv6 addresses, e.g. surrounded by brackets. ..
change:: :tags: bug, orm :tickets: 2807 :versions: 0.8.3, 0.9.0b1 Fixed bug where list instrumentation would fail to represent a setslice of ``[0:0]`` correctly, which in particular could occur when using ``insert(0, item)`` with the association proxy. Due to some quirk in Python collections, the issue was much more likely with Python 3 rather than 2. .. change:: :tags: bug, sql :tickets: 2801 :versions: 0.8.3, 0.9.0b1 Fixed regression dating back to 0.7.9 whereby the name of a CTE might not be properly quoted if it was referred to in multiple FROM clauses. .. change:: :tags: mysql, bug :tickets: 2791 :versions: 0.8.3, 0.9.0b1 Updates to MySQL reserved words for versions 5.5, 5.6, courtesy Hanno Schlichting. .. change:: :tags: sql, bug, cte :tickets: 2783 :versions: 0.8.3, 0.9.0b1 Fixed bug in common table expression system where if the CTE were used only as an ``alias()`` construct, it would not render using the WITH keyword. .. change:: :tags: bug, sql :tickets: 2784 :versions: 0.8.3, 0.9.0b1 Fixed bug in :class:`.CheckConstraint` DDL where the "quote" flag from a :class:`.Column` object would not be propagated. .. change:: :tags: bug, orm :tickets: 2699 :versions: 0.8.1 Fixed bug when a query of the form: ``query(SubClass).options(subqueryload(Baseclass.attrname))``, where ``SubClass`` is a joined inh of ``BaseClass``, would fail to apply the ``JOIN`` inside the subquery on the attribute load, producing a cartesian product. The populated results still tended to be correct as additional rows are just ignored, so this issue may be present as a performance degradation in applications that are otherwise working correctly. .. change:: :tags: bug, orm :tickets: 2689 :versions: 0.8.1 Fixed bug in unit of work whereby a joined-inheritance subclass could insert the row for the "sub" table before the parent table, if the two tables had no ForeignKey constraints set up between them. .. change:: :tags: feature, postgresql :tickets: 2676 :versions: 0.8.0 Added support for Postgresql's traditional SUBSTRING function syntax, renders as "SUBSTRING(x FROM y FOR z)" when regular ``func.substring()`` is used. Courtesy Gunnlaugur Þór Briem. .. change:: :tags: bug, tests :tickets: 2669 :pullreq: 41 Fixed an import of "logging" in test_execute which was not working on some linux platforms. .. change:: :tags: bug, orm :tickets: 2674 Improved the error message emitted when a "backref loop" is detected, that is when an attribute event triggers a bidirectional assignment between two other attributes with no end. This condition can occur not just when an object of the wrong type is assigned, but also when an attribute is mis-configured to backref into an existing backref pair. .. change:: :tags: bug, orm :tickets: 2674 A warning is emitted when a MapperProperty is assigned to a mapper that replaces an existing property, if the properties in question aren't plain column-based properties. Replacement of relationship properties is rarely (ever?) what is intended and usually refers to a mapper mis-configuration. This will also warn if a backref configures itself on top of an existing one in an inheritance relationship (which is an error in 0.8). .. changelog:: :version: 0.7.10 :released: Thu Feb 7 2013 .. change:: :tags: engine, bug :tickets: 2604 :versions: 0.8.0b2 Fixed :meth:`.MetaData.reflect` to correctly use the given :class:`.Connection`, if given, without opening a second connection from that connection's :class:`.Engine`. .. 
change:: :tags: mssql, bug :tickets:2607 :versions: 0.8.0b2 Fixed bug whereby using "key" with Column in conjunction with "schema" for the owning Table would fail to locate result rows due to the MSSQL dialect's "schema rendering" logic's failure to take .key into account. .. change:: :tags: sql, mysql, gae :tickets: 2649 Added a conditional import to the ``gaerdbms`` dialect which attempts to import rdbms_apiproxy vs. rdbms_googleapi to work on both dev and production platforms. Also now honors the ``instance`` attribute. Courtesy Sean Lynch. Also backported enhancements to allow username/password as well as fixing error code interpretation from 0.8. .. change:: :tags: sql, bug :tickets: 2594, 2584 Backported adjustment to ``__repr__`` for :class:`.TypeDecorator` to 0.7, allows :class:`.PickleType` to produce a clean ``repr()`` to help with Alembic. .. change:: :tags: sql, bug :tickets: 2643 Fixed bug where :meth:`.Table.tometadata` would fail if a :class:`.Column` had both a foreign key as well as an alternate ".key" name for the column. .. change:: :tags: mssql, bug :tickets: 2638 Added a Py3K conditional around unnecessary .decode() call in mssql information schema, fixes reflection in Py3k. .. change:: :tags: orm, bug :tickets: 2650 Fixed potential memory leak which could occur if an arbitrary number of :class:`.sessionmaker` objects were created. The anonymous subclass created by the sessionmaker, when dereferenced, would not be garbage collected due to remaining class-level references from the event package. This issue also applies to any custom system that made use of ad-hoc subclasses in conjunction with an event dispatcher. .. change:: :tags: orm, bug :tickets: 2640 :meth:`.Query.merge_result` can now load rows from an outer join where an entity may be ``None`` without throwing an error. .. change:: :tags: sqlite, bug :tickets: 2568 :versions: 0.8.0b2 More adjustment to this SQLite related issue which was released in 0.7.9, to intercept legacy SQLite quoting characters when reflecting foreign keys. In addition to intercepting double quotes, other quoting characters such as brackets, backticks, and single quotes are now also intercepted. .. change:: :tags: sql, bug :tickets: 2631 :versions: 0.8.0b2 Fixed bug where using server_onupdate= without passing the "for_update=True" flag would apply the default object to the server_default, blowing away whatever was there. The explicit for_update=True argument shouldn't be needed with this usage (especially since the documentation shows an example without it being used) so it is now arranged internally using a copy of the given default object, if the flag isn't set to what corresponds to that argument. .. change:: :tags: oracle, bug :tickets: 2620 The Oracle LONG type, while an unbounded text type, does not appear to use the cx_Oracle.LOB type when result rows are returned, so the dialect has been repaired to exclude LONG from having cx_Oracle.LOB filtering applied. .. change:: :tags: oracle, bug :tickets: 2611 Repaired the usage of ``.prepare()`` in conjunction with cx_Oracle so that a return value of ``False`` will result in no call to ``connection.commit()``, hence avoiding "no transaction" errors. Two-phase transactions have now been shown to work in a rudimental fashion with SQLAlchemy and cx_oracle, however are subject to caveats observed with the driver; check the documentation for details. .. 
change:: :tags: orm, bug :tickets: 2624 The :class:`.MutableComposite` type did not allow for the :meth:`.MutableBase.coerce` method to be used, even though the code seemed to indicate this intent, so this now works and a brief example is added. As a side-effect, the mechanics of this event handler have been changed so that new :class:`.MutableComposite` types no longer add per-type global event handlers. Also in 0.8.0b2. .. change:: :tags: orm, bug :tickets: 2583 Fixed Session accounting bug whereby replacing a deleted object in the identity map with another object of the same primary key would raise a "conflicting state" error on rollback(), if the replaced primary key were established either via non-unitofwork-established INSERT statement or by primary key switch of another instance. .. change:: :tags: oracle, bug :tickets: 2561 changed the list of cx_oracle types that are excluded from the setinputsizes() step to only include STRING and UNICODE; CLOB and NCLOB are removed. This is to work around cx_oracle behavior which is broken for the executemany() call. In 0.8, this same change is applied however it is also configurable via the exclude_setinputsizes argument. .. change:: :tags: feature, mysql :tickets: 2523 Added "raise_on_warnings" flag to OurSQL dialect. .. change:: :tags: feature, mysql :tickets: 2554 Added "read_timeout" flag to MySQLdb dialect. .. changelog:: :version: 0.7.9 :released: Mon Oct 01 2012 .. change:: :tags: orm, bug :tickets: Fixed bug mostly local to new AbstractConcreteBase helper where the "type" attribute from the superclass would not be overridden on the subclass to produce the "reserved for base" error message, instead placing a do-nothing attribute there. This was inconsistent vs. using ConcreteBase as well as all the behavior of classical concrete mappings, where the "type" column from the polymorphic base would be explicitly disabled on subclasses, unless overridden explicitly. .. change:: :tags: orm, bug :tickets: A warning is emitted when lazy='dynamic' is combined with uselist=False. This is an exception raise in 0.8. .. change:: :tags: orm, bug :tickets: Fixed bug whereby user error in related-object assignment could cause recursion overflow if the assignment triggered a backref of the same name as a bi-directional attribute on the incorrect class to the same target. An informative error is raised now. .. change:: :tags: orm, bug :tickets: 2539 Fixed bug where incorrect type information would be passed when the ORM would bind the "version" column, when using the "version" feature. Tests courtesy Daniel Miller. .. change:: :tags: orm, bug :tickets: 2566 Extra logic has been added to the "flush" that occurs within Session.commit(), such that the extra state added by an after_flush() or after_flush_postexec() hook is also flushed in a subsequent flush, before the "commit" completes. Subsequent calls to flush() will continue until the after_flush hooks stop adding new state. An "overflow" counter of 100 is also in place, in the event of a broken after_flush() hook adding new content each time. .. change:: :tags: bug, sql :tickets: 2571 Fixed the DropIndex construct to support an Index associated with a Table in a remote schema. .. change:: :tags: bug, sql :tickets: 2574 Fixed bug in over() construct whereby passing an empty list for either partition_by or order_by, as opposed to None, would fail to generate correctly. Courtesy Gunnlaugur Þór Briem. .. 
change:: :tags: bug, sql :tickets: 2521 Fixed CTE bug whereby positional bound parameters present in the CTEs themselves would corrupt the overall ordering of bound parameters. This primarily affected SQL Server as the platform with positional binds + CTE support. .. change:: :tags: bug, sql :tickets: Fixed more un-intuitivenesses in CTEs which prevented referring to a CTE in a union of itself without it being aliased. CTEs now render uniquely on name, rendering the outermost CTE of a given name only - all other references are rendered just as the name. This even includes other CTE/SELECTs that refer to different versions of the same CTE object, such as a SELECT or a UNION ALL of that SELECT. We are somewhat loosening the usual link between object identity and lexical identity in this case. A true name conflict between two unrelated CTEs now raises an error. .. change:: :tags: bug, sql :tickets: 2512 quoting is applied to the column names inside the WITH RECURSIVE clause of a common table expression according to the quoting rules for the originating Column. .. change:: :tags: bug, sql :tickets: 2518 Fixed regression introduced in 0.7.6 whereby the FROM list of a SELECT statement could be incorrect in certain "clone+replace" scenarios. .. change:: :tags: bug, sql :tickets: 2552 Fixed bug whereby usage of a UNION or similar inside of an embedded subquery would interfere with result-column targeting, in the case that a result-column had the same ultimate name as a name inside the embedded UNION. .. change:: :tags: bug, sql :tickets: 2558 Fixed a regression since 0.6 regarding result-row targeting. It should be possible to use a select() statement with string based columns in it, that is select(['id', 'name']).select_from('mytable'), and have this statement be targetable by Column objects with those names; this is the mechanism by which query(MyClass).from_statement(some_statement) works. At some point the specific case of using select(['id']), which is equivalent to select([literal_column('id')]), stopped working here, so this has been re-instated and of course tested. .. change:: :tags: bug, sql :tickets: 2544 Added missing operators is_(), isnot() to the ColumnOperators base, so that these long-available operators are present as methods like all the other operators. .. change:: :tags: engine, bug :tickets: 2522 Fixed bug whereby a disconnect detect + dispose that occurs when the QueuePool has threads waiting for connections would leave those threads waiting for the duration of the timeout on the old pool (or indefinitely if timeout was disabled). The fix now notifies those waiters with a special exception case and has them move onto the new pool. .. change:: :tags: engine, feature :tickets: 2516 Dramatic improvement in memory usage of the event system; instance-level collections are no longer created for a particular type of event until instance-level listeners are established for that event. .. change:: :tags: engine, bug :tickets: 2529 Added gaerdbms import to mysql/__init__.py, the absence of which was preventing the new GAE dialect from being loaded. .. change:: :tags: engine, bug :tickets: 2553 Fixed cextension bug whereby the "ambiguous column error" would fail to function properly if the given index were a Column object and not a string. Note there are still some column-targeting issues here which are fixed in 0.8. .. change:: :tags: engine, bug :tickets: Fixed the repr() of Enum to include the "name" and "native_enum" flags. Helps Alembic autogenerate.
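The is_() and isnot() operators noted in the :tickets: 2544 entry above can be used like any other column operator; a minimal sketch, with a hypothetical users table::

    from sqlalchemy import Column, Integer, MetaData, String, Table, select

    metadata = MetaData()
    users = Table(
        "users", metadata,
        Column("id", Integer, primary_key=True),
        Column("name", String(50)),
    )

    # renders "users.name IS NOT NULL" / "users.name IS NULL"
    stmt = select([users]).where(users.c.name.isnot(None))
    null_stmt = select([users]).where(users.c.name.is_(None))
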
.. change:: :tags: sqlite, bug :tickets: 2568 Adjusted a very old bugfix which attempted to work around a SQLite issue that itself was "fixed" as of sqlite 3.6.14, regarding quotes surrounding a table name when using the "foreign_key_list" pragma. The fix has been adjusted to not interfere with quotes that are *actually in the name* of a column or table, to as much a degree as possible; sqlite still doesn't return the correct result for foreign_key_list() if the target table actually has quotes surrounding its name, as *part* of its name (i.e. """mytable"""). .. change:: :tags: sqlite, bug :tickets: 2265 Adjusted column default reflection code to convert non-string values to string, to accommodate old SQLite versions that don't deliver default info as a string. .. change:: :tags: sqlite, feature :tickets: Added support for the localtimestamp() SQL function implemented in SQLite, courtesy Richard Mitchell. .. change:: :tags: postgresql, bug :tickets: 2531 Columns in reflected primary key constraint are now returned in the order in which the constraint itself defines them, rather than how the table orders them. Courtesy Gunnlaugur Þór Briem. .. change:: :tags: postgresql, bug :tickets: 2570 Added 'terminating connection' to the list of messages we use to detect a disconnect with PG, which appears to be present in some versions when the server is restarted. .. change:: :tags: bug, mysql :tickets: Updated mysqlconnector interface to use updated "client flag" and "charset" APIs, courtesy David McNelis. .. change:: :tags: mssql, bug :tickets: 2538 Fixed compiler bug whereby using a correlated subquery within an ORDER BY would fail to render correctly if the statement also used LIMIT/OFFSET, due to mis-rendering within the ROW_NUMBER() OVER clause. Fix courtesy sayap. .. change:: :tags: mssql, bug :tickets: 2545 Fixed compiler bug whereby a given select() would be modified if it had an "offset" attribute, causing the construct to not compile correctly a second time. .. change:: :tags: mssql, bug :tickets: Fixed bug where reflection of primary key constraint would double up columns if the same constraint/table existed in multiple schemas. .. changelog:: :version: 0.7.8 :released: Sat Jun 16 2012 .. change:: :tags: orm, bug :tickets: 2480 Fixed bug whereby subqueryload() from a polymorphic mapping to a target would incur a new invocation of the query for each distinct class encountered in the polymorphic result. .. change:: :tags: orm, bug :tickets: 2491, 1892 Fixed bug in declarative whereby the precedence of columns in a joined-table, composite column (typically for id) would fail to be correct if the columns contained names distinct from their attribute names. This would cause things like primaryjoin conditions made against the entity attributes to be incorrect. Related to as this was supposed to be part of that, this is. .. change:: :tags: orm, feature :tickets: The 'objects' argument to flush() is no longer deprecated, as some valid use cases have been identified. .. change:: :tags: orm, bug :tickets: 2508 Fixed identity_key() function which was not accepting a scalar argument for the identity. .. change:: :tags: orm, bug :tickets: 2497 Fixed bug whereby populate_existing option would not propagate to subquery eager loaders. .. change:: :tags: bug, sql :tickets: 2499 added BIGINT to types.__all__, BIGINT, BINARY, VARBINARY to sqlalchemy module namespace, plus test to ensure this breakage doesn't occur again. ..
change:: :tags: bug, sql :tickets: 2490 Repaired common table expression rendering to function correctly when the SELECT statement contains UNION or other compound expressions, courtesy btbuilder. .. change:: :tags: bug, sql :tickets: 2482 Fixed bug whereby append_column() wouldn't function correctly on a cloned select() construct, courtesy Gunnlaugur Þór Briem. .. change:: :tags: engine, bug :tickets: 2489 Fixed memory leak in C version of result proxy whereby DBAPIs which don't deliver pure Python tuples for result rows would fail to decrement refcounts correctly. The most prominently affected DBAPI is pyodbc. .. change:: :tags: engine, bug :tickets: 2503 Fixed bug affecting Py3K whereby string positional parameters passed to engine/connection execute() would fail to be interpreted correctly, due to __iter__ being present on Py3K string.. .. change:: :tags: postgresql, bug :tickets: 2510 removed unnecessary table clause when reflecting enums,. Courtesy Gunnlaugur Þór Briem. .. change:: :tags: oracle, bug :tickets: 2483 Added ROWID to oracle.*. .. change:: :tags: feature, mysql :tickets: 2484 Added a new dialect for Google App Engine. Courtesy Richie Foreman. .. changelog:: :version: 0.7.7 :released: Sat May 05 2012 .. change:: :tags: orm, bug :tickets: 2477 Fixed issue in unit of work whereby setting a non-None self-referential many-to-one relationship to None would fail to persist the change if the former value was not already loaded.. .. change:: :tags: orm, feature :tickets: 2443 Added prefix_with() method to Query, calls upon select().prefix_with() to allow placement of MySQL SELECT directives in statements. Courtesy Diana Clarke .. change:: :tags: orm, bug :tickets: 2409 Fixed bug in 0.7.6 introduced by whereby column_mapped_collection used against columns that were mapped as joins or other indirect selectables would fail to function. .. change:: :tags: orm, feature :tickets: Added new flag to @validates include_removes. When True, collection remove and attribute del events will also be sent to the validation function, which accepts an additional argument "is_remove" when this flag is used. .. change:: :tags: orm, bug :tickets: 2449 Fixed bug whereby polymorphic_on column that's not otherwise mapped on the class would be incorrectly included in a merge() operation, raising an error. .. change:: :tags: orm, bug :tickets: 2453 Fixed bug in expression annotation mechanics which could lead to incorrect rendering of SELECT statements with aliases and joins, particularly when using column_property(). .. change:: :tags: orm, bug :tickets: 2454 Fixed bug which would prevent OrderingList from being pickleable. Courtesy Jeff Dairiki .. change:: :tags: orm, bug :tickets: Fixed bug in relationship comparisons whereby calling unimplemented methods like SomeClass.somerelationship.like() would produce a recursion overflow, instead of NotImplementedError. .. change:: :tags: bug, sql :tickets: Removed warning when Index is created with no columns; while this might not be what the user intended, it is a valid use case as an Index could be a placeholder for just an index of a certain name. .. change:: :tags: feature, sql :tickets: Added new connection event dbapi_error(). Is called for all DBAPI-level errors passing the original DBAPI exception before SQLAlchemy modifies the state of the cursor. .. change:: :tags: bug, sql :tickets: If conn.begin() fails when calling "with engine.begin()", the newly acquired Connection is closed explicitly before propagating the exception onward normally. .. 
change:: :tags: bug, sql :tickets: 2474 Add BINARY, VARBINARY to types.__all__. .. change:: :tags: mssql, feature :tickets: Added interim create_engine flag supports_unicode_binds to PyODBC dialect, to force whether or not the dialect passes Python unicode literals to PyODBC or not. .. change:: :tags: mssql, bug :tickets: Repaired the use_scope_identity create_engine() flag when using the pyodbc dialect. Previously this flag would be ignored if set to False. When set to False, you'll get "SELECT @@identity" after each INSERT to get at the last inserted ID, for those tables which have "implicit_returning" set to False. .. change:: :tags: mssql, bug :tickets: 2468 UPDATE..FROM syntax with SQL Server requires that the updated table be present in the FROM clause when an alias of that table is also present in the FROM clause. The updated table is now always present in the FROM, when FROM is present in the first place. Courtesy sayap. .. change:: :tags: postgresql, feature :tickets: 2445 Added new for_update/with_lockmode() options for Postgresql: for_update="read"/ with_lockmode("read"), for_update="read_nowait"/ with_lockmode("read_nowait"). These emit "FOR SHARE" and "FOR SHARE NOWAIT", respectively. Courtesy Diana Clarke .. change:: :tags: postgresql, bug :tickets: 2473 removed unnecessary table clause when reflecting domains. .. change:: :tags: bug, mysql :tickets: 2460 Fixed bug whereby column name inside of "KEY" clause for autoincrement composite column with InnoDB would double quote a name that's a reserved word. Courtesy Jeff Dairiki. .. change:: :tags: bug, mysql :tickets: Fixed bug whereby get_view_names() for "information_schema" schema would fail to retrieve views marked as "SYSTEM VIEW". courtesy Matthew Turland. .. change:: :tags: bug, mysql :tickets: 2467 Fixed bug whereby if cast() is used on a SQL expression whose type is not supported by cast() and therefore CAST isn't rendered by the dialect, the order of evaluation could change if the casted expression required that it be grouped; grouping is now applied to those expressions. .. change:: :tags: sqlite, feature :tickets: 2475 Added SQLite execution option "sqlite_raw_colnames=True", will bypass attempts to remove "." from column names returned by SQLite cursor.description. .. change:: :tags: sqlite, bug :tickets: 2525 When the primary key column of a Table is replaced, such as via extend_existing, the "auto increment" column used by insert() constructs is reset. Previously it would remain referring to the previous primary key column. .. changelog:: :version: 0.7.6 :released: Wed Mar 14 2012 .. change:: :tags: orm, bug :tickets: 2424 Fixed event registration bug which would primarily show up as events not being registered with sessionmaker() instances created after the event was associated with the Session class. .. change:: :tags: orm, bug :tickets: 2425 Fixed bug whereby a primaryjoin condition with a "literal" in it would raise an error on compile with certain kinds of deeply nested expressions which also needed to render the same bound parameter name more than once. .. change:: :tags: orm, feature :tickets: Added "no_autoflush" context manager to Session, used with with: will temporarily disable autoflush. .. change:: :tags: orm, feature :tickets: 1859 Added cte() method to Query, invokes common table expression support from the Core (see below). .. change:: :tags: orm, bug :tickets: 2403 Removed the check for number of rows affected when doing a multi-delete against mapped objects. 
If an ON DELETE CASCADE exists between two rows, we can't get an accurate rowcount from the DBAPI; this particular count is not supported on most DBAPIs in any case, MySQLdb is the notable case where it is. .. change:: :tags: orm, bug :tickets: 2409 Fixed bug whereby objects using attribute_mapped_collection or column_mapped_collection could not be pickled. .. change:: :tags: orm, bug :tickets: 2406 Fixed bug whereby MappedCollection would not get the appropriate collection instrumentation if it were only used in a custom subclass that used @collection.internally_instrumented. .. change:: :tags: orm, bug :tickets: 2419 Fixed bug whereby SQL adaption mechanics would fail in a very nested scenario involving joined-inheritance, joinedload(), limit(), and a derived function in the columns clause. .. change:: :tags: orm, bug :tickets: 2417 Fixed the repr() for CascadeOptions to include refresh-expire. Also reworked CascadeOptions to be a <frozenset>. .. change:: :tags: orm, feature :tickets: 2400 Added the ability to query for Table-bound column names when using query(sometable).filter_by(colname=value). .. change:: :tags: orm, bug :tickets: Improved the "declarative reflection" example to support single-table inheritance, multiple calls to prepare(), tables that are present in alternate schemas, establishing only a subset of classes as reflected. .. change:: :tags: orm, bug :tickets: 2390 Scaled back the test applied within flush() to check for UPDATE against partially NULL PK within one table to only actually happen if there's really an UPDATE to occur. .. change:: :tags: orm, bug :tickets: 2352 Fixed bug whereby if a method name conflicted with a column name, a TypeError would be raised when the mapper tried to inspect the __get__() method on the method object. .. change:: :tags: bug, sql :tickets: 2427 Fixed memory leak in core which would occur when C extensions were used with particular types of result fetches, in particular when orm query.count() was called. .. change:: :tags: bug, sql :tickets: 2398 Fixed issue whereby attribute-based column access on a row would raise AttributeError with non-C version, NoSuchColumnError with C version. Now raises AttributeError in both cases. .. change:: :tags: feature, sql :tickets: 1859 Added support for SQL standard common table expressions (CTE), allowing SELECT objects as the CTE source (DML not yet supported). This is invoked via the cte() method on any select() construct. .. change:: :tags: bug, sql :tickets: 2392 Added support for using the .key of a Column as a string identifier in a result set row. The .key is currently listed as an "alternate" name for a column, and is superseded by the name of a column which has that key value as its regular name. For the next major release of SQLAlchemy we may reverse this precedence so that .key takes precedence, but this is not decided on yet. .. change:: :tags: bug, sql :tickets: 2413 A warning is emitted when a not-present column is stated in the values() clause of an insert() or update() construct. Will move to an exception in 0.8. .. change:: :tags: bug, sql :tickets: 2396 A significant change to how labeling is applied to columns in SELECT statements allows "truncated" labels, that is label names that are generated in Python which exceed the maximum identifier length (note this is configurable via label_length on create_engine()), to be properly referenced when rendered inside of a subquery, as well as to be present in a result set row using their original in-Python names. ..
change:: :tags: bug, sql :tickets: 2402 Fixed bug in new "autoload_replace" flag which would fail to preserve the primary key constraint of the reflected table. .. change:: :tags: bug, sql :tickets: 2380 Index will raise when arguments passed cannot be interpreted as columns or expressions. Will warn when Index is created with no columns at all. .. change:: :tags: engine, feature :tickets: 2407 Added "no_parameters=True" execution option for connections. If no parameters are present, will pass the statement as cursor.execute(statement), thereby invoking the DBAPIs behavior when no parameter collection is present; for psycopg2 and mysql-python, this means not interpreting % signs in the string. This only occurs with this option, and not just if the param list is blank, as otherwise this would produce inconsistent behavior of SQL expressions that normally escape percent signs (and while compiling, can't know ahead of time if parameters will be present in some cases). .. change:: :tags: engine, bug :tickets: Added execution_options() call to MockConnection (i.e., that used with strategy="mock") which acts as a pass through for arguments. .. change:: :tags: engine, feature :tickets: 2378 Added pool_reset_on_return argument to create_engine, allows control over "connection return" behavior. Also added new arguments 'rollback', 'commit', None to pool.reset_on_return to allow more control over connection return activity. .. change:: :tags: engine, feature :tickets: Added some decent context managers to Engine, Connection:: with engine.begin() as conn: and:: with engine.connect() as conn: Both close out the connection when done, commit or rollback transaction with errors on engine.begin(). .. change:: :tags: sqlite, bug :tickets: 2432 Fixed bug in C extensions whereby string format would not be applied to a Numeric value returned as integer; this affected primarily SQLite which does not maintain numeric scale settings. .. change:: :tags: mssql, feature :tickets: 2430 Added support for MSSQL INSERT, UPDATE, and DELETE table hints, using new with_hint() method on UpdateBase. .. change:: :tags: feature, mysql :tickets: 2386 Added support for MySQL index and primary key constraint types (i.e. USING) via new mysql_using parameter to Index and PrimaryKeyConstraint, courtesy Diana Clarke. .. change:: :tags: feature, mysql :tickets: 2394 Added support for the "isolation_level" parameter to all MySQL dialects. Thanks to mu_mind for the patch here. .. change:: :tags: oracle, feature :tickets: 2399 Added a new create_engine() flag coerce_to_decimal=False, disables the precision numeric handling which can add lots of overhead by converting all numeric values to Decimal. .. change:: :tags: oracle, bug :tickets: 2401 Added missing compilation support for LONG .. change:: :tags: oracle, bug :tickets: 2435 Added 'LEVEL' to the list of reserved words for Oracle. .. change:: :tags: examples, bug :tickets: Altered _params_from_query() function in Beaker example to pull bindparams from the fully compiled statement, as a quick means to get everything including subqueries in the columns clause, etc. .. changelog:: :version: 0.7.5 :released: Sat Jan 28 2012 .. change:: :tags: orm, bug :tickets: 2389 Fixed issue where modified session state established after a failed flush would be committed as part of the subsequent transaction that begins automatically after manual call to rollback(). 
The state of the session is checked within rollback(), and if new state is present, a warning is emitted and restore_snapshot() is called a second time, discarding those changes. .. change:: :tags: orm, bug :tickets: 2345 Fixed regression from 0.7.4 whereby using an already instrumented column from a superclass as "polymorphic_on" failed to resolve the underlying Column. .. change:: :tags: orm, bug :tickets: 2370 Raise an exception if xyzload_all() is used inappropriately with two non-connected relationships. .. change:: :tags: orm, feature :tickets: Added "class_registry" argument to declarative_base(). Allows two or more declarative bases to share the same registry of class names. .. change:: :tags: orm, feature :tickets: query.filter() accepts multiple criteria which will join via AND, i.e. query.filter(x==y, z>q, ...) .. change:: :tags: orm, feature :tickets: 2351 Added new capability to relationship loader options to allow "default" loader strategies. Pass '*' to any of joinedload(), lazyload(), subqueryload(), or noload() and that becomes the loader strategy used for all relationships, except for those explicitly stated in the Query. Thanks to up-and-coming contributor Kent Bower for an exhaustive and well written test suite ! .. change:: :tags: orm, bug :tickets: 2367 Fixed bug whereby event.listen(SomeClass) forced an entirely unnecessary compile of the mapper, making events very hard to set up at module import time (nobody noticed this ??) .. change:: :tags: orm, bug :tickets: Fixed bug whereby hybrid_property didn't work as a kw arg in any(), has(). .. change:: :tags: orm :tickets: Fixed regression from 0.6 whereby if "load_on_pending" relationship() flag were used where a non-"get()" lazy clause needed to be emitted on a pending object, it would fail to load. .. change:: :tags: orm, bug :tickets: 2371 ensure pickleability of all ORM exceptions for multiprocessing compatibility. .. change:: :tags: orm, bug :tickets: 2353 implemented standard "can't set attribute" / "can't delete attribute" AttributeError when setattr/delattr used on a hybrid that doesn't define fset or fdel. .. change:: :tags: orm, bug :tickets: 2362 Fixed bug where unpickled object didn't have enough of its state set up to work correctly within the unpickle() event established by the mutable object extension, if the object needed ORM attribute access within __eq__() or similar. .. change:: :tags: orm, bug :tickets: 2374 Fixed bug where "merge" cascade could mis-interpret an unloaded attribute, if the load_on_pending flag were used with relationship(). Thanks to Kent Bower for tests. .. change:: :tags: orm, feature :tickets: 2356 New declarative reflection example added, illustrates how best to mix table reflection with declarative as well as uses some new features from. .. change:: :tags: feature, sql :tickets: 2356 New reflection feature "autoload_replace"; when set to False on Table, the Table can be autoloaded without existing columns being replaced. Allows more flexible chains of Table construction/reflection to be constructed, including that it helps with combining Declarative with table reflection. See the new example on the wiki. .. change:: :tags: bug, sql :tickets: 2356 Improved the API for add_column() such that if the same column is added to its own table, an error is not raised and the constraints don't get doubled up. Also helps with some reflection/declarative patterns. .. 
change:: :tags: feature, sql :tickets: Added "false()" and "true()" expression constructs to sqlalchemy.sql namespace, though not part of __all__ as of yet. .. change:: :tags: feature, sql :tickets: 2361 Dialect-specific compilers now raise CompileError for all type/statement compilation issues, instead of InvalidRequestError or ArgumentError. The DDL for CREATE TABLE will re-raise CompileError to include table/column information for the problematic column. .. change:: :tags: bug, sql :tickets: 2381 Fixed issue where the "required" exception would not be raised for bindparam() with required=True, if the statement were given no parameters at all. .. change:: :tags: engine, bug :tickets: 2371 Added __reduce__ to StatementError, DBAPIError, column errors so that exceptions are pickleable, as when using multiprocessing. However, not all DBAPIs support this yet, such as psycopg2. .. change:: :tags: engine, bug :tickets: 2382 Improved error messages when a non-string or invalid string is passed to any of the date/time processors used by SQLite, including C and Python versions. .. change:: :tags: engine, bug :tickets: 2377 Fixed bug whereby a table-bound Column object named "_" which matched a column labeled as "_" could match inappropriately when targeting in a result set row. .. change:: :tags: engine, bug :tickets: 2384 Fixed bug in "mock" strategy whereby correct DDL visit method wasn't called, resulting in "CREATE/DROP SEQUENCE" statements being duplicated .. change:: :tags: sqlite, bug :tickets: 2364 the "name" of an FK constraint in SQLite is reflected as "None", not "0" or other integer value. SQLite does not appear to support constraint naming in any case. .. change:: :tags: sqlite, bug :tickets: 2368 sql.false() and sql.true() compile to 0 and 1, respectively in sqlite .. change:: :tags: sqlite, bug :tickets: removed an erroneous "raise" in the SQLite dialect when getting table names and view names, where logic is in place to fall back to an older version of SQLite that doesn't have the "sqlite_temp_master" table. .. change:: :tags: bug, mysql :tickets: 2376 fixed regexp that filters out warnings for non-reflected "PARTITION" directives, thanks to George Reilly .. change:: :tags: mssql, bug :tickets: 2340 Adjusted the regexp used in the mssql.TIME type to ensure only six digits are received for the "microseconds" portion of the value, which is expected by Python's datetime.time(). Note that support for sending microseconds doesn't seem to be possible yet with pyodbc at least. .. change:: :tags: mssql, bug :tickets: 2347 Dropped the "30 char" limit on pymssql, based on reports that it's doing things better these days. pymssql hasn't been well tested and as the DBAPI is in flux it's still not clear what the status is on this driver and how SQLAlchemy's implementation should adapt. .. change:: :tags: oracle, bug :tickets: 2388 Added ORA-03135 to the never ending list of oracle "connection lost" errors .. change:: :tags: core, bug :tickets: 2379 Changed LRUCache, used by the mapper to cache INSERT/UPDATE/DELETE statements, to use an incrementing counter instead of a timestamp to track entries, for greater reliability versus using time.time(), which can cause test failures on some platforms. .. 
change:: :tags: core, bug :tickets: 2383 Added a boolean check for the "finalize" function within the pool connection proxy's weakref callback before calling it, so that a warning isn't emitted that this function is None when the application is exiting and gc has removed the function from the module before the weakref callback was invoked. .. change:: :tags: bug, py3k :tickets: 2348 Fixed inappropriate usage of util.py3k flag and renamed it to util.py3k_warning, since this flag is intended to detect the -3 flag series of import restrictions only. .. change:: :tags: examples, feature :tickets: 2313 Simplified the versioning example a bit to use a declarative mixin as well as an event listener, instead of a metaclass + SessionExtension. .. change:: :tags: examples, bug :tickets: 2346 Fixed large_collection.py to close the session before dropping tables. .. changelog:: :version: 0.7.4 :released: Fri Dec 09 2011 .. change:: :tags: orm, bug :tickets: 2315 Fixed backref behavior when "popping" the value off of a many-to-one in response to a removal from a stale one-to-many - the operation is skipped, since the many-to-one has since been updated. .. change:: :tags: orm, bug :tickets: 2264 After some years of not doing this, added more granularity to the "is X a parent of Y" functionality, which is used when determining if the FK on "Y" needs to be "nulled out" as well as if "Y" should be deleted with delete-orphan cascade. The test now takes into account the Python identity of the parent as well its identity key, to see if the last known parent of Y is definitely X. If a decision can't be made, a StaleDataError is raised. The conditions where this error is raised are fairly rare, requiring that the previous parent was garbage collected, and previously could very well inappropriately update/delete a record that's since moved onto a new parent, though there may be some cases where "silent success" occurred previously that will now raise in the face of ambiguity. Expiring "Y" resets the "parent" tracker, meaning X.remove(Y) could then end up deleting Y even if X is stale, but this is the same behavior as before; it's advised to expire X also in that case. .. change:: :tags: orm, bug :tickets: 2310 fixed inappropriate evaluation of user-mapped object in a boolean context within query.get(). Also in 0.6.9. .. change:: :tags: orm, bug :tickets: 2304 Added missing comma to PASSIVE_RETURN_NEVER_SET symbol .. change:: :tags: orm, bug :tickets: 1776 Cls.column.collate("some collation") now works. Also in 0.6.9 .. change:: :tags: orm, bug :tickets: 2309 the value of a composite attribute is now expired after an insert or update operation, instead of regenerated in place. This ensures that a column value which is expired within a flush will be loaded first, before the composite is regenerated using that value. .. change:: :tags: orm, bug :tickets: 2309, 2308 The fix in also emits the "refresh" event when the composite value is loaded on access, even if all column values were already present, as is appropriate. This fixes the "mutable" extension which relies upon the "load" event to ensure the _parents dictionary is up to date, fixes. Thanks to Scott Torborg for the test case here. .. change:: :tags: orm, bug :tickets: 2312 Fixed bug whereby a subclass of a subclass using concrete inheritance in conjunction with the new ConcreteBase or AbstractConcreteBase would fail to apply the subclasses deeper than one level to the "polymorphic loader" of each base .. 
change:: :tags: orm, bug :tickets: 2312 Fixed bug whereby a subclass of a subclass using the new AbstractConcreteBase would fail to acquire the correct "base_mapper" attribute when the "base" mapper was generated, thereby causing failures later on.
.. change:: :tags: orm, bug :tickets: 2316 Fixed bug whereby column_property() created against an ORM-level column could be treated as a distinct entity when producing certain kinds of joined-inh joins.
.. change:: :tags: orm, bug :tickets: 2297 Fixed the error formatting raised when a tuple is inadvertently passed to session.query(). Also in 0.6.9.
.. change:: :tags: orm, bug :tickets: 2328 Calls to query.join() to a single-table inheritance subclass are now tracked, and are used to eliminate the additional WHERE..IN criterion normally tacked on with single table inheritance, since the join should accommodate it. This allows OUTER JOIN to a single table subclass to produce the correct results, and overall will produce fewer WHERE criteria when dealing with single table inheritance joins.
.. change:: :tags: orm, bug :tickets: 2339 __table_args__ can now be passed as an empty tuple as well as an empty dict. Thanks to Fayaz Yusuf Khan for the patch.
.. change:: :tags: orm, bug :tickets: 2325 Updated the warning message when setting delete-orphan without delete to no longer refer to 0.6, as we never got around to upgrading this to an exception. Ideally this might be better as an exception but it's not critical either way.
.. change:: :tags: orm, feature :tickets: 2345, 2238 polymorphic_on now accepts many new kinds of values:
   * standalone expressions that aren't otherwise mapped
   * column_property() objects
   * string names of any column_property() or attribute name of a mapped Column
   The docs include an example using the case() construct, which is likely to be a commonly used construct here; a minimal sketch also follows below. As part of this change, standalone expressions in polymorphic_on propagate to single-table inheritance subclasses so that they are used in the WHERE/JOIN clause to limit rows to that subclass, as is the usual behavior.
.. change:: :tags: orm, feature :tickets: 2301 IdentitySet now supports the - operator, the same as difference(), handy when dealing with Session.dirty etc.
.. change:: :tags: orm, feature :tickets: Added a new value for Column autoincrement called "ignore_fk", which can be used to force autoincrement on a column that's still part of a ForeignKeyConstraint. A new example in the relationship docs illustrates its use.
.. change:: :tags: orm, bug :tickets: Fixed bug in get_history() when referring to a composite attribute that has no value; added coverage for get_history() regarding composites, which is otherwise just a userland function.
.. change:: :tags: bug, sql :tickets: 2316, 2261 Related to the above, made some adjustments to the change regarding the "from" list on a select(). The _froms collection is no longer memoized, as this simplifies various use cases and removes the need for a "warning" if a column is attached to a table after it was already used in an expression - the select() construct will now always produce the correct expression. There's probably no real-world performance hit here; select() objects are almost always made ad-hoc, and systems that wish to optimize the re-use of a select() would be using the "compiled_cache" feature. A hit which would occur when calling select.bind has been reduced, but the vast majority of users shouldn't be using "bound metadata" anyway :).
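A minimal, hypothetical sketch of the polymorphic_on enhancement noted above (class and column names are illustrative only), mapping polymorphic_on to a standalone case() expression::

    from sqlalchemy import Column, Integer, String, case
    from sqlalchemy.ext.declarative import declarative_base

    Base = declarative_base()

    class Employee(Base):
        __tablename__ = 'employee'
        id = Column(Integer, primary_key=True)
        discriminator = Column(String(50))

        # polymorphic_on given as a standalone SQL expression rather than a Column
        __mapper_args__ = {
            'polymorphic_on': case(
                [(discriminator == 'engineer', 'engineer')],
                else_='employee'),
            'polymorphic_identity': 'employee',
        }

    class Engineer(Employee):
        __mapper_args__ = {'polymorphic_identity': 'engineer'}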
.. change:: :tags: feature, sql :tickets: 2166, 1944 The update() construct can now accommodate multiple tables in the WHERE clause, which will render an "UPDATE..FROM" construct, recognized by Postgresql and MSSQL. When compiled on MySQL, it will instead generate "UPDATE t1, t2, ..". MySQL additionally can render against multiple tables in the SET clause, if Column objects are used as keys in the "values" parameter or generative method. A short sketch follows below.
.. change:: :tags: feature, sql :tickets: 77 Added an accessor to types called "python_type", which returns the rudimentary Python type object for a particular TypeEngine instance, if known, else raises NotImplementedError.
.. change:: :tags: bug, sql :tickets: 2261, 2319 A further tweak to the fix above, so that generative methods work a bit better off of cloned constructs (this is almost a non-use case though). In particular this allows with_only_columns() to behave more consistently. Added additional documentation to with_only_columns() to clarify expected behavior, which changed as a result of that fix.
.. change:: :tags: engine, bug :tickets: 2317 Fixed bug whereby transaction.rollback() would throw an error on an invalidated connection if the transaction were a two-phase or savepoint transaction. For plain transactions, rollback() is a no-op if the connection is invalidated, so while it wasn't 100% clear if it should be a no-op, at least now the interface is consistent.
.. change:: :tags: feature, schema :tickets: Added new support for remote "schemas":
.. change:: :tags: schema :tickets: MetaData() accepts "schema" and "quote_schema" arguments, which will be applied to the same-named arguments of a Table or Sequence which leaves these at their default of ``None``.
.. change:: :tags: schema :tickets: Sequence accepts "quote_schema" argument
.. change:: :tags: schema :tickets: tometadata() for Table will use the "schema" of the incoming MetaData for the new Table if the schema argument is explicitly "None"
.. change:: :tags: schema :tickets: Added CreateSchema and DropSchema DDL constructs - these accept just the string name of a schema and a "quote" flag.
.. change:: :tags: schema :tickets: When using default "schema" with MetaData, ForeignKey will also assume the "default" schema when locating remote table. This allows the "schema" argument on MetaData to be applied to any set of Table objects that otherwise don't have a "schema".
.. change:: :tags: schema :tickets: 1679 a "has_schema" method has been implemented on dialect, but only works on Postgresql so far. Courtesy Manlio Perillo.
.. change:: :tags: feature, schema :tickets: 1410 The "extend_existing" flag on Table now allows for the reflection process to take effect for a Table object that's already been defined; when autoload=True and extend_existing=True are both set, the full set of columns will be reflected from the Table which will then *overwrite* those columns already present, rather than no activity occurring. Columns that are present directly in the autoload run will be used as always, however.
.. change:: :tags: bug, schema :tickets: Fixed bug whereby TypeDecorator would return a stale value for _type_affinity, when using a TypeDecorator that "switches" types, like the CHAR/UUID type.
.. change:: :tags: bug, schema :tickets: Fixed bug whereby "order_by='foreign_key'" option to Inspector.get_table_names wasn't implementing the sort properly, replaced with the existing sort algorithm.
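A rough sketch of the multi-table UPDATE feature noted above; the table and column names are hypothetical, and the statement would render "UPDATE..FROM" on Postgresql::

    from sqlalchemy import MetaData, Table, Column, Integer, String, update

    metadata = MetaData()
    users = Table('users', metadata,
        Column('id', Integer, primary_key=True),
        Column('name', String(50)))
    addresses = Table('addresses', metadata,
        Column('id', Integer, primary_key=True),
        Column('user_id', Integer),
        Column('email', String(100)))

    # renders e.g. UPDATE users SET name=:name FROM addresses
    #   WHERE users.id = addresses.user_id AND addresses.email = :email_1
    stmt = update(users).\
        values(name='deactivated').\
        where(users.c.id == addresses.c.user_id).\
        where(addresses.c.email == 'old@example.com')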
change:: :tags: bug, schema :tickets: 2305 the "name" of a column-level CHECK constraint, if present, is now rendered in the CREATE TABLE statement using "CONSTRAINT CHECK ". .. change:: :tags: pyodbc, bug :tickets: 2318 pyodbc-based dialects now parse the pyodbc accurately as far as observed pyodbc strings, including such gems as "py3-3.0.1-beta4" .. change:: :tags: postgresql, bug :tickets: 2311 Postgresql dialect memoizes that an ENUM of a particular name was processed during a create/drop sequence. This allows a create/drop sequence to work without any calls to "checkfirst", and also means with "checkfirst" turned on it only needs to check for the ENUM once. .. change:: :tags: postgresql, feature :tickets: Added create_type constructor argument to pg.ENUM. When False, no CREATE/DROP or checking for the type will be performed as part of a table create/drop event; only the create()/drop)() methods called directly will do this. Helps with Alembic "offline" scripts. .. change:: :tags: mssql, feature :tickets: 822 lifted the restriction on SAVEPOINT for SQL Server. All tests pass using it, it's not known if there are deeper issues however. .. change:: :tags: mssql, bug :tickets: 2336 repaired the with_hint() feature which wasn't implemented correctly on MSSQL - usually used for the "WITH (NOLOCK)" hint (which you shouldn't be using anyway ! use snapshot isolation instead :) ) .. change:: :tags: mssql, bug :tickets: 2318 use new pyodbc version detection for _need_decimal_fix option. .. change:: :tags: mssql, bug :tickets: 2343 don't cast "table name" as NVARCHAR on SQL Server 2000. Still mostly in the dark what incantations are needed to make PyODBC work fully with FreeTDS 0.91 here, however. .. change:: :tags: mssql, bug :tickets: 2269 Decode incoming values when retrieving list of index names and the names of columns within those indexes. .. change:: :tags: bug, mysql :tickets: Unicode adjustments allow latest pymysql (post 0.4) to pass 100% on Python 2. .. change:: :tags: ext, feature :tickets: Added an example to the hybrid docs of a "transformer" - a hybrid that returns a query-transforming callable in combination with a custom comparator. Uses a new method on Query called with_transformation(). The use case here is fairly experimental, but only adds one line of code to Query. .. change:: :tags: ext, bug :tickets: the @compiles decorator raises an informative error message when no "default" compilation handler is present, rather than KeyError. .. change:: :tags: examples, bug :tickets: Fixed bug in history_meta.py example where the "unique" flag was not removed from a single-table-inheritance subclass which generates columns to put up onto the base. .. changelog:: :version: 0.7.3 :released: Sun Oct 16 2011 .. change:: :tags: general :tickets: 2279 Adjusted the "importlater" mechanism, which is used internally to resolve import cycles, such that the usage of __import__ is completed when the import of sqlalchemy or sqlalchemy.orm is done, thereby avoiding any usage of __import__ after the application starts new threads, fixes. Also in 0.6.9. .. change:: :tags: orm :tickets: 2298 Improved query.join() such that the "left" side can more flexibly be a non-ORM selectable, such as a subquery. A selectable placed in select_from() will now be used as the left side, favored over implicit usage of a mapped entity. If the join still fails based on lack of foreign keys, the error message includes this detail. Thanks to brianrhude on IRC for the test case. .. 
change:: :tags: orm :tickets: 2241 Added after_soft_rollback() Session event. This event fires unconditionally whenever rollback() is called, regardless of if an actual DBAPI level rollback occurred. This event is specifically designed to allow operations with the Session to proceed after a rollback when the Session.is_active is True. .. change:: :tags: orm :tickets: added "adapt_on_names" boolean flag to orm.aliased() construct. Allows an aliased() construct to link the ORM entity to a selectable that contains aggregates or other derived forms of a particular attribute, provided the name is the same as that of the entity mapped column. .. change:: :tags: orm :tickets: Added new flag expire_on_flush=False to column_property(), marks those properties that would otherwise be considered to be "readonly", i.e. derived from SQL expressions, to retain their value after a flush has occurred, including if the parent object itself was involved in an update. .. change:: :tags: orm :tickets: 2237 Enhanced the instrumentation in the ORM to support Py3K's new argument style of "required kw arguments", i.e. fn(a, b, \*, c, d), fn(a, b, \*args, c, d). Argument signatures of mapped object's __init__ method will be preserved, including required kw rules. .. change:: :tags: orm :tickets: 2282 Fixed bug in unit of work whereby detection of "cycles" among classes in highly interlinked patterns would not produce a deterministic result; thereby sometimes missing some nodes that should be considered cycles and causing further issues down the road. Note this bug is in 0.6 also; not backported at the moment. .. change:: :tags: orm :tickets: Fixed a variety of synonym()-related regressions from 0.6: * making a synonym against a synonym now works. * synonyms made against a relationship() can be passed to query.join(), options sent to query.options(), passed by name to query.with_parent(). .. change:: :tags: orm :tickets: 2287 Fixed bug whereby mapper.order_by attribute would be ignored in the "inner" query within a subquery eager load. . Also in 0.6.9. .. change:: :tags: orm :tickets: 2267 Identity map .discard() uses dict.pop(,None) internally instead of "del" to avoid KeyError/warning during a non-determinate gc teardown .. change:: :tags: orm :tickets: 2253 Fixed regression in new composite rewrite where deferred=True option failed due to missing import .. change:: :tags: orm :tickets: 2248 Reinstated "comparator_factory" argument to composite(), removed when 0.7 was released. .. change:: :tags: orm :tickets: 2247 Fixed bug in query.join() which would occur in a complex multiple-overlapping path scenario, where the same table could be joined to twice. Thanks *much* to Dave Vitek for the excellent fix here. .. change:: :tags: orm :tickets: Query will convert an OFFSET of zero when slicing into None, so that needless OFFSET clauses are not invoked. .. change:: :tags: orm :tickets: Repaired edge case where mapper would fail to fully update internal state when a relationship on a new mapper would establish a backref on the first mapper. .. change:: :tags: orm :tickets: 2260 Fixed bug whereby if __eq__() was redefined, a relationship many-to-one lazyload would hit the __eq__() and fail. Does not apply to 0.6.9. .. change:: :tags: orm :tickets: 2196 Calling class_mapper() and passing in an object that is not a "type" (i.e. a class that could potentially be mapped) now raises an informative ArgumentError, rather than UnmappedClassError. .. change:: :tags: orm :tickets: New event hook, MapperEvents.after_configured(). 
Called after a configure() step has completed and mappers were in fact affected. Theoretically this event is called once per application, unless new mappings are constructed after existing ones have been used already. .. change:: :tags: orm :tickets: 2281 When an open Session is garbage collected, the objects within it which remain are considered detached again when they are add()-ed to a new Session. This is accomplished by an extra check that the previous "session_key" doesn't actually exist among the pool of Sessions. .. change:: :tags: orm :tickets: 2239 New declarative features: * __declare_last__() method, establishes an event listener for the class method that will be called when mappers are completed with the final "configure" step. * __abstract__ flag. The class will not be mapped at all when this flag is present on the class. * New helper classes ConcreteBase, AbstractConcreteBase. Allow concrete mappings using declarative which automatically set up the "polymorphic_union" when the "configure" mapper step is invoked. * The mapper itself has semi-private methods that allow the "with_polymorphic" selectable to be assigned to the mapper after it has already been configured. .. change:: :tags: orm :tickets: 2283 Declarative will warn when a subclass' base uses @declared_attr for a regular column - this attribute does not propagate to subclasses. .. change:: :tags: orm :tickets: 2280 The integer "id" used to link a mapped instance with its owning Session is now generated by a sequence generation function rather than id(Session), to eliminate the possibility of recycled id() values causing an incorrect result, no need to check that object actually in the session. .. change:: :tags: orm :tickets: 2257 Behavioral improvement: empty conjunctions such as and_() and or_() will be flattened in the context of an enclosing conjunction, i.e. and_(x, or_()) will produce 'X' and not 'X AND ()'.. .. change:: :tags: orm :tickets: 2261 Fixed bug regarding calculation of "from" list for a select() element. The "from" calc is now delayed, so that if the construct uses a Column object that is not yet attached to a Table, but is later associated with a Table, it generates SQL using the table as a FROM. This change impacted fairly deeply the mechanics of how the FROM list as well as the "correlates" collection is calculated, as some "clause adaption" schemes (these are used very heavily in the ORM) were relying upon the fact that the "froms" collection would typically be cached before the adaption completed. The rework allows it such that the "froms" collection can be cleared and re-generated at any time. .. change:: :tags: orm :tickets: 2270 Fixed bug whereby with_only_columns() method of Select would fail if a selectable were passed.. Also in 0.6.9. .. change:: :tags: schema :tickets: 2284 Modified Column.copy() to use _constructor(), which defaults to self.__class__, in order to create the new object. This allows easier support of subclassing Column. .. change:: :tags: schema :tickets: 2223 Added a slightly nicer __repr__() to SchemaItem classes. Note the repr here can't fully support the "repr is the constructor" idea since schema items can be very deeply nested/cyclical, have late initialization of some things, etc. .. change:: :tags: engine :tickets: 2254 The recreate() method in all pool classes uses self.__class__ to get at the type of pool to produce, in the case of subclassing. Note there's no usual need to subclass pools. .. 
change:: :tags: engine :tickets: 2243 Improvement to multi-param statement logging, long lists of bound parameter sets will be compressed with an informative indicator of the compression taking place. Exception messages use the same improved formatting. .. change:: :tags: engine :tickets: Added optional "sa_pool_key" argument to pool.manage(dbapi).connect() so that serialization of args is not necessary. .. change:: :tags: engine :tickets: 2286 The entry point resolution supported by create_engine() now supports resolution of individual DBAPI drivers on top of a built-in or entry point-resolved dialect, using the standard '+' notation - it's converted to a '.' before being resolved as an entry point. .. change:: :tags: engine :tickets: 2299 Added an exception catch + warning for the "return unicode detection" step within connect, allows databases that crash on NVARCHAR to continue initializing, assuming no NVARCHAR type implemented. .. change:: :tags: types :tickets: 2258 Extra keyword arguments to the base Float type beyond "precision" and "asdecimal" are ignored; added a deprecation warning here and additional docs, related to .. change:: :tags: sqlite :tickets: Ensured that the same ValueError is raised for illegal date/time/datetime string parsed from the database regardless of whether C extensions are in use or not. .. change:: :tags: postgresql :tickets: 2290 Added "postgresql_using" argument to Index(), produces USING clause to specify index implementation for PG. . Thanks to Ryan P. Kelly for the patch. .. change:: :tags: postgresql :tickets: 1839 Added client_encoding parameter to create_engine() when the postgresql+psycopg2 dialect is used; calls the psycopg2 set_client_encoding() method with the value upon connect. .. change:: :tags: postgresql :tickets: 2291, 2141 Fixed bug related to whereby the same modified index behavior in PG 9 affected primary key reflection on a renamed column.. Also in 0.6.9. .. change:: :tags: postgresql :tickets: 2256 Reflection functions for Table, Sequence no longer case insensitive. Names can be differ only in case and will be correctly distinguished. .. change:: :tags: postgresql :tickets: Use an atomic counter as the "random number" source for server side cursor names; conflicts have been reported in rare cases. .. change:: :tags: postgresql :tickets: 2249 Narrowed the assumption made when reflecting a foreign-key referenced table with schema in the current search path; an explicit schema will be applied to the referenced table only if it actually matches that of the referencing table, which also has an explicit schema. Previously it was assumed that "current" schema was synonymous with the full search_path. .. change:: :tags: mysql :tickets: 2225 a CREATE TABLE will put the COLLATE option after CHARSET, which appears to be part of MySQL's arbitrary rules regarding if it will actually work or not. Also in 0.6.9. .. change:: :tags: mysql :tickets: 2293 Added mysql_length parameter to Index construct, specifies "length" for indexes. .. change:: :tags: mssql :tickets: 2273 Changes to attempt support of FreeTDS 0.91 with Pyodbc. This includes that string binds are sent as Python unicode objects when FreeTDS 0.91 is detected, and a CAST(? AS NVARCHAR) is used when we detect for a table. 
However, I'd continue to characterize Pyodbc + FreeTDS 0.91 behavior as pretty crappy, there are still many queries such as used in reflection which cause a core dump on Linux, and it is not really usable at all on OSX, MemoryErrors abound and just plain broken unicode support. .. change:: :tags: mssql :tickets: 2277 The behavior of =/!= when comparing a scalar select to a value will no longer produce IN/NOT IN as of 0.8; this behavior is a little too heavy handed (use in_() if you want to emit IN) and now emits a deprecation warning. To get the 0.8 behavior immediately and remove the warning, a compiler recipe is given at http://www.sqlalchemy.org/docs/07/dialects/mssql.html#scalar-select-comparisons to override the behavior of visit_binary(). .. change:: :tags: mssql :tickets: 2222 "0" is accepted as an argument for limit() which will produce "TOP 0". .. change:: :tags: oracle :tickets: 2272 Fixed ReturningResultProxy for zxjdbc dialect.. Regression from 0.6. .. change:: :tags: oracle :tickets: 2252 The String type now generates VARCHAR2 on Oracle which is recommended as the default VARCHAR. Added an explicit VARCHAR2 and NVARCHAR2 to the Oracle dialect as well. Using NVARCHAR still generates "NVARCHAR2" - there is no "NVARCHAR" on Oracle - this remains a slight breakage of the "uppercase types always give exactly that" policy. VARCHAR still generates "VARCHAR", keeping with the policy. If Oracle were to ever define "VARCHAR" as something different as they claim (IMHO this will never happen), the type would be available. .. change:: :tags: ext :tickets: 2262 SQLSoup will not be included in version 0.8 of SQLAlchemy; while useful, we would like to keep SQLAlchemy itself focused on one ORM usage paradigm. SQLSoup will hopefully soon be superseded by a third party project. .. change:: :tags: ext :tickets: 2236 Added local_attr, remote_attr, attr accessors to AssociationProxy, providing quick access to the proxied attributes at the class level. .. change:: :tags: ext :tickets: 2275 Changed the update() method on association proxy dictionary to use a duck typing approach, i.e. checks for "keys", to discern between update({}) and update((a, b)). Previously, passing a dictionary that had tuples as keys would be misinterpreted as a sequence. .. change:: :tags: examples :tickets: 2266 Adjusted dictlike-polymorphic.py example to apply the CAST such that it works on PG, other databases. Also in 0.6.9. .. changelog:: :version: 0.7.2 :released: Sun Jul 31 2011 .. change:: :tags: orm :tickets: 2213 Feature enhancement: joined and subquery loading will now traverse already-present related objects and collections in search of unpopulated attributes throughout the scope of the eager load being defined, so that the eager loading that is specified via mappings or query options unconditionally takes place for the full depth, populating whatever is not already populated. Previously, this traversal would stop if a related object or collection were already present leading to inconsistent behavior (though would save on loads/cycles for an already-loaded graph). For a subqueryload, this means that the additional SELECT statements emitted by subqueryload will invoke unconditionally, no matter how much of the existing graph is already present (hence the controversy). 
The previous behavior of "stopping" is still in effect when a query is the result of an attribute-initiated lazyload, as otherwise an "N+1" style of collection iteration can become needlessly expensive when the same related object is encountered repeatedly. There's also an as-yet-not-public generative Query method _with_invoke_all_eagers() which selects old/new behavior .. change:: :tags: orm :tickets: 2195 A rework of "replacement traversal" within the ORM as it alters selectables to be against aliases of things (i.e. clause adaption) includes a fix for multiply-nested any()/has() constructs against a joined table structure. .. change:: :tags: orm :tickets: 2234 Fixed bug where query.join() + aliased=True from a joined-inh structure to itself on relationship() with join condition on the child table would convert the lead entity into the joined one inappropriately. Also in 0.6.9. .. change:: :tags: orm :tickets: 2205 Fixed regression from 0.6 where Session.add() against an object which contained None in a collection would raise an internal exception. Reverted this to 0.6's behavior which is to accept the None but obviously nothing is persisted. Ideally, collections with None present or on append() should at least emit a warning, which is being considered for 0.8. .. change:: :tags: orm :tickets: 2191 Load of a deferred() attribute on an object where row can't be located raises ObjectDeletedError instead of failing later on; improved the message in ObjectDeletedError to include other conditions besides a simple "delete". .. change:: :tags: orm :tickets: 2224 Fixed regression from 0.6 where a get history operation on some relationship() based attributes would fail when a lazyload would emit; this could trigger within a flush() under certain conditions. Thanks to the user who submitted the great test for this. .. change:: :tags: orm :tickets: 2228 Fixed bug apparent only in Python 3 whereby sorting of persistent + pending objects during flush would produce an illegal comparison, if the persistent object primary key is not a single integer. Also in 0.6.9 .. change:: :tags: orm :tickets: 2197 Fixed bug whereby the source clause used by query.join() would be inconsistent if against a column expression that combined multiple entities together. Also in 0.6.9 .. change:: :tags: orm :tickets: 2215 Fixed bug whereby if a mapped class redefined __hash__() or __eq__() to something non-standard, which is a supported use case as SQLA should never consult these, the methods would be consulted if the class was part of a "composite" (i.e. non-single-entity) result set. Also in 0.6.9. .. change:: :tags: orm :tickets: 2240 Added public attribute ".validators" to Mapper, an immutable dictionary view of all attributes that have been decorated with the @validates decorator. courtesy Stefano Fontanelli .. change:: :tags: orm :tickets: 2188 Fixed subtle bug that caused SQL to blow up if: column_property() against subquery + joinedload + LIMIT + order by the column property() occurred. . Also in 0.6.9 .. change:: :tags: orm :tickets: 2207 The join condition produced by with_parent as well as when using a "dynamic" relationship against a parent will generate unique bindparams, rather than incorrectly repeating the same bindparam. . Also in 0.6.9. .. change:: :tags: orm :tickets: Added the same "columns-only" check to mapper.polymorphic_on as used when receiving user arguments to relationship.order_by, foreign_keys, remote_side, etc. .. 
.. change:: :tags: orm :tickets: 2190 Fixed bug whereby comparison of column expression to a Query() would not call as_scalar() on the underlying SELECT statement to produce a scalar subquery, in the way that occurs if you called it on Query().subquery().
.. change:: :tags: orm :tickets: 2194 Fixed declarative bug where a class inheriting from a superclass of the same name would fail due to an unnecessary lookup of the name in the _decl_class_registry.
.. change:: :tags: orm :tickets: 2199 Repaired the "no statement condition" assertion in Query which would attempt to raise if a generative method were called after from_statement() was called. Also in 0.6.9.
.. change:: :tags: sql :tickets: 2188 Fixed two subtle bugs involving column correspondence in a selectable, one with the same labeled subquery repeated, the other when the label has been "grouped" and loses itself.
.. change:: :tags: schema :tickets: 2187 New feature: with_variant() method on all types. Produces an instance of Variant(), a special TypeDecorator which will select the usage of a different type based on the dialect in use; a short sketch follows below.
.. change:: :tags: schema :tickets: Added an informative error message when ForeignKeyConstraint refers to a column name in the parent that is not found. Also in 0.6.9.
.. change:: :tags: schema :tickets: 2206 Fixed bug whereby adaptation of old append_ddl_listener() function was passing unexpected \**kw through to the Table event. Table gets no kws, the MetaData event in 0.6 would get "tables=somecollection", this behavior is preserved.
.. change:: :tags: schema :tickets: Fixed bug where "autoincrement" detection on Table would fail if the type had no "affinity" value, in particular this would occur when using the UUID example on the site that uses TypeEngine as the "impl".
.. change:: :tags: schema :tickets: 2209 Added an improved repr() to TypeEngine objects that will only display constructor args which are positional or kwargs that deviate from the default.
.. change:: :tags: engine :tickets: Context manager provided by Connection.begin() will issue rollback() if the commit() fails, not just if an exception occurs.
.. change:: :tags: engine :tickets: 1682 Use urllib.parse_qsl() in Python 2.6 and above, no deprecation warning about cgi.parse_qsl()
.. change:: :tags: engine :tickets: Added mixin class sqlalchemy.ext.DontWrapMixin. User-defined exceptions of this type are never wrapped in StatementException when they occur in the context of a statement execution.
.. change:: :tags: engine :tickets: StatementException wrapping will display the original exception class in the message.
.. change:: :tags: engine :tickets: 2201 Failures on connect which raise dbapi.Error will forward the error to dialect.is_disconnect() and set the "connection_invalidated" flag if the dialect knows this to be a potentially "retryable" condition. Only Oracle ORA-01033 implemented for now.
.. change:: :tags: sqlite :tickets: 2189 SQLite dialect no longer strips quotes off of reflected default value, allowing a round trip CREATE TABLE to work. This is consistent with other dialects that also maintain the exact form of the default.
.. change:: :tags: postgresql :tickets: 2198 Added new "postgresql_ops" argument to Index, allows specification of PostgreSQL operator classes for indexed columns. Courtesy Filip Zyzniewski.
.. change:: :tags: mysql :tickets: 2186 Fixed OurSQL dialect to use ansi-neutral quote symbol "'" for XA commands instead of '"'. Also in 0.6.9.
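A minimal sketch of the with_variant() feature noted above; the column name is illustrative only::

    from sqlalchemy import Column, String
    from sqlalchemy.dialects import mysql

    # plain String on most backends, MySQL's LONGTEXT when the mysql dialect is in use
    BigText = String().with_variant(mysql.LONGTEXT(), 'mysql')

    body = Column('body', BigText)

..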
change:: :tags: mssql :tickets: Adjusted the pyodbc dialect such that bound values are passed as bytes and not unicode if the "Easysoft" unix drivers are detected. This is the same behavior as occurs with FreeTDS. Easysoft appears to segfault if Python unicodes are passed under certain circumstances. .. change:: :tags: oracle :tickets: 2200 Added ORA-00028 to disconnect codes, use cx_oracle _Error.code to get at the code,. Also in 0.6.9. .. change:: :tags: oracle :tickets: 2201 Added ORA-01033 to disconnect codes, which can be caught during a connection event. .. change:: :tags: oracle :tickets: 2220 repaired the oracle.RAW type which did not generate the correct DDL. Also in 0.6.9. .. change:: :tags: oracle :tickets: 2212 added CURRENT to reserved word list. Also in 0.6.9. .. change:: :tags: oracle :tickets: Fixed bug in the mutable extension whereby if the same type were used twice in one mapping, the attributes beyond the first would not get instrumented. .. change:: :tags: oracle :tickets: Fixed bug in the mutable extension whereby if None or a non-corresponding type were set, an error would be raised. None is now accepted which assigns None to all attributes, illegal values raise ValueError. .. change:: :tags: examples :tickets: Repaired the examples/versioning test runner to not rely upon SQLAlchemy test libs, nosetests must be run from within examples/versioning to get around setup.cfg breaking it. .. change:: :tags: examples :tickets: Tweak to examples/versioning to pick the correct foreign key in a multi-level inheritance situation. .. change:: :tags: examples :tickets: Fixed the attribute shard example to check for bind param callable correctly in 0.7 style. .. changelog:: :version: 0.7.1 :released: Sun Jun 05 2011 .. change:: :tags: general :tickets: 2184 Added a workaround for Python bug 7511 where failure of C extension build does not raise an appropriate exception on Windows 64 bit + VC express .. change:: :tags: orm :tickets: 1912 "delete-orphan" cascade is now allowed on self-referential relationships - this since SQLA 0.7 no longer enforces "parent with no child" at the ORM level; this check is left up to foreign key nullability. Related to .. change:: :tags: orm :tickets: 2180 Repaired new "mutable" extension to propagate events to subclasses correctly; don't create multiple event listeners for subclasses either. .. change:: :tags: orm :tickets: 2170 Modify the text of the message which occurs when the "identity" key isn't detected on flush, to include the common cause that the Column isn't set up to detect auto-increment correctly;. Also in 0.6.8. .. change:: :tags: orm :tickets: 2182 Fixed bug where transaction-level "deleted" collection wouldn't be cleared of expunged states, raising an error if they later became transient. Also in 0.6.8. .. change:: :tags: sql :tickets: Fixed bug whereby metadata.reflect(bind) would close a Connection passed as a bind argument. Regression from 0.6. .. change:: :tags: sql :tickets: Streamlined the process by which a Select determines what's in it's '.c' collection. Behaves identically, except that a raw ClauseList() passed to select([]) (which is not a documented case anyway) will now be expanded into its individual column elements instead of being ignored. .. change:: :tags: engine :tickets: Deprecate schema/SQL-oriented methods on Connection/Engine that were never well known and are redundant: reflecttable(), create(), drop(), text(), engine.func .. 
change:: :tags: engine :tickets: 2178 Adjusted the __contains__() method of a RowProxy result row such that no exception throw is generated internally; NoSuchColumnError() also will generate its message regardless of whether or not the column construct can be coerced to a string.. Also in 0.6.8. .. change:: :tags: sqlite :tickets: 2173 Accept None from cursor.fetchone() when "PRAGMA read_uncommitted" is called to determine current isolation mode at connect time and default to SERIALIZABLE; this to support SQLite versions pre-3.3.0 that did not have this feature. .. change:: :tags: postgresql :tickets: 2175 Some unit test fixes regarding numeric arrays, MATCH operator. A potential floating-point inaccuracy issue was fixed, and certain tests of the MATCH operator only execute within an EN-oriented locale for now. . Also in 0.6.8. .. change:: :tags: mysql :tickets: Unit tests pass 100% on MySQL installed on windows. .. change:: :tags: mysql :tickets: 2181 Removed the "adjust casing" step that would fail when reflecting a table on MySQL on windows with a mixed case name. After some experimenting with a windows MySQL server, it's been determined that this step wasn't really helping the situation much; MySQL does not return FK names with proper casing on non-windows platforms either, and removing the step at least allows the reflection to act more like it does on other OSes. A warning here has been considered but its difficult to determine under what conditions such a warning can be raised, so punted on that for now - added some docs instead. .. change:: :tags: mysql :tickets: supports_sane_rowcount will be set to False if using MySQLdb and the DBAPI doesn't provide the constants.CLIENT module. .. changelog:: :version: 0.7.0 :released: Fri May 20 2011 .. change:: :tags: :tickets: This section documents those changes from 0.7b4 to 0.7.0. For an overview of what's new in SQLAlchemy 0.7, see http://www.sqlalchemy.org/trac/wiki/07Migration .. change:: :tags: orm :tickets: 2069 Fixed regression introduced in 0.7b4 (!) whereby query.options(someoption("nonexistent name")) would fail to raise an error. Also added additional error catching for cases where the option would try to build off a column-based element, further fixed up some of the error messages tailored in .. change:: :tags: orm :tickets: 2162 query.count() emits "count(*)" instead of "count(1)". .. change:: :tags: orm :tickets: 2155 Fine tuning of Query clause adaptation when from_self(), union(), or other "select from myself" operation, such that plain SQL expression elements added to filter(), order_by() etc. which are present in the nested "from myself" query *will* be adapted in the same way an ORM expression element will, since these elements are otherwise not easily accessible. .. change:: :tags: orm :tickets: 2149 Fixed bug where determination of "self referential" relationship would fail with no workaround for joined-inh subclass related to itself, or joined-inh subclass related to a subclass of that with no cols in the sub-sub class in the join condition. Also in 0.6.8. .. change:: :tags: orm :tickets: 2153 mapper() will ignore non-configured foreign keys to unrelated tables when determining inherit condition between parent and child class, but will raise as usual for unresolved columns and table names regarding the inherited table. This is an enhanced generalization of behavior that was already applied to declarative previously. 
0.6.8 has a more conservative version of this which doesn't fundamentally alter how join conditions are determined. .. change:: :tags: orm :tickets: 2144 It is an error to call query.get() when the given entity is not a single, full class entity or mapper (i.e. a column). This is a deprecation warning in 0.6.8. .. change:: :tags: orm :tickets: 2148 Fixed a potential KeyError which under some circumstances could occur with the identity map, part of .. change:: :tags: orm :tickets: added Query.with_session() method, switches Query to use a different session. .. change:: :tags: orm :tickets: 2131 horizontal shard query should use execution options per connection as per .. change:: :tags: orm :tickets: 2151 a non_primary mapper will inherit the _identity_class of the primary mapper. This so that a non_primary established against a class that's normally in an inheritance mapping will produce results that are identity-map compatible with that of the primary mapper (also in 0.6.8) .. change:: :tags: orm :tickets: 2163 Fixed the error message emitted for "can't execute syncrule for destination column 'q'; mapper 'X' does not map this column" to reference the correct mapper. . Also in 0.6.8. .. change:: :tags: orm :tickets: 1502 polymorphic_union() gets a "cast_nulls" option, disables the usage of CAST when it renders the labeled NULL columns. .. change:: :tags: orm :tickets: polymorphic_union() renders the columns in their original table order, as according to the first table/selectable in the list of polymorphic unions in which they appear. (which is itself an unordered mapping unless you pass an OrderedDict). .. change:: :tags: orm :tickets: 2171 Fixed bug whereby mapper mapped to an anonymous alias would fail if logging were used, due to unescaped % sign in the alias name. Also in 0.6.8. .. change:: :tags: sql :tickets: 2167 Fixed bug whereby nesting a label of a select() with another label in it would produce incorrect exported columns. Among other things this would break an ORM column_property() mapping against another column_property(). . Also in 0.6.8 .. change:: :tags: sql :tickets: Changed the handling in determination of join conditions such that foreign key errors are only considered between the two given tables. That is, t1.join(t2) will report FK errors that involve 't1' or 't2', but anything involving 't3' will be skipped. This affects join(), as well as ORM relationship and inherit condition logic. .. change:: :tags: sql :tickets: Some improvements to error handling inside of the execute procedure to ensure auto-close connections are really closed when very unusual DBAPI errors occur. .. change:: :tags: sql :tickets: metadata.reflect() and reflection.Inspector() had some reliance on GC to close connections which were internally procured, fixed this. .. change:: :tags: sql :tickets: 2140 Added explicit check for when Column .name is assigned as blank string .. change:: :tags: sql :tickets: 2147 Fixed bug whereby if FetchedValue was passed to column server_onupdate, it would not have its parent "column" assigned, added test coverage for all column default assignment patterns. also in 0.6.8 .. change:: :tags: postgresql :tickets: Fixed the psycopg2_version parsing in the psycopg2 dialect. .. change:: :tags: postgresql :tickets: 2141 Fixed bug affecting PG 9 whereby index reflection would fail if against a column whose name had changed. . Also in 0.6.8. .. 
change:: :tags: mssql :tickets: 2169 Fixed bug in MSSQL dialect whereby the aliasing applied to a schema-qualified table would leak into enclosing select statements. Also in 0.6.8.
.. change:: :tags: documentation :tickets: 2152 Removed the usage of the "collections.MutableMapping" abc from the ext.mutable docs as it was being used incorrectly and makes the example more difficult to understand in any case.
.. change:: :tags: examples :tickets: removed the ancient "polymorphic association" examples and replaced with an updated set of examples that use declarative mixins, "generic_associations". Each presents an alternative table layout.
.. change:: :tags: ext :tickets: 2143 Fixed bugs in sqlalchemy.ext.mutable extension where `None` was not appropriately handled, replacement events were not appropriately handled.
.. changelog:: :version: 0.7.0b4 :released: Sun Apr 17 2011
.. change:: :tags: general :tickets: Changes to the format of CHANGES, this file. The format changes have been applied to the 0.7 releases.
.. change:: :tags: general :tickets: The "-declarative" changes will now be listed directly under the "-orm" section, as these are closely related.
.. change:: :tags: general :tickets: The 0.5 series changes have been moved to the file CHANGES_PRE_06 which replaces CHANGES_PRE_05.
.. change:: :tags: general :tickets: The changelog for 0.6.7 and subsequent within the 0.6 series is now listed only in the CHANGES file within the 0.6 branch. In the 0.7 CHANGES file (i.e. this file), all the 0.6 changes are listed inline within the 0.7 section in which they were also applied (since all 0.6 changes are in 0.7 as well). Changes that also apply to a 0.6 version are noted, as are any differences in implementation/behavior.
.. change:: :tags: orm :tickets: 2122 Some fixes to "evaluate" and "fetch" evaluation when query.update(), query.delete() are called. The retrieval of records is done after autoflush in all cases, and before update/delete is emitted, guarding against unflushed data present as well as expired objects failing during the evaluation.
.. change:: :tags: orm :tickets: 2063 Reworded the exception raised when a flush is attempted of a subclass that is not polymorphic against the supertype.
.. change:: :tags: orm :tickets: Still more wording adjustments when a query option can't find the target entity. Explain that the path must be from one of the root entities.
.. change:: :tags: orm :tickets: 2123 Some fixes to the state handling regarding backrefs, typically when autoflush=False, where the back-referenced collection wouldn't properly handle add/removes with no net change. Thanks to Richard Murri for the test case + patch. (also in 0.6.7).
.. change:: :tags: orm :tickets: 2127 Added checks inside the UOW to detect the unusual condition of being asked to UPDATE or DELETE on a primary key value that contains NULL in it.
.. change:: :tags: orm :tickets: 2127 Some refinements to attribute history. More changes are pending possibly in 0.8, but for now history has been modified such that scalar history doesn't have a "side effect" of populating None for a non-present value. This allows a slightly better ability to distinguish between a None set and no actual change.
.. change:: :tags: orm :tickets: 2130 a "having" clause would be copied from the inside to the outside query if from_self() were used; in particular this would break an 0.7 style count() query. (also in 0.6.7)
.. change:: :tags: orm :tickets: 2131 the Query.execution_options() method now passes those options to the Connection rather than the SELECT statement, so that all available options including isolation level and compiled cache may be used.
.. change:: :tags: sql :tickets: 2131 The "compiled_cache" execution option now raises an error when passed to a SELECT statement rather than a Connection. Previously it was being ignored entirely. We may look into having this option work on a per-statement level at some point.
.. change:: :tags: sql :tickets: Restored the "catchall" constructor on the base TypeEngine class, with a deprecation warning. This so that code which does something like Integer(11) still succeeds.
.. change:: :tags: sql :tickets: 2104 Fixed regression whereby MetaData() coming back from unpickling did not keep track of new things it keeps track of now, i.e. collection of Sequence objects, list of schema names.
.. change:: :tags: sql :tickets: 2116 The limit/offset keywords to select() as well as the value passed to select.limit()/offset() will be coerced to integer. (also in 0.6.7)
.. change:: :tags: sql :tickets: fixed bug where "from" clause gathering from an over() clause would be an itertools.chain() and not a list, causing "can only concatenate list" TypeError when combined with other clauses.
.. change:: :tags: sql :tickets: 2134 Fixed incorrect usage of "," in over() clause being placed between the "partition" and "order by" clauses.
.. change:: :tags: sql :tickets: 2105 Before/after attach events for PrimaryKeyConstraint now function, tests added for before/after events on all constraint types.
.. change:: :tags: sql :tickets: 2117 Added explicit true()/false() constructs to expression lib - coercion rules will intercept "False"/"True" into these constructs. In 0.6, the constructs were typically converted straight to string, which was no longer accepted in 0.7.
.. change:: :tags: engine :tickets: 2129 The C extension is now enabled by default on CPython 2.x with a fallback to pure python if it fails to compile.
.. change:: :tags: schema :tickets: 2109 The 'useexisting' flag on Table has been superseded by a new pair of flags 'keep_existing' and 'extend_existing'. 'extend_existing' is equivalent to 'useexisting' - the existing Table is returned, and additional constructor elements are added. With 'keep_existing', the existing Table is returned, but additional constructor elements are not added - these elements are only applied when the Table is newly created.
.. change:: :tags: types :tickets: 2081 REAL has been added to the core types. Supported by Postgresql, SQL Server, MySQL, SQLite. Note that the SQL Server and MySQL versions, which add extra arguments, are also still available from those dialects.
.. change:: :tags: types :tickets: 2106 Added @event.listens_for() decorator, which, given a target + event name, applies the decorated function as a listener; a short sketch follows below.
.. change:: :tags: pool :tickets: 2103 AssertionPool now stores the traceback indicating where the currently checked out connection was acquired; this traceback is reported within the assertion raised upon a second concurrent checkout; courtesy Gunnlaugur Briem
.. change:: :tags: pool :tickets: The "pool.manage" feature doesn't use pickle anymore to hash the arguments for each pool.
.. change:: :tags: sqlite :tickets: 2115 Fixed bug where reflection of foreign key created as "REFERENCES <tablename>" without col name would fail. (also in 0.6.7)
.. change:: :tags: postgresql :tickets: Psycopg2 for Python 3 is now supported.
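A minimal sketch of the @event.listens_for() decorator noted above; the engine and the listener body are illustrative only::

    from sqlalchemy import create_engine, event

    engine = create_engine('sqlite://')

    @event.listens_for(engine, 'connect')
    def on_connect(dbapi_connection, connection_record):
        # invoked for each new DBAPI connection established by the pool
        print("new DBAPI connection:", dbapi_connection)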
.. change:: :tags: postgresql :tickets: 2132 Fixed support for precision numerics when using pg8000.
.. change:: :tags: oracle :tickets: 2100 Using column names that would require quotes for the column itself or for a name-generated bind parameter, such as names with special characters, underscores, non-ascii characters, now properly translate bind parameter keys when talking to cx_oracle. (Also in 0.6.7)
.. change:: :tags: oracle :tickets: 2116 Oracle dialect adds use_binds_for_limits=False create_engine() flag, will render the LIMIT/OFFSET values inline instead of as binds, reported to modify the execution plan used by Oracle. (Also in 0.6.7)
.. change:: :tags: documentation :tickets: 2029 Documented SQLite DATE/TIME/DATETIME types. (also in 0.6.7)
.. change:: :tags: documentation :tickets: 2118 Fixed mutable extension docs to show the correct type-association methods.
.. changelog:: :version: 0.7.0b3 :released: Sun Mar 20 2011
.. change:: :tags: general :tickets: Lots of fixes to unit tests when run under Pypy (courtesy Alex Gaynor).
.. change:: :tags: orm :tickets: 2093 Changed the underlying approach to query.count(). query.count() is now in all cases exactly: query.from_self(func.count(literal_column('1'))).scalar(). That is, "select count(1) from (<the full query>)". This produces a subquery in all cases, but vastly simplifies all the guessing count() tried to do previously, which would still fail in many scenarios particularly when joined table inheritance and other joins were involved. If the subquery produced for an otherwise very simple count is really an issue, use query(func.count()) as an optimization.
.. change:: :tags: orm :tickets: 2087 some changes to the identity map regarding rare weakref callbacks during iterations. The mutex has been removed as it apparently can cause a reentrant (i.e. in one thread) deadlock, perhaps when gc collects objects at the point of iteration in order to gain more memory. It is hoped that "dictionary changed during iteration" will be exceedingly rare as iteration methods internally acquire the full list of objects in a single values() call. Note 0.6.7 has a more conservative fix here which still keeps the mutex in place.
.. change:: :tags: orm :tickets: 2082 A tweak to the unit of work causes it to order the flush along relationship() dependencies even if the given objects don't have any inter-attribute references in memory, which was the behavior in 0.5 and earlier, so a flush of Parent/Child with only foreign key/primary key set will succeed. This is while still maintaining 0.6 and above's behavior of not generating a ton of useless internal dependency structures within the flush that don't correspond to state actually within the current flush.
.. change:: :tags: orm :tickets: 2069 Improvements to the error messages emitted when querying against column-only entities in conjunction with (typically incorrectly) using loader options, where the parent entity is not fully present.
.. change:: :tags: orm :tickets: 2098 Fixed bug in query.options() whereby a path applied to a lazyload using string keys could overlap a same named attribute on the wrong entity. Note 0.6.7 has a more conservative fix to this.
.. change:: :tags: declarative :tickets: 2091 Arguments in __mapper_args__ that aren't "hashable" aren't mistaken for always-hashable, possibly-column arguments. (also in 0.6.7)
.. change:: :tags: sql :tickets: Added a fully descriptive error message for the case where Column is subclassed and _make_proxy() fails to make a copy due to TypeError on the constructor.
The method _constructor should be implemented in this case. .. change:: :tags: sql :tickets: 2095 Added new event "column_reflect" for Table objects. Receives the info dictionary about a Column before the object is generated within reflection, and allows modification to the dictionary for control over most aspects of the resulting Column including key, name, type, info dictionary. .. change:: :tags: sql :tickets: To help with the "column_reflect" event being used with specific Table objects instead of all instances of Table, listeners can be added to a Table object inline with its construction using a new argument "listeners", a list of tuples of the form (, ), which are applied to the Table before the reflection process begins. .. change:: :tags: sql :tickets: 2085 Added new generic function "next_value()", accepts a Sequence object as its argument and renders the appropriate "next value" generation string on the target platform, if supported. Also provides ".next_value()" method on Sequence itself. .. change:: :tags: sql :tickets: 2084 func.next_value() or other SQL expression can be embedded directly into an insert() construct, and if implicit or explicit "returning" is used in conjunction with a primary key column, the newly generated value will be present in result.inserted_primary_key. .. change:: :tags: sql :tickets: 2089 Added accessors to ResultProxy "returns_rows", "is_insert" (also in 0.6.7) .. change:: :tags: engine :tickets: 2097 Fixed AssertionPool regression bug. .. change:: :tags: engine :tickets: 2060 Changed exception raised to ArgumentError when an invalid dialect is specified. .. change:: :tags: postgresql :tickets: 2092 Added RESERVED_WORDS for postgresql dialect. (also in 0.6.7) .. change:: :tags: postgresql :tickets: 2073 Fixed the BIT type to allow a "length" parameter, "varying" parameter. Reflection also fixed. (also in 0.6.7) .. change:: :tags: mssql :tickets: 2071 Rewrote the query used to get the definition of a view, typically when using the Inspector interface, to use sys.sql_modules instead of the information schema, thereby allowing views definitions longer than 4000 characters to be fully returned. (also in 0.6.7) .. change:: :tags: firebird :tickets: 2083 The "implicit_returning" flag on create_engine() is honored if set to False. (also in 0.6.7) .. change:: :tags: informix :tickets: 2092 Added RESERVED_WORDS informix dialect. (also in 0.6.7) .. change:: :tags: ext :tickets: 2090 The horizontal_shard ShardedSession class accepts the common Session argument "query_cls" as a constructor argument, to enable further subclassing of ShardedQuery. (also in 0.6.7) .. change:: :tags: examples :tickets: Updated the association, association proxy examples to use declarative, added a new example dict_of_sets_with_default.py, a "pushing the envelope" example of association proxy. .. change:: :tags: examples :tickets: 2090 The Beaker caching example allows a "query_cls" argument to the query_callable() function. (also in 0.6.7) .. changelog:: :version: 0.7.0b2 :released: Sat Feb 19 2011 .. change:: :tags: orm :tickets: 2053 Fixed bug whereby Session.merge() would call the load() event with one too few arguments. .. change:: :tags: orm :tickets: 2052 Added logic which prevents the generation of events from a MapperExtension or SessionExtension from generating do-nothing events for all the methods not overridden. .. change:: :tags: declarative :tickets: 2058 Fixed regression whereby composite() with Column objects placed inline would fail to initialize. 
The Column objects can now be inline with the composite() or external and pulled in via name or object ref. .. change:: :tags: declarative :tickets: 2061 Fix error message referencing old @classproperty name to reference @declared_attr (also in 0.6.7) .. change:: :tags: declarative :tickets: 1468 the dictionary at the end of the __table_args__ tuple is now optional. .. change:: :tags: sql :tickets: 2059 Renamed the EngineEvents event class to ConnectionEvents. As these classes are never accessed directly by end-user code, this strictly is a documentation change for end users. Also simplified how events get linked to engines and connections internally. .. change:: :tags: sql :tickets: 2055 The Sequence() construct, when passed a MetaData() object via its 'metadata' argument, will be included in CREATE/DROP statements within metadata.create_all() and metadata.drop_all(), including "checkfirst" logic. .. change:: :tags: sql :tickets: 2064 The Column.references() method now returns True if it has a foreign key referencing the given column exactly, not just it's parent table. .. change:: :tags: postgresql :tickets: 2065 Fixed regression from 0.6 where SMALLINT and BIGINT types would both generate SERIAL on an integer PK column, instead of SMALLINT and BIGSERIAL .. change:: :tags: ext :tickets: 2054 Association proxy now has correct behavior for any(), has(), and contains() when proxying a many-to-one scalar attribute to a one-to-many collection (i.e. the reverse of the 'typical' association proxy use case) .. change:: :tags: examples :tickets: Beaker example now takes into account 'limit' and 'offset', bind params within embedded FROM clauses (like when you use union() or from_self()) when generating a cache key. .. changelog:: :version: 0.7.0b1 :released: Sat Feb 12 2011 .. change:: :tags: :tickets: Detailed descriptions of each change below are described at: http://www.sqlalchemy.org/trac/wiki/07Migration .. change:: :tags: general :tickets: 1902 New event system, supercedes all extensions, listeners, etc. .. change:: :tags: general :tickets: 1926 Logging enhancements .. change:: :tags: general :tickets: 1949 Setup no longer installs a Nose plugin .. change:: :tags: general :tickets: The "sqlalchemy.exceptions" alias in sys.modules has been removed. Base SQLA exceptions are available via "from sqlalchemy import exc". The "exceptions" alias for "exc" remains in "sqlalchemy" for now, it's just not patched into sys.modules. .. change:: :tags: orm :tickets: 1923 More succinct form of query.join(target, onclause) .. change:: :tags: orm :tickets: 1903 Hybrid Attributes, implements/supercedes synonym() .. change:: :tags: orm :tickets: 2008 Rewrite of composites .. change:: :tags: orm :tickets: Mutation Event Extension, supercedes "mutable=True" .. seealso:: :ref:`07_migration_mutation_extension` .. change:: :tags: orm :tickets: 1980 PickleType and ARRAY mutability turned off by default .. change:: :tags: orm :tickets: 1895 Simplified polymorphic_on assignment .. change:: :tags: orm :tickets: 1912 Flushing of Orphans that have no parent is allowed .. change:: :tags: orm :tickets: 2041 Adjusted flush accounting step to occur before the commit in the case of autocommit=True. This allows autocommit=True to work appropriately with expire_on_commit=True, and also allows post-flush session hooks to operate in the same transactional context as when autocommit=False. .. change:: :tags: orm :tickets: 1973 Warnings generated when collection members, scalar referents not part of the flush .. 
change:: :tags: orm :tickets: 1876 Non-`Table`-derived constructs can be mapped .. change:: :tags: orm :tickets: 1942 Tuple label names in Query Improved .. change:: :tags: orm :tickets: 1892 Mapped column attributes reference the most specific column first .. change:: :tags: orm :tickets: 1896 Mapping to joins with two or more same-named columns requires explicit declaration .. change:: :tags: orm :tickets: 1875 Mapper requires that polymorphic_on column be present in the mapped selectable .. change:: :tags: orm :tickets: 1966 compile_mappers() renamed configure_mappers(), simplified configuration internals .. change:: :tags: orm :tickets: 2018 the aliased() function, if passed a SQL FromClause element (i.e. not a mapped class), will return element.alias() instead of raising an error on AliasedClass. .. change:: :tags: orm :tickets: 2027 Session.merge() will check the version id of the incoming state against that of the database, assuming the mapping uses version ids and incoming state has a version_id assigned, and raise StaleDataError if they don't match. .. change:: :tags: orm :tickets: 1996 Session.connection(), Session.execute() accept 'bind', to allow execute/connection operations to participate in the open transaction of an engine explicitly. .. change:: :tags: orm :tickets: Query.join(), Query.outerjoin(), eagerload(), eagerload_all(), others no longer allow lists of attributes as arguments (i.e. option([x, y, z]) form, deprecated since 0.5) .. change:: :tags: orm :tickets: ScopedSession.mapper is removed (deprecated since 0.5). .. change:: :tags: orm :tickets: 2031 Horizontal shard query places 'shard_id' in context.attributes where it's accessible by the "load()" event. .. change:: :tags: orm :tickets: 2032 A single contains_eager() call across multiple entities will indicate all collections along that path should load, instead of requiring distinct contains_eager() calls for each endpoint (which was never correctly documented). .. change:: :tags: orm :tickets: The "name" field used in orm.aliased() now renders in the resulting SQL statement. .. change:: :tags: orm :tickets: 1473 Session weak_instance_dict=False is deprecated. .. change:: :tags: orm :tickets: 2046 An exception is raised in the unusual case that an append or similar event on a collection occurs after the parent object has been dereferenced, which prevents the parent from being marked as "dirty" in the session. Was a warning in 0.6.6. .. change:: :tags: orm :tickets: 1069 Query.distinct() now accepts column expressions as \*args, interpreted by the Postgresql dialect as DISTINCT ON (). .. change:: :tags: orm :tickets: 2049 Additional tuning to "many-to-one" relationship loads during a flush(). A change in version 0.6.6 ([ticket:2002]) required that more "unnecessary" m2o loads during a flush could occur. Extra loading modes have been added so that the SQL emitted in this specific use case is trimmed back, while still retrieving the information the flush needs in order to not miss anything. .. change:: :tags: orm :tickets: the value of "passive" as passed to attributes.get_history() should be one of the constants defined in the attributes package. Sending True or False is deprecated. .. change:: :tags: orm :tickets: 2030 Added a `name` argument to `Query.subquery()`, to allow a fixed name to be assigned to the alias object. (also in 0.6.7) .. 
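As a sketch of Query.distinct() accepting column expressions as described above, assuming a mapped ``User`` class; the class, engine URL and column names are hypothetical::

    from sqlalchemy import Column, Integer, String, create_engine
    from sqlalchemy.orm import Session
    from sqlalchemy.ext.declarative import declarative_base

    Base = declarative_base()

    class User(Base):
        __tablename__ = 'users'
        id = Column(Integer, primary_key=True)
        name = Column(String(50))

    session = Session(create_engine("postgresql://scott:tiger@localhost/test"))

    # on the Postgresql dialect this renders SELECT DISTINCT ON (users.name) ...
    q = session.query(User).distinct(User.name).order_by(User.name, User.id)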
change::
    :tags: orm
    :tickets: 2019

    A warning is emitted when a joined-table inheriting mapper has no primary keys on the locally mapped table (but has pks on the superclass table). (also in 0.6.7)

.. change::
    :tags: orm
    :tickets: 2038

    Fixed bug where "middle" class in a polymorphic hierarchy would have no 'polymorphic_on' column if it didn't also specify a 'polymorphic_identity', leading to strange errors upon refresh, wrong class loaded when querying from that target. Also emits the correct WHERE criterion when using single table inheritance. (also in 0.6.7)

.. change::
    :tags: orm
    :tickets: 1995

    Fixed bug where a column with a SQL or server side default that was excluded from a mapping with include_properties or exclude_properties would result in UnmappedColumnError. (also in 0.6.7)

.. change::
    :tags: orm
    :tickets: 2046

    A warning is emitted in the unusual case that an append or similar event on a collection occurs after the parent object has been dereferenced, which prevents the parent from being marked as "dirty" in the session. This will be an exception in 0.7. (also in 0.6.7)

.. change::
    :tags: declarative
    :tickets: 2050

    Added an explicit check for the case that the name 'metadata' is used for a column attribute on a declarative class. (also in 0.6.7)

.. change::
    :tags: sql
    :tickets: 1844

    Added over() function, method to FunctionElement classes, produces the _Over() construct which in turn generates "window functions", i.e. "<expression> OVER (PARTITION BY <criteria> ORDER BY <criteria>)".

.. change::
    :tags: sql
    :tickets: 805

    LIMIT/OFFSET clauses now use bind parameters

.. change::
    :tags: sql
    :tickets: 1069

    select.distinct() now accepts column expressions as \*args, interpreted by the Postgresql dialect as DISTINCT ON (<expressions>). Note this was already available via passing a list to the `distinct` keyword argument to select().

.. change::
    :tags: sql
    :tickets:

    select.prefix_with() accepts multiple expressions (i.e. \*expr), 'prefix' keyword argument to select() accepts a list or tuple.

.. change::
    :tags: sql
    :tickets:

    Passing a string to the `distinct` keyword argument of `select()` for the purpose of emitting special MySQL keywords (DISTINCTROW etc.) is deprecated - use `prefix_with()` for this.

.. change::
    :tags: sql
    :tickets: 2006, 2005

    TypeDecorator works with primary key columns

.. change::
    :tags: sql
    :tickets: 1897

    DDL() constructs now escape percent signs

.. change::
    :tags: sql
    :tickets: 1917, 1893

    Table.c / MetaData.tables refined a bit, don't allow direct mutation

.. change::
    :tags: sql
    :tickets: 1950

    Callables passed to `bindparam()` don't get evaluated

.. change::
    :tags: sql
    :tickets: 1870

    types.type_map is now private, types._type_map

.. change::
    :tags: sql
    :tickets: 1982

    Non-public Pool methods underscored

.. change::
    :tags: sql
    :tickets: 723

    Added NULLS FIRST and NULLS LAST support. It's implemented as an extension to the asc() and desc() operators, called nullsfirst() and nullslast().

.. change::
    :tags: sql
    :tickets:

    The Index() construct can be created inline with a Table definition, using strings as column names, as an alternative to the creation of the index outside of the Table.

.. change::
    :tags: sql
    :tickets: 2001

    execution_options() on Connection accepts an "isolation_level" argument, which sets the transaction isolation level for that connection only, until returned to the connection pool, for those backends which support it (SQLite, Postgresql)

..
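As a brief sketch of the over() window function construct noted above; the table and column names here are hypothetical::

    from sqlalchemy import Table, Column, Integer, String, MetaData, select, func

    meta = MetaData()
    emp = Table('employee', meta,
                Column('id', Integer, primary_key=True),
                Column('dept', String(30)),
                Column('salary', Integer))

    # rank employees by salary within each department using a window function
    stmt = select([
        emp.c.id,
        func.row_number().over(
            partition_by=emp.c.dept,
            order_by=emp.c.salary.desc()).label('rank')
    ])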
change:: :tags: sql :tickets: 2005 A TypeDecorator of Integer can be used with a primary key column, and the "autoincrement" feature of various dialects as well as the "sqlite_autoincrement" flag will honor the underlying database type as being Integer-based. .. change:: :tags: sql :tickets: 2020, 2021 Established consistency when server_default is present on an Integer PK column. SQLA doesn't pre-fetch these, nor do they come back in cursor.lastrowid (DBAPI). Ensured all backends consistently return None in result.inserted_primary_key for these. Regarding reflection for this case, reflection of an int PK col with a server_default sets the "autoincrement" flag to False, except in the case of a PG SERIAL col where we detected a sequence default. .. change:: :tags: sql :tickets: 2006 Result-row processors are applied to pre-executed SQL defaults, as well as cursor.lastrowid, when determining the contents of result.inserted_primary_key. .. change:: :tags: sql :tickets: Bind parameters present in the "columns clause" of a select are now auto-labeled like other "anonymous" clauses, which among other things allows their "type" to be meaningful when the row is fetched, as in result row processors. .. change:: :tags: sql :tickets: TypeDecorator is present in the "sqlalchemy" import space. .. change:: :tags: sql :tickets: 2015 Non-DBAPI errors which occur in the scope of an `execute()` call are now wrapped in sqlalchemy.exc.StatementError, and the text of the SQL statement and repr() of params is included. This makes it easier to identify statement executions which fail before the DBAPI becomes involved. .. change:: :tags: sql :tickets: 2048 The concept of associating a ".bind" directly with a ClauseElement has been explicitly moved to Executable, i.e. the mixin that describes ClauseElements which represent engine-executable constructs. This change is an improvement to internal organization and is unlikely to affect any real-world usage. .. change:: :tags: sql :tickets: 2028 Column.copy(), as used in table.tometadata(), copies the 'doc' attribute. (also in 0.6.7) .. change:: :tags: sql :tickets: 2023 Added some defs to the resultproxy.c extension so that the extension compiles and runs on Python 2.4. (also in 0.6.7) .. change:: :tags: sql :tickets: 2042 The compiler extension now supports overriding the default compilation of expression._BindParamClause including that the auto-generated binds within the VALUES/SET clause of an insert()/update() statement will also use the new compilation rules. (also in 0.6.7) .. change:: :tags: sql :tickets: 1921 SQLite dialect now uses `NullPool` for file-based databases .. change:: :tags: sql :tickets: 2036 The path given as the location of a sqlite database is now normalized via os.path.abspath(), so that directory changes within the process don't affect the ultimate location of a relative file path. .. change:: :tags: postgresql :tickets: 1083 When explicit sequence execution derives the name of the auto-generated sequence of a SERIAL column, which currently only occurs if implicit_returning=False, now accommodates if the table + column name is greater than 63 characters using the same logic Postgresql uses. (also in 0.6.7) .. change:: :tags: postgresql :tickets: 2044 Added an additional libpq message to the list of "disconnect" exceptions, "could not receive data from server" (also in 0.6.7) .. 
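A minimal sketch of a TypeDecorator of Integer used on a primary key column, as described above; the custom type itself is hypothetical::

    from sqlalchemy import Column, Integer, MetaData, Table, TypeDecorator

    class TokenID(TypeDecorator):
        """An Integer-based type; because impl is Integer, the autoincrement
        behavior of the primary key column is preserved."""

        impl = Integer

        def process_bind_param(self, value, dialect):
            return int(value) if value is not None else None

        def process_result_value(self, value, dialect):
            return value

    meta = MetaData()
    tokens = Table('tokens', meta, Column('id', TokenID, primary_key=True))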
change::
    :tags: mssql
    :tickets: 1833

    The String/Unicode types, and their counterparts VARCHAR/NVARCHAR, emit "max" as the length when no length is specified, so that the default length, normally '1' as per SQL server documentation, is instead 'unbounded'. This also occurs for the VARBINARY type. This behavior makes these types more closely compatible with Postgresql's VARCHAR type which is similarly unbounded when no length is specified.

.. change::
    :tags: mysql
    :tickets: 1991

    New DBAPI support for pymysql, a pure Python port of MySQL-python.

.. change::
    :tags: mysql
    :tickets: 2047

    oursql dialect accepts the same "ssl" arguments in create_engine() as that of MySQLdb. (also in 0.6.7)

.. change::
    :tags: firebird
    :tickets: 1885

    Some adjustments so that Interbase is supported as well. FB/Interbase version idents are parsed into a structure such as (8, 1, 1, 'interbase') or (2, 1, 588, 'firebird') so they can be distinguished.

SQLAlchemy-0.8.4/doc/_sources/changelog/changelog_08.txt0000644000076500000240000033207512251147345023564 0ustar classicstaff00000000000000
==============
0.8 Changelog
==============

.. changelog_imports::

    .. include:: changelog_07.rst
        :start-line: 5

.. changelog::
    :version: 0.8.4
    :released: December 8, 2013

.. change::
    :tags: bug, engine
    :versions: 0.9.0b2
    :tickets: 2881

    A DBAPI that raises an error on ``connect()`` which is not a subclass of dbapi.Error (such as ``TypeError``, ``NotImplementedError``, etc.) will propagate the exception unchanged. Previously, the error handling specific to the ``connect()`` routine would both inappropriately run the exception through the dialect's :meth:`.Dialect.is_disconnect` routine as well as wrap it in a :class:`sqlalchemy.exc.DBAPIError`. It is now propagated unchanged in the same way as occurs within the execute process.

.. change::
    :tags: bug, engine, pool
    :versions: 0.9.0b2
    :tickets: 2880

    The :class:`.QueuePool` has been enhanced to not block new connection attempts when an existing connection attempt is blocking. Previously, the production of new connections was serialized within the block that monitored overflow; the overflow counter is now altered within its own critical section outside of the connection process itself.

.. change::
    :tags: bug, engine, pool
    :versions: 0.9.0b2
    :tickets: 2522

    Made a slight adjustment to the logic which waits for a pooled connection to be available, such that for a connection pool with no timeout specified, it will every half a second break out of the wait to check for the so-called "abort" flag, which allows the waiter to break out in case the whole connection pool was dumped; normally the waiter should break out due to a notify_all() but it's possible this notify_all() is missed in very slim cases. This is an extension of logic first introduced in 0.8.0, and the issue has only been observed occasionally in stress tests.

.. change::
    :tags: bug, mssql
    :versions: 0.9.0b2
    :pullreq: bitbucket:7

    Fixed bug introduced in 0.8.0 where the ``DROP INDEX`` statement for an index in MSSQL would render incorrectly if the index were in an alternate schema; the schemaname/tablename would be reversed. The format has also been revised to match current MSSQL documentation. Courtesy Derek Harland.

.. change::
    :tags: feature, sql
    :tickets: 1443
    :versions: 0.9.0b1

    Added support for "unique constraint" reflection, via the :meth:`.Inspector.get_unique_constraints` method. Thanks to Roman Podolyaka for the patch.

..
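A small sketch of the unique constraint reflection feature noted above; the engine URL, table name, and the exact keys of the returned dictionaries are assumptions for illustration::

    from sqlalchemy import create_engine
    from sqlalchemy.engine import reflection

    engine = create_engine("postgresql://scott:tiger@localhost/test")
    insp = reflection.Inspector.from_engine(engine)

    # each entry is a dictionary describing one unique constraint
    for uc in insp.get_unique_constraints('user_account'):
        print(uc['name'], uc['column_names'])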
change:: :tags: bug, oracle :tickets: 2864 :versions: 0.9.0b2 Added ORA-02396 "maximum idle time" error code to list of "is disconnect" codes with cx_oracle. .. change:: :tags: bug, engine :tickets: 2871 :versions: 0.9.0b2 Fixed bug where SQL statement would be improperly ASCII-encoded when a pre-DBAPI :class:`.StatementError` were raised within :meth:`.Connection.execute`, causing encoding errors for non-ASCII statements. The stringification now remains within Python unicode thus avoiding encoding errors. .. change:: :tags: bug, oracle :tickets: 2870 :versions: 0.9.0b2 Fixed bug where Oracle ``VARCHAR`` types given with no length (e.g. for a ``CAST`` or similar) would incorrectly render ``None CHAR`` or similar. .. change:: :tags: bug, ext :tickets: 2869 :versions: 0.9.0b2 Fixed bug which prevented the ``serializer`` extension from working correctly with table or column names that contain non-ASCII characters. .. change:: :tags: bug, orm :tickets: 2818 :versions: 0.9.0b2 Fixed a regression introduced by :ticket:`2818` where the EXISTS query being generated would produce a "columns being replaced" warning for a statement with two same-named columns, as the internal SELECT wouldn't have use_labels set. .. change:: :tags: bug, postgresql :tickets: 2855 :versions: 0.9.0b2 Fixed bug where index reflection would mis-interpret indkey values when using the pypostgresql adapter, which returns these values as lists vs. psycopg2's return type of string. .. changelog:: :version: 0.8.3 :released: October 26, 2013 .. change:: :tags: bug, oracle :tickets: 2853 :versions: 0.9.0b1 Fixed bug where Oracle table reflection using synonyms would fail if the synonym and the table were in different remote schemas. Patch to fix courtesy Kyle Derr. .. change:: :tags: bug, sql :tickets: 2849 :versions: 0.9.0b1 Fixed bug where :func:`.type_coerce` would not interpret ORM elements with a ``__clause_element__()`` method properly. .. change:: :tags: bug, sql :tickets: 2842 :versions: 0.9.0b1 The :class:`.Enum` and :class:`.Boolean` types now bypass any custom (e.g. TypeDecorator) type in use when producing the CHECK constraint for the "non native" type. This so that the custom type isn't involved in the expression within the CHECK, since this expression is against the "impl" value and not the "decorated" value. .. change:: :tags: bug, postgresql :tickets: 2844 :versions: 0.9.0b1 Removed a 128-character truncation from the reflection of the server default for a column; this code was original from PG system views which truncated the string for readability. .. change:: :tags: bug, mysql :tickets: 2721, 2839 :versions: 0.9.0b1 The change in :ticket:`2721`, which is that the ``deferrable`` keyword of :class:`.ForeignKeyConstraint` is silently ignored on the MySQL backend, will be reverted as of 0.9; this keyword will now render again, raising errors on MySQL as it is not understood - the same behavior will also apply to the ``initially`` keyword. In 0.8, the keywords will remain ignored but a warning is emitted. Additionally, the ``match`` keyword now raises a :exc:`.CompileError` on 0.9 and emits a warning on 0.8; this keyword is not only silently ignored by MySQL but also breaks the ON UPDATE/ON DELETE options. To use a :class:`.ForeignKeyConstraint` that does not render or renders differently on MySQL, use a custom compilation option. An example of this usage has been added to the documentation, see :ref:`mysql_foreign_keys`. .. 
change:: :tags: bug, sql :tickets: 2825 :versions: 0.9.0b1 The ``.unique`` flag on :class:`.Index` could be produced as ``None`` if it was generated from a :class:`.Column` that didn't specify ``unique`` (where it defaults to ``None``). The flag will now always be ``True`` or ``False``. .. change:: :tags: feature, orm :tickets: 2836 :versions: 0.9.0b1 Added new option to :func:`.relationship` ``distinct_target_key``. This enables the subquery eager loader strategy to apply a DISTINCT to the innermost SELECT subquery, to assist in the case where duplicate rows are generated by the innermost query which corresponds to this relationship (there's not yet a general solution to the issue of dupe rows within subquery eager loading, however, when joins outside of the innermost subquery produce dupes). When the flag is set to ``True``, the DISTINCT is rendered unconditionally, and when it is set to ``None``, DISTINCT is rendered if the innermost relationship targets columns that do not comprise a full primary key. The option defaults to False in 0.8 (e.g. off by default in all cases), None in 0.9 (e.g. automatic by default). Thanks to Alexander Koval for help with this. .. seealso:: :ref:`change_2836` .. change:: :tags: bug, mysql :tickets: 2515 :versions: 0.9.0b1 MySQL-connector dialect now allows options in the create_engine query string to override those defaults set up in the connect, including "buffered" and "raise_on_warnings". .. change:: :tags: bug, postgresql :tickets: 2742 :versions: 0.9.0b1 Parenthesis will be applied to a compound SQL expression as rendered in the column list of a CREATE INDEX statement. .. change:: :tags: bug, sql :tickets: 2742 :versions: 0.9.0b1 Fixed bug in default compiler plus those of postgresql, mysql, and mssql to ensure that any literal SQL expression values are rendered directly as literals, instead of as bound parameters, within a CREATE INDEX statement. This also changes the rendering scheme for other DDL such as constraints. .. change:: :tags: bug, sql :tickets: 2815 :versions: 0.9.0b1 A :func:`.select` that is made to refer to itself in its FROM clause, typically via in-place mutation, will raise an informative error message rather than causing a recursion overflow. .. change:: :tags: bug, orm :tickets: 2813 :versions: 0.9.0b1 Fixed bug where using an annotation such as :func:`.remote` or :func:`.foreign` on a :class:`.Column` before association with a parent :class:`.Table` could produce issues related to the parent table not rendering within joins, due to the inherent copy operation performed by an annotation. .. change:: :tags: bug, sql :tickets: 2831 Non-working "schema" argument on :class:`.ForeignKey` is deprecated; raises a warning. Removed in 0.9. .. change:: :tags: bug, postgresql :tickets: 2819 :versions: 0.9.0b1 Fixed bug where Postgresql version strings that had a prefix preceding the words "Postgresql" or "EnterpriseDB" would not parse. Courtesy Scott Schaefer. .. change:: :tags: feature, engine :tickets: 2821 :versions: 0.9.0b1 ``repr()`` for the :class:`.URL` of an :class:`.Engine` will now conceal the password using asterisks. Courtesy Gunnlaugur Þór Briem. .. change:: :tags: bug, orm :tickets: 2818 :versions: 0.9.0b1 Fixed bug where :meth:`.Query.exists` failed to work correctly without any WHERE criterion. Courtesy Vladimir Magamedov. .. 
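As a brief sketch of the new ``distinct_target_key`` option on :func:`.relationship` described above; the classes and table names are hypothetical::

    from sqlalchemy import Column, ForeignKey, Integer
    from sqlalchemy.orm import relationship
    from sqlalchemy.ext.declarative import declarative_base

    Base = declarative_base()

    class Parent(Base):
        __tablename__ = 'parent'
        id = Column(Integer, primary_key=True)
        # the subquery eager loader applies DISTINCT to the innermost
        # SELECT subquery, helping to filter out duplicate rows
        children = relationship("Child", lazy="subquery",
                                distinct_target_key=True)

    class Child(Base):
        __tablename__ = 'child'
        id = Column(Integer, primary_key=True)
        parent_id = Column(Integer, ForeignKey('parent.id'))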
change::
    :tags: bug, sql
    :tickets: 2811
    :versions: 0.9.0b1

    Fixed bug where using the ``column_reflect`` event to change the ``.key`` of the incoming :class:`.Column` would prevent primary key constraints, indexes, and foreign key constraints from being correctly reflected.

.. change::
    :tags: feature
    :versions: 0.9.0b1

    Added a new flag ``system=True`` to :class:`.Column`, which marks the column as a "system" column which is automatically made present by the database (such as Postgresql ``oid`` or ``xmin``). The column will be omitted from the ``CREATE TABLE`` statement but will otherwise be available for querying. In addition, the :class:`.CreateColumn` construct can be applied to a custom compilation rule which allows skipping of columns, by producing a rule that returns ``None``.

.. change::
    :tags: bug, orm
    :tickets: 2779

    Backported a change from 0.9 whereby the iteration of a hierarchy of mappers used in polymorphic inheritance loads is sorted, which allows the SELECT statements generated for polymorphic queries to have deterministic rendering, which in turn helps with caching schemes that cache on the SQL string itself.

.. change::
    :tags: bug, orm
    :tickets: 2794
    :versions: 0.9.0b1

    Fixed a potential issue in an ordered sequence implementation used by the ORM to iterate mapper hierarchies; under the Jython interpreter this implementation wasn't ordered, even though cPython and Pypy maintained ordering.

.. change::
    :tags: bug, examples
    :versions: 0.9.0b1

    Added "autoincrement=False" to the history table created in the versioning example, as this table shouldn't have autoinc on it in any case, courtesy Patrick Schmid.

.. change::
    :tags: bug, sql
    :versions: 0.9.0b1

    The :meth:`.ColumnOperators.notin_` operator added in 0.8 now properly produces the negation of the expression "IN" returns when used against an empty collection.

.. change::
    :tags: feature, examples
    :versions: 0.9.0b1

    Improved the examples in ``examples/generic_associations``, including that ``discriminator_on_association.py`` makes use of single table inheritance to do the work with the "discriminator". Also added a true "generic foreign key" example, which works similarly to other popular frameworks in that it uses an open-ended integer to point to any other table, foregoing traditional referential integrity. While we don't recommend this pattern, information wants to be free.

.. change::
    :tags: feature, orm, declarative
    :versions: 0.9.0b1

    Added a convenience class decorator :func:`.as_declarative`, which is a wrapper for :func:`.declarative_base` that allows an existing base class to be applied using a nifty class-decorated approach.

.. change::
    :tags: bug, orm
    :tickets: 2786
    :versions: 0.9.0b1

    Fixed bug in ORM-level event registration where the "raw" or "propagate" flags could potentially be mis-configured in some "unmapped base class" configurations.

.. change::
    :tags: bug, orm
    :tickets: 2778
    :versions: 0.9.0b1

    A performance fix related to the usage of the :func:`.defer` option when loading mapped entities. The function overhead of applying a per-object deferred callable to an instance at load time was significantly higher than that of just loading the data from the row (note that ``defer()`` is meant to reduce DB/network overhead, not necessarily function call count); the function call overhead is now less than that of loading data from the column in all cases. There is also a reduction in the number of "lazy callable" objects created per load from N (total deferred values in the result) to 1 (total number of deferred cols).

..
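As a short sketch of the :func:`.as_declarative` decorator noted above; the class and column names are hypothetical::

    from sqlalchemy import Column, Integer, String
    from sqlalchemy.ext.declarative import as_declarative

    @as_declarative()
    class Base(object):
        # attributes defined here are shared by all mapped subclasses
        id = Column(Integer, primary_key=True)

    class Account(Base):
        __tablename__ = 'account'
        name = Column(String(50))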
change:: :tags: bug, sqlite :tickets: 2781 :versions: 0.9.0b1 The newly added SQLite DATETIME arguments storage_format and regexp apparently were not fully implemented correctly; while the arguments were accepted, in practice they would have no effect; this has been fixed. .. change:: :tags: bug, sql, postgresql :tickets: 2780 :versions: 0.9.0b1 Fixed bug where the expression system relied upon the ``str()`` form of a some expressions when referring to the ``.c`` collection on a ``select()`` construct, but the ``str()`` form isn't available since the element relies on dialect-specific compilation constructs, notably the ``__getitem__()`` operator as used with a Postgresql ``ARRAY`` element. The fix also adds a new exception class :exc:`.UnsupportedCompilationError` which is raised in those cases where a compiler is asked to compile something it doesn't know how to. .. change:: :tags: bug, engine, oracle :tickets: 2776 :versions: 0.9.0b1 Dialect.initialize() is not called a second time if an :class:`.Engine` is recreated, due to a disconnect error. This fixes a particular issue in the Oracle 8 dialect, but in general the dialect.initialize() phase should only be once per dialect. .. change:: :tags: feature, sql :tickets: 722 Added new method to the :func:`.insert` construct :meth:`.Insert.from_select`. Given a list of columns and a selectable, renders ``INSERT INTO (table) (columns) SELECT ..``. .. change:: :tags: feature, sql :versions: 0.9.0b1 The :func:`.update`, :func:`.insert`, and :func:`.delete` constructs will now interpret ORM entities as target tables to be operated upon, e.g.:: from sqlalchemy import insert, update, delete ins = insert(SomeMappedClass).values(x=5) del_ = delete(SomeMappedClass).where(SomeMappedClass.id == 5) upd = update(SomeMappedClass).where(SomeMappedClass.id == 5).values(name='ed') .. change:: :tags: bug, orm :tickets: 2773 :versions: 0.9.0b1 Fixed bug whereby attribute history functions would fail when an object we moved from "persistent" to "pending" using the :func:`.make_transient` function, for operations involving collection-based backrefs. .. change:: :tags: bug, engine, pool :tickets: 2772 :versions: 0.9.0b1 Fixed bug where :class:`.QueuePool` would lose the correct checked out count if an existing pooled connection failed to reconnect after an invalidate or recycle event. .. changelog:: :version: 0.8.2 :released: July 3, 2013 .. change:: :tags: bug, mysql :tickets: 2768 :versions: 0.9.0b1 Fixed bug when using multi-table UPDATE where a supplemental table is a SELECT with its own bound parameters, where the positioning of the bound parameters would be reversed versus the statement itself when using MySQL's special syntax. .. change:: :tags: bug, sqlite :tickets: 2764 :versions: 0.9.0b1 Added :class:`sqlalchemy.types.BIGINT` to the list of type names that can be reflected by the SQLite dialect; courtesy Russell Stuart. .. change:: :tags: feature, orm, declarative :tickets: 2761 :versions: 0.9.0b1 ORM descriptors such as hybrid properties can now be referenced by name in a string argument used with ``order_by``, ``primaryjoin``, or similar in :func:`.relationship`, in addition to column-bound attributes. .. change:: :tags: feature, firebird :tickets: 2763 :versions: 0.9.0b1 Added new flag ``retaining=True`` to the kinterbasdb and fdb dialects. This controls the value of the ``retaining`` flag sent to the ``commit()`` and ``rollback()`` methods of the DBAPI connection. 
Due to historical concerns, this flag defaults to ``True`` in 0.8.2, however in 0.9.0b1 this flag defaults to ``False``. .. change:: :tags: requirements :versions: 0.9.0b1 The Python `mock `_ library is now required in order to run the unit test suite. While part of the standard library as of Python 3.3, previous Python installations will need to install this in order to run unit tests or to use the ``sqlalchemy.testing`` package for external dialects. .. change:: :tags: bug, orm :tickets: 2750 :versions: 0.9.0b1 A warning is emitted when trying to flush an object of an inherited class where the polymorphic discriminator has been assigned to a value that is invalid for the class. .. change:: :tags: bug, postgresql :tickets: 2740 :versions: 0.9.0b1 The behavior of :func:`.extract` has been simplified on the Postgresql dialect to no longer inject a hardcoded ``::timestamp`` or similar cast into the given expression, as this interfered with types such as timezone-aware datetimes, but also does not appear to be at all necessary with modern versions of psycopg2. .. change:: :tags: bug, firebird :tickets: 2757 :versions: 0.9.0b1 Type lookup when reflecting the Firebird types LONG and INT64 has been fixed so that LONG is treated as INTEGER, INT64 treated as BIGINT, unless the type has a "precision" in which case it's treated as NUMERIC. Patch courtesy Russell Stuart. .. change:: :tags: bug, postgresql :tickets: 2766 :versions: 0.9.0b1 Fixed bug in HSTORE type where keys/values that contained backslashed quotes would not be escaped correctly when using the "non native" (i.e. non-psycopg2) means of translating HSTORE data. Patch courtesy Ryan Kelly. .. change:: :tags: bug, postgresql :tickets: 2767 :versions: 0.9.0b1 Fixed bug where the order of columns in a multi-column Postgresql index would be reflected in the wrong order. Courtesy Roman Podolyaka. .. change:: :tags: bug, sql :tickets: 2746, 2668 :versions: 0.9.0b1 Multiple fixes to the correlation behavior of :class:`.Select` constructs, first introduced in 0.8.0: * To satisfy the use case where FROM entries should be correlated outwards to a SELECT that encloses another, which then encloses this one, correlation now works across multiple levels when explicit correlation is established via :meth:`.Select.correlate`, provided that the target select is somewhere along the chain contained by a WHERE/ORDER BY/columns clause, not just nested FROM clauses. This makes :meth:`.Select.correlate` act more compatibly to that of 0.7 again while still maintaining the new "smart" correlation. * When explicit correlation is not used, the usual "implicit" correlation limits its behavior to just the immediate enclosing SELECT, to maximize compatibility with 0.7 applications, and also prevents correlation across nested FROMs in this case, maintaining compatibility with 0.8.0/0.8.1. * The :meth:`.Select.correlate_except` method was not preventing the given FROM clauses from correlation in all cases, and also would cause FROM clauses to be incorrectly omitted entirely (more like what 0.7 would do), this has been fixed. * Calling `select.correlate_except(None)` will enter all FROM clauses into correlation as would be expected. .. change:: :tags: bug, ext :versions: 0.9.0b1 Fixed bug whereby if a composite type were set up with a function instead of a class, the mutable extension would trip up when it tried to check that column for being a :class:`.MutableComposite` (which it isn't). Courtesy asldevi. .. 
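As a brief sketch of the :meth:`.Insert.from_select` method described earlier in this section; the tables and columns here are hypothetical::

    from sqlalchemy import Table, Column, Integer, String, MetaData, select

    meta = MetaData()
    src = Table('src', meta, Column('a', Integer), Column('b', String(20)))
    dest = Table('dest', meta, Column('a', Integer), Column('b', String(20)))

    sel = select([src.c.a, src.c.b]).where(src.c.a > 5)

    # renders roughly: INSERT INTO dest (a, b) SELECT src.a, src.b FROM src WHERE src.a > ...
    ins = dest.insert().from_select(['a', 'b'], sel)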
change:: :tags: feature, sql :tickets: 2744, 2734 Provided a new attribute for :class:`.TypeDecorator` called :attr:`.TypeDecorator.coerce_to_is_types`, to make it easier to control how comparisons using ``==`` or ``!=`` to ``None`` and boolean types goes about producing an ``IS`` expression, or a plain equality expression with a bound parameter. .. change:: :tags: feature, postgresql :versions: 0.9.0b1 Support for Postgresql 9.2 range types has been added. Currently, no type translation is provided, so works directly with strings or psycopg2 2.5 range extension types at the moment. Patch courtesy Chris Withers. .. change:: :tags: bug, examples :versions: 0.9.0b1 Fixed an issue with the "versioning" recipe whereby a many-to-one reference could produce a meaningless version for the target, even though it was not changed, when backrefs were present. Patch courtesy Matt Chisholm. .. change:: :tags: feature, postgresql :tickets: 2072 :versions: 0.9.0b1 Added support for "AUTOCOMMIT" isolation when using the psycopg2 DBAPI. The keyword is available via the ``isolation_level`` execution option. Patch courtesy Roman Podolyaka. .. change:: :tags: bug, orm :tickets: 2759 :versions: 0.9.0b1 Fixed bug in polymorphic SQL generation where multiple joined-inheritance entities against the same base class joined to each other as well would not track columns on the base table independently of each other if the string of joins were more than two entities long. .. change:: :tags: bug, engine :pullreq: github:6 :versions: 0.9.0b1 Fixed bug where the ``reset_on_return`` argument to various :class:`.Pool` implementations would not be propagated when the pool was regenerated. Courtesy Eevee. .. change:: :tags: bug, orm :tickets: 2754 :versions: 0.9.0b1 Fixed bug where sending a composite attribute into :meth:`.Query.order_by` would produce a parenthesized expression not accepted by some databases. .. change:: :tags: bug, orm :tickets: 2755 :versions: 0.9.0b1 Fixed the interaction between composite attributes and the :func:`.aliased` function. Previously, composite attributes wouldn't work correctly in comparison operations when aliasing was applied. .. change:: :tags: bug, mysql :tickets: 2715 :versions: 0.9.0b1 Added another conditional to the ``mysql+gaerdbms`` dialect to detect so-called "development" mode, where we should use the ``rdbms_mysqldb`` DBAPI. Patch courtesy Brett Slatkin. .. change:: :tags: feature, mysql :tickets: 2704 :versions: 0.9.0b1 The ``mysql_length`` parameter used with :class:`.Index` can now be passed as a dictionary of column names/lengths, for use with composite indexes. Big thanks to Roman Podolyaka for the patch. .. change:: :tags: bug, mssql :tickets: 2747 :versions: 0.9.0b1 When querying the information schema on SQL Server 2000, removed a CAST call that was added in 0.8.1 to help with driver issues, which apparently is not compatible on 2000. The CAST remains in place for SQL Server 2005 and greater. .. change:: :tags: bug, mysql :tickets: 2721 :versions: 0.9.0b1 The ``deferrable`` keyword argument on :class:`.ForeignKey` and :class:`.ForeignKeyConstraint` will not render the ``DEFERRABLE`` keyword on the MySQL dialect. For a long time we left this in place because a non-deferrable foreign key would act very differently than a deferrable one, but some environments just disable FKs on MySQL, so we'll be less opinionated here. .. 
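A small sketch of the per-column ``mysql_length`` dictionary noted above; the table and column names are hypothetical::

    from sqlalchemy import Table, Column, String, MetaData, Index

    meta = MetaData()
    doc = Table('doc', meta,
                Column('title', String(200)),
                Column('body', String(2000)))

    # prefix lengths per column for a composite index on MySQL
    Index('ix_doc_title_body', doc.c.title, doc.c.body,
          mysql_length={'title': 10, 'body': 20})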
change:: :tags: bug, ext, orm :tickets: 2730 :versions: 0.9.0b1 Fixed bug where :class:`.MutableDict` didn't report a change event when ``clear()`` was called. .. change:: :tags: bug, sql :tickets: 2738 :versions: 0.9.0b1 Fixed bug whereby joining a select() of a table "A" with multiple foreign key paths to a table "B", to that table "B", would fail to produce the "ambiguous join condition" error that would be reported if you join table "A" directly to "B"; it would instead produce a join condition with multiple criteria. .. change:: :tags: bug, sql, reflection :tickets: 2728 :versions: 0.9.0b1 Fixed bug whereby using :meth:`.MetaData.reflect` across a remote schema as well as a local schema could produce wrong results in the case where both schemas had a table of the same name. .. change:: :tags: bug, sql :tickets: 2726 :versions: 0.9.0b1 Removed the "not implemented" ``__iter__()`` call from the base :class:`.ColumnOperators` class, while this was introduced in 0.8.0 to prevent an endless, memory-growing loop when one also implements a ``__getitem__()`` method on a custom operator and then calls erroneously ``list()`` on that object, it had the effect of causing column elements to report that they were in fact iterable types which then throw an error when you try to iterate. There's no real way to have both sides here so we stick with Python best practices. Careful with implementing ``__getitem__()`` on your custom operators! .. change:: :tags: feature, orm :tickets: 2736 Added a new method :meth:`.Query.select_entity_from` which will in 0.9 replace part of the functionality of :meth:`.Query.select_from`. In 0.8, the two methods perform the same function, so that code can be migrated to use the :meth:`.Query.select_entity_from` method as appropriate. See the 0.9 migration guide for details. .. change:: :tags: bug, orm :tickets: 2737 Fixed a regression caused by :ticket:`2682` whereby the evaluation invoked by :meth:`.Query.update` and :meth:`.Query.delete` would hit upon unsupported ``True`` and ``False`` symbols which now appear due to the usage of ``IS``. .. change:: :tags: bug, postgresql :pullreq: github:2 :tickets: 2735 Fixed the HSTORE type to correctly encode/decode for unicode. This is always on, as the hstore is a textual type, and matches the behavior of psycopg2 when using Python 3. Courtesy Dmitry Mugtasimov. .. change:: :tags: bug, examples Fixed a small bug in the dogpile example where the generation of SQL cache keys wasn't applying deduping labels to the statement the same way :class:`.Query` normally does. .. change:: :tags: bug, engine, sybase :tickets: 2732 Fixed a bug where the routine to detect the correct kwargs being sent to :func:`.create_engine` would fail in some cases, such as with the Sybase dialect. .. change:: :tags: bug, orm :tickets: 2481 Fixed a regression from 0.7 caused by this ticket, which made the check for recursion overflow in self-referential eager joining too loose, missing a particular circumstance where a subclass had lazy="joined" or "subquery" configured and the load was a "with_polymorphic" against the base. .. change:: :tags: bug, orm :tickets: 2718 Fixed a regression from 0.7 where the contextmanager feature of :meth:`.Session.begin_nested` would fail to correctly roll back the transaction when a flush error occurred, instead raising its own exception while leaving the session still pending a rollback. .. 
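Regarding the :meth:`.MetaData.reflect` behavior across schemas noted above, a minimal sketch; the engine URL and schema name are hypothetical::

    from sqlalchemy import create_engine, MetaData

    engine = create_engine("postgresql://scott:tiger@localhost/test")

    meta_default = MetaData()
    meta_default.reflect(bind=engine)                    # tables from the default schema

    meta_archive = MetaData()
    meta_archive.reflect(bind=engine, schema="archive")  # tables from the "archive" schema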
change:: :tags: bug, mysql Updated mysqlconnector dialect to check for disconnect based on the apparent string message sent in the exception; tested against mysqlconnector 1.0.9. .. change:: :tags: bug, sql, mssql :tickets: 2682 Regression from this ticket caused the unsupported keyword "true" to render, added logic to convert this to 1/0 for SQL server. .. changelog:: :version: 0.8.1 :released: April 27, 2013 .. change:: :tags: bug, orm :tickets: 2698 Fixes to the ``sqlalchemy.ext.serializer`` extension, including that the "id" passed from the pickler is turned into a string to prevent against bytes being parsed on Py3K, as well as that ``relationship()`` and ``orm.join()`` constructs are now properly serialized. .. change:: :tags: bug, orm :tickets: 2714 A significant improvement to the inner workings of query.join(), such that the decisionmaking involved on how to join has been dramatically simplified. New test cases now pass such as multiple joins extending from the middle of an already complex series of joins involving inheritance and such. Joining from deeply nested subquery structures is still complicated and not without caveats, but with these improvements the edge cases are hopefully pushed even farther out to the edges. .. change:: :tags: feature, orm :tickets: 2673 Added a convenience method to Query that turns a query into an EXISTS subquery of the form ``EXISTS (SELECT 1 FROM ... WHERE ...)``. .. change:: :tags: bug, orm Added a conditional to the unpickling process for ORM mapped objects, such that if the reference to the object were lost when the object was pickled, we don't erroneously try to set up _sa_instance_state - fixes a NoneType error. .. change:: :tags: bug, postgresql :tickets: 2712 Opened up the checking for "disconnect" with psycopg2/libpq to check for all the various "disconnect" messages within the full exception hierarchy. Specifically the "closed the connection unexpectedly" message has now been seen in at least three different exception types. Courtesy Eli Collins. .. change:: :tags: bug, sql, mysql :tickets: 2682 Fully implemented the IS and IS NOT operators with regards to the True/False constants. An expression like ``col.is_(True)`` will now render ``col IS true`` on the target platform, rather than converting the True/ False constant to an integer bound parameter. This allows the ``is_()`` operator to work on MySQL when given True/False constants. .. change:: :tags: bug, postgresql :tickets: 2681 The operators for the Postgresql ARRAY type supports input types of sets, generators, etc. even when a dimension is not specified, by turning the given iterable into a collection unconditionally. .. change:: :tags: bug, mysql Fixes to support the latest cymysql DBAPI, courtesy Hajime Nakagami. .. change:: :tags: bug, mysql :tickets: 2663 Improvements to the operation of the pymysql dialect on Python 3, including some important decode/bytes steps. Issues remain with BLOB types due to driver issues. Courtesy Ben Trofatter. .. change:: :tags: bug, orm :tickets: 2710 Fixed bug where many-to-many relationship with uselist=False would fail to delete the association row and raise an error if the scalar attribute were set to None. This was a regression introduced by the changes for :ticket:`2229`. .. 
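A minimal sketch of the ``is_()`` comparison against boolean constants described above; the table and column names are hypothetical::

    from sqlalchemy import Table, Column, Integer, Boolean, MetaData, select

    meta = MetaData()
    task = Table('task', meta,
                 Column('id', Integer, primary_key=True),
                 Column('done', Boolean))

    # renders "task.done IS true" on the target platform, rather than
    # converting the True constant to an integer bound parameter
    stmt = select([task]).where(task.c.done.is_(True))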
change::
    :tags: bug, orm
    :tickets: 2708

    Improved the behavior of instance management regarding the creation of strong references within the Session; an object will no longer have an internal reference cycle created if it's in the transient state or moves into the detached state - the strong ref is created only when the object is attached to a Session and is removed when the object is detached. This makes it somewhat safer for an object to have a `__del__()` method, even though this is not recommended, as relationships with backrefs produce cycles too. A warning has been added when a class with a `__del__()` method is mapped.

.. change::
    :tags: bug, sql
    :tickets: 2702

    A major fix to the way in which a select() object produces labeled columns when apply_labels() is used; this mode produces a SELECT where each column is labeled in the form <tablename>_<columnname>, to remove column name collisions for a multiple table select. The fix is that if two labels collide when combined with the table name, i.e. "foo.bar_id" and "foo_bar.id", anonymous aliasing will be applied to one of the dupes. This allows the ORM to handle both columns independently; previously, 0.7 would in some cases silently emit a second SELECT for the column that was "duped", and in 0.8 an ambiguous column error would be emitted. The "keys" applied to the .c. collection of the select() will also be deduped, so that the "column being replaced" warning will no longer emit for any select() that specifies use_labels, though the dupe key will be given an anonymous label which isn't generally user-friendly.

.. change::
    :tags: bug, mysql

    Updated a regexp to correctly extract error code on google app engine v1.7.5 and newer. Courtesy Dan Ring.

.. change::
    :tags: bug, examples

    Fixed a long-standing bug in the caching example, where the limit/offset parameter values wouldn't be taken into account when computing the cache key. The _key_from_query() function has been simplified to work directly from the final compiled statement in order to get at both the full statement as well as the fully processed parameter list.

.. change::
    :tags: bug, mssql
    :tickets: 2355

    Part of a longer series of fixes needed for pyodbc+mssql, a CAST to NVARCHAR(max) has been added to the bound parameter for the table name and schema name in all information schema queries to avoid the issue of comparing NVARCHAR to NTEXT, which seems to be rejected by the ODBC driver in some cases, such as FreeTDS (0.91 only?) plus unicode bound parameters being passed. The issue seems to be specific to the SQL Server information schema tables and the workaround is harmless for those cases where the problem doesn't exist in the first place.

.. change::
    :tags: bug, sql
    :tickets: 2691

    Fixed bug where disconnect detection on error would raise an attribute error if the error were being raised after the Connection object had already been closed.

.. change::
    :tags: bug, sql
    :tickets: 2703

    Reworked internal exception raises that emit a rollback() before re-raising, so that the stack trace is preserved from sys.exc_info() before entering the rollback. This is so that the traceback is preserved when using coroutine frameworks which may have switched contexts before the rollback function returns.

.. change::
    :tags: bug, orm
    :tickets: 2697

    Fixed bug whereby ORM would run the wrong kind of query when refreshing an inheritance-mapped class where the superclass was mapped to a non-Table object, like a custom join() or a select(), running a query that assumed a hierarchy that's mapped to individual Table-per-class.

..
change:: :tags: bug, orm Fixed `__repr__()` on mapper property constructs to work before the object is initialized, so that Sphinx builds with recent Sphinx versions can read them. .. change:: :tags: bug, sql, postgresql The _Binary base type now converts values through the bytes() callable when run on Python 3; in particular psycopg2 2.5 with Python 3.3 seems to now be returning the "memoryview" type, so this is converted to bytes before return. .. change:: :tags: bug, sql :tickets: 2695 Improvements to Connection auto-invalidation handling. If a non-disconnect error occurs, but leads to a delayed disconnect error within error handling (happens with MySQL), the disconnect condition is detected. The Connection can now also be closed when in an invalid state, meaning it will raise "closed" on next usage, and additionally the "close with result" feature will work even if the autorollback in an error handling routine fails and regardless of whether the condition is a disconnect or not. .. change:: :tags: bug, orm, declarative :tickets: 2656 Fixed indirect regression regarding :func:`.has_inherited_table`, where since it considers the current class' ``__table__``, was sensitive to when it was called. This is 0.7's behavior also, but in 0.7 things tended to "work out" within events like ``__mapper_args__()``. :func:`.has_inherited_table` now only considers superclasses, so should return the same answer regarding the current class no matter when it's called (obviously assuming the state of the superclass). .. change:: :tags: bug, mssql Added support for additional "disconnect" messages to the pymssql dialect. Courtesy John Anderson. .. change:: :tags: feature, sql Loosened the check on dialect-specific argument names passed to Table(); since we want to support external dialects and also want to support args without a certain dialect being installed, it only checks the format of the arg now, rather than looking for that dialect in sqlalchemy.dialects. .. change:: :tags: bug, sql Fixed bug whereby a DBAPI that can return "0" for cursor.lastrowid would not function correctly in conjunction with :attr:`.ResultProxy.inserted_primary_key`. .. change:: :tags: bug, mssql :tickets: 2683 Fixed Py3K bug regarding "binary" types and pymssql. Courtesy Marc Abramowitz. .. change:: :tags: bug, postgresql :tickets: 2680 Added missing HSTORE type to postgresql type names so that the type can be reflected. .. changelog:: :version: 0.8.0 :released: March 9, 2013 .. note:: There are some new behavioral changes as of 0.8.0 not present in 0.8.0b2. They are present in the migration document as follows: * :ref:`legacy_is_orphan_addition` * :ref:`metadata_create_drop_tables` * :ref:`correlation_context_specific` .. change:: :tags: feature, orm :tickets: 2675 A meaningful :attr:`.QueryableAttribute.info` attribute is added, which proxies down to the ``.info`` attribute on either the :class:`.schema.Column` object if directly present, or the :class:`.MapperProperty` otherwise. The full behavior is documented and ensured by tests to remain stable. .. 
change:: :tags: bug, sql :tickets: 2668 The behavior of SELECT correlation has been improved such that the :meth:`.Select.correlate` and :meth:`.Select.correlate_except` methods, as well as their ORM analogues, will still retain "auto-correlation" behavior in that the FROM clause is modified only if the output would be legal SQL; that is, the FROM clause is left intact if the correlated SELECT is not used in the context of an enclosing SELECT inside of the WHERE, columns, or HAVING clause. The two methods now only specify conditions to the default "auto correlation", rather than absolute FROM lists. .. change:: :tags: feature, mysql New dialect for CyMySQL added, courtesy Hajime Nakagami. .. change:: :tags: bug, orm :tickets: 2674 Improved checking for an existing backref name conflict during mapper configuration; will now test for name conflicts on superclasses and subclasses, in addition to the current mapper, as these conflicts break things just as much. This is new for 0.8, but see below for a warning that will also be triggered in 0.7.11. .. change:: :tags: bug, orm :tickets: 2674 Improved the error message emitted when a "backref loop" is detected, that is when an attribute event triggers a bidirectional assignment between two other attributes with no end. This condition can occur not just when an object of the wrong type is assigned, but also when an attribute is mis-configured to backref into an existing backref pair. Also in 0.7.11. .. change:: :tags: bug, orm :tickets: 2674 A warning is emitted when a MapperProperty is assigned to a mapper that replaces an existing property, if the properties in question aren't plain column-based properties. Replacement of relationship properties is rarely (ever?) what is intended and usually refers to a mapper mis-configuration. Also in 0.7.11. .. change:: :tags: feature, orm Can set/change the "cascade" attribute on a :func:`.relationship` construct after it's been constructed already. This is not a pattern for normal use but we like to change the setting for demonstration purposes in tutorials. .. change:: :tags: bug, schema :tickets: 2664 :meth:`.MetaData.create_all` and :meth:`.MetaData.drop_all` will now accommodate an empty list as an instruction to not create/drop any items, rather than ignoring the collection. .. change:: :tags: bug, tests :tickets: 2669 Fixed an import of "logging" in test_execute which was not working on some linux platforms. Also in 0.7.11. .. change:: :tags: bug, orm :tickets: 2662 A clear error message is emitted if an event handler attempts to emit SQL on a Session within the after_commit() handler, where there is not a viable transaction in progress. .. change:: :tags: bug, orm :tickets: 2665 Detection of a primary key change within the process of cascading a natural primary key update will succeed even if the key is composite and only some of the attributes have changed. .. change:: :tags: feature, orm :tickets: 2658 Added new helper function :func:`.was_deleted`, returns True if the given object was the subject of a :meth:`.Session.delete` operation. .. change:: :tags: bug, orm :tickets: 2658 An object that's deleted from a session will be de-associated with that session fully after the transaction is committed, that is the :func:`.object_session` function will return None. .. change:: :tags: bug, oracle The cx_oracle dialect will no longer run the bind parameter names through ``encode()``, as this is not valid on Python 3, and prevented statements from functioning correctly on Python 3. 
We now encode only if ``supports_unicode_binds`` is False, which is not the case for cx_oracle when at least version 5 of cx_oracle is used. .. change:: :tags: bug, orm :tickets: 2661 Fixed bug whereby :meth:`.Query.yield_per` would set the execution options incorrectly, thereby breaking subsequent usage of the :meth:`.Query.execution_options` method. Courtesy Ryan Kelly. .. change:: :tags: bug, orm :tickets: 1768 Fixed the consideration of the ``between()`` operator so that it works correctly with the new relationship local/remote system. .. change:: :tags: bug, sql :tickets: 2660, 1768 Fixed a bug regarding column annotations which in particular could impact some usages of the new :func:`.orm.remote` and :func:`.orm.local` annotation functions, where annotations could be lost when the column were used in a subsequent expression. .. change:: :tags: bug, mysql, gae :tickets: 2649 Added a conditional import to the ``gaerdbms`` dialect which attempts to import rdbms_apiproxy vs. rdbms_googleapi to work on both dev and production platforms. Also now honors the ``instance`` attribute. Courtesy Sean Lynch. Also in 0.7.10. .. change:: :tags: bug, sql :tickets: 2496 The :meth:`.ColumnOperators.in_` operator will now coerce values of ``None`` to :func:`.null`. .. change:: :tags: feature, sql :tickets: 2657 Added a new argument to :class:`.Enum` and its base :class:`.SchemaType` ``inherit_schema``. When set to ``True``, the type will set its ``schema`` attribute of that of the :class:`.Table` to which it is associated. This also occurs during a :meth:`.Table.tometadata` operation; the :class:`.SchemaType` is now copied in all cases when :meth:`.Table.tometadata` happens, and if ``inherit_schema=True``, the type will take on the new schema name passed to the method. The ``schema`` is important when used with the Postgresql backend, as the type results in a ``CREATE TYPE`` statement. .. change:: :tags: feature, postgresql Added :meth:`.postgresql.ARRAY.Comparator.any` and :meth:`.postgresql.ARRAY.Comparator.all` methods, as well as standalone expression constructs. Big thanks to Audrius Kažukauskas for the terrific work here. .. change:: :tags: sql, bug :tickets: 2643 Fixed bug where :meth:`.Table.tometadata` would fail if a :class:`.Column` had both a foreign key as well as an alternate ".key" name for the column. Also in 0.7.10. .. change:: :tags: sql, bug :tickets: 2629 insert().returning() raises an informative CompileError if attempted to compile on a dialect that doesn't support RETURNING. .. change:: :tags: orm, bug :tickets: 2655 the consideration of a pending object as an "orphan" has been modified to more closely match the behavior as that of persistent objects, which is that the object is expunged from the :class:`.Session` as soon as it is de-associated from any of its orphan-enabled parents. Previously, the pending object would be expunged only if de-associated from all of its orphan-enabled parents. The new flag ``legacy_is_orphan`` is added to :func:`.orm.mapper` which re-establishes the legacy behavior. See the change note and example case at :ref:`legacy_is_orphan_addition` for a detailed discussion of this change. .. change:: :tags: orm, bug :tickets: 2653 Fixed the (most likely never used) "@collection.link" collection method, which fires off each time the collection is associated or de-associated with a mapped object - the decorator was not tested or functional. The decorator method is now named :meth:`.collection.linker` though the name "link" remains for backwards compatibility. 
Courtesy Luca Wehrstedt. .. change:: :tags: orm, bug :tickets: 2654 Made some fixes to the system of producing custom instrumented collections, mainly that the usage of the @collection decorators will now honor the __mro__ of the given class, applying the logic of the sub-most classes' version of a particular collection method. Previously, it wasn't predictable when subclassing an existing instrumented class such as :class:`.MappedCollection` whether or not custom methods would resolve correctly. .. change:: :tags: orm, removed The undocumented (and hopefully unused) system of producing custom collections using an ``__instrumentation__`` datastructure associated with the collection has been removed, as this was a complex and untested feature which was also essentially redundant versus the decorator approach. Other internal simplifcations to the orm.collections module have been made as well. .. change:: :tags: mssql, feature Added ``mssql_include`` and ``mssql_clustered`` options to :class:`.Index`, renders the ``INCLUDE`` and ``CLUSTERED`` keywords, respectively. Courtesy Derek Harland. .. change:: :tags: sql, feature :tickets: 695 :class:`.Index` now supports arbitrary SQL expressions and/or functions, in addition to straight columns. Common modifiers include using ``somecolumn.desc()`` for a descending index and ``func.lower(somecolumn)`` for a case-insensitive index, depending on the capabilities of the target backend. .. change:: :tags: mssql, bug :tickets: 2638 Added a py3K conditional around unnecessary .decode() call in mssql information schema, fixes reflection in Py3K. Also in 0.7.10. .. change:: :tags: orm, bug :tickets: 2650 Fixed potential memory leak which could occur if an arbitrary number of :class:`.sessionmaker` objects were created. The anonymous subclass created by the sessionmaker, when dereferenced, would not be garbage collected due to remaining class-level references from the event package. This issue also applies to any custom system that made use of ad-hoc subclasses in conjunction with an event dispatcher. Also in 0.7.10. .. change:: :tags: mssql, bug Fixed a regression whereby the "collation" parameter of the character types CHAR, NCHAR, etc. stopped working, as "collation" is now supported by the base string types. The TEXT, NCHAR, CHAR, VARCHAR types within the MSSQL dialect are now synonyms for the base types. .. change:: :tags: mssql, feature :tickets: 2644 DDL for IDENTITY columns is now supported on non-primary key columns, by establishing a :class:`.Sequence` construct on any integer column. Courtesy Derek Harland. .. change:: :tags: examples, bug Fixed a regression in the examples/dogpile_caching example which was due to the change in :ticket:`2614`. .. change:: :tags: orm, bug :tickets: 2640 :meth:`.Query.merge_result` can now load rows from an outer join where an entity may be ``None`` without throwing an error. Also in 0.7.10. .. change:: :tags: sql, bug :tickets: 2648 Tweaked the "REQUIRED" symbol used by the compiler to identify INSERT/UPDATE bound parameters that need to be passed, so that it's more easily identifiable when writing custom bind-handling code. .. change:: :tags: postgresql, bug Fixed bug in :class:`~sqlalchemy.dialects.postgresql.array()` construct whereby using it inside of an :func:`.expression.insert` construct would produce an error regarding a parameter issue in the ``self_group()`` method. .. 
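A small sketch of the expression-based :class:`.Index` support noted above; the table and column names are hypothetical and rendering is subject to backend support::

    from sqlalchemy import Table, Column, Integer, String, MetaData, Index, func

    meta = MetaData()
    users = Table('users', meta,
                  Column('id', Integer, primary_key=True),
                  Column('name', String(50)))

    # a case-insensitive index and a descending index
    Index('ix_users_name_lower', func.lower(users.c.name))
    Index('ix_users_name_desc', users.c.name.desc())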
change:: :tags: orm, feature Extended the :doc:`/core/inspection` system so that all Python descriptors associated with the ORM or its extensions can be retrieved. This fulfills the common request of being able to inspect all :class:`.QueryableAttribute` descriptors in addition to extension types such as :class:`.hybrid_property` and :class:`.AssociationProxy`. See :attr:`.Mapper.all_orm_descriptors`. .. change:: :tags: mysql, feature GAE dialect now accepts username/password arguments in the URL, courtesy Owen Nelson. .. change:: :tags: mysql, bug GAE dialect won't fail on None match if the error code can't be extracted from the exception throw; courtesy Owen Nelson. .. change:: :tags: orm, bug :tickets: 2637 Fixes to the "dynamic" loader on :func:`.relationship`, includes that backrefs will work properly even when autoflush is disabled, history events are more accurate in scenarios where multiple add/remove of the same object occurs. .. changelog:: :version: 0.8.0b2 :released: December 14, 2012 .. change:: :tags: orm, bug :tickets: 2635 The :meth:`.Query.select_from` method can now be used with a :func:`.aliased` construct without it interfering with the entities being selected. Basically, a statement like this:: ua = aliased(User) session.query(User.name).select_from(ua).join(User, User.name > ua.name) Will maintain the columns clause of the SELECT as coming from the unaliased "user", as specified; the select_from only takes place in the FROM clause:: SELECT users.name AS users_name FROM users AS users_1 JOIN users ON users.name < users_1.name Note that this behavior is in contrast to the original, older use case for :meth:`.Query.select_from`, which is that of restating the mapped entity in terms of a different selectable:: session.query(User.name).\ select_from(user_table.select().where(user_table.c.id > 5)) Which produces:: SELECT anon_1.name AS anon_1_name FROM (SELECT users.id AS id, users.name AS name FROM users WHERE users.id > :id_1) AS anon_1 It was the "aliasing" behavior of the latter use case that was getting in the way of the former use case. The method now specifically considers a SQL expression like :func:`.expression.select` or :func:`.expression.alias` separately from a mapped entity like a :func:`.aliased` construct. .. change:: :tags: sql, bug :tickets: 2633 Fixed a regression caused by :ticket:`2410` whereby a :class:`.CheckConstraint` would apply itself back to the original table during a :meth:`.Table.tometadata` operation, as it would parse the SQL expression for a parent table. The operation now copies the given expression to correspond to the new table. .. change:: :tags: oracle, bug :tickets: 2619 Fixed table reflection for Oracle when accessing a synonym that refers to a DBLINK remote database; while the syntax has been present in the Oracle dialect for some time, up until now it has never been tested. The syntax has been tested against a sample database linking to itself, however there's still some uncertainty as to what should be used for the "owner" when querying the remote database for table information. Currently, the value of "username" from user_db_links is used to match the "owner". .. change:: :tags: orm, feature :tickets: 2601 Added :meth:`.KeyedTuple._asdict` and :attr:`.KeyedTuple._fields` to the :class:`.KeyedTuple` class to provide some degree of compatibility with the Python standard library ``collections.namedtuple()``. .. 
change:: :tags: sql, bug :tickets: 2610 Fixed bug whereby using a label_length on a dialect that was smaller than the size of actual column identifiers would fail to render the columns correctly in a SELECT statement. .. change:: :tags: sql, feature :tickets: 2623 The :class:`.Insert` construct now supports multi-valued inserts, that is, an INSERT that renders like "INSERT INTO table VALUES (...), (...), ...". Supported by Postgresql, SQLite, and MySQL. Big thanks to Idan Kamara for doing the legwork on this one. .. change:: :tags: oracle, bug :tickets: 2620 The Oracle LONG type, while an unbounded text type, does not appear to use the cx_Oracle.LOB type when result rows are returned, so the dialect has been repaired to exclude LONG from having cx_Oracle.LOB filtering applied. Also in 0.7.10. .. change:: :tags: oracle, bug :tickets: 2611 Repaired the usage of ``.prepare()`` in conjunction with cx_Oracle so that a return value of ``False`` will result in no call to ``connection.commit()``, hence avoiding "no transaction" errors. Two-phase transactions have now been shown to work in a rudimental fashion with SQLAlchemy and cx_oracle, however are subject to caveats observed with the driver; check the documentation for details. Also in 0.7.10. .. change:: :tags: sql, bug :tickets: 2618 The :class:`~sqlalchemy.types.DECIMAL` type now honors the "precision" and "scale" arguments when rendering DDL. .. change:: :tags: orm, bug :tickets: 2624 The :class:`.MutableComposite` type did not allow for the :meth:`.MutableBase.coerce` method to be used, even though the code seemed to indicate this intent, so this now works and a brief example is added. As a side-effect, the mechanics of this event handler have been changed so that new :class:`.MutableComposite` types no longer add per-type global event handlers. Also in 0.7.10. .. change:: :tags: sql, bug :tickets: 2621 Made an adjustment to the "boolean" (i.e. ``__nonzero__``) evaluation of binary expressions, i.e. ``x1 == x2``, such that the "auto-grouping" applied by :class:`.BinaryExpression` in some cases won't get in the way of this comparison. Previously, an expression like::

    expr1 = mycolumn > 2
    bool(expr1 == expr1)

would evaluate as ``False``, even though this is an identity comparison, because ``mycolumn > 2`` would be "grouped" before being placed into the :class:`.BinaryExpression`, thus changing its identity. :class:`.BinaryExpression` now keeps track of the "original" objects passed in. Additionally, the ``__nonzero__`` method now only returns a result if the operator is ``==`` or ``!=`` - all others raise ``TypeError``. .. change:: :tags: firebird, bug :tickets: 2622 Added missing import for "fdb" to the experimental "firebird+fdb" dialect. .. change:: :tags: orm, feature Allow synonyms to be used when defining primary and secondary joins for relationships. .. change:: :tags: orm, bug :tickets: 2614 A second overhaul of aliasing/internal pathing mechanics now allows two subclasses to have different relationships of the same name, supported with subquery or joined eager loading on both simultaneously when a full polymorphic load is used. .. change:: :tags: orm, bug :tickets: 2617 Fixed bug whereby a multi-hop subqueryload within a particular with_polymorphic load would produce a KeyError. Takes advantage of the same internal pathing overhaul as :ticket:`2614`. .. change:: :tags: sql, bug Fixed a gotcha where inadvertently calling list() on a :class:`.ColumnElement` would go into an endless loop, if :meth:`.ColumnOperators.__getitem__` were implemented.
A new NotImplementedError is emitted via ``__iter__()``. .. change:: :tags: orm, extensions, feature The :mod:`sqlalchemy.ext.mutable` extension now includes the example :class:`.MutableDict` class as part of the extension. .. change:: :tags: postgresql, feature :tickets: 2606 :class:`.HSTORE` is now available in the Postgresql dialect. Will also use psycopg2's extensions if available. Courtesy Audrius Kažukauskas. .. change:: :tags: sybase, feature :tickets: 1753 Reflection support has been added to the Sybase dialect. Big thanks to Ben Trofatter for all the work developing and testing this. .. change:: :tags: engine, feature The :meth:`.Connection.connect` and :meth:`.Connection.contextual_connect` methods now return a "branched" version so that the :meth:`.Connection.close` method can be called on the returned connection without affecting the original. Allows symmetry when using :class:`.Engine` and :class:`.Connection` objects as context managers:: with conn.connect() as c: # leaves the Connection open c.execute("...") with engine.connect() as c: # closes the Connection c.execute("...") .. change:: :tags: engine The "reflect=True" argument to :class:`~sqlalchemy.schema.MetaData` is deprecated. Please use the :meth:`.MetaData.reflect` method. .. change:: :tags: sql, bug :tickets: 2603 Fixed bug in type_coerce() whereby typing information could be lost if the statement were used as a subquery inside of another statement, as well as other similar situations. Among other things, would cause typing information to be lost when the Oracle/mssql dialects would apply limit/offset wrappings. .. change:: :tags: orm, bug :tickets: 2602 Fixed regression where query.update() would produce an error if an object matched by the "fetch" synchronization strategy wasn't locally present. Courtesy Scott Torborg. .. change:: :tags: sql, bug :tickets: 2597 Fixed bug whereby the ".key" of a Column wasn't being used when producing a "proxy" of the column against a selectable. This probably didn't occur in 0.7 since 0.7 doesn't respect the ".key" in a wider range of scenarios. .. change:: :tags: mssql, feature :tickets: 2600 Support for reflection of the "name" of primary key constraints added, courtesy Dave Moore. .. change:: :tags: informix Some cruft regarding informix transaction handling has been removed, including a feature that would skip calling commit()/rollback() as well as some hardcoded isolation level assumptions on begin().. The status of this dialect is not well understood as we don't have any users working with it, nor any access to an Informix database. If someone with access to Informix wants to help test this dialect, please let us know. .. change:: :tags: pool, feature The :class:`.Pool` will now log all connection.close() operations equally, including closes which occur for invalidated connections, detached connections, and connections beyond the pool capacity. .. change:: :tags: pool, feature :tickets: 2611 The :class:`.Pool` now consults the :class:`.Dialect` for functionality regarding how the connection should be "auto rolled back", as well as closed. This grants more control of transaction scope to the dialect, so that we will be better able to implement transactional workarounds like those potentially needed for pysqlite and cx_oracle. .. change:: :tags: pool, feature Added new :meth:`.PoolEvents.reset` hook to capture the event before a connection is auto-rolled back, upon return to the pool. 
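A minimal sketch of hooking this event (the engine URL and logger name here are illustrative only)::

    import logging
    from sqlalchemy import create_engine, event

    log = logging.getLogger("myapp.pool")
    engine = create_engine("sqlite://")

    @event.listens_for(engine, "reset")
    def on_reset(dbapi_connection, connection_record):
        # called just before the pool rolls the connection back
        # upon its return to the pool
        log.debug("connection %r is being reset", dbapi_connection)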
Together with :meth:`.ConnectionEvents.rollback` this allows all rollback events to be intercepted. .. changelog:: :version: 0.8.0b1 :released: October 30, 2012 .. change:: :tags: sql, bug :tickets: 2593 Fixed bug where keyword arguments passed to :meth:`.Compiler.process` wouldn't get propagated to the column expressions present in the columns clause of a SELECT statement. In particular this would come up when used by custom compilation schemes that relied upon special flags. .. change:: :tags: sql, feature Added a new method :meth:`.Engine.execution_options` to :class:`.Engine`. This method works similarly to :meth:`.Connection.execution_options` in that it creates a copy of the parent object which will refer to the new set of options. The method can be used to build sharding schemes where each engine shares the same underlying pool of connections. The method has been tested against the horizontal shard recipe in the ORM as well. .. seealso:: :meth:`.Engine.execution_options` .. change:: :tags: sql, orm, bug :tickets: 2595 The auto-correlation feature of :func:`.select`, and by proxy that of :class:`.Query`, will not take effect for a SELECT statement that is being rendered directly in the FROM list of the enclosing SELECT. Correlation in SQL only applies to column expressions such as those in the WHERE, ORDER BY, columns clause. .. change:: :tags: sqlite :changeset: c3addcc9ffad Added :class:`.types.NCHAR`, :class:`.types.NVARCHAR` to the SQLite dialect's list of recognized type names for reflection. SQLite returns the name given to a type as the name returned. .. change:: :tags: examples :tickets: 2589 The Beaker caching example has been converted to use `dogpile.cache `_. This is a new caching library written by the same creator of Beaker's caching internals, and represents a vastly improved, simplified, and modernized system of caching. .. seealso:: :ref:`examples_caching` .. change:: :tags: general :tickets: SQLAlchemy 0.8 now targets Python 2.5 and above. Python 2.4 is no longer supported. .. change:: :tags: removed, general :tickets: 2433 The "sqlalchemy.exceptions" synonym for "sqlalchemy.exc" is removed fully. .. change:: :tags: removed, orm :tickets: 2442 The legacy "mutable" system of the ORM, including the MutableType class as well as the mutable=True flag on PickleType and postgresql.ARRAY has been removed. In-place mutations are detected by the ORM using the sqlalchemy.ext.mutable extension, introduced in 0.7. The removal of MutableType and associated constructs removes a great deal of complexity from SQLAlchemy's internals. The approach performed poorly as it would incur a scan of the full contents of the Session when in use. .. change:: :tags: orm, moved :tickets: The InstrumentationManager interface and the entire related system of alternate class implementation is now moved out to sqlalchemy.ext.instrumentation. This is a seldom used system that adds significant complexity and overhead to the mechanics of class instrumentation. The new architecture allows it to remain unused until InstrumentationManager is actually imported, at which point it is bootstrapped into the core. .. change:: :tags: orm, feature :tickets: 1401 Major rewrite of relationship() internals now allow join conditions which include columns pointing to themselves within composite foreign keys. A new API for very specialized primaryjoin conditions is added, allowing conditions based on SQL functions, CAST, etc. 
to be handled by placing the annotation functions remote() and foreign() inline within the expression when necessary. Previous recipes using the semi-private _local_remote_pairs approach can be upgraded to this new approach. .. seealso:: :ref:`feature_relationship_08` .. change:: :tags: orm, bug :tickets: 2527 ORM will perform extra effort to determine that an FK dependency between two tables is not significant during flush if the tables are related via joined inheritance and the FK dependency is not part of the inherit_condition, saves the user a use_alter directive. .. change:: :tags: orm, feature :tickets: 2333 New standalone function with_polymorphic() provides the functionality of query.with_polymorphic() in a standalone form. It can be applied to any entity within a query, including as the target of a join in place of the "of_type()" modifier. .. change:: :tags: orm, feature :tickets: 1106, 2438 The of_type() construct on attributes now accepts aliased() class constructs as well as with_polymorphic constructs, and works with query.join(), any(), has(), and also eager loaders subqueryload(), joinedload(), contains_eager() .. change:: :tags: orm, feature :tickets: 2585 Improvements to event listening for mapped classes allows that unmapped classes can be specified for instance- and mapper-events. The established events will be automatically set up on subclasses of that class when the propagate=True flag is passed, and the events will be set up for that class itself if and when it is ultimately mapped. .. change:: :tags: orm, bug :tickets: 2590 The instrumentation events class_instrument(), class_uninstrument(), and attribute_instrument() will now fire off only for descendant classes of the class assigned to listen(). Previously, an event listener would be assigned to listen for all classes in all cases regardless of the "target" argument passed. .. change:: :tags: orm, bug :tickets: 1900 with_polymorphic() produces JOINs in the correct order and with correct inheriting tables in the case of sending multi-level subclasses in an arbitrary order or with intermediary classes missing. .. change:: :tags: orm, feature :tickets: 2485 The "deferred declarative reflection" system has been moved into the declarative extension itself, using the new DeferredReflection class. This class is now tested with both single and joined table inheritance use cases. .. change:: :tags: orm, feature :tickets: 2208 Added new core function "inspect()", which serves as a generic gateway to introspection into mappers, objects, others. The Mapper and InstanceState objects have been enhanced with a public API that allows inspection of mapped attributes, including filters for column-bound or relationship-bound properties, inspection of current object state, history of attributes, etc. .. change:: :tags: orm, feature :tickets: 2452 Calling rollback() within a session.begin_nested() will now only expire those objects that had net changes within the scope of that transaction, that is objects which were dirty or were modified on a flush. This allows the typical use case for begin_nested(), that of altering a small subset of objects, to leave in place the data from the larger enclosing set of objects that weren't modified in that sub-transaction. .. change:: :tags: orm, feature :tickets: 2372 Added utility feature Session.enable_relationship_loading(), supersedes relationship.load_on_pending. Both features should be avoided, however. .. 
change:: :tags: orm, feature :tickets: Added support for .info dictionary argument to column_property(), relationship(), composite(). All MapperProperty classes have an auto-creating .info dict available overall. .. change:: :tags: orm, feature :tickets: 2229 Adding/removing None from a mapped collection now generates attribute events. Previously, a None append would be ignored in some cases. Related to. .. change:: :tags: orm, feature :tickets: 2229 The presence of None in a mapped collection now raises an error during flush. Previously, None values in collections would be silently ignored. .. change:: :tags: orm, feature :tickets: The Query.update() method is now more lenient as to the table being updated. Plain Table objects are better supported now, and additional a joined-inheritance subclass may be used with update(); the subclass table will be the target of the update, and if the parent table is referenced in the WHERE clause, the compiler will call upon UPDATE..FROM syntax as allowed by the dialect to satisfy the WHERE clause. MySQL's multi-table update feature is also supported if columns are specified by object in the "values" dicitionary. PG's DELETE..USING is also not available in Core yet. .. change:: :tags: orm, feature :tickets: New session events after_transaction_create and after_transaction_end allows tracking of new SessionTransaction objects. If the object is inspected, can be used to determine when a session first becomes active and when it deactivates. .. change:: :tags: orm, feature :tickets: 2592 The Query can now load entity/scalar-mixed "tuple" rows that contain types which aren't hashable, by setting the flag "hashable=False" on the corresponding TypeEngine object in use. Custom types that return unhashable types (typically lists) can set this flag to False. .. change:: :tags: orm, bug :tickets: 2481 Improvements to joined/subquery eager loading dealing with chains of subclass entities sharing a common base, with no specific "join depth" provided. Will chain out to each subclass mapper individually before detecting a "cycle", rather than considering the base class to be the source of the "cycle". .. change:: :tags: orm, bug :tickets: 2320 The "passive" flag on Session.is_modified() no longer has any effect. is_modified() in all cases looks only at local in-memory modified flags and will not emit any SQL or invoke loader callables/initializers. .. change:: :tags: orm, bug :tickets: 2405 The warning emitted when using delete-orphan cascade with one-to-many or many-to-many without single-parent=True is now an error. The ORM would fail to function subsequent to this warning in any case. .. change:: :tags: orm, bug :tickets: 2350 Lazy loads emitted within flush events such as before_flush(), before_update(), etc. will now function as they would within non-event code, regarding consideration of the PK/FK values used in the lazy-emitted query. Previously, special flags would be established that would cause lazy loads to load related items based on the "previous" value of the parent PK/FK values specifically when called upon within a flush; the signal to load in this way is now localized to where the unit of work actually needs to load that way. Note that the UOW does sometimes load these collections before the before_update() event is called, so the usage of "passive_updates" or not can affect whether or not a collection will represent the "old" or "new" data, when accessed within a flush event, based on when the lazy load was emitted. 
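For instance (the ``Parent`` class and ``children``/``archived`` attributes below are hypothetical, not taken from this entry), a lazy load emitted inside a flush event now sees the customary PK/FK values::

    from sqlalchemy import event

    @event.listens_for(Parent, "before_update")
    def check_children(mapper, connection, target):
        # the lazy load of target.children inside the flush now uses the
        # same PK/FK values it would use outside of a flush
        if any(child.archived for child in target.children):
            raise ValueError("cannot update a Parent with archived children")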
The change is backwards incompatible in the exceedingly small chance that user event code depended on the old behavior. .. change:: :tags: orm, feature :tickets: 2179 Query now "auto correlates" by default in the same way as select() does. Previously, a Query used as a subquery in another would require the correlate() method be called explicitly in order to correlate a table on the inside to the outside. As always, correlate(None) disables correlation. .. change:: :tags: orm, feature :tickets: 2464 The after_attach event is now emitted after the object is established in Session.new or Session.identity_map upon Session.add(), Session.merge(), etc., so that the object is represented in these collections when the event is called. Added before_attach event to accommodate use cases that need autoflush w pre-attached object. .. change:: :tags: orm, feature :tickets: The Session will produce warnings when unsupported methods are used inside the "execute" portion of the flush. These are the familiar methods add(), delete(), etc. as well as collection and related-object manipulations, as called within mapper-level flush events like after_insert(), after_update(), etc. It's been prominently documented for a long time that SQLAlchemy cannot guarantee results when the Session is manipulated within the execution of the flush plan, however users are still doing it, so now there's a warning. Maybe someday the Session will be enhanced to support these operations inside of the flush, but for now, results can't be guaranteed. .. change:: :tags: orm, bug :tickets: 2582, 2566 Continuing regarding extra state post-flush due to event listeners; any states that are marked as "dirty" from an attribute perspective, usually via column-attribute set events within after_insert(), after_update(), etc., will get the "history" flag reset in all cases, instead of only those instances that were part of the flush. This has the effect that this "dirty" state doesn't carry over after the flush and won't result in UPDATE statements. A warning is emitted to this effect; the set_committed_state() method can be used to assign attributes on objects without producing history events. .. change:: :tags: orm, feature :tickets: 2245 ORM entities can be passed to the core select() construct as well as to the select_from(), correlate(), and correlate_except() methods of select(), where they will be unwrapped into selectables. .. change:: :tags: orm, feature :tickets: 2245 Some support for auto-rendering of a relationship join condition based on the mapped attribute, with usage of core SQL constructs. E.g. select([SomeClass]).where(SomeClass.somerelationship) would render SELECT from "someclass" and use the primaryjoin of "somerelationship" as the WHERE clause. This changes the previous meaning of "SomeClass.somerelationship" when used in a core SQL context; previously, it would "resolve" to the parent selectable, which wasn't generally useful. Also works with query.filter(). Related to. .. change:: :tags: orm, feature :tickets: 2526 The registry of classes in declarative_base() is now a WeakValueDictionary. So subclasses of "Base" that are dereferenced will be garbage collected, *if they are not referred to by any other mappers/superclass mappers*. See the next note for this ticket. .. change:: :tags: orm, feature :tickets: 2472 Conflicts between columns on single-inheritance declarative subclasses, with or without using a mixin, can be resolved using a new @declared_attr usage described in the documentation. .. 
change:: :tags: orm, feature :tickets: 2472 declared_attr can now be used on non-mixin classes, even though this is generally only useful for single-inheritance subclass column conflict resolution. .. change:: :tags: orm, feature :tickets: 2517 declared_attr can now be used with attributes that are not Column or MapperProperty; including any user-defined value as well as association proxy objects. .. change:: :tags: orm, bug :tickets: 2565 Fixed a disconnect that slowly evolved between a @declared_attr Column and a directly-defined Column on a mixin. In both cases, the Column will be applied to the declared class' table, but not to that of a joined inheritance subclass. Previously, the directly-defined Column would be placed on both the base and the sub table, which isn't typically what's desired. .. change:: :tags: orm, feature :tickets: 2526 *Very limited* support for inheriting mappers to be GC'ed when the class itself is deferenced. The mapper must not have its own table (i.e. single table inh only) without polymorphic attributes in place. This allows for the use case of creating a temporary subclass of a declarative mapped class, with no table or mapping directives of its own, to be garbage collected when dereferenced by a unit test. .. change:: :tags: orm, feature :tickets: 2338 Declarative now maintains a registry of classes by string name as well as by full module-qualified name. Multiple classes with the same name can now be looked up based on a module-qualified string within relationship(). Simple class name lookups where more than one class shares the same name now raises an informative error message. .. change:: :tags: orm, feature :tickets: 2535 Can now provide class-bound attributes that override columns which are of any non-ORM type, not just descriptors. .. change:: :tags: orm, feature :tickets: 1729 Added with_labels and reduce_columns keyword arguments to Query.subquery(), to provide two alternate strategies for producing queries with uniquely- named columns. . .. change:: :tags: orm, feature :tickets: 2476 A warning is emitted when a reference to an instrumented collection is no longer associated with the parent class due to expiration/attribute refresh/collection replacement, but an append or remove operation is received on the now-detached collection. .. change:: :tags: orm, bug :tickets: 2549 Declarative can now propagate a column declared on a single-table inheritance subclass up to the parent class' table, when the parent class is itself mapped to a join() or select() statement, directly or via joined inheritance, and not just a Table. .. change:: :tags: orm, bug :tickets: An error is emitted when uselist=False is combined with a "dynamic" loader. This is a warning in 0.7.9. .. change:: :tags: removed, orm :tickets: Deprecated identifiers removed: * allow_null_pks mapper() argument (use allow_partial_pks) * _get_col_to_prop() mapper method (use get_property_by_column()) * dont_load argument to Session.merge() (use load=True) * sqlalchemy.orm.shard module (use sqlalchemy.ext.horizontal_shard) .. change:: :tags: engine, feature :tickets: 2511 Connection event listeners can now be associated with individual Connection objects, not just Engine objects. .. change:: :tags: engine, feature :tickets: 2459 The before_cursor_execute event fires off for so-called "_cursor_execute" events, which are usually special-case executions of primary-key bound sequences and default-generation SQL phrases that invoke separately when RETURNING is not used with INSERT. .. 
change:: :tags: engine, feature :tickets: The libraries used by the test suite have been moved around a bit so that they are part of the SQLAlchemy install again. In addition, a new suite of tests is present in the new sqlalchemy.testing.suite package. This is an under-development system that hopes to provide a universal testing suite for external dialects. Dialects which are maintained outside of SQLAlchemy can use the new test fixture as the framework for their own tests, and will get for free a "compliance" suite of dialect-focused tests, including an improved "requirements" system where specific capabilities and features can be enabled or disabled for testing. .. change:: :tags: engine, bug :tickets: The Inspector.get_table_names() order_by="foreign_key" feature now sorts tables by dependee first, to be consistent with util.sort_tables and metadata.sorted_tables. .. change:: :tags: engine, bug :tickets: 2522 Fixed bug whereby if a database restart affected multiple connections, each connection would individually invoke a new disposal of the pool, even though only one disposal is needed. .. change:: :tags: engine, feature :tickets: 2462 Added a new system for registration of new dialects in-process without using an entrypoint. See the docs for "Registering New Dialects". .. change:: :tags: engine, feature :tickets: 2556 The "required" flag is set to True by default, if not passed explicitly, on bindparam() if the "value" or "callable" parameters are not passed. This will cause statement execution to check for the parameter being present in the final collection of bound parameters, rather than implicitly assigning None. .. change:: :tags: engine, feature :tickets: Various API tweaks to the "dialect" API to better support highly specialized systems such as the Akiban database, including more hooks to allow an execution context to access type processors. .. change:: :tags: engine, bug :tickets: 2397 The names of the columns on the .c. attribute of a select().apply_labels() are now based on ``<tablename>_<colkey>`` instead of ``<tablename>_<colname>``, for those columns that have a distinctly named .key. .. change:: :tags: engine, feature :tickets: 2422 Inspector.get_primary_keys() is deprecated; use Inspector.get_pk_constraint(). Courtesy Diana Clarke. .. change:: :tags: engine, bug :tickets: The autoload_replace flag on Table, when False, will cause any reflected foreign key constraints which refer to already-declared columns to be skipped, assuming that the in-Python declared column will take over the task of specifying in-Python ForeignKey or ForeignKeyConstraint declarations. .. change:: :tags: engine, bug :tickets: 2498 The ResultProxy methods inserted_primary_key, last_updated_params(), last_inserted_params(), postfetch_cols(), prefetch_cols() all assert that the given statement is a compiled construct, and is an insert() or update() statement as is appropriate, else raise InvalidRequestError. .. change:: :tags: engine, feature :tickets: New C extension module "utils" has been added for additional function speedups as we have time to implement. .. change:: :tags: engine :tickets: ResultProxy.last_inserted_ids is removed, replaced by inserted_primary_key. .. change:: :tags: feature, sql :tickets: 2547 Major rework of operator system in Core, to allow redefinition of existing operators as well as addition of new operators at the type level. New types can be created from existing ones which add or redefine operations that are exported out to column expressions, in a similar manner to how the ORM has allowed comparator_factory.
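A minimal sketch of the kind of type-level redefinition this enables (the type name and the "goofy" operator are purely illustrative)::

    from sqlalchemy import Integer

    class GoofyInteger(Integer):
        class comparator_factory(Integer.Comparator):
            def __add__(self, other):
                # render "x goofy y" instead of "x + y" for columns of this type
                return self.op("goofy")(other)

A column declared with such a type then emits the custom operator whenever ``+`` is used against it in an expression.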
The new architecture moves this capability into the Core so that it is consistently usable in all cases, propagating cleanly using existing type propagation behavior. .. change:: :tags: feature, sql :tickets: 1534, 2547 To complement, types can now provide "bind expressions" and "column expressions" which allow compile-time injection of SQL expressions into statements on a per-column or per-bind level. This is to suit the use case of a type which needs to augment bind- and result- behavior at the SQL level, as opposed to at the Python level. Allows for schemes like transparent encryption/decryption, usage of Postgis functions, etc. .. change:: :tags: feature, sql :tickets: The Core operator system now includes the `getitem` operator, i.e. the bracket operator in Python. This is used at first to provide index and slice behavior to the Postgresql ARRAY type, and also provides a hook for end-user definition of custom __getitem__ schemes which can be applied at the type level as well as within ORM-level custom operator schemes. `lshift` (<<) and `rshift` (>>) are also supported as optional operators. Note that this change has the effect that descriptor-based __getitem__ schemes used by the ORM in conjunction with synonym() or other "descriptor-wrapped" schemes will need to start using a custom comparator in order to maintain this behavior. .. change:: :tags: feature, sql :tickets: 2537 Revised the rules used to determine the operator precedence for the user-defined operator, i.e. that granted using the ``op()`` method. Previously, the smallest precedence was applied in all cases; now the default precedence is zero, lower than all operators except "comma" (such as is used in the argument list of a ``func`` call) and "AS", and is also customizable via the "precedence" argument on the ``op()`` method. .. change:: :tags: feature, sql :tickets: 2276 Added "collation" parameter to all String types. When present, renders as ``COLLATE <collation>``. This is to support the COLLATE keyword now supported by several databases including MySQL, SQLite, and Postgresql. .. change:: :tags: change, sql :tickets: The Text() type renders the length given to it, if a length was specified. .. change:: :tags: feature, sql :tickets: Custom unary operators can now be used by combining operators.custom_op() with UnaryExpression(). .. change:: :tags: bug, sql :tickets: 2564 A tweak to column precedence which moves the "concat" and "match" operators to be the same as that of "is", "like", and others; this helps with parenthesization rendering when used in conjunction with "IS". .. change:: :tags: feature, sql :tickets: Enhanced GenericFunction and func.* to allow for user-defined GenericFunction subclasses to be available via the func.* namespace automatically by classname, optionally using a package name, as well as with the ability to have the rendered name different from the identified name in func.*. .. change:: :tags: feature, sql :tickets: 2562 The cast() and extract() constructs will now be produced via the func.* accessor as well; as users naturally try to access these names from func.*, they might as well do what's expected, even though the returned object is not a FunctionElement. .. change:: :tags: changed, sql :tickets: Most classes in expression.sql are no longer preceded with an underscore, i.e. Label, SelectBase, Generative, CompareMixin. _BindParamClause is also renamed to BindParameter. The old underscore names for these classes will remain available as synonyms for the foreseeable future. ..
change:: :tags: feature, sql :tickets: 2208 The Inspector object can now be acquired using the new inspect() service, part of .. change:: :tags: feature, sql :tickets: 2418 The column_reflect event now accepts the Inspector object as the first argument, preceding "table". Code which uses the 0.7 version of this very new event will need modification to add the "inspector" object as the first argument. .. change:: :tags: feature, sql :tickets: 2423 The behavior of column targeting in result sets is now case sensitive by default. SQLAlchemy for many years would run a case-insensitive conversion on these values, probably to alleviate early case sensitivity issues with dialects like Oracle and Firebird. These issues have been more cleanly solved in more modern versions so the performance hit of calling lower() on identifiers is removed. The case insensitive comparisons can be re-enabled by setting "case_insensitive=False" on create_engine(). .. change:: :tags: bug, sql :tickets: 2591 Applying a column expression to a select statement using a label with or without other modifying constructs will no longer "target" that expression to the underlying Column; this affects ORM operations that rely upon Column targeting in order to retrieve results. That is, a query like query(User.id, User.id.label('foo')) will now track the value of each "User.id" expression separately instead of munging them together. It is not expected that any users will be impacted by this; however, a usage that uses select() in conjunction with query.from_statement() and attempts to load fully composed ORM entities may not function as expected if the select() named Column objects with arbitrary .label() names, as these will no longer target to the Column objects mapped by that entity. .. change:: :tags: feature, sql :tickets: 2415 The "unconsumed column names" warning emitted when keys are present in insert.values() or update.values() that aren't in the target table is now an exception. .. change:: :tags: feature, sql :tickets: 2502 Added "MATCH" clause to ForeignKey, ForeignKeyConstraint, courtesy Ryan Kelly. .. change:: :tags: feature, sql :tickets: 2507 Added support for DELETE and UPDATE from an alias of a table, which would assumedly be related to itself elsewhere in the query, courtesy Ryan Kelly. .. change:: :tags: feature, sql :tickets: select() features a correlate_except() method, auto correlates all selectables except those passed. .. change:: :tags: feature, sql :tickets: 2431 The prefix_with() method is now available on each of select(), insert(), update(), delete(), all with the same API, accepting multiple prefix calls, as well as a "dialect name" so that the prefix can be limited to one kind of dialect. .. change:: :tags: feature, sql :tickets: 1729 Added reduce_columns() method to select() construct, replaces columns inline using the util.reduce_columns utility function to remove equivalent columns. reduce_columns() also adds "with_only_synonyms" to limit the reduction just to those columns which have the same name. The deprecated fold_equivalents() feature is removed. .. change:: :tags: feature, sql :tickets: 2470 Reworked the startswith(), endswith(), contains() operators to do a better job with negation (NOT LIKE), and also to assemble them at compilation time so that their rendered SQL can be altered, such as in the case for Firebird STARTING WITH .. 
change:: :tags: feature, sql :tickets: 2463 Added a hook to the system of rendering CREATE TABLE that provides access to the render for each Column individually, by constructing a @compiles function against the new schema.CreateColumn construct. .. change:: :tags: feature, sql :tickets: "scalar" selects now have a WHERE method to help with generative building. Also slight adjustment regarding how SS "correlates" columns; the new methodology no longer applies meaning to the underlying Table column being selected. This improves some fairly esoteric situations, and the logic that was there didn't seem to have any purpose. .. change:: :tags: bug, sql :tickets: 2520 Fixes to the interpretation of the Column "default" parameter as a callable to not pass ExecutionContext into a keyword argument parameter. .. change:: :tags: bug, sql :tickets: 2410 All of UniqueConstraint, ForeignKeyConstraint, CheckConstraint, and PrimaryKeyConstraint will attach themselves to their parent table automatically when they refer to a Table-bound Column object directly (i.e. not just string column name), and refer to one and only one Table. Prior to 0.8 this behavior occurred for UniqueConstraint and PrimaryKeyConstraint, but not ForeignKeyConstraint or CheckConstraint. .. change:: :tags: bug, sql :tickets: 2594 TypeDecorator now includes a generic repr() that works in terms of the "impl" type by default. This is a behavioral change for those TypeDecorator classes that specify a custom __init__ method; those types will need to re-define __repr__() if they need __repr__() to provide a faithful constructor representation. .. change:: :tags: bug, sql :tickets: 2168 column.label(None) now produces an anonymous label, instead of returning the column object itself, consistent with the behavior of label(column, None). .. change:: :tags: feature, sql :tickets: 2455 An explicit error is raised when a ForeignKeyConstraint() that was constructed to refer to multiple remote tables is first used. .. change:: :tags: access, feature :tickets: the MS Access dialect has been moved to its own project on Bitbucket, taking advantage of the new SQLAlchemy dialect compliance suite. The dialect is still in very rough shape and probably not ready for general use yet, however it does have *extremely* rudimental functionality now. https://bitbucket.org/zzzeek/sqlalchemy-access .. change:: :tags: maxdb, moved :tickets: The MaxDB dialect, which hasn't been functional for several years, is moved out to a pending bitbucket project, https://bitbucket.org/zzzeek/sqlalchemy-maxdb. .. change:: :tags: sqlite, feature :tickets: 2363 the SQLite date and time types have been overhauled to support a more open ended format for input and output, using name based format strings and regexps. A new argument "microseconds" also provides the option to omit the "microseconds" portion of timestamps. Thanks to Nathan Wright for the work and tests on this. .. change:: :tags: mssql, feature :tickets: SQL Server dialect can be given database-qualified schema names, i.e. "schema='mydatabase.dbo'"; reflection operations will detect this, split the schema among the "." to get the owner separately, and emit a "USE mydatabase" statement before reflecting targets within the "dbo" owner; the existing database returned from DB_NAME() is then restored. .. change:: :tags: mssql, bug :tickets: 2277 removed legacy behavior whereby a column comparison to a scalar SELECT via == would coerce to an IN with the SQL server dialect. 
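For example, under the old behavior a comparison like the following would silently render as an IN on SQL Server; under 0.8 the intent must be stated explicitly (the ``users`` and ``addresses`` tables here are illustrative only)::

    from sqlalchemy import select

    subq = select([addresses.c.user_id]).where(addresses.c.verified == True)

    # previously on SQL Server: users.c.id == subq coerced to an IN comparison
    # 0.8: spell out the IN explicitly
    stmt = select([users.c.name]).where(users.c.id.in_(subq))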
This is implicit behavior which fails in other scenarios so is removed. Code which relies on this needs to be modified to use column.in_(select) explicitly. .. change:: :tags: mssql, feature :tickets: updated support for the mxodbc driver; mxodbc 3.2.1 is recommended for full compatibility. .. change:: :tags: postgresql, feature :tickets: 2441 postgresql.ARRAY features an optional "dimension" argument, will assign a specific number of dimensions to the array which will render in DDL as ARRAY[][]..., also improves performance of bind/result processing. .. change:: :tags: postgresql, feature :tickets: postgresql.ARRAY now supports indexing and slicing. The Python [] operator is available on all SQL expressions that are of type ARRAY; integer or simple slices can be passed. The slices can also be used on the assignment side in the SET clause of an UPDATE statement by passing them into Update.values(); see the docs for examples. .. change:: :tags: postgresql, feature :tickets: Added new "array literal" construct postgresql.array(). Basically a "tuple" that renders as ARRAY[1,2,3]. .. change:: :tags: postgresql, feature :tickets: 2506 Added support for the Postgresql ONLY keyword, which can appear corresponding to a table in a SELECT, UPDATE, or DELETE statement. The phrase is established using with_hint(). Courtesy Ryan Kelly .. change:: :tags: postgresql, feature :tickets: The "ischema_names" dictionary of the Postgresql dialect is "unofficially" customizable. Meaning, new types such as PostGIS types can be added into this dictionary, and the PG type reflection code should be able to handle simple types with variable numbers of arguments. The functionality here is "unofficial" for three reasons: 1. this is not an "official" API. Ideally an "official" API would allow custom type-handling callables at the dialect or global level in a generic way. 2. This is only implemented for the PG dialect, in particular because PG has broad support for custom types vs. other database backends. A real API would be implemented at the default dialect level. 3. The reflection code here is only tested against simple types and probably has issues with more compositional types. patch courtesy Éric Lemoine. .. change:: :tags: firebird, feature :tickets: 2470 The "startswith()" operator renders as "STARTING WITH", "~startswith()" renders as "NOT STARTING WITH", using FB's more efficient operator. .. change:: :tags: firebird, bug :tickets: 2505 CompileError is raised when VARCHAR with no length is attempted to be emitted, same way as MySQL. .. change:: :tags: firebird, bug :tickets: Firebird now uses strict "ansi bind rules" so that bound parameters don't render in the columns clause of a statement - they render literally instead. .. change:: :tags: firebird, bug :tickets: Support for passing datetime as date when using the DateTime type with Firebird; other dialects support this. .. change:: :tags: firebird, feature :tickets: 2504 An experimental dialect for the fdb driver is added, but is untested as I cannot get the fdb package to build. .. change:: :tags: bug, mysql :tickets: 2404 Dialect no longer emits expensive server collations query, as well as server casing, on first connect. These functions are still available as semi-private. .. change:: :tags: feature, mysql :tickets: 2534 Added TIME type to mysql dialect, accepts "fst" argument which is the new "fractional seconds" specifier for recent MySQL versions. 
The datatype will interpret a microseconds portion received from the driver, however note that at this time most/all MySQL DBAPIs do not support returning this value. .. change:: :tags: oracle, bug :tickets: 2437 Quoting information is now passed along from a Column with quote=True when generating a same-named bound parameter to the bindparam() object, as is the case in generated INSERT and UPDATE statements, so that unknown reserved names can be fully supported. .. change:: :tags: oracle, feature :tickets: 2561 The types of columns excluded from the setinputsizes() set can be customized by sending a list of string DBAPI type names to exclude, using the exclude_setinputsizes dialect parameter. This list was previously fixed. The list also now defaults to STRING, UNICODE, removing CLOB, NCLOB from the list. .. change:: :tags: oracle, bug :tickets: The CreateIndex construct in Oracle will now schema-qualify the name of the index to be that of the parent table. Previously this name was omitted which apparently creates the index in the default schema, rather than that of the table. .. change:: :tags: sql, feature :tickets: 2580 Added :meth:`.ColumnOperators.notin_`, :meth:`.ColumnOperators.notlike`, :meth:`.ColumnOperators.notilike` to :class:`.ColumnOperators`. .. change:: :tags: sql, removed The long-deprecated and non-functional ``assert_unicode`` flag on :func:`.create_engine` as well as :class:`.String` is removed. SQLAlchemy-0.8.4/doc/_sources/changelog/index.txt0000644000076500000240000000110712251147171022417 0ustar classicstaff00000000000000.. _changelog_toplevel: Changes and Migration ===================== SQLAlchemy changelogs and migration guides are now integrated within the main documentation. Current Migration Guide ------------------------ .. toctree:: :maxdepth: 1 migration_08 Change logs ----------- .. toctree:: :maxdepth: 2 changelog_08 changelog_07 changelog_06 changelog_05 changelog_04 changelog_03 changelog_02 changelog_01 Older Migration Guides ---------------------- .. toctree:: :maxdepth: 1 migration_07 migration_06 migration_05 migration_04 SQLAlchemy-0.8.4/doc/_sources/changelog/migration_04.txt0000644000076500000240000006240012251147171023607 0ustar classicstaff00000000000000============================= What's new in SQLAlchemy 0.4? ============================= .. admonition:: About this Document This document describes changes between SQLAlchemy version 0.3, last released October 14, 2007, and SQLAlchemy version 0.4, last released October 12, 2008. Document date: March 21, 2008 First Things First ================== If you're using any ORM features, make sure you import from ``sqlalchemy.orm``: :: from sqlalchemy import * from sqlalchemy.orm import * Secondly, anywhere you used to say ``engine=``, ``connectable=``, ``bind_to=``, ``something.engine``, ``metadata.connect()``, use ``bind``: :: myengine = create_engine('sqlite://') meta = MetaData(myengine) meta2 = MetaData() meta2.bind = myengine session = create_session(bind=myengine) statement = select([table], bind=myengine) Got those ? Good! You're now (95%) 0.4 compatible. If you're using 0.3.10, you can make these changes immediately; they'll work there too. Module Imports ============== In 0.3, "``from sqlachemy import *``" would import all of sqlachemy's sub-modules into your namespace. Version 0.4 no longer imports sub-modules into the namespace. This may mean you need to add extra imports into your code. 
In 0.3, this code worked: :: from sqlalchemy import * class UTCDateTime(types.TypeDecorator): pass In 0.4, one must do: :: from sqlalchemy import * from sqlalchemy import types class UTCDateTime(types.TypeDecorator): pass Object Relational Mapping ========================= Querying -------- New Query API ^^^^^^^^^^^^^ Query is standardized on the generative interface (old interface is still there, just deprecated). While most of the generative interface is available in 0.3, the 0.4 Query has the inner guts to match the generative outside, and has a lot more tricks. All result narrowing is via ``filter()`` and ``filter_by()``, limiting/offset is either through array slices or ``limit()``/``offset()``, joining is via ``join()`` and ``outerjoin()`` (or more manually, through ``select_from()`` as well as manually-formed criteria). To avoid deprecation warnings, you must make some changes to your 03 code User.query.get_by( \**kwargs ) :: User.query.filter_by(**kwargs).first() User.query.select_by( \**kwargs ) :: User.query.filter_by(**kwargs).all() User.query.select() :: User.query.filter(xxx).all() New Property-Based Expression Constructs ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ By far the most palpable difference within the ORM is that you can now construct your query criterion using class-based attributes directly. The ".c." prefix is no longer needed when working with mapped classes: :: session.query(User).filter(and_(User.name == 'fred', User.id > 17)) While simple column-based comparisons are no big deal, the class attributes have some new "higher level" constructs available, including what was previously only available in ``filter_by()``: :: # comparison of scalar relations to an instance filter(Address.user == user) # return all users who contain a particular address filter(User.addresses.contains(address)) # return all users who *dont* contain the address filter(~User.address.contains(address)) # return all users who contain a particular address with # the email_address like '%foo%' filter(User.addresses.any(Address.email_address.like('%foo%'))) # same, email address equals 'foo@bar.com'. can fall back to keyword # args for simple comparisons filter(User.addresses.any(email_address = 'foo@bar.com')) # return all Addresses whose user attribute has the username 'ed' filter(Address.user.has(name='ed')) # return all Addresses whose user attribute has the username 'ed' # and an id > 5 (mixing clauses with kwargs) filter(Address.user.has(User.id > 5, name='ed')) The ``Column`` collection remains available on mapped classes in the ``.c`` attribute. Note that property-based expressions are only available with mapped properties of mapped classes. ``.c`` is still used to access columns in regular tables and selectable objects produced from SQL Expressions. Automatic Join Aliasing ^^^^^^^^^^^^^^^^^^^^^^^ We've had join() and outerjoin() for a while now: :: session.query(Order).join('items')... Now you can alias them: :: session.query(Order).join('items', aliased=True). filter(Item.name='item 1').join('items', aliased=True).filter(Item.name=='item 3') The above will create two joins from orders->items using aliases. the ``filter()`` call subsequent to each will adjust its table criterion to that of the alias. To get at the ``Item`` objects, use ``add_entity()`` and target each join with an ``id``: :: session.query(Order).join('items', id='j1', aliased=True). filter(Item.name == 'item 1').join('items', aliased=True, id='j2'). 
filter(Item.name == 'item 3').add_entity(Item, id='j1').add_entity(Item, id='j2') Returns tuples in the form: ``(Order, Item, Item)``. Self-referential Queries ^^^^^^^^^^^^^^^^^^^^^^^^ So query.join() can make aliases now. What does that give us ? Self-referential queries ! Joins can be done without any ``Alias`` objects: :: # standard self-referential TreeNode mapper with backref mapper(TreeNode, tree_nodes, properties={ 'children':relation(TreeNode, backref=backref('parent', remote_side=tree_nodes.id)) }) # query for node with child containing "bar" two levels deep session.query(TreeNode).join(["children", "children"], aliased=True).filter_by(name='bar') To add criterion for each table along the way in an aliased join, you can use ``from_joinpoint`` to keep joining against the same line of aliases: :: # search for the treenode along the path "n1/n12/n122" # first find a Node with name="n122" q = sess.query(Node).filter_by(name='n122') # then join to parent with "n12" q = q.join('parent', aliased=True).filter_by(name='n12') # join again to the next parent with 'n1'. use 'from_joinpoint' # so we join from the previous point, instead of joining off the # root table q = q.join('parent', aliased=True, from_joinpoint=True).filter_by(name='n1') node = q.first() ``query.populate_existing()`` ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ The eager version of ``query.load()`` (or ``session.refresh()``). Every instance loaded from the query, including all eagerly loaded items, get refreshed immediately if already present in the session: :: session.query(Blah).populate_existing().all() Relations --------- SQL Clauses Embedded in Updates/Inserts ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ For inline execution of SQL clauses, embedded right in the UPDATE or INSERT, during a ``flush()``: :: myobject.foo = mytable.c.value + 1 user.pwhash = func.md5(password) order.hash = text("select hash from hashing_table") The column-attribute is set up with a deferred loader after the operation, so that it issues the SQL to load the new value when you next access. Self-referential and Cyclical Eager Loading ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Since our alias-fu has improved, ``relation()`` can join along the same table \*any number of times*; you tell it how deep you want to go. Lets show the self-referential ``TreeNode`` more clearly: :: nodes = Table('nodes', metadata, Column('id', Integer, primary_key=True), Column('parent_id', Integer, ForeignKey('nodes.id')), Column('name', String(30))) class TreeNode(object): pass mapper(TreeNode, nodes, properties={ 'children':relation(TreeNode, lazy=False, join_depth=3) }) So what happens when we say: :: create_session().query(TreeNode).all() ? A join along aliases, three levels deep off the parent: :: SELECT nodes_3.id AS nodes_3_id, nodes_3.parent_id AS nodes_3_parent_id, nodes_3.name AS nodes_3_name, nodes_2.id AS nodes_2_id, nodes_2.parent_id AS nodes_2_parent_id, nodes_2.name AS nodes_2_name, nodes_1.id AS nodes_1_id, nodes_1.parent_id AS nodes_1_parent_id, nodes_1.name AS nodes_1_name, nodes.id AS nodes_id, nodes.parent_id AS nodes_parent_id, nodes.name AS nodes_name FROM nodes LEFT OUTER JOIN nodes AS nodes_1 ON nodes.id = nodes_1.parent_id LEFT OUTER JOIN nodes AS nodes_2 ON nodes_1.id = nodes_2.parent_id LEFT OUTER JOIN nodes AS nodes_3 ON nodes_2.id = nodes_3.parent_id ORDER BY nodes.oid, nodes_1.oid, nodes_2.oid, nodes_3.oid Notice the nice clean alias names too. 
The joining doesn't care if it's against the same immediate table or some other object which then cycles back to the beginining. Any kind of chain of eager loads can cycle back onto itself when ``join_depth`` is specified. When not present, eager loading automatically stops when it hits a cycle. Composite Types ^^^^^^^^^^^^^^^ This is one from the Hibernate camp. Composite Types let you define a custom datatype that is composed of more than one column (or one column, if you wanted). Lets define a new type, ``Point``. Stores an x/y coordinate: :: class Point(object): def __init__(self, x, y): self.x = x self.y = y def __composite_values__(self): return self.x, self.y def __eq__(self, other): return other.x == self.x and other.y == self.y def __ne__(self, other): return not self.__eq__(other) The way the ``Point`` object is defined is specific to a custom type; constructor takes a list of arguments, and the ``__composite_values__()`` method produces a sequence of those arguments. The order will match up to our mapper, as we'll see in a moment. Let's create a table of vertices storing two points per row: :: vertices = Table('vertices', metadata, Column('id', Integer, primary_key=True), Column('x1', Integer), Column('y1', Integer), Column('x2', Integer), Column('y2', Integer), ) Then, map it ! We'll create a ``Vertex`` object which stores two ``Point`` objects: :: class Vertex(object): def __init__(self, start, end): self.start = start self.end = end mapper(Vertex, vertices, properties={ 'start':composite(Point, vertices.c.x1, vertices.c.y1), 'end':composite(Point, vertices.c.x2, vertices.c.y2) }) Once you've set up your composite type, it's usable just like any other type: :: v = Vertex(Point(3, 4), Point(26,15)) session.save(v) session.flush() # works in queries too q = session.query(Vertex).filter(Vertex.start == Point(3, 4)) If you'd like to define the way the mapped attributes generate SQL clauses when used in expressions, create your own ``sqlalchemy.orm.PropComparator`` subclass, defining any of the common operators (like ``__eq__()``, ``__le__()``, etc.), and send it in to ``composite()``. Composite types work as primary keys too, and are usable in ``query.get()``: :: # a Document class which uses a composite Version # object as primary key document = query.get(Version(1, 'a')) ``dynamic_loader()`` relations ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ A ``relation()`` that returns a live ``Query`` object for all read operations. Write operations are limited to just ``append()`` and ``remove()``, changes to the collection are not visible until the session is flushed. This feature is particularly handy with an "autoflushing" session which will flush before each query. :: mapper(Foo, foo_table, properties={ 'bars':dynamic_loader(Bar, backref='foo', ) }) session = create_session(autoflush=True) foo = session.query(Foo).first() foo.bars.append(Bar(name='lala')) for bar in foo.bars.filter(Bar.name=='lala'): print bar session.commit() New Options: ``undefer_group()``, ``eagerload_all()`` ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ A couple of query options which are handy. 
``undefer_group()`` marks a whole group of "deferred" columns as undeferred: :: mapper(Class, table, properties={ 'foo' : deferred(table.c.foo, group='group1'), 'bar' : deferred(table.c.bar, group='group1'), 'bat' : deferred(table.c.bat, group='group1'), }) session.query(Class).options(undefer_group('group1')).filter(...).all() and ``eagerload_all()`` sets a chain of attributes to be eager in one pass: :: mapper(Foo, foo_table, properties={ 'bar':relation(Bar) }) mapper(Bar, bar_table, properties={ 'bat':relation(Bat) }) mapper(Bat, bat_table) # eager load bar and bat session.query(Foo).options(eagerload_all('bar.bat')).filter(...).all() New Collection API ^^^^^^^^^^^^^^^^^^ Collections are no longer proxied by an ``InstrumentedList`` proxy, and access to members, methods and attributes is direct. Decorators now intercept objects entering and leaving the collection, and it is now possible to easily write a custom collection class that manages its own membership. Flexible decorators also replace the named method interface of custom collections in 0.3, allowing any class to be easily adapted for use as a collection container. Dictionary-based collections are now much easier to use and fully ``dict``-like. Changing ``__iter__`` is no longer needed for ``dict``s, and new built-in ``dict`` types cover many needs: :: # use a dictionary relation keyed by a column relation(Item, collection_class=column_mapped_collection(items.c.keyword)) # or named attribute relation(Item, collection_class=attribute_mapped_collection('keyword')) # or any function you like relation(Item, collection_class=mapped_collection(lambda entity: entity.a + entity.b)) Existing 0.3 ``dict``-like and freeform object derived collection classes will need to be updated for the new API. In most cases this is simply a matter of adding a couple of decorators to the class definition. Mapped Relations from External Tables/Subqueries ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ This feature quietly appeared in 0.3 but has been improved in 0.4 thanks to better ability to convert subqueries against a table into subqueries against an alias of that table; this is key for eager loading, aliased joins in queries, etc. It reduces the need to create mappers against select statements when you just need to add some extra columns or subqueries: :: mapper(User, users, properties={ 'fullname': column_property((users.c.firstname + users.c.lastname).label('fullname')), 'numposts': column_property( select([func.count(1)], users.c.id==posts.c.user_id).correlate(users).label('posts') ) }) A typical query looks like: :: SELECT (SELECT count(1) FROM posts WHERE users.id = posts.user_id) AS count, users.firstname || users.lastname AS fullname, users.id AS users_id, users.firstname AS users_firstname, users.lastname AS users_lastname FROM users ORDER BY users.oid Horizontal Scaling (Sharding) API --------------------------------- [browser:/sqlalchemy/trunk/examples/sharding/attribute_shard.py] Sessions -------- New Session Create Paradigm; SessionContext, assignmapper Deprecated ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ That's right, the whole shebang is being replaced with two configurational functions. Using both will produce the most 0.1-ish feel we've had since 0.1 (i.e., the least amount of typing).
Configure your own ``Session`` class right where you define your ``engine`` (or anywhere): :: from sqlalchemy import create_engine from sqlalchemy.orm import sessionmaker engine = create_engine('myengine://') Session = sessionmaker(bind=engine, autoflush=True, transactional=True) # use the new Session() freely sess = Session() sess.save(someobject) sess.flush() If you need to post-configure your Session, say with an engine, add it later with ``configure()``: :: Session.configure(bind=create_engine(...)) All the behaviors of ``SessionContext`` and the ``query`` and ``__init__`` methods of ``assignmapper`` are moved into the new ``scoped_session()`` function, which is compatible with both ``sessionmaker`` as well as ``create_session()``: :: from sqlalchemy.orm import scoped_session, sessionmaker Session = scoped_session(sessionmaker(autoflush=True, transactional=True)) Session.configure(bind=engine) u = User(name='wendy') sess = Session() sess.save(u) sess.commit() # Session constructor is thread-locally scoped. Everyone gets the same # Session in the thread when scope="thread". sess2 = Session() assert sess is sess2 When using a thread-local ``Session``, the returned class has all of ``Session``'s interface implemented as classmethods, and "assignmapper"'s functionality is available using the ``mapper`` classmethod. Just like the old ``objectstore`` days.... :: # "assignmapper"-like functionality available via ScopedSession.mapper Session.mapper(User, users_table) u = User(name='wendy') Session.commit() Sessions are again Weak Referencing By Default ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ The ``weak_identity_map`` flag is now set to ``True`` by default on Session. Instances which are externally dereferenced and fall out of scope are removed from the session automatically. However, items which have "dirty" changes present will remain strongly referenced until those changes are flushed, at which point the object reverts to being weakly referenced (this works for 'mutable' types, like picklable attributes, as well). Setting ``weak_identity_map`` to ``False`` restores the old strong-referencing behavior for those of you using the session like a cache. Auto-Transactional Sessions ^^^^^^^^^^^^^^^^^^^^^^^^^^^ As you might have noticed above, we are calling ``commit()`` on ``Session``. The flag ``transactional=True`` means the ``Session`` is always in a transaction; ``commit()`` persists changes permanently. Auto-Flushing Sessions ^^^^^^^^^^^^^^^^^^^^^^ Also, ``autoflush=True`` means the ``Session`` will ``flush()`` before each ``query`` as well as when you call ``flush()`` or ``commit()``. So now this will work: :: Session = sessionmaker(bind=engine, autoflush=True, transactional=True) u = User(name='wendy') sess = Session() sess.save(u) # wendy is flushed, comes right back from a query wendy = sess.query(User).filter_by(name='wendy').one() Transactional methods moved onto sessions ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ``commit()`` and ``rollback()``, as well as ``begin()``, are now directly on ``Session``. No more need to use ``SessionTransaction`` for anything (it remains in the background). :: Session = sessionmaker(autoflush=True, transactional=False) sess = Session() sess.begin() # use the session sess.commit() # commit transaction Sharing a ``Session`` with an enclosing engine-level (i.e. non-ORM) transaction is easy: :: Session = sessionmaker(autoflush=True, transactional=False) conn = engine.connect() trans = conn.begin() sess = Session(bind=conn) # ...
session is transactional # commit the outermost transaction trans.commit() Nested Session Transactions with SAVEPOINT ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Available at the Engine and ORM level. ORM docs so far: http://www.sqlalchemy.org/docs/04/session.html#unitofwork_managing Two-Phase Commit Sessions ^^^^^^^^^^^^^^^^^^^^^^^^^ Available at the Engine and ORM level. ORM docs so far: http://www.sqlalchemy.org/docs/04/session.html#unitofwork_managing Inheritance ----------- Polymorphic Inheritance with No Joins or Unions ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ New docs for inheritance: http://www.sqlalchemy.org/docs/04/mappers.html#advdatamapping_mapper_inheritance_joined Better Polymorphic Behavior with ``get()`` ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ All classes within a joined-table inheritance hierarchy get an ``_instance_key`` using the base class, i.e. ``(BaseClass, (1, ), None)``. That way, when you call ``get()`` on a ``Query`` against the base class, it can locate subclass instances in the current identity map without querying the database. Types ----- Custom Subclasses of ``sqlalchemy.types.TypeDecorator`` ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ There is a new API for subclassing a ``TypeDecorator``. Using the 0.3 API causes compilation errors in some cases. SQL Expressions =============== All New, Deterministic Label/Alias Generation --------------------------------------------- All the "anonymous" labels and aliases use a simple _ format now. SQL is much easier to read and is compatible with plan optimizer caches. Just check out some of the examples in the tutorials: http://www.sqlalchemy.org/docs/04/ormtutorial.html http://www.sqlalchemy.org/docs/04/sqlexpression.html Generative select() Constructs ------------------------------ This is definitely the way to go with ``select()``. See http://www.sqlalchemy.org/docs/04/sqlexpression.html#sql_transform . New Operator System ------------------- SQL operators and more or less every SQL keyword there is are now abstracted into the compiler layer. They now act intelligently and are type/backend aware, see: http://www.sqlalchemy.org/docs/04/sqlexpression.html#sql_operators All ``type`` Keyword Arguments Renamed to ``type_`` --------------------------------------------------- Just like it says: :: b = bindparam('foo', type_=String) in\_ Function Changed to Accept Sequence or Selectable ------------------------------------------------------ The in\_ function now takes a sequence of values or a selectable as its sole argument. The previous API of passing in values as positional arguments still works, but is now deprecated. This means that :: my_table.select(my_table.c.id.in_(1, 2, 3)) my_table.select(my_table.c.id.in_(*listOfIds)) should be changed to :: my_table.select(my_table.c.id.in_([1, 2, 3])) my_table.select(my_table.c.id.in_(listOfIds)) Schema and Reflection ===================== ``MetaData``, ``BoundMetaData``, ``DynamicMetaData``... ------------------------------------------------------- In the 0.3.x series, ``BoundMetaData`` and ``DynamicMetaData`` were deprecated in favor of ``MetaData`` and ``ThreadLocalMetaData``. The older names have been removed in 0.4.
Updating is simple: :: +-------------------------------------+-------------------------+ |If You Had | Now Use | +=====================================+=========================+ | ``MetaData`` | ``MetaData`` | +-------------------------------------+-------------------------+ | ``BoundMetaData`` | ``MetaData`` | +-------------------------------------+-------------------------+ | ``DynamicMetaData`` (with one | ``MetaData`` | | engine or threadlocal=False) | | +-------------------------------------+-------------------------+ | ``DynamicMetaData`` | ``ThreadLocalMetaData`` | | (with different engines per thread) | | +-------------------------------------+-------------------------+ The seldom-used ``name`` parameter to ``MetaData`` types has been removed. The ``ThreadLocalMetaData`` constructor now takes no arguments. Both types can now be bound to an ``Engine`` or a single ``Connection``. One Step Multi-Table Reflection ------------------------------- You can now load table definitions and automatically create ``Table`` objects from an entire database or schema in one pass: :: >>> metadata = MetaData(myengine, reflect=True) >>> metadata.tables.keys() ['table_a', 'table_b', 'table_c', '...'] ``MetaData`` also gains a ``.reflect()`` method enabling finer control over the loading process, including specification of a subset of available tables to load. SQL Execution ============= ``engine``, ``connectable``, and ``bind_to`` are all now ``bind`` ----------------------------------------------------------------- ``Transactions``, ``NestedTransactions`` and ``TwoPhaseTransactions`` --------------------------------------------------------------------- Connection Pool Events ---------------------- The connection pool now fires events when new DB-API connections are created, checked out and checked back into the pool. You can use these to execute session-scoped SQL setup statements on fresh connections, for example. Oracle Engine Fixed ------------------- In 0.3.11, there were bugs in the Oracle Engine on how Primary Keys are handled. These bugs could cause programs that worked fine with other engines, such as sqlite, to fail when using the Oracle Engine. In 0.4, the Oracle Engine has been reworked, fixing these Primary Key problems. Out Parameters for Oracle ------------------------- :: result = engine.execute(text("begin foo(:x, :y, :z); end;", bindparams=[bindparam('x', Numeric), outparam('y', Numeric), outparam('z', Numeric)]), x=5) assert result.out_parameters == {'y':10, 'z':75} Connection-bound ``MetaData``, ``Sessions`` ------------------------------------------- ``MetaData`` and ``Session`` can be explicitly bound to a connection: :: conn = engine.connect() sess = create_session(bind=conn) Faster, More Foolproof ``ResultProxy`` Objects ---------------------------------------------- SQLAlchemy-0.8.4/doc/_sources/changelog/migration_05.txt0000644000076500000240000006332212251147171023614 0ustar classicstaff00000000000000============================= What's new in SQLAlchemy 0.5? ============================= .. admonition:: About this Document This document describes changes between SQLAlchemy version 0.4, last released October 12, 2008, and SQLAlchemy version 0.5, last released January 16, 2010. Document date: August 4, 2009 This guide documents API changes which affect users migrating their applications from the 0.4 series of SQLAlchemy to 0.5. It's also recommended for those working from `Essential SQLAlchemy `_, which only covers 0.4 and seems to even have some old 0.3isms in it. 
Note that SQLAlchemy 0.5 removes many behaviors which were deprecated throughout the span of the 0.4 series, and also deprecates more behaviors specific to 0.4. Major Documentation Changes =========================== Some sections of the documentation have been completely rewritten and can serve as an introduction to new ORM features. The ``Query`` and ``Session`` objects in particular have some distinct differences in API and behavior which fundamentally change many of the basic ways things are done, particularly with regards to constructing highly customized ORM queries and dealing with stale session state, commits and rollbacks. * `ORM Tutorial `_ * `Session Documentation `_ Deprecations Source =================== Another source of information is documented within a series of unit tests illustrating up to date usages of some common ``Query`` patterns; this file can be viewed at [source:sqlalchemy/trunk/test/orm/test_deprecations.py]. Requirements Changes ==================== * Python 2.4 or higher is required. The SQLAlchemy 0.4 line is the last version with Python 2.3 support. Object Relational Mapping ========================= * **Column level expressions within Query.** - as detailed in the `tutorial `_, ``Query`` has the capability to create specific SELECT statements, not just those against full rows: :: session.query(User.name, func.count(Address.id).label("numaddresses")).join(Address).group_by(User.name) The tuples returned by any multi-column/entity query are *named*' tuples: :: for row in session.query(User.name, func.count(Address.id).label('numaddresses')).join(Address).group_by(User.name): print "name", row.name, "number", row.numaddresses ``Query`` has a ``statement`` accessor, as well as a ``subquery()`` method which allow ``Query`` to be used to create more complex combinations: :: subq = session.query(Keyword.id.label('keyword_id')).filter(Keyword.name.in_(['beans', 'carrots'])).subquery() recipes = session.query(Recipe).filter(exists(). where(Recipe.id==recipe_keywords.c.recipe_id). where(recipe_keywords.c.keyword_id==subq.c.keyword_id) ) * **Explicit ORM aliases are recommended for aliased joins** - The ``aliased()`` function produces an "alias" of a class, which allows fine-grained control of aliases in conjunction with ORM queries. While a table-level alias (i.e. ``table.alias()``) is still usable, an ORM level alias retains the semantics of the ORM mapped object which is significant for inheritance mappings, options, and other scenarios. E.g.: :: Friend = aliased(Person) session.query(Person, Friend).join((Friend, Person.friends)).all() * **query.join() greatly enhanced.** - You can now specify the target and ON clause for a join in multiple ways. A target class alone can be provided where SQLA will attempt to form a join to it via foreign key in the same way as ``table.join(someothertable)``. A target and an explicit ON condition can be provided, where the ON condition can be a ``relation()`` name, an actual class descriptor, or a SQL expression. Or the old way of just a ``relation()`` name or class descriptor works too. See the ORM tutorial which has several examples. 
* **Declarative is recommended for applications which don't require (and don't prefer) abstraction between tables and mappers** - The [/docs/05/reference/ext/declarative.html Declarative] module, which is used to combine the expression of ``Table``, ``mapper()``, and user defined class objects together, is highly recommended as it simplifies application configuration, ensures the "one mapper per class" pattern, and allows the full range of configuration available to distinct ``mapper()`` calls. Separate ``mapper()`` and ``Table`` usage is now referred to as "classical SQLAlchemy usage" and of course is freely mixable with declarative. * **The .c. attribute has been removed** from classes (i.e. ``MyClass.c.somecolumn``). As is the case in 0.4, class- level properties are usable as query elements, i.e. ``Class.c.propname`` is now superseded by ``Class.propname``, and the ``c`` attribute continues to remain on ``Table`` objects where they indicate the namespace of ``Column`` objects present on the table. To get at the Table for a mapped class (if you didn't keep it around already): :: table = class_mapper(someclass).mapped_table Iterate through columns: :: for col in table.c: print col Work with a specific column: :: table.c.somecolumn The class-bound descriptors support the full set of Column operators as well as the documented relation-oriented operators like ``has()``, ``any()``, ``contains()``, etc. The reason for the hard removal of ``.c.`` is that in 0.5, class-bound descriptors carry potentially different meaning, as well as information regarding class mappings, versus plain ``Column`` objects - and there are use cases where you'd specifically want to use one or the other. Generally, using class-bound descriptors invokes a set of mapping/polymorphic aware translations, and using table- bound columns does not. In 0.4, these translations were applied across the board to all expressions, but 0.5 differentiates completely between columns and mapped descriptors, only applying translations to the latter. So in many cases, particularly when dealing with joined table inheritance configurations as well as when using ``query()``, ``Class.propname`` and ``table.c.colname`` are not interchangeable. For example, ``session.query(users.c.id, users.c.name)`` is different versus ``session.query(User.id, User.name)``; in the latter case, the ``Query`` is aware of the mapper in use and further mapper-specific operations like ``query.join()``, ``query.with_parent()`` etc. may be used, but in the former case cannot. Additionally, in polymorphic inheritance scenarios, the class-bound descriptors refer to the columns present in the polymorphic selectable in use, not necessarily the table column which directly corresponds to the descriptor. For example, a set of classes related by joined-table inheritance to the ``person`` table along the ``person_id`` column of each table will all have their ``Class.person_id`` attribute mapped to the ``person_id`` column in ``person``, and not their subclass table. Version 0.4 would map this behavior onto table-bound ``Column`` objects automatically. In 0.5, this automatic conversion has been removed, so that you in fact *can* use table-bound columns as a means to override the translations which occur with polymorphic querying; this allows ``Query`` to be able to create optimized selects among joined-table or concrete-table inheritance setups, as well as portable subqueries, etc. 
* **Session Now Synchronizes Automatically with Transactions.** Session now synchronizes against the transaction automatically by default, including autoflush and autoexpire. A transaction is present at all times unless disabled using the ``autocommit`` option. When all three flags are set to their default, the Session recovers gracefully after rollbacks and it's very difficult to get stale data into the session. See the new Session documentation for details. * **Implicit Order By Is Removed**. This will impact ORM users who rely upon SA's "implicit ordering" behavior, which states that all Query objects which don't have an ``order_by()`` will ORDER BY the "id" or "oid" column of the primary mapped table, and all lazy/eagerly loaded collections apply a similar ordering. In 0.5, automatic ordering must be explicitly configured on ``mapper()`` and ``relation()`` objects (if desired), or otherwise when using ``Query``. To convert an 0.4 mapping to 0.5, such that its ordering behavior will be extremely similar to 0.4 or previous, use the ``order_by`` setting on ``mapper()`` and ``relation()``: :: mapper(User, users, properties={ 'addresses':relation(Address, order_by=addresses.c.id) }, order_by=users.c.id) To set ordering on a backref, use the ``backref()`` function: :: 'keywords':relation(Keyword, secondary=item_keywords, order_by=keywords.c.name, backref=backref('items', order_by=items.c.id)) Using declarative ? To help with the new ``order_by`` requirement, ``order_by`` and friends can now be set using strings which are evaluated in Python later on (this works **only** with declarative, not plain mappers): :: class MyClass(MyDeclarativeBase): ... 'addresses':relation("Address", order_by="Address.id") It's generally a good idea to set ``order_by`` on ``relation()s`` which load list-based collections of items, since that ordering cannot otherwise be affected. Other than that, the best practice is to use ``Query.order_by()`` to control ordering of the primary entities being loaded. * **Session is now autoflush=True/autoexpire=True/autocommit=False.** - To set it up, just call ``sessionmaker()`` with no arguments. The name ``transactional=True`` is now ``autocommit=False``. Flushes occur upon each query issued (disable with ``autoflush=False``), within each ``commit()`` (as always), and before each ``begin_nested()`` (so rolling back to the SAVEPOINT is meaningful). All objects are expired after each ``commit()`` and after each ``rollback()``. After rollback, pending objects are expunged, deleted objects move back to persistent. These defaults work together very nicely and there's really no more need for old techniques like ``clear()`` (which is renamed to ``expunge_all()`` as well). P.S.: sessions are now reusable after a ``rollback()``. Scalar and collection attribute changes, adds and deletes are all rolled back. * **session.add() replaces session.save(), session.update(), session.save_or_update().** - the ``session.add(someitem)`` and ``session.add_all([list of items])`` methods replace ``save()``, ``update()``, and ``save_or_update()``. Those methods will remain deprecated throughout 0.5. * **backref configuration made less verbose.** - The ``backref()`` function now uses the ``primaryjoin`` and ``secondaryjoin`` arguments of the forwards-facing ``relation()`` when they are not explicitly stated. It's no longer necessary to specify ``primaryjoin``/``secondaryjoin`` in both directions separately. 
* **Simplified polymorphic options.** - The ORM's "polymorphic load" behavior has been simplified. In 0.4, mapper() had an argument called ``polymorphic_fetch`` which could be configured as ``select`` or ``deferred``. This option is removed; the mapper will now just defer any columns which were not present in the SELECT statement. The actual SELECT statement used is controlled by the ``with_polymorphic`` mapper argument (which is also in 0.4 and replaces ``select_table``), as well as the ``with_polymorphic()`` method on ``Query`` (also in 0.4). An improvement to the deferred loading of inheriting classes is that the mapper now produces the "optimized" version of the SELECT statement in all cases; that is, if class B inherits from A, and several attributes only present on class B have been expired, the refresh operation will only include B's table in the SELECT statement and will not JOIN to A. * The ``execute()`` method on ``Session`` converts plain strings into ``text()`` constructs, so that bind parameters may all be specified as ":bindname" without needing to call ``text()`` explicitly. If "raw" SQL is desired here, use ``session.connection().execute("raw text")``. * ``session.Query().iterate_instances()`` has been renamed to just ``instances()``. The old ``instances()`` method returning a list instead of an iterator no longer exists. If you were relying on that behavior, you should use ``list(your_query.instances())``. Extending the ORM ================= In 0.5 we're moving forward with more ways to modify and extend the ORM. Heres a summary: * **MapperExtension.** - This is the classic extension class, which remains. Methods which should rarely be needed are ``create_instance()`` and ``populate_instance()``. To control the initialization of an object when it's loaded from the database, use the ``reconstruct_instance()`` method, or more easily the ``@reconstructor`` decorator described in the documentation. * **SessionExtension.** - This is an easy to use extension class for session events. In particular, it provides ``before_flush()``, ``after_flush()`` and ``after_flush_postexec()`` methods. It's usage is recommended over ``MapperExtension.before_XXX`` in many cases since within ``before_flush()`` you can modify the flush plan of the session freely, something which cannot be done from within ``MapperExtension``. * **AttributeExtension.** - This class is now part of the public API, and allows the interception of userland events on attributes, including attribute set and delete operations, and collection appends and removes. It also allows the value to be set or appended to be modified. The ``@validates`` decorator, described in the documentation, provides a quick way to mark any mapped attributes as being "validated" by a particular class method. * **Attribute Instrumentation Customization.** - An API is provided for ambitious efforts to entirely replace SQLAlchemy's attribute instrumentation, or just to augment it in some cases. This API was produced for the purposes of the Trellis toolkit, but is available as a public API. Some examples are provided in the distribution in the ``/examples/custom_attributes`` directory. Schema/Types ============ * **String with no length no longer generates TEXT, it generates VARCHAR** - The ``String`` type no longer magically converts into a ``Text`` type when specified with no length. This only has an effect when CREATE TABLE is issued, as it will issue ``VARCHAR`` with no length parameter, which is not valid on many (but not all) databases. 
To create a TEXT (or CLOB, i.e. unbounded string) column, use the ``Text`` type. * **PickleType() with mutable=True requires an __eq__() method** - The ``PickleType`` type needs to compare values when mutable=True. The method of comparing ``pickle.dumps()`` is inefficient and unreliable. If an incoming object does not implement ``__eq__()`` and is also not ``None``, the ``dumps()`` comparison is used but a warning is raised. For types which implement ``__eq__()`` which includes all dictionaries, lists, etc., comparison will use ``==`` and is now reliable by default. * **convert_bind_param() and convert_result_value() methods of TypeEngine/TypeDecorator are removed.** - The O'Reilly book unfortunately documented these methods even though they were deprecated post 0.3. For a user-defined type which subclasses ``TypeEngine``, the ``bind_processor()`` and ``result_processor()`` methods should be used for bind/result processing. Any user defined type, whether extending ``TypeEngine`` or ``TypeDecorator``, which uses the old 0.3 style can be easily adapted to the new style using the following adapter: :: class AdaptOldConvertMethods(object): """A mixin which adapts 0.3-style convert_bind_param and convert_result_value methods """ def bind_processor(self, dialect): def convert(value): return self.convert_bind_param(value, dialect) return convert def result_processor(self, dialect): def convert(value): return self.convert_result_value(value, dialect) return convert def convert_result_value(self, value, dialect): return value def convert_bind_param(self, value, dialect): return value To use the above mixin: :: class MyType(AdaptOldConvertMethods, TypeEngine): # ... * The ``quote`` flag on ``Column`` and ``Table`` as well as the ``quote_schema`` flag on ``Table`` now control quoting both positively and negatively. The default is ``None``, meaning let regular quoting rules take effect. When ``True``, quoting is forced on. When ``False``, quoting is forced off. * Column ``DEFAULT`` value DDL can now be more conveniently specified with ``Column(..., server_default='val')``, deprecating ``Column(..., PassiveDefault('val'))``. ``default=`` is now exclusively for Python-initiated default values, and can coexist with server_default. A new ``server_default=FetchedValue()`` replaces the ``PassiveDefault('')`` idiom for marking columns as subject to influence from external triggers and has no DDL side effects. * SQLite's ``DateTime``, ``Time`` and ``Date`` types now **only accept datetime objects, not strings** as bind parameter input. If you'd like to create your own "hybrid" type which accepts strings and returns results as date objects (from whatever format you'd like), create a ``TypeDecorator`` that builds on ``String``. If you only want string-based dates, just use ``String``. * Additionally, the ``DateTime`` and ``Time`` types, when used with SQLite, now represent the "microseconds" field of the Python ``datetime.datetime`` object in the same manner as ``str(datetime)`` - as fractional seconds, not a count of microseconds. That is: :: dt = datetime.datetime(2008, 6, 27, 12, 0, 0, 125) # 125 usec # old way '2008-06-27 12:00:00.125' # new way '2008-06-27 12:00:00.000125' So if an existing SQLite file-based database intends to be used across 0.4 and 0.5, you either have to upgrade the datetime columns to store the new format (NOTE: please test this, I'm pretty sure its correct): :: UPDATE mytable SET somedatecol = substr(somedatecol, 0, 19) || '.' 
|| substr((substr(somedatecol, 21, -1) / 1000000), 3, -1); or, enable "legacy" mode as follows: :: from sqlalchemy.databases.sqlite import DateTimeMixin DateTimeMixin.__legacy_microseconds__ = True Connection Pool no longer threadlocal by default ================================================ 0.4 has an unfortunate default setting of "pool_threadlocal=True", leading to surprise behavior when, for example, using multiple Sessions within a single thread. This flag is now off in 0.5. To re-enable 0.4's behavior, specify ``pool_threadlocal=True`` to ``create_engine()``, or alternatively use the "threadlocal" strategy via ``strategy="threadlocal"``. \*args Accepted, \*args No Longer Accepted ========================================== The policy with ``method(\*args)`` vs. ``method([args])`` is, if the method accepts a variable-length set of items which represent a fixed structure, it takes ``\*args``. If the method accepts a variable-length set of items that are data-driven, it takes ``[args]``. * The various Query.options() functions ``eagerload()``, ``eagerload_all()``, ``lazyload()``, ``contains_eager()``, ``defer()``, ``undefer()`` all accept variable-length ``\*keys`` as their argument now, which allows a path to be formulated using descriptors, ie.: :: query.options(eagerload_all(User.orders, Order.items, Item.keywords)) A single array argument is still accepted for backwards compatibility. * Similarly, the ``Query.join()`` and ``Query.outerjoin()`` methods accept a variable length \*args, with a single array accepted for backwards compatibility: :: query.join('orders', 'items') query.join(User.orders, Order.items) * the ``in_()`` method on columns and similar only accepts a list argument now. It no longer accepts ``\*args``. Removed ======= * **entity_name** - This feature was always problematic and rarely used. 0.5's more deeply fleshed out use cases revealed further issues with ``entity_name`` which led to its removal. If different mappings are required for a single class, break the class into separate subclasses and map them separately. An example of this is at [wiki:UsageRecipes/EntityName]. More information regarding rationale is described at http://groups.google.c om/group/sqlalchemy/browse_thread/thread/9e23a0641a88b96d? hl=en . * **get()/load() cleanup** The ``load()`` method has been removed. It's functionality was kind of arbitrary and basically copied from Hibernate, where it's also not a particularly meaningful method. To get equivalent functionality: :: x = session.query(SomeClass).populate_existing().get(7) ``Session.get(cls, id)`` and ``Session.load(cls, id)`` have been removed. ``Session.get()`` is redundant vs. ``session.query(cls).get(id)``. ``MapperExtension.get()`` is also removed (as is ``MapperExtension.load()``). To override the functionality of ``Query.get()``, use a subclass: :: class MyQuery(Query): def get(self, ident): # ... session = sessionmaker(query_cls=MyQuery)() ad1 = session.query(Address).get(1) * ``sqlalchemy.orm.relation()`` The following deprecated keyword arguments have been removed: foreignkey, association, private, attributeext, is_backref In particular, ``attributeext`` is replaced with ``extension`` - the ``AttributeExtension`` class is now in the public API. 
* ``session.Query()`` The following deprecated functions have been removed: list, scalar, count_by, select_whereclause, get_by, select_by, join_by, selectfirst, selectone, select, execute, select_statement, select_text, join_to, join_via, selectfirst_by, selectone_by, apply_max, apply_min, apply_avg, apply_sum Additionally, the ``id`` keyword argument to ``join()``, ``outerjoin()``, ``add_entity()`` and ``add_column()`` has been removed. To target table aliases in ``Query`` to result columns, use the ``aliased`` construct: :: from sqlalchemy.orm import aliased address_alias = aliased(Address) print session.query(User, address_alias).join((address_alias, User.addresses)).all() * ``sqlalchemy.orm.Mapper`` * instances() * get_session() - this method was not very noticeable, but had the effect of associating lazy loads with a particular session even if the parent object was entirely detached, when an extension such as ``scoped_session()`` or the old ``SessionContextExt`` was used. It's possible that some applications which relied upon this behavior will no longer work as expected; but the better programming practice here is to always ensure objects are present within sessions if database access from their attributes is required. * ``mapper(MyClass, mytable)`` Mapped classes are no longer instrumented with a "c" class attribute; e.g. ``MyClass.c`` * ``sqlalchemy.orm.collections`` The ``_prepare_instrumentation`` alias for ``prepare_instrumentation`` has been removed. * ``sqlalchemy.orm`` Removed the ``EXT_PASS`` alias of ``EXT_CONTINUE``. * ``sqlalchemy.engine`` The alias from ``DefaultDialect.preexecute_sequences`` to ``.preexecute_pk_sequences`` has been removed. The deprecated engine_descriptors() function has been removed. * ``sqlalchemy.ext.activemapper`` Module removed. * ``sqlalchemy.ext.assignmapper`` Module removed. * ``sqlalchemy.ext.associationproxy`` Pass-through of keyword args on the proxy's ``.append(item, \**kw)`` has been removed and is now simply ``.append(item)`` * ``sqlalchemy.ext.selectresults``, ``sqlalchemy.mods.selectresults`` Modules removed. * ``sqlalchemy.ext.declarative`` ``declared_synonym()`` removed. * ``sqlalchemy.ext.sessioncontext`` Module removed. * ``sqlalchemy.log`` The ``SADeprecationWarning`` alias to ``sqlalchemy.exc.SADeprecationWarning`` has been removed. * ``sqlalchemy.exc`` ``exc.AssertionError`` has been removed and usage replaced by the Python built-in of the same name. * ``sqlalchemy.databases.mysql`` The deprecated ``get_version_info`` dialect method has been removed. Renamed or Moved ================ * ``sqlalchemy.exceptions`` is now ``sqlalchemy.exc`` The module may still be imported under the old name until 0.6. * ``FlushError``, ``ConcurrentModificationError``, ``UnmappedColumnError`` -> sqlalchemy.orm.exc These exceptions moved to the orm package. Importing 'sqlalchemy.orm' will install aliases in sqlalchemy.exc for compatibility until 0.6. * ``sqlalchemy.logging`` -> ``sqlalchemy.log`` This internal module was renamed. It no longer needs to be special cased when packaging SA with py2app and similar tools that scan imports. * ``session.Query().iterate_instances()`` -> ``session.Query().instances()``. Deprecated ========== * ``Session.save()``, ``Session.update()``, ``Session.save_or_update()`` All three replaced by ``Session.add()``; a short before/after sketch follows at the end of this list. * ``sqlalchemy.PassiveDefault`` Use ``Column(server_default=...)``; translates to ``sqlalchemy.DefaultClause()`` under the hood. * ``session.Query().iterate_instances()``. It has been renamed to ``instances()``.
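To tie the deprecations above together, here is a minimal before/after sketch; the object and column names are placeholders rather than anything from a real application:

::

    # 0.4 idioms, now deprecated
    sess.save(someobject)
    sess.update(someotherobject)
    sess.save_or_update(thirdobject)
    stamp_col = Column('stamp', DateTime, PassiveDefault('now()'))

    # 0.5 equivalents
    sess.add(someobject)
    sess.add(someotherobject)
    sess.add(thirdobject)
    stamp_col = Column('stamp', DateTime, server_default='now()')
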
SQLAlchemy-0.8.4/doc/_sources/changelog/migration_06.txt0000644000076500000240000012662012251147171023616 0ustar classicstaff00000000000000============================== What's New in SQLAlchemy 0.6? ============================== .. admonition:: About this Document This document describes changes between SQLAlchemy version 0.5, last released January 16, 2010, and SQLAlchemy version 0.6, last released May 5, 2012. Document date: June 6, 2010 This guide documents API changes which affect users migrating their applications from the 0.5 series of SQLAlchemy to 0.6. Note that SQLAlchemy 0.6 removes some behaviors which were deprecated throughout the span of the 0.5 series, and also deprecates more behaviors specific to 0.5. Platform Support ================ * cPython versions 2.4 and upwards throughout the 2.xx series * Jython 2.5.1 - using the zxJDBC DBAPI included with Jython. * cPython 3.x - see [source:sqlalchemy/trunk/README.py3k] for information on how to build for python3. New Dialect System ================== Dialect modules are now broken up into distinct subcomponents, within the scope of a single database backend. Dialect implementations are now in the ``sqlalchemy.dialects`` package. The ``sqlalchemy.databases`` package still exists as a placeholder to provide some level of backwards compatibility for simple imports. For each supported database, a sub-package exists within ``sqlalchemy.dialects`` where several files are contained. Each package contains a module called ``base.py`` which defines the specific SQL dialect used by that database. It also contains one or more "driver" modules, each one corresponding to a specific DBAPI - these files are named corresponding to the DBAPI itself, such as ``pysqlite``, ``cx_oracle``, or ``pyodbc``. The classes used by SQLAlchemy dialects are first declared in the ``base.py`` module, defining all behavioral characteristics defined by the database. These include capability mappings, such as "supports sequences", "supports returning", etc., type definitions, and SQL compilation rules. Each "driver" module in turn provides subclasses of those classes as needed which override the default behavior to accommodate the additional features, behaviors, and quirks of that DBAPI. For DBAPIs that support multiple backends (pyodbc, zxJDBC, mxODBC), the dialect module will use mixins from the ``sqlalchemy.connectors`` package, which provide functionality common to that DBAPI across all backends, most typically dealing with connect arguments. This means that connecting using pyodbc, zxJDBC or mxODBC (when implemented) is extremely consistent across supported backends. The URL format used by ``create_engine()`` has been enhanced to handle any number of DBAPIs for a particular backend, using a scheme that is inspired by that of JDBC. The previous format still works, and will select a "default" DBAPI implementation, such as the Postgresql URL below that will use psycopg2: :: create_engine('postgresql://scott:tiger@localhost/test') However to specify a specific DBAPI backend such as pg8000, add it to the "protocol" section of the URL using a plus sign "+": :: create_engine('postgresql+pg8000://scott:tiger@localhost/test') Important Dialect Links: * Documentation on connect arguments: http://www.sqlalchemy.org/docs/06/dbengine.html#create- engine-url-arguments. * Reference documentation for individual dialects: http://ww w.sqlalchemy.org/docs/06/reference/dialects/index.html * The tips and tricks at DatabaseNotes. 
Other notes regarding dialects: * the type system has been changed dramatically in SQLAlchemy 0.6. This has an impact on all dialects regarding naming conventions, behaviors, and implementations. See the section on "Types" below. * the ``ResultProxy`` object now offers a 2x speed improvement in some cases thanks to some refactorings. * the ``RowProxy``, i.e. individual result row object, is now directly pickleable. * the setuptools entrypoint used to locate external dialects is now called ``sqlalchemy.dialects``. An external dialect written against 0.4 or 0.5 will need to be modified to work with 0.6 in any case so this change does not add any additional difficulties. * dialects now receive an initialize() event on initial connection to determine connection properties. * Functions and operators generated by the compiler now use (almost) regular dispatch functions of the form "visit_" and "visit__fn" to provide customed processing. This replaces the need to copy the "functions" and "operators" dictionaries in compiler subclasses with straightforward visitor methods, and also allows compiler subclasses complete control over rendering, as the full _Function or _BinaryExpression object is passed in. Dialect Imports --------------- The import structure of dialects has changed. Each dialect now exports its base "dialect" class as well as the full set of SQL types supported on that dialect via ``sqlalchemy.dialects.``. For example, to import a set of PG types: :: from sqlalchemy.dialects.postgresql import INTEGER, BIGINT, SMALLINT,\ VARCHAR, MACADDR, DATE, BYTEA Above, ``INTEGER`` is actually the plain ``INTEGER`` type from ``sqlalchemy.types``, but the PG dialect makes it available in the same way as those types which are specific to PG, such as ``BYTEA`` and ``MACADDR``. Expression Language Changes =========================== An Important Expression Language Gotcha --------------------------------------- There's one quite significant behavioral change to the expression language which may affect some applications. The boolean value of Python boolean expressions, i.e. ``==``, ``!=``, and similar, now evaluates accurately with regards to the two clause objects being compared. As we know, comparing a ``ClauseElement`` to any other object returns another ``ClauseElement``: :: >>> from sqlalchemy.sql import column >>> column('foo') == 5 This so that Python expressions produce SQL expressions when converted to strings: :: >>> str(column('foo') == 5) 'foo = :foo_1' But what happens if we say this? :: >>> if column('foo') == 5: ... print "yes" ... In previous versions of SQLAlchemy, the returned ``_BinaryExpression`` was a plain Python object which evaluated to ``True``. Now it evaluates to whether or not the actual ``ClauseElement`` should have the same hash value as to that being compared. Meaning: :: >>> bool(column('foo') == 5) False >>> bool(column('foo') == column('foo')) False >>> c = column('foo') >>> bool(c == c) True >>> That means code such as the following: :: if expression: print "the expression is:", expression Would not evaluate if ``expression`` was a binary clause. Since the above pattern should never be used, the base ``ClauseElement`` now raises an exception if called in a boolean context: :: >>> bool(c) Traceback (most recent call last): File "", line 1, in ... 
raise TypeError("Boolean value of this clause is not defined") TypeError: Boolean value of this clause is not defined Code that wants to check for the presence of a ``ClauseElement`` expression should instead say: :: if expression is not None: print "the expression is:", expression Keep in mind, **this applies to Table and Column objects too**. The rationale for the change is twofold: * Comparisons of the form ``if c1 == c2: `` can actually be written now * Support for correct hashing of ``ClauseElement`` objects now works on alternate platforms, namely Jython. Up until this point SQLAlchemy relied heavily on the specific behavior of cPython in this regard (and still had occasional problems with it). Stricter "executemany" Behavior ------------------------------- An "executemany" in SQLAlchemy corresponds to a call to ``execute()``, passing along a collection of bind parameter sets: :: connection.execute(table.insert(), {'data':'row1'}, {'data':'row2'}, {'data':'row3'}) When the ``Connection`` object sends off the given ``insert()`` construct for compilation, it passes to the compiler the keynames present in the first set of binds passed along to determine the construction of the statement's VALUES clause. Users familiar with this construct will know that additional keys present in the remaining dictionaries don't have any impact. What's different now is that all subsequent dictionaries need to include at least *every* key that is present in the first dictionary. This means that a call like this no longer works: :: connection.execute(table.insert(), {'timestamp':today, 'data':'row1'}, {'timestamp':today, 'data':'row2'}, {'data':'row3'}) Because the third row does not specify the 'timestamp' column. Previous versions of SQLAlchemy would simply insert NULL for these missing columns. However, if the ``timestamp`` column in the above example contained a Python-side default value or function, it would *not* be used. This because the "executemany" operation is optimized for maximum performance across huge numbers of parameter sets, and does not attempt to evaluate Python-side defaults for those missing keys. Because defaults are often implemented either as SQL expressions which are embedded inline with the INSERT statement, or are server side expressions which again are triggered based on the structure of the INSERT string, which by definition cannot fire off conditionally based on each parameter set, it would be inconsistent for Python side defaults to behave differently vs. SQL/server side defaults. (SQL expression based defaults are embedded inline as of the 0.5 series, again to minimize the impact of huge numbers of parameter sets). SQLAlchemy 0.6 therefore establishes predictable consistency by forbidding any subsequent parameter sets from leaving any fields blank. That way, there's no more silent failure of Python side default values and functions, which additionally are allowed to remain consistent in their behavior versus SQL and server side defaults. UNION and other "compound" constructs parenthesize consistently --------------------------------------------------------------- A rule that was designed to help SQLite has been removed, that of the first compound element within another compound (such as, a ``union()`` inside of an ``except_()``) wouldn't be parenthesized. This is inconsistent and produces the wrong results on Postgresql, which has precedence rules regarding INTERSECTION, and its generally a surprise. 
When using complex composites with SQLite, you now need to turn the first element into a subquery (which is also compatible on PG). A new example is in the SQL expression tutorial at the end of [http://www.sqlalchemy.org/docs/06/sqlexpression.html #unions-and-other-set-operations]. See :ticket:`1665` and r6690 for more background. C Extensions for Result Fetching ================================ The ``ResultProxy`` and related elements, including most common "row processing" functions such as unicode conversion, numerical/boolean conversions and date parsing, have been re-implemented as optional C extensions for the purposes of performance. This represents the beginning of SQLAlchemy's path to the "dark side" where we hope to continue improving performance by reimplementing critical sections in C. The extensions can be built by specifying ``--with-cextensions``, i.e. ``python setup.py --with- cextensions install``. The extensions have the most dramatic impact on result fetching using direct ``ResultProxy`` access, i.e. that which is returned by ``engine.execute()``, ``connection.execute()``, or ``session.execute()``. Within results returned by an ORM ``Query`` object, result fetching is not as high a percentage of overhead, so ORM performance improves more modestly, and mostly in the realm of fetching large result sets. The performance improvements highly depend on the dbapi in use and on the syntax used to access the columns of each row (eg ``row['name']`` is much faster than ``row.name``). The current extensions have no impact on the speed of inserts/updates/deletes, nor do they improve the latency of SQL execution, that is, an application that spends most of its time executing many statements with very small result sets will not see much improvement. Performance has been improved in 0.6 versus 0.5 regardless of the extensions. A quick overview of what connecting and fetching 50,000 rows looks like with SQLite, using mostly direct SQLite access, a ``ResultProxy``, and a simple mapped ORM object: :: sqlite select/native: 0.260s 0.6 / C extension sqlalchemy.sql select: 0.360s sqlalchemy.orm fetch: 2.500s 0.6 / Pure Python sqlalchemy.sql select: 0.600s sqlalchemy.orm fetch: 3.000s 0.5 / Pure Python sqlalchemy.sql select: 0.790s sqlalchemy.orm fetch: 4.030s Above, the ORM fetches the rows 33% faster than 0.5 due to in-python performance enhancements. With the C extensions we get another 20%. However, ``ResultProxy`` fetches improve by 67% with the C extension versus not. Other tests report as much as a 200% speed improvement for some scenarios, such as those where lots of string conversions are occurring. New Schema Capabilities ======================= The ``sqlalchemy.schema`` package has received some long- needed attention. The most visible change is the newly expanded DDL system. 
In SQLAlchemy, it was possible since version 0.5 to create custom DDL strings and associate them with tables or metadata objects: :: from sqlalchemy.schema import DDL DDL('CREATE TRIGGER users_trigger ...').execute_at('after-create', metadata) Now the full suite of DDL constructs are available under the same system, including those for CREATE TABLE, ADD CONSTRAINT, etc.: :: from sqlalchemy.schema import Constraint, AddConstraint AddContraint(CheckConstraint("value > 5")).execute_at('after-create', mytable) Additionally, all the DDL objects are now regular ``ClauseElement`` objects just like any other SQLAlchemy expression object: :: from sqlalchemy.schema import CreateTable create = CreateTable(mytable) # dumps the CREATE TABLE as a string print create # executes the CREATE TABLE statement engine.execute(create) and using the ``sqlalchemy.ext.compiler`` extension you can make your own: :: from sqlalchemy.schema import DDLElement from sqlalchemy.ext.compiler import compiles class AlterColumn(DDLElement): def __init__(self, column, cmd): self.column = column self.cmd = cmd @compiles(AlterColumn) def visit_alter_column(element, compiler, **kw): return "ALTER TABLE %s ALTER COLUMN %s %s ..." % ( element.column.table.name, element.column.name, element.cmd ) engine.execute(AlterColumn(table.c.mycolumn, "SET DEFAULT 'test'")) Deprecated/Removed Schema Elements ---------------------------------- The schema package has also been greatly streamlined. Many options and methods which were deprecated throughout 0.5 have been removed. Other little known accessors and methods have also been removed. * the "owner" keyword argument is removed from ``Table``. Use "schema" to represent any namespaces to be prepended to the table name. * deprecated ``MetaData.connect()`` and ``ThreadLocalMetaData.connect()`` have been removed - send the "bind" attribute to bind a metadata. * deprecated metadata.table_iterator() method removed (use sorted_tables) * the "metadata" argument is removed from ``DefaultGenerator`` and subclasses, but remains locally present on ``Sequence``, which is a standalone construct in DDL. * deprecated ``PassiveDefault`` - use ``DefaultClause``. * Removed public mutability from ``Index`` and ``Constraint`` objects: * ``ForeignKeyConstraint.append_element()`` * ``Index.append_column()`` * ``UniqueConstraint.append_column()`` * ``PrimaryKeyConstraint.add()`` * ``PrimaryKeyConstraint.remove()`` These should be constructed declaratively (i.e. in one construction). * Other removed things: * ``Table.key`` (no idea what this was for) * ``Column.bind`` (get via column.table.bind) * ``Column.metadata`` (get via column.table.metadata) * ``Column.sequence`` (use column.default) Other Behavioral Changes ------------------------ * ``UniqueConstraint``, ``Index``, ``PrimaryKeyConstraint`` all accept lists of column names or column objects as arguments. * The ``use_alter`` flag on ``ForeignKey`` is now a shortcut option for operations that can be hand-constructed using the ``DDL()`` event system. A side effect of this refactor is that ``ForeignKeyConstraint`` objects with ``use_alter=True`` will *not* be emitted on SQLite, which does not support ALTER for foreign keys. This has no effect on SQLite's behavior since SQLite does not actually honor FOREIGN KEY constraints. * ``Table.primary_key`` is not assignable - use ``table.append_constraint(PrimaryKeyConstraint(...))`` * A ``Column`` definition with a ``ForeignKey`` and no type, e.g. 
``Column(name, ForeignKey(sometable.c.somecol))`` used to get the type of the referenced column. Now support for that automatic type inference is partial and may not work in all cases. Logging opened up ================= At the expense of a few extra method calls here and there, you can set log levels for INFO and DEBUG after an engine, pool, or mapper has been created, and logging will commence. The ``isEnabledFor(INFO)`` method is now called per-``Connection`` and ``isEnabledFor(DEBUG)`` per-``ResultProxy`` if already enabled on the parent connection. Pool logging sends to ``log.info()`` and ``log.debug()`` with no check - note that pool checkout/checkin is typically once per transaction. Reflection/Inspector API ======================== The reflection system, which allows reflection of table columns via ``Table('sometable', metadata, autoload=True)`` has been opened up into its own fine-grained API, which allows direct inspection of database elements such as tables, columns, constraints, indexes, and more. This API expresses return values as simple lists of strings, dictionaries, and ``TypeEngine`` objects. The internals of ``autoload=True`` now build upon this system such that the translation of raw database information into ``sqlalchemy.schema`` constructs is centralized and the contract of individual dialects greatly simplified, vastly reducing bugs and inconsistencies across different backends. To use an inspector: :: from sqlalchemy.engine.reflection import Inspector insp = Inspector.from_engine(my_engine) print insp.get_schema_names() the ``from_engine()`` method will in some cases provide a backend-specific inspector with additional capabilities, such as that of Postgresql which provides a ``get_table_oid()`` method: :: my_engine = create_engine('postgresql://...') pg_insp = Inspector.from_engine(my_engine) print pg_insp.get_table_oid('my_table') RETURNING Support ================= The ``insert()``, ``update()`` and ``delete()`` constructs now support a ``returning()`` method, which corresponds to the SQL RETURNING clause as supported by Postgresql, Oracle, MS-SQL, and Firebird. It is not supported for any other backend at this time. Given a list of column expressions in the same manner as that of a ``select()`` construct, the values of these columns will be returned as a regular result set: :: result = connection.execute( table.insert().values(data='some data').returning(table.c.id, table.c.timestamp) ) row = result.first() print "ID:", row['id'], "Timestamp:", row['timestamp'] The implementation of RETURNING across the four supported backends varies wildly, in the case of Oracle requiring an intricate usage of OUT parameters which are re-routed into a "mock" result set, and in the case of MS-SQL using an awkward SQL syntax. The usage of RETURNING is subject to limitations: * it does not work for any "executemany()" style of execution. This is a limitation of all supported DBAPIs. * Some backends, such as Oracle, only support RETURNING that returns a single row - this includes UPDATE and DELETE statements, meaning the update() or delete() construct must match only a single row, or an error is raised (by Oracle, not SQLAlchemy). RETURNING is also used automatically by SQLAlchemy, when available and when not otherwise specified by an explicit ``returning()`` call, to fetch the value of newly generated primary key values for single-row INSERT statements. 
This means there's no more "SELECT nextval(sequence)" pre-execution for insert statements where the primary key value is required. Truth be told, the implicit RETURNING feature does incur more method overhead than the old "select nextval()" system, which used a quick and dirty cursor.execute() to get at the sequence value, and in the case of Oracle requires additional binding of out parameters. So if method/protocol overhead is proving to be more expensive than additional database round trips, the feature can be disabled by specifying ``implicit_returning=False`` to ``create_engine()``. Type System Changes =================== New Architecture ---------------- The type system has been completely reworked behind the scenes to achieve two goals: * Separate the handling of bind parameters and result row values, typically a DBAPI requirement, from the SQL specification of the type itself, which is a database requirement. This is consistent with the overall dialect refactor that separates database SQL behavior from DBAPI. * Establish a clear and consistent contract for generating DDL from a ``TypeEngine`` object and for constructing ``TypeEngine`` objects based on column reflection. Highlights of these changes include: * The construction of types within dialects has been totally overhauled. Dialects now define publicly available types as UPPERCASE names exclusively, and internal implementation types using underscore identifiers (i.e. are private). The system by which types are expressed in SQL and DDL has been moved to the compiler system. This has the effect that there are far fewer type objects within most dialects. A detailed document on this architecture for dialect authors is in [source:/lib/sqlalchemy/dialects/type_migration_guidelines.txt]. * Reflection of types now returns the exact UPPERCASE type within types.py, or the UPPERCASE type within the dialect itself if the type is not a standard SQL type. This means reflection now returns more accurate information about reflected types. * User-defined types that subclass ``TypeEngine`` and wish to provide ``get_col_spec()`` should now subclass ``UserDefinedType``. * The ``result_processor()`` method on all type classes now accepts an additional argument ``coltype``. This is the DBAPI type object attached to cursor.description, and should be used when applicable to make better decisions on what kind of result-processing callable should be returned. Ideally result processor functions would never need to use ``isinstance()``, which is an expensive call at this level. Native Unicode Mode ------------------- As more DBAPIs support returning Python unicode objects directly, the base dialect now performs a check upon the first connection which establishes whether or not the DBAPI returns a Python unicode object for a basic select of a VARCHAR value. If so, the ``String`` type and all subclasses (i.e. ``Text``, ``Unicode``, etc.) will skip the "unicode" check/conversion step when result rows are received. This offers a dramatic performance increase for large result sets. The "unicode mode" currently is known to work with: * sqlite3 / pysqlite * psycopg2 - SQLA 0.6 now uses the "UNICODE" type extension by default on each psycopg2 connection object * pg8000 * cx_oracle (we use an output processor - nice feature!) Other types may choose to disable unicode processing as needed, such as the ``NVARCHAR`` type when used with MS-SQL.
In particular, if you are porting an application built against a DBAPI that formerly returned non-unicode strings, note that the "native unicode" mode has a plainly different default behavior - columns that are declared as ``String`` or ``VARCHAR`` now return unicode by default whereas they would return strings before. This can break code which expects non-unicode strings. The psycopg2 "native unicode" mode can be disabled by passing ``use_native_unicode=False`` to ``create_engine()``. A more general solution for string columns that explicitly do not want a unicode object is to use a ``TypeDecorator`` that converts unicode back to utf-8, or whatever is desired: :: class UTF8Encoded(TypeDecorator): """Unicode type which coerces to utf-8.""" impl = sa.VARCHAR def process_result_value(self, value, dialect): if isinstance(value, unicode): value = value.encode('utf-8') return value Note that the ``assert_unicode`` flag is now deprecated. SQLAlchemy allows the DBAPI and backend database in use to handle Unicode parameters when available, and does not add operational overhead by checking the incoming type; modern systems like sqlite and Postgresql will raise an encoding error on their end if invalid data is passed. In those cases where SQLAlchemy does need to coerce a bind parameter from Python Unicode to an encoded string, or when the Unicode type is used explicitly, a warning is raised if the object is a bytestring. This warning can be suppressed or converted to an exception using the Python warnings filter documented at: http://docs.python.org/library/warnings.html Generic Enum Type ----------------- We now have an ``Enum`` in the ``types`` module. This is a string type that is given a collection of "labels" which constrain the possible values to those labels. By default, this type generates a ``VARCHAR`` using the size of the largest label, and applies a CHECK constraint to the table within the CREATE TABLE statement. When using MySQL, the type by default uses MySQL's ENUM type, and when using Postgresql the type will generate a user-defined type using ``CREATE TYPE AS ENUM``. In order to create the type using Postgresql, the ``name`` parameter must be specified to the constructor. The type also accepts a ``native_enum=False`` option which will issue the VARCHAR/CHECK strategy for all databases. Note that Postgresql ENUM types currently don't work with pg8000 or zxjdbc. Reflection Returns Dialect-Specific Types ----------------------------------------- Reflection now returns the most specific type possible from the database. That is, if you create a table using ``String``, then reflect it back, the reflected column will likely be ``VARCHAR``. For dialects that support a more specific form of the type, that's what you'll get. So a ``Text`` type would come back as ``oracle.CLOB`` on Oracle, a ``LargeBinary`` might be a ``mysql.MEDIUMBLOB``, etc. The obvious advantage here is that reflection preserves as much information as possible from what the database had to say. Some applications that deal heavily in table metadata may wish to compare types across reflected tables and/or non-reflected tables. There's a semi-private accessor available on ``TypeEngine`` called ``_type_affinity`` and an associated comparison helper ``_compare_type_affinity``.
This accessor returns the "generic" ``types`` class which the type corresponds to: :: >>> String(50)._compare_type_affinity(postgresql.VARCHAR(50)) True >>> Integer()._compare_type_affinity(mysql.REAL) False Miscellaneous API Changes ------------------------- The usual "generic" types are still the general system in use, i.e. ``String``, ``Float``, ``DateTime``. There are a few changes there: * Types no longer make any guesses as to default parameters. In particular, ``Numeric``, ``Float``, as well as subclasses NUMERIC, FLOAT, DECIMAL don't generate any length or scale unless specified. This also continues to include the controversial ``String`` and ``VARCHAR`` types (although the MySQL dialect will pre-emptively raise when asked to render VARCHAR with no length). No defaults are assumed, and if they are used in a CREATE TABLE statement, an error will be raised if the underlying database does not allow non-lengthed versions of these types. * the ``Binary`` type has been renamed to ``LargeBinary``, for BLOB/BYTEA/similar types. ``BINARY`` and ``VARBINARY`` are present directly as ``types.BINARY``, ``types.VARBINARY``, as well as in the MySQL and MS-SQL dialects. * ``PickleType`` now uses == for comparison of values when mutable=True, unless the "comparator" argument with a comparison function is specified to the type. If you are pickling a custom object you should implement an ``__eq__()`` method so that value-based comparisons are accurate. * The default "precision" and "scale" arguments of Numeric and Float have been removed and now default to None. NUMERIC and FLOAT will be rendered with no numeric arguments by default unless these values are provided. * DATE, TIME and DATETIME types on SQLite can now take optional "storage_format" and "regexp" arguments. "storage_format" can be used to store those types using a custom string format. "regexp" allows a custom regular expression to be used to match string values from the database. * ``__legacy_microseconds__`` on SQLite ``Time`` and ``DateTime`` types is no longer supported. You should use the new "storage_format" argument instead. * ``DateTime`` types on SQLite now use by default a stricter regular expression to match strings from the database. Use the new "regexp" argument if you are using data stored in a legacy format. ORM Changes =========== Upgrading an ORM application from 0.5 to 0.6 should require little to no changes, as the ORM's behavior remains almost identical. There are some default argument and name changes, and some loading behaviors have been improved. New Unit of Work ---------------- The internals for the unit of work, primarily ``topological.py`` and ``unitofwork.py``, have been completely rewritten and are vastly simplified. This should have no impact on usage, as all existing behavior during flush has been maintained exactly (or at least, as far as it is exercised by our testsuite and the handful of production environments which have tested it heavily). flush() now uses 20-30% fewer method calls and should also use less memory. The intent and flow of the source code should now be reasonably easy to follow, and the architecture of the flush is fairly open-ended at this point, creating room for potential new areas of sophistication. The flush process no longer has any reliance on recursion, so flush plans of arbitrary size and complexity can be flushed.
Additionally, the mapper's "save" process, which issues INSERT and UPDATE statements, now caches the "compiled" form of the two statements so that callcounts are further dramatically reduced with very large flushes. Any changes in behavior observed with flush versus earlier versions of 0.6 or 0.5 should be reported to us ASAP - we'll make sure no functionality is lost. Changes to ``query.update()`` and ``query.delete()`` ---------------------------------------------------- * the 'expire' option on query.update() has been renamed to 'fetch', thus matching that of query.delete() * ``query.update()`` and ``query.delete()`` both default to 'evaluate' for the synchronize strategy. * the 'synchronize' strategy for update() and delete() raises an error on failure. There is no implicit fallback onto "fetch". Failure of evaluation is based on the structure of criteria, so success/failure is deterministic based on code structure. ``relation()`` is officially named ``relationship()`` ----------------------------------------------------- This is to solve the long-running issue that "relation" means a "table or derived table" in relational algebra terms. The ``relation()`` name, which is less typing, will hang around for the foreseeable future so this change should be entirely painless. Subquery eager loading ---------------------- A new kind of eager loading is added called "subquery" loading. This is a load that emits a second SQL query immediately after the first which loads full collections for all the parents in the first query, joining upwards to the parent using INNER JOIN. Subquery loading is used similarly to the current joined-eager loading, using the ``subqueryload()`` and ``subqueryload_all()`` options as well as the ``lazy='subquery'`` setting on ``relationship()``. The subquery load is usually much more efficient for loading many larger collections as it uses INNER JOIN unconditionally and also doesn't re-load parent rows. ``eagerload()``, ``eagerload_all()`` are now ``joinedload()``, ``joinedload_all()`` ------------------------------------------------------------------------------------------------ To make room for the new subquery load feature, the existing ``eagerload()``/``eagerload_all()`` options are now superseded by ``joinedload()`` and ``joinedload_all()``. The old names will hang around for the foreseeable future just like ``relation()``. ``lazy=False|None|True|'dynamic'`` now accepts ``lazy='noload'|'joined'|'subquery'|'select'|'dynamic'`` ------------------------------------------------------------------------------------------------------------- Continuing on the theme of loader strategies opened up, the standard keywords for the ``lazy`` option on ``relationship()`` are now ``select`` for lazy loading (via a SELECT issued on attribute access), ``joined`` for joined-eager loading, ``subquery`` for subquery-eager loading, ``noload`` for no loading at all, and ``dynamic`` for a "dynamic" relationship. The old ``True``, ``False``, ``None`` arguments are still accepted with the identical behavior as before. innerjoin=True on relation, joinedload -------------------------------------- Joined-eagerly loaded scalars and collections can now be instructed to use INNER JOIN instead of OUTER JOIN. On Postgresql this is observed to provide a 300-600% speedup on some queries.
Set this flag for any many-to-one which is on a NOT NULLable foreign key, and similarly for any collection where related items are guaranteed to exist. At mapper level: :: mapper(Child, child) mapper(Parent, parent, properties={ 'child':relationship(Child, lazy='joined', innerjoin=True) }) At query time level: :: session.query(Parent).options(joinedload(Parent.child, innerjoin=True)).all() The ``innerjoin=True`` flag at the ``relationship()`` level will also take effect for any ``joinedload()`` option which does not override the value. Many-to-one Enhancements ------------------------ * many-to-one relations now fire off a lazyload in fewer cases, including in most cases will not fetch the "old" value when a new one is replaced. * many-to-one relation to a joined-table subclass now uses get() for a simple load (known as the "use_get" condition), i.e. ``Related``->``Sub(Base)``, without the need to redefine the primaryjoin condition in terms of the base table. [ticket:1186] * specifying a foreign key with a declarative column, i.e. ``ForeignKey(MyRelatedClass.id)`` doesn't break the "use_get" condition from taking place [ticket:1492] * relationship(), joinedload(), and joinedload_all() now feature an option called "innerjoin". Specify ``True`` or ``False`` to control whether an eager join is constructed as an INNER or OUTER join. Default is ``False`` as always. The mapper options will override whichever setting is specified on relationship(). Should generally be set for many-to-one, not nullable foreign key relations to allow improved join performance. [ticket:1544] * the behavior of joined eager loading such that the main query is wrapped in a subquery when LIMIT/OFFSET are present now makes an exception for the case when all eager loads are many-to-one joins. In those cases, the eager joins are against the parent table directly along with the limit/offset without the extra overhead of a subquery, since a many-to-one join does not add rows to the result. For example, in 0.5 this query: :: session.query(Address).options(eagerload(Address.user)).limit(10) would produce SQL like: :: SELECT * FROM (SELECT * FROM addresses LIMIT 10) AS anon_1 LEFT OUTER JOIN users AS users_1 ON users_1.id = anon_1.addresses_user_id This because the presence of any eager loaders suggests that some or all of them may relate to multi-row collections, which would necessitate wrapping any kind of rowcount-sensitive modifiers like LIMIT inside of a subquery. In 0.6, that logic is more sensitive and can detect if all eager loaders represent many-to-ones, in which case the eager joins don't affect the rowcount: :: SELECT * FROM addresses LEFT OUTER JOIN users AS users_1 ON users_1.id = addresses.user_id LIMIT 10 Mutable Primary Keys with Joined Table Inheritance -------------------------------------------------- A joined table inheritance config where the child table has a PK that foreign keys to the parent PK can now be updated on a CASCADE-capable database like Postgresql. ``mapper()`` now has an option ``passive_updates=True`` which indicates this foreign key is updated automatically. If on a non-cascading database like SQLite or MySQL/MyISAM, set this flag to ``False``. A future feature enhancement will try to get this flag to be auto-configuring based on dialect/table style in use. Beaker Caching -------------- A promising new example of Beaker integration is in ``examples/beaker_caching``. This is a straightforward recipe which applies a Beaker cache within the result- generation engine of ``Query``. 
Cache parameters are provided via ``query.options()``, and allows full control over the contents of the cache. SQLAlchemy 0.6 includes improvements to the ``Session.merge()`` method to support this and similar recipes, as well as to provide significantly improved performance in most scenarios. Other Changes ------------- * the "row tuple" object returned by ``Query`` when multiple column/entities are selected is now picklable as well as higher performing. * ``query.join()`` has been reworked to provide more consistent behavior and more flexibility (includes [ticket:1537]) * ``query.select_from()`` accepts multiple clauses to produce multiple comma separated entries within the FROM clause. Useful when selecting from multiple-homed join() clauses. * the "dont_load=True" flag on ``Session.merge()`` is deprecated and is now "load=False". * added "make_transient()" helper function which transforms a persistent/ detached instance into a transient one (i.e. deletes the instance_key and removes from any session.) [ticket:1052] * the allow_null_pks flag on mapper() is deprecated and has been renamed to allow_partial_pks. It is turned "on" by default. This means that a row which has a non-null value for any of its primary key columns will be considered an identity. The need for this scenario typically only occurs when mapping to an outer join. When set to False, a PK that has NULLs in it will not be considered a primary key - in particular this means a result row will come back as None (or not be filled into a collection), and new in 0.6 also indicates that session.merge() won't issue a round trip to the database for such a PK value. [ticket:1680] * the mechanics of "backref" have been fully merged into the finer grained "back_populates" system, and take place entirely within the ``_generate_backref()`` method of ``RelationProperty``. This makes the initialization procedure of ``RelationProperty`` simpler and allows easier propagation of settings (such as from subclasses of ``RelationProperty``) into the reverse reference. The internal ``BackRef()`` is gone and ``backref()`` returns a plain tuple that is understood by ``RelationProperty``. * the keys attribute of ``ResultProxy`` is now a method, so references to it (``result.keys``) must be changed to method invocations (``result.keys()``) * ``ResultProxy.last_inserted_ids`` is now deprecated, use ``ResultProxy.inserted_primary_key`` instead. Deprecated/Removed ORM Elements ------------------------------- Most elements that were deprecated throughout 0.5 and raised deprecation warnings have been removed (with a few exceptions). All elements that were marked "pending deprecation" are now deprecated and will raise a warning upon use. * 'transactional' flag on sessionmaker() and others is removed. Use 'autocommit=True' to indicate 'transactional=False'. * 'polymorphic_fetch' argument on mapper() is removed. Loading can be controlled using the 'with_polymorphic' option. * 'select_table' argument on mapper() is removed. Use 'with_polymorphic=("*", )' for this functionality. * 'proxy' argument on synonym() is removed. This flag did nothing throughout 0.5, as the "proxy generation" behavior is now automatic. * Passing a single list of elements to joinedload(), joinedload_all(), contains_eager(), lazyload(), defer(), and undefer() instead of multiple positional \*args is deprecated. * Passing a single list of elements to query.order_by(), query.group_by(), query.join(), or query.outerjoin() instead of multiple positional \*args is deprecated. 
* ``query.iterate_instances()`` is removed. Use ``query.instances()``. * ``Query.query_from_parent()`` is removed. Use the sqlalchemy.orm.with_parent() function to produce a "parent" clause, or alternatively ``query.with_parent()``. * ``query._from_self()`` is removed, use ``query.from_self()`` instead. * the "comparator" argument to composite() is removed. Use "comparator_factory". * ``RelationProperty._get_join()`` is removed. * the 'echo_uow' flag on Session is removed. Use logging on the "sqlalchemy.orm.unitofwork" name. * ``session.clear()`` is removed. Use ``session.expunge_all()``. * ``session.save()``, ``session.update()``, ``session.save_or_update()`` are removed. Use ``session.add()`` and ``session.add_all()``. * the "objects" flag on session.flush() remains deprecated. * the "dont_load=True" flag on session.merge() is deprecated in favor of "load=False". * ``ScopedSession.mapper`` remains deprecated. See the usage recipe at http://www.sqlalchemy.org/trac/wiki/UsageRecipes/SessionAwareMapper * passing an ``InstanceState`` (internal SQLAlchemy state object) to ``attributes.init_collection()`` or ``attributes.get_history()`` is deprecated. These functions are public API and normally expect a regular mapped object instance. * the 'engine' parameter to ``declarative_base()`` is removed. Use the 'bind' keyword argument. Extensions ========== SQLSoup ------- SQLSoup has been modernized and updated to reflect common 0.5/0.6 capabilities, including well-defined session integration. Please read the new docs at http://www.sqlalchemy.org/docs/06/reference/ext/sqlsoup.html. Declarative ----------- The ``DeclarativeMeta`` (default metaclass for ``declarative_base``) previously allowed subclasses to modify ``dict_`` to add class attributes (e.g. columns). This no longer works; the ``DeclarativeMeta`` constructor now ignores ``dict_``. Instead, the class attributes should be assigned directly, e.g. ``cls.id=Column(...)``, or the mixin class approach should be used instead of the metaclass approach. SQLAlchemy-0.8.4/doc/_sources/changelog/migration_07.txt0000644000076500000240000014100112251147171023605 0ustar classicstaff00000000000000============================== What's New in SQLAlchemy 0.7? ============================== .. admonition:: About this Document This document describes changes between SQLAlchemy version 0.6, last released May 5, 2012, and SQLAlchemy version 0.7, undergoing maintenance releases as of October, 2012. Document date: July 27, 2011 Introduction ============ This guide introduces what's new in SQLAlchemy version 0.7, and also documents changes which affect users migrating their applications from the 0.6 series of SQLAlchemy to 0.7. To as great a degree as possible, changes are made in such a way as to not break compatibility with applications built for 0.6. The changes that are necessarily not backwards compatible are very few, and all but one, the change to mutable attribute defaults, should affect an exceedingly small portion of applications - many of the changes regard non-public APIs and undocumented hacks some users may have been attempting to use. A second, even smaller class of non-backwards-compatible changes is also documented. This class of change regards those features and behaviors that have been deprecated at least since version 0.5 and have been raising warnings since their deprecation. These changes would only affect applications that are still using 0.4- or early 0.5-style APIs.
As the project matures, we have fewer and fewer of these kinds of changes with 0.x level releases, which is a product of our API having ever fewer features that are less than ideal for the use cases they were meant to solve. An array of existing functionalities have been superseded in SQLAlchemy 0.7. There's not much difference between the terms "superseded" and "deprecated", except that the former has a much weaker suggestion of the old feature would ever be removed. In 0.7, features like ``synonym`` and ``comparable_property``, as well as all the ``Extension`` and other event classes, have been superseded. But these "superseded" features have been re-implemented such that their implementations live mostly outside of core ORM code, so their continued "hanging around" doesn't impact SQLAlchemy's ability to further streamline and refine its internals, and we expect them to remain within the API for the foreseeable future. New Features ============ New Event System ---------------- SQLAlchemy started early with the ``MapperExtension`` class, which provided hooks into the persistence cycle of mappers. As SQLAlchemy quickly became more componentized, pushing mappers into a more focused configurational role, many more "extension", "listener", and "proxy" classes popped up to solve various activity-interception use cases in an ad-hoc fashion. Part of this was driven by the divergence of activities; ``ConnectionProxy`` objects wanted to provide a system of rewriting statements and parameters; ``AttributeExtension`` provided a system of replacing incoming values, and ``DDL`` objects had events that could be switched off of dialect-sensitive callables. 0.7 re-implements virtually all of these plugin points with a new, unified approach, which retains all the functionalities of the different systems, provides more flexibility and less boilerplate, performs better, and eliminates the need to learn radically different APIs for each event subsystem. The pre-existing classes ``MapperExtension``, ``SessionExtension``, ``AttributeExtension``, ``ConnectionProxy``, ``PoolListener`` as well as the ``DDLElement.execute_at`` method are deprecated and now implemented in terms of the new system - these APIs remain fully functional and are expected to remain in place for the foreseeable future. The new approach uses named events and user-defined callables to associate activities with events. The API's look and feel was driven by such diverse sources as JQuery, Blinker, and Hibernate, and was also modified further on several occasions during conferences with dozens of users on Twitter, which appears to have a much higher response rate than the mailing list for such questions. It also features an open-ended system of target specification that allows events to be associated with API classes, such as for all ``Session`` or ``Engine`` objects, with specific instances of API classes, such as for a specific ``Pool`` or ``Mapper``, as well as for related objects like a user- defined class that's mapped, or something as specific as a certain attribute on instances of a particular subclass of a mapped parent class. Individual listener subsystems can apply wrappers to incoming user- defined listener functions which modify how they are called - an mapper event can receive either the instance of the object being operated upon, or its underlying ``InstanceState`` object. An attribute event can opt whether or not to have the responsibility of returning a new value. 
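As a rough illustration of the named-event style described above (the listener bodies here are placeholders, not from this document), a listener can be associated either with a whole class of targets, such as every ``Pool``, or with one specific instance, such as a single ``Engine``: ::

    from sqlalchemy import event, create_engine
    from sqlalchemy.pool import Pool

    # listen on ALL pools, by passing the Pool class itself as the target
    def on_checkout(dbapi_conn, conn_record, conn_proxy):
        # e.g. ping the connection here and raise DisconnectionError if dead
        pass

    event.listen(Pool, 'checkout', on_checkout)

    # listen on one specific Engine instance only
    engine = create_engine('sqlite://')

    def before_cursor_execute(conn, cursor, statement, parameters,
                              context, executemany):
        # e.g. log or time each statement on this engine
        pass

    event.listen(engine, 'before_cursor_execute', before_cursor_execute)

The same ``event.listen()`` call is used for ORM-level targets such as mapped classes and instrumented attributes.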
Several systems now build upon the new event API, including the new "mutable attributes" API as well as composite attributes. The greater emphasis on events has also led to the introduction of a handful of new events, including attribute expiration and refresh operations, pickle loads/dumps operations, completed mapper construction operations. .. seealso:: :ref:`event_toplevel` :ticket:`1902` Hybrid Attributes, implements/supersedes synonym(), comparable_property() ------------------------------------------------------------------------- The "derived attributes" example has now been turned into an official extension. The typical use case for ``synonym()`` is to provide descriptor access to a mapped column; the use case for ``comparable_property()`` is to be able to return a ``PropComparator`` from any descriptor. In practice, the approach of "derived" is easier to use, more extensible, is implemented in a few dozen lines of pure Python with almost no imports, and doesn't require the ORM core to even be aware of it. The feature is now known as the "Hybrid Attributes" extension. ``synonym()`` and ``comparable_property()`` are still part of the ORM, though their implementations have been moved outwards, building on an approach that is similar to that of the hybrid extension, so that the core ORM mapper/query/property modules aren't really aware of them otherwise. .. seealso:: :ref:`hybrids_toplevel` :ticket:`1903` Speed Enhancements ------------------ As is customary with all major SQLA releases, a wide pass through the internals to reduce overhead and callcounts has been made which further reduces the work needed in common scenarios. Highlights of this release include: * The flush process will now bundle INSERT statements into batches fed to ``cursor.executemany()``, for rows where the primary key is already present. In particular this usually applies to the "child" table on a joined table inheritance configuration, meaning the number of calls to ``cursor.execute`` for a large bulk insert of joined- table objects can be cut in half, allowing native DBAPI optimizations to take place for those statements passed to ``cursor.executemany()`` (such as re-using a prepared statement). * The codepath invoked when accessing a many-to-one reference to a related object that's already loaded has been greatly simplified. The identity map is checked directly without the need to generate a new ``Query`` object first, which is expensive in the context of thousands of in-memory many-to-ones being accessed. The usage of constructed-per-call "loader" objects is also no longer used for the majority of lazy attribute loads. * The rewrite of composites allows a shorter codepath when mapper internals access mapped attributes within a flush. * New inlined attribute access functions replace the previous usage of "history" when the "save-update" and other cascade operations need to cascade among the full scope of datamembers associated with an attribute. This reduces the overhead of generating a new ``History`` object for this speed-critical operation. * The internals of the ``ExecutionContext``, the object corresponding to a statement execution, have been inlined and simplified. * The ``bind_processor()`` and ``result_processor()`` callables generated by types for each statement execution are now cached (carefully, so as to avoid memory leaks for ad-hoc types and dialects) for the lifespan of that type, further reducing per-statement call overhead. 
* The collection of "bind processors" for a particular ``Compiled`` instance of a statement is also cached on the ``Compiled`` object, taking further advantage of the "compiled cache" used by the flush process to re-use the same compiled form of INSERT, UPDATE, DELETE statements. A demonstration of callcount reduction including a sample benchmark script is at http://techspot.zzzeek.org/2010/12/12/a-tale-of-three- profiles/ Composites Rewritten -------------------- The "composite" feature has been rewritten, like ``synonym()`` and ``comparable_property()``, to use a lighter weight implementation based on descriptors and events, rather than building into the ORM internals. This allowed the removal of some latency from the mapper/unit of work internals, and simplifies the workings of composite. The composite attribute now no longer conceals the underlying columns it builds upon, which now remain as regular attributes. Composites can also act as a proxy for ``relationship()`` as well as ``Column()`` attributes. The major backwards-incompatible change of composites is that they no longer use the ``mutable=True`` system to detect in-place mutations. Please use the `Mutation Tracking `_ extension to establish in-place change events to existing composite usage. .. seealso:: :ref:`mapper_composite` :ref:`mutable_toplevel` :ticket:`2008` :ticket:`2024` More succinct form of query.join(target, onclause) -------------------------------------------------- The default method of issuing ``query.join()`` to a target with an explicit onclause is now: :: query.join(SomeClass, SomeClass.id==ParentClass.some_id) In 0.6, this usage was considered to be an error, because ``join()`` accepts multiple arguments corresponding to multiple JOIN clauses - the two-argument form needed to be in a tuple to disambiguate between single-argument and two- argument join targets. In the middle of 0.6 we added detection and an error message for this specific calling style, since it was so common. In 0.7, since we are detecting the exact pattern anyway, and since having to type out a tuple for no reason is extremely annoying, the non- tuple method now becomes the "normal" way to do it. The "multiple JOIN" use case is exceedingly rare compared to the single join case, and multiple joins these days are more clearly represented by multiple calls to ``join()``. The tuple form will remain for backwards compatibility. Note that all the other forms of ``query.join()`` remain unchanged: :: query.join(MyClass.somerelation) query.join("somerelation") query.join(MyTarget) # ... etc `Querying with Joins `_ :ticket:`1923` .. _07_migration_mutation_extension: Mutation event extension, supersedes "mutable=True" --------------------------------------------------- A new extension, :ref:`mutable_toplevel`, provides a mechanism by which user-defined datatypes can provide change events back to the owning parent or parents. The extension includes an approach for scalar database values, such as those managed by :class:`.PickleType`, ``postgresql.ARRAY``, or other custom ``MutableType`` classes, as well as an approach for ORM "composites", those configured using :func:`~.sqlalchemy.orm.composite`. .. seealso:: :ref:`mutable_toplevel` NULLS FIRST / NULLS LAST operators ---------------------------------- These are implemented as an extension to the ``asc()`` and ``desc()`` operators, called ``nullsfirst()`` and ``nullslast()``. .. 
seealso:: :func:`.nullsfirst` :func:`.nullslast` :ticket:`723` select.distinct(), query.distinct() accepts \*args for Postgresql DISTINCT ON ----------------------------------------------------------------------------- This was already available by passing a list of expressions to the ``distinct`` keyword argument of ``select()``, the ``distinct()`` method of ``select()`` and ``Query`` now accept positional arguments which are rendered as DISTINCT ON when a Postgresql backend is used. `distinct() `_ `Query.distinct() `_ :ticket:`1069` ``Index()`` can be placed inline inside of ``Table``, ``__table_args__`` ------------------------------------------------------------------------ The Index() construct can be created inline with a Table definition, using strings as column names, as an alternative to the creation of the index outside of the Table. That is: :: Table('mytable', metadata, Column('id',Integer, primary_key=True), Column('name', String(50), nullable=False), Index('idx_name', 'name') ) The primary rationale here is for the benefit of declarative ``__table_args__``, particularly when used with mixins: :: class HasNameMixin(object): name = Column('name', String(50), nullable=False) @declared_attr def __table_args__(cls): return (Index('name'), {}) class User(HasNameMixin, Base): __tablename__ = 'user' id = Column('id', Integer, primary_key=True) `Indexes `_ Window Function SQL Construct ----------------------------- A "window function" provides to a statement information about the result set as it's produced. This allows criteria against various things like "row number", "rank" and so forth. They are known to be supported at least by Postgresql, SQL Server and Oracle, possibly others. The best introduction to window functions is on Postgresql's site, where window functions have been supported since version 8.4: http://www.postgresql.org/docs/9.0/static/tutorial- window.html SQLAlchemy provides a simple construct typically invoked via an existing function clause, using the ``over()`` method, which accepts ``order_by`` and ``partition_by`` keyword arguments. Below we replicate the first example in PG's tutorial: :: from sqlalchemy.sql import table, column, select, func empsalary = table('empsalary', column('depname'), column('empno'), column('salary')) s = select([ empsalary, func.avg(empsalary.c.salary). over(partition_by=empsalary.c.depname). label('avg') ]) print s SQL: :: SELECT empsalary.depname, empsalary.empno, empsalary.salary, avg(empsalary.salary) OVER (PARTITION BY empsalary.depname) AS avg FROM empsalary `sqlalchemy.sql.expression.over `_ :ticket:`1844` execution_options() on Connection accepts "isolation_level" argument -------------------------------------------------------------------- This sets the transaction isolation level for a single ``Connection``, until that ``Connection`` is closed and its underlying DBAPI resource returned to the connection pool, upon which the isolation level is reset back to the default. The default isolation level is set using the ``isolation_level`` argument to ``create_engine()``. Transaction isolation support is currently only supported by the Postgresql and SQLite backends. `execution_options() `_ :ticket:`2001` ``TypeDecorator`` works with integer primary key columns -------------------------------------------------------- A ``TypeDecorator`` which extends the behavior of ``Integer`` can be used with a primary key column. 
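As a minimal sketch of what this permits (the ``OffsetInteger`` name and its offset behavior are invented here purely for illustration), a decorated integer type can now serve directly as the primary key column: ::

    from sqlalchemy import Column, Integer, MetaData, Table
    from sqlalchemy.types import TypeDecorator

    class OffsetInteger(TypeDecorator):
        """Hypothetical integer type that presents values offset by 1000."""
        impl = Integer

        def process_bind_param(self, value, dialect):
            return value - 1000 if value is not None else None

        def process_result_value(self, value, dialect):
            return value + 1000 if value is not None else None

    metadata = MetaData()
    # usable as an autoincrementing integer primary key
    some_table = Table('some_table', metadata,
        Column('id', OffsetInteger, primary_key=True)
    )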
The "autoincrement" feature of ``Column`` will now recognize that the underlying database column is still an integer so that lastrowid mechanisms continue to function. The ``TypeDecorator`` itself will have its result value processor applied to newly generated primary keys, including those received by the DBAPI ``cursor.lastrowid`` accessor. :ticket:`2005` :ticket:`2006` ``TypeDecorator`` is present in the "sqlalchemy" import space ------------------------------------------------------------- No longer need to import this from ``sqlalchemy.types``, it's now mirrored in ``sqlalchemy``. New Dialects ------------ Dialects have been added: * a MySQLdb driver for the Drizzle database: `Drizzle `_ * support for the pymysql DBAPI: `pymsql Notes `_ * psycopg2 now works with Python 3 Behavioral Changes (Backwards Compatible) ========================================= C Extensions Build by Default ----------------------------- This is as of 0.7b4. The exts will build if cPython 2.xx is detected. If the build fails, such as on a windows install, that condition is caught and the non-C install proceeds. The C exts won't build if Python 3 or Pypy is used. Query.count() simplified, should work virtually always ------------------------------------------------------ The very old guesswork which occurred within ``Query.count()`` has been modernized to use ``.from_self()``. That is, ``query.count()`` is now equivalent to: :: query.from_self(func.count(literal_column('1'))).scalar() Previously, internal logic attempted to rewrite the columns clause of the query itself, and upon detection of a "subquery" condition, such as a column-based query that might have aggregates in it, or a query with DISTINCT, would go through a convoluted process of rewriting the columns clause. This logic failed in complex conditions, particularly those involving joined table inheritance, and was long obsolete by the more comprehensive ``.from_self()`` call. The SQL emitted by ``query.count()`` is now always of the form: :: SELECT count(1) AS count_1 FROM ( SELECT user.id AS user_id, user.name AS user_name from user ) AS anon_1 that is, the original query is preserved entirely inside of a subquery, with no more guessing as to how count should be applied. :ticket:`2093` To emit a non-subquery form of count() ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ MySQL users have already reported that the MyISAM engine not surprisingly falls over completely with this simple change. Note that for a simple ``count()`` that optimizes for DBs that can't handle simple subqueries, ``func.count()`` should be used: :: from sqlalchemy import func session.query(func.count(MyClass.id)).scalar() or for ``count(*)``: :: from sqlalchemy import func, literal_column session.query(func.count(literal_column('*'))).select_from(MyClass).scalar() LIMIT/OFFSET clauses now use bind parameters -------------------------------------------- The LIMIT and OFFSET clauses, or their backend equivalents (i.e. TOP, ROW NUMBER OVER, etc.), use bind parameters for the actual values, for all backends which support it (most except for Sybase). This allows better query optimizer performance as the textual string for multiple statements with differing LIMIT/OFFSET are now identical. :ticket:`805` Logging enhancements -------------------- Vinay Sajip has provided a patch to our logging system such that the "hex string" embedded in logging statements for engines and pools is no longer needed to allow the ``echo`` flag to work correctly. 
A new system that uses filtered logging objects allows us to maintain our current behavior of ``echo`` being local to individual engines without the need for additional identifying strings local to those engines. :ticket:`1926` Simplified polymorphic_on assignment ------------------------------------ The population of the ``polymorphic_on`` column-mapped attribute, when used in an inheritance scenario, now occurs when the object is constructed, i.e. its ``__init__`` method is called, using the init event. The attribute then behaves the same as any other column-mapped attribute. Previously, special logic would fire off during flush to populate this column, which prevented any user code from modifying its behavior. The new approach improves upon this in three ways: 1. the polymorphic identity is now present on the object as soon as its constructed; 2. the polymorphic identity can be changed by user code without any difference in behavior from any other column-mapped attribute; 3. the internals of the mapper during flush are simplified and no longer need to make special checks for this column. :ticket:`1895` contains_eager() chains across multiple paths (i.e. "all()") ------------------------------------------------------------ The ```contains_eager()```` modifier now will chain itself for a longer path without the need to emit individual ````contains_eager()``` calls. Instead of: :: session.query(A).options(contains_eager(A.b), contains_eager(A.b, B.c)) you can say: :: session.query(A).options(contains_eager(A.b, B.c)) :ticket:`2032` Flushing of orphans that have no parent is allowed -------------------------------------------------- We've had a long standing behavior that checks for a so- called "orphan" during flush, that is, an object which is associated with a ``relationship()`` that specifies "delete- orphan" cascade, has been newly added to the session for an INSERT, and no parent relationship has been established. This check was added years ago to accommodate some test cases which tested the orphan behavior for consistency. In modern SQLA, this check is no longer needed on the Python side. The equivalent behavior of the "orphan check" is accomplished by making the foreign key reference to the object's parent row NOT NULL, where the database does its job of establishing data consistency in the same way SQLA allows most other operations to do. If the object's parent foreign key is nullable, then the row can be inserted. The "orphan" behavior runs when the object was persisted with a particular parent, and is then disassociated with that parent, leading to a DELETE statement emitted for it. :ticket:`1912` Warnings generated when collection members, scalar referents not part of the flush ---------------------------------------------------------------------------------- Warnings are now emitted when related objects referenced via a loaded ``relationship()`` on a parent object marked as "dirty" are not present in the current ``Session``. The ``save-update`` cascade takes effect when objects are added to the ``Session``, or when objects are first associated with a parent, so that an object and everything related to it are usually all present in the same ``Session``. However, if ``save-update`` cascade is disabled for a particular ``relationship()``, then this behavior does not occur, and the flush process does not try to correct for it, instead staying consistent to the configured cascade behavior. Previously, when such objects were detected during the flush, they were silently skipped. 
The new behavior is that a warning is emitted, for the purposes of alerting to a situation that more often than not is the source of unexpected behavior. :ticket:`1973` Setup no longer installs a Nose plugin -------------------------------------- Since we moved to nose we've used a plugin that installs via setuptools, so that the ``nosetests`` script would automatically run SQLA's plugin code, necessary for our tests to have a full environment. In the middle of 0.6, we realized that the import pattern here meant that Nose's "coverage" plugin would break, since "coverage" requires that it be started before any modules to be covered are imported; so in the middle of 0.6 we made the situation worse by adding a separate ``sqlalchemy-nose`` package to the build to overcome this. In 0.7 we've done away with trying to get ``nosetests`` to work automatically, since the SQLAlchemy module would produce a large number of nose configuration options for all usages of ``nosetests``, not just the SQLAlchemy unit tests themselves, and the additional ``sqlalchemy-nose`` install was an even worse idea, producing an extra package in Python environments. The ``sqla_nose.py`` script in 0.7 is now the only way to run the tests with nose. :ticket:`1949` Non-``Table``-derived constructs can be mapped ---------------------------------------------- A construct that isn't against any ``Table`` at all, like a function, can be mapped. :: from sqlalchemy import select, func from sqlalchemy.orm import mapper class Subset(object): pass selectable = select(["x", "y", "z"]).select_from(func.some_db_function()).alias() mapper(Subset, selectable, primary_key=[selectable.c.x]) :ticket:`1876` aliased() accepts ``FromClause`` elements ----------------------------------------- This is a convenience helper such that in the case a plain ``FromClause``, such as a ``select``, ``Table`` or ``join`` is passed to the ``orm.aliased()`` construct, it passes through to the ``.alias()`` method of that from construct rather than constructing an ORM level ``AliasedClass``. :ticket:`2018` Session.connection(), Session.execute() accept 'bind' ----------------------------------------------------- This is to allow execute/connection operations to participate in the open transaction of an engine explicitly. It also allows custom subclasses of ``Session`` that implement their own ``get_bind()`` method and arguments to use those custom arguments with both the ``execute()`` and ``connection()`` methods equally. `Session.connection `_ `Session.execute `_ :ticket:`1996` Standalone bind parameters in columns clause auto-labeled. ---------------------------------------------------------- Bind parameters present in the "columns clause" of a select are now auto-labeled like other "anonymous" clauses, which among other things allows their "type" to be meaningful when the row is fetched, as in result row processors. SQLite - relative file paths are normalized through os.path.abspath() --------------------------------------------------------------------- This so that a script that changes the current directory will continue to target the same location as subsequent SQLite connections are established. 
:ticket:`2036` MS-SQL - ``String``/``Unicode``/``VARCHAR``/``NVARCHAR``/``VARBINARY`` emit "max" for no length ----------------------------------------------------------------------------------------------- On the MS-SQL backend, the String/Unicode types, and their counterparts VARCHAR/ NVARCHAR, as well as VARBINARY (:ticket:`1833`) emit "max" as the length when no length is specified. This makes it more compatible with Postgresql's VARCHAR type which is similarly unbounded when no length specified. SQL Server defaults the length on these types to '1' when no length is specified. Behavioral Changes (Backwards Incompatible) =========================================== Note again, aside from the default mutability change, most of these changes are \*extremely minor* and will not affect most users. ``PickleType`` and ARRAY mutability turned off by default --------------------------------------------------------- This change refers to the default behavior of the ORM when mapping columns that have either the ``PickleType`` or ``postgresql.ARRAY`` datatypes. The ``mutable`` flag is now set to ``False`` by default. If an existing application uses these types and depends upon detection of in-place mutations, the type object must be constructed with ``mutable=True`` to restore the 0.6 behavior: :: Table('mytable', metadata, # .... Column('pickled_data', PickleType(mutable=True)) ) The ``mutable=True`` flag is being phased out, in favor of the new `Mutation Tracking `_ extension. This extension provides a mechanism by which user-defined datatypes can provide change events back to the owning parent or parents. The previous approach of using ``mutable=True`` does not provide for change events - instead, the ORM must scan through all mutable values present in a session and compare them against their original value for changes every time ``flush()`` is called, which is a very time consuming event. This is a holdover from the very early days of SQLAlchemy when ``flush()`` was not automatic and the history tracking system was not nearly as sophisticated as it is now. Existing applications which use ``PickleType``, ``postgresql.ARRAY`` or other ``MutableType`` subclasses, and require in-place mutation detection, should migrate to the new mutation tracking system, as ``mutable=True`` is likely to be deprecated in the future. :ticket:`1980` Mutability detection of ``composite()`` requires the Mutation Tracking Extension -------------------------------------------------------------------------------- So-called "composite" mapped attributes, those configured using the technique described at `Composite Column Types `_, have been re-implemented such that the ORM internals are no longer aware of them (leading to shorter and more efficient codepaths in critical sections). While composite types are generally intended to be treated as immutable value objects, this was never enforced. For applications that use composites with mutability, the `Mutation Tracking `_ extension offers a base class which establishes a mechanism for user-defined composite types to send change event messages back to the owning parent or parents of each object. Applications which use composite types and rely upon in- place mutation detection of these objects should either migrate to the "mutation tracking" extension, or change the usage of the composite types such that in-place changes are no longer needed (i.e., treat them as immutable value objects). 
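As a rough sketch of that migration, following the pattern documented for the mutation tracking extension (the ``Point`` class here is illustrative), a composite value object subclasses ``MutableComposite`` and propagates in-place changes to its parents via ``changed()``: ::

    from sqlalchemy.ext.mutable import MutableComposite

    class Point(MutableComposite):
        def __init__(self, x, y):
            self.x = x
            self.y = y

        def __setattr__(self, key, value):
            # set the attribute, then emit a change event to all parents
            object.__setattr__(self, key, value)
            self.changed()

        def __composite_values__(self):
            return self.x, self.y

        def __eq__(self, other):
            return isinstance(other, Point) and \
                other.x == self.x and other.y == self.y

        def __ne__(self, other):
            return not self.__eq__(other)

A class built this way is then mapped with ``composite()`` in the usual way; an in-place change such as ``vertex.start.x = 5`` flags the parent object as dirty without any flush-time scanning.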
SQLite - the SQLite dialect now uses ``NullPool`` for file-based databases -------------------------------------------------------------------------- This change is **99.999% backwards compatible**, unless you are using temporary tables across connection pool connections. A file-based SQLite connection is blazingly fast, and using ``NullPool`` means that each call to ``Engine.connect`` creates a new pysqlite connection. Previously, the ``SingletonThreadPool`` was used, which meant that all connections to a certain engine in a thread would be the same connection. It's intended that the new approach is more intuitive, particularly when multiple connections are used. ``SingletonThreadPool`` is still the default engine when a ``:memory:`` database is used. Note that this change **breaks temporary tables used across Session commits**, due to the way SQLite handles temp tables. See the note at http://www.sqlalchemy.org/docs/dialects/sqlite.html#using- temporary-tables-with-sqlite if temporary tables beyond the scope of one pool connection are desired. :ticket:`1921` ``Session.merge()`` checks version ids for versioned mappers ------------------------------------------------------------ Session.merge() will check the version id of the incoming state against that of the database, assuming the mapping uses version ids and incoming state has a version_id assigned, and raise StaleDataError if they don't match. This is the correct behavior, in that if incoming state contains a stale version id, it should be assumed the state is stale. If merging data into a versioned state, the version id attribute can be left undefined, and no version check will take place. This check was confirmed by examining what Hibernate does - both the ``merge()`` and the versioning features were originally adapted from Hibernate. :ticket:`2027` Tuple label names in Query Improved ----------------------------------- This improvement is potentially slightly backwards incompatible for an application that relied upon the old behavior. Given two mapped classes ``Foo`` and ``Bar`` each with a column ``spam``: :: qa = session.query(Foo.spam) qb = session.query(Bar.spam) qu = qa.union(qb) The name given to the single column yielded by ``qu`` will be ``spam``. Previously it would be something like ``foo_spam`` due to the way the ``union`` would combine things, which is inconsistent with the name ``spam`` in the case of a non-unioned query. :ticket:`1942` Mapped column attributes reference the most specific column first ----------------------------------------------------------------- This is a change to the behavior involved when a mapped column attribute references multiple columns, specifically when dealing with an attribute on a joined-table subclass that has the same name as that of an attribute on the superclass. Using declarative, the scenario is this: :: class Parent(Base): __tablename__ = 'parent' id = Column(Integer, primary_key=True) class Child(Parent): __tablename__ = 'child' id = Column(Integer, ForeignKey('parent.id'), primary_key=True) Above, the attribute ``Child.id`` refers to both the ``child.id`` column as well as ``parent.id`` - this due to the name of the attribute. If it were named differently on the class, such as ``Child.child_id``, it then maps distinctly to ``child.id``, with ``Child.id`` being the same attribute as ``Parent.id``. When the ``id`` attribute is made to reference both ``parent.id`` and ``child.id``, it stores them in an ordered list. 
An expression such as ``Child.id`` then refers to just *one* of those columns when rendered. Up until 0.6, this column would be ``parent.id``. In 0.7, it is the less surprising ``child.id``. The legacy of this behavior deals with behaviors and restrictions of the ORM that don't really apply anymore; all that was needed was to reverse the order. A primary advantage of this approach is that it's now easier to construct ``primaryjoin`` expressions that refer to the local column: :: class Child(Parent): __tablename__ = 'child' id = Column(Integer, ForeignKey('parent.id'), primary_key=True) some_related = relationship("SomeRelated", primaryjoin="Child.id==SomeRelated.child_id") class SomeRelated(Base): __tablename__ = 'some_related' id = Column(Integer, primary_key=True) child_id = Column(Integer, ForeignKey('child.id')) Prior to 0.7 the ``Child.id`` expression would reference ``Parent.id``, and it would be necessary to map ``child.id`` to a distinct attribute. It also means that a query like this one changes its behavior: :: session.query(Parent).filter(Child.id > 7) In 0.6, this would render: :: SELECT parent.id AS parent_id FROM parent WHERE parent.id > :id_1 in 0.7, you get: :: SELECT parent.id AS parent_id FROM parent, child WHERE child.id > :id_1 which you'll note is a cartesian product - this behavior is now equivalent to that of any other attribute that is local to ``Child``. The ``with_polymorphic()`` method, or a similar strategy of explicitly joining the underlying ``Table`` objects, is used to render a query against all ``Parent`` objects with criteria against ``Child``, in the same manner as that of 0.5 and 0.6: :: print s.query(Parent).with_polymorphic([Child]).filter(Child.id > 7) Which on both 0.6 and 0.7 renders: :: SELECT parent.id AS parent_id, child.id AS child_id FROM parent LEFT OUTER JOIN child ON parent.id = child.id WHERE child.id > :id_1 Another effect of this change is that a joined-inheritance load across two tables will populate from the child table's value, not that of the parent table. An unusual case is that a query against "Parent" using ``with_polymorphic="*"`` issues a query against "parent", with a LEFT OUTER JOIN to "child". The row is located in "Parent", sees the polymorphic identity corresponds to "Child", but suppose the actual row in "child" has been *deleted*. Due to this corruption, the row comes in with all the columns corresponding to "child" set to NULL - this is now the value that gets populated, not the one in the parent table. :ticket:`1892` Mapping to joins with two or more same-named columns requires explicit declaration ---------------------------------------------------------------------------------- This is somewhat related to the previous change in :ticket:`1892`. When mapping to a join, same-named columns must be explicitly linked to mapped attributes, i.e. as described in `Mapping a Class Against Multiple Tables `_. Given two tables ``foo`` and ``bar``, each with a primary key column ``id``, the following now produces an error: :: foobar = foo.join(bar, foo.c.id==bar.c.foo_id) mapper(FooBar, foobar) This because the ``mapper()`` refuses to guess what column is the primary representation of ``FooBar.id`` - is it ``foo.c.id`` or is it ``bar.c.id`` ? 
The attribute must be explicit: :: foobar = foo.join(bar, foo.c.id==bar.c.foo_id) mapper(FooBar, foobar, properties={ 'id':[foo.c.id, bar.c.id] }) :ticket:`1896` Mapper requires that polymorphic_on column be present in the mapped selectable ------------------------------------------------------------------------------ This is a warning in 0.6, now an error in 0.7. The column given for ``polymorphic_on`` must be in the mapped selectable. This to prevent some occasional user errors such as: :: mapper(SomeClass, sometable, polymorphic_on=some_lookup_table.c.id) where above the polymorphic_on needs to be on a ``sometable`` column, in this case perhaps ``sometable.c.some_lookup_id``. There are also some "polymorphic union" scenarios where similar mistakes sometimes occur. Such a configuration error has always been "wrong", and the above mapping doesn't work as specified - the column would be ignored. It is however potentially backwards incompatible in the rare case that an application has been unknowingly relying upon this behavior. :ticket:`1875` ``DDL()`` constructs now escape percent signs --------------------------------------------- Previously, percent signs in ``DDL()`` strings would have to be escaped, i.e. ``%%`` depending on DBAPI, for those DBAPIs that accept ``pyformat`` or ``format`` binds (i.e. psycopg2, mysql-python), which was inconsistent versus ``text()`` constructs which did this automatically. The same escaping now occurs for ``DDL()`` as for ``text()``. :ticket:`1897` ``Table.c`` / ``MetaData.tables`` refined a bit, don't allow direct mutation ---------------------------------------------------------------------------- Another area where some users were tinkering around in such a way that doesn't actually work as expected, but still left an exceedingly small chance that some application was relying upon this behavior, the construct returned by the ``.c`` attribute on ``Table`` and the ``.tables`` attribute on ``MetaData`` is explicitly non-mutable. The "mutable" version of the construct is now private. Adding columns to ``.c`` involves using the ``append_column()`` method of ``Table``, which ensures things are associated with the parent ``Table`` in the appropriate way; similarly, ``MetaData.tables`` has a contract with the ``Table`` objects stored in this dictionary, as well as a little bit of new bookkeeping in that a ``set()`` of all schema names is tracked, which is satisfied only by using the public ``Table`` constructor as well as ``Table.tometadata()``. It is of course possible that the ``ColumnCollection`` and ``dict`` collections consulted by these attributes could someday implement events on all of their mutational methods such that the appropriate bookkeeping occurred upon direct mutation of the collections, but until someone has the motivation to implement all that along with dozens of new unit tests, narrowing the paths to mutation of these collections will ensure no application is attempting to rely upon usages that are currently not supported. :ticket:`1893` :ticket:`1917` server_default consistently returns None for all inserted_primary_key values ---------------------------------------------------------------------------- Established consistency when server_default is present on an Integer PK column. SQLA doesn't pre-fetch these, nor do they come back in cursor.lastrowid (DBAPI). Ensured all backends consistently return None in result.inserted_primary_key for these - some backends may have returned a value previously. 
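As a sketch of the now-consistent behavior (the table, its server-side default, and the ``engine`` below are hypothetical, not part of the original changelog)::

    from sqlalchemy import Table, Column, Integer, String, MetaData, text

    metadata = MetaData()
    t = Table('t', metadata,
        Column('id', Integer, primary_key=True,
               server_default=text("nextval('t_id_seq')")),
        Column('data', String(50))
    )

    result = engine.execute(t.insert().values(data='x'))

    # the server-generated value is not pre-fetched, nor is it available
    # via cursor.lastrowid, so the 'id' entry is now None on all backends
    result.inserted_primary_key   # [None]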
Using a server_default on a primary key column is extremely unusual. If a special function or SQL expression is used to generate primary key defaults, this should be established as a Python-side "default" instead of server_default. Regarding reflection for this case, reflection of an int PK col with a server_default sets the "autoincrement" flag to False, except in the case of a PG SERIAL col where we detected a sequence default. :ticket:`2020` :ticket:`2021` The ``sqlalchemy.exceptions`` alias in sys.modules is removed ------------------------------------------------------------- For a few years we've added the string ``sqlalchemy.exceptions`` to ``sys.modules``, so that a statement like "``import sqlalchemy.exceptions``" would work. The name of the core exceptions module has been ``exc`` for a long time now, so the recommended import for this module is: :: from sqlalchemy import exc The ``exceptions`` name is still present in "``sqlalchemy``" for applications which might have said ``from sqlalchemy import exceptions``, but they should also start using the ``exc`` name. Query Timing Recipe Changes --------------------------- While not part of SQLAlchemy itself, it's worth mentioning that the rework of the ``ConnectionProxy`` into the new event system means it is no longer appropriate for the "Timing all Queries" recipe. Please adjust query-timers to use the ``before_cursor_execute()`` and ``after_cursor_execute()`` events, demonstrated in the updated recipe UsageRecipes/Profiling. Deprecated API ============== Default constructor on types will not accept arguments ------------------------------------------------------ Simple types like ``Integer``, ``Date`` etc. in the core types module don't accept arguments. The default constructor that accepts/ignores a catchall ``\*args, \**kwargs`` is restored as of 0.7b4/0.7.0, but emits a deprecation warning. If arguments are being used with a core type like ``Integer``, it may be that you intended to use a dialect specific type, such as ``sqlalchemy.dialects.mysql.INTEGER`` which does accept a "display_width" argument for example. compile_mappers() renamed configure_mappers(), simplified configuration internals --------------------------------------------------------------------------------- This system slowly morphed from something small, implemented local to an individual mapper, and poorly named into something that's more of a global "registry-" level function and poorly named, so we've fixed both by moving the implementation out of ``Mapper`` altogether and renaming it to ``configure_mappers()``. It is of course normally not needed for an application to call ``configure_mappers()`` as this process occurs on an as-needed basis, as soon as the mappings are needed via attribute or query access. :ticket:`1966` Core listener/proxy superseded by event listeners ------------------------------------------------- ``PoolListener``, ``ConnectionProxy``, ``DDLElement.execute_at`` are superseded by ``event.listen()``, using the ``PoolEvents``, ``EngineEvents``, ``DDLEvents`` dispatch targets, respectively. ORM extensions superseded by event listeners -------------------------------------------- ``MapperExtension``, ``AttributeExtension``, ``SessionExtension`` are superseded by ``event.listen()``, using the ``MapperEvents``/``InstanceEvents``, ``AttributeEvents``, ``SessionEvents``, dispatch targets, respectively. 
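As a rough sketch of the translation (the mapped class and listener body here are hypothetical), a ``MapperExtension.before_insert()`` hook becomes a ``'before_insert'`` listener, shown here with the ``event.listens_for()`` decorator::

    from sqlalchemy import event

    # listen on the class or its mapper; propagate=True also applies the
    # listener to subclasses mapped later
    @event.listens_for(SomeClass, 'before_insert', propagate=True)
    def before_insert(mapper, connection, target):
        # same arguments as MapperExtension.before_insert(), minus 'self'
        target.data = target.data or 'default value'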
Sending a string to 'distinct' in select() for MySQL should be done via prefixes -------------------------------------------------------------------------------- This obscure feature allows this pattern with the MySQL backend: :: select([mytable], distinct='ALL', prefixes=['HIGH_PRIORITY']) The ``prefixes`` keyword or ``prefix_with()`` method should be used for non-standard or unusual prefixes: :: select([mytable]).prefix_with('HIGH_PRIORITY', 'ALL') ``useexisting`` superseded by ``extend_existing`` and ``keep_existing`` ----------------------------------------------------------------------- The ``useexisting`` flag on Table has been superseded by a new pair of flags ``keep_existing`` and ``extend_existing``. ``extend_existing`` is equivalent to ``useexisting`` - the existing Table is returned, and additional constructor elements are added. With ``keep_existing``, the existing Table is returned, but additional constructor elements are not added - these elements are only applied when the Table is newly created. Backwards Incompatible API Changes ================================== Callables passed to ``bindparam()`` don't get evaluated - affects the Beaker example ------------------------------------------------------------------------------------ :ticket:`1950` Note this affects the Beaker caching example, where the workings of the ``_params_from_query()`` function needed a slight adjustment. If you're using code from the Beaker example, this change should be applied. types.type_map is now private, types._type_map ---------------------------------------------- We noticed some users tapping into this dictionary inside of ``sqlalchemy.types`` as a shortcut to associating Python types with SQL types. We can't guarantee the contents or format of this dictionary, and additionally the business of associating Python types in a one-to-one fashion has some grey areas that are best decided by individual applications, so we've underscored this attribute. :ticket:`1870` Renamed the ``alias`` keyword arg of standalone ``alias()`` function to ``name`` -------------------------------------------------------------------------------- This so that the keyword argument ``name`` matches that of the ``alias()`` methods on all ``FromClause`` objects as well as the ``name`` argument on ``Query.subquery()``. Only code that uses the standalone ``alias()`` function, and not the method bound functions, and passes the alias name using the explicit keyword name ``alias``, and not positionally, would need modification here. Non-public ``Pool`` methods underscored --------------------------------------- All methods of ``Pool`` and subclasses which are not intended for public use have been renamed with underscores. That they were not named this way previously was a bug.
Pooling methods now underscored or removed: ``Pool.create_connection()`` -> ``Pool._create_connection()`` ``Pool.do_get()`` -> ``Pool._do_get()`` ``Pool.do_return_conn()`` -> ``Pool._do_return_conn()`` ``Pool.do_return_invalid()`` -> removed, was not used ``Pool.return_conn()`` -> ``Pool._return_conn()`` ``Pool.get()`` -> ``Pool._get()``, public API is ``Pool.connect()`` ``SingletonThreadPool.cleanup()`` -> ``_cleanup()`` ``SingletonThreadPool.dispose_local()`` -> removed, use ``conn.invalidate()`` :ticket:`1982` Previously Deprecated, Now Removed ================================== Query.join(), Query.outerjoin(), eagerload(), eagerload_all(), others no longer allow lists of attributes as arguments ---------------------------------------------------------------------------------------------------------------------- Passing a list of attributes or attribute names to ``Query.join``, ``eagerload()``, and similar has been deprecated since 0.5: :: # old way, deprecated since 0.5 session.query(Houses).join([Houses.rooms, Room.closets]) session.query(Houses).options(eagerload_all([Houses.rooms, Room.closets])) These methods all accept \*args as of the 0.5 series: :: # current way, in place since 0.5 session.query(Houses).join(Houses.rooms, Room.closets) session.query(Houses).options(eagerload_all(Houses.rooms, Room.closets)) ``ScopedSession.mapper`` is removed ----------------------------------- This feature provided a mapper extension which linked class- based functionality with a particular ``ScopedSession``, in particular providing the behavior such that new object instances would be automatically associated with that session. The feature was overused by tutorials and frameworks which led to great user confusion due to its implicit behavior, and was deprecated in 0.5.5. Techniques for replicating its functionality are at [wiki:UsageRecipes/SessionAwareMapper] SQLAlchemy-0.8.4/doc/_sources/changelog/migration_08.txt0000644000076500000240000014431712251147171023623 0ustar classicstaff00000000000000============================== What's New in SQLAlchemy 0.8? ============================== .. admonition:: About this Document This document describes changes between SQLAlchemy version 0.7, undergoing maintenance releases as of October, 2012, and SQLAlchemy version 0.8, which is expected for release in early 2013. Document date: October 25, 2012 Updated: March 9, 2013 Introduction ============ This guide introduces what's new in SQLAlchemy version 0.8, and also documents changes which affect users migrating their applications from the 0.7 series of SQLAlchemy to 0.8. SQLAlchemy releases are closing in on 1.0, and each new version since 0.5 features fewer major usage changes. Most applications that are settled into modern 0.7 patterns should be movable to 0.8 with no changes. Applications that use 0.6 and even 0.5 patterns should be directly migratable to 0.8 as well, though larger applications may want to test with each interim version. Platform Support ================ Targeting Python 2.5 and Up Now ------------------------------- SQLAlchemy 0.8 will target Python 2.5 and forward; compatibility for Python 2.4 is being dropped. The internals will be able to make usage of Python ternaries (that is, ``x if y else z``) which will improve things versus the usage of ``y and x or z``, which naturally has been the source of some bugs, as well as context managers (that is, ``with:``) and perhaps in some cases ``try:/except:/else:`` blocks which will help with code readability. 
SQLAlchemy will eventually drop 2.5 support as well - when 2.6 is reached as the baseline, SQLAlchemy will move to use 2.6/3.3 in-place compatibility, removing the usage of the ``2to3`` tool and maintaining a source base that works with Python 2 and 3 at the same time. New ORM Features ================ .. _feature_relationship_08: Rewritten :func:`.relationship` mechanics ----------------------------------------- 0.8 features a much improved and capable system regarding how :func:`.relationship` determines how to join between two entities. The new system includes these features: * The ``primaryjoin`` argument is **no longer needed** when constructing a :func:`.relationship` against a class that has multiple foreign key paths to the target. Only the ``foreign_keys`` argument is needed to specify those columns which should be included: :: class Parent(Base): __tablename__ = 'parent' id = Column(Integer, primary_key=True) child_id_one = Column(Integer, ForeignKey('child.id')) child_id_two = Column(Integer, ForeignKey('child.id')) child_one = relationship("Child", foreign_keys=child_id_one) child_two = relationship("Child", foreign_keys=child_id_two) class Child(Base): __tablename__ = 'child' id = Column(Integer, primary_key=True) * relationships against self-referential, composite foreign keys where **a column points to itself** are now supported. The canonical case is as follows: :: class Folder(Base): __tablename__ = 'folder' __table_args__ = ( ForeignKeyConstraint( ['account_id', 'parent_id'], ['folder.account_id', 'folder.folder_id']), ) account_id = Column(Integer, primary_key=True) folder_id = Column(Integer, primary_key=True) parent_id = Column(Integer) name = Column(String) parent_folder = relationship("Folder", backref="child_folders", remote_side=[account_id, folder_id] ) Above, the ``Folder`` refers to its parent ``Folder`` joining from ``account_id`` to itself, and ``parent_id`` to ``folder_id``. When SQLAlchemy constructs an auto- join, no longer can it assume all columns on the "remote" side are aliased, and all columns on the "local" side are not - the ``account_id`` column is **on both sides**. So the internal relationship mechanics were totally rewritten to support an entirely different system whereby two copies of ``account_id`` are generated, each containing different *annotations* to determine their role within the statement. Note the join condition within a basic eager load: :: SELECT folder.account_id AS folder_account_id, folder.folder_id AS folder_folder_id, folder.parent_id AS folder_parent_id, folder.name AS folder_name, folder_1.account_id AS folder_1_account_id, folder_1.folder_id AS folder_1_folder_id, folder_1.parent_id AS folder_1_parent_id, folder_1.name AS folder_1_name FROM folder LEFT OUTER JOIN folder AS folder_1 ON folder_1.account_id = folder.account_id AND folder.folder_id = folder_1.parent_id WHERE folder.folder_id = ? AND folder.account_id = ? * Previously difficult custom join conditions, like those involving functions and/or CASTing of types, will now function as expected in most cases:: class HostEntry(Base): __tablename__ = 'host_entry' id = Column(Integer, primary_key=True) ip_address = Column(INET) content = Column(String(50)) # relationship() using explicit foreign_keys, remote_side parent_host = relationship("HostEntry", primaryjoin=ip_address == cast(content, INET), foreign_keys=content, remote_side=ip_address ) The new :func:`.relationship` mechanics make use of a SQLAlchemy concept known as :term:`annotations`. 
These annotations are also available to application code explicitly via the :func:`.foreign` and :func:`.remote` functions, either as a means to improve readability for advanced configurations or to directly inject an exact configuration, bypassing the usual join-inspection heuristics:: from sqlalchemy.orm import foreign, remote class HostEntry(Base): __tablename__ = 'host_entry' id = Column(Integer, primary_key=True) ip_address = Column(INET) content = Column(String(50)) # relationship() using explicit foreign() and remote() annotations # in lieu of separate arguments parent_host = relationship("HostEntry", primaryjoin=remote(ip_address) == \ cast(foreign(content), INET), ) .. seealso:: :ref:`relationship_configure_joins` - a newly revised section on :func:`.relationship` detailing the latest techniques for customizing related attributes and collection access. :ticket:`1401` :ticket:`610` .. _feature_orminspection_08: New Class/Object Inspection System ---------------------------------- Lots of SQLAlchemy users are writing systems that require the ability to inspect the attributes of a mapped class, including being able to get at the primary key columns, object relationships, plain attributes, and so forth, typically for the purpose of building data-marshalling systems, like JSON/XML conversion schemes and of course form libraries galore. Originally, the :class:`.Table` and :class:`.Column` model were the original inspection points, which have a well-documented system. While SQLAlchemy ORM models are also fully introspectable, this has never been a fully stable and supported feature, and users tended to not have a clear idea how to get at this information. 0.8 now provides a consistent, stable and fully documented API for this purpose, including an inspection system which works on mapped classes, instances, attributes, and other Core and ORM constructs. The entrypoint to this system is the core-level :func:`.inspect` function. In most cases, the object being inspected is one already part of SQLAlchemy's system, such as :class:`.Mapper`, :class:`.InstanceState`, :class:`.Inspector`. In some cases, new objects have been added with the job of providing the inspection API in certain contexts, such as :class:`.AliasedInsp` and :class:`.AttributeState`. A walkthrough of some key capabilities follows:: >>> class User(Base): ... __tablename__ = 'user' ... id = Column(Integer, primary_key=True) ... name = Column(String) ... name_syn = synonym(name) ... addresses = relationship("Address") ... >>> # universal entry point is inspect() >>> b = inspect(User) >>> # b in this case is the Mapper >>> b >>> # Column namespace >>> b.columns.id Column('id', Integer(), table=, primary_key=True, nullable=False) >>> # mapper's perspective of the primary key >>> b.primary_key (Column('id', Integer(), table=, primary_key=True, nullable=False),) >>> # MapperProperties available from .attrs >>> b.attrs.keys() ['name_syn', 'addresses', 'id', 'name'] >>> # .column_attrs, .relationships, etc. 
filter this collection >>> b.column_attrs.keys() ['id', 'name'] >>> list(b.relationships) [] >>> # they are also namespaces >>> b.column_attrs.id >>> b.relationships.addresses >>> # point inspect() at a mapped, class level attribute, >>> # returns the attribute itself >>> b = inspect(User.addresses) >>> b >>> # From here we can get the mapper: >>> b.mapper >>> # the parent inspector, in this case a mapper >>> b.parent >>> # an expression >>> print b.expression "user".id = address.user_id >>> # inspect works on instances >>> u1 = User(id=3, name='x') >>> b = inspect(u1) >>> # it returns the InstanceState >>> b >>> # similar attrs accessor refers to the >>> b.attrs.keys() ['id', 'name_syn', 'addresses', 'name'] >>> # attribute interface - from attrs, you get a state object >>> b.attrs.id >>> # this object can give you, current value... >>> b.attrs.id.value 3 >>> # ... current history >>> b.attrs.id.history History(added=[3], unchanged=(), deleted=()) >>> # InstanceState can also provide session state information >>> # lets assume the object is persistent >>> s = Session() >>> s.add(u1) >>> s.commit() >>> # now we can get primary key identity, always >>> # works in query.get() >>> b.identity (3,) >>> # the mapper level key >>> b.identity_key (, (3,)) >>> # state within the session >>> b.persistent, b.transient, b.deleted, b.detached (True, False, False, False) >>> # owning session >>> b.session .. seealso:: :ref:`core_inspection_toplevel` :ticket:`2208` New with_polymorphic() feature, can be used anywhere ---------------------------------------------------- The :meth:`.Query.with_polymorphic` method allows the user to specify which tables should be present when querying against a joined-table entity. Unfortunately the method is awkward and only applies to the first entity in the list, and otherwise has awkward behaviors both in usage as well as within the internals. A new enhancement to the :func:`.aliased` construct has been added called :func:`.with_polymorphic` which allows any entity to be "aliased" into a "polymorphic" version of itself, freely usable anywhere: :: from sqlalchemy.orm import with_polymorphic palias = with_polymorphic(Person, [Engineer, Manager]) session.query(Company).\ join(palias, Company.employees).\ filter(or_(Engineer.language=='java', Manager.hair=='pointy')) .. seealso:: :ref:`with_polymorphic` - newly updated documentation for polymorphic loading control. :ticket:`2333` of_type() works with alias(), with_polymorphic(), any(), has(), joinedload(), subqueryload(), contains_eager() -------------------------------------------------------------------------------------------------------------- The :meth:`.PropComparator.of_type` method is used to specify a specific subtype to use when constructing SQL expressions along a :func:`.relationship` that has a :term:`polymorphic` mapping as its target. 
This method can now be used to target *any number* of target subtypes, by combining it with the new :func:`.with_polymorphic` function:: # use eager loading in conjunction with with_polymorphic targets Job_P = with_polymorphic(Job, [SubJob, ExtraJob], aliased=True) q = s.query(DataContainer).\ join(DataContainer.jobs.of_type(Job_P)).\ options(contains_eager(DataContainer.jobs.of_type(Job_P))) The method now works equally well in most places a regular relationship attribute is accepted, including with loader functions like :func:`.joinedload`, :func:`.subqueryload`, :func:`.contains_eager`, and comparison methods like :meth:`.PropComparator.any` and :meth:`.PropComparator.has`:: # use eager loading in conjunction with with_polymorphic targets Job_P = with_polymorphic(Job, [SubJob, ExtraJob], aliased=True) q = s.query(DataContainer).\ join(DataContainer.jobs.of_type(Job_P)).\ options(contains_eager(DataContainer.jobs.of_type(Job_P))) # pass subclasses to eager loads (implicitly applies with_polymorphic) q = s.query(ParentThing).\ options( joinedload_all( ParentThing.container, DataContainer.jobs.of_type(SubJob) )) # control self-referential aliasing with any()/has() Job_A = aliased(Job) q = s.query(Job).join(DataContainer.jobs).\ filter( DataContainer.jobs.of_type(Job_A).\ any(and_(Job_A.id < Job.id, Job_A.type=='fred') ) ) .. seealso:: :ref:`of_type` :ticket:`2438` :ticket:`1106` Events Can Be Applied to Unmapped Superclasses ---------------------------------------------- Mapper and instance events can now be associated with an unmapped superclass, where those events will be propagated to subclasses as those subclasses are mapped. The ``propagate=True`` flag should be used. This feature allows events to be associated with a declarative base class:: from sqlalchemy.ext.declarative import declarative_base Base = declarative_base() @event.listens_for(Base, "load", propagate=True) def on_load(target, context): print "New instance loaded:", target # on_load() will be applied to SomeClass class SomeClass(Base): __tablename__ = 'sometable' # ... :ticket:`2585` Declarative Distinguishes Between Modules/Packages -------------------------------------------------- A key feature of Declarative is the ability to refer to other mapped classes using their string name. The registry of class names is now sensitive to the owning module and package of a given class. The classes can be referred to via dotted name in expressions:: class Snack(Base): # ... peanuts = relationship("nuts.Peanut", primaryjoin="nuts.Peanut.snack_id == Snack.id") The resolution allows that any full or partial disambiguating package name can be used. If the path to a particular class is still ambiguous, an error is raised. :ticket:`2338` New DeferredReflection Feature in Declarative --------------------------------------------- The "deferred reflection" example has been moved to a supported feature within Declarative. This feature allows the construction of declarative mapped classes with only placeholder ``Table`` metadata, until a ``prepare()`` step is called, given an ``Engine`` with which to reflect fully all tables and establish actual mappings. The system supports overriding of columns, single and joined inheritance, as well as distinct bases-per-engine.
A full declarative configuration can now be created against an existing table that is assembled upon engine creation time in one step: :: class ReflectedOne(DeferredReflection, Base): __abstract__ = True class ReflectedTwo(DeferredReflection, Base): __abstract__ = True class MyClass(ReflectedOne): __tablename__ = 'mytable' class MyOtherClass(ReflectedOne): __tablename__ = 'myothertable' class YetAnotherClass(ReflectedTwo): __tablename__ = 'yetanothertable' ReflectedOne.prepare(engine_one) ReflectedTwo.prepare(engine_two) .. seealso:: :class:`.DeferredReflection` :ticket:`2485` ORM Classes Now Accepted by Core Constructs ------------------------------------------- While the SQL expressions used with :meth:`.Query.filter`, such as ``User.id == 5``, have always been compatible for use with core constructs such as :func:`.select`, the mapped class itself would not be recognized when passed to :func:`.select`, :meth:`.Select.select_from`, or :meth:`.Select.correlate`. A new SQL registration system allows a mapped class to be accepted as a FROM clause within the core:: from sqlalchemy import select stmt = select([User]).where(User.id == 5) Above, the mapped ``User`` class will expand into :class:`.Table` to which :class:`.User` is mapped. :ticket:`2245` Query.update() supports UPDATE..FROM ------------------------------------- The new UPDATE..FROM mechanics work in query.update(). Below, we emit an UPDATE against ``SomeEntity``, adding a FROM clause (or equivalent, depending on backend) against ``SomeOtherEntity``:: query(SomeEntity).\ filter(SomeEntity.id==SomeOtherEntity.id).\ filter(SomeOtherEntity.foo=='bar').\ update({"data":"x"}) In particular, updates to joined-inheritance entities are supported, provided the target of the UPDATE is local to the table being filtered on, or if the parent and child tables are mixed, they are joined explicitly in the query. Below, given ``Engineer`` as a joined subclass of ``Person``: :: query(Engineer).\ filter(Person.id==Engineer.id).\ filter(Person.name=='dilbert').\ update({"engineer_data":"java"}) would produce: :: UPDATE engineer SET engineer_data='java' FROM person WHERE person.id=engineer.id AND person.name='dilbert' :ticket:`2365` rollback() will only roll back "dirty" objects from a begin_nested() -------------------------------------------------------------------- A behavioral change that should improve efficiency for those users using SAVEPOINT via ``Session.begin_nested()`` - upon ``rollback()``, only those objects that were made dirty since the last flush will be expired, the rest of the ``Session`` remains intact. This because a ROLLBACK to a SAVEPOINT does not terminate the containing transaction's isolation, so no expiry is needed except for those changes that were not flushed in the current transaction. :ticket:`2452` Caching Example now uses dogpile.cache --------------------------------------- The caching example now uses `dogpile.cache `_. Dogpile.cache is a rewrite of the caching portion of Beaker, featuring vastly simpler and faster operation, as well as support for distributed locking. 
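Setting up a dogpile.cache region takes only a few lines; the following is a sketch using the in-memory backend (the region name and expiration time are arbitrary here, and the actual example in the distribution is more elaborate)::

    from dogpile.cache import make_region

    regions = {
        'default': make_region().configure(
            'dogpile.cache.memory',
            expiration_time=3600
        )
    }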
Note that the SQLAlchemy APIs used by the Dogpile example as well as the previous Beaker example have changed slightly, in particular this change is needed as illustrated in the Beaker example:: --- examples/beaker_caching/caching_query.py +++ examples/beaker_caching/caching_query.py @@ -222,7 +222,8 @@ """ if query._current_path: - mapper, key = query._current_path[-2:] + mapper, prop = query._current_path[-2:] + key = prop.key for cls in mapper.class_.__mro__: if (cls, key) in self._relationship_options: .. seealso:: :mod:`dogpile_caching` :ticket:`2589` New Core Features ================== Fully extensible, type-level operator support in Core ----------------------------------------------------- The Core has to date never had any system of adding support for new SQL operators to Column and other expression constructs, other than the :meth:`.ColumnOperators.op` method which is "just enough" to make things work. There has also never been any system in place for Core which allows the behavior of existing operators to be overridden. Up until now, the only way operators could be flexibly redefined was in the ORM layer, using :func:`.column_property` given a ``comparator_factory`` argument. Third party libraries like GeoAlchemy therefore were forced to be ORM-centric and rely upon an array of hacks to apply new operations as well as to get them to propagate correctly. The new operator system in Core adds the one hook that's been missing all along, which is to associate new and overridden operators with *types*. Since after all, it's not really a column, CAST operator, or SQL function that really drives what kinds of operations are present, it's the *type* of the expression. The implementation details are minimal - only a few extra methods are added to the core :class:`.ColumnElement` type so that it consults its :class:`.TypeEngine` object for an optional set of operators. New or revised operations can be associated with any type, either via subclassing of an existing type, by using :class:`.TypeDecorator`, or "globally across-the-board" by attaching a new :class:`.TypeEngine.Comparator` object to an existing type class. For example, to add logarithm support to :class:`.Numeric` types: :: from sqlalchemy.types import Numeric from sqlalchemy.sql import func class CustomNumeric(Numeric): class comparator_factory(Numeric.Comparator): def log(self, other): return func.log(self.expr, other) The new type is usable like any other type: :: data = Table('data', metadata, Column('id', Integer, primary_key=True), Column('x', CustomNumeric(10, 5)), Column('y', CustomNumeric(10, 5)) ) stmt = select([data.c.x.log(data.c.y)]).where(data.c.x.log(2) < value) print conn.execute(stmt).fetchall() New features which have come from this immediately include support for Postgresql's HSTORE type, as well as new operations associated with Postgresql's ARRAY type. It also paves the way for existing types to acquire lots more operators that are specific to those types, such as more string, integer and date operators. .. seealso:: :ref:`types_operators` :class:`.HSTORE` :ticket:`2547` Type Expressions ----------------- SQL expressions can now be associated with types. Historically, :class:`.TypeEngine` has always allowed Python-side functions which receive both bound parameters as well as result row values, passing them through a Python side conversion function on the way to/back from the database.
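A sketch of that pre-existing Python-side approach, using :class:`.TypeDecorator` (the type shown here is made up for illustration and is not part of the changelog)::

    from sqlalchemy.types import TypeDecorator, String

    class PyLowerString(TypeDecorator):
        # Python-side conversion only; no SQL expression is involved
        impl = String

        def process_bind_param(self, value, dialect):
            # value heading into the database
            return value.lower() if value is not None else None

        def process_result_value(self, value, dialect):
            # value coming back from a result row
            return value.lower() if value is not None else None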
The new feature allows similar functionality, except on the database side:: from sqlalchemy.types import String from sqlalchemy import func, Table, Column, MetaData class LowerString(String): def bind_expression(self, bindvalue): return func.lower(bindvalue) def column_expression(self, col): return func.lower(col) metadata = MetaData() test_table = Table( 'test_table', metadata, Column('data', LowerString) ) Above, the ``LowerString`` type defines a SQL expression that will be emitted whenever the ``test_table.c.data`` column is rendered in the columns clause of a SELECT statement:: >>> print select([test_table]).where(test_table.c.data == 'HI') SELECT lower(test_table.data) AS data FROM test_table WHERE test_table.data = lower(:data_1) This feature is also used heavily by the new release of GeoAlchemy, to embed PostGIS expressions inline in SQL based on type rules. .. seealso:: :ref:`types_sql_value_processing` :ticket:`1534` Core Inspection System ----------------------- The :func:`.inspect` function introduced in :ref:`feature_orminspection_08` also applies to the core. Applied to an :class:`.Engine` it produces an :class:`.Inspector` object:: from sqlalchemy import inspect from sqlalchemy import create_engine engine = create_engine("postgresql://scott:tiger@localhost/test") insp = inspect(engine) print insp.get_table_names() It can also be applied to any :class:`.ClauseElement`, which returns the :class:`.ClauseElement` itself, such as :class:`.Table`, :class:`.Column`, :class:`.Select`, etc. This allows it to work fluently between Core and ORM constructs. New Method :meth:`.Select.correlate_except` ------------------------------------------- :func:`.select` now has a method :meth:`.Select.correlate_except` which specifies "correlate on all FROM clauses except those specified". It can be used for mapping scenarios where a related subquery should correlate normally, except against a particular target selectable:: class SnortEvent(Base): __tablename__ = "event" id = Column(Integer, primary_key=True) signature = Column(Integer, ForeignKey("signature.id")) signatures = relationship("Signature", lazy=False) class Signature(Base): __tablename__ = "signature" id = Column(Integer, primary_key=True) sig_count = column_property( select([func.count('*')]).\ where(SnortEvent.signature == id). correlate_except(SnortEvent) ) .. seealso:: :meth:`.Select.correlate_except` Postgresql HSTORE type ---------------------- Support for Postgresql's ``HSTORE`` type is now available as :class:`.postgresql.HSTORE`. This type makes great usage of the new operator system to provide a full range of operators for HSTORE types, including index access, concatenation, and containment methods such as :meth:`~.HSTORE.comparator_factory.has_key`, :meth:`~.HSTORE.comparator_factory.has_any`, and :meth:`~.HSTORE.comparator_factory.matrix`:: from sqlalchemy.dialects.postgresql import HSTORE data = Table('data_table', metadata, Column('id', Integer, primary_key=True), Column('hstore_data', HSTORE) ) engine.execute( select([data.c.hstore_data['some_key']]) ).scalar() engine.execute( select([data.c.hstore_data.matrix()]) ).scalar() .. 
seealso:: :class:`.postgresql.HSTORE` :class:`.postgresql.hstore` :ticket:`2606` Enhanced Postgresql ARRAY type ------------------------------ The :class:`.postgresql.ARRAY` type will accept an optional "dimension" argument, pinning it to a fixed number of dimensions and greatly improving efficiency when retrieving results: :: # old way, still works since PG supports N-dimensions per row: Column("my_array", postgresql.ARRAY(Integer)) # new way, will render ARRAY with correct number of [] in DDL, # will process binds and results more efficiently as we don't need # to guess how many levels deep to go Column("my_array", postgresql.ARRAY(Integer, dimensions=2)) The type also introduces new operators, using the new type-specific operator framework. New operations include indexed access:: result = conn.execute( select([mytable.c.arraycol[2]]) ) slice access in SELECT:: result = conn.execute( select([mytable.c.arraycol[2:4]]) ) slice updates in UPDATE:: conn.execute( mytable.update().values({mytable.c.arraycol[2:3]: [7, 8]}) ) freestanding array literals:: >>> from sqlalchemy.dialects import postgresql >>> conn.scalar( ... select([ ... postgresql.array([1, 2]) + postgresql.array([3, 4, 5]) ... ]) ... ) [1, 2, 3, 4, 5] array concatenation, where below, the right side ``[4, 5, 6]`` is coerced into an array literal:: select([mytable.c.arraycol + [4, 5, 6]]) .. seealso:: :class:`.postgresql.ARRAY` :class:`.postgresql.array` :ticket:`2441` New, configurable DATE, TIME types for SQLite --------------------------------------------- SQLite has no built-in DATE, TIME, or DATETIME types, and instead provides some support for storage of date and time values either as strings or integers. The date and time types for SQLite are enhanced in 0.8 to be much more configurable as to the specific format, including that the "microseconds" portion is optional, as well as pretty much everything else. :: Column('sometimestamp', sqlite.DATETIME(truncate_microseconds=True)) Column('sometimestamp', sqlite.DATETIME( storage_format=( "%(year)04d%(month)02d%(day)02d" "%(hour)02d%(minute)02d%(second)02d%(microsecond)06d" ), regexp="(\d{4})(\d{2})(\d{2})(\d{2})(\d{2})(\d{2})(\d{6})" ) ) Column('somedate', sqlite.DATE( storage_format="%(month)02d/%(day)02d/%(year)04d", regexp="(?P\d+)/(?P\d+)/(?P\d+)", ) ) Huge thanks to Nate Dub for the sprinting on this at Pycon 2012. .. seealso:: :class:`.sqlite.DATETIME` :class:`.sqlite.DATE` :class:`.sqlite.TIME` :ticket:`2363` "COLLATE" supported across all dialects; in particular MySQL, Postgresql, SQLite -------------------------------------------------------------------------------- The "collate" keyword, long accepted by the MySQL dialect, is now established on all :class:`.String` types and will render on any backend, including when features such as :meth:`.MetaData.create_all` and :func:`.cast` is used:: >>> stmt = select([cast(sometable.c.somechar, String(20, collation='utf8'))]) >>> print stmt SELECT CAST(sometable.somechar AS VARCHAR(20) COLLATE "utf8") AS anon_1 FROM sometable .. seealso:: :class:`.String` :ticket:`2276` "Prefixes" now supported for :func:`.update`, :func:`.delete` ------------------------------------------------------------- Geared towards MySQL, a "prefix" can be rendered within any of these constructs. E.g.:: stmt = table.delete().prefix_with("LOW_PRIORITY", dialect="mysql") stmt = table.update().prefix_with("LOW_PRIORITY", dialect="mysql") The method is new in addition to those which already existed on :func:`.insert`, :func:`.select` and :class:`.Query`. .. 
seealso:: :meth:`.Update.prefix_with` :meth:`.Delete.prefix_with` :meth:`.Insert.prefix_with` :meth:`.Select.prefix_with` :meth:`.Query.prefix_with` :ticket:`2431` Behavioral Changes ================== .. _legacy_is_orphan_addition: The consideration of a "pending" object as an "orphan" has been made more aggressive ------------------------------------------------------------------------------------ This is a late add to the 0.8 series, however it is hoped that the new behavior is generally more consistent and intuitive in a wider variety of situations. The ORM has since at least version 0.4 included behavior such that an object that's "pending", meaning that it's associated with a :class:`.Session` but hasn't been inserted into the database yet, is automatically expunged from the :class:`.Session` when it becomes an "orphan", which means it has been de-associated with a parent object that refers to it with ``delete-orphan`` cascade on the configured :func:`.relationship`. This behavior is intended to approximately mirror the behavior of a persistent (that is, already inserted) object, where the ORM will emit a DELETE for such objects that become orphans based on the interception of detachment events. The behavioral change comes into play for objects that are referred to by multiple kinds of parents that each specify ``delete-orphan``; the typical example is an :ref:`association object ` that bridges two other kinds of objects in a many-to-many pattern. Previously, the behavior was such that the pending object would be expunged only when de-associated with *all* of its parents. With the behavioral change, the pending object is expunged as soon as it is de-associated from *any* of the parents that it was previously associated with. This behavior is intended to more closely match that of persistent objects, which are deleted as soon as they are de-associated from any parent. The rationale for the older behavior dates back at least to version 0.4, and was basically a defensive decision to try to alleviate confusion when an object was still being constructed for INSERT. But the reality is that the object is re-associated with the :class:`.Session` as soon as it is attached to any new parent in any case. It's still possible to flush an object that is not associated with all of its required parents, if the object was either not associated with those parents in the first place, or if it was expunged, but then re-associated with a :class:`.Session` via a subsequent attachment event but still not fully associated. In this situation, it is expected that the database would emit an integrity error, as there are likely NOT NULL foreign key columns that are unpopulated. The ORM makes the decision to let these INSERT attempts occur, based on the judgment that an object that is only partially associated with its required parents but has been actively associated with some of them, is more often than not a user error, rather than an intentional omission which should be silently skipped - silently skipping the INSERT here would make user errors of this nature very hard to debug. The old behavior, for applications that might have been relying upon it, can be re-enabled for any :class:`.Mapper` by specifying the flag ``legacy_is_orphan`` as a mapper option. 
The new behavior allows the following test case to work:: from sqlalchemy import Column, Integer, String, ForeignKey from sqlalchemy.orm import relationship, backref from sqlalchemy.ext.declarative import declarative_base Base = declarative_base() class User(Base): __tablename__ = 'user' id = Column(Integer, primary_key=True) name = Column(String(64)) class UserKeyword(Base): __tablename__ = 'user_keyword' user_id = Column(Integer, ForeignKey('user.id'), primary_key=True) keyword_id = Column(Integer, ForeignKey('keyword.id'), primary_key=True) user = relationship(User, backref=backref("user_keywords", cascade="all, delete-orphan") ) keyword = relationship("Keyword", backref=backref("user_keywords", cascade="all, delete-orphan") ) # uncomment this to enable the old behavior # __mapper_args__ = {"legacy_is_orphan": True} class Keyword(Base): __tablename__ = 'keyword' id = Column(Integer, primary_key=True) keyword = Column('keyword', String(64)) from sqlalchemy import create_engine from sqlalchemy.orm import Session # note we're using Postgresql to ensure that referential integrity # is enforced, for demonstration purposes. e = create_engine("postgresql://scott:tiger@localhost/test", echo=True) Base.metadata.drop_all(e) Base.metadata.create_all(e) session = Session(e) u1 = User(name="u1") k1 = Keyword(keyword="k1") session.add_all([u1, k1]) uk1 = UserKeyword(keyword=k1, user=u1) # previously, if session.flush() were called here, # this operation would succeed, but if session.flush() # were not called here, the operation fails with an # integrity error. # session.flush() del u1.user_keywords[0] session.commit() :ticket:`2655` The after_attach event fires after the item is associated with the Session instead of before; before_attach added ----------------------------------------------------------------------------------------------------------------- Event handlers which use after_attach can now assume the given instance is associated with the given session: :: @event.listens_for(Session, "after_attach") def after_attach(session, instance): assert instance in session Some use cases require that it work this way. However, other use cases require that the item is *not* yet part of the session, such as when a query, intended to load some state required for an instance, emits autoflush first and would otherwise prematurely flush the target object. Those use cases should use the new "before_attach" event: :: @event.listens_for(Session, "before_attach") def before_attach(session, instance): instance.some_necessary_attribute = session.query(Widget).\ filter_by(instance.widget_name).\ first() :ticket:`2464` Query now auto-correlates like a select() does ---------------------------------------------- Previously it was necessary to call :meth:`.Query.correlate` in order to have a column- or WHERE-subquery correlate to the parent: :: subq = session.query(Entity.value).\ filter(Entity.id==Parent.entity_id).\ correlate(Parent).\ as_scalar() session.query(Parent).filter(subq=="some value") This was the opposite behavior of a plain ``select()`` construct which would assume auto-correlation by default. The above statement in 0.8 will correlate automatically: :: subq = session.query(Entity.value).\ filter(Entity.id==Parent.entity_id).\ as_scalar() session.query(Parent).filter(subq=="some value") like in ``select()``, correlation can be disabled by calling ``query.correlate(None)`` or manually set by passing an entity, ``query.correlate(someentity)``. :ticket:`2179` .. 
_correlation_context_specific: Correlation is now always context-specific ------------------------------------------ To allow a wider variety of correlation scenarios, the behavior of :meth:`.Select.correlate` and :meth:`.Query.correlate` has changed slightly such that the SELECT statement will omit the "correlated" target from the FROM clause only if the statement is actually used in that context. Additionally, it's no longer possible for a SELECT statement that's placed as a FROM in an enclosing SELECT statement to "correlate" (i.e. omit) a FROM clause. This change only makes things better as far as rendering SQL, in that it's no longer possible to render illegal SQL where there are insufficient FROM objects relative to what's being selected:: from sqlalchemy.sql import table, column, select t1 = table('t1', column('x')) t2 = table('t2', column('y')) s = select([t1, t2]).correlate(t1) print(s) Prior to this change, the above would return:: SELECT t1.x, t2.y FROM t2 which is invalid SQL as "t1" is not referred to in any FROM clause. Now, in the absence of an enclosing SELECT, it returns:: SELECT t1.x, t2.y FROM t1, t2 Within a SELECT, the correlation takes effect as expected:: s2 = select([t1, t2]).where(t1.c.x == t2.c.y).where(t1.c.x == s) print (s2) SELECT t1.x, t2.y FROM t1, t2 WHERE t1.x = t2.y AND t1.x = (SELECT t1.x, t2.y FROM t2) This change is not expected to impact any existing applications, as the correlation behavior remains identical for properly constructed expressions. Only an application that relies, most likely within a testing scenario, on the invalid string output of a correlated SELECT used in a non-correlating context would see any change. :ticket:`2668` .. _metadata_create_drop_tables: create_all() and drop_all() will now honor an empty list as such ---------------------------------------------------------------- The methods :meth:`.MetaData.create_all` and :meth:`.MetaData.drop_all` will now accept a list of :class:`.Table` objects that is empty, and will not emit any CREATE or DROP statements. Previously, an empty list was interpreted the same as passing ``None`` for a collection, and CREATE/DROP would be emitted for all items unconditionally. This is a bug fix but some applications may have been relying upon the previous behavior. :ticket:`2664` Repaired the Event Targeting of :class:`.InstrumentationEvents` ---------------------------------------------------------------- The :class:`.InstrumentationEvents` series of event targets have documented that the events will only be fired off according to the actual class passed as a target. Through 0.7, this wasn't the case, and any event listener applied to :class:`.InstrumentationEvents` would be invoked for all classes mapped. In 0.8, additional logic has been added so that the events will only invoke for those classes sent in. The ``propagate`` flag here is set to ``True`` by default as class instrumentation events are typically used to intercept classes that aren't yet created. :ticket:`2590` No more magic coercion of "=" to IN when comparing to subquery in MS-SQL ------------------------------------------------------------------------ We found a very old behavior in the MSSQL dialect which would attempt to rescue users from themselves when doing something like this: :: scalar_subq = select([someothertable.c.id]).where(someothertable.c.data=='foo') select([sometable]).where(sometable.c.id==scalar_subq) SQL Server doesn't allow an equality comparison to a scalar SELECT, that is, "x = (SELECT something)".
The MSSQL dialect would convert this to an IN. The same thing would happen however upon a comparison like "(SELECT something) = x", and overall this level of guessing is outside of SQLAlchemy's usual scope so the behavior is removed. :ticket:`2277` Fixed the behavior of :meth:`.Session.is_modified` -------------------------------------------------- The :meth:`.Session.is_modified` method accepts an argument ``passive`` which basically should not be necessary, the argument in all cases should be the value ``True`` - when left at its default of ``False`` it would have the effect of hitting the database, and often triggering autoflush which would itself change the results. In 0.8 the ``passive`` argument will have no effect, and unloaded attributes will never be checked for history since by definition there can be no pending state change on an unloaded attribute. .. seealso:: :meth:`.Session.is_modified` :ticket:`2320` :attr:`.Column.key` is honored in the :attr:`.Select.c` attribute of :func:`.select` with :meth:`.Select.apply_labels` ----------------------------------------------------------------------------------------------------------------------- Users of the expression system know that :meth:`.Select.apply_labels` prepends the table name to each column name, affecting the names that are available from :attr:`.Select.c`: :: s = select([table1]).apply_labels() s.c.table1_col1 s.c.table1_col2 Before 0.8, if the :class:`.Column` had a different :attr:`.Column.key`, this key would be ignored, inconsistently versus when :meth:`.Select.apply_labels` were not used: :: # before 0.8 table1 = Table('t1', metadata, Column('col1', Integer, key='column_one') ) s = select([table1]) s.c.column_one # would be accessible like this s.c.col1 # would raise AttributeError s = select([table1]).apply_labels() s.c.table1_column_one # would raise AttributeError s.c.table1_col1 # would be accessible like this In 0.8, :attr:`.Column.key` is honored in both cases: :: # with 0.8 table1 = Table('t1', metadata, Column('col1', Integer, key='column_one') ) s = select([table1]) s.c.column_one # works s.c.col1 # AttributeError s = select([table1]).apply_labels() s.c.table1_column_one # works s.c.table1_col1 # AttributeError All other behavior regarding "name" and "key" are the same, including that the rendered SQL will still use the form ``_`` - the emphasis here was on preventing the :attr:`.Column.key` contents from being rendered into the ``SELECT`` statement so that there are no issues with special/ non-ascii characters used in the :attr:`.Column.key`. :ticket:`2397` single_parent warning is now an error ------------------------------------- A :func:`.relationship` that is many-to-one or many-to-many and specifies "cascade='all, delete-orphan'", which is an awkward but nonetheless supported use case (with restrictions) will now raise an error if the relationship does not specify the ``single_parent=True`` option. Previously it would only emit a warning, but a failure would follow almost immediately within the attribute system in any case. :ticket:`2405` Adding the ``inspector`` argument to the ``column_reflect`` event ----------------------------------------------------------------- 0.7 added a new event called ``column_reflect``, provided so that the reflection of columns could be augmented as each one were reflected. 
We got this event slightly wrong in that the event gave no way to get at the current ``Inspector`` and ``Connection`` being used for the reflection, in the case that additional information from the database is needed. As this is a new event not widely used yet, we'll be adding the ``inspector`` argument into it directly: :: @event.listens_for(Table, "column_reflect") def listen_for_col(inspector, table, column_info): # ... :ticket:`2418` Disabling auto-detect of collations, casing for MySQL ----------------------------------------------------- The MySQL dialect does two calls, one very expensive, to load all possible collations from the database as well as information on casing, the first time an ``Engine`` connects. Neither of these collections are used for any SQLAlchemy functions, so these calls will be changed to no longer be emitted automatically. Applications that might have relied on these collections being present on ``engine.dialect`` will need to call upon ``_detect_collations()`` and ``_detect_casing()`` directly. :ticket:`2404` "Unconsumed column names" warning becomes an exception ------------------------------------------------------ Referring to a non-existent column in an ``insert()`` or ``update()`` construct will raise an error instead of a warning: :: t1 = table('t1', column('x')) t1.insert().values(x=5, z=5) # raises "Unconsumed column names: z" :ticket:`2415` Inspector.get_primary_keys() is deprecated, use Inspector.get_pk_constraint --------------------------------------------------------------------------- These two methods on ``Inspector`` were redundant, where ``get_primary_keys()`` would return the same information as ``get_pk_constraint()`` minus the name of the constraint: :: >>> insp.get_primary_keys() ["a", "b"] >>> insp.get_pk_constraint() {"name":"pk_constraint", "constrained_columns":["a", "b"]} :ticket:`2422` Case-insensitive result row names will be disabled in most cases ---------------------------------------------------------------- A very old behavior, the column names in ``RowProxy`` were always compared case-insensitively: :: >>> row = result.fetchone() >>> row['foo'] == row['FOO'] == row['Foo'] True This was for the benefit of a few dialects which in the early days needed this, like Oracle and Firebird, but in modern usage we have more accurate ways of dealing with the case-insensitive behavior of these two platforms. Going forward, this behavior will be available only optionally, by passing the flag ```case_sensitive=False``` to ```create_engine()```, but otherwise column names requested from the row must match as far as casing. :ticket:`2423` ``InstrumentationManager`` and alternate class instrumentation is now an extension ---------------------------------------------------------------------------------- The ``sqlalchemy.orm.interfaces.InstrumentationManager`` class is moved to ``sqlalchemy.ext.instrumentation.InstrumentationManager``. The "alternate instrumentation" system was built for the benefit of a very small number of installations that needed to work with existing or unusual class instrumentation systems, and generally is very seldom used. The complexity of this system has been exported to an ``ext.`` module. It remains unused until once imported, typically when a third party library imports ``InstrumentationManager``, at which point it is injected back into ``sqlalchemy.orm`` by replacing the default ``InstrumentationFactory`` with ``ExtendedInstrumentationRegistry``. 
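For code that made use of this system, the change amounts to an import from the new location (a sketch; the subclass name is hypothetical)::

    # formerly sqlalchemy.orm.interfaces.InstrumentationManager
    from sqlalchemy.ext.instrumentation import InstrumentationManager

    class MyClassManager(InstrumentationManager):
        # custom instrumentation hooks would be overridden here
        pass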
Removed ======= SQLSoup ------- SQLSoup is a handy package that presents an alternative interface on top of the SQLAlchemy ORM. SQLSoup is now moved into its own project and documented/released separately; see https://bitbucket.org/zzzeek/sqlsoup. SQLSoup is a very simple tool that could also benefit from contributors who are interested in its style of usage. :ticket:`2262` MutableType ----------- The older "mutable" system within the SQLAlchemy ORM has been removed. This refers to the ``MutableType`` interface which was applied to types such as ``PickleType`` and conditionally to ``TypeDecorator``, and since very early SQLAlchemy versions has provided a way for the ORM to detect changes in so-called "mutable" data structures such as JSON structures and pickled objects. However, the implementation was never reasonable and forced a very inefficient mode of usage on the unit-of-work which caused an expensive scan of all objects to take place during flush. In 0.7, the `sqlalchemy.ext.mutable `_ extension was introduced so that user-defined datatypes can appropriately send events to the unit of work as changes occur. Today, usage of ``MutableType`` is expected to be low, as warnings have been in place for some years now regarding its inefficiency. :ticket:`2442` sqlalchemy.exceptions (has been sqlalchemy.exc for years) --------------------------------------------------------- We had left in an alias ``sqlalchemy.exceptions`` to attempt to make it slightly easier for some very old libraries that hadn't yet been upgraded to use ``sqlalchemy.exc``. Some users are still being confused by it however so in 0.8 we're taking it out entirely to eliminate any of that confusion. :ticket:`2433` SQLAlchemy-0.8.4/doc/_sources/contents.txt0000644000076500000240000000052012251147171021214 0ustar classicstaff00000000000000.. _contents: Table of Contents ================= Full table of contents. For a high level overview of all documentation, see :ref:`index_toplevel`. .. toctree:: :maxdepth: 3 intro orm/index core/index dialects/index changelog/index Indices and tables ------------------ * :ref:`genindex` * :ref:`search` SQLAlchemy-0.8.4/doc/_sources/copyright.txt0000644000076500000240000000240212251147171021370 0ustar classicstaff00000000000000:orphan: ==================== Appendix: Copyright ==================== This is the MIT license: ``_ Copyright (c) 2005-2013 Michael Bayer and contributors. SQLAlchemy is a trademark of Michael Bayer. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
SQLAlchemy-0.8.4/doc/_sources/core/0000755000076500000240000000000012251151573017552 5ustar classicstaff00000000000000SQLAlchemy-0.8.4/doc/_sources/core/compiler.txt0000644000076500000240000000027512251147171022130 0ustar classicstaff00000000000000.. _sqlalchemy.ext.compiler_toplevel: Custom SQL Constructs and Compilation Extension =============================================== .. automodule:: sqlalchemy.ext.compiler :members:SQLAlchemy-0.8.4/doc/_sources/core/connections.txt0000644000076500000240000005340412251147171022642 0ustar classicstaff00000000000000.. _connections_toplevel: ===================================== Working with Engines and Connections ===================================== .. module:: sqlalchemy.engine This section details direct usage of the :class:`.Engine`, :class:`.Connection`, and related objects. Its important to note that when using the SQLAlchemy ORM, these objects are not generally accessed; instead, the :class:`.Session` object is used as the interface to the database. However, for applications that are built around direct usage of textual SQL statements and/or SQL expression constructs without involvement by the ORM's higher level management services, the :class:`.Engine` and :class:`.Connection` are king (and queen?) - read on. Basic Usage =========== Recall from :doc:`/core/engines` that an :class:`.Engine` is created via the :func:`.create_engine` call:: engine = create_engine('mysql://scott:tiger@localhost/test') The typical usage of :func:`.create_engine()` is once per particular database URL, held globally for the lifetime of a single application process. A single :class:`.Engine` manages many individual DBAPI connections on behalf of the process and is intended to be called upon in a concurrent fashion. The :class:`.Engine` is **not** synonymous to the DBAPI ``connect`` function, which represents just one connection resource - the :class:`.Engine` is most efficient when created just once at the module level of an application, not per-object or per-function call. For a multiple-process application that uses the ``os.fork`` system call, or for example the Python ``multiprocessing`` module, it's usually required that a separate :class:`.Engine` be used for each child process. This is because the :class:`.Engine` maintains a reference to a connection pool that ultimately references DBAPI connections - these tend to not be portable across process boundaries. An :class:`.Engine` that is configured not to use pooling (which is achieved via the usage of :class:`.NullPool`) does not have this requirement. The engine can be used directly to issue SQL to the database. The most generic way is first procure a connection resource, which you get via the :meth:`.Engine.connect` method:: connection = engine.connect() result = connection.execute("select username from users") for row in result: print "username:", row['username'] connection.close() The connection is an instance of :class:`.Connection`, which is a **proxy** object for an actual DBAPI connection. The DBAPI connection is retrieved from the connection pool at the point at which :class:`.Connection` is created. The returned result is an instance of :class:`.ResultProxy`, which references a DBAPI cursor and provides a largely compatible interface with that of the DBAPI cursor. The DBAPI cursor will be closed by the :class:`.ResultProxy` when all of its result rows (if any) are exhausted. 
A :class:`.ResultProxy` that returns no rows, such as that of an UPDATE statement (without any returned rows), releases cursor resources immediately upon construction. When the :meth:`~.Connection.close` method is called, the referenced DBAPI connection is :term:`released` to the connection pool. From the perspective of the database itself, nothing is actually "closed", assuming pooling is in use. The pooling mechanism issues a ``rollback()`` call on the DBAPI connection so that any transactional state or locks are removed, and the connection is ready for its next usage. The above procedure can be performed in a shorthand way by using the :meth:`~.Engine.execute` method of :class:`.Engine` itself:: result = engine.execute("select username from users") for row in result: print "username:", row['username'] Where above, the :meth:`~.Engine.execute` method acquires a new :class:`.Connection` on its own, executes the statement with that object, and returns the :class:`.ResultProxy`. In this case, the :class:`.ResultProxy` contains a special flag known as ``close_with_result``, which indicates that when its underlying DBAPI cursor is closed, the :class:`.Connection` object itself is also closed, which again returns the DBAPI connection to the connection pool, releasing transactional resources. If the :class:`.ResultProxy` potentially has rows remaining, it can be instructed to close out its resources explicitly:: result.close() If the :class:`.ResultProxy` has pending rows remaining and is dereferenced by the application without being closed, Python garbage collection will ultimately close out the cursor as well as trigger a return of the pooled DBAPI connection resource to the pool (SQLAlchemy achieves this by the usage of weakref callbacks - *never* the ``__del__`` method) - however it's never a good idea to rely upon Python garbage collection to manage resources. Our example above illustrated the execution of a textual SQL string. The :meth:`~.Connection.execute` method can of course accommodate more than that, including the variety of SQL expression constructs described in :ref:`sqlexpression_toplevel`. Using Transactions ================== .. note:: This section describes how to use transactions when working directly with :class:`.Engine` and :class:`.Connection` objects. When using the SQLAlchemy ORM, the public API for transaction control is via the :class:`.Session` object, which makes usage of the :class:`.Transaction` object internally. See :ref:`unitofwork_transaction` for further information. The :class:`~sqlalchemy.engine.Connection` object provides a :meth:`~.Connection.begin` method which returns a :class:`.Transaction` object. 
This object is usually used within a try/except clause so that it is guaranteed to invoke :meth:`.Transaction.rollback` or :meth:`.Transaction.commit`:: connection = engine.connect() trans = connection.begin() try: r1 = connection.execute(table1.select()) connection.execute(table1.insert(), col1=7, col2='this is some data') trans.commit() except: trans.rollback() raise The above block can be created more succinctly using context managers, either given an :class:`.Engine`:: # runs a transaction with engine.begin() as connection: r1 = connection.execute(table1.select()) connection.execute(table1.insert(), col1=7, col2='this is some data') Or from the :class:`.Connection`, in which case the :class:`.Transaction` object is available as well:: with connection.begin() as trans: r1 = connection.execute(table1.select()) connection.execute(table1.insert(), col1=7, col2='this is some data') .. _connections_nested_transactions: Nesting of Transaction Blocks ------------------------------ The :class:`.Transaction` object also handles "nested" behavior by keeping track of the outermost begin/commit pair. In this example, two functions both issue a transaction on a :class:`.Connection`, but only the outermost :class:`.Transaction` object actually takes effect when it is committed. .. sourcecode:: python+sql # method_a starts a transaction and calls method_b def method_a(connection): trans = connection.begin() # open a transaction try: method_b(connection) trans.commit() # transaction is committed here except: trans.rollback() # this rolls back the transaction unconditionally raise # method_b also starts a transaction def method_b(connection): trans = connection.begin() # open a transaction - this runs in the context of method_a's transaction try: connection.execute("insert into mytable values ('bat', 'lala')") connection.execute(mytable.insert(), col1='bat', col2='lala') trans.commit() # transaction is not committed yet except: trans.rollback() # this rolls back the transaction unconditionally raise # open a Connection and call method_a conn = engine.connect() method_a(conn) conn.close() Above, ``method_a`` is called first, which calls ``connection.begin()``. Then it calls ``method_b``. When ``method_b`` calls ``connection.begin()``, it just increments a counter that is decremented when it calls ``commit()``. If either ``method_a`` or ``method_b`` calls ``rollback()``, the whole transaction is rolled back. The transaction is not committed until ``method_a`` calls the ``commit()`` method. This "nesting" behavior allows the creation of functions which "guarantee" that a transaction will be used if one was not already available, but will automatically participate in an enclosing transaction if one exists. .. index:: single: thread safety; transactions .. _autocommit: Understanding Autocommit ======================== The previous transaction example illustrates how to use :class:`.Transaction` so that several executions can take part in the same transaction. What happens when we issue an INSERT, UPDATE or DELETE call without using :class:`.Transaction`? While some DBAPI implementations provide various special "non-transactional" modes, the core behavior of DBAPI per PEP-0249 is that a *transaction is always in progress*, providing only ``rollback()`` and ``commit()`` methods but no ``begin()``. SQLAlchemy assumes this is the case for any given DBAPI. Given this requirement, SQLAlchemy implements its own "autocommit" feature which works completely consistently across all backends. 
This is achieved by detecting statements which represent data-changing operations, i.e. INSERT, UPDATE, DELETE, as well as data definition language (DDL) statements such as CREATE TABLE, ALTER TABLE, and then issuing a COMMIT automatically if no transaction is in progress. The detection is based on the presence of the ``autocommit=True`` execution option on the statement. If the statement is a text-only statement and the flag is not set, a regular expression is used to detect INSERT, UPDATE, DELETE, as well as a variety of other commands for a particular backend:: conn = engine.connect() conn.execute("INSERT INTO users VALUES (1, 'john')") # autocommits The "autocommit" feature is only in effect when no :class:`.Transaction` has otherwise been declared. This means the feature is not generally used with the ORM, as the :class:`.Session` object by default always maintains an ongoing :class:`.Transaction`. Full control of the "autocommit" behavior is available using the generative :meth:`.Connection.execution_options` method provided on :class:`.Connection`, :class:`.Engine`, :class:`.Executable`, using the "autocommit" flag which will turn on or off the autocommit for the selected scope. For example, a :func:`.text` construct representing a stored procedure that commits might use it so that a SELECT statement will issue a COMMIT:: engine.execute(text("SELECT my_mutating_procedure()").execution_options(autocommit=True)) .. _dbengine_implicit: Connectionless Execution, Implicit Execution ============================================= Recall from the first section we mentioned executing with and without explicit usage of :class:`.Connection`. "Connectionless" execution refers to the usage of the ``execute()`` method on an object which is not a :class:`.Connection`. This was illustrated using the :meth:`~.Engine.execute` method of :class:`.Engine`:: result = engine.execute("select username from users") for row in result: print "username:", row['username'] In addition to "connectionless" execution, it is also possible to use the :meth:`~.Executable.execute` method of any :class:`.Executable` construct, which is a marker for SQL expression objects that support execution. The SQL expression object itself references an :class:`.Engine` or :class:`.Connection` known as the **bind**, which it uses in order to provide so-called "implicit" execution services. Given a table as below:: from sqlalchemy import MetaData, Table, Column, Integer meta = MetaData() users_table = Table('users', meta, Column('id', Integer, primary_key=True), Column('name', String(50)) ) Explicit execution delivers the SQL text or constructed SQL expression to the :meth:`~.Connection.execute` method of :class:`~sqlalchemy.engine.Connection`: .. sourcecode:: python+sql engine = create_engine('sqlite:///file.db') connection = engine.connect() result = connection.execute(users_table.select()) for row in result: # .... connection.close() Explicit, connectionless execution delivers the expression to the :meth:`~.Engine.execute` method of :class:`~sqlalchemy.engine.Engine`: .. sourcecode:: python+sql engine = create_engine('sqlite:///file.db') result = engine.execute(users_table.select()) for row in result: # .... result.close() Implicit execution is also connectionless, and makes usage of the :meth:`~.Executable.execute` method on the expression itself. This method is provided as part of the :class:`.Executable` class, which refers to a SQL statement that is sufficient for being invoked against the database. 
The method makes usage of the assumption that either an :class:`~sqlalchemy.engine.Engine` or :class:`~sqlalchemy.engine.Connection` has been **bound** to the expression object. By "bound" we mean that the special attribute :attr:`.MetaData.bind` has been used to associate a series of :class:`.Table` objects and all SQL constructs derived from them with a specific engine:: engine = create_engine('sqlite:///file.db') meta.bind = engine result = users_table.select().execute() for row in result: # .... result.close() Above, we associate an :class:`.Engine` with a :class:`.MetaData` object using the special attribute :attr:`.MetaData.bind`. The :func:`.select` construct produced from the :class:`.Table` object has a method :meth:`~.Executable.execute`, which will search for an :class:`.Engine` that's "bound" to the :class:`.Table`. Overall, the usage of "bound metadata" has three general effects: * SQL statement objects gain an :meth:`.Executable.execute` method which automatically locates a "bind" with which to execute themselves. * The ORM :class:`.Session` object supports using "bound metadata" in order to establish which :class:`.Engine` should be used to invoke SQL statements on behalf of a particular mapped class, though the :class:`.Session` also features its own explicit system of establishing complex :class:`.Engine`/ mapped class configurations. * The :meth:`.MetaData.create_all`, :meth:`.MetaData.drop_all`, :meth:`.Table.create`, :meth:`.Table.drop`, and "autoload" features all make usage of the bound :class:`.Engine` automatically without the need to pass it explicitly. .. note:: The concepts of "bound metadata" and "implicit execution" are not emphasized in modern SQLAlchemy. While they offer some convenience, they are no longer required by any API and are never necessary. In applications where multiple :class:`.Engine` objects are present, each one logically associated with a certain set of tables (i.e. *vertical sharding*), the "bound metadata" technique can be used so that individual :class:`.Table` can refer to the appropriate :class:`.Engine` automatically; in particular this is supported within the ORM via the :class:`.Session` object as a means to associate :class:`.Table` objects with an appropriate :class:`.Engine`, as an alternative to using the bind arguments accepted directly by the :class:`.Session`. However, the "implicit execution" technique is not at all appropriate for use with the ORM, as it bypasses the transactional context maintained by the :class:`.Session`. Overall, in the *vast majority* of cases, "bound metadata" and "implicit execution" are **not useful**. While "bound metadata" has a marginal level of usefulness with regards to ORM configuration, "implicit execution" is a very old usage pattern that in most cases is more confusing than it is helpful, and its usage is discouraged. Both patterns seem to encourage the overuse of expedient "short cuts" in application design which lead to problems later on. Modern SQLAlchemy usage, especially the ORM, places a heavy stress on working within the context of a transaction at all times; the "implicit execution" concept makes the job of associating statement execution with a particular transaction much more difficult. The :meth:`.Executable.execute` method on a particular SQL statement usually implies that the execution is not part of any particular transaction, which is usually not the desired effect. 
In both "connectionless" examples, the :class:`~sqlalchemy.engine.Connection` is created behind the scenes; the :class:`~sqlalchemy.engine.ResultProxy` returned by the ``execute()`` call references the :class:`~sqlalchemy.engine.Connection` used to issue the SQL statement. When the :class:`.ResultProxy` is closed, the underlying :class:`.Connection` is closed for us, resulting in the DBAPI connection being returned to the pool with transactional resources removed. .. _threadlocal_strategy: Using the Threadlocal Execution Strategy ======================================== The "threadlocal" engine strategy is an optional feature which can be used by non-ORM applications to associate transactions with the current thread, such that all parts of the application can participate in that transaction implicitly without the need to explicitly reference a :class:`.Connection`. .. note:: The "threadlocal" feature is generally discouraged. It's designed for a particular pattern of usage which is generally considered as a legacy pattern. It has **no impact** on the "thread safety" of SQLAlchemy components or one's application. It also should not be used when using an ORM :class:`~sqlalchemy.orm.session.Session` object, as the :class:`~sqlalchemy.orm.session.Session` itself represents an ongoing transaction and itself handles the job of maintaining connection and transactional resources. Enabling ``threadlocal`` is achieved as follows:: db = create_engine('mysql://localhost/test', strategy='threadlocal') The above :class:`.Engine` will now acquire a :class:`.Connection` using connection resources derived from a thread-local variable whenever :meth:`.Engine.execute` or :meth:`.Engine.contextual_connect` is called. This connection resource is maintained as long as it is referenced, which allows multiple points of an application to share a transaction while using connectionless execution:: def call_operation1(): engine.execute("insert into users values (?, ?)", 1, "john") def call_operation2(): users.update(users.c.user_id==5).execute(name='ed') db.begin() try: call_operation1() call_operation2() db.commit() except: db.rollback() Explicit execution can be mixed with connectionless execution by using the :meth:`.Engine.connect` method to acquire a :class:`.Connection` that is not part of the threadlocal scope:: db.begin() conn = db.connect() try: conn.execute(log_table.insert(), message="Operation started") call_operation1() call_operation2() db.commit() conn.execute(log_table.insert(), message="Operation succeeded") except: db.rollback() conn.execute(log_table.insert(), message="Operation failed") finally: conn.close() To access the :class:`.Connection` that is bound to the threadlocal scope, call :meth:`.Engine.contextual_connect`:: conn = db.contextual_connect() call_operation3(conn) conn.close() Calling :meth:`~.Connection.close` on the "contextual" connection does not :term:`release` its resources until all other usages of that resource are closed as well, including that any ongoing transactions are rolled back or committed. Registering New Dialects ======================== The :func:`.create_engine` function call locates the given dialect using setuptools entrypoints. These entry points can be established for third party dialects within the setup.py script. For example, to create a new dialect "foodialect://", the steps are as follows: 1. Create a package called ``foodialect``. 2. 
The package should have a module containing the dialect class, which is typically a subclass of :class:`sqlalchemy.engine.default.DefaultDialect`. In this example let's say it's called ``FooDialect`` and its module is accessed via ``foodialect.dialect``. 3. The entry point can be established in setup.py as follows:: entry_points=""" [sqlalchemy.dialects] foodialect = foodialect.dialect:FooDialect """ If the dialect is providing support for a particular DBAPI on top of an existing SQLAlchemy-supported database, the name can be given including a database-qualification. For example, if ``FooDialect`` were in fact a MySQL dialect, the entry point could be established like this:: entry_points=""" [sqlalchemy.dialects] mysql.foodialect = foodialect.dialect:FooDialect """ The above entrypoint would then be accessed as ``create_engine("mysql+foodialect://")``. Registering Dialects In-Process ------------------------------- SQLAlchemy also allows a dialect to be registered within the current process, bypassing the need for separate installation. Use the ``register()`` function as follows:: from sqlalchemy.dialects import registry registry.register("mysql.foodialect", "myapp.dialect", "MyMySQLDialect") The above will respond to ``create_engine("mysql+foodialect://")`` and load the ``MyMySQLDialect`` class from the ``myapp.dialect`` module. .. versionadded:: 0.8 Connection / Engine API ======================= .. autoclass:: Connection :members: .. autoclass:: Connectable :members: .. autoclass:: Engine :members: .. autoclass:: NestedTransaction :members: .. autoclass:: sqlalchemy.engine.ResultProxy :members: .. autoclass:: sqlalchemy.engine.RowProxy :members: .. autoclass:: Transaction :members: .. autoclass:: TwoPhaseTransaction :members: SQLAlchemy-0.8.4/doc/_sources/core/constraints.txt0000644000076500000240000003335012251147171022665 0ustar classicstaff00000000000000.. _metadata_constraints_toplevel: .. _metadata_constraints: .. module:: sqlalchemy.schema ================================= Defining Constraints and Indexes ================================= .. _metadata_foreignkeys: This section will discuss SQL :term:`constraints` and indexes. In SQLAlchemy the key classes include :class:`.ForeignKeyConstraint` and :class:`.Index`. Defining Foreign Keys --------------------- A *foreign key* in SQL is a table-level construct that constrains one or more columns in that table to only allow values that are present in a different set of columns, typically but not always located on a different table. We call the columns which are constrained the *foreign key* columns and the columns which they are constrained towards the *referenced* columns. The referenced columns almost always define the primary key for their owning table, though there are exceptions to this. The foreign key is the "joint" that connects together pairs of rows which have a relationship with each other, and SQLAlchemy assigns very deep importance to this concept in virtually every area of its operation. In SQLAlchemy as well as in DDL, foreign key constraints can be defined as additional attributes within the table clause, or for single-column foreign keys they may optionally be specified within the definition of a single column. 
The single column foreign key is more common, and at the column level is specified by constructing a :class:`~sqlalchemy.schema.ForeignKey` object as an argument to a :class:`~sqlalchemy.schema.Column` object:: user_preference = Table('user_preference', metadata, Column('pref_id', Integer, primary_key=True), Column('user_id', Integer, ForeignKey("user.user_id"), nullable=False), Column('pref_name', String(40), nullable=False), Column('pref_value', String(100)) ) Above, we define a new table ``user_preference`` for which each row must contain a value in the ``user_id`` column that also exists in the ``user`` table's ``user_id`` column. The argument to :class:`~sqlalchemy.schema.ForeignKey` is most commonly a string of the form *.*, or for a table in a remote schema or "owner" of the form *..*. It may also be an actual :class:`~sqlalchemy.schema.Column` object, which as we'll see later is accessed from an existing :class:`~sqlalchemy.schema.Table` object via its ``c`` collection:: ForeignKey(user.c.user_id) The advantage to using a string is that the in-python linkage between ``user`` and ``user_preference`` is resolved only when first needed, so that table objects can be easily spread across multiple modules and defined in any order. Foreign keys may also be defined at the table level, using the :class:`~sqlalchemy.schema.ForeignKeyConstraint` object. This object can describe a single- or multi-column foreign key. A multi-column foreign key is known as a *composite* foreign key, and almost always references a table that has a composite primary key. Below we define a table ``invoice`` which has a composite primary key:: invoice = Table('invoice', metadata, Column('invoice_id', Integer, primary_key=True), Column('ref_num', Integer, primary_key=True), Column('description', String(60), nullable=False) ) And then a table ``invoice_item`` with a composite foreign key referencing ``invoice``:: invoice_item = Table('invoice_item', metadata, Column('item_id', Integer, primary_key=True), Column('item_name', String(60), nullable=False), Column('invoice_id', Integer, nullable=False), Column('ref_num', Integer, nullable=False), ForeignKeyConstraint(['invoice_id', 'ref_num'], ['invoice.invoice_id', 'invoice.ref_num']) ) It's important to note that the :class:`~sqlalchemy.schema.ForeignKeyConstraint` is the only way to define a composite foreign key. While we could also have placed individual :class:`~sqlalchemy.schema.ForeignKey` objects on both the ``invoice_item.invoice_id`` and ``invoice_item.ref_num`` columns, SQLAlchemy would not be aware that these two values should be paired together - it would be two individual foreign key constraints instead of a single composite foreign key referencing two columns. .. _use_alter: Creating/Dropping Foreign Key Constraints via ALTER ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In all the above examples, the :class:`~sqlalchemy.schema.ForeignKey` object causes the "REFERENCES" keyword to be added inline to a column definition within a "CREATE TABLE" statement when :func:`~sqlalchemy.schema.MetaData.create_all` is issued, and :class:`~sqlalchemy.schema.ForeignKeyConstraint` invokes the "CONSTRAINT" keyword inline with "CREATE TABLE". There are some cases where this is undesireable, particularly when two tables reference each other mutually, each with a foreign key referencing the other. In such a situation at least one of the foreign key constraints must be generated after both tables have been built. 
To support such a scheme, :class:`~sqlalchemy.schema.ForeignKey` and
:class:`~sqlalchemy.schema.ForeignKeyConstraint` offer the flag
``use_alter=True``. When using this flag, the constraint will be generated
using a definition similar to "ALTER TABLE ADD CONSTRAINT ...". Since a name
is required, the ``name`` attribute must also be specified. For example::

    node = Table('node', meta,
        Column('node_id', Integer, primary_key=True),
        Column('primary_element', Integer,
            ForeignKey('element.element_id', use_alter=True, name='fk_node_element_id')
        )
    )

    element = Table('element', meta,
        Column('element_id', Integer, primary_key=True),
        Column('parent_node_id', Integer),
        ForeignKeyConstraint(
            ['parent_node_id'],
            ['node.node_id'],
            use_alter=True,
            name='fk_element_parent_node_id'
        )
    )

ON UPDATE and ON DELETE
~~~~~~~~~~~~~~~~~~~~~~~

Most databases support *cascading* of foreign key values, that is, when a
parent row is updated the new value is placed in child rows, or when the
parent row is deleted all corresponding child rows are set to null or
deleted. In data definition language these are specified using phrases like
"ON UPDATE CASCADE", "ON DELETE CASCADE", and "ON DELETE SET NULL",
corresponding to foreign key constraints. The phrase after "ON UPDATE" or
"ON DELETE" may also allow other phrases that are specific to the database
in use. The :class:`~sqlalchemy.schema.ForeignKey` and
:class:`~sqlalchemy.schema.ForeignKeyConstraint` objects support the
generation of this clause via the ``onupdate`` and ``ondelete`` keyword
arguments. The value is any string which will be output after the
appropriate "ON UPDATE" or "ON DELETE" phrase::

    child = Table('child', meta,
        Column('id', Integer,
            ForeignKey('parent.id', onupdate="CASCADE", ondelete="CASCADE"),
            primary_key=True
        )
    )

    composite = Table('composite', meta,
        Column('id', Integer, primary_key=True),
        Column('rev_id', Integer),
        Column('note_id', Integer),
        ForeignKeyConstraint(
            ['rev_id', 'note_id'],
            ['revisions.id', 'revisions.note_id'],
            onupdate="CASCADE", ondelete="SET NULL"
        )
    )

Note that these clauses are not supported on SQLite, and require ``InnoDB``
tables when used with MySQL. They may also not be supported on other
databases.

UNIQUE Constraint
-----------------

Unique constraints can be created anonymously on a single column using the
``unique`` keyword on :class:`~sqlalchemy.schema.Column`. Explicitly named
unique constraints and/or those with multiple columns are created via the
:class:`~sqlalchemy.schema.UniqueConstraint` table-level construct.

.. sourcecode:: python+sql

    meta = MetaData()
    mytable = Table('mytable', meta,
        # per-column anonymous unique constraint
        Column('col1', Integer, unique=True),

        Column('col2', Integer),
        Column('col3', Integer),

        # explicit/composite unique constraint.  'name' is optional.
        UniqueConstraint('col2', 'col3', name='uix_1')
    )

CHECK Constraint
----------------

Check constraints can be named or unnamed and can be created at the Column
or Table level, using the :class:`~sqlalchemy.schema.CheckConstraint`
construct. The text of the check constraint is passed directly through to
the database, so there is limited "database independent" behavior. Column
level check constraints generally should only refer to the column on which
they are placed, while table level constraints can refer to any columns in
the table. Note that some databases, such as MySQL, do not actively support
check constraints. ..
sourcecode:: python+sql meta = MetaData() mytable = Table('mytable', meta, # per-column CHECK constraint Column('col1', Integer, CheckConstraint('col1>5')), Column('col2', Integer), Column('col3', Integer), # table level CHECK constraint. 'name' is optional. CheckConstraint('col2 > col3 + 5', name='check1') ) {sql}mytable.create(engine) CREATE TABLE mytable ( col1 INTEGER CHECK (col1>5), col2 INTEGER, col3 INTEGER, CONSTRAINT check1 CHECK (col2 > col3 + 5) ){stop} Setting up Constraints when using the Declarative ORM Extension ---------------------------------------------------------------- The :class:`.Table` is the SQLAlchemy Core construct that allows one to define table metadata, which among other things can be used by the SQLAlchemy ORM as a target to map a class. The :ref:`Declarative ` extension allows the :class:`.Table` object to be created automatically, given the contents of the table primarily as a mapping of :class:`.Column` objects. To apply table-level constraint objects such as :class:`.ForeignKeyConstraint` to a table defined using Declarative, use the ``__table_args__`` attribute, described at :ref:`declarative_table_args`. Constraints API --------------- .. autoclass:: Constraint .. autoclass:: CheckConstraint .. autoclass:: ColumnCollectionConstraint .. autoclass:: ForeignKey :members: .. autoclass:: ForeignKeyConstraint :members: .. autoclass:: PrimaryKeyConstraint .. autoclass:: UniqueConstraint .. _schema_indexes: Indexes ------- Indexes can be created anonymously (using an auto-generated name ``ix_``) for a single column using the inline ``index`` keyword on :class:`~sqlalchemy.schema.Column`, which also modifies the usage of ``unique`` to apply the uniqueness to the index itself, instead of adding a separate UNIQUE constraint. For indexes with specific names or which encompass more than one column, use the :class:`~sqlalchemy.schema.Index` construct, which requires a name. Below we illustrate a :class:`~sqlalchemy.schema.Table` with several :class:`~sqlalchemy.schema.Index` objects associated. The DDL for "CREATE INDEX" is issued right after the create statements for the table: .. sourcecode:: python+sql meta = MetaData() mytable = Table('mytable', meta, # an indexed column, with index "ix_mytable_col1" Column('col1', Integer, index=True), # a uniquely indexed column with index "ix_mytable_col2" Column('col2', Integer, index=True, unique=True), Column('col3', Integer), Column('col4', Integer), Column('col5', Integer), Column('col6', Integer), ) # place an index on col3, col4 Index('idx_col34', mytable.c.col3, mytable.c.col4) # place a unique index on col5, col6 Index('myindex', mytable.c.col5, mytable.c.col6, unique=True) {sql}mytable.create(engine) CREATE TABLE mytable ( col1 INTEGER, col2 INTEGER, col3 INTEGER, col4 INTEGER, col5 INTEGER, col6 INTEGER ) CREATE INDEX ix_mytable_col1 ON mytable (col1) CREATE UNIQUE INDEX ix_mytable_col2 ON mytable (col2) CREATE UNIQUE INDEX myindex ON mytable (col5, col6) CREATE INDEX idx_col34 ON mytable (col3, col4){stop} Note in the example above, the :class:`.Index` construct is created externally to the table which it corresponds, using :class:`.Column` objects directly. 
:class:`.Index` also supports "inline" definition inside the :class:`.Table`, using string names to identify columns:: meta = MetaData() mytable = Table('mytable', meta, Column('col1', Integer), Column('col2', Integer), Column('col3', Integer), Column('col4', Integer), # place an index on col1, col2 Index('idx_col12', 'col1', 'col2'), # place a unique index on col3, col4 Index('idx_col34', 'col3', 'col4', unique=True) ) .. versionadded:: 0.7 Support of "inline" definition inside the :class:`.Table` for :class:`.Index`\ . The :class:`~sqlalchemy.schema.Index` object also supports its own ``create()`` method: .. sourcecode:: python+sql i = Index('someindex', mytable.c.col5) {sql}i.create(engine) CREATE INDEX someindex ON mytable (col5){stop} .. _schema_indexes_functional: Functional Indexes ~~~~~~~~~~~~~~~~~~~ :class:`.Index` supports SQL and function expressions, as supported by the target backend. To create an index against a column using a descending value, the :meth:`.ColumnElement.desc` modifier may be used:: from sqlalchemy import Index Index('someindex', mytable.c.somecol.desc()) Or with a backend that supports functional indexes such as Postgresql, a "case insensitive" index can be created using the ``lower()`` function:: from sqlalchemy import func, Index Index('someindex', func.lower(mytable.c.somecol)) .. versionadded:: 0.8 :class:`.Index` supports SQL expressions and functions as well as plain columns. Index API --------- .. autoclass:: Index :members: SQLAlchemy-0.8.4/doc/_sources/core/ddl.txt0000644000076500000240000002103012251147171021051 0ustar classicstaff00000000000000.. _metadata_ddl_toplevel: .. _metadata_ddl: .. module:: sqlalchemy.schema Customizing DDL =============== In the preceding sections we've discussed a variety of schema constructs including :class:`~sqlalchemy.schema.Table`, :class:`~sqlalchemy.schema.ForeignKeyConstraint`, :class:`~sqlalchemy.schema.CheckConstraint`, and :class:`~sqlalchemy.schema.Sequence`. Throughout, we've relied upon the ``create()`` and :func:`~sqlalchemy.schema.MetaData.create_all` methods of :class:`~sqlalchemy.schema.Table` and :class:`~sqlalchemy.schema.MetaData` in order to issue data definition language (DDL) for all constructs. When issued, a pre-determined order of operations is invoked, and DDL to create each table is created unconditionally including all constraints and other objects associated with it. For more complex scenarios where database-specific DDL is required, SQLAlchemy offers two techniques which can be used to add any DDL based on any condition, either accompanying the standard generation of tables or by itself. .. _schema_ddl_sequences: Controlling DDL Sequences ------------------------- The ``sqlalchemy.schema`` package contains SQL expression constructs that provide DDL expressions. For example, to produce a ``CREATE TABLE`` statement: .. sourcecode:: python+sql from sqlalchemy.schema import CreateTable {sql}engine.execute(CreateTable(mytable)) CREATE TABLE mytable ( col1 INTEGER, col2 INTEGER, col3 INTEGER, col4 INTEGER, col5 INTEGER, col6 INTEGER ){stop} Above, the :class:`~sqlalchemy.schema.CreateTable` construct works like any other expression construct (such as ``select()``, ``table.insert()``, etc.). A full reference of available constructs is in :ref:`schema_api_ddl`. The DDL constructs all extend a common base class which provides the capability to be associated with an individual :class:`~sqlalchemy.schema.Table` or :class:`~sqlalchemy.schema.MetaData` object, to be invoked upon create/drop events. 
Consider the example of a table which contains a CHECK constraint: .. sourcecode:: python+sql users = Table('users', metadata, Column('user_id', Integer, primary_key=True), Column('user_name', String(40), nullable=False), CheckConstraint('length(user_name) >= 8',name="cst_user_name_length") ) {sql}users.create(engine) CREATE TABLE users ( user_id SERIAL NOT NULL, user_name VARCHAR(40) NOT NULL, PRIMARY KEY (user_id), CONSTRAINT cst_user_name_length CHECK (length(user_name) >= 8) ){stop} The above table contains a column "user_name" which is subject to a CHECK constraint that validates that the length of the string is at least eight characters. When a ``create()`` is issued for this table, DDL for the :class:`~sqlalchemy.schema.CheckConstraint` will also be issued inline within the table definition. The :class:`~sqlalchemy.schema.CheckConstraint` construct can also be constructed externally and associated with the :class:`~sqlalchemy.schema.Table` afterwards:: constraint = CheckConstraint('length(user_name) >= 8',name="cst_user_name_length") users.append_constraint(constraint) So far, the effect is the same. However, if we create DDL elements corresponding to the creation and removal of this constraint, and associate them with the :class:`.Table` as events, these new events will take over the job of issuing DDL for the constraint. Additionally, the constraint will be added via ALTER: .. sourcecode:: python+sql from sqlalchemy import event event.listen( users, "after_create", AddConstraint(constraint) ) event.listen( users, "before_drop", DropConstraint(constraint) ) {sql}users.create(engine) CREATE TABLE users ( user_id SERIAL NOT NULL, user_name VARCHAR(40) NOT NULL, PRIMARY KEY (user_id) ) ALTER TABLE users ADD CONSTRAINT cst_user_name_length CHECK (length(user_name) >= 8){stop} {sql}users.drop(engine) ALTER TABLE users DROP CONSTRAINT cst_user_name_length DROP TABLE users{stop} The real usefulness of the above becomes clearer once we illustrate the :meth:`.DDLElement.execute_if` method. This method returns a modified form of the DDL callable which will filter on criteria before responding to a received event. It accepts a parameter ``dialect``, which is the string name of a dialect or a tuple of such, which will limit the execution of the item to just those dialects. It also accepts a ``callable_`` parameter which may reference a Python callable which will be invoked upon event reception, returning ``True`` or ``False`` indicating if the event should proceed. If our :class:`~sqlalchemy.schema.CheckConstraint` was only supported by Postgresql and not other databases, we could limit its usage to just that dialect:: event.listen( users, 'after_create', AddConstraint(constraint).execute_if(dialect='postgresql') ) event.listen( users, 'before_drop', DropConstraint(constraint).execute_if(dialect='postgresql') ) Or to any set of dialects:: event.listen( users, "after_create", AddConstraint(constraint).execute_if(dialect=('postgresql', 'mysql')) ) event.listen( users, "before_drop", DropConstraint(constraint).execute_if(dialect=('postgresql', 'mysql')) ) When using a callable, the callable is passed the ddl element, the :class:`.Table` or :class:`.MetaData` object whose "create" or "drop" event is in progress, and the :class:`.Connection` object being used for the operation, as well as additional information as keyword arguments. The callable can perform checks, such as whether or not a given item already exists. 
Below we define ``should_create()`` and ``should_drop()`` callables that check for the presence of our named constraint: .. sourcecode:: python+sql def should_create(ddl, target, connection, **kw): row = connection.execute("select conname from pg_constraint where conname='%s'" % ddl.element.name).scalar() return not bool(row) def should_drop(ddl, target, connection, **kw): return not should_create(ddl, target, connection, **kw) event.listen( users, "after_create", AddConstraint(constraint).execute_if(callable_=should_create) ) event.listen( users, "before_drop", DropConstraint(constraint).execute_if(callable_=should_drop) ) {sql}users.create(engine) CREATE TABLE users ( user_id SERIAL NOT NULL, user_name VARCHAR(40) NOT NULL, PRIMARY KEY (user_id) ) select conname from pg_constraint where conname='cst_user_name_length' ALTER TABLE users ADD CONSTRAINT cst_user_name_length CHECK (length(user_name) >= 8){stop} {sql}users.drop(engine) select conname from pg_constraint where conname='cst_user_name_length' ALTER TABLE users DROP CONSTRAINT cst_user_name_length DROP TABLE users{stop} Custom DDL ---------- Custom DDL phrases are most easily achieved using the :class:`~sqlalchemy.schema.DDL` construct. This construct works like all the other DDL elements except it accepts a string which is the text to be emitted: .. sourcecode:: python+sql event.listen( metadata, "after_create", DDL("ALTER TABLE users ADD CONSTRAINT " "cst_user_name_length " " CHECK (length(user_name) >= 8)") ) A more comprehensive method of creating libraries of DDL constructs is to use custom compilation - see :ref:`sqlalchemy.ext.compiler_toplevel` for details. .. _schema_api_ddl: DDL Expression Constructs API ----------------------------- .. autoclass:: DDLElement :members: :undoc-members: .. autoclass:: DDL :members: :undoc-members: .. autoclass:: CreateTable :members: :undoc-members: .. autoclass:: DropTable :members: :undoc-members: .. autoclass:: CreateColumn :members: :undoc-members: .. autoclass:: CreateSequence :members: :undoc-members: .. autoclass:: DropSequence :members: :undoc-members: .. autoclass:: CreateIndex :members: :undoc-members: .. autoclass:: DropIndex :members: :undoc-members: .. autoclass:: AddConstraint :members: :undoc-members: .. autoclass:: DropConstraint :members: :undoc-members: .. autoclass:: CreateSchema :members: :undoc-members: .. autoclass:: DropSchema :members: :undoc-members: SQLAlchemy-0.8.4/doc/_sources/core/defaults.txt0000644000076500000240000003451412251147171022130 0ustar classicstaff00000000000000.. _metadata_defaults_toplevel: .. _metadata_defaults: .. module:: sqlalchemy.schema Column Insert/Update Defaults ============================== SQLAlchemy provides a very rich featureset regarding column level events which take place during INSERT and UPDATE statements. Options include: * Scalar values used as defaults during INSERT and UPDATE operations * Python functions which execute upon INSERT and UPDATE operations * SQL expressions which are embedded in INSERT statements (or in some cases execute beforehand) * SQL expressions which are embedded in UPDATE statements * Server side default values used during INSERT * Markers for server-side triggers used during UPDATE The general rule for all insert/update defaults is that they only take effect if no value for a particular column is passed as an ``execute()`` parameter; otherwise, the given value is used. 
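For example, an explicitly passed value always takes precedence over a
configured default; a minimal sketch, where the table, connection and values
shown are purely illustrative::

    t = Table("mytable", meta,
        Column("somecolumn", Integer, default=12)
    )

    # no value supplied for "somecolumn" - the default of 12 is used
    conn.execute(t.insert())

    # an explicit value is supplied - the default is ignored
    conn.execute(t.insert().values(somecolumn=99))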
Scalar Defaults --------------- The simplest kind of default is a scalar value used as the default value of a column:: Table("mytable", meta, Column("somecolumn", Integer, default=12) ) Above, the value "12" will be bound as the column value during an INSERT if no other value is supplied. A scalar value may also be associated with an UPDATE statement, though this is not very common (as UPDATE statements are usually looking for dynamic defaults):: Table("mytable", meta, Column("somecolumn", Integer, onupdate=25) ) Python-Executed Functions ------------------------- The ``default`` and ``onupdate`` keyword arguments also accept Python functions. These functions are invoked at the time of insert or update if no other value for that column is supplied, and the value returned is used for the column's value. Below illustrates a crude "sequence" that assigns an incrementing counter to a primary key column:: # a function which counts upwards i = 0 def mydefault(): global i i += 1 return i t = Table("mytable", meta, Column('id', Integer, primary_key=True, default=mydefault), ) It should be noted that for real "incrementing sequence" behavior, the built-in capabilities of the database should normally be used, which may include sequence objects or other autoincrementing capabilities. For primary key columns, SQLAlchemy will in most cases use these capabilities automatically. See the API documentation for :class:`~sqlalchemy.schema.Column` including the ``autoincrement`` flag, as well as the section on :class:`~sqlalchemy.schema.Sequence` later in this chapter for background on standard primary key generation techniques. To illustrate onupdate, we assign the Python ``datetime`` function ``now`` to the ``onupdate`` attribute:: import datetime t = Table("mytable", meta, Column('id', Integer, primary_key=True), # define 'last_updated' to be populated with datetime.now() Column('last_updated', DateTime, onupdate=datetime.datetime.now), ) When an update statement executes and no value is passed for ``last_updated``, the ``datetime.datetime.now()`` Python function is executed and its return value used as the value for ``last_updated``. Notice that we provide ``now`` as the function itself without calling it (i.e. there are no parenthesis following) - SQLAlchemy will execute the function at the time the statement executes. Context-Sensitive Default Functions ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The Python functions used by ``default`` and ``onupdate`` may also make use of the current statement's context in order to determine a value. The `context` of a statement is an internal SQLAlchemy object which contains all information about the statement being executed, including its source expression, the parameters associated with it and the cursor. The typical use case for this context with regards to default generation is to have access to the other values being inserted or updated on the row. To access the context, provide a function that accepts a single ``context`` argument:: def mydefault(context): return context.current_parameters['counter'] + 12 t = Table('mytable', meta, Column('counter', Integer), Column('counter_plus_twelve', Integer, default=mydefault, onupdate=mydefault) ) Above we illustrate a default function which will execute for all INSERT and UPDATE statements where a value for ``counter_plus_twelve`` was otherwise not provided, and the value will be that of whatever value is present in the execution for the ``counter`` column, plus the number 12. 
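As a usage sketch, assuming the ``t`` table above and an illustrative ``conn``
connection, an INSERT which supplies ``counter`` but not
``counter_plus_twelve`` will invoke the default function::

    # "counter" is given, "counter_plus_twelve" is not, so the default
    # function fires and the row receives 5 + 12 = 17
    conn.execute(t.insert(), counter=5)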
While the context object passed to the default function has many attributes, the ``current_parameters`` member is a special member provided only during the execution of a default function for the purposes of deriving defaults from its existing values. For a single statement that is executing many sets of bind parameters, the user-defined function is called for each set of parameters, and ``current_parameters`` will be provided with each individual parameter set for each execution. SQL Expressions --------------- The "default" and "onupdate" keywords may also be passed SQL expressions, including select statements or direct function calls:: t = Table("mytable", meta, Column('id', Integer, primary_key=True), # define 'create_date' to default to now() Column('create_date', DateTime, default=func.now()), # define 'key' to pull its default from the 'keyvalues' table Column('key', String(20), default=keyvalues.select(keyvalues.c.type='type1', limit=1)), # define 'last_modified' to use the current_timestamp SQL function on update Column('last_modified', DateTime, onupdate=func.utc_timestamp()) ) Above, the ``create_date`` column will be populated with the result of the ``now()`` SQL function (which, depending on backend, compiles into ``NOW()`` or ``CURRENT_TIMESTAMP`` in most cases) during an INSERT statement, and the ``key`` column with the result of a SELECT subquery from another table. The ``last_modified`` column will be populated with the value of ``UTC_TIMESTAMP()``, a function specific to MySQL, when an UPDATE statement is emitted for this table. Note that when using ``func`` functions, unlike when using Python `datetime` functions we *do* call the function, i.e. with parenthesis "()" - this is because what we want in this case is the return value of the function, which is the SQL expression construct that will be rendered into the INSERT or UPDATE statement. The above SQL functions are usually executed "inline" with the INSERT or UPDATE statement being executed, meaning, a single statement is executed which embeds the given expressions or subqueries within the VALUES or SET clause of the statement. Although in some cases, the function is "pre-executed" in a SELECT statement of its own beforehand. This happens when all of the following is true: * the column is a primary key column * the database dialect does not support a usable ``cursor.lastrowid`` accessor (or equivalent); this currently includes PostgreSQL, Oracle, and Firebird, as well as some MySQL dialects. * the dialect does not support the "RETURNING" clause or similar, or the ``implicit_returning`` flag is set to ``False`` for the dialect. Dialects which support RETURNING currently include Postgresql, Oracle, Firebird, and MS-SQL. * the statement is a single execution, i.e. only supplies one set of parameters and doesn't use "executemany" behavior * the ``inline=True`` flag is not set on the :class:`~sqlalchemy.sql.expression.Insert()` or :class:`~sqlalchemy.sql.expression.Update()` construct, and the statement has not defined an explicit `returning()` clause. Whether or not the default generation clause "pre-executes" is not something that normally needs to be considered, unless it is being addressed for performance reasons. 
When the statement is executed with a single set of parameters (that is, it is not an "executemany" style execution), the returned :class:`~sqlalchemy.engine.ResultProxy` will contain a collection accessible via ``result.postfetch_cols()`` which contains a list of all :class:`~sqlalchemy.schema.Column` objects which had an inline-executed default. Similarly, all parameters which were bound to the statement, including all Python and SQL expressions which were pre-executed, are present in the ``last_inserted_params()`` or ``last_updated_params()`` collections on :class:`~sqlalchemy.engine.ResultProxy`. The ``inserted_primary_key`` collection contains a list of primary key values for the row inserted (a list so that single-column and composite-column primary keys are represented in the same format). Server Side Defaults -------------------- A variant on the SQL expression default is the ``server_default``, which gets placed in the CREATE TABLE statement during a ``create()`` operation: .. sourcecode:: python+sql t = Table('test', meta, Column('abc', String(20), server_default='abc'), Column('created_at', DateTime, server_default=text("sysdate")) ) A create call for the above table will produce:: CREATE TABLE test ( abc varchar(20) default 'abc', created_at datetime default sysdate ) The behavior of ``server_default`` is similar to that of a regular SQL default; if it's placed on a primary key column for a database which doesn't have a way to "postfetch" the ID, and the statement is not "inlined", the SQL expression is pre-executed; otherwise, SQLAlchemy lets the default fire off on the database side normally. .. _triggered_columns: Triggered Columns ------------------ Columns with values set by a database trigger or other external process may be called out using :class:`.FetchedValue` as a marker:: t = Table('test', meta, Column('abc', String(20), server_default=FetchedValue()), Column('def', String(20), server_onupdate=FetchedValue()) ) .. versionchanged:: 0.8.0b2,0.7.10 The ``for_update`` argument on :class:`.FetchedValue` is set automatically when specified as the ``server_onupdate`` argument. If using an older version, specify the onupdate above as ``server_onupdate=FetchedValue(for_update=True)``. These markers do not emit a "default" clause when the table is created, however they do set the same internal flags as a static ``server_default`` clause, providing hints to higher-level tools that a "post-fetch" of these rows should be performed after an insert or update. .. note:: It's generally not appropriate to use :class:`.FetchedValue` in conjunction with a primary key column, particularly when using the ORM or any other scenario where the :attr:`.ResultProxy.inserted_primary_key` attribute is required. This is becaue the "post-fetch" operation requires that the primary key value already be available, so that the row can be selected on its primary key. For a server-generated primary key value, all databases provide special accessors or other techniques in order to acquire the "last inserted primary key" column of a table. These mechanisms aren't affected by the presence of :class:`.FetchedValue`. 
For special situations where triggers are used to generate primary key values, and the database in use does not support the ``RETURNING`` clause, it may be necessary to forego the usage of the trigger and instead apply the SQL expression or function as a "pre execute" expression:: t = Table('test', meta, Column('abc', MyType, default=func.generate_new_value(), primary_key=True) ) Where above, when :meth:`.Table.insert` is used, the ``func.generate_new_value()`` expression will be pre-executed in the context of a scalar ``SELECT`` statement, and the new value will be applied to the subsequent ``INSERT``, while at the same time being made available to the :attr:`.ResultProxy.inserted_primary_key` attribute. Defining Sequences ------------------- SQLAlchemy represents database sequences using the :class:`~sqlalchemy.schema.Sequence` object, which is considered to be a special case of "column default". It only has an effect on databases which have explicit support for sequences, which currently includes Postgresql, Oracle, and Firebird. The :class:`~sqlalchemy.schema.Sequence` object is otherwise ignored. The :class:`~sqlalchemy.schema.Sequence` may be placed on any column as a "default" generator to be used during INSERT operations, and can also be configured to fire off during UPDATE operations if desired. It is most commonly used in conjunction with a single integer primary key column:: table = Table("cartitems", meta, Column("cart_id", Integer, Sequence('cart_id_seq'), primary_key=True), Column("description", String(40)), Column("createdate", DateTime()) ) Where above, the table "cartitems" is associated with a sequence named "cart_id_seq". When INSERT statements take place for "cartitems", and no value is passed for the "cart_id" column, the "cart_id_seq" sequence will be used to generate a value. When the :class:`~sqlalchemy.schema.Sequence` is associated with a table, CREATE and DROP statements issued for that table will also issue CREATE/DROP for the sequence object as well, thus "bundling" the sequence object with its parent table. The :class:`~sqlalchemy.schema.Sequence` object also implements special functionality to accommodate Postgresql's SERIAL datatype. The SERIAL type in PG automatically generates a sequence that is used implicitly during inserts. This means that if a :class:`~sqlalchemy.schema.Table` object defines a :class:`~sqlalchemy.schema.Sequence` on its primary key column so that it works with Oracle and Firebird, the :class:`~sqlalchemy.schema.Sequence` would get in the way of the "implicit" sequence that PG would normally use. For this use case, add the flag ``optional=True`` to the :class:`~sqlalchemy.schema.Sequence` object - this indicates that the :class:`~sqlalchemy.schema.Sequence` should only be used if the database provides no other option for generating primary key identifiers. The :class:`~sqlalchemy.schema.Sequence` object also has the ability to be executed standalone like a SQL expression, which has the effect of calling its "next value" function:: seq = Sequence('some_sequence') nextid = connection.execute(seq) Default Objects API ------------------- .. autoclass:: ColumnDefault .. autoclass:: DefaultClause .. autoclass:: DefaultGenerator .. autoclass:: FetchedValue .. autoclass:: PassiveDefault .. 
autoclass:: Sequence :members: SQLAlchemy-0.8.4/doc/_sources/core/dml.txt0000644000076500000240000000116212251147171021066 0ustar classicstaff00000000000000Insert, Updates, Deletes ======================== INSERT, UPDATE and DELETE statements build on a hierarchy starting with :class:`.UpdateBase`. The :class:`.Insert` and :class:`.Update` constructs build on the intermediary :class:`.ValuesBase`. .. module:: sqlalchemy.sql.expression .. autofunction:: delete .. autofunction:: insert .. autofunction:: update .. autoclass:: Delete :members: :inherited-members: .. autoclass:: Insert :members: :inherited-members: .. autoclass:: Update :members: :inherited-members: .. autoclass:: UpdateBase :members: .. autoclass:: ValuesBase :members: SQLAlchemy-0.8.4/doc/_sources/core/engines.txt0000644000076500000240000002705012251147171021746 0ustar classicstaff00000000000000.. _engines_toplevel: ==================== Engine Configuration ==================== The :class:`.Engine` is the starting point for any SQLAlchemy application. It's "home base" for the actual database and its :term:`DBAPI`, delivered to the SQLAlchemy application through a connection pool and a :class:`.Dialect`, which describes how to talk to a specific kind of database/DBAPI combination. The general structure can be illustrated as follows: .. image:: sqla_engine_arch.png Where above, an :class:`.Engine` references both a :class:`.Dialect` and a :class:`.Pool`, which together interpret the DBAPI's module functions as well as the behavior of the database. Creating an engine is just a matter of issuing a single call, :func:`.create_engine()`:: from sqlalchemy import create_engine engine = create_engine('postgresql://scott:tiger@localhost:5432/mydatabase') The above engine creates a :class:`.Dialect` object tailored towards PostgreSQL, as well as a :class:`.Pool` object which will establish a DBAPI connection at ``localhost:5432`` when a connection request is first received. Note that the :class:`.Engine` and its underlying :class:`.Pool` do **not** establish the first actual DBAPI connection until the :meth:`.Engine.connect` method is called, or an operation which is dependent on this method such as :meth:`.Engine.execute` is invoked. In this way, :class:`.Engine` and :class:`.Pool` can be said to have a *lazy initialization* behavior. The :class:`.Engine`, once created, can either be used directly to interact with the database, or can be passed to a :class:`.Session` object to work with the ORM. This section covers the details of configuring an :class:`.Engine`. The next section, :ref:`connections_toplevel`, will detail the usage API of the :class:`.Engine` and similar, typically for non-ORM applications. .. _supported_dbapis: Supported Databases ==================== SQLAlchemy includes many :class:`.Dialect` implementations for various backends. Dialects for the most common databases are included with SQLAlchemy; a handful of others require an additional install of a separate dialect. See the section :ref:`dialect_toplevel` for information on the various backends available. .. _create_engine_args: Engine Creation API =================== Keyword options can also be specified to :func:`~sqlalchemy.create_engine`, following the string URL as follows: .. sourcecode:: python+sql db = create_engine('postgresql://...', encoding='latin1', echo=True) .. autofunction:: sqlalchemy.create_engine .. 
autofunction:: sqlalchemy.engine_from_config Database Urls ============= SQLAlchemy indicates the source of an Engine strictly via `RFC-1738 `_ style URLs, combined with optional keyword arguments to specify options for the Engine. The form of the URL is:: dialect+driver://username:password@host:port/database Dialect names include the identifying name of the SQLAlchemy dialect which include ``sqlite``, ``mysql``, ``postgresql``, ``oracle``, ``mssql``, and ``firebird``. The drivername is the name of the DBAPI to be used to connect to the database using all lowercase letters. If not specified, a "default" DBAPI will be imported if available - this default is typically the most widely known driver available for that backend (i.e. cx_oracle, pysqlite/sqlite3, psycopg2, mysqldb). For Jython connections, specify the `zxjdbc` driver, which is the JDBC-DBAPI bridge included with Jython. .. autofunction:: sqlalchemy.engine.url.make_url Postgresql ---------- The Postgresql dialect uses psycopg2 as the default DBAPI:: # default engine = create_engine('postgresql://scott:tiger@localhost/mydatabase') # psycopg2 engine = create_engine('postgresql+psycopg2://scott:tiger@localhost/mydatabase') # pg8000 engine = create_engine('postgresql+pg8000://scott:tiger@localhost/mydatabase') # Jython engine = create_engine('postgresql+zxjdbc://scott:tiger@localhost/mydatabase') More notes on connecting to Postgresql at :ref:`postgresql_toplevel`. MySQL ----- The MySQL dialect uses mysql-python as the default DBAPI:: # default engine = create_engine('mysql://scott:tiger@localhost/foo') # mysql-python engine = create_engine('mysql+mysqldb://scott:tiger@localhost/foo') # OurSQL engine = create_engine('mysql+oursql://scott:tiger@localhost/foo') More notes on connecting to MySQL at :ref:`mysql_toplevel`. Oracle ------ cx_oracle is usually used here:: engine = create_engine('oracle://scott:tiger@127.0.0.1:1521/sidname') engine = create_engine('oracle+cx_oracle://scott:tiger@tnsname') More notes on connecting to Oracle at :ref:`oracle_toplevel`. Microsoft SQL Server -------------------- There are a few drivers for SQL Server, currently PyODBC is the most solid:: engine = create_engine('mssql+pyodbc://mydsn') More notes on connecting to SQL Server at :ref:`mssql_toplevel`. SQLite ------ SQLite connects to file based databases. The same URL format is used, omitting the hostname, and using the "file" portion as the filename of the database. This has the effect of four slashes being present for an absolute file path:: # sqlite:/// # where is relative: engine = create_engine('sqlite:///foo.db') # or absolute, starting with a slash: engine = create_engine('sqlite:////absolute/path/to/foo.db') To use a SQLite ``:memory:`` database, specify an empty URL:: engine = create_engine('sqlite://') More notes on connecting to SQLite at :ref:`sqlite_toplevel`. Others ------ See :ref:`dialect_toplevel`, the top-level page for all dialect documentation. URL API -------- .. autoclass:: sqlalchemy.engine.url.URL :members: Pooling ======= The :class:`.Engine` will ask the connection pool for a connection when the ``connect()`` or ``execute()`` methods are called. The default connection pool, :class:`~.QueuePool`, will open connections to the database on an as-needed basis. As concurrent statements are executed, :class:`.QueuePool` will grow its pool of connections to a default size of five, and will allow a default "overflow" of ten. 
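These sizing defaults can be overridden with keyword arguments to :func:`~sqlalchemy.create_engine`; a minimal sketch, in which the URL and the particular numbers are illustrative only::

    from sqlalchemy import create_engine

    # allow up to ten pooled connections plus twenty "overflow" connections,
    # and recycle any connection that has been open for more than an hour
    engine = create_engine('postgresql://scott:tiger@localhost/mydatabase',
                           pool_size=10, max_overflow=20, pool_recycle=3600)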
Since the :class:`.Engine` is essentially "home base" for the connection pool, it follows that you should keep a single :class:`.Engine` per database established within an application, rather than creating a new one for each connection. .. note:: :class:`.QueuePool` is not used by default for SQLite engines. See :ref:`sqlite_toplevel` for details on SQLite connection pool usage. For more information on connection pooling, see :ref:`pooling_toplevel`. .. _custom_dbapi_args: Custom DBAPI connect() arguments ================================= Custom arguments used when issuing the ``connect()`` call to the underlying DBAPI may be issued in three distinct ways. String-based arguments can be passed directly from the URL string as query arguments: .. sourcecode:: python+sql db = create_engine('postgresql://scott:tiger@localhost/test?argument1=foo&argument2=bar') If SQLAlchemy's database connector is aware of a particular query argument, it may convert its type from string to its proper type. :func:`~sqlalchemy.create_engine` also takes an argument ``connect_args`` which is an additional dictionary that will be passed to ``connect()``. This can be used when arguments of a type other than string are required, and SQLAlchemy's database connector has no type conversion logic present for that parameter: .. sourcecode:: python+sql db = create_engine('postgresql://scott:tiger@localhost/test', connect_args = {'argument1':17, 'argument2':'bar'}) The most customizable connection method of all is to pass a ``creator`` argument, which specifies a callable that returns a DBAPI connection: .. sourcecode:: python+sql def connect(): return psycopg.connect(user='scott', host='localhost') db = create_engine('postgresql://', creator=connect) .. _dbengine_logging: Configuring Logging ==================== Python's standard `logging `_ module is used to implement informational and debug log output with SQLAlchemy. This allows SQLAlchemy's logging to integrate in a standard way with other applications and libraries. The ``echo`` and ``echo_pool`` flags that are present on :func:`~sqlalchemy.create_engine`, as well as the ``echo_uow`` flag used on :class:`~sqlalchemy.orm.session.Session`, all interact with regular loggers. This section assumes familiarity with the above linked logging module. All logging performed by SQLAlchemy exists underneath the ``sqlalchemy`` namespace, as used by ``logging.getLogger('sqlalchemy')``. When logging has been configured (i.e. such as via ``logging.basicConfig()``), the general namespace of SA loggers that can be turned on is as follows: * ``sqlalchemy.engine`` - controls SQL echoing. set to ``logging.INFO`` for SQL query output, ``logging.DEBUG`` for query + result set output. * ``sqlalchemy.dialects`` - controls custom logging for SQL dialects. See the documentation of individual dialects for details. * ``sqlalchemy.pool`` - controls connection pool logging. set to ``logging.INFO`` or lower to log connection pool checkouts/checkins. * ``sqlalchemy.orm`` - controls logging of various ORM functions. set to ``logging.INFO`` for information on mapper configurations. For example, to log SQL queries using Python logging instead of the ``echo=True`` flag:: import logging logging.basicConfig() logging.getLogger('sqlalchemy.engine').setLevel(logging.INFO) By default, the log level is set to ``logging.WARN`` within the entire ``sqlalchemy`` namespace so that no log operations occur, even within an application that has logging enabled otherwise. 
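Building on the example above, the other namespaces can be configured at the same time; a short sketch, assuming logging is set up once at application startup and no ``echo`` flags are in use::

    import logging

    logging.basicConfig()

    # SQL statements plus result rows
    logging.getLogger('sqlalchemy.engine').setLevel(logging.DEBUG)

    # connection pool checkouts/checkins
    logging.getLogger('sqlalchemy.pool').setLevel(logging.INFO)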
The ``echo`` flags present as keyword arguments to :func:`~sqlalchemy.create_engine` and others, as well as the ``echo`` property on :class:`~sqlalchemy.engine.Engine`, when set to ``True``, will first attempt to ensure that logging is enabled. Unfortunately, the ``logging`` module provides no way of determining if output has already been configured (note we are referring to whether a logging configuration has been set up, not just whether the logging level is set). For this reason, any ``echo=True`` flags will result in a call to ``logging.basicConfig()`` using ``sys.stdout`` as the destination. It also sets up a default format using the level name, timestamp, and logger name. Note that this configuration has the effect of being configured **in addition** to any existing logger configurations. Therefore, **when using Python logging, ensure all echo flags are set to False at all times**, to avoid getting duplicate log lines. The logger name of an instance such as an :class:`~sqlalchemy.engine.Engine` or :class:`~sqlalchemy.pool.Pool` defaults to using a truncated hex identifier string. To set this to a specific name, use the "logging_name" and "pool_logging_name" keyword arguments with :func:`sqlalchemy.create_engine`. .. note:: The SQLAlchemy :class:`.Engine` conserves Python function call overhead by only emitting log statements when the current logging level is detected as ``logging.INFO`` or ``logging.DEBUG``. It only checks this level when a new connection is procured from the connection pool. Therefore when changing the logging configuration for an already-running application, any :class:`.Connection` that's currently active, or more commonly a :class:`~.orm.session.Session` object that's active in a transaction, won't log any SQL according to the new configuration until a new :class:`.Connection` is procured (in the case of :class:`~.orm.session.Session`, this is after the current transaction ends and a new one begins). SQLAlchemy-0.8.4/doc/_sources/core/event.txt0000644000076500000240000001002012251147171021424 0ustar classicstaff00000000000000.. _event_toplevel: Events ====== SQLAlchemy includes an event API which publishes a wide variety of hooks into the internals of both SQLAlchemy Core and ORM. .. versionadded:: 0.7 The system supersedes the previous system of "extension", "proxy", and "listener" classes. Event Registration ------------------ Subscribing to an event occurs through a single API point, the :func:`.listen` function. This function accepts a user-defined listening function, a string identifier which identifies the event to be intercepted, and a target. Additional positional and keyword arguments may be supported by specific types of events, which may specify alternate interfaces for the given event function, or provide instructions regarding secondary event targets based on the given target. The name of an event and the argument signature of a corresponding listener function is derived from a class-bound specification method, which exists bound to a marker class that's described in the documentation. For example, the documentation for :meth:`.PoolEvents.connect` indicates that the event name is ``"connect"`` and that a user-defined listener function should receive two positional arguments:: from sqlalchemy.event import listen from sqlalchemy.pool import Pool def my_on_connect(dbapi_con, connection_record): print "New DBAPI connection:", dbapi_con listen(Pool, 'connect', my_on_connect) Targets ------- The :func:`.listen` function is very flexible regarding targets.
It generally accepts classes, instances of those classes, and related classes or objects from which the appropriate target can be derived. For example, the above mentioned ``"connect"`` event accepts :class:`.Engine` classes and objects as well as :class:`.Pool` classes and objects:: from sqlalchemy.event import listen from sqlalchemy.pool import Pool, QueuePool from sqlalchemy import create_engine from sqlalchemy.engine import Engine import psycopg2 def connect(): return psycopg2.connect(username='ed', host='127.0.0.1', dbname='test') my_pool = QueuePool(connect) my_engine = create_engine('postgresql://ed@localhost/test') # associate listener with all instances of Pool listen(Pool, 'connect', my_on_connect) # associate listener with all instances of Pool # via the Engine class listen(Engine, 'connect', my_on_connect) # associate listener with my_pool listen(my_pool, 'connect', my_on_connect) # associate listener with my_engine.pool listen(my_engine, 'connect', my_on_connect) Modifiers ---------- Some listeners allow modifiers to be passed to :func:`.listen`. These modifiers sometimes provide alternate calling signatures for listeners. Such as with ORM events, some event listeners can have a return value which modifies the subsequent handling. By default, no listener ever requires a return value, but by passing ``retval=True`` this value can be supported:: def validate_phone(target, value, oldvalue, initiator): """Strip non-numeric characters from a phone number""" return re.sub(r'(?![0-9])', '', value) # setup listener on UserContact.phone attribute, instructing # it to use the return value listen(UserContact.phone, 'set', validate_phone, retval=True) Event Reference ---------------- Both SQLAlchemy Core and SQLAlchemy ORM feature a wide variety of event hooks: * **Core Events** - these are described in :ref:`core_event_toplevel` and include event hooks specific to connection pool lifecycle, SQL statement execution, transaction lifecycle, and schema creation and teardown. * **ORM Events** - these are described in :ref:`orm_event_toplevel`, and include event hooks specific to class and attribute instrumentation, object initialization hooks, attribute on-change hooks, session state, flush, and commit hooks, mapper initialization, object/result population, and per-instance persistence hooks. API Reference ------------- .. autofunction:: sqlalchemy.event.listen .. autofunction:: sqlalchemy.event.listens_for SQLAlchemy-0.8.4/doc/_sources/core/events.txt0000644000076500000240000000152212251147171021616 0ustar classicstaff00000000000000.. _core_event_toplevel: Core Events ============ This section describes the event interfaces provided in SQLAlchemy Core. For an introduction to the event listening API, see :ref:`event_toplevel`. ORM events are described in :ref:`orm_event_toplevel`. .. autoclass:: sqlalchemy.event.base.Events :members: .. versionadded:: 0.7 The event system supercedes the previous system of "extension", "listener", and "proxy" classes. Connection Pool Events ----------------------- .. autoclass:: sqlalchemy.events.PoolEvents :members: SQL Execution and Connection Events ------------------------------------ .. autoclass:: sqlalchemy.events.ConnectionEvents :members: Schema Events ----------------------- .. autoclass:: sqlalchemy.events.DDLEvents :members: .. autoclass:: sqlalchemy.events.SchemaEventTarget :members: SQLAlchemy-0.8.4/doc/_sources/core/exceptions.txt0000644000076500000240000000011512251147171022470 0ustar classicstaff00000000000000Core Exceptions =============== .. 
automodule:: sqlalchemy.exc :members:SQLAlchemy-0.8.4/doc/_sources/core/expression_api.txt0000644000076500000240000000057712251147171023353 0ustar classicstaff00000000000000.. _expression_api_toplevel: SQL Statements and Expressions API ================================== .. module:: sqlalchemy.sql.expression This section presents the API reference for the SQL Expression Language. For a full introduction to its usage, see :ref:`sqlexpression_toplevel`. .. toctree:: :maxdepth: 1 sqlelement selectable dml functions types SQLAlchemy-0.8.4/doc/_sources/core/functions.txt0000644000076500000240000000150312251147171022321 0ustar classicstaff00000000000000.. _functions_toplevel: .. _generic_functions: ========================= SQL and Generic Functions ========================= .. module:: sqlalchemy.sql.expression SQL functions which are known to SQLAlchemy with regards to database-specific rendering, return types and argument behavior. Generic functions are invoked like all SQL functions, using the :attr:`func` attribute:: select([func.count()]).select_from(sometable) Note that any name not known to :attr:`func` generates the function name as is - there is no restriction on what SQL functions can be called, known or unknown to SQLAlchemy, built-in or user defined. The section here only describes those functions where SQLAlchemy already knows what argument and return types are in use. .. automodule:: sqlalchemy.sql.functions :members: :undoc-members: SQLAlchemy-0.8.4/doc/_sources/core/index.txt0000644000076500000240000000105012251147171021415 0ustar classicstaff00000000000000.. _core_toplevel: SQLAlchemy Core =============== The breadth of SQLAlchemy’s SQL rendering engine, DBAPI integration, transaction integration, and schema description services are documented here. In contrast to the ORM’s domain-centric mode of usage, the SQL Expression Language provides a schema-centric usage paradigm. .. toctree:: :maxdepth: 3 tutorial expression_api schema engines connections pooling event events compiler inspection serializer interfaces exceptions internals SQLAlchemy-0.8.4/doc/_sources/core/inspection.txt0000644000076500000240000000315312251147171022467 0ustar classicstaff00000000000000.. _core_inspection_toplevel: .. _inspection_toplevel: Runtime Inspection API ====================== .. automodule:: sqlalchemy.inspection :members: Available Inspection Targets ---------------------------- Below is a listing of many of the most common inspection targets. * :class:`.Connectable` (i.e. :class:`.Engine`, :class:`.Connection`) - returns an :class:`.Inspector` object. * :class:`.ClauseElement` - all SQL expression components, including :class:`.Table`, :class:`.Column`, serve as their own inspection objects, meaning any of these objects passed to :func:`.inspect` return themselves. * ``object`` - an object given will be checked by the ORM for a mapping - if so, an :class:`.InstanceState` is returned representing the mapped state of the object. The :class:`.InstanceState` also provides access to per attribute state via the :class:`.AttributeState` interface as well as the per-flush "history" of any attribute via the :class:`.History` object. * ``type`` (i.e. a class) - a class given will be checked by the ORM for a mapping - if so, a :class:`.Mapper` for that class is returned. * mapped attribute - passing a mapped attribute to :func:`.inspect`, such as ``inspect(MyClass.some_attribute)``, returns a :class:`.QueryableAttribute` object, which is the :term:`descriptor` associated with a mapped class. 
This descriptor refers to a :class:`.MapperProperty`, which is usually an instance of :class:`.ColumnProperty` or :class:`.RelationshipProperty`, via its :attr:`.QueryableAttribute.property` attribute. * :class:`.AliasedClass` - returns an :class:`.AliasedInsp` object. SQLAlchemy-0.8.4/doc/_sources/core/interfaces.txt0000644000076500000240000000137012251147171022436 0ustar classicstaff00000000000000.. _dep_interfaces_core_toplevel: Deprecated Event Interfaces ============================ .. module:: sqlalchemy.interfaces This section describes the class-based core event interface introduced in SQLAlchemy 0.5. The ORM analogue is described at :ref:`dep_interfaces_orm_toplevel`. .. deprecated:: 0.7 The new event system described in :ref:`event_toplevel` replaces the extension/proxy/listener system, providing a consistent interface to all events without the need for subclassing. Execution, Connection and Cursor Events --------------------------------------- .. autoclass:: ConnectionProxy :members: :undoc-members: Connection Pool Events ---------------------- .. autoclass:: PoolListener :members: :undoc-members: SQLAlchemy-0.8.4/doc/_sources/core/internals.txt0000644000076500000240000000136412251147171022315 0ustar classicstaff00000000000000.. _core_internal_toplevel: Core Internals ============== Some key internal constructs are listed here. .. currentmodule: sqlalchemy .. autoclass:: sqlalchemy.engine.interfaces.Compiled :members: .. autoclass:: sqlalchemy.sql.compiler.DDLCompiler :members: :inherited-members: .. autoclass:: sqlalchemy.engine.default.DefaultDialect :members: :inherited-members: .. autoclass:: sqlalchemy.engine.interfaces.Dialect :members: .. autoclass:: sqlalchemy.engine.default.DefaultExecutionContext :members: .. autoclass:: sqlalchemy.engine.interfaces.ExecutionContext :members: .. autoclass:: sqlalchemy.sql.compiler.IdentifierPreparer :members: .. autoclass:: sqlalchemy.sql.compiler.SQLCompiler :members: SQLAlchemy-0.8.4/doc/_sources/core/metadata.txt0000644000076500000240000002752012251147171022100 0ustar classicstaff00000000000000.. _metadata_toplevel: .. _metadata_describing_toplevel: .. _metadata_describing: ================================== Describing Databases with MetaData ================================== .. module:: sqlalchemy.schema This section discusses the fundamental :class:`.Table`, :class:`.Column` and :class:`.MetaData` objects. A collection of metadata entities is stored in an object aptly named :class:`~sqlalchemy.schema.MetaData`:: from sqlalchemy import * metadata = MetaData() :class:`~sqlalchemy.schema.MetaData` is a container object that keeps together many different features of a database (or multiple databases) being described. To represent a table, use the :class:`~sqlalchemy.schema.Table` class. Its two primary arguments are the table name, then the :class:`~sqlalchemy.schema.MetaData` object which it will be associated with. The remaining positional arguments are mostly :class:`~sqlalchemy.schema.Column` objects describing each column:: user = Table('user', metadata, Column('user_id', Integer, primary_key = True), Column('user_name', String(16), nullable = False), Column('email_address', String(60)), Column('password', String(20), nullable = False) ) Above, a table called ``user`` is described, which contains four columns. The primary key of the table consists of the ``user_id`` column. Multiple columns may be assigned the ``primary_key=True`` flag which denotes a multi-column primary key, known as a *composite* primary key. 
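For example, a hypothetical association table (not part of the original schema above) might declare a composite primary key across two columns::

    from sqlalchemy import Table, Column, Integer, MetaData

    metadata = MetaData()

    # both columns together form the composite primary key
    user_group = Table('user_group', metadata,
        Column('user_id', Integer, primary_key=True),
        Column('group_id', Integer, primary_key=True)
    )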
Note also that each column describes its datatype using objects corresponding to genericized types, such as :class:`~sqlalchemy.types.Integer` and :class:`~sqlalchemy.types.String`. SQLAlchemy features dozens of types of varying levels of specificity as well as the ability to create custom types. Documentation on the type system can be found at :ref:`types`. Accessing Tables and Columns ---------------------------- The :class:`~sqlalchemy.schema.MetaData` object contains all of the schema constructs we've associated with it. It supports a few methods of accessing these table objects, such as the ``sorted_tables`` accessor which returns a list of each :class:`~sqlalchemy.schema.Table` object in order of foreign key dependency (that is, each table is preceded by all tables which it references):: >>> for t in metadata.sorted_tables: ... print t.name user user_preference invoice invoice_item In most cases, individual :class:`~sqlalchemy.schema.Table` objects have been explicitly declared, and these objects are typically accessed directly as module-level variables in an application. Once a :class:`~sqlalchemy.schema.Table` has been defined, it has a full set of accessors which allow inspection of its properties. Given the following :class:`~sqlalchemy.schema.Table` definition:: employees = Table('employees', metadata, Column('employee_id', Integer, primary_key=True), Column('employee_name', String(60), nullable=False), Column('employee_dept', Integer, ForeignKey("departments.department_id")) ) Note the :class:`~sqlalchemy.schema.ForeignKey` object used in this table - this construct defines a reference to a remote table, and is fully described in :ref:`metadata_foreignkeys`. Methods of accessing information about this table include:: # access the column "EMPLOYEE_ID": employees.columns.employee_id # or just employees.c.employee_id # via string employees.c['employee_id'] # iterate through all columns for c in employees.c: print c # get the table's primary key columns for primary_key in employees.primary_key: print primary_key # get the table's foreign key objects: for fkey in employees.foreign_keys: print fkey # access the table's MetaData: employees.metadata # access the table's bound Engine or Connection, if its MetaData is bound: employees.bind # access a column's name, type, nullable, primary key, foreign key employees.c.employee_id.name employees.c.employee_id.type employees.c.employee_id.nullable employees.c.employee_id.primary_key employees.c.employee_dept.foreign_keys # get the "key" of a column, which defaults to its name, but can # be any user-defined string: employees.c.employee_name.key # access a column's table: employees.c.employee_id.table is employees # get the table related by a foreign key list(employees.c.employee_dept.foreign_keys)[0].column.table Creating and Dropping Database Tables ------------------------------------- Once you've defined some :class:`~sqlalchemy.schema.Table` objects, assuming you're working with a brand new database one thing you might want to do is issue CREATE statements for those tables and their related constructs (as an aside, it's also quite possible that you *don't* want to do this, if you already have some preferred methodology such as tools included with your database or an existing scripting system - if that's the case, feel free to skip this section - SQLAlchemy has no requirement that it be used to create your tables). 
The usual way to issue CREATE is to use :func:`~sqlalchemy.schema.MetaData.create_all` on the :class:`~sqlalchemy.schema.MetaData` object. This method will issue queries that first check for the existence of each individual table, and if not found will issue the CREATE statements: .. sourcecode:: python+sql engine = create_engine('sqlite:///:memory:') metadata = MetaData() user = Table('user', metadata, Column('user_id', Integer, primary_key = True), Column('user_name', String(16), nullable = False), Column('email_address', String(60), key='email'), Column('password', String(20), nullable = False) ) user_prefs = Table('user_prefs', metadata, Column('pref_id', Integer, primary_key=True), Column('user_id', Integer, ForeignKey("user.user_id"), nullable=False), Column('pref_name', String(40), nullable=False), Column('pref_value', String(100)) ) {sql}metadata.create_all(engine) PRAGMA table_info(user){} CREATE TABLE user( user_id INTEGER NOT NULL PRIMARY KEY, user_name VARCHAR(16) NOT NULL, email_address VARCHAR(60), password VARCHAR(20) NOT NULL ) PRAGMA table_info(user_prefs){} CREATE TABLE user_prefs( pref_id INTEGER NOT NULL PRIMARY KEY, user_id INTEGER NOT NULL REFERENCES user(user_id), pref_name VARCHAR(40) NOT NULL, pref_value VARCHAR(100) ) :func:`~sqlalchemy.schema.MetaData.create_all` creates foreign key constraints between tables usually inline with the table definition itself, and for this reason it also generates the tables in order of their dependency. There are options to change this behavior such that ``ALTER TABLE`` is used instead. Dropping all tables is similarly achieved using the :func:`~sqlalchemy.schema.MetaData.drop_all` method. This method does the exact opposite of :func:`~sqlalchemy.schema.MetaData.create_all` - the presence of each table is checked first, and tables are dropped in reverse order of dependency. Creating and dropping individual tables can be done via the ``create()`` and ``drop()`` methods of :class:`~sqlalchemy.schema.Table`. These methods by default issue the CREATE or DROP regardless of the table being present: .. sourcecode:: python+sql engine = create_engine('sqlite:///:memory:') meta = MetaData() employees = Table('employees', meta, Column('employee_id', Integer, primary_key=True), Column('employee_name', String(60), nullable=False, key='name'), Column('employee_dept', Integer, ForeignKey("departments.department_id")) ) {sql}employees.create(engine) CREATE TABLE employees( employee_id SERIAL NOT NULL PRIMARY KEY, employee_name VARCHAR(60) NOT NULL, employee_dept INTEGER REFERENCES departments(department_id) ) {} ``drop()`` method: .. sourcecode:: python+sql {sql}employees.drop(engine) DROP TABLE employees {} To enable the "check first for the table existing" logic, add the ``checkfirst=True`` argument to ``create()`` or ``drop()``:: employees.create(engine, checkfirst=True) employees.drop(engine, checkfirst=False) .. _schema_migrations: Altering Schemas through Migrations ----------------------------------- While SQLAlchemy directly supports emitting CREATE and DROP statements for schema constructs, the ability to alter those constructs, usually via the ALTER statement as well as other database-specific constructs, is outside of the scope of SQLAlchemy itself. 
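A one-off change can still be emitted by hand as a plain string; a minimal sketch, in which the connection URL and the column being added are hypothetical::

    from sqlalchemy import create_engine

    engine = create_engine('postgresql://scott:tiger@localhost/mydatabase')
    conn = engine.connect()

    # the string is passed through to the database as-is
    conn.execute("ALTER TABLE employees ADD COLUMN middle_name VARCHAR(30)")
    conn.close()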
While it's easy enough to emit ALTER statements and similar by hand, such as by passing a string to :meth:`.Connection.execute` or by using the :class:`.DDL` construct, it's a common practice to automate the maintenance of database schemas in relation to application code using schema migration tools. There are two major migration tools available for SQLAlchemy: * `Alembic `_ - Written by the author of SQLAlchemy, Alembic features a highly customizable environment and a minimalistic usage pattern, supporting such features as transactional DDL, automatic generation of "candidate" migrations, an "offline" mode which generates SQL scripts, and support for branch resolution. * `SQLAlchemy-Migrate `_ - The original migration tool for SQLAlchemy, SQLAlchemy-Migrate is widely used and continues under active development. SQLAlchemy-Migrate includes features such as SQL script generation, ORM class generation, ORM model comparison, and extensive support for SQLite migrations. Specifying the Schema Name --------------------------- Some databases support the concept of multiple schemas. A :class:`~sqlalchemy.schema.Table` can reference this by specifying the ``schema`` keyword argument:: financial_info = Table('financial_info', meta, Column('id', Integer, primary_key=True), Column('value', String(100), nullable=False), schema='remote_banks' ) Within the :class:`~sqlalchemy.schema.MetaData` collection, this table will be identified by the combination of ``financial_info`` and ``remote_banks``. If another table called ``financial_info`` is referenced without the ``remote_banks`` schema, it will refer to a different :class:`~sqlalchemy.schema.Table`. :class:`~sqlalchemy.schema.ForeignKey` objects can specify references to columns in this table using the form ``remote_banks.financial_info.id``. The ``schema`` argument should be used for any name qualifiers required, including Oracle's "owner" attribute and similar. It also can accommodate a dotted name for longer schemes:: schema="dbo.scott" Backend-Specific Options ------------------------ :class:`~sqlalchemy.schema.Table` supports database-specific options. For example, MySQL has different table backend types, including "MyISAM" and "InnoDB". This can be expressed with :class:`~sqlalchemy.schema.Table` using ``mysql_engine``:: addresses = Table('engine_email_addresses', meta, Column('address_id', Integer, primary_key = True), Column('remote_user_id', Integer, ForeignKey(users.c.user_id)), Column('email_address', String(20)), mysql_engine='InnoDB' ) Other backends may support table-level options as well - these would be described in the individual documentation sections for each dialect. Column, Table, MetaData API --------------------------- .. autoclass:: Column :members: :inherited-members: :undoc-members: .. autoclass:: MetaData :members: :undoc-members: .. autoclass:: SchemaItem :members: .. autoclass:: Table :members: :inherited-members: :undoc-members: .. autoclass:: ThreadLocalMetaData :members: :undoc-members: SQLAlchemy-0.8.4/doc/_sources/core/pooling.txt0000644000076500000240000003350312251147171021765 0ustar classicstaff00000000000000.. _pooling_toplevel: Connection Pooling ================== .. module:: sqlalchemy.pool A connection pool is a standard technique used to maintain long running connections in memory for efficient re-use, as well as to provide management for the total number of connections an application might use simultaneously. 
Particularly for server-side web applications, a connection pool is the standard way to maintain a "pool" of active database connections in memory which are reused across requests. SQLAlchemy includes several connection pool implementations which integrate with the :class:`.Engine`. They can also be used directly for applications that want to add pooling to an otherwise plain DBAPI approach. Connection Pool Configuration ----------------------------- The :class:`~.engine.Engine` returned by the :func:`~sqlalchemy.create_engine` function in most cases has a :class:`.QueuePool` integrated, pre-configured with reasonable pooling defaults. If you're reading this section only to learn how to enable pooling - congratulations! You're already done. The most common :class:`.QueuePool` tuning parameters can be passed directly to :func:`~sqlalchemy.create_engine` as keyword arguments: ``pool_size``, ``max_overflow``, ``pool_recycle`` and ``pool_timeout``. For example:: engine = create_engine('postgresql://me@localhost/mydb', pool_size=20, max_overflow=0) In the case of SQLite, the :class:`.SingletonThreadPool` or :class:`.NullPool` are selected by the dialect to provide greater compatibility with SQLite's threading and locking model, as well as to provide a reasonable default behavior to SQLite "memory" databases, which maintain their entire dataset within the scope of a single connection. All SQLAlchemy pool implementations have in common that none of them "pre create" connections - all implementations wait until first use before creating a connection. At that point, if no additional concurrent checkout requests for more connections are made, no additional connections are created. This is why it's perfectly fine for :func:`.create_engine` to default to using a :class:`.QueuePool` of size five without regard to whether or not the application really needs five connections queued up - the pool would only grow to that size if the application actually used five connections concurrently, in which case the usage of a small pool is an entirely appropriate default behavior. Switching Pool Implementations ------------------------------ The usual way to use a different kind of pool with :func:`.create_engine` is to use the ``poolclass`` argument. This argument accepts a class imported from the ``sqlalchemy.pool`` module, and handles the details of building the pool for you. Common options include specifying :class:`.QueuePool` with SQLite:: from sqlalchemy.pool import QueuePool engine = create_engine('sqlite:///file.db', poolclass=QueuePool) Disabling pooling using :class:`.NullPool`:: from sqlalchemy.pool import NullPool engine = create_engine( 'postgresql+psycopg2://scott:tiger@localhost/test', poolclass=NullPool) Using a Custom Connection Function ---------------------------------- All :class:`.Pool` classes accept an argument ``creator`` which is a callable that creates a new connection. :func:`.create_engine` accepts this function to pass onto the pool via an argument of the same name:: import sqlalchemy.pool as pool import psycopg2 def getconn(): c = psycopg2.connect(username='ed', host='127.0.0.1', dbname='test') # do things with 'c' to set up return c engine = create_engine('postgresql+psycopg2://', creator=getconn) For most "initialize on connection" routines, it's more convenient to use the :class:`.PoolEvents` event hooks, so that the usual URL argument to :func:`.create_engine` is still usable. 
``creator`` is there as a last resort for when a DBAPI has some form of ``connect`` that is not at all supported by SQLAlchemy. Constructing a Pool ------------------------ To use a :class:`.Pool` by itself, the ``creator`` function is the only argument that's required and is passed first, followed by any additional options:: import sqlalchemy.pool as pool import psycopg2 def getconn(): c = psycopg2.connect(username='ed', host='127.0.0.1', dbname='test') return c mypool = pool.QueuePool(getconn, max_overflow=10, pool_size=5) DBAPI connections can then be procured from the pool using the :meth:`.Pool.connect` function. The return value of this method is a DBAPI connection that's contained within a transparent proxy:: # get a connection conn = mypool.connect() # use it cursor = conn.cursor() cursor.execute("select foo") The purpose of the transparent proxy is to intercept the ``close()`` call, such that instead of the DBAPI connection being closed, it's returned to the pool:: # "close" the connection. Returns # it to the pool. conn.close() The proxy also returns its contained DBAPI connection to the pool when it is garbage collected, though it's not deterministic in Python that this occurs immediately (though it is typical with CPython). The ``close()`` step also performs the important step of calling the ``rollback()`` method of the DBAPI connection. This is so that any existing transaction on the connection is removed, ensuring not only that no existing state remains on next usage, but also that table and row locks are released and any isolated data snapshots are removed. This behavior can be disabled using the ``reset_on_return`` option of :class:`.Pool`. A particular pre-created :class:`.Pool` can be shared with one or more engines by passing it to the ``pool`` argument of :func:`.create_engine`:: e = create_engine('postgresql://', pool=mypool) Pool Events ----------- Connection pools support an event interface that allows hooks to execute upon first connect, upon each new connection, and upon checkout and checkin of connections. See :class:`.PoolEvents` for details. Dealing with Disconnects ------------------------ The connection pool has the ability to refresh individual connections as well as its entire set of connections, setting the previously pooled connections as "invalid". A common use case is to allow the connection pool to gracefully recover when the database server has been restarted, and all previously established connections are no longer functional. There are two approaches to this. Disconnect Handling - Optimistic ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ The most common approach is to let SQLAlchemy handle disconnects as they occur, at which point the pool is refreshed. This assumes the :class:`.Pool` is used in conjunction with an :class:`.Engine`. The :class:`.Engine` has logic which can detect disconnection events and refresh the pool automatically. When the :class:`.Connection` attempts to use a DBAPI connection, and an exception is raised that corresponds to a "disconnect" event, the connection is invalidated. The :class:`.Connection` then calls the :meth:`.Pool.recreate` method, effectively invalidating all connections not currently checked out so that they are replaced with new ones upon next checkout:: from sqlalchemy import create_engine, exc e = create_engine(...) c = e.connect() try: # suppose the database has been restarted. c.execute("SELECT * FROM table") c.close() except exc.DBAPIError, e: # an exception is raised, Connection is invalidated.
if e.connection_invalidated: print "Connection was invalidated!" # after the invalidate event, a new connection # starts with a new Pool c = e.connect() c.execute("SELECT * FROM table") The above example illustrates that no special intervention is needed, the pool continues normally after a disconnection event is detected. However, an exception is raised. In a typical web application using an ORM Session, the above condition would correspond to a single request failing with a 500 error, then the web application continuing normally beyond that. Hence the approach is "optimistic" in that frequent database restarts are not anticipated. Setting Pool Recycle ~~~~~~~~~~~~~~~~~~~~~~~ An additional setting that can augment the "optimistic" approach is to set the pool recycle parameter. This parameter prevents the pool from using a particular connection that has passed a certain age, and is appropriate for database backends such as MySQL that automatically close connections that have been stale after a particular period of time:: from sqlalchemy import create_engine e = create_engine("mysql://scott:tiger@localhost/test", pool_recycle=3600) Above, any DBAPI connection that has been open for more than one hour will be invalidated and replaced, upon next checkout. Note that the invalidation **only** occurs during checkout - not on any connections that are held in a checked out state. ``pool_recycle`` is a function of the :class:`.Pool` itself, independent of whether or not an :class:`.Engine` is in use. Disconnect Handling - Pessimistic ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ At the expense of some extra SQL emitted for each connection checked out from the pool, a "ping" operation established by a checkout event handler can detect an invalid connection before it's used:: from sqlalchemy import exc from sqlalchemy import event from sqlalchemy.pool import Pool @event.listens_for(Pool, "checkout") def ping_connection(dbapi_connection, connection_record, connection_proxy): cursor = dbapi_connection.cursor() try: cursor.execute("SELECT 1") except: # optional - dispose the whole pool # instead of invalidating one at a time # connection_proxy._pool.dispose() # raise DisconnectionError - pool will try # connecting again up to three times before raising. raise exc.DisconnectionError() cursor.close() Above, the :class:`.Pool` object specifically catches :class:`~sqlalchemy.exc.DisconnectionError` and attempts to create a new DBAPI connection, up to three times, before giving up and then raising :class:`~sqlalchemy.exc.InvalidRequestError`, failing the connection. This recipe will ensure that a new :class:`.Connection` will succeed even if connections in the pool have gone stale, provided that the database server is actually running. The expense is that of an additional execution performed per checkout. When using the ORM :class:`.Session`, there is one connection checkout per transaction, so the expense is fairly low. The ping approach above also works with straight connection pool usage, that is, even if no :class:`.Engine` were involved. The event handler can be tested using a script like the following, restarting the database server at the point at which the script pauses for input:: from sqlalchemy import create_engine e = create_engine("mysql://scott:tiger@localhost/test", echo_pool=True) c1 = e.connect() c2 = e.connect() c3 = e.connect() c1.close() c2.close() c3.close() # pool size is now three. 
print "Restart the server" raw_input() for i in xrange(10): c = e.connect() print c.execute("select 1").fetchall() c.close() API Documentation - Available Pool Implementations --------------------------------------------------- .. autoclass:: sqlalchemy.pool.Pool .. automethod:: __init__ .. automethod:: connect .. automethod:: dispose .. automethod:: recreate .. automethod:: unique_connection .. autoclass:: sqlalchemy.pool.QueuePool .. automethod:: __init__ .. automethod:: connect .. automethod:: unique_connection .. autoclass:: SingletonThreadPool .. automethod:: __init__ .. autoclass:: AssertionPool .. autoclass:: NullPool .. autoclass:: StaticPool Pooling Plain DB-API Connections -------------------------------- Any :pep:`249` DB-API module can be "proxied" through the connection pool transparently. Usage of the DB-API is exactly as before, except the ``connect()`` method will consult the pool. Below we illustrate this with ``psycopg2``:: import sqlalchemy.pool as pool import psycopg2 as psycopg psycopg = pool.manage(psycopg) # then connect normally connection = psycopg.connect(database='test', username='scott', password='tiger') This produces a :class:`_DBProxy` object which supports the same ``connect()`` function as the original DB-API module. Upon connection, a connection proxy object is returned, which delegates its calls to a real DB-API connection object. This connection object is stored persistently within a connection pool (an instance of :class:`.Pool`) that corresponds to the exact connection arguments sent to the ``connect()`` function. The connection proxy supports all of the methods on the original connection object, most of which are proxied via ``__getattr__()``. The ``close()`` method will return the connection to the pool, and the ``cursor()`` method will return a proxied cursor object. Both the connection proxy and the cursor proxy will also return the underlying connection to the pool after they have both been garbage collected, which is detected via weakref callbacks (``__del__`` is not used). Additionally, when connections are returned to the pool, a ``rollback()`` is issued on the connection unconditionally. This is to release any locks still held by the connection that may have resulted from normal activity. By default, the ``connect()`` method will return the same connection that is already checked out in the current thread. This allows a particular connection to be used in a given thread without needing to pass it around between functions. To disable this behavior, specify ``use_threadlocal=False`` to the ``manage()`` function. .. autofunction:: sqlalchemy.pool.manage .. autofunction:: sqlalchemy.pool.clear_managers SQLAlchemy-0.8.4/doc/_sources/core/reflection.txt0000644000076500000240000001633212251147171022451 0ustar classicstaff00000000000000.. module:: sqlalchemy.schema .. _metadata_reflection_toplevel: .. _metadata_reflection: Reflecting Database Objects =========================== A :class:`~sqlalchemy.schema.Table` object can be instructed to load information about itself from the corresponding database schema object already existing within the database. This process is called *reflection*. In the most simple case you need only specify the table name, a :class:`~sqlalchemy.schema.MetaData` object, and the ``autoload=True`` flag. 
If the :class:`~sqlalchemy.schema.MetaData` is not persistently bound, also add the ``autoload_with`` argument:: >>> messages = Table('messages', meta, autoload=True, autoload_with=engine) >>> [c.name for c in messages.columns] ['message_id', 'message_name', 'date'] The above operation will use the given engine to query the database for information about the ``messages`` table, and will then generate :class:`~sqlalchemy.schema.Column`, :class:`~sqlalchemy.schema.ForeignKey`, and other objects corresponding to this information as though the :class:`~sqlalchemy.schema.Table` object were hand-constructed in Python. When tables are reflected, if a given table references another one via foreign key, a second :class:`~sqlalchemy.schema.Table` object is created within the :class:`~sqlalchemy.schema.MetaData` object representing the connection. Below, assume the table ``shopping_cart_items`` references a table named ``shopping_carts``. Reflecting the ``shopping_cart_items`` table has the effect that the ``shopping_carts`` table will also be loaded:: >>> shopping_cart_items = Table('shopping_cart_items', meta, autoload=True, autoload_with=engine) >>> 'shopping_carts' in meta.tables True The :class:`~sqlalchemy.schema.MetaData` has an interesting "singleton-like" behavior such that if you requested both tables individually, :class:`~sqlalchemy.schema.MetaData` will ensure that exactly one :class:`~sqlalchemy.schema.Table` object is created for each distinct table name. The :class:`~sqlalchemy.schema.Table` constructor actually returns to you the already-existing :class:`~sqlalchemy.schema.Table` object if one already exists with the given name. For example, below we can access the already generated ``shopping_carts`` table just by naming it:: shopping_carts = Table('shopping_carts', meta) Of course, it's a good idea to use ``autoload=True`` with the above table regardless. This is so that the table's attributes will be loaded if they have not been already. The autoload operation only occurs for the table if it hasn't already been loaded; once loaded, new calls to :class:`~sqlalchemy.schema.Table` with the same name will not re-issue any reflection queries. Overriding Reflected Columns ----------------------------- Individual columns can be overridden with explicit values when reflecting tables; this is handy for specifying custom datatypes, constraints such as primary keys that may not be configured within the database, etc.:: >>> mytable = Table('mytable', meta, ... Column('id', Integer, primary_key=True), # override reflected 'id' to have primary key ... Column('mydata', Unicode(50)), # override reflected 'mydata' to be Unicode ... autoload=True) Reflecting Views ----------------- The reflection system can also reflect views. Basic usage is the same as that of a table:: my_view = Table("some_view", metadata, autoload=True) Above, ``my_view`` is a :class:`~sqlalchemy.schema.Table` object with :class:`~sqlalchemy.schema.Column` objects representing the names and types of each column within the view "some_view". Usually, it's desired to have at least a primary key constraint when reflecting a view, if not foreign keys as well. View reflection doesn't extrapolate these constraints.
Use the "override" technique for this, specifying explicitly those columns which are part of the primary key or have foreign key constraints:: my_view = Table("some_view", metadata, Column("view_id", Integer, primary_key=True), Column("related_thing", Integer, ForeignKey("othertable.thing_id")), autoload=True ) Reflecting All Tables at Once ----------------------------- The :class:`~sqlalchemy.schema.MetaData` object can also get a listing of tables and reflect the full set. This is achieved by using the :func:`~sqlalchemy.schema.MetaData.reflect` method. After calling it, all located tables are present within the :class:`~sqlalchemy.schema.MetaData` object's dictionary of tables:: meta = MetaData() meta.reflect(bind=someengine) users_table = meta.tables['users'] addresses_table = meta.tables['addresses'] ``metadata.reflect()`` also provides a handy way to clear or delete all the rows in a database:: meta = MetaData() meta.reflect(bind=someengine) for table in reversed(meta.sorted_tables): someengine.execute(table.delete()) Fine Grained Reflection with Inspector -------------------------------------- A low level interface which provides a backend-agnostic system of loading lists of schema, table, column, and constraint descriptions from a given database is also available. This is known as the "Inspector":: from sqlalchemy import create_engine from sqlalchemy.engine import reflection engine = create_engine('...') insp = reflection.Inspector.from_engine(engine) print insp.get_table_names() .. autoclass:: sqlalchemy.engine.reflection.Inspector :members: :undoc-members: Limitations of Reflection ------------------------- It's important to note that the reflection process recreates :class:`.Table` metadata using only information which is represented in the relational database. This process by definition cannot restore aspects of a schema that aren't actually stored in the database. State which is not available from reflection includes but is not limited to: * Client side defaults, either Python functions or SQL expressions defined using the ``default`` keyword of :class:`.Column` (note this is separate from ``server_default``, which specifically is what's available via reflection). * Column information, e.g. data that might have been placed into the :attr:`.Column.info` dictionary * The value of the ``.quote`` setting for :class:`.Column` or :class:`.Table` * The association of a particular :class:`.Sequence` with a given :class:`.Column` The relational database also in many cases reports on table metadata in a different format than what was specified in SQLAlchemy. The :class:`.Table` objects returned from reflection cannot always be relied upon to produce the identical DDL as the original Python-defined :class:`.Table` objects. Areas where this occurs include server defaults, column-associated sequences and various idiosyncrasies regarding constraints and datatypes. Server side defaults may be returned with cast directives (typically Postgresql will include a ``::`` cast) or different quoting patterns than originally specified. Another category of limitation includes schema structures for which reflection is only partially or not yet defined. Recent improvements to reflection allow things like views, indexes and foreign key options to be reflected. As of this writing, structures like CHECK constraints, table comments, and triggers are not reflected. SQLAlchemy-0.8.4/doc/_sources/core/schema.txt0000644000076500000240000000325012251147171021552 0ustar classicstaff00000000000000.. 
_schema_toplevel: ========================== Schema Definition Language ========================== .. module:: sqlalchemy.schema This section references SQLAlchemy **schema metadata**, a comprehensive system of describing and inspecting database schemas. The core of SQLAlchemy's query and object mapping operations are supported by *database metadata*, which is comprised of Python objects that describe tables and other schema-level objects. These objects are at the core of three major types of operations - issuing CREATE and DROP statements (known as *DDL*), constructing SQL queries, and expressing information about structures that already exist within the database. Database metadata can be expressed by explicitly naming the various components and their properties, using constructs such as :class:`~sqlalchemy.schema.Table`, :class:`~sqlalchemy.schema.Column`, :class:`~sqlalchemy.schema.ForeignKey` and :class:`~sqlalchemy.schema.Sequence`, all of which are imported from the ``sqlalchemy.schema`` package. It can also be generated by SQLAlchemy using a process called *reflection*, which means you start with a single object such as :class:`~sqlalchemy.schema.Table`, assign it a name, and then instruct SQLAlchemy to load all the additional information related to that name from a particular engine source. A key feature of SQLAlchemy's database metadata constructs is that they are designed to be used in a *declarative* style which closely resembles that of real DDL. They are therefore most intuitive to those who have some background in creating real schema generation scripts. .. toctree:: :maxdepth: 1 metadata reflection defaults constraints ddl SQLAlchemy-0.8.4/doc/_sources/core/selectable.txt0000644000076500000240000000260312251147171022416 0ustar classicstaff00000000000000Selectables, Tables, FROM objects ================================= The term "selectable" refers to any object that rows can be selected from; in SQLAlchemy, these objects descend from :class:`.FromClause` and their distinguishing feature is their :attr:`.FromClause.c` attribute, which is a namespace of all the columns contained within the FROM clause (these elements are themselves :class:`.ColumnElement` subclasses). .. module:: sqlalchemy.sql.expression .. autofunction:: alias .. autofunction:: except_ .. autofunction:: except_all .. autofunction:: exists .. autofunction:: intersect .. autofunction:: intersect_all .. autofunction:: join .. autofunction:: outerjoin .. autofunction:: select .. autofunction:: subquery .. autofunction:: sqlalchemy.sql.expression.table .. autofunction:: union .. autofunction:: union_all .. autoclass:: Alias :members: :inherited-members: .. autoclass:: CompoundSelect :members: :inherited-members: .. autoclass:: CTE :members: :inherited-members: .. autoclass:: Executable :members: .. autoclass:: FromClause :members: .. autoclass:: Join :members: :inherited-members: .. autoclass:: ScalarSelect :members: .. autoclass:: Select :members: :inherited-members: .. autoclass:: Selectable :members: .. autoclass:: SelectBase :members: .. autoclass:: TableClause :members: :inherited-members: SQLAlchemy-0.8.4/doc/_sources/core/serializer.txt0000644000076500000240000000021312251147171022457 0ustar classicstaff00000000000000Expression Serializer Extension =============================== .. 
automodule:: sqlalchemy.ext.serializer :members: :undoc-members: SQLAlchemy-0.8.4/doc/_sources/core/sqlelement.txt0000644000076500000240000000445412251147171022472 0ustar classicstaff00000000000000Column Elements and Expressions =============================== .. module:: sqlalchemy.sql.expression The most fundamental part of the SQL expression API are the "column elements", which allow for basic SQL expression support. The core of all SQL expression constructs is the :class:`.ClauseElement`, which is the base for several sub-branches. The :class:`.ColumnElement` class is the fundamental unit used to construct any kind of typed SQL expression. .. autofunction:: and_ .. autofunction:: asc .. autofunction:: between .. autofunction:: bindparam .. autofunction:: case .. autofunction:: cast .. autofunction:: sqlalchemy.sql.expression.column .. autofunction:: collate .. autofunction:: desc .. autofunction:: distinct .. autofunction:: extract .. autofunction:: false .. autodata:: func .. autofunction:: label .. autofunction:: literal .. autofunction:: literal_column .. autofunction:: not_ .. autofunction:: null .. autofunction:: nullsfirst .. autofunction:: nullslast .. autofunction:: or_ .. autofunction:: outparam .. autofunction:: over .. autofunction:: text .. autofunction:: true .. autofunction:: tuple_ .. autofunction:: type_coerce .. autoclass:: BinaryExpression :members: :inherited-members: .. autoclass:: BindParameter :members: :inherited-members: .. autoclass:: Case :members: .. autoclass:: Cast :members: .. autoclass:: ClauseElement :members: .. autoclass:: ClauseList :members: .. autoclass:: ColumnClause :members: :inherited-members: .. autoclass:: ColumnCollection :members: .. autoclass:: ColumnElement :members: :inherited-members: :undoc-members: .. autoclass:: sqlalchemy.sql.operators.ColumnOperators :members: :special-members: :inherited-members: .. autoclass:: Extract :members: .. autoclass:: sqlalchemy.sql.expression.False_ :members: .. autoclass:: Label :members: .. autoclass:: sqlalchemy.sql.expression.Null :members: .. autoclass:: Over :members: .. autoclass:: TextClause :members: .. autoclass:: Tuple :members: .. autoclass:: sqlalchemy.sql.expression.True_ :members: .. autoclass:: sqlalchemy.sql.operators.custom_op :members: .. autoclass:: sqlalchemy.sql.operators.Operators :members: :special-members: .. autoclass:: UnaryExpression :members: SQLAlchemy-0.8.4/doc/_sources/core/tutorial.txt0000644000076500000240000020661012251147171022162 0ustar classicstaff00000000000000.. _sqlexpression_toplevel: ================================ SQL Expression Language Tutorial ================================ The SQLAlchemy Expression Language presents a system of representing relational database structures and expressions using Python constructs. These constructs are modeled to resemble those of the underlying database as closely as possible, while providing a modicum of abstraction of the various implementation differences between database backends. While the constructs attempt to represent equivalent concepts between backends with consistent structures, they do not conceal useful concepts that are unique to particular subsets of backends. The Expression Language therefore presents a method of writing backend-neutral SQL expressions, but does not attempt to enforce that expressions are backend-neutral. The Expression Language is in contrast to the Object Relational Mapper, which is a distinct API that builds on top of the Expression Language. 
Whereas the ORM, introduced in :ref:`ormtutorial_toplevel`, presents a high level and abstracted pattern of usage, which itself is an example of applied usage of the Expression Language, the Expression Language presents a system of representing the primitive constructs of the relational database directly without opinion. While there is overlap among the usage patterns of the ORM and the Expression Language, the similarities are more superficial than they may at first appear. One approaches the structure and content of data from the perspective of a user-defined `domain model `_ which is transparently persisted and refreshed from its underlying storage model. The other approaches it from the perspective of literal schema and SQL expression representations which are explicitly composed into messages consumed individually by the database. A successful application may be constructed using the Expression Language exclusively, though the application will need to define its own system of translating application concepts into individual database messages and from individual database result sets. Alternatively, an application constructed with the ORM may, in advanced scenarios, make occasional usage of the Expression Language directly in certain areas where specific database interactions are required. The following tutorial is in doctest format, meaning each ``>>>`` line represents something you can type at a Python command prompt, and the following text represents the expected return value. The tutorial has no prerequisites. Version Check ============= A quick check to verify that we are on at least **version 0.8** of SQLAlchemy: .. sourcecode:: pycon+sql >>> import sqlalchemy >>> sqlalchemy.__version__ # doctest:+SKIP 0.8.0 Connecting ========== For this tutorial we will use an in-memory-only SQLite database. This is an easy way to test things without needing to have an actual database defined anywhere. To connect we use :func:`~sqlalchemy.create_engine`: .. sourcecode:: pycon+sql >>> from sqlalchemy import create_engine >>> engine = create_engine('sqlite:///:memory:', echo=True) The ``echo`` flag is a shortcut to setting up SQLAlchemy logging, which is accomplished via Python's standard ``logging`` module. With it enabled, we'll see all the generated SQL produced. If you are working through this tutorial and want less output generated, set it to ``False``. This tutorial will format the SQL behind a popup window so it doesn't get in our way; just click the "SQL" links to see what's being generated. Define and Create Tables ========================= The SQL Expression Language constructs its expressions in most cases against table columns. In SQLAlchemy, a column is most often represented by an object called :class:`~sqlalchemy.schema.Column`, and in all cases a :class:`~sqlalchemy.schema.Column` is associated with a :class:`~sqlalchemy.schema.Table`. A collection of :class:`~sqlalchemy.schema.Table` objects and their associated child objects is referred to as **database metadata**. In this tutorial we will explicitly lay out several :class:`~sqlalchemy.schema.Table` objects, but note that SA can also "import" whole sets of :class:`~sqlalchemy.schema.Table` objects automatically from an existing database (this process is called **table reflection**). We define our tables all within a catalog called :class:`~sqlalchemy.schema.MetaData`, using the :class:`~sqlalchemy.schema.Table` construct, which resembles regular SQL CREATE TABLE statements. 
We'll make two tables, one of which represents "users" in an application, and another which represents zero or more "email addreses" for each row in the "users" table: .. sourcecode:: pycon+sql >>> from sqlalchemy import Table, Column, Integer, String, MetaData, ForeignKey >>> metadata = MetaData() >>> users = Table('users', metadata, ... Column('id', Integer, primary_key=True), ... Column('name', String), ... Column('fullname', String), ... ) >>> addresses = Table('addresses', metadata, ... Column('id', Integer, primary_key=True), ... Column('user_id', None, ForeignKey('users.id')), ... Column('email_address', String, nullable=False) ... ) All about how to define :class:`~sqlalchemy.schema.Table` objects, as well as how to create them from an existing database automatically, is described in :ref:`metadata_toplevel`. Next, to tell the :class:`~sqlalchemy.schema.MetaData` we'd actually like to create our selection of tables for real inside the SQLite database, we use :func:`~sqlalchemy.schema.MetaData.create_all`, passing it the ``engine`` instance which points to our database. This will check for the presence of each table first before creating, so it's safe to call multiple times: .. sourcecode:: pycon+sql {sql}>>> metadata.create_all(engine) #doctest: +NORMALIZE_WHITESPACE PRAGMA table_info("users") () PRAGMA table_info("addresses") () CREATE TABLE users ( id INTEGER NOT NULL, name VARCHAR, fullname VARCHAR, PRIMARY KEY (id) ) () COMMIT CREATE TABLE addresses ( id INTEGER NOT NULL, user_id INTEGER, email_address VARCHAR NOT NULL, PRIMARY KEY (id), FOREIGN KEY(user_id) REFERENCES users (id) ) () COMMIT .. note:: Users familiar with the syntax of CREATE TABLE may notice that the VARCHAR columns were generated without a length; on SQLite and Postgresql, this is a valid datatype, but on others, it's not allowed. So if running this tutorial on one of those databases, and you wish to use SQLAlchemy to issue CREATE TABLE, a "length" may be provided to the :class:`~sqlalchemy.types.String` type as below:: Column('name', String(50)) The length field on :class:`~sqlalchemy.types.String`, as well as similar precision/scale fields available on :class:`~sqlalchemy.types.Integer`, :class:`~sqlalchemy.types.Numeric`, etc. are not referenced by SQLAlchemy other than when creating tables. Additionally, Firebird and Oracle require sequences to generate new primary key identifiers, and SQLAlchemy doesn't generate or assume these without being instructed. For that, you use the :class:`~sqlalchemy.schema.Sequence` construct:: from sqlalchemy import Sequence Column('id', Integer, Sequence('user_id_seq'), primary_key=True) A full, foolproof :class:`~sqlalchemy.schema.Table` is therefore:: users = Table('users', metadata, Column('id', Integer, Sequence('user_id_seq'), primary_key=True), Column('name', String(50)), Column('fullname', String(50)), Column('password', String(12)) ) We include this more verbose :class:`~.schema.Table` construct separately to highlight the difference between a minimal construct geared primarily towards in-Python usage only, versus one that will be used to emit CREATE TABLE statements on a particular set of backends with more stringent requirements. .. _coretutorial_insert_expressions: Insert Expressions ================== The first SQL expression we'll create is the :class:`~sqlalchemy.sql.expression.Insert` construct, which represents an INSERT statement. 
This is typically created relative to its target table:: >>> ins = users.insert() To see a sample of the SQL this construct produces, use the ``str()`` function:: >>> str(ins) 'INSERT INTO users (id, name, fullname) VALUES (:id, :name, :fullname)' Notice above that the INSERT statement names every column in the ``users`` table. This can be limited by using the ``values()`` method, which establishes the VALUES clause of the INSERT explicitly:: >>> ins = users.insert().values(name='jack', fullname='Jack Jones') >>> str(ins) 'INSERT INTO users (name, fullname) VALUES (:name, :fullname)' Above, while the ``values`` method limited the VALUES clause to just two columns, the actual data we placed in ``values`` didn't get rendered into the string; instead we got named bind parameters. As it turns out, our data *is* stored within our :class:`~sqlalchemy.sql.expression.Insert` construct, but it typically only comes out when the statement is actually executed; since the data consists of literal values, SQLAlchemy automatically generates bind parameters for them. We can peek at this data for now by looking at the compiled form of the statement:: >>> ins.compile().params #doctest: +NORMALIZE_WHITESPACE {'fullname': 'Jack Jones', 'name': 'jack'} Executing ========== The interesting part of an :class:`~sqlalchemy.sql.expression.Insert` is executing it. In this tutorial, we will generally focus on the most explicit method of executing a SQL construct, and later touch upon some "shortcut" ways to do it. The ``engine`` object we created is a repository for database connections capable of issuing SQL to the database. To acquire a connection, we use the ``connect()`` method:: >>> conn = engine.connect() >>> conn #doctest: +ELLIPSIS The :class:`~sqlalchemy.engine.Connection` object represents an actively checked out DBAPI connection resource. Lets feed it our :class:`~sqlalchemy.sql.expression.Insert` object and see what happens: .. sourcecode:: pycon+sql >>> result = conn.execute(ins) {opensql}INSERT INTO users (name, fullname) VALUES (?, ?) ('jack', 'Jack Jones') COMMIT So the INSERT statement was now issued to the database. Although we got positional "qmark" bind parameters instead of "named" bind parameters in the output. How come ? Because when executed, the :class:`~sqlalchemy.engine.Connection` used the SQLite **dialect** to help generate the statement; when we use the ``str()`` function, the statement isn't aware of this dialect, and falls back onto a default which uses named parameters. We can view this manually as follows: .. sourcecode:: pycon+sql >>> ins.bind = engine >>> str(ins) 'INSERT INTO users (name, fullname) VALUES (?, ?)' What about the ``result`` variable we got when we called ``execute()`` ? As the SQLAlchemy :class:`~sqlalchemy.engine.Connection` object references a DBAPI connection, the result, known as a :class:`~sqlalchemy.engine.ResultProxy` object, is analogous to the DBAPI cursor object. In the case of an INSERT, we can get important information from it, such as the primary key values which were generated from our statement: .. sourcecode:: pycon+sql >>> result.inserted_primary_key [1] The value of ``1`` was automatically generated by SQLite, but only because we did not specify the ``id`` column in our :class:`~sqlalchemy.sql.expression.Insert` statement; otherwise, our explicit value would have been used. 
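For illustration, here is a small sketch of that "explicit value" case; this is not part of the running doctest, and the row values here are made up::

    # a made-up row, supplying the primary key ourselves rather than letting SQLite generate it
    ins = users.insert().values(id=9, name='nancy', fullname='Nancy Drew')
    result = conn.execute(ins)
    print result.inserted_primary_key   # [9] - the value we supplied, not a generated one
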
In either case, SQLAlchemy always knows how to get at a newly generated primary key value, even though the method of generating them is different across different databases; each database's :class:`~sqlalchemy.engine.base.Dialect` knows the specific steps needed to determine the correct value (or values; note that ``inserted_primary_key`` returns a list so that it supports composite primary keys). Executing Multiple Statements ============================== Our insert example above was intentionally a little drawn out to show some various behaviors of expression language constructs. In the usual case, an :class:`~sqlalchemy.sql.expression.Insert` statement is usually compiled against the parameters sent to the ``execute()`` method on :class:`~sqlalchemy.engine.Connection`, so that there's no need to use the ``values`` keyword with :class:`~sqlalchemy.sql.expression.Insert`. Lets create a generic :class:`~sqlalchemy.sql.expression.Insert` statement again and use it in the "normal" way: .. sourcecode:: pycon+sql >>> ins = users.insert() >>> conn.execute(ins, id=2, name='wendy', fullname='Wendy Williams') # doctest: +ELLIPSIS {opensql}INSERT INTO users (id, name, fullname) VALUES (?, ?, ?) (2, 'wendy', 'Wendy Williams') COMMIT {stop} Above, because we specified all three columns in the ``execute()`` method, the compiled :class:`~.expression.Insert` included all three columns. The :class:`~.expression.Insert` statement is compiled at execution time based on the parameters we specified; if we specified fewer parameters, the :class:`~.expression.Insert` would have fewer entries in its VALUES clause. To issue many inserts using DBAPI's ``executemany()`` method, we can send in a list of dictionaries each containing a distinct set of parameters to be inserted, as we do here to add some email addresses: .. sourcecode:: pycon+sql >>> conn.execute(addresses.insert(), [ # doctest: +ELLIPSIS ... {'user_id': 1, 'email_address' : 'jack@yahoo.com'}, ... {'user_id': 1, 'email_address' : 'jack@msn.com'}, ... {'user_id': 2, 'email_address' : 'www@www.org'}, ... {'user_id': 2, 'email_address' : 'wendy@aol.com'}, ... ]) {opensql}INSERT INTO addresses (user_id, email_address) VALUES (?, ?) ((1, 'jack@yahoo.com'), (1, 'jack@msn.com'), (2, 'www@www.org'), (2, 'wendy@aol.com')) COMMIT {stop} Above, we again relied upon SQLite's automatic generation of primary key identifiers for each ``addresses`` row. When executing multiple sets of parameters, each dictionary must have the **same** set of keys; i.e. you cant have fewer keys in some dictionaries than others. This is because the :class:`~sqlalchemy.sql.expression.Insert` statement is compiled against the **first** dictionary in the list, and it's assumed that all subsequent argument dictionaries are compatible with that statement. .. _coretutorial_selecting: Selecting ========== We began with inserts just so that our test database had some data in it. The more interesting part of the data is selecting it ! We'll cover UPDATE and DELETE statements later. The primary construct used to generate SELECT statements is the :func:`.select` function: .. sourcecode:: pycon+sql >>> from sqlalchemy.sql import select >>> s = select([users]) >>> result = conn.execute(s) # doctest: +NORMALIZE_WHITESPACE {opensql}SELECT users.id, users.name, users.fullname FROM users () Above, we issued a basic :func:`.select` call, placing the ``users`` table within the COLUMNS clause of the select, and then executing. 
SQLAlchemy expanded the ``users`` table into the set of each of its columns, and also generated a FROM clause for us. The result returned is again a :class:`~sqlalchemy.engine.ResultProxy` object, which acts much like a DBAPI cursor, including methods such as :func:`~sqlalchemy.engine.ResultProxy.fetchone` and :func:`~sqlalchemy.engine.ResultProxy.fetchall`. The easiest way to get rows from it is to just iterate: .. sourcecode:: pycon+sql >>> for row in result: ... print row (1, u'jack', u'Jack Jones') (2, u'wendy', u'Wendy Williams') Above, we see that printing each row produces a simple tuple-like result. We have more options at accessing the data in each row. One very common way is through dictionary access, using the string names of columns: .. sourcecode:: pycon+sql {sql}>>> result = conn.execute(s) # doctest: +NORMALIZE_WHITESPACE SELECT users.id, users.name, users.fullname FROM users () {stop}>>> row = result.fetchone() >>> print "name:", row['name'], "; fullname:", row['fullname'] name: jack ; fullname: Jack Jones Integer indexes work as well: .. sourcecode:: pycon+sql >>> row = result.fetchone() >>> print "name:", row[1], "; fullname:", row[2] name: wendy ; fullname: Wendy Williams But another way, whose usefulness will become apparent later on, is to use the :class:`~sqlalchemy.schema.Column` objects directly as keys: .. sourcecode:: pycon+sql {sql}>>> for row in conn.execute(s): # doctest: +NORMALIZE_WHITESPACE ... print "name:", row[users.c.name], "; fullname:", row[users.c.fullname] SELECT users.id, users.name, users.fullname FROM users () {stop}name: jack ; fullname: Jack Jones name: wendy ; fullname: Wendy Williams Result sets which have pending rows remaining should be explicitly closed before discarding. While the cursor and connection resources referenced by the :class:`~sqlalchemy.engine.ResultProxy` will be respectively closed and returned to the connection pool when the object is garbage collected, it's better to make it explicit as some database APIs are very picky about such things: .. sourcecode:: pycon+sql >>> result.close() If we'd like to more carefully control the columns which are placed in the COLUMNS clause of the select, we reference individual :class:`~sqlalchemy.schema.Column` objects from our :class:`~sqlalchemy.schema.Table`. These are available as named attributes off the ``c`` attribute of the :class:`~sqlalchemy.schema.Table` object: .. sourcecode:: pycon+sql >>> s = select([users.c.name, users.c.fullname]) {sql}>>> result = conn.execute(s) # doctest: +NORMALIZE_WHITESPACE SELECT users.name, users.fullname FROM users () {stop}>>> for row in result: #doctest: +NORMALIZE_WHITESPACE ... print row (u'jack', u'Jack Jones') (u'wendy', u'Wendy Williams') Lets observe something interesting about the FROM clause. Whereas the generated statement contains two distinct sections, a "SELECT columns" part and a "FROM table" part, our :func:`.select` construct only has a list containing columns. How does this work ? Let's try putting *two* tables into our :func:`.select` statement: .. sourcecode:: pycon+sql {sql}>>> for row in conn.execute(select([users, addresses])): ... 
print row # doctest: +NORMALIZE_WHITESPACE SELECT users.id, users.name, users.fullname, addresses.id, addresses.user_id, addresses.email_address FROM users, addresses () {stop}(1, u'jack', u'Jack Jones', 1, 1, u'jack@yahoo.com') (1, u'jack', u'Jack Jones', 2, 1, u'jack@msn.com') (1, u'jack', u'Jack Jones', 3, 2, u'www@www.org') (1, u'jack', u'Jack Jones', 4, 2, u'wendy@aol.com') (2, u'wendy', u'Wendy Williams', 1, 1, u'jack@yahoo.com') (2, u'wendy', u'Wendy Williams', 2, 1, u'jack@msn.com') (2, u'wendy', u'Wendy Williams', 3, 2, u'www@www.org') (2, u'wendy', u'Wendy Williams', 4, 2, u'wendy@aol.com') It placed **both** tables into the FROM clause. But also, it made a real mess. Those who are familiar with SQL joins know that this is a **Cartesian product**; each row from the ``users`` table is produced against each row from the ``addresses`` table. So to put some sanity into this statement, we need a WHERE clause. We do that using :meth:`.Select.where`: .. sourcecode:: pycon+sql >>> s = select([users, addresses]).where(users.c.id == addresses.c.user_id) {sql}>>> for row in conn.execute(s): ... print row # doctest: +NORMALIZE_WHITESPACE SELECT users.id, users.name, users.fullname, addresses.id, addresses.user_id, addresses.email_address FROM users, addresses WHERE users.id = addresses.user_id () {stop}(1, u'jack', u'Jack Jones', 1, 1, u'jack@yahoo.com') (1, u'jack', u'Jack Jones', 2, 1, u'jack@msn.com') (2, u'wendy', u'Wendy Williams', 3, 2, u'www@www.org') (2, u'wendy', u'Wendy Williams', 4, 2, u'wendy@aol.com') So that looks a lot better, we added an expression to our :func:`.select` which had the effect of adding ``WHERE users.id = addresses.user_id`` to our statement, and our results were managed down so that the join of ``users`` and ``addresses`` rows made sense. But let's look at that expression? It's using just a Python equality operator between two different :class:`~sqlalchemy.schema.Column` objects. It should be clear that something is up. Saying ``1 == 1`` produces ``True``, and ``1 == 2`` produces ``False``, not a WHERE clause. So lets see exactly what that expression is doing: .. sourcecode:: pycon+sql >>> users.c.id == addresses.c.user_id #doctest: +ELLIPSIS Wow, surprise ! This is neither a ``True`` nor a ``False``. Well what is it ? .. sourcecode:: pycon+sql >>> str(users.c.id == addresses.c.user_id) 'users.id = addresses.user_id' As you can see, the ``==`` operator is producing an object that is very much like the :class:`~.expression.Insert` and :func:`.select` objects we've made so far, thanks to Python's ``__eq__()`` builtin; you call ``str()`` on it and it produces SQL. By now, one can see that everything we are working with is ultimately the same type of object. SQLAlchemy terms the base class of all of these expressions as :class:`~.expression.ColumnElement`. Operators ========== Since we've stumbled upon SQLAlchemy's operator paradigm, let's go through some of its capabilities. We've seen how to equate two columns to each other: .. sourcecode:: pycon+sql >>> print users.c.id == addresses.c.user_id users.id = addresses.user_id If we use a literal value (a literal meaning, not a SQLAlchemy clause object), we get a bind parameter: .. sourcecode:: pycon+sql >>> print users.c.id == 7 users.id = :id_1 The ``7`` literal is embedded the resulting :class:`~.expression.ColumnElement`; we can use the same trick we did with the :class:`~sqlalchemy.sql.expression.Insert` object to see it: .. 
sourcecode:: pycon+sql >>> (users.c.id == 7).compile().params {u'id_1': 7} Most Python operators, as it turns out, produce a SQL expression here, like equals, not equals, etc.: .. sourcecode:: pycon+sql >>> print users.c.id != 7 users.id != :id_1 >>> # None converts to IS NULL >>> print users.c.name == None users.name IS NULL >>> # reverse works too >>> print 'fred' > users.c.name users.name < :name_1 If we add two integer columns together, we get an addition expression: .. sourcecode:: pycon+sql >>> print users.c.id + addresses.c.id users.id + addresses.id Interestingly, the type of the :class:`~sqlalchemy.schema.Column` is important! If we use ``+`` with two string based columns (recall we put types like :class:`~sqlalchemy.types.Integer` and :class:`~sqlalchemy.types.String` on our :class:`~sqlalchemy.schema.Column` objects at the beginning), we get something different: .. sourcecode:: pycon+sql >>> print users.c.name + users.c.fullname users.name || users.fullname Where ``||`` is the string concatenation operator used on most databases. But not all of them. MySQL users, fear not: .. sourcecode:: pycon+sql >>> print (users.c.name + users.c.fullname).\ ... compile(bind=create_engine('mysql://')) concat(users.name, users.fullname) The above illustrates the SQL that's generated for an :class:`~sqlalchemy.engine.Engine` that's connected to a MySQL database; the ``||`` operator now compiles as MySQL's ``concat()`` function. If you have come across an operator which really isn't available, you can always use the :meth:`.ColumnOperators.op` method; this generates whatever operator you need: .. sourcecode:: pycon+sql >>> print users.c.name.op('tiddlywinks')('foo') users.name tiddlywinks :name_1 This function can also be used to make bitwise operators explicit. For example:: somecolumn.op('&')(0xff) is a bitwise AND of the value in `somecolumn`. Operator Customization ----------------------- While :meth:`.ColumnOperators.op` is handy to get at a custom operator in a hurry, the Core supports fundamental customization and extension of the operator system at the type level. The behavior of existing operators can be modified on a per-type basis, and new operations can be defined which become available for all column expressions that are part of that particular type. See the section :ref:`types_operators` for a description. Conjunctions ============= We'd like to show off some of our operators inside of :func:`.select` constructs. But we need to lump them together a little more, so let's first introduce some conjunctions. Conjunctions are those little words like AND and OR that put things together. We'll also hit upon NOT. :func:`.and_`, :func:`.or_`, and :func:`.not_` can work from the corresponding functions SQLAlchemy provides (notice we also throw in a :meth:`~.ColumnOperators.like`): .. sourcecode:: pycon+sql >>> from sqlalchemy.sql import and_, or_, not_ >>> print and_( ... users.c.name.like('j%'), ... users.c.id == addresses.c.user_id, #doctest: +NORMALIZE_WHITESPACE ... or_( ... addresses.c.email_address == 'wendy@aol.com', ... addresses.c.email_address == 'jack@yahoo.com' ... ), ... not_(users.c.id > 5) ... ) users.name LIKE :name_1 AND users.id = addresses.user_id AND (addresses.email_address = :email_address_1 OR addresses.email_address = :email_address_2) AND users.id <= :id_1 And you can also use the re-jiggered bitwise AND, OR and NOT operators, although because of Python operator precedence you have to watch your parenthesis: .. 
sourcecode:: pycon+sql >>> print users.c.name.like('j%') & (users.c.id == addresses.c.user_id) & \ ... ( ... (addresses.c.email_address == 'wendy@aol.com') | \ ... (addresses.c.email_address == 'jack@yahoo.com') ... ) \ ... & ~(users.c.id>5) # doctest: +NORMALIZE_WHITESPACE users.name LIKE :name_1 AND users.id = addresses.user_id AND (addresses.email_address = :email_address_1 OR addresses.email_address = :email_address_2) AND users.id <= :id_1 So with all of this vocabulary, let's select all users who have an email address at AOL or MSN, whose name starts with a letter between "m" and "z", and we'll also generate a column containing their full name combined with their email address. We will add two new constructs to this statement, :meth:`~.ColumnOperators.between` and :meth:`~.ColumnElement.label`. :meth:`~.ColumnOperators.between` produces a BETWEEN clause, and :meth:`~.ColumnElement.label` is used in a column expression to produce labels using the ``AS`` keyword; it's recommended when selecting from expressions that otherwise would not have a name: .. sourcecode:: pycon+sql >>> s = select([(users.c.fullname + ... ", " + addresses.c.email_address). ... label('title')]).\ ... where( ... and_( ... users.c.id == addresses.c.user_id, ... users.c.name.between('m', 'z'), ... or_( ... addresses.c.email_address.like('%@aol.com'), ... addresses.c.email_address.like('%@msn.com') ... ) ... ) ... ) >>> conn.execute(s).fetchall() #doctest: +NORMALIZE_WHITESPACE SELECT users.fullname || ? || addresses.email_address AS title FROM users, addresses WHERE users.id = addresses.user_id AND users.name BETWEEN ? AND ? AND (addresses.email_address LIKE ? OR addresses.email_address LIKE ?) (', ', 'm', 'z', '%@aol.com', '%@msn.com') [(u'Wendy Williams, wendy@aol.com',)] Once again, SQLAlchemy figured out the FROM clause for our statement. In fact it will determine the FROM clause based on all of its other bits; the columns clause, the where clause, and also some other elements which we haven't covered yet, which include ORDER BY, GROUP BY, and HAVING. A shortcut to using :func:`.and_` is to chain together multiple :meth:`~.Select.where` clauses. The above can also be written as: .. sourcecode:: pycon+sql >>> s = select([(users.c.fullname + ... ", " + addresses.c.email_address). ... label('title')]).\ ... where(users.c.id == addresses.c.user_id).\ ... where(users.c.name.between('m', 'z')).\ ... where( ... or_( ... addresses.c.email_address.like('%@aol.com'), ... addresses.c.email_address.like('%@msn.com') ... ) ... ) >>> conn.execute(s).fetchall() #doctest: +NORMALIZE_WHITESPACE SELECT users.fullname || ? || addresses.email_address AS title FROM users, addresses WHERE users.id = addresses.user_id AND users.name BETWEEN ? AND ? AND (addresses.email_address LIKE ? OR addresses.email_address LIKE ?) (', ', 'm', 'z', '%@aol.com', '%@msn.com') [(u'Wendy Williams, wendy@aol.com',)] The way that we can build up a :func:`.select` construct through successive method calls is called :term:`method chaining`. .. _sqlexpression_text: Using Text =========== Our last example really became a handful to type. Going from what one understands to be a textual SQL expression into a Python construct which groups components together in a programmatic style can be hard. That's why SQLAlchemy lets you just use strings too. The :func:`~.expression.text` construct represents any textual statement, in a backend-agnostic way. To use bind parameters with :func:`~.expression.text`, always use the named colon format. 
Such as below, we create a :func:`~.expression.text` and execute it, feeding in the bind parameters to the :meth:`~.Connection.execute` method: .. sourcecode:: pycon+sql >>> from sqlalchemy.sql import text >>> s = text( ... "SELECT users.fullname || ', ' || addresses.email_address AS title " ... "FROM users, addresses " ... "WHERE users.id = addresses.user_id " ... "AND users.name BETWEEN :x AND :y " ... "AND (addresses.email_address LIKE :e1 " ... "OR addresses.email_address LIKE :e2)") {sql}>>> conn.execute(s, x='m', y='z', e1='%@aol.com', e2='%@msn.com').fetchall() # doctest:+NORMALIZE_WHITESPACE SELECT users.fullname || ', ' || addresses.email_address AS title FROM users, addresses WHERE users.id = addresses.user_id AND users.name BETWEEN ? AND ? AND (addresses.email_address LIKE ? OR addresses.email_address LIKE ?) ('m', 'z', '%@aol.com', '%@msn.com') {stop}[(u'Wendy Williams, wendy@aol.com',)] To gain a "hybrid" approach, the :func:`.select` construct accepts strings for most of its arguments. Below we combine the usage of strings with our constructed :func:`.select` object, by using the :func:`.select` object to structure the statement, and strings to provide all the content within the structure. For this example, SQLAlchemy is not given any :class:`~sqlalchemy.schema.Column` or :class:`~sqlalchemy.schema.Table` objects in any of its expressions, so it cannot generate a FROM clause. So we also use the :meth:`~.Select.select_from` method, which accepts a :class:`.FromClause` or string expression to be placed within the FROM clause: .. sourcecode:: pycon+sql >>> s = select([ ... "users.fullname || ', ' || addresses.email_address AS title" ... ]).\ ... where( ... and_( ... "users.id = addresses.user_id", ... "users.name BETWEEN 'm' AND 'z'", ... "(addresses.email_address LIKE :x OR addresses.email_address LIKE :y)" ... ) ... ).select_from('users, addresses') {sql}>>> conn.execute(s, x='%@aol.com', y='%@msn.com').fetchall() #doctest: +NORMALIZE_WHITESPACE SELECT users.fullname || ', ' || addresses.email_address AS title FROM users, addresses WHERE users.id = addresses.user_id AND users.name BETWEEN 'm' AND 'z' AND (addresses.email_address LIKE ? OR addresses.email_address LIKE ?) ('%@aol.com', '%@msn.com') {stop}[(u'Wendy Williams, wendy@aol.com',)] Going from constructed SQL to text, we lose some capabilities. We lose the capability for SQLAlchemy to compile our expression to a specific target database; above, our expression won't work with MySQL since it has no ``||`` construct. It also becomes more tedious for SQLAlchemy to be made aware of the datatypes in use; for example, if our bind parameters required UTF-8 encoding before going in, or conversion from a Python ``datetime`` into a string (as is required with SQLite), we would have to add extra information to our :func:`~.expression.text` construct. Similar issues arise on the result set side, where SQLAlchemy also performs type-specific data conversion in some cases; still more information can be added to :func:`~.expression.text` to work around this. But what we really lose from our statement is the ability to manipulate it, transform it, and analyze it. These features are critical when using the ORM, which makes heavy usage of relational transformations. To show off what we mean, we'll first introduce the ALIAS construct and the JOIN construct, just so we have some juicier bits to play with. 
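One quick aside before we do: earlier we noted that datatype information has to be added to a :func:`~.expression.text` construct by hand. A rough sketch of how that can look in this version, using the ``bindparams`` and ``typemap`` arguments of :func:`~.expression.text` (the ``:pattern`` parameter name is made up for illustration)::

    from sqlalchemy import Integer, String
    from sqlalchemy.sql import bindparam, text

    s = text(
        "SELECT users.id, users.name FROM users WHERE users.name LIKE :pattern",
        bindparams=[bindparam('pattern', type_=String)],  # types for inbound bind parameters
        typemap={'id': Integer, 'name': String}           # types for result-row columns
    )

With the types declared up front, bind parameter and result-row processing behave much as they would for a constructed expression.
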
Using Aliases ============== The alias in SQL corresponds to a "renamed" version of a table or SELECT statement, which occurs anytime you say "SELECT .. FROM sometable AS someothername". The ``AS`` creates a new name for the table. Aliases are a key construct as they allow any table or subquery to be referenced by a unique name. In the case of a table, this allows the same table to be named in the FROM clause multiple times. In the case of a SELECT statement, it provides a parent name for the columns represented by the statement, allowing them to be referenced relative to this name. In SQLAlchemy, any :class:`.Table`, :func:`.select` construct, or other selectable can be turned into an alias using the :meth:`.FromClause.alias` method, which produces a :class:`.Alias` construct. As an example, suppose we know that our user ``jack`` has two particular email addresses. How can we locate jack based on the combination of those two addresses? To accomplish this, we'd use a join to the ``addresses`` table, once for each address. We create two :class:`.Alias` constructs against ``addresses``, and then use them both within a :func:`.select` construct: .. sourcecode:: pycon+sql >>> a1 = addresses.alias() >>> a2 = addresses.alias() >>> s = select([users]).\ ... where(and_( ... users.c.id == a1.c.user_id, ... users.c.id == a2.c.user_id, ... a1.c.email_address == 'jack@msn.com', ... a2.c.email_address == 'jack@yahoo.com' ... )) {sql}>>> conn.execute(s).fetchall() # doctest: +NORMALIZE_WHITESPACE SELECT users.id, users.name, users.fullname FROM users, addresses AS addresses_1, addresses AS addresses_2 WHERE users.id = addresses_1.user_id AND users.id = addresses_2.user_id AND addresses_1.email_address = ? AND addresses_2.email_address = ? ('jack@msn.com', 'jack@yahoo.com') {stop}[(1, u'jack', u'Jack Jones')] Note that the :class:`.Alias` construct generated the names ``addresses_1`` and ``addresses_2`` in the final SQL result. The generation of these names is determined by the position of the construct within the statement. If we created a query using only the second ``a2`` alias, the name would come out as ``addresses_1``. The generation of the names is also *deterministic*, meaning the same SQLAlchemy statement construct will produce the identical SQL string each time it is rendered for a particular dialect. Since on the outside, we refer to the alias using the :class:`.Alias` construct itself, we don't need to be concerned about the generated name. However, for the purposes of debugging, it can be specified by passing a string name to the :meth:`.FromClause.alias` method:: >>> a1 = addresses.alias('a1') Aliases can of course be used for anything which you can SELECT from, including SELECT statements themselves. We can self-join the ``users`` table back to the :func:`.select` we've created by making an alias of the entire statement. The ``correlate(None)`` directive is to avoid SQLAlchemy's attempt to "correlate" the inner ``users`` table with the outer one: .. sourcecode:: pycon+sql >>> a1 = s.correlate(None).alias() >>> s = select([users.c.name]).where(users.c.id == a1.c.id) {sql}>>> conn.execute(s).fetchall() # doctest: +NORMALIZE_WHITESPACE SELECT users.name FROM users, (SELECT users.id AS id, users.name AS name, users.fullname AS fullname FROM users, addresses AS addresses_1, addresses AS addresses_2 WHERE users.id = addresses_1.user_id AND users.id = addresses_2.user_id AND addresses_1.email_address = ? AND addresses_2.email_address = ?) 
AS anon_1 WHERE users.id = anon_1.id ('jack@msn.com', 'jack@yahoo.com') {stop}[(u'jack',)] Using Joins ============ We're halfway along to being able to construct any SELECT expression. The next cornerstone of the SELECT is the JOIN expression. We've already been doing joins in our examples, by just placing two tables in either the columns clause or the where clause of the :func:`.select` construct. But if we want to make a real "JOIN" or "OUTERJOIN" construct, we use the :meth:`~.FromClause.join` and :meth:`~.FromClause.outerjoin` methods, most commonly accessed from the left table in the join: .. sourcecode:: pycon+sql >>> print users.join(addresses) users JOIN addresses ON users.id = addresses.user_id The alert reader will see more surprises; SQLAlchemy figured out how to JOIN the two tables ! The ON condition of the join, as it's called, was automatically generated based on the :class:`~sqlalchemy.schema.ForeignKey` object which we placed on the ``addresses`` table way at the beginning of this tutorial. Already the ``join()`` construct is looking like a much better way to join tables. Of course you can join on whatever expression you want, such as if we want to join on all users who use the same name in their email address as their username: .. sourcecode:: pycon+sql >>> print users.join(addresses, ... addresses.c.email_address.like(users.c.name + '%') ... ) users JOIN addresses ON addresses.email_address LIKE (users.name || :name_1) When we create a :func:`.select` construct, SQLAlchemy looks around at the tables we've mentioned and then places them in the FROM clause of the statement. When we use JOINs however, we know what FROM clause we want, so here we make use of the :meth:`~.Select.select_from` method: .. sourcecode:: pycon+sql >>> s = select([users.c.fullname]).select_from( ... users.join(addresses, ... addresses.c.email_address.like(users.c.name + '%')) ... ) {sql}>>> conn.execute(s).fetchall() # doctest: +NORMALIZE_WHITESPACE SELECT users.fullname FROM users JOIN addresses ON addresses.email_address LIKE (users.name || ?) ('%',) {stop}[(u'Jack Jones',), (u'Jack Jones',), (u'Wendy Williams',)] The :meth:`~.FromClause.outerjoin` method creates ``LEFT OUTER JOIN`` constructs, and is used in the same way as :meth:`~.FromClause.join`: .. sourcecode:: pycon+sql >>> s = select([users.c.fullname]).select_from(users.outerjoin(addresses)) >>> print s # doctest: +NORMALIZE_WHITESPACE SELECT users.fullname FROM users LEFT OUTER JOIN addresses ON users.id = addresses.user_id That's the output ``outerjoin()`` produces, unless, of course, you're stuck in a gig using Oracle prior to version 9, and you've set up your engine (which would be using ``OracleDialect``) to use Oracle-specific SQL: .. sourcecode:: pycon+sql >>> from sqlalchemy.dialects.oracle import dialect as OracleDialect >>> print s.compile(dialect=OracleDialect(use_ansi=False)) # doctest: +NORMALIZE_WHITESPACE SELECT users.fullname FROM users, addresses WHERE users.id = addresses.user_id(+) If you don't know what that SQL means, don't worry ! The secret tribe of Oracle DBAs don't want their black magic being found out ;). Everything Else ================ The concepts of creating SQL expressions have been introduced. What's left are more variants of the same themes. So now we'll catalog the rest of the important things we'll need to know. Bind Parameter Objects ---------------------- Throughout all these examples, SQLAlchemy is busy creating bind parameters wherever literal expressions occur. 
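Recalling the trick from the Operators section, here is a quick look at one of those automatic parameters, reusing the ``users`` table from earlier (output shown approximately)::

    expr = users.c.name == 'wendy'
    print expr                   # users.name = :name_1  - an automatically named bind parameter
    print expr.compile().params  # {u'name_1': 'wendy'}  - the literal value rides along with the expression
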
You can also specify your own bind parameters with your own names, and use the same statement repeatedly. The database dialect converts to the appropriate named or positional style, as here where it converts to positional for SQLite: .. sourcecode:: pycon+sql >>> from sqlalchemy.sql import bindparam >>> s = users.select(users.c.name == bindparam('username')) {sql}>>> conn.execute(s, username='wendy').fetchall() # doctest: +NORMALIZE_WHITESPACE SELECT users.id, users.name, users.fullname FROM users WHERE users.name = ? ('wendy',) {stop}[(2, u'wendy', u'Wendy Williams')] Another important aspect of bind parameters is that they may be assigned a type. The type of the bind parameter will determine its behavior within expressions and also how the data bound to it is processed before being sent off to the database: .. sourcecode:: pycon+sql >>> s = users.select(users.c.name.like(bindparam('username', type_=String) + text("'%'"))) {sql}>>> conn.execute(s, username='wendy').fetchall() # doctest: +NORMALIZE_WHITESPACE SELECT users.id, users.name, users.fullname FROM users WHERE users.name LIKE (? || '%') ('wendy',) {stop}[(2, u'wendy', u'Wendy Williams')] Bind parameters of the same name can also be used multiple times, where only a single named value is needed in the execute parameters: .. sourcecode:: pycon+sql >>> s = select([users, addresses]).\ ... where( ... or_( ... users.c.name.like( ... bindparam('name', type_=String) + text("'%'")), ... addresses.c.email_address.like( ... bindparam('name', type_=String) + text("'@%'")) ... ) ... ).\ ... select_from(users.outerjoin(addresses)).\ ... order_by(addresses.c.id) {sql}>>> conn.execute(s, name='jack').fetchall() # doctest: +NORMALIZE_WHITESPACE SELECT users.id, users.name, users.fullname, addresses.id, addresses.user_id, addresses.email_address FROM users LEFT OUTER JOIN addresses ON users.id = addresses.user_id WHERE users.name LIKE (? || '%') OR addresses.email_address LIKE (? || '@%') ORDER BY addresses.id ('jack', 'jack') {stop}[(1, u'jack', u'Jack Jones', 1, 1, u'jack@yahoo.com'), (1, u'jack', u'Jack Jones', 2, 1, u'jack@msn.com')] Functions --------- SQL functions are created using the :data:`~.expression.func` keyword, which generates functions using attribute access: .. sourcecode:: pycon+sql >>> from sqlalchemy.sql import func >>> print func.now() now() >>> print func.concat('x', 'y') concat(:param_1, :param_2) By "generates", we mean that **any** SQL function is created based on the word you choose:: >>> print func.xyz_my_goofy_function() # doctest: +NORMALIZE_WHITESPACE xyz_my_goofy_function() Certain function names are known by SQLAlchemy, allowing special behavioral rules to be applied. Some for example are "ANSI" functions, which mean they don't get the parenthesis added after them, such as CURRENT_TIMESTAMP: .. sourcecode:: pycon+sql >>> print func.current_timestamp() CURRENT_TIMESTAMP Functions are most typically used in the columns clause of a select statement, and can also be labeled as well as given a type. Labeling a function is recommended so that the result can be targeted in a result row based on a string name, and assigning it a type is required when you need result-set processing to occur, such as for Unicode conversion and date conversions. Below, we use the result function ``scalar()`` to just read the first column of the first row and then close the result; the label, even though present, is not important in this case: .. sourcecode:: pycon+sql >>> conn.execute( ... select([ ... 
func.max(addresses.c.email_address, type_=String). ... label('maxemail') ... ]) ... ).scalar() # doctest: +NORMALIZE_WHITESPACE {opensql}SELECT max(addresses.email_address) AS maxemail FROM addresses () {stop}u'www@www.org' Databases such as PostgreSQL and Oracle which support functions that return whole result sets can be assembled into selectable units, which can be used in statements. Such as, a database function ``calculate()`` which takes the parameters ``x`` and ``y``, and returns three columns which we'd like to name ``q``, ``z`` and ``r``, we can construct using "lexical" column objects as well as bind parameters: .. sourcecode:: pycon+sql >>> from sqlalchemy.sql import column >>> calculate = select([column('q'), column('z'), column('r')]).\ ... select_from( ... func.calculate( ... bindparam('x'), ... bindparam('y') ... ) ... ) >>> calc = calculate.alias() >>> print select([users]).where(users.c.id > calc.c.z) # doctest: +NORMALIZE_WHITESPACE SELECT users.id, users.name, users.fullname FROM users, (SELECT q, z, r FROM calculate(:x, :y)) AS anon_1 WHERE users.id > anon_1.z If we wanted to use our ``calculate`` statement twice with different bind parameters, the :func:`~sqlalchemy.sql.expression.ClauseElement.unique_params` function will create copies for us, and mark the bind parameters as "unique" so that conflicting names are isolated. Note we also make two separate aliases of our selectable: .. sourcecode:: pycon+sql >>> calc1 = calculate.alias('c1').unique_params(x=17, y=45) >>> calc2 = calculate.alias('c2').unique_params(x=5, y=12) >>> s = select([users]).\ ... where(users.c.id.between(calc1.c.z, calc2.c.z)) >>> print s # doctest: +NORMALIZE_WHITESPACE SELECT users.id, users.name, users.fullname FROM users, (SELECT q, z, r FROM calculate(:x_1, :y_1)) AS c1, (SELECT q, z, r FROM calculate(:x_2, :y_2)) AS c2 WHERE users.id BETWEEN c1.z AND c2.z >>> s.compile().params {u'x_2': 5, u'y_2': 12, u'y_1': 45, u'x_1': 17} Window Functions ----------------- Any :class:`.FunctionElement`, including functions generated by :data:`~.expression.func`, can be turned into a "window function", that is an OVER clause, using the :meth:`~.FunctionElement.over` method: .. sourcecode:: pycon+sql >>> s = select([ ... users.c.id, ... func.row_number().over(order_by=users.c.name) ... ]) >>> print s # doctest: +NORMALIZE_WHITESPACE SELECT users.id, row_number() OVER (ORDER BY users.name) AS anon_1 FROM users Unions and Other Set Operations ------------------------------- Unions come in two flavors, UNION and UNION ALL, which are available via module level functions :func:`~.expression.union` and :func:`~.expression.union_all`: .. sourcecode:: pycon+sql >>> from sqlalchemy.sql import union >>> u = union( ... addresses.select(). ... where(addresses.c.email_address == 'foo@bar.com'), ... addresses.select(). ... where(addresses.c.email_address.like('%@yahoo.com')), ... ).order_by(addresses.c.email_address) {sql}>>> conn.execute(u).fetchall() # doctest: +NORMALIZE_WHITESPACE SELECT addresses.id, addresses.user_id, addresses.email_address FROM addresses WHERE addresses.email_address = ? UNION SELECT addresses.id, addresses.user_id, addresses.email_address FROM addresses WHERE addresses.email_address LIKE ? ORDER BY addresses.email_address ('foo@bar.com', '%@yahoo.com') {stop}[(1, 1, u'jack@yahoo.com')] Also available, though not supported on all databases, are :func:`~.expression.intersect`, :func:`~.expression.intersect_all`, :func:`~.expression.except_`, and :func:`~.expression.except_all`: .. 
sourcecode:: pycon+sql >>> from sqlalchemy.sql import except_ >>> u = except_( ... addresses.select(). ... where(addresses.c.email_address.like('%@%.com')), ... addresses.select(). ... where(addresses.c.email_address.like('%@msn.com')) ... ) {sql}>>> conn.execute(u).fetchall() # doctest: +NORMALIZE_WHITESPACE SELECT addresses.id, addresses.user_id, addresses.email_address FROM addresses WHERE addresses.email_address LIKE ? EXCEPT SELECT addresses.id, addresses.user_id, addresses.email_address FROM addresses WHERE addresses.email_address LIKE ? ('%@%.com', '%@msn.com') {stop}[(1, 1, u'jack@yahoo.com'), (4, 2, u'wendy@aol.com')] A common issue with so-called "compound" selectables arises due to the fact that they nest with parenthesis. SQLite in particular doesn't like a statement that starts with parenthesis. So when nesting a "compound" inside a "compound", it's often necessary to apply ``.alias().select()`` to the first element of the outermost compound, if that element is also a compound. For example, to nest a "union" and a "select" inside of "except\_", SQLite will want the "union" to be stated as a subquery: .. sourcecode:: pycon+sql >>> u = except_( ... union( ... addresses.select(). ... where(addresses.c.email_address.like('%@yahoo.com')), ... addresses.select(). ... where(addresses.c.email_address.like('%@msn.com')) ... ).alias().select(), # apply subquery here ... addresses.select(addresses.c.email_address.like('%@msn.com')) ... ) {sql}>>> conn.execute(u).fetchall() # doctest: +NORMALIZE_WHITESPACE SELECT anon_1.id, anon_1.user_id, anon_1.email_address FROM (SELECT addresses.id AS id, addresses.user_id AS user_id, addresses.email_address AS email_address FROM addresses WHERE addresses.email_address LIKE ? UNION SELECT addresses.id AS id, addresses.user_id AS user_id, addresses.email_address AS email_address FROM addresses WHERE addresses.email_address LIKE ?) AS anon_1 EXCEPT SELECT addresses.id, addresses.user_id, addresses.email_address FROM addresses WHERE addresses.email_address LIKE ? ('%@yahoo.com', '%@msn.com', '%@msn.com') {stop}[(1, 1, u'jack@yahoo.com')] .. _scalar_selects: Scalar Selects -------------- A scalar select is a SELECT that returns exactly one row and one column. It can then be used as a column expression. A scalar select is often a :term:`correlated subquery`, which relies upon the enclosing SELECT statement in order to acquire at least one of its FROM clauses. The :func:`.select` construct can be modified to act as a column expression by calling either the :meth:`~.SelectBase.as_scalar` or :meth:`~.SelectBase.label` method: .. sourcecode:: pycon+sql >>> stmt = select([func.count(addresses.c.id)]).\ ... where(users.c.id == addresses.c.user_id).\ ... as_scalar() The above construct is now a :class:`~.expression.ScalarSelect` object, and is no longer part of the :class:`~.expression.FromClause` hierarchy; it instead is within the :class:`~.expression.ColumnElement` family of expression constructs. We can place this construct the same as any other column within another :func:`.select`: .. sourcecode:: pycon+sql >>> conn.execute(select([users.c.name, stmt])).fetchall() # doctest: +NORMALIZE_WHITESPACE {opensql}SELECT users.name, (SELECT count(addresses.id) AS count_1 FROM addresses WHERE users.id = addresses.user_id) AS anon_1 FROM users () {stop}[(u'jack', 2), (u'wendy', 2)] To apply a non-anonymous column name to our scalar select, we create it using :meth:`.SelectBase.label` instead: .. 
sourcecode:: pycon+sql >>> stmt = select([func.count(addresses.c.id)]).\ ... where(users.c.id == addresses.c.user_id).\ ... label("address_count") >>> conn.execute(select([users.c.name, stmt])).fetchall() # doctest: +NORMALIZE_WHITESPACE {opensql}SELECT users.name, (SELECT count(addresses.id) AS count_1 FROM addresses WHERE users.id = addresses.user_id) AS address_count FROM users () {stop}[(u'jack', 2), (u'wendy', 2)] .. _correlated_subqueries: Correlated Subqueries --------------------- Notice in the examples on :ref:`scalar_selects`, the FROM clause of each embedded select did not contain the ``users`` table in its FROM clause. This is because SQLAlchemy automatically :term:`correlates` embedded FROM objects to that of an enclosing query, if present, and if the inner SELECT statement would still have at least one FROM clause of its own. For example: .. sourcecode:: pycon+sql >>> stmt = select([addresses.c.user_id]).\ ... where(addresses.c.user_id == users.c.id).\ ... where(addresses.c.email_address == 'jack@yahoo.com') >>> enclosing_stmt = select([users.c.name]).where(users.c.id == stmt) >>> conn.execute(enclosing_stmt).fetchall() # doctest: +NORMALIZE_WHITESPACE {opensql}SELECT users.name FROM users WHERE users.id = (SELECT addresses.user_id FROM addresses WHERE addresses.user_id = users.id AND addresses.email_address = ?) ('jack@yahoo.com',) {stop}[(u'jack',)] Auto-correlation will usually do what's expected, however it can also be controlled. For example, if we wanted a statement to correlate only to the ``addresses`` table but not the ``users`` table, even if both were present in the enclosing SELECT, we use the :meth:`~.Select.correlate` method to specify those FROM clauses that may be correlated: .. sourcecode:: pycon+sql >>> stmt = select([users.c.id]).\ ... where(users.c.id == addresses.c.user_id).\ ... where(users.c.name == 'jack').\ ... correlate(addresses) >>> enclosing_stmt = select( ... [users.c.name, addresses.c.email_address]).\ ... select_from(users.join(addresses)).\ ... where(users.c.id == stmt) >>> conn.execute(enclosing_stmt).fetchall() # doctest: +NORMALIZE_WHITESPACE {opensql}SELECT users.name, addresses.email_address FROM users JOIN addresses ON users.id = addresses.user_id WHERE users.id = (SELECT users.id FROM users WHERE users.id = addresses.user_id AND users.name = ?) ('jack',) {stop}[(u'jack', u'jack@yahoo.com'), (u'jack', u'jack@msn.com')] To entirely disable a statement from correlating, we can pass ``None`` as the argument: .. sourcecode:: pycon+sql >>> stmt = select([users.c.id]).\ ... where(users.c.name == 'wendy').\ ... correlate(None) >>> enclosing_stmt = select([users.c.name]).\ ... where(users.c.id == stmt) >>> conn.execute(enclosing_stmt).fetchall() # doctest: +NORMALIZE_WHITESPACE {opensql}SELECT users.name FROM users WHERE users.id = (SELECT users.id FROM users WHERE users.name = ?) ('wendy',) {stop}[(u'wendy',)] We can also control correlation via exclusion, using the :meth:`.Select.correlate_except` method. Such as, we can write our SELECT for the ``users`` table by telling it to correlate all FROM clauses except for ``users``: .. sourcecode:: pycon+sql >>> stmt = select([users.c.id]).\ ... where(users.c.id == addresses.c.user_id).\ ... where(users.c.name == 'jack').\ ... correlate_except(users) >>> enclosing_stmt = select( ... [users.c.name, addresses.c.email_address]).\ ... select_from(users.join(addresses)).\ ... 
where(users.c.id == stmt) >>> conn.execute(enclosing_stmt).fetchall() # doctest: +NORMALIZE_WHITESPACE {opensql}SELECT users.name, addresses.email_address FROM users JOIN addresses ON users.id = addresses.user_id WHERE users.id = (SELECT users.id FROM users WHERE users.id = addresses.user_id AND users.name = ?) ('jack',) {stop}[(u'jack', u'jack@yahoo.com'), (u'jack', u'jack@msn.com')] Ordering, Grouping, Limiting, Offset...ing... --------------------------------------------- Ordering is done by passing column expressions to the :meth:`~.SelectBase.order_by` method: .. sourcecode:: pycon+sql >>> stmt = select([users.c.name]).order_by(users.c.name) >>> conn.execute(stmt).fetchall() # doctest: +NORMALIZE_WHITESPACE {opensql}SELECT users.name FROM users ORDER BY users.name () {stop}[(u'jack',), (u'wendy',)] Ascending or descending can be controlled using the :meth:`~.ColumnElement.asc` and :meth:`~.ColumnElement.desc` modifiers: .. sourcecode:: pycon+sql >>> stmt = select([users.c.name]).order_by(users.c.name.desc()) >>> conn.execute(stmt).fetchall() # doctest: +NORMALIZE_WHITESPACE {opensql}SELECT users.name FROM users ORDER BY users.name DESC () {stop}[(u'wendy',), (u'jack',)] Grouping refers to the GROUP BY clause, and is usually used in conjunction with aggregate functions to establish groups of rows to be aggregated. This is provided via the :meth:`~.SelectBase.group_by` method: .. sourcecode:: pycon+sql >>> stmt = select([users.c.name, func.count(addresses.c.id)]).\ ... select_from(users.join(addresses)).\ ... group_by(users.c.name) >>> conn.execute(stmt).fetchall() # doctest: +NORMALIZE_WHITESPACE {opensql}SELECT users.name, count(addresses.id) AS count_1 FROM users JOIN addresses ON users.id = addresses.user_id GROUP BY users.name () {stop}[(u'jack', 2), (u'wendy', 2)] HAVING can be used to filter results on an aggregate value, after GROUP BY has been applied. It's available here via the :meth:`~.Select.having` method: .. sourcecode:: pycon+sql >>> stmt = select([users.c.name, func.count(addresses.c.id)]).\ ... select_from(users.join(addresses)).\ ... group_by(users.c.name).\ ... having(func.length(users.c.name) > 4) >>> conn.execute(stmt).fetchall() # doctest: +NORMALIZE_WHITESPACE {opensql}SELECT users.name, count(addresses.id) AS count_1 FROM users JOIN addresses ON users.id = addresses.user_id GROUP BY users.name HAVING length(users.name) > ? (4,) {stop}[(u'wendy', 2)] A common system of dealing with duplicates in composed SELECT statments is the DISTINCT modifier. A simple DISTINCT clause can be added using the :meth:`.Select.distinct` method: .. sourcecode:: pycon+sql >>> stmt = select([users.c.name]).\ ... where(addresses.c.email_address. ... contains(users.c.name)).\ ... distinct() >>> conn.execute(stmt).fetchall() # doctest: +NORMALIZE_WHITESPACE {opensql}SELECT DISTINCT users.name FROM users, addresses WHERE addresses.email_address LIKE '%%' || users.name || '%%' () {stop}[(u'jack',), (u'wendy',)] Most database backends support a system of limiting how many rows are returned, and the majority also feature a means of starting to return rows after a given "offset". While common backends like Postgresql, MySQL and SQLite support LIMIT and OFFSET keywords, other backends need to refer to more esoteric features such as "window functions" and row ids to achieve the same effect. The :meth:`~.Select.limit` and :meth:`~.Select.offset` methods provide an easy abstraction into the current backend's methodology: .. 
sourcecode:: pycon+sql >>> stmt = select([users.c.name, addresses.c.email_address]).\ ... select_from(users.join(addresses)).\ ... limit(1).offset(1) >>> conn.execute(stmt).fetchall() # doctest: +NORMALIZE_WHITESPACE {opensql}SELECT users.name, addresses.email_address FROM users JOIN addresses ON users.id = addresses.user_id LIMIT ? OFFSET ? (1, 1) {stop}[(u'jack', u'jack@msn.com')] .. _inserts_and_updates: Inserts, Updates and Deletes ============================ We've seen :meth:`~.TableClause.insert` demonstrated earlier in this tutorial. Where :meth:`~.TableClause.insert` prodces INSERT, the :meth:`~.TableClause.update` method produces UPDATE. Both of these constructs feature a method called :meth:`~.ValuesBase.values` which specifies the VALUES or SET clause of the statement. The :meth:`~.ValuesBase.values` method accommodates any column expression as a value: .. sourcecode:: pycon+sql >>> stmt = users.update().\ ... values(fullname="Fullname: " + users.c.name) >>> conn.execute(stmt) #doctest: +ELLIPSIS {opensql}UPDATE users SET fullname=(? || users.name) ('Fullname: ',) COMMIT {stop} When using :meth:`~.TableClause.insert` or :meth:`~.TableClause.update` in an "execute many" context, we may also want to specify named bound parameters which we can refer to in the argument list. The two constructs will automatically generate bound placeholders for any column names passed in the dictionaries sent to :meth:`~.Connection.execute` at execution time. However, if we wish to use explicitly targeted named parameters with composed expressions, we need to use the :func:`~.expression.bindparam` construct. When using :func:`~.expression.bindparam` with :meth:`~.TableClause.insert` or :meth:`~.TableClause.update`, the names of the table's columns themselves are reserved for the "automatic" generation of bind names. We can combine the usage of implicitly available bind names and explicitly named parameters as in the example below: .. sourcecode:: pycon+sql >>> stmt = users.insert().\ ... values(name=bindparam('_name') + " .. name") >>> conn.execute(stmt, [ # doctest: +ELLIPSIS ... {'id':4, '_name':'name1'}, ... {'id':5, '_name':'name2'}, ... {'id':6, '_name':'name3'}, ... ]) {opensql}INSERT INTO users (id, name) VALUES (?, (? || ?)) ((4, 'name1', ' .. name'), (5, 'name2', ' .. name'), (6, 'name3', ' .. name')) COMMIT An UPDATE statement is emitted using the :meth:`~.TableClause.update` construct. This works much like an INSERT, except there is an additional WHERE clause that can be specified: .. sourcecode:: pycon+sql >>> stmt = users.update().\ ... where(users.c.name == 'jack').\ ... values(name='ed') >>> conn.execute(stmt) #doctest: +ELLIPSIS {opensql}UPDATE users SET name=? WHERE users.name = ? ('ed', 'jack') COMMIT {stop} When using :meth:`~.TableClause.update` in an "execute many" context, we may wish to also use explicitly named bound parameters in the WHERE clause. Again, :func:`~.expression.bindparam` is the construct used to achieve this: .. sourcecode:: pycon+sql >>> stmt = users.update().\ ... where(users.c.name == bindparam('oldname')).\ ... values(name=bindparam('newname')) >>> conn.execute(stmt, [ ... {'oldname':'jack', 'newname':'ed'}, ... {'oldname':'wendy', 'newname':'mary'}, ... {'oldname':'jim', 'newname':'jake'}, ... ]) #doctest: +ELLIPSIS {opensql}UPDATE users SET name=? WHERE users.name = ? 
(('ed', 'jack'), ('mary', 'wendy'), ('jake', 'jim')) COMMIT {stop} Correlated Updates ------------------ A correlated update lets you update a table using selection from another table, or the same table: .. sourcecode:: pycon+sql >>> stmt = select([addresses.c.email_address]).\ ... where(addresses.c.user_id == users.c.id).\ ... limit(1) >>> conn.execute(users.update().values(fullname=stmt)) #doctest: +ELLIPSIS,+NORMALIZE_WHITESPACE {opensql}UPDATE users SET fullname=(SELECT addresses.email_address FROM addresses WHERE addresses.user_id = users.id LIMIT ? OFFSET ?) (1, 0) COMMIT {stop} .. _multi_table_updates: Multiple Table Updates ---------------------- .. versionadded:: 0.7.4 The Postgresql, Microsoft SQL Server, and MySQL backends all support UPDATE statements that refer to multiple tables. For PG and MSSQL, this is the "UPDATE FROM" syntax, which updates one table at a time, but can reference additional tables in an additional "FROM" clause that can then be referenced in the WHERE clause directly. On MySQL, multiple tables can be embedded into a single UPDATE statement separated by a comma. The SQLAlchemy :func:`.update` construct supports both of these modes implicitly, by specifying multiple tables in the WHERE clause:: stmt = users.update().\ values(name='ed wood').\ where(users.c.id == addresses.c.id).\ where(addresses.c.email_address.startswith('ed%')) conn.execute(stmt) The resulting SQL from the above statement would render as:: UPDATE users SET name=:name FROM addresses WHERE users.id = addresses.id AND addresses.email_address LIKE :email_address_1 || '%%' When using MySQL, columns from each table can be assigned to in the SET clause directly, using the dictionary form passed to :meth:`.Update.values`:: stmt = users.update().\ values({ users.c.name:'ed wood', addresses.c.email_address:'ed.wood@foo.com' }).\ where(users.c.id == addresses.c.id).\ where(addresses.c.email_address.startswith('ed%')) The tables are referenced explicitly in the SET clause:: UPDATE users, addresses SET addresses.email_address=%s, users.name=%s WHERE users.id = addresses.id AND addresses.email_address LIKE concat(%s, '%%') SQLAlchemy doesn't do anything special when these constructs are used on a non-supporting database. The ``UPDATE FROM`` syntax generates by default when multiple tables are present, and the statement will be rejected by the database if this syntax is not supported. .. _deletes: Deletes ------- Finally, a delete. This is accomplished easily enough using the :meth:`~.TableClause.delete` construct: .. sourcecode:: pycon+sql >>> conn.execute(addresses.delete()) #doctest: +ELLIPSIS {opensql}DELETE FROM addresses () COMMIT {stop} >>> conn.execute(users.delete().where(users.c.name > 'm')) #doctest: +ELLIPSIS {opensql}DELETE FROM users WHERE users.name > ? ('m',) COMMIT {stop} Matched Row Counts ------------------ Both of :meth:`~.TableClause.update` and :meth:`~.TableClause.delete` are associated with *matched row counts*. This is a number indicating the number of rows that were matched by the WHERE clause. Note that by "matched", this includes rows where no UPDATE actually took place. The value is available as :attr:`~.ResultProxy.rowcount`: .. 
sourcecode:: pycon+sql >>> result = conn.execute(users.delete()) #doctest: +ELLIPSIS {opensql}DELETE FROM users () COMMIT {stop}>>> result.rowcount 1 Further Reference ================== Expression Language Reference: :ref:`expression_api_toplevel` Database Metadata Reference: :ref:`metadata_toplevel` Engine Reference: :doc:`/core/engines` Connection Reference: :ref:`connections_toplevel` Types Reference: :ref:`types_toplevel` SQLAlchemy-0.8.4/doc/_sources/core/types.txt0000644000076500000240000005657312251147171021476 0ustar classicstaff00000000000000.. _types_toplevel: Column and Data Types ===================== .. module:: sqlalchemy.types SQLAlchemy provides abstractions for most common database data types, and a mechanism for specifying your own custom data types. The methods and attributes of type objects are rarely used directly. Type objects are supplied to :class:`~sqlalchemy.Table` definitions and can be supplied as type hints to `functions` for occasions where the database driver returns an incorrect type. .. code-block:: pycon >>> users = Table('users', metadata, ... Column('id', Integer, primary_key=True) ... Column('login', String(32)) ... ) SQLAlchemy will use the ``Integer`` and ``String(32)`` type information when issuing a ``CREATE TABLE`` statement and will use it again when reading back rows ``SELECTed`` from the database. Functions that accept a type (such as :func:`~sqlalchemy.Column`) will typically accept a type class or instance; ``Integer`` is equivalent to ``Integer()`` with no construction arguments in this case. .. _types_generic: Generic Types ------------- Generic types specify a column that can read, write and store a particular type of Python data. SQLAlchemy will choose the best database column type available on the target database when issuing a ``CREATE TABLE`` statement. For complete control over which column type is emitted in ``CREATE TABLE``, such as ``VARCHAR`` see `SQL Standard Types`_ and the other sections of this chapter. .. autoclass:: BigInteger :members: .. autoclass:: Boolean :members: .. autoclass:: Date :members: .. autoclass:: DateTime :members: .. autoclass:: Enum :members: __init__, create, drop .. autoclass:: Float :members: .. autoclass:: Integer :members: .. autoclass:: Interval :members: .. autoclass:: LargeBinary :members: .. autoclass:: Numeric :members: .. autoclass:: PickleType :members: .. autoclass:: SchemaType :members: :undoc-members: .. autoclass:: SmallInteger :members: .. autoclass:: String :members: .. autoclass:: Text :members: .. autoclass:: Time :members: .. autoclass:: Unicode :members: .. autoclass:: UnicodeText :members: .. _types_sqlstandard: SQL Standard Types ------------------ The SQL standard types always create database column types of the same name when ``CREATE TABLE`` is issued. Some types may not be supported on all databases. .. autoclass:: BIGINT .. autoclass:: BINARY .. autoclass:: BLOB .. autoclass:: BOOLEAN .. autoclass:: CHAR .. autoclass:: CLOB .. autoclass:: DATE .. autoclass:: DATETIME .. autoclass:: DECIMAL .. autoclass:: FLOAT .. autoclass:: INT .. autoclass:: sqlalchemy.types.INTEGER .. autoclass:: NCHAR .. autoclass:: NVARCHAR .. autoclass:: NUMERIC .. autoclass:: REAL .. autoclass:: SMALLINT .. autoclass:: TEXT .. autoclass:: TIME .. autoclass:: TIMESTAMP .. autoclass:: VARBINARY .. autoclass:: VARCHAR .. _types_vendor: Vendor-Specific Types --------------------- Database-specific types are also available for import from each database's dialect module. 
See the :ref:`dialect_toplevel` reference for the database you're interested in. For example, MySQL has a ``BIGINT`` type and PostgreSQL has an ``INET`` type. To use these, import them from the module explicitly:: from sqlalchemy.dialects import mysql table = Table('foo', metadata, Column('id', mysql.BIGINT), Column('enumerates', mysql.ENUM('a', 'b', 'c')) ) Or some PostgreSQL types:: from sqlalchemy.dialects import postgresql table = Table('foo', metadata, Column('ipaddress', postgresql.INET), Column('elements', postgresql.ARRAY(String)) ) Each dialect provides the full set of typenames supported by that backend within its `__all__` collection, so that a simple `import *` or similar will import all supported types as implemented for that backend:: from sqlalchemy.dialects.postgresql import * t = Table('mytable', metadata, Column('id', INTEGER, primary_key=True), Column('name', VARCHAR(300)), Column('inetaddr', INET) ) Where above, the INTEGER and VARCHAR types are ultimately from sqlalchemy.types, and INET is specific to the Postgresql dialect. Some dialect level types have the same name as the SQL standard type, but also provide additional arguments. For example, MySQL implements the full range of character and string types including additional arguments such as `collation` and `charset`:: from sqlalchemy.dialects.mysql import VARCHAR, TEXT table = Table('foo', meta, Column('col1', VARCHAR(200, collation='binary')), Column('col2', TEXT(charset='latin1')) ) .. _types_custom: Custom Types ------------ A variety of methods exist to redefine the behavior of existing types as well as to provide new ones. Overriding Type Compilation ~~~~~~~~~~~~~~~~~~~~~~~~~~~ A frequent need is to force the "string" version of a type, that is the one rendered in a CREATE TABLE statement or other SQL function like CAST, to be changed. For example, an application may want to force the rendering of ``BINARY`` for all platforms except for one, in which is wants ``BLOB`` to be rendered. Usage of an existing generic type, in this case :class:`.LargeBinary`, is preferred for most use cases. But to control types more accurately, a compilation directive that is per-dialect can be associated with any type:: from sqlalchemy.ext.compiler import compiles from sqlalchemy.types import BINARY @compiles(BINARY, "sqlite") def compile_binary_sqlite(type_, compiler, **kw): return "BLOB" The above code allows the usage of :class:`.types.BINARY`, which will produce the string ``BINARY`` against all backends except SQLite, in which case it will produce ``BLOB``. See the section :ref:`type_compilation_extension`, a subsection of :ref:`sqlalchemy.ext.compiler_toplevel`, for additional examples. .. _types_typedecorator: Augmenting Existing Types ~~~~~~~~~~~~~~~~~~~~~~~~~ The :class:`.TypeDecorator` allows the creation of custom types which add bind-parameter and result-processing behavior to an existing type object. It is used when additional in-Python marshaling of data to and from the database is required. .. note:: The bind- and result-processing of :class:`.TypeDecorator` is *in addition* to the processing already performed by the hosted type, which is customized by SQLAlchemy on a per-DBAPI basis to perform processing specific to that DBAPI. To change the DBAPI-level processing for an existing type, see the section :ref:`replacing_processors`. .. autoclass:: TypeDecorator :members: :inherited-members: TypeDecorator Recipes ~~~~~~~~~~~~~~~~~~~~~ A few key :class:`.TypeDecorator` recipes follow. .. 
_coerce_to_unicode: Coercing Encoded Strings to Unicode ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ A common source of confusion regarding the :class:`.Unicode` type is that it is intended to deal *only* with Python ``unicode`` objects on the Python side, meaning values passed to it as bind parameters must be of the form ``u'some string'`` if using Python 2 and not 3. The encoding/decoding functions it performs are only to suit what the DBAPI in use requires, and are primarily a private implementation detail. The use case of a type that can safely receive Python bytestrings, that is strings that contain non-ASCII characters and are not ``u''`` objects in Python 2, can be achieved using a :class:`.TypeDecorator` which coerces as needed:: from sqlalchemy.types import TypeDecorator, Unicode class CoerceUTF8(TypeDecorator): """Safely coerce Python bytestrings to Unicode before passing off to the database.""" impl = Unicode def process_bind_param(self, value, dialect): if isinstance(value, str): value = value.decode('utf-8') return value Rounding Numerics ^^^^^^^^^^^^^^^^^ Some database connectors like those of SQL Server choke if a Decimal is passed with too many decimal places. Here's a recipe that rounds them down:: from sqlalchemy.types import TypeDecorator, Numeric from decimal import Decimal class SafeNumeric(TypeDecorator): """Adds quantization to Numeric.""" impl = Numeric def __init__(self, *arg, **kw): TypeDecorator.__init__(self, *arg, **kw) self.quantize_int = -(self.impl.precision - self.impl.scale) self.quantize = Decimal(10) ** self.quantize_int def process_bind_param(self, value, dialect): if isinstance(value, Decimal) and \ value.as_tuple()[2] < self.quantize_int: value = value.quantize(self.quantize) return value .. _custom_guid_type: Backend-agnostic GUID Type ^^^^^^^^^^^^^^^^^^^^^^^^^^ Receives and returns Python uuid() objects. Uses the PG UUID type when using Postgresql, CHAR(32) on other backends, storing them in stringified hex format. Can be modified to store binary in CHAR(16) if desired:: from sqlalchemy.types import TypeDecorator, CHAR from sqlalchemy.dialects.postgresql import UUID import uuid class GUID(TypeDecorator): """Platform-independent GUID type. Uses Postgresql's UUID type, otherwise uses CHAR(32), storing as stringified hex values. """ impl = CHAR def load_dialect_impl(self, dialect): if dialect.name == 'postgresql': return dialect.type_descriptor(UUID()) else: return dialect.type_descriptor(CHAR(32)) def process_bind_param(self, value, dialect): if value is None: return value elif dialect.name == 'postgresql': return str(value) else: if not isinstance(value, uuid.UUID): return "%.32x" % uuid.UUID(value) else: # hexstring return "%.32x" % value def process_result_value(self, value, dialect): if value is None: return value else: return uuid.UUID(value) Marshal JSON Strings ^^^^^^^^^^^^^^^^^^^^^ This type uses ``simplejson`` to marshal Python data structures to/from JSON. Can be modified to use Python's builtin json encoder:: from sqlalchemy.types import TypeDecorator, VARCHAR import json class JSONEncodedDict(TypeDecorator): """Represents an immutable structure as a json-encoded string. 
Usage:: JSONEncodedDict(255) """ impl = VARCHAR def process_bind_param(self, value, dialect): if value is not None: value = json.dumps(value) return value def process_result_value(self, value, dialect): if value is not None: value = json.loads(value) return value Note that the ORM by default will not detect "mutability" on such a type - meaning, in-place changes to values will not be detected and will not be flushed. Without further steps, you instead would need to replace the existing value with a new one on each parent object to detect changes. Note that there's nothing wrong with this, as many applications may not require that the values are ever mutated once created. For those which do have this requirment, support for mutability is best applied using the ``sqlalchemy.ext.mutable`` extension - see the example in :ref:`mutable_toplevel`. .. _replacing_processors: Replacing the Bind/Result Processing of Existing Types ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Most augmentation of type behavior at the bind/result level is achieved using :class:`.TypeDecorator`. For the rare scenario where the specific processing applied by SQLAlchemy at the DBAPI level needs to be replaced, the SQLAlchemy type can be subclassed directly, and the ``bind_processor()`` or ``result_processor()`` methods can be overridden. Doing so requires that the ``adapt()`` method also be overridden. This method is the mechanism by which SQLAlchemy produces DBAPI-specific type behavior during statement execution. Overriding it allows a copy of the custom type to be used in lieu of a DBAPI-specific type. Below we subclass the :class:`.types.TIME` type to have custom result processing behavior. The ``process()`` function will receive ``value`` from the DBAPI cursor directly:: class MySpecialTime(TIME): def __init__(self, special_argument): super(MySpecialTime, self).__init__() self.special_argument = special_argument def result_processor(self, dialect, coltype): import datetime time = datetime.time def process(value): if value is not None: microseconds = value.microseconds seconds = value.seconds minutes = seconds / 60 return time( minutes / 60, minutes % 60, seconds - minutes * 60, microseconds) else: return None return process def adapt(self, impltype): return MySpecialTime(self.special_argument) .. _types_sql_value_processing: Applying SQL-level Bind/Result Processing ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ As seen in the sections :ref:`types_typedecorator` and :ref:`replacing_processors`, SQLAlchemy allows Python functions to be invoked both when parameters are sent to a statement, as well as when result rows are loaded from the database, to apply transformations to the values as they are sent to or from the database. It is also possible to define SQL-level transformations as well. The rationale here is when only the relational database contains a particular series of functions that are necessary to coerce incoming and outgoing data between an application and persistence format. Examples include using database-defined encryption/decryption functions, as well as stored procedures that handle geographic data. The Postgis extension to Postgresql includes an extensive array of SQL functions that are necessary for coercing data into particular formats. 
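As a minimal sketch of the general idea before the full mechanism is described below, a custom type
can wrap outgoing bound parameters and incoming column expressions in SQL functions.  The
``app_encrypt()`` and ``app_decrypt()`` names here are hypothetical stand-ins for whatever
functions a particular database actually provides::

    from sqlalchemy import func, String
    from sqlalchemy.types import TypeDecorator

    class AppEncryptedString(TypeDecorator):
        """Hypothetical type which defers encryption/decryption
        to SQL-level functions on the database side."""

        impl = String

        def bind_expression(self, bindvalue):
            # wrap outgoing bound parameters in the (hypothetical)
            # database-side encryption function
            return func.app_encrypt(bindvalue, type_=self)

        def column_expression(self, col):
            # wrap incoming column expressions in the (hypothetical)
            # database-side decryption function
            return func.app_decrypt(col, type_=self)
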
Any :class:`.TypeEngine`, :class:`.UserDefinedType` or :class:`.TypeDecorator` subclass can include implementations of :meth:`.TypeEngine.bind_expression` and/or :meth:`.TypeEngine.column_expression`, which when defined to return a non-``None`` value should return a :class:`.ColumnElement` expression to be injected into the SQL statement, either surrounding bound parameters or a column expression. For example, to build a ``Geometry`` type which will apply the Postgis function ``ST_GeomFromText`` to all outgoing values and the function ``ST_AsText`` to all incoming data, we can create our own subclass of :class:`.UserDefinedType` which provides these methods in conjunction with :data:`~.sqlalchemy.sql.expression.func`:: from sqlalchemy import func from sqlalchemy.types import UserDefinedType class Geometry(UserDefinedType): def get_col_spec(self): return "GEOMETRY" def bind_expression(self, bindvalue): return func.ST_GeomFromText(bindvalue, type_=self) def column_expression(self, col): return func.ST_AsText(col, type_=self) We can apply the ``Geometry`` type into :class:`.Table` metadata and use it in a :func:`.select` construct:: geometry = Table('geometry', metadata, Column('geom_id', Integer, primary_key=True), Column('geom_data', Geometry) ) print select([geometry]).where( geometry.c.geom_data == 'LINESTRING(189412 252431,189631 259122)') The resulting SQL embeds both functions as appropriate. ``ST_AsText`` is applied to the columns clause so that the return value is run through the function before passing into a result set, and ``ST_GeomFromText`` is run on the bound parameter so that the passed-in value is converted:: SELECT geometry.geom_id, ST_AsText(geometry.geom_data) AS geom_data_1 FROM geometry WHERE geometry.geom_data = ST_GeomFromText(:geom_data_2) The :meth:`.TypeEngine.column_expression` method interacts with the mechanics of the compiler such that the SQL expression does not interfere with the labeling of the wrapped expression. 
Such as, if we rendered a :func:`.select` against a :func:`.label` of our expression, the string label is moved to the outside of the wrapped expression:: print select([geometry.c.geom_data.label('my_data')]) Output:: SELECT ST_AsText(geometry.geom_data) AS my_data FROM geometry For an example of subclassing a built in type directly, we subclass :class:`.postgresql.BYTEA` to provide a ``PGPString``, which will make use of the Postgresql ``pgcrypto`` extension to encrpyt/decrypt values transparently:: from sqlalchemy import create_engine, String, select, func, \ MetaData, Table, Column, type_coerce from sqlalchemy.dialects.postgresql import BYTEA class PGPString(BYTEA): def __init__(self, passphrase, length=None): super(PGPString, self).__init__(length) self.passphrase = passphrase def bind_expression(self, bindvalue): # convert the bind's type from PGPString to # String, so that it's passed to psycopg2 as is without # a dbapi.Binary wrapper bindvalue = type_coerce(bindvalue, String) return func.pgp_sym_encrypt(bindvalue, self.passphrase) def column_expression(self, col): return func.pgp_sym_decrypt(col, self.passphrase) metadata = MetaData() message = Table('message', metadata, Column('username', String(50)), Column('message', PGPString("this is my passphrase", length=1000)), ) engine = create_engine("postgresql://scott:tiger@localhost/test", echo=True) with engine.begin() as conn: metadata.create_all(conn) conn.execute(message.insert(), username="some user", message="this is my message") print conn.scalar( select([message.c.message]).\ where(message.c.username == "some user") ) The ``pgp_sym_encrypt`` and ``pgp_sym_decrypt`` functions are applied to the INSERT and SELECT statements:: INSERT INTO message (username, message) VALUES (%(username)s, pgp_sym_encrypt(%(message)s, %(pgp_sym_encrypt_1)s)) {'username': 'some user', 'message': 'this is my message', 'pgp_sym_encrypt_1': 'this is my passphrase'} SELECT pgp_sym_decrypt(message.message, %(pgp_sym_decrypt_1)s) AS message_1 FROM message WHERE message.username = %(username_1)s {'pgp_sym_decrypt_1': 'this is my passphrase', 'username_1': 'some user'} .. versionadded:: 0.8 Added the :meth:`.TypeEngine.bind_expression` and :meth:`.TypeEngine.column_expression` methods. See also: :ref:`examples_postgis` .. _types_operators: Redefining and Creating New Operators ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ SQLAlchemy Core defines a fixed set of expression operators available to all column expressions. Some of these operations have the effect of overloading Python's built in operators; examples of such operators include :meth:`.ColumnOperators.__eq__` (``table.c.somecolumn == 'foo'``), :meth:`.ColumnOperators.__invert__` (``~table.c.flag``), and :meth:`.ColumnOperators.__add__` (``table.c.x + table.c.y``). Other operators are exposed as explicit methods on column expressions, such as :meth:`.ColumnOperators.in_` (``table.c.value.in_(['x', 'y'])``) and :meth:`.ColumnOperators.like` (``table.c.value.like('%ed%')``). The Core expression constructs in all cases consult the type of the expression in order to determine the behavior of existing operators, as well as to locate additional operators that aren't part of the built in set. The :class:`.TypeEngine` base class defines a root "comparison" implementation :class:`.TypeEngine.Comparator`, and many specific types provide their own sub-implementations of this class. 
User-defined :class:`.TypeEngine.Comparator` implementations can be built directly into a simple subclass of a particular type in order to override or define new operations. Below, we create a :class:`.Integer` subclass which overrides the :meth:`.ColumnOperators.__add__` operator:: from sqlalchemy import Integer class MyInt(Integer): class comparator_factory(Integer.Comparator): def __add__(self, other): return self.op("goofy")(other) The above configuration creates a new class ``MyInt``, which establishes the :attr:`.TypeEngine.comparator_factory` attribute as referring to a new class, subclassing the :class:`.TypeEngine.Comparator` class associated with the :class:`.Integer` type. Usage:: >>> sometable = Table("sometable", metadata, Column("data", MyInt)) >>> print sometable.c.data + 5 sometable.data goofy :data_1 The implementation for :meth:`.ColumnOperators.__add__` is consulted by an owning SQL expression, by instantiating the :class:`.TypeEngine.Comparator` with itself as the ``expr`` attribute. The mechanics of the expression system are such that operations continue recursively until an expression object produces a new SQL expression construct. Above, we could just as well have said ``self.expr.op("goofy")(other)`` instead of ``self.op("goofy")(other)``. New methods added to a :class:`.TypeEngine.Comparator` are exposed on an owning SQL expression using a ``__getattr__`` scheme, which exposes methods added to :class:`.TypeEngine.Comparator` onto the owning :class:`.ColumnElement`. For example, to add a ``log()`` function to integers:: from sqlalchemy import Integer, func class MyInt(Integer): class comparator_factory(Integer.Comparator): def log(self, other): return func.log(self.expr, other) Using the above type:: >>> print sometable.c.data.log(5) log(:log_1, :log_2) Unary operations are also possible. For example, to add an implementation of the Postgresql factorial operator, we combine the :class:`.UnaryExpression` construct along with a :class:`.custom_op` to produce the factorial expression:: from sqlalchemy import Integer from sqlalchemy.sql.expression import UnaryExpression from sqlalchemy.sql import operators class MyInteger(Integer): class comparator_factory(Integer.Comparator): def factorial(self): return UnaryExpression(self.expr, modifier=operators.custom_op("!"), type_=MyInteger) Using the above type:: >>> from sqlalchemy.sql import column >>> print column('x', MyInteger).factorial() x ! See also: :attr:`.TypeEngine.comparator_factory` .. versionadded:: 0.8 The expression system was enhanced to support customization of operators on a per-type level. Creating New Types ~~~~~~~~~~~~~~~~~~ The :class:`.UserDefinedType` class is provided as a simple base class for defining entirely new database types. Use this to represent native database types not known by SQLAlchemy. If only Python translation behavior is needed, use :class:`.TypeDecorator` instead. .. autoclass:: UserDefinedType :members: .. _types_api: Base Type API -------------- .. autoclass:: AbstractType :members: .. autoclass:: TypeEngine :members: .. autoclass:: Concatenable :members: :inherited-members: .. autoclass:: NullType .. autoclass:: Variant :members: with_variant, __init__ SQLAlchemy-0.8.4/doc/_sources/dialects/0000755000076500000240000000000012251151573020412 5ustar classicstaff00000000000000SQLAlchemy-0.8.4/doc/_sources/dialects/drizzle.txt0000644000076500000240000000250712251147171022641 0ustar classicstaff00000000000000.. _drizzle_toplevel: Drizzle ======= .. 
automodule:: sqlalchemy.dialects.drizzle.base Drizzle Data Types ------------------ As with all SQLAlchemy dialects, all UPPERCASE types that are known to be valid with Drizzle are importable from the top level dialect:: from sqlalchemy.dialects.drizzle import \ BIGINT, BINARY, BLOB, BOOLEAN, CHAR, DATE, DATETIME, DECIMAL, DOUBLE, ENUM, FLOAT, INT, INTEGER, NUMERIC, TEXT, TIME, TIMESTAMP, VARBINARY, VARCHAR Types which are specific to Drizzle, or have Drizzle-specific construction arguments, are as follows: .. currentmodule:: sqlalchemy.dialects.drizzle .. autoclass:: BIGINT :members: __init__ .. autoclass:: CHAR :members: __init__ .. autoclass:: DECIMAL :members: __init__ .. autoclass:: DOUBLE :members: __init__ .. autoclass:: ENUM :members: __init__ .. autoclass:: FLOAT :members: __init__ .. autoclass:: INTEGER :members: __init__ .. autoclass:: NUMERIC :members: __init__ .. autoclass:: REAL :members: __init__ .. autoclass:: TEXT :members: __init__ .. autoclass:: TIMESTAMP :members: __init__ .. autoclass:: VARCHAR :members: __init__ MySQL-Python ------------ .. automodule:: sqlalchemy.dialects.drizzle.mysqldb SQLAlchemy-0.8.4/doc/_sources/dialects/firebird.txt0000644000076500000240000000035312251147171022741 0ustar classicstaff00000000000000.. _firebird_toplevel: Firebird ======== .. automodule:: sqlalchemy.dialects.firebird.base kinterbasdb ----------- .. automodule:: sqlalchemy.dialects.firebird.kinterbasdb fdb --- .. automodule:: sqlalchemy.dialects.firebird.fdb SQLAlchemy-0.8.4/doc/_sources/dialects/index.txt0000644000076500000240000000417512251147171022270 0ustar classicstaff00000000000000.. _dialect_toplevel: Dialects ======== The **dialect** is the system SQLAlchemy uses to communicate with various types of :term:`DBAPI` implementations and databases. The sections that follow contain reference documentation and notes specific to the usage of each backend, as well as notes for the various DBAPIs. All dialects require that an appropriate DBAPI driver is installed. Included Dialects ----------------- .. toctree:: :maxdepth: 1 :glob: drizzle firebird informix mssql mysql oracle postgresql sqlite sybase .. _external_toplevel: External Dialects ----------------- .. versionchanged:: 0.8 As of SQLAlchemy 0.8, several dialects have been moved to external projects, and dialects for new databases will also be published as external projects. The rationale here is to keep the base SQLAlchemy install and test suite from growing inordinately large. The "classic" dialects such as SQLite, MySQL, Postgresql, Oracle, SQL Server, and Firebird will remain in the Core for the time being. Current external dialect projects for SQLAlchemy include: Production Ready ^^^^^^^^^^^^^^^^ * `ibm_db_sa `_ - driver for IBM DB2, developed jointly by IBM and SQLAlchemy developers. * `sqlalchemy-sqlany `_ - driver for SAP Sybase SQL Anywhere, developed by SAP. * `sqlalchemy-monetdb `_ - driver for MonetDB. Experimental / Incomplete ^^^^^^^^^^^^^^^^^^^^^^^^^^ * `sqlalchemy-access `_ - driver for Microsoft Access. * `CALCHIPAN `_ - Adapts `Pandas `_ dataframes to SQLAlchemy. * `sqlalchemy-akiban `_ - driver and ORM extensions for the `Akiban `_ database. * `sqlalchemy-cubrid `_ - driver for the CUBRID database. * `sqlalchemy-maxdb `_ - driver for the MaxDB database SQLAlchemy-0.8.4/doc/_sources/dialects/informix.txt0000644000076500000240000000025412251147171023006 0ustar classicstaff00000000000000.. _informix_toplevel: Informix ======== .. automodule:: sqlalchemy.dialects.informix.base informixdb ---------- .. 
automodule:: sqlalchemy.dialects.informix.informixdbSQLAlchemy-0.8.4/doc/_sources/dialects/mssql.txt0000644000076500000240000000415712251147171022320 0ustar classicstaff00000000000000.. _mssql_toplevel: Microsoft SQL Server ==================== .. automodule:: sqlalchemy.dialects.mssql.base SQL Server Data Types ----------------------- As with all SQLAlchemy dialects, all UPPERCASE types that are known to be valid with SQL server are importable from the top level dialect, whether they originate from :mod:`sqlalchemy.types` or from the local dialect:: from sqlalchemy.dialects.mssql import \ BIGINT, BINARY, BIT, CHAR, DATE, DATETIME, DATETIME2, \ DATETIMEOFFSET, DECIMAL, FLOAT, IMAGE, INTEGER, MONEY, \ NCHAR, NTEXT, NUMERIC, NVARCHAR, REAL, SMALLDATETIME, \ SMALLINT, SMALLMONEY, SQL_VARIANT, TEXT, TIME, \ TIMESTAMP, TINYINT, UNIQUEIDENTIFIER, VARBINARY, VARCHAR Types which are specific to SQL Server, or have SQL Server-specific construction arguments, are as follows: .. currentmodule:: sqlalchemy.dialects.mssql .. autoclass:: BIT :members: __init__ .. autoclass:: CHAR :members: __init__ .. autoclass:: DATETIME2 :members: __init__ .. autoclass:: DATETIMEOFFSET :members: __init__ .. autoclass:: IMAGE :members: __init__ .. autoclass:: MONEY :members: __init__ .. autoclass:: NCHAR :members: __init__ .. autoclass:: NTEXT :members: __init__ .. autoclass:: NVARCHAR :members: __init__ .. autoclass:: REAL :members: __init__ .. autoclass:: SMALLDATETIME :members: __init__ .. autoclass:: SMALLMONEY :members: __init__ .. autoclass:: SQL_VARIANT :members: __init__ .. autoclass:: TEXT :members: __init__ .. autoclass:: TIME :members: __init__ .. autoclass:: TINYINT :members: __init__ .. autoclass:: UNIQUEIDENTIFIER :members: __init__ .. autoclass:: VARCHAR :members: __init__ PyODBC ------ .. automodule:: sqlalchemy.dialects.mssql.pyodbc mxODBC ------ .. automodule:: sqlalchemy.dialects.mssql.mxodbc pymssql ------- .. automodule:: sqlalchemy.dialects.mssql.pymssql zxjdbc -------------- .. automodule:: sqlalchemy.dialects.mssql.zxjdbc AdoDBAPI -------- .. automodule:: sqlalchemy.dialects.mssql.adodbapi SQLAlchemy-0.8.4/doc/_sources/dialects/mysql.txt0000644000076500000240000000613312251147171022322 0ustar classicstaff00000000000000.. _mysql_toplevel: MySQL ===== .. automodule:: sqlalchemy.dialects.mysql.base MySQL Data Types ------------------ As with all SQLAlchemy dialects, all UPPERCASE types that are known to be valid with MySQL are importable from the top level dialect:: from sqlalchemy.dialects.mysql import \ BIGINT, BINARY, BIT, BLOB, BOOLEAN, CHAR, DATE, \ DATETIME, DECIMAL, DECIMAL, DOUBLE, ENUM, FLOAT, INTEGER, \ LONGBLOB, LONGTEXT, MEDIUMBLOB, MEDIUMINT, MEDIUMTEXT, NCHAR, \ NUMERIC, NVARCHAR, REAL, SET, SMALLINT, TEXT, TIME, TIMESTAMP, \ TINYBLOB, TINYINT, TINYTEXT, VARBINARY, VARCHAR, YEAR Types which are specific to MySQL, or have MySQL-specific construction arguments, are as follows: .. currentmodule:: sqlalchemy.dialects.mysql .. autoclass:: BIGINT :members: __init__ .. autoclass:: BINARY :members: __init__ .. autoclass:: BIT :members: __init__ .. autoclass:: BLOB :members: __init__ .. autoclass:: BOOLEAN :members: __init__ .. autoclass:: CHAR :members: __init__ .. autoclass:: DATE :members: __init__ .. autoclass:: DATETIME :members: __init__ .. autoclass:: DECIMAL :members: __init__ .. autoclass:: DOUBLE :members: __init__ .. autoclass:: ENUM :members: __init__ .. autoclass:: FLOAT :members: __init__ .. autoclass:: INTEGER :members: __init__ .. autoclass:: LONGBLOB :members: __init__ .. 
autoclass:: LONGTEXT :members: __init__ .. autoclass:: MEDIUMBLOB :members: __init__ .. autoclass:: MEDIUMINT :members: __init__ .. autoclass:: MEDIUMTEXT :members: __init__ .. autoclass:: NCHAR :members: __init__ .. autoclass:: NUMERIC :members: __init__ .. autoclass:: NVARCHAR :members: __init__ .. autoclass:: REAL :members: __init__ .. autoclass:: SET :members: __init__ .. autoclass:: SMALLINT :members: __init__ .. autoclass:: TEXT :members: __init__ .. autoclass:: TIME :members: __init__ .. autoclass:: TIMESTAMP :members: __init__ .. autoclass:: TINYBLOB :members: __init__ .. autoclass:: TINYINT :members: __init__ .. autoclass:: TINYTEXT :members: __init__ .. autoclass:: VARBINARY :members: __init__ .. autoclass:: VARCHAR :members: __init__ .. autoclass:: YEAR :members: __init__ MySQL-Python -------------------- .. automodule:: sqlalchemy.dialects.mysql.mysqldb OurSQL -------------- .. automodule:: sqlalchemy.dialects.mysql.oursql pymysql ------------- .. automodule:: sqlalchemy.dialects.mysql.pymysql MySQL-Connector ---------------------- .. automodule:: sqlalchemy.dialects.mysql.mysqlconnector cymysql ------------ .. automodule:: sqlalchemy.dialects.mysql.cymysql Google App Engine ----------------------- .. automodule:: sqlalchemy.dialects.mysql.gaerdbms pyodbc ------ .. automodule:: sqlalchemy.dialects.mysql.pyodbc zxjdbc -------------- .. automodule:: sqlalchemy.dialects.mysql.zxjdbc SQLAlchemy-0.8.4/doc/_sources/dialects/oracle.txt0000644000076500000240000000233412251147171022421 0ustar classicstaff00000000000000.. _oracle_toplevel: Oracle ====== .. automodule:: sqlalchemy.dialects.oracle.base Oracle Data Types ------------------- As with all SQLAlchemy dialects, all UPPERCASE types that are known to be valid with Oracle are importable from the top level dialect, whether they originate from :mod:`sqlalchemy.types` or from the local dialect:: from sqlalchemy.dialects.oracle import \ BFILE, BLOB, CHAR, CLOB, DATE, DATETIME, \ DOUBLE_PRECISION, FLOAT, INTERVAL, LONG, NCLOB, \ NUMBER, NVARCHAR, NVARCHAR2, RAW, TIMESTAMP, VARCHAR, \ VARCHAR2 Types which are specific to Oracle, or have Oracle-specific construction arguments, are as follows: .. currentmodule:: sqlalchemy.dialects.oracle .. autoclass:: BFILE :members: __init__ .. autoclass:: DOUBLE_PRECISION :members: __init__ .. autoclass:: INTERVAL :members: __init__ .. autoclass:: NCLOB :members: __init__ .. autoclass:: NUMBER :members: __init__ .. autoclass:: LONG :members: __init__ .. autoclass:: RAW :members: __init__ cx_Oracle ---------- .. automodule:: sqlalchemy.dialects.oracle.cx_oracle zxjdbc ------- .. automodule:: sqlalchemy.dialects.oracle.zxjdbc SQLAlchemy-0.8.4/doc/_sources/dialects/postgresql.txt0000644000076500000240000000602412251147171023357 0ustar classicstaff00000000000000.. _postgresql_toplevel: PostgreSQL ========== .. 
automodule:: sqlalchemy.dialects.postgresql.base PostgreSQL Data Types ------------------------ As with all SQLAlchemy dialects, all UPPERCASE types that are known to be valid with Postgresql are importable from the top level dialect, whether they originate from :mod:`sqlalchemy.types` or from the local dialect:: from sqlalchemy.dialects.postgresql import \ ARRAY, BIGINT, BIT, BOOLEAN, BYTEA, CHAR, CIDR, DATE, \ DOUBLE_PRECISION, ENUM, FLOAT, HSTORE, INET, INTEGER, \ INTERVAL, MACADDR, NUMERIC, REAL, SMALLINT, TEXT, TIME, \ TIMESTAMP, UUID, VARCHAR, INT4RANGE, INT8RANGE, NUMRANGE, \ DATERANGE, TSRANGE, TSTZRANGE Types which are specific to PostgreSQL, or have PostgreSQL-specific construction arguments, are as follows: .. currentmodule:: sqlalchemy.dialects.postgresql .. autoclass:: array .. autoclass:: ARRAY :members: __init__, Comparator .. autoclass:: Any .. autoclass:: All .. autoclass:: BIT :members: __init__ .. autoclass:: BYTEA :members: __init__ .. autoclass:: CIDR :members: __init__ .. autoclass:: DOUBLE_PRECISION :members: __init__ .. autoclass:: ENUM :members: __init__, create, drop .. autoclass:: HSTORE :members: .. autoclass:: hstore :members: .. autoclass:: INET :members: __init__ .. autoclass:: INTERVAL :members: __init__ .. autoclass:: MACADDR :members: __init__ .. autoclass:: REAL :members: __init__ .. autoclass:: UUID :members: __init__ Range Types ~~~~~~~~~~~ The new range column types founds in PostgreSQL 9.2 onwards are catered for by the following types: .. autoclass:: INT4RANGE .. autoclass:: INT8RANGE .. autoclass:: NUMRANGE .. autoclass:: DATERANGE .. autoclass:: TSRANGE .. autoclass:: TSTZRANGE The types above get most of their functionality from the following mixin: .. autoclass:: sqlalchemy.dialects.postgresql.ranges.RangeOperators :members: .. warning:: The range type DDL support should work with any Postgres DBAPI driver, however the data types returned may vary. If you are using ``psycopg2``, it's recommended to upgrade to version 2.5 or later before using these column types. PostgreSQL Constraint Types --------------------------- SQLAlchemy supports Postgresql EXCLUDE constraints via the :class:`ExcludeConstraint` class: .. autoclass:: ExcludeConstraint :members: __init__ For example:: from sqlalchemy.dialects.postgresql import ExcludeConstraint, TSRANGE class RoomBookings(Base): room = Column(Integer(), primary_key=True) during = Column(TSRANGE()) __table_args__ = ( ExcludeConstraint(('room', '='), ('during', '&&')), ) psycopg2 -------------- .. automodule:: sqlalchemy.dialects.postgresql.psycopg2 py-postgresql -------------------- .. automodule:: sqlalchemy.dialects.postgresql.pypostgresql pg8000 -------------- .. automodule:: sqlalchemy.dialects.postgresql.pg8000 zxjdbc -------------- .. automodule:: sqlalchemy.dialects.postgresql.zxjdbc SQLAlchemy-0.8.4/doc/_sources/dialects/sqlite.txt0000644000076500000240000000133512251147171022455 0ustar classicstaff00000000000000.. _sqlite_toplevel: SQLite ====== .. automodule:: sqlalchemy.dialects.sqlite.base SQLite Data Types ------------------------ As with all SQLAlchemy dialects, all UPPERCASE types that are known to be valid with SQLite are importable from the top level dialect, whether they originate from :mod:`sqlalchemy.types` or from the local dialect:: from sqlalchemy.dialects.sqlite import \ BLOB, BOOLEAN, CHAR, DATE, DATETIME, DECIMAL, FLOAT, \ INTEGER, NUMERIC, SMALLINT, TEXT, TIME, TIMESTAMP, \ VARCHAR .. module:: sqlalchemy.dialects.sqlite .. autoclass:: DATETIME .. autoclass:: DATE .. 
autoclass:: TIME Pysqlite -------- .. automodule:: sqlalchemy.dialects.sqlite.pysqliteSQLAlchemy-0.8.4/doc/_sources/dialects/sybase.txt0000644000076500000240000000047612251147171022447 0ustar classicstaff00000000000000.. _sybase_toplevel: Sybase ====== .. automodule:: sqlalchemy.dialects.sybase.base python-sybase ------------------- .. automodule:: sqlalchemy.dialects.sybase.pysybase pyodbc ------------ .. automodule:: sqlalchemy.dialects.sybase.pyodbc mxodbc ------------ .. automodule:: sqlalchemy.dialects.sybase.mxodbc SQLAlchemy-0.8.4/doc/_sources/faq.txt0000644000076500000240000010641712251147171020142 0ustar classicstaff00000000000000:orphan: .. _faq_toplevel: ============================ Frequently Asked Questions ============================ .. contents:: :local: :class: faq :backlinks: none Connections / Engines ===================== How do I configure logging? --------------------------- See :ref:`dbengine_logging`. How do I pool database connections? Are my connections pooled? ---------------------------------------------------------------- SQLAlchemy performs application-level connection pooling automatically in most cases. With the exception of SQLite, a :class:`.Engine` object refers to a :class:`.QueuePool` as a source of connectivity. For more detail, see :ref:`engines_toplevel` and :ref:`pooling_toplevel`. How do I pass custom connect arguments to my database API? ----------------------------------------------------------- The :func:`.create_engine` call accepts additional arguments either directly via the ``connect_args`` keyword argument:: e = create_engine("mysql://scott:tiger@localhost/test", connect_args={"encoding": "utf8"}) Or for basic string and integer arguments, they can usually be specified in the query string of the URL:: e = create_engine("mysql://scott:tiger@localhost/test?encoding=utf8") .. seealso:: :ref:`custom_dbapi_args` "MySQL Server has gone away" ---------------------------- There are two major causes for this error: 1. The MySQL client closes connections which have been idle for a set period of time, defaulting to eight hours. This can be avoided by using the ``pool_recycle`` setting with :func:`.create_engine`, described at :ref:`mysql_connection_timeouts`. 2. Usage of the MySQLdb :term:`DBAPI`, or a similar DBAPI, in a non-threadsafe manner, or in an otherwise inappropriate way. The MySQLdb connection object is not threadsafe - this expands out to any SQLAlchemy system that links to a single connection, which includes the ORM :class:`.Session`. For background on how :class:`.Session` should be used in a multithreaded environment, see :ref:`session_faq_threadsafe`. Why does SQLAlchemy issue so many ROLLBACKs? --------------------------------------------- SQLAlchemy currently assumes DBAPI connections are in "non-autocommit" mode - this is the default behavior of the Python database API, meaning it must be assumed that a transaction is always in progress. The connection pool issues ``connection.rollback()`` when a connection is returned. This is so that any transactional resources remaining on the connection are released. On a database like Postgresql or MSSQL where table resources are aggressively locked, this is critical so that rows and tables don't remain locked within connections that are no longer in use. An application can otherwise hang. It's not just for locks, however, and is equally critical on any database that has any kind of transaction isolation, including MySQL with InnoDB. 
Any connection that is still inside an old transaction will return stale data, if that data was already queried on that connection within isolation. For background on why you might see stale data even on MySQL, see http://dev.mysql.com/doc/refman/5.1/en/innodb-transaction-model.html I'm on MyISAM - how do I turn it off? ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ The behavior of the connection pool's connection return behavior can be configured using ``reset_on_return``:: from sqlalchemy import create_engine from sqlalchemy.pool import QueuePool engine = create_engine('mysql://scott:tiger@localhost/myisam_database', pool=QueuePool(reset_on_return=False)) I'm on SQL Server - how do I turn those ROLLBACKs into COMMITs? ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ``reset_on_return`` accepts the values ``commit``, ``rollback`` in addition to ``True``, ``False``, and ``None``. Setting to ``commit`` will cause a COMMIT as any connection is returned to the pool:: engine = create_engine('mssql://scott:tiger@mydsn', pool=QueuePool(reset_on_return='commit')) I am using multiple connections with a SQLite database (typically to test transaction operation), and my test program is not working! ---------------------------------------------------------------------------------------------------------------------------------------------------------- If using a SQLite ``:memory:`` database, or a version of SQLAlchemy prior to version 0.7, the default connection pool is the :class:`.SingletonThreadPool`, which maintains exactly one SQLite connection per thread. So two connections in use in the same thread will actually be the same SQLite connection. Make sure you're not using a :memory: database and use :class:`.NullPool`, which is the default for non-memory databases in current SQLAlchemy versions. .. seealso:: :ref:`pysqlite_threading_pooling` - info on PySQLite's behavior. How do I get at the raw DBAPI connection when using an Engine? -------------------------------------------------------------- With a regular SA engine-level Connection, you can get at a pool-proxied version of the DBAPI connection via the :attr:`.Connection.connection` attribute on :class:`.Connection`, and for the really-real DBAPI connection you can call the :attr:`.ConnectionFairy.connection` attribute on that - but there should never be any need to access the non-pool-proxied DBAPI connection, as all methods are proxied through:: engine = create_engine(...) conn = engine.connect() conn.connection. cursor = conn.connection.cursor() You must ensure that you revert any isolation level settings or other operation-specific settings on the connection back to normal before returning it to the pool. As an alternative to reverting settings, you can call the :meth:`.Connection.detach` method on either :class:`.Connection` or the proxied connection, which will de-associate the connection from the pool such that it will be closed and discarded when :meth:`.Connection.close` is called:: conn = engine.connect() conn.detach() # detaches the DBAPI connection from the connection pool conn.connection. conn.close() # connection is closed for real, the pool replaces it with a new connection MetaData / Schema ================== My program is hanging when I say ``table.drop()`` / ``metadata.drop_all()`` ---------------------------------------------------------------------------- This usually corresponds to two conditions: 1. using PostgreSQL, which is really strict about table locks, and 2. 
you have a connection still open which contains locks on the table and is distinct from the connection being used for the DROP statement. Heres the most minimal version of the pattern:: connection = engine.connect() result = connection.execute(mytable.select()) mytable.drop(engine) Above, a connection pool connection is still checked out; furthermore, the result object above also maintains a link to this connection. If "implicit execution" is used, the result will hold this connection opened until the result object is closed or all rows are exhausted. The call to ``mytable.drop(engine)`` attempts to emit DROP TABLE on a second connection procured from the :class:`.Engine` which will lock. The solution is to close out all connections before emitting DROP TABLE:: connection = engine.connect() result = connection.execute(mytable.select()) # fully read result sets result.fetchall() # close connections connection.close() # now locks are removed mytable.drop(engine) Does SQLAlchemy support ALTER TABLE, CREATE VIEW, CREATE TRIGGER, Schema Upgrade Functionality? ----------------------------------------------------------------------------------------------- General ALTER support isn't present in SQLAlchemy directly. For special DDL on an ad-hoc basis, the :class:`.DDL` and related constructs can be used. See :doc:`core/ddl` for a discussion on this subject. A more comprehensive option is to use schema migration tools, such as Alembic or SQLAlchemy-Migrate; see :ref:`schema_migrations` for discussion on this. How can I sort Table objects in order of their dependency? ----------------------------------------------------------- This is available via the :attr:`.MetaData.sorted_tables` function:: metadata = MetaData() # ... add Table objects to metadata ti = metadata.sorted_tables: for t in ti: print t How can I get the CREATE TABLE/ DROP TABLE output as a string? --------------------------------------------------------------- Modern SQLAlchemy has clause constructs which represent DDL operations. These can be rendered to strings like any other SQL expression:: from sqlalchemy.schema import CreateTable print CreateTable(mytable) To get the string specific to a certain engine:: print CreateTable(mytable).compile(engine) There's also a special form of :class:`.Engine` that can let you dump an entire metadata creation sequence, using this recipe:: def dump(sql, *multiparams, **params): print sql.compile(dialect=engine.dialect) engine = create_engine('postgresql://', strategy='mock', executor=dump) metadata.create_all(engine, checkfirst=False) The `Alembic `_ tool also supports an "offline" SQL generation mode that renders database migrations as SQL scripts. How can I subclass Table/Column to provide certain behaviors/configurations? ------------------------------------------------------------------------------ :class:`.Table` and :class:`.Column` are not good targets for direct subclassing. However, there are simple ways to get on-construction behaviors using creation functions, and behaviors related to the linkages between schema objects such as constraint conventions or naming conventions using attachment events. An example of many of these techniques can be seen at `Naming Conventions `_. SQL Expressions ================= Why does ``.col.in_([])`` Produce ``col != col``? Why not ``1=0``? ------------------------------------------------------------------- A little introduction to the issue. 
The IN operator in SQL, given a list of elements to compare against a column, generally does not accept an empty list, that is while it is valid to say:: column IN (1, 2, 3) it's not valid to say:: column IN () SQLAlchemy's :meth:`.Operators.in_` operator, when given an empty list, produces this expression:: column != column As of version 0.6, it also produces a warning stating that a less efficient comparison operation will be rendered. This expression is the only one that is both database agnostic and produces correct results. For example, the naive approach of "just evaluate to false, by comparing 1=0 or 1!=1", does not handle nulls properly. An expression like:: NOT column != column will not return a row when "column" is null, but an expression which does not take the column into account:: NOT 1=0 will. Closer to the mark is the following CASE expression:: CASE WHEN column IS NOT NULL THEN 1=0 ELSE NULL END We don't use this expression due to its verbosity, and its also not typically accepted by Oracle within a WHERE clause - depending on how you phrase it, you'll either get "ORA-00905: missing keyword" or "ORA-00920: invalid relational operator". It's also still less efficient than just rendering SQL without the clause altogether (or not issuing the SQL at all, if the statement is just a simple search). The best approach therefore is to avoid the usage of IN given an argument list of zero length. Instead, don't emit the Query in the first place, if no rows should be returned. The warning is best promoted to a full error condition using the Python warnings filter (see http://docs.python.org/library/warnings.html). ORM Configuration ================== How do I map a table that has no primary key? --------------------------------------------- In almost all cases, a table does have a so-called :term:`candidate key`, which is a column or series of columns that uniquely identify a row. If a table truly doesn't have this, and has actual fully duplicate rows, the table is not corresponding to `first normal form `_ and cannot be mapped. Otherwise, whatever columns comprise the best candidate key can be applied directly to the mapper:: class SomeClass(Base): __table__ = some_table_with_no_pk __mapper_args__ = { 'primary_key':[some_table_with_no_pk.c.uid, some_table_with_no_pk.c.bar] } Better yet is when using fully declared table metadata, use the ``primary_key=True`` flag on those columns:: class SomeClass(Base): __tablename__ = "some_table_with_no_pk" uid = Column(Integer, primary_key=True) bar = Column(String, primary_key=True) All tables in a relational database should have primary keys. Even a many-to-many association table - the primary key would be the composite of the two association columns:: CREATE TABLE my_association ( user_id INTEGER REFERENCES user(id), account_id INTEGER REFERENCES account(id), PRIMARY KEY (user_id, account_id) ) How do I configure a Column that is a Python reserved word or similar? ---------------------------------------------------------------------------- Column-based attributes can be given any name desired in the mapping. See :ref:`mapper_column_distinct_names`. How do I get a list of all columns, relationships, mapped attributes, etc. given a mapped class? ------------------------------------------------------------------------------------------------- This information is all available from the :class:`.Mapper` object. 
To get at the :class:`.Mapper` for a particular mapped class, call the :func:`.inspect` function on it:: from sqlalchemy import inspect mapper = inspect(MyClass) From there, all information about the class can be acquired using such methods as: * :attr:`.Mapper.attrs` - a namespace of all mapped attributes. The attributes themselves are instances of :class:`.MapperProperty`, which contain additional attributes that can lead to the mapped SQL expression or column, if applicable. * :attr:`.Mapper.column_attrs` - the mapped attribute namespace limited to column and SQL expression attributes. You might want to use :attr:`.Mapper.columns` to get at the :class:`.Column` objects directly. * :attr:`.Mapper.relationships` - namespace of all :class:`.RelationshipProperty` attributes. * :attr:`.Mapper.all_orm_descriptors` - namespace of all mapped attributes, plus user-defined attributes defined using systems such as :class:`.hybrid_property`, :class:`.AssociationProxy` and others. * :attr:`.Mapper.columns` - A namespace of :class:`.Column` objects and other named SQL expressions associated with the mapping. * :attr:`.Mapper.mapped_table` - The :class:`.Table` or other selectable to which this mapper is mapped. * :attr:`.Mapper.local_table` - The :class:`.Table` that is "local" to this mapper; this differs from :attr:`.Mapper.mapped_table` in the case of a mapper mapped using inheritance to a composed selectable. I'm using Declarative and setting primaryjoin/secondaryjoin using an ``and_()`` or ``or_()``, and I am getting an error message about foreign keys. ------------------------------------------------------------------------------------------------------------------------------------------------------------------ Are you doing this?:: class MyClass(Base): # .... foo = relationship("Dest", primaryjoin=and_("MyClass.id==Dest.foo_id", "MyClass.foo==Dest.bar")) That's an ``and_()`` of two string expressions, which SQLAlchemy cannot apply any mapping towards. Declarative allows :func:`.relationship` arguments to be specified as strings, which are converted into expression objects using ``eval()``. But this doesn't occur inside of an ``and_()`` expression - it's a special operation declarative applies only to the *entirety* of what's passed to primaryjoin or other arguments as a string:: class MyClass(Base): # .... foo = relationship("Dest", primaryjoin="and_(MyClass.id==Dest.foo_id, MyClass.foo==Dest.bar)") Or if the objects you need are already available, skip the strings:: class MyClass(Base): # .... foo = relationship(Dest, primaryjoin=and_(MyClass.id==Dest.foo_id, MyClass.foo==Dest.bar)) The same idea applies to all the other arguments, such as ``foreign_keys``:: # wrong ! foo = relationship(Dest, foreign_keys=["Dest.foo_id", "Dest.bar_id"]) # correct ! foo = relationship(Dest, foreign_keys="[Dest.foo_id, Dest.bar_id]") # also correct ! foo = relationship(Dest, foreign_keys=[Dest.foo_id, Dest.bar_id]) # if you're using columns from the class that you're inside of, just use the column objects ! class MyClass(Base): foo_id = Column(...) bar_id = Column(...) # ... foo = relationship(Dest, foreign_keys=[foo_id, bar_id]) Sessions / Queries =================== "This Session's transaction has been rolled back due to a previous exception during flush." 
(or similar) --------------------------------------------------------------------------------------------------------- This is an error that occurs when a :meth:`.Session.flush` raises an exception, rolls back the transaction, but further commands upon the `Session` are called without an explicit call to :meth:`.Session.rollback` or :meth:`.Session.close`. It usually corresponds to an application that catches an exception upon :meth:`.Session.flush` or :meth:`.Session.commit` and does not properly handle the exception. For example:: from sqlalchemy import create_engine, Column, Integer from sqlalchemy.orm import sessionmaker from sqlalchemy.ext.declarative import declarative_base Base = declarative_base(create_engine('sqlite://')) class Foo(Base): __tablename__ = 'foo' id = Column(Integer, primary_key=True) Base.metadata.create_all() session = sessionmaker()() # constraint violation session.add_all([Foo(id=1), Foo(id=1)]) try: session.commit() except: # ignore error pass # continue using session without rolling back session.commit() The usage of the :class:`.Session` should fit within a structure similar to this:: try: session.commit() except: session.rollback() raise finally: session.close() # optional, depends on use case Many things can cause a failure within the try/except besides flushes. You should always have some kind of "framing" of your session operations so that connection and transaction resources have a definitive boundary, otherwise your application doesn't really have its usage of resources under control. This is not to say that you need to put try/except blocks all throughout your application - on the contrary, this would be a terrible idea. You should architect your application such that there is one (or few) point(s) of "framing" around session operations. For a detailed discussion on how to organize usage of the :class:`.Session`, please see :ref:`session_faq_whentocreate`. But why does flush() insist on issuing a ROLLBACK? ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ It would be great if :meth:`.Session.flush` could partially complete and then not roll back, however this is beyond its current capabilities since its internal bookkeeping would have to be modified such that it can be halted at any time and be exactly consistent with what's been flushed to the database. While this is theoretically possible, the usefulness of the enhancement is greatly decreased by the fact that many database operations require a ROLLBACK in any case. Postgres in particular has operations which, once failed, the transaction is not allowed to continue:: test=> create table foo(id integer primary key); NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "foo_pkey" for table "foo" CREATE TABLE test=> begin; BEGIN test=> insert into foo values(1); INSERT 0 1 test=> commit; COMMIT test=> begin; BEGIN test=> insert into foo values(1); ERROR: duplicate key value violates unique constraint "foo_pkey" test=> insert into foo values(2); ERROR: current transaction is aborted, commands ignored until end of transaction block What SQLAlchemy offers that solves both issues is support of SAVEPOINT, via :meth:`.Session.begin_nested`. Using :meth:`.Session.begin_nested`, you can frame an operation that may potentially fail within a transaction, and then "roll back" to the point before its failure while maintaining the enclosing transaction. But why isn't the one automatic call to ROLLBACK enough? Why must I ROLLBACK again? 
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ This is again a matter of the :class:`.Session` providing a consistent interface and refusing to guess about what context its being used. For example, the :class:`.Session` supports "framing" above within multiple levels. Such as, suppose you had a decorator ``@with_session()``, which did this:: def with_session(fn): def go(*args, **kw): session.begin(subtransactions=True) try: ret = fn(*args, **kw) session.commit() return ret except: session.rollback() raise return go The above decorator begins a transaction if one does not exist already, and then commits it, if it were the creator. The "subtransactions" flag means that if :meth:`.Session.begin` were already called by an enclosing function, nothing happens except a counter is incremented - this counter is decremented when :meth:`.Session.commit` is called and only when it goes back to zero does the actual COMMIT happen. It allows this usage pattern:: @with_session def one(): # do stuff two() @with_session def two(): # etc. one() two() ``one()`` can call ``two()``, or ``two()`` can be called by itself, and the ``@with_session`` decorator ensures the appropriate "framing" - the transaction boundaries stay on the outermost call level. As you can see, if ``two()`` calls ``flush()`` which throws an exception and then issues a ``rollback()``, there will *always* be a second ``rollback()`` performed by the decorator, and possibly a third corresponding to two levels of decorator. If the ``flush()`` pushed the ``rollback()`` all the way out to the top of the stack, and then we said that all remaining ``rollback()`` calls are moot, there is some silent behavior going on there. A poorly written enclosing method might suppress the exception, and then call ``commit()`` assuming nothing is wrong, and then you have a silent failure condition. The main reason people get this error in fact is because they didn't write clean "framing" code and they would have had other problems down the road. If you think the above use case is a little exotic, the same kind of thing comes into play if you want to SAVEPOINT- you might call ``begin_nested()`` several times, and the ``commit()``/``rollback()`` calls each resolve the most recent ``begin_nested()``. The meaning of ``rollback()`` or ``commit()`` is dependent upon which enclosing block it is called, and you might have any sequence of ``rollback()``/``commit()`` in any order, and its the level of nesting that determines their behavior. In both of the above cases, if ``flush()`` broke the nesting of transaction blocks, the behavior is, depending on scenario, anywhere from "magic" to silent failure to blatant interruption of code flow. ``flush()`` makes its own "subtransaction", so that a transaction is started up regardless of the external transactional state, and when complete it calls ``commit()``, or ``rollback()`` upon failure - but that ``rollback()`` corresponds to its own subtransaction - it doesn't want to guess how you'd like to handle the external "framing" of the transaction, which could be nested many levels with any combination of subtransactions and real SAVEPOINTs. The job of starting/ending the "frame" is kept consistently with the code external to the ``flush()``, and we made a decision that this was the most consistent approach. I'm inserting 400,000 rows with the ORM and it's really slow! 
-------------------------------------------------------------- The SQLAlchemy ORM uses the :term:`unit of work` pattern when synchronizing changes to the database. This pattern goes far beyond simple "inserts" of data. It includes that attributes which are assigned on objects are received using an attribute instrumentation system which tracks changes on objects as they are made, includes that all rows inserted are tracked in an identity map which has the effect that for each row SQLAlchemy must retrieve its "last inserted id" if not already given, and also involves that rows to be inserted are scanned and sorted for dependencies as needed. Objects are also subject to a fair degree of bookkeeping in order to keep all of this running, which for a very large number of rows at once can create an inordinate amount of time spent with large data structures, hence it's best to chunk these. Basically, unit of work is a large degree of automation in order to automate the task of persisting a complex object graph into a relational database with no explicit persistence code, and this automation has a price. ORMs are basically not intended for high-performance bulk inserts - this is the whole reason SQLAlchemy offers the Core in addition to the ORM as a first-class component. For the use case of fast bulk inserts, the SQL generation and execution system that the ORM builds on top of is part of the Core. Using this system directly, we can produce an INSERT that is competitive with using the raw database API directly. The example below illustrates time-based tests for four different methods of inserting rows, going from the most automated to the least. With cPython 2.7, runtimes observed:: classics-MacBook-Pro:sqlalchemy classic$ python test.py SQLAlchemy ORM: Total time for 100000 records 14.3528850079 secs SQLAlchemy ORM pk given: Total time for 100000 records 10.0164160728 secs SQLAlchemy Core: Total time for 100000 records 0.775382995605 secs sqlite3: Total time for 100000 records 0.676795005798 sec We can reduce the time by a factor of three using recent versions of `Pypy `_:: classics-MacBook-Pro:sqlalchemy classic$ /usr/local/src/pypy-2.1-beta2-osx64/bin/pypy test.py SQLAlchemy ORM: Total time for 100000 records 5.88369488716 secs SQLAlchemy ORM pk given: Total time for 100000 records 3.52294301987 secs SQLAlchemy Core: Total time for 100000 records 0.613556146622 secs sqlite3: Total time for 100000 records 0.442467927933 sec Script:: import time import sqlite3 from sqlalchemy.ext.declarative import declarative_base from sqlalchemy import Column, Integer, String, create_engine from sqlalchemy.orm import scoped_session, sessionmaker Base = declarative_base() DBSession = scoped_session(sessionmaker()) engine = None class Customer(Base): __tablename__ = "customer" id = Column(Integer, primary_key=True) name = Column(String(255)) def init_sqlalchemy(dbname='sqlite:///sqlalchemy.db'): global engine engine = create_engine(dbname, echo=False) DBSession.remove() DBSession.configure(bind=engine, autoflush=False, expire_on_commit=False) Base.metadata.drop_all(engine) Base.metadata.create_all(engine) def test_sqlalchemy_orm(n=100000): init_sqlalchemy() t0 = time.time() for i in range(n): customer = Customer() customer.name = 'NAME ' + str(i) DBSession.add(customer) if i % 1000 == 0: DBSession.flush() DBSession.commit() print("SQLAlchemy ORM: Total time for " + str(n) + " records " + str(time.time() - t0) + " secs") def test_sqlalchemy_orm_pk_given(n=100000): init_sqlalchemy() t0 = time.time() for i in 
range(n): customer = Customer(id=i+1, name="NAME " + str(i)) DBSession.add(customer) if i % 1000 == 0: DBSession.flush() DBSession.commit() print("SQLAlchemy ORM pk given: Total time for " + str(n) + " records " + str(time.time() - t0) + " secs") def test_sqlalchemy_core(n=100000): init_sqlalchemy() t0 = time.time() engine.execute( Customer.__table__.insert(), [{"name": 'NAME ' + str(i)} for i in range(n)] ) print("SQLAlchemy Core: Total time for " + str(n) + " records " + str(time.time() - t0) + " secs") def init_sqlite3(dbname): conn = sqlite3.connect(dbname) c = conn.cursor() c.execute("DROP TABLE IF EXISTS customer") c.execute("CREATE TABLE customer (id INTEGER NOT NULL, " "name VARCHAR(255), PRIMARY KEY(id))") conn.commit() return conn def test_sqlite3(n=100000, dbname='sqlite3.db'): conn = init_sqlite3(dbname) c = conn.cursor() t0 = time.time() for i in range(n): row = ('NAME ' + str(i),) c.execute("INSERT INTO customer (name) VALUES (?)", row) conn.commit() print("sqlite3: Total time for " + str(n) + " records " + str(time.time() - t0) + " sec") if __name__ == '__main__': test_sqlalchemy_orm(100000) test_sqlalchemy_orm_pk_given(100000) test_sqlalchemy_core(100000) test_sqlite3(100000) How do I make a Query that always adds a certain filter to every query? ------------------------------------------------------------------------------------------------ See the recipe at `PreFilteredQuery `_. I've created a mapping against an Outer Join, and while the query returns rows, no objects are returned. Why not? ------------------------------------------------------------------------------------------------------------------ Rows returned by an outer join may contain NULL for part of the primary key, as the primary key is the composite of both tables. The :class:`.Query` object ignores incoming rows that don't have an acceptable primary key. Based on the setting of the ``allow_partial_pks`` flag on :func:`.mapper`, a primary key is accepted if the value has at least one non-NULL value, or alternatively if the value has no NULL values. See ``allow_partial_pks`` at :func:`.mapper`. I'm using ``joinedload()`` or ``lazy=False`` to create a JOIN/OUTER JOIN and SQLAlchemy is not constructing the correct query when I try to add a WHERE, ORDER BY, LIMIT, etc. (which relies upon the (OUTER) JOIN) ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- The joins generated by joined eager loading are only used to fully load related collections, and are designed to have no impact on the primary results of the query. Since they are anonymously aliased, they cannot be referenced directly. For detail on this beahvior, see :doc:`orm/loading`. Query has no ``__len__()``, why not? ------------------------------------ The Python ``__len__()`` magic method applied to an object allows the ``len()`` builtin to be used to determine the length of the collection. It's intuitive that a SQL query object would link ``__len__()`` to the :meth:`.Query.count` method, which emits a `SELECT COUNT`. The reason this is not possible is because evaluating the query as a list would incur two SQL calls instead of one:: class Iterates(object): def __len__(self): print "LEN!" return 5 def __iter__(self): print "ITER!" return iter([1, 2, 3, 4, 5]) list(Iterates()) output:: ITER! LEN! How Do I use Textual SQL with ORM Queries? 
-------------------------------------------

See:

* :ref:`orm_tutorial_literal_sql` - Ad-hoc textual blocks with :class:`.Query`

* :ref:`session_sql_expressions` - Using :class:`.Session` with textual SQL directly.

I'm calling ``Session.delete(myobject)`` and it isn't removed from the parent collection!
------------------------------------------------------------------------------------------

See :ref:`session_deleting_from_collections` for a description of this behavior.

Why isn't my ``__init__()`` called when I load objects?
--------------------------------------------------------

See :ref:`mapping_constructors` for a description of this behavior.

How do I use ON DELETE CASCADE with SA's ORM?
----------------------------------------------

SQLAlchemy will always issue UPDATE or DELETE statements for dependent rows which are
currently loaded in the :class:`.Session`.  For rows which are not loaded, it will by
default issue SELECT statements to load those rows and update/delete those as well;
in other words it assumes there is no ON DELETE CASCADE configured.
To configure SQLAlchemy to cooperate with ON DELETE CASCADE, see
:ref:`passive_deletes`.

I set the "foo_id" attribute on my instance to "7", but the "foo" attribute is still ``None`` - shouldn't it have loaded Foo with id #7?
----------------------------------------------------------------------------------------------------------------------------------------------------

The ORM is not constructed in such a way as to support immediate population of
relationships driven from foreign key attribute changes - instead, it is designed
to work the other way around - foreign key attributes are handled by the ORM behind
the scenes, while the end user sets up object relationships naturally.  Therefore,
the recommended way to set ``o.foo`` is to do just that - set it!::

    foo = Session.query(Foo).get(7)
    o.foo = foo
    Session.commit()

Manipulation of foreign key attributes is of course entirely legal.  However,
setting a foreign-key attribute to a new value currently does not trigger an
"expire" event of the :func:`.relationship` in which it's involved (this may be
implemented in the future).  This means that for the following sequence::

    o = Session.query(SomeClass).first()
    assert o.foo is None
    o.foo_id = 7

``o.foo`` is loaded when we checked it for ``None``.  Setting ``o.foo_id=7`` leaves
the value of "7" pending, but no flush has occurred.  Loading of ``o.foo`` based on
the foreign key mutation is usually achieved naturally after the commit, which both
flushes the new foreign key value and expires all state::

    Session.commit()

    assert o.foo is <the Foo object with id 7>

A more minimal operation is to expire the attribute individually.  The
:meth:`.Session.flush` is also needed if the object is pending (hasn't been
INSERTed yet), or if the relationship is many-to-one prior to 0.6.5::

    Session.expire(o, ['foo'])
    Session.flush()

    assert o.foo is <the Foo object with id 7>

Where above, expiring the attribute triggers a lazy load on the next access of
``o.foo``.  The object does not "autoflush" on access of ``o.foo`` if the object is
pending, since it is usually desirable that a pending object doesn't autoflush
prematurely and/or excessively, while its state is still being populated.

Also see the recipe `ExpireRelationshipOnFKChange `_, which features a mechanism to
actually achieve this behavior to a reasonable degree in simple situations.

Is there a way to automagically have only unique keywords (or other kinds of objects) without doing a query for the keyword and getting a reference to the row containing that keyword?
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- When people read the many-to-many example in the docs, they get hit with the fact that if you create the same ``Keyword`` twice, it gets put in the DB twice. Which is somewhat inconvenient. This `UniqueObject `_ recipe was created to address this issue. SQLAlchemy-0.8.4/doc/_sources/glossary.txt0000644000076500000240000005702412251147171021235 0ustar classicstaff00000000000000:orphan: .. _glossary: ======== Glossary ======== .. note:: The Glossary is a brand new addition to the documentation. While sparse at the moment we hope to fill it up with plenty of new terms soon! .. glossary:: :sorted: annotations Annotations are a concept used internally by SQLAlchemy in order to store additional information along with :class:`.ClauseElement` objects. A Python dictionary is associated with a copy of the object, which contains key/value pairs significant to various internal systems, mostly within the ORM:: some_column = Column('some_column', Integer) some_column_annotated = some_column._annotate({"entity": User}) The annotation system differs from the public dictionary :attr:`.Column.info` in that the above annotation operation creates a *copy* of the new :class:`.Column`, rather than considering all annotation values to be part of a single unit. The ORM creates copies of expression objects in order to apply annotations that are specific to their context, such as to differentiate columns that should render themselves as relative to a joined-inheritance entity versus those which should render relative to their immediate parent table alone, as well as to differentiate columns within the "join condition" of a relationship where the column in some cases needs to be expressed in terms of one particular table alias or another, based on its position within the join expression. descriptor descriptors In Python, a descriptor is an object attribute with “binding behavior”, one whose attribute access has been overridden by methods in the `descriptor protocol `_. Those methods are __get__(), __set__(), and __delete__(). If any of those methods are defined for an object, it is said to be a descriptor. In SQLAlchemy, descriptors are used heavily in order to provide attribute behavior on mapped classes. When a class is mapped as such:: class MyClass(Base): __tablename__ = 'foo' id = Column(Integer, primary_key=True) data = Column(String) The ``MyClass`` class will be :term:`mapped` when its definition is complete, at which point the ``id`` and ``data`` attributes, starting out as :class:`.Column` objects, will be replaced by the :term:`instrumentation` system with instances of :class:`.InstrumentedAttribute`, which are descriptors that provide the above mentioned ``__get__()``, ``__set__()`` and ``__delete__()`` methods. The :class:`.InstrumentedAttribute` will generate a SQL expression when used at the class level:: >>> print MyClass.data == 5 data = :data_1 and at the instance level, keeps track of changes to values, and also :term:`lazy loads` unloaded attributes from the database:: >>> m1 = MyClass() >>> m1.id = 5 >>> m1.data = "some data" >>> from sqlalchemy import inspect >>> inspect(m1).attrs.data.history.added "some data" discriminator A result-set column which is used during :term:`polymorphic` loading to determine what kind of mapped class should be applied to a particular incoming result row. 
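For example, a minimal mapping (using a hypothetical ``Employee`` hierarchy and the usual declarative imports) might designate such a column via the ``polymorphic_on`` argument, here with single table inheritance::

    class Employee(Base):
        __tablename__ = 'employee'

        id = Column(Integer, primary_key=True)
        name = Column(String(50))

        # "type" is the discriminator; its value in each result row
        # determines which subclass the ORM instantiates
        type = Column(String(20))

        __mapper_args__ = {
            'polymorphic_on': type,
            'polymorphic_identity': 'employee'
        }

    class Engineer(Employee):
        __mapper_args__ = {'polymorphic_identity': 'engineer'}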
In SQLAlchemy, the classes are always part of a hierarchy mapping using inheritance mapping. .. seealso:: :ref:`inheritance_toplevel` instrumentation instrumented Instrumentation refers to the process of augmenting the functionality and attribute set of a particular class. Ideally, the behavior of the class should remain close to a regular class, except that additional behviors and features are made available. The SQLAlchemy :term:`mapping` process, among other things, adds database-enabled :term:`descriptors` to a mapped class which each represent a particular database column or relationship to a related class. identity map A mapping between Python objects and their database identities. The identity map is a collection that's associated with an ORM :term:`session` object, and maintains a single instance of every database object keyed to its identity. The advantage to this pattern is that all operations which occur for a particular database identity are transparently coordinated onto a single object instance. When using an identity map in conjunction with an :term:`isolated` transaction, having a reference to an object that's known to have a particular primary key can be considered from a practical standpoint to be a proxy to the actual database row. .. seealso:: Martin Fowler - Identity Map - http://martinfowler.com/eaaCatalog/identityMap.html lazy load lazy loads In object relational mapping, a "lazy load" refers to an attribute that does not contain its database-side value for some period of time, typically when the object is first loaded. Instead, the attribute receives a *memoization* that causes it to go out to the database and load its data when it's first used. Using this pattern, the complexity and time spent within object fetches can sometimes be reduced, in that attributes for related tables don't need to be addressed immediately. .. seealso:: `Lazy Load (on Martin Fowler) `_ :term:`N plus one problem` :doc:`orm/loading` mapping mapped We say a class is "mapped" when it has been passed through the :func:`.orm.mapper` function. This process associates the class with a database table or other :term:`selectable` construct, so that instances of it can be persisted using a :class:`.Session` as well as loaded using a :class:`.Query`. N plus one problem The N plus one problem is a common side effect of the :term:`lazy load` pattern, whereby an application wishes to iterate through a related attribute or collection on each member of a result set of objects, where that attribute or collection is set to be loaded via the lazy load pattern. The net result is that a SELECT statement is emitted to load the initial result set of parent objects; then, as the application iterates through each member, an additional SELECT statement is emitted for each member in order to load the related attribute or collection for that member. The end result is that for a result set of N parent objects, there will be N + 1 SELECT statements emitted. The N plus one problem is alleviated using :term:`eager loading`. .. seealso:: :doc:`orm/loading` polymorphic polymorphically Refers to a function that handles several types at once. In SQLAlchemy, the term is usually applied to the concept of an ORM mapped class whereby a query operation will return different subclasses based on information in the result set, typically by checking the value of a particular column in the result known as the :term:`discriminator`. 
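For example, given a hypothetical ``Employee`` hierarchy with ``Engineer`` and ``Manager`` subclasses mapped against it, a single query against the base class can deliver instances of several classes at once::

    for employee in session.query(Employee):
        # the class of each returned object is chosen per-row,
        # based on the value of the discriminator column
        print type(employee).__name__, employee.name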
Polymorphic loading in SQLAlchemy implies that one or a combination of three different schemes is used to map a hierarchy of classes; "joined", "single", and "concrete".   The section :ref:`inheritance_toplevel` describes inheritance mapping fully.

generative
    A term that SQLAlchemy uses to refer to what's normally known as
    :term:`method chaining`; see that term for details.

method chaining
    An object-oriented technique whereby the state of an object is constructed
    by calling methods on the object.  The object features any number of methods,
    each of which returns a new object (or in some cases the same object) with
    additional state added to the object.

    The two SQLAlchemy objects that make the most use of method chaining are the
    :class:`~.expression.Select` object and the :class:`~.orm.query.Query` object.
    For example, a :class:`~.expression.Select` object can be assigned two
    expressions to its WHERE clause as well as an ORDER BY clause by calling upon
    the :meth:`~.Select.where` and :meth:`~.Select.order_by` methods::

        stmt = select([user.c.name]).\
                    where(user.c.id > 5).\
                    where(user.c.name.like('e%')).\
                    order_by(user.c.name)

    Each method call above returns a copy of the original
    :class:`~.expression.Select` object with additional qualifiers added.

    .. seealso::

        :term:`generative`

release
releases
released
    In the context of SQLAlchemy, the term "released" refers to the process of
    ending the usage of a particular database connection.  SQLAlchemy features the
    usage of connection pools, which allows configurability as to the lifespan of
    database connections.  When using a pooled connection, the process of "closing"
    it, i.e. invoking a statement like ``connection.close()``, may have the effect
    of the connection being returned to an existing pool, or it may have the effect
    of actually shutting down the underlying TCP/IP connection referred to by that
    connection - which one takes place depends on configuration as well as the
    current state of the pool.  So we use the term *released* instead, to mean
    "do whatever it is you do with connections when we're done using them".

    The term will sometimes be used in the phrase, "release transactional
    resources", to indicate more explicitly that what we are actually "releasing"
    is any transactional state which has accumulated upon the connection.  In most
    situations, the process of selecting from tables, emitting updates, etc.
    acquires :term:`isolated` state upon that connection as well as potential row
    or table locks.  This state is all local to a particular transaction on the
    connection, and is released when we emit a rollback.  An important feature of
    the connection pool is that when we return a connection to the pool, the
    ``connection.rollback()`` method of the DBAPI is called as well, so that as the
    connection is set up to be used again, it's in a "clean" state with no
    references held to the previous series of operations.

    .. seealso::

        :ref:`pooling_toplevel`

DBAPI
    DBAPI is shorthand for the phrase "Python Database API Specification".  This is
    a widely used specification within Python to define common usage patterns for
    all database connection packages.  The DBAPI is a "low level" API which is
    typically the lowest level system used in a Python application to talk to
    a database.
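    For a rough sketch of what direct DBAPI use looks like, here is the standard
    library's ``sqlite3`` module, one implementation of the specification (the
    table and values are only for illustration)::

        import sqlite3

        # connect(), cursor() and execute() are part of the DBAPI interface
        conn = sqlite3.connect(":memory:")
        cursor = conn.cursor()
        cursor.execute(
            "CREATE TABLE user_account (id INTEGER PRIMARY KEY, name VARCHAR(50))")
        cursor.execute(
            "INSERT INTO user_account (name) VALUES (?)", ("fred",))
        conn.commit()

        cursor.execute("SELECT id, name FROM user_account")
        print cursor.fetchall()   # [(1, u'fred')]
        conn.close()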
    SQLAlchemy's :term:`dialect` system is constructed around the operation of
    the DBAPI, providing individual dialect classes which service a specific
    DBAPI on top of a specific database engine; for example, the
    :func:`.create_engine` URL ``postgresql+psycopg2://@localhost/test`` refers
    to the :mod:`psycopg2 <.postgresql.psycopg2>` DBAPI/dialect combination,
    whereas the URL ``mysql+mysqldb://@localhost/test`` refers to the
    :mod:`MySQL for Python <.mysql.mysqldb>` DBAPI/dialect combination.

    .. seealso::

        `PEP 249 - Python Database API Specification v2.0 `_

unit of work
    This pattern is where the system transparently keeps track of changes to
    objects and periodically flushes all those pending changes out to the
    database.  SQLAlchemy's Session implements this pattern fully in a manner
    similar to that of Hibernate.

    .. seealso::

        `Unit of Work by Martin Fowler `_

        :doc:`orm/session`

Session
    The container or scope for ORM database operations.  Sessions load instances
    from the database, track changes to mapped instances and persist changes in
    a single unit of work when flushed.

    .. seealso::

        :doc:`orm/session`

columns clause
    The portion of the ``SELECT`` statement which enumerates the SQL expressions
    to be returned in the result set.  The expressions follow the ``SELECT``
    keyword directly and are a comma-separated list of individual expressions.

    E.g.:

    .. sourcecode:: sql

        SELECT user_account.name, user_account.email
        FROM user_account
        WHERE user_account.name = 'fred'

    Above, the list of columns ``user_account.name``, ``user_account.email``
    is the columns clause of the ``SELECT``.

WHERE clause
    The portion of the ``SELECT`` statement which indicates criteria by which
    rows should be filtered.  It is a single SQL expression which follows the
    keyword ``WHERE``.

    .. sourcecode:: sql

        SELECT user_account.name, user_account.email
        FROM user_account
        WHERE user_account.name = 'fred' AND user_account.status = 'E'

    Above, the phrase ``WHERE user_account.name = 'fred' AND user_account.status = 'E'``
    comprises the WHERE clause of the ``SELECT``.

FROM clause
    The portion of the ``SELECT`` statement which indicates the initial source of
    rows.  A simple ``SELECT`` will feature one or more table names in its FROM
    clause.  Multiple sources are separated by a comma:

    .. sourcecode:: sql

        SELECT user.name, address.email_address
        FROM user, address
        WHERE user.id=address.user_id

    The FROM clause is also where explicit joins are specified.  We can rewrite
    the above ``SELECT`` using a single ``FROM`` element which consists of a
    ``JOIN`` of the two tables:

    .. sourcecode:: sql

        SELECT user.name, address.email_address
        FROM user JOIN address ON user.id=address.user_id

subquery
    Refers to a ``SELECT`` statement that is embedded within an enclosing
    ``SELECT``.

    A subquery comes in two general flavors, one known as a "scalar select"
    which specifically must return exactly one row and one column, and the other
    form which acts as a "derived table" and serves as a source of rows for the
    FROM clause of another select.  A scalar select is eligible to be placed in
    the :term:`WHERE clause`, :term:`columns clause`, ORDER BY clause or HAVING
    clause of the enclosing select, whereas the derived table form is eligible
    to be placed in the FROM clause of the enclosing ``SELECT``.

    Examples:

    1. a scalar subquery placed in the :term:`columns clause` of an enclosing
       ``SELECT``.  The subquery in this example is a :term:`correlated subquery`
       because part of the rows which it selects from are given via the
       enclosing statement.

       ..
sourcecode:: sql SELECT id, (SELECT name FROM address WHERE address.user_id=user.id) FROM user 2. a scalar subquery placed in the :term:`WHERE clause` of an enclosing ``SELECT``. This subquery in this example is not correlated as it selects a fixed result. .. sourcecode:: sql SELECT id, name FROM user WHERE status=(SELECT status_id FROM status_code WHERE code='C') 3. a derived table subquery placed in the :term:`FROM clause` of an enclosing ``SELECT``. Such a subquery is almost always given an alias name. .. sourcecode:: sql SELECT user.id, user.name, ad_subq.email_address FROM user JOIN (select user_id, email_address FROM address WHERE address_type='Q') AS ad_subq ON user.id = ad_subq.user_id correlates correlated subquery correlated subqueries A :term:`subquery` is correlated if it depends on data in the enclosing ``SELECT``. Below, a subquery selects the aggregate value ``MIN(a.id)`` from the ``email_address`` table, such that it will be invoked for each value of ``user_account.id``, correlating the value of this column against the ``email_address.user_account_id`` column: .. sourcecode:: sql SELECT user_account.name, email_address.email FROM user_account JOIN email_address ON user_account.id=email_address.user_account_id WHERE email_address.id = ( SELECT MIN(a.id) FROM email_address AS a WHERE a.user_account_id=user_account.id ) The above subquery refers to the ``user_account`` table, which is not itself in the ``FROM`` clause of this nested query. Instead, the ``user_account`` table is recieved from the enclosing query, where each row selected from ``user_account`` results in a distinct execution of the subquery. A correlated subquery is in most cases present in the :term:`WHERE clause` or :term:`columns clause` of the immediately enclosing ``SELECT`` statement, as well as in the ORDER BY or HAVING clause. In less common cases, a correlated subquery may be present in the :term:`FROM clause` of an enclosing ``SELECT``; in these cases the correlation is typically due to the enclosing ``SELECT`` itself being enclosed in the WHERE, ORDER BY, columns or HAVING clause of another ``SELECT``, such as: .. sourcecode:: sql SELECT parent.id FROM parent WHERE EXISTS ( SELECT * FROM ( SELECT child.id AS id, child.parent_id AS parent_id, child.pos AS pos FROM child WHERE child.parent_id = parent.id ORDER BY child.pos LIMIT 3) WHERE id = 7) Correlation from one ``SELECT`` directly to one which encloses the correlated query via its ``FROM`` clause is not possible, because the correlation can only proceed once the original source rows from the enclosing statement's FROM clause are available. ACID ACID model An acronym for "Atomicity, Consistency, Isolation, Durability"; a set of properties that guarantee that database transactions are processed reliably. (via Wikipedia) .. seealso:: :term:`atomicity` :term:`consistency` :term:`isolation` :term:`durability` http://en.wikipedia.org/wiki/ACID_Model atomicity Atomicity is one of the components of the :term:`ACID` model, and requires that each transaction is "all or nothing": if one part of the transaction fails, the entire transaction fails, and the database state is left unchanged. An atomic system must guarantee atomicity in each and every situation, including power failures, errors, and crashes. (via Wikipedia) .. 
seealso:: :term:`ACID` http://en.wikipedia.org/wiki/Atomicity_(database_systems) consistency Consistency is one of the compoments of the :term:`ACID` model, and ensures that any transaction will bring the database from one valid state to another. Any data written to the database must be valid according to all defined rules, including but not limited to :term:`constraints`, cascades, triggers, and any combination thereof. (via Wikipedia) .. seealso:: :term:`ACID` http://en.wikipedia.org/wiki/Consistency_(database_systems) isolation isolated The isolation property of the :term:`ACID` model ensures that the concurrent execution of transactions results in a system state that would be obtained if transactions were executed serially, i.e. one after the other. Each transaction must execute in total isolation i.e. if T1 and T2 execute concurrently then each should remain independent of the other. (via Wikipedia) .. seealso:: :term:`ACID` http://en.wikipedia.org/wiki/Isolation_(database_systems) durability Durability is a property of the :term:`ACID` model which means that once a transaction has been committed, it will remain so, even in the event of power loss, crashes, or errors. In a relational database, for instance, once a group of SQL statements execute, the results need to be stored permanently (even if the database crashes immediately thereafter). (via Wikipedia) .. seealso:: :term:`ACID` http://en.wikipedia.org/wiki/Durability_(database_systems) RETURNING This is a non-SQL standard clause provided in various forms by certain backends, which provides the service of returning a result set upon execution of an INSERT, UPDATE or DELETE statement. Any set of columns from the matched rows can be returned, as though they were produced from a SELECT statement. The RETURNING clause provides both a dramatic performance boost to common update/select scenarios, including retrieval of inline- or default- generated primary key values and defaults at the moment they were created, as well as a way to get at server-generated default values in an atomic way. An example of RETURNING, idiomatic to Postgresql, looks like:: INSERT INTO user_account (name) VALUES ('new name') RETURNING id, timestamp Above, the INSERT statement will provide upon execution a result set which includes the values of the columns ``user_account.id`` and ``user_account.timestamp``, which above should have been generated as default values as they are not included otherwise (but note any series of columns or SQL expressions can be placed into RETURNING, not just default-value columns). The backends that currently support RETURNING or a similar construct are Postgresql, SQL Server, Oracle, and Firebird. The Postgresql and Firebird implementations are generally full featured, whereas the implementations of SQL Server and Oracle have caveats. On SQL Server, the clause is known as "OUTPUT INSERTED" for INSERT and UPDATE statements and "OUTPUT DELETED" for DELETE statements; the key caveat is that triggers are not supported in conjunction with this keyword. On Oracle, it is known as "RETURNING...INTO", and requires that the value be placed into an OUT paramter, meaning not only is the syntax awkward, but it can also only be used for one row at a time. SQLAlchemy's :meth:`.UpdateBase.returning` system provides a layer of abstraction on top of the RETURNING systems of these backends to provide a consistent interface for returning columns. The ORM also includes many optimizations that make use of RETURNING when available. 
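At the Core level, a rough sketch of this interface looks like the following, assuming a ``user_account`` :class:`.Table` with ``id`` and ``timestamp`` columns and an :class:`.Engine` connected to a RETURNING-capable backend::

    stmt = user_account.insert().values(name='new name').returning(
                user_account.c.id, user_account.c.timestamp)

    result = engine.execute(stmt)
    row = result.fetchone()   # the (id, timestamp) generated by the INSERT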
SQLAlchemy-0.8.4/doc/_sources/index.txt0000644000076500000240000000732212251147171020475 0ustar classicstaff00000000000000:orphan: .. _index_toplevel: ======================== SQLAlchemy Documentation ======================== Getting Started =============== A high level view and getting set up. :ref:`Overview ` | :ref:`Installation Guide ` | :doc:`Frequently Asked Questions ` | :doc:`Migration from 0.7 ` | :doc:`Glossary ` | :doc:`Changelog catalog ` SQLAlchemy ORM ============== Here, the Object Relational Mapper is introduced and fully described. If you want to work with higher-level SQL which is constructed automatically for you, as well as automated persistence of Python objects, proceed first to the tutorial. * **Read this first:** :doc:`orm/tutorial` * **ORM Configuration:** :doc:`Mapper Configuration ` | :doc:`Relationship Configuration ` | :doc:`Inheritance Mapping ` | :doc:`Advanced Collection Configuration ` * **Configuration Extensions:** :doc:`Declarative Extension ` | :doc:`Association Proxy ` | :doc:`Hybrid Attributes ` | :doc:`Mutable Scalars ` | :doc:`Ordered List ` * **ORM Usage:** :doc:`Session Usage and Guidelines ` | :doc:`Query API reference ` | :doc:`Relationship Loading Techniques ` * **Extending the ORM:** :doc:`ORM Event Interfaces ` | :doc:`Internals API ` * **Other:** :doc:`Introduction to Examples ` | :doc:`Deprecated Event Interfaces ` | :doc:`ORM Exceptions ` | :doc:`Horizontal Sharding ` | :doc:`Alternate Instrumentation ` SQLAlchemy Core =============== The breadth of SQLAlchemy's SQL rendering engine, DBAPI integration, transaction integration, and schema description services are documented here. In contrast to the ORM's domain-centric mode of usage, the SQL Expression Language provides a schema-centric usage paradigm. * **Read this first:** :doc:`core/tutorial` * **All the Built In SQL:** :doc:`SQL Expression API ` * **Engines, Connections, Pools:** :doc:`Engine Configuration ` | :doc:`Connections, Transactions ` | :doc:`Connection Pooling ` * **Schema Definition:** :ref:`Tables and Columns ` | :ref:`Database Introspection (Reflection) ` | :ref:`Insert/Update Defaults ` | :ref:`Constraints and Indexes ` | :ref:`Using Data Definition Language (DDL) ` * **Datatypes:** :ref:`Overview ` | :ref:`Generic Types ` | :ref:`SQL Standard Types ` | :ref:`Vendor Specific Types ` | :ref:`Building Custom Types ` | :ref:`Defining New Operators ` | :ref:`API ` * **Extending the Core:** :doc:`SQLAlchemy Events ` | :doc:`Core Event Interfaces ` | :doc:`Creating Custom SQL Constructs ` | :doc:`Internals API ` * **Other:** :doc:`Runtime Inspection API ` | :doc:`core/interfaces` | :doc:`core/exceptions` Dialect Documentation ====================== The **dialect** is the system SQLAlchemy uses to communicate with various types of DBAPIs and databases. This section describes notes, options, and usage patterns regarding individual dialects. :doc:`Index of all Dialects ` SQLAlchemy-0.8.4/doc/_sources/intro.txt0000644000076500000240000001576312251147171020531 0ustar classicstaff00000000000000.. _overview_toplevel: ======== Overview ======== .. _overview: Overview ======== The SQLAlchemy SQL Toolkit and Object Relational Mapper is a comprehensive set of tools for working with databases and Python. It has several distinct areas of functionality which can be used individually or combined together. Its major components are illustrated in below, with component dependencies organized into layers: .. 
image:: sqla_arch_small.png Above, the two most significant front-facing portions of SQLAlchemy are the **Object Relational Mapper** and the **SQL Expression Language**. SQL Expressions can be used independently of the ORM. When using the ORM, the SQL Expression language remains part of the public facing API as it is used within object-relational configurations and queries. .. _doc_overview: Documentation Overview ====================== The documentation is separated into three sections: :ref:`orm_toplevel`, :ref:`core_toplevel`, and :ref:`dialect_toplevel`. In :ref:`orm_toplevel`, the Object Relational Mapper is introduced and fully described. New users should begin with the :ref:`ormtutorial_toplevel`. If you want to work with higher-level SQL which is constructed automatically for you, as well as management of Python objects, proceed to this tutorial. In :ref:`core_toplevel`, the breadth of SQLAlchemy's SQL and database integration and description services are documented, the core of which is the SQL Expression language. The SQL Expression Language is a toolkit all its own, independent of the ORM package, which can be used to construct manipulable SQL expressions which can be programmatically constructed, modified, and executed, returning cursor-like result sets. In contrast to the ORM's domain-centric mode of usage, the expression language provides a schema-centric usage paradigm. New users should begin here with :ref:`sqlexpression_toplevel`. SQLAlchemy engine, connection, and pooling services are also described in :ref:`core_toplevel`. In :ref:`dialect_toplevel`, reference documentation for all provided database and DBAPI backends is provided. Code Examples ============= Working code examples, mostly regarding the ORM, are included in the SQLAlchemy distribution. A description of all the included example applications is at :ref:`examples_toplevel`. There is also a wide variety of examples involving both core SQLAlchemy constructs as well as the ORM on the wiki. See `Theatrum Chemicum `_. .. _installation: Installation Guide ================== Supported Platforms ------------------- SQLAlchemy has been tested against the following platforms: * cPython since version 2.5, through the 2.xx series * cPython version 3, throughout all 3.xx series * `Jython `_ 2.5 or greater * `Pypy `_ 1.5 or greater .. versionchanged:: 0.8 Python 2.5 is now the minimum Python version supported. Supported Installation Methods ------------------------------- SQLAlchemy supports installation using standard Python "distutils" or "setuptools" methodologies. An overview of potential setups is as follows: * **Plain Python Distutils** - SQLAlchemy can be installed with a clean Python install using the services provided via `Python Distutils `_, using the ``setup.py`` script. The C extensions as well as Python 3 builds are supported. * **Standard Setuptools** - When using `setuptools `_, SQLAlchemy can be installed via ``setup.py`` or ``easy_install``, and the C extensions are supported. setuptools is not supported on Python 3 at the time of this writing. * **Distribute** - With `distribute `_, SQLAlchemy can be installed via ``setup.py`` or ``easy_install``, and the C extensions as well as Python 3 builds are supported. * **pip** - `pip `_ is an installer that rides on top of ``setuptools`` or ``distribute``, replacing the usage of ``easy_install``. It is often preferred for its simpler mode of usage. 
Install via easy_install or pip ------------------------------- When ``easy_install`` or ``pip`` is available, the distribution can be downloaded from Pypi and installed in one step:: easy_install SQLAlchemy Or with pip:: pip install SQLAlchemy This command will download the latest version of SQLAlchemy from the `Python Cheese Shop `_ and install it to your system. Installing using setup.py ---------------------------------- Otherwise, you can install from the distribution using the ``setup.py`` script:: python setup.py install Installing the C Extensions ---------------------------------- SQLAlchemy includes C extensions which provide an extra speed boost for dealing with result sets. Currently, the extensions are only supported on the 2.xx series of cPython, not Python 3 or Pypy. setup.py will automatically build the extensions if an appropriate platform is detected. If the build of the C extensions fails, due to missing compiler or other issue, the setup process will output a warning message, and re-run the build without the C extensions, upon completion reporting final status. To run the build/install without even attempting to compile the C extensions, pass the flag ``--without-cextensions`` to the ``setup.py`` script:: python setup.py --without-cextensions install Or with pip:: pip install --global-option='--without-cextensions' SQLAlchemy .. note:: The ``--without-cextensions`` flag is available **only** if ``setuptools`` or ``distribute`` is installed. It is not available on a plain Python ``distutils`` installation. The library will still install without the C extensions if they cannot be built, however. Installing on Python 3 ---------------------------------- SQLAlchemy ships as Python 2 code. For Python 3 usage, the ``setup.py`` script will invoke the Python ``2to3`` tool on the build, plugging in an extra "preprocessor" as well. The 2to3 step works with Python distutils (part of the standard Python install) and Distribute - it will **not** work with a non-Distribute setuptools installation. Installing a Database API ---------------------------------- SQLAlchemy is designed to operate with a :term:`DBAPI` implementation built for a particular database, and includes support for the most popular databases. The individual database sections in :doc:`/dialects/index` enumerate the available DBAPIs for each database, including external links. Checking the Installed SQLAlchemy Version ------------------------------------------ This documentation covers SQLAlchemy version 0.8. If you're working on a system that already has SQLAlchemy installed, check the version from your Python prompt like this: .. sourcecode:: python+sql >>> import sqlalchemy >>> sqlalchemy.__version__ # doctest: +SKIP 0.8.0 .. _migration: 0.7 to 0.8 Migration ===================== Notes on what's changed from 0.7 to 0.8 is available here at :doc:`changelog/migration_08`. SQLAlchemy-0.8.4/doc/_sources/orm/0000755000076500000240000000000012251151573017417 5ustar classicstaff00000000000000SQLAlchemy-0.8.4/doc/_sources/orm/collections.txt0000644000076500000240000005331412251147171022503 0ustar classicstaff00000000000000.. _collections_toplevel: .. currentmodule:: sqlalchemy.orm ======================================= Collection Configuration and Techniques ======================================= The :func:`.relationship` function defines a linkage between two classes. When the linkage defines a one-to-many or many-to-many relationship, it's represented as a Python collection when objects are loaded and manipulated. 
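For example, given a hypothetical one-to-many mapping between ``Parent`` and ``Child`` (and a configured ``session``), the collection defaults to a plain Python ``list``; this is a variant of the example shown later in this chapter::

    class Parent(Base):
        __tablename__ = 'parent'
        id = Column(Integer, primary_key=True)

        # loaded and manipulated as an ordinary list of Child objects
        children = relationship("Child")

    class Child(Base):
        __tablename__ = 'child'
        id = Column(Integer, primary_key=True)
        parent_id = Column(Integer, ForeignKey('parent.id'))

    parent = session.query(Parent).first()
    parent.children.append(Child())
    print parent.children[0]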
This section presents additional information about collection configuration and techniques. .. _largecollections: .. currentmodule:: sqlalchemy.orm Working with Large Collections =============================== The default behavior of :func:`.relationship` is to fully load the collection of items in, as according to the loading strategy of the relationship. Additionally, the :class:`.Session` by default only knows how to delete objects which are actually present within the session. When a parent instance is marked for deletion and flushed, the :class:`.Session` loads its full list of child items in so that they may either be deleted as well, or have their foreign key value set to null; this is to avoid constraint violations. For large collections of child items, there are several strategies to bypass full loading of child items both at load time as well as deletion time. .. _dynamic_relationship: Dynamic Relationship Loaders ----------------------------- A key feature to enable management of a large collection is the so-called "dynamic" relationship. This is an optional form of :func:`~sqlalchemy.orm.relationship` which returns a :class:`~sqlalchemy.orm.query.Query` object in place of a collection when accessed. :func:`~sqlalchemy.orm.query.Query.filter` criterion may be applied as well as limits and offsets, either explicitly or via array slices:: class User(Base): __tablename__ = 'user' posts = relationship(Post, lazy="dynamic") jack = session.query(User).get(id) # filter Jack's blog posts posts = jack.posts.filter(Post.headline=='this is a post') # apply array slices posts = jack.posts[5:20] The dynamic relationship supports limited write operations, via the ``append()`` and ``remove()`` methods:: oldpost = jack.posts.filter(Post.headline=='old post').one() jack.posts.remove(oldpost) jack.posts.append(Post('new post')) Since the read side of the dynamic relationship always queries the database, changes to the underlying collection will not be visible until the data has been flushed. However, as long as "autoflush" is enabled on the :class:`.Session` in use, this will occur automatically each time the collection is about to emit a query. To place a dynamic relationship on a backref, use the :func:`~.orm.backref` function in conjunction with ``lazy='dynamic'``:: class Post(Base): __table__ = posts_table user = relationship(User, backref=backref('posts', lazy='dynamic') ) Note that eager/lazy loading options cannot be used in conjunction dynamic relationships at this time. .. note:: The :func:`~.orm.dynamic_loader` function is essentially the same as :func:`~.orm.relationship` with the ``lazy='dynamic'`` argument specified. .. warning:: The "dynamic" loader applies to **collections only**. It is not valid to use "dynamic" loaders with many-to-one, one-to-one, or uselist=False relationships. Newer versions of SQLAlchemy emit warnings or exceptions in these cases. Setting Noload --------------- A "noload" relationship never loads from the database, even when accessed. It is configured using ``lazy='noload'``:: class MyClass(Base): __tablename__ = 'some_table' children = relationship(MyOtherClass, lazy='noload') Above, the ``children`` collection is fully writeable, and changes to it will be persisted to the database as well as locally available for reading at the time they are added. However when instances of ``MyClass`` are freshly loaded from the database, the ``children`` collection stays empty. .. 
_passive_deletes: Using Passive Deletes ---------------------- Use ``passive_deletes=True`` to disable child object loading on a DELETE operation, in conjunction with "ON DELETE (CASCADE|SET NULL)" on your database to automatically cascade deletes to child objects:: class MyClass(Base): __tablename__ = 'mytable' id = Column(Integer, primary_key=True) children = relationship("MyOtherClass", cascade="all, delete-orphan", passive_deletes=True) class MyOtherClass(Base): __tablename__ = 'myothertable' id = Column(Integer, primary_key=True) parent_id = Column(Integer, ForeignKey('mytable.id', ondelete='CASCADE') ) .. note:: To use "ON DELETE CASCADE", the underlying database engine must support foreign keys. * When using MySQL, an appropriate storage engine must be selected. See :ref:`mysql_storage_engines` for details. * When using SQLite, foreign key support must be enabled explicitly. See :ref:`sqlite_foreign_keys` for details. When ``passive_deletes`` is applied, the ``children`` relationship will not be loaded into memory when an instance of ``MyClass`` is marked for deletion. The ``cascade="all, delete-orphan"`` *will* take effect for instances of ``MyOtherClass`` which are currently present in the session; however for instances of ``MyOtherClass`` which are not loaded, SQLAlchemy assumes that "ON DELETE CASCADE" rules will ensure that those rows are deleted by the database. .. currentmodule:: sqlalchemy.orm.collections .. _custom_collections: Customizing Collection Access ============================= Mapping a one-to-many or many-to-many relationship results in a collection of values accessible through an attribute on the parent instance. By default, this collection is a ``list``:: class Parent(Base): __tablename__ = 'parent' parent_id = Column(Integer, primary_key=True) children = relationship(Child) parent = Parent() parent.children.append(Child()) print parent.children[0] Collections are not limited to lists. Sets, mutable sequences and almost any other Python object that can act as a container can be used in place of the default list, by specifying the ``collection_class`` option on :func:`~sqlalchemy.orm.relationship`:: class Parent(Base): __tablename__ = 'parent' parent_id = Column(Integer, primary_key=True) # use a set children = relationship(Child, collection_class=set) parent = Parent() child = Child() parent.children.add(child) assert child in parent.children Dictionary Collections ----------------------- A little extra detail is needed when using a dictionary as a collection. This because objects are always loaded from the database as lists, and a key-generation strategy must be available to populate the dictionary correctly. The :func:`.attribute_mapped_collection` function is by far the most common way to achieve a simple dictionary collection. It produces a dictionary class that will apply a particular attribute of the mapped class as a key. 
Below we map an ``Item`` class containing a dictionary of ``Note`` items keyed to the ``Note.keyword`` attribute:: from sqlalchemy import Column, Integer, String, ForeignKey from sqlalchemy.orm import relationship from sqlalchemy.orm.collections import attribute_mapped_collection from sqlalchemy.ext.declarative import declarative_base Base = declarative_base() class Item(Base): __tablename__ = 'item' id = Column(Integer, primary_key=True) notes = relationship("Note", collection_class=attribute_mapped_collection('keyword'), cascade="all, delete-orphan") class Note(Base): __tablename__ = 'note' id = Column(Integer, primary_key=True) item_id = Column(Integer, ForeignKey('item.id'), nullable=False) keyword = Column(String) text = Column(String) def __init__(self, keyword, text): self.keyword = keyword self.text = text ``Item.notes`` is then a dictionary:: >>> item = Item() >>> item.notes['a'] = Note('a', 'atext') >>> item.notes.items() {'a': <__main__.Note object at 0x2eaaf0>} :func:`.attribute_mapped_collection` will ensure that the ``.keyword`` attribute of each ``Note`` complies with the key in the dictionary. Such as, when assigning to ``Item.notes``, the dictionary key we supply must match that of the actual ``Note`` object:: item = Item() item.notes = { 'a': Note('a', 'atext'), 'b': Note('b', 'btext') } The attribute which :func:`.attribute_mapped_collection` uses as a key does not need to be mapped at all! Using a regular Python ``@property`` allows virtually any detail or combination of details about the object to be used as the key, as below when we establish it as a tuple of ``Note.keyword`` and the first ten letters of the ``Note.text`` field:: class Item(Base): __tablename__ = 'item' id = Column(Integer, primary_key=True) notes = relationship("Note", collection_class=attribute_mapped_collection('note_key'), backref="item", cascade="all, delete-orphan") class Note(Base): __tablename__ = 'note' id = Column(Integer, primary_key=True) item_id = Column(Integer, ForeignKey('item.id'), nullable=False) keyword = Column(String) text = Column(String) @property def note_key(self): return (self.keyword, self.text[0:10]) def __init__(self, keyword, text): self.keyword = keyword self.text = text Above we added a ``Note.item`` backref. Assigning to this reverse relationship, the ``Note`` is added to the ``Item.notes`` dictionary and the key is generated for us automatically:: >>> item = Item() >>> n1 = Note("a", "atext") >>> n1.item = item >>> item.notes {('a', 'atext'): <__main__.Note object at 0x2eaaf0>} Other built-in dictionary types include :func:`.column_mapped_collection`, which is almost like :func:`.attribute_mapped_collection` except given the :class:`.Column` object directly:: from sqlalchemy.orm.collections import column_mapped_collection class Item(Base): __tablename__ = 'item' id = Column(Integer, primary_key=True) notes = relationship("Note", collection_class=column_mapped_collection(Note.__table__.c.keyword), cascade="all, delete-orphan") as well as :func:`.mapped_collection` which is passed any callable function. 
Note that it's usually easier to use :func:`.attribute_mapped_collection`
along with a ``@property`` as mentioned earlier::

    from sqlalchemy.orm.collections import mapped_collection

    class Item(Base):
        __tablename__ = 'item'
        id = Column(Integer, primary_key=True)
        notes = relationship("Note",
                    collection_class=mapped_collection(lambda note: note.text[0:10]),
                    cascade="all, delete-orphan")

Dictionary mappings are often combined with the "Association Proxy" extension
to produce streamlined dictionary views.  See :ref:`proxying_dictionaries` and
:ref:`composite_association_proxy` for examples.

.. autofunction:: attribute_mapped_collection

.. autofunction:: column_mapped_collection

.. autofunction:: mapped_collection

Custom Collection Implementations
==================================

You can use your own types for collections as well.  In simple cases,
inheriting from ``list`` or ``set``, adding custom behavior, is all that's
needed.  In other cases, special decorators are needed to tell SQLAlchemy more
detail about how the collection operates.

.. topic:: Do I need a custom collection implementation?

   In most cases not at all!  The most common use case for a "custom"
   collection is one that validates or marshals incoming values into a new
   form, such as a string that becomes a class instance, or one which goes a
   step beyond and represents the data internally in some fashion, presenting
   a "view" of that data on the outside of a different form.

   For the first use case, the :func:`.orm.validates` decorator is by far
   the simplest way to intercept incoming values in all cases for the purposes
   of validation and simple marshaling.  See :ref:`simple_validators`
   for an example of this.

   For the second use case, the :ref:`associationproxy_toplevel` extension is
   a well-tested, widely used system that provides a read/write "view" of a
   collection in terms of some attribute present on the target object.  As the
   target attribute can be a ``@property`` that returns virtually anything, a
   wide array of "alternative" views of a collection can be constructed with
   just a few functions.  This approach leaves the underlying mapped collection
   unaffected and avoids the need to carefully tailor collection behavior on a
   method-by-method basis.

   Customized collections are useful when the collection needs to have special
   behaviors upon access or mutation operations that can't otherwise be modeled
   externally to the collection.  They can of course be combined with the
   above two approaches.

Collections in SQLAlchemy are transparently *instrumented*.  Instrumentation
means that normal operations on the collection are tracked and result in
changes being written to the database at flush time.  Additionally, collection
operations can fire *events* which indicate some secondary operation must take
place.  Examples of a secondary operation include saving the child item in the
parent's :class:`~sqlalchemy.orm.session.Session` (i.e. the ``save-update``
cascade), as well as synchronizing the state of a bi-directional relationship
(i.e. a :func:`.backref`).

The collections package understands the basic interface of lists, sets and
dicts and will automatically apply instrumentation to those built-in types and
their subclasses.  Object-derived types that implement a basic collection
interface are detected and instrumented via duck-typing:

..
sourcecode:: python+sql class ListLike(object): def __init__(self): self.data = [] def append(self, item): self.data.append(item) def remove(self, item): self.data.remove(item) def extend(self, items): self.data.extend(items) def __iter__(self): return iter(self.data) def foo(self): return 'foo' ``append``, ``remove``, and ``extend`` are known list-like methods, and will be instrumented automatically. ``__iter__`` is not a mutator method and won't be instrumented, and ``foo`` won't be either. Duck-typing (i.e. guesswork) isn't rock-solid, of course, so you can be explicit about the interface you are implementing by providing an ``__emulates__`` class attribute:: class SetLike(object): __emulates__ = set def __init__(self): self.data = set() def append(self, item): self.data.add(item) def remove(self, item): self.data.remove(item) def __iter__(self): return iter(self.data) This class looks list-like because of ``append``, but ``__emulates__`` forces it to set-like. ``remove`` is known to be part of the set interface and will be instrumented. But this class won't work quite yet: a little glue is needed to adapt it for use by SQLAlchemy. The ORM needs to know which methods to use to append, remove and iterate over members of the collection. When using a type like ``list`` or ``set``, the appropriate methods are well-known and used automatically when present. This set-like class does not provide the expected ``add`` method, so we must supply an explicit mapping for the ORM via a decorator. Annotating Custom Collections via Decorators -------------------------------------------- Decorators can be used to tag the individual methods the ORM needs to manage collections. Use them when your class doesn't quite meet the regular interface for its container type, or when you otherwise would like to use a different method to get the job done. .. sourcecode:: python+sql from sqlalchemy.orm.collections import collection class SetLike(object): __emulates__ = set def __init__(self): self.data = set() @collection.appender def append(self, item): self.data.add(item) def remove(self, item): self.data.remove(item) def __iter__(self): return iter(self.data) And that's all that's needed to complete the example. SQLAlchemy will add instances via the ``append`` method. ``remove`` and ``__iter__`` are the default methods for sets and will be used for removing and iteration. Default methods can be changed as well: .. sourcecode:: python+sql from sqlalchemy.orm.collections import collection class MyList(list): @collection.remover def zark(self, item): # do something special... @collection.iterator def hey_use_this_instead_for_iteration(self): # ... There is no requirement to be list-, or set-like at all. Collection classes can be any shape, so long as they have the append, remove and iterate interface marked for SQLAlchemy's use. Append and remove methods will be called with a mapped entity as the single argument, and iterator methods are called with no arguments and must return an iterator. .. autoclass:: collection :members: .. _dictionary_collections: Custom Dictionary-Based Collections ----------------------------------- The :class:`.MappedCollection` class can be used as a base class for your custom types or as a mix-in to quickly add ``dict`` collection support to other classes. It uses a keying function to delegate to ``__setitem__`` and ``__delitem__``: .. 
sourcecode:: python+sql

    from sqlalchemy.util import OrderedDict
    from sqlalchemy.orm.collections import MappedCollection

    class NodeMap(OrderedDict, MappedCollection):
        """Holds 'Node' objects, keyed by the 'name' attribute
        with insert order maintained."""

        def __init__(self, *args, **kw):
            MappedCollection.__init__(self, keyfunc=lambda node: node.name)
            OrderedDict.__init__(self, *args, **kw)

When subclassing :class:`.MappedCollection`, user-defined versions
of ``__setitem__()`` or ``__delitem__()`` should be decorated
with :meth:`.collection.internally_instrumented`, **if** they call down
to those same methods on :class:`.MappedCollection`.  This is because
the methods on :class:`.MappedCollection` are already instrumented - calling
them from within an already instrumented call can cause events to be fired off
repeatedly, or inappropriately, leading to internal state corruption in
rare cases::

    from sqlalchemy.orm.collections import MappedCollection,\
                                        collection

    class MyMappedCollection(MappedCollection):
        """Use @internally_instrumented when your methods
        call down to already-instrumented methods.

        """

        @collection.internally_instrumented
        def __setitem__(self, key, value, _sa_initiator=None):
            # do something with key, value
            super(MyMappedCollection, self).__setitem__(key, value, _sa_initiator)

        @collection.internally_instrumented
        def __delitem__(self, key, _sa_initiator=None):
            # do something with key
            super(MyMappedCollection, self).__delitem__(key, _sa_initiator)

The ORM understands the ``dict`` interface just like lists and sets, and will
automatically instrument all dict-like methods if you choose to subclass
``dict`` or provide dict-like collection behavior in a duck-typed class.  You
must decorate appender and remover methods, however -- there are no compatible
methods in the basic dictionary interface for SQLAlchemy to use by default.
Iteration will go through ``itervalues()`` unless otherwise decorated.

.. note::

   Due to a bug in MappedCollection prior to version 0.7.6, this
   workaround usually needs to be called before a custom subclass of
   :class:`.MappedCollection` which uses
   :meth:`.collection.internally_instrumented` can be used::

       from sqlalchemy.orm.collections import _instrument_class, MappedCollection
       _instrument_class(MappedCollection)

   This will ensure that the :class:`.MappedCollection` has been properly
   initialized with custom ``__setitem__()`` and ``__delitem__()``
   methods before being used in a custom subclass.

.. autoclass:: sqlalchemy.orm.collections.MappedCollection
   :members:

Instrumentation and Custom Types
--------------------------------

Many custom types and existing library classes can be used as an entity
collection type as-is without further ado.  However, it is important to note
that the instrumentation process will modify the type, adding decorators
around methods automatically.

The decorations are lightweight and no-op outside of relationships, but they
do add unneeded overhead when triggered elsewhere.  When using a library class
as a collection, it can be good practice to use the "trivial subclass" trick
to restrict the decorations to just your usage in relationships.  For example:

.. sourcecode:: python+sql

    class MyAwesomeList(some.great.library.AwesomeList):
        pass

    # ... relationship(..., collection_class=MyAwesomeList)

The ORM uses this approach for built-ins, quietly substituting a trivial
subclass when a ``list``, ``set`` or ``dict`` is used directly.

Collection Internals
=====================

Various internal methods.

.. autofunction:: bulk_replace

.. autoclass:: collection

..
autofunction:: collection_adapter .. autoclass:: CollectionAdapter .. autoclass:: InstrumentedDict .. autoclass:: InstrumentedList .. autoclass:: InstrumentedSet .. autofunction:: prepare_instrumentation SQLAlchemy-0.8.4/doc/_sources/orm/deprecated.txt0000644000076500000240000000155512251147171022265 0ustar classicstaff00000000000000:orphan: .. _dep_interfaces_orm_toplevel: Deprecated ORM Event Interfaces ================================ .. module:: sqlalchemy.orm.interfaces This section describes the class-based ORM event interface which first existed in SQLAlchemy 0.1, which progressed with more kinds of events up until SQLAlchemy 0.5. The non-ORM analogue is described at :ref:`dep_interfaces_core_toplevel`. .. deprecated:: 0.7 As of SQLAlchemy 0.7, the new event system described in :ref:`event_toplevel` replaces the extension/proxy/listener system, providing a consistent interface to all events without the need for subclassing. Mapper Events ----------------- .. autoclass:: MapperExtension :members: Session Events ----------------- .. autoclass:: SessionExtension :members: Attribute Events -------------------- .. autoclass:: AttributeExtension :members: SQLAlchemy-0.8.4/doc/_sources/orm/events.txt0000644000076500000240000000163112251147171021464 0ustar classicstaff00000000000000.. _orm_event_toplevel: ORM Events ========== The ORM includes a wide variety of hooks available for subscription. .. versionadded:: 0.7 The event supercedes the previous system of "extension" classes. For an introduction to the event API, see :ref:`event_toplevel`. Non-ORM events such as those regarding connections and low-level statement execution are described in :ref:`core_event_toplevel`. Attribute Events ---------------- .. autoclass:: sqlalchemy.orm.events.AttributeEvents :members: Mapper Events --------------- .. autoclass:: sqlalchemy.orm.events.MapperEvents :members: Instance Events --------------- .. autoclass:: sqlalchemy.orm.events.InstanceEvents :members: Session Events -------------- .. autoclass:: sqlalchemy.orm.events.SessionEvents :members: Instrumentation Events ----------------------- .. autoclass:: sqlalchemy.orm.events.InstrumentationEvents :members: SQLAlchemy-0.8.4/doc/_sources/orm/examples.txt0000644000076500000240000000461012251147171021776 0ustar classicstaff00000000000000.. _examples_toplevel: Examples ======== The SQLAlchemy distribution includes a variety of code examples illustrating a select set of patterns, some typical and some not so typical. All are runnable and can be found in the ``/examples`` directory of the distribution. Each example contains a README in its ``__init__.py`` file, each of which are listed below. Additional SQLAlchemy examples, some user contributed, are available on the wiki at ``_. .. _examples_adjacencylist: Adjacency List -------------- Location: /examples/adjacency_list/ .. automodule:: adjacency_list .. _examples_associations: Associations ------------ Location: /examples/association/ .. automodule:: association .. _examples_instrumentation: Attribute Instrumentation ------------------------- Location: /examples/custom_attributes/ .. automodule:: custom_attributes .. _examples_caching: Dogpile Caching --------------- Location: /examples/dogpile_caching/ .. automodule:: dogpile_caching Directed Graphs --------------- Location: /examples/graphs/ .. automodule:: graphs Dynamic Relations as Dictionaries ---------------------------------- Location: /examples/dynamic_dict/ .. automodule:: dynamic_dict .. 
_examples_generic_associations: Generic Associations -------------------- Location: /examples/generic_associations .. automodule:: generic_associations .. _examples_sharding: Horizontal Sharding ------------------- Location: /examples/sharding .. automodule:: sharding Inheritance Mappings -------------------- Location: /examples/inheritance/ .. automodule:: inheritance Large Collections ----------------- Location: /examples/large_collection/ .. automodule:: large_collection Nested Sets ----------- Location: /examples/nested_sets/ .. automodule:: nested_sets Polymorphic Associations ------------------------ See :ref:`examples_generic_associations` for a modern version of polymorphic associations. .. _examples_postgis: PostGIS Integration ------------------- Location: /examples/postgis .. automodule:: postgis Versioned Objects ----------------- Location: /examples/versioning .. automodule:: versioning Vertical Attribute Mapping -------------------------- Location: /examples/vertical .. automodule:: vertical .. _examples_xmlpersistence: XML Persistence --------------- Location: /examples/elementtree/ .. automodule:: elementtree SQLAlchemy-0.8.4/doc/_sources/orm/exceptions.txt0000644000076500000240000000011712251147171022337 0ustar classicstaff00000000000000ORM Exceptions ============== .. automodule:: sqlalchemy.orm.exc :members:SQLAlchemy-0.8.4/doc/_sources/orm/extensions/0000755000076500000240000000000012251151573021616 5ustar classicstaff00000000000000SQLAlchemy-0.8.4/doc/_sources/orm/extensions/associationproxy.txt0000644000076500000240000004772312251147171026011 0ustar classicstaff00000000000000.. _associationproxy_toplevel: Association Proxy ================= .. module:: sqlalchemy.ext.associationproxy ``associationproxy`` is used to create a read/write view of a target attribute across a relationship. It essentially conceals the usage of a "middle" attribute between two endpoints, and can be used to cherry-pick fields from a collection of related objects or to reduce the verbosity of using the association object pattern. Applied creatively, the association proxy allows the construction of sophisticated collections and dictionary views of virtually any geometry, persisted to the database using standard, transparently configured relational patterns. Simplifying Scalar Collections ------------------------------ Consider a many-to-many mapping between two classes, ``User`` and ``Keyword``. 
Each ``User`` can have any number of ``Keyword`` objects, and vice-versa (the many-to-many pattern is described at :ref:`relationships_many_to_many`):: from sqlalchemy import Column, Integer, String, ForeignKey, Table from sqlalchemy.orm import relationship from sqlalchemy.ext.declarative import declarative_base Base = declarative_base() class User(Base): __tablename__ = 'user' id = Column(Integer, primary_key=True) name = Column(String(64)) kw = relationship("Keyword", secondary=lambda: userkeywords_table) def __init__(self, name): self.name = name class Keyword(Base): __tablename__ = 'keyword' id = Column(Integer, primary_key=True) keyword = Column('keyword', String(64)) def __init__(self, keyword): self.keyword = keyword userkeywords_table = Table('userkeywords', Base.metadata, Column('user_id', Integer, ForeignKey("user.id"), primary_key=True), Column('keyword_id', Integer, ForeignKey("keyword.id"), primary_key=True) ) Reading and manipulating the collection of "keyword" strings associated with ``User`` requires traversal from each collection element to the ``.keyword`` attribute, which can be awkward:: >>> user = User('jek') >>> user.kw.append(Keyword('cheese inspector')) >>> print(user.kw) [<__main__.Keyword object at 0x12bf830>] >>> print(user.kw[0].keyword) cheese inspector >>> print([keyword.keyword for keyword in user.kw]) ['cheese inspector'] The ``association_proxy`` is applied to the ``User`` class to produce a "view" of the ``kw`` relationship, which only exposes the string value of ``.keyword`` associated with each ``Keyword`` object:: from sqlalchemy.ext.associationproxy import association_proxy class User(Base): __tablename__ = 'user' id = Column(Integer, primary_key=True) name = Column(String(64)) kw = relationship("Keyword", secondary=lambda: userkeywords_table) def __init__(self, name): self.name = name # proxy the 'keyword' attribute from the 'kw' relationship keywords = association_proxy('kw', 'keyword') We can now reference the ``.keywords`` collection as a listing of strings, which is both readable and writable. New ``Keyword`` objects are created for us transparently:: >>> user = User('jek') >>> user.keywords.append('cheese inspector') >>> user.keywords ['cheese inspector'] >>> user.keywords.append('snack ninja') >>> user.kw [<__main__.Keyword object at 0x12cdd30>, <__main__.Keyword object at 0x12cde30>] The :class:`.AssociationProxy` object produced by the :func:`.association_proxy` function is an instance of a `Python descriptor `_. It is always declared with the user-defined class being mapped, regardless of whether Declarative or classical mappings via the :func:`.mapper` function are used. The proxy functions by operating upon the underlying mapped attribute or collection in response to operations, and changes made via the proxy are immediately apparent in the mapped attribute, as well as vice versa. The underlying attribute remains fully accessible. When first accessed, the association proxy performs introspection operations on the target collection so that its behavior corresponds correctly. Details such as if the locally proxied attribute is a collection (as is typical) or a scalar reference, as well as if the collection acts like a set, list, or dictionary is taken into account, so that the proxy should act just like the underlying collection or attribute does. 
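To see the proxy in round-trip use, here is a minimal, self-contained sketch
using the ``User`` and ``Keyword`` mapping defined above.  The in-memory
SQLite engine and the session setup are illustrative additions only, not part
of the mapping itself::

    from sqlalchemy import create_engine
    from sqlalchemy.orm import Session

    # throwaway in-memory database, purely for illustration
    engine = create_engine("sqlite://")
    Base.metadata.create_all(engine)

    session = Session(bind=engine)

    user = User('jek')
    user.keywords.append('cheese inspector')
    user.keywords.append('snack ninja')
    session.add(user)
    session.commit()

    # the proxy reads back as plain strings; the Keyword rows and the
    # rows in the "userkeywords" association table were created for us
    reloaded = session.query(User).filter_by(name='jek').one()
    print(reloaded.keywords)   # e.g. ['cheese inspector', 'snack ninja']
    print(reloaded.kw)         # the underlying Keyword objects

Each ``.append()`` on ``user.keywords`` above constructs a new ``Keyword``
behind the scenes and adds it to ``user.kw``; no explicit handling of the
``Keyword`` class is required.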
Creation of New Values ----------------------- When a list append() event (or set add(), dictionary __setitem__(), or scalar assignment event) is intercepted by the association proxy, it instantiates a new instance of the "intermediary" object using its constructor, passing as a single argument the given value. In our example above, an operation like:: user.keywords.append('cheese inspector') Is translated by the association proxy into the operation:: user.kw.append(Keyword('cheese inspector')) The example works here because we have designed the constructor for ``Keyword`` to accept a single positional argument, ``keyword``. For those cases where a single-argument constructor isn't feasible, the association proxy's creational behavior can be customized using the ``creator`` argument, which references a callable (i.e. Python function) that will produce a new object instance given the singular argument. Below we illustrate this using a lambda as is typical:: class User(Base): # ... # use Keyword(keyword=kw) on append() events keywords = association_proxy('kw', 'keyword', creator=lambda kw: Keyword(keyword=kw)) The ``creator`` function accepts a single argument in the case of a list- or set- based collection, or a scalar attribute. In the case of a dictionary-based collection, it accepts two arguments, "key" and "value". An example of this is below in :ref:`proxying_dictionaries`. Simplifying Association Objects ------------------------------- The "association object" pattern is an extended form of a many-to-many relationship, and is described at :ref:`association_pattern`. Association proxies are useful for keeping "association objects" out the way during regular use. Suppose our ``userkeywords`` table above had additional columns which we'd like to map explicitly, but in most cases we don't require direct access to these attributes. Below, we illustrate a new mapping which introduces the ``UserKeyword`` class, which is mapped to the ``userkeywords`` table illustrated earlier. This class adds an additional column ``special_key``, a value which we occasionally want to access, but not in the usual case. 
We create an association proxy on the ``User`` class called ``keywords``, which will bridge the gap from the ``user_keywords`` collection of ``User`` to the ``.keyword`` attribute present on each ``UserKeyword``:: from sqlalchemy import Column, Integer, String, ForeignKey from sqlalchemy.orm import relationship, backref from sqlalchemy.ext.associationproxy import association_proxy from sqlalchemy.ext.declarative import declarative_base Base = declarative_base() class User(Base): __tablename__ = 'user' id = Column(Integer, primary_key=True) name = Column(String(64)) # association proxy of "user_keywords" collection # to "keyword" attribute keywords = association_proxy('user_keywords', 'keyword') def __init__(self, name): self.name = name class UserKeyword(Base): __tablename__ = 'user_keyword' user_id = Column(Integer, ForeignKey('user.id'), primary_key=True) keyword_id = Column(Integer, ForeignKey('keyword.id'), primary_key=True) special_key = Column(String(50)) # bidirectional attribute/collection of "user"/"user_keywords" user = relationship(User, backref=backref("user_keywords", cascade="all, delete-orphan") ) # reference to the "Keyword" object keyword = relationship("Keyword") def __init__(self, keyword=None, user=None, special_key=None): self.user = user self.keyword = keyword self.special_key = special_key class Keyword(Base): __tablename__ = 'keyword' id = Column(Integer, primary_key=True) keyword = Column('keyword', String(64)) def __init__(self, keyword): self.keyword = keyword def __repr__(self): return 'Keyword(%s)' % repr(self.keyword) With the above configuration, we can operate upon the ``.keywords`` collection of each ``User`` object, and the usage of ``UserKeyword`` is concealed:: >>> user = User('log') >>> for kw in (Keyword('new_from_blammo'), Keyword('its_big')): ... user.keywords.append(kw) ... >>> print(user.keywords) [Keyword('new_from_blammo'), Keyword('its_big')] Where above, each ``.keywords.append()`` operation is equivalent to:: >>> user.user_keywords.append(UserKeyword(Keyword('its_heavy'))) The ``UserKeyword`` association object has two attributes here which are populated; the ``.keyword`` attribute is populated directly as a result of passing the ``Keyword`` object as the first argument. The ``.user`` argument is then assigned as the ``UserKeyword`` object is appended to the ``User.user_keywords`` collection, where the bidirectional relationship configured between ``User.user_keywords`` and ``UserKeyword.user`` results in a population of the ``UserKeyword.user`` attribute. The ``special_key`` argument above is left at its default value of ``None``. For those cases where we do want ``special_key`` to have a value, we create the ``UserKeyword`` object explicitly. Below we assign all three attributes, where the assignment of ``.user`` has the effect of the ``UserKeyword`` being appended to the ``User.user_keywords`` collection:: >>> UserKeyword(Keyword('its_wood'), user, special_key='my special key') The association proxy returns to us a collection of ``Keyword`` objects represented by all these operations:: >>> user.keywords [Keyword('new_from_blammo'), Keyword('its_big'), Keyword('its_heavy'), Keyword('its_wood')] .. _proxying_dictionaries: Proxying to Dictionary Based Collections ----------------------------------------- The association proxy can proxy to dictionary based collections as well. 
SQLAlchemy mappings usually use the :func:`.attribute_mapped_collection` collection type to create dictionary collections, as well as the extended techniques described in :ref:`dictionary_collections`. The association proxy adjusts its behavior when it detects the usage of a dictionary-based collection. When new values are added to the dictionary, the association proxy instantiates the intermediary object by passing two arguments to the creation function instead of one, the key and the value. As always, this creation function defaults to the constructor of the intermediary class, and can be customized using the ``creator`` argument. Below, we modify our ``UserKeyword`` example such that the ``User.user_keywords`` collection will now be mapped using a dictionary, where the ``UserKeyword.special_key`` argument will be used as the key for the dictionary. We then apply a ``creator`` argument to the ``User.keywords`` proxy so that these values are assigned appropriately when new elements are added to the dictionary:: from sqlalchemy import Column, Integer, String, ForeignKey from sqlalchemy.orm import relationship, backref from sqlalchemy.ext.associationproxy import association_proxy from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm.collections import attribute_mapped_collection Base = declarative_base() class User(Base): __tablename__ = 'user' id = Column(Integer, primary_key=True) name = Column(String(64)) # proxy to 'user_keywords', instantiating UserKeyword # assigning the new key to 'special_key', values to # 'keyword'. keywords = association_proxy('user_keywords', 'keyword', creator=lambda k, v: UserKeyword(special_key=k, keyword=v) ) def __init__(self, name): self.name = name class UserKeyword(Base): __tablename__ = 'user_keyword' user_id = Column(Integer, ForeignKey('user.id'), primary_key=True) keyword_id = Column(Integer, ForeignKey('keyword.id'), primary_key=True) special_key = Column(String) # bidirectional user/user_keywords relationships, mapping # user_keywords with a dictionary against "special_key" as key. user = relationship(User, backref=backref( "user_keywords", collection_class=attribute_mapped_collection("special_key"), cascade="all, delete-orphan" ) ) keyword = relationship("Keyword") class Keyword(Base): __tablename__ = 'keyword' id = Column(Integer, primary_key=True) keyword = Column('keyword', String(64)) def __init__(self, keyword): self.keyword = keyword def __repr__(self): return 'Keyword(%s)' % repr(self.keyword) We illustrate the ``.keywords`` collection as a dictionary, mapping the ``UserKeyword.string_key`` value to ``Keyword`` objects:: >>> user = User('log') >>> user.keywords['sk1'] = Keyword('kw1') >>> user.keywords['sk2'] = Keyword('kw2') >>> print(user.keywords) {'sk1': Keyword('kw1'), 'sk2': Keyword('kw2')} .. _composite_association_proxy: Composite Association Proxies ----------------------------- Given our previous examples of proxying from relationship to scalar attribute, proxying across an association object, and proxying dictionaries, we can combine all three techniques together to give ``User`` a ``keywords`` dictionary that deals strictly with the string value of ``special_key`` mapped to the string ``keyword``. Both the ``UserKeyword`` and ``Keyword`` classes are entirely concealed. 
This is achieved by building an association proxy on ``User`` that refers to an association proxy present on ``UserKeyword``:: from sqlalchemy import Column, Integer, String, ForeignKey from sqlalchemy.orm import relationship, backref from sqlalchemy.ext.associationproxy import association_proxy from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm.collections import attribute_mapped_collection Base = declarative_base() class User(Base): __tablename__ = 'user' id = Column(Integer, primary_key=True) name = Column(String(64)) # the same 'user_keywords'->'keyword' proxy as in # the basic dictionary example keywords = association_proxy( 'user_keywords', 'keyword', creator=lambda k, v: UserKeyword(special_key=k, keyword=v) ) def __init__(self, name): self.name = name class UserKeyword(Base): __tablename__ = 'user_keyword' user_id = Column(Integer, ForeignKey('user.id'), primary_key=True) keyword_id = Column(Integer, ForeignKey('keyword.id'), primary_key=True) special_key = Column(String) user = relationship(User, backref=backref( "user_keywords", collection_class=attribute_mapped_collection("special_key"), cascade="all, delete-orphan" ) ) # the relationship to Keyword is now called # 'kw' kw = relationship("Keyword") # 'keyword' is changed to be a proxy to the # 'keyword' attribute of 'Keyword' keyword = association_proxy('kw', 'keyword') class Keyword(Base): __tablename__ = 'keyword' id = Column(Integer, primary_key=True) keyword = Column('keyword', String(64)) def __init__(self, keyword): self.keyword = keyword ``User.keywords`` is now a dictionary of string to string, where ``UserKeyword`` and ``Keyword`` objects are created and removed for us transparently using the association proxy. In the example below, we illustrate usage of the assignment operator, also appropriately handled by the association proxy, to apply a dictionary value to the collection at once:: >>> user = User('log') >>> user.keywords = { ... 'sk1':'kw1', ... 'sk2':'kw2' ... } >>> print(user.keywords) {'sk1': 'kw1', 'sk2': 'kw2'} >>> user.keywords['sk3'] = 'kw3' >>> del user.keywords['sk2'] >>> print(user.keywords) {'sk1': 'kw1', 'sk3': 'kw3'} >>> # illustrate un-proxied usage ... print(user.user_keywords['sk3'].kw) <__main__.Keyword object at 0x12ceb90> One caveat with our example above is that because ``Keyword`` objects are created for each dictionary set operation, the example fails to maintain uniqueness for the ``Keyword`` objects on their string name, which is a typical requirement for a tagging scenario such as this one. For this use case the recipe `UniqueObject `_, or a comparable creational strategy, is recommended, which will apply a "lookup first, then create" strategy to the constructor of the ``Keyword`` class, so that an already existing ``Keyword`` is returned if the given name is already present. Querying with Association Proxies --------------------------------- The :class:`.AssociationProxy` features simple SQL construction capabilities which relate down to the underlying :func:`.relationship` in use as well as the target attribute. 
For example, the :meth:`.RelationshipProperty.Comparator.any` and :meth:`.RelationshipProperty.Comparator.has` operations are available, and will produce a "nested" EXISTS clause, such as in our basic association object example:: >>> print(session.query(User).filter(User.keywords.any(keyword='jek'))) SELECT user.id AS user_id, user.name AS user_name FROM user WHERE EXISTS (SELECT 1 FROM user_keyword WHERE user.id = user_keyword.user_id AND (EXISTS (SELECT 1 FROM keyword WHERE keyword.id = user_keyword.keyword_id AND keyword.keyword = :keyword_1))) For a proxy to a scalar attribute, ``__eq__()`` is supported:: >>> print(session.query(UserKeyword).filter(UserKeyword.keyword == 'jek')) SELECT user_keyword.* FROM user_keyword WHERE EXISTS (SELECT 1 FROM keyword WHERE keyword.id = user_keyword.keyword_id AND keyword.keyword = :keyword_1) and ``.contains()`` is available for a proxy to a scalar collection:: >>> print(session.query(User).filter(User.keywords.contains('jek'))) SELECT user.* FROM user WHERE EXISTS (SELECT 1 FROM userkeywords, keyword WHERE user.id = userkeywords.user_id AND keyword.id = userkeywords.keyword_id AND keyword.keyword = :keyword_1) :class:`.AssociationProxy` can be used with :meth:`.Query.join` somewhat manually using the :attr:`~.AssociationProxy.attr` attribute in a star-args context:: q = session.query(User).join(*User.keywords.attr) .. versionadded:: 0.7.3 :attr:`~.AssociationProxy.attr` attribute in a star-args context. :attr:`~.AssociationProxy.attr` is composed of :attr:`.AssociationProxy.local_attr` and :attr:`.AssociationProxy.remote_attr`, which are just synonyms for the actual proxied attributes, and can also be used for querying:: uka = aliased(UserKeyword) ka = aliased(Keyword) q = session.query(User).\ join(uka, User.keywords.local_attr).\ join(ka, User.keywords.remote_attr) .. versionadded:: 0.7.3 :attr:`.AssociationProxy.local_attr` and :attr:`.AssociationProxy.remote_attr`, synonyms for the actual proxied attributes, and usable for querying. API Documentation ----------------- .. autofunction:: association_proxy .. autoclass:: AssociationProxy :members: :undoc-members: .. autodata:: ASSOCIATION_PROXYSQLAlchemy-0.8.4/doc/_sources/orm/extensions/declarative.txt0000644000076500000240000000104412251147171024640 0ustar classicstaff00000000000000.. _declarative_toplevel: Declarative =========== .. automodule:: sqlalchemy.ext.declarative API Reference ------------- .. autofunction:: declarative_base .. autofunction:: as_declarative .. autoclass:: declared_attr .. autofunction:: sqlalchemy.ext.declarative.api._declarative_constructor .. autofunction:: has_inherited_table .. autofunction:: synonym_for .. autofunction:: comparable_using .. autofunction:: instrument_declarative .. autoclass:: AbstractConcreteBase .. autoclass:: ConcreteBase .. autoclass:: DeferredReflection SQLAlchemy-0.8.4/doc/_sources/orm/extensions/horizontal_shard.txt0000644000076500000240000000032512251147171025730 0ustar classicstaff00000000000000Horizontal Sharding =================== .. automodule:: sqlalchemy.ext.horizontal_shard API Documentation ----------------- .. autoclass:: ShardedSession :members: .. autoclass:: ShardedQuery :members: SQLAlchemy-0.8.4/doc/_sources/orm/extensions/hybrid.txt0000644000076500000240000000046612251147171023645 0ustar classicstaff00000000000000.. _hybrids_toplevel: Hybrid Attributes ================= .. automodule:: sqlalchemy.ext.hybrid API Reference ------------- .. autoclass:: hybrid_method :members: .. autoclass:: hybrid_property :members: .. 
autoclass:: Comparator .. autodata:: HYBRID_METHOD .. autodata:: HYBRID_PROPERTY SQLAlchemy-0.8.4/doc/_sources/orm/extensions/index.txt0000644000076500000240000000115212251147171023464 0ustar classicstaff00000000000000.. _plugins: .. _sqlalchemy.ext: ORM Extensions ============== SQLAlchemy has a variety of ORM extensions available, which add additional functionality to the core behavior. The extensions build almost entirely on public core and ORM APIs and users should be encouraged to read their source code to further their understanding of their behavior. In particular the "Horizontal Sharding", "Hybrid Attributes", and "Mutation Tracking" extensions are very succinct. .. toctree:: :maxdepth: 1 associationproxy declarative mutable orderinglist horizontal_shard hybrid instrumentation SQLAlchemy-0.8.4/doc/_sources/orm/extensions/instrumentation.txt0000644000076500000240000000061312251147171025621 0ustar classicstaff00000000000000.. _instrumentation_toplevel: Alternate Class Instrumentation ================================ .. automodule:: sqlalchemy.ext.instrumentation API Reference ------------- .. autodata:: INSTRUMENTATION_MANAGER .. autoclass:: InstrumentationManager :members: :undoc-members: .. autodata:: instrumentation_finders .. autoclass:: ExtendedInstrumentationRegistry :members: SQLAlchemy-0.8.4/doc/_sources/orm/extensions/mutable.txt0000644000076500000240000000050512251147171024007 0ustar classicstaff00000000000000.. _mutable_toplevel: Mutation Tracking ================== .. automodule:: sqlalchemy.ext.mutable API Reference ------------- .. autoclass:: MutableBase :members: _parents, coerce .. autoclass:: Mutable :members: .. autoclass:: MutableComposite :members: .. autoclass:: MutableDict :members: SQLAlchemy-0.8.4/doc/_sources/orm/extensions/orderinglist.txt0000644000076500000240000000043212251147171025062 0ustar classicstaff00000000000000Ordering List ============= .. automodule:: sqlalchemy.ext.orderinglist API Reference ------------- .. autofunction:: ordering_list .. autofunction:: count_from_0 .. autofunction:: count_from_1 .. autofunction:: count_from_n_factory .. autoclass:: OrderingList :members: SQLAlchemy-0.8.4/doc/_sources/orm/index.txt0000644000076500000240000000077712251147171021301 0ustar classicstaff00000000000000.. _orm_toplevel: SQLAlchemy ORM =============== Here, the Object Relational Mapper is introduced and fully described. If you want to work with higher-level SQL which is constructed automatically for you, as well as automated persistence of Python objects, proceed first to the tutorial. .. toctree:: :maxdepth: 3 tutorial mapper_config relationships collections inheritance session query loading events extensions/index examples exceptions internals SQLAlchemy-0.8.4/doc/_sources/orm/inheritance.txt0000644000076500000240000007233112251147171022456 0ustar classicstaff00000000000000.. _inheritance_toplevel: Mapping Class Inheritance Hierarchies ====================================== SQLAlchemy supports three forms of inheritance: **single table inheritance**, where several types of classes are represented by a single table, **concrete table inheritance**, where each type of class is represented by independent tables, and **joined table inheritance**, where the class hierarchy is broken up among dependent tables, each class represented by its own table that only includes those attributes local to that class. The most common forms of inheritance are single and joined table, while concrete inheritance presents more configurational challenges. 
When mappers are configured in an inheritance relationship, SQLAlchemy has the ability to load elements :term:`polymorphically`, meaning that a single query can return objects of multiple types. Joined Table Inheritance ------------------------- In joined table inheritance, each class along a particular classes' list of parents is represented by a unique table. The total set of attributes for a particular instance is represented as a join along all tables in its inheritance path. Here, we first define the ``Employee`` class. This table will contain a primary key column (or columns), and a column for each attribute that's represented by ``Employee``. In this case it's just ``name``:: class Employee(Base): __tablename__ = 'employee' id = Column(Integer, primary_key=True) name = Column(String(50)) type = Column(String(50)) __mapper_args__ = { 'polymorphic_identity':'employee', 'polymorphic_on':type } The mapped table also has a column called ``type``. The purpose of this column is to act as the **discriminator**, and stores a value which indicates the type of object represented within the row. The column may be of any datatype, though string and integer are the most common. The discriminator column is only needed if polymorphic loading is desired, as is usually the case. It is not strictly necessary that it be present directly on the base mapped table, and can instead be defined on a derived select statement that's used when the class is queried; however, this is a much more sophisticated configuration scenario. The mapping receives additional arguments via the ``__mapper_args__`` dictionary. Here the ``type`` column is explicitly stated as the discriminator column, and the **polymorphic identity** of ``employee`` is also given; this is the value that will be stored in the polymorphic discriminator column for instances of this class. We next define ``Engineer`` and ``Manager`` subclasses of ``Employee``. Each contains columns that represent the attributes unique to the subclass they represent. Each table also must contain a primary key column (or columns), and in most cases a foreign key reference to the parent table:: class Engineer(Employee): __tablename__ = 'engineer' id = Column(Integer, ForeignKey('employee.id'), primary_key=True) engineer_name = Column(String(30)) __mapper_args__ = { 'polymorphic_identity':'engineer', } class Manager(Employee): __tablename__ = 'manager' id = Column(Integer, ForeignKey('employee.id'), primary_key=True) manager_name = Column(String(30)) __mapper_args__ = { 'polymorphic_identity':'manager', } It is standard practice that the same column is used for both the role of primary key as well as foreign key to the parent table, and that the column is also named the same as that of the parent table. However, both of these practices are optional. Separate columns may be used for primary key and parent-relationship, the column may be named differently than that of the parent, and even a custom join condition can be specified between parent and child tables instead of using a foreign key. .. topic:: Joined inheritance primary keys One natural effect of the joined table inheritance configuration is that the identity of any mapped object can be determined entirely from the base table. This has obvious advantages, so SQLAlchemy always considers the primary key columns of a joined inheritance class to be those of the base table only. 
In other words, the ``id`` columns of both the ``engineer`` and ``manager`` tables are not used to locate ``Engineer`` or ``Manager`` objects - only the value in ``employee.id`` is considered. ``engineer.id`` and ``manager.id`` are still of course critical to the proper operation of the pattern overall as they are used to locate the joined row, once the parent row has been determined within a statement. With the joined inheritance mapping complete, querying against ``Employee`` will return a combination of ``Employee``, ``Engineer`` and ``Manager`` objects. Newly saved ``Engineer``, ``Manager``, and ``Employee`` objects will automatically populate the ``employee.type`` column with ``engineer``, ``manager``, or ``employee``, as appropriate. .. _with_polymorphic: Basic Control of Which Tables are Queried ++++++++++++++++++++++++++++++++++++++++++ The :func:`.orm.with_polymorphic` function and the :func:`~sqlalchemy.orm.query.Query.with_polymorphic` method of :class:`~sqlalchemy.orm.query.Query` affects the specific tables which the :class:`.Query` selects from. Normally, a query such as this:: session.query(Employee).all() ...selects only from the ``employee`` table. When loading fresh from the database, our joined-table setup will query from the parent table only, using SQL such as this: .. sourcecode:: python+sql {opensql} SELECT employee.id AS employee_id, employee.name AS employee_name, employee.type AS employee_type FROM employee [] As attributes are requested from those ``Employee`` objects which are represented in either the ``engineer`` or ``manager`` child tables, a second load is issued for the columns in that related row, if the data was not already loaded. So above, after accessing the objects you'd see further SQL issued along the lines of: .. sourcecode:: python+sql {opensql} SELECT manager.id AS manager_id, manager.manager_data AS manager_manager_data FROM manager WHERE ? = manager.id [5] SELECT engineer.id AS engineer_id, engineer.engineer_info AS engineer_engineer_info FROM engineer WHERE ? = engineer.id [2] This behavior works well when issuing searches for small numbers of items, such as when using :meth:`.Query.get`, since the full range of joined tables are not pulled in to the SQL statement unnecessarily. But when querying a larger span of rows which are known to be of many types, you may want to actively join to some or all of the joined tables. The ``with_polymorphic`` feature provides this. Telling our query to polymorphically load ``Engineer`` and ``Manager`` objects, we can use the :func:`.orm.with_polymorphic` function to create a new aliased class which represents a select of the base table combined with outer joins to each of the inheriting tables:: from sqlalchemy.orm import with_polymorphic eng_plus_manager = with_polymorphic(Employee, [Engineer, Manager]) query = session.query(eng_plus_manager) The above produces a query which joins the ``employee`` table to both the ``engineer`` and ``manager`` tables like the following: .. 
sourcecode:: python+sql query.all() {opensql} SELECT employee.id AS employee_id, engineer.id AS engineer_id, manager.id AS manager_id, employee.name AS employee_name, employee.type AS employee_type, engineer.engineer_info AS engineer_engineer_info, manager.manager_data AS manager_manager_data FROM employee LEFT OUTER JOIN engineer ON employee.id = engineer.id LEFT OUTER JOIN manager ON employee.id = manager.id [] The entity returned by :func:`.orm.with_polymorphic` is an :class:`.AliasedClass` object, which can be used in a :class:`.Query` like any other alias, including named attributes for those attributes on the ``Employee`` class. In our example, ``eng_plus_manager`` becomes the entity that we use to refer to the three-way outer join above. It also includes namespaces for each class named in the list of classes, so that attributes specific to those subclasses can be called upon as well. The following example illustrates calling upon attributes specific to ``Engineer`` as well as ``Manager`` in terms of ``eng_plus_manager``:: eng_plus_manager = with_polymorphic(Employee, [Engineer, Manager]) query = session.query(eng_plus_manager).filter( or_( eng_plus_manager.Engineer.engineer_info=='x', eng_plus_manager.Manager.manager_data=='y' ) ) :func:`.orm.with_polymorphic` accepts a single class or mapper, a list of classes/mappers, or the string ``'*'`` to indicate all subclasses: .. sourcecode:: python+sql # join to the engineer table entity = with_polymorphic(Employee, Engineer) # join to the engineer and manager tables entity = with_polymorphic(Employee, [Engineer, Manager]) # join to all subclass tables entity = query.with_polymorphic(Employee, '*') # use with Query session.query(entity).all() It also accepts a second argument ``selectable`` which replaces the automatic join creation and instead selects directly from the selectable given. This feature is normally used with "concrete" inheritance, described later, but can be used with any kind of inheritance setup in the case that specialized SQL should be used to load polymorphically:: # custom selectable employee = Employee.__table__ manager = Manager.__table__ engineer = Engineer.__table__ entity = with_polymorphic( Employee, [Engineer, Manager], employee.outerjoin(manager).outerjoin(engineer) ) # use with Query session.query(entity).all() Note that if you only need to load a single subtype, such as just the ``Engineer`` objects, :func:`.orm.with_polymorphic` is not needed since you would query against the ``Engineer`` class directly. :meth:`.Query.with_polymorphic` has the same purpose as :func:`.orm.with_polymorphic`, except is not as flexible in its usage patterns in that it only applies to the first full mapping, which then impacts all occurrences of that class or the target subclasses within the :class:`.Query`. For simple cases it might be considered to be more succinct:: session.query(Employee).with_polymorphic([Engineer, Manager]).\ filter(or_(Engineer.engineer_info=='w', Manager.manager_data=='q')) .. versionadded:: 0.8 :func:`.orm.with_polymorphic`, an improved version of :meth:`.Query.with_polymorphic` method. The mapper also accepts ``with_polymorphic`` as a configurational argument so that the joined-style load will be issued automatically. 
This argument may be the string ``'*'``, a list of classes, or a tuple consisting of either, followed by a selectable:: class Employee(Base): __tablename__ = 'employee' id = Column(Integer, primary_key=True) type = Column(String(20)) __mapper_args__ = { 'polymorphic_on':type, 'polymorphic_identity':'employee', 'with_polymorphic':'*' } class Engineer(Employee): __tablename__ = 'engineer' id = Column(Integer, ForeignKey('employee.id'), primary_key=True) __mapper_args__ = {'polymorphic_identity':'engineer'} class Manager(Employee): __tablename__ = 'manager' id = Column(Integer, ForeignKey('employee.id'), primary_key=True) __mapper_args__ = {'polymorphic_identity':'manager'} The above mapping will produce a query similar to that of ``with_polymorphic('*')`` for every query of ``Employee`` objects. Using :func:`.orm.with_polymorphic` or :meth:`.Query.with_polymorphic` will override the mapper-level ``with_polymorphic`` setting. .. autofunction:: sqlalchemy.orm.with_polymorphic Advanced Control of Which Tables are Queried +++++++++++++++++++++++++++++++++++++++++++++ The ``with_polymorphic`` functions work fine for simplistic scenarios. However, direct control of table rendering is called for, such as the case when one wants to render to only the subclass table and not the parent table. This use case can be achieved by using the mapped :class:`.Table` objects directly. For example, to query the name of employees with particular criterion:: engineer = Engineer.__table__ manager = Manager.__table__ session.query(Employee.name).\ outerjoin((engineer, engineer.c.employee_id==Employee.employee_id)).\ outerjoin((manager, manager.c.employee_id==Employee.employee_id)).\ filter(or_(Engineer.engineer_info=='w', Manager.manager_data=='q')) The base table, in this case the "employees" table, isn't always necessary. A SQL query is always more efficient with fewer joins. Here, if we wanted to just load information specific to manager or engineer, we can instruct :class:`.Query` to use only those tables. The ``FROM`` clause is determined by what's specified in the :meth:`.Session.query`, :meth:`.Query.filter`, or :meth:`.Query.select_from` methods:: session.query(Manager.manager_data).select_from(manager) session.query(engineer.c.id).\ filter(engineer.c.engineer_info==manager.c.manager_data) .. _of_type: Creating Joins to Specific Subtypes +++++++++++++++++++++++++++++++++++ The :func:`~sqlalchemy.orm.interfaces.PropComparator.of_type` method is a helper which allows the construction of joins along :func:`~sqlalchemy.orm.relationship` paths while narrowing the criterion to specific subclasses. Suppose the ``employees`` table represents a collection of employees which are associated with a ``Company`` object. We'll add a ``company_id`` column to the ``employees`` table and a new table ``companies``: .. 
sourcecode:: python+sql class Company(Base): __tablename__ = 'company' id = Column(Integer, primary_key=True) name = Column(String(50)) employees = relationship("Employee", backref='company', cascade='all, delete-orphan') class Employee(Base): __tablename__ = 'employee' id = Column(Integer, primary_key=True) type = Column(String(20)) company_id = Column(Integer, ForeignKey('company.id')) __mapper_args__ = { 'polymorphic_on':type, 'polymorphic_identity':'employee', 'with_polymorphic':'*' } class Engineer(Employee): __tablename__ = 'engineer' id = Column(Integer, ForeignKey('employee.id'), primary_key=True) engineer_info = Column(String(50)) __mapper_args__ = {'polymorphic_identity':'engineer'} class Manager(Employee): __tablename__ = 'manager' id = Column(Integer, ForeignKey('employee.id'), primary_key=True) manager_data = Column(String(50)) __mapper_args__ = {'polymorphic_identity':'manager'} When querying from ``Company`` onto the ``Employee`` relationship, the ``join()`` method as well as the ``any()`` and ``has()`` operators will create a join from ``company`` to ``employee``, without including ``engineer`` or ``manager`` in the mix. If we wish to have criterion which is specifically against the ``Engineer`` class, we can tell those methods to join or subquery against the joined table representing the subclass using the :meth:`~.orm.interfaces.PropComparator.of_type` operator:: session.query(Company).\ join(Company.employees.of_type(Engineer)).\ filter(Engineer.engineer_info=='someinfo') A longhand version of this would involve spelling out the full target selectable within a 2-tuple:: employee = Employee.__table__ engineer = Engineer.__table__ session.query(Company).\ join((employee.join(engineer), Company.employees)).\ filter(Engineer.engineer_info=='someinfo') :func:`~sqlalchemy.orm.interfaces.PropComparator.of_type` accepts a single class argument. More flexibility can be achieved either by joining to an explicit join as above, or by using the :func:`.orm.with_polymorphic` function to create a polymorphic selectable:: manager_and_engineer = with_polymorphic( Employee, [Manager, Engineer], aliased=True) session.query(Company).\ join(manager_and_engineer, Company.employees).\ filter( or_(manager_and_engineer.Engineer.engineer_info=='someinfo', manager_and_engineer.Manager.manager_data=='somedata') ) Above, we use the ``aliased=True`` argument with :func:`.orm.with_polymorhpic` so that the right hand side of the join between ``Company`` and ``manager_and_engineer`` is converted into an aliased subquery. Some backends, such as SQLite and older versions of MySQL can't handle a FROM clause of the following form:: FROM x JOIN (y JOIN z ON ) ON Using ``aliased=True`` instead renders it more like:: FROM x JOIN (SELECT * FROM y JOIN z ON ) AS anon_1 ON The above join can also be expressed more succinctly by combining ``of_type()`` with the polymorphic construct:: manager_and_engineer = with_polymorphic( Employee, [Manager, Engineer], aliased=True) session.query(Company).\ join(Company.employees.of_type(manager_and_engineer)).\ filter( or_(manager_and_engineer.Engineer.engineer_info=='someinfo', manager_and_engineer.Manager.manager_data=='somedata') ) The ``any()`` and ``has()`` operators also can be used with :func:`~sqlalchemy.orm.interfaces.PropComparator.of_type` when the embedded criterion is in terms of a subclass:: session.query(Company).\ filter( Company.employees.of_type(Engineer). 
any(Engineer.engineer_info=='someinfo') ).all() Note that the ``any()`` and ``has()`` are both shorthand for a correlated EXISTS query. To build one by hand looks like:: session.query(Company).filter( exists([1], and_(Engineer.engineer_info=='someinfo', employees.c.company_id==companies.c.company_id), from_obj=employees.join(engineers) ) ).all() The EXISTS subquery above selects from the join of ``employees`` to ``engineers``, and also specifies criterion which correlates the EXISTS subselect back to the parent ``companies`` table. .. versionadded:: 0.8 :func:`~sqlalchemy.orm.interfaces.PropComparator.of_type` accepts :func:`.orm.aliased` and :func:`.orm.with_polymorphic` constructs in conjunction with :meth:`.Query.join`, ``any()`` and ``has()``. Eager Loading of Specific Subtypes ++++++++++++++++++++++++++++++++++ The :func:`.joinedload` and :func:`.subqueryload` options also support paths which make use of :func:`~sqlalchemy.orm.interfaces.PropComparator.of_type`. Below we load ``Company`` rows while eagerly loading related ``Engineer`` objects, querying the ``employee`` and ``engineer`` tables simultaneously:: session.query(Company).\ options(subqueryload_all(Company.employees.of_type(Engineer), Engineer.machines)) .. versionadded:: 0.8 :func:`.joinedload` and :func:`.subqueryload` support paths that are qualified with :func:`~sqlalchemy.orm.interfaces.PropComparator.of_type`. Single Table Inheritance ------------------------ Single table inheritance is where the attributes of the base class as well as all subclasses are represented within a single table. A column is present in the table for every attribute mapped to the base class and all subclasses; the columns which correspond to a single subclass are nullable. This configuration looks much like joined-table inheritance except there's only one table. In this case, a ``type`` column is required, as there would be no other way to discriminate between classes. The table is specified in the base mapper only; for the inheriting classes, leave their ``table`` parameter blank: .. sourcecode:: python+sql class Employee(Base): __tablename__ = 'employee' id = Column(Integer, primary_key=True) name = Column(String(50)) manager_data = Column(String(50)) engineer_info = Column(String(50)) type = Column(String(20)) __mapper_args__ = { 'polymorphic_on':type, 'polymorphic_identity':'employee' } class Manager(Employee): __mapper_args__ = { 'polymorphic_identity':'manager' } class Engineer(Employee): __mapper_args__ = { 'polymorphic_identity':'engineer' } Note that the mappers for the derived classes Manager and Engineer omit the ``__tablename__``, indicating they do not have a mapped table of their own. .. _concrete_inheritance: Concrete Table Inheritance -------------------------- .. note:: this section is currently using classical mappings. The Declarative system fully supports concrete inheritance however. See the links below for more information on using declarative with concrete table inheritance. This form of inheritance maps each class to a distinct table, as below: .. 
sourcecode:: python+sql employees_table = Table('employees', metadata, Column('employee_id', Integer, primary_key=True), Column('name', String(50)), ) managers_table = Table('managers', metadata, Column('employee_id', Integer, primary_key=True), Column('name', String(50)), Column('manager_data', String(50)), ) engineers_table = Table('engineers', metadata, Column('employee_id', Integer, primary_key=True), Column('name', String(50)), Column('engineer_info', String(50)), ) Notice in this case there is no ``type`` column. If polymorphic loading is not required, there's no advantage to using ``inherits`` here; you just define a separate mapper for each class. .. sourcecode:: python+sql mapper(Employee, employees_table) mapper(Manager, managers_table) mapper(Engineer, engineers_table) To load polymorphically, the ``with_polymorphic`` argument is required, along with a selectable indicating how rows should be loaded. In this case we must construct a UNION of all three tables. SQLAlchemy includes a helper function to create these called :func:`~sqlalchemy.orm.util.polymorphic_union`, which will map all the different columns into a structure of selects with the same numbers and names of columns, and also generate a virtual ``type`` column for each subselect: .. sourcecode:: python+sql pjoin = polymorphic_union({ 'employee': employees_table, 'manager': managers_table, 'engineer': engineers_table }, 'type', 'pjoin') employee_mapper = mapper(Employee, employees_table, with_polymorphic=('*', pjoin), polymorphic_on=pjoin.c.type, polymorphic_identity='employee') manager_mapper = mapper(Manager, managers_table, inherits=employee_mapper, concrete=True, polymorphic_identity='manager') engineer_mapper = mapper(Engineer, engineers_table, inherits=employee_mapper, concrete=True, polymorphic_identity='engineer') Upon select, the polymorphic union produces a query like this: .. sourcecode:: python+sql session.query(Employee).all() {opensql} SELECT pjoin.type AS pjoin_type, pjoin.manager_data AS pjoin_manager_data, pjoin.employee_id AS pjoin_employee_id, pjoin.name AS pjoin_name, pjoin.engineer_info AS pjoin_engineer_info FROM ( SELECT employees.employee_id AS employee_id, CAST(NULL AS VARCHAR(50)) AS manager_data, employees.name AS name, CAST(NULL AS VARCHAR(50)) AS engineer_info, 'employee' AS type FROM employees UNION ALL SELECT managers.employee_id AS employee_id, managers.manager_data AS manager_data, managers.name AS name, CAST(NULL AS VARCHAR(50)) AS engineer_info, 'manager' AS type FROM managers UNION ALL SELECT engineers.employee_id AS employee_id, CAST(NULL AS VARCHAR(50)) AS manager_data, engineers.name AS name, engineers.engineer_info AS engineer_info, 'engineer' AS type FROM engineers ) AS pjoin [] Concrete Inheritance with Declarative ++++++++++++++++++++++++++++++++++++++ .. versionadded:: 0.7.3 The :ref:`declarative_toplevel` module includes helpers for concrete inheritance. See :ref:`declarative_concrete_helpers` for more information. Using Relationships with Inheritance ------------------------------------ Both joined-table and single table inheritance scenarios produce mappings which are usable in :func:`~sqlalchemy.orm.relationship` functions; that is, it's possible to map a parent object to a child object which is polymorphic. Similarly, inheriting mappers can have :func:`~sqlalchemy.orm.relationship` objects of their own at any level, which are inherited to each child class. The only requirement for relationships is that there is a table relationship between parent and child. 
An example is the following modification to the joined table inheritance example, which sets a bi-directional relationship between ``Employee`` and ``Company``: .. sourcecode:: python+sql employees_table = Table('employees', metadata, Column('employee_id', Integer, primary_key=True), Column('name', String(50)), Column('company_id', Integer, ForeignKey('companies.company_id')) ) companies = Table('companies', metadata, Column('company_id', Integer, primary_key=True), Column('name', String(50))) class Company(object): pass mapper(Company, companies, properties={ 'employees': relationship(Employee, backref='company') }) Relationships with Concrete Inheritance +++++++++++++++++++++++++++++++++++++++ In a concrete inheritance scenario, mapping relationships is more challenging since the distinct classes do not share a table. In this case, you *can* establish a relationship from parent to child if a join condition can be constructed from parent to child, if each child table contains a foreign key to the parent: .. sourcecode:: python+sql companies = Table('companies', metadata, Column('id', Integer, primary_key=True), Column('name', String(50))) employees_table = Table('employees', metadata, Column('employee_id', Integer, primary_key=True), Column('name', String(50)), Column('company_id', Integer, ForeignKey('companies.id')) ) managers_table = Table('managers', metadata, Column('employee_id', Integer, primary_key=True), Column('name', String(50)), Column('manager_data', String(50)), Column('company_id', Integer, ForeignKey('companies.id')) ) engineers_table = Table('engineers', metadata, Column('employee_id', Integer, primary_key=True), Column('name', String(50)), Column('engineer_info', String(50)), Column('company_id', Integer, ForeignKey('companies.id')) ) mapper(Employee, employees_table, with_polymorphic=('*', pjoin), polymorphic_on=pjoin.c.type, polymorphic_identity='employee') mapper(Manager, managers_table, inherits=employee_mapper, concrete=True, polymorphic_identity='manager') mapper(Engineer, engineers_table, inherits=employee_mapper, concrete=True, polymorphic_identity='engineer') mapper(Company, companies, properties={ 'employees': relationship(Employee) }) The big limitation with concrete table inheritance is that :func:`~sqlalchemy.orm.relationship` objects placed on each concrete mapper do **not** propagate to child mappers. If you want to have the same :func:`~sqlalchemy.orm.relationship` objects set up on all concrete mappers, they must be configured manually on each. To configure back references in such a configuration the ``back_populates`` keyword may be used instead of ``backref``, such as below where both ``A(object)`` and ``B(A)`` bidirectionally reference ``C``:: ajoin = polymorphic_union({ 'a':a_table, 'b':b_table }, 'type', 'ajoin') mapper(A, a_table, with_polymorphic=('*', ajoin), polymorphic_on=ajoin.c.type, polymorphic_identity='a', properties={ 'some_c':relationship(C, back_populates='many_a') }) mapper(B, b_table,inherits=A, concrete=True, polymorphic_identity='b', properties={ 'some_c':relationship(C, back_populates='many_a') }) mapper(C, c_table, properties={ 'many_a':relationship(A, collection_class=set, back_populates='some_c'), }) Using Inheritance with Declarative ----------------------------------- Declarative makes inheritance configuration more intuitive. See the docs at :ref:`declarative_inheritance`. SQLAlchemy-0.8.4/doc/_sources/orm/internals.txt0000644000076500000240000000256412251147171022165 0ustar classicstaff00000000000000.. 
_orm_internal_toplevel: ORM Internals ============= Key ORM constructs, not otherwise covered in other sections, are listed here. .. currentmodule: sqlalchemy.orm .. autoclass:: sqlalchemy.orm.state.AttributeState :members: :inherited-members: .. autoclass:: sqlalchemy.orm.instrumentation.ClassManager :members: :inherited-members: .. autoclass:: sqlalchemy.orm.properties.ColumnProperty :members: :inherited-members: .. autoclass:: sqlalchemy.orm.descriptor_props.CompositeProperty :members: .. autoclass:: sqlalchemy.orm.interfaces._InspectionAttr :members: .. autoclass:: sqlalchemy.orm.state.InstanceState :members: .. autoclass:: sqlalchemy.orm.attributes.InstrumentedAttribute :members: __get__, __set__, __delete__ :undoc-members: .. autoclass:: sqlalchemy.orm.interfaces.MapperProperty :members: .. autodata:: sqlalchemy.orm.interfaces.NOT_EXTENSION .. autoclass:: sqlalchemy.orm.interfaces.PropComparator :members: :inherited-members: .. autoclass:: sqlalchemy.orm.properties.RelationshipProperty :members: :inherited-members: .. autoclass:: sqlalchemy.orm.descriptor_props.SynonymProperty :members: :inherited-members: .. autoclass:: sqlalchemy.orm.query.QueryContext :members: .. autoclass:: sqlalchemy.orm.attributes.QueryableAttribute :members: :inherited-members: SQLAlchemy-0.8.4/doc/_sources/orm/loading.txt0000644000076500000240000005524712251147171021611 0ustar classicstaff00000000000000.. currentmodule:: sqlalchemy.orm Relationship Loading Techniques =============================== A big part of SQLAlchemy is providing a wide range of control over how related objects get loaded when querying. This behavior can be configured at mapper construction time using the ``lazy`` parameter to the :func:`.relationship` function, as well as by using options with the :class:`.Query` object. Using Loader Strategies: Lazy Loading, Eager Loading ---------------------------------------------------- By default, all inter-object relationships are **lazy loading**. The scalar or collection attribute associated with a :func:`~sqlalchemy.orm.relationship` contains a trigger which fires the first time the attribute is accessed. This trigger, in all but one case, issues a SQL call at the point of access in order to load the related object or objects: .. sourcecode:: python+sql {sql}>>> jack.addresses SELECT addresses.id AS addresses_id, addresses.email_address AS addresses_email_address, addresses.user_id AS addresses_user_id FROM addresses WHERE ? = addresses.user_id [5] {stop}[, ] The one case where SQL is not emitted is for a simple many-to-one relationship, when the related object can be identified by its primary key alone and that object is already present in the current :class:`.Session`. This default behavior of "load upon attribute access" is known as "lazy" or "select" loading - the name "select" because a "SELECT" statement is typically emitted when the attribute is first accessed. In the :ref:`ormtutorial_toplevel`, we introduced the concept of **Eager Loading**. We used an ``option`` in conjunction with the :class:`~sqlalchemy.orm.query.Query` object in order to indicate that a relationship should be loaded at the same time as the parent, within a single SQL query. This option, known as :func:`.joinedload`, connects a JOIN (by default a LEFT OUTER join) to the statement and populates the scalar/collection from the same result set as that of the parent: .. sourcecode:: python+sql {sql}>>> jack = session.query(User).\ ... options(joinedload('addresses')).\ ... 
filter_by(name='jack').all() #doctest: +NORMALIZE_WHITESPACE SELECT addresses_1.id AS addresses_1_id, addresses_1.email_address AS addresses_1_email_address, addresses_1.user_id AS addresses_1_user_id, users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, users.password AS users_password FROM users LEFT OUTER JOIN addresses AS addresses_1 ON users.id = addresses_1.user_id WHERE users.name = ? ['jack'] In addition to "joined eager loading", a second option for eager loading exists, called "subquery eager loading". This kind of eager loading emits an additional SQL statement for each collection requested, aggregated across all parent objects: .. sourcecode:: python+sql {sql}>>> jack = session.query(User).\ ... options(subqueryload('addresses')).\ ... filter_by(name='jack').all() SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, users.password AS users_password FROM users WHERE users.name = ? ('jack',) SELECT addresses.id AS addresses_id, addresses.email_address AS addresses_email_address, addresses.user_id AS addresses_user_id, anon_1.users_id AS anon_1_users_id FROM (SELECT users.id AS users_id FROM users WHERE users.name = ?) AS anon_1 JOIN addresses ON anon_1.users_id = addresses.user_id ORDER BY anon_1.users_id, addresses.id ('jack',) The default **loader strategy** for any :func:`~sqlalchemy.orm.relationship` is configured by the ``lazy`` keyword argument, which defaults to ``select`` - this indicates a "select" statement . Below we set it as ``joined`` so that the ``children`` relationship is eager loading, using a join: .. sourcecode:: python+sql # load the 'children' collection using LEFT OUTER JOIN mapper(Parent, parent_table, properties={ 'children': relationship(Child, lazy='joined') }) We can also set it to eagerly load using a second query for all collections, using ``subquery``: .. sourcecode:: python+sql # load the 'children' attribute using a join to a subquery mapper(Parent, parent_table, properties={ 'children': relationship(Child, lazy='subquery') }) When querying, all three choices of loader strategy are available on a per-query basis, using the :func:`~sqlalchemy.orm.joinedload`, :func:`~sqlalchemy.orm.subqueryload` and :func:`~sqlalchemy.orm.lazyload` query options: .. sourcecode:: python+sql # set children to load lazily session.query(Parent).options(lazyload('children')).all() # set children to load eagerly with a join session.query(Parent).options(joinedload('children')).all() # set children to load eagerly with a second statement session.query(Parent).options(subqueryload('children')).all() To reference a relationship that is deeper than one level, separate the names by periods: .. sourcecode:: python+sql session.query(Parent).options(joinedload('foo.bar.bat')).all() When using dot-separated names with :func:`~sqlalchemy.orm.joinedload` or :func:`~sqlalchemy.orm.subqueryload`, the option applies **only** to the actual attribute named, and **not** its ancestors. For example, suppose a mapping from ``A`` to ``B`` to ``C``, where the relationships, named ``atob`` and ``btoc``, are both lazy-loading. A statement like the following: .. sourcecode:: python+sql session.query(A).options(joinedload('atob.btoc')).all() will load only ``A`` objects to start. When the ``atob`` attribute on each ``A`` is accessed, the returned ``B`` objects will *eagerly* load their ``C`` objects. Therefore, to modify the eager load to load both ``atob`` as well as ``btoc``, place joinedloads for both: .. 
sourcecode:: python+sql

    session.query(A).options(joinedload('atob'), joinedload('atob.btoc')).all()

or more succinctly just use :func:`~sqlalchemy.orm.joinedload_all` or
:func:`~sqlalchemy.orm.subqueryload_all`:

.. sourcecode:: python+sql

    session.query(A).options(joinedload_all('atob.btoc')).all()

There are two other loader strategies available, **dynamic loading** and
**no loading**; these are described in :ref:`largecollections`.

Default Loading Strategies
--------------------------

.. versionadded:: 0.7.5
    Default loader strategies as a new feature.

Each of :func:`.joinedload`, :func:`.subqueryload`, :func:`.lazyload`, and
:func:`.noload` can be used to set the default style of :func:`.relationship`
loading for a particular query, affecting all :func:`.relationship` -mapped
attributes not otherwise specified in the :class:`.Query`.  This feature is
available by passing the string ``'*'`` as the argument to any of these
options::

    session.query(MyClass).options(lazyload('*'))

Above, the ``lazyload('*')`` option will supersede the ``lazy`` setting of all
:func:`.relationship` constructs in use for that query, except for those which
use the ``'dynamic'`` style of loading.  If some relationships specify
``lazy='joined'`` or ``lazy='subquery'``, for example, using ``lazyload('*')``
will unilaterally cause all those relationships to use ``'select'`` loading,
e.g. emit a SELECT statement when each attribute is accessed.

The option does not supersede loader options stated in the query, such as
:func:`.eagerload`, :func:`.subqueryload`, etc.  The query below will still
use joined loading for the ``widget`` relationship::

    session.query(MyClass).options(
                            lazyload('*'),
                            joinedload(MyClass.widget)
                        )

If multiple ``'*'`` options are passed, the last one overrides those
previously passed.

.. _zen_of_eager_loading:

The Zen of Eager Loading
-------------------------

The philosophy behind loader strategies is that any set of loading schemes can
be applied to a particular query, and *the results don't change* - only the
number of SQL statements required to fully load related objects and
collections changes.  A particular query might start out using all lazy loads.
After using it in context, it might be revealed that particular attributes or
collections are always accessed, and that it would be more efficient to change
the loader strategy for these.  The strategy can be changed with no other
modifications to the query; the results will remain identical, but fewer SQL
statements would be emitted.  In theory (and pretty much in practice), nothing
you can do to the :class:`.Query` would make it load a different set of
primary or related objects based on a change in loader strategy.

How :func:`joinedload` in particular achieves this result of not impacting
entity rows returned in any way is that it creates an anonymous alias of the
joins it adds to your query, so that they can't be referenced by other parts
of the query.  For example, the query below uses :func:`.joinedload` to create
a LEFT OUTER JOIN from ``users`` to ``addresses``, however the ``ORDER BY``
added against ``Address.email_address`` is not valid - the ``Address`` entity
is not named in the query:

.. sourcecode:: python+sql

    >>> jack = session.query(User).\
    ...     options(joinedload(User.addresses)).\
    ...     filter(User.name=='jack').\
    ...
order_by(Address.email_address).all() {opensql}SELECT addresses_1.id AS addresses_1_id, addresses_1.email_address AS addresses_1_email_address, addresses_1.user_id AS addresses_1_user_id, users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, users.password AS users_password FROM users LEFT OUTER JOIN addresses AS addresses_1 ON users.id = addresses_1.user_id WHERE users.name = ? ORDER BY addresses.email_address <-- this part is wrong ! ['jack'] Above, ``ORDER BY addresses.email_address`` is not valid since ``addresses`` is not in the FROM list. The correct way to load the ``User`` records and order by email address is to use :meth:`.Query.join`: .. sourcecode:: python+sql >>> jack = session.query(User).\ ... join(User.addresses).\ ... filter(User.name=='jack').\ ... order_by(Address.email_address).all() {opensql} SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, users.password AS users_password FROM users JOIN addresses ON users.id = addresses.user_id WHERE users.name = ? ORDER BY addresses.email_address ['jack'] The statement above is of course not the same as the previous one, in that the columns from ``addresses`` are not included in the result at all. We can add :func:`.joinedload` back in, so that there are two joins - one is that which we are ordering on, the other is used anonymously to load the contents of the ``User.addresses`` collection: .. sourcecode:: python+sql >>> jack = session.query(User).\ ... join(User.addresses).\ ... options(joinedload(User.addresses)).\ ... filter(User.name=='jack').\ ... order_by(Address.email_address).all() {opensql}SELECT addresses_1.id AS addresses_1_id, addresses_1.email_address AS addresses_1_email_address, addresses_1.user_id AS addresses_1_user_id, users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, users.password AS users_password FROM users JOIN addresses ON users.id = addresses.user_id LEFT OUTER JOIN addresses AS addresses_1 ON users.id = addresses_1.user_id WHERE users.name = ? ORDER BY addresses.email_address ['jack'] What we see above is that our usage of :meth:`.Query.join` is to supply JOIN clauses we'd like to use in subsequent query criterion, whereas our usage of :func:`.joinedload` only concerns itself with the loading of the ``User.addresses`` collection, for each ``User`` in the result. In this case, the two joins most probably appear redundant - which they are. If we wanted to use just one JOIN for collection loading as well as ordering, we use the :func:`.contains_eager` option, described in :ref:`contains_eager` below. But to see why :func:`joinedload` does what it does, consider if we were **filtering** on a particular ``Address``: .. sourcecode:: python+sql >>> jack = session.query(User).\ ... join(User.addresses).\ ... options(joinedload(User.addresses)).\ ... filter(User.name=='jack').\ ... filter(Address.email_address=='someaddress@foo.com').\ ... all() {opensql}SELECT addresses_1.id AS addresses_1_id, addresses_1.email_address AS addresses_1_email_address, addresses_1.user_id AS addresses_1_user_id, users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, users.password AS users_password FROM users JOIN addresses ON users.id = addresses.user_id LEFT OUTER JOIN addresses AS addresses_1 ON users.id = addresses_1.user_id WHERE users.name = ? AND addresses.email_address = ? ['jack', 'someaddress@foo.com'] Above, we can see that the two JOINs have very different roles. 
One will match exactly one row, that of the join of ``User`` and ``Address`` where ``Address.email_address=='someaddress@foo.com'``. The other LEFT OUTER JOIN will match *all* ``Address`` rows related to ``User``, and is only used to populate the ``User.addresses`` collection, for those ``User`` objects that are returned. By changing the usage of :func:`.joinedload` to another style of loading, we can change how the collection is loaded completely independently of SQL used to retrieve the actual ``User`` rows we want. Below we change :func:`.joinedload` into :func:`.subqueryload`: .. sourcecode:: python+sql >>> jack = session.query(User).\ ... join(User.addresses).\ ... options(subqueryload(User.addresses)).\ ... filter(User.name=='jack').\ ... filter(Address.email_address=='someaddress@foo.com').\ ... all() {opensql}SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, users.password AS users_password FROM users JOIN addresses ON users.id = addresses.user_id WHERE users.name = ? AND addresses.email_address = ? ['jack', 'someaddress@foo.com'] # ... subqueryload() emits a SELECT in order # to load all address records ... When using joined eager loading, if the query contains a modifier that impacts the rows returned externally to the joins, such as when using DISTINCT, LIMIT, OFFSET or equivalent, the completed statement is first wrapped inside a subquery, and the joins used specifically for joined eager loading are applied to the subquery. SQLAlchemy's joined eager loading goes the extra mile, and then ten miles further, to absolutely ensure that it does not affect the end result of the query, only the way collections and related objects are loaded, no matter what the format of the query is. What Kind of Loading to Use ? ----------------------------- Which type of loading to use typically comes down to optimizing the tradeoff between number of SQL executions, complexity of SQL emitted, and amount of data fetched. Lets take two examples, a :func:`~sqlalchemy.orm.relationship` which references a collection, and a :func:`~sqlalchemy.orm.relationship` that references a scalar many-to-one reference. * One to Many Collection * When using the default lazy loading, if you load 100 objects, and then access a collection on each of them, a total of 101 SQL statements will be emitted, although each statement will typically be a simple SELECT without any joins. * When using joined loading, the load of 100 objects and their collections will emit only one SQL statement. However, the total number of rows fetched will be equal to the sum of the size of all the collections, plus one extra row for each parent object that has an empty collection. Each row will also contain the full set of columns represented by the parents, repeated for each collection item - SQLAlchemy does not re-fetch these columns other than those of the primary key, however most DBAPIs (with some exceptions) will transmit the full data of each parent over the wire to the client connection in any case. Therefore joined eager loading only makes sense when the size of the collections are relatively small. The LEFT OUTER JOIN can also be performance intensive compared to an INNER join. * When using subquery loading, the load of 100 objects will emit two SQL statements. The second statement will fetch a total number of rows equal to the sum of the size of all collections. An INNER JOIN is used, and a minimum of parent columns are requested, only the primary keys. 
So a subquery load makes sense when the collections are larger.

  * When multiple levels of depth are used with joined or subquery loading,
    loading collections-within-collections will multiply the total number of
    rows fetched in a cartesian fashion.  Both forms of eager loading always
    join from the original parent class.

* Many to One Reference

  * When using the default lazy loading, a load of 100 objects will, as in the
    case of the collection, emit as many as 101 SQL statements.  However -
    there is a significant exception to this, in that if the many-to-one
    reference is a simple foreign key reference to the target's primary key,
    each reference will be checked first in the current identity map using
    :meth:`.Query.get`.  So here, if the collection of objects references a
    relatively small set of target objects, or the full set of possible target
    objects have already been loaded into the session and are strongly
    referenced, using the default of ``lazy='select'`` is by far the most
    efficient way to go.

  * When using joined loading, the load of 100 objects will emit only one SQL
    statement.  The join will be a LEFT OUTER JOIN, and the total number of
    rows will be equal to 100 in all cases.  If you know that each parent
    definitely has a child (i.e. the foreign key reference is NOT NULL), the
    joined load can be configured with ``innerjoin=True``, which is usually
    specified within the :func:`~sqlalchemy.orm.relationship`.  For a load of
    objects where there are many possible target references which may have not
    been loaded already, joined loading with an INNER JOIN is extremely
    efficient.

  * Subquery loading will issue a second load for all the child objects, so
    for a load of 100 objects there would be two SQL statements emitted.
    There's probably not much advantage here over joined loading, however,
    except perhaps that subquery loading can use an INNER JOIN in all cases
    whereas joined loading requires that the foreign key is NOT NULL.

.. _joinedload_and_join:

.. _contains_eager:

Routing Explicit Joins/Statements into Eagerly Loaded Collections
------------------------------------------------------------------

The behavior of :func:`~sqlalchemy.orm.joinedload()` is such that joins are
created automatically, using anonymous aliases as targets, the results of
which are routed into collections and scalar references on loaded objects.
It is often the case that a query already includes the necessary joins which
represent a particular collection or scalar reference, and the joins added by
the joinedload feature are redundant - yet you'd still like the
collections/references to be populated.

For this SQLAlchemy supplies the :func:`~sqlalchemy.orm.contains_eager()`
option.  This option is used in the same manner as the
:func:`~sqlalchemy.orm.joinedload()` option except it is assumed that the
:class:`~sqlalchemy.orm.query.Query` will specify the appropriate joins
explicitly.
Below it's used with a ``from_statement`` load:: # mapping is the users->addresses mapping mapper(User, users_table, properties={ 'addresses': relationship(Address, addresses_table) }) # define a query on USERS with an outer join to ADDRESSES statement = users_table.outerjoin(addresses_table).select().apply_labels() # construct a Query object which expects the "addresses" results query = session.query(User).options(contains_eager('addresses')) # get results normally r = query.from_statement(statement) It works just as well with an inline :meth:`.Query.join` or :meth:`.Query.outerjoin`:: session.query(User).outerjoin(User.addresses).options(contains_eager(User.addresses)).all() If the "eager" portion of the statement is "aliased", the ``alias`` keyword argument to :func:`~sqlalchemy.orm.contains_eager` may be used to indicate it. This is a string alias name or reference to an actual :class:`~sqlalchemy.sql.expression.Alias` (or other selectable) object: .. sourcecode:: python+sql # use an alias of the Address entity adalias = aliased(Address) # construct a Query object which expects the "addresses" results query = session.query(User).\ outerjoin(adalias, User.addresses).\ options(contains_eager(User.addresses, alias=adalias)) # get results normally {sql}r = query.all() SELECT users.user_id AS users_user_id, users.user_name AS users_user_name, adalias.address_id AS adalias_address_id, adalias.user_id AS adalias_user_id, adalias.email_address AS adalias_email_address, (...other columns...) FROM users LEFT OUTER JOIN email_addresses AS email_addresses_1 ON users.user_id = email_addresses_1.user_id The ``alias`` argument is used only as a source of columns to match up to the result set. You can use it to match up the result to arbitrary label names in a string SQL statement, by passing a :func:`.select` which links those labels to the mapped :class:`.Table`:: # label the columns of the addresses table eager_columns = select([ addresses.c.address_id.label('a1'), addresses.c.email_address.label('a2'), addresses.c.user_id.label('a3')]) # select from a raw SQL statement which uses those label names for the # addresses table. contains_eager() matches them up. query = session.query(User).\ from_statement("select users.*, addresses.address_id as a1, " "addresses.email_address as a2, addresses.user_id as a3 " "from users left outer join addresses on users.user_id=addresses.user_id").\ options(contains_eager(User.addresses, alias=eager_columns)) The path given as the argument to :func:`.contains_eager` needs to be a full path from the starting entity. For example if we were loading ``Users->orders->Order->items->Item``, the string version would look like:: query(User).options(contains_eager('orders', 'items')) Or using the class-bound descriptor:: query(User).options(contains_eager(User.orders, Order.items)) Relation Loader API -------------------- .. autofunction:: contains_alias .. autofunction:: contains_eager .. autofunction:: eagerload .. autofunction:: eagerload_all .. autofunction:: immediateload .. autofunction:: joinedload .. autofunction:: joinedload_all .. autofunction:: lazyload .. autofunction:: noload .. autofunction:: subqueryload .. autofunction:: subqueryload_all SQLAlchemy-0.8.4/doc/_sources/orm/mapper_config.txt0000644000076500000240000014267012251147171023002 0ustar classicstaff00000000000000.. module:: sqlalchemy.orm .. 
_mapper_config_toplevel: ==================== Mapper Configuration ==================== This section describes a variety of configurational patterns that are usable with mappers. It assumes you've worked through :ref:`ormtutorial_toplevel` and know how to construct and use rudimentary mappers and relationships. .. _classical_mapping: Classical Mappings ================== A *Classical Mapping* refers to the configuration of a mapped class using the :func:`.mapper` function, without using the Declarative system. As an example, start with the declarative mapping introduced in :ref:`ormtutorial_toplevel`:: class User(Base): __tablename__ = 'users' id = Column(Integer, primary_key=True) name = Column(String) fullname = Column(String) password = Column(String) In "classical" form, the table metadata is created separately with the :class:`.Table` construct, then associated with the ``User`` class via the :func:`.mapper` function:: from sqlalchemy import Table, MetaData, Column, ForeignKey, Integer, String from sqlalchemy.orm import mapper metadata = MetaData() user = Table('user', metadata, Column('id', Integer, primary_key=True), Column('name', String(50)), Column('fullname', String(50)), Column('password', String(12)) ) class User(object): def __init__(self, name, fullname, password): self.name = name self.fullname = fullname self.password = password mapper(User, user) Information about mapped attributes, such as relationships to other classes, are provided via the ``properties`` dictionary. The example below illustrates a second :class:`.Table` object, mapped to a class called ``Address``, then linked to ``User`` via :func:`.relationship`:: address = Table('address', metadata, Column('id', Integer, primary_key=True), Column('user_id', Integer, ForeignKey('user.id')), Column('email_address', String(50)) ) mapper(User, user, properties={ 'addresses' : relationship(Address, backref='user', order_by=address.c.id) }) mapper(Address, address) When using classical mappings, classes must be provided directly without the benefit of the "string lookup" system provided by Declarative. SQL expressions are typically specified in terms of the :class:`.Table` objects, i.e. ``address.c.id`` above for the ``Address`` relationship, and not ``Address.id``, as ``Address`` may not yet be linked to table metadata, nor can we specify a string here. Some examples in the documentation still use the classical approach, but note that the classical as well as Declarative approaches are **fully interchangeable**. Both systems ultimately create the same configuration, consisting of a :class:`.Table`, user-defined class, linked together with a :func:`.mapper`. When we talk about "the behavior of :func:`.mapper`", this includes when using the Declarative system as well - it's still used, just behind the scenes. Customizing Column Properties ============================== The default behavior of :func:`~.orm.mapper` is to assemble all the columns in the mapped :class:`.Table` into mapped object attributes, each of which are named according to the name of the column itself (specifically, the ``key`` attribute of :class:`.Column`). This behavior can be modified in several ways. .. 
_mapper_column_distinct_names: Naming Columns Distinctly from Attribute Names ---------------------------------------------- A mapping by default shares the same name for a :class:`.Column` as that of the mapped attribute - specifically it matches the :attr:`.Column.key` attribute on :class:`.Column`, which by default is the same as the :attr:`.Column.name`. The name assigned to the Python attribute which maps to :class:`.Column` can be different from either :attr:`.Column.name` or :attr:`.Column.key` just by assigning it that way, as we illustrate here in a Declarative mapping:: class User(Base): __tablename__ = 'user' id = Column('user_id', Integer, primary_key=True) name = Column('user_name', String(50)) Where above ``User.id`` resolves to a column named ``user_id`` and ``User.name`` resolves to a column named ``user_name``. When mapping to an existing table, the :class:`.Column` object can be referenced directly:: class User(Base): __table__ = user_table id = user_table.c.user_id name = user_table.c.user_name Or in a classical mapping, placed in the ``properties`` dictionary with the desired key:: mapper(User, user_table, properties={ 'id': user_table.c.user_id, 'name': user_table.c.user_name, }) In the next section we'll examine the usage of ``.key`` more closely. .. _mapper_automated_reflection_schemes: Automating Column Naming Schemes from Reflected Tables ------------------------------------------------------ In the previous section :ref:`mapper_column_distinct_names`, we showed how a :class:`.Column` explicitly mapped to a class can have a different attribute name than the column. But what if we aren't listing out :class:`.Column` objects explicitly, and instead are automating the production of :class:`.Table` objects using reflection (e.g. as described in :ref:`metadata_reflection_toplevel`)? In this case we can make use of the :meth:`.DDLEvents.column_reflect` event to intercept the production of :class:`.Column` objects and provide them with the :attr:`.Column.key` of our choice:: @event.listens_for(Table, "column_reflect") def column_reflect(inspector, table, column_info): # set column.key = "attr_" column_info['key'] = "attr_%s" % column_info['name'].lower() With the above event, the reflection of :class:`.Column` objects will be intercepted with our event that adds a new ".key" element, such as in a mapping as below:: class MyClass(Base): __table__ = Table("some_table", Base.metadata, autoload=True, autoload_with=some_engine) If we want to qualify our event to only react for the specific :class:`.MetaData` object above, we can check for it in our event:: @event.listens_for(Table, "column_reflect") def column_reflect(inspector, table, column_info): if table.metadata is Base.metadata: # set column.key = "attr_" column_info['key'] = "attr_%s" % column_info['name'].lower() .. _column_prefix: Naming All Columns with a Prefix -------------------------------- A quick approach to prefix column names, typically when mapping to an existing :class:`.Table` object, is to use ``column_prefix``:: class User(Base): __table__ = user_table __mapper_args__ = {'column_prefix':'_'} The above will place attribute names such as ``_user_id``, ``_user_name``, ``_password`` etc. on the mapped ``User`` class. This approach is uncommon in modern usage. For dealing with reflected tables, a more flexible approach is to use that described in :ref:`mapper_automated_reflection_schemes`. 
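As a quick illustration of the ``column_prefix`` mapping above (a minimal
sketch; ``User`` and its ``user_id`` / ``user_name`` / ``password`` columns
are those from the preceding example), the prefixed names behave like any
other mapped attributes::

    # hypothetical usage - each attribute maps to the column of the same
    # name, minus the configured prefix
    some_user = session.query(User).first()
    print some_user._user_name

    some_user._password = "newpassword"
    session.commit()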
Using column_property for column level options ----------------------------------------------- Options can be specified when mapping a :class:`.Column` using the :func:`.column_property` function. This function explicitly creates the :class:`.ColumnProperty` used by the :func:`.mapper` to keep track of the :class:`.Column`; normally, the :func:`.mapper` creates this automatically. Using :func:`.column_property`, we can pass additional arguments about how we'd like the :class:`.Column` to be mapped. Below, we pass an option ``active_history``, which specifies that a change to this column's value should result in the former value being loaded first:: from sqlalchemy.orm import column_property class User(Base): __tablename__ = 'user' id = Column(Integer, primary_key=True) name = column_property(Column(String(50)), active_history=True) :func:`.column_property` is also used to map a single attribute to multiple columns. This use case arises when mapping to a :func:`~.expression.join` which has attributes which are equated to each other:: class User(Base): __table__ = user.join(address) # assign "user.id", "address.user_id" to the # "id" attribute id = column_property(user_table.c.id, address_table.c.user_id) For more examples featuring this usage, see :ref:`maptojoin`. Another place where :func:`.column_property` is needed is to specify SQL expressions as mapped attributes, such as below where we create an attribute ``fullname`` that is the string concatenation of the ``firstname`` and ``lastname`` columns:: class User(Base): __tablename__ = 'user' id = Column(Integer, primary_key=True) firstname = Column(String(50)) lastname = Column(String(50)) fullname = column_property(firstname + " " + lastname) See examples of this usage at :ref:`mapper_sql_expressions`. .. autofunction:: column_property .. _include_exclude_cols: Mapping a Subset of Table Columns --------------------------------- Sometimes, a :class:`.Table` object was made available using the reflection process described at :ref:`metadata_reflection` to load the table's structure from the database. For such a table that has lots of columns that don't need to be referenced in the application, the ``include_properties`` or ``exclude_properties`` arguments can specify that only a subset of columns should be mapped. For example:: class User(Base): __table__ = user_table __mapper_args__ = { 'include_properties' :['user_id', 'user_name'] } ...will map the ``User`` class to the ``user_table`` table, only including the ``user_id`` and ``user_name`` columns - the rest are not referenced. Similarly:: class Address(Base): __table__ = address_table __mapper_args__ = { 'exclude_properties' : ['street', 'city', 'state', 'zip'] } ...will map the ``Address`` class to the ``address_table`` table, including all columns present except ``street``, ``city``, ``state``, and ``zip``. When this mapping is used, the columns that are not included will not be referenced in any SELECT statements emitted by :class:`.Query`, nor will there be any mapped attribute on the mapped class which represents the column; assigning an attribute of that name will have no effect beyond that of a normal Python attribute assignment. In some cases, multiple columns may have the same name, such as when mapping to a join of two or more tables that share some column name. 
``include_properties`` and ``exclude_properties`` can also accommodate :class:`.Column` objects to more accurately describe which columns should be included or excluded:: class UserAddress(Base): __table__ = user_table.join(addresses_table) __mapper_args__ = { 'exclude_properties' :[address_table.c.id], 'primary_key' : [user_table.c.id] } .. note:: insert and update defaults configured on individual :class:`.Column` objects, i.e. those described at :ref:`metadata_defaults` including those configured by the ``default``, ``update``, ``server_default`` and ``server_onupdate`` arguments, will continue to function normally even if those :class:`.Column` objects are not mapped. This is because in the case of ``default`` and ``update``, the :class:`.Column` object is still present on the underlying :class:`.Table`, thus allowing the default functions to take place when the ORM emits an INSERT or UPDATE, and in the case of ``server_default`` and ``server_onupdate``, the relational database itself maintains these functions. .. _deferred: Deferred Column Loading ======================== This feature allows particular columns of a table be loaded only upon direct access, instead of when the entity is queried using :class:`.Query`. This feature is useful when one wants to avoid loading a large text or binary field into memory when it's not needed. Individual columns can be lazy loaded by themselves or placed into groups that lazy-load together, using the :func:`.orm.deferred` function to mark them as "deferred". In the example below, we define a mapping that will load each of ``.excerpt`` and ``.photo`` in separate, individual-row SELECT statements when each attribute is first referenced on the individual object instance:: from sqlalchemy.orm import deferred from sqlalchemy import Integer, String, Text, Binary, Column class Book(Base): __tablename__ = 'book' book_id = Column(Integer, primary_key=True) title = Column(String(200), nullable=False) summary = Column(String(2000)) excerpt = deferred(Column(Text)) photo = deferred(Column(Binary)) Classical mappings as always place the usage of :func:`.orm.deferred` in the ``properties`` dictionary against the table-bound :class:`.Column`:: mapper(Book, book_table, properties={ 'photo':deferred(book_table.c.photo) }) Deferred columns can be associated with a "group" name, so that they load together when any of them are first accessed. The example below defines a mapping with a ``photos`` deferred group. When one ``.photo`` is accessed, all three photos will be loaded in one SELECT statement. The ``.excerpt`` will be loaded separately when it is accessed:: class Book(Base): __tablename__ = 'book' book_id = Column(Integer, primary_key=True) title = Column(String(200), nullable=False) summary = Column(String(2000)) excerpt = deferred(Column(Text)) photo1 = deferred(Column(Binary), group='photos') photo2 = deferred(Column(Binary), group='photos') photo3 = deferred(Column(Binary), group='photos') You can defer or undefer columns at the :class:`~sqlalchemy.orm.query.Query` level using the :func:`.orm.defer` and :func:`.orm.undefer` query options:: from sqlalchemy.orm import defer, undefer query = session.query(Book) query.options(defer('summary')).all() query.options(undefer('excerpt')).all() And an entire "deferred group", i.e. 
which uses the ``group`` keyword argument to :func:`.orm.deferred`, can be
undeferred using :func:`.orm.undefer_group`, sending in the group name::

    from sqlalchemy.orm import undefer_group

    query = session.query(Book)
    query.options(undefer_group('photos')).all()

Column Deferral API
-------------------

.. autofunction:: deferred

.. autofunction:: defer

.. autofunction:: undefer

.. autofunction:: undefer_group

.. _mapper_sql_expressions:

SQL Expressions as Mapped Attributes
=====================================

Attributes on a mapped class can be linked to SQL expressions, which can
be used in queries.

Using a Hybrid
--------------

The easiest and most flexible way to link relatively simple SQL expressions
to a class is to use a so-called "hybrid attribute", described in the section
:ref:`hybrids_toplevel`.  The hybrid provides for an expression that works
at both the Python level as well as at the SQL expression level.  For example,
below we map a class ``User``, containing attributes ``firstname`` and
``lastname``, and include a hybrid that will provide for us the ``fullname``,
which is the string concatenation of the two::

    from sqlalchemy.ext.hybrid import hybrid_property

    class User(Base):
        __tablename__ = 'user'
        id = Column(Integer, primary_key=True)
        firstname = Column(String(50))
        lastname = Column(String(50))

        @hybrid_property
        def fullname(self):
            return self.firstname + " " + self.lastname

Above, the ``fullname`` attribute is interpreted at both the instance and
class level, so that it is available from an instance::

    some_user = session.query(User).first()
    print some_user.fullname

as well as usable within queries::

    some_user = session.query(User).filter(User.fullname == "John Smith").first()

The string concatenation example is a simple one, where the Python expression
can be dual purposed at the instance and class level.  Often, the SQL expression
must be distinguished from the Python expression, which can be achieved using
:meth:`.hybrid_property.expression`.  Below we illustrate the case where a
conditional needs to be present inside the hybrid, using the ``if`` statement
in Python and the :func:`.sql.expression.case` construct for SQL expressions::

    from sqlalchemy.ext.hybrid import hybrid_property
    from sqlalchemy.sql import case

    class User(Base):
        __tablename__ = 'user'
        id = Column(Integer, primary_key=True)
        firstname = Column(String(50))
        lastname = Column(String(50))

        @hybrid_property
        def fullname(self):
            if self.firstname is not None:
                return self.firstname + " " + self.lastname
            else:
                return self.lastname

        @fullname.expression
        def fullname(cls):
            return case([
                (cls.firstname != None, cls.firstname + " " + cls.lastname),
            ], else_ = cls.lastname)

.. _mapper_column_property_sql_expressions:

Using column_property
---------------------

The :func:`.orm.column_property` function can be used to map a SQL
expression in a manner similar to a regularly mapped :class:`.Column`.
With this technique, the attribute is loaded along with all other
column-mapped attributes at load time.  This is in some cases an advantage
over the usage of hybrids, as the value can be loaded up front at the same
time as the parent row of the object, particularly if the expression is one
which links to other tables (typically as a correlated subquery) to access
data that wouldn't normally be available on an already loaded object.
Disadvantages to using :func:`.orm.column_property` for SQL expressions include that the expression must be compatible with the SELECT statement emitted for the class as a whole, and there are also some configurational quirks which can occur when using :func:`.orm.column_property` from declarative mixins. Our "fullname" example can be expressed using :func:`.orm.column_property` as follows:: from sqlalchemy.orm import column_property class User(Base): __tablename__ = 'user' id = Column(Integer, primary_key=True) firstname = Column(String(50)) lastname = Column(String(50)) fullname = column_property(firstname + " " + lastname) Correlated subqueries may be used as well. Below we use the :func:`.select` construct to create a SELECT that links together the count of ``Address`` objects available for a particular ``User``:: from sqlalchemy.orm import column_property from sqlalchemy import select, func from sqlalchemy import Column, Integer, String, ForeignKey from sqlalchemy.ext.declarative import declarative_base Base = declarative_base() class Address(Base): __tablename__ = 'address' id = Column(Integer, primary_key=True) user_id = Column(Integer, ForeignKey('user.id')) class User(Base): __tablename__ = 'user' id = Column(Integer, primary_key=True) address_count = column_property( select([func.count(Address.id)]).\ where(Address.user_id==id).\ correlate_except(Address) ) In the above example, we define a :func:`.select` construct like the following:: select([func.count(Address.id)]).\ where(Address.user_id==id).\ correlate_except(Address) The meaning of the above statement is, select the count of ``Address.id`` rows where the ``Address.user_id`` column is equated to ``id``, which in the context of the ``User`` class is the :class:`.Column` named ``id`` (note that ``id`` is also the name of a Python built in function, which is not what we want to use here - if we were outside of the ``User`` class definition, we'd use ``User.id``). The :meth:`.select.correlate_except` directive indicates that each element in the FROM clause of this :func:`.select` may be omitted from the FROM list (that is, correlated to the enclosing SELECT statement against ``User``) except for the one corresponding to ``Address``. This isn't strictly necessary, but prevents ``Address`` from being inadvertently omitted from the FROM list in the case of a long string of joins between ``User`` and ``Address`` tables where SELECT statements against ``Address`` are nested. If import issues prevent the :func:`.column_property` from being defined inline with the class, it can be assigned to the class after both are configured. 
In Declarative this has the effect of calling :meth:`.Mapper.add_property` to add an additional property after the fact:: User.address_count = column_property( select([func.count(Address.id)]).\ where(Address.user_id==User.id) ) For many-to-many relationships, use :func:`.and_` to join the fields of the association table to both tables in a relation, illustrated here with a classical mapping:: from sqlalchemy import and_ mapper(Author, authors, properties={ 'book_count': column_property( select([func.count(books.c.id)], and_( book_authors.c.author_id==authors.c.id, book_authors.c.book_id==books.c.id ))) }) Using a plain descriptor ------------------------- In cases where a SQL query more elaborate than what :func:`.orm.column_property` or :class:`.hybrid_property` can provide must be emitted, a regular Python function accessed as an attribute can be used, assuming the expression only needs to be available on an already-loaded instance. The function is decorated with Python's own ``@property`` decorator to mark it as a read-only attribute. Within the function, :func:`.object_session` is used to locate the :class:`.Session` corresponding to the current object, which is then used to emit a query:: from sqlalchemy.orm import object_session from sqlalchemy import select, func class User(Base): __tablename__ = 'user' id = Column(Integer, primary_key=True) firstname = Column(String(50)) lastname = Column(String(50)) @property def address_count(self): return object_session(self).\ scalar( select([func.count(Address.id)]).\ where(Address.user_id==self.id) ) The plain descriptor approach is useful as a last resort, but is less performant in the usual case than both the hybrid and column property approaches, in that it needs to emit a SQL query upon each access. Changing Attribute Behavior ============================ .. _simple_validators: Simple Validators ----------------- A quick way to add a "validation" routine to an attribute is to use the :func:`~sqlalchemy.orm.validates` decorator. An attribute validator can raise an exception, halting the process of mutating the attribute's value, or can change the given value into something different. Validators, like all attribute extensions, are only called by normal userland code; they are not issued when the ORM is populating the object:: from sqlalchemy.orm import validates class EmailAddress(Base): __tablename__ = 'address' id = Column(Integer, primary_key=True) email = Column(String) @validates('email') def validate_email(self, key, address): assert '@' in address return address Validators also receive collection events, when items are added to a collection:: from sqlalchemy.orm import validates class User(Base): # ... addresses = relationship("Address") @validates('addresses') def validate_address(self, key, address): assert '@' in address.email return address Note that the :func:`~.validates` decorator is a convenience function built on top of attribute events. An application that requires more control over configuration of attribute change behavior can make use of this system, described at :class:`~.AttributeEvents`. .. autofunction:: validates .. _synonyms: Using Descriptors and Hybrids ----------------------------- A more comprehensive way to produce modified behavior for an attribute is to use descriptors. These are commonly used in Python using the ``property()`` function. The standard SQLAlchemy technique for descriptors is to create a plain descriptor, and to have it read/write from a mapped attribute with a different name. 
Below we illustrate this using Python 2.6-style properties:: class EmailAddress(Base): __tablename__ = 'email_address' id = Column(Integer, primary_key=True) # name the attribute with an underscore, # different from the column name _email = Column("email", String) # then create an ".email" attribute # to get/set "._email" @property def email(self): return self._email @email.setter def email(self, email): self._email = email The approach above will work, but there's more we can add. While our ``EmailAddress`` object will shuttle the value through the ``email`` descriptor and into the ``_email`` mapped attribute, the class level ``EmailAddress.email`` attribute does not have the usual expression semantics usable with :class:`.Query`. To provide these, we instead use the :mod:`~sqlalchemy.ext.hybrid` extension as follows:: from sqlalchemy.ext.hybrid import hybrid_property class EmailAddress(Base): __tablename__ = 'email_address' id = Column(Integer, primary_key=True) _email = Column("email", String) @hybrid_property def email(self): return self._email @email.setter def email(self, email): self._email = email The ``.email`` attribute, in addition to providing getter/setter behavior when we have an instance of ``EmailAddress``, also provides a SQL expression when used at the class level, that is, from the ``EmailAddress`` class directly: .. sourcecode:: python+sql from sqlalchemy.orm import Session session = Session() {sql}address = session.query(EmailAddress).\ filter(EmailAddress.email == 'address@example.com').\ one() SELECT address.email AS address_email, address.id AS address_id FROM address WHERE address.email = ? ('address@example.com',) {stop} address.email = 'otheraddress@example.com' {sql}session.commit() UPDATE address SET email=? WHERE address.id = ? ('otheraddress@example.com', 1) COMMIT {stop} The :class:`~.hybrid_property` also allows us to change the behavior of the attribute, including defining separate behaviors when the attribute is accessed at the instance level versus at the class/expression level, using the :meth:`.hybrid_property.expression` modifier. Such as, if we wanted to add a host name automatically, we might define two sets of string manipulation logic:: class EmailAddress(Base): __tablename__ = 'email_address' id = Column(Integer, primary_key=True) _email = Column("email", String) @hybrid_property def email(self): """Return the value of _email up until the last twelve characters.""" return self._email[:-12] @email.setter def email(self, email): """Set the value of _email, tacking on the twelve character value @example.com.""" self._email = email + "@example.com" @email.expression def email(cls): """Produce a SQL expression that represents the value of the _email column, minus the last twelve characters.""" return func.substr(cls._email, 0, func.length(cls._email) - 12) Above, accessing the ``email`` property of an instance of ``EmailAddress`` will return the value of the ``_email`` attribute, removing or adding the hostname ``@example.com`` from the value. When we query against the ``email`` attribute, a SQL function is rendered which produces the same effect: .. sourcecode:: python+sql {sql}address = session.query(EmailAddress).filter(EmailAddress.email == 'address').one() SELECT address.email AS address_email, address.id AS address_id FROM address WHERE substr(address.email, ?, length(address.email) - ?) = ? (0, 12, 'address') {stop} Read more about Hybrids at :ref:`hybrids_toplevel`. 
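As a brief usage sketch of the hostname example above (the values here are
hypothetical; ``EmailAddress`` is the class just defined), the same ``email``
attribute serves both plain Python access and SQL expression construction::

    address = EmailAddress()
    address.email = 'address'           # stored in _email as 'address@example.com'
    assert address.email == 'address'   # the getter strips the hostname again

    # at the class level, the expression form renders the substr() function,
    # so the hybrid can be used wherever a SQL expression is accepted
    session.query(EmailAddress).order_by(EmailAddress.email).all()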
Synonyms
--------

Synonyms are a mapper-level construct that applies expression behavior
to a descriptor-based attribute.

.. versionchanged:: 0.7
    The functionality of synonym is superseded as of 0.7 by hybrid attributes.

.. autofunction:: synonym

.. _custom_comparators:

Operator Customization
----------------------

The "operators" used by the SQLAlchemy ORM and Core expression language
are fully customizable.  For example, the comparison expression
``User.name == 'ed'`` makes use of an operator built into Python itself
called ``operator.eq`` - the actual SQL construct which SQLAlchemy associates
with such an operator can be modified.  New operations can be associated
with column expressions as well.  The operators which take place for column
expressions are most directly redefined at the type level - see the
section :ref:`types_operators` for a description.

ORM level functions like :func:`.column_property`, :func:`.relationship`,
and :func:`.composite` also provide for operator redefinition at the ORM
level, by passing a :class:`.PropComparator` subclass to the
``comparator_factory`` argument of each function.  Customization of operators
at this level is a rare use case.  See the documentation at
:class:`.PropComparator` for an overview.

.. _mapper_composite:

Composite Column Types
=======================

Sets of columns can be associated with a single user-defined datatype.  The ORM
provides a single attribute which represents the group of columns using the
class you provide.

.. versionchanged:: 0.7
    Composites have been simplified such that they no longer "conceal" the
    underlying column-based attributes.  Additionally, in-place mutation is
    no longer automatic; see the section below on enabling mutability to
    support tracking of in-place changes.

A simple example represents pairs of columns as a ``Point`` object.
``Point`` represents such a pair as ``.x`` and ``.y``::

    class Point(object):
        def __init__(self, x, y):
            self.x = x
            self.y = y

        def __composite_values__(self):
            return self.x, self.y

        def __repr__(self):
            return "Point(x=%r, y=%r)" % (self.x, self.y)

        def __eq__(self, other):
            return isinstance(other, Point) and \
                other.x == self.x and \
                other.y == self.y

        def __ne__(self, other):
            return not self.__eq__(other)

The requirements for the custom datatype class are that it have a constructor
which accepts positional arguments corresponding to its column format, and
also provide a method ``__composite_values__()`` which returns the state of
the object as a list or tuple, in order of its column-based attributes.  It
also should supply adequate ``__eq__()`` and ``__ne__()`` methods which test
the equality of two instances.

We will create a mapping to a table ``vertice``, which represents two points
as ``x1/y1`` and ``x2/y2``.  These are created normally as :class:`.Column`
objects.
Then, the :func:`.composite` function is used to assign new attributes
that will represent sets of columns via the ``Point`` class::

    from sqlalchemy import Column, Integer
    from sqlalchemy.orm import composite
    from sqlalchemy.ext.declarative import declarative_base

    Base = declarative_base()

    class Vertex(Base):
        __tablename__ = 'vertice'

        id = Column(Integer, primary_key=True)
        x1 = Column(Integer)
        y1 = Column(Integer)
        x2 = Column(Integer)
        y2 = Column(Integer)

        start = composite(Point, x1, y1)
        end = composite(Point, x2, y2)

The equivalent classical mapping would define each :func:`.composite`
against the existing table::

    mapper(Vertex, vertice_table, properties={
        'start':composite(Point, vertice_table.c.x1, vertice_table.c.y1),
        'end':composite(Point, vertice_table.c.x2, vertice_table.c.y2),
    })

We can now persist and use ``Vertex`` instances, as well as query for them,
using the ``.start`` and ``.end`` attributes against ad-hoc ``Point`` instances:

.. sourcecode:: python+sql

    >>> v = Vertex(start=Point(3, 4), end=Point(5, 6))
    >>> session.add(v)
    >>> q = session.query(Vertex).filter(Vertex.start == Point(3, 4))
    {sql}>>> print q.first().start
    BEGIN (implicit)
    INSERT INTO vertice (x1, y1, x2, y2) VALUES (?, ?, ?, ?)
    (3, 4, 5, 6)
    SELECT vertice.id AS vertice_id,
            vertice.x1 AS vertice_x1,
            vertice.y1 AS vertice_y1,
            vertice.x2 AS vertice_x2,
            vertice.y2 AS vertice_y2
    FROM vertice
    WHERE vertice.x1 = ? AND vertice.y1 = ?
    LIMIT ? OFFSET ?
    (3, 4, 1, 0)
    {stop}Point(x=3, y=4)

.. autofunction:: composite

Tracking In-Place Mutations on Composites
-----------------------------------------

In-place changes to an existing composite value are not tracked automatically.
Instead, the composite class needs to provide events to its parent object
explicitly.  This task is largely automated via the usage of the
:class:`.MutableComposite` mixin, which uses events to associate each
user-defined composite object with all parent associations.  Please see the
example in :ref:`mutable_composites`.

.. versionchanged:: 0.7
    In-place changes to an existing composite value are no longer
    tracked automatically; the functionality is superseded by the
    :class:`.MutableComposite` class.

.. _composite_operations:

Redefining Comparison Operations for Composites
-----------------------------------------------

The "equals" comparison operation by default produces an AND of all
corresponding columns equated to one another.  This can be changed using
the ``comparator_factory`` argument to :func:`.composite`, where we specify
a custom :class:`.CompositeProperty.Comparator` class to define existing or
new operations.  Below we illustrate the "greater than" operator,
implementing the same expression that the base "greater than" does::

    from sqlalchemy.orm.properties import CompositeProperty
    from sqlalchemy import sql

    class PointComparator(CompositeProperty.Comparator):
        def __gt__(self, other):
            """redefine the 'greater than' operation"""

            return sql.and_(*[a > b for a, b in
                              zip(self.__clause_element__().clauses,
                                  other.__composite_values__())])

    class Vertex(Base):
        __tablename__ = 'vertice'

        id = Column(Integer, primary_key=True)
        x1 = Column(Integer)
        y1 = Column(Integer)
        x2 = Column(Integer)
        y2 = Column(Integer)

        start = composite(Point, x1, y1,
                            comparator_factory=PointComparator)
        end = composite(Point, x2, y2,
                            comparator_factory=PointComparator)

.. _maptojoin:

Mapping a Class against Multiple Tables
========================================

Mappers can be constructed against arbitrary relational units (called
*selectables*) in addition to plain tables.
For example, the :func:`~.expression.join` function creates a selectable unit comprised of multiple tables, complete with its own composite primary key, which can be mapped in the same way as a :class:`.Table`:: from sqlalchemy import Table, Column, Integer, \ String, MetaData, join, ForeignKey from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import column_property metadata = MetaData() # define two Table objects user_table = Table('user', metadata, Column('id', Integer, primary_key=True), Column('name', String), ) address_table = Table('address', metadata, Column('id', Integer, primary_key=True), Column('user_id', Integer, ForeignKey('user.id')), Column('email_address', String) ) # define a join between them. This # takes place across the user.id and address.user_id # columns. user_address_join = join(user_table, address_table) Base = declarative_base() # map to it class AddressUser(Base): __table__ = user_address_join id = column_property(user_table.c.id, address_table.c.user_id) address_id = address_table.c.id In the example above, the join expresses columns for both the ``user`` and the ``address`` table. The ``user.id`` and ``address.user_id`` columns are equated by foreign key, so in the mapping they are defined as one attribute, ``AddressUser.id``, using :func:`.column_property` to indicate a specialized column mapping. Based on this part of the configuration, the mapping will copy new primary key values from ``user.id`` into the ``address.user_id`` column when a flush occurs. Additionally, the ``address.id`` column is mapped explicitly to an attribute named ``address_id``. This is to **disambiguate** the mapping of the ``address.id`` column from the same-named ``AddressUser.id`` attribute, which here has been assigned to refer to the ``user`` table combined with the ``address.user_id`` foreign key. The natural primary key of the above mapping is the composite of ``(user.id, address.id)``, as these are the primary key columns of the ``user`` and ``address`` table combined together. The identity of an ``AddressUser`` object will be in terms of these two values, and is represented from an ``AddressUser`` object as ``(AddressUser.id, AddressUser.address_id)``. Mapping a Class against Arbitrary Selects ========================================= Similar to mapping against a join, a plain :func:`~.expression.select` object can be used with a mapper as well. The example fragment below illustrates mapping a class called ``Customer`` to a :func:`~.expression.select` which includes a join to a subquery:: from sqlalchemy import select, func subq = select([ func.count(orders.c.id).label('order_count'), func.max(orders.c.price).label('highest_order'), orders.c.customer_id ]).group_by(orders.c.customer_id).alias() customer_select = select([customers, subq]).\ select_from( join(customers, subq, customers.c.id == subq.c.customer_id) ).alias() class Customer(Base): __table__ = customer_select Above, the full row represented by ``customer_select`` will be all the columns of the ``customers`` table, in addition to those columns exposed by the ``subq`` subquery, which are ``order_count``, ``highest_order``, and ``customer_id``. Mapping the ``Customer`` class to this selectable then creates a class which will contain those attributes. When the ORM persists new instances of ``Customer``, only the ``customers`` table will actually receive an INSERT. 
This is because the primary key of the ``orders`` table is not represented in the mapping; the ORM will only emit an INSERT into a table for which it has mapped the primary key. .. note:: The practice of mapping to arbitrary SELECT statements, especially complex ones as above, is almost never needed; it necessarily tends to produce complex queries which are often less efficient than that which would be produced by direct query construction. The practice is to some degree based on the very early history of SQLAlchemy where the :func:`.mapper` construct was meant to represent the primary querying interface; in modern usage, the :class:`.Query` object can be used to construct virtually any SELECT statement, including complex composites, and should be favored over the "map-to-selectable" approach. Multiple Mappers for One Class ============================== In modern SQLAlchemy, a particular class is only mapped by one :func:`.mapper` at a time. The rationale here is that the :func:`.mapper` modifies the class itself, not only persisting it towards a particular :class:`.Table`, but also *instrumenting* attributes upon the class which are structured specifically according to the table metadata. One potential use case for another mapper to exist at the same time is if we wanted to load instances of our class not just from the immediate :class:`.Table` to which it is mapped, but from another selectable that is a derivation of that :class:`.Table`. While there technically is a way to create such a :func:`.mapper`, using the ``non_primary=True`` option, this approach is virtually never needed. Instead, we use the functionality of the :class:`.Query` object to achieve this, using a method such as :meth:`.Query.select_from` or :meth:`.Query.from_statement` to specify a derived selectable. Another potential use is if we genuinely want instances of our class to be persisted into different tables at different times; certain kinds of data sharding configurations may persist a particular class into tables that are identical in structure except for their name. For this kind of pattern, Python offers a better approach than the complexity of mapping the same class multiple times, which is to instead create new mapped classes for each target table. SQLAlchemy refers to this as the "entity name" pattern, which is described as a recipe at `Entity Name `_. .. _mapping_constructors: Constructors and Object Initialization ======================================= Mapping imposes no restrictions or requirements on the constructor (``__init__``) method for the class. You are free to require any arguments for the function that you wish, assign attributes to the instance that are unknown to the ORM, and generally do anything else you would normally do when writing a constructor for a Python class. The SQLAlchemy ORM does not call ``__init__`` when recreating objects from database rows. The ORM's process is somewhat akin to the Python standard library's ``pickle`` module, invoking the low level ``__new__`` method and then quietly restoring attributes directly on the instance rather than calling ``__init__``. If you need to do some setup on database-loaded instances before they're ready to use, you can use the ``@reconstructor`` decorator to tag a method as the ORM counterpart to ``__init__``. SQLAlchemy will call this method with no arguments every time it loads or reconstructs one of your instances. 
This is useful for recreating transient properties that are normally assigned in your ``__init__``:: from sqlalchemy import orm class MyMappedClass(object): def __init__(self, data): self.data = data # we need stuff on all instances, but not in the database. self.stuff = [] @orm.reconstructor def init_on_load(self): self.stuff = [] When ``obj = MyMappedClass()`` is executed, Python calls the ``__init__`` method as normal and the ``data`` argument is required. When instances are loaded during a :class:`~sqlalchemy.orm.query.Query` operation as in ``query(MyMappedClass).one()``, ``init_on_load`` is called. Any method may be tagged as the :func:`~sqlalchemy.orm.reconstructor`, even the ``__init__`` method. SQLAlchemy will call the reconstructor method with no arguments. Scalar (non-collection) database-mapped attributes of the instance will be available for use within the function. Eagerly-loaded collections are generally not yet available and will usually only contain the first element. ORM state changes made to objects at this stage will not be recorded for the next flush() operation, so the activity within a reconstructor should be conservative. :func:`~sqlalchemy.orm.reconstructor` is a shortcut into a larger system of "instance level" events, which can be subscribed to using the event API - see :class:`.InstanceEvents` for the full API description of these events. .. autofunction:: reconstructor .. _mapper_version_counter: Configuring a Version Counter ============================= The :class:`.Mapper` supports management of a :term:`version id column`, which is a single table column that increments or otherwise updates its value each time an ``UPDATE`` to the mapped table occurs. This value is checked each time the ORM emits an ``UPDATE`` or ``DELETE`` against the row to ensure that the value held in memory matches the database value. The purpose of this feature is to detect when two concurrent transactions are modifying the same row at roughly the same time, or alternatively to provide a guard against the usage of a "stale" row in a system that might be re-using data from a previous transaction without refreshing (e.g. if one sets ``expire_on_commit=False`` with a :class:`.Session`, it is possible to re-use the data from a previous transaction). .. topic:: Concurrent transaction updates When detecting concurrent updates within transactions, it is typically the case that the database's transaction isolation level is below the level of :term:`repeatable read`; otherwise, the transaction will not be exposed to a new row value created by a concurrent update which conflicts with the locally updated value. In this case, the SQLAlchemy versioning feature will typically not be useful for in-transaction conflict detection, though it still can be used for cross-transaction staleness detection. The database that enforces repeatable reads will typically either have locked the target row against a concurrent update, or is employing some form of multi version concurrency control such that it will emit an error when the transaction is committed. SQLAlchemy's version_id_col is an alternative which allows version tracking to occur for specific tables within a transaction that otherwise might not have this isolation level set. .. seealso:: `Repeatable Read Isolation Level `_ - Postgresql's implementation of repeatable read, including a description of the error condition. 
Simple Version Counting
-----------------------

The most straightforward way to track versions is to add an integer column
to the mapped table, then establish it as the ``version_id_col`` within the
mapper options::

    class User(Base):
        __tablename__ = 'user'

        id = Column(Integer, primary_key=True)
        version_id = Column(Integer, nullable=False)
        name = Column(String(50), nullable=False)

        __mapper_args__ = {
            "version_id_col": version_id
        }

Above, the ``User`` mapping tracks integer versions using the column
``version_id``.   When an object of type ``User`` is first flushed, the
``version_id`` column will be given a value of "1".   Then, an UPDATE
of the table later on will always be emitted in a manner similar to the
following::

    UPDATE user SET version_id=:version_id, name=:name
    WHERE user.id = :user_id AND user.version_id = :user_version_id
    {"name": "new name", "version_id": 2, "user_id": 1, "user_version_id": 1}

The above UPDATE statement updates the row that not only matches
``user.id = 1``, it also requires that ``user.version_id = 1``, where "1"
is the last version identifier known to have been used on this object.
If a transaction elsewhere has modified the row independently, this version
id will no longer match, and the UPDATE statement will report that no rows
matched; this is the condition that SQLAlchemy tests, that exactly one row
matched our UPDATE (or DELETE) statement.  If zero rows match, that indicates
our version of the data is stale, and a :class:`.StaleDataError` is raised.

.. _custom_version_counter:

Custom Version Counters / Types
-------------------------------

Other kinds of values or counters can be used for versioning.  Common types
include dates and GUIDs.  When using an alternate type or counter scheme,
SQLAlchemy provides a hook for this scheme using the ``version_id_generator``
argument, which accepts a version generation callable.  This callable is
passed the value of the current known version, and is expected to return
the subsequent version.

For example, if we wanted to track the versioning of our ``User`` class
using a randomly generated GUID, we could do this (note that some backends
support a native GUID type, but we illustrate here using a simple string)::

    import uuid

    class User(Base):
        __tablename__ = 'user'

        id = Column(Integer, primary_key=True)
        version_uuid = Column(String(32))
        name = Column(String(50), nullable=False)

        __mapper_args__ = {
            'version_id_col':version_uuid,
            'version_id_generator':lambda version: uuid.uuid4().hex
        }

The persistence engine will call upon ``uuid.uuid4()`` each time a
``User`` object is subject to an INSERT or an UPDATE.  In this case, our
version generation function can disregard the incoming value of ``version``,
as the ``uuid4()`` function generates identifiers without any prerequisite
value.  If we were using a sequential versioning scheme such as numeric or
a special character system, we could make use of the given ``version`` in
order to help determine the subsequent value.

.. seealso::

    :ref:`custom_guid_type`

Class Mapping API
=================

.. autofunction:: mapper

.. autofunction:: object_mapper

.. autofunction:: class_mapper

.. autofunction:: configure_mappers

.. autofunction:: clear_mappers

.. autofunction:: sqlalchemy.orm.util.identity_key

.. autofunction:: sqlalchemy.orm.util.polymorphic_union

.. autoclass:: sqlalchemy.orm.mapper.Mapper
    :members:
.. _query_api_toplevel:

Querying
========

This section provides API documentation for the :class:`.Query` object and
related constructs.

For an in-depth introduction to querying with the SQLAlchemy ORM, please see
the :ref:`ormtutorial_toplevel`.

.. module:: sqlalchemy.orm

The Query Object
----------------

:class:`~.Query` is produced in terms of a given :class:`~.Session`, using
the :func:`~.Session.query` function::

    q = session.query(SomeMappedClass)

Following is the full interface for the :class:`.Query` object.

.. autoclass:: sqlalchemy.orm.query.Query
   :members:

ORM-Specific Query Constructs
-----------------------------

.. autofunction:: sqlalchemy.orm.aliased

.. autoclass:: sqlalchemy.orm.util.AliasedClass

.. autoclass:: sqlalchemy.orm.util.AliasedInsp

.. autoclass:: sqlalchemy.util.KeyedTuple
    :members: keys, _fields, _asdict

.. autofunction:: join

.. autofunction:: outerjoin

.. autofunction:: with_parent

.. module:: sqlalchemy.orm

.. _relationship_config_toplevel:

Relationship Configuration
==========================

This section describes the :func:`relationship` function and provides an
in-depth discussion of its usage.   The reference material here continues
into the next section, :ref:`collections_toplevel`, which has additional
detail on configuration of collections via :func:`relationship`.

.. _relationship_patterns:

Basic Relational Patterns
--------------------------

A quick walkthrough of the basic relational patterns.

The imports used for each of the following sections are as follows::

    from sqlalchemy import Table, Column, Integer, ForeignKey
    from sqlalchemy.orm import relationship, backref
    from sqlalchemy.ext.declarative import declarative_base

    Base = declarative_base()

One To Many
~~~~~~~~~~~~

A one to many relationship places a foreign key on the child table referencing
the parent.  :func:`.relationship` is then specified on the parent, as
referencing a collection of items represented by the child::

    class Parent(Base):
        __tablename__ = 'parent'
        id = Column(Integer, primary_key=True)
        children = relationship("Child")

    class Child(Base):
        __tablename__ = 'child'
        id = Column(Integer, primary_key=True)
        parent_id = Column(Integer, ForeignKey('parent.id'))

To establish a bidirectional relationship in one-to-many, where the "reverse"
side is a many to one, specify the ``backref`` option::

    class Parent(Base):
        __tablename__ = 'parent'
        id = Column(Integer, primary_key=True)
        children = relationship("Child", backref="parent")

    class Child(Base):
        __tablename__ = 'child'
        id = Column(Integer, primary_key=True)
        parent_id = Column(Integer, ForeignKey('parent.id'))

``Child`` will get a ``parent`` attribute with many-to-one semantics.

Many To One
~~~~~~~~~~~~

Many to one places a foreign key in the parent table referencing the child.
:func:`.relationship` is declared on the parent, where a new scalar-holding attribute will be created:: class Parent(Base): __tablename__ = 'parent' id = Column(Integer, primary_key=True) child_id = Column(Integer, ForeignKey('child.id')) child = relationship("Child") class Child(Base): __tablename__ = 'child' id = Column(Integer, primary_key=True) Bidirectional behavior is achieved by specifying ``backref="parents"``, which will place a one-to-many collection on the ``Child`` class:: class Parent(Base): __tablename__ = 'parent' id = Column(Integer, primary_key=True) child_id = Column(Integer, ForeignKey('child.id')) child = relationship("Child", backref="parents") One To One ~~~~~~~~~~~ One To One is essentially a bidirectional relationship with a scalar attribute on both sides. To achieve this, the ``uselist=False`` flag indicates the placement of a scalar attribute instead of a collection on the "many" side of the relationship. To convert one-to-many into one-to-one:: class Parent(Base): __tablename__ = 'parent' id = Column(Integer, primary_key=True) child = relationship("Child", uselist=False, backref="parent") class Child(Base): __tablename__ = 'child' id = Column(Integer, primary_key=True) parent_id = Column(Integer, ForeignKey('parent.id')) Or to turn a one-to-many backref into one-to-one, use the :func:`.backref` function to provide arguments for the reverse side:: class Parent(Base): __tablename__ = 'parent' id = Column(Integer, primary_key=True) child_id = Column(Integer, ForeignKey('child.id')) child = relationship("Child", backref=backref("parent", uselist=False)) class Child(Base): __tablename__ = 'child' id = Column(Integer, primary_key=True) .. _relationships_many_to_many: Many To Many ~~~~~~~~~~~~~ Many to Many adds an association table between two classes. The association table is indicated by the ``secondary`` argument to :func:`.relationship`. Usually, the :class:`.Table` uses the :class:`.MetaData` object associated with the declarative base class, so that the :class:`.ForeignKey` directives can locate the remote tables with which to link:: association_table = Table('association', Base.metadata, Column('left_id', Integer, ForeignKey('left.id')), Column('right_id', Integer, ForeignKey('right.id')) ) class Parent(Base): __tablename__ = 'left' id = Column(Integer, primary_key=True) children = relationship("Child", secondary=association_table) class Child(Base): __tablename__ = 'right' id = Column(Integer, primary_key=True) For a bidirectional relationship, both sides of the relationship contain a collection. The ``backref`` keyword will automatically use the same ``secondary`` argument for the reverse relationship:: association_table = Table('association', Base.metadata, Column('left_id', Integer, ForeignKey('left.id')), Column('right_id', Integer, ForeignKey('right.id')) ) class Parent(Base): __tablename__ = 'left' id = Column(Integer, primary_key=True) children = relationship("Child", secondary=association_table, backref="parents") class Child(Base): __tablename__ = 'right' id = Column(Integer, primary_key=True) The ``secondary`` argument of :func:`.relationship` also accepts a callable that returns the ultimate argument, which is evaluated only when mappers are first used. 
Using this, we can define the ``association_table`` at a later point, as long
as it's available to the callable after all module initialization is complete::

    class Parent(Base):
        __tablename__ = 'left'
        id = Column(Integer, primary_key=True)
        children = relationship("Child",
                        secondary=lambda: association_table,
                        backref="parents")

With the declarative extension in use, the traditional "string name of the
table" is accepted as well, matching the name of the table as stored in
``Base.metadata.tables``::

    class Parent(Base):
        __tablename__ = 'left'
        id = Column(Integer, primary_key=True)
        children = relationship("Child",
                        secondary="association",
                        backref="parents")

Deleting Rows from the Many to Many Table
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

A behavior which is unique to the ``secondary`` argument to
:func:`.relationship` is that the :class:`.Table` which is specified here is
automatically subject to INSERT and DELETE statements, as objects are added
or removed from the collection.  There is **no need to delete from this table
manually**.   The act of removing a record from the collection will have the
effect of the row being deleted on flush::

    # row will be deleted from the "secondary" table
    # automatically
    myparent.children.remove(somechild)

A question which often arises is how the row in the "secondary" table can be
deleted when the child object is handed directly to :meth:`.Session.delete`::

    session.delete(somechild)

There are several possibilities here:

* If there is a :func:`.relationship` from ``Parent`` to ``Child``, but there
  is **not** a reverse-relationship that links a particular ``Child`` to each
  ``Parent``, SQLAlchemy will not have any awareness that when deleting this
  particular ``Child`` object, it needs to maintain the "secondary" table that
  links it to the ``Parent``.  No delete of the "secondary" table will occur.
* If there is a relationship that links a particular ``Child`` to each
  ``Parent``, suppose it's called ``Child.parents``, SQLAlchemy by default
  will load in the ``Child.parents`` collection to locate all ``Parent``
  objects, and remove each row from the "secondary" table which establishes
  this link.  Note that this relationship does not need to be bidirectional;
  SQLAlchemy is strictly looking at every :func:`.relationship` associated
  with the ``Child`` object being deleted.
* A higher-performing option here is to use ON DELETE CASCADE directives
  with the foreign keys used by the database.   Assuming the database supports
  this feature, the database itself can be made to automatically delete rows
  in the "secondary" table as referencing rows in "child" are deleted.
  SQLAlchemy can be instructed to forego actively loading in the
  ``Child.parents`` collection in this case using the ``passive_deletes=True``
  directive on :func:`.relationship`; see :ref:`passive_deletes` for more
  details on this.

Note again, these behaviors are *only* relevant to the ``secondary`` option
used with :func:`.relationship`.   If dealing with association tables that
are mapped explicitly and are *not* present in the ``secondary`` option
of a relevant :func:`.relationship`, cascade rules can be used instead
to automatically delete entities in reaction to a related entity
being deleted - see :ref:`unitofwork_cascades` for information on this feature.

.. _association_pattern:

Association Object
~~~~~~~~~~~~~~~~~~

The association object pattern is a variant on many-to-many: it's used
when your association table contains additional columns beyond those
which are foreign keys to the left and right tables.
Instead of using the ``secondary`` argument, you map a new class directly to the association table. The left side of the relationship references the association object via one-to-many, and the association class references the right side via many-to-one. Below we illustrate an association table mapped to the ``Association`` class which includes a column called ``extra_data``, which is a string value that is stored along with each association between ``Parent`` and ``Child``:: class Association(Base): __tablename__ = 'association' left_id = Column(Integer, ForeignKey('left.id'), primary_key=True) right_id = Column(Integer, ForeignKey('right.id'), primary_key=True) extra_data = Column(String(50)) child = relationship("Child") class Parent(Base): __tablename__ = 'left' id = Column(Integer, primary_key=True) children = relationship("Association") class Child(Base): __tablename__ = 'right' id = Column(Integer, primary_key=True) The bidirectional version adds backrefs to both relationships:: class Association(Base): __tablename__ = 'association' left_id = Column(Integer, ForeignKey('left.id'), primary_key=True) right_id = Column(Integer, ForeignKey('right.id'), primary_key=True) extra_data = Column(String(50)) child = relationship("Child", backref="parent_assocs") class Parent(Base): __tablename__ = 'left' id = Column(Integer, primary_key=True) children = relationship("Association", backref="parent") class Child(Base): __tablename__ = 'right' id = Column(Integer, primary_key=True) Working with the association pattern in its direct form requires that child objects are associated with an association instance before being appended to the parent; similarly, access from parent to child goes through the association object:: # create parent, append a child via association p = Parent() a = Association(extra_data="some data") a.child = Child() p.children.append(a) # iterate through child objects via association, including association # attributes for assoc in p.children: print assoc.extra_data print assoc.child To enhance the association object pattern such that direct access to the ``Association`` object is optional, SQLAlchemy provides the :ref:`associationproxy_toplevel` extension. This extension allows the configuration of attributes which will access two "hops" with a single access, one "hop" to the associated object, and a second to a target attribute. .. note:: When using the association object pattern, it is advisable that the association-mapped table not be used as the ``secondary`` argument on a :func:`.relationship` elsewhere, unless that :func:`.relationship` contains the option ``viewonly=True``. SQLAlchemy otherwise may attempt to emit redundant INSERT and DELETE statements on the same table, if similar state is detected on the related attribute as well as the associated object. .. _self_referential: Adjacency List Relationships ----------------------------- The **adjacency list** pattern is a common relational pattern whereby a table contains a foreign key reference to itself. This is the most common way to represent hierarchical data in flat tables. Other methods include **nested sets**, sometimes called "modified preorder", as well as **materialized path**. 
Despite the appeal that modified preorder has when evaluated for its fluency within SQL queries, the adjacency list model is probably the most appropriate pattern for the large majority of hierarchical storage needs, for reasons of concurrency, reduced complexity, and that modified preorder has little advantage over an application which can fully load subtrees into the application space. In this example, we'll work with a single mapped class called ``Node``, representing a tree structure:: class Node(Base): __tablename__ = 'node' id = Column(Integer, primary_key=True) parent_id = Column(Integer, ForeignKey('node.id')) data = Column(String(50)) children = relationship("Node") With this structure, a graph such as the following:: root --+---> child1 +---> child2 --+--> subchild1 | +--> subchild2 +---> child3 Would be represented with data such as:: id parent_id data --- ------- ---- 1 NULL root 2 1 child1 3 1 child2 4 3 subchild1 5 3 subchild2 6 1 child3 The :func:`.relationship` configuration here works in the same way as a "normal" one-to-many relationship, with the exception that the "direction", i.e. whether the relationship is one-to-many or many-to-one, is assumed by default to be one-to-many. To establish the relationship as many-to-one, an extra directive is added known as ``remote_side``, which is a :class:`.Column` or collection of :class:`.Column` objects that indicate those which should be considered to be "remote":: class Node(Base): __tablename__ = 'node' id = Column(Integer, primary_key=True) parent_id = Column(Integer, ForeignKey('node.id')) data = Column(String(50)) parent = relationship("Node", remote_side=[id]) Where above, the ``id`` column is applied as the ``remote_side`` of the ``parent`` :func:`.relationship`, thus establishing ``parent_id`` as the "local" side, and the relationship then behaves as a many-to-one. As always, both directions can be combined into a bidirectional relationship using the :func:`.backref` function:: class Node(Base): __tablename__ = 'node' id = Column(Integer, primary_key=True) parent_id = Column(Integer, ForeignKey('node.id')) data = Column(String(50)) children = relationship("Node", backref=backref('parent', remote_side=[id]) ) There are several examples included with SQLAlchemy illustrating self-referential strategies; these include :ref:`examples_adjacencylist` and :ref:`examples_xmlpersistence`. Composite Adjacency Lists ~~~~~~~~~~~~~~~~~~~~~~~~~ A sub-category of the adjacency list relationship is the rare case where a particular column is present on both the "local" and "remote" side of the join condition. An example is the ``Folder`` class below; using a composite primary key, the ``account_id`` column refers to itself, to indicate sub folders which are within the same account as that of the parent; while ``folder_id`` refers to a specific folder within that account:: class Folder(Base): __tablename__ = 'folder' __table_args__ = ( ForeignKeyConstraint( ['account_id', 'parent_id'], ['folder.account_id', 'folder.folder_id']), ) account_id = Column(Integer, primary_key=True) folder_id = Column(Integer, primary_key=True) parent_id = Column(Integer) name = Column(String) parent_folder = relationship("Folder", backref="child_folders", remote_side=[account_id, folder_id] ) Above, we pass ``account_id`` into the ``remote_side`` list. 
:func:`.relationship` recognizes that the ``account_id`` column here is on both sides, and aligns the "remote" column along with the ``folder_id`` column, which it recognizes as uniquely present on the "remote" side. .. versionadded:: 0.8 Support for self-referential composite keys in :func:`.relationship` where a column points to itself. Self-Referential Query Strategies ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Querying of self-referential structures works like any other query:: # get all nodes named 'child2' session.query(Node).filter(Node.data=='child2') However extra care is needed when attempting to join along the foreign key from one level of the tree to the next. In SQL, a join from a table to itself requires that at least one side of the expression be "aliased" so that it can be unambiguously referred to. Recall from :ref:`ormtutorial_aliases` in the ORM tutorial that the :func:`.orm.aliased` construct is normally used to provide an "alias" of an ORM entity. Joining from ``Node`` to itself using this technique looks like: .. sourcecode:: python+sql from sqlalchemy.orm import aliased nodealias = aliased(Node) {sql}session.query(Node).filter(Node.data=='subchild1').\ join(nodealias, Node.parent).\ filter(nodealias.data=="child2").\ all() SELECT node.id AS node_id, node.parent_id AS node_parent_id, node.data AS node_data FROM node JOIN node AS node_1 ON node.parent_id = node_1.id WHERE node.data = ? AND node_1.data = ? ['subchild1', 'child2'] :meth:`.Query.join` also includes a feature known as ``aliased=True`` that can shorten the verbosity self-referential joins, at the expense of query flexibility. This feature performs a similar "aliasing" step to that above, without the need for an explicit entity. Calls to :meth:`.Query.filter` and similar subsequent to the aliased join will **adapt** the ``Node`` entity to be that of the alias: .. sourcecode:: python+sql {sql}session.query(Node).filter(Node.data=='subchild1').\ join(Node.parent, aliased=True).\ filter(Node.data=='child2').\ all() SELECT node.id AS node_id, node.parent_id AS node_parent_id, node.data AS node_data FROM node JOIN node AS node_1 ON node_1.id = node.parent_id WHERE node.data = ? AND node_1.data = ? ['subchild1', 'child2'] To add criterion to multiple points along a longer join, add ``from_joinpoint=True`` to the additional :meth:`~.Query.join` calls: .. sourcecode:: python+sql # get all nodes named 'subchild1' with a # parent named 'child2' and a grandparent 'root' {sql}session.query(Node).\ filter(Node.data=='subchild1').\ join(Node.parent, aliased=True).\ filter(Node.data=='child2').\ join(Node.parent, aliased=True, from_joinpoint=True).\ filter(Node.data=='root').\ all() SELECT node.id AS node_id, node.parent_id AS node_parent_id, node.data AS node_data FROM node JOIN node AS node_1 ON node_1.id = node.parent_id JOIN node AS node_2 ON node_2.id = node_1.parent_id WHERE node.data = ? AND node_1.data = ? AND node_2.data = ? ['subchild1', 'child2', 'root'] :meth:`.Query.reset_joinpoint` will also remove the "aliasing" from filtering calls:: session.query(Node).\ join(Node.children, aliased=True).\ filter(Node.data == 'foo').\ reset_joinpoint().\ filter(Node.data == 'bar') For an example of using ``aliased=True`` to arbitrarily join along a chain of self-referential nodes, see :ref:`examples_xmlpersistence`. 
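For comparison, the same multi-level criteria can also be expressed with
explicit :func:`.orm.aliased` entities rather than ``aliased=True``.  Below is
a minimal sketch, assuming the same ``Node`` mapping and an existing
``session``, which spells out each level of the join with its own alias::

    from sqlalchemy.orm import aliased

    parent = aliased(Node)
    grandparent = aliased(Node)

    # nodes named 'subchild1', whose parent is named 'child2',
    # and whose grandparent is named 'root'
    session.query(Node).\
        filter(Node.data == 'subchild1').\
        join(parent, Node.parent).\
        filter(parent.data == 'child2').\
        join(grandparent, parent.parent).\
        filter(grandparent.data == 'root').\
        all()

The explicit form is more verbose, but each alias remains available to
subsequent :meth:`.Query.filter` calls without relying on the implicit
"joinpoint" adapting behavior.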
Configuring Self-Referential Eager Loading ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Eager loading of relationships occurs using joins or outerjoins from parent to child table during a normal query operation, such that the parent and its immediate child collection or reference can be populated from a single SQL statement, or a second statement for all immediate child collections. SQLAlchemy's joined and subquery eager loading use aliased tables in all cases when joining to related items, so are compatible with self-referential joining. However, to use eager loading with a self-referential relationship, SQLAlchemy needs to be told how many levels deep it should join and/or query; otherwise the eager load will not take place at all. This depth setting is configured via ``join_depth``: .. sourcecode:: python+sql class Node(Base): __tablename__ = 'node' id = Column(Integer, primary_key=True) parent_id = Column(Integer, ForeignKey('node.id')) data = Column(String(50)) children = relationship("Node", lazy="joined", join_depth=2) {sql}session.query(Node).all() SELECT node_1.id AS node_1_id, node_1.parent_id AS node_1_parent_id, node_1.data AS node_1_data, node_2.id AS node_2_id, node_2.parent_id AS node_2_parent_id, node_2.data AS node_2_data, node.id AS node_id, node.parent_id AS node_parent_id, node.data AS node_data FROM node LEFT OUTER JOIN node AS node_2 ON node.id = node_2.parent_id LEFT OUTER JOIN node AS node_1 ON node_2.id = node_1.parent_id [] .. _relationships_backref: Linking Relationships with Backref ---------------------------------- The ``backref`` keyword argument was first introduced in :ref:`ormtutorial_toplevel`, and has been mentioned throughout many of the examples here. What does it actually do ? Let's start with the canonical ``User`` and ``Address`` scenario:: from sqlalchemy import Integer, ForeignKey, String, Column from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import relationship Base = declarative_base() class User(Base): __tablename__ = 'user' id = Column(Integer, primary_key=True) name = Column(String) addresses = relationship("Address", backref="user") class Address(Base): __tablename__ = 'address' id = Column(Integer, primary_key=True) email = Column(String) user_id = Column(Integer, ForeignKey('user.id')) The above configuration establishes a collection of ``Address`` objects on ``User`` called ``User.addresses``. It also establishes a ``.user`` attribute on ``Address`` which will refer to the parent ``User`` object. In fact, the ``backref`` keyword is only a common shortcut for placing a second ``relationship`` onto the ``Address`` mapping, including the establishment of an event listener on both sides which will mirror attribute operations in both directions. The above configuration is equivalent to:: from sqlalchemy import Integer, ForeignKey, String, Column from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import relationship Base = declarative_base() class User(Base): __tablename__ = 'user' id = Column(Integer, primary_key=True) name = Column(String) addresses = relationship("Address", back_populates="user") class Address(Base): __tablename__ = 'address' id = Column(Integer, primary_key=True) email = Column(String) user_id = Column(Integer, ForeignKey('user.id')) user = relationship("User", back_populates="addresses") Above, we add a ``.user`` relationship to ``Address`` explicitly. 
On both relationships, the ``back_populates`` directive tells each relationship about the other one, indicating that they should establish "bidirectional" behavior between each other. The primary effect of this configuration is that the relationship adds event handlers to both attributes which have the behavior of "when an append or set event occurs here, set ourselves onto the incoming attribute using this particular attribute name". The behavior is illustrated as follows. Start with a ``User`` and an ``Address`` instance. The ``.addresses`` collection is empty, and the ``.user`` attribute is ``None``:: >>> u1 = User() >>> a1 = Address() >>> u1.addresses [] >>> print a1.user None However, once the ``Address`` is appended to the ``u1.addresses`` collection, both the collection and the scalar attribute have been populated:: >>> u1.addresses.append(a1) >>> u1.addresses [<__main__.Address object at 0x12a6ed0>] >>> a1.user <__main__.User object at 0x12a6590> This behavior of course works in reverse for removal operations as well, as well as for equivalent operations on both sides. Such as when ``.user`` is set again to ``None``, the ``Address`` object is removed from the reverse collection:: >>> a1.user = None >>> u1.addresses [] The manipulation of the ``.addresses`` collection and the ``.user`` attribute occurs entirely in Python without any interaction with the SQL database. Without this behavior, the proper state would be apparent on both sides once the data has been flushed to the database, and later reloaded after a commit or expiration operation occurs. The ``backref``/``back_populates`` behavior has the advantage that common bidirectional operations can reflect the correct state without requiring a database round trip. Remember, when the ``backref`` keyword is used on a single relationship, it's exactly the same as if the above two relationships were created individually using ``back_populates`` on each. Backref Arguments ~~~~~~~~~~~~~~~~~~ We've established that the ``backref`` keyword is merely a shortcut for building two individual :func:`.relationship` constructs that refer to each other. Part of the behavior of this shortcut is that certain configurational arguments applied to the :func:`.relationship` will also be applied to the other direction - namely those arguments that describe the relationship at a schema level, and are unlikely to be different in the reverse direction. The usual case here is a many-to-many :func:`.relationship` that has a ``secondary`` argument, or a one-to-many or many-to-one which has a ``primaryjoin`` argument (the ``primaryjoin`` argument is discussed in :ref:`relationship_primaryjoin`). 
Such as if we limited the list of ``Address`` objects to those which start with "tony":: from sqlalchemy import Integer, ForeignKey, String, Column from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import relationship Base = declarative_base() class User(Base): __tablename__ = 'user' id = Column(Integer, primary_key=True) name = Column(String) addresses = relationship("Address", primaryjoin="and_(User.id==Address.user_id, " "Address.email.startswith('tony'))", backref="user") class Address(Base): __tablename__ = 'address' id = Column(Integer, primary_key=True) email = Column(String) user_id = Column(Integer, ForeignKey('user.id')) We can observe, by inspecting the resulting property, that both sides of the relationship have this join condition applied:: >>> print User.addresses.property.primaryjoin "user".id = address.user_id AND address.email LIKE :email_1 || '%%' >>> >>> print Address.user.property.primaryjoin "user".id = address.user_id AND address.email LIKE :email_1 || '%%' >>> This reuse of arguments should pretty much do the "right thing" - it uses only arguments that are applicable, and in the case of a many-to-many relationship, will reverse the usage of ``primaryjoin`` and ``secondaryjoin`` to correspond to the other direction (see the example in :ref:`self_referential_many_to_many` for this). It's very often the case however that we'd like to specify arguments that are specific to just the side where we happened to place the "backref". This includes :func:`.relationship` arguments like ``lazy``, ``remote_side``, ``cascade`` and ``cascade_backrefs``. For this case we use the :func:`.backref` function in place of a string:: # from sqlalchemy.orm import backref class User(Base): __tablename__ = 'user' id = Column(Integer, primary_key=True) name = Column(String) addresses = relationship("Address", backref=backref("user", lazy="joined")) Where above, we placed a ``lazy="joined"`` directive only on the ``Address.user`` side, indicating that when a query against ``Address`` is made, a join to the ``User`` entity should be made automatically which will populate the ``.user`` attribute of each returned ``Address``. The :func:`.backref` function formatted the arguments we gave it into a form that is interpreted by the receiving :func:`.relationship` as additional arguments to be applied to the new relationship it creates. One Way Backrefs ~~~~~~~~~~~~~~~~~ An unusual case is that of the "one way backref". This is where the "back-populating" behavior of the backref is only desirable in one direction. An example of this is a collection which contains a filtering ``primaryjoin`` condition. We'd like to append items to this collection as needed, and have them populate the "parent" object on the incoming object. However, we'd also like to have items that are not part of the collection, but still have the same "parent" association - these items should never be in the collection. Taking our previous example, where we established a ``primaryjoin`` that limited the collection only to ``Address`` objects whose email address started with the word ``tony``, the usual backref behavior is that all items populate in both directions. We wouldn't want this behavior for a case like the following:: >>> u1 = User() >>> a1 = Address(email='mary') >>> a1.user = u1 >>> u1.addresses [<__main__.Address object at 0x1411910>] Above, the ``Address`` object that doesn't match the criterion of "starts with 'tony'" is present in the ``addresses`` collection of ``u1``. 
After these objects are flushed, the transaction committed and their attributes expired for a re-load, the ``addresses`` collection will hit the database on next access and no longer have this ``Address`` object present, due to the filtering condition. But we can do away with this unwanted side of the "backref" behavior on the Python side by using two separate :func:`.relationship` constructs, placing ``back_populates`` only on one side:: from sqlalchemy import Integer, ForeignKey, String, Column from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import relationship Base = declarative_base() class User(Base): __tablename__ = 'user' id = Column(Integer, primary_key=True) name = Column(String) addresses = relationship("Address", primaryjoin="and_(User.id==Address.user_id, " "Address.email.startswith('tony'))", back_populates="user") class Address(Base): __tablename__ = 'address' id = Column(Integer, primary_key=True) email = Column(String) user_id = Column(Integer, ForeignKey('user.id')) user = relationship("User") With the above scenario, appending an ``Address`` object to the ``.addresses`` collection of a ``User`` will always establish the ``.user`` attribute on that ``Address``:: >>> u1 = User() >>> a1 = Address(email='tony') >>> u1.addresses.append(a1) >>> a1.user <__main__.User object at 0x1411850> However, applying a ``User`` to the ``.user`` attribute of an ``Address``, will not append the ``Address`` object to the collection:: >>> a2 = Address(email='mary') >>> a2.user = u1 >>> a2 in u1.addresses False Of course, we've disabled some of the usefulness of ``backref`` here, in that when we do append an ``Address`` that corresponds to the criteria of ``email.startswith('tony')``, it won't show up in the ``User.addresses`` collection until the session is flushed, and the attributes reloaded after a commit or expire operation. While we could consider an attribute event that checks this criterion in Python, this starts to cross the line of duplicating too much SQL behavior in Python. The backref behavior itself is only a slight transgression of this philosophy - SQLAlchemy tries to keep these to a minimum overall. .. _relationship_configure_joins: Configuring how Relationship Joins ------------------------------------ :func:`.relationship` will normally create a join between two tables by examining the foreign key relationship between the two tables to determine which columns should be compared. There are a variety of situations where this behavior needs to be customized. .. _relationship_foreign_keys: Handling Multiple Join Paths ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ One of the most common situations to deal with is when there are more than one foreign key path between two tables. 
Consider a ``Customer`` class that contains two foreign keys to an ``Address`` class:: from sqlalchemy import Integer, ForeignKey, String, Column from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import relationship Base = declarative_base() class Customer(Base): __tablename__ = 'customer' id = Column(Integer, primary_key=True) name = Column(String) billing_address_id = Column(Integer, ForeignKey("address.id")) shipping_address_id = Column(Integer, ForeignKey("address.id")) billing_address = relationship("Address") shipping_address = relationship("Address") class Address(Base): __tablename__ = 'address' id = Column(Integer, primary_key=True) street = Column(String) city = Column(String) state = Column(String) zip = Column(String) The above mapping, when we attempt to use it, will produce the error:: sqlalchemy.exc.AmbiguousForeignKeysError: Could not determine join condition between parent/child tables on relationship Customer.billing_address - there are multiple foreign key paths linking the tables. Specify the 'foreign_keys' argument, providing a list of those columns which should be counted as containing a foreign key reference to the parent table. The above message is pretty long. There are many potential messages that :func:`.relationship` can return, which have been carefully tailored to detect a variety of common configurational issues; most will suggest the additional configuration that's needed to resolve the ambiguity or other missing information. In this case, the message wants us to qualify each :func:`.relationship` by instructing for each one which foreign key column should be considered, and the appropriate form is as follows:: class Customer(Base): __tablename__ = 'customer' id = Column(Integer, primary_key=True) name = Column(String) billing_address_id = Column(Integer, ForeignKey("address.id")) shipping_address_id = Column(Integer, ForeignKey("address.id")) billing_address = relationship("Address", foreign_keys=[billing_address_id]) shipping_address = relationship("Address", foreign_keys=[shipping_address_id]) Above, we specify the ``foreign_keys`` argument, which is a :class:`.Column` or list of :class:`.Column` objects which indicate those columns to be considered "foreign", or in other words, the columns that contain a value referring to a parent table. Loading the ``Customer.billing_address`` relationship from a ``Customer`` object will use the value present in ``billing_address_id`` in order to identify the row in ``Address`` to be loaded; similarly, ``shipping_address_id`` is used for the ``shipping_address`` relationship. The linkage of the two columns also plays a role during persistence; the newly generated primary key of a just-inserted ``Address`` object will be copied into the appropriate foreign key column of an associated ``Customer`` object during a flush. When specifying ``foreign_keys`` with Declarative, we can also use string names to specify, however it is important that if using a list, the **list is part of the string**:: billing_address = relationship("Address", foreign_keys="[Customer.billing_address_id]") In this specific example, the list is not necessary in any case as there's only one :class:`.Column` we need:: billing_address = relationship("Address", foreign_keys="Customer.billing_address_id") .. versionchanged:: 0.8 :func:`.relationship` can resolve ambiguity between foreign key targets on the basis of the ``foreign_keys`` argument alone; the ``primaryjoin`` argument is no longer needed in this situation. .. 
_relationship_primaryjoin: Specifying Alternate Join Conditions ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The default behavior of :func:`.relationship` when constructing a join is that it equates the value of primary key columns on one side to that of foreign-key-referring columns on the other. We can change this criterion to be anything we'd like using the ``primaryjoin`` argument, as well as the ``secondaryjoin`` argument in the case when a "secondary" table is used. In the example below, using the ``User`` class as well as an ``Address`` class which stores a street address, we create a relationship ``boston_addresses`` which will only load those ``Address`` objects which specify a city of "Boston":: from sqlalchemy import Integer, ForeignKey, String, Column from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import relationship Base = declarative_base() class User(Base): __tablename__ = 'user' id = Column(Integer, primary_key=True) name = Column(String) addresses = relationship("Address", primaryjoin="and_(User.id==Address.user_id, " "Address.city=='Boston')") class Address(Base): __tablename__ = 'address' id = Column(Integer, primary_key=True) user_id = Column(Integer, ForeignKey('user.id')) street = Column(String) city = Column(String) state = Column(String) zip = Column(String) Within this string SQL expression, we made use of the :func:`.and_` conjunction construct to establish two distinct predicates for the join condition - joining both the ``User.id`` and ``Address.user_id`` columns to each other, as well as limiting rows in ``Address`` to just ``city='Boston'``. When using Declarative, rudimentary SQL functions like :func:`.and_` are automatically available in the evaluated namespace of a string :func:`.relationship` argument. The custom criteria we use in a ``primaryjoin`` is generally only significant when SQLAlchemy is rendering SQL in order to load or represent this relationship. That is, it's used in the SQL statement that's emitted in order to perform a per-attribute lazy load, or when a join is constructed at query time, such as via :meth:`.Query.join`, or via the eager "joined" or "subquery" styles of loading. When in-memory objects are being manipulated, we can place any ``Address`` object we'd like into the ``boston_addresses`` collection, regardless of what the value of the ``.city`` attribute is. The objects will remain present in the collection until the attribute is expired and re-loaded from the database where the criterion is applied. When a flush occurs, the objects inside of ``boston_addresses`` will be flushed unconditionally, assigning value of the primary key ``user.id`` column onto the foreign-key-holding ``address.user_id`` column for each row. The ``city`` criteria has no effect here, as the flush process only cares about synchronizing primary key values into referencing foreign key values. .. _relationship_custom_foreign: Creating Custom Foreign Conditions ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Another element of the primary join condition is how those columns considered "foreign" are determined. Usually, some subset of :class:`.Column` objects will specify :class:`.ForeignKey`, or otherwise be part of a :class:`.ForeignKeyConstraint` that's relevant to the join condition. :func:`.relationship` looks to this foreign key status as it decides how it should load and persist data for this relationship. However, the ``primaryjoin`` argument can be used to create a join condition that doesn't involve any "schema" level foreign keys. 
We can combine ``primaryjoin`` along with ``foreign_keys`` and ``remote_side`` explicitly in order to establish such a join. Below, a class ``HostEntry`` joins to itself, equating the string ``content`` column to the ``ip_address`` column, which is a Postgresql type called ``INET``. We need to use :func:`.cast` in order to cast one side of the join to the type of the other:: from sqlalchemy import cast, String, Column, Integer from sqlalchemy.orm import relationship from sqlalchemy.dialects.postgresql import INET from sqlalchemy.ext.declarative import declarative_base Base = declarative_base() class HostEntry(Base): __tablename__ = 'host_entry' id = Column(Integer, primary_key=True) ip_address = Column(INET) content = Column(String(50)) # relationship() using explicit foreign_keys, remote_side parent_host = relationship("HostEntry", primaryjoin=ip_address == cast(content, INET), foreign_keys=content, remote_side=ip_address ) The above relationship will produce a join like:: SELECT host_entry.id, host_entry.ip_address, host_entry.content FROM host_entry JOIN host_entry AS host_entry_1 ON host_entry_1.ip_address = CAST(host_entry.content AS INET) An alternative syntax to the above is to use the :func:`.foreign` and :func:`.remote` :term:`annotations`, inline within the ``primaryjoin`` expression. This syntax represents the annotations that :func:`.relationship` normally applies by itself to the join condition given the ``foreign_keys`` and ``remote_side`` arguments; the functions are provided in the API in the rare case that :func:`.relationship` can't determine the exact location of these features on its own:: from sqlalchemy.orm import foreign, remote class HostEntry(Base): __tablename__ = 'host_entry' id = Column(Integer, primary_key=True) ip_address = Column(INET) content = Column(String(50)) # relationship() using explicit foreign() and remote() annotations # in lieu of separate arguments parent_host = relationship("HostEntry", primaryjoin=remote(ip_address) == \ cast(foreign(content), INET), ) .. _self_referential_many_to_many: Self-Referential Many-to-Many Relationship ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Many to many relationships can be customized by one or both of ``primaryjoin`` and ``secondaryjoin`` - the latter is significant for a relationship that specifies a many-to-many reference using the ``secondary`` argument. A common situation which involves the usage of ``primaryjoin`` and ``secondaryjoin`` is when establishing a many-to-many relationship from a class to itself, as shown below:: from sqlalchemy import Integer, ForeignKey, String, Column, Table from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import relationship Base = declarative_base() node_to_node = Table("node_to_node", Base.metadata, Column("left_node_id", Integer, ForeignKey("node.id"), primary_key=True), Column("right_node_id", Integer, ForeignKey("node.id"), primary_key=True) ) class Node(Base): __tablename__ = 'node' id = Column(Integer, primary_key=True) label = Column(String) right_nodes = relationship("Node", secondary=node_to_node, primaryjoin=id==node_to_node.c.left_node_id, secondaryjoin=id==node_to_node.c.right_node_id, backref="left_nodes" ) Where above, SQLAlchemy can't know automatically which columns should connect to which for the ``right_nodes`` and ``left_nodes`` relationships. The ``primaryjoin`` and ``secondaryjoin`` arguments establish how we'd like to join to the association table. 
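As a quick, illustrative sketch of how this mapping behaves, the example below assumes the declarative ``Node`` mapping above, plus a throwaway in-memory SQLite engine and :class:`.sessionmaker` created only for this demonstration::

    from sqlalchemy import create_engine
    from sqlalchemy.orm import sessionmaker

    # assumptions for illustration only: an in-memory SQLite database
    # and a Session factory; any configured engine would work the same way
    engine = create_engine('sqlite://')
    Base.metadata.create_all(engine)
    session = sessionmaker(bind=engine)()

    n1 = Node(label='n1')
    n2 = Node(label='n2')

    # appending to right_nodes also populates the reverse left_nodes
    # collection, via the backref
    n1.right_nodes.append(n2)
    assert n2.left_nodes == [n1]

    session.add(n1)
    session.commit()

    # a single row in node_to_node links n1 to n2, e.g. (1, 2)
    print session.execute(node_to_node.select()).fetchall()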
In the Declarative form above, as we are declaring these conditions within the Python block that corresponds to the ``Node`` class, the ``id`` variable is available directly as the ``Column`` object we wish to join with. A classical mapping situation here is similar, where ``node_to_node`` can be joined to ``node.c.id``:: from sqlalchemy import Integer, ForeignKey, String, Column, Table, MetaData from sqlalchemy.orm import relationship, mapper metadata = MetaData() node_to_node = Table("node_to_node", metadata, Column("left_node_id", Integer, ForeignKey("node.id"), primary_key=True), Column("right_node_id", Integer, ForeignKey("node.id"), primary_key=True) ) node = Table("node", metadata, Column('id', Integer, primary_key=True), Column('label', String) ) class Node(object): pass mapper(Node, node, properties={ 'right_nodes':relationship(Node, secondary=node_to_node, primaryjoin=node.c.id==node_to_node.c.left_node_id, secondaryjoin=node.c.id==node_to_node.c.right_node_id, backref="left_nodes" )}) Note that in both examples, the ``backref`` keyword specifies a ``left_nodes`` backref - when :func:`.relationship` creates the second relationship in the reverse direction, it's smart enough to reverse the ``primaryjoin`` and ``secondaryjoin`` arguments. Building Query-Enabled Properties ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Very ambitious custom join conditions may fail to be directly persistable, and in some cases may not even load correctly. To remove the persistence part of the equation, use the flag ``viewonly=True`` on the :func:`~sqlalchemy.orm.relationship`, which establishes it as a read-only attribute (data written to the collection will be ignored on flush()). However, in extreme cases, consider using a regular Python property in conjunction with :class:`~sqlalchemy.orm.query.Query` as follows: .. sourcecode:: python+sql class User(Base): __tablename__ = 'user' id = Column(Integer, primary_key=True) def _get_addresses(self): return object_session(self).query(Address).with_parent(self).filter(...).all() addresses = property(_get_addresses) .. _post_update: Rows that point to themselves / Mutually Dependent Rows ------------------------------------------------------- This is a very specific case where relationship() must perform an INSERT and a second UPDATE in order to properly populate a row (and vice versa an UPDATE and DELETE in order to delete without violating foreign key constraints). The two use cases are: * A table contains a foreign key to itself, and a single row will have a foreign key value pointing to its own primary key. * Two tables each contain a foreign key referencing the other table, with a row in each table referencing the other. For example:: user --------------------------------- user_id name related_user_id 1 'ed' 1 Or:: widget entry ------------------------------------------- --------------------------------- widget_id name favorite_entry_id entry_id name widget_id 1 'somewidget' 5 5 'someentry' 1 In the first case, a row points to itself. Technically, a database that uses sequences such as PostgreSQL or Oracle can INSERT the row at once using a previously generated value, but databases which rely upon autoincrement-style primary key identifiers cannot. The :func:`~sqlalchemy.orm.relationship` always assumes a "parent/child" model of row population during flush, so unless you are populating the primary key/foreign key columns directly, :func:`~sqlalchemy.orm.relationship` needs to use two statements. 
In the second case, the "widget" row must be inserted before any referring "entry" rows, but then the "favorite_entry_id" column of that "widget" row cannot be set until the "entry" rows have been generated. In this case, it's typically impossible to insert the "widget" and "entry" rows using just two INSERT statements; an UPDATE must be performed in order to keep foreign key constraints fulfilled. The exception is if the foreign keys are configured as "deferred until commit" (a feature some databases support) and if the identifiers were populated manually (again essentially bypassing :func:`~sqlalchemy.orm.relationship`). To enable the usage of a supplementary UPDATE statement, we use the ``post_update`` option of :func:`.relationship`. This specifies that the linkage between the two rows should be created using an UPDATE statement after both rows have been INSERTED; it also causes the rows to be de-associated with each other via UPDATE before a DELETE is emitted. The flag should be placed on just *one* of the relationships, preferably the many-to-one side. Below we illustrate a complete example, including two :class:`.ForeignKey` constructs, one which specifies ``use_alter=True`` to help with emitting CREATE TABLE statements:: from sqlalchemy import Integer, ForeignKey, Column from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import relationship Base = declarative_base() class Entry(Base): __tablename__ = 'entry' entry_id = Column(Integer, primary_key=True) widget_id = Column(Integer, ForeignKey('widget.widget_id')) name = Column(String(50)) class Widget(Base): __tablename__ = 'widget' widget_id = Column(Integer, primary_key=True) favorite_entry_id = Column(Integer, ForeignKey('entry.entry_id', use_alter=True, name="fk_favorite_entry")) name = Column(String(50)) entries = relationship(Entry, primaryjoin= widget_id==Entry.widget_id) favorite_entry = relationship(Entry, primaryjoin= favorite_entry_id==Entry.entry_id, post_update=True) When a structure against the above configuration is flushed, the "widget" row will be INSERTed minus the "favorite_entry_id" value, then all the "entry" rows will be INSERTed referencing the parent "widget" row, and then an UPDATE statement will populate the "favorite_entry_id" column of the "widget" table (it's one row at a time for the time being): .. sourcecode:: pycon+sql >>> w1 = Widget(name='somewidget') >>> e1 = Entry(name='someentry') >>> w1.favorite_entry = e1 >>> w1.entries = [e1] >>> session.add_all([w1, e1]) {sql}>>> session.commit() BEGIN (implicit) INSERT INTO widget (favorite_entry_id, name) VALUES (?, ?) (None, 'somewidget') INSERT INTO entry (widget_id, name) VALUES (?, ?) (1, 'someentry') UPDATE widget SET favorite_entry_id=? WHERE widget.widget_id = ? (1, 1) COMMIT An additional configuration we can specify is to supply a more comprehensive foreign key constraint on ``Widget``, such that it's guaranteed that ``favorite_entry_id`` refers to an ``Entry`` that also refers to this ``Widget``. 
We can use a composite foreign key, as illustrated below:: from sqlalchemy import Integer, ForeignKey, String, \ Column, UniqueConstraint, ForeignKeyConstraint from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import relationship Base = declarative_base() class Entry(Base): __tablename__ = 'entry' entry_id = Column(Integer, primary_key=True) widget_id = Column(Integer, ForeignKey('widget.widget_id')) name = Column(String(50)) __table_args__ = ( UniqueConstraint("entry_id", "widget_id"), ) class Widget(Base): __tablename__ = 'widget' widget_id = Column(Integer, autoincrement='ignore_fk', primary_key=True) favorite_entry_id = Column(Integer) name = Column(String(50)) __table_args__ = ( ForeignKeyConstraint( ["widget_id", "favorite_entry_id"], ["entry.widget_id", "entry.entry_id"], name="fk_favorite_entry", use_alter=True ), ) entries = relationship(Entry, primaryjoin= widget_id==Entry.widget_id, foreign_keys=Entry.widget_id) favorite_entry = relationship(Entry, primaryjoin= favorite_entry_id==Entry.entry_id, foreign_keys=favorite_entry_id, post_update=True) The above mapping features a composite :class:`.ForeignKeyConstraint` bridging the ``widget_id`` and ``favorite_entry_id`` columns. To ensure that ``Widget.widget_id`` remains an "autoincrementing" column we specify ``autoincrement='ignore_fk'`` on :class:`.Column`, and additionally on each :func:`.relationship` we must limit those columns considered as part of the foreign key for the purposes of joining and cross-population. .. versionadded:: 0.7.4 ``autoincrement='ignore_fk'`` on :class:`.Column`\ . .. _passive_updates: Mutable Primary Keys / Update Cascades --------------------------------------- When the primary key of an entity changes, related items which reference the primary key must also be updated as well. For databases which enforce referential integrity, it's required to use the database's ON UPDATE CASCADE functionality in order to propagate primary key changes to referenced foreign keys - the values cannot be out of sync for any moment. For databases that don't support this, such as SQLite and MySQL without their referential integrity options turned on, the ``passive_updates`` flag can be set to ``False``, most preferably on a one-to-many or many-to-many :func:`.relationship`, which instructs SQLAlchemy to issue UPDATE statements individually for objects referenced in the collection, loading them into memory if not already locally present. The ``passive_updates`` flag can also be ``False`` in conjunction with ON UPDATE CASCADE functionality, although in that case the unit of work will be issuing extra SELECT and UPDATE statements unnecessarily. A typical mutable primary key setup might look like:: class User(Base): __tablename__ = 'user' username = Column(String(50), primary_key=True) fullname = Column(String(100)) # passive_updates=False *only* needed if the database # does not implement ON UPDATE CASCADE addresses = relationship("Address", passive_updates=False) class Address(Base): __tablename__ = 'address' email = Column(String(50), primary_key=True) username = Column(String(50), ForeignKey('user.username', onupdate="cascade") ) ``passive_updates`` is set to ``True`` by default, indicating that ON UPDATE CASCADE is expected to be in place in the usual case for foreign keys that expect to have a mutating parent key. ``passive_updates=False`` may be configured on any direction of relationship, i.e. 
one-to-many, many-to-one, and many-to-many, although it is much more effective when placed just on the one-to-many or many-to-many side. Configuring the ``passive_updates=False`` only on the many-to-one side will have only a partial effect, as the unit of work searches only through the current identity map for objects that may be referencing the one with a mutating primary key, not throughout the database. Relationships API ----------------- .. autofunction:: relationship .. autofunction:: backref .. autofunction:: relation .. autofunction:: dynamic_loader .. autofunction:: foreign .. autofunction:: remote SQLAlchemy-0.8.4/doc/_sources/orm/session.txt0000644000076500000240000026053012251147171021650 0ustar classicstaff00000000000000.. _session_toplevel: ================= Using the Session ================= .. module:: sqlalchemy.orm.session The :func:`.orm.mapper` function and :mod:`~sqlalchemy.ext.declarative` extensions are the primary configurational interface for the ORM. Once mappings are configured, the primary usage interface for persistence operations is the :class:`.Session`. What does the Session do ? ========================== In the most general sense, the :class:`~.Session` establishes all conversations with the database and represents a "holding zone" for all the objects which you've loaded or associated with it during its lifespan. It provides the entrypoint to acquire a :class:`.Query` object, which sends queries to the database using the :class:`~.Session` object's current database connection, populating result rows into objects that are then stored in the :class:`.Session`, inside a structure called the `Identity Map `_ - a data structure that maintains unique copies of each object, where "unique" means "only one object with a particular primary key". The :class:`.Session` begins in an essentially stateless form. Once queries are issued or other objects are persisted with it, it requests a connection resource from an :class:`.Engine` that is associated either with the :class:`.Session` itself or with the mapped :class:`.Table` objects being operated upon. This connection represents an ongoing transaction, which remains in effect until the :class:`.Session` is instructed to commit or roll back its pending state. All changes to objects maintained by a :class:`.Session` are tracked - before the database is queried again or before the current transaction is committed, it **flushes** all pending changes to the database. This is known as the `Unit of Work `_ pattern. When using a :class:`.Session`, it's important to note that the objects which are associated with it are **proxy objects** to the transaction being held by the :class:`.Session` - there are a variety of events that will cause objects to re-access the database in order to keep synchronized. It is possible to "detach" objects from a :class:`.Session`, and to continue using them, though this practice has its caveats. It's intended that usually, you'd re-associate detached objects with another :class:`.Session` when you want to work with them again, so that they can resume their normal task of representing database state. .. _session_getting: Getting a Session ================= :class:`.Session` is a regular Python class which can be directly instantiated. 
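A minimal sketch of direct construction, assuming an :class:`.Engine` named ``engine`` has already been created elsewhere with :func:`.create_engine`, might look like::

    from sqlalchemy.orm import Session

    # bind the Session directly to an existing Engine
    session = Session(bind=engine)

    # ... work with the session, then release its transactional /
    # connection resources
    session.close()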
However, to standardize how sessions are configured and acquired, the :class:`.sessionmaker` class is normally used to create a top level :class:`.Session` configuration which can then be used throughout an application without the need to repeat the configurational arguments. The usage of :class:`.sessionmaker` is illustrated below: .. sourcecode:: python+sql from sqlalchemy import create_engine from sqlalchemy.orm import sessionmaker # an Engine, which the Session will use for connection # resources some_engine = create_engine('postgresql://scott:tiger@localhost/') # create a configured "Session" class Session = sessionmaker(bind=some_engine) # create a Session session = Session() # work with sess myobject = MyObject('foo', 'bar') session.add(myobject) session.commit() Above, the :class:`.sessionmaker` call creates a factory for us, which we assign to the name ``Session``. This factory, when called, will create a new :class:`.Session` object using the configurational arguments we've given the factory. In this case, as is typical, we've configured the factory to specify a particular :class:`.Engine` for connection resources. A typical setup will associate the :class:`.sessionmaker` with an :class:`.Engine`, so that each :class:`.Session` generated will use this :class:`.Engine` to acquire connection resources. This association can be set up as in the example above, using the ``bind`` argument. When you write your application, place the :class:`.sessionmaker` factory at the global level. This factory can then be used by the rest of the applcation as the source of new :class:`.Session` instances, keeping the configuration for how :class:`.Session` objects are constructed in one place. The :class:`.sessionmaker` factory can also be used in conjunction with other helpers, which are passed a user-defined :class:`.sessionmaker` that is then maintained by the helper. Some of these helpers are discussed in the section :ref:`session_faq_whentocreate`. Adding Additional Configuration to an Existing sessionmaker() -------------------------------------------------------------- A common scenario is where the :class:`.sessionmaker` is invoked at module import time, however the generation of one or more :class:`.Engine` instances to be associated with the :class:`.sessionmaker` has not yet proceeded. For this use case, the :class:`.sessionmaker` construct offers the :meth:`.sessionmaker.configure` method, which will place additional configuration directives into an existing :class:`.sessionmaker` that will take place when the construct is invoked:: from sqlalchemy.orm import sessionmaker from sqlalchemy import create_engine # configure Session class with desired options Session = sessionmaker() # later, we create the engine engine = create_engine('postgresql://...') # associate it with our custom Session class Session.configure(bind=engine) # work with the session session = Session() Creating Ad-Hoc Session Objects with Alternate Arguments --------------------------------------------------------- For the use case where an application needs to create a new :class:`.Session` with special arguments that deviate from what is normally used throughout the application, such as a :class:`.Session` that binds to an alternate source of connectivity, or a :class:`.Session` that should have other arguments such as ``expire_on_commit`` established differently from what most of the application wants, specific arguments can be passed to the :class:`.sessionmaker` factory's :meth:`.sessionmaker.__call__` method. 
These arguments will override whatever configurations have already been placed, such as below, where a new :class:`.Session` is constructed against a specific :class:`.Connection`:: # at the module level, the global sessionmaker, # bound to a specific Engine Session = sessionmaker(bind=engine) # later, some unit of code wants to create a # Session that is bound to a specific Connection conn = engine.connect() session = Session(bind=conn) The typical rationale for the association of a :class:`.Session` with a specific :class:`.Connection` is that of a test fixture that maintains an external transaction - see :ref:`session_external_transaction` for an example of this. Using the Session ================== .. _session_object_states: Quickie Intro to Object States ------------------------------ It's helpful to know the states which an instance can have within a session: * **Transient** - an instance that's not in a session, and is not saved to the database; i.e. it has no database identity. The only relationship such an object has to the ORM is that its class has a ``mapper()`` associated with it. * **Pending** - when you :meth:`~.Session.add` a transient instance, it becomes pending. It still wasn't actually flushed to the database yet, but it will be when the next flush occurs. * **Persistent** - An instance which is present in the session and has a record in the database. You get persistent instances by either flushing so that the pending instances become persistent, or by querying the database for existing instances (or moving persistent instances from other sessions into your local session). * **Detached** - an instance which has a record in the database, but is not in any session. There's nothing wrong with this, and you can use objects normally when they're detached, **except** they will not be able to issue any SQL in order to load collections or attributes which are not yet loaded, or were marked as "expired". Knowing these states is important, since the :class:`.Session` tries to be strict about ambiguous operations (such as trying to save the same object to two different sessions at the same time). .. _session_faq: Session Frequently Asked Questions ----------------------------------- When do I make a :class:`.sessionmaker`? ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Just one time, somewhere in your application's global scope. It should be looked upon as part of your application's configuration. If your application has three .py files in a package, you could, for example, place the :class:`.sessionmaker` line in your ``__init__.py`` file; from that point on your other modules say "from mypackage import Session". That way, everyone else just uses :class:`.Session()`, and the configuration of that session is controlled by that central point. If your application starts up, does imports, but does not know what database it's going to be connecting to, you can bind the :class:`.Session` at the "class" level to the engine later on, using :meth:`.sessionmaker.configure`. In the examples in this section, we will frequently show the :class:`.sessionmaker` being created right above the line where we actually invoke :class:`.Session`. But that's just for example's sake! In reality, the :class:`.sessionmaker` would be somewhere at the module level. The calls to instantiate :class:`.Session` would then be placed at the point in the application where database conversations begin. .. _session_faq_whentocreate: When do I construct a :class:`.Session`, when do I commit it, and when do I close it? 
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. topic:: tl;dr; As a general rule, keep the lifecycle of the session **separate and external** from functions and objects that access and/or manipulate database data. A :class:`.Session` is typically constructed at the beginning of a logical operation where database access is potentially anticipated. The :class:`.Session`, whenever it is used to talk to the database, begins a database transaction as soon as it starts communicating. Assuming the ``autocommit`` flag is left at its recommended default of ``False``, this transaction remains in progress until the :class:`.Session` is rolled back, committed, or closed. The :class:`.Session` will begin a new transaction if it is used again, subsequent to the previous transaction ending; from this it follows that the :class:`.Session` is capable of having a lifespan across many transactions, though only one at a time. We refer to these two concepts as **transaction scope** and **session scope**. The implication here is that the SQLAlchemy ORM is encouraging the developer to establish these two scopes in their application, including not only when the scopes begin and end, but also the expanse of those scopes, for example should a single :class:`.Session` instance be local to the execution flow within a function or method, should it be a global object used by the entire application, or somewhere in between these two. The burden placed on the developer to determine this scope is one area where the SQLAlchemy ORM necessarily has a strong opinion about how the database should be used. The :term:`unit of work` pattern is specifically one of accumulating changes over time and flushing them periodically, keeping in-memory state in sync with what's known to be present in a local transaction. This pattern is only effective when meaningful transaction scopes are in place. It's usually not very hard to determine the best points at which to begin and end the scope of a :class:`.Session`, though the wide variety of application architectures possible can introduce challenging situations. A common choice is to tear down the :class:`.Session` at the same time the transaction ends, meaning the transaction and session scopes are the same. This is a great choice to start out with as it removes the need to consider session scope as separate from transaction scope. While there's no one-size-fits-all recommendation for how transaction scope should be determined, there are common patterns. Especially if one is writing a web application, the choice is pretty much established. A web application is the easiest case because such an appication is already constructed around a single, consistent scope - this is the **request**, which represents an incoming request from a browser, the processing of that request to formulate a response, and finally the delivery of that response back to the client. Integrating web applications with the :class:`.Session` is then the straightforward task of linking the scope of the :class:`.Session` to that of the request. The :class:`.Session` can be established as the request begins, or using a :term:`lazy initialization` pattern which establishes one as soon as it is needed. The request then proceeds, with some system in place where application logic can access the current :class:`.Session` in a manner associated with how the actual request object is accessed. 
As the request ends, the :class:`.Session` is torn down as well, usually through the usage of event hooks provided by the web framework. The transaction used by the :class:`.Session` may also be committed at this point, or alternatively the application may opt for an explicit commit pattern, only committing for those requests where one is warranted, but still always tearing down the :class:`.Session` unconditionally at the end.

Most web frameworks include infrastructure to establish a single :class:`.Session`, associated with the request, which is correctly constructed and torn down at the end of the request. Such infrastructure pieces include products such as `Flask-SQLAlchemy `_, for usage in conjunction with the Flask web framework, and `Zope-SQLAlchemy `_, for usage in conjunction with the Pyramid and Zope frameworks. SQLAlchemy strongly recommends that these products be used as available.

In those situations where integration libraries are not available, SQLAlchemy includes its own "helper" class known as :class:`.scoped_session`. A tutorial on the usage of this object is at :ref:`unitofwork_contextual`. It provides both a quick way to associate a :class:`.Session` with the current thread, as well as patterns to associate :class:`.Session` objects with other kinds of scopes.

As mentioned before, for non-web applications there is no one clear pattern, as applications themselves don't have just one pattern of architecture. The best strategy is to attempt to demarcate "operations", points at which a particular thread begins to perform a series of operations for some period of time, which can be committed at the end. Some examples:

* A background daemon which spawns off child forks would want to create a :class:`.Session` local to each child process, work with that :class:`.Session` through the life of the "job" that the fork is handling, then tear it down when the job is completed.

* For a command-line script, the application would create a single, global :class:`.Session` that is established when the program begins to do its work, and commits it right as the program is completing its task.

* For a GUI interface-driven application, the scope of the :class:`.Session` may best be within the scope of a user-generated event, such as a button push. Or, the scope may correspond to explicit user interaction, such as the user "opening" a series of records, then "saving" them.

As a general rule, the application should manage the lifecycle of the session *externally* to functions that deal with specific data. This is a fundamental separation of concerns which keeps data-specific operations agnostic of the context in which they access and manipulate that data.

E.g.
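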
**don't do this**:: ### this is the **wrong way to do it** ### class ThingOne(object): def go(self): session = Session() try: session.query(FooBar).update({"x": 5}) session.commit() except: session.rollback() raise class ThingTwo(object): def go(self): session = Session() try: session.query(Widget).update({"q": 18}) session.commit() except: session.rollback() raise def run_my_program(): ThingOne().go() ThingTwo().go() Keep the lifecycle of the session (and usually the transaction) **separate and external**:: ### this is a **better** (but not the only) way to do it ### class ThingOne(object): def go(self, session): session.query(FooBar).update({"x": 5}) class ThingTwo(object): def go(self, session): session.query(Widget).update({"q": 18}) def run_my_program(): session = Session() try: ThingOne().go(session) ThingTwo().go(session) session.commit() except: session.rollback() raise finally: session.close() The advanced developer will try to keep the details of session, transaction and exception management as far as possible from the details of the program doing its work. For example, we can further separate concerns using a `context manager `_:: ### another way (but again *not the only way*) to do it ### from contextlib import contextmanager @contextmanager def session_scope(): """Provide a transactional scope around a series of operations.""" session = Session() try: yield session session.commit() except: session.rollback() raise finally: session.close() def run_my_program(): with session_scope() as session: ThingOne().go(session) ThingTwo().go(session) Is the Session a cache? ~~~~~~~~~~~~~~~~~~~~~~~~~~~ Yeee...no. It's somewhat used as a cache, in that it implements the :term:`identity map` pattern, and stores objects keyed to their primary key. However, it doesn't do any kind of query caching. This means, if you say ``session.query(Foo).filter_by(name='bar')``, even if ``Foo(name='bar')`` is right there, in the identity map, the session has no idea about that. It has to issue SQL to the database, get the rows back, and then when it sees the primary key in the row, *then* it can look in the local identity map and see that the object is already there. It's only when you say ``query.get({some primary key})`` that the :class:`~sqlalchemy.orm.session.Session` doesn't have to issue a query. Additionally, the Session stores object instances using a weak reference by default. This also defeats the purpose of using the Session as a cache. The :class:`.Session` is not designed to be a global object from which everyone consults as a "registry" of objects. That's more the job of a **second level cache**. SQLAlchemy provides a pattern for implementing second level caching using `dogpile.cache `_, via the :ref:`examples_caching` example. How can I get the :class:`~sqlalchemy.orm.session.Session` for a certain object? ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Use the :meth:`~.Session.object_session` classmethod available on :class:`~sqlalchemy.orm.session.Session`:: session = Session.object_session(someobject) The newer :ref:`core_inspection_toplevel` system can also be used:: from sqlalchemy import inspect session = inspect(object).session .. _session_faq_threadsafe: Is the session thread-safe? ~~~~~~~~~~~~~~~~~~~~~~~~~~~ The :class:`.Session` is very much intended to be used in a **non-concurrent** fashion, which usually means in only one thread at a time. 
The :class:`.Session` should be used in such a way that one instance exists for a single series of operations within a single transaction. One expedient way to get this effect is by associating a :class:`.Session` with the current thread (see :ref:`unitofwork_contextual` for background). Another is to use a pattern where the :class:`.Session` is passed between functions and is otherwise not shared with other threads. The bigger point is that you should not *want* to use the session with multiple concurrent threads. That would be like having everyone at a restaurant all eat from the same plate. The session is a local "workspace" that you use for a specific set of tasks; you don't want to, or need to, share that session with other threads who are doing some other task. Making sure the :class:`.Session` is only used in a single concurrent thread at a time is called a "share nothing" approach to concurrency. But actually, not sharing the :class:`.Session` implies a more significant pattern; it means not just the :class:`.Session` object itself, but also **all objects that are associated with that Session**, must be kept within the scope of a single concurrent thread. The set of mapped objects associated with a :class:`.Session` are essentially proxies for data within database rows accessed over a database connection, and so just like the :class:`.Session` itself, the whole set of objects is really just a large-scale proxy for a database connection (or connections). Ultimately, it's mostly the DBAPI connection itself that we're keeping away from concurrent access; but since the :class:`.Session` and all the objects associated with it are all proxies for that DBAPI connection, the entire graph is essentially not safe for concurrent access. If there are in fact multiple threads participating in the same task, then you may consider sharing the session and its objects between those threads; however, in this extremely unusual scenario the application would need to ensure that a proper locking scheme is implemented so that there isn't *concurrent* access to the :class:`.Session` or its state. A more common approach to this situation is to maintain a single :class:`.Session` per concurrent thread, but to instead *copy* objects from one :class:`.Session` to another, often using the :meth:`.Session.merge` method to copy the state of an object into a new object local to a different :class:`.Session`. Querying -------- The :meth:`~.Session.query` function takes one or more *entities* and returns a new :class:`~sqlalchemy.orm.query.Query` object which will issue mapper queries within the context of this Session. An entity is defined as a mapped class, a :class:`~sqlalchemy.orm.mapper.Mapper` object, an orm-enabled *descriptor*, or an ``AliasedClass`` object:: # query from a class session.query(User).filter_by(name='ed').all() # query with multiple classes, returns tuples session.query(User, Address).join('addresses').filter_by(name='ed').all() # query using orm-enabled descriptors session.query(User.name, User.fullname).all() # query from a mapper user_mapper = class_mapper(User) session.query(user_mapper) When :class:`~sqlalchemy.orm.query.Query` returns results, each object instantiated is stored within the identity map. When a row matches an object which is already present, the same object is returned. In the latter case, whether or not the row is populated onto an existing object depends upon whether the attributes of the instance have been *expired* or not. 
A default-configured :class:`~sqlalchemy.orm.session.Session` automatically expires all instances along transaction boundaries, so that with a normally isolated transaction, there shouldn't be any issue of instances representing data which is stale with regards to the current transaction. The :class:`.Query` object is introduced in great detail in :ref:`ormtutorial_toplevel`, and further documented in :ref:`query_api_toplevel`. Adding New or Existing Items ---------------------------- :meth:`~.Session.add` is used to place instances in the session. For *transient* (i.e. brand new) instances, this will have the effect of an INSERT taking place for those instances upon the next flush. For instances which are *persistent* (i.e. were loaded by this session), they are already present and do not need to be added. Instances which are *detached* (i.e. have been removed from a session) may be re-associated with a session using this method:: user1 = User(name='user1') user2 = User(name='user2') session.add(user1) session.add(user2) session.commit() # write changes to the database To add a list of items to the session at once, use :meth:`~.Session.add_all`:: session.add_all([item1, item2, item3]) The :meth:`~.Session.add` operation **cascades** along the ``save-update`` cascade. For more details see the section :ref:`unitofwork_cascades`. .. _unitofwork_merging: Merging ------- :meth:`~.Session.merge` transfers state from an outside object into a new or already existing instance within a session. It also reconciles the incoming data against the state of the database, producing a history stream which will be applied towards the next flush, or alternatively can be made to produce a simple "transfer" of state without producing change history or accessing the database. Usage is as follows:: merged_object = session.merge(existing_object) When given an instance, it follows these steps: * It examines the primary key of the instance. If it's present, it attempts to locate that instance in the local identity map. If the ``load=True`` flag is left at its default, it also checks the database for this primary key if not located locally. * If the given instance has no primary key, or if no instance can be found with the primary key given, a new instance is created. * The state of the given instance is then copied onto the located/newly created instance. For attributes which are present on the source instance, the value is transferred to the target instance. For mapped attributes which aren't present on the source, the attribute is expired on the target instance, discarding its existing value. If the ``load=True`` flag is left at its default, this copy process emits events and will load the target object's unloaded collections for each attribute present on the source object, so that the incoming state can be reconciled against what's present in the database. If ``load`` is passed as ``False``, the incoming data is "stamped" directly without producing any history. * The operation is cascaded to related objects and collections, as indicated by the ``merge`` cascade (see :ref:`unitofwork_cascades`). * The new instance is returned. With :meth:`~.Session.merge`, the given "source" instance is not modifed nor is it associated with the target :class:`.Session`, and remains available to be merged with any number of other :class:`.Session` objects. 
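As a brief sketch of these steps in action, assuming the ``User`` mapping used elsewhere in this chapter and an already-configured ``session``::

    # a detached object that carries a primary key and some new state
    detached_user = User(id=5, name='new name')

    # merge() locates the persistent object for primary key 5, loading it
    # from the database if it's not already in the identity map, copies the
    # state of detached_user onto it, and returns that persistent instance
    merged_user = session.merge(detached_user)

    assert merged_user in session         # the returned object is in the session
    assert detached_user not in session   # the source object remains detached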
:meth:`~.Session.merge` is useful for taking the state of any kind of object structure without regard for its origins or current session associations and copying its state into a new session. Here's some examples: * An application which reads an object structure from a file and wishes to save it to the database might parse the file, build up the structure, and then use :meth:`~.Session.merge` to save it to the database, ensuring that the data within the file is used to formulate the primary key of each element of the structure. Later, when the file has changed, the same process can be re-run, producing a slightly different object structure, which can then be ``merged`` in again, and the :class:`~sqlalchemy.orm.session.Session` will automatically update the database to reflect those changes, loading each object from the database by primary key and then updating its state with the new state given. * An application is storing objects in an in-memory cache, shared by many :class:`.Session` objects simultaneously. :meth:`~.Session.merge` is used each time an object is retrieved from the cache to create a local copy of it in each :class:`.Session` which requests it. The cached object remains detached; only its state is moved into copies of itself that are local to individual :class:`~.Session` objects. In the caching use case, it's common that the ``load=False`` flag is used to remove the overhead of reconciling the object's state with the database. There's also a "bulk" version of :meth:`~.Session.merge` called :meth:`~.Query.merge_result` that was designed to work with cache-extended :class:`.Query` objects - see the section :ref:`examples_caching`. * An application wants to transfer the state of a series of objects into a :class:`.Session` maintained by a worker thread or other concurrent system. :meth:`~.Session.merge` makes a copy of each object to be placed into this new :class:`.Session`. At the end of the operation, the parent thread/process maintains the objects it started with, and the thread/worker can proceed with local copies of those objects. In the "transfer between threads/processes" use case, the application may want to use the ``load=False`` flag as well to avoid overhead and redundant SQL queries as the data is transferred. Merge Tips ~~~~~~~~~~ :meth:`~.Session.merge` is an extremely useful method for many purposes. However, it deals with the intricate border between objects that are transient/detached and those that are persistent, as well as the automated transferrence of state. The wide variety of scenarios that can present themselves here often require a more careful approach to the state of objects. Common problems with merge usually involve some unexpected state regarding the object being passed to :meth:`~.Session.merge`. 
Lets use the canonical example of the User and Address objects:: class User(Base): __tablename__ = 'user' id = Column(Integer, primary_key=True) name = Column(String(50), nullable=False) addresses = relationship("Address", backref="user") class Address(Base): __tablename__ = 'address' id = Column(Integer, primary_key=True) email_address = Column(String(50), nullable=False) user_id = Column(Integer, ForeignKey('user.id'), nullable=False) Assume a ``User`` object with one ``Address``, already persistent:: >>> u1 = User(name='ed', addresses=[Address(email_address='ed@ed.com')]) >>> session.add(u1) >>> session.commit() We now create ``a1``, an object outside the session, which we'd like to merge on top of the existing ``Address``:: >>> existing_a1 = u1.addresses[0] >>> a1 = Address(id=existing_a1.id) A surprise would occur if we said this:: >>> a1.user = u1 >>> a1 = session.merge(a1) >>> session.commit() sqlalchemy.orm.exc.FlushError: New instance
with identity key (<class '__main__.Address'>, (1,)) conflicts with persistent instance <Address at 0x...>
Why is that ? We weren't careful with our cascades. The assignment of ``a1.user`` to a persistent object cascaded to the backref of ``User.addresses`` and made our ``a1`` object pending, as though we had added it. Now we have *two* ``Address`` objects in the session:: >>> a1 = Address() >>> a1.user = u1 >>> a1 in session True >>> existing_a1 in session True >>> a1 is existing_a1 False Above, our ``a1`` is already pending in the session. The subsequent :meth:`~.Session.merge` operation essentially does nothing. Cascade can be configured via the ``cascade`` option on :func:`.relationship`, although in this case it would mean removing the ``save-update`` cascade from the ``User.addresses`` relationship - and usually, that behavior is extremely convenient. The solution here would usually be to not assign ``a1.user`` to an object already persistent in the target session. The ``cascade_backrefs=False`` option of :func:`.relationship` will also prevent the ``Address`` from being added to the session via the ``a1.user = u1`` assignment. Further detail on cascade operation is at :ref:`unitofwork_cascades`. Another example of unexpected state:: >>> a1 = Address(id=existing_a1.id, user_id=u1.id) >>> assert a1.user is None >>> True >>> a1 = session.merge(a1) >>> session.commit() sqlalchemy.exc.IntegrityError: (IntegrityError) address.user_id may not be NULL Here, we accessed a1.user, which returned its default value of ``None``, which as a result of this access, has been placed in the ``__dict__`` of our object ``a1``. Normally, this operation creates no change event, so the ``user_id`` attribute takes precedence during a flush. But when we merge the ``Address`` object into the session, the operation is equivalent to:: >>> existing_a1.id = existing_a1.id >>> existing_a1.user_id = u1.id >>> existing_a1.user = None Where above, both ``user_id`` and ``user`` are assigned to, and change events are emitted for both. The ``user`` association takes precedence, and None is applied to ``user_id``, causing a failure. Most :meth:`~.Session.merge` issues can be examined by first checking - is the object prematurely in the session ? .. sourcecode:: python+sql >>> a1 = Address(id=existing_a1, user_id=user.id) >>> assert a1 not in session >>> a1 = session.merge(a1) Or is there state on the object that we don't want ? Examining ``__dict__`` is a quick way to check:: >>> a1 = Address(id=existing_a1, user_id=user.id) >>> a1.user >>> a1.__dict__ {'_sa_instance_state': , 'user_id': 1, 'id': 1, 'user': None} >>> # we don't want user=None merged, remove it >>> del a1.user >>> a1 = session.merge(a1) >>> # success >>> session.commit() Deleting -------- The :meth:`~.Session.delete` method places an instance into the Session's list of objects to be marked as deleted:: # mark two objects to be deleted session.delete(obj1) session.delete(obj2) # commit (or flush) session.commit() .. _session_deleting_from_collections: Deleting from Collections ~~~~~~~~~~~~~~~~~~~~~~~~~~ A common confusion that arises regarding :meth:`~.Session.delete` is when objects which are members of a collection are being deleted. While the collection member is marked for deletion from the database, this does not impact the collection itself in memory until the collection is expired. 
Below, we illustrate that even after an ``Address`` object is marked for deletion, it's still present in the collection associated with the parent ``User``, even after a flush:: >>> address = user.addresses[1] >>> session.delete(address) >>> session.flush() >>> address in user.addresses True When the above session is committed, all attributes are expired. The next access of ``user.addresses`` will re-load the collection, revealing the desired state:: >>> session.commit() >>> address in user.addresses False The usual practice of deleting items within collections is to forego the usage of :meth:`~.Session.delete` directly, and instead use cascade behavior to automatically invoke the deletion as a result of removing the object from the parent collection. The ``delete-orphan`` cascade accomplishes this, as illustrated in the example below:: mapper(User, users_table, properties={ 'addresses':relationship(Address, cascade="all, delete, delete-orphan") }) del user.addresses[1] session.flush() Where above, upon removing the ``Address`` object from the ``User.addresses`` collection, the ``delete-orphan`` cascade has the effect of marking the ``Address`` object for deletion in the same way as passing it to :meth:`~.Session.delete`. See also :ref:`unitofwork_cascades` for detail on cascades. Deleting based on Filter Criterion ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The caveat with ``Session.delete()`` is that you need to have an object handy already in order to delete. The Query includes a :func:`~sqlalchemy.orm.query.Query.delete` method which deletes based on filtering criteria:: session.query(User).filter(User.id==7).delete() The ``Query.delete()`` method includes functionality to "expire" objects already in the session which match the criteria. However it does have some caveats, including that "delete" and "delete-orphan" cascades won't be fully expressed for collections which are already loaded. See the API docs for :meth:`~sqlalchemy.orm.query.Query.delete` for more details. .. _session_flushing: Flushing -------- When the :class:`~sqlalchemy.orm.session.Session` is used with its default configuration, the flush step is nearly always done transparently. Specifically, the flush occurs before any individual :class:`~sqlalchemy.orm.query.Query` is issued, as well as within the :meth:`~.Session.commit` call before the transaction is committed. It also occurs before a SAVEPOINT is issued when :meth:`~.Session.begin_nested` is used. Regardless of the autoflush setting, a flush can always be forced by issuing :meth:`~.Session.flush`:: session.flush() The "flush-on-Query" aspect of the behavior can be disabled by constructing :class:`.sessionmaker` with the flag ``autoflush=False``:: Session = sessionmaker(autoflush=False) Additionally, autoflush can be temporarily disabled by setting the ``autoflush`` flag at any time:: mysession = Session() mysession.autoflush = False Some autoflush-disable recipes are available at `DisableAutoFlush `_. The flush process *always* occurs within a transaction, even if the :class:`~sqlalchemy.orm.session.Session` has been configured with ``autocommit=True``, a setting that disables the session's persistent transactional state. If no transaction is present, :meth:`~.Session.flush` creates its own transaction and commits it. Any failures during flush will always result in a rollback of whatever transaction is present. 
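A minimal sketch of handling a failed flush, assuming an already-configured ``session`` and a mapped object ``user`` that violates a NOT NULL constraint, might look like::

    from sqlalchemy.exc import IntegrityError

    session.add(user)
    try:
        session.flush()
    except IntegrityError:
        # the underlying database transaction has been rolled back; the
        # Session itself still needs an explicit rollback() before it can
        # be used again
        session.rollback()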
If the Session is not in ``autocommit=True`` mode, an explicit call to :meth:`~.Session.rollback` is required after a flush fails, even though the underlying transaction will have been rolled back already - this is so that the overall nesting pattern of so-called "subtransactions" is consistently maintained. .. _session_committing: Committing ---------- :meth:`~.Session.commit` is used to commit the current transaction. It always issues :meth:`~.Session.flush` beforehand to flush any remaining state to the database; this is independent of the "autoflush" setting. If no transaction is present, it raises an error. Note that the default behavior of the :class:`~sqlalchemy.orm.session.Session` is that a "transaction" is always present; this behavior can be disabled by setting ``autocommit=True``. In autocommit mode, a transaction can be initiated by calling the :meth:`~.Session.begin` method. .. note:: The term "transaction" here refers to a transactional construct within the :class:`.Session` itself which may be maintaining zero or more actual database (DBAPI) transactions. An individual DBAPI connection begins participation in the "transaction" as it is first used to execute a SQL statement, then remains present until the session-level "transaction" is completed. See :ref:`unitofwork_transaction` for further detail. Another behavior of :meth:`~.Session.commit` is that by default it expires the state of all instances present after the commit is complete. This is so that when the instances are next accessed, either through attribute access or by them being present in a :class:`~sqlalchemy.orm.query.Query` result set, they receive the most recent state. To disable this behavior, configure :class:`.sessionmaker` with ``expire_on_commit=False``. Normally, instances loaded into the :class:`~sqlalchemy.orm.session.Session` are never changed by subsequent queries; the assumption is that the current transaction is isolated so the state most recently loaded is correct as long as the transaction continues. Setting ``autocommit=True`` works against this model to some degree since the :class:`~sqlalchemy.orm.session.Session` behaves in exactly the same way with regard to attribute state, except no transaction is present. .. _session_rollback: Rolling Back ------------ :meth:`~.Session.rollback` rolls back the current transaction. With a default configured session, the post-rollback state of the session is as follows: * All transactions are rolled back and all connections returned to the connection pool, unless the Session was bound directly to a Connection, in which case the connection is still maintained (but still rolled back). * Objects which were initially in the *pending* state when they were added to the :class:`~sqlalchemy.orm.session.Session` within the lifespan of the transaction are expunged, corresponding to their INSERT statement being rolled back. The state of their attributes remains unchanged. * Objects which were marked as *deleted* within the lifespan of the transaction are promoted back to the *persistent* state, corresponding to their DELETE statement being rolled back. Note that if those objects were first *pending* within the transaction, that operation takes precedence instead. * All objects not expunged are fully expired. With that state understood, the :class:`~sqlalchemy.orm.session.Session` may safely continue usage after a rollback occurs. 
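For example, a short sketch of the *pending* case, again assuming the ``User`` mapping and ``session`` from earlier examples::

    u1 = User(name='pending user')
    session.add(u1)
    assert u1 in session      # pending within the current transaction

    session.rollback()

    # the pending object is expunged by the rollback; its attribute
    # state is left unchanged
    assert u1 not in session
    assert u1.name == 'pending user'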
When a :meth:`~.Session.flush` fails, typically for reasons like primary key, foreign key, or "not nullable" constraint violations, a :meth:`~.Session.rollback` is issued automatically (it's currently not possible for a flush to continue after a partial failure). However, the flush process always uses its own transactional demarcator called a *subtransaction*, which is described more fully in the docstrings for :class:`~sqlalchemy.orm.session.Session`. What it means here is that even though the database transaction has been rolled back, the end user must still issue :meth:`~.Session.rollback` to fully reset the state of the :class:`~sqlalchemy.orm.session.Session`. Expunging --------- Expunge removes an object from the Session, sending persistent instances to the detached state, and pending instances to the transient state: .. sourcecode:: python+sql session.expunge(obj1) To remove all items, call :meth:`~.Session.expunge_all` (this method was formerly known as ``clear()``). Closing ------- The :meth:`~.Session.close` method issues a :meth:`~.Session.expunge_all`, and :term:`releases` any transactional/connection resources. When connections are returned to the connection pool, transactional state is rolled back as well. Refreshing / Expiring --------------------- The Session normally works in the context of an ongoing transaction (with the default setting of autoflush=False). Most databases offer "isolated" transactions - this refers to a series of behaviors that allow the work within a transaction to remain consistent as time passes, regardless of the activities outside of that transaction. A key feature of a high degree of transaction isolation is that emitting the same SELECT statement twice will return the same results as when it was called the first time, even if the data has been modified in another transaction. For this reason, the :class:`.Session` gains very efficient behavior by loading the attributes of each instance only once. Subsequent reads of the same row in the same transaction are assumed to have the same value. The user application also gains directly from this assumption, that the transaction is regarded as a temporary shield against concurrent changes - a good application will ensure that isolation levels are set appropriately such that this assumption can be made, given the kind of data being worked with. To clear out the currently loaded state on an instance, the instance or its individual attributes can be marked as "expired", which results in a reload to occur upon next access of any of the instance's attrbutes. The instance can also be immediately reloaded from the database. The :meth:`~.Session.expire` and :meth:`~.Session.refresh` methods achieve this:: # immediately re-load attributes on obj1, obj2 session.refresh(obj1) session.refresh(obj2) # expire objects obj1, obj2, attributes will be reloaded # on the next access: session.expire(obj1) session.expire(obj2) When an expired object reloads, all non-deferred column-based attributes are loaded in one query. Current behavior for expired relationship-based attributes is that they load individually upon access - this behavior may be enhanced in a future release. When a refresh is invoked on an object, the ultimate operation is equivalent to a :meth:`.Query.get`, so any relationships configured with eager loading should also load within the scope of the refresh operation. :meth:`~.Session.refresh` and :meth:`~.Session.expire` also support being passed a list of individual attribute names in which to be refreshed. 
These names can refer to any attribute, column-based or relationship based:: # immediately re-load the attributes 'hello', 'world' on obj1, obj2 session.refresh(obj1, ['hello', 'world']) session.refresh(obj2, ['hello', 'world']) # expire the attributes 'hello', 'world' objects obj1, obj2, attributes will be reloaded # on the next access: session.expire(obj1, ['hello', 'world']) session.expire(obj2, ['hello', 'world']) The full contents of the session may be expired at once using :meth:`~.Session.expire_all`:: session.expire_all() Note that :meth:`~.Session.expire_all` is called **automatically** whenever :meth:`~.Session.commit` or :meth:`~.Session.rollback` are called. If using the session in its default mode of autocommit=False and with a well-isolated transactional environment (which is provided by most backends with the notable exception of MySQL MyISAM), there is virtually *no reason* to ever call :meth:`~.Session.expire_all` directly - plenty of state will remain on the current transaction until it is rolled back or committed or otherwise removed. :meth:`~.Session.refresh` and :meth:`~.Session.expire` similarly are usually only necessary when an UPDATE or DELETE has been issued manually within the transaction using :meth:`.Session.execute()`. Session Attributes ------------------ The :class:`~sqlalchemy.orm.session.Session` itself acts somewhat like a set-like collection. All items present may be accessed using the iterator interface:: for obj in session: print obj And presence may be tested for using regular "contains" semantics:: if obj in session: print "Object is present" The session is also keeping track of all newly created (i.e. pending) objects, all objects which have had changes since they were last loaded or saved (i.e. "dirty"), and everything that's been marked as deleted:: # pending objects recently added to the Session session.new # persistent objects which currently have changes detected # (this collection is now created on the fly each time the property is called) session.dirty # persistent objects that have been marked as deleted via session.delete(obj) session.deleted # dictionary of all persistent objects, keyed on their # identity key session.identity_map (Documentation: :attr:`.Session.new`, :attr:`.Session.dirty`, :attr:`.Session.deleted`, :attr:`.Session.identity_map`). Note that objects within the session are by default *weakly referenced*. This means that when they are dereferenced in the outside application, they fall out of scope from within the :class:`~sqlalchemy.orm.session.Session` as well and are subject to garbage collection by the Python interpreter. The exceptions to this include objects which are pending, objects which are marked as deleted, or persistent objects which have pending changes on them. After a full flush, these collections are all empty, and all objects are again weakly referenced. To disable the weak referencing behavior and force all objects within the session to remain until explicitly expunged, configure :class:`.sessionmaker` with the ``weak_identity_map=False`` setting. .. _unitofwork_cascades: Cascades ======== Mappers support the concept of configurable **cascade** behavior on :func:`~sqlalchemy.orm.relationship` constructs. This refers to how operations performed on a parent object relative to a particular :class:`.Session` should be propagated to items referred to by that relationship. 
The default cascade behavior is usually suitable for most situations, and the option is normally invoked explicitly in order to enable ``delete`` and ``delete-orphan`` cascades, which refer to how the relationship should be treated when the parent is marked for deletion as well as when a child is de-associated from its parent. Cascade behavior is configured by setting the ``cascade`` keyword argument on :func:`~sqlalchemy.orm.relationship`:: class Order(Base): __tablename__ = 'order' items = relationship("Item", cascade="all, delete-orphan") customer = relationship("User", secondary=user_orders_table, cascade="save-update") To set cascades on a backref, the same flag can be used with the :func:`~.sqlalchemy.orm.backref` function, which ultimately feeds its arguments back into :func:`~sqlalchemy.orm.relationship`:: class Item(Base): __tablename__ = 'item' order = relationship("Order", backref=backref("items", cascade="all, delete-orphan") ) The default value of ``cascade`` is ``save-update, merge``. The ``all`` symbol in the cascade options indicates that all cascade flags should be enabled, with the exception of ``delete-orphan``. Typically, cascade is usually left at its default, or configured as ``all, delete-orphan``, indicating the child objects should be treated as "owned" by the parent. The list of available values which can be specified in ``cascade`` are as follows: * ``save-update`` - Indicates that when an object is placed into a :class:`.Session` via :meth:`.Session.add`, all the objects associated with it via this :func:`~sqlalchemy.orm.relationship` should also be added to that same :class:`.Session`. Additionally, if this object is already present in a :class:`.Session`, child objects will be added to that session as they are associated with this parent, i.e. as they are appended to lists, added to sets, or otherwise associated with the parent. ``save-update`` cascade also cascades the *pending history* of the target attribute, meaning that objects which were removed from a scalar or collection attribute whose changes have not yet been flushed are also placed into the target session. This is because they may have foreign key attributes present which will need to be updated to no longer refer to the parent. The ``save-update`` cascade is on by default, and it's common to not even be aware of it. It's customary that only a single call to :meth:`.Session.add` against the lead object of a structure has the effect of placing the full structure of objects into the :class:`.Session` at once. However, it can be turned off, which would imply that objects associated with a parent would need to be placed individually using :meth:`.Session.add` calls for each one. Another default behavior of ``save-update`` cascade is that it will take effect in the reverse direction, that is, associating a child with a parent when a backref is present means both relationships are affected; the parent will be added to the child's session. To disable this somewhat indirect session addition, use the ``cascade_backrefs=False`` option described below in :ref:`backref_cascade`. * ``delete`` - This cascade indicates that when the parent object is marked for deletion, the related objects should also be marked for deletion. Without this cascade present, SQLAlchemy will set the foreign key on a one-to-many relationship to NULL when the parent object is deleted. When enabled, the row is instead deleted. 
``delete`` cascade is often used in conjunction with ``delete-orphan`` cascade, as is appropriate for an object whose foreign key is not intended to be nullable. On some backends, it's also a good idea to set ``ON DELETE`` on the foreign key itself; see the section :ref:`passive_deletes` for more details. Note that for many-to-many relationships which make usage of the ``secondary`` argument to :func:`~.sqlalchemy.orm.relationship`, SQLAlchemy always emits a DELETE for the association row in between "parent" and "child", when the parent is deleted or whenever the linkage between a particular parent and child is broken. * ``delete-orphan`` - This cascade adds behavior to the ``delete`` cascade, such that a child object will be marked for deletion when it is de-associated from the parent, not just when the parent is marked for deletion. This is a common feature when dealing with a related object that is "owned" by its parent, with a NOT NULL foreign key, so that removal of the item from the parent collection results in its deletion. ``delete-orphan`` cascade implies that each child object can only have one parent at a time, so is configured in the vast majority of cases on a one-to-many relationship. Setting it on a many-to-one or many-to-many relationship is more awkward; for this use case, SQLAlchemy requires that the :func:`~sqlalchemy.orm.relationship` be configured with the ``single_parent=True`` flag, which establishes Python-side validation that ensures the object is associated with only one parent at a time. * ``merge`` - This cascade indicates that the :meth:`.Session.merge` operation should be propagated from a parent that's the subject of the :meth:`.Session.merge` call down to referred objects. This cascade is also on by default. * ``refresh-expire`` - A less common option, indicates that the :meth:`.Session.expire` operation should be propagated from a parent down to referred objects. When using :meth:`.Session.refresh`, the referred objects are expired only, but not actually refreshed. * ``expunge`` - Indicate that when the parent object is removed from the :class:`.Session` using :meth:`.Session.expunge`, the operation should be propagated down to referred objects. .. _backref_cascade: Controlling Cascade on Backrefs ------------------------------- The ``save-update`` cascade takes place on backrefs by default. This means that, given a mapping such as this:: mapper(Order, order_table, properties={ 'items' : relationship(Item, backref='order') }) If an ``Order`` is already in the session, and is assigned to the ``order`` attribute of an ``Item``, the backref appends the ``Item`` to the ``items`` collection of that ``Order``, resulting in the ``save-update`` cascade taking place:: >>> o1 = Order() >>> session.add(o1) >>> o1 in session True >>> i1 = Item() >>> i1.order = o1 >>> i1 in o1.items True >>> i1 in session True This behavior can be disabled using the ``cascade_backrefs`` flag:: mapper(Order, order_table, properties={ 'items' : relationship(Item, backref='order', cascade_backrefs=False) }) So above, the assignment of ``i1.order = o1`` will append ``i1`` to the ``items`` collection of ``o1``, but will not add ``i1`` to the session. You can, of course, :meth:`~.Session.add` ``i1`` to the session at a later point. This option may be helpful for situations where an object needs to be kept out of a session until its construction is completed, but still needs to be given associations to objects which are already persistent in the target session. ..
_unitofwork_transaction: Managing Transactions ===================== A newly constructed :class:`.Session` may be said to be in the "begin" state. In this state, the :class:`.Session` has not established any connection or transactional state with any of the :class:`.Engine` objects that may be associated with it. The :class:`.Session` then receives requests to operate upon a database connection. Typically, this means it is called upon to execute SQL statements using a particular :class:`.Engine`, which may be via :meth:`.Session.query`, :meth:`.Session.execute`, or within a flush operation of pending data, which occurs when such state exists and :meth:`.Session.commit` or :meth:`.Session.flush` is called. As these requests are received, each new :class:`.Engine` encountered is associated with an ongoing transactional state maintained by the :class:`.Session`. When the first :class:`.Engine` is operated upon, the :class:`.Session` can be said to have left the "begin" state and entered "transactional" state. For each :class:`.Engine` encountered, a :class:`.Connection` is associated with it, which is acquired via the :meth:`.Engine.contextual_connect` method. If a :class:`.Connection` was directly associated with the :class:`.Session` (see :ref:`session_external_transaction` for an example of this), it is added to the transactional state directly. For each :class:`.Connection`, the :class:`.Session` also maintains a :class:`.Transaction` object, which is acquired by calling :meth:`.Connection.begin` on each :class:`.Connection`, or if the :class:`.Session` object has been established using the flag ``twophase=True``, a :class:`.TwoPhaseTransaction` object acquired via :meth:`.Connection.begin_twophase`. These transactions are all committed or rolled back corresponding to the invocation of the :meth:`.Session.commit` and :meth:`.Session.rollback` methods. A commit operation will also call the :meth:`.TwoPhaseTransaction.prepare` method on all transactions if applicable. When the transactional state is completed after a rollback or commit, the :class:`.Session` :term:`releases` all :class:`.Transaction` and :class:`.Connection` resources, and goes back to the "begin" state, which will again invoke new :class:`.Connection` and :class:`.Transaction` objects as new requests to emit SQL statements are received. The example below illustrates this lifecycle:: engine = create_engine("...") Session = sessionmaker(bind=engine) # new session. no connections are in use. session = Session() try: # first query. a Connection is acquired # from the Engine, and a Transaction # started. item1 = session.query(Item).get(1) # second query. the same Connection/Transaction # are used. item2 = session.query(Item).get(2) # pending changes are created. item1.foo = 'bar' item2.bar = 'foo' # commit. The pending changes above # are flushed via flush(), the Transaction # is committed, the Connection object closed # and discarded, the underlying DBAPI connection # returned to the connection pool. session.commit() except: # on rollback, the same closure of state # as that of commit proceeds. session.rollback() raise .. 
_session_begin_nested: Using SAVEPOINT --------------- SAVEPOINT transactions, if supported by the underlying engine, may be delineated using the :meth:`~.Session.begin_nested` method:: Session = sessionmaker() session = Session() session.add(u1) session.add(u2) session.begin_nested() # establish a savepoint session.add(u3) session.rollback() # rolls back u3, keeps u1 and u2 session.commit() # commits u1 and u2 :meth:`~.Session.begin_nested` may be called any number of times, which will issue a new SAVEPOINT with a unique identifier for each call. For each :meth:`~.Session.begin_nested` call, a corresponding :meth:`~.Session.rollback` or :meth:`~.Session.commit` must be issued. When :meth:`~.Session.begin_nested` is called, a :meth:`~.Session.flush` is unconditionally issued (regardless of the ``autoflush`` setting). This is so that when a :meth:`~.Session.rollback` occurs, the full state of the session is expired, thus causing all subsequent attribute/instance access to reference the full state of the :class:`~sqlalchemy.orm.session.Session` right before :meth:`~.Session.begin_nested` was called. :meth:`~.Session.begin_nested`, in the same manner as the less often used :meth:`~.Session.begin` method, returns a transactional object which also works as a context manager. It can be succinctly used around individual record inserts in order to catch things like unique constraint exceptions:: for record in records: try: with session.begin_nested(): session.merge(record) except: print "Skipped record %s" % record session.commit() .. _session_autocommit: Autocommit Mode --------------- The example of :class:`.Session` transaction lifecycle illustrated at the start of :ref:`unitofwork_transaction` applies to a :class:`.Session` configured in the default mode of ``autocommit=False``. Constructing a :class:`.Session` with ``autocommit=True`` produces a :class:`.Session` placed into "autocommit" mode, where each SQL statement invoked by a :meth:`.Session.query` or :meth:`.Session.execute` occurs using a new connection from the connection pool, discarding it after results have been iterated. The :meth:`.Session.flush` operation still occurs within the scope of a single transaction, though this transaction is closed out after the :meth:`.Session.flush` operation completes. .. warning:: "autocommit" mode should **not be considered for general use**. If used, it should always be combined with the usage of :meth:`.Session.begin` and :meth:`.Session.commit`, to ensure a transaction demarcation. Executing queries outside of a demarcated transaction is a legacy mode of usage, and can in some cases lead to concurrent connection checkouts. In the absence of a demarcated transaction, the :class:`.Session` cannot make appropriate decisions as to when autoflush should occur or when auto-expiration should occur, so these features should be disabled with ``autoflush=False, expire_on_commit=False``. Modern usage of "autocommit" is for framework integrations that need to control specifically when the "begin" state occurs. A session which is configured with ``autocommit=True`` may be placed into the "begin" state using the :meth:`.Session.begin` method.
After the cycle completes upon :meth:`.Session.commit` or :meth:`.Session.rollback`, connection and transaction resources are :term:`released` and the :class:`.Session` goes back into "autocommit" mode, until :meth:`.Session.begin` is called again:: Session = sessionmaker(bind=engine, autocommit=True) session = Session() session.begin() try: item1 = session.query(Item).get(1) item2 = session.query(Item).get(2) item1.foo = 'bar' item2.bar = 'foo' session.commit() except: session.rollback() raise The :meth:`.Session.begin` method also returns a transactional token which is compatible with the Python 2.6 ``with`` statement:: Session = sessionmaker(bind=engine, autocommit=True) session = Session() with session.begin(): item1 = session.query(Item).get(1) item2 = session.query(Item).get(2) item1.foo = 'bar' item2.bar = 'foo' .. _session_subtransactions: Using Subtransactions with Autocommit ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ A subtransaction indicates usage of the :meth:`.Session.begin` method in conjunction with the ``subtransactions=True`` flag. This produces a non-transactional, delimiting construct that allows nesting of calls to :meth:`~.Session.begin` and :meth:`~.Session.commit`. Its purpose is to allow the construction of code that can function within a transaction both independently of any external code that starts a transaction, as well as within a block that has already demarcated a transaction. ``subtransactions=True`` is generally only useful in conjunction with autocommit, and is equivalent to the pattern described at :ref:`connections_nested_transactions`, where any number of functions can call :meth:`.Connection.begin` and :meth:`.Transaction.commit` as though they are the initiator of the transaction, but in fact may be participating in an already ongoing transaction:: # method_a starts a transaction and calls method_b def method_a(session): session.begin(subtransactions=True) try: method_b(session) session.commit() # transaction is committed here except: session.rollback() # rolls back the transaction raise # method_b also starts a transaction, but when # called from method_a participates in the ongoing # transaction. def method_b(session): session.begin(subtransactions=True) try: session.add(SomeObject('bat', 'lala')) session.commit() # transaction is not committed yet except: session.rollback() # rolls back the transaction, in this case # the one that was initiated in method_a(). raise # create a Session and call method_a session = Session(autocommit=True) method_a(session) session.close() Subtransactions are used by the :meth:`.Session.flush` process to ensure that the flush operation takes place within a transaction, regardless of autocommit. When autocommit is disabled, it is still useful in that it forces the :class:`.Session` into a "pending rollback" state, as a failed flush cannot be resumed in mid-operation, where the end user still maintains the "scope" of the transaction overall. .. _session_twophase: Enabling Two-Phase Commit ------------------------- For backends which support two-phase operation (currently MySQL and PostgreSQL), the session can be instructed to use two-phase commit semantics. This will coordinate the committing of transactions across databases so that the transaction is either committed or rolled back in all databases. You can also :meth:`~.Session.prepare` the session for interacting with transactions not managed by SQLAlchemy.
To use two phase transactions set the flag ``twophase=True`` on the session:: engine1 = create_engine('postgresql://db1') engine2 = create_engine('postgresql://db2') Session = sessionmaker(twophase=True) # bind User operations to engine 1, Account operations to engine 2 Session.configure(binds={User:engine1, Account:engine2}) session = Session() # .... work with accounts and users # commit. session will issue a flush to all DBs, and a prepare step to all DBs, # before committing both transactions session.commit() Embedding SQL Insert/Update Expressions into a Flush ===================================================== This feature allows the value of a database column to be set to a SQL expression instead of a literal value. It's especially useful for atomic updates, calling stored procedures, etc. All you do is assign an expression to an attribute:: class SomeClass(object): pass mapper(SomeClass, some_table) someobject = session.query(SomeClass).get(5) # set 'value' attribute to a SQL expression adding one someobject.value = some_table.c.value + 1 # issues "UPDATE some_table SET value=value+1" session.commit() This technique works both for INSERT and UPDATE statements. After the flush/commit operation, the ``value`` attribute on ``someobject`` above is expired, so that when next accessed the newly generated value will be loaded from the database. .. _session_sql_expressions: Using SQL Expressions with Sessions ==================================== SQL expressions and strings can be executed via the :class:`~sqlalchemy.orm.session.Session` within its transactional context. This is most easily accomplished using the :meth:`~.Session.execute` method, which returns a :class:`~sqlalchemy.engine.ResultProxy` in the same manner as an :class:`~sqlalchemy.engine.Engine` or :class:`~sqlalchemy.engine.Connection`:: Session = sessionmaker(bind=engine) session = Session() # execute a string statement result = session.execute("select * from table where id=:id", {'id':7}) # execute a SQL expression construct result = session.execute(select([mytable]).where(mytable.c.id==7)) The current :class:`~sqlalchemy.engine.Connection` held by the :class:`~sqlalchemy.orm.session.Session` is accessible using the :meth:`~.Session.connection` method:: connection = session.connection() The examples above deal with a :class:`~sqlalchemy.orm.session.Session` that's bound to a single :class:`~sqlalchemy.engine.Engine` or :class:`~sqlalchemy.engine.Connection`. To execute statements using a :class:`~sqlalchemy.orm.session.Session` which is bound either to multiple engines, or none at all (i.e. relies upon bound metadata), both :meth:`~.Session.execute` and :meth:`~.Session.connection` accept a ``mapper`` keyword argument, which is passed a mapped class or :class:`~sqlalchemy.orm.mapper.Mapper` instance, which is used to locate the proper context for the desired engine:: Session = sessionmaker() session = Session() # need to specify mapper or class when executing result = session.execute("select * from table where id=:id", {'id':7}, mapper=MyMappedClass) result = session.execute(select([mytable], mytable.c.id==7), mapper=MyMappedClass) connection = session.connection(MyMappedClass) .. _session_external_transaction: Joining a Session into an External Transaction =============================================== If a :class:`.Connection` is being used which is already in a transactional state (i.e. 
has a :class:`.Transaction` established), a :class:`.Session` can be made to participate within that transaction by just binding the :class:`.Session` to that :class:`.Connection`. The usual rationale for this is a test suite that allows ORM code to work freely with a :class:`.Session`, including the ability to call :meth:`.Session.commit`, where afterwards the entire database interaction is rolled back:: from sqlalchemy.orm import sessionmaker from sqlalchemy import create_engine from unittest import TestCase # global application scope. create Session class, engine Session = sessionmaker() engine = create_engine('postgresql://...') class SomeTest(TestCase): def setUp(self): # connect to the database self.connection = engine.connect() # begin a non-ORM transaction self.trans = self.connection.begin() # bind an individual Session to the connection self.session = Session(bind=self.connection) def test_something(self): # use the session in tests. self.session.add(Foo()) self.session.commit() def tearDown(self): # rollback - everything that happened with the # Session above (including calls to commit()) # is rolled back. self.trans.rollback() self.session.close() # return connection to the Engine self.connection.close() Above, we issue :meth:`.Session.commit` as well as :meth:`.Transaction.rollback`. This is an example of where we take advantage of the :class:`.Connection` object's ability to maintain *subtransactions*, or nested begin/commit-or-rollback pairs where only the outermost begin/commit pair actually commits the transaction, or if the outermost block rolls back, everything is rolled back. .. _unitofwork_contextual: Contextual/Thread-local Sessions ================================= Recall from the section :ref:`session_faq_whentocreate`, the concept of "session scopes" was introduced, with an emphasis on web applications and the practice of linking the scope of a :class:`.Session` with that of a web request. Most modern web frameworks include integration tools so that the scope of the :class:`.Session` can be managed automatically, and these tools should be used as they are available. SQLAlchemy includes its own helper object, which helps with the establishment of user-defined :class:`.Session` scopes. It is also used by third-party integration systems to help construct their integration schemes. The object is the :class:`.scoped_session` object, and it represents a **registry** of :class:`.Session` objects. If you're not familiar with the registry pattern, a good introduction can be found in `Patterns of Enterprise Architecture `_. .. note:: The :class:`.scoped_session` object is a very popular and useful object used by many SQLAlchemy applications. However, it is important to note that it presents **only one approach** to the issue of :class:`.Session` management. If you're new to SQLAlchemy, and especially if the term "thread-local variable" seems strange to you, we recommend that if possible you first familiarize yourself with an off-the-shelf integration system such as `Flask-SQLAlchemy `_ or `zope.sqlalchemy `_. A :class:`.scoped_session` is constructed by calling it, passing it a **factory** which can create new :class:`.Session` objects. A factory is just something that produces a new object when called, and in the case of :class:`.Session`, the most common factory is the :class:`.sessionmaker`, introduced earlier in this section.
Below we illustrate this usage:: >>> from sqlalchemy.orm import scoped_session >>> from sqlalchemy.orm import sessionmaker >>> session_factory = sessionmaker(bind=some_engine) >>> Session = scoped_session(session_factory) The :class:`.scoped_session` object we've created will now call upon the :class:`.sessionmaker` when we "call" the registry:: >>> some_session = Session() Above, ``some_session`` is an instance of :class:`.Session`, which we can now use to talk to the database. This same :class:`.Session` is also present within the :class:`.scoped_session` registry we've created. If we call upon the registry a second time, we get back the **same** :class:`.Session`:: >>> some_other_session = Session() >>> some_session is some_other_session True This pattern allows disparate sections of the application to call upon a global :class:`.scoped_session`, so that all those areas may share the same session without the need to pass it explicitly. The :class:`.Session` we've established in our registry will remain, until we explicitly tell our registry to dispose of it, by calling :meth:`.scoped_session.remove`:: >>> Session.remove() The :meth:`.scoped_session.remove` method first calls :meth:`.Session.close` on the current :class:`.Session`, which has the effect of releasing any connection/transactional resources owned by the :class:`.Session` first, then discarding the :class:`.Session` itself. "Releasing" here means that connections are returned to their connection pool and any transactional state is rolled back, ultimately using the ``rollback()`` method of the underlying DBAPI connection. At this point, the :class:`.scoped_session` object is "empty", and will create a **new** :class:`.Session` when called again. As illustrated below, this is not the same :class:`.Session` we had before:: >>> new_session = Session() >>> new_session is some_session False The above series of steps illustrates the idea of the "registry" pattern in a nutshell. With that basic idea in hand, we can discuss some of the details of how this pattern proceeds. Implicit Method Access ---------------------- The job of the :class:`.scoped_session` is simple: hold onto a :class:`.Session` for all who ask for it. As a means of producing more transparent access to this :class:`.Session`, the :class:`.scoped_session` also includes **proxy behavior**, meaning that the registry itself can be treated just like a :class:`.Session` directly; when methods are called on this object, they are **proxied** to the underlying :class:`.Session` being maintained by the registry:: Session = scoped_session(some_factory) # equivalent to: # # session = Session() # print session.query(MyClass).all() # print Session.query(MyClass).all() The above code accomplishes the same task as that of acquiring the current :class:`.Session` by calling upon the registry, then using that :class:`.Session`. Thread-Local Scope ------------------ Users who are familiar with multithreaded programming will note that representing anything as a global variable is usually a bad idea, as it implies that the global object will be accessed by many threads concurrently. The :class:`.Session` object is entirely designed to be used in a **non-concurrent** fashion, which in terms of multithreading means "only in one thread at a time".
So our above example of :class:`.scoped_session` usage, where the same :class:`.Session` object is maintained across multiple calls, suggests that some process needs to be in place such that multiple calls across many threads don't actually get a handle to the same session. We call this notion **thread local storage**, which means that a special object is used to maintain a distinct object for each application thread. Python provides this via the `threading.local() `_ construct. The :class:`.scoped_session` object by default uses this object as storage, so that a single :class:`.Session` is maintained for all who call upon the :class:`.scoped_session` registry, but only within the scope of a single thread. Callers who call upon the registry in a different thread get a :class:`.Session` instance that is local to that other thread. Using this technique, the :class:`.scoped_session` provides a quick and relatively simple (if one is familiar with thread-local storage) way of providing a single, global object in an application that is safe to be called upon from multiple threads. The :meth:`.scoped_session.remove` method, as always, removes the current :class:`.Session` associated with the thread, if any. However, one advantage of the ``threading.local()`` object is that if the application thread itself ends, the "storage" for that thread is also garbage collected. So it is in fact "safe" to use thread local scope with an application that spawns and tears down threads, without the need to call :meth:`.scoped_session.remove`. However, the scope of transactions themselves, i.e. ending them via :meth:`.Session.commit` or :meth:`.Session.rollback`, will usually still be something that must be explicitly arranged for at the appropriate time, unless the application actually ties the lifespan of a thread to the lifespan of a transaction. .. _session_lifespan: Using Thread-Local Scope with Web Applications ---------------------------------------------- As discussed in the section :ref:`session_faq_whentocreate`, a web application is architected around the concept of a **web request**, and integrating such an application with the :class:`.Session` usually implies that the :class:`.Session` will be associated with that request. As it turns out, most Python web frameworks, with notable exceptions such as the asynchronous frameworks Twisted and Tornado, use threads in a simple way, such that a particular web request is received, processed, and completed within the scope of a single *worker thread*. When the request ends, the worker thread is released to a pool of workers where it is available to handle another request. This simple correspondence of web request and thread means that to associate a :class:`.Session` with a thread implies it is also associated with the web request running within that thread, and vice versa, provided that the :class:`.Session` is created only after the web request begins and torn down just before the web request ends. So it is a common practice to use :class:`.scoped_session` as a quick way to integrate the :class:`.Session` with a web application.
The sequence diagram below illustrates this flow:: Web Server Web Framework SQLAlchemy ORM Code -------------- -------------- ------------------------------ startup -> Web framework # Session registry is established initializes Session = scoped_session(sessionmaker()) incoming web request -> web request -> # The registry is *optionally* starts # called upon explicitly to create # a Session local to the thread and/or request Session() # the Session registry can otherwise # be used at any time, creating the # request-local Session() if not present, # or returning the existing one Session.query(MyClass) # ... Session.add(some_object) # ... # if data was modified, commit the # transaction Session.commit() web request ends -> # the registry is instructed to # remove the Session Session.remove() sends output <- outgoing web <- response Using the above flow, the process of integrating the :class:`.Session` with the web application has exactly two requirements: 1. Create a single :class:`.scoped_session` registry when the web application first starts, ensuring that this object is accessible by the rest of the application. 2. Ensure that :meth:`.scoped_session.remove` is called when the web request ends, usually by integrating with the web framework's event system to establish an "on request end" event. As noted earlier, the above pattern is **just one potential way** to integrate a :class:`.Session` with a web framework, one which in particular makes the significant assumption that the **web framework associates web requests with application threads**. It is however **strongly recommended that the integration tools provided with the web framework itself be used, if available**, instead of :class:`.scoped_session`. In particular, while using a thread local can be convenient, it is preferable that the :class:`.Session` be associated **directly with the request**, rather than with the current thread. The next section on custom scopes details a more advanced configuration which can combine the usage of :class:`.scoped_session` with direct request based scope, or any kind of scope. Using Custom Created Scopes --------------------------- The :class:`.scoped_session` object's default behavior of "thread local" scope is only one of many options on how to "scope" a :class:`.Session`. A custom scope can be defined based on any existing system of getting at "the current thing we are working with". Suppose a web framework defines a library function ``get_current_request()``. An application built using this framework can call this function at any time, and the result will be some kind of ``Request`` object that represents the current request being processed. If the ``Request`` object is hashable, then this function can be easily integrated with :class:`.scoped_session` to associate the :class:`.Session` with the request. Below we illustrate this in conjunction with a hypothetical event marker provided by the web framework ``on_request_end``, which allows code to be invoked whenever a request ends:: from my_web_framework import get_current_request, on_request_end from sqlalchemy.orm import scoped_session, sessionmaker Session = scoped_session(sessionmaker(bind=some_engine), scopefunc=get_current_request) @on_request_end def remove_session(req): Session.remove() Above, we instantiate :class:`.scoped_session` in the usual way, except that we pass our request-returning function as the "scopefunc". 
This instructs :class:`.scoped_session` to use this function to generate a dictionary key whenever the registry is called upon to return the current :class:`.Session`. In this case it is particularly important that we ensure a reliable "remove" system is implemented, as this dictionary is not otherwise self-managed. Contextual Session API ---------------------- .. autoclass:: sqlalchemy.orm.scoping.scoped_session :members: .. autoclass:: sqlalchemy.util.ScopedRegistry :members: .. autoclass:: sqlalchemy.util.ThreadLocalRegistry .. _session_partitioning: Partitioning Strategies ======================= Simple Vertical Partitioning ---------------------------- Vertical partitioning places different kinds of objects, or different tables, across multiple databases:: engine1 = create_engine('postgresql://db1') engine2 = create_engine('postgresql://db2') Session = sessionmaker(twophase=True) # bind User operations to engine 1, Account operations to engine 2 Session.configure(binds={User:engine1, Account:engine2}) session = Session() Above, operations against either class will make usage of the :class:`.Engine` linked to that class. Upon a flush operation, similar rules take place to ensure each class is written to the right database. The transactions among the multiple databases can optionally be coordinated via two phase commit, if the underlying backend supports it. See :ref:`session_twophase` for an example. Custom Vertical Partitioning ---------------------------- More comprehensive rule-based class-level partitioning can be built by overriding the :meth:`.Session.get_bind` method. Below we illustrate a custom :class:`.Session` which delivers the following rules: 1. Flush operations are delivered to the engine named ``master``. 2. Operations on objects that subclass ``MyOtherClass`` all occur on the ``other`` engine. 3. Read operations for all other classes occur on a random choice of the ``slave1`` or ``slave2`` database. :: engines = { 'master':create_engine("sqlite:///master.db"), 'other':create_engine("sqlite:///other.db"), 'slave1':create_engine("sqlite:///slave1.db"), 'slave2':create_engine("sqlite:///slave2.db"), } from sqlalchemy.orm import Session, sessionmaker import random class RoutingSession(Session): def get_bind(self, mapper=None, clause=None): if mapper and issubclass(mapper.class_, MyOtherClass): return engines['other'] elif self._flushing: return engines['master'] else: return engines[ random.choice(['slave1','slave2']) ] The above :class:`.Session` class is plugged in using the ``class_`` argument to :class:`.sessionmaker`:: Session = sessionmaker(class_=RoutingSession) This approach can be combined with multiple :class:`.MetaData` objects, using an approach such as the declarative ``__abstract__`` keyword, described at :ref:`declarative_abstract`. Horizontal Partitioning ----------------------- Horizontal partitioning partitions the rows of a single table (or a set of tables) across multiple databases. See the "sharding" example: :ref:`examples_sharding`. Sessions API ============ Session and sessionmaker() --------------------------- .. autoclass:: sessionmaker :members: :inherited-members: .. autoclass:: sqlalchemy.orm.session.Session :members: :inherited-members: .. autoclass:: sqlalchemy.orm.session.SessionTransaction :members: Session Utilities ----------------- .. autofunction:: make_transient .. autofunction:: object_session ..
autofunction:: was_deleted Attribute and State Management Utilities ----------------------------------------- These functions are provided by the SQLAlchemy attribute instrumentation API to provide a detailed interface for dealing with instances, attribute values, and history. Some of them are useful when constructing event listener functions, such as those described in :doc:`/orm/events`. .. currentmodule:: sqlalchemy.orm.util .. autofunction:: object_state .. currentmodule:: sqlalchemy.orm.attributes .. autofunction:: del_attribute .. autofunction:: get_attribute .. autofunction:: get_history .. autofunction:: init_collection .. autofunction:: flag_modified .. function:: instance_state Return the :class:`.InstanceState` for a given mapped object. This function is the internal version of :func:`.object_state`. The :func:`.object_state` and/or the :func:`.inspect` function is preferred here as they each emit an informative exception if the given object is not mapped. .. autofunction:: sqlalchemy.orm.instrumentation.is_instrumented .. autofunction:: set_attribute .. autofunction:: set_committed_value .. autoclass:: History :members: SQLAlchemy-0.8.4/doc/_sources/orm/tutorial.txt0000644000076500000240000025153312251147171022033 0ustar classicstaff00000000000000.. _ormtutorial_toplevel: ========================== Object Relational Tutorial ========================== The SQLAlchemy Object Relational Mapper presents a method of associating user-defined Python classes with database tables, and instances of those classes (objects) with rows in their corresponding tables. It includes a system that transparently synchronizes all changes in state between objects and their related rows, called a `unit of work `_, as well as a system for expressing database queries in terms of the user defined classes and their defined relationships between each other. The ORM is in contrast to the SQLAlchemy Expression Language, upon which the ORM is constructed. Whereas the SQL Expression Language, introduced in :ref:`sqlexpression_toplevel`, presents a system of representing the primitive constructs of the relational database directly without opinion, the ORM presents a high level and abstracted pattern of usage, which itself is an example of applied usage of the Expression Language. While there is overlap among the usage patterns of the ORM and the Expression Language, the similarities are more superficial than they may at first appear. One approaches the structure and content of data from the perspective of a user-defined `domain model `_ which is transparently persisted and refreshed from its underlying storage model. The other approaches it from the perspective of literal schema and SQL expression representations which are explicitly composed into messages consumed individually by the database. A successful application may be constructed using the Object Relational Mapper exclusively. In advanced situations, an application constructed with the ORM may make occasional usage of the Expression Language directly in certain areas where specific database interactions are required. The following tutorial is in doctest format, meaning each ``>>>`` line represents something you can type at a Python command prompt, and the following text represents the expected return value. 
Version Check ============= A quick check to verify that we are on at least **version 0.8** of SQLAlchemy:: >>> import sqlalchemy >>> sqlalchemy.__version__ # doctest:+SKIP 0.8.0 Connecting ========== For this tutorial we will use an in-memory-only SQLite database. To connect we use :func:`~sqlalchemy.create_engine`:: >>> from sqlalchemy import create_engine >>> engine = create_engine('sqlite:///:memory:', echo=True) The ``echo`` flag is a shortcut to setting up SQLAlchemy logging, which is accomplished via Python's standard ``logging`` module. With it enabled, we'll see all the generated SQL produced. If you are working through this tutorial and want less output generated, set it to ``False``. This tutorial will format the SQL behind a popup window so it doesn't get in our way; just click the "SQL" links to see what's being generated. The return value of :func:`.create_engine` is an instance of :class:`.Engine`, and it represents the core interface to the database, adapted through a **dialect** that handles the details of the database and DBAPI in use. In this case the SQLite dialect will interpret instructions to the Python built-in ``sqlite3`` module. The :class:`.Engine` has not actually tried to connect to the database yet; that happens only the first time it is asked to perform a task against the database. We can illustrate this by asking it to perform a simple SELECT statement: .. sourcecode:: python+sql {sql}>>> engine.execute("select 1").scalar() select 1 () {stop}1 As the :meth:`.Engine.execute` method is called, the :class:`.Engine` establishes a connection to the SQLite database, which is then used to emit the SQL. The connection is then returned to an internal connection pool where it will be reused on subsequent statement executions. While we illustrate direct usage of the :class:`.Engine` here, this isn't typically necessary when using the ORM, where the :class:`.Engine`, once created, is used behind the scenes by the ORM as we'll see shortly. Declare a Mapping ================= When using the ORM, the configurational process starts by describing the database tables we'll be dealing with, and then by defining our own classes which will be mapped to those tables. In modern SQLAlchemy, these two tasks are usually performed together, using a system known as :ref:`declarative_toplevel`, which allows us to create classes that include directives to describe the actual database table they will be mapped to. Classes mapped using the Declarative system are defined in terms of a base class which maintains a catalog of classes and tables relative to that base - this is known as the **declarative base class**. Our application will usually have just one instance of this base in a commonly imported module. We create the base class using the :func:`.declarative_base` function, as follows:: >>> from sqlalchemy.ext.declarative import declarative_base >>> Base = declarative_base() Now that we have a "base", we can define any number of mapped classes in terms of it. We will start with just a single table called ``users``, which will store records for the end-users using our application. A new class called ``User`` will be the class to which we map this table. 
The imports we'll need to accomplish this include objects that represent the components of our table, including the :class:`.Column` class which represents a database column, as well as the :class:`.Integer` and :class:`.String` classes that represent basic datatypes used in columns:: >>> from sqlalchemy import Column, Integer, String >>> class User(Base): ... __tablename__ = 'users' ... ... id = Column(Integer, primary_key=True) ... name = Column(String) ... fullname = Column(String) ... password = Column(String) ... ... def __init__(self, name, fullname, password): ... self.name = name ... self.fullname = fullname ... self.password = password ... ... def __repr__(self): ... return "<User('%s','%s', '%s')>" % (self.name, self.fullname, self.password) The above ``User`` class establishes details about the table being mapped, including the name of the table denoted by the ``__tablename__`` attribute, a set of columns ``id``, ``name``, ``fullname`` and ``password``, where the ``id`` column will also be the primary key of the table. While it's certainly possible that some database tables don't have primary key columns (as is also the case with views, which can also be mapped), the ORM, in order to actually map to a particular table, needs there to be at least one column denoted as a primary key column; multiple-column, i.e. composite, primary keys are of course entirely feasible as well. We define a constructor via ``__init__()`` and also a ``__repr__()`` method - both are optional. The class of course can have any number of other methods and attributes as required by the application, as it's basically just a plain Python class. Inheriting from ``Base`` is also only a requirement of the declarative configurational system, which itself is optional and relatively open ended; at its core, the SQLAlchemy ORM only requires that a class be a so-called "new style class", that is, it inherits from ``object`` in Python 2, in order to be mapped. All classes in Python 3 are "new style" classes. .. topic:: The Non Opinionated Philosophy In our ``User`` mapping example, it was required that we identify the name of the table in use, as well as the names and characteristics of all columns which we care about, including which column or columns represent the primary key, as well as some basic information about the types in use. SQLAlchemy never makes assumptions about these decisions - the developer must always be explicit about specific conventions in use. However, that doesn't mean the task can't be automated. While this tutorial will keep things explicit, developers are encouraged to make use of helper functions as well as "Declarative Mixins" to automate their tasks in large scale applications. The section :ref:`declarative_mixins` introduces many of these techniques. With our ``User`` class constructed via the Declarative system, we have defined information about our table, known as **table metadata**, as well as a user-defined class which is linked to this table, known as a **mapped class**. Declarative has provided for us a shorthand system for what in SQLAlchemy is called a "Classical Mapping", which specifies these two units separately and is discussed in :ref:`classical_mapping`. The table is actually represented by a datastructure known as :class:`.Table`, and the mapping represented by a :class:`.Mapper` object generated by a function called :func:`.mapper`.
Declarative performs both of these steps for us, making available the :class:`.Table` it has created via the ``__table__`` attribute:: >>> User.__table__ # doctest: +NORMALIZE_WHITESPACE Table('users', MetaData(None), Column('id', Integer(), table=<users>, primary_key=True, nullable=False), Column('name', String(), table=<users>), Column('fullname', String(), table=<users>), Column('password', String(), table=<users>), schema=None) and while rarely needed, making available the :class:`.Mapper` object via the ``__mapper__`` attribute:: >>> User.__mapper__ # doctest: +ELLIPSIS <Mapper at 0x...; User> The Declarative base class also contains a catalog of all the :class:`.Table` objects that have been defined called :class:`.MetaData`, available via the ``.metadata`` attribute. In this example, we are defining new tables that have yet to be created in our SQLite database, so one helpful feature the :class:`.MetaData` object offers is the ability to issue CREATE TABLE statements to the database for all tables that don't yet exist. We illustrate this by calling the :meth:`.MetaData.create_all` method, passing in our :class:`.Engine` as a source of database connectivity. We will see that special commands are first emitted to check for the presence of the ``users`` table, and following that the actual ``CREATE TABLE`` statement: .. sourcecode:: python+sql >>> Base.metadata.create_all(engine) # doctest:+ELLIPSIS,+NORMALIZE_WHITESPACE {opensql}PRAGMA table_info("users") () CREATE TABLE users ( id INTEGER NOT NULL, name VARCHAR, fullname VARCHAR, password VARCHAR, PRIMARY KEY (id) ) () COMMIT .. topic:: Minimal Table Descriptions vs. Full Descriptions Users familiar with the syntax of CREATE TABLE may notice that the VARCHAR columns were generated without a length; on SQLite and Postgresql, this is a valid datatype, but on others, it's not allowed. So if running this tutorial on one of those databases, and you wish to use SQLAlchemy to issue CREATE TABLE, a "length" may be provided to the :class:`~sqlalchemy.types.String` type as below:: Column(String(50)) The length field on :class:`~sqlalchemy.types.String`, as well as similar precision/scale fields available on :class:`~sqlalchemy.types.Integer`, :class:`~sqlalchemy.types.Numeric`, etc. are not referenced by SQLAlchemy other than when creating tables. Additionally, Firebird and Oracle require sequences to generate new primary key identifiers, and SQLAlchemy doesn't generate or assume these without being instructed. For that, you use the :class:`~sqlalchemy.schema.Sequence` construct:: from sqlalchemy import Sequence Column(Integer, Sequence('user_id_seq'), primary_key=True) A full, foolproof :class:`~sqlalchemy.schema.Table` generated via our declarative mapping is therefore:: class User(Base): __tablename__ = 'users' id = Column(Integer, Sequence('user_id_seq'), primary_key=True) name = Column(String(50)) fullname = Column(String(50)) password = Column(String(12)) def __init__(self, name, fullname, password): self.name = name self.fullname = fullname self.password = password def __repr__(self): return "<User('%s','%s', '%s')>" % (self.name, self.fullname, self.password) We include this more verbose table definition separately to highlight the difference between a minimal construct geared primarily towards in-Python usage only, versus one that will be used to emit CREATE TABLE statements on a particular set of backends with more stringent requirements.
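For comparison only, and not as part of this tutorial's doctests, the "Classical Mapping" mentioned earlier would express the same mapping as two separate steps, constructing the :class:`.Table` directly and then invoking :func:`.mapper`. The following is a rough sketch of that form; see :ref:`classical_mapping` for the authoritative treatment::

    from sqlalchemy import Table, MetaData, Column, Integer, String
    from sqlalchemy.orm import mapper

    metadata = MetaData()

    # the table metadata, defined explicitly rather than via __tablename__
    users_table = Table('users', metadata,
        Column('id', Integer, primary_key=True),
        Column('name', String(50)),
        Column('fullname', String(50)),
        Column('password', String(12))
    )

    # a plain Python class, not derived from Base
    class User(object):
        def __init__(self, name, fullname, password):
            self.name = name
            self.fullname = fullname
            self.password = password

    # associate the class with the table
    mapper(User, users_table)

The remainder of this tutorial continues with the Declarative form shown above.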
Create an Instance of the Mapped Class ====================================== With mappings complete, let's now create and inspect a ``User`` object:: >>> ed_user = User('ed', 'Ed Jones', 'edspassword') >>> ed_user.name 'ed' >>> ed_user.password 'edspassword' >>> str(ed_user.id) 'None' The ``id`` attribute, which while not defined by our ``__init__()`` method, exists with a value of ``None`` on our ``User`` instance due to the ``id`` column we declared in our mapping. By default, the ORM creates class attributes for all columns present in the table being mapped. These class attributes exist as :term:`descriptors`, and define **instrumentation** for the mapped class. The functionality of this instrumentation includes the ability to fire on change events, track modifications, and to automatically load new data from the database when needed. Since we have not yet told SQLAlchemy to persist ``Ed Jones`` within the database, its id is ``None``. When we persist the object later, this attribute will be populated with a newly generated value. .. topic:: The default ``__init__()`` method Note that in our ``User`` example we supplied an ``__init__()`` method, which receives ``name``, ``fullname`` and ``password`` as positional arguments. The Declarative system supplies for us a default constructor if one is not already present, which accepts keyword arguments of the same name as that of the mapped attributes. Below we define ``User`` without specifying a constructor:: class User(Base): __tablename__ = 'users' id = Column(Integer, primary_key=True) name = Column(String) fullname = Column(String) password = Column(String) Our ``User`` class above will make usage of the default constructor, and provide ``id``, ``name``, ``fullname``, and ``password`` as keyword arguments:: u1 = User(name='ed', fullname='Ed Jones', password='foobar') Creating a Session ================== We're now ready to start talking to the database. The ORM's "handle" to the database is the :class:`~sqlalchemy.orm.session.Session`. When we first set up the application, at the same level as our :func:`~sqlalchemy.create_engine` statement, we define a :class:`~sqlalchemy.orm.session.Session` class which will serve as a factory for new :class:`~sqlalchemy.orm.session.Session` objects:: >>> from sqlalchemy.orm import sessionmaker >>> Session = sessionmaker(bind=engine) In the case where your application does not yet have an :class:`~sqlalchemy.engine.Engine` when you define your module-level objects, just set it up like this:: >>> Session = sessionmaker() Later, when you create your engine with :func:`~sqlalchemy.create_engine`, connect it to the :class:`~sqlalchemy.orm.session.Session` using :meth:`~.sessionmaker.configure`:: >>> Session.configure(bind=engine) # once engine is available This custom-made :class:`~sqlalchemy.orm.session.Session` class will create new :class:`~sqlalchemy.orm.session.Session` objects which are bound to our database. Other transactional characteristics may be defined when calling :func:`~.sessionmaker` as well; these are described in a later chapter. Then, whenever you need to have a conversation with the database, you instantiate a :class:`~sqlalchemy.orm.session.Session`:: >>> session = Session() The above :class:`~sqlalchemy.orm.session.Session` is associated with our SQLite-enabled :class:`.Engine`, but it hasn't opened any connections yet. 
When it's first used, it retrieves a connection from a pool of connections maintained by the :class:`.Engine`, and holds onto it until we commit all changes and/or close the session object. .. topic:: Session Creational Patterns The business of acquiring a :class:`.Session` has a good deal of variety based on the variety of types of applications and frameworks out there. Keep in mind the :class:`.Session` is just a workspace for your objects, local to a particular database connection - if you think of an application thread as a guest at a dinner party, the :class:`.Session` is the guest's plate and the objects it holds are the food (and the database...the kitchen?)! Hints on how :class:`.Session` is integrated into an application are at :ref:`session_faq`. Adding New Objects ================== To persist our ``User`` object, we :meth:`~.Session.add` it to our :class:`~sqlalchemy.orm.session.Session`:: >>> ed_user = User('ed', 'Ed Jones', 'edspassword') >>> session.add(ed_user) At this point, we say that the instance is **pending**; no SQL has yet been issued and the object is not yet represented by a row in the database. The :class:`~sqlalchemy.orm.session.Session` will issue the SQL to persist ``Ed Jones`` as soon as is needed, using a process known as a **flush**. If we query the database for ``Ed Jones``, all pending information will first be flushed, and the query is issued immediately thereafter. For example, below we create a new :class:`~sqlalchemy.orm.query.Query` object which loads instances of ``User``. We "filter by" the ``name`` attribute of ``ed``, and indicate that we'd like only the first result in the full list of rows. A ``User`` instance is returned which is equivalent to that which we've added: .. sourcecode:: python+sql {sql}>>> our_user = session.query(User).filter_by(name='ed').first() # doctest:+ELLIPSIS,+NORMALIZE_WHITESPACE BEGIN (implicit) INSERT INTO users (name, fullname, password) VALUES (?, ?, ?) ('ed', 'Ed Jones', 'edspassword') SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, users.password AS users_password FROM users WHERE users.name = ? LIMIT ? OFFSET ? ('ed', 1, 0) {stop}>>> our_user In fact, the :class:`~sqlalchemy.orm.session.Session` has identified that the row returned is the **same** row as one already represented within its internal map of objects, so we actually got back the identical instance as that which we just added:: >>> ed_user is our_user True The ORM concept at work here is known as an `identity map `_ and ensures that all operations upon a particular row within a :class:`~sqlalchemy.orm.session.Session` operate upon the same set of data. Once an object with a particular primary key is present in the :class:`~sqlalchemy.orm.session.Session`, all SQL queries on that :class:`~sqlalchemy.orm.session.Session` will always return the same Python object for that particular primary key; it also will raise an error if an attempt is made to place a second, already-persisted object with the same primary key within the session. We can add more ``User`` objects at once using :func:`~sqlalchemy.orm.session.Session.add_all`: .. sourcecode:: python+sql >>> session.add_all([ ... User('wendy', 'Wendy Williams', 'foobar'), ... User('mary', 'Mary Contrary', 'xxg527'), ... User('fred', 'Fred Flinstone', 'blah')]) Also, we've decided the password for Ed isn't too secure, so lets change it: .. sourcecode:: python+sql >>> ed_user.password = 'f8s7ccs' The :class:`~sqlalchemy.orm.session.Session` is paying attention. 
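Before we look at what it has tracked, here is a rough sketch of the identity
map in action (illustrative only, not part of the tutorial's doctests): a
primary-key lookup hands back the very same object, and membership in the
:class:`~sqlalchemy.orm.session.Session` can be tested with the ``in``
operator::

    ed_user in session                     # True - 'ed' is now present in the Session

    # primary-key lookup; this sketch assumes ed received id 1 when he was
    # flushed above.  No new object is built and no SQL is emitted, since the
    # object is already present in the identity map.
    same_ed = session.query(User).get(1)
    same_ed is ed_user                     # True

And the :class:`~sqlalchemy.orm.session.Session` continues to watch these
objects for changes.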
It knows, for example, that ``Ed Jones`` has been modified: .. sourcecode:: python+sql >>> session.dirty IdentitySet([]) and that three new ``User`` objects are pending: .. sourcecode:: python+sql >>> session.new # doctest: +SKIP IdentitySet([, , ]) We tell the :class:`~sqlalchemy.orm.session.Session` that we'd like to issue all remaining changes to the database and commit the transaction, which has been in progress throughout. We do this via :meth:`~.Session.commit`: .. sourcecode:: python+sql {sql}>>> session.commit() UPDATE users SET password=? WHERE users.id = ? ('f8s7ccs', 1) INSERT INTO users (name, fullname, password) VALUES (?, ?, ?) ('wendy', 'Wendy Williams', 'foobar') INSERT INTO users (name, fullname, password) VALUES (?, ?, ?) ('mary', 'Mary Contrary', 'xxg527') INSERT INTO users (name, fullname, password) VALUES (?, ?, ?) ('fred', 'Fred Flinstone', 'blah') COMMIT :meth:`~.Session.commit` flushes whatever remaining changes remain to the database, and commits the transaction. The connection resources referenced by the session are now returned to the connection pool. Subsequent operations with this session will occur in a **new** transaction, which will again re-acquire connection resources when first needed. If we look at Ed's ``id`` attribute, which earlier was ``None``, it now has a value: .. sourcecode:: python+sql {sql}>>> ed_user.id # doctest: +NORMALIZE_WHITESPACE BEGIN (implicit) SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, users.password AS users_password FROM users WHERE users.id = ? (1,) {stop}1 After the :class:`~sqlalchemy.orm.session.Session` inserts new rows in the database, all newly generated identifiers and database-generated defaults become available on the instance, either immediately or via load-on-first-access. In this case, the entire row was re-loaded on access because a new transaction was begun after we issued :meth:`~.Session.commit`. SQLAlchemy by default refreshes data from a previous transaction the first time it's accessed within a new transaction, so that the most recent state is available. The level of reloading is configurable as is described in :doc:`/orm/session`. .. topic:: Session Object States As our ``User`` object moved from being outside the :class:`.Session`, to inside the :class:`.Session` without a primary key, to actually being inserted, it moved between three out of four available "object states" - **transient**, **pending**, and **persistent**. Being aware of these states and what they mean is always a good idea - be sure to read :ref:`session_object_states` for a quick overview. Rolling Back ============ Since the :class:`~sqlalchemy.orm.session.Session` works within a transaction, we can roll back changes made too. Let's make two changes that we'll revert; ``ed_user``'s user name gets set to ``Edwardo``: .. sourcecode:: python+sql >>> ed_user.name = 'Edwardo' and we'll add another erroneous user, ``fake_user``: .. sourcecode:: python+sql >>> fake_user = User('fakeuser', 'Invalid', '12345') >>> session.add(fake_user) Querying the session, we can see that they're flushed into the current transaction: .. sourcecode:: python+sql {sql}>>> session.query(User).filter(User.name.in_(['Edwardo', 'fakeuser'])).all() #doctest: +NORMALIZE_WHITESPACE UPDATE users SET name=? WHERE users.id = ? ('Edwardo', 1) INSERT INTO users (name, fullname, password) VALUES (?, ?, ?) 
('fakeuser', 'Invalid', '12345') SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, users.password AS users_password FROM users WHERE users.name IN (?, ?) ('Edwardo', 'fakeuser') {stop}[, ] Rolling back, we can see that ``ed_user``'s name is back to ``ed``, and ``fake_user`` has been kicked out of the session: .. sourcecode:: python+sql {sql}>>> session.rollback() ROLLBACK {stop} {sql}>>> ed_user.name #doctest: +NORMALIZE_WHITESPACE BEGIN (implicit) SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, users.password AS users_password FROM users WHERE users.id = ? (1,) {stop}u'ed' >>> fake_user in session False issuing a SELECT illustrates the changes made to the database: .. sourcecode:: python+sql {sql}>>> session.query(User).filter(User.name.in_(['ed', 'fakeuser'])).all() #doctest: +NORMALIZE_WHITESPACE SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, users.password AS users_password FROM users WHERE users.name IN (?, ?) ('ed', 'fakeuser') {stop}[] .. _ormtutorial_querying: Querying ======== A :class:`~sqlalchemy.orm.query.Query` object is created using the :class:`~sqlalchemy.orm.session.Session.query()` method on :class:`~sqlalchemy.orm.session.Session`. This function takes a variable number of arguments, which can be any combination of classes and class-instrumented descriptors. Below, we indicate a :class:`~sqlalchemy.orm.query.Query` which loads ``User`` instances. When evaluated in an iterative context, the list of ``User`` objects present is returned: .. sourcecode:: python+sql {sql}>>> for instance in session.query(User).order_by(User.id): # doctest: +NORMALIZE_WHITESPACE ... print instance.name, instance.fullname SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, users.password AS users_password FROM users ORDER BY users.id () {stop}ed Ed Jones wendy Wendy Williams mary Mary Contrary fred Fred Flinstone The :class:`~sqlalchemy.orm.query.Query` also accepts ORM-instrumented descriptors as arguments. Any time multiple class entities or column-based entities are expressed as arguments to the :class:`~sqlalchemy.orm.session.Session.query()` function, the return result is expressed as tuples: .. sourcecode:: python+sql {sql}>>> for name, fullname in session.query(User.name, User.fullname): # doctest: +NORMALIZE_WHITESPACE ... print name, fullname SELECT users.name AS users_name, users.fullname AS users_fullname FROM users () {stop}ed Ed Jones wendy Wendy Williams mary Mary Contrary fred Fred Flinstone The tuples returned by :class:`~sqlalchemy.orm.query.Query` are *named* tuples, supplied by the :class:`.KeyedTuple` class, and can be treated much like an ordinary Python object. The names are the same as the attribute's name for an attribute, and the class name for a class: .. sourcecode:: python+sql {sql}>>> for row in session.query(User, User.name).all(): #doctest: +NORMALIZE_WHITESPACE ... print row.User, row.name SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, users.password AS users_password FROM users () {stop} ed wendy mary fred You can control the names of individual column expressions using the :meth:`~.CompareMixin.label` construct, which is available from any :class:`.ColumnElement`-derived object, as well as any class attribute which is mapped to one (such as ``User.name``): .. 
sourcecode:: python+sql {sql}>>> for row in session.query(User.name.label('name_label')).all(): #doctest: +NORMALIZE_WHITESPACE ... print(row.name_label) SELECT users.name AS name_label FROM users (){stop} ed wendy mary fred The name given to a full entity such as ``User``, assuming that multiple entities are present in the call to :meth:`~.Session.query`, can be controlled using :class:`~.orm.aliased` : .. sourcecode:: python+sql >>> from sqlalchemy.orm import aliased >>> user_alias = aliased(User, name='user_alias') {sql}>>> for row in session.query(user_alias, user_alias.name).all(): #doctest: +NORMALIZE_WHITESPACE ... print row.user_alias SELECT user_alias.id AS user_alias_id, user_alias.name AS user_alias_name, user_alias.fullname AS user_alias_fullname, user_alias.password AS user_alias_password FROM users AS user_alias (){stop} Basic operations with :class:`~sqlalchemy.orm.query.Query` include issuing LIMIT and OFFSET, most conveniently using Python array slices and typically in conjunction with ORDER BY: .. sourcecode:: python+sql {sql}>>> for u in session.query(User).order_by(User.id)[1:3]: #doctest: +NORMALIZE_WHITESPACE ... print u SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, users.password AS users_password FROM users ORDER BY users.id LIMIT ? OFFSET ? (2, 1){stop} and filtering results, which is accomplished either with :func:`~sqlalchemy.orm.query.Query.filter_by`, which uses keyword arguments: .. sourcecode:: python+sql {sql}>>> for name, in session.query(User.name).\ ... filter_by(fullname='Ed Jones'): # doctest: +NORMALIZE_WHITESPACE ... print name SELECT users.name AS users_name FROM users WHERE users.fullname = ? ('Ed Jones',) {stop}ed ...or :func:`~sqlalchemy.orm.query.Query.filter`, which uses more flexible SQL expression language constructs. These allow you to use regular Python operators with the class-level attributes on your mapped class: .. sourcecode:: python+sql {sql}>>> for name, in session.query(User.name).\ ... filter(User.fullname=='Ed Jones'): # doctest: +NORMALIZE_WHITESPACE ... print name SELECT users.name AS users_name FROM users WHERE users.fullname = ? ('Ed Jones',) {stop}ed The :class:`~sqlalchemy.orm.query.Query` object is fully **generative**, meaning that most method calls return a new :class:`~sqlalchemy.orm.query.Query` object upon which further criteria may be added. For example, to query for users named "ed" with a full name of "Ed Jones", you can call :func:`~sqlalchemy.orm.query.Query.filter` twice, which joins criteria using ``AND``: .. sourcecode:: python+sql {sql}>>> for user in session.query(User).\ ... filter(User.name=='ed').\ ... filter(User.fullname=='Ed Jones'): # doctest: +NORMALIZE_WHITESPACE ... print user SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, users.password AS users_password FROM users WHERE users.name = ? AND users.fullname = ? 
('ed', 'Ed Jones') {stop} Common Filter Operators ----------------------- Here's a rundown of some of the most common operators used in :func:`~sqlalchemy.orm.query.Query.filter`: * equals:: query.filter(User.name == 'ed') * not equals:: query.filter(User.name != 'ed') * LIKE:: query.filter(User.name.like('%ed%')) * IN:: query.filter(User.name.in_(['ed', 'wendy', 'jack'])) # works with query objects too: query.filter(User.name.in_(session.query(User.name).filter(User.name.like('%ed%')))) * NOT IN:: query.filter(~User.name.in_(['ed', 'wendy', 'jack'])) * IS NULL:: filter(User.name == None) * IS NOT NULL:: filter(User.name != None) * AND:: from sqlalchemy import and_ filter(and_(User.name == 'ed', User.fullname == 'Ed Jones')) # or call filter()/filter_by() multiple times filter(User.name == 'ed').filter(User.fullname == 'Ed Jones') * OR:: from sqlalchemy import or_ filter(or_(User.name == 'ed', User.name == 'wendy')) * match:: query.filter(User.name.match('wendy')) The contents of the match parameter are database backend specific. Returning Lists and Scalars --------------------------- The :meth:`~sqlalchemy.orm.query.Query.all()`, :meth:`~sqlalchemy.orm.query.Query.one()`, and :meth:`~sqlalchemy.orm.query.Query.first()` methods of :class:`~sqlalchemy.orm.query.Query` immediately issue SQL and return a non-iterator value. :meth:`~sqlalchemy.orm.query.Query.all()` returns a list: .. sourcecode:: python+sql >>> query = session.query(User).filter(User.name.like('%ed')).order_by(User.id) {sql}>>> query.all() #doctest: +NORMALIZE_WHITESPACE SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, users.password AS users_password FROM users WHERE users.name LIKE ? ORDER BY users.id ('%ed',) {stop}[, ] :meth:`~sqlalchemy.orm.query.Query.first()` applies a limit of one and returns the first result as a scalar: .. sourcecode:: python+sql {sql}>>> query.first() #doctest: +NORMALIZE_WHITESPACE SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, users.password AS users_password FROM users WHERE users.name LIKE ? ORDER BY users.id LIMIT ? OFFSET ? ('%ed', 1, 0) {stop} :meth:`~sqlalchemy.orm.query.Query.one()`, fully fetches all rows, and if not exactly one object identity or composite row is present in the result, raises an error: .. sourcecode:: python+sql {sql}>>> from sqlalchemy.orm.exc import MultipleResultsFound >>> try: #doctest: +NORMALIZE_WHITESPACE ... user = query.one() ... except MultipleResultsFound, e: ... print e SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, users.password AS users_password FROM users WHERE users.name LIKE ? ORDER BY users.id ('%ed',) {stop}Multiple rows were found for one() .. sourcecode:: python+sql {sql}>>> from sqlalchemy.orm.exc import NoResultFound >>> try: #doctest: +NORMALIZE_WHITESPACE ... user = query.filter(User.id == 99).one() ... except NoResultFound, e: ... print e SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, users.password AS users_password FROM users WHERE users.name LIKE ? AND users.id = ? ORDER BY users.id ('%ed', 99) {stop}No row was found for one() .. _orm_tutorial_literal_sql: Using Literal SQL ----------------- Literal strings can be used flexibly with :class:`~sqlalchemy.orm.query.Query`. Most methods accept strings in addition to SQLAlchemy clause constructs. For example, :meth:`~sqlalchemy.orm.query.Query.filter()` and :meth:`~sqlalchemy.orm.query.Query.order_by()`: .. 
sourcecode:: python+sql

    {sql}>>> for user in session.query(User).\
    ...             filter("id<224").\
    ...             order_by("id").all(): #doctest: +NORMALIZE_WHITESPACE
    ...     print user.name
    SELECT users.id AS users_id, users.name AS users_name,
    users.fullname AS users_fullname, users.password AS users_password
    FROM users WHERE id<224 ORDER BY id
    ()
    {stop}ed
    wendy
    mary
    fred

Bind parameters can be specified with string-based SQL, using a colon. To
specify the values, use the :meth:`~sqlalchemy.orm.query.Query.params()`
method:

.. sourcecode:: python+sql

    {sql}>>> session.query(User).filter("id<:value and name=:name").\
    ...     params(value=224, name='fred').order_by(User.id).one() # doctest: +NORMALIZE_WHITESPACE
    SELECT users.id AS users_id, users.name AS users_name,
    users.fullname AS users_fullname, users.password AS users_password
    FROM users
    WHERE id<? and name=? ORDER BY users.id
    (224, 'fred')
    {stop}<User('fred','Fred Flinstone', 'blah')>

To use an entirely string-based statement, use
:meth:`~sqlalchemy.orm.query.Query.from_statement()`; just ensure that the
columns clause of the statement contains the column names normally used by
the mapper (below illustrated using an asterisk):

.. sourcecode:: python+sql

    {sql}>>> session.query(User).from_statement(
    ...                     "SELECT * FROM users where name=:name").\
    ...                     params(name='ed').all()
    SELECT * FROM users where name=?
    ('ed',)
    {stop}[<User('ed','Ed Jones', 'f8s7ccs')>]

You can use :meth:`~sqlalchemy.orm.query.Query.from_statement()` to go
completely "raw", using string names to identify desired columns:

.. sourcecode:: python+sql

    {sql}>>> session.query("id", "name", "thenumber12").\
    ...         from_statement("SELECT id, name, 12 as "
    ...                 "thenumber12 FROM users where name=:name").\
    ...                 params(name='ed').all()
    SELECT id, name, 12 as thenumber12 FROM users where name=?
    ('ed',)
    {stop}[(1, u'ed', 12)]

.. topic:: Pros and Cons of Literal SQL

    :class:`.Query` is constructed like the rest of SQLAlchemy, in that it
    tries to always allow "falling back" to a less automated, lower level
    approach to things.  Accepting strings for all SQL fragments is a big part
    of that, so that you can bypass the need to organize SQL constructs if you
    know specifically what string output you'd like.  But when using literal
    strings, the :class:`.Query` no longer knows anything about that part of
    the SQL construct being emitted, and has no ability to **transform** it to
    adapt to new contexts.

    For example, suppose we selected ``User`` objects and ordered by the
    ``name`` column, using a string to indicate ``name``:

    .. sourcecode:: python+sql

        >>> q = session.query(User.id, User.name)
        {sql}>>> q.order_by("name").all()
        SELECT users.id AS users_id, users.name AS users_name
        FROM users ORDER BY name
        ()
        {stop}[(1, u'ed'), (4, u'fred'), (3, u'mary'), (2, u'wendy')]

    Perfectly fine.  But suppose, before we got a hold of the :class:`.Query`,
    some sophisticated transformations were applied to it, such as below where
    we use :meth:`~.Query.from_self`, a particularly advanced method, to
    retrieve pairs of user names with different numbers of characters::

        >>> from sqlalchemy import func
        >>> ua = aliased(User)
        >>> q = q.from_self(User.id, User.name, ua.name).\
        ...         filter(User.name < ua.name).\
        ...         filter(func.length(ua.name) != func.length(User.name))

    The :class:`.Query` now represents a select from a subquery, where ``User``
    is represented twice both inside and outside of the subquery.  Telling the
    :class:`.Query` to order by "name" doesn't really give us much guarantee
    which "name" it's going to order on.  In this case it assumes "name" is
    against the outer "aliased" ``User`` construct:

..
sourcecode:: python+sql {sql}>>> q.order_by("name").all() #doctest: +NORMALIZE_WHITESPACE SELECT anon_1.users_id AS anon_1_users_id, anon_1.users_name AS anon_1_users_name, users_1.name AS users_1_name FROM (SELECT users.id AS users_id, users.name AS users_name FROM users) AS anon_1, users AS users_1 WHERE anon_1.users_name < users_1.name AND length(users_1.name) != length(anon_1.users_name) ORDER BY name () {stop}[(1, u'ed', u'fred'), (1, u'ed', u'mary'), (1, u'ed', u'wendy'), (3, u'mary', u'wendy'), (4, u'fred', u'wendy')] Only if we use the SQL element directly, in this case ``User.name`` or ``ua.name``, do we give :class:`.Query` enough information to know for sure which "name" we'd like to order on, where we can see we get different results for each: .. sourcecode:: python+sql {sql}>>> q.order_by(ua.name).all() #doctest: +NORMALIZE_WHITESPACE SELECT anon_1.users_id AS anon_1_users_id, anon_1.users_name AS anon_1_users_name, users_1.name AS users_1_name FROM (SELECT users.id AS users_id, users.name AS users_name FROM users) AS anon_1, users AS users_1 WHERE anon_1.users_name < users_1.name AND length(users_1.name) != length(anon_1.users_name) ORDER BY users_1.name () {stop}[(1, u'ed', u'fred'), (1, u'ed', u'mary'), (1, u'ed', u'wendy'), (3, u'mary', u'wendy'), (4, u'fred', u'wendy')] {sql}>>> q.order_by(User.name).all() #doctest: +NORMALIZE_WHITESPACE SELECT anon_1.users_id AS anon_1_users_id, anon_1.users_name AS anon_1_users_name, users_1.name AS users_1_name FROM (SELECT users.id AS users_id, users.name AS users_name FROM users) AS anon_1, users AS users_1 WHERE anon_1.users_name < users_1.name AND length(users_1.name) != length(anon_1.users_name) ORDER BY anon_1.users_name () {stop}[(1, u'ed', u'wendy'), (1, u'ed', u'mary'), (1, u'ed', u'fred'), (4, u'fred', u'wendy'), (3, u'mary', u'wendy')] Counting -------- :class:`~sqlalchemy.orm.query.Query` includes a convenience method for counting called :meth:`~sqlalchemy.orm.query.Query.count()`: .. sourcecode:: python+sql {sql}>>> session.query(User).filter(User.name.like('%ed')).count() #doctest: +NORMALIZE_WHITESPACE SELECT count(*) AS count_1 FROM (SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, users.password AS users_password FROM users WHERE users.name LIKE ?) AS anon_1 ('%ed',) {stop}2 The :meth:`~.Query.count()` method is used to determine how many rows the SQL statement would return. Looking at the generated SQL above, SQLAlchemy always places whatever it is we are querying into a subquery, then counts the rows from that. In some cases this can be reduced to a simpler ``SELECT count(*) FROM table``, however modern versions of SQLAlchemy don't try to guess when this is appropriate, as the exact SQL can be emitted using more explicit means. For situations where the "thing to be counted" needs to be indicated specifically, we can specify the "count" function directly using the expression ``func.count()``, available from the :attr:`~sqlalchemy.sql.expression.func` construct. Below we use it to return the count of each distinct user name: .. sourcecode:: python+sql >>> from sqlalchemy import func {sql}>>> session.query(func.count(User.name), User.name).group_by(User.name).all() #doctest: +NORMALIZE_WHITESPACE SELECT count(users.name) AS count_1, users.name AS users_name FROM users GROUP BY users.name () {stop}[(1, u'ed'), (1, u'fred'), (1, u'mary'), (1, u'wendy')] To achieve our simple ``SELECT count(*) FROM table``, we can apply it as: .. 
sourcecode:: python+sql {sql}>>> session.query(func.count('*')).select_from(User).scalar() SELECT count(?) AS count_1 FROM users ('*',) {stop}4 The usage of :meth:`~.Query.select_from` can be removed if we express the count in terms of the ``User`` primary key directly: .. sourcecode:: python+sql {sql}>>> session.query(func.count(User.id)).scalar() #doctest: +NORMALIZE_WHITESPACE SELECT count(users.id) AS count_1 FROM users () {stop}4 Building a Relationship ======================= Let's consider how a second table, related to ``User``, can be mapped and queried. Users in our system can store any number of email addresses associated with their username. This implies a basic one to many association from the ``users`` to a new table which stores email addresses, which we will call ``addresses``. Using declarative, we define this table along with its mapped class, ``Address``: .. sourcecode:: python+sql >>> from sqlalchemy import ForeignKey >>> from sqlalchemy.orm import relationship, backref >>> class Address(Base): ... __tablename__ = 'addresses' ... id = Column(Integer, primary_key=True) ... email_address = Column(String, nullable=False) ... user_id = Column(Integer, ForeignKey('users.id')) ... ... user = relationship("User", backref=backref('addresses', order_by=id)) ... ... def __init__(self, email_address): ... self.email_address = email_address ... ... def __repr__(self): ... return "" % self.email_address The above class introduces the :class:`.ForeignKey` construct, which is a directive applied to :class:`.Column` that indicates that values in this column should be **constrained** to be values present in the named remote column. This is a core feature of relational databases, and is the "glue" that transforms an otherwise unconnected collection of tables to have rich overlapping relationships. The :class:`.ForeignKey` above expresses that values in the ``addresses.user_id`` column should be constrained to those values in the ``users.id`` column, i.e. its primary key. A second directive, known as :func:`.relationship`, tells the ORM that the ``Address`` class itself should be linked to the ``User`` class, using the attribute ``Address.user``. :func:`.relationship` uses the foreign key relationships between the two tables to determine the nature of this linkage, determining that ``Address.user`` will be **many-to-one**. A subdirective of :func:`.relationship` called :func:`.backref` is placed inside of :func:`.relationship`, providing details about the relationship as expressed in reverse, that of a collection of ``Address`` objects on ``User`` referenced by ``User.addresses``. The reverse side of a many-to-one relationship is always **one-to-many**. A full catalog of available :func:`.relationship` configurations is at :ref:`relationship_patterns`. The two complementing relationships ``Address.user`` and ``User.addresses`` are referred to as a **bidirectional relationship**, and is a key feature of the SQLAlchemy ORM. The section :ref:`relationships_backref` discusses the "backref" feature in detail. Arguments to :func:`.relationship` which concern the remote class can be specified using strings, assuming the Declarative system is in use. Once all mappings are complete, these strings are evaluated as Python expressions in order to produce the actual argument, in the above case the ``User`` class. The names which are allowed during this evaluation include, among other things, the names of all classes which have been created in terms of the declared base. 
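As an illustrative sketch of this string-evaluation feature (a hypothetical
variant, not part of the tutorial), other :func:`.relationship` arguments such
as the join condition itself may also be given as strings, which are evaluated
as Python expressions only after all of the declarative classes exist::

    # inside a declarative class body; "User" and the primaryjoin expression
    # below are plain strings, resolved against the declarative class registry
    # once all mappings are configured
    user = relationship("User",
                        primaryjoin="User.id == Address.user_id",
                        backref="addresses")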
Below we illustrate creation of the same "addresses/user" bidirectional relationship in terms of ``User`` instead of ``Address``:: class User(Base): # .... addresses = relationship("Address", order_by="Address.id", backref="user") See the docstring for :func:`.relationship` for more detail on argument style. .. topic:: Did you know ? * a FOREIGN KEY constraint in most (though not all) relational databases can only link to a primary key column, or a column that has a UNIQUE constraint. * a FOREIGN KEY constraint that refers to a multiple column primary key, and itself has multiple columns, is known as a "composite foreign key". It can also reference a subset of those columns. * FOREIGN KEY columns can automatically update themselves, in response to a change in the referenced column or row. This is known as the CASCADE *referential action*, and is a built in function of the relational database. * FOREIGN KEY can refer to its own table. This is referred to as a "self-referential" foreign key. * Read more about foreign keys at `Foreign Key - Wikipedia `_. We'll need to create the ``addresses`` table in the database, so we will issue another CREATE from our metadata, which will skip over tables which have already been created: .. sourcecode:: python+sql {sql}>>> Base.metadata.create_all(engine) # doctest: +NORMALIZE_WHITESPACE PRAGMA table_info("users") () PRAGMA table_info("addresses") () CREATE TABLE addresses ( id INTEGER NOT NULL, email_address VARCHAR NOT NULL, user_id INTEGER, PRIMARY KEY (id), FOREIGN KEY(user_id) REFERENCES users (id) ) () COMMIT Working with Related Objects ============================= Now when we create a ``User``, a blank ``addresses`` collection will be present. Various collection types, such as sets and dictionaries, are possible here (see :ref:`custom_collections` for details), but by default, the collection is a Python list. .. sourcecode:: python+sql >>> jack = User('jack', 'Jack Bean', 'gjffdd') >>> jack.addresses [] We are free to add ``Address`` objects on our ``User`` object. In this case we just assign a full list directly: .. sourcecode:: python+sql >>> jack.addresses = [ ... Address(email_address='jack@google.com'), ... Address(email_address='j25@yahoo.com')] When using a bidirectional relationship, elements added in one direction automatically become visible in the other direction. This behavior occurs based on attribute on-change events and is evaluated in Python, without using any SQL: .. sourcecode:: python+sql >>> jack.addresses[1] >>> jack.addresses[1].user Let's add and commit ``Jack Bean`` to the database. ``jack`` as well as the two ``Address`` members in the corresponding ``addresses`` collection are both added to the session at once, using a process known as **cascading**: .. sourcecode:: python+sql >>> session.add(jack) {sql}>>> session.commit() INSERT INTO users (name, fullname, password) VALUES (?, ?, ?) ('jack', 'Jack Bean', 'gjffdd') INSERT INTO addresses (email_address, user_id) VALUES (?, ?) ('jack@google.com', 5) INSERT INTO addresses (email_address, user_id) VALUES (?, ?) ('j25@yahoo.com', 5) COMMIT Querying for Jack, we get just Jack back. No SQL is yet issued for Jack's addresses: .. sourcecode:: python+sql {sql}>>> jack = session.query(User).\ ... filter_by(name='jack').one() #doctest: +NORMALIZE_WHITESPACE BEGIN (implicit) SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, users.password AS users_password FROM users WHERE users.name = ? 
('jack',) {stop}>>> jack Let's look at the ``addresses`` collection. Watch the SQL: .. sourcecode:: python+sql {sql}>>> jack.addresses #doctest: +NORMALIZE_WHITESPACE SELECT addresses.id AS addresses_id, addresses.email_address AS addresses_email_address, addresses.user_id AS addresses_user_id FROM addresses WHERE ? = addresses.user_id ORDER BY addresses.id (5,) {stop}[, ] When we accessed the ``addresses`` collection, SQL was suddenly issued. This is an example of a **lazy loading relationship**. The ``addresses`` collection is now loaded and behaves just like an ordinary list. We'll cover ways to optimize the loading of this collection in a bit. .. _ormtutorial_joins: Querying with Joins ==================== Now that we have two tables, we can show some more features of :class:`.Query`, specifically how to create queries that deal with both tables at the same time. The `Wikipedia page on SQL JOIN `_ offers a good introduction to join techniques, several of which we'll illustrate here. To construct a simple implicit join between ``User`` and ``Address``, we can use :meth:`.Query.filter()` to equate their related columns together. Below we load the ``User`` and ``Address`` entities at once using this method: .. sourcecode:: python+sql {sql}>>> for u, a in session.query(User, Address).\ ... filter(User.id==Address.user_id).\ ... filter(Address.email_address=='jack@google.com').\ ... all(): # doctest: +NORMALIZE_WHITESPACE ... print u, a SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, users.password AS users_password, addresses.id AS addresses_id, addresses.email_address AS addresses_email_address, addresses.user_id AS addresses_user_id FROM users, addresses WHERE users.id = addresses.user_id AND addresses.email_address = ? ('jack@google.com',) {stop} The actual SQL JOIN syntax, on the other hand, is most easily achieved using the :meth:`.Query.join` method: .. sourcecode:: python+sql {sql}>>> session.query(User).join(Address).\ ... filter(Address.email_address=='jack@google.com').\ ... all() #doctest: +NORMALIZE_WHITESPACE SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, users.password AS users_password FROM users JOIN addresses ON users.id = addresses.user_id WHERE addresses.email_address = ? ('jack@google.com',) {stop}[] :meth:`.Query.join` knows how to join between ``User`` and ``Address`` because there's only one foreign key between them. If there were no foreign keys, or several, :meth:`.Query.join` works better when one of the following forms are used:: query.join(Address, User.id==Address.user_id) # explicit condition query.join(User.addresses) # specify relationship from left to right query.join(Address, User.addresses) # same, with explicit target query.join('addresses') # same, using a string As you would expect, the same idea is used for "outer" joins, using the :meth:`~.Query.outerjoin` function:: query.outerjoin(User.addresses) # LEFT OUTER JOIN The reference documentation for :meth:`~.Query.join` contains detailed information and examples of the calling styles accepted by this method; :meth:`~.Query.join` is an important method at the center of usage for any SQL-fluent application. .. _ormtutorial_aliases: Using Aliases ------------- When querying across multiple tables, if the same table needs to be referenced more than once, SQL typically requires that the table be *aliased* with another name, so that it can be distinguished against other occurrences of that table. 
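In rough terms, the SQL we are after looks like the sketch below (shown for
illustration only; it is essentially the statement the next example will
render, with each occurrence of ``addresses`` given its own alias and the
literal values written inline)::

    SELECT users.name, addresses_1.email_address, addresses_2.email_address
    FROM users
        JOIN addresses AS addresses_1 ON users.id = addresses_1.user_id
        JOIN addresses AS addresses_2 ON users.id = addresses_2.user_id
    WHERE addresses_1.email_address = 'jack@google.com'
        AND addresses_2.email_address = 'j25@yahoo.com'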
The :class:`~sqlalchemy.orm.query.Query` supports this most explicitly using the :attr:`~sqlalchemy.orm.aliased` construct. Below we join to the ``Address`` entity twice, to locate a user who has two distinct email addresses at the same time: .. sourcecode:: python+sql >>> from sqlalchemy.orm import aliased >>> adalias1 = aliased(Address) >>> adalias2 = aliased(Address) {sql}>>> for username, email1, email2 in \ ... session.query(User.name, adalias1.email_address, adalias2.email_address).\ ... join(adalias1, User.addresses).\ ... join(adalias2, User.addresses).\ ... filter(adalias1.email_address=='jack@google.com').\ ... filter(adalias2.email_address=='j25@yahoo.com'): ... print username, email1, email2 # doctest: +NORMALIZE_WHITESPACE SELECT users.name AS users_name, addresses_1.email_address AS addresses_1_email_address, addresses_2.email_address AS addresses_2_email_address FROM users JOIN addresses AS addresses_1 ON users.id = addresses_1.user_id JOIN addresses AS addresses_2 ON users.id = addresses_2.user_id WHERE addresses_1.email_address = ? AND addresses_2.email_address = ? ('jack@google.com', 'j25@yahoo.com') {stop}jack jack@google.com j25@yahoo.com Using Subqueries ---------------- The :class:`~sqlalchemy.orm.query.Query` is suitable for generating statements which can be used as subqueries. Suppose we wanted to load ``User`` objects along with a count of how many ``Address`` records each user has. The best way to generate SQL like this is to get the count of addresses grouped by user ids, and JOIN to the parent. In this case we use a LEFT OUTER JOIN so that we get rows back for those users who don't have any addresses, e.g.:: SELECT users.*, adr_count.address_count FROM users LEFT OUTER JOIN (SELECT user_id, count(*) AS address_count FROM addresses GROUP BY user_id) AS adr_count ON users.id=adr_count.user_id Using the :class:`~sqlalchemy.orm.query.Query`, we build a statement like this from the inside out. The ``statement`` accessor returns a SQL expression representing the statement generated by a particular :class:`~sqlalchemy.orm.query.Query` - this is an instance of a :func:`~.expression.select` construct, which are described in :ref:`sqlexpression_toplevel`:: >>> from sqlalchemy.sql import func >>> stmt = session.query(Address.user_id, func.count('*').\ ... label('address_count')).\ ... group_by(Address.user_id).subquery() The ``func`` keyword generates SQL functions, and the ``subquery()`` method on :class:`~sqlalchemy.orm.query.Query` produces a SQL expression construct representing a SELECT statement embedded within an alias (it's actually shorthand for ``query.statement.alias()``). Once we have our statement, it behaves like a :class:`~sqlalchemy.schema.Table` construct, such as the one we created for ``users`` at the start of this tutorial. The columns on the statement are accessible through an attribute called ``c``: .. sourcecode:: python+sql {sql}>>> for u, count in session.query(User, stmt.c.address_count).\ ... outerjoin(stmt, User.id==stmt.c.user_id).order_by(User.id): # doctest: +NORMALIZE_WHITESPACE ... print u, count SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, users.password AS users_password, anon_1.address_count AS anon_1_address_count FROM users LEFT OUTER JOIN (SELECT addresses.user_id AS user_id, count(?) 
AS address_count FROM addresses GROUP BY addresses.user_id) AS anon_1 ON users.id = anon_1.user_id ORDER BY users.id ('*',) {stop} None None None None 2 Selecting Entities from Subqueries ---------------------------------- Above, we just selected a result that included a column from a subquery. What if we wanted our subquery to map to an entity ? For this we use ``aliased()`` to associate an "alias" of a mapped class to a subquery: .. sourcecode:: python+sql {sql}>>> stmt = session.query(Address).\ ... filter(Address.email_address != 'j25@yahoo.com').\ ... subquery() >>> adalias = aliased(Address, stmt) >>> for user, address in session.query(User, adalias).\ ... join(adalias, User.addresses): # doctest: +NORMALIZE_WHITESPACE ... print user, address SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, users.password AS users_password, anon_1.id AS anon_1_id, anon_1.email_address AS anon_1_email_address, anon_1.user_id AS anon_1_user_id FROM users JOIN (SELECT addresses.id AS id, addresses.email_address AS email_address, addresses.user_id AS user_id FROM addresses WHERE addresses.email_address != ?) AS anon_1 ON users.id = anon_1.user_id ('j25@yahoo.com',) {stop} Using EXISTS ------------ The EXISTS keyword in SQL is a boolean operator which returns True if the given expression contains any rows. It may be used in many scenarios in place of joins, and is also useful for locating rows which do not have a corresponding row in a related table. There is an explicit EXISTS construct, which looks like this: .. sourcecode:: python+sql >>> from sqlalchemy.sql import exists >>> stmt = exists().where(Address.user_id==User.id) {sql}>>> for name, in session.query(User.name).filter(stmt): # doctest: +NORMALIZE_WHITESPACE ... print name SELECT users.name AS users_name FROM users WHERE EXISTS (SELECT * FROM addresses WHERE addresses.user_id = users.id) () {stop}jack The :class:`~sqlalchemy.orm.query.Query` features several operators which make usage of EXISTS automatically. Above, the statement can be expressed along the ``User.addresses`` relationship using :meth:`~.RelationshipProperty.Comparator.any`: .. sourcecode:: python+sql {sql}>>> for name, in session.query(User.name).\ ... filter(User.addresses.any()): # doctest: +NORMALIZE_WHITESPACE ... print name SELECT users.name AS users_name FROM users WHERE EXISTS (SELECT 1 FROM addresses WHERE users.id = addresses.user_id) () {stop}jack :meth:`~.RelationshipProperty.Comparator.any` takes criterion as well, to limit the rows matched: .. sourcecode:: python+sql {sql}>>> for name, in session.query(User.name).\ ... filter(User.addresses.any(Address.email_address.like('%google%'))): # doctest: +NORMALIZE_WHITESPACE ... print name SELECT users.name AS users_name FROM users WHERE EXISTS (SELECT 1 FROM addresses WHERE users.id = addresses.user_id AND addresses.email_address LIKE ?) ('%google%',) {stop}jack :meth:`~.RelationshipProperty.Comparator.has` is the same operator as :meth:`~.RelationshipProperty.Comparator.any` for many-to-one relationships (note the ``~`` operator here too, which means "NOT"): .. sourcecode:: python+sql {sql}>>> session.query(Address).\ ... 
filter(~Address.user.has(User.name=='jack')).all() # doctest: +NORMALIZE_WHITESPACE SELECT addresses.id AS addresses_id, addresses.email_address AS addresses_email_address, addresses.user_id AS addresses_user_id FROM addresses WHERE NOT (EXISTS (SELECT 1 FROM users WHERE users.id = addresses.user_id AND users.name = ?)) ('jack',) {stop}[] Common Relationship Operators ----------------------------- Here's all the operators which build on relationships - each one is linked to its API documentation which includes full details on usage and behavior: * :meth:`~.RelationshipProperty.Comparator.__eq__` (many-to-one "equals" comparison):: query.filter(Address.user == someuser) * :meth:`~.RelationshipProperty.Comparator.__ne__` (many-to-one "not equals" comparison):: query.filter(Address.user != someuser) * IS NULL (many-to-one comparison, also uses :meth:`~.RelationshipProperty.Comparator.__eq__`):: query.filter(Address.user == None) * :meth:`~.RelationshipProperty.Comparator.contains` (used for one-to-many collections):: query.filter(User.addresses.contains(someaddress)) * :meth:`~.RelationshipProperty.Comparator.any` (used for collections):: query.filter(User.addresses.any(Address.email_address == 'bar')) # also takes keyword arguments: query.filter(User.addresses.any(email_address='bar')) * :meth:`~.RelationshipProperty.Comparator.has` (used for scalar references):: query.filter(Address.user.has(name='ed')) * :meth:`.Query.with_parent` (used for any relationship):: session.query(Address).with_parent(someuser, 'addresses') Eager Loading ============= Recall earlier that we illustrated a **lazy loading** operation, when we accessed the ``User.addresses`` collection of a ``User`` and SQL was emitted. If you want to reduce the number of queries (dramatically, in many cases), we can apply an **eager load** to the query operation. SQLAlchemy offers three types of eager loading, two of which are automatic, and a third which involves custom criterion. All three are usually invoked via functions known as **query options** which give additional instructions to the :class:`.Query` on how we would like various attributes to be loaded, via the :meth:`.Query.options` method. Subquery Load ------------- In this case we'd like to indicate that ``User.addresses`` should load eagerly. A good choice for loading a set of objects as well as their related collections is the :func:`.orm.subqueryload` option, which emits a second SELECT statement that fully loads the collections associated with the results just loaded. The name "subquery" originates from the fact that the SELECT statement constructed directly via the :class:`.Query` is re-used, embedded as a subquery into a SELECT against the related table. This is a little elaborate but very easy to use: .. sourcecode:: python+sql >>> from sqlalchemy.orm import subqueryload {sql}>>> jack = session.query(User).\ ... options(subqueryload(User.addresses)).\ ... filter_by(name='jack').one() #doctest: +NORMALIZE_WHITESPACE SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, users.password AS users_password FROM users WHERE users.name = ? ('jack',) SELECT addresses.id AS addresses_id, addresses.email_address AS addresses_email_address, addresses.user_id AS addresses_user_id, anon_1.users_id AS anon_1_users_id FROM (SELECT users.id AS users_id FROM users WHERE users.name = ?) 
AS anon_1 JOIN addresses ON anon_1.users_id = addresses.user_id ORDER BY anon_1.users_id, addresses.id ('jack',) {stop}>>> jack >>> jack.addresses [, ] Joined Load ------------- The other automatic eager loading function is more well known and is called :func:`.orm.joinedload`. This style of loading emits a JOIN, by default a LEFT OUTER JOIN, so that the lead object as well as the related object or collection is loaded in one step. We illustrate loading the same ``addresses`` collection in this way - note that even though the ``User.addresses`` collection on ``jack`` is actually populated right now, the query will emit the extra join regardless: .. sourcecode:: python+sql >>> from sqlalchemy.orm import joinedload {sql}>>> jack = session.query(User).\ ... options(joinedload(User.addresses)).\ ... filter_by(name='jack').one() #doctest: +NORMALIZE_WHITESPACE SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, users.password AS users_password, addresses_1.id AS addresses_1_id, addresses_1.email_address AS addresses_1_email_address, addresses_1.user_id AS addresses_1_user_id FROM users LEFT OUTER JOIN addresses AS addresses_1 ON users.id = addresses_1.user_id WHERE users.name = ? ORDER BY addresses_1.id ('jack',) {stop}>>> jack >>> jack.addresses [, ] Note that even though the OUTER JOIN resulted in two rows, we still only got one instance of ``User`` back. This is because :class:`.Query` applies a "uniquing" strategy, based on object identity, to the returned entities. This is specifically so that joined eager loading can be applied without affecting the query results. While :func:`.joinedload` has been around for a long time, :func:`.subqueryload` is a newer form of eager loading. :func:`.subqueryload` tends to be more appropriate for loading related collections while :func:`.joinedload` tends to be better suited for many-to-one relationships, due to the fact that only one row is loaded for both the lead and the related object. .. topic:: ``joinedload()`` is not a replacement for ``join()`` The join created by :func:`.joinedload` is anonymously aliased such that it **does not affect the query results**. An :meth:`.Query.order_by` or :meth:`.Query.filter` call **cannot** reference these aliased tables - so-called "user space" joins are constructed using :meth:`.Query.join`. The rationale for this is that :func:`.joinedload` is only applied in order to affect how related objects or collections are loaded as an optimizing detail - it can be added or removed with no impact on actual results. See the section :ref:`zen_of_eager_loading` for a detailed description of how this is used. Explicit Join + Eagerload -------------------------- A third style of eager loading is when we are constructing a JOIN explicitly in order to locate the primary rows, and would like to additionally apply the extra table to a related object or collection on the primary object. This feature is supplied via the :func:`.orm.contains_eager` function, and is most typically useful for pre-loading the many-to-one object on a query that needs to filter on that same object. Below we illustrate loading an ``Address`` row as well as the related ``User`` object, filtering on the ``User`` named "jack" and using :func:`.orm.contains_eager` to apply the "user" columns to the ``Address.user`` attribute: .. sourcecode:: python+sql >>> from sqlalchemy.orm import contains_eager {sql}>>> jacks_addresses = session.query(Address).\ ... join(Address.user).\ ... filter(User.name=='jack').\ ... 
options(contains_eager(Address.user)).\ ... all() #doctest: +NORMALIZE_WHITESPACE SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, users.password AS users_password, addresses.id AS addresses_id, addresses.email_address AS addresses_email_address, addresses.user_id AS addresses_user_id FROM addresses JOIN users ON users.id = addresses.user_id WHERE users.name = ? ('jack',) {stop}>>> jacks_addresses [, ] >>> jacks_addresses[0].user For more information on eager loading, including how to configure various forms of loading by default, see the section :doc:`/orm/loading`. Deleting ======== Let's try to delete ``jack`` and see how that goes. We'll mark as deleted in the session, then we'll issue a ``count`` query to see that no rows remain: .. sourcecode:: python+sql >>> session.delete(jack) {sql}>>> session.query(User).filter_by(name='jack').count() # doctest: +NORMALIZE_WHITESPACE UPDATE addresses SET user_id=? WHERE addresses.id = ? (None, 1) UPDATE addresses SET user_id=? WHERE addresses.id = ? (None, 2) DELETE FROM users WHERE users.id = ? (5,) SELECT count(*) AS count_1 FROM (SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, users.password AS users_password FROM users WHERE users.name = ?) AS anon_1 ('jack',) {stop}0 So far, so good. How about Jack's ``Address`` objects ? .. sourcecode:: python+sql {sql}>>> session.query(Address).filter( ... Address.email_address.in_(['jack@google.com', 'j25@yahoo.com']) ... ).count() # doctest: +NORMALIZE_WHITESPACE SELECT count(*) AS count_1 FROM (SELECT addresses.id AS addresses_id, addresses.email_address AS addresses_email_address, addresses.user_id AS addresses_user_id FROM addresses WHERE addresses.email_address IN (?, ?)) AS anon_1 ('jack@google.com', 'j25@yahoo.com') {stop}2 Uh oh, they're still there ! Analyzing the flush SQL, we can see that the ``user_id`` column of each address was set to NULL, but the rows weren't deleted. SQLAlchemy doesn't assume that deletes cascade, you have to tell it to do so. .. _tutorial_delete_cascade: Configuring delete/delete-orphan Cascade ---------------------------------------- We will configure **cascade** options on the ``User.addresses`` relationship to change the behavior. While SQLAlchemy allows you to add new attributes and relationships to mappings at any point in time, in this case the existing relationship needs to be removed, so we need to tear down the mappings completely and start again - we'll close the :class:`.Session`:: >>> session.close() and use a new :func:`.declarative_base`:: >>> Base = declarative_base() Next we'll declare the ``User`` class, adding in the ``addresses`` relationship including the cascade configuration (we'll leave the constructor out too):: >>> class User(Base): ... __tablename__ = 'users' ... ... id = Column(Integer, primary_key=True) ... name = Column(String) ... fullname = Column(String) ... password = Column(String) ... ... addresses = relationship("Address", backref='user', cascade="all, delete, delete-orphan") ... ... def __repr__(self): ... return "" % (self.name, self.fullname, self.password) Then we recreate ``Address``, noting that in this case we've created the ``Address.user`` relationship via the ``User`` class already:: >>> class Address(Base): ... __tablename__ = 'addresses' ... id = Column(Integer, primary_key=True) ... email_address = Column(String, nullable=False) ... user_id = Column(Integer, ForeignKey('users.id')) ... ... def __repr__(self): ... 
return "" % self.email_address Now when we load the user ``jack`` (below using :meth:`~.Query.get`, which loads by primary key), removing an address from the corresponding ``addresses`` collection will result in that ``Address`` being deleted: .. sourcecode:: python+sql # load Jack by primary key {sql}>>> jack = session.query(User).get(5) #doctest: +NORMALIZE_WHITESPACE BEGIN (implicit) SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, users.password AS users_password FROM users WHERE users.id = ? (5,) {stop} # remove one Address (lazy load fires off) {sql}>>> del jack.addresses[1] #doctest: +NORMALIZE_WHITESPACE SELECT addresses.id AS addresses_id, addresses.email_address AS addresses_email_address, addresses.user_id AS addresses_user_id FROM addresses WHERE ? = addresses.user_id (5,) {stop} # only one address remains {sql}>>> session.query(Address).filter( ... Address.email_address.in_(['jack@google.com', 'j25@yahoo.com']) ... ).count() # doctest: +NORMALIZE_WHITESPACE DELETE FROM addresses WHERE addresses.id = ? (2,) SELECT count(*) AS count_1 FROM (SELECT addresses.id AS addresses_id, addresses.email_address AS addresses_email_address, addresses.user_id AS addresses_user_id FROM addresses WHERE addresses.email_address IN (?, ?)) AS anon_1 ('jack@google.com', 'j25@yahoo.com') {stop}1 Deleting Jack will delete both Jack and the remaining ``Address`` associated with the user: .. sourcecode:: python+sql >>> session.delete(jack) {sql}>>> session.query(User).filter_by(name='jack').count() # doctest: +NORMALIZE_WHITESPACE DELETE FROM addresses WHERE addresses.id = ? (1,) DELETE FROM users WHERE users.id = ? (5,) SELECT count(*) AS count_1 FROM (SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, users.password AS users_password FROM users WHERE users.name = ?) AS anon_1 ('jack',) {stop}0 {sql}>>> session.query(Address).filter( ... Address.email_address.in_(['jack@google.com', 'j25@yahoo.com']) ... ).count() # doctest: +NORMALIZE_WHITESPACE SELECT count(*) AS count_1 FROM (SELECT addresses.id AS addresses_id, addresses.email_address AS addresses_email_address, addresses.user_id AS addresses_user_id FROM addresses WHERE addresses.email_address IN (?, ?)) AS anon_1 ('jack@google.com', 'j25@yahoo.com') {stop}0 .. topic:: More on Cascades Further detail on configuration of cascades is at :ref:`unitofwork_cascades`. The cascade functionality can also integrate smoothly with the ``ON DELETE CASCADE`` functionality of the relational database. See :ref:`passive_deletes` for details. Building a Many To Many Relationship ==================================== We're moving into the bonus round here, but lets show off a many-to-many relationship. We'll sneak in some other features too, just to take a tour. We'll make our application a blog application, where users can write ``BlogPost`` items, which have ``Keyword`` items associated with them. For a plain many-to-many, we need to create an un-mapped :class:`.Table` construct to serve as the association table. This looks like the following:: >>> from sqlalchemy import Table, Text >>> # association table >>> post_keywords = Table('post_keywords', Base.metadata, ... Column('post_id', Integer, ForeignKey('posts.id')), ... Column('keyword_id', Integer, ForeignKey('keywords.id')) ... ) Above, we can see declaring a :class:`.Table` directly is a little different than declaring a mapped class. 
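Set side by side (a rough sketch for comparison only - the mapped class below
is hypothetical and is not used anywhere in the tutorial), the two styles look
like this::

    # Core style: an explicit Table (repeated from above for comparison);
    # Columns are passed as arguments and carry their names explicitly
    post_keywords = Table('post_keywords', Base.metadata,
        Column('post_id', Integer, ForeignKey('posts.id')),
        Column('keyword_id', Integer, ForeignKey('keywords.id'))
    )

    # Declarative style: a class with Columns assigned to attributes;
    # the attribute names become the column names (illustrative only)
    class PostKeyword(Base):
        __tablename__ = 'post_keywords_mapped'   # hypothetical table name
        post_id = Column(Integer, ForeignKey('posts.id'), primary_key=True)
        keyword_id = Column(Integer, ForeignKey('keywords.id'), primary_key=True)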
:class:`.Table` is a constructor function, so each individual :class:`.Column` argument is separated by a comma. The :class:`.Column` object is also given its name explicitly, rather than it being taken from an assigned attribute name. Next we define ``BlogPost`` and ``Keyword``, with a :func:`.relationship` linked via the ``post_keywords`` table:: >>> class BlogPost(Base): ... __tablename__ = 'posts' ... ... id = Column(Integer, primary_key=True) ... user_id = Column(Integer, ForeignKey('users.id')) ... headline = Column(String(255), nullable=False) ... body = Column(Text) ... ... # many to many BlogPost<->Keyword ... keywords = relationship('Keyword', secondary=post_keywords, backref='posts') ... ... def __init__(self, headline, body, author): ... self.author = author ... self.headline = headline ... self.body = body ... ... def __repr__(self): ... return "BlogPost(%r, %r, %r)" % (self.headline, self.body, self.author) >>> class Keyword(Base): ... __tablename__ = 'keywords' ... ... id = Column(Integer, primary_key=True) ... keyword = Column(String(50), nullable=False, unique=True) ... ... def __init__(self, keyword): ... self.keyword = keyword Above, the many-to-many relationship is ``BlogPost.keywords``. The defining feature of a many-to-many relationship is the ``secondary`` keyword argument which references a :class:`~sqlalchemy.schema.Table` object representing the association table. This table only contains columns which reference the two sides of the relationship; if it has *any* other columns, such as its own primary key, or foreign keys to other tables, SQLAlchemy requires a different usage pattern called the "association object", described at :ref:`association_pattern`. We would also like our ``BlogPost`` class to have an ``author`` field. We will add this as another bidirectional relationship, except one issue we'll have is that a single user might have lots of blog posts. When we access ``User.posts``, we'd like to be able to filter results further so as not to load the entire collection. For this we use a setting accepted by :func:`~sqlalchemy.orm.relationship` called ``lazy='dynamic'``, which configures an alternate **loader strategy** on the attribute. To use it on the "reverse" side of a :func:`~sqlalchemy.orm.relationship`, we use the :func:`~sqlalchemy.orm.backref` function: .. sourcecode:: python+sql >>> from sqlalchemy.orm import backref >>> # "dynamic" loading relationship to User >>> BlogPost.author = relationship(User, backref=backref('posts', lazy='dynamic')) Create new tables: .. sourcecode:: python+sql {sql}>>> Base.metadata.create_all(engine) # doctest: +NORMALIZE_WHITESPACE PRAGMA table_info("users") () PRAGMA table_info("addresses") () PRAGMA table_info("posts") () PRAGMA table_info("keywords") () PRAGMA table_info("post_keywords") () CREATE TABLE posts ( id INTEGER NOT NULL, user_id INTEGER, headline VARCHAR(255) NOT NULL, body TEXT, PRIMARY KEY (id), FOREIGN KEY(user_id) REFERENCES users (id) ) () COMMIT CREATE TABLE keywords ( id INTEGER NOT NULL, keyword VARCHAR(50) NOT NULL, PRIMARY KEY (id), UNIQUE (keyword) ) () COMMIT CREATE TABLE post_keywords ( post_id INTEGER, keyword_id INTEGER, FOREIGN KEY(post_id) REFERENCES posts (id), FOREIGN KEY(keyword_id) REFERENCES keywords (id) ) () COMMIT Usage is not too different from what we've been doing. Let's give Wendy some blog posts: .. sourcecode:: python+sql {sql}>>> wendy = session.query(User).\ ... filter_by(name='wendy').\ ... 

Next we define ``BlogPost`` and ``Keyword``, with a
:func:`.relationship` linked via the ``post_keywords`` table::

    >>> class BlogPost(Base):
    ...     __tablename__ = 'posts'
    ...
    ...     id = Column(Integer, primary_key=True)
    ...     user_id = Column(Integer, ForeignKey('users.id'))
    ...     headline = Column(String(255), nullable=False)
    ...     body = Column(Text)
    ...
    ...     # many to many BlogPost<->Keyword
    ...     keywords = relationship('Keyword', secondary=post_keywords, backref='posts')
    ...
    ...     def __init__(self, headline, body, author):
    ...         self.author = author
    ...         self.headline = headline
    ...         self.body = body
    ...
    ...     def __repr__(self):
    ...         return "BlogPost(%r, %r, %r)" % (self.headline, self.body, self.author)

    >>> class Keyword(Base):
    ...     __tablename__ = 'keywords'
    ...
    ...     id = Column(Integer, primary_key=True)
    ...     keyword = Column(String(50), nullable=False, unique=True)
    ...
    ...     def __init__(self, keyword):
    ...         self.keyword = keyword

Above, the many-to-many relationship is ``BlogPost.keywords``.  The
defining feature of a many-to-many relationship is the ``secondary``
keyword argument which references a :class:`~sqlalchemy.schema.Table`
object representing the association table.  This table only contains
columns which reference the two sides of the relationship; if it has
*any* other columns, such as its own primary key, or foreign keys to
other tables, SQLAlchemy requires a different usage pattern called the
"association object", described at :ref:`association_pattern`.

We would also like our ``BlogPost`` class to have an ``author`` field.
We will add this as another bidirectional relationship; one issue
we'll have is that a single user might have lots of blog posts.  When
we access ``User.posts``, we'd like to be able to filter results
further so as not to load the entire collection.  For this we use a
setting accepted by :func:`~sqlalchemy.orm.relationship` called
``lazy='dynamic'``, which configures an alternate **loader strategy**
on the attribute.  To use it on the "reverse" side of a
:func:`~sqlalchemy.orm.relationship`, we use the
:func:`~sqlalchemy.orm.backref` function:

.. sourcecode:: python+sql

    >>> from sqlalchemy.orm import backref
    >>> # "dynamic" loading relationship to User
    >>> BlogPost.author = relationship(User, backref=backref('posts', lazy='dynamic'))

Create new tables:

.. sourcecode:: python+sql

    {sql}>>> Base.metadata.create_all(engine) # doctest: +NORMALIZE_WHITESPACE
    PRAGMA table_info("users")
    ()
    PRAGMA table_info("addresses")
    ()
    PRAGMA table_info("posts")
    ()
    PRAGMA table_info("keywords")
    ()
    PRAGMA table_info("post_keywords")
    ()
    CREATE TABLE posts (
        id INTEGER NOT NULL,
        user_id INTEGER,
        headline VARCHAR(255) NOT NULL,
        body TEXT,
        PRIMARY KEY (id),
        FOREIGN KEY(user_id) REFERENCES users (id)
    )
    ()
    COMMIT
    CREATE TABLE keywords (
        id INTEGER NOT NULL,
        keyword VARCHAR(50) NOT NULL,
        PRIMARY KEY (id),
        UNIQUE (keyword)
    )
    ()
    COMMIT
    CREATE TABLE post_keywords (
        post_id INTEGER,
        keyword_id INTEGER,
        FOREIGN KEY(post_id) REFERENCES posts (id),
        FOREIGN KEY(keyword_id) REFERENCES keywords (id)
    )
    ()
    COMMIT

Usage is not too different from what we've been doing.  Let's give
Wendy some blog posts:

.. sourcecode:: python+sql

    {sql}>>> wendy = session.query(User).\
    ...                 filter_by(name='wendy').\
    ...                 one() #doctest: +NORMALIZE_WHITESPACE
    SELECT users.id AS users_id,
            users.name AS users_name,
            users.fullname AS users_fullname,
            users.password AS users_password
    FROM users
    WHERE users.name = ?
    ('wendy',)
    {stop}

    >>> post = BlogPost("Wendy's Blog Post", "This is a test", wendy)
    >>> session.add(post)

We're storing keywords uniquely in the database, but we know that we
don't have any yet, so we can just create them:

.. sourcecode:: python+sql

    >>> post.keywords.append(Keyword('wendy'))
    >>> post.keywords.append(Keyword('firstpost'))

We can now look up all blog posts with the keyword 'firstpost'.  We'll
use the ``any`` operator to locate "blog posts where any of its
keywords has the keyword string 'firstpost'":

.. sourcecode:: python+sql

    {sql}>>> session.query(BlogPost).\
    ...             filter(BlogPost.keywords.any(keyword='firstpost')).\
    ...             all() #doctest: +NORMALIZE_WHITESPACE
    INSERT INTO keywords (keyword) VALUES (?)
    ('wendy',)
    INSERT INTO keywords (keyword) VALUES (?)
    ('firstpost',)
    INSERT INTO posts (user_id, headline, body) VALUES (?, ?, ?)
    (2, "Wendy's Blog Post", 'This is a test')
    INSERT INTO post_keywords (post_id, keyword_id) VALUES (?, ?)
    ((1, 1), (1, 2))
    SELECT posts.id AS posts_id,
            posts.user_id AS posts_user_id,
            posts.headline AS posts_headline,
            posts.body AS posts_body
    FROM posts
    WHERE EXISTS (SELECT 1
    FROM post_keywords, keywords
    WHERE posts.id = post_keywords.post_id
        AND keywords.id = post_keywords.keyword_id
        AND keywords.keyword = ?)
    ('firstpost',)
    {stop}[BlogPost("Wendy's Blog Post", 'This is a test', <User('wendy','Wendy Williams', 'foobar')>)]

If we want to look up posts owned by the user ``wendy``, we can tell
the query to narrow down to that ``User`` object as a parent:

.. sourcecode:: python+sql

    {sql}>>> session.query(BlogPost).\
    ...             filter(BlogPost.author==wendy).\
    ...             filter(BlogPost.keywords.any(keyword='firstpost')).\
    ...             all() #doctest: +NORMALIZE_WHITESPACE
    SELECT posts.id AS posts_id,
            posts.user_id AS posts_user_id,
            posts.headline AS posts_headline,
            posts.body AS posts_body
    FROM posts
    WHERE ? = posts.user_id AND (EXISTS (SELECT 1
    FROM post_keywords, keywords
    WHERE posts.id = post_keywords.post_id
        AND keywords.id = post_keywords.keyword_id
        AND keywords.keyword = ?))
    (2, 'firstpost')
    {stop}[BlogPost("Wendy's Blog Post", 'This is a test', <User('wendy','Wendy Williams', 'foobar')>)]

Or we can use Wendy's own ``posts`` relationship, which is a "dynamic"
relationship, to query straight from there:

.. sourcecode:: python+sql

    {sql}>>> wendy.posts.\
    ...         filter(BlogPost.keywords.any(keyword='firstpost')).\
    ...         all() #doctest: +NORMALIZE_WHITESPACE
    SELECT posts.id AS posts_id,
            posts.user_id AS posts_user_id,
            posts.headline AS posts_headline,
            posts.body AS posts_body
    FROM posts
    WHERE ? = posts.user_id AND (EXISTS (SELECT 1
    FROM post_keywords, keywords
    WHERE posts.id = post_keywords.post_id
        AND keywords.id = post_keywords.keyword_id
        AND keywords.keyword = ?))
    (2, 'firstpost')
    {stop}[BlogPost("Wendy's Blog Post", 'This is a test', <User('wendy','Wendy Williams', 'foobar')>)]

Further Reference
==================

Query Reference: :ref:`query_api_toplevel`

Mapper Reference: :ref:`mapper_config_toplevel`

Relationship Reference: :ref:`relationship_config_toplevel`

Session Reference: :doc:`/orm/session`

SQLAlchemy-0.8.4/doc/_static/0000755000076500000240000000000012251151573016426 5ustar classicstaff00000000000000SQLAlchemy-0.8.4/doc/_static/basic.css0000644000076500000240000002041712251147504020224 0ustar classicstaff00000000000000/* * basic.css * ~~~~~~~~~ * * Sphinx stylesheet -- basic theme. * * :copyright: Copyright 2007-2013 by the Sphinx team, see AUTHORS. * :license: BSD, see LICENSE for details.
* */ /* -- main layout ----------------------------------------------------------- */ div.clearer { clear: both; } /* -- relbar ---------------------------------------------------------------- */ div.related { width: 100%; font-size: 90%; } div.related h3 { display: none; } div.related ul { margin: 0; padding: 0 0 0 10px; list-style: none; } div.related li { display: inline; } div.related li.right { float: right; margin-right: 5px; } /* -- sidebar --------------------------------------------------------------- */ div.sphinxsidebarwrapper { padding: 10px 5px 0 10px; } div.sphinxsidebar { float: left; width: 230px; margin-left: -100%; font-size: 90%; } div.sphinxsidebar ul { list-style: none; } div.sphinxsidebar ul ul, div.sphinxsidebar ul.want-points { margin-left: 20px; list-style: square; } div.sphinxsidebar ul ul { margin-top: 0; margin-bottom: 0; } div.sphinxsidebar form { margin-top: 10px; } div.sphinxsidebar input { border: 1px solid #98dbcc; font-family: sans-serif; font-size: 1em; } div.sphinxsidebar #searchbox input[type="text"] { width: 170px; } div.sphinxsidebar #searchbox input[type="submit"] { width: 30px; } img { border: 0; } /* -- search page ----------------------------------------------------------- */ ul.search { margin: 10px 0 0 20px; padding: 0; } ul.search li { padding: 5px 0 5px 20px; background-image: url(file.png); background-repeat: no-repeat; background-position: 0 7px; } ul.search li a { font-weight: bold; } ul.search li div.context { color: #888; margin: 2px 0 0 30px; text-align: left; } ul.keywordmatches li.goodmatch a { font-weight: bold; } /* -- index page ------------------------------------------------------------ */ table.contentstable { width: 90%; } table.contentstable p.biglink { line-height: 150%; } a.biglink { font-size: 1.3em; } span.linkdescr { font-style: italic; padding-top: 5px; font-size: 90%; } /* -- general index --------------------------------------------------------- */ table.indextable { width: 100%; } table.indextable td { text-align: left; vertical-align: top; } table.indextable dl, table.indextable dd { margin-top: 0; margin-bottom: 0; } table.indextable tr.pcap { height: 10px; } table.indextable tr.cap { margin-top: 10px; background-color: #f2f2f2; } img.toggler { margin-right: 3px; margin-top: 3px; cursor: pointer; } div.modindex-jumpbox { border-top: 1px solid #ddd; border-bottom: 1px solid #ddd; margin: 1em 0 1em 0; padding: 0.4em; } div.genindex-jumpbox { border-top: 1px solid #ddd; border-bottom: 1px solid #ddd; margin: 1em 0 1em 0; padding: 0.4em; } /* -- general body styles --------------------------------------------------- */ a.headerlink { visibility: hidden; } h1:hover > a.headerlink, h2:hover > a.headerlink, h3:hover > a.headerlink, h4:hover > a.headerlink, h5:hover > a.headerlink, h6:hover > a.headerlink, dt:hover > a.headerlink { visibility: visible; } div.body p.caption { text-align: inherit; } div.body td { text-align: left; } .field-list ul { padding-left: 1em; } .first { margin-top: 0 !important; } p.rubric { margin-top: 30px; font-weight: bold; } img.align-left, .figure.align-left, object.align-left { clear: left; float: left; margin-right: 1em; } img.align-right, .figure.align-right, object.align-right { clear: right; float: right; margin-left: 1em; } img.align-center, .figure.align-center, object.align-center { display: block; margin-left: auto; margin-right: auto; } .align-left { text-align: left; } .align-center { text-align: center; } .align-right { text-align: right; } /* -- sidebars 
-------------------------------------------------------------- */ div.sidebar { margin: 0 0 0.5em 1em; border: 1px solid #ddb; padding: 7px 7px 0 7px; background-color: #ffe; width: 40%; float: right; } p.sidebar-title { font-weight: bold; } /* -- topics ---------------------------------------------------------------- */ div.topic { border: 1px solid #ccc; padding: 7px 7px 0 7px; margin: 10px 0 10px 0; } p.topic-title { font-size: 1.1em; font-weight: bold; margin-top: 10px; } /* -- admonitions ----------------------------------------------------------- */ div.admonition { margin-top: 10px; margin-bottom: 10px; padding: 7px; } div.admonition dt { font-weight: bold; } div.admonition dl { margin-bottom: 0; } p.admonition-title { margin: 0px 10px 5px 0px; font-weight: bold; } div.body p.centered { text-align: center; margin-top: 25px; } /* -- tables ---------------------------------------------------------------- */ table.docutils { border: 0; border-collapse: collapse; } table.docutils td, table.docutils th { padding: 1px 8px 1px 5px; border-top: 0; border-left: 0; border-right: 0; border-bottom: 1px solid #aaa; } table.field-list td, table.field-list th { border: 0 !important; } table.footnote td, table.footnote th { border: 0 !important; } th { text-align: left; padding-right: 5px; } table.citation { border-left: solid 1px gray; margin-left: 1px; } table.citation td { border-bottom: none; } /* -- other body styles ----------------------------------------------------- */ ol.arabic { list-style: decimal; } ol.loweralpha { list-style: lower-alpha; } ol.upperalpha { list-style: upper-alpha; } ol.lowerroman { list-style: lower-roman; } ol.upperroman { list-style: upper-roman; } dl { margin-bottom: 15px; } dd p { margin-top: 0px; } dd ul, dd table { margin-bottom: 10px; } dd { margin-top: 3px; margin-bottom: 10px; margin-left: 30px; } dt:target, .highlighted { background-color: #fbe54e; } dl.glossary dt { font-weight: bold; font-size: 1.1em; } .field-list ul { margin: 0; padding-left: 1em; } .field-list p { margin: 0; } .refcount { color: #060; } .optional { font-size: 1.3em; } .versionmodified { font-style: italic; } .system-message { background-color: #fda; padding: 5px; border: 3px solid red; } .footnote:target { background-color: #ffa; } .line-block { display: block; margin-top: 1em; margin-bottom: 1em; } .line-block .line-block { margin-top: 0; margin-bottom: 0; margin-left: 1.5em; } .guilabel, .menuselection { font-family: sans-serif; } .accelerator { text-decoration: underline; } .classifier { font-style: oblique; } abbr, acronym { border-bottom: dotted 1px; cursor: help; } /* -- code displays --------------------------------------------------------- */ pre { overflow: auto; overflow-y: hidden; /* fixes display issues on Chrome browsers */ } td.linenos pre { padding: 5px 0px; border: 0; background-color: transparent; color: #aaa; } table.highlighttable { margin-left: 0.5em; } table.highlighttable td { padding: 0 0.5em 0 0.5em; } tt.descname { background-color: transparent; font-weight: bold; font-size: 1.2em; } tt.descclassname { background-color: transparent; } tt.xref, a tt { background-color: transparent; font-weight: bold; } h1 tt, h2 tt, h3 tt, h4 tt, h5 tt, h6 tt { background-color: transparent; } .viewcode-link { float: right; } .viewcode-back { float: right; font-family: sans-serif; } div.viewcode-block:target { margin: -1px -10px; padding: 0 10px; } /* -- math display ---------------------------------------------------------- */ img.math { vertical-align: middle; } div.body 
div.math p { text-align: center; } span.eqno { float: right; } /* -- printout stylesheet --------------------------------------------------- */ @media print { div.document, div.documentwrapper, div.bodywrapper { margin: 0 !important; width: 100%; } div.sphinxsidebar, div.related, div.footer, #top-link { display: none; } }SQLAlchemy-0.8.4/doc/_static/comment-bright.png0000644000076500000240000000665412167630573022076 0ustar classicstaff00000000000000PNG  IHDRa OiCCPPhotoshop ICC profilexڝSgTS=BKKoR RB&*! J!QEEȠQ, !{kּ> H3Q5 B.@ $pd!s#~<<+"x M0B\t8K@zB@F&S`cbP-`'{[! eDh;VEX0fK9-0IWfH  0Q){`##xFW<+*x<$9E[-qWW.(I+6aa@.y24x6_-"bbϫp@t~,/;m%h^ uf@Wp~<5j>{-]cK'Xto(hw?G%fIq^D$.Tʳ?D*A, `6B$BB dr`)B(Ͱ*`/@4Qhp.U=pa( Aa!ڈbX#!H$ ɈQ"K5H1RT UH=r9\F;2G1Q= C7F dt1r=6Ыhڏ>C03l0.B8, c˱" VcϱwE 6wB aAHXLXNH $4 7 Q'"K&b21XH,#/{C7$C2'ITFnR#,4H#dk9, +ȅ3![ b@qS(RjJ4e2AURݨT5ZBRQ4u9̓IKhhitݕNWGw Ljg(gwLӋT071oUX**| J&*/Tު UUT^S}FU3S ԖUPSSg;goT?~YYLOCQ_ cx,!k u5&|v*=9C3J3WRf?qtN (~))4L1e\kXHQG6EYAJ'\'GgSSݧ M=:.kDwn^Loy}/TmG X $ <5qo</QC]@Caaᄑ.ȽJtq]zۯ6iܟ4)Y3sCQ? 0k߬~OCOg#/c/Wװwa>>r><72Y_7ȷOo_C#dz%gA[z|!?:eAAA!h쐭!ΑiP~aa~ 'W?pX15wCsDDDޛg1O9-J5*>.j<74?.fYXXIlK9.*6nl {/]py.,:@LN8A*%w% yg"/6шC\*NH*Mz쑼5y$3,幄'L Lݛ:v m2=:1qB!Mggfvˬen/kY- BTZ(*geWf͉9+̳ې7ᒶKW-X潬j9(xoʿܔĹdff-[n ڴ VE/(ۻCɾUUMfeI?m]Nmq#׹=TR+Gw- 6 U#pDy  :v{vg/jBFS[b[O>zG499?rCd&ˮ/~јѡ򗓿m|x31^VwwO| (hSЧc3-bKGD pHYs  tIME 6 B\<IDAT8˅Kh]es1mA`jh[-E(FEaA!bIȐ*BX"؁4)NURZ!Mhjssm؋^-\gg ]o|Ҭ[346>zd ]#8Oݺt{5uIXN!I=@Vf=v1}e>;fvnvxaHrʪJF`D¹WZ]S%S)WAb |0K=So7D~\~q-˟\aMZ,S'*} F`Nnz674U H3Q5 B.@ $pd!s#~<<+"x M0B\t8K@zB@F&S`cbP-`'{[! eDh;VEX0fK9-0IWfH  0Q){`##xFW<+*x<$9E[-qWW.(I+6aa@.y24x6_-"bbϫp@t~,/;m%h^ uf@Wp~<5j>{-]cK'Xto(hw?G%fIq^D$.Tʳ?D*A, `6B$BB dr`)B(Ͱ*`/@4Qhp.U=pa( Aa!ڈbX#!H$ ɈQ"K5H1RT UH=r9\F;2G1Q= C7F dt1r=6Ыhڏ>C03l0.B8, c˱" VcϱwE 6wB aAHXLXNH $4 7 Q'"K&b21XH,#/{C7$C2'ITFnR#,4H#dk9, +ȅ3![ b@qS(RjJ4e2AURݨT5ZBRQ4u9̓IKhhitݕNWGw Ljg(gwLӋT071oUX**| J&*/Tު UUT^S}FU3S ԖUPSSg;goT?~YYLOCQ_ cx,!k u5&|v*=9C3J3WRf?qtN (~))4L1e\kXHQG6EYAJ'\'GgSSݧ M=:.kDwn^Loy}/TmG X $ <5qo</QC]@Caaᄑ.ȽJtq]zۯ6iܟ4)Y3sCQ? 0k߬~OCOg#/c/Wװwa>>r><72Y_7ȷOo_C#dz%gA[z|!?:eAAA!h쐭!ΑiP~aa~ 'W?pX15wCsDDDޛg1O9-J5*>.j<74?.fYXXIlK9.*6nl {/]py.,:@LN8A*%w% yg"/6шC\*NH*Mz쑼5y$3,幄'L Lݛ:v m2=:1qB!Mggfvˬen/kY- BTZ(*geWf͉9+̳ې7ᒶKW-X潬j9(xoʿܔĹdff-[n ڴ VE/(ۻCɾUUMfeI?m]Nmq#׹=TR+Gw- 6 U#pDy  :v{vg/jBFS[b[O>zG499?rCd&ˮ/~јѡ򗓿m|x31^VwwO| (hSЧc3-bKGD pHYs  tIME!,IDAT8e_Hu?}s3y˕U2MvQ֊FE.łĊbE$DDZF5b@Q":2{n.s<_ y?mwV@tR`}Z _# _=_@ w^R%6gC-έ(K>| ${} H3Q5 B.@ $pd!s#~<<+"x M0B\t8K@zB@F&S`cbP-`'{[! eDh;VEX0fK9-0IWfH  0Q){`##xFW<+*x<$9E[-qWW.(I+6aa@.y24x6_-"bbϫp@t~,/;m%h^ uf@Wp~<5j>{-]cK'Xto(hw?G%fIq^D$.Tʳ?D*A, `6B$BB dr`)B(Ͱ*`/@4Qhp.U=pa( Aa!ڈbX#!H$ ɈQ"K5H1RT UH=r9\F;2G1Q= C7F dt1r=6Ыhڏ>C03l0.B8, c˱" VcϱwE 6wB aAHXLXNH $4 7 Q'"K&b21XH,#/{C7$C2'ITFnR#,4H#dk9, +ȅ3![ b@qS(RjJ4e2AURݨT5ZBRQ4u9̓IKhhitݕNWGw Ljg(gwLӋT071oUX**| J&*/Tު UUT^S}FU3S ԖUPSSg;goT?~YYLOCQ_ cx,!k u5&|v*=9C3J3WRf?qtN (~))4L1e\kXHQG6EYAJ'\'GgSSݧ M=:.kDwn^Loy}/TmG X $ <5qo</QC]@Caaᄑ.ȽJtq]zۯ6iܟ4)Y3sCQ? 
0k߬~OCOg#/c/Wװwa>>r><72Y_7ȷOo_C#dz%gA[z|!?:eAAA!h쐭!ΑiP~aa~ 'W?pX15wCsDDDޛg1O9-J5*>.j<74?.fYXXIlK9.*6nl {/]py.,:@LN8A*%w% yg"/6шC\*NH*Mz쑼5y$3,幄'L Lݛ:v m2=:1qB!Mggfvˬen/kY- BTZ(*geWf͉9+̳ې7ᒶKW-X潬j9(xoʿܔĹdff-[n ڴ VE/(ۻCɾUUMfeI?m]Nmq#׹=TR+Gw- 6 U#pDy  :v{vg/jBFS[b[O>zG499?rCd&ˮ/~јѡ򗓿m|x31^VwwO| (hSЧc3-bKGD pHYs  tIME 1;VIDAT8ukU?sg4h`G1 RQܸp%Bn"bЍXJ .4V iZ##T;m!4bP~7r>ιbwc;m;oӍAΆ ζZ^/|s{;yR=9(rtVoG1w#_ө{*E&!(LVuoᲵ‘D PG4 :&~*ݳreu: S-,U^E&JY[P!RB ŖޞʖR@_ȐdBfNvHf"2T]R j'B1ddAak/DIJD D2H&L`&L $Ex,6|~_\P $MH`I=@Z||ttvgcЕWTZ'3rje"ܵx9W> mb|byfFRx{w%DZC$wdցHmWnta(M<~;9]C/_;Տ#}o`zSڷ_>:;x컓?yݩ|}~wam-/7=0S5RP"*֯ IENDB`SQLAlchemy-0.8.4/doc/_static/default.css0000644000076500000240000000771012251147504020570 0ustar classicstaff00000000000000/* * default.css_t * ~~~~~~~~~~~~~ * * Sphinx stylesheet -- default theme. * * :copyright: Copyright 2007-2013 by the Sphinx team, see AUTHORS. * :license: BSD, see LICENSE for details. * */ @import url("basic.css"); /* -- page layout ----------------------------------------------------------- */ body { font-family: sans-serif; font-size: 100%; background-color: #11303d; color: #000; margin: 0; padding: 0; } div.document { background-color: #1c4e63; } div.documentwrapper { float: left; width: 100%; } div.bodywrapper { margin: 0 0 0 230px; } div.body { background-color: #ffffff; color: #000000; padding: 0 20px 30px 20px; } div.footer { color: #ffffff; width: 100%; padding: 9px 0 9px 0; text-align: center; font-size: 75%; } div.footer a { color: #ffffff; text-decoration: underline; } div.related { background-color: #133f52; line-height: 30px; color: #ffffff; } div.related a { color: #ffffff; } div.sphinxsidebar { } div.sphinxsidebar h3 { font-family: 'Trebuchet MS', sans-serif; color: #ffffff; font-size: 1.4em; font-weight: normal; margin: 0; padding: 0; } div.sphinxsidebar h3 a { color: #ffffff; } div.sphinxsidebar h4 { font-family: 'Trebuchet MS', sans-serif; color: #ffffff; font-size: 1.3em; font-weight: normal; margin: 5px 0 0 0; padding: 0; } div.sphinxsidebar p { color: #ffffff; } div.sphinxsidebar p.topless { margin: 5px 10px 10px 10px; } div.sphinxsidebar ul { margin: 10px; padding: 0; color: #ffffff; } div.sphinxsidebar a { color: #98dbcc; } div.sphinxsidebar input { border: 1px solid #98dbcc; font-family: sans-serif; font-size: 1em; } /* -- hyperlink styles ------------------------------------------------------ */ a { color: #355f7c; text-decoration: none; } a:visited { color: #355f7c; text-decoration: none; } a:hover { text-decoration: underline; } /* -- body styles ----------------------------------------------------------- */ div.body h1, div.body h2, div.body h3, div.body h4, div.body h5, div.body h6 { font-family: 'Trebuchet MS', sans-serif; background-color: #f2f2f2; font-weight: normal; color: #20435c; border-bottom: 1px solid #ccc; margin: 20px -20px 10px -20px; padding: 3px 0 3px 10px; } div.body h1 { margin-top: 0; font-size: 200%; } div.body h2 { font-size: 160%; } div.body h3 { font-size: 140%; } div.body h4 { font-size: 120%; } div.body h5 { font-size: 110%; } div.body h6 { font-size: 100%; } a.headerlink { color: #c60f0f; font-size: 0.8em; padding: 0 4px 0 4px; text-decoration: none; } a.headerlink:hover { background-color: #c60f0f; color: white; } div.body p, div.body dd, div.body li { text-align: justify; line-height: 130%; } div.admonition p.admonition-title + p { display: inline; } div.admonition p { margin-bottom: 5px; } div.admonition pre { margin-bottom: 5px; } div.admonition ul, div.admonition ol { margin-bottom: 5px; } div.note { 
background-color: #eee; border: 1px solid #ccc; } div.seealso { background-color: #ffc; border: 1px solid #ff6; } div.topic { background-color: #eee; } div.warning { background-color: #ffe4e4; border: 1px solid #f66; } p.admonition-title { display: inline; } p.admonition-title:after { content: ":"; } pre { padding: 5px; background-color: #eeffcc; color: #333333; line-height: 120%; border: 1px solid #ac9; border-left: none; border-right: none; } tt { background-color: #ecf0f3; padding: 0 1px 0 1px; font-size: 0.95em; } th { background-color: #ede; } .warning tt { background: #efc2c2; } .note tt { background: #d6d6d6; } .viewcode-back { font-family: sans-serif; } div.viewcode-block:target { background-color: #f4debf; border-top: 1px solid #ac9; border-bottom: 1px solid #ac9; }SQLAlchemy-0.8.4/doc/_static/docs.css0000644000076500000240000001764112251147171020100 0ustar classicstaff00000000000000/* global */ body { background-color: #FDFBFC; margin:38px; color:#333333; } a { font-weight:normal; text-decoration:none; } form { display:inline; } /* hyperlinks */ a:link, a:visited, a:active { /*color:#0000FF;*/ color: #990000; } a:hover { color: #FF0000; /*color:#700000;*/ text-decoration:underline; } /* paragraph links after sections. These aren't visible until hovering over the tag, then have a "reverse video" effect over the actual link */ a.headerlink { font-size: 0.8em; padding: 0 4px 0 4px; text-decoration: none; visibility: hidden; } h1:hover > a.headerlink, h2:hover > a.headerlink, h3:hover > a.headerlink, h4:hover > a.headerlink, h5:hover > a.headerlink, h6:hover > a.headerlink, dt:hover > a.headerlink { visibility: visible; } a.headerlink:hover { background-color: #990000; color: white; } /* Container setup */ #docs-container { max-width:1000px; } /* header/footer elements */ #docs-header h1 { font-size:20px; color: #222222; margin: 0; padding: 0; } #docs-header { font-family:Verdana,sans-serif; font-size:.9em; } #docs-top-navigation, #docs-bottom-navigation { font-family: Verdana, sans-serif; background-color: #FBFBEE; border: solid 1px #CCC; padding:10px; font-size:.8em; } #docs-top-navigation { margin:10px 0px 10px 0px; line-height:1.2em; } .docs-navigation-links { font-family:Verdana,sans-serif; } #docs-bottom-navigation { float:right; margin: 1em 0 1em 5px; } #docs-copyright { font-size:.85em; padding:5px 0px; } #docs-header h1, #docs-top-navigation h1, #docs-top-navigation h2 { font-family:Tahoma,Geneva,sans-serif; font-weight:normal; } #docs-top-navigation h2 { margin:16px 4px 7px 5px; font-size:1.6em; } #docs-search { float:right; } #docs-top-page-control { float:right; width:350px; } #docs-top-page-control ul { padding:0; margin:0; } #docs-top-page-control li { font-size:.9em; list-style-type:none; padding:1px 8px; } #docs-container .version-num { font-weight: bold; } /* content container, sidebar */ #docs-body-container { background-color:#EFEFEF; border: solid 1px #CCC; } #docs-body, #docs-sidebar { /*font-family: helvetica, arial, sans-serif; font-size:.9em;*/ font-family: Verdana, sans-serif; font-size:.85em; line-height:1.5em; } #docs-body { min-height: 700px; } #docs-sidebar > ul { font-size:.85em; } #docs-sidebar { float:left; width:212px; padding: 10px 0 0 15px; font-size:.85em; } #docs-sidebar h3, #docs-sidebar h4 { background-color: #DDDDDD; color: #222222; font-family: Verdana,sans-serif; font-size: 1.1em; font-weight: normal; margin: 10px 0 0 -15px; padding: 5px 10px 5px 10px; text-shadow: 1px 1px 0 white; width:210px; } #docs-sidebar h3 a, #docs-sidebar h4 a { 
color: #222222; } #docs-sidebar ul { margin: 10px 10px 10px 0px; padding: 0; list-style: none outside none; } #docs-sidebar ul ul { margin-bottom: 0; margin-top: 0; list-style: square outside none; margin-left: 20px; } #docs-body { background-color:#FFFFFF; padding:1px 10px 10px 10px; } #docs-body.withsidebar { margin: 0 0 0 230px; border-left:3px solid #DFDFDF; } #docs-body h1, #docs-body h2, #docs-body h3, #docs-body h4 { font-family:Helvetica, Arial, sans-serif; } #docs-body h1 { /* hide the

for each content section. */ display:none; font-size:2.0em; } #docs-body h2 { font-size:1.8em; border-top:1px solid; /*border-bottom:1px solid;*/ padding-top:20px; } #sqlalchemy-documentation h2 { border-top:none; padding-top:0; } #docs-body h3 { font-size:1.4em; } /* SQL popup, code styles */ .highlight { background:none; } #docs-container pre { font-size:1.2em; } #docs-container .pre { font-size:1.1em; } #docs-container pre { background-color: #f0f0f0; border: solid 1px #ccc; box-shadow: 2px 2px 3px #DFDFDF; padding:10px; margin: 5px 0px 5px 0px; overflow:auto; line-height:1.3em; } .popup_sql, .show_sql { background-color: #FBFBEE; padding:5px 10px; margin:10px -5px; border:1px dashed; } /* the [SQL] links used to display SQL */ #docs-container .sql_link { font-weight:normal; font-family: arial, sans-serif; font-size:.9em; text-transform: uppercase; color:#990000; border:1px solid; padding:1px 2px 1px 2px; margin:0px 10px 0px 15px; float:right; line-height:1.2em; } #docs-container a.sql_link, #docs-container .sql_link { text-decoration: none; padding:1px 2px; } #docs-container a.sql_link:hover { text-decoration: none; color:#fff; border:1px solid #900; background-color: #900; } /* changeset stuff */ #docs-container a.changeset-link { font-size: 0.8em; padding: 0 4px 0 4px; text-decoration: none; } /* docutils-specific elements */ th.field-name { text-align:right; } div.note, div.warning, p.deprecated, div.topic, div.admonition { background-color:#EEFFEF; } div.faq { background-color: #EFEFEF; } div.faq ul { list-style: square outside none; } div.admonition, div.topic, .deprecated, .versionadded, .versionchanged { border:1px solid #CCCCCC; padding:5px 10px; font-size:.9em; margin-top:5px; box-shadow: 2px 2px 3px #DFDFDF; } /* grrr sphinx changing your document structures, removing classes.... 
*/ .versionadded .versionmodified, .versionchanged .versionmodified, .deprecated .versionmodified, .versionadded > p:first-child > span:first-child, .versionchanged > p:first-child > span:first-child, .deprecated > p:first-child > span:first-child { background-color: #ECF0F3; color: #990000; font-style: italic; } div.inherited-member { border:1px solid #CCCCCC; padding:5px 5px; font-size:.9em; box-shadow: 2px 2px 3px #DFDFDF; } div.warning .admonition-title { color:#FF0000; } div.admonition .admonition-title, div.topic .topic-title { font-weight:bold; } .viewcode-back, .viewcode-link { float:right; } dl.function > dt, dl.attribute > dt, dl.classmethod > dt, dl.method > dt, dl.class > dt, dl.exception > dt { background-color:#F0F0F0; margin:25px -10px 10px 10px; padding: 0px 10px; } dl.glossary > dt { font-weight:bold; font-size:1.1em; padding-top:10px; } dt:target, span.highlight { background-color:#FBE54E; } a.headerlink { font-size: 0.8em; padding: 0 4px 0 4px; text-decoration: none; visibility: hidden; } h1:hover > a.headerlink, h2:hover > a.headerlink, h3:hover > a.headerlink, h4:hover > a.headerlink, h5:hover > a.headerlink, h6:hover > a.headerlink, dt:hover > a.headerlink { visibility: visible; } a.headerlink:hover { background-color: #00f; color: white; } .clearboth { clear:both; } tt.descname { background-color:transparent; font-size:1.2em; font-weight:bold; } tt.descclassname { background-color:transparent; } tt { background-color:#ECF0F3; padding:0 1px; } /* syntax highlighting overrides */ .k, .kn {color:#0908CE;} .o {color:#BF0005;} .go {color:#804049;} /* special "index page" sections with specific formatting */ div#sqlalchemy-documentation { font-size:.95em; } div#sqlalchemy-documentation em { font-style:normal; } div#sqlalchemy-documentation .rubric{ font-size:14px; background-color:#EEFFEF; padding:5px; border:1px solid #BFBFBF; } div#sqlalchemy-documentation a, div#sqlalchemy-documentation li { padding:5px 0px; } div#getting-started { border-bottom:1px solid; } div#sqlalchemy-documentation div#sqlalchemy-orm { float:left; width:48%; } div#sqlalchemy-documentation div#sqlalchemy-core { float:left; width:48%; margin:0; padding-left:10px; border-left:1px solid; } div#dialect-documentation { border-top:1px solid; /*clear:left;*/ } div .versionwarning, div .version-warning { font-size:12px; font-color:red; border:1px solid; padding:4px 4px; margin:8px 0px 2px 0px; background:#FFBBBB; } SQLAlchemy-0.8.4/doc/_static/doctools.js0000644000076500000240000001473412167630573020633 0ustar classicstaff00000000000000/* * doctools.js * ~~~~~~~~~~~ * * Sphinx JavaScript utilities for all documentation. * * :copyright: Copyright 2007-2013 by the Sphinx team, see AUTHORS. * :license: BSD, see LICENSE for details. 
* */ /** * select a different prefix for underscore */ $u = _.noConflict(); /** * make the code below compatible with browsers without * an installed firebug like debugger if (!window.console || !console.firebug) { var names = ["log", "debug", "info", "warn", "error", "assert", "dir", "dirxml", "group", "groupEnd", "time", "timeEnd", "count", "trace", "profile", "profileEnd"]; window.console = {}; for (var i = 0; i < names.length; ++i) window.console[names[i]] = function() {}; } */ /** * small helper function to urldecode strings */ jQuery.urldecode = function(x) { return decodeURIComponent(x).replace(/\+/g, ' '); }; /** * small helper function to urlencode strings */ jQuery.urlencode = encodeURIComponent; /** * This function returns the parsed url parameters of the * current request. Multiple values per key are supported, * it will always return arrays of strings for the value parts. */ jQuery.getQueryParameters = function(s) { if (typeof s == 'undefined') s = document.location.search; var parts = s.substr(s.indexOf('?') + 1).split('&'); var result = {}; for (var i = 0; i < parts.length; i++) { var tmp = parts[i].split('=', 2); var key = jQuery.urldecode(tmp[0]); var value = jQuery.urldecode(tmp[1]); if (key in result) result[key].push(value); else result[key] = [value]; } return result; }; /** * highlight a given string on a jquery object by wrapping it in * span elements with the given class name. */ jQuery.fn.highlightText = function(text, className) { function highlight(node) { if (node.nodeType == 3) { var val = node.nodeValue; var pos = val.toLowerCase().indexOf(text); if (pos >= 0 && !jQuery(node.parentNode).hasClass(className)) { var span = document.createElement("span"); span.className = className; span.appendChild(document.createTextNode(val.substr(pos, text.length))); node.parentNode.insertBefore(span, node.parentNode.insertBefore( document.createTextNode(val.substr(pos + text.length)), node.nextSibling)); node.nodeValue = val.substr(0, pos); } } else if (!jQuery(node).is("button, select, textarea")) { jQuery.each(node.childNodes, function() { highlight(this); }); } } return this.each(function() { highlight(this); }); }; /** * Small JavaScript module for the documentation. */ var Documentation = { init : function() { this.fixFirefoxAnchorBug(); this.highlightSearchWords(); this.initIndexTable(); }, /** * i18n support */ TRANSLATIONS : {}, PLURAL_EXPR : function(n) { return n == 1 ? 0 : 1; }, LOCALE : 'unknown', // gettext and ngettext don't access this so that the functions // can safely bound to a different name (_ = Documentation.gettext) gettext : function(string) { var translated = Documentation.TRANSLATIONS[string]; if (typeof translated == 'undefined') return string; return (typeof translated == 'string') ? translated : translated[0]; }, ngettext : function(singular, plural, n) { var translated = Documentation.TRANSLATIONS[singular]; if (typeof translated == 'undefined') return (n == 1) ? singular : plural; return translated[Documentation.PLURALEXPR(n)]; }, addTranslations : function(catalog) { for (var key in catalog.messages) this.TRANSLATIONS[key] = catalog.messages[key]; this.PLURAL_EXPR = new Function('n', 'return +(' + catalog.plural_expr + ')'); this.LOCALE = catalog.locale; }, /** * add context elements like header anchor links */ addContextElements : function() { $('div[id] > :header:first').each(function() { $('\u00B6'). attr('href', '#' + this.id). attr('title', _('Permalink to this headline')). appendTo(this); }); $('dt[id]').each(function() { $('\u00B6'). 
attr('href', '#' + this.id). attr('title', _('Permalink to this definition')). appendTo(this); }); }, /** * workaround a firefox stupidity */ fixFirefoxAnchorBug : function() { if (document.location.hash && $.browser.mozilla) window.setTimeout(function() { document.location.href += ''; }, 10); }, /** * highlight the search words provided in the url in the text */ highlightSearchWords : function() { var params = $.getQueryParameters(); var terms = (params.highlight) ? params.highlight[0].split(/\s+/) : []; if (terms.length) { var body = $('div.body'); window.setTimeout(function() { $.each(terms, function() { body.highlightText(this.toLowerCase(), 'highlighted'); }); }, 10); $('') .appendTo($('#searchbox')); } }, /** * init the domain index toggle buttons */ initIndexTable : function() { var togglers = $('img.toggler').click(function() { var src = $(this).attr('src'); var idnum = $(this).attr('id').substr(7); $('tr.cg-' + idnum).toggle(); if (src.substr(-9) == 'minus.png') $(this).attr('src', src.substr(0, src.length-9) + 'plus.png'); else $(this).attr('src', src.substr(0, src.length-8) + 'minus.png'); }).css('display', ''); if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) { togglers.click(); } }, /** * helper function to hide the search marks again */ hideSearchWords : function() { $('#searchbox .highlight-link').fadeOut(300); $('span.highlighted').removeClass('highlighted'); }, /** * make the url absolute */ makeURL : function(relativeURL) { return DOCUMENTATION_OPTIONS.URL_ROOT + '/' + relativeURL; }, /** * get the current relative url */ getCurrentURL : function() { var path = document.location.pathname; var parts = path.split(/\//); $.each(DOCUMENTATION_OPTIONS.URL_ROOT.split(/\//), function() { if (this == '..') parts.pop(); }); var url = parts.join('/'); return path.substring(url.lastIndexOf('/') + 1, path.length - 1); } }; // quick alias for translations _ = Documentation.gettext; $(document).ready(function() { Documentation.init(); }); SQLAlchemy-0.8.4/doc/_static/down-pressed.png0000644000076500000240000000056012167630573021557 0ustar classicstaff00000000000000PNG  IHDRasRGBbKGDC pHYs B(xtIME -vF#IDAT8!OAJ, ++@I vbÿ@W7F HN#48646TMvv޼7Dsax1U q;< E-f)j%po4xF78G>)- EYm4%7YTk-Qa"NWAo-yeq,) Ypt\hqmszG]Nar߶s^l vh\2%0EeRvIENDB`SQLAlchemy-0.8.4/doc/_static/down.png0000644000076500000240000000055312167630573020116 0ustar classicstaff00000000000000PNG  IHDRasRGBbKGDC pHYs B(xtIME"U{IDAT8ҡNCAJ, ++@4>/U^,~T&3M^^^PM6ٹs*RJa)eG*W<"F Fg78G>q OIp:sAj5GنyD^+yU:p_%G@D|aOs(yM,"msx:.b@D|`Vٟ۲иeKſ/G!IENDB`SQLAlchemy-0.8.4/doc/_static/file.png0000644000076500000240000000061012167630573020060 0ustar classicstaff00000000000000PNG  IHDRabKGD pHYs  tIME  )TIDAT8˭J@Ir('[ "&xYZ X0!i|_@tD] #xjv YNaEi(əy@D&`6PZk$)5%"z.NA#Aba`Vs_3c,2mj [klvy|!Iմy;v "߮a?A7`c^nk?Bg}TЙD# "RD1yER*6MJ3K_Ut8F~IENDB`SQLAlchemy-0.8.4/doc/_static/init.js0000644000076500000240000000036412251147171017731 0ustar classicstaff00000000000000 function initSQLPopups() { $('div.popup_sql').hide(); $('a.sql_link').click(function() { $(this).nextAll('div.popup_sql:first').toggle(); return false; }) } $(document).ready(function() { initSQLPopups(); }); SQLAlchemy-0.8.4/doc/_static/jquery.js0000644000076500000240000026725412167630573020333 0ustar classicstaff00000000000000/*! 
jQuery v1.7.1 jquery.com | jquery.org/license */ (function(a,b){function cy(a){return f.isWindow(a)?a:a.nodeType===9?a.defaultView||a.parentWindow:!1}function cv(a){if(!ck[a]){var b=c.body,d=f("<"+a+">").appendTo(b),e=d.css("display");d.remove();if(e==="none"||e===""){cl||(cl=c.createElement("iframe"),cl.frameBorder=cl.width=cl.height=0),b.appendChild(cl);if(!cm||!cl.createElement)cm=(cl.contentWindow||cl.contentDocument).document,cm.write((c.compatMode==="CSS1Compat"?"":"")+""),cm.close();d=cm.createElement(a),cm.body.appendChild(d),e=f.css(d,"display"),b.removeChild(cl)}ck[a]=e}return ck[a]}function cu(a,b){var c={};f.each(cq.concat.apply([],cq.slice(0,b)),function(){c[this]=a});return c}function ct(){cr=b}function cs(){setTimeout(ct,0);return cr=f.now()}function cj(){try{return new a.ActiveXObject("Microsoft.XMLHTTP")}catch(b){}}function ci(){try{return new a.XMLHttpRequest}catch(b){}}function cc(a,c){a.dataFilter&&(c=a.dataFilter(c,a.dataType));var d=a.dataTypes,e={},g,h,i=d.length,j,k=d[0],l,m,n,o,p;for(g=1;g0){if(c!=="border")for(;g=0===c})}function S(a){return!a||!a.parentNode||a.parentNode.nodeType===11}function K(){return!0}function J(){return!1}function n(a,b,c){var d=b+"defer",e=b+"queue",g=b+"mark",h=f._data(a,d);h&&(c==="queue"||!f._data(a,e))&&(c==="mark"||!f._data(a,g))&&setTimeout(function(){!f._data(a,e)&&!f._data(a,g)&&(f.removeData(a,d,!0),h.fire())},0)}function m(a){for(var b in a){if(b==="data"&&f.isEmptyObject(a[b]))continue;if(b!=="toJSON")return!1}return!0}function l(a,c,d){if(d===b&&a.nodeType===1){var e="data-"+c.replace(k,"-$1").toLowerCase();d=a.getAttribute(e);if(typeof d=="string"){try{d=d==="true"?!0:d==="false"?!1:d==="null"?null:f.isNumeric(d)?parseFloat(d):j.test(d)?f.parseJSON(d):d}catch(g){}f.data(a,c,d)}else d=b}return d}function h(a){var b=g[a]={},c,d;a=a.split(/\s+/);for(c=0,d=a.length;c)[^>]*$|#([\w\-]*)$)/,j=/\S/,k=/^\s+/,l=/\s+$/,m=/^<(\w+)\s*\/?>(?:<\/\1>)?$/,n=/^[\],:{}\s]*$/,o=/\\(?:["\\\/bfnrt]|u[0-9a-fA-F]{4})/g,p=/"[^"\\\n\r]*"|true|false|null|-?\d+(?:\.\d*)?(?:[eE][+\-]?\d+)?/g,q=/(?:^|:|,)(?:\s*\[)+/g,r=/(webkit)[ \/]([\w.]+)/,s=/(opera)(?:.*version)?[ \/]([\w.]+)/,t=/(msie) ([\w.]+)/,u=/(mozilla)(?:.*? 
rv:([\w.]+))?/,v=/-([a-z]|[0-9])/ig,w=/^-ms-/,x=function(a,b){return(b+"").toUpperCase()},y=d.userAgent,z,A,B,C=Object.prototype.toString,D=Object.prototype.hasOwnProperty,E=Array.prototype.push,F=Array.prototype.slice,G=String.prototype.trim,H=Array.prototype.indexOf,I={};e.fn=e.prototype={constructor:e,init:function(a,d,f){var g,h,j,k;if(!a)return this;if(a.nodeType){this.context=this[0]=a,this.length=1;return this}if(a==="body"&&!d&&c.body){this.context=c,this[0]=c.body,this.selector=a,this.length=1;return this}if(typeof a=="string"){a.charAt(0)!=="<"||a.charAt(a.length-1)!==">"||a.length<3?g=i.exec(a):g=[null,a,null];if(g&&(g[1]||!d)){if(g[1]){d=d instanceof e?d[0]:d,k=d?d.ownerDocument||d:c,j=m.exec(a),j?e.isPlainObject(d)?(a=[c.createElement(j[1])],e.fn.attr.call(a,d,!0)):a=[k.createElement(j[1])]:(j=e.buildFragment([g[1]],[k]),a=(j.cacheable?e.clone(j.fragment):j.fragment).childNodes);return e.merge(this,a)}h=c.getElementById(g[2]);if(h&&h.parentNode){if(h.id!==g[2])return f.find(a);this.length=1,this[0]=h}this.context=c,this.selector=a;return this}return!d||d.jquery?(d||f).find(a):this.constructor(d).find(a)}if(e.isFunction(a))return f.ready(a);a.selector!==b&&(this.selector=a.selector,this.context=a.context);return e.makeArray(a,this)},selector:"",jquery:"1.7.1",length:0,size:function(){return this.length},toArray:function(){return F.call(this,0)},get:function(a){return a==null?this.toArray():a<0?this[this.length+a]:this[a]},pushStack:function(a,b,c){var d=this.constructor();e.isArray(a)?E.apply(d,a):e.merge(d,a),d.prevObject=this,d.context=this.context,b==="find"?d.selector=this.selector+(this.selector?" ":"")+c:b&&(d.selector=this.selector+"."+b+"("+c+")");return d},each:function(a,b){return e.each(this,a,b)},ready:function(a){e.bindReady(),A.add(a);return this},eq:function(a){a=+a;return a===-1?this.slice(a):this.slice(a,a+1)},first:function(){return this.eq(0)},last:function(){return this.eq(-1)},slice:function(){return this.pushStack(F.apply(this,arguments),"slice",F.call(arguments).join(","))},map:function(a){return this.pushStack(e.map(this,function(b,c){return a.call(b,c,b)}))},end:function(){return this.prevObject||this.constructor(null)},push:E,sort:[].sort,splice:[].splice},e.fn.init.prototype=e.fn,e.extend=e.fn.extend=function(){var a,c,d,f,g,h,i=arguments[0]||{},j=1,k=arguments.length,l=!1;typeof i=="boolean"&&(l=i,i=arguments[1]||{},j=2),typeof i!="object"&&!e.isFunction(i)&&(i={}),k===j&&(i=this,--j);for(;j0)return;A.fireWith(c,[e]),e.fn.trigger&&e(c).trigger("ready").off("ready")}},bindReady:function(){if(!A){A=e.Callbacks("once memory");if(c.readyState==="complete")return setTimeout(e.ready,1);if(c.addEventListener)c.addEventListener("DOMContentLoaded",B,!1),a.addEventListener("load",e.ready,!1);else if(c.attachEvent){c.attachEvent("onreadystatechange",B),a.attachEvent("onload",e.ready);var b=!1;try{b=a.frameElement==null}catch(d){}c.documentElement.doScroll&&b&&J()}}},isFunction:function(a){return e.type(a)==="function"},isArray:Array.isArray||function(a){return e.type(a)==="array"},isWindow:function(a){return a&&typeof a=="object"&&"setInterval"in a},isNumeric:function(a){return!isNaN(parseFloat(a))&&isFinite(a)},type:function(a){return a==null?String(a):I[C.call(a)]||"object"},isPlainObject:function(a){if(!a||e.type(a)!=="object"||a.nodeType||e.isWindow(a))return!1;try{if(a.constructor&&!D.call(a,"constructor")&&!D.call(a.constructor.prototype,"isPrototypeOf"))return!1}catch(c){return!1}var d;for(d in a);return 
d===b||D.call(a,d)},isEmptyObject:function(a){for(var b in a)return!1;return!0},error:function(a){throw new Error(a)},parseJSON:function(b){if(typeof b!="string"||!b)return null;b=e.trim(b);if(a.JSON&&a.JSON.parse)return a.JSON.parse(b);if(n.test(b.replace(o,"@").replace(p,"]").replace(q,"")))return(new Function("return "+b))();e.error("Invalid JSON: "+b)},parseXML:function(c){var d,f;try{a.DOMParser?(f=new DOMParser,d=f.parseFromString(c,"text/xml")):(d=new ActiveXObject("Microsoft.XMLDOM"),d.async="false",d.loadXML(c))}catch(g){d=b}(!d||!d.documentElement||d.getElementsByTagName("parsererror").length)&&e.error("Invalid XML: "+c);return d},noop:function(){},globalEval:function(b){b&&j.test(b)&&(a.execScript||function(b){a.eval.call(a,b)})(b)},camelCase:function(a){return a.replace(w,"ms-").replace(v,x)},nodeName:function(a,b){return a.nodeName&&a.nodeName.toUpperCase()===b.toUpperCase()},each:function(a,c,d){var f,g=0,h=a.length,i=h===b||e.isFunction(a);if(d){if(i){for(f in a)if(c.apply(a[f],d)===!1)break}else for(;g0&&a[0]&&a[j-1]||j===0||e.isArray(a));if(k)for(;i1?i.call(arguments,0):b,j.notifyWith(k,e)}}function l(a){return function(c){b[a]=arguments.length>1?i.call(arguments,0):c,--g||j.resolveWith(j,b)}}var b=i.call(arguments,0),c=0,d=b.length,e=Array(d),g=d,h=d,j=d<=1&&a&&f.isFunction(a.promise)?a:f.Deferred(),k=j.promise();if(d>1){for(;c
a",d=q.getElementsByTagName("*"),e=q.getElementsByTagName("a")[0];if(!d||!d.length||!e)return{};g=c.createElement("select"),h=g.appendChild(c.createElement("option")),i=q.getElementsByTagName("input")[0],b={leadingWhitespace:q.firstChild.nodeType===3,tbody:!q.getElementsByTagName("tbody").length,htmlSerialize:!!q.getElementsByTagName("link").length,style:/top/.test(e.getAttribute("style")),hrefNormalized:e.getAttribute("href")==="/a",opacity:/^0.55/.test(e.style.opacity),cssFloat:!!e.style.cssFloat,checkOn:i.value==="on",optSelected:h.selected,getSetAttribute:q.className!=="t",enctype:!!c.createElement("form").enctype,html5Clone:c.createElement("nav").cloneNode(!0).outerHTML!=="<:nav>",submitBubbles:!0,changeBubbles:!0,focusinBubbles:!1,deleteExpando:!0,noCloneEvent:!0,inlineBlockNeedsLayout:!1,shrinkWrapBlocks:!1,reliableMarginRight:!0},i.checked=!0,b.noCloneChecked=i.cloneNode(!0).checked,g.disabled=!0,b.optDisabled=!h.disabled;try{delete q.test}catch(s){b.deleteExpando=!1}!q.addEventListener&&q.attachEvent&&q.fireEvent&&(q.attachEvent("onclick",function(){b.noCloneEvent=!1}),q.cloneNode(!0).fireEvent("onclick")),i=c.createElement("input"),i.value="t",i.setAttribute("type","radio"),b.radioValue=i.value==="t",i.setAttribute("checked","checked"),q.appendChild(i),k=c.createDocumentFragment(),k.appendChild(q.lastChild),b.checkClone=k.cloneNode(!0).cloneNode(!0).lastChild.checked,b.appendChecked=i.checked,k.removeChild(i),k.appendChild(q),q.innerHTML="",a.getComputedStyle&&(j=c.createElement("div"),j.style.width="0",j.style.marginRight="0",q.style.width="2px",q.appendChild(j),b.reliableMarginRight=(parseInt((a.getComputedStyle(j,null)||{marginRight:0}).marginRight,10)||0)===0);if(q.attachEvent)for(o in{submit:1,change:1,focusin:1})n="on"+o,p=n in q,p||(q.setAttribute(n,"return;"),p=typeof q[n]=="function"),b[o+"Bubbles"]=p;k.removeChild(q),k=g=h=j=q=i=null,f(function(){var a,d,e,g,h,i,j,k,m,n,o,r=c.getElementsByTagName("body")[0];!r||(j=1,k="position:absolute;top:0;left:0;width:1px;height:1px;margin:0;",m="visibility:hidden;border:0;",n="style='"+k+"border:5px solid #000;padding:0;'",o="
"+""+"
",a=c.createElement("div"),a.style.cssText=m+"width:0;height:0;position:static;top:0;margin-top:"+j+"px",r.insertBefore(a,r.firstChild),q=c.createElement("div"),a.appendChild(q),q.innerHTML="
t
",l=q.getElementsByTagName("td"),p=l[0].offsetHeight===0,l[0].style.display="",l[1].style.display="none",b.reliableHiddenOffsets=p&&l[0].offsetHeight===0,q.innerHTML="",q.style.width=q.style.paddingLeft="1px",f.boxModel=b.boxModel=q.offsetWidth===2,typeof q.style.zoom!="undefined"&&(q.style.display="inline",q.style.zoom=1,b.inlineBlockNeedsLayout=q.offsetWidth===2,q.style.display="",q.innerHTML="
",b.shrinkWrapBlocks=q.offsetWidth!==2),q.style.cssText=k+m,q.innerHTML=o,d=q.firstChild,e=d.firstChild,h=d.nextSibling.firstChild.firstChild,i={doesNotAddBorder:e.offsetTop!==5,doesAddBorderForTableAndCells:h.offsetTop===5},e.style.position="fixed",e.style.top="20px",i.fixedPosition=e.offsetTop===20||e.offsetTop===15,e.style.position=e.style.top="",d.style.overflow="hidden",d.style.position="relative",i.subtractsBorderForOverflowNotVisible=e.offsetTop===-5,i.doesNotIncludeMarginInBodyOffset=r.offsetTop!==j,r.removeChild(a),q=a=null,f.extend(b,i))});return b}();var j=/^(?:\{.*\}|\[.*\])$/,k=/([A-Z])/g;f.extend({cache:{},uuid:0,expando:"jQuery"+(f.fn.jquery+Math.random()).replace(/\D/g,""),noData:{embed:!0,object:"clsid:D27CDB6E-AE6D-11cf-96B8-444553540000",applet:!0},hasData:function(a){a=a.nodeType?f.cache[a[f.expando]]:a[f.expando];return!!a&&!m(a)},data:function(a,c,d,e){if(!!f.acceptData(a)){var g,h,i,j=f.expando,k=typeof c=="string",l=a.nodeType,m=l?f.cache:a,n=l?a[j]:a[j]&&j,o=c==="events";if((!n||!m[n]||!o&&!e&&!m[n].data)&&k&&d===b)return;n||(l?a[j]=n=++f.uuid:n=j),m[n]||(m[n]={},l||(m[n].toJSON=f.noop));if(typeof c=="object"||typeof c=="function")e?m[n]=f.extend(m[n],c):m[n].data=f.extend(m[n].data,c);g=h=m[n],e||(h.data||(h.data={}),h=h.data),d!==b&&(h[f.camelCase(c)]=d);if(o&&!h[c])return g.events;k?(i=h[c],i==null&&(i=h[f.camelCase(c)])):i=h;return i}},removeData:function(a,b,c){if(!!f.acceptData(a)){var d,e,g,h=f.expando,i=a.nodeType,j=i?f.cache:a,k=i?a[h]:h;if(!j[k])return;if(b){d=c?j[k]:j[k].data;if(d){f.isArray(b)||(b in d?b=[b]:(b=f.camelCase(b),b in d?b=[b]:b=b.split(" ")));for(e=0,g=b.length;e-1)return!0;return!1},val:function(a){var c,d,e,g=this[0];{if(!!arguments.length){e=f.isFunction(a);return this.each(function(d){var g=f(this),h;if(this.nodeType===1){e?h=a.call(this,d,g.val()):h=a,h==null?h="":typeof h=="number"?h+="":f.isArray(h)&&(h=f.map(h,function(a){return a==null?"":a+""})),c=f.valHooks[this.nodeName.toLowerCase()]||f.valHooks[this.type];if(!c||!("set"in c)||c.set(this,h,"value")===b)this.value=h}})}if(g){c=f.valHooks[g.nodeName.toLowerCase()]||f.valHooks[g.type];if(c&&"get"in c&&(d=c.get(g,"value"))!==b)return d;d=g.value;return typeof d=="string"?d.replace(q,""):d==null?"":d}}}}),f.extend({valHooks:{option:{get:function(a){var b=a.attributes.value;return!b||b.specified?a.value:a.text}},select:{get:function(a){var b,c,d,e,g=a.selectedIndex,h=[],i=a.options,j=a.type==="select-one";if(g<0)return null;c=j?g:0,d=j?g+1:i.length;for(;c=0}),c.length||(a.selectedIndex=-1);return c}}},attrFn:{val:!0,css:!0,html:!0,text:!0,data:!0,width:!0,height:!0,offset:!0},attr:function(a,c,d,e){var g,h,i,j=a.nodeType;if(!!a&&j!==3&&j!==8&&j!==2){if(e&&c in f.attrFn)return f(a)[c](d);if(typeof a.getAttribute=="undefined")return f.prop(a,c,d);i=j!==1||!f.isXMLDoc(a),i&&(c=c.toLowerCase(),h=f.attrHooks[c]||(u.test(c)?x:w));if(d!==b){if(d===null){f.removeAttr(a,c);return}if(h&&"set"in h&&i&&(g=h.set(a,d,c))!==b)return g;a.setAttribute(c,""+d);return d}if(h&&"get"in h&&i&&(g=h.get(a,c))!==null)return g;g=a.getAttribute(c);return g===null?b:g}},removeAttr:function(a,b){var c,d,e,g,h=0;if(b&&a.nodeType===1){d=b.toLowerCase().split(p),g=d.length;for(;h=0}})});var z=/^(?:textarea|input|select)$/i,A=/^([^\.]*)?(?:\.(.+))?$/,B=/\bhover(\.\S+)?\b/,C=/^key/,D=/^(?:mouse|contextmenu)|click/,E=/^(?:focusinfocus|focusoutblur)$/,F=/^(\w*)(?:#([\w\-]+))?(?:\.([\w\-]+))?$/,G=function(a){var b=F.exec(a);b&&(b[1]=(b[1]||"").toLowerCase(),b[3]=b[3]&&new RegExp("(?:^|\\s)"+b[3]+"(?:\\s|$)"));return 
b},H=function(a,b){var c=a.attributes||{};return(!b[1]||a.nodeName.toLowerCase()===b[1])&&(!b[2]||(c.id||{}).value===b[2])&&(!b[3]||b[3].test((c["class"]||{}).value))},I=function(a){return f.event.special.hover?a:a.replace(B,"mouseenter$1 mouseleave$1")}; f.event={add:function(a,c,d,e,g){var h,i,j,k,l,m,n,o,p,q,r,s;if(!(a.nodeType===3||a.nodeType===8||!c||!d||!(h=f._data(a)))){d.handler&&(p=d,d=p.handler),d.guid||(d.guid=f.guid++),j=h.events,j||(h.events=j={}),i=h.handle,i||(h.handle=i=function(a){return typeof f!="undefined"&&(!a||f.event.triggered!==a.type)?f.event.dispatch.apply(i.elem,arguments):b},i.elem=a),c=f.trim(I(c)).split(" ");for(k=0;k=0&&(h=h.slice(0,-1),k=!0),h.indexOf(".")>=0&&(i=h.split("."),h=i.shift(),i.sort());if((!e||f.event.customEvent[h])&&!f.event.global[h])return;c=typeof c=="object"?c[f.expando]?c:new f.Event(h,c):new f.Event(h),c.type=h,c.isTrigger=!0,c.exclusive=k,c.namespace=i.join("."),c.namespace_re=c.namespace?new RegExp("(^|\\.)"+i.join("\\.(?:.*\\.)?")+"(\\.|$)"):null,o=h.indexOf(":")<0?"on"+h:"";if(!e){j=f.cache;for(l in j)j[l].events&&j[l].events[h]&&f.event.trigger(c,d,j[l].handle.elem,!0);return}c.result=b,c.target||(c.target=e),d=d!=null?f.makeArray(d):[],d.unshift(c),p=f.event.special[h]||{};if(p.trigger&&p.trigger.apply(e,d)===!1)return;r=[[e,p.bindType||h]];if(!g&&!p.noBubble&&!f.isWindow(e)){s=p.delegateType||h,m=E.test(s+h)?e:e.parentNode,n=null;for(;m;m=m.parentNode)r.push([m,s]),n=m;n&&n===e.ownerDocument&&r.push([n.defaultView||n.parentWindow||a,s])}for(l=0;le&&i.push({elem:this,matches:d.slice(e)});for(j=0;j0?this.on(b,null,a,c):this.trigger(b)},f.attrFn&&(f.attrFn[b]=!0),C.test(b)&&(f.event.fixHooks[b]=f.event.keyHooks),D.test(b)&&(f.event.fixHooks[b]=f.event.mouseHooks)}),function(){function x(a,b,c,e,f,g){for(var h=0,i=e.length;h0){k=j;break}}j=j[a]}e[h]=k}}}function w(a,b,c,e,f,g){for(var h=0,i=e.length;h+~,(\[\\]+)+|[>+~])(\s*,\s*)?((?:.|\r|\n)*)/g,d="sizcache"+(Math.random()+"").replace(".",""),e=0,g=Object.prototype.toString,h=!1,i=!0,j=/\\/g,k=/\r\n/g,l=/\W/;[0,0].sort(function(){i=!1;return 0});var m=function(b,d,e,f){e=e||[],d=d||c;var h=d;if(d.nodeType!==1&&d.nodeType!==9)return[];if(!b||typeof b!="string")return e;var i,j,k,l,n,q,r,t,u=!0,v=m.isXML(d),w=[],x=b;do{a.exec(""),i=a.exec(x);if(i){x=i[3],w.push(i[1]);if(i[2]){l=i[3];break}}}while(i);if(w.length>1&&p.exec(b))if(w.length===2&&o.relative[w[0]])j=y(w[0]+w[1],d,f);else{j=o.relative[w[0]]?[d]:m(w.shift(),d);while(w.length)b=w.shift(),o.relative[b]&&(b+=w.shift()),j=y(b,j,f)}else{!f&&w.length>1&&d.nodeType===9&&!v&&o.match.ID.test(w[0])&&!o.match.ID.test(w[w.length-1])&&(n=m.find(w.shift(),d,v),d=n.expr?m.filter(n.expr,n.set)[0]:n.set[0]);if(d){n=f?{expr:w.pop(),set:s(f)}:m.find(w.pop(),w.length===1&&(w[0]==="~"||w[0]==="+")&&d.parentNode?d.parentNode:d,v),j=n.expr?m.filter(n.expr,n.set):n.set,w.length>0?k=s(j):u=!1;while(w.length)q=w.pop(),r=q,o.relative[q]?r=w.pop():q="",r==null&&(r=d),o.relative[q](k,r,v)}else k=w=[]}k||(k=j),k||m.error(q||b);if(g.call(k)==="[object Array]")if(!u)e.push.apply(e,k);else if(d&&d.nodeType===1)for(t=0;k[t]!=null;t++)k[t]&&(k[t]===!0||k[t].nodeType===1&&m.contains(d,k[t]))&&e.push(j[t]);else for(t=0;k[t]!=null;t++)k[t]&&k[t].nodeType===1&&e.push(j[t]);else s(k,e);l&&(m(l,h,e,f),m.uniqueSort(e));return e};m.uniqueSort=function(a){if(u){h=i,a.sort(u);if(h)for(var b=1;b0},m.find=function(a,b,c){var d,e,f,g,h,i;if(!a)return[];for(e=0,f=o.order.length;e":function(a,b){var c,d=typeof 
b=="string",e=0,f=a.length;if(d&&!l.test(b)){b=b.toLowerCase();for(;e=0)?c||d.push(h):c&&(b[g]=!1));return!1},ID:function(a){return a[1].replace(j,"")},TAG:function(a,b){return a[1].replace(j,"").toLowerCase()},CHILD:function(a){if(a[1]==="nth"){a[2]||m.error(a[0]),a[2]=a[2].replace(/^\+|\s*/g,"");var b=/(-?)(\d*)(?:n([+\-]?\d*))?/.exec(a[2]==="even"&&"2n"||a[2]==="odd"&&"2n+1"||!/\D/.test(a[2])&&"0n+"+a[2]||a[2]);a[2]=b[1]+(b[2]||1)-0,a[3]=b[3]-0}else a[2]&&m.error(a[0]);a[0]=e++;return a},ATTR:function(a,b,c,d,e,f){var g=a[1]=a[1].replace(j,"");!f&&o.attrMap[g]&&(a[1]=o.attrMap[g]),a[4]=(a[4]||a[5]||"").replace(j,""),a[2]==="~="&&(a[4]=" "+a[4]+" ");return a},PSEUDO:function(b,c,d,e,f){if(b[1]==="not")if((a.exec(b[3])||"").length>1||/^\w/.test(b[3]))b[3]=m(b[3],null,null,c);else{var g=m.filter(b[3],c,d,!0^f);d||e.push.apply(e,g);return!1}else if(o.match.POS.test(b[0])||o.match.CHILD.test(b[0]))return!0;return b},POS:function(a){a.unshift(!0);return a}},filters:{enabled:function(a){return a.disabled===!1&&a.type!=="hidden"},disabled:function(a){return a.disabled===!0},checked:function(a){return a.checked===!0},selected:function(a){a.parentNode&&a.parentNode.selectedIndex;return a.selected===!0},parent:function(a){return!!a.firstChild},empty:function(a){return!a.firstChild},has:function(a,b,c){return!!m(c[3],a).length},header:function(a){return/h\d/i.test(a.nodeName)},text:function(a){var b=a.getAttribute("type"),c=a.type;return a.nodeName.toLowerCase()==="input"&&"text"===c&&(b===c||b===null)},radio:function(a){return a.nodeName.toLowerCase()==="input"&&"radio"===a.type},checkbox:function(a){return a.nodeName.toLowerCase()==="input"&&"checkbox"===a.type},file:function(a){return a.nodeName.toLowerCase()==="input"&&"file"===a.type},password:function(a){return a.nodeName.toLowerCase()==="input"&&"password"===a.type},submit:function(a){var b=a.nodeName.toLowerCase();return(b==="input"||b==="button")&&"submit"===a.type},image:function(a){return a.nodeName.toLowerCase()==="input"&&"image"===a.type},reset:function(a){var b=a.nodeName.toLowerCase();return(b==="input"||b==="button")&&"reset"===a.type},button:function(a){var b=a.nodeName.toLowerCase();return b==="input"&&"button"===a.type||b==="button"},input:function(a){return/input|select|textarea|button/i.test(a.nodeName)},focus:function(a){return a===a.ownerDocument.activeElement}},setFilters:{first:function(a,b){return b===0},last:function(a,b,c,d){return b===d.length-1},even:function(a,b){return b%2===0},odd:function(a,b){return b%2===1},lt:function(a,b,c){return bc[3]-0},nth:function(a,b,c){return c[3]-0===b},eq:function(a,b,c){return c[3]-0===b}},filter:{PSEUDO:function(a,b,c,d){var e=b[1],f=o.filters[e];if(f)return f(a,c,b,d);if(e==="contains")return(a.textContent||a.innerText||n([a])||"").indexOf(b[3])>=0;if(e==="not"){var g=b[3];for(var h=0,i=g.length;h=0}},ID:function(a,b){return a.nodeType===1&&a.getAttribute("id")===b},TAG:function(a,b){return b==="*"&&a.nodeType===1||!!a.nodeName&&a.nodeName.toLowerCase()===b},CLASS:function(a,b){return(" "+(a.className||a.getAttribute("class"))+" ").indexOf(b)>-1},ATTR:function(a,b){var c=b[1],d=m.attr?m.attr(a,c):o.attrHandle[c]?o.attrHandle[c](a):a[c]!=null?a[c]:a.getAttribute(c),e=d+"",f=b[2],g=b[4];return d==null?f==="!=":!f&&m.attr?d!=null:f==="="?e===g:f==="*="?e.indexOf(g)>=0:f==="~="?(" "+e+" ").indexOf(g)>=0:g?f==="!="?e!==g:f==="^="?e.indexOf(g)===0:f==="$="?e.substr(e.length-g.length)===g:f==="|="?e===g||e.substr(0,g.length+1)===g+"-":!1:e&&d!==!1},POS:function(a,b,c,d){var 
e=b[2],f=o.setFilters[e];if(f)return f(a,c,b,d)}}},p=o.match.POS,q=function(a,b){return"\\"+(b-0+1)};for(var r in o.match)o.match[r]=new RegExp(o.match[r].source+/(?![^\[]*\])(?![^\(]*\))/.source),o.leftMatch[r]=new RegExp(/(^(?:.|\r|\n)*?)/.source+o.match[r].source.replace(/\\(\d+)/g,q));var s=function(a,b){a=Array.prototype.slice.call(a,0);if(b){b.push.apply(b,a);return b}return a};try{Array.prototype.slice.call(c.documentElement.childNodes,0)[0].nodeType}catch(t){s=function(a,b){var c=0,d=b||[];if(g.call(a)==="[object Array]")Array.prototype.push.apply(d,a);else if(typeof a.length=="number")for(var e=a.length;c",e.insertBefore(a,e.firstChild),c.getElementById(d)&&(o.find.ID=function(a,c,d){if(typeof c.getElementById!="undefined"&&!d){var e=c.getElementById(a[1]);return e?e.id===a[1]||typeof e.getAttributeNode!="undefined"&&e.getAttributeNode("id").nodeValue===a[1]?[e]:b:[]}},o.filter.ID=function(a,b){var c=typeof a.getAttributeNode!="undefined"&&a.getAttributeNode("id");return a.nodeType===1&&c&&c.nodeValue===b}),e.removeChild(a),e=a=null}(),function(){var a=c.createElement("div");a.appendChild(c.createComment("")),a.getElementsByTagName("*").length>0&&(o.find.TAG=function(a,b){var c=b.getElementsByTagName(a[1]);if(a[1]==="*"){var d=[];for(var e=0;c[e];e++)c[e].nodeType===1&&d.push(c[e]);c=d}return c}),a.innerHTML="",a.firstChild&&typeof a.firstChild.getAttribute!="undefined"&&a.firstChild.getAttribute("href")!=="#"&&(o.attrHandle.href=function(a){return a.getAttribute("href",2)}),a=null}(),c.querySelectorAll&&function(){var a=m,b=c.createElement("div"),d="__sizzle__";b.innerHTML="

";if(!b.querySelectorAll||b.querySelectorAll(".TEST").length!==0){m=function(b,e,f,g){e=e||c;if(!g&&!m.isXML(e)){var h=/^(\w+$)|^\.([\w\-]+$)|^#([\w\-]+$)/.exec(b);if(h&&(e.nodeType===1||e.nodeType===9)){if(h[1])return s(e.getElementsByTagName(b),f);if(h[2]&&o.find.CLASS&&e.getElementsByClassName)return s(e.getElementsByClassName(h[2]),f)}if(e.nodeType===9){if(b==="body"&&e.body)return s([e.body],f);if(h&&h[3]){var i=e.getElementById(h[3]);if(!i||!i.parentNode)return s([],f);if(i.id===h[3])return s([i],f)}try{return s(e.querySelectorAll(b),f)}catch(j){}}else if(e.nodeType===1&&e.nodeName.toLowerCase()!=="object"){var k=e,l=e.getAttribute("id"),n=l||d,p=e.parentNode,q=/^\s*[+~]/.test(b);l?n=n.replace(/'/g,"\\$&"):e.setAttribute("id",n),q&&p&&(e=e.parentNode);try{if(!q||p)return s(e.querySelectorAll("[id='"+n+"'] "+b),f)}catch(r){}finally{l||k.removeAttribute("id")}}}return a(b,e,f,g)};for(var e in a)m[e]=a[e];b=null}}(),function(){var a=c.documentElement,b=a.matchesSelector||a.mozMatchesSelector||a.webkitMatchesSelector||a.msMatchesSelector;if(b){var d=!b.call(c.createElement("div"),"div"),e=!1;try{b.call(c.documentElement,"[test!='']:sizzle")}catch(f){e=!0}m.matchesSelector=function(a,c){c=c.replace(/\=\s*([^'"\]]*)\s*\]/g,"='$1']");if(!m.isXML(a))try{if(e||!o.match.PSEUDO.test(c)&&!/!=/.test(c)){var f=b.call(a,c);if(f||!d||a.document&&a.document.nodeType!==11)return f}}catch(g){}return m(c,null,null,[a]).length>0}}}(),function(){var a=c.createElement("div");a.innerHTML="
";if(!!a.getElementsByClassName&&a.getElementsByClassName("e").length!==0){a.lastChild.className="e";if(a.getElementsByClassName("e").length===1)return;o.order.splice(1,0,"CLASS"),o.find.CLASS=function(a,b,c){if(typeof b.getElementsByClassName!="undefined"&&!c)return b.getElementsByClassName(a[1])},a=null}}(),c.documentElement.contains?m.contains=function(a,b){return a!==b&&(a.contains?a.contains(b):!0)}:c.documentElement.compareDocumentPosition?m.contains=function(a,b){return!!(a.compareDocumentPosition(b)&16)}:m.contains=function(){return!1},m.isXML=function(a){var b=(a?a.ownerDocument||a:0).documentElement;return b?b.nodeName!=="HTML":!1};var y=function(a,b,c){var d,e=[],f="",g=b.nodeType?[b]:b;while(d=o.match.PSEUDO.exec(a))f+=d[0],a=a.replace(o.match.PSEUDO,"");a=o.relative[a]?a+"*":a;for(var h=0,i=g.length;h0)for(h=g;h=0:f.filter(a,this).length>0:this.filter(a).length>0)},closest:function(a,b){var c=[],d,e,g=this[0];if(f.isArray(a)){var h=1;while(g&&g.ownerDocument&&g!==b){for(d=0;d-1:f.find.matchesSelector(g,a)){c.push(g);break}g=g.parentNode;if(!g||!g.ownerDocument||g===b||g.nodeType===11)break}}c=c.length>1?f.unique(c):c;return this.pushStack(c,"closest",a)},index:function(a){if(!a)return this[0]&&this[0].parentNode?this.prevAll().length:-1;if(typeof a=="string")return f.inArray(this[0],f(a));return f.inArray(a.jquery?a[0]:a,this)},add:function(a,b){var c=typeof a=="string"?f(a,b):f.makeArray(a&&a.nodeType?[a]:a),d=f.merge(this.get(),c);return this.pushStack(S(c[0])||S(d[0])?d:f.unique(d))},andSelf:function(){return this.add(this.prevObject)}}),f.each({parent:function(a){var b=a.parentNode;return b&&b.nodeType!==11?b:null},parents:function(a){return f.dir(a,"parentNode")},parentsUntil:function(a,b,c){return f.dir(a,"parentNode",c)},next:function(a){return f.nth(a,2,"nextSibling")},prev:function(a){return f.nth(a,2,"previousSibling")},nextAll:function(a){return f.dir(a,"nextSibling")},prevAll:function(a){return f.dir(a,"previousSibling")},nextUntil:function(a,b,c){return f.dir(a,"nextSibling",c)},prevUntil:function(a,b,c){return f.dir(a,"previousSibling",c)},siblings:function(a){return f.sibling(a.parentNode.firstChild,a)},children:function(a){return f.sibling(a.firstChild)},contents:function(a){return f.nodeName(a,"iframe")?a.contentDocument||a.contentWindow.document:f.makeArray(a.childNodes)}},function(a,b){f.fn[a]=function(c,d){var e=f.map(this,b,c);L.test(a)||(d=c),d&&typeof d=="string"&&(e=f.filter(d,e)),e=this.length>1&&!R[a]?f.unique(e):e,(this.length>1||N.test(d))&&M.test(a)&&(e=e.reverse());return this.pushStack(e,a,P.call(arguments).join(","))}}),f.extend({filter:function(a,b,c){c&&(a=":not("+a+")");return b.length===1?f.find.matchesSelector(b[0],a)?[b[0]]:[]:f.find.matches(a,b)},dir:function(a,c,d){var e=[],g=a[c];while(g&&g.nodeType!==9&&(d===b||g.nodeType!==1||!f(g).is(d)))g.nodeType===1&&e.push(g),g=g[c];return e},nth:function(a,b,c,d){b=b||1;var e=0;for(;a;a=a[c])if(a.nodeType===1&&++e===b)break;return a},sibling:function(a,b){var c=[];for(;a;a=a.nextSibling)a.nodeType===1&&a!==b&&c.push(a);return c}});var V="abbr|article|aside|audio|canvas|datalist|details|figcaption|figure|footer|header|hgroup|mark|meter|nav|output|progress|section|summary|time|video",W=/ jQuery\d+="(?:\d+|null)"/g,X=/^\s+/,Y=/<(?!area|br|col|embed|hr|img|input|link|meta|param)(([\w:]+)[^>]*)\/>/ig,Z=/<([\w:]+)/,$=/",""],legend:[1,"
","
"],thead:[1,"","
"],tr:[2,"","
"],td:[3,"","
"],col:[2,"","
"],area:[1,"",""],_default:[0,"",""]},bh=U(c);bg.optgroup=bg.option,bg.tbody=bg.tfoot=bg.colgroup=bg.caption=bg.thead,bg.th=bg.td,f.support.htmlSerialize||(bg._default=[1,"div
","
"]),f.fn.extend({text:function(a){if(f.isFunction(a))return this.each(function(b){var c=f(this);c.text(a.call(this,b,c.text()))});if(typeof a!="object"&&a!==b)return this.empty().append((this[0]&&this[0].ownerDocument||c).createTextNode(a));return f.text(this)},wrapAll:function(a){if(f.isFunction(a))return this.each(function(b){f(this).wrapAll(a.call(this,b))});if(this[0]){var b=f(a,this[0].ownerDocument).eq(0).clone(!0);this[0].parentNode&&b.insertBefore(this[0]),b.map(function(){var a=this;while(a.firstChild&&a.firstChild.nodeType===1)a=a.firstChild;return a}).append(this)}return this},wrapInner:function(a){if(f.isFunction(a))return this.each(function(b){f(this).wrapInner(a.call(this,b))});return this.each(function(){var b=f(this),c=b.contents();c.length?c.wrapAll(a):b.append(a)})},wrap:function(a){var b=f.isFunction(a);return this.each(function(c){f(this).wrapAll(b?a.call(this,c):a)})},unwrap:function(){return this.parent().each(function(){f.nodeName(this,"body")||f(this).replaceWith(this.childNodes)}).end()},append:function(){return this.domManip(arguments,!0,function(a){this.nodeType===1&&this.appendChild(a)})},prepend:function(){return this.domManip(arguments,!0,function(a){this.nodeType===1&&this.insertBefore(a,this.firstChild)})},before:function(){if(this[0]&&this[0].parentNode)return this.domManip(arguments,!1,function(a){this.parentNode.insertBefore(a,this)});if(arguments.length){var a=f.clean(arguments);a.push.apply(a,this.toArray());return this.pushStack(a,"before",arguments)}},after:function(){if(this[0]&&this[0].parentNode)return this.domManip(arguments,!1,function(a){this.parentNode.insertBefore(a,this.nextSibling)});if(arguments.length){var a=this.pushStack(this,"after",arguments);a.push.apply(a,f.clean(arguments));return a}},remove:function(a,b){for(var c=0,d;(d=this[c])!=null;c++)if(!a||f.filter(a,[d]).length)!b&&d.nodeType===1&&(f.cleanData(d.getElementsByTagName("*")),f.cleanData([d])),d.parentNode&&d.parentNode.removeChild(d);return this},empty:function() {for(var a=0,b;(b=this[a])!=null;a++){b.nodeType===1&&f.cleanData(b.getElementsByTagName("*"));while(b.firstChild)b.removeChild(b.firstChild)}return this},clone:function(a,b){a=a==null?!1:a,b=b==null?a:b;return this.map(function(){return f.clone(this,a,b)})},html:function(a){if(a===b)return this[0]&&this[0].nodeType===1?this[0].innerHTML.replace(W,""):null;if(typeof a=="string"&&!ba.test(a)&&(f.support.leadingWhitespace||!X.test(a))&&!bg[(Z.exec(a)||["",""])[1].toLowerCase()]){a=a.replace(Y,"<$1>");try{for(var c=0,d=this.length;c1&&l0?this.clone(!0):this).get();f(e[h])[b](j),d=d.concat(j)}return this.pushStack(d,a,e.selector)}}),f.extend({clone:function(a,b,c){var d,e,g,h=f.support.html5Clone||!bc.test("<"+a.nodeName)?a.cloneNode(!0):bo(a);if((!f.support.noCloneEvent||!f.support.noCloneChecked)&&(a.nodeType===1||a.nodeType===11)&&!f.isXMLDoc(a)){bk(a,h),d=bl(a),e=bl(h);for(g=0;d[g];++g)e[g]&&bk(d[g],e[g])}if(b){bj(a,h);if(c){d=bl(a),e=bl(h);for(g=0;d[g];++g)bj(d[g],e[g])}}d=e=null;return h},clean:function(a,b,d,e){var g;b=b||c,typeof b.createElement=="undefined"&&(b=b.ownerDocument||b[0]&&b[0].ownerDocument||c);var h=[],i;for(var j=0,k;(k=a[j])!=null;j++){typeof k=="number"&&(k+="");if(!k)continue;if(typeof k=="string")if(!_.test(k))k=b.createTextNode(k);else{k=k.replace(Y,"<$1>");var l=(Z.exec(k)||["",""])[1].toLowerCase(),m=bg[l]||bg._default,n=m[0],o=b.createElement("div");b===c?bh.appendChild(o):U(b).appendChild(o),o.innerHTML=m[1]+k+m[2];while(n--)o=o.lastChild;if(!f.support.tbody){var 
p=$.test(k),q=l==="table"&&!p?o.firstChild&&o.firstChild.childNodes:m[1]===""&&!p?o.childNodes:[];for(i=q.length-1;i>=0;--i)f.nodeName(q[i],"tbody")&&!q[i].childNodes.length&&q[i].parentNode.removeChild(q[i])}!f.support.leadingWhitespace&&X.test(k)&&o.insertBefore(b.createTextNode(X.exec(k)[0]),o.firstChild),k=o.childNodes}var r;if(!f.support.appendChecked)if(k[0]&&typeof (r=k.length)=="number")for(i=0;i=0)return b+"px"}}}),f.support.opacity||(f.cssHooks.opacity={get:function(a,b){return br.test((b&&a.currentStyle?a.currentStyle.filter:a.style.filter)||"")?parseFloat(RegExp.$1)/100+"":b?"1":""},set:function(a,b){var c=a.style,d=a.currentStyle,e=f.isNumeric(b)?"alpha(opacity="+b*100+")":"",g=d&&d.filter||c.filter||"";c.zoom=1;if(b>=1&&f.trim(g.replace(bq,""))===""){c.removeAttribute("filter");if(d&&!d.filter)return}c.filter=bq.test(g)?g.replace(bq,e):g+" "+e}}),f(function(){f.support.reliableMarginRight||(f.cssHooks.marginRight={get:function(a,b){var c;f.swap(a,{display:"inline-block"},function(){b?c=bz(a,"margin-right","marginRight"):c=a.style.marginRight});return c}})}),c.defaultView&&c.defaultView.getComputedStyle&&(bA=function(a,b){var c,d,e;b=b.replace(bs,"-$1").toLowerCase(),(d=a.ownerDocument.defaultView)&&(e=d.getComputedStyle(a,null))&&(c=e.getPropertyValue(b),c===""&&!f.contains(a.ownerDocument.documentElement,a)&&(c=f.style(a,b)));return c}),c.documentElement.currentStyle&&(bB=function(a,b){var c,d,e,f=a.currentStyle&&a.currentStyle[b],g=a.style;f===null&&g&&(e=g[b])&&(f=e),!bt.test(f)&&bu.test(f)&&(c=g.left,d=a.runtimeStyle&&a.runtimeStyle.left,d&&(a.runtimeStyle.left=a.currentStyle.left),g.left=b==="fontSize"?"1em":f||0,f=g.pixelLeft+"px",g.left=c,d&&(a.runtimeStyle.left=d));return f===""?"auto":f}),bz=bA||bB,f.expr&&f.expr.filters&&(f.expr.filters.hidden=function(a){var b=a.offsetWidth,c=a.offsetHeight;return b===0&&c===0||!f.support.reliableHiddenOffsets&&(a.style&&a.style.display||f.css(a,"display"))==="none"},f.expr.filters.visible=function(a){return!f.expr.filters.hidden(a)});var bD=/%20/g,bE=/\[\]$/,bF=/\r?\n/g,bG=/#.*$/,bH=/^(.*?):[ \t]*([^\r\n]*)\r?$/mg,bI=/^(?:color|date|datetime|datetime-local|email|hidden|month|number|password|range|search|tel|text|time|url|week)$/i,bJ=/^(?:about|app|app\-storage|.+\-extension|file|res|widget):$/,bK=/^(?:GET|HEAD)$/,bL=/^\/\//,bM=/\?/,bN=/)<[^<]*)*<\/script>/gi,bO=/^(?:select|textarea)/i,bP=/\s+/,bQ=/([?&])_=[^&]*/,bR=/^([\w\+\.\-]+:)(?:\/\/([^\/?#:]*)(?::(\d+))?)?/,bS=f.fn.load,bT={},bU={},bV,bW,bX=["*/"]+["*"];try{bV=e.href}catch(bY){bV=c.createElement("a"),bV.href="",bV=bV.href}bW=bR.exec(bV.toLowerCase())||[],f.fn.extend({load:function(a,c,d){if(typeof a!="string"&&bS)return bS.apply(this,arguments);if(!this.length)return this;var e=a.indexOf(" ");if(e>=0){var g=a.slice(e,a.length);a=a.slice(0,e)}var h="GET";c&&(f.isFunction(c)?(d=c,c=b):typeof c=="object"&&(c=f.param(c,f.ajaxSettings.traditional),h="POST"));var i=this;f.ajax({url:a,type:h,dataType:"html",data:c,complete:function(a,b,c){c=a.responseText,a.isResolved()&&(a.done(function(a){c=a}),i.html(g?f("
").append(c.replace(bN,"")).find(g):c)),d&&i.each(d,[c,b,a])}});return this},serialize:function(){return f.param(this.serializeArray())},serializeArray:function(){return this.map(function(){return this.elements?f.makeArray(this.elements):this}).filter(function(){return this.name&&!this.disabled&&(this.checked||bO.test(this.nodeName)||bI.test(this.type))}).map(function(a,b){var c=f(this).val();return c==null?null:f.isArray(c)?f.map(c,function(a,c){return{name:b.name,value:a.replace(bF,"\r\n")}}):{name:b.name,value:c.replace(bF,"\r\n")}}).get()}}),f.each("ajaxStart ajaxStop ajaxComplete ajaxError ajaxSuccess ajaxSend".split(" "),function(a,b){f.fn[b]=function(a){return this.on(b,a)}}),f.each(["get","post"],function(a,c){f[c]=function(a,d,e,g){f.isFunction(d)&&(g=g||e,e=d,d=b);return f.ajax({type:c,url:a,data:d,success:e,dataType:g})}}),f.extend({getScript:function(a,c){return f.get(a,b,c,"script")},getJSON:function(a,b,c){return f.get(a,b,c,"json")},ajaxSetup:function(a,b){b?b_(a,f.ajaxSettings):(b=a,a=f.ajaxSettings),b_(a,b);return a},ajaxSettings:{url:bV,isLocal:bJ.test(bW[1]),global:!0,type:"GET",contentType:"application/x-www-form-urlencoded",processData:!0,async:!0,accepts:{xml:"application/xml, text/xml",html:"text/html",text:"text/plain",json:"application/json, text/javascript","*":bX},contents:{xml:/xml/,html:/html/,json:/json/},responseFields:{xml:"responseXML",text:"responseText"},converters:{"* text":a.String,"text html":!0,"text json":f.parseJSON,"text xml":f.parseXML},flatOptions:{context:!0,url:!0}},ajaxPrefilter:bZ(bT),ajaxTransport:bZ(bU),ajax:function(a,c){function w(a,c,l,m){if(s!==2){s=2,q&&clearTimeout(q),p=b,n=m||"",v.readyState=a>0?4:0;var o,r,u,w=c,x=l?cb(d,v,l):b,y,z;if(a>=200&&a<300||a===304){if(d.ifModified){if(y=v.getResponseHeader("Last-Modified"))f.lastModified[k]=y;if(z=v.getResponseHeader("Etag"))f.etag[k]=z}if(a===304)w="notmodified",o=!0;else try{r=cc(d,x),w="success",o=!0}catch(A){w="parsererror",u=A}}else{u=w;if(!w||a)w="error",a<0&&(a=0)}v.status=a,v.statusText=""+(c||w),o?h.resolveWith(e,[r,w,v]):h.rejectWith(e,[v,w,u]),v.statusCode(j),j=b,t&&g.trigger("ajax"+(o?"Success":"Error"),[v,d,o?r:u]),i.fireWith(e,[v,w]),t&&(g.trigger("ajaxComplete",[v,d]),--f.active||f.event.trigger("ajaxStop"))}}typeof a=="object"&&(c=a,a=b),c=c||{};var d=f.ajaxSetup({},c),e=d.context||d,g=e!==d&&(e.nodeType||e instanceof f)?f(e):f.event,h=f.Deferred(),i=f.Callbacks("once memory"),j=d.statusCode||{},k,l={},m={},n,o,p,q,r,s=0,t,u,v={readyState:0,setRequestHeader:function(a,b){if(!s){var c=a.toLowerCase();a=m[c]=m[c]||a,l[a]=b}return this},getAllResponseHeaders:function(){return s===2?n:null},getResponseHeader:function(a){var c;if(s===2){if(!o){o={};while(c=bH.exec(n))o[c[1].toLowerCase()]=c[2]}c=o[a.toLowerCase()]}return c===b?null:c},overrideMimeType:function(a){s||(d.mimeType=a);return this},abort:function(a){a=a||"abort",p&&p.abort(a),w(0,a);return this}};h.promise(v),v.success=v.done,v.error=v.fail,v.complete=i.add,v.statusCode=function(a){if(a){var b;if(s<2)for(b in a)j[b]=[j[b],a[b]];else b=a[v.status],v.then(b,b)}return this},d.url=((a||d.url)+"").replace(bG,"").replace(bL,bW[1]+"//"),d.dataTypes=f.trim(d.dataType||"*").toLowerCase().split(bP),d.crossDomain==null&&(r=bR.exec(d.url.toLowerCase()),d.crossDomain=!(!r||r[1]==bW[1]&&r[2]==bW[2]&&(r[3]||(r[1]==="http:"?80:443))==(bW[3]||(bW[1]==="http:"?80:443)))),d.data&&d.processData&&typeof 
d.data!="string"&&(d.data=f.param(d.data,d.traditional)),b$(bT,d,c,v);if(s===2)return!1;t=d.global,d.type=d.type.toUpperCase(),d.hasContent=!bK.test(d.type),t&&f.active++===0&&f.event.trigger("ajaxStart");if(!d.hasContent){d.data&&(d.url+=(bM.test(d.url)?"&":"?")+d.data,delete d.data),k=d.url;if(d.cache===!1){var x=f.now(),y=d.url.replace(bQ,"$1_="+x);d.url=y+(y===d.url?(bM.test(d.url)?"&":"?")+"_="+x:"")}}(d.data&&d.hasContent&&d.contentType!==!1||c.contentType)&&v.setRequestHeader("Content-Type",d.contentType),d.ifModified&&(k=k||d.url,f.lastModified[k]&&v.setRequestHeader("If-Modified-Since",f.lastModified[k]),f.etag[k]&&v.setRequestHeader("If-None-Match",f.etag[k])),v.setRequestHeader("Accept",d.dataTypes[0]&&d.accepts[d.dataTypes[0]]?d.accepts[d.dataTypes[0]]+(d.dataTypes[0]!=="*"?", "+bX+"; q=0.01":""):d.accepts["*"]);for(u in d.headers)v.setRequestHeader(u,d.headers[u]);if(d.beforeSend&&(d.beforeSend.call(e,v,d)===!1||s===2)){v.abort();return!1}for(u in{success:1,error:1,complete:1})v[u](d[u]);p=b$(bU,d,c,v);if(!p)w(-1,"No Transport");else{v.readyState=1,t&&g.trigger("ajaxSend",[v,d]),d.async&&d.timeout>0&&(q=setTimeout(function(){v.abort("timeout")},d.timeout));try{s=1,p.send(l,w)}catch(z){if(s<2)w(-1,z);else throw z}}return v},param:function(a,c){var d=[],e=function(a,b){b=f.isFunction(b)?b():b,d[d.length]=encodeURIComponent(a)+"="+encodeURIComponent(b)};c===b&&(c=f.ajaxSettings.traditional);if(f.isArray(a)||a.jquery&&!f.isPlainObject(a))f.each(a,function(){e(this.name,this.value)});else for(var g in a)ca(g,a[g],c,e);return d.join("&").replace(bD,"+")}}),f.extend({active:0,lastModified:{},etag:{}});var cd=f.now(),ce=/(\=)\?(&|$)|\?\?/i;f.ajaxSetup({jsonp:"callback",jsonpCallback:function(){return f.expando+"_"+cd++}}),f.ajaxPrefilter("json jsonp",function(b,c,d){var e=b.contentType==="application/x-www-form-urlencoded"&&typeof b.data=="string";if(b.dataTypes[0]==="jsonp"||b.jsonp!==!1&&(ce.test(b.url)||e&&ce.test(b.data))){var g,h=b.jsonpCallback=f.isFunction(b.jsonpCallback)?b.jsonpCallback():b.jsonpCallback,i=a[h],j=b.url,k=b.data,l="$1"+h+"$2";b.jsonp!==!1&&(j=j.replace(ce,l),b.url===j&&(e&&(k=k.replace(ce,l)),b.data===k&&(j+=(/\?/.test(j)?"&":"?")+b.jsonp+"="+h))),b.url=j,b.data=k,a[h]=function(a){g=[a]},d.always(function(){a[h]=i,g&&f.isFunction(i)&&a[h](g[0])}),b.converters["script json"]=function(){g||f.error(h+" was not called");return g[0]},b.dataTypes[0]="json";return"script"}}),f.ajaxSetup({accepts:{script:"text/javascript, application/javascript, application/ecmascript, application/x-ecmascript"},contents:{script:/javascript|ecmascript/},converters:{"text script":function(a){f.globalEval(a);return a}}}),f.ajaxPrefilter("script",function(a){a.cache===b&&(a.cache=!1),a.crossDomain&&(a.type="GET",a.global=!1)}),f.ajaxTransport("script",function(a){if(a.crossDomain){var d,e=c.head||c.getElementsByTagName("head")[0]||c.documentElement;return{send:function(f,g){d=c.createElement("script"),d.async="async",a.scriptCharset&&(d.charset=a.scriptCharset),d.src=a.url,d.onload=d.onreadystatechange=function(a,c){if(c||!d.readyState||/loaded|complete/.test(d.readyState))d.onload=d.onreadystatechange=null,e&&d.parentNode&&e.removeChild(d),d=b,c||g(200,"success")},e.insertBefore(d,e.firstChild)},abort:function(){d&&d.onload(0,1)}}}});var cf=a.ActiveXObject?function(){for(var a in ch)ch[a](0,1)}:!1,cg=0,ch;f.ajaxSettings.xhr=a.ActiveXObject?function(){return!this.isLocal&&ci()||cj()}:ci,function(a){f.extend(f.support,{ajax:!!a,cors:!!a&&"withCredentials"in 
a})}(f.ajaxSettings.xhr()),f.support.ajax&&f.ajaxTransport(function(c){if(!c.crossDomain||f.support.cors){var d;return{send:function(e,g){var h=c.xhr(),i,j;c.username?h.open(c.type,c.url,c.async,c.username,c.password):h.open(c.type,c.url,c.async);if(c.xhrFields)for(j in c.xhrFields)h[j]=c.xhrFields[j];c.mimeType&&h.overrideMimeType&&h.overrideMimeType(c.mimeType),!c.crossDomain&&!e["X-Requested-With"]&&(e["X-Requested-With"]="XMLHttpRequest");try{for(j in e)h.setRequestHeader(j,e[j])}catch(k){}h.send(c.hasContent&&c.data||null),d=function(a,e){var j,k,l,m,n;try{if(d&&(e||h.readyState===4)){d=b,i&&(h.onreadystatechange=f.noop,cf&&delete ch[i]);if(e)h.readyState!==4&&h.abort();else{j=h.status,l=h.getAllResponseHeaders(),m={},n=h.responseXML,n&&n.documentElement&&(m.xml=n),m.text=h.responseText;try{k=h.statusText}catch(o){k=""}!j&&c.isLocal&&!c.crossDomain?j=m.text?200:404:j===1223&&(j=204)}}}catch(p){e||g(-1,p)}m&&g(j,k,m,l)},!c.async||h.readyState===4?d():(i=++cg,cf&&(ch||(ch={},f(a).unload(cf)),ch[i]=d),h.onreadystatechange=d)},abort:function(){d&&d(0,1)}}}});var ck={},cl,cm,cn=/^(?:toggle|show|hide)$/,co=/^([+\-]=)?([\d+.\-]+)([a-z%]*)$/i,cp,cq=[["height","marginTop","marginBottom","paddingTop","paddingBottom"],["width","marginLeft","marginRight","paddingLeft","paddingRight"],["opacity"]],cr;f.fn.extend({show:function(a,b,c){var d,e;if(a||a===0)return this.animate(cu("show",3),a,b,c);for(var g=0,h=this.length;g=i.duration+this.startTime){this.now=this.end,this.pos=this.state=1,this.update(),i.animatedProperties[this.prop]=!0;for(b in i.animatedProperties)i.animatedProperties[b]!==!0&&(g=!1);if(g){i.overflow!=null&&!f.support.shrinkWrapBlocks&&f.each(["","X","Y"],function(a,b){h.style["overflow"+b]=i.overflow[a]}),i.hide&&f(h).hide();if(i.hide||i.show)for(b in i.animatedProperties)f.style(h,b,i.orig[b]),f.removeData(h,"fxshow"+b,!0),f.removeData(h,"toggle"+b,!0);d=i.complete,d&&(i.complete=!1,d.call(h))}return!1}i.duration==Infinity?this.now=e:(c=e-this.startTime,this.state=c/i.duration,this.pos=f.easing[i.animatedProperties[this.prop]](this.state,c,0,1,i.duration),this.now=this.start+(this.end-this.start)*this.pos),this.update();return!0}},f.extend(f.fx,{tick:function(){var a,b=f.timers,c=0;for(;c-1,k={},l={},m,n;j?(l=e.position(),m=l.top,n=l.left):(m=parseFloat(h)||0,n=parseFloat(i)||0),f.isFunction(b)&&(b=b.call(a,c,g)),b.top!=null&&(k.top=b.top-g.top+m),b.left!=null&&(k.left=b.left-g.left+n),"using"in b?b.using.call(a,k):e.css(k)}},f.fn.extend({position:function(){if(!this[0])return null;var a=this[0],b=this.offsetParent(),c=this.offset(),d=cx.test(b[0].nodeName)?{top:0,left:0}:b.offset();c.top-=parseFloat(f.css(a,"marginTop"))||0,c.left-=parseFloat(f.css(a,"marginLeft"))||0,d.top+=parseFloat(f.css(b[0],"borderTopWidth"))||0,d.left+=parseFloat(f.css(b[0],"borderLeftWidth"))||0;return{top:c.top-d.top,left:c.left-d.left}},offsetParent:function(){return this.map(function(){var a=this.offsetParent||c.body;while(a&&!cx.test(a.nodeName)&&f.css(a,"position")==="static")a=a.offsetParent;return a})}}),f.each(["Left","Top"],function(a,c){var d="scroll"+c;f.fn[d]=function(c){var e,g;if(c===b){e=this[0];if(!e)return null;g=cy(e);return g?"pageXOffset"in g?g[a?"pageYOffset":"pageXOffset"]:f.support.boxModel&&g.document.documentElement[d]||g.document.body[d]:e[d]}return this.each(function(){g=cy(this),g?g.scrollTo(a?f(g).scrollLeft():c,a?c:f(g).scrollTop()):this[d]=c})}}),f.each(["Height","Width"],function(a,c){var d=c.toLowerCase();f.fn["inner"+c]=function(){var a=this[0];return 
a?a.style?parseFloat(f.css(a,d,"padding")):this[d]():null},f.fn["outer"+c]=function(a){var b=this[0];return b?b.style?parseFloat(f.css(b,d,a?"margin":"border")):this[d]():null},f.fn[d]=function(a){var e=this[0];if(!e)return a==null?null:this;if(f.isFunction(a))return this.each(function(b){var c=f(this);c[d](a.call(this,b,c[d]()))});if(f.isWindow(e)){var g=e.document.documentElement["client"+c],h=e.document.body;return e.document.compatMode==="CSS1Compat"&&g||h&&h["client"+c]||g}if(e.nodeType===9)return Math.max(e.documentElement["client"+c],e.body["scroll"+c],e.documentElement["scroll"+c],e.body["offset"+c],e.documentElement["offset"+c]);if(a===b){var i=f.css(e,d),j=parseFloat(i);return f.isNumeric(j)?j:i}return this.css(d,typeof a=="string"?a:a+"px")}}),a.jQuery=a.$=f,typeof define=="function"&&define.amd&&define.amd.jQuery&&define("jquery",[],function(){return f})})(window);SQLAlchemy-0.8.4/doc/_static/minus.png0000644000076500000240000000030712167630573020277 0ustar classicstaff00000000000000PNG  IHDR &q pHYs  tIME <8tEXtComment̖RIDATcz(BpipPc |IENDB`SQLAlchemy-0.8.4/doc/_static/pygments.css0000644000076500000240000000753412251147504021016 0ustar classicstaff00000000000000.highlight .hll { background-color: #ffffcc } .highlight { background: #eeffcc; } .highlight .c { color: #408090; font-style: italic } /* Comment */ .highlight .err { border: 1px solid #FF0000 } /* Error */ .highlight .k { color: #007020; font-weight: bold } /* Keyword */ .highlight .o { color: #666666 } /* Operator */ .highlight .cm { color: #408090; font-style: italic } /* Comment.Multiline */ .highlight .cp { color: #007020 } /* Comment.Preproc */ .highlight .c1 { color: #408090; font-style: italic } /* Comment.Single */ .highlight .cs { color: #408090; background-color: #fff0f0 } /* Comment.Special */ .highlight .gd { color: #A00000 } /* Generic.Deleted */ .highlight .ge { font-style: italic } /* Generic.Emph */ .highlight .gr { color: #FF0000 } /* Generic.Error */ .highlight .gh { color: #000080; font-weight: bold } /* Generic.Heading */ .highlight .gi { color: #00A000 } /* Generic.Inserted */ .highlight .go { color: #333333 } /* Generic.Output */ .highlight .gp { color: #c65d09; font-weight: bold } /* Generic.Prompt */ .highlight .gs { font-weight: bold } /* Generic.Strong */ .highlight .gu { color: #800080; font-weight: bold } /* Generic.Subheading */ .highlight .gt { color: #0044DD } /* Generic.Traceback */ .highlight .kc { color: #007020; font-weight: bold } /* Keyword.Constant */ .highlight .kd { color: #007020; font-weight: bold } /* Keyword.Declaration */ .highlight .kn { color: #007020; font-weight: bold } /* Keyword.Namespace */ .highlight .kp { color: #007020 } /* Keyword.Pseudo */ .highlight .kr { color: #007020; font-weight: bold } /* Keyword.Reserved */ .highlight .kt { color: #902000 } /* Keyword.Type */ .highlight .m { color: #208050 } /* Literal.Number */ .highlight .s { color: #4070a0 } /* Literal.String */ .highlight .na { color: #4070a0 } /* Name.Attribute */ .highlight .nb { color: #007020 } /* Name.Builtin */ .highlight .nc { color: #0e84b5; font-weight: bold } /* Name.Class */ .highlight .no { color: #60add5 } /* Name.Constant */ .highlight .nd { color: #555555; font-weight: bold } /* Name.Decorator */ .highlight .ni { color: #d55537; font-weight: bold } /* Name.Entity */ .highlight .ne { color: #007020 } /* Name.Exception */ .highlight .nf { color: #06287e } /* Name.Function */ .highlight .nl { color: #002070; font-weight: bold } /* Name.Label */ .highlight .nn { color: #0e84b5; font-weight: 
bold } /* Name.Namespace */ .highlight .nt { color: #062873; font-weight: bold } /* Name.Tag */ .highlight .nv { color: #bb60d5 } /* Name.Variable */ .highlight .ow { color: #007020; font-weight: bold } /* Operator.Word */ .highlight .w { color: #bbbbbb } /* Text.Whitespace */ .highlight .mf { color: #208050 } /* Literal.Number.Float */ .highlight .mh { color: #208050 } /* Literal.Number.Hex */ .highlight .mi { color: #208050 } /* Literal.Number.Integer */ .highlight .mo { color: #208050 } /* Literal.Number.Oct */ .highlight .sb { color: #4070a0 } /* Literal.String.Backtick */ .highlight .sc { color: #4070a0 } /* Literal.String.Char */ .highlight .sd { color: #4070a0; font-style: italic } /* Literal.String.Doc */ .highlight .s2 { color: #4070a0 } /* Literal.String.Double */ .highlight .se { color: #4070a0; font-weight: bold } /* Literal.String.Escape */ .highlight .sh { color: #4070a0 } /* Literal.String.Heredoc */ .highlight .si { color: #70a0d0; font-style: italic } /* Literal.String.Interpol */ .highlight .sx { color: #c65d09 } /* Literal.String.Other */ .highlight .sr { color: #235388 } /* Literal.String.Regex */ .highlight .s1 { color: #4070a0 } /* Literal.String.Single */ .highlight .ss { color: #517918 } /* Literal.String.Symbol */ .highlight .bp { color: #007020 } /* Name.Builtin.Pseudo */ .highlight .vc { color: #bb60d5 } /* Name.Variable.Class */ .highlight .vg { color: #bb60d5 } /* Name.Variable.Global */ .highlight .vi { color: #bb60d5 } /* Name.Variable.Instance */ .highlight .il { color: #208050 } /* Literal.Number.Integer.Long */SQLAlchemy-0.8.4/doc/_static/searchtools.js0000644000076500000240000004264712251147504021326 0ustar classicstaff00000000000000/* * searchtools.js_t * ~~~~~~~~~~~~~~~~ * * Sphinx JavaScript utilties for the full-text search. * * :copyright: Copyright 2007-2013 by the Sphinx team, see AUTHORS. * :license: BSD, see LICENSE for details. * */ /** * Porter Stemmer */ var Stemmer = function() { var step2list = { ational: 'ate', tional: 'tion', enci: 'ence', anci: 'ance', izer: 'ize', bli: 'ble', alli: 'al', entli: 'ent', eli: 'e', ousli: 'ous', ization: 'ize', ation: 'ate', ator: 'ate', alism: 'al', iveness: 'ive', fulness: 'ful', ousness: 'ous', aliti: 'al', iviti: 'ive', biliti: 'ble', logi: 'log' }; var step3list = { icate: 'ic', ative: '', alize: 'al', iciti: 'ic', ical: 'ic', ful: '', ness: '' }; var c = "[^aeiou]"; // consonant var v = "[aeiouy]"; // vowel var C = c + "[^aeiouy]*"; // consonant sequence var V = v + "[aeiou]*"; // vowel sequence var mgr0 = "^(" + C + ")?" + V + C; // [C]VC... is m>0 var meq1 = "^(" + C + ")?" + V + C + "(" + V + ")?$"; // [C]VC[V] is m=1 var mgr1 = "^(" + C + ")?" + V + C + V + C; // [C]VCVC... is m>1 var s_v = "^(" + C + ")?" 
+ v; // vowel in stem this.stemWord = function (w) { var stem; var suffix; var firstch; var origword = w; if (w.length < 3) return w; var re; var re2; var re3; var re4; firstch = w.substr(0,1); if (firstch == "y") w = firstch.toUpperCase() + w.substr(1); // Step 1a re = /^(.+?)(ss|i)es$/; re2 = /^(.+?)([^s])s$/; if (re.test(w)) w = w.replace(re,"$1$2"); else if (re2.test(w)) w = w.replace(re2,"$1$2"); // Step 1b re = /^(.+?)eed$/; re2 = /^(.+?)(ed|ing)$/; if (re.test(w)) { var fp = re.exec(w); re = new RegExp(mgr0); if (re.test(fp[1])) { re = /.$/; w = w.replace(re,""); } } else if (re2.test(w)) { var fp = re2.exec(w); stem = fp[1]; re2 = new RegExp(s_v); if (re2.test(stem)) { w = stem; re2 = /(at|bl|iz)$/; re3 = new RegExp("([^aeiouylsz])\\1$"); re4 = new RegExp("^" + C + v + "[^aeiouwxy]$"); if (re2.test(w)) w = w + "e"; else if (re3.test(w)) { re = /.$/; w = w.replace(re,""); } else if (re4.test(w)) w = w + "e"; } } // Step 1c re = /^(.+?)y$/; if (re.test(w)) { var fp = re.exec(w); stem = fp[1]; re = new RegExp(s_v); if (re.test(stem)) w = stem + "i"; } // Step 2 re = /^(.+?)(ational|tional|enci|anci|izer|bli|alli|entli|eli|ousli|ization|ation|ator|alism|iveness|fulness|ousness|aliti|iviti|biliti|logi)$/; if (re.test(w)) { var fp = re.exec(w); stem = fp[1]; suffix = fp[2]; re = new RegExp(mgr0); if (re.test(stem)) w = stem + step2list[suffix]; } // Step 3 re = /^(.+?)(icate|ative|alize|iciti|ical|ful|ness)$/; if (re.test(w)) { var fp = re.exec(w); stem = fp[1]; suffix = fp[2]; re = new RegExp(mgr0); if (re.test(stem)) w = stem + step3list[suffix]; } // Step 4 re = /^(.+?)(al|ance|ence|er|ic|able|ible|ant|ement|ment|ent|ou|ism|ate|iti|ous|ive|ize)$/; re2 = /^(.+?)(s|t)(ion)$/; if (re.test(w)) { var fp = re.exec(w); stem = fp[1]; re = new RegExp(mgr1); if (re.test(stem)) w = stem; } else if (re2.test(w)) { var fp = re2.exec(w); stem = fp[1] + fp[2]; re2 = new RegExp(mgr1); if (re2.test(stem)) w = stem; } // Step 5 re = /^(.+?)e$/; if (re.test(w)) { var fp = re.exec(w); stem = fp[1]; re = new RegExp(mgr1); re2 = new RegExp(meq1); re3 = new RegExp("^" + C + v + "[^aeiouwxy]$"); if (re.test(stem) || (re2.test(stem) && !(re3.test(stem)))) w = stem; } re = /ll$/; re2 = new RegExp(mgr1); if (re.test(w) && re2.test(w)) { re = /.$/; w = w.replace(re,""); } // and turn initial Y back to y if (firstch == "y") w = firstch.toLowerCase() + w.substr(1); return w; } } /** * Simple result scoring code. */ var Scorer = { // Implement the following function to further tweak the score for each result // The function takes a result array [filename, title, anchor, descr, score] // and returns the new score. /* score: function(result) { return result[4]; }, */ // query matches the full name of an object objNameMatch: 11, // or matches in the last dotted part of the object name objPartialMatch: 6, // Additive scores depending on the priority of the object objPrio: {0: 15, // used to be importantResults 1: 5, // used to be objectResults 2: -5}, // used to be unimportantResults // Used when the priority is not in the mapping. 
objPrioDefault: 0, // query found in title title: 15, // query found in terms term: 5 }; /** * Search Module */ var Search = { _index : null, _queued_query : null, _pulse_status : -1, init : function() { var params = $.getQueryParameters(); if (params.q) { var query = params.q[0]; $('input[name="q"]')[0].value = query; this.performSearch(query); } }, loadIndex : function(url) { $.ajax({type: "GET", url: url, data: null, dataType: "script", cache: true, complete: function(jqxhr, textstatus) { if (textstatus != "success") { document.getElementById("searchindexloader").src = url; } }}); }, setIndex : function(index) { var q; this._index = index; if ((q = this._queued_query) !== null) { this._queued_query = null; Search.query(q); } }, hasIndex : function() { return this._index !== null; }, deferQuery : function(query) { this._queued_query = query; }, stopPulse : function() { this._pulse_status = 0; }, startPulse : function() { if (this._pulse_status >= 0) return; function pulse() { var i; Search._pulse_status = (Search._pulse_status + 1) % 4; var dotString = ''; for (i = 0; i < Search._pulse_status; i++) dotString += '.'; Search.dots.text(dotString); if (Search._pulse_status > -1) window.setTimeout(pulse, 500); } pulse(); }, /** * perform a search for something (or wait until index is loaded) */ performSearch : function(query) { // create the required interface elements this.out = $('#search-results'); this.title = $('

' + _('Searching') + '

').appendTo(this.out); this.dots = $('').appendTo(this.title); this.status = $('

').appendTo(this.out); this.output = $('
'); } // Prettify the comment rating. comment.pretty_rating = comment.rating + ' point' + (comment.rating == 1 ? '' : 's'); // Make a class (for displaying not yet moderated comments differently) comment.css_class = comment.displayed ? '' : ' moderate'; // Create a div for this comment. var context = $.extend({}, opts, comment); var div = $(renderTemplate(commentTemplate, context)); // If the user has voted on this comment, highlight the correct arrow. if (comment.vote) { var direction = (comment.vote == 1) ? 'u' : 'd'; div.find('#' + direction + 'v' + comment.id).hide(); div.find('#' + direction + 'u' + comment.id).show(); } if (opts.moderator || comment.text != '[deleted]') { div.find('a.reply').show(); if (comment.proposal_diff) div.find('#sp' + comment.id).show(); if (opts.moderator && !comment.displayed) div.find('#cm' + comment.id).show(); if (opts.moderator || (opts.username == comment.username)) div.find('#dc' + comment.id).show(); } return div; } /** * A simple template renderer. Placeholders such as <%id%> are replaced * by context['id'] with items being escaped. Placeholders such as <#id#> * are not escaped. */ function renderTemplate(template, context) { var esc = $(document.createElement('div')); function handle(ph, escape) { var cur = context; $.each(ph.split('.'), function() { cur = cur[this]; }); return escape ? esc.text(cur || "").html() : cur; } return template.replace(/<([%#])([\w\.]*)\1>/g, function() { return handle(arguments[2], arguments[1] == '%' ? true : false); }); } /** Flash an error message briefly. */ function showError(message) { $(document.createElement('div')).attr({'class': 'popup-error'}) .append($(document.createElement('div')) .attr({'class': 'error-message'}).text(message)) .appendTo('body') .fadeIn("slow") .delay(2000) .fadeOut("slow"); } /** Add a link the user uses to open the comments popup. */ $.fn.comment = function() { return this.each(function() { var id = $(this).attr('id').substring(1); var count = COMMENT_METADATA[id]; var title = count + ' comment' + (count == 1 ? '' : 's'); var image = count > 0 ? opts.commentBrightImage : opts.commentImage; var addcls = count == 0 ? ' nocomment' : ''; $(this) .append( $(document.createElement('a')).attr({ href: '#', 'class': 'sphinx-comment-open' + addcls, id: 'ao' + id }) .append($(document.createElement('img')).attr({ src: image, alt: 'comment', title: title })) .click(function(event) { event.preventDefault(); show($(this).attr('id').substring(2)); }) ) .append( $(document.createElement('a')).attr({ href: '#', 'class': 'sphinx-comment-close hidden', id: 'ah' + id }) .append($(document.createElement('img')).attr({ src: opts.closeCommentImage, alt: 'close', title: 'close' })) .click(function(event) { event.preventDefault(); hide($(this).attr('id').substring(2)); }) ); }); }; var opts = { processVoteURL: '/_process_vote', addCommentURL: '/_add_comment', getCommentsURL: '/_get_comments', acceptCommentURL: '/_accept_comment', deleteCommentURL: '/_delete_comment', commentImage: '/static/_static/comment.png', closeCommentImage: '/static/_static/comment-close.png', loadingImage: '/static/_static/ajax-loader.gif', commentBrightImage: '/static/_static/comment-bright.png', upArrow: '/static/_static/up.png', downArrow: '/static/_static/down.png', upArrowPressed: '/static/_static/up-pressed.png', downArrowPressed: '/static/_static/down-pressed.png', voting: false, moderator: false }; if (typeof COMMENT_OPTIONS != "undefined") { opts = jQuery.extend(opts, COMMENT_OPTIONS); } var popupTemplate = '\
\

\ Sort by:\ best rated\ newest\ oldest\

\
Comments
\
\ loading comments...
\
    \
    \

    Add a comment\ (markup):

    \
    \ reStructured text markup: *emph*, **strong**, \ ``code``, \ code blocks: :: and an indented block after blank line
    \
    \ \

    \ \ Propose a change ▹\ \ \ Propose a change ▿\ \

    \ \ \ \ \ \
    \
    '; var commentTemplate = '\
    \
    \
    \ \ \ \ \ \ \
    \
    \ \ \ \ \ \ \
    \
    \
    \

    \ <%username%>\ <%pretty_rating%>\ <%time.delta%>\

    \
    <#text#>
    \

    \ \ reply ▿\ proposal ▹\ proposal ▿\ \ \

    \
    \
    <#proposal_diff#>\
            
    \
      \
      \
      \
      \ '; var replyTemplate = '\
    • \
      \
      \ \ \ \ \ \ \
      \
    • '; $(document).ready(function() { init(); }); })(jQuery); $(document).ready(function() { // add comment anchors for all paragraphs that are commentable $('.sphinx-has-comment').comment(); // highlight search words in search results $("div.context").each(function() { var params = $.getQueryParameters(); var terms = (params.q) ? params.q[0].split(/\s+/) : []; var result = $(this); $.each(terms, function() { result.highlightText(this.toLowerCase(), 'highlighted'); }); }); // directly open comment window if requested var anchor = document.location.hash; if (anchor.substring(0, 9) == '#comment-') { $('#ao' + anchor.substring(9)).click(); document.location.hash = '#s' + anchor.substring(9); } }); SQLAlchemy-0.8.4/doc/build/0000755000076500000240000000000012251151573016077 5ustar classicstaff00000000000000SQLAlchemy-0.8.4/doc/build/builder/0000755000076500000240000000000012251151573017525 5ustar classicstaff00000000000000SQLAlchemy-0.8.4/doc/build/builder/__init__.py0000644000076500000240000000000012251147171021623 0ustar classicstaff00000000000000SQLAlchemy-0.8.4/doc/build/builder/autodoc_mods.py0000644000076500000240000000647312251150015022557 0ustar classicstaff00000000000000import re def autodoc_skip_member(app, what, name, obj, skip, options): if what == 'class' and skip and \ name in ('__init__', '__eq__', '__ne__', '__lt__', '__le__', '__call__') and \ obj.__doc__: return False else: return skip _convert_modname = { } _convert_modname_w_class = { ("sqlalchemy.engine.interfaces", "Connectable"): "sqlalchemy.engine" } def _adjust_rendered_mod_name(modname, objname): if modname in _convert_modname: return _convert_modname[modname] elif (modname, objname) in _convert_modname_w_class: return _convert_modname_w_class[(modname, objname)] else: return modname # im sure this is in the app somewhere, but I don't really # know where, so we're doing it here. _track_autodoced = {} _inherited_names = set() def autodoc_process_docstring(app, what, name, obj, options, lines): if what == "class": _track_autodoced[name] = obj # need to translate module names for bases, others # as we document lots of symbols in namespace modules # outside of their source bases = [] for base in obj.__bases__: if base is not object: bases.append(":class:`%s.%s`" % ( _adjust_rendered_mod_name(base.__module__, base.__name__), base.__name__)) if bases: lines[:0] = [ "Bases: %s" % (", ".join(bases)), "" ] elif what in ("attribute", "method") and \ options.get("inherited-members"): m = re.match(r'(.*?)\.([\w_]+)$', name) if m: clsname, attrname = m.group(1, 2) if clsname in _track_autodoced: cls = _track_autodoced[clsname] for supercls in cls.__mro__: if attrname in supercls.__dict__: break if supercls is not cls: _inherited_names.add("%s.%s" % (supercls.__module__, supercls.__name__)) _inherited_names.add("%s.%s.%s" % (supercls.__module__, supercls.__name__, attrname)) lines[:0] = [ ".. 
container:: inherited_member", "", " *inherited from the* :%s:`~%s.%s.%s` *%s of* :class:`~%s.%s`" % ( "attr" if what == "attribute" else "meth", _adjust_rendered_mod_name(supercls.__module__, supercls.__name__), supercls.__name__, attrname, what, _adjust_rendered_mod_name(supercls.__module__, supercls.__name__), supercls.__name__ ), "" ] def missing_reference(app, env, node, contnode): if node.attributes['reftarget'] in _inherited_names: return node.children[0] else: return None def setup(app): app.connect('autodoc-skip-member', autodoc_skip_member) app.connect('autodoc-process-docstring', autodoc_process_docstring) app.connect('missing-reference', missing_reference) SQLAlchemy-0.8.4/doc/build/builder/changelog.py0000644000076500000240000003002112251150015022010 0ustar classicstaff00000000000000import re from sphinx.util.compat import Directive from docutils.statemachine import StringList from docutils import nodes, utils import textwrap import itertools import collections import md5 def _comma_list(text): return re.split(r"\s*,\s*", text.strip()) def _parse_content(content): d = {} d['text'] = [] idx = 0 for line in content: idx += 1 m = re.match(r' *\:(.+?)\:(?: +(.+))?', line) if m: attrname, value = m.group(1, 2) d[attrname] = value or '' else: break d["text"] = content[idx:] return d class EnvDirective(object): @property def env(self): return self.state.document.settings.env @classmethod def changes(cls, env): return env.temp_data['ChangeLogDirective_changes'] class ChangeLogDirective(EnvDirective, Directive): has_content = True default_section = 'misc' def _organize_by_section(self, changes): compound_sections = [(s, s.split(" ")) for s in self.sections if " " in s] bysection = collections.defaultdict(list) all_sections = set() for rec in changes: if self.version not in rec['versions']: continue inner_tag = rec['tags'].intersection(self.inner_tag_sort) if inner_tag: inner_tag = inner_tag.pop() else: inner_tag = "" for compound, comp_words in compound_sections: if rec['tags'].issuperset(comp_words): bysection[(compound, inner_tag)].append(rec) all_sections.add(compound) break else: intersect = rec['tags'].intersection(self.sections) if intersect: for sec in rec['sorted_tags']: if sec in intersect: bysection[(sec, inner_tag)].append(rec) all_sections.add(sec) break else: bysection[(self.default_section, inner_tag)].append(rec) return bysection, all_sections def _setup_run(self): self.sections = self.env.config.changelog_sections self.inner_tag_sort = self.env.config.changelog_inner_tag_sort + [""] if 'ChangeLogDirective_changes' not in self.env.temp_data: self.env.temp_data['ChangeLogDirective_changes'] = [] self._parsed_content = _parse_content(self.content) self.version = version = self._parsed_content.get('version', '') self.env.temp_data['ChangeLogDirective_version'] = version p = nodes.paragraph('', '',) self.state.nested_parse(self.content[1:], 0, p) def run(self): self._setup_run() if 'ChangeLogDirective_includes' in self.env.temp_data: return [] changes = self.changes(self.env) output = [] id_prefix = "change-%s" % (self.version, ) topsection = self._run_top(id_prefix) output.append(topsection) bysection, all_sections = self._organize_by_section(changes) counter = itertools.count() sections_to_render = [s for s in self.sections if s in all_sections] if not sections_to_render: for cat in self.inner_tag_sort: append_sec = self._append_node() for rec in bysection[(self.default_section, cat)]: rec["id"] = "%s-%s" % (id_prefix, next(counter)) self._render_rec(rec, None, cat, 
append_sec) if append_sec.children: topsection.append(append_sec) else: for section in sections_to_render + [self.default_section]: sec = nodes.section('', nodes.title(section, section), ids=["%s-%s" % (id_prefix, section.replace(" ", "-"))] ) append_sec = self._append_node() sec.append(append_sec) for cat in self.inner_tag_sort: for rec in bysection[(section, cat)]: rec["id"] = "%s-%s" % (id_prefix, next(counter)) self._render_rec(rec, section, cat, append_sec) if append_sec.children: topsection.append(sec) return output def _append_node(self): return nodes.bullet_list() def _run_top(self, id_prefix): version = self._parsed_content.get('version', '') topsection = nodes.section('', nodes.title(version, version), ids=[id_prefix] ) if self._parsed_content.get("released"): topsection.append(nodes.Text("Released: %s" % self._parsed_content['released'])) else: topsection.append(nodes.Text("no release date")) intro_para = nodes.paragraph('', '') len_ = -1 for len_, text in enumerate(self._parsed_content['text']): if ".. change::" in text: break # if encountered any text elements that didn't start with # ".. change::", those become the intro if len_ > 0: self.state.nested_parse(self._parsed_content['text'][0:len_], 0, intro_para) topsection.append(intro_para) return topsection def _render_rec(self, rec, section, cat, append_sec): para = rec['node'].deepcopy() text = _text_rawsource_from_node(para) to_hash = "%s %s" % (self.version, text[0:100]) targetid = "change-%s" % ( md5.md5(to_hash.encode('ascii', 'ignore') ).hexdigest()) targetnode = nodes.target('', '', ids=[targetid]) para.insert(0, targetnode) permalink = nodes.reference('', '', nodes.Text("(link)", "(link)"), refid=targetid, classes=['changeset-link'] ) para.append(permalink) if len(rec['versions']) > 1: backported_changes = rec['sorted_versions'][rec['sorted_versions'].index(self.version) + 1:] if backported_changes: backported = nodes.paragraph('') backported.append(nodes.Text("This change is also ", "")) backported.append(nodes.strong("", "backported")) backported.append(nodes.Text(" to: %s" % ", ".join(backported_changes), "")) para.append(backported) insert_ticket = nodes.paragraph('') para.append(insert_ticket) i = 0 for collection, render, prefix in ( (rec['tickets'], self.env.config.changelog_render_ticket, "#%s"), (rec['pullreq'], self.env.config.changelog_render_pullreq, "pull request %s"), (rec['changeset'], self.env.config.changelog_render_changeset, "r%s"), ): for refname in collection: if i > 0: insert_ticket.append(nodes.Text(", ", ", ")) else: insert_ticket.append(nodes.Text("References: """)) i += 1 if render is not None: refuri = render % refname node = nodes.reference('', '', nodes.Text(prefix % refname, prefix % refname), refuri=refuri ) else: node = nodes.Text(prefix % refname, prefix % refname) insert_ticket.append(node) if rec['tags']: tag_node = nodes.strong('', " ".join("[%s]" % t for t in [t1 for t1 in [section, cat] if t1 in rec['tags']] + list(rec['tags'].difference([section, cat])) ) + " " ) para.children[0].insert(0, tag_node) append_sec.append( nodes.list_item('', nodes.target('', '', ids=[rec['id']]), para ) ) class ChangeLogImportDirective(EnvDirective, Directive): has_content = True def _setup_run(self): if 'ChangeLogDirective_changes' not in self.env.temp_data: self.env.temp_data['ChangeLogDirective_changes'] = [] def run(self): self._setup_run() # tell ChangeLogDirective we're here, also prevent # nested .. 
include calls if 'ChangeLogDirective_includes' not in self.env.temp_data: self.env.temp_data['ChangeLogDirective_includes'] = True p = nodes.paragraph('', '',) self.state.nested_parse(self.content, 0, p) del self.env.temp_data['ChangeLogDirective_includes'] return [] class ChangeDirective(EnvDirective, Directive): has_content = True def run(self): content = _parse_content(self.content) p = nodes.paragraph('', '',) sorted_tags = _comma_list(content.get('tags', '')) declared_version = self.env.temp_data['ChangeLogDirective_version'] versions = set(_comma_list(content.get("versions", ""))).difference(['']).\ union([declared_version]) # if we don't refer to any other versions and we're in an include, # skip if len(versions) == 1 and 'ChangeLogDirective_includes' in self.env.temp_data: return [] def int_ver(ver): out = [] for dig in ver.split("."): try: out.append(int(dig)) except ValueError: out.append(0) return tuple(out) rec = { 'tags': set(sorted_tags).difference(['']), 'tickets': set(_comma_list(content.get('tickets', ''))).difference(['']), 'pullreq': set(_comma_list(content.get('pullreq', ''))).difference(['']), 'changeset': set(_comma_list(content.get('changeset', ''))).difference(['']), 'node': p, 'type': "change", "title": content.get("title", None), 'sorted_tags': sorted_tags, "versions": versions, "sorted_versions": list(reversed(sorted(versions, key=int_ver))) } if "declarative" in rec['tags']: rec['tags'].add("orm") self.state.nested_parse(content['text'], 0, p) ChangeLogDirective.changes(self.env).append(rec) return [] def _text_rawsource_from_node(node): src = [] stack = [node] while stack: n = stack.pop(0) if isinstance(n, nodes.Text): src.append(n.rawsource) stack.extend(n.children) return "".join(src) def _rst2sphinx(text): return StringList( [line.strip() for line in textwrap.dedent(text).split("\n")] ) def make_ticket_link(name, rawtext, text, lineno, inliner, options={}, content=[]): env = inliner.document.settings.env render_ticket = env.config.changelog_render_ticket or "%s" prefix = "#%s" if render_ticket: ref = render_ticket % text node = nodes.reference(rawtext, prefix % text, refuri=ref, **options) else: node = nodes.Text(prefix % text, prefix % text) return [node], [] def setup(app): app.add_directive('changelog', ChangeLogDirective) app.add_directive('change', ChangeDirective) app.add_directive('changelog_imports', ChangeLogImportDirective) app.add_config_value("changelog_sections", [], 'env') app.add_config_value("changelog_inner_tag_sort", [], 'env') app.add_config_value("changelog_render_ticket", None, 'env' ) app.add_config_value("changelog_render_pullreq", None, 'env' ) app.add_config_value("changelog_render_changeset", None, 'env' ) app.add_role('ticket', make_ticket_link) SQLAlchemy-0.8.4/doc/build/builder/dialect_info.py0000644000076500000240000001433612251147171022525 0ustar classicstaff00000000000000import re from sphinx.util.compat import Directive from docutils import nodes class DialectDirective(Directive): has_content = True _dialects = {} def _parse_content(self): d = {} d['default'] = self.content[0] d['text'] = [] idx = 0 for line in self.content[1:]: idx += 1 m = re.match(r'\:(.+?)\: +(.+)', line) if m: attrname, value = m.group(1, 2) d[attrname] = value else: break d["text"] = self.content[idx + 1:] return d def _dbapi_node(self): dialect_name, dbapi_name = self.dialect_name.split("+") try: dialect_directive = self._dialects[dialect_name] except KeyError: raise Exception("No .. 
dialect:: %s directive has been established" % dialect_name) output = [] content = self._parse_content() parent_section_ref = self.state.parent.children[0]['ids'][0] self._append_dbapi_bullet(dialect_name, dbapi_name, content['name'], parent_section_ref) p = nodes.paragraph('', '', nodes.Text( "Support for the %s database via the %s driver." % ( dialect_directive.database_name, content['name'] ), "Support for the %s database via the %s driver." % ( dialect_directive.database_name, content['name'] ) ), ) self.state.nested_parse(content['text'], 0, p) output.append(p) if "url" in content or "driverurl" in content: sec = nodes.section( '', nodes.title("DBAPI", "DBAPI"), ids=["dialect-%s-%s-url" % (dialect_name, dbapi_name)] ) if "url" in content: text = "Documentation and download information (if applicable) "\ "for %s is available at:\n" % content["name"] uri = content['url'] sec.append( nodes.paragraph('', '', nodes.Text(text, text), nodes.reference('', '', nodes.Text(uri, uri), refuri=uri, ) ) ) if "driverurl" in content: text = "Drivers for this database are available at:\n" sec.append( nodes.paragraph('', '', nodes.Text(text, text), nodes.reference('', '', nodes.Text(content['driverurl'], content['driverurl']), refuri=content['driverurl'] ) ) ) output.append(sec) if "connectstring" in content: sec = nodes.section( '', nodes.title("Connecting", "Connecting"), nodes.paragraph('', '', nodes.Text("Connect String:", "Connect String:"), nodes.literal_block(content['connectstring'], content['connectstring']) ), ids=["dialect-%s-%s-connect" % (dialect_name, dbapi_name)] ) output.append(sec) return output def _dialect_node(self): self._dialects[self.dialect_name] = self content = self._parse_content() self.database_name = content['name'] self.bullets = nodes.bullet_list() text = "The following dialect/DBAPI options are available. "\ "Please refer to individual DBAPI sections for connect information." sec = nodes.section('', nodes.paragraph('', '', nodes.Text( "Support for the %s database." % content['name'], "Support for the %s database." 
% content['name'] ), ), nodes.title("DBAPI Support", "DBAPI Support"), nodes.paragraph('', '', nodes.Text(text, text), self.bullets ), ids=["dialect-%s" % self.dialect_name] ) return [sec] def _append_dbapi_bullet(self, dialect_name, dbapi_name, name, idname): env = self.state.document.settings.env dialect_directive = self._dialects[dialect_name] try: relative_uri = env.app.builder.get_relative_uri(dialect_directive.docname, self.docname) except: relative_uri = "" list_node = nodes.list_item('', nodes.paragraph('', '', nodes.reference('', '', nodes.Text(name, name), refdocname=self.docname, refuri= relative_uri + "#" + idname ), #nodes.Text(" ", " "), #nodes.reference('', '', # nodes.Text("(connectstring)", "(connectstring)"), # refdocname=self.docname, # refuri=env.app.builder.get_relative_uri( # dialect_directive.docname, self.docname) + ## "#" + ("dialect-%s-%s-connect" % # (dialect_name, dbapi_name)) # ) ) ) dialect_directive.bullets.append(list_node) def run(self): env = self.state.document.settings.env self.docname = env.docname self.dialect_name = dialect_name = self.content[0] has_dbapi = "+" in dialect_name if has_dbapi: return self._dbapi_node() else: return self._dialect_node() def setup(app): app.add_directive('dialect', DialectDirective) SQLAlchemy-0.8.4/doc/build/builder/mako.py0000644000076500000240000000444212251147171021031 0ustar classicstaff00000000000000from __future__ import absolute_import from sphinx.application import TemplateBridge from sphinx.jinja2glue import BuiltinTemplateLoader from mako.lookup import TemplateLookup import os rtd = os.environ.get('READTHEDOCS', None) == 'True' class MakoBridge(TemplateBridge): def init(self, builder, *args, **kw): self.jinja2_fallback = BuiltinTemplateLoader() self.jinja2_fallback.init(builder, *args, **kw) builder.config.html_context['release_date'] = builder.config['release_date'] builder.config.html_context['site_base'] = builder.config['site_base'] self.lookup = TemplateLookup(directories=builder.config.templates_path, #format_exceptions=True, imports=[ "from builder import util" ] ) if rtd: # RTD layout, imported from sqlalchemy.org import urllib2 template = urllib2.urlopen(builder.config['site_base'] + "/docs_adapter.mako").read() self.lookup.put_string("docs_adapter.mako", template) setup_ctx = urllib2.urlopen(builder.config['site_base'] + "/docs_adapter.py").read() lcls = {} exec(setup_ctx, lcls) self.setup_ctx = lcls['setup_context'] def setup_ctx(self, context): pass def render(self, template, context): template = template.replace(".html", ".mako") context['prevtopic'] = context.pop('prev', None) context['nexttopic'] = context.pop('next', None) # local docs layout context['rtd'] = False context['toolbar'] = False context['base'] = "static_base.mako" # override context attributes self.setup_ctx(context) context.setdefault('_', lambda x: x) return self.lookup.get_template(template).render_unicode(**context) def render_string(self, template, context): # this is used for .js, .css etc. and we don't have # local copies of that stuff here so use the jinja render. 
return self.jinja2_fallback.render_string(template, context) def setup(app): app.config['template_bridge'] = "builder.mako.MakoBridge" app.add_config_value('release_date', "", 'env') app.add_config_value('site_base', "", 'env') app.add_config_value('build_number', "", 'env') SQLAlchemy-0.8.4/doc/build/builder/sqlformatter.py0000644000076500000240000001054112251147171022622 0ustar classicstaff00000000000000from pygments.lexer import RegexLexer, bygroups, using from pygments.token import Token from pygments.filter import Filter from pygments.filter import apply_filters from pygments.lexers import PythonLexer, PythonConsoleLexer from sphinx.highlighting import PygmentsBridge from pygments.formatters import HtmlFormatter, LatexFormatter import re def _strip_trailing_whitespace(iter_): buf = list(iter_) if buf: buf[-1] = (buf[-1][0], buf[-1][1].rstrip()) for t, v in buf: yield t, v class StripDocTestFilter(Filter): def filter(self, lexer, stream): for ttype, value in stream: if ttype is Token.Comment and re.match(r'#\s*doctest:', value): continue yield ttype, value class PyConWithSQLLexer(RegexLexer): name = 'PyCon+SQL' aliases = ['pycon+sql'] flags = re.IGNORECASE | re.DOTALL tokens = { 'root': [ (r'{sql}', Token.Sql.Link, 'sqlpopup'), (r'{opensql}', Token.Sql.Open, 'opensqlpopup'), (r'.*?\n', using(PythonConsoleLexer)) ], 'sqlpopup': [ ( r'(.*?\n)((?:PRAGMA|BEGIN|SELECT|INSERT|DELETE|ROLLBACK|' 'COMMIT|ALTER|UPDATE|CREATE|DROP|PRAGMA' '|DESCRIBE).*?(?:{stop}\n?|$))', bygroups(using(PythonConsoleLexer), Token.Sql.Popup), "#pop" ) ], 'opensqlpopup': [ ( r'.*?(?:{stop}\n*|$)', Token.Sql, "#pop" ) ] } class PythonWithSQLLexer(RegexLexer): name = 'Python+SQL' aliases = ['pycon+sql'] flags = re.IGNORECASE | re.DOTALL tokens = { 'root': [ (r'{sql}', Token.Sql.Link, 'sqlpopup'), (r'{opensql}', Token.Sql.Open, 'opensqlpopup'), (r'.*?\n', using(PythonLexer)) ], 'sqlpopup': [ ( r'(.*?\n)((?:PRAGMA|BEGIN|SELECT|INSERT|DELETE|ROLLBACK' '|COMMIT|ALTER|UPDATE|CREATE|DROP' '|PRAGMA|DESCRIBE).*?(?:{stop}\n?|$))', bygroups(using(PythonLexer), Token.Sql.Popup), "#pop" ) ], 'opensqlpopup': [ ( r'.*?(?:{stop}\n*|$)', Token.Sql, "#pop" ) ] } class PopupSQLFormatter(HtmlFormatter): def _format_lines(self, tokensource): buf = [] for ttype, value in apply_filters(tokensource, [StripDocTestFilter()]): if ttype in Token.Sql: for t, v in HtmlFormatter._format_lines(self, iter(buf)): yield t, v buf = [] if ttype is Token.Sql: yield 1, "
      %s
      " % \ re.sub(r'(?:[{stop}|\n]*)$', '', value) elif ttype is Token.Sql.Link: yield 1, "sql" elif ttype is Token.Sql.Popup: yield 1, "" % \ re.sub(r'(?:[{stop}|\n]*)$', '', value) else: buf.append((ttype, value)) for t, v in _strip_trailing_whitespace( HtmlFormatter._format_lines(self, iter(buf))): yield t, v class PopupLatexFormatter(LatexFormatter): def _filter_tokens(self, tokensource): for ttype, value in apply_filters(tokensource, [StripDocTestFilter()]): if ttype in Token.Sql: if ttype is not Token.Sql.Link and ttype is not Token.Sql.Open: yield Token.Literal, re.sub(r'{stop}', '', value) else: continue else: yield ttype, value def format(self, tokensource, outfile): LatexFormatter.format(self, self._filter_tokens(tokensource), outfile) def setup(app): app.add_lexer('pycon+sql', PyConWithSQLLexer()) app.add_lexer('python+sql', PythonWithSQLLexer()) PygmentsBridge.html_formatter = PopupSQLFormatter PygmentsBridge.latex_formatter = PopupLatexFormatter SQLAlchemy-0.8.4/doc/build/builder/util.py0000644000076500000240000000044412251147171021055 0ustar classicstaff00000000000000import re def striptags(text): return re.compile(r'<[^>]*>').sub('', text) def go(m): # .html with no anchor if present, otherwise "#" for top of page return m.group(1) or '#' def strip_toplevel_anchors(text): return re.compile(r'(\.html)?#[-\w]+-toplevel').sub(go, text) SQLAlchemy-0.8.4/doc/build/changelog/0000755000076500000240000000000012251151573020026 5ustar classicstaff00000000000000SQLAlchemy-0.8.4/doc/build/changelog/changelog_01.rst0000644000076500000240000006444312251147171023021 0ustar classicstaff00000000000000 ============== 0.1 Changelog ============== .. changelog:: :version: 0.1.7 :released: Fri May 05 2006 .. change:: :tags: :tickets: some fixes to topological sort algorithm .. change:: :tags: :tickets: added DISTINCT ON support to Postgres (just supply distinct=[col1,col2..]) .. change:: :tags: :tickets: added __mod__ (% operator) to sql expressions .. change:: :tags: :tickets: "order_by" mapper property inherited from inheriting mapper .. change:: :tags: :tickets: fix to column type used when mapper UPDATES/DELETEs .. change:: :tags: :tickets: with convert_unicode=True, reflection was failing, has been fixed .. change:: :tags: :tickets: types types types! still werent working....have to use TypeDecorator again :( .. change:: :tags: :tickets: mysql binary type converts array output to buffer, fixes PickleType .. change:: :tags: :tickets: fixed the attributes.py memory leak once and for all .. change:: :tags: :tickets: unittests are qualified based on the databases that support each one .. change:: :tags: :tickets: fixed bug where column defaults would clobber VALUES clause of insert objects .. change:: :tags: :tickets: fixed bug where table def w/ schema name would force engine connection .. change:: :tags: :tickets: fix for parenthesis to work correctly with subqueries in INSERT/UPDATE .. change:: :tags: :tickets: HistoryArraySet gets extend() method .. change:: :tags: :tickets: fixed lazyload support for other comparison operators besides = .. change:: :tags: :tickets: lazyload fix where two comparisons in the join condition point to the samem column .. change:: :tags: :tickets: added "construct_new" flag to mapper, will use __new__ to create instances instead of __init__ (standard in 0.2) .. change:: :tags: :tickets: added selectresults.py to SVN, missed it last time .. change:: :tags: :tickets: tweak to allow a many-to-many relationship from a table to itself via an association table .. 
change:: :tags: :tickets: small fix to "translate_row" function used by polymorphic example .. change:: :tags: :tickets: create_engine uses cgi.parse_qsl to read query string (out the window in 0.2) .. change:: :tags: :tickets: tweaks to CAST operator .. change:: :tags: :tickets: fixed function names LOCAL_TIME/LOCAL_TIMESTAMP -> LOCALTIME/LOCALTIMESTAMP .. change:: :tags: :tickets: fixed order of ORDER BY/HAVING in compile .. changelog:: :version: 0.1.6 :released: Wed Apr 12 2006 .. change:: :tags: :tickets: support for MS-SQL added courtesy Rick Morrison, Runar Petursson .. change:: :tags: :tickets: the latest SQLSoup from J. Ellis .. change:: :tags: :tickets: ActiveMapper has preliminary support for inheritance (Jeff Watkins) .. change:: :tags: :tickets: added a "mods" system which allows pluggable modules that modify/augment core functionality, using the function "install_mods(\*modnames)". .. change:: :tags: :tickets: added the first "mod", SelectResults, which modifies mapper selects to return generators that turn ranges into LIMIT/OFFSET queries (Jonas Borgstr? .. change:: :tags: :tickets: factored out querying capabilities of Mapper into a separate Query object which is Session-centric. this improves the performance of mapper.using(session) and makes other things possible. .. change:: :tags: :tickets: objectstore/Session refactored, the official way to save objects is now via the flush() method. The begin/commit functionality of Session is factored into LegacySession which is still established as the default behavior, until the 0.2 series. .. change:: :tags: :tickets: types system is bound to an engine at query compile time, not schema construction time. this simplifies the types system as well as the ProxyEngine. .. change:: :tags: :tickets: added 'version_id' keyword argument to mapper. this keyword should reference a Column object with type Integer, preferably non-nullable, which will be used on the mapped table to track version numbers. this number is incremented on each save operation and is specifed in the UPDATE/DELETE conditions so that it factors into the returned row count, which results in a ConcurrencyError if the value received is not the expected count. .. change:: :tags: :tickets: added 'entity_name' keyword argument to mapper. a mapper is now associated with a class via the class object as well as an optional entity_name parameter, which is a string defaulting to None. any number of primary mappers can be created for a class, qualified by the entity name. instances of those classes will issue all of their load and save operations through their entity_name-qualified mapper, and maintain separate a identity in the identity map for an otherwise equilvalent object. .. change:: :tags: :tickets: overhaul to the attributes system. code has been clarified, and also fixed to support proper polymorphic behavior on object attributes. .. change:: :tags: :tickets: added "for_update" flag to Select objects .. change:: :tags: :tickets: some fixes for backrefs .. change:: :tags: :tickets: fix for postgres1 DateTime type .. change:: :tags: :tickets: documentation pages mostly switched over to Markdown syntax .. changelog:: :version: 0.1.5 :released: Mon Mar 27 2006 .. change:: :tags: :tickets: added SQLSession concept to SQLEngine. this object keeps track of retrieving a connection from the connection pool as well as an in-progress transaction. 
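A few entries above describe the new "version_id" keyword to mapper(). As a very rough sketch of that optimistic-versioning idea, here it is spelled with the later classic-mapper argument name "version_id_col" (the 0.1-era keyword itself did not survive); the table, class and column names are invented for the example::

    from sqlalchemy import MetaData, Table, Column, Integer, String
    from sqlalchemy.orm import mapper

    metadata = MetaData()
    accounts = Table('accounts', metadata,
        Column('id', Integer, primary_key=True),
        Column('name', String(50)),
        Column('version', Integer, nullable=False))

    class Account(object):
        pass

    # the version column is incremented on every UPDATE/DELETE and included
    # in the WHERE clause, so a stale row produces a concurrency error
    # instead of silently overwriting someone else's change
    mapper(Account, accounts, version_id_col=accounts.c.version)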
methods push_session() and pop_session() added to SQLEngine which push/pop a new SQLSession onto the engine, allowing operation upon a second connection "nested" within the previous one, allowing nested transactions. Other tricks are sure to come later regarding SQLSession. .. change:: :tags: :tickets: added nest_on argument to objectstore.Session. This is a single SQLEngine or list of engines for which push_session()/pop_session() will be called each time this Session becomes the active session (via objectstore.push_session() or equivalent). This allows a unit of work Session to take advantage of the nested transaction feature without explicitly calling push_session/pop_session on the engine. .. change:: :tags: :tickets: factored apart objectstore/unitofwork to separate "Session scoping" from "uow commit heavy lifting" .. change:: :tags: :tickets: added populate_instance() method to MapperExtension. allows an extension to modify the population of object attributes. this method can call the populate_instance() method on another mapper to proxy the attribute population from one mapper to another; some row translation logic is also built in to help with this. .. change:: :tags: :tickets: fixed Oracle8-compatibility "use_ansi" flag which converts JOINs to comparisons with the = and (+) operators, passes basic unittests .. change:: :tags: :tickets: tweaks to Oracle LIMIT/OFFSET support .. change:: :tags: :tickets: Oracle reflection uses ALL_** views instead of USER_** to get larger list of stuff to reflect from .. change:: :tags: :tickets: 105 fixes to Oracle foreign key reflection .. change:: :tags: :tickets: objectstore.commit(obj1, obj2,...) adds an extra step to seek out private relations on properties and delete child objects, even though its not a global commit .. change:: :tags: :tickets: lots and lots of fixes to mappers which use inheritance, strengthened the concept of relations on a mapper being made towards the "local" table for that mapper, not the tables it inherits. allows more complex compositional patterns to work with lazy/eager loading. .. change:: :tags: :tickets: added support for mappers to inherit from others based on the same table, just specify the same table as that of both parent/child mapper. .. change:: :tags: :tickets: some minor speed improvements to the attributes system with regards to instantiating and populating new objects. .. change:: :tags: :tickets: fixed MySQL binary unit test .. change:: :tags: :tickets: INSERTs can receive clause elements as VALUES arguments, not just literal values .. change:: :tags: :tickets: support for calling multi-tokened functions, i.e. schema.mypkg.func() .. change:: :tags: :tickets: added J. Ellis' SQLSoup module to extensions package .. change:: :tags: :tickets: added "polymorphic" examples illustrating methods to load multiple object types from one mapper, the second of which uses the new populate_instance() method. small improvements to mapper, UNION construct to help the examples along .. change:: :tags: :tickets: improvements/fixes to session.refresh()/session.expire() (which may have been called "invalidate" earlier..) .. change:: :tags: :tickets: added session.expunge() which totally removes an object from the current session .. change:: :tags: :tickets: added \*args, \**kwargs pass-thru to engine.transaction(func) allowing easier creation of transactionalizing decorator functions .. change:: :tags: :tickets: added iterator interface to ResultProxy: "for row in result:..." .. 
change:: :tags: :tickets: added assertion to tx = session.begin(); tx.rollback(); tx.begin(), i.e. cant use it after a rollback() .. change:: :tags: :tickets: added date conversion on bind parameter fix to SQLite enabling dates to work with pysqlite1 .. change:: :tags: :tickets: 116 improvements to subqueries to more intelligently construct their FROM clauses .. change:: :tags: :tickets: added PickleType to types. .. change:: :tags: :tickets: fixed two bugs with column labels with regards to bind parameters: bind param keynames they are now generated from a column "label" in all relevant cases to take advantage of excess-name-length rules, and checks for a peculiar collision against a column named the same as "tablename_colname" added .. change:: :tags: :tickets: major overhaul to unit of work documentation, other documentation sections. .. change:: :tags: :tickets: fixed attributes bug where if an object is committed, its lazy-loaded list got blown away if it hadnt been loaded .. change:: :tags: :tickets: added unique_connection() method to engine, connection pool to return a connection that is not part of the thread-local context or any current transaction .. change:: :tags: :tickets: added invalidate() function to pooled connection. will remove the connection from the pool. still need work for engines to auto-reconnect to a stale DB though. .. change:: :tags: :tickets: added distinct() function to column elements so you can do func.count(mycol.distinct()) .. change:: :tags: :tickets: added "always_refresh" flag to Mapper, creates a mapper that will always refresh the attributes of objects it gets/selects from the DB, overwriting any changes made. .. changelog:: :version: 0.1.4 :released: Mon Mar 13 2006 .. change:: :tags: :tickets: create_engine() now uses genericized parameters; host/hostname, db/dbname/database, password/passwd, etc. for all engine connections. makes engine URIs much more "universal" .. change:: :tags: :tickets: added support for SELECT statements embedded into a column clause, using the flag "scalar=True" .. change:: :tags: :tickets: another overhaul to EagerLoading when used in conjunction with mappers that inherit; improvements to eager loads figuring out their aliased queries correctly, also relations set up against a mapper with inherited mappers will create joins against the table that is specific to the mapper itself (i.e. and not any tables that are inherited/are further down the inheritance chain), this can be overridden by using custom primary/secondary joins. .. change:: :tags: :tickets: added J.Ellis patch to mapper.py so that selectone() throws an exception if query returns more than one object row, selectfirst() to not throw the exception. also adds selectfirst_by (synonymous with get_by) and selectone_by .. change:: :tags: :tickets: added onupdate parameter to Column, will exec SQL/python upon an update statement.Also adds "for_update=True" to all DefaultGenerator subclasses .. change:: :tags: :tickets: added support for Oracle table reflection contributed by Andrija Zaric; still some bugs to work out regarding composite primary keys/dictionary selection .. change:: :tags: :tickets: checked in an initial Firebird module, awaiting testing. .. change:: :tags: :tickets: added sql.ClauseParameters dictionary object as the result for compiled.get_params(), does late-typeprocessing of bind parameters so that the original values are easier to access .. change:: :tags: :tickets: more docs for indexes, column defaults, connection pooling, engine construction .. 
change:: :tags: :tickets: overhaul to the construction of the types system. uses a simpler inheritance pattern so that any of the generic types can be easily subclassed, with no need for TypeDecorator. .. change:: :tags: :tickets: added "convert_unicode=False" parameter to SQLEngine, will cause all String types to perform unicode encoding/decoding (makes Strings act like Unicodes) .. change:: :tags: :tickets: added 'encoding="utf8"' parameter to engine. the given encoding will be used for all encode/decode calls within Unicode types as well as Strings when convert_unicode=True. .. change:: :tags: :tickets: improved support for mapping against UNIONs, added polymorph.py example to illustrate multi-class mapping against a UNION .. change:: :tags: :tickets: fix to SQLite LIMIT/OFFSET syntax .. change:: :tags: :tickets: fix to Oracle LIMIT syntax .. change:: :tags: :tickets: added backref() function, allows backreferences to have keyword arguments that will be passed to the backref. .. change:: :tags: :tickets: Sequences and ColumnDefault objects can do execute()/scalar() standalone .. change:: :tags: :tickets: SQL functions (i.e. func.foo()) can do execute()/scalar() standalone .. change:: :tags: :tickets: fix to SQL functions so that the ANSI-standard functions, i.e. current_timestamp etc., do not specify parenthesis. all other functions do. .. change:: :tags: :tickets: added settattr_clean and append_clean to SmartProperty, which set attributes without triggering a "dirty" event or any history. used as: myclass.prop1.setattr_clean(myobject, 'hi') .. change:: :tags: :tickets: improved support to column defaults when used by mappers; mappers will pull pre-executed defaults from statement's executed bind parameters (pre-conversion) to populate them into a saved object's attributes; if any PassiveDefaults have fired off, will instead post-fetch the row from the DB to populate the object. .. change:: :tags: :tickets: added 'get_session().invalidate(\*obj)' method to objectstore, instances will refresh() themselves upon the next attribute access. .. change:: :tags: :tickets: improvements to SQL func calls including an "engine" keyword argument so they can be execute()d or scalar()ed standalone, also added func accessor to SQLEngine .. change:: :tags: :tickets: fix to MySQL4 custom table engines, i.e. TYPE instead of ENGINE .. change:: :tags: :tickets: slightly enhanced logging, includes timestamps and a somewhat configurable formatting system, in lieu of a full-blown logging system .. change:: :tags: :tickets: improvements to the ActiveMapper class from the TG gang, including many-to-many relationships .. change:: :tags: :tickets: added Double and TinyInt support to mysql .. changelog:: :version: 0.1.3 :released: Thu Mar 02 2006 .. change:: :tags: :tickets: completed "post_update" feature, will add a second update statement before inserts and after deletes in order to reconcile a relationship without any dependencies being created; used when persisting two rows that are dependent on each other .. change:: :tags: :tickets: completed mapper.using(session) function, localized per-object Session functionality; objects can be declared and manipulated as local to any user-defined Session .. change:: :tags: :tickets: fix to Oracle "row_number over" clause with multiple tables .. change:: :tags: :tickets: mapper.get() was not selecting multiple-keyed objects if the mapper's table was a join, such as in an inheritance relationship, this is fixed. .. 
change:: :tags: :tickets: overhaul to sql/schema packages so that the sql package can run all on its own, producing selects, inserts, etc. without any engine dependencies. builds upon new TableClause/ColumnClause lexical objects. Schema's Table/Column objects are the "physical" subclasses of them. simplifies schema/sql relationship, extensions (like proxyengine), and speeds overall performance by a large margin. removes the entire getattr() behavior that plagued 0.1.1. .. change:: :tags: :tickets: refactoring of how the mapper "synchronizes" data between two objects into a separate module, works better with properties attached to a mapper that has an additional inheritance relationship to one of the related tables, also the same methodology used to synchronize parent/child objects now used by mapper to synchronize between inherited and inheriting mappers. .. change:: :tags: :tickets: made objectstore "check for out-of-identitymap" more aggressive, will perform the check when object attributes are modified or the object is deleted .. change:: :tags: :tickets: Index object fully implemented, can be constructed standalone, or via "index" and "unique" arguments on Columns. .. change:: :tags: :tickets: added "convert_unicode" flag to SQLEngine, will treat all String/CHAR types as Unicode types, with raw-byte/utf-8 translation on the bind parameter and result set side. .. change:: :tags: :tickets: postgres maintains a list of ANSI functions that must have no parenthesis so function calls with no arguments work consistently .. change:: :tags: :tickets: tables can be created with no engine specified. this will default their engine to a module-scoped "default engine" which is a ProxyEngine. this engine can be connected via the function "global_connect". .. change:: :tags: :tickets: added "refresh(\*obj)" method to objectstore / Session to reload the attributes of any set of objects from the database unconditionally .. changelog:: :version: 0.1.2 :released: Fri Feb 24 2006 .. change:: :tags: :tickets: fixed a recursive call in schema that was somehow running 994 times then returning normally. broke nothing, slowed down everything. thanks to jpellerin for finding this. .. changelog:: :version: 0.1.1 :released: Thu Feb 23 2006 .. change:: :tags: :tickets: small fix to Function class so that expressions with a func.foo() use the type of the Function object (i.e. the left side) as the type of the boolean expression, not the other side which is more of a moving target (changeset 1020). .. change:: :tags: :tickets: creating self-referring mappers with backrefs slightly easier (but still not that easy - changeset 1019) .. change:: :tags: :tickets: fixes to one-to-one mappings (changeset 1015) .. change:: :tags: :tickets: psycopg1 date/time issue with None fixed (changeset 1005) .. change:: :tags: :tickets: two issues related to postgres, which doesnt want to give you the "lastrowid" since oids are deprecated: * postgres database-side defaults that are on primary key cols *do* execute explicitly beforehand, even though thats not the idea of a PassiveDefault. this is because sequences on columns get reflected as PassiveDefaults, but need to be explicitly executed on a primary key col so we know what we just inserted. * if you did add a row that has a bunch of database-side defaults on it, and the PassiveDefault thing was working the old way, i.e. 
they just execute on the DB side, the "cant get the row back without an OID" exception that occurred also will not happen unless someone (usually the ORM) explicitly asks for it. .. change:: :tags: :tickets: fixed a glitch with engine.execute_compiled where it was making a second ResultProxy that just got thrown away. .. change:: :tags: :tickets: began to implement newer logic in object properities. you can now say myclass.attr.property, which will give you the PropertyLoader corresponding to that attribute, i.e. myclass.mapper.props['attr'] .. change:: :tags: :tickets: eager loading has been internally overhauled to use aliases at all times. more complicated chains of eager loads can now be created without any need for explicit "use aliases"-type instructions. EagerLoader code is also much simpler now. .. change:: :tags: :tickets: a new somewhat experimental flag "use_update" added to relations, indicates that this relationship should be handled by a second UPDATE statement, either after a primary INSERT or before a primary DELETE. handles circular row dependencies. .. change:: :tags: :tickets: added exceptions module, all raised exceptions (except for some KeyError/AttributeError exceptions) descend from these classes. .. change:: :tags: :tickets: fix to date types with MySQL, returned timedelta converted to datetime.time .. change:: :tags: :tickets: two-phase objectstore.commit operations (i.e. begin/commit) now return a transactional object (SessionTrans), to more clearly indicate transaction boundaries. .. change:: :tags: :tickets: Index object with create/drop support added to schema .. change:: :tags: :tickets: fix to postgres, where it will explicitly pre-execute a PassiveDefault on a table if it is a primary key column, pursuant to the ongoing "we cant get inserted rows back from postgres" issue .. change:: :tags: :tickets: change to information_schema query that gets back postgres table defs, now uses explicit JOIN keyword, since one user had faster performance with 8.1 .. change:: :tags: :tickets: fix to engine.process_defaults so it works correctly with a table that has different column name/column keys (changset 982) .. change:: :tags: :tickets: a column can only be attached to one table - this is now asserted .. change:: :tags: :tickets: postgres time types descend from Time type .. change:: :tags: :tickets: fix to alltests so that it runs types test (now named testtypes) .. change:: :tags: :tickets: fix to Join object so that it correctly exports its foreign keys (cs 973) .. change:: :tags: :tickets: creating relationships against mappers that use inheritance fixed (cs 973) SQLAlchemy-0.8.4/doc/build/changelog/changelog_02.rst0000644000076500000240000007516112251150015023010 0ustar classicstaff00000000000000 ============== 0.2 Changelog ============== .. changelog:: :version: 0.2.8 :released: Tue Sep 05 2006 .. change:: :tags: :tickets: cleanup on connection methods + documentation. custom DBAPI arguments specified in query string, 'connect_args' argument to 'create_engine', or custom creation function via 'creator' function to 'create_engine'. .. change:: :tags: :tickets: 274 added "recycle" argument to Pool, is "pool_recycle" on create_engine, defaults to 3600 seconds; connections after this age will be closed and replaced with a new one, to handle db's that automatically close stale connections .. change:: :tags: :tickets: 121 changed "invalidate" semantics with pooled connection; will instruct the underlying connection record to reconnect the next time its called. 
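The "pool_recycle" setting described a couple of entries above can be sketched as follows; the connection URL and the one-hour value are purely illustrative::

    from sqlalchemy import create_engine

    # connections older than 3600 seconds are closed and replaced the next
    # time they are checked out, which sidesteps servers that silently drop
    # idle connections
    engine = create_engine('mysql://scott:tiger@localhost/test',
                           pool_recycle=3600)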
"invalidate" will also automatically be called if any error is thrown in the underlying call to connection.cursor(). this will hopefully allow the connection pool to reconnect to a database that had been stopped and started without restarting the connecting application .. change:: :tags: :tickets: eesh ! the tutorial doctest was broken for quite some time. .. change:: :tags: :tickets: add_property() method on mapper does a "compile all mappers" step in case the given property references a non-compiled mapper (as it did in the case of the tutorial !) .. change:: :tags: :tickets: 277 check for pg sequence already existing before create .. change:: :tags: :tickets: if a contextual session is established via MapperExtension.get_session (as it is using the sessioncontext plugin, etc), a lazy load operation will use that session by default if the parent object is not persistent with a session already. .. change:: :tags: :tickets: lazy loads will not fire off for an object that does not have a database identity (why? see http://www.sqlalchemy.org/trac/wiki/WhyDontForeignKeysLoadData) .. change:: :tags: :tickets: unit-of-work does a better check for "orphaned" objects that are part of a "delete-orphan" cascade, for certain conditions where the parent isnt available to cascade from. .. change:: :tags: :tickets: mappers can tell if one of their objects is an "orphan" based on interactions with the attribute package. this check is based on a status flag maintained for each relationship when objects are attached and detached from each other. .. change:: :tags: :tickets: it is now invalid to declare a self-referential relationship with "delete-orphan" (as the abovementioned check would make them impossible to save) .. change:: :tags: :tickets: improved the check for objects being part of a session when the unit of work seeks to flush() them as part of a relationship.. .. change:: :tags: :tickets: 280 statement execution supports using the same BindParam object more than once in an expression; simplified handling of positional parameters. nice job by Bill Noon figuring out the basic idea. .. change:: :tags: :tickets: 60, 71 postgres reflection moved to use pg_schema tables, can be overridden with use_information_schema=True argument to create_engine. .. change:: :tags: :tickets: 155 added case_sensitive argument to MetaData, Table, Column, determines itself automatically based on if a parent schemaitem has a non-None setting for the flag, or if not, then whether the identifier name is all lower case or not. when set to True, quoting is applied to identifiers with mixed or uppercase identifiers. quoting is also applied automatically in all cases to identifiers that are known to be reserved words or contain other non-standard characters. various database dialects can override all of this behavior, but currently they are all using the default behavior. tested with postgres, mysql, sqlite, oracle. needs more testing with firebird, ms-sql. part of the ongoing work with .. change:: :tags: :tickets: unit tests updated to run without any pysqlite installed; pool test uses a mock DBAPI .. change:: :tags: :tickets: 281 urls support escaped characters in passwords .. change:: :tags: :tickets: added limit/offset to UNION queries (though not yet in oracle) .. change:: :tags: :tickets: added "timezone=True" flag to DateTime and Time types. 
postgres so far will convert this to "TIME[STAMP] (WITH|WITHOUT) TIME ZONE", so that control over timezone presence is more controllable (psycopg2 returns datetimes with tzinfo's if available, which can create confusion against datetimes that dont). .. change:: :tags: :tickets: 287 fix to using query.count() with distinct, \**kwargs with SelectResults count() .. change:: :tags: :tickets: 289 deregister Table from MetaData when autoload fails; .. change:: :tags: :tickets: 293 import of py2.5s sqlite3 .. change:: :tags: :tickets: 296 unicode fix for startswith()/endswith() .. changelog:: :version: 0.2.7 :released: Sat Aug 12 2006 .. change:: :tags: :tickets: quoting facilities set up so that database-specific quoting can be turned on for individual table, schema, and column identifiers when used in all queries/creates/drops. Enabled via "quote=True" in Table or Column, as well as "quote_schema=True" in Table. Thanks to Aaron Spike for the excellent efforts. .. change:: :tags: :tickets: assignmapper was setting is_primary=True, causing all sorts of mayhem by not raising an error when redundant mappers were set up, fixed .. change:: :tags: :tickets: added allow_null_pks option to Mapper, allows rows where some primary key columns are null (i.e. when mapping to outer joins etc) .. change:: :tags: :tickets: modifcation to unitofwork to not maintain ordering within the "new" list or within the UOWTask "objects" list; instead, new objects are tagged with an ordering identifier as they are registered as new with the session, and the INSERT statements are then sorted within the mapper save_obj. the INSERT ordering has basically been pushed all the way to the end of the flush cycle. that way the various sorts and organizations occuring within UOWTask (particularly the circular task sort) dont have to worry about maintaining order (which they werent anyway) .. change:: :tags: :tickets: fixed reflection of foreign keys to autoload the referenced table if it was not loaded already .. change:: :tags: :tickets: 256 - pass URL query string arguments to connect() function .. change:: :tags: :tickets: 257 - oracle boolean type .. change:: :tags: :tickets: custom primary/secondary join conditions in a relation *will* be propagated to backrefs by default. specifying a backref() will override this behavior. .. change:: :tags: :tickets: better check for ambiguous join conditions in sql.Join; propagates to a better error message in PropertyLoader (i.e. relation()/backref()) for when the join condition can't be reasonably determined. .. change:: :tags: :tickets: sqlite creates ForeignKeyConstraint objects properly upon table reflection. .. change:: :tags: :tickets: 224 adjustments to pool stemming from changes made for. overflow counter should only be decremented if the connection actually succeeded. added a test script to attempt testing this. .. change:: :tags: :tickets: fixed mysql reflection of default values to be PassiveDefault .. change:: :tags: :tickets: 263, 264 added reflected 'tinyint', 'mediumint' type to MS-SQL. .. change:: :tags: :tickets: SingletonThreadPool has a size and does a cleanup pass, so that only a given number of thread-local connections stay around (needed for sqlite applications that dispose of threads en masse) .. change:: :tags: :tickets: 267, 265 fixed small pickle bug(s) with lazy loaders .. change:: :tags: :tickets: fixed possible error in mysql reflection where certain versions return an array instead of string for SHOW CREATE TABLE call .. 
change:: :tags: :tickets: 1770 fix to lazy loads when mapping to joins .. change:: :tags: :tickets: all create()/drop() calls have a keyword argument of "connectable". "engine" is deprecated. .. change:: :tags: :tickets: fixed ms-sql connect() to work with adodbapi .. change:: :tags: :tickets: added "nowait" flag to Select() .. change:: :tags: :tickets: 271 inheritance check uses issubclass() instead of direct __mro__ check to make sure class A inherits from B, allowing mapper inheritance to more flexibly correspond to class inheritance .. change:: :tags: :tickets: 252 SelectResults will use a subselect, when calling an aggregate (i.e. max, min, etc.) on a SelectResults that has an ORDER BY clause .. change:: :tags: :tickets: 269 fixes to types so that database-specific types more easily used; fixes to mysql text types to work with this methodology .. change:: :tags: :tickets: some fixes to sqlite date type organization .. change:: :tags: :tickets: 263 added MSTinyInteger to MS-SQL .. changelog:: :version: 0.2.6 :released: Thu Jul 20 2006 .. change:: :tags: :tickets: 76 big overhaul to schema to allow truly composite primary and foreign key constraints, via new ForeignKeyConstraint and PrimaryKeyConstraint objects. Existing methods of primary/foreign key creation have not been changed but use these new objects behind the scenes. table creation and reflection is now more table oriented rather than column oriented. .. change:: :tags: :tickets: overhaul to MapperExtension calling scheme, wasnt working very well previously .. change:: :tags: :tickets: tweaks to ActiveMapper, supports self-referential relationships .. change:: :tags: :tickets: slight rearrangement to objectstore (in activemapper/threadlocal) so that the SessionContext is referenced by '.context' instead of subclassed directly. .. change:: :tags: :tickets: activemapper will use threadlocal's objectstore if the mod is activated when activemapper is imported .. change:: :tags: :tickets: small fix to URL regexp to allow filenames with '@' in them .. change:: :tags: :tickets: fixes to Session expunge/update/etc...needs more cleanup. .. change:: :tags: :tickets: select_table mappers *still* werent always compiling .. change:: :tags: :tickets: fixed up Boolean datatype .. change:: :tags: :tickets: added count()/count_by() to list of methods proxied by assignmapper; this also adds them to activemapper .. change:: :tags: :tickets: connection exceptions wrapped in DBAPIError .. change:: :tags: :tickets: ActiveMapper now supports autoloading column definitions from the database if you supply a __autoload__ = True attribute in your mapping inner-class. Currently this does not support reflecting any relationships. .. change:: :tags: :tickets: deferred column load could screw up the connection status in a flush() under some circumstances, this was fixed .. change:: :tags: :tickets: expunge() was not working with cascade, fixed. .. change:: :tags: :tickets: potential endless loop in cascading operations fixed. .. change:: :tags: :tickets: added "synonym()" function, applied to properties to have a propname the same as another, for the purposes of overriding props and allowing the original propname to be accessible in select_by(). .. change:: :tags: :tickets: fix to typing in clause construction which specifically helps type issues with polymorphic_union (CAST/ColumnClause propagates its type to proxy columns) .. 
change:: :tags: :tickets: mapper compilation work ongoing, someday it'll work....moved around the initialization of MapperProperty objects to be after all mappers are created to better handle circular compilations. do_init() method is called on all properties now which are more aware of their "inherited" status if so. .. change:: :tags: :tickets: eager loads explicitly disallowed on self-referential relationships, or relationships to an inheriting mapper (which is also self-referential) .. change:: :tags: :tickets: 244 reduced bind param size in query._get to appease the picky oracle .. change:: :tags: :tickets: 234 added 'checkfirst' argument to table.create()/table.drop(), as well as table.exists() .. change:: :tags: :tickets: 245 some other ongoing fixes to inheritance .. change:: :tags: :tickets: attribute/backref/orphan/history-tracking tweaks as usual... .. changelog:: :version: 0.2.5 :released: Sat Jul 08 2006 .. change:: :tags: :tickets: fixed endless loop bug in select_by(), if the traversal hit two mappers that referenced each other .. change:: :tags: :tickets: upgraded all unittests to insert './lib/' into sys.path, working around new setuptools PYTHONPATH-killing behavior .. change:: :tags: :tickets: further fixes with attributes/dependencies/etc.... .. change:: :tags: :tickets: improved error handling for when DynamicMetaData is not connected .. change:: :tags: :tickets: MS-SQL support largely working (tested with pymssql) .. change:: :tags: :tickets: ordering of UPDATE and DELETE statements within groups is now in order of primary key values, for more deterministic ordering .. change:: :tags: :tickets: after_insert/delete/update mapper extensions now called per object, not per-object-per-table .. change:: :tags: :tickets: further fixes/refactorings to mapper compilation .. changelog:: :version: 0.2.4 :released: Tue Jun 27 2006 .. change:: :tags: :tickets: try/except when the mapper sets init.__name__ on a mapped class, supports python 2.3 .. change:: :tags: :tickets: fixed bug where threadlocal engine would still autocommit despite a transaction in progress .. change:: :tags: :tickets: lazy load and deferred load operations require the parent object to be in a Session to do the operation; whereas before the operation would just return a blank list or None, it now raises an exception. .. change:: :tags: :tickets: Session.update() is slightly more lenient if the session to which the given object was formerly attached to was garbage collected; otherwise still requires you explicitly remove the instance from the previous Session. .. change:: :tags: :tickets: fixes to mapper compilation, checking for more error conditions .. change:: :tags: :tickets: small fix to eager loading combined with ordering/limit/offset .. change:: :tags: :tickets: 206 utterly remarkable: added a single space between 'CREATE TABLE' and '(' since *thats how MySQL indicates a non- reserved word tablename.....* .. change:: :tags: :tickets: more fixes to inheritance, related to many-to-many relations properly saving .. change:: :tags: :tickets: fixed bug when specifying explicit module to mysql dialect .. change:: :tags: :tickets: when QueuePool times out it raises a TimeoutError instead of erroneously making another connection .. change:: :tags: :tickets: Queue.Queue usage in pool has been replaced with a locally modified version (works in py2.3/2.4!) that uses a threading.RLock for a mutex. 
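The "checkfirst" argument mentioned in the 0.2.6 entries above behaves roughly like this; the in-memory SQLite URL and table name are made up for the example::

    from sqlalchemy import create_engine, MetaData, Table, Column, Integer

    engine = create_engine('sqlite://')          # throwaway in-memory database
    metadata = MetaData()
    widgets = Table('widgets', metadata,
                    Column('id', Integer, primary_key=True))

    # checkfirst issues an existence check before the CREATE / DROP, so both
    # calls are safe whether or not the table is already present
    widgets.create(engine, checkfirst=True)
    widgets.drop(engine, checkfirst=True)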
this is to fix a reported case where a ConnectionFairy's __del__() method got called within the Queue's get() method, which then returns its connection to the Queue via the put() method, causing a reentrant hang unless threading.RLock is used. .. change:: :tags: :tickets: postgres will not place SERIAL keyword on a primary key column if it has a foreign key constraint .. change:: :tags: :tickets: 221 cursor() method on ConnectionFairy allows db-specific extension arguments to be propagated .. change:: :tags: :tickets: 225 lazy load bind params properly propagate column type .. change:: :tags: :tickets: new MySQL types: MSEnum, MSTinyText, MSMediumText, MSLongText, etc. more support for MS-specific length/precision params in numeric types patch courtesy Mike Bernson .. change:: :tags: :tickets: 224 some fixes to connection pool invalidate() .. changelog:: :version: 0.2.3 :released: Sat Jun 17 2006 .. change:: :tags: :tickets: overhaul to mapper compilation to be deferred. this allows mappers to be constructed in any order, and their relationships to each other are compiled when the mappers are first used. .. change:: :tags: :tickets: fixed a pretty big speed bottleneck in cascading behavior particularly when backrefs were in use .. change:: :tags: :tickets: the attribute instrumentation module has been completely rewritten; its now a large degree simpler and clearer, slightly faster. the "history" of an attribute is no longer micromanaged with each change and is instead part of a "CommittedState" object created when the instance is first loaded. HistoryArraySet is gone, the behavior of list attributes is now more open ended (i.e. theyre not sets anymore). .. change:: :tags: :tickets: py2.4 "set" construct used internally, falls back to sets.Set when "set" not available/ordering is needed. .. change:: :tags: :tickets: fix to transaction control, so that repeated rollback() calls dont fail (was failing pretty badly when flush() would raise an exception in a larger try/except transaction block) .. change:: :tags: :tickets: 151 "foreignkey" argument to relation() can also be a list. fixed auto-foreignkey detection .. change:: :tags: :tickets: fixed bug where tables with schema names werent getting indexed in the MetaData object properly .. change:: :tags: :tickets: 207 fixed bug where Column with redefined "key" property wasnt getting type conversion happening in the ResultProxy .. change:: :tags: :tickets: fixed 'port' attribute of URL to be an integer if present .. change:: :tags: :tickets: fixed old bug where if a many-to-many table mapped as "secondary" had extra columns, delete operations didnt work .. change:: :tags: :tickets: bugfixes for mapping against UNION queries .. change:: :tags: :tickets: fixed incorrect exception class thrown when no DB driver present .. change:: :tags: :tickets: 138 added NonExistentTable exception thrown when reflecting a table that doesnt exist .. change:: :tags: :tickets: small fix to ActiveMapper regarding one-to-one backrefs, other refactorings .. change:: :tags: :tickets: overridden constructor in mapped classes gets __name__ and __doc__ from the original class .. change:: :tags: :tickets: 200 fixed small bug in selectresult.py regarding mapper extension .. change:: :tags: :tickets: small tweak to cascade_mappers, not very strongly supported function at the moment .. change:: :tags: :tickets: 202 some fixes to between(), column.between() to propagate typing information better .. 
change:: :tags: :tickets: 203 if an object fails to be constructed, is not added to the session .. change:: :tags: :tickets: CAST function has been made into its own clause object with its own compilation function in ansicompiler; allows MySQL to silently ignore most CAST calls since MySQL seems to only support the standard CAST syntax with Date types. MySQL-compatible CAST support for strings, ints, etc. a TODO .. changelog:: :version: 0.2.2 :released: Mon Jun 05 2006 .. change:: :tags: :tickets: 190 big improvements to polymorphic inheritance behavior, enabling it to work with adjacency list table structures .. change:: :tags: :tickets: major fixes and refactorings to inheritance relationships overall, more unit tests .. change:: :tags: :tickets: fixed "echo_pool" flag on create_engine() .. change:: :tags: :tickets: fix to docs, removed incorrect info that close() is unsafe to use with threadlocal strategy (its totally safe !) .. change:: :tags: :tickets: 188 create_engine() can take URLs as string or unicode .. change:: :tags: :tickets: firebird support partially completed; thanks to James Ralston and Brad Clements for their efforts. .. change:: :tags: :tickets: Oracle url translation was broken, fixed, will feed host/port/sid into cx_oracle makedsn() if 'database' field is present, else uses straight TNS name from the 'host' field .. change:: :tags: :tickets: fix to using unicode criterion for query.get()/query.load() .. change:: :tags: :tickets: count() function on selectables now uses table primary key or first column instead of "1" for criterion, also uses label "rowcount" instead of "count". .. change:: :tags: :tickets: got rudimental "mapping to multiple tables" functionality cleaned up, more correctly documented .. change:: :tags: :tickets: restored global_connect() function, attaches to a DynamicMetaData instance called "default_metadata". leaving MetaData arg to Table out will use the default metadata. .. change:: :tags: :tickets: fixes to session cascade behavior, entity_name propigation .. change:: :tags: :tickets: reorganized unittests into subdirectories .. change:: :tags: :tickets: more fixes to threadlocal connection nesting patterns .. changelog:: :version: 0.2.1 :released: Mon May 29 2006 .. change:: :tags: :tickets: "pool" argument to create_engine() properly propagates .. change:: :tags: :tickets: fixes to URL, raises exception if not parsed, does not pass blank fields along to the DB connect string (a string such as user:host@/db was breaking on postgres) .. change:: :tags: :tickets: small fixes to Mapper when it inserts and tries to get new primary key values back .. change:: :tags: :tickets: rewrote half of TLEngine, the ComposedSQLEngine used with 'strategy="threadlocal"'. it now properly implements engine.begin()/ engine.commit(), which nest fully with connection.begin()/trans.commit(). added about six unittests. .. change:: :tags: :tickets: major "duh" in pool.Pool, forgot to put back the WeakValueDictionary. unittest which was supposed to check for this was also silently missing it. fixed unittest to ensure that ConnectionFairy properly falls out of scope. .. change:: :tags: :tickets: placeholder dispose() method added to SingletonThreadPool, doesnt do anything yet .. change:: :tags: :tickets: rollback() is automatically called when an exception is raised, but only if theres no transaction in process (i.e. works more like autocommit). .. change:: :tags: :tickets: fixed exception raise in sqlite if no sqlite module present .. 
change:: :tags: :tickets: added extra example detail for association object doc .. change:: :tags: :tickets: Connection adds checks for already being closed .. changelog:: :version: 0.2.0 :released: Sat May 27 2006 .. change:: :tags: :tickets: overhaul to Engine system so that what was formerly the SQLEngine is now a ComposedSQLEngine which consists of a variety of components, including a Dialect, ConnectionProvider, etc. This impacted all the db modules as well as Session and Mapper. .. change:: :tags: :tickets: create_engine now takes only RFC-1738-style strings: driver://user:password@host:port/database .. change:: :tags: :tickets: 152 total rewrite of connection-scoping methodology, Connection objects can now execute clause elements directly, added explicit "close" as well as support throughout Engine/ORM to handle closing properly, no longer relying upon __del__ internally to return connections to the pool. .. change:: :tags: :tickets: overhaul to Session interface and scoping. uses hibernate-style methods, including query(class), save(), save_or_update(), etc. no threadlocal scope is installed by default. Provides a binding interface to specific Engines and/or Connections so that underlying Schema objects do not need to be bound to an Engine. Added a basic SessionTransaction object that can simplistically aggregate transactions across multiple engines. .. change:: :tags: :tickets: overhaul to mapper's dependency and "cascade" behavior; dependency logic factored out of properties.py into a separate module "dependency.py". "cascade" behavior is now explicitly controllable, proper implementation of "delete", "delete-orphan", etc. dependency system can now determine at flush time if a child object has a parent or not so that it makes better decisions on how that child should be updated in the DB with regards to deletes. .. change:: :tags: :tickets: overhaul to Schema to build upon MetaData object instead of an Engine. Entire SQL/Schema system can be used with no Engines whatsoever, executed solely by an explicit Connection object. the "bound" methodlogy exists via the BoundMetaData for schema objects. ProxyEngine is generally not needed anymore and is replaced by DynamicMetaData. .. change:: :tags: :tickets: 167 true polymorphic behavior implemented, fixes .. change:: :tags: :tickets: 147 "oid" system has been totally moved into compile-time behavior; if they are used in an order_by where they are not available, the order_by doesnt get compiled, fixes .. change:: :tags: :tickets: overhaul to packaging; "mapping" is now "orm", "objectstore" is now "session", the old "objectstore" namespace gets loaded in via the "threadlocal" mod if used .. change:: :tags: :tickets: mods now called in via "import ". extensions favored over mods as mods are globally-monkeypatching .. change:: :tags: :tickets: 154 fix to add_property so that it propagates properties to inheriting mappers .. change:: :tags: :tickets: backrefs create themselves against primary mapper of its originating property, priamry/secondary join arguments can be specified to override. helps their usage with polymorphic mappers .. change:: :tags: :tickets: 31 "table exists" function has been implemented .. change:: :tags: :tickets: 98 "create_all/drop_all" added to MetaData object .. change:: :tags: :tickets: improvements and fixes to topological sort algorithm, as well as more unit tests .. change:: :tags: :tickets: tutorial page added to docs which also can be run with a custom doctest runner to ensure its properly working. 
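Two of the 0.2.0 items above, RFC-1738 engine URLs and create_all()/drop_all() on MetaData, look roughly like this in practice; the URL and the table definition are invented for the example::

    from sqlalchemy import create_engine, MetaData, Table, Column, Integer, String

    # driver://user:password@host:port/database
    engine = create_engine('postgresql://scott:tiger@localhost:5432/test')

    metadata = MetaData()
    users = Table('users', metadata,
        Column('id', Integer, primary_key=True),
        Column('name', String(40)))

    metadata.create_all(engine)   # CREATE TABLE for every table in this MetaData
    metadata.drop_all(engine)     # and the corresponding DROP TABLE statements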
docs generally overhauled to deal with new code patterns .. change:: :tags: :tickets: many more fixes, refactorings. .. change:: :tags: :tickets: migration guide is available on the Wiki at http://www.sqlalchemy.org/trac/wiki/02Migration SQLAlchemy-0.8.4/doc/build/changelog/changelog_03.rst0000644000076500000240000024212612251147171023017 0ustar classicstaff00000000000000 ============== 0.3 Changelog ============== .. changelog:: :version: 0.3.11 :released: Sun Oct 14 2007 .. change:: :tags: sql :tickets: tweak DISTINCT precedence for clauses like `func.count(t.c.col.distinct())` .. change:: :tags: sql :tickets: 719 Fixed detection of internal '$' characters in :bind$params .. change:: :tags: sql :tickets: 768 dont assume join criterion consists only of column objects .. change:: :tags: sql :tickets: 764 adjusted operator precedence of NOT to match '==' and others, so that ~(x==y) produces NOT (x=y), which is compatible with MySQL < 5.0 (doesn't like "NOT x=y") .. change:: :tags: orm :tickets: 687 added a check for joining from A->B using join(), along two different m2m tables. this raises an error in 0.3 but is possible in 0.4 when aliases are used. .. change:: :tags: orm :tickets: fixed small exception throw bug in Session.merge() .. change:: :tags: orm :tickets: fixed bug where mapper, being linked to a join where one table had no PK columns, would not detect that the joined table had no PK. .. change:: :tags: orm :tickets: 769 fixed bugs in determining proper sync clauses from custom inherit conditions .. change:: :tags: orm :tickets: 813 backref remove object operation doesn't fail if the other-side collection doesn't contain the item, supports noload collections .. change:: :tags: engine :tickets: fixed another occasional race condition which could occur when using pool with threadlocal setting .. change:: :tags: mysql :tickets: fixed specification of YEAR columns when generating schema .. change:: :tags: mssql :tickets: 679 added support for TIME columns (simulated using DATETIME) .. change:: :tags: mssql :tickets: 721 added support for BIGINT, MONEY, SMALLMONEY, UNIQUEIDENTIFIER and SQL_VARIANT .. change:: :tags: mssql :tickets: 684 index names are now quoted when dropping from reflected tables .. change:: :tags: mssql :tickets: can now specify a DSN for PyODBC, using a URI like mssql:///?dsn=bob .. change:: :tags: postgres :tickets: when reflecting tables from alternate schemas, the "default" placed upon the primary key, i.e. usually a sequence name, has the "schema" name unconditionally quoted, so that schema names which need quoting are fine. its slightly unnecessary for schema names which don't need quoting but not harmful. .. change:: :tags: sqlite :tickets: passthrough for stringified dates .. change:: :tags: firebird :tickets: supports_sane_rowcount() set to False due to ticket #370 (right way). .. change:: :tags: firebird :tickets: fixed reflection of Column's nullable property. .. change:: :tags: oracle :tickets: 622, 751 removed LONG_STRING, LONG_BINARY from "binary" types, so type objects don't try to read their values as LOB. .. changelog:: :version: 0.3.10 :released: Fri Jul 20 2007 .. change:: :tags: general :tickets: a new mutex that was added in 0.3.9 causes the pool_timeout feature to fail during a race condition; threads would raise TimeoutError immediately with no delay if many threads push the pool into overflow at the same time. this issue has been fixed. .. change:: :tags: sql :tickets: got connection-bound metadata to work with implicit execution .. 
change:: :tags: sql :tickets: 667 foreign key specs can have any chararcter in their identifiers .. change:: :tags: sql :tickets: 664 added commutativity-awareness to binary clause comparisons to each other, improves ORM lazy load optimization .. change:: :tags: orm :tickets: cleanup to connection-bound sessions, SessionTransaction .. change:: :tags: postgres :tickets: 571 fixed max identifier length (63) .. changelog:: :version: 0.3.9 :released: Sun Jul 15 2007 .. change:: :tags: general :tickets: 607 better error message for NoSuchColumnError .. change:: :tags: general :tickets: 428 finally figured out how to get setuptools version in, available as sqlalchemy.__version__ .. change:: :tags: general :tickets: the various "engine" arguments, such as "engine", "connectable", "engine_or_url", "bind_to", etc. are all present, but deprecated. they all get replaced by the single term "bind". you also set the "bind" of MetaData using metadata.bind = .. change:: :tags: ext :tickets: iteration over dict association proxies is now dict-like, not InstrumentedList-like (e.g. over keys instead of values) .. change:: :tags: ext :tickets: 597 association proxies no longer bind tightly to source collections, and are constructed with a thunk instead .. change:: :tags: ext :tickets: added selectone_by() to assignmapper .. change:: :tags: orm :tickets: forwards-compatibility with 0.4: added one(), first(), and all() to Query. almost all Query functionality from 0.4 is present in 0.3.9 for forwards-compat purposes. .. change:: :tags: orm :tickets: reset_joinpoint() really really works this time, promise ! lets you re-join from the root: query.join(['a', 'b']).filter().reset_joinpoint().\ join(['a', 'c']).filter().all() in 0.4 all join() calls start from the "root" .. change:: :tags: orm :tickets: 613 added synchronization to the mapper() construction step, to avoid thread collisions when pre-existing mappers are compiling in a different thread .. change:: :tags: orm :tickets: a warning is issued by Mapper when two primary key columns of the same name are munged into a single attribute. this happens frequently when mapping to joins (or inheritance). .. change:: :tags: orm :tickets: 598 synonym() properties are fully supported by all Query joining/ with_parent operations .. change:: :tags: orm :tickets: fixed very stupid bug when deleting items with many-to-many uselist=False relations .. change:: :tags: orm :tickets: remember all that stuff about polymorphic_union ? for joined table inheritance ? Funny thing... You sort of don't need it for joined table inheritance, you can just string all the tables together via outerjoin(). The UNION still applies if concrete tables are involved, though (since nothing to join them on). .. change:: :tags: orm :tickets: small fix to eager loading to better work with eager loads to polymorphic mappers that are using a straight "outerjoin" clause .. change:: :tags: sql :tickets: ForeignKey to a table in a schema thats not the default schema requires the schema to be explicit; i.e. ForeignKey('alt_schema.users.id') .. change:: :tags: sql :tickets: MetaData can now be constructed with an engine or url as the first argument, just like BoundMetaData .. change:: :tags: sql :tickets: BoundMetaData is now deprecated, and MetaData is a direct substitute. .. change:: :tags: sql :tickets: DynamicMetaData has been renamed to ThreadLocalMetaData. the DynamicMetaData name is deprecated and is an alias for ThreadLocalMetaData or a regular MetaData if threadlocal=False .. 
change:: :tags: sql :tickets: composite primary key is represented as a non-keyed set to allow for composite keys consisting of cols with the same name; occurs within a Join. helps inheritance scenarios formulate correct PK. .. change:: :tags: sql :tickets: 185 improved ability to get the "correct" and most minimal set of primary key columns from a join, equating foreign keys and otherwise equated columns. this is also mostly to help inheritance scenarios formulate the best choice of primary key columns. .. change:: :tags: sql :tickets: added 'bind' argument to Sequence.create()/drop(), ColumnDefault.execute() .. change:: :tags: sql :tickets: 650 columns can be overridden in a reflected table with a "key" attribute different than the column's name, including for primary key columns .. change:: :tags: sql :tickets: 657 fixed "ambiguous column" result detection, when dupe col names exist in a result .. change:: :tags: sql :tickets: some enhancements to "column targeting", the ability to match a column to a "corresponding" column in another selectable. this affects mostly ORM ability to map to complex joins .. change:: :tags: sql :tickets: 619 MetaData and all SchemaItems are safe to use with pickle. slow table reflections can be dumped into a pickled file to be reused later. Just reconnect the engine to the metadata after unpickling. .. change:: :tags: sql :tickets: added a mutex to QueuePool's "overflow" calculation to prevent a race condition that can bypass max_overflow .. change:: :tags: sql :tickets: 623 fixed grouping of compound selects to give correct results. will break on sqlite in some cases, but those cases were producing incorrect results anyway, sqlite doesn't support grouped compound selects .. change:: :tags: sql :tickets: 620 fixed precedence of operators so that parenthesis are correctly applied .. change:: :tags: sql :tickets: 545 calling .in_() (i.e. with no arguments) will return "CASE WHEN ( IS NULL) THEN NULL ELSE 0 END = 1)", so that NULL or False is returned in all cases, rather than throwing an error .. change:: :tags: sql :tickets: fixed "where"/"from" criterion of select() to accept a unicode string in addition to regular string - both convert to text() .. change:: :tags: sql :tickets: 558 added standalone distinct() function in addition to column.distinct() .. change:: :tags: sql :tickets: result.last_inserted_ids() should return a list that is identically sized to the primary key constraint of the table. values that were "passively" created and not available via cursor.lastrowid will be None. .. change:: :tags: sql :tickets: 589 long-identifier detection fixed to use > rather than >= for max ident length .. change:: :tags: sql :tickets: 593 fixed bug where selectable.corresponding_column(selectable.c.col) would not return selectable.c.col, if the selectable is a join of a table and another join involving the same table. messed up ORM decision making .. change:: :tags: sql :tickets: 595 added Interval type to types.py .. change:: :tags: mysql :tickets: 625 fixed catching of some errors that imply a dropped connection .. change:: :tags: mysql :tickets: 624 fixed escaping of the modulo operator .. change:: :tags: mysql :tickets: 590 added 'fields' to reserved words .. change:: :tags: mysql :tickets: various reflection enhancement/fixes .. change:: :tags: oracle :tickets: 604 datetime fixes: got subsecond TIMESTAMP to work, added OracleDate which supports types.Date with only year/month/day .. 
change:: :tags: oracle :tickets: added dialect flag "auto_convert_lobs", defaults to True; will cause any LOB objects detected in a result set to be forced into OracleBinary so that the LOB is read() automatically, if no typemap was present (i.e., if a textual execute() was issued). .. change:: :tags: oracle :tickets: 624 mod operator '%' produces MOD .. change:: :tags: oracle :tickets: 542 converts cx_oracle datetime objects to Python datetime.datetime when Python 2.3 used .. change:: :tags: oracle :tickets: fixed unicode conversion in Oracle TEXT type .. change:: :tags: postgres :tickets: 624 fixed escaping of the modulo operator .. change:: :tags: postgres :tickets: 570 added support for reflection of domains .. change:: :tags: postgres :tickets: types which are missing during reflection resolve to Null type instead of raising an error .. change:: :tags: postgres :tickets: the fix in "schema" above fixes reflection of foreign keys from an alt-schema table to a public schema table .. change:: :tags: sqlite :tickets: rearranged dialect initialization so it has time to warn about pysqlite1 being too old. .. change:: :tags: sqlite :tickets: sqlite better handles datetime/date/time objects mixed and matched with various Date/Time/DateTime columns .. change:: :tags: sqlite :tickets: 603 string PK column inserts dont get overwritten with OID .. change:: :tags: mssql :tickets: 634 fix port option handling for pyodbc .. change:: :tags: mssql :tickets: now able to reflect start and increment values for identity columns .. change:: :tags: mssql :tickets: preliminary support for using scope_identity() with pyodbc .. changelog:: :version: 0.3.8 :released: Sat Jun 02 2007 .. change:: :tags: engines :tickets: added detach() to Connection, allows underlying DBAPI connection to be detached from its pool, closing on dereference/close() instead of being reused by the pool. .. change:: :tags: engines :tickets: added invalidate() to Connection, immediately invalidates the Connection and its underlying DBAPI connection. .. change:: :tags: sql :tickets: _Label class overrides compare_self to return its ultimate object. meaning, if you say someexpr.label('foo') == 5, it produces the correct "someexpr == 5". .. change:: :tags: sql :tickets: _Label propagates "_hide_froms()" so that scalar selects behave more properly with regards to FROM clause #574 .. change:: :tags: sql :tickets: fix to long name generation when using oid_column as an order by (oids used heavily in mapper queries) .. change:: :tags: sql :tickets: significant speed improvement to ResultProxy, pre-caches TypeEngine dialect implementations and saves on function calls per column .. change:: :tags: sql :tickets: parenthesis are applied to clauses via a new _Grouping construct. uses operator precedence to more intelligently apply parenthesis to clauses, provides cleaner nesting of clauses (doesnt mutate clauses placed in other clauses, i.e. no 'parens' flag) .. change:: :tags: sql :tickets: added 'modifier' keyword, works like func. except does not add parenthesis. e.g. select([modifier.DISTINCT(...)]) etc. .. change:: :tags: sql :tickets: 578 removed "no group by's in a select thats part of a UNION" restriction .. change:: :tags: orm :tickets: added reset_joinpoint() method to Query, moves the "join point" back to the starting mapper. 0.4 will change the behavior of join() to reset the "join point" in all cases so this is an interim method. for forwards compatibility, ensure joins across multiple relations are specified using a single join(), i.e. 
join(['a', 'b', 'c']). .. change:: :tags: orm :tickets: fixed bug in query.instances() that wouldn't handle more than one additional mapper or one additional column. .. change:: :tags: orm :tickets: "delete-orphan" no longer implies "delete". ongoing effort to separate the behavior of these two operations. .. change:: :tags: orm :tickets: many-to-many relationships properly set the type of bind params for delete operations on the association table .. change:: :tags: orm :tickets: many-to-many relationships check that the number of rows deleted from the association table by a delete operation matches the expected results .. change:: :tags: orm :tickets: session.get() and session.load() propagate \**kwargs through to query .. change:: :tags: orm :tickets: 577 fix to polymorphic query which allows the original polymorphic_union to be embedded into a correlated subquery .. change:: :tags: orm :tickets: fix to select_by(=) -style joins in conjunction with many-to-many relationships, bug introduced in r2556 .. change:: :tags: orm :tickets: the "primary_key" argument to mapper() is propagated to the "polymorphic" mapper. primary key columns in this list get normalized to that of the mapper's local table. .. change:: :tags: orm :tickets: restored logging of "lazy loading clause" under sa.orm.strategies logger, got removed in 0.3.7 .. change:: :tags: orm :tickets: improved support for eager loading of properties off of mappers that are mapped to select() statements; i.e. the eager loader is better at locating the correct selectable with which to attach its LEFT OUTER JOIN. .. change:: :tags: mysql :tickets: Nearly all MySQL column types are now supported for declaration and reflection. Added NCHAR, NVARCHAR, VARBINARY, TINYBLOB, LONGBLOB, YEAR .. change:: :tags: mysql :tickets: The sqltypes.Binary passthrough now always builds a BLOB, avoiding problems with very old database versions .. change:: :tags: mysql :tickets: support for column-level CHARACTER SET and COLLATE declarations, as well as ASCII, UNICODE, NATIONAL and BINARY shorthand. .. change:: :tags: firebird :tickets: set max identifier length to 31 .. change:: :tags: firebird :tickets: supports_sane_rowcount() set to False due to ticket #370. the versioned_id_col feature won't work in FB. .. change:: :tags: firebird :tickets: some execution fixes .. change:: :tags: firebird :tickets: new association proxy implementation, implementing complete proxies to list, dict and set-based relation collections .. change:: :tags: firebird :tickets: added orderinglist, a custom list class that synchronizes an object attribute with that object's position in the list .. change:: :tags: firebird :tickets: small fix to SelectResultsExt to not bypass itself during select(). .. change:: :tags: firebird :tickets: added filter(), filter_by() to assignmapper .. changelog:: :version: 0.3.7 :released: Sun Apr 29 2007 .. change:: :tags: engines :tickets: warnings module used for issuing warnings (instead of logging) .. change:: :tags: engines :tickets: 480 cleanup of DBAPI import strategies across all engines .. change:: :tags: engines :tickets: refactoring of engine internals which reduces complexity and the number of codepaths; places more state inside of ExecutionContext to allow more dialect control of cursor handling, result sets. ResultProxy totally refactored and also has two versions of "buffered" result sets used for different purposes. .. change:: :tags: engines :tickets: 514 server side cursor support fully functional in postgres. ..
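As a rough sketch of the single-call join() form recommended in the 0.3.8 ORM note above: the User class, its 'orders'/'items'/'addresses' relations and the items_table are hypothetical, and the generative calls shown follow the 0.3-era Query API (several were renamed in later releases)::

    from sqlalchemy.orm import create_session

    session = create_session()

    # one join() across the full relation path, rather than successive calls
    q = session.query(User).join(['orders', 'items'])
    q = q.filter(items_table.c.price > 10.0)

    # reset_joinpoint() moves the "join point" back to User before joining
    # along an unrelated path
    q = q.reset_joinpoint().join(['addresses'])
    print(q.list())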
change:: :tags: engines :tickets: improved framework for auto-invalidation of connections that have lost their underlying database, via dialect-specific detection of exceptions corresponding to that database's disconnect related error messages. Additionally, when a "connection no longer open" condition is detected, the entire connection pool is discarded and replaced with a new instance. #516 .. change:: :tags: engines :tickets: 521 the dialects within sqlalchemy.databases become a setuptools entry points. loading the built-in database dialects works the same as always, but if none found will fall back to trying pkg_resources to load an external module .. change:: :tags: engines :tickets: Engine contains a "url" attribute referencing the url.URL object used by create_engine(). .. change:: :tags: sql :tickets: keys() of result set columns are not lowercased, come back exactly as they're expressed in cursor.description. note this causes colnames to be all caps in oracle. .. change:: :tags: sql :tickets: preliminary support for unicode table names, column names and SQL statements added, for databases which can support them. Works with sqlite and postgres so far. Mysql *mostly* works except the has_table() function does not work. Reflection works too. .. change:: :tags: sql :tickets: 522 the Unicode type is now a direct subclass of String, which now contains all the "convert_unicode" logic. This helps the variety of unicode situations that occur in db's such as MS-SQL to be better handled and allows subclassing of the Unicode datatype. .. change:: :tags: sql :tickets: ClauseElements can be used in in_() clauses now, such as bind parameters, etc. #476 .. change:: :tags: sql :tickets: reverse operators implemented for `CompareMixin` elements, allows expressions like "5 + somecolumn" etc. #474 .. change:: :tags: sql :tickets: the "where" criterion of an update() and delete() now correlates embedded select() statements against the table being updated or deleted. this works the same as nested select() statement correlation, and can be disabled via the correlate=False flag on the embedded select(). .. change:: :tags: sql :tickets: 512 column labels are now generated in the compilation phase, which means their lengths are dialect-dependent. So on oracle a label that gets truncated to 30 chars will go out to 63 characters on postgres. Also, the true labelname is always attached as the accessor on the parent Selectable so theres no need to be aware of the "truncated" label names. .. change:: :tags: sql :tickets: column label and bind param "truncation" also generate deterministic names now, based on their ordering within the full statement being compiled. this means the same statement will produce the same string across application restarts and allowing DB query plan caching to work better. .. change:: :tags: sql :tickets: 513 the "mini" column labels generated when using subqueries, which are to work around glitchy SQLite behavior that doesnt understand "foo.id" as equivalent to "id", are now only generated in the case that those named columns are selected from (part of) .. change:: :tags: sql :tickets: the label() method on ColumnElement will properly propagate the TypeEngine of the base element out to the label, including a label() created from a scalar=True select() statement. .. change:: :tags: sql :tickets: 513 MS-SQL better detects when a query is a subquery and knows not to generate ORDER BY phrases for those .. 
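The reverse-operator support noted above ("5 + somecolumn") can be pictured with a minimal sketch; the table here is hypothetical and the list-based select() is the old-style construct used throughout this series::

    from sqlalchemy import MetaData, Table, Column, Integer, select

    meta = MetaData()
    t = Table('t', meta, Column('x', Integer))

    # a plain Python value on the left-hand side now produces a full
    # SQL expression (the 5 becomes a bound parameter)
    expr = 5 + t.c.x
    print(select([expr]))   # roughly: SELECT :param_1 + t.x FROM t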
change:: :tags: sql :tickets: 505 fix for fetchmany() "size" argument being positional in most dbapis .. change:: :tags: sql :tickets: sending None as an argument to func. will produce an argument of NULL .. change:: :tags: sql :tickets: query strings in unicode URLs get keys encoded to ascii for \**kwargs compat .. change:: :tags: sql :tickets: 523 slight tweak to raw execute() change to also support tuples for positional parameters, not just lists .. change:: :tags: sql :tickets: fix to case() construct to propagate the type of the first WHEN condition as the return type of the case statement .. change:: :tags: orm :tickets: fixed critical issue when, after options(eagerload()) is used, the mapper would then always apply query "wrapping" behavior for all subsequent LIMIT/OFFSET/DISTINCT queries, even if no eager loading was applied on those subsequent queries. .. change:: :tags: orm :tickets: 541 added query.with_parent(someinstance) method. searches for target instance using lazy join criterion from parent instance. takes optional string "property" to isolate the desired relation. also adds static Query.query_from_parent(instance, property) version. .. change:: :tags: orm :tickets: 554 improved query.XXX_by(someprop=someinstance) querying to use similar methodology to with_parent, i.e. using the "lazy" clause which prevents adding the remote instance's table to the SQL, thereby making more complex conditions possible .. change:: :tags: orm :tickets: added generative versions of aggregates, i.e. sum(), avg(), etc. to query. used via query.apply_max(), apply_sum(), etc. #552 .. change:: :tags: orm :tickets: fix to using distinct() or distinct=True in combination with join() and similar .. change:: :tags: orm :tickets: corresponding to label/bindparam name generation, eager loaders generate deterministic names for the aliases they create using md5 hashes. .. change:: :tags: orm :tickets: improved/fixed custom collection classes when giving it "set"/ "sets.Set" classes or subclasses (was still looking for append() methods on them during lazy loads) .. change:: :tags: orm :tickets: restored old "column_property()" ORM function (used to be called "column()") to force any column expression to be added as a property on a mapper, particularly those that aren't present in the mapped selectable. this allows "scalar expressions" of any kind to be added as relations (though they have issues with eager loads). .. change:: :tags: orm :tickets: 533 fix to many-to-many relationships targeting polymorphic mappers .. change:: :tags: orm :tickets: 543 making progress with session.merge() as well as combining its usage with entity_name .. change:: :tags: orm :tickets: the usual adjustments to relationships between inheriting mappers, in this case establishing relation()s to subclass mappers where the join conditions come from the superclass' table .. change:: :tags: informix :tickets: informix support added ! courtesy James Zhang, who put a ton of effort in. .. change:: :tags: sqlite :tickets: removed silly behavior where sqlite would reflect UNIQUE indexes as part of the primary key (?!) .. change:: :tags: oracle :tickets: small fix to allow successive compiles of the same SELECT object which features LIMIT/OFFSET. oracle dialect needs to modify the object to have ROW_NUMBER OVER and wasn't performing the full series of steps on successive compiles. .. change:: :tags: mysql :tickets: support for SSL arguments given as inline within URL query string, prefixed with "ssl\_", courtesy terjeros@gmail.com. .. 
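A minimal sketch of the restored column_property() usage noted above, using the classical mapper() style of this series; the users/addresses tables and User class are hypothetical and exact signatures may differ between releases::

    from sqlalchemy import MetaData, Table, Column, Integer, String, \
        ForeignKey, select, func
    from sqlalchemy.orm import mapper, column_property

    meta = MetaData()
    users = Table('users', meta,
                  Column('id', Integer, primary_key=True),
                  Column('name', String(50)))
    addresses = Table('addresses', meta,
                      Column('id', Integer, primary_key=True),
                      Column('user_id', Integer, ForeignKey('users.id')))

    class User(object):
        pass

    # attach a scalar SELECT expression as a read-only mapped attribute,
    # even though it is not present in the mapped selectable
    mapper(User, users, properties={
        'address_count': column_property(
            select([func.count(addresses.c.id)],
                   addresses.c.user_id == users.c.id).label('address_count'))
    })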
change:: :tags: , mysql :tickets: mysql uses "DESCRIBE.", catching exceptions if table doesnt exist, in order to determine if a table exists. this supports unicode table names as well as schema names. tested with MySQL5 but should work with 4.1 series as well. (#557) .. change:: :tags: extensions :tickets: big fix to AssociationProxy so that multiple AssociationProxy objects can be associated with a single association collection. .. change:: :tags: extensions :tickets: assign_mapper names methods according to their keys (i.e. __name__) #551 .. change:: :tags: mssql :tickets: pyodbc is now the preferred DB-API for MSSQL, and if no module is specifically requested, will be loaded first on a module probe. .. change:: :tags: mssql :tickets: The @@SCOPE_IDENTITY is now used instead of @@IDENTITY. This behavior may be overridden with the engine_connect "use_scope_identity" keyword parameter, which may also be specified in the dburi. .. changelog:: :version: 0.3.6 :released: Fri Mar 23 2007 .. change:: :tags: sql :tickets: bindparam() names are now repeatable! specify two distinct bindparam()s with the same name in a single statement, and the key will be shared. proper positional/named args translate at compile time. for the old behavior of "aliasing" bind parameters with conflicting names, specify "unique=True" - this option is still used internally for all the auto-genererated (value-based) bind parameters. .. change:: :tags: sql :tickets: slightly better support for bind params as column clauses, either via bindparam() or via literal(), i.e. select([literal('foo')]) .. change:: :tags: sql :tickets: MetaData can bind to an engine either via "url" or "engine" kwargs to constructor, or by using connect() method. BoundMetaData is identical to MetaData except engine_or_url param is required. DynamicMetaData is the same and provides thread-local connections be default. .. change:: :tags: sql :tickets: exists() becomes useable as a standalone selectable, not just in a WHERE clause, i.e. exists([columns], criterion).select() .. change:: :tags: sql :tickets: correlated subqueries work inside of ORDER BY, GROUP BY .. change:: :tags: sql :tickets: fixed function execution with explicit connections, i.e. conn.execute(func.dosomething()) .. change:: :tags: sql :tickets: use_labels flag on select() wont auto-create labels for literal text column elements, since we can make no assumptions about the text. to create labels for literal columns, you can say "somecol AS somelabel", or use literal_column("somecol").label("somelabel") .. change:: :tags: sql :tickets: quoting wont occur for literal columns when they are "proxied" into the column collection for their selectable (is_literal flag is propagated). literal columns are specified via literal_column("somestring"). .. change:: :tags: sql :tickets: added "fold_equivalents" boolean argument to Join.select(), which removes 'duplicate' columns from the resulting column clause that are known to be equivalent based on the join condition. this is of great usage when constructing subqueries of joins which Postgres complains about if duplicate column names are present. .. change:: :tags: sql :tickets: 503 fixed use_alter flag on ForeignKeyConstraint .. change:: :tags: sql :tickets: 506 fixed usage of 2.4-only "reversed" in topological.py .. change:: :tags: sql :tickets: 501 for hackers, refactored the "visitor" system of ClauseElement and SchemaItem so that the traversal of items is controlled by the ClauseVisitor itself, using the method visitor.traverse(item). 
accept_visitor() methods can still be called directly but will not do any traversal of child items. ClauseElement/SchemaItem now have a configurable get_children() method to return the collection of child elements for each parent object. This allows the full traversal of items to be clear and unambiguous (as well as loggable), with an easy method of limiting a traversal (just pass flags which are picked up by appropriate get_children() methods). .. change:: :tags: sql :tickets: the "else\_" parameter to the case statement now properly works when set to zero. .. change:: :tags: orm :tickets: the full featureset of the SelectResults extension has been merged into a new set of methods available off of Query. These methods all provide "generative" behavior, whereby the Query is copied and a new one returned with additional criterion added. The new methods include: * filter() - applies select criterion to the query * filter_by() - applies "by"-style criterion to the query * avg() - return the avg() function on the given column * join() - join to a property (or across a list of properties) * outerjoin() - like join() but uses LEFT OUTER JOIN * limit()/offset() - apply LIMIT/OFFSET range-based access which applies limit/offset: session.query(Foo)[3:5] * distinct() - apply DISTINCT * list() - evaluate the criterion and return results no incompatible changes have been made to Query's API and no methods have been deprecated. Existing methods like select(), select_by(), get(), get_by() all execute the query at once and return results like they always did. join_to()/join_via() are still there although the generative join()/outerjoin() methods are easier to use. .. change:: :tags: orm :tickets: the return value for multiple mappers used with instances() now returns a cartesian product of the requested list of mappers, represented as a list of tuples. this corresponds to the documented behavior. So that instances match up properly, the "uniquing" is disabled when this feature is used. .. change:: :tags: orm :tickets: Query has add_entity() and add_column() generative methods. these will add the given mapper/class or ColumnElement to the query at compile time, and apply them to the instances() method. the user is responsible for constructing reasonable join conditions (otherwise you can get full cartesian products). result set is the list of tuples, non-uniqued. .. change:: :tags: orm :tickets: strings and columns can also be sent to the \*args of instances() where those exact result columns will be part of the result tuples. .. change:: :tags: orm :tickets: a full select() construct can be passed to query.select() (which worked anyway), but also query.selectfirst(), query.selectone() which will be used as is (i.e. no query is compiled). works similarly to sending the results to instances(). .. change:: :tags: orm :tickets: 495 eager loading will not "aliasize" "order by" clauses that were placed in the select statement by something other than the eager loader itself, to fix possibility of dupe columns as illustrated in. however, this means you have to be more careful with the columns placed in the "order by" of Query.select(), that you have explicitly named them in your criterion (i.e. you cant rely on the eager loader adding them in for you) .. change:: :tags: orm :tickets: added a handy multi-use "identity_key()" method to Session, allowing the generation of identity keys for primary key values, instances, and rows, courtesy Daniel Miller .. 
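The generative methods merged from SelectResults, listed earlier in this 0.3.6 section, combine roughly as follows; the User mapping and users table are assumed to exist, and 0.3-era spellings such as list() were later renamed::

    from sqlalchemy.orm import create_session

    session = create_session()

    q = session.query(User)
    q = q.filter(users.c.name.like('ed%'))   # add WHERE criterion
    q = q.outerjoin('addresses')             # LEFT OUTER JOIN along a relation
    q = q.limit(10).offset(20)               # or range access: session.query(User)[20:30]
    print(q.list())                          # evaluate the criterion and return results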
change:: :tags: orm :tickets: 249 many-to-many table will be properly handled even for operations that occur on the "backref" side of the operation .. change:: :tags: orm :tickets: 492 added "refresh-expire" cascade. allows refresh() and expire() calls to propagate along relationships. .. change:: :tags: orm :tickets: 493 more fixes to polymorphic relations, involving proper lazy-clause generation on many-to-one relationships to polymorphic mappers. also fixes to detection of "direction", more specific targeting of columns that belong to the polymorphic union vs. those that dont. .. change:: :tags: orm :tickets: some fixes to relationship calcs when using "viewonly=True" to pull in other tables into the join condition which arent parent of the relationship's parent/child mappings .. change:: :tags: orm :tickets: flush fixes on cyclical-referential relationships that contain references to other instances outside of the cyclical chain, when some of the objects in the cycle are not actually part of the flush .. change:: :tags: orm :tickets: 500 put an aggressive check for "flushing object A with a collection of B's, but you put a C in the collection" error condition - **even if C is a subclass of B**, unless B's mapper loads polymorphically. Otherwise, the collection will later load a "B" which should be a "C" (since its not polymorphic) which breaks in bi-directional relationships (i.e. C has its A, but A's backref will lazyload it as a different instance of type "B") This check is going to bite some of you who do this without issues, so the error message will also document a flag "enable_typechecks=False" to disable this checking. But be aware that bi-directional relationships in particular become fragile without this check. .. change:: :tags: extensions :tickets: 472 options() method on SelectResults now implemented "generatively" like the rest of the SelectResults methods. But you're going to just use Query now anyway. .. change:: :tags: extensions :tickets: query() method is added by assignmapper. this helps with navigating to all the new generative methods on Query. .. change:: :tags: ms-sql :tickets: removed seconds input on DATE column types (probably should remove the time altogether) .. change:: :tags: ms-sql :tickets: null values in float fields no longer raise errors .. change:: :tags: ms-sql :tickets: LIMIT with OFFSET now raises an error (MS-SQL has no OFFSET support) .. change:: :tags: ms-sql :tickets: 509 added an facility to use the MSSQL type VARCHAR(max) instead of TEXT for large unsized string fields. Use the new "text_as_varchar" to turn it on. .. change:: :tags: ms-sql :tickets: ORDER BY clauses without a LIMIT are now stripped in subqueries, as MS-SQL forbids this usage .. change:: :tags: ms-sql :tickets: 480 cleanup of module importing code; specifiable DB-API module; more explicit ordering of module preferences. .. change:: :tags: oracle :tickets: got binary working for any size input ! cx_oracle works fine, it was my fault as BINARY was being passed and not BLOB for setinputsizes (also unit tests werent even setting input sizes). .. change:: :tags: oracle :tickets: also fixed CLOB read/write on a separate changeset. .. change:: :tags: oracle :tickets: auto_setinputsizes defaults to True for Oracle, fixed cases where it improperly propagated bad types. .. change:: :tags: mysql :tickets: added a catchall \**kwargs to MSString, to help reflection of obscure types (like "varchar() binary" in MS 4.0) .. 
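The enable_typechecks flag described in the collection type-check note above would be applied roughly like this; the A/B classes and tables are hypothetical, using classical mapping::

    from sqlalchemy import MetaData, Table, Column, Integer, ForeignKey
    from sqlalchemy.orm import mapper, relation

    meta = MetaData()
    a_table = Table('a', meta, Column('id', Integer, primary_key=True))
    b_table = Table('b', meta,
                    Column('id', Integer, primary_key=True),
                    Column('a_id', Integer, ForeignKey('a.id')))

    class A(object):
        pass

    class B(object):
        pass

    mapper(A, a_table, properties={
        # allow arbitrary subtypes of B in A.bs without the new type check
        'bs': relation(B, enable_typechecks=False)
    })
    mapper(B, b_table)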
change:: :tags: mysql :tickets: added explicit MSTimeStamp type which takes effect when using types.TIMESTAMP. .. changelog:: :version: 0.3.5 :released: Thu Feb 22 2007 .. change:: :tags: sql :tickets: the value of "case_sensitive" defaults to True now, regardless of the casing of the identifier, unless specifically set to False. this is because the object might be labeled as something else which does contain mixed case, and propagating "case_sensitive=False" breaks that. Other fixes to quoting when using labels and "fake" column objects .. change:: :tags: sql :tickets: added a "supports_execution()" method to ClauseElement, so that individual kinds of clauses can express if they are appropriate for executing...such as, you can execute a "select", but not a "Table" or a "Join". .. change:: :tags: sql :tickets: fixed argument passing to straight textual execute() on engine, connection. can handle \*args or a list instance for positional, \**kwargs or a dict instance for named args, or a list of lists or dicts to invoke executemany() .. change:: :tags: sql :tickets: small fix to BoundMetaData to accept unicode or string URLs .. change:: :tags: sql :tickets: 466 fixed named PrimaryKeyConstraint generation courtesy andrija at gmail .. change:: :tags: sql :tickets: 464 fixed generation of CHECK constraints on columns .. change:: :tags: sql :tickets: fixes to tometadata() operation to propagate Constraints at column and table level .. change:: :tags: oracle :tickets: 436 when returning "rowid" as the ORDER BY column or in use with ROW_NUMBER OVER, the oracle dialect checks the selectable it's being applied to and will switch to table PK if not applicable, i.e. for a UNION. checking for DISTINCT, GROUP BY (other places where rowid is invalid) is still a TODO. allows polymorphic mappings to function. .. change:: :tags: oracle :tickets: sequences on a non-pk column will properly fire off on INSERT .. change:: :tags: oracle :tickets: 435 added PrefetchingResultProxy support to pre-fetch LOB columns when they are known to be present .. change:: :tags: oracle :tickets: 379 implemented reflection of tables based on synonyms, including across dblinks .. change:: :tags: oracle :tickets: 363 issues a log warning when a related table can't be reflected due to certain permission errors .. change:: :tags: mysql :tickets: fix to reflection on older DB's that might return array() type for "show variables like" statements .. change:: :tags: postgres :tickets: 442 better reflection of sequences for alternate-schema Tables .. change:: :tags: postgres :tickets: sequences on a non-pk column will properly fire off on INSERT .. change:: :tags: postgres :tickets: 460, 444 added PGInterval type, PGInet type .. change:: :tags: mssql :tickets: 419 preliminary support for pyodbc (Yay!) .. change:: :tags: mssql :tickets: 298 better support for NVARCHAR types added .. change:: :tags: mssql :tickets: fix for commit logic on pymssql .. change:: :tags: mssql :tickets: 456 fix for query.get() with schema .. change:: :tags: mssql :tickets: 473 fix for non-integer relationships .. change:: :tags: mssql :tickets: 419 DB-API module now selectable at run-time .. change:: :tags: mssql :tickets: 422, 481, 415 now passes many more unit tests .. change:: :tags: mssql :tickets: 479 better unittest compatibility with ANSI functions .. change:: :tags: mssql :tickets: 415 improved support for implicit sequence PK columns with auto-insert .. change:: :tags: mssql :tickets: 371 fix for blank password in adodbapi ..
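The textual execute() argument styles described in the 0.3.5 sql note above look roughly like this; the bind parameter format (? vs. :name) depends on the DBAPI in use, and the table is only illustrative::

    from sqlalchemy import create_engine

    engine = create_engine('sqlite://')
    engine.execute("CREATE TABLE users (id INTEGER, name VARCHAR(20))")

    # positional parameters: *args or a single list
    engine.execute("INSERT INTO users VALUES (?, ?)", 1, 'ed')
    engine.execute("INSERT INTO users VALUES (?, ?)", [2, 'wendy'])

    # a list of lists (or dicts) invokes executemany()
    engine.execute("INSERT INTO users VALUES (?, ?)", [[3, 'mary'], [4, 'fred']])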
change:: :tags: mssql :tickets: 481 fixes to get unit tests working with pyodbc .. change:: :tags: mssql :tickets: fix to auto_identity_insert on db-url query .. change:: :tags: mssql :tickets: added query_timeout to db-url query params. currently works only for pymssql .. change:: :tags: mssql :tickets: tested with pymssql 0.8.0 (which is now LGPL) .. change:: :tags: orm, bugs :tickets: 441, 448, 439 another refactoring to relationship calculation. Allows more accurate ORM behavior with relationships from/to/between mappers, particularly polymorphic mappers, also their usage with Query, SelectResults. .. change:: :tags: orm, bugs :tickets: removed deprecated method of specifying custom collections on classes; you must now use the "collection_class" option. the old way was beginning to produce conflicts when people used assign_mapper(), which now patches an "options" method, in conjunction with a relationship named "options". (relationships take precedence over monkeypatched assign_mapper methods). .. change:: :tags: orm, bugs :tickets: 454 extension() query option propagates to Mapper._instance() method so that all loading-related methods get called .. change:: :tags: orm, bugs :tickets: eager relation to an inheriting mapper won't fail if no rows are returned for the relationship. .. change:: :tags: orm, bugs :tickets: 486 eager relation loading bug fixed for eager relation on multiple descendant classes .. change:: :tags: orm, bugs :tickets: 423 fix for very large topological sorts, courtesy ants.aasma at gmail .. change:: :tags: orm, bugs :tickets: eager loading is slightly more strict about detecting "self-referential" relationships, specifically between polymorphic mappers. this results in an "eager degrade" to lazy loading. .. change:: :tags: orm, bugs :tickets: 449 improved support for complex queries embedded into "where" criterion for query.select() .. change:: :tags: orm, bugs :tickets: 485 mapper options like eagerload(), lazyload(), deferred(), will work for "synonym()" relationships .. change:: :tags: orm, bugs :tickets: 445 fixed bug where cascade operations incorrectly included deleted collection items in the cascade .. change:: :tags: orm, bugs :tickets: 478 fixed relationship deletion error when a one-to-many child item is moved to a new parent in a single unit of work .. change:: :tags: orm, bugs :tickets: fixed relationship deletion error where parent/child with a single column as PK/FK on the child would raise a "blank out the primary key" error, if manually deleted or a "delete" cascade without "delete-orphan" was used .. change:: :tags: orm, bugs :tickets: fix to deferred so that the load operation doesn't mistakenly occur when only PK col attributes are set .. change:: :tags: orm, enhancements :tickets: 385 implemented foreign_keys argument to mapper. use in conjunction with primaryjoin/secondaryjoin arguments to specify/override foreign keys defined on the Table instance. .. change:: :tags: orm, enhancements :tickets: contains_eager('foo') automatically implies eagerload('foo') .. change:: :tags: orm, enhancements :tickets: added "alias" argument to contains_eager(). use it to specify the string name or Alias instance of an alias used in the query for the eagerly loaded child items. easier to use than "decorator" .. change:: :tags: orm, enhancements :tickets: added "contains_alias()" option for result set mapping to an alias of the mapped table .. change:: :tags: orm, enhancements :tickets: 468 added support for the py2.5 "with" statement with SessionTransaction ..
change:: :tags: extensions :tickets: added distinct() method to SelectResults. generally should only make a difference when using count(). .. change:: :tags: extensions :tickets: 472 added options() method to SelectResults, equivalent to query.options() .. change:: :tags: extensions :tickets: 462 added optional __table_opts__ dictionary to ActiveMapper, will send kw options to Table objects .. change:: :tags: extensions :tickets: 467 added selectfirst(), selectfirst_by() to assign_mapper .. changelog:: :version: 0.3.4 :released: Tue Jan 23 2007 .. change:: :tags: general :tickets: global "insure"->"ensure" change. in US english "insure" is actually largely interchangeable with "ensure" (so says the dictionary), so I'm not completely illiterate, but its definitely sub-optimal to "ensure" which is non-ambiguous. .. change:: :tags: sql :tickets: added "fetchmany()" support to ResultProxy .. change:: :tags: sql :tickets: added support for column "key" attribute to be useable in row[]/row. .. change:: :tags: sql :tickets: changed "BooleanExpression" to subclass from "BinaryExpression", so that boolean expressions can also follow column-clause behaviors (i.e. label(), etc). .. change:: :tags: sql :tickets: trailing underscores are trimmed from func. calls, such as func.if_() .. change:: :tags: sql :tickets: fix to correlation of subqueries when the column list of the select statement is constructed with individual calls to append_column(); this fixes an ORM bug whereby nested select statements were not getting correlated with the main select generated by the Query object. .. change:: :tags: sql :tickets: another fix to subquery correlation so that a subquery which has only one FROM element will *not* correlate that single element, since at least one FROM element is required in a query. .. change:: :tags: sql :tickets: 414 default "timezone" setting is now False. this corresponds to Python's datetime behavior as well as Postgres' timestamp/time types (which is the only timezone-sensitive dialect at the moment) .. change:: :tags: sql :tickets: the "op()" function is now treated as an "operation", rather than a "comparison". the difference is, an operation produces a BinaryExpression from which further operations can occur whereas comparison produces the more restrictive BooleanExpression .. change:: :tags: sql :tickets: trying to redefine a reflected primary key column as non-primary key raises an error .. change:: :tags: sql :tickets: type system slightly modified to support TypeDecorators that can be overridden by the dialect (ok, thats not very clear, it allows the mssql tweak below to be possible) .. change:: :tags: mssql :tickets: added an NVarchar type (produces NVARCHAR), also MSUnicode which provides Unicode-translation for the NVarchar regardless of dialect convert_unicode setting. .. change:: :tags: postgres :tickets: 424 fix to the initial checkfirst for tables to take current schema into account .. change:: :tags: postgres :tickets: postgres has an optional "server_side_cursors=True" flag which will utilize server side cursors. these are appropriate for fetching only partial results and are necessary for working with very large unbounded result sets. While we'd like this to be the default behavior, different environments seem to have different results and the causes have not been isolated so we are leaving the feature off by default for now. Uses an apparently undocumented psycopg2 behavior recently discovered on the psycopg mailing list. .. 
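The optional psycopg2 server-side cursor mode described just above is enabled at engine configuration time; the DSN here is hypothetical, and as noted the flag is off by default::

    from sqlalchemy import create_engine

    # fetch rows incrementally on the server rather than buffering the
    # entire result set in memory
    engine = create_engine('postgres://scott:tiger@localhost/test',
                           server_side_cursors=True)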
change:: :tags: postgres :tickets: added "BIGSERIAL" support for postgres table with PGBigInteger/autoincrement .. change:: :tags: postgres :tickets: 402 fixes to postgres reflection to better handle when schema names are present; thanks to jason (at) ncsmags.com .. change:: :tags: mysql :tickets: 420 mysql is inconsistent with what kinds of quotes it uses in foreign keys during a SHOW CREATE TABLE, reflection updated to accomodate for all three styles .. change:: :tags: mysql :tickets: 418 mysql table create options work on a generic passthru now, i.e. Table(..., mysql_engine='InnoDB', mysql_collate="latin1_german2_ci", mysql_auto_increment="5", mysql_...), helps .. change:: :tags: firebird :tickets: 408 order of constraint creation puts primary key first before all other constraints; required for firebird, not a bad idea for others .. change:: :tags: firebird :tickets: 409 Firebird fix to autoload multifield foreign keys .. change:: :tags: firebird :tickets: 409 Firebird NUMERIC type properly handles a type without precision .. change:: :tags: oracle :tickets: *slight* support for binary, but still need to figure out how to insert reasonably large values (over 4K). requires auto_setinputsizes=True sent to create_engine(), rows must be fully fetched individually, etc. .. change:: :tags: orm :tickets: poked the first hole in the can of worms: saying query.select_by(somerelationname=someinstance) will create the join of the primary key columns represented by "somerelationname"'s mapper to the actual primary key in "someinstance". .. change:: :tags: orm :tickets: reworked how relations interact with "polymorphic" mappers, i.e. mappers that have a select_table as well as polymorphic flags. better determination of proper join conditions, interaction with user- defined join conditions, and support for self-referential polymorphic mappers. .. change:: :tags: orm :tickets: related to polymorphic mapping relations, some deeper error checking when compiling relations, to detect an ambiguous "primaryjoin" in the case that both sides of the relationship have foreign key references in the primary join condition. also tightened down conditions used to locate "relation direction", associating the "foreignkey" of the relationship with the "primaryjoin" .. change:: :tags: orm :tickets: a little bit of improvement to the concept of a "concrete" inheritance mapping, though that concept is not well fleshed out yet (added test case to support concrete mappers on top of a polymorphic base). .. change:: :tags: orm :tickets: fix to "proxy=True" behavior on synonym() .. change:: :tags: orm :tickets: 427 fixed bug where delete-orphan basically didn't work with many-to-many relationships, backref presence generally hid the symptom .. change:: :tags: orm :tickets: added a mutex to the mapper compilation step. ive been reluctant to add any kind of threading anything to SA but this is one spot that its really needed since mappers are typically "global", and while their state does not change during normal operation, the initial compilation step does modify internal state significantly, and this step usually occurs not at module-level initialization time (unless you call compile()) but at first-request time .. change:: :tags: orm :tickets: basic idea of "session.merge()" actually implemented. needs more testing. .. change:: :tags: orm :tickets: added "compile_mappers()" function as a shortcut to compiling all mappers .. 
change:: :tags: orm :tickets: fix to MapperExtension create_instance so that entity_name properly associated with new instance .. change:: :tags: orm :tickets: speed enhancements to ORM object instantiation, eager loading of rows .. change:: :tags: orm :tickets: 406 invalid options sent to 'cascade' string will raise an exception .. change:: :tags: orm :tickets: 407 fixed bug in mapper refresh/expire whereby eager loaders didnt properly re-populate item lists .. change:: :tags: orm :tickets: 413 fix to post_update to ensure rows are updated even for non insert/delete scenarios .. change:: :tags: orm :tickets: 412 added an error message if you actually try to modify primary key values on an entity and then flush it .. change:: :tags: extensions :tickets: 426 added "validate=False" argument to assign_mapper, if True will ensure that only mapped attributes are named .. change:: :tags: extensions :tickets: assign_mapper gets "options", "instances" functions added (i.e. MyClass.instances()) .. changelog:: :version: 0.3.3 :released: Fri Dec 15 2006 .. change:: :tags: :tickets: string-based FROM clauses fixed, i.e. select(..., from_obj=["sometext"]) .. change:: :tags: :tickets: fixes to passive_deletes flag, lazy=None (noload) flag .. change:: :tags: :tickets: added example/docs for dealing with large collections .. change:: :tags: :tickets: added object_session() method to sqlalchemy namespace .. change:: :tags: :tickets: fixed QueuePool bug whereby its better able to reconnect to a database that was not reachable (thanks to Sébastien Lelong), also fixed dispose() method .. change:: :tags: :tickets: 396 patch that makes MySQL rowcount work correctly! .. change:: :tags: :tickets: fix to MySQL catch of 2006/2014 errors to properly re-raise OperationalError exception .. changelog:: :version: 0.3.2 :released: Sun Dec 10 2006 .. change:: :tags: :tickets: 387 major connection pool bug fixed. fixes MySQL out of sync errors, will also prevent transactions getting rolled back accidentally in all DBs .. change:: :tags: :tickets: major speed enhancements vs. 0.3.1, to bring speed back to 0.2.8 levels .. change:: :tags: :tickets: made conditional dozens of debug log calls that were time-intensive to generate log messages .. change:: :tags: :tickets: fixed bug in cascade rules whereby the entire object graph could be unnecessarily cascaded on the save/update cascade .. change:: :tags: :tickets: various speedups in attributes module .. change:: :tags: :tickets: 388 identity map in Session is by default *no longer weak referencing*. to have it be weak referencing, use create_session(weak_identity_map=True) fixes .. change:: :tags: :tickets: MySQL detects errors 2006 (server has gone away) and 2014 (commands out of sync) and invalidates the connection on which it occured. .. change:: :tags: :tickets: 307 MySQL bool type fix: .. change:: :tags: :tickets: 382, 349 postgres reflection fixes: .. change:: :tags: :tickets: 247 added keywords for EXCEPT, INTERSECT, EXCEPT ALL, INTERSECT ALL .. change:: :tags: :tickets: 2110 assign_mapper in assignmapper extension returns the created mapper .. change:: :tags: :tickets: added label() function to Select class, when scalar=True is used to create a scalar subquery i.e. "select x, y, (select max(foo) from table) AS foomax from table" .. change:: :tags: :tickets: added onupdate and ondelete keyword arguments to ForeignKey; propagate to underlying ForeignKeyConstraint if present. (dont propagate in the other direction, however) .. 
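The onupdate/ondelete keywords noted above render as ON UPDATE / ON DELETE rules in the generated DDL; a minimal sketch with hypothetical parent/child tables::

    from sqlalchemy import MetaData, Table, Column, Integer, ForeignKey

    meta = MetaData()
    parent = Table('parent', meta, Column('id', Integer, primary_key=True))
    child = Table('child', meta,
                  Column('id', Integer, primary_key=True),
                  Column('parent_id', Integer,
                         ForeignKey('parent.id',
                                    onupdate='CASCADE', ondelete='CASCADE')))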
change:: :tags: :tickets: fix to session.update() to preserve "dirty" status of incoming object .. change:: :tags: :tickets: sending a selectable to an IN via the in_() function no longer creates a "union" out of multiple selects; only one selectable to a the in_() function is allowed now (make a union yourself if union is needed) .. change:: :tags: :tickets: improved support for disabling save-update cascade via cascade="none" etc. .. change:: :tags: :tickets: added "remote_side" argument to relation(), used only with self-referential mappers to force the direction of the parent/child relationship. replaces the usage of the "foreignkey" parameter for "switching" the direction. "foreignkey" argument is deprecated for all uses and will eventually be replaced by an argument dedicated to ForeignKey specification on mappers. .. changelog:: :version: 0.3.1 :released: Mon Nov 13 2006 .. change:: :tags: engine/pool :tickets: some new Pool utility classes, updated docs .. change:: :tags: engine/pool :tickets: "use_threadlocal" on Pool defaults to False (same as create_engine) .. change:: :tags: engine/pool :tickets: fixed direct execution of Compiled objects .. change:: :tags: engine/pool :tickets: create_engine() reworked to be strict about incoming \**kwargs. all keyword arguments must be consumed by one of the dialect, connection pool, and engine constructors, else a TypeError is thrown which describes the full set of invalid kwargs in relation to the selected dialect/pool/engine configuration. .. change:: :tags: databases/types :tickets: MySQL catches exception on "describe" and reports as NoSuchTableError .. change:: :tags: databases/types :tickets: further fixes to sqlite booleans, weren't working as defaults .. change:: :tags: databases/types :tickets: fix to postgres sequence quoting when using schemas .. change:: :tags: orm :tickets: the "delete" cascade will load in all child objects, if they were not loaded already. this can be turned off (i.e. the old behavior) by setting passive_deletes=True on a relation(). .. change:: :tags: orm :tickets: adjustments to reworked eager query generation to not fail on circular eager-loaded relationships (like backrefs) .. change:: :tags: orm :tickets: fixed bug where eagerload() (nor lazyload()) option didn't properly instruct the Query whether or not to use "nesting" when producing a LIMIT query. .. change:: :tags: orm :tickets: 360 fixed bug in circular dependency sorting at flush time; if object A contained a cyclical many-to-one relationship to object B, and object B was just attached to object A, *but* object B itself wasnt changed, the many-to-one synchronize of B's primary key attribute to A's foreign key attribute wouldnt occur. .. change:: :tags: orm :tickets: 325 implemented from_obj argument for query.count, improves count function on selectresults .. change:: :tags: orm :tickets: added an assertion within the "cascade" step of ORM relationships to check that the class of object attached to a parent object is appropriate (i.e. if A.items stores B objects, raise an error if a C is appended to A.items) .. change:: :tags: orm :tickets: new extension sqlalchemy.ext.associationproxy, provides transparent "association object" mappings. new example examples/association/proxied_association.py illustrates. .. change:: :tags: orm :tickets: improvement to single table inheritance to load full hierarchies beneath the target class .. change:: :tags: orm :tickets: 362 fix to subtle condition in topological sort where a node could appear twice, for .. 
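The remote_side argument described above is used on self-referential mappings roughly as follows; the Node class and nodes table are hypothetical, and whether a single column or a list is accepted may vary by release::

    from sqlalchemy import MetaData, Table, Column, Integer, ForeignKey
    from sqlalchemy.orm import mapper, relation

    meta = MetaData()
    nodes = Table('nodes', meta,
                  Column('id', Integer, primary_key=True),
                  Column('parent_id', Integer, ForeignKey('nodes.id')))

    class Node(object):
        pass

    # remote_side forces the many-to-one direction from child to parent
    mapper(Node, nodes, properties={
        'parent': relation(Node, remote_side=[nodes.c.id])
    })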
change:: :tags: orm :tickets: 365 additional rework to topological sort, refactoring, for .. change:: :tags: orm :tickets: "delete-orphan" for a certain type can be set on more than one parent class; the instance is an "orphan" only if its not attached to *any* of those parents .. changelog:: :version: 0.3.0 :released: Sun Oct 22 2006 .. change:: :tags: general :tickets: logging is now implemented via standard python "logging" module. "echo" keyword parameters are still functional but set/unset log levels for their respective classes/instances. all logging can be controlled directly through the Python API by setting INFO and DEBUG levels for loggers in the "sqlalchemy" namespace. class-level logging is under "sqlalchemy..", instance-level logging under "sqlalchemy...0x..<00-FF>". Test suite includes "--log-info" and "--log-debug" arguments which work independently of --verbose/--quiet. Logging added to orm to allow tracking of mapper configurations, row iteration. .. change:: :tags: general :tickets: the documentation-generation system has been overhauled to be much simpler in design and more integrated with Markdown .. change:: :tags: sqlite :tickets: sqlite boolean datatype converts False/True to 0/1 by default .. change:: :tags: sqlite :tickets: 335 fixes to Date/Time (SLDate/SLTime) types; works as good as postgres now .. change:: :tags: ms-sql :tickets: fixes bug 261 (table reflection broken for MS-SQL case-sensitive databases) .. change:: :tags: ms-sql :tickets: can now specify port for pymssql .. change:: :tags: ms-sql :tickets: introduces new "auto_identity_insert" option for auto-switching between "SET IDENTITY_INSERT" mode when values specified for IDENTITY columns .. change:: :tags: ms-sql :tickets: now supports multi-column foreign keys .. change:: :tags: ms-sql :tickets: fix to reflecting date/datetime columns .. change:: :tags: ms-sql :tickets: NCHAR and NVARCHAR type support added .. change:: :tags: oracle :tickets: Oracle has experimental support for cx_Oracle.TIMESTAMP, which requires a setinputsizes() call on the cursor that is now enabled via the 'auto_setinputsizes' flag to the oracle dialect. .. change:: :tags: firebird :tickets: aliases do not use "AS" .. change:: :tags: firebird :tickets: correctly raises NoSuchTableError when reflecting non-existent table .. change:: :tags: schema :tickets: a fair amount of cleanup to the schema package, removal of ambiguous methods, methods that are no longer needed. slightly more constrained useage, greater emphasis on explicitness .. change:: :tags: schema :tickets: the "primary_key" attribute of Table and other selectables becomes a setlike ColumnCollection object; is ordered but not numerically indexed. a comparison clause between two pks that are derived from the same underlying tables (i.e. such as two Alias objects) can be generated via table1.primary_key==table2.primary_key .. change:: :tags: schema :tickets: ForeignKey(Constraint) supports "use_alter=True", to create/drop a foreign key via ALTER. this allows circular foreign key relationships to be set up. .. change:: :tags: schema :tickets: append_item() methods removed from Table and Column; preferably construct Table/Column/related objects inline, but if needed use append_column(), append_foreign_key(), append_constraint(), etc. .. change:: :tags: schema :tickets: table.create() no longer returns the Table object, instead has no return value. the usual case is that tables are created via metadata, which is preferable since it will handle table dependencies. .. 
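The use_alter behavior noted above allows mutually-dependent tables to be created; a constraint name is typically supplied alongside it. A sketch::

    from sqlalchemy import MetaData, Table, Column, Integer, ForeignKey

    meta = MetaData()
    a = Table('a', meta,
              Column('id', Integer, primary_key=True),
              Column('b_id', Integer,
                     ForeignKey('b.id', use_alter=True, name='fk_a_b')))
    b = Table('b', meta,
              Column('id', Integer, primary_key=True),
              Column('a_id', Integer, ForeignKey('a.id')))

    # meta.create_all(engine) emits the a->b foreign key via ALTER,
    # breaking the circular dependency between the two CREATE TABLE statements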
change:: :tags: schema :tickets: added UniqueConstraint (goes at Table level), CheckConstraint (goes at Table or Column level). .. change:: :tags: schema :tickets: index=False/unique=True on Column now creates a UniqueConstraint, index=True/unique=False creates a plain Index, index=True/unique=True on Column creates a unique Index. 'index' and 'unique' keyword arguments to column are now boolean only; for explcit names and groupings of indexes or unique constraints, use the UniqueConstraint/Index constructs explicitly. .. change:: :tags: schema :tickets: added autoincrement=True to Column; will disable schema generation of SERIAL/AUTO_INCREMENT/identity seq for postgres/mysql/mssql if explicitly set to False .. change:: :tags: schema :tickets: TypeEngine objects now have methods to deal with copying and comparing values of their specific type. Currently used by the ORM, see below. .. change:: :tags: schema :tickets: fixed condition that occurred during reflection when a primary key column was explciitly overridden, where the PrimaryKeyConstraint would get both the reflected and the programmatic column doubled up .. change:: :tags: schema :tickets: the "foreign_key" attribute on Column and ColumnElement in general is deprecated, in favor of the "foreign_keys" list/set-based attribute, which takes into account multiple foreign keys on one column. "foreign_key" will return the first element in the "foreign_keys" list/set or None if the list is empty. .. change:: :tags: connections/pooling/execution :tickets: connection pool tracks open cursors and automatically closes them if connection is returned to pool with cursors still opened. Can be affected by options which cause it to raise an error instead, or to do nothing. fixes issues with MySQL, others .. change:: :tags: connections/pooling/execution :tickets: fixed bug where Connection wouldnt lose its Transaction after commit/rollback .. change:: :tags: connections/pooling/execution :tickets: added scalar() method to ComposedSQLEngine, ResultProxy .. change:: :tags: connections/pooling/execution :tickets: ResultProxy will close() the underlying cursor when the ResultProxy itself is closed. this will auto-close cursors for ResultProxy objects that have had all their rows fetched (or had scalar() called). .. change:: :tags: connections/pooling/execution :tickets: ResultProxy.fetchall() internally uses DBAPI fetchall() for better efficiency, added to mapper iteration as well (courtesy Michael Twomey) .. change:: :tags: construction, sql :tickets: 292 changed "for_update" parameter to accept False/True/"nowait" and "read", the latter two of which are interpreted only by Oracle and Mysql .. change:: :tags: construction, sql :tickets: added extract() function to sql dialect (SELECT extract(field FROM expr)) .. change:: :tags: construction, sql :tickets: BooleanExpression includes new "negate" argument to specify the appropriate negation operator if one is available. .. change:: :tags: construction, sql :tickets: calling a negation on an "IN" or "IS" clause will result in "NOT IN", "IS NOT" (as opposed to NOT (x IN y)). .. change:: :tags: construction, sql :tickets: 172 Function objects know what to do in a FROM clause now. their behavior should be the same, except now you can also do things like select(['*'], from_obj=[func.my_function()]) to get multiple columns from the result, or even use sql.column() constructs to name the return columns .. 
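The boolean-only index/unique semantics described above, in brief; the generated constraint and index names are chosen automatically, so explicit UniqueConstraint/Index constructs remain the way to control naming and grouping::

    from sqlalchemy import MetaData, Table, Column, Integer

    meta = MetaData()
    t = Table('t', meta,
              Column('id', Integer, primary_key=True),
              Column('a', Integer, unique=True),               # -> UniqueConstraint
              Column('b', Integer, index=True),                # -> Index
              Column('c', Integer, index=True, unique=True))   # -> unique Index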
change:: :tags: orm :tickets: attribute tracking modified to be more intelligent about detecting changes, particularly with mutable types. TypeEngine objects now take a greater role in defining how to compare two scalar instances, including the addition of a MutableType mixin which is implemented by PickleType. unit-of-work now tracks the "dirty" list as an expression of all persistent objects where the attribute manager detects changes. The basic issue thats fixed is detecting changes on PickleType objects, but also generalizes type handling and "modified" object checking to be more complete and extensible. .. change:: :tags: orm :tickets: a wide refactoring to "attribute loader" and "options" architectures. ColumnProperty and PropertyLoader define their loading behaivor via switchable "strategies", and MapperOptions no longer use mapper/property copying in order to function; they are instead propagated via QueryContext and SelectionContext objects at query/instances time. All of the internal copying of mappers and properties that was used to handle inheritance as well as options() has been removed; the structure of mappers and properties is much simpler than before and is clearly laid out in the new 'interfaces' module. .. change:: :tags: orm :tickets: related to the mapper/property overhaul, internal refactoring to mapper instances() method to use a SelectionContext object to track state during the operation. SLIGHT API BREAKAGE: the append_result() and populate_instances() methods on MapperExtension have a slightly different method signature now as a result of the change; hoping that these methods are not in widespread use as of yet. .. change:: :tags: orm :tickets: instances() method moved to Query now, backwards-compatible version remains on Mapper. .. change:: :tags: orm :tickets: added contains_eager() MapperOption, used in conjunction with instances() to specify properties that should be eagerly loaded from the result set, using their plain column names by default, or translated given an custom row-translation function. .. change:: :tags: orm :tickets: more rearrangements of unit-of-work commit scheme to better allow dependencies within circular flushes to work properly...updated task traversal/logging implementation .. change:: :tags: orm :tickets: 321 polymorphic mappers (i.e. using inheritance) now produces INSERT statements in order of tables across all inherited classes .. change:: :tags: orm :tickets: added an automatic "row switch" feature to mapping, which will detect a pending instance/deleted instance pair with the same identity key and convert the INSERT/DELETE to a single UPDATE .. change:: :tags: orm :tickets: "association" mappings simplified to take advantage of automatic "row switch" feature .. change:: :tags: orm :tickets: 212 "custom list classes" is now implemented via the "collection_class" keyword argument to relation(). the old way still works but is deprecated .. change:: :tags: orm :tickets: added "viewonly" flag to relation(), allows construction of relations that have no effect on the flush() process. .. change:: :tags: orm :tickets: 292 added "lockmode" argument to base Query select/get functions, including "with_lockmode" function to get a Query copy that has a default locking mode. Will translate "read"/"update" arguments into a for_update argument on the select side. .. 
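The lockmode behavior just described might be used roughly like this; the User mapping is assumed to exist, the calls follow the 0.3-era Query API, and "update" translates to SELECT ... FOR UPDATE only on backends that support it::

    from sqlalchemy.orm import create_session

    session = create_session()

    # a Query copy with a default locking mode applied to its SELECTs
    q = session.query(User).with_lockmode('update')
    user = q.get(5)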
change:: :tags: orm :tickets: implemented "version check" logic in Query/Mapper, used when version_id_col is in effect and query.with_lockmode() is used to get() an instance thats already loaded .. change:: :tags: orm :tickets: 208 post_update behavior improved; does a better job at not updating too many rows, updates only required columns .. change:: :tags: orm :tickets: 308 adjustments to eager loading so that its "eager chain" is kept separate from the normal mapper setup, thereby preventing conflicts with lazy loader operation, fixes .. change:: :tags: orm :tickets: fix to deferred group loading .. change:: :tags: orm :tickets: 346 session.flush() wont close a connection it opened .. change:: :tags: orm :tickets: added "batch=True" flag to mapper; if False, save_obj will fully save one object at a time including calls to before_XXXX and after_XXXX .. change:: :tags: orm :tickets: added "column_prefix=None" argument to mapper; prepends the given string (typically '_') to column-based attributes automatically set up from the mapper's Table .. change:: :tags: orm :tickets: 315 specifying joins in the from_obj argument of query.select() will replace the main table of the query, if the table is somewhere within the given from_obj. this makes it possible to produce custom joins and outerjoins in queries without the main table getting added twice. .. change:: :tags: orm :tickets: eagerloading is adjusted to more thoughtfully attach its LEFT OUTER JOINs to the given query, looking for custom "FROM" clauses that may have already been set up. .. change:: :tags: orm :tickets: added join_to and outerjoin_to transformative methods to SelectResults, to build up join/outerjoin conditions based on property names. also added select_from to explicitly set from_obj parameter. .. change:: :tags: orm :tickets: removed "is_primary" flag from mapper. SQLAlchemy-0.8.4/doc/build/changelog/changelog_04.rst0000644000076500000240000037070212251147171023022 0ustar classicstaff00000000000000 ============== 0.4 Changelog ============== .. changelog:: :version: 0.4.8 :released: Sun Oct 12 2008 .. change:: :tags: orm :tickets: 1039 Fixed bug regarding inherit_condition passed with "A=B" versus "B=A" leading to errors .. change:: :tags: orm :tickets: Changes made to new, dirty and deleted collections in SessionExtension.before_flush() will take effect for that flush. .. change:: :tags: orm :tickets: Added label() method to InstrumentedAttribute to establish forwards compatibility with 0.5. .. change:: :tags: sql :tickets: 1074 column.in_(someselect) can now be used as a columns-clause expression without the subquery bleeding into the FROM clause .. change:: :tags: mysql :tickets: 1146 Added MSMediumInteger type. .. change:: :tags: sqlite :tickets: 968 Supplied a custom strftime() function which handles dates before 1900. .. change:: :tags: sqlite :tickets: String's (and Unicode's, UnicodeText's, etc.) convert_unicode logic disabled in the sqlite dialect, to adjust for pysqlite 2.5.0's new requirement that only Python unicode objects are accepted; http://itsystementwicklung.de/pipermail/list-pysqlite/2008-March/000018.html .. change:: :tags: oracle :tickets: 1155 has_sequence() now takes schema name into account .. change:: :tags: oracle :tickets: 1121 added BFILE to the list of reflected types .. changelog:: :version: 0.4.7p1 :released: Thu Jul 31 2008 .. change:: :tags: orm :tickets: Added "add()" and "add_all()" to scoped_session methods. 
Workaround for 0.4.7:: from sqlalchemy.orm.scoping import ScopedSession, instrument setattr(ScopedSession, "add", instrument("add")) setattr(ScopedSession, "add_all", instrument("add_all")) .. change:: :tags: orm :tickets: Fixed non-2.3 compatible usage of set() and generator expression within relation(). .. changelog:: :version: 0.4.7 :released: Sat Jul 26 2008 .. change:: :tags: orm :tickets: 1058 The contains() operator when used with many-to-many will alias() the secondary (association) table so that multiple contains() calls will not conflict with each other .. change:: :tags: orm :tickets: fixed bug preventing merge() from functioning in conjunction with a comparable_property() .. change:: :tags: orm :tickets: the enable_typechecks=False setting on relation() now only allows subtypes with inheriting mappers. Totally unrelated types, or subtypes not set up with mapper inheritance against the target mapper are still not allowed. .. change:: :tags: orm :tickets: 976 Added is_active flag to Sessions to detect when a transaction is in progress. This flag is always True with a "transactional" (in 0.5 a non-"autocommit") Session. .. change:: :tags: sql :tickets: Fixed bug when calling select([literal('foo')]) or select([bindparam('foo')]). .. change:: :tags: schema :tickets: 571 create_all(), drop_all(), create(), drop() all raise an error if the table name or schema name contains more characters than that dialect's configured character limit. Some DB's can handle too-long table names during usage, and SQLA can handle this as well. But various reflection/ checkfirst-during-create scenarios fail since we are looking for the name within the DB's catalog tables. .. change:: :tags: schema :tickets: 571, 820 The index name generated when you say "index=True" on a Column is truncated to the length appropriate for the dialect. Additionally, an Index with a too- long name cannot be explicitly dropped with Index.drop(), similar to. .. change:: :tags: postgres :tickets: Repaired server_side_cursors to properly detect text() clauses. .. change:: :tags: postgres :tickets: 1092 Added PGCidr type. .. change:: :tags: mysql :tickets: Added 'CALL' to the list of SQL keywords which return result rows. .. change:: :tags: oracle :tickets: Oracle get_default_schema_name() "normalizes" the name before returning, meaning it returns a lower-case name when the identifier is detected as case insensitive. .. change:: :tags: oracle :tickets: 709 creating/dropping tables takes schema name into account when searching for the existing table, so that tables in other owner namespaces with the same name do not conflict .. change:: :tags: oracle :tickets: 1062 Cursors now have "arraysize" set to 50 by default on them, the value of which is configurable using the "arraysize" argument to create_engine() with the Oracle dialect. This to account for cx_oracle's default setting of "1", which has the effect of many round trips being sent to Oracle. This actually works well in conjunction with BLOB/CLOB-bound cursors, of which there are any number available but only for the life of that row request (so BufferedColumnRow is still needed, but less so). .. change:: :tags: oracle :tickets: sqlite - add SLFloat type, which matches the SQLite REAL type affinity. Previously, only SLNumeric was provided which fulfills NUMERIC affinity, but that's not the same as REAL. .. changelog:: :version: 0.4.6 :released: Sat May 10 2008 .. 
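As an aside on the Oracle cursor note in the 0.4.7 section above, the arraysize value is set per-engine; the DSN here is hypothetical::

    from sqlalchemy import create_engine

    # raise cx_oracle's row prefetch from the new default of 50
    engine = create_engine('oracle://scott:tiger@dsn', arraysize=500)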
change:: :tags: orm :tickets: Fix to the recent relation() refactoring which fixes exotic viewonly relations which join between local and remote table multiple times, with a common column shared between the joins.

.. change:: :tags: orm :tickets: Also re-established viewonly relation() configurations that join across multiple tables.

.. change:: :tags: orm :tickets: 610 Added experimental relation() flag to help with primaryjoins across functions, etc., _local_remote_pairs=[tuples]. This complements a complex primaryjoin condition allowing you to provide the individual column pairs which comprise the relation's local and remote sides. Also improved lazy load SQL generation to handle placing bind params inside of functions and other expressions. (partial progress towards)

.. change:: :tags: orm :tickets: 1036 repaired single table inheritance such that you can single-table inherit from a joined-table inheriting mapper without issue.

.. change:: :tags: orm :tickets: 1027 Fixed "concatenate tuple" bug which could occur with Query.order_by() if clause adaption had taken place.

.. change:: :tags: orm :tickets: Removed ancient assertion that mapped selectables require "alias names" - the mapper creates its own alias now if none is present. Though in this case you need to use the class, not the mapped selectable, as the source of column attributes - so a warning is still issued.

.. change:: :tags: orm :tickets: fixes to the "exists" function involving inheritance (any(), has(), ~contains()); the full target join will be rendered into the EXISTS clause for relations that link to subclasses.

.. change:: :tags: orm :tickets: restored usage of append_result() extension method for primary query rows, when the extension is present and only a single-entity result is being returned.

.. change:: :tags: orm :tickets: 1015 refined mapper._save_obj() which was unnecessarily calling __ne__() on scalar values during flush

.. change:: :tags: orm :tickets: 1019 added a feature to eager loading whereby subqueries set as column_property() with explicit label names (which is not necessary, btw) will have the label anonymized when the instance is part of the eager join, to prevent conflicts with a subquery or column of the same name on the parent object.

.. change:: :tags: orm :tickets: set-based collections \|=, -=, ^= and &= are stricter about their operands and only operate on sets, frozensets or subclasses of the collection type. Previously, they would accept any duck-typed set.

.. change:: :tags: orm :tickets: added an example dynamic_dict/dynamic_dict.py, illustrating a simple way to place dictionary behavior on top of a dynamic_loader.

.. change:: :tags: declarative, extension :tickets: Joined table inheritance mappers use a slightly relaxed function to create the "inherit condition" to the parent table, so that other foreign keys to not-yet-declared Table objects don't trigger an error.

.. change:: :tags: declarative, extension :tickets: fixed reentrant mapper compile hang when a declared attribute is used within ForeignKey, ie. ForeignKey(MyOtherClass.someattribute)

..
change:: :tags: sql :tickets: Added COLLATE support via the .collate() expression operator and collate(, ) sql function. .. change:: :tags: sql :tickets: Fixed bug with union() when applied to non-Table connected select statements .. change:: :tags: sql :tickets: 1014 improved behavior of text() expressions when used as FROM clauses, such as select().select_from(text("sometext")) .. change:: :tags: sql :tickets: 1021 Column.copy() respects the value of "autoincrement", fixes usage with Migrate .. change:: :tags: engines :tickets: Pool listeners can now be provided as a dictionary of callables or a (possibly partial) duck-type of PoolListener, your choice. .. change:: :tags: engines :tickets: added "rollback_returned" option to Pool which will disable the rollback() issued when connections are returned. This flag is only safe to use with a database which does not support transactions (i.e. MySQL/MyISAM). .. change:: :tags: ext :tickets: set-based association proxies \|=, -=, ^= and &= are stricter about their operands and only operate on sets, frozensets or other association proxies. Previously, they would accept any duck-typed set. .. change:: :tags: mssql :tickets: 1005 Added "odbc_autotranslate" parameter to engine / dburi parameters. Any given string will be passed through to the ODBC connection string as: "AutoTranslate=%s" % odbc_autotranslate .. change:: :tags: mssql :tickets: Added "odbc_options" parameter to engine / dburi parameters. The given string is simply appended to the SQLAlchemy-generated odbc connection string. This should obviate the need of adding a myriad of ODBC options in the future. .. change:: :tags: firebird :tickets: Handle the "SUBSTRING(:string FROM :start FOR :length)" builtin. .. changelog:: :version: 0.4.5 :released: Fri Apr 04 2008 .. change:: :tags: orm :tickets: A small change in behavior to session.merge() - existing objects are checked for based on primary key attributes, not necessarily _instance_key. So the widely requested capability, that: x = MyObject(id=1) x = sess.merge(x) will in fact load MyObject with id #1 from the database if present, is now available. merge() still copies the state of the given object to the persistent one, so an example like the above would typically have copied "None" from all attributes of "x" onto the persistent copy. These can be reverted using session.expire(x). .. change:: :tags: orm :tickets: Also fixed behavior in merge() whereby collection elements present on the destination but not the merged collection were not being removed from the destination. .. change:: :tags: orm :tickets: 995 Added a more aggressive check for "uncompiled mappers", helps particularly with declarative layer .. change:: :tags: orm :tickets: The methodology behind "primaryjoin"/"secondaryjoin" has been refactored. Behavior should be slightly more intelligent, primarily in terms of error messages which have been pared down to be more readable. In a slight number of scenarios it can better resolve the correct foreign key than before. .. change:: :tags: orm :tickets: Added comparable_property(), adds query Comparator behavior to regular, unmanaged Python properties .. change:: :tags: orm, Company.employees.of_type(Engineer), 'machines' :tickets: the functionality of query.with_polymorphic() has been added to mapper() as a configuration option. 
It's set via several forms: with_polymorphic='*' with_polymorphic=[mappers] with_polymorphic=('*', selectable) with_polymorphic=([mappers], selectable) This controls the default polymorphic loading strategy for inherited mappers. When a selectable is not given, outer joins are created for all joined-table inheriting mappers requested. Note that the auto-create of joins is not compatible with concrete table inheritance. The existing select_table flag on mapper() is now deprecated and is synonymous with with_polymorphic('*', select_table). Note that the underlying "guts" of select_table have been completely removed and replaced with the newer, more flexible approach. The new approach also automatically allows eager loads to work for subclasses, if they are present, for example:: sess.query(Company).options( eagerload_all( )) to load Company objects, their employees, and the 'machines' collection of employees who happen to be Engineers. A "with_polymorphic" Query option should be introduced soon as well which would allow per-Query control of with_polymorphic() on relations. .. change:: :tags: orm :tickets: added two "experimental" features to Query, "experimental" in that their specific name/behavior is not carved in stone just yet: _values() and _from_self(). We'd like feedback on these. - _values(\*columns) is given a list of column expressions, and returns a new Query that only returns those columns. When evaluated, the return value is a list of tuples just like when using add_column() or add_entity(), the only difference is that "entity zero", i.e. the mapped class, is not included in the results. This means it finally makes sense to use group_by() and having() on Query, which have been sitting around uselessly until now. A future change to this method may include that its ability to join, filter and allow other options not related to a "resultset" are removed, so the feedback we're looking for is how people want to use _values()...i.e. at the very end, or do people prefer to continue generating after it's called. - _from_self() compiles the SELECT statement for the Query (minus any eager loaders), and returns a new Query that selects from that SELECT. So basically you can query from a Query without needing to extract the SELECT statement manually. This gives meaning to operations like query[3:5]._from_self().filter(some criterion). There's not much controversial here except that you can quickly create highly nested queries that are less efficient, and we want feedback on the naming choice. .. change:: :tags: orm :tickets: query.order_by() and query.group_by() will accept multiple arguments using \*args (like select() already does). .. change:: :tags: orm :tickets: Added some convenience descriptors to Query: query.statement returns the full SELECT construct, query.whereclause returns just the WHERE part of the SELECT construct. .. change:: :tags: orm :tickets: Fixed/covered case when using a False/0 value as a polymorphic discriminator. .. change:: :tags: orm :tickets: Fixed bug which was preventing synonym() attributes from being used with inheritance .. change:: :tags: orm :tickets: 996 Fixed SQL function truncation of trailing underscores .. change:: :tags: orm :tickets: When attributes are expired on a pending instance, an error will not be raised when the "refresh" action is triggered and no result is found. .. change:: :tags: orm :tickets: Session.execute can now find binds from metadata .. 
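A minimal sketch of the mapper-level with_polymorphic option described above, assuming a typical joined-table inheritance setup (table and class names are hypothetical)::

    # outer joins to the joined-table subclass tables are created by default
    mapper(Employee, employees,
           polymorphic_on=employees.c.type,
           polymorphic_identity='employee',
           with_polymorphic='*')
    mapper(Engineer, engineers, inherits=Employee,
           polymorphic_identity='engineer')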
change:: :tags: orm :tickets: Adjusted the definition of "self-referential" to be any two mappers with a common parent (this affects whether or not aliased=True is required when joining with Query).

.. change:: :tags: orm :tickets: Made some fixes to the "from_joinpoint" argument to query.join() so that if the previous join was aliased and this one isn't, the join still happens successfully.

.. change:: :tags: orm :tickets: 895 Assorted "cascade deletes" fixes:

    - Fixed "cascade delete" operation of dynamic relations, which had only been implemented for foreign-key nulling behavior in 0.4.2 and not actual cascading deletes

    - Delete cascade without delete-orphan cascade on a many-to-one will not delete orphans which were disconnected from the parent before session.delete() is called on the parent (one-to-many already had this).

    - Delete cascade with delete-orphan will delete orphans whether or not it remains attached to its also-deleted parent.

    - delete-orphan cascade is properly detected on relations that are present on superclasses when using inheritance.

.. change:: :tags: orm :tickets: Fixed order_by calculation in Query to properly alias mapper-config'ed order_by when using select_from()

.. change:: :tags: orm :tickets: Refactored the diffing logic that kicks in when replacing one collection with another into collections.bulk_replace, useful to anyone building multi-level collections.

.. change:: :tags: orm :tickets: Cascade traversal algorithm converted from recursive to iterative to support deep object graphs.

.. change:: :tags: sql :tickets: 999 schema-qualified tables now will place the schemaname ahead of the tablename in all column expressions as well as when generating column labels. This prevents cross-schema name collisions in all cases

.. change:: :tags: sql :tickets: can now allow selects which correlate all FROM clauses and have no FROM themselves. These are typically used in a scalar context, i.e. SELECT x, (SELECT x WHERE y) FROM table. Requires explicit correlate() call.

.. change:: :tags: sql :tickets: 'name' is no longer a required constructor argument for Column(). It (and .key) may now be deferred until the column is added to a Table.

.. change:: :tags: sql :tickets: 791, 993 like(), ilike(), contains(), startswith(), endswith() take an optional keyword argument "escape=", which is set as the escape character using the syntax "x LIKE y ESCAPE ''".

.. change:: :tags: sql :tickets: random() is now a generic sql function and will compile to the database's random implementation, if any.

.. change:: :tags: sql :tickets: update().values() and insert().values() take keyword arguments.

.. change:: :tags: sql :tickets: Fixed an issue in select() regarding its generation of FROM clauses, in rare circumstances two clauses could be produced when one was intended to cancel out the other. Some ORM queries with lots of eager loads might have seen this symptom.

.. change:: :tags: sql :tickets: The case() function now also takes a dictionary as its whens parameter. It also interprets the "THEN" expressions as values by default, meaning case([(x==y, "foo")]) will interpret "foo" as a bound value, not a SQL expression. Use text(expr) for literal SQL expressions in this case. For the criterion itself, these may be literal strings only if the "value" keyword is present, otherwise SA will force explicit usage of either text() or literal().

.. change:: :tags: oracle :tickets: The "owner" keyword on Table is now deprecated, and is exactly synonymous with the "schema" keyword.
Tables can now be reflected with alternate "owner" attributes, explicitly stated on the Table object or not using "schema". .. change:: :tags: oracle :tickets: All of the "magic" searching for synonyms, DBLINKs etc. during table reflection are disabled by default unless you specify "oracle_resolve_synonyms=True" on the Table object. Resolving synonyms necessarily leads to some messy guessing which we'd rather leave off by default. When the flag is set, tables and related tables will be resolved against synonyms in all cases, meaning if a synonym exists for a particular table, reflection will use it when reflecting related tables. This is stickier behavior than before which is why it's off by default. .. change:: :tags: declarative, extension :tickets: The "synonym" function is now directly usable with "declarative". Pass in the decorated property using the "descriptor" keyword argument, e.g.: somekey = synonym('_somekey', descriptor=property(g, s)) .. change:: :tags: declarative, extension :tickets: The "deferred" function is usable with "declarative". Simplest usage is to declare deferred and Column together, e.g.: data = deferred(Column(Text)) .. change:: :tags: declarative, extension :tickets: Declarative also gained @synonym_for(...) and @comparable_using(...), front-ends for synonym and comparable_property. .. change:: :tags: declarative, extension :tickets: 995 Improvements to mapper compilation when using declarative; already-compiled mappers will still trigger compiles of other uncompiled mappers when used .. change:: :tags: declarative, extension :tickets: Declarative will complete setup for Columns lacking names, allows a more DRY syntax. class Foo(Base): __tablename__ = 'foos' id = Column(Integer, primary_key=True) .. change:: :tags: declarative, extension :tickets: inheritance in declarative can be disabled when sending "inherits=None" to __mapper_args__. .. change:: :tags: declarative, extension :tickets: declarative_base() takes optional kwarg "mapper", which is any callable/class/method that produces a mapper, such as declarative_base(mapper=scopedsession.mapper). This property can also be set on individual declarative classes using the "__mapper_cls__" property. .. change:: :tags: postgres :tickets: 1001 Got PG server side cursors back into shape, added fixed unit tests as part of the default test suite. Added better uniqueness to the cursor ID .. change:: :tags: oracle :tickets: The "owner" keyword on Table is now deprecated, and is exactly synonymous with the "schema" keyword. Tables can now be reflected with alternate "owner" attributes, explicitly stated on the Table object or not using "schema". .. change:: :tags: oracle :tickets: All of the "magic" searching for synonyms, DBLINKs etc. during table reflection are disabled by default unless you specify "oracle_resolve_synonyms=True" on the Table object. Resolving synonyms necessarily leads to some messy guessing which we'd rather leave off by default. When the flag is set, tables and related tables will be resolved against synonyms in all cases, meaning if a synonym exists for a particular table, reflection will use it when reflecting related tables. This is stickier behavior than before which is why it's off by default. .. change:: :tags: mssql :tickets: 979 Reflected tables will now automatically load other tables which are referenced by Foreign keys in the auto-loaded table,. .. change:: :tags: mssql :tickets: 916 Added executemany check to skip identity fetch,. .. 
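Putting the declarative additions described above together, a hedged sketch (``Base``, the class, and the column names are hypothetical, using the 0.4-era extension as described)::

    from sqlalchemy import Column, Integer, String, Text
    from sqlalchemy.orm import deferred, synonym
    from sqlalchemy.ext.declarative import declarative_base

    Base = declarative_base()

    class Foo(Base):
        __tablename__ = 'foos'
        id = Column(Integer, primary_key=True)    # column name taken from the attribute
        data = deferred(Column(Text))             # large column loaded on first access
        _somekey = Column('somekey', String(50))

        def _get_somekey(self):
            return self._somekey
        def _set_somekey(self, value):
            self._somekey = value
        somekey = synonym('_somekey', descriptor=property(_get_somekey, _set_somekey))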
change:: :tags: mssql :tickets: 884 Added stubs for small date type. .. change:: :tags: mssql :tickets: Added a new 'driver' keyword parameter for the pyodbc dialect. Will substitute into the ODBC connection string if given, defaults to 'SQL Server'. .. change:: :tags: mssql :tickets: Added a new 'max_identifier_length' keyword parameter for the pyodbc dialect. .. change:: :tags: mssql :tickets: Improvements to pyodbc + Unix. If you couldn't get that combination to work before, please try again. .. change:: :tags: mysql :tickets: The connection.info keys the dialect uses to cache server settings have changed and are now namespaced. .. changelog:: :version: 0.4.4 :released: Wed Mar 12 2008 .. change:: :tags: sql :tickets: 975 Can again create aliases of selects against textual FROM clauses. .. change:: :tags: sql :tickets: The value of a bindparam() can be a callable, in which case it's evaluated at statement execution time to get the value. .. change:: :tags: sql :tickets: 978 Added exception wrapping/reconnect support to result set fetching. Reconnect works for those databases that raise a catchable data error during results (i.e. doesn't work on MySQL) .. change:: :tags: sql :tickets: 936 Implemented two-phase API for "threadlocal" engine, via engine.begin_twophase(), engine.prepare() .. change:: :tags: sql :tickets: 986 Fixed bug which was preventing UNIONS from being cloneable. .. change:: :tags: sql :tickets: Added "bind" keyword argument to insert(), update(), delete() and DDL(). The .bind property is now assignable on those statements as well as on select(). .. change:: :tags: sql :tickets: Insert statements can now be compiled with extra "prefix" words between INSERT and INTO, for vendor extensions like MySQL's INSERT IGNORE INTO table. .. change:: :tags: orm :tickets: any(), has(), contains(), ~contains(), attribute level == and != now work properly with self-referential relations - the clause inside the EXISTS is aliased on the "remote" side to distinguish it from the parent table. This applies to single table self-referential as well as inheritance-based self-referential. .. change:: :tags: orm :tickets: 985 Repaired behavior of == and != operators at the relation() level when compared against NULL for one-to-one relations .. change:: :tags: orm :tickets: Fixed bug whereby session.expire() attributes were not loading on an polymorphically-mapped instance mapped by a select_table mapper. .. change:: :tags: orm :tickets: Added query.with_polymorphic() - specifies a list of classes which descend from the base class, which will be added to the FROM clause of the query. Allows subclasses to be used within filter() criterion as well as eagerly loads the attributes of those subclasses. .. change:: :tags: orm :tickets: Your cries have been heard: removing a pending item from an attribute or collection with delete-orphan expunges the item from the session; no FlushError is raised. Note that if you session.save()'ed the pending item explicitly, the attribute/collection removal still knocks it out. .. change:: :tags: orm :tickets: session.refresh() and session.expire() raise an error when called on instances which are not persistent within the session .. change:: :tags: orm :tickets: Fixed potential generative bug when the same Query was used to generate multiple Query objects using join(). .. 
change:: :tags: orm :tickets: Fixed bug which was introduced in 0.4.3, whereby loading an already-persistent instance mapped with joined table inheritance would trigger a useless "secondary" load from its joined table, when using the default "select" polymorphic_fetch. This was due to attributes being marked as expired during its first load and not getting unmarked from the previous "secondary" load. Attributes are now unexpired based on presence in __dict__ after any load or commit operation succeeds. .. change:: :tags: orm :tickets: Deprecated Query methods apply_sum(), apply_max(), apply_min(), apply_avg(). Better methodologies are coming.... .. change:: :tags: orm :tickets: relation() can accept a callable for its first argument, which returns the class to be related. This is in place to assist declarative packages to define relations without classes yet being in place. .. change:: :tags: orm :tickets: Added a new "higher level" operator called "of_type()": used in join() as well as with any() and has(), qualifies the subclass which will be used in filter criterion, e.g.: query.filter(Company.employees.of_type(Engineer). any(Engineer.name=='foo')) or query.join(Company.employees.of_type(Engineer)). filter(Engineer.name=='foo') .. change:: :tags: orm :tickets: Preventive code against a potential lost-reference bug in flush(). .. change:: :tags: orm :tickets: Expressions used in filter(), filter_by() and others, when they make usage of a clause generated from a relation using the identity of a child object (e.g., filter(Parent.child==)), evaluate the actual primary key value of at execution time so that the autoflush step of the Query can complete, thereby populating the PK value of in the case that was pending. .. change:: :tags: orm :tickets: setting the relation()-level order by to a column in the many-to-many "secondary" table will now work with eager loading, previously the "order by" wasn't aliased against the secondary table's alias. .. change:: :tags: orm :tickets: Synonyms riding on top of existing descriptors are now full proxies to those descriptors. .. change:: :tags: dialects :tickets: Invalid SQLite connection URLs now raise an error. .. change:: :tags: dialects :tickets: 981 postgres TIMESTAMP renders correctly .. change:: :tags: dialects :tickets: postgres PGArray is a "mutable" type by default; when used with the ORM, mutable-style equality/ copy-on-write techniques are used to test for changes. .. change:: :tags: extensions :tickets: a new super-small "declarative" extension has been added, which allows Table and mapper() configuration to take place inline underneath a class declaration. This extension differs from ActiveMapper and Elixir in that it does not redefine any SQLAlchemy semantics at all; literal Column, Table and relation() constructs are used to define the class behavior and table definition. .. changelog:: :version: 0.4.3 :released: Thu Feb 14 2008 .. change:: :tags: sql :tickets: Added "schema.DDL", an executable free-form DDL statement. DDLs can be executed in isolation or attached to Table or MetaData instances and executed automatically when those objects are created and/or dropped. .. change:: :tags: sql :tickets: Table columns and constraints can be overridden on a an existing table (such as a table that was already reflected) using the 'useexisting=True' flag, which now takes into account the arguments passed along with it. .. 
change:: :tags: sql :tickets: Added a callable-based DDL events interface, adds hooks before and after Tables and MetaData create and drop. .. change:: :tags: sql :tickets: Added generative where() method to delete() and update() constructs which return a new object with criterion joined to existing criterion via AND, just like select().where(). .. change:: :tags: sql :tickets: 727 Added "ilike()" operator to column operations. Compiles to ILIKE on postgres, lower(x) LIKE lower(y) on all others. .. change:: :tags: sql :tickets: 943 Added "now()" as a generic function; on SQLite, Oracle and MSSQL compiles as "CURRENT_TIMESTAMP"; "now()" on all others. .. change:: :tags: sql :tickets: 962 The startswith(), endswith(), and contains() operators now concatenate the wildcard operator with the given operand in SQL, i.e. "'%' || " in all cases, accept text('something') operands properly .. change:: :tags: sql :tickets: 962 cast() accepts text('something') and other non-literal operands properly .. change:: :tags: sql :tickets: fixed bug in result proxy where anonymously generated column labels would not be accessible using their straight string name .. change:: :tags: sql :tickets: Deferrable constraints can now be defined. .. change:: :tags: sql :tickets: 915 Added "autocommit=True" keyword argument to select() and text(), as well as generative autocommit() method on select(); for statements which modify the database through some user-defined means other than the usual INSERT/UPDATE/ DELETE etc. This flag will enable "autocommit" behavior during execution if no transaction is in progress. .. change:: :tags: sql :tickets: The '.c.' attribute on a selectable now gets an entry for every column expression in its columns clause. Previously, "unnamed" columns like functions and CASE statements weren't getting put there. Now they will, using their full string representation if no 'name' is available. .. change:: :tags: sql :tickets: a CompositeSelect, i.e. any union(), union_all(), intersect(), etc. now asserts that each selectable contains the same number of columns. This conforms to the corresponding SQL requirement. .. change:: :tags: sql :tickets: The anonymous 'label' generated for otherwise unlabeled functions and expressions now propagates outwards at compile time for expressions like select([select([func.foo()])]). .. change:: :tags: sql :tickets: Building on the above ideas, CompositeSelects now build up their ".c." collection based on the names present in the first selectable only; corresponding_column() now works fully for all embedded selectables. .. change:: :tags: sql :tickets: Oracle and others properly encode SQL used for defaults like sequences, etc., even if no unicode idents are used since identifier preparer may return a cached unicode identifier. .. change:: :tags: sql :tickets: Column and clause comparisons to datetime objects on the left hand side of the expression now work (d < table.c.col). (datetimes on the RHS have always worked, the LHS exception is a quirk of the datetime implementation.) .. change:: :tags: orm :tickets: Every Session.begin() must now be accompanied by a corresponding commit() or rollback() unless the session is closed with Session.close(). This also includes the begin() which is implicit to a session created with transactional=True. 
The biggest change introduced here is that when a Session created with transactional=True raises an exception during flush(), you must call Session.rollback() or Session.close() in order for that Session to continue after an exception. .. change:: :tags: orm :tickets: 961 Fixed merge() collection-doubling bug when merging transient entities with backref'ed collections. .. change:: :tags: orm :tickets: merge(dont_load=True) does not accept transient entities, this is in continuation with the fact that merge(dont_load=True) does not accept any "dirty" objects either. .. change:: :tags: orm :tickets: Added standalone "query" class attribute generated by a scoped_session. This provides MyClass.query without using Session.mapper. Use via: MyClass.query = Session.query_property() .. change:: :tags: orm :tickets: The proper error message is raised when trying to access expired instance attributes with no session present .. change:: :tags: orm :tickets: dynamic_loader() / lazy="dynamic" now accepts and uses the order_by parameter in the same way in which it works with relation(). .. change:: :tags: orm :tickets: Added expire_all() method to Session. Calls expire() for all persistent instances. This is handy in conjunction with... .. change:: :tags: orm :tickets: Instances which have been partially or fully expired will have their expired attributes populated during a regular Query operation which affects those objects, preventing a needless second SQL statement for each instance. .. change:: :tags: orm :tickets: 938 Dynamic relations, when referenced, create a strong reference to the parent object so that the query still has a parent to call against even if the parent is only created (and otherwise dereferenced) within the scope of a single expression. .. change:: :tags: orm :tickets: Added a mapper() flag "eager_defaults". When set to True, defaults that are generated during an INSERT or UPDATE operation are post-fetched immediately, instead of being deferred until later. This mimics the old 0.3 behavior. .. change:: :tags: orm :tickets: query.join() can now accept class-mapped attributes as arguments. These can be used in place or in any combination with strings. In particular this allows construction of joins to subclasses on a polymorphic relation, i.e.: query(Company).join(['employees', Engineer.name]) .. change:: :tags: orm, ('employees', people.join(engineer)), Engineer.name :tickets: query.join() can also accept tuples of attribute name/some selectable as arguments. This allows construction of joins *from* subclasses of a polymorphic relation, i.e.: query(Company).\ join( ) .. change:: :tags: orm :tickets: General improvements to the behavior of join() in conjunction with polymorphic mappers, i.e. joining from/to polymorphic mappers and properly applying aliases. .. change:: :tags: orm :tickets: 933 Fixed/improved behavior when a mapper determines the natural "primary key" of a mapped join, it will more effectively reduce columns which are equivalent via foreign key relation. This affects how many arguments need to be sent to query.get(), among other things. .. change:: :tags: orm :tickets: 946 The lazy loader can now handle a join condition where the "bound" column (i.e. the one that gets the parent id sent as a bind parameter) appears more than once in the join condition. Specifically this allows the common task of a relation() which contains a parent-correlated subquery, such as "select only the most recent child item". .. 
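A hedged sketch of the standalone "query" class attribute mentioned above (assumes ``MyClass`` is already mapped; the 0.4 "transactional" flag is shown as described)::

    from sqlalchemy.orm import scoped_session, sessionmaker

    Session = scoped_session(sessionmaker(transactional=True))
    MyClass.query = Session.query_property()

    eds = MyClass.query.filter_by(name='ed').all()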
change:: :tags: orm :tickets: Fixed bug in polymorphic inheritance where an incorrect exception is raised when base polymorphic_on column does not correspond to any columns within the local selectable of an inheriting mapper more than one level deep

.. change:: :tags: orm :tickets: Fixed bug in polymorphic inheritance which made it difficult to set a working "order_by" on a polymorphic mapper.

.. change:: :tags: orm :tickets: Fixed a rather expensive call in Query that was slowing down polymorphic queries.

.. change:: :tags: orm :tickets: 954 "Passive defaults" and other "inline" defaults can now be loaded during a flush() call if needed; in particular, this allows constructing relations() where a foreign key column references a server-side-generated, non-primary-key column.

.. change:: :tags: orm :tickets: Additional Session transaction fixes/changes:

    - Fixed bug with session transaction management: parent transactions weren't started on the connection when adding a connection to a nested transaction.

    - session.transaction now always refers to the innermost active transaction, even when commit/rollback are called directly on the session transaction object.

    - Two-phase transactions can now be prepared.

    - When preparing a two-phase transaction fails on one connection, all the connections are rolled back.

    - session.close() didn't close all transactions when nested transactions were used.

    - rollback() previously erroneously set the current transaction directly to the parent of the transaction that could be rolled back to. Now it rolls back the next transaction up that can handle it, but sets the current transaction to its parent and inactivates the transactions in between. Inactive transactions can only be rolled back or closed, any other call results in an error.

    - autoflush for commit() wasn't flushing for simple subtransactions.

    - unitofwork flush didn't close the failed transaction when the session was not in a transaction and committing the transaction failed.

.. change:: :tags: orm :tickets: 964, 940 Miscellaneous tickets:

.. change:: :tags: general :tickets: Fixed a variety of hidden and some not-so-hidden compatibility issues for Python 2.3, thanks to new support for running the full test suite on 2.3.

.. change:: :tags: general :tickets: Warnings are now issued as type exceptions.SAWarning.

.. change:: :tags: dialects :tickets: Better support for schemas in SQLite (linked in by ATTACH DATABASE ... AS name). In some cases in the past, schema names were omitted from generated SQL for SQLite. This is no longer the case.

.. change:: :tags: dialects :tickets: table_names on SQLite now picks up temporary tables as well.

.. change:: :tags: dialects :tickets: Auto-detect an unspecified MySQL ANSI_QUOTES mode during reflection operations, support for changing the mode midstream. Manual mode setting is still required if no reflection is used.

.. change:: :tags: dialects :tickets: Fixed reflection of TIME columns on SQLite.

.. change:: :tags: dialects :tickets: 580 Finally added PGMacAddr type to postgres

.. change:: :tags: dialects :tickets: Reflect the sequence associated to a PK field (typically with a BEFORE INSERT trigger) under Firebird

.. change:: :tags: dialects :tickets: 941 Oracle assembles the correct columns in the result set column mapping when generating a LIMIT/OFFSET subquery, allows columns to map properly to result sets even if long-name truncation kicks in

..
change:: :tags: dialects :tickets: MSSQL now includes EXEC in the _is_select regexp, which should allow row-returning stored procedures to be used.

.. change:: :tags: dialects :tickets: MSSQL now includes an experimental implementation of LIMIT/OFFSET using the ANSI SQL row_number() function, so it requires MSSQL-2005 or higher. To enable the feature, add "has_window_funcs" to the keyword arguments for connect, or add "?has_window_funcs=1" to your dburi query arguments.

.. change:: :tags: ext :tickets: Changed ext.activemapper to use a non-transactional session for the objectstore.

.. change:: :tags: ext :tickets: Fixed output order of "['a'] + obj.proxied" binary operation on association-proxied lists.

.. changelog:: :version: 0.4.2p3 :released: Wed Jan 09 2008

.. change:: :tags: general :tickets: sub version numbering scheme changed to suit setuptools version number rules; easy_install -u should now get this version over 0.4.2.

.. change:: :tags: sql :tickets: 912 Text type is properly exported now and does not raise a warning on DDL create; String types with no length only raise warnings during CREATE TABLE

.. change:: :tags: sql :tickets: new UnicodeText type is added, to specify an encoded, unlengthed Text type

.. change:: :tags: sql :tickets: fixed bug in union() so that select() statements which don't derive from FromClause objects can be unioned

.. change:: :tags: orm :tickets: fixed bug with session.dirty when using "mutable scalars" (such as PickleTypes)

.. change:: :tags: orm :tickets: added a more descriptive error message when flushing on a relation() that has non-locally-mapped columns in its primary or secondary join condition

.. change:: :tags: dialects :tickets: Fixed reflection of mysql empty string column defaults.

.. change:: :tags: sql :tickets: 912 changed name of TEXT to Text since it's a "generic" type; TEXT name is deprecated until 0.5. The "upgrading" behavior of String to Text when no length is present is also deprecated until 0.5; will issue a warning when used for CREATE TABLE statements (String with no length for SQL expression purposes is still fine)

.. change:: :tags: sql :tickets: 924 generative select.order_by(None) / group_by(None) was not managing to reset order by/group by criterion, fixed

.. change:: :tags: orm :tickets: suppressing *all* errors in InstanceState.__cleanup() now.

.. change:: :tags: orm :tickets: 922 fixed an attribute history bug whereby assigning a new collection to a collection-based attribute which already had pending changes would generate incorrect history

.. change:: :tags: orm :tickets: 925 fixed delete-orphan cascade bug whereby setting the same object twice to a scalar attribute could log it as an orphan

.. change:: :tags: orm :tickets: Fixed cascades on a += assignment to a list-based relation.

.. change:: :tags: orm :tickets: 919 synonyms can now be created against props that don't exist yet, which are later added via add_property(). This commonly includes backrefs. (i.e. you can make synonyms for backrefs without worrying about the order of operations)

.. change:: :tags: orm :tickets: fixed bug which could occur with polymorphic "union" mapper which falls back to "deferred" loading of inheriting tables

.. change:: :tags: orm :tickets: the "columns" collection on a mapper/mapped class (i.e. 'c') is against the mapped table, not the select_table in the case of polymorphic "union" loading (this shouldn't be noticeable).

.. change:: :tags: ext :tickets: '+', '*', '+=' and '\*=' support for association proxied lists.

..
change:: :tags: dialects :tickets: 923 mssql - narrowed down the test for "date"/"datetime" in MSDate/ MSDateTime subclasses so that incoming "datetime" objects don't get mis-interpreted as "date" objects and vice versa. .. change:: :tags: orm :tickets: fixed fairly critical bug whereby the same instance could be listed more than once in the unitofwork.new collection; most typically reproduced when using a combination of inheriting mappers and ScopedSession.mapper, as the multiple __init__ calls per instance could save() the object with distinct _state objects .. change:: :tags: orm :tickets: added very rudimentary yielding iterator behavior to Query. Call query.yield_per() and evaluate the Query in an iterative context; every collection of N rows will be packaged up and yielded. Use this method with extreme caution since it does not attempt to reconcile eagerly loaded collections across result batch boundaries, nor will it behave nicely if the same instance occurs in more than one batch. This means that an eagerly loaded collection will get cleared out if it's referenced in more than one batch, and in all cases attributes will be overwritten on instances that occur in more than one batch. .. change:: :tags: orm :tickets: 920 Fixed in-place set mutation operators for set collections and association proxied sets. .. change:: :tags: dialects :tickets: 913 Fixed the missing call to subtype result processor for the PGArray type. .. changelog:: :version: 0.4.2 :released: Wed Jan 02 2008 .. change:: :tags: sql :tickets: 615 generic functions ! we introduce a database of known SQL functions, such as current_timestamp, coalesce, and create explicit function objects representing them. These objects have constrained argument lists, are type aware, and can compile in a dialect-specific fashion. So saying func.char_length("foo", "bar") raises an error (too many args), func.coalesce(datetime.date(2007, 10, 5), datetime.date(2005, 10, 15)) knows that its return type is a Date. We only have a few functions represented so far but will continue to add to the system .. change:: :tags: sql :tickets: auto-reconnect support improved; a Connection can now automatically reconnect after its underlying connection is invalidated, without needing to connect() again from the engine. This allows an ORM session bound to a single Connection to not need a reconnect. Open transactions on the Connection must be rolled back after an invalidation of the underlying connection else an error is raised. Also fixed bug where disconnect detect was not being called for cursor(), rollback(), or commit(). .. change:: :tags: sql :tickets: added new flag to String and create_engine(), assert_unicode=(True|False|'warn'\|None). Defaults to `False` or `None` on create_engine() and String, `'warn'` on the Unicode type. When `True`, results in all unicode conversion operations raising an exception when a non-unicode bytestring is passed as a bind parameter. 'warn' results in a warning. It is strongly advised that all unicode-aware applications make proper use of Python unicode objects (i.e. u'hello' and not 'hello') so that data round trips accurately. .. change:: :tags: sql :tickets: generation of "unique" bind parameters has been simplified to use the same "unique identifier" mechanisms as everything else. This doesn't affect user code, except any code that might have been hardcoded against the generated names. Generated bind params now have the form "_", whereas before only the second bind of the same name would have this form. .. 
change:: :tags: sql :tickets: select().as_scalar() will raise an exception if the select does not have exactly one expression in its columns clause. .. change:: :tags: sql :tickets: bindparam() objects themselves can be used as keys for execute(), i.e. statement.execute({bind1:'foo', bind2:'bar'}) .. change:: :tags: sql :tickets: added new methods to TypeDecorator, process_bind_param() and process_result_value(), which automatically take advantage of the processing of the underlying type. Ideal for using with Unicode or Pickletype. TypeDecorator should now be the primary way to augment the behavior of any existing type including other TypeDecorator subclasses such as PickleType. .. change:: :tags: sql :tickets: selectables (and others) will issue a warning when two columns in their exported columns collection conflict based on name. .. change:: :tags: sql :tickets: 890 tables with schemas can still be used in sqlite, firebird, schema name just gets dropped .. change:: :tags: sql :tickets: changed the various "literal" generation functions to use an anonymous bind parameter. not much changes here except their labels now look like ":param_1", ":param_2" instead of ":literal" .. change:: :tags: sql :tickets: column labels in the form "tablename.columname", i.e. with a dot, are now supported. .. change:: :tags: sql :tickets: from_obj keyword argument to select() can be a scalar or a list. .. change:: :tags: orm :tickets: 871 a major behavioral change to collection-based backrefs: they no longer trigger lazy loads ! "reverse" adds and removes are queued up and are merged with the collection when it is actually read from and loaded; but do not trigger a load beforehand. For users who have noticed this behavior, this should be much more convenient than using dynamic relations in some cases; for those who have not, you might notice your apps using a lot fewer queries than before in some situations. .. change:: :tags: orm :tickets: mutable primary key support is added. primary key columns can be changed freely, and the identity of the instance will change upon flush. In addition, update cascades of foreign key referents (primary key or not) along relations are supported, either in tandem with the database's ON UPDATE CASCADE (required for DB's like Postgres) or issued directly by the ORM in the form of UPDATE statements, by setting the flag "passive_cascades=False". .. change:: :tags: orm :tickets: 490 inheriting mappers now inherit the MapperExtensions of their parent mapper directly, so that all methods for a particular MapperExtension are called for subclasses as well. As always, any MapperExtension can return either EXT_CONTINUE to continue extension processing or EXT_STOP to stop processing. The order of mapper resolution is: . Note that if you instantiate the same extension class separately and then apply it individually for two mappers in the same inheritance chain, the extension will be applied twice to the inheriting class, and each method will be called twice. To apply a mapper extension explicitly to each inheriting class but have each method called only once per operation, use the same instance of the extension for both mappers. .. change:: :tags: orm :tickets: 907 MapperExtension.before_update() and after_update() are now called symmetrically; previously, an instance that had no modified column attributes (but had a relation() modification) could be called with before_update() but not after_update() .. 
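The TypeDecorator hooks described earlier in this section can be sketched roughly as follows (a hypothetical type; the process_bind_param()/process_result_value() signatures shown here are assumed)::

    from sqlalchemy import types

    class StrippedString(types.TypeDecorator):
        """Trims surrounding whitespace going in; passes values through coming out."""
        impl = types.String

        def process_bind_param(self, value, dialect):
            if value is not None:
                value = value.strip()
            return value

        def process_result_value(self, value, dialect):
            return value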
change:: :tags: orm :tickets: columns which are missing from a Query's select statement now get automatically deferred during load. .. change:: :tags: orm :tickets: 908 mapped classes which extend "object" and do not provide an __init__() method will now raise TypeError if non-empty \*args or \**kwargs are present at instance construction time (and are not consumed by any extensions such as the scoped_session mapper), consistent with the behavior of normal Python classes .. change:: :tags: orm :tickets: 899 fixed Query bug when filter_by() compares a relation against None .. change:: :tags: orm :tickets: improved support for pickling of mapped entities. Per-instance lazy/deferred/expired callables are now serializable so that they serialize and deserialize with _state. .. change:: :tags: orm :tickets: 801 new synonym() behavior: an attribute will be placed on the mapped class, if one does not exist already, in all cases. if a property already exists on the class, the synonym will decorate the property with the appropriate comparison operators so that it can be used in column expressions just like any other mapped attribute (i.e. usable in filter(), etc.) the "proxy=True" flag is deprecated and no longer means anything. Additionally, the flag "map_column=True" will automatically generate a ColumnProperty corresponding to the name of the synonym, i.e.: 'somename':synonym('_somename', map_column=True) will map the column named 'somename' to the attribute '_somename'. See the example in the mapper docs. .. change:: :tags: orm :tickets: Query.select_from() now replaces all existing FROM criterion with the given argument; the previous behavior of constructing a list of FROM clauses was generally not useful as is required filter() calls to create join criterion, and new tables introduced within filter() already add themselves to the FROM clause. The new behavior allows not just joins from the main table, but select statements as well. Filter criterion, order bys, eager load clauses will be "aliased" against the given statement. .. change:: :tags: orm :tickets: this month's refactoring of attribute instrumentation changes the "copy-on-load" behavior we've had since midway through 0.3 with "copy-on-modify" in most cases. This takes a sizable chunk of latency out of load operations and overall does less work as only attributes which are actually modified get their "committed state" copied. Only "mutable scalar" attributes (i.e. a pickled object or other mutable item), the reason for the copy-on-load change in the first place, retain the old behavior. .. change:: :tags: attrname, orm :tickets: a slight behavioral change to attributes is, del'ing an attribute does *not* cause the lazyloader of that attribute to fire off again; the "del" makes the effective value of the attribute "None". To re-trigger the "loader" for an attribute, use session.expire(instance,). .. change:: :tags: orm :tickets: query.filter(SomeClass.somechild == None), when comparing a many-to-one property to None, properly generates "id IS NULL" including that the NULL is on the right side. .. change:: :tags: orm :tickets: query.order_by() takes into account aliased joins, i.e. query.join('orders', aliased=True).order_by(Order.id) .. change:: :tags: orm :tickets: eagerload(), lazyload(), eagerload_all() take an optional second class-or-mapper argument, which will select the mapper to apply the option towards. This can select among other mappers which were added using add_entity(). .. 
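For example, the map_column behavior described above, in classical mapper() form (class and table names hypothetical)::

    mapper(MyClass, mytable, properties={
        # maps the 'somename' column onto the '_somename' attribute and adds a
        # 'somename' synonym usable in filter() and other expressions
        'somename': synonym('_somename', map_column=True),
    })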
change:: :tags: orm :tickets: eagerloading will work with mappers added via add_entity(). .. change:: :tags: orm :tickets: added "cascade delete" behavior to "dynamic" relations just like that of regular relations. if passive_deletes flag (also just added) is not set, a delete of the parent item will trigger a full load of the child items so that they can be deleted or updated accordingly. .. change:: :tags: orm :tickets: also with dynamic, implemented correct count() behavior as well as other helper methods. .. change:: :tags: orm :tickets: fix to cascades on polymorphic relations, such that cascades from an object to a polymorphic collection continue cascading along the set of attributes specific to each element in the collection. .. change:: :tags: orm :tickets: 893 query.get() and query.load() do not take existing filter or other criterion into account; these methods *always* look up the given id in the database or return the current instance from the identity map, disregarding any existing filter, join, group_by or other criterion which has been configured. .. change:: :tags: orm :tickets: 883 added support for version_id_col in conjunction with inheriting mappers. version_id_col is typically set on the base mapper in an inheritance relationship where it takes effect for all inheriting mappers. .. change:: :tags: orm :tickets: relaxed rules on column_property() expressions having labels; any ColumnElement is accepted now, as the compiler auto-labels non-labeled ColumnElements now. a selectable, like a select() statement, still requires conversion to ColumnElement via as_scalar() or label(). .. change:: :tags: orm :tickets: fixed backref bug where you could not del instance.attr if attr was None .. change:: :tags: orm :tickets: several ORM attributes have been removed or made private: mapper.get_attr_by_column(), mapper.set_attr_by_column(), mapper.pks_by_table, mapper.cascade_callable(), MapperProperty.cascade_callable(), mapper.canload(), mapper.save_obj(), mapper.delete_obj(), mapper._mapper_registry, attributes.AttributeManager .. change:: :tags: orm :tickets: Assigning an incompatible collection type to a relation attribute now raises TypeError instead of sqlalchemy's ArgumentError. .. change:: :tags: orm :tickets: 886 Bulk assignment of a MappedCollection now raises an error if a key in the incoming dictionary does not match the key that the collection's keyfunc would use for that value. .. change:: :tags: orm, newval1, newval2 :tickets: Custom collections can now specify a @converter method to translate objects used in "bulk" assignment into a stream of values, as in:: obj.col = # or obj.dictcol = {'foo': newval1, 'bar': newval2} The MappedCollection uses this hook to ensure that incoming key/value pairs are sane from the collection's perspective. .. change:: :tags: orm :tickets: 872 fixed endless loop issue when using lazy="dynamic" on both sides of a bi-directional relationship .. change:: :tags: orm :tickets: 904 more fixes to the LIMIT/OFFSET aliasing applied with Query + eagerloads, in this case when mapped against a select statement .. change:: :tags: orm :tickets: fix to self-referential eager loading such that if the same mapped instance appears in two or more distinct sets of columns in the same result set, its eagerly loaded collection will be populated regardless of whether or not all of the rows contain a set of "eager" columns for that collection. this would also show up as a KeyError when fetching results with join_depth turned on. .. 
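A minimal sketch of version_id_col configured on the base of an inheritance hierarchy, as described above (tables and classes hypothetical)::

    mapper(Employee, employees,
           version_id_col=employees.c.version_id,
           polymorphic_on=employees.c.type,
           polymorphic_identity='employee')
    mapper(Engineer, engineers, inherits=Employee,
           polymorphic_identity='engineer')   # inherits the version check behavior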
change:: :tags: orm :tickets: fixed bug where Query would not apply a subquery to the SQL when LIMIT was used in conjunction with an inheriting mapper where the eager loader was only in the parent mapper. .. change:: :tags: orm :tickets: clarified the error message which occurs when you try to update() an instance with the same identity key as an instance already present in the session. .. change:: :tags: orm :tickets: some clarifications and fixes to merge(instance, dont_load=True). fixed bug where lazy loaders were getting disabled on returned instances. Also, we currently do not support merging an instance which has uncommitted changes on it, in the case that dont_load=True is used....this will now raise an error. This is due to complexities in merging the "committed state" of the given instance to correctly correspond to the newly copied instance, as well as other modified state. Since the use case for dont_load=True is caching, the given instances shouldn't have any uncommitted changes on them anyway. We also copy the instances over without using any events now, so that the 'dirty' list on the new session remains unaffected. .. change:: :tags: orm :tickets: fixed bug which could arise when using session.begin_nested() in conjunction with more than one level deep of enclosing session.begin() statements .. change:: :tags: orm :tickets: 914 fixed session.refresh() with instance that has custom entity_name .. change:: :tags: dialects :tickets: sqlite SLDate type will not erroneously render "microseconds" portion of a datetime or time object. .. change:: :tags: dialects :tickets: 902 oracle - added disconnect detection support for Oracle - some cleanup to binary/raw types so that cx_oracle.LOB is detected on an ad-hoc basis .. change:: :tags: dialects :tickets: 824, 839, 842, 901 MSSQL - PyODBC no longer has a global "set nocount on". - Fix non-identity integer PKs on autload - Better support for convert_unicode - Less strict date conversion for pyodbc/adodbapi - Schema-qualified tables / autoload .. change:: :tags: firebird, backend :tickets: 410 does properly reflect domains (partially fixing) and PassiveDefaults .. change:: :tags: 3562, firebird, backend :tickets: reverted to use default poolclass (was set to SingletonThreadPool in 0.4.0 for test purposes) .. change:: :tags: firebird, backend :tickets: map func.length() to 'char_length' (easily overridable with the UDF 'strlen' on old versions of Firebird) .. changelog:: :version: 0.4.1 :released: Sun Nov 18 2007 .. change:: :tags: sql :tickets: the "shortname" keyword parameter on bindparam() has been deprecated. .. change:: :tags: sql :tickets: Added contains operator (generates a "LIKE %%" clause). .. change:: :tags: sql :tickets: anonymous column expressions are automatically labeled. e.g. select([x* 5]) produces "SELECT x * 5 AS anon_1". This allows the labelname to be present in the cursor.description which can then be appropriately matched to result-column processing rules. (we can't reliably use positional tracking for result-column matches since text() expressions may represent multiple columns). .. change:: :tags: sql :tickets: operator overloading is now controlled by TypeEngine objects - the one built-in operator overload so far is String types overloading '+' to be the string concatenation operator. User-defined types can also define their own operator overloading by overriding the adapt_operator(self, op) method. .. 
change:: :tags: sql :tickets: 819 untyped bind parameters on the right side of a binary expression will be assigned the type of the left side of the operation, to better enable the appropriate bind parameter processing to take effect .. change:: :tags: sql :tickets: 833 Removed regular expression step from most statement compilations. Also fixes .. change:: :tags: sql :tickets: Fixed empty (zero column) sqlite inserts, allowing inserts on autoincrementing single column tables. .. change:: :tags: sql :tickets: Fixed expression translation of text() clauses; this repairs various ORM scenarios where literal text is used for SQL expressions .. change:: :tags: sql :tickets: Removed ClauseParameters object; compiled.params returns a regular dictionary now, as well as result.last_inserted_params() / last_updated_params(). .. change:: :tags: sql :tickets: Fixed INSERT statements w.r.t. primary key columns that have SQL-expression based default generators on them; SQL expression executes inline as normal but will not trigger a "postfetch" condition for the column, for those DB's who provide it via cursor.lastrowid .. change:: :tags: sql :tickets: 844 func. objects can be pickled/unpickled .. change:: :tags: sql :tickets: rewrote and simplified the system used to "target" columns across selectable expressions. On the SQL side this is represented by the "corresponding_column()" method. This method is used heavily by the ORM to "adapt" elements of an expression to similar, aliased expressions, as well as to target result set columns originally bound to a table or selectable to an aliased, "corresponding" expression. The new rewrite features completely consistent and accurate behavior. .. change:: :tags: sql :tickets: 573 Added a field ("info") for storing arbitrary data on schema items .. change:: :tags: sql :tickets: The "properties" collection on Connections has been renamed "info" to match schema's writable collections. Access is still available via the "properties" name until 0.5. .. change:: :tags: sql :tickets: fixed the close() method on Transaction when using strategy='threadlocal' .. change:: :tags: sql :tickets: 853 fix to compiled bind parameters to not mistakenly populate None .. change:: :tags: sql :tickets: ._execute_clauseelement becomes a public method Connectable.execute_clauseelement .. change:: :tags: orm :tickets: 843 eager loading with LIMIT/OFFSET applied no longer adds the primary table joined to a limited subquery of itself; the eager loads now join directly to the subquery which also provides the primary table's columns to the result set. This eliminates a JOIN from all eager loads with LIMIT/OFFSET. .. change:: :tags: orm :tickets: 802 session.refresh() and session.expire() now support an additional argument "attribute_names", a list of individual attribute keynames to be refreshed or expired, allowing partial reloads of attributes on an already-loaded instance. .. change:: :tags: orm :tickets: 767 added op() operator to instrumented attributes; i.e. User.name.op('ilike')('%somename%') .. change:: :tags: orm :tickets: 676 Mapped classes may now define __eq__, __hash__, and __nonzero__ methods with arbitrary semantics. The orm now handles all mapped instances on an identity-only basis. (e.g. 'is' vs '==') .. change:: :tags: orm :tickets: the "properties" accessor on Mapper is removed; it now throws an informative exception explaining the usage of mapper.get_property() and mapper.iterate_properties .. 
change:: :tags: orm :tickets: added having() method to Query, applies HAVING to the generated statement in the same way as filter() appends to the WHERE clause. .. change:: :tags: orm :tickets: 777 The behavior of query.options() is now fully based on paths, i.e. an option such as eagerload_all('x.y.z.y.x') will apply eagerloading to only those paths, i.e. and not 'x.y.x'; eagerload('children.children') applies only to exactly two-levels deep, etc. .. change:: :tags: orm :tickets: PickleType will compare using `==` when set up with mutable=False, and not the `is` operator. To use `is` or any other comparator, send in a custom comparison function using PickleType(comparator=my_custom_comparator). .. change:: :tags: orm :tickets: 848 query doesn't throw an error if you use distinct() and an order_by() containing UnaryExpressions (or other) together .. change:: :tags: orm :tickets: 786 order_by() expressions from joined tables are properly added to columns clause when using distinct() .. change:: :tags: orm :tickets: 858 fixed error where Query.add_column() would not accept a class-bound attribute as an argument; Query also raises an error if an invalid argument was sent to add_column() (at instances() time) .. change:: :tags: orm :tickets: added a little more checking for garbage-collection dereferences in InstanceState.__cleanup() to reduce "gc ignored" errors on app shutdown .. change:: :tags: orm :tickets: The session API has been solidified: .. change:: :tags: orm :tickets: 840 It's an error to session.save() an object which is already persistent .. change:: :tags: orm :tickets: It's an error to session.delete() an object which is *not* persistent. .. change:: :tags: orm :tickets: session.update() and session.delete() raise an error when updating or deleting an instance that is already in the session with a different identity. .. change:: :tags: orm :tickets: The session checks more carefully when determining "object X already in another session"; e.g. if you pickle a series of objects and unpickle (i.e. as in a Pylons HTTP session or similar), they can go into a new session without any conflict .. change:: :tags: orm :tickets: merge() includes a keyword argument "dont_load=True". setting this flag will cause the merge operation to not load any data from the database in response to incoming detached objects, and will accept the incoming detached object as though it were already present in that session. Use this to merge detached objects from external caching systems into the session. .. change:: :tags: orm :tickets: Deferred column attributes no longer trigger a load operation when the attribute is assigned to. In those cases, the newly assigned value will be present in the flushes' UPDATE statement unconditionally. .. change:: :tags: orm :tickets: 834 Fixed a truncation error when re-assigning a subset of a collection (obj.relation = obj.relation[1:]) .. change:: :tags: orm :tickets: 832 De-cruftified backref configuration code, backrefs which step on existing properties now raise an error .. change:: :tags: orm :tickets: 831 Improved behavior of add_property() etc., fixed involving synonym/deferred. .. change:: :tags: orm :tickets: Fixed clear_mappers() behavior to better clean up after itself. .. change:: :tags: orm :tickets: 841 Fix to "row switch" behavior, i.e. when an INSERT/DELETE is combined into a single UPDATE; many-to-many relations on the parent object update properly. .. 
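A hedged sketch of the PickleType comparison behavior noted above; the table and the choice of operator.eq are examples only::

    import operator
    from sqlalchemy import MetaData, Table, Column, Integer, PickleType

    metadata = MetaData()
    cache = Table('cache', metadata,
                  Column('id', Integer, primary_key=True),
                  # mutable=False compares old/new values with '==' by default;
                  # any comparison callable may be supplied via 'comparator'
                  Column('payload', PickleType(mutable=False,
                                               comparator=operator.eq)))

..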
change:: :tags: orm :tickets: Fixed __hash__ for association proxy- these collections are unhashable, just like their mutable Python counterparts. .. change:: :tags: orm :tickets: Added proxying of save_or_update, __contains__ and __iter__ methods for scoped sessions. .. change:: :tags: orm :tickets: 852 fixed very hard-to-reproduce issue whereby the FROM clause of Query could get polluted by certain generative calls .. change:: :tags: dialects :tickets: Added experimental support for MaxDB (versions >= 7.6.03.007 only). .. change:: :tags: dialects :tickets: oracle will now reflect "DATE" as an OracleDateTime column, not OracleDate .. change:: :tags: dialects :tickets: 847 added awareness of schema name in oracle table_names() function, fixes metadata.reflect(schema='someschema') .. change:: :tags: dialects :tickets: MSSQL anonymous labels for selection of functions made deterministic .. change:: :tags: dialects :tickets: sqlite will reflect "DECIMAL" as a numeric column. .. change:: :tags: dialects :tickets: 828 Made Access DAO detection more reliable .. change:: :tags: dialects :tickets: Renamed the Dialect attribute 'preexecute_sequences' to 'preexecute_pk_sequences'. An attribute proxy is in place for out-of-tree dialects using the old name. .. change:: :tags: dialects :tickets: Added test coverage for unknown type reflection. Fixed sqlite/mysql handling of type reflection for unknown types. .. change:: :tags: dialects :tickets: Added REAL for mysql dialect (for folks exploiting the REAL_AS_FLOAT sql mode). .. change:: :tags: dialects :tickets: mysql Float, MSFloat and MSDouble constructed without arguments now produce no-argument DDL, e.g. 'FLOAT'. .. change:: :tags: misc :tickets: Removed unused util.hash(). .. changelog:: :version: 0.4.0 :released: Wed Oct 17 2007 .. change:: :tags: :tickets: (see 0.4.0beta1 for the start of major changes against 0.3, as well as http://www.sqlalchemy.org/trac/wiki/WhatsNewIn04 ) .. change:: :tags: :tickets: 785 Added initial Sybase support (mxODBC so far) .. change:: :tags: :tickets: Added partial index support for PostgreSQL. Use the postgres_where keyword on the Index. .. change:: :tags: :tickets: 817 string-based query param parsing/config file parser understands a wider range of string values for booleans .. change:: :tags: :tickets: 813 backref remove object operation doesn't fail if the other-side collection doesn't contain the item, supports noload collections .. change:: :tags: :tickets: 818 removed __len__ from "dynamic" collection as it would require issuing a SQL "count()" operation, thus forcing all list evaluations to issue redundant SQL .. change:: :tags: :tickets: 816 inline optimizations added to locate_dirty() which can greatly speed up repeated calls to flush(), as occurs with autoflush=True .. change:: :tags: :tickets: The IdentifierPreparer's _requires_quotes test is now regex based. Any out-of-tree dialects that provide custom sets of legal_characters or illegal_initial_characters will need to move to regexes or override _requires_quotes. .. change:: :tags: :tickets: Firebird has supports_sane_rowcount and supports_sane_multi_rowcount set to False due to ticket #370 (right way). .. change:: :tags: :tickets: Improvements and fixes on Firebird reflection: * FBDialect now mimics OracleDialect, regarding case-sensitivity of TABLE and COLUMN names (see the 'case_sensitive removal' topic in this current file). * FBDialect.table_names() doesn't bring system tables (ticket:796). * FB now reflects Column's nullable property correctly. ..
change:: :tags: :tickets: Fixed SQL compiler's awareness of top-level column labels as used in result-set processing; nested selects which contain the same column names don't affect the result or conflict with result-column metadata. .. change:: :tags: :tickets: query.get() and related functions (like many-to-one lazyloading) use compile-time-aliased bind parameter names, to prevent name conflicts with bind parameters that already exist in the mapped selectable. .. change:: :tags: :tickets: 795 Fixed three- and multi-level select and deferred inheritance loading (i.e. abc inheritance with no select_table). .. change:: :tags: :tickets: Ident passed to id_chooser in shard.py always a list. .. change:: :tags: :tickets: The no-arg ResultProxy._row_processor() is now the class attribute `_process_row`. .. change:: :tags: :tickets: 797 Added support for returning values from inserts and updates for PostgreSQL 8.2+. .. change:: :tags: :tickets: PG reflection, upon seeing the default schema name being used explicitly as the "schema" argument in a Table, will assume that this is the user's desired convention, and will explicitly set the "schema" argument in foreign-key-related reflected tables, thus making them match only with Table constructors that also use the explicit "schema" argument (even though its the default schema). In other words, SA assumes the user is being consistent in this usage. .. change:: :tags: :tickets: 808 fixed sqlite reflection of BOOL/BOOLEAN .. change:: :tags: :tickets: Added support for UPDATE with LIMIT on mysql. .. change:: :tags: :tickets: 803 null foreign key on a m2o doesn't trigger a lazyload .. change:: :tags: :tickets: 800 oracle does not implicitly convert to unicode for non-typed result sets (i.e. when no TypeEngine/String/Unicode type is even being used; previously it was detecting DBAPI types and converting regardless). should fix .. change:: :tags: :tickets: 806 fix to anonymous label generation of long table/column names .. change:: :tags: :tickets: Firebird dialect now uses SingletonThreadPool as poolclass. .. change:: :tags: :tickets: Firebird now uses dialect.preparer to format sequences names .. change:: :tags: :tickets: 810 Fixed breakage with postgres and multiple two-phase transactions. Two-phase commits and rollbacks didn't automatically end up with a new transaction as the usual dbapi commits/rollbacks do. .. change:: :tags: :tickets: Added an option to the _ScopedExt mapper extension to not automatically save new objects to session on object initialization. .. change:: :tags: :tickets: fixed Oracle non-ansi join syntax .. change:: :tags: :tickets: PickleType and Interval types (on db not supporting it natively) are now slightly faster. .. change:: :tags: :tickets: Added Float and Time types to Firebird (FBFloat and FBTime). Fixed BLOB SUB_TYPE for TEXT and Binary types. .. change:: :tags: :tickets: Changed the API for the in\_ operator. in_() now accepts a single argument that is a sequence of values or a selectable. The old API of passing in values as varargs still works but is deprecated. .. changelog:: :version: 0.4.0beta6 :released: Thu Sep 27 2007 .. change:: :tags: :tickets: The Session identity map is now *weak referencing* by default, use weak_identity_map=False to use a regular dict. The weak dict we are using is customized to detect instances which are "dirty" and maintain a temporary strong reference to those instances until changes are flushed. .. 
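A short sketch of the revised in_() API noted above; the users and addresses tables are hypothetical::

    from sqlalchemy import select

    # a sequence of values
    stmt = users.select(users.c.id.in_([1, 2, 3]))

    # or a selectable
    stmt = users.select(users.c.id.in_(select([addresses.c.user_id])))

..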
change:: :tags: :tickets: 758 Mapper compilation has been reorganized such that most compilation occurs upon mapper construction. This allows us to have fewer calls to mapper.compile() and also to allow class-based properties to force a compilation (i.e. User.addresses == 7 will compile all mappers; this is). The only caveat here is that an inheriting mapper now looks for its inherited mapper upon construction; so mappers within inheritance relationships need to be constructed in inheritance order (which should be the normal case anyway). .. change:: :tags: :tickets: added "FETCH" to the keywords detected by Postgres to indicate a result-row holding statement (i.e. in addition to "SELECT"). .. change:: :tags: :tickets: Added full list of SQLite reserved keywords so that they get escaped properly. .. change:: :tags: :tickets: Tightened up the relationship between the Query's generation of "eager load" aliases, and Query.instances() which actually grabs the eagerly loaded rows. If the aliases were not specifically generated for that statement by EagerLoader, the EagerLoader will not take effect when the rows are fetched. This prevents columns from being grabbed accidentally as being part of an eager load when they were not meant for such, which can happen with textual SQL as well as some inheritance situations. It's particularly important since the "anonymous aliasing" of columns uses simple integer counts now to generate labels. .. change:: :tags: :tickets: Removed "parameters" argument from clauseelement.compile(), replaced with "column_keys". The parameters sent to execute() only interact with the insert/update statement compilation process in terms of the column names present but not the values for those columns. Produces more consistent execute/executemany behavior, simplifies things a bit internally. .. change:: :tags: :tickets: 560 Added 'comparator' keyword argument to PickleType. By default, "mutable" PickleType does a "deep compare" of objects using their dumps() representation. But this doesn't work for dictionaries. Pickled objects which provide an adequate __eq__() implementation can be set up with "PickleType(comparator=operator.eq)" .. change:: :tags: :tickets: Added session.is_modified(obj) method; performs the same "history" comparison operation as occurs within a flush operation; setting include_collections=False gives the same result as is used when the flush determines whether or not to issue an UPDATE for the instance's row. .. change:: :tags: :tickets: 584, 761 Added "schema" argument to Sequence; use this with Postgres /Oracle when the sequence is located in an alternate schema. Implements part of, should fix. .. change:: :tags: :tickets: Fixed reflection of the empty string for mysql enums. .. change:: :tags: :tickets: 794 Changed MySQL dialect to use the older LIMIT , syntax instead of LIMIT OFFSET for folks using 3.23. .. change:: :tags: :tickets: Added 'passive_deletes="all"' flag to relation(), disables all nulling-out of foreign key attributes during a flush where the parent object is deleted. .. change:: :tags: :tickets: Column defaults and onupdates, executing inline, will add parenthesis for subqueries and other parenthesis-requiring expressions .. change:: :tags: :tickets: 793 The behavior of String/Unicode types regarding that they auto-convert to TEXT/CLOB when no length is present now occurs *only* for an exact type of String or Unicode with no arguments. 
If you use VARCHAR or NCHAR (subclasses of String/Unicode) with no length, they will be interpreted by the dialect as VARCHAR/NCHAR; no "magic" conversion happens there. This is less surprising behavior and in particular this helps Oracle keep string-based bind parameters as VARCHARs and not CLOBs. .. change:: :tags: :tickets: 771 Fixes to ShardedSession to work with deferred columns. .. change:: :tags: :tickets: User-defined shard_chooser() function must accept "clause=None" argument; this is the ClauseElement passed to session.execute(statement) and can be used to determine correct shard id (since execute() doesn't take an instance.) .. change:: :tags: :tickets: 764 Adjusted operator precedence of NOT to match '==' and others, so that ~(x y) produces NOT (x y), which is better compatible with older MySQL versions.. This doesn't apply to "~(x==y)" as it does in 0.3 since ~(x==y) compiles to "x != y", but still applies to operators like BETWEEN. .. change:: :tags: :tickets: 757, 768, 779, 728 Other tickets:,,. .. changelog:: :version: 0.4.0beta5 :released: .. change:: :tags: :tickets: 754 Connection pool fixes; the better performance of beta4 remains but fixes "connection overflow" and other bugs which were present (like). .. change:: :tags: :tickets: 769 Fixed bugs in determining proper sync clauses from custom inherit conditions. .. change:: :tags: :tickets: 763 Extended 'engine_from_config' coercion for QueuePool size / overflow. .. change:: :tags: :tickets: 748 mysql views can be reflected again. .. change:: :tags: :tickets: AssociationProxy can now take custom getters and setters. .. change:: :tags: :tickets: Fixed malfunctioning BETWEEN in orm queries. .. change:: :tags: :tickets: 762 Fixed OrderedProperties pickling .. change:: :tags: :tickets: SQL-expression defaults and sequences now execute "inline" for all non-primary key columns during an INSERT or UPDATE, and for all columns during an executemany()-style call. inline=True flag on any insert/update statement also forces the same behavior with a single execute(). result.postfetch_cols() is a collection of columns for which the previous single insert or update statement contained a SQL-side default expression. .. change:: :tags: :tickets: 759 Fixed PG executemany() behavior. .. change:: :tags: :tickets: postgres reflects tables with autoincrement=False for primary key columns which have no defaults. .. change:: :tags: :tickets: postgres no longer wraps executemany() with individual execute() calls, instead favoring performance. "rowcount"/"concurrency" checks with deleted items (which use executemany) are disabled with PG since psycopg2 does not report proper rowcount for executemany(). .. change:: :tags: tickets, fixed :tickets: 742 .. change:: :tags: tickets, fixed :tickets: 748 .. change:: :tags: tickets, fixed :tickets: 760 .. change:: :tags: tickets, fixed :tickets: 762 .. change:: :tags: tickets, fixed :tickets: 763 .. changelog:: :version: 0.4.0beta4 :released: Wed Aug 22 2007 .. change:: :tags: :tickets: Tidied up what ends up in your namespace when you 'from sqlalchemy import \*': .. change:: :tags: :tickets: 'table' and 'column' are no longer imported. They remain available by direct reference (as in 'sql.table' and 'sql.column') or a glob import from the sql package. It was too easy to accidentally use a sql.expressions.table instead of schema.Table when just starting out with SQLAlchemy, likewise column. .. 
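A sketch of the length-less String/VARCHAR distinction described at the top of these notes; the table and column names are illustrative, and VARCHAR is assumed to be importable from the types module::

    from sqlalchemy import MetaData, Table, Column, Integer, String, types

    metadata = MetaData()
    docs = Table('docs', metadata,
                 Column('id', Integer, primary_key=True),
                 # plain String with no length: rendered as TEXT/CLOB
                 Column('body', String()),
                 # VARCHAR subclass with no length: stays VARCHAR
                 Column('title', types.VARCHAR()))

..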
change:: :tags: :tickets: Internal-ish classes like ClauseElement, FromClause, NullTypeEngine, etc., are also no longer imported into your namespace .. change:: :tags: :tickets: The 'Smallinteger' compatiblity name (small i!) is no longer imported, but remains in schema.py for now. SmallInteger (big I!) is still imported. .. change:: :tags: :tickets: The connection pool uses a "threadlocal" strategy internally to return the same connection already bound to a thread, for "contextual" connections; these are the connections used when you do a "connectionless" execution like insert().execute(). This is like a "partial" version of the "threadlocal" engine strategy but without the thread-local transaction part of it. We're hoping it reduces connection pool overhead as well as database usage. However, if it proves to impact stability in a negative way, we'll roll it right back. .. change:: :tags: :tickets: Fix to bind param processing such that "False" values (like blank strings) still get processed/encoded. .. change:: :tags: :tickets: 752 Fix to select() "generative" behavior, such that calling column(), select_from(), correlate(), and with_prefix() does not modify the original select object .. change:: :tags: :tickets: Added a "legacy" adapter to types, such that user-defined TypeEngine and TypeDecorator classes which define convert_bind_param() and/or convert_result_value() will continue to function. Also supports calling the super() version of those methods. .. change:: :tags: :tickets: Added session.prune(), trims away instances cached in a session that are no longer referenced elsewhere. (A utility for strong-ref identity maps). .. change:: :tags: :tickets: Added close() method to Transaction. Closes out a transaction using rollback if it's the outermost transaction, otherwise just ends without affecting the outer transaction. .. change:: :tags: :tickets: Transactional and non-transactional Session integrates better with bound connection; a close() will ensure that connection transactional state is the same as that which existed on it before being bound to the Session. .. change:: :tags: :tickets: 735 Modified SQL operator functions to be module-level operators, allowing SQL expressions to be pickleable. .. change:: :tags: :tickets: Small adjustment to mapper class.__init__ to allow for Py2.6 object.__init__() behavior. .. change:: :tags: :tickets: Fixed 'prefix' argument for select() .. change:: :tags: :tickets: Connection.begin() no longer accepts nested=True, this logic is now all in begin_nested(). .. change:: :tags: :tickets: Fixes to new "dynamic" relation loader involving cascades .. change:: :tags: tickets, fixed :tickets: 735 .. change:: :tags: tickets, fixed :tickets: 752 .. changelog:: :version: 0.4.0beta3 :released: Thu Aug 16 2007 .. change:: :tags: :tickets: SQL types optimization: .. change:: :tags: :tickets: New performance tests show a combined mass-insert/mass-select test as having 68% fewer function calls than the same test run against 0.3. .. change:: :tags: :tickets: General performance improvement of result set iteration is around 10-20%. .. change:: :tags: :tickets: In types.AbstractType, convert_bind_param() and convert_result_value() have migrated to callable-returning bind_processor() and result_processor() methods. If no callable is returned, no pre/post processing function is called. .. change:: :tags: :tickets: Hooks added throughout base/sql/defaults to optimize the calling of bind aram/result processors so that method call overhead is minimized. .. 
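A hedged sketch of the bind_processor()/result_processor() hooks mentioned above, using a hypothetical user-defined type in the 0.4-era TypeEngine style::

    from sqlalchemy import types

    class CSVList(types.TypeEngine):
        # hypothetical type storing a list of strings as a comma-separated VARCHAR
        def get_col_spec(self):
            return "VARCHAR(255)"

        def bind_processor(self, dialect):
            def process(value):
                if value is None:
                    return None
                return ','.join(value)
            return process

        def result_processor(self, dialect):
            def process(value):
                if value is None:
                    return None
                return value.split(',')
            return process

..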
change:: :tags: :tickets: Support added for executemany() scenarios such that unneeded "last row id" logic doesn't kick in, parameters aren't excessively traversed. .. change:: :tags: :tickets: Added 'inherit_foreign_keys' arg to mapper(). .. change:: :tags: :tickets: Added support for string date passthrough in sqlite. .. change:: :tags: tickets, fixed :tickets: 738 .. change:: :tags: tickets, fixed :tickets: 739 .. change:: :tags: tickets, fixed :tickets: 743 .. change:: :tags: tickets, fixed :tickets: 744 .. changelog:: :version: 0.4.0beta2 :released: Tue Aug 14 2007 .. change:: :tags: oracle, improvements. :tickets: Auto-commit after LOAD DATA INFILE for mysql. .. change:: :tags: oracle, improvements. :tickets: A rudimental SessionExtension class has been added, allowing user-defined functionality to take place at flush(), commit(), and rollback() boundaries. .. change:: :tags: oracle, improvements. :tickets: Added engine_from_config() function for helping to create_engine() from an .ini style config. .. change:: :tags: oracle, improvements. :tickets: base_mapper() becomes a plain attribute. .. change:: :tags: oracle, improvements. :tickets: session.execute() and scalar() can search for a Table with which to bind from using the given ClauseElement. .. change:: :tags: oracle, improvements. :tickets: Session automatically extrapolates tables from mappers with binds, also uses base_mapper so that inheritance hierarchies bind automatically. .. change:: :tags: oracle, improvements. :tickets: Moved ClauseVisitor traversal back to inlined non-recursive. .. change:: :tags: tickets, fixed :tickets: 730 .. change:: :tags: tickets, fixed :tickets: 732 .. change:: :tags: tickets, fixed :tickets: 733 .. change:: :tags: tickets, fixed :tickets: 734 .. changelog:: :version: 0.4.0beta1 :released: Sun Aug 12 2007 .. change:: :tags: orm :tickets: Speed! Along with recent speedups to ResultProxy, total number of function calls significantly reduced for large loads. .. change:: :tags: orm :tickets: test/perf/masseagerload.py reports 0.4 as having the fewest number of function calls across all SA versions (0.1, 0.2, and 0.3). .. change:: :tags: orm :tickets: 213 New collection_class api and implementation. Collections are now instrumented via decorations rather than proxying. You can now have collections that manage their own membership, and your class instance will be directly exposed on the relation property. The changes are transparent for most users. .. change:: :tags: orm :tickets: InstrumentedList (as it was) is removed, and relation properties no longer have 'clear()', '.data', or any other added methods beyond those provided by the collection type. You are free, of course, to add them to a custom class. .. change:: :tags: orm :tickets: __setitem__-like assignments now fire remove events for the existing value, if any. .. change:: :tags: orm :tickets: dict-likes used as collection classes no longer need to change __iter__ semantics- itervalues() is used by default instead. This is a backwards incompatible change. .. change:: :tags: orm :tickets: Subclassing dict for a mapped collection is no longer needed in most cases. orm.collections provides canned implementations that key objects by a specified column or a custom function of your choice. .. change:: :tags: orm :tickets: Collection assignment now requires a compatible type- assigning None to clear a collection or assigning a list to a dict collection will now raise an argument error. .. 
change:: :tags: orm :tickets: AttributeExtension moved to interfaces, and .delete is now .remove. The event method signature has also been swapped around. .. change:: :tags: orm :tickets: Major overhaul for Query: .. change:: :tags: orm :tickets: All selectXXX methods are deprecated. Generative methods are now the standard way to do things, i.e. filter(), filter_by(), all(), one(), etc. Deprecated methods are docstring'ed with their new replacements. .. change:: :tags: orm :tickets: 643 Class-level properties are now usable as query elements... no more '.c.'! "Class.c.propname" is now superseded by "Class.propname". All clause operators are supported, as well as higher level operators such as Class.prop== for scalar attributes, Class.prop.contains() and Class.prop.any() for collection-based attributes (all are also negatable). Table-based column expressions as well as columns mounted on mapped classes via 'c' are of course still fully available and can be freely mixed with the new attributes. .. change:: :tags: orm :tickets: Removed ancient query.select_by_attributename() capability. .. change:: :tags: orm :tickets: The aliasing logic used by eager loading has been generalized, so that it also adds full automatic aliasing support to Query. It's no longer necessary to create an explicit Alias to join to the same tables multiple times; *even for self-referential relationships*. - join() and outerjoin() take arguments "aliased=True". This causes their joins to be built on aliased tables; subsequent calls to filter() and filter_by() will translate all table expressions (yes, real expressions using the original mapped Table) to be that of the Alias for the duration of that join() (i.e. until reset_joinpoint() or another join() is called). - join() and outerjoin() take arguments "id=". When used with "aliased=True", the id can be referenced by add_entity(cls, id=) so that you can select the joined instances even if they're from an alias. - join() and outerjoin() now work with self-referential relationships! Using "aliased=True", you can join as many levels deep as desired, i.e. query.join(['children', 'children'], aliased=True); filter criterion will be against the rightmost joined table .. change:: :tags: orm :tickets: 660 Added query.populate_existing(), marks the query to reload all attributes and collections of all instances touched in the query, including eagerly-loaded entities. .. change:: :tags: orm :tickets: Added eagerload_all(), allows eagerload_all('x.y.z') to specify eager loading of all properties in the given path. .. change:: :tags: orm :tickets: Major overhaul for Session: .. change:: :tags: orm :tickets: New function which "configures" a session called "sessionmaker()". Send various keyword arguments to this function once; it returns a new class which creates a Session against that stereotype. .. change:: :tags: orm :tickets: SessionTransaction removed from "public" API. You now can call begin()/ commit()/rollback() on the Session itself. .. change:: :tags: orm :tickets: Session also supports SAVEPOINT transactions; call begin_nested(). .. change:: :tags: orm :tickets: Session supports two-phase commit behavior when vertically or horizontally partitioning (i.e., using more than one engine). Use twophase=True. .. change:: :tags: orm :tickets: Session flag "transactional=True" produces a session which always places itself into a transaction when first used. Upon commit(), rollback() or close(), the transaction ends; but begins again on the next usage. ..
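A minimal sketch of the sessionmaker() pattern introduced above; the engine URL and the use of the transactional flag are illustrative of the 0.4-era API::

    from sqlalchemy import create_engine
    from sqlalchemy.orm import sessionmaker

    engine = create_engine('sqlite://')

    # configure once; the returned class stamps out Sessions with these defaults
    Session = sessionmaker(bind=engine, transactional=True)

    session = Session()
    # ... work with the session; commit() ends the transaction and a new one
    # begins on the next usage
    session.commit()

..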
change:: :tags: orm :tickets: Session supports "autoflush=True". This issues a flush() before each query. Use in conjunction with transactional, and you can just save()/update() and then query; the new objects will be there. Use commit() at the end (or flush() if non-transactional) to flush remaining changes. .. change:: :tags: orm :tickets: New scoped_session() function replaces SessionContext and assignmapper. Builds onto the "sessionmaker()" concept to produce a class whose Session() construction returns the thread-local session. Or, call all Session methods as class methods, i.e. Session.save(foo); Session.commit(), just like the old "objectstore" days. .. change:: :tags: orm :tickets: Added new "binds" argument to Session to support configuration of multiple binds with the sessionmaker() function. .. change:: :tags: orm :tickets: A rudimental SessionExtension class has been added, allowing user-defined functionality to take place at flush(), commit(), and rollback() boundaries. .. change:: :tags: orm :tickets: Query-based relation()s available with dynamic_loader(). This is a *writable* collection (supporting append() and remove()) which is also a live Query object when accessed for reads. Ideal for dealing with very large collections where only partial loading is desired. .. change:: :tags: orm :tickets: flush()-embedded inline INSERT/UPDATE expressions. Assign any SQL expression, like "sometable.c.column + 1", to an instance's attribute. Upon flush(), the mapper detects the expression and embeds it directly in the INSERT or UPDATE statement; the attribute gets deferred on the instance so it loads the new value the next time you access it. .. change:: :tags: orm :tickets: 618 A rudimental sharding (horizontal scaling) system is introduced. This system uses a modified Session which can distribute read and write operations among multiple databases, based on user-defined functions defining the "sharding strategy". Instances and their dependents can be distributed and queried among multiple databases based on attribute values, round-robin approaches or any other user-defined system. .. change:: :tags: orm :tickets: 659 Eager loading has been enhanced to allow even more joins in more places. It now functions at any arbitrary depth along self-referential and cyclical structures. When loading cyclical structures, specify "join_depth" on relation() indicating how many times you'd like the table to join to itself; each level gets a distinct table alias. The alias names themselves are generated at compile time using a simple counting scheme now and are a lot easier on the eyes, as well as of course completely deterministic. .. change:: :tags: orm :tickets: 211 Added composite column properties. This allows you to create a type which is represented by more than one column, when using the ORM. Objects of the new type are fully functional in query expressions, comparisons, query.get() clauses, etc. and act as though they are regular single-column scalars... except they're not! Use the function composite(cls, \*columns) inside of the mapper's "properties" dict, and instances of cls will be created/mapped to a single attribute, comprised of the values corresponding to \*columns. .. change:: :tags: orm :tickets: Improved support for custom column_property() attributes which feature correlated subqueries; these work better with eager loading now. ..
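An illustration of the flush()-embedded SQL expression feature described above; the mapped Account class, its accounts_table, and the session are hypothetical::

    # assumes Account is mapped to accounts_table and 'account' is persistent
    account.balance = accounts_table.c.balance + 100
    session.flush()
    # the UPDATE embeds "balance = balance + 100"; the attribute is deferred
    # and reloads its new value the next time it is accessed
    print account.balance

..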
change:: :tags: orm :tickets: 611 Primary key "collapse" behavior; the mapper will analyze all columns in its given selectable for primary key "equivalence", that is, columns which are equivalent via foreign key relationship or via an explicit inherit_condition, primarily for joined-table inheritance scenarios where differently named PK columns in inheriting tables should "collapse" into a single-valued (or fewer-valued) primary key. Fixes things like. .. change:: :tags: orm :tickets: Joined-table inheritance will now generate the primary key columns of all inherited classes against the root table of the join only. This implies that each row in the root table is distinct to a single instance. If for some rare reason this is not desirable, explicit primary_key settings on individual mappers will override it. .. change:: :tags: orm :tickets: When "polymorphic" flags are used with joined-table or single-table inheritance, all identity keys are generated against the root class of the inheritance hierarchy; this allows query.get() to work polymorphically using the same caching semantics as a non-polymorphic get. Note that this currently does not work with concrete inheritance. .. change:: :tags: orm :tickets: Secondary inheritance loading: polymorphic mappers can be constructed *without* a select_table argument. Inheriting mappers whose tables were not represented in the initial load will issue a second SQL query immediately, once per instance (i.e. not very efficient for large lists), in order to load the remaining columns. .. change:: :tags: orm :tickets: Secondary inheritance loading can also move its second query into a column-level "deferred" load, via the "polymorphic_fetch" argument, which can be set to 'select' or 'deferred' .. change:: :tags: orm :tickets: 696 It's now possible to map only a subset of available selectable columns onto mapper properties, using include_columns/exclude_columns. .. change:: :tags: orm :tickets: Added undefer_group() MapperOption, sets a set of "deferred" columns joined by a "group" to load as "undeferred". .. change:: :tags: orm :tickets: Rewrite of the "deterministic alias name" logic to be part of the SQL layer, produces much simpler alias and label names more in the style of Hibernate .. change:: :tags: sql :tickets: Speed! Clause compilation as well as the mechanics of SQL constructs have been streamlined and simplified to a significant degree, for a 20-30% improvement of the statement construction/compilation overhead of 0.3. .. change:: :tags: sql :tickets: All "type" keyword arguments, such as those to bindparam(), column(), Column(), and func.(), renamed to "type\_". Those objects still name their "type" attribute as "type". .. change:: :tags: sql :tickets: case_sensitive=(True|False) setting removed from schema items, since checking this state added a lot of method call overhead and there was no decent reason to ever set it to False. Table and column names which are all lower case will be treated as case-insensitive (yes, we adjust for Oracle's UPPERCASE style too). .. change:: :tags: transactions :tickets: Added context manager (with statement) support for transactions. .. change:: :tags: transactions :tickets: Added support for two-phase commit; works with mysql and postgres so far. .. change:: :tags: transactions :tickets: Added a subtransaction implementation that uses savepoints. .. change:: :tags: transactions :tickets: Added support for savepoints. ..
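A sketch of the context manager support noted above; the engine and users table are assumed, and the commit-on-success / rollback-on-error semantics are assumed to match later releases::

    conn = engine.connect()

    # the Transaction returned by begin() acts as a context manager:
    # commit if the block completes, roll back if it raises
    with conn.begin():
        conn.execute(users.insert(), {'name': 'ed'})

..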
change:: :tags: metadata :tickets: Tables can be reflected from the database en-masse without declaring them in advance. MetaData(engine, reflect=True) will load all tables present in the database, or use metadata.reflect() for finer control. .. change:: :tags: metadata :tickets: DynamicMetaData has been renamed to ThreadLocalMetaData .. change:: :tags: metadata :tickets: The ThreadLocalMetaData constructor now takes no arguments. .. change:: :tags: metadata :tickets: BoundMetaData has been removed- regular MetaData is equivalent .. change:: :tags: metadata :tickets: 646 Numeric and Float types now have an "asdecimal" flag; defaults to True for Numeric, False for Float. When True, values are returned as decimal.Decimal objects; when False, values are returned as float(). The defaults of True/False are already the behavior for PG and MySQL's DBAPI modules. .. change:: :tags: metadata :tickets: 475 New SQL operator implementation which removes all hardcoded operators from expression structures and moves them into compilation; allows greater flexibility of operator compilation; for example, "+" compiles to "||" when used in a string context, or "concat(a,b)" on MySQL; whereas in a numeric context it compiles to "+". Fixes. .. change:: :tags: metadata :tickets: "Anonymous" alias and label names are now generated at SQL compilation time in a completely deterministic fashion... no more random hex IDs .. change:: :tags: metadata :tickets: Significant architectural overhaul to SQL elements (ClauseElement). All elements share a common "mutability" framework which allows a consistent approach to in-place modifications of elements as well as generative behavior. Improves stability of the ORM which makes heavy usage of mutations to SQL expressions. .. change:: :tags: metadata :tickets: select() and union()'s now have "generative" behavior. Methods like order_by() and group_by() return a *new* instance - the original instance is left unchanged. Non-generative methods remain as well. .. change:: :tags: metadata :tickets: 569, 52 The internals of select/union vastly simplified- all decision making regarding "is subquery" and "correlation" pushed to SQL generation phase. select() elements are now *never* mutated by their enclosing containers or by any dialect's compilation process .. change:: :tags: metadata :tickets: select(scalar=True) argument is deprecated; use select(..).as_scalar(). The resulting object obeys the full "column" interface and plays better within expressions. .. change:: :tags: metadata :tickets: 504 Added select().with_prefix('foo') allowing any set of keywords to be placed before the columns clause of the SELECT .. change:: :tags: metadata :tickets: 686 Added array slice support to row[] .. change:: :tags: metadata :tickets: Result sets make a better attempt at matching the DBAPI types present in cursor.description to the TypeEngine objects defined by the dialect, which are then used for result-processing. Note this only takes effect for textual SQL; constructed SQL statements always have an explicit type map. .. change:: :tags: metadata :tickets: Result sets from CRUD operations close their underlying cursor immediately and will also autoclose the connection if defined for the operation; this allows more efficient usage of connections for successive CRUD operations with less chance of "dangling connections". .. change:: :tags: metadata :tickets: 559 Column defaults and onupdate Python functions (i.e. 
passed to ColumnDefault) may take zero or one arguments; the one argument is the ExecutionContext, from which you can call "context.parameters[someparam]" to access the other bind parameter values affixed to the statement. The connection used for the execution is available as well so that you can pre-execute statements. .. change:: :tags: metadata :tickets: Added "explicit" create/drop/execute support for sequences (i.e. you can pass a "connectable" to each of those methods on Sequence). .. change:: :tags: metadata :tickets: Better quoting of identifiers when manipulating schemas. .. change:: :tags: metadata :tickets: Standardized the behavior for table reflection where types can't be located; NullType is substituted instead, and a warning is raised. .. change:: :tags: metadata :tickets: 606 ColumnCollection (i.e. the 'c' attribute on tables) follows dictionary semantics for "__contains__" .. change:: :tags: engines :tickets: Speed! The mechanics of result processing and bind parameter processing have been overhauled, streamlined and optimized to issue as few method calls as possible. Bench tests for mass INSERT and mass rowset iteration both show 0.4 to be over twice as fast as 0.3, using 68% fewer function calls. .. change:: :tags: engines :tickets: You can now hook into the pool lifecycle and run SQL statements or other logic at each new DBAPI connection, pool check-out and check-in. .. change:: :tags: engines :tickets: Connections gain a .properties collection, with contents scoped to the lifetime of the underlying DBAPI connection .. change:: :tags: engines :tickets: Removed auto_close_cursors and disallow_open_cursors arguments from Pool; reduces overhead as cursors are normally closed by ResultProxy and Connection. .. change:: :tags: extensions :tickets: proxyengine is temporarily removed, pending an actually working replacement. .. change:: :tags: extensions :tickets: SelectResults has been replaced by Query. SelectResults / SelectResultsExt still exist but just return a slightly modified Query object for backwards-compatibility. The join_to() method from SelectResults isn't present anymore; use join() instead. .. change:: :tags: mysql :tickets: Table and column names loaded via reflection are now Unicode. .. change:: :tags: mysql :tickets: All standard column types are now supported, including SET. .. change:: :tags: mysql :tickets: Table reflection can now be performed in as little as one round-trip. .. change:: :tags: mysql :tickets: ANSI and ANSI_QUOTES sql modes are now supported. .. change:: :tags: mysql :tickets: Indexes are now reflected. .. change:: :tags: postgres :tickets: Added PGArray datatype for using postgres array datatypes. .. change:: :tags: oracle :tickets: 507 Very rudimental support for OUT parameters added; use sql.outparam(name, type) to set up an OUT parameter, just like bindparam(); after execution, values are available via the result.out_parameters dictionary. SQLAlchemy-0.8.4/doc/build/changelog/changelog_05.rst0000644000076500000240000033063712251147171023026 0ustar classicstaff00000000000000 ============== 0.5 Changelog ============== .. changelog:: :version: 0.5.9 :released: .. change:: :tags: sql :tickets: 1661 Fixed erroneous self_group() call in expression package. .. changelog:: :version: 0.5.8 :released: Sat Jan 16 2010 .. change:: :tags: sql :tickets: The copy() method on Column now supports uninitialized, unnamed Column objects. This allows easy creation of declarative helpers which place common columns on multiple subclasses. ..
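A hypothetical sketch of the declarative helper pattern enabled by the Column.copy() change above; the models are examples only::

    from sqlalchemy import Column, Integer
    from sqlalchemy.ext.declarative import declarative_base

    Base = declarative_base()

    # an unnamed, uninitialized Column that can be copied onto many classes
    common_id = Column(Integer, primary_key=True)

    class Foo(Base):
        __tablename__ = 'foo'
        id = common_id.copy()

    class Bar(Base):
        __tablename__ = 'bar'
        id = common_id.copy()

..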
change:: :tags: sql :tickets: Default generators like Sequence() translate correctly across a copy() operation. .. change:: :tags: sql :tickets: Sequence() and other DefaultGenerator objects are accepted as the value for the "default" and "onupdate" keyword arguments of Column, in addition to being accepted positionally. .. change:: :tags: sql :tickets: 1568, 1617 Fixed a column arithmetic bug that affected column correspondence for cloned selectables which contain free-standing column expressions. This bug is generally only noticeable when exercising newer ORM behavior only availble in 0.6 via, but is more correct at the SQL expression level as well. .. change:: :tags: postgresql :tickets: 1647 The extract() function, which was slightly improved in 0.5.7, needed a lot more work to generate the correct typecast (the typecasts appear to be necessary in PG's EXTRACT quite a lot of the time). The typecast is now generated using a rule dictionary based on PG's documentation for date/time/interval arithmetic. It also accepts text() constructs again, which was broken in 0.5.7. .. change:: :tags: firebird :tickets: 1646 Recognize more errors as disconnections. .. changelog:: :version: 0.5.7 :released: Sat Dec 26 2009 .. change:: :tags: orm :tickets: 1543 contains_eager() now works with the automatically generated subquery that results when you say "query(Parent).join(Parent.somejoinedsubclass)", i.e. when Parent joins to a joined-table-inheritance subclass. Previously contains_eager() would erroneously add the subclass table to the query separately producing a cartesian product. An example is in the ticket description. .. change:: :tags: orm :tickets: 1553 query.options() now only propagate to loaded objects for potential further sub-loads only for options where such behavior is relevant, keeping various unserializable options like those generated by contains_eager() out of individual instance states. .. change:: :tags: orm :tickets: 1054 Session.execute() now locates table- and mapper-specific binds based on a passed in expression which is an insert()/update()/delete() construct. .. change:: :tags: orm :tickets: Session.merge() now properly overwrites a many-to-one or uselist=False attribute to None if the attribute is also None in the given object to be merged. .. change:: :tags: orm :tickets: 1618 Fixed a needless select which would occur when merging transient objects that contained a null primary key identifier. .. change:: :tags: orm :tickets: 1585 Mutable collection passed to the "extension" attribute of relation(), column_property() etc. will not be mutated or shared among multiple instrumentation calls, preventing duplicate extensions, such as backref populators, from being inserted into the list. .. change:: :tags: orm :tickets: 1504 Fixed the call to get_committed_value() on CompositeProperty. .. change:: :tags: orm :tickets: 1602 Fixed bug where Query would crash if a join() with no clear "left" side were called when a non-mapped column entity appeared in the columns list. .. change:: :tags: orm :tickets: 1616, 1480 Fixed bug whereby composite columns wouldn't load properly when configured on a joined-table subclass, introduced in version 0.5.6 as a result of the fix for. thx to Scott Torborg. .. change:: :tags: orm :tickets: 1556 The "use get" behavior of many-to-one relations, i.e. that a lazy load will fallback to the possibly cached query.get() value, now works across join conditions where the two compared types are not exactly the same class, but share the same "affinity" - i.e. 
Integer and SmallInteger. Also allows combinations of reflected and non-reflected types to work with 0.5 style type reflection, such as PGText/Text (note 0.6 reflects types as their generic versions). .. change:: :tags: orm :tickets: 1436 Fixed bug in query.update() when passing Cls.attribute as keys in the value dict and using synchronize_session='expire' ('fetch' in 0.6). .. change:: :tags: sql :tickets: 1603 Fixed bug in two-phase transaction whereby commit() method didn't set the full state which allows subsequent close() call to succeed. .. change:: :tags: sql :tickets: Fixed the "numeric" paramstyle, which apparently is the default paramstyle used by Informixdb. .. change:: :tags: sql :tickets: 1574 Repeat expressions in the columns clause of a select are deduped based on the identity of each clause element, not the actual string. This allows positional elements to render correctly even if they all render identically, such as "qmark" style bind parameters. .. change:: :tags: sql :tickets: 1632 The cursor associated with connection pool connections (i.e. _CursorFairy) now proxies `__iter__()` to the underlying cursor correctly. .. change:: :tags: sql :tickets: 1556 types now support an "affinity comparison" operation, i.e. that an Integer/SmallInteger are "compatible", or a Text/String, PickleType/Binary, etc. Part of. .. change:: :tags: sql :tickets: 1641 Fixed bug preventing alias() of an alias() from being cloned or adapted (occurs frequently in ORM operations). .. change:: :tags: sqlite :tickets: 1439 sqlite dialect properly generates CREATE INDEX for a table that is in an alternate schema. .. change:: :tags: postgresql :tickets: 1085 Added support for reflecting the DOUBLE PRECISION type, via a new postgres.PGDoublePrecision object. This is postgresql.DOUBLE_PRECISION in 0.6. .. change:: :tags: postgresql :tickets: 460 Added support for reflecting the INTERVAL YEAR TO MONTH and INTERVAL DAY TO SECOND syntaxes of the INTERVAL type. .. change:: :tags: postgresql :tickets: 1576 Corrected the "has_sequence" query to take current schema, or explicit sequence-stated schema, into account. .. change:: :tags: postgresql :tickets: 1611 Fixed the behavior of extract() to apply operator precedence rules to the "::" operator when applying the "timestamp" cast - ensures proper parenthesization. .. change:: :tags: mssql :tickets: 1561 Changed the name of TrustedConnection to Trusted_Connection when constructing pyodbc connect arguments .. change:: :tags: oracle :tickets: 1637 The "table_names" dialect function, used by MetaData .reflect(), omits "index overflow tables", a system table generated by Oracle when "index only tables" with overflow are used. These tables aren't accessible via SQL and can't be reflected. .. change:: :tags: ext :tickets: 1570, 1523 A column can be added to a joined-table declarative superclass after the class has been constructed (i.e. via class-level attribute assignment), and the column will be propagated down to subclasses. This is the reverse situation as that of, fixed in 0.5.6. .. change:: :tags: ext :tickets: 1491 Fixed a slight inaccuracy in the sharding example. Comparing equivalence of columns in the ORM is best accomplished using col1.shares_lineage(col2). .. change:: :tags: ext :tickets: 1606 Removed unused `load()` method from ShardedQuery. .. changelog:: :version: 0.5.6 :released: Sat Sep 12 2009 .. change:: :tags: orm :tickets: 1300 Fixed bug whereby inheritance discriminator part of a composite primary key would fail on updates. Continuation of. .. 
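For reference, a hypothetical usage of the query.update() form covered by the fix noted above; the User model and the session are illustrative::

    # mapped attributes may be used as keys in the values dict; 'expire'
    # invalidates affected instances in the session rather than evaluating
    # the change in Python
    session.query(User).filter(User.name == 'jack').update(
        {User.name: 'ed'}, synchronize_session='expire')

..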
change:: :tags: orm :tickets: 1507 Fixed bug which disallowed one side of a many-to-many bidirectional reference to declare itself as "viewonly" .. change:: :tags: orm :tickets: 1526 Added an assertion that prevents a @validates function or other AttributeExtension from loading an unloaded collection such that internal state may be corrupted. .. change:: :tags: orm :tickets: 1519 Fixed bug which prevented two entities from mutually replacing each other's primary key values within a single flush() for some orderings of operations. .. change:: :tags: orm :tickets: 1485 Fixed an obscure issue whereby a joined-table subclass with a self-referential eager load on the base class would populate the related object's "subclass" table with data from the "subclass" table of the parent. .. change:: :tags: orm :tickets: 1477 relations() now have greater ability to be "overridden", meaning a subclass that explicitly specifies a relation() overriding that of the parent class will be honored during a flush. This is currently to support many-to-many relations from concrete inheritance setups. Outside of that use case, YMMV. .. change:: :tags: orm :tickets: 1483 Squeezed a few more unnecessary "lazy loads" out of relation(). When a collection is mutated, many-to-one backrefs on the other side will not fire off to load the "old" value, unless "single_parent=True" is set. A direct assignment of a many-to-one still loads the "old" value in order to update backref collections on that value, which may be present in the session already, thus maintaining the 0.5 behavioral contract. .. change:: :tags: orm :tickets: 1480 Fixed bug whereby a load/refresh of joined table inheritance attributes which were based on column_property() or similar would fail to evaluate. .. change:: :tags: orm :tickets: 1488 Improved support for MapperProperty objects overriding that of an inherited mapper for non-concrete inheritance setups - attribute extensions won't randomly collide with each other. .. change:: :tags: orm :tickets: 1487 UPDATE and DELETE do not support ORDER BY, LIMIT, OFFSET, etc. in standard SQL. Query.update() and Query.delete() now raise an exception if any of limit(), offset(), order_by(), group_by(), or distinct() have been called. .. change:: :tags: orm :tickets: Added AttributeExtension to sqlalchemy.orm.__all__ .. change:: :tags: orm :tickets: 1476 Improved error message when query() is called with a non-SQL /entity expression. .. change:: :tags: orm :tickets: 1440 Using False or 0 as a polymorphic discriminator now works on the base class as well as a subclass. .. change:: :tags: orm :tickets: 1424 Added enable_assertions(False) to Query which disables the usual assertions for expected state - used by Query subclasses to engineer custom state.. See http://www.sqlalchemy.org/trac/wiki/UsageRecipes/PreFilteredQuery for an example. .. change:: :tags: orm :tickets: 1501 Fixed recursion issue which occured if a mapped object's `__len__()` or `__nonzero__()` method resulted in state changes. .. change:: :tags: orm :tickets: 1506 Fixed incorrect exception raise in Weak/StrongIdentityMap.add() .. change:: :tags: orm :tickets: 1522 Fixed the error message for "could not find a FROM clause" in query.join() which would fail to issue correctly if the query was against a pure SQL construct. .. change:: :tags: orm :tickets: 1486 Fixed a somewhat hypothetical issue which would result in the wrong primary key being calculated for a mapper using the old polymorphic_union function - but this is old stuff. .. 
change:: :tags: sql :tickets: 1373 Fixed column.copy() to copy defaults and onupdates. .. change:: :tags: sql :tickets: Fixed a bug in extract() introduced in 0.5.4 whereby the string "field" argument was getting treated as a ClauseElement, causing various errors within more complex SQL transformations. .. change:: :tags: sql :tickets: 1420 Unary expressions such as DISTINCT propagate their type handling to result sets, allowing conversions like unicode and such to take place. .. change:: :tags: sql :tickets: 1482 Fixed bug in Table and Column whereby passing empty dict for "info" argument would raise an exception. .. change:: :tags: oracle :tickets: 1309 Backported 0.6 fix for Oracle alias names not getting truncated. .. change:: :tags: ext :tickets: 1446 The collection proxies produced by associationproxy are now pickleable. A user-defined proxy_factory however is still not pickleable unless it defines __getstate__ and __setstate__. .. change:: :tags: ext :tickets: 1468 Declarative will raise an informative exception if __table_args__ is passed as a tuple with no dict argument. Improved documentation. .. change:: :tags: ext :tickets: 1527 Table objects declared in the MetaData can now be used in string expressions sent to primaryjoin/secondaryjoin/ secondary - the name is pulled from the MetaData of the declarative base. .. change:: :tags: ext :tickets: 1523 A column can be added to a joined-table subclass after the class has been constructed (i.e. via class-level attribute assignment). The column is added to the underlying Table as always, but now the mapper will rebuild its "join" to include the new column, instead of raising an error about "no such column, use column_property() instead". .. change:: :tags: test :tickets: Added examples into the test suite so they get exercised regularly and cleaned up a couple deprecation warnings. .. changelog:: :version: 0.5.5 :released: Mon Jul 13 2009 .. change:: :tags: general :tickets: 970 unit tests have been migrated from unittest to nose. See README.unittests for information on how to run the tests. .. change:: :tags: orm :tickets: The "foreign_keys" argument of relation() will now propagate automatically to the backref in the same way that primaryjoin and secondaryjoin do. For the extremely rare use case where the backref of a relation() has intentionally different "foreign_keys" configured, both sides now need to be configured explicity (if they do in fact require this setting, see the next note...). .. change:: :tags: orm :tickets: ...the only known (and really, really rare) use case where a different foreign_keys setting was used on the forwards/backwards side, a composite foreign key that partially points to its own columns, has been enhanced such that the fk->itself aspect of the relation won't be used to determine relation direction. .. change:: :tags: orm :tickets: Session.mapper is now *deprecated*. Call session.add() if you'd like a free-standing object to be part of your session. Otherwise, a DIY version of Session.mapper is now documented at http://www.sqlalchemy.org/trac/wiki/UsageRecipes/SessionAwareMapper The method will remain deprecated throughout 0.6. .. change:: :tags: orm :tickets: 1431 Fixed Query being able to join() from individual columns of a joined-table subclass entity, i.e. query(SubClass.foo, SubcClass.bar).join(). In most cases, an error "Could not find a FROM clause to join from" would be raised. 
In a few others, the result would be returned in terms of the base class rather than the subclass - so applications which relied on this erroneous result need to be adjusted. .. change:: :tags: orm :tickets: 1461 Fixed a bug involving contains_eager(), which would apply itself to a secondary (i.e. lazy) load in a particular rare case, producing cartesian products. improved the targeting of query.options() on secondary loads overall. .. change:: :tags: orm :tickets: Fixed bug introduced in 0.5.4 whereby Composite types fail when default-holding columns are flushed. .. change:: :tags: orm :tickets: 1426 Fixed another 0.5.4 bug whereby mutable attributes (i.e. PickleType) wouldn't be deserialized correctly when the whole object was serialized. .. change:: :tags: orm :tickets: Fixed bug whereby session.is_modified() would raise an exception if any synonyms were in use. .. change:: :tags: orm :tickets: Fixed potential memory leak whereby previously pickled objects placed back in a session would not be fully garbage collected unless the Session were explicitly closed out. .. change:: :tags: orm :tickets: Fixed bug whereby list-based attributes, like pickletype and PGArray, failed to be merged() properly. .. change:: :tags: orm :tickets: Repaired non-working attributes.set_committed_value function. .. change:: :tags: orm :tickets: Trimmed the pickle format for InstanceState which should further reduce the memory footprint of pickled instances. The format should be backwards compatible with that of 0.5.4 and previous. .. change:: :tags: orm :tickets: 1463 sqlalchemy.orm.join and sqlalchemy.orm.outerjoin are now added to __all__ in sqlalchemy.orm.*. .. change:: :tags: orm :tickets: 1458 Fixed bug where Query exception raise would fail when a too-short composite primary key value were passed to get(). .. change:: :tags: sql :tickets: Removed an obscure feature of execute() (including connection, engine, Session) whereby a bindparam() construct can be sent as a key to the params dictionary. This usage is undocumented and is at the core of an issue whereby the bindparam() object created implicitly by a text() construct may have the same hash value as a string placed in the params dictionary and may result in an inappropriate match when computing the final bind parameters. Internal checks for this condition would add significant latency to the critical task of parameter rendering, so the behavior is removed. This is a backwards incompatible change for any application that may have been using this feature, however the feature has never been documented. .. change:: :tags: engine/pool :tickets: Implemented recreate() for StaticPool. .. changelog:: :version: 0.5.4p2 :released: Tue May 26 2009 .. change:: :tags: sql :tickets: Repaired the printing of SQL exceptions which are not based on parameters or are not executemany() style. .. change:: :tags: postgresql :tickets: Deprecated the hardcoded TIMESTAMP function, which when used as func.TIMESTAMP(value) would render "TIMESTAMP value". This breaks on some platforms as PostgreSQL doesn't allow bind parameters to be used in this context. The hard-coded uppercase is also inappropriate and there's lots of other PG casts that we'd need to support. So instead, use text constructs i.e. select(["timestamp '12/05/09'"]). .. changelog:: :version: 0.5.4p1 :released: Mon May 18 2009 .. change:: :tags: orm :tickets: Fixed an attribute error introduced in 0.5.4 which would occur when merge() was used with an incomplete object. .. 
changelog:: :version: 0.5.4 :released: Sun May 17 2009 .. change:: :tags: orm :tickets: 1398 Significant performance enhancements regarding Sessions/flush() in conjunction with large mapper graphs, large numbers of objects: - Removed all* O(N) scanning behavior from the flush() process, i.e. operations that were scanning the full session, including an extremely expensive one that was erroneously assuming primary key values were changing when this was not the case. * one edge case remains which may invoke a full scan, if an existing primary key attribute is modified to a new value. - The Session's "weak referencing" behavior is now *full* - no strong references whatsoever are made to a mapped object or related items/collections in its __dict__. Backrefs and other cycles in objects no longer affect the Session's ability to lose all references to unmodified objects. Objects with pending changes still are maintained strongly until flush. The implementation also improves performance by moving the "resurrection" process of garbage collected items to only be relevant for mappings that map "mutable" attributes (i.e. PickleType, composite attrs). This removes overhead from the gc process and simplifies internal behavior. If a "mutable" attribute change is the sole change on an object which is then dereferenced, the mapper will not have access to other attribute state when the UPDATE is issued. This may present itself differently to some MapperExtensions. The change also affects the internal attribute API, but not the AttributeExtension interface nor any of the publically documented attribute functions. - The unit of work no longer genererates a graph of "dependency" processors for the full graph of mappers during flush(), instead creating such processors only for those mappers which represent objects with pending changes. This saves a tremendous number of method calls in the context of a large interconnected graph of mappers. - Cached a wasteful "table sort" operation that previously occured multiple times per flush, also removing significant method call count from flush(). - Other redundant behaviors have been simplified in mapper._save_obj(). .. change:: :tags: orm :tickets: Modified query_cls on DynamicAttributeImpl to accept a full mixin version of the AppenderQuery, which allows subclassing the AppenderMixin. .. change:: :tags: orm :tickets: 1300 The "polymorphic discriminator" column may be part of a primary key, and it will be populated with the correct discriminator value. .. change:: :tags: orm :tickets: Fixed the evaluator not being able to evaluate IS NULL clauses. .. change:: :tags: orm :tickets: 1352 Fixed the "set collection" function on "dynamic" relations to initiate events correctly. Previously a collection could only be assigned to a pending parent instance, otherwise modified events would not be fired correctly. Set collection is now compatible with merge(), fixes. .. change:: :tags: orm :tickets: Allowed pickling of PropertyOption objects constructed with instrumented descriptors; previously, pickle errors would occur when pickling an object which was loaded with a descriptor-based option, such as query.options(eagerload(MyClass.foo)). .. change:: :tags: orm :tickets: 1357 Lazy loader will not use get() if the "lazy load" SQL clause matches the clause used by get(), but contains some parameters hardcoded. Previously the lazy strategy would fail with the get(). Ideally get() would be used with the hardcoded parameters but this would require further development. .. 
change:: :tags: orm :tickets: 1391 MapperOptions and other state associated with query.options() is no longer bundled within callables associated with each lazy/deferred-loading attribute during a load. The options are now associated with the instance's state object just once when it's populated. This removes the need in most cases for per-instance/attribute loader objects, improving load speed and memory overhead for individual instances. .. change:: :tags: orm :tickets: 1360 Fixed another location where autoflush was interfering with session.merge(). autoflush is disabled completely for the duration of merge() now. .. change:: :tags: orm :tickets: 1406 Fixed bug which prevented "mutable primary key" dependency logic from functioning properly on a one-to-one relation(). .. change:: :tags: orm :tickets: Fixed bug in relation(), introduced in 0.5.3, whereby a self referential relation from a base class to a joined-table subclass would not configure correctly. .. change:: :tags: orm :tickets: Fixed obscure mapper compilation issue when inheriting mappers are used which would result in un-initialized attributes. .. change:: :tags: orm :tickets: Fixed documentation for session weak_identity_map - the default value is True, indicating a weak referencing map in use. .. change:: :tags: orm :tickets: 1376 Fixed a unit of work issue whereby the foreign key attribute on an item contained within a collection owned by an object being deleted would not be set to None if the relation() was self-referential. .. change:: :tags: orm :tickets: 1378 Fixed Query.update() and Query.delete() failures with eagerloaded relations. .. change:: :tags: orm :tickets: It is now an error to specify both columns of a binary primaryjoin condition in the foreign_keys or remote_side collection. Whereas previously it was just nonsensical, but would succeed in a non-deterministic way. .. change:: :tags: ticket: 594, 1341, schema :tickets: Added a quote_schema() method to the IdentifierPreparer class so that dialects can override how schemas get handled. This enables the MSSQL dialect to treat schemas as multipart identifiers, such as 'database.owner'. .. change:: :tags: sql :tickets: Back-ported the "compiler" extension from SQLA 0.6. This is a standardized interface which allows the creation of custom ClauseElement subclasses and compilers. In particular it's handy as an alternative to text() when you'd like to build a construct that has database-specific compilations. See the extension docs for details. .. change:: :tags: sql :tickets: 1413 Exception messages are truncated when the list of bound parameters is larger than 10, preventing enormous multi-page exceptions from filling up screens and logfiles for large executemany() statements. .. change:: :tags: sql :tickets: ``sqlalchemy.extract()`` is now dialect sensitive and can extract components of timestamps idiomatically across the supported databases, including SQLite. .. change:: :tags: sql :tickets: 1353 Fixed __repr__() and other _get_colspec() methods on ForeignKey constructed from __clause_element__() style construct (i.e. declarative columns). .. change:: :tags: mysql :tickets: 1405 Reflecting a FOREIGN KEY construct will take into account a dotted schema.tablename combination, if the foreign key references a table in a remote schema. .. change:: :tags: mssql :tickets: Modified how savepoint logic works to prevent it from stepping on non-savepoint oriented routines. Savepoint support is still very experimental. .. 
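A minimal sketch of the back-ported "compiler" extension mentioned above; the ``BracketedColumn`` construct and its rendering are hypothetical illustrations, not part of SQLAlchemy itself::

    from sqlalchemy.ext.compiler import compiles
    from sqlalchemy.sql.expression import ColumnClause

    class BracketedColumn(ColumnClause):
        """Hypothetical custom column construct."""

    @compiles(BracketedColumn)
    def compile_bracketed(element, compiler, **kw):
        # default rendering; a dialect name may be passed to @compiles()
        # to register database-specific variants instead
        return "[%s]" % element.name

    str(BracketedColumn('some_col'))   # renders as "[some_col]"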
change:: :tags: mssql :tickets: 1310 Added in reserved words for MSSQL that covers version 2008 and all prior versions. .. change:: :tags: mssql :tickets: 1343 Corrected problem with information schema not working with a binary collation based database. Cleaned up information schema since it is only used by mssql now. .. change:: :tags: sqlite :tickets: 1402 Corrected the SLBoolean type so that it properly treats only 1 as True. .. change:: :tags: sqlite :tickets: 1273 Corrected the float type so that it correctly maps to a SLFloat type when being reflected. .. change:: :tags: extensions :tickets: 1379 Fixed adding of deferred or other column properties to a declarative class. .. changelog:: :version: 0.5.3 :released: Tue Mar 24 2009 .. change:: :tags: orm :tickets: 1315 The "objects" argument to session.flush() is deprecated. State which represents the linkage between a parent and child object does not support "flushed" status on one side of the link and not the other, so supporting this operation leads to misleading results. .. change:: :tags: orm :tickets: Query now implements __clause_element__() which produces its selectable, which means a Query instance can be accepted in many SQL expressions, including col.in_(query), union(query1, query2), select([foo]).select_from(query), etc. .. change:: :tags: orm :tickets: 1337 Query.join() can now construct multiple FROM clauses, if needed. Such as, query(A, B).join(A.x).join(B.y) might say SELECT A.*, B.* FROM A JOIN X, B JOIN Y. Eager loading can also tack its joins onto those multiple FROM clauses. .. change:: :tags: orm :tickets: 1347 Fixed bug in dynamic_loader() where append/remove events after construction time were not being propagated to the UOW to pick up on flush(). .. change:: :tags: orm :tickets: Fixed bug where column_prefix wasn't being checked before not mapping an attribute that already had class-level name present. .. change:: :tags: orm :tickets: 1315 a session.expire() on a particular collection attribute will clear any pending backref additions as well, so that the next access correctly returns only what was present in the database. Presents some degree of a workaround for, although we are considering removing the flush([objects]) feature altogether. .. change:: :tags: orm :tickets: Session.scalar() now converts raw SQL strings to text() the same way Session.execute() does and accepts same alternative \**kw args. .. change:: :tags: orm :tickets: improvements to the "determine direction" logic of relation() such that the direction of tricky situations like mapper(A.join(B)) -> relation-> mapper(B) can be determined. .. change:: :tags: orm :tickets: 1306 When flushing partial sets of objects using session.flush([somelist]), pending objects which remain pending after the operation won't inadvertently be added as persistent. .. change:: :tags: orm :tickets: 1314 Added "post_configure_attribute" method to InstrumentationManager, so that the "listen_for_events.py" example works again. .. change:: :tags: orm :tickets: a forward and complementing backwards reference which are both of the same direction, i.e. ONETOMANY or MANYTOONE, is now detected, and an error message is raised. Saves crazy CircularDependencyErrors later on. .. change:: :tags: orm :tickets: Fixed bugs in Query regarding simultaneous selection of multiple joined-table inheritance entities with common base classes: - previously the adaption applied to "B" on "A JOIN B" would be erroneously partially applied to "A". - comparisons on relations (i.e. 
A.related==someb) were not getting adapted when they should. - Other filterings, like query(A).join(A.bs).filter(B.foo=='bar'), were erroneously adapting "B.foo" as though it were an "A". .. change:: :tags: orm :tickets: 1325 Fixed adaptation of EXISTS clauses via any(), has(), etc. in conjunction with an aliased object on the left and of_type() on the right. .. change:: :tags: orm :tickets: Added an attribute helper method ``set_committed_value`` in sqlalchemy.orm.attributes. Given an object, attribute name, and value, will set the value on the object as part of its "committed" state, i.e. state that is understood to have been loaded from the database. Helps with the creation of homegrown collection loaders and such. .. change:: :tags: orm :tickets: Query won't fail with weakref error when a non-mapper/class instrumented descriptor is passed; it raises "Invalid column expression" instead. .. change:: :tags: orm :tickets: Query.group_by() properly takes into account aliasing applied to the FROM clause, such as with select_from(), using with_polymorphic(), or using from_self(). .. change:: :tags: sql :tickets: An alias() of a select() will convert to a "scalar subquery" when used in an unambiguously scalar context, i.e. when it's used in a comparison operation. This applies to the ORM when using query.subquery() as well. .. change:: :tags: sql :tickets: 1302 Fixed missing _label attribute on Function object, others when used in a select() with use_labels (such as when used in an ORM column_property()). .. change:: :tags: sql :tickets: 1309 anonymous alias names now truncate down to the max length allowed by the dialect. More significant on DBs like Oracle with very small character limits. .. change:: :tags: sql :tickets: the __selectable__() interface has been replaced entirely by __clause_element__(). .. change:: :tags: sql :tickets: 1299 The per-dialect cache used by TypeEngine to cache dialect-specific types is now a WeakKeyDictionary. This is to prevent dialect objects from being referenced forever for an application that creates an arbitrarily large number of engines or dialects. There is a small performance penalty which will be resolved in 0.6. .. change:: :tags: sqlite :tickets: Fixed SQLite reflection methods so that non-present cursor.description, which triggers an auto-cursor close, will be detected, so that the no-results case doesn't fail on recent versions of pysqlite, which raise an error when fetchone() is called with no rows present. .. change:: :tags: postgresql :tickets: Index reflection won't fail when an index with multiple expressions is encountered. .. change:: :tags: postgresql :tickets: 1327 Added PGUuid and PGBit types to sqlalchemy.databases.postgres. .. change:: :tags: postgresql :tickets: 1327 Reflection of unknown PG types won't crash when those types are specified within a domain. .. change:: :tags: mssql :tickets: Preliminary support for pymssql 1.0.1. .. change:: :tags: mssql :tickets: Corrected issue on mssql where max_identifier_length was not being respected. .. change:: :tags: extensions :tickets: Fixed a recursive pickling issue in serializer, triggered by an EXISTS or other embedded FROM construct. .. change:: :tags: extensions :tickets: Declarative locates the "inherits" class using a search through __bases__, to skip over mixins that are local to subclasses. .. change:: :tags: extensions :tickets: Declarative figures out joined-table inheritance primary join condition even if "inherits" mapper argument is given explicitly. ..
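A rough illustration of the ``set_committed_value()`` helper described above; the ``user`` object and its ``addresses`` collection are hypothetical::

    from sqlalchemy.orm import attributes

    # populate a collection as though it had been loaded from the database,
    # without creating change history or triggering a lazy load
    attributes.set_committed_value(user, 'addresses', [address_one, address_two])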
change:: :tags: extensions :tickets: Declarative will properly interpret the "foreign_keys" argument on a backref() if it's a string. .. change:: :tags: extensions :tickets: Declarative will accept a table-bound column as a property when used in conjunction with __table__, if the column is already present in __table__. The column will be remapped to the given key the same way as when added to the mapper() properties dict. .. changelog:: :version: 0.5.2 :released: Sat Jan 24 2009 .. change:: :tags: orm :tickets: Further refined 0.5.1's warning about delete-orphan cascade placed on a many-to-many relation. First, the bad news: the warning will apply to both many-to-many as well as many-to-one relations. This is necessary since in both cases, SQLA does not scan the full set of potential parents when determining "orphan" status - for a persistent object it only detects an in-python de-association event to establish the object as an "orphan". Next, the good news: to support one-to-one via a foreign key or assocation table, or to support one-to-many via an association table, a new flag single_parent=True may be set which indicates objects linked to the relation are only meant to have a single parent. The relation will raise an error if multiple parent-association events occur within Python. .. change:: :tags: orm :tickets: 1292 Adjusted the attribute instrumentation change from 0.5.1 to fully establish instrumentation for subclasses where the mapper was created after the superclass had already been fully instrumented. .. change:: :tags: orm :tickets: Fixed bug in delete-orphan cascade whereby two one-to-one relations from two different parent classes to the same target class would prematurely expunge the instance. .. change:: :tags: orm :tickets: Fixed an eager loading bug whereby self-referential eager loading would prevent other eager loads, self referential or not, from joining to the parent JOIN properly. Thanks to Alex K for creating a great test case. .. change:: :tags: orm :tickets: session.expire() and related methods will not expire() unloaded deferred attributes. This prevents them from being needlessly loaded when the instance is refreshed. .. change:: :tags: orm :tickets: 1293 query.join()/outerjoin() will now properly join an aliased() construct to the existing left side, even if query.from_self() or query.select_from(someselectable) has been called. .. change:: :tags: sql :tickets: 1284 Further fixes to the "percent signs and spaces in column/table names" functionality. .. change:: :tags: mssql :tickets: 1291 Restored convert_unicode handling. Results were being passed on through without conversion. .. change:: :tags: mssql :tickets: 1282 Really fixing the decimal handling this time.. .. change:: :tags: Ticket:1289, mssql :tickets: Modified table reflection code to use only kwargs when constructing tables. .. changelog:: :version: 0.5.1 :released: Sat Jan 17 2009 .. change:: :tags: orm :tickets: Removed an internal join cache which could potentially leak memory when issuing query.join() repeatedly to ad-hoc selectables. .. change:: :tags: orm :tickets: The "clear()", "save()", "update()", "save_or_update()" Session methods have been deprecated, replaced by "expunge_all()" and "add()". "expunge_all()" has also been added to ScopedSession. .. change:: :tags: orm :tickets: Modernized the "no mapped table" exception and added a more explicit __table__/__tablename__ exception to declarative. .. 
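A minimal sketch of the ``single_parent=True`` flag discussed above, using hypothetical ``Parent``/``Child`` classical mappings and an association table::

    from sqlalchemy.orm import mapper, relation

    mapper(Parent, parent_table, properties={
        # delete-orphan is allowed here because single_parent=True promises
        # that each Child is associated with at most one Parent
        'children': relation(Child, secondary=association_table,
                             single_parent=True,
                             cascade="all, delete-orphan"),
    })
    mapper(Child, child_table)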
change:: :tags: orm :tickets: 1237 Concrete inheriting mappers now instrument attributes which are inherited from the superclass, but are not defined for the concrete mapper itself, with an InstrumentedAttribute that issues a descriptive error when accessed. .. change:: :tags: orm :tickets: 1237, 781 Added a new `relation()` keyword `back_populates`. This allows configuration of backreferences using explicit relations. This is required when creating bidirectional relations between a hierarchy of concrete mappers and another class. .. change:: :tags: orm :tickets: 1237 Test coverage added for `relation()` objects specified on concrete mappers. .. change:: :tags: orm :tickets: 1276 Query.from_self() as well as query.subquery() both disable the rendering of eager joins inside the subquery produced. The "disable all eager joins" feature is available publicly via a new query.enable_eagerloads() generative. .. change:: :tags: orm :tickets: Added a rudimentary series of set operations to Query that receive Query objects as arguments, including union(), union_all(), intersect(), except_(), intersect_all(), except_all(). See the API documentation for Query.union() for examples. .. change:: :tags: orm :tickets: Fixed bug that prevented Query.join() and eagerloads from attaching to a query that selected from a union or aliased union. .. change:: :tags: orm :tickets: 1237 A short documentation example added for bidirectional relations specified on concrete mappers. .. change:: :tags: orm :tickets: 1269 Mappers now instrument class attributes upon construction with the final InstrumentedAttribute object which remains persistent. The `_CompileOnAttr`/`__getattribute__()` methodology has been removed. The net effect is that Column-based mapped class attributes can now be used fully at the class level without invoking a mapper compilation operation, greatly simplifying typical usage patterns within declarative. .. change:: :tags: orm :tickets: ColumnProperty (and front-end helpers such as ``deferred``) no longer ignores unknown \**keyword arguments. .. change:: :tags: orm :tickets: Fixed a bug with the unitofwork's "row switch" mechanism, i.e. the conversion of INSERT/DELETE into an UPDATE, when combined with joined-table inheritance and an object which contained no defined values for the child table, where an UPDATE with no SET clause would be rendered. .. change:: :tags: orm :tickets: 1281 Using delete-orphan on a many-to-many relation is deprecated. This produces misleading or erroneous results since SQLA does not retrieve the full list of "parents" for m2m. To get delete-orphan behavior with an m2m table, use an explicit association class so that the individual association row is treated as a parent. .. change:: :tags: orm :tickets: 1281 delete-orphan cascade always requires delete cascade. Specifying delete-orphan without delete now raises a deprecation warning. .. change:: :tags: sql :tickets: 1256 Improved the methodology for handling percent signs in column names. Added more tests. MySQL and PostgreSQL dialects still do not issue correct CREATE TABLE statements for identifiers with percent signs in them. .. change:: :tags: schema :tickets: 1214 Index now accepts column-oriented InstrumentedAttributes (i.e. column-based mapped class attributes) as column arguments. .. change:: :tags: schema :tickets: Column with no name (as in declarative) won't raise a NoneType error when its string output is requested (such as in a stack trace). ..
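A short sketch of the new ``back_populates`` keyword noted above; the ``Parent``/``Child`` classes and tables are hypothetical::

    from sqlalchemy.orm import mapper, relation

    # each side of the bidirectional relation is declared explicitly and
    # linked to the other by name, rather than generated via backref()
    mapper(Parent, parent_table, properties={
        'children': relation(Child, back_populates='parent'),
    })
    mapper(Child, child_table, properties={
        'parent': relation(Parent, back_populates='children'),
    })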
change:: :tags: schema :tickets: 1278 Fixed bug when overriding a Column with a ForeignKey on a reflected table, where derived columns (i.e. the "virtual" columns of a select, etc.) would inadvertently call upon schema-level cleanup logic intended only for the original column. .. change:: :tags: declarative :tickets: Can now specify Column objects on subclasses which have no table of their own (i.e. use single table inheritance). The columns will be appended to the base table, but only mapped by the subclass. .. change:: :tags: declarative :tickets: For both joined and single inheriting subclasses, the subclass will only map those columns which are already mapped on the superclass and those explicit on the subclass. Other columns that are present on the `Table` will be excluded from the mapping by default, which can be disabled by passing a blank `exclude_properties` collection to the `__mapper_args__`. This is so that single-inheriting classes which define their own columns are the only classes to map those columns. The effect is actually a more organized mapping than you'd normally get with explicit `mapper()` calls unless you set up the `exclude_properties` arguments explicitly. .. change:: :tags: declarative :tickets: It's an error to add new Column objects to a declarative class that specified an existing table using __table__. .. change:: :tags: mysql :tickets: Added the missing keywords from MySQL 4.1 so they get escaped properly. .. change:: :tags: mssql :tickets: 1280 Corrected handling of large decimal values with more robust tests. Removed string manipulation on floats. .. change:: :tags: mssql :tickets: Modified the do_begin handling in mssql to use the Cursor not the Connection so it is DBAPI compatible. .. change:: :tags: mssql :tickets: Corrected SAVEPOINT support on adodbapi by changing the handling of savepoint_release, which is unsupported on mssql. .. changelog:: :version: 0.5.0 :released: Tue Jan 06 2009 .. change:: :tags: general :tickets: Documentation has been converted to Sphinx. In particular, the generated API documentation has been constructed into a full blown "API Reference" section which organizes editorial documentation combined with generated docstrings. Cross linking between sections and API docs are vastly improved, a javascript-powered search feature is provided, and a full index of all classes, functions and members is provided. .. change:: :tags: general :tickets: setup.py now imports setuptools only optionally. If not present, distutils is used. The new "pip" installer is recommended over easy_install as it installs in a more simplified way. .. change:: :tags: general :tickets: added an extremely basic illustration of a PostGIS integration to the examples folder. .. change:: :tags: orm :tickets: Query.with_polymorphic() now accepts a third argument "discriminator" which will replace the value of mapper.polymorphic_on for that query. Mappers themselves no longer require polymorphic_on to be set, even if the mapper has a polymorphic_identity. When not set, the mapper will load non-polymorphically by default. Together, these two features allow a non-polymorphic concrete inheritance setup to use polymorphic loading on a per-query basis, since concrete setups are prone to many issues when used polymorphically in all cases. .. change:: :tags: orm :tickets: dynamic_loader accepts a query_class= to customize the Query classes used for both the dynamic collection and the queries built from it. .. 
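A minimal sketch of the single table inheritance behavior described above, using hypothetical ``Person``/``Engineer`` declarative classes::

    from sqlalchemy import Column, Integer, String
    from sqlalchemy.ext.declarative import declarative_base

    Base = declarative_base()

    class Person(Base):
        __tablename__ = 'person'
        id = Column(Integer, primary_key=True)
        discriminator = Column('type', String(30))
        __mapper_args__ = {'polymorphic_on': discriminator,
                           'polymorphic_identity': 'person'}

    class Engineer(Person):
        # no __tablename__/__table__: single table inheritance.  The column
        # is appended to the 'person' table but mapped only on Engineer.
        primary_language = Column(String(50))
        __mapper_args__ = {'polymorphic_identity': 'engineer'}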
change:: :tags: orm :tickets: 1079 query.order_by() accepts None which will remove any pending order_by state from the query, as well as cancel out any mapper/relation configured ordering. This is primarily useful for overriding the ordering specified on a dynamic_loader(). .. change:: :tags: sql :tickets: 935 RowProxy objects can be used in place of dictionary arguments sent to connection.execute() and friends. .. change:: :tags: dialect :tickets: Added a new description_encoding attribute on the dialect that is used for encoding the column name when processing the metadata. This usually defaults to utf-8. .. change:: :tags: mssql :tickets: Added in a new MSGenericBinary type. This maps to the Binary type so it can implement the specialized behavior of treating length specified types as fixed-width Binary types and non-length types as an unbound variable length Binary type. .. change:: :tags: mssql :tickets: 1249 Added in new types: MSVarBinary and MSImage. .. change:: :tags: mssql :tickets: Added in the MSReal, MSNText, MSSmallDateTime, MSTime, MSDateTimeOffset, and MSDateTime2 types .. change:: :tags: sqlite :tickets: 1266 Table reflection now stores the actual DefaultClause value for the column. .. change:: :tags: sqlite :tickets: bugfixes, behavioral changes .. change:: :tags: orm :tickets: Exceptions raised during compile_mappers() are now preserved to provide "sticky behavior" - if a hasattr() call on a pre-compiled mapped attribute triggers a failing compile and suppresses the exception, subsequent compilation is blocked and the exception will be reiterated on the next compile() call. This issue occurs frequently when using declarative. .. change:: :tags: orm :tickets: property.of_type() is now recognized on a single-table inheriting target, when used in the context of prop.of_type(..).any()/has(), as well as query.join(prop.of_type(...)). .. change:: :tags: orm :tickets: query.join() raises an error when the target of the join doesn't match the property-based attribute - while it's unlikely anyone is doing this, the SQLAlchemy author was guilty of this particular loosey-goosey behavior. .. change:: :tags: orm :tickets: 1272 Fixed bug when using weak_instance_map=False where modified events would not be intercepted for a flush(). .. change:: :tags: orm :tickets: 1268 Fixed some deep "column correspondence" issues which could impact a Query made against a selectable containing multiple versions of the same table, as well as unions and similar which contained the same table columns in different column positions at different levels. .. change:: :tags: orm :tickets: Custom comparator classes used in conjunction with column_property(), relation() etc. can define new comparison methods on the Comparator, which will become available via __getattr__() on the InstrumentedAttribute. In the case of synonym() or comparable_property(), attributes are resolved first on the user-defined descriptor, then on the user-defined comparator. .. change:: :tags: orm :tickets: 976 Added ScopedSession.is_active accessor. .. change:: :tags: orm :tickets: 1262 Can pass mapped attributes and column objects as keys to query.update({}). .. change:: :tags: orm :tickets: Mapped attributes passed to the values() of an expression level insert() or update() will use the keys of the mapped columns, not that of the mapped attribute. .. change:: :tags: orm :tickets: 1242 Corrected problem with Query.delete() and Query.update() not working properly with bind parameters. .. 
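A brief sketch of mapped attributes as keys to ``query.update()`` as noted above; the ``User`` class and session are hypothetical::

    # mapped attributes (or Column objects) may be used as keys,
    # in addition to plain string column names
    session.query(User).filter(User.name == 'ed').update({User.name: 'edward'})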
change:: :tags: orm :tickets: Query.select_from(), from_statement() ensure that the given argument is a FromClause, or Text/Select/Union, respectively. .. change:: :tags: orm :tickets: 1253 Query() can be passed a "composite" attribute as a column expression and it will be expanded. Somewhat related to. .. change:: :tags: orm :tickets: Query() is a little more robust when passed various column expressions such as strings, clauselists, text() constructs (which may mean it just raises an error more nicely). .. change:: :tags: orm :tickets: first() works as expected with Query.from_statement(). .. change:: :tags: orm :tickets: Fixed bug introduced in 0.5rc4 involving eager loading not functioning for properties which were added to a mapper post-compile using add_property() or equivalent. .. change:: :tags: orm :tickets: Fixed bug where many-to-many relation() with viewonly=True would not correctly reference the link between secondary->remote. .. change:: :tags: orm :tickets: 1232 Duplicate items in a list-based collection will be maintained when issuing INSERTs to a "secondary" table in a many-to-many relation. Assuming the m2m table has a unique or primary key constraint on it, this will raise the expected constraint violation instead of silently dropping the duplicate entries. Note that the old behavior remains for a one-to-many relation since collection entries in that case don't result in INSERT statements and SQLA doesn't manually police collections. .. change:: :tags: orm :tickets: Query.add_column() can accept FromClause objects in the same manner as session.query() can. .. change:: :tags: orm :tickets: Comparison of many-to-one relation to NULL is properly converted to IS NOT NULL based on not_(). .. change:: :tags: orm :tickets: 1087 Extra checks added to ensure explicit primaryjoin/secondaryjoin are ClauseElement instances, to prevent more confusing errors later on. .. change:: :tags: orm :tickets: 1236 Improved mapper() check for non-class classes. .. change:: :tags: orm :tickets: 5051 comparator_factory argument is now documented and supported by all MapperProperty types, including column_property(), relation(), backref(), and synonym(). .. change:: :tags: orm :tickets: Changed the name of PropertyLoader to RelationProperty, to be consistent with all the other names. PropertyLoader is still present as a synonym. .. change:: :tags: orm :tickets: 1099, 1228 fixed "double iter()" call causing bus errors in shard API, removed errant result.close() left over from the 0.4 version. .. change:: :tags: orm :tickets: made Session.merge cascades not trigger autoflush. Fixes merged instances getting prematurely inserted with missing values. .. change:: :tags: orm :tickets: Two fixes to help prevent out-of-band columns from being rendered in polymorphic_union inheritance scenarios (which then causes extra tables to be rendered in the FROM clause causing cartesian products): - improvements to "column adaption" for a->b->c inheritance situations to better locate columns that are related to one another via multiple levels of indirection, rather than rendering the non-adapted column. - the "polymorphic discriminator" column is only rendered for the actual mapper being queried against. The column won't be "pulled in" from a subclass or superclass mapper since it's not needed. .. change:: :tags: orm :tickets: 1072 Fixed shard_id argument on ShardedSession.execute(). .. change:: :tags: sql :tickets: 1256 Columns can again contain percent signs within their names. .. 
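A quick sketch of the many-to-one NULL comparison mentioned above, against a hypothetical ``Address.user`` relation::

    # renders roughly as "addresses.user_id IS NOT NULL"
    session.query(Address).filter(Address.user != None)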
change:: :tags: sql :tickets: sqlalchemy.sql.expression.Function is now a public class. It can be subclassed to provide user-defined SQL functions in an imperative style, including with pre-established behaviors. The postgis.py example illustrates one usage of this. .. change:: :tags: sql :tickets: PickleType now favors == comparison by default, if the incoming object (such as a dict) implements __eq__(). If the object does not implement __eq__() and mutable=True, a deprecation warning is raised. .. change:: :tags: sql :tickets: 1215 Fixed the import weirdness in sqlalchemy.sql to not export __names__. .. change:: :tags: sql :tickets: 1238 Using the same ForeignKey object repeatedly raises an error instead of silently failing later. .. change:: :tags: sql :tickets: Added NotImplementedError for params() method on Insert/Update/Delete constructs. These items currently don't support this functionality, which also would be a little misleading compared to values(). .. change:: :tags: sql :tickets: 650 Reflected foreign keys will properly locate their referenced column, even if the column was given a "key" attribute different from the reflected name. This is achieved via a new flag on ForeignKey/ForeignKeyConstraint called "link_to_name", if True means the given name is the referred-to column's name, not its assigned key. .. change:: :tags: sql :tickets: 1253 select() can accept a ClauseList as a column in the same way as a Table or other selectable and the interior expressions will be used as column elements. .. change:: :tags: sql :tickets: the "passive" flag on session.is_modified() is correctly propagated to the attribute manager. .. change:: :tags: sql :tickets: union() and union_all() will not whack any order_by() that has been applied to the select()s inside. If you union() a select() with order_by() (presumably to support LIMIT/OFFSET), you should also call self_group() on it to apply parenthesis. .. change:: :tags: engine/pool :tickets: 1246 Connection.invalidate() checks for closed status to avoid attribute errors. .. change:: :tags: engine/pool :tickets: 1094 NullPool supports reconnect on failure behavior. .. change:: :tags: engine/pool :tickets: 799 Added a mutex for the initial pool creation when using pool.manage(dbapi). This prevents a minor case of "dogpile" behavior which would otherwise occur upon a heavy load startup. .. change:: :tags: engine/pool :tickets: _execute_clauseelement() goes back to being a private method. Subclassing Connection is not needed now that ConnectionProxy is available. .. change:: :tags: documentation :tickets: 1149, 1200 Tickets. .. change:: :tags: documentation :tickets: Added note about create_session() defaults. .. change:: :tags: documentation :tickets: Added section about metadata.reflect(). .. change:: :tags: documentation :tickets: Updated `TypeDecorator` section. .. change:: :tags: documentation :tickets: Rewrote the "threadlocal" strategy section of the docs due to recent confusion over this feature. .. change:: :tags: documentation :tickets: Removed badly out of date 'polymorphic_fetch' and 'select_table' docs from inheritance, reworked the second half of "joined table inheritance". .. change:: :tags: documentation :tickets: Documented `comparator_factory` kwarg, added new doc section "Custom Comparators". .. change:: :tags: mssql :tickets: 1254 Refactored the Date/Time types. The ``smalldatetime`` data type no longer truncates to a date only, and will now be mapped to the MSSmallDateTime type. .. 
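A small sketch of the new ``link_to_name`` flag described above; table and column names are hypothetical::

    from sqlalchemy import MetaData, Table, Column, Integer, ForeignKey

    metadata = MetaData()
    users = Table('users', metadata,
        # database column name is "user_id", Python-side key is "uid"
        Column('user_id', Integer, key='uid', primary_key=True))
    addresses = Table('addresses', metadata,
        Column('id', Integer, primary_key=True),
        # link_to_name=True: "users.user_id" names the database column,
        # not the Column's .key
        Column('user_ref', Integer,
               ForeignKey('users.user_id', link_to_name=True)))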
change:: :tags: mssql :tickets: Corrected an issue with Numerics to accept an int. .. change:: :tags: mssql :tickets: Mapped ``char_length`` to the ``LEN()`` function. .. change:: :tags: mssql :tickets: If an ``INSERT`` includes a subselect the ``INSERT`` is converted from an ``INSERT INTO VALUES`` construct to a ``INSERT INTO SELECT`` construct. .. change:: :tags: mssql :tickets: If the column is part of a ``primary_key`` it will be ``NOT NULL`` since MSSQL doesn't allow ``NULL`` in primary_key columns. .. change:: :tags: mssql :tickets: 1249 ``MSBinary`` now returns a ``BINARY`` instead of an ``IMAGE``. This is a backwards incompatible change in that ``BINARY`` is a fixed length data type whereas ``IMAGE`` is a variable length data type. .. change:: :tags: mssql :tickets: 1258 ``get_default_schema_name`` is now reflected from the database based on the user's default schema. This only works with MSSQL 2005 and later. .. change:: :tags: mssql :tickets: 1248 Added collation support through the use of a new collation argument. This is supported on the following types: char, nchar, varchar, nvarchar, text, ntext. .. change:: :tags: mssql :tickets: Changes to the connection string parameters favor DSN as the default specification for pyodbc. See the mssql.py docstring for detailed usage instructions. .. change:: :tags: mssql :tickets: Added experimental support of savepoints. It currently does not work fully with sessions. .. change:: :tags: mssql :tickets: 1243 Support for three levels of column nullability: NULL, NOT NULL, and the database's configured default. The default Column configuration (nullable=True) will now generate NULL in the DDL. Previously no specification was emitted and the database default would take effect (usually NULL, but not always). To explicitly request the database default, configure columns with nullable=None and no specification will be emitted in DDL. This is backwards incompatible behavior. .. change:: :tags: postgres :tickets: 1267 "%" signs in text() constructs are automatically escaped to "%%". Because of the backwards incompatible nature of this change, a warning is emitted if '%%' is detected in the string. .. change:: :tags: postgres :tickets: Calling alias.execute() in conjunction with server_side_cursors won't raise AttributeError. .. change:: :tags: postgres :tickets: 714 Added Index reflection support to PostgreSQL, using a great patch we long neglected, submitted by Ken Kuhlman. .. change:: :tags: oracle :tickets: Adjusted the format of create_xid() to repair two-phase commit. We now have field reports of Oracle two-phase commit working properly with this change. .. change:: :tags: oracle :tickets: 1233 Added OracleNVarchar type, produces NVARCHAR2, and also subclasses Unicode so that convert_unicode=True by default. NVARCHAR2 reflects into this type automatically so these columns pass unicode on a reflected table with no explicit convert_unicode=True flags. .. change:: :tags: oracle :tickets: 1265 Fixed bug which was preventing out params of certain types from being received; thanks a ton to huddlej at wwu.edu ! .. change:: :tags: mysql :tickets: "%" signs in text() constructs are automatically escaped to "%%". Because of the backwards incompatible nature of this change, a warning is emitted if '%%' is detected in the string. .. change:: :tags: mysql :tickets: 1241 Fixed bug in exception raise when FK columns not present during reflection. .. 
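A short sketch of the three nullability levels for the MSSQL dialect noted above; the table and columns are hypothetical::

    from sqlalchemy import MetaData, Table, Column, Integer

    metadata = MetaData()
    t = Table('t', metadata,
        Column('a', Integer),                  # nullable=True: renders "NULL" on MSSQL
        Column('b', Integer, nullable=False),  # renders "NOT NULL"
        Column('c', Integer, nullable=None))   # no specification; database default applies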
change:: :tags: mysql :tickets: Fixed bug involving reflection of a remote-schema table with a foreign key ref to another table in that schema. .. change:: :tags: associationproxy :tickets: The association proxy properties now make themselves available at the class level, e.g. MyClass.aproxy. Previously this evaluated to None. .. change:: :tags: declarative :tickets: The full list of arguments accepted as string by backref() includes 'primaryjoin', 'secondaryjoin', 'secondary', 'foreign_keys', 'remote_side', 'order_by'. .. changelog:: :version: 0.5.0rc4 :released: Fri Nov 14 2008 .. change:: :tags: orm :tickets: Query.count() has been enhanced to do the "right thing" in a wider variety of cases. It can now count multiple-entity queries, as well as column-based queries. Note that this means if you say query(A, B).count() without any joining criterion, it's going to count the cartesian product of A*B. Any query which is against column-based entities will automatically issue "SELECT count(1) FROM (SELECT...)" so that the real rowcount is returned, meaning a query such as query(func.count(A.name)).count() will return a value of one, since that query would return one row. .. change:: :tags: orm :tickets: Lots of performance tuning. A rough guesstimate over various ORM operations places it 10% faster over 0.5.0rc3, 25-30% over 0.4.8. .. change:: :tags: orm :tickets: bugfixes and behavioral changes .. change:: :tags: general :tickets: global "propigate"->"propagate" change. .. change:: :tags: orm :tickets: Adjustments to the enhanced garbage collection on InstanceState to better guard against errors due to lost state. .. change:: :tags: orm :tickets: 1220 Query.get() returns a more informative error message when executed against multiple entities. .. change:: :tags: orm :tickets: 1140, 1221 Restored NotImplementedError on Cls.relation.in_(). .. change:: :tags: orm :tickets: 1226 Fixed PendingDeprecationWarning involving order_by parameter on relation(). .. change:: :tags: sql :tickets: Removed the 'properties' attribute of the Connection object; Connection.info should be used instead. .. change:: :tags: sql :tickets: Restored "active rowcount" fetch before ResultProxy autocloses the cursor. This was removed in 0.5rc3. .. change:: :tags: sql :tickets: Rearranged the `load_dialect_impl()` method in `TypeDecorator` such that it will take effect even if the user-defined `TypeDecorator` uses another `TypeDecorator` as its impl. .. change:: :tags: access :tickets: Added support for Currency type. .. change:: :tags: access :tickets: 1017 Functions were not returning their results. .. change:: :tags: access :tickets: 1017 Corrected problem with joins; Access only supports LEFT OUTER or INNER joins, not plain JOIN by itself. .. change:: :tags: mssql :tickets: Lots of cleanup and fixes to correct problems with limit and offset. .. change:: :tags: mssql :tickets: Corrected a situation where subqueries as part of a binary expression need to be translated to use the IN and NOT IN syntax. .. change:: :tags: mssql :tickets: 1216 Fixed E Notation issue that prevented the ability to insert decimal values less than 1E-6. .. change:: :tags: mssql :tickets: 1217 Corrected problems with reflection when dealing with schemas, particularly when those schemas are the default schema. .. change:: :tags: mssql :tickets: Corrected problem with casting a zero length item to a varchar. It now correctly adjusts the CAST. .. change:: :tags: ext :tickets: Can now use a custom "inherit_condition" in __mapper_args__ when using declarative. ..
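A brief sketch of the enhanced ``Query.count()`` described above, with hypothetical ``User``/``Address`` mappings::

    from sqlalchemy import func

    session.query(User).count()                    # number of User rows
    session.query(func.count(User.name)).count()   # 1; the inner query yields one row
    # with no joining criterion, this counts the cartesian product of the two tables
    session.query(User, Address).count()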
change:: :tags: ext :tickets: fixed string-based "remote_side", "order_by" and others not propagating correctly when used in backref(). .. changelog:: :version: 0.5.0rc3 :released: Fri Nov 07 2008 .. change:: :tags: orm :tickets: Added two new hooks to SessionExtension: after_bulk_delete() and after_bulk_update(). after_bulk_delete() is called after a bulk delete() operation on a query. after_bulk_update() is called after a bulk update() operation on a query. .. change:: :tags: sql :tickets: SQL compiler optimizations and complexity reduction. The call count for compiling a typical select() construct is 20% less versus 0.5.0rc2. .. change:: :tags: sql :tickets: 1211 Dialects can now generate label names of adjustable length. Pass in the argument "label_length=" to create_engine() to adjust how many characters max will be present in dynamically generated column labels, i.e. "somecolumn AS somelabel". Any value less than 6 will result in a label of minimal size, consisting of an underscore and a numeric counter. The compiler uses the value of dialect.max_identifier_length as a default. .. change:: :tags: ext :tickets: Added a new extension sqlalchemy.ext.serializer. Provides Serializer/Deserializer "classes" which mirror Pickle/Unpickle, as well as dumps() and loads(). This serializer implements an "external object" pickler which keeps key context-sensitive objects, including engines, sessions, metadata, Tables/Columns, and mappers, outside of the pickle stream, and can later restore the pickle using any engine/metadata/session provider. This is used not for pickling regular object instances, which are pickleable without any special logic, but for pickling expression objects and full Query objects, such that all mapper/engine/session dependencies can be restored at unpickle time. .. change:: :tags: oracle :tickets: Wrote a docstring for Oracle dialect. Apparently that Ohloh "few source code comments" label is starting to sting :). .. change:: :tags: oracle :tickets: 536 Removed FIRST_ROWS() optimize flag when using LIMIT/OFFSET, can be reenabled with optimize_limits=True create_engine() flag. .. change:: :tags: oracle :tickets: bugfixes and behavioral changes .. change:: :tags: orm :tickets: "not equals" comparisons of simple many-to-one relation to an instance will not drop into an EXISTS clause and will compare foreign key columns instead. .. change:: :tags: orm :tickets: Removed not-really-working use cases of comparing a collection to an iterable. Use contains() to test for collection membership. .. change:: :tags: orm :tickets: 1171 Improved the behavior of aliased() objects such that they more accurately adapt the expressions generated, which helps particularly with self-referential comparisons. .. change:: :tags: orm :tickets: Fixed bug involving primaryjoin/secondaryjoin conditions constructed from class-bound attributes (as often occurs when using declarative), which later would be inappropriately aliased by Query, particularly with the various EXISTS based comparators. .. change:: :tags: orm :tickets: Fixed bug when using multiple query.join() with an aliased-bound descriptor which would lose the left alias. .. change:: :tags: orm :tickets: Improved weakref identity map memory management to no longer require mutexing, resurrects garbage collected instance on a lazy basis for an InstanceState with pending changes. .. change:: :tags: orm :tickets: InstanceState object now removes circular references to itself upon disposal to keep it outside of cyclic garbage collection. .. 
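A quick sketch of the new ``label_length`` argument mentioned above; the connection URL is hypothetical::

    from sqlalchemy import create_engine

    # auto-generated labels such as "somecolumn AS somelabel" will be trimmed
    # to at most 30 characters for this engine
    engine = create_engine('oracle://scott:tiger@dsn', label_length=30)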
change:: :tags: orm :tickets: relation() won't hide unrelated ForeignKey errors inside of the "please specify primaryjoin" message when determining join condition. .. change:: :tags: orm :tickets: 1218 Fixed bug in Query involving order_by() in conjunction with multiple aliases of the same class (will add tests in) .. change:: :tags: orm :tickets: When using Query.join() with an explicit clause for the ON clause, the clause will be aliased in terms of the left side of the join, allowing scenarios like query(Source). from_self().join((Dest, Source.id==Dest.source_id)) to work properly. .. change:: :tags: orm :tickets: polymorphic_union() function respects the "key" of each Column if they differ from the column's name. .. change:: :tags: orm :tickets: 1183 Repaired support for "passive-deletes" on a many-to-one relation() with "delete" cascade. .. change:: :tags: orm :tickets: 1213 Fixed bug in composite types which prevented a primary-key composite type from being mutated. .. change:: :tags: orm :tickets: 1202 Added more granularity to internal attribute access, such that cascade and flush operations will not initialize unloaded attributes and collections, leaving them intact for a lazy-load later on. Backref events still initialize attrbutes and collections for pending instances. .. change:: :tags: sql :tickets: 1212 Simplified the check for ResultProxy "autoclose without results" to be based solely on presence of cursor.description. All the regexp-based guessing about statements returning rows has been removed. .. change:: :tags: sql :tickets: 1194 Direct execution of a union() construct will properly set up result-row processing. .. change:: :tags: sql :tickets: The internal notion of an "OID" or "ROWID" column has been removed. It's basically not used by any dialect, and the possibility of its usage with psycopg2's cursor.lastrowid is basically gone now that INSERT..RETURNING is available. .. change:: :tags: sql :tickets: Removed "default_order_by()" method on all FromClause objects. .. change:: :tags: sql :tickets: Repaired the table.tometadata() method so that a passed-in schema argument is propagated to ForeignKey constructs. .. change:: :tags: sql :tickets: Slightly changed behavior of IN operator for comparing to empty collections. Now results in inequality comparison against self. More portable, but breaks with stored procedures that aren't pure functions. .. change:: :tags: oracle :tickets: Setting the auto_convert_lobs to False on create_engine() will also instruct the OracleBinary type to return the cx_oracle LOB object unchanged. .. change:: :tags: mysql :tickets: Fixed foreign key reflection in the edge case where a Table's explicit schema= is the same as the schema (database) the connection is attached to. .. change:: :tags: mysql :tickets: No longer expects include_columns in table reflection to be lower case. .. change:: :tags: ext :tickets: 1174 Fixed bug preventing declarative-bound "column" objects from being used in column_mapped_collection(). .. change:: :tags: misc :tickets: 1077 util.flatten_iterator() func doesn't interpret strings with __iter__() methods as iterators, such as in pypy. .. changelog:: :version: 0.5.0rc2 :released: Sun Oct 12 2008 .. change:: :tags: orm :tickets: Fixed bug involving read/write relation()s that contain literal or other non-column expressions within their primaryjoin condition equated to a foreign key column. .. 
change:: :tags: orm :tickets: "non-batch" mode in mapper(), a feature which allows mapper extension methods to be called as each instance is updated/inserted, now honors the insert order of the objects given. .. change:: :tags: orm :tickets: Fixed RLock-related bug in mapper which could deadlock upon reentrant mapper compile() calls, something that occurs when using declarative constructs inside of ForeignKey objects. .. change:: :tags: orm :tickets: ScopedSession.query_property now accepts a query_cls factory, overriding the session's configured query_cls. .. change:: :tags: orm :tickets: Fixed shared state bug interfering with ScopedSession.mapper's ability to apply default __init__ implementations on object subclasses. .. change:: :tags: orm :tickets: 1177 Fixed up slices on Query (i.e. query[x:y]) to work properly for zero length slices, slices with None on either end. .. change:: :tags: orm :tickets: Added an example illustrating Celko's "nested sets" as a SQLA mapping. .. change:: :tags: orm :tickets: contains_eager() with an alias argument works even when the alias is embedded in a SELECT, as when sent to the Query via query.select_from(). .. change:: :tags: orm :tickets: 1180 contains_eager() usage is now compatible with a Query that also contains a regular eager load and limit/offset, in that the columns are added to the Query-generated subquery. .. change:: :tags: orm :tickets: session.execute() will execute a Sequence object passed to it (regression from 0.4). .. change:: :tags: orm :tickets: Removed the "raiseerror" keyword argument from object_mapper() and class_mapper(). These functions raise in all cases if the given class/instance is not mapped. .. change:: :tags: orm :tickets: Fixed session.transaction.commit() on a autocommit=False session not starting a new transaction. .. change:: :tags: orm :tickets: Some adjustments to Session.identity_map's weak referencing behavior to reduce asynchronous GC side effects. .. change:: :tags: orm :tickets: 1182 Adjustment to Session's post-flush accounting of newly "clean" objects to better protect against operating on objects as they're asynchronously gc'ed. .. change:: :tags: sql :tickets: 1074 column.in_(someselect) can now be used as a columns-clause expression without the subquery bleeding into the FROM clause .. change:: :tags: sqlite :tickets: 968 Overhauled SQLite date/time bind/result processing to use regular expressions and format strings, rather than strptime/strftime, to generically support pre-1900 dates, dates with microseconds. .. change:: :tags: sqlite :tickets: String's (and Unicode's, UnicodeText's, etc.) convert_unicode logic disabled in the sqlite dialect, to adjust for pysqlite 2.5.0's new requirement that only Python unicode objects are accepted; http://itsystementwicklung.de/pipermail/list-pysqlite/2008-March/000018.html .. change:: :tags: mysql :tickets: Temporary tables are now reflectable. .. change:: :tags: oracle :tickets: 1187 Oracle will detect string-based statements which contain comments at the front before a SELECT as SELECT statements. .. changelog:: :version: 0.5.0rc1 :released: Thu Sep 11 2008 .. change:: :tags: orm :tickets: Query now has delete() and update(values) methods. This allows to perform bulk deletes/updates with the Query object. .. change:: :tags: orm :tickets: The RowTuple object returned by Query(\*cols) now features keynames which prefer mapped attribute names over column keys, column keys over column names, i.e. 
Query(Class.foo, Class.bar) will have names "foo" and "bar" even if those are not the names of the underlying Column objects. Direct Column objects such as Query(table.c.col) will return the "key" attribute of the Column. .. change:: :tags: orm :tickets: Added scalar() and value() methods to Query, each return a single scalar value. scalar() takes no arguments and is roughly equivalent to first()[0], value() takes a single column expression and is roughly equivalent to values(expr).next()[0]. .. change:: :tags: orm :tickets: Improved the determination of the FROM clause when placing SQL expressions in the query() list of entities. In particular scalar subqueries should not "leak" their inner FROM objects out into the enclosing query. .. change:: :tags: orm :tickets: Joins along a relation() from a mapped class to a mapped subclass, where the mapped subclass is configured with single table inheritance, will include an IN clause which limits the subtypes of the joined class to those requested, within the ON clause of the join. This takes effect for eager load joins as well as query.join(). Note that in some scenarios the IN clause will appear in the WHERE clause of the query as well since this discrimination has multiple trigger points. .. change:: :tags: orm :tickets: AttributeExtension has been refined such that the event is fired before the mutation actually occurs. Additionally, the append() and set() methods must now return the given value, which is used as the value to be used in the mutation operation. This allows creation of validating AttributeListeners which raise before the action actually occurs, and which can change the given value into something else before its used. .. change:: :tags: orm :tickets: column_property(), composite_property(), and relation() now accept a single or list of AttributeExtensions using the "extension" keyword argument. .. change:: :tags: orm :tickets: query.order_by().get() silently drops the "ORDER BY" from the query issued by GET but does not raise an exception. .. change:: :tags: orm :tickets: Added a Validator AttributeExtension, as well as a @validates decorator which is used in a similar fashion as @reconstructor, and marks a method as validating one or more mapped attributes. .. change:: :tags: orm :tickets: 1140 class.someprop.in_() raises NotImplementedError pending the implementation of "in\_" for relation .. change:: :tags: orm :tickets: 1127 Fixed primary key update for many-to-many collections where the collection had not been loaded yet .. change:: :tags: orm :tickets: Fixed bug whereby deferred() columns with a group in conjunction with an otherwise unrelated synonym() would produce an AttributeError during deferred load. .. change:: :tags: orm :tickets: 1128 The before_flush() hook on SessionExtension takes place before the list of new/dirty/deleted is calculated for the final time, allowing routines within before_flush() to further change the state of the Session before the flush proceeds. .. change:: :tags: orm :tickets: The "extension" argument to Session and others can now optionally be a list, supporting events sent to multiple SessionExtension instances. Session places SessionExtensions in Session.extensions. .. change:: :tags: orm :tickets: Reentrant calls to flush() raise an error. This also serves as a rudimentary, but not foolproof, check against concurrent calls to Session.flush(). .. 
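A minimal sketch of the new ``@validates`` decorator described above, on a hypothetical declarative ``User`` class::

    from sqlalchemy import Column, Integer, String
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import validates

    Base = declarative_base()

    class User(Base):
        __tablename__ = 'users'
        id = Column(Integer, primary_key=True)
        email = Column(String(100))

        @validates('email')
        def validate_email(self, key, value):
            # invoked whenever .email is set; the returned value is what
            # actually gets assigned to the attribute
            assert '@' in value
            return value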
change:: :tags: orm :tickets: Improved the behavior of query.join() when joining to joined-table inheritance subclasses, using explicit join criteria (i.e. not on a relation). .. change:: :tags: orm :tickets: @orm.attributes.reconstitute and MapperExtension.reconstitute have been renamed to @orm.reconstructor and MapperExtension.reconstruct_instance .. change:: :tags: orm :tickets: 1129 Fixed @reconstructor hook for subclasses which inherit from a base class. .. change:: :tags: orm :tickets: 1132 The composite() property type now supports a __set_composite_values__() method on the composite class which is required if the class represents state using attribute names other than the column's keynames; default-generated values now get populated properly upon flush. Also, composites with attributes set to None compare correctly. .. change:: :tags: orm :tickets: The 3-tuple of iterables returned by attributes.get_history() may now be a mix of lists and tuples. (Previously members were always lists.) .. change:: :tags: orm :tickets: 1151 Fixed bug whereby changing a primary key attribute on an entity where the attribute's previous value had been expired would produce an error upon flush(). .. change:: :tags: orm :tickets: Fixed custom instrumentation bug whereby get_instance_dict() was not called for newly constructed instances not loaded by the ORM. .. change:: :tags: orm :tickets: 1150 Session.delete() adds the given object to the session if not already present. This was a regression bug from 0.4. .. change:: :tags: orm :tickets: The `echo_uow` flag on `Session` is deprecated, and unit-of-work logging is now application-level only, not per-session level. .. change:: :tags: orm :tickets: 1153 Removed conflicting `contains()` operator from `InstrumentedAttribute` which didn't accept `escape` kwaarg. .. change:: :tags: declarative :tickets: 1161 Fixed bug whereby mapper couldn't initialize if a composite primary key referenced another table that was not defined yet. .. change:: :tags: declarative :tickets: Fixed exception throw which would occur when string-based primaryjoin condition was used in conjunction with backref. .. change:: :tags: schema :tickets: 1033 Added "sorted_tables" accessor to MetaData, which returns Table objects sorted in order of dependency as a list. This deprecates the MetaData.table_iterator() method. The "reverse=False" keyword argument has also been removed from util.sort_tables(); use the Python 'reversed' function to reverse the results. .. change:: :tags: schema :tickets: The 'length' argument to all Numeric types has been renamed to 'scale'. 'length' is deprecated and is still accepted with a warning. .. change:: :tags: schema :tickets: Dropped 0.3-compatibility for user defined types (convert_result_value, convert_bind_param). .. change:: :tags: sql :tickets: 1068 Temporarily rolled back the "ORDER BY" enhancement from. This feature is on hold pending further development. .. change:: :tags: sql :tickets: The exists() construct won't "export" its contained list of elements as FROM clauses, allowing them to be used more effectively in the columns clause of a SELECT. .. change:: :tags: sql :tickets: 798 and_() and or_() now generate a ColumnElement, allowing boolean expressions as result columns, i.e. select([and_(1, 0)]). .. change:: :tags: sql :tickets: Bind params now subclass ColumnElement which allows them to be selectable by orm.query (they already had most ColumnElement semantics). .. 
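A short sketch of the new ``MetaData.sorted_tables`` accessor noted above, assuming ``metadata`` holds Tables related by ForeignKey::

    # tables come back in dependency order: parents before children
    ordered_names = [table.name for table in metadata.sorted_tables]

    # reverse the list to get a safe drop order
    drop_order = list(reversed(metadata.sorted_tables))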
change:: :tags: sql :tickets: Added select_from() method to exists() construct, which becomes more and more compatible with a regular select(). .. change:: :tags: sql :tickets: 1160 Added func.min(), func.max(), func.sum() as "generic functions", which basically allows for their return type to be determined automatically. Helps with dates on SQLite, decimal types, others. .. change:: :tags: sql :tickets: added decimal.Decimal as an "auto-detect" type; bind parameters and generic functions will set their type to Numeric when a Decimal is used. .. change:: :tags: mysql :tickets: The 'length' argument to MSInteger, MSBigInteger, MSTinyInteger, MSSmallInteger and MSYear has been renamed to 'display_width'. .. change:: :tags: mysql :tickets: 1146 Added MSMediumInteger type. .. change:: :tags: mysql :tickets: the function func.utc_timestamp() compiles to UTC_TIMESTAMP, without the parenthesis, which seem to get in the way when using in conjunction with executemany(). .. change:: :tags: oracle :tickets: 536 limit/offset no longer uses ROW NUMBER OVER to limit rows, and instead uses subqueries in conjunction with a special Oracle optimization comment. Allows LIMIT/OFFSET to work in conjunction with DISTINCT. .. change:: :tags: oracle :tickets: 1155 has_sequence() now takes the current "schema" argument into account .. change:: :tags: oracle :tickets: 1121 added BFILE to reflected type names .. changelog:: :version: 0.5.0beta3 :released: Mon Aug 04 2008 .. change:: :tags: orm :tickets: The "entity_name" feature of SQLAlchemy mappers has been removed. For rationale, see http://tinyurl.com/6nm2ne .. change:: :tags: orm :tickets: the "autoexpire" flag on Session, sessionmaker(), and scoped_session() has been renamed to "expire_on_commit". It does not affect the expiration behavior of rollback(). .. change:: :tags: orm :tickets: fixed endless loop bug which could occur within a mapper's deferred load of inherited attributes. .. change:: :tags: orm :tickets: a legacy-support flag "_enable_transaction_accounting" flag added to Session which when False, disables all transaction-level object accounting, including expire on rollback, expire on commit, new/deleted list maintenance, and autoflush on begin. .. change:: :tags: orm :tickets: The 'cascade' parameter to relation() accepts None as a value, which is equivalent to no cascades. .. change:: :tags: orm :tickets: A critical fix to dynamic relations allows the "modified" history to be properly cleared after a flush(). .. change:: :tags: orm :tickets: user-defined @properties on a class are detected and left in place during mapper initialization. This means that a table-bound column of the same name will not be mapped at all if a @property is in the way (and the column is not remapped to a different name), nor will an instrumented attribute from an inherited class be applied. The same rules apply for names excluded using the include_properties/exclude_properties collections. .. change:: :tags: orm :tickets: Added a new SessionExtension hook called after_attach(). This is called at the point of attachment for objects via add(), add_all(), delete(), and merge(). .. change:: :tags: orm :tickets: 1111 A mapper which inherits from another, when inheriting the columns of its inherited mapper, will use any reassigned property names specified in that inheriting mapper. Previously, if "Base" had reassigned "base_id" to the name "id", "SubBase(Base)" would still get an attribute called "base_id". 
This could be worked around by explicitly stating the column in each submapper as well but this is fairly unworkable and also impossible when using declarative. .. change:: :tags: orm :tickets: Fixed a series of potential race conditions in Session whereby asynchronous GC could remove unmodified, no longer referenced items from the session as they were present in a list of items to be processed, typically during session.expunge_all() and dependent methods. .. change:: :tags: orm :tickets: Some improvements to the _CompileOnAttr mechanism which should reduce the probability of "Attribute x was not replaced during compile" warnings. (this generally applies to SQLA hackers, like Elixir devs). .. change:: :tags: orm :tickets: Fixed bug whereby the "unsaved, pending instance" FlushError raised for a pending orphan would not take superclass mappers into account when generating the list of relations responsible for the error. .. change:: :tags: sql :tickets: func.count() with no arguments renders as COUNT(*), equivalent to func.count(text('*')). .. change:: :tags: sql :tickets: 1068 simple label names in ORDER BY expressions render as themselves, and not as a re-statement of their corresponding expression. This feature is currently enabled only for SQLite, MySQL, and PostgreSQL. It can be enabled on other dialects as each is shown to support this behavior. .. change:: :tags: ext :tickets: Class-bound attributes sent as arguments to relation()'s remote_side and foreign_keys parameters are now accepted, allowing them to be used with declarative. Additionally fixed bugs involving order_by being specified as a class-bound attribute in conjunction with eager loading. .. change:: :tags: ext :tickets: declarative initialization of Columns adjusted so that non-renamed columns initialize in the same way as a non declarative mapper. This allows an inheriting mapper to set up its same-named "id" columns in particular such that the parent "id" column is favored over the child column, reducing database round trips when this value is requested. .. change:: :tags: mysql :tickets: 1110 Quoting of MSEnum values for use in CREATE TABLE is now optional & will be quoted on demand as required. (Quoting was always optional for use with existing tables.) .. changelog:: :version: 0.5.0beta2 :released: Mon Jul 14 2008 .. change:: :tags: orm :tickets: 870 In addition to expired attributes, deferred attributes also load if their data is present in the result set. .. change:: :tags: orm :tickets: session.refresh() raises an informative error message if the list of attributes does not include any column-based attributes. .. change:: :tags: orm :tickets: query() raises an informative error message if no columns or mappers are specified. .. change:: :tags: orm :tickets: lazy loaders now trigger autoflush before proceeding. This allows expire() of a collection or scalar relation to function properly in the context of autoflush. .. change:: :tags: orm :tickets: 887 column_property() attributes which represent SQL expressions or columns that are not present in the mapped tables (such as those from views) are automatically expired after an INSERT or UPDATE, assuming they have not been locally modified, so that they are refreshed with the most recent data upon access. .. change:: :tags: orm :tickets: 1082 Fixed explicit, self-referential joins between two joined-table inheritance mappers when using query.join(cls, aliased=True). .. 
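A rough illustration of the "generic function" behavior described above, i.e. func.count() with no arguments and func.sum() deriving its return type; this sketch assumes the pre-1.4 ``select([...])`` calling style and a hypothetical ``orders`` table::

    from sqlalchemy import (MetaData, Table, Column, Integer, Numeric,
                            func, select)

    metadata = MetaData()
    orders = Table('orders', metadata,
                   Column('id', Integer, primary_key=True),
                   Column('amount', Numeric(10, 2)))

    # renders roughly as: SELECT count(*) AS count_1 FROM orders
    count_stmt = select([func.count()]).select_from(orders)

    # func.sum() derives its result type from its argument, so this
    # expression is typed as Numeric rather than NullType
    total_stmt = select([func.sum(orders.c.amount)])

..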
change:: :tags: orm :tickets: Fixed query.join() when used in conjunction with a columns-only clause and an SQL-expression ON clause in the join. .. change:: :tags: orm :tickets: The "allow_column_override" flag from mapper() has been removed. This flag is virtually always misunderstood. Its specific functionality is available via the include_properties/exclude_properties mapper arguments. .. change:: :tags: orm :tickets: 1066 Repaired `__str__()` method on Query. .. change:: :tags: orm :tickets: Session.bind gets used as a default even when table/mapper specific binds are defined. .. change:: :tags: schema :tickets: 1075 Added prefixes option to `Table` that accepts a list of strings to insert after CREATE in the CREATE TABLE statement. .. change:: :tags: schema :tickets: Unicode, UnicodeText types now set "assert_unicode" and "convert_unicode" by default, but accept overriding \**kwargs for these values. .. change:: :tags: sql :tickets: Added new match() operator that performs a full-text search. Supported on PostgreSQL, SQLite, MySQL, MS-SQL, and Oracle backends. .. change:: :tags: sqlite :tickets: 1090 Modified SQLite's representation of "microseconds" to match the output of str(somedatetime), i.e. in that the microseconds are represented as fractional seconds in string format. This makes SQLA's SQLite date type compatible with datetimes that were saved directly using Pysqlite (which just calls str()). Note that this is incompatible with the existing microseconds values in a SQLA 0.4 generated SQLite database file. To get the old behavior globally: from sqlalchemy.databases.sqlite import DateTimeMixin DateTimeMixin.__legacy_microseconds__ = True To get the behavior on individual DateTime types: t = sqlite.SLDateTime() t.__legacy_microseconds__ = True Then use "t" as the type on the Column. .. change:: :tags: sqlite :tickets: SQLite Date, DateTime, and Time types only accept Python datetime objects now, not strings. If you'd like to format dates as strings yourself with SQLite, use a String type. If you'd like them to return datetime objects anyway despite their accepting strings as input, make a TypeDecorator around String - SQLA doesn't encourage this pattern. .. change:: :tags: extensions :tickets: 1096 Declarative supports a __table_args__ class variable, which is either a dictionary, or tuple of the form (arg1, arg2, ..., {kwarg1:value, ...}) which contains positional + kw arguments to be passed to the Table constructor. .. changelog:: :version: 0.5.0beta1 :released: Thu Jun 12 2008 .. change:: :tags: :tickets: The "__init__" trigger/decorator added by mapper now attempts to exactly mirror the argument signature of the original __init__. The pass-through for '_sa_session' is no longer implicit- you must allow for this keyword argument in your constructor. .. change:: :tags: :tickets: ClassState is renamed to ClassManager. .. change:: :tags: :tickets: Classes may supply their own InstrumentationManager by providing a __sa_instrumentation_manager__ property. .. change:: :tags: :tickets: Custom instrumentation may use any mechanism to associate a ClassManager with a class and an InstanceState with an instance. Attributes on those objects are still the default association mechanism used by SQLAlchemy's native instrumentation. .. change:: :tags: :tickets: Moved entity_name, _sa_session_id, and _instance_key from the instance object to the instance state. These values are still available in the old way, which is now deprecated, using descriptors attached to the class. 
A deprecation warning will be issued when accessed. .. change:: :tags: :tickets: The _prepare_instrumentation alias for prepare_instrumentation has been removed. .. change:: :tags: :tickets: sqlalchemy.exceptions has been renamed to sqlalchemy.exc. The module may be imported under either name. .. change:: :tags: :tickets: ORM-related exceptions are now defined in sqlalchemy.orm.exc. ConcurrentModificationError, FlushError, and UnmappedColumnError compatibility aliases are installed in sqlalchemy.exc during the import of sqlalchemy.orm. .. change:: :tags: :tickets: sqlalchemy.logging has been renamed to sqlalchemy.log. .. change:: :tags: :tickets: The transitional sqlalchemy.log.SADeprecationWarning alias for the warning's definition in sqlalchemy.exc has been removed. .. change:: :tags: :tickets: exc.AssertionError has been removed and usage replaced with Python's built-in AssertionError. .. change:: :tags: :tickets: The behavior of MapperExtensions attached to multiple, entity_name= primary mappers for a single class has been altered. The first mapper() defined for a class is the only mapper eligible for the MapperExtension 'instrument_class', 'init_instance' and 'init_failed' events. This is backwards incompatible; previously the extensions of last mapper defined would receive these events. .. change:: :tags: firebird :tickets: Added support for returning values from inserts (2.0+ only), updates and deletes (2.1+ only). .. change:: :tags: general :tickets: global "propigate"->"propagate" change. .. change:: :tags: orm :tickets: polymorphic_union() function respects the "key" of each Column if they differ from the column's name. .. change:: :tags: orm :tickets: 1199 Fixed 0.4-only bug preventing composite columns from working properly with inheriting mappers .. change:: :tags: orm :tickets: Fixed RLock-related bug in mapper which could deadlock upon reentrant mapper compile() calls, something that occurs when using declarative constructs inside of ForeignKey objects. Ported from 0.5. .. change:: :tags: orm :tickets: 1213 Fixed bug in composite types which prevented a primary-key composite type from being mutated. .. change:: :tags: orm :tickets: 976 Added ScopedSession.is_active accessor. .. change:: :tags: orm :tickets: 939 Class-bound accessor can be used as the argument to relation() order_by. .. change:: :tags: orm :tickets: 1072 Fixed shard_id argument on ShardedSession.execute(). .. change:: :tags: sql :tickets: 1246 Connection.invalidate() checks for closed status to avoid attribute errors. .. change:: :tags: sql :tickets: 1094 NullPool supports reconnect on failure behavior. .. change:: :tags: sql :tickets: 1299 The per-dialect cache used by TypeEngine to cache dialect-specific types is now a WeakKeyDictionary. This to prevent dialect objects from being referenced forever for an application that creates an arbitrarily large number of engines or dialects. There is a small performance penalty which will be resolved in 0.6. .. change:: :tags: sql :tickets: Fixed SQLite reflection methods so that non-present cursor.description, which triggers an auto-cursor close, will be detected so that no results doesn't fail on recent versions of pysqlite which raise an error when fetchone() called with no rows present. .. change:: :tags: postgres :tickets: 714 Added Index reflection support to Postgres, using a great patch we long neglected, submitted by Ken Kuhlman. .. change:: :tags: mysql :tickets: 1241 Fixed bug in exception raise when FK columns not present during reflection. .. 
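The __table_args__ support described in the 0.5.0beta2 notes above can be sketched as follows; the ``Account`` class and its constraint are illustrative only::

    from sqlalchemy import Column, Integer, String, UniqueConstraint
    from sqlalchemy.ext.declarative import declarative_base

    Base = declarative_base()

    class Account(Base):
        __tablename__ = 'account'

        id = Column(Integer, primary_key=True)
        name = Column(String(50))
        region = Column(String(50))

        # positional Table arguments first, keyword arguments for the
        # Table constructor in a trailing dictionary
        __table_args__ = (
            UniqueConstraint('name', 'region'),
            {'mysql_engine': 'InnoDB'},
        )

..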
change:: :tags: oracle :tickets: 1265 Fixed bug which was preventing out params of certain types from being received; thanks a ton to huddlej at wwu.edu ! SQLAlchemy-0.8.4/doc/build/changelog/changelog_06.rst0000644000076500000240000051414512251147171023025 0ustar classicstaff00000000000000 ============== 0.6 Changelog ============== .. changelog:: :version: 0.6.9 :released: Sat May 05 2012 .. change:: :tags: general :tickets: 2279 Adjusted the "importlater" mechanism, which is used internally to resolve import cycles, such that the usage of __import__ is completed when the import of sqlalchemy or sqlalchemy.orm is done, thereby avoiding any usage of __import__ after the application starts new threads, fixes. .. change:: :tags: orm :tickets: 2197 Fixed bug whereby the source clause used by query.join() would be inconsistent if against a column expression that combined multiple entities together. .. change:: :tags: orm, bug :tickets: 2310 fixed inappropriate evaluation of user-mapped object in a boolean context within query.get(). .. change:: :tags: orm :tickets: 2228 Fixed bug apparent only in Python 3 whereby sorting of persistent + pending objects during flush would produce an illegal comparison, if the persistent object primary key is not a single integer. .. change:: :tags: orm :tickets: 2234 Fixed bug where query.join() + aliased=True from a joined-inh structure to itself on relationship() with join condition on the child table would convert the lead entity into the joined one inappropriately. .. change:: :tags: orm :tickets: 2287 Fixed bug whereby mapper.order_by attribute would be ignored in the "inner" query within a subquery eager load. . .. change:: :tags: orm :tickets: 2215 Fixed bug whereby if a mapped class redefined __hash__() or __eq__() to something non-standard, which is a supported use case as SQLA should never consult these, the methods would be consulted if the class was part of a "composite" (i.e. non-single-entity) result set. .. change:: :tags: orm :tickets: 2188 Fixed subtle bug that caused SQL to blow up if: column_property() against subquery + joinedload + LIMIT + order by the column property() occurred. . .. change:: :tags: orm :tickets: 2207 The join condition produced by with_parent as well as when using a "dynamic" relationship against a parent will generate unique bindparams, rather than incorrectly repeating the same bindparam. . .. change:: :tags: orm :tickets: 2199 Repaired the "no statement condition" assertion in Query which would attempt to raise if a generative method were called after from_statement() were called.. .. change:: :tags: orm :tickets: 1776 Cls.column.collate("some collation") now works. .. change:: :tags: orm, bug :tickets: 2297 Fixed the error formatting raised when a tuple is inadvertently passed to session.query(). .. change:: :tags: engine :tickets: 2317 Backported the fix for introduced in 0.7.4, which ensures that the connection is in a valid state before attempting to call rollback()/prepare()/release() on savepoint and two-phase transactions. .. change:: :tags: sql :tickets: 2188 Fixed two subtle bugs involving column correspondence in a selectable, one with the same labeled subquery repeated, the other when the label has been "grouped" and loses itself. Affects. .. change:: :tags: sql :tickets: Fixed bug whereby "warn on unicode" flag would get set for the String type when used with certain dialects. This bug is not in 0.7. .. 
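The collate() fix noted above (Cls.column.collate("some collation") now works) can be exercised as in this minimal sketch; the ``User`` mapping and the use of SQLite's built-in NOCASE collation are assumptions for illustration::

    from sqlalchemy import Column, Integer, String, create_engine
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import sessionmaker

    Base = declarative_base()

    class User(Base):
        __tablename__ = 'user'
        id = Column(Integer, primary_key=True)
        name = Column(String(50))

    engine = create_engine('sqlite://')
    Base.metadata.create_all(engine)
    session = sessionmaker(bind=engine)()

    # case-insensitive ordering on SQLite via its NOCASE collation
    users = session.query(User).order_by(User.name.collate('NOCASE')).all()

..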
change:: :tags: sql :tickets: 2270 Fixed bug whereby with_only_columns() method of Select would fail if a selectable were passed.. However, the FROM behavior is still incorrect here, so you need 0.7 in any case for this use case to be usable. .. change:: :tags: schema :tickets: Added an informative error message when ForeignKeyConstraint refers to a column name in the parent that is not found. .. change:: :tags: postgresql :tickets: 2291, 2141 Fixed bug related to whereby the same modified index behavior in PG 9 affected primary key reflection on a renamed column.. .. change:: :tags: mysql :tickets: 2186 Fixed OurSQL dialect to use ansi-neutral quote symbol "'" for XA commands instead of '"'. . .. change:: :tags: mysql :tickets: 2225 a CREATE TABLE will put the COLLATE option after CHARSET, which appears to be part of MySQL's arbitrary rules regarding if it will actually work or not. .. change:: :tags: mssql, bug :tickets: 2269 Decode incoming values when retrieving list of index names and the names of columns within those indexes. .. change:: :tags: oracle :tickets: 2200 Added ORA-00028 to disconnect codes, use cx_oracle _Error.code to get at the code,. .. change:: :tags: oracle :tickets: 2220 repaired the oracle.RAW type which did not generate the correct DDL. .. change:: :tags: oracle :tickets: 2212 added CURRENT to reserved word list. .. change:: :tags: examples :tickets: 2266 Adjusted dictlike-polymorphic.py example to apply the CAST such that it works on PG, other databases. .. changelog:: :version: 0.6.8 :released: Sun Jun 05 2011 .. change:: :tags: orm :tickets: 2144 Calling query.get() against a column-based entity is invalid, this condition now raises a deprecation warning. .. change:: :tags: orm :tickets: 2151 a non_primary mapper will inherit the _identity_class of the primary mapper. This so that a non_primary established against a class that's normally in an inheritance mapping will produce results that are identity-map compatible with that of the primary mapper .. change:: :tags: orm :tickets: 2148 Backported 0.7's identity map implementation, which does not use a mutex around removal. This as some users were still getting deadlocks despite the adjustments in 0.6.7; the 0.7 approach that doesn't use a mutex does not appear to produce "dictionary changed size" issues, the original rationale for the mutex. .. change:: :tags: orm :tickets: 2163 Fixed the error message emitted for "can't execute syncrule for destination column 'q'; mapper 'X' does not map this column" to reference the correct mapper. . .. change:: :tags: orm :tickets: 2149 Fixed bug where determination of "self referential" relationship would fail with no workaround for joined-inh subclass related to itself, or joined-inh subclass related to a subclass of that with no cols in the sub-sub class in the join condition. .. change:: :tags: orm :tickets: 2153 mapper() will ignore non-configured foreign keys to unrelated tables when determining inherit condition between parent and child class. This is equivalent to behavior already applied to declarative. Note that 0.7 has a more comprehensive solution to this, altering how join() itself determines an FK error. .. change:: :tags: orm :tickets: 2171 Fixed bug whereby mapper mapped to an anonymous alias would fail if logging were used, due to unescaped % sign in the alias name. .. 
change:: :tags: orm :tickets: 2170 Modify the text of the message which occurs when the "identity" key isn't detected on flush, to include the common cause that the Column isn't set up to detect auto-increment correctly;. .. change:: :tags: orm :tickets: 2182 Fixed bug where transaction-level "deleted" collection wouldn't be cleared of expunged states, raising an error if they later became transient. .. change:: :tags: sql :tickets: 2147 Fixed bug whereby if FetchedValue was passed to column server_onupdate, it would not have its parent "column" assigned, added test coverage for all column default assignment patterns. .. change:: :tags: sql :tickets: 2167 Fixed bug whereby nesting a label of a select() with another label in it would produce incorrect exported columns. Among other things this would break an ORM column_property() mapping against another column_property(). . .. change:: :tags: engine :tickets: 2178 Adjusted the __contains__() method of a RowProxy result row such that no exception throw is generated internally; NoSuchColumnError() also will generate its message regardless of whether or not the column construct can be coerced to a string.. .. change:: :tags: postgresql :tickets: 2141 Fixed bug affecting PG 9 whereby index reflection would fail if against a column whose name had changed. . .. change:: :tags: postgresql :tickets: 2175 Some unit test fixes regarding numeric arrays, MATCH operator. A potential floating-point inaccuracy issue was fixed, and certain tests of the MATCH operator only execute within an EN-oriented locale for now. . .. change:: :tags: mssql :tickets: 2169 Fixed bug in MSSQL dialect whereby the aliasing applied to a schema-qualified table would leak into enclosing select statements. .. change:: :tags: mssql :tickets: 2159 Fixed bug whereby DATETIME2 type would fail on the "adapt" step when used in result sets or bound parameters. This issue is not in 0.7. .. changelog:: :version: 0.6.7 :released: Wed Apr 13 2011 .. change:: :tags: orm :tickets: 2087 Tightened the iterate vs. remove mutex around the identity map iteration, attempting to reduce the chance of an (extremely rare) reentrant gc operation causing a deadlock. Might remove the mutex in 0.7. .. change:: :tags: orm :tickets: 2030 Added a `name` argument to `Query.subquery()`, to allow a fixed name to be assigned to the alias object. .. change:: :tags: orm :tickets: 2019 A warning is emitted when a joined-table inheriting mapper has no primary keys on the locally mapped table (but has pks on the superclass table). .. change:: :tags: orm :tickets: 2038 Fixed bug where "middle" class in a polymorphic hierarchy would have no 'polymorphic_on' column if it didn't also specify a 'polymorphic_identity', leading to strange errors upon refresh, wrong class loaded when querying from that target. Also emits the correct WHERE criterion when using single table inheritance. .. change:: :tags: orm :tickets: 1995 Fixed bug where a column with a SQL or server side default that was excluded from a mapping with include_properties or exclude_properties would result in UnmappedColumnError. .. change:: :tags: orm :tickets: 2046 A warning is emitted in the unusual case that an append or similar event on a collection occurs after the parent object has been dereferenced, which prevents the parent from being marked as "dirty" in the session. This will be an exception in 0.7. .. 
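A short sketch of the new `name` argument to Query.subquery() mentioned above; the ``User`` mapping and the alias name are illustrative only, and no database connection is needed just to build the statement::

    from sqlalchemy import Column, Integer, String
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import sessionmaker

    Base = declarative_base()

    class User(Base):
        __tablename__ = 'user'
        id = Column(Integer, primary_key=True)
        name = Column(String(50))

    session = sessionmaker()()

    # the generated alias renders as "named_users" rather than an
    # anonymous name such as anon_1
    subq = session.query(User.id).filter(User.name != None).subquery(name='named_users')
    query = session.query(User).join(subq, User.id == subq.c.id)

..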
change:: :tags: orm :tickets: 2098 Fixed bug in query.options() whereby a path applied to a lazyload using string keys could overlap a same named attribute on the wrong entity. Note 0.7 has an updated version of this fix. .. change:: :tags: orm :tickets: 2063 Reworded the exception raised when a flush is attempted of a subclass that is not polymorphic against the supertype. .. change:: :tags: orm :tickets: 2123 Some fixes to the state handling regarding backrefs, typically when autoflush=False, where the back-referenced collection wouldn't properly handle add/removes with no net change. Thanks to Richard Murri for the test case + patch. .. change:: :tags: orm :tickets: 2130 a "having" clause would be copied from the inside to the outside query if from_self() were used.. .. change:: :tags: sql :tickets: 2028 Column.copy(), as used in table.tometadata(), copies the 'doc' attribute. .. change:: :tags: sql :tickets: 2023 Added some defs to the resultproxy.c extension so that the extension compiles and runs on Python 2.4. .. change:: :tags: sql :tickets: 2042 The compiler extension now supports overriding the default compilation of expression._BindParamClause including that the auto-generated binds within the VALUES/SET clause of an insert()/update() statement will also use the new compilation rules. .. change:: :tags: sql :tickets: 2089 Added accessors to ResultProxy "returns_rows", "is_insert" .. change:: :tags: sql :tickets: 2116 The limit/offset keywords to select() as well as the value passed to select.limit()/offset() will be coerced to integer. .. change:: :tags: engine :tickets: 2102 Fixed bug in QueuePool, SingletonThreadPool whereby connections that were discarded via overflow or periodic cleanup() were not explicitly closed, leaving garbage collection to the task instead. This generally only affects non-reference-counting backends like Jython and Pypy. Thanks to Jaimy Azle for spotting this. .. change:: :tags: sqlite :tickets: 2115 Fixed bug where reflection of foreign key created as "REFERENCES " without col name would fail. .. change:: :tags: postgresql :tickets: 1083 When explicit sequence execution derives the name of the auto-generated sequence of a SERIAL column, which currently only occurs if implicit_returning=False, now accommodates if the table + column name is greater than 63 characters using the same logic Postgresql uses. .. change:: :tags: postgresql :tickets: 2044 Added an additional libpq message to the list of "disconnect" exceptions, "could not receive data from server" .. change:: :tags: postgresql :tickets: 2092 Added RESERVED_WORDS for postgresql dialect. .. change:: :tags: postgresql :tickets: 2073 Fixed the BIT type to allow a "length" parameter, "varying" parameter. Reflection also fixed. .. change:: :tags: informix :tickets: 2092 Added RESERVED_WORDS informix dialect. .. change:: :tags: mssql :tickets: 2071 Rewrote the query used to get the definition of a view, typically when using the Inspector interface, to use sys.sql_modules instead of the information schema, thereby allowing views definitions longer than 4000 characters to be fully returned. .. change:: :tags: mysql :tickets: 2047 oursql dialect accepts the same "ssl" arguments in create_engine() as that of MySQLdb. .. change:: :tags: firebird :tickets: 2083 The "implicit_returning" flag on create_engine() is honored if set to False. .. 
change:: :tags: oracle :tickets: 2100 Using column names that would require quotes for the column itself or for a name-generated bind parameter, such as names with special characters, underscores, non-ascii characters, now properly translate bind parameter keys when talking to cx_oracle. .. change:: :tags: oracle :tickets: 2116 Oracle dialect adds use_binds_for_limits=False create_engine() flag, will render the LIMIT/OFFSET values inline instead of as binds, reported to modify the execution plan used by Oracle. .. change:: :tags: ext :tickets: 2090 The horizontal_shard ShardedSession class accepts the common Session argument "query_cls" as a constructor argument, to enable further subclassing of ShardedQuery. .. change:: :tags: declarative :tickets: 2050 Added an explicit check for the case that the name 'metadata' is used for a column attribute on a declarative class. .. change:: :tags: declarative :tickets: 2061 Fix error message referencing old @classproperty name to reference @declared_attr .. change:: :tags: declarative :tickets: 2091 Arguments in __mapper_args__ that aren't "hashable" aren't mistaken for always-hashable, possibly-column arguments. .. change:: :tags: documentation :tickets: 2029 Documented SQLite DATE/TIME/DATETIME types. .. change:: :tags: examples :tickets: 2090 The Beaker caching example allows a "query_cls" argument to the query_callable() function. .. changelog:: :version: 0.6.6 :released: Sat Jan 08 2011 .. change:: :tags: orm :tickets: Fixed bug whereby a non-"mutable" attribute modified event which occurred on an object that was clean except for preceding mutable attribute changes would fail to strongly reference itself in the identity map. This would cause the object to be garbage collected, losing track of any changes that weren't previously saved in the "mutable changes" dictionary. .. change:: :tags: orm :tickets: 2013 Fixed bug whereby "passive_deletes='all'" wasn't passing the correct symbols to lazy loaders during flush, thereby causing an unwarranted load. .. change:: :tags: orm :tickets: 1997 Fixed bug which prevented composite mapped attributes from being used on a mapped select statement.. Note the workings of composite are slated to change significantly in 0.7. .. change:: :tags: orm :tickets: 1976 active_history flag also added to composite(). The flag has no effect in 0.6, but is instead a placeholder flag for forwards compatibility, as it applies in 0.7 for composites. .. change:: :tags: orm :tickets: 2002 Fixed uow bug whereby expired objects passed to Session.delete() would not have unloaded references or collections taken into account when deleting objects, despite passive_deletes remaining at its default of False. .. change:: :tags: orm :tickets: 1987 A warning is emitted when version_id_col is specified on an inheriting mapper when the inherited mapper already has one, if those column expressions are not the same. .. change:: :tags: orm :tickets: 1954 "innerjoin" flag doesn't take effect along the chain of joinedload() joins if a previous join in that chain is an outer join, thus allowing primary rows without a referenced child row to be correctly returned in results. .. change:: :tags: orm :tickets: 1964 Fixed bug regarding "subqueryload" strategy whereby strategy would fail if the entity was an aliased() construct. .. change:: :tags: orm :tickets: 2014 Fixed bug regarding "subqueryload" strategy whereby the join would fail if using a multi-level load of the form from A->joined-subclass->C .. 
change:: :tags: orm :tickets: 1968 Fixed indexing of Query objects by -1. It was erroneously transformed to the empty slice -1:0 that resulted in IndexError. .. change:: :tags: orm :tickets: 1971 The mapper argument "primary_key" can be passed as a single column as well as a list or tuple. The documentation examples that illustrated it as a scalar value have been changed to lists. .. change:: :tags: orm :tickets: 1961 Added active_history flag to relationship() and column_property(), forces attribute events to always load the "old" value, so that it's available to attributes.get_history(). .. change:: :tags: orm :tickets: 1977 Query.get() will raise if the number of params in a composite key is too large, as well as too small. .. change:: :tags: orm :tickets: 1992 Backport of "optimized get" fix from 0.7, improves the generation of joined-inheritance "load expired row" behavior. .. change:: :tags: orm :tickets: A little more verbiage to the "primaryjoin" error, in an unusual condition that the join condition "works" for viewonly but doesn't work for non-viewonly, and foreign_keys wasn't used - adds "foreign_keys" to the suggestion. Also add "foreign_keys" to the suggestion for the generic "direction" error. .. change:: :tags: sql :tickets: 1984 Fixed operator precedence rules for multiple chains of a single non-associative operator. I.e. "x - (y - z)" will compile as "x - (y - z)" and not "x - y - z". Also works with labels, i.e. "x - (y - z).label('foo')" .. change:: :tags: sql :tickets: 1967 The 'info' attribute of Column is copied during Column.copy(), i.e. as occurs when using columns in declarative mixins. .. change:: :tags: sql :tickets: Added a bind processor for booleans which coerces to int, for DBAPIs such as pymssql that naively call str() on values. .. change:: :tags: sql :tickets: 2000 CheckConstraint will copy its 'initially', 'deferrable', and '_create_rule' attributes within a copy()/tometadata() .. change:: :tags: engine :tickets: The "unicode warning" against non-unicode bind data is now raised only when the Unicode type is used explicitly; not when convert_unicode=True is used on the engine or String type. .. change:: :tags: engine :tickets: 1978 Fixed memory leak in C version of Decimal result processor. .. change:: :tags: engine :tickets: 1871 Implemented sequence check capability for the C version of RowProxy, as well as 2.7 style "collections.Sequence" registration for RowProxy. .. change:: :tags: engine :tickets: 1998 Threadlocal engine methods rollback(), commit(), prepare() won't raise if no transaction is in progress; this was a regression introduced in 0.6. .. change:: :tags: engine :tickets: 2004 Threadlocal engine returns itself upon begin(), begin_nested(); engine then implements contextmanager methods to allow the "with" statement. .. change:: :tags: postgresql :tickets: 1984 Single element tuple expressions inside an IN clause parenthesize correctly, also from .. change:: :tags: postgresql :tickets: 1955 Ensured every numeric, float, int code, scalar + array, are recognized by psycopg2 and pg8000's "numeric" base type. .. change:: :tags: postgresql :tickets: 1956 Added as_uuid=True flag to the UUID type, will receive and return values as Python UUID() objects rather than strings. Currently, the UUID type is only known to work with psycopg2. .. change:: :tags: postgresql :tickets: 1989 Fixed bug whereby KeyError would occur with non-ENUM supported PG versions after a pool dispose+recreate would occur. ..
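The as_uuid=True flag noted above can be used as in the following sketch; the ``tokens`` table is hypothetical, and an actual PostgreSQL connection via psycopg2 is assumed for round trips::

    import uuid

    from sqlalchemy import Column, Integer, MetaData, Table
    from sqlalchemy.dialects.postgresql import UUID

    metadata = MetaData()

    tokens = Table('tokens', metadata,
                   Column('id', Integer, primary_key=True),
                   Column('token', UUID(as_uuid=True)))

    # with as_uuid=True, values are bound and returned as uuid.UUID
    # objects rather than strings
    new_token = uuid.uuid4()
    insert_stmt = tokens.insert().values(token=new_token)

..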
change:: :tags: mysql :tickets: 1960 Fixed error handling for Jython + zxjdbc, such that has_table() property works again. Regression from 0.6.3 (we don't have a Jython buildbot, sorry) .. change:: :tags: sqlite :tickets: 1851 The REFERENCES clause in a CREATE TABLE that includes a remote schema to another table with the same schema name now renders the remote name without the schema clause, as required by SQLite. .. change:: :tags: sqlite :tickets: On the same theme, the REFERENCES clause in a CREATE TABLE that includes a remote schema to a *different* schema than that of the parent table doesn't render at all, as cross-schema references do not appear to be supported. .. change:: :tags: mssql :tickets: 1770 The rewrite of index reflection in was unfortunately not tested correctly, and returned incorrect results. This regression is now fixed. .. change:: :tags: oracle :tickets: 1953 The cx_oracle "decimal detection" logic, which takes place for result set columns with ambiguous numeric characteristics, now uses the decimal point character determined by the locale/ NLS_LANG setting, using an on-first-connect detection of this character. cx_oracle 5.0.3 or greater is also required when using a non-period-decimal-point NLS_LANG setting.. .. change:: :tags: firebird :tickets: 2012 Firebird numeric type now checks for Decimal explicitly, lets float() pass right through, thereby allowing special values such as float('inf'). .. change:: :tags: declarative :tickets: 1972 An error is raised if __table_args__ is not in tuple or dict format, and is not None. .. change:: :tags: sqlsoup :tickets: 1975 Added "map_to()" method to SqlSoup, which is a "master" method which accepts explicit arguments for each aspect of the selectable and mapping, including a base class per mapping. .. change:: :tags: sqlsoup :tickets: Mapped selectables used with the map(), with_labels(), join() methods no longer put the given argument into the internal "cache" dictionary. Particularly since the join() and select() objects are created in the method itself this was pretty much a pure memory leaking behavior. .. change:: :tags: examples :tickets: The versioning example now supports detection of changes in an associated relationship(). .. changelog:: :version: 0.6.5 :released: Sun Oct 24 2010 .. change:: :tags: orm :tickets: 1914 Added a new "lazyload" option "immediateload". Issues the usual "lazy" load operation automatically as the object is populated. The use case here is when loading objects to be placed in an offline cache, or otherwise used after the session isn't available, and straight 'select' loading, not 'joined' or 'subquery', is desired. .. change:: :tags: orm :tickets: 1920 New Query methods: query.label(name), query.as_scalar(), return the query's statement as a scalar subquery with /without label; query.with_entities(\*ent), replaces the SELECT list of the query with new entities. Roughly equivalent to a generative form of query.values() which accepts mapped entities as well as column expressions. .. change:: :tags: orm :tickets: Fixed recursion bug which could occur when moving an object from one reference to another, with backrefs involved, where the initiating parent was a subclass (with its own mapper) of the previous parent. .. change:: :tags: orm :tickets: 1918 Fixed a regression in 0.6.4 which occurred if you passed an empty list to "include_properties" on mapper() .. 
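A brief sketch of Query.with_entities() as described above; the ``User`` mapping is illustrative only, and no database connection is required just to build the queries::

    from sqlalchemy import Column, Integer, String
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import sessionmaker

    Base = declarative_base()

    class User(Base):
        __tablename__ = 'user'
        id = Column(Integer, primary_key=True)
        name = Column(String(50))

    session = sessionmaker()()

    base_query = session.query(User).filter(User.name != None)

    # replace the SELECT list while keeping the existing criteria
    names_only_query = base_query.with_entities(User.name)

..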
change:: :tags: orm :tickets: Fixed labeling bug in Query whereby the NamedTuple would mis-apply labels if any of the column expressions were un-labeled. .. change:: :tags: orm :tickets: 1925 Patched a case where query.join() would adapt the right side to the right side of the left's join inappropriately .. change:: :tags: orm :tickets: Query.select_from() has been beefed up to help ensure that a subsequent call to query.join() will use the select_from() entity, assuming it's a mapped entity and not a plain selectable, as the default "left" side, not the first entity in the Query object's list of entities. .. change:: :tags: orm :tickets: The exception raised by Session when it is used subsequent to a subtransaction rollback (which is what happens when a flush fails in autocommit=False mode) has now been reworded (this is the "inactive due to a rollback in a subtransaction" message). In particular, if the rollback was due to an exception during flush(), the message states this is the case, and reiterates the string form of the original exception that occurred during flush. If the session is closed due to explicit usage of subtransactions (not very common), the message just states this is the case. .. change:: :tags: orm :tickets: The exception raised by Mapper when repeated requests to its initialization are made after initialization already failed no longer assumes the "hasattr" case, since there's other scenarios in which this message gets emitted, and the message also does not compound onto itself multiple times - you get the same message for each attempt at usage. The misnomer "compiles" is being traded out for "initialize". .. change:: :tags: orm :tickets: 1935 Fixed bug in query.update() where 'evaluate' or 'fetch' expiration would fail if the column expression key was a class attribute with a different keyname as the actual column name. .. change:: :tags: orm :tickets: Added an assertion during flush which ensures that no NULL-holding identity keys were generated on "newly persistent" objects. This can occur when user defined code inadvertently triggers flushes on not-fully-loaded objects. .. change:: :tags: orm :tickets: 1910 lazy loads for relationship attributes now use the current state, not the "committed" state, of foreign and primary key attributes when issuing SQL, if a flush is not in process. Previously, only the database-committed state would be used. In particular, this would cause a many-to-one get()-on-lazyload operation to fail, as autoflush is not triggered on these loads when the attributes are determined and the "committed" state may not be available. .. change:: :tags: orm :tickets: A new flag on relationship(), load_on_pending, allows the lazy loader to fire off on pending objects without a flush taking place, as well as a transient object that's been manually "attached" to the session. Note that this flag blocks attribute events from taking place when an object is loaded, so backrefs aren't available until after a flush. The flag is only intended for very specific use cases. .. change:: :tags: orm :tickets: Another new flag on relationship(), cascade_backrefs, disables the "save-update" cascade when the event was initiated on the "reverse" side of a bidirectional relationship. This is a cleaner behavior so that many-to-ones can be set on a transient object without it getting sucked into the child object's session, while still allowing the forward collection to cascade. We *might* default this to False in 0.7. .. 
change:: :tags: orm :tickets: Slight improvement to the behavior of "passive_updates=False" when placed only on the many-to-one side of a relationship; documentation has been clarified that passive_updates=False should really be on the one-to-many side. .. change:: :tags: orm :tickets: Placing passive_deletes=True on a many-to-one emits a warning, since you probably intended to put it on the one-to-many side. .. change:: :tags: orm :tickets: Fixed bug that would prevent "subqueryload" from working correctly with single table inheritance for a relationship from a subclass - the "where type in (x, y, z)" only gets placed on the inside, instead of repeatedly. .. change:: :tags: orm :tickets: When using from_self() with single table inheritance, the "where type in (x, y, z)" is placed on the outside of the query only, instead of repeatedly. May make some more adjustments to this. .. change:: :tags: orm :tickets: 1924 scoped_session emits a warning when configure() is called if a Session is already present (checks only the current thread) .. change:: :tags: orm :tickets: 1932 reworked the internals of mapper.cascade_iterator() to cut down method calls by about 9% in some circumstances. .. change:: :tags: sql :tickets: Fixed bug in TypeDecorator whereby the dialect-specific type was getting pulled in to generate the DDL for a given type, which didn't always return the correct result. .. change:: :tags: sql :tickets: TypeDecorator can now have a fully constructed type specified as its "impl", in addition to a type class. .. change:: :tags: sql :tickets: TypeDecorator will now place itself as the resulting type for a binary expression where the type coercion rules would normally return its impl type - previously, a copy of the impl type would be returned which would have the TypeDecorator embedded into it as the "dialect" impl, this was probably an unintentional way of achieving the desired effect. .. change:: :tags: sql :tickets: TypeDecorator.load_dialect_impl() returns "self.impl" by default, i.e. not the dialect implementation type of "self.impl". This to support compilation correctly. Behavior can be user-overridden in exactly the same way as before to the same effect. .. change:: :tags: sql :tickets: Added type_coerce(expr, type\_) expression element. Treats the given expression as the given type when evaluating expressions and processing result rows, but does not affect the generation of SQL, other than an anonymous label. .. change:: :tags: sql :tickets: Table.tometadata() now copies Index objects associated with the Table as well. .. change:: :tags: sql :tickets: Table.tometadata() issues a warning if the given Table is already present in the target MetaData - the existing Table object is returned. .. change:: :tags: sql :tickets: An informative error message is raised if a Column which has not yet been assigned a name, i.e. as in declarative, is used in a context where it is exported to the columns collection of an enclosing select() construct, or if any construct involving that column is compiled before its name is assigned. .. change:: :tags: sql :tickets: 1862 as_scalar(), label() can be called on a selectable which contains a Column that is not yet named. .. change:: :tags: sql :tickets: 1907 Fixed recursion overflow which could occur when operating with two expressions both of type "NullType", but not the singleton NULLTYPE instance. .. 
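A rough sketch of supplying a fully constructed type as a TypeDecorator "impl", per the entry above; the JSON-encoding behavior here is an illustration, not something shipped in this release::

    import json

    from sqlalchemy import String
    from sqlalchemy.types import TypeDecorator

    class JSONEncodedDict(TypeDecorator):
        # a constructed type instance, not just a type class, may be
        # supplied as the impl
        impl = String(2000)

        def process_bind_param(self, value, dialect):
            return json.dumps(value) if value is not None else None

        def process_result_value(self, value, dialect):
            return json.loads(value) if value is not None else None

..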
change:: :tags: declarative :tickets: 1922 @classproperty (soon/now @declared_attr) takes effect for __mapper_args__, __table_args__, __tablename__ on a base class that is not a mixin, as well as mixins. .. change:: :tags: declarative :tickets: 1915 @classproperty's official name/location for usage with declarative is sqlalchemy.ext.declarative.declared_attr. Same thing, but moving there since it is more of a "marker" that's specific to declarative, not just an attribute technique. .. change:: :tags: declarative :tickets: 1931, 1930 Fixed bug whereby columns on a mixin wouldn't propagate correctly to a single-table, or joined-table, inheritance scheme where the attribute name is different than that of the column. .. change:: :tags: declarative :tickets: A mixin can now specify a column that overrides a column of the same name associated with a superclass. Thanks to Oystein Haaland. .. change:: :tags: engine :tickets: Fixed a regression in 0.6.4 whereby the change that allowed cursor errors to be raised consistently broke the result.lastrowid accessor. Test coverage has been added for result.lastrowid. Note that lastrowid is only supported by Pysqlite and some MySQL drivers, so isn't super-useful in the general case. .. change:: :tags: engine :tickets: the logging message emitted by the engine when a connection is first used is now "BEGIN (implicit)" to emphasize that DBAPI has no explicit begin(). .. change:: :tags: engine :tickets: 1936 added "views=True" option to metadata.reflect(), will add the list of available views to those being reflected. .. change:: :tags: engine :tickets: 1899 engine_from_config() now accepts 'debug' for 'echo', 'echo_pool', 'force' for 'convert_unicode', boolean values for 'use_native_unicode'. .. change:: :tags: postgresql :tickets: Added "as_tuple" flag to ARRAY type, returns results as tuples instead of lists to allow hashing. .. change:: :tags: postgresql :tickets: 1933 Fixed bug which prevented "domain" built from a custom type such as "enum" from being reflected. .. change:: :tags: mysql :tickets: 1940 Fixed bug involving reflection of CURRENT_TIMESTAMP default used with ON UPDATE clause, thanks to Taavi Burns .. change:: :tags: oracle :tickets: 1878 The implicit_returning argument to create_engine() is now honored regardless of detected version of Oracle. Previously, the flag would be forced to False if server version info was < 10. .. change:: :tags: mssql :tickets: 1946 Fixed reflection bug which did not properly handle reflection of unknown types. .. change:: :tags: mssql :tickets: 1943 Fixed bug where aliasing of tables with "schema" would fail to compile properly. .. change:: :tags: mssql :tickets: 1770 Rewrote the reflection of indexes to use sys. catalogs, so that column names of any configuration (spaces, embedded commas, etc.) can be reflected. Note that reflection of indexes requires SQL Server 2005 or greater. .. change:: :tags: mssql :tickets: 1952 mssql+pymssql dialect now honors the "port" portion of the URL instead of discarding it. .. change:: :tags: informix :tickets: 1906 *Major* cleanup / modernization of the Informix dialect for 0.6, courtesy Florian Apolloner. .. change:: :tags: tests :tickets: the NoseSQLAlchemyPlugin has been moved to a new package "sqlalchemy_nose" which installs along with "sqlalchemy". This so that the "nosetests" script works as always but also allows the --with-coverage option to turn on coverage before SQLAlchemy modules are imported, allowing coverage to work correctly. ..
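A minimal sketch of declared_attr on a mixin, per the declarative entries above; the mixin and the ``Invoice`` class are illustrative only::

    from sqlalchemy import Column, Integer
    from sqlalchemy.ext.declarative import declarative_base, declared_attr

    Base = declarative_base()

    class TablenameMixin(object):
        # evaluated once per mapped subclass, receiving the class as "cls"
        @declared_attr
        def __tablename__(cls):
            return cls.__name__.lower()

    class Invoice(TablenameMixin, Base):
        id = Column(Integer, primary_key=True)

..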
change:: :tags: misc :tickets: 1890 CircularDependencyError now has .cycles and .edges members, which are the set of elements involved in one or more cycles, and the set of edges as 2-tuples. .. changelog:: :version: 0.6.4 :released: Tue Sep 07 2010 .. change:: :tags: orm :tickets: The name ConcurrentModificationError has been changed to StaleDataError, and descriptive error messages have been revised to reflect exactly what the issue is. Both names will remain available for the foreseeable future for schemes that may be specifying ConcurrentModificationError in an "except:" clause. .. change:: :tags: orm :tickets: 1891 Added a mutex to the identity map which mutexes remove operations against iteration methods, which now pre-buffer before returning an iterable. This because asynchronous gc can remove items via the gc thread at any time. .. change:: :tags: orm :tickets: The Session class is now present in sqlalchemy.orm.*. We're moving away from the usage of create_session(), which has non-standard defaults, for those situations where a one-step Session constructor is desired. Most users should stick with sessionmaker() for general use, however. .. change:: :tags: orm :tickets: query.with_parent() now accepts transient objects and will use the non-persistent values of their pk/fk attributes in order to formulate the criterion. Docs are also clarified as to the purpose of with_parent(). .. change:: :tags: orm :tickets: The include_properties and exclude_properties arguments to mapper() now accept Column objects as members in addition to strings. This so that same-named Column objects, such as those within a join(), can be disambiguated. .. change:: :tags: orm :tickets: 1896 A warning is now emitted if a mapper is created against a join or other single selectable that includes multiple columns with the same name in its .c. collection, and those columns aren't explicitly named as part of the same or separate attributes (or excluded). In 0.7 this warning will be an exception. Note that this warning is not emitted when the combination occurs as a result of inheritance, so that attributes still allow being overridden naturally.. In 0.7 this will be improved further. .. change:: :tags: orm :tickets: 1896 The primary_key argument to mapper() can now specify a series of columns that are only a subset of the calculated "primary key" columns of the mapped selectable, without an error being raised. This helps for situations where a selectable's effective primary key is simpler than the number of columns in the selectable that are actually marked as "primary_key", such as a join against two tables on their primary key columns. .. change:: :tags: orm :tickets: An object that's been deleted now gets a flag 'deleted', which prohibits the object from being re-add()ed to the session, as previously the object would live in the identity map silently until its attributes were accessed. The make_transient() function now resets this flag along with the "key" flag. .. change:: :tags: orm :tickets: make_transient() can be safely called on an already transient instance. .. change:: :tags: orm :tickets: a warning is emitted in mapper() if the polymorphic_on column is not present either in direct or derived form in the mapped selectable or in the with_polymorphic selectable, instead of silently ignoring it. Look for this to become an exception in 0.7. .. change:: :tags: orm :tickets: Another pass through the series of error messages emitted when relationship() is configured with ambiguous arguments.
The "foreign_keys" setting is no longer mentioned, as it is almost never needed and it is preferable users set up correct ForeignKey metadata, which is now the recommendation. If 'foreign_keys' is used and is incorrect, the message suggests the attribute is probably unnecessary. Docs for the attribute are beefed up. This because all confused relationship() users on the ML appear to be attempting to use foreign_keys due to the message, which only confuses them further since Table metadata is much clearer. .. change:: :tags: orm :tickets: 1877 If the "secondary" table has no ForeignKey metadata and no foreign_keys is set, even though the user is passing screwed up information, it is assumed that primary/secondaryjoin expressions should consider only and all cols in "secondary" to be foreign. It's not possible with "secondary" for the foreign keys to be elsewhere in any case. A warning is now emitted instead of an error, and the mapping succeeds. .. change:: :tags: orm :tickets: 1856 Moving an o2m object from one collection to another, or vice versa changing the referenced object by an m2o, where the foreign key is also a member of the primary key, will now be more carefully checked during flush if the change in value of the foreign key on the "many" side is the result of a change in the primary key of the "one" side, or if the "one" is just a different object. In one case, a cascade-capable DB would have cascaded the value already and we need to look at the "new" PK value to do an UPDATE, in the other we need to continue looking at the "old". We now look at the "old", assuming passive_updates=True, unless we know it was a PK switch that triggered the change. .. change:: :tags: orm :tickets: 1857 The value of version_id_col can be changed manually, and this will result in an UPDATE of the row. Versioned UPDATEs and DELETEs now use the "committed" value of the version_id_col in the WHERE clause and not the pending changed value. The version generator is also bypassed if manual changes are present on the attribute. .. change:: :tags: orm :tickets: Repaired the usage of merge() when used with concrete inheriting mappers. Such mappers frequently have so-called "concrete" attributes, which are subclass attributes that "disable" propagation from the parent - these needed to allow a merge() operation to pass through without effect. .. change:: :tags: orm :tickets: 1863 Specifying a non-column based argument for column_mapped_collection, including string, text() etc., will raise an error message that specifically asks for a column element, no longer misleads with incorrect information about text() or literal(). .. change:: :tags: orm :tickets: Similarly, for relationship(), foreign_keys, remote_side, order_by - all column-based expressions are enforced - lists of strings are explicitly disallowed since this is a very common error .. change:: :tags: orm :tickets: 1864 Dynamic attributes don't support collection population - added an assertion for when set_committed_value() is called, as well as when joinedload() or subqueryload() options are applied to a dynamic attribute, instead of failure / silent failure. .. change:: :tags: orm :tickets: 1852 Fixed bug whereby generating a Query derived from one which had the same column repeated with different label names, typically in some UNION situations, would fail to propagate the inner columns completely to the outer query. .. 
change:: :tags: orm :tickets: 1881 object_session() raises the proper UnmappedInstanceError when presented with an unmapped instance. .. change:: :tags: orm :tickets: Applied further memoizations to calculated Mapper properties, with significant (~90%) runtime mapper.py call count reduction in heavily polymorphic mapping configurations. .. change:: :tags: orm :tickets: mapper _get_col_to_prop private method used by the versioning example is deprecated; now use mapper.get_property_by_column() which will remain the public method for this. .. change:: :tags: orm :tickets: the versioning example works correctly now if versioning on a col that was formerly NULL. .. change:: :tags: sql :tickets: Calling execute() on an alias() construct is pending deprecation for 0.7, as it is not itself an "executable" construct. It currently "proxies" its inner element and is conditionally "executable" but this is not the kind of ambiguity we like these days. .. change:: :tags: sql :tickets: The execute() and scalar() methods of ClauseElement are now moved appropriately to the Executable subclass. ClauseElement.execute()/ scalar() are still present and are pending deprecation in 0.7, but note these would always raise an error anyway if you were not an Executable (unless you were an alias(), see previous note). .. change:: :tags: sql :tickets: Added basic math expression coercion for Numeric->Integer, so that resulting type is Numeric regardless of the direction of the expression. .. change:: :tags: sql :tickets: 1855 Changed the scheme used to generate truncated "auto" index names when using the "index=True" flag on Column. The truncation only takes place with the auto-generated name, not one that is user-defined (an error would be raised instead), and the truncation scheme itself is now based on a fragment of an md5 hash of the identifier name, so that multiple indexes on columns with similar names still have unique names. .. change:: :tags: sql :tickets: 1412 The generated index name also is based on a "max index name length" attribute which is separate from the "max identifier length" - this to appease MySQL who has a max length of 64 for index names, separate from their overall max length of 255. .. change:: :tags: sql :tickets: the text() construct, if placed in a column oriented situation, will at least return NULLTYPE for its type instead of None, allowing it to be used a little more freely for ad-hoc column expressions than before. literal_column() is still the better choice, however. .. change:: :tags: sql :tickets: Added full description of parent table/column, target table/column in error message raised when ForeignKey can't resolve target. .. change:: :tags: sql :tickets: 1865 Fixed bug whereby replacing composite foreign key columns in a reflected table would cause an attempt to remove the reflected constraint from the table a second time, raising a KeyError. .. change:: :tags: sql :tickets: the _Label construct, i.e. the one that is produced whenever you say somecol.label(), now counts itself in its "proxy_set" unioned with that of it's contained column's proxy set, instead of directly returning that of the contained column. This allows column correspondence operations which depend on the identity of the _Labels themselves to return the correct result .. change:: :tags: sql :tickets: 1852 fixes ORM bug. .. 
change:: :tags: engine :tickets: Calling fetchone() or similar on a result that has already been exhausted, has been closed, or is not a result-returning result now raises ResourceClosedError, a subclass of InvalidRequestError, in all cases, regardless of backend. Previously, some DBAPIs would raise ProgrammingError (i.e. pysqlite), others would return None leading to downstream breakages (i.e. MySQL-python). .. change:: :tags: engine :tickets: 1894 Fixed bug in Connection whereby if a "disconnect" event occurred in the "initialize" phase of the first connection pool connect, an AttributeError would be raised when the Connection would attempt to invalidate the DBAPI connection. .. change:: :tags: engine :tickets: Connection, ResultProxy, as well as Session use ResourceClosedError for all "this connection/transaction/result is closed" types of errors. .. change:: :tags: engine :tickets: Connection.invalidate() can be called more than once and subsequent calls do nothing. .. change:: :tags: declarative :tickets: if @classproperty is used with a regular class-bound mapper property attribute, it will be called to get the actual attribute value during initialization. Currently, there's no advantage to using @classproperty on a column or relationship attribute of a declarative class that isn't a mixin - evaluation is at the same time as if @classproperty weren't used. But here we at least allow it to function as expected. .. change:: :tags: declarative :tickets: Fixed bug where "Can't add additional column" message would display the wrong name. .. change:: :tags: postgresql :tickets: Fixed the psycopg2 dialect to use its set_isolation_level() method instead of relying upon the base "SET SESSION ISOLATION" command, as psycopg2 resets the isolation level on each new transaction otherwise. .. change:: :tags: mssql :tickets: Fixed "default schema" query to work with pymssql backend. .. change:: :tags: firebird :tickets: Fixed bug whereby a column default would fail to reflect if the "default" keyword were lower case. .. change:: :tags: oracle :tickets: 1879 Added ROWID type to the Oracle dialect, for those cases where an explicit CAST might be needed. .. change:: :tags: oracle :tickets: 1867 Oracle reflection of indexes has been tuned so that indexes which include some or all primary key columns, but not the same set of columns as that of the primary key, are reflected. Indexes which contain the identical columns as that of the primary key are skipped within reflection, as the index in that case is assumed to be the auto-generated primary key index. Previously, any index with PK columns present would be skipped. Thanks to Kent Bower for the patch. .. change:: :tags: oracle :tickets: 1868 Oracle now reflects the names of primary key constraints - also thanks to Kent Bower. .. change:: :tags: informix :tickets: 1904 Applied patches from to get basic Informix functionality up again. We rely upon end-user testing to ensure that Informix is working to some degree. .. change:: :tags: documentation :tickets: The docs have been reorganized such that the "API Reference" section is gone - all the docstrings from there which were public API are moved into the context of the main doc section that talks about it. Main docs divided into "SQLAlchemy Core" and "SQLAlchemy ORM" sections, mapper/relationship docs have been broken out. Lots of sections rewritten and/or reorganized. .. 
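A quick sketch of the exhausted/closed-result behavior described above, using an in-memory SQLite engine purely for illustration::

    from sqlalchemy import create_engine
    from sqlalchemy.exc import ResourceClosedError

    engine = create_engine('sqlite://')
    result = engine.execute("select 1")
    result.close()

    try:
        result.fetchone()
    except ResourceClosedError:
        # raised consistently across backends, instead of a DBAPI-specific
        # ProgrammingError or a silent None
        pass

..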
change:: :tags: examples :tickets: The beaker_caching example has been reorganized such that the Session, cache manager, and declarative_base are part of the environment, and custom cache code is portable and now within "caching_query.py". This allows the example to be easier to "drop in" to existing projects.

.. change:: :tags: examples :tickets: 1887 the history_meta versioning recipe sets "unique=False" when copying columns, so that the versioning table handles multiple rows with repeating values.

.. changelog:: :version: 0.6.3 :released: Thu Jul 15 2010

.. change:: :tags: orm :tickets: 1845 Removed errant many-to-many load in unitofwork which triggered unnecessarily on expired/unloaded collections. This load now takes place only if passive_updates is False and the parent primary key has changed, or if passive_deletes is False and a delete of the parent has occurred.

.. change:: :tags: orm :tickets: 1853 Column-entities (i.e. query(Foo.id)) copy their state more fully when queries are derived from themselves + a selectable (i.e. from_self(), union(), etc.), so that join() and such have the correct state to work from.

.. change:: :tags: orm :tickets: 1853 Fixed bug where Query.join() would fail if querying a non-ORM column and then joining without an on clause when a FROM clause is already present; it now raises a checked exception the same way it does when the clause is not present.

.. change:: :tags: orm :tickets: 1142 Improved the check for an "unmapped class", including the case where the superclass is mapped but the subclass is not. Any attempts to access cls._sa_class_manager.mapper now raise UnmappedClassError().

.. change:: :tags: orm :tickets: Added "column_descriptions" accessor to Query, which returns a list of dictionaries containing naming/typing information about the entities the Query will return. Can be helpful for building GUIs on top of ORM queries.

.. change:: :tags: mysql :tickets: 1848 The _extract_error_code() method now works correctly with each MySQL dialect (MySQL-python, OurSQL, MySQL-Connector-Python, PyODBC). Previously, the reconnect logic would fail for OperationalError conditions, however since MySQLdb and OurSQL have their own reconnect feature, there was no symptom for these drivers here unless one watched the logs.

.. change:: :tags: oracle :tickets: 1840 More tweaks to cx_oracle Decimal handling. "Ambiguous" numerics with no decimal place are coerced to int at the connection handler level. The advantage here is that ints come back as ints without SQLA type objects being involved and without needless conversion to Decimal first. Unfortunately, some exotic subquery cases can even see different types between individual result rows, so the Numeric handler, when instructed to return Decimal, can't take full advantage of "native decimal" mode and must run isinstance() on every value to check if it is already a Decimal. Reopen of

.. changelog:: :version: 0.6.2 :released: Tue Jul 06 2010

.. change:: :tags: orm :tickets: Query.join() will check for a call of the form query.join(target, clause_expression), i.e. missing the tuple, and raise an informative error message that this is the wrong calling form.

.. change:: :tags: orm :tickets: 1824 Fixed bug regarding flushes on self-referential bi-directional many-to-many relationships, where two objects made to mutually reference each other in one flush would fail to insert a row for both sides. Regression from 0.5.

..
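A hedged sketch of the "column_descriptions" accessor noted above, assuming a mapped ``User`` class and a Session instance ``session``::

    q = session.query(User, User.name)

    # each entry is a dictionary of naming/typing information describing
    # what the Query will return
    descriptions = q.column_descriptions
    names = [d['name'] for d in descriptions]   # e.g. ['User', 'name']

..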
change:: :tags: orm :tickets: the post_update feature of relationship() has been reworked architecturally to integrate more closely with the new 0.6 unit of work. The motivation for the change is so that multiple "post update" calls, each affecting different foreign key columns of the same row, are executed in a single UPDATE statement, rather than one UPDATE statement per column per row. Multiple row updates are also batched into executemany()s as possible, while maintaining consistent row ordering. .. change:: :tags: orm :tickets: Query.statement, Query.subquery(), etc. now transfer the values of bind parameters, i.e. those specified by query.params(), into the resulting SQL expression. Previously the values would not be transferred and bind parameters would come out as None. .. change:: :tags: orm :tickets: Subquery-eager-loading now works with Query objects which include params(), as well as get() Queries. .. change:: :tags: orm :tickets: Can now call make_transient() on an instance that is referenced by parent objects via many-to-one, without the parent's foreign key value getting temporarily set to None - this was a function of the "detect primary key switch" flush handler. It now ignores objects that are no longer in the "persistent" state, and the parent's foreign key identifier is left unaffected. .. change:: :tags: orm :tickets: query.order_by() now accepts False, which cancels any existing order_by() state on the Query, allowing subsequent generative methods to be called which do not support ORDER BY. This is not the same as the already existing feature of passing None, which suppresses any existing order_by() settings, including those configured on the mapper. False will make it as though order_by() was never called, while None is an active setting. .. change:: :tags: orm :tickets: An instance which is moved to "transient", has an incomplete or missing set of primary key attributes, and contains expired attributes, will raise an InvalidRequestError if an expired attribute is accessed, instead of getting a recursion overflow. .. change:: :tags: orm :tickets: The make_transient() function is now in the generated documentation. .. change:: :tags: orm :tickets: make_transient() removes all "loader" callables from the state being made transient, removing any "expired" state - all unloaded attributes reset back to undefined, None/empty on access. .. change:: :tags: sql :tickets: 1822 The warning emitted by the Unicode and String types with convert_unicode=True no longer embeds the actual value passed. This so that the Python warning registry does not continue to grow in size, the warning is emitted once as per the warning filter settings, and large string values don't pollute the output. .. change:: :tags: sql :tickets: Fixed bug that would prevent overridden clause compilation from working for "annotated" expression elements, which are often generated by the ORM. .. change:: :tags: sql :tickets: 1400 The argument to "ESCAPE" of a LIKE operator or similar is passed through render_literal_value(), which may implement escaping of backslashes. .. change:: :tags: sql :tickets: Fixed bug in Enum type which blew away native_enum flag when used with TypeDecorators or other adaption scenarios. .. change:: :tags: sql :tickets: Inspector hits bind.connect() when invoked to ensure initialize has been called. the internal name ".conn" is changed to ".bind", since that's what it is. .. 
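A short sketch of the order_by(False) behavior described above (``session`` and a mapped ``User`` class are assumed)::

    q = session.query(User).order_by(User.name)

    # None is an "active" setting: it suppresses the ordering configured
    # here as well as any mapper-level ordering
    q1 = q.order_by(None)

    # False behaves as though order_by() had never been called, so
    # subsequent generative methods that don't support ORDER BY can follow
    q2 = q.order_by(False)

..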
change:: :tags: sql :tickets: Modified the internals of "column annotation" such that a custom Column subclass can safely override _constructor to return Column, for the purposes of making "configurational" column classes that aren't involved in proxying, etc. .. change:: :tags: sql :tickets: 1829 Column.copy() takes along the "unique" attribute among others, fixes regarding declarative mixins .. change:: :tags: postgresql :tickets: 1400 render_literal_value() is overridden which escapes backslashes, currently applies to the ESCAPE clause of LIKE and similar expressions. Ultimately this will have to detect the value of "standard_conforming_strings" for full behavior. .. change:: :tags: postgresql :tickets: 1836 Won't generate "CREATE TYPE" / "DROP TYPE" if using types.Enum on a PG version prior to 8.3 - the supports_native_enum flag is fully honored. .. change:: :tags: mysql :tickets: 1826 MySQL dialect doesn't emit CAST() for MySQL version detected < 4.0.2. This allows the unicode check on connect to proceed. .. change:: :tags: mysql :tickets: MySQL dialect now detects NO_BACKSLASH_ESCAPES sql mode, in addition to ANSI_QUOTES. .. change:: :tags: mysql :tickets: 1400 render_literal_value() is overridden which escapes backslashes, currently applies to the ESCAPE clause of LIKE and similar expressions. This behavior is derived from detecting the value of NO_BACKSLASH_ESCAPES. .. change:: :tags: oracle :tickets: 1819 Fixed ora-8 compatibility flags such that they don't cache a stale value from before the first database connection actually occurs. .. change:: :tags: oracle :tickets: 1840 Oracle's "native decimal" metadata begins to return ambiguous typing information about numerics when columns are embedded in subqueries as well as when ROWNUM is consulted with subqueries, as we do for limit/offset. We've added these ambiguous conditions to the cx_oracle "convert to Decimal()" handler, so that we receive numerics as Decimal in more cases instead of as floats. These are then converted, if requested, into Integer or Float, or otherwise kept as the lossless Decimal. .. change:: :tags: mssql :tickets: 1825 If server_version_info is outside the usual range of (8, ), (9, ), (10, ), a warning is emitted which suggests checking that the FreeTDS version configuration is using 7.0 or 8.0, not 4.2. .. change:: :tags: firebird :tickets: 1823 Fixed incorrect signature in do_execute(), error introduced in 0.6.1. .. change:: :tags: firebird :tickets: 1813 Firebird dialect adds CHAR, VARCHAR types which accept a "charset" flag, to support Firebird "CHARACTER SET" clause. .. change:: :tags: declarative :tickets: 1805, 1796, 1751 Added support for @classproperty to provide any kind of schema/mapping construct from a declarative mixin, including columns with foreign keys, relationships, column_property, deferred. This solves all such issues on declarative mixins. An error is raised if any MapperProperty subclass is specified on a mixin without using @classproperty. .. change:: :tags: declarative :tickets: 1821 a mixin class can now define a column that matches one which is present on a __table__ defined on a subclass. It cannot, however, define one that is not present in the __table__, and the error message here now works. .. 
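A rough sketch of the @classproperty mixin support described above, under the assumption that ``classproperty`` is imported from ``sqlalchemy.util`` as in this series (it was later renamed ``declared_attr``); class and table names are illustrative::

    from sqlalchemy import Column, Integer, ForeignKey
    from sqlalchemy.orm import relationship
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.util import classproperty

    Base = declarative_base()

    class Target(Base):
        __tablename__ = 'target'
        id = Column(Integer, primary_key=True)

    class RefTargetMixin(object):
        # columns with foreign keys and relationships must be wrapped in
        # @classproperty so each subclass receives its own copy
        @classproperty
        def target_id(cls):
            return Column('target_id', Integer, ForeignKey('target.id'))

        @classproperty
        def target(cls):
            return relationship(Target)

    class Thing(RefTargetMixin, Base):
        __tablename__ = 'thing'
        id = Column(Integer, primary_key=True)

..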
change:: :tags: extension, compiler :tickets: 1838 The 'default' compiler is automatically copied over when overriding the compilation of a built-in clause construct, so no KeyError is raised if the user-defined compiler is specific to certain backends and compilation for a different backend is invoked.

.. change:: :tags: documentation :tickets: 1820 Added documentation for the Inspector.

.. change:: :tags: documentation :tickets: 1830 Fixed @memoized_property and @memoized_instancemethod decorators so that Sphinx documentation picks up these attributes and methods, such as ResultProxy.inserted_primary_key.

.. changelog:: :version: 0.6.1 :released: Mon May 31 2010

.. change:: :tags: orm :tickets: 1782 Fixed regression introduced in 0.6.0 involving improper history accounting on mutable attributes.

.. change:: :tags: orm :tickets: 1807 Fixed regression introduced in 0.6.0 unit of work refactor that broke updates for bi-directional relationship() with post_update=True.

.. change:: :tags: orm :tickets: 1789 session.merge() will not expire attributes on the returned instance if that instance is "pending".

.. change:: :tags: orm :tickets: 1802 Fixed __setstate__ method of CollectionAdapter so that it does not fail during deserialization when the parent InstanceState is not yet unserialized.

.. change:: :tags: orm :tickets: 1797 Added internal warning in case an instance without a full PK happened to be expired and then was asked to refresh.

.. change:: :tags: orm :tickets: Added more aggressive caching to the mapper's usage of UPDATE, INSERT, and DELETE expressions. Assuming the statement has no per-object SQL expressions attached, the expression objects are cached by the mapper after the first create, and their compiled form is stored persistently in a cache dictionary for the duration of the related Engine. The cache is an LRUCache for the rare case that a mapper receives an extremely high number of different column patterns as UPDATEs.

.. change:: :tags: sql :tickets: 1793 expr.in_() now accepts a text() construct as the argument. Grouping parentheses are added automatically, i.e. usage is like `col.in_(text("select id from table"))`.

.. change:: :tags: sql :tickets: Columns of _Binary type (i.e. LargeBinary, BLOB, etc.) will coerce a "basestring" on the right side into a _Binary as well so that required DBAPI processing takes place.

.. change:: :tags: sql :tickets: 1801 Added table.add_is_dependent_on(othertable), which allows manual placement of dependency rules between two Table objects for use within create_all(), drop_all(), sorted_tables.

.. change:: :tags: sql :tickets: 1778 Fixed bug that prevented implicit RETURNING from functioning properly with a composite primary key that contained zeroes.

.. change:: :tags: sql :tickets: Fixed errant space character when generating ADD CONSTRAINT for a named UNIQUE constraint.

.. change:: :tags: sql :tickets: 1571 Fixed "table" argument on constructor of ForeignKeyConstraint.

.. change:: :tags: sql :tickets: 1786 Fixed bug in connection pool cursor wrapper whereby if a cursor threw an exception on close(), the logging of the message would fail.

.. change:: :tags: sql :tickets: the _make_proxy() method of ColumnClause and Column now uses self.__class__ to determine the class of object to be returned instead of hardcoding to ColumnClause/Column, making it slightly easier to produce specific subclasses of these which work in alias/subquery situations.

.. change:: :tags: sql :tickets: 1798 func.XXX() doesn't inadvertently resolve to non-Function classes (e.g. fixes func.text()).

..
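Expanding slightly on the in_(text()) usage shown above, a minimal sketch (table and SQL text are illustrative)::

    from sqlalchemy import MetaData, Table, Column, Integer, select, text

    metadata = MetaData()
    accounts = Table('accounts', metadata,
        Column('id', Integer, primary_key=True))

    # grouping parentheses around the text() subquery are rendered automatically
    stmt = select([accounts]).where(
        accounts.c.id.in_(text("select id from other_table"))
    )

..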
change:: :tags: engines :tickets: 1781 Fixed building the C extensions on Python 2.4. .. change:: :tags: engines :tickets: Pool classes will reuse the same "pool_logging_name" setting after a dispose() occurs. .. change:: :tags: engines :tickets: Engine gains an "execution_options" argument and update_execution_options() method, which will apply to all connections generated by this engine. .. change:: :tags: mysql :tickets: 1794 func.sysdate() emits "SYSDATE()", i.e. with the ending parenthesis, on MySQL. .. change:: :tags: sqlite :tickets: 1812 Fixed concatenation of constraints when "PRIMARY KEY" constraint gets moved to column level due to SQLite AUTOINCREMENT keyword being rendered. .. change:: :tags: oracle :tickets: 1775 Added a check for cx_oracle versions lower than version 5, in which case the incompatible "output type handler" won't be used. This will impact decimal accuracy and some unicode handling issues. .. change:: :tags: oracle :tickets: 1790 Fixed use_ansi=False mode, which was producing broken WHERE clauses in pretty much all cases. .. change:: :tags: oracle :tickets: 1808 Re-established support for Oracle 8 with cx_oracle, including that use_ansi is set to False automatically, NVARCHAR2 and NCLOB are not rendered for Unicode, "native unicode" check doesn't fail, cx_oracle "native unicode" mode is disabled, VARCHAR() is emitted with bytes count instead of char count. .. change:: :tags: oracle :tickets: 1670 oracle_xe 5 doesn't accept a Python unicode object in its connect string in normal Python 2.x mode - so we coerce to str() directly. non-ascii characters aren't supported in connect strings here since we don't know what encoding we could use. .. change:: :tags: oracle :tickets: 1815 FOR UPDATE is emitted in the syntactically correct position when limit/offset is used, i.e. the ROWNUM subquery. However, Oracle can't really handle FOR UPDATE with ORDER BY or with subqueries, so its still not very usable, but at least SQLA gets the SQL past the Oracle parser. .. change:: :tags: firebird :tickets: 1521 Added a label to the query used within has_table() and has_sequence() to work with older versions of Firebird that don't provide labels for result columns. .. change:: :tags: firebird :tickets: 1779 Added integer coercion to the "type_conv" attribute when passed via query string, so that it is properly interpreted by Kinterbasdb. .. change:: :tags: firebird :tickets: 1646 Added 'connection shutdown' to the list of exception strings which indicate a dropped connection. .. change:: :tags: sqlsoup :tickets: 1783 the SqlSoup constructor accepts a `base` argument which specifies the base class to use for mapped classes, the default being `object`. .. changelog:: :version: 0.6.0 :released: Sun Apr 18 2010 .. change:: :tags: orm :tickets: 1742, 1081 Unit of work internals have been rewritten. Units of work with large numbers of objects interdependent objects can now be flushed without recursion overflows as there is no longer reliance upon recursive calls. The number of internal structures now stays constant for a particular session state, regardless of how many relationships are present on mappings. The flow of events now corresponds to a linear list of steps, generated by the mappers and relationships based on actual work to be done, filtered through a single topological sort for correct ordering. Flush actions are assembled using far fewer steps and less memory. .. 
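A small sketch of the engine-wide execution options mentioned above; the URL and the particular options chosen here are illustrative only::

    from sqlalchemy import create_engine

    # the "execution_options" argument applies the given options to every
    # connection produced by this engine
    engine = create_engine('sqlite://',
                           execution_options={'autocommit': True})

    # options may also be updated after the engine is created
    engine.update_execution_options(autocommit=False)

..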
change:: :tags: orm :tickets: Along with the UOW rewrite, this also removes an issue introduced in 0.6beta3 regarding topological cycle detection for units of work with long dependency cycles. We now use an algorithm written by Guido (thanks Guido!). .. change:: :tags: orm :tickets: 1764 one-to-many relationships now maintain a list of positive parent-child associations within the flush, preventing previous parents marked as deleted from cascading a delete or NULL foreign key set on those child objects, despite the end-user not removing the child from the old association. .. change:: :tags: orm :tickets: 1495 A collection lazy load will switch off default eagerloading on the reverse many-to-one side, since that loading is by definition unnecessary. .. change:: :tags: orm :tickets: Session.refresh() now does an equivalent expire() on the given instance first, so that the "refresh-expire" cascade is propagated. Previously, refresh() was not affected in any way by the presence of "refresh-expire" cascade. This is a change in behavior versus that of 0.6beta2, where the "lockmode" flag passed to refresh() would cause a version check to occur. Since the instance is first expired, refresh() always upgrades the object to the most recent version. .. change:: :tags: orm :tickets: 1754 The 'refresh-expire' cascade, when reaching a pending object, will expunge the object if the cascade also includes "delete-orphan", or will simply detach it otherwise. .. change:: :tags: orm :tickets: 1756 id(obj) is no longer used internally within topological.py, as the sorting functions now require hashable objects only. .. change:: :tags: orm :tickets: The ORM will set the docstring of all generated descriptors to None by default. This can be overridden using 'doc' (or if using Sphinx, attribute docstrings work too). .. change:: :tags: orm :tickets: Added kw argument 'doc' to all mapper property callables as well as Column(). Will assemble the string 'doc' as the '__doc__' attribute on the descriptor. .. change:: :tags: orm :tickets: 1761 Usage of version_id_col on a backend that supports cursor.rowcount for execute() but not executemany() now works when a delete is issued (already worked for saves, since those don't use executemany()). For a backend that doesn't support cursor.rowcount at all, a warning is emitted the same as with saves. .. change:: :tags: orm :tickets: The ORM now short-term caches the "compiled" form of insert() and update() constructs when flushing lists of objects of all the same class, thereby avoiding redundant compilation per individual INSERT/UPDATE within an individual flush() call. .. change:: :tags: orm :tickets: internal getattr(), setattr(), getcommitted() methods on ColumnProperty, CompositeProperty, RelationshipProperty have been underscored (i.e. are private), signature has changed. .. change:: :tags: engines :tickets: 1757 The C extension now also works with DBAPIs which use custom sequences as row (and not only tuples). .. change:: :tags: sql :tickets: 1755 Restored some bind-labeling logic from 0.5 which ensures that tables with column names that overlap another column of the form "_" won't produce errors if column._label is used as a bind name during an UPDATE. Test coverage which wasn't present in 0.5 has been added. .. change:: :tags: sql :tickets: 1729 somejoin.select(fold_equivalents=True) is no longer deprecated, and will eventually be rolled into a more comprehensive version of the feature for. .. 
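A brief sketch of the 'doc' keyword described above, using declarative purely for illustration::

    from sqlalchemy import Column, Integer, String
    from sqlalchemy.ext.declarative import declarative_base

    Base = declarative_base()

    class User(Base):
        __tablename__ = 'user'
        id = Column(Integer, primary_key=True, doc="surrogate primary key")
        name = Column(String(50), doc="user login name")

    # the generated descriptor now carries the docstring, e.g.
    # User.name.__doc__ == "user login name"

..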
change:: :tags: sql :tickets: 1759 the Numeric type raises an *enormous* warning when expected to convert floats to Decimal from a DBAPI that returns floats. This includes SQLite, Sybase, MS-SQL. .. change:: :tags: sql :tickets: Fixed an error in expression typing which caused an endless loop for expressions with two NULL types. .. change:: :tags: sql :tickets: Fixed bug in execution_options() feature whereby the existing Transaction and other state information from the parent connection would not be propagated to the sub-connection. .. change:: :tags: sql :tickets: Added new 'compiled_cache' execution option. A dictionary where Compiled objects will be cached when the Connection compiles a clause expression into a dialect- and parameter- specific Compiled object. It is the user's responsibility to manage the size of this dictionary, which will have keys corresponding to the dialect, clause element, the column names within the VALUES or SET clause of an INSERT or UPDATE, as well as the "batch" mode for an INSERT or UPDATE statement. .. change:: :tags: sql :tickets: 1769 Added get_pk_constraint() to reflection.Inspector, similar to get_primary_keys() except returns a dict that includes the name of the constraint, for supported backends (PG so far). .. change:: :tags: sql :tickets: 1771 Table.create() and Table.drop() no longer apply metadata- level create/drop events. .. change:: :tags: ext :tickets: the compiler extension now allows @compiles decorators on base classes that extend to child classes, @compiles decorators on child classes that aren't broken by a @compiles decorator on the base class. .. change:: :tags: ext :tickets: Declarative will raise an informative error message if a non-mapped class attribute is referenced in the string-based relationship() arguments. .. change:: :tags: ext :tickets: Further reworked the "mixin" logic in declarative to additionally allow __mapper_args__ as a @classproperty on a mixin, such as to dynamically assign polymorphic_identity. .. change:: :tags: postgresql :tickets: 1071 Postgresql now reflects sequence names associated with SERIAL columns correctly, after the name of the sequence has been changed. Thanks to Kumar McMillan for the patch. .. change:: :tags: postgresql :tickets: Repaired missing import in psycopg2._PGNumeric type when unknown numeric is received. .. change:: :tags: postgresql :tickets: psycopg2/pg8000 dialects now aware of REAL[], FLOAT[], DOUBLE_PRECISION[], NUMERIC[] return types without raising an exception. .. change:: :tags: postgresql :tickets: 1769 Postgresql reflects the name of primary key constraints, if one exists. .. change:: :tags: oracle :tickets: Now using cx_oracle output converters so that the DBAPI returns natively the kinds of values we prefer: .. change:: :tags: oracle :tickets: 1759 NUMBER values with positive precision + scale convert to cx_oracle.STRING and then to Decimal. This allows perfect precision for the Numeric type when using cx_oracle. .. change:: :tags: oracle :tickets: STRING/FIXED_CHAR now convert to unicode natively. SQLAlchemy's String types then don't need to apply any kind of conversions. .. change:: :tags: firebird :tickets: The functionality of result.rowcount can be disabled on a per-engine basis by setting 'enable_rowcount=False' on create_engine(). Normally, cursor.rowcount is called after any UPDATE or DELETE statement unconditionally, because the cursor is then closed and Firebird requires an open cursor in order to get a rowcount. 
This call is slightly expensive, however, so it can be disabled. To re-enable on a per-execution basis, the 'enable_rowcount=True' execution option may be used.

.. change:: :tags: examples :tickets: Updated attribute_shard.py example to use a more robust method of searching a Query for binary expressions which compare columns against literal values.

.. changelog:: :version: 0.6beta3 :released: Sun Mar 28 2010

.. change:: :tags: orm :tickets: 1675 Major feature: Added new "subquery" loading capability to relationship(). This is an eager loading option which generates a second SELECT for each collection represented in a query, across all parents at once. The query re-issues the original end-user query wrapped in a subquery, applies joins out to the target collection, and loads all those collections fully in one result, similar to "joined" eager loading but using all inner joins and not re-fetching full parent rows repeatedly (as most DBAPIs seem to do, even if columns are skipped). Subquery loading is available at mapper config level using "lazy='subquery'" and at the query options level using "subqueryload(props..)", "subqueryload_all(props...)".

.. change:: :tags: orm :tickets: To accommodate the fact that there are now two kinds of eager loading available, the new names for eagerload() and eagerload_all() are joinedload() and joinedload_all(). The old names will remain as synonyms for the foreseeable future.

.. change:: :tags: orm :tickets: The "lazy" flag on the relationship() function now accepts a string argument for all kinds of loading: "select", "joined", "subquery", "noload" and "dynamic", where the default is now "select". The old values of True/False/None still retain their usual meanings and will remain as synonyms for the foreseeable future.

.. change:: :tags: orm :tickets: 921 Added with_hint() method to Query() construct. This calls directly down to select().with_hint() and also accepts entities as well as tables and aliases. See with_hint() in the SQL section below.

.. change:: :tags: orm :tickets: Fixed bug in Query whereby calling q.join(prop).from_self(...).join(prop) would fail to render the second join outside the subquery, when joining on the same criterion as was on the inside.

.. change:: :tags: orm :tickets: Fixed bug in Query whereby the usage of aliased() constructs would fail if the underlying table (but not the actual alias) were referenced inside the subquery generated by q.from_self() or q.select_from().

.. change:: :tags: orm :tickets: Fixed bug which affected all eagerload() and similar options such that "remote" eager loads, i.e. eagerloads off of a lazy load such as query(A).options(eagerload(A.b, B.c)), wouldn't eagerload anything, but using eagerload("b.c") would work fine.

.. change:: :tags: orm :tickets: Query gains an add_columns(\*columns) method which is a multi-column version of add_column(col). add_column(col) is future-deprecated.

.. change:: :tags: orm :tickets: Query.join() will detect if the end result will be "FROM A JOIN A", and will raise an error if so.

.. change:: :tags: orm :tickets: Query.join(Cls.propname, from_joinpoint=True) will check more carefully that "Cls" is compatible with the current joinpoint, and act the same way as Query.join("propname", from_joinpoint=True) in that regard.

.. change:: :tags: sql :tickets: 921 Added with_hint() method to select() construct. Specify a table/alias, hint text, and optional dialect name, and "hints" will be rendered in the appropriate place in the statement. Works for Oracle, Sybase, MySQL.

..
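A hedged sketch of the two spellings of subquery eager loading introduced above, assuming a ``User``/``Address`` mapping with a ``User.addresses`` collection and a Session ``session``::

    from sqlalchemy.orm import subqueryload

    # per-query, via options:
    users = session.query(User).options(subqueryload(User.addresses)).all()

    # or at mapper configuration time, on the relationship() itself:
    #   addresses = relationship(Address, lazy='subquery')

..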
change:: :tags: sql :tickets: 1747 Fixed bug introduced in 0.6beta2 where column labels would render inside of column expressions already assigned a label.

.. change:: :tags: postgresql :tickets: 877 The psycopg2 dialect will log NOTICE messages via the "sqlalchemy.dialects.postgresql" logger name.

.. change:: :tags: postgresql :tickets: 997 the TIME and TIMESTAMP types are now available from the postgresql dialect directly, which add the PG-specific argument 'precision' to both. 'precision' and 'timezone' are correctly reflected for both TIME and TIMESTAMP types.

.. change:: :tags: mysql :tickets: 1752 No longer guessing that TINYINT(1) should be BOOLEAN when reflecting - TINYINT(1) is returned. Use Boolean/BOOLEAN in table definition to get boolean conversion behavior.

.. change:: :tags: oracle :tickets: 1744 The Oracle dialect will issue VARCHAR type definitions using character counts, i.e. VARCHAR2(50 CHAR), so that the column is sized in terms of characters and not bytes. Column reflection of character types will also use ALL_TAB_COLUMNS.CHAR_LENGTH instead of ALL_TAB_COLUMNS.DATA_LENGTH. Both of these behaviors take effect when the server version is 9 or higher - for version 8, the old behaviors are used.

.. change:: :tags: declarative :tickets: 1746 Using a mixin won't break if the mixin implements an unpredictable __getattribute__(), i.e. Zope interfaces.

.. change:: :tags: declarative :tickets: 1749 Using @classdecorator and similar on mixins to define __tablename__, __table_args__, etc. now works if the method references attributes on the ultimate subclass.

.. change:: :tags: declarative :tickets: 1751 relationships and columns with foreign keys aren't allowed on declarative mixins, sorry.

.. change:: :tags: ext :tickets: The sqlalchemy.orm.shard module now becomes an extension, sqlalchemy.ext.horizontal_shard. The old import works with a deprecation warning.

.. changelog:: :version: 0.6beta2 :released: Sat Mar 20 2010

.. change:: :tags: py3k :tickets: Improved the installation/test setup regarding Python 3, now that Distribute runs on Py3k. distribute_setup.py is now included. See README.py3k for Python 3 installation/testing instructions.

.. change:: :tags: orm :tickets: 1740 The official name for the relation() function is now relationship(), to eliminate confusion over the relational algebra term. relation(), however, will remain available in equal capacity for the foreseeable future.

.. change:: :tags: orm :tickets: 1692 Added "version_id_generator" argument to Mapper; this is a callable that, given the current value of the "version_id_col", returns the next version number. Can be used for alternate versioning schemes such as uuid, timestamps.

.. change:: :tags: orm :tickets: Added "lockmode" kw argument to Session.refresh(); it will pass through the string value to Query the same as in with_lockmode(), and will also do a version check for a version_id_col-enabled mapping.

.. change:: :tags: orm :tickets: 1188 Fixed bug whereby calling query(A).join(A.bs).add_entity(B) in a joined inheritance scenario would double-add B as a target and produce an invalid query.

.. change:: :tags: orm :tickets: 1674 Fixed bug in session.rollback() which involved not removing formerly "pending" objects from the session before re-integrating "deleted" objects, which typically occurred with natural primary keys. If there was a primary key conflict between them, the attach of the deleted would fail internally. The formerly "pending" objects are now expunged first.

..
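A minimal sketch of the "version_id_generator" hook noted above, using a uuid-based scheme with declarative; the class and column names are illustrative::

    import uuid

    from sqlalchemy import Column, Integer, String
    from sqlalchemy.ext.declarative import declarative_base

    Base = declarative_base()

    class Widget(Base):
        __tablename__ = 'widget'
        id = Column(Integer, primary_key=True)
        version_uuid = Column(String(32), nullable=False)

        __mapper_args__ = {
            'version_id_col': version_uuid,
            # given the current version value, return the next one
            'version_id_generator': lambda version: uuid.uuid4().hex,
        }

..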
change:: :tags: orm :tickets: 1719 Removed a lot of logging that nobody really cares about, logging that remains will respond to live changes in the log level. No significant overhead is added. .. change:: :tags: orm :tickets: Fixed bug in session.merge() which prevented dict-like collections from merging. .. change:: :tags: orm :tickets: session.merge() works with relations that specifically don't include "merge" in their cascade options - the target is ignored completely. .. change:: :tags: orm :tickets: 1681 session.merge() will not expire existing scalar attributes on an existing target if the target has a value for that attribute, even if the incoming merged doesn't have a value for the attribute. This prevents unnecessary loads on existing items. Will still mark the attr as expired if the destination doesn't have the attr, though, which fulfills some contracts of deferred cols. .. change:: :tags: orm :tickets: 1680 The "allow_null_pks" flag is now called "allow_partial_pks", defaults to True, acts like it did in 0.5 again. Except, it also is implemented within merge() such that a SELECT won't be issued for an incoming instance with partially NULL primary key if the flag is False. .. change:: :tags: orm :tickets: 1737 Fixed bug in 0.6-reworked "many-to-one" optimizations such that a many-to-one that is against a non-primary key column on the remote table (i.e. foreign key against a UNIQUE column) will pull the "old" value in from the database during a change, since if it's in the session we will need it for proper history/backref accounting, and we can't pull from the local identity map on a non-primary key column. .. change:: :tags: orm :tickets: 1731 fixed internal error which would occur if calling has() or similar complex expression on a single-table inheritance relation(). .. change:: :tags: orm :tickets: 1688 query.one() no longer applies LIMIT to the query, this to ensure that it fully counts all object identities present in the result, even in the case where joins may conceal multiple identities for two or more rows. As a bonus, one() can now also be called with a query that issued from_statement() to start with since it no longer modifies the query. .. change:: :tags: orm :tickets: 1727 query.get() now returns None if queried for an identifier that is present in the identity map with a different class than the one requested, i.e. when using polymorphic loading. .. change:: :tags: orm :tickets: 1706 A major fix in query.join(), when the "on" clause is an attribute of an aliased() construct, but there is already an existing join made out to a compatible target, query properly joins to the right aliased() construct instead of sticking onto the right side of the existing join. .. change:: :tags: orm :tickets: 1362 Slight improvement to the fix for to not issue needless updates of the primary key column during a so-called "row switch" operation, i.e. add + delete of two objects with the same PK. .. change:: :tags: orm :tickets: Now uses sqlalchemy.orm.exc.DetachedInstanceError when an attribute load or refresh action fails due to object being detached from any Session. UnboundExecutionError is specific to engines bound to sessions and statements. .. change:: :tags: orm :tickets: Query called in the context of an expression will render disambiguating labels in all cases. Note that this does not apply to the existing .statement and .subquery() accessor/method, which still honors the .with_labels() setting that defaults to False. .. 
change:: :tags: orm :tickets: 1676 Query.union() retains disambiguating labels within the returned statement, thus avoiding various SQL composition errors which can result from column name conflicts. .. change:: :tags: orm :tickets: Fixed bug in attribute history that inadvertently invoked __eq__ on mapped instances. .. change:: :tags: orm :tickets: Some internal streamlining of object loading grants a small speedup for large results, estimates are around 10-15%. Gave the "state" internals a good solid cleanup with less complexity, datamembers, method calls, blank dictionary creates. .. change:: :tags: orm :tickets: 1689 Documentation clarification for query.delete() .. change:: :tags: orm :tickets: Fixed cascade bug in many-to-one relation() when attribute was set to None, introduced in r6711 (cascade deleted items into session during add()). .. change:: :tags: orm :tickets: 1736 Calling query.order_by() or query.distinct() before calling query.select_from(), query.with_polymorphic(), or query.from_statement() raises an exception now instead of silently dropping those criterion. .. change:: :tags: orm :tickets: 1735 query.scalar() now raises an exception if more than one row is returned. All other behavior remains the same. .. change:: :tags: orm :tickets: 1692 Fixed bug which caused "row switch" logic, that is an INSERT and DELETE replaced by an UPDATE, to fail when version_id_col was in use. .. change:: :tags: sql :tickets: 1714 join() will now simulate a NATURAL JOIN by default. Meaning, if the left side is a join, it will attempt to join the right side to the rightmost side of the left first, and not raise any exceptions about ambiguous join conditions if successful even if there are further join targets across the rest of the left. .. change:: :tags: sql :tickets: The most common result processors conversion function were moved to the new "processors" module. Dialect authors are encouraged to use those functions whenever they correspond to their needs instead of implementing custom ones. .. change:: :tags: sql :tickets: 1694, 1698 SchemaType and subclasses Boolean, Enum are now serializable, including their ddl listener and other event callables. .. change:: :tags: sql :tickets: Some platforms will now interpret certain literal values as non-bind parameters, rendered literally into the SQL statement. This to support strict SQL-92 rules that are enforced by some platforms including MS-SQL and Sybase. In this model, bind parameters aren't allowed in the columns clause of a SELECT, nor are certain ambiguous expressions like "?=?". When this mode is enabled, the base compiler will render the binds as inline literals, but only across strings and numeric values. Other types such as dates will raise an error, unless the dialect subclass defines a literal rendering function for those. The bind parameter must have an embedded literal value already or an error is raised (i.e. won't work with straight bindparam('x')). Dialects can also expand upon the areas where binds are not accepted, such as within argument lists of functions (which don't work on MS-SQL when native SQL binding is used). .. change:: :tags: sql :tickets: Added "unicode_errors" parameter to String, Unicode, etc. Behaves like the 'errors' keyword argument to the standard library's string.decode() functions. This flag requires that `convert_unicode` is set to `"force"` - otherwise, SQLAlchemy is not guaranteed to handle the task of unicode conversion. 
Note that this flag adds significant performance overhead to row-fetching operations for backends that already return unicode objects natively (which most DBAPIs do). This flag should only be used as an absolute last resort for reading strings from a column with varied or corrupted encodings, which only applies to databases that accept invalid encodings in the first place (i.e. MySQL. *not* PG, Sqlite, etc.) .. change:: :tags: sql :tickets: Added math negation operator support, -x. .. change:: :tags: sql :tickets: FunctionElement subclasses are now directly executable the same way any func.foo() construct is, with automatic SELECT being applied when passed to execute(). .. change:: :tags: sql :tickets: The "type" and "bind" keyword arguments of a func.foo() construct are now local to "func." constructs and are not part of the FunctionElement base class, allowing a "type" to be handled in a custom constructor or class-level variable. .. change:: :tags: sql :tickets: Restored the keys() method to ResultProxy. .. change:: :tags: sql :tickets: 1647, 1683 The type/expression system now does a more complete job of determining the return type from an expression as well as the adaptation of the Python operator into a SQL operator, based on the full left/right/operator of the given expression. In particular the date/time/interval system created for Postgresql EXTRACT in has now been generalized into the type system. The previous behavior which often occured of an expression "column + literal" forcing the type of "literal" to be the same as that of "column" will now usually not occur - the type of "literal" is first derived from the Python type of the literal, assuming standard native Python types + date types, before falling back to that of the known type on the other side of the expression. If the "fallback" type is compatible (i.e. CHAR from String), the literal side will use that. TypeDecorator types override this by default to coerce the "literal" side unconditionally, which can be changed by implementing the coerce_compared_value() method. Also part of. .. change:: :tags: sql :tickets: Made sqlalchemy.sql.expressions.Executable part of public API, used for any expression construct that can be sent to execute(). FunctionElement now inherits Executable so that it gains execution_options(), which are also propagated to the select() that's generated within execute(). Executable in turn subclasses _Generative which marks any ClauseElement that supports the @_generative decorator - these may also become "public" for the benefit of the compiler extension at some point. .. change:: :tags: sql :tickets: 1579 A change to the solution for - an end-user defined bind parameter name that directly conflicts with a column-named bind generated directly from the SET or VALUES clause of an update/insert generates a compile error. This reduces call counts and eliminates some cases where undesirable name conflicts could still occur. .. change:: :tags: sql :tickets: 1705 Column() requires a type if it has no foreign keys (this is not new). An error is now raised if a Column() has no type and no foreign keys. .. change:: :tags: sql :tickets: 1717 the "scale" argument of the Numeric() type is honored when coercing a returned floating point value into a string on its way to Decimal - this allows accuracy to function on SQLite, MySQL. .. change:: :tags: sql :tickets: the copy() method of Column now copies over uninitialized "on table attach" events. Helps with the new declarative "mixin" capability. .. 
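A rough sketch of the coerce_compared_value() hook mentioned above, assuming a TypeDecorator that should not force its own type onto integer comparison values; the class name is hypothetical::

    from sqlalchemy import Integer, String
    from sqlalchemy.types import TypeDecorator

    class EncodedText(TypeDecorator):
        impl = String

        def coerce_compared_value(self, op, value):
            # let integer comparison values keep an Integer type instead
            # of being coerced to this type unconditionally
            if isinstance(value, int):
                return Integer()
            return self

..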
change:: :tags: engines :tickets: Added an optional C extension to speed up the sql layer by reimplementing RowProxy and the most common result processors. The actual speedups will depend heavily on your DBAPI and the mix of datatypes used in your tables, and can vary from a 30% improvement to more than 200%. It also provides a modest (~15-20%) indirect improvement to ORM speed for large queries. Note that it is *not* built/installed by default. See README for installation instructions. .. change:: :tags: engines :tickets: the execution sequence pulls all rowcount/last inserted ID info from the cursor before commit() is called on the DBAPI connection in an "autocommit" scenario. This helps mxodbc with rowcount and is probably a good idea overall. .. change:: :tags: engines :tickets: 1719 Opened up logging a bit such that isEnabledFor() is called more often, so that changes to the log level for engine/pool will be reflected on next connect. This adds a small amount of method call overhead. It's negligible and will make life a lot easier for all those situations when logging just happens to be configured after create_engine() is called. .. change:: :tags: engines :tickets: The assert_unicode flag is deprecated. SQLAlchemy will raise a warning in all cases where it is asked to encode a non-unicode Python string, as well as when a Unicode or UnicodeType type is explicitly passed a bytestring. The String type will do nothing for DBAPIs that already accept Python unicode objects. .. change:: :tags: engines :tickets: Bind parameters are sent as a tuple instead of a list. Some backend drivers will not accept bind parameters as a list. .. change:: :tags: engines :tickets: threadlocal engine wasn't properly closing the connection upon close() - fixed that. .. change:: :tags: engines :tickets: Transaction object doesn't rollback or commit if it isn't "active", allows more accurate nesting of begin/rollback/commit. .. change:: :tags: engines :tickets: Python unicode objects as binds result in the Unicode type, not string, thus eliminating a certain class of unicode errors on drivers that don't support unicode binds. .. change:: :tags: engines :tickets: 1555 Added "logging_name" argument to create_engine(), Pool() constructor as well as "pool_logging_name" argument to create_engine() which filters down to that of Pool. Issues the given string name within the "name" field of logging messages instead of the default hex identifier string. .. change:: :tags: engines :tickets: The visit_pool() method of Dialect is removed, and replaced with on_connect(). This method returns a callable which receives the raw DBAPI connection after each one is created. The callable is assembled into a first_connect/connect pool listener by the connection strategy if non-None. Provides a simpler interface for dialects. .. change:: :tags: engines :tickets: 1728 StaticPool now initializes, disposes and recreates without opening a new connection - the connection is only opened when first requested. dispose() also works on AssertionPool now. .. change:: :tags: ticket: 1673, metadata :tickets: Added the ability to strip schema information when using "tometadata" by passing "schema=None" as an argument. If schema is not specified then the table's schema is retained. .. 
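A short sketch of the schema-stripping behavior described above; table and schema names are hypothetical::

    from sqlalchemy import MetaData, Table, Column, Integer

    meta1 = MetaData()
    remote = Table('remote_table', meta1,
        Column('id', Integer, primary_key=True),
        schema='remote_schema')

    meta2 = MetaData()
    # schema=None strips the schema from the copy; omitting the argument
    # would retain 'remote_schema'
    local = remote.tometadata(meta2, schema=None)

..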
change:: :tags: declarative :tickets: DeclarativeMeta exclusively uses cls.__dict__ (not dict\_) as the source of class information; _as_declarative exclusively uses the dict\_ passed to it as the source of class information (which when using DeclarativeMeta is cls.__dict__). This should in theory make it easier for custom metaclasses to modify the state passed into _as_declarative. .. change:: :tags: declarative :tickets: 1707 declarative now accepts mixin classes directly, as a means to provide common functional and column-based elements on all subclasses, as well as a means to propagate a fixed set of __table_args__ or __mapper_args__ to subclasses. For custom combinations of __table_args__/__mapper_args__ from an inherited mixin to local, descriptors can now be used. New details are all up in the Declarative documentation. Thanks to Chris Withers for putting up with my strife on this. .. change:: :tags: declarative :tickets: 1393 the __mapper_args__ dict is copied when propagating to a subclass, and is taken straight off the class __dict__ to avoid any propagation from the parent. mapper inheritance already propagates the things you want from the parent mapper. .. change:: :tags: declarative :tickets: 1732 An exception is raised when a single-table subclass specifies a column that is already present on the base class. .. change:: :tags: mysql :tickets: 1655 Fixed reflection bug whereby when COLLATE was present, nullable flag and server defaults would not be reflected. .. change:: :tags: mysql :tickets: Fixed reflection of TINYINT(1) "boolean" columns defined with integer flags like UNSIGNED. .. change:: :tags: mysql :tickets: 1668 Further fixes for the mysql-connector dialect. .. change:: :tags: mysql :tickets: 1496 Composite PK table on InnoDB where the "autoincrement" column isn't first will emit an explicit "KEY" phrase within CREATE TABLE thereby avoiding errors. .. change:: :tags: mysql :tickets: 1634 Added reflection/create table support for a wide range of MySQL keywords. .. change:: :tags: mysql :tickets: 1580 Fixed import error which could occur reflecting tables on a Windows host .. change:: :tags: mssql :tickets: Re-established support for the pymssql dialect. .. change:: :tags: mssql :tickets: Various fixes for implicit returning, reflection, etc. - the MS-SQL dialects aren't quite complete in 0.6 yet (but are close) .. change:: :tags: mssql :tickets: 1710 Added basic support for mxODBC. .. change:: :tags: mssql :tickets: Removed the text_as_varchar option. .. change:: :tags: oracle :tickets: "out" parameters require a type that is supported by cx_oracle. An error will be raised if no cx_oracle type can be found. .. change:: :tags: oracle :tickets: Oracle 'DATE' now does not perform any result processing, as the DATE type in Oracle stores full date+time objects, that's what you'll get. Note that the generic types.Date type *will* still call value.date() on incoming values, however. When reflecting a table, the reflected type will be 'DATE'. .. change:: :tags: oracle :tickets: 1670 Added preliminary support for Oracle's WITH_UNICODE mode. At the very least this establishes initial support for cx_Oracle with Python 3. When WITH_UNICODE mode is used in Python 2.xx, a large and scary warning is emitted asking that the user seriously consider the usage of this difficult mode of operation. .. change:: :tags: oracle :tickets: 1712 The except_() method now renders as MINUS on Oracle, which is more or less equivalent on that platform. .. 
change:: :tags: oracle :tickets: 651 Added support for rendering and reflecting TIMESTAMP WITH TIME ZONE, i.e. TIMESTAMP(timezone=True). .. change:: :tags: oracle :tickets: Oracle INTERVAL type can now be reflected. .. change:: :tags: sqlite :tickets: 1685 Added "native_datetime=True" flag to create_engine(). This will cause the DATE and TIMESTAMP types to skip all bind parameter and result row processing, under the assumption that PARSE_DECLTYPES has been enabled on the connection. Note that this is not entirely compatible with the "func.current_date()", which will be returned as a string. .. change:: :tags: sybase :tickets: Implemented a preliminary working dialect for Sybase, with sub-implementations for Python-Sybase as well as Pyodbc. Handles table creates/drops and basic round trip functionality. Does not yet include reflection or comprehensive support of unicode/special expressions/etc. .. change:: :tags: examples :tickets: Changed the beaker cache example a bit to have a separate RelationCache option for lazyload caching. This object does a lookup among any number of potential attributes more efficiently by grouping several into a common structure. Both FromCache and RelationCache are simpler individually. .. change:: :tags: documentation :tickets: 1700 Major cleanup work in the docs to link class, function, and method names into the API docs. .. changelog:: :version: 0.6beta1 :released: Wed Feb 03 2010 .. change:: :tags: release, major :tickets: For the full set of feature descriptions, see http://www.sqlalchemy.org/trac/wiki/06Migration . This document is a work in progress. .. change:: :tags: release, major :tickets: All bug fixes and feature enhancements from the most recent 0.5 version and below are also included within 0.6. .. change:: :tags: release, major :tickets: Platforms targeted now include Python 2.4/2.5/2.6, Python 3.1, Jython2.5. .. change:: :tags: orm :tickets: Changes to query.update() and query.delete(): - the 'expire' option on query.update() has been renamed to 'fetch', thus matching that of query.delete(). 'expire' is deprecated and issues a warning. - query.update() and query.delete() both default to 'evaluate' for the synchronize strategy. - the 'synchronize' strategy for update() and delete() raises an error on failure. There is no implicit fallback onto "fetch". Failure of evaluation is based on the structure of criteria, so success/failure is deterministic based on code structure. .. change:: :tags: orm :tickets: 1186, 1492, 1544 Enhancements on many-to-one relations: - many-to-one relations now fire off a lazyload in fewer cases, including in most cases will not fetch the "old" value when a new one is replaced. - many-to-one relation to a joined-table subclass now uses get() for a simple load (known as the "use_get" condition), i.e. Related->Sub(Base), without the need to redefine the primaryjoin condition in terms of the base table. - specifying a foreign key with a declarative column, i.e. ForeignKey(MyRelatedClass.id) doesn't break the "use_get" condition from taking place - relation(), eagerload(), and eagerload_all() now feature an option called "innerjoin". Specify `True` or `False` to control whether an eager join is constructed as an INNER or OUTER join. Default is `False` as always. The mapper options will override whichever setting is specified on relation(). Should generally be set for many-to-one, not nullable foreign key relations to allow improved join performance. 
- the behavior of eagerloading such that the main query is wrapped in a subquery when LIMIT/OFFSET are present now makes an exception for the case when all eager loads are many-to-one joins. In those cases, the eager joins are against the parent table directly along with the limit/offset without the extra overhead of a subquery, since a many-to-one join does not add rows to the result. .. change:: :tags: orm :tickets: Enhancements / Changes on Session.merge(): .. change:: :tags: orm :tickets: the "dont_load=True" flag on Session.merge() is deprecated and is now "load=False". .. change:: :tags: orm :tickets: Session.merge() is performance optimized, using half the call counts for "load=False" mode compared to 0.5 and significantly fewer SQL queries in the case of collections for "load=True" mode. .. change:: :tags: orm :tickets: merge() will not issue a needless merge of attributes if the given instance is the same instance which is already present. .. change:: :tags: orm :tickets: merge() now also merges the "options" associated with a given state, i.e. those passed through query.options() which follow along with an instance, such as options to eagerly- or lazyily- load various attributes. This is essential for the construction of highly integrated caching schemes. This is a subtle behavioral change vs. 0.5. .. change:: :tags: orm :tickets: A bug was fixed regarding the serialization of the "loader path" present on an instance's state, which is also necessary when combining the usage of merge() with serialized state and associated options that should be preserved. .. change:: :tags: orm :tickets: The all new merge() is showcased in a new comprehensive example of how to integrate Beaker with SQLAlchemy. See the notes in the "examples" note below. .. change:: :tags: orm :tickets: 1362 Primary key values can now be changed on a joined-table inheritance object, and ON UPDATE CASCADE will be taken into account when the flush happens. Set the new "passive_updates" flag to False on mapper() when using SQLite or MySQL/MyISAM. .. change:: :tags: orm :tickets: 1671 flush() now detects when a primary key column was updated by an ON UPDATE CASCADE operation from another primary key, and can then locate the row for a subsequent UPDATE on the new PK value. This occurs when a relation() is there to establish the relationship as well as passive_updates=True. .. change:: :tags: orm :tickets: the "save-update" cascade will now cascade the pending *removed* values from a scalar or collection attribute into the new session during an add() operation. This so that the flush() operation will also delete or modify rows of those disconnected items. .. change:: :tags: orm :tickets: 1531 Using a "dynamic" loader with a "secondary" table now produces a query where the "secondary" table is *not* aliased. This allows the secondary Table object to be used in the "order_by" attribute of the relation(), and also allows it to be used in filter criterion against the dynamic relation. .. change:: :tags: orm :tickets: 1643 relation() with uselist=False will emit a warning when an eager or lazy load locates more than one valid value for the row. This may be due to primaryjoin/secondaryjoin conditions which aren't appropriate for an eager LEFT OUTER JOIN or for other conditions. .. change:: :tags: orm :tickets: 1633 an explicit check occurs when a synonym() is used with map_column=True, when a ColumnProperty (deferred or otherwise) exists separately in the properties dictionary sent to mapper with the same keyname. 
Instead of silently replacing the existing property (and possible options on that property), an error is raised. .. change:: :tags: orm :tickets: a "dynamic" loader sets up its query criterion at construction time so that the actual query is returned from non-cloning accessors like "statement". .. change:: :tags: orm :tickets: the "named tuple" objects returned when iterating a Query() are now pickleable. .. change:: :tags: orm :tickets: 1542 mapping to a select() construct now requires that you make an alias() out of it distinctly. This to eliminate confusion over such issues as .. change:: :tags: orm :tickets: 1537 query.join() has been reworked to provide more consistent behavior and more flexibility (includes) .. change:: :tags: orm :tickets: query.select_from() accepts multiple clauses to produce multiple comma separated entries within the FROM clause. Useful when selecting from multiple-homed join() clauses. .. change:: :tags: orm :tickets: query.select_from() also accepts mapped classes, aliased() constructs, and mappers as arguments. In particular this helps when querying from multiple joined-table classes to ensure the full join gets rendered. .. change:: :tags: orm :tickets: 1135 query.get() can be used with a mapping to an outer join where one or more of the primary key values are None. .. change:: :tags: orm :tickets: 1568 query.from_self(), query.union(), others which do a "SELECT * from (SELECT...)" type of nesting will do a better job translating column expressions within the subquery to the columns clause of the outer query. This is potentially backwards incompatible with 0.5, in that this may break queries with literal expressions that do not have labels applied (i.e. literal('foo'), etc.) .. change:: :tags: orm :tickets: 1622 relation primaryjoin and secondaryjoin now check that they are column-expressions, not just clause elements. this prohibits things like FROM expressions being placed there directly. .. change:: :tags: orm :tickets: 1415 `expression.null()` is fully understood the same way None is when comparing an object/collection-referencing attribute within query.filter(), filter_by(), etc. .. change:: :tags: orm :tickets: 1052 added "make_transient()" helper function which transforms a persistent/ detached instance into a transient one (i.e. deletes the instance_key and removes from any session.) .. change:: :tags: orm :tickets: 1339 the allow_null_pks flag on mapper() is deprecated, and the feature is turned "on" by default. This means that a row which has a non-null value for any of its primary key columns will be considered an identity. The need for this scenario typically only occurs when mapping to an outer join. .. change:: :tags: orm :tickets: the mechanics of "backref" have been fully merged into the finer grained "back_populates" system, and take place entirely within the _generate_backref() method of RelationProperty. This makes the initialization procedure of RelationProperty simpler and allows easier propagation of settings (such as from subclasses of RelationProperty) into the reverse reference. The internal BackRef() is gone and backref() returns a plain tuple that is understood by RelationProperty. .. change:: :tags: orm :tickets: 1569 The version_id_col feature on mapper() will raise a warning when used with dialects that don't support "rowcount" adequately. .. change:: :tags: orm :tickets: added "execution_options()" to Query, to so options can be passed to the resulting statement. 
Currently only Select-statements have these options, and the only option used is "stream_results", and the only dialect which knows "stream_results" is psycopg2. .. change:: :tags: orm :tickets: Query.yield_per() will set the "stream_results" statement option automatically. .. change:: :tags: orm :tickets: Deprecated or removed: * 'allow_null_pks' flag on mapper() is deprecated. It does nothing now and the setting is "on" in all cases. * 'transactional' flag on sessionmaker() and others is removed. Use 'autocommit=True' to indicate 'transactional=False'. * 'polymorphic_fetch' argument on mapper() is removed. Loading can be controlled using the 'with_polymorphic' option. * 'select_table' argument on mapper() is removed. Use 'with_polymorphic=("*", )' for this functionality. * 'proxy' argument on synonym() is removed. This flag did nothing throughout 0.5, as the "proxy generation" behavior is now automatic. * Passing a single list of elements to eagerload(), eagerload_all(), contains_eager(), lazyload(), defer(), and undefer() instead of multiple positional \*args is deprecated. * Passing a single list of elements to query.order_by(), query.group_by(), query.join(), or query.outerjoin() instead of multiple positional \*args is deprecated. * query.iterate_instances() is removed. Use query.instances(). * Query.query_from_parent() is removed. Use the sqlalchemy.orm.with_parent() function to produce a "parent" clause, or alternatively query.with_parent(). * query._from_self() is removed, use query.from_self() instead. * the "comparator" argument to composite() is removed. Use "comparator_factory". * RelationProperty._get_join() is removed. * the 'echo_uow' flag on Session is removed. Use logging on the "sqlalchemy.orm.unitofwork" name. * session.clear() is removed. use session.expunge_all(). * session.save(), session.update(), session.save_or_update() are removed. Use session.add() and session.add_all(). * the "objects" flag on session.flush() remains deprecated. * the "dont_load=True" flag on session.merge() is deprecated in favor of "load=False". * ScopedSession.mapper remains deprecated. See the usage recipe at http://www.sqlalchemy.org/trac/wiki/UsageRecipes/SessionAwareMapper * passing an InstanceState (internal SQLAlchemy state object) to attributes.init_collection() or attributes.get_history() is deprecated. These functions are public API and normally expect a regular mapped object instance. * the 'engine' parameter to declarative_base() is removed. Use the 'bind' keyword argument. .. change:: :tags: sql :tickets: the "autocommit" flag on select() and text() as well as select().autocommit() are deprecated - now call .execution_options(autocommit=True) on either of those constructs, also available directly on Connection and orm.Query. .. change:: :tags: sql :tickets: the autoincrement flag on column now indicates the column which should be linked to cursor.lastrowid, if that method is used. See the API docs for details. .. change:: :tags: sql :tickets: 1566 an executemany() now requires that all bound parameter sets require that all keys are present which are present in the first bound parameter set. The structure and behavior of an insert/update statement is very much determined by the first parameter set, including which defaults are going to fire off, and a minimum of guesswork is performed with all the rest so that performance is not impacted. For this reason defaults would otherwise silently "fail" for missing parameters, so this is now guarded against. .. 
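As a minimal sketch of the stricter executemany() contract described above, every parameter dictionary should carry the same keys as the first one; the ``user`` table and its columns here are purely illustrative::

    from sqlalchemy import MetaData, Table, Column, Integer, String, create_engine

    metadata = MetaData()
    user = Table('user', metadata,
        Column('id', Integer, primary_key=True),
        Column('name', String(50)),
        Column('status', String(20), default='active'),
    )

    engine = create_engine('sqlite://')
    metadata.create_all(engine)

    conn = engine.connect()
    # every parameter set names the same keys as the first set; omitting
    # "status" from a later dictionary is what is now guarded against
    conn.execute(user.insert(), [
        {'name': 'ed', 'status': 'active'},
        {'name': 'wendy', 'status': 'inactive'},
    ])

..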
change:: :tags: sql :tickets: returning() support is native to insert(), update(), delete(). Implementations of varying levels of functionality exist for Postgresql, Firebird, MSSQL and Oracle. returning() can be called explicitly with column expressions which are then returned in the resultset, usually via fetchone() or first(). insert() constructs will also use RETURNING implicitly to get newly generated primary key values, if the database version in use supports it (a version number check is performed). This occurs if no end-user returning() was specified. .. change:: :tags: sql :tickets: 1665 union(), intersect(), except() and other "compound" types of statements have more consistent behavior w.r.t. parenthesizing. Each compound element embedded within another will now be grouped with parenthesis - previously, the first compound element in the list would not be grouped, as SQLite doesn't like a statement to start with parenthesis. However, Postgresql in particular has precedence rules regarding INTERSECT, and it is more consistent for parenthesis to be applied equally to all sub-elements. So now, the workaround for SQLite is also what the workaround for PG was previously - when nesting compound elements, the first one usually needs ".alias().select()" called on it to wrap it inside of a subquery. .. change:: :tags: sql :tickets: 1579 insert() and update() constructs can now embed bindparam() objects using names that match the keys of columns. These bind parameters will circumvent the usual route to those keys showing up in the VALUES or SET clause of the generated SQL. .. change:: :tags: sql :tickets: 1524 the Binary type now returns data as a Python string (or a "bytes" type in Python 3), instead of the built- in "buffer" type. This allows symmetric round trips of binary data. .. change:: :tags: sql :tickets: Added a tuple_() construct, allows sets of expressions to be compared to another set, typically with IN against composite primary keys or similar. Also accepts an IN with multiple columns. The "scalar select can have only one column" error message is removed - will rely upon the database to report problems with col mismatch. .. change:: :tags: sql :tickets: User-defined "default" and "onupdate" callables which accept a context should now call upon "context.current_parameters" to get at the dictionary of bind parameters currently being processed. This dict is available in the same way regardless of single-execute or executemany-style statement execution. .. change:: :tags: sql :tickets: 1428 multi-part schema names, i.e. with dots such as "dbo.master", are now rendered in select() labels with underscores for dots, i.e. "dbo_master_table_column". This is a "friendly" label that behaves better in result sets. .. change:: :tags: sql :tickets: removed needless "counter" behavior with select() labelnames that match a column name in the table, i.e. generates "tablename_id" for "id", instead of "tablename_id_1" in an attempt to avoid naming conflicts, when the table has a column actually named "tablename_id" - this is because the labeling logic is always applied to all columns so a naming conflict will never occur. .. change:: :tags: sql :tickets: 1628 calling expr.in_([]), i.e. with an empty list, emits a warning before issuing the usual "expr != expr" clause. The "expr != expr" can be very expensive, and it's preferred that the user not issue in_() if the list is empty, instead simply not querying, or modifying the criterion as appropriate for more complex situations. .. 
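A short sketch of the recommended guard against an empty in_() list; the ``items`` table is invented for the example::

    from sqlalchemy import MetaData, Table, Column, Integer, create_engine, select

    metadata = MetaData()
    items = Table('items', metadata, Column('id', Integer, primary_key=True))

    engine = create_engine('sqlite://')
    metadata.create_all(engine)

    ids = []   # e.g. nothing was selected by the user
    if ids:
        rows = engine.execute(select([items]).where(items.c.id.in_(ids))).fetchall()
    else:
        # skip emitting the expensive "expr != expr" form entirely
        rows = []

..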
change:: :tags: sql :tickets: Added "execution_options()" to select()/text(), which set the default options for the Connection. See the note in "engines". .. change:: :tags: sql :tickets: 1131 Deprecated or removed: * "scalar" flag on select() is removed, use select.as_scalar(). * "shortname" attribute on bindparam() is removed. * postgres_returning, firebird_returning flags on insert(), update(), delete() are deprecated, use the new returning() method. * fold_equivalents flag on join is deprecated (will remain until is implemented) .. change:: :tags: engines :tickets: 443 transaction isolation level may be specified with create_engine(... isolation_level="..."); available on postgresql and sqlite. .. change:: :tags: engines :tickets: Connection has execution_options(), generative method which accepts keywords that affect how the statement is executed w.r.t. the DBAPI. Currently supports "stream_results", causes psycopg2 to use a server side cursor for that statement, as well as "autocommit", which is the new location for the "autocommit" option from select() and text(). select() and text() also have .execution_options() as well as ORM Query(). .. change:: :tags: engines :tickets: 1630 fixed the import for entrypoint-driven dialects to not rely upon silly tb_info trick to determine import error status. .. change:: :tags: engines :tickets: added first() method to ResultProxy, returns first row and closes result set immediately. .. change:: :tags: engines :tickets: RowProxy objects are now pickleable, i.e. the object returned by result.fetchone(), result.fetchall() etc. .. change:: :tags: engines :tickets: RowProxy no longer has a close() method, as the row no longer maintains a reference to the parent. Call close() on the parent ResultProxy instead, or use autoclose. .. change:: :tags: engines :tickets: 1586 ResultProxy internals have been overhauled to greatly reduce method call counts when fetching columns. Can provide a large speed improvement (up to more than 100%) when fetching large result sets. The improvement is larger when fetching columns that have no type-level processing applied and when using results as tuples (instead of as dictionaries). Many thanks to Elixir's Gaëtan de Menten for this dramatic improvement ! .. change:: :tags: engines :tickets: Databases which rely upon postfetch of "last inserted id" to get at a generated sequence value (i.e. MySQL, MS-SQL) now work correctly when there is a composite primary key where the "autoincrement" column is not the first primary key column in the table. .. change:: :tags: engines :tickets: the last_inserted_ids() method has been renamed to the descriptor "inserted_primary_key". .. change:: :tags: engines :tickets: 1554 setting echo=False on create_engine() now sets the loglevel to WARN instead of NOTSET. This so that logging can be disabled for a particular engine even if logging for "sqlalchemy.engine" is enabled overall. Note that the default setting of "echo" is `None`. .. change:: :tags: engines :tickets: ConnectionProxy now has wrapper methods for all transaction lifecycle events, including begin(), rollback(), commit() begin_nested(), begin_prepared(), prepare(), release_savepoint(), etc. .. change:: :tags: engines :tickets: Connection pool logging now uses both INFO and DEBUG log levels for logging. INFO is for major events such as invalidated connections, DEBUG for all the acquire/return logging. `echo_pool` can be False, None, True or "debug" the same way as `echo` works. .. 
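A minimal sketch of the pool logging switches mentioned above::

    import logging
    from sqlalchemy import create_engine

    logging.basicConfig()

    # INFO-level pool events only, e.g. connection invalidation
    engine = create_engine('sqlite://', echo_pool=True)

    # DEBUG level as well: every connection checkout / checkin is logged
    chatty_engine = create_engine('sqlite://', echo_pool='debug')

..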
change:: :tags: engines :tickets: 1621 All pyodbc-dialects now support extra pyodbc-specific kw arguments 'ansi', 'unicode_results', 'autocommit'. .. change:: :tags: engines :tickets: the "threadlocal" engine has been rewritten and simplified and now supports SAVEPOINT operations. .. change:: :tags: engines :tickets: deprecated or removed * result.last_inserted_ids() is deprecated. Use result.inserted_primary_key * dialect.get_default_schema_name(connection) is now public via dialect.default_schema_name. * the "connection" argument from engine.transaction() and engine.run_callable() is removed - Connection itself now has those methods. All four methods accept \*args and \**kwargs which are passed to the given callable, as well as the operating connection. .. change:: :tags: schema :tickets: 1541 the `__contains__()` method of `MetaData` now accepts strings or `Table` objects as arguments. If given a `Table`, the argument is converted to `table.key` first, i.e. "[schemaname.]" .. change:: :tags: schema :tickets: deprecated MetaData.connect() and ThreadLocalMetaData.connect() have been removed - send the "bind" attribute to bind a metadata. .. change:: :tags: schema :tickets: deprecated metadata.table_iterator() method removed (use sorted_tables) .. change:: :tags: schema :tickets: deprecated PassiveDefault - use DefaultClause. .. change:: :tags: schema :tickets: the "metadata" argument is removed from DefaultGenerator and subclasses, but remains locally present on Sequence, which is a standalone construct in DDL. .. change:: :tags: schema :tickets: Removed public mutability from Index and Constraint objects: * ForeignKeyConstraint.append_element() * Index.append_column() * UniqueConstraint.append_column() * PrimaryKeyConstraint.add() * PrimaryKeyConstraint.remove() These should be constructed declaratively (i.e. in one construction). .. change:: :tags: schema :tickets: 1545 The "start" and "increment" attributes on Sequence now generate "START WITH" and "INCREMENT BY" by default, on Oracle and Postgresql. Firebird doesn't support these keywords right now. .. change:: :tags: schema :tickets: UniqueConstraint, Index, PrimaryKeyConstraint all accept lists of column names or column objects as arguments. .. change:: :tags: schema :tickets: Other removed things: - Table.key (no idea what this was for) - Table.primary_key is not assignable - use table.append_constraint(PrimaryKeyConstraint(...)) - Column.bind (get via column.table.bind) - Column.metadata (get via column.table.metadata) - Column.sequence (use column.default) - ForeignKey(constraint=some_parent) (is now private _constraint) .. change:: :tags: schema :tickets: The use_alter flag on ForeignKey is now a shortcut option for operations that can be hand-constructed using the DDL() event system. A side effect of this refactor is that ForeignKeyConstraint objects with use_alter=True will *not* be emitted on SQLite, which does not support ALTER for foreign keys. .. change:: :tags: schema :tickets: 1605 ForeignKey and ForeignKeyConstraint objects now correctly copy() all their public keyword arguments. .. change:: :tags: reflection/inspection :tickets: Table reflection has been expanded and generalized into a new API called "sqlalchemy.engine.reflection.Inspector". The Inspector object provides fine-grained information about a wide variety of schema information, with room for expansion, including table names, column names, view definitions, sequences, indexes, etc. .. 
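A minimal sketch of the new Inspector interface; the table created here exists only for the example::

    from sqlalchemy import create_engine
    from sqlalchemy.engine import reflection

    engine = create_engine('sqlite://')
    engine.execute("CREATE TABLE some_table (id INTEGER PRIMARY KEY, name VARCHAR(50))")

    insp = reflection.Inspector.from_engine(engine)
    print(insp.get_table_names())
    for col in insp.get_columns('some_table'):
        print("%s %s" % (col['name'], col['type']))

..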
change:: :tags: reflection/inspection :tickets: Views are now reflectable as ordinary Table objects. The same Table constructor is used, with the caveat that "effective" primary and foreign key constraints aren't part of the reflection results; these have to be specified explicitly if desired. .. change:: :tags: reflection/inspection :tickets: The existing autoload=True system now uses Inspector underneath so that each dialect need only return "raw" data about tables and other objects - Inspector is the single place that information is compiled into Table objects so that consistency is at a maximum. .. change:: :tags: ddl :tickets: the DDL system has been greatly expanded. the DDL() class now extends the more generic DDLElement(), which forms the basis of many new constructs: - CreateTable() - DropTable() - AddConstraint() - DropConstraint() - CreateIndex() - DropIndex() - CreateSequence() - DropSequence() These support "on" and "execute-at()" just like plain DDL() does. User-defined DDLElement subclasses can be created and linked to a compiler using the sqlalchemy.ext.compiler extension. .. change:: :tags: ddl :tickets: The signature of the "on" callable passed to DDL() and DDLElement() is revised as follows: ddl the DDLElement object itself event the string event name. target previously "schema_item", the Table or MetaData object triggering the event. connection the Connection object in use for the operation. \**kw keyword arguments. In the case of MetaData before/after create/drop, the list of Table objects for which CREATE/DROP DDL is to be issued is passed as the kw argument "tables". This is necessary for metadata-level DDL that is dependent on the presence of specific tables. The "schema_item" attribute of DDL has been renamed to "target". .. change:: :tags: dialect, refactor :tickets: Dialect modules are now broken into database dialects plus DBAPI implementations. Connect URLs are now preferred to be specified using dialect+driver://..., i.e. "mysql+mysqldb://scott:tiger@localhost/test". See the 0.6 documentation for examples. .. change:: :tags: dialect, refactor :tickets: the setuptools entrypoint for external dialects is now called "sqlalchemy.dialects". .. change:: :tags: dialect, refactor :tickets: the "owner" keyword argument is removed from Table. Use "schema" to represent any namespaces to be prepended to the table name. .. change:: :tags: dialect, refactor :tickets: server_version_info becomes a static attribute. .. change:: :tags: dialect, refactor :tickets: dialects receive an initialize() event on initial connection to determine connection properties. .. change:: :tags: dialect, refactor :tickets: dialects receive a visit_pool event have an opportunity to establish pool listeners. .. change:: :tags: dialect, refactor :tickets: cached TypeEngine classes are cached per-dialect class instead of per-dialect. .. change:: :tags: dialect, refactor :tickets: new UserDefinedType should be used as a base class for new types, which preserves the 0.5 behavior of get_col_spec(). .. change:: :tags: dialect, refactor :tickets: The result_processor() method of all type classes now accepts a second argument "coltype", which is the DBAPI type argument from cursor.description. This argument can help some types decide on the most efficient processing of result values. .. change:: :tags: dialect, refactor :tickets: Deprecated Dialect.get_params() removed. .. 
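A minimal sketch of a custom type built on the new UserDefinedType base; the "GEOMETRY" column specification is purely hypothetical::

    from sqlalchemy.types import UserDefinedType

    class GeometryType(UserDefinedType):
        """Render a hypothetical GEOMETRY column type."""

        def get_col_spec(self):
            # same get_col_spec() contract as 0.5-style custom types
            return "GEOMETRY"

        def bind_processor(self, dialect):
            def process(value):
                # pass bind values through unchanged in this sketch
                return value
            return process

        def result_processor(self, dialect, coltype):
            # note the new "coltype" argument from cursor.description
            def process(value):
                return value
            return process

..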
change:: :tags: dialect, refactor :tickets: Dialect.get_rowcount() has been renamed to a descriptor "rowcount", and calls cursor.rowcount directly. Dialects which need to hardwire a rowcount in for certain calls should override the method to provide different behavior. .. change:: :tags: dialect, refactor :tickets: 1566 DefaultRunner and subclasses have been removed. The job of this object has been simplified and moved into ExecutionContext. Dialects which support sequences should add a `fire_sequence()` method to their execution context implementation. .. change:: :tags: dialect, refactor :tickets: Functions and operators generated by the compiler now use (almost) regular dispatch functions of the form "visit_" and "visit__fn" to provide custom processing. This replaces the need to copy the "functions" and "operators" dictionaries in compiler subclasses with straightforward visitor methods, and also allows compiler subclasses complete control over rendering, as the full _Function or _BinaryExpression object is passed in. .. change:: :tags: postgresql :tickets: New dialects: pg8000, zxjdbc, and pypostgresql on py3k. .. change:: :tags: postgresql :tickets: The "postgres" dialect is now named "postgresql" ! Connection strings look like: postgresql://scott:tiger@localhost/test postgresql+pg8000://scott:tiger@localhost/test The "postgres" name remains for backwards compatibility in the following ways: - There is a "postgres.py" dummy dialect which allows old URLs to work, i.e. postgres://scott:tiger@localhost/test - The "postgres" name can be imported from the old "databases" module, i.e. "from sqlalchemy.databases import postgres" as well as "dialects", "from sqlalchemy.dialects.postgres import base as pg", will send a deprecation warning. - Special expression arguments are now named "postgresql_returning" and "postgresql_where", but the older "postgres_returning" and "postgres_where" names still work with a deprecation warning. .. change:: :tags: postgresql :tickets: "postgresql_where" now accepts SQL expressions which can also include literals, which will be quoted as needed. .. change:: :tags: postgresql :tickets: The psycopg2 dialect now uses psycopg2's "unicode extension" on all new connections, which allows all String/Text/etc. types to skip the need to post-process bytestrings into unicode (an expensive step due to its volume). Other dialects which return unicode natively (pg8000, zxjdbc) also skip unicode post-processing. .. change:: :tags: postgresql :tickets: 1511 Added new ENUM type, which exists as a schema-level construct and extends the generic Enum type. Automatically associates itself with tables and their parent metadata to issue the appropriate CREATE TYPE/DROP TYPE commands as needed, supports unicode labels, supports reflection. .. change:: :tags: postgresql :tickets: INTERVAL supports an optional "precision" argument corresponding to the argument that PG accepts. .. change:: :tags: postgresql :tickets: using new dialect.initialize() feature to set up version-dependent behavior. .. change:: :tags: postgresql :tickets: 1279 somewhat better support for % signs in table/column names; psycopg2 can't handle a bind parameter name of %(foobar)s however and SQLA doesn't want to add overhead just to treat that one non-existent use case. .. change:: :tags: postgresql :tickets: 1516 Inserting NULL into a primary key + foreign key column will allow the "not null constraint" error to raise, not an attempt to execute a nonexistent "col_id_seq" sequence. ..
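A minimal sketch of the schema-level Postgresql ENUM described above; the type and table names are invented for the example::

    from sqlalchemy import MetaData, Table, Column, Integer
    from sqlalchemy.dialects.postgresql import ENUM

    metadata = MetaData()
    mood_type = ENUM('sad', 'ok', 'happy', name='mood', metadata=metadata)

    person = Table('person', metadata,
        Column('id', Integer, primary_key=True),
        Column('mood', mood_type),
    )

    # metadata.create_all(pg_engine) would emit CREATE TYPE mood AS ENUM (...)
    # before CREATE TABLE person; drop_all() emits the corresponding DROP TYPE.

..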
change:: :tags: postgresql :tickets: autoincrement SELECT statements, i.e. those which select from a procedure that modifies rows, now work with server-side cursor mode (the named cursor isn't used for such statements.) .. change:: :tags: postgresql :tickets: 1636 postgresql dialect can properly detect pg "devel" version strings, i.e. "8.5devel" .. change:: :tags: postgresql :tickets: 1619 The psycopg2 now respects the statement option "stream_results". This option overrides the connection setting "server_side_cursors". If true, server side cursors will be used for the statement. If false, they will not be used, even if "server_side_cursors" is true on the connection. .. change:: :tags: mysql :tickets: New dialects: oursql, a new native dialect, MySQL Connector/Python, a native Python port of MySQLdb, and of course zxjdbc on Jython. .. change:: :tags: mysql :tickets: VARCHAR/NVARCHAR will not render without a length, raises an error before passing to MySQL. Doesn't impact CAST since VARCHAR is not allowed in MySQL CAST anyway, the dialect renders CHAR/NCHAR in those cases. .. change:: :tags: mysql :tickets: all the _detect_XXX() functions now run once underneath dialect.initialize() .. change:: :tags: mysql :tickets: 1279 somewhat better support for % signs in table/column names; MySQLdb can't handle % signs in SQL when executemany() is used, and SQLA doesn't want to add overhead just to treat that one non-existent use case. .. change:: :tags: mysql :tickets: the BINARY and MSBinary types now generate "BINARY" in all cases. Omitting the "length" parameter will generate "BINARY" with no length. Use BLOB to generate an unlengthed binary column. .. change:: :tags: mysql :tickets: the "quoting='quoted'" argument to MSEnum/ENUM is deprecated. It's best to rely upon the automatic quoting. .. change:: :tags: mysql :tickets: ENUM now subclasses the new generic Enum type, and also handles unicode values implicitly, if the given labelnames are unicode objects. .. change:: :tags: mysql :tickets: 1539 a column of type TIMESTAMP now defaults to NULL if "nullable=False" is not passed to Column(), and no default is present. This is now consistent with all other types, and in the case of TIMESTAMP explictly renders "NULL" due to MySQL's "switching" of default nullability for TIMESTAMP columns. .. change:: :tags: oracle :tickets: unit tests pass 100% with cx_oracle ! .. change:: :tags: oracle :tickets: support for cx_Oracle's "native unicode" mode which does not require NLS_LANG to be set. Use the latest 5.0.2 or later of cx_oracle. .. change:: :tags: oracle :tickets: an NCLOB type is added to the base types. .. change:: :tags: oracle :tickets: use_ansi=False won't leak into the FROM/WHERE clause of a statement that's selecting from a subquery that also uses JOIN/OUTERJOIN. .. change:: :tags: oracle :tickets: 1467 added native INTERVAL type to the dialect. This supports only the DAY TO SECOND interval type so far due to lack of support in cx_oracle for YEAR TO MONTH. .. change:: :tags: oracle :tickets: usage of the CHAR type results in cx_oracle's FIXED_CHAR dbapi type being bound to statements. .. change:: :tags: oracle :tickets: 885 the Oracle dialect now features NUMBER which intends to act justlike Oracle's NUMBER type. It is the primary numeric type returned by table reflection and attempts to return Decimal()/float/int based on the precision/scale parameters. .. change:: :tags: oracle :tickets: func.char_length is a generic function for LENGTH .. 
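A quick sketch of the generic char_length function; the table and column are illustrative::

    from sqlalchemy import MetaData, Table, Column, String, select, func

    metadata = MetaData()
    docs = Table('docs', metadata, Column('body', String(200)))

    stmt = select([func.char_length(docs.c.body)])
    print(stmt)
    # roughly: SELECT char_length(docs.body) AS char_length_1 FROM docs,
    # with each dialect substituting its own length function as needed

..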
change:: :tags: oracle :tickets: ForeignKey() which includes onupdate= will emit a warning, not emit ON UPDATE CASCADE which is unsupported by oracle .. change:: :tags: oracle :tickets: the keys() method of RowProxy() now returns the result column names *normalized* to be SQLAlchemy case insensitive names. This means they will be lower case for case insensitive names, whereas the DBAPI would normally return them as UPPERCASE names. This allows row keys() to be compatible with further SQLAlchemy operations. .. change:: :tags: oracle :tickets: using new dialect.initialize() feature to set up version-dependent behavior. .. change:: :tags: oracle :tickets: 1125 using types.BigInteger with Oracle will generate NUMBER(19) .. change:: :tags: oracle :tickets: "case sensitivity" feature will detect an all-lowercase case-sensitive column name during reflect and add "quote=True" to the generated Column, so that proper quoting is maintained. .. change:: :tags: firebird :tickets: the keys() method of RowProxy() now returns the result column names *normalized* to be SQLAlchemy case insensitive names. This means they will be lower case for case insensitive names, whereas the DBAPI would normally return them as UPPERCASE names. This allows row keys() to be compatible with further SQLAlchemy operations. .. change:: :tags: firebird :tickets: using new dialect.initialize() feature to set up version-dependent behavior. .. change:: :tags: firebird :tickets: "case sensitivity" feature will detect an all-lowercase case-sensitive column name during reflect and add "quote=True" to the generated Column, so that proper quoting is maintained. .. change:: :tags: mssql :tickets: MSSQL + Pyodbc + FreeTDS now works for the most part, with possible exceptions regarding binary data as well as unicode schema identifiers. .. change:: :tags: mssql :tickets: the "has_window_funcs" flag is removed. LIMIT/OFFSET usage will use ROW NUMBER as always, and if on an older version of SQL Server, the operation fails. The behavior is exactly the same except the error is raised by SQL server instead of the dialect, and no flag setting is required to enable it. .. change:: :tags: mssql :tickets: the "auto_identity_insert" flag is removed. This feature always takes effect when an INSERT statement overrides a column that is known to have a sequence on it. As with "has_window_funcs", if the underlying driver doesn't support this, then you can't do this operation in any case, so there's no point in having a flag. .. change:: :tags: mssql :tickets: using new dialect.initialize() feature to set up version-dependent behavior. .. change:: :tags: mssql :tickets: removed references to sequence which is no longer used. implicit identities in mssql work the same as implicit sequences on any other dialects. Explicit sequences are enabled through the use of "default=Sequence()". See the MSSQL dialect documentation for more information. .. change:: :tags: sqlite :tickets: DATE, TIME and DATETIME types can now take optional storage_format and regexp argument. storage_format can be used to store those types using a custom string format. regexp allows to use a custom regular expression to match string values from the database. .. change:: :tags: sqlite :tickets: Time and DateTime types now use by a default a stricter regular expression to match strings from the database. Use the regexp argument if you are using data stored in a legacy format. .. 
change:: :tags: sqlite :tickets: __legacy_microseconds__ on SQLite Time and DateTime types is not supported anymore. You should use the storage_format argument instead. .. change:: :tags: sqlite :tickets: Date, Time and DateTime types are now stricter in what they accept as bind parameters: Date type only accepts date objects (and datetime ones, because they inherit from date), Time only accepts time objects, and DateTime only accepts date and datetime objects. .. change:: :tags: sqlite :tickets: 1016 Table() supports a keyword argument "sqlite_autoincrement", which applies the SQLite keyword "AUTOINCREMENT" to the single integer primary key column when generating DDL. Will prevent generation of a separate PRIMARY KEY constraint. .. change:: :tags: types :tickets: The construction of types within dialects has been totally overhauled. Dialects now define publicly available types as UPPERCASE names exclusively, and internal implementation types using underscore identifiers (i.e. are private). The system by which types are expressed in SQL and DDL has been moved to the compiler system. This has the effect that there are much fewer type objects within most dialects. A detailed document on this architecture for dialect authors is in lib/sqlalchemy/dialects/type_migration_guidelines.txt . .. change:: :tags: types :tickets: Types no longer make any guesses as to default parameters. In particular, Numeric, Float, NUMERIC, FLOAT, DECIMAL don't generate any length or scale unless specified. .. change:: :tags: types :tickets: 1664 types.Binary is renamed to types.LargeBinary, it only produces BLOB, BYTEA, or a similar "long binary" type. New base BINARY and VARBINARY types have been added to access these MySQL/MS-SQL specific types in an agnostic way. .. change:: :tags: types :tickets: String/Text/Unicode types now skip the unicode() check on each result column value if the dialect has detected the DBAPI as returning Python unicode objects natively. This check is issued on first connect using "SELECT CAST 'some text' AS VARCHAR(10)" or equivalent, then checking if the returned object is a Python unicode. This allows vast performance increases for native-unicode DBAPIs, including pysqlite/sqlite3, psycopg2, and pg8000. .. change:: :tags: types :tickets: Most types result processors have been checked for possible speed improvements. Specifically, the following generic types have been optimized, resulting in varying speed improvements: Unicode, PickleType, Interval, TypeDecorator, Binary. Also the following dbapi-specific implementations have been improved: Time, Date and DateTime on Sqlite, ARRAY on Postgresql, Time on MySQL, Numeric(as_decimal=False) on MySQL, oursql and pypostgresql, DateTime on cx_oracle and LOB-based types on cx_oracle. .. change:: :tags: types :tickets: Reflection of types now returns the exact UPPERCASE type within types.py, or the UPPERCASE type within the dialect itself if the type is not a standard SQL type. This means reflection now returns more accurate information about reflected types. .. change:: :tags: types :tickets: 1511, 1109 Added a new Enum generic type. Enum is a schema-aware object to support databases which require specific DDL in order to use enum or equivalent; in the case of PG it handles the details of `CREATE TYPE`, and on other databases without native enum support will generate VARCHAR + an inline CHECK constraint to enforce the enum. ..
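A minimal sketch of the generic Enum type just described; the table and value names are invented::

    from sqlalchemy import MetaData, Table, Column, Integer, Enum, create_engine

    metadata = MetaData()
    article = Table('article', metadata,
        Column('id', Integer, primary_key=True),
        Column('status', Enum('draft', 'published', 'archived', name='article_status')),
    )

    # Postgresql emits CREATE TYPE article_status AS ENUM (...) plus the table;
    # backends without a native enum fall back to VARCHAR with an inline
    # CHECK constraint restricting the values.
    metadata.create_all(create_engine('sqlite://'))

..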
change:: :tags: types :tickets: 1467 The Interval type includes a "native" flag which controls if native INTERVAL types (postgresql + oracle) are selected if available, or not. "day_precision" and "second_precision" arguments are also added which propagate as appropriately to these native types. Related to. .. change:: :tags: types :tickets: 1589 The Boolean type, when used on a backend that doesn't have native boolean support, will generate a CHECK constraint "col IN (0, 1)" along with the int/smallint- based column type. This can be switched off if desired with create_constraint=False. Note that MySQL has no native boolean *or* CHECK constraint support so this feature isn't available on that platform. .. change:: :tags: types :tickets: PickleType now uses == for comparison of values when mutable=True, unless the "comparator" argument with a comparsion function is specified to the type. Objects being pickled will be compared based on identity (which defeats the purpose of mutable=True) if __eq__() is not overridden or a comparison function is not provided. .. change:: :tags: types :tickets: The default "precision" and "scale" arguments of Numeric and Float have been removed and now default to None. NUMERIC and FLOAT will be rendered with no numeric arguments by default unless these values are provided. .. change:: :tags: types :tickets: AbstractType.get_search_list() is removed - the games that was used for are no longer necessary. .. change:: :tags: types :tickets: 1125 Added a generic BigInteger type, compiles to BIGINT or NUMBER(19). .. change:: :tags: types :tickets: sqlsoup has been overhauled to explicitly support an 0.5 style session, using autocommit=False, autoflush=True. Default behavior of SQLSoup now requires the usual usage of commit() and rollback(), which have been added to its interface. An explcit Session or scoped_session can be passed to the constructor, allowing these arguments to be overridden. .. change:: :tags: types :tickets: sqlsoup db..update() and delete() now call query(cls).update() and delete(), respectively. .. change:: :tags: types :tickets: sqlsoup now has execute() and connection(), which call upon the Session methods of those names, ensuring that the bind is in terms of the SqlSoup object's bind. .. change:: :tags: types :tickets: sqlsoup objects no longer have the 'query' attribute - it's not needed for sqlsoup's usage paradigm and it gets in the way of a column that is actually named 'query'. .. change:: :tags: types :tickets: 1259 The signature of the proxy_factory callable passed to association_proxy is now (lazy_collection, creator, value_attr, association_proxy), adding a fourth argument that is the parent AssociationProxy argument. Allows serializability and subclassing of the built in collections. .. change:: :tags: types :tickets: 1372 association_proxy now has basic comparator methods .any(), .has(), .contains(), ==, !=, thanks to Scott Torborg. SQLAlchemy-0.8.4/doc/build/changelog/changelog_07.rst0000644000076500000240000041255412251147171023027 0ustar classicstaff00000000000000 ============== 0.7 Changelog ============== .. changelog:: :version: 0.7.11 .. change:: :tags: bug, engine :tickets: 2851 :versions: 0.8.3, 0.9.0b1 The regexp used by the :func:`~sqlalchemy.engine.url.make_url` function now parses ipv6 addresses, e.g. surrounded by brackets. .. 
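With that fix applied, a bracketed IPv6 host parses cleanly; the DSN below is hypothetical::

    from sqlalchemy.engine.url import make_url

    url = make_url("postgresql://scott:tiger@[2001:db8::1]:5432/test")
    # the host is parsed from inside the brackets
    print("%s %s" % (url.host, url.port))

..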
change:: :tags: bug, orm :tickets: 2807 :versions: 0.8.3, 0.9.0b1 Fixed bug where list instrumentation would fail to represent a setslice of ``[0:0]`` correctly, which in particular could occur when using ``insert(0, item)`` with the association proxy. Due to some quirk in Python collections, the issue was much more likely with Python 3 rather than 2. .. change:: :tags: bug, sql :tickets: 2801 :versions: 0.8.3, 0.9.0b1 Fixed regression dating back to 0.7.9 whereby the name of a CTE might not be properly quoted if it was referred to in multiple FROM clauses. .. change:: :tags: mysql, bug :tickets: 2791 :versions: 0.8.3, 0.9.0b1 Updates to MySQL reserved words for versions 5.5, 5.6, courtesy Hanno Schlichting. .. change:: :tags: sql, bug, cte :tickets: 2783 :versions: 0.8.3, 0.9.0b1 Fixed bug in common table expression system where if the CTE were used only as an ``alias()`` construct, it would not render using the WITH keyword. .. change:: :tags: bug, sql :tickets: 2784 :versions: 0.8.3, 0.9.0b1 Fixed bug in :class:`.CheckConstraint` DDL where the "quote" flag from a :class:`.Column` object would not be propagated. .. change:: :tags: bug, orm :tickets: 2699 :versions: 0.8.1 Fixed bug when a query of the form: ``query(SubClass).options(subqueryload(Baseclass.attrname))``, where ``SubClass`` is a joined inh of ``BaseClass``, would fail to apply the ``JOIN`` inside the subquery on the attribute load, producing a cartesian product. The populated results still tended to be correct as additional rows are just ignored, so this issue may be present as a performance degradation in applications that are otherwise working correctly. .. change:: :tags: bug, orm :tickets: 2689 :versions: 0.8.1 Fixed bug in unit of work whereby a joined-inheritance subclass could insert the row for the "sub" table before the parent table, if the two tables had no ForeignKey constraints set up between them. .. change:: :tags: feature, postgresql :tickets: 2676 :versions: 0.8.0 Added support for Postgresql's traditional SUBSTRING function syntax, renders as "SUBSTRING(x FROM y FOR z)" when regular ``func.substring()`` is used. Courtesy Gunnlaugur Þór Briem. .. change:: :tags: bug, tests :tickets: 2669 :pullreq: 41 Fixed an import of "logging" in test_execute which was not working on some linux platforms. .. change:: :tags: bug, orm :tickets: 2674 Improved the error message emitted when a "backref loop" is detected, that is when an attribute event triggers a bidirectional assignment between two other attributes with no end. This condition can occur not just when an object of the wrong type is assigned, but also when an attribute is mis-configured to backref into an existing backref pair. .. change:: :tags: bug, orm :tickets: 2674 A warning is emitted when a MapperProperty is assigned to a mapper that replaces an existing property, if the properties in question aren't plain column-based properties. Replacement of relationship properties is rarely (ever?) what is intended and usually refers to a mapper mis-configuration. This will also warn if a backref configures itself on top of an existing one in an inheritance relationship (which is an error in 0.8). .. changelog:: :version: 0.7.10 :released: Thu Feb 7 2013 .. change:: :tags: engine, bug :tickets: 2604 :versions: 0.8.0b2 Fixed :meth:`.MetaData.reflect` to correctly use the given :class:`.Connection`, if given, without opening a second connection from that connection's :class:`.Engine`. .. 
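A small sketch of reflecting on an existing Connection, which with the fix above no longer checks out a second connection from the Engine; the table is created only for the example::

    from sqlalchemy import create_engine, MetaData

    engine = create_engine('sqlite://')
    engine.execute("CREATE TABLE t (id INTEGER PRIMARY KEY)")

    conn = engine.connect()
    trans = conn.begin()

    metadata = MetaData()
    metadata.reflect(bind=conn)   # uses this connection/transaction directly
    print(metadata.tables.keys())

    trans.rollback()
    conn.close()

..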
change:: :tags: mssql, bug :tickets:2607 :versions: 0.8.0b2 Fixed bug whereby using "key" with Column in conjunction with "schema" for the owning Table would fail to locate result rows due to the MSSQL dialect's "schema rendering" logic's failure to take .key into account. .. change:: :tags: sql, mysql, gae :tickets: 2649 Added a conditional import to the ``gaerdbms`` dialect which attempts to import rdbms_apiproxy vs. rdbms_googleapi to work on both dev and production platforms. Also now honors the ``instance`` attribute. Courtesy Sean Lynch. Also backported enhancements to allow username/password as well as fixing error code interpretation from 0.8. .. change:: :tags: sql, bug :tickets: 2594, 2584 Backported adjustment to ``__repr__`` for :class:`.TypeDecorator` to 0.7, allows :class:`.PickleType` to produce a clean ``repr()`` to help with Alembic. .. change:: :tags: sql, bug :tickets: 2643 Fixed bug where :meth:`.Table.tometadata` would fail if a :class:`.Column` had both a foreign key as well as an alternate ".key" name for the column. .. change:: :tags: mssql, bug :tickets: 2638 Added a Py3K conditional around unnecessary .decode() call in mssql information schema, fixes reflection in Py3k. .. change:: :tags: orm, bug :tickets: 2650 Fixed potential memory leak which could occur if an arbitrary number of :class:`.sessionmaker` objects were created. The anonymous subclass created by the sessionmaker, when dereferenced, would not be garbage collected due to remaining class-level references from the event package. This issue also applies to any custom system that made use of ad-hoc subclasses in conjunction with an event dispatcher. .. change:: :tags: orm, bug :tickets: 2640 :meth:`.Query.merge_result` can now load rows from an outer join where an entity may be ``None`` without throwing an error. .. change:: :tags: sqlite, bug :tickets: 2568 :versions: 0.8.0b2 More adjustment to this SQLite related issue which was released in 0.7.9, to intercept legacy SQLite quoting characters when reflecting foreign keys. In addition to intercepting double quotes, other quoting characters such as brackets, backticks, and single quotes are now also intercepted. .. change:: :tags: sql, bug :tickets: 2631 :versions: 0.8.0b2 Fixed bug where using server_onupdate= without passing the "for_update=True" flag would apply the default object to the server_default, blowing away whatever was there. The explicit for_update=True argument shouldn't be needed with this usage (especially since the documentation shows an example without it being used) so it is now arranged internally using a copy of the given default object, if the flag isn't set to what corresponds to that argument. .. change:: :tags: oracle, bug :tickets: 2620 The Oracle LONG type, while an unbounded text type, does not appear to use the cx_Oracle.LOB type when result rows are returned, so the dialect has been repaired to exclude LONG from having cx_Oracle.LOB filtering applied. .. change:: :tags: oracle, bug :tickets: 2611 Repaired the usage of ``.prepare()`` in conjunction with cx_Oracle so that a return value of ``False`` will result in no call to ``connection.commit()``, hence avoiding "no transaction" errors. Two-phase transactions have now been shown to work in a rudimental fashion with SQLAlchemy and cx_oracle, however are subject to caveats observed with the driver; check the documentation for details. .. 
change:: :tags: orm, bug :tickets: 2624 The :class:`.MutableComposite` type did not allow for the :meth:`.MutableBase.coerce` method to be used, even though the code seemed to indicate this intent, so this now works and a brief example is added. As a side-effect, the mechanics of this event handler have been changed so that new :class:`.MutableComposite` types no longer add per-type global event handlers. Also in 0.8.0b2. .. change:: :tags: orm, bug :tickets: 2583 Fixed Session accounting bug whereby replacing a deleted object in the identity map with another object of the same primary key would raise a "conflicting state" error on rollback(), if the replaced primary key were established either via non-unitofwork-established INSERT statement or by primary key switch of another instance. .. change:: :tags: oracle, bug :tickets: 2561 changed the list of cx_oracle types that are excluded from the setinputsizes() step to only include STRING and UNICODE; CLOB and NCLOB are removed. This is to work around cx_oracle behavior which is broken for the executemany() call. In 0.8, this same change is applied however it is also configurable via the exclude_setinputsizes argument. .. change:: :tags: feature, mysql :tickets: 2523 Added "raise_on_warnings" flag to OurSQL dialect. .. change:: :tags: feature, mysql :tickets: 2554 Added "read_timeout" flag to MySQLdb dialect. .. changelog:: :version: 0.7.9 :released: Mon Oct 01 2012 .. change:: :tags: orm, bug :tickets: Fixed bug mostly local to new AbstractConcreteBase helper where the "type" attribute from the superclass would not be overridden on the subclass to produce the "reserved for base" error message, instead placing a do-nothing attribute there. This was inconsistent vs. using ConcreteBase as well as all the behavior of classical concrete mappings, where the "type" column from the polymorphic base would be explicitly disabled on subclasses, unless overridden explicitly. .. change:: :tags: orm, bug :tickets: A warning is emitted when lazy='dynamic' is combined with uselist=False. This is an exception raise in 0.8. .. change:: :tags: orm, bug :tickets: Fixed bug whereby user error in related-object assignment could cause recursion overflow if the assignment triggered a backref of the same name as a bi-directional attribute on the incorrect class to the same target. An informative error is raised now. .. change:: :tags: orm, bug :tickets: 2539 Fixed bug where incorrect type information would be passed when the ORM would bind the "version" column, when using the "version" feature. Tests courtesy Daniel Miller. .. change:: :tags: orm, bug :tickets: 2566 Extra logic has been added to the "flush" that occurs within Session.commit(), such that the extra state added by an after_flush() or after_flush_postexec() hook is also flushed in a subsequent flush, before the "commit" completes. Subsequent calls to flush() will continue until the after_flush hooks stop adding new state. An "overflow" counter of 100 is also in place, in the event of a broken after_flush() hook adding new content each time. .. change:: :tags: bug, sql :tickets: 2571 Fixed the DropIndex construct to support an Index associated with a Table in a remote schema. .. change:: :tags: bug, sql :tickets: 2574 Fixed bug in over() construct whereby passing an empty list for either partition_by or order_by, as opposed to None, would fail to generate correctly. Courtesy Gunnlaugur Þór Briem. .. 
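For reference, the over() construct that the fix above applies to; the table and columns are invented::

    from sqlalchemy import MetaData, Table, Column, Integer, String, select, func

    metadata = MetaData()
    emp = Table('emp', metadata,
        Column('id', Integer, primary_key=True),
        Column('dept', String(30)),
        Column('salary', Integer),
    )

    stmt = select([
        emp.c.dept,
        emp.c.salary,
        func.row_number().over(
            partition_by=emp.c.dept, order_by=emp.c.salary).label('pos'),
    ])
    # renders ROW_NUMBER() OVER (PARTITION BY emp.dept ORDER BY emp.salary)

..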
change:: :tags: bug, sql :tickets: 2521 Fixed CTE bug whereby positional bound parameters present in the CTEs themselves would corrupt the overall ordering of bound parameters. This primarily affected SQL Server as the platform with positional binds + CTE support. .. change:: :tags: bug, sql :tickets: Fixed more un-intuitivenesses in CTEs which prevented referring to a CTE in a union of itself without it being aliased. CTEs now render uniquely on name, rendering the outermost CTE of a given name only - all other references are rendered just as the name. This even includes other CTE/SELECTs that refer to different versions of the same CTE object, such as a SELECT or a UNION ALL of that SELECT. We are somewhat loosening the usual link between object identity and lexical identity in this case. A true name conflict between two unrelated CTEs now raises an error. .. change:: :tags: bug, sql :tickets: 2512 quoting is applied to the column names inside the WITH RECURSIVE clause of a common table expression according to the quoting rules for the originating Column. .. change:: :tags: bug, sql :tickets: 2518 Fixed regression introduced in 0.7.6 whereby the FROM list of a SELECT statement could be incorrect in certain "clone+replace" scenarios. .. change:: :tags: bug, sql :tickets: 2552 Fixed bug whereby usage of a UNION or similar inside of an embedded subquery would interfere with result-column targeting, in the case that a result-column had the same ultimate name as a name inside the embedded UNION. .. change:: :tags: bug, sql :tickets: 2558 Fixed a regression since 0.6 regarding result-row targeting. It should be possible to use a select() statement with string based columns in it, that is select(['id', 'name']).select_from('mytable'), and have this statement be targetable by Column objects with those names; this is the mechanism by which query(MyClass).from_statement(some_statement) works. At some point the specific case of using select(['id']), which is equivalent to select([literal_column('id')]), stopped working here, so this has been re-instated and of course tested. .. change:: :tags: bug, sql :tickets: 2544 Added missing operators is_(), isnot() to the ColumnOperators base, so that these long-available operators are present as methods like all the other operators. .. change:: :tags: engine, bug :tickets: 2522 Fixed bug whereby a disconnect detect + dispose that occurs when the QueuePool has threads waiting for connections would leave those threads waiting for the duration of the timeout on the old pool (or indefinitely if timeout was disabled). The fix now notifies those waiters with a special exception case and has them move onto the new pool. .. change:: :tags: engine, feature :tickets: 2516 Dramatic improvement in memory usage of the event system; instance-level collections are no longer created for a particular type of event until instance-level listeners are established for that event. .. change:: :tags: engine, bug :tickets: 2529 Added gaerdbms import to mysql/__init__.py, the absense of which was preventing the new GAE dialect from being loaded. .. change:: :tags: engine, bug :tickets: 2553 Fixed cextension bug whereby the "ambiguous column error" would fail to function properly if the given index were a Column object and not a string. Note there are still some column-targeting issues here which are fixed in 0.8. .. change:: :tags: engine, bug :tickets: Fixed the repr() of Enum to include the "name" and "native_enum" flags. Helps Alembic autogenerate. .. 
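A short sketch of the newly added operator methods; the table is illustrative::

    from sqlalchemy import MetaData, Table, Column, Integer, String, select

    metadata = MetaData()
    user = Table('user', metadata,
        Column('id', Integer, primary_key=True),
        Column('nickname', String(50)),
    )

    has_none = select([user]).where(user.c.nickname.is_(None))      # IS NULL
    has_value = select([user]).where(user.c.nickname.isnot(None))   # IS NOT NULL

..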
change:: :tags: sqlite, bug :tickets: 2568 Adjusted a very old bugfix which attempted to work around a SQLite issue that itself was "fixed" as of sqlite 3.6.14, regarding quotes surrounding a table name when using the "foreign_key_list" pragma. The fix has been adjusted to not interfere with quotes that are *actually in the name* of a column or table, to as much a degree as possible; sqlite still doesn't return the correct result for foreign_key_list() if the target table actually has quotes surrounding its name, as *part* of its name (i.e. """mytable"""). .. change:: :tags: sqlite, bug :tickets: 2265 Adjusted column default reflection code to convert non-string values to string, to accommodate old SQLite versions that don't deliver default info as a string. .. change:: :tags: sqlite, feature :tickets: Added support for the localtimestamp() SQL function implemented in SQLite, courtesy Richard Mitchell. .. change:: :tags: postgresql, bug :tickets: 2531 Columns in reflected primary key constraint are now returned in the order in which the constraint itself defines them, rather than how the table orders them. Courtesy Gunnlaugur Þór Briem.. .. change:: :tags: postgresql, bug :tickets: 2570 Added 'terminating connection' to the list of messages we use to detect a disconnect with PG, which appears to be present in some versions when the server is restarted. .. change:: :tags: bug, mysql :tickets: Updated mysqlconnector interface to use updated "client flag" and "charset" APIs, courtesy David McNelis. .. change:: :tags: mssql, bug :tickets: 2538 Fixed compiler bug whereby using a correlated subquery within an ORDER BY would fail to render correctly if the stament also used LIMIT/OFFSET, due to mis-rendering within the ROW_NUMBER() OVER clause. Fix courtesy sayap .. change:: :tags: mssql, bug :tickets: 2545 Fixed compiler bug whereby a given select() would be modified if it had an "offset" attribute, causing the construct to not compile correctly a second time. .. change:: :tags: mssql, bug :tickets: Fixed bug where reflection of primary key constraint would double up columns if the same constraint/table existed in multiple schemas. .. changelog:: :version: 0.7.8 :released: Sat Jun 16 2012 .. change:: :tags: orm, bug :tickets: 2480 Fixed bug whereby subqueryload() from a polymorphic mapping to a target would incur a new invocation of the query for each distinct class encountered in the polymorphic result. .. change:: :tags: orm, bug :tickets: 2491, 1892 Fixed bug in declarative whereby the precedence of columns in a joined-table, composite column (typically for id) would fail to be correct if the columns contained names distinct from their attribute names. This would cause things like primaryjoin conditions made against the entity attributes to be incorrect. Related to as this was supposed to be part of that, this is. .. change:: :tags: orm, feature :tickets: The 'objects' argument to flush() is no longer deprecated, as some valid use cases have been identified. .. change:: :tags: orm, bug :tickets: 2508 Fixed identity_key() function which was not accepting a scalar argument for the identity. . .. change:: :tags: orm, bug :tickets: 2497 Fixed bug whereby populate_existing option would not propagate to subquery eager loaders. . .. change:: :tags: bug, sql :tickets: 2499 added BIGINT to types.__all__, BIGINT, BINARY, VARBINARY to sqlalchemy module namespace, plus test to ensure this breakage doesn't occur again. .. 
change:: :tags: bug, sql :tickets: 2490 Repaired common table expression rendering to function correctly when the SELECT statement contains UNION or other compound expressions, courtesy btbuilder. .. change:: :tags: bug, sql :tickets: 2482 Fixed bug whereby append_column() wouldn't function correctly on a cloned select() construct, courtesy Gunnlaugur Þór Briem. .. change:: :tags: engine, bug :tickets: 2489 Fixed memory leak in C version of result proxy whereby DBAPIs which don't deliver pure Python tuples for result rows would fail to decrement refcounts correctly. The most prominently affected DBAPI is pyodbc. .. change:: :tags: engine, bug :tickets: 2503 Fixed bug affecting Py3K whereby string positional parameters passed to engine/connection execute() would fail to be interpreted correctly, due to __iter__ being present on Py3K string.. .. change:: :tags: postgresql, bug :tickets: 2510 removed unnecessary table clause when reflecting enums,. Courtesy Gunnlaugur Þór Briem. .. change:: :tags: oracle, bug :tickets: 2483 Added ROWID to oracle.*. .. change:: :tags: feature, mysql :tickets: 2484 Added a new dialect for Google App Engine. Courtesy Richie Foreman. .. changelog:: :version: 0.7.7 :released: Sat May 05 2012 .. change:: :tags: orm, bug :tickets: 2477 Fixed issue in unit of work whereby setting a non-None self-referential many-to-one relationship to None would fail to persist the change if the former value was not already loaded.. .. change:: :tags: orm, feature :tickets: 2443 Added prefix_with() method to Query, calls upon select().prefix_with() to allow placement of MySQL SELECT directives in statements. Courtesy Diana Clarke .. change:: :tags: orm, bug :tickets: 2409 Fixed bug in 0.7.6 introduced by whereby column_mapped_collection used against columns that were mapped as joins or other indirect selectables would fail to function. .. change:: :tags: orm, feature :tickets: Added new flag to @validates include_removes. When True, collection remove and attribute del events will also be sent to the validation function, which accepts an additional argument "is_remove" when this flag is used. .. change:: :tags: orm, bug :tickets: 2449 Fixed bug whereby polymorphic_on column that's not otherwise mapped on the class would be incorrectly included in a merge() operation, raising an error. .. change:: :tags: orm, bug :tickets: 2453 Fixed bug in expression annotation mechanics which could lead to incorrect rendering of SELECT statements with aliases and joins, particularly when using column_property(). .. change:: :tags: orm, bug :tickets: 2454 Fixed bug which would prevent OrderingList from being pickleable. Courtesy Jeff Dairiki .. change:: :tags: orm, bug :tickets: Fixed bug in relationship comparisons whereby calling unimplemented methods like SomeClass.somerelationship.like() would produce a recursion overflow, instead of NotImplementedError. .. change:: :tags: bug, sql :tickets: Removed warning when Index is created with no columns; while this might not be what the user intended, it is a valid use case as an Index could be a placeholder for just an index of a certain name. .. change:: :tags: feature, sql :tickets: Added new connection event dbapi_error(). Is called for all DBAPI-level errors passing the original DBAPI exception before SQLAlchemy modifies the state of the cursor. .. change:: :tags: bug, sql :tickets: If conn.begin() fails when calling "with engine.begin()", the newly acquired Connection is closed explicitly before propagating the exception onward normally. .. 
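For context, the "with engine.begin()" block referred to above: on success it commits, and on failure it now also closes the freshly acquired connection before re-raising. A minimal sketch::

    from sqlalchemy import create_engine

    engine = create_engine('sqlite://')

    with engine.begin() as conn:
        conn.execute("CREATE TABLE t (x INTEGER)")
        conn.execute("INSERT INTO t (x) VALUES (1)")

..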
change:: :tags: bug, sql :tickets: 2474 Add BINARY, VARBINARY to types.__all__. .. change:: :tags: mssql, feature :tickets: Added interim create_engine flag supports_unicode_binds to PyODBC dialect, to force whether or not the dialect passes Python unicode literals to PyODBC or not. .. change:: :tags: mssql, bug :tickets: Repaired the use_scope_identity create_engine() flag when using the pyodbc dialect. Previously this flag would be ignored if set to False. When set to False, you'll get "SELECT @@identity" after each INSERT to get at the last inserted ID, for those tables which have "implicit_returning" set to False. .. change:: :tags: mssql, bug :tickets: 2468 UPDATE..FROM syntax with SQL Server requires that the updated table be present in the FROM clause when an alias of that table is also present in the FROM clause. The updated table is now always present in the FROM, when FROM is present in the first place. Courtesy sayap. .. change:: :tags: postgresql, feature :tickets: 2445 Added new for_update/with_lockmode() options for Postgresql: for_update="read"/ with_lockmode("read"), for_update="read_nowait"/ with_lockmode("read_nowait"). These emit "FOR SHARE" and "FOR SHARE NOWAIT", respectively. Courtesy Diana Clarke .. change:: :tags: postgresql, bug :tickets: 2473 removed unnecessary table clause when reflecting domains. .. change:: :tags: bug, mysql :tickets: 2460 Fixed bug whereby column name inside of "KEY" clause for autoincrement composite column with InnoDB would double quote a name that's a reserved word. Courtesy Jeff Dairiki. .. change:: :tags: bug, mysql :tickets: Fixed bug whereby get_view_names() for "information_schema" schema would fail to retrieve views marked as "SYSTEM VIEW". courtesy Matthew Turland. .. change:: :tags: bug, mysql :tickets: 2467 Fixed bug whereby if cast() is used on a SQL expression whose type is not supported by cast() and therefore CAST isn't rendered by the dialect, the order of evaluation could change if the casted expression required that it be grouped; grouping is now applied to those expressions. .. change:: :tags: sqlite, feature :tickets: 2475 Added SQLite execution option "sqlite_raw_colnames=True", will bypass attempts to remove "." from column names returned by SQLite cursor.description. .. change:: :tags: sqlite, bug :tickets: 2525 When the primary key column of a Table is replaced, such as via extend_existing, the "auto increment" column used by insert() constructs is reset. Previously it would remain referring to the previous primary key column. .. changelog:: :version: 0.7.6 :released: Wed Mar 14 2012 .. change:: :tags: orm, bug :tickets: 2424 Fixed event registration bug which would primarily show up as events not being registered with sessionmaker() instances created after the event was associated with the Session class. .. change:: :tags: orm, bug :tickets: 2425 Fixed bug whereby a primaryjoin condition with a "literal" in it would raise an error on compile with certain kinds of deeply nested expressions which also needed to render the same bound parameter name more than once. .. change:: :tags: orm, feature :tickets: Added "no_autoflush" context manager to Session, used with with: will temporarily disable autoflush. .. change:: :tags: orm, feature :tickets: 1859 Added cte() method to Query, invokes common table expression support from the Core (see below). .. change:: :tags: orm, bug :tickets: 2403 Removed the check for number of rows affected when doing a multi-delete against mapped objects. 
If an ON DELETE CASCADE exists between two rows, we can't get an accurate rowcount from the DBAPI; this particular count is not supported on most DBAPIs in any case; MySQLdb is the notable case where it is. .. change:: :tags: orm, bug :tickets: 2409 Fixed bug whereby objects using attribute_mapped_collection or column_mapped_collection could not be pickled. .. change:: :tags: orm, bug :tickets: 2406 Fixed bug whereby MappedCollection would not get the appropriate collection instrumentation if it were only used in a custom subclass that used @collection.internally_instrumented. .. change:: :tags: orm, bug :tickets: 2419 Fixed bug whereby SQL adaption mechanics would fail in a very nested scenario involving joined-inheritance, joinedload(), limit(), and a derived function in the columns clause. .. change:: :tags: orm, bug :tickets: 2417 Fixed the repr() for CascadeOptions to include refresh-expire. Also reworked how CascadeOptions is represented internally. .. change:: :tags: orm, feature :tickets: 2400 Added the ability to query for Table-bound column names when using query(sometable).filter_by(colname=value). .. change:: :tags: orm, bug :tickets: Improved the "declarative reflection" example to support single-table inheritance, multiple calls to prepare(), tables that are present in alternate schemas, and establishing only a subset of classes as reflected. .. change:: :tags: orm, bug :tickets: 2390 Scaled back the test applied within flush() to check for UPDATE against partially NULL PK within one table to only actually happen if there's really an UPDATE to occur. .. change:: :tags: orm, bug :tickets: 2352 Fixed bug whereby if a method name conflicted with a column name, a TypeError would be raised when the mapper tried to inspect the __get__() method on the method object. .. change:: :tags: bug, sql :tickets: 2427 Fixed memory leak in core which would occur when C extensions were used with particular types of result fetches, in particular when orm query.count() was called. .. change:: :tags: bug, sql :tickets: 2398 Fixed issue whereby attribute-based column access on a row would raise AttributeError with the non-C version, NoSuchColumnError with the C version. Now raises AttributeError in both cases. .. change:: :tags: feature, sql :tickets: 1859 Added support for SQL standard common table expressions (CTE), allowing SELECT objects as the CTE source (DML not yet supported). This is invoked via the cte() method on any select() construct (a brief sketch follows this group of entries). .. change:: :tags: bug, sql :tickets: 2392 Added support for using the .key of a Column as a string identifier in a result set row. The .key is currently listed as an "alternate" name for a column, and is superseded by the name of a column which has that key value as its regular name. For the next major release of SQLAlchemy we may reverse this precedence so that .key takes precedence, but this is not decided on yet. .. change:: :tags: bug, sql :tickets: 2413 A warning is emitted when a not-present column is stated in the values() clause of an insert() or update() construct. Will move to an exception in 0.8. .. change:: :tags: bug, sql :tickets: 2396 A significant change to how labeling is applied to columns in SELECT statements allows "truncated" labels, that is label names that are generated in Python which exceed the maximum identifier length (note this is configurable via label_length on create_engine()), to be properly referenced when rendered inside of a subquery, as well as to be present in a result set row using their original in-Python names.
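A minimal sketch of the new cte() method mentioned above, using ad-hoc table()/column() constructs; the "orders" table, its columns, and the filtering value are purely illustrative::

    from sqlalchemy.sql import column, select, table

    orders = table("orders", column("region"), column("amount"))

    # the CTE is defined from a plain select() and then selected from
    regional_sales = select([orders.c.region, orders.c.amount]).\
        cte(name="regional_sales")

    stmt = select([regional_sales.c.region]).\
        where(regional_sales.c.amount > 500)

    # str(stmt) renders: WITH regional_sales AS (SELECT ...) SELECT ...

..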
change:: :tags: bug, sql :tickets: 2402 Fixed bug in new "autoload_replace" flag which would fail to preserve the primary key constraint of the reflected table. .. change:: :tags: bug, sql :tickets: 2380 Index will raise when arguments passed cannot be interpreted as columns or expressions. Will warn when Index is created with no columns at all. .. change:: :tags: engine, feature :tickets: 2407 Added "no_parameters=True" execution option for connections. If no parameters are present, will pass the statement as cursor.execute(statement), thereby invoking the DBAPIs behavior when no parameter collection is present; for psycopg2 and mysql-python, this means not interpreting % signs in the string. This only occurs with this option, and not just if the param list is blank, as otherwise this would produce inconsistent behavior of SQL expressions that normally escape percent signs (and while compiling, can't know ahead of time if parameters will be present in some cases). .. change:: :tags: engine, bug :tickets: Added execution_options() call to MockConnection (i.e., that used with strategy="mock") which acts as a pass through for arguments. .. change:: :tags: engine, feature :tickets: 2378 Added pool_reset_on_return argument to create_engine, allows control over "connection return" behavior. Also added new arguments 'rollback', 'commit', None to pool.reset_on_return to allow more control over connection return activity. .. change:: :tags: engine, feature :tickets: Added some decent context managers to Engine, Connection:: with engine.begin() as conn: and:: with engine.connect() as conn: Both close out the connection when done, commit or rollback transaction with errors on engine.begin(). .. change:: :tags: sqlite, bug :tickets: 2432 Fixed bug in C extensions whereby string format would not be applied to a Numeric value returned as integer; this affected primarily SQLite which does not maintain numeric scale settings. .. change:: :tags: mssql, feature :tickets: 2430 Added support for MSSQL INSERT, UPDATE, and DELETE table hints, using new with_hint() method on UpdateBase. .. change:: :tags: feature, mysql :tickets: 2386 Added support for MySQL index and primary key constraint types (i.e. USING) via new mysql_using parameter to Index and PrimaryKeyConstraint, courtesy Diana Clarke. .. change:: :tags: feature, mysql :tickets: 2394 Added support for the "isolation_level" parameter to all MySQL dialects. Thanks to mu_mind for the patch here. .. change:: :tags: oracle, feature :tickets: 2399 Added a new create_engine() flag coerce_to_decimal=False, disables the precision numeric handling which can add lots of overhead by converting all numeric values to Decimal. .. change:: :tags: oracle, bug :tickets: 2401 Added missing compilation support for LONG .. change:: :tags: oracle, bug :tickets: 2435 Added 'LEVEL' to the list of reserved words for Oracle. .. change:: :tags: examples, bug :tickets: Altered _params_from_query() function in Beaker example to pull bindparams from the fully compiled statement, as a quick means to get everything including subqueries in the columns clause, etc. .. changelog:: :version: 0.7.5 :released: Sat Jan 28 2012 .. change:: :tags: orm, bug :tickets: 2389 Fixed issue where modified session state established after a failed flush would be committed as part of the subsequent transaction that begins automatically after manual call to rollback(). 
The state of the session is checked within rollback(), and if new state is present, a warning is emitted and restore_snapshot() is called a second time, discarding those changes. .. change:: :tags: orm, bug :tickets: 2345 Fixed regression from 0.7.4 whereby using an already instrumented column from a superclass as "polymorphic_on" failed to resolve the underlying Column. .. change:: :tags: orm, bug :tickets: 2370 Raise an exception if xyzload_all() is used inappropriately with two non-connected relationships. .. change:: :tags: orm, feature :tickets: Added "class_registry" argument to declarative_base(). Allows two or more declarative bases to share the same registry of class names. .. change:: :tags: orm, feature :tickets: query.filter() accepts multiple criteria which will join via AND, i.e. query.filter(x==y, z>q, ...). .. change:: :tags: orm, feature :tickets: 2351 Added new capability to relationship loader options to allow "default" loader strategies. Pass '*' to any of joinedload(), lazyload(), subqueryload(), or noload() and that becomes the loader strategy used for all relationships, except for those explicitly stated in the Query (a brief sketch follows this group of entries). Thanks to up-and-coming contributor Kent Bower for an exhaustive and well-written test suite! .. change:: :tags: orm, bug :tickets: 2367 Fixed bug whereby event.listen(SomeClass) forced an entirely unnecessary compile of the mapper, making events very hard to set up at module import time (nobody noticed this??). .. change:: :tags: orm, bug :tickets: Fixed bug whereby hybrid_property didn't work as a kw arg in any(), has(). .. change:: :tags: orm :tickets: Fixed regression from 0.6 whereby if the "load_on_pending" relationship() flag were used where a non-"get()" lazy clause needed to be emitted on a pending object, it would fail to load. .. change:: :tags: orm, bug :tickets: 2371 Ensure pickleability of all ORM exceptions for multiprocessing compatibility. .. change:: :tags: orm, bug :tickets: 2353 Implemented standard "can't set attribute" / "can't delete attribute" AttributeError when setattr/delattr is used on a hybrid that doesn't define fset or fdel. .. change:: :tags: orm, bug :tickets: 2362 Fixed bug where an unpickled object didn't have enough of its state set up to work correctly within the unpickle() event established by the mutable object extension, if the object needed ORM attribute access within __eq__() or similar. .. change:: :tags: orm, bug :tickets: 2374 Fixed bug where "merge" cascade could mis-interpret an unloaded attribute, if the load_on_pending flag were used with relationship(). Thanks to Kent Bower for tests. .. change:: :tags: orm, feature :tickets: 2356 New declarative reflection example added; illustrates how best to mix table reflection with declarative, and also makes use of the new reflection features described in the entries that follow. .. change:: :tags: feature, sql :tickets: 2356 New reflection feature "autoload_replace"; when set to False on Table, the Table can be autoloaded without existing columns being replaced. Allows more flexible chains of Table construction/reflection to be constructed, including that it helps with combining Declarative with table reflection. See the new example on the wiki. .. change:: :tags: bug, sql :tickets: 2356 Improved the API for add_column() such that if the same column is added to its own table, an error is not raised and the constraints don't get doubled up. Also helps with some reflection/declarative patterns.
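A minimal sketch of the wildcard loader strategy option described above; the Parent/Child mapping and the in-memory SQLite engine are assumptions made for the example only::

    from sqlalchemy import Column, ForeignKey, Integer, create_engine
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import Session, joinedload, lazyload, relationship

    Base = declarative_base()

    class Parent(Base):
        __tablename__ = "parent"
        id = Column(Integer, primary_key=True)
        children = relationship("Child")

    class Child(Base):
        __tablename__ = "child"
        id = Column(Integer, primary_key=True)
        parent_id = Column(Integer, ForeignKey("parent.id"))

    session = Session(create_engine("sqlite://"))

    # '*' establishes lazy loading as the default for every relationship;
    # the explicit joinedload() for Parent.children still takes precedence
    query = session.query(Parent).options(
        lazyload("*"), joinedload(Parent.children))

..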
change:: :tags: feature, sql :tickets: Added "false()" and "true()" expression constructs to sqlalchemy.sql namespace, though not part of __all__ as of yet. .. change:: :tags: feature, sql :tickets: 2361 Dialect-specific compilers now raise CompileError for all type/statement compilation issues, instead of InvalidRequestError or ArgumentError. The DDL for CREATE TABLE will re-raise CompileError to include table/column information for the problematic column. .. change:: :tags: bug, sql :tickets: 2381 Fixed issue where the "required" exception would not be raised for bindparam() with required=True, if the statement were given no parameters at all. .. change:: :tags: engine, bug :tickets: 2371 Added __reduce__ to StatementError, DBAPIError, column errors so that exceptions are pickleable, as when using multiprocessing. However, not all DBAPIs support this yet, such as psycopg2. .. change:: :tags: engine, bug :tickets: 2382 Improved error messages when a non-string or invalid string is passed to any of the date/time processors used by SQLite, including C and Python versions. .. change:: :tags: engine, bug :tickets: 2377 Fixed bug whereby a table-bound Column object named "_" which matched a column labeled as "_" could match inappropriately when targeting in a result set row. .. change:: :tags: engine, bug :tickets: 2384 Fixed bug in "mock" strategy whereby correct DDL visit method wasn't called, resulting in "CREATE/DROP SEQUENCE" statements being duplicated .. change:: :tags: sqlite, bug :tickets: 2364 the "name" of an FK constraint in SQLite is reflected as "None", not "0" or other integer value. SQLite does not appear to support constraint naming in any case. .. change:: :tags: sqlite, bug :tickets: 2368 sql.false() and sql.true() compile to 0 and 1, respectively in sqlite .. change:: :tags: sqlite, bug :tickets: removed an erroneous "raise" in the SQLite dialect when getting table names and view names, where logic is in place to fall back to an older version of SQLite that doesn't have the "sqlite_temp_master" table. .. change:: :tags: bug, mysql :tickets: 2376 fixed regexp that filters out warnings for non-reflected "PARTITION" directives, thanks to George Reilly .. change:: :tags: mssql, bug :tickets: 2340 Adjusted the regexp used in the mssql.TIME type to ensure only six digits are received for the "microseconds" portion of the value, which is expected by Python's datetime.time(). Note that support for sending microseconds doesn't seem to be possible yet with pyodbc at least. .. change:: :tags: mssql, bug :tickets: 2347 Dropped the "30 char" limit on pymssql, based on reports that it's doing things better these days. pymssql hasn't been well tested and as the DBAPI is in flux it's still not clear what the status is on this driver and how SQLAlchemy's implementation should adapt. .. change:: :tags: oracle, bug :tickets: 2388 Added ORA-03135 to the never ending list of oracle "connection lost" errors .. change:: :tags: core, bug :tickets: 2379 Changed LRUCache, used by the mapper to cache INSERT/UPDATE/DELETE statements, to use an incrementing counter instead of a timestamp to track entries, for greater reliability versus using time.time(), which can cause test failures on some platforms. .. 
change:: :tags: core, bug :tickets: 2383 Added a boolean check for the "finalize" function within the pool connection proxy's weakref callback before calling it, so that a warning isn't emitted that this function is None when the application is exiting and gc has removed the function from the module before the weakref callback was invoked. .. change:: :tags: bug, py3k :tickets: 2348 Fixed inappropriate usage of the util.py3k flag and renamed it to util.py3k_warning, since this flag is intended to detect the -3 flag series of import restrictions only. .. change:: :tags: examples, feature :tickets: 2313 Simplified the versioning example a bit to use a declarative mixin as well as an event listener, instead of a metaclass + SessionExtension. .. change:: :tags: examples, bug :tickets: 2346 Fixed large_collection.py to close the session before dropping tables. .. changelog:: :version: 0.7.4 :released: Fri Dec 09 2011 .. change:: :tags: orm, bug :tickets: 2315 Fixed backref behavior when "popping" the value off of a many-to-one in response to a removal from a stale one-to-many - the operation is skipped, since the many-to-one has since been updated. .. change:: :tags: orm, bug :tickets: 2264 After some years of not doing this, added more granularity to the "is X a parent of Y" functionality, which is used when determining if the FK on "Y" needs to be "nulled out" as well as if "Y" should be deleted with delete-orphan cascade. The test now takes into account the Python identity of the parent as well as its identity key, to see if the last known parent of Y is definitely X. If a decision can't be made, a StaleDataError is raised. The conditions where this error is raised are fairly rare, requiring that the previous parent was garbage collected, and previously could very well inappropriately update/delete a record that's since moved onto a new parent, though there may be some cases where "silent success" occurred previously that will now raise in the face of ambiguity. Expiring "Y" resets the "parent" tracker, meaning X.remove(Y) could then end up deleting Y even if X is stale, but this is the same behavior as before; it's advised to expire X also in that case. .. change:: :tags: orm, bug :tickets: 2310 Fixed inappropriate evaluation of a user-mapped object in a boolean context within query.get(). Also in 0.6.9. .. change:: :tags: orm, bug :tickets: 2304 Added missing comma to PASSIVE_RETURN_NEVER_SET symbol. .. change:: :tags: orm, bug :tickets: 1776 Cls.column.collate("some collation") now works (a brief sketch follows this group of entries). Also in 0.6.9. .. change:: :tags: orm, bug :tickets: 2309 The value of a composite attribute is now expired after an insert or update operation, instead of regenerated in place. This ensures that a column value which is expired within a flush will be loaded first, before the composite is regenerated using that value. .. change:: :tags: orm, bug :tickets: 2309, 2308 The fix above also emits the "refresh" event when the composite value is loaded on access, even if all column values were already present, as is appropriate. This fixes the "mutable" extension, which relies upon the "load" event to ensure the _parents dictionary is up to date. Thanks to Scott Torborg for the test case here. .. change:: :tags: orm, bug :tickets: 2312 Fixed bug whereby a subclass of a subclass using concrete inheritance in conjunction with the new ConcreteBase or AbstractConcreteBase would fail to apply the subclasses deeper than one level to the "polymorphic loader" of each base.
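A minimal sketch of the Cls.column.collate() usage noted above; the User mapping, the in-memory engine, and the "NOCASE" collation are illustrative assumptions only::

    from sqlalchemy import Column, Integer, String, create_engine
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import Session

    Base = declarative_base()

    class User(Base):
        __tablename__ = "user"
        id = Column(Integer, primary_key=True)
        name = Column(String(50))

    session = Session(create_engine("sqlite://"))

    # collate() on the mapped attribute, usable in ORDER BY or comparisons
    query = session.query(User).order_by(User.name.collate("NOCASE"))

..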
change:: :tags: orm, bug :tickets: 2312 Fixed bug whereby a subclass of a subclass using the new AbstractConcreteBase would fail to acquire the correct "base_mapper" attribute when the "base" mapper was generated, thereby causing failures later on. .. change:: :tags: orm, bug :tickets: 2316 Fixed bug whereby column_property() created against an ORM-level column could be treated as a distinct entity when producing certain kinds of joined-inh joins. .. change:: :tags: orm, bug :tickets: 2297 Fixed the error formatting raised when a tuple is inadvertently passed to session.query(). Also in 0.6.9. .. change:: :tags: orm, bug :tickets: 2328 Calls to query.join() to a single-table inheritance subclass are now tracked, and are used to eliminate the additional WHERE.. IN criterion normally tacked on with single table inheritance, since the join should accommodate it. This allows OUTER JOIN to a single table subclass to produce the correct results, and overall will produce fewer WHERE criterion when dealing with single table inheritance joins. .. change:: :tags: orm, bug :tickets: 2339 __table_args__ can now be passed as an empty tuple as well as an empty dict. Thanks to Fayaz Yusuf Khan for the patch. .. change:: :tags: orm, bug :tickets: 2325 Updated the warning message when setting delete-orphan without delete to no longer refer to 0.6, as we never got around to upgrading this to an exception. Ideally this might be better as an exception but it's not critical either way. .. change:: :tags: orm, feature :tickets: 2345, 2238 polymorphic_on now accepts many new kinds of values: * standalone expressions that aren't otherwise mapped * column_property() objects * string names of any column_property() or attribute name of a mapped Column The docs include an example using the case() construct, which is likely to be a common construct used here (a brief sketch also follows this group of entries). As part of this change, standalone expressions in polymorphic_on propagate to single-table inheritance subclasses so that they are used in the WHERE/JOIN clause to limit rows to that subclass, as is the usual behavior. .. change:: :tags: orm, feature :tickets: 2301 IdentitySet supports the - operator the same as difference(), handy when dealing with Session.dirty etc. .. change:: :tags: orm, feature :tickets: Added new value for Column autoincrement called "ignore_fk", can be used to force autoincrement on a column that's still part of a ForeignKeyConstraint. New example in the relationship docs illustrates its use. .. change:: :tags: orm, bug :tickets: Fixed bug in get_history() when referring to a composite attribute that has no value; added coverage for get_history() regarding composites, which is otherwise just a userland function. .. change:: :tags: bug, sql :tickets: 2316, 2261 Related to the above, made some adjustments to the earlier change regarding the "from" list on a select(). The _froms collection is no longer memoized, as this simplifies various use cases and removes the need for a "warning" if a column is attached to a table after it was already used in an expression - the select() construct will now always produce the correct expression. There's probably no real-world performance hit here; select() objects are almost always made ad-hoc, and systems that wish to optimize the re-use of a select() would be using the "compiled_cache" feature. A hit which would occur when calling select.bind has been reduced, but the vast majority of users shouldn't be using "bound metadata" anyway :).
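A minimal single-table-inheritance sketch of the "standalone expression as polymorphic_on" capability described above, using the case() construct; the Employee/Manager classes and the discriminator values are hypothetical::

    from sqlalchemy import Column, Integer, String, case
    from sqlalchemy.ext.declarative import declarative_base

    Base = declarative_base()

    class Employee(Base):
        __tablename__ = "employee"
        id = Column(Integer, primary_key=True)
        discriminator = Column(String(50))

        __mapper_args__ = {
            # a standalone SQL expression used as the discriminator
            "polymorphic_on": case(
                [(discriminator == "manager", "manager")],
                else_="employee"),
            "polymorphic_identity": "employee",
        }

    class Manager(Employee):
        __mapper_args__ = {"polymorphic_identity": "manager"}

..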
change:: :tags: feature, sql :tickets: 2166, 1944 The update() construct can now accommodate multiple tables in the WHERE clause, which will render an "UPDATE..FROM" construct, recognized by Postgresql and MSSQL. When compiled on MySQL, will instead generate "UPDATE t1, t2, ..". MySQL additionally can render against multiple tables in the SET clause, if Column objects are used as keys in the "values" parameter or generative method (a brief sketch follows this group of entries). .. change:: :tags: feature, sql :tickets: 77 Added accessor to types called "python_type", returns the rudimentary Python type object for a particular TypeEngine instance, if known, else raises NotImplementedError. .. change:: :tags: bug, sql :tickets: 2261, 2319 Further tweak to the earlier fix, so that generative methods work a bit better off of cloned select() constructs (this is almost a non-use case, though). In particular this allows with_only_columns() to behave more consistently. Added additional documentation to with_only_columns() to clarify expected behavior, which changed as a result of this fix. .. change:: :tags: engine, bug :tickets: 2317 Fixed bug whereby transaction.rollback() would throw an error on an invalidated connection if the transaction were a two-phase or savepoint transaction. For plain transactions, rollback() is a no-op if the connection is invalidated, so while it wasn't 100% clear if it should be a no-op, at least now the interface is consistent. .. change:: :tags: feature, schema :tickets: Added new support for remote "schemas": .. change:: :tags: schema :tickets: MetaData() accepts "schema" and "quote_schema" arguments, which will be applied to the same-named arguments of a Table or Sequence which leaves these at their default of ``None``. .. change:: :tags: schema :tickets: Sequence accepts a "quote_schema" argument. .. change:: :tags: schema :tickets: tometadata() for Table will use the "schema" of the incoming MetaData for the new Table if the schema argument is explicitly "None". .. change:: :tags: schema :tickets: Added CreateSchema and DropSchema DDL constructs - these accept just the string name of a schema and a "quote" flag. .. change:: :tags: schema :tickets: When using a default "schema" with MetaData, ForeignKey will also assume the "default" schema when locating a remote table. This allows the "schema" argument on MetaData to be applied to any set of Table objects that otherwise don't have a "schema". .. change:: :tags: schema :tickets: 1679 A "has_schema" method has been implemented on dialect, but only works on Postgresql so far. Courtesy Manlio Perillo. .. change:: :tags: feature, schema :tickets: 1410 The "extend_existing" flag on Table now allows for the reflection process to take effect for a Table object that's already been defined; when autoload=True and extend_existing=True are both set, the full set of columns will be reflected from the Table which will then *overwrite* those columns already present, rather than no activity occurring. Columns that are present directly in the autoload run will be used as always, however. .. change:: :tags: bug, schema :tickets: Fixed bug whereby TypeDecorator would return a stale value for _type_affinity, when using a TypeDecorator that "switches" types, like the CHAR/UUID type. .. change:: :tags: bug, schema :tickets: Fixed bug whereby the "order_by='foreign_key'" option to Inspector.get_table_names wasn't implementing the sort properly, replaced with the existing sort algorithm.
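A minimal sketch of the multi-table UPDATE capability described above, using ad-hoc table()/column() constructs; the tables, columns, and values are purely illustrative::

    from sqlalchemy.sql import column, table

    users = table("users", column("id"), column("name"))
    addresses = table("addresses", column("user_id"), column("email"))

    # referencing a second table in the WHERE clause renders UPDATE..FROM
    # on Postgresql/MSSQL, or the multi-table UPDATE syntax on MySQL
    stmt = users.update().\
        values(name="ed brown").\
        where(users.c.id == addresses.c.user_id).\
        where(addresses.c.email == "ed@example.com")

..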
change:: :tags: bug, schema :tickets: 2305 The "name" of a column-level CHECK constraint, if present, is now rendered in the CREATE TABLE statement using "CONSTRAINT <name> CHECK <expression>". .. change:: :tags: pyodbc, bug :tickets: 2318 pyodbc-based dialects now parse the pyodbc version string accurately, as far as observed pyodbc version strings go, including such gems as "py3-3.0.1-beta4". .. change:: :tags: postgresql, bug :tickets: 2311 The Postgresql dialect memoizes that an ENUM of a particular name was processed during a create/drop sequence. This allows a create/drop sequence to work without any calls to "checkfirst", and also means with "checkfirst" turned on it only needs to check for the ENUM once. .. change:: :tags: postgresql, feature :tickets: Added create_type constructor argument to pg.ENUM. When False, no CREATE/DROP or checking for the type will be performed as part of a table create/drop event; only the create()/drop() methods called directly will do this (a brief sketch follows this group of entries). Helps with Alembic "offline" scripts. .. change:: :tags: mssql, feature :tickets: 822 Lifted the restriction on SAVEPOINT for SQL Server. All tests pass using it; it's not known if there are deeper issues, however. .. change:: :tags: mssql, bug :tickets: 2336 Repaired the with_hint() feature, which wasn't implemented correctly on MSSQL - usually used for the "WITH (NOLOCK)" hint (which you shouldn't be using anyway! use snapshot isolation instead :)). .. change:: :tags: mssql, bug :tickets: 2318 Use new pyodbc version detection for the _need_decimal_fix option. .. change:: :tags: mssql, bug :tickets: 2343 Don't cast "table name" as NVARCHAR on SQL Server 2000. Still mostly in the dark what incantations are needed to make PyODBC work fully with FreeTDS 0.91 here, however. .. change:: :tags: mssql, bug :tickets: 2269 Decode incoming values when retrieving the list of index names and the names of columns within those indexes. .. change:: :tags: bug, mysql :tickets: Unicode adjustments allow the latest pymysql (post 0.4) to pass 100% on Python 2. .. change:: :tags: ext, feature :tickets: Added an example to the hybrid docs of a "transformer" - a hybrid that returns a query-transforming callable in combination with a custom comparator. Uses a new method on Query called with_transformation(). The use case here is fairly experimental, but only adds one line of code to Query. .. change:: :tags: ext, bug :tickets: The @compiles decorator raises an informative error message when no "default" compilation handler is present, rather than KeyError. .. change:: :tags: examples, bug :tickets: Fixed bug in the history_meta.py example where the "unique" flag was not removed from a single-table-inheritance subclass which generates columns to put up onto the base. .. changelog:: :version: 0.7.3 :released: Sun Oct 16 2011 .. change:: :tags: general :tickets: 2279 Adjusted the "importlater" mechanism, which is used internally to resolve import cycles, such that the usage of __import__ is completed when the import of sqlalchemy or sqlalchemy.orm is done, thereby avoiding any usage of __import__ after the application starts new threads; fixes the referenced issue. Also in 0.6.9. .. change:: :tags: orm :tickets: 2298 Improved query.join() such that the "left" side can more flexibly be a non-ORM selectable, such as a subquery. A selectable placed in select_from() will now be used as the left side, favored over implicit usage of a mapped entity. If the join still fails based on lack of foreign keys, the error message includes this detail. Thanks to brianrhude on IRC for the test case.
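A minimal sketch of the pg.ENUM create_type flag described above; the enum values, type name, and table are illustrative assumptions::

    from sqlalchemy import Column, Integer, MetaData, Table
    from sqlalchemy.dialects.postgresql import ENUM

    metadata = MetaData()

    # create_type=False: a table create/drop will not emit CREATE TYPE /
    # DROP TYPE; the type's own create()/drop() methods must be called
    # directly against an engine or connection
    status_type = ENUM("pending", "complete", name="order_status",
                       create_type=False)

    orders = Table("orders", metadata,
                   Column("id", Integer, primary_key=True),
                   Column("status", status_type))

..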
change:: :tags: orm :tickets: 2241 Added after_soft_rollback() Session event. This event fires unconditionally whenever rollback() is called, regardless of if an actual DBAPI level rollback occurred. This event is specifically designed to allow operations with the Session to proceed after a rollback when the Session.is_active is True. .. change:: :tags: orm :tickets: added "adapt_on_names" boolean flag to orm.aliased() construct. Allows an aliased() construct to link the ORM entity to a selectable that contains aggregates or other derived forms of a particular attribute, provided the name is the same as that of the entity mapped column. .. change:: :tags: orm :tickets: Added new flag expire_on_flush=False to column_property(), marks those properties that would otherwise be considered to be "readonly", i.e. derived from SQL expressions, to retain their value after a flush has occurred, including if the parent object itself was involved in an update. .. change:: :tags: orm :tickets: 2237 Enhanced the instrumentation in the ORM to support Py3K's new argument style of "required kw arguments", i.e. fn(a, b, \*, c, d), fn(a, b, \*args, c, d). Argument signatures of mapped object's __init__ method will be preserved, including required kw rules. .. change:: :tags: orm :tickets: 2282 Fixed bug in unit of work whereby detection of "cycles" among classes in highly interlinked patterns would not produce a deterministic result; thereby sometimes missing some nodes that should be considered cycles and causing further issues down the road. Note this bug is in 0.6 also; not backported at the moment. .. change:: :tags: orm :tickets: Fixed a variety of synonym()-related regressions from 0.6: * making a synonym against a synonym now works. * synonyms made against a relationship() can be passed to query.join(), options sent to query.options(), passed by name to query.with_parent(). .. change:: :tags: orm :tickets: 2287 Fixed bug whereby mapper.order_by attribute would be ignored in the "inner" query within a subquery eager load. . Also in 0.6.9. .. change:: :tags: orm :tickets: 2267 Identity map .discard() uses dict.pop(,None) internally instead of "del" to avoid KeyError/warning during a non-determinate gc teardown .. change:: :tags: orm :tickets: 2253 Fixed regression in new composite rewrite where deferred=True option failed due to missing import .. change:: :tags: orm :tickets: 2248 Reinstated "comparator_factory" argument to composite(), removed when 0.7 was released. .. change:: :tags: orm :tickets: 2247 Fixed bug in query.join() which would occur in a complex multiple-overlapping path scenario, where the same table could be joined to twice. Thanks *much* to Dave Vitek for the excellent fix here. .. change:: :tags: orm :tickets: Query will convert an OFFSET of zero when slicing into None, so that needless OFFSET clauses are not invoked. .. change:: :tags: orm :tickets: Repaired edge case where mapper would fail to fully update internal state when a relationship on a new mapper would establish a backref on the first mapper. .. change:: :tags: orm :tickets: 2260 Fixed bug whereby if __eq__() was redefined, a relationship many-to-one lazyload would hit the __eq__() and fail. Does not apply to 0.6.9. .. change:: :tags: orm :tickets: 2196 Calling class_mapper() and passing in an object that is not a "type" (i.e. a class that could potentially be mapped) now raises an informative ArgumentError, rather than UnmappedClassError. .. change:: :tags: orm :tickets: New event hook, MapperEvents.after_configured(). 
Called after a configure() step has completed and mappers were in fact affected. Theoretically this event is called once per application, unless new mappings are constructed after existing ones have been used already. .. change:: :tags: orm :tickets: 2281 When an open Session is garbage collected, the objects within it which remain are considered detached again when they are add()-ed to a new Session. This is accomplished by an extra check that the previous "session_key" doesn't actually exist among the pool of Sessions. .. change:: :tags: orm :tickets: 2239 New declarative features: * __declare_last__() method, establishes an event listener for the class method that will be called when mappers are completed with the final "configure" step. * __abstract__ flag. The class will not be mapped at all when this flag is present on the class. * New helper classes ConcreteBase, AbstractConcreteBase. Allow concrete mappings using declarative which automatically set up the "polymorphic_union" when the "configure" mapper step is invoked. * The mapper itself has semi-private methods that allow the "with_polymorphic" selectable to be assigned to the mapper after it has already been configured. .. change:: :tags: orm :tickets: 2283 Declarative will warn when a subclass' base uses @declared_attr for a regular column - this attribute does not propagate to subclasses. .. change:: :tags: orm :tickets: 2280 The integer "id" used to link a mapped instance with its owning Session is now generated by a sequence generation function rather than id(Session), to eliminate the possibility of recycled id() values causing an incorrect result, no need to check that object actually in the session. .. change:: :tags: orm :tickets: 2257 Behavioral improvement: empty conjunctions such as and_() and or_() will be flattened in the context of an enclosing conjunction, i.e. and_(x, or_()) will produce 'X' and not 'X AND ()'.. .. change:: :tags: orm :tickets: 2261 Fixed bug regarding calculation of "from" list for a select() element. The "from" calc is now delayed, so that if the construct uses a Column object that is not yet attached to a Table, but is later associated with a Table, it generates SQL using the table as a FROM. This change impacted fairly deeply the mechanics of how the FROM list as well as the "correlates" collection is calculated, as some "clause adaption" schemes (these are used very heavily in the ORM) were relying upon the fact that the "froms" collection would typically be cached before the adaption completed. The rework allows it such that the "froms" collection can be cleared and re-generated at any time. .. change:: :tags: orm :tickets: 2270 Fixed bug whereby with_only_columns() method of Select would fail if a selectable were passed.. Also in 0.6.9. .. change:: :tags: schema :tickets: 2284 Modified Column.copy() to use _constructor(), which defaults to self.__class__, in order to create the new object. This allows easier support of subclassing Column. .. change:: :tags: schema :tickets: 2223 Added a slightly nicer __repr__() to SchemaItem classes. Note the repr here can't fully support the "repr is the constructor" idea since schema items can be very deeply nested/cyclical, have late initialization of some things, etc. .. change:: :tags: engine :tickets: 2254 The recreate() method in all pool classes uses self.__class__ to get at the type of pool to produce, in the case of subclassing. Note there's no usual need to subclass pools. .. 
change:: :tags: engine :tickets: 2243 Improvement to multi-param statement logging; long lists of bound parameter sets will be compressed with an informative indicator of the compression taking place. Exception messages use the same improved formatting. .. change:: :tags: engine :tickets: Added optional "sa_pool_key" argument to pool.manage(dbapi).connect() so that serialization of args is not necessary. .. change:: :tags: engine :tickets: 2286 The entry point resolution supported by create_engine() now supports resolution of individual DBAPI drivers on top of a built-in or entry point-resolved dialect, using the standard '+' notation - it's converted to a '.' before being resolved as an entry point. .. change:: :tags: engine :tickets: 2299 Added an exception catch + warning for the "return unicode detection" step within connect; allows databases that crash on NVARCHAR to continue initializing, assuming no NVARCHAR type is implemented. .. change:: :tags: types :tickets: 2258 Extra keyword arguments to the base Float type beyond "precision" and "asdecimal" are ignored; added a deprecation warning here and additional docs. .. change:: :tags: sqlite :tickets: Ensured that the same ValueError is raised for an illegal date/time/datetime string parsed from the database regardless of whether C extensions are in use or not. .. change:: :tags: postgresql :tickets: 2290 Added "postgresql_using" argument to Index(), produces a USING clause to specify the index implementation for PG. Thanks to Ryan P. Kelly for the patch. .. change:: :tags: postgresql :tickets: 1839 Added client_encoding parameter to create_engine() when the postgresql+psycopg2 dialect is used; calls the psycopg2 set_client_encoding() method with the value upon connect. .. change:: :tags: postgresql :tickets: 2291, 2141 Fixed bug whereby the same modified index behavior in PG 9 that affected index reflection also affected primary key reflection on a renamed column. Also in 0.6.9. .. change:: :tags: postgresql :tickets: 2256 Reflection functions for Table, Sequence are no longer case insensitive. Names that differ only in case will now be correctly distinguished. .. change:: :tags: postgresql :tickets: Use an atomic counter as the "random number" source for server side cursor names; conflicts have been reported in rare cases. .. change:: :tags: postgresql :tickets: 2249 Narrowed the assumption made when reflecting a foreign-key referenced table with schema in the current search path; an explicit schema will be applied to the referenced table only if it actually matches that of the referencing table, which also has an explicit schema. Previously it was assumed that "current" schema was synonymous with the full search_path. .. change:: :tags: mysql :tickets: 2225 A CREATE TABLE will put the COLLATE option after CHARSET, which appears to be part of MySQL's arbitrary rules regarding if it will actually work or not. Also in 0.6.9. .. change:: :tags: mysql :tickets: 2293 Added mysql_length parameter to the Index construct, specifies "length" for indexes. .. change:: :tags: mssql :tickets: 2273 Changes to attempt support of FreeTDS 0.91 with Pyodbc. This includes that string binds are sent as Python unicode objects when FreeTDS 0.91 is detected, and a CAST(? AS NVARCHAR) is used where we detect the need for it against a table.
However, I'd continue to characterize Pyodbc + FreeTDS 0.91 behavior as pretty crappy, there are still many queries such as used in reflection which cause a core dump on Linux, and it is not really usable at all on OSX, MemoryErrors abound and just plain broken unicode support. .. change:: :tags: mssql :tickets: 2277 The behavior of =/!= when comparing a scalar select to a value will no longer produce IN/NOT IN as of 0.8; this behavior is a little too heavy handed (use in_() if you want to emit IN) and now emits a deprecation warning. To get the 0.8 behavior immediately and remove the warning, a compiler recipe is given at http://www.sqlalchemy.org/docs/07/dialects/mssql.html#scalar-select-comparisons to override the behavior of visit_binary(). .. change:: :tags: mssql :tickets: 2222 "0" is accepted as an argument for limit() which will produce "TOP 0". .. change:: :tags: oracle :tickets: 2272 Fixed ReturningResultProxy for zxjdbc dialect.. Regression from 0.6. .. change:: :tags: oracle :tickets: 2252 The String type now generates VARCHAR2 on Oracle which is recommended as the default VARCHAR. Added an explicit VARCHAR2 and NVARCHAR2 to the Oracle dialect as well. Using NVARCHAR still generates "NVARCHAR2" - there is no "NVARCHAR" on Oracle - this remains a slight breakage of the "uppercase types always give exactly that" policy. VARCHAR still generates "VARCHAR", keeping with the policy. If Oracle were to ever define "VARCHAR" as something different as they claim (IMHO this will never happen), the type would be available. .. change:: :tags: ext :tickets: 2262 SQLSoup will not be included in version 0.8 of SQLAlchemy; while useful, we would like to keep SQLAlchemy itself focused on one ORM usage paradigm. SQLSoup will hopefully soon be superseded by a third party project. .. change:: :tags: ext :tickets: 2236 Added local_attr, remote_attr, attr accessors to AssociationProxy, providing quick access to the proxied attributes at the class level. .. change:: :tags: ext :tickets: 2275 Changed the update() method on association proxy dictionary to use a duck typing approach, i.e. checks for "keys", to discern between update({}) and update((a, b)). Previously, passing a dictionary that had tuples as keys would be misinterpreted as a sequence. .. change:: :tags: examples :tickets: 2266 Adjusted dictlike-polymorphic.py example to apply the CAST such that it works on PG, other databases. Also in 0.6.9. .. changelog:: :version: 0.7.2 :released: Sun Jul 31 2011 .. change:: :tags: orm :tickets: 2213 Feature enhancement: joined and subquery loading will now traverse already-present related objects and collections in search of unpopulated attributes throughout the scope of the eager load being defined, so that the eager loading that is specified via mappings or query options unconditionally takes place for the full depth, populating whatever is not already populated. Previously, this traversal would stop if a related object or collection were already present leading to inconsistent behavior (though would save on loads/cycles for an already-loaded graph). For a subqueryload, this means that the additional SELECT statements emitted by subqueryload will invoke unconditionally, no matter how much of the existing graph is already present (hence the controversy). 
The previous behavior of "stopping" is still in effect when a query is the result of an attribute-initiated lazyload, as otherwise an "N+1" style of collection iteration can become needlessly expensive when the same related object is encountered repeatedly. There's also an as-yet-not-public generative Query method _with_invoke_all_eagers() which selects old/new behavior .. change:: :tags: orm :tickets: 2195 A rework of "replacement traversal" within the ORM as it alters selectables to be against aliases of things (i.e. clause adaption) includes a fix for multiply-nested any()/has() constructs against a joined table structure. .. change:: :tags: orm :tickets: 2234 Fixed bug where query.join() + aliased=True from a joined-inh structure to itself on relationship() with join condition on the child table would convert the lead entity into the joined one inappropriately. Also in 0.6.9. .. change:: :tags: orm :tickets: 2205 Fixed regression from 0.6 where Session.add() against an object which contained None in a collection would raise an internal exception. Reverted this to 0.6's behavior which is to accept the None but obviously nothing is persisted. Ideally, collections with None present or on append() should at least emit a warning, which is being considered for 0.8. .. change:: :tags: orm :tickets: 2191 Load of a deferred() attribute on an object where row can't be located raises ObjectDeletedError instead of failing later on; improved the message in ObjectDeletedError to include other conditions besides a simple "delete". .. change:: :tags: orm :tickets: 2224 Fixed regression from 0.6 where a get history operation on some relationship() based attributes would fail when a lazyload would emit; this could trigger within a flush() under certain conditions. Thanks to the user who submitted the great test for this. .. change:: :tags: orm :tickets: 2228 Fixed bug apparent only in Python 3 whereby sorting of persistent + pending objects during flush would produce an illegal comparison, if the persistent object primary key is not a single integer. Also in 0.6.9 .. change:: :tags: orm :tickets: 2197 Fixed bug whereby the source clause used by query.join() would be inconsistent if against a column expression that combined multiple entities together. Also in 0.6.9 .. change:: :tags: orm :tickets: 2215 Fixed bug whereby if a mapped class redefined __hash__() or __eq__() to something non-standard, which is a supported use case as SQLA should never consult these, the methods would be consulted if the class was part of a "composite" (i.e. non-single-entity) result set. Also in 0.6.9. .. change:: :tags: orm :tickets: 2240 Added public attribute ".validators" to Mapper, an immutable dictionary view of all attributes that have been decorated with the @validates decorator. courtesy Stefano Fontanelli .. change:: :tags: orm :tickets: 2188 Fixed subtle bug that caused SQL to blow up if: column_property() against subquery + joinedload + LIMIT + order by the column property() occurred. . Also in 0.6.9 .. change:: :tags: orm :tickets: 2207 The join condition produced by with_parent as well as when using a "dynamic" relationship against a parent will generate unique bindparams, rather than incorrectly repeating the same bindparam. . Also in 0.6.9. .. change:: :tags: orm :tickets: Added the same "columns-only" check to mapper.polymorphic_on as used when receiving user arguments to relationship.order_by, foreign_keys, remote_side, etc. .. 
change:: :tags: orm :tickets: 2190 Fixed bug whereby comparison of a column expression to a Query() would not call as_scalar() on the underlying SELECT statement to produce a scalar subquery, in the way that occurs if you called it on Query().subquery(). .. change:: :tags: orm :tickets: 2194 Fixed declarative bug where a class inheriting from a superclass of the same name would fail due to an unnecessary lookup of the name in the _decl_class_registry. .. change:: :tags: orm :tickets: 2199 Repaired the "no statement condition" assertion in Query which would attempt to raise if a generative method were called after from_statement() were called. Also in 0.6.9. .. change:: :tags: sql :tickets: 2188 Fixed two subtle bugs involving column correspondence in a selectable, one with the same labeled subquery repeated, the other when the label has been "grouped" and loses itself. .. change:: :tags: schema :tickets: 2187 New feature: with_variant() method on all types. Produces an instance of Variant(), a special TypeDecorator which will select the usage of a different type based on the dialect in use (a brief sketch follows this group of entries). .. change:: :tags: schema :tickets: Added an informative error message when ForeignKeyConstraint refers to a column name in the parent that is not found. Also in 0.6.9. .. change:: :tags: schema :tickets: 2206 Fixed bug whereby adaptation of the old append_ddl_listener() function was passing unexpected \**kw through to the Table event. Table gets no kws; the MetaData event in 0.6 would get "tables=somecollection", and this behavior is preserved. .. change:: :tags: schema :tickets: Fixed bug where "autoincrement" detection on Table would fail if the type had no "affinity" value, in particular this would occur when using the UUID example on the site that uses TypeEngine as the "impl". .. change:: :tags: schema :tickets: 2209 Added an improved repr() to TypeEngine objects that will only display constructor args which are positional or kwargs that deviate from the default. .. change:: :tags: engine :tickets: The context manager provided by Connection.begin() will issue rollback() if the commit() fails, not just if an exception occurs. .. change:: :tags: engine :tickets: 1682 Use urllib.parse_qsl() in Python 2.6 and above, no deprecation warning about cgi.parse_qsl(). .. change:: :tags: engine :tickets: Added mixin class sqlalchemy.ext.DontWrapMixin. User-defined exceptions of this type are never wrapped in StatementException when they occur in the context of a statement execution. .. change:: :tags: engine :tickets: StatementException wrapping will display the original exception class in the message. .. change:: :tags: engine :tickets: 2201 Failures on connect which raise dbapi.Error will forward the error to dialect.is_disconnect() and set the "connection_invalidated" flag if the dialect knows this to be a potentially "retryable" condition. Only Oracle ORA-01033 implemented for now. .. change:: :tags: sqlite :tickets: 2189 SQLite dialect no longer strips quotes off of a reflected default value, allowing a round trip CREATE TABLE to work. This is consistent with other dialects that also maintain the exact form of the default. .. change:: :tags: postgresql :tickets: 2198 Added new "postgresql_ops" argument to Index, allows specification of PostgreSQL operator classes for indexed columns. Courtesy Filip Zyzniewski. .. change:: :tags: mysql :tickets: 2186 Fixed the OurSQL dialect to use the ansi-neutral quote symbol "'" for XA commands instead of '"'. Also in 0.6.9.
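A minimal sketch of the new with_variant() method noted above; the table, column names, and the choice of SQLite INTEGER are illustrative assumptions::

    from sqlalchemy import BigInteger, Column, MetaData, Table
    from sqlalchemy.dialects import sqlite

    metadata = MetaData()

    # BigInteger everywhere, but rendered as a plain INTEGER on SQLite
    id_type = BigInteger().with_variant(sqlite.INTEGER(), "sqlite")

    accounts = Table("accounts", metadata,
                     Column("id", id_type, primary_key=True))

..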
change:: :tags: mssql :tickets: Adjusted the pyodbc dialect such that bound values are passed as bytes and not unicode if the "Easysoft" unix drivers are detected. This is the same behavior as occurs with FreeTDS. Easysoft appears to segfault if Python unicodes are passed under certain circumstances. .. change:: :tags: oracle :tickets: 2200 Added ORA-00028 to disconnect codes; uses cx_oracle _Error.code to get at the code. Also in 0.6.9. .. change:: :tags: oracle :tickets: 2201 Added ORA-01033 to disconnect codes, which can be caught during a connection event. .. change:: :tags: oracle :tickets: 2220 Repaired the oracle.RAW type, which did not generate the correct DDL. Also in 0.6.9. .. change:: :tags: oracle :tickets: 2212 Added CURRENT to the reserved word list. Also in 0.6.9. .. change:: :tags: oracle :tickets: Fixed bug in the mutable extension whereby if the same type were used twice in one mapping, the attributes beyond the first would not get instrumented. .. change:: :tags: oracle :tickets: Fixed bug in the mutable extension whereby if None or a non-corresponding type were set, an error would be raised. None is now accepted, which assigns None to all attributes; illegal values raise ValueError. .. change:: :tags: examples :tickets: Repaired the examples/versioning test runner to not rely upon SQLAlchemy test libs; nosetests must be run from within examples/versioning to get around setup.cfg breaking it. .. change:: :tags: examples :tickets: Tweak to examples/versioning to pick the correct foreign key in a multi-level inheritance situation. .. change:: :tags: examples :tickets: Fixed the attribute shard example to check for the bind param callable correctly in 0.7 style. .. changelog:: :version: 0.7.1 :released: Sun Jun 05 2011 .. change:: :tags: general :tickets: 2184 Added a workaround for Python bug 7511 where failure of the C extension build does not raise an appropriate exception on Windows 64 bit + VC express. .. change:: :tags: orm :tickets: 1912 "delete-orphan" cascade is now allowed on self-referential relationships, since SQLA 0.7 no longer enforces "parent with no child" at the ORM level; this check is left up to foreign key nullability (a brief sketch follows this group of entries). .. change:: :tags: orm :tickets: 2180 Repaired the new "mutable" extension to propagate events to subclasses correctly; don't create multiple event listeners for subclasses either. .. change:: :tags: orm :tickets: 2170 Modify the text of the message which occurs when the "identity" key isn't detected on flush, to include the common cause that the Column isn't set up to detect auto-increment correctly. Also in 0.6.8. .. change:: :tags: orm :tickets: 2182 Fixed bug where the transaction-level "deleted" collection wouldn't be cleared of expunged states, raising an error if they later became transient. Also in 0.6.8. .. change:: :tags: sql :tickets: Fixed bug whereby metadata.reflect(bind) would close a Connection passed as a bind argument. Regression from 0.6. .. change:: :tags: sql :tickets: Streamlined the process by which a Select determines what's in its '.c' collection. Behaves identically, except that a raw ClauseList() passed to select([]) (which is not a documented case anyway) will now be expanded into its individual column elements instead of being ignored. .. change:: :tags: engine :tickets: Deprecate schema/SQL-oriented methods on Connection/Engine that were never well known and are redundant: reflecttable(), create(), drop(), text(), engine.func.
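A minimal sketch of a self-referential relationship using delete-orphan cascade, as now permitted per the entry above; the Node class and its columns are hypothetical::

    from sqlalchemy import Column, ForeignKey, Integer
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import relationship

    Base = declarative_base()

    class Node(Base):
        __tablename__ = "node"
        id = Column(Integer, primary_key=True)
        parent_id = Column(Integer, ForeignKey("node.id"))

        # delete-orphan on a self-referential relationship is now allowed
        children = relationship("Node", cascade="all, delete-orphan")

..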
change:: :tags: engine :tickets: 2178 Adjusted the __contains__() method of a RowProxy result row such that no exception throw is generated internally; NoSuchColumnError() also will generate its message regardless of whether or not the column construct can be coerced to a string. Also in 0.6.8. .. change:: :tags: sqlite :tickets: 2173 Accept None from cursor.fetchone() when "PRAGMA read_uncommitted" is called to determine the current isolation mode at connect time, and default to SERIALIZABLE; this to support SQLite versions pre-3.3.0 that did not have this feature. .. change:: :tags: postgresql :tickets: 2175 Some unit test fixes regarding numeric arrays, MATCH operator. A potential floating-point inaccuracy issue was fixed, and certain tests of the MATCH operator only execute within an EN-oriented locale for now. Also in 0.6.8. .. change:: :tags: mysql :tickets: Unit tests pass 100% on MySQL installed on windows. .. change:: :tags: mysql :tickets: 2181 Removed the "adjust casing" step that would fail when reflecting a table on MySQL on windows with a mixed case name. After some experimenting with a windows MySQL server, it's been determined that this step wasn't really helping the situation much; MySQL does not return FK names with proper casing on non-windows platforms either, and removing the step at least allows the reflection to act more like it does on other OSes. A warning here has been considered, but it's difficult to determine under what conditions such a warning can be raised, so punted on that for now - added some docs instead. .. change:: :tags: mysql :tickets: supports_sane_rowcount will be set to False if using MySQLdb and the DBAPI doesn't provide the constants.CLIENT module. .. changelog:: :version: 0.7.0 :released: Fri May 20 2011 .. change:: :tags: :tickets: This section documents those changes from 0.7b4 to 0.7.0. For an overview of what's new in SQLAlchemy 0.7, see http://www.sqlalchemy.org/trac/wiki/07Migration .. change:: :tags: orm :tickets: 2069 Fixed regression introduced in 0.7b4 (!) whereby query.options(someoption("nonexistent name")) would fail to raise an error. Also added additional error catching for cases where the option would try to build off a column-based element, and further fixed up some of the related error messages. .. change:: :tags: orm :tickets: 2162 query.count() emits "count(*)" instead of "count(1)". .. change:: :tags: orm :tickets: 2155 Fine tuning of Query clause adaptation when from_self(), union(), or another "select from myself" operation is used, such that plain SQL expression elements added to filter(), order_by() etc. which are present in the nested "from myself" query *will* be adapted in the same way an ORM expression element will, since these elements are otherwise not easily accessible. .. change:: :tags: orm :tickets: 2149 Fixed bug where determination of a "self referential" relationship would fail with no workaround for a joined-inh subclass related to itself, or a joined-inh subclass related to a subclass of that with no cols in the sub-sub class in the join condition. Also in 0.6.8. .. change:: :tags: orm :tickets: 2153 mapper() will ignore non-configured foreign keys to unrelated tables when determining the inherit condition between parent and child class, but will raise as usual for unresolved columns and table names regarding the inherited table. This is an enhanced generalization of behavior that was already applied to declarative previously.
0.6.8 has a more conservative version of this which doesn't fundamentally alter how join conditions are determined. .. change:: :tags: orm :tickets: 2144 It is an error to call query.get() when the given entity is not a single, full class entity or mapper (i.e. a column). This is a deprecation warning in 0.6.8. .. change:: :tags: orm :tickets: 2148 Fixed a potential KeyError which under some circumstances could occur with the identity map, part of .. change:: :tags: orm :tickets: added Query.with_session() method, switches Query to use a different session. .. change:: :tags: orm :tickets: 2131 horizontal shard query should use execution options per connection as per .. change:: :tags: orm :tickets: 2151 a non_primary mapper will inherit the _identity_class of the primary mapper. This so that a non_primary established against a class that's normally in an inheritance mapping will produce results that are identity-map compatible with that of the primary mapper (also in 0.6.8) .. change:: :tags: orm :tickets: 2163 Fixed the error message emitted for "can't execute syncrule for destination column 'q'; mapper 'X' does not map this column" to reference the correct mapper. . Also in 0.6.8. .. change:: :tags: orm :tickets: 1502 polymorphic_union() gets a "cast_nulls" option, disables the usage of CAST when it renders the labeled NULL columns. .. change:: :tags: orm :tickets: polymorphic_union() renders the columns in their original table order, as according to the first table/selectable in the list of polymorphic unions in which they appear. (which is itself an unordered mapping unless you pass an OrderedDict). .. change:: :tags: orm :tickets: 2171 Fixed bug whereby mapper mapped to an anonymous alias would fail if logging were used, due to unescaped % sign in the alias name. Also in 0.6.8. .. change:: :tags: sql :tickets: 2167 Fixed bug whereby nesting a label of a select() with another label in it would produce incorrect exported columns. Among other things this would break an ORM column_property() mapping against another column_property(). . Also in 0.6.8 .. change:: :tags: sql :tickets: Changed the handling in determination of join conditions such that foreign key errors are only considered between the two given tables. That is, t1.join(t2) will report FK errors that involve 't1' or 't2', but anything involving 't3' will be skipped. This affects join(), as well as ORM relationship and inherit condition logic. .. change:: :tags: sql :tickets: Some improvements to error handling inside of the execute procedure to ensure auto-close connections are really closed when very unusual DBAPI errors occur. .. change:: :tags: sql :tickets: metadata.reflect() and reflection.Inspector() had some reliance on GC to close connections which were internally procured, fixed this. .. change:: :tags: sql :tickets: 2140 Added explicit check for when Column .name is assigned as blank string .. change:: :tags: sql :tickets: 2147 Fixed bug whereby if FetchedValue was passed to column server_onupdate, it would not have its parent "column" assigned, added test coverage for all column default assignment patterns. also in 0.6.8 .. change:: :tags: postgresql :tickets: Fixed the psycopg2_version parsing in the psycopg2 dialect. .. change:: :tags: postgresql :tickets: 2141 Fixed bug affecting PG 9 whereby index reflection would fail if against a column whose name had changed. . Also in 0.6.8. .. 
change:: :tags: mssql :tickets: 2169 Fixed bug in MSSQL dialect whereby the aliasing applied to a schema-qualified table would leak into enclosing select statements. Also in 0.6.8. .. change:: :tags: documentation :tickets: 2152 Removed the usage of the "collections.MutableMapping" abc from the ext.mutable docs as it was being used incorrectly and makes the example more difficult to understand in any case. .. change:: :tags: examples :tickets: removed the ancient "polymorphic association" examples and replaced with an updated set of examples that use declarative mixins, "generic_associations". Each presents an alternative table layout. .. change:: :tags: ext :tickets: 2143 Fixed bugs in sqlalchemy.ext.mutable extension where `None` was not appropriately handled, replacement events were not appropriately handled. .. changelog:: :version: 0.7.0b4 :released: Sun Apr 17 2011 .. change:: :tags: general :tickets: Changes to the format of CHANGES, this file. The format changes have been applied to the 0.7 releases. .. change:: :tags: general :tickets: The "-declarative" changes will now be listed directly under the "-orm" section, as these are closely related. .. change:: :tags: general :tickets: The 0.5 series changes have been moved to the file CHANGES_PRE_06 which replaces CHANGES_PRE_05. .. change:: :tags: general :tickets: The changelog for 0.6.7 and subsequent within the 0.6 series is now listed only in the CHANGES file within the 0.6 branch. In the 0.7 CHANGES file (i.e. this file), all the 0.6 changes are listed inline within the 0.7 section in which they were also applied (since all 0.6 changes are in 0.7 as well). Changes that apply to an 0.6 version here are noted, as are any differences in implementation or behavior. .. change:: :tags: orm :tickets: 2122 Some fixes to "evaluate" and "fetch" evaluation when query.update(), query.delete() are called. The retrieval of records is done after autoflush in all cases, and before update/delete is emitted, guarding against unflushed data present as well as expired objects failing during the evaluation. .. change:: :tags: orm :tickets: 2063 Reworded the exception raised when a flush is attempted of a subclass that is not polymorphic against the supertype. .. change:: :tags: orm :tickets: Still more wording adjustments when a query option can't find the target entity. Explain that the path must be from one of the root entities. .. change:: :tags: orm :tickets: 2123 Some fixes to the state handling regarding backrefs, typically when autoflush=False, where the back-referenced collection wouldn't properly handle add/removes with no net change. Thanks to Richard Murri for the test case + patch. (also in 0.6.7). .. change:: :tags: orm :tickets: 2127 Added checks inside the UOW to detect the unusual condition of being asked to UPDATE or DELETE on a primary key value that contains NULL in it. .. change:: :tags: orm :tickets: 2127 Some refinements to attribute history. More changes are pending possibly in 0.8, but for now history has been modified such that scalar history doesn't have a "side effect" of populating None for a non-present value. This allows a slightly better ability to distinguish between a None set and no actual change, affects as well. .. change:: :tags: orm :tickets: 2130 a "having" clause would be copied from the inside to the outside query if from_self() were used; in particular this would break an 0.7 style count() query. (also in 0.6.7)
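For context on the "evaluate" and "fetch" entry above, a bulk UPDATE/DELETE with an explicit synchronization strategy looks roughly like this; a sketch only, where ``User`` stands in for any mapped class::

    # "evaluate" applies the criterion to matching objects already in the Session
    session.query(User).\
        filter(User.name == 'ed').\
        update({"name": "edward"}, synchronize_session='evaluate')

    # "fetch" selects the affected primary keys before the DELETE is emitted
    session.query(User).\
        filter(User.id.in_([1, 2, 3])).\
        delete(synchronize_session='fetch')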
change:: :tags: orm :tickets: 2131 the Query.execution_options() method now passes those options to the Connection rather than the SELECT statement, so that all available options including isolation level and compiled cache may be used. .. change:: :tags: sql :tickets: 2131 The "compiled_cache" execution option now raises an error when passed to a SELECT statement rather than a Connection. Previously it was being ignored entirely. We may look into having this option work on a per-statement level at some point. .. change:: :tags: sql :tickets: Restored the "catchall" constructor on the base TypeEngine class, with a deprecation warning. This so that code which does something like Integer(11) still succeeds. .. change:: :tags: sql :tickets: 2104 Fixed regression whereby MetaData() coming back from unpickling did not keep track of new things it keeps track of now, i.e. collection of Sequence objects, list of schema names. .. change:: :tags: sql :tickets: 2116 The limit/offset keywords to select() as well as the value passed to select.limit()/offset() will be coerced to integer. (also in 0.6.7) .. change:: :tags: sql :tickets: fixed bug where "from" clause gathering from an over() clause would be an itertools.chain() and not a list, causing "can only concatenate list" TypeError when combined with other clauses. .. change:: :tags: sql :tickets: 2134 Fixed incorrect usage of "," in over() clause being placed between the "partition" and "order by" clauses. .. change:: :tags: sql :tickets: 2105 Before/after attach events for PrimaryKeyConstraint now function, tests added for before/after events on all constraint types. .. change:: :tags: sql :tickets: 2117 Added explicit true()/false() constructs to expression lib - coercion rules will intercept "False"/"True" into these constructs. In 0.6, the constructs were typically converted straight to string, which was no longer accepted in 0.7. .. change:: :tags: engine :tickets: 2129 The C extension is now enabled by default on CPython 2.x with a fallback to pure python if it fails to compile. .. change:: :tags: schema :tickets: 2109 The 'useexisting' flag on Table has been superceded by a new pair of flags 'keep_existing' and 'extend_existing'. 'extend_existing' is equivalent to 'useexisting' - the existing Table is returned, and additional constructor elements are added. With 'keep_existing', the existing Table is returned, but additional constructor elements are not added - these elements are only applied when the Table is newly created. .. change:: :tags: types :tickets: 2081 REAL has been added to the core types. Supported by Postgresql, SQL Server, MySQL, SQLite. Note that the SQL Server and MySQL versions, which add extra arguments, are also still available from those dialects. .. change:: :tags: types :tickets: 2106 Added @event.listens_for() decorator, given target + event name, applies the decorated function as a listener. .. change:: :tags: pool :tickets: 2103 AssertionPool now stores the traceback indicating where the currently checked out connection was acquired; this traceback is reported within the assertion raised upon a second concurrent checkout; courtesy Gunnlaugur Briem .. change:: :tags: pool :tickets: The "pool.manage" feature doesn't use pickle anymore to hash the arguments for each pool. .. change:: :tags: sqlite :tickets: 2115 Fixed bug where reflection of foreign key created as "REFERENCES " without col name would fail. (also in 0.6.7) .. change:: :tags: postgresql :tickets: Psycopg2 for Python 3 is now supported. .. 
change:: :tags: postgresql :tickets: 2132 Fixed support for precision numerics when using pg8000. .. change:: :tags: oracle :tickets: 2100 Using column names that would require quotes for the column itself or for a name-generated bind parameter, such as names with special characters, underscores, non-ascii characters, now properly translate bind parameter keys when talking to cx_oracle. (Also in 0.6.7) .. change:: :tags: oracle :tickets: 2116 Oracle dialect adds use_binds_for_limits=False create_engine() flag, will render the LIMIT/OFFSET values inline instead of as binds, reported to modify the execution plan used by Oracle. (Also in 0.6.7) .. change:: :tags: documentation :tickets: 2029 Documented SQLite DATE/TIME/DATETIME types. (also in 0.6.7) .. change:: :tags: documentation :tickets: 2118 Fixed mutable extension docs to show the correct type-association methods. .. changelog:: :version: 0.7.0b3 :released: Sun Mar 20 2011 .. change:: :tags: general :tickets: Lots of fixes to unit tests when run under Pypy (courtesy Alex Gaynor). .. change:: :tags: orm :tickets: 2093 Changed the underlying approach to query.count(). query.count() is now in all cases exactly: query. from_self(func.count(literal_column('1'))). scalar() That is, "select count(1) from ()". This produces a subquery in all cases, but vastly simplifies all the guessing count() tried to do previously, which would still fail in many scenarios particularly when joined table inheritance and other joins were involved. If the subquery produced for an otherwise very simple count is really an issue, use query(func.count()) as an optimization. .. change:: :tags: orm :tickets: 2087 some changes to the identity map regarding rare weakref callbacks during iterations. The mutex has been removed as it apparently can cause a reentrant (i.e. in one thread) deadlock, perhaps when gc collects objects at the point of iteration in order to gain more memory. It is hoped that "dictionary changed during iteration" will be exceedingly rare as iteration methods internally acquire the full list of objects in a single values() call. Note 0.6.7 has a more conservative fix here which still keeps the mutex in place. .. change:: :tags: orm :tickets: 2082 A tweak to the unit of work causes it to order the flush along relationship() dependencies even if the given objects don't have any inter-attribute references in memory, which was the behavior in 0.5 and earlier, so a flush of Parent/Child with only foreign key/primary key set will succeed. This while still maintaining 0.6 and above's not generating a ton of useless internal dependency structures within the flush that don't correspond to state actually within the current flush. .. change:: :tags: orm :tickets: 2069 Improvements to the error messages emitted when querying against column-only entities in conjunction with (typically incorrectly) using loader options, where the parent entity is not fully present. .. change:: :tags: orm :tickets: 2098 Fixed bug in query.options() whereby a path applied to a lazyload using string keys could overlap a same named attribute on the wrong entity. Note 0.6.7 has a more conservative fix to this. .. change:: :tags: declarative :tickets: 2091 Arguments in __mapper_args__ that aren't "hashable" aren't mistaken for always-hashable, possibly-column arguments. (also in 0.6.7) .. change:: :tags: sql :tickets: Added a fully descriptive error message for the case where Column is subclassed and _make_proxy() fails to make a copy due to TypeError on the constructor. 
The method _constructor should be implemented in this case. .. change:: :tags: sql :tickets: 2095 Added new event "column_reflect" for Table objects. Receives the info dictionary about a Column before the object is generated within reflection, and allows modification to the dictionary for control over most aspects of the resulting Column including key, name, type, info dictionary. .. change:: :tags: sql :tickets: To help with the "column_reflect" event being used with specific Table objects instead of all instances of Table, listeners can be added to a Table object inline with its construction using a new argument "listeners", a list of tuples of the form (<eventname>, <fn>), which are applied to the Table before the reflection process begins. .. change:: :tags: sql :tickets: 2085 Added new generic function "next_value()", accepts a Sequence object as its argument and renders the appropriate "next value" generation string on the target platform, if supported. Also provides ".next_value()" method on Sequence itself. .. change:: :tags: sql :tickets: 2084 func.next_value() or other SQL expression can be embedded directly into an insert() construct, and if implicit or explicit "returning" is used in conjunction with a primary key column, the newly generated value will be present in result.inserted_primary_key. .. change:: :tags: sql :tickets: 2089 Added accessors to ResultProxy "returns_rows", "is_insert". (also in 0.6.7) .. change:: :tags: engine :tickets: 2097 Fixed AssertionPool regression bug. .. change:: :tags: engine :tickets: 2060 Changed exception raised to ArgumentError when an invalid dialect is specified. .. change:: :tags: postgresql :tickets: 2092 Added RESERVED_WORDS for postgresql dialect. (also in 0.6.7) .. change:: :tags: postgresql :tickets: 2073 Fixed the BIT type to allow a "length" parameter, "varying" parameter. Reflection also fixed. (also in 0.6.7) .. change:: :tags: mssql :tickets: 2071 Rewrote the query used to get the definition of a view, typically when using the Inspector interface, to use sys.sql_modules instead of the information schema, thereby allowing view definitions longer than 4000 characters to be fully returned. (also in 0.6.7) .. change:: :tags: firebird :tickets: 2083 The "implicit_returning" flag on create_engine() is honored if set to False. (also in 0.6.7) .. change:: :tags: informix :tickets: 2092 Added RESERVED_WORDS to the informix dialect. (also in 0.6.7) .. change:: :tags: ext :tickets: 2090 The horizontal_shard ShardedSession class accepts the common Session argument "query_cls" as a constructor argument, to enable further subclassing of ShardedQuery. (also in 0.6.7) .. change:: :tags: examples :tickets: Updated the association, association proxy examples to use declarative, added a new example dict_of_sets_with_default.py, a "pushing the envelope" example of association proxy. .. change:: :tags: examples :tickets: 2090 The Beaker caching example allows a "query_cls" argument to the query_callable() function. (also in 0.6.7) .. changelog:: :version: 0.7.0b2 :released: Sat Feb 19 2011 .. change:: :tags: orm :tickets: 2053 Fixed bug whereby Session.merge() would call the load() event with one too few arguments. .. change:: :tags: orm :tickets: 2052 Added logic which prevents the generation of events from a MapperExtension or SessionExtension from generating do-nothing events for all the methods not overridden. .. change:: :tags: declarative :tickets: 2058 Fixed regression whereby composite() with Column objects placed inline would fail to initialize.
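A minimal declarative sketch of the inline-``Column`` form of ``composite()`` that this entry refers to; the ``Base`` declarative base and the ``Point`` composite class are assumed to be defined elsewhere::

    from sqlalchemy import Column, Integer
    from sqlalchemy.orm import composite

    class Vertex(Base):
        __tablename__ = 'vertex'

        id = Column(Integer, primary_key=True)

        # Column objects given inline to composite()
        start = composite(Point, Column('x1', Integer), Column('y1', Integer))
        end = composite(Point, Column('x2', Integer), Column('y2', Integer))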
The Column objects can now be inline with the composite() or external and pulled in via name or object ref. .. change:: :tags: declarative :tickets: 2061 Fix error message referencing old @classproperty name to reference @declared_attr (also in 0.6.7) .. change:: :tags: declarative :tickets: 1468 the dictionary at the end of the __table_args__ tuple is now optional. .. change:: :tags: sql :tickets: 2059 Renamed the EngineEvents event class to ConnectionEvents. As these classes are never accessed directly by end-user code, this strictly is a documentation change for end users. Also simplified how events get linked to engines and connections internally. .. change:: :tags: sql :tickets: 2055 The Sequence() construct, when passed a MetaData() object via its 'metadata' argument, will be included in CREATE/DROP statements within metadata.create_all() and metadata.drop_all(), including "checkfirst" logic. .. change:: :tags: sql :tickets: 2064 The Column.references() method now returns True if it has a foreign key referencing the given column exactly, not just it's parent table. .. change:: :tags: postgresql :tickets: 2065 Fixed regression from 0.6 where SMALLINT and BIGINT types would both generate SERIAL on an integer PK column, instead of SMALLINT and BIGSERIAL .. change:: :tags: ext :tickets: 2054 Association proxy now has correct behavior for any(), has(), and contains() when proxying a many-to-one scalar attribute to a one-to-many collection (i.e. the reverse of the 'typical' association proxy use case) .. change:: :tags: examples :tickets: Beaker example now takes into account 'limit' and 'offset', bind params within embedded FROM clauses (like when you use union() or from_self()) when generating a cache key. .. changelog:: :version: 0.7.0b1 :released: Sat Feb 12 2011 .. change:: :tags: :tickets: Detailed descriptions of each change below are described at: http://www.sqlalchemy.org/trac/wiki/07Migration .. change:: :tags: general :tickets: 1902 New event system, supercedes all extensions, listeners, etc. .. change:: :tags: general :tickets: 1926 Logging enhancements .. change:: :tags: general :tickets: 1949 Setup no longer installs a Nose plugin .. change:: :tags: general :tickets: The "sqlalchemy.exceptions" alias in sys.modules has been removed. Base SQLA exceptions are available via "from sqlalchemy import exc". The "exceptions" alias for "exc" remains in "sqlalchemy" for now, it's just not patched into sys.modules. .. change:: :tags: orm :tickets: 1923 More succinct form of query.join(target, onclause) .. change:: :tags: orm :tickets: 1903 Hybrid Attributes, implements/supercedes synonym() .. change:: :tags: orm :tickets: 2008 Rewrite of composites .. change:: :tags: orm :tickets: Mutation Event Extension, supercedes "mutable=True" .. seealso:: :ref:`07_migration_mutation_extension` .. change:: :tags: orm :tickets: 1980 PickleType and ARRAY mutability turned off by default .. change:: :tags: orm :tickets: 1895 Simplified polymorphic_on assignment .. change:: :tags: orm :tickets: 1912 Flushing of Orphans that have no parent is allowed .. change:: :tags: orm :tickets: 2041 Adjusted flush accounting step to occur before the commit in the case of autocommit=True. This allows autocommit=True to work appropriately with expire_on_commit=True, and also allows post-flush session hooks to operate in the same transactional context as when autocommit=False. .. change:: :tags: orm :tickets: 1973 Warnings generated when collection members, scalar referents not part of the flush .. 
change:: :tags: orm :tickets: 1876 Non-`Table`-derived constructs can be mapped .. change:: :tags: orm :tickets: 1942 Tuple label names in Query Improved .. change:: :tags: orm :tickets: 1892 Mapped column attributes reference the most specific column first .. change:: :tags: orm :tickets: 1896 Mapping to joins with two or more same-named columns requires explicit declaration .. change:: :tags: orm :tickets: 1875 Mapper requires that polymorphic_on column be present in the mapped selectable .. change:: :tags: orm :tickets: 1966 compile_mappers() renamed configure_mappers(), simplified configuration internals .. change:: :tags: orm :tickets: 2018 the aliased() function, if passed a SQL FromClause element (i.e. not a mapped class), will return element.alias() instead of raising an error on AliasedClass. .. change:: :tags: orm :tickets: 2027 Session.merge() will check the version id of the incoming state against that of the database, assuming the mapping uses version ids and incoming state has a version_id assigned, and raise StaleDataError if they don't match. .. change:: :tags: orm :tickets: 1996 Session.connection(), Session.execute() accept 'bind', to allow execute/connection operations to participate in the open transaction of an engine explicitly. .. change:: :tags: orm :tickets: Query.join(), Query.outerjoin(), eagerload(), eagerload_all(), others no longer allow lists of attributes as arguments (i.e. option([x, y, z]) form, deprecated since 0.5) .. change:: :tags: orm :tickets: ScopedSession.mapper is removed (deprecated since 0.5). .. change:: :tags: orm :tickets: 2031 Horizontal shard query places 'shard_id' in context.attributes where it's accessible by the "load()" event. .. change:: :tags: orm :tickets: 2032 A single contains_eager() call across multiple entities will indicate all collections along that path should load, instead of requiring distinct contains_eager() calls for each endpoint (which was never correctly documented). .. change:: :tags: orm :tickets: The "name" field used in orm.aliased() now renders in the resulting SQL statement. .. change:: :tags: orm :tickets: 1473 Session weak_instance_dict=False is deprecated. .. change:: :tags: orm :tickets: 2046 An exception is raised in the unusual case that an append or similar event on a collection occurs after the parent object has been dereferenced, which prevents the parent from being marked as "dirty" in the session. Was a warning in 0.6.6. .. change:: :tags: orm :tickets: 1069 Query.distinct() now accepts column expressions as \*args, interpreted by the Postgresql dialect as DISTINCT ON (). .. change:: :tags: orm :tickets: 2049 Additional tuning to "many-to-one" relationship loads during a flush(). A change in version 0.6.6 ([ticket:2002]) required that more "unnecessary" m2o loads during a flush could occur. Extra loading modes have been added so that the SQL emitted in this specific use case is trimmed back, while still retrieving the information the flush needs in order to not miss anything. .. change:: :tags: orm :tickets: the value of "passive" as passed to attributes.get_history() should be one of the constants defined in the attributes package. Sending True or False is deprecated. .. change:: :tags: orm :tickets: 2030 Added a `name` argument to `Query.subquery()`, to allow a fixed name to be assigned to the alias object. (also in 0.6.7) .. 
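To illustrate the `name` argument just mentioned, a brief sketch where ``User`` is a hypothetical mapped class::

    # the alias generated for the subquery will render as "user_ids"
    subq = session.query(User.id).\
        filter(User.name.like('%ed%')).\
        subquery(name='user_ids')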
.. change:: :tags: orm :tickets: 2019 A warning is emitted when a joined-table inheriting mapper has no primary keys on the locally mapped table (but has pks on the superclass table). (also in 0.6.7) .. change:: :tags: orm :tickets: 2038 Fixed bug where "middle" class in a polymorphic hierarchy would have no 'polymorphic_on' column if it didn't also specify a 'polymorphic_identity', leading to strange errors upon refresh, wrong class loaded when querying from that target. Also emits the correct WHERE criterion when using single table inheritance. (also in 0.6.7) .. change:: :tags: orm :tickets: 1995 Fixed bug where a column with a SQL or server side default that was excluded from a mapping with include_properties or exclude_properties would result in UnmappedColumnError. (also in 0.6.7) .. change:: :tags: orm :tickets: 2046 A warning is emitted in the unusual case that an append or similar event on a collection occurs after the parent object has been dereferenced, which prevents the parent from being marked as "dirty" in the session. This will be an exception in 0.7. (also in 0.6.7) .. change:: :tags: declarative :tickets: 2050 Added an explicit check for the case that the name 'metadata' is used for a column attribute on a declarative class. (also in 0.6.7) .. change:: :tags: sql :tickets: 1844 Added over() function, method to FunctionElement classes, produces the _Over() construct which in turn generates "window functions", i.e. "<window function> OVER (PARTITION BY <partition by>, ORDER BY <order by>)". .. change:: :tags: sql :tickets: 805 LIMIT/OFFSET clauses now use bind parameters .. change:: :tags: sql :tickets: 1069 select.distinct() now accepts column expressions as \*args, interpreted by the Postgresql dialect as DISTINCT ON (<expr>). Note this was already available via passing a list to the `distinct` keyword argument to select(). .. change:: :tags: sql :tickets: select.prefix_with() accepts multiple expressions (i.e. \*expr), 'prefix' keyword argument to select() accepts a list or tuple. .. change:: :tags: sql :tickets: Passing a string to the `distinct` keyword argument of `select()` for the purpose of emitting special MySQL keywords (DISTINCTROW etc.) is deprecated - use `prefix_with()` for this. .. change:: :tags: sql :tickets: 2006, 2005 TypeDecorator works with primary key columns .. change:: :tags: sql :tickets: 1897 DDL() constructs now escape percent signs .. change:: :tags: sql :tickets: 1917, 1893 Table.c / MetaData.tables refined a bit, don't allow direct mutation .. change:: :tags: sql :tickets: 1950 Callables passed to `bindparam()` don't get evaluated .. change:: :tags: sql :tickets: 1870 types.type_map is now private, types._type_map .. change:: :tags: sql :tickets: 1982 Non-public Pool methods underscored .. change:: :tags: sql :tickets: 723 Added NULLS FIRST and NULLS LAST support. It's implemented as an extension to the asc() and desc() operators, called nullsfirst() and nullslast(). .. change:: :tags: sql :tickets: The Index() construct can be created inline with a Table definition, using strings as column names, as an alternative to the creation of the index outside of the Table. .. change:: :tags: sql :tickets: 2001 execution_options() on Connection accepts "isolation_level" argument, sets transaction isolation level for that connection only until returned to the connection pool, for those backends which support it (SQLite, Postgresql).
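A short sketch of the connection-level isolation option described in the last entry above; the engine and table objects are assumed to exist::

    # applies only to this connection, until it is returned to the pool
    conn = engine.connect().execution_options(isolation_level="SERIALIZABLE")
    result = conn.execute(some_table.select())
    conn.close()

..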
change:: :tags: sql :tickets: 2005 A TypeDecorator of Integer can be used with a primary key column, and the "autoincrement" feature of various dialects as well as the "sqlite_autoincrement" flag will honor the underlying database type as being Integer-based. .. change:: :tags: sql :tickets: 2020, 2021 Established consistency when server_default is present on an Integer PK column. SQLA doesn't pre-fetch these, nor do they come back in cursor.lastrowid (DBAPI). Ensured all backends consistently return None in result.inserted_primary_key for these. Regarding reflection for this case, reflection of an int PK col with a server_default sets the "autoincrement" flag to False, except in the case of a PG SERIAL col where we detected a sequence default. .. change:: :tags: sql :tickets: 2006 Result-row processors are applied to pre-executed SQL defaults, as well as cursor.lastrowid, when determining the contents of result.inserted_primary_key. .. change:: :tags: sql :tickets: Bind parameters present in the "columns clause" of a select are now auto-labeled like other "anonymous" clauses, which among other things allows their "type" to be meaningful when the row is fetched, as in result row processors. .. change:: :tags: sql :tickets: TypeDecorator is present in the "sqlalchemy" import space. .. change:: :tags: sql :tickets: 2015 Non-DBAPI errors which occur in the scope of an `execute()` call are now wrapped in sqlalchemy.exc.StatementError, and the text of the SQL statement and repr() of params is included. This makes it easier to identify statement executions which fail before the DBAPI becomes involved. .. change:: :tags: sql :tickets: 2048 The concept of associating a ".bind" directly with a ClauseElement has been explicitly moved to Executable, i.e. the mixin that describes ClauseElements which represent engine-executable constructs. This change is an improvement to internal organization and is unlikely to affect any real-world usage. .. change:: :tags: sql :tickets: 2028 Column.copy(), as used in table.tometadata(), copies the 'doc' attribute. (also in 0.6.7) .. change:: :tags: sql :tickets: 2023 Added some defs to the resultproxy.c extension so that the extension compiles and runs on Python 2.4. (also in 0.6.7) .. change:: :tags: sql :tickets: 2042 The compiler extension now supports overriding the default compilation of expression._BindParamClause including that the auto-generated binds within the VALUES/SET clause of an insert()/update() statement will also use the new compilation rules. (also in 0.6.7) .. change:: :tags: sql :tickets: 1921 SQLite dialect now uses `NullPool` for file-based databases .. change:: :tags: sql :tickets: 2036 The path given as the location of a sqlite database is now normalized via os.path.abspath(), so that directory changes within the process don't affect the ultimate location of a relative file path. .. change:: :tags: postgresql :tickets: 1083 When explicit sequence execution derives the name of the auto-generated sequence of a SERIAL column, which currently only occurs if implicit_returning=False, now accommodates if the table + column name is greater than 63 characters using the same logic Postgresql uses. (also in 0.6.7) .. change:: :tags: postgresql :tickets: 2044 Added an additional libpq message to the list of "disconnect" exceptions, "could not receive data from server" (also in 0.6.7) .. 
change:: :tags: mssql :tickets: 1833 the String/Unicode types, and their counterparts VARCHAR/ NVARCHAR, emit "max" as the length when no length is specified, so that the default length, normally '1' as per SQL server documentation, is instead 'unbounded'. This also occurs for the VARBINARY type. This behavior makes these types more closely compatible with Postgresql's VARCHAR type which is similarly unbounded when no length is specified. .. change:: :tags: mysql :tickets: 1991 New DBAPI support for pymysql, a pure Python port of MySQL-python. .. change:: :tags: mysql :tickets: 2047 oursql dialect accepts the same "ssl" arguments in create_engine() as that of MySQLdb. (also in 0.6.7) .. change:: :tags: firebird :tickets: 1885 Some adjustments so that Interbase is supported as well. FB/Interbase version idents are parsed into a structure such as (8, 1, 1, 'interbase') or (2, 1, 588, 'firebird') so they can be distinguished.

SQLAlchemy-0.8.4/doc/build/changelog/changelog_08.rst

==============
0.8 Changelog
==============

.. changelog_imports:: .. include:: changelog_07.rst :start-line: 5 .. changelog:: :version: 0.8.4 :released: December 8, 2013 .. change:: :tags: bug, engine :versions: 0.9.0b2 :tickets: 2881 A DBAPI that raises an error on ``connect()`` which is not a subclass of dbapi.Error (such as ``TypeError``, ``NotImplementedError``, etc.) will propagate the exception unchanged. Previously, the error handling specific to the ``connect()`` routine would both inappropriately run the exception through the dialect's :meth:`.Dialect.is_disconnect` routine as well as wrap it in a :class:`sqlalchemy.exc.DBAPIError`. It is now propagated unchanged in the same way as occurs within the execute process. .. change:: :tags: bug, engine, pool :versions: 0.9.0b2 :tickets: 2880 The :class:`.QueuePool` has been enhanced to not block new connection attempts when an existing connection attempt is blocking. Previously, the production of new connections was serialized within the block that monitored overflow; the overflow counter is now altered within its own critical section outside of the connection process itself. .. change:: :tags: bug, engine, pool :versions: 0.9.0b2 :tickets: 2522 Made a slight adjustment to the logic which waits for a pooled connection to be available, such that for a connection pool with no timeout specified, it will break out of the wait every half a second to check for the so-called "abort" flag, which allows the waiter to break out in case the whole connection pool was dumped; normally the waiter should break out due to a notify_all() but it's possible this notify_all() is missed in very slim cases. This is an extension of logic first introduced in 0.8.0, and the issue has only been observed occasionally in stress tests. .. change:: :tags: bug, mssql :versions: 0.9.0b2 :pullreq: bitbucket:7 Fixed bug introduced in 0.8.0 where the ``DROP INDEX`` statement for an index in MSSQL would render incorrectly if the index were in an alternate schema; the schemaname/tablename would be reversed. The format has also been revised to match current MSSQL documentation. Courtesy Derek Harland. .. change:: :tags: feature, sql :tickets: 1443 :versions: 0.9.0b1 Added support for "unique constraint" reflection, via the :meth:`.Inspector.get_unique_constraints` method. Thanks to Roman Podolyaka for the patch.
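A quick sketch of the new reflection method mentioned above; the connection URL and table name are placeholders::

    from sqlalchemy import create_engine, inspect

    engine = create_engine("postgresql://scott:tiger@localhost/test")
    insp = inspect(engine)
    for uc in insp.get_unique_constraints("some_table"):
        print(uc["name"], uc["column_names"])

..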
change:: :tags: bug, oracle :tickets: 2864 :versions: 0.9.0b2 Added ORA-02396 "maximum idle time" error code to list of "is disconnect" codes with cx_oracle. .. change:: :tags: bug, engine :tickets: 2871 :versions: 0.9.0b2 Fixed bug where SQL statement would be improperly ASCII-encoded when a pre-DBAPI :class:`.StatementError` were raised within :meth:`.Connection.execute`, causing encoding errors for non-ASCII statements. The stringification now remains within Python unicode thus avoiding encoding errors. .. change:: :tags: bug, oracle :tickets: 2870 :versions: 0.9.0b2 Fixed bug where Oracle ``VARCHAR`` types given with no length (e.g. for a ``CAST`` or similar) would incorrectly render ``None CHAR`` or similar. .. change:: :tags: bug, ext :tickets: 2869 :versions: 0.9.0b2 Fixed bug which prevented the ``serializer`` extension from working correctly with table or column names that contain non-ASCII characters. .. change:: :tags: bug, orm :tickets: 2818 :versions: 0.9.0b2 Fixed a regression introduced by :ticket:`2818` where the EXISTS query being generated would produce a "columns being replaced" warning for a statement with two same-named columns, as the internal SELECT wouldn't have use_labels set. .. change:: :tags: bug, postgresql :tickets: 2855 :versions: 0.9.0b2 Fixed bug where index reflection would mis-interpret indkey values when using the pypostgresql adapter, which returns these values as lists vs. psycopg2's return type of string. .. changelog:: :version: 0.8.3 :released: October 26, 2013 .. change:: :tags: bug, oracle :tickets: 2853 :versions: 0.9.0b1 Fixed bug where Oracle table reflection using synonyms would fail if the synonym and the table were in different remote schemas. Patch to fix courtesy Kyle Derr. .. change:: :tags: bug, sql :tickets: 2849 :versions: 0.9.0b1 Fixed bug where :func:`.type_coerce` would not interpret ORM elements with a ``__clause_element__()`` method properly. .. change:: :tags: bug, sql :tickets: 2842 :versions: 0.9.0b1 The :class:`.Enum` and :class:`.Boolean` types now bypass any custom (e.g. TypeDecorator) type in use when producing the CHECK constraint for the "non native" type. This so that the custom type isn't involved in the expression within the CHECK, since this expression is against the "impl" value and not the "decorated" value. .. change:: :tags: bug, postgresql :tickets: 2844 :versions: 0.9.0b1 Removed a 128-character truncation from the reflection of the server default for a column; this code was original from PG system views which truncated the string for readability. .. change:: :tags: bug, mysql :tickets: 2721, 2839 :versions: 0.9.0b1 The change in :ticket:`2721`, which is that the ``deferrable`` keyword of :class:`.ForeignKeyConstraint` is silently ignored on the MySQL backend, will be reverted as of 0.9; this keyword will now render again, raising errors on MySQL as it is not understood - the same behavior will also apply to the ``initially`` keyword. In 0.8, the keywords will remain ignored but a warning is emitted. Additionally, the ``match`` keyword now raises a :exc:`.CompileError` on 0.9 and emits a warning on 0.8; this keyword is not only silently ignored by MySQL but also breaks the ON UPDATE/ON DELETE options. To use a :class:`.ForeignKeyConstraint` that does not render or renders differently on MySQL, use a custom compilation option. An example of this usage has been added to the documentation, see :ref:`mysql_foreign_keys`. .. 
change:: :tags: bug, sql :tickets: 2825 :versions: 0.9.0b1 The ``.unique`` flag on :class:`.Index` could be produced as ``None`` if it was generated from a :class:`.Column` that didn't specify ``unique`` (where it defaults to ``None``). The flag will now always be ``True`` or ``False``. .. change:: :tags: feature, orm :tickets: 2836 :versions: 0.9.0b1 Added new option to :func:`.relationship` ``distinct_target_key``. This enables the subquery eager loader strategy to apply a DISTINCT to the innermost SELECT subquery, to assist in the case where duplicate rows are generated by the innermost query which corresponds to this relationship (there's not yet a general solution to the issue of dupe rows within subquery eager loading, however, when joins outside of the innermost subquery produce dupes). When the flag is set to ``True``, the DISTINCT is rendered unconditionally, and when it is set to ``None``, DISTINCT is rendered if the innermost relationship targets columns that do not comprise a full primary key. The option defaults to False in 0.8 (e.g. off by default in all cases), None in 0.9 (e.g. automatic by default). Thanks to Alexander Koval for help with this. .. seealso:: :ref:`change_2836` .. change:: :tags: bug, mysql :tickets: 2515 :versions: 0.9.0b1 MySQL-connector dialect now allows options in the create_engine query string to override those defaults set up in the connect, including "buffered" and "raise_on_warnings". .. change:: :tags: bug, postgresql :tickets: 2742 :versions: 0.9.0b1 Parenthesis will be applied to a compound SQL expression as rendered in the column list of a CREATE INDEX statement. .. change:: :tags: bug, sql :tickets: 2742 :versions: 0.9.0b1 Fixed bug in default compiler plus those of postgresql, mysql, and mssql to ensure that any literal SQL expression values are rendered directly as literals, instead of as bound parameters, within a CREATE INDEX statement. This also changes the rendering scheme for other DDL such as constraints. .. change:: :tags: bug, sql :tickets: 2815 :versions: 0.9.0b1 A :func:`.select` that is made to refer to itself in its FROM clause, typically via in-place mutation, will raise an informative error message rather than causing a recursion overflow. .. change:: :tags: bug, orm :tickets: 2813 :versions: 0.9.0b1 Fixed bug where using an annotation such as :func:`.remote` or :func:`.foreign` on a :class:`.Column` before association with a parent :class:`.Table` could produce issues related to the parent table not rendering within joins, due to the inherent copy operation performed by an annotation. .. change:: :tags: bug, sql :tickets: 2831 Non-working "schema" argument on :class:`.ForeignKey` is deprecated; raises a warning. Removed in 0.9. .. change:: :tags: bug, postgresql :tickets: 2819 :versions: 0.9.0b1 Fixed bug where Postgresql version strings that had a prefix preceding the words "Postgresql" or "EnterpriseDB" would not parse. Courtesy Scott Schaefer. .. change:: :tags: feature, engine :tickets: 2821 :versions: 0.9.0b1 ``repr()`` for the :class:`.URL` of an :class:`.Engine` will now conceal the password using asterisks. Courtesy Gunnlaugur Þór Briem. .. change:: :tags: bug, orm :tickets: 2818 :versions: 0.9.0b1 Fixed bug where :meth:`.Query.exists` failed to work correctly without any WHERE criterion. Courtesy Vladimir Magamedov. .. 
change:: :tags: bug, sql :tickets: 2811 :versions: 0.9.0b1 Fixed bug where using the ``column_reflect`` event to change the ``.key`` of the incoming :class:`.Column` would prevent primary key constraints, indexes, and foreign key constraints from being correctly reflected. .. change:: :tags: feature :versions: 0.9.0b1 Added a new flag ``system=True`` to :class:`.Column`, which marks the column as a "system" column which is automatically made present by the database (such as Postgresql ``oid`` or ``xmin``). The column will be omitted from the ``CREATE TABLE`` statement but will otherwise be available for querying. In addition, the :class:`.CreateColumn` construct can be appled to a custom compilation rule which allows skipping of columns, by producing a rule that returns ``None``. .. change:: :tags: bug, orm :tickets: 2779 Backported a change from 0.9 whereby the iteration of a hierarchy of mappers used in polymorphic inheritance loads is sorted, which allows the SELECT statements generated for polymorphic queries to have deterministic rendering, which in turn helps with caching schemes that cache on the SQL string itself. .. change:: :tags: bug, orm :tickets: 2794 :versions: 0.9.0b1 Fixed a potential issue in an ordered sequence implementation used by the ORM to iterate mapper hierarchies; under the Jython interpreter this implementation wasn't ordered, even though cPython and Pypy maintained ordering. .. change:: :tags: bug, examples :versions: 0.9.0b1 Added "autoincrement=False" to the history table created in the versioning example, as this table shouldn't have autoinc on it in any case, courtesy Patrick Schmid. .. change:: :tags: bug, sql :versions: 0.9.0b1 The :meth:`.ColumnOperators.notin_` operator added in 0.8 now properly produces the negation of the expression "IN" returns when used against an empty collection. .. change:: :tags: feature, examples :versions: 0.9.0b1 Improved the examples in ``examples/generic_associations``, including that ``discriminator_on_association.py`` makes use of single table inheritance do the work with the "discriminator". Also added a true "generic foreign key" example, which works similarly to other popular frameworks in that it uses an open-ended integer to point to any other table, foregoing traditional referential integrity. While we don't recommend this pattern, information wants to be free. .. change:: :tags: feature, orm, declarative :versions: 0.9.0b1 Added a convenience class decorator :func:`.as_declarative`, is a wrapper for :func:`.declarative_base` which allows an existing base class to be applied using a nifty class-decorated approach. .. change:: :tags: bug, orm :tickets: 2786 :versions: 0.9.0b1 Fixed bug in ORM-level event registration where the "raw" or "propagate" flags could potentially be mis-configured in some "unmapped base class" configurations. .. change:: :tags: bug, orm :tickets: 2778 :versions: 0.9.0b1 A performance fix related to the usage of the :func:`.defer` option when loading mapped entities. The function overhead of applying a per-object deferred callable to an instance at load time was significantly higher than that of just loading the data from the row (note that ``defer()`` is meant to reduce DB/network overhead, not necessarily function call count); the function call overhead is now less than that of loading data from the column in all cases. There is also a reduction in the number of "lazy callable" objects created per load from N (total deferred values in the result) to 1 (total number of deferred cols). .. 
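For reference, the ``defer()`` option discussed in the last entry above is used like this; ``Book`` is a hypothetical mapped class::

    from sqlalchemy.orm import defer

    # the "summary" column is not loaded until first accessed on an instance
    books = session.query(Book).options(defer(Book.summary)).all()

..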
change:: :tags: bug, sqlite :tickets: 2781 :versions: 0.9.0b1 The newly added SQLite DATETIME arguments storage_format and regexp apparently were not fully implemented correctly; while the arguments were accepted, in practice they would have no effect; this has been fixed. .. change:: :tags: bug, sql, postgresql :tickets: 2780 :versions: 0.9.0b1 Fixed bug where the expression system relied upon the ``str()`` form of a some expressions when referring to the ``.c`` collection on a ``select()`` construct, but the ``str()`` form isn't available since the element relies on dialect-specific compilation constructs, notably the ``__getitem__()`` operator as used with a Postgresql ``ARRAY`` element. The fix also adds a new exception class :exc:`.UnsupportedCompilationError` which is raised in those cases where a compiler is asked to compile something it doesn't know how to. .. change:: :tags: bug, engine, oracle :tickets: 2776 :versions: 0.9.0b1 Dialect.initialize() is not called a second time if an :class:`.Engine` is recreated, due to a disconnect error. This fixes a particular issue in the Oracle 8 dialect, but in general the dialect.initialize() phase should only be once per dialect. .. change:: :tags: feature, sql :tickets: 722 Added new method to the :func:`.insert` construct :meth:`.Insert.from_select`. Given a list of columns and a selectable, renders ``INSERT INTO (table) (columns) SELECT ..``. .. change:: :tags: feature, sql :versions: 0.9.0b1 The :func:`.update`, :func:`.insert`, and :func:`.delete` constructs will now interpret ORM entities as target tables to be operated upon, e.g.:: from sqlalchemy import insert, update, delete ins = insert(SomeMappedClass).values(x=5) del_ = delete(SomeMappedClass).where(SomeMappedClass.id == 5) upd = update(SomeMappedClass).where(SomeMappedClass.id == 5).values(name='ed') .. change:: :tags: bug, orm :tickets: 2773 :versions: 0.9.0b1 Fixed bug whereby attribute history functions would fail when an object we moved from "persistent" to "pending" using the :func:`.make_transient` function, for operations involving collection-based backrefs. .. change:: :tags: bug, engine, pool :tickets: 2772 :versions: 0.9.0b1 Fixed bug where :class:`.QueuePool` would lose the correct checked out count if an existing pooled connection failed to reconnect after an invalidate or recycle event. .. changelog:: :version: 0.8.2 :released: July 3, 2013 .. change:: :tags: bug, mysql :tickets: 2768 :versions: 0.9.0b1 Fixed bug when using multi-table UPDATE where a supplemental table is a SELECT with its own bound parameters, where the positioning of the bound parameters would be reversed versus the statement itself when using MySQL's special syntax. .. change:: :tags: bug, sqlite :tickets: 2764 :versions: 0.9.0b1 Added :class:`sqlalchemy.types.BIGINT` to the list of type names that can be reflected by the SQLite dialect; courtesy Russell Stuart. .. change:: :tags: feature, orm, declarative :tickets: 2761 :versions: 0.9.0b1 ORM descriptors such as hybrid properties can now be referenced by name in a string argument used with ``order_by``, ``primaryjoin``, or similar in :func:`.relationship`, in addition to column-bound attributes. .. change:: :tags: feature, firebird :tickets: 2763 :versions: 0.9.0b1 Added new flag ``retaining=True`` to the kinterbasdb and fdb dialects. This controls the value of the ``retaining`` flag sent to the ``commit()`` and ``rollback()`` methods of the DBAPI connection. 
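A one-line sketch of passing this flag; the connection URL is a placeholder::

    from sqlalchemy import create_engine

    engine = create_engine("firebird+fdb://scott:tiger@localhost/test.fdb",
                           retaining=False)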
Due to historical concerns, this flag defaults to ``True`` in 0.8.2, however in 0.9.0b1 this flag defaults to ``False``. .. change:: :tags: requirements :versions: 0.9.0b1 The Python `mock `_ library is now required in order to run the unit test suite. While part of the standard library as of Python 3.3, previous Python installations will need to install this in order to run unit tests or to use the ``sqlalchemy.testing`` package for external dialects. .. change:: :tags: bug, orm :tickets: 2750 :versions: 0.9.0b1 A warning is emitted when trying to flush an object of an inherited class where the polymorphic discriminator has been assigned to a value that is invalid for the class. .. change:: :tags: bug, postgresql :tickets: 2740 :versions: 0.9.0b1 The behavior of :func:`.extract` has been simplified on the Postgresql dialect to no longer inject a hardcoded ``::timestamp`` or similar cast into the given expression, as this interfered with types such as timezone-aware datetimes, but also does not appear to be at all necessary with modern versions of psycopg2. .. change:: :tags: bug, firebird :tickets: 2757 :versions: 0.9.0b1 Type lookup when reflecting the Firebird types LONG and INT64 has been fixed so that LONG is treated as INTEGER, INT64 treated as BIGINT, unless the type has a "precision" in which case it's treated as NUMERIC. Patch courtesy Russell Stuart. .. change:: :tags: bug, postgresql :tickets: 2766 :versions: 0.9.0b1 Fixed bug in HSTORE type where keys/values that contained backslashed quotes would not be escaped correctly when using the "non native" (i.e. non-psycopg2) means of translating HSTORE data. Patch courtesy Ryan Kelly. .. change:: :tags: bug, postgresql :tickets: 2767 :versions: 0.9.0b1 Fixed bug where the order of columns in a multi-column Postgresql index would be reflected in the wrong order. Courtesy Roman Podolyaka. .. change:: :tags: bug, sql :tickets: 2746, 2668 :versions: 0.9.0b1 Multiple fixes to the correlation behavior of :class:`.Select` constructs, first introduced in 0.8.0: * To satisfy the use case where FROM entries should be correlated outwards to a SELECT that encloses another, which then encloses this one, correlation now works across multiple levels when explicit correlation is established via :meth:`.Select.correlate`, provided that the target select is somewhere along the chain contained by a WHERE/ORDER BY/columns clause, not just nested FROM clauses. This makes :meth:`.Select.correlate` act more compatibly to that of 0.7 again while still maintaining the new "smart" correlation. * When explicit correlation is not used, the usual "implicit" correlation limits its behavior to just the immediate enclosing SELECT, to maximize compatibility with 0.7 applications, and also prevents correlation across nested FROMs in this case, maintaining compatibility with 0.8.0/0.8.1. * The :meth:`.Select.correlate_except` method was not preventing the given FROM clauses from correlation in all cases, and also would cause FROM clauses to be incorrectly omitted entirely (more like what 0.7 would do), this has been fixed. * Calling `select.correlate_except(None)` will enter all FROM clauses into correlation as would be expected. .. change:: :tags: bug, ext :versions: 0.9.0b1 Fixed bug whereby if a composite type were set up with a function instead of a class, the mutable extension would trip up when it tried to check that column for being a :class:`.MutableComposite` (which it isn't). Courtesy asldevi. .. 
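To ground the correlation fixes described above, explicit correlation via :meth:`.Select.correlate` looks roughly like this; the ``users`` and ``addresses`` tables are hypothetical::

    from sqlalchemy import select

    # the inner SELECT leaves "users" out of its FROM list, correlating it
    # to the enclosing statement instead
    inner = select([addresses.c.id]).\
        where(addresses.c.user_id == users.c.id).\
        correlate(users)
    stmt = select([users.c.name]).where(users.c.id.in_(inner))

..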
change:: :tags: feature, sql :tickets: 2744, 2734 Provided a new attribute for :class:`.TypeDecorator` called :attr:`.TypeDecorator.coerce_to_is_types`, to make it easier to control how comparisons using ``==`` or ``!=`` to ``None`` and boolean types goes about producing an ``IS`` expression, or a plain equality expression with a bound parameter. .. change:: :tags: feature, postgresql :versions: 0.9.0b1 Support for Postgresql 9.2 range types has been added. Currently, no type translation is provided, so works directly with strings or psycopg2 2.5 range extension types at the moment. Patch courtesy Chris Withers. .. change:: :tags: bug, examples :versions: 0.9.0b1 Fixed an issue with the "versioning" recipe whereby a many-to-one reference could produce a meaningless version for the target, even though it was not changed, when backrefs were present. Patch courtesy Matt Chisholm. .. change:: :tags: feature, postgresql :tickets: 2072 :versions: 0.9.0b1 Added support for "AUTOCOMMIT" isolation when using the psycopg2 DBAPI. The keyword is available via the ``isolation_level`` execution option. Patch courtesy Roman Podolyaka. .. change:: :tags: bug, orm :tickets: 2759 :versions: 0.9.0b1 Fixed bug in polymorphic SQL generation where multiple joined-inheritance entities against the same base class joined to each other as well would not track columns on the base table independently of each other if the string of joins were more than two entities long. .. change:: :tags: bug, engine :pullreq: github:6 :versions: 0.9.0b1 Fixed bug where the ``reset_on_return`` argument to various :class:`.Pool` implementations would not be propagated when the pool was regenerated. Courtesy Eevee. .. change:: :tags: bug, orm :tickets: 2754 :versions: 0.9.0b1 Fixed bug where sending a composite attribute into :meth:`.Query.order_by` would produce a parenthesized expression not accepted by some databases. .. change:: :tags: bug, orm :tickets: 2755 :versions: 0.9.0b1 Fixed the interaction between composite attributes and the :func:`.aliased` function. Previously, composite attributes wouldn't work correctly in comparison operations when aliasing was applied. .. change:: :tags: bug, mysql :tickets: 2715 :versions: 0.9.0b1 Added another conditional to the ``mysql+gaerdbms`` dialect to detect so-called "development" mode, where we should use the ``rdbms_mysqldb`` DBAPI. Patch courtesy Brett Slatkin. .. change:: :tags: feature, mysql :tickets: 2704 :versions: 0.9.0b1 The ``mysql_length`` parameter used with :class:`.Index` can now be passed as a dictionary of column names/lengths, for use with composite indexes. Big thanks to Roman Podolyaka for the patch. .. change:: :tags: bug, mssql :tickets: 2747 :versions: 0.9.0b1 When querying the information schema on SQL Server 2000, removed a CAST call that was added in 0.8.1 to help with driver issues, which apparently is not compatible on 2000. The CAST remains in place for SQL Server 2005 and greater. .. change:: :tags: bug, mysql :tickets: 2721 :versions: 0.9.0b1 The ``deferrable`` keyword argument on :class:`.ForeignKey` and :class:`.ForeignKeyConstraint` will not render the ``DEFERRABLE`` keyword on the MySQL dialect. For a long time we left this in place because a non-deferrable foreign key would act very differently than a deferrable one, but some environments just disable FKs on MySQL, so we'll be less opinionated here. .. 
change:: :tags: bug, ext, orm :tickets: 2730 :versions: 0.9.0b1 Fixed bug where :class:`.MutableDict` didn't report a change event when ``clear()`` was called. .. change:: :tags: bug, sql :tickets: 2738 :versions: 0.9.0b1 Fixed bug whereby joining a select() of a table "A" with multiple foreign key paths to a table "B", to that table "B", would fail to produce the "ambiguous join condition" error that would be reported if you join table "A" directly to "B"; it would instead produce a join condition with multiple criteria. .. change:: :tags: bug, sql, reflection :tickets: 2728 :versions: 0.9.0b1 Fixed bug whereby using :meth:`.MetaData.reflect` across a remote schema as well as a local schema could produce wrong results in the case where both schemas had a table of the same name. .. change:: :tags: bug, sql :tickets: 2726 :versions: 0.9.0b1 Removed the "not implemented" ``__iter__()`` call from the base :class:`.ColumnOperators` class, while this was introduced in 0.8.0 to prevent an endless, memory-growing loop when one also implements a ``__getitem__()`` method on a custom operator and then calls erroneously ``list()`` on that object, it had the effect of causing column elements to report that they were in fact iterable types which then throw an error when you try to iterate. There's no real way to have both sides here so we stick with Python best practices. Careful with implementing ``__getitem__()`` on your custom operators! .. change:: :tags: feature, orm :tickets: 2736 Added a new method :meth:`.Query.select_entity_from` which will in 0.9 replace part of the functionality of :meth:`.Query.select_from`. In 0.8, the two methods perform the same function, so that code can be migrated to use the :meth:`.Query.select_entity_from` method as appropriate. See the 0.9 migration guide for details. .. change:: :tags: bug, orm :tickets: 2737 Fixed a regression caused by :ticket:`2682` whereby the evaluation invoked by :meth:`.Query.update` and :meth:`.Query.delete` would hit upon unsupported ``True`` and ``False`` symbols which now appear due to the usage of ``IS``. .. change:: :tags: bug, postgresql :pullreq: github:2 :tickets: 2735 Fixed the HSTORE type to correctly encode/decode for unicode. This is always on, as the hstore is a textual type, and matches the behavior of psycopg2 when using Python 3. Courtesy Dmitry Mugtasimov. .. change:: :tags: bug, examples Fixed a small bug in the dogpile example where the generation of SQL cache keys wasn't applying deduping labels to the statement the same way :class:`.Query` normally does. .. change:: :tags: bug, engine, sybase :tickets: 2732 Fixed a bug where the routine to detect the correct kwargs being sent to :func:`.create_engine` would fail in some cases, such as with the Sybase dialect. .. change:: :tags: bug, orm :tickets: 2481 Fixed a regression from 0.7 caused by this ticket, which made the check for recursion overflow in self-referential eager joining too loose, missing a particular circumstance where a subclass had lazy="joined" or "subquery" configured and the load was a "with_polymorphic" against the base. .. change:: :tags: bug, orm :tickets: 2718 Fixed a regression from 0.7 where the contextmanager feature of :meth:`.Session.begin_nested` would fail to correctly roll back the transaction when a flush error occurred, instead raising its own exception while leaving the session still pending a rollback. .. 
change:: :tags: bug, mysql Updated mysqlconnector dialect to check for disconnect based on the apparent string message sent in the exception; tested against mysqlconnector 1.0.9. .. change:: :tags: bug, sql, mssql :tickets: 2682 Regression from this ticket caused the unsupported keyword "true" to render, added logic to convert this to 1/0 for SQL server. .. changelog:: :version: 0.8.1 :released: April 27, 2013 .. change:: :tags: bug, orm :tickets: 2698 Fixes to the ``sqlalchemy.ext.serializer`` extension, including that the "id" passed from the pickler is turned into a string to prevent against bytes being parsed on Py3K, as well as that ``relationship()`` and ``orm.join()`` constructs are now properly serialized. .. change:: :tags: bug, orm :tickets: 2714 A significant improvement to the inner workings of query.join(), such that the decisionmaking involved on how to join has been dramatically simplified. New test cases now pass such as multiple joins extending from the middle of an already complex series of joins involving inheritance and such. Joining from deeply nested subquery structures is still complicated and not without caveats, but with these improvements the edge cases are hopefully pushed even farther out to the edges. .. change:: :tags: feature, orm :tickets: 2673 Added a convenience method to Query that turns a query into an EXISTS subquery of the form ``EXISTS (SELECT 1 FROM ... WHERE ...)``. .. change:: :tags: bug, orm Added a conditional to the unpickling process for ORM mapped objects, such that if the reference to the object were lost when the object was pickled, we don't erroneously try to set up _sa_instance_state - fixes a NoneType error. .. change:: :tags: bug, postgresql :tickets: 2712 Opened up the checking for "disconnect" with psycopg2/libpq to check for all the various "disconnect" messages within the full exception hierarchy. Specifically the "closed the connection unexpectedly" message has now been seen in at least three different exception types. Courtesy Eli Collins. .. change:: :tags: bug, sql, mysql :tickets: 2682 Fully implemented the IS and IS NOT operators with regards to the True/False constants. An expression like ``col.is_(True)`` will now render ``col IS true`` on the target platform, rather than converting the True/ False constant to an integer bound parameter. This allows the ``is_()`` operator to work on MySQL when given True/False constants. .. change:: :tags: bug, postgresql :tickets: 2681 The operators for the Postgresql ARRAY type supports input types of sets, generators, etc. even when a dimension is not specified, by turning the given iterable into a collection unconditionally. .. change:: :tags: bug, mysql Fixes to support the latest cymysql DBAPI, courtesy Hajime Nakagami. .. change:: :tags: bug, mysql :tickets: 2663 Improvements to the operation of the pymysql dialect on Python 3, including some important decode/bytes steps. Issues remain with BLOB types due to driver issues. Courtesy Ben Trofatter. .. change:: :tags: bug, orm :tickets: 2710 Fixed bug where many-to-many relationship with uselist=False would fail to delete the association row and raise an error if the scalar attribute were set to None. This was a regression introduced by the changes for :ticket:`2229`. .. 
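A minimal sketch of the ``IS`` / ``IS NOT`` rendering for True/False constants described above, compiled here against the Postgresql dialect, which has native boolean literals (the table is invented for the example)::

    from sqlalchemy import MetaData, Table, Column, Boolean
    from sqlalchemy.dialects import postgresql

    t = Table('t', MetaData(), Column('flag', Boolean))

    # renders "t.flag IS true" rather than binding True as an integer parameter
    print(t.c.flag.is_(True).compile(dialect=postgresql.dialect()))
    # renders "t.flag IS NOT false"
    print(t.c.flag.isnot(False).compile(dialect=postgresql.dialect()))

..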
change:: :tags: bug, orm :tickets: 2708 Improved the behavior of instance management regarding the creation of strong references within the Session; an object will no longer have an internal reference cycle created if it's in the transient state or moves into the detached state - the strong ref is created only when the object is attached to a Session and is removed when the object is detached. This makes it somewhat safer for an object to have a ``__del__()`` method, even though this is not recommended, as relationships with backrefs produce cycles too. A warning has been added when a class with a ``__del__()`` method is mapped.
.. change:: :tags: bug, sql :tickets: 2702 A major fix to the way in which a select() object produces labeled columns when apply_labels() is used; this mode produces a SELECT where each column is labeled as in ``<tablename>_<columnname>``, to remove column name collisions for a multiple table select. The fix is that if two labels collide when combined with the table name, i.e. "foo.bar_id" and "foo_bar.id", anonymous aliasing will be applied to one of the dupes. This allows the ORM to handle both columns independently; previously, 0.7 would in some cases silently emit a second SELECT for the column that was "duped", and in 0.8 an ambiguous column error would be emitted. The "keys" applied to the .c. collection of the select() will also be deduped, so that the "column being replaced" warning will no longer emit for any select() that specifies use_labels, though the dupe key will be given an anonymous label which isn't generally user-friendly. A brief sketch of the labeling scheme appears at the end of these notes.
.. change:: :tags: bug, mysql Updated a regexp to correctly extract error code on google app engine v1.7.5 and newer. Courtesy Dan Ring.
.. change:: :tags: bug, examples Fixed a long-standing bug in the caching example, where the limit/offset parameter values wouldn't be taken into account when computing the cache key. The _key_from_query() function has been simplified to work directly from the final compiled statement in order to get at both the full statement as well as the fully processed parameter list.
.. change:: :tags: bug, mssql :tickets: 2355 Part of a longer series of fixes needed for pyodbc+mssql, a CAST to NVARCHAR(max) has been added to the bound parameter for the table name and schema name in all information schema queries to avoid the issue of comparing NVARCHAR to NTEXT, which seems to be rejected by the ODBC driver in some cases, such as FreeTDS (0.91 only?) plus unicode bound parameters being passed. The issue seems to be specific to the SQL Server information schema tables and the workaround is harmless for those cases where the problem doesn't exist in the first place.
.. change:: :tags: bug, sql :tickets: 2691 Fixed bug where disconnect detection on error would raise an attribute error if the error were being raised after the Connection object had already been closed.
.. change:: :tags: bug, sql :tickets: 2703 Reworked internal exception raises that emit a rollback() before re-raising, so that the stack trace is preserved from sys.exc_info() before entering the rollback. This is so that the traceback is preserved when using coroutine frameworks which may have switched contexts before the rollback function returns.
.. change:: :tags: bug, orm :tickets: 2697 Fixed bug whereby the ORM would run the wrong kind of query when refreshing an inheritance-mapped class where the superclass was mapped to a non-Table object, like a custom join() or a select(), running a query that assumed a hierarchy mapped to individual Tables per class.
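As referenced in the ``apply_labels()`` note above, a minimal sketch of the labeling scheme (table and column names are invented for the example)::

    from sqlalchemy import MetaData, Table, Column, Integer, select

    m = MetaData()
    foo = Table('foo', m, Column('id', Integer), Column('bar_id', Integer))
    foo_bar = Table('foo_bar', m, Column('id', Integer))

    # each column is labeled <tablename>_<columnname>; the collision between
    # foo.bar_id and foo_bar.id is resolved with an anonymous alias for one of them
    print(select([foo, foo_bar]).apply_labels())

..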
change:: :tags: bug, orm Fixed `__repr__()` on mapper property constructs to work before the object is initialized, so that Sphinx builds with recent Sphinx versions can read them. .. change:: :tags: bug, sql, postgresql The _Binary base type now converts values through the bytes() callable when run on Python 3; in particular psycopg2 2.5 with Python 3.3 seems to now be returning the "memoryview" type, so this is converted to bytes before return. .. change:: :tags: bug, sql :tickets: 2695 Improvements to Connection auto-invalidation handling. If a non-disconnect error occurs, but leads to a delayed disconnect error within error handling (happens with MySQL), the disconnect condition is detected. The Connection can now also be closed when in an invalid state, meaning it will raise "closed" on next usage, and additionally the "close with result" feature will work even if the autorollback in an error handling routine fails and regardless of whether the condition is a disconnect or not. .. change:: :tags: bug, orm, declarative :tickets: 2656 Fixed indirect regression regarding :func:`.has_inherited_table`, where since it considers the current class' ``__table__``, was sensitive to when it was called. This is 0.7's behavior also, but in 0.7 things tended to "work out" within events like ``__mapper_args__()``. :func:`.has_inherited_table` now only considers superclasses, so should return the same answer regarding the current class no matter when it's called (obviously assuming the state of the superclass). .. change:: :tags: bug, mssql Added support for additional "disconnect" messages to the pymssql dialect. Courtesy John Anderson. .. change:: :tags: feature, sql Loosened the check on dialect-specific argument names passed to Table(); since we want to support external dialects and also want to support args without a certain dialect being installed, it only checks the format of the arg now, rather than looking for that dialect in sqlalchemy.dialects. .. change:: :tags: bug, sql Fixed bug whereby a DBAPI that can return "0" for cursor.lastrowid would not function correctly in conjunction with :attr:`.ResultProxy.inserted_primary_key`. .. change:: :tags: bug, mssql :tickets: 2683 Fixed Py3K bug regarding "binary" types and pymssql. Courtesy Marc Abramowitz. .. change:: :tags: bug, postgresql :tickets: 2680 Added missing HSTORE type to postgresql type names so that the type can be reflected. .. changelog:: :version: 0.8.0 :released: March 9, 2013 .. note:: There are some new behavioral changes as of 0.8.0 not present in 0.8.0b2. They are present in the migration document as follows: * :ref:`legacy_is_orphan_addition` * :ref:`metadata_create_drop_tables` * :ref:`correlation_context_specific` .. change:: :tags: feature, orm :tickets: 2675 A meaningful :attr:`.QueryableAttribute.info` attribute is added, which proxies down to the ``.info`` attribute on either the :class:`.schema.Column` object if directly present, or the :class:`.MapperProperty` otherwise. The full behavior is documented and ensured by tests to remain stable. .. 
change:: :tags: bug, sql :tickets: 2668 The behavior of SELECT correlation has been improved such that the :meth:`.Select.correlate` and :meth:`.Select.correlate_except` methods, as well as their ORM analogues, will still retain "auto-correlation" behavior in that the FROM clause is modified only if the output would be legal SQL; that is, the FROM clause is left intact if the correlated SELECT is not used in the context of an enclosing SELECT inside of the WHERE, columns, or HAVING clause. The two methods now only specify conditions to the default "auto correlation", rather than absolute FROM lists. .. change:: :tags: feature, mysql New dialect for CyMySQL added, courtesy Hajime Nakagami. .. change:: :tags: bug, orm :tickets: 2674 Improved checking for an existing backref name conflict during mapper configuration; will now test for name conflicts on superclasses and subclasses, in addition to the current mapper, as these conflicts break things just as much. This is new for 0.8, but see below for a warning that will also be triggered in 0.7.11. .. change:: :tags: bug, orm :tickets: 2674 Improved the error message emitted when a "backref loop" is detected, that is when an attribute event triggers a bidirectional assignment between two other attributes with no end. This condition can occur not just when an object of the wrong type is assigned, but also when an attribute is mis-configured to backref into an existing backref pair. Also in 0.7.11. .. change:: :tags: bug, orm :tickets: 2674 A warning is emitted when a MapperProperty is assigned to a mapper that replaces an existing property, if the properties in question aren't plain column-based properties. Replacement of relationship properties is rarely (ever?) what is intended and usually refers to a mapper mis-configuration. Also in 0.7.11. .. change:: :tags: feature, orm Can set/change the "cascade" attribute on a :func:`.relationship` construct after it's been constructed already. This is not a pattern for normal use but we like to change the setting for demonstration purposes in tutorials. .. change:: :tags: bug, schema :tickets: 2664 :meth:`.MetaData.create_all` and :meth:`.MetaData.drop_all` will now accommodate an empty list as an instruction to not create/drop any items, rather than ignoring the collection. .. change:: :tags: bug, tests :tickets: 2669 Fixed an import of "logging" in test_execute which was not working on some linux platforms. Also in 0.7.11. .. change:: :tags: bug, orm :tickets: 2662 A clear error message is emitted if an event handler attempts to emit SQL on a Session within the after_commit() handler, where there is not a viable transaction in progress. .. change:: :tags: bug, orm :tickets: 2665 Detection of a primary key change within the process of cascading a natural primary key update will succeed even if the key is composite and only some of the attributes have changed. .. change:: :tags: feature, orm :tickets: 2658 Added new helper function :func:`.was_deleted`, returns True if the given object was the subject of a :meth:`.Session.delete` operation. .. change:: :tags: bug, orm :tickets: 2658 An object that's deleted from a session will be de-associated with that session fully after the transaction is committed, that is the :func:`.object_session` function will return None. .. change:: :tags: bug, oracle The cx_oracle dialect will no longer run the bind parameter names through ``encode()``, as this is not valid on Python 3, and prevented statements from functioning correctly on Python 3. 
We now encode only if ``supports_unicode_binds`` is False, which is not the case for cx_oracle when at least version 5 of cx_oracle is used. .. change:: :tags: bug, orm :tickets: 2661 Fixed bug whereby :meth:`.Query.yield_per` would set the execution options incorrectly, thereby breaking subsequent usage of the :meth:`.Query.execution_options` method. Courtesy Ryan Kelly. .. change:: :tags: bug, orm :tickets: 1768 Fixed the consideration of the ``between()`` operator so that it works correctly with the new relationship local/remote system. .. change:: :tags: bug, sql :tickets: 2660, 1768 Fixed a bug regarding column annotations which in particular could impact some usages of the new :func:`.orm.remote` and :func:`.orm.local` annotation functions, where annotations could be lost when the column were used in a subsequent expression. .. change:: :tags: bug, mysql, gae :tickets: 2649 Added a conditional import to the ``gaerdbms`` dialect which attempts to import rdbms_apiproxy vs. rdbms_googleapi to work on both dev and production platforms. Also now honors the ``instance`` attribute. Courtesy Sean Lynch. Also in 0.7.10. .. change:: :tags: bug, sql :tickets: 2496 The :meth:`.ColumnOperators.in_` operator will now coerce values of ``None`` to :func:`.null`. .. change:: :tags: feature, sql :tickets: 2657 Added a new argument to :class:`.Enum` and its base :class:`.SchemaType` ``inherit_schema``. When set to ``True``, the type will set its ``schema`` attribute of that of the :class:`.Table` to which it is associated. This also occurs during a :meth:`.Table.tometadata` operation; the :class:`.SchemaType` is now copied in all cases when :meth:`.Table.tometadata` happens, and if ``inherit_schema=True``, the type will take on the new schema name passed to the method. The ``schema`` is important when used with the Postgresql backend, as the type results in a ``CREATE TYPE`` statement. .. change:: :tags: feature, postgresql Added :meth:`.postgresql.ARRAY.Comparator.any` and :meth:`.postgresql.ARRAY.Comparator.all` methods, as well as standalone expression constructs. Big thanks to Audrius Kažukauskas for the terrific work here. .. change:: :tags: sql, bug :tickets: 2643 Fixed bug where :meth:`.Table.tometadata` would fail if a :class:`.Column` had both a foreign key as well as an alternate ".key" name for the column. Also in 0.7.10. .. change:: :tags: sql, bug :tickets: 2629 insert().returning() raises an informative CompileError if attempted to compile on a dialect that doesn't support RETURNING. .. change:: :tags: orm, bug :tickets: 2655 the consideration of a pending object as an "orphan" has been modified to more closely match the behavior as that of persistent objects, which is that the object is expunged from the :class:`.Session` as soon as it is de-associated from any of its orphan-enabled parents. Previously, the pending object would be expunged only if de-associated from all of its orphan-enabled parents. The new flag ``legacy_is_orphan`` is added to :func:`.orm.mapper` which re-establishes the legacy behavior. See the change note and example case at :ref:`legacy_is_orphan_addition` for a detailed discussion of this change. .. change:: :tags: orm, bug :tickets: 2653 Fixed the (most likely never used) "@collection.link" collection method, which fires off each time the collection is associated or de-associated with a mapped object - the decorator was not tested or functional. The decorator method is now named :meth:`.collection.linker` though the name "link" remains for backwards compatibility. 
Courtesy Luca Wehrstedt.
.. change:: :tags: orm, bug :tickets: 2654 Made some fixes to the system of producing custom instrumented collections, mainly that the usage of the @collection decorators will now honor the __mro__ of the given class, applying the logic of the sub-most classes' version of a particular collection method. Previously, it wasn't predictable when subclassing an existing instrumented class such as :class:`.MappedCollection` whether or not custom methods would resolve correctly.
.. change:: :tags: orm, removed The undocumented (and hopefully unused) system of producing custom collections using an ``__instrumentation__`` datastructure associated with the collection has been removed, as this was a complex and untested feature which was also essentially redundant versus the decorator approach. Other internal simplifications to the orm.collections module have been made as well.
.. change:: :tags: mssql, feature Added ``mssql_include`` and ``mssql_clustered`` options to :class:`.Index`, which render the ``INCLUDE`` and ``CLUSTERED`` keywords, respectively. Courtesy Derek Harland.
.. change:: :tags: sql, feature :tickets: 695 :class:`.Index` now supports arbitrary SQL expressions and/or functions, in addition to straight columns. Common modifiers include using ``somecolumn.desc()`` for a descending index and ``func.lower(somecolumn)`` for a case-insensitive index, depending on the capabilities of the target backend. A short sketch appears at the end of these notes.
.. change:: :tags: mssql, bug :tickets: 2638 Added a py3K conditional around an unnecessary .decode() call in the mssql information schema, which fixes reflection in Py3K. Also in 0.7.10.
.. change:: :tags: orm, bug :tickets: 2650 Fixed potential memory leak which could occur if an arbitrary number of :class:`.sessionmaker` objects were created. The anonymous subclass created by the sessionmaker, when dereferenced, would not be garbage collected due to remaining class-level references from the event package. This issue also applies to any custom system that made use of ad-hoc subclasses in conjunction with an event dispatcher. Also in 0.7.10.
.. change:: :tags: mssql, bug Fixed a regression whereby the "collation" parameter of the character types CHAR, NCHAR, etc. stopped working, as "collation" is now supported by the base string types. The TEXT, NCHAR, CHAR, VARCHAR types within the MSSQL dialect are now synonyms for the base types.
.. change:: :tags: mssql, feature :tickets: 2644 DDL for IDENTITY columns is now supported on non-primary key columns, by establishing a :class:`.Sequence` construct on any integer column. Courtesy Derek Harland.
.. change:: :tags: examples, bug Fixed a regression in the examples/dogpile_caching example which was due to the change in :ticket:`2614`.
.. change:: :tags: orm, bug :tickets: 2640 :meth:`.Query.merge_result` can now load rows from an outer join where an entity may be ``None`` without throwing an error. Also in 0.7.10.
.. change:: :tags: sql, bug :tickets: 2648 Tweaked the "REQUIRED" symbol used by the compiler to identify INSERT/UPDATE bound parameters that need to be passed, so that it's more easily identifiable when writing custom bind-handling code.
.. change:: :tags: postgresql, bug Fixed bug in the :class:`~sqlalchemy.dialects.postgresql.array()` construct whereby using it inside of an :func:`.expression.insert` construct would produce an error regarding a parameter issue in the ``self_group()`` method.
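A short sketch of the expression-based :class:`.Index` support noted above (the table and index names are illustrative)::

    from sqlalchemy import MetaData, Table, Column, Integer, String, Index, func

    m = MetaData()
    users = Table('users', m,
                  Column('id', Integer, primary_key=True),
                  Column('name', String(50)))

    # a functional, case-insensitive index, subject to backend support
    Index('ix_users_name_lower', func.lower(users.c.name))
    # a descending index using a column modifier
    Index('ix_users_id_desc', users.c.id.desc())

..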
change:: :tags: orm, feature Extended the :doc:`/core/inspection` system so that all Python descriptors associated with the ORM or its extensions can be retrieved. This fulfills the common request of being able to inspect all :class:`.QueryableAttribute` descriptors in addition to extension types such as :class:`.hybrid_property` and :class:`.AssociationProxy`. See :attr:`.Mapper.all_orm_descriptors`.
.. change:: :tags: mysql, feature GAE dialect now accepts username/password arguments in the URL, courtesy Owen Nelson.
.. change:: :tags: mysql, bug GAE dialect won't fail on None match if the error code can't be extracted from the exception that was thrown; courtesy Owen Nelson.
.. change:: :tags: orm, bug :tickets: 2637 Fixes to the "dynamic" loader on :func:`.relationship`: backrefs will work properly even when autoflush is disabled, and history events are more accurate in scenarios where multiple add/remove operations on the same object occur.
.. changelog:: :version: 0.8.0b2 :released: December 14, 2012
.. change:: :tags: orm, bug :tickets: 2635 The :meth:`.Query.select_from` method can now be used with a :func:`.aliased` construct without it interfering with the entities being selected. Basically, a statement like this::

    ua = aliased(User)
    session.query(User.name).select_from(ua).join(User, User.name > ua.name)

Will maintain the columns clause of the SELECT as coming from the unaliased "user", as specified; the select_from only takes place in the FROM clause::

    SELECT users.name AS users_name FROM users AS users_1
    JOIN users ON users.name > users_1.name

Note that this behavior is in contrast to the original, older use case for :meth:`.Query.select_from`, which is that of restating the mapped entity in terms of a different selectable::

    session.query(User.name).\
        select_from(user_table.select().where(user_table.c.id > 5))

Which produces::

    SELECT anon_1.name AS anon_1_name
    FROM (SELECT users.id AS id, users.name AS name
          FROM users
          WHERE users.id > :id_1) AS anon_1

It was the "aliasing" behavior of the latter use case that was getting in the way of the former use case. The method now specifically considers a SQL expression like :func:`.expression.select` or :func:`.expression.alias` separately from a mapped entity like a :func:`.aliased` construct.
.. change:: :tags: sql, bug :tickets: 2633 Fixed a regression caused by :ticket:`2410` whereby a :class:`.CheckConstraint` would apply itself back to the original table during a :meth:`.Table.tometadata` operation, as it would parse the SQL expression for a parent table. The operation now copies the given expression to correspond to the new table.
.. change:: :tags: oracle, bug :tickets: 2619 Fixed table reflection for Oracle when accessing a synonym that refers to a DBLINK remote database; while the syntax has been present in the Oracle dialect for some time, up until now it has never been tested. The syntax has been tested against a sample database linking to itself, however there's still some uncertainty as to what should be used for the "owner" when querying the remote database for table information. Currently, the value of "username" from user_db_links is used to match the "owner".
.. change:: :tags: orm, feature :tickets: 2601 Added :meth:`.KeyedTuple._asdict` and :attr:`.KeyedTuple._fields` to the :class:`.KeyedTuple` class to provide some degree of compatibility with the Python standard library ``collections.namedtuple()``.
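A quick sketch of the new :meth:`.KeyedTuple._asdict` and :attr:`.KeyedTuple._fields` accessors (the mapped class and data are invented for the example)::

    from sqlalchemy import Column, Integer, String, create_engine
    from sqlalchemy.orm import Session
    from sqlalchemy.ext.declarative import declarative_base

    Base = declarative_base()

    class User(Base):
        __tablename__ = 'user'
        id = Column(Integer, primary_key=True)
        name = Column(String(50))

    engine = create_engine('sqlite://')
    Base.metadata.create_all(engine)
    session = Session(engine)
    session.add(User(name='ed'))
    session.commit()

    row = session.query(User.id, User.name).first()
    print(row._fields)    # ('id', 'name')
    print(row._asdict())  # {'id': 1, 'name': 'ed'}

..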
change:: :tags: sql, bug :tickets: 2610 Fixed bug whereby using a label_length on a dialect that was smaller than the size of actual column identifiers would fail to render the columns correctly in a SELECT statement.
.. change:: :tags: sql, feature :tickets: 2623 The :class:`.Insert` construct now supports multi-valued inserts, that is, an INSERT that renders like "INSERT INTO table VALUES (...), (...), ...". Supported by Postgresql, SQLite, and MySQL. Big thanks to Idan Kamara for doing the legwork on this one.
.. change:: :tags: oracle, bug :tickets: 2620 The Oracle LONG type, while an unbounded text type, does not appear to use the cx_Oracle.LOB type when result rows are returned, so the dialect has been repaired to exclude LONG from having cx_Oracle.LOB filtering applied. Also in 0.7.10.
.. change:: :tags: oracle, bug :tickets: 2611 Repaired the usage of ``.prepare()`` in conjunction with cx_Oracle so that a return value of ``False`` will result in no call to ``connection.commit()``, hence avoiding "no transaction" errors. Two-phase transactions have now been shown to work in a rudimentary fashion with SQLAlchemy and cx_oracle, however they are subject to caveats observed with the driver; check the documentation for details. Also in 0.7.10.
.. change:: :tags: sql, bug :tickets: 2618 The :class:`~sqlalchemy.types.DECIMAL` type now honors the "precision" and "scale" arguments when rendering DDL.
.. change:: :tags: orm, bug :tickets: 2624 The :class:`.MutableComposite` type did not allow for the :meth:`.MutableBase.coerce` method to be used, even though the code seemed to indicate this intent, so this now works and a brief example is added. As a side-effect, the mechanics of this event handler have been changed so that new :class:`.MutableComposite` types no longer add per-type global event handlers. Also in 0.7.10.
.. change:: :tags: sql, bug :tickets: 2621 Made an adjustment to the "boolean" (i.e. ``__nonzero__``) evaluation of binary expressions, i.e. ``x1 == x2``, such that the "auto-grouping" applied by :class:`.BinaryExpression` in some cases won't get in the way of this comparison. Previously, an expression like::

    expr1 = mycolumn > 2
    bool(expr1 == expr1)

Would evaluate as ``False``, even though this is an identity comparison, because ``mycolumn > 2`` would be "grouped" before being placed into the :class:`.BinaryExpression`, thus changing its identity. :class:`.BinaryExpression` now keeps track of the "original" objects passed in. Additionally the ``__nonzero__`` method now only returns if the operator is ``==`` or ``!=`` - all others raise ``TypeError``.
.. change:: :tags: firebird, bug :tickets: 2622 Added missing import for "fdb" to the experimental "firebird+fdb" dialect.
.. change:: :tags: orm, feature Allow synonyms to be used when defining primary and secondary joins for relationships.
.. change:: :tags: orm, bug :tickets: 2614 A second overhaul of aliasing/internal pathing mechanics now allows two subclasses to have different relationships of the same name, supported with subquery or joined eager loading on both simultaneously when a full polymorphic load is used.
.. change:: :tags: orm, bug :tickets: 2617 Fixed bug whereby a multi-hop subqueryload within a particular with_polymorphic load would produce a KeyError. Takes advantage of the same internal pathing overhaul as :ticket:`2614`.
.. change:: :tags: sql, bug Fixed a gotcha where inadvertently calling list() on a :class:`.ColumnElement` would go into an endless loop, if :meth:`.ColumnOperators.__getitem__` were implemented.
A new NotImplementedError is emitted via ``__iter__()``. .. change:: :tags: orm, extensions, feature The :mod:`sqlalchemy.ext.mutable` extension now includes the example :class:`.MutableDict` class as part of the extension. .. change:: :tags: postgresql, feature :tickets: 2606 :class:`.HSTORE` is now available in the Postgresql dialect. Will also use psycopg2's extensions if available. Courtesy Audrius Kažukauskas. .. change:: :tags: sybase, feature :tickets: 1753 Reflection support has been added to the Sybase dialect. Big thanks to Ben Trofatter for all the work developing and testing this. .. change:: :tags: engine, feature The :meth:`.Connection.connect` and :meth:`.Connection.contextual_connect` methods now return a "branched" version so that the :meth:`.Connection.close` method can be called on the returned connection without affecting the original. Allows symmetry when using :class:`.Engine` and :class:`.Connection` objects as context managers:: with conn.connect() as c: # leaves the Connection open c.execute("...") with engine.connect() as c: # closes the Connection c.execute("...") .. change:: :tags: engine The "reflect=True" argument to :class:`~sqlalchemy.schema.MetaData` is deprecated. Please use the :meth:`.MetaData.reflect` method. .. change:: :tags: sql, bug :tickets: 2603 Fixed bug in type_coerce() whereby typing information could be lost if the statement were used as a subquery inside of another statement, as well as other similar situations. Among other things, would cause typing information to be lost when the Oracle/mssql dialects would apply limit/offset wrappings. .. change:: :tags: orm, bug :tickets: 2602 Fixed regression where query.update() would produce an error if an object matched by the "fetch" synchronization strategy wasn't locally present. Courtesy Scott Torborg. .. change:: :tags: sql, bug :tickets: 2597 Fixed bug whereby the ".key" of a Column wasn't being used when producing a "proxy" of the column against a selectable. This probably didn't occur in 0.7 since 0.7 doesn't respect the ".key" in a wider range of scenarios. .. change:: :tags: mssql, feature :tickets: 2600 Support for reflection of the "name" of primary key constraints added, courtesy Dave Moore. .. change:: :tags: informix Some cruft regarding informix transaction handling has been removed, including a feature that would skip calling commit()/rollback() as well as some hardcoded isolation level assumptions on begin().. The status of this dialect is not well understood as we don't have any users working with it, nor any access to an Informix database. If someone with access to Informix wants to help test this dialect, please let us know. .. change:: :tags: pool, feature The :class:`.Pool` will now log all connection.close() operations equally, including closes which occur for invalidated connections, detached connections, and connections beyond the pool capacity. .. change:: :tags: pool, feature :tickets: 2611 The :class:`.Pool` now consults the :class:`.Dialect` for functionality regarding how the connection should be "auto rolled back", as well as closed. This grants more control of transaction scope to the dialect, so that we will be better able to implement transactional workarounds like those potentially needed for pysqlite and cx_oracle. .. change:: :tags: pool, feature Added new :meth:`.PoolEvents.reset` hook to capture the event before a connection is auto-rolled back, upon return to the pool. 
Together with :meth:`.ConnectionEvents.rollback` this allows all rollback events to be intercepted. .. changelog:: :version: 0.8.0b1 :released: October 30, 2012 .. change:: :tags: sql, bug :tickets: 2593 Fixed bug where keyword arguments passed to :meth:`.Compiler.process` wouldn't get propagated to the column expressions present in the columns clause of a SELECT statement. In particular this would come up when used by custom compilation schemes that relied upon special flags. .. change:: :tags: sql, feature Added a new method :meth:`.Engine.execution_options` to :class:`.Engine`. This method works similarly to :meth:`.Connection.execution_options` in that it creates a copy of the parent object which will refer to the new set of options. The method can be used to build sharding schemes where each engine shares the same underlying pool of connections. The method has been tested against the horizontal shard recipe in the ORM as well. .. seealso:: :meth:`.Engine.execution_options` .. change:: :tags: sql, orm, bug :tickets: 2595 The auto-correlation feature of :func:`.select`, and by proxy that of :class:`.Query`, will not take effect for a SELECT statement that is being rendered directly in the FROM list of the enclosing SELECT. Correlation in SQL only applies to column expressions such as those in the WHERE, ORDER BY, columns clause. .. change:: :tags: sqlite :changeset: c3addcc9ffad Added :class:`.types.NCHAR`, :class:`.types.NVARCHAR` to the SQLite dialect's list of recognized type names for reflection. SQLite returns the name given to a type as the name returned. .. change:: :tags: examples :tickets: 2589 The Beaker caching example has been converted to use `dogpile.cache `_. This is a new caching library written by the same creator of Beaker's caching internals, and represents a vastly improved, simplified, and modernized system of caching. .. seealso:: :ref:`examples_caching` .. change:: :tags: general :tickets: SQLAlchemy 0.8 now targets Python 2.5 and above. Python 2.4 is no longer supported. .. change:: :tags: removed, general :tickets: 2433 The "sqlalchemy.exceptions" synonym for "sqlalchemy.exc" is removed fully. .. change:: :tags: removed, orm :tickets: 2442 The legacy "mutable" system of the ORM, including the MutableType class as well as the mutable=True flag on PickleType and postgresql.ARRAY has been removed. In-place mutations are detected by the ORM using the sqlalchemy.ext.mutable extension, introduced in 0.7. The removal of MutableType and associated constructs removes a great deal of complexity from SQLAlchemy's internals. The approach performed poorly as it would incur a scan of the full contents of the Session when in use. .. change:: :tags: orm, moved :tickets: The InstrumentationManager interface and the entire related system of alternate class implementation is now moved out to sqlalchemy.ext.instrumentation. This is a seldom used system that adds significant complexity and overhead to the mechanics of class instrumentation. The new architecture allows it to remain unused until InstrumentationManager is actually imported, at which point it is bootstrapped into the core. .. change:: :tags: orm, feature :tickets: 1401 Major rewrite of relationship() internals now allow join conditions which include columns pointing to themselves within composite foreign keys. A new API for very specialized primaryjoin conditions is added, allowing conditions based on SQL functions, CAST, etc. 
to be handled by placing the annotation functions remote() and foreign() inline within the expression when necessary. Previous recipes using the semi-private _local_remote_pairs approach can be upgraded to this new approach. .. seealso:: :ref:`feature_relationship_08` .. change:: :tags: orm, bug :tickets: 2527 ORM will perform extra effort to determine that an FK dependency between two tables is not significant during flush if the tables are related via joined inheritance and the FK dependency is not part of the inherit_condition, saves the user a use_alter directive. .. change:: :tags: orm, feature :tickets: 2333 New standalone function with_polymorphic() provides the functionality of query.with_polymorphic() in a standalone form. It can be applied to any entity within a query, including as the target of a join in place of the "of_type()" modifier. .. change:: :tags: orm, feature :tickets: 1106, 2438 The of_type() construct on attributes now accepts aliased() class constructs as well as with_polymorphic constructs, and works with query.join(), any(), has(), and also eager loaders subqueryload(), joinedload(), contains_eager() .. change:: :tags: orm, feature :tickets: 2585 Improvements to event listening for mapped classes allows that unmapped classes can be specified for instance- and mapper-events. The established events will be automatically set up on subclasses of that class when the propagate=True flag is passed, and the events will be set up for that class itself if and when it is ultimately mapped. .. change:: :tags: orm, bug :tickets: 2590 The instrumentation events class_instrument(), class_uninstrument(), and attribute_instrument() will now fire off only for descendant classes of the class assigned to listen(). Previously, an event listener would be assigned to listen for all classes in all cases regardless of the "target" argument passed. .. change:: :tags: orm, bug :tickets: 1900 with_polymorphic() produces JOINs in the correct order and with correct inheriting tables in the case of sending multi-level subclasses in an arbitrary order or with intermediary classes missing. .. change:: :tags: orm, feature :tickets: 2485 The "deferred declarative reflection" system has been moved into the declarative extension itself, using the new DeferredReflection class. This class is now tested with both single and joined table inheritance use cases. .. change:: :tags: orm, feature :tickets: 2208 Added new core function "inspect()", which serves as a generic gateway to introspection into mappers, objects, others. The Mapper and InstanceState objects have been enhanced with a public API that allows inspection of mapped attributes, including filters for column-bound or relationship-bound properties, inspection of current object state, history of attributes, etc. .. change:: :tags: orm, feature :tickets: 2452 Calling rollback() within a session.begin_nested() will now only expire those objects that had net changes within the scope of that transaction, that is objects which were dirty or were modified on a flush. This allows the typical use case for begin_nested(), that of altering a small subset of objects, to leave in place the data from the larger enclosing set of objects that weren't modified in that sub-transaction. .. change:: :tags: orm, feature :tickets: 2372 Added utility feature Session.enable_relationship_loading(), supersedes relationship.load_on_pending. Both features should be avoided, however. .. 
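A minimal sketch of the new ``inspect()`` entry point described above (the mapped class is invented for the example)::

    from sqlalchemy import Column, Integer, String, inspect
    from sqlalchemy.ext.declarative import declarative_base

    Base = declarative_base()

    class User(Base):
        __tablename__ = 'user'
        id = Column(Integer, primary_key=True)
        name = Column(String(50))

    mapper = inspect(User)              # the Mapper for the class
    print(list(mapper.columns.keys()))  # ['id', 'name']

    u = User(name='ed')
    state = inspect(u)                  # InstanceState for the instance
    print(state.transient)              # True - not yet attached to a Session

..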
change:: :tags: orm, feature :tickets: Added support for .info dictionary argument to column_property(), relationship(), composite(). All MapperProperty classes have an auto-creating .info dict available overall.
.. change:: :tags: orm, feature :tickets: 2229 Adding/removing None from a mapped collection now generates attribute events. Previously, a None append would be ignored in some cases. Related to.
.. change:: :tags: orm, feature :tickets: 2229 The presence of None in a mapped collection now raises an error during flush. Previously, None values in collections would be silently ignored.
.. change:: :tags: orm, feature :tickets: The Query.update() method is now more lenient as to the table being updated. Plain Table objects are better supported now, and additionally a joined-inheritance subclass may be used with update(); the subclass table will be the target of the update, and if the parent table is referenced in the WHERE clause, the compiler will call upon UPDATE..FROM syntax as allowed by the dialect to satisfy the WHERE clause. MySQL's multi-table update feature is also supported if columns are specified by object in the "values" dictionary. PG's DELETE..USING is not yet available in Core.
.. change:: :tags: orm, feature :tickets: New session events after_transaction_create and after_transaction_end allow tracking of new SessionTransaction objects. If the object is inspected, it can be used to determine when a session first becomes active and when it deactivates.
.. change:: :tags: orm, feature :tickets: 2592 The Query can now load entity/scalar-mixed "tuple" rows that contain types which aren't hashable, by setting the flag "hashable=False" on the corresponding TypeEngine object in use. Custom types that return unhashable types (typically lists) can set this flag to False.
.. change:: :tags: orm, bug :tickets: 2481 Improvements to joined/subquery eager loading dealing with chains of subclass entities sharing a common base, with no specific "join depth" provided. Will chain out to each subclass mapper individually before detecting a "cycle", rather than considering the base class to be the source of the "cycle".
.. change:: :tags: orm, bug :tickets: 2320 The "passive" flag on Session.is_modified() no longer has any effect. is_modified() in all cases looks only at local in-memory modified flags and will not emit any SQL or invoke loader callables/initializers.
.. change:: :tags: orm, bug :tickets: 2405 The warning emitted when using delete-orphan cascade with one-to-many or many-to-many without single-parent=True is now an error. The ORM would fail to function subsequent to this warning in any case.
.. change:: :tags: orm, bug :tickets: 2350 Lazy loads emitted within flush events such as before_flush(), before_update(), etc. will now function as they would within non-event code, regarding consideration of the PK/FK values used in the lazy-emitted query. Previously, special flags would be established that would cause lazy loads to load related items based on the "previous" value of the parent PK/FK values specifically when called upon within a flush; the signal to load in this way is now localized to where the unit of work actually needs to load that way. Note that the UOW does sometimes load these collections before the before_update() event is called, so the usage of "passive_updates" or not can affect whether or not a collection will represent the "old" or "new" data, when accessed within a flush event, based on when the lazy load was emitted.
The change is backwards incompatible in the exceedingly small chance that user event code depended on the old behavior. .. change:: :tags: orm, feature :tickets: 2179 Query now "auto correlates" by default in the same way as select() does. Previously, a Query used as a subquery in another would require the correlate() method be called explicitly in order to correlate a table on the inside to the outside. As always, correlate(None) disables correlation. .. change:: :tags: orm, feature :tickets: 2464 The after_attach event is now emitted after the object is established in Session.new or Session.identity_map upon Session.add(), Session.merge(), etc., so that the object is represented in these collections when the event is called. Added before_attach event to accommodate use cases that need autoflush w pre-attached object. .. change:: :tags: orm, feature :tickets: The Session will produce warnings when unsupported methods are used inside the "execute" portion of the flush. These are the familiar methods add(), delete(), etc. as well as collection and related-object manipulations, as called within mapper-level flush events like after_insert(), after_update(), etc. It's been prominently documented for a long time that SQLAlchemy cannot guarantee results when the Session is manipulated within the execution of the flush plan, however users are still doing it, so now there's a warning. Maybe someday the Session will be enhanced to support these operations inside of the flush, but for now, results can't be guaranteed. .. change:: :tags: orm, bug :tickets: 2582, 2566 Continuing regarding extra state post-flush due to event listeners; any states that are marked as "dirty" from an attribute perspective, usually via column-attribute set events within after_insert(), after_update(), etc., will get the "history" flag reset in all cases, instead of only those instances that were part of the flush. This has the effect that this "dirty" state doesn't carry over after the flush and won't result in UPDATE statements. A warning is emitted to this effect; the set_committed_state() method can be used to assign attributes on objects without producing history events. .. change:: :tags: orm, feature :tickets: 2245 ORM entities can be passed to the core select() construct as well as to the select_from(), correlate(), and correlate_except() methods of select(), where they will be unwrapped into selectables. .. change:: :tags: orm, feature :tickets: 2245 Some support for auto-rendering of a relationship join condition based on the mapped attribute, with usage of core SQL constructs. E.g. select([SomeClass]).where(SomeClass.somerelationship) would render SELECT from "someclass" and use the primaryjoin of "somerelationship" as the WHERE clause. This changes the previous meaning of "SomeClass.somerelationship" when used in a core SQL context; previously, it would "resolve" to the parent selectable, which wasn't generally useful. Also works with query.filter(). Related to. .. change:: :tags: orm, feature :tickets: 2526 The registry of classes in declarative_base() is now a WeakValueDictionary. So subclasses of "Base" that are dereferenced will be garbage collected, *if they are not referred to by any other mappers/superclass mappers*. See the next note for this ticket. .. change:: :tags: orm, feature :tickets: 2472 Conflicts between columns on single-inheritance declarative subclasses, with or without using a mixin, can be resolved using a new @declared_attr usage described in the documentation. .. 
change:: :tags: orm, feature :tickets: 2472 declared_attr can now be used on non-mixin classes, even though this is generally only useful for single-inheritance subclass column conflict resolution.
.. change:: :tags: orm, feature :tickets: 2517 declared_attr can now be used with attributes that are not Column or MapperProperty, including any user-defined value as well as association proxy objects.
.. change:: :tags: orm, bug :tickets: 2565 Fixed a disconnect that slowly evolved between a @declared_attr Column and a directly-defined Column on a mixin. In both cases, the Column will be applied to the declared class' table, but not to that of a joined inheritance subclass. Previously, the directly-defined Column would be placed on both the base and the sub table, which isn't typically what's desired.
.. change:: :tags: orm, feature :tickets: 2526 *Very limited* support for inheriting mappers to be GC'ed when the class itself is dereferenced. The mapper must not have its own table (i.e. single table inheritance only) without polymorphic attributes in place. This allows for the use case of creating a temporary subclass of a declarative mapped class, with no table or mapping directives of its own, to be garbage collected when dereferenced by a unit test.
.. change:: :tags: orm, feature :tickets: 2338 Declarative now maintains a registry of classes by string name as well as by full module-qualified name. Multiple classes with the same name can now be looked up based on a module-qualified string within relationship(). Simple class name lookups where more than one class shares the same name now raise an informative error message.
.. change:: :tags: orm, feature :tickets: 2535 Can now provide class-bound attributes that override columns which are of any non-ORM type, not just descriptors.
.. change:: :tags: orm, feature :tickets: 1729 Added with_labels and reduce_columns keyword arguments to Query.subquery(), to provide two alternate strategies for producing queries with uniquely-named columns.
.. change:: :tags: orm, feature :tickets: 2476 A warning is emitted when a reference to an instrumented collection is no longer associated with the parent class due to expiration/attribute refresh/collection replacement, but an append or remove operation is received on the now-detached collection.
.. change:: :tags: orm, bug :tickets: 2549 Declarative can now propagate a column declared on a single-table inheritance subclass up to the parent class' table, when the parent class is itself mapped to a join() or select() statement, directly or via joined inheritance, and not just a Table.
.. change:: :tags: orm, bug :tickets: An error is emitted when uselist=False is combined with a "dynamic" loader. This is a warning in 0.7.9.
.. change:: :tags: removed, orm :tickets: Deprecated identifiers removed:

    * allow_null_pks mapper() argument (use allow_partial_pks)
    * _get_col_to_prop() mapper method (use get_property_by_column())
    * dont_load argument to Session.merge() (use load=True)
    * sqlalchemy.orm.shard module (use sqlalchemy.ext.horizontal_shard)

.. change:: :tags: engine, feature :tickets: 2511 Connection event listeners can now be associated with individual Connection objects, not just Engine objects; a short example appears below.
.. change:: :tags: engine, feature :tickets: 2459 The before_cursor_execute event fires off for so-called "_cursor_execute" events, which are usually special-case executions of primary-key bound sequences and default-generation SQL phrases that are invoked separately when RETURNING is not used with INSERT.
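A short example of the per-Connection event listeners noted above (the listener body is illustrative)::

    from sqlalchemy import create_engine, event

    engine = create_engine('sqlite://')
    conn = engine.connect()

    # the listener is attached to this Connection only, not the Engine as a whole
    @event.listens_for(conn, 'before_cursor_execute')
    def log_sql(conn, cursor, statement, parameters, context, executemany):
        print("SQL: %s" % statement)

    conn.execute("select 1")

..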
change:: :tags: engine, feature :tickets: The libraries used by the test suite have been moved around a bit so that they are part of the SQLAlchemy install again. In addition, a new suite of tests is present in the new sqlalchemy.testing.suite package. This is an under-development system that hopes to provide a universal testing suite for external dialects. Dialects which are maintained outside of SQLAlchemy can use the new test fixture as the framework for their own tests, and will get for free a "compliance" suite of dialect-focused tests, including an improved "requirements" system where specific capabilities and features can be enabled or disabled for testing.
.. change:: :tags: engine, bug :tickets: The Inspector.get_table_names() order_by="foreign_key" feature now sorts tables by dependee first, to be consistent with util.sort_tables and metadata.sorted_tables.
.. change:: :tags: engine, bug :tickets: 2522 Fixed bug whereby if a database restart affected multiple connections, each connection would individually invoke a new disposal of the pool, even though only one disposal is needed.
.. change:: :tags: engine, feature :tickets: 2462 Added a new system for registration of new dialects in-process without using an entrypoint. See the docs for "Registering New Dialects".
.. change:: :tags: engine, feature :tickets: 2556 The "required" flag is set to True by default, if not passed explicitly, on bindparam() if the "value" or "callable" parameters are not passed. This will cause statement execution to check for the parameter being present in the final collection of bound parameters, rather than implicitly assigning None.
.. change:: :tags: engine, feature :tickets: Various API tweaks to the "dialect" API to better support highly specialized systems such as the Akiban database, including more hooks to allow an execution context to access type processors.
.. change:: :tags: engine, bug :tickets: 2397 The names of the columns on the .c. attribute of a select().apply_labels() are now based on ``<tablename>_<key>`` instead of ``<tablename>_<columnname>``, for those columns that have a distinctly named .key.
.. change:: :tags: engine, feature :tickets: 2422 Inspector.get_primary_keys() is deprecated; use Inspector.get_pk_constraint(). Courtesy Diana Clarke.
.. change:: :tags: engine, bug :tickets: The autoload_replace flag on Table, when False, will cause any reflected foreign key constraints which refer to already-declared columns to be skipped, assuming that the in-Python declared column will take over the task of specifying in-Python ForeignKey or ForeignKeyConstraint declarations.
.. change:: :tags: engine, bug :tickets: 2498 The ResultProxy methods inserted_primary_key, last_updated_params(), last_inserted_params(), postfetch_cols(), prefetch_cols() all assert that the given statement is a compiled construct, and is an insert() or update() statement as is appropriate, else raise InvalidRequestError.
.. change:: :tags: engine, feature :tickets: New C extension module "utils" has been added for additional function speedups as we have time to implement.
.. change:: :tags: engine :tickets: ResultProxy.last_inserted_ids is removed, replaced by inserted_primary_key.
.. change:: :tags: feature, sql :tickets: 2547 Major rework of operator system in Core, to allow redefinition of existing operators as well as addition of new operators at the type level. New types can be created from existing ones which add or redefine operations that are exported out to column expressions, in a similar manner to how the ORM has allowed comparator_factory.
The new architecture moves this capability into the Core so that it is consistently usable in all cases, propagating cleanly using existing type propagation behavior.
.. change:: :tags: feature, sql :tickets: 1534, 2547 To complement, types can now provide "bind expressions" and "column expressions" which allow compile-time injection of SQL expressions into statements on a per-column or per-bind level. This is to suit the use case of a type which needs to augment bind- and result-behavior at the SQL level, as opposed to at the Python level. Allows for schemes like transparent encryption/decryption, usage of PostGIS functions, etc.
.. change:: :tags: feature, sql :tickets: The Core operator system now includes the ``getitem`` operator, i.e. the bracket operator in Python. This is used at first to provide index and slice behavior to the Postgresql ARRAY type, and also provides a hook for end-user definition of custom __getitem__ schemes which can be applied at the type level as well as within ORM-level custom operator schemes. ``lshift`` (<<) and ``rshift`` (>>) are also supported as optional operators. Note that this change has the effect that descriptor-based __getitem__ schemes used by the ORM in conjunction with synonym() or other "descriptor-wrapped" schemes will need to start using a custom comparator in order to maintain this behavior.
.. change:: :tags: feature, sql :tickets: 2537 Revised the rules used to determine the operator precedence for the user-defined operator, i.e. that granted using the ``op()`` method. Previously, the smallest precedence was applied in all cases; now the default precedence is zero, lower than all operators except "comma" (such as used in the argument list of a ``func`` call) and "AS", and is also customizable via the "precedence" argument on the ``op()`` method.
.. change:: :tags: feature, sql :tickets: 2276 Added a "collation" parameter to all String types. When present, renders as COLLATE <collation>. This is to support the COLLATE keyword now supported by several databases including MySQL, SQLite, and Postgresql.
.. change:: :tags: change, sql :tickets: The Text() type renders the length given to it, if a length was specified.
.. change:: :tags: feature, sql :tickets: Custom unary operators can now be used by combining operators.custom_op() with UnaryExpression().
.. change:: :tags: bug, sql :tickets: 2564 A tweak to column precedence which moves the "concat" and "match" operators to be the same as that of "is", "like", and others; this helps with parenthesization rendering when used in conjunction with "IS".
.. change:: :tags: feature, sql :tickets: Enhanced GenericFunction and func.* to allow user-defined GenericFunction subclasses to be available via the func.* namespace automatically by classname, optionally using a package name, as well as with the ability to have the rendered name different from the identified name in func.*. A short sketch appears at the end of these notes.
.. change:: :tags: feature, sql :tickets: 2562 The cast() and extract() constructs will now be produced via the func.* accessor as well; as users naturally try to access these names from func.*, they might as well do what's expected, even though the returned object is not a FunctionElement.
.. change:: :tags: changed, sql :tickets: Most classes in expression.sql are no longer preceded with an underscore, i.e. Label, SelectBase, Generative, CompareMixin. _BindParamClause is also renamed to BindParameter. The old underscore names for these classes will remain available as synonyms for the foreseeable future.
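A short sketch of the GenericFunction registration noted above (the function name is invented for the example)::

    from sqlalchemy import select, func, DateTime
    from sqlalchemy.sql.functions import GenericFunction

    class as_utc(GenericFunction):
        "Renders as ``as_utc()`` and reports a DateTime return type."
        type = DateTime

    # the subclass is now addressable through func.* by its class name
    print(select([func.as_utc()]))

..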
change:: :tags: feature, sql :tickets: 2208 The Inspector object can now be acquired using the new inspect() service, part of .. change:: :tags: feature, sql :tickets: 2418 The column_reflect event now accepts the Inspector object as the first argument, preceding "table". Code which uses the 0.7 version of this very new event will need modification to add the "inspector" object as the first argument. .. change:: :tags: feature, sql :tickets: 2423 The behavior of column targeting in result sets is now case sensitive by default. SQLAlchemy for many years would run a case-insensitive conversion on these values, probably to alleviate early case sensitivity issues with dialects like Oracle and Firebird. These issues have been more cleanly solved in more modern versions so the performance hit of calling lower() on identifiers is removed. The case insensitive comparisons can be re-enabled by setting "case_insensitive=False" on create_engine(). .. change:: :tags: bug, sql :tickets: 2591 Applying a column expression to a select statement using a label with or without other modifying constructs will no longer "target" that expression to the underlying Column; this affects ORM operations that rely upon Column targeting in order to retrieve results. That is, a query like query(User.id, User.id.label('foo')) will now track the value of each "User.id" expression separately instead of munging them together. It is not expected that any users will be impacted by this; however, a usage that uses select() in conjunction with query.from_statement() and attempts to load fully composed ORM entities may not function as expected if the select() named Column objects with arbitrary .label() names, as these will no longer target to the Column objects mapped by that entity. .. change:: :tags: feature, sql :tickets: 2415 The "unconsumed column names" warning emitted when keys are present in insert.values() or update.values() that aren't in the target table is now an exception. .. change:: :tags: feature, sql :tickets: 2502 Added "MATCH" clause to ForeignKey, ForeignKeyConstraint, courtesy Ryan Kelly. .. change:: :tags: feature, sql :tickets: 2507 Added support for DELETE and UPDATE from an alias of a table, which would assumedly be related to itself elsewhere in the query, courtesy Ryan Kelly. .. change:: :tags: feature, sql :tickets: select() features a correlate_except() method, auto correlates all selectables except those passed. .. change:: :tags: feature, sql :tickets: 2431 The prefix_with() method is now available on each of select(), insert(), update(), delete(), all with the same API, accepting multiple prefix calls, as well as a "dialect name" so that the prefix can be limited to one kind of dialect. .. change:: :tags: feature, sql :tickets: 1729 Added reduce_columns() method to select() construct, replaces columns inline using the util.reduce_columns utility function to remove equivalent columns. reduce_columns() also adds "with_only_synonyms" to limit the reduction just to those columns which have the same name. The deprecated fold_equivalents() feature is removed. .. change:: :tags: feature, sql :tickets: 2470 Reworked the startswith(), endswith(), contains() operators to do a better job with negation (NOT LIKE), and also to assemble them at compilation time so that their rendered SQL can be altered, such as in the case for Firebird STARTING WITH .. 
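A minimal sketch of the reworked ``startswith()`` / ``contains()`` operators, including negation (the column name is illustrative; exact rendering varies by backend)::

    from sqlalchemy import MetaData, Table, Column, String, not_

    t = Table('t', MetaData(), Column('name', String(50)))

    print(t.c.name.startswith('ed'))      # t.name LIKE :name_1 || '%'
    print(not_(t.c.name.contains('ed')))  # t.name NOT LIKE '%' || :name_1 || '%'

..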
change:: :tags: feature, sql :tickets: 2463 Added a hook to the system of rendering CREATE TABLE that provides access to the render for each Column individually, by constructing a @compiles function against the new schema.CreateColumn construct. .. change:: :tags: feature, sql :tickets: "scalar" selects now have a WHERE method to help with generative building. Also slight adjustment regarding how SS "correlates" columns; the new methodology no longer applies meaning to the underlying Table column being selected. This improves some fairly esoteric situations, and the logic that was there didn't seem to have any purpose. .. change:: :tags: bug, sql :tickets: 2520 Fixes to the interpretation of the Column "default" parameter as a callable to not pass ExecutionContext into a keyword argument parameter. .. change:: :tags: bug, sql :tickets: 2410 All of UniqueConstraint, ForeignKeyConstraint, CheckConstraint, and PrimaryKeyConstraint will attach themselves to their parent table automatically when they refer to a Table-bound Column object directly (i.e. not just string column name), and refer to one and only one Table. Prior to 0.8 this behavior occurred for UniqueConstraint and PrimaryKeyConstraint, but not ForeignKeyConstraint or CheckConstraint. .. change:: :tags: bug, sql :tickets: 2594 TypeDecorator now includes a generic repr() that works in terms of the "impl" type by default. This is a behavioral change for those TypeDecorator classes that specify a custom __init__ method; those types will need to re-define __repr__() if they need __repr__() to provide a faithful constructor representation. .. change:: :tags: bug, sql :tickets: 2168 column.label(None) now produces an anonymous label, instead of returning the column object itself, consistent with the behavior of label(column, None). .. change:: :tags: feature, sql :tickets: 2455 An explicit error is raised when a ForeignKeyConstraint() that was constructed to refer to multiple remote tables is first used. .. change:: :tags: access, feature :tickets: the MS Access dialect has been moved to its own project on Bitbucket, taking advantage of the new SQLAlchemy dialect compliance suite. The dialect is still in very rough shape and probably not ready for general use yet, however it does have *extremely* rudimental functionality now. https://bitbucket.org/zzzeek/sqlalchemy-access .. change:: :tags: maxdb, moved :tickets: The MaxDB dialect, which hasn't been functional for several years, is moved out to a pending bitbucket project, https://bitbucket.org/zzzeek/sqlalchemy-maxdb. .. change:: :tags: sqlite, feature :tickets: 2363 the SQLite date and time types have been overhauled to support a more open ended format for input and output, using name based format strings and regexps. A new argument "microseconds" also provides the option to omit the "microseconds" portion of timestamps. Thanks to Nathan Wright for the work and tests on this. .. change:: :tags: mssql, feature :tickets: SQL Server dialect can be given database-qualified schema names, i.e. "schema='mydatabase.dbo'"; reflection operations will detect this, split the schema among the "." to get the owner separately, and emit a "USE mydatabase" statement before reflecting targets within the "dbo" owner; the existing database returned from DB_NAME() is then restored. .. change:: :tags: mssql, bug :tickets: 2277 removed legacy behavior whereby a column comparison to a scalar SELECT via == would coerce to an IN with the SQL server dialect. 
This is implicit behavior which fails in other scenarios so is removed. Code which relies on this needs to be modified to use column.in_(select) explicitly. .. change:: :tags: mssql, feature :tickets: updated support for the mxodbc driver; mxodbc 3.2.1 is recommended for full compatibility. .. change:: :tags: postgresql, feature :tickets: 2441 postgresql.ARRAY features an optional "dimension" argument, will assign a specific number of dimensions to the array which will render in DDL as ARRAY[][]..., also improves performance of bind/result processing. .. change:: :tags: postgresql, feature :tickets: postgresql.ARRAY now supports indexing and slicing. The Python [] operator is available on all SQL expressions that are of type ARRAY; integer or simple slices can be passed. The slices can also be used on the assignment side in the SET clause of an UPDATE statement by passing them into Update.values(); see the docs for examples. .. change:: :tags: postgresql, feature :tickets: Added new "array literal" construct postgresql.array(). Basically a "tuple" that renders as ARRAY[1,2,3]. .. change:: :tags: postgresql, feature :tickets: 2506 Added support for the Postgresql ONLY keyword, which can appear corresponding to a table in a SELECT, UPDATE, or DELETE statement. The phrase is established using with_hint(). Courtesy Ryan Kelly .. change:: :tags: postgresql, feature :tickets: The "ischema_names" dictionary of the Postgresql dialect is "unofficially" customizable. Meaning, new types such as PostGIS types can be added into this dictionary, and the PG type reflection code should be able to handle simple types with variable numbers of arguments. The functionality here is "unofficial" for three reasons: 1. this is not an "official" API. Ideally an "official" API would allow custom type-handling callables at the dialect or global level in a generic way. 2. This is only implemented for the PG dialect, in particular because PG has broad support for custom types vs. other database backends. A real API would be implemented at the default dialect level. 3. The reflection code here is only tested against simple types and probably has issues with more compositional types. patch courtesy Éric Lemoine. .. change:: :tags: firebird, feature :tickets: 2470 The "startswith()" operator renders as "STARTING WITH", "~startswith()" renders as "NOT STARTING WITH", using FB's more efficient operator. .. change:: :tags: firebird, bug :tickets: 2505 CompileError is raised when VARCHAR with no length is attempted to be emitted, same way as MySQL. .. change:: :tags: firebird, bug :tickets: Firebird now uses strict "ansi bind rules" so that bound parameters don't render in the columns clause of a statement - they render literally instead. .. change:: :tags: firebird, bug :tickets: Support for passing datetime as date when using the DateTime type with Firebird; other dialects support this. .. change:: :tags: firebird, feature :tickets: 2504 An experimental dialect for the fdb driver is added, but is untested as I cannot get the fdb package to build. .. change:: :tags: bug, mysql :tickets: 2404 Dialect no longer emits expensive server collations query, as well as server casing, on first connect. These functions are still available as semi-private. .. change:: :tags: feature, mysql :tickets: 2534 Added TIME type to mysql dialect, accepts "fst" argument which is the new "fractional seconds" specifier for recent MySQL versions. 
The datatype will interpret a microseconds portion received from the driver, however note that at this time most/all MySQL DBAPIs do not support returning this value. .. change:: :tags: oracle, bug :tickets: 2437 Quoting information is now passed along from a Column with quote=True when generating a same-named bound parameter to the bindparam() object, as is the case in generated INSERT and UPDATE statements, so that unknown reserved names can be fully supported. .. change:: :tags: oracle, feature :tickets: 2561 The types of columns excluded from the setinputsizes() set can be customized by sending a list of string DBAPI type names to exclude, using the exclude_setinputsizes dialect parameter. This list was previously fixed. The list also now defaults to STRING, UNICODE, removing CLOB, NCLOB from the list. .. change:: :tags: oracle, bug :tickets: The CreateIndex construct in Oracle will now schema-qualify the name of the index to be that of the parent table. Previously this name was omitted which apparently creates the index in the default schema, rather than that of the table. .. change:: :tags: sql, feature :tickets: 2580 Added :meth:`.ColumnOperators.notin_`, :meth:`.ColumnOperators.notlike`, :meth:`.ColumnOperators.notilike` to :class:`.ColumnOperators`. .. change:: :tags: sql, removed The long-deprecated and non-functional ``assert_unicode`` flag on :func:`.create_engine` as well as :class:`.String` is removed. SQLAlchemy-0.8.4/doc/build/changelog/index.rst0000644000076500000240000000110712251150015021654 0ustar classicstaff00000000000000.. _changelog_toplevel: Changes and Migration ===================== SQLAlchemy changelogs and migration guides are now integrated within the main documentation. Current Migration Guide ------------------------ .. toctree:: :maxdepth: 1 migration_08 Change logs ----------- .. toctree:: :maxdepth: 2 changelog_08 changelog_07 changelog_06 changelog_05 changelog_04 changelog_03 changelog_02 changelog_01 Older Migration Guides ---------------------- .. toctree:: :maxdepth: 1 migration_07 migration_06 migration_05 migration_04 SQLAlchemy-0.8.4/doc/build/changelog/migration_04.rst0000644000076500000240000006240012251147171023055 0ustar classicstaff00000000000000============================= What's new in SQLAlchemy 0.4? ============================= .. admonition:: About this Document This document describes changes between SQLAlchemy version 0.3, last released October 14, 2007, and SQLAlchemy version 0.4, last released October 12, 2008. Document date: March 21, 2008 First Things First ================== If you're using any ORM features, make sure you import from ``sqlalchemy.orm``: :: from sqlalchemy import * from sqlalchemy.orm import * Secondly, anywhere you used to say ``engine=``, ``connectable=``, ``bind_to=``, ``something.engine``, ``metadata.connect()``, use ``bind``: :: myengine = create_engine('sqlite://') meta = MetaData(myengine) meta2 = MetaData() meta2.bind = myengine session = create_session(bind=myengine) statement = select([table], bind=myengine) Got those ? Good! You're now (95%) 0.4 compatible. If you're using 0.3.10, you can make these changes immediately; they'll work there too. Module Imports ============== In 0.3, "``from sqlachemy import *``" would import all of sqlachemy's sub-modules into your namespace. Version 0.4 no longer imports sub-modules into the namespace. This may mean you need to add extra imports into your code. 
In 0.3, this code worked: :: from sqlalchemy import * class UTCDateTime(types.TypeDecorator): pass In 0.4, one must do: :: from sqlalchemy import * from sqlalchemy import types class UTCDateTime(types.TypeDecorator): pass Object Relational Mapping ========================= Querying -------- New Query API ^^^^^^^^^^^^^ Query is standardized on the generative interface (the old interface is still there, just deprecated). While most of the generative interface is available in 0.3, the 0.4 Query has the inner guts to match the generative outside, and has a lot more tricks. All result narrowing is via ``filter()`` and ``filter_by()``, limiting/offset is either through array slices or ``limit()``/``offset()``, joining is via ``join()`` and ``outerjoin()`` (or more manually, through ``select_from()`` as well as manually-formed criteria). To avoid deprecation warnings, you must make some changes to your 0.3 code: User.query.get_by( \**kwargs ) :: User.query.filter_by(**kwargs).first() User.query.select_by( \**kwargs ) :: User.query.filter_by(**kwargs).all() User.query.select() :: User.query.filter(xxx).all() New Property-Based Expression Constructs ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ By far the most palpable difference within the ORM is that you can now construct your query criterion using class-based attributes directly. The ".c." prefix is no longer needed when working with mapped classes: :: session.query(User).filter(and_(User.name == 'fred', User.id > 17)) While simple column-based comparisons are no big deal, the class attributes have some new "higher level" constructs available, including what was previously only available in ``filter_by()``: :: # comparison of scalar relations to an instance filter(Address.user == user) # return all users who contain a particular address filter(User.addresses.contains(address)) # return all users who *don't* contain the address filter(~User.addresses.contains(address)) # return all users who contain a particular address with # the email_address like '%foo%' filter(User.addresses.any(Address.email_address.like('%foo%'))) # same, email address equals 'foo@bar.com'. can fall back to keyword # args for simple comparisons filter(User.addresses.any(email_address='foo@bar.com')) # return all Addresses whose user attribute has the username 'ed' filter(Address.user.has(name='ed')) # return all Addresses whose user attribute has the username 'ed' # and an id > 5 (mixing clauses with kwargs) filter(Address.user.has(User.id > 5, name='ed')) The ``Column`` collection remains available on mapped classes in the ``.c`` attribute. Note that property-based expressions are only available with mapped properties of mapped classes. ``.c`` is still used to access columns in regular tables and selectable objects produced from SQL Expressions. Automatic Join Aliasing ^^^^^^^^^^^^^^^^^^^^^^^ We've had join() and outerjoin() for a while now: :: session.query(Order).join('items')... Now you can alias them: :: session.query(Order).join('items', aliased=True). filter(Item.name == 'item 1').join('items', aliased=True).filter(Item.name == 'item 3') The above will create two joins from orders->items using aliases. The ``filter()`` call subsequent to each will adjust its table criterion to that of the alias. To get at the ``Item`` objects, use ``add_entity()`` and target each join with an ``id``: :: session.query(Order).join('items', id='j1', aliased=True). filter(Item.name == 'item 1').join('items', aliased=True, id='j2').
filter(Item.name == 'item 3').add_entity(Item, id='j1').add_entity(Item, id='j2') Returns tuples in the form: ``(Order, Item, Item)``. Self-referential Queries ^^^^^^^^^^^^^^^^^^^^^^^^ So query.join() can make aliases now. What does that give us ? Self-referential queries ! Joins can be done without any ``Alias`` objects: :: # standard self-referential TreeNode mapper with backref mapper(TreeNode, tree_nodes, properties={ 'children':relation(TreeNode, backref=backref('parent', remote_side=tree_nodes.id)) }) # query for node with child containing "bar" two levels deep session.query(TreeNode).join(["children", "children"], aliased=True).filter_by(name='bar') To add criterion for each table along the way in an aliased join, you can use ``from_joinpoint`` to keep joining against the same line of aliases: :: # search for the treenode along the path "n1/n12/n122" # first find a Node with name="n122" q = sess.query(Node).filter_by(name='n122') # then join to parent with "n12" q = q.join('parent', aliased=True).filter_by(name='n12') # join again to the next parent with 'n1'. use 'from_joinpoint' # so we join from the previous point, instead of joining off the # root table q = q.join('parent', aliased=True, from_joinpoint=True).filter_by(name='n1') node = q.first() ``query.populate_existing()`` ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ The eager version of ``query.load()`` (or ``session.refresh()``). Every instance loaded from the query, including all eagerly loaded items, get refreshed immediately if already present in the session: :: session.query(Blah).populate_existing().all() Relations --------- SQL Clauses Embedded in Updates/Inserts ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ For inline execution of SQL clauses, embedded right in the UPDATE or INSERT, during a ``flush()``: :: myobject.foo = mytable.c.value + 1 user.pwhash = func.md5(password) order.hash = text("select hash from hashing_table") The column-attribute is set up with a deferred loader after the operation, so that it issues the SQL to load the new value when you next access. Self-referential and Cyclical Eager Loading ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Since our alias-fu has improved, ``relation()`` can join along the same table \*any number of times*; you tell it how deep you want to go. Lets show the self-referential ``TreeNode`` more clearly: :: nodes = Table('nodes', metadata, Column('id', Integer, primary_key=True), Column('parent_id', Integer, ForeignKey('nodes.id')), Column('name', String(30))) class TreeNode(object): pass mapper(TreeNode, nodes, properties={ 'children':relation(TreeNode, lazy=False, join_depth=3) }) So what happens when we say: :: create_session().query(TreeNode).all() ? A join along aliases, three levels deep off the parent: :: SELECT nodes_3.id AS nodes_3_id, nodes_3.parent_id AS nodes_3_parent_id, nodes_3.name AS nodes_3_name, nodes_2.id AS nodes_2_id, nodes_2.parent_id AS nodes_2_parent_id, nodes_2.name AS nodes_2_name, nodes_1.id AS nodes_1_id, nodes_1.parent_id AS nodes_1_parent_id, nodes_1.name AS nodes_1_name, nodes.id AS nodes_id, nodes.parent_id AS nodes_parent_id, nodes.name AS nodes_name FROM nodes LEFT OUTER JOIN nodes AS nodes_1 ON nodes.id = nodes_1.parent_id LEFT OUTER JOIN nodes AS nodes_2 ON nodes_1.id = nodes_2.parent_id LEFT OUTER JOIN nodes AS nodes_3 ON nodes_2.id = nodes_3.parent_id ORDER BY nodes.oid, nodes_1.oid, nodes_2.oid, nodes_3.oid Notice the nice clean alias names too. 
The joining doesn't care if it's against the same immediate table or some other object which then cycles back to the beginining. Any kind of chain of eager loads can cycle back onto itself when ``join_depth`` is specified. When not present, eager loading automatically stops when it hits a cycle. Composite Types ^^^^^^^^^^^^^^^ This is one from the Hibernate camp. Composite Types let you define a custom datatype that is composed of more than one column (or one column, if you wanted). Lets define a new type, ``Point``. Stores an x/y coordinate: :: class Point(object): def __init__(self, x, y): self.x = x self.y = y def __composite_values__(self): return self.x, self.y def __eq__(self, other): return other.x == self.x and other.y == self.y def __ne__(self, other): return not self.__eq__(other) The way the ``Point`` object is defined is specific to a custom type; constructor takes a list of arguments, and the ``__composite_values__()`` method produces a sequence of those arguments. The order will match up to our mapper, as we'll see in a moment. Let's create a table of vertices storing two points per row: :: vertices = Table('vertices', metadata, Column('id', Integer, primary_key=True), Column('x1', Integer), Column('y1', Integer), Column('x2', Integer), Column('y2', Integer), ) Then, map it ! We'll create a ``Vertex`` object which stores two ``Point`` objects: :: class Vertex(object): def __init__(self, start, end): self.start = start self.end = end mapper(Vertex, vertices, properties={ 'start':composite(Point, vertices.c.x1, vertices.c.y1), 'end':composite(Point, vertices.c.x2, vertices.c.y2) }) Once you've set up your composite type, it's usable just like any other type: :: v = Vertex(Point(3, 4), Point(26,15)) session.save(v) session.flush() # works in queries too q = session.query(Vertex).filter(Vertex.start == Point(3, 4)) If you'd like to define the way the mapped attributes generate SQL clauses when used in expressions, create your own ``sqlalchemy.orm.PropComparator`` subclass, defining any of the common operators (like ``__eq__()``, ``__le__()``, etc.), and send it in to ``composite()``. Composite types work as primary keys too, and are usable in ``query.get()``: :: # a Document class which uses a composite Version # object as primary key document = query.get(Version(1, 'a')) ``dynamic_loader()`` relations ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ A ``relation()`` that returns a live ``Query`` object for all read operations. Write operations are limited to just ``append()`` and ``remove()``, changes to the collection are not visible until the session is flushed. This feature is particularly handy with an "autoflushing" session which will flush before each query. :: mapper(Foo, foo_table, properties={ 'bars':dynamic_loader(Bar, backref='foo', ) }) session = create_session(autoflush=True) foo = session.query(Foo).first() foo.bars.append(Bar(name='lala')) for bar in foo.bars.filter(Bar.name=='lala'): print bar session.commit() New Options: ``undefer_group()``, ``eagerload_all()`` ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ A couple of query options which are handy. 
``undefer_group()`` marks a whole group of "deferred" columns as undeferred: :: mapper(Class, table, properties={ 'foo' : deferred(table.c.foo, group='group1'), 'bar' : deferred(table.c.bar, group='group1'), 'bat' : deferred(table.c.bat, group='group1'), ) session.query(Class).options(undefer_group('group1')).filter(...).all() and ``eagerload_all()`` sets a chain of attributes to be eager in one pass: :: mapper(Foo, foo_table, properties={ 'bar':relation(Bar) }) mapper(Bar, bar_table, properties={ 'bat':relation(Bat) }) mapper(Bat, bat_table) # eager load bar and bat session.query(Foo).options(eagerload_all('bar.bat')).filter(...).all() New Collection API ^^^^^^^^^^^^^^^^^^ Collections are no longer proxied by an {{{InstrumentedList}}} proxy, and access to members, methods and attributes is direct. Decorators now intercept objects entering and leaving the collection, and it is now possible to easily write a custom collection class that manages its own membership. Flexible decorators also replace the named method interface of custom collections in 0.3, allowing any class to be easily adapted to use as a collection container. Dictionary-based collections are now much easier to use and fully ``dict``-like. Changing ``__iter__`` is no longer needed for ``dict``s, and new built-in ``dict`` types cover many needs: :: # use a dictionary relation keyed by a column relation(Item, collection_class=column_mapped_collection(items.c.keyword)) # or named attribute relation(Item, collection_class=attribute_mapped_collection('keyword')) # or any function you like relation(Item, collection_class=mapped_collection(lambda entity: entity.a + entity.b)) Existing 0.3 ``dict``-like and freeform object derived collection classes will need to be updated for the new API. In most cases this is simply a matter of adding a couple decorators to the class definition. Mapped Relations from External Tables/Subqueries ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ This feature quietly appeared in 0.3 but has been improved in 0.4 thanks to better ability to convert subqueries against a table into subqueries against an alias of that table; this is key for eager loading, aliased joins in queries, etc. It reduces the need to create mappers against select statements when you just need to add some extra columns or subqueries: :: mapper(User, users, properties={ 'fullname': column_property((users.c.firstname + users.c.lastname).label('fullname')), 'numposts': column_property( select([func.count(1)], users.c.id==posts.c.user_id).correlate(users).label('posts') ) }) a typical query looks like: :: SELECT (SELECT count(1) FROM posts WHERE users.id = posts.user_id) AS count, users.firstname || users.lastname AS fullname, users.id AS users_id, users.firstname AS users_firstname, users.lastname AS users_lastname FROM users ORDER BY users.oid Horizontal Scaling (Sharding) API --------------------------------- [browser:/sqlalchemy/trunk/examples/sharding/attribute_shard .py] Sessions -------- New Session Create Paradigm; SessionContext, assignmapper Deprecated ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ That's right, the whole shebang is being replaced with two configurational functions. Using both will produce the most 0.1-ish feel we've had since 0.1 (i.e., the least amount of typing). 
Configure your own ``Session`` class right where you define your ``engine`` (or anywhere): :: from sqlalchemy import create_engine from sqlalchemy.orm import sessionmaker engine = create_engine('myengine://') Session = sessionmaker(bind=engine, autoflush=True, transactional=True) # use the new Session() freely sess = Session() sess.save(someobject) sess.flush() If you need to post-configure your Session, say with an engine, add it later with ``configure()``: :: Session.configure(bind=create_engine(...)) All the behaviors of ``SessionContext`` and the ``query`` and ``__init__`` methods of ``assignmapper`` are moved into the new ``scoped_session()`` function, which is compatible with both ``sessionmaker`` as well as ``create_session()``: :: from sqlalchemy.orm import scoped_session, sessionmaker Session = scoped_session(sessionmaker(autoflush=True, transactional=True)) Session.configure(bind=engine) u = User(name='wendy') sess = Session() sess.save(u) sess.commit() # Session constructor is thread-locally scoped. Everyone gets the same # Session in the thread when scope="thread". sess2 = Session() assert sess is sess2 When using a thread-local ``Session``, the returned class has all of ``Session's`` interface implemented as classmethods, and "assignmapper"'s functionality is available using the ``mapper`` classmethod. Just like the old ``objectstore`` days.... :: # "assignmapper"-like functionality available via ScopedSession.mapper Session.mapper(User, users_table) u = User(name='wendy') Session.commit() Sessions are again Weak Referencing By Default ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ The weak_identity_map flag is now set to ``True`` by default on Session. Instances which are externally deferenced and fall out of scope are removed from the session automatically. However, items which have "dirty" changes present will remain strongly referenced until those changes are flushed at which case the object reverts to being weakly referenced (this works for 'mutable' types, like picklable attributes, as well). Setting weak_identity_map to ``False`` restores the old strong-referencing behavior for those of you using the session like a cache. Auto-Transactional Sessions ^^^^^^^^^^^^^^^^^^^^^^^^^^^ As you might have noticed above, we are calling ``commit()`` on ``Session``. The flag ``transactional=True`` means the ``Session`` is always in a transaction, ``commit()`` persists permanently. Auto-Flushing Sessions ^^^^^^^^^^^^^^^^^^^^^^ Also, ``autoflush=True`` means the ``Session`` will ``flush()`` before each ``query`` as well as when you call ``flush()`` or ``commit()``. So now this will work: :: Session = sessionmaker(bind=engine, autoflush=True, transactional=True) u = User(name='wendy') sess = Session() sess.save(u) # wendy is flushed, comes right back from a query wendy = sess.query(User).filter_by(name='wendy').one() Transactional methods moved onto sessions ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ``commit()`` and ``rollback()``, as well as ``begin()`` are now directly on ``Session``. No more need to use ``SessionTransaction`` for anything (it remains in the background). :: Session = sessionmaker(autoflush=True, transactional=False) sess = Session() sess.begin() # use the session sess.commit() # commit transaction Sharing a ``Session`` with an enclosing engine-level (i.e. non-ORM) transaction is easy: :: Session = sessionmaker(autoflush=True, transactional=False) conn = engine.connect() trans = conn.begin() sess = Session(bind=conn) # ... 
session is transactional # commit the outermost transaction trans.commit() Nested Session Transactions with SAVEPOINT ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Available at the Engine and ORM level. ORM docs so far: http://www.sqlalchemy.org/docs/04/session.html#unitofwork_managing Two-Phase Commit Sessions ^^^^^^^^^^^^^^^^^^^^^^^^^ Available at the Engine and ORM level. ORM docs so far: http://www.sqlalchemy.org/docs/04/session.html#unitofwork_managing Inheritance ----------- Polymorphic Inheritance with No Joins or Unions ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ New docs for inheritance: http://www.sqlalchemy.org/docs/04/mappers.html#advdatamapping_mapper_inheritance_joined Better Polymorphic Behavior with ``get()`` ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ All classes within a joined-table inheritance hierarchy get an ``_instance_key`` using the base class, i.e. ``(BaseClass, (1, ), None)``. That way when you call ``get()`` on a ``Query`` against the base class, it can locate subclass instances in the current identity map without querying the database. Types ----- Custom Subclasses of ``sqlalchemy.types.TypeDecorator`` ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ There is a new API for subclassing a TypeDecorator. Using the 0.3 API causes compilation errors in some cases. SQL Expressions =============== All New, Deterministic Label/Alias Generation --------------------------------------------- All the "anonymous" labels and aliases now use a simple, deterministic naming format. SQL is much easier to read and is compatible with plan optimizer caches. Just check out some of the examples in the tutorials: http://www.sqlalchemy.org/docs/04/ormtutorial.html http://www.sqlalchemy.org/docs/04/sqlexpression.html Generative select() Constructs ------------------------------ This is definitely the way to go with ``select()``. See http://www.sqlalchemy.org/docs/04/sqlexpression.html#sql_transform. New Operator System ------------------- SQL operators and more or less every SQL keyword there is are now abstracted into the compiler layer. They now act intelligently and are type/backend aware, see: http://www.sqlalchemy.org/docs/04/sqlexpression.html#sql_operators All ``type`` Keyword Arguments Renamed to ``type_`` --------------------------------------------------- Just like it says: :: b = bindparam('foo', type_=String) in\_ Function Changed to Accept Sequence or Selectable ------------------------------------------------------ The in\_ function now takes a sequence of values or a selectable as its sole argument. The previous API of passing in values as positional arguments still works, but is now deprecated. This means that :: my_table.select(my_table.c.id.in_(1,2,3)) my_table.select(my_table.c.id.in_(*listOfIds)) should be changed to :: my_table.select(my_table.c.id.in_([1,2,3])) my_table.select(my_table.c.id.in_(listOfIds)) Schema and Reflection ===================== ``MetaData``, ``BoundMetaData``, ``DynamicMetaData``... -------------------------------------------------------- In the 0.3.x series, ``BoundMetaData`` and ``DynamicMetaData`` were deprecated in favor of ``MetaData`` and ``ThreadLocalMetaData``. The older names have been removed in 0.4.
Updating is simple: :: +-------------------------------------+-------------------------+ |If You Had | Now Use | +=====================================+=========================+ | ``MetaData`` | ``MetaData`` | +-------------------------------------+-------------------------+ | ``BoundMetaData`` | ``MetaData`` | +-------------------------------------+-------------------------+ | ``DynamicMetaData`` (with one | ``MetaData`` | | engine or threadlocal=False) | | +-------------------------------------+-------------------------+ | ``DynamicMetaData`` | ``ThreadLocalMetaData`` | | (with different engines per thread) | | +-------------------------------------+-------------------------+ The seldom-used ``name`` parameter to ``MetaData`` types has been removed. The ``ThreadLocalMetaData`` constructor now takes no arguments. Both types can now be bound to an ``Engine`` or a single ``Connection``. One Step Multi-Table Reflection ------------------------------- You can now load table definitions and automatically create ``Table`` objects from an entire database or schema in one pass: :: >>> metadata = MetaData(myengine, reflect=True) >>> metadata.tables.keys() ['table_a', 'table_b', 'table_c', '...'] ``MetaData`` also gains a ``.reflect()`` method enabling finer control over the loading process, including specification of a subset of available tables to load. SQL Execution ============= ``engine``, ``connectable``, and ``bind_to`` are all now ``bind`` ----------------------------------------------------------------- ``Transactions``, ``NestedTransactions`` and ``TwoPhaseTransactions`` --------------------------------------------------------------------- Connection Pool Events ---------------------- The connection pool now fires events when new DB-API connections are created, checked out and checked back into the pool. You can use these to execute session-scoped SQL setup statements on fresh connections, for example. Oracle Engine Fixed ------------------- In 0.3.11, there were bugs in the Oracle Engine on how Primary Keys are handled. These bugs could cause programs that worked fine with other engines, such as sqlite, to fail when using the Oracle Engine. In 0.4, the Oracle Engine has been reworked, fixing these Primary Key problems. Out Parameters for Oracle ------------------------- :: result = engine.execute(text("begin foo(:x, :y, :z); end;", bindparams=[bindparam('x', Numeric), outparam('y', Numeric), outparam('z', Numeric)]), x=5) assert result.out_parameters == {'y':10, 'z':75} Connection-bound ``MetaData``, ``Sessions`` ------------------------------------------- ``MetaData`` and ``Session`` can be explicitly bound to a connection: :: conn = engine.connect() sess = create_session(bind=conn) Faster, More Foolproof ``ResultProxy`` Objects ---------------------------------------------- SQLAlchemy-0.8.4/doc/build/changelog/migration_05.rst0000644000076500000240000006332212251147171023062 0ustar classicstaff00000000000000============================= What's new in SQLAlchemy 0.5? ============================= .. admonition:: About this Document This document describes changes between SQLAlchemy version 0.4, last released October 12, 2008, and SQLAlchemy version 0.5, last released January 16, 2010. Document date: August 4, 2009 This guide documents API changes which affect users migrating their applications from the 0.4 series of SQLAlchemy to 0.5. It's also recommended for those working from `Essential SQLAlchemy `_, which only covers 0.4 and seems to even have some old 0.3isms in it. 
Note that SQLAlchemy 0.5 removes many behaviors which were deprecated throughout the span of the 0.4 series, and also deprecates more behaviors specific to 0.4. Major Documentation Changes =========================== Some sections of the documentation have been completely rewritten and can serve as an introduction to new ORM features. The ``Query`` and ``Session`` objects in particular have some distinct differences in API and behavior which fundamentally change many of the basic ways things are done, particularly with regards to constructing highly customized ORM queries and dealing with stale session state, commits and rollbacks. * `ORM Tutorial `_ * `Session Documentation `_ Deprecations Source =================== Another source of information is documented within a series of unit tests illustrating up to date usages of some common ``Query`` patterns; this file can be viewed at [source:sqlalchemy/trunk/test/orm/test_deprecations.py]. Requirements Changes ==================== * Python 2.4 or higher is required. The SQLAlchemy 0.4 line is the last version with Python 2.3 support. Object Relational Mapping ========================= * **Column level expressions within Query.** - as detailed in the `tutorial `_, ``Query`` has the capability to create specific SELECT statements, not just those against full rows: :: session.query(User.name, func.count(Address.id).label("numaddresses")).join(Address).group_by(User.name) The tuples returned by any multi-column/entity query are *named*' tuples: :: for row in session.query(User.name, func.count(Address.id).label('numaddresses')).join(Address).group_by(User.name): print "name", row.name, "number", row.numaddresses ``Query`` has a ``statement`` accessor, as well as a ``subquery()`` method which allow ``Query`` to be used to create more complex combinations: :: subq = session.query(Keyword.id.label('keyword_id')).filter(Keyword.name.in_(['beans', 'carrots'])).subquery() recipes = session.query(Recipe).filter(exists(). where(Recipe.id==recipe_keywords.c.recipe_id). where(recipe_keywords.c.keyword_id==subq.c.keyword_id) ) * **Explicit ORM aliases are recommended for aliased joins** - The ``aliased()`` function produces an "alias" of a class, which allows fine-grained control of aliases in conjunction with ORM queries. While a table-level alias (i.e. ``table.alias()``) is still usable, an ORM level alias retains the semantics of the ORM mapped object which is significant for inheritance mappings, options, and other scenarios. E.g.: :: Friend = aliased(Person) session.query(Person, Friend).join((Friend, Person.friends)).all() * **query.join() greatly enhanced.** - You can now specify the target and ON clause for a join in multiple ways. A target class alone can be provided where SQLA will attempt to form a join to it via foreign key in the same way as ``table.join(someothertable)``. A target and an explicit ON condition can be provided, where the ON condition can be a ``relation()`` name, an actual class descriptor, or a SQL expression. Or the old way of just a ``relation()`` name or class descriptor works too. See the ORM tutorial which has several examples. 
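  As a quick illustration (a sketch assuming a typical ``User``/``Address``
  mapping, not an excerpt from the tutorial itself), the new ``join()`` call
  forms look like: ::

      # join to the target class, inferring the ON clause via foreign key
      session.query(User).join(Address)

      # join via a class-bound descriptor
      session.query(User).join(User.addresses)

      # join with an explicit target and ON condition
      session.query(User).join((Address, User.id == Address.user_id))

      # the plain relation() name still works
      session.query(User).join('addresses')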
* **Declarative is recommended for applications which don't require (and don't prefer) abstraction between tables and mappers** - The [/docs/05/reference/ext/declarative.html Declarative] module, which is used to combine the expression of ``Table``, ``mapper()``, and user defined class objects together, is highly recommended as it simplifies application configuration, ensures the "one mapper per class" pattern, and allows the full range of configuration available to distinct ``mapper()`` calls. Separate ``mapper()`` and ``Table`` usage is now referred to as "classical SQLAlchemy usage" and of course is freely mixable with declarative. * **The .c. attribute has been removed** from classes (i.e. ``MyClass.c.somecolumn``). As is the case in 0.4, class- level properties are usable as query elements, i.e. ``Class.c.propname`` is now superseded by ``Class.propname``, and the ``c`` attribute continues to remain on ``Table`` objects where they indicate the namespace of ``Column`` objects present on the table. To get at the Table for a mapped class (if you didn't keep it around already): :: table = class_mapper(someclass).mapped_table Iterate through columns: :: for col in table.c: print col Work with a specific column: :: table.c.somecolumn The class-bound descriptors support the full set of Column operators as well as the documented relation-oriented operators like ``has()``, ``any()``, ``contains()``, etc. The reason for the hard removal of ``.c.`` is that in 0.5, class-bound descriptors carry potentially different meaning, as well as information regarding class mappings, versus plain ``Column`` objects - and there are use cases where you'd specifically want to use one or the other. Generally, using class-bound descriptors invokes a set of mapping/polymorphic aware translations, and using table- bound columns does not. In 0.4, these translations were applied across the board to all expressions, but 0.5 differentiates completely between columns and mapped descriptors, only applying translations to the latter. So in many cases, particularly when dealing with joined table inheritance configurations as well as when using ``query()``, ``Class.propname`` and ``table.c.colname`` are not interchangeable. For example, ``session.query(users.c.id, users.c.name)`` is different versus ``session.query(User.id, User.name)``; in the latter case, the ``Query`` is aware of the mapper in use and further mapper-specific operations like ``query.join()``, ``query.with_parent()`` etc. may be used, but in the former case cannot. Additionally, in polymorphic inheritance scenarios, the class-bound descriptors refer to the columns present in the polymorphic selectable in use, not necessarily the table column which directly corresponds to the descriptor. For example, a set of classes related by joined-table inheritance to the ``person`` table along the ``person_id`` column of each table will all have their ``Class.person_id`` attribute mapped to the ``person_id`` column in ``person``, and not their subclass table. Version 0.4 would map this behavior onto table-bound ``Column`` objects automatically. In 0.5, this automatic conversion has been removed, so that you in fact *can* use table-bound columns as a means to override the translations which occur with polymorphic querying; this allows ``Query`` to be able to create optimized selects among joined-table or concrete-table inheritance setups, as well as portable subqueries, etc. 
* **Session Now Synchronizes Automatically with Transactions.** Session now synchronizes against the transaction automatically by default, including autoflush and autoexpire. A transaction is present at all times unless disabled using the ``autocommit`` option. When all three flags are set to their default, the Session recovers gracefully after rollbacks and it's very difficult to get stale data into the session. See the new Session documentation for details. * **Implicit Order By Is Removed**. This will impact ORM users who rely upon SA's "implicit ordering" behavior, which states that all Query objects which don't have an ``order_by()`` will ORDER BY the "id" or "oid" column of the primary mapped table, and all lazy/eagerly loaded collections apply a similar ordering. In 0.5, automatic ordering must be explicitly configured on ``mapper()`` and ``relation()`` objects (if desired), or otherwise when using ``Query``. To convert an 0.4 mapping to 0.5, such that its ordering behavior will be extremely similar to 0.4 or previous, use the ``order_by`` setting on ``mapper()`` and ``relation()``: :: mapper(User, users, properties={ 'addresses':relation(Address, order_by=addresses.c.id) }, order_by=users.c.id) To set ordering on a backref, use the ``backref()`` function: :: 'keywords':relation(Keyword, secondary=item_keywords, order_by=keywords.c.name, backref=backref('items', order_by=items.c.id)) Using declarative ? To help with the new ``order_by`` requirement, ``order_by`` and friends can now be set using strings which are evaluated in Python later on (this works **only** with declarative, not plain mappers): :: class MyClass(MyDeclarativeBase): ... 'addresses':relation("Address", order_by="Address.id") It's generally a good idea to set ``order_by`` on ``relation()s`` which load list-based collections of items, since that ordering cannot otherwise be affected. Other than that, the best practice is to use ``Query.order_by()`` to control ordering of the primary entities being loaded. * **Session is now autoflush=True/autoexpire=True/autocommit=False.** - To set it up, just call ``sessionmaker()`` with no arguments. The name ``transactional=True`` is now ``autocommit=False``. Flushes occur upon each query issued (disable with ``autoflush=False``), within each ``commit()`` (as always), and before each ``begin_nested()`` (so rolling back to the SAVEPOINT is meaningful). All objects are expired after each ``commit()`` and after each ``rollback()``. After rollback, pending objects are expunged, deleted objects move back to persistent. These defaults work together very nicely and there's really no more need for old techniques like ``clear()`` (which is renamed to ``expunge_all()`` as well). P.S.: sessions are now reusable after a ``rollback()``. Scalar and collection attribute changes, adds and deletes are all rolled back. * **session.add() replaces session.save(), session.update(), session.save_or_update().** - the ``session.add(someitem)`` and ``session.add_all([list of items])`` methods replace ``save()``, ``update()``, and ``save_or_update()``. Those methods will remain deprecated throughout 0.5. * **backref configuration made less verbose.** - The ``backref()`` function now uses the ``primaryjoin`` and ``secondaryjoin`` arguments of the forwards-facing ``relation()`` when they are not explicitly stated. It's no longer necessary to specify ``primaryjoin``/``secondaryjoin`` in both directions separately. 
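  For example (a sketch using hypothetical ``users`` and ``addresses`` tables,
  not taken from this guide), the join condition now only needs to be stated
  on the forward-facing ``relation()``: ::

      from sqlalchemy.orm import mapper, relation, backref

      mapper(Address, addresses, properties={
          'user': relation(User,
                           primaryjoin=users.c.id == addresses.c.user_id,
                           # in 0.4 the backref needed its own primaryjoin;
                           # in 0.5 it reuses the one given above
                           backref=backref('addresses'))
      })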
* **Simplified polymorphic options.** - The ORM's "polymorphic load" behavior has been simplified. In 0.4, mapper() had an argument called ``polymorphic_fetch`` which could be configured as ``select`` or ``deferred``. This option is removed; the mapper will now just defer any columns which were not present in the SELECT statement. The actual SELECT statement used is controlled by the ``with_polymorphic`` mapper argument (which is also in 0.4 and replaces ``select_table``), as well as the ``with_polymorphic()`` method on ``Query`` (also in 0.4). An improvement to the deferred loading of inheriting classes is that the mapper now produces the "optimized" version of the SELECT statement in all cases; that is, if class B inherits from A, and several attributes only present on class B have been expired, the refresh operation will only include B's table in the SELECT statement and will not JOIN to A. * The ``execute()`` method on ``Session`` converts plain strings into ``text()`` constructs, so that bind parameters may all be specified as ":bindname" without needing to call ``text()`` explicitly. If "raw" SQL is desired here, use ``session.connection().execute("raw text")``. * ``session.Query().iterate_instances()`` has been renamed to just ``instances()``. The old ``instances()`` method returning a list instead of an iterator no longer exists. If you were relying on that behavior, you should use ``list(your_query.instances())``. Extending the ORM ================= In 0.5 we're moving forward with more ways to modify and extend the ORM. Heres a summary: * **MapperExtension.** - This is the classic extension class, which remains. Methods which should rarely be needed are ``create_instance()`` and ``populate_instance()``. To control the initialization of an object when it's loaded from the database, use the ``reconstruct_instance()`` method, or more easily the ``@reconstructor`` decorator described in the documentation. * **SessionExtension.** - This is an easy to use extension class for session events. In particular, it provides ``before_flush()``, ``after_flush()`` and ``after_flush_postexec()`` methods. It's usage is recommended over ``MapperExtension.before_XXX`` in many cases since within ``before_flush()`` you can modify the flush plan of the session freely, something which cannot be done from within ``MapperExtension``. * **AttributeExtension.** - This class is now part of the public API, and allows the interception of userland events on attributes, including attribute set and delete operations, and collection appends and removes. It also allows the value to be set or appended to be modified. The ``@validates`` decorator, described in the documentation, provides a quick way to mark any mapped attributes as being "validated" by a particular class method. * **Attribute Instrumentation Customization.** - An API is provided for ambitious efforts to entirely replace SQLAlchemy's attribute instrumentation, or just to augment it in some cases. This API was produced for the purposes of the Trellis toolkit, but is available as a public API. Some examples are provided in the distribution in the ``/examples/custom_attributes`` directory. Schema/Types ============ * **String with no length no longer generates TEXT, it generates VARCHAR** - The ``String`` type no longer magically converts into a ``Text`` type when specified with no length. This only has an effect when CREATE TABLE is issued, as it will issue ``VARCHAR`` with no length parameter, which is not valid on many (but not all) databases. 
To create a TEXT (or CLOB, i.e. unbounded string) column, use the ``Text`` type. * **PickleType() with mutable=True requires an __eq__() method** - The ``PickleType`` type needs to compare values when mutable=True. The method of comparing ``pickle.dumps()`` is inefficient and unreliable. If an incoming object does not implement ``__eq__()`` and is also not ``None``, the ``dumps()`` comparison is used but a warning is raised. For types which implement ``__eq__()`` which includes all dictionaries, lists, etc., comparison will use ``==`` and is now reliable by default. * **convert_bind_param() and convert_result_value() methods of TypeEngine/TypeDecorator are removed.** - The O'Reilly book unfortunately documented these methods even though they were deprecated post 0.3. For a user-defined type which subclasses ``TypeEngine``, the ``bind_processor()`` and ``result_processor()`` methods should be used for bind/result processing. Any user defined type, whether extending ``TypeEngine`` or ``TypeDecorator``, which uses the old 0.3 style can be easily adapted to the new style using the following adapter: :: class AdaptOldConvertMethods(object): """A mixin which adapts 0.3-style convert_bind_param and convert_result_value methods """ def bind_processor(self, dialect): def convert(value): return self.convert_bind_param(value, dialect) return convert def result_processor(self, dialect): def convert(value): return self.convert_result_value(value, dialect) return convert def convert_result_value(self, value, dialect): return value def convert_bind_param(self, value, dialect): return value To use the above mixin: :: class MyType(AdaptOldConvertMethods, TypeEngine): # ... * The ``quote`` flag on ``Column`` and ``Table`` as well as the ``quote_schema`` flag on ``Table`` now control quoting both positively and negatively. The default is ``None``, meaning let regular quoting rules take effect. When ``True``, quoting is forced on. When ``False``, quoting is forced off. * Column ``DEFAULT`` value DDL can now be more conveniently specified with ``Column(..., server_default='val')``, deprecating ``Column(..., PassiveDefault('val'))``. ``default=`` is now exclusively for Python-initiated default values, and can coexist with server_default. A new ``server_default=FetchedValue()`` replaces the ``PassiveDefault('')`` idiom for marking columns as subject to influence from external triggers and has no DDL side effects. * SQLite's ``DateTime``, ``Time`` and ``Date`` types now **only accept datetime objects, not strings** as bind parameter input. If you'd like to create your own "hybrid" type which accepts strings and returns results as date objects (from whatever format you'd like), create a ``TypeDecorator`` that builds on ``String``. If you only want string-based dates, just use ``String``. * Additionally, the ``DateTime`` and ``Time`` types, when used with SQLite, now represent the "microseconds" field of the Python ``datetime.datetime`` object in the same manner as ``str(datetime)`` - as fractional seconds, not a count of microseconds. That is: :: dt = datetime.datetime(2008, 6, 27, 12, 0, 0, 125) # 125 usec # old way '2008-06-27 12:00:00.125' # new way '2008-06-27 12:00:00.000125' So if an existing SQLite file-based database intends to be used across 0.4 and 0.5, you either have to upgrade the datetime columns to store the new format (NOTE: please test this, I'm pretty sure its correct): :: UPDATE mytable SET somedatecol = substr(somedatecol, 0, 19) || '.' 
|| substr((substr(somedatecol, 21, -1) / 1000000), 3, -1); or, enable "legacy" mode as follows: :: from sqlalchemy.databases.sqlite import DateTimeMixin DateTimeMixin.__legacy_microseconds__ = True Connection Pool no longer threadlocal by default ================================================ 0.4 has an unfortunate default setting of "pool_threadlocal=True", leading to surprise behavior when, for example, using multiple Sessions within a single thread. This flag is now off in 0.5. To re-enable 0.4's behavior, specify ``pool_threadlocal=True`` to ``create_engine()``, or alternatively use the "threadlocal" strategy via ``strategy="threadlocal"``. \*args Accepted, \*args No Longer Accepted ========================================== The policy with ``method(\*args)`` vs. ``method([args])`` is, if the method accepts a variable-length set of items which represent a fixed structure, it takes ``\*args``. If the method accepts a variable-length set of items that are data-driven, it takes ``[args]``. * The various Query.options() functions ``eagerload()``, ``eagerload_all()``, ``lazyload()``, ``contains_eager()``, ``defer()``, ``undefer()`` all accept variable-length ``\*keys`` as their argument now, which allows a path to be formulated using descriptors, ie.: :: query.options(eagerload_all(User.orders, Order.items, Item.keywords)) A single array argument is still accepted for backwards compatibility. * Similarly, the ``Query.join()`` and ``Query.outerjoin()`` methods accept a variable length \*args, with a single array accepted for backwards compatibility: :: query.join('orders', 'items') query.join(User.orders, Order.items) * the ``in_()`` method on columns and similar only accepts a list argument now. It no longer accepts ``\*args``. Removed ======= * **entity_name** - This feature was always problematic and rarely used. 0.5's more deeply fleshed out use cases revealed further issues with ``entity_name`` which led to its removal. If different mappings are required for a single class, break the class into separate subclasses and map them separately. An example of this is at [wiki:UsageRecipes/EntityName]. More information regarding rationale is described at http://groups.google.c om/group/sqlalchemy/browse_thread/thread/9e23a0641a88b96d? hl=en . * **get()/load() cleanup** The ``load()`` method has been removed. It's functionality was kind of arbitrary and basically copied from Hibernate, where it's also not a particularly meaningful method. To get equivalent functionality: :: x = session.query(SomeClass).populate_existing().get(7) ``Session.get(cls, id)`` and ``Session.load(cls, id)`` have been removed. ``Session.get()`` is redundant vs. ``session.query(cls).get(id)``. ``MapperExtension.get()`` is also removed (as is ``MapperExtension.load()``). To override the functionality of ``Query.get()``, use a subclass: :: class MyQuery(Query): def get(self, ident): # ... session = sessionmaker(query_cls=MyQuery)() ad1 = session.query(Address).get(1) * ``sqlalchemy.orm.relation()`` The following deprecated keyword arguments have been removed: foreignkey, association, private, attributeext, is_backref In particular, ``attributeext`` is replaced with ``extension`` - the ``AttributeExtension`` class is now in the public API. 
* ``session.Query()`` The following deprecated functions have been removed: list, scalar, count_by, select_whereclause, get_by, select_by, join_by, selectfirst, selectone, select, execute, select_statement, select_text, join_to, join_via, selectfirst_by, selectone_by, apply_max, apply_min, apply_avg, apply_sum Additionally, the ``id`` keyword argument to ``join()``, ``outerjoin()``, ``add_entity()`` and ``add_column()`` has been removed. To target table aliases in ``Query`` to result columns, use the ``aliased`` construct: :: from sqlalchemy.orm import aliased address_alias = aliased(Address) print session.query(User, address_alias).join((address_alias, User.addresses)).all() * ``sqlalchemy.orm.Mapper`` * instances() * get_session() - this method was not very noticeable, but had the effect of associating lazy loads with a particular session even if the parent object was entirely detached, when an extension such as ``scoped_session()`` or the old ``SessionContextExt`` was used. It's possible that some applications which relied upon this behavior will no longer work as expected; but the better programming practice here is to always ensure objects are present within sessions if database access from their attributes are required. * ``mapper(MyClass, mytable)`` Mapped classes no are longer instrumented with a "c" class attribute; e.g. ``MyClass.c`` * ``sqlalchemy.orm.collections`` The _prepare_instrumentation alias for prepare_instrumentation has been removed. * ``sqlalchemy.orm`` Removed the ``EXT_PASS`` alias of ``EXT_CONTINUE``. * ``sqlalchemy.engine`` The alias from ``DefaultDialect.preexecute_sequences`` to ``.preexecute_pk_sequences`` has been removed. The deprecated engine_descriptors() function has been removed. * ``sqlalchemy.ext.activemapper`` Module removed. * ``sqlalchemy.ext.assignmapper`` Module removed. * ``sqlalchemy.ext.associationproxy`` Pass-through of keyword args on the proxy's ``.append(item, \**kw)`` has been removed and is now simply ``.append(item)`` * ``sqlalchemy.ext.selectresults``, ``sqlalchemy.mods.selectresults`` Modules removed. * ``sqlalchemy.ext.declarative`` ``declared_synonym()`` removed. * ``sqlalchemy.ext.sessioncontext`` Module removed. * ``sqlalchemy.log`` The ``SADeprecationWarning`` alias to ``sqlalchemy.exc.SADeprecationWarning`` has been removed. * ``sqlalchemy.exc`` ``exc.AssertionError`` has been removed and usage replaced by the Python built-in of the same name. * ``sqlalchemy.databases.mysql`` The deprecated ``get_version_info`` dialect method has been removed. Renamed or Moved ================ * ``sqlalchemy.exceptions`` is now ``sqlalchemy.exc`` The module may still be imported under the old name until 0.6. * ``FlushError``, ``ConcurrentModificationError``, ``UnmappedColumnError`` -> sqlalchemy.orm.exc These exceptions moved to the orm package. Importing 'sqlalchemy.orm' will install aliases in sqlalchemy.exc for compatibility until 0.6. * ``sqlalchemy.logging`` -> ``sqlalchemy.log`` This internal module was renamed. No longer needs to be special cased when packaging SA with py2app and similar tools that scan imports. * ``session.Query().iterate_instances()`` -> ``session.Query().instances()``. Deprecated ========== * ``Session.save()``, ``Session.update()``, ``Session.save_or_update()`` All three replaced by ``Session.add()`` * ``sqlalchemy.PassiveDefault`` Use ``Column(server_default=...)`` Translates to sqlalchemy.DefaultClause() under the hood. * ``session.Query().iterate_instances()``. It has been renamed to ``instances()``. 
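To summarize the deprecations above in code form (a sketch only, using placeholder objects and column names): ::

    # 0.4 idioms, deprecated in 0.5
    sess.save(user)
    sess.update(address)
    Column('status', String(20), PassiveDefault('unknown'))

    # 0.5 replacements
    sess.add(user)
    sess.add(address)
    Column('status', String(20), server_default='unknown')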
SQLAlchemy-0.8.4/doc/build/changelog/migration_06.rst0000644000076500000240000012662012251147171023064 0ustar classicstaff00000000000000============================== What's New in SQLAlchemy 0.6? ============================== .. admonition:: About this Document This document describes changes between SQLAlchemy version 0.5, last released January 16, 2010, and SQLAlchemy version 0.6, last released May 5, 2012. Document date: June 6, 2010 This guide documents API changes which affect users migrating their applications from the 0.5 series of SQLAlchemy to 0.6. Note that SQLAlchemy 0.6 removes some behaviors which were deprecated throughout the span of the 0.5 series, and also deprecates more behaviors specific to 0.5. Platform Support ================ * cPython versions 2.4 and upwards throughout the 2.xx series * Jython 2.5.1 - using the zxJDBC DBAPI included with Jython. * cPython 3.x - see [source:sqlalchemy/trunk/README.py3k] for information on how to build for python3. New Dialect System ================== Dialect modules are now broken up into distinct subcomponents, within the scope of a single database backend. Dialect implementations are now in the ``sqlalchemy.dialects`` package. The ``sqlalchemy.databases`` package still exists as a placeholder to provide some level of backwards compatibility for simple imports. For each supported database, a sub-package exists within ``sqlalchemy.dialects`` where several files are contained. Each package contains a module called ``base.py`` which defines the specific SQL dialect used by that database. It also contains one or more "driver" modules, each one corresponding to a specific DBAPI - these files are named corresponding to the DBAPI itself, such as ``pysqlite``, ``cx_oracle``, or ``pyodbc``. The classes used by SQLAlchemy dialects are first declared in the ``base.py`` module, defining all behavioral characteristics defined by the database. These include capability mappings, such as "supports sequences", "supports returning", etc., type definitions, and SQL compilation rules. Each "driver" module in turn provides subclasses of those classes as needed which override the default behavior to accommodate the additional features, behaviors, and quirks of that DBAPI. For DBAPIs that support multiple backends (pyodbc, zxJDBC, mxODBC), the dialect module will use mixins from the ``sqlalchemy.connectors`` package, which provide functionality common to that DBAPI across all backends, most typically dealing with connect arguments. This means that connecting using pyodbc, zxJDBC or mxODBC (when implemented) is extremely consistent across supported backends. The URL format used by ``create_engine()`` has been enhanced to handle any number of DBAPIs for a particular backend, using a scheme that is inspired by that of JDBC. The previous format still works, and will select a "default" DBAPI implementation, such as the Postgresql URL below that will use psycopg2: :: create_engine('postgresql://scott:tiger@localhost/test') However to specify a specific DBAPI backend such as pg8000, add it to the "protocol" section of the URL using a plus sign "+": :: create_engine('postgresql+pg8000://scott:tiger@localhost/test') Important Dialect Links: * Documentation on connect arguments: http://www.sqlalchemy.org/docs/06/dbengine.html#create- engine-url-arguments. * Reference documentation for individual dialects: http://ww w.sqlalchemy.org/docs/06/reference/dialects/index.html * The tips and tricks at DatabaseNotes. 
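As a small illustration of the URL scheme above, the following sketch creates two engines against the same Postgresql backend using different DBAPIs.  It assumes both psycopg2 and pg8000 are installed, and the connection URL itself is hypothetical; the ``dialect.name`` and ``dialect.driver`` attributes are part of the new dialect system:

::

    from sqlalchemy import create_engine

    # the plain backend name selects the default DBAPI (psycopg2)
    e1 = create_engine('postgresql://scott:tiger@localhost/test')

    # "+pg8000" selects the pg8000 DBAPI for the same backend
    e2 = create_engine('postgresql+pg8000://scott:tiger@localhost/test')

    # both engines share the same dialect family, differing only in driver
    print e1.dialect.name, e1.dialect.driver     # postgresql psycopg2
    print e2.dialect.name, e2.dialect.driver     # postgresql pg8000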
Other notes regarding dialects:

* the type system has been changed dramatically in SQLAlchemy 0.6.  This has an impact on all dialects regarding naming conventions, behaviors, and implementations.  See the section on "Types" below.

* the ``ResultProxy`` object now offers a 2x speed improvement in some cases thanks to some refactorings.

* the ``RowProxy``, i.e. individual result row object, is now directly pickleable.

* the setuptools entrypoint used to locate external dialects is now called ``sqlalchemy.dialects``.  An external dialect written against 0.4 or 0.5 will need to be modified to work with 0.6 in any case so this change does not add any additional difficulties.

* dialects now receive an initialize() event on initial connection to determine connection properties.

* Functions and operators generated by the compiler now use (almost) regular dispatch functions of the form ``visit_<opname>`` and ``visit_<funcname>_fn`` to provide customized processing.  This replaces the need to copy the "functions" and "operators" dictionaries in compiler subclasses with straightforward visitor methods, and also allows compiler subclasses complete control over rendering, as the full _Function or _BinaryExpression object is passed in.

Dialect Imports
---------------

The import structure of dialects has changed.  Each dialect now exports its base "dialect" class as well as the full set of SQL types supported on that dialect via ``sqlalchemy.dialects.<name>``.  For example, to import a set of PG types:

::

    from sqlalchemy.dialects.postgresql import INTEGER, BIGINT, SMALLINT,\
        VARCHAR, MACADDR, DATE, BYTEA

Above, ``INTEGER`` is actually the plain ``INTEGER`` type from ``sqlalchemy.types``, but the PG dialect makes it available in the same way as those types which are specific to PG, such as ``BYTEA`` and ``MACADDR``.

Expression Language Changes
===========================

An Important Expression Language Gotcha
---------------------------------------

There's one quite significant behavioral change to the expression language which may affect some applications.  The boolean value of Python boolean expressions, i.e. ``==``, ``!=``, and similar, now evaluates accurately with regards to the two clause objects being compared.

As we know, comparing a ``ClauseElement`` to any other object returns another ``ClauseElement``:

::

    >>> from sqlalchemy.sql import column
    >>> column('foo') == 5
    <sqlalchemy.sql.expression._BinaryExpression object at 0x...>

This is so that Python expressions produce SQL expressions when converted to strings:

::

    >>> str(column('foo') == 5)
    'foo = :foo_1'

But what happens if we say this?

::

    >>> if column('foo') == 5:
    ...     print "yes"
    ...

In previous versions of SQLAlchemy, the returned ``_BinaryExpression`` was a plain Python object which evaluated to ``True``.  Now it evaluates according to whether or not the actual ``ClauseElement`` should have the same hash value as the one to which it's being compared.  Meaning:

::

    >>> bool(column('foo') == 5)
    False
    >>> bool(column('foo') == column('foo'))
    False
    >>> c = column('foo')
    >>> bool(c == c)
    True
    >>>

That means code such as the following:

::

    if expression:
        print "the expression is:", expression

Would not evaluate if ``expression`` was a binary clause.  Since the above pattern should never be used, the base ``ClauseElement`` now raises an exception if called in a boolean context:

::

    >>> bool(c)
    Traceback (most recent call last):
      File "<stdin>", line 1, in ...
raise TypeError("Boolean value of this clause is not defined") TypeError: Boolean value of this clause is not defined Code that wants to check for the presence of a ``ClauseElement`` expression should instead say: :: if expression is not None: print "the expression is:", expression Keep in mind, **this applies to Table and Column objects too**. The rationale for the change is twofold: * Comparisons of the form ``if c1 == c2: `` can actually be written now * Support for correct hashing of ``ClauseElement`` objects now works on alternate platforms, namely Jython. Up until this point SQLAlchemy relied heavily on the specific behavior of cPython in this regard (and still had occasional problems with it). Stricter "executemany" Behavior ------------------------------- An "executemany" in SQLAlchemy corresponds to a call to ``execute()``, passing along a collection of bind parameter sets: :: connection.execute(table.insert(), {'data':'row1'}, {'data':'row2'}, {'data':'row3'}) When the ``Connection`` object sends off the given ``insert()`` construct for compilation, it passes to the compiler the keynames present in the first set of binds passed along to determine the construction of the statement's VALUES clause. Users familiar with this construct will know that additional keys present in the remaining dictionaries don't have any impact. What's different now is that all subsequent dictionaries need to include at least *every* key that is present in the first dictionary. This means that a call like this no longer works: :: connection.execute(table.insert(), {'timestamp':today, 'data':'row1'}, {'timestamp':today, 'data':'row2'}, {'data':'row3'}) Because the third row does not specify the 'timestamp' column. Previous versions of SQLAlchemy would simply insert NULL for these missing columns. However, if the ``timestamp`` column in the above example contained a Python-side default value or function, it would *not* be used. This because the "executemany" operation is optimized for maximum performance across huge numbers of parameter sets, and does not attempt to evaluate Python-side defaults for those missing keys. Because defaults are often implemented either as SQL expressions which are embedded inline with the INSERT statement, or are server side expressions which again are triggered based on the structure of the INSERT string, which by definition cannot fire off conditionally based on each parameter set, it would be inconsistent for Python side defaults to behave differently vs. SQL/server side defaults. (SQL expression based defaults are embedded inline as of the 0.5 series, again to minimize the impact of huge numbers of parameter sets). SQLAlchemy 0.6 therefore establishes predictable consistency by forbidding any subsequent parameter sets from leaving any fields blank. That way, there's no more silent failure of Python side default values and functions, which additionally are allowed to remain consistent in their behavior versus SQL and server side defaults. UNION and other "compound" constructs parenthesize consistently --------------------------------------------------------------- A rule that was designed to help SQLite has been removed, that of the first compound element within another compound (such as, a ``union()`` inside of an ``except_()``) wouldn't be parenthesized. This is inconsistent and produces the wrong results on Postgresql, which has precedence rules regarding INTERSECTION, and its generally a surprise. 
When using complex composites with SQLite, you now need to turn the first element into a subquery (which is also compatible on PG). A new example is in the SQL expression tutorial at the end of [http://www.sqlalchemy.org/docs/06/sqlexpression.html #unions-and-other-set-operations]. See :ticket:`1665` and r6690 for more background. C Extensions for Result Fetching ================================ The ``ResultProxy`` and related elements, including most common "row processing" functions such as unicode conversion, numerical/boolean conversions and date parsing, have been re-implemented as optional C extensions for the purposes of performance. This represents the beginning of SQLAlchemy's path to the "dark side" where we hope to continue improving performance by reimplementing critical sections in C. The extensions can be built by specifying ``--with-cextensions``, i.e. ``python setup.py --with- cextensions install``. The extensions have the most dramatic impact on result fetching using direct ``ResultProxy`` access, i.e. that which is returned by ``engine.execute()``, ``connection.execute()``, or ``session.execute()``. Within results returned by an ORM ``Query`` object, result fetching is not as high a percentage of overhead, so ORM performance improves more modestly, and mostly in the realm of fetching large result sets. The performance improvements highly depend on the dbapi in use and on the syntax used to access the columns of each row (eg ``row['name']`` is much faster than ``row.name``). The current extensions have no impact on the speed of inserts/updates/deletes, nor do they improve the latency of SQL execution, that is, an application that spends most of its time executing many statements with very small result sets will not see much improvement. Performance has been improved in 0.6 versus 0.5 regardless of the extensions. A quick overview of what connecting and fetching 50,000 rows looks like with SQLite, using mostly direct SQLite access, a ``ResultProxy``, and a simple mapped ORM object: :: sqlite select/native: 0.260s 0.6 / C extension sqlalchemy.sql select: 0.360s sqlalchemy.orm fetch: 2.500s 0.6 / Pure Python sqlalchemy.sql select: 0.600s sqlalchemy.orm fetch: 3.000s 0.5 / Pure Python sqlalchemy.sql select: 0.790s sqlalchemy.orm fetch: 4.030s Above, the ORM fetches the rows 33% faster than 0.5 due to in-python performance enhancements. With the C extensions we get another 20%. However, ``ResultProxy`` fetches improve by 67% with the C extension versus not. Other tests report as much as a 200% speed improvement for some scenarios, such as those where lots of string conversions are occurring. New Schema Capabilities ======================= The ``sqlalchemy.schema`` package has received some long- needed attention. The most visible change is the newly expanded DDL system. 
In SQLAlchemy, it was possible since version 0.5 to create custom DDL strings and associate them with tables or metadata objects:

::

    from sqlalchemy.schema import DDL
    DDL('CREATE TRIGGER users_trigger ...').execute_at('after-create', metadata)

Now the full suite of DDL constructs is available under the same system, including those for CREATE TABLE, ADD CONSTRAINT, etc.:

::

    from sqlalchemy.schema import CheckConstraint, AddConstraint
    AddConstraint(CheckConstraint("value > 5")).execute_at('after-create', mytable)

Additionally, all the DDL objects are now regular ``ClauseElement`` objects just like any other SQLAlchemy expression object:

::

    from sqlalchemy.schema import CreateTable

    create = CreateTable(mytable)

    # dumps the CREATE TABLE as a string
    print create

    # executes the CREATE TABLE statement
    engine.execute(create)

and using the ``sqlalchemy.ext.compiler`` extension you can make your own:

::

    from sqlalchemy.schema import DDLElement
    from sqlalchemy.ext.compiler import compiles

    class AlterColumn(DDLElement):

        def __init__(self, column, cmd):
            self.column = column
            self.cmd = cmd

    @compiles(AlterColumn)
    def visit_alter_column(element, compiler, **kw):
        return "ALTER TABLE %s ALTER COLUMN %s %s ..." % (
            element.column.table.name,
            element.column.name,
            element.cmd
        )

    engine.execute(AlterColumn(table.c.mycolumn, "SET DEFAULT 'test'"))

Deprecated/Removed Schema Elements
----------------------------------

The schema package has also been greatly streamlined.  Many options and methods which were deprecated throughout 0.5 have been removed.  Other little known accessors and methods have also been removed.

* the "owner" keyword argument is removed from ``Table``.  Use "schema" to represent any namespaces to be prepended to the table name.

* deprecated ``MetaData.connect()`` and ``ThreadLocalMetaData.connect()`` have been removed - set the "bind" attribute to bind a metadata.

* deprecated metadata.table_iterator() method removed (use sorted_tables)

* the "metadata" argument is removed from ``DefaultGenerator`` and subclasses, but remains locally present on ``Sequence``, which is a standalone construct in DDL.

* deprecated ``PassiveDefault`` - use ``DefaultClause``.

* Removed public mutability from ``Index`` and ``Constraint`` objects:

  * ``ForeignKeyConstraint.append_element()``

  * ``Index.append_column()``

  * ``UniqueConstraint.append_column()``

  * ``PrimaryKeyConstraint.add()``

  * ``PrimaryKeyConstraint.remove()``

  These should be constructed declaratively (i.e. in one construction).

* Other removed things:

  * ``Table.key`` (no idea what this was for)

  * ``Column.bind`` (get via column.table.bind)

  * ``Column.metadata`` (get via column.table.metadata)

  * ``Column.sequence`` (use column.default)

Other Behavioral Changes
------------------------

* ``UniqueConstraint``, ``Index``, ``PrimaryKeyConstraint`` all accept lists of column names or column objects as arguments.

* The ``use_alter`` flag on ``ForeignKey`` is now a shortcut option for operations that can be hand-constructed using the ``DDL()`` event system.  A side effect of this refactor is that ``ForeignKeyConstraint`` objects with ``use_alter=True`` will *not* be emitted on SQLite, which does not support ALTER for foreign keys.  This has no effect on SQLite's behavior since SQLite does not actually honor FOREIGN KEY constraints.

* ``Table.primary_key`` is not assignable - use ``table.append_constraint(PrimaryKeyConstraint(...))``

* A ``Column`` definition with a ``ForeignKey`` and no type, e.g.
``Column(name, ForeignKey(sometable.c.somecol))`` used to get the type of the referenced column. Now support for that automatic type inference is partial and may not work in all cases. Logging opened up ================= At the expense of a few extra method calls here and there, you can set log levels for INFO and DEBUG after an engine, pool, or mapper has been created, and logging will commence. The ``isEnabledFor(INFO)`` method is now called per-``Connection`` and ``isEnabledFor(DEBUG)`` per-``ResultProxy`` if already enabled on the parent connection. Pool logging sends to ``log.info()`` and ``log.debug()`` with no check - note that pool checkout/checkin is typically once per transaction. Reflection/Inspector API ======================== The reflection system, which allows reflection of table columns via ``Table('sometable', metadata, autoload=True)`` has been opened up into its own fine-grained API, which allows direct inspection of database elements such as tables, columns, constraints, indexes, and more. This API expresses return values as simple lists of strings, dictionaries, and ``TypeEngine`` objects. The internals of ``autoload=True`` now build upon this system such that the translation of raw database information into ``sqlalchemy.schema`` constructs is centralized and the contract of individual dialects greatly simplified, vastly reducing bugs and inconsistencies across different backends. To use an inspector: :: from sqlalchemy.engine.reflection import Inspector insp = Inspector.from_engine(my_engine) print insp.get_schema_names() the ``from_engine()`` method will in some cases provide a backend-specific inspector with additional capabilities, such as that of Postgresql which provides a ``get_table_oid()`` method: :: my_engine = create_engine('postgresql://...') pg_insp = Inspector.from_engine(my_engine) print pg_insp.get_table_oid('my_table') RETURNING Support ================= The ``insert()``, ``update()`` and ``delete()`` constructs now support a ``returning()`` method, which corresponds to the SQL RETURNING clause as supported by Postgresql, Oracle, MS-SQL, and Firebird. It is not supported for any other backend at this time. Given a list of column expressions in the same manner as that of a ``select()`` construct, the values of these columns will be returned as a regular result set: :: result = connection.execute( table.insert().values(data='some data').returning(table.c.id, table.c.timestamp) ) row = result.first() print "ID:", row['id'], "Timestamp:", row['timestamp'] The implementation of RETURNING across the four supported backends varies wildly, in the case of Oracle requiring an intricate usage of OUT parameters which are re-routed into a "mock" result set, and in the case of MS-SQL using an awkward SQL syntax. The usage of RETURNING is subject to limitations: * it does not work for any "executemany()" style of execution. This is a limitation of all supported DBAPIs. * Some backends, such as Oracle, only support RETURNING that returns a single row - this includes UPDATE and DELETE statements, meaning the update() or delete() construct must match only a single row, or an error is raised (by Oracle, not SQLAlchemy). RETURNING is also used automatically by SQLAlchemy, when available and when not otherwise specified by an explicit ``returning()`` call, to fetch the value of newly generated primary key values for single-row INSERT statements. 
This means there's no more "SELECT nextval(sequence)" pre-execution for insert statements where the primary key value is required.  Truth be told, the implicit RETURNING feature does incur more method overhead than the old "select nextval()" system, which used a quick and dirty cursor.execute() to get at the sequence value, and in the case of Oracle requires additional binding of out parameters.  So if method/protocol overhead is proving to be more expensive than additional database round trips, the feature can be disabled by specifying ``implicit_returning=False`` to ``create_engine()``.

Type System Changes
===================

New Architecture
----------------

The type system has been completely reworked behind the scenes to achieve two goals:

* Separate the handling of bind parameters and result row values, typically a DBAPI requirement, from the SQL specification of the type itself, which is a database requirement.  This is consistent with the overall dialect refactor that separates database SQL behavior from DBAPI.

* Establish a clear and consistent contract for generating DDL from a ``TypeEngine`` object and for constructing ``TypeEngine`` objects based on column reflection.

Highlights of these changes include:

* The construction of types within dialects has been totally overhauled.  Dialects now define publicly available types as UPPERCASE names exclusively, and internal implementation types using underscore identifiers (i.e. are private).  The system by which types are expressed in SQL and DDL has been moved to the compiler system.  This has the effect that there are far fewer type objects within most dialects.  A detailed document on this architecture for dialect authors is in [source:/lib/sqlalchemy/dialects/type_migration_guidelines.txt].

* Reflection of types now returns the exact UPPERCASE type within types.py, or the UPPERCASE type within the dialect itself if the type is not a standard SQL type.  This means reflection now returns more accurate information about reflected types.

* User defined types that subclass ``TypeEngine`` and wish to provide ``get_col_spec()`` should now subclass ``UserDefinedType``.

* The ``result_processor()`` method on all type classes now accepts an additional argument ``coltype``.  This is the DBAPI type object attached to cursor.description, and should be used when applicable to make better decisions on what kind of result-processing callable should be returned.  Ideally result processor functions would never need to use ``isinstance()``, which is an expensive call at this level.

Native Unicode Mode
-------------------

As more DBAPIs support returning Python unicode objects directly, the base dialect now performs a check upon the first connection which establishes whether or not the DBAPI returns a Python unicode object for a basic select of a VARCHAR value.  If so, the ``String`` type and all subclasses (i.e. ``Text``, ``Unicode``, etc.) will skip the "unicode" check/conversion step when result rows are received.  This offers a dramatic performance increase for large result sets.  The "unicode mode" currently is known to work with:

* sqlite3 / pysqlite

* psycopg2 - SQLA 0.6 now uses the "UNICODE" type extension by default on each psycopg2 connection object

* pg8000

* cx_oracle (we use an output processor - nice feature!)

Other types may choose to disable unicode processing as needed, such as the ``NVARCHAR`` type when used with MS-SQL.
In particular, if porting an application based on a DBAPI that formerly returned non-unicode strings, the "native unicode" mode has a plainly different default behavior - columns that are declared as ``String`` or ``VARCHAR`` now return unicode by default whereas they would return strings before. This can break code which expects non-unicode strings. The psycopg2 "native unicode" mode can be disabled by passing ``use_native_unicode=False`` to ``create_engine()``. A more general solution for string columns that explicitly do not want a unicode object is to use a ``TypeDecorator`` that converts unicode back to utf-8, or whatever is desired: :: class UTF8Encoded(TypeDecorator): """Unicode type which coerces to utf-8.""" impl = sa.VARCHAR def process_result_value(self, value, dialect): if isinstance(value, unicode): value = value.encode('utf-8') return value Note that the ``assert_unicode`` flag is now deprecated. SQLAlchemy allows the DBAPI and backend database in use to handle Unicode parameters when available, and does not add operational overhead by checking the incoming type; modern systems like sqlite and Postgresql will raise an encoding error on their end if invalid data is passed. In those cases where SQLAlchemy does need to coerce a bind parameter from Python Unicode to an encoded string, or when the Unicode type is used explicitly, a warning is raised if the object is a bytestring. This warning can be suppressed or converted to an exception using the Python warnings filter documented at: http://docs.python.org/library/warnings.html Generic Enum Type ----------------- We now have an ``Enum`` in the ``types`` module. This is a string type that is given a collection of "labels" which constrain the possible values given to those labels. By default, this type generates a ``VARCHAR`` using the size of the largest label, and applies a CHECK constraint to the table within the CREATE TABLE statement. When using MySQL, the type by default uses MySQL's ENUM type, and when using Postgresql the type will generate a user defined type using ``CREATE TYPE AS ENUM``. In order to create the type using Postgresql, the ``name`` parameter must be specified to the constructor. The type also accepts a ``native_enum=False`` option which will issue the VARCHAR/CHECK strategy for all databases. Note that Postgresql ENUM types currently don't work with pg8000 or zxjdbc. Reflection Returns Dialect-Specific Types ----------------------------------------- Reflection now returns the most specific type possible from the database. That is, if you create a table using ``String``, then reflect it back, the reflected column will likely be ``VARCHAR``. For dialects that support a more specific form of the type, that's what you'll get. So a ``Text`` type would come back as ``oracle.CLOB`` on Oracle, a ``LargeBinary`` might be an ``mysql.MEDIUMBLOB`` etc. The obvious advantage here is that reflection preserves as much information possible from what the database had to say. Some applications that deal heavily in table metadata may wish to compare types across reflected tables and/or non- reflected tables. There's a semi-private accessor available on ``TypeEngine`` called ``_type_affinity`` and an associated comparison helper ``_compare_type_affinity``. 
This accessor returns the "generic" ``types`` class which the type corresponds to: :: >>> String(50)._compare_type_affinity(postgresql.VARCHAR(50)) True >>> Integer()._compare_type_affinity(mysql.REAL) False Miscellaneous API Changes ------------------------- The usual "generic" types are still the general system in use, i.e. ``String``, ``Float``, ``DateTime``. There's a few changes there: * Types no longer make any guesses as to default parameters. In particular, ``Numeric``, ``Float``, as well as subclasses NUMERIC, FLOAT, DECIMAL don't generate any length or scale unless specified. This also continues to include the controversial ``String`` and ``VARCHAR`` types (although MySQL dialect will pre-emptively raise when asked to render VARCHAR with no length). No defaults are assumed, and if they are used in a CREATE TABLE statement, an error will be raised if the underlying database does not allow non-lengthed versions of these types. * the ``Binary`` type has been renamed to ``LargeBinary``, for BLOB/BYTEA/similar types. For ``BINARY`` and ``VARBINARY``, those are present directly as ``types.BINARY``, ``types.VARBINARY``, as well as in the MySQL and MS-SQL dialects. * ``PickleType`` now uses == for comparison of values when mutable=True, unless the "comparator" argument with a comparison function is specified to the type. If you are pickling a custom object you should implement an ``__eq__()`` method so that value-based comparisons are accurate. * The default "precision" and "scale" arguments of Numeric and Float have been removed and now default to None. NUMERIC and FLOAT will be rendered with no numeric arguments by default unless these values are provided. * DATE, TIME and DATETIME types on SQLite can now take optional "storage_format" and "regexp" argument. "storage_format" can be used to store those types using a custom string format. "regexp" allows to use a custom regular expression to match string values from the database. * ``__legacy_microseconds__`` on SQLite ``Time`` and ``DateTime`` types is not supported anymore. You should use the new "storage_format" argument instead. * ``DateTime`` types on SQLite now use by a default a stricter regular expression to match strings from the database. Use the new "regexp" argument if you are using data stored in a legacy format. ORM Changes =========== Upgrading an ORM application from 0.5 to 0.6 should require little to no changes, as the ORM's behavior remains almost identical. There are some default argument and name changes, and some loading behaviors have been improved. New Unit of Work ---------------- The internals for the unit of work, primarily ``topological.py`` and ``unitofwork.py``, have been completely rewritten and are vastly simplified. This should have no impact on usage, as all existing behavior during flush has been maintained exactly (or at least, as far as it is exercised by our testsuite and the handful of production environments which have tested it heavily). The performance of flush() now uses 20-30% fewer method calls and should also use less memory. The intent and flow of the source code should now be reasonably easy to follow, and the architecture of the flush is fairly open-ended at this point, creating room for potential new areas of sophistication. The flush process no longer has any reliance on recursion so flush plans of arbitrary size and complexity can be flushed. 
Additionally, the mapper's "save" process, which issues INSERT and UPDATE statements, now caches the "compiled" form of the two statements so that callcounts are further dramatically reduced with very large flushes.

Any changes in behavior observed with flush versus earlier versions of 0.6 or 0.5 should be reported to us ASAP - we'll make sure no functionality is lost.

Changes to ``query.update()`` and ``query.delete()``
-----------------------------------------------------

* the 'expire' option on query.update() has been renamed to 'fetch', thus matching that of query.delete()

* ``query.update()`` and ``query.delete()`` both default to 'evaluate' for the synchronize strategy.

* the 'synchronize' strategy for update() and delete() raises an error on failure.  There is no implicit fallback onto "fetch".  Failure of evaluation is based on the structure of criteria, so success/failure is deterministic based on code structure.

``relation()`` is officially named ``relationship()``
------------------------------------------------------

This is to solve the long-running issue that "relation" means a "table or derived table" in relational algebra terms.  The ``relation()`` name, which is less typing, will hang around for the foreseeable future so this change should be entirely painless.

Subquery eager loading
----------------------

A new kind of eager loading is added called "subquery" loading.  This is a load that emits a second SQL query immediately after the first which loads full collections for all the parents in the first query, joining upwards to the parent using INNER JOIN.  Subquery loading is used similarly to the current joined-eager loading, using the ``subqueryload()`` and ``subqueryload_all()`` options as well as the ``lazy='subquery'`` setting on ``relationship()``.  The subquery load is usually much more efficient for loading many larger collections as it uses INNER JOIN unconditionally and also doesn't re-load parent rows.

``eagerload()``, ``eagerload_all()`` are now ``joinedload()``, ``joinedload_all()``
-------------------------------------------------------------------------------------

To make room for the new subquery load feature, the existing ``eagerload()``/``eagerload_all()`` options are now superseded by ``joinedload()`` and ``joinedload_all()``.  The old names will hang around for the foreseeable future just like ``relation()``.

``lazy=False|None|True|'dynamic'`` now accepts ``lazy='noload'|'joined'|'subquery'|'select'|'dynamic'``
---------------------------------------------------------------------------------------------------------

Continuing on the theme of loader strategies opened up, the standard keywords for the ``lazy`` option on ``relationship()`` are now ``select`` for lazy loading (via a SELECT issued on attribute access), ``joined`` for joined-eager loading, ``subquery`` for subquery-eager loading, ``noload`` when no loading should occur, and ``dynamic`` for a "dynamic" relationship.  The old ``True``, ``False``, ``None`` arguments are still accepted with the identical behavior as before.

innerjoin=True on relation, joinedload
--------------------------------------

Joined-eagerly loaded scalars and collections can now be instructed to use INNER JOIN instead of OUTER JOIN.  On Postgresql this is observed to provide a 300-600% speedup on some queries.
Set this flag for any many-to-one which is on a NOT NULLable foreign key, and similarly for any collection where related items are guaranteed to exist. At mapper level: :: mapper(Child, child) mapper(Parent, parent, properties={ 'child':relationship(Child, lazy='joined', innerjoin=True) }) At query time level: :: session.query(Parent).options(joinedload(Parent.child, innerjoin=True)).all() The ``innerjoin=True`` flag at the ``relationship()`` level will also take effect for any ``joinedload()`` option which does not override the value. Many-to-one Enhancements ------------------------ * many-to-one relations now fire off a lazyload in fewer cases, including in most cases will not fetch the "old" value when a new one is replaced. * many-to-one relation to a joined-table subclass now uses get() for a simple load (known as the "use_get" condition), i.e. ``Related``->``Sub(Base)``, without the need to redefine the primaryjoin condition in terms of the base table. [ticket:1186] * specifying a foreign key with a declarative column, i.e. ``ForeignKey(MyRelatedClass.id)`` doesn't break the "use_get" condition from taking place [ticket:1492] * relationship(), joinedload(), and joinedload_all() now feature an option called "innerjoin". Specify ``True`` or ``False`` to control whether an eager join is constructed as an INNER or OUTER join. Default is ``False`` as always. The mapper options will override whichever setting is specified on relationship(). Should generally be set for many-to-one, not nullable foreign key relations to allow improved join performance. [ticket:1544] * the behavior of joined eager loading such that the main query is wrapped in a subquery when LIMIT/OFFSET are present now makes an exception for the case when all eager loads are many-to-one joins. In those cases, the eager joins are against the parent table directly along with the limit/offset without the extra overhead of a subquery, since a many-to-one join does not add rows to the result. For example, in 0.5 this query: :: session.query(Address).options(eagerload(Address.user)).limit(10) would produce SQL like: :: SELECT * FROM (SELECT * FROM addresses LIMIT 10) AS anon_1 LEFT OUTER JOIN users AS users_1 ON users_1.id = anon_1.addresses_user_id This because the presence of any eager loaders suggests that some or all of them may relate to multi-row collections, which would necessitate wrapping any kind of rowcount-sensitive modifiers like LIMIT inside of a subquery. In 0.6, that logic is more sensitive and can detect if all eager loaders represent many-to-ones, in which case the eager joins don't affect the rowcount: :: SELECT * FROM addresses LEFT OUTER JOIN users AS users_1 ON users_1.id = addresses.user_id LIMIT 10 Mutable Primary Keys with Joined Table Inheritance -------------------------------------------------- A joined table inheritance config where the child table has a PK that foreign keys to the parent PK can now be updated on a CASCADE-capable database like Postgresql. ``mapper()`` now has an option ``passive_updates=True`` which indicates this foreign key is updated automatically. If on a non-cascading database like SQLite or MySQL/MyISAM, set this flag to ``False``. A future feature enhancement will try to get this flag to be auto-configuring based on dialect/table style in use. Beaker Caching -------------- A promising new example of Beaker integration is in ``examples/beaker_caching``. This is a straightforward recipe which applies a Beaker cache within the result- generation engine of ``Query``. 
Cache parameters are provided via ``query.options()``, and allows full control over the contents of the cache. SQLAlchemy 0.6 includes improvements to the ``Session.merge()`` method to support this and similar recipes, as well as to provide significantly improved performance in most scenarios. Other Changes ------------- * the "row tuple" object returned by ``Query`` when multiple column/entities are selected is now picklable as well as higher performing. * ``query.join()`` has been reworked to provide more consistent behavior and more flexibility (includes [ticket:1537]) * ``query.select_from()`` accepts multiple clauses to produce multiple comma separated entries within the FROM clause. Useful when selecting from multiple-homed join() clauses. * the "dont_load=True" flag on ``Session.merge()`` is deprecated and is now "load=False". * added "make_transient()" helper function which transforms a persistent/ detached instance into a transient one (i.e. deletes the instance_key and removes from any session.) [ticket:1052] * the allow_null_pks flag on mapper() is deprecated and has been renamed to allow_partial_pks. It is turned "on" by default. This means that a row which has a non-null value for any of its primary key columns will be considered an identity. The need for this scenario typically only occurs when mapping to an outer join. When set to False, a PK that has NULLs in it will not be considered a primary key - in particular this means a result row will come back as None (or not be filled into a collection), and new in 0.6 also indicates that session.merge() won't issue a round trip to the database for such a PK value. [ticket:1680] * the mechanics of "backref" have been fully merged into the finer grained "back_populates" system, and take place entirely within the ``_generate_backref()`` method of ``RelationProperty``. This makes the initialization procedure of ``RelationProperty`` simpler and allows easier propagation of settings (such as from subclasses of ``RelationProperty``) into the reverse reference. The internal ``BackRef()`` is gone and ``backref()`` returns a plain tuple that is understood by ``RelationProperty``. * the keys attribute of ``ResultProxy`` is now a method, so references to it (``result.keys``) must be changed to method invocations (``result.keys()``) * ``ResultProxy.last_inserted_ids`` is now deprecated, use ``ResultProxy.inserted_primary_key`` instead. Deprecated/Removed ORM Elements ------------------------------- Most elements that were deprecated throughout 0.5 and raised deprecation warnings have been removed (with a few exceptions). All elements that were marked "pending deprecation" are now deprecated and will raise a warning upon use. * 'transactional' flag on sessionmaker() and others is removed. Use 'autocommit=True' to indicate 'transactional=False'. * 'polymorphic_fetch' argument on mapper() is removed. Loading can be controlled using the 'with_polymorphic' option. * 'select_table' argument on mapper() is removed. Use 'with_polymorphic=("*", )' for this functionality. * 'proxy' argument on synonym() is removed. This flag did nothing throughout 0.5, as the "proxy generation" behavior is now automatic. * Passing a single list of elements to joinedload(), joinedload_all(), contains_eager(), lazyload(), defer(), and undefer() instead of multiple positional \*args is deprecated. * Passing a single list of elements to query.order_by(), query.group_by(), query.join(), or query.outerjoin() instead of multiple positional \*args is deprecated. 
* ``query.iterate_instances()`` is removed. Use ``query.instances()``. * ``Query.query_from_parent()`` is removed. Use the sqlalchemy.orm.with_parent() function to produce a "parent" clause, or alternatively ``query.with_parent()``. * ``query._from_self()`` is removed, use ``query.from_self()`` instead. * the "comparator" argument to composite() is removed. Use "comparator_factory". * ``RelationProperty._get_join()`` is removed. * the 'echo_uow' flag on Session is removed. Use logging on the "sqlalchemy.orm.unitofwork" name. * ``session.clear()`` is removed. use ``session.expunge_all()``. * ``session.save()``, ``session.update()``, ``session.save_or_update()`` are removed. Use ``session.add()`` and ``session.add_all()``. * the "objects" flag on session.flush() remains deprecated. * the "dont_load=True" flag on session.merge() is deprecated in favor of "load=False". * ``ScopedSession.mapper`` remains deprecated. See the usage recipe at http://www.sqlalchemy.org/trac/wiki/Usag eRecipes/SessionAwareMapper * passing an ``InstanceState`` (internal SQLAlchemy state object) to ``attributes.init_collection()`` or ``attributes.get_history()`` is deprecated. These functions are public API and normally expect a regular mapped object instance. * the 'engine' parameter to ``declarative_base()`` is removed. Use the 'bind' keyword argument. Extensions ========== SQLSoup ------- SQLSoup has been modernized and updated to reflect common 0.5/0.6 capabilities, including well defined session integration. Please read the new docs at [http://www.sqlalc hemy.org/docs/06/reference/ext/sqlsoup.html]. Declarative ----------- The ``DeclarativeMeta`` (default metaclass for ``declarative_base``) previously allowed subclasses to modify ``dict_`` to add class attributes (e.g. columns). This no longer works, the ``DeclarativeMeta`` constructor now ignores ``dict_``. Instead, the class attributes should be assigned directly, e.g. ``cls.id=Column(...)``, or the `MixIn class `_ approach should be used instead of the metaclass approach. SQLAlchemy-0.8.4/doc/build/changelog/migration_07.rst0000644000076500000240000014100112251147171023053 0ustar classicstaff00000000000000============================== What's New in SQLAlchemy 0.7? ============================== .. admonition:: About this Document This document describes changes between SQLAlchemy version 0.6, last released May 5, 2012, and SQLAlchemy version 0.7, undergoing maintenance releases as of October, 2012. Document date: July 27, 2011 Introduction ============ This guide introduces what's new in SQLAlchemy version 0.7, and also documents changes which affect users migrating their applications from the 0.6 series of SQLAlchemy to 0.7. To as great a degree as possible, changes are made in such a way as to not break compatibility with applications built for 0.6. The changes that are necessarily not backwards compatible are very few, and all but one, the change to mutable attribute defaults, should affect an exceedingly small portion of applications - many of the changes regard non-public APIs and undocumented hacks some users may have been attempting to use. A second, even smaller class of non-backwards-compatible changes is also documented. This class of change regards those features and behaviors that have been deprecated at least since version 0.5 and have been raising warnings since their deprecation. These changes would only affect applications that are still using 0.4- or early 0.5-style APIs. 
As the project matures, we have fewer and fewer of these kinds of changes with 0.x level releases, which is a product of our API having ever fewer features that are less than ideal for the use cases they were meant to solve. An array of existing functionalities have been superseded in SQLAlchemy 0.7. There's not much difference between the terms "superseded" and "deprecated", except that the former has a much weaker suggestion of the old feature would ever be removed. In 0.7, features like ``synonym`` and ``comparable_property``, as well as all the ``Extension`` and other event classes, have been superseded. But these "superseded" features have been re-implemented such that their implementations live mostly outside of core ORM code, so their continued "hanging around" doesn't impact SQLAlchemy's ability to further streamline and refine its internals, and we expect them to remain within the API for the foreseeable future. New Features ============ New Event System ---------------- SQLAlchemy started early with the ``MapperExtension`` class, which provided hooks into the persistence cycle of mappers. As SQLAlchemy quickly became more componentized, pushing mappers into a more focused configurational role, many more "extension", "listener", and "proxy" classes popped up to solve various activity-interception use cases in an ad-hoc fashion. Part of this was driven by the divergence of activities; ``ConnectionProxy`` objects wanted to provide a system of rewriting statements and parameters; ``AttributeExtension`` provided a system of replacing incoming values, and ``DDL`` objects had events that could be switched off of dialect-sensitive callables. 0.7 re-implements virtually all of these plugin points with a new, unified approach, which retains all the functionalities of the different systems, provides more flexibility and less boilerplate, performs better, and eliminates the need to learn radically different APIs for each event subsystem. The pre-existing classes ``MapperExtension``, ``SessionExtension``, ``AttributeExtension``, ``ConnectionProxy``, ``PoolListener`` as well as the ``DDLElement.execute_at`` method are deprecated and now implemented in terms of the new system - these APIs remain fully functional and are expected to remain in place for the foreseeable future. The new approach uses named events and user-defined callables to associate activities with events. The API's look and feel was driven by such diverse sources as JQuery, Blinker, and Hibernate, and was also modified further on several occasions during conferences with dozens of users on Twitter, which appears to have a much higher response rate than the mailing list for such questions. It also features an open-ended system of target specification that allows events to be associated with API classes, such as for all ``Session`` or ``Engine`` objects, with specific instances of API classes, such as for a specific ``Pool`` or ``Mapper``, as well as for related objects like a user- defined class that's mapped, or something as specific as a certain attribute on instances of a particular subclass of a mapped parent class. Individual listener subsystems can apply wrappers to incoming user- defined listener functions which modify how they are called - an mapper event can receive either the instance of the object being operated upon, or its underlying ``InstanceState`` object. An attribute event can opt whether or not to have the responsibility of returning a new value. 
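As a brief sketch of what the unified system looks like in practice - the listener functions below are illustrative only, though the ``event.listen()`` calls, event names and listener signatures follow the 0.7 event documentation:

::

    from sqlalchemy import create_engine, event
    from sqlalchemy.pool import Pool

    engine = create_engine('sqlite://')

    # listen on a specific Engine instance
    def before_cursor_execute(conn, cursor, statement, parameters, context, executemany):
        print "about to execute:", statement

    event.listen(engine, 'before_cursor_execute', before_cursor_execute)

    # or listen on an API class, affecting all instances - here, every Pool
    def on_checkout(dbapi_connection, connection_record, connection_proxy):
        print "connection checked out from the pool"

    event.listen(Pool, 'checkout', on_checkout)

    engine.execute("select 1")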
Several systems now build upon the new event API, including the new "mutable attributes" API as well as composite attributes. The greater emphasis on events has also led to the introduction of a handful of new events, including attribute expiration and refresh operations, pickle loads/dumps operations, completed mapper construction operations. .. seealso:: :ref:`event_toplevel` :ticket:`1902` Hybrid Attributes, implements/supersedes synonym(), comparable_property() ------------------------------------------------------------------------- The "derived attributes" example has now been turned into an official extension. The typical use case for ``synonym()`` is to provide descriptor access to a mapped column; the use case for ``comparable_property()`` is to be able to return a ``PropComparator`` from any descriptor. In practice, the approach of "derived" is easier to use, more extensible, is implemented in a few dozen lines of pure Python with almost no imports, and doesn't require the ORM core to even be aware of it. The feature is now known as the "Hybrid Attributes" extension. ``synonym()`` and ``comparable_property()`` are still part of the ORM, though their implementations have been moved outwards, building on an approach that is similar to that of the hybrid extension, so that the core ORM mapper/query/property modules aren't really aware of them otherwise. .. seealso:: :ref:`hybrids_toplevel` :ticket:`1903` Speed Enhancements ------------------ As is customary with all major SQLA releases, a wide pass through the internals to reduce overhead and callcounts has been made which further reduces the work needed in common scenarios. Highlights of this release include: * The flush process will now bundle INSERT statements into batches fed to ``cursor.executemany()``, for rows where the primary key is already present. In particular this usually applies to the "child" table on a joined table inheritance configuration, meaning the number of calls to ``cursor.execute`` for a large bulk insert of joined- table objects can be cut in half, allowing native DBAPI optimizations to take place for those statements passed to ``cursor.executemany()`` (such as re-using a prepared statement). * The codepath invoked when accessing a many-to-one reference to a related object that's already loaded has been greatly simplified. The identity map is checked directly without the need to generate a new ``Query`` object first, which is expensive in the context of thousands of in-memory many-to-ones being accessed. The usage of constructed-per-call "loader" objects is also no longer used for the majority of lazy attribute loads. * The rewrite of composites allows a shorter codepath when mapper internals access mapped attributes within a flush. * New inlined attribute access functions replace the previous usage of "history" when the "save-update" and other cascade operations need to cascade among the full scope of datamembers associated with an attribute. This reduces the overhead of generating a new ``History`` object for this speed-critical operation. * The internals of the ``ExecutionContext``, the object corresponding to a statement execution, have been inlined and simplified. * The ``bind_processor()`` and ``result_processor()`` callables generated by types for each statement execution are now cached (carefully, so as to avoid memory leaks for ad-hoc types and dialects) for the lifespan of that type, further reducing per-statement call overhead. 
* The collection of "bind processors" for a particular ``Compiled`` instance of a statement is also cached on the ``Compiled`` object, taking further advantage of the "compiled cache" used by the flush process to re-use the same compiled form of INSERT, UPDATE, DELETE statements. A demonstration of callcount reduction including a sample benchmark script is at http://techspot.zzzeek.org/2010/12/12/a-tale-of-three- profiles/ Composites Rewritten -------------------- The "composite" feature has been rewritten, like ``synonym()`` and ``comparable_property()``, to use a lighter weight implementation based on descriptors and events, rather than building into the ORM internals. This allowed the removal of some latency from the mapper/unit of work internals, and simplifies the workings of composite. The composite attribute now no longer conceals the underlying columns it builds upon, which now remain as regular attributes. Composites can also act as a proxy for ``relationship()`` as well as ``Column()`` attributes. The major backwards-incompatible change of composites is that they no longer use the ``mutable=True`` system to detect in-place mutations. Please use the `Mutation Tracking `_ extension to establish in-place change events to existing composite usage. .. seealso:: :ref:`mapper_composite` :ref:`mutable_toplevel` :ticket:`2008` :ticket:`2024` More succinct form of query.join(target, onclause) -------------------------------------------------- The default method of issuing ``query.join()`` to a target with an explicit onclause is now: :: query.join(SomeClass, SomeClass.id==ParentClass.some_id) In 0.6, this usage was considered to be an error, because ``join()`` accepts multiple arguments corresponding to multiple JOIN clauses - the two-argument form needed to be in a tuple to disambiguate between single-argument and two- argument join targets. In the middle of 0.6 we added detection and an error message for this specific calling style, since it was so common. In 0.7, since we are detecting the exact pattern anyway, and since having to type out a tuple for no reason is extremely annoying, the non- tuple method now becomes the "normal" way to do it. The "multiple JOIN" use case is exceedingly rare compared to the single join case, and multiple joins these days are more clearly represented by multiple calls to ``join()``. The tuple form will remain for backwards compatibility. Note that all the other forms of ``query.join()`` remain unchanged: :: query.join(MyClass.somerelation) query.join("somerelation") query.join(MyTarget) # ... etc `Querying with Joins `_ :ticket:`1923` .. _07_migration_mutation_extension: Mutation event extension, supersedes "mutable=True" --------------------------------------------------- A new extension, :ref:`mutable_toplevel`, provides a mechanism by which user-defined datatypes can provide change events back to the owning parent or parents. The extension includes an approach for scalar database values, such as those managed by :class:`.PickleType`, ``postgresql.ARRAY``, or other custom ``MutableType`` classes, as well as an approach for ORM "composites", those configured using :func:`~.sqlalchemy.orm.composite`. .. seealso:: :ref:`mutable_toplevel` NULLS FIRST / NULLS LAST operators ---------------------------------- These are implemented as an extension to the ``asc()`` and ``desc()`` operators, called ``nullsfirst()`` and ``nullslast()``. .. 
seealso:: :func:`.nullsfirst` :func:`.nullslast` :ticket:`723` select.distinct(), query.distinct() accepts \*args for Postgresql DISTINCT ON ----------------------------------------------------------------------------- This was already available by passing a list of expressions to the ``distinct`` keyword argument of ``select()``, the ``distinct()`` method of ``select()`` and ``Query`` now accept positional arguments which are rendered as DISTINCT ON when a Postgresql backend is used. `distinct() `_ `Query.distinct() `_ :ticket:`1069` ``Index()`` can be placed inline inside of ``Table``, ``__table_args__`` ------------------------------------------------------------------------ The Index() construct can be created inline with a Table definition, using strings as column names, as an alternative to the creation of the index outside of the Table. That is: :: Table('mytable', metadata, Column('id',Integer, primary_key=True), Column('name', String(50), nullable=False), Index('idx_name', 'name') ) The primary rationale here is for the benefit of declarative ``__table_args__``, particularly when used with mixins: :: class HasNameMixin(object): name = Column('name', String(50), nullable=False) @declared_attr def __table_args__(cls): return (Index('name'), {}) class User(HasNameMixin, Base): __tablename__ = 'user' id = Column('id', Integer, primary_key=True) `Indexes `_ Window Function SQL Construct ----------------------------- A "window function" provides to a statement information about the result set as it's produced. This allows criteria against various things like "row number", "rank" and so forth. They are known to be supported at least by Postgresql, SQL Server and Oracle, possibly others. The best introduction to window functions is on Postgresql's site, where window functions have been supported since version 8.4: http://www.postgresql.org/docs/9.0/static/tutorial- window.html SQLAlchemy provides a simple construct typically invoked via an existing function clause, using the ``over()`` method, which accepts ``order_by`` and ``partition_by`` keyword arguments. Below we replicate the first example in PG's tutorial: :: from sqlalchemy.sql import table, column, select, func empsalary = table('empsalary', column('depname'), column('empno'), column('salary')) s = select([ empsalary, func.avg(empsalary.c.salary). over(partition_by=empsalary.c.depname). label('avg') ]) print s SQL: :: SELECT empsalary.depname, empsalary.empno, empsalary.salary, avg(empsalary.salary) OVER (PARTITION BY empsalary.depname) AS avg FROM empsalary `sqlalchemy.sql.expression.over `_ :ticket:`1844` execution_options() on Connection accepts "isolation_level" argument -------------------------------------------------------------------- This sets the transaction isolation level for a single ``Connection``, until that ``Connection`` is closed and its underlying DBAPI resource returned to the connection pool, upon which the isolation level is reset back to the default. The default isolation level is set using the ``isolation_level`` argument to ``create_engine()``. Transaction isolation support is currently only supported by the Postgresql and SQLite backends. `execution_options() `_ :ticket:`2001` ``TypeDecorator`` works with integer primary key columns -------------------------------------------------------- A ``TypeDecorator`` which extends the behavior of ``Integer`` can be used with a primary key column. 
The "autoincrement" feature of ``Column`` will now recognize that the underlying database column is still an integer so that lastrowid mechanisms continue to function. The ``TypeDecorator`` itself will have its result value processor applied to newly generated primary keys, including those received by the DBAPI ``cursor.lastrowid`` accessor. :ticket:`2005` :ticket:`2006` ``TypeDecorator`` is present in the "sqlalchemy" import space ------------------------------------------------------------- No longer need to import this from ``sqlalchemy.types``, it's now mirrored in ``sqlalchemy``. New Dialects ------------ Dialects have been added: * a MySQLdb driver for the Drizzle database: `Drizzle `_ * support for the pymysql DBAPI: `pymsql Notes `_ * psycopg2 now works with Python 3 Behavioral Changes (Backwards Compatible) ========================================= C Extensions Build by Default ----------------------------- This is as of 0.7b4. The exts will build if cPython 2.xx is detected. If the build fails, such as on a windows install, that condition is caught and the non-C install proceeds. The C exts won't build if Python 3 or Pypy is used. Query.count() simplified, should work virtually always ------------------------------------------------------ The very old guesswork which occurred within ``Query.count()`` has been modernized to use ``.from_self()``. That is, ``query.count()`` is now equivalent to: :: query.from_self(func.count(literal_column('1'))).scalar() Previously, internal logic attempted to rewrite the columns clause of the query itself, and upon detection of a "subquery" condition, such as a column-based query that might have aggregates in it, or a query with DISTINCT, would go through a convoluted process of rewriting the columns clause. This logic failed in complex conditions, particularly those involving joined table inheritance, and was long obsolete by the more comprehensive ``.from_self()`` call. The SQL emitted by ``query.count()`` is now always of the form: :: SELECT count(1) AS count_1 FROM ( SELECT user.id AS user_id, user.name AS user_name from user ) AS anon_1 that is, the original query is preserved entirely inside of a subquery, with no more guessing as to how count should be applied. :ticket:`2093` To emit a non-subquery form of count() ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ MySQL users have already reported that the MyISAM engine not surprisingly falls over completely with this simple change. Note that for a simple ``count()`` that optimizes for DBs that can't handle simple subqueries, ``func.count()`` should be used: :: from sqlalchemy import func session.query(func.count(MyClass.id)).scalar() or for ``count(*)``: :: from sqlalchemy import func, literal_column session.query(func.count(literal_column('*'))).select_from(MyClass).scalar() LIMIT/OFFSET clauses now use bind parameters -------------------------------------------- The LIMIT and OFFSET clauses, or their backend equivalents (i.e. TOP, ROW NUMBER OVER, etc.), use bind parameters for the actual values, for all backends which support it (most except for Sybase). This allows better query optimizer performance as the textual string for multiple statements with differing LIMIT/OFFSET are now identical. :ticket:`805` Logging enhancements -------------------- Vinay Sajip has provided a patch to our logging system such that the "hex string" embedded in logging statements for engines and pools is no longer needed to allow the ``echo`` flag to work correctly. 
A new system that uses filtered logging objects allows us to maintain our current behavior of ``echo`` being local to individual engines without the need for additional identifying strings local to those engines.

:ticket:`1926`

Simplified polymorphic_on assignment
------------------------------------

The population of the ``polymorphic_on`` column-mapped attribute, when used in an inheritance scenario, now occurs when the object is constructed, i.e. its ``__init__`` method is called, using the init event. The attribute then behaves the same as any other column-mapped attribute. Previously, special logic would fire off during flush to populate this column, which prevented any user code from modifying its behavior. The new approach improves upon this in three ways:

1. the polymorphic identity is now present on the object as soon as it's constructed;

2. the polymorphic identity can be changed by user code without any difference in behavior from any other column-mapped attribute;

3. the internals of the mapper during flush are simplified and no longer need to make special checks for this column.

:ticket:`1895`

contains_eager() chains across multiple paths (i.e. "all()")
-------------------------------------------------------------

The ``contains_eager()`` modifier will now chain itself for a longer path without the need to emit individual ``contains_eager()`` calls. Instead of:

::

    session.query(A).options(contains_eager(A.b), contains_eager(A.b, B.c))

you can say:

::

    session.query(A).options(contains_eager(A.b, B.c))

:ticket:`2032`

Flushing of orphans that have no parent is allowed
--------------------------------------------------

We've had a long-standing behavior that checks for a so-called "orphan" during flush, that is, an object which is associated with a ``relationship()`` that specifies "delete-orphan" cascade, has been newly added to the session for an INSERT, and no parent relationship has been established. This check was added years ago to accommodate some test cases which tested the orphan behavior for consistency. In modern SQLA, this check is no longer needed on the Python side. The equivalent behavior of the "orphan check" is accomplished by making the foreign key reference to the object's parent row NOT NULL, where the database does its job of establishing data consistency in the same way SQLA allows most other operations to do. If the object's parent foreign key is nullable, then the row can be inserted. The "orphan" behavior runs when the object was persisted with a particular parent, and is then disassociated from that parent, leading to a DELETE statement emitted for it.

:ticket:`1912`

Warnings generated when collection members, scalar referents not part of the flush
------------------------------------------------------------------------------------

Warnings are now emitted when related objects referenced via a loaded ``relationship()`` on a parent object marked as "dirty" are not present in the current ``Session``. The ``save-update`` cascade takes effect when objects are added to the ``Session``, or when objects are first associated with a parent, so that an object and everything related to it are usually all present in the same ``Session``. However, if ``save-update`` cascade is disabled for a particular ``relationship()``, then this behavior does not occur, and the flush process does not try to correct for it, instead staying consistent with the configured cascade behavior. Previously, when such objects were detected during the flush, they were silently skipped.
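One configuration that produces this situation - sketched here with hypothetical classes - is a ``relationship()`` whose cascade setting omits ``save-update``:

::

    from sqlalchemy import Column, ForeignKey, Integer
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import relationship

    Base = declarative_base()

    class Parent(Base):
        __tablename__ = 'parent'
        id = Column(Integer, primary_key=True)

        # "save-update" removed from the cascade; a Child appended to
        # Parent.children is not pulled into the Session with its Parent
        children = relationship("Child", cascade="delete")

    class Child(Base):
        __tablename__ = 'child'
        id = Column(Integer, primary_key=True)
        parent_id = Column(Integer, ForeignKey('parent.id'))

With a ``Parent`` already present in a ``Session``, appending a brand new ``Child`` to ``Parent.children`` and then flushing leaves that ``Child`` outside of the ``Session``.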
The new behavior is that a warning is emitted, for the purposes of alerting to a situation that more often than not is the source of unexpected behavior. :ticket:`1973` Setup no longer installs a Nose plugin -------------------------------------- Since we moved to nose we've used a plugin that installs via setuptools, so that the ``nosetests`` script would automatically run SQLA's plugin code, necessary for our tests to have a full environment. In the middle of 0.6, we realized that the import pattern here meant that Nose's "coverage" plugin would break, since "coverage" requires that it be started before any modules to be covered are imported; so in the middle of 0.6 we made the situation worse by adding a separate ``sqlalchemy-nose`` package to the build to overcome this. In 0.7 we've done away with trying to get ``nosetests`` to work automatically, since the SQLAlchemy module would produce a large number of nose configuration options for all usages of ``nosetests``, not just the SQLAlchemy unit tests themselves, and the additional ``sqlalchemy-nose`` install was an even worse idea, producing an extra package in Python environments. The ``sqla_nose.py`` script in 0.7 is now the only way to run the tests with nose. :ticket:`1949` Non-``Table``-derived constructs can be mapped ---------------------------------------------- A construct that isn't against any ``Table`` at all, like a function, can be mapped. :: from sqlalchemy import select, func from sqlalchemy.orm import mapper class Subset(object): pass selectable = select(["x", "y", "z"]).select_from(func.some_db_function()).alias() mapper(Subset, selectable, primary_key=[selectable.c.x]) :ticket:`1876` aliased() accepts ``FromClause`` elements ----------------------------------------- This is a convenience helper such that in the case a plain ``FromClause``, such as a ``select``, ``Table`` or ``join`` is passed to the ``orm.aliased()`` construct, it passes through to the ``.alias()`` method of that from construct rather than constructing an ORM level ``AliasedClass``. :ticket:`2018` Session.connection(), Session.execute() accept 'bind' ----------------------------------------------------- This is to allow execute/connection operations to participate in the open transaction of an engine explicitly. It also allows custom subclasses of ``Session`` that implement their own ``get_bind()`` method and arguments to use those custom arguments with both the ``execute()`` and ``connection()`` methods equally. `Session.connection `_ `Session.execute `_ :ticket:`1996` Standalone bind parameters in columns clause auto-labeled. ---------------------------------------------------------- Bind parameters present in the "columns clause" of a select are now auto-labeled like other "anonymous" clauses, which among other things allows their "type" to be meaningful when the row is fetched, as in result row processors. SQLite - relative file paths are normalized through os.path.abspath() --------------------------------------------------------------------- This so that a script that changes the current directory will continue to target the same location as subsequent SQLite connections are established. 
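A brief illustration, assuming a relative database path (only ``create_engine()`` here is actual SQLAlchemy API; the path itself is made up):

::

    import os
    from sqlalchemy import create_engine

    # a relative SQLite path such as this one...
    engine = create_engine("sqlite:///data/app.db")

    # ...is now normalized up front, roughly as though the dialect had called:
    normalized = os.path.abspath("data/app.db")

    # so connections opened later keep pointing at the same file,
    # even if the process calls os.chdir() in the meantime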
:ticket:`2036` MS-SQL - ``String``/``Unicode``/``VARCHAR``/``NVARCHAR``/``VARBINARY`` emit "max" for no length ----------------------------------------------------------------------------------------------- On the MS-SQL backend, the String/Unicode types, and their counterparts VARCHAR/ NVARCHAR, as well as VARBINARY (:ticket:`1833`) emit "max" as the length when no length is specified. This makes it more compatible with Postgresql's VARCHAR type which is similarly unbounded when no length specified. SQL Server defaults the length on these types to '1' when no length is specified. Behavioral Changes (Backwards Incompatible) =========================================== Note again, aside from the default mutability change, most of these changes are \*extremely minor* and will not affect most users. ``PickleType`` and ARRAY mutability turned off by default --------------------------------------------------------- This change refers to the default behavior of the ORM when mapping columns that have either the ``PickleType`` or ``postgresql.ARRAY`` datatypes. The ``mutable`` flag is now set to ``False`` by default. If an existing application uses these types and depends upon detection of in-place mutations, the type object must be constructed with ``mutable=True`` to restore the 0.6 behavior: :: Table('mytable', metadata, # .... Column('pickled_data', PickleType(mutable=True)) ) The ``mutable=True`` flag is being phased out, in favor of the new `Mutation Tracking `_ extension. This extension provides a mechanism by which user-defined datatypes can provide change events back to the owning parent or parents. The previous approach of using ``mutable=True`` does not provide for change events - instead, the ORM must scan through all mutable values present in a session and compare them against their original value for changes every time ``flush()`` is called, which is a very time consuming event. This is a holdover from the very early days of SQLAlchemy when ``flush()`` was not automatic and the history tracking system was not nearly as sophisticated as it is now. Existing applications which use ``PickleType``, ``postgresql.ARRAY`` or other ``MutableType`` subclasses, and require in-place mutation detection, should migrate to the new mutation tracking system, as ``mutable=True`` is likely to be deprecated in the future. :ticket:`1980` Mutability detection of ``composite()`` requires the Mutation Tracking Extension -------------------------------------------------------------------------------- So-called "composite" mapped attributes, those configured using the technique described at `Composite Column Types `_, have been re-implemented such that the ORM internals are no longer aware of them (leading to shorter and more efficient codepaths in critical sections). While composite types are generally intended to be treated as immutable value objects, this was never enforced. For applications that use composites with mutability, the `Mutation Tracking `_ extension offers a base class which establishes a mechanism for user-defined composite types to send change event messages back to the owning parent or parents of each object. Applications which use composite types and rely upon in- place mutation detection of these objects should either migrate to the "mutation tracking" extension, or change the usage of the composite types such that in-place changes are no longer needed (i.e., treat them as immutable value objects). 
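A short sketch of the migration path, following the standard ``Point`` example from the mutation tracking documentation rather than anything application-specific - the composite class subclasses ``MutableComposite`` and calls ``changed()`` whenever it is mutated in place:

::

    from sqlalchemy.ext.mutable import MutableComposite

    class Point(MutableComposite):
        def __init__(self, x, y):
            self.x = x
            self.y = y

        def __setattr__(self, key, value):
            object.__setattr__(self, key, value)
            # propagate in-place change events to the owning parent(s)
            self.changed()

        def __composite_values__(self):
            return self.x, self.y

        def __eq__(self, other):
            return isinstance(other, Point) and \
                other.x == self.x and other.y == self.y

The class is then mapped with :func:`~.sqlalchemy.orm.composite` in the usual way.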
SQLite - the SQLite dialect now uses ``NullPool`` for file-based databases -------------------------------------------------------------------------- This change is **99.999% backwards compatible**, unless you are using temporary tables across connection pool connections. A file-based SQLite connection is blazingly fast, and using ``NullPool`` means that each call to ``Engine.connect`` creates a new pysqlite connection. Previously, the ``SingletonThreadPool`` was used, which meant that all connections to a certain engine in a thread would be the same connection. It's intended that the new approach is more intuitive, particularly when multiple connections are used. ``SingletonThreadPool`` is still the default engine when a ``:memory:`` database is used. Note that this change **breaks temporary tables used across Session commits**, due to the way SQLite handles temp tables. See the note at http://www.sqlalchemy.org/docs/dialects/sqlite.html#using- temporary-tables-with-sqlite if temporary tables beyond the scope of one pool connection are desired. :ticket:`1921` ``Session.merge()`` checks version ids for versioned mappers ------------------------------------------------------------ Session.merge() will check the version id of the incoming state against that of the database, assuming the mapping uses version ids and incoming state has a version_id assigned, and raise StaleDataError if they don't match. This is the correct behavior, in that if incoming state contains a stale version id, it should be assumed the state is stale. If merging data into a versioned state, the version id attribute can be left undefined, and no version check will take place. This check was confirmed by examining what Hibernate does - both the ``merge()`` and the versioning features were originally adapted from Hibernate. :ticket:`2027` Tuple label names in Query Improved ----------------------------------- This improvement is potentially slightly backwards incompatible for an application that relied upon the old behavior. Given two mapped classes ``Foo`` and ``Bar`` each with a column ``spam``: :: qa = session.query(Foo.spam) qb = session.query(Bar.spam) qu = qa.union(qb) The name given to the single column yielded by ``qu`` will be ``spam``. Previously it would be something like ``foo_spam`` due to the way the ``union`` would combine things, which is inconsistent with the name ``spam`` in the case of a non-unioned query. :ticket:`1942` Mapped column attributes reference the most specific column first ----------------------------------------------------------------- This is a change to the behavior involved when a mapped column attribute references multiple columns, specifically when dealing with an attribute on a joined-table subclass that has the same name as that of an attribute on the superclass. Using declarative, the scenario is this: :: class Parent(Base): __tablename__ = 'parent' id = Column(Integer, primary_key=True) class Child(Parent): __tablename__ = 'child' id = Column(Integer, ForeignKey('parent.id'), primary_key=True) Above, the attribute ``Child.id`` refers to both the ``child.id`` column as well as ``parent.id`` - this due to the name of the attribute. If it were named differently on the class, such as ``Child.child_id``, it then maps distinctly to ``child.id``, with ``Child.id`` being the same attribute as ``Parent.id``. When the ``id`` attribute is made to reference both ``parent.id`` and ``child.id``, it stores them in an ordered list. 
An expression such as ``Child.id`` then refers to just *one* of those columns when rendered. Up until 0.6, this column would be ``parent.id``. In 0.7, it is the less surprising ``child.id``. The legacy of this behavior deals with behaviors and restrictions of the ORM that don't really apply anymore; all that was needed was to reverse the order. A primary advantage of this approach is that it's now easier to construct ``primaryjoin`` expressions that refer to the local column: :: class Child(Parent): __tablename__ = 'child' id = Column(Integer, ForeignKey('parent.id'), primary_key=True) some_related = relationship("SomeRelated", primaryjoin="Child.id==SomeRelated.child_id") class SomeRelated(Base): __tablename__ = 'some_related' id = Column(Integer, primary_key=True) child_id = Column(Integer, ForeignKey('child.id')) Prior to 0.7 the ``Child.id`` expression would reference ``Parent.id``, and it would be necessary to map ``child.id`` to a distinct attribute. It also means that a query like this one changes its behavior: :: session.query(Parent).filter(Child.id > 7) In 0.6, this would render: :: SELECT parent.id AS parent_id FROM parent WHERE parent.id > :id_1 in 0.7, you get: :: SELECT parent.id AS parent_id FROM parent, child WHERE child.id > :id_1 which you'll note is a cartesian product - this behavior is now equivalent to that of any other attribute that is local to ``Child``. The ``with_polymorphic()`` method, or a similar strategy of explicitly joining the underlying ``Table`` objects, is used to render a query against all ``Parent`` objects with criteria against ``Child``, in the same manner as that of 0.5 and 0.6: :: print s.query(Parent).with_polymorphic([Child]).filter(Child.id > 7) Which on both 0.6 and 0.7 renders: :: SELECT parent.id AS parent_id, child.id AS child_id FROM parent LEFT OUTER JOIN child ON parent.id = child.id WHERE child.id > :id_1 Another effect of this change is that a joined-inheritance load across two tables will populate from the child table's value, not that of the parent table. An unusual case is that a query against "Parent" using ``with_polymorphic="*"`` issues a query against "parent", with a LEFT OUTER JOIN to "child". The row is located in "Parent", sees the polymorphic identity corresponds to "Child", but suppose the actual row in "child" has been *deleted*. Due to this corruption, the row comes in with all the columns corresponding to "child" set to NULL - this is now the value that gets populated, not the one in the parent table. :ticket:`1892` Mapping to joins with two or more same-named columns requires explicit declaration ---------------------------------------------------------------------------------- This is somewhat related to the previous change in :ticket:`1892`. When mapping to a join, same-named columns must be explicitly linked to mapped attributes, i.e. as described in `Mapping a Class Against Multiple Tables `_. Given two tables ``foo`` and ``bar``, each with a primary key column ``id``, the following now produces an error: :: foobar = foo.join(bar, foo.c.id==bar.c.foo_id) mapper(FooBar, foobar) This because the ``mapper()`` refuses to guess what column is the primary representation of ``FooBar.id`` - is it ``foo.c.id`` or is it ``bar.c.id`` ? 
The attribute must be explicit: :: foobar = foo.join(bar, foo.c.id==bar.c.foo_id) mapper(FooBar, foobar, properties={ 'id':[foo.c.id, bar.c.id] }) :ticket:`1896` Mapper requires that polymorphic_on column be present in the mapped selectable ------------------------------------------------------------------------------ This is a warning in 0.6, now an error in 0.7. The column given for ``polymorphic_on`` must be in the mapped selectable. This to prevent some occasional user errors such as: :: mapper(SomeClass, sometable, polymorphic_on=some_lookup_table.c.id) where above the polymorphic_on needs to be on a ``sometable`` column, in this case perhaps ``sometable.c.some_lookup_id``. There are also some "polymorphic union" scenarios where similar mistakes sometimes occur. Such a configuration error has always been "wrong", and the above mapping doesn't work as specified - the column would be ignored. It is however potentially backwards incompatible in the rare case that an application has been unknowingly relying upon this behavior. :ticket:`1875` ``DDL()`` constructs now escape percent signs --------------------------------------------- Previously, percent signs in ``DDL()`` strings would have to be escaped, i.e. ``%%`` depending on DBAPI, for those DBAPIs that accept ``pyformat`` or ``format`` binds (i.e. psycopg2, mysql-python), which was inconsistent versus ``text()`` constructs which did this automatically. The same escaping now occurs for ``DDL()`` as for ``text()``. :ticket:`1897` ``Table.c`` / ``MetaData.tables`` refined a bit, don't allow direct mutation ---------------------------------------------------------------------------- Another area where some users were tinkering around in such a way that doesn't actually work as expected, but still left an exceedingly small chance that some application was relying upon this behavior, the construct returned by the ``.c`` attribute on ``Table`` and the ``.tables`` attribute on ``MetaData`` is explicitly non-mutable. The "mutable" version of the construct is now private. Adding columns to ``.c`` involves using the ``append_column()`` method of ``Table``, which ensures things are associated with the parent ``Table`` in the appropriate way; similarly, ``MetaData.tables`` has a contract with the ``Table`` objects stored in this dictionary, as well as a little bit of new bookkeeping in that a ``set()`` of all schema names is tracked, which is satisfied only by using the public ``Table`` constructor as well as ``Table.tometadata()``. It is of course possible that the ``ColumnCollection`` and ``dict`` collections consulted by these attributes could someday implement events on all of their mutational methods such that the appropriate bookkeeping occurred upon direct mutation of the collections, but until someone has the motivation to implement all that along with dozens of new unit tests, narrowing the paths to mutation of these collections will ensure no application is attempting to rely upon usages that are currently not supported. :ticket:`1893` :ticket:`1917` server_default consistently returns None for all inserted_primary_key values ---------------------------------------------------------------------------- Established consistency when server_default is present on an Integer PK column. SQLA doesn't pre-fetch these, nor do they come back in cursor.lastrowid (DBAPI). Ensured all backends consistently return None in result.inserted_primary_key for these - some backends may have returned a value previously. 
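As a rough illustration of the new behavior (the table, sequence, and ``conn`` connection below are hypothetical placeholders, with ``conn`` assumed to be an existing ``Connection``):

::

    from sqlalchemy import Column, Integer, MetaData, String, Table, text

    metadata = MetaData()
    widgets = Table('widgets', metadata,
        # integer primary key populated entirely by a database-side default
        Column('id', Integer, primary_key=True,
               server_default=text("nextval('widget_id_seq')")),
        Column('name', String(50)),
    )

    result = conn.execute(widgets.insert().values(name='w1'))
    # the generated id value now comes back as None on all backends;
    # it is not pre-fetched by SQLAlchemy
    print result.inserted_primary_key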
Using a server_default on a primary key column is extremely unusual. If a special function or SQL expression is used to generate primary key defaults, this should be established as a Python-side "default" instead of server_default. Regarding reflection for this case, reflection of an int PK col with a server_default sets the "autoincrement" flag to False, except in the case of a PG SERIAL col where we detected a sequence default. :ticket:`2020` :ticket:`2021` The ``sqlalchemy.exceptions`` alias in sys.modules is removed ------------------------------------------------------------- For a few years we've added the string ``sqlalchemy.exceptions`` to ``sys.modules``, so that a statement like "``import sqlalchemy.exceptions``" would work. The name of the core exceptions module has been ``exc`` for a long time now, so the recommended import for this module is: :: from sqlalchemy import exc The ``exceptions`` name is still present in "``sqlalchemy``" for applications which might have said ``from sqlalchemy import exceptions``, but they should also start using the ``exc`` name. Query Timing Recipe Changes --------------------------- While not part of SQLAlchemy itself, it's worth mentioning that the rework of the ``ConnectionProxy`` into the new event system means it is no longer appropriate for the "Timing all Queries" recipe. Please adjust query-timers to use the ``before_cursor_execute()`` and ``after_cursor_execute()`` events, demonstrated in the updated recipe UsageRecipes/Profiling. Deprecated API ============== Default constructor on types will not accept arguments ------------------------------------------------------ Simple types like ``Integer``, ``Date`` etc. in the core types module don't accept arguments. The default constructor that accepts/ignores a catchall ``\*args, \**kwargs`` is restored as of 0.7b4/0.7.0, but emits a deprecation warning. If arguments are being used with a core type like ``Integer``, it may be that you intended to use a dialect specific type, such as ``sqlalchemy.dialects.mysql.INTEGER`` which does accept a "display_width" argument for example. compile_mappers() renamed configure_mappers(), simplified configuration internals --------------------------------------------------------------------------------- This system slowly morphed from something small, implemented local to an individual mapper, and poorly named into something that's more of a global "registry-" level function and poorly named, so we've fixed both by moving the implementation out of ``Mapper`` altogether and renaming it to ``configure_mappers()``. It is of course normally not needed for an application to call ``configure_mappers()`` as this process occurs on an as-needed basis, as soon as the mappings are needed via attribute or query access. :ticket:`1966` Core listener/proxy superseded by event listeners ------------------------------------------------- ``PoolListener``, ``ConnectionProxy``, ``DDLElement.execute_at`` are superseded by ``event.listen()``, using the ``PoolEvents``, ``EngineEvents``, ``DDLEvents`` dispatch targets, respectively. ORM extensions superseded by event listeners -------------------------------------------- ``MapperExtension``, ``AttributeExtension``, ``SessionExtension`` are superseded by ``event.listen()``, using the ``MapperEvents``/``InstanceEvents``, ``AttributeEvents``, ``SessionEvents``, dispatch targets, respectively. 
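For example, a hook that previously would have been written as a ``MapperExtension.before_insert()`` method can be attached with ``event.listen()`` roughly as follows (the mapped class and the listener body are hypothetical):

::

    import datetime
    from sqlalchemy import Column, DateTime, Integer, event
    from sqlalchemy.ext.declarative import declarative_base

    Base = declarative_base()

    class SomeClass(Base):
        # hypothetical mapped class
        __tablename__ = 'somedata'
        id = Column(Integer, primary_key=True)
        created_at = Column(DateTime)

    def set_created_at(mapper, connection, target):
        # formerly the body of a MapperExtension.before_insert() method
        target.created_at = datetime.datetime.utcnow()

    event.listen(SomeClass, 'before_insert', set_created_at)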
Sending a string to 'distinct' in select() for MySQL should be done via prefixes
---------------------------------------------------------------------------------

This obscure feature allows this pattern with the MySQL backend:

::

    select([mytable], distinct='ALL', prefixes=['HIGH_PRIORITY'])

The ``prefixes`` keyword or ``prefix_with()`` method should be used for non-standard or unusual prefixes:

::

    select([mytable]).prefix_with('HIGH_PRIORITY', 'ALL')

``useexisting`` superseded by ``extend_existing`` and ``keep_existing``
------------------------------------------------------------------------

The ``useexisting`` flag on Table has been superseded by a new pair of flags ``keep_existing`` and ``extend_existing``. ``extend_existing`` is equivalent to ``useexisting`` - the existing Table is returned, and additional constructor elements are added. With ``keep_existing``, the existing Table is returned, but additional constructor elements are not added - these elements are only applied when the Table is newly created.

Backwards Incompatible API Changes
==================================

Callables passed to ``bindparam()`` don't get evaluated - affects the Beaker example
--------------------------------------------------------------------------------------

:ticket:`1950`

Note this affects the Beaker caching example, where the workings of the ``_params_from_query()`` function needed a slight adjustment. If you're using code from the Beaker example, this change should be applied.

types.type_map is now private, types._type_map
-----------------------------------------------

We noticed some users tapping into this dictionary inside of ``sqlalchemy.types`` as a shortcut to associating Python types with SQL types. We can't guarantee the contents or format of this dictionary, and additionally the business of associating Python types in a one-to-one fashion has some grey areas that are best decided by individual applications, so we've underscored this attribute.

:ticket:`1870`

Renamed the ``alias`` keyword arg of standalone ``alias()`` function to ``name``
----------------------------------------------------------------------------------

This is so that the keyword argument ``name`` matches that of the ``alias()`` methods on all ``FromClause`` objects as well as the ``name`` argument on ``Query.subquery()``. Only code that uses the standalone ``alias()`` function, and not the bound ``alias()`` methods, and passes the alias name using the explicit keyword name ``alias``, and not positionally, would need modification here.

Non-public ``Pool`` methods underscored
----------------------------------------

All methods of ``Pool`` and subclasses which are not intended for public use have been renamed with underscores. That they were not named this way previously was a bug.
Pooling methods now underscored or removed: ``Pool.create_connection()`` -> ``Pool._create_connection()`` ``Pool.do_get()`` -> ``Pool._do_get()`` ``Pool.do_return_conn()`` -> ``Pool._do_return_conn()`` ``Pool.do_return_invalid()`` -> removed, was not used ``Pool.return_conn()`` -> ``Pool._return_conn()`` ``Pool.get()`` -> ``Pool._get()``, public API is ``Pool.connect()`` ``SingletonThreadPool.cleanup()`` -> ``_cleanup()`` ``SingletonThreadPool.dispose_local()`` -> removed, use ``conn.invalidate()`` :ticket:`1982` Previously Deprecated, Now Removed ================================== Query.join(), Query.outerjoin(), eagerload(), eagerload_all(), others no longer allow lists of attributes as arguments ---------------------------------------------------------------------------------------------------------------------- Passing a list of attributes or attribute names to ``Query.join``, ``eagerload()``, and similar has been deprecated since 0.5: :: # old way, deprecated since 0.5 session.query(Houses).join([Houses.rooms, Room.closets]) session.query(Houses).options(eagerload_all([Houses.rooms, Room.closets])) These methods all accept \*args as of the 0.5 series: :: # current way, in place since 0.5 session.query(Houses).join(Houses.rooms, Room.closets) session.query(Houses).options(eagerload_all(Houses.rooms, Room.closets)) ``ScopedSession.mapper`` is removed ----------------------------------- This feature provided a mapper extension which linked class- based functionality with a particular ``ScopedSession``, in particular providing the behavior such that new object instances would be automatically associated with that session. The feature was overused by tutorials and frameworks which led to great user confusion due to its implicit behavior, and was deprecated in 0.5.5. Techniques for replicating its functionality are at [wiki:UsageRecipes/SessionAwareMapper] SQLAlchemy-0.8.4/doc/build/changelog/migration_08.rst0000644000076500000240000014431712251150015023060 0ustar classicstaff00000000000000============================== What's New in SQLAlchemy 0.8? ============================== .. admonition:: About this Document This document describes changes between SQLAlchemy version 0.7, undergoing maintenance releases as of October, 2012, and SQLAlchemy version 0.8, which is expected for release in early 2013. Document date: October 25, 2012 Updated: March 9, 2013 Introduction ============ This guide introduces what's new in SQLAlchemy version 0.8, and also documents changes which affect users migrating their applications from the 0.7 series of SQLAlchemy to 0.8. SQLAlchemy releases are closing in on 1.0, and each new version since 0.5 features fewer major usage changes. Most applications that are settled into modern 0.7 patterns should be movable to 0.8 with no changes. Applications that use 0.6 and even 0.5 patterns should be directly migratable to 0.8 as well, though larger applications may want to test with each interim version. Platform Support ================ Targeting Python 2.5 and Up Now ------------------------------- SQLAlchemy 0.8 will target Python 2.5 and forward; compatibility for Python 2.4 is being dropped. The internals will be able to make usage of Python ternaries (that is, ``x if y else z``) which will improve things versus the usage of ``y and x or z``, which naturally has been the source of some bugs, as well as context managers (that is, ``with:``) and perhaps in some cases ``try:/except:/else:`` blocks which will help with code readability. 
SQLAlchemy will eventually drop 2.5 support as well - when 2.6 is reached as the baseline, SQLAlchemy will move to use 2.6/3.3 in-place compatibility, removing the usage of the ``2to3`` tool and maintaining a source base that works with Python 2 and 3 at the same time. New ORM Features ================ .. _feature_relationship_08: Rewritten :func:`.relationship` mechanics ----------------------------------------- 0.8 features a much improved and capable system regarding how :func:`.relationship` determines how to join between two entities. The new system includes these features: * The ``primaryjoin`` argument is **no longer needed** when constructing a :func:`.relationship` against a class that has multiple foreign key paths to the target. Only the ``foreign_keys`` argument is needed to specify those columns which should be included: :: class Parent(Base): __tablename__ = 'parent' id = Column(Integer, primary_key=True) child_id_one = Column(Integer, ForeignKey('child.id')) child_id_two = Column(Integer, ForeignKey('child.id')) child_one = relationship("Child", foreign_keys=child_id_one) child_two = relationship("Child", foreign_keys=child_id_two) class Child(Base): __tablename__ = 'child' id = Column(Integer, primary_key=True) * relationships against self-referential, composite foreign keys where **a column points to itself** are now supported. The canonical case is as follows: :: class Folder(Base): __tablename__ = 'folder' __table_args__ = ( ForeignKeyConstraint( ['account_id', 'parent_id'], ['folder.account_id', 'folder.folder_id']), ) account_id = Column(Integer, primary_key=True) folder_id = Column(Integer, primary_key=True) parent_id = Column(Integer) name = Column(String) parent_folder = relationship("Folder", backref="child_folders", remote_side=[account_id, folder_id] ) Above, the ``Folder`` refers to its parent ``Folder`` joining from ``account_id`` to itself, and ``parent_id`` to ``folder_id``. When SQLAlchemy constructs an auto- join, no longer can it assume all columns on the "remote" side are aliased, and all columns on the "local" side are not - the ``account_id`` column is **on both sides**. So the internal relationship mechanics were totally rewritten to support an entirely different system whereby two copies of ``account_id`` are generated, each containing different *annotations* to determine their role within the statement. Note the join condition within a basic eager load: :: SELECT folder.account_id AS folder_account_id, folder.folder_id AS folder_folder_id, folder.parent_id AS folder_parent_id, folder.name AS folder_name, folder_1.account_id AS folder_1_account_id, folder_1.folder_id AS folder_1_folder_id, folder_1.parent_id AS folder_1_parent_id, folder_1.name AS folder_1_name FROM folder LEFT OUTER JOIN folder AS folder_1 ON folder_1.account_id = folder.account_id AND folder.folder_id = folder_1.parent_id WHERE folder.folder_id = ? AND folder.account_id = ? * Previously difficult custom join conditions, like those involving functions and/or CASTing of types, will now function as expected in most cases:: class HostEntry(Base): __tablename__ = 'host_entry' id = Column(Integer, primary_key=True) ip_address = Column(INET) content = Column(String(50)) # relationship() using explicit foreign_keys, remote_side parent_host = relationship("HostEntry", primaryjoin=ip_address == cast(content, INET), foreign_keys=content, remote_side=ip_address ) The new :func:`.relationship` mechanics make use of a SQLAlchemy concept known as :term:`annotations`. 
These annotations are also available to application code explicitly via the :func:`.foreign` and :func:`.remote` functions, either as a means to improve readability for advanced configurations or to directly inject an exact configuration, bypassing the usual join-inspection heuristics:: from sqlalchemy.orm import foreign, remote class HostEntry(Base): __tablename__ = 'host_entry' id = Column(Integer, primary_key=True) ip_address = Column(INET) content = Column(String(50)) # relationship() using explicit foreign() and remote() annotations # in lieu of separate arguments parent_host = relationship("HostEntry", primaryjoin=remote(ip_address) == \ cast(foreign(content), INET), ) .. seealso:: :ref:`relationship_configure_joins` - a newly revised section on :func:`.relationship` detailing the latest techniques for customizing related attributes and collection access. :ticket:`1401` :ticket:`610` .. _feature_orminspection_08: New Class/Object Inspection System ---------------------------------- Lots of SQLAlchemy users are writing systems that require the ability to inspect the attributes of a mapped class, including being able to get at the primary key columns, object relationships, plain attributes, and so forth, typically for the purpose of building data-marshalling systems, like JSON/XML conversion schemes and of course form libraries galore. Originally, the :class:`.Table` and :class:`.Column` model were the original inspection points, which have a well-documented system. While SQLAlchemy ORM models are also fully introspectable, this has never been a fully stable and supported feature, and users tended to not have a clear idea how to get at this information. 0.8 now provides a consistent, stable and fully documented API for this purpose, including an inspection system which works on mapped classes, instances, attributes, and other Core and ORM constructs. The entrypoint to this system is the core-level :func:`.inspect` function. In most cases, the object being inspected is one already part of SQLAlchemy's system, such as :class:`.Mapper`, :class:`.InstanceState`, :class:`.Inspector`. In some cases, new objects have been added with the job of providing the inspection API in certain contexts, such as :class:`.AliasedInsp` and :class:`.AttributeState`. A walkthrough of some key capabilities follows:: >>> class User(Base): ... __tablename__ = 'user' ... id = Column(Integer, primary_key=True) ... name = Column(String) ... name_syn = synonym(name) ... addresses = relationship("Address") ... >>> # universal entry point is inspect() >>> b = inspect(User) >>> # b in this case is the Mapper >>> b >>> # Column namespace >>> b.columns.id Column('id', Integer(), table=, primary_key=True, nullable=False) >>> # mapper's perspective of the primary key >>> b.primary_key (Column('id', Integer(), table=, primary_key=True, nullable=False),) >>> # MapperProperties available from .attrs >>> b.attrs.keys() ['name_syn', 'addresses', 'id', 'name'] >>> # .column_attrs, .relationships, etc. 
filter this collection >>> b.column_attrs.keys() ['id', 'name'] >>> list(b.relationships) [] >>> # they are also namespaces >>> b.column_attrs.id >>> b.relationships.addresses >>> # point inspect() at a mapped, class level attribute, >>> # returns the attribute itself >>> b = inspect(User.addresses) >>> b >>> # From here we can get the mapper: >>> b.mapper >>> # the parent inspector, in this case a mapper >>> b.parent >>> # an expression >>> print b.expression "user".id = address.user_id >>> # inspect works on instances >>> u1 = User(id=3, name='x') >>> b = inspect(u1) >>> # it returns the InstanceState >>> b >>> # similar attrs accessor refers to the >>> b.attrs.keys() ['id', 'name_syn', 'addresses', 'name'] >>> # attribute interface - from attrs, you get a state object >>> b.attrs.id >>> # this object can give you, current value... >>> b.attrs.id.value 3 >>> # ... current history >>> b.attrs.id.history History(added=[3], unchanged=(), deleted=()) >>> # InstanceState can also provide session state information >>> # lets assume the object is persistent >>> s = Session() >>> s.add(u1) >>> s.commit() >>> # now we can get primary key identity, always >>> # works in query.get() >>> b.identity (3,) >>> # the mapper level key >>> b.identity_key (, (3,)) >>> # state within the session >>> b.persistent, b.transient, b.deleted, b.detached (True, False, False, False) >>> # owning session >>> b.session .. seealso:: :ref:`core_inspection_toplevel` :ticket:`2208` New with_polymorphic() feature, can be used anywhere ---------------------------------------------------- The :meth:`.Query.with_polymorphic` method allows the user to specify which tables should be present when querying against a joined-table entity. Unfortunately the method is awkward and only applies to the first entity in the list, and otherwise has awkward behaviors both in usage as well as within the internals. A new enhancement to the :func:`.aliased` construct has been added called :func:`.with_polymorphic` which allows any entity to be "aliased" into a "polymorphic" version of itself, freely usable anywhere: :: from sqlalchemy.orm import with_polymorphic palias = with_polymorphic(Person, [Engineer, Manager]) session.query(Company).\ join(palias, Company.employees).\ filter(or_(Engineer.language=='java', Manager.hair=='pointy')) .. seealso:: :ref:`with_polymorphic` - newly updated documentation for polymorphic loading control. :ticket:`2333` of_type() works with alias(), with_polymorphic(), any(), has(), joinedload(), subqueryload(), contains_eager() -------------------------------------------------------------------------------------------------------------- The :meth:`.PropComparator.of_type` method is used to specify a specific subtype to use when constructing SQL expressions along a :func:`.relationship` that has a :term:`polymorphic` mapping as its target. 
This method can now be used to target *any number* of target subtypes, by combining it with the new :func:`.with_polymorphic` function:: # use eager loading in conjunction with with_polymorphic targets Job_P = with_polymorphic(Job, [SubJob, ExtraJob], aliased=True) q = s.query(DataContainer).\ join(DataContainer.jobs.of_type(Job_P)).\ options(contains_eager(DataContainer.jobs.of_type(Job_P))) The method now works equally well in most places a regular relationship attribute is accepted, including with loader functions like :func:`.joinedload`, :func:`.subqueryload`, :func:`.contains_eager`, and comparison methods like :meth:`.PropComparator.any` and :meth:`.PropComparator.has`:: # use eager loading in conjunction with with_polymorphic targets Job_P = with_polymorphic(Job, [SubJob, ExtraJob], aliased=True) q = s.query(DataContainer).\ join(DataContainer.jobs.of_type(Job_P)).\ options(contains_eager(DataContainer.jobs.of_type(Job_P))) # pass subclasses to eager loads (implicitly applies with_polymorphic) q = s.query(ParentThing).\ options( joinedload_all( ParentThing.container, DataContainer.jobs.of_type(SubJob) )) # control self-referential aliasing with any()/has() Job_A = aliased(Job) q = s.query(Job).join(DataContainer.jobs).\ filter( DataContainer.jobs.of_type(Job_A).\ any(and_(Job_A.id < Job.id, Job_A.type=='fred') ) ) .. seealso:: :ref:`of_type` :ticket:`2438` :ticket:`1106` Events Can Be Applied to Unmapped Superclasses ---------------------------------------------- Mapper and instance events can now be associated with an unmapped superclass, where those events will be propagated to subclasses as those subclasses are mapped. The ``propagate=True`` flag should be used. This feature allows events to be associated with a declarative base class:: from sqlalchemy.ext.declarative import declarative_base Base = declarative_base() @event.listens_for("load", Base, propagate=True) def on_load(target, context): print "New instance loaded:", target # on_load() will be applied to SomeClass class SomeClass(Base): __tablename__ = 'sometable' # ... :ticket:`2585` Declarative Distinguishes Between Modules/Packages -------------------------------------------------- A key feature of Declarative is the ability to refer to other mapped classes using their string name. The registry of class names is now sensitive to the owning module and package of a given class. The classes can be referred to via dotted name in expressions:: class Snack(Base): # ... peanuts = relationship("nuts.Peanut", primaryjoin="nuts.Peanut.snack_id == Snack.id") The resolution allows that any full or partial disambiguating package name can be used. If the path to a particular class is still ambiguous, an error is raised. :ticket:`2338` New DeferredReflection Feature in Declarative --------------------------------------------- The "deferred reflection" example has been moved to a supported feature within Declarative. This feature allows the construction of declarative mapped classes with only placeholder ``Table`` metadata, until a ``prepare()`` step is called, given an ``Engine`` with which to reflect fully all tables and establish actual mappings. The system supports overriding of columns, single and joined inheritance, as well as distinct bases-per-engine. 
A full declarative configuration can now be created against an existing table that is assembled upon engine creation time in one step: :: class ReflectedOne(DeferredReflection, Base): __abstract__ = True class ReflectedTwo(DeferredReflection, Base): __abstract__ = True class MyClass(ReflectedOne): __tablename__ = 'mytable' class MyOtherClass(ReflectedOne): __tablename__ = 'myothertable' class YetAnotherClass(ReflectedTwo): __tablename__ = 'yetanothertable' ReflectedOne.prepare(engine_one) ReflectedTwo.prepare(engine_two) .. seealso:: :class:`.DeferredReflection` :ticket:`2485` ORM Classes Now Accepted by Core Constructs ------------------------------------------- While the SQL expressions used with :meth:`.Query.filter`, such as ``User.id == 5``, have always been compatible for use with core constructs such as :func:`.select`, the mapped class itself would not be recognized when passed to :func:`.select`, :meth:`.Select.select_from`, or :meth:`.Select.correlate`. A new SQL registration system allows a mapped class to be accepted as a FROM clause within the core:: from sqlalchemy import select stmt = select([User]).where(User.id == 5) Above, the mapped ``User`` class will expand into :class:`.Table` to which :class:`.User` is mapped. :ticket:`2245` Query.update() supports UPDATE..FROM ------------------------------------- The new UPDATE..FROM mechanics work in query.update(). Below, we emit an UPDATE against ``SomeEntity``, adding a FROM clause (or equivalent, depending on backend) against ``SomeOtherEntity``:: query(SomeEntity).\ filter(SomeEntity.id==SomeOtherEntity.id).\ filter(SomeOtherEntity.foo=='bar').\ update({"data":"x"}) In particular, updates to joined-inheritance entities are supported, provided the target of the UPDATE is local to the table being filtered on, or if the parent and child tables are mixed, they are joined explicitly in the query. Below, given ``Engineer`` as a joined subclass of ``Person``: :: query(Engineer).\ filter(Person.id==Engineer.id).\ filter(Person.name=='dilbert').\ update({"engineer_data":"java"}) would produce: :: UPDATE engineer SET engineer_data='java' FROM person WHERE person.id=engineer.id AND person.name='dilbert' :ticket:`2365` rollback() will only roll back "dirty" objects from a begin_nested() -------------------------------------------------------------------- A behavioral change that should improve efficiency for those users using SAVEPOINT via ``Session.begin_nested()`` - upon ``rollback()``, only those objects that were made dirty since the last flush will be expired, the rest of the ``Session`` remains intact. This because a ROLLBACK to a SAVEPOINT does not terminate the containing transaction's isolation, so no expiry is needed except for those changes that were not flushed in the current transaction. :ticket:`2452` Caching Example now uses dogpile.cache --------------------------------------- The caching example now uses `dogpile.cache `_. Dogpile.cache is a rewrite of the caching portion of Beaker, featuring vastly simpler and faster operation, as well as support for distributed locking. 
Note that the SQLAlchemy APIs used by the Dogpile example as well as the previous Beaker example have changed slightly, in particular this change is needed as illustrated in the Beaker example::

    --- examples/beaker_caching/caching_query.py
    +++ examples/beaker_caching/caching_query.py
    @@ -222,7 +222,8 @@

             """
             if query._current_path:
    -            mapper, key = query._current_path[-2:]
    +            mapper, prop = query._current_path[-2:]
    +            key = prop.key

                 for cls in mapper.class_.__mro__:
                     if (cls, key) in self._relationship_options:

.. seealso::

    :mod:`dogpile_caching`

:ticket:`2589`

New Core Features
==================

Fully extensible, type-level operator support in Core
-------------------------------------------------------

The Core has to date never had any system of adding support for new SQL operators to Column and other expression constructs, other than the :meth:`.ColumnOperators.op` method which is "just enough" to make things work. There has also never been any system in place for Core which allows the behavior of existing operators to be overridden. Up until now, the only way operators could be flexibly redefined was in the ORM layer, using :func:`.column_property` given a ``comparator_factory`` argument. Third party libraries like GeoAlchemy were therefore forced to be ORM-centric and rely upon an array of hacks to apply new operations as well as to get them to propagate correctly.

The new operator system in Core adds the one hook that's been missing all along, which is to associate new and overridden operators with *types*. Since after all, it's not really a column, CAST operator, or SQL function that drives what kinds of operations are present, it's the *type* of the expression. The implementation details are minimal - only a few extra methods are added to the core :class:`.ColumnElement` type so that it consults its :class:`.TypeEngine` object for an optional set of operators. New or revised operations can be associated with any type, either via subclassing of an existing type, by using :class:`.TypeDecorator`, or "globally across-the-board" by attaching a new :class:`.TypeEngine.Comparator` object to an existing type class.

For example, to add logarithm support to :class:`.Numeric` types:

::

    from sqlalchemy.types import Numeric
    from sqlalchemy.sql import func

    class CustomNumeric(Numeric):
        class comparator_factory(Numeric.Comparator):
            def log(self, other):
                return func.log(self.expr, other)

The new type is usable like any other type:

::

    data = Table('data', metadata,
              Column('id', Integer, primary_key=True),
              Column('x', CustomNumeric(10, 5)),
              Column('y', CustomNumeric(10, 5))
         )

    stmt = select([data.c.x.log(data.c.y)]).where(data.c.x.log(2) < value)
    print conn.execute(stmt).fetchall()

New features which have come from this immediately include support for Postgresql's HSTORE type, as well as new operations associated with Postgresql's ARRAY type. It also paves the way for existing types to acquire lots more operators that are specific to those types, such as more string, integer and date operators.

.. seealso::

    :ref:`types_operators`

    :class:`.HSTORE`

:ticket:`2547`

Type Expressions
-----------------

SQL expressions can now be associated with types. Historically, :class:`.TypeEngine` has always allowed Python-side functions which receive both bound parameters and result row values, passing them through a Python-side conversion function on the way to/back from the database.
The new feature allows similar functionality, except on the database side:: from sqlalchemy.types import String from sqlalchemy import func, Table, Column, MetaData class LowerString(String): def bind_expression(self, bindvalue): return func.lower(bindvalue) def column_expression(self, col): return func.lower(col) metadata = MetaData() test_table = Table( 'test_table', metadata, Column('data', LowerString) ) Above, the ``LowerString`` type defines a SQL expression that will be emitted whenever the ``test_table.c.data`` column is rendered in the columns clause of a SELECT statement:: >>> print select([test_table]).where(test_table.c.data == 'HI') SELECT lower(test_table.data) AS data FROM test_table WHERE test_table.data = lower(:data_1) This feature is also used heavily by the new release of GeoAlchemy, to embed PostGIS expressions inline in SQL based on type rules. .. seealso:: :ref:`types_sql_value_processing` :ticket:`1534` Core Inspection System ----------------------- The :func:`.inspect` function introduced in :ref:`feature_orminspection_08` also applies to the core. Applied to an :class:`.Engine` it produces an :class:`.Inspector` object:: from sqlalchemy import inspect from sqlalchemy import create_engine engine = create_engine("postgresql://scott:tiger@localhost/test") insp = inspect(engine) print insp.get_table_names() It can also be applied to any :class:`.ClauseElement`, which returns the :class:`.ClauseElement` itself, such as :class:`.Table`, :class:`.Column`, :class:`.Select`, etc. This allows it to work fluently between Core and ORM constructs. New Method :meth:`.Select.correlate_except` ------------------------------------------- :func:`.select` now has a method :meth:`.Select.correlate_except` which specifies "correlate on all FROM clauses except those specified". It can be used for mapping scenarios where a related subquery should correlate normally, except against a particular target selectable:: class SnortEvent(Base): __tablename__ = "event" id = Column(Integer, primary_key=True) signature = Column(Integer, ForeignKey("signature.id")) signatures = relationship("Signature", lazy=False) class Signature(Base): __tablename__ = "signature" id = Column(Integer, primary_key=True) sig_count = column_property( select([func.count('*')]).\ where(SnortEvent.signature == id). correlate_except(SnortEvent) ) .. seealso:: :meth:`.Select.correlate_except` Postgresql HSTORE type ---------------------- Support for Postgresql's ``HSTORE`` type is now available as :class:`.postgresql.HSTORE`. This type makes great usage of the new operator system to provide a full range of operators for HSTORE types, including index access, concatenation, and containment methods such as :meth:`~.HSTORE.comparator_factory.has_key`, :meth:`~.HSTORE.comparator_factory.has_any`, and :meth:`~.HSTORE.comparator_factory.matrix`:: from sqlalchemy.dialects.postgresql import HSTORE data = Table('data_table', metadata, Column('id', Integer, primary_key=True), Column('hstore_data', HSTORE) ) engine.execute( select([data.c.hstore_data['some_key']]) ).scalar() engine.execute( select([data.c.hstore_data.matrix()]) ).scalar() .. 
seealso:: :class:`.postgresql.HSTORE` :class:`.postgresql.hstore` :ticket:`2606` Enhanced Postgresql ARRAY type ------------------------------ The :class:`.postgresql.ARRAY` type will accept an optional "dimension" argument, pinning it to a fixed number of dimensions and greatly improving efficiency when retrieving results: :: # old way, still works since PG supports N-dimensions per row: Column("my_array", postgresql.ARRAY(Integer)) # new way, will render ARRAY with correct number of [] in DDL, # will process binds and results more efficiently as we don't need # to guess how many levels deep to go Column("my_array", postgresql.ARRAY(Integer, dimensions=2)) The type also introduces new operators, using the new type-specific operator framework. New operations include indexed access:: result = conn.execute( select([mytable.c.arraycol[2]]) ) slice access in SELECT:: result = conn.execute( select([mytable.c.arraycol[2:4]]) ) slice updates in UPDATE:: conn.execute( mytable.update().values({mytable.c.arraycol[2:3]: [7, 8]}) ) freestanding array literals:: >>> from sqlalchemy.dialects import postgresql >>> conn.scalar( ... select([ ... postgresql.array([1, 2]) + postgresql.array([3, 4, 5]) ... ]) ... ) [1, 2, 3, 4, 5] array concatenation, where below, the right side ``[4, 5, 6]`` is coerced into an array literal:: select([mytable.c.arraycol + [4, 5, 6]]) .. seealso:: :class:`.postgresql.ARRAY` :class:`.postgresql.array` :ticket:`2441` New, configurable DATE, TIME types for SQLite --------------------------------------------- SQLite has no built-in DATE, TIME, or DATETIME types, and instead provides some support for storage of date and time values either as strings or integers. The date and time types for SQLite are enhanced in 0.8 to be much more configurable as to the specific format, including that the "microseconds" portion is optional, as well as pretty much everything else. :: Column('sometimestamp', sqlite.DATETIME(truncate_microseconds=True)) Column('sometimestamp', sqlite.DATETIME( storage_format=( "%(year)04d%(month)02d%(day)02d" "%(hour)02d%(minute)02d%(second)02d%(microsecond)06d" ), regexp="(\d{4})(\d{2})(\d{2})(\d{2})(\d{2})(\d{2})(\d{6})" ) ) Column('somedate', sqlite.DATE( storage_format="%(month)02d/%(day)02d/%(year)04d", regexp="(?P\d+)/(?P\d+)/(?P\d+)", ) ) Huge thanks to Nate Dub for the sprinting on this at Pycon 2012. .. seealso:: :class:`.sqlite.DATETIME` :class:`.sqlite.DATE` :class:`.sqlite.TIME` :ticket:`2363` "COLLATE" supported across all dialects; in particular MySQL, Postgresql, SQLite -------------------------------------------------------------------------------- The "collate" keyword, long accepted by the MySQL dialect, is now established on all :class:`.String` types and will render on any backend, including when features such as :meth:`.MetaData.create_all` and :func:`.cast` is used:: >>> stmt = select([cast(sometable.c.somechar, String(20, collation='utf8'))]) >>> print stmt SELECT CAST(sometable.somechar AS VARCHAR(20) COLLATE "utf8") AS anon_1 FROM sometable .. seealso:: :class:`.String` :ticket:`2276` "Prefixes" now supported for :func:`.update`, :func:`.delete` ------------------------------------------------------------- Geared towards MySQL, a "prefix" can be rendered within any of these constructs. E.g.:: stmt = table.delete().prefix_with("LOW_PRIORITY", dialect="mysql") stmt = table.update().prefix_with("LOW_PRIORITY", dialect="mysql") The method is new in addition to those which already existed on :func:`.insert`, :func:`.select` and :class:`.Query`. .. 
seealso:: :meth:`.Update.prefix_with` :meth:`.Delete.prefix_with` :meth:`.Insert.prefix_with` :meth:`.Select.prefix_with` :meth:`.Query.prefix_with` :ticket:`2431` Behavioral Changes ================== .. _legacy_is_orphan_addition: The consideration of a "pending" object as an "orphan" has been made more aggressive ------------------------------------------------------------------------------------ This is a late add to the 0.8 series, however it is hoped that the new behavior is generally more consistent and intuitive in a wider variety of situations. The ORM has since at least version 0.4 included behavior such that an object that's "pending", meaning that it's associated with a :class:`.Session` but hasn't been inserted into the database yet, is automatically expunged from the :class:`.Session` when it becomes an "orphan", which means it has been de-associated with a parent object that refers to it with ``delete-orphan`` cascade on the configured :func:`.relationship`. This behavior is intended to approximately mirror the behavior of a persistent (that is, already inserted) object, where the ORM will emit a DELETE for such objects that become orphans based on the interception of detachment events. The behavioral change comes into play for objects that are referred to by multiple kinds of parents that each specify ``delete-orphan``; the typical example is an :ref:`association object ` that bridges two other kinds of objects in a many-to-many pattern. Previously, the behavior was such that the pending object would be expunged only when de-associated with *all* of its parents. With the behavioral change, the pending object is expunged as soon as it is de-associated from *any* of the parents that it was previously associated with. This behavior is intended to more closely match that of persistent objects, which are deleted as soon as they are de-associated from any parent. The rationale for the older behavior dates back at least to version 0.4, and was basically a defensive decision to try to alleviate confusion when an object was still being constructed for INSERT. But the reality is that the object is re-associated with the :class:`.Session` as soon as it is attached to any new parent in any case. It's still possible to flush an object that is not associated with all of its required parents, if the object was either not associated with those parents in the first place, or if it was expunged, but then re-associated with a :class:`.Session` via a subsequent attachment event but still not fully associated. In this situation, it is expected that the database would emit an integrity error, as there are likely NOT NULL foreign key columns that are unpopulated. The ORM makes the decision to let these INSERT attempts occur, based on the judgment that an object that is only partially associated with its required parents but has been actively associated with some of them, is more often than not a user error, rather than an intentional omission which should be silently skipped - silently skipping the INSERT here would make user errors of this nature very hard to debug. The old behavior, for applications that might have been relying upon it, can be re-enabled for any :class:`.Mapper` by specifying the flag ``legacy_is_orphan`` as a mapper option. 
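For instance, on a Declarative mapping the flag can be passed through ``__mapper_args__``; the following is a minimal sketch with illustrative class and column names (the complete test case below shows the same flag in context)::

    from sqlalchemy import Column, Integer
    from sqlalchemy.ext.declarative import declarative_base

    Base = declarative_base()

    class UserKeyword(Base):
        __tablename__ = 'user_keyword'
        id = Column(Integer, primary_key=True)

        # re-enable the pre-0.8 "pending orphan" behavior for this mapping only
        __mapper_args__ = {"legacy_is_orphan": True}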
The new behavior allows the following test case to work:: from sqlalchemy import Column, Integer, String, ForeignKey from sqlalchemy.orm import relationship, backref from sqlalchemy.ext.declarative import declarative_base Base = declarative_base() class User(Base): __tablename__ = 'user' id = Column(Integer, primary_key=True) name = Column(String(64)) class UserKeyword(Base): __tablename__ = 'user_keyword' user_id = Column(Integer, ForeignKey('user.id'), primary_key=True) keyword_id = Column(Integer, ForeignKey('keyword.id'), primary_key=True) user = relationship(User, backref=backref("user_keywords", cascade="all, delete-orphan") ) keyword = relationship("Keyword", backref=backref("user_keywords", cascade="all, delete-orphan") ) # uncomment this to enable the old behavior # __mapper_args__ = {"legacy_is_orphan": True} class Keyword(Base): __tablename__ = 'keyword' id = Column(Integer, primary_key=True) keyword = Column('keyword', String(64)) from sqlalchemy import create_engine from sqlalchemy.orm import Session # note we're using Postgresql to ensure that referential integrity # is enforced, for demonstration purposes. e = create_engine("postgresql://scott:tiger@localhost/test", echo=True) Base.metadata.drop_all(e) Base.metadata.create_all(e) session = Session(e) u1 = User(name="u1") k1 = Keyword(keyword="k1") session.add_all([u1, k1]) uk1 = UserKeyword(keyword=k1, user=u1) # previously, if session.flush() were called here, # this operation would succeed, but if session.flush() # were not called here, the operation fails with an # integrity error. # session.flush() del u1.user_keywords[0] session.commit() :ticket:`2655` The after_attach event fires after the item is associated with the Session instead of before; before_attach added ----------------------------------------------------------------------------------------------------------------- Event handlers which use after_attach can now assume the given instance is associated with the given session: :: @event.listens_for(Session, "after_attach") def after_attach(session, instance): assert instance in session Some use cases require that it work this way. However, other use cases require that the item is *not* yet part of the session, such as when a query, intended to load some state required for an instance, emits autoflush first and would otherwise prematurely flush the target object. Those use cases should use the new "before_attach" event: :: @event.listens_for(Session, "before_attach") def before_attach(session, instance): instance.some_necessary_attribute = session.query(Widget).\ filter_by(instance.widget_name).\ first() :ticket:`2464` Query now auto-correlates like a select() does ---------------------------------------------- Previously it was necessary to call :meth:`.Query.correlate` in order to have a column- or WHERE-subquery correlate to the parent: :: subq = session.query(Entity.value).\ filter(Entity.id==Parent.entity_id).\ correlate(Parent).\ as_scalar() session.query(Parent).filter(subq=="some value") This was the opposite behavior of a plain ``select()`` construct which would assume auto-correlation by default. The above statement in 0.8 will correlate automatically: :: subq = session.query(Entity.value).\ filter(Entity.id==Parent.entity_id).\ as_scalar() session.query(Parent).filter(subq=="some value") like in ``select()``, correlation can be disabled by calling ``query.correlate(None)`` or manually set by passing an entity, ``query.correlate(someentity)``. :ticket:`2179` .. 
_correlation_context_specific: Correlation is now always context-specific ------------------------------------------ To allow a wider variety of correlation scenarios, the behavior of :meth:`.Select.correlate` and :meth:`.Query.correlate` has changed slightly such that the SELECT statement will omit the "correlated" target from the FROM clause only if the statement is actually used in that context. Additionally, it's no longer possible for a SELECT statement that's placed as a FROM in an enclosing SELECT statement to "correlate" (i.e. omit) a FROM clause. This change only makes things better as far as rendering SQL, in that it's no longer possible to render illegal SQL where there are insufficient FROM objects relative to what's being selected:: from sqlalchemy.sql import table, column, select t1 = table('t1', column('x')) t2 = table('t2', column('y')) s = select([t1, t2]).correlate(t1) print(s) Prior to this change, the above would return:: SELECT t1.x, t2.y FROM t2 which is invalid SQL as "t1" is not referred to in any FROM clause. Now, in the absence of an enclosing SELECT, it returns:: SELECT t1.x, t2.y FROM t1, t2 Within a SELECT, the correlation takes effect as expected:: s2 = select([t1, t2]).where(t1.c.x == t2.c.y).where(t1.c.x == s) print(s2) SELECT t1.x, t2.y FROM t1, t2 WHERE t1.x = t2.y AND t1.x = (SELECT t1.x, t2.y FROM t2) This change is not expected to impact any existing applications, as the correlation behavior remains identical for properly constructed expressions. Only an application that relies, most likely within a testing scenario, on the invalid string output of a correlated SELECT used in a non-correlating context would see any change. :ticket:`2668` .. _metadata_create_drop_tables: create_all() and drop_all() will now honor an empty list as such ---------------------------------------------------------------- The methods :meth:`.MetaData.create_all` and :meth:`.MetaData.drop_all` will now accept a list of :class:`.Table` objects that is empty, and will not emit any CREATE or DROP statements. Previously, an empty list was interpreted the same as passing ``None`` for a collection, and CREATE/DROP would be emitted for all items unconditionally. This is a bug fix, but some applications may have been relying upon the previous behavior. :ticket:`2664` Repaired the Event Targeting of :class:`.InstrumentationEvents` ---------------------------------------------------------------- The :class:`.InstrumentationEvents` series of event targets have documented that the events will only be fired off according to the actual class passed as a target. Through 0.7, this wasn't the case, and any event listener applied to :class:`.InstrumentationEvents` would be invoked for all classes mapped. In 0.8, additional logic has been added so that the events will only invoke for those classes sent in. The ``propagate`` flag here is set to ``True`` by default, as class instrumentation events are typically used to intercept classes that aren't yet created. :ticket:`2590` No more magic coercion of "=" to IN when comparing to subquery in MS-SQL ------------------------------------------------------------------------ We found a very old behavior in the MSSQL dialect which would attempt to rescue users from themselves when doing something like this: :: scalar_subq = select([someothertable.c.id]).where(someothertable.c.data=='foo') select([sometable]).where(sometable.c.id==scalar_subq) SQL Server doesn't allow an equality comparison to a scalar SELECT, that is, "x = (SELECT something)".
The MSSQL dialect would convert this to an IN. The same thing would happen however upon a comparison like "(SELECT something) = x", and overall this level of guessing is outside of SQLAlchemy's usual scope so the behavior is removed. :ticket:`2277` Fixed the behavior of :meth:`.Session.is_modified` -------------------------------------------------- The :meth:`.Session.is_modified` method accepts an argument ``passive`` which basically should not be necessary, the argument in all cases should be the value ``True`` - when left at its default of ``False`` it would have the effect of hitting the database, and often triggering autoflush which would itself change the results. In 0.8 the ``passive`` argument will have no effect, and unloaded attributes will never be checked for history since by definition there can be no pending state change on an unloaded attribute. .. seealso:: :meth:`.Session.is_modified` :ticket:`2320` :attr:`.Column.key` is honored in the :attr:`.Select.c` attribute of :func:`.select` with :meth:`.Select.apply_labels` ----------------------------------------------------------------------------------------------------------------------- Users of the expression system know that :meth:`.Select.apply_labels` prepends the table name to each column name, affecting the names that are available from :attr:`.Select.c`: :: s = select([table1]).apply_labels() s.c.table1_col1 s.c.table1_col2 Before 0.8, if the :class:`.Column` had a different :attr:`.Column.key`, this key would be ignored, inconsistently versus when :meth:`.Select.apply_labels` were not used: :: # before 0.8 table1 = Table('t1', metadata, Column('col1', Integer, key='column_one') ) s = select([table1]) s.c.column_one # would be accessible like this s.c.col1 # would raise AttributeError s = select([table1]).apply_labels() s.c.table1_column_one # would raise AttributeError s.c.table1_col1 # would be accessible like this In 0.8, :attr:`.Column.key` is honored in both cases: :: # with 0.8 table1 = Table('t1', metadata, Column('col1', Integer, key='column_one') ) s = select([table1]) s.c.column_one # works s.c.col1 # AttributeError s = select([table1]).apply_labels() s.c.table1_column_one # works s.c.table1_col1 # AttributeError All other behavior regarding "name" and "key" are the same, including that the rendered SQL will still use the form ``_`` - the emphasis here was on preventing the :attr:`.Column.key` contents from being rendered into the ``SELECT`` statement so that there are no issues with special/ non-ascii characters used in the :attr:`.Column.key`. :ticket:`2397` single_parent warning is now an error ------------------------------------- A :func:`.relationship` that is many-to-one or many-to-many and specifies "cascade='all, delete-orphan'", which is an awkward but nonetheless supported use case (with restrictions) will now raise an error if the relationship does not specify the ``single_parent=True`` option. Previously it would only emit a warning, but a failure would follow almost immediately within the attribute system in any case. :ticket:`2405` Adding the ``inspector`` argument to the ``column_reflect`` event ----------------------------------------------------------------- 0.7 added a new event called ``column_reflect``, provided so that the reflection of columns could be augmented as each one were reflected. 
We got this event slightly wrong in that the event gave no way to get at the current ``Inspector`` and ``Connection`` being used for the reflection, in the case that additional information from the database is needed. As this is a new event not widely used yet, we'll be adding the ``inspector`` argument into it directly: :: @event.listens_for(Table, "column_reflect") def listen_for_col(inspector, table, column_info): # ... :ticket:`2418` Disabling auto-detect of collations, casing for MySQL ----------------------------------------------------- The MySQL dialect does two calls, one very expensive, to load all possible collations from the database as well as information on casing, the first time an ``Engine`` connects. Neither of these collections are used for any SQLAlchemy functions, so these calls will be changed to no longer be emitted automatically. Applications that might have relied on these collections being present on ``engine.dialect`` will need to call upon ``_detect_collations()`` and ``_detect_casing()`` directly. :ticket:`2404` "Unconsumed column names" warning becomes an exception ------------------------------------------------------ Referring to a non-existent column in an ``insert()`` or ``update()`` construct will raise an error instead of a warning: :: t1 = table('t1', column('x')) t1.insert().values(x=5, z=5) # raises "Unconsumed column names: z" :ticket:`2415` Inspector.get_primary_keys() is deprecated, use Inspector.get_pk_constraint --------------------------------------------------------------------------- These two methods on ``Inspector`` were redundant, where ``get_primary_keys()`` would return the same information as ``get_pk_constraint()`` minus the name of the constraint: :: >>> insp.get_primary_keys() ["a", "b"] >>> insp.get_pk_constraint() {"name":"pk_constraint", "constrained_columns":["a", "b"]} :ticket:`2422` Case-insensitive result row names will be disabled in most cases ---------------------------------------------------------------- A very old behavior, the column names in ``RowProxy`` were always compared case-insensitively: :: >>> row = result.fetchone() >>> row['foo'] == row['FOO'] == row['Foo'] True This was for the benefit of a few dialects which in the early days needed this, like Oracle and Firebird, but in modern usage we have more accurate ways of dealing with the case-insensitive behavior of these two platforms. Going forward, this behavior will be available only optionally, by passing the flag ```case_sensitive=False``` to ```create_engine()```, but otherwise column names requested from the row must match as far as casing. :ticket:`2423` ``InstrumentationManager`` and alternate class instrumentation is now an extension ---------------------------------------------------------------------------------- The ``sqlalchemy.orm.interfaces.InstrumentationManager`` class is moved to ``sqlalchemy.ext.instrumentation.InstrumentationManager``. The "alternate instrumentation" system was built for the benefit of a very small number of installations that needed to work with existing or unusual class instrumentation systems, and generally is very seldom used. The complexity of this system has been exported to an ``ext.`` module. It remains unused until once imported, typically when a third party library imports ``InstrumentationManager``, at which point it is injected back into ``sqlalchemy.orm`` by replacing the default ``InstrumentationFactory`` with ``ExtendedInstrumentationRegistry``. 
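As a minimal sketch of the new import location (the subclass name here is illustrative and not part of SQLAlchemy), a library providing alternate instrumentation would now do::

    from sqlalchemy.ext.instrumentation import InstrumentationManager

    class MyClassManager(InstrumentationManager):
        """Alternate instrumentation for an unusual class system."""

Importing :class:`.InstrumentationManager` from the ``ext.`` module is what triggers the injection described above.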
Removed ======= SQLSoup ------- SQLSoup is a handy package that presents an alternative interface on top of the SQLAlchemy ORM. SQLSoup is now moved into its own project and documented/released separately; see https://bitbucket.org/zzzeek/sqlsoup. SQLSoup is a very simple tool that could also benefit from contributors who are interested in its style of usage. :ticket:`2262` MutableType ----------- The older "mutable" system within the SQLAlchemy ORM has been removed. This refers to the ``MutableType`` interface which was applied to types such as ``PickleType`` and conditionally to ``TypeDecorator``, and since very early SQLAlchemy versions has provided a way for the ORM to detect changes in so-called "mutable" data structures such as JSON structures and pickled objects. However, the implementation was never reasonable and forced a very inefficient mode of usage on the unit-of-work which caused an expensive scan of all objects to take place during flush. In 0.7, the `sqlalchemy.ext.mutable `_ extension was introduced so that user-defined datatypes can appropriately send events to the unit of work as changes occur. Today, usage of ``MutableType`` is expected to be low, as warnings have been in place for some years now regarding its inefficiency. :ticket:`2442` sqlalchemy.exceptions (has been sqlalchemy.exc for years) --------------------------------------------------------- We had left in an alias ``sqlalchemy.exceptions`` to attempt to make it slightly easier for some very old libraries that hadn't yet been upgraded to use ``sqlalchemy.exc``. Some users are still being confused by it however so in 0.8 we're taking it out entirely to eliminate any of that confusion. :ticket:`2433` SQLAlchemy-0.8.4/doc/build/conf.py0000644000076500000240000002370712251150015017375 0ustar classicstaff00000000000000# -*- coding: utf-8 -*- # # SQLAlchemy documentation build configuration file, created by # sphinx-quickstart on Wed Nov 26 19:50:10 2008. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys import os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.insert(0, os.path.abspath('../../lib')) sys.path.insert(0, os.path.abspath('../../examples')) sys.path.insert(0, os.path.abspath('.')) import sqlalchemy # -- General configuration ----------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = [ 'sphinx.ext.autodoc', 'builder.autodoc_mods', 'builder.changelog', 'builder.dialect_info', 'builder.mako', 'builder.sqlformatter', ] # Add any paths that contain templates here, relative to this directory. # not sure why abspath() is needed here, some users # have reported this. templates_path = [os.path.abspath('templates')] nitpicky = True # The suffix of source filenames. source_suffix = '.rst' # section names used by the changelog extension. 
changelog_sections = ["general", "orm", "orm declarative", "orm querying", \ "orm configuration", "engine", "sql", \ "schema", \ "postgresql", "mysql", "sqlite", "mssql", \ "oracle", "firebird"] # tags to sort on inside of sections changelog_inner_tag_sort = ["feature", "bug", "moved", "changed", "removed"] # how to render changelog links changelog_render_ticket = "http://www.sqlalchemy.org/trac/ticket/%s" changelog_render_pullreq = "https://bitbucket.org/sqlalchemy/sqlalchemy/pull-request/%s" changelog_render_changeset = "http://www.sqlalchemy.org/trac/changeset/%s" # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'contents' # General information about the project. project = u'SQLAlchemy' copyright = u'2007-2013, the SQLAlchemy authors and contributors' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = "0.8" # The full version, including alpha/beta/rc tags. release = "0.8.4" release_date = "December 8, 2013" site_base = "http://www.sqlalchemy.org" # arbitrary number recognized by builders.py, incrementing this # will force a rebuild build_number = 3 # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['build'] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # have the "gettext" build generate .pot for each individual # .rst gettext_compact = False # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'default' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The style sheet to use for HTML and HTML Help pages. A file of that name # must exist either in Sphinx' static/ path, or in one of the custom paths # given in html_static_path. html_style = 'default.css' # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". html_title = "%s %s Documentation" % (project, version) # A shorter title for the navigation bar. Default is the same as html_title. 
#html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. html_last_updated_fmt = '%m/%d/%Y %H:%M:%S' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. html_domain_indices = False # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, the reST sources are included in the HTML build as _sources/. #html_copy_source = True # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'SQLAlchemydoc' #autoclass_content = 'both' # -- Options for LaTeX output -------------------------------------------------- # The paper size ('letter' or 'a4'). #latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). #latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ('contents', 'sqlalchemy_%s.tex' % release.replace('.', '_'), ur'SQLAlchemy Documentation', ur'Mike Bayer', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Additional stuff for the LaTeX preamble. # sets TOC depth to 2. latex_preamble = '\setcounter{tocdepth}{3}' # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True #latex_elements = { # 'papersize': 'letterpaper', # 'pointsize': '10pt', #} # -- Options for manual page output -------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). 
man_pages = [ ('index', 'sqlalchemy', u'SQLAlchemy Documentation', [u'SQLAlchemy authors'], 1) ] # -- Options for Epub output --------------------------------------------------- # Bibliographic Dublin Core info. epub_title = u'SQLAlchemy' epub_author = u'SQLAlchemy authors' epub_publisher = u'SQLAlchemy authors' epub_copyright = u'2013, SQLAlchemy authors' # The language of the text. It defaults to the language option # or en if the language is not set. #epub_language = '' # The scheme of the identifier. Typical schemes are ISBN or URL. #epub_scheme = '' # The unique identifier of the text. This can be a ISBN number # or the project homepage. #epub_identifier = '' # A unique identification for the text. #epub_uid = '' # HTML files that should be inserted before the pages created by sphinx. # The format is a list of tuples containing the path and title. #epub_pre_files = [] # HTML files that should be inserted after the pages created by sphinx. # The format is a list of tuples containing the path and title. #epub_post_files = [] # A list of files that should not be packed into the epub file. #epub_exclude_files = [] # The depth of the table of contents in toc.ncx. #epub_tocdepth = 3 # Allow duplicate toc entries. #epub_tocdup = True SQLAlchemy-0.8.4/doc/build/contents.rst0000644000076500000240000000052012251147171020462 0ustar classicstaff00000000000000.. _contents: Table of Contents ================= Full table of contents. For a high level overview of all documentation, see :ref:`index_toplevel`. .. toctree:: :maxdepth: 3 intro orm/index core/index dialects/index changelog/index Indices and tables ------------------ * :ref:`genindex` * :ref:`search` SQLAlchemy-0.8.4/doc/build/copyright.rst0000644000076500000240000000240212251147171020636 0ustar classicstaff00000000000000:orphan: ==================== Appendix: Copyright ==================== This is the MIT license: ``_ Copyright (c) 2005-2013 Michael Bayer and contributors. SQLAlchemy is a trademark of Michael Bayer. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. SQLAlchemy-0.8.4/doc/build/core/0000755000076500000240000000000012251151573017027 5ustar classicstaff00000000000000SQLAlchemy-0.8.4/doc/build/core/compiler.rst0000644000076500000240000000027512251147171021376 0ustar classicstaff00000000000000.. _sqlalchemy.ext.compiler_toplevel: Custom SQL Constructs and Compilation Extension =============================================== .. 
automodule:: sqlalchemy.ext.compiler :members:SQLAlchemy-0.8.4/doc/build/core/connections.rst0000644000076500000240000005340412251147171022110 0ustar classicstaff00000000000000.. _connections_toplevel: ===================================== Working with Engines and Connections ===================================== .. module:: sqlalchemy.engine This section details direct usage of the :class:`.Engine`, :class:`.Connection`, and related objects. Its important to note that when using the SQLAlchemy ORM, these objects are not generally accessed; instead, the :class:`.Session` object is used as the interface to the database. However, for applications that are built around direct usage of textual SQL statements and/or SQL expression constructs without involvement by the ORM's higher level management services, the :class:`.Engine` and :class:`.Connection` are king (and queen?) - read on. Basic Usage =========== Recall from :doc:`/core/engines` that an :class:`.Engine` is created via the :func:`.create_engine` call:: engine = create_engine('mysql://scott:tiger@localhost/test') The typical usage of :func:`.create_engine()` is once per particular database URL, held globally for the lifetime of a single application process. A single :class:`.Engine` manages many individual DBAPI connections on behalf of the process and is intended to be called upon in a concurrent fashion. The :class:`.Engine` is **not** synonymous to the DBAPI ``connect`` function, which represents just one connection resource - the :class:`.Engine` is most efficient when created just once at the module level of an application, not per-object or per-function call. For a multiple-process application that uses the ``os.fork`` system call, or for example the Python ``multiprocessing`` module, it's usually required that a separate :class:`.Engine` be used for each child process. This is because the :class:`.Engine` maintains a reference to a connection pool that ultimately references DBAPI connections - these tend to not be portable across process boundaries. An :class:`.Engine` that is configured not to use pooling (which is achieved via the usage of :class:`.NullPool`) does not have this requirement. The engine can be used directly to issue SQL to the database. The most generic way is first procure a connection resource, which you get via the :meth:`.Engine.connect` method:: connection = engine.connect() result = connection.execute("select username from users") for row in result: print "username:", row['username'] connection.close() The connection is an instance of :class:`.Connection`, which is a **proxy** object for an actual DBAPI connection. The DBAPI connection is retrieved from the connection pool at the point at which :class:`.Connection` is created. The returned result is an instance of :class:`.ResultProxy`, which references a DBAPI cursor and provides a largely compatible interface with that of the DBAPI cursor. The DBAPI cursor will be closed by the :class:`.ResultProxy` when all of its result rows (if any) are exhausted. A :class:`.ResultProxy` that returns no rows, such as that of an UPDATE statement (without any returned rows), releases cursor resources immediately upon construction. When the :meth:`~.Connection.close` method is called, the referenced DBAPI connection is :term:`released` to the connection pool. From the perspective of the database itself, nothing is actually "closed", assuming pooling is in use. 
The pooling mechanism issues a ``rollback()`` call on the DBAPI connection so that any transactional state or locks are removed, and the connection is ready for its next usage. The above procedure can be performed in a shorthand way by using the :meth:`~.Engine.execute` method of :class:`.Engine` itself:: result = engine.execute("select username from users") for row in result: print "username:", row['username'] Where above, the :meth:`~.Engine.execute` method acquires a new :class:`.Connection` on its own, executes the statement with that object, and returns the :class:`.ResultProxy`. In this case, the :class:`.ResultProxy` contains a special flag known as ``close_with_result``, which indicates that when its underlying DBAPI cursor is closed, the :class:`.Connection` object itself is also closed, which again returns the DBAPI connection to the connection pool, releasing transactional resources. If the :class:`.ResultProxy` potentially has rows remaining, it can be instructed to close out its resources explicitly:: result.close() If the :class:`.ResultProxy` has pending rows remaining and is dereferenced by the application without being closed, Python garbage collection will ultimately close out the cursor as well as trigger a return of the pooled DBAPI connection resource to the pool (SQLAlchemy achieves this by the usage of weakref callbacks - *never* the ``__del__`` method) - however it's never a good idea to rely upon Python garbage collection to manage resources. Our example above illustrated the execution of a textual SQL string. The :meth:`~.Connection.execute` method can of course accommodate more than that, including the variety of SQL expression constructs described in :ref:`sqlexpression_toplevel`. Using Transactions ================== .. note:: This section describes how to use transactions when working directly with :class:`.Engine` and :class:`.Connection` objects. When using the SQLAlchemy ORM, the public API for transaction control is via the :class:`.Session` object, which makes usage of the :class:`.Transaction` object internally. See :ref:`unitofwork_transaction` for further information. The :class:`~sqlalchemy.engine.Connection` object provides a :meth:`~.Connection.begin` method which returns a :class:`.Transaction` object. This object is usually used within a try/except clause so that it is guaranteed to invoke :meth:`.Transaction.rollback` or :meth:`.Transaction.commit`:: connection = engine.connect() trans = connection.begin() try: r1 = connection.execute(table1.select()) connection.execute(table1.insert(), col1=7, col2='this is some data') trans.commit() except: trans.rollback() raise The above block can be created more succinctly using context managers, either given an :class:`.Engine`:: # runs a transaction with engine.begin() as connection: r1 = connection.execute(table1.select()) connection.execute(table1.insert(), col1=7, col2='this is some data') Or from the :class:`.Connection`, in which case the :class:`.Transaction` object is available as well:: with connection.begin() as trans: r1 = connection.execute(table1.select()) connection.execute(table1.insert(), col1=7, col2='this is some data') .. _connections_nested_transactions: Nesting of Transaction Blocks ------------------------------ The :class:`.Transaction` object also handles "nested" behavior by keeping track of the outermost begin/commit pair. 
In this example, two functions both issue a transaction on a :class:`.Connection`, but only the outermost :class:`.Transaction` object actually takes effect when it is committed. .. sourcecode:: python+sql # method_a starts a transaction and calls method_b def method_a(connection): trans = connection.begin() # open a transaction try: method_b(connection) trans.commit() # transaction is committed here except: trans.rollback() # this rolls back the transaction unconditionally raise # method_b also starts a transaction def method_b(connection): trans = connection.begin() # open a transaction - this runs in the context of method_a's transaction try: connection.execute("insert into mytable values ('bat', 'lala')") connection.execute(mytable.insert(), col1='bat', col2='lala') trans.commit() # transaction is not committed yet except: trans.rollback() # this rolls back the transaction unconditionally raise # open a Connection and call method_a conn = engine.connect() method_a(conn) conn.close() Above, ``method_a`` is called first, which calls ``connection.begin()``. Then it calls ``method_b``. When ``method_b`` calls ``connection.begin()``, it just increments a counter that is decremented when it calls ``commit()``. If either ``method_a`` or ``method_b`` calls ``rollback()``, the whole transaction is rolled back. The transaction is not committed until ``method_a`` calls the ``commit()`` method. This "nesting" behavior allows the creation of functions which "guarantee" that a transaction will be used if one was not already available, but will automatically participate in an enclosing transaction if one exists. .. index:: single: thread safety; transactions .. _autocommit: Understanding Autocommit ======================== The previous transaction example illustrates how to use :class:`.Transaction` so that several executions can take part in the same transaction. What happens when we issue an INSERT, UPDATE or DELETE call without using :class:`.Transaction`? While some DBAPI implementations provide various special "non-transactional" modes, the core behavior of DBAPI per PEP-0249 is that a *transaction is always in progress*, providing only ``rollback()`` and ``commit()`` methods but no ``begin()``. SQLAlchemy assumes this is the case for any given DBAPI. Given this requirement, SQLAlchemy implements its own "autocommit" feature which works completely consistently across all backends. This is achieved by detecting statements which represent data-changing operations, i.e. INSERT, UPDATE, DELETE, as well as data definition language (DDL) statements such as CREATE TABLE, ALTER TABLE, and then issuing a COMMIT automatically if no transaction is in progress. The detection is based on the presence of the ``autocommit=True`` execution option on the statement. If the statement is a text-only statement and the flag is not set, a regular expression is used to detect INSERT, UPDATE, DELETE, as well as a variety of other commands for a particular backend:: conn = engine.connect() conn.execute("INSERT INTO users VALUES (1, 'john')") # autocommits The "autocommit" feature is only in effect when no :class:`.Transaction` has otherwise been declared. This means the feature is not generally used with the ORM, as the :class:`.Session` object by default always maintains an ongoing :class:`.Transaction`. 
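To illustrate the distinction, here is a minimal sketch reusing the ``table1`` construct from the transaction examples above::

    conn = engine.connect()

    # no Transaction has been declared: the INSERT is detected as a
    # data-changing statement and a COMMIT is emitted automatically
    conn.execute(table1.insert(), col1=7, col2='autocommitted')

    # with an explicit Transaction in progress, autocommit is not used;
    # nothing is committed until trans.commit() is called
    trans = conn.begin()
    conn.execute(table1.insert(), col1=8, col2='committed explicitly')
    trans.commit()

    conn.close()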
Full control of the "autocommit" behavior is available using the generative :meth:`.Connection.execution_options` method provided on :class:`.Connection`, :class:`.Engine`, :class:`.Executable`, using the "autocommit" flag which will turn on or off the autocommit for the selected scope. For example, a :func:`.text` construct representing a stored procedure that commits might use it so that a SELECT statement will issue a COMMIT:: engine.execute(text("SELECT my_mutating_procedure()").execution_options(autocommit=True)) .. _dbengine_implicit: Connectionless Execution, Implicit Execution ============================================= Recall from the first section we mentioned executing with and without explicit usage of :class:`.Connection`. "Connectionless" execution refers to the usage of the ``execute()`` method on an object which is not a :class:`.Connection`. This was illustrated using the :meth:`~.Engine.execute` method of :class:`.Engine`:: result = engine.execute("select username from users") for row in result: print "username:", row['username'] In addition to "connectionless" execution, it is also possible to use the :meth:`~.Executable.execute` method of any :class:`.Executable` construct, which is a marker for SQL expression objects that support execution. The SQL expression object itself references an :class:`.Engine` or :class:`.Connection` known as the **bind**, which it uses in order to provide so-called "implicit" execution services. Given a table as below:: from sqlalchemy import MetaData, Table, Column, Integer meta = MetaData() users_table = Table('users', meta, Column('id', Integer, primary_key=True), Column('name', String(50)) ) Explicit execution delivers the SQL text or constructed SQL expression to the :meth:`~.Connection.execute` method of :class:`~sqlalchemy.engine.Connection`: .. sourcecode:: python+sql engine = create_engine('sqlite:///file.db') connection = engine.connect() result = connection.execute(users_table.select()) for row in result: # .... connection.close() Explicit, connectionless execution delivers the expression to the :meth:`~.Engine.execute` method of :class:`~sqlalchemy.engine.Engine`: .. sourcecode:: python+sql engine = create_engine('sqlite:///file.db') result = engine.execute(users_table.select()) for row in result: # .... result.close() Implicit execution is also connectionless, and makes usage of the :meth:`~.Executable.execute` method on the expression itself. This method is provided as part of the :class:`.Executable` class, which refers to a SQL statement that is sufficient for being invoked against the database. The method makes usage of the assumption that either an :class:`~sqlalchemy.engine.Engine` or :class:`~sqlalchemy.engine.Connection` has been **bound** to the expression object. By "bound" we mean that the special attribute :attr:`.MetaData.bind` has been used to associate a series of :class:`.Table` objects and all SQL constructs derived from them with a specific engine:: engine = create_engine('sqlite:///file.db') meta.bind = engine result = users_table.select().execute() for row in result: # .... result.close() Above, we associate an :class:`.Engine` with a :class:`.MetaData` object using the special attribute :attr:`.MetaData.bind`. The :func:`.select` construct produced from the :class:`.Table` object has a method :meth:`~.Executable.execute`, which will search for an :class:`.Engine` that's "bound" to the :class:`.Table`. 
Overall, the usage of "bound metadata" has three general effects: * SQL statement objects gain an :meth:`.Executable.execute` method which automatically locates a "bind" with which to execute themselves. * The ORM :class:`.Session` object supports using "bound metadata" in order to establish which :class:`.Engine` should be used to invoke SQL statements on behalf of a particular mapped class, though the :class:`.Session` also features its own explicit system of establishing complex :class:`.Engine`/ mapped class configurations. * The :meth:`.MetaData.create_all`, :meth:`.MetaData.drop_all`, :meth:`.Table.create`, :meth:`.Table.drop`, and "autoload" features all make usage of the bound :class:`.Engine` automatically without the need to pass it explicitly. .. note:: The concepts of "bound metadata" and "implicit execution" are not emphasized in modern SQLAlchemy. While they offer some convenience, they are no longer required by any API and are never necessary. In applications where multiple :class:`.Engine` objects are present, each one logically associated with a certain set of tables (i.e. *vertical sharding*), the "bound metadata" technique can be used so that individual :class:`.Table` can refer to the appropriate :class:`.Engine` automatically; in particular this is supported within the ORM via the :class:`.Session` object as a means to associate :class:`.Table` objects with an appropriate :class:`.Engine`, as an alternative to using the bind arguments accepted directly by the :class:`.Session`. However, the "implicit execution" technique is not at all appropriate for use with the ORM, as it bypasses the transactional context maintained by the :class:`.Session`. Overall, in the *vast majority* of cases, "bound metadata" and "implicit execution" are **not useful**. While "bound metadata" has a marginal level of usefulness with regards to ORM configuration, "implicit execution" is a very old usage pattern that in most cases is more confusing than it is helpful, and its usage is discouraged. Both patterns seem to encourage the overuse of expedient "short cuts" in application design which lead to problems later on. Modern SQLAlchemy usage, especially the ORM, places a heavy stress on working within the context of a transaction at all times; the "implicit execution" concept makes the job of associating statement execution with a particular transaction much more difficult. The :meth:`.Executable.execute` method on a particular SQL statement usually implies that the execution is not part of any particular transaction, which is usually not the desired effect. In both "connectionless" examples, the :class:`~sqlalchemy.engine.Connection` is created behind the scenes; the :class:`~sqlalchemy.engine.ResultProxy` returned by the ``execute()`` call references the :class:`~sqlalchemy.engine.Connection` used to issue the SQL statement. When the :class:`.ResultProxy` is closed, the underlying :class:`.Connection` is closed for us, resulting in the DBAPI connection being returned to the pool with transactional resources removed. .. _threadlocal_strategy: Using the Threadlocal Execution Strategy ======================================== The "threadlocal" engine strategy is an optional feature which can be used by non-ORM applications to associate transactions with the current thread, such that all parts of the application can participate in that transaction implicitly without the need to explicitly reference a :class:`.Connection`. .. note:: The "threadlocal" feature is generally discouraged. 
It's designed for a particular pattern of usage which is generally considered as a legacy pattern. It has **no impact** on the "thread safety" of SQLAlchemy components or one's application. It also should not be used when using an ORM :class:`~sqlalchemy.orm.session.Session` object, as the :class:`~sqlalchemy.orm.session.Session` itself represents an ongoing transaction and itself handles the job of maintaining connection and transactional resources. Enabling ``threadlocal`` is achieved as follows:: db = create_engine('mysql://localhost/test', strategy='threadlocal') The above :class:`.Engine` will now acquire a :class:`.Connection` using connection resources derived from a thread-local variable whenever :meth:`.Engine.execute` or :meth:`.Engine.contextual_connect` is called. This connection resource is maintained as long as it is referenced, which allows multiple points of an application to share a transaction while using connectionless execution:: def call_operation1(): engine.execute("insert into users values (?, ?)", 1, "john") def call_operation2(): users.update(users.c.user_id==5).execute(name='ed') db.begin() try: call_operation1() call_operation2() db.commit() except: db.rollback() Explicit execution can be mixed with connectionless execution by using the :meth:`.Engine.connect` method to acquire a :class:`.Connection` that is not part of the threadlocal scope:: db.begin() conn = db.connect() try: conn.execute(log_table.insert(), message="Operation started") call_operation1() call_operation2() db.commit() conn.execute(log_table.insert(), message="Operation succeeded") except: db.rollback() conn.execute(log_table.insert(), message="Operation failed") finally: conn.close() To access the :class:`.Connection` that is bound to the threadlocal scope, call :meth:`.Engine.contextual_connect`:: conn = db.contextual_connect() call_operation3(conn) conn.close() Calling :meth:`~.Connection.close` on the "contextual" connection does not :term:`release` its resources until all other usages of that resource are closed as well, including that any ongoing transactions are rolled back or committed. Registering New Dialects ======================== The :func:`.create_engine` function call locates the given dialect using setuptools entrypoints. These entry points can be established for third party dialects within the setup.py script. For example, to create a new dialect "foodialect://", the steps are as follows: 1. Create a package called ``foodialect``. 2. The package should have a module containing the dialect class, which is typically a subclass of :class:`sqlalchemy.engine.default.DefaultDialect`. In this example let's say it's called ``FooDialect`` and its module is accessed via ``foodialect.dialect``. 3. The entry point can be established in setup.py as follows:: entry_points=""" [sqlalchemy.dialects] foodialect = foodialect.dialect:FooDialect """ If the dialect is providing support for a particular DBAPI on top of an existing SQLAlchemy-supported database, the name can be given including a database-qualification. For example, if ``FooDialect`` were in fact a MySQL dialect, the entry point could be established like this:: entry_points=""" [sqlalchemy.dialects] mysql.foodialect = foodialect.dialect:FooDialect """ The above entrypoint would then be accessed as ``create_engine("mysql+foodialect://")``. Registering Dialects In-Process ------------------------------- SQLAlchemy also allows a dialect to be registered within the current process, bypassing the need for separate installation. 
Use the ``register()`` function as follows:: from sqlalchemy.dialects import registry registry.register("mysql.foodialect", "myapp.dialect", "MyMySQLDialect") The above will respond to ``create_engine("mysql+foodialect://")`` and load the ``MyMySQLDialect`` class from the ``myapp.dialect`` module. .. versionadded:: 0.8 Connection / Engine API ======================= .. autoclass:: Connection :members: .. autoclass:: Connectable :members: .. autoclass:: Engine :members: .. autoclass:: NestedTransaction :members: .. autoclass:: sqlalchemy.engine.ResultProxy :members: .. autoclass:: sqlalchemy.engine.RowProxy :members: .. autoclass:: Transaction :members: .. autoclass:: TwoPhaseTransaction :members: SQLAlchemy-0.8.4/doc/build/core/constraints.rst0000644000076500000240000003335012251147171022133 0ustar classicstaff00000000000000.. _metadata_constraints_toplevel: .. _metadata_constraints: .. module:: sqlalchemy.schema ================================= Defining Constraints and Indexes ================================= .. _metadata_foreignkeys: This section will discuss SQL :term:`constraints` and indexes. In SQLAlchemy the key classes include :class:`.ForeignKeyConstraint` and :class:`.Index`. Defining Foreign Keys --------------------- A *foreign key* in SQL is a table-level construct that constrains one or more columns in that table to only allow values that are present in a different set of columns, typically but not always located on a different table. We call the columns which are constrained the *foreign key* columns and the columns which they are constrained towards the *referenced* columns. The referenced columns almost always define the primary key for their owning table, though there are exceptions to this. The foreign key is the "joint" that connects together pairs of rows which have a relationship with each other, and SQLAlchemy assigns very deep importance to this concept in virtually every area of its operation. In SQLAlchemy as well as in DDL, foreign key constraints can be defined as additional attributes within the table clause, or for single-column foreign keys they may optionally be specified within the definition of a single column. The single column foreign key is more common, and at the column level is specified by constructing a :class:`~sqlalchemy.schema.ForeignKey` object as an argument to a :class:`~sqlalchemy.schema.Column` object:: user_preference = Table('user_preference', metadata, Column('pref_id', Integer, primary_key=True), Column('user_id', Integer, ForeignKey("user.user_id"), nullable=False), Column('pref_name', String(40), nullable=False), Column('pref_value', String(100)) ) Above, we define a new table ``user_preference`` for which each row must contain a value in the ``user_id`` column that also exists in the ``user`` table's ``user_id`` column. The argument to :class:`~sqlalchemy.schema.ForeignKey` is most commonly a string of the form *.*, or for a table in a remote schema or "owner" of the form *..*. It may also be an actual :class:`~sqlalchemy.schema.Column` object, which as we'll see later is accessed from an existing :class:`~sqlalchemy.schema.Table` object via its ``c`` collection:: ForeignKey(user.c.user_id) The advantage to using a string is that the in-python linkage between ``user`` and ``user_preference`` is resolved only when first needed, so that table objects can be easily spread across multiple modules and defined in any order. 
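The accepted forms of the :class:`~sqlalchemy.schema.ForeignKey` argument described above can be summarized as follows; this is a sketch only, and the ``otherschema`` name is illustrative::

    # string of the form <tablename>.<columnname>
    ForeignKey("user.user_id")

    # string qualified with a remote schema or "owner"
    ForeignKey("otherschema.user.user_id")

    # an actual Column object, accessed via the table's .c collection
    ForeignKey(user.c.user_id)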
Foreign keys may also be defined at the table level, using the :class:`~sqlalchemy.schema.ForeignKeyConstraint` object. This object can describe a single- or multi-column foreign key. A multi-column foreign key is known as a *composite* foreign key, and almost always references a table that has a composite primary key. Below we define a table ``invoice`` which has a composite primary key:: invoice = Table('invoice', metadata, Column('invoice_id', Integer, primary_key=True), Column('ref_num', Integer, primary_key=True), Column('description', String(60), nullable=False) ) And then a table ``invoice_item`` with a composite foreign key referencing ``invoice``:: invoice_item = Table('invoice_item', metadata, Column('item_id', Integer, primary_key=True), Column('item_name', String(60), nullable=False), Column('invoice_id', Integer, nullable=False), Column('ref_num', Integer, nullable=False), ForeignKeyConstraint(['invoice_id', 'ref_num'], ['invoice.invoice_id', 'invoice.ref_num']) ) It's important to note that the :class:`~sqlalchemy.schema.ForeignKeyConstraint` is the only way to define a composite foreign key. While we could also have placed individual :class:`~sqlalchemy.schema.ForeignKey` objects on both the ``invoice_item.invoice_id`` and ``invoice_item.ref_num`` columns, SQLAlchemy would not be aware that these two values should be paired together - it would be two individual foreign key constraints instead of a single composite foreign key referencing two columns. .. _use_alter: Creating/Dropping Foreign Key Constraints via ALTER ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In all the above examples, the :class:`~sqlalchemy.schema.ForeignKey` object causes the "REFERENCES" keyword to be added inline to a column definition within a "CREATE TABLE" statement when :func:`~sqlalchemy.schema.MetaData.create_all` is issued, and :class:`~sqlalchemy.schema.ForeignKeyConstraint` invokes the "CONSTRAINT" keyword inline with "CREATE TABLE". There are some cases where this is undesirable, particularly when two tables reference each other mutually, each with a foreign key referencing the other. In such a situation at least one of the foreign key constraints must be generated after both tables have been built. To support such a scheme, :class:`~sqlalchemy.schema.ForeignKey` and :class:`~sqlalchemy.schema.ForeignKeyConstraint` offer the flag ``use_alter=True``. When using this flag, the constraint will be generated using a definition similar to "ALTER TABLE ADD CONSTRAINT ...". Since a name is required, the ``name`` attribute must also be specified. For example:: node = Table('node', meta, Column('node_id', Integer, primary_key=True), Column('primary_element', Integer, ForeignKey('element.element_id', use_alter=True, name='fk_node_element_id') ) ) element = Table('element', meta, Column('element_id', Integer, primary_key=True), Column('parent_node_id', Integer), ForeignKeyConstraint( ['parent_node_id'], ['node.node_id'], use_alter=True, name='fk_element_parent_node_id' ) ) ON UPDATE and ON DELETE ~~~~~~~~~~~~~~~~~~~~~~~ Most databases support *cascading* of foreign key values, that is, when a parent row is updated the new value is placed in child rows, or when the parent row is deleted all corresponding child rows are set to null or deleted. In data definition language these are specified using phrases like "ON UPDATE CASCADE", "ON DELETE CASCADE", and "ON DELETE SET NULL", corresponding to foreign key constraints.
The phrase after "ON UPDATE" or "ON DELETE" may also other allow other phrases that are specific to the database in use. The :class:`~sqlalchemy.schema.ForeignKey` and :class:`~sqlalchemy.schema.ForeignKeyConstraint` objects support the generation of this clause via the ``onupdate`` and ``ondelete`` keyword arguments. The value is any string which will be output after the appropriate "ON UPDATE" or "ON DELETE" phrase:: child = Table('child', meta, Column('id', Integer, ForeignKey('parent.id', onupdate="CASCADE", ondelete="CASCADE"), primary_key=True ) ) composite = Table('composite', meta, Column('id', Integer, primary_key=True), Column('rev_id', Integer), Column('note_id', Integer), ForeignKeyConstraint( ['rev_id', 'note_id'], ['revisions.id', 'revisions.note_id'], onupdate="CASCADE", ondelete="SET NULL" ) ) Note that these clauses are not supported on SQLite, and require ``InnoDB`` tables when used with MySQL. They may also not be supported on other databases. UNIQUE Constraint ----------------- Unique constraints can be created anonymously on a single column using the ``unique`` keyword on :class:`~sqlalchemy.schema.Column`. Explicitly named unique constraints and/or those with multiple columns are created via the :class:`~sqlalchemy.schema.UniqueConstraint` table-level construct. .. sourcecode:: python+sql meta = MetaData() mytable = Table('mytable', meta, # per-column anonymous unique constraint Column('col1', Integer, unique=True), Column('col2', Integer), Column('col3', Integer), # explicit/composite unique constraint. 'name' is optional. UniqueConstraint('col2', 'col3', name='uix_1') ) CHECK Constraint ---------------- Check constraints can be named or unnamed and can be created at the Column or Table level, using the :class:`~sqlalchemy.schema.CheckConstraint` construct. The text of the check constraint is passed directly through to the database, so there is limited "database independent" behavior. Column level check constraints generally should only refer to the column to which they are placed, while table level constraints can refer to any columns in the table. Note that some databases do not actively support check constraints such as MySQL. .. sourcecode:: python+sql meta = MetaData() mytable = Table('mytable', meta, # per-column CHECK constraint Column('col1', Integer, CheckConstraint('col1>5')), Column('col2', Integer), Column('col3', Integer), # table level CHECK constraint. 'name' is optional. CheckConstraint('col2 > col3 + 5', name='check1') ) {sql}mytable.create(engine) CREATE TABLE mytable ( col1 INTEGER CHECK (col1>5), col2 INTEGER, col3 INTEGER, CONSTRAINT check1 CHECK (col2 > col3 + 5) ){stop} Setting up Constraints when using the Declarative ORM Extension ---------------------------------------------------------------- The :class:`.Table` is the SQLAlchemy Core construct that allows one to define table metadata, which among other things can be used by the SQLAlchemy ORM as a target to map a class. The :ref:`Declarative ` extension allows the :class:`.Table` object to be created automatically, given the contents of the table primarily as a mapping of :class:`.Column` objects. To apply table-level constraint objects such as :class:`.ForeignKeyConstraint` to a table defined using Declarative, use the ``__table_args__`` attribute, described at :ref:`declarative_table_args`. Constraints API --------------- .. autoclass:: Constraint .. autoclass:: CheckConstraint .. autoclass:: ColumnCollectionConstraint .. autoclass:: ForeignKey :members: .. 
autoclass:: ForeignKeyConstraint :members: .. autoclass:: PrimaryKeyConstraint .. autoclass:: UniqueConstraint .. _schema_indexes: Indexes ------- Indexes can be created anonymously (using an auto-generated name ``ix_``) for a single column using the inline ``index`` keyword on :class:`~sqlalchemy.schema.Column`, which also modifies the usage of ``unique`` to apply the uniqueness to the index itself, instead of adding a separate UNIQUE constraint. For indexes with specific names or which encompass more than one column, use the :class:`~sqlalchemy.schema.Index` construct, which requires a name. Below we illustrate a :class:`~sqlalchemy.schema.Table` with several :class:`~sqlalchemy.schema.Index` objects associated. The DDL for "CREATE INDEX" is issued right after the create statements for the table: .. sourcecode:: python+sql meta = MetaData() mytable = Table('mytable', meta, # an indexed column, with index "ix_mytable_col1" Column('col1', Integer, index=True), # a uniquely indexed column with index "ix_mytable_col2" Column('col2', Integer, index=True, unique=True), Column('col3', Integer), Column('col4', Integer), Column('col5', Integer), Column('col6', Integer), ) # place an index on col3, col4 Index('idx_col34', mytable.c.col3, mytable.c.col4) # place a unique index on col5, col6 Index('myindex', mytable.c.col5, mytable.c.col6, unique=True) {sql}mytable.create(engine) CREATE TABLE mytable ( col1 INTEGER, col2 INTEGER, col3 INTEGER, col4 INTEGER, col5 INTEGER, col6 INTEGER ) CREATE INDEX ix_mytable_col1 ON mytable (col1) CREATE UNIQUE INDEX ix_mytable_col2 ON mytable (col2) CREATE UNIQUE INDEX myindex ON mytable (col5, col6) CREATE INDEX idx_col34 ON mytable (col3, col4){stop} Note in the example above, the :class:`.Index` construct is created externally to the table which it corresponds, using :class:`.Column` objects directly. :class:`.Index` also supports "inline" definition inside the :class:`.Table`, using string names to identify columns:: meta = MetaData() mytable = Table('mytable', meta, Column('col1', Integer), Column('col2', Integer), Column('col3', Integer), Column('col4', Integer), # place an index on col1, col2 Index('idx_col12', 'col1', 'col2'), # place a unique index on col3, col4 Index('idx_col34', 'col3', 'col4', unique=True) ) .. versionadded:: 0.7 Support of "inline" definition inside the :class:`.Table` for :class:`.Index`\ . The :class:`~sqlalchemy.schema.Index` object also supports its own ``create()`` method: .. sourcecode:: python+sql i = Index('someindex', mytable.c.col5) {sql}i.create(engine) CREATE INDEX someindex ON mytable (col5){stop} .. _schema_indexes_functional: Functional Indexes ~~~~~~~~~~~~~~~~~~~ :class:`.Index` supports SQL and function expressions, as supported by the target backend. To create an index against a column using a descending value, the :meth:`.ColumnElement.desc` modifier may be used:: from sqlalchemy import Index Index('someindex', mytable.c.somecol.desc()) Or with a backend that supports functional indexes such as Postgresql, a "case insensitive" index can be created using the ``lower()`` function:: from sqlalchemy import func, Index Index('someindex', func.lower(mytable.c.somecol)) .. versionadded:: 0.8 :class:`.Index` supports SQL expressions and functions as well as plain columns. Index API --------- .. autoclass:: Index :members: SQLAlchemy-0.8.4/doc/build/core/ddl.rst0000644000076500000240000002103012251147171020317 0ustar classicstaff00000000000000.. _metadata_ddl_toplevel: .. _metadata_ddl: .. 
module:: sqlalchemy.schema Customizing DDL =============== In the preceding sections we've discussed a variety of schema constructs including :class:`~sqlalchemy.schema.Table`, :class:`~sqlalchemy.schema.ForeignKeyConstraint`, :class:`~sqlalchemy.schema.CheckConstraint`, and :class:`~sqlalchemy.schema.Sequence`. Throughout, we've relied upon the ``create()`` and :func:`~sqlalchemy.schema.MetaData.create_all` methods of :class:`~sqlalchemy.schema.Table` and :class:`~sqlalchemy.schema.MetaData` in order to issue data definition language (DDL) for all constructs. When issued, a pre-determined order of operations is invoked, and DDL to create each table is created unconditionally including all constraints and other objects associated with it. For more complex scenarios where database-specific DDL is required, SQLAlchemy offers two techniques which can be used to add any DDL based on any condition, either accompanying the standard generation of tables or by itself. .. _schema_ddl_sequences: Controlling DDL Sequences ------------------------- The ``sqlalchemy.schema`` package contains SQL expression constructs that provide DDL expressions. For example, to produce a ``CREATE TABLE`` statement: .. sourcecode:: python+sql from sqlalchemy.schema import CreateTable {sql}engine.execute(CreateTable(mytable)) CREATE TABLE mytable ( col1 INTEGER, col2 INTEGER, col3 INTEGER, col4 INTEGER, col5 INTEGER, col6 INTEGER ){stop} Above, the :class:`~sqlalchemy.schema.CreateTable` construct works like any other expression construct (such as ``select()``, ``table.insert()``, etc.). A full reference of available constructs is in :ref:`schema_api_ddl`. The DDL constructs all extend a common base class which provides the capability to be associated with an individual :class:`~sqlalchemy.schema.Table` or :class:`~sqlalchemy.schema.MetaData` object, to be invoked upon create/drop events. Consider the example of a table which contains a CHECK constraint: .. sourcecode:: python+sql users = Table('users', metadata, Column('user_id', Integer, primary_key=True), Column('user_name', String(40), nullable=False), CheckConstraint('length(user_name) >= 8',name="cst_user_name_length") ) {sql}users.create(engine) CREATE TABLE users ( user_id SERIAL NOT NULL, user_name VARCHAR(40) NOT NULL, PRIMARY KEY (user_id), CONSTRAINT cst_user_name_length CHECK (length(user_name) >= 8) ){stop} The above table contains a column "user_name" which is subject to a CHECK constraint that validates that the length of the string is at least eight characters. When a ``create()`` is issued for this table, DDL for the :class:`~sqlalchemy.schema.CheckConstraint` will also be issued inline within the table definition. The :class:`~sqlalchemy.schema.CheckConstraint` construct can also be constructed externally and associated with the :class:`~sqlalchemy.schema.Table` afterwards:: constraint = CheckConstraint('length(user_name) >= 8',name="cst_user_name_length") users.append_constraint(constraint) So far, the effect is the same. However, if we create DDL elements corresponding to the creation and removal of this constraint, and associate them with the :class:`.Table` as events, these new events will take over the job of issuing DDL for the constraint. Additionally, the constraint will be added via ALTER: .. 
sourcecode:: python+sql from sqlalchemy import event event.listen( users, "after_create", AddConstraint(constraint) ) event.listen( users, "before_drop", DropConstraint(constraint) ) {sql}users.create(engine) CREATE TABLE users ( user_id SERIAL NOT NULL, user_name VARCHAR(40) NOT NULL, PRIMARY KEY (user_id) ) ALTER TABLE users ADD CONSTRAINT cst_user_name_length CHECK (length(user_name) >= 8){stop} {sql}users.drop(engine) ALTER TABLE users DROP CONSTRAINT cst_user_name_length DROP TABLE users{stop} The real usefulness of the above becomes clearer once we illustrate the :meth:`.DDLElement.execute_if` method. This method returns a modified form of the DDL callable which will filter on criteria before responding to a received event. It accepts a parameter ``dialect``, which is the string name of a dialect or a tuple of such, which will limit the execution of the item to just those dialects. It also accepts a ``callable_`` parameter which may reference a Python callable which will be invoked upon event reception, returning ``True`` or ``False`` indicating if the event should proceed. If our :class:`~sqlalchemy.schema.CheckConstraint` was only supported by Postgresql and not other databases, we could limit its usage to just that dialect:: event.listen( users, 'after_create', AddConstraint(constraint).execute_if(dialect='postgresql') ) event.listen( users, 'before_drop', DropConstraint(constraint).execute_if(dialect='postgresql') ) Or to any set of dialects:: event.listen( users, "after_create", AddConstraint(constraint).execute_if(dialect=('postgresql', 'mysql')) ) event.listen( users, "before_drop", DropConstraint(constraint).execute_if(dialect=('postgresql', 'mysql')) ) When using a callable, the callable is passed the ddl element, the :class:`.Table` or :class:`.MetaData` object whose "create" or "drop" event is in progress, and the :class:`.Connection` object being used for the operation, as well as additional information as keyword arguments. The callable can perform checks, such as whether or not a given item already exists. Below we define ``should_create()`` and ``should_drop()`` callables that check for the presence of our named constraint: .. sourcecode:: python+sql def should_create(ddl, target, connection, **kw): row = connection.execute("select conname from pg_constraint where conname='%s'" % ddl.element.name).scalar() return not bool(row) def should_drop(ddl, target, connection, **kw): return not should_create(ddl, target, connection, **kw) event.listen( users, "after_create", AddConstraint(constraint).execute_if(callable_=should_create) ) event.listen( users, "before_drop", DropConstraint(constraint).execute_if(callable_=should_drop) ) {sql}users.create(engine) CREATE TABLE users ( user_id SERIAL NOT NULL, user_name VARCHAR(40) NOT NULL, PRIMARY KEY (user_id) ) select conname from pg_constraint where conname='cst_user_name_length' ALTER TABLE users ADD CONSTRAINT cst_user_name_length CHECK (length(user_name) >= 8){stop} {sql}users.drop(engine) select conname from pg_constraint where conname='cst_user_name_length' ALTER TABLE users DROP CONSTRAINT cst_user_name_length DROP TABLE users{stop} Custom DDL ---------- Custom DDL phrases are most easily achieved using the :class:`~sqlalchemy.schema.DDL` construct. This construct works like all the other DDL elements except it accepts a string which is the text to be emitted: .. 
sourcecode:: python+sql event.listen( metadata, "after_create", DDL("ALTER TABLE users ADD CONSTRAINT " "cst_user_name_length " " CHECK (length(user_name) >= 8)") ) A more comprehensive method of creating libraries of DDL constructs is to use custom compilation - see :ref:`sqlalchemy.ext.compiler_toplevel` for details. .. _schema_api_ddl: DDL Expression Constructs API ----------------------------- .. autoclass:: DDLElement :members: :undoc-members: .. autoclass:: DDL :members: :undoc-members: .. autoclass:: CreateTable :members: :undoc-members: .. autoclass:: DropTable :members: :undoc-members: .. autoclass:: CreateColumn :members: :undoc-members: .. autoclass:: CreateSequence :members: :undoc-members: .. autoclass:: DropSequence :members: :undoc-members: .. autoclass:: CreateIndex :members: :undoc-members: .. autoclass:: DropIndex :members: :undoc-members: .. autoclass:: AddConstraint :members: :undoc-members: .. autoclass:: DropConstraint :members: :undoc-members: .. autoclass:: CreateSchema :members: :undoc-members: .. autoclass:: DropSchema :members: :undoc-members: SQLAlchemy-0.8.4/doc/build/core/defaults.rst0000644000076500000240000003451412251147171021376 0ustar classicstaff00000000000000.. _metadata_defaults_toplevel: .. _metadata_defaults: .. module:: sqlalchemy.schema Column Insert/Update Defaults ============================== SQLAlchemy provides a very rich featureset regarding column level events which take place during INSERT and UPDATE statements. Options include: * Scalar values used as defaults during INSERT and UPDATE operations * Python functions which execute upon INSERT and UPDATE operations * SQL expressions which are embedded in INSERT statements (or in some cases execute beforehand) * SQL expressions which are embedded in UPDATE statements * Server side default values used during INSERT * Markers for server-side triggers used during UPDATE The general rule for all insert/update defaults is that they only take effect if no value for a particular column is passed as an ``execute()`` parameter; otherwise, the given value is used. Scalar Defaults --------------- The simplest kind of default is a scalar value used as the default value of a column:: Table("mytable", meta, Column("somecolumn", Integer, default=12) ) Above, the value "12" will be bound as the column value during an INSERT if no other value is supplied. A scalar value may also be associated with an UPDATE statement, though this is not very common (as UPDATE statements are usually looking for dynamic defaults):: Table("mytable", meta, Column("somecolumn", Integer, onupdate=25) ) Python-Executed Functions ------------------------- The ``default`` and ``onupdate`` keyword arguments also accept Python functions. These functions are invoked at the time of insert or update if no other value for that column is supplied, and the value returned is used for the column's value. Below illustrates a crude "sequence" that assigns an incrementing counter to a primary key column:: # a function which counts upwards i = 0 def mydefault(): global i i += 1 return i t = Table("mytable", meta, Column('id', Integer, primary_key=True, default=mydefault), ) It should be noted that for real "incrementing sequence" behavior, the built-in capabilities of the database should normally be used, which may include sequence objects or other autoincrementing capabilities. For primary key columns, SQLAlchemy will in most cases use these capabilities automatically. 
See the API documentation for :class:`~sqlalchemy.schema.Column` including the ``autoincrement`` flag, as well as the section on :class:`~sqlalchemy.schema.Sequence` later in this chapter for background on standard primary key generation techniques. To illustrate onupdate, we assign the Python ``datetime`` function ``now`` to the ``onupdate`` attribute:: import datetime t = Table("mytable", meta, Column('id', Integer, primary_key=True), # define 'last_updated' to be populated with datetime.now() Column('last_updated', DateTime, onupdate=datetime.datetime.now), ) When an update statement executes and no value is passed for ``last_updated``, the ``datetime.datetime.now()`` Python function is executed and its return value used as the value for ``last_updated``. Notice that we provide ``now`` as the function itself without calling it (i.e. there are no parentheses following) - SQLAlchemy will execute the function at the time the statement executes. Context-Sensitive Default Functions ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The Python functions used by ``default`` and ``onupdate`` may also make use of the current statement's context in order to determine a value. The `context` of a statement is an internal SQLAlchemy object which contains all information about the statement being executed, including its source expression, the parameters associated with it and the cursor. The typical use case for this context with regards to default generation is to have access to the other values being inserted or updated on the row. To access the context, provide a function that accepts a single ``context`` argument:: def mydefault(context): return context.current_parameters['counter'] + 12 t = Table('mytable', meta, Column('counter', Integer), Column('counter_plus_twelve', Integer, default=mydefault, onupdate=mydefault) ) Above we illustrate a default function which will execute for all INSERT and UPDATE statements where a value for ``counter_plus_twelve`` was otherwise not provided, and the value will be that of whatever value is present in the execution for the ``counter`` column, plus the number 12. While the context object passed to the default function has many attributes, the ``current_parameters`` member is a special member provided only during the execution of a default function for the purposes of deriving defaults from its existing values. For a single statement that is executing many sets of bind parameters, the user-defined function is called for each set of parameters, and ``current_parameters`` will be provided with each individual parameter set for each execution. SQL Expressions --------------- The "default" and "onupdate" keywords may also be passed SQL expressions, including select statements or direct function calls:: t = Table("mytable", meta, Column('id', Integer, primary_key=True), # define 'create_date' to default to now() Column('create_date', DateTime, default=func.now()), # define 'key' to pull its default from the 'keyvalues' table Column('key', String(20), default=keyvalues.select(keyvalues.c.type == 'type1', limit=1)), # define 'last_modified' to use the current_timestamp SQL function on update Column('last_modified', DateTime, onupdate=func.utc_timestamp()) ) Above, the ``create_date`` column will be populated with the result of the ``now()`` SQL function (which, depending on backend, compiles into ``NOW()`` or ``CURRENT_TIMESTAMP`` in most cases) during an INSERT statement, and the ``key`` column with the result of a SELECT subquery from another table.
The ``last_modified`` column will be populated with the value of ``UTC_TIMESTAMP()``, a function specific to MySQL, when an UPDATE statement is emitted for this table. Note that when using ``func`` functions, unlike when using Python `datetime` functions we *do* call the function, i.e. with parenthesis "()" - this is because what we want in this case is the return value of the function, which is the SQL expression construct that will be rendered into the INSERT or UPDATE statement. The above SQL functions are usually executed "inline" with the INSERT or UPDATE statement being executed, meaning, a single statement is executed which embeds the given expressions or subqueries within the VALUES or SET clause of the statement. Although in some cases, the function is "pre-executed" in a SELECT statement of its own beforehand. This happens when all of the following is true: * the column is a primary key column * the database dialect does not support a usable ``cursor.lastrowid`` accessor (or equivalent); this currently includes PostgreSQL, Oracle, and Firebird, as well as some MySQL dialects. * the dialect does not support the "RETURNING" clause or similar, or the ``implicit_returning`` flag is set to ``False`` for the dialect. Dialects which support RETURNING currently include Postgresql, Oracle, Firebird, and MS-SQL. * the statement is a single execution, i.e. only supplies one set of parameters and doesn't use "executemany" behavior * the ``inline=True`` flag is not set on the :class:`~sqlalchemy.sql.expression.Insert()` or :class:`~sqlalchemy.sql.expression.Update()` construct, and the statement has not defined an explicit `returning()` clause. Whether or not the default generation clause "pre-executes" is not something that normally needs to be considered, unless it is being addressed for performance reasons. When the statement is executed with a single set of parameters (that is, it is not an "executemany" style execution), the returned :class:`~sqlalchemy.engine.ResultProxy` will contain a collection accessible via ``result.postfetch_cols()`` which contains a list of all :class:`~sqlalchemy.schema.Column` objects which had an inline-executed default. Similarly, all parameters which were bound to the statement, including all Python and SQL expressions which were pre-executed, are present in the ``last_inserted_params()`` or ``last_updated_params()`` collections on :class:`~sqlalchemy.engine.ResultProxy`. The ``inserted_primary_key`` collection contains a list of primary key values for the row inserted (a list so that single-column and composite-column primary keys are represented in the same format). Server Side Defaults -------------------- A variant on the SQL expression default is the ``server_default``, which gets placed in the CREATE TABLE statement during a ``create()`` operation: .. sourcecode:: python+sql t = Table('test', meta, Column('abc', String(20), server_default='abc'), Column('created_at', DateTime, server_default=text("sysdate")) ) A create call for the above table will produce:: CREATE TABLE test ( abc varchar(20) default 'abc', created_at datetime default sysdate ) The behavior of ``server_default`` is similar to that of a regular SQL default; if it's placed on a primary key column for a database which doesn't have a way to "postfetch" the ID, and the statement is not "inlined", the SQL expression is pre-executed; otherwise, SQLAlchemy lets the default fire off on the database side normally. .. 
_triggered_columns: Triggered Columns ------------------ Columns with values set by a database trigger or other external process may be called out using :class:`.FetchedValue` as a marker:: t = Table('test', meta, Column('abc', String(20), server_default=FetchedValue()), Column('def', String(20), server_onupdate=FetchedValue()) ) .. versionchanged:: 0.8.0b2,0.7.10 The ``for_update`` argument on :class:`.FetchedValue` is set automatically when specified as the ``server_onupdate`` argument. If using an older version, specify the onupdate above as ``server_onupdate=FetchedValue(for_update=True)``. These markers do not emit a "default" clause when the table is created, however they do set the same internal flags as a static ``server_default`` clause, providing hints to higher-level tools that a "post-fetch" of these rows should be performed after an insert or update. .. note:: It's generally not appropriate to use :class:`.FetchedValue` in conjunction with a primary key column, particularly when using the ORM or any other scenario where the :attr:`.ResultProxy.inserted_primary_key` attribute is required. This is because the "post-fetch" operation requires that the primary key value already be available, so that the row can be selected on its primary key. For a server-generated primary key value, all databases provide special accessors or other techniques in order to acquire the "last inserted primary key" column of a table. These mechanisms aren't affected by the presence of :class:`.FetchedValue`. For special situations where triggers are used to generate primary key values, and the database in use does not support the ``RETURNING`` clause, it may be necessary to forego the usage of the trigger and instead apply the SQL expression or function as a "pre execute" expression:: t = Table('test', meta, Column('abc', MyType, default=func.generate_new_value(), primary_key=True) ) Where above, when :meth:`.Table.insert` is used, the ``func.generate_new_value()`` expression will be pre-executed in the context of a scalar ``SELECT`` statement, and the new value will be applied to the subsequent ``INSERT``, while at the same time being made available to the :attr:`.ResultProxy.inserted_primary_key` attribute. Defining Sequences ------------------- SQLAlchemy represents database sequences using the :class:`~sqlalchemy.schema.Sequence` object, which is considered to be a special case of "column default". It only has an effect on databases which have explicit support for sequences, which currently includes Postgresql, Oracle, and Firebird. The :class:`~sqlalchemy.schema.Sequence` object is otherwise ignored. The :class:`~sqlalchemy.schema.Sequence` may be placed on any column as a "default" generator to be used during INSERT operations, and can also be configured to fire off during UPDATE operations if desired. It is most commonly used in conjunction with a single integer primary key column:: table = Table("cartitems", meta, Column("cart_id", Integer, Sequence('cart_id_seq'), primary_key=True), Column("description", String(40)), Column("createdate", DateTime()) ) Where above, the table "cartitems" is associated with a sequence named "cart_id_seq". When INSERT statements take place for "cartitems", and no value is passed for the "cart_id" column, the "cart_id_seq" sequence will be used to generate a value.
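As a minimal sketch of this behavior (the engine URL below is a placeholder and ``create_engine`` is assumed to be imported), an INSERT which omits "cart_id" lets the sequence generate the value, which is then available from the result::

    engine = create_engine("postgresql://scott:tiger@localhost/test")
    meta.create_all(engine)   # emits CREATE SEQUENCE cart_id_seq along with CREATE TABLE

    conn = engine.connect()
    result = conn.execute(table.insert(), description="rocking chair")
    print result.inserted_primary_key   # e.g. [1], generated from cart_id_seq
    conn.close()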
When the :class:`~sqlalchemy.schema.Sequence` is associated with a table, CREATE and DROP statements issued for that table will also issue CREATE/DROP for the sequence object as well, thus "bundling" the sequence object with its parent table. The :class:`~sqlalchemy.schema.Sequence` object also implements special functionality to accommodate Postgresql's SERIAL datatype. The SERIAL type in PG automatically generates a sequence that is used implicitly during inserts. This means that if a :class:`~sqlalchemy.schema.Table` object defines a :class:`~sqlalchemy.schema.Sequence` on its primary key column so that it works with Oracle and Firebird, the :class:`~sqlalchemy.schema.Sequence` would get in the way of the "implicit" sequence that PG would normally use. For this use case, add the flag ``optional=True`` to the :class:`~sqlalchemy.schema.Sequence` object - this indicates that the :class:`~sqlalchemy.schema.Sequence` should only be used if the database provides no other option for generating primary key identifiers. The :class:`~sqlalchemy.schema.Sequence` object also has the ability to be executed standalone like a SQL expression, which has the effect of calling its "next value" function:: seq = Sequence('some_sequence') nextid = connection.execute(seq) Default Objects API ------------------- .. autoclass:: ColumnDefault .. autoclass:: DefaultClause .. autoclass:: DefaultGenerator .. autoclass:: FetchedValue .. autoclass:: PassiveDefault .. autoclass:: Sequence :members: SQLAlchemy-0.8.4/doc/build/core/dml.rst0000644000076500000240000000116212251150015020323 0ustar classicstaff00000000000000Insert, Updates, Deletes ======================== INSERT, UPDATE and DELETE statements build on a hierarchy starting with :class:`.UpdateBase`. The :class:`.Insert` and :class:`.Update` constructs build on the intermediary :class:`.ValuesBase`. .. module:: sqlalchemy.sql.expression .. autofunction:: delete .. autofunction:: insert .. autofunction:: update .. autoclass:: Delete :members: :inherited-members: .. autoclass:: Insert :members: :inherited-members: .. autoclass:: Update :members: :inherited-members: .. autoclass:: UpdateBase :members: .. autoclass:: ValuesBase :members: SQLAlchemy-0.8.4/doc/build/core/engines.rst0000644000076500000240000002705012251147171021214 0ustar classicstaff00000000000000.. _engines_toplevel: ==================== Engine Configuration ==================== The :class:`.Engine` is the starting point for any SQLAlchemy application. It's "home base" for the actual database and its :term:`DBAPI`, delivered to the SQLAlchemy application through a connection pool and a :class:`.Dialect`, which describes how to talk to a specific kind of database/DBAPI combination. The general structure can be illustrated as follows: .. image:: sqla_engine_arch.png Where above, an :class:`.Engine` references both a :class:`.Dialect` and a :class:`.Pool`, which together interpret the DBAPI's module functions as well as the behavior of the database. Creating an engine is just a matter of issuing a single call, :func:`.create_engine()`:: from sqlalchemy import create_engine engine = create_engine('postgresql://scott:tiger@localhost:5432/mydatabase') The above engine creates a :class:`.Dialect` object tailored towards PostgreSQL, as well as a :class:`.Pool` object which will establish a DBAPI connection at ``localhost:5432`` when a connection request is first received. 
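As a quick sketch of such a first connection request (the statement shown is only illustrative), the engine can hand out a connection and run SQL like this::

    connection = engine.connect()
    result = connection.execute("select 1")
    print result.scalar()
    connection.close()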
Note that the :class:`.Engine` and its underlying :class:`.Pool` do **not** establish the first actual DBAPI connection until the :meth:`.Engine.connect` method is called, or an operation which is dependent on this method such as :meth:`.Engine.execute` is invoked. In this way, :class:`.Engine` and :class:`.Pool` can be said to have a *lazy initialization* behavior. The :class:`.Engine`, once created, can either be used directly to interact with the database, or can be passed to a :class:`.Session` object to work with the ORM. This section covers the details of configuring an :class:`.Engine`. The next section, :ref:`connections_toplevel`, will detail the usage API of the :class:`.Engine` and similar, typically for non-ORM applications. .. _supported_dbapis: Supported Databases ==================== SQLAlchemy includes many :class:`.Dialect` implementations for various backends. Dialects for the most common databases are included with SQLAlchemy; a handful of others require an additional install of a separate dialect. See the section :ref:`dialect_toplevel` for information on the various backends available. .. _create_engine_args: Engine Creation API =================== Keyword options can also be specified to :func:`~sqlalchemy.create_engine`, following the string URL as follows: .. sourcecode:: python+sql db = create_engine('postgresql://...', encoding='latin1', echo=True) .. autofunction:: sqlalchemy.create_engine .. autofunction:: sqlalchemy.engine_from_config Database Urls ============= SQLAlchemy indicates the source of an Engine strictly via `RFC-1738 `_ style URLs, combined with optional keyword arguments to specify options for the Engine. The form of the URL is:: dialect+driver://username:password@host:port/database Dialect names include the identifying name of the SQLAlchemy dialect which include ``sqlite``, ``mysql``, ``postgresql``, ``oracle``, ``mssql``, and ``firebird``. The drivername is the name of the DBAPI to be used to connect to the database using all lowercase letters. If not specified, a "default" DBAPI will be imported if available - this default is typically the most widely known driver available for that backend (i.e. cx_oracle, pysqlite/sqlite3, psycopg2, mysqldb). For Jython connections, specify the `zxjdbc` driver, which is the JDBC-DBAPI bridge included with Jython. .. autofunction:: sqlalchemy.engine.url.make_url Postgresql ---------- The Postgresql dialect uses psycopg2 as the default DBAPI:: # default engine = create_engine('postgresql://scott:tiger@localhost/mydatabase') # psycopg2 engine = create_engine('postgresql+psycopg2://scott:tiger@localhost/mydatabase') # pg8000 engine = create_engine('postgresql+pg8000://scott:tiger@localhost/mydatabase') # Jython engine = create_engine('postgresql+zxjdbc://scott:tiger@localhost/mydatabase') More notes on connecting to Postgresql at :ref:`postgresql_toplevel`. MySQL ----- The MySQL dialect uses mysql-python as the default DBAPI:: # default engine = create_engine('mysql://scott:tiger@localhost/foo') # mysql-python engine = create_engine('mysql+mysqldb://scott:tiger@localhost/foo') # OurSQL engine = create_engine('mysql+oursql://scott:tiger@localhost/foo') More notes on connecting to MySQL at :ref:`mysql_toplevel`. Oracle ------ cx_oracle is usually used here:: engine = create_engine('oracle://scott:tiger@127.0.0.1:1521/sidname') engine = create_engine('oracle+cx_oracle://scott:tiger@tnsname') More notes on connecting to Oracle at :ref:`oracle_toplevel`. 
Microsoft SQL Server -------------------- There are a few drivers for SQL Server, currently PyODBC is the most solid:: engine = create_engine('mssql+pyodbc://mydsn') More notes on connecting to SQL Server at :ref:`mssql_toplevel`. SQLite ------ SQLite connects to file based databases. The same URL format is used, omitting the hostname, and using the "file" portion as the filename of the database. This has the effect of four slashes being present for an absolute file path:: # sqlite:/// # where is relative: engine = create_engine('sqlite:///foo.db') # or absolute, starting with a slash: engine = create_engine('sqlite:////absolute/path/to/foo.db') To use a SQLite ``:memory:`` database, specify an empty URL:: engine = create_engine('sqlite://') More notes on connecting to SQLite at :ref:`sqlite_toplevel`. Others ------ See :ref:`dialect_toplevel`, the top-level page for all dialect documentation. URL API -------- .. autoclass:: sqlalchemy.engine.url.URL :members: Pooling ======= The :class:`.Engine` will ask the connection pool for a connection when the ``connect()`` or ``execute()`` methods are called. The default connection pool, :class:`~.QueuePool`, will open connections to the database on an as-needed basis. As concurrent statements are executed, :class:`.QueuePool` will grow its pool of connections to a default size of five, and will allow a default "overflow" of ten. Since the :class:`.Engine` is essentially "home base" for the connection pool, it follows that you should keep a single :class:`.Engine` per database established within an application, rather than creating a new one for each connection. .. note:: :class:`.QueuePool` is not used by default for SQLite engines. See :ref:`sqlite_toplevel` for details on SQLite connection pool usage. For more information on connection pooling, see :ref:`pooling_toplevel`. .. _custom_dbapi_args: Custom DBAPI connect() arguments ================================= Custom arguments used when issuing the ``connect()`` call to the underlying DBAPI may be issued in three distinct ways. String-based arguments can be passed directly from the URL string as query arguments: .. sourcecode:: python+sql db = create_engine('postgresql://scott:tiger@localhost/test?argument1=foo&argument2=bar') If SQLAlchemy's database connector is aware of a particular query argument, it may convert its type from string to its proper type. :func:`~sqlalchemy.create_engine` also takes an argument ``connect_args`` which is an additional dictionary that will be passed to ``connect()``. This can be used when arguments of a type other than string are required, and SQLAlchemy's database connector has no type conversion logic present for that parameter: .. sourcecode:: python+sql db = create_engine('postgresql://scott:tiger@localhost/test', connect_args = {'argument1':17, 'argument2':'bar'}) The most customizable connection method of all is to pass a ``creator`` argument, which specifies a callable that returns a DBAPI connection: .. sourcecode:: python+sql def connect(): return psycopg.connect(user='scott', host='localhost') db = create_engine('postgresql://', creator=connect) .. _dbengine_logging: Configuring Logging ==================== Python's standard `logging `_ module is used to implement informational and debug log output with SQLAlchemy. This allows SQLAlchemy's logging to integrate in a standard way with other applications and libraries. 
The ``echo`` and ``echo_pool`` flags that are present on :func:`~sqlalchemy.create_engine`, as well as the ``echo_uow`` flag used on :class:`~sqlalchemy.orm.session.Session`, all interact with regular loggers. This section assumes familiarity with the above linked logging module. All logging performed by SQLAlchemy exists underneath the ``sqlalchemy`` namespace, as used by ``logging.getLogger('sqlalchemy')``. When logging has been configured (i.e. such as via ``logging.basicConfig()``), the general namespace of SA loggers that can be turned on is as follows: * ``sqlalchemy.engine`` - controls SQL echoing. set to ``logging.INFO`` for SQL query output, ``logging.DEBUG`` for query + result set output. * ``sqlalchemy.dialects`` - controls custom logging for SQL dialects. See the documentation of individual dialects for details. * ``sqlalchemy.pool`` - controls connection pool logging. set to ``logging.INFO`` or lower to log connection pool checkouts/checkins. * ``sqlalchemy.orm`` - controls logging of various ORM functions. set to ``logging.INFO`` for information on mapper configurations. For example, to log SQL queries using Python logging instead of the ``echo=True`` flag:: import logging logging.basicConfig() logging.getLogger('sqlalchemy.engine').setLevel(logging.INFO) By default, the log level is set to ``logging.WARN`` within the entire ``sqlalchemy`` namespace so that no log operations occur, even within an application that has logging enabled otherwise. The ``echo`` flags present as keyword arguments to :func:`~sqlalchemy.create_engine` and others as well as the ``echo`` property on :class:`~sqlalchemy.engine.Engine`, when set to ``True``, will first attempt to ensure that logging is enabled. Unfortunately, the ``logging`` module provides no way of determining if output has already been configured (note we are referring to if a logging configuration has been set up, not just that the logging level is set). For this reason, any ``echo=True`` flags will result in a call to ``logging.basicConfig()`` using sys.stdout as the destination. It also sets up a default format using the level name, timestamp, and logger name. Note that this configuration has the affect of being configured **in addition** to any existing logger configurations. Therefore, **when using Python logging, ensure all echo flags are set to False at all times**, to avoid getting duplicate log lines. The logger name of instance such as an :class:`~sqlalchemy.engine.Engine` or :class:`~sqlalchemy.pool.Pool` defaults to using a truncated hex identifier string. To set this to a specific name, use the "logging_name" and "pool_logging_name" keyword arguments with :func:`sqlalchemy.create_engine`. .. note:: The SQLAlchemy :class:`.Engine` conserves Python function call overhead by only emitting log statements when the current logging level is detected as ``logging.INFO`` or ``logging.DEBUG``. It only checks this level when a new connection is procured from the connection pool. Therefore when changing the logging configuration for an already-running application, any :class:`.Connection` that's currently active, or more commonly a :class:`~.orm.session.Session` object that's active in a transaction, won't log any SQL according to the new configuration until a new :class:`.Connection` is procured (in the case of :class:`~.orm.session.Session`, this is after the current transaction ends and a new one begins). SQLAlchemy-0.8.4/doc/build/core/event.rst0000644000076500000240000001002012251150015020661 0ustar classicstaff00000000000000.. 
_event_toplevel: Events ====== SQLAlchemy includes an event API which publishes a wide variety of hooks into the internals of both SQLAlchemy Core and ORM. .. versionadded:: 0.7 The system supercedes the previous system of "extension", "proxy", and "listener" classes. Event Registration ------------------ Subscribing to an event occurs through a single API point, the :func:`.listen` function. This function accepts a user-defined listening function, a string identifier which identifies the event to be intercepted, and a target. Additional positional and keyword arguments may be supported by specific types of events, which may specify alternate interfaces for the given event function, or provide instructions regarding secondary event targets based on the given target. The name of an event and the argument signature of a corresponding listener function is derived from a class bound specification method, which exists bound to a marker class that's described in the documentation. For example, the documentation for :meth:`.PoolEvents.connect` indicates that the event name is ``"connect"`` and that a user-defined listener function should receive two positional arguments:: from sqlalchemy.event import listen from sqlalchemy.pool import Pool def my_on_connect(dbapi_con, connection_record): print "New DBAPI connection:", dbapi_con listen(Pool, 'connect', my_on_connect) Targets ------- The :func:`.listen` function is very flexible regarding targets. It generally accepts classes, instances of those classes, and related classes or objects from which the appropriate target can be derived. For example, the above mentioned ``"connect"`` event accepts :class:`.Engine` classes and objects as well as :class:`.Pool` classes and objects:: from sqlalchemy.event import listen from sqlalchemy.pool import Pool, QueuePool from sqlalchemy import create_engine from sqlalchemy.engine import Engine import psycopg2 def connect(): return psycopg2.connect(username='ed', host='127.0.0.1', dbname='test') my_pool = QueuePool(connect) my_engine = create_engine('postgresql://ed@localhost/test') # associate listener with all instances of Pool listen(Pool, 'connect', my_on_connect) # associate listener with all instances of Pool # via the Engine class listen(Engine, 'connect', my_on_connect) # associate listener with my_pool listen(my_pool, 'connect', my_on_connect) # associate listener with my_engine.pool listen(my_engine, 'connect', my_on_connect) Modifiers ---------- Some listeners allow modifiers to be passed to :func:`.listen`. These modifiers sometimes provide alternate calling signatures for listeners. Such as with ORM events, some event listeners can have a return value which modifies the subsequent handling. By default, no listener ever requires a return value, but by passing ``retval=True`` this value can be supported:: def validate_phone(target, value, oldvalue, initiator): """Strip non-numeric characters from a phone number""" return re.sub(r'(?![0-9])', '', value) # setup listener on UserContact.phone attribute, instructing # it to use the return value listen(UserContact.phone, 'set', validate_phone, retval=True) Event Reference ---------------- Both SQLAlchemy Core and SQLAlchemy ORM feature a wide variety of event hooks: * **Core Events** - these are described in :ref:`core_event_toplevel` and include event hooks specific to connection pool lifecycle, SQL statement execution, transaction lifecycle, and schema creation and teardown. 
* **ORM Events** - these are described in :ref:`orm_event_toplevel`, and include event hooks specific to class and attribute instrumentation, object initialization hooks, attribute on-change hooks, session state, flush, and commit hooks, mapper initialization, object/result population, and per-instance persistence hooks. API Reference ------------- .. autofunction:: sqlalchemy.event.listen .. autofunction:: sqlalchemy.event.listens_for SQLAlchemy-0.8.4/doc/build/core/events.rst0000644000076500000240000000152212251147171021064 0ustar classicstaff00000000000000.. _core_event_toplevel: Core Events ============ This section describes the event interfaces provided in SQLAlchemy Core. For an introduction to the event listening API, see :ref:`event_toplevel`. ORM events are described in :ref:`orm_event_toplevel`. .. autoclass:: sqlalchemy.event.base.Events :members: .. versionadded:: 0.7 The event system supercedes the previous system of "extension", "listener", and "proxy" classes. Connection Pool Events ----------------------- .. autoclass:: sqlalchemy.events.PoolEvents :members: SQL Execution and Connection Events ------------------------------------ .. autoclass:: sqlalchemy.events.ConnectionEvents :members: Schema Events ----------------------- .. autoclass:: sqlalchemy.events.DDLEvents :members: .. autoclass:: sqlalchemy.events.SchemaEventTarget :members: SQLAlchemy-0.8.4/doc/build/core/exceptions.rst0000644000076500000240000000011512251147171021736 0ustar classicstaff00000000000000Core Exceptions =============== .. automodule:: sqlalchemy.exc :members:SQLAlchemy-0.8.4/doc/build/core/expression_api.rst0000644000076500000240000000057712251147171022621 0ustar classicstaff00000000000000.. _expression_api_toplevel: SQL Statements and Expressions API ================================== .. module:: sqlalchemy.sql.expression This section presents the API reference for the SQL Expression Language. For a full introduction to its usage, see :ref:`sqlexpression_toplevel`. .. toctree:: :maxdepth: 1 sqlelement selectable dml functions types SQLAlchemy-0.8.4/doc/build/core/functions.rst0000644000076500000240000000150312251147171021567 0ustar classicstaff00000000000000.. _functions_toplevel: .. _generic_functions: ========================= SQL and Generic Functions ========================= .. module:: sqlalchemy.sql.expression SQL functions which are known to SQLAlchemy with regards to database-specific rendering, return types and argument behavior. Generic functions are invoked like all SQL functions, using the :attr:`func` attribute:: select([func.count()]).select_from(sometable) Note that any name not known to :attr:`func` generates the function name as is - there is no restriction on what SQL functions can be called, known or unknown to SQLAlchemy, built-in or user defined. The section here only describes those functions where SQLAlchemy already knows what argument and return types are in use. .. automodule:: sqlalchemy.sql.functions :members: :undoc-members: SQLAlchemy-0.8.4/doc/build/core/index.rst0000644000076500000240000000105012251147171020663 0ustar classicstaff00000000000000.. _core_toplevel: SQLAlchemy Core =============== The breadth of SQLAlchemy’s SQL rendering engine, DBAPI integration, transaction integration, and schema description services are documented here. In contrast to the ORM’s domain-centric mode of usage, the SQL Expression Language provides a schema-centric usage paradigm. .. 
toctree:: :maxdepth: 3 tutorial expression_api schema engines connections pooling event events compiler inspection serializer interfaces exceptions internals SQLAlchemy-0.8.4/doc/build/core/inspection.rst0000644000076500000240000000315312251147171021735 0ustar classicstaff00000000000000.. _core_inspection_toplevel: .. _inspection_toplevel: Runtime Inspection API ====================== .. automodule:: sqlalchemy.inspection :members: Available Inspection Targets ---------------------------- Below is a listing of many of the most common inspection targets. * :class:`.Connectable` (i.e. :class:`.Engine`, :class:`.Connection`) - returns an :class:`.Inspector` object. * :class:`.ClauseElement` - all SQL expression components, including :class:`.Table`, :class:`.Column`, serve as their own inspection objects, meaning any of these objects passed to :func:`.inspect` return themselves. * ``object`` - an object given will be checked by the ORM for a mapping - if so, an :class:`.InstanceState` is returned representing the mapped state of the object. The :class:`.InstanceState` also provides access to per attribute state via the :class:`.AttributeState` interface as well as the per-flush "history" of any attribute via the :class:`.History` object. * ``type`` (i.e. a class) - a class given will be checked by the ORM for a mapping - if so, a :class:`.Mapper` for that class is returned. * mapped attribute - passing a mapped attribute to :func:`.inspect`, such as ``inspect(MyClass.some_attribute)``, returns a :class:`.QueryableAttribute` object, which is the :term:`descriptor` associated with a mapped class. This descriptor refers to a :class:`.MapperProperty`, which is usually an instance of :class:`.ColumnProperty` or :class:`.RelationshipProperty`, via its :attr:`.QueryableAttribute.property` attribute. * :class:`.AliasedClass` - returns an :class:`.AliasedInsp` object. SQLAlchemy-0.8.4/doc/build/core/interfaces.rst0000644000076500000240000000137012251147171021704 0ustar classicstaff00000000000000.. _dep_interfaces_core_toplevel: Deprecated Event Interfaces ============================ .. module:: sqlalchemy.interfaces This section describes the class-based core event interface introduced in SQLAlchemy 0.5. The ORM analogue is described at :ref:`dep_interfaces_orm_toplevel`. .. deprecated:: 0.7 The new event system described in :ref:`event_toplevel` replaces the extension/proxy/listener system, providing a consistent interface to all events without the need for subclassing. Execution, Connection and Cursor Events --------------------------------------- .. autoclass:: ConnectionProxy :members: :undoc-members: Connection Pool Events ---------------------- .. autoclass:: PoolListener :members: :undoc-members: SQLAlchemy-0.8.4/doc/build/core/internals.rst0000644000076500000240000000136412251147171021563 0ustar classicstaff00000000000000.. _core_internal_toplevel: Core Internals ============== Some key internal constructs are listed here. .. currentmodule: sqlalchemy .. autoclass:: sqlalchemy.engine.interfaces.Compiled :members: .. autoclass:: sqlalchemy.sql.compiler.DDLCompiler :members: :inherited-members: .. autoclass:: sqlalchemy.engine.default.DefaultDialect :members: :inherited-members: .. autoclass:: sqlalchemy.engine.interfaces.Dialect :members: .. autoclass:: sqlalchemy.engine.default.DefaultExecutionContext :members: .. autoclass:: sqlalchemy.engine.interfaces.ExecutionContext :members: .. autoclass:: sqlalchemy.sql.compiler.IdentifierPreparer :members: .. 
autoclass:: sqlalchemy.sql.compiler.SQLCompiler :members: SQLAlchemy-0.8.4/doc/build/core/metadata.rst0000644000076500000240000002752012251150015021335 0ustar classicstaff00000000000000.. _metadata_toplevel: .. _metadata_describing_toplevel: .. _metadata_describing: ================================== Describing Databases with MetaData ================================== .. module:: sqlalchemy.schema This section discusses the fundamental :class:`.Table`, :class:`.Column` and :class:`.MetaData` objects. A collection of metadata entities is stored in an object aptly named :class:`~sqlalchemy.schema.MetaData`:: from sqlalchemy import * metadata = MetaData() :class:`~sqlalchemy.schema.MetaData` is a container object that keeps together many different features of a database (or multiple databases) being described. To represent a table, use the :class:`~sqlalchemy.schema.Table` class. Its two primary arguments are the table name, then the :class:`~sqlalchemy.schema.MetaData` object which it will be associated with. The remaining positional arguments are mostly :class:`~sqlalchemy.schema.Column` objects describing each column:: user = Table('user', metadata, Column('user_id', Integer, primary_key = True), Column('user_name', String(16), nullable = False), Column('email_address', String(60)), Column('password', String(20), nullable = False) ) Above, a table called ``user`` is described, which contains four columns. The primary key of the table consists of the ``user_id`` column. Multiple columns may be assigned the ``primary_key=True`` flag which denotes a multi-column primary key, known as a *composite* primary key. Note also that each column describes its datatype using objects corresponding to genericized types, such as :class:`~sqlalchemy.types.Integer` and :class:`~sqlalchemy.types.String`. SQLAlchemy features dozens of types of varying levels of specificity as well as the ability to create custom types. Documentation on the type system can be found at :ref:`types`. Accessing Tables and Columns ---------------------------- The :class:`~sqlalchemy.schema.MetaData` object contains all of the schema constructs we've associated with it. It supports a few methods of accessing these table objects, such as the ``sorted_tables`` accessor which returns a list of each :class:`~sqlalchemy.schema.Table` object in order of foreign key dependency (that is, each table is preceded by all tables which it references):: >>> for t in metadata.sorted_tables: ... print t.name user user_preference invoice invoice_item In most cases, individual :class:`~sqlalchemy.schema.Table` objects have been explicitly declared, and these objects are typically accessed directly as module-level variables in an application. Once a :class:`~sqlalchemy.schema.Table` has been defined, it has a full set of accessors which allow inspection of its properties. Given the following :class:`~sqlalchemy.schema.Table` definition:: employees = Table('employees', metadata, Column('employee_id', Integer, primary_key=True), Column('employee_name', String(60), nullable=False), Column('employee_dept', Integer, ForeignKey("departments.department_id")) ) Note the :class:`~sqlalchemy.schema.ForeignKey` object used in this table - this construct defines a reference to a remote table, and is fully described in :ref:`metadata_foreignkeys`. 
Methods of accessing information about this table include:: # access the column "EMPLOYEE_ID": employees.columns.employee_id # or just employees.c.employee_id # via string employees.c['employee_id'] # iterate through all columns for c in employees.c: print c # get the table's primary key columns for primary_key in employees.primary_key: print primary_key # get the table's foreign key objects: for fkey in employees.foreign_keys: print fkey # access the table's MetaData: employees.metadata # access the table's bound Engine or Connection, if its MetaData is bound: employees.bind # access a column's name, type, nullable, primary key, foreign key employees.c.employee_id.name employees.c.employee_id.type employees.c.employee_id.nullable employees.c.employee_id.primary_key employees.c.employee_dept.foreign_keys # get the "key" of a column, which defaults to its name, but can # be any user-defined string: employees.c.employee_name.key # access a column's table: employees.c.employee_id.table is employees # get the table related by a foreign key list(employees.c.employee_dept.foreign_keys)[0].column.table Creating and Dropping Database Tables ------------------------------------- Once you've defined some :class:`~sqlalchemy.schema.Table` objects, assuming you're working with a brand new database one thing you might want to do is issue CREATE statements for those tables and their related constructs (as an aside, it's also quite possible that you *don't* want to do this, if you already have some preferred methodology such as tools included with your database or an existing scripting system - if that's the case, feel free to skip this section - SQLAlchemy has no requirement that it be used to create your tables). The usual way to issue CREATE is to use :func:`~sqlalchemy.schema.MetaData.create_all` on the :class:`~sqlalchemy.schema.MetaData` object. This method will issue queries that first check for the existence of each individual table, and if not found will issue the CREATE statements: .. sourcecode:: python+sql engine = create_engine('sqlite:///:memory:') metadata = MetaData() user = Table('user', metadata, Column('user_id', Integer, primary_key = True), Column('user_name', String(16), nullable = False), Column('email_address', String(60), key='email'), Column('password', String(20), nullable = False) ) user_prefs = Table('user_prefs', metadata, Column('pref_id', Integer, primary_key=True), Column('user_id', Integer, ForeignKey("user.user_id"), nullable=False), Column('pref_name', String(40), nullable=False), Column('pref_value', String(100)) ) {sql}metadata.create_all(engine) PRAGMA table_info(user){} CREATE TABLE user( user_id INTEGER NOT NULL PRIMARY KEY, user_name VARCHAR(16) NOT NULL, email_address VARCHAR(60), password VARCHAR(20) NOT NULL ) PRAGMA table_info(user_prefs){} CREATE TABLE user_prefs( pref_id INTEGER NOT NULL PRIMARY KEY, user_id INTEGER NOT NULL REFERENCES user(user_id), pref_name VARCHAR(40) NOT NULL, pref_value VARCHAR(100) ) :func:`~sqlalchemy.schema.MetaData.create_all` creates foreign key constraints between tables usually inline with the table definition itself, and for this reason it also generates the tables in order of their dependency. There are options to change this behavior such that ``ALTER TABLE`` is used instead. Dropping all tables is similarly achieved using the :func:`~sqlalchemy.schema.MetaData.drop_all` method. 
This method does the exact opposite of :func:`~sqlalchemy.schema.MetaData.create_all` - the presence of each table is checked first, and tables are dropped in reverse order of dependency. Creating and dropping individual tables can be done via the ``create()`` and ``drop()`` methods of :class:`~sqlalchemy.schema.Table`. These methods by default issue the CREATE or DROP regardless of the table being present: .. sourcecode:: python+sql engine = create_engine('sqlite:///:memory:') meta = MetaData() employees = Table('employees', meta, Column('employee_id', Integer, primary_key=True), Column('employee_name', String(60), nullable=False, key='name'), Column('employee_dept', Integer, ForeignKey("departments.department_id")) ) {sql}employees.create(engine) CREATE TABLE employees( employee_id SERIAL NOT NULL PRIMARY KEY, employee_name VARCHAR(60) NOT NULL, employee_dept INTEGER REFERENCES departments(department_id) ) {} ``drop()`` method: .. sourcecode:: python+sql {sql}employees.drop(engine) DROP TABLE employees {} To enable the "check first for the table existing" logic, add the ``checkfirst=True`` argument to ``create()`` or ``drop()``:: employees.create(engine, checkfirst=True) employees.drop(engine, checkfirst=False) .. _schema_migrations: Altering Schemas through Migrations ----------------------------------- While SQLAlchemy directly supports emitting CREATE and DROP statements for schema constructs, the ability to alter those constructs, usually via the ALTER statement as well as other database-specific constructs, is outside of the scope of SQLAlchemy itself. While it's easy enough to emit ALTER statements and similar by hand, such as by passing a string to :meth:`.Connection.execute` or by using the :class:`.DDL` construct, it's a common practice to automate the maintenance of database schemas in relation to application code using schema migration tools. There are two major migration tools available for SQLAlchemy: * `Alembic `_ - Written by the author of SQLAlchemy, Alembic features a highly customizable environment and a minimalistic usage pattern, supporting such features as transactional DDL, automatic generation of "candidate" migrations, an "offline" mode which generates SQL scripts, and support for branch resolution. * `SQLAlchemy-Migrate `_ - The original migration tool for SQLAlchemy, SQLAlchemy-Migrate is widely used and continues under active development. SQLAlchemy-Migrate includes features such as SQL script generation, ORM class generation, ORM model comparison, and extensive support for SQLite migrations. Specifying the Schema Name --------------------------- Some databases support the concept of multiple schemas. A :class:`~sqlalchemy.schema.Table` can reference this by specifying the ``schema`` keyword argument:: financial_info = Table('financial_info', meta, Column('id', Integer, primary_key=True), Column('value', String(100), nullable=False), schema='remote_banks' ) Within the :class:`~sqlalchemy.schema.MetaData` collection, this table will be identified by the combination of ``financial_info`` and ``remote_banks``. If another table called ``financial_info`` is referenced without the ``remote_banks`` schema, it will refer to a different :class:`~sqlalchemy.schema.Table`. :class:`~sqlalchemy.schema.ForeignKey` objects can specify references to columns in this table using the form ``remote_banks.financial_info.id``. The ``schema`` argument should be used for any name qualifiers required, including Oracle's "owner" attribute and similar. 
It also can accommodate a dotted name for longer schemes:: schema="dbo.scott" Backend-Specific Options ------------------------ :class:`~sqlalchemy.schema.Table` supports database-specific options. For example, MySQL has different table backend types, including "MyISAM" and "InnoDB". This can be expressed with :class:`~sqlalchemy.schema.Table` using ``mysql_engine``:: addresses = Table('engine_email_addresses', meta, Column('address_id', Integer, primary_key = True), Column('remote_user_id', Integer, ForeignKey(users.c.user_id)), Column('email_address', String(20)), mysql_engine='InnoDB' ) Other backends may support table-level options as well - these would be described in the individual documentation sections for each dialect. Column, Table, MetaData API --------------------------- .. autoclass:: Column :members: :inherited-members: :undoc-members: .. autoclass:: MetaData :members: :undoc-members: .. autoclass:: SchemaItem :members: .. autoclass:: Table :members: :inherited-members: :undoc-members: .. autoclass:: ThreadLocalMetaData :members: :undoc-members: SQLAlchemy-0.8.4/doc/build/core/pooling.rst0000644000076500000240000003350312251147171021233 0ustar classicstaff00000000000000.. _pooling_toplevel: Connection Pooling ================== .. module:: sqlalchemy.pool A connection pool is a standard technique used to maintain long running connections in memory for efficient re-use, as well as to provide management for the total number of connections an application might use simultaneously. Particularly for server-side web applications, a connection pool is the standard way to maintain a "pool" of active database connections in memory which are reused across requests. SQLAlchemy includes several connection pool implementations which integrate with the :class:`.Engine`. They can also be used directly for applications that want to add pooling to an otherwise plain DBAPI approach. Connection Pool Configuration ----------------------------- The :class:`~.engine.Engine` returned by the :func:`~sqlalchemy.create_engine` function in most cases has a :class:`.QueuePool` integrated, pre-configured with reasonable pooling defaults. If you're reading this section only to learn how to enable pooling - congratulations! You're already done. The most common :class:`.QueuePool` tuning parameters can be passed directly to :func:`~sqlalchemy.create_engine` as keyword arguments: ``pool_size``, ``max_overflow``, ``pool_recycle`` and ``pool_timeout``. For example:: engine = create_engine('postgresql://me@localhost/mydb', pool_size=20, max_overflow=0) In the case of SQLite, the :class:`.SingletonThreadPool` or :class:`.NullPool` are selected by the dialect to provide greater compatibility with SQLite's threading and locking model, as well as to provide a reasonable default behavior to SQLite "memory" databases, which maintain their entire dataset within the scope of a single connection. All SQLAlchemy pool implementations have in common that none of them "pre create" connections - all implementations wait until first use before creating a connection. At that point, if no additional concurrent checkout requests for more connections are made, no additional connections are created. 
This is why it's perfectly fine for :func:`.create_engine` to default to using a :class:`.QueuePool` of size five without regard to whether or not the application really needs five connections queued up - the pool would only grow to that size if the application actually used five connections concurrently, in which case the usage of a small pool is an entirely appropriate default behavior. Switching Pool Implementations ------------------------------ The usual way to use a different kind of pool with :func:`.create_engine` is to use the ``poolclass`` argument. This argument accepts a class imported from the ``sqlalchemy.pool`` module, and handles the details of building the pool for you. Common options include specifying :class:`.QueuePool` with SQLite:: from sqlalchemy.pool import QueuePool engine = create_engine('sqlite:///file.db', poolclass=QueuePool) Disabling pooling using :class:`.NullPool`:: from sqlalchemy.pool import NullPool engine = create_engine( 'postgresql+psycopg2://scott:tiger@localhost/test', poolclass=NullPool) Using a Custom Connection Function ---------------------------------- All :class:`.Pool` classes accept an argument ``creator`` which is a callable that creates a new connection. :func:`.create_engine` accepts this function to pass onto the pool via an argument of the same name:: import sqlalchemy.pool as pool import psycopg2 def getconn(): c = psycopg2.connect(username='ed', host='127.0.0.1', dbname='test') # do things with 'c' to set up return c engine = create_engine('postgresql+psycopg2://', creator=getconn) For most "initialize on connection" routines, it's more convenient to use the :class:`.PoolEvents` event hooks, so that the usual URL argument to :func:`.create_engine` is still usable. ``creator`` is there as a last resort for when a DBAPI has some form of ``connect`` that is not at all supported by SQLAlchemy. Constructing a Pool ------------------------ To use a :class:`.Pool` by itself, the ``creator`` function is the only argument that's required and is passed first, followed by any additional options:: import sqlalchemy.pool as pool import psycopg2 def getconn(): c = psycopg2.connect(username='ed', host='127.0.0.1', dbname='test') return c mypool = pool.QueuePool(getconn, max_overflow=10, pool_size=5) DBAPI connections can then be procured from the pool using the :meth:`.Pool.connect` function. The return value of this method is a DBAPI connection that's contained within a transparent proxy:: # get a connection conn = mypool.connect() # use it cursor = conn.cursor() cursor.execute("select foo") The purpose of the transparent proxy is to intercept the ``close()`` call, such that instead of the DBAPI connection being closed, it's returned to the pool:: # "close" the connection. Returns # it to the pool. conn.close() The proxy also returns its contained DBAPI connection to the pool when it is garbage collected, though it's not deterministic in Python that this occurs immediately (though it is typical with cPython). The ``close()`` step also performs the important step of calling the ``rollback()`` method of the DBAPI connection. This is so that any existing transaction on the connection is removed, not only ensuring that no existing state remains on next usage, but also so that table and row locks are released as well as that any isolated data snapshots are removed. This behavior can be disabled using the ``reset_on_return`` option of :class:`.Pool`. 
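As a brief, hypothetical sketch of the ``reset_on_return`` option just mentioned, the rollback-on-return behavior can be switched off when constructing a pool directly; ``getconn`` below is the same illustrative psycopg2 connection function used in the previous examples, and most applications are better off leaving the default in place::

    import sqlalchemy.pool as pool
    import psycopg2

    def getconn():
        # illustrative connection function, as in the examples above
        return psycopg2.connect(username='ed', host='127.0.0.1', dbname='test')

    # reset_on_return=False skips the rollback() normally emitted when a
    # connection is returned to the pool; appropriate only if the application
    # guarantees connections come back in a clean state
    noreset_pool = pool.QueuePool(getconn, pool_size=5, max_overflow=10,
                                  reset_on_return=False)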
A particular pre-created :class:`.Pool` can be shared with one or more engines by passing it to the ``pool`` argument of :func:`.create_engine`:: e = create_engine('postgresql://', pool=mypool) Pool Events ----------- Connection pools support an event interface that allows hooks to execute upon first connect, upon each new connection, and upon checkout and checkin of connections. See :class:`.PoolEvents` for details. Dealing with Disconnects ------------------------ The connection pool has the ability to refresh individual connections as well as its entire set of connections, setting the previously pooled connections as "invalid". A common use case is allow the connection pool to gracefully recover when the database server has been restarted, and all previously established connections are no longer functional. There are two approaches to this. Disconnect Handling - Optimistic ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ The most common approach is to let SQLAlchemy handle disconnects as they occur, at which point the pool is refreshed. This assumes the :class:`.Pool` is used in conjunction with a :class:`.Engine`. The :class:`.Engine` has logic which can detect disconnection events and refresh the pool automatically. When the :class:`.Connection` attempts to use a DBAPI connection, and an exception is raised that corresponds to a "disconnect" event, the connection is invalidated. The :class:`.Connection` then calls the :meth:`.Pool.recreate` method, effectively invalidating all connections not currently checked out so that they are replaced with new ones upon next checkout:: from sqlalchemy import create_engine, exc e = create_engine(...) c = e.connect() try: # suppose the database has been restarted. c.execute("SELECT * FROM table") c.close() except exc.DBAPIError, e: # an exception is raised, Connection is invalidated. if e.connection_invalidated: print "Connection was invalidated!" # after the invalidate event, a new connection # starts with a new Pool c = e.connect() c.execute("SELECT * FROM table") The above example illustrates that no special intervention is needed, the pool continues normally after a disconnection event is detected. However, an exception is raised. In a typical web application using an ORM Session, the above condition would correspond to a single request failing with a 500 error, then the web application continuing normally beyond that. Hence the approach is "optimistic" in that frequent database restarts are not anticipated. Setting Pool Recycle ~~~~~~~~~~~~~~~~~~~~~~~ An additional setting that can augment the "optimistic" approach is to set the pool recycle parameter. This parameter prevents the pool from using a particular connection that has passed a certain age, and is appropriate for database backends such as MySQL that automatically close connections that have been stale after a particular period of time:: from sqlalchemy import create_engine e = create_engine("mysql://scott:tiger@localhost/test", pool_recycle=3600) Above, any DBAPI connection that has been open for more than one hour will be invalidated and replaced, upon next checkout. Note that the invalidation **only** occurs during checkout - not on any connections that are held in a checked out state. ``pool_recycle`` is a function of the :class:`.Pool` itself, independent of whether or not an :class:`.Engine` is in use. 
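To illustrate that last point, here is a rough sketch of applying a recycle interval to a :class:`.Pool` constructed directly, with no :class:`.Engine` involved; the pool-level ``recycle`` argument corresponds to the ``pool_recycle`` argument of :func:`.create_engine`, and ``getconn`` is again the illustrative psycopg2 connection function from the earlier examples::

    import sqlalchemy.pool as pool
    import psycopg2

    def getconn():
        # illustrative connection function, as in the earlier examples
        return psycopg2.connect(username='ed', host='127.0.0.1', dbname='test')

    # connections more than one hour old are invalidated and replaced
    # upon the next checkout
    recycled_pool = pool.QueuePool(getconn, pool_size=5, recycle=3600)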
Disconnect Handling - Pessimistic ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ At the expense of some extra SQL emitted for each connection checked out from the pool, a "ping" operation established by a checkout event handler can detect an invalid connection before it's used:: from sqlalchemy import exc from sqlalchemy import event from sqlalchemy.pool import Pool @event.listens_for(Pool, "checkout") def ping_connection(dbapi_connection, connection_record, connection_proxy): cursor = dbapi_connection.cursor() try: cursor.execute("SELECT 1") except: # optional - dispose the whole pool # instead of invalidating one at a time # connection_proxy._pool.dispose() # raise DisconnectionError - pool will try # connecting again up to three times before raising. raise exc.DisconnectionError() cursor.close() Above, the :class:`.Pool` object specifically catches :class:`~sqlalchemy.exc.DisconnectionError` and attempts to create a new DBAPI connection, up to three times, before giving up and then raising :class:`~sqlalchemy.exc.InvalidRequestError`, failing the connection. This recipe will ensure that a new :class:`.Connection` will succeed even if connections in the pool have gone stale, provided that the database server is actually running. The expense is that of an additional execution performed per checkout. When using the ORM :class:`.Session`, there is one connection checkout per transaction, so the expense is fairly low. The ping approach above also works with straight connection pool usage, that is, even if no :class:`.Engine` were involved. The event handler can be tested using a script like the following, restarting the database server at the point at which the script pauses for input:: from sqlalchemy import create_engine e = create_engine("mysql://scott:tiger@localhost/test", echo_pool=True) c1 = e.connect() c2 = e.connect() c3 = e.connect() c1.close() c2.close() c3.close() # pool size is now three. print "Restart the server" raw_input() for i in xrange(10): c = e.connect() print c.execute("select 1").fetchall() c.close() API Documentation - Available Pool Implementations --------------------------------------------------- .. autoclass:: sqlalchemy.pool.Pool .. automethod:: __init__ .. automethod:: connect .. automethod:: dispose .. automethod:: recreate .. automethod:: unique_connection .. autoclass:: sqlalchemy.pool.QueuePool .. automethod:: __init__ .. automethod:: connect .. automethod:: unique_connection .. autoclass:: SingletonThreadPool .. automethod:: __init__ .. autoclass:: AssertionPool .. autoclass:: NullPool .. autoclass:: StaticPool Pooling Plain DB-API Connections -------------------------------- Any :pep:`249` DB-API module can be "proxied" through the connection pool transparently. Usage of the DB-API is exactly as before, except the ``connect()`` method will consult the pool. Below we illustrate this with ``psycopg2``:: import sqlalchemy.pool as pool import psycopg2 as psycopg psycopg = pool.manage(psycopg) # then connect normally connection = psycopg.connect(database='test', username='scott', password='tiger') This produces a :class:`_DBProxy` object which supports the same ``connect()`` function as the original DB-API module. Upon connection, a connection proxy object is returned, which delegates its calls to a real DB-API connection object. This connection object is stored persistently within a connection pool (an instance of :class:`.Pool`) that corresponds to the exact connection arguments sent to the ``connect()`` function. 
The connection proxy supports all of the methods on the original connection object, most of which are proxied via ``__getattr__()``. The ``close()`` method will return the connection to the pool, and the ``cursor()`` method will return a proxied cursor object. Both the connection proxy and the cursor proxy will also return the underlying connection to the pool after they have both been garbage collected, which is detected via weakref callbacks (``__del__`` is not used). Additionally, when connections are returned to the pool, a ``rollback()`` is issued on the connection unconditionally. This is to release any locks still held by the connection that may have resulted from normal activity. By default, the ``connect()`` method will return the same connection that is already checked out in the current thread. This allows a particular connection to be used in a given thread without needing to pass it around between functions. To disable this behavior, specify ``use_threadlocal=False`` to the ``manage()`` function. .. autofunction:: sqlalchemy.pool.manage .. autofunction:: sqlalchemy.pool.clear_managers SQLAlchemy-0.8.4/doc/build/core/reflection.rst0000644000076500000240000001633212251150015021706 0ustar classicstaff00000000000000.. module:: sqlalchemy.schema .. _metadata_reflection_toplevel: .. _metadata_reflection: Reflecting Database Objects =========================== A :class:`~sqlalchemy.schema.Table` object can be instructed to load information about itself from the corresponding database schema object already existing within the database. This process is called *reflection*. In the most simple case you need only specify the table name, a :class:`~sqlalchemy.schema.MetaData` object, and the ``autoload=True`` flag. If the :class:`~sqlalchemy.schema.MetaData` is not persistently bound, also add the ``autoload_with`` argument:: >>> messages = Table('messages', meta, autoload=True, autoload_with=engine) >>> [c.name for c in messages.columns] ['message_id', 'message_name', 'date'] The above operation will use the given engine to query the database for information about the ``messages`` table, and will then generate :class:`~sqlalchemy.schema.Column`, :class:`~sqlalchemy.schema.ForeignKey`, and other objects corresponding to this information as though the :class:`~sqlalchemy.schema.Table` object were hand-constructed in Python. When tables are reflected, if a given table references another one via foreign key, a second :class:`~sqlalchemy.schema.Table` object is created within the :class:`~sqlalchemy.schema.MetaData` object representing the connection. Below, assume the table ``shopping_cart_items`` references a table named ``shopping_carts``. Reflecting the ``shopping_cart_items`` table has the effect such that the ``shopping_carts`` table will also be loaded:: >>> shopping_cart_items = Table('shopping_cart_items', meta, autoload=True, autoload_with=engine) >>> 'shopping_carts' in meta.tables: True The :class:`~sqlalchemy.schema.MetaData` has an interesting "singleton-like" behavior such that if you requested both tables individually, :class:`~sqlalchemy.schema.MetaData` will ensure that exactly one :class:`~sqlalchemy.schema.Table` object is created for each distinct table name. The :class:`~sqlalchemy.schema.Table` constructor actually returns to you the already-existing :class:`~sqlalchemy.schema.Table` object if one already exists with the given name. 
Such as below, we can access the already generated ``shopping_carts`` table just by naming it:: shopping_carts = Table('shopping_carts', meta) Of course, it's a good idea to use ``autoload=True`` with the above table regardless. This is so that the table's attributes will be loaded if they have not been already. The autoload operation only occurs for the table if it hasn't already been loaded; once loaded, new calls to :class:`~sqlalchemy.schema.Table` with the same name will not re-issue any reflection queries. Overriding Reflected Columns ----------------------------- Individual columns can be overridden with explicit values when reflecting tables; this is handy for specifying custom datatypes, constraints such as primary keys that may not be configured within the database, etc.:: >>> mytable = Table('mytable', meta, ... Column('id', Integer, primary_key=True), # override reflected 'id' to have primary key ... Column('mydata', Unicode(50)), # override reflected 'mydata' to be Unicode ... autoload=True) Reflecting Views ----------------- The reflection system can also reflect views. Basic usage is the same as that of a table:: my_view = Table("some_view", metadata, autoload=True) Above, ``my_view`` is a :class:`~sqlalchemy.schema.Table` object with :class:`~sqlalchemy.schema.Column` objects representing the names and types of each column within the view "some_view". Usually, it's desired to have at least a primary key constraint when reflecting a view, if not foreign keys as well. View reflection doesn't extrapolate these constraints. Use the "override" technique for this, specifying explicitly those columns which are part of the primary key or have foreign key constraints:: my_view = Table("some_view", metadata, Column("view_id", Integer, primary_key=True), Column("related_thing", Integer, ForeignKey("othertable.thing_id")), autoload=True ) Reflecting All Tables at Once ----------------------------- The :class:`~sqlalchemy.schema.MetaData` object can also get a listing of tables and reflect the full set. This is achieved by using the :func:`~sqlalchemy.schema.MetaData.reflect` method. After calling it, all located tables are present within the :class:`~sqlalchemy.schema.MetaData` object's dictionary of tables:: meta = MetaData() meta.reflect(bind=someengine) users_table = meta.tables['users'] addresses_table = meta.tables['addresses'] ``metadata.reflect()`` also provides a handy way to clear or delete all the rows in a database:: meta = MetaData() meta.reflect(bind=someengine) for table in reversed(meta.sorted_tables): someengine.execute(table.delete()) Fine Grained Reflection with Inspector -------------------------------------- A low level interface which provides a backend-agnostic system of loading lists of schema, table, column, and constraint descriptions from a given database is also available. This is known as the "Inspector":: from sqlalchemy import create_engine from sqlalchemy.engine import reflection engine = create_engine('...') insp = reflection.Inspector.from_engine(engine) print insp.get_table_names() .. autoclass:: sqlalchemy.engine.reflection.Inspector :members: :undoc-members: Limitations of Reflection ------------------------- It's important to note that the reflection process recreates :class:`.Table` metadata using only information which is represented in the relational database. This process by definition cannot restore aspects of a schema that aren't actually stored in the database. 
State which is not available from reflection includes but is not limited to: * Client side defaults, either Python functions or SQL expressions defined using the ``default`` keyword of :class:`.Column` (note this is separate from ``server_default``, which specifically is what's available via reflection). * Column information, e.g. data that might have been placed into the :attr:`.Column.info` dictionary * The value of the ``.quote`` setting for :class:`.Column` or :class:`.Table` * The association of a particular :class:`.Sequence` with a given :class:`.Column` The relational database also in many cases reports on table metadata in a different format than what was specified in SQLAlchemy. The :class:`.Table` objects returned from reflection cannot always be relied upon to produce the identical DDL as the original Python-defined :class:`.Table` objects. Areas where this occurs include server defaults, column-associated sequences and various idiosyncrasies regarding constraints and datatypes. Server side defaults may be returned with cast directives (typically Postgresql will include a ``::`` cast) or different quoting patterns than originally specified. Another category of limitation includes schema structures for which reflection is only partially or not yet defined. Recent improvements to reflection allow things like views, indexes and foreign key options to be reflected. As of this writing, structures like CHECK constraints, table comments, and triggers are not reflected. SQLAlchemy-0.8.4/doc/build/core/schema.rst0000644000076500000240000000325012251147171021020 0ustar classicstaff00000000000000.. _schema_toplevel: ========================== Schema Definition Language ========================== .. module:: sqlalchemy.schema This section references SQLAlchemy **schema metadata**, a comprehensive system of describing and inspecting database schemas. The core of SQLAlchemy's query and object mapping operations are supported by *database metadata*, which is comprised of Python objects that describe tables and other schema-level objects. These objects are at the core of three major types of operations - issuing CREATE and DROP statements (known as *DDL*), constructing SQL queries, and expressing information about structures that already exist within the database. Database metadata can be expressed by explicitly naming the various components and their properties, using constructs such as :class:`~sqlalchemy.schema.Table`, :class:`~sqlalchemy.schema.Column`, :class:`~sqlalchemy.schema.ForeignKey` and :class:`~sqlalchemy.schema.Sequence`, all of which are imported from the ``sqlalchemy.schema`` package. It can also be generated by SQLAlchemy using a process called *reflection*, which means you start with a single object such as :class:`~sqlalchemy.schema.Table`, assign it a name, and then instruct SQLAlchemy to load all the additional information related to that name from a particular engine source. A key feature of SQLAlchemy's database metadata constructs is that they are designed to be used in a *declarative* style which closely resembles that of real DDL. They are therefore most intuitive to those who have some background in creating real schema generation scripts. ..
toctree:: :maxdepth: 1 metadata reflection defaults constraints ddl SQLAlchemy-0.8.4/doc/build/core/selectable.rst0000644000076500000240000000260312251150015021653 0ustar classicstaff00000000000000Selectables, Tables, FROM objects ================================= The term "selectable" refers to any object that rows can be selected from; in SQLAlchemy, these objects descend from :class:`.FromClause` and their distinguishing feature is their :attr:`.FromClause.c` attribute, which is a namespace of all the columns contained within the FROM clause (these elements are themselves :class:`.ColumnElement` subclasses). .. module:: sqlalchemy.sql.expression .. autofunction:: alias .. autofunction:: except_ .. autofunction:: except_all .. autofunction:: exists .. autofunction:: intersect .. autofunction:: intersect_all .. autofunction:: join .. autofunction:: outerjoin .. autofunction:: select .. autofunction:: subquery .. autofunction:: sqlalchemy.sql.expression.table .. autofunction:: union .. autofunction:: union_all .. autoclass:: Alias :members: :inherited-members: .. autoclass:: CompoundSelect :members: :inherited-members: .. autoclass:: CTE :members: :inherited-members: .. autoclass:: Executable :members: .. autoclass:: FromClause :members: .. autoclass:: Join :members: :inherited-members: .. autoclass:: ScalarSelect :members: .. autoclass:: Select :members: :inherited-members: .. autoclass:: Selectable :members: .. autoclass:: SelectBase :members: .. autoclass:: TableClause :members: :inherited-members: SQLAlchemy-0.8.4/doc/build/core/serializer.rst0000644000076500000240000000021312251147171021725 0ustar classicstaff00000000000000Expression Serializer Extension =============================== .. automodule:: sqlalchemy.ext.serializer :members: :undoc-members: SQLAlchemy-0.8.4/doc/build/core/sqla_engine_arch.png0000644000076500000240000006703512251147171023031 0ustar classicstaff00000000000000
`_ which is transparently persisted and refreshed from its underlying storage model. The other approaches it from the perspective of literal schema and SQL expression representations which are explicitly composed into messages consumed individually by the database. A successful application may be constructed using the Expression Language exclusively, though the application will need to define its own system of translating application concepts into individual database messages and from individual database result sets. Alternatively, an application constructed with the ORM may, in advanced scenarios, make occasional usage of the Expression Language directly in certain areas where specific database interactions are required. The following tutorial is in doctest format, meaning each ``>>>`` line represents something you can type at a Python command prompt, and the following text represents the expected return value. The tutorial has no prerequisites. Version Check ============= A quick check to verify that we are on at least **version 0.8** of SQLAlchemy: .. sourcecode:: pycon+sql >>> import sqlalchemy >>> sqlalchemy.__version__ # doctest:+SKIP 0.8.0 Connecting ========== For this tutorial we will use an in-memory-only SQLite database. This is an easy way to test things without needing to have an actual database defined anywhere. To connect we use :func:`~sqlalchemy.create_engine`: .. sourcecode:: pycon+sql >>> from sqlalchemy import create_engine >>> engine = create_engine('sqlite:///:memory:', echo=True) The ``echo`` flag is a shortcut to setting up SQLAlchemy logging, which is accomplished via Python's standard ``logging`` module. With it enabled, we'll see all the generated SQL produced. If you are working through this tutorial and want less output generated, set it to ``False``.
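As a minimal sketch of that logging-based alternative, the same SQL output can be enabled without the ``echo`` flag by configuring the ``sqlalchemy.engine`` logger through the standard ``logging`` module (the in-memory SQLite URL is reused from above)::

    import logging

    from sqlalchemy import create_engine

    # send log records to the console
    logging.basicConfig()

    # INFO level on the 'sqlalchemy.engine' logger emits the generated SQL,
    # roughly what echo=True does for an individual engine
    logging.getLogger('sqlalchemy.engine').setLevel(logging.INFO)

    engine = create_engine('sqlite:///:memory:')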
This tutorial will format the SQL behind a popup window so it doesn't get in our way; just click the "SQL" links to see what's being generated. Define and Create Tables ========================= The SQL Expression Language constructs its expressions in most cases against table columns. In SQLAlchemy, a column is most often represented by an object called :class:`~sqlalchemy.schema.Column`, and in all cases a :class:`~sqlalchemy.schema.Column` is associated with a :class:`~sqlalchemy.schema.Table`. A collection of :class:`~sqlalchemy.schema.Table` objects and their associated child objects is referred to as **database metadata**. In this tutorial we will explicitly lay out several :class:`~sqlalchemy.schema.Table` objects, but note that SA can also "import" whole sets of :class:`~sqlalchemy.schema.Table` objects automatically from an existing database (this process is called **table reflection**). We define our tables all within a catalog called :class:`~sqlalchemy.schema.MetaData`, using the :class:`~sqlalchemy.schema.Table` construct, which resembles regular SQL CREATE TABLE statements. We'll make two tables, one of which represents "users" in an application, and another which represents zero or more "email addreses" for each row in the "users" table: .. sourcecode:: pycon+sql >>> from sqlalchemy import Table, Column, Integer, String, MetaData, ForeignKey >>> metadata = MetaData() >>> users = Table('users', metadata, ... Column('id', Integer, primary_key=True), ... Column('name', String), ... Column('fullname', String), ... ) >>> addresses = Table('addresses', metadata, ... Column('id', Integer, primary_key=True), ... Column('user_id', None, ForeignKey('users.id')), ... Column('email_address', String, nullable=False) ... ) All about how to define :class:`~sqlalchemy.schema.Table` objects, as well as how to create them from an existing database automatically, is described in :ref:`metadata_toplevel`. Next, to tell the :class:`~sqlalchemy.schema.MetaData` we'd actually like to create our selection of tables for real inside the SQLite database, we use :func:`~sqlalchemy.schema.MetaData.create_all`, passing it the ``engine`` instance which points to our database. This will check for the presence of each table first before creating, so it's safe to call multiple times: .. sourcecode:: pycon+sql {sql}>>> metadata.create_all(engine) #doctest: +NORMALIZE_WHITESPACE PRAGMA table_info("users") () PRAGMA table_info("addresses") () CREATE TABLE users ( id INTEGER NOT NULL, name VARCHAR, fullname VARCHAR, PRIMARY KEY (id) ) () COMMIT CREATE TABLE addresses ( id INTEGER NOT NULL, user_id INTEGER, email_address VARCHAR NOT NULL, PRIMARY KEY (id), FOREIGN KEY(user_id) REFERENCES users (id) ) () COMMIT .. note:: Users familiar with the syntax of CREATE TABLE may notice that the VARCHAR columns were generated without a length; on SQLite and Postgresql, this is a valid datatype, but on others, it's not allowed. So if running this tutorial on one of those databases, and you wish to use SQLAlchemy to issue CREATE TABLE, a "length" may be provided to the :class:`~sqlalchemy.types.String` type as below:: Column('name', String(50)) The length field on :class:`~sqlalchemy.types.String`, as well as similar precision/scale fields available on :class:`~sqlalchemy.types.Integer`, :class:`~sqlalchemy.types.Numeric`, etc. are not referenced by SQLAlchemy other than when creating tables. 
Additionally, Firebird and Oracle require sequences to generate new primary key identifiers, and SQLAlchemy doesn't generate or assume these without being instructed. For that, you use the :class:`~sqlalchemy.schema.Sequence` construct:: from sqlalchemy import Sequence Column('id', Integer, Sequence('user_id_seq'), primary_key=True) A full, foolproof :class:`~sqlalchemy.schema.Table` is therefore:: users = Table('users', metadata, Column('id', Integer, Sequence('user_id_seq'), primary_key=True), Column('name', String(50)), Column('fullname', String(50)), Column('password', String(12)) ) We include this more verbose :class:`~.schema.Table` construct separately to highlight the difference between a minimal construct geared primarily towards in-Python usage only, versus one that will be used to emit CREATE TABLE statements on a particular set of backends with more stringent requirements. .. _coretutorial_insert_expressions: Insert Expressions ================== The first SQL expression we'll create is the :class:`~sqlalchemy.sql.expression.Insert` construct, which represents an INSERT statement. This is typically created relative to its target table:: >>> ins = users.insert() To see a sample of the SQL this construct produces, use the ``str()`` function:: >>> str(ins) 'INSERT INTO users (id, name, fullname) VALUES (:id, :name, :fullname)' Notice above that the INSERT statement names every column in the ``users`` table. This can be limited by using the ``values()`` method, which establishes the VALUES clause of the INSERT explicitly:: >>> ins = users.insert().values(name='jack', fullname='Jack Jones') >>> str(ins) 'INSERT INTO users (name, fullname) VALUES (:name, :fullname)' Above, while the ``values`` method limited the VALUES clause to just two columns, the actual data we placed in ``values`` didn't get rendered into the string; instead we got named bind parameters. As it turns out, our data *is* stored within our :class:`~sqlalchemy.sql.expression.Insert` construct, but it typically only comes out when the statement is actually executed; since the data consists of literal values, SQLAlchemy automatically generates bind parameters for them. We can peek at this data for now by looking at the compiled form of the statement:: >>> ins.compile().params #doctest: +NORMALIZE_WHITESPACE {'fullname': 'Jack Jones', 'name': 'jack'} Executing ========== The interesting part of an :class:`~sqlalchemy.sql.expression.Insert` is executing it. In this tutorial, we will generally focus on the most explicit method of executing a SQL construct, and later touch upon some "shortcut" ways to do it. The ``engine`` object we created is a repository for database connections capable of issuing SQL to the database. To acquire a connection, we use the ``connect()`` method:: >>> conn = engine.connect() >>> conn #doctest: +ELLIPSIS The :class:`~sqlalchemy.engine.Connection` object represents an actively checked out DBAPI connection resource. Lets feed it our :class:`~sqlalchemy.sql.expression.Insert` object and see what happens: .. sourcecode:: pycon+sql >>> result = conn.execute(ins) {opensql}INSERT INTO users (name, fullname) VALUES (?, ?) ('jack', 'Jack Jones') COMMIT So the INSERT statement was now issued to the database. Although we got positional "qmark" bind parameters instead of "named" bind parameters in the output. How come ? 
Because when executed, the :class:`~sqlalchemy.engine.Connection` used the SQLite **dialect** to help generate the statement; when we use the ``str()`` function, the statement isn't aware of this dialect, and falls back onto a default which uses named parameters. We can view this manually as follows: .. sourcecode:: pycon+sql >>> ins.bind = engine >>> str(ins) 'INSERT INTO users (name, fullname) VALUES (?, ?)' What about the ``result`` variable we got when we called ``execute()`` ? As the SQLAlchemy :class:`~sqlalchemy.engine.Connection` object references a DBAPI connection, the result, known as a :class:`~sqlalchemy.engine.ResultProxy` object, is analogous to the DBAPI cursor object. In the case of an INSERT, we can get important information from it, such as the primary key values which were generated from our statement: .. sourcecode:: pycon+sql >>> result.inserted_primary_key [1] The value of ``1`` was automatically generated by SQLite, but only because we did not specify the ``id`` column in our :class:`~sqlalchemy.sql.expression.Insert` statement; otherwise, our explicit value would have been used. In either case, SQLAlchemy always knows how to get at a newly generated primary key value, even though the method of generating them is different across different databases; each database's :class:`~sqlalchemy.engine.base.Dialect` knows the specific steps needed to determine the correct value (or values; note that ``inserted_primary_key`` returns a list so that it supports composite primary keys). Executing Multiple Statements ============================== Our insert example above was intentionally a little drawn out to show some various behaviors of expression language constructs. In the usual case, an :class:`~sqlalchemy.sql.expression.Insert` statement is usually compiled against the parameters sent to the ``execute()`` method on :class:`~sqlalchemy.engine.Connection`, so that there's no need to use the ``values`` keyword with :class:`~sqlalchemy.sql.expression.Insert`. Lets create a generic :class:`~sqlalchemy.sql.expression.Insert` statement again and use it in the "normal" way: .. sourcecode:: pycon+sql >>> ins = users.insert() >>> conn.execute(ins, id=2, name='wendy', fullname='Wendy Williams') # doctest: +ELLIPSIS {opensql}INSERT INTO users (id, name, fullname) VALUES (?, ?, ?) (2, 'wendy', 'Wendy Williams') COMMIT {stop} Above, because we specified all three columns in the ``execute()`` method, the compiled :class:`~.expression.Insert` included all three columns. The :class:`~.expression.Insert` statement is compiled at execution time based on the parameters we specified; if we specified fewer parameters, the :class:`~.expression.Insert` would have fewer entries in its VALUES clause. To issue many inserts using DBAPI's ``executemany()`` method, we can send in a list of dictionaries each containing a distinct set of parameters to be inserted, as we do here to add some email addresses: .. sourcecode:: pycon+sql >>> conn.execute(addresses.insert(), [ # doctest: +ELLIPSIS ... {'user_id': 1, 'email_address' : 'jack@yahoo.com'}, ... {'user_id': 1, 'email_address' : 'jack@msn.com'}, ... {'user_id': 2, 'email_address' : 'www@www.org'}, ... {'user_id': 2, 'email_address' : 'wendy@aol.com'}, ... ]) {opensql}INSERT INTO addresses (user_id, email_address) VALUES (?, ?) ((1, 'jack@yahoo.com'), (1, 'jack@msn.com'), (2, 'www@www.org'), (2, 'wendy@aol.com')) COMMIT {stop} Above, we again relied upon SQLite's automatic generation of primary key identifiers for each ``addresses`` row. 
When executing multiple sets of parameters, each dictionary must have the **same** set of keys; i.e. you cant have fewer keys in some dictionaries than others. This is because the :class:`~sqlalchemy.sql.expression.Insert` statement is compiled against the **first** dictionary in the list, and it's assumed that all subsequent argument dictionaries are compatible with that statement. .. _coretutorial_selecting: Selecting ========== We began with inserts just so that our test database had some data in it. The more interesting part of the data is selecting it ! We'll cover UPDATE and DELETE statements later. The primary construct used to generate SELECT statements is the :func:`.select` function: .. sourcecode:: pycon+sql >>> from sqlalchemy.sql import select >>> s = select([users]) >>> result = conn.execute(s) # doctest: +NORMALIZE_WHITESPACE {opensql}SELECT users.id, users.name, users.fullname FROM users () Above, we issued a basic :func:`.select` call, placing the ``users`` table within the COLUMNS clause of the select, and then executing. SQLAlchemy expanded the ``users`` table into the set of each of its columns, and also generated a FROM clause for us. The result returned is again a :class:`~sqlalchemy.engine.ResultProxy` object, which acts much like a DBAPI cursor, including methods such as :func:`~sqlalchemy.engine.ResultProxy.fetchone` and :func:`~sqlalchemy.engine.ResultProxy.fetchall`. The easiest way to get rows from it is to just iterate: .. sourcecode:: pycon+sql >>> for row in result: ... print row (1, u'jack', u'Jack Jones') (2, u'wendy', u'Wendy Williams') Above, we see that printing each row produces a simple tuple-like result. We have more options at accessing the data in each row. One very common way is through dictionary access, using the string names of columns: .. sourcecode:: pycon+sql {sql}>>> result = conn.execute(s) # doctest: +NORMALIZE_WHITESPACE SELECT users.id, users.name, users.fullname FROM users () {stop}>>> row = result.fetchone() >>> print "name:", row['name'], "; fullname:", row['fullname'] name: jack ; fullname: Jack Jones Integer indexes work as well: .. sourcecode:: pycon+sql >>> row = result.fetchone() >>> print "name:", row[1], "; fullname:", row[2] name: wendy ; fullname: Wendy Williams But another way, whose usefulness will become apparent later on, is to use the :class:`~sqlalchemy.schema.Column` objects directly as keys: .. sourcecode:: pycon+sql {sql}>>> for row in conn.execute(s): # doctest: +NORMALIZE_WHITESPACE ... print "name:", row[users.c.name], "; fullname:", row[users.c.fullname] SELECT users.id, users.name, users.fullname FROM users () {stop}name: jack ; fullname: Jack Jones name: wendy ; fullname: Wendy Williams Result sets which have pending rows remaining should be explicitly closed before discarding. While the cursor and connection resources referenced by the :class:`~sqlalchemy.engine.ResultProxy` will be respectively closed and returned to the connection pool when the object is garbage collected, it's better to make it explicit as some database APIs are very picky about such things: .. sourcecode:: pycon+sql >>> result.close() If we'd like to more carefully control the columns which are placed in the COLUMNS clause of the select, we reference individual :class:`~sqlalchemy.schema.Column` objects from our :class:`~sqlalchemy.schema.Table`. These are available as named attributes off the ``c`` attribute of the :class:`~sqlalchemy.schema.Table` object: .. 
sourcecode:: pycon+sql >>> s = select([users.c.name, users.c.fullname]) {sql}>>> result = conn.execute(s) # doctest: +NORMALIZE_WHITESPACE SELECT users.name, users.fullname FROM users () {stop}>>> for row in result: #doctest: +NORMALIZE_WHITESPACE ... print row (u'jack', u'Jack Jones') (u'wendy', u'Wendy Williams') Lets observe something interesting about the FROM clause. Whereas the generated statement contains two distinct sections, a "SELECT columns" part and a "FROM table" part, our :func:`.select` construct only has a list containing columns. How does this work ? Let's try putting *two* tables into our :func:`.select` statement: .. sourcecode:: pycon+sql {sql}>>> for row in conn.execute(select([users, addresses])): ... print row # doctest: +NORMALIZE_WHITESPACE SELECT users.id, users.name, users.fullname, addresses.id, addresses.user_id, addresses.email_address FROM users, addresses () {stop}(1, u'jack', u'Jack Jones', 1, 1, u'jack@yahoo.com') (1, u'jack', u'Jack Jones', 2, 1, u'jack@msn.com') (1, u'jack', u'Jack Jones', 3, 2, u'www@www.org') (1, u'jack', u'Jack Jones', 4, 2, u'wendy@aol.com') (2, u'wendy', u'Wendy Williams', 1, 1, u'jack@yahoo.com') (2, u'wendy', u'Wendy Williams', 2, 1, u'jack@msn.com') (2, u'wendy', u'Wendy Williams', 3, 2, u'www@www.org') (2, u'wendy', u'Wendy Williams', 4, 2, u'wendy@aol.com') It placed **both** tables into the FROM clause. But also, it made a real mess. Those who are familiar with SQL joins know that this is a **Cartesian product**; each row from the ``users`` table is produced against each row from the ``addresses`` table. So to put some sanity into this statement, we need a WHERE clause. We do that using :meth:`.Select.where`: .. sourcecode:: pycon+sql >>> s = select([users, addresses]).where(users.c.id == addresses.c.user_id) {sql}>>> for row in conn.execute(s): ... print row # doctest: +NORMALIZE_WHITESPACE SELECT users.id, users.name, users.fullname, addresses.id, addresses.user_id, addresses.email_address FROM users, addresses WHERE users.id = addresses.user_id () {stop}(1, u'jack', u'Jack Jones', 1, 1, u'jack@yahoo.com') (1, u'jack', u'Jack Jones', 2, 1, u'jack@msn.com') (2, u'wendy', u'Wendy Williams', 3, 2, u'www@www.org') (2, u'wendy', u'Wendy Williams', 4, 2, u'wendy@aol.com') So that looks a lot better, we added an expression to our :func:`.select` which had the effect of adding ``WHERE users.id = addresses.user_id`` to our statement, and our results were managed down so that the join of ``users`` and ``addresses`` rows made sense. But let's look at that expression? It's using just a Python equality operator between two different :class:`~sqlalchemy.schema.Column` objects. It should be clear that something is up. Saying ``1 == 1`` produces ``True``, and ``1 == 2`` produces ``False``, not a WHERE clause. So lets see exactly what that expression is doing: .. sourcecode:: pycon+sql >>> users.c.id == addresses.c.user_id #doctest: +ELLIPSIS Wow, surprise ! This is neither a ``True`` nor a ``False``. Well what is it ? .. sourcecode:: pycon+sql >>> str(users.c.id == addresses.c.user_id) 'users.id = addresses.user_id' As you can see, the ``==`` operator is producing an object that is very much like the :class:`~.expression.Insert` and :func:`.select` objects we've made so far, thanks to Python's ``__eq__()`` builtin; you call ``str()`` on it and it produces SQL. By now, one can see that everything we are working with is ultimately the same type of object. 
SQLAlchemy terms the base class of all of these expressions as :class:`~.expression.ColumnElement`. Operators ========== Since we've stumbled upon SQLAlchemy's operator paradigm, let's go through some of its capabilities. We've seen how to equate two columns to each other: .. sourcecode:: pycon+sql >>> print users.c.id == addresses.c.user_id users.id = addresses.user_id If we use a literal value (a literal meaning, not a SQLAlchemy clause object), we get a bind parameter: .. sourcecode:: pycon+sql >>> print users.c.id == 7 users.id = :id_1 The ``7`` literal is embedded the resulting :class:`~.expression.ColumnElement`; we can use the same trick we did with the :class:`~sqlalchemy.sql.expression.Insert` object to see it: .. sourcecode:: pycon+sql >>> (users.c.id == 7).compile().params {u'id_1': 7} Most Python operators, as it turns out, produce a SQL expression here, like equals, not equals, etc.: .. sourcecode:: pycon+sql >>> print users.c.id != 7 users.id != :id_1 >>> # None converts to IS NULL >>> print users.c.name == None users.name IS NULL >>> # reverse works too >>> print 'fred' > users.c.name users.name < :name_1 If we add two integer columns together, we get an addition expression: .. sourcecode:: pycon+sql >>> print users.c.id + addresses.c.id users.id + addresses.id Interestingly, the type of the :class:`~sqlalchemy.schema.Column` is important! If we use ``+`` with two string based columns (recall we put types like :class:`~sqlalchemy.types.Integer` and :class:`~sqlalchemy.types.String` on our :class:`~sqlalchemy.schema.Column` objects at the beginning), we get something different: .. sourcecode:: pycon+sql >>> print users.c.name + users.c.fullname users.name || users.fullname Where ``||`` is the string concatenation operator used on most databases. But not all of them. MySQL users, fear not: .. sourcecode:: pycon+sql >>> print (users.c.name + users.c.fullname).\ ... compile(bind=create_engine('mysql://')) concat(users.name, users.fullname) The above illustrates the SQL that's generated for an :class:`~sqlalchemy.engine.Engine` that's connected to a MySQL database; the ``||`` operator now compiles as MySQL's ``concat()`` function. If you have come across an operator which really isn't available, you can always use the :meth:`.ColumnOperators.op` method; this generates whatever operator you need: .. sourcecode:: pycon+sql >>> print users.c.name.op('tiddlywinks')('foo') users.name tiddlywinks :name_1 This function can also be used to make bitwise operators explicit. For example:: somecolumn.op('&')(0xff) is a bitwise AND of the value in `somecolumn`. Operator Customization ----------------------- While :meth:`.ColumnOperators.op` is handy to get at a custom operator in a hurry, the Core supports fundamental customization and extension of the operator system at the type level. The behavior of existing operators can be modified on a per-type basis, and new operations can be defined which become available for all column expressions that are part of that particular type. See the section :ref:`types_operators` for a description. Conjunctions ============= We'd like to show off some of our operators inside of :func:`.select` constructs. But we need to lump them together a little more, so let's first introduce some conjunctions. Conjunctions are those little words like AND and OR that put things together. We'll also hit upon NOT. 
:func:`.and_`, :func:`.or_`, and :func:`.not_` can work from the corresponding functions SQLAlchemy provides (notice we also throw in a :meth:`~.ColumnOperators.like`): .. sourcecode:: pycon+sql >>> from sqlalchemy.sql import and_, or_, not_ >>> print and_( ... users.c.name.like('j%'), ... users.c.id == addresses.c.user_id, #doctest: +NORMALIZE_WHITESPACE ... or_( ... addresses.c.email_address == 'wendy@aol.com', ... addresses.c.email_address == 'jack@yahoo.com' ... ), ... not_(users.c.id > 5) ... ) users.name LIKE :name_1 AND users.id = addresses.user_id AND (addresses.email_address = :email_address_1 OR addresses.email_address = :email_address_2) AND users.id <= :id_1 And you can also use the re-jiggered bitwise AND, OR and NOT operators, although because of Python operator precedence you have to watch your parenthesis: .. sourcecode:: pycon+sql >>> print users.c.name.like('j%') & (users.c.id == addresses.c.user_id) & \ ... ( ... (addresses.c.email_address == 'wendy@aol.com') | \ ... (addresses.c.email_address == 'jack@yahoo.com') ... ) \ ... & ~(users.c.id>5) # doctest: +NORMALIZE_WHITESPACE users.name LIKE :name_1 AND users.id = addresses.user_id AND (addresses.email_address = :email_address_1 OR addresses.email_address = :email_address_2) AND users.id <= :id_1 So with all of this vocabulary, let's select all users who have an email address at AOL or MSN, whose name starts with a letter between "m" and "z", and we'll also generate a column containing their full name combined with their email address. We will add two new constructs to this statement, :meth:`~.ColumnOperators.between` and :meth:`~.ColumnElement.label`. :meth:`~.ColumnOperators.between` produces a BETWEEN clause, and :meth:`~.ColumnElement.label` is used in a column expression to produce labels using the ``AS`` keyword; it's recommended when selecting from expressions that otherwise would not have a name: .. sourcecode:: pycon+sql >>> s = select([(users.c.fullname + ... ", " + addresses.c.email_address). ... label('title')]).\ ... where( ... and_( ... users.c.id == addresses.c.user_id, ... users.c.name.between('m', 'z'), ... or_( ... addresses.c.email_address.like('%@aol.com'), ... addresses.c.email_address.like('%@msn.com') ... ) ... ) ... ) >>> conn.execute(s).fetchall() #doctest: +NORMALIZE_WHITESPACE SELECT users.fullname || ? || addresses.email_address AS title FROM users, addresses WHERE users.id = addresses.user_id AND users.name BETWEEN ? AND ? AND (addresses.email_address LIKE ? OR addresses.email_address LIKE ?) (', ', 'm', 'z', '%@aol.com', '%@msn.com') [(u'Wendy Williams, wendy@aol.com',)] Once again, SQLAlchemy figured out the FROM clause for our statement. In fact it will determine the FROM clause based on all of its other bits; the columns clause, the where clause, and also some other elements which we haven't covered yet, which include ORDER BY, GROUP BY, and HAVING. A shortcut to using :func:`.and_` is to chain together multiple :meth:`~.Select.where` clauses. The above can also be written as: .. sourcecode:: pycon+sql >>> s = select([(users.c.fullname + ... ", " + addresses.c.email_address). ... label('title')]).\ ... where(users.c.id == addresses.c.user_id).\ ... where(users.c.name.between('m', 'z')).\ ... where( ... or_( ... addresses.c.email_address.like('%@aol.com'), ... addresses.c.email_address.like('%@msn.com') ... ) ... ) >>> conn.execute(s).fetchall() #doctest: +NORMALIZE_WHITESPACE SELECT users.fullname || ? 
|| addresses.email_address AS title FROM users, addresses WHERE users.id = addresses.user_id AND users.name BETWEEN ? AND ? AND (addresses.email_address LIKE ? OR addresses.email_address LIKE ?) (', ', 'm', 'z', '%@aol.com', '%@msn.com') [(u'Wendy Williams, wendy@aol.com',)] The way that we can build up a :func:`.select` construct through successive method calls is called :term:`method chaining`. .. _sqlexpression_text: Using Text =========== Our last example really became a handful to type. Going from what one understands to be a textual SQL expression into a Python construct which groups components together in a programmatic style can be hard. That's why SQLAlchemy lets you just use strings too. The :func:`~.expression.text` construct represents any textual statement, in a backend-agnostic way. To use bind parameters with :func:`~.expression.text`, always use the named colon format. Such as below, we create a :func:`~.expression.text` and execute it, feeding in the bind parameters to the :meth:`~.Connection.execute` method: .. sourcecode:: pycon+sql >>> from sqlalchemy.sql import text >>> s = text( ... "SELECT users.fullname || ', ' || addresses.email_address AS title " ... "FROM users, addresses " ... "WHERE users.id = addresses.user_id " ... "AND users.name BETWEEN :x AND :y " ... "AND (addresses.email_address LIKE :e1 " ... "OR addresses.email_address LIKE :e2)") {sql}>>> conn.execute(s, x='m', y='z', e1='%@aol.com', e2='%@msn.com').fetchall() # doctest:+NORMALIZE_WHITESPACE SELECT users.fullname || ', ' || addresses.email_address AS title FROM users, addresses WHERE users.id = addresses.user_id AND users.name BETWEEN ? AND ? AND (addresses.email_address LIKE ? OR addresses.email_address LIKE ?) ('m', 'z', '%@aol.com', '%@msn.com') {stop}[(u'Wendy Williams, wendy@aol.com',)] To gain a "hybrid" approach, the :func:`.select` construct accepts strings for most of its arguments. Below we combine the usage of strings with our constructed :func:`.select` object, by using the :func:`.select` object to structure the statement, and strings to provide all the content within the structure. For this example, SQLAlchemy is not given any :class:`~sqlalchemy.schema.Column` or :class:`~sqlalchemy.schema.Table` objects in any of its expressions, so it cannot generate a FROM clause. So we also use the :meth:`~.Select.select_from` method, which accepts a :class:`.FromClause` or string expression to be placed within the FROM clause: .. sourcecode:: pycon+sql >>> s = select([ ... "users.fullname || ', ' || addresses.email_address AS title" ... ]).\ ... where( ... and_( ... "users.id = addresses.user_id", ... "users.name BETWEEN 'm' AND 'z'", ... "(addresses.email_address LIKE :x OR addresses.email_address LIKE :y)" ... ) ... ).select_from('users, addresses') {sql}>>> conn.execute(s, x='%@aol.com', y='%@msn.com').fetchall() #doctest: +NORMALIZE_WHITESPACE SELECT users.fullname || ', ' || addresses.email_address AS title FROM users, addresses WHERE users.id = addresses.user_id AND users.name BETWEEN 'm' AND 'z' AND (addresses.email_address LIKE ? OR addresses.email_address LIKE ?) ('%@aol.com', '%@msn.com') {stop}[(u'Wendy Williams, wendy@aol.com',)] Going from constructed SQL to text, we lose some capabilities. We lose the capability for SQLAlchemy to compile our expression to a specific target database; above, our expression won't work with MySQL since it has no ``||`` construct. 
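As a quick illustration of that point (a sketch only; it reuses the ``users`` and ``addresses`` tables and the :func:`~.expression.text` import from above, and compiles against dialect objects directly so that no database driver is required), the constructed expression adapts itself to the target backend, while the textual version is emitted exactly as written::

    from sqlalchemy.dialects import mysql, sqlite

    expr = users.c.fullname + ", " + addresses.c.email_address

    # the constructed expression compiles to MySQL's concat() function...
    print expr.compile(dialect=mysql.dialect())

    # ...and to the || operator for SQLite
    print expr.compile(dialect=sqlite.dialect())

    # the text() version is passed through unchanged on any backend
    print text("users.fullname || ', ' || addresses.email_address").compile(
        dialect=mysql.dialect())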
It also becomes more tedious for SQLAlchemy to be made aware of the datatypes in use; for example, if our bind parameters required UTF-8 encoding before going in, or conversion from a Python ``datetime`` into a string (as is required with SQLite), we would have to add extra information to our :func:`~.expression.text` construct. Similar issues arise on the result set side, where SQLAlchemy also performs type-specific data conversion in some cases; still more information can be added to :func:`~.expression.text` to work around this. But what we really lose from our statement is the ability to manipulate it, transform it, and analyze it. These features are critical when using the ORM, which makes heavy usage of relational transformations. To show off what we mean, we'll first introduce the ALIAS construct and the JOIN construct, just so we have some juicier bits to play with. Using Aliases ============== The alias in SQL corresponds to a "renamed" version of a table or SELECT statement, which occurs anytime you say "SELECT .. FROM sometable AS someothername". The ``AS`` creates a new name for the table. Aliases are a key construct as they allow any table or subquery to be referenced by a unique name. In the case of a table, this allows the same table to be named in the FROM clause multiple times. In the case of a SELECT statement, it provides a parent name for the columns represented by the statement, allowing them to be referenced relative to this name. In SQLAlchemy, any :class:`.Table`, :func:`.select` construct, or other selectable can be turned into an alias using the :meth:`.FromClause.alias` method, which produces a :class:`.Alias` construct. As an example, suppose we know that our user ``jack`` has two particular email addresses. How can we locate jack based on the combination of those two addresses? To accomplish this, we'd use a join to the ``addresses`` table, once for each address. We create two :class:`.Alias` constructs against ``addresses``, and then use them both within a :func:`.select` construct: .. sourcecode:: pycon+sql >>> a1 = addresses.alias() >>> a2 = addresses.alias() >>> s = select([users]).\ ... where(and_( ... users.c.id == a1.c.user_id, ... users.c.id == a2.c.user_id, ... a1.c.email_address == 'jack@msn.com', ... a2.c.email_address == 'jack@yahoo.com' ... )) {sql}>>> conn.execute(s).fetchall() # doctest: +NORMALIZE_WHITESPACE SELECT users.id, users.name, users.fullname FROM users, addresses AS addresses_1, addresses AS addresses_2 WHERE users.id = addresses_1.user_id AND users.id = addresses_2.user_id AND addresses_1.email_address = ? AND addresses_2.email_address = ? ('jack@msn.com', 'jack@yahoo.com') {stop}[(1, u'jack', u'Jack Jones')] Note that the :class:`.Alias` construct generated the names ``addresses_1`` and ``addresses_2`` in the final SQL result. The generation of these names is determined by the position of the construct within the statement. If we created a query using only the second ``a2`` alias, the name would come out as ``addresses_1``. The generation of the names is also *deterministic*, meaning the same SQLAlchemy statement construct will produce the identical SQL string each time it is rendered for a particular dialect. Since on the outside, we refer to the alias using the :class:`.Alias` construct itself, we don't need to be concerned about the generated name. 
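For example (a short sketch, continuing with the ``a1`` alias created above), columns are always referenced from the alias's own ``c`` collection, and the anonymous name only appears once the expression is rendered::

    # a1 came from addresses.alias(); its columns mirror those of addresses,
    # and typically render with the anonymous name, e.g. "addresses_1.email_address"
    print a1.c.email_address

    # selecting from the alias works just like selecting from the table itself
    print select([a1.c.email_address]).where(a1.c.user_id == users.c.id)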
However, for the purposes of debugging, it can be specified by passing a string name to the :meth:`.FromClause.alias` method:: >>> a1 = addresses.alias('a1') Aliases can of course be used for anything which you can SELECT from, including SELECT statements themselves. We can self-join the ``users`` table back to the :func:`.select` we've created by making an alias of the entire statement. The ``correlate(None)`` directive is to avoid SQLAlchemy's attempt to "correlate" the inner ``users`` table with the outer one: .. sourcecode:: pycon+sql >>> a1 = s.correlate(None).alias() >>> s = select([users.c.name]).where(users.c.id == a1.c.id) {sql}>>> conn.execute(s).fetchall() # doctest: +NORMALIZE_WHITESPACE SELECT users.name FROM users, (SELECT users.id AS id, users.name AS name, users.fullname AS fullname FROM users, addresses AS addresses_1, addresses AS addresses_2 WHERE users.id = addresses_1.user_id AND users.id = addresses_2.user_id AND addresses_1.email_address = ? AND addresses_2.email_address = ?) AS anon_1 WHERE users.id = anon_1.id ('jack@msn.com', 'jack@yahoo.com') {stop}[(u'jack',)] Using Joins ============ We're halfway along to being able to construct any SELECT expression. The next cornerstone of the SELECT is the JOIN expression. We've already been doing joins in our examples, by just placing two tables in either the columns clause or the where clause of the :func:`.select` construct. But if we want to make a real "JOIN" or "OUTERJOIN" construct, we use the :meth:`~.FromClause.join` and :meth:`~.FromClause.outerjoin` methods, most commonly accessed from the left table in the join: .. sourcecode:: pycon+sql >>> print users.join(addresses) users JOIN addresses ON users.id = addresses.user_id The alert reader will see more surprises; SQLAlchemy figured out how to JOIN the two tables ! The ON condition of the join, as it's called, was automatically generated based on the :class:`~sqlalchemy.schema.ForeignKey` object which we placed on the ``addresses`` table way at the beginning of this tutorial. Already the ``join()`` construct is looking like a much better way to join tables. Of course you can join on whatever expression you want, such as if we want to join on all users who use the same name in their email address as their username: .. sourcecode:: pycon+sql >>> print users.join(addresses, ... addresses.c.email_address.like(users.c.name + '%') ... ) users JOIN addresses ON addresses.email_address LIKE (users.name || :name_1) When we create a :func:`.select` construct, SQLAlchemy looks around at the tables we've mentioned and then places them in the FROM clause of the statement. When we use JOINs however, we know what FROM clause we want, so here we make use of the :meth:`~.Select.select_from` method: .. sourcecode:: pycon+sql >>> s = select([users.c.fullname]).select_from( ... users.join(addresses, ... addresses.c.email_address.like(users.c.name + '%')) ... ) {sql}>>> conn.execute(s).fetchall() # doctest: +NORMALIZE_WHITESPACE SELECT users.fullname FROM users JOIN addresses ON addresses.email_address LIKE (users.name || ?) ('%',) {stop}[(u'Jack Jones',), (u'Jack Jones',), (u'Wendy Williams',)] The :meth:`~.FromClause.outerjoin` method creates ``LEFT OUTER JOIN`` constructs, and is used in the same way as :meth:`~.FromClause.join`: .. 
sourcecode:: pycon+sql >>> s = select([users.c.fullname]).select_from(users.outerjoin(addresses)) >>> print s # doctest: +NORMALIZE_WHITESPACE SELECT users.fullname FROM users LEFT OUTER JOIN addresses ON users.id = addresses.user_id That's the output ``outerjoin()`` produces, unless, of course, you're stuck in a gig using Oracle prior to version 9, and you've set up your engine (which would be using ``OracleDialect``) to use Oracle-specific SQL: .. sourcecode:: pycon+sql >>> from sqlalchemy.dialects.oracle import dialect as OracleDialect >>> print s.compile(dialect=OracleDialect(use_ansi=False)) # doctest: +NORMALIZE_WHITESPACE SELECT users.fullname FROM users, addresses WHERE users.id = addresses.user_id(+) If you don't know what that SQL means, don't worry ! The secret tribe of Oracle DBAs don't want their black magic being found out ;). Everything Else ================ The concepts of creating SQL expressions have been introduced. What's left are more variants of the same themes. So now we'll catalog the rest of the important things we'll need to know. Bind Parameter Objects ---------------------- Throughout all these examples, SQLAlchemy is busy creating bind parameters wherever literal expressions occur. You can also specify your own bind parameters with your own names, and use the same statement repeatedly. The database dialect converts to the appropriate named or positional style, as here where it converts to positional for SQLite: .. sourcecode:: pycon+sql >>> from sqlalchemy.sql import bindparam >>> s = users.select(users.c.name == bindparam('username')) {sql}>>> conn.execute(s, username='wendy').fetchall() # doctest: +NORMALIZE_WHITESPACE SELECT users.id, users.name, users.fullname FROM users WHERE users.name = ? ('wendy',) {stop}[(2, u'wendy', u'Wendy Williams')] Another important aspect of bind parameters is that they may be assigned a type. The type of the bind parameter will determine its behavior within expressions and also how the data bound to it is processed before being sent off to the database: .. sourcecode:: pycon+sql >>> s = users.select(users.c.name.like(bindparam('username', type_=String) + text("'%'"))) {sql}>>> conn.execute(s, username='wendy').fetchall() # doctest: +NORMALIZE_WHITESPACE SELECT users.id, users.name, users.fullname FROM users WHERE users.name LIKE (? || '%') ('wendy',) {stop}[(2, u'wendy', u'Wendy Williams')] Bind parameters of the same name can also be used multiple times, where only a single named value is needed in the execute parameters: .. sourcecode:: pycon+sql >>> s = select([users, addresses]).\ ... where( ... or_( ... users.c.name.like( ... bindparam('name', type_=String) + text("'%'")), ... addresses.c.email_address.like( ... bindparam('name', type_=String) + text("'@%'")) ... ) ... ).\ ... select_from(users.outerjoin(addresses)).\ ... order_by(addresses.c.id) {sql}>>> conn.execute(s, name='jack').fetchall() # doctest: +NORMALIZE_WHITESPACE SELECT users.id, users.name, users.fullname, addresses.id, addresses.user_id, addresses.email_address FROM users LEFT OUTER JOIN addresses ON users.id = addresses.user_id WHERE users.name LIKE (? || '%') OR addresses.email_address LIKE (? || '@%') ORDER BY addresses.id ('jack', 'jack') {stop}[(1, u'jack', u'Jack Jones', 1, 1, u'jack@yahoo.com'), (1, u'jack', u'Jack Jones', 2, 1, u'jack@msn.com')] Functions --------- SQL functions are created using the :data:`~.expression.func` keyword, which generates functions using attribute access: .. 
sourcecode:: pycon+sql >>> from sqlalchemy.sql import func >>> print func.now() now() >>> print func.concat('x', 'y') concat(:param_1, :param_2) By "generates", we mean that **any** SQL function is created based on the word you choose:: >>> print func.xyz_my_goofy_function() # doctest: +NORMALIZE_WHITESPACE xyz_my_goofy_function() Certain function names are known by SQLAlchemy, allowing special behavioral rules to be applied. Some for example are "ANSI" functions, which mean they don't get the parenthesis added after them, such as CURRENT_TIMESTAMP: .. sourcecode:: pycon+sql >>> print func.current_timestamp() CURRENT_TIMESTAMP Functions are most typically used in the columns clause of a select statement, and can also be labeled as well as given a type. Labeling a function is recommended so that the result can be targeted in a result row based on a string name, and assigning it a type is required when you need result-set processing to occur, such as for Unicode conversion and date conversions. Below, we use the result function ``scalar()`` to just read the first column of the first row and then close the result; the label, even though present, is not important in this case: .. sourcecode:: pycon+sql >>> conn.execute( ... select([ ... func.max(addresses.c.email_address, type_=String). ... label('maxemail') ... ]) ... ).scalar() # doctest: +NORMALIZE_WHITESPACE {opensql}SELECT max(addresses.email_address) AS maxemail FROM addresses () {stop}u'www@www.org' Databases such as PostgreSQL and Oracle which support functions that return whole result sets can be assembled into selectable units, which can be used in statements. Such as, a database function ``calculate()`` which takes the parameters ``x`` and ``y``, and returns three columns which we'd like to name ``q``, ``z`` and ``r``, we can construct using "lexical" column objects as well as bind parameters: .. sourcecode:: pycon+sql >>> from sqlalchemy.sql import column >>> calculate = select([column('q'), column('z'), column('r')]).\ ... select_from( ... func.calculate( ... bindparam('x'), ... bindparam('y') ... ) ... ) >>> calc = calculate.alias() >>> print select([users]).where(users.c.id > calc.c.z) # doctest: +NORMALIZE_WHITESPACE SELECT users.id, users.name, users.fullname FROM users, (SELECT q, z, r FROM calculate(:x, :y)) AS anon_1 WHERE users.id > anon_1.z If we wanted to use our ``calculate`` statement twice with different bind parameters, the :func:`~sqlalchemy.sql.expression.ClauseElement.unique_params` function will create copies for us, and mark the bind parameters as "unique" so that conflicting names are isolated. Note we also make two separate aliases of our selectable: .. sourcecode:: pycon+sql >>> calc1 = calculate.alias('c1').unique_params(x=17, y=45) >>> calc2 = calculate.alias('c2').unique_params(x=5, y=12) >>> s = select([users]).\ ... where(users.c.id.between(calc1.c.z, calc2.c.z)) >>> print s # doctest: +NORMALIZE_WHITESPACE SELECT users.id, users.name, users.fullname FROM users, (SELECT q, z, r FROM calculate(:x_1, :y_1)) AS c1, (SELECT q, z, r FROM calculate(:x_2, :y_2)) AS c2 WHERE users.id BETWEEN c1.z AND c2.z >>> s.compile().params {u'x_2': 5, u'y_2': 12, u'y_1': 45, u'x_1': 17} Window Functions ----------------- Any :class:`.FunctionElement`, including functions generated by :data:`~.expression.func`, can be turned into a "window function", that is an OVER clause, using the :meth:`~.FunctionElement.over` method: .. sourcecode:: pycon+sql >>> s = select([ ... users.c.id, ... 
func.row_number().over(order_by=users.c.name) ... ]) >>> print s # doctest: +NORMALIZE_WHITESPACE SELECT users.id, row_number() OVER (ORDER BY users.name) AS anon_1 FROM users Unions and Other Set Operations ------------------------------- Unions come in two flavors, UNION and UNION ALL, which are available via module level functions :func:`~.expression.union` and :func:`~.expression.union_all`: .. sourcecode:: pycon+sql >>> from sqlalchemy.sql import union >>> u = union( ... addresses.select(). ... where(addresses.c.email_address == 'foo@bar.com'), ... addresses.select(). ... where(addresses.c.email_address.like('%@yahoo.com')), ... ).order_by(addresses.c.email_address) {sql}>>> conn.execute(u).fetchall() # doctest: +NORMALIZE_WHITESPACE SELECT addresses.id, addresses.user_id, addresses.email_address FROM addresses WHERE addresses.email_address = ? UNION SELECT addresses.id, addresses.user_id, addresses.email_address FROM addresses WHERE addresses.email_address LIKE ? ORDER BY addresses.email_address ('foo@bar.com', '%@yahoo.com') {stop}[(1, 1, u'jack@yahoo.com')] Also available, though not supported on all databases, are :func:`~.expression.intersect`, :func:`~.expression.intersect_all`, :func:`~.expression.except_`, and :func:`~.expression.except_all`: .. sourcecode:: pycon+sql >>> from sqlalchemy.sql import except_ >>> u = except_( ... addresses.select(). ... where(addresses.c.email_address.like('%@%.com')), ... addresses.select(). ... where(addresses.c.email_address.like('%@msn.com')) ... ) {sql}>>> conn.execute(u).fetchall() # doctest: +NORMALIZE_WHITESPACE SELECT addresses.id, addresses.user_id, addresses.email_address FROM addresses WHERE addresses.email_address LIKE ? EXCEPT SELECT addresses.id, addresses.user_id, addresses.email_address FROM addresses WHERE addresses.email_address LIKE ? ('%@%.com', '%@msn.com') {stop}[(1, 1, u'jack@yahoo.com'), (4, 2, u'wendy@aol.com')] A common issue with so-called "compound" selectables arises due to the fact that they nest with parenthesis. SQLite in particular doesn't like a statement that starts with parenthesis. So when nesting a "compound" inside a "compound", it's often necessary to apply ``.alias().select()`` to the first element of the outermost compound, if that element is also a compound. For example, to nest a "union" and a "select" inside of "except\_", SQLite will want the "union" to be stated as a subquery: .. sourcecode:: pycon+sql >>> u = except_( ... union( ... addresses.select(). ... where(addresses.c.email_address.like('%@yahoo.com')), ... addresses.select(). ... where(addresses.c.email_address.like('%@msn.com')) ... ).alias().select(), # apply subquery here ... addresses.select(addresses.c.email_address.like('%@msn.com')) ... ) {sql}>>> conn.execute(u).fetchall() # doctest: +NORMALIZE_WHITESPACE SELECT anon_1.id, anon_1.user_id, anon_1.email_address FROM (SELECT addresses.id AS id, addresses.user_id AS user_id, addresses.email_address AS email_address FROM addresses WHERE addresses.email_address LIKE ? UNION SELECT addresses.id AS id, addresses.user_id AS user_id, addresses.email_address AS email_address FROM addresses WHERE addresses.email_address LIKE ?) AS anon_1 EXCEPT SELECT addresses.id, addresses.user_id, addresses.email_address FROM addresses WHERE addresses.email_address LIKE ? ('%@yahoo.com', '%@msn.com', '%@msn.com') {stop}[(1, 1, u'jack@yahoo.com')] .. _scalar_selects: Scalar Selects -------------- A scalar select is a SELECT that returns exactly one row and one column. 
It can then be used as a column expression. A scalar select is often a :term:`correlated subquery`, which relies upon the enclosing SELECT statement in order to acquire at least one of its FROM clauses. The :func:`.select` construct can be modified to act as a column expression by calling either the :meth:`~.SelectBase.as_scalar` or :meth:`~.SelectBase.label` method: .. sourcecode:: pycon+sql >>> stmt = select([func.count(addresses.c.id)]).\ ... where(users.c.id == addresses.c.user_id).\ ... as_scalar() The above construct is now a :class:`~.expression.ScalarSelect` object, and is no longer part of the :class:`~.expression.FromClause` hierarchy; it instead is within the :class:`~.expression.ColumnElement` family of expression constructs. We can place this construct the same as any other column within another :func:`.select`: .. sourcecode:: pycon+sql >>> conn.execute(select([users.c.name, stmt])).fetchall() # doctest: +NORMALIZE_WHITESPACE {opensql}SELECT users.name, (SELECT count(addresses.id) AS count_1 FROM addresses WHERE users.id = addresses.user_id) AS anon_1 FROM users () {stop}[(u'jack', 2), (u'wendy', 2)] To apply a non-anonymous column name to our scalar select, we create it using :meth:`.SelectBase.label` instead: .. sourcecode:: pycon+sql >>> stmt = select([func.count(addresses.c.id)]).\ ... where(users.c.id == addresses.c.user_id).\ ... label("address_count") >>> conn.execute(select([users.c.name, stmt])).fetchall() # doctest: +NORMALIZE_WHITESPACE {opensql}SELECT users.name, (SELECT count(addresses.id) AS count_1 FROM addresses WHERE users.id = addresses.user_id) AS address_count FROM users () {stop}[(u'jack', 2), (u'wendy', 2)] .. _correlated_subqueries: Correlated Subqueries --------------------- Notice in the examples on :ref:`scalar_selects`, the FROM clause of each embedded select did not contain the ``users`` table in its FROM clause. This is because SQLAlchemy automatically :term:`correlates` embedded FROM objects to that of an enclosing query, if present, and if the inner SELECT statement would still have at least one FROM clause of its own. For example: .. sourcecode:: pycon+sql >>> stmt = select([addresses.c.user_id]).\ ... where(addresses.c.user_id == users.c.id).\ ... where(addresses.c.email_address == 'jack@yahoo.com') >>> enclosing_stmt = select([users.c.name]).where(users.c.id == stmt) >>> conn.execute(enclosing_stmt).fetchall() # doctest: +NORMALIZE_WHITESPACE {opensql}SELECT users.name FROM users WHERE users.id = (SELECT addresses.user_id FROM addresses WHERE addresses.user_id = users.id AND addresses.email_address = ?) ('jack@yahoo.com',) {stop}[(u'jack',)] Auto-correlation will usually do what's expected, however it can also be controlled. For example, if we wanted a statement to correlate only to the ``addresses`` table but not the ``users`` table, even if both were present in the enclosing SELECT, we use the :meth:`~.Select.correlate` method to specify those FROM clauses that may be correlated: .. sourcecode:: pycon+sql >>> stmt = select([users.c.id]).\ ... where(users.c.id == addresses.c.user_id).\ ... where(users.c.name == 'jack').\ ... correlate(addresses) >>> enclosing_stmt = select( ... [users.c.name, addresses.c.email_address]).\ ... select_from(users.join(addresses)).\ ... 
where(users.c.id == stmt) >>> conn.execute(enclosing_stmt).fetchall() # doctest: +NORMALIZE_WHITESPACE {opensql}SELECT users.name, addresses.email_address FROM users JOIN addresses ON users.id = addresses.user_id WHERE users.id = (SELECT users.id FROM users WHERE users.id = addresses.user_id AND users.name = ?) ('jack',) {stop}[(u'jack', u'jack@yahoo.com'), (u'jack', u'jack@msn.com')] To entirely disable a statement from correlating, we can pass ``None`` as the argument: .. sourcecode:: pycon+sql >>> stmt = select([users.c.id]).\ ... where(users.c.name == 'wendy').\ ... correlate(None) >>> enclosing_stmt = select([users.c.name]).\ ... where(users.c.id == stmt) >>> conn.execute(enclosing_stmt).fetchall() # doctest: +NORMALIZE_WHITESPACE {opensql}SELECT users.name FROM users WHERE users.id = (SELECT users.id FROM users WHERE users.name = ?) ('wendy',) {stop}[(u'wendy',)] We can also control correlation via exclusion, using the :meth:`.Select.correlate_except` method. Such as, we can write our SELECT for the ``users`` table by telling it to correlate all FROM clauses except for ``users``: .. sourcecode:: pycon+sql >>> stmt = select([users.c.id]).\ ... where(users.c.id == addresses.c.user_id).\ ... where(users.c.name == 'jack').\ ... correlate_except(users) >>> enclosing_stmt = select( ... [users.c.name, addresses.c.email_address]).\ ... select_from(users.join(addresses)).\ ... where(users.c.id == stmt) >>> conn.execute(enclosing_stmt).fetchall() # doctest: +NORMALIZE_WHITESPACE {opensql}SELECT users.name, addresses.email_address FROM users JOIN addresses ON users.id = addresses.user_id WHERE users.id = (SELECT users.id FROM users WHERE users.id = addresses.user_id AND users.name = ?) ('jack',) {stop}[(u'jack', u'jack@yahoo.com'), (u'jack', u'jack@msn.com')] Ordering, Grouping, Limiting, Offset...ing... --------------------------------------------- Ordering is done by passing column expressions to the :meth:`~.SelectBase.order_by` method: .. sourcecode:: pycon+sql >>> stmt = select([users.c.name]).order_by(users.c.name) >>> conn.execute(stmt).fetchall() # doctest: +NORMALIZE_WHITESPACE {opensql}SELECT users.name FROM users ORDER BY users.name () {stop}[(u'jack',), (u'wendy',)] Ascending or descending can be controlled using the :meth:`~.ColumnElement.asc` and :meth:`~.ColumnElement.desc` modifiers: .. sourcecode:: pycon+sql >>> stmt = select([users.c.name]).order_by(users.c.name.desc()) >>> conn.execute(stmt).fetchall() # doctest: +NORMALIZE_WHITESPACE {opensql}SELECT users.name FROM users ORDER BY users.name DESC () {stop}[(u'wendy',), (u'jack',)] Grouping refers to the GROUP BY clause, and is usually used in conjunction with aggregate functions to establish groups of rows to be aggregated. This is provided via the :meth:`~.SelectBase.group_by` method: .. sourcecode:: pycon+sql >>> stmt = select([users.c.name, func.count(addresses.c.id)]).\ ... select_from(users.join(addresses)).\ ... group_by(users.c.name) >>> conn.execute(stmt).fetchall() # doctest: +NORMALIZE_WHITESPACE {opensql}SELECT users.name, count(addresses.id) AS count_1 FROM users JOIN addresses ON users.id = addresses.user_id GROUP BY users.name () {stop}[(u'jack', 2), (u'wendy', 2)] HAVING can be used to filter results on an aggregate value, after GROUP BY has been applied. It's available here via the :meth:`~.Select.having` method: .. sourcecode:: pycon+sql >>> stmt = select([users.c.name, func.count(addresses.c.id)]).\ ... select_from(users.join(addresses)).\ ... group_by(users.c.name).\ ... 
having(func.length(users.c.name) > 4) >>> conn.execute(stmt).fetchall() # doctest: +NORMALIZE_WHITESPACE {opensql}SELECT users.name, count(addresses.id) AS count_1 FROM users JOIN addresses ON users.id = addresses.user_id GROUP BY users.name HAVING length(users.name) > ? (4,) {stop}[(u'wendy', 2)] A common system of dealing with duplicates in composed SELECT statements is the DISTINCT modifier. A simple DISTINCT clause can be added using the :meth:`.Select.distinct` method: .. sourcecode:: pycon+sql >>> stmt = select([users.c.name]).\ ... where(addresses.c.email_address. ... contains(users.c.name)).\ ... distinct() >>> conn.execute(stmt).fetchall() # doctest: +NORMALIZE_WHITESPACE {opensql}SELECT DISTINCT users.name FROM users, addresses WHERE addresses.email_address LIKE '%%' || users.name || '%%' () {stop}[(u'jack',), (u'wendy',)] Most database backends support a system of limiting how many rows are returned, and the majority also feature a means of starting to return rows after a given "offset". While common backends like Postgresql, MySQL and SQLite support LIMIT and OFFSET keywords, other backends need to refer to more esoteric features such as "window functions" and row ids to achieve the same effect. The :meth:`~.Select.limit` and :meth:`~.Select.offset` methods provide an easy abstraction into the current backend's methodology: .. sourcecode:: pycon+sql >>> stmt = select([users.c.name, addresses.c.email_address]).\ ... select_from(users.join(addresses)).\ ... limit(1).offset(1) >>> conn.execute(stmt).fetchall() # doctest: +NORMALIZE_WHITESPACE {opensql}SELECT users.name, addresses.email_address FROM users JOIN addresses ON users.id = addresses.user_id LIMIT ? OFFSET ? (1, 1) {stop}[(u'jack', u'jack@msn.com')] .. _inserts_and_updates: Inserts, Updates and Deletes ============================ We've seen :meth:`~.TableClause.insert` demonstrated earlier in this tutorial. Where :meth:`~.TableClause.insert` produces INSERT, the :meth:`~.TableClause.update` method produces UPDATE. Both of these constructs feature a method called :meth:`~.ValuesBase.values` which specifies the VALUES or SET clause of the statement. The :meth:`~.ValuesBase.values` method accommodates any column expression as a value: .. sourcecode:: pycon+sql >>> stmt = users.update().\ ... values(fullname="Fullname: " + users.c.name) >>> conn.execute(stmt) #doctest: +ELLIPSIS {opensql}UPDATE users SET fullname=(? || users.name) ('Fullname: ',) COMMIT {stop} When using :meth:`~.TableClause.insert` or :meth:`~.TableClause.update` in an "execute many" context, we may also want to specify named bound parameters which we can refer to in the argument list. The two constructs will automatically generate bound placeholders for any column names passed in the dictionaries sent to :meth:`~.Connection.execute` at execution time. However, if we wish to use explicitly targeted named parameters with composed expressions, we need to use the :func:`~.expression.bindparam` construct. When using :func:`~.expression.bindparam` with :meth:`~.TableClause.insert` or :meth:`~.TableClause.update`, the names of the table's columns themselves are reserved for the "automatic" generation of bind names. We can combine the usage of implicitly available bind names and explicitly named parameters as in the example below: .. sourcecode:: pycon+sql >>> stmt = users.insert().\ ... values(name=bindparam('_name') + " .. name") >>> conn.execute(stmt, [ # doctest: +ELLIPSIS ... {'id':4, '_name':'name1'}, ... {'id':5, '_name':'name2'}, ...
{'id':6, '_name':'name3'}, ... ]) {opensql}INSERT INTO users (id, name) VALUES (?, (? || ?)) ((4, 'name1', ' .. name'), (5, 'name2', ' .. name'), (6, 'name3', ' .. name')) COMMIT An UPDATE statement is emitted using the :meth:`~.TableClause.update` construct. This works much like an INSERT, except there is an additional WHERE clause that can be specified: .. sourcecode:: pycon+sql >>> stmt = users.update().\ ... where(users.c.name == 'jack').\ ... values(name='ed') >>> conn.execute(stmt) #doctest: +ELLIPSIS {opensql}UPDATE users SET name=? WHERE users.name = ? ('ed', 'jack') COMMIT {stop} When using :meth:`~.TableClause.update` in an "execute many" context, we may wish to also use explicitly named bound parameters in the WHERE clause. Again, :func:`~.expression.bindparam` is the construct used to achieve this: .. sourcecode:: pycon+sql >>> stmt = users.update().\ ... where(users.c.name == bindparam('oldname')).\ ... values(name=bindparam('newname')) >>> conn.execute(stmt, [ ... {'oldname':'jack', 'newname':'ed'}, ... {'oldname':'wendy', 'newname':'mary'}, ... {'oldname':'jim', 'newname':'jake'}, ... ]) #doctest: +ELLIPSIS {opensql}UPDATE users SET name=? WHERE users.name = ? (('ed', 'jack'), ('mary', 'wendy'), ('jake', 'jim')) COMMIT {stop} Correlated Updates ------------------ A correlated update lets you update a table using selection from another table, or the same table: .. sourcecode:: pycon+sql >>> stmt = select([addresses.c.email_address]).\ ... where(addresses.c.user_id == users.c.id).\ ... limit(1) >>> conn.execute(users.update().values(fullname=stmt)) #doctest: +ELLIPSIS,+NORMALIZE_WHITESPACE {opensql}UPDATE users SET fullname=(SELECT addresses.email_address FROM addresses WHERE addresses.user_id = users.id LIMIT ? OFFSET ?) (1, 0) COMMIT {stop} .. _multi_table_updates: Multiple Table Updates ---------------------- .. versionadded:: 0.7.4 The Postgresql, Microsoft SQL Server, and MySQL backends all support UPDATE statements that refer to multiple tables. For PG and MSSQL, this is the "UPDATE FROM" syntax, which updates one table at a time, but can reference additional tables in an additional "FROM" clause that can then be referenced in the WHERE clause directly. On MySQL, multiple tables can be embedded into a single UPDATE statement separated by a comma. The SQLAlchemy :func:`.update` construct supports both of these modes implicitly, by specifying multiple tables in the WHERE clause:: stmt = users.update().\ values(name='ed wood').\ where(users.c.id == addresses.c.id).\ where(addresses.c.email_address.startswith('ed%')) conn.execute(stmt) The resulting SQL from the above statement would render as:: UPDATE users SET name=:name FROM addresses WHERE users.id = addresses.id AND addresses.email_address LIKE :email_address_1 || '%%' When using MySQL, columns from each table can be assigned to in the SET clause directly, using the dictionary form passed to :meth:`.Update.values`:: stmt = users.update().\ values({ users.c.name:'ed wood', addresses.c.email_address:'ed.wood@foo.com' }).\ where(users.c.id == addresses.c.id).\ where(addresses.c.email_address.startswith('ed%')) The tables are referenced explicitly in the SET clause:: UPDATE users, addresses SET addresses.email_address=%s, users.name=%s WHERE users.id = addresses.id AND addresses.email_address LIKE concat(%s, '%%') SQLAlchemy doesn't do anything special when these constructs are used on a non-supporting database. 
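To preview which form a given backend will emit, the construct can be compiled against a dialect object directly (a sketch that repeats the statement from the first example above; no database connection is needed)::

    from sqlalchemy.dialects import postgresql, mysql

    stmt = users.update().\
            values(name='ed wood').\
            where(users.c.id == addresses.c.id).\
            where(addresses.c.email_address.startswith('ed%'))

    # backends in the Postgresql / SQL Server family render the UPDATE..FROM form
    print stmt.compile(dialect=postgresql.dialect())

    # MySQL renders the comma-separated multiple-table form
    print stmt.compile(dialect=mysql.dialect())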
The ``UPDATE FROM`` syntax generates by default when multiple tables are present, and the statement will be rejected by the database if this syntax is not supported. .. _deletes: Deletes ------- Finally, a delete. This is accomplished easily enough using the :meth:`~.TableClause.delete` construct: .. sourcecode:: pycon+sql >>> conn.execute(addresses.delete()) #doctest: +ELLIPSIS {opensql}DELETE FROM addresses () COMMIT {stop} >>> conn.execute(users.delete().where(users.c.name > 'm')) #doctest: +ELLIPSIS {opensql}DELETE FROM users WHERE users.name > ? ('m',) COMMIT {stop} Matched Row Counts ------------------ Both of :meth:`~.TableClause.update` and :meth:`~.TableClause.delete` are associated with *matched row counts*. This is a number indicating the number of rows that were matched by the WHERE clause. Note that by "matched", this includes rows where no UPDATE actually took place. The value is available as :attr:`~.ResultProxy.rowcount`: .. sourcecode:: pycon+sql >>> result = conn.execute(users.delete()) #doctest: +ELLIPSIS {opensql}DELETE FROM users () COMMIT {stop}>>> result.rowcount 1 Further Reference ================== Expression Language Reference: :ref:`expression_api_toplevel` Database Metadata Reference: :ref:`metadata_toplevel` Engine Reference: :doc:`/core/engines` Connection Reference: :ref:`connections_toplevel` Types Reference: :ref:`types_toplevel` SQLAlchemy-0.8.4/doc/build/core/types.rst0000644000076500000240000005657312251150015020733 0ustar classicstaff00000000000000.. _types_toplevel: Column and Data Types ===================== .. module:: sqlalchemy.types SQLAlchemy provides abstractions for most common database data types, and a mechanism for specifying your own custom data types. The methods and attributes of type objects are rarely used directly. Type objects are supplied to :class:`~sqlalchemy.Table` definitions and can be supplied as type hints to `functions` for occasions where the database driver returns an incorrect type. .. code-block:: pycon >>> users = Table('users', metadata, ... Column('id', Integer, primary_key=True) ... Column('login', String(32)) ... ) SQLAlchemy will use the ``Integer`` and ``String(32)`` type information when issuing a ``CREATE TABLE`` statement and will use it again when reading back rows ``SELECTed`` from the database. Functions that accept a type (such as :func:`~sqlalchemy.Column`) will typically accept a type class or instance; ``Integer`` is equivalent to ``Integer()`` with no construction arguments in this case. .. _types_generic: Generic Types ------------- Generic types specify a column that can read, write and store a particular type of Python data. SQLAlchemy will choose the best database column type available on the target database when issuing a ``CREATE TABLE`` statement. For complete control over which column type is emitted in ``CREATE TABLE``, such as ``VARCHAR`` see `SQL Standard Types`_ and the other sections of this chapter. .. autoclass:: BigInteger :members: .. autoclass:: Boolean :members: .. autoclass:: Date :members: .. autoclass:: DateTime :members: .. autoclass:: Enum :members: __init__, create, drop .. autoclass:: Float :members: .. autoclass:: Integer :members: .. autoclass:: Interval :members: .. autoclass:: LargeBinary :members: .. autoclass:: Numeric :members: .. autoclass:: PickleType :members: .. autoclass:: SchemaType :members: :undoc-members: .. autoclass:: SmallInteger :members: .. autoclass:: String :members: .. autoclass:: Text :members: .. autoclass:: Time :members: .. 
autoclass:: Unicode :members: .. autoclass:: UnicodeText :members: .. _types_sqlstandard: SQL Standard Types ------------------ The SQL standard types always create database column types of the same name when ``CREATE TABLE`` is issued. Some types may not be supported on all databases. .. autoclass:: BIGINT .. autoclass:: BINARY .. autoclass:: BLOB .. autoclass:: BOOLEAN .. autoclass:: CHAR .. autoclass:: CLOB .. autoclass:: DATE .. autoclass:: DATETIME .. autoclass:: DECIMAL .. autoclass:: FLOAT .. autoclass:: INT .. autoclass:: sqlalchemy.types.INTEGER .. autoclass:: NCHAR .. autoclass:: NVARCHAR .. autoclass:: NUMERIC .. autoclass:: REAL .. autoclass:: SMALLINT .. autoclass:: TEXT .. autoclass:: TIME .. autoclass:: TIMESTAMP .. autoclass:: VARBINARY .. autoclass:: VARCHAR .. _types_vendor: Vendor-Specific Types --------------------- Database-specific types are also available for import from each database's dialect module. See the :ref:`dialect_toplevel` reference for the database you're interested in. For example, MySQL has a ``BIGINT`` type and PostgreSQL has an ``INET`` type. To use these, import them from the module explicitly:: from sqlalchemy.dialects import mysql table = Table('foo', metadata, Column('id', mysql.BIGINT), Column('enumerates', mysql.ENUM('a', 'b', 'c')) ) Or some PostgreSQL types:: from sqlalchemy.dialects import postgresql table = Table('foo', metadata, Column('ipaddress', postgresql.INET), Column('elements', postgresql.ARRAY(String)) ) Each dialect provides the full set of typenames supported by that backend within its `__all__` collection, so that a simple `import *` or similar will import all supported types as implemented for that backend:: from sqlalchemy.dialects.postgresql import * t = Table('mytable', metadata, Column('id', INTEGER, primary_key=True), Column('name', VARCHAR(300)), Column('inetaddr', INET) ) Where above, the INTEGER and VARCHAR types are ultimately from sqlalchemy.types, and INET is specific to the Postgresql dialect. Some dialect level types have the same name as the SQL standard type, but also provide additional arguments. For example, MySQL implements the full range of character and string types including additional arguments such as `collation` and `charset`:: from sqlalchemy.dialects.mysql import VARCHAR, TEXT table = Table('foo', meta, Column('col1', VARCHAR(200, collation='binary')), Column('col2', TEXT(charset='latin1')) ) .. _types_custom: Custom Types ------------ A variety of methods exist to redefine the behavior of existing types as well as to provide new ones. Overriding Type Compilation ~~~~~~~~~~~~~~~~~~~~~~~~~~~ A frequent need is to force the "string" version of a type, that is the one rendered in a CREATE TABLE statement or other SQL function like CAST, to be changed. For example, an application may want to force the rendering of ``BINARY`` for all platforms except for one, for which it wants ``BLOB`` to be rendered. Usage of an existing generic type, in this case :class:`.LargeBinary`, is preferred for most use cases. But to control types more accurately, a compilation directive that is per-dialect can be associated with any type:: from sqlalchemy.ext.compiler import compiles from sqlalchemy.types import BINARY @compiles(BINARY, "sqlite") def compile_binary_sqlite(type_, compiler, **kw): return "BLOB" The above code allows the usage of :class:`.types.BINARY`, which will produce the string ``BINARY`` against all backends except SQLite, in which case it will produce ``BLOB``.
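The effect is easiest to see by rendering DDL against different dialects (a brief sketch; the throwaway table below is illustrative only, and ``BINARY`` is the type imported in the snippet above)::

    from sqlalchemy import Table, Column, MetaData
    from sqlalchemy.schema import CreateTable
    from sqlalchemy.dialects import sqlite

    t = Table('t', MetaData(), Column('data', BINARY))

    # the generic compiler renders the standard name
    print CreateTable(t).compile()

    # with the @compiles rule above in place, SQLite DDL renders BLOB instead
    print CreateTable(t).compile(dialect=sqlite.dialect())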
See the section :ref:`type_compilation_extension`, a subsection of :ref:`sqlalchemy.ext.compiler_toplevel`, for additional examples. .. _types_typedecorator: Augmenting Existing Types ~~~~~~~~~~~~~~~~~~~~~~~~~ The :class:`.TypeDecorator` allows the creation of custom types which add bind-parameter and result-processing behavior to an existing type object. It is used when additional in-Python marshaling of data to and from the database is required. .. note:: The bind- and result-processing of :class:`.TypeDecorator` is *in addition* to the processing already performed by the hosted type, which is customized by SQLAlchemy on a per-DBAPI basis to perform processing specific to that DBAPI. To change the DBAPI-level processing for an existing type, see the section :ref:`replacing_processors`. .. autoclass:: TypeDecorator :members: :inherited-members: TypeDecorator Recipes ~~~~~~~~~~~~~~~~~~~~~ A few key :class:`.TypeDecorator` recipes follow. .. _coerce_to_unicode: Coercing Encoded Strings to Unicode ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ A common source of confusion regarding the :class:`.Unicode` type is that it is intended to deal *only* with Python ``unicode`` objects on the Python side, meaning values passed to it as bind parameters must be of the form ``u'some string'`` if using Python 2 and not 3. The encoding/decoding functions it performs are only to suit what the DBAPI in use requires, and are primarily a private implementation detail. The use case of a type that can safely receive Python bytestrings, that is strings that contain non-ASCII characters and are not ``u''`` objects in Python 2, can be achieved using a :class:`.TypeDecorator` which coerces as needed:: from sqlalchemy.types import TypeDecorator, Unicode class CoerceUTF8(TypeDecorator): """Safely coerce Python bytestrings to Unicode before passing off to the database.""" impl = Unicode def process_bind_param(self, value, dialect): if isinstance(value, str): value = value.decode('utf-8') return value Rounding Numerics ^^^^^^^^^^^^^^^^^ Some database connectors like those of SQL Server choke if a Decimal is passed with too many decimal places. Here's a recipe that rounds them down:: from sqlalchemy.types import TypeDecorator, Numeric from decimal import Decimal class SafeNumeric(TypeDecorator): """Adds quantization to Numeric.""" impl = Numeric def __init__(self, *arg, **kw): TypeDecorator.__init__(self, *arg, **kw) self.quantize_int = -(self.impl.precision - self.impl.scale) self.quantize = Decimal(10) ** self.quantize_int def process_bind_param(self, value, dialect): if isinstance(value, Decimal) and \ value.as_tuple()[2] < self.quantize_int: value = value.quantize(self.quantize) return value .. _custom_guid_type: Backend-agnostic GUID Type ^^^^^^^^^^^^^^^^^^^^^^^^^^ Receives and returns Python uuid() objects. Uses the PG UUID type when using Postgresql, CHAR(32) on other backends, storing them in stringified hex format. Can be modified to store binary in CHAR(16) if desired:: from sqlalchemy.types import TypeDecorator, CHAR from sqlalchemy.dialects.postgresql import UUID import uuid class GUID(TypeDecorator): """Platform-independent GUID type. Uses Postgresql's UUID type, otherwise uses CHAR(32), storing as stringified hex values. 
""" impl = CHAR def load_dialect_impl(self, dialect): if dialect.name == 'postgresql': return dialect.type_descriptor(UUID()) else: return dialect.type_descriptor(CHAR(32)) def process_bind_param(self, value, dialect): if value is None: return value elif dialect.name == 'postgresql': return str(value) else: if not isinstance(value, uuid.UUID): return "%.32x" % uuid.UUID(value) else: # hexstring return "%.32x" % value def process_result_value(self, value, dialect): if value is None: return value else: return uuid.UUID(value) Marshal JSON Strings ^^^^^^^^^^^^^^^^^^^^^ This type uses ``simplejson`` to marshal Python data structures to/from JSON. Can be modified to use Python's builtin json encoder:: from sqlalchemy.types import TypeDecorator, VARCHAR import json class JSONEncodedDict(TypeDecorator): """Represents an immutable structure as a json-encoded string. Usage:: JSONEncodedDict(255) """ impl = VARCHAR def process_bind_param(self, value, dialect): if value is not None: value = json.dumps(value) return value def process_result_value(self, value, dialect): if value is not None: value = json.loads(value) return value Note that the ORM by default will not detect "mutability" on such a type - meaning, in-place changes to values will not be detected and will not be flushed. Without further steps, you instead would need to replace the existing value with a new one on each parent object to detect changes. Note that there's nothing wrong with this, as many applications may not require that the values are ever mutated once created. For those which do have this requirment, support for mutability is best applied using the ``sqlalchemy.ext.mutable`` extension - see the example in :ref:`mutable_toplevel`. .. _replacing_processors: Replacing the Bind/Result Processing of Existing Types ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Most augmentation of type behavior at the bind/result level is achieved using :class:`.TypeDecorator`. For the rare scenario where the specific processing applied by SQLAlchemy at the DBAPI level needs to be replaced, the SQLAlchemy type can be subclassed directly, and the ``bind_processor()`` or ``result_processor()`` methods can be overridden. Doing so requires that the ``adapt()`` method also be overridden. This method is the mechanism by which SQLAlchemy produces DBAPI-specific type behavior during statement execution. Overriding it allows a copy of the custom type to be used in lieu of a DBAPI-specific type. Below we subclass the :class:`.types.TIME` type to have custom result processing behavior. The ``process()`` function will receive ``value`` from the DBAPI cursor directly:: class MySpecialTime(TIME): def __init__(self, special_argument): super(MySpecialTime, self).__init__() self.special_argument = special_argument def result_processor(self, dialect, coltype): import datetime time = datetime.time def process(value): if value is not None: microseconds = value.microseconds seconds = value.seconds minutes = seconds / 60 return time( minutes / 60, minutes % 60, seconds - minutes * 60, microseconds) else: return None return process def adapt(self, impltype): return MySpecialTime(self.special_argument) .. 
_types_sql_value_processing: Applying SQL-level Bind/Result Processing ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ As seen in the sections :ref:`types_typedecorator` and :ref:`replacing_processors`, SQLAlchemy allows Python functions to be invoked both when parameters are sent to a statement, as well as when result rows are loaded from the database, to apply transformations to the values as they are sent to or from the database. It is also possible to define SQL-level transformations as well. The rationale here is when only the relational database contains a particular series of functions that are necessary to coerce incoming and outgoing data between an application and persistence format. Examples include using database-defined encryption/decryption functions, as well as stored procedures that handle geographic data. The Postgis extension to Postgresql includes an extensive array of SQL functions that are necessary for coercing data into particular formats. Any :class:`.TypeEngine`, :class:`.UserDefinedType` or :class:`.TypeDecorator` subclass can include implementations of :meth:`.TypeEngine.bind_expression` and/or :meth:`.TypeEngine.column_expression`, which when defined to return a non-``None`` value should return a :class:`.ColumnElement` expression to be injected into the SQL statement, either surrounding bound parameters or a column expression. For example, to build a ``Geometry`` type which will apply the Postgis function ``ST_GeomFromText`` to all outgoing values and the function ``ST_AsText`` to all incoming data, we can create our own subclass of :class:`.UserDefinedType` which provides these methods in conjunction with :data:`~.sqlalchemy.sql.expression.func`:: from sqlalchemy import func from sqlalchemy.types import UserDefinedType class Geometry(UserDefinedType): def get_col_spec(self): return "GEOMETRY" def bind_expression(self, bindvalue): return func.ST_GeomFromText(bindvalue, type_=self) def column_expression(self, col): return func.ST_AsText(col, type_=self) We can apply the ``Geometry`` type into :class:`.Table` metadata and use it in a :func:`.select` construct:: geometry = Table('geometry', metadata, Column('geom_id', Integer, primary_key=True), Column('geom_data', Geometry) ) print select([geometry]).where( geometry.c.geom_data == 'LINESTRING(189412 252431,189631 259122)') The resulting SQL embeds both functions as appropriate. ``ST_AsText`` is applied to the columns clause so that the return value is run through the function before passing into a result set, and ``ST_GeomFromText`` is run on the bound parameter so that the passed-in value is converted:: SELECT geometry.geom_id, ST_AsText(geometry.geom_data) AS geom_data_1 FROM geometry WHERE geometry.geom_data = ST_GeomFromText(:geom_data_2) The :meth:`.TypeEngine.column_expression` method interacts with the mechanics of the compiler such that the SQL expression does not interfere with the labeling of the wrapped expression. 
Such as, if we rendered a :func:`.select` against a :func:`.label` of our expression, the string label is moved to the outside of the wrapped expression:: print select([geometry.c.geom_data.label('my_data')]) Output:: SELECT ST_AsText(geometry.geom_data) AS my_data FROM geometry For an example of subclassing a built in type directly, we subclass :class:`.postgresql.BYTEA` to provide a ``PGPString``, which will make use of the Postgresql ``pgcrypto`` extension to encrpyt/decrypt values transparently:: from sqlalchemy import create_engine, String, select, func, \ MetaData, Table, Column, type_coerce from sqlalchemy.dialects.postgresql import BYTEA class PGPString(BYTEA): def __init__(self, passphrase, length=None): super(PGPString, self).__init__(length) self.passphrase = passphrase def bind_expression(self, bindvalue): # convert the bind's type from PGPString to # String, so that it's passed to psycopg2 as is without # a dbapi.Binary wrapper bindvalue = type_coerce(bindvalue, String) return func.pgp_sym_encrypt(bindvalue, self.passphrase) def column_expression(self, col): return func.pgp_sym_decrypt(col, self.passphrase) metadata = MetaData() message = Table('message', metadata, Column('username', String(50)), Column('message', PGPString("this is my passphrase", length=1000)), ) engine = create_engine("postgresql://scott:tiger@localhost/test", echo=True) with engine.begin() as conn: metadata.create_all(conn) conn.execute(message.insert(), username="some user", message="this is my message") print conn.scalar( select([message.c.message]).\ where(message.c.username == "some user") ) The ``pgp_sym_encrypt`` and ``pgp_sym_decrypt`` functions are applied to the INSERT and SELECT statements:: INSERT INTO message (username, message) VALUES (%(username)s, pgp_sym_encrypt(%(message)s, %(pgp_sym_encrypt_1)s)) {'username': 'some user', 'message': 'this is my message', 'pgp_sym_encrypt_1': 'this is my passphrase'} SELECT pgp_sym_decrypt(message.message, %(pgp_sym_decrypt_1)s) AS message_1 FROM message WHERE message.username = %(username_1)s {'pgp_sym_decrypt_1': 'this is my passphrase', 'username_1': 'some user'} .. versionadded:: 0.8 Added the :meth:`.TypeEngine.bind_expression` and :meth:`.TypeEngine.column_expression` methods. See also: :ref:`examples_postgis` .. _types_operators: Redefining and Creating New Operators ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ SQLAlchemy Core defines a fixed set of expression operators available to all column expressions. Some of these operations have the effect of overloading Python's built in operators; examples of such operators include :meth:`.ColumnOperators.__eq__` (``table.c.somecolumn == 'foo'``), :meth:`.ColumnOperators.__invert__` (``~table.c.flag``), and :meth:`.ColumnOperators.__add__` (``table.c.x + table.c.y``). Other operators are exposed as explicit methods on column expressions, such as :meth:`.ColumnOperators.in_` (``table.c.value.in_(['x', 'y'])``) and :meth:`.ColumnOperators.like` (``table.c.value.like('%ed%')``). The Core expression constructs in all cases consult the type of the expression in order to determine the behavior of existing operators, as well as to locate additional operators that aren't part of the built in set. The :class:`.TypeEngine` base class defines a root "comparison" implementation :class:`.TypeEngine.Comparator`, and many specific types provide their own sub-implementations of this class. 
User-defined :class:`.TypeEngine.Comparator` implementations can be built directly into a simple subclass of a particular type in order to override or define new operations. Below, we create a :class:`.Integer` subclass which overrides the :meth:`.ColumnOperators.__add__` operator:: from sqlalchemy import Integer class MyInt(Integer): class comparator_factory(Integer.Comparator): def __add__(self, other): return self.op("goofy")(other) The above configuration creates a new class ``MyInt``, which establishes the :attr:`.TypeEngine.comparator_factory` attribute as referring to a new class, subclassing the :class:`.TypeEngine.Comparator` class associated with the :class:`.Integer` type. Usage:: >>> sometable = Table("sometable", metadata, Column("data", MyInt)) >>> print sometable.c.data + 5 sometable.data goofy :data_1 The implementation for :meth:`.ColumnOperators.__add__` is consulted by an owning SQL expression, by instantiating the :class:`.TypeEngine.Comparator` with itself as the ``expr`` attribute. The mechanics of the expression system are such that operations continue recursively until an expression object produces a new SQL expression construct. Above, we could just as well have said ``self.expr.op("goofy")(other)`` instead of ``self.op("goofy")(other)``. New methods added to a :class:`.TypeEngine.Comparator` are exposed on an owning SQL expression using a ``__getattr__`` scheme, which exposes methods added to :class:`.TypeEngine.Comparator` onto the owning :class:`.ColumnElement`. For example, to add a ``log()`` function to integers:: from sqlalchemy import Integer, func class MyInt(Integer): class comparator_factory(Integer.Comparator): def log(self, other): return func.log(self.expr, other) Using the above type:: >>> print sometable.c.data.log(5) log(:log_1, :log_2) Unary operations are also possible. For example, to add an implementation of the Postgresql factorial operator, we combine the :class:`.UnaryExpression` construct along with a :class:`.custom_op` to produce the factorial expression:: from sqlalchemy import Integer from sqlalchemy.sql.expression import UnaryExpression from sqlalchemy.sql import operators class MyInteger(Integer): class comparator_factory(Integer.Comparator): def factorial(self): return UnaryExpression(self.expr, modifier=operators.custom_op("!"), type_=MyInteger) Using the above type:: >>> from sqlalchemy.sql import column >>> print column('x', MyInteger).factorial() x ! See also: :attr:`.TypeEngine.comparator_factory` .. versionadded:: 0.8 The expression system was enhanced to support customization of operators on a per-type level. Creating New Types ~~~~~~~~~~~~~~~~~~ The :class:`.UserDefinedType` class is provided as a simple base class for defining entirely new database types. Use this to represent native database types not known by SQLAlchemy. If only Python translation behavior is needed, use :class:`.TypeDecorator` instead. .. autoclass:: UserDefinedType :members: .. _types_api: Base Type API -------------- .. autoclass:: AbstractType :members: .. autoclass:: TypeEngine :members: .. autoclass:: Concatenable :members: :inherited-members: .. autoclass:: NullType .. autoclass:: Variant :members: with_variant, __init__ SQLAlchemy-0.8.4/doc/build/dialects/0000755000076500000240000000000012251151573017667 5ustar classicstaff00000000000000SQLAlchemy-0.8.4/doc/build/dialects/drizzle.rst0000644000076500000240000000250712251147171022107 0ustar classicstaff00000000000000.. _drizzle_toplevel: Drizzle ======= .. 
automodule:: sqlalchemy.dialects.drizzle.base Drizzle Data Types ------------------ As with all SQLAlchemy dialects, all UPPERCASE types that are known to be valid with Drizzle are importable from the top level dialect:: from sqlalchemy.dialects.drizzle import \ BIGINT, BINARY, BLOB, BOOLEAN, CHAR, DATE, DATETIME, DECIMAL, DOUBLE, ENUM, FLOAT, INT, INTEGER, NUMERIC, TEXT, TIME, TIMESTAMP, VARBINARY, VARCHAR Types which are specific to Drizzle, or have Drizzle-specific construction arguments, are as follows: .. currentmodule:: sqlalchemy.dialects.drizzle .. autoclass:: BIGINT :members: __init__ .. autoclass:: CHAR :members: __init__ .. autoclass:: DECIMAL :members: __init__ .. autoclass:: DOUBLE :members: __init__ .. autoclass:: ENUM :members: __init__ .. autoclass:: FLOAT :members: __init__ .. autoclass:: INTEGER :members: __init__ .. autoclass:: NUMERIC :members: __init__ .. autoclass:: REAL :members: __init__ .. autoclass:: TEXT :members: __init__ .. autoclass:: TIMESTAMP :members: __init__ .. autoclass:: VARCHAR :members: __init__ MySQL-Python ------------ .. automodule:: sqlalchemy.dialects.drizzle.mysqldb SQLAlchemy-0.8.4/doc/build/dialects/firebird.rst0000644000076500000240000000035312251150015022176 0ustar classicstaff00000000000000.. _firebird_toplevel: Firebird ======== .. automodule:: sqlalchemy.dialects.firebird.base kinterbasdb ----------- .. automodule:: sqlalchemy.dialects.firebird.kinterbasdb fdb --- .. automodule:: sqlalchemy.dialects.firebird.fdb SQLAlchemy-0.8.4/doc/build/dialects/index.rst0000644000076500000240000000417512251150015021525 0ustar classicstaff00000000000000.. _dialect_toplevel: Dialects ======== The **dialect** is the system SQLAlchemy uses to communicate with various types of :term:`DBAPI` implementations and databases. The sections that follow contain reference documentation and notes specific to the usage of each backend, as well as notes for the various DBAPIs. All dialects require that an appropriate DBAPI driver is installed. Included Dialects ----------------- .. toctree:: :maxdepth: 1 :glob: drizzle firebird informix mssql mysql oracle postgresql sqlite sybase .. _external_toplevel: External Dialects ----------------- .. versionchanged:: 0.8 As of SQLAlchemy 0.8, several dialects have been moved to external projects, and dialects for new databases will also be published as external projects. The rationale here is to keep the base SQLAlchemy install and test suite from growing inordinately large. The "classic" dialects such as SQLite, MySQL, Postgresql, Oracle, SQL Server, and Firebird will remain in the Core for the time being. Current external dialect projects for SQLAlchemy include: Production Ready ^^^^^^^^^^^^^^^^ * `ibm_db_sa `_ - driver for IBM DB2, developed jointly by IBM and SQLAlchemy developers. * `sqlalchemy-sqlany `_ - driver for SAP Sybase SQL Anywhere, developed by SAP. * `sqlalchemy-monetdb `_ - driver for MonetDB. Experimental / Incomplete ^^^^^^^^^^^^^^^^^^^^^^^^^^ * `sqlalchemy-access `_ - driver for Microsoft Access. * `CALCHIPAN `_ - Adapts `Pandas `_ dataframes to SQLAlchemy. * `sqlalchemy-akiban `_ - driver and ORM extensions for the `Akiban `_ database. * `sqlalchemy-cubrid `_ - driver for the CUBRID database. * `sqlalchemy-maxdb `_ - driver for the MaxDB database SQLAlchemy-0.8.4/doc/build/dialects/informix.rst0000644000076500000240000000025412251150015022243 0ustar classicstaff00000000000000.. _informix_toplevel: Informix ======== .. automodule:: sqlalchemy.dialects.informix.base informixdb ---------- .. 
automodule:: sqlalchemy.dialects.informix.informixdbSQLAlchemy-0.8.4/doc/build/dialects/mssql.rst0000644000076500000240000000415712251147171021566 0ustar classicstaff00000000000000.. _mssql_toplevel: Microsoft SQL Server ==================== .. automodule:: sqlalchemy.dialects.mssql.base SQL Server Data Types ----------------------- As with all SQLAlchemy dialects, all UPPERCASE types that are known to be valid with SQL server are importable from the top level dialect, whether they originate from :mod:`sqlalchemy.types` or from the local dialect:: from sqlalchemy.dialects.mssql import \ BIGINT, BINARY, BIT, CHAR, DATE, DATETIME, DATETIME2, \ DATETIMEOFFSET, DECIMAL, FLOAT, IMAGE, INTEGER, MONEY, \ NCHAR, NTEXT, NUMERIC, NVARCHAR, REAL, SMALLDATETIME, \ SMALLINT, SMALLMONEY, SQL_VARIANT, TEXT, TIME, \ TIMESTAMP, TINYINT, UNIQUEIDENTIFIER, VARBINARY, VARCHAR Types which are specific to SQL Server, or have SQL Server-specific construction arguments, are as follows: .. currentmodule:: sqlalchemy.dialects.mssql .. autoclass:: BIT :members: __init__ .. autoclass:: CHAR :members: __init__ .. autoclass:: DATETIME2 :members: __init__ .. autoclass:: DATETIMEOFFSET :members: __init__ .. autoclass:: IMAGE :members: __init__ .. autoclass:: MONEY :members: __init__ .. autoclass:: NCHAR :members: __init__ .. autoclass:: NTEXT :members: __init__ .. autoclass:: NVARCHAR :members: __init__ .. autoclass:: REAL :members: __init__ .. autoclass:: SMALLDATETIME :members: __init__ .. autoclass:: SMALLMONEY :members: __init__ .. autoclass:: SQL_VARIANT :members: __init__ .. autoclass:: TEXT :members: __init__ .. autoclass:: TIME :members: __init__ .. autoclass:: TINYINT :members: __init__ .. autoclass:: UNIQUEIDENTIFIER :members: __init__ .. autoclass:: VARCHAR :members: __init__ PyODBC ------ .. automodule:: sqlalchemy.dialects.mssql.pyodbc mxODBC ------ .. automodule:: sqlalchemy.dialects.mssql.mxodbc pymssql ------- .. automodule:: sqlalchemy.dialects.mssql.pymssql zxjdbc -------------- .. automodule:: sqlalchemy.dialects.mssql.zxjdbc AdoDBAPI -------- .. automodule:: sqlalchemy.dialects.mssql.adodbapi SQLAlchemy-0.8.4/doc/build/dialects/mysql.rst0000644000076500000240000000613312251147171021570 0ustar classicstaff00000000000000.. _mysql_toplevel: MySQL ===== .. automodule:: sqlalchemy.dialects.mysql.base MySQL Data Types ------------------ As with all SQLAlchemy dialects, all UPPERCASE types that are known to be valid with MySQL are importable from the top level dialect:: from sqlalchemy.dialects.mysql import \ BIGINT, BINARY, BIT, BLOB, BOOLEAN, CHAR, DATE, \ DATETIME, DECIMAL, DECIMAL, DOUBLE, ENUM, FLOAT, INTEGER, \ LONGBLOB, LONGTEXT, MEDIUMBLOB, MEDIUMINT, MEDIUMTEXT, NCHAR, \ NUMERIC, NVARCHAR, REAL, SET, SMALLINT, TEXT, TIME, TIMESTAMP, \ TINYBLOB, TINYINT, TINYTEXT, VARBINARY, VARCHAR, YEAR Types which are specific to MySQL, or have MySQL-specific construction arguments, are as follows: .. currentmodule:: sqlalchemy.dialects.mysql .. autoclass:: BIGINT :members: __init__ .. autoclass:: BINARY :members: __init__ .. autoclass:: BIT :members: __init__ .. autoclass:: BLOB :members: __init__ .. autoclass:: BOOLEAN :members: __init__ .. autoclass:: CHAR :members: __init__ .. autoclass:: DATE :members: __init__ .. autoclass:: DATETIME :members: __init__ .. autoclass:: DECIMAL :members: __init__ .. autoclass:: DOUBLE :members: __init__ .. autoclass:: ENUM :members: __init__ .. autoclass:: FLOAT :members: __init__ .. autoclass:: INTEGER :members: __init__ .. autoclass:: LONGBLOB :members: __init__ .. 
autoclass:: LONGTEXT :members: __init__ .. autoclass:: MEDIUMBLOB :members: __init__ .. autoclass:: MEDIUMINT :members: __init__ .. autoclass:: MEDIUMTEXT :members: __init__ .. autoclass:: NCHAR :members: __init__ .. autoclass:: NUMERIC :members: __init__ .. autoclass:: NVARCHAR :members: __init__ .. autoclass:: REAL :members: __init__ .. autoclass:: SET :members: __init__ .. autoclass:: SMALLINT :members: __init__ .. autoclass:: TEXT :members: __init__ .. autoclass:: TIME :members: __init__ .. autoclass:: TIMESTAMP :members: __init__ .. autoclass:: TINYBLOB :members: __init__ .. autoclass:: TINYINT :members: __init__ .. autoclass:: TINYTEXT :members: __init__ .. autoclass:: VARBINARY :members: __init__ .. autoclass:: VARCHAR :members: __init__ .. autoclass:: YEAR :members: __init__ MySQL-Python -------------------- .. automodule:: sqlalchemy.dialects.mysql.mysqldb OurSQL -------------- .. automodule:: sqlalchemy.dialects.mysql.oursql pymysql ------------- .. automodule:: sqlalchemy.dialects.mysql.pymysql MySQL-Connector ---------------------- .. automodule:: sqlalchemy.dialects.mysql.mysqlconnector cymysql ------------ .. automodule:: sqlalchemy.dialects.mysql.cymysql Google App Engine ----------------------- .. automodule:: sqlalchemy.dialects.mysql.gaerdbms pyodbc ------ .. automodule:: sqlalchemy.dialects.mysql.pyodbc zxjdbc -------------- .. automodule:: sqlalchemy.dialects.mysql.zxjdbc SQLAlchemy-0.8.4/doc/build/dialects/oracle.rst0000644000076500000240000000233412251147171021667 0ustar classicstaff00000000000000.. _oracle_toplevel: Oracle ====== .. automodule:: sqlalchemy.dialects.oracle.base Oracle Data Types ------------------- As with all SQLAlchemy dialects, all UPPERCASE types that are known to be valid with Oracle are importable from the top level dialect, whether they originate from :mod:`sqlalchemy.types` or from the local dialect:: from sqlalchemy.dialects.oracle import \ BFILE, BLOB, CHAR, CLOB, DATE, DATETIME, \ DOUBLE_PRECISION, FLOAT, INTERVAL, LONG, NCLOB, \ NUMBER, NVARCHAR, NVARCHAR2, RAW, TIMESTAMP, VARCHAR, \ VARCHAR2 Types which are specific to Oracle, or have Oracle-specific construction arguments, are as follows: .. currentmodule:: sqlalchemy.dialects.oracle .. autoclass:: BFILE :members: __init__ .. autoclass:: DOUBLE_PRECISION :members: __init__ .. autoclass:: INTERVAL :members: __init__ .. autoclass:: NCLOB :members: __init__ .. autoclass:: NUMBER :members: __init__ .. autoclass:: LONG :members: __init__ .. autoclass:: RAW :members: __init__ cx_Oracle ---------- .. automodule:: sqlalchemy.dialects.oracle.cx_oracle zxjdbc ------- .. automodule:: sqlalchemy.dialects.oracle.zxjdbc SQLAlchemy-0.8.4/doc/build/dialects/postgresql.rst0000644000076500000240000000602412251147171022625 0ustar classicstaff00000000000000.. _postgresql_toplevel: PostgreSQL ========== .. 
automodule:: sqlalchemy.dialects.postgresql.base PostgreSQL Data Types ------------------------ As with all SQLAlchemy dialects, all UPPERCASE types that are known to be valid with Postgresql are importable from the top level dialect, whether they originate from :mod:`sqlalchemy.types` or from the local dialect:: from sqlalchemy.dialects.postgresql import \ ARRAY, BIGINT, BIT, BOOLEAN, BYTEA, CHAR, CIDR, DATE, \ DOUBLE_PRECISION, ENUM, FLOAT, HSTORE, INET, INTEGER, \ INTERVAL, MACADDR, NUMERIC, REAL, SMALLINT, TEXT, TIME, \ TIMESTAMP, UUID, VARCHAR, INT4RANGE, INT8RANGE, NUMRANGE, \ DATERANGE, TSRANGE, TSTZRANGE Types which are specific to PostgreSQL, or have PostgreSQL-specific construction arguments, are as follows: .. currentmodule:: sqlalchemy.dialects.postgresql .. autoclass:: array .. autoclass:: ARRAY :members: __init__, Comparator .. autoclass:: Any .. autoclass:: All .. autoclass:: BIT :members: __init__ .. autoclass:: BYTEA :members: __init__ .. autoclass:: CIDR :members: __init__ .. autoclass:: DOUBLE_PRECISION :members: __init__ .. autoclass:: ENUM :members: __init__, create, drop .. autoclass:: HSTORE :members: .. autoclass:: hstore :members: .. autoclass:: INET :members: __init__ .. autoclass:: INTERVAL :members: __init__ .. autoclass:: MACADDR :members: __init__ .. autoclass:: REAL :members: __init__ .. autoclass:: UUID :members: __init__ Range Types ~~~~~~~~~~~ The new range column types founds in PostgreSQL 9.2 onwards are catered for by the following types: .. autoclass:: INT4RANGE .. autoclass:: INT8RANGE .. autoclass:: NUMRANGE .. autoclass:: DATERANGE .. autoclass:: TSRANGE .. autoclass:: TSTZRANGE The types above get most of their functionality from the following mixin: .. autoclass:: sqlalchemy.dialects.postgresql.ranges.RangeOperators :members: .. warning:: The range type DDL support should work with any Postgres DBAPI driver, however the data types returned may vary. If you are using ``psycopg2``, it's recommended to upgrade to version 2.5 or later before using these column types. PostgreSQL Constraint Types --------------------------- SQLAlchemy supports Postgresql EXCLUDE constraints via the :class:`ExcludeConstraint` class: .. autoclass:: ExcludeConstraint :members: __init__ For example:: from sqlalchemy.dialects.postgresql import ExcludeConstraint, TSRANGE class RoomBookings(Base): room = Column(Integer(), primary_key=True) during = Column(TSRANGE()) __table_args__ = ( ExcludeConstraint(('room', '='), ('during', '&&')), ) psycopg2 -------------- .. automodule:: sqlalchemy.dialects.postgresql.psycopg2 py-postgresql -------------------- .. automodule:: sqlalchemy.dialects.postgresql.pypostgresql pg8000 -------------- .. automodule:: sqlalchemy.dialects.postgresql.pg8000 zxjdbc -------------- .. automodule:: sqlalchemy.dialects.postgresql.zxjdbc SQLAlchemy-0.8.4/doc/build/dialects/sqlite.rst0000644000076500000240000000133512251147171021723 0ustar classicstaff00000000000000.. _sqlite_toplevel: SQLite ====== .. automodule:: sqlalchemy.dialects.sqlite.base SQLite Data Types ------------------------ As with all SQLAlchemy dialects, all UPPERCASE types that are known to be valid with SQLite are importable from the top level dialect, whether they originate from :mod:`sqlalchemy.types` or from the local dialect:: from sqlalchemy.dialects.sqlite import \ BLOB, BOOLEAN, CHAR, DATE, DATETIME, DECIMAL, FLOAT, \ INTEGER, NUMERIC, SMALLINT, TEXT, TIME, TIMESTAMP, \ VARCHAR .. module:: sqlalchemy.dialects.sqlite .. autoclass:: DATETIME .. autoclass:: DATE .. 
autoclass:: TIME Pysqlite -------- .. automodule:: sqlalchemy.dialects.sqlite.pysqliteSQLAlchemy-0.8.4/doc/build/dialects/sybase.rst0000644000076500000240000000047612251147171021715 0ustar classicstaff00000000000000.. _sybase_toplevel: Sybase ====== .. automodule:: sqlalchemy.dialects.sybase.base python-sybase ------------------- .. automodule:: sqlalchemy.dialects.sybase.pysybase pyodbc ------------ .. automodule:: sqlalchemy.dialects.sybase.pyodbc mxodbc ------------ .. automodule:: sqlalchemy.dialects.sybase.mxodbc SQLAlchemy-0.8.4/doc/build/faq.rst0000644000076500000240000010641712251150015017377 0ustar classicstaff00000000000000:orphan: .. _faq_toplevel: ============================ Frequently Asked Questions ============================ .. contents:: :local: :class: faq :backlinks: none Connections / Engines ===================== How do I configure logging? --------------------------- See :ref:`dbengine_logging`. How do I pool database connections? Are my connections pooled? ---------------------------------------------------------------- SQLAlchemy performs application-level connection pooling automatically in most cases. With the exception of SQLite, a :class:`.Engine` object refers to a :class:`.QueuePool` as a source of connectivity. For more detail, see :ref:`engines_toplevel` and :ref:`pooling_toplevel`. How do I pass custom connect arguments to my database API? ----------------------------------------------------------- The :func:`.create_engine` call accepts additional arguments either directly via the ``connect_args`` keyword argument:: e = create_engine("mysql://scott:tiger@localhost/test", connect_args={"encoding": "utf8"}) Or for basic string and integer arguments, they can usually be specified in the query string of the URL:: e = create_engine("mysql://scott:tiger@localhost/test?encoding=utf8") .. seealso:: :ref:`custom_dbapi_args` "MySQL Server has gone away" ---------------------------- There are two major causes for this error: 1. The MySQL client closes connections which have been idle for a set period of time, defaulting to eight hours. This can be avoided by using the ``pool_recycle`` setting with :func:`.create_engine`, described at :ref:`mysql_connection_timeouts`. 2. Usage of the MySQLdb :term:`DBAPI`, or a similar DBAPI, in a non-threadsafe manner, or in an otherwise inappropriate way. The MySQLdb connection object is not threadsafe - this expands out to any SQLAlchemy system that links to a single connection, which includes the ORM :class:`.Session`. For background on how :class:`.Session` should be used in a multithreaded environment, see :ref:`session_faq_threadsafe`. Why does SQLAlchemy issue so many ROLLBACKs? --------------------------------------------- SQLAlchemy currently assumes DBAPI connections are in "non-autocommit" mode - this is the default behavior of the Python database API, meaning it must be assumed that a transaction is always in progress. The connection pool issues ``connection.rollback()`` when a connection is returned. This is so that any transactional resources remaining on the connection are released. On a database like Postgresql or MSSQL where table resources are aggressively locked, this is critical so that rows and tables don't remain locked within connections that are no longer in use. An application can otherwise hang. It's not just for locks, however, and is equally critical on any database that has any kind of transaction isolation, including MySQL with InnoDB. 
Any connection that is still inside an old transaction will return stale data, if that data was already queried on that connection within isolation. For background on why you might see stale data even on MySQL, see http://dev.mysql.com/doc/refman/5.1/en/innodb-transaction-model.html I'm on MyISAM - how do I turn it off? ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ The behavior of the connection pool's connection return behavior can be configured using ``reset_on_return``:: from sqlalchemy import create_engine from sqlalchemy.pool import QueuePool engine = create_engine('mysql://scott:tiger@localhost/myisam_database', pool=QueuePool(reset_on_return=False)) I'm on SQL Server - how do I turn those ROLLBACKs into COMMITs? ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ``reset_on_return`` accepts the values ``commit``, ``rollback`` in addition to ``True``, ``False``, and ``None``. Setting to ``commit`` will cause a COMMIT as any connection is returned to the pool:: engine = create_engine('mssql://scott:tiger@mydsn', pool=QueuePool(reset_on_return='commit')) I am using multiple connections with a SQLite database (typically to test transaction operation), and my test program is not working! ---------------------------------------------------------------------------------------------------------------------------------------------------------- If using a SQLite ``:memory:`` database, or a version of SQLAlchemy prior to version 0.7, the default connection pool is the :class:`.SingletonThreadPool`, which maintains exactly one SQLite connection per thread. So two connections in use in the same thread will actually be the same SQLite connection. Make sure you're not using a :memory: database and use :class:`.NullPool`, which is the default for non-memory databases in current SQLAlchemy versions. .. seealso:: :ref:`pysqlite_threading_pooling` - info on PySQLite's behavior. How do I get at the raw DBAPI connection when using an Engine? -------------------------------------------------------------- With a regular SA engine-level Connection, you can get at a pool-proxied version of the DBAPI connection via the :attr:`.Connection.connection` attribute on :class:`.Connection`, and for the really-real DBAPI connection you can call the :attr:`.ConnectionFairy.connection` attribute on that - but there should never be any need to access the non-pool-proxied DBAPI connection, as all methods are proxied through:: engine = create_engine(...) conn = engine.connect() conn.connection. cursor = conn.connection.cursor() You must ensure that you revert any isolation level settings or other operation-specific settings on the connection back to normal before returning it to the pool. As an alternative to reverting settings, you can call the :meth:`.Connection.detach` method on either :class:`.Connection` or the proxied connection, which will de-associate the connection from the pool such that it will be closed and discarded when :meth:`.Connection.close` is called:: conn = engine.connect() conn.detach() # detaches the DBAPI connection from the connection pool conn.connection. conn.close() # connection is closed for real, the pool replaces it with a new connection MetaData / Schema ================== My program is hanging when I say ``table.drop()`` / ``metadata.drop_all()`` ---------------------------------------------------------------------------- This usually corresponds to two conditions: 1. using PostgreSQL, which is really strict about table locks, and 2. 
you have a connection still open which contains locks on the table and is distinct from the connection being used for the DROP statement. Heres the most minimal version of the pattern:: connection = engine.connect() result = connection.execute(mytable.select()) mytable.drop(engine) Above, a connection pool connection is still checked out; furthermore, the result object above also maintains a link to this connection. If "implicit execution" is used, the result will hold this connection opened until the result object is closed or all rows are exhausted. The call to ``mytable.drop(engine)`` attempts to emit DROP TABLE on a second connection procured from the :class:`.Engine` which will lock. The solution is to close out all connections before emitting DROP TABLE:: connection = engine.connect() result = connection.execute(mytable.select()) # fully read result sets result.fetchall() # close connections connection.close() # now locks are removed mytable.drop(engine) Does SQLAlchemy support ALTER TABLE, CREATE VIEW, CREATE TRIGGER, Schema Upgrade Functionality? ----------------------------------------------------------------------------------------------- General ALTER support isn't present in SQLAlchemy directly. For special DDL on an ad-hoc basis, the :class:`.DDL` and related constructs can be used. See :doc:`core/ddl` for a discussion on this subject. A more comprehensive option is to use schema migration tools, such as Alembic or SQLAlchemy-Migrate; see :ref:`schema_migrations` for discussion on this. How can I sort Table objects in order of their dependency? ----------------------------------------------------------- This is available via the :attr:`.MetaData.sorted_tables` function:: metadata = MetaData() # ... add Table objects to metadata ti = metadata.sorted_tables: for t in ti: print t How can I get the CREATE TABLE/ DROP TABLE output as a string? --------------------------------------------------------------- Modern SQLAlchemy has clause constructs which represent DDL operations. These can be rendered to strings like any other SQL expression:: from sqlalchemy.schema import CreateTable print CreateTable(mytable) To get the string specific to a certain engine:: print CreateTable(mytable).compile(engine) There's also a special form of :class:`.Engine` that can let you dump an entire metadata creation sequence, using this recipe:: def dump(sql, *multiparams, **params): print sql.compile(dialect=engine.dialect) engine = create_engine('postgresql://', strategy='mock', executor=dump) metadata.create_all(engine, checkfirst=False) The `Alembic `_ tool also supports an "offline" SQL generation mode that renders database migrations as SQL scripts. How can I subclass Table/Column to provide certain behaviors/configurations? ------------------------------------------------------------------------------ :class:`.Table` and :class:`.Column` are not good targets for direct subclassing. However, there are simple ways to get on-construction behaviors using creation functions, and behaviors related to the linkages between schema objects such as constraint conventions or naming conventions using attachment events. An example of many of these techniques can be seen at `Naming Conventions `_. SQL Expressions ================= Why does ``.col.in_([])`` Produce ``col != col``? Why not ``1=0``? ------------------------------------------------------------------- A little introduction to the issue. 
The IN operator in SQL, given a list of elements to compare against a column, generally does not accept an empty list, that is while it is valid to say:: column IN (1, 2, 3) it's not valid to say:: column IN () SQLAlchemy's :meth:`.Operators.in_` operator, when given an empty list, produces this expression:: column != column As of version 0.6, it also produces a warning stating that a less efficient comparison operation will be rendered. This expression is the only one that is both database agnostic and produces correct results. For example, the naive approach of "just evaluate to false, by comparing 1=0 or 1!=1", does not handle nulls properly. An expression like:: NOT column != column will not return a row when "column" is null, but an expression which does not take the column into account:: NOT 1=0 will. Closer to the mark is the following CASE expression:: CASE WHEN column IS NOT NULL THEN 1=0 ELSE NULL END We don't use this expression due to its verbosity, and its also not typically accepted by Oracle within a WHERE clause - depending on how you phrase it, you'll either get "ORA-00905: missing keyword" or "ORA-00920: invalid relational operator". It's also still less efficient than just rendering SQL without the clause altogether (or not issuing the SQL at all, if the statement is just a simple search). The best approach therefore is to avoid the usage of IN given an argument list of zero length. Instead, don't emit the Query in the first place, if no rows should be returned. The warning is best promoted to a full error condition using the Python warnings filter (see http://docs.python.org/library/warnings.html). ORM Configuration ================== How do I map a table that has no primary key? --------------------------------------------- In almost all cases, a table does have a so-called :term:`candidate key`, which is a column or series of columns that uniquely identify a row. If a table truly doesn't have this, and has actual fully duplicate rows, the table is not corresponding to `first normal form `_ and cannot be mapped. Otherwise, whatever columns comprise the best candidate key can be applied directly to the mapper:: class SomeClass(Base): __table__ = some_table_with_no_pk __mapper_args__ = { 'primary_key':[some_table_with_no_pk.c.uid, some_table_with_no_pk.c.bar] } Better yet is when using fully declared table metadata, use the ``primary_key=True`` flag on those columns:: class SomeClass(Base): __tablename__ = "some_table_with_no_pk" uid = Column(Integer, primary_key=True) bar = Column(String, primary_key=True) All tables in a relational database should have primary keys. Even a many-to-many association table - the primary key would be the composite of the two association columns:: CREATE TABLE my_association ( user_id INTEGER REFERENCES user(id), account_id INTEGER REFERENCES account(id), PRIMARY KEY (user_id, account_id) ) How do I configure a Column that is a Python reserved word or similar? ---------------------------------------------------------------------------- Column-based attributes can be given any name desired in the mapping. See :ref:`mapper_column_distinct_names`. How do I get a list of all columns, relationships, mapped attributes, etc. given a mapped class? ------------------------------------------------------------------------------------------------- This information is all available from the :class:`.Mapper` object. 
To get at the :class:`.Mapper` for a particular mapped class, call the :func:`.inspect` function on it:: from sqlalchemy import inspect mapper = inspect(MyClass) From there, all information about the class can be acquired using such methods as: * :attr:`.Mapper.attrs` - a namespace of all mapped attributes. The attributes themselves are instances of :class:`.MapperProperty`, which contain additional attributes that can lead to the mapped SQL expression or column, if applicable. * :attr:`.Mapper.column_attrs` - the mapped attribute namespace limited to column and SQL expression attributes. You might want to use :attr:`.Mapper.columns` to get at the :class:`.Column` objects directly. * :attr:`.Mapper.relationships` - namespace of all :class:`.RelationshipProperty` attributes. * :attr:`.Mapper.all_orm_descriptors` - namespace of all mapped attributes, plus user-defined attributes defined using systems such as :class:`.hybrid_property`, :class:`.AssociationProxy` and others. * :attr:`.Mapper.columns` - A namespace of :class:`.Column` objects and other named SQL expressions associated with the mapping. * :attr:`.Mapper.mapped_table` - The :class:`.Table` or other selectable to which this mapper is mapped. * :attr:`.Mapper.local_table` - The :class:`.Table` that is "local" to this mapper; this differs from :attr:`.Mapper.mapped_table` in the case of a mapper mapped using inheritance to a composed selectable. I'm using Declarative and setting primaryjoin/secondaryjoin using an ``and_()`` or ``or_()``, and I am getting an error message about foreign keys. ------------------------------------------------------------------------------------------------------------------------------------------------------------------ Are you doing this?:: class MyClass(Base): # .... foo = relationship("Dest", primaryjoin=and_("MyClass.id==Dest.foo_id", "MyClass.foo==Dest.bar")) That's an ``and_()`` of two string expressions, which SQLAlchemy cannot apply any mapping towards. Declarative allows :func:`.relationship` arguments to be specified as strings, which are converted into expression objects using ``eval()``. But this doesn't occur inside of an ``and_()`` expression - it's a special operation declarative applies only to the *entirety* of what's passed to primaryjoin or other arguments as a string:: class MyClass(Base): # .... foo = relationship("Dest", primaryjoin="and_(MyClass.id==Dest.foo_id, MyClass.foo==Dest.bar)") Or if the objects you need are already available, skip the strings:: class MyClass(Base): # .... foo = relationship(Dest, primaryjoin=and_(MyClass.id==Dest.foo_id, MyClass.foo==Dest.bar)) The same idea applies to all the other arguments, such as ``foreign_keys``:: # wrong ! foo = relationship(Dest, foreign_keys=["Dest.foo_id", "Dest.bar_id"]) # correct ! foo = relationship(Dest, foreign_keys="[Dest.foo_id, Dest.bar_id]") # also correct ! foo = relationship(Dest, foreign_keys=[Dest.foo_id, Dest.bar_id]) # if you're using columns from the class that you're inside of, just use the column objects ! class MyClass(Base): foo_id = Column(...) bar_id = Column(...) # ... foo = relationship(Dest, foreign_keys=[foo_id, bar_id]) Sessions / Queries =================== "This Session's transaction has been rolled back due to a previous exception during flush." 
(or similar) --------------------------------------------------------------------------------------------------------- This is an error that occurs when a :meth:`.Session.flush` raises an exception, rolls back the transaction, but further commands upon the `Session` are called without an explicit call to :meth:`.Session.rollback` or :meth:`.Session.close`. It usually corresponds to an application that catches an exception upon :meth:`.Session.flush` or :meth:`.Session.commit` and does not properly handle the exception. For example:: from sqlalchemy import create_engine, Column, Integer from sqlalchemy.orm import sessionmaker from sqlalchemy.ext.declarative import declarative_base Base = declarative_base(create_engine('sqlite://')) class Foo(Base): __tablename__ = 'foo' id = Column(Integer, primary_key=True) Base.metadata.create_all() session = sessionmaker()() # constraint violation session.add_all([Foo(id=1), Foo(id=1)]) try: session.commit() except: # ignore error pass # continue using session without rolling back session.commit() The usage of the :class:`.Session` should fit within a structure similar to this:: try: session.commit() except: session.rollback() raise finally: session.close() # optional, depends on use case Many things can cause a failure within the try/except besides flushes. You should always have some kind of "framing" of your session operations so that connection and transaction resources have a definitive boundary, otherwise your application doesn't really have its usage of resources under control. This is not to say that you need to put try/except blocks all throughout your application - on the contrary, this would be a terrible idea. You should architect your application such that there is one (or few) point(s) of "framing" around session operations. For a detailed discussion on how to organize usage of the :class:`.Session`, please see :ref:`session_faq_whentocreate`. But why does flush() insist on issuing a ROLLBACK? ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ It would be great if :meth:`.Session.flush` could partially complete and then not roll back, however this is beyond its current capabilities since its internal bookkeeping would have to be modified such that it can be halted at any time and be exactly consistent with what's been flushed to the database. While this is theoretically possible, the usefulness of the enhancement is greatly decreased by the fact that many database operations require a ROLLBACK in any case. Postgres in particular has operations which, once failed, the transaction is not allowed to continue:: test=> create table foo(id integer primary key); NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "foo_pkey" for table "foo" CREATE TABLE test=> begin; BEGIN test=> insert into foo values(1); INSERT 0 1 test=> commit; COMMIT test=> begin; BEGIN test=> insert into foo values(1); ERROR: duplicate key value violates unique constraint "foo_pkey" test=> insert into foo values(2); ERROR: current transaction is aborted, commands ignored until end of transaction block What SQLAlchemy offers that solves both issues is support of SAVEPOINT, via :meth:`.Session.begin_nested`. Using :meth:`.Session.begin_nested`, you can frame an operation that may potentially fail within a transaction, and then "roll back" to the point before its failure while maintaining the enclosing transaction. But why isn't the one automatic call to ROLLBACK enough? Why must I ROLLBACK again? 
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ This is again a matter of the :class:`.Session` providing a consistent interface and refusing to guess about what context its being used. For example, the :class:`.Session` supports "framing" above within multiple levels. Such as, suppose you had a decorator ``@with_session()``, which did this:: def with_session(fn): def go(*args, **kw): session.begin(subtransactions=True) try: ret = fn(*args, **kw) session.commit() return ret except: session.rollback() raise return go The above decorator begins a transaction if one does not exist already, and then commits it, if it were the creator. The "subtransactions" flag means that if :meth:`.Session.begin` were already called by an enclosing function, nothing happens except a counter is incremented - this counter is decremented when :meth:`.Session.commit` is called and only when it goes back to zero does the actual COMMIT happen. It allows this usage pattern:: @with_session def one(): # do stuff two() @with_session def two(): # etc. one() two() ``one()`` can call ``two()``, or ``two()`` can be called by itself, and the ``@with_session`` decorator ensures the appropriate "framing" - the transaction boundaries stay on the outermost call level. As you can see, if ``two()`` calls ``flush()`` which throws an exception and then issues a ``rollback()``, there will *always* be a second ``rollback()`` performed by the decorator, and possibly a third corresponding to two levels of decorator. If the ``flush()`` pushed the ``rollback()`` all the way out to the top of the stack, and then we said that all remaining ``rollback()`` calls are moot, there is some silent behavior going on there. A poorly written enclosing method might suppress the exception, and then call ``commit()`` assuming nothing is wrong, and then you have a silent failure condition. The main reason people get this error in fact is because they didn't write clean "framing" code and they would have had other problems down the road. If you think the above use case is a little exotic, the same kind of thing comes into play if you want to SAVEPOINT- you might call ``begin_nested()`` several times, and the ``commit()``/``rollback()`` calls each resolve the most recent ``begin_nested()``. The meaning of ``rollback()`` or ``commit()`` is dependent upon which enclosing block it is called, and you might have any sequence of ``rollback()``/``commit()`` in any order, and its the level of nesting that determines their behavior. In both of the above cases, if ``flush()`` broke the nesting of transaction blocks, the behavior is, depending on scenario, anywhere from "magic" to silent failure to blatant interruption of code flow. ``flush()`` makes its own "subtransaction", so that a transaction is started up regardless of the external transactional state, and when complete it calls ``commit()``, or ``rollback()`` upon failure - but that ``rollback()`` corresponds to its own subtransaction - it doesn't want to guess how you'd like to handle the external "framing" of the transaction, which could be nested many levels with any combination of subtransactions and real SAVEPOINTs. The job of starting/ending the "frame" is kept consistently with the code external to the ``flush()``, and we made a decision that this was the most consistent approach. I'm inserting 400,000 rows with the ORM and it's really slow! 
-------------------------------------------------------------- The SQLAlchemy ORM uses the :term:`unit of work` pattern when synchronizing changes to the database. This pattern goes far beyond simple "inserts" of data. It includes that attributes which are assigned on objects are received using an attribute instrumentation system which tracks changes on objects as they are made, includes that all rows inserted are tracked in an identity map which has the effect that for each row SQLAlchemy must retrieve its "last inserted id" if not already given, and also involves that rows to be inserted are scanned and sorted for dependencies as needed. Objects are also subject to a fair degree of bookkeeping in order to keep all of this running, which for a very large number of rows at once can create an inordinate amount of time spent with large data structures, hence it's best to chunk these. Basically, unit of work is a large degree of automation in order to automate the task of persisting a complex object graph into a relational database with no explicit persistence code, and this automation has a price. ORMs are basically not intended for high-performance bulk inserts - this is the whole reason SQLAlchemy offers the Core in addition to the ORM as a first-class component. For the use case of fast bulk inserts, the SQL generation and execution system that the ORM builds on top of is part of the Core. Using this system directly, we can produce an INSERT that is competitive with using the raw database API directly. The example below illustrates time-based tests for four different methods of inserting rows, going from the most automated to the least. With cPython 2.7, runtimes observed:: classics-MacBook-Pro:sqlalchemy classic$ python test.py SQLAlchemy ORM: Total time for 100000 records 14.3528850079 secs SQLAlchemy ORM pk given: Total time for 100000 records 10.0164160728 secs SQLAlchemy Core: Total time for 100000 records 0.775382995605 secs sqlite3: Total time for 100000 records 0.676795005798 sec We can reduce the time by a factor of three using recent versions of `Pypy `_:: classics-MacBook-Pro:sqlalchemy classic$ /usr/local/src/pypy-2.1-beta2-osx64/bin/pypy test.py SQLAlchemy ORM: Total time for 100000 records 5.88369488716 secs SQLAlchemy ORM pk given: Total time for 100000 records 3.52294301987 secs SQLAlchemy Core: Total time for 100000 records 0.613556146622 secs sqlite3: Total time for 100000 records 0.442467927933 sec Script:: import time import sqlite3 from sqlalchemy.ext.declarative import declarative_base from sqlalchemy import Column, Integer, String, create_engine from sqlalchemy.orm import scoped_session, sessionmaker Base = declarative_base() DBSession = scoped_session(sessionmaker()) engine = None class Customer(Base): __tablename__ = "customer" id = Column(Integer, primary_key=True) name = Column(String(255)) def init_sqlalchemy(dbname='sqlite:///sqlalchemy.db'): global engine engine = create_engine(dbname, echo=False) DBSession.remove() DBSession.configure(bind=engine, autoflush=False, expire_on_commit=False) Base.metadata.drop_all(engine) Base.metadata.create_all(engine) def test_sqlalchemy_orm(n=100000): init_sqlalchemy() t0 = time.time() for i in range(n): customer = Customer() customer.name = 'NAME ' + str(i) DBSession.add(customer) if i % 1000 == 0: DBSession.flush() DBSession.commit() print("SQLAlchemy ORM: Total time for " + str(n) + " records " + str(time.time() - t0) + " secs") def test_sqlalchemy_orm_pk_given(n=100000): init_sqlalchemy() t0 = time.time() for i in 
range(n): customer = Customer(id=i+1, name="NAME " + str(i)) DBSession.add(customer) if i % 1000 == 0: DBSession.flush() DBSession.commit() print("SQLAlchemy ORM pk given: Total time for " + str(n) + " records " + str(time.time() - t0) + " secs") def test_sqlalchemy_core(n=100000): init_sqlalchemy() t0 = time.time() engine.execute( Customer.__table__.insert(), [{"name": 'NAME ' + str(i)} for i in range(n)] ) print("SQLAlchemy Core: Total time for " + str(n) + " records " + str(time.time() - t0) + " secs") def init_sqlite3(dbname): conn = sqlite3.connect(dbname) c = conn.cursor() c.execute("DROP TABLE IF EXISTS customer") c.execute("CREATE TABLE customer (id INTEGER NOT NULL, " "name VARCHAR(255), PRIMARY KEY(id))") conn.commit() return conn def test_sqlite3(n=100000, dbname='sqlite3.db'): conn = init_sqlite3(dbname) c = conn.cursor() t0 = time.time() for i in range(n): row = ('NAME ' + str(i),) c.execute("INSERT INTO customer (name) VALUES (?)", row) conn.commit() print("sqlite3: Total time for " + str(n) + " records " + str(time.time() - t0) + " sec") if __name__ == '__main__': test_sqlalchemy_orm(100000) test_sqlalchemy_orm_pk_given(100000) test_sqlalchemy_core(100000) test_sqlite3(100000) How do I make a Query that always adds a certain filter to every query? ------------------------------------------------------------------------------------------------ See the recipe at `PreFilteredQuery `_. I've created a mapping against an Outer Join, and while the query returns rows, no objects are returned. Why not? ------------------------------------------------------------------------------------------------------------------ Rows returned by an outer join may contain NULL for part of the primary key, as the primary key is the composite of both tables. The :class:`.Query` object ignores incoming rows that don't have an acceptable primary key. Based on the setting of the ``allow_partial_pks`` flag on :func:`.mapper`, a primary key is accepted if the value has at least one non-NULL value, or alternatively if the value has no NULL values. See ``allow_partial_pks`` at :func:`.mapper`. I'm using ``joinedload()`` or ``lazy=False`` to create a JOIN/OUTER JOIN and SQLAlchemy is not constructing the correct query when I try to add a WHERE, ORDER BY, LIMIT, etc. (which relies upon the (OUTER) JOIN) ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- The joins generated by joined eager loading are only used to fully load related collections, and are designed to have no impact on the primary results of the query. Since they are anonymously aliased, they cannot be referenced directly. For detail on this beahvior, see :doc:`orm/loading`. Query has no ``__len__()``, why not? ------------------------------------ The Python ``__len__()`` magic method applied to an object allows the ``len()`` builtin to be used to determine the length of the collection. It's intuitive that a SQL query object would link ``__len__()`` to the :meth:`.Query.count` method, which emits a `SELECT COUNT`. The reason this is not possible is because evaluating the query as a list would incur two SQL calls instead of one:: class Iterates(object): def __len__(self): print "LEN!" return 5 def __iter__(self): print "ITER!" return iter([1, 2, 3, 4, 5]) list(Iterates()) output:: ITER! LEN! How Do I use Textual SQL with ORM Queries? 
------------------------------------------- See: * :ref:`orm_tutorial_literal_sql` - Ad-hoc textual blocks with :class:`.Query` * :ref:`session_sql_expressions` - Using :class:`.Session` with textual SQL directly. I'm calling ``Session.delete(myobject)`` and it isn't removed from the parent collection! ------------------------------------------------------------------------------------------ See :ref:`session_deleting_from_collections` for a description of this behavior. why isnt my ``__init__()`` called when I load objects? ------------------------------------------------------ See :ref:`mapping_constructors` for a description of this behavior. how do I use ON DELETE CASCADE with SA's ORM? ---------------------------------------------- SQLAlchemy will always issue UPDATE or DELETE statements for dependent rows which are currently loaded in the :class:`.Session`. For rows which are not loaded, it will by default issue SELECT statements to load those rows and udpate/delete those as well; in other words it assumes there is no ON DELETE CASCADE configured. To configure SQLAlchemy to cooperate with ON DELETE CASCADE, see :ref:`passive_deletes`. I set the "foo_id" attribute on my instance to "7", but the "foo" attribute is still ``None`` - shouldn't it have loaded Foo with id #7? ---------------------------------------------------------------------------------------------------------------------------------------------------- The ORM is not constructed in such a way as to support immediate population of relationships driven from foreign key attribute changes - instead, it is designed to work the other way around - foreign key attributes are handled by the ORM behind the scenes, the end user sets up object relationships naturally. Therefore, the recommended way to set ``o.foo`` is to do just that - set it!:: foo = Session.query(Foo).get(7) o.foo = foo Session.commit() Manipulation of foreign key attributes is of course entirely legal. However, setting a foreign-key attribute to a new value currently does not trigger an "expire" event of the :func:`.relationship` in which it's involved (this may be implemented in the future). This means that for the following sequence:: o = Session.query(SomeClass).first() assert o.foo is None o.foo_id = 7 ``o.foo`` is loaded when we checked it for ``None``. Setting ``o.foo_id=7`` will have the value of "7" as pending, but no flush has occurred. For ``o.foo`` to load based on the foreign key mutation is usually achieved naturally after the commit, which both flushes the new foreign key value and expires all state:: Session.commit() assert o.foo is A more minimal operation is to expire the attribute individually. The :meth:`.Session.flush` is also needed if the object is pending (hasn't been INSERTed yet), or if the relationship is many-to-one prior to 0.6.5:: Session.expire(o, ['foo']) Session.flush() assert o.foo is Where above, expiring the attribute triggers a lazy load on the next access of ``o.foo``. The object does not "autoflush" on access of ``o.foo`` if the object is pending, since it is usually desirable that a pending object doesn't autoflush prematurely and/or excessively, while its state is still being populated. Also see the recipe `ExpireRelationshipOnFKChange `_, which features a mechanism to actually achieve this behavior to a reasonable degree in simple situations. Is there a way to automagically have only unique keywords (or other kinds of objects) without doing a query for the keyword and getting a reference to the row containing that keyword? 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- When people read the many-to-many example in the docs, they get hit with the fact that if you create the same ``Keyword`` twice, it gets put in the DB twice. Which is somewhat inconvenient. This `UniqueObject `_ recipe was created to address this issue. SQLAlchemy-0.8.4/doc/build/glossary.rst0000644000076500000240000005702412251150015020472 0ustar classicstaff00000000000000:orphan: .. _glossary: ======== Glossary ======== .. note:: The Glossary is a brand new addition to the documentation. While sparse at the moment we hope to fill it up with plenty of new terms soon! .. glossary:: :sorted: annotations Annotations are a concept used internally by SQLAlchemy in order to store additional information along with :class:`.ClauseElement` objects. A Python dictionary is associated with a copy of the object, which contains key/value pairs significant to various internal systems, mostly within the ORM:: some_column = Column('some_column', Integer) some_column_annotated = some_column._annotate({"entity": User}) The annotation system differs from the public dictionary :attr:`.Column.info` in that the above annotation operation creates a *copy* of the new :class:`.Column`, rather than considering all annotation values to be part of a single unit. The ORM creates copies of expression objects in order to apply annotations that are specific to their context, such as to differentiate columns that should render themselves as relative to a joined-inheritance entity versus those which should render relative to their immediate parent table alone, as well as to differentiate columns within the "join condition" of a relationship where the column in some cases needs to be expressed in terms of one particular table alias or another, based on its position within the join expression. descriptor descriptors In Python, a descriptor is an object attribute with “binding behavior”, one whose attribute access has been overridden by methods in the `descriptor protocol `_. Those methods are __get__(), __set__(), and __delete__(). If any of those methods are defined for an object, it is said to be a descriptor. In SQLAlchemy, descriptors are used heavily in order to provide attribute behavior on mapped classes. When a class is mapped as such:: class MyClass(Base): __tablename__ = 'foo' id = Column(Integer, primary_key=True) data = Column(String) The ``MyClass`` class will be :term:`mapped` when its definition is complete, at which point the ``id`` and ``data`` attributes, starting out as :class:`.Column` objects, will be replaced by the :term:`instrumentation` system with instances of :class:`.InstrumentedAttribute`, which are descriptors that provide the above mentioned ``__get__()``, ``__set__()`` and ``__delete__()`` methods. The :class:`.InstrumentedAttribute` will generate a SQL expression when used at the class level:: >>> print MyClass.data == 5 data = :data_1 and at the instance level, keeps track of changes to values, and also :term:`lazy loads` unloaded attributes from the database:: >>> m1 = MyClass() >>> m1.id = 5 >>> m1.data = "some data" >>> from sqlalchemy import inspect >>> inspect(m1).attrs.data.history.added "some data" discriminator A result-set column which is used during :term:`polymorphic` loading to determine what kind of mapped class should be applied to a particular incoming result row. 
In SQLAlchemy, the classes are always part of a hierarchy mapping using inheritance mapping. .. seealso:: :ref:`inheritance_toplevel` instrumentation instrumented Instrumentation refers to the process of augmenting the functionality and attribute set of a particular class. Ideally, the behavior of the class should remain close to a regular class, except that additional behviors and features are made available. The SQLAlchemy :term:`mapping` process, among other things, adds database-enabled :term:`descriptors` to a mapped class which each represent a particular database column or relationship to a related class. identity map A mapping between Python objects and their database identities. The identity map is a collection that's associated with an ORM :term:`session` object, and maintains a single instance of every database object keyed to its identity. The advantage to this pattern is that all operations which occur for a particular database identity are transparently coordinated onto a single object instance. When using an identity map in conjunction with an :term:`isolated` transaction, having a reference to an object that's known to have a particular primary key can be considered from a practical standpoint to be a proxy to the actual database row. .. seealso:: Martin Fowler - Identity Map - http://martinfowler.com/eaaCatalog/identityMap.html lazy load lazy loads In object relational mapping, a "lazy load" refers to an attribute that does not contain its database-side value for some period of time, typically when the object is first loaded. Instead, the attribute receives a *memoization* that causes it to go out to the database and load its data when it's first used. Using this pattern, the complexity and time spent within object fetches can sometimes be reduced, in that attributes for related tables don't need to be addressed immediately. .. seealso:: `Lazy Load (on Martin Fowler) `_ :term:`N plus one problem` :doc:`orm/loading` mapping mapped We say a class is "mapped" when it has been passed through the :func:`.orm.mapper` function. This process associates the class with a database table or other :term:`selectable` construct, so that instances of it can be persisted using a :class:`.Session` as well as loaded using a :class:`.Query`. N plus one problem The N plus one problem is a common side effect of the :term:`lazy load` pattern, whereby an application wishes to iterate through a related attribute or collection on each member of a result set of objects, where that attribute or collection is set to be loaded via the lazy load pattern. The net result is that a SELECT statement is emitted to load the initial result set of parent objects; then, as the application iterates through each member, an additional SELECT statement is emitted for each member in order to load the related attribute or collection for that member. The end result is that for a result set of N parent objects, there will be N + 1 SELECT statements emitted. The N plus one problem is alleviated using :term:`eager loading`. .. seealso:: :doc:`orm/loading` polymorphic polymorphically Refers to a function that handles several types at once. In SQLAlchemy, the term is usually applied to the concept of an ORM mapped class whereby a query operation will return different subclasses based on information in the result set, typically by checking the value of a particular column in the result known as the :term:`discriminator`. 
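For example, the following is a minimal single table inheritance sketch (the class and column names here are illustrative, not taken from this documentation) in which the ``type`` column acts as the discriminator; querying for ``Employee`` returns ``Engineer`` or ``Manager`` instances depending on the value of ``type`` in each row::

    from sqlalchemy import Column, Integer, String
    from sqlalchemy.ext.declarative import declarative_base

    Base = declarative_base()

    class Employee(Base):
        __tablename__ = 'employee'
        id = Column(Integer, primary_key=True)
        name = Column(String(50))
        type = Column(String(20))   # the discriminator column

        __mapper_args__ = {
            'polymorphic_on': type,
            'polymorphic_identity': 'employee'
        }

    class Engineer(Employee):
        __mapper_args__ = {'polymorphic_identity': 'engineer'}

    class Manager(Employee):
        __mapper_args__ = {'polymorphic_identity': 'manager'}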
Polymorphic loading in SQLAlchemy implies that one or a combination of three different schemes are used to map a hierarchy of classes: "joined", "single", and "concrete".  The section :ref:`inheritance_toplevel` describes inheritance mapping fully.

generative
    A term that SQLAlchemy uses to refer to what's normally known as :term:`method chaining`; see that term for details.

method chaining
    An object-oriented technique whereby the state of an object is constructed by calling methods on the object.  The object features any number of methods, each of which returns a new object (or in some cases the same object) with additional state added to the object.

    The two SQLAlchemy objects that make the most use of method chaining are the :class:`~.expression.Select` object and the :class:`~.orm.query.Query` object.  For example, a :class:`~.expression.Select` object can be assigned two expressions to its WHERE clause as well as an ORDER BY clause by calling upon the :meth:`~.Select.where` and :meth:`~.Select.order_by` methods::

        stmt = select([user.c.name]).\
                where(user.c.id > 5).\
                where(user.c.name.like('e%')).\
                order_by(user.c.name)

    Each method call above returns a copy of the original :class:`~.expression.Select` object with additional qualifiers added.

    .. seealso::

        :term:`generative`

release
releases
released
    In the context of SQLAlchemy, the term "released" refers to the process of ending the usage of a particular database connection.  SQLAlchemy features the usage of connection pools, which allow configurability as to the lifespan of database connections.  When using a pooled connection, the process of "closing" it, i.e. invoking a statement like ``connection.close()``, may have the effect of the connection being returned to an existing pool, or it may have the effect of actually shutting down the underlying TCP/IP connection referred to by that connection - which one takes place depends on configuration as well as the current state of the pool.  So we use the term *released* instead, to mean "do whatever it is you do with connections when we're done using them".

    The term will sometimes be used in the phrase "release transactional resources", to indicate more explicitly that what we are actually "releasing" is any transactional state which has accumulated upon the connection.  In most situations, the process of selecting from tables, emitting updates, etc. acquires :term:`isolated` state upon that connection as well as potential row or table locks.  This state is all local to a particular transaction on the connection, and is released when we emit a rollback.  An important feature of the connection pool is that when we return a connection to the pool, the ``connection.rollback()`` method of the DBAPI is called as well, so that as the connection is set up to be used again, it's in a "clean" state with no references held to the previous series of operations.

    .. seealso::

        :ref:`pooling_toplevel`

DBAPI
    DBAPI is shorthand for the phrase "Python Database API Specification".  This is a widely used specification within Python to define common usage patterns for all database connection packages.  The DBAPI is a "low level" API which is typically the lowest level system used in a Python application to talk to a database.
SQLAlchemy's :term:`dialect` system is constructed around the operation of the DBAPI, providing individual dialect classes which service a specific DBAPI on top of a specific database engine; for example, the :func:`.create_engine` URL ``postgresql+psycopg2://@localhost/test`` refers to the :mod:`psycopg2 <.postgresql.psycopg2>` DBAPI/dialect combination, whereas the URL ``mysql+mysqldb://@localhost/test`` refers to the :mod:`MySQL for Python <.mysql.mysqldb>` DBAPI DBAPI/dialect combination. .. seealso:: `PEP 249 - Python Database API Specification v2.0 `_ unit of work This pattern is where the system transparently keeps track of changes to objects and periodically flushes all those pending changes out to the database. SQLAlchemy's Session implements this pattern fully in a manner similar to that of Hibernate. .. seealso:: `Unit of Work by Martin Fowler `_ :doc:`orm/session` Session The container or scope for ORM database operations. Sessions load instances from the database, track changes to mapped instances and persist changes in a single unit of work when flushed. .. seealso:: :doc:`orm/session` columns clause The portion of the ``SELECT`` statement which enumerates the SQL expressions to be returned in the result set. The expressions follow the ``SELECT`` keyword directly and are a comma-separated list of individual expressions. E.g.: .. sourcecode:: sql SELECT user_account.name, user_account.email FROM user_account WHERE user_account.name = 'fred' Above, the list of columns ``user_acount.name``, ``user_account.email`` is the columns clause of the ``SELECT``. WHERE clause The portion of the ``SELECT`` statement which indicates criteria by which rows should be filtered. It is a single SQL expression which follows the keyword ``WHERE``. .. sourcecode:: sql SELECT user_account.name, user_account.email FROM user_account WHERE user_account.name = 'fred' AND user_account.status = 'E' Above, the phrase ``WHERE user_account.name = 'fred' AND user_account.status = 'E'`` comprises the WHERE clause of the ``SELECT``. FROM clause The portion of the ``SELECT`` statement which incicates the initial source of rows. A simple ``SELECT`` will feature one or more table names in its FROM clause. Multiple sources are separated by a comma: .. sourcecode:: sql SELECT user.name, address.email_address FROM user, address WHERE user.id=address.user_id The FROM clause is also where explicit joins are specified. We can rewrite the above ``SELECT`` using a single ``FROM`` element which consists of a ``JOIN`` of the two tables: .. sourcecode:: sql SELECT user.name, address.email_address FROM user JOIN address ON user.id=address.user_id subquery Refers to a ``SELECT`` statement that is embedded within an enclosing ``SELECT``. A subquery comes in two general flavors, one known as a "scalar select" which specifically must return exactly one row and one column, and the other form which acts as a "derived table" and serves as a source of rows for the FROM clause of another select. A scalar select is eligble to be placed in the :term:`WHERE clause`, :term:`columns clause`, ORDER BY clause or HAVING clause of the enclosing select, whereas the derived table form is eligible to be placed in the FROM clause of the enclosing ``SELECT``. Examples: 1. a scalar subquery placed in the :term:`columns clause` of an enclosing ``SELECT``. The subquery in this example is a :term:`correlated subquery` because part of the rows which it selects from are given via the enclosing statement. .. 
sourcecode:: sql SELECT id, (SELECT name FROM address WHERE address.user_id=user.id) FROM user 2. a scalar subquery placed in the :term:`WHERE clause` of an enclosing ``SELECT``. This subquery in this example is not correlated as it selects a fixed result. .. sourcecode:: sql SELECT id, name FROM user WHERE status=(SELECT status_id FROM status_code WHERE code='C') 3. a derived table subquery placed in the :term:`FROM clause` of an enclosing ``SELECT``. Such a subquery is almost always given an alias name. .. sourcecode:: sql SELECT user.id, user.name, ad_subq.email_address FROM user JOIN (select user_id, email_address FROM address WHERE address_type='Q') AS ad_subq ON user.id = ad_subq.user_id correlates correlated subquery correlated subqueries A :term:`subquery` is correlated if it depends on data in the enclosing ``SELECT``. Below, a subquery selects the aggregate value ``MIN(a.id)`` from the ``email_address`` table, such that it will be invoked for each value of ``user_account.id``, correlating the value of this column against the ``email_address.user_account_id`` column: .. sourcecode:: sql SELECT user_account.name, email_address.email FROM user_account JOIN email_address ON user_account.id=email_address.user_account_id WHERE email_address.id = ( SELECT MIN(a.id) FROM email_address AS a WHERE a.user_account_id=user_account.id ) The above subquery refers to the ``user_account`` table, which is not itself in the ``FROM`` clause of this nested query. Instead, the ``user_account`` table is recieved from the enclosing query, where each row selected from ``user_account`` results in a distinct execution of the subquery. A correlated subquery is in most cases present in the :term:`WHERE clause` or :term:`columns clause` of the immediately enclosing ``SELECT`` statement, as well as in the ORDER BY or HAVING clause. In less common cases, a correlated subquery may be present in the :term:`FROM clause` of an enclosing ``SELECT``; in these cases the correlation is typically due to the enclosing ``SELECT`` itself being enclosed in the WHERE, ORDER BY, columns or HAVING clause of another ``SELECT``, such as: .. sourcecode:: sql SELECT parent.id FROM parent WHERE EXISTS ( SELECT * FROM ( SELECT child.id AS id, child.parent_id AS parent_id, child.pos AS pos FROM child WHERE child.parent_id = parent.id ORDER BY child.pos LIMIT 3) WHERE id = 7) Correlation from one ``SELECT`` directly to one which encloses the correlated query via its ``FROM`` clause is not possible, because the correlation can only proceed once the original source rows from the enclosing statement's FROM clause are available. ACID ACID model An acronym for "Atomicity, Consistency, Isolation, Durability"; a set of properties that guarantee that database transactions are processed reliably. (via Wikipedia) .. seealso:: :term:`atomicity` :term:`consistency` :term:`isolation` :term:`durability` http://en.wikipedia.org/wiki/ACID_Model atomicity Atomicity is one of the components of the :term:`ACID` model, and requires that each transaction is "all or nothing": if one part of the transaction fails, the entire transaction fails, and the database state is left unchanged. An atomic system must guarantee atomicity in each and every situation, including power failures, errors, and crashes. (via Wikipedia) .. 
seealso:: :term:`ACID` http://en.wikipedia.org/wiki/Atomicity_(database_systems) consistency Consistency is one of the compoments of the :term:`ACID` model, and ensures that any transaction will bring the database from one valid state to another. Any data written to the database must be valid according to all defined rules, including but not limited to :term:`constraints`, cascades, triggers, and any combination thereof. (via Wikipedia) .. seealso:: :term:`ACID` http://en.wikipedia.org/wiki/Consistency_(database_systems) isolation isolated The isolation property of the :term:`ACID` model ensures that the concurrent execution of transactions results in a system state that would be obtained if transactions were executed serially, i.e. one after the other. Each transaction must execute in total isolation i.e. if T1 and T2 execute concurrently then each should remain independent of the other. (via Wikipedia) .. seealso:: :term:`ACID` http://en.wikipedia.org/wiki/Isolation_(database_systems) durability Durability is a property of the :term:`ACID` model which means that once a transaction has been committed, it will remain so, even in the event of power loss, crashes, or errors. In a relational database, for instance, once a group of SQL statements execute, the results need to be stored permanently (even if the database crashes immediately thereafter). (via Wikipedia) .. seealso:: :term:`ACID` http://en.wikipedia.org/wiki/Durability_(database_systems) RETURNING This is a non-SQL standard clause provided in various forms by certain backends, which provides the service of returning a result set upon execution of an INSERT, UPDATE or DELETE statement. Any set of columns from the matched rows can be returned, as though they were produced from a SELECT statement. The RETURNING clause provides both a dramatic performance boost to common update/select scenarios, including retrieval of inline- or default- generated primary key values and defaults at the moment they were created, as well as a way to get at server-generated default values in an atomic way. An example of RETURNING, idiomatic to Postgresql, looks like:: INSERT INTO user_account (name) VALUES ('new name') RETURNING id, timestamp Above, the INSERT statement will provide upon execution a result set which includes the values of the columns ``user_account.id`` and ``user_account.timestamp``, which above should have been generated as default values as they are not included otherwise (but note any series of columns or SQL expressions can be placed into RETURNING, not just default-value columns). The backends that currently support RETURNING or a similar construct are Postgresql, SQL Server, Oracle, and Firebird. The Postgresql and Firebird implementations are generally full featured, whereas the implementations of SQL Server and Oracle have caveats. On SQL Server, the clause is known as "OUTPUT INSERTED" for INSERT and UPDATE statements and "OUTPUT DELETED" for DELETE statements; the key caveat is that triggers are not supported in conjunction with this keyword. On Oracle, it is known as "RETURNING...INTO", and requires that the value be placed into an OUT paramter, meaning not only is the syntax awkward, but it can also only be used for one row at a time. SQLAlchemy's :meth:`.UpdateBase.returning` system provides a layer of abstraction on top of the RETURNING systems of these backends to provide a consistent interface for returning columns. The ORM also includes many optimizations that make use of RETURNING when available. 
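As an illustration, the following is a minimal sketch of using :meth:`.UpdateBase.returning` with the Core against a Postgresql backend; the engine URL, table, and column names below are placeholders for this example only::

    from sqlalchemy import create_engine, MetaData, Table, Column, Integer, String

    # placeholder connection URL for illustration
    engine = create_engine("postgresql+psycopg2://scott:tiger@localhost/test")

    metadata = MetaData()
    user_account = Table('user_account', metadata,
        Column('id', Integer, primary_key=True),
        Column('name', String(50)),
    )
    metadata.create_all(engine)

    conn = engine.connect()

    # ask the database to hand back the generated primary key
    stmt = user_account.insert().returning(user_account.c.id)
    result = conn.execute(stmt, name='new name')
    print(result.fetchone())    # e.g. (1,)

    conn.close()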
SQLAlchemy-0.8.4/doc/build/index.rst0000644000076500000240000000732212251150015017732 0ustar classicstaff00000000000000:orphan: .. _index_toplevel: ======================== SQLAlchemy Documentation ======================== Getting Started =============== A high level view and getting set up. :ref:`Overview ` | :ref:`Installation Guide ` | :doc:`Frequently Asked Questions ` | :doc:`Migration from 0.7 ` | :doc:`Glossary ` | :doc:`Changelog catalog ` SQLAlchemy ORM ============== Here, the Object Relational Mapper is introduced and fully described. If you want to work with higher-level SQL which is constructed automatically for you, as well as automated persistence of Python objects, proceed first to the tutorial. * **Read this first:** :doc:`orm/tutorial` * **ORM Configuration:** :doc:`Mapper Configuration ` | :doc:`Relationship Configuration ` | :doc:`Inheritance Mapping ` | :doc:`Advanced Collection Configuration ` * **Configuration Extensions:** :doc:`Declarative Extension ` | :doc:`Association Proxy ` | :doc:`Hybrid Attributes ` | :doc:`Mutable Scalars ` | :doc:`Ordered List ` * **ORM Usage:** :doc:`Session Usage and Guidelines ` | :doc:`Query API reference ` | :doc:`Relationship Loading Techniques ` * **Extending the ORM:** :doc:`ORM Event Interfaces ` | :doc:`Internals API ` * **Other:** :doc:`Introduction to Examples ` | :doc:`Deprecated Event Interfaces ` | :doc:`ORM Exceptions ` | :doc:`Horizontal Sharding ` | :doc:`Alternate Instrumentation ` SQLAlchemy Core =============== The breadth of SQLAlchemy's SQL rendering engine, DBAPI integration, transaction integration, and schema description services are documented here. In contrast to the ORM's domain-centric mode of usage, the SQL Expression Language provides a schema-centric usage paradigm. * **Read this first:** :doc:`core/tutorial` * **All the Built In SQL:** :doc:`SQL Expression API ` * **Engines, Connections, Pools:** :doc:`Engine Configuration ` | :doc:`Connections, Transactions ` | :doc:`Connection Pooling ` * **Schema Definition:** :ref:`Tables and Columns ` | :ref:`Database Introspection (Reflection) ` | :ref:`Insert/Update Defaults ` | :ref:`Constraints and Indexes ` | :ref:`Using Data Definition Language (DDL) ` * **Datatypes:** :ref:`Overview ` | :ref:`Generic Types ` | :ref:`SQL Standard Types ` | :ref:`Vendor Specific Types ` | :ref:`Building Custom Types ` | :ref:`Defining New Operators ` | :ref:`API ` * **Extending the Core:** :doc:`SQLAlchemy Events ` | :doc:`Core Event Interfaces ` | :doc:`Creating Custom SQL Constructs ` | :doc:`Internals API ` * **Other:** :doc:`Runtime Inspection API ` | :doc:`core/interfaces` | :doc:`core/exceptions` Dialect Documentation ====================== The **dialect** is the system SQLAlchemy uses to communicate with various types of DBAPIs and databases. This section describes notes, options, and usage patterns regarding individual dialects. :doc:`Index of all Dialects ` SQLAlchemy-0.8.4/doc/build/intro.rst0000644000076500000240000001576312251150015017766 0ustar classicstaff00000000000000.. _overview_toplevel: ======== Overview ======== .. _overview: Overview ======== The SQLAlchemy SQL Toolkit and Object Relational Mapper is a comprehensive set of tools for working with databases and Python. It has several distinct areas of functionality which can be used individually or combined together. Its major components are illustrated in below, with component dependencies organized into layers: .. 
image:: sqla_arch_small.png Above, the two most significant front-facing portions of SQLAlchemy are the **Object Relational Mapper** and the **SQL Expression Language**. SQL Expressions can be used independently of the ORM. When using the ORM, the SQL Expression language remains part of the public facing API as it is used within object-relational configurations and queries. .. _doc_overview: Documentation Overview ====================== The documentation is separated into three sections: :ref:`orm_toplevel`, :ref:`core_toplevel`, and :ref:`dialect_toplevel`. In :ref:`orm_toplevel`, the Object Relational Mapper is introduced and fully described. New users should begin with the :ref:`ormtutorial_toplevel`. If you want to work with higher-level SQL which is constructed automatically for you, as well as management of Python objects, proceed to this tutorial. In :ref:`core_toplevel`, the breadth of SQLAlchemy's SQL and database integration and description services are documented, the core of which is the SQL Expression language. The SQL Expression Language is a toolkit all its own, independent of the ORM package, which can be used to construct manipulable SQL expressions which can be programmatically constructed, modified, and executed, returning cursor-like result sets. In contrast to the ORM's domain-centric mode of usage, the expression language provides a schema-centric usage paradigm. New users should begin here with :ref:`sqlexpression_toplevel`. SQLAlchemy engine, connection, and pooling services are also described in :ref:`core_toplevel`. In :ref:`dialect_toplevel`, reference documentation for all provided database and DBAPI backends is provided. Code Examples ============= Working code examples, mostly regarding the ORM, are included in the SQLAlchemy distribution. A description of all the included example applications is at :ref:`examples_toplevel`. There is also a wide variety of examples involving both core SQLAlchemy constructs as well as the ORM on the wiki. See `Theatrum Chemicum `_. .. _installation: Installation Guide ================== Supported Platforms ------------------- SQLAlchemy has been tested against the following platforms: * cPython since version 2.5, through the 2.xx series * cPython version 3, throughout all 3.xx series * `Jython `_ 2.5 or greater * `Pypy `_ 1.5 or greater .. versionchanged:: 0.8 Python 2.5 is now the minimum Python version supported. Supported Installation Methods ------------------------------- SQLAlchemy supports installation using standard Python "distutils" or "setuptools" methodologies. An overview of potential setups is as follows: * **Plain Python Distutils** - SQLAlchemy can be installed with a clean Python install using the services provided via `Python Distutils `_, using the ``setup.py`` script. The C extensions as well as Python 3 builds are supported. * **Standard Setuptools** - When using `setuptools `_, SQLAlchemy can be installed via ``setup.py`` or ``easy_install``, and the C extensions are supported. setuptools is not supported on Python 3 at the time of this writing. * **Distribute** - With `distribute `_, SQLAlchemy can be installed via ``setup.py`` or ``easy_install``, and the C extensions as well as Python 3 builds are supported. * **pip** - `pip `_ is an installer that rides on top of ``setuptools`` or ``distribute``, replacing the usage of ``easy_install``. It is often preferred for its simpler mode of usage. 
Install via easy_install or pip ------------------------------- When ``easy_install`` or ``pip`` is available, the distribution can be downloaded from Pypi and installed in one step:: easy_install SQLAlchemy Or with pip:: pip install SQLAlchemy This command will download the latest version of SQLAlchemy from the `Python Cheese Shop `_ and install it to your system. Installing using setup.py ---------------------------------- Otherwise, you can install from the distribution using the ``setup.py`` script:: python setup.py install Installing the C Extensions ---------------------------------- SQLAlchemy includes C extensions which provide an extra speed boost for dealing with result sets. Currently, the extensions are only supported on the 2.xx series of cPython, not Python 3 or Pypy. setup.py will automatically build the extensions if an appropriate platform is detected. If the build of the C extensions fails, due to missing compiler or other issue, the setup process will output a warning message, and re-run the build without the C extensions, upon completion reporting final status. To run the build/install without even attempting to compile the C extensions, pass the flag ``--without-cextensions`` to the ``setup.py`` script:: python setup.py --without-cextensions install Or with pip:: pip install --global-option='--without-cextensions' SQLAlchemy .. note:: The ``--without-cextensions`` flag is available **only** if ``setuptools`` or ``distribute`` is installed. It is not available on a plain Python ``distutils`` installation. The library will still install without the C extensions if they cannot be built, however. Installing on Python 3 ---------------------------------- SQLAlchemy ships as Python 2 code. For Python 3 usage, the ``setup.py`` script will invoke the Python ``2to3`` tool on the build, plugging in an extra "preprocessor" as well. The 2to3 step works with Python distutils (part of the standard Python install) and Distribute - it will **not** work with a non-Distribute setuptools installation. Installing a Database API ---------------------------------- SQLAlchemy is designed to operate with a :term:`DBAPI` implementation built for a particular database, and includes support for the most popular databases. The individual database sections in :doc:`/dialects/index` enumerate the available DBAPIs for each database, including external links. Checking the Installed SQLAlchemy Version ------------------------------------------ This documentation covers SQLAlchemy version 0.8. If you're working on a system that already has SQLAlchemy installed, check the version from your Python prompt like this: .. sourcecode:: python+sql >>> import sqlalchemy >>> sqlalchemy.__version__ # doctest: +SKIP 0.8.0 .. _migration: 0.7 to 0.8 Migration ===================== Notes on what's changed from 0.7 to 0.8 is available here at :doc:`changelog/migration_08`. SQLAlchemy-0.8.4/doc/build/Makefile0000644000076500000240000001216712251147171017545 0ustar classicstaff00000000000000# Makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD = sphinx-build PAPER = BUILDDIR = output # Internal variables. PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . # the i18n builder cannot share the environment and doctrees with the others I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 
.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest dist-html site-mako gettext help: @echo "Please use \`make ' where is one of" @echo " html to make standalone HTML files" @echo " gettext to make PO message catalogs" @echo " dist-html same as html, but places files in /doc" @echo " dirhtml to make HTML files named index.html in directories" @echo " singlehtml to make a single large HTML file" @echo " pickle to make pickle files" @echo " json to make JSON files" @echo " htmlhelp to make HTML files and a HTML help project" @echo " qthelp to make HTML files and a qthelp project" @echo " devhelp to make HTML files and a Devhelp project" @echo " epub to make an epub" @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" @echo " latexpdf to make LaTeX files and run them through pdflatex" @echo " text to make text files" @echo " man to make manual pages" @echo " changes to make an overview of all changed/added/deprecated items" @echo " linkcheck to check all external links for integrity" @echo " doctest to run all doctests embedded in the documentation (if enabled)" clean: -rm -rf $(BUILDDIR)/* html: $(SPHINXBUILD) -b html -A mako_layout=html $(ALLSPHINXOPTS) $(BUILDDIR)/html @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." gettext: $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale @echo @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." dist-html: $(SPHINXBUILD) -b html -A mako_layout=html $(ALLSPHINXOPTS) .. @echo @echo "Build finished. The HTML pages are in ../." dirhtml: $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." singlehtml: $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml @echo @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." pickle: $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle @echo @echo "Build finished; now you can process the pickle files." json: $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json @echo @echo "Build finished; now you can process the JSON files." htmlhelp: $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp @echo @echo "Build finished; now you can run HTML Help Workshop with the" \ ".hhp project file in $(BUILDDIR)/htmlhelp." qthelp: $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp @echo @echo "Build finished; now you can run "qcollectiongenerator" with the" \ ".qhcp project file in $(BUILDDIR)/qthelp, like this:" @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/SQLAlchemy.qhcp" @echo "To view the help file:" @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/SQLAlchemy.qhc" devhelp: $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp @echo @echo "Build finished." @echo "To view the help file:" @echo "# mkdir -p $$HOME/.local/share/devhelp/SQLAlchemy" @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/SQLAlchemy" @echo "# devhelp" epub: $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub @echo @echo "Build finished. The epub file is in $(BUILDDIR)/epub." latex: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex cp texinputs/* $(BUILDDIR)/latex/ @echo @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." @echo "Run \`make' in that directory to run these through (pdf)latex" \ "(use \`make latexpdf' here to do that automatically)." 
latexpdf: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex cp texinputs/* $(BUILDDIR)/latex/ @echo "Running LaTeX files through pdflatex..." make -C $(BUILDDIR)/latex all-pdf @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." text: $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text @echo @echo "Build finished. The text files are in $(BUILDDIR)/text." man: $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man @echo @echo "Build finished. The manual pages are in $(BUILDDIR)/man." changes: $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes @echo @echo "The overview file is in $(BUILDDIR)/changes." linkcheck: $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck @echo @echo "Link check complete; look for any errors in the above output " \ "or in $(BUILDDIR)/linkcheck/output.txt." doctest: $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) . @echo "Testing of doctests in the sources finished, look at the " \ "results in $(BUILDDIR)/doctest/output.txt." SQLAlchemy-0.8.4/doc/build/orm/0000755000076500000240000000000012251151573016674 5ustar classicstaff00000000000000SQLAlchemy-0.8.4/doc/build/orm/collections.rst0000644000076500000240000005331412251147171021751 0ustar classicstaff00000000000000.. _collections_toplevel: .. currentmodule:: sqlalchemy.orm ======================================= Collection Configuration and Techniques ======================================= The :func:`.relationship` function defines a linkage between two classes. When the linkage defines a one-to-many or many-to-many relationship, it's represented as a Python collection when objects are loaded and manipulated. This section presents additional information about collection configuration and techniques. .. _largecollections: .. currentmodule:: sqlalchemy.orm Working with Large Collections =============================== The default behavior of :func:`.relationship` is to fully load the collection of items in, as according to the loading strategy of the relationship. Additionally, the :class:`.Session` by default only knows how to delete objects which are actually present within the session. When a parent instance is marked for deletion and flushed, the :class:`.Session` loads its full list of child items in so that they may either be deleted as well, or have their foreign key value set to null; this is to avoid constraint violations. For large collections of child items, there are several strategies to bypass full loading of child items both at load time as well as deletion time. .. _dynamic_relationship: Dynamic Relationship Loaders ----------------------------- A key feature to enable management of a large collection is the so-called "dynamic" relationship. This is an optional form of :func:`~sqlalchemy.orm.relationship` which returns a :class:`~sqlalchemy.orm.query.Query` object in place of a collection when accessed. 
:func:`~sqlalchemy.orm.query.Query.filter` criterion may be applied as well as limits and offsets, either explicitly or via array slices:: class User(Base): __tablename__ = 'user' posts = relationship(Post, lazy="dynamic") jack = session.query(User).get(id) # filter Jack's blog posts posts = jack.posts.filter(Post.headline=='this is a post') # apply array slices posts = jack.posts[5:20] The dynamic relationship supports limited write operations, via the ``append()`` and ``remove()`` methods:: oldpost = jack.posts.filter(Post.headline=='old post').one() jack.posts.remove(oldpost) jack.posts.append(Post('new post')) Since the read side of the dynamic relationship always queries the database, changes to the underlying collection will not be visible until the data has been flushed. However, as long as "autoflush" is enabled on the :class:`.Session` in use, this will occur automatically each time the collection is about to emit a query. To place a dynamic relationship on a backref, use the :func:`~.orm.backref` function in conjunction with ``lazy='dynamic'``:: class Post(Base): __table__ = posts_table user = relationship(User, backref=backref('posts', lazy='dynamic') ) Note that eager/lazy loading options cannot be used in conjunction dynamic relationships at this time. .. note:: The :func:`~.orm.dynamic_loader` function is essentially the same as :func:`~.orm.relationship` with the ``lazy='dynamic'`` argument specified. .. warning:: The "dynamic" loader applies to **collections only**. It is not valid to use "dynamic" loaders with many-to-one, one-to-one, or uselist=False relationships. Newer versions of SQLAlchemy emit warnings or exceptions in these cases. Setting Noload --------------- A "noload" relationship never loads from the database, even when accessed. It is configured using ``lazy='noload'``:: class MyClass(Base): __tablename__ = 'some_table' children = relationship(MyOtherClass, lazy='noload') Above, the ``children`` collection is fully writeable, and changes to it will be persisted to the database as well as locally available for reading at the time they are added. However when instances of ``MyClass`` are freshly loaded from the database, the ``children`` collection stays empty. .. _passive_deletes: Using Passive Deletes ---------------------- Use ``passive_deletes=True`` to disable child object loading on a DELETE operation, in conjunction with "ON DELETE (CASCADE|SET NULL)" on your database to automatically cascade deletes to child objects:: class MyClass(Base): __tablename__ = 'mytable' id = Column(Integer, primary_key=True) children = relationship("MyOtherClass", cascade="all, delete-orphan", passive_deletes=True) class MyOtherClass(Base): __tablename__ = 'myothertable' id = Column(Integer, primary_key=True) parent_id = Column(Integer, ForeignKey('mytable.id', ondelete='CASCADE') ) .. note:: To use "ON DELETE CASCADE", the underlying database engine must support foreign keys. * When using MySQL, an appropriate storage engine must be selected. See :ref:`mysql_storage_engines` for details. * When using SQLite, foreign key support must be enabled explicitly. See :ref:`sqlite_foreign_keys` for details. When ``passive_deletes`` is applied, the ``children`` relationship will not be loaded into memory when an instance of ``MyClass`` is marked for deletion. 
The ``cascade="all, delete-orphan"`` *will* take effect for instances of ``MyOtherClass`` which are currently present in the session; however for instances of ``MyOtherClass`` which are not loaded, SQLAlchemy assumes that "ON DELETE CASCADE" rules will ensure that those rows are deleted by the database. .. currentmodule:: sqlalchemy.orm.collections .. _custom_collections: Customizing Collection Access ============================= Mapping a one-to-many or many-to-many relationship results in a collection of values accessible through an attribute on the parent instance. By default, this collection is a ``list``:: class Parent(Base): __tablename__ = 'parent' parent_id = Column(Integer, primary_key=True) children = relationship(Child) parent = Parent() parent.children.append(Child()) print parent.children[0] Collections are not limited to lists. Sets, mutable sequences and almost any other Python object that can act as a container can be used in place of the default list, by specifying the ``collection_class`` option on :func:`~sqlalchemy.orm.relationship`:: class Parent(Base): __tablename__ = 'parent' parent_id = Column(Integer, primary_key=True) # use a set children = relationship(Child, collection_class=set) parent = Parent() child = Child() parent.children.add(child) assert child in parent.children Dictionary Collections ----------------------- A little extra detail is needed when using a dictionary as a collection. This because objects are always loaded from the database as lists, and a key-generation strategy must be available to populate the dictionary correctly. The :func:`.attribute_mapped_collection` function is by far the most common way to achieve a simple dictionary collection. It produces a dictionary class that will apply a particular attribute of the mapped class as a key. Below we map an ``Item`` class containing a dictionary of ``Note`` items keyed to the ``Note.keyword`` attribute:: from sqlalchemy import Column, Integer, String, ForeignKey from sqlalchemy.orm import relationship from sqlalchemy.orm.collections import attribute_mapped_collection from sqlalchemy.ext.declarative import declarative_base Base = declarative_base() class Item(Base): __tablename__ = 'item' id = Column(Integer, primary_key=True) notes = relationship("Note", collection_class=attribute_mapped_collection('keyword'), cascade="all, delete-orphan") class Note(Base): __tablename__ = 'note' id = Column(Integer, primary_key=True) item_id = Column(Integer, ForeignKey('item.id'), nullable=False) keyword = Column(String) text = Column(String) def __init__(self, keyword, text): self.keyword = keyword self.text = text ``Item.notes`` is then a dictionary:: >>> item = Item() >>> item.notes['a'] = Note('a', 'atext') >>> item.notes.items() {'a': <__main__.Note object at 0x2eaaf0>} :func:`.attribute_mapped_collection` will ensure that the ``.keyword`` attribute of each ``Note`` complies with the key in the dictionary. Such as, when assigning to ``Item.notes``, the dictionary key we supply must match that of the actual ``Note`` object:: item = Item() item.notes = { 'a': Note('a', 'atext'), 'b': Note('b', 'btext') } The attribute which :func:`.attribute_mapped_collection` uses as a key does not need to be mapped at all! 
Using a regular Python ``@property`` allows virtually any detail or combination of details about the object to be used as the key, as below when we establish it as a tuple of ``Note.keyword`` and the first ten letters of the ``Note.text`` field:: class Item(Base): __tablename__ = 'item' id = Column(Integer, primary_key=True) notes = relationship("Note", collection_class=attribute_mapped_collection('note_key'), backref="item", cascade="all, delete-orphan") class Note(Base): __tablename__ = 'note' id = Column(Integer, primary_key=True) item_id = Column(Integer, ForeignKey('item.id'), nullable=False) keyword = Column(String) text = Column(String) @property def note_key(self): return (self.keyword, self.text[0:10]) def __init__(self, keyword, text): self.keyword = keyword self.text = text Above we added a ``Note.item`` backref. Assigning to this reverse relationship, the ``Note`` is added to the ``Item.notes`` dictionary and the key is generated for us automatically:: >>> item = Item() >>> n1 = Note("a", "atext") >>> n1.item = item >>> item.notes {('a', 'atext'): <__main__.Note object at 0x2eaaf0>} Other built-in dictionary types include :func:`.column_mapped_collection`, which is almost like :func:`.attribute_mapped_collection` except given the :class:`.Column` object directly:: from sqlalchemy.orm.collections import column_mapped_collection class Item(Base): __tablename__ = 'item' id = Column(Integer, primary_key=True) notes = relationship("Note", collection_class=column_mapped_collection(Note.__table__.c.keyword), cascade="all, delete-orphan") as well as :func:`.mapped_collection` which is passed any callable function. Note that it's usually easier to use :func:`.attribute_mapped_collection` along with a ``@property`` as mentioned earlier:: from sqlalchemy.orm.collections import mapped_collection class Item(Base): __tablename__ = 'item' id = Column(Integer, primary_key=True) notes = relationship("Note", collection_class=mapped_collection(lambda note: note.text[0:10]), cascade="all, delete-orphan") Dictionary mappings are often combined with the "Association Proxy" extension to produce streamlined dictionary views. See :ref:`proxying_dictionaries` and :ref:`composite_association_proxy` for examples. .. autofunction:: attribute_mapped_collection .. autofunction:: column_mapped_collection .. autofunction:: mapped_collection Custom Collection Implementations ================================== You can use your own types for collections as well. In simple cases, inherting from ``list`` or ``set``, adding custom behavior, is all that's needed. In other cases, special decorators are needed to tell SQLAlchemy more detail about how the collection operates. .. topic:: Do I need a custom collection implementation? In most cases not at all! The most common use cases for a "custom" collection is one that validates or marshals incoming values into a new form, such as a string that becomes a class instance, or one which goes a step beyond and represents the data internally in some fashion, presenting a "view" of that data on the outside of a different form. For the first use case, the :func:`.orm.validates` decorator is by far the simplest way to intercept incoming values in all cases for the purposes of validation and simple marshaling. See :ref:`simple_validators` for an example of this. For the second use case, the :ref:`associationproxy_toplevel` extension is a well-tested, widely used system that provides a read/write "view" of a collection in terms of some attribute present on the target object. 
As the target attribute can be a ``@property`` that returns virtually anything, a wide array of "alternative" views of a collection can be constructed with just a few functions. This approach leaves the underlying mapped collection unaffected and avoids the need to carefully tailor collection behavior on a method-by-method basis. Customized collections are useful when the collection needs to have special behaviors upon access or mutation operations that can't otherwise be modeled externally to the collection. They can of course be combined with the above two approaches. Collections in SQLAlchemy are transparently *instrumented*. Instrumentation means that normal operations on the collection are tracked and result in changes being written to the database at flush time. Additionally, collection operations can fire *events* which indicate some secondary operation must take place. Examples of a secondary operation include saving the child item in the parent's :class:`~sqlalchemy.orm.session.Session` (i.e. the ``save-update`` cascade), as well as synchronizing the state of a bi-directional relationship (i.e. a :func:`.backref`). The collections package understands the basic interface of lists, sets and dicts and will automatically apply instrumentation to those built-in types and their subclasses. Object-derived types that implement a basic collection interface are detected and instrumented via duck-typing: .. sourcecode:: python+sql class ListLike(object): def __init__(self): self.data = [] def append(self, item): self.data.append(item) def remove(self, item): self.data.remove(item) def extend(self, items): self.data.extend(items) def __iter__(self): return iter(self.data) def foo(self): return 'foo' ``append``, ``remove``, and ``extend`` are known list-like methods, and will be instrumented automatically. ``__iter__`` is not a mutator method and won't be instrumented, and ``foo`` won't be either. Duck-typing (i.e. guesswork) isn't rock-solid, of course, so you can be explicit about the interface you are implementing by providing an ``__emulates__`` class attribute:: class SetLike(object): __emulates__ = set def __init__(self): self.data = set() def append(self, item): self.data.add(item) def remove(self, item): self.data.remove(item) def __iter__(self): return iter(self.data) This class looks list-like because of ``append``, but ``__emulates__`` forces it to set-like. ``remove`` is known to be part of the set interface and will be instrumented. But this class won't work quite yet: a little glue is needed to adapt it for use by SQLAlchemy. The ORM needs to know which methods to use to append, remove and iterate over members of the collection. When using a type like ``list`` or ``set``, the appropriate methods are well-known and used automatically when present. This set-like class does not provide the expected ``add`` method, so we must supply an explicit mapping for the ORM via a decorator. Annotating Custom Collections via Decorators -------------------------------------------- Decorators can be used to tag the individual methods the ORM needs to manage collections. Use them when your class doesn't quite meet the regular interface for its container type, or when you otherwise would like to use a different method to get the job done. .. 
sourcecode:: python+sql from sqlalchemy.orm.collections import collection class SetLike(object): __emulates__ = set def __init__(self): self.data = set() @collection.appender def append(self, item): self.data.add(item) def remove(self, item): self.data.remove(item) def __iter__(self): return iter(self.data) And that's all that's needed to complete the example. SQLAlchemy will add instances via the ``append`` method. ``remove`` and ``__iter__`` are the default methods for sets and will be used for removing and iteration. Default methods can be changed as well: .. sourcecode:: python+sql from sqlalchemy.orm.collections import collection class MyList(list): @collection.remover def zark(self, item): # do something special... @collection.iterator def hey_use_this_instead_for_iteration(self): # ... There is no requirement to be list-, or set-like at all. Collection classes can be any shape, so long as they have the append, remove and iterate interface marked for SQLAlchemy's use. Append and remove methods will be called with a mapped entity as the single argument, and iterator methods are called with no arguments and must return an iterator. .. autoclass:: collection :members: .. _dictionary_collections: Custom Dictionary-Based Collections ----------------------------------- The :class:`.MappedCollection` class can be used as a base class for your custom types or as a mix-in to quickly add ``dict`` collection support to other classes. It uses a keying function to delegate to ``__setitem__`` and ``__delitem__``: .. sourcecode:: python+sql from sqlalchemy.util import OrderedDict from sqlalchemy.orm.collections import MappedCollection class NodeMap(OrderedDict, MappedCollection): """Holds 'Node' objects, keyed by the 'name' attribute with insert order maintained.""" def __init__(self, *args, **kw): MappedCollection.__init__(self, keyfunc=lambda node: node.name) OrderedDict.__init__(self, *args, **kw) When subclassing :class:`.MappedCollection`, user-defined versions of ``__setitem__()`` or ``__delitem__()`` should be decorated with :meth:`.collection.internally_instrumented`, **if** they call down to those same methods on :class:`.MappedCollection`. This because the methods on :class:`.MappedCollection` are already instrumented - calling them from within an already instrumented call can cause events to be fired off repeatedly, or inappropriately, leading to internal state corruption in rare cases:: from sqlalchemy.orm.collections import MappedCollection,\ collection class MyMappedCollection(MappedCollection): """Use @internally_instrumented when your methods call down to already-instrumented methods. """ @collection.internally_instrumented def __setitem__(self, key, value, _sa_initiator=None): # do something with key, value super(MyMappedCollection, self).__setitem__(key, value, _sa_initiator) @collection.internally_instrumented def __delitem__(self, key, _sa_initiator=None): # do something with key super(MyMappedCollection, self).__delitem__(key, _sa_initiator) The ORM understands the ``dict`` interface just like lists and sets, and will automatically instrument all dict-like methods if you choose to subclass ``dict`` or provide dict-like collection behavior in a duck-typed class. You must decorate appender and remover methods, however- there are no compatible methods in the basic dictionary interface for SQLAlchemy to use by default. Iteration will go through ``itervalues()`` unless otherwise decorated. .. 
note:: Due to a bug in MappedCollection prior to version 0.7.6, this workaround usually needs to be called before a custom subclass of :class:`.MappedCollection` which uses :meth:`.collection.internally_instrumented` can be used:: from sqlalchemy.orm.collections import _instrument_class, MappedCollection _instrument_class(MappedCollection) This will ensure that the :class:`.MappedCollection` has been properly initialized with custom ``__setitem__()`` and ``__delitem__()`` methods before used in a custom subclass. .. autoclass:: sqlalchemy.orm.collections.MappedCollection :members: Instrumentation and Custom Types -------------------------------- Many custom types and existing library classes can be used as a entity collection type as-is without further ado. However, it is important to note that the instrumentation process will modify the type, adding decorators around methods automatically. The decorations are lightweight and no-op outside of relationships, but they do add unneeded overhead when triggered elsewhere. When using a library class as a collection, it can be good practice to use the "trivial subclass" trick to restrict the decorations to just your usage in relationships. For example: .. sourcecode:: python+sql class MyAwesomeList(some.great.library.AwesomeList): pass # ... relationship(..., collection_class=MyAwesomeList) The ORM uses this approach for built-ins, quietly substituting a trivial subclass when a ``list``, ``set`` or ``dict`` is used directly. Collection Internals ===================== Various internal methods. .. autofunction:: bulk_replace .. autoclass:: collection .. autofunction:: collection_adapter .. autoclass:: CollectionAdapter .. autoclass:: InstrumentedDict .. autoclass:: InstrumentedList .. autoclass:: InstrumentedSet .. autofunction:: prepare_instrumentation SQLAlchemy-0.8.4/doc/build/orm/deprecated.rst0000644000076500000240000000155512251147171021533 0ustar classicstaff00000000000000:orphan: .. _dep_interfaces_orm_toplevel: Deprecated ORM Event Interfaces ================================ .. module:: sqlalchemy.orm.interfaces This section describes the class-based ORM event interface which first existed in SQLAlchemy 0.1, which progressed with more kinds of events up until SQLAlchemy 0.5. The non-ORM analogue is described at :ref:`dep_interfaces_core_toplevel`. .. deprecated:: 0.7 As of SQLAlchemy 0.7, the new event system described in :ref:`event_toplevel` replaces the extension/proxy/listener system, providing a consistent interface to all events without the need for subclassing. Mapper Events ----------------- .. autoclass:: MapperExtension :members: Session Events ----------------- .. autoclass:: SessionExtension :members: Attribute Events -------------------- .. autoclass:: AttributeExtension :members: SQLAlchemy-0.8.4/doc/build/orm/events.rst0000644000076500000240000000163112251150015020721 0ustar classicstaff00000000000000.. _orm_event_toplevel: ORM Events ========== The ORM includes a wide variety of hooks available for subscription. .. versionadded:: 0.7 The event supercedes the previous system of "extension" classes. For an introduction to the event API, see :ref:`event_toplevel`. Non-ORM events such as those regarding connections and low-level statement execution are described in :ref:`core_event_toplevel`. Attribute Events ---------------- .. autoclass:: sqlalchemy.orm.events.AttributeEvents :members: Mapper Events --------------- .. autoclass:: sqlalchemy.orm.events.MapperEvents :members: Instance Events --------------- .. 
autoclass:: sqlalchemy.orm.events.InstanceEvents :members: Session Events -------------- .. autoclass:: sqlalchemy.orm.events.SessionEvents :members: Instrumentation Events ----------------------- .. autoclass:: sqlalchemy.orm.events.InstrumentationEvents :members: SQLAlchemy-0.8.4/doc/build/orm/examples.rst0000644000076500000240000000461012251150015021233 0ustar classicstaff00000000000000.. _examples_toplevel: Examples ======== The SQLAlchemy distribution includes a variety of code examples illustrating a select set of patterns, some typical and some not so typical. All are runnable and can be found in the ``/examples`` directory of the distribution. Each example contains a README in its ``__init__.py`` file, each of which are listed below. Additional SQLAlchemy examples, some user contributed, are available on the wiki at ``_. .. _examples_adjacencylist: Adjacency List -------------- Location: /examples/adjacency_list/ .. automodule:: adjacency_list .. _examples_associations: Associations ------------ Location: /examples/association/ .. automodule:: association .. _examples_instrumentation: Attribute Instrumentation ------------------------- Location: /examples/custom_attributes/ .. automodule:: custom_attributes .. _examples_caching: Dogpile Caching --------------- Location: /examples/dogpile_caching/ .. automodule:: dogpile_caching Directed Graphs --------------- Location: /examples/graphs/ .. automodule:: graphs Dynamic Relations as Dictionaries ---------------------------------- Location: /examples/dynamic_dict/ .. automodule:: dynamic_dict .. _examples_generic_associations: Generic Associations -------------------- Location: /examples/generic_associations .. automodule:: generic_associations .. _examples_sharding: Horizontal Sharding ------------------- Location: /examples/sharding .. automodule:: sharding Inheritance Mappings -------------------- Location: /examples/inheritance/ .. automodule:: inheritance Large Collections ----------------- Location: /examples/large_collection/ .. automodule:: large_collection Nested Sets ----------- Location: /examples/nested_sets/ .. automodule:: nested_sets Polymorphic Associations ------------------------ See :ref:`examples_generic_associations` for a modern version of polymorphic associations. .. _examples_postgis: PostGIS Integration ------------------- Location: /examples/postgis .. automodule:: postgis Versioned Objects ----------------- Location: /examples/versioning .. automodule:: versioning Vertical Attribute Mapping -------------------------- Location: /examples/vertical .. automodule:: vertical .. _examples_xmlpersistence: XML Persistence --------------- Location: /examples/elementtree/ .. automodule:: elementtree SQLAlchemy-0.8.4/doc/build/orm/exceptions.rst0000644000076500000240000000011712251147171021605 0ustar classicstaff00000000000000ORM Exceptions ============== .. automodule:: sqlalchemy.orm.exc :members:SQLAlchemy-0.8.4/doc/build/orm/extensions/0000755000076500000240000000000012251151573021073 5ustar classicstaff00000000000000SQLAlchemy-0.8.4/doc/build/orm/extensions/associationproxy.rst0000644000076500000240000004772312251150015025246 0ustar classicstaff00000000000000.. _associationproxy_toplevel: Association Proxy ================= .. module:: sqlalchemy.ext.associationproxy ``associationproxy`` is used to create a read/write view of a target attribute across a relationship. 
It essentially conceals the usage of a "middle" attribute between two endpoints, and can be used to cherry-pick fields from a collection of related objects or to reduce the verbosity of using the association object pattern. Applied creatively, the association proxy allows the construction of sophisticated collections and dictionary views of virtually any geometry, persisted to the database using standard, transparently configured relational patterns. Simplifying Scalar Collections ------------------------------ Consider a many-to-many mapping between two classes, ``User`` and ``Keyword``. Each ``User`` can have any number of ``Keyword`` objects, and vice-versa (the many-to-many pattern is described at :ref:`relationships_many_to_many`):: from sqlalchemy import Column, Integer, String, ForeignKey, Table from sqlalchemy.orm import relationship from sqlalchemy.ext.declarative import declarative_base Base = declarative_base() class User(Base): __tablename__ = 'user' id = Column(Integer, primary_key=True) name = Column(String(64)) kw = relationship("Keyword", secondary=lambda: userkeywords_table) def __init__(self, name): self.name = name class Keyword(Base): __tablename__ = 'keyword' id = Column(Integer, primary_key=True) keyword = Column('keyword', String(64)) def __init__(self, keyword): self.keyword = keyword userkeywords_table = Table('userkeywords', Base.metadata, Column('user_id', Integer, ForeignKey("user.id"), primary_key=True), Column('keyword_id', Integer, ForeignKey("keyword.id"), primary_key=True) ) Reading and manipulating the collection of "keyword" strings associated with ``User`` requires traversal from each collection element to the ``.keyword`` attribute, which can be awkward:: >>> user = User('jek') >>> user.kw.append(Keyword('cheese inspector')) >>> print(user.kw) [<__main__.Keyword object at 0x12bf830>] >>> print(user.kw[0].keyword) cheese inspector >>> print([keyword.keyword for keyword in user.kw]) ['cheese inspector'] The ``association_proxy`` is applied to the ``User`` class to produce a "view" of the ``kw`` relationship, which only exposes the string value of ``.keyword`` associated with each ``Keyword`` object:: from sqlalchemy.ext.associationproxy import association_proxy class User(Base): __tablename__ = 'user' id = Column(Integer, primary_key=True) name = Column(String(64)) kw = relationship("Keyword", secondary=lambda: userkeywords_table) def __init__(self, name): self.name = name # proxy the 'keyword' attribute from the 'kw' relationship keywords = association_proxy('kw', 'keyword') We can now reference the ``.keywords`` collection as a listing of strings, which is both readable and writable. New ``Keyword`` objects are created for us transparently:: >>> user = User('jek') >>> user.keywords.append('cheese inspector') >>> user.keywords ['cheese inspector'] >>> user.keywords.append('snack ninja') >>> user.kw [<__main__.Keyword object at 0x12cdd30>, <__main__.Keyword object at 0x12cde30>] The :class:`.AssociationProxy` object produced by the :func:`.association_proxy` function is an instance of a `Python descriptor `_. It is always declared with the user-defined class being mapped, regardless of whether Declarative or classical mappings via the :func:`.mapper` function are used. The proxy functions by operating upon the underlying mapped attribute or collection in response to operations, and changes made via the proxy are immediately apparent in the mapped attribute, as well as vice versa. The underlying attribute remains fully accessible. 
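As a brief illustration of this mutual visibility, here is a minimal sketch reusing the ``User`` / ``Keyword`` mapping above (the object addresses shown are arbitrary); an append made through the proxy is reflected in the underlying ``kw`` collection, and an append made directly to ``kw`` is reflected in the proxied ``keywords`` view::

    >>> user = User('jek')

    >>> # append a string via the proxy; a Keyword object is created
    >>> # and appears in the underlying 'kw' collection
    >>> user.keywords.append('cheese inspector')
    >>> user.kw
    [<__main__.Keyword object at 0x12bf830>]

    >>> # append a Keyword directly to the relationship; its string
    >>> # value appears in the proxied 'keywords' view
    >>> user.kw.append(Keyword('snack ninja'))
    >>> user.keywords
    ['cheese inspector', 'snack ninja']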
When first accessed, the association proxy performs introspection operations on the target collection so that its behavior corresponds correctly. Details such as if the locally proxied attribute is a collection (as is typical) or a scalar reference, as well as if the collection acts like a set, list, or dictionary is taken into account, so that the proxy should act just like the underlying collection or attribute does. Creation of New Values ----------------------- When a list append() event (or set add(), dictionary __setitem__(), or scalar assignment event) is intercepted by the association proxy, it instantiates a new instance of the "intermediary" object using its constructor, passing as a single argument the given value. In our example above, an operation like:: user.keywords.append('cheese inspector') Is translated by the association proxy into the operation:: user.kw.append(Keyword('cheese inspector')) The example works here because we have designed the constructor for ``Keyword`` to accept a single positional argument, ``keyword``. For those cases where a single-argument constructor isn't feasible, the association proxy's creational behavior can be customized using the ``creator`` argument, which references a callable (i.e. Python function) that will produce a new object instance given the singular argument. Below we illustrate this using a lambda as is typical:: class User(Base): # ... # use Keyword(keyword=kw) on append() events keywords = association_proxy('kw', 'keyword', creator=lambda kw: Keyword(keyword=kw)) The ``creator`` function accepts a single argument in the case of a list- or set- based collection, or a scalar attribute. In the case of a dictionary-based collection, it accepts two arguments, "key" and "value". An example of this is below in :ref:`proxying_dictionaries`. Simplifying Association Objects ------------------------------- The "association object" pattern is an extended form of a many-to-many relationship, and is described at :ref:`association_pattern`. Association proxies are useful for keeping "association objects" out the way during regular use. Suppose our ``userkeywords`` table above had additional columns which we'd like to map explicitly, but in most cases we don't require direct access to these attributes. Below, we illustrate a new mapping which introduces the ``UserKeyword`` class, which is mapped to the ``userkeywords`` table illustrated earlier. This class adds an additional column ``special_key``, a value which we occasionally want to access, but not in the usual case. 
We create an association proxy on the ``User`` class called ``keywords``, which will bridge the gap from the ``user_keywords`` collection of ``User`` to the ``.keyword`` attribute present on each ``UserKeyword``:: from sqlalchemy import Column, Integer, String, ForeignKey from sqlalchemy.orm import relationship, backref from sqlalchemy.ext.associationproxy import association_proxy from sqlalchemy.ext.declarative import declarative_base Base = declarative_base() class User(Base): __tablename__ = 'user' id = Column(Integer, primary_key=True) name = Column(String(64)) # association proxy of "user_keywords" collection # to "keyword" attribute keywords = association_proxy('user_keywords', 'keyword') def __init__(self, name): self.name = name class UserKeyword(Base): __tablename__ = 'user_keyword' user_id = Column(Integer, ForeignKey('user.id'), primary_key=True) keyword_id = Column(Integer, ForeignKey('keyword.id'), primary_key=True) special_key = Column(String(50)) # bidirectional attribute/collection of "user"/"user_keywords" user = relationship(User, backref=backref("user_keywords", cascade="all, delete-orphan") ) # reference to the "Keyword" object keyword = relationship("Keyword") def __init__(self, keyword=None, user=None, special_key=None): self.user = user self.keyword = keyword self.special_key = special_key class Keyword(Base): __tablename__ = 'keyword' id = Column(Integer, primary_key=True) keyword = Column('keyword', String(64)) def __init__(self, keyword): self.keyword = keyword def __repr__(self): return 'Keyword(%s)' % repr(self.keyword) With the above configuration, we can operate upon the ``.keywords`` collection of each ``User`` object, and the usage of ``UserKeyword`` is concealed:: >>> user = User('log') >>> for kw in (Keyword('new_from_blammo'), Keyword('its_big')): ... user.keywords.append(kw) ... >>> print(user.keywords) [Keyword('new_from_blammo'), Keyword('its_big')] Where above, each ``.keywords.append()`` operation is equivalent to:: >>> user.user_keywords.append(UserKeyword(Keyword('its_heavy'))) The ``UserKeyword`` association object has two attributes here which are populated; the ``.keyword`` attribute is populated directly as a result of passing the ``Keyword`` object as the first argument. The ``.user`` argument is then assigned as the ``UserKeyword`` object is appended to the ``User.user_keywords`` collection, where the bidirectional relationship configured between ``User.user_keywords`` and ``UserKeyword.user`` results in a population of the ``UserKeyword.user`` attribute. The ``special_key`` argument above is left at its default value of ``None``. For those cases where we do want ``special_key`` to have a value, we create the ``UserKeyword`` object explicitly. Below we assign all three attributes, where the assignment of ``.user`` has the effect of the ``UserKeyword`` being appended to the ``User.user_keywords`` collection:: >>> UserKeyword(Keyword('its_wood'), user, special_key='my special key') The association proxy returns to us a collection of ``Keyword`` objects represented by all these operations:: >>> user.keywords [Keyword('new_from_blammo'), Keyword('its_big'), Keyword('its_heavy'), Keyword('its_wood')] .. _proxying_dictionaries: Proxying to Dictionary Based Collections ----------------------------------------- The association proxy can proxy to dictionary based collections as well. 
SQLAlchemy mappings usually use the :func:`.attribute_mapped_collection` collection type to create dictionary collections, as well as the extended techniques described in :ref:`dictionary_collections`. The association proxy adjusts its behavior when it detects the usage of a dictionary-based collection. When new values are added to the dictionary, the association proxy instantiates the intermediary object by passing two arguments to the creation function instead of one, the key and the value. As always, this creation function defaults to the constructor of the intermediary class, and can be customized using the ``creator`` argument. Below, we modify our ``UserKeyword`` example such that the ``User.user_keywords`` collection will now be mapped using a dictionary, where the ``UserKeyword.special_key`` argument will be used as the key for the dictionary. We then apply a ``creator`` argument to the ``User.keywords`` proxy so that these values are assigned appropriately when new elements are added to the dictionary:: from sqlalchemy import Column, Integer, String, ForeignKey from sqlalchemy.orm import relationship, backref from sqlalchemy.ext.associationproxy import association_proxy from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm.collections import attribute_mapped_collection Base = declarative_base() class User(Base): __tablename__ = 'user' id = Column(Integer, primary_key=True) name = Column(String(64)) # proxy to 'user_keywords', instantiating UserKeyword # assigning the new key to 'special_key', values to # 'keyword'. keywords = association_proxy('user_keywords', 'keyword', creator=lambda k, v: UserKeyword(special_key=k, keyword=v) ) def __init__(self, name): self.name = name class UserKeyword(Base): __tablename__ = 'user_keyword' user_id = Column(Integer, ForeignKey('user.id'), primary_key=True) keyword_id = Column(Integer, ForeignKey('keyword.id'), primary_key=True) special_key = Column(String) # bidirectional user/user_keywords relationships, mapping # user_keywords with a dictionary against "special_key" as key. user = relationship(User, backref=backref( "user_keywords", collection_class=attribute_mapped_collection("special_key"), cascade="all, delete-orphan" ) ) keyword = relationship("Keyword") class Keyword(Base): __tablename__ = 'keyword' id = Column(Integer, primary_key=True) keyword = Column('keyword', String(64)) def __init__(self, keyword): self.keyword = keyword def __repr__(self): return 'Keyword(%s)' % repr(self.keyword) We illustrate the ``.keywords`` collection as a dictionary, mapping the ``UserKeyword.string_key`` value to ``Keyword`` objects:: >>> user = User('log') >>> user.keywords['sk1'] = Keyword('kw1') >>> user.keywords['sk2'] = Keyword('kw2') >>> print(user.keywords) {'sk1': Keyword('kw1'), 'sk2': Keyword('kw2')} .. _composite_association_proxy: Composite Association Proxies ----------------------------- Given our previous examples of proxying from relationship to scalar attribute, proxying across an association object, and proxying dictionaries, we can combine all three techniques together to give ``User`` a ``keywords`` dictionary that deals strictly with the string value of ``special_key`` mapped to the string ``keyword``. Both the ``UserKeyword`` and ``Keyword`` classes are entirely concealed. 
This is achieved by building an association proxy on ``User`` that refers to an association proxy present on ``UserKeyword``:: from sqlalchemy import Column, Integer, String, ForeignKey from sqlalchemy.orm import relationship, backref from sqlalchemy.ext.associationproxy import association_proxy from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm.collections import attribute_mapped_collection Base = declarative_base() class User(Base): __tablename__ = 'user' id = Column(Integer, primary_key=True) name = Column(String(64)) # the same 'user_keywords'->'keyword' proxy as in # the basic dictionary example keywords = association_proxy( 'user_keywords', 'keyword', creator=lambda k, v: UserKeyword(special_key=k, keyword=v) ) def __init__(self, name): self.name = name class UserKeyword(Base): __tablename__ = 'user_keyword' user_id = Column(Integer, ForeignKey('user.id'), primary_key=True) keyword_id = Column(Integer, ForeignKey('keyword.id'), primary_key=True) special_key = Column(String) user = relationship(User, backref=backref( "user_keywords", collection_class=attribute_mapped_collection("special_key"), cascade="all, delete-orphan" ) ) # the relationship to Keyword is now called # 'kw' kw = relationship("Keyword") # 'keyword' is changed to be a proxy to the # 'keyword' attribute of 'Keyword' keyword = association_proxy('kw', 'keyword') class Keyword(Base): __tablename__ = 'keyword' id = Column(Integer, primary_key=True) keyword = Column('keyword', String(64)) def __init__(self, keyword): self.keyword = keyword ``User.keywords`` is now a dictionary of string to string, where ``UserKeyword`` and ``Keyword`` objects are created and removed for us transparently using the association proxy. In the example below, we illustrate usage of the assignment operator, also appropriately handled by the association proxy, to apply a dictionary value to the collection at once:: >>> user = User('log') >>> user.keywords = { ... 'sk1':'kw1', ... 'sk2':'kw2' ... } >>> print(user.keywords) {'sk1': 'kw1', 'sk2': 'kw2'} >>> user.keywords['sk3'] = 'kw3' >>> del user.keywords['sk2'] >>> print(user.keywords) {'sk1': 'kw1', 'sk3': 'kw3'} >>> # illustrate un-proxied usage ... print(user.user_keywords['sk3'].kw) <__main__.Keyword object at 0x12ceb90> One caveat with our example above is that because ``Keyword`` objects are created for each dictionary set operation, the example fails to maintain uniqueness for the ``Keyword`` objects on their string name, which is a typical requirement for a tagging scenario such as this one. For this use case the recipe `UniqueObject `_, or a comparable creational strategy, is recommended, which will apply a "lookup first, then create" strategy to the constructor of the ``Keyword`` class, so that an already existing ``Keyword`` is returned if the given name is already present. Querying with Association Proxies --------------------------------- The :class:`.AssociationProxy` features simple SQL construction capabilities which relate down to the underlying :func:`.relationship` in use as well as the target attribute. 
For example, the :meth:`.RelationshipProperty.Comparator.any` and :meth:`.RelationshipProperty.Comparator.has` operations are available, and will produce a "nested" EXISTS clause, such as in our basic association object example:: >>> print(session.query(User).filter(User.keywords.any(keyword='jek'))) SELECT user.id AS user_id, user.name AS user_name FROM user WHERE EXISTS (SELECT 1 FROM user_keyword WHERE user.id = user_keyword.user_id AND (EXISTS (SELECT 1 FROM keyword WHERE keyword.id = user_keyword.keyword_id AND keyword.keyword = :keyword_1))) For a proxy to a scalar attribute, ``__eq__()`` is supported:: >>> print(session.query(UserKeyword).filter(UserKeyword.keyword == 'jek')) SELECT user_keyword.* FROM user_keyword WHERE EXISTS (SELECT 1 FROM keyword WHERE keyword.id = user_keyword.keyword_id AND keyword.keyword = :keyword_1) and ``.contains()`` is available for a proxy to a scalar collection:: >>> print(session.query(User).filter(User.keywords.contains('jek'))) SELECT user.* FROM user WHERE EXISTS (SELECT 1 FROM userkeywords, keyword WHERE user.id = userkeywords.user_id AND keyword.id = userkeywords.keyword_id AND keyword.keyword = :keyword_1) :class:`.AssociationProxy` can be used with :meth:`.Query.join` somewhat manually using the :attr:`~.AssociationProxy.attr` attribute in a star-args context:: q = session.query(User).join(*User.keywords.attr) .. versionadded:: 0.7.3 :attr:`~.AssociationProxy.attr` attribute in a star-args context. :attr:`~.AssociationProxy.attr` is composed of :attr:`.AssociationProxy.local_attr` and :attr:`.AssociationProxy.remote_attr`, which are just synonyms for the actual proxied attributes, and can also be used for querying:: uka = aliased(UserKeyword) ka = aliased(Keyword) q = session.query(User).\ join(uka, User.keywords.local_attr).\ join(ka, User.keywords.remote_attr) .. versionadded:: 0.7.3 :attr:`.AssociationProxy.local_attr` and :attr:`.AssociationProxy.remote_attr`, synonyms for the actual proxied attributes, and usable for querying. API Documentation ----------------- .. autofunction:: association_proxy .. autoclass:: AssociationProxy :members: :undoc-members: .. autodata:: ASSOCIATION_PROXYSQLAlchemy-0.8.4/doc/build/orm/extensions/declarative.rst0000644000076500000240000000104412251150015024075 0ustar classicstaff00000000000000.. _declarative_toplevel: Declarative =========== .. automodule:: sqlalchemy.ext.declarative API Reference ------------- .. autofunction:: declarative_base .. autofunction:: as_declarative .. autoclass:: declared_attr .. autofunction:: sqlalchemy.ext.declarative.api._declarative_constructor .. autofunction:: has_inherited_table .. autofunction:: synonym_for .. autofunction:: comparable_using .. autofunction:: instrument_declarative .. autoclass:: AbstractConcreteBase .. autoclass:: ConcreteBase .. autoclass:: DeferredReflection SQLAlchemy-0.8.4/doc/build/orm/extensions/horizontal_shard.rst0000644000076500000240000000032512251147171025176 0ustar classicstaff00000000000000Horizontal Sharding =================== .. automodule:: sqlalchemy.ext.horizontal_shard API Documentation ----------------- .. autoclass:: ShardedSession :members: .. autoclass:: ShardedQuery :members: SQLAlchemy-0.8.4/doc/build/orm/extensions/hybrid.rst0000644000076500000240000000046612251147171023113 0ustar classicstaff00000000000000.. _hybrids_toplevel: Hybrid Attributes ================= .. automodule:: sqlalchemy.ext.hybrid API Reference ------------- .. autoclass:: hybrid_method :members: .. autoclass:: hybrid_property :members: .. 
autoclass:: Comparator .. autodata:: HYBRID_METHOD .. autodata:: HYBRID_PROPERTY SQLAlchemy-0.8.4/doc/build/orm/extensions/index.rst0000644000076500000240000000115212251147171022732 0ustar classicstaff00000000000000.. _plugins: .. _sqlalchemy.ext: ORM Extensions ============== SQLAlchemy has a variety of ORM extensions available, which add additional functionality to the core behavior. The extensions build almost entirely on public core and ORM APIs and users should be encouraged to read their source code to further their understanding of their behavior. In particular the "Horizontal Sharding", "Hybrid Attributes", and "Mutation Tracking" extensions are very succinct. .. toctree:: :maxdepth: 1 associationproxy declarative mutable orderinglist horizontal_shard hybrid instrumentation SQLAlchemy-0.8.4/doc/build/orm/extensions/instrumentation.rst0000644000076500000240000000061312251150015025056 0ustar classicstaff00000000000000.. _instrumentation_toplevel: Alternate Class Instrumentation ================================ .. automodule:: sqlalchemy.ext.instrumentation API Reference ------------- .. autodata:: INSTRUMENTATION_MANAGER .. autoclass:: InstrumentationManager :members: :undoc-members: .. autodata:: instrumentation_finders .. autoclass:: ExtendedInstrumentationRegistry :members: SQLAlchemy-0.8.4/doc/build/orm/extensions/mutable.rst0000644000076500000240000000050512251150015023244 0ustar classicstaff00000000000000.. _mutable_toplevel: Mutation Tracking ================== .. automodule:: sqlalchemy.ext.mutable API Reference ------------- .. autoclass:: MutableBase :members: _parents, coerce .. autoclass:: Mutable :members: .. autoclass:: MutableComposite :members: .. autoclass:: MutableDict :members: SQLAlchemy-0.8.4/doc/build/orm/extensions/orderinglist.rst0000644000076500000240000000043212251147171024330 0ustar classicstaff00000000000000Ordering List ============= .. automodule:: sqlalchemy.ext.orderinglist API Reference ------------- .. autofunction:: ordering_list .. autofunction:: count_from_0 .. autofunction:: count_from_1 .. autofunction:: count_from_n_factory .. autoclass:: OrderingList :members: SQLAlchemy-0.8.4/doc/build/orm/index.rst0000644000076500000240000000077712251147171020547 0ustar classicstaff00000000000000.. _orm_toplevel: SQLAlchemy ORM =============== Here, the Object Relational Mapper is introduced and fully described. If you want to work with higher-level SQL which is constructed automatically for you, as well as automated persistence of Python objects, proceed first to the tutorial. .. toctree:: :maxdepth: 3 tutorial mapper_config relationships collections inheritance session query loading events extensions/index examples exceptions internals SQLAlchemy-0.8.4/doc/build/orm/inheritance.rst0000644000076500000240000007233112251150015021713 0ustar classicstaff00000000000000.. _inheritance_toplevel: Mapping Class Inheritance Hierarchies ====================================== SQLAlchemy supports three forms of inheritance: **single table inheritance**, where several types of classes are represented by a single table, **concrete table inheritance**, where each type of class is represented by independent tables, and **joined table inheritance**, where the class hierarchy is broken up among dependent tables, each class represented by its own table that only includes those attributes local to that class. The most common forms of inheritance are single and joined table, while concrete inheritance presents more configurational challenges. 
When mappers are configured in an inheritance relationship, SQLAlchemy has the ability to load elements :term:`polymorphically`, meaning that a single query can return objects of multiple types. Joined Table Inheritance ------------------------- In joined table inheritance, each class along a particular classes' list of parents is represented by a unique table. The total set of attributes for a particular instance is represented as a join along all tables in its inheritance path. Here, we first define the ``Employee`` class. This table will contain a primary key column (or columns), and a column for each attribute that's represented by ``Employee``. In this case it's just ``name``:: class Employee(Base): __tablename__ = 'employee' id = Column(Integer, primary_key=True) name = Column(String(50)) type = Column(String(50)) __mapper_args__ = { 'polymorphic_identity':'employee', 'polymorphic_on':type } The mapped table also has a column called ``type``. The purpose of this column is to act as the **discriminator**, and stores a value which indicates the type of object represented within the row. The column may be of any datatype, though string and integer are the most common. The discriminator column is only needed if polymorphic loading is desired, as is usually the case. It is not strictly necessary that it be present directly on the base mapped table, and can instead be defined on a derived select statement that's used when the class is queried; however, this is a much more sophisticated configuration scenario. The mapping receives additional arguments via the ``__mapper_args__`` dictionary. Here the ``type`` column is explicitly stated as the discriminator column, and the **polymorphic identity** of ``employee`` is also given; this is the value that will be stored in the polymorphic discriminator column for instances of this class. We next define ``Engineer`` and ``Manager`` subclasses of ``Employee``. Each contains columns that represent the attributes unique to the subclass they represent. Each table also must contain a primary key column (or columns), and in most cases a foreign key reference to the parent table:: class Engineer(Employee): __tablename__ = 'engineer' id = Column(Integer, ForeignKey('employee.id'), primary_key=True) engineer_name = Column(String(30)) __mapper_args__ = { 'polymorphic_identity':'engineer', } class Manager(Employee): __tablename__ = 'manager' id = Column(Integer, ForeignKey('employee.id'), primary_key=True) manager_name = Column(String(30)) __mapper_args__ = { 'polymorphic_identity':'manager', } It is standard practice that the same column is used for both the role of primary key as well as foreign key to the parent table, and that the column is also named the same as that of the parent table. However, both of these practices are optional. Separate columns may be used for primary key and parent-relationship, the column may be named differently than that of the parent, and even a custom join condition can be specified between parent and child tables instead of using a foreign key. .. topic:: Joined inheritance primary keys One natural effect of the joined table inheritance configuration is that the identity of any mapped object can be determined entirely from the base table. This has obvious advantages, so SQLAlchemy always considers the primary key columns of a joined inheritance class to be those of the base table only. 
In other words, the ``id`` columns of both the ``engineer`` and ``manager`` tables are not used to locate ``Engineer`` or ``Manager`` objects - only the value in ``employee.id`` is considered. ``engineer.id`` and ``manager.id`` are still of course critical to the proper operation of the pattern overall as they are used to locate the joined row, once the parent row has been determined within a statement. With the joined inheritance mapping complete, querying against ``Employee`` will return a combination of ``Employee``, ``Engineer`` and ``Manager`` objects. Newly saved ``Engineer``, ``Manager``, and ``Employee`` objects will automatically populate the ``employee.type`` column with ``engineer``, ``manager``, or ``employee``, as appropriate. .. _with_polymorphic: Basic Control of Which Tables are Queried ++++++++++++++++++++++++++++++++++++++++++ The :func:`.orm.with_polymorphic` function and the :func:`~sqlalchemy.orm.query.Query.with_polymorphic` method of :class:`~sqlalchemy.orm.query.Query` affects the specific tables which the :class:`.Query` selects from. Normally, a query such as this:: session.query(Employee).all() ...selects only from the ``employee`` table. When loading fresh from the database, our joined-table setup will query from the parent table only, using SQL such as this: .. sourcecode:: python+sql {opensql} SELECT employee.id AS employee_id, employee.name AS employee_name, employee.type AS employee_type FROM employee [] As attributes are requested from those ``Employee`` objects which are represented in either the ``engineer`` or ``manager`` child tables, a second load is issued for the columns in that related row, if the data was not already loaded. So above, after accessing the objects you'd see further SQL issued along the lines of: .. sourcecode:: python+sql {opensql} SELECT manager.id AS manager_id, manager.manager_data AS manager_manager_data FROM manager WHERE ? = manager.id [5] SELECT engineer.id AS engineer_id, engineer.engineer_info AS engineer_engineer_info FROM engineer WHERE ? = engineer.id [2] This behavior works well when issuing searches for small numbers of items, such as when using :meth:`.Query.get`, since the full range of joined tables are not pulled in to the SQL statement unnecessarily. But when querying a larger span of rows which are known to be of many types, you may want to actively join to some or all of the joined tables. The ``with_polymorphic`` feature provides this. Telling our query to polymorphically load ``Engineer`` and ``Manager`` objects, we can use the :func:`.orm.with_polymorphic` function to create a new aliased class which represents a select of the base table combined with outer joins to each of the inheriting tables:: from sqlalchemy.orm import with_polymorphic eng_plus_manager = with_polymorphic(Employee, [Engineer, Manager]) query = session.query(eng_plus_manager) The above produces a query which joins the ``employee`` table to both the ``engineer`` and ``manager`` tables like the following: .. 
sourcecode:: python+sql query.all() {opensql} SELECT employee.id AS employee_id, engineer.id AS engineer_id, manager.id AS manager_id, employee.name AS employee_name, employee.type AS employee_type, engineer.engineer_info AS engineer_engineer_info, manager.manager_data AS manager_manager_data FROM employee LEFT OUTER JOIN engineer ON employee.id = engineer.id LEFT OUTER JOIN manager ON employee.id = manager.id [] The entity returned by :func:`.orm.with_polymorphic` is an :class:`.AliasedClass` object, which can be used in a :class:`.Query` like any other alias, including named attributes for those attributes on the ``Employee`` class. In our example, ``eng_plus_manager`` becomes the entity that we use to refer to the three-way outer join above. It also includes namespaces for each class named in the list of classes, so that attributes specific to those subclasses can be called upon as well. The following example illustrates calling upon attributes specific to ``Engineer`` as well as ``Manager`` in terms of ``eng_plus_manager``:: eng_plus_manager = with_polymorphic(Employee, [Engineer, Manager]) query = session.query(eng_plus_manager).filter( or_( eng_plus_manager.Engineer.engineer_info=='x', eng_plus_manager.Manager.manager_data=='y' ) ) :func:`.orm.with_polymorphic` accepts a single class or mapper, a list of classes/mappers, or the string ``'*'`` to indicate all subclasses: .. sourcecode:: python+sql # join to the engineer table entity = with_polymorphic(Employee, Engineer) # join to the engineer and manager tables entity = with_polymorphic(Employee, [Engineer, Manager]) # join to all subclass tables entity = query.with_polymorphic(Employee, '*') # use with Query session.query(entity).all() It also accepts a second argument ``selectable`` which replaces the automatic join creation and instead selects directly from the selectable given. This feature is normally used with "concrete" inheritance, described later, but can be used with any kind of inheritance setup in the case that specialized SQL should be used to load polymorphically:: # custom selectable employee = Employee.__table__ manager = Manager.__table__ engineer = Engineer.__table__ entity = with_polymorphic( Employee, [Engineer, Manager], employee.outerjoin(manager).outerjoin(engineer) ) # use with Query session.query(entity).all() Note that if you only need to load a single subtype, such as just the ``Engineer`` objects, :func:`.orm.with_polymorphic` is not needed since you would query against the ``Engineer`` class directly. :meth:`.Query.with_polymorphic` has the same purpose as :func:`.orm.with_polymorphic`, except is not as flexible in its usage patterns in that it only applies to the first full mapping, which then impacts all occurrences of that class or the target subclasses within the :class:`.Query`. For simple cases it might be considered to be more succinct:: session.query(Employee).with_polymorphic([Engineer, Manager]).\ filter(or_(Engineer.engineer_info=='w', Manager.manager_data=='q')) .. versionadded:: 0.8 :func:`.orm.with_polymorphic`, an improved version of :meth:`.Query.with_polymorphic` method. The mapper also accepts ``with_polymorphic`` as a configurational argument so that the joined-style load will be issued automatically. 
This argument may be the string ``'*'``, a list of classes, or a tuple consisting of either, followed by a selectable:: class Employee(Base): __tablename__ = 'employee' id = Column(Integer, primary_key=True) type = Column(String(20)) __mapper_args__ = { 'polymorphic_on':type, 'polymorphic_identity':'employee', 'with_polymorphic':'*' } class Engineer(Employee): __tablename__ = 'engineer' id = Column(Integer, ForeignKey('employee.id'), primary_key=True) __mapper_args__ = {'polymorphic_identity':'engineer'} class Manager(Employee): __tablename__ = 'manager' id = Column(Integer, ForeignKey('employee.id'), primary_key=True) __mapper_args__ = {'polymorphic_identity':'manager'} The above mapping will produce a query similar to that of ``with_polymorphic('*')`` for every query of ``Employee`` objects. Using :func:`.orm.with_polymorphic` or :meth:`.Query.with_polymorphic` will override the mapper-level ``with_polymorphic`` setting. .. autofunction:: sqlalchemy.orm.with_polymorphic Advanced Control of Which Tables are Queried +++++++++++++++++++++++++++++++++++++++++++++ The ``with_polymorphic`` functions work fine for simplistic scenarios. However, direct control of table rendering is called for, such as the case when one wants to render to only the subclass table and not the parent table. This use case can be achieved by using the mapped :class:`.Table` objects directly. For example, to query the name of employees with particular criterion:: engineer = Engineer.__table__ manager = Manager.__table__ session.query(Employee.name).\ outerjoin((engineer, engineer.c.employee_id==Employee.employee_id)).\ outerjoin((manager, manager.c.employee_id==Employee.employee_id)).\ filter(or_(Engineer.engineer_info=='w', Manager.manager_data=='q')) The base table, in this case the "employees" table, isn't always necessary. A SQL query is always more efficient with fewer joins. Here, if we wanted to just load information specific to manager or engineer, we can instruct :class:`.Query` to use only those tables. The ``FROM`` clause is determined by what's specified in the :meth:`.Session.query`, :meth:`.Query.filter`, or :meth:`.Query.select_from` methods:: session.query(Manager.manager_data).select_from(manager) session.query(engineer.c.id).\ filter(engineer.c.engineer_info==manager.c.manager_data) .. _of_type: Creating Joins to Specific Subtypes +++++++++++++++++++++++++++++++++++ The :func:`~sqlalchemy.orm.interfaces.PropComparator.of_type` method is a helper which allows the construction of joins along :func:`~sqlalchemy.orm.relationship` paths while narrowing the criterion to specific subclasses. Suppose the ``employees`` table represents a collection of employees which are associated with a ``Company`` object. We'll add a ``company_id`` column to the ``employees`` table and a new table ``companies``: .. 
sourcecode:: python+sql class Company(Base): __tablename__ = 'company' id = Column(Integer, primary_key=True) name = Column(String(50)) employees = relationship("Employee", backref='company', cascade='all, delete-orphan') class Employee(Base): __tablename__ = 'employee' id = Column(Integer, primary_key=True) type = Column(String(20)) company_id = Column(Integer, ForeignKey('company.id')) __mapper_args__ = { 'polymorphic_on':type, 'polymorphic_identity':'employee', 'with_polymorphic':'*' } class Engineer(Employee): __tablename__ = 'engineer' id = Column(Integer, ForeignKey('employee.id'), primary_key=True) engineer_info = Column(String(50)) __mapper_args__ = {'polymorphic_identity':'engineer'} class Manager(Employee): __tablename__ = 'manager' id = Column(Integer, ForeignKey('employee.id'), primary_key=True) manager_data = Column(String(50)) __mapper_args__ = {'polymorphic_identity':'manager'} When querying from ``Company`` onto the ``Employee`` relationship, the ``join()`` method as well as the ``any()`` and ``has()`` operators will create a join from ``company`` to ``employee``, without including ``engineer`` or ``manager`` in the mix. If we wish to have criterion which is specifically against the ``Engineer`` class, we can tell those methods to join or subquery against the joined table representing the subclass using the :meth:`~.orm.interfaces.PropComparator.of_type` operator:: session.query(Company).\ join(Company.employees.of_type(Engineer)).\ filter(Engineer.engineer_info=='someinfo') A longhand version of this would involve spelling out the full target selectable within a 2-tuple:: employee = Employee.__table__ engineer = Engineer.__table__ session.query(Company).\ join((employee.join(engineer), Company.employees)).\ filter(Engineer.engineer_info=='someinfo') :func:`~sqlalchemy.orm.interfaces.PropComparator.of_type` accepts a single class argument. More flexibility can be achieved either by joining to an explicit join as above, or by using the :func:`.orm.with_polymorphic` function to create a polymorphic selectable:: manager_and_engineer = with_polymorphic( Employee, [Manager, Engineer], aliased=True) session.query(Company).\ join(manager_and_engineer, Company.employees).\ filter( or_(manager_and_engineer.Engineer.engineer_info=='someinfo', manager_and_engineer.Manager.manager_data=='somedata') ) Above, we use the ``aliased=True`` argument with :func:`.orm.with_polymorhpic` so that the right hand side of the join between ``Company`` and ``manager_and_engineer`` is converted into an aliased subquery. Some backends, such as SQLite and older versions of MySQL can't handle a FROM clause of the following form:: FROM x JOIN (y JOIN z ON ) ON Using ``aliased=True`` instead renders it more like:: FROM x JOIN (SELECT * FROM y JOIN z ON ) AS anon_1 ON The above join can also be expressed more succinctly by combining ``of_type()`` with the polymorphic construct:: manager_and_engineer = with_polymorphic( Employee, [Manager, Engineer], aliased=True) session.query(Company).\ join(Company.employees.of_type(manager_and_engineer)).\ filter( or_(manager_and_engineer.Engineer.engineer_info=='someinfo', manager_and_engineer.Manager.manager_data=='somedata') ) The ``any()`` and ``has()`` operators also can be used with :func:`~sqlalchemy.orm.interfaces.PropComparator.of_type` when the embedded criterion is in terms of a subclass:: session.query(Company).\ filter( Company.employees.of_type(Engineer). 
any(Engineer.engineer_info=='someinfo') ).all() Note that the ``any()`` and ``has()`` are both shorthand for a correlated EXISTS query. To build one by hand looks like:: session.query(Company).filter( exists([1], and_(Engineer.engineer_info=='someinfo', employees.c.company_id==companies.c.company_id), from_obj=employees.join(engineers) ) ).all() The EXISTS subquery above selects from the join of ``employees`` to ``engineers``, and also specifies criterion which correlates the EXISTS subselect back to the parent ``companies`` table. .. versionadded:: 0.8 :func:`~sqlalchemy.orm.interfaces.PropComparator.of_type` accepts :func:`.orm.aliased` and :func:`.orm.with_polymorphic` constructs in conjunction with :meth:`.Query.join`, ``any()`` and ``has()``. Eager Loading of Specific Subtypes ++++++++++++++++++++++++++++++++++ The :func:`.joinedload` and :func:`.subqueryload` options also support paths which make use of :func:`~sqlalchemy.orm.interfaces.PropComparator.of_type`. Below we load ``Company`` rows while eagerly loading related ``Engineer`` objects, querying the ``employee`` and ``engineer`` tables simultaneously:: session.query(Company).\ options(subqueryload_all(Company.employees.of_type(Engineer), Engineer.machines)) .. versionadded:: 0.8 :func:`.joinedload` and :func:`.subqueryload` support paths that are qualified with :func:`~sqlalchemy.orm.interfaces.PropComparator.of_type`. Single Table Inheritance ------------------------ Single table inheritance is where the attributes of the base class as well as all subclasses are represented within a single table. A column is present in the table for every attribute mapped to the base class and all subclasses; the columns which correspond to a single subclass are nullable. This configuration looks much like joined-table inheritance except there's only one table. In this case, a ``type`` column is required, as there would be no other way to discriminate between classes. The table is specified in the base mapper only; for the inheriting classes, leave their ``table`` parameter blank: .. sourcecode:: python+sql class Employee(Base): __tablename__ = 'employee' id = Column(Integer, primary_key=True) name = Column(String(50)) manager_data = Column(String(50)) engineer_info = Column(String(50)) type = Column(String(20)) __mapper_args__ = { 'polymorphic_on':type, 'polymorphic_identity':'employee' } class Manager(Employee): __mapper_args__ = { 'polymorphic_identity':'manager' } class Engineer(Employee): __mapper_args__ = { 'polymorphic_identity':'engineer' } Note that the mappers for the derived classes Manager and Engineer omit the ``__tablename__``, indicating they do not have a mapped table of their own. .. _concrete_inheritance: Concrete Table Inheritance -------------------------- .. note:: this section is currently using classical mappings. The Declarative system fully supports concrete inheritance however. See the links below for more information on using declarative with concrete table inheritance. This form of inheritance maps each class to a distinct table, as below: .. 
sourcecode:: python+sql employees_table = Table('employees', metadata, Column('employee_id', Integer, primary_key=True), Column('name', String(50)), ) managers_table = Table('managers', metadata, Column('employee_id', Integer, primary_key=True), Column('name', String(50)), Column('manager_data', String(50)), ) engineers_table = Table('engineers', metadata, Column('employee_id', Integer, primary_key=True), Column('name', String(50)), Column('engineer_info', String(50)), ) Notice in this case there is no ``type`` column. If polymorphic loading is not required, there's no advantage to using ``inherits`` here; you just define a separate mapper for each class. .. sourcecode:: python+sql mapper(Employee, employees_table) mapper(Manager, managers_table) mapper(Engineer, engineers_table) To load polymorphically, the ``with_polymorphic`` argument is required, along with a selectable indicating how rows should be loaded. In this case we must construct a UNION of all three tables. SQLAlchemy includes a helper function to create these called :func:`~sqlalchemy.orm.util.polymorphic_union`, which will map all the different columns into a structure of selects with the same numbers and names of columns, and also generate a virtual ``type`` column for each subselect: .. sourcecode:: python+sql pjoin = polymorphic_union({ 'employee': employees_table, 'manager': managers_table, 'engineer': engineers_table }, 'type', 'pjoin') employee_mapper = mapper(Employee, employees_table, with_polymorphic=('*', pjoin), polymorphic_on=pjoin.c.type, polymorphic_identity='employee') manager_mapper = mapper(Manager, managers_table, inherits=employee_mapper, concrete=True, polymorphic_identity='manager') engineer_mapper = mapper(Engineer, engineers_table, inherits=employee_mapper, concrete=True, polymorphic_identity='engineer') Upon select, the polymorphic union produces a query like this: .. sourcecode:: python+sql session.query(Employee).all() {opensql} SELECT pjoin.type AS pjoin_type, pjoin.manager_data AS pjoin_manager_data, pjoin.employee_id AS pjoin_employee_id, pjoin.name AS pjoin_name, pjoin.engineer_info AS pjoin_engineer_info FROM ( SELECT employees.employee_id AS employee_id, CAST(NULL AS VARCHAR(50)) AS manager_data, employees.name AS name, CAST(NULL AS VARCHAR(50)) AS engineer_info, 'employee' AS type FROM employees UNION ALL SELECT managers.employee_id AS employee_id, managers.manager_data AS manager_data, managers.name AS name, CAST(NULL AS VARCHAR(50)) AS engineer_info, 'manager' AS type FROM managers UNION ALL SELECT engineers.employee_id AS employee_id, CAST(NULL AS VARCHAR(50)) AS manager_data, engineers.name AS name, engineers.engineer_info AS engineer_info, 'engineer' AS type FROM engineers ) AS pjoin [] Concrete Inheritance with Declarative ++++++++++++++++++++++++++++++++++++++ .. versionadded:: 0.7.3 The :ref:`declarative_toplevel` module includes helpers for concrete inheritance. See :ref:`declarative_concrete_helpers` for more information. Using Relationships with Inheritance ------------------------------------ Both joined-table and single table inheritance scenarios produce mappings which are usable in :func:`~sqlalchemy.orm.relationship` functions; that is, it's possible to map a parent object to a child object which is polymorphic. Similarly, inheriting mappers can have :func:`~sqlalchemy.orm.relationship` objects of their own at any level, which are inherited to each child class. The only requirement for relationships is that there is a table relationship between parent and child. 
An example is the following modification to the joined table inheritance example, which sets a bi-directional relationship between ``Employee`` and ``Company``: .. sourcecode:: python+sql employees_table = Table('employees', metadata, Column('employee_id', Integer, primary_key=True), Column('name', String(50)), Column('company_id', Integer, ForeignKey('companies.company_id')) ) companies = Table('companies', metadata, Column('company_id', Integer, primary_key=True), Column('name', String(50))) class Company(object): pass mapper(Company, companies, properties={ 'employees': relationship(Employee, backref='company') }) Relationships with Concrete Inheritance +++++++++++++++++++++++++++++++++++++++ In a concrete inheritance scenario, mapping relationships is more challenging since the distinct classes do not share a table. In this case, you *can* establish a relationship from parent to child if a join condition can be constructed from parent to child, if each child table contains a foreign key to the parent: .. sourcecode:: python+sql companies = Table('companies', metadata, Column('id', Integer, primary_key=True), Column('name', String(50))) employees_table = Table('employees', metadata, Column('employee_id', Integer, primary_key=True), Column('name', String(50)), Column('company_id', Integer, ForeignKey('companies.id')) ) managers_table = Table('managers', metadata, Column('employee_id', Integer, primary_key=True), Column('name', String(50)), Column('manager_data', String(50)), Column('company_id', Integer, ForeignKey('companies.id')) ) engineers_table = Table('engineers', metadata, Column('employee_id', Integer, primary_key=True), Column('name', String(50)), Column('engineer_info', String(50)), Column('company_id', Integer, ForeignKey('companies.id')) ) mapper(Employee, employees_table, with_polymorphic=('*', pjoin), polymorphic_on=pjoin.c.type, polymorphic_identity='employee') mapper(Manager, managers_table, inherits=employee_mapper, concrete=True, polymorphic_identity='manager') mapper(Engineer, engineers_table, inherits=employee_mapper, concrete=True, polymorphic_identity='engineer') mapper(Company, companies, properties={ 'employees': relationship(Employee) }) The big limitation with concrete table inheritance is that :func:`~sqlalchemy.orm.relationship` objects placed on each concrete mapper do **not** propagate to child mappers. If you want to have the same :func:`~sqlalchemy.orm.relationship` objects set up on all concrete mappers, they must be configured manually on each. To configure back references in such a configuration the ``back_populates`` keyword may be used instead of ``backref``, such as below where both ``A(object)`` and ``B(A)`` bidirectionally reference ``C``:: ajoin = polymorphic_union({ 'a':a_table, 'b':b_table }, 'type', 'ajoin') mapper(A, a_table, with_polymorphic=('*', ajoin), polymorphic_on=ajoin.c.type, polymorphic_identity='a', properties={ 'some_c':relationship(C, back_populates='many_a') }) mapper(B, b_table,inherits=A, concrete=True, polymorphic_identity='b', properties={ 'some_c':relationship(C, back_populates='many_a') }) mapper(C, c_table, properties={ 'many_a':relationship(A, collection_class=set, back_populates='some_c'), }) Using Inheritance with Declarative ----------------------------------- Declarative makes inheritance configuration more intuitive. See the docs at :ref:`declarative_inheritance`. SQLAlchemy-0.8.4/doc/build/orm/internals.rst0000644000076500000240000000256412251150015021422 0ustar classicstaff00000000000000.. 
_orm_internal_toplevel: ORM Internals ============= Key ORM constructs, not otherwise covered in other sections, are listed here. .. currentmodule: sqlalchemy.orm .. autoclass:: sqlalchemy.orm.state.AttributeState :members: :inherited-members: .. autoclass:: sqlalchemy.orm.instrumentation.ClassManager :members: :inherited-members: .. autoclass:: sqlalchemy.orm.properties.ColumnProperty :members: :inherited-members: .. autoclass:: sqlalchemy.orm.descriptor_props.CompositeProperty :members: .. autoclass:: sqlalchemy.orm.interfaces._InspectionAttr :members: .. autoclass:: sqlalchemy.orm.state.InstanceState :members: .. autoclass:: sqlalchemy.orm.attributes.InstrumentedAttribute :members: __get__, __set__, __delete__ :undoc-members: .. autoclass:: sqlalchemy.orm.interfaces.MapperProperty :members: .. autodata:: sqlalchemy.orm.interfaces.NOT_EXTENSION .. autoclass:: sqlalchemy.orm.interfaces.PropComparator :members: :inherited-members: .. autoclass:: sqlalchemy.orm.properties.RelationshipProperty :members: :inherited-members: .. autoclass:: sqlalchemy.orm.descriptor_props.SynonymProperty :members: :inherited-members: .. autoclass:: sqlalchemy.orm.query.QueryContext :members: .. autoclass:: sqlalchemy.orm.attributes.QueryableAttribute :members: :inherited-members: SQLAlchemy-0.8.4/doc/build/orm/loading.rst0000644000076500000240000005524712251150015021046 0ustar classicstaff00000000000000.. currentmodule:: sqlalchemy.orm Relationship Loading Techniques =============================== A big part of SQLAlchemy is providing a wide range of control over how related objects get loaded when querying. This behavior can be configured at mapper construction time using the ``lazy`` parameter to the :func:`.relationship` function, as well as by using options with the :class:`.Query` object. Using Loader Strategies: Lazy Loading, Eager Loading ---------------------------------------------------- By default, all inter-object relationships are **lazy loading**. The scalar or collection attribute associated with a :func:`~sqlalchemy.orm.relationship` contains a trigger which fires the first time the attribute is accessed. This trigger, in all but one case, issues a SQL call at the point of access in order to load the related object or objects: .. sourcecode:: python+sql {sql}>>> jack.addresses SELECT addresses.id AS addresses_id, addresses.email_address AS addresses_email_address, addresses.user_id AS addresses_user_id FROM addresses WHERE ? = addresses.user_id [5] {stop}[, ] The one case where SQL is not emitted is for a simple many-to-one relationship, when the related object can be identified by its primary key alone and that object is already present in the current :class:`.Session`. This default behavior of "load upon attribute access" is known as "lazy" or "select" loading - the name "select" because a "SELECT" statement is typically emitted when the attribute is first accessed. In the :ref:`ormtutorial_toplevel`, we introduced the concept of **Eager Loading**. We used an ``option`` in conjunction with the :class:`~sqlalchemy.orm.query.Query` object in order to indicate that a relationship should be loaded at the same time as the parent, within a single SQL query. This option, known as :func:`.joinedload`, connects a JOIN (by default a LEFT OUTER join) to the statement and populates the scalar/collection from the same result set as that of the parent: .. sourcecode:: python+sql {sql}>>> jack = session.query(User).\ ... options(joinedload('addresses')).\ ... 
filter_by(name='jack').all() #doctest: +NORMALIZE_WHITESPACE SELECT addresses_1.id AS addresses_1_id, addresses_1.email_address AS addresses_1_email_address, addresses_1.user_id AS addresses_1_user_id, users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, users.password AS users_password FROM users LEFT OUTER JOIN addresses AS addresses_1 ON users.id = addresses_1.user_id WHERE users.name = ? ['jack'] In addition to "joined eager loading", a second option for eager loading exists, called "subquery eager loading". This kind of eager loading emits an additional SQL statement for each collection requested, aggregated across all parent objects: .. sourcecode:: python+sql {sql}>>> jack = session.query(User).\ ... options(subqueryload('addresses')).\ ... filter_by(name='jack').all() SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, users.password AS users_password FROM users WHERE users.name = ? ('jack',) SELECT addresses.id AS addresses_id, addresses.email_address AS addresses_email_address, addresses.user_id AS addresses_user_id, anon_1.users_id AS anon_1_users_id FROM (SELECT users.id AS users_id FROM users WHERE users.name = ?) AS anon_1 JOIN addresses ON anon_1.users_id = addresses.user_id ORDER BY anon_1.users_id, addresses.id ('jack',) The default **loader strategy** for any :func:`~sqlalchemy.orm.relationship` is configured by the ``lazy`` keyword argument, which defaults to ``select`` - this indicates a "select" statement . Below we set it as ``joined`` so that the ``children`` relationship is eager loading, using a join: .. sourcecode:: python+sql # load the 'children' collection using LEFT OUTER JOIN mapper(Parent, parent_table, properties={ 'children': relationship(Child, lazy='joined') }) We can also set it to eagerly load using a second query for all collections, using ``subquery``: .. sourcecode:: python+sql # load the 'children' attribute using a join to a subquery mapper(Parent, parent_table, properties={ 'children': relationship(Child, lazy='subquery') }) When querying, all three choices of loader strategy are available on a per-query basis, using the :func:`~sqlalchemy.orm.joinedload`, :func:`~sqlalchemy.orm.subqueryload` and :func:`~sqlalchemy.orm.lazyload` query options: .. sourcecode:: python+sql # set children to load lazily session.query(Parent).options(lazyload('children')).all() # set children to load eagerly with a join session.query(Parent).options(joinedload('children')).all() # set children to load eagerly with a second statement session.query(Parent).options(subqueryload('children')).all() To reference a relationship that is deeper than one level, separate the names by periods: .. sourcecode:: python+sql session.query(Parent).options(joinedload('foo.bar.bat')).all() When using dot-separated names with :func:`~sqlalchemy.orm.joinedload` or :func:`~sqlalchemy.orm.subqueryload`, the option applies **only** to the actual attribute named, and **not** its ancestors. For example, suppose a mapping from ``A`` to ``B`` to ``C``, where the relationships, named ``atob`` and ``btoc``, are both lazy-loading. A statement like the following: .. sourcecode:: python+sql session.query(A).options(joinedload('atob.btoc')).all() will load only ``A`` objects to start. When the ``atob`` attribute on each ``A`` is accessed, the returned ``B`` objects will *eagerly* load their ``C`` objects. Therefore, to modify the eager load to load both ``atob`` as well as ``btoc``, place joinedloads for both: .. 
sourcecode:: python+sql session.query(A).options(joinedload('atob'), joinedload('atob.btoc')).all() or more succinctly just use :func:`~sqlalchemy.orm.joinedload_all` or :func:`~sqlalchemy.orm.subqueryload_all`: .. sourcecode:: python+sql session.query(A).options(joinedload_all('atob.btoc')).all() There are two other loader strategies available, **dynamic loading** and **no loading**; these are described in :ref:`largecollections`. Default Loading Strategies -------------------------- .. versionadded:: 0.7.5 Default loader strategies as a new feature. Each of :func:`.joinedload`, :func:`.subqueryload`, :func:`.lazyload`, and :func:`.noload` can be used to set the default style of :func:`.relationship` loading for a particular query, affecting all :func:`.relationship` -mapped attributes not otherwise specified in the :class:`.Query`. This feature is available by passing the string ``'*'`` as the argument to any of these options:: session.query(MyClass).options(lazyload('*')) Above, the ``lazyload('*')`` option will supercede the ``lazy`` setting of all :func:`.relationship` constructs in use for that query, except for those which use the ``'dynamic'`` style of loading. If some relationships specify ``lazy='joined'`` or ``lazy='subquery'``, for example, using ``lazyload('*')`` will unilaterally cause all those relationships to use ``'select'`` loading, e.g. emit a SELECT statement when each attribute is accessed. The option does not supercede loader options stated in the query, such as :func:`.eagerload`, :func:`.subqueryload`, etc. The query below will still use joined loading for the ``widget`` relationship:: session.query(MyClass).options( lazyload('*'), joinedload(MyClass.widget) ) If multiple ``'*'`` options are passed, the last one overrides those previously passed. .. _zen_of_eager_loading: The Zen of Eager Loading ------------------------- The philosophy behind loader strategies is that any set of loading schemes can be applied to a particular query, and *the results don't change* - only the number of SQL statements required to fully load related objects and collections changes. A particular query might start out using all lazy loads. After using it in context, it might be revealed that particular attributes or collections are always accessed, and that it would be more efficient to change the loader strategy for these. The strategy can be changed with no other modifications to the query, the results will remain identical, but fewer SQL statements would be emitted. In theory (and pretty much in practice), nothing you can do to the :class:`.Query` would make it load a different set of primary or related objects based on a change in loader strategy. How :func:`joinedload` in particular achieves this result of not impacting entity rows returned in any way is that it creates an anonymous alias of the joins it adds to your query, so that they can't be referenced by other parts of the query. For example, the query below uses :func:`.joinedload` to create a LEFT OUTER JOIN from ``users`` to ``addresses``, however the ``ORDER BY`` added against ``Address.email_address`` is not valid - the ``Address`` entity is not named in the query: .. sourcecode:: python+sql >>> jack = session.query(User).\ ... options(joinedload(User.addresses)).\ ... filter(User.name=='jack').\ ... 
order_by(Address.email_address).all() {opensql}SELECT addresses_1.id AS addresses_1_id, addresses_1.email_address AS addresses_1_email_address, addresses_1.user_id AS addresses_1_user_id, users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, users.password AS users_password FROM users LEFT OUTER JOIN addresses AS addresses_1 ON users.id = addresses_1.user_id WHERE users.name = ? ORDER BY addresses.email_address <-- this part is wrong ! ['jack'] Above, ``ORDER BY addresses.email_address`` is not valid since ``addresses`` is not in the FROM list. The correct way to load the ``User`` records and order by email address is to use :meth:`.Query.join`: .. sourcecode:: python+sql >>> jack = session.query(User).\ ... join(User.addresses).\ ... filter(User.name=='jack').\ ... order_by(Address.email_address).all() {opensql} SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, users.password AS users_password FROM users JOIN addresses ON users.id = addresses.user_id WHERE users.name = ? ORDER BY addresses.email_address ['jack'] The statement above is of course not the same as the previous one, in that the columns from ``addresses`` are not included in the result at all. We can add :func:`.joinedload` back in, so that there are two joins - one is that which we are ordering on, the other is used anonymously to load the contents of the ``User.addresses`` collection: .. sourcecode:: python+sql >>> jack = session.query(User).\ ... join(User.addresses).\ ... options(joinedload(User.addresses)).\ ... filter(User.name=='jack').\ ... order_by(Address.email_address).all() {opensql}SELECT addresses_1.id AS addresses_1_id, addresses_1.email_address AS addresses_1_email_address, addresses_1.user_id AS addresses_1_user_id, users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, users.password AS users_password FROM users JOIN addresses ON users.id = addresses.user_id LEFT OUTER JOIN addresses AS addresses_1 ON users.id = addresses_1.user_id WHERE users.name = ? ORDER BY addresses.email_address ['jack'] What we see above is that our usage of :meth:`.Query.join` is to supply JOIN clauses we'd like to use in subsequent query criterion, whereas our usage of :func:`.joinedload` only concerns itself with the loading of the ``User.addresses`` collection, for each ``User`` in the result. In this case, the two joins most probably appear redundant - which they are. If we wanted to use just one JOIN for collection loading as well as ordering, we use the :func:`.contains_eager` option, described in :ref:`contains_eager` below. But to see why :func:`joinedload` does what it does, consider if we were **filtering** on a particular ``Address``: .. sourcecode:: python+sql >>> jack = session.query(User).\ ... join(User.addresses).\ ... options(joinedload(User.addresses)).\ ... filter(User.name=='jack').\ ... filter(Address.email_address=='someaddress@foo.com').\ ... all() {opensql}SELECT addresses_1.id AS addresses_1_id, addresses_1.email_address AS addresses_1_email_address, addresses_1.user_id AS addresses_1_user_id, users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, users.password AS users_password FROM users JOIN addresses ON users.id = addresses.user_id LEFT OUTER JOIN addresses AS addresses_1 ON users.id = addresses_1.user_id WHERE users.name = ? AND addresses.email_address = ? ['jack', 'someaddress@foo.com'] Above, we can see that the two JOINs have very different roles. 
One will match exactly one row, that of the join of ``User`` and ``Address`` where ``Address.email_address=='someaddress@foo.com'``. The other LEFT OUTER JOIN will match *all* ``Address`` rows related to ``User``, and is only used to populate the ``User.addresses`` collection, for those ``User`` objects that are returned.
By changing the usage of :func:`.joinedload` to another style of loading, we can change how the collection is loaded completely independently of SQL used to retrieve the actual ``User`` rows we want. Below we change :func:`.joinedload` into :func:`.subqueryload`:
.. sourcecode:: python+sql >>> jack = session.query(User).\ ... join(User.addresses).\ ... options(subqueryload(User.addresses)).\ ... filter(User.name=='jack').\ ... filter(Address.email_address=='someaddress@foo.com').\ ... all() {opensql}SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, users.password AS users_password FROM users JOIN addresses ON users.id = addresses.user_id WHERE users.name = ? AND addresses.email_address = ? ['jack', 'someaddress@foo.com'] # ... subqueryload() emits a SELECT in order # to load all address records ...
When using joined eager loading, if the query contains a modifier that impacts the rows returned externally to the joins, such as when using DISTINCT, LIMIT, OFFSET or equivalent, the completed statement is first wrapped inside a subquery, and the joins used specifically for joined eager loading are applied to the subquery. SQLAlchemy's joined eager loading goes the extra mile, and then ten miles further, to absolutely ensure that it does not affect the end result of the query, only the way collections and related objects are loaded, no matter what the format of the query is.
What Kind of Loading to Use? -----------------------------
Which type of loading to use typically comes down to optimizing the tradeoff between number of SQL executions, complexity of SQL emitted, and amount of data fetched. Let's take two examples, a :func:`~sqlalchemy.orm.relationship` which references a collection, and a :func:`~sqlalchemy.orm.relationship` that references a scalar many-to-one reference.
* One to Many Collection
* When using the default lazy loading, if you load 100 objects, and then access a collection on each of them, a total of 101 SQL statements will be emitted, although each statement will typically be a simple SELECT without any joins.
* When using joined loading, the load of 100 objects and their collections will emit only one SQL statement. However, the total number of rows fetched will be equal to the sum of the size of all the collections, plus one extra row for each parent object that has an empty collection. Each row will also contain the full set of columns represented by the parents, repeated for each collection item - SQLAlchemy does not re-fetch these columns other than those of the primary key, however most DBAPIs (with some exceptions) will transmit the full data of each parent over the wire to the client connection in any case. Therefore joined eager loading only makes sense when the size of the collections is relatively small. The LEFT OUTER JOIN can also be performance intensive compared to an INNER JOIN.
* When using subquery loading, the load of 100 objects will emit two SQL statements. The second statement will fetch a total number of rows equal to the sum of the size of all collections. An INNER JOIN is used, and a minimum of parent columns is requested - only the primary keys. So a subquery load makes sense when the collections are larger.
* When multiple levels of depth are used with joined or subquery loading, loading collections-within-collections will multiply the total number of rows fetched in a cartesian fashion. Both forms of eager loading always join from the original parent class.
* Many to One Reference
* When using the default lazy loading, a load of 100 objects will, as in the case of the collection, emit as many as 101 SQL statements. However - there is a significant exception to this, in that if the many-to-one reference is a simple foreign key reference to the target's primary key, each reference will be checked first in the current identity map using :meth:`.Query.get`. So here, if the collection of objects references a relatively small set of target objects, or the full set of possible target objects has already been loaded into the session and is strongly referenced, using the default of ``lazy='select'`` is by far the most efficient way to go.
* When using joined loading, the load of 100 objects will emit only one SQL statement. The join will be a LEFT OUTER JOIN, and the total number of rows will be equal to 100 in all cases. If you know that each parent definitely has a child (i.e. the foreign key reference is NOT NULL), the joined load can be configured with ``innerjoin=True``, which is usually specified within the :func:`~sqlalchemy.orm.relationship` (a short example follows this list). For a load of objects where there are many possible target references which may not have been loaded already, joined loading with an INNER JOIN is extremely efficient.
* Subquery loading will issue a second load for all the child objects, so for a load of 100 objects there would be two SQL statements emitted. There's probably not much advantage here over joined loading, however, except perhaps that subquery loading can use an INNER JOIN in all cases whereas joined loading requires that the foreign key is NOT NULL.
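To illustrate the ``innerjoin=True`` setting mentioned in the list above, here is a minimal sketch reusing the illustrative ``Parent``/``Child`` mapping from the earlier examples, for a many-to-one reference whose foreign key is NOT NULL::

    # eager load the scalar 'child' reference using an INNER JOIN rather
    # than a LEFT OUTER JOIN, since a related row is always present
    mapper(Parent, parent_table, properties={
        'child': relationship(Child, lazy='joined', innerjoin=True)
    })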
.. _joinedload_and_join: .. _contains_eager:
Routing Explicit Joins/Statements into Eagerly Loaded Collections ------------------------------------------------------------------
The behavior of :func:`~sqlalchemy.orm.joinedload()` is such that joins are created automatically, using anonymous aliases as targets, the results of which are routed into collections and scalar references on loaded objects. It is often the case that a query already includes the necessary joins which represent a particular collection or scalar reference, and the joins added by the joinedload feature are redundant - yet you'd still like the collections/references to be populated. For this, SQLAlchemy supplies the :func:`~sqlalchemy.orm.contains_eager()` option. This option is used in the same manner as the :func:`~sqlalchemy.orm.joinedload()` option except it is assumed that the :class:`~sqlalchemy.orm.query.Query` will specify the appropriate joins explicitly.
Below it's used with a ``from_statement`` load:: # mapping is the users->addresses mapping mapper(User, users_table, properties={ 'addresses': relationship(Address, addresses_table) }) # define a query on USERS with an outer join to ADDRESSES statement = users_table.outerjoin(addresses_table).select().apply_labels() # construct a Query object which expects the "addresses" results query = session.query(User).options(contains_eager('addresses')) # get results normally r = query.from_statement(statement) It works just as well with an inline :meth:`.Query.join` or :meth:`.Query.outerjoin`:: session.query(User).outerjoin(User.addresses).options(contains_eager(User.addresses)).all() If the "eager" portion of the statement is "aliased", the ``alias`` keyword argument to :func:`~sqlalchemy.orm.contains_eager` may be used to indicate it. This is a string alias name or reference to an actual :class:`~sqlalchemy.sql.expression.Alias` (or other selectable) object: .. sourcecode:: python+sql # use an alias of the Address entity adalias = aliased(Address) # construct a Query object which expects the "addresses" results query = session.query(User).\ outerjoin(adalias, User.addresses).\ options(contains_eager(User.addresses, alias=adalias)) # get results normally {sql}r = query.all() SELECT users.user_id AS users_user_id, users.user_name AS users_user_name, adalias.address_id AS adalias_address_id, adalias.user_id AS adalias_user_id, adalias.email_address AS adalias_email_address, (...other columns...) FROM users LEFT OUTER JOIN email_addresses AS email_addresses_1 ON users.user_id = email_addresses_1.user_id The ``alias`` argument is used only as a source of columns to match up to the result set. You can use it to match up the result to arbitrary label names in a string SQL statement, by passing a :func:`.select` which links those labels to the mapped :class:`.Table`:: # label the columns of the addresses table eager_columns = select([ addresses.c.address_id.label('a1'), addresses.c.email_address.label('a2'), addresses.c.user_id.label('a3')]) # select from a raw SQL statement which uses those label names for the # addresses table. contains_eager() matches them up. query = session.query(User).\ from_statement("select users.*, addresses.address_id as a1, " "addresses.email_address as a2, addresses.user_id as a3 " "from users left outer join addresses on users.user_id=addresses.user_id").\ options(contains_eager(User.addresses, alias=eager_columns)) The path given as the argument to :func:`.contains_eager` needs to be a full path from the starting entity. For example if we were loading ``Users->orders->Order->items->Item``, the string version would look like:: query(User).options(contains_eager('orders', 'items')) Or using the class-bound descriptor:: query(User).options(contains_eager(User.orders, Order.items)) Relation Loader API -------------------- .. autofunction:: contains_alias .. autofunction:: contains_eager .. autofunction:: eagerload .. autofunction:: eagerload_all .. autofunction:: immediateload .. autofunction:: joinedload .. autofunction:: joinedload_all .. autofunction:: lazyload .. autofunction:: noload .. autofunction:: subqueryload .. autofunction:: subqueryload_all SQLAlchemy-0.8.4/doc/build/orm/mapper_config.rst0000644000076500000240000014267012251150015022237 0ustar classicstaff00000000000000.. module:: sqlalchemy.orm .. 
_mapper_config_toplevel: ==================== Mapper Configuration ==================== This section describes a variety of configurational patterns that are usable with mappers. It assumes you've worked through :ref:`ormtutorial_toplevel` and know how to construct and use rudimentary mappers and relationships. .. _classical_mapping: Classical Mappings ================== A *Classical Mapping* refers to the configuration of a mapped class using the :func:`.mapper` function, without using the Declarative system. As an example, start with the declarative mapping introduced in :ref:`ormtutorial_toplevel`:: class User(Base): __tablename__ = 'users' id = Column(Integer, primary_key=True) name = Column(String) fullname = Column(String) password = Column(String) In "classical" form, the table metadata is created separately with the :class:`.Table` construct, then associated with the ``User`` class via the :func:`.mapper` function:: from sqlalchemy import Table, MetaData, Column, ForeignKey, Integer, String from sqlalchemy.orm import mapper metadata = MetaData() user = Table('user', metadata, Column('id', Integer, primary_key=True), Column('name', String(50)), Column('fullname', String(50)), Column('password', String(12)) ) class User(object): def __init__(self, name, fullname, password): self.name = name self.fullname = fullname self.password = password mapper(User, user) Information about mapped attributes, such as relationships to other classes, are provided via the ``properties`` dictionary. The example below illustrates a second :class:`.Table` object, mapped to a class called ``Address``, then linked to ``User`` via :func:`.relationship`:: address = Table('address', metadata, Column('id', Integer, primary_key=True), Column('user_id', Integer, ForeignKey('user.id')), Column('email_address', String(50)) ) mapper(User, user, properties={ 'addresses' : relationship(Address, backref='user', order_by=address.c.id) }) mapper(Address, address) When using classical mappings, classes must be provided directly without the benefit of the "string lookup" system provided by Declarative. SQL expressions are typically specified in terms of the :class:`.Table` objects, i.e. ``address.c.id`` above for the ``Address`` relationship, and not ``Address.id``, as ``Address`` may not yet be linked to table metadata, nor can we specify a string here. Some examples in the documentation still use the classical approach, but note that the classical as well as Declarative approaches are **fully interchangeable**. Both systems ultimately create the same configuration, consisting of a :class:`.Table`, user-defined class, linked together with a :func:`.mapper`. When we talk about "the behavior of :func:`.mapper`", this includes when using the Declarative system as well - it's still used, just behind the scenes. Customizing Column Properties ============================== The default behavior of :func:`~.orm.mapper` is to assemble all the columns in the mapped :class:`.Table` into mapped object attributes, each of which are named according to the name of the column itself (specifically, the ``key`` attribute of :class:`.Column`). This behavior can be modified in several ways. .. 
_mapper_column_distinct_names: Naming Columns Distinctly from Attribute Names ---------------------------------------------- A mapping by default shares the same name for a :class:`.Column` as that of the mapped attribute - specifically it matches the :attr:`.Column.key` attribute on :class:`.Column`, which by default is the same as the :attr:`.Column.name`. The name assigned to the Python attribute which maps to :class:`.Column` can be different from either :attr:`.Column.name` or :attr:`.Column.key` just by assigning it that way, as we illustrate here in a Declarative mapping:: class User(Base): __tablename__ = 'user' id = Column('user_id', Integer, primary_key=True) name = Column('user_name', String(50)) Where above ``User.id`` resolves to a column named ``user_id`` and ``User.name`` resolves to a column named ``user_name``. When mapping to an existing table, the :class:`.Column` object can be referenced directly:: class User(Base): __table__ = user_table id = user_table.c.user_id name = user_table.c.user_name Or in a classical mapping, placed in the ``properties`` dictionary with the desired key:: mapper(User, user_table, properties={ 'id': user_table.c.user_id, 'name': user_table.c.user_name, }) In the next section we'll examine the usage of ``.key`` more closely. .. _mapper_automated_reflection_schemes: Automating Column Naming Schemes from Reflected Tables ------------------------------------------------------ In the previous section :ref:`mapper_column_distinct_names`, we showed how a :class:`.Column` explicitly mapped to a class can have a different attribute name than the column. But what if we aren't listing out :class:`.Column` objects explicitly, and instead are automating the production of :class:`.Table` objects using reflection (e.g. as described in :ref:`metadata_reflection_toplevel`)? In this case we can make use of the :meth:`.DDLEvents.column_reflect` event to intercept the production of :class:`.Column` objects and provide them with the :attr:`.Column.key` of our choice:: @event.listens_for(Table, "column_reflect") def column_reflect(inspector, table, column_info): # set column.key = "attr_" column_info['key'] = "attr_%s" % column_info['name'].lower() With the above event, the reflection of :class:`.Column` objects will be intercepted with our event that adds a new ".key" element, such as in a mapping as below:: class MyClass(Base): __table__ = Table("some_table", Base.metadata, autoload=True, autoload_with=some_engine) If we want to qualify our event to only react for the specific :class:`.MetaData` object above, we can check for it in our event:: @event.listens_for(Table, "column_reflect") def column_reflect(inspector, table, column_info): if table.metadata is Base.metadata: # set column.key = "attr_" column_info['key'] = "attr_%s" % column_info['name'].lower() .. _column_prefix: Naming All Columns with a Prefix -------------------------------- A quick approach to prefix column names, typically when mapping to an existing :class:`.Table` object, is to use ``column_prefix``:: class User(Base): __table__ = user_table __mapper_args__ = {'column_prefix':'_'} The above will place attribute names such as ``_user_id``, ``_user_name``, ``_password`` etc. on the mapped ``User`` class. This approach is uncommon in modern usage. For dealing with reflected tables, a more flexible approach is to use that described in :ref:`mapper_automated_reflection_schemes`. 
Using column_property for column level options -----------------------------------------------
Options can be specified when mapping a :class:`.Column` using the :func:`.column_property` function. This function explicitly creates the :class:`.ColumnProperty` used by the :func:`.mapper` to keep track of the :class:`.Column`; normally, the :func:`.mapper` creates this automatically. Using :func:`.column_property`, we can pass additional arguments about how we'd like the :class:`.Column` to be mapped. Below, we pass an option ``active_history``, which specifies that a change to this column's value should result in the former value being loaded first:: from sqlalchemy.orm import column_property class User(Base): __tablename__ = 'user' id = Column(Integer, primary_key=True) name = column_property(Column(String(50)), active_history=True)
:func:`.column_property` is also used to map a single attribute to multiple columns. This use case arises when mapping to a :func:`~.expression.join` which has attributes that are equated to each other:: class User(Base): __table__ = user_table.join(address_table) # assign "user.id", "address.user_id" to the # "id" attribute id = column_property(user_table.c.id, address_table.c.user_id)
For more examples featuring this usage, see :ref:`maptojoin`.
Another place where :func:`.column_property` is needed is to specify SQL expressions as mapped attributes, such as below where we create an attribute ``fullname`` that is the string concatenation of the ``firstname`` and ``lastname`` columns:: class User(Base): __tablename__ = 'user' id = Column(Integer, primary_key=True) firstname = Column(String(50)) lastname = Column(String(50)) fullname = column_property(firstname + " " + lastname)
See examples of this usage at :ref:`mapper_sql_expressions`.
.. autofunction:: column_property
.. _include_exclude_cols:
Mapping a Subset of Table Columns ---------------------------------
Sometimes, a :class:`.Table` object was made available using the reflection process described at :ref:`metadata_reflection` to load the table's structure from the database. For such a table that has lots of columns that don't need to be referenced in the application, the ``include_properties`` or ``exclude_properties`` arguments can specify that only a subset of columns should be mapped. For example:: class User(Base): __table__ = user_table __mapper_args__ = { 'include_properties' :['user_id', 'user_name'] }
...will map the ``User`` class to the ``user_table`` table, only including the ``user_id`` and ``user_name`` columns - the rest are not referenced. Similarly:: class Address(Base): __table__ = address_table __mapper_args__ = { 'exclude_properties' : ['street', 'city', 'state', 'zip'] }
...will map the ``Address`` class to the ``address_table`` table, including all columns present except ``street``, ``city``, ``state``, and ``zip``.
When this mapping is used, the columns that are not included will not be referenced in any SELECT statements emitted by :class:`.Query`, nor will there be any mapped attribute on the mapped class which represents the column; assigning an attribute of that name will have no effect beyond that of a normal Python attribute assignment.
In some cases, multiple columns may have the same name, such as when mapping to a join of two or more tables that share some column name.
``include_properties`` and ``exclude_properties`` can also accommodate :class:`.Column` objects to more accurately describe which columns should be included or excluded:: class UserAddress(Base): __table__ = user_table.join(addresses_table) __mapper_args__ = { 'exclude_properties' :[addresses_table.c.id], 'primary_key' : [user_table.c.id] }
.. note:: insert and update defaults configured on individual :class:`.Column` objects, i.e. those described at :ref:`metadata_defaults` including those configured by the ``default``, ``onupdate``, ``server_default`` and ``server_onupdate`` arguments, will continue to function normally even if those :class:`.Column` objects are not mapped. This is because in the case of ``default`` and ``onupdate``, the :class:`.Column` object is still present on the underlying :class:`.Table`, thus allowing the default functions to take place when the ORM emits an INSERT or UPDATE, and in the case of ``server_default`` and ``server_onupdate``, the relational database itself maintains these functions.
.. _deferred:
Deferred Column Loading ========================
This feature allows particular columns of a table to be loaded only upon direct access, instead of when the entity is queried using :class:`.Query`. This feature is useful when one wants to avoid loading a large text or binary field into memory when it's not needed. Individual columns can be lazy loaded by themselves or placed into groups that lazy-load together, using the :func:`.orm.deferred` function to mark them as "deferred". In the example below, we define a mapping that will load each of ``.excerpt`` and ``.photo`` in separate, individual-row SELECT statements when each attribute is first referenced on the individual object instance:: from sqlalchemy.orm import deferred from sqlalchemy import Integer, String, Text, Binary, Column class Book(Base): __tablename__ = 'book' book_id = Column(Integer, primary_key=True) title = Column(String(200), nullable=False) summary = Column(String(2000)) excerpt = deferred(Column(Text)) photo = deferred(Column(Binary))
Classical mappings as always place the usage of :func:`.orm.deferred` in the ``properties`` dictionary against the table-bound :class:`.Column`:: mapper(Book, book_table, properties={ 'photo':deferred(book_table.c.photo) })
Deferred columns can be associated with a "group" name, so that they load together when any of them are first accessed. The example below defines a mapping with a ``photos`` deferred group. When one ``.photo`` is accessed, all three photos will be loaded in one SELECT statement. The ``.excerpt`` will be loaded separately when it is accessed:: class Book(Base): __tablename__ = 'book' book_id = Column(Integer, primary_key=True) title = Column(String(200), nullable=False) summary = Column(String(2000)) excerpt = deferred(Column(Text)) photo1 = deferred(Column(Binary), group='photos') photo2 = deferred(Column(Binary), group='photos') photo3 = deferred(Column(Binary), group='photos')
You can defer or undefer columns at the :class:`~sqlalchemy.orm.query.Query` level using the :func:`.orm.defer` and :func:`.orm.undefer` query options:: from sqlalchemy.orm import defer, undefer query = session.query(Book) query.options(defer('summary')).all() query.options(undefer('excerpt')).all()
And an entire "deferred group", i.e.
which uses the ``group`` keyword argument to :func:`.orm.deferred`, can be undeferred using :func:`.orm.undefer_group`, sending in the group name:: from sqlalchemy.orm import undefer_group query = session.query(Book) query.options(undefer_group('photos')).all()
Column Deferral API ------------------- .. autofunction:: deferred .. autofunction:: defer .. autofunction:: undefer .. autofunction:: undefer_group
.. _mapper_sql_expressions:
SQL Expressions as Mapped Attributes =====================================
Attributes on a mapped class can be linked to SQL expressions, which can be used in queries.
Using a Hybrid --------------
The easiest and most flexible way to link relatively simple SQL expressions to a class is to use a so-called "hybrid attribute", described in the section :ref:`hybrids_toplevel`. The hybrid provides for an expression that works at both the Python level as well as at the SQL expression level. For example, below we map a class ``User``, containing attributes ``firstname`` and ``lastname``, and include a hybrid that will provide for us the ``fullname``, which is the string concatenation of the two:: from sqlalchemy.ext.hybrid import hybrid_property class User(Base): __tablename__ = 'user' id = Column(Integer, primary_key=True) firstname = Column(String(50)) lastname = Column(String(50)) @hybrid_property def fullname(self): return self.firstname + " " + self.lastname
Above, the ``fullname`` attribute is interpreted at both the instance and class level, so that it is available from an instance:: some_user = session.query(User).first() print some_user.fullname
as well as usable within queries:: some_user = session.query(User).filter(User.fullname == "John Smith").first()
The string concatenation example is a simple one, where the Python expression can be dual-purposed at the instance and class level. Often, the SQL expression must be distinguished from the Python expression, which can be achieved using :meth:`.hybrid_property.expression`. Below we illustrate the case where a conditional needs to be present inside the hybrid, using the ``if`` statement in Python and the :func:`.sql.expression.case` construct for SQL expressions:: from sqlalchemy.ext.hybrid import hybrid_property from sqlalchemy.sql import case class User(Base): __tablename__ = 'user' id = Column(Integer, primary_key=True) firstname = Column(String(50)) lastname = Column(String(50)) @hybrid_property def fullname(self): if self.firstname is not None: return self.firstname + " " + self.lastname else: return self.lastname @fullname.expression def fullname(cls): return case([ (cls.firstname != None, cls.firstname + " " + cls.lastname), ], else_ = cls.lastname)
.. _mapper_column_property_sql_expressions:
Using column_property ---------------------
The :func:`.orm.column_property` function can be used to map a SQL expression in a manner similar to a regularly mapped :class:`.Column`. With this technique, the attribute is loaded along with all other column-mapped attributes at load time. This is in some cases an advantage over the usage of hybrids, as the value can be loaded up front at the same time as the parent row of the object, particularly if the expression is one which links to other tables (typically as a correlated subquery) to access data that wouldn't normally be available on an already loaded object.
Disadvantages to using :func:`.orm.column_property` for SQL expressions include that the expression must be compatible with the SELECT statement emitted for the class as a whole, and there are also some configurational quirks which can occur when using :func:`.orm.column_property` from declarative mixins. Our "fullname" example can be expressed using :func:`.orm.column_property` as follows:: from sqlalchemy.orm import column_property class User(Base): __tablename__ = 'user' id = Column(Integer, primary_key=True) firstname = Column(String(50)) lastname = Column(String(50)) fullname = column_property(firstname + " " + lastname) Correlated subqueries may be used as well. Below we use the :func:`.select` construct to create a SELECT that links together the count of ``Address`` objects available for a particular ``User``:: from sqlalchemy.orm import column_property from sqlalchemy import select, func from sqlalchemy import Column, Integer, String, ForeignKey from sqlalchemy.ext.declarative import declarative_base Base = declarative_base() class Address(Base): __tablename__ = 'address' id = Column(Integer, primary_key=True) user_id = Column(Integer, ForeignKey('user.id')) class User(Base): __tablename__ = 'user' id = Column(Integer, primary_key=True) address_count = column_property( select([func.count(Address.id)]).\ where(Address.user_id==id).\ correlate_except(Address) ) In the above example, we define a :func:`.select` construct like the following:: select([func.count(Address.id)]).\ where(Address.user_id==id).\ correlate_except(Address) The meaning of the above statement is, select the count of ``Address.id`` rows where the ``Address.user_id`` column is equated to ``id``, which in the context of the ``User`` class is the :class:`.Column` named ``id`` (note that ``id`` is also the name of a Python built in function, which is not what we want to use here - if we were outside of the ``User`` class definition, we'd use ``User.id``). The :meth:`.select.correlate_except` directive indicates that each element in the FROM clause of this :func:`.select` may be omitted from the FROM list (that is, correlated to the enclosing SELECT statement against ``User``) except for the one corresponding to ``Address``. This isn't strictly necessary, but prevents ``Address`` from being inadvertently omitted from the FROM list in the case of a long string of joins between ``User`` and ``Address`` tables where SELECT statements against ``Address`` are nested. If import issues prevent the :func:`.column_property` from being defined inline with the class, it can be assigned to the class after both are configured. 
In Declarative this has the effect of calling :meth:`.Mapper.add_property` to add an additional property after the fact:: User.address_count = column_property( select([func.count(Address.id)]).\ where(Address.user_id==User.id) ) For many-to-many relationships, use :func:`.and_` to join the fields of the association table to both tables in a relation, illustrated here with a classical mapping:: from sqlalchemy import and_ mapper(Author, authors, properties={ 'book_count': column_property( select([func.count(books.c.id)], and_( book_authors.c.author_id==authors.c.id, book_authors.c.book_id==books.c.id ))) }) Using a plain descriptor ------------------------- In cases where a SQL query more elaborate than what :func:`.orm.column_property` or :class:`.hybrid_property` can provide must be emitted, a regular Python function accessed as an attribute can be used, assuming the expression only needs to be available on an already-loaded instance. The function is decorated with Python's own ``@property`` decorator to mark it as a read-only attribute. Within the function, :func:`.object_session` is used to locate the :class:`.Session` corresponding to the current object, which is then used to emit a query:: from sqlalchemy.orm import object_session from sqlalchemy import select, func class User(Base): __tablename__ = 'user' id = Column(Integer, primary_key=True) firstname = Column(String(50)) lastname = Column(String(50)) @property def address_count(self): return object_session(self).\ scalar( select([func.count(Address.id)]).\ where(Address.user_id==self.id) ) The plain descriptor approach is useful as a last resort, but is less performant in the usual case than both the hybrid and column property approaches, in that it needs to emit a SQL query upon each access. Changing Attribute Behavior ============================ .. _simple_validators: Simple Validators ----------------- A quick way to add a "validation" routine to an attribute is to use the :func:`~sqlalchemy.orm.validates` decorator. An attribute validator can raise an exception, halting the process of mutating the attribute's value, or can change the given value into something different. Validators, like all attribute extensions, are only called by normal userland code; they are not issued when the ORM is populating the object:: from sqlalchemy.orm import validates class EmailAddress(Base): __tablename__ = 'address' id = Column(Integer, primary_key=True) email = Column(String) @validates('email') def validate_email(self, key, address): assert '@' in address return address Validators also receive collection events, when items are added to a collection:: from sqlalchemy.orm import validates class User(Base): # ... addresses = relationship("Address") @validates('addresses') def validate_address(self, key, address): assert '@' in address.email return address Note that the :func:`~.validates` decorator is a convenience function built on top of attribute events. An application that requires more control over configuration of attribute change behavior can make use of this system, described at :class:`~.AttributeEvents`. .. autofunction:: validates .. _synonyms: Using Descriptors and Hybrids ----------------------------- A more comprehensive way to produce modified behavior for an attribute is to use descriptors. These are commonly used in Python using the ``property()`` function. The standard SQLAlchemy technique for descriptors is to create a plain descriptor, and to have it read/write from a mapped attribute with a different name. 
Below we illustrate this using Python 2.6-style properties:: class EmailAddress(Base): __tablename__ = 'email_address' id = Column(Integer, primary_key=True) # name the attribute with an underscore, # different from the column name _email = Column("email", String) # then create an ".email" attribute # to get/set "._email" @property def email(self): return self._email @email.setter def email(self, email): self._email = email The approach above will work, but there's more we can add. While our ``EmailAddress`` object will shuttle the value through the ``email`` descriptor and into the ``_email`` mapped attribute, the class level ``EmailAddress.email`` attribute does not have the usual expression semantics usable with :class:`.Query`. To provide these, we instead use the :mod:`~sqlalchemy.ext.hybrid` extension as follows:: from sqlalchemy.ext.hybrid import hybrid_property class EmailAddress(Base): __tablename__ = 'email_address' id = Column(Integer, primary_key=True) _email = Column("email", String) @hybrid_property def email(self): return self._email @email.setter def email(self, email): self._email = email The ``.email`` attribute, in addition to providing getter/setter behavior when we have an instance of ``EmailAddress``, also provides a SQL expression when used at the class level, that is, from the ``EmailAddress`` class directly: .. sourcecode:: python+sql from sqlalchemy.orm import Session session = Session() {sql}address = session.query(EmailAddress).\ filter(EmailAddress.email == 'address@example.com').\ one() SELECT address.email AS address_email, address.id AS address_id FROM address WHERE address.email = ? ('address@example.com',) {stop} address.email = 'otheraddress@example.com' {sql}session.commit() UPDATE address SET email=? WHERE address.id = ? ('otheraddress@example.com', 1) COMMIT {stop} The :class:`~.hybrid_property` also allows us to change the behavior of the attribute, including defining separate behaviors when the attribute is accessed at the instance level versus at the class/expression level, using the :meth:`.hybrid_property.expression` modifier. Such as, if we wanted to add a host name automatically, we might define two sets of string manipulation logic:: class EmailAddress(Base): __tablename__ = 'email_address' id = Column(Integer, primary_key=True) _email = Column("email", String) @hybrid_property def email(self): """Return the value of _email up until the last twelve characters.""" return self._email[:-12] @email.setter def email(self, email): """Set the value of _email, tacking on the twelve character value @example.com.""" self._email = email + "@example.com" @email.expression def email(cls): """Produce a SQL expression that represents the value of the _email column, minus the last twelve characters.""" return func.substr(cls._email, 0, func.length(cls._email) - 12) Above, accessing the ``email`` property of an instance of ``EmailAddress`` will return the value of the ``_email`` attribute, removing or adding the hostname ``@example.com`` from the value. When we query against the ``email`` attribute, a SQL function is rendered which produces the same effect: .. sourcecode:: python+sql {sql}address = session.query(EmailAddress).filter(EmailAddress.email == 'address').one() SELECT address.email AS address_email, address.id AS address_id FROM address WHERE substr(address.email, ?, length(address.email) - ?) = ? (0, 12, 'address') {stop} Read more about Hybrids at :ref:`hybrids_toplevel`. 
Synonyms --------
Synonyms are a mapper-level construct that applies expression behavior to a descriptor-based attribute.
.. versionchanged:: 0.7 The functionality of synonym is superseded as of 0.7 by hybrid attributes.
.. autofunction:: synonym
.. _custom_comparators:
Operator Customization ----------------------
The "operators" used by the SQLAlchemy ORM and Core expression language are fully customizable. For example, the comparison expression ``User.name == 'ed'`` makes usage of an operator built into Python itself called ``operator.eq`` - the actual SQL construct which SQLAlchemy associates with such an operator can be modified. New operations can be associated with column expressions as well. The operators which take place for column expressions are most directly redefined at the type level - see the section :ref:`types_operators` for a description.
ORM level functions like :func:`.column_property`, :func:`.relationship`, and :func:`.composite` also provide for operator redefinition at the ORM level, by passing a :class:`.PropComparator` subclass to the ``comparator_factory`` argument of each function. Customization of operators at this level is a rare use case. See the documentation at :class:`.PropComparator` for an overview.
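As a brief illustration only (the ``WordItem`` class and its ``word`` column are hypothetical names, not part of the preceding examples), a :class:`.ColumnProperty.Comparator` subclass passed to :func:`.column_property` might redefine equality for a single column to be case-insensitive::

    from sqlalchemy import func
    from sqlalchemy.orm import column_property
    from sqlalchemy.orm.properties import ColumnProperty

    class CaseInsensitiveComparator(ColumnProperty.Comparator):
        def __eq__(self, other):
            # compare the lower-cased form of both sides
            return func.lower(self.__clause_element__()) == func.lower(other)

    class WordItem(Base):
        __tablename__ = 'word_item'

        id = Column(Integer, primary_key=True)
        word = column_property(Column(String(50)),
                               comparator_factory=CaseInsensitiveComparator)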
.. _mapper_composite:
Composite Column Types =======================
Sets of columns can be associated with a single user-defined datatype. The ORM provides a single attribute which represents the group of columns using the class you provide.
.. versionchanged:: 0.7 Composites have been simplified such that they no longer "conceal" the underlying column-based attributes. Additionally, in-place mutation is no longer automatic; see the section below on enabling mutability to support tracking of in-place changes.
A simple example represents pairs of columns as a ``Point`` object. ``Point`` represents such a pair as ``.x`` and ``.y``:: class Point(object): def __init__(self, x, y): self.x = x self.y = y def __composite_values__(self): return self.x, self.y def __repr__(self): return "Point(x=%r, y=%r)" % (self.x, self.y) def __eq__(self, other): return isinstance(other, Point) and \ other.x == self.x and \ other.y == self.y def __ne__(self, other): return not self.__eq__(other)
The requirements for the custom datatype class are that it have a constructor which accepts positional arguments corresponding to its column format, and also provides a method ``__composite_values__()`` which returns the state of the object as a list or tuple, in order of its column-based attributes. It also should supply adequate ``__eq__()`` and ``__ne__()`` methods which test the equality of two instances.
We will create a mapping to a table ``vertice``, which represents two points as ``x1/y1`` and ``x2/y2``. These are created normally as :class:`.Column` objects. Then, the :func:`.composite` function is used to assign new attributes that will represent sets of columns via the ``Point`` class:: from sqlalchemy import Column, Integer from sqlalchemy.orm import composite from sqlalchemy.ext.declarative import declarative_base Base = declarative_base() class Vertex(Base): __tablename__ = 'vertice' id = Column(Integer, primary_key=True) x1 = Column(Integer) y1 = Column(Integer) x2 = Column(Integer) y2 = Column(Integer) start = composite(Point, x1, y1) end = composite(Point, x2, y2)
A classical mapping above would define each :func:`.composite` against the existing table:: mapper(Vertex, vertice_table, properties={ 'start':composite(Point, vertice_table.c.x1, vertice_table.c.y1), 'end':composite(Point, vertice_table.c.x2, vertice_table.c.y2), })
We can now persist and use ``Vertex`` instances, as well as query for them, using the ``.start`` and ``.end`` attributes against ad-hoc ``Point`` instances:
.. sourcecode:: python+sql >>> v = Vertex(start=Point(3, 4), end=Point(5, 6)) >>> session.add(v) >>> q = session.query(Vertex).filter(Vertex.start == Point(3, 4)) {sql}>>> print q.first().start BEGIN (implicit) INSERT INTO vertice (x1, y1, x2, y2) VALUES (?, ?, ?, ?) (3, 4, 5, 6) SELECT vertice.id AS vertice_id, vertice.x1 AS vertice_x1, vertice.y1 AS vertice_y1, vertice.x2 AS vertice_x2, vertice.y2 AS vertice_y2 FROM vertice WHERE vertice.x1 = ? AND vertice.y1 = ? LIMIT ? OFFSET ? (3, 4, 1, 0) {stop}Point(x=3, y=4)
.. autofunction:: composite
Tracking In-Place Mutations on Composites -----------------------------------------
In-place changes to an existing composite value are not tracked automatically. Instead, the composite class needs to provide events to its parent object explicitly. This task is largely automated via the usage of the :class:`.MutableComposite` mixin, which uses events to associate each user-defined composite object with all parent associations. Please see the example in :ref:`mutable_composites`.
.. versionchanged:: 0.7 In-place changes to an existing composite value are no longer tracked automatically; the functionality is superseded by the :class:`.MutableComposite` class.
.. _composite_operations:
Redefining Comparison Operations for Composites -----------------------------------------------
The "equals" comparison operation by default produces an AND of all corresponding columns equated to one another. This can be changed using the ``comparator_factory`` argument to :func:`.composite`, where we specify a custom :class:`.CompositeProperty.Comparator` class to define existing or new operations. Below we illustrate the "greater than" operator, implementing the same expression that the base "greater than" does:: from sqlalchemy.orm.properties import CompositeProperty from sqlalchemy import sql class PointComparator(CompositeProperty.Comparator): def __gt__(self, other): """redefine the 'greater than' operation""" return sql.and_(*[a>b for a, b in zip(self.__clause_element__().clauses, other.__composite_values__())]) class Vertex(Base): __tablename__ = 'vertice' id = Column(Integer, primary_key=True) x1 = Column(Integer) y1 = Column(Integer) x2 = Column(Integer) y2 = Column(Integer) start = composite(Point, x1, y1, comparator_factory=PointComparator) end = composite(Point, x2, y2, comparator_factory=PointComparator)
.. _maptojoin:
Mapping a Class against Multiple Tables ========================================
Mappers can be constructed against arbitrary relational units (called *selectables*) in addition to plain tables.
For example, the :func:`~.expression.join` function creates a selectable unit comprised of multiple tables, complete with its own composite primary key, which can be mapped in the same way as a :class:`.Table`:: from sqlalchemy import Table, Column, Integer, \ String, MetaData, join, ForeignKey from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import column_property metadata = MetaData() # define two Table objects user_table = Table('user', metadata, Column('id', Integer, primary_key=True), Column('name', String), ) address_table = Table('address', metadata, Column('id', Integer, primary_key=True), Column('user_id', Integer, ForeignKey('user.id')), Column('email_address', String) ) # define a join between them. This # takes place across the user.id and address.user_id # columns. user_address_join = join(user_table, address_table) Base = declarative_base() # map to it class AddressUser(Base): __table__ = user_address_join id = column_property(user_table.c.id, address_table.c.user_id) address_id = address_table.c.id In the example above, the join expresses columns for both the ``user`` and the ``address`` table. The ``user.id`` and ``address.user_id`` columns are equated by foreign key, so in the mapping they are defined as one attribute, ``AddressUser.id``, using :func:`.column_property` to indicate a specialized column mapping. Based on this part of the configuration, the mapping will copy new primary key values from ``user.id`` into the ``address.user_id`` column when a flush occurs. Additionally, the ``address.id`` column is mapped explicitly to an attribute named ``address_id``. This is to **disambiguate** the mapping of the ``address.id`` column from the same-named ``AddressUser.id`` attribute, which here has been assigned to refer to the ``user`` table combined with the ``address.user_id`` foreign key. The natural primary key of the above mapping is the composite of ``(user.id, address.id)``, as these are the primary key columns of the ``user`` and ``address`` table combined together. The identity of an ``AddressUser`` object will be in terms of these two values, and is represented from an ``AddressUser`` object as ``(AddressUser.id, AddressUser.address_id)``. Mapping a Class against Arbitrary Selects ========================================= Similar to mapping against a join, a plain :func:`~.expression.select` object can be used with a mapper as well. The example fragment below illustrates mapping a class called ``Customer`` to a :func:`~.expression.select` which includes a join to a subquery:: from sqlalchemy import select, func subq = select([ func.count(orders.c.id).label('order_count'), func.max(orders.c.price).label('highest_order'), orders.c.customer_id ]).group_by(orders.c.customer_id).alias() customer_select = select([customers, subq]).\ select_from( join(customers, subq, customers.c.id == subq.c.customer_id) ).alias() class Customer(Base): __table__ = customer_select Above, the full row represented by ``customer_select`` will be all the columns of the ``customers`` table, in addition to those columns exposed by the ``subq`` subquery, which are ``order_count``, ``highest_order``, and ``customer_id``. Mapping the ``Customer`` class to this selectable then creates a class which will contain those attributes. When the ORM persists new instances of ``Customer``, only the ``customers`` table will actually receive an INSERT. 
This is because the primary key of the ``orders`` table is not represented in the mapping; the ORM will only emit an INSERT into a table for which it has mapped the primary key. .. note:: The practice of mapping to arbitrary SELECT statements, especially complex ones as above, is almost never needed; it necessarily tends to produce complex queries which are often less efficient than that which would be produced by direct query construction. The practice is to some degree based on the very early history of SQLAlchemy where the :func:`.mapper` construct was meant to represent the primary querying interface; in modern usage, the :class:`.Query` object can be used to construct virtually any SELECT statement, including complex composites, and should be favored over the "map-to-selectable" approach. Multiple Mappers for One Class ============================== In modern SQLAlchemy, a particular class is only mapped by one :func:`.mapper` at a time. The rationale here is that the :func:`.mapper` modifies the class itself, not only persisting it towards a particular :class:`.Table`, but also *instrumenting* attributes upon the class which are structured specifically according to the table metadata. One potential use case for another mapper to exist at the same time is if we wanted to load instances of our class not just from the immediate :class:`.Table` to which it is mapped, but from another selectable that is a derivation of that :class:`.Table`. While there technically is a way to create such a :func:`.mapper`, using the ``non_primary=True`` option, this approach is virtually never needed. Instead, we use the functionality of the :class:`.Query` object to achieve this, using a method such as :meth:`.Query.select_from` or :meth:`.Query.from_statement` to specify a derived selectable. Another potential use is if we genuinely want instances of our class to be persisted into different tables at different times; certain kinds of data sharding configurations may persist a particular class into tables that are identical in structure except for their name. For this kind of pattern, Python offers a better approach than the complexity of mapping the same class multiple times, which is to instead create new mapped classes for each target table. SQLAlchemy refers to this as the "entity name" pattern, which is described as a recipe at `Entity Name `_. .. _mapping_constructors: Constructors and Object Initialization ======================================= Mapping imposes no restrictions or requirements on the constructor (``__init__``) method for the class. You are free to require any arguments for the function that you wish, assign attributes to the instance that are unknown to the ORM, and generally do anything else you would normally do when writing a constructor for a Python class. The SQLAlchemy ORM does not call ``__init__`` when recreating objects from database rows. The ORM's process is somewhat akin to the Python standard library's ``pickle`` module, invoking the low level ``__new__`` method and then quietly restoring attributes directly on the instance rather than calling ``__init__``. If you need to do some setup on database-loaded instances before they're ready to use, you can use the ``@reconstructor`` decorator to tag a method as the ORM counterpart to ``__init__``. SQLAlchemy will call this method with no arguments every time it loads or reconstructs one of your instances. 
This is useful for recreating transient properties that are normally assigned in your ``__init__``:: from sqlalchemy import orm class MyMappedClass(object): def __init__(self, data): self.data = data # we need stuff on all instances, but not in the database. self.stuff = [] @orm.reconstructor def init_on_load(self): self.stuff = [] When ``obj = MyMappedClass()`` is executed, Python calls the ``__init__`` method as normal and the ``data`` argument is required. When instances are loaded during a :class:`~sqlalchemy.orm.query.Query` operation as in ``query(MyMappedClass).one()``, ``init_on_load`` is called. Any method may be tagged as the :func:`~sqlalchemy.orm.reconstructor`, even the ``__init__`` method. SQLAlchemy will call the reconstructor method with no arguments. Scalar (non-collection) database-mapped attributes of the instance will be available for use within the function. Eagerly-loaded collections are generally not yet available and will usually only contain the first element. ORM state changes made to objects at this stage will not be recorded for the next flush() operation, so the activity within a reconstructor should be conservative. :func:`~sqlalchemy.orm.reconstructor` is a shortcut into a larger system of "instance level" events, which can be subscribed to using the event API - see :class:`.InstanceEvents` for the full API description of these events. .. autofunction:: reconstructor .. _mapper_version_counter: Configuring a Version Counter ============================= The :class:`.Mapper` supports management of a :term:`version id column`, which is a single table column that increments or otherwise updates its value each time an ``UPDATE`` to the mapped table occurs. This value is checked each time the ORM emits an ``UPDATE`` or ``DELETE`` against the row to ensure that the value held in memory matches the database value. The purpose of this feature is to detect when two concurrent transactions are modifying the same row at roughly the same time, or alternatively to provide a guard against the usage of a "stale" row in a system that might be re-using data from a previous transaction without refreshing (e.g. if one sets ``expire_on_commit=False`` with a :class:`.Session`, it is possible to re-use the data from a previous transaction). .. topic:: Concurrent transaction updates When detecting concurrent updates within transactions, it is typically the case that the database's transaction isolation level is below the level of :term:`repeatable read`; otherwise, the transaction will not be exposed to a new row value created by a concurrent update which conflicts with the locally updated value. In this case, the SQLAlchemy versioning feature will typically not be useful for in-transaction conflict detection, though it still can be used for cross-transaction staleness detection. The database that enforces repeatable reads will typically either have locked the target row against a concurrent update, or is employing some form of multi version concurrency control such that it will emit an error when the transaction is committed. SQLAlchemy's version_id_col is an alternative which allows version tracking to occur for specific tables within a transaction that otherwise might not have this isolation level set. .. seealso:: `Repeatable Read Isolation Level `_ - Postgresql's implementation of repeatable read, including a description of the error condition. 
Simple Version Counting -----------------------
The most straightforward way to track versions is to add an integer column to the mapped table, then establish it as the ``version_id_col`` within the mapper options:: class User(Base): __tablename__ = 'user' id = Column(Integer, primary_key=True) version_id = Column(Integer, nullable=False) name = Column(String(50), nullable=False) __mapper_args__ = { "version_id_col": version_id }
Above, the ``User`` mapping tracks integer versions using the column ``version_id``. When an object of type ``User`` is first flushed, the ``version_id`` column will be given a value of "1". Then, an UPDATE of the table later on will always be emitted in a manner similar to the following:: UPDATE user SET version_id=:version_id, name=:name WHERE user.id = :user_id AND user.version_id = :user_version_id {"name": "new name", "version_id": 2, "user_id": 1, "user_version_id": 1}
The above UPDATE statement is updating the row that not only matches ``user.id = 1``, but also requires that ``user.version_id = 1``, where "1" is the last version identifier we've been known to use on this object. If a transaction elsewhere has modified the row independently, this version id will no longer match, and the UPDATE statement will report that no rows matched; this is the condition that SQLAlchemy tests, that exactly one row matched our UPDATE (or DELETE) statement. If zero rows match, that indicates our version of the data is stale, and a :class:`.StaleDataError` is raised.
.. _custom_version_counter:
Custom Version Counters / Types -------------------------------
Other kinds of values or counters can be used for versioning. Common types include dates and GUIDs. When using an alternate type or counter scheme, SQLAlchemy provides a hook for this scheme using the ``version_id_generator`` argument, which accepts a version generation callable. This callable is passed the value of the current known version, and is expected to return the subsequent version. For example, if we wanted to track the versioning of our ``User`` class using a randomly generated GUID, we could do this (note that some backends support a native GUID type, but we illustrate here using a simple string):: import uuid class User(Base): __tablename__ = 'user' id = Column(Integer, primary_key=True) version_uuid = Column(String(32)) name = Column(String(50), nullable=False) __mapper_args__ = { 'version_id_col':version_uuid, 'version_id_generator':lambda version: uuid.uuid4().hex }
The persistence engine will call upon ``uuid.uuid4()`` each time a ``User`` object is subject to an INSERT or an UPDATE. In this case, our version generation function can disregard the incoming value of ``version``, as the ``uuid4()`` function generates identifiers without any prerequisite value. If we were using a sequential versioning scheme such as numeric or a special character system, we could make use of the given ``version`` in order to help determine the subsequent value.
.. seealso:: :ref:`custom_guid_type`
Class Mapping API ================= .. autofunction:: mapper .. autofunction:: object_mapper .. autofunction:: class_mapper .. autofunction:: configure_mappers .. autofunction:: clear_mappers .. autofunction:: sqlalchemy.orm.util.identity_key .. autofunction:: sqlalchemy.orm.util.polymorphic_union .. autoclass:: sqlalchemy.orm.mapper.Mapper :members: SQLAlchemy-0.8.4/doc/build/orm/query.rst0000644000076500000240000000166612251150015020572 0ustar classicstaff00000000000000..
_query_api_toplevel: Querying ======== This section provides API documentation for the :class:`.Query` object and related constructs. For an in-depth introduction to querying with the SQLAlchemy ORM, please see the :ref:`ormtutorial_toplevel`. .. module:: sqlalchemy.orm The Query Object ---------------- :class:`~.Query` is produced in terms of a given :class:`~.Session`, using the :meth:`~.Session.query` method:: q = session.query(SomeMappedClass) Following is the full interface for the :class:`.Query` object. .. autoclass:: sqlalchemy.orm.query.Query :members: ORM-Specific Query Constructs ----------------------------- .. autofunction:: sqlalchemy.orm.aliased .. autoclass:: sqlalchemy.orm.util.AliasedClass .. autoclass:: sqlalchemy.orm.util.AliasedInsp .. autoclass:: sqlalchemy.util.KeyedTuple :members: keys, _fields, _asdict .. autofunction:: join .. autofunction:: outerjoin .. autofunction:: with_parent SQLAlchemy-0.8.4/doc/build/orm/relationships.rst0000644000076500000240000016100512251147171022314 0ustar classicstaff00000000000000.. module:: sqlalchemy.orm .. _relationship_config_toplevel: Relationship Configuration ========================== This section describes the :func:`relationship` function and provides an in-depth discussion of its usage. The reference material here continues into the next section, :ref:`collections_toplevel`, which has additional detail on configuration of collections via :func:`relationship`. .. _relationship_patterns: Basic Relational Patterns -------------------------- A quick walkthrough of the basic relational patterns. The imports used for each of the following sections are as follows:: from sqlalchemy import Table, Column, Integer, ForeignKey from sqlalchemy.orm import relationship, backref from sqlalchemy.ext.declarative import declarative_base Base = declarative_base() One To Many ~~~~~~~~~~~~ A one to many relationship places a foreign key on the child table referencing the parent. :func:`.relationship` is then specified on the parent, as referencing a collection of items represented by the child:: class Parent(Base): __tablename__ = 'parent' id = Column(Integer, primary_key=True) children = relationship("Child") class Child(Base): __tablename__ = 'child' id = Column(Integer, primary_key=True) parent_id = Column(Integer, ForeignKey('parent.id')) To establish a bidirectional relationship in one-to-many, where the "reverse" side is a many to one, specify the ``backref`` option:: class Parent(Base): __tablename__ = 'parent' id = Column(Integer, primary_key=True) children = relationship("Child", backref="parent") class Child(Base): __tablename__ = 'child' id = Column(Integer, primary_key=True) parent_id = Column(Integer, ForeignKey('parent.id')) ``Child`` will get a ``parent`` attribute with many-to-one semantics. Many To One ~~~~~~~~~~~~ Many to one places a foreign key in the parent table referencing the child.
:func:`.relationship` is declared on the parent, where a new scalar-holding attribute will be created:: class Parent(Base): __tablename__ = 'parent' id = Column(Integer, primary_key=True) child_id = Column(Integer, ForeignKey('child.id')) child = relationship("Child") class Child(Base): __tablename__ = 'child' id = Column(Integer, primary_key=True) Bidirectional behavior is achieved by specifying ``backref="parents"``, which will place a one-to-many collection on the ``Child`` class:: class Parent(Base): __tablename__ = 'parent' id = Column(Integer, primary_key=True) child_id = Column(Integer, ForeignKey('child.id')) child = relationship("Child", backref="parents") One To One ~~~~~~~~~~~ One To One is essentially a bidirectional relationship with a scalar attribute on both sides. To achieve this, the ``uselist=False`` flag indicates the placement of a scalar attribute instead of a collection on the "many" side of the relationship. To convert one-to-many into one-to-one:: class Parent(Base): __tablename__ = 'parent' id = Column(Integer, primary_key=True) child = relationship("Child", uselist=False, backref="parent") class Child(Base): __tablename__ = 'child' id = Column(Integer, primary_key=True) parent_id = Column(Integer, ForeignKey('parent.id')) Or to turn a one-to-many backref into one-to-one, use the :func:`.backref` function to provide arguments for the reverse side:: class Parent(Base): __tablename__ = 'parent' id = Column(Integer, primary_key=True) child_id = Column(Integer, ForeignKey('child.id')) child = relationship("Child", backref=backref("parent", uselist=False)) class Child(Base): __tablename__ = 'child' id = Column(Integer, primary_key=True) .. _relationships_many_to_many: Many To Many ~~~~~~~~~~~~~ Many to Many adds an association table between two classes. The association table is indicated by the ``secondary`` argument to :func:`.relationship`. Usually, the :class:`.Table` uses the :class:`.MetaData` object associated with the declarative base class, so that the :class:`.ForeignKey` directives can locate the remote tables with which to link:: association_table = Table('association', Base.metadata, Column('left_id', Integer, ForeignKey('left.id')), Column('right_id', Integer, ForeignKey('right.id')) ) class Parent(Base): __tablename__ = 'left' id = Column(Integer, primary_key=True) children = relationship("Child", secondary=association_table) class Child(Base): __tablename__ = 'right' id = Column(Integer, primary_key=True) For a bidirectional relationship, both sides of the relationship contain a collection. The ``backref`` keyword will automatically use the same ``secondary`` argument for the reverse relationship:: association_table = Table('association', Base.metadata, Column('left_id', Integer, ForeignKey('left.id')), Column('right_id', Integer, ForeignKey('right.id')) ) class Parent(Base): __tablename__ = 'left' id = Column(Integer, primary_key=True) children = relationship("Child", secondary=association_table, backref="parents") class Child(Base): __tablename__ = 'right' id = Column(Integer, primary_key=True) The ``secondary`` argument of :func:`.relationship` also accepts a callable that returns the ultimate argument, which is evaluated only when mappers are first used. 
Using this, we can define the ``association_table`` at a later point, as long as it's available to the callable after all module initialization is complete:: class Parent(Base): __tablename__ = 'left' id = Column(Integer, primary_key=True) children = relationship("Child", secondary=lambda: association_table, backref="parents") With the declarative extension in use, the traditional "string name of the table" is accepted as well, matching the name of the table as stored in ``Base.metadata.tables``:: class Parent(Base): __tablename__ = 'left' id = Column(Integer, primary_key=True) children = relationship("Child", secondary="association", backref="parents") Deleting Rows from the Many to Many Table ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ A behavior which is unique to the ``secondary`` argument to :func:`.relationship` is that the :class:`.Table` which is specified here is automatically subject to INSERT and DELETE statements, as objects are added or removed from the collection. There is **no need to delete from this table manually**. The act of removing a record from the collection will have the effect of the row being deleted on flush:: # row will be deleted from the "secondary" table # automatically myparent.children.remove(somechild) A question which often arises is how the row in the "secondary" table can be deleted when the child object is handed directly to :meth:`.Session.delete`:: session.delete(somechild) There are several possibilities here: * If there is a :func:`.relationship` from ``Parent`` to ``Child``, but there is **not** a reverse-relationship that links a particular ``Child`` to each ``Parent``, SQLAlchemy will not have any awareness that when deleting this particular ``Child`` object, it needs to maintain the "secondary" table that links it to the ``Parent``. No delete of the "secondary" table will occur. * If there is a relationship that links a particular ``Child`` to each ``Parent``, suppose it's called ``Child.parents``, SQLAlchemy by default will load in the ``Child.parents`` collection to locate all ``Parent`` objects, and remove each row from the "secondary" table which establishes this link. Note that this relationship does not need to be bidirectional; SQLAlchemy is strictly looking at every :func:`.relationship` associated with the ``Child`` object being deleted. * A higher performing option here is to use ON DELETE CASCADE directives with the foreign keys used by the database. Assuming the database supports this feature, the database itself can be made to automatically delete rows in the "secondary" table as referencing rows in "child" are deleted. SQLAlchemy can be instructed to forego actively loading in the ``Child.parents`` collection in this case using the ``passive_deletes=True`` directive on :func:`.relationship`; see :ref:`passive_deletes` for more details on this. Note again, these behaviors are *only* relevant to the ``secondary`` option used with :func:`.relationship`. If dealing with association tables that are mapped explicitly and are *not* present in the ``secondary`` option of a relevant :func:`.relationship`, cascade rules can be used instead to automatically delete entities in reaction to a related entity being deleted - see :ref:`unitofwork_cascades` for information on this feature. .. _association_pattern: Association Object ~~~~~~~~~~~~~~~~~~ The association object pattern is a variant on many-to-many: it's used when your association table contains additional columns beyond those which are foreign keys to the left and right tables.
Instead of using the ``secondary`` argument, you map a new class directly to the association table. The left side of the relationship references the association object via one-to-many, and the association class references the right side via many-to-one. Below we illustrate an association table mapped to the ``Association`` class which includes a column called ``extra_data``, which is a string value that is stored along with each association between ``Parent`` and ``Child``:: class Association(Base): __tablename__ = 'association' left_id = Column(Integer, ForeignKey('left.id'), primary_key=True) right_id = Column(Integer, ForeignKey('right.id'), primary_key=True) extra_data = Column(String(50)) child = relationship("Child") class Parent(Base): __tablename__ = 'left' id = Column(Integer, primary_key=True) children = relationship("Association") class Child(Base): __tablename__ = 'right' id = Column(Integer, primary_key=True) The bidirectional version adds backrefs to both relationships:: class Association(Base): __tablename__ = 'association' left_id = Column(Integer, ForeignKey('left.id'), primary_key=True) right_id = Column(Integer, ForeignKey('right.id'), primary_key=True) extra_data = Column(String(50)) child = relationship("Child", backref="parent_assocs") class Parent(Base): __tablename__ = 'left' id = Column(Integer, primary_key=True) children = relationship("Association", backref="parent") class Child(Base): __tablename__ = 'right' id = Column(Integer, primary_key=True) Working with the association pattern in its direct form requires that child objects are associated with an association instance before being appended to the parent; similarly, access from parent to child goes through the association object:: # create parent, append a child via association p = Parent() a = Association(extra_data="some data") a.child = Child() p.children.append(a) # iterate through child objects via association, including association # attributes for assoc in p.children: print assoc.extra_data print assoc.child To enhance the association object pattern such that direct access to the ``Association`` object is optional, SQLAlchemy provides the :ref:`associationproxy_toplevel` extension. This extension allows the configuration of attributes which will access two "hops" with a single access, one "hop" to the associated object, and a second to a target attribute. .. note:: When using the association object pattern, it is advisable that the association-mapped table not be used as the ``secondary`` argument on a :func:`.relationship` elsewhere, unless that :func:`.relationship` contains the option ``viewonly=True``. SQLAlchemy otherwise may attempt to emit redundant INSERT and DELETE statements on the same table, if similar state is detected on the related attribute as well as the associated object. .. _self_referential: Adjacency List Relationships ----------------------------- The **adjacency list** pattern is a common relational pattern whereby a table contains a foreign key reference to itself. This is the most common way to represent hierarchical data in flat tables. Other methods include **nested sets**, sometimes called "modified preorder", as well as **materialized path**. 
Despite the appeal that modified preorder has when evaluated for its fluency within SQL queries, the adjacency list model is probably the most appropriate pattern for the large majority of hierarchical storage needs, for reasons of concurrency, reduced complexity, and that modified preorder has little advantage over an application which can fully load subtrees into the application space. In this example, we'll work with a single mapped class called ``Node``, representing a tree structure:: class Node(Base): __tablename__ = 'node' id = Column(Integer, primary_key=True) parent_id = Column(Integer, ForeignKey('node.id')) data = Column(String(50)) children = relationship("Node") With this structure, a graph such as the following:: root --+---> child1 +---> child2 --+--> subchild1 | +--> subchild2 +---> child3 Would be represented with data such as:: id parent_id data --- ------- ---- 1 NULL root 2 1 child1 3 1 child2 4 3 subchild1 5 3 subchild2 6 1 child3 The :func:`.relationship` configuration here works in the same way as a "normal" one-to-many relationship, with the exception that the "direction", i.e. whether the relationship is one-to-many or many-to-one, is assumed by default to be one-to-many. To establish the relationship as many-to-one, an extra directive is added known as ``remote_side``, which is a :class:`.Column` or collection of :class:`.Column` objects that indicate those which should be considered to be "remote":: class Node(Base): __tablename__ = 'node' id = Column(Integer, primary_key=True) parent_id = Column(Integer, ForeignKey('node.id')) data = Column(String(50)) parent = relationship("Node", remote_side=[id]) Where above, the ``id`` column is applied as the ``remote_side`` of the ``parent`` :func:`.relationship`, thus establishing ``parent_id`` as the "local" side, and the relationship then behaves as a many-to-one. As always, both directions can be combined into a bidirectional relationship using the :func:`.backref` function:: class Node(Base): __tablename__ = 'node' id = Column(Integer, primary_key=True) parent_id = Column(Integer, ForeignKey('node.id')) data = Column(String(50)) children = relationship("Node", backref=backref('parent', remote_side=[id]) ) There are several examples included with SQLAlchemy illustrating self-referential strategies; these include :ref:`examples_adjacencylist` and :ref:`examples_xmlpersistence`. Composite Adjacency Lists ~~~~~~~~~~~~~~~~~~~~~~~~~ A sub-category of the adjacency list relationship is the rare case where a particular column is present on both the "local" and "remote" side of the join condition. An example is the ``Folder`` class below; using a composite primary key, the ``account_id`` column refers to itself, to indicate sub folders which are within the same account as that of the parent; while ``folder_id`` refers to a specific folder within that account:: class Folder(Base): __tablename__ = 'folder' __table_args__ = ( ForeignKeyConstraint( ['account_id', 'parent_id'], ['folder.account_id', 'folder.folder_id']), ) account_id = Column(Integer, primary_key=True) folder_id = Column(Integer, primary_key=True) parent_id = Column(Integer) name = Column(String) parent_folder = relationship("Folder", backref="child_folders", remote_side=[account_id, folder_id] ) Above, we pass ``account_id`` into the ``remote_side`` list. 
:func:`.relationship` recognizes that the ``account_id`` column here is on both sides, and aligns the "remote" column along with the ``folder_id`` column, which it recognizes as uniquely present on the "remote" side. .. versionadded:: 0.8 Support for self-referential composite keys in :func:`.relationship` where a column points to itself. Self-Referential Query Strategies ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Querying of self-referential structures works like any other query:: # get all nodes named 'child2' session.query(Node).filter(Node.data=='child2') However, extra care is needed when attempting to join along the foreign key from one level of the tree to the next. In SQL, a join from a table to itself requires that at least one side of the expression be "aliased" so that it can be unambiguously referred to. Recall from :ref:`ormtutorial_aliases` in the ORM tutorial that the :func:`.orm.aliased` construct is normally used to provide an "alias" of an ORM entity. Joining from ``Node`` to itself using this technique looks like: .. sourcecode:: python+sql from sqlalchemy.orm import aliased nodealias = aliased(Node) {sql}session.query(Node).filter(Node.data=='subchild1').\ join(nodealias, Node.parent).\ filter(nodealias.data=="child2").\ all() SELECT node.id AS node_id, node.parent_id AS node_parent_id, node.data AS node_data FROM node JOIN node AS node_1 ON node.parent_id = node_1.id WHERE node.data = ? AND node_1.data = ? ['subchild1', 'child2'] :meth:`.Query.join` also includes a feature known as ``aliased=True`` that can shorten the verbosity of self-referential joins, at the expense of query flexibility. This feature performs a similar "aliasing" step to that above, without the need for an explicit entity. Calls to :meth:`.Query.filter` and similar subsequent to the aliased join will **adapt** the ``Node`` entity to be that of the alias: .. sourcecode:: python+sql {sql}session.query(Node).filter(Node.data=='subchild1').\ join(Node.parent, aliased=True).\ filter(Node.data=='child2').\ all() SELECT node.id AS node_id, node.parent_id AS node_parent_id, node.data AS node_data FROM node JOIN node AS node_1 ON node_1.id = node.parent_id WHERE node.data = ? AND node_1.data = ? ['subchild1', 'child2'] To add criteria at multiple points along a longer join, add ``from_joinpoint=True`` to the additional :meth:`~.Query.join` calls: .. sourcecode:: python+sql # get all nodes named 'subchild1' with a # parent named 'child2' and a grandparent 'root' {sql}session.query(Node).\ filter(Node.data=='subchild1').\ join(Node.parent, aliased=True).\ filter(Node.data=='child2').\ join(Node.parent, aliased=True, from_joinpoint=True).\ filter(Node.data=='root').\ all() SELECT node.id AS node_id, node.parent_id AS node_parent_id, node.data AS node_data FROM node JOIN node AS node_1 ON node_1.id = node.parent_id JOIN node AS node_2 ON node_2.id = node_1.parent_id WHERE node.data = ? AND node_1.data = ? AND node_2.data = ? ['subchild1', 'child2', 'root'] :meth:`.Query.reset_joinpoint` will also remove the "aliasing" from filtering calls:: session.query(Node).\ join(Node.children, aliased=True).\ filter(Node.data == 'foo').\ reset_joinpoint().\ filter(Node.data == 'bar') For an example of using ``aliased=True`` to arbitrarily join along a chain of self-referential nodes, see :ref:`examples_xmlpersistence`.
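The same three-level query can also be written without ``aliased=True``, using explicit :func:`.orm.aliased` entities for each level of the tree. This is a rough sketch for comparison only; the alias names are arbitrary::

    from sqlalchemy.orm import aliased

    parent_alias = aliased(Node)
    grandparent_alias = aliased(Node)

    # equivalent criteria to the aliased=True / from_joinpoint example above,
    # spelled out with explicit aliases
    session.query(Node).\
        filter(Node.data == 'subchild1').\
        join(parent_alias, Node.parent).\
        filter(parent_alias.data == 'child2').\
        join(grandparent_alias, parent_alias.parent).\
        filter(grandparent_alias.data == 'root').\
        all()

The explicit form is more verbose, but keeps each level of the tree addressable by name within the query.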
Configuring Self-Referential Eager Loading ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Eager loading of relationships occurs using joins or outerjoins from parent to child table during a normal query operation, such that the parent and its immediate child collection or reference can be populated from a single SQL statement, or a second statement for all immediate child collections. SQLAlchemy's joined and subquery eager loading use aliased tables in all cases when joining to related items, so are compatible with self-referential joining. However, to use eager loading with a self-referential relationship, SQLAlchemy needs to be told how many levels deep it should join and/or query; otherwise the eager load will not take place at all. This depth setting is configured via ``join_depth``: .. sourcecode:: python+sql class Node(Base): __tablename__ = 'node' id = Column(Integer, primary_key=True) parent_id = Column(Integer, ForeignKey('node.id')) data = Column(String(50)) children = relationship("Node", lazy="joined", join_depth=2) {sql}session.query(Node).all() SELECT node_1.id AS node_1_id, node_1.parent_id AS node_1_parent_id, node_1.data AS node_1_data, node_2.id AS node_2_id, node_2.parent_id AS node_2_parent_id, node_2.data AS node_2_data, node.id AS node_id, node.parent_id AS node_parent_id, node.data AS node_data FROM node LEFT OUTER JOIN node AS node_2 ON node.id = node_2.parent_id LEFT OUTER JOIN node AS node_1 ON node_2.id = node_1.parent_id [] .. _relationships_backref: Linking Relationships with Backref ---------------------------------- The ``backref`` keyword argument was first introduced in :ref:`ormtutorial_toplevel`, and has been mentioned throughout many of the examples here. What does it actually do ? Let's start with the canonical ``User`` and ``Address`` scenario:: from sqlalchemy import Integer, ForeignKey, String, Column from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import relationship Base = declarative_base() class User(Base): __tablename__ = 'user' id = Column(Integer, primary_key=True) name = Column(String) addresses = relationship("Address", backref="user") class Address(Base): __tablename__ = 'address' id = Column(Integer, primary_key=True) email = Column(String) user_id = Column(Integer, ForeignKey('user.id')) The above configuration establishes a collection of ``Address`` objects on ``User`` called ``User.addresses``. It also establishes a ``.user`` attribute on ``Address`` which will refer to the parent ``User`` object. In fact, the ``backref`` keyword is only a common shortcut for placing a second ``relationship`` onto the ``Address`` mapping, including the establishment of an event listener on both sides which will mirror attribute operations in both directions. The above configuration is equivalent to:: from sqlalchemy import Integer, ForeignKey, String, Column from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import relationship Base = declarative_base() class User(Base): __tablename__ = 'user' id = Column(Integer, primary_key=True) name = Column(String) addresses = relationship("Address", back_populates="user") class Address(Base): __tablename__ = 'address' id = Column(Integer, primary_key=True) email = Column(String) user_id = Column(Integer, ForeignKey('user.id')) user = relationship("User", back_populates="addresses") Above, we add a ``.user`` relationship to ``Address`` explicitly. 
On both relationships, the ``back_populates`` directive tells each relationship about the other one, indicating that they should establish "bidirectional" behavior between each other. The primary effect of this configuration is that the relationship adds event handlers to both attributes which have the behavior of "when an append or set event occurs here, set ourselves onto the incoming attribute using this particular attribute name". The behavior is illustrated as follows. Start with a ``User`` and an ``Address`` instance. The ``.addresses`` collection is empty, and the ``.user`` attribute is ``None``:: >>> u1 = User() >>> a1 = Address() >>> u1.addresses [] >>> print a1.user None However, once the ``Address`` is appended to the ``u1.addresses`` collection, both the collection and the scalar attribute have been populated:: >>> u1.addresses.append(a1) >>> u1.addresses [<__main__.Address object at 0x12a6ed0>] >>> a1.user <__main__.User object at 0x12a6590> This behavior of course works in reverse for removal operations as well, as well as for equivalent operations on both sides. Such as when ``.user`` is set again to ``None``, the ``Address`` object is removed from the reverse collection:: >>> a1.user = None >>> u1.addresses [] The manipulation of the ``.addresses`` collection and the ``.user`` attribute occurs entirely in Python without any interaction with the SQL database. Without this behavior, the proper state would be apparent on both sides once the data has been flushed to the database, and later reloaded after a commit or expiration operation occurs. The ``backref``/``back_populates`` behavior has the advantage that common bidirectional operations can reflect the correct state without requiring a database round trip. Remember, when the ``backref`` keyword is used on a single relationship, it's exactly the same as if the above two relationships were created individually using ``back_populates`` on each. Backref Arguments ~~~~~~~~~~~~~~~~~~ We've established that the ``backref`` keyword is merely a shortcut for building two individual :func:`.relationship` constructs that refer to each other. Part of the behavior of this shortcut is that certain configurational arguments applied to the :func:`.relationship` will also be applied to the other direction - namely those arguments that describe the relationship at a schema level, and are unlikely to be different in the reverse direction. The usual case here is a many-to-many :func:`.relationship` that has a ``secondary`` argument, or a one-to-many or many-to-one which has a ``primaryjoin`` argument (the ``primaryjoin`` argument is discussed in :ref:`relationship_primaryjoin`). 
Such as if we limited the list of ``Address`` objects to those which start with "tony":: from sqlalchemy import Integer, ForeignKey, String, Column from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import relationship Base = declarative_base() class User(Base): __tablename__ = 'user' id = Column(Integer, primary_key=True) name = Column(String) addresses = relationship("Address", primaryjoin="and_(User.id==Address.user_id, " "Address.email.startswith('tony'))", backref="user") class Address(Base): __tablename__ = 'address' id = Column(Integer, primary_key=True) email = Column(String) user_id = Column(Integer, ForeignKey('user.id')) We can observe, by inspecting the resulting property, that both sides of the relationship have this join condition applied:: >>> print User.addresses.property.primaryjoin "user".id = address.user_id AND address.email LIKE :email_1 || '%%' >>> >>> print Address.user.property.primaryjoin "user".id = address.user_id AND address.email LIKE :email_1 || '%%' >>> This reuse of arguments should pretty much do the "right thing" - it uses only arguments that are applicable, and in the case of a many-to-many relationship, will reverse the usage of ``primaryjoin`` and ``secondaryjoin`` to correspond to the other direction (see the example in :ref:`self_referential_many_to_many` for this). It's very often the case however that we'd like to specify arguments that are specific to just the side where we happened to place the "backref". This includes :func:`.relationship` arguments like ``lazy``, ``remote_side``, ``cascade`` and ``cascade_backrefs``. For this case we use the :func:`.backref` function in place of a string:: # from sqlalchemy.orm import backref class User(Base): __tablename__ = 'user' id = Column(Integer, primary_key=True) name = Column(String) addresses = relationship("Address", backref=backref("user", lazy="joined")) Where above, we placed a ``lazy="joined"`` directive only on the ``Address.user`` side, indicating that when a query against ``Address`` is made, a join to the ``User`` entity should be made automatically which will populate the ``.user`` attribute of each returned ``Address``. The :func:`.backref` function formatted the arguments we gave it into a form that is interpreted by the receiving :func:`.relationship` as additional arguments to be applied to the new relationship it creates. One Way Backrefs ~~~~~~~~~~~~~~~~~ An unusual case is that of the "one way backref". This is where the "back-populating" behavior of the backref is only desirable in one direction. An example of this is a collection which contains a filtering ``primaryjoin`` condition. We'd like to append items to this collection as needed, and have them populate the "parent" object on the incoming object. However, we'd also like to have items that are not part of the collection, but still have the same "parent" association - these items should never be in the collection. Taking our previous example, where we established a ``primaryjoin`` that limited the collection only to ``Address`` objects whose email address started with the word ``tony``, the usual backref behavior is that all items populate in both directions. We wouldn't want this behavior for a case like the following:: >>> u1 = User() >>> a1 = Address(email='mary') >>> a1.user = u1 >>> u1.addresses [<__main__.Address object at 0x1411910>] Above, the ``Address`` object that doesn't match the criterion of "starts with 'tony'" is present in the ``addresses`` collection of ``u1``. 
After these objects are flushed, the transaction committed and their attributes expired for a re-load, the ``addresses`` collection will hit the database on next access and no longer have this ``Address`` object present, due to the filtering condition. But we can do away with this unwanted side of the "backref" behavior on the Python side by using two separate :func:`.relationship` constructs, placing ``back_populates`` only on one side:: from sqlalchemy import Integer, ForeignKey, String, Column from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import relationship Base = declarative_base() class User(Base): __tablename__ = 'user' id = Column(Integer, primary_key=True) name = Column(String) addresses = relationship("Address", primaryjoin="and_(User.id==Address.user_id, " "Address.email.startswith('tony'))", back_populates="user") class Address(Base): __tablename__ = 'address' id = Column(Integer, primary_key=True) email = Column(String) user_id = Column(Integer, ForeignKey('user.id')) user = relationship("User") With the above scenario, appending an ``Address`` object to the ``.addresses`` collection of a ``User`` will always establish the ``.user`` attribute on that ``Address``:: >>> u1 = User() >>> a1 = Address(email='tony') >>> u1.addresses.append(a1) >>> a1.user <__main__.User object at 0x1411850> However, applying a ``User`` to the ``.user`` attribute of an ``Address``, will not append the ``Address`` object to the collection:: >>> a2 = Address(email='mary') >>> a2.user = u1 >>> a2 in u1.addresses False Of course, we've disabled some of the usefulness of ``backref`` here, in that when we do append an ``Address`` that corresponds to the criteria of ``email.startswith('tony')``, it won't show up in the ``User.addresses`` collection until the session is flushed, and the attributes reloaded after a commit or expire operation. While we could consider an attribute event that checks this criterion in Python, this starts to cross the line of duplicating too much SQL behavior in Python. The backref behavior itself is only a slight transgression of this philosophy - SQLAlchemy tries to keep these to a minimum overall. .. _relationship_configure_joins: Configuring how Relationship Joins ------------------------------------ :func:`.relationship` will normally create a join between two tables by examining the foreign key relationship between the two tables to determine which columns should be compared. There are a variety of situations where this behavior needs to be customized. .. _relationship_foreign_keys: Handling Multiple Join Paths ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ One of the most common situations to deal with is when there are more than one foreign key path between two tables. 
Consider a ``Customer`` class that contains two foreign keys to an ``Address`` class:: from sqlalchemy import Integer, ForeignKey, String, Column from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import relationship Base = declarative_base() class Customer(Base): __tablename__ = 'customer' id = Column(Integer, primary_key=True) name = Column(String) billing_address_id = Column(Integer, ForeignKey("address.id")) shipping_address_id = Column(Integer, ForeignKey("address.id")) billing_address = relationship("Address") shipping_address = relationship("Address") class Address(Base): __tablename__ = 'address' id = Column(Integer, primary_key=True) street = Column(String) city = Column(String) state = Column(String) zip = Column(String) The above mapping, when we attempt to use it, will produce the error:: sqlalchemy.exc.AmbiguousForeignKeysError: Could not determine join condition between parent/child tables on relationship Customer.billing_address - there are multiple foreign key paths linking the tables. Specify the 'foreign_keys' argument, providing a list of those columns which should be counted as containing a foreign key reference to the parent table. The above message is pretty long. There are many potential messages that :func:`.relationship` can return, which have been carefully tailored to detect a variety of common configurational issues; most will suggest the additional configuration that's needed to resolve the ambiguity or other missing information. In this case, the message wants us to qualify each :func:`.relationship` by instructing for each one which foreign key column should be considered, and the appropriate form is as follows:: class Customer(Base): __tablename__ = 'customer' id = Column(Integer, primary_key=True) name = Column(String) billing_address_id = Column(Integer, ForeignKey("address.id")) shipping_address_id = Column(Integer, ForeignKey("address.id")) billing_address = relationship("Address", foreign_keys=[billing_address_id]) shipping_address = relationship("Address", foreign_keys=[shipping_address_id]) Above, we specify the ``foreign_keys`` argument, which is a :class:`.Column` or list of :class:`.Column` objects which indicate those columns to be considered "foreign", or in other words, the columns that contain a value referring to a parent table. Loading the ``Customer.billing_address`` relationship from a ``Customer`` object will use the value present in ``billing_address_id`` in order to identify the row in ``Address`` to be loaded; similarly, ``shipping_address_id`` is used for the ``shipping_address`` relationship. The linkage of the two columns also plays a role during persistence; the newly generated primary key of a just-inserted ``Address`` object will be copied into the appropriate foreign key column of an associated ``Customer`` object during a flush. When specifying ``foreign_keys`` with Declarative, we can also use string names to specify, however it is important that if using a list, the **list is part of the string**:: billing_address = relationship("Address", foreign_keys="[Customer.billing_address_id]") In this specific example, the list is not necessary in any case as there's only one :class:`.Column` we need:: billing_address = relationship("Address", foreign_keys="Customer.billing_address_id") .. versionchanged:: 0.8 :func:`.relationship` can resolve ambiguity between foreign key targets on the basis of the ``foreign_keys`` argument alone; the ``primaryjoin`` argument is no longer needed in this situation. .. 
_relationship_primaryjoin: Specifying Alternate Join Conditions ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The default behavior of :func:`.relationship` when constructing a join is that it equates the value of primary key columns on one side to that of foreign-key-referring columns on the other. We can change this criterion to be anything we'd like using the ``primaryjoin`` argument, as well as the ``secondaryjoin`` argument in the case when a "secondary" table is used. In the example below, using the ``User`` class as well as an ``Address`` class which stores a street address, we create a relationship ``boston_addresses`` which will only load those ``Address`` objects which specify a city of "Boston":: from sqlalchemy import Integer, ForeignKey, String, Column from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import relationship Base = declarative_base() class User(Base): __tablename__ = 'user' id = Column(Integer, primary_key=True) name = Column(String) addresses = relationship("Address", primaryjoin="and_(User.id==Address.user_id, " "Address.city=='Boston')") class Address(Base): __tablename__ = 'address' id = Column(Integer, primary_key=True) user_id = Column(Integer, ForeignKey('user.id')) street = Column(String) city = Column(String) state = Column(String) zip = Column(String) Within this string SQL expression, we made use of the :func:`.and_` conjunction construct to establish two distinct predicates for the join condition - joining both the ``User.id`` and ``Address.user_id`` columns to each other, as well as limiting rows in ``Address`` to just ``city='Boston'``. When using Declarative, rudimentary SQL functions like :func:`.and_` are automatically available in the evaluated namespace of a string :func:`.relationship` argument. The custom criteria we use in a ``primaryjoin`` is generally only significant when SQLAlchemy is rendering SQL in order to load or represent this relationship. That is, it's used in the SQL statement that's emitted in order to perform a per-attribute lazy load, or when a join is constructed at query time, such as via :meth:`.Query.join`, or via the eager "joined" or "subquery" styles of loading. When in-memory objects are being manipulated, we can place any ``Address`` object we'd like into the ``boston_addresses`` collection, regardless of what the value of the ``.city`` attribute is. The objects will remain present in the collection until the attribute is expired and re-loaded from the database where the criterion is applied. When a flush occurs, the objects inside of ``boston_addresses`` will be flushed unconditionally, assigning value of the primary key ``user.id`` column onto the foreign-key-holding ``address.user_id`` column for each row. The ``city`` criteria has no effect here, as the flush process only cares about synchronizing primary key values into referencing foreign key values. .. _relationship_custom_foreign: Creating Custom Foreign Conditions ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Another element of the primary join condition is how those columns considered "foreign" are determined. Usually, some subset of :class:`.Column` objects will specify :class:`.ForeignKey`, or otherwise be part of a :class:`.ForeignKeyConstraint` that's relevant to the join condition. :func:`.relationship` looks to this foreign key status as it decides how it should load and persist data for this relationship. However, the ``primaryjoin`` argument can be used to create a join condition that doesn't involve any "schema" level foreign keys. 
We can combine ``primaryjoin`` along with ``foreign_keys`` and ``remote_side`` explicitly in order to establish such a join. Below, a class ``HostEntry`` joins to itself, equating the string ``content`` column to the ``ip_address`` column, which is a Postgresql type called ``INET``. We need to use :func:`.cast` in order to cast one side of the join to the type of the other:: from sqlalchemy import cast, String, Column, Integer from sqlalchemy.orm import relationship from sqlalchemy.dialects.postgresql import INET from sqlalchemy.ext.declarative import declarative_base Base = declarative_base() class HostEntry(Base): __tablename__ = 'host_entry' id = Column(Integer, primary_key=True) ip_address = Column(INET) content = Column(String(50)) # relationship() using explicit foreign_keys, remote_side parent_host = relationship("HostEntry", primaryjoin=ip_address == cast(content, INET), foreign_keys=content, remote_side=ip_address ) The above relationship will produce a join like:: SELECT host_entry.id, host_entry.ip_address, host_entry.content FROM host_entry JOIN host_entry AS host_entry_1 ON host_entry_1.ip_address = CAST(host_entry.content AS INET) An alternative syntax to the above is to use the :func:`.foreign` and :func:`.remote` :term:`annotations`, inline within the ``primaryjoin`` expression. This syntax represents the annotations that :func:`.relationship` normally applies by itself to the join condition given the ``foreign_keys`` and ``remote_side`` arguments; the functions are provided in the API in the rare case that :func:`.relationship` can't determine the exact location of these features on its own:: from sqlalchemy.orm import foreign, remote class HostEntry(Base): __tablename__ = 'host_entry' id = Column(Integer, primary_key=True) ip_address = Column(INET) content = Column(String(50)) # relationship() using explicit foreign() and remote() annotations # in lieu of separate arguments parent_host = relationship("HostEntry", primaryjoin=remote(ip_address) == \ cast(foreign(content), INET), ) .. _self_referential_many_to_many: Self-Referential Many-to-Many Relationship ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Many to many relationships can be customized by one or both of ``primaryjoin`` and ``secondaryjoin`` - the latter is significant for a relationship that specifies a many-to-many reference using the ``secondary`` argument. A common situation which involves the usage of ``primaryjoin`` and ``secondaryjoin`` is when establishing a many-to-many relationship from a class to itself, as shown below:: from sqlalchemy import Integer, ForeignKey, String, Column, Table from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import relationship Base = declarative_base() node_to_node = Table("node_to_node", Base.metadata, Column("left_node_id", Integer, ForeignKey("node.id"), primary_key=True), Column("right_node_id", Integer, ForeignKey("node.id"), primary_key=True) ) class Node(Base): __tablename__ = 'node' id = Column(Integer, primary_key=True) label = Column(String) right_nodes = relationship("Node", secondary=node_to_node, primaryjoin=id==node_to_node.c.left_node_id, secondaryjoin=id==node_to_node.c.right_node_id, backref="left_nodes" ) Where above, SQLAlchemy can't know automatically which columns should connect to which for the ``right_nodes`` and ``left_nodes`` relationships. The ``primaryjoin`` and ``secondaryjoin`` arguments establish how we'd like to join to the association table. 
In the Declarative form above, as we are declaring these conditions within the Python block that corresponds to the ``Node`` class, the ``id`` variable is available directly as the ``Column`` object we wish to join with. A classical mapping situation here is similar, where ``node_to_node`` can be joined to ``node.c.id``:: from sqlalchemy import Integer, ForeignKey, String, Column, Table, MetaData from sqlalchemy.orm import relationship, mapper metadata = MetaData() node_to_node = Table("node_to_node", metadata, Column("left_node_id", Integer, ForeignKey("node.id"), primary_key=True), Column("right_node_id", Integer, ForeignKey("node.id"), primary_key=True) ) node = Table("node", metadata, Column('id', Integer, primary_key=True), Column('label', String) ) class Node(object): pass mapper(Node, node, properties={ 'right_nodes':relationship(Node, secondary=node_to_node, primaryjoin=node.c.id==node_to_node.c.left_node_id, secondaryjoin=node.c.id==node_to_node.c.right_node_id, backref="left_nodes" )}) Note that in both examples, the ``backref`` keyword specifies a ``left_nodes`` backref - when :func:`.relationship` creates the second relationship in the reverse direction, it's smart enough to reverse the ``primaryjoin`` and ``secondaryjoin`` arguments. Building Query-Enabled Properties ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Very ambitious custom join conditions may fail to be directly persistable, and in some cases may not even load correctly. To remove the persistence part of the equation, use the flag ``viewonly=True`` on the :func:`~sqlalchemy.orm.relationship`, which establishes it as a read-only attribute (data written to the collection will be ignored on flush()). However, in extreme cases, consider using a regular Python property in conjunction with :class:`~sqlalchemy.orm.query.Query` as follows: .. sourcecode:: python+sql class User(Base): __tablename__ = 'user' id = Column(Integer, primary_key=True) def _get_addresses(self): return object_session(self).query(Address).with_parent(self).filter(...).all() addresses = property(_get_addresses) .. _post_update: Rows that point to themselves / Mutually Dependent Rows ------------------------------------------------------- This is a very specific case where relationship() must perform an INSERT and a second UPDATE in order to properly populate a row (and vice versa an UPDATE and DELETE in order to delete without violating foreign key constraints). The two use cases are: * A table contains a foreign key to itself, and a single row will have a foreign key value pointing to its own primary key. * Two tables each contain a foreign key referencing the other table, with a row in each table referencing the other. For example:: user --------------------------------- user_id name related_user_id 1 'ed' 1 Or:: widget entry ------------------------------------------- --------------------------------- widget_id name favorite_entry_id entry_id name widget_id 1 'somewidget' 5 5 'someentry' 1 In the first case, a row points to itself. Technically, a database that uses sequences such as PostgreSQL or Oracle can INSERT the row at once using a previously generated value, but databases which rely upon autoincrement-style primary key identifiers cannot. The :func:`~sqlalchemy.orm.relationship` always assumes a "parent/child" model of row population during flush, so unless you are populating the primary key/foreign key columns directly, :func:`~sqlalchemy.orm.relationship` needs to use two statements. 
In the second case, the "widget" row must be inserted before any referring "entry" rows, but then the "favorite_entry_id" column of that "widget" row cannot be set until the "entry" rows have been generated. In this case, it's typically impossible to insert the "widget" and "entry" rows using just two INSERT statements; an UPDATE must be performed in order to keep foreign key constraints fulfilled. The exception is if the foreign keys are configured as "deferred until commit" (a feature some databases support) and if the identifiers were populated manually (again essentially bypassing :func:`~sqlalchemy.orm.relationship`). To enable the usage of a supplementary UPDATE statement, we use the ``post_update`` option of :func:`.relationship`. This specifies that the linkage between the two rows should be created using an UPDATE statement after both rows have been INSERTED; it also causes the rows to be de-associated with each other via UPDATE before a DELETE is emitted. The flag should be placed on just *one* of the relationships, preferably the many-to-one side. Below we illustrate a complete example, including two :class:`.ForeignKey` constructs, one which specifies ``use_alter=True`` to help with emitting CREATE TABLE statements:: from sqlalchemy import Integer, ForeignKey, String, Column from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import relationship Base = declarative_base() class Entry(Base): __tablename__ = 'entry' entry_id = Column(Integer, primary_key=True) widget_id = Column(Integer, ForeignKey('widget.widget_id')) name = Column(String(50)) class Widget(Base): __tablename__ = 'widget' widget_id = Column(Integer, primary_key=True) favorite_entry_id = Column(Integer, ForeignKey('entry.entry_id', use_alter=True, name="fk_favorite_entry")) name = Column(String(50)) entries = relationship(Entry, primaryjoin= widget_id==Entry.widget_id) favorite_entry = relationship(Entry, primaryjoin= favorite_entry_id==Entry.entry_id, post_update=True) When a structure against the above configuration is flushed, the "widget" row will be INSERTed minus the "favorite_entry_id" value, then all the "entry" rows will be INSERTed referencing the parent "widget" row, and then an UPDATE statement will populate the "favorite_entry_id" column of the "widget" table (it's one row at a time for the time being): .. sourcecode:: pycon+sql >>> w1 = Widget(name='somewidget') >>> e1 = Entry(name='someentry') >>> w1.favorite_entry = e1 >>> w1.entries = [e1] >>> session.add_all([w1, e1]) {sql}>>> session.commit() BEGIN (implicit) INSERT INTO widget (favorite_entry_id, name) VALUES (?, ?) (None, 'somewidget') INSERT INTO entry (widget_id, name) VALUES (?, ?) (1, 'someentry') UPDATE widget SET favorite_entry_id=? WHERE widget.widget_id = ? (1, 1) COMMIT An additional configuration we can specify is to supply a more comprehensive foreign key constraint on ``Widget``, such that it's guaranteed that ``favorite_entry_id`` refers to an ``Entry`` that also refers to this ``Widget``.
We can use a composite foreign key, as illustrated below:: from sqlalchemy import Integer, ForeignKey, String, \ Column, UniqueConstraint, ForeignKeyConstraint from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import relationship Base = declarative_base() class Entry(Base): __tablename__ = 'entry' entry_id = Column(Integer, primary_key=True) widget_id = Column(Integer, ForeignKey('widget.widget_id')) name = Column(String(50)) __table_args__ = ( UniqueConstraint("entry_id", "widget_id"), ) class Widget(Base): __tablename__ = 'widget' widget_id = Column(Integer, autoincrement='ignore_fk', primary_key=True) favorite_entry_id = Column(Integer) name = Column(String(50)) __table_args__ = ( ForeignKeyConstraint( ["widget_id", "favorite_entry_id"], ["entry.widget_id", "entry.entry_id"], name="fk_favorite_entry", use_alter=True ), ) entries = relationship(Entry, primaryjoin= widget_id==Entry.widget_id, foreign_keys=Entry.widget_id) favorite_entry = relationship(Entry, primaryjoin= favorite_entry_id==Entry.entry_id, foreign_keys=favorite_entry_id, post_update=True) The above mapping features a composite :class:`.ForeignKeyConstraint` bridging the ``widget_id`` and ``favorite_entry_id`` columns. To ensure that ``Widget.widget_id`` remains an "autoincrementing" column we specify ``autoincrement='ignore_fk'`` on :class:`.Column`, and additionally on each :func:`.relationship` we must limit those columns considered as part of the foreign key for the purposes of joining and cross-population. .. versionadded:: 0.7.4 ``autoincrement='ignore_fk'`` on :class:`.Column`\ . .. _passive_updates: Mutable Primary Keys / Update Cascades --------------------------------------- When the primary key of an entity changes, related items which reference the primary key must also be updated as well. For databases which enforce referential integrity, it's required to use the database's ON UPDATE CASCADE functionality in order to propagate primary key changes to referenced foreign keys - the values cannot be out of sync for any moment. For databases that don't support this, such as SQLite and MySQL without their referential integrity options turned on, the ``passive_updates`` flag can be set to ``False``, most preferably on a one-to-many or many-to-many :func:`.relationship`, which instructs SQLAlchemy to issue UPDATE statements individually for objects referenced in the collection, loading them into memory if not already locally present. The ``passive_updates`` flag can also be ``False`` in conjunction with ON UPDATE CASCADE functionality, although in that case the unit of work will be issuing extra SELECT and UPDATE statements unnecessarily. A typical mutable primary key setup might look like:: class User(Base): __tablename__ = 'user' username = Column(String(50), primary_key=True) fullname = Column(String(100)) # passive_updates=False *only* needed if the database # does not implement ON UPDATE CASCADE addresses = relationship("Address", passive_updates=False) class Address(Base): __tablename__ = 'address' email = Column(String(50), primary_key=True) username = Column(String(50), ForeignKey('user.username', onupdate="cascade") ) ``passive_updates`` is set to ``True`` by default, indicating that ON UPDATE CASCADE is expected to be in place in the usual case for foreign keys that expect to have a mutating parent key. ``passive_updates=False`` may be configured on any direction of relationship, i.e. 
one-to-many, many-to-one, and many-to-many, although it is much more effective when placed just on the one-to-many or many-to-many side. Configuring the ``passive_updates=False`` only on the many-to-one side will have only a partial effect, as the unit of work searches only through the current identity map for objects that may be referencing the one with a mutating primary key, not throughout the database. Relationships API ----------------- .. autofunction:: relationship .. autofunction:: backref .. autofunction:: relation .. autofunction:: dynamic_loader .. autofunction:: foreign .. autofunction:: remote SQLAlchemy-0.8.4/doc/build/orm/session.rst0000644000076500000240000026053012251150015021105 0ustar classicstaff00000000000000.. _session_toplevel: ================= Using the Session ================= .. module:: sqlalchemy.orm.session The :func:`.orm.mapper` function and :mod:`~sqlalchemy.ext.declarative` extensions are the primary configurational interface for the ORM. Once mappings are configured, the primary usage interface for persistence operations is the :class:`.Session`. What does the Session do ? ========================== In the most general sense, the :class:`~.Session` establishes all conversations with the database and represents a "holding zone" for all the objects which you've loaded or associated with it during its lifespan. It provides the entrypoint to acquire a :class:`.Query` object, which sends queries to the database using the :class:`~.Session` object's current database connection, populating result rows into objects that are then stored in the :class:`.Session`, inside a structure called the `Identity Map `_ - a data structure that maintains unique copies of each object, where "unique" means "only one object with a particular primary key". The :class:`.Session` begins in an essentially stateless form. Once queries are issued or other objects are persisted with it, it requests a connection resource from an :class:`.Engine` that is associated either with the :class:`.Session` itself or with the mapped :class:`.Table` objects being operated upon. This connection represents an ongoing transaction, which remains in effect until the :class:`.Session` is instructed to commit or roll back its pending state. All changes to objects maintained by a :class:`.Session` are tracked - before the database is queried again or before the current transaction is committed, it **flushes** all pending changes to the database. This is known as the `Unit of Work `_ pattern. When using a :class:`.Session`, it's important to note that the objects which are associated with it are **proxy objects** to the transaction being held by the :class:`.Session` - there are a variety of events that will cause objects to re-access the database in order to keep synchronized. It is possible to "detach" objects from a :class:`.Session`, and to continue using them, though this practice has its caveats. It's intended that usually, you'd re-associate detached objects with another :class:`.Session` when you want to work with them again, so that they can resume their normal task of representing database state. .. _session_getting: Getting a Session ================= :class:`.Session` is a regular Python class which can be directly instantiated. 
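For instance, a one-off :class:`.Session` can be constructed directly against an :class:`.Engine`; this is a minimal sketch, where the in-memory SQLite URL is only for illustration::

    from sqlalchemy import create_engine
    from sqlalchemy.orm import Session

    engine = create_engine('sqlite://')

    # construct a Session bound directly to the engine
    session = Session(bind=engine)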
However, to standardize how sessions are configured and acquired, the :class:`.sessionmaker` class is normally used to create a top level :class:`.Session` configuration which can then be used throughout an application without the need to repeat the configurational arguments. The usage of :class:`.sessionmaker` is illustrated below: .. sourcecode:: python+sql from sqlalchemy import create_engine from sqlalchemy.orm import sessionmaker # an Engine, which the Session will use for connection # resources some_engine = create_engine('postgresql://scott:tiger@localhost/') # create a configured "Session" class Session = sessionmaker(bind=some_engine) # create a Session session = Session() # work with the session myobject = MyObject('foo', 'bar') session.add(myobject) session.commit() Above, the :class:`.sessionmaker` call creates a factory for us, which we assign to the name ``Session``. This factory, when called, will create a new :class:`.Session` object using the configurational arguments we've given the factory. In this case, as is typical, we've configured the factory to specify a particular :class:`.Engine` for connection resources. A typical setup will associate the :class:`.sessionmaker` with an :class:`.Engine`, so that each :class:`.Session` generated will use this :class:`.Engine` to acquire connection resources. This association can be set up as in the example above, using the ``bind`` argument. When you write your application, place the :class:`.sessionmaker` factory at the global level. This factory can then be used by the rest of the application as the source of new :class:`.Session` instances, keeping the configuration for how :class:`.Session` objects are constructed in one place. The :class:`.sessionmaker` factory can also be used in conjunction with other helpers, which are passed a user-defined :class:`.sessionmaker` that is then maintained by the helper. Some of these helpers are discussed in the section :ref:`session_faq_whentocreate`. Adding Additional Configuration to an Existing sessionmaker() -------------------------------------------------------------- A common scenario is where the :class:`.sessionmaker` is invoked at module import time; however, the generation of one or more :class:`.Engine` instances to be associated with the :class:`.sessionmaker` has not yet proceeded. For this use case, the :class:`.sessionmaker` construct offers the :meth:`.sessionmaker.configure` method, which will place additional configuration directives into an existing :class:`.sessionmaker` that will take place when the construct is invoked:: from sqlalchemy.orm import sessionmaker from sqlalchemy import create_engine # configure Session class with desired options Session = sessionmaker() # later, we create the engine engine = create_engine('postgresql://...') # associate it with our custom Session class Session.configure(bind=engine) # work with the session session = Session() Creating Ad-Hoc Session Objects with Alternate Arguments --------------------------------------------------------- For the use case where an application needs to create a new :class:`.Session` with special arguments that deviate from what is normally used throughout the application, such as a :class:`.Session` that binds to an alternate source of connectivity, or a :class:`.Session` that should have other arguments such as ``expire_on_commit`` established differently from what most of the application wants, specific arguments can be passed to the :class:`.sessionmaker` factory's :meth:`.sessionmaker.__call__` method.
These arguments will override whatever configurations have already been placed, such as below, where a new :class:`.Session` is constructed against a specific :class:`.Connection`:: # at the module level, the global sessionmaker, # bound to a specific Engine Session = sessionmaker(bind=engine) # later, some unit of code wants to create a # Session that is bound to a specific Connection conn = engine.connect() session = Session(bind=conn) The typical rationale for the association of a :class:`.Session` with a specific :class:`.Connection` is that of a test fixture that maintains an external transaction - see :ref:`session_external_transaction` for an example of this. Using the Session ================== .. _session_object_states: Quickie Intro to Object States ------------------------------ It's helpful to know the states which an instance can have within a session: * **Transient** - an instance that's not in a session, and is not saved to the database; i.e. it has no database identity. The only relationship such an object has to the ORM is that its class has a ``mapper()`` associated with it. * **Pending** - when you :meth:`~.Session.add` a transient instance, it becomes pending. It still wasn't actually flushed to the database yet, but it will be when the next flush occurs. * **Persistent** - An instance which is present in the session and has a record in the database. You get persistent instances by either flushing so that the pending instances become persistent, or by querying the database for existing instances (or moving persistent instances from other sessions into your local session). * **Detached** - an instance which has a record in the database, but is not in any session. There's nothing wrong with this, and you can use objects normally when they're detached, **except** they will not be able to issue any SQL in order to load collections or attributes which are not yet loaded, or were marked as "expired". Knowing these states is important, since the :class:`.Session` tries to be strict about ambiguous operations (such as trying to save the same object to two different sessions at the same time). .. _session_faq: Session Frequently Asked Questions ----------------------------------- When do I make a :class:`.sessionmaker`? ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Just one time, somewhere in your application's global scope. It should be looked upon as part of your application's configuration. If your application has three .py files in a package, you could, for example, place the :class:`.sessionmaker` line in your ``__init__.py`` file; from that point on your other modules say "from mypackage import Session". That way, everyone else just uses :class:`.Session()`, and the configuration of that session is controlled by that central point. If your application starts up, does imports, but does not know what database it's going to be connecting to, you can bind the :class:`.Session` at the "class" level to the engine later on, using :meth:`.sessionmaker.configure`. In the examples in this section, we will frequently show the :class:`.sessionmaker` being created right above the line where we actually invoke :class:`.Session`. But that's just for example's sake! In reality, the :class:`.sessionmaker` would be somewhere at the module level. The calls to instantiate :class:`.Session` would then be placed at the point in the application where database conversations begin. .. _session_faq_whentocreate: When do I construct a :class:`.Session`, when do I commit it, and when do I close it? 
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. topic:: tl;dr; As a general rule, keep the lifecycle of the session **separate and external** from functions and objects that access and/or manipulate database data. A :class:`.Session` is typically constructed at the beginning of a logical operation where database access is potentially anticipated. The :class:`.Session`, whenever it is used to talk to the database, begins a database transaction as soon as it starts communicating. Assuming the ``autocommit`` flag is left at its recommended default of ``False``, this transaction remains in progress until the :class:`.Session` is rolled back, committed, or closed. The :class:`.Session` will begin a new transaction if it is used again, subsequent to the previous transaction ending; from this it follows that the :class:`.Session` is capable of having a lifespan across many transactions, though only one at a time. We refer to these two concepts as **transaction scope** and **session scope**. The implication here is that the SQLAlchemy ORM is encouraging the developer to establish these two scopes in their application, including not only when the scopes begin and end, but also the expanse of those scopes, for example should a single :class:`.Session` instance be local to the execution flow within a function or method, should it be a global object used by the entire application, or somewhere in between these two. The burden placed on the developer to determine this scope is one area where the SQLAlchemy ORM necessarily has a strong opinion about how the database should be used. The :term:`unit of work` pattern is specifically one of accumulating changes over time and flushing them periodically, keeping in-memory state in sync with what's known to be present in a local transaction. This pattern is only effective when meaningful transaction scopes are in place. It's usually not very hard to determine the best points at which to begin and end the scope of a :class:`.Session`, though the wide variety of application architectures possible can introduce challenging situations. A common choice is to tear down the :class:`.Session` at the same time the transaction ends, meaning the transaction and session scopes are the same. This is a great choice to start out with as it removes the need to consider session scope as separate from transaction scope. While there's no one-size-fits-all recommendation for how transaction scope should be determined, there are common patterns. Especially if one is writing a web application, the choice is pretty much established. A web application is the easiest case because such an appication is already constructed around a single, consistent scope - this is the **request**, which represents an incoming request from a browser, the processing of that request to formulate a response, and finally the delivery of that response back to the client. Integrating web applications with the :class:`.Session` is then the straightforward task of linking the scope of the :class:`.Session` to that of the request. The :class:`.Session` can be established as the request begins, or using a :term:`lazy initialization` pattern which establishes one as soon as it is needed. The request then proceeds, with some system in place where application logic can access the current :class:`.Session` in a manner associated with how the actual request object is accessed. 
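As a sketch only - the hook and attribute names below are hypothetical,
standing in for whatever a particular web framework actually provides - the
"lazy initialization" approach can be as simple as memoizing a
:class:`.Session` on the request object::

    from sqlalchemy.orm import sessionmaker

    Session = sessionmaker(bind=engine)

    def current_session(request):
        # create the request's Session the first time it's asked for, then
        # keep handing back that same Session for the rest of the request
        if not hasattr(request, "_sa_session"):
            request._sa_session = Session()
        return request._sa_session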
As the request ends, the :class:`.Session` is torn down as well, usually through
the usage of event hooks provided by the web framework.  The transaction used by
the :class:`.Session` may also be committed at this point, or alternatively the
application may opt for an explicit commit pattern, only committing for those
requests where one is warranted, but still always tearing down the
:class:`.Session` unconditionally at the end.

Most web frameworks include infrastructure to establish a single
:class:`.Session`, associated with the request, which is correctly constructed
and torn down at the end of a request.  Such infrastructure pieces include
products such as `Flask-SQLAlchemy `_, for usage in conjunction with the Flask
web framework, and `Zope-SQLAlchemy `_, for usage in conjunction with the
Pyramid and Zope frameworks.  SQLAlchemy strongly recommends that these products
be used as available.

In those situations where integration libraries are not available, SQLAlchemy
includes its own "helper" class known as :class:`.scoped_session`.  A tutorial
on the usage of this object is at :ref:`unitofwork_contextual`.  It provides
both a quick way to associate a :class:`.Session` with the current thread, as
well as patterns to associate :class:`.Session` objects with other kinds of
scopes.

As mentioned before, for non-web applications there is no one clear pattern, as
applications themselves don't have just one pattern of architecture.  The best
strategy is to attempt to demarcate "operations", points at which a particular
thread begins to perform a series of operations for some period of time, which
can be committed at the end.  Some examples:

* A background daemon which spawns off child forks would want to create a
  :class:`.Session` local to each child process, work with that
  :class:`.Session` through the life of the "job" that the fork is handling,
  then tear it down when the job is completed.

* For a command-line script, the application would create a single, global
  :class:`.Session` that is established when the program begins to do its
  work, and commits it right as the program is completing its task.

* For a GUI interface-driven application, the scope of the :class:`.Session`
  may best be within the scope of a user-generated event, such as a button
  push.  Or, the scope may correspond to explicit user interaction, such as
  the user "opening" a series of records, then "saving" them.

As a general rule, the application should manage the lifecycle of the
session *externally* to functions that deal with specific data.  This is a
fundamental separation of concerns which keeps data-specific operations
agnostic of the context in which they access and manipulate that data.

E.g.
**don't do this**:: ### this is the **wrong way to do it** ### class ThingOne(object): def go(self): session = Session() try: session.query(FooBar).update({"x": 5}) session.commit() except: session.rollback() raise class ThingTwo(object): def go(self): session = Session() try: session.query(Widget).update({"q": 18}) session.commit() except: session.rollback() raise def run_my_program(): ThingOne().go() ThingTwo().go() Keep the lifecycle of the session (and usually the transaction) **separate and external**:: ### this is a **better** (but not the only) way to do it ### class ThingOne(object): def go(self, session): session.query(FooBar).update({"x": 5}) class ThingTwo(object): def go(self, session): session.query(Widget).update({"q": 18}) def run_my_program(): session = Session() try: ThingOne().go(session) ThingTwo().go(session) session.commit() except: session.rollback() raise finally: session.close() The advanced developer will try to keep the details of session, transaction and exception management as far as possible from the details of the program doing its work. For example, we can further separate concerns using a `context manager `_:: ### another way (but again *not the only way*) to do it ### from contextlib import contextmanager @contextmanager def session_scope(): """Provide a transactional scope around a series of operations.""" session = Session() try: yield session session.commit() except: session.rollback() raise finally: session.close() def run_my_program(): with session_scope() as session: ThingOne().go(session) ThingTwo().go(session) Is the Session a cache? ~~~~~~~~~~~~~~~~~~~~~~~~~~~ Yeee...no. It's somewhat used as a cache, in that it implements the :term:`identity map` pattern, and stores objects keyed to their primary key. However, it doesn't do any kind of query caching. This means, if you say ``session.query(Foo).filter_by(name='bar')``, even if ``Foo(name='bar')`` is right there, in the identity map, the session has no idea about that. It has to issue SQL to the database, get the rows back, and then when it sees the primary key in the row, *then* it can look in the local identity map and see that the object is already there. It's only when you say ``query.get({some primary key})`` that the :class:`~sqlalchemy.orm.session.Session` doesn't have to issue a query. Additionally, the Session stores object instances using a weak reference by default. This also defeats the purpose of using the Session as a cache. The :class:`.Session` is not designed to be a global object from which everyone consults as a "registry" of objects. That's more the job of a **second level cache**. SQLAlchemy provides a pattern for implementing second level caching using `dogpile.cache `_, via the :ref:`examples_caching` example. How can I get the :class:`~sqlalchemy.orm.session.Session` for a certain object? ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Use the :meth:`~.Session.object_session` classmethod available on :class:`~sqlalchemy.orm.session.Session`:: session = Session.object_session(someobject) The newer :ref:`core_inspection_toplevel` system can also be used:: from sqlalchemy import inspect session = inspect(object).session .. _session_faq_threadsafe: Is the session thread-safe? ~~~~~~~~~~~~~~~~~~~~~~~~~~~ The :class:`.Session` is very much intended to be used in a **non-concurrent** fashion, which usually means in only one thread at a time. 
The :class:`.Session` should be used in such a way that one instance exists for a single series of operations within a single transaction. One expedient way to get this effect is by associating a :class:`.Session` with the current thread (see :ref:`unitofwork_contextual` for background). Another is to use a pattern where the :class:`.Session` is passed between functions and is otherwise not shared with other threads. The bigger point is that you should not *want* to use the session with multiple concurrent threads. That would be like having everyone at a restaurant all eat from the same plate. The session is a local "workspace" that you use for a specific set of tasks; you don't want to, or need to, share that session with other threads who are doing some other task. Making sure the :class:`.Session` is only used in a single concurrent thread at a time is called a "share nothing" approach to concurrency. But actually, not sharing the :class:`.Session` implies a more significant pattern; it means not just the :class:`.Session` object itself, but also **all objects that are associated with that Session**, must be kept within the scope of a single concurrent thread. The set of mapped objects associated with a :class:`.Session` are essentially proxies for data within database rows accessed over a database connection, and so just like the :class:`.Session` itself, the whole set of objects is really just a large-scale proxy for a database connection (or connections). Ultimately, it's mostly the DBAPI connection itself that we're keeping away from concurrent access; but since the :class:`.Session` and all the objects associated with it are all proxies for that DBAPI connection, the entire graph is essentially not safe for concurrent access. If there are in fact multiple threads participating in the same task, then you may consider sharing the session and its objects between those threads; however, in this extremely unusual scenario the application would need to ensure that a proper locking scheme is implemented so that there isn't *concurrent* access to the :class:`.Session` or its state. A more common approach to this situation is to maintain a single :class:`.Session` per concurrent thread, but to instead *copy* objects from one :class:`.Session` to another, often using the :meth:`.Session.merge` method to copy the state of an object into a new object local to a different :class:`.Session`. Querying -------- The :meth:`~.Session.query` function takes one or more *entities* and returns a new :class:`~sqlalchemy.orm.query.Query` object which will issue mapper queries within the context of this Session. An entity is defined as a mapped class, a :class:`~sqlalchemy.orm.mapper.Mapper` object, an orm-enabled *descriptor*, or an ``AliasedClass`` object:: # query from a class session.query(User).filter_by(name='ed').all() # query with multiple classes, returns tuples session.query(User, Address).join('addresses').filter_by(name='ed').all() # query using orm-enabled descriptors session.query(User.name, User.fullname).all() # query from a mapper user_mapper = class_mapper(User) session.query(user_mapper) When :class:`~sqlalchemy.orm.query.Query` returns results, each object instantiated is stored within the identity map. When a row matches an object which is already present, the same object is returned. In the latter case, whether or not the row is populated onto an existing object depends upon whether the attributes of the instance have been *expired* or not. 
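For example, two queries that locate the same row will hand back the same
object (an illustrative sketch, assuming a mapped ``User`` class)::

    u1 = session.query(User).get(5)
    u2 = session.query(User).filter_by(id=5).first()

    # both queries resolved to primary key 5, so the identity map
    # ensures that the very same User instance is returned
    assert u1 is u2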
A default-configured :class:`~sqlalchemy.orm.session.Session` automatically expires all instances along transaction boundaries, so that with a normally isolated transaction, there shouldn't be any issue of instances representing data which is stale with regards to the current transaction. The :class:`.Query` object is introduced in great detail in :ref:`ormtutorial_toplevel`, and further documented in :ref:`query_api_toplevel`. Adding New or Existing Items ---------------------------- :meth:`~.Session.add` is used to place instances in the session. For *transient* (i.e. brand new) instances, this will have the effect of an INSERT taking place for those instances upon the next flush. For instances which are *persistent* (i.e. were loaded by this session), they are already present and do not need to be added. Instances which are *detached* (i.e. have been removed from a session) may be re-associated with a session using this method:: user1 = User(name='user1') user2 = User(name='user2') session.add(user1) session.add(user2) session.commit() # write changes to the database To add a list of items to the session at once, use :meth:`~.Session.add_all`:: session.add_all([item1, item2, item3]) The :meth:`~.Session.add` operation **cascades** along the ``save-update`` cascade. For more details see the section :ref:`unitofwork_cascades`. .. _unitofwork_merging: Merging ------- :meth:`~.Session.merge` transfers state from an outside object into a new or already existing instance within a session. It also reconciles the incoming data against the state of the database, producing a history stream which will be applied towards the next flush, or alternatively can be made to produce a simple "transfer" of state without producing change history or accessing the database. Usage is as follows:: merged_object = session.merge(existing_object) When given an instance, it follows these steps: * It examines the primary key of the instance. If it's present, it attempts to locate that instance in the local identity map. If the ``load=True`` flag is left at its default, it also checks the database for this primary key if not located locally. * If the given instance has no primary key, or if no instance can be found with the primary key given, a new instance is created. * The state of the given instance is then copied onto the located/newly created instance. For attributes which are present on the source instance, the value is transferred to the target instance. For mapped attributes which aren't present on the source, the attribute is expired on the target instance, discarding its existing value. If the ``load=True`` flag is left at its default, this copy process emits events and will load the target object's unloaded collections for each attribute present on the source object, so that the incoming state can be reconciled against what's present in the database. If ``load`` is passed as ``False``, the incoming data is "stamped" directly without producing any history. * The operation is cascaded to related objects and collections, as indicated by the ``merge`` cascade (see :ref:`unitofwork_cascades`). * The new instance is returned. With :meth:`~.Session.merge`, the given "source" instance is not modifed nor is it associated with the target :class:`.Session`, and remains available to be merged with any number of other :class:`.Session` objects. 
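The sketch below (assuming a mapped ``User`` class) illustrates that the
*return value* of :meth:`~.Session.merge` is what becomes part of the
:class:`.Session`, while the object passed in stays detached::

    detached = User(id=5, name='updated name')   # plain object, not in any Session

    merged = session.merge(detached)

    assert merged in session
    assert detached not in session

    # subsequent changes should be made on the merged copy
    merged.name = 'another name'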
:meth:`~.Session.merge` is useful for taking the state of any kind of object structure without regard for its origins or current session associations and copying its state into a new session. Here's some examples: * An application which reads an object structure from a file and wishes to save it to the database might parse the file, build up the structure, and then use :meth:`~.Session.merge` to save it to the database, ensuring that the data within the file is used to formulate the primary key of each element of the structure. Later, when the file has changed, the same process can be re-run, producing a slightly different object structure, which can then be ``merged`` in again, and the :class:`~sqlalchemy.orm.session.Session` will automatically update the database to reflect those changes, loading each object from the database by primary key and then updating its state with the new state given. * An application is storing objects in an in-memory cache, shared by many :class:`.Session` objects simultaneously. :meth:`~.Session.merge` is used each time an object is retrieved from the cache to create a local copy of it in each :class:`.Session` which requests it. The cached object remains detached; only its state is moved into copies of itself that are local to individual :class:`~.Session` objects. In the caching use case, it's common that the ``load=False`` flag is used to remove the overhead of reconciling the object's state with the database. There's also a "bulk" version of :meth:`~.Session.merge` called :meth:`~.Query.merge_result` that was designed to work with cache-extended :class:`.Query` objects - see the section :ref:`examples_caching`. * An application wants to transfer the state of a series of objects into a :class:`.Session` maintained by a worker thread or other concurrent system. :meth:`~.Session.merge` makes a copy of each object to be placed into this new :class:`.Session`. At the end of the operation, the parent thread/process maintains the objects it started with, and the thread/worker can proceed with local copies of those objects. In the "transfer between threads/processes" use case, the application may want to use the ``load=False`` flag as well to avoid overhead and redundant SQL queries as the data is transferred. Merge Tips ~~~~~~~~~~ :meth:`~.Session.merge` is an extremely useful method for many purposes. However, it deals with the intricate border between objects that are transient/detached and those that are persistent, as well as the automated transferrence of state. The wide variety of scenarios that can present themselves here often require a more careful approach to the state of objects. Common problems with merge usually involve some unexpected state regarding the object being passed to :meth:`~.Session.merge`. 
Let's use the canonical example of the User and Address objects::

    class User(Base):
        __tablename__ = 'user'

        id = Column(Integer, primary_key=True)
        name = Column(String(50), nullable=False)
        addresses = relationship("Address", backref="user")

    class Address(Base):
        __tablename__ = 'address'

        id = Column(Integer, primary_key=True)
        email_address = Column(String(50), nullable=False)
        user_id = Column(Integer, ForeignKey('user.id'), nullable=False)

Assume a ``User`` object with one ``Address``, already persistent::

    >>> u1 = User(name='ed', addresses=[Address(email_address='ed@ed.com')])
    >>> session.add(u1)
    >>> session.commit()

We now create ``a1``, an object outside the session, which we'd like
to merge on top of the existing ``Address``::

    >>> existing_a1 = u1.addresses[0]
    >>> a1 = Address(id=existing_a1.id)

A surprise would occur if we said this::

    >>> a1.user = u1
    >>> a1 = session.merge(a1)
    >>> session.commit()
    sqlalchemy.orm.exc.FlushError: New instance <Address at 0x...>
    with identity key (<class '__main__.Address'>, (1,)) conflicts with
    persistent instance <Address at 0x...>
      Why is that ? We weren't careful with our cascades. The assignment of ``a1.user`` to a persistent object cascaded to the backref of ``User.addresses`` and made our ``a1`` object pending, as though we had added it. Now we have *two* ``Address`` objects in the session:: >>> a1 = Address() >>> a1.user = u1 >>> a1 in session True >>> existing_a1 in session True >>> a1 is existing_a1 False Above, our ``a1`` is already pending in the session. The subsequent :meth:`~.Session.merge` operation essentially does nothing. Cascade can be configured via the ``cascade`` option on :func:`.relationship`, although in this case it would mean removing the ``save-update`` cascade from the ``User.addresses`` relationship - and usually, that behavior is extremely convenient. The solution here would usually be to not assign ``a1.user`` to an object already persistent in the target session. The ``cascade_backrefs=False`` option of :func:`.relationship` will also prevent the ``Address`` from being added to the session via the ``a1.user = u1`` assignment. Further detail on cascade operation is at :ref:`unitofwork_cascades`. Another example of unexpected state:: >>> a1 = Address(id=existing_a1.id, user_id=u1.id) >>> assert a1.user is None >>> True >>> a1 = session.merge(a1) >>> session.commit() sqlalchemy.exc.IntegrityError: (IntegrityError) address.user_id may not be NULL Here, we accessed a1.user, which returned its default value of ``None``, which as a result of this access, has been placed in the ``__dict__`` of our object ``a1``. Normally, this operation creates no change event, so the ``user_id`` attribute takes precedence during a flush. But when we merge the ``Address`` object into the session, the operation is equivalent to:: >>> existing_a1.id = existing_a1.id >>> existing_a1.user_id = u1.id >>> existing_a1.user = None Where above, both ``user_id`` and ``user`` are assigned to, and change events are emitted for both. The ``user`` association takes precedence, and None is applied to ``user_id``, causing a failure. Most :meth:`~.Session.merge` issues can be examined by first checking - is the object prematurely in the session ? .. sourcecode:: python+sql >>> a1 = Address(id=existing_a1, user_id=user.id) >>> assert a1 not in session >>> a1 = session.merge(a1) Or is there state on the object that we don't want ? Examining ``__dict__`` is a quick way to check:: >>> a1 = Address(id=existing_a1, user_id=user.id) >>> a1.user >>> a1.__dict__ {'_sa_instance_state': , 'user_id': 1, 'id': 1, 'user': None} >>> # we don't want user=None merged, remove it >>> del a1.user >>> a1 = session.merge(a1) >>> # success >>> session.commit() Deleting -------- The :meth:`~.Session.delete` method places an instance into the Session's list of objects to be marked as deleted:: # mark two objects to be deleted session.delete(obj1) session.delete(obj2) # commit (or flush) session.commit() .. _session_deleting_from_collections: Deleting from Collections ~~~~~~~~~~~~~~~~~~~~~~~~~~ A common confusion that arises regarding :meth:`~.Session.delete` is when objects which are members of a collection are being deleted. While the collection member is marked for deletion from the database, this does not impact the collection itself in memory until the collection is expired. 
Below, we illustrate that even after an ``Address`` object is marked for deletion, it's still present in the collection associated with the parent ``User``, even after a flush:: >>> address = user.addresses[1] >>> session.delete(address) >>> session.flush() >>> address in user.addresses True When the above session is committed, all attributes are expired. The next access of ``user.addresses`` will re-load the collection, revealing the desired state:: >>> session.commit() >>> address in user.addresses False The usual practice of deleting items within collections is to forego the usage of :meth:`~.Session.delete` directly, and instead use cascade behavior to automatically invoke the deletion as a result of removing the object from the parent collection. The ``delete-orphan`` cascade accomplishes this, as illustrated in the example below:: mapper(User, users_table, properties={ 'addresses':relationship(Address, cascade="all, delete, delete-orphan") }) del user.addresses[1] session.flush() Where above, upon removing the ``Address`` object from the ``User.addresses`` collection, the ``delete-orphan`` cascade has the effect of marking the ``Address`` object for deletion in the same way as passing it to :meth:`~.Session.delete`. See also :ref:`unitofwork_cascades` for detail on cascades. Deleting based on Filter Criterion ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The caveat with ``Session.delete()`` is that you need to have an object handy already in order to delete. The Query includes a :func:`~sqlalchemy.orm.query.Query.delete` method which deletes based on filtering criteria:: session.query(User).filter(User.id==7).delete() The ``Query.delete()`` method includes functionality to "expire" objects already in the session which match the criteria. However it does have some caveats, including that "delete" and "delete-orphan" cascades won't be fully expressed for collections which are already loaded. See the API docs for :meth:`~sqlalchemy.orm.query.Query.delete` for more details. .. _session_flushing: Flushing -------- When the :class:`~sqlalchemy.orm.session.Session` is used with its default configuration, the flush step is nearly always done transparently. Specifically, the flush occurs before any individual :class:`~sqlalchemy.orm.query.Query` is issued, as well as within the :meth:`~.Session.commit` call before the transaction is committed. It also occurs before a SAVEPOINT is issued when :meth:`~.Session.begin_nested` is used. Regardless of the autoflush setting, a flush can always be forced by issuing :meth:`~.Session.flush`:: session.flush() The "flush-on-Query" aspect of the behavior can be disabled by constructing :class:`.sessionmaker` with the flag ``autoflush=False``:: Session = sessionmaker(autoflush=False) Additionally, autoflush can be temporarily disabled by setting the ``autoflush`` flag at any time:: mysession = Session() mysession.autoflush = False Some autoflush-disable recipes are available at `DisableAutoFlush `_. The flush process *always* occurs within a transaction, even if the :class:`~sqlalchemy.orm.session.Session` has been configured with ``autocommit=True``, a setting that disables the session's persistent transactional state. If no transaction is present, :meth:`~.Session.flush` creates its own transaction and commits it. Any failures during flush will always result in a rollback of whatever transaction is present. 
If the Session is not in ``autocommit=True`` mode, an explicit call to :meth:`~.Session.rollback` is required after a flush fails, even though the underlying transaction will have been rolled back already - this is so that the overall nesting pattern of so-called "subtransactions" is consistently maintained. .. _session_committing: Committing ---------- :meth:`~.Session.commit` is used to commit the current transaction. It always issues :meth:`~.Session.flush` beforehand to flush any remaining state to the database; this is independent of the "autoflush" setting. If no transaction is present, it raises an error. Note that the default behavior of the :class:`~sqlalchemy.orm.session.Session` is that a "transaction" is always present; this behavior can be disabled by setting ``autocommit=True``. In autocommit mode, a transaction can be initiated by calling the :meth:`~.Session.begin` method. .. note:: The term "transaction" here refers to a transactional construct within the :class:`.Session` itself which may be maintaining zero or more actual database (DBAPI) transactions. An individual DBAPI connection begins participation in the "transaction" as it is first used to execute a SQL statement, then remains present until the session-level "transaction" is completed. See :ref:`unitofwork_transaction` for further detail. Another behavior of :meth:`~.Session.commit` is that by default it expires the state of all instances present after the commit is complete. This is so that when the instances are next accessed, either through attribute access or by them being present in a :class:`~sqlalchemy.orm.query.Query` result set, they receive the most recent state. To disable this behavior, configure :class:`.sessionmaker` with ``expire_on_commit=False``. Normally, instances loaded into the :class:`~sqlalchemy.orm.session.Session` are never changed by subsequent queries; the assumption is that the current transaction is isolated so the state most recently loaded is correct as long as the transaction continues. Setting ``autocommit=True`` works against this model to some degree since the :class:`~sqlalchemy.orm.session.Session` behaves in exactly the same way with regard to attribute state, except no transaction is present. .. _session_rollback: Rolling Back ------------ :meth:`~.Session.rollback` rolls back the current transaction. With a default configured session, the post-rollback state of the session is as follows: * All transactions are rolled back and all connections returned to the connection pool, unless the Session was bound directly to a Connection, in which case the connection is still maintained (but still rolled back). * Objects which were initially in the *pending* state when they were added to the :class:`~sqlalchemy.orm.session.Session` within the lifespan of the transaction are expunged, corresponding to their INSERT statement being rolled back. The state of their attributes remains unchanged. * Objects which were marked as *deleted* within the lifespan of the transaction are promoted back to the *persistent* state, corresponding to their DELETE statement being rolled back. Note that if those objects were first *pending* within the transaction, that operation takes precedence instead. * All objects not expunged are fully expired. With that state understood, the :class:`~sqlalchemy.orm.session.Session` may safely continue usage after a rollback occurs. 
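A brief sketch of that pattern, assuming a ``User`` mapping whose ``name``
column carries a unique constraint, might look like::

    from sqlalchemy.exc import IntegrityError

    try:
        session.add(User(name='ed'))   # suppose 'ed' already exists in the table
        session.commit()
    except IntegrityError:
        session.rollback()

    # after the rollback, the same Session is ready for further use
    session.add(User(name='mary'))
    session.commit()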
When a :meth:`~.Session.flush` fails, typically for reasons like primary key, foreign key, or "not nullable" constraint violations, a :meth:`~.Session.rollback` is issued automatically (it's currently not possible for a flush to continue after a partial failure). However, the flush process always uses its own transactional demarcator called a *subtransaction*, which is described more fully in the docstrings for :class:`~sqlalchemy.orm.session.Session`. What it means here is that even though the database transaction has been rolled back, the end user must still issue :meth:`~.Session.rollback` to fully reset the state of the :class:`~sqlalchemy.orm.session.Session`. Expunging --------- Expunge removes an object from the Session, sending persistent instances to the detached state, and pending instances to the transient state: .. sourcecode:: python+sql session.expunge(obj1) To remove all items, call :meth:`~.Session.expunge_all` (this method was formerly known as ``clear()``). Closing ------- The :meth:`~.Session.close` method issues a :meth:`~.Session.expunge_all`, and :term:`releases` any transactional/connection resources. When connections are returned to the connection pool, transactional state is rolled back as well. Refreshing / Expiring --------------------- The Session normally works in the context of an ongoing transaction (with the default setting of autoflush=False). Most databases offer "isolated" transactions - this refers to a series of behaviors that allow the work within a transaction to remain consistent as time passes, regardless of the activities outside of that transaction. A key feature of a high degree of transaction isolation is that emitting the same SELECT statement twice will return the same results as when it was called the first time, even if the data has been modified in another transaction. For this reason, the :class:`.Session` gains very efficient behavior by loading the attributes of each instance only once. Subsequent reads of the same row in the same transaction are assumed to have the same value. The user application also gains directly from this assumption, that the transaction is regarded as a temporary shield against concurrent changes - a good application will ensure that isolation levels are set appropriately such that this assumption can be made, given the kind of data being worked with. To clear out the currently loaded state on an instance, the instance or its individual attributes can be marked as "expired", which results in a reload to occur upon next access of any of the instance's attrbutes. The instance can also be immediately reloaded from the database. The :meth:`~.Session.expire` and :meth:`~.Session.refresh` methods achieve this:: # immediately re-load attributes on obj1, obj2 session.refresh(obj1) session.refresh(obj2) # expire objects obj1, obj2, attributes will be reloaded # on the next access: session.expire(obj1) session.expire(obj2) When an expired object reloads, all non-deferred column-based attributes are loaded in one query. Current behavior for expired relationship-based attributes is that they load individually upon access - this behavior may be enhanced in a future release. When a refresh is invoked on an object, the ultimate operation is equivalent to a :meth:`.Query.get`, so any relationships configured with eager loading should also load within the scope of the refresh operation. :meth:`~.Session.refresh` and :meth:`~.Session.expire` also support being passed a list of individual attribute names in which to be refreshed. 
These names can refer to any attribute, column-based or relationship based:: # immediately re-load the attributes 'hello', 'world' on obj1, obj2 session.refresh(obj1, ['hello', 'world']) session.refresh(obj2, ['hello', 'world']) # expire the attributes 'hello', 'world' objects obj1, obj2, attributes will be reloaded # on the next access: session.expire(obj1, ['hello', 'world']) session.expire(obj2, ['hello', 'world']) The full contents of the session may be expired at once using :meth:`~.Session.expire_all`:: session.expire_all() Note that :meth:`~.Session.expire_all` is called **automatically** whenever :meth:`~.Session.commit` or :meth:`~.Session.rollback` are called. If using the session in its default mode of autocommit=False and with a well-isolated transactional environment (which is provided by most backends with the notable exception of MySQL MyISAM), there is virtually *no reason* to ever call :meth:`~.Session.expire_all` directly - plenty of state will remain on the current transaction until it is rolled back or committed or otherwise removed. :meth:`~.Session.refresh` and :meth:`~.Session.expire` similarly are usually only necessary when an UPDATE or DELETE has been issued manually within the transaction using :meth:`.Session.execute()`. Session Attributes ------------------ The :class:`~sqlalchemy.orm.session.Session` itself acts somewhat like a set-like collection. All items present may be accessed using the iterator interface:: for obj in session: print obj And presence may be tested for using regular "contains" semantics:: if obj in session: print "Object is present" The session is also keeping track of all newly created (i.e. pending) objects, all objects which have had changes since they were last loaded or saved (i.e. "dirty"), and everything that's been marked as deleted:: # pending objects recently added to the Session session.new # persistent objects which currently have changes detected # (this collection is now created on the fly each time the property is called) session.dirty # persistent objects that have been marked as deleted via session.delete(obj) session.deleted # dictionary of all persistent objects, keyed on their # identity key session.identity_map (Documentation: :attr:`.Session.new`, :attr:`.Session.dirty`, :attr:`.Session.deleted`, :attr:`.Session.identity_map`). Note that objects within the session are by default *weakly referenced*. This means that when they are dereferenced in the outside application, they fall out of scope from within the :class:`~sqlalchemy.orm.session.Session` as well and are subject to garbage collection by the Python interpreter. The exceptions to this include objects which are pending, objects which are marked as deleted, or persistent objects which have pending changes on them. After a full flush, these collections are all empty, and all objects are again weakly referenced. To disable the weak referencing behavior and force all objects within the session to remain until explicitly expunged, configure :class:`.sessionmaker` with the ``weak_identity_map=False`` setting. .. _unitofwork_cascades: Cascades ======== Mappers support the concept of configurable **cascade** behavior on :func:`~sqlalchemy.orm.relationship` constructs. This refers to how operations performed on a parent object relative to a particular :class:`.Session` should be propagated to items referred to by that relationship. 
The default cascade behavior is usually suitable for most situations, and the option is normally invoked explicitly in order to enable ``delete`` and ``delete-orphan`` cascades, which refer to how the relationship should be treated when the parent is marked for deletion as well as when a child is de-associated from its parent. Cascade behavior is configured by setting the ``cascade`` keyword argument on :func:`~sqlalchemy.orm.relationship`:: class Order(Base): __tablename__ = 'order' items = relationship("Item", cascade="all, delete-orphan") customer = relationship("User", secondary=user_orders_table, cascade="save-update") To set cascades on a backref, the same flag can be used with the :func:`~.sqlalchemy.orm.backref` function, which ultimately feeds its arguments back into :func:`~sqlalchemy.orm.relationship`:: class Item(Base): __tablename__ = 'item' order = relationship("Order", backref=backref("items", cascade="all, delete-orphan") ) The default value of ``cascade`` is ``save-update, merge``. The ``all`` symbol in the cascade options indicates that all cascade flags should be enabled, with the exception of ``delete-orphan``. Typically, cascade is usually left at its default, or configured as ``all, delete-orphan``, indicating the child objects should be treated as "owned" by the parent. The list of available values which can be specified in ``cascade`` are as follows: * ``save-update`` - Indicates that when an object is placed into a :class:`.Session` via :meth:`.Session.add`, all the objects associated with it via this :func:`~sqlalchemy.orm.relationship` should also be added to that same :class:`.Session`. Additionally, if this object is already present in a :class:`.Session`, child objects will be added to that session as they are associated with this parent, i.e. as they are appended to lists, added to sets, or otherwise associated with the parent. ``save-update`` cascade also cascades the *pending history* of the target attribute, meaning that objects which were removed from a scalar or collection attribute whose changes have not yet been flushed are also placed into the target session. This is because they may have foreign key attributes present which will need to be updated to no longer refer to the parent. The ``save-update`` cascade is on by default, and it's common to not even be aware of it. It's customary that only a single call to :meth:`.Session.add` against the lead object of a structure has the effect of placing the full structure of objects into the :class:`.Session` at once. However, it can be turned off, which would imply that objects associated with a parent would need to be placed individually using :meth:`.Session.add` calls for each one. Another default behavior of ``save-update`` cascade is that it will take effect in the reverse direction, that is, associating a child with a parent when a backref is present means both relationships are affected; the parent will be added to the child's session. To disable this somewhat indirect session addition, use the ``cascade_backrefs=False`` option described below in :ref:`backref_cascade`. * ``delete`` - This cascade indicates that when the parent object is marked for deletion, the related objects should also be marked for deletion. Without this cascade present, SQLAlchemy will set the foreign key on a one-to-many relationship to NULL when the parent object is deleted. When enabled, the row is instead deleted. 
``delete`` cascade is often used in conjunction with ``delete-orphan`` cascade, as is appropriate for an object whose foreign key is not intended to be nullable. On some backends, it's also a good idea to set ``ON DELETE`` on the foreign key itself; see the section :ref:`passive_deletes` for more details. Note that for many-to-many relationships which make usage of the ``secondary`` argument to :func:`~.sqlalchemy.orm.relationship`, SQLAlchemy always emits a DELETE for the association row in between "parent" and "child", when the parent is deleted or whenever the linkage between a particular parent and child is broken. * ``delete-orphan`` - This cascade adds behavior to the ``delete`` cascade, such that a child object will be marked for deletion when it is de-associated from the parent, not just when the parent is marked for deletion. This is a common feature when dealing with a related object that is "owned" by its parent, with a NOT NULL foreign key, so that removal of the item from the parent collection results in its deletion. ``delete-orphan`` cascade implies that each child object can only have one parent at a time, so is configured in the vast majority of cases on a one-to-many relationship. Setting it on a many-to-one or many-to-many relationship is more awkward; for this use case, SQLAlchemy requires that the :func:`~sqlalchemy.orm.relationship` be configured with the ``single_parent=True`` function, which establishes Python-side validation that ensures the object is associated with only one parent at a time. * ``merge`` - This cascade indicates that the :meth:`.Session.merge` operation should be propagated from a parent that's the subject of the :meth:`.Session.merge` call down to referred objects. This cascade is also on by default. * ``refresh-expire`` - A less common option, indicates that the :meth:`.Session.expire` operation should be propagated from a parent down to referred objects. When using :meth:`.Session.refresh`, the referred objects are expired only, but not actually refreshed. * ``expunge`` - Indicate that when the parent object is removed from the :class:`.Session` using :meth:`.Session.expunge`, the operation should be propagated down to referred objects. .. _backref_cascade: Controlling Cascade on Backrefs ------------------------------- The ``save-update`` cascade takes place on backrefs by default. This means that, given a mapping such as this:: mapper(Order, order_table, properties={ 'items' : relationship(Item, backref='order') }) If an ``Order`` is already in the session, and is assigned to the ``order`` attribute of an ``Item``, the backref appends the ``Order`` to the ``items`` collection of that ``Order``, resulting in the ``save-update`` cascade taking place:: >>> o1 = Order() >>> session.add(o1) >>> o1 in session True >>> i1 = Item() >>> i1.order = o1 >>> i1 in o1.items True >>> i1 in session True This behavior can be disabled using the ``cascade_backrefs`` flag:: mapper(Order, order_table, properties={ 'items' : relationship(Item, backref='order', cascade_backrefs=False) }) So above, the assignment of ``i1.order = o1`` will append ``i1`` to the ``items`` collection of ``o1``, but will not add ``i1`` to the session. You can, of course, :meth:`~.Session.add` ``i1`` to the session at a later point. This option may be helpful for situations where an object needs to be kept out of a session until it's construction is completed, but still needs to be given associations to objects which are already persistent in the target session. .. 
_unitofwork_transaction: Managing Transactions ===================== A newly constructed :class:`.Session` may be said to be in the "begin" state. In this state, the :class:`.Session` has not established any connection or transactional state with any of the :class:`.Engine` objects that may be associated with it. The :class:`.Session` then receives requests to operate upon a database connection. Typically, this means it is called upon to execute SQL statements using a particular :class:`.Engine`, which may be via :meth:`.Session.query`, :meth:`.Session.execute`, or within a flush operation of pending data, which occurs when such state exists and :meth:`.Session.commit` or :meth:`.Session.flush` is called. As these requests are received, each new :class:`.Engine` encountered is associated with an ongoing transactional state maintained by the :class:`.Session`. When the first :class:`.Engine` is operated upon, the :class:`.Session` can be said to have left the "begin" state and entered "transactional" state. For each :class:`.Engine` encountered, a :class:`.Connection` is associated with it, which is acquired via the :meth:`.Engine.contextual_connect` method. If a :class:`.Connection` was directly associated with the :class:`.Session` (see :ref:`session_external_transaction` for an example of this), it is added to the transactional state directly. For each :class:`.Connection`, the :class:`.Session` also maintains a :class:`.Transaction` object, which is acquired by calling :meth:`.Connection.begin` on each :class:`.Connection`, or if the :class:`.Session` object has been established using the flag ``twophase=True``, a :class:`.TwoPhaseTransaction` object acquired via :meth:`.Connection.begin_twophase`. These transactions are all committed or rolled back corresponding to the invocation of the :meth:`.Session.commit` and :meth:`.Session.rollback` methods. A commit operation will also call the :meth:`.TwoPhaseTransaction.prepare` method on all transactions if applicable. When the transactional state is completed after a rollback or commit, the :class:`.Session` :term:`releases` all :class:`.Transaction` and :class:`.Connection` resources, and goes back to the "begin" state, which will again invoke new :class:`.Connection` and :class:`.Transaction` objects as new requests to emit SQL statements are received. The example below illustrates this lifecycle:: engine = create_engine("...") Session = sessionmaker(bind=engine) # new session. no connections are in use. session = Session() try: # first query. a Connection is acquired # from the Engine, and a Transaction # started. item1 = session.query(Item).get(1) # second query. the same Connection/Transaction # are used. item2 = session.query(Item).get(2) # pending changes are created. item1.foo = 'bar' item2.bar = 'foo' # commit. The pending changes above # are flushed via flush(), the Transaction # is committed, the Connection object closed # and discarded, the underlying DBAPI connection # returned to the connection pool. session.commit() except: # on rollback, the same closure of state # as that of commit proceeds. session.rollback() raise .. 
_session_begin_nested: Using SAVEPOINT --------------- SAVEPOINT transactions, if supported by the underlying engine, may be delineated using the :meth:`~.Session.begin_nested` method:: Session = sessionmaker() session = Session() session.add(u1) session.add(u2) session.begin_nested() # establish a savepoint session.add(u3) session.rollback() # rolls back u3, keeps u1 and u2 session.commit() # commits u1 and u2 :meth:`~.Session.begin_nested` may be called any number of times, which will issue a new SAVEPOINT with a unique identifier for each call. For each :meth:`~.Session.begin_nested` call, a corresponding :meth:`~.Session.rollback` or :meth:`~.Session.commit` must be issued. When :meth:`~.Session.begin_nested` is called, a :meth:`~.Session.flush` is unconditionally issued (regardless of the ``autoflush`` setting). This is so that when a :meth:`~.Session.rollback` occurs, the full state of the session is expired, thus causing all subsequent attribute/instance access to reference the full state of the :class:`~sqlalchemy.orm.session.Session` right before :meth:`~.Session.begin_nested` was called. :meth:`~.Session.begin_nested`, in the same manner as the less often used :meth:`~.Session.begin` method, returns a transactional object which also works as a context manager. It can be succinctly used around individual record inserts in order to catch things like unique constraint exceptions:: for record in records: try: with session.begin_nested(): session.merge(record) except: print "Skipped record %s" % record session.commit() .. _session_autocommit: Autocommit Mode --------------- The example of :class:`.Session` transaction lifecycle illustrated at the start of :ref:`unitofwork_transaction` applies to a :class:`.Session` configured in the default mode of ``autocommit=False``. Constructing a :class:`.Session` with ``autocommit=True`` produces a :class:`.Session` placed into "autocommit" mode, where each SQL statement invoked by a :meth:`.Session.query` or :meth:`.Session.execute` occurs using a new connection from the connection pool, discarding it after results have been iterated. The :meth:`.Session.flush` operation still occurs within the scope of a single transaction, though this transaction is closed out after the :meth:`.Session.flush` operation completes. .. warning:: "autocommit" mode should **not be considered for general use**. If used, it should always be combined with the usage of :meth:`.Session.begin` and :meth:`.Session.commit`, to ensure a transaction demarcation. Executing queries outside of a demarcated transaction is a legacy mode of usage, and can in some cases lead to concurrent connection checkouts. In the absense of a demarcated transaction, the :class:`.Session` cannot make appropriate decisions as to when autoflush should occur nor when auto-expiration should occur, so these features should be disabled with ``autoflush=False, expire_on_commit=False``. Modern usage of "autocommit" is for framework integrations that need to control specifically when the "begin" state occurs. A session which is configured with ``autocommit=True`` may be placed into the "begin" state using the :meth:`.Session.begin` method. 
After the cycle completes upon :meth:`.Session.commit` or :meth:`.Session.rollback`, connection and transaction resources are :term:`released` and the :class:`.Session` goes back into "autocommit" mode, until :meth:`.Session.begin` is called again:: Session = sessionmaker(bind=engine, autocommit=True) session = Session() session.begin() try: item1 = session.query(Item).get(1) item2 = session.query(Item).get(2) item1.foo = 'bar' item2.bar = 'foo' session.commit() except: session.rollback() raise The :meth:`.Session.begin` method also returns a transactional token which is compatible with the Python 2.6 ``with`` statement:: Session = sessionmaker(bind=engine, autocommit=True) session = Session() with session.begin(): item1 = session.query(Item).get(1) item2 = session.query(Item).get(2) item1.foo = 'bar' item2.bar = 'foo' .. _session_subtransactions: Using Subtransactions with Autocommit ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ A subtransaction indicates usage of the :meth:`.Session.begin` method in conjunction with the ``subtransactions=True`` flag. This produces a non-transactional, delimiting construct that allows nesting of calls to :meth:`~.Session.begin` and :meth:`~.Session.commit`. It's purpose is to allow the construction of code that can function within a transaction both independently of any external code that starts a transaction, as well as within a block that has already demarcated a transaction. ``subtransactions=True`` is generally only useful in conjunction with autocommit, and is equivalent to the pattern described at :ref:`connections_nested_transactions`, where any number of functions can call :meth:`.Connection.begin` and :meth:`.Transaction.commit` as though they are the initiator of the transaction, but in fact may be participating in an already ongoing transaction:: # method_a starts a transaction and calls method_b def method_a(session): session.begin(subtransactions=True) try: method_b(session) session.commit() # transaction is committed here except: session.rollback() # rolls back the transaction raise # method_b also starts a transaction, but when # called from method_a participates in the ongoing # transaction. def method_b(session): session.begin(subtransactions=True) try: session.add(SomeObject('bat', 'lala')) session.commit() # transaction is not committed yet except: session.rollback() # rolls back the transaction, in this case # the one that was initiated in method_a(). raise # create a Session and call method_a session = Session(autocommit=True) method_a(session) session.close() Subtransactions are used by the :meth:`.Session.flush` process to ensure that the flush operation takes place within a transaction, regardless of autocommit. When autocommit is disabled, it is still useful in that it forces the :class:`.Session` into a "pending rollback" state, as a failed flush cannot be resumed in mid-operation, where the end user still maintains the "scope" of the transaction overall. .. _session_twophase: Enabling Two-Phase Commit ------------------------- For backends which support two-phase operaration (currently MySQL and PostgreSQL), the session can be instructed to use two-phase commit semantics. This will coordinate the committing of transactions across databases so that the transaction is either committed or rolled back in all databases. You can also :meth:`~.Session.prepare` the session for interacting with transactions not managed by SQLAlchemy. 
To use two phase transactions set the flag ``twophase=True`` on the session:: engine1 = create_engine('postgresql://db1') engine2 = create_engine('postgresql://db2') Session = sessionmaker(twophase=True) # bind User operations to engine 1, Account operations to engine 2 Session.configure(binds={User:engine1, Account:engine2}) session = Session() # .... work with accounts and users # commit. session will issue a flush to all DBs, and a prepare step to all DBs, # before committing both transactions session.commit() Embedding SQL Insert/Update Expressions into a Flush ===================================================== This feature allows the value of a database column to be set to a SQL expression instead of a literal value. It's especially useful for atomic updates, calling stored procedures, etc. All you do is assign an expression to an attribute:: class SomeClass(object): pass mapper(SomeClass, some_table) someobject = session.query(SomeClass).get(5) # set 'value' attribute to a SQL expression adding one someobject.value = some_table.c.value + 1 # issues "UPDATE some_table SET value=value+1" session.commit() This technique works both for INSERT and UPDATE statements. After the flush/commit operation, the ``value`` attribute on ``someobject`` above is expired, so that when next accessed the newly generated value will be loaded from the database. .. _session_sql_expressions: Using SQL Expressions with Sessions ==================================== SQL expressions and strings can be executed via the :class:`~sqlalchemy.orm.session.Session` within its transactional context. This is most easily accomplished using the :meth:`~.Session.execute` method, which returns a :class:`~sqlalchemy.engine.ResultProxy` in the same manner as an :class:`~sqlalchemy.engine.Engine` or :class:`~sqlalchemy.engine.Connection`:: Session = sessionmaker(bind=engine) session = Session() # execute a string statement result = session.execute("select * from table where id=:id", {'id':7}) # execute a SQL expression construct result = session.execute(select([mytable]).where(mytable.c.id==7)) The current :class:`~sqlalchemy.engine.Connection` held by the :class:`~sqlalchemy.orm.session.Session` is accessible using the :meth:`~.Session.connection` method:: connection = session.connection() The examples above deal with a :class:`~sqlalchemy.orm.session.Session` that's bound to a single :class:`~sqlalchemy.engine.Engine` or :class:`~sqlalchemy.engine.Connection`. To execute statements using a :class:`~sqlalchemy.orm.session.Session` which is bound either to multiple engines, or none at all (i.e. relies upon bound metadata), both :meth:`~.Session.execute` and :meth:`~.Session.connection` accept a ``mapper`` keyword argument, which is passed a mapped class or :class:`~sqlalchemy.orm.mapper.Mapper` instance, which is used to locate the proper context for the desired engine:: Session = sessionmaker() session = Session() # need to specify mapper or class when executing result = session.execute("select * from table where id=:id", {'id':7}, mapper=MyMappedClass) result = session.execute(select([mytable], mytable.c.id==7), mapper=MyMappedClass) connection = session.connection(MyMappedClass) .. _session_external_transaction: Joining a Session into an External Transaction =============================================== If a :class:`.Connection` is being used which is already in a transactional state (i.e. 
has a :class:`.Transaction` established), a :class:`.Session` can be made to participate within that transaction by just binding the :class:`.Session` to that :class:`.Connection`. The usual rationale for this is a test suite that allows ORM code to work freely with a :class:`.Session`, including the ability to call :meth:`.Session.commit`, where afterwards the entire database interaction is rolled back:: from sqlalchemy.orm import sessionmaker from sqlalchemy import create_engine from unittest import TestCase # global application scope. create Session class, engine Session = sessionmaker() engine = create_engine('postgresql://...') class SomeTest(TestCase): def setUp(self): # connect to the database self.connection = engine.connect() # begin a non-ORM transaction self.trans = connection.begin() # bind an individual Session to the connection self.session = Session(bind=self.connection) def test_something(self): # use the session in tests. self.session.add(Foo()) self.session.commit() def tearDown(self): # rollback - everything that happened with the # Session above (including calls to commit()) # is rolled back. self.trans.rollback() self.session.close() # return connection to the Engine self.connection.close() Above, we issue :meth:`.Session.commit` as well as :meth:`.Transaction.rollback`. This is an example of where we take advantage of the :class:`.Connection` object's ability to maintain *subtransactions*, or nested begin/commit-or-rollback pairs where only the outermost begin/commit pair actually commits the transaction, or if the outermost block rolls back, everything is rolled back. .. _unitofwork_contextual: Contextual/Thread-local Sessions ================================= Recall from the section :ref:`session_faq_whentocreate`, the concept of "session scopes" was introduced, with an emphasis on web applications and the practice of linking the scope of a :class:`.Session` with that of a web request. Most modern web frameworks include integration tools so that the scope of the :class:`.Session` can be managed automatically, and these tools should be used as they are available. SQLAlchemy includes its own helper object, which helps with the establishment of user-defined :class:`.Session` scopes. It is also used by third-party integration systems to help construct their integration schemes. The object is the :class:`.scoped_session` object, and it represents a **registry** of :class:`.Session` objects. If you're not familiar with the registry pattern, a good introduction can be found in `Patterns of Enterprise Architecture `_. .. note:: The :class:`.scoped_session` object is a very popular and useful object used by many SQLAlchemy applications. However, it is important to note that it presents **only one approach** to the issue of :class:`.Session` management. If you're new to SQLAlchemy, and especially if the term "thread-local variable" seems strange to you, we recommend that if possible you familiarize first with an off-the-shelf integration system such as `Flask-SQLAlchemy `_ or `zope.sqlalchemy `_. A :class:`.scoped_session` is constructed by calling it, passing it a **factory** which can create new :class:`.Session` objects. A factory is just something that produces a new object when called, and in the case of :class:`.Session`, the most common factory is the :class:`.sessionmaker`, introduced earlier in this section. 
Below we illustrate this usage::

    >>> from sqlalchemy.orm import scoped_session
    >>> from sqlalchemy.orm import sessionmaker

    >>> session_factory = sessionmaker(bind=some_engine)
    >>> Session = scoped_session(session_factory)

The :class:`.scoped_session` object we've created will now call upon the :class:`.sessionmaker` when we "call" the registry::

    >>> some_session = Session()

Above, ``some_session`` is an instance of :class:`.Session`, which we can now use to talk to the database. This same :class:`.Session` is also present within the :class:`.scoped_session` registry we've created. If we call upon the registry a second time, we get back the **same** :class:`.Session`::

    >>> some_other_session = Session()
    >>> some_session is some_other_session
    True

This pattern allows disparate sections of the application to call upon a global :class:`.scoped_session`, so that all those areas may share the same session without the need to pass it explicitly. The :class:`.Session` we've established in our registry will remain, until we explicitly tell our registry to dispose of it, by calling :meth:`.scoped_session.remove`::

    >>> Session.remove()

The :meth:`.scoped_session.remove` method first calls :meth:`.Session.close` on the current :class:`.Session`, which has the effect of releasing any connection/transactional resources owned by the :class:`.Session` first, then discarding the :class:`.Session` itself. "Releasing" here means that connections are returned to their connection pool and any transactional state is rolled back, ultimately using the ``rollback()`` method of the underlying DBAPI connection.

At this point, the :class:`.scoped_session` object is "empty", and will create a **new** :class:`.Session` when called again. As illustrated below, this is not the same :class:`.Session` we had before::

    >>> new_session = Session()
    >>> new_session is some_session
    False

The above series of steps illustrates the idea of the "registry" pattern in a nutshell. With that basic idea in hand, we can discuss some of the details of how this pattern proceeds.

Implicit Method Access
----------------------

The job of the :class:`.scoped_session` is simple: hold onto a :class:`.Session` for all who ask for it. As a means of producing more transparent access to this :class:`.Session`, the :class:`.scoped_session` also includes **proxy behavior**, meaning that the registry itself can be treated just like a :class:`.Session` directly; when methods are called on this object, they are **proxied** to the underlying :class:`.Session` being maintained by the registry::

    Session = scoped_session(some_factory)

    # equivalent to:
    #
    # session = Session()
    # print session.query(MyClass).all()
    #
    print Session.query(MyClass).all()

The above code accomplishes the same task as that of acquiring the current :class:`.Session` by calling upon the registry, then using that :class:`.Session`.

Thread-Local Scope
------------------

Users who are familiar with multithreaded programming will note that representing anything as a global variable is usually a bad idea, as it implies that the global object will be accessed by many threads concurrently. The :class:`.Session` object is entirely designed to be used in a **non-concurrent** fashion, which in terms of multithreading means "only in one thread at a time".
So our above example of :class:`.scoped_session` usage, where the same :class:`.Session` object is maintained across multiple calls, suggests that some process needs to be in place such that multiple calls across many threads don't actually get a handle to the same session. We call this notion **thread local storage**, which means that a special object is used to maintain a distinct object for each application thread. Python provides this via the `threading.local() `_ construct. The :class:`.scoped_session` object by default uses this object as storage, so that a single :class:`.Session` is maintained for all who call upon the :class:`.scoped_session` registry, but only within the scope of a single thread. Callers who call upon the registry in a different thread get a :class:`.Session` instance that is local to that other thread. Using this technique, the :class:`.scoped_session` provides a quick and relatively simple (if one is familiar with thread-local storage) way of providing a single, global object in an application that is safe to be called upon from multiple threads.

The :meth:`.scoped_session.remove` method, as always, removes the current :class:`.Session` associated with the thread, if any. However, one advantage of the ``threading.local()`` object is that if the application thread itself ends, the "storage" for that thread is also garbage collected. So it is in fact "safe" to use thread local scope with an application that spawns and tears down threads, without the need to call :meth:`.scoped_session.remove`. However, the scope of transactions themselves, i.e. ending them via :meth:`.Session.commit` or :meth:`.Session.rollback`, will usually still be something that must be explicitly arranged for at the appropriate time, unless the application actually ties the lifespan of a thread to the lifespan of a transaction.

.. _session_lifespan:

Using Thread-Local Scope with Web Applications
----------------------------------------------

As discussed in the section :ref:`session_faq_whentocreate`, a web application is architected around the concept of a **web request**, and integrating such an application with the :class:`.Session` usually implies that the :class:`.Session` will be associated with that request. As it turns out, most Python web frameworks, with notable exceptions such as the asynchronous frameworks Twisted and Tornado, use threads in a simple way, such that a particular web request is received, processed, and completed within the scope of a single *worker thread*. When the request ends, the worker thread is released to a pool of workers where it is available to handle another request.

This simple correspondence of web request and thread means that to associate a :class:`.Session` with a thread implies it is also associated with the web request running within that thread, and vice versa, provided that the :class:`.Session` is created only after the web request begins and torn down just before the web request ends. So it is a common practice to use :class:`.scoped_session` as a quick way to integrate the :class:`.Session` with a web application.
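As a rough sketch of what such an integration can look like, the hypothetical WSGI middleware below (the middleware itself is illustrative and not part of SQLAlchemy) ensures that :meth:`.scoped_session.remove` runs when each request ends; a production integration would also need to account for streamed responses and error reporting::

    from sqlalchemy.orm import scoped_session, sessionmaker

    # created once, at application startup
    Session = scoped_session(sessionmaker(bind=some_engine))

    class SessionCleanupMiddleware(object):
        """Hypothetical WSGI middleware removing the request's Session."""

        def __init__(self, app):
            self.app = app

        def __call__(self, environ, start_response):
            try:
                # the wrapped application calls Session() freely
                # while handling the request
                return self.app(environ, start_response)
            finally:
                # discard the thread-local Session as the request ends
                Session.remove()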
The sequence diagram below illustrates this flow:: Web Server Web Framework SQLAlchemy ORM Code -------------- -------------- ------------------------------ startup -> Web framework # Session registry is established initializes Session = scoped_session(sessionmaker()) incoming web request -> web request -> # The registry is *optionally* starts # called upon explicitly to create # a Session local to the thread and/or request Session() # the Session registry can otherwise # be used at any time, creating the # request-local Session() if not present, # or returning the existing one Session.query(MyClass) # ... Session.add(some_object) # ... # if data was modified, commit the # transaction Session.commit() web request ends -> # the registry is instructed to # remove the Session Session.remove() sends output <- outgoing web <- response Using the above flow, the process of integrating the :class:`.Session` with the web application has exactly two requirements: 1. Create a single :class:`.scoped_session` registry when the web application first starts, ensuring that this object is accessible by the rest of the application. 2. Ensure that :meth:`.scoped_session.remove` is called when the web request ends, usually by integrating with the web framework's event system to establish an "on request end" event. As noted earlier, the above pattern is **just one potential way** to integrate a :class:`.Session` with a web framework, one which in particular makes the significant assumption that the **web framework associates web requests with application threads**. It is however **strongly recommended that the integration tools provided with the web framework itself be used, if available**, instead of :class:`.scoped_session`. In particular, while using a thread local can be convenient, it is preferable that the :class:`.Session` be associated **directly with the request**, rather than with the current thread. The next section on custom scopes details a more advanced configuration which can combine the usage of :class:`.scoped_session` with direct request based scope, or any kind of scope. Using Custom Created Scopes --------------------------- The :class:`.scoped_session` object's default behavior of "thread local" scope is only one of many options on how to "scope" a :class:`.Session`. A custom scope can be defined based on any existing system of getting at "the current thing we are working with". Suppose a web framework defines a library function ``get_current_request()``. An application built using this framework can call this function at any time, and the result will be some kind of ``Request`` object that represents the current request being processed. If the ``Request`` object is hashable, then this function can be easily integrated with :class:`.scoped_session` to associate the :class:`.Session` with the request. Below we illustrate this in conjunction with a hypothetical event marker provided by the web framework ``on_request_end``, which allows code to be invoked whenever a request ends:: from my_web_framework import get_current_request, on_request_end from sqlalchemy.orm import scoped_session, sessionmaker Session = scoped_session(sessionmaker(bind=some_engine), scopefunc=get_current_request) @on_request_end def remove_session(req): Session.remove() Above, we instantiate :class:`.scoped_session` in the usual way, except that we pass our request-returning function as the "scopefunc". 
This instructs :class:`.scoped_session` to use this function to generate a dictionary key whenever the registry is called upon to return the current :class:`.Session`. In this case it is particularly important that we ensure a reliable "remove" system is implemented, as this dictionary is not otherwise self-managed.

Contextual Session API
----------------------

.. autoclass:: sqlalchemy.orm.scoping.scoped_session
   :members:

.. autoclass:: sqlalchemy.util.ScopedRegistry
   :members:

.. autoclass:: sqlalchemy.util.ThreadLocalRegistry

.. _session_partitioning:

Partitioning Strategies
=======================

Simple Vertical Partitioning
----------------------------

Vertical partitioning places different kinds of objects, or different tables, across multiple databases::

    engine1 = create_engine('postgresql://db1')
    engine2 = create_engine('postgresql://db2')

    Session = sessionmaker(twophase=True)

    # bind User operations to engine 1, Account operations to engine 2
    Session.configure(binds={User:engine1, Account:engine2})

    session = Session()

Above, operations against either class will make use of the :class:`.Engine` linked to that class. Upon a flush operation, similar rules take place to ensure each class is written to the right database. The transactions among the multiple databases can optionally be coordinated via two phase commit, if the underlying backend supports it. See :ref:`session_twophase` for an example.

Custom Vertical Partitioning
----------------------------

More comprehensive rule-based class-level partitioning can be built by overriding the :meth:`.Session.get_bind` method. Below we illustrate a custom :class:`.Session` which delivers the following rules:

1. Flush operations are delivered to the engine named ``master``.

2. Operations on objects that subclass ``MyOtherClass`` all occur on the ``other`` engine.

3. Read operations for all other classes occur on a random choice of the ``slave1`` or ``slave2`` database.

::

    engines = {
        'master':create_engine("sqlite:///master.db"),
        'other':create_engine("sqlite:///other.db"),
        'slave1':create_engine("sqlite:///slave1.db"),
        'slave2':create_engine("sqlite:///slave2.db"),
    }

    from sqlalchemy.orm import Session, sessionmaker
    import random

    class RoutingSession(Session):
        def get_bind(self, mapper=None, clause=None):
            if mapper and issubclass(mapper.class_, MyOtherClass):
                return engines['other']
            elif self._flushing:
                return engines['master']
            else:
                return engines[
                    random.choice(['slave1','slave2'])
                ]

The above :class:`.Session` class is plugged in using the ``class_`` argument to :class:`.sessionmaker`::

    Session = sessionmaker(class_=RoutingSession)

This approach can be combined with multiple :class:`.MetaData` objects, using an approach such as the declarative ``__abstract__`` keyword, described at :ref:`declarative_abstract`.

Horizontal Partitioning
-----------------------

Horizontal partitioning partitions the rows of a single table (or a set of tables) across multiple databases.

See the "sharding" example: :ref:`examples_sharding`.

Sessions API
============

Session and sessionmaker()
---------------------------

.. autoclass:: sessionmaker
   :members:
   :inherited-members:

.. autoclass:: sqlalchemy.orm.session.Session
   :members:
   :inherited-members:

.. autoclass:: sqlalchemy.orm.session.SessionTransaction
   :members:

Session Utilities
-----------------

.. autofunction:: make_transient

.. autofunction:: object_session

..
autofunction:: was_deleted Attribute and State Management Utilities ----------------------------------------- These functions are provided by the SQLAlchemy attribute instrumentation API to provide a detailed interface for dealing with instances, attribute values, and history. Some of them are useful when constructing event listener functions, such as those described in :doc:`/orm/events`. .. currentmodule:: sqlalchemy.orm.util .. autofunction:: object_state .. currentmodule:: sqlalchemy.orm.attributes .. autofunction:: del_attribute .. autofunction:: get_attribute .. autofunction:: get_history .. autofunction:: init_collection .. autofunction:: flag_modified .. function:: instance_state Return the :class:`.InstanceState` for a given mapped object. This function is the internal version of :func:`.object_state`. The :func:`.object_state` and/or the :func:`.inspect` function is preferred here as they each emit an informative exception if the given object is not mapped. .. autofunction:: sqlalchemy.orm.instrumentation.is_instrumented .. autofunction:: set_attribute .. autofunction:: set_committed_value .. autoclass:: History :members: SQLAlchemy-0.8.4/doc/build/orm/tutorial.rst0000644000076500000240000025153312251150015021270 0ustar classicstaff00000000000000.. _ormtutorial_toplevel: ========================== Object Relational Tutorial ========================== The SQLAlchemy Object Relational Mapper presents a method of associating user-defined Python classes with database tables, and instances of those classes (objects) with rows in their corresponding tables. It includes a system that transparently synchronizes all changes in state between objects and their related rows, called a `unit of work `_, as well as a system for expressing database queries in terms of the user defined classes and their defined relationships between each other. The ORM is in contrast to the SQLAlchemy Expression Language, upon which the ORM is constructed. Whereas the SQL Expression Language, introduced in :ref:`sqlexpression_toplevel`, presents a system of representing the primitive constructs of the relational database directly without opinion, the ORM presents a high level and abstracted pattern of usage, which itself is an example of applied usage of the Expression Language. While there is overlap among the usage patterns of the ORM and the Expression Language, the similarities are more superficial than they may at first appear. One approaches the structure and content of data from the perspective of a user-defined `domain model `_ which is transparently persisted and refreshed from its underlying storage model. The other approaches it from the perspective of literal schema and SQL expression representations which are explicitly composed into messages consumed individually by the database. A successful application may be constructed using the Object Relational Mapper exclusively. In advanced situations, an application constructed with the ORM may make occasional usage of the Expression Language directly in certain areas where specific database interactions are required. The following tutorial is in doctest format, meaning each ``>>>`` line represents something you can type at a Python command prompt, and the following text represents the expected return value. 
Version Check ============= A quick check to verify that we are on at least **version 0.8** of SQLAlchemy:: >>> import sqlalchemy >>> sqlalchemy.__version__ # doctest:+SKIP 0.8.0 Connecting ========== For this tutorial we will use an in-memory-only SQLite database. To connect we use :func:`~sqlalchemy.create_engine`:: >>> from sqlalchemy import create_engine >>> engine = create_engine('sqlite:///:memory:', echo=True) The ``echo`` flag is a shortcut to setting up SQLAlchemy logging, which is accomplished via Python's standard ``logging`` module. With it enabled, we'll see all the generated SQL produced. If you are working through this tutorial and want less output generated, set it to ``False``. This tutorial will format the SQL behind a popup window so it doesn't get in our way; just click the "SQL" links to see what's being generated. The return value of :func:`.create_engine` is an instance of :class:`.Engine`, and it represents the core interface to the database, adapted through a **dialect** that handles the details of the database and DBAPI in use. In this case the SQLite dialect will interpret instructions to the Python built-in ``sqlite3`` module. The :class:`.Engine` has not actually tried to connect to the database yet; that happens only the first time it is asked to perform a task against the database. We can illustrate this by asking it to perform a simple SELECT statement: .. sourcecode:: python+sql {sql}>>> engine.execute("select 1").scalar() select 1 () {stop}1 As the :meth:`.Engine.execute` method is called, the :class:`.Engine` establishes a connection to the SQLite database, which is then used to emit the SQL. The connection is then returned to an internal connection pool where it will be reused on subsequent statement executions. While we illustrate direct usage of the :class:`.Engine` here, this isn't typically necessary when using the ORM, where the :class:`.Engine`, once created, is used behind the scenes by the ORM as we'll see shortly. Declare a Mapping ================= When using the ORM, the configurational process starts by describing the database tables we'll be dealing with, and then by defining our own classes which will be mapped to those tables. In modern SQLAlchemy, these two tasks are usually performed together, using a system known as :ref:`declarative_toplevel`, which allows us to create classes that include directives to describe the actual database table they will be mapped to. Classes mapped using the Declarative system are defined in terms of a base class which maintains a catalog of classes and tables relative to that base - this is known as the **declarative base class**. Our application will usually have just one instance of this base in a commonly imported module. We create the base class using the :func:`.declarative_base` function, as follows:: >>> from sqlalchemy.ext.declarative import declarative_base >>> Base = declarative_base() Now that we have a "base", we can define any number of mapped classes in terms of it. We will start with just a single table called ``users``, which will store records for the end-users using our application. A new class called ``User`` will be the class to which we map this table. 
The imports we'll need to accomplish this include objects that represent the components of our table, including the :class:`.Column` class which represents a database column, as well as the :class:`.Integer` and :class:`.String` classes that represent basic datatypes used in columns:: >>> from sqlalchemy import Column, Integer, String >>> class User(Base): ... __tablename__ = 'users' ... ... id = Column(Integer, primary_key=True) ... name = Column(String) ... fullname = Column(String) ... password = Column(String) ... ... def __init__(self, name, fullname, password): ... self.name = name ... self.fullname = fullname ... self.password = password ... ... def __repr__(self): ... return "" % (self.name, self.fullname, self.password) The above ``User`` class establishes details about the table being mapped, including the name of the table denoted by the ``__tablename__`` attribute, a set of columns ``id``, ``name``, ``fullname`` and ``password``, where the ``id`` column will also be the primary key of the table. While its certainly possible that some database tables don't have primary key columns (as is also the case with views, which can also be mapped), the ORM in order to actually map to a particular table needs there to be at least one column denoted as a primary key column; multiple-column, i.e. composite, primary keys are of course entirely feasible as well. We define a constructor via ``__init__()`` and also a ``__repr__()`` method - both are optional. The class of course can have any number of other methods and attributes as required by the application, as it's basically just a plain Python class. Inheriting from ``Base`` is also only a requirement of the declarative configurational system, which itself is optional and relatively open ended; at its core, the SQLAlchemy ORM only requires that a class be a so-called "new style class", that is, it inherits from ``object`` in Python 2, in order to be mapped. All classes in Python 3 are "new style" classes. .. topic:: The Non Opinionated Philosophy In our ``User`` mapping example, it was required that we identify the name of the table in use, as well as the names and characteristics of all columns which we care about, including which column or columns represent the primary key, as well as some basic information about the types in use. SQLAlchemy never makes assumptions about these decisions - the developer must always be explicit about specific conventions in use. However, that doesn't mean the task can't be automated. While this tutorial will keep things explicit, developers are encouraged to make use of helper functions as well as "Declarative Mixins" to automate their tasks in large scale applications. The section :ref:`declarative_mixins` introduces many of these techniques. With our ``User`` class constructed via the Declarative system, we have defined information about our table, known as **table metadata**, as well as a user-defined class which is linked to this table, known as a **mapped class**. Declarative has provided for us a shorthand system for what in SQLAlchemy is called a "Classical Mapping", which specifies these two units separately and is discussed in :ref:`classical_mapping`. The table is actually represented by a datastructure known as :class:`.Table`, and the mapping represented by a :class:`.Mapper` object generated by a function called :func:`.mapper`. 
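For comparison, here is a minimal sketch of what the equivalent "classical" mapping might look like, with the :class:`.Table` and the :func:`.mapper` call spelled out separately (the ``users_table`` variable name is only for illustration)::

    from sqlalchemy import Table, MetaData, Column, Integer, String
    from sqlalchemy.orm import mapper

    metadata = MetaData()

    # step one: describe the table
    users_table = Table('users', metadata,
        Column('id', Integer, primary_key=True),
        Column('name', String),
        Column('fullname', String),
        Column('password', String)
    )

    # step two: map a plain Python class to that table
    class User(object):
        def __init__(self, name, fullname, password):
            self.name = name
            self.fullname = fullname
            self.password = password

    mapper(User, users_table)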
Declarative performs both of these steps for us, making available the :class:`.Table` it has created via the ``__table__`` attribute:: >>> User.__table__ # doctest: +NORMALIZE_WHITESPACE Table('users', MetaData(None), Column('id', Integer(), table=, primary_key=True, nullable=False), Column('name', String(), table=), Column('fullname', String(), table=), Column('password', String(), table=), schema=None) and while rarely needed, making available the :class:`.Mapper` object via the ``__mapper__`` attribute:: >>> User.__mapper__ # doctest: +ELLIPSIS The Declarative base class also contains a catalog of all the :class:`.Table` objects that have been defined called :class:`.MetaData`, available via the ``.metadata`` attribute. In this example, we are defining new tables that have yet to be created in our SQLite database, so one helpful feature the :class:`.MetaData` object offers is the ability to issue CREATE TABLE statements to the database for all tables that don't yet exist. We illustrate this by calling the :meth:`.MetaData.create_all` method, passing in our :class:`.Engine` as a source of database connectivity. We will see that special commands are first emitted to check for the presence of the ``users`` table, and following that the actual ``CREATE TABLE`` statement: .. sourcecode:: python+sql >>> Base.metadata.create_all(engine) # doctest:+ELLIPSIS,+NORMALIZE_WHITESPACE {opensql}PRAGMA table_info("users") () CREATE TABLE users ( id INTEGER NOT NULL, name VARCHAR, fullname VARCHAR, password VARCHAR, PRIMARY KEY (id) ) () COMMIT .. topic:: Minimal Table Descriptions vs. Full Descriptions Users familiar with the syntax of CREATE TABLE may notice that the VARCHAR columns were generated without a length; on SQLite and Postgresql, this is a valid datatype, but on others, it's not allowed. So if running this tutorial on one of those databases, and you wish to use SQLAlchemy to issue CREATE TABLE, a "length" may be provided to the :class:`~sqlalchemy.types.String` type as below:: Column(String(50)) The length field on :class:`~sqlalchemy.types.String`, as well as similar precision/scale fields available on :class:`~sqlalchemy.types.Integer`, :class:`~sqlalchemy.types.Numeric`, etc. are not referenced by SQLAlchemy other than when creating tables. Additionally, Firebird and Oracle require sequences to generate new primary key identifiers, and SQLAlchemy doesn't generate or assume these without being instructed. For that, you use the :class:`~sqlalchemy.schema.Sequence` construct:: from sqlalchemy import Sequence Column(Integer, Sequence('user_id_seq'), primary_key=True) A full, foolproof :class:`~sqlalchemy.schema.Table` generated via our declarative mapping is therefore:: class User(Base): __tablename__ = 'users' id = Column(Integer, Sequence('user_id_seq'), primary_key=True) name = Column(String(50)) fullname = Column(String(50)) password = Column(String(12)) def __init__(self, name, fullname, password): self.name = name self.fullname = fullname self.password = password def __repr__(self): return "" % (self.name, self.fullname, self.password) We include this more verbose table definition separately to highlight the difference between a minimal construct geared primarily towards in-Python usage only, versus one that will be used to emit CREATE TABLE statements on a particular set of backends with more stringent requirements. 
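As a side note, and purely as a sketch rather than a step this tutorial relies upon, the DDL that would be emitted for a given :class:`.Table` can be previewed without connecting to any database, by stringifying the :class:`~sqlalchemy.schema.CreateTable` construct::

    from sqlalchemy.schema import CreateTable

    # compiles against a default dialect when stringified
    print CreateTable(User.__table__)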
Create an Instance of the Mapped Class ====================================== With mappings complete, let's now create and inspect a ``User`` object:: >>> ed_user = User('ed', 'Ed Jones', 'edspassword') >>> ed_user.name 'ed' >>> ed_user.password 'edspassword' >>> str(ed_user.id) 'None' The ``id`` attribute, which while not defined by our ``__init__()`` method, exists with a value of ``None`` on our ``User`` instance due to the ``id`` column we declared in our mapping. By default, the ORM creates class attributes for all columns present in the table being mapped. These class attributes exist as :term:`descriptors`, and define **instrumentation** for the mapped class. The functionality of this instrumentation includes the ability to fire on change events, track modifications, and to automatically load new data from the database when needed. Since we have not yet told SQLAlchemy to persist ``Ed Jones`` within the database, its id is ``None``. When we persist the object later, this attribute will be populated with a newly generated value. .. topic:: The default ``__init__()`` method Note that in our ``User`` example we supplied an ``__init__()`` method, which receives ``name``, ``fullname`` and ``password`` as positional arguments. The Declarative system supplies for us a default constructor if one is not already present, which accepts keyword arguments of the same name as that of the mapped attributes. Below we define ``User`` without specifying a constructor:: class User(Base): __tablename__ = 'users' id = Column(Integer, primary_key=True) name = Column(String) fullname = Column(String) password = Column(String) Our ``User`` class above will make usage of the default constructor, and provide ``id``, ``name``, ``fullname``, and ``password`` as keyword arguments:: u1 = User(name='ed', fullname='Ed Jones', password='foobar') Creating a Session ================== We're now ready to start talking to the database. The ORM's "handle" to the database is the :class:`~sqlalchemy.orm.session.Session`. When we first set up the application, at the same level as our :func:`~sqlalchemy.create_engine` statement, we define a :class:`~sqlalchemy.orm.session.Session` class which will serve as a factory for new :class:`~sqlalchemy.orm.session.Session` objects:: >>> from sqlalchemy.orm import sessionmaker >>> Session = sessionmaker(bind=engine) In the case where your application does not yet have an :class:`~sqlalchemy.engine.Engine` when you define your module-level objects, just set it up like this:: >>> Session = sessionmaker() Later, when you create your engine with :func:`~sqlalchemy.create_engine`, connect it to the :class:`~sqlalchemy.orm.session.Session` using :meth:`~.sessionmaker.configure`:: >>> Session.configure(bind=engine) # once engine is available This custom-made :class:`~sqlalchemy.orm.session.Session` class will create new :class:`~sqlalchemy.orm.session.Session` objects which are bound to our database. Other transactional characteristics may be defined when calling :func:`~.sessionmaker` as well; these are described in a later chapter. Then, whenever you need to have a conversation with the database, you instantiate a :class:`~sqlalchemy.orm.session.Session`:: >>> session = Session() The above :class:`~sqlalchemy.orm.session.Session` is associated with our SQLite-enabled :class:`.Engine`, but it hasn't opened any connections yet. 
When it's first used, it retrieves a connection from a pool of connections maintained by the :class:`.Engine`, and holds onto it until we commit all changes and/or close the session object. .. topic:: Session Creational Patterns The business of acquiring a :class:`.Session` has a good deal of variety based on the variety of types of applications and frameworks out there. Keep in mind the :class:`.Session` is just a workspace for your objects, local to a particular database connection - if you think of an application thread as a guest at a dinner party, the :class:`.Session` is the guest's plate and the objects it holds are the food (and the database...the kitchen?)! Hints on how :class:`.Session` is integrated into an application are at :ref:`session_faq`. Adding New Objects ================== To persist our ``User`` object, we :meth:`~.Session.add` it to our :class:`~sqlalchemy.orm.session.Session`:: >>> ed_user = User('ed', 'Ed Jones', 'edspassword') >>> session.add(ed_user) At this point, we say that the instance is **pending**; no SQL has yet been issued and the object is not yet represented by a row in the database. The :class:`~sqlalchemy.orm.session.Session` will issue the SQL to persist ``Ed Jones`` as soon as is needed, using a process known as a **flush**. If we query the database for ``Ed Jones``, all pending information will first be flushed, and the query is issued immediately thereafter. For example, below we create a new :class:`~sqlalchemy.orm.query.Query` object which loads instances of ``User``. We "filter by" the ``name`` attribute of ``ed``, and indicate that we'd like only the first result in the full list of rows. A ``User`` instance is returned which is equivalent to that which we've added: .. sourcecode:: python+sql {sql}>>> our_user = session.query(User).filter_by(name='ed').first() # doctest:+ELLIPSIS,+NORMALIZE_WHITESPACE BEGIN (implicit) INSERT INTO users (name, fullname, password) VALUES (?, ?, ?) ('ed', 'Ed Jones', 'edspassword') SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, users.password AS users_password FROM users WHERE users.name = ? LIMIT ? OFFSET ? ('ed', 1, 0) {stop}>>> our_user In fact, the :class:`~sqlalchemy.orm.session.Session` has identified that the row returned is the **same** row as one already represented within its internal map of objects, so we actually got back the identical instance as that which we just added:: >>> ed_user is our_user True The ORM concept at work here is known as an `identity map `_ and ensures that all operations upon a particular row within a :class:`~sqlalchemy.orm.session.Session` operate upon the same set of data. Once an object with a particular primary key is present in the :class:`~sqlalchemy.orm.session.Session`, all SQL queries on that :class:`~sqlalchemy.orm.session.Session` will always return the same Python object for that particular primary key; it also will raise an error if an attempt is made to place a second, already-persisted object with the same primary key within the session. We can add more ``User`` objects at once using :func:`~sqlalchemy.orm.session.Session.add_all`: .. sourcecode:: python+sql >>> session.add_all([ ... User('wendy', 'Wendy Williams', 'foobar'), ... User('mary', 'Mary Contrary', 'xxg527'), ... User('fred', 'Fred Flinstone', 'blah')]) Also, we've decided the password for Ed isn't too secure, so lets change it: .. sourcecode:: python+sql >>> ed_user.password = 'f8s7ccs' The :class:`~sqlalchemy.orm.session.Session` is paying attention. 
It knows, for example, that ``Ed Jones`` has been modified: .. sourcecode:: python+sql >>> session.dirty IdentitySet([]) and that three new ``User`` objects are pending: .. sourcecode:: python+sql >>> session.new # doctest: +SKIP IdentitySet([, , ]) We tell the :class:`~sqlalchemy.orm.session.Session` that we'd like to issue all remaining changes to the database and commit the transaction, which has been in progress throughout. We do this via :meth:`~.Session.commit`: .. sourcecode:: python+sql {sql}>>> session.commit() UPDATE users SET password=? WHERE users.id = ? ('f8s7ccs', 1) INSERT INTO users (name, fullname, password) VALUES (?, ?, ?) ('wendy', 'Wendy Williams', 'foobar') INSERT INTO users (name, fullname, password) VALUES (?, ?, ?) ('mary', 'Mary Contrary', 'xxg527') INSERT INTO users (name, fullname, password) VALUES (?, ?, ?) ('fred', 'Fred Flinstone', 'blah') COMMIT :meth:`~.Session.commit` flushes whatever remaining changes remain to the database, and commits the transaction. The connection resources referenced by the session are now returned to the connection pool. Subsequent operations with this session will occur in a **new** transaction, which will again re-acquire connection resources when first needed. If we look at Ed's ``id`` attribute, which earlier was ``None``, it now has a value: .. sourcecode:: python+sql {sql}>>> ed_user.id # doctest: +NORMALIZE_WHITESPACE BEGIN (implicit) SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, users.password AS users_password FROM users WHERE users.id = ? (1,) {stop}1 After the :class:`~sqlalchemy.orm.session.Session` inserts new rows in the database, all newly generated identifiers and database-generated defaults become available on the instance, either immediately or via load-on-first-access. In this case, the entire row was re-loaded on access because a new transaction was begun after we issued :meth:`~.Session.commit`. SQLAlchemy by default refreshes data from a previous transaction the first time it's accessed within a new transaction, so that the most recent state is available. The level of reloading is configurable as is described in :doc:`/orm/session`. .. topic:: Session Object States As our ``User`` object moved from being outside the :class:`.Session`, to inside the :class:`.Session` without a primary key, to actually being inserted, it moved between three out of four available "object states" - **transient**, **pending**, and **persistent**. Being aware of these states and what they mean is always a good idea - be sure to read :ref:`session_object_states` for a quick overview. Rolling Back ============ Since the :class:`~sqlalchemy.orm.session.Session` works within a transaction, we can roll back changes made too. Let's make two changes that we'll revert; ``ed_user``'s user name gets set to ``Edwardo``: .. sourcecode:: python+sql >>> ed_user.name = 'Edwardo' and we'll add another erroneous user, ``fake_user``: .. sourcecode:: python+sql >>> fake_user = User('fakeuser', 'Invalid', '12345') >>> session.add(fake_user) Querying the session, we can see that they're flushed into the current transaction: .. sourcecode:: python+sql {sql}>>> session.query(User).filter(User.name.in_(['Edwardo', 'fakeuser'])).all() #doctest: +NORMALIZE_WHITESPACE UPDATE users SET name=? WHERE users.id = ? ('Edwardo', 1) INSERT INTO users (name, fullname, password) VALUES (?, ?, ?) 
('fakeuser', 'Invalid', '12345') SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, users.password AS users_password FROM users WHERE users.name IN (?, ?) ('Edwardo', 'fakeuser') {stop}[, ] Rolling back, we can see that ``ed_user``'s name is back to ``ed``, and ``fake_user`` has been kicked out of the session: .. sourcecode:: python+sql {sql}>>> session.rollback() ROLLBACK {stop} {sql}>>> ed_user.name #doctest: +NORMALIZE_WHITESPACE BEGIN (implicit) SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, users.password AS users_password FROM users WHERE users.id = ? (1,) {stop}u'ed' >>> fake_user in session False issuing a SELECT illustrates the changes made to the database: .. sourcecode:: python+sql {sql}>>> session.query(User).filter(User.name.in_(['ed', 'fakeuser'])).all() #doctest: +NORMALIZE_WHITESPACE SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, users.password AS users_password FROM users WHERE users.name IN (?, ?) ('ed', 'fakeuser') {stop}[] .. _ormtutorial_querying: Querying ======== A :class:`~sqlalchemy.orm.query.Query` object is created using the :class:`~sqlalchemy.orm.session.Session.query()` method on :class:`~sqlalchemy.orm.session.Session`. This function takes a variable number of arguments, which can be any combination of classes and class-instrumented descriptors. Below, we indicate a :class:`~sqlalchemy.orm.query.Query` which loads ``User`` instances. When evaluated in an iterative context, the list of ``User`` objects present is returned: .. sourcecode:: python+sql {sql}>>> for instance in session.query(User).order_by(User.id): # doctest: +NORMALIZE_WHITESPACE ... print instance.name, instance.fullname SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, users.password AS users_password FROM users ORDER BY users.id () {stop}ed Ed Jones wendy Wendy Williams mary Mary Contrary fred Fred Flinstone The :class:`~sqlalchemy.orm.query.Query` also accepts ORM-instrumented descriptors as arguments. Any time multiple class entities or column-based entities are expressed as arguments to the :class:`~sqlalchemy.orm.session.Session.query()` function, the return result is expressed as tuples: .. sourcecode:: python+sql {sql}>>> for name, fullname in session.query(User.name, User.fullname): # doctest: +NORMALIZE_WHITESPACE ... print name, fullname SELECT users.name AS users_name, users.fullname AS users_fullname FROM users () {stop}ed Ed Jones wendy Wendy Williams mary Mary Contrary fred Fred Flinstone The tuples returned by :class:`~sqlalchemy.orm.query.Query` are *named* tuples, supplied by the :class:`.KeyedTuple` class, and can be treated much like an ordinary Python object. The names are the same as the attribute's name for an attribute, and the class name for a class: .. sourcecode:: python+sql {sql}>>> for row in session.query(User, User.name).all(): #doctest: +NORMALIZE_WHITESPACE ... print row.User, row.name SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, users.password AS users_password FROM users () {stop} ed wendy mary fred You can control the names of individual column expressions using the :meth:`~.CompareMixin.label` construct, which is available from any :class:`.ColumnElement`-derived object, as well as any class attribute which is mapped to one (such as ``User.name``): .. 
sourcecode:: python+sql {sql}>>> for row in session.query(User.name.label('name_label')).all(): #doctest: +NORMALIZE_WHITESPACE ... print(row.name_label) SELECT users.name AS name_label FROM users (){stop} ed wendy mary fred The name given to a full entity such as ``User``, assuming that multiple entities are present in the call to :meth:`~.Session.query`, can be controlled using :class:`~.orm.aliased` : .. sourcecode:: python+sql >>> from sqlalchemy.orm import aliased >>> user_alias = aliased(User, name='user_alias') {sql}>>> for row in session.query(user_alias, user_alias.name).all(): #doctest: +NORMALIZE_WHITESPACE ... print row.user_alias SELECT user_alias.id AS user_alias_id, user_alias.name AS user_alias_name, user_alias.fullname AS user_alias_fullname, user_alias.password AS user_alias_password FROM users AS user_alias (){stop} Basic operations with :class:`~sqlalchemy.orm.query.Query` include issuing LIMIT and OFFSET, most conveniently using Python array slices and typically in conjunction with ORDER BY: .. sourcecode:: python+sql {sql}>>> for u in session.query(User).order_by(User.id)[1:3]: #doctest: +NORMALIZE_WHITESPACE ... print u SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, users.password AS users_password FROM users ORDER BY users.id LIMIT ? OFFSET ? (2, 1){stop} and filtering results, which is accomplished either with :func:`~sqlalchemy.orm.query.Query.filter_by`, which uses keyword arguments: .. sourcecode:: python+sql {sql}>>> for name, in session.query(User.name).\ ... filter_by(fullname='Ed Jones'): # doctest: +NORMALIZE_WHITESPACE ... print name SELECT users.name AS users_name FROM users WHERE users.fullname = ? ('Ed Jones',) {stop}ed ...or :func:`~sqlalchemy.orm.query.Query.filter`, which uses more flexible SQL expression language constructs. These allow you to use regular Python operators with the class-level attributes on your mapped class: .. sourcecode:: python+sql {sql}>>> for name, in session.query(User.name).\ ... filter(User.fullname=='Ed Jones'): # doctest: +NORMALIZE_WHITESPACE ... print name SELECT users.name AS users_name FROM users WHERE users.fullname = ? ('Ed Jones',) {stop}ed The :class:`~sqlalchemy.orm.query.Query` object is fully **generative**, meaning that most method calls return a new :class:`~sqlalchemy.orm.query.Query` object upon which further criteria may be added. For example, to query for users named "ed" with a full name of "Ed Jones", you can call :func:`~sqlalchemy.orm.query.Query.filter` twice, which joins criteria using ``AND``: .. sourcecode:: python+sql {sql}>>> for user in session.query(User).\ ... filter(User.name=='ed').\ ... filter(User.fullname=='Ed Jones'): # doctest: +NORMALIZE_WHITESPACE ... print user SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, users.password AS users_password FROM users WHERE users.name = ? AND users.fullname = ? 
('ed', 'Ed Jones') {stop} Common Filter Operators ----------------------- Here's a rundown of some of the most common operators used in :func:`~sqlalchemy.orm.query.Query.filter`: * equals:: query.filter(User.name == 'ed') * not equals:: query.filter(User.name != 'ed') * LIKE:: query.filter(User.name.like('%ed%')) * IN:: query.filter(User.name.in_(['ed', 'wendy', 'jack'])) # works with query objects too: query.filter(User.name.in_(session.query(User.name).filter(User.name.like('%ed%')))) * NOT IN:: query.filter(~User.name.in_(['ed', 'wendy', 'jack'])) * IS NULL:: filter(User.name == None) * IS NOT NULL:: filter(User.name != None) * AND:: from sqlalchemy import and_ filter(and_(User.name == 'ed', User.fullname == 'Ed Jones')) # or call filter()/filter_by() multiple times filter(User.name == 'ed').filter(User.fullname == 'Ed Jones') * OR:: from sqlalchemy import or_ filter(or_(User.name == 'ed', User.name == 'wendy')) * match:: query.filter(User.name.match('wendy')) The contents of the match parameter are database backend specific. Returning Lists and Scalars --------------------------- The :meth:`~sqlalchemy.orm.query.Query.all()`, :meth:`~sqlalchemy.orm.query.Query.one()`, and :meth:`~sqlalchemy.orm.query.Query.first()` methods of :class:`~sqlalchemy.orm.query.Query` immediately issue SQL and return a non-iterator value. :meth:`~sqlalchemy.orm.query.Query.all()` returns a list: .. sourcecode:: python+sql >>> query = session.query(User).filter(User.name.like('%ed')).order_by(User.id) {sql}>>> query.all() #doctest: +NORMALIZE_WHITESPACE SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, users.password AS users_password FROM users WHERE users.name LIKE ? ORDER BY users.id ('%ed',) {stop}[, ] :meth:`~sqlalchemy.orm.query.Query.first()` applies a limit of one and returns the first result as a scalar: .. sourcecode:: python+sql {sql}>>> query.first() #doctest: +NORMALIZE_WHITESPACE SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, users.password AS users_password FROM users WHERE users.name LIKE ? ORDER BY users.id LIMIT ? OFFSET ? ('%ed', 1, 0) {stop} :meth:`~sqlalchemy.orm.query.Query.one()`, fully fetches all rows, and if not exactly one object identity or composite row is present in the result, raises an error: .. sourcecode:: python+sql {sql}>>> from sqlalchemy.orm.exc import MultipleResultsFound >>> try: #doctest: +NORMALIZE_WHITESPACE ... user = query.one() ... except MultipleResultsFound, e: ... print e SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, users.password AS users_password FROM users WHERE users.name LIKE ? ORDER BY users.id ('%ed',) {stop}Multiple rows were found for one() .. sourcecode:: python+sql {sql}>>> from sqlalchemy.orm.exc import NoResultFound >>> try: #doctest: +NORMALIZE_WHITESPACE ... user = query.filter(User.id == 99).one() ... except NoResultFound, e: ... print e SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, users.password AS users_password FROM users WHERE users.name LIKE ? AND users.id = ? ORDER BY users.id ('%ed', 99) {stop}No row was found for one() .. _orm_tutorial_literal_sql: Using Literal SQL ----------------- Literal strings can be used flexibly with :class:`~sqlalchemy.orm.query.Query`. Most methods accept strings in addition to SQLAlchemy clause constructs. For example, :meth:`~sqlalchemy.orm.query.Query.filter()` and :meth:`~sqlalchemy.orm.query.Query.order_by()`: .. 
sourcecode:: python+sql {sql}>>> for user in session.query(User).\ ... filter("id<224").\ ... order_by("id").all(): #doctest: +NORMALIZE_WHITESPACE ... print user.name SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, users.password AS users_password FROM users WHERE id<224 ORDER BY id () {stop}ed wendy mary fred Bind parameters can be specified with string-based SQL, using a colon. To specify the values, use the :meth:`~sqlalchemy.orm.query.Query.params()` method: .. sourcecode:: python+sql {sql}>>> session.query(User).filter("id<:value and name=:name").\ ... params(value=224, name='fred').order_by(User.id).one() # doctest: +NORMALIZE_WHITESPACE SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, users.password AS users_password FROM users WHERE id To use an entirely string-based statement, using :meth:`~sqlalchemy.orm.query.Query.from_statement()`; just ensure that the columns clause of the statement contains the column names normally used by the mapper (below illustrated using an asterisk): .. sourcecode:: python+sql {sql}>>> session.query(User).from_statement( ... "SELECT * FROM users where name=:name").\ ... params(name='ed').all() SELECT * FROM users where name=? ('ed',) {stop}[] You can use :meth:`~sqlalchemy.orm.query.Query.from_statement()` to go completely "raw", using string names to identify desired columns: .. sourcecode:: python+sql {sql}>>> session.query("id", "name", "thenumber12").\ ... from_statement("SELECT id, name, 12 as " ... "thenumber12 FROM users where name=:name").\ ... params(name='ed').all() SELECT id, name, 12 as thenumber12 FROM users where name=? ('ed',) {stop}[(1, u'ed', 12)] .. topic:: Pros and Cons of Literal SQL :class:`.Query` is constructed like the rest of SQLAlchemy, in that it tries to always allow "falling back" to a less automated, lower level approach to things. Accepting strings for all SQL fragments is a big part of that, so that you can bypass the need to organize SQL constructs if you know specifically what string output you'd like. But when using literal strings, the :class:`.Query` no longer knows anything about that part of the SQL construct being emitted, and has no ability to **transform** it to adapt to new contexts. For example, suppose we selected ``User`` objects and ordered by the ``name`` column, using a string to indicate ``name``: .. sourcecode:: python+sql >>> q = session.query(User.id, User.name) {sql}>>> q.order_by("name").all() SELECT users.id AS users_id, users.name AS users_name FROM users ORDER BY name () {stop}[(1, u'ed'), (4, u'fred'), (3, u'mary'), (2, u'wendy')] Perfectly fine. But suppose, before we got a hold of the :class:`.Query`, some sophisticated transformations were applied to it, such as below where we use :meth:`~.Query.from_self`, a particularly advanced method, to retrieve pairs of user names with different numbers of characters:: >>> from sqlalchemy import func >>> ua = aliased(User) >>> q = q.from_self(User.id, User.name, ua.name).\ ... filter(User.name < ua.name).\ ... filter(func.length(ua.name) != func.length(User.name)) The :class:`.Query` now represents a select from a subquery, where ``User`` is represented twice both inside and outside of the subquery. Telling the :class:`.Query` to order by "name" doesn't really give us much guarantee which "name" it's going to order on. In this case it assumes "name" is against the outer "aliased" ``User`` construct: .. 
sourcecode:: python+sql {sql}>>> q.order_by("name").all() #doctest: +NORMALIZE_WHITESPACE SELECT anon_1.users_id AS anon_1_users_id, anon_1.users_name AS anon_1_users_name, users_1.name AS users_1_name FROM (SELECT users.id AS users_id, users.name AS users_name FROM users) AS anon_1, users AS users_1 WHERE anon_1.users_name < users_1.name AND length(users_1.name) != length(anon_1.users_name) ORDER BY name () {stop}[(1, u'ed', u'fred'), (1, u'ed', u'mary'), (1, u'ed', u'wendy'), (3, u'mary', u'wendy'), (4, u'fred', u'wendy')] Only if we use the SQL element directly, in this case ``User.name`` or ``ua.name``, do we give :class:`.Query` enough information to know for sure which "name" we'd like to order on, where we can see we get different results for each: .. sourcecode:: python+sql {sql}>>> q.order_by(ua.name).all() #doctest: +NORMALIZE_WHITESPACE SELECT anon_1.users_id AS anon_1_users_id, anon_1.users_name AS anon_1_users_name, users_1.name AS users_1_name FROM (SELECT users.id AS users_id, users.name AS users_name FROM users) AS anon_1, users AS users_1 WHERE anon_1.users_name < users_1.name AND length(users_1.name) != length(anon_1.users_name) ORDER BY users_1.name () {stop}[(1, u'ed', u'fred'), (1, u'ed', u'mary'), (1, u'ed', u'wendy'), (3, u'mary', u'wendy'), (4, u'fred', u'wendy')] {sql}>>> q.order_by(User.name).all() #doctest: +NORMALIZE_WHITESPACE SELECT anon_1.users_id AS anon_1_users_id, anon_1.users_name AS anon_1_users_name, users_1.name AS users_1_name FROM (SELECT users.id AS users_id, users.name AS users_name FROM users) AS anon_1, users AS users_1 WHERE anon_1.users_name < users_1.name AND length(users_1.name) != length(anon_1.users_name) ORDER BY anon_1.users_name () {stop}[(1, u'ed', u'wendy'), (1, u'ed', u'mary'), (1, u'ed', u'fred'), (4, u'fred', u'wendy'), (3, u'mary', u'wendy')] Counting -------- :class:`~sqlalchemy.orm.query.Query` includes a convenience method for counting called :meth:`~sqlalchemy.orm.query.Query.count()`: .. sourcecode:: python+sql {sql}>>> session.query(User).filter(User.name.like('%ed')).count() #doctest: +NORMALIZE_WHITESPACE SELECT count(*) AS count_1 FROM (SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, users.password AS users_password FROM users WHERE users.name LIKE ?) AS anon_1 ('%ed',) {stop}2 The :meth:`~.Query.count()` method is used to determine how many rows the SQL statement would return. Looking at the generated SQL above, SQLAlchemy always places whatever it is we are querying into a subquery, then counts the rows from that. In some cases this can be reduced to a simpler ``SELECT count(*) FROM table``, however modern versions of SQLAlchemy don't try to guess when this is appropriate, as the exact SQL can be emitted using more explicit means. For situations where the "thing to be counted" needs to be indicated specifically, we can specify the "count" function directly using the expression ``func.count()``, available from the :attr:`~sqlalchemy.sql.expression.func` construct. Below we use it to return the count of each distinct user name: .. sourcecode:: python+sql >>> from sqlalchemy import func {sql}>>> session.query(func.count(User.name), User.name).group_by(User.name).all() #doctest: +NORMALIZE_WHITESPACE SELECT count(users.name) AS count_1, users.name AS users_name FROM users GROUP BY users.name () {stop}[(1, u'ed'), (1, u'fred'), (1, u'mary'), (1, u'wendy')] To achieve our simple ``SELECT count(*) FROM table``, we can apply it as: .. 
sourcecode:: python+sql {sql}>>> session.query(func.count('*')).select_from(User).scalar() SELECT count(?) AS count_1 FROM users ('*',) {stop}4 The usage of :meth:`~.Query.select_from` can be removed if we express the count in terms of the ``User`` primary key directly: .. sourcecode:: python+sql {sql}>>> session.query(func.count(User.id)).scalar() #doctest: +NORMALIZE_WHITESPACE SELECT count(users.id) AS count_1 FROM users () {stop}4 Building a Relationship ======================= Let's consider how a second table, related to ``User``, can be mapped and queried. Users in our system can store any number of email addresses associated with their username. This implies a basic one to many association from the ``users`` to a new table which stores email addresses, which we will call ``addresses``. Using declarative, we define this table along with its mapped class, ``Address``: .. sourcecode:: python+sql >>> from sqlalchemy import ForeignKey >>> from sqlalchemy.orm import relationship, backref >>> class Address(Base): ... __tablename__ = 'addresses' ... id = Column(Integer, primary_key=True) ... email_address = Column(String, nullable=False) ... user_id = Column(Integer, ForeignKey('users.id')) ... ... user = relationship("User", backref=backref('addresses', order_by=id)) ... ... def __init__(self, email_address): ... self.email_address = email_address ... ... def __repr__(self): ... return "" % self.email_address The above class introduces the :class:`.ForeignKey` construct, which is a directive applied to :class:`.Column` that indicates that values in this column should be **constrained** to be values present in the named remote column. This is a core feature of relational databases, and is the "glue" that transforms an otherwise unconnected collection of tables to have rich overlapping relationships. The :class:`.ForeignKey` above expresses that values in the ``addresses.user_id`` column should be constrained to those values in the ``users.id`` column, i.e. its primary key. A second directive, known as :func:`.relationship`, tells the ORM that the ``Address`` class itself should be linked to the ``User`` class, using the attribute ``Address.user``. :func:`.relationship` uses the foreign key relationships between the two tables to determine the nature of this linkage, determining that ``Address.user`` will be **many-to-one**. A subdirective of :func:`.relationship` called :func:`.backref` is placed inside of :func:`.relationship`, providing details about the relationship as expressed in reverse, that of a collection of ``Address`` objects on ``User`` referenced by ``User.addresses``. The reverse side of a many-to-one relationship is always **one-to-many**. A full catalog of available :func:`.relationship` configurations is at :ref:`relationship_patterns`. The two complementing relationships ``Address.user`` and ``User.addresses`` are referred to as a **bidirectional relationship**, and is a key feature of the SQLAlchemy ORM. The section :ref:`relationships_backref` discusses the "backref" feature in detail. Arguments to :func:`.relationship` which concern the remote class can be specified using strings, assuming the Declarative system is in use. Once all mappings are complete, these strings are evaluated as Python expressions in order to produce the actual argument, in the above case the ``User`` class. The names which are allowed during this evaluation include, among other things, the names of all classes which have been created in terms of the declared base. 
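As a rough sketch of how far this goes (the mapping fragment below is illustrative only and is not part of this tutorial's doctests), such a string is evaluated as a Python expression rather than matched as a bare class name, so an ordering criterion can be deferred in the same way, assuming expression helpers such as ``desc()`` are available to that evaluation::

    # illustrative fragment only: both the remote class and the ordering
    # are given as strings, resolved once all classes of this declarative
    # base have been defined
    addresses = relationship("Address",
                        order_by="desc(Address.email_address)",
                        backref="user")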
Below we illustrate creation of the same "addresses/user" bidirectional relationship in terms of ``User`` instead of ``Address``:: class User(Base): # .... addresses = relationship("Address", order_by="Address.id", backref="user") See the docstring for :func:`.relationship` for more detail on argument style. .. topic:: Did you know ? * a FOREIGN KEY constraint in most (though not all) relational databases can only link to a primary key column, or a column that has a UNIQUE constraint. * a FOREIGN KEY constraint that refers to a multiple column primary key, and itself has multiple columns, is known as a "composite foreign key". It can also reference a subset of those columns. * FOREIGN KEY columns can automatically update themselves, in response to a change in the referenced column or row. This is known as the CASCADE *referential action*, and is a built in function of the relational database. * FOREIGN KEY can refer to its own table. This is referred to as a "self-referential" foreign key. * Read more about foreign keys at `Foreign Key - Wikipedia `_. We'll need to create the ``addresses`` table in the database, so we will issue another CREATE from our metadata, which will skip over tables which have already been created: .. sourcecode:: python+sql {sql}>>> Base.metadata.create_all(engine) # doctest: +NORMALIZE_WHITESPACE PRAGMA table_info("users") () PRAGMA table_info("addresses") () CREATE TABLE addresses ( id INTEGER NOT NULL, email_address VARCHAR NOT NULL, user_id INTEGER, PRIMARY KEY (id), FOREIGN KEY(user_id) REFERENCES users (id) ) () COMMIT Working with Related Objects ============================= Now when we create a ``User``, a blank ``addresses`` collection will be present. Various collection types, such as sets and dictionaries, are possible here (see :ref:`custom_collections` for details), but by default, the collection is a Python list. .. sourcecode:: python+sql >>> jack = User('jack', 'Jack Bean', 'gjffdd') >>> jack.addresses [] We are free to add ``Address`` objects on our ``User`` object. In this case we just assign a full list directly: .. sourcecode:: python+sql >>> jack.addresses = [ ... Address(email_address='jack@google.com'), ... Address(email_address='j25@yahoo.com')] When using a bidirectional relationship, elements added in one direction automatically become visible in the other direction. This behavior occurs based on attribute on-change events and is evaluated in Python, without using any SQL: .. sourcecode:: python+sql >>> jack.addresses[1] >>> jack.addresses[1].user Let's add and commit ``Jack Bean`` to the database. ``jack`` as well as the two ``Address`` members in the corresponding ``addresses`` collection are both added to the session at once, using a process known as **cascading**: .. sourcecode:: python+sql >>> session.add(jack) {sql}>>> session.commit() INSERT INTO users (name, fullname, password) VALUES (?, ?, ?) ('jack', 'Jack Bean', 'gjffdd') INSERT INTO addresses (email_address, user_id) VALUES (?, ?) ('jack@google.com', 5) INSERT INTO addresses (email_address, user_id) VALUES (?, ?) ('j25@yahoo.com', 5) COMMIT Querying for Jack, we get just Jack back. No SQL is yet issued for Jack's addresses: .. sourcecode:: python+sql {sql}>>> jack = session.query(User).\ ... filter_by(name='jack').one() #doctest: +NORMALIZE_WHITESPACE BEGIN (implicit) SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, users.password AS users_password FROM users WHERE users.name = ? 
('jack',) {stop}>>> jack Let's look at the ``addresses`` collection. Watch the SQL: .. sourcecode:: python+sql {sql}>>> jack.addresses #doctest: +NORMALIZE_WHITESPACE SELECT addresses.id AS addresses_id, addresses.email_address AS addresses_email_address, addresses.user_id AS addresses_user_id FROM addresses WHERE ? = addresses.user_id ORDER BY addresses.id (5,) {stop}[, ] When we accessed the ``addresses`` collection, SQL was suddenly issued. This is an example of a **lazy loading relationship**. The ``addresses`` collection is now loaded and behaves just like an ordinary list. We'll cover ways to optimize the loading of this collection in a bit. .. _ormtutorial_joins: Querying with Joins ==================== Now that we have two tables, we can show some more features of :class:`.Query`, specifically how to create queries that deal with both tables at the same time. The `Wikipedia page on SQL JOIN `_ offers a good introduction to join techniques, several of which we'll illustrate here. To construct a simple implicit join between ``User`` and ``Address``, we can use :meth:`.Query.filter()` to equate their related columns together. Below we load the ``User`` and ``Address`` entities at once using this method: .. sourcecode:: python+sql {sql}>>> for u, a in session.query(User, Address).\ ... filter(User.id==Address.user_id).\ ... filter(Address.email_address=='jack@google.com').\ ... all(): # doctest: +NORMALIZE_WHITESPACE ... print u, a SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, users.password AS users_password, addresses.id AS addresses_id, addresses.email_address AS addresses_email_address, addresses.user_id AS addresses_user_id FROM users, addresses WHERE users.id = addresses.user_id AND addresses.email_address = ? ('jack@google.com',) {stop} The actual SQL JOIN syntax, on the other hand, is most easily achieved using the :meth:`.Query.join` method: .. sourcecode:: python+sql {sql}>>> session.query(User).join(Address).\ ... filter(Address.email_address=='jack@google.com').\ ... all() #doctest: +NORMALIZE_WHITESPACE SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, users.password AS users_password FROM users JOIN addresses ON users.id = addresses.user_id WHERE addresses.email_address = ? ('jack@google.com',) {stop}[] :meth:`.Query.join` knows how to join between ``User`` and ``Address`` because there's only one foreign key between them. If there were no foreign keys, or several, :meth:`.Query.join` works better when one of the following forms are used:: query.join(Address, User.id==Address.user_id) # explicit condition query.join(User.addresses) # specify relationship from left to right query.join(Address, User.addresses) # same, with explicit target query.join('addresses') # same, using a string As you would expect, the same idea is used for "outer" joins, using the :meth:`~.Query.outerjoin` function:: query.outerjoin(User.addresses) # LEFT OUTER JOIN The reference documentation for :meth:`~.Query.join` contains detailed information and examples of the calling styles accepted by this method; :meth:`~.Query.join` is an important method at the center of usage for any SQL-fluent application. .. _ormtutorial_aliases: Using Aliases ------------- When querying across multiple tables, if the same table needs to be referenced more than once, SQL typically requires that the table be *aliased* with another name, so that it can be distinguished against other occurrences of that table. 
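As a minimal sketch of the idea (the query below is illustrative and not part of this tutorial's doctests; the full, runnable example follows), an alias is simply a Python-level stand-in for one particular occurrence of a mapped table::

    from sqlalchemy.orm import aliased

    # one distinct reference to the "addresses" table; in SQL it renders
    # as something like "addresses AS addresses_1"
    address_alias = aliased(Address)

    # the alias is used wherever the mapped class itself would appear
    query = session.query(User, address_alias).\
                join(address_alias, User.addresses).\
                filter(address_alias.email_address == 'jack@google.com')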
The :class:`~sqlalchemy.orm.query.Query` supports this most explicitly using the :attr:`~sqlalchemy.orm.aliased` construct. Below we join to the ``Address`` entity twice, to locate a user who has two distinct email addresses at the same time: .. sourcecode:: python+sql >>> from sqlalchemy.orm import aliased >>> adalias1 = aliased(Address) >>> adalias2 = aliased(Address) {sql}>>> for username, email1, email2 in \ ... session.query(User.name, adalias1.email_address, adalias2.email_address).\ ... join(adalias1, User.addresses).\ ... join(adalias2, User.addresses).\ ... filter(adalias1.email_address=='jack@google.com').\ ... filter(adalias2.email_address=='j25@yahoo.com'): ... print username, email1, email2 # doctest: +NORMALIZE_WHITESPACE SELECT users.name AS users_name, addresses_1.email_address AS addresses_1_email_address, addresses_2.email_address AS addresses_2_email_address FROM users JOIN addresses AS addresses_1 ON users.id = addresses_1.user_id JOIN addresses AS addresses_2 ON users.id = addresses_2.user_id WHERE addresses_1.email_address = ? AND addresses_2.email_address = ? ('jack@google.com', 'j25@yahoo.com') {stop}jack jack@google.com j25@yahoo.com Using Subqueries ---------------- The :class:`~sqlalchemy.orm.query.Query` is suitable for generating statements which can be used as subqueries. Suppose we wanted to load ``User`` objects along with a count of how many ``Address`` records each user has. The best way to generate SQL like this is to get the count of addresses grouped by user ids, and JOIN to the parent. In this case we use a LEFT OUTER JOIN so that we get rows back for those users who don't have any addresses, e.g.:: SELECT users.*, adr_count.address_count FROM users LEFT OUTER JOIN (SELECT user_id, count(*) AS address_count FROM addresses GROUP BY user_id) AS adr_count ON users.id=adr_count.user_id Using the :class:`~sqlalchemy.orm.query.Query`, we build a statement like this from the inside out. The ``statement`` accessor returns a SQL expression representing the statement generated by a particular :class:`~sqlalchemy.orm.query.Query` - this is an instance of a :func:`~.expression.select` construct, which are described in :ref:`sqlexpression_toplevel`:: >>> from sqlalchemy.sql import func >>> stmt = session.query(Address.user_id, func.count('*').\ ... label('address_count')).\ ... group_by(Address.user_id).subquery() The ``func`` keyword generates SQL functions, and the ``subquery()`` method on :class:`~sqlalchemy.orm.query.Query` produces a SQL expression construct representing a SELECT statement embedded within an alias (it's actually shorthand for ``query.statement.alias()``). Once we have our statement, it behaves like a :class:`~sqlalchemy.schema.Table` construct, such as the one we created for ``users`` at the start of this tutorial. The columns on the statement are accessible through an attribute called ``c``: .. sourcecode:: python+sql {sql}>>> for u, count in session.query(User, stmt.c.address_count).\ ... outerjoin(stmt, User.id==stmt.c.user_id).order_by(User.id): # doctest: +NORMALIZE_WHITESPACE ... print u, count SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, users.password AS users_password, anon_1.address_count AS anon_1_address_count FROM users LEFT OUTER JOIN (SELECT addresses.user_id AS user_id, count(?) 
AS address_count FROM addresses GROUP BY addresses.user_id) AS anon_1 ON users.id = anon_1.user_id ORDER BY users.id ('*',) {stop} None None None None 2 Selecting Entities from Subqueries ---------------------------------- Above, we just selected a result that included a column from a subquery. What if we wanted our subquery to map to an entity ? For this we use ``aliased()`` to associate an "alias" of a mapped class to a subquery: .. sourcecode:: python+sql {sql}>>> stmt = session.query(Address).\ ... filter(Address.email_address != 'j25@yahoo.com').\ ... subquery() >>> adalias = aliased(Address, stmt) >>> for user, address in session.query(User, adalias).\ ... join(adalias, User.addresses): # doctest: +NORMALIZE_WHITESPACE ... print user, address SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, users.password AS users_password, anon_1.id AS anon_1_id, anon_1.email_address AS anon_1_email_address, anon_1.user_id AS anon_1_user_id FROM users JOIN (SELECT addresses.id AS id, addresses.email_address AS email_address, addresses.user_id AS user_id FROM addresses WHERE addresses.email_address != ?) AS anon_1 ON users.id = anon_1.user_id ('j25@yahoo.com',) {stop} Using EXISTS ------------ The EXISTS keyword in SQL is a boolean operator which returns True if the given expression contains any rows. It may be used in many scenarios in place of joins, and is also useful for locating rows which do not have a corresponding row in a related table. There is an explicit EXISTS construct, which looks like this: .. sourcecode:: python+sql >>> from sqlalchemy.sql import exists >>> stmt = exists().where(Address.user_id==User.id) {sql}>>> for name, in session.query(User.name).filter(stmt): # doctest: +NORMALIZE_WHITESPACE ... print name SELECT users.name AS users_name FROM users WHERE EXISTS (SELECT * FROM addresses WHERE addresses.user_id = users.id) () {stop}jack The :class:`~sqlalchemy.orm.query.Query` features several operators which make usage of EXISTS automatically. Above, the statement can be expressed along the ``User.addresses`` relationship using :meth:`~.RelationshipProperty.Comparator.any`: .. sourcecode:: python+sql {sql}>>> for name, in session.query(User.name).\ ... filter(User.addresses.any()): # doctest: +NORMALIZE_WHITESPACE ... print name SELECT users.name AS users_name FROM users WHERE EXISTS (SELECT 1 FROM addresses WHERE users.id = addresses.user_id) () {stop}jack :meth:`~.RelationshipProperty.Comparator.any` takes criterion as well, to limit the rows matched: .. sourcecode:: python+sql {sql}>>> for name, in session.query(User.name).\ ... filter(User.addresses.any(Address.email_address.like('%google%'))): # doctest: +NORMALIZE_WHITESPACE ... print name SELECT users.name AS users_name FROM users WHERE EXISTS (SELECT 1 FROM addresses WHERE users.id = addresses.user_id AND addresses.email_address LIKE ?) ('%google%',) {stop}jack :meth:`~.RelationshipProperty.Comparator.has` is the same operator as :meth:`~.RelationshipProperty.Comparator.any` for many-to-one relationships (note the ``~`` operator here too, which means "NOT"): .. sourcecode:: python+sql {sql}>>> session.query(Address).\ ... 
filter(~Address.user.has(User.name=='jack')).all() # doctest: +NORMALIZE_WHITESPACE SELECT addresses.id AS addresses_id, addresses.email_address AS addresses_email_address, addresses.user_id AS addresses_user_id FROM addresses WHERE NOT (EXISTS (SELECT 1 FROM users WHERE users.id = addresses.user_id AND users.name = ?)) ('jack',) {stop}[] Common Relationship Operators ----------------------------- Here's all the operators which build on relationships - each one is linked to its API documentation which includes full details on usage and behavior: * :meth:`~.RelationshipProperty.Comparator.__eq__` (many-to-one "equals" comparison):: query.filter(Address.user == someuser) * :meth:`~.RelationshipProperty.Comparator.__ne__` (many-to-one "not equals" comparison):: query.filter(Address.user != someuser) * IS NULL (many-to-one comparison, also uses :meth:`~.RelationshipProperty.Comparator.__eq__`):: query.filter(Address.user == None) * :meth:`~.RelationshipProperty.Comparator.contains` (used for one-to-many collections):: query.filter(User.addresses.contains(someaddress)) * :meth:`~.RelationshipProperty.Comparator.any` (used for collections):: query.filter(User.addresses.any(Address.email_address == 'bar')) # also takes keyword arguments: query.filter(User.addresses.any(email_address='bar')) * :meth:`~.RelationshipProperty.Comparator.has` (used for scalar references):: query.filter(Address.user.has(name='ed')) * :meth:`.Query.with_parent` (used for any relationship):: session.query(Address).with_parent(someuser, 'addresses') Eager Loading ============= Recall earlier that we illustrated a **lazy loading** operation, when we accessed the ``User.addresses`` collection of a ``User`` and SQL was emitted. If you want to reduce the number of queries (dramatically, in many cases), we can apply an **eager load** to the query operation. SQLAlchemy offers three types of eager loading, two of which are automatic, and a third which involves custom criterion. All three are usually invoked via functions known as **query options** which give additional instructions to the :class:`.Query` on how we would like various attributes to be loaded, via the :meth:`.Query.options` method. Subquery Load ------------- In this case we'd like to indicate that ``User.addresses`` should load eagerly. A good choice for loading a set of objects as well as their related collections is the :func:`.orm.subqueryload` option, which emits a second SELECT statement that fully loads the collections associated with the results just loaded. The name "subquery" originates from the fact that the SELECT statement constructed directly via the :class:`.Query` is re-used, embedded as a subquery into a SELECT against the related table. This is a little elaborate but very easy to use: .. sourcecode:: python+sql >>> from sqlalchemy.orm import subqueryload {sql}>>> jack = session.query(User).\ ... options(subqueryload(User.addresses)).\ ... filter_by(name='jack').one() #doctest: +NORMALIZE_WHITESPACE SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, users.password AS users_password FROM users WHERE users.name = ? ('jack',) SELECT addresses.id AS addresses_id, addresses.email_address AS addresses_email_address, addresses.user_id AS addresses_user_id, anon_1.users_id AS anon_1_users_id FROM (SELECT users.id AS users_id FROM users WHERE users.name = ?) 
AS anon_1 JOIN addresses ON anon_1.users_id = addresses.user_id ORDER BY anon_1.users_id, addresses.id ('jack',) {stop}>>> jack >>> jack.addresses [, ] Joined Load ------------- The other automatic eager loading function is more well known and is called :func:`.orm.joinedload`. This style of loading emits a JOIN, by default a LEFT OUTER JOIN, so that the lead object as well as the related object or collection is loaded in one step. We illustrate loading the same ``addresses`` collection in this way - note that even though the ``User.addresses`` collection on ``jack`` is actually populated right now, the query will emit the extra join regardless: .. sourcecode:: python+sql >>> from sqlalchemy.orm import joinedload {sql}>>> jack = session.query(User).\ ... options(joinedload(User.addresses)).\ ... filter_by(name='jack').one() #doctest: +NORMALIZE_WHITESPACE SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, users.password AS users_password, addresses_1.id AS addresses_1_id, addresses_1.email_address AS addresses_1_email_address, addresses_1.user_id AS addresses_1_user_id FROM users LEFT OUTER JOIN addresses AS addresses_1 ON users.id = addresses_1.user_id WHERE users.name = ? ORDER BY addresses_1.id ('jack',) {stop}>>> jack >>> jack.addresses [, ] Note that even though the OUTER JOIN resulted in two rows, we still only got one instance of ``User`` back. This is because :class:`.Query` applies a "uniquing" strategy, based on object identity, to the returned entities. This is specifically so that joined eager loading can be applied without affecting the query results. While :func:`.joinedload` has been around for a long time, :func:`.subqueryload` is a newer form of eager loading. :func:`.subqueryload` tends to be more appropriate for loading related collections while :func:`.joinedload` tends to be better suited for many-to-one relationships, due to the fact that only one row is loaded for both the lead and the related object. .. topic:: ``joinedload()`` is not a replacement for ``join()`` The join created by :func:`.joinedload` is anonymously aliased such that it **does not affect the query results**. An :meth:`.Query.order_by` or :meth:`.Query.filter` call **cannot** reference these aliased tables - so-called "user space" joins are constructed using :meth:`.Query.join`. The rationale for this is that :func:`.joinedload` is only applied in order to affect how related objects or collections are loaded as an optimizing detail - it can be added or removed with no impact on actual results. See the section :ref:`zen_of_eager_loading` for a detailed description of how this is used. Explicit Join + Eagerload -------------------------- A third style of eager loading is when we are constructing a JOIN explicitly in order to locate the primary rows, and would like to additionally apply the extra table to a related object or collection on the primary object. This feature is supplied via the :func:`.orm.contains_eager` function, and is most typically useful for pre-loading the many-to-one object on a query that needs to filter on that same object. Below we illustrate loading an ``Address`` row as well as the related ``User`` object, filtering on the ``User`` named "jack" and using :func:`.orm.contains_eager` to apply the "user" columns to the ``Address.user`` attribute: .. sourcecode:: python+sql >>> from sqlalchemy.orm import contains_eager {sql}>>> jacks_addresses = session.query(Address).\ ... join(Address.user).\ ... filter(User.name=='jack').\ ... 
options(contains_eager(Address.user)).\ ... all() #doctest: +NORMALIZE_WHITESPACE SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, users.password AS users_password, addresses.id AS addresses_id, addresses.email_address AS addresses_email_address, addresses.user_id AS addresses_user_id FROM addresses JOIN users ON users.id = addresses.user_id WHERE users.name = ? ('jack',) {stop}>>> jacks_addresses [, ] >>> jacks_addresses[0].user For more information on eager loading, including how to configure various forms of loading by default, see the section :doc:`/orm/loading`. Deleting ======== Let's try to delete ``jack`` and see how that goes. We'll mark as deleted in the session, then we'll issue a ``count`` query to see that no rows remain: .. sourcecode:: python+sql >>> session.delete(jack) {sql}>>> session.query(User).filter_by(name='jack').count() # doctest: +NORMALIZE_WHITESPACE UPDATE addresses SET user_id=? WHERE addresses.id = ? (None, 1) UPDATE addresses SET user_id=? WHERE addresses.id = ? (None, 2) DELETE FROM users WHERE users.id = ? (5,) SELECT count(*) AS count_1 FROM (SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, users.password AS users_password FROM users WHERE users.name = ?) AS anon_1 ('jack',) {stop}0 So far, so good. How about Jack's ``Address`` objects ? .. sourcecode:: python+sql {sql}>>> session.query(Address).filter( ... Address.email_address.in_(['jack@google.com', 'j25@yahoo.com']) ... ).count() # doctest: +NORMALIZE_WHITESPACE SELECT count(*) AS count_1 FROM (SELECT addresses.id AS addresses_id, addresses.email_address AS addresses_email_address, addresses.user_id AS addresses_user_id FROM addresses WHERE addresses.email_address IN (?, ?)) AS anon_1 ('jack@google.com', 'j25@yahoo.com') {stop}2 Uh oh, they're still there ! Analyzing the flush SQL, we can see that the ``user_id`` column of each address was set to NULL, but the rows weren't deleted. SQLAlchemy doesn't assume that deletes cascade, you have to tell it to do so. .. _tutorial_delete_cascade: Configuring delete/delete-orphan Cascade ---------------------------------------- We will configure **cascade** options on the ``User.addresses`` relationship to change the behavior. While SQLAlchemy allows you to add new attributes and relationships to mappings at any point in time, in this case the existing relationship needs to be removed, so we need to tear down the mappings completely and start again - we'll close the :class:`.Session`:: >>> session.close() and use a new :func:`.declarative_base`:: >>> Base = declarative_base() Next we'll declare the ``User`` class, adding in the ``addresses`` relationship including the cascade configuration (we'll leave the constructor out too):: >>> class User(Base): ... __tablename__ = 'users' ... ... id = Column(Integer, primary_key=True) ... name = Column(String) ... fullname = Column(String) ... password = Column(String) ... ... addresses = relationship("Address", backref='user', cascade="all, delete, delete-orphan") ... ... def __repr__(self): ... return "" % (self.name, self.fullname, self.password) Then we recreate ``Address``, noting that in this case we've created the ``Address.user`` relationship via the ``User`` class already:: >>> class Address(Base): ... __tablename__ = 'addresses' ... id = Column(Integer, primary_key=True) ... email_address = Column(String, nullable=False) ... user_id = Column(Integer, ForeignKey('users.id')) ... ... def __repr__(self): ... 
return "" % self.email_address Now when we load the user ``jack`` (below using :meth:`~.Query.get`, which loads by primary key), removing an address from the corresponding ``addresses`` collection will result in that ``Address`` being deleted: .. sourcecode:: python+sql # load Jack by primary key {sql}>>> jack = session.query(User).get(5) #doctest: +NORMALIZE_WHITESPACE BEGIN (implicit) SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, users.password AS users_password FROM users WHERE users.id = ? (5,) {stop} # remove one Address (lazy load fires off) {sql}>>> del jack.addresses[1] #doctest: +NORMALIZE_WHITESPACE SELECT addresses.id AS addresses_id, addresses.email_address AS addresses_email_address, addresses.user_id AS addresses_user_id FROM addresses WHERE ? = addresses.user_id (5,) {stop} # only one address remains {sql}>>> session.query(Address).filter( ... Address.email_address.in_(['jack@google.com', 'j25@yahoo.com']) ... ).count() # doctest: +NORMALIZE_WHITESPACE DELETE FROM addresses WHERE addresses.id = ? (2,) SELECT count(*) AS count_1 FROM (SELECT addresses.id AS addresses_id, addresses.email_address AS addresses_email_address, addresses.user_id AS addresses_user_id FROM addresses WHERE addresses.email_address IN (?, ?)) AS anon_1 ('jack@google.com', 'j25@yahoo.com') {stop}1 Deleting Jack will delete both Jack and the remaining ``Address`` associated with the user: .. sourcecode:: python+sql >>> session.delete(jack) {sql}>>> session.query(User).filter_by(name='jack').count() # doctest: +NORMALIZE_WHITESPACE DELETE FROM addresses WHERE addresses.id = ? (1,) DELETE FROM users WHERE users.id = ? (5,) SELECT count(*) AS count_1 FROM (SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, users.password AS users_password FROM users WHERE users.name = ?) AS anon_1 ('jack',) {stop}0 {sql}>>> session.query(Address).filter( ... Address.email_address.in_(['jack@google.com', 'j25@yahoo.com']) ... ).count() # doctest: +NORMALIZE_WHITESPACE SELECT count(*) AS count_1 FROM (SELECT addresses.id AS addresses_id, addresses.email_address AS addresses_email_address, addresses.user_id AS addresses_user_id FROM addresses WHERE addresses.email_address IN (?, ?)) AS anon_1 ('jack@google.com', 'j25@yahoo.com') {stop}0 .. topic:: More on Cascades Further detail on configuration of cascades is at :ref:`unitofwork_cascades`. The cascade functionality can also integrate smoothly with the ``ON DELETE CASCADE`` functionality of the relational database. See :ref:`passive_deletes` for details. Building a Many To Many Relationship ==================================== We're moving into the bonus round here, but lets show off a many-to-many relationship. We'll sneak in some other features too, just to take a tour. We'll make our application a blog application, where users can write ``BlogPost`` items, which have ``Keyword`` items associated with them. For a plain many-to-many, we need to create an un-mapped :class:`.Table` construct to serve as the association table. This looks like the following:: >>> from sqlalchemy import Table, Text >>> # association table >>> post_keywords = Table('post_keywords', Base.metadata, ... Column('post_id', Integer, ForeignKey('posts.id')), ... Column('keyword_id', Integer, ForeignKey('keywords.id')) ... ) Above, we can see declaring a :class:`.Table` directly is a little different than declaring a mapped class. 
:class:`.Table` is a constructor function, so each individual :class:`.Column` argument is separated by a comma. The :class:`.Column` object is also given its name explicitly, rather than it being taken from an assigned attribute name. Next we define ``BlogPost`` and ``Keyword``, with a :func:`.relationship` linked via the ``post_keywords`` table:: >>> class BlogPost(Base): ... __tablename__ = 'posts' ... ... id = Column(Integer, primary_key=True) ... user_id = Column(Integer, ForeignKey('users.id')) ... headline = Column(String(255), nullable=False) ... body = Column(Text) ... ... # many to many BlogPost<->Keyword ... keywords = relationship('Keyword', secondary=post_keywords, backref='posts') ... ... def __init__(self, headline, body, author): ... self.author = author ... self.headline = headline ... self.body = body ... ... def __repr__(self): ... return "BlogPost(%r, %r, %r)" % (self.headline, self.body, self.author) >>> class Keyword(Base): ... __tablename__ = 'keywords' ... ... id = Column(Integer, primary_key=True) ... keyword = Column(String(50), nullable=False, unique=True) ... ... def __init__(self, keyword): ... self.keyword = keyword Above, the many-to-many relationship is ``BlogPost.keywords``. The defining feature of a many-to-many relationship is the ``secondary`` keyword argument which references a :class:`~sqlalchemy.schema.Table` object representing the association table. This table only contains columns which reference the two sides of the relationship; if it has *any* other columns, such as its own primary key, or foreign keys to other tables, SQLAlchemy requires a different usage pattern called the "association object", described at :ref:`association_pattern`. We would also like our ``BlogPost`` class to have an ``author`` field. We will add this as another bidirectional relationship, except one issue we'll have is that a single user might have lots of blog posts. When we access ``User.posts``, we'd like to be able to filter results further so as not to load the entire collection. For this we use a setting accepted by :func:`~sqlalchemy.orm.relationship` called ``lazy='dynamic'``, which configures an alternate **loader strategy** on the attribute. To use it on the "reverse" side of a :func:`~sqlalchemy.orm.relationship`, we use the :func:`~sqlalchemy.orm.backref` function: .. sourcecode:: python+sql >>> from sqlalchemy.orm import backref >>> # "dynamic" loading relationship to User >>> BlogPost.author = relationship(User, backref=backref('posts', lazy='dynamic')) Create new tables: .. sourcecode:: python+sql {sql}>>> Base.metadata.create_all(engine) # doctest: +NORMALIZE_WHITESPACE PRAGMA table_info("users") () PRAGMA table_info("addresses") () PRAGMA table_info("posts") () PRAGMA table_info("keywords") () PRAGMA table_info("post_keywords") () CREATE TABLE posts ( id INTEGER NOT NULL, user_id INTEGER, headline VARCHAR(255) NOT NULL, body TEXT, PRIMARY KEY (id), FOREIGN KEY(user_id) REFERENCES users (id) ) () COMMIT CREATE TABLE keywords ( id INTEGER NOT NULL, keyword VARCHAR(50) NOT NULL, PRIMARY KEY (id), UNIQUE (keyword) ) () COMMIT CREATE TABLE post_keywords ( post_id INTEGER, keyword_id INTEGER, FOREIGN KEY(post_id) REFERENCES posts (id), FOREIGN KEY(keyword_id) REFERENCES keywords (id) ) () COMMIT Usage is not too different from what we've been doing. Let's give Wendy some blog posts: .. sourcecode:: python+sql {sql}>>> wendy = session.query(User).\ ... filter_by(name='wendy').\ ... 
one() #doctest: +NORMALIZE_WHITESPACE SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, users.password AS users_password FROM users WHERE users.name = ? ('wendy',) {stop} >>> post = BlogPost("Wendy's Blog Post", "This is a test", wendy) >>> session.add(post) We're storing keywords uniquely in the database, but we know that we don't have any yet, so we can just create them: .. sourcecode:: python+sql >>> post.keywords.append(Keyword('wendy')) >>> post.keywords.append(Keyword('firstpost')) We can now look up all blog posts with the keyword 'firstpost'. We'll use the ``any`` operator to locate "blog posts where any of its keywords has the keyword string 'firstpost'": .. sourcecode:: python+sql {sql}>>> session.query(BlogPost).\ ... filter(BlogPost.keywords.any(keyword='firstpost')).\ ... all() #doctest: +NORMALIZE_WHITESPACE INSERT INTO keywords (keyword) VALUES (?) ('wendy',) INSERT INTO keywords (keyword) VALUES (?) ('firstpost',) INSERT INTO posts (user_id, headline, body) VALUES (?, ?, ?) (2, "Wendy's Blog Post", 'This is a test') INSERT INTO post_keywords (post_id, keyword_id) VALUES (?, ?) ((1, 1), (1, 2)) SELECT posts.id AS posts_id, posts.user_id AS posts_user_id, posts.headline AS posts_headline, posts.body AS posts_body FROM posts WHERE EXISTS (SELECT 1 FROM post_keywords, keywords WHERE posts.id = post_keywords.post_id AND keywords.id = post_keywords.keyword_id AND keywords.keyword = ?) ('firstpost',) {stop}[BlogPost("Wendy's Blog Post", 'This is a test', )] If we want to look up posts owned by the user ``wendy``, we can tell the query to narrow down to that ``User`` object as a parent: .. sourcecode:: python+sql {sql}>>> session.query(BlogPost).\ ... filter(BlogPost.author==wendy).\ ... filter(BlogPost.keywords.any(keyword='firstpost')).\ ... all() #doctest: +NORMALIZE_WHITESPACE SELECT posts.id AS posts_id, posts.user_id AS posts_user_id, posts.headline AS posts_headline, posts.body AS posts_body FROM posts WHERE ? = posts.user_id AND (EXISTS (SELECT 1 FROM post_keywords, keywords WHERE posts.id = post_keywords.post_id AND keywords.id = post_keywords.keyword_id AND keywords.keyword = ?)) (2, 'firstpost') {stop}[BlogPost("Wendy's Blog Post", 'This is a test', )] Or we can use Wendy's own ``posts`` relationship, which is a "dynamic" relationship, to query straight from there: .. sourcecode:: python+sql {sql}>>> wendy.posts.\ ... filter(BlogPost.keywords.any(keyword='firstpost')).\ ... all() #doctest: +NORMALIZE_WHITESPACE SELECT posts.id AS posts_id, posts.user_id AS posts_user_id, posts.headline AS posts_headline, posts.body AS posts_body FROM posts WHERE ? 
= posts.user_id AND (EXISTS (SELECT 1 FROM post_keywords, keywords WHERE posts.id = post_keywords.post_id AND keywords.id = post_keywords.keyword_id AND keywords.keyword = ?)) (2, 'firstpost') {stop}[BlogPost("Wendy's Blog Post", 'This is a test', )] Further Reference ================== Query Reference: :ref:`query_api_toplevel` Mapper Reference: :ref:`mapper_config_toplevel` Relationship Reference: :ref:`relationship_config_toplevel` Session Reference: :doc:`/orm/session` SQLAlchemy-0.8.4/doc/build/requirements.txt0000644000076500000240000000000512251150015021344 0ustar classicstaff00000000000000mako SQLAlchemy-0.8.4/doc/build/sqla_arch_small.png0000644000076500000240000012335312251147171021740 0ustar classicstaff00000000000000[binary PNG image data omitted]
SQLAlchemy-0.8.4/doc/build/static/0000755000076500000240000000000012251151573017366 5ustar classicstaff00000000000000SQLAlchemy-0.8.4/doc/build/static/docs.css0000644000076500000240000001764112251150015021027 0ustar classicstaff00000000000000/* global */ body { background-color: #FDFBFC; margin:38px; color:#333333; } a { font-weight:normal; text-decoration:none; } form { display:inline; } /* hyperlinks */ a:link, a:visited, a:active { /*color:#0000FF;*/ color: #990000; } a:hover { color: #FF0000; /*color:#700000;*/ text-decoration:underline; } /* paragraph links after sections. These aren't visible until hovering over the tag, then have a "reverse video" effect over the actual link */ a.headerlink { font-size: 0.8em; padding: 0 4px 0 4px; text-decoration: none; visibility: hidden; } h1:hover > a.headerlink, h2:hover > a.headerlink, h3:hover > a.headerlink, h4:hover > a.headerlink, h5:hover > a.headerlink, h6:hover > a.headerlink, dt:hover > a.headerlink { visibility: visible; } a.headerlink:hover { background-color: #990000; color: white; } /* Container setup */ #docs-container { max-width:1000px; } /* header/footer elements */ #docs-header h1 { font-size:20px; color: #222222; margin: 0; padding: 0; } #docs-header { font-family:Verdana,sans-serif; font-size:.9em; } #docs-top-navigation, #docs-bottom-navigation { font-family: Verdana, sans-serif; background-color: #FBFBEE; border: solid 1px #CCC; padding:10px; font-size:.8em; } #docs-top-navigation { margin:10px 0px 10px 0px; line-height:1.2em; } .docs-navigation-links { font-family:Verdana,sans-serif; } #docs-bottom-navigation { float:right; margin: 1em 0 1em 5px; } #docs-copyright { font-size:.85em; padding:5px 0px; } #docs-header h1, #docs-top-navigation h1, #docs-top-navigation h2 { font-family:Tahoma,Geneva,sans-serif; font-weight:normal; } #docs-top-navigation h2 { margin:16px 4px 7px 5px; font-size:1.6em; } #docs-search { float:right; } #docs-top-page-control { float:right; width:350px; } #docs-top-page-control ul { padding:0; margin:0; } #docs-top-page-control li { font-size:.9em; list-style-type:none; padding:1px 8px; } #docs-container .version-num { font-weight: bold; } /* content container, sidebar */ #docs-body-container { background-color:#EFEFEF; border: solid 1px #CCC; } #docs-body, #docs-sidebar { /*font-family: helvetica, arial, sans-serif; font-size:.9em;*/ font-family: Verdana, sans-serif; font-size:.85em; line-height:1.5em; } #docs-body { min-height: 700px; } #docs-sidebar > ul { font-size:.85em; } #docs-sidebar { float:left; width:212px; padding: 10px 0 0 15px; font-size:.85em; } #docs-sidebar h3, #docs-sidebar h4 { background-color: #DDDDDD; color: #222222; font-family: Verdana,sans-serif; font-size: 1.1em;
font-weight: normal; margin: 10px 0 0 -15px; padding: 5px 10px 5px 10px; text-shadow: 1px 1px 0 white; width:210px; } #docs-sidebar h3 a, #docs-sidebar h4 a { color: #222222; } #docs-sidebar ul { margin: 10px 10px 10px 0px; padding: 0; list-style: none outside none; } #docs-sidebar ul ul { margin-bottom: 0; margin-top: 0; list-style: square outside none; margin-left: 20px; } #docs-body { background-color:#FFFFFF; padding:1px 10px 10px 10px; } #docs-body.withsidebar { margin: 0 0 0 230px; border-left:3px solid #DFDFDF; } #docs-body h1, #docs-body h2, #docs-body h3, #docs-body h4 { font-family:Helvetica, Arial, sans-serif; } #docs-body h1 { /* hide the
      <h1>
      for each content section. */ display:none; font-size:2.0em; } #docs-body h2 { font-size:1.8em; border-top:1px solid; /*border-bottom:1px solid;*/ padding-top:20px; } #sqlalchemy-documentation h2 { border-top:none; padding-top:0; } #docs-body h3 { font-size:1.4em; } /* SQL popup, code styles */ .highlight { background:none; } #docs-container pre { font-size:1.2em; } #docs-container .pre { font-size:1.1em; } #docs-container pre { background-color: #f0f0f0; border: solid 1px #ccc; box-shadow: 2px 2px 3px #DFDFDF; padding:10px; margin: 5px 0px 5px 0px; overflow:auto; line-height:1.3em; } .popup_sql, .show_sql { background-color: #FBFBEE; padding:5px 10px; margin:10px -5px; border:1px dashed; } /* the [SQL] links used to display SQL */ #docs-container .sql_link { font-weight:normal; font-family: arial, sans-serif; font-size:.9em; text-transform: uppercase; color:#990000; border:1px solid; padding:1px 2px 1px 2px; margin:0px 10px 0px 15px; float:right; line-height:1.2em; } #docs-container a.sql_link, #docs-container .sql_link { text-decoration: none; padding:1px 2px; } #docs-container a.sql_link:hover { text-decoration: none; color:#fff; border:1px solid #900; background-color: #900; } /* changeset stuff */ #docs-container a.changeset-link { font-size: 0.8em; padding: 0 4px 0 4px; text-decoration: none; } /* docutils-specific elements */ th.field-name { text-align:right; } div.note, div.warning, p.deprecated, div.topic, div.admonition { background-color:#EEFFEF; } div.faq { background-color: #EFEFEF; } div.faq ul { list-style: square outside none; } div.admonition, div.topic, .deprecated, .versionadded, .versionchanged { border:1px solid #CCCCCC; padding:5px 10px; font-size:.9em; margin-top:5px; box-shadow: 2px 2px 3px #DFDFDF; } /* grrr sphinx changing your document structures, removing classes.... 
*/ .versionadded .versionmodified, .versionchanged .versionmodified, .deprecated .versionmodified, .versionadded > p:first-child > span:first-child, .versionchanged > p:first-child > span:first-child, .deprecated > p:first-child > span:first-child { background-color: #ECF0F3; color: #990000; font-style: italic; } div.inherited-member { border:1px solid #CCCCCC; padding:5px 5px; font-size:.9em; box-shadow: 2px 2px 3px #DFDFDF; } div.warning .admonition-title { color:#FF0000; } div.admonition .admonition-title, div.topic .topic-title { font-weight:bold; } .viewcode-back, .viewcode-link { float:right; } dl.function > dt, dl.attribute > dt, dl.classmethod > dt, dl.method > dt, dl.class > dt, dl.exception > dt { background-color:#F0F0F0; margin:25px -10px 10px 10px; padding: 0px 10px; } dl.glossary > dt { font-weight:bold; font-size:1.1em; padding-top:10px; } dt:target, span.highlight { background-color:#FBE54E; } a.headerlink { font-size: 0.8em; padding: 0 4px 0 4px; text-decoration: none; visibility: hidden; } h1:hover > a.headerlink, h2:hover > a.headerlink, h3:hover > a.headerlink, h4:hover > a.headerlink, h5:hover > a.headerlink, h6:hover > a.headerlink, dt:hover > a.headerlink { visibility: visible; } a.headerlink:hover { background-color: #00f; color: white; } .clearboth { clear:both; } tt.descname { background-color:transparent; font-size:1.2em; font-weight:bold; } tt.descclassname { background-color:transparent; } tt { background-color:#ECF0F3; padding:0 1px; } /* syntax highlighting overrides */ .k, .kn {color:#0908CE;} .o {color:#BF0005;} .go {color:#804049;} /* special "index page" sections with specific formatting */ div#sqlalchemy-documentation { font-size:.95em; } div#sqlalchemy-documentation em { font-style:normal; } div#sqlalchemy-documentation .rubric{ font-size:14px; background-color:#EEFFEF; padding:5px; border:1px solid #BFBFBF; } div#sqlalchemy-documentation a, div#sqlalchemy-documentation li { padding:5px 0px; } div#getting-started { border-bottom:1px solid; } div#sqlalchemy-documentation div#sqlalchemy-orm { float:left; width:48%; } div#sqlalchemy-documentation div#sqlalchemy-core { float:left; width:48%; margin:0; padding-left:10px; border-left:1px solid; } div#dialect-documentation { border-top:1px solid; /*clear:left;*/ } div .versionwarning, div .version-warning { font-size:12px; font-color:red; border:1px solid; padding:4px 4px; margin:8px 0px 2px 0px; background:#FFBBBB; } SQLAlchemy-0.8.4/doc/build/static/init.js0000644000076500000240000000036412251147171020671 0ustar classicstaff00000000000000 function initSQLPopups() { $('div.popup_sql').hide(); $('a.sql_link').click(function() { $(this).nextAll('div.popup_sql:first').toggle(); return false; }) } $(document).ready(function() { initSQLPopups(); }); SQLAlchemy-0.8.4/doc/build/templates/0000755000076500000240000000000012251151573020075 5ustar classicstaff00000000000000SQLAlchemy-0.8.4/doc/build/templates/genindex.mako0000644000076500000240000000350412251147171022550 0ustar classicstaff00000000000000<%inherit file="layout.mako"/> <%block name="show_title" filter="util.striptags"> ${_('Index')}

      ${_('Index')}

      % for i, (key, dummy) in enumerate(genindexentries): ${i != 0 and '| ' or ''}
      ${key} % endfor
      % for i, (key, entries) in enumerate(genindexentries):

      ${key}

      <% breakat = genindexcounts[i] // 2 numcols = 1 numitems = 0 %> % for entryname, (links, subitems) in entries:
      % if links: ${entryname|h} % for unknown, link in links[1:]: , [${i}] % endfor % else: ${entryname|h} % endif
      % if subitems:
      % for subentryname, subentrylinks in subitems:
      ${subentryname|h} % for j, (unknown, link) in enumerate(subentrylinks[1:]): [${j}] % endfor
      % endfor
      % endif <% numitems = numitems + 1 + len(subitems) %> % if numcols <2 and numitems > breakat: <% numcols = numcols + 1 %>
      % endif % endfor
      % endfor <%def name="sidebarrel()"> % if split_index:

      ${_('Index')}

      % for i, (key, dummy) in enumerate(genindexentries): ${i > 0 and '| ' or ''} ${key} % endfor

      ${_('Full index on one page')}

      % endif ${parent.sidebarrel()} SQLAlchemy-0.8.4/doc/build/templates/layout.mako0000644000076500000240000001424612251147171022271 0ustar classicstaff00000000000000## coding: utf-8 <%! local_script_files = [] default_css_files = [ '_static/pygments.css', '_static/docs.css', ] %> <%doc> Structural elements are all prefixed with "docs-" to prevent conflicts when the structure is integrated into the main site. docs-container -> docs-header -> docs-search docs-version-header docs-top-navigation docs-top-page-control docs-navigation-banner docs-body-container -> docs-sidebar docs-body docs-bottom-navigation docs-copyright <%inherit file="${context['base']}"/> <% withsidebar = bool(toc) and current_page_name != 'index' %> <%block name="head_title"> % if current_page_name != 'index': ${capture(self.show_title) | util.striptags} — % endif ${docstitle|h}
      <%block name="headers"> ${parent.headers()} % for scriptfile in script_files + self.attr.local_script_files: % endfor % if hasdoc('about'): % endif % if hasdoc('copyright'): % endif % if parents: % endif % if nexttopic: % endif % if prevtopic: % endif

      ${docstitle|h}

      Release: ${release} | Release Date: ${release_date} % if pdf_url: | Download PDF % endif
      ${docstitle|h} % if parents: % for parent in parents: » ${parent['title']} % endfor % endif % if current_page_name != 'index': » ${self.show_title()} % endif

      <%block name="show_title"> ${title}

      % if withsidebar:

      Table of Contents

      ${toc} % if prevtopic:

      Previous Topic

      ${prevtopic['title']}

      % endif % if nexttopic:

      Next Topic

      ${nexttopic['title']}

      % endif % if rtd:

      Project Versions

      % endif

      Quick Search

      % endif
      ${next.body()}
      SQLAlchemy-0.8.4/doc/build/templates/page.mako0000644000076500000240000000010312251147171021653 0ustar classicstaff00000000000000<%inherit file="layout.mako"/> ${body| util.strip_toplevel_anchors}SQLAlchemy-0.8.4/doc/build/templates/search.mako0000644000076500000240000000135512251147171022216 0ustar classicstaff00000000000000<%inherit file="layout.mako"/> <%! local_script_files = ['_static/searchtools.js'] %> <%block name="show_title"> ${_('Search')} <%block name="headers"> ${parent.headers()}

      Enter Search Terms:

      <%block name="footer"> ${parent.footer()} SQLAlchemy-0.8.4/doc/build/templates/static_base.mako0000644000076500000240000000161212251147171023226 0ustar classicstaff00000000000000 ${metatags and metatags or ''} <%block name="head_title"> </%block> <%block name="css"> % for cssfile in self.attr.default_css_files + css_files: % endfor <%block name="headers"/> ${next.body()} <%block name="footer"/> SQLAlchemy-0.8.4/doc/build/testdocs.py0000644000076500000240000000362012251150015020270 0ustar classicstaff00000000000000import sys sys.path = ['../../lib', './lib/'] + sys.path import os import re import doctest import sqlalchemy.util as util import sqlalchemy.log as salog import logging rootlogger = logging.getLogger('sqlalchemy.engine.base.Engine') class MyStream(object): def write(self, string): sys.stdout.write(string) sys.stdout.flush() def flush(self): pass handler = logging.StreamHandler(MyStream()) handler.setFormatter(logging.Formatter('%(message)s')) rootlogger.addHandler(handler) def teststring(s, name, globs=None, verbose=None, report=True, optionflags=0, extraglobs=None, raise_on_error=False, parser=doctest.DocTestParser()): from doctest import DebugRunner, DocTestRunner, master # Assemble the globals. if globs is None: globs = {} else: globs = globs.copy() if extraglobs is not None: globs.update(extraglobs) if raise_on_error: runner = DebugRunner(verbose=verbose, optionflags=optionflags) else: runner = DocTestRunner(verbose=verbose, optionflags=optionflags) test = parser.get_doctest(s, globs, name, name, 0) runner.run(test) if report: runner.summarize() if master is None: master = runner else: master.merge(runner) return runner.failures, runner.tries def replace_file(s, newfile): engine = r"'(sqlite|postgresql|mysql):///.*'" engine = re.compile(engine, re.MULTILINE) s, n = re.subn(engine, "'sqlite:///" + newfile + "'", s) if not n: raise ValueError("Couldn't find suitable create_engine call to replace '%s' in it" % oldfile) return s #for filename in 'orm/tutorial','core/tutorial',: for filename in 'core/tutorial',: filename = '%s.rst' % filename s = open(filename).read() #s = replace_file(s, ':memory:') s = re.sub(r'{(?:stop|sql|opensql)}', '', s) teststring(s, filename) SQLAlchemy-0.8.4/doc/build/texinputs/0000755000076500000240000000000012251151573020142 5ustar classicstaff00000000000000SQLAlchemy-0.8.4/doc/build/texinputs/Makefile0000644000076500000240000000345112251147171021604 0ustar classicstaff00000000000000# Makefile for Sphinx LaTeX output ALLDOCS = $(basename $(wildcard *.tex)) ALLPDF = $(addsuffix .pdf,$(ALLDOCS)) ALLDVI = $(addsuffix .dvi,$(ALLDOCS)) # Prefix for archive names ARCHIVEPRREFIX = # Additional LaTeX options LATEXOPTS = -interaction=nonstopmode all: $(ALLPDF) all-pdf: $(ALLPDF) all-dvi: $(ALLDVI) all-ps: all-dvi for f in *.dvi; do dvips $$f; done all-pdf-ja: $(wildcard *.tex) ebb $(wildcard *.pdf *.png *.gif *.jpeg) platex -kanji=utf8 $(LATEXOPTS) '$<' platex -kanji=utf8 $(LATEXOPTS) '$<' platex -kanji=utf8 $(LATEXOPTS) '$<' -mendex -U -f -d '$(basename $<).dic' -s python.ist '$(basename $<).idx' platex -kanji=utf8 $(LATEXOPTS) '$<' platex -kanji=utf8 $(LATEXOPTS) '$<' dvipdfmx '$(basename $<).dvi' zip: all-$(FMT) mkdir $(ARCHIVEPREFIX)docs-$(FMT) cp $(ALLPDF) $(ARCHIVEPREFIX)docs-$(FMT) zip -q -r -9 $(ARCHIVEPREFIX)docs-$(FMT).zip $(ARCHIVEPREFIX)docs-$(FMT) rm -r $(ARCHIVEPREFIX)docs-$(FMT) tar: all-$(FMT) mkdir $(ARCHIVEPREFIX)docs-$(FMT) cp $(ALLPDF) $(ARCHIVEPREFIX)docs-$(FMT) tar cf $(ARCHIVEPREFIX)docs-$(FMT).tar $(ARCHIVEPREFIX)docs-$(FMT) rm -r 
$(ARCHIVEPREFIX)docs-$(FMT) bz2: tar bzip2 -9 -k $(ARCHIVEPREFIX)docs-$(FMT).tar # The number of LaTeX runs is quite conservative, but I don't expect it # to get run often, so the little extra time won't hurt. %.dvi: %.tex -latex $(LATEXOPTS) '$<' -latex $(LATEXOPTS) '$<' -latex $(LATEXOPTS) '$<' -makeindex -s python.ist '$(basename $<).idx' -latex $(LATEXOPTS) '$<' -latex $(LATEXOPTS) '$<' %.pdf: %.tex -pdflatex $(LATEXOPTS) '$<' -pdflatex $(LATEXOPTS) '$<' -pdflatex $(LATEXOPTS) '$<' -makeindex -s python.ist '$(basename $<).idx' -pdflatex $(LATEXOPTS) '$<' -pdflatex $(LATEXOPTS) '$<' clean: rm -f *.dvi *.log *.ind *.aux *.toc *.syn *.idx *.out *.ilg *.pla .PHONY: all all-pdf all-dvi all-ps clean SQLAlchemy-0.8.4/doc/build/texinputs/sphinx.sty0000644000076500000240000003314712251147171022223 0ustar classicstaff00000000000000% % sphinx.sty % % Adapted from the old python.sty, mostly written by Fred Drake, % by Georg Brandl. % \NeedsTeXFormat{LaTeX2e}[1995/12/01] \ProvidesPackage{sphinx}[2010/01/15 LaTeX package (Sphinx markup)] \RequirePackage{textcomp} \RequirePackage{fancyhdr} \RequirePackage{fancybox} \RequirePackage{titlesec} \RequirePackage{tabulary} \RequirePackage{amsmath} % for \text \RequirePackage{makeidx} \RequirePackage{framed} \RequirePackage{color} % For highlighted code. \RequirePackage{fancyvrb} % For table captions. \RequirePackage{threeparttable} % Handle footnotes in tables. \RequirePackage{footnote} \makesavenoteenv{tabulary} % For floating figures in the text. \RequirePackage{wrapfig} % Separate paragraphs by space by default. \RequirePackage{parskip} % Redefine these colors to your liking in the preamble. \definecolor{TitleColor}{rgb}{0.126,0.263,0.361} \definecolor{InnerLinkColor}{rgb}{0.208,0.374,0.486} \definecolor{OuterLinkColor}{rgb}{0.216,0.439,0.388} % Redefine these colors to something not white if you want to have colored % background and border for code examples. \definecolor{VerbatimColor}{rgb}{1,1,1} \definecolor{VerbatimBorderColor}{rgb}{1,1,1} % Uncomment these two lines to ignore the paper size and make the page % size more like a typical published manual. %\renewcommand{\paperheight}{9in} %\renewcommand{\paperwidth}{8.5in} % typical squarish manual %\renewcommand{\paperwidth}{7in} % O'Reilly ``Programmming Python'' % For graphicx, check if we are compiling under latex or pdflatex. 
\ifx\pdftexversion\undefined \usepackage{graphicx} \else \usepackage[pdftex]{graphicx} \fi % for PDF output, use colors and maximal compression \newif\ifsphinxpdfoutput\sphinxpdfoutputfalse \ifx\pdfoutput\undefined\else\ifcase\pdfoutput \let\py@NormalColor\relax \let\py@TitleColor\relax \else \sphinxpdfoutputtrue \input{pdfcolor} \def\py@NormalColor{\color[rgb]{0.0,0.0,0.0}} \def\py@TitleColor{\color{TitleColor}} \pdfcompresslevel=9 \fi\fi % XeLaTeX can do colors, too \ifx\XeTeXrevision\undefined\else \def\py@NormalColor{\color[rgb]{0.0,0.0,0.0}} \def\py@TitleColor{\color{TitleColor}} \fi % Increase printable page size (copied from fullpage.sty) \topmargin 0pt \advance \topmargin by -\headheight \advance \topmargin by -\headsep % attempt to work a little better for A4 users \textheight \paperheight \advance\textheight by -2in \oddsidemargin 0pt \evensidemargin 0pt %\evensidemargin -.25in % for ``manual size'' documents \marginparwidth 0.5in \textwidth \paperwidth \advance\textwidth by -2in % Style parameters and macros used by most documents here \raggedbottom \sloppy \hbadness = 5000 % don't print trivial gripes \pagestyle{empty} % start this way; change for \pagenumbering{roman} % ToC & chapters % Use this to set the font family for headers and other decor: \newcommand{\py@HeaderFamily}{\sffamily\bfseries} % Redefine the 'normal' header/footer style when using "fancyhdr" package: \@ifundefined{fancyhf}{}{ % Use \pagestyle{normal} as the primary pagestyle for text. \fancypagestyle{normal}{ \fancyhf{} \fancyfoot[LE,RO]{{\py@HeaderFamily\thepage}} \fancyfoot[LO]{{\py@HeaderFamily\nouppercase{\rightmark}}} \fancyfoot[RE]{{\py@HeaderFamily\nouppercase{\leftmark}}} \fancyhead[LE,RO]{{\py@HeaderFamily \@title, \py@release}} \renewcommand{\headrulewidth}{0.4pt} \renewcommand{\footrulewidth}{0.4pt} } % Update the plain style so we get the page number & footer line, % but not a chapter or section title. This is to keep the first % page of a chapter and the blank page between chapters `clean.' \fancypagestyle{plain}{ \fancyhf{} \fancyfoot[LE,RO]{{\py@HeaderFamily\thepage}} \renewcommand{\headrulewidth}{0pt} \renewcommand{\footrulewidth}{0.4pt} } } % Some custom font markup commands. % \newcommand{\strong}[1]{{\bf #1}} \newcommand{\code}[1]{\texttt{#1}} \newcommand{\bfcode}[1]{\code{\bfseries#1}} \newcommand{\samp}[1]{`\code{#1}'} \newcommand{\email}[1]{\textsf{#1}} % Redefine the Verbatim environment to allow border and background colors. % The original environment is still used for verbatims within tables. \let\OriginalVerbatim=\Verbatim \let\endOriginalVerbatim=\endVerbatim % Play with vspace to be able to keep the indentation. \newlength\distancetoright \newlength\leftsidespace \def\mycolorbox#1{% \setlength\leftsidespace{\@totalleftmargin}% \setlength\distancetoright{\linewidth}% \advance\distancetoright -\@totalleftmargin % \noindent\hspace*{\@totalleftmargin}% \fcolorbox{VerbatimBorderColor}{VerbatimColor}{% \begin{minipage}{\distancetoright}% \noindent\hspace*{-\leftsidespace}% #1 \end{minipage}% }% } \def\FrameCommand{\mycolorbox} \renewcommand{\Verbatim}[1][1]{% \OriginalVerbatim[#1]% } \renewcommand{\endVerbatim}{% \endOriginalVerbatim% } % Index-entry generation support. 
% % Command to generate two index entries (using subentries) \newcommand{\indexii}[2]{\index{#1!#2}\index{#2!#1}} % And three entries (using only one level of subentries) \newcommand{\indexiii}[3]{\index{#1!#2 #3}\index{#2!#3, #1}\index{#3!#1 #2}} % And four (again, using only one level of subentries) \newcommand{\indexiv}[4]{ \index{#1!#2 #3 #4} \index{#2!#3 #4, #1} \index{#3!#4, #1 #2} \index{#4!#1 #2 #3} } % \moduleauthor{name}{email} \newcommand{\moduleauthor}[2]{} % \sectionauthor{name}{email} \newcommand{\sectionauthor}[2]{} % Augment the sectioning commands used to get our own font family in place, % and reset some internal data items: \titleformat{\section}{\Large\py@HeaderFamily}% {\py@TitleColor\thesection}{0.5em}{\py@TitleColor}{\py@NormalColor} \titleformat{\subsection}{\large\py@HeaderFamily}% {\py@TitleColor\thesubsection}{0.5em}{\py@TitleColor}{\py@NormalColor} \titleformat{\subsubsection}{\py@HeaderFamily}% {\py@TitleColor\thesubsubsection}{0.5em}{\py@TitleColor}{\py@NormalColor} \titleformat{\paragraph}{\large\py@HeaderFamily}% {\py@TitleColor}{0em}{\py@TitleColor}{\py@NormalColor} % {fulllineitems} is the main environment for object descriptions. % \newcommand{\py@itemnewline}[1]{% \@tempdima\linewidth% \advance\@tempdima \leftmargin\makebox[\@tempdima][l]{#1}% } \newenvironment{fulllineitems}{ \begin{list}{}{\labelwidth \leftmargin \labelsep 0pt \rightmargin 0pt \topsep -\parskip \partopsep \parskip \itemsep -\parsep \let\makelabel=\py@itemnewline} }{\end{list}} % \optional is used for ``[, arg]``, i.e. desc_optional nodes. \newcommand{\optional}[1]{% {\textnormal{\Large[}}{#1}\hspace{0.5mm}{\textnormal{\Large]}}} \newlength{\py@argswidth} \newcommand{\py@sigparams}[2]{% \parbox[t]{\py@argswidth}{#1\code{)}#2}} \newcommand{\pysigline}[1]{\item[#1]\nopagebreak} \newcommand{\pysiglinewithargsret}[3]{% \settowidth{\py@argswidth}{#1\code{(}}% \addtolength{\py@argswidth}{-2\py@argswidth}% \addtolength{\py@argswidth}{\linewidth}% \item[#1\code{(}\py@sigparams{#2}{#3}]} % This version is being checked in for the historical record; it shows % how I've managed to get some aspects of this to work. It will not % be used in practice, so a subsequent revision will change things % again. This version has problems, but shows how to do something % that proved more tedious than I'd expected, so I don't want to lose % the example completely. 
% \newcommand{\grammartoken}[1]{\texttt{#1}} \newenvironment{productionlist}[1][\@undefined]{ \def\optional##1{{\Large[}##1{\Large]}} \def\production##1##2{\hypertarget{grammar-token-##1}{}% \code{##1}&::=&\code{##2}\\} \def\productioncont##1{& &\code{##1}\\} \def\token##1{##1} \let\grammartoken=\token \parindent=2em \indent \begin{tabular}{lcl} }{% \end{tabular} } % Notices / Admonitions % \newlength{\py@noticelength} \newcommand{\py@heavybox}{ \setlength{\fboxrule}{1pt} \setlength{\fboxsep}{7pt} \setlength{\py@noticelength}{\linewidth} \addtolength{\py@noticelength}{-2\fboxsep} \addtolength{\py@noticelength}{-2\fboxrule} \setlength{\shadowsize}{3pt} \Sbox \minipage{\py@noticelength} } \newcommand{\py@endheavybox}{ \endminipage \endSbox \fbox{\TheSbox} } % Some are quite plain: \newcommand{\py@noticestart@note}{} \newcommand{\py@noticeend@note}{} \newcommand{\py@noticestart@hint}{} \newcommand{\py@noticeend@hint}{} \newcommand{\py@noticestart@important}{} \newcommand{\py@noticeend@important}{} \newcommand{\py@noticestart@tip}{} \newcommand{\py@noticeend@tip}{} % Others gets more visible distinction: \newcommand{\py@noticestart@warning}{\py@heavybox} \newcommand{\py@noticeend@warning}{\py@endheavybox} \newcommand{\py@noticestart@caution}{\py@heavybox} \newcommand{\py@noticeend@caution}{\py@endheavybox} \newcommand{\py@noticestart@attention}{\py@heavybox} \newcommand{\py@noticeend@attention}{\py@endheavybox} \newcommand{\py@noticestart@danger}{\py@heavybox} \newcommand{\py@noticeend@danger}{\py@endheavybox} \newcommand{\py@noticestart@error}{\py@heavybox} \newcommand{\py@noticeend@error}{\py@endheavybox} \newenvironment{notice}[2]{ \def\py@noticetype{#1} \csname py@noticestart@#1\endcsname \par\strong{#2} }{\csname py@noticeend@\py@noticetype\endcsname} % Allow the release number to be specified independently of the % \date{}. This allows the date to reflect the document's date and % release to specify the release that is documented. % \newcommand{\py@release}{} \newcommand{\version}{} \newcommand{\shortversion}{} \newcommand{\releaseinfo}{} \newcommand{\releasename}{Release} \newcommand{\release}[1]{% \renewcommand{\py@release}{\releasename\space\version}% \renewcommand{\version}{#1}} \newcommand{\setshortversion}[1]{% \renewcommand{\shortversion}{#1}} \newcommand{\setreleaseinfo}[1]{% \renewcommand{\releaseinfo}{#1}} % Allow specification of the author's address separately from the % author's name. This can be used to format them differently, which % is a good thing. % \newcommand{\py@authoraddress}{} \newcommand{\authoraddress}[1]{\renewcommand{\py@authoraddress}{#1}} % This sets up the fancy chapter headings that make the documents look % at least a little better than the usual LaTeX output. % \@ifundefined{ChTitleVar}{}{ \ChNameVar{\raggedleft\normalsize\py@HeaderFamily} \ChNumVar{\raggedleft \bfseries\Large\py@HeaderFamily} \ChTitleVar{\raggedleft \rm\Huge\py@HeaderFamily} % This creates chapter heads without the leading \vspace*{}: \def\@makechapterhead#1{% {\parindent \z@ \raggedright \normalfont \ifnum \c@secnumdepth >\m@ne \DOCH \fi \interlinepenalty\@M \DOTI{#1} } } } % Redefine description environment so that it is usable inside fulllineitems. % \renewcommand{\description}{% \list{}{\labelwidth\z@% \itemindent-\leftmargin% \labelsep5pt% \let\makelabel=\descriptionlabel}} % Definition lists; requested by AMK for HOWTO documents. Probably useful % elsewhere as well, so keep in in the general style support. 
% \newenvironment{definitions}{% \begin{description}% \def\term##1{\item[##1]\mbox{}\\*[0mm]} }{% \end{description}% } % Tell TeX about pathological hyphenation cases: \hyphenation{Base-HTTP-Re-quest-Hand-ler} % The following is stuff copied from docutils' latex writer. % \newcommand{\optionlistlabel}[1]{\bf #1 \hfill} \newenvironment{optionlist}[1] {\begin{list}{} {\setlength{\labelwidth}{#1} \setlength{\rightmargin}{1cm} \setlength{\leftmargin}{\rightmargin} \addtolength{\leftmargin}{\labelwidth} \addtolength{\leftmargin}{\labelsep} \renewcommand{\makelabel}{\optionlistlabel}} }{\end{list}} \newlength{\lineblockindentation} \setlength{\lineblockindentation}{2.5em} \newenvironment{lineblock}[1] {\begin{list}{} {\setlength{\partopsep}{\parskip} \addtolength{\partopsep}{\baselineskip} \topsep0pt\itemsep0.15\baselineskip\parsep0pt \leftmargin#1} \raggedright} {\end{list}} % Redefine includgraphics for avoiding images larger than the screen size % If the size is not specified. \let\py@Oldincludegraphics\includegraphics \newbox\image@box% \newdimen\image@width% \renewcommand\includegraphics[2][\@empty]{% \ifx#1\@empty% \setbox\image@box=\hbox{\py@Oldincludegraphics{#2}}% \image@width\wd\image@box% \ifdim \image@width>\linewidth% \setbox\image@box=\hbox{\py@Oldincludegraphics[width=\linewidth]{#2}}% \box\image@box% \else% \py@Oldincludegraphics{#2}% \fi% \else% \py@Oldincludegraphics[#1]{#2}% \fi% } % Fix the index and bibliography environments to add an entry to the Table of % Contents; this is much nicer than just having to jump to the end of the book % and flip around, especially with multiple indexes. % \let\py@OldTheindex=\theindex \renewcommand{\theindex}{ \cleardoublepage \phantomsection \py@OldTheindex \addcontentsline{toc}{chapter}{\indexname} } \let\py@OldThebibliography=\thebibliography \renewcommand{\thebibliography}[1]{ \cleardoublepage \phantomsection \py@OldThebibliography{1} \addcontentsline{toc}{chapter}{\bibname} } % Include hyperref last. \RequirePackage[colorlinks,breaklinks, linkcolor=InnerLinkColor,filecolor=OuterLinkColor, menucolor=OuterLinkColor,urlcolor=OuterLinkColor, citecolor=InnerLinkColor]{hyperref} % Fix anchor placement for figures with captions. % (Note: we don't use a package option here; instead, we give an explicit % \capstart for figures that actually have a caption.) \RequirePackage{hypcap} % From docutils.writers.latex2e \providecommand{\DUspan}[2]{% {% group ("span") to limit the scope of styling commands \@for\node@class@name:=#1\do{% \ifcsname docutilsrole\node@class@name\endcsname% \csname docutilsrole\node@class@name\endcsname% \fi% }% {#2}% node content }% close "span" } SQLAlchemy-0.8.4/doc/changelog/0000755000076500000240000000000012251151573016727 5ustar classicstaff00000000000000SQLAlchemy-0.8.4/doc/changelog/changelog_01.html0000644000076500000240000015307212251147456022061 0ustar classicstaff00000000000000 0.1 Changelog — SQLAlchemy 0.8 Documentation



      0.1 Changelog

      0.1.7

      Released: Fri May 05 2006
      • some fixes to topological sort algorithm(link)

      • added DISTINCT ON support to Postgres (just supply distinct=[col1,col2..]; see the sketch after this list)(link)

      • added __mod__ (% operator) to sql expressions(link)

      • “order_by” mapper property inherited from inheriting mapper(link)

      • fix to column type used when mapper UPDATES/DELETEs(link)

      • with convert_unicode=True, reflection was failing, has been fixed(link)

      • types types types! still weren’t working....have to use TypeDecorator again :((link)

      • mysql binary type converts array output to buffer, fixes PickleType(link)

      • fixed the attributes.py memory leak once and for all(link)

      • unittests are qualified based on the databases that support each one(link)

      • fixed bug where column defaults would clobber VALUES clause of insert objects(link)

      • fixed bug where table def w/ schema name would force engine connection(link)

      • fix for parenthesis to work correctly with subqueries in INSERT/UPDATE(link)

      • HistoryArraySet gets extend() method(link)

      • fixed lazyload support for other comparison operators besides =(link)

      • lazyload fix where two comparisons in the join condition point to the same column(link)

      • added “construct_new” flag to mapper, will use __new__ to create instances instead of __init__ (standard in 0.2)(link)

      • added selectresults.py to SVN, missed it last time(link)

      • tweak to allow a many-to-many relationship from a table to itself via an association table(link)

      • small fix to “translate_row” function used by polymorphic example(link)

      • create_engine uses cgi.parse_qsl to read query string (out the window in 0.2)(link)

      • tweaks to CAST operator(link)

      • fixed function names LOCAL_TIME/LOCAL_TIMESTAMP -> LOCALTIME/LOCALTIMESTAMP(link)

      • fixed order of ORDER BY/HAVING in compile(link)
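
      A short sketch of the DISTINCT ON usage noted in the entry above; the engine URL, table, and column names are invented, and the construction style plus the distinct=[...] keyword follow this 0.1-era description rather than the current API, so treat it as illustrative only.

        from sqlalchemy import *

        # placeholder engine and table, in the engine-bound style of this era
        db = create_engine('postgres://scott:tiger@localhost/test')
        users = Table('users', db,
                      Column('id', Integer, primary_key=True),
                      Column('name', String(50)))

        # per the entry above, passing a list of columns to distinct
        # renders DISTINCT ON (name) on Postgres
        stmt = select([users.c.id, users.c.name], distinct=[users.c.name])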

      0.1.6

      Released: Wed Apr 12 2006
      • support for MS-SQL added courtesy Rick Morrison, Runar Petursson(link)

      • the latest SQLSoup from J. Ellis(link)

      • ActiveMapper has preliminary support for inheritance (Jeff Watkins)(link)

      • added a “mods” system which allows pluggable modules that modify/augment core functionality, using the function “install_mods(*modnames)”.(link)

      • added the first “mod”, SelectResults, which modifies mapper selects to return generators that turn ranges into LIMIT/OFFSET queries (Jonas Borgström)(link)

      • factored out querying capabilities of Mapper into a separate Query object which is Session-centric. this improves the performance of mapper.using(session) and makes other things possible.(link)

      • objectstore/Session refactored, the official way to save objects is now via the flush() method. The begin/commit functionality of Session is factored into LegacySession which is still established as the default behavior, until the 0.2 series.(link)

      • types system is bound to an engine at query compile time, not schema construction time. this simplifies the types system as well as the ProxyEngine.(link)

      • added ‘version_id’ keyword argument to mapper. this keyword should reference a Column object with type Integer, preferably non-nullable, which will be used on the mapped table to track version numbers. this number is incremented on each save operation and is specified in the UPDATE/DELETE conditions so that it factors into the returned row count, which results in a ConcurrencyError if the value received is not the expected count (see the sketch after this list).(link)

      • added ‘entity_name’ keyword argument to mapper. a mapper is now associated with a class via the class object as well as an optional entity_name parameter, which is a string defaulting to None. any number of primary mappers can be created for a class, qualified by the entity name. instances of those classes will issue all of their load and save operations through their entity_name-qualified mapper, and maintain a separate identity in the identity map for an otherwise equivalent object.(link)

      • overhaul to the attributes system. code has been clarified, and also fixed to support proper polymorphic behavior on object attributes.(link)

      • added “for_update” flag to Select objects(link)

      • some fixes for backrefs(link)

      • fix for postgres1 DateTime type(link)

      • documentation pages mostly switched over to Markdown syntax(link)
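
      A sketch of the ‘version_id’ mapper keyword described above; every name here is invented, the engine/table construction is approximate for this release series, and later releases renamed the option, so this is illustrative rather than a tested recipe.

        from sqlalchemy import *

        db = create_engine('postgres://scott:tiger@localhost/test')   # placeholder
        orders = Table('orders', db,
                       Column('id', Integer, primary_key=True),
                       Column('version', Integer, nullable=False),
                       Column('status', String(20)))

        class Order(object):
            pass

        # per the entry above, each UPDATE/DELETE includes the expected version
        # in its WHERE clause and increments it; a row-count mismatch raises
        # ConcurrencyError
        mapper(Order, orders, version_id=orders.c.version)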

      0.1.5

      Released: Mon Mar 27 2006
      • added SQLSession concept to SQLEngine. this object keeps track of retrieving a connection from the connection pool as well as an in-progress transaction. methods push_session() and pop_session() added to SQLEngine which push/pop a new SQLSession onto the engine, allowing operation upon a second connection “nested” within the previous one, allowing nested transactions. Other tricks are sure to come later regarding SQLSession.(link)

      • added nest_on argument to objectstore.Session. This is a single SQLEngine or list of engines for which push_session()/pop_session() will be called each time this Session becomes the active session (via objectstore.push_session() or equivalent). This allows a unit of work Session to take advantage of the nested transaction feature without explicitly calling push_session/pop_session on the engine.(link)

      • factored apart objectstore/unitofwork to separate “Session scoping” from “uow commit heavy lifting”(link)

      • added populate_instance() method to MapperExtension. allows an extension to modify the population of object attributes. this method can call the populate_instance() method on another mapper to proxy the attribute population from one mapper to another; some row translation logic is also built in to help with this.(link)

      • fixed Oracle8-compatibility “use_ansi” flag which converts JOINs to comparisons with the = and (+) operators, passes basic unittests(link)

      • tweaks to Oracle LIMIT/OFFSET support(link)

      • Oracle reflection uses ALL_** views instead of USER_** to get larger list of stuff to reflect from(link)

      • fixes to Oracle foreign key reflection(link)

        References: #105

      • objectstore.commit(obj1, obj2,...) adds an extra step to seek out private relations on properties and delete child objects, even though it’s not a global commit(link)

      • lots and lots of fixes to mappers which use inheritance, strengthened the concept of relations on a mapper being made towards the “local” table for that mapper, not the tables it inherits. allows more complex compositional patterns to work with lazy/eager loading.(link)

      • added support for mappers to inherit from others based on the same table, just specify the same table as that of both parent/child mapper.(link)

      • some minor speed improvements to the attributes system with regards to instantiating and populating new objects.(link)

      • fixed MySQL binary unit test(link)

      • INSERTs can receive clause elements as VALUES arguments, not just literal values(link)

      • support for calling multi-tokened functions, i.e. schema.mypkg.func()(link)

      • added J. Ellis’ SQLSoup module to extensions package(link)

      • added “polymorphic” examples illustrating methods to load multiple object types from one mapper, the second of which uses the new populate_instance() method. small improvements to mapper, UNION construct to help the examples along(link)

      • improvements/fixes to session.refresh()/session.expire() (which may have been called “invalidate” earlier..)(link)

      • added session.expunge() which totally removes an object from the current session(link)

      • added *args, **kwargs pass-thru to engine.transaction(func) allowing easier creation of transactionalizing decorator functions(link)

      • added iterator interface to ResultProxy: “for row in result:...”(link)

      • added assertion to tx = session.begin(); tx.rollback(); tx.begin(), i.e. can’t use it after a rollback()(link)

      • added date conversion on bind parameter fix to SQLite enabling dates to work with pysqlite1(link)

      • improvements to subqueries to more intelligently construct their FROM clauses(link)

        References: #116

      • added PickleType to types (see the sketch after this list).(link)

      • fixed two bugs with column labels with regards to bind parameters: bind param keynames are now generated from a column “label” in all relevant cases to take advantage of excess-name-length rules, and a check was added for a peculiar collision against a column named the same as “tablename_colname”(link)

      • major overhaul to unit of work documentation, other documentation sections.(link)

      • fixed attributes bug where if an object is committed, its lazy-loaded list got blown away if it hadn’t been loaded(link)

      • added unique_connection() method to engine, connection pool to return a connection that is not part of the thread-local context or any current transaction(link)

      • added invalidate() function to pooled connection. will remove the connection from the pool. still need work for engines to auto-reconnect to a stale DB though.(link)

      • added distinct() function to column elements so you can do func.count(mycol.distinct())(link)

      • added “always_refresh” flag to Mapper, creates a mapper that will always refresh the attributes of objects it gets/selects from the DB, overwriting any changes made.(link)
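
      A minimal sketch of the PickleType addition noted above; the table, column, and engine URL are invented and the engine-bound construction is approximate for this release series.

        from sqlalchemy import *

        db = create_engine('sqlite://')    # placeholder in-memory database
        snapshots = Table('snapshots', db,
                          Column('id', Integer, primary_key=True),
                          Column('payload', PickleType()))
        snapshots.create()

        # the bound value is pickled on the way in and unpickled on the way out
        snapshots.insert().execute(payload={'widgets': [1, 2, 3]})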

      0.1.4

      Released: Mon Mar 13 2006
      • create_engine() now uses genericized parameters; host/hostname, db/dbname/database, password/passwd, etc. for all engine connections. makes engine URIs much more “universal”(link)

      • added support for SELECT statements embedded into a column clause, using the flag “scalar=True”(link)

      • another overhaul to EagerLoading when used in conjunction with mappers that inherit; improvements to eager loads figuring out their aliased queries correctly, also relations set up against a mapper with inherited mappers will create joins against the table that is specific to the mapper itself (i.e. and not any tables that are inherited/are further down the inheritance chain), this can be overridden by using custom primary/secondary joins.(link)

      • added J.Ellis patch to mapper.py so that selectone() throws an exception if query returns more than one object row, selectfirst() to not throw the exception. also adds selectfirst_by (synonymous with get_by) and selectone_by(link)

      • added onupdate parameter to Column, will exec SQL/python upon an update statement. Also adds “for_update=True” to all DefaultGenerator subclasses (see the sketch after this list)(link)

      • added support for Oracle table reflection contributed by Andrija Zaric; still some bugs to work out regarding composite primary keys/dictionary selection(link)

      • checked in an initial Firebird module, awaiting testing.(link)

      • added sql.ClauseParameters dictionary object as the result for compiled.get_params(), does late-typeprocessing of bind parameters so that the original values are easier to access(link)

      • more docs for indexes, column defaults, connection pooling, engine construction(link)

      • overhaul to the construction of the types system. uses a simpler inheritance pattern so that any of the generic types can be easily subclassed, with no need for TypeDecorator.(link)

      • added “convert_unicode=False” parameter to SQLEngine, will cause all String types to perform unicode encoding/decoding (makes Strings act like Unicodes)(link)

      • added ‘encoding=”utf8”’ parameter to engine. the given encoding will be used for all encode/decode calls within Unicode types as well as Strings when convert_unicode=True.(link)

      • improved support for mapping against UNIONs, added polymorph.py example to illustrate multi-class mapping against a UNION(link)

      • fix to SQLite LIMIT/OFFSET syntax(link)

      • fix to Oracle LIMIT syntax(link)

      • added backref() function, allows backreferences to have keyword arguments that will be passed to the backref.(link)

      • Sequences and ColumnDefault objects can do execute()/scalar() standalone(link)

      • SQL functions (i.e. func.foo()) can do execute()/scalar() standalone(link)

      • fix to SQL functions so that the ANSI-standard functions, i.e. current_timestamp etc., do not specify parenthesis. all other functions do.(link)

      • added setattr_clean and append_clean to SmartProperty, which set attributes without triggering a “dirty” event or any history. used as: myclass.prop1.setattr_clean(myobject, ‘hi’)(link)

      • improved support to column defaults when used by mappers; mappers will pull pre-executed defaults from statement’s executed bind parameters (pre-conversion) to populate them into a saved object’s attributes; if any PassiveDefaults have fired off, will instead post-fetch the row from the DB to populate the object.(link)

      • added ‘get_session().invalidate(*obj)’ method to objectstore, instances will refresh() themselves upon the next attribute access.(link)

      • improvements to SQL func calls including an “engine” keyword argument so they can be execute()d or scalar()ed standalone, also added func accessor to SQLEngine(link)

      • fix to MySQL4 custom table engines, i.e. TYPE instead of ENGINE(link)

      • slightly enhanced logging, includes timestamps and a somewhat configurable formatting system, in lieu of a full-blown logging system(link)

      • improvements to the ActiveMapper class from the TG gang, including many-to-many relationships(link)

      • added Double and TinyInt support to mysql(link)
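
      A sketch of the onupdate column parameter described above; names are invented and the construction style is approximate for this release series.

        import datetime
        from sqlalchemy import *

        db = create_engine('sqlite://')    # placeholder
        pages = Table('pages', db,
                      Column('id', Integer, primary_key=True),
                      Column('content', String()),
                      # default fires on INSERT; the new onupdate fires each
                      # time an UPDATE is issued for the row
                      Column('updated_at', DateTime,
                             default=datetime.datetime.now,
                             onupdate=datetime.datetime.now))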

      0.1.3

      Released: Thu Mar 02 2006
      • completed “post_update” feature, will add a second update statement before inserts and after deletes in order to reconcile a relationship without any dependencies being created; used when persisting two rows that are dependent on each other(link)

      • completed mapper.using(session) function, localized per-object Session functionality; objects can be declared and manipulated as local to any user-defined Session(link)

      • fix to Oracle “row_number over” clause with multiple tables(link)

      • mapper.get() was not selecting multiple-keyed objects if the mapper’s table was a join, such as in an inheritance relationship, this is fixed.(link)

      • overhaul to sql/schema packages so that the sql package can run all on its own, producing selects, inserts, etc. without any engine dependencies. builds upon new TableClause/ColumnClause lexical objects. Schema’s Table/Column objects are the “physical” subclasses of them. simplifies schema/sql relationship, extensions (like proxyengine), and speeds overall performance by a large margin. removes the entire getattr() behavior that plagued 0.1.1.(link)

      • refactoring of how the mapper “synchronizes” data between two objects into a separate module, works better with properties attached to a mapper that has an additional inheritance relationship to one of the related tables, also the same methodology used to synchronize parent/child objects now used by mapper to synchronize between inherited and inheriting mappers.(link)

      • made objectstore “check for out-of-identitymap” more aggressive, will perform the check when object attributes are modified or the object is deleted(link)

      • Index object fully implemented, can be constructed standalone, or via “index” and “unique” arguments on Columns (see the sketch after this list).(link)

      • added “convert_unicode” flag to SQLEngine, will treat all String/CHAR types as Unicode types, with raw-byte/utf-8 translation on the bind parameter and result set side.(link)

      • postgres maintains a list of ANSI functions that must have no parenthesis so function calls with no arguments work consistently(link)

      • tables can be created with no engine specified. this will default their engine to a module-scoped “default engine” which is a ProxyEngine. this engine can be connected via the function “global_connect”.(link)

      • added “refresh(*obj)” method to objectstore / Session to reload the attributes of any set of objects from the database unconditionally(link)
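
      A sketch of the two Index spellings described above; all names are invented and the engine-bound construction is approximate for this release series.

        from sqlalchemy import *

        db = create_engine('sqlite://')    # placeholder
        addresses = Table('addresses', db,
                          Column('id', Integer, primary_key=True),
                          Column('email', String(100), unique=True),   # shorthand unique index
                          Column('user_id', Integer, index=True))      # shorthand plain index
        addresses.create()

        # standalone construction, followed by an explicit create()
        i = Index('ix_addresses_user_email', addresses.c.user_id, addresses.c.email)
        i.create()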

      0.1.2

      Released: Fri Feb 24 2006
      • fixed a recursive call in schema that was somehow running 994 times then returning normally. broke nothing, slowed down everything. thanks to jpellerin for finding this.(link)

      0.1.1

      Released: Thu Feb 23 2006
      • small fix to Function class so that expressions with a func.foo() use the type of the Function object (i.e. the left side) as the type of the boolean expression, not the other side which is more of a moving target (changeset 1020).(link)

      • creating self-referring mappers with backrefs slightly easier (but still not that easy - changeset 1019)(link)

      • fixes to one-to-one mappings (changeset 1015)(link)

      • psycopg1 date/time issue with None fixed (changeset 1005)(link)

      • two issues related to postgres, which doesn’t want to give you the “lastrowid” since oids are deprecated:

        • postgres database-side defaults that are on primary key cols do execute explicitly beforehand, even though thats not the idea of a PassiveDefault. this is because sequences on columns get reflected as PassiveDefaults, but need to be explicitly executed on a primary key col so we know what we just inserted.
        • if you did add a row that has a bunch of database-side defaults on it, and the PassiveDefault thing was working the old way, i.e. they just execute on the DB side, the “cant get the row back without an OID” exception that occurred also will not happen unless someone (usually the ORM) explicitly asks for it.
        (link)

      • fixed a glitch with engine.execute_compiled where it was making a second ResultProxy that just got thrown away.(link)

      • began to implement newer logic in object properties. you can now say myclass.attr.property, which will give you the PropertyLoader corresponding to that attribute, i.e. myclass.mapper.props[‘attr’](link)

      • eager loading has been internally overhauled to use aliases at all times. more complicated chains of eager loads can now be created without any need for explicit “use aliases”-type instructions. EagerLoader code is also much simpler now.(link)

      • a new somewhat experimental flag “use_update” added to relations, indicates that this relationship should be handled by a second UPDATE statement, either after a primary INSERT or before a primary DELETE. handles circular row dependencies.(link)

      • added exceptions module, all raised exceptions (except for some KeyError/AttributeError exceptions) descend from these classes.(link)

      • fix to date types with MySQL, returned timedelta converted to datetime.time(link)

      • two-phase objectstore.commit operations (i.e. begin/commit) now return a transactional object (SessionTrans), to more clearly indicate transaction boundaries.(link)

      • Index object with create/drop support added to schema(link)

      • fix to postgres, where it will explicitly pre-execute a PassiveDefault on a table if it is a primary key column, pursuant to the ongoing “we cant get inserted rows back from postgres” issue(link)

      • change to information_schema query that gets back postgres table defs, now uses explicit JOIN keyword, since one user had faster performance with 8.1(link)

      • fix to engine.process_defaults so it works correctly with a table that has different column name/column keys (changeset 982)(link)

      • a column can only be attached to one table - this is now asserted(link)

      • postgres time types descend from Time type(link)

      • fix to alltests so that it runs types test (now named testtypes)(link)

      • fix to Join object so that it correctly exports its foreign keys (cs 973)(link)

      • creating relationships against mappers that use inheritance fixed (cs 973)(link)

      SQLAlchemy-0.8.4/doc/changelog/changelog_02.html0000644000076500000240000020465212251147456022063 0ustar classicstaff00000000000000 0.2 Changelog — SQLAlchemy 0.8 Documentation



      0.2 Changelog

      0.2.8

      Released: Tue Sep 05 2006
      • cleanup on connection methods + documentation. custom DBAPI arguments specified in query string, ‘connect_args’ argument to ‘create_engine’, or custom creation function via ‘creator’ function to ‘create_engine’.(link)

      • added “recycle” argument to Pool, is “pool_recycle” on create_engine, defaults to 3600 seconds; connections after this age will be closed and replaced with a new one, to handle databases that automatically close stale connections (see the sketch after this list)(link)

        References: #274

      • changed “invalidate” semantics with pooled connection; will instruct the underlying connection record to reconnect the next time its called. “invalidate” will also automatically be called if any error is thrown in the underlying call to connection.cursor(). this will hopefully allow the connection pool to reconnect to a database that had been stopped and started without restarting the connecting application(link)

        References: #121

      • eesh ! the tutorial doctest was broken for quite some time.(link)

      • add_property() method on mapper does a “compile all mappers” step in case the given property references a non-compiled mapper (as it did in the case of the tutorial !)(link)

      • check for pg sequence already existing before create(link)

        References: #277

      • if a contextual session is established via MapperExtension.get_session (as it is using the sessioncontext plugin, etc), a lazy load operation will use that session by default if the parent object is not persistent with a session already.(link)

      • lazy loads will not fire off for an object that does not have a database identity (why? see http://www.sqlalchemy.org/trac/wiki/WhyDontForeignKeysLoadData)(link)

      • unit-of-work does a better check for “orphaned” objects that are part of a “delete-orphan” cascade, for certain conditions where the parent isn’t available to cascade from.(link)

      • mappers can tell if one of their objects is an “orphan” based on interactions with the attribute package. this check is based on a status flag maintained for each relationship when objects are attached and detached from each other.(link)

      • it is now invalid to declare a self-referential relationship with “delete-orphan” (as the abovementioned check would make them impossible to save)(link)

      • improved the check for objects being part of a session when the unit of work seeks to flush() them as part of a relationship..(link)

      • statement execution supports using the same BindParam object more than once in an expression; simplified handling of positional parameters. nice job by Bill Noon figuring out the basic idea.(link)

        References: #280

      • postgres reflection moved to use pg_schema tables, can be overridden with use_information_schema=True argument to create_engine.(link)

        References: #60, #71

      • added case_sensitive argument to MetaData, Table, Column, determines itself automatically based on if a parent schemaitem has a non-None setting for the flag, or if not, then whether the identifier name is all lower case or not. when set to True, quoting is applied to identifiers with mixed or uppercase identifiers. quoting is also applied automatically in all cases to identifiers that are known to be reserved words or contain other non-standard characters. various database dialects can override all of this behavior, but currently they are all using the default behavior. tested with postgres, mysql, sqlite, oracle. needs more testing with firebird, ms-sql. part of the ongoing work with(link)

        References: #155

      • unit tests updated to run without any pysqlite installed; pool test uses a mock DBAPI(link)

      • urls support escaped characters in passwords(link)

        References: #281

      • added limit/offset to UNION queries (though not yet in oracle)(link)

      • added “timezone=True” flag to DateTime and Time types. postgres so far will convert this to “TIME[STAMP] (WITH|WITHOUT) TIME ZONE”, so that control over timezone presence is more controllable (psycopg2 returns datetimes with tzinfo’s if available, which can create confusion against datetimes that dont).(link)

      • fix to using query.count() with distinct, **kwargs with SelectResults count()(link)

        References: #287

      • deregister Table from MetaData when autoload fails;(link)

        References: #289

      • import of py2.5’s sqlite3(link)

        References: #293

      • unicode fix for startswith()/endswith()(link)

        References: #296
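
      A sketch of the pool_recycle setting described above; the URL is a placeholder.

        from sqlalchemy import create_engine

        # connections older than one hour are closed and re-opened on next
        # checkout, which keeps a server-side idle timeout (e.g. MySQL's
        # wait_timeout) from handing the application a dead connection
        engine = create_engine('mysql://scott:tiger@localhost/test',
                               pool_recycle=3600)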

      0.2.7

      Released: Sat Aug 12 2006
      • quoting facilities set up so that database-specific quoting can be turned on for individual table, schema, and column identifiers when used in all queries/creates/drops. Enabled via “quote=True” in Table or Column, as well as “quote_schema=True” in Table (see the sketch after this list). Thanks to Aaron Spike for the excellent efforts.(link)

      • assignmapper was setting is_primary=True, causing all sorts of mayhem by not raising an error when redundant mappers were set up, fixed(link)

      • added allow_null_pks option to Mapper, allows rows where some primary key columns are null (i.e. when mapping to outer joins etc)(link)

      • modification to unitofwork to not maintain ordering within the “new” list or within the UOWTask “objects” list; instead, new objects are tagged with an ordering identifier as they are registered as new with the session, and the INSERT statements are then sorted within the mapper save_obj. the INSERT ordering has basically been pushed all the way to the end of the flush cycle. that way the various sorts and organizations occurring within UOWTask (particularly the circular task sort) don’t have to worry about maintaining order (which they weren’t anyway)(link)

      • fixed reflection of foreign keys to autoload the referenced table if it was not loaded already(link)

        • pass URL query string arguments to connect() function
        (link)

        References: #256

        • oracle boolean type
        (link)

        References: #257

      • custom primary/secondary join conditions in a relation will be propagated to backrefs by default. specifying a backref() will override this behavior.(link)

      • better check for ambiguous join conditions in sql.Join; propagates to a better error message in PropertyLoader (i.e. relation()/backref()) for when the join condition can’t be reasonably determined.(link)

      • sqlite creates ForeignKeyConstraint objects properly upon table reflection.(link)

      • adjustments to pool stemming from changes made for #224. overflow counter should only be decremented if the connection actually succeeded. added a test script to attempt testing this.(link)

        References: #224

      • fixed mysql reflection of default values to be PassiveDefault(link)

      • added reflected ‘tinyint’, ‘mediumint’ type to MS-SQL.(link)

        References: #263, #264

      • SingletonThreadPool has a size and does a cleanup pass, so that only a given number of thread-local connections stay around (needed for sqlite applications that dispose of threads en masse)(link)

      • fixed small pickle bug(s) with lazy loaders(link)

        References: #267, #265

      • fixed possible error in mysql reflection where certain versions return an array instead of string for SHOW CREATE TABLE call(link)

      • fix to lazy loads when mapping to joins(link)

        References: #1770

      • all create()/drop() calls have a keyword argument of “connectable”. “engine” is deprecated.(link)

      • fixed ms-sql connect() to work with adodbapi(link)

      • added “nowait” flag to Select()(link)

      • inheritance check uses issubclass() instead of direct __mro__ check to make sure class A inherits from B, allowing mapper inheritance to more flexibly correspond to class inheritance(link)

        References: #271

      • SelectResults will use a subselect, when calling an aggregate (i.e. max, min, etc.) on a SelectResults that has an ORDER BY clause(link)

        References: #252

      • fixes to types so that database-specific types more easily used; fixes to mysql text types to work with this methodology(link)

        References: #269

      • some fixes to sqlite date type organization(link)

      • added MSTinyInteger to MS-SQL(link)

        References: #263
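
      A sketch of the explicit quoting flags described above; the table, column, and schema names are made up, and the MetaData construction is approximate for this release series.

        from sqlalchemy import *

        metadata = MetaData()

        # quote=True forces quoting of an individual identifier, and
        # quote_schema=True does the same for the schema name, so this table
        # renders as "Billing"."Order" with a quoted "Number" column
        orders = Table('Order', metadata,
                       Column('Number', Integer, primary_key=True, quote=True),
                       schema='Billing',
                       quote=True,
                       quote_schema=True)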

      0.2.6

      Released: Thu Jul 20 2006
      • big overhaul to schema to allow truly composite primary and foreign key constraints, via new ForeignKeyConstraint and PrimaryKeyConstraint objects. Existing methods of primary/foreign key creation have not been changed but use these new objects behind the scenes. table creation and reflection is now more table oriented rather than column oriented.(link)

        References: #76

      • overhaul to MapperExtension calling scheme, wasn’t working very well previously(link)

      • tweaks to ActiveMapper, supports self-referential relationships(link)

      • slight rearrangement to objectstore (in activemapper/threadlocal) so that the SessionContext is referenced by ‘.context’ instead of subclassed directly.(link)

      • activemapper will use threadlocal’s objectstore if the mod is activated when activemapper is imported(link)

      • small fix to URL regexp to allow filenames with ‘@’ in them(link)

      • fixes to Session expunge/update/etc...needs more cleanup.(link)

      • select_table mappers still weren’t always compiling(link)

      • fixed up Boolean datatype(link)

      • added count()/count_by() to list of methods proxied by assignmapper; this also adds them to activemapper(link)

      • connection exceptions wrapped in DBAPIError(link)

      • ActiveMapper now supports autoloading column definitions from the database if you supply a __autoload__ = True attribute in your mapping inner-class. Currently this does not support reflecting any relationships.(link)

      • deferred column load could screw up the connection status in a flush() under some circumstances, this was fixed(link)

      • expunge() was not working with cascade, fixed.(link)

      • potential endless loop in cascading operations fixed.(link)

      • added “synonym()” function, applied to properties to have a propname the same as another, for the purposes of overriding props and allowing the original propname to be accessible in select_by().(link)

      • fix to typing in clause construction which specifically helps type issues with polymorphic_union (CAST/ColumnClause propagates its type to proxy columns)(link)

      • mapper compilation work ongoing, someday it’ll work.... moved around the initialization of MapperProperty objects to be after all mappers are created, to better handle circular compilations. the do_init() method is now called on all properties, which are more aware of their “inherited” status where applicable.

      • eager loads explicitly disallowed on self-referential relationships, or relationships to an inheriting mapper (which is also self-referential)(link)

      • reduced bind param size in query._get to appease the picky oracle(link)

        References: #244

      • added ‘checkfirst’ argument to table.create()/table.drop(), as well as table.exists()(link)

        References: #234
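
        For illustration, a minimal sketch of the new arguments, assuming the bound-metadata style of this era (the sqlite URL and table are placeholders):

          from sqlalchemy import BoundMetaData, Table, Column, Integer

          meta = BoundMetaData('sqlite://')
          users = Table('users', meta, Column('id', Integer, primary_key=True))

          users.create(checkfirst=True)     # only emits CREATE TABLE if the table is absent
          if users.exists():
              users.drop(checkfirst=True)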

      • some other ongoing fixes to inheritance(link)

        References: #245

      • attribute/backref/orphan/history-tracking tweaks as usual...(link)

      0.2.5

      Released: Sat Jul 08 2006
      • fixed endless loop bug in select_by(), if the traversal hit two mappers that referenced each other(link)

      • upgraded all unittests to insert ‘./lib/’ into sys.path, working around new setuptools PYTHONPATH-killing behavior(link)

      • further fixes with attributes/dependencies/etc....(link)

      • improved error handling for when DynamicMetaData is not connected(link)

      • MS-SQL support largely working (tested with pymssql)(link)

      • ordering of UPDATE and DELETE statements within groups is now in order of primary key values, for more deterministic ordering(link)

      • after_insert/delete/update mapper extensions now called per object, not per-object-per-table(link)

      • further fixes/refactorings to mapper compilation(link)

      0.2.4

      Released: Tue Jun 27 2006
      • try/except when the mapper sets init.__name__ on a mapped class, supports python 2.3(link)

      • fixed bug where threadlocal engine would still autocommit despite a transaction in progress(link)

      • lazy load and deferred load operations require the parent object to be in a Session to do the operation; whereas before the operation would just return a blank list or None, it now raises an exception.(link)

      • Session.update() is slightly more lenient if the session to which the given object was formerly attached to was garbage collected; otherwise still requires you explicitly remove the instance from the previous Session.(link)

      • fixes to mapper compilation, checking for more error conditions(link)

      • small fix to eager loading combined with ordering/limit/offset(link)

      • utterly remarkable: added a single space between ‘CREATE TABLE’ and ‘(<the rest of it>’ since that’s how MySQL indicates a non-reserved word tablename.....

        References: #206

      • more fixes to inheritance, related to many-to-many relations properly saving(link)

      • fixed bug when specifying explicit module to mysql dialect(link)

      • when QueuePool times out it raises a TimeoutError instead of erroneously making another connection(link)

      • Queue.Queue usage in pool has been replaced with a locally modified version (works in py2.3/2.4!) that uses a threading.RLock for a mutex. this is to fix a reported case where a ConnectionFairy’s __del__() method got called within the Queue’s get() method, which then returns its connection to the Queue via the put() method, causing a reentrant hang unless threading.RLock is used.(link)

      • postgres will not place SERIAL keyword on a primary key column if it has a foreign key constraint(link)

      • cursor() method on ConnectionFairy allows db-specific extension arguments to be propagated(link)

        References: #221

      • lazy load bind params properly propagate column type(link)

        References: #225

      • new MySQL types: MSEnum, MSTinyText, MSMediumText, MSLongText, etc.; more support for MS-specific length/precision params in numeric types. patch courtesy Mike Bernson

      • some fixes to connection pool invalidate()(link)

        References: #224

      0.2.3

      Released: Sat Jun 17 2006
      • overhaul to mapper compilation to be deferred. this allows mappers to be constructed in any order, and their relationships to each other are compiled when the mappers are first used.(link)

      • fixed a pretty big speed bottleneck in cascading behavior particularly when backrefs were in use(link)

      • the attribute instrumentation module has been completely rewritten; it’s now a great deal simpler and clearer, and slightly faster. the “history” of an attribute is no longer micromanaged with each change and is instead part of a “CommittedState” object created when the instance is first loaded. HistoryArraySet is gone; the behavior of list attributes is now more open-ended (i.e. they’re not sets anymore)

      • py2.4 “set” construct used internally, falls back to sets.Set when “set” not available/ordering is needed.(link)

      • fix to transaction control, so that repeated rollback() calls don’t fail (was failing pretty badly when flush() would raise an exception in a larger try/except transaction block)

      • “foreignkey” argument to relation() can also be a list. fixed auto-foreignkey detection(link)

        References: #151

      • fixed bug where tables with schema names weren’t getting indexed in the MetaData object properly

      • fixed bug where a Column with a redefined “key” property wasn’t getting type conversion applied in the ResultProxy

        References: #207

      • fixed ‘port’ attribute of URL to be an integer if present(link)

      • fixed old bug where if a many-to-many table mapped as “secondary” had extra columns, delete operations didn’t work

      • bugfixes for mapping against UNION queries(link)

      • fixed incorrect exception class thrown when no DB driver present(link)

      • added NonExistentTable exception, thrown when reflecting a table that doesn’t exist

        References: #138

      • small fix to ActiveMapper regarding one-to-one backrefs, other refactorings(link)

      • overridden constructor in mapped classes gets __name__ and __doc__ from the original class(link)

      • fixed small bug in selectresult.py regarding mapper extension(link)

        References: #200

      • small tweak to cascade_mappers, not very strongly supported function at the moment(link)

      • some fixes to between(), column.between() to propagate typing information better(link)

        References: #202

      • if an object fails to be constructed, it is not added to the session

        References: #203

      • CAST function has been made into its own clause object with its own compilation function in ansicompiler; allows MySQL to silently ignore most CAST calls since MySQL seems to only support the standard CAST syntax with Date types. MySQL-compatible CAST support for strings, ints, etc. a TODO(link)

      0.2.2

      Released: Mon Jun 05 2006
      • big improvements to polymorphic inheritance behavior, enabling it to work with adjacency list table structures(link)

        References: #190

      • major fixes and refactorings to inheritance relationships overall, more unit tests(link)

      • fixed “echo_pool” flag on create_engine()(link)

      • fix to docs, removed incorrect info that close() is unsafe to use with threadlocal strategy (it’s totally safe!)

      • create_engine() can take URLs as string or unicode(link)

        References: #188

      • firebird support partially completed; thanks to James Ralston and Brad Clements for their efforts.(link)

      • Oracle url translation was broken, fixed, will feed host/port/sid into cx_oracle makedsn() if ‘database’ field is present, else uses straight TNS name from the ‘host’ field(link)

      • fix to using unicode criterion for query.get()/query.load()(link)

      • count() function on selectables now uses table primary key or first column instead of “1” for criterion, also uses label “rowcount” instead of “count”.(link)

      • got rudimentary “mapping to multiple tables” functionality cleaned up and more correctly documented

      • restored global_connect() function, attaches to a DynamicMetaData instance called “default_metadata”. leaving MetaData arg to Table out will use the default metadata.(link)

      • fixes to session cascade behavior, entity_name propagation

      • reorganized unittests into subdirectories(link)

      • more fixes to threadlocal connection nesting patterns(link)

      0.2.1

      Released: Mon May 29 2006
      • “pool” argument to create_engine() properly propagates(link)

      • fixes to URL, raises exception if not parsed, does not pass blank fields along to the DB connect string (a string such as user:host@/db was breaking on postgres)(link)

      • small fixes to Mapper when it inserts and tries to get new primary key values back(link)

      • rewrote half of TLEngine, the ComposedSQLEngine used with ‘strategy=”threadlocal”’. it now properly implements engine.begin()/ engine.commit(), which nest fully with connection.begin()/trans.commit(). added about six unittests.(link)

      • major “duh” in pool.Pool, forgot to put back the WeakValueDictionary. unittest which was supposed to check for this was also silently missing it. fixed unittest to ensure that ConnectionFairy properly falls out of scope.(link)

      • placeholder dispose() method added to SingletonThreadPool; doesn’t do anything yet

      • rollback() is automatically called when an exception is raised, but only if there’s no transaction in process (i.e. works more like autocommit).

      • fixed exception raise in sqlite if no sqlite module present(link)

      • added extra example detail for association object doc(link)

      • Connection adds checks for already being closed(link)

      0.2.0

      Released: Sat May 27 2006
      • overhaul to Engine system so that what was formerly the SQLEngine is now a ComposedSQLEngine which consists of a variety of components, including a Dialect, ConnectionProvider, etc. This impacted all the db modules as well as Session and Mapper.(link)

      • create_engine now takes only RFC-1738-style strings: driver://user:password@host:port/database(link)
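
        For example, a sketch of the new URL format (credentials, host and database are placeholders):

          from sqlalchemy import create_engine

          # driver://user:password@host:port/database
          engine = create_engine('postgres://scott:tiger@localhost:5432/test')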

      • total rewrite of connection-scoping methodology, Connection objects can now execute clause elements directly, added explicit “close” as well as support throughout Engine/ORM to handle closing properly, no longer relying upon __del__ internally to return connections to the pool.(link)

        References: #152

      • overhaul to Session interface and scoping. uses hibernate-style methods, including query(class), save(), save_or_update(), etc. no threadlocal scope is installed by default. Provides a binding interface to specific Engines and/or Connections so that underlying Schema objects do not need to be bound to an Engine. Added a basic SessionTransaction object that can simplistically aggregate transactions across multiple engines.(link)

      • overhaul to mapper’s dependency and “cascade” behavior; dependency logic factored out of properties.py into a separate module “dependency.py”. “cascade” behavior is now explicitly controllable, proper implementation of “delete”, “delete-orphan”, etc. dependency system can now determine at flush time if a child object has a parent or not so that it makes better decisions on how that child should be updated in the DB with regards to deletes.(link)

      • overhaul to Schema to build upon MetaData object instead of an Engine. Entire SQL/Schema system can be used with no Engines whatsoever, executed solely by an explicit Connection object. the “bound” methodology exists via BoundMetaData for schema objects. ProxyEngine is generally not needed anymore and is replaced by DynamicMetaData.

      • true polymorphic behavior implemented; fixes #167

        References: #167

      • “oid” system has been totally moved into compile-time behavior; if oid columns are used in an order_by where they are not available, the order_by doesn’t get compiled; fixes #147

        References: #147

      • overhaul to packaging; “mapping” is now “orm”, “objectstore” is now “session”, the old “objectstore” namespace gets loaded in via the “threadlocal” mod if used(link)

      • mods are now called in via “import <modname>”. extensions are favored over mods, since mods are globally monkeypatching

      • fix to add_property so that it propagates properties to inheriting mappers(link)

        References: #154

      • backrefs create themselves against the primary mapper of their originating property; primary/secondary join arguments can be specified to override this. helps their usage with polymorphic mappers

      • “table exists” function has been implemented(link)

        References: #31

      • “create_all/drop_all” added to MetaData object(link)

        References: #98

      • improvements and fixes to topological sort algorithm, as well as more unit tests(link)

      • tutorial page added to docs which also can be run with a custom doctest runner to ensure its properly working. docs generally overhauled to deal with new code patterns(link)

      • many more fixes, refactorings.(link)

      • migration guide is available on the Wiki at http://www.sqlalchemy.org/trac/wiki/02Migration(link)

      SQLAlchemy-0.8.4/doc/changelog/changelog_03.html0000644000076500000240000056047412251147457022074 0ustar classicstaff00000000000000 0.3 Changelog — SQLAlchemy 0.8 Documentation

      SQLAlchemy 0.8 Documentation

      Release: 0.8.4 | Release Date: December 8, 2013

      0.3 Changelog

      0.3.11

      Released: Sun Oct 14 2007

      orm

      • [orm] added a check for joining from A->B using join(), along two different m2m tables. this raises an error in 0.3 but is possible in 0.4 when aliases are used.(link)

        References: #687

      • [orm] fixed small exception throw bug in Session.merge()(link)

      • [orm] fixed bug where mapper, being linked to a join where one table had no PK columns, would not detect that the joined table had no PK.(link)

      • [orm] fixed bugs in determining proper sync clauses from custom inherit conditions(link)

        References: #769

      • [orm] backref remove object operation doesn’t fail if the other-side collection doesn’t contain the item, supports noload collections(link)

        References: #813

      engine

      • [engine] fixed another occasional race condition which could occur when using pool with threadlocal setting(link)

      sql

      • [sql] tweak DISTINCT precedence for clauses like func.count(t.c.col.distinct())(link)

      • [sql] Fixed detection of internal ‘$’ characters in :bind$params(link)

        References: #719

      • [sql] don’t assume join criterion consists only of column objects

        References: #768

      • [sql] adjusted operator precedence of NOT to match ‘==’ and others, so that ~(x==y) produces NOT (x=y), which is compatible with MySQL < 5.0 (doesn’t like “NOT x=y”)(link)

        References: #764

      mysql

      • [mysql] fixed specification of YEAR columns when generating schema(link)

      sqlite

      • [sqlite] passthrough for stringified dates(link)

      mssql

      • [mssql] added support for TIME columns (simulated using DATETIME)(link)

        References: #679

      • [mssql] added support for BIGINT, MONEY, SMALLMONEY, UNIQUEIDENTIFIER and SQL_VARIANT(link)

        References: #721

      • [mssql] index names are now quoted when dropping from reflected tables(link)

        References: #684

      • [mssql] can now specify a DSN for PyODBC, using a URI like mssql:///?dsn=bob(link)
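
        A minimal sketch of the DSN-style URL described above ('bob' is the example DSN name):

          from sqlalchemy import create_engine

          engine = create_engine('mssql:///?dsn=bob')   # connects via the pre-configured ODBC DSN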

      oracle

      • [oracle] removed LONG_STRING, LONG_BINARY from “binary” types, so type objects don’t try to read their values as LOB.(link)

        References: #622, #751

      firebird

      • [firebird] supports_sane_rowcount() set to False due to ticket #370 (right way).(link)

      • [firebird] fixed reflection of Column’s nullable property.(link)

      misc

      • [postgres] when reflecting tables from alternate schemas, the “default” placed upon the primary key, i.e. usually a sequence name, has the “schema” name unconditionally quoted, so that schema names which need quoting are fine. its slightly unnecessary for schema names which don’t need quoting but not harmful.(link)

      0.3.10

      Released: Fri Jul 20 2007

      general

      • [general] a new mutex that was added in 0.3.9 causes the pool_timeout feature to fail during a race condition; threads would raise TimeoutError immediately with no delay if many threads push the pool into overflow at the same time. this issue has been fixed.(link)

      orm

      • [orm] cleanup to connection-bound sessions, SessionTransaction(link)

      sql

      • [sql] got connection-bound metadata to work with implicit execution(link)

      • [sql] foreign key specs can have any character in their identifiers

        References: #667

      • [sql] added commutativity-awareness to binary clause comparisons to each other, improves ORM lazy load optimization(link)

        References: #664

      misc

      • [postgres] fixed max identifier length (63)(link)

        References: #571

      0.3.9

      Released: Sun Jul 15 2007

      general

      • [general] better error message for NoSuchColumnError(link)

        References: #607

      • [general] finally figured out how to get setuptools version in, available as sqlalchemy.__version__(link)

        References: #428

      • [general] the various “engine” arguments, such as “engine”, “connectable”, “engine_or_url”, “bind_to”, etc. are all present, but deprecated. they all get replaced by the single term “bind”. you also set the “bind” of MetaData using metadata.bind = <engine or connection>(link)
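
        For illustration, a sketch of the unified “bind” spelling (the sqlite URL is a placeholder):

          from sqlalchemy import MetaData, create_engine

          metadata = MetaData()
          metadata.bind = create_engine('sqlite://')   # replaces engine/connectable/bind_to/engine_or_url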

      orm

      • [orm] forwards-compatibility with 0.4: added one(), first(), and all() to Query. almost all Query functionality from 0.4 is present in 0.3.9 for forwards-compat purposes.(link)

      • [orm] reset_joinpoint() really really works this time, promise ! lets you re-join from the root: query.join([‘a’, ‘b’]).filter(<crit>).reset_joinpoint().join([‘a’, ‘c’]).filter(<some other crit>).all() in 0.4 all join() calls start from the “root”(link)
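
        The same chain written out as a sketch of the call flow only (Foo, its relations ‘a’, ‘b’, ‘c’, the criteria and the session are hypothetical placeholders):

          q = session.query(Foo)
          results = (q.join(['a', 'b']).filter(crit_one)
                      .reset_joinpoint()               # return to the root mapper
                      .join(['a', 'c']).filter(crit_two)
                      .all())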

      • [orm] added synchronization to the mapper() construction step, to avoid thread collisions when pre-existing mappers are compiling in a different thread(link)

        References: #613

      • [orm] a warning is issued by Mapper when two primary key columns of the same name are munged into a single attribute. this happens frequently when mapping to joins (or inheritance).(link)

      • [orm] synonym() properties are fully supported by all Query joining/ with_parent operations(link)

        References: #598

      • [orm] fixed very stupid bug when deleting items with many-to-many uselist=False relations(link)

      • [orm] remember all that stuff about polymorphic_union ? for joined table inheritance ? Funny thing... You sort of don’t need it for joined table inheritance, you can just string all the tables together via outerjoin(). The UNION still applies if concrete tables are involved, though (since nothing to join them on).(link)

      • [orm] small fix to eager loading to better work with eager loads to polymorphic mappers that are using a straight “outerjoin” clause(link)

      sql

      • [sql] ForeignKey to a table in a schema that’s not the default schema requires the schema to be explicit; i.e. ForeignKey(‘alt_schema.users.id’)
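
        For example (a sketch; ‘alt_schema’ and the tables are hypothetical):

          from sqlalchemy import MetaData, Table, Column, Integer, ForeignKey

          meta = MetaData()
          addresses = Table('addresses', meta,
              Column('id', Integer, primary_key=True),
              # the remote table lives in another schema, so it must be spelled out
              Column('user_id', Integer, ForeignKey('alt_schema.users.id')))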

      • [sql] MetaData can now be constructed with an engine or url as the first argument, just like BoundMetaData(link)

      • [sql] BoundMetaData is now deprecated, and MetaData is a direct substitute.(link)

      • [sql] DynamicMetaData has been renamed to ThreadLocalMetaData. the DynamicMetaData name is deprecated and is an alias for ThreadLocalMetaData or a regular MetaData if threadlocal=False(link)

      • [sql] composite primary key is represented as a non-keyed set to allow for composite keys consisting of cols with the same name; occurs within a Join. helps inheritance scenarios formulate correct PK.(link)

      • [sql] improved ability to get the “correct” and most minimal set of primary key columns from a join, equating foreign keys and otherwise equated columns. this is also mostly to help inheritance scenarios formulate the best choice of primary key columns.(link)

        References: #185

      • [sql] added ‘bind’ argument to Sequence.create()/drop(), ColumnDefault.execute()(link)

      • [sql] columns can be overridden in a reflected table with a “key” attribute different than the column’s name, including for primary key columns(link)

        References: #650

      • [sql] fixed “ambiguous column” result detection, when dupe col names exist in a result(link)

        References: #657

      • [sql] some enhancements to “column targeting”, the ability to match a column to a “corresponding” column in another selectable. this affects mostly ORM ability to map to complex joins(link)

      • [sql] MetaData and all SchemaItems are safe to use with pickle. slow table reflections can be dumped into a pickled file to be reused later. Just reconnect the engine to the metadata after unpickling.(link)

        References: #619
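
        A rough sketch of the dump/reuse cycle, assuming the reflection and bind APIs of this era (file and table names are placeholders):

          import pickle
          from sqlalchemy import MetaData, Table, create_engine

          engine = create_engine('sqlite:///app.db')
          meta = MetaData()
          meta.bind = engine
          users = Table('users', meta, autoload=True)   # slow reflection, done once

          blob = pickle.dumps(meta)                     # could be written out to a cache file

          meta2 = pickle.loads(blob)
          meta2.bind = engine                           # reconnect the engine after unpickling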

      • [sql] added a mutex to QueuePool’s “overflow” calculation to prevent a race condition that can bypass max_overflow(link)

      • [sql] fixed grouping of compound selects to give correct results. will break on sqlite in some cases, but those cases were producing incorrect results anyway, sqlite doesn’t support grouped compound selects(link)

        References: #623

      • [sql] fixed precedence of operators so that parenthesis are correctly applied(link)

        References: #620

      • [sql] calling <column>.in_() (i.e. with no arguments) will return “CASE WHEN (<column> IS NULL) THEN NULL ELSE 0 END = 1)”, so that NULL or False is returned in all cases, rather than throwing an error(link)

        References: #545

      • [sql] fixed “where”/”from” criterion of select() to accept a unicode string in addition to regular string - both convert to text()(link)

      • [sql] added standalone distinct() function in addition to column.distinct()(link)

        References: #558

      • [sql] result.last_inserted_ids() should return a list that is identically sized to the primary key constraint of the table. values that were “passively” created and not available via cursor.lastrowid will be None.(link)

      • [sql] long-identifier detection fixed to use > rather than >= for max ident length(link)

        References: #589

      • [sql] fixed bug where selectable.corresponding_column(selectable.c.col) would not return selectable.c.col, if the selectable is a join of a table and another join involving the same table. messed up ORM decision making(link)

        References: #593

      • [sql] added Interval type to types.py(link)

        References: #595

      mysql

      • [mysql] fixed catching of some errors that imply a dropped connection(link)

        References: #625

      • [mysql] fixed escaping of the modulo operator(link)

        References: #624

      • [mysql] added ‘fields’ to reserved words(link)

        References: #590

      • [mysql] various reflection enhancement/fixes(link)

      sqlite

      • [sqlite] rearranged dialect initialization so it has time to warn about pysqlite1 being too old.(link)

      • [sqlite] sqlite better handles datetime/date/time objects mixed and matched with various Date/Time/DateTime columns(link)

      • [sqlite] string PK column inserts don’t get overwritten with OID

        References: #603

      mssql

      • [mssql] fix port option handling for pyodbc(link)

        References: #634

      • [mssql] now able to reflect start and increment values for identity columns(link)

      • [mssql] preliminary support for using scope_identity() with pyodbc(link)

      oracle

      • [oracle] datetime fixes: got subsecond TIMESTAMP to work, added OracleDate which supports types.Date with only year/month/day(link)

        References: #604

      • [oracle] added dialect flag “auto_convert_lobs”, defaults to True; will cause any LOB objects detected in a result set to be forced into OracleBinary so that the LOB is read() automatically, if no typemap was present (i.e., if a textual execute() was issued).(link)

      • [oracle] mod operator ‘%’ produces MOD(link)

        References: #624

      • [oracle] converts cx_oracle datetime objects to Python datetime.datetime when Python 2.3 used(link)

        References: #542

      • [oracle] fixed unicode conversion in Oracle TEXT type(link)

      misc

      • [ext] iteration over dict association proxies is now dict-like, not InstrumentedList-like (e.g. over keys instead of values)(link)

      • [ext] association proxies no longer bind tightly to source collections, and are constructed with a thunk instead(link)

        References: #597

      • [ext] added selectone_by() to assignmapper(link)

      • [postgres] fixed escaping of the modulo operator(link)

        References: #624

      • [postgres] added support for reflection of domains(link)

        References: #570

      • [postgres] types which are missing during reflection resolve to Null type instead of raising an error(link)

      • [postgres] the fix in “schema” above fixes reflection of foreign keys from an alt-schema table to a public schema table(link)

      0.3.8

      Released: Sat Jun 02 2007

      orm

      • [orm] added reset_joinpoint() method to Query, moves the “join point” back to the starting mapper. 0.4 will change the behavior of join() to reset the “join point” in all cases so this is an interim method. for forwards compatibility, ensure joins across multiple relations are specified using a single join(), i.e. join([‘a’, ‘b’, ‘c’]).(link)

      • [orm] fixed bug in query.instances() that wouldn’t handle more than one additional mapper or one additional column.

      • [orm] “delete-orphan” no longer implies “delete”. ongoing effort to separate the behavior of these two operations.(link)

      • [orm] many-to-many relationships properly set the type of bind params for delete operations on the association table(link)

      • [orm] many-to-many relationships check that the number of rows deleted from the association table by a delete operation matches the expected results(link)

      • [orm] session.get() and session.load() propagate **kwargs through to query(link)

      • [orm] fix to polymorphic query which allows the original polymorphic_union to be embedded into a correlated subquery(link)

        References: #577

      • [orm] fix to select_by(<propname>=<object instance>) -style joins in conjunction with many-to-many relationships, bug introduced in r2556(link)

      • [orm] the “primary_key” argument to mapper() is propagated to the “polymorphic” mapper. primary key columns in this list get normalized to that of the mapper’s local table.(link)

      • [orm] restored logging of “lazy loading clause” under sa.orm.strategies logger, got removed in 0.3.7(link)

      • [orm] improved support for eagerloading of properties off of mappers that are mapped to select() statements; i.e. eagerloader is better at locating the correct selectable with which to attach its LEFT OUTER JOIN.(link)

      sql

      • [sql] _Label class overrides compare_self to return its ultimate object. meaning, if you say someexpr.label(‘foo’) == 5, it produces the correct “someexpr == 5”.(link)

      • [sql] _Label propagates “_hide_froms()” so that scalar selects behave more properly with regards to FROM clause #574(link)

      • [sql] fix to long name generation when using oid_column as an order by (oids used heavily in mapper queries)(link)

      • [sql] significant speed improvement to ResultProxy, pre-caches TypeEngine dialect implementations and saves on function calls per column(link)

      • [sql] parentheses are applied to clauses via a new _Grouping construct. uses operator precedence to more intelligently apply parentheses to clauses, provides cleaner nesting of clauses (doesn’t mutate clauses placed in other clauses, i.e. no ‘parens’ flag)

      • [sql] added ‘modifier’ keyword, works like func.<foo> except does not add parenthesis. e.g. select([modifier.DISTINCT(...)]) etc.(link)

      • [sql] removed “no group by’s in a select thats part of a UNION” restriction(link)

        References: #578

      mysql

      • [mysql] Nearly all MySQL column types are now supported for declaration and reflection. Added NCHAR, NVARCHAR, VARBINARY, TINYBLOB, LONGBLOB, YEAR(link)

      • [mysql] The sqltypes.Binary passthrough now always builds a BLOB, avoiding problems with very old database versions(link)

      • [mysql] support for column-level CHARACTER SET and COLLATE declarations, as well as ASCII, UNICODE, NATIONAL and BINARY shorthand.(link)

      firebird

      • [firebird] set max identifier length to 31(link)

      • [firebird] supports_sane_rowcount() set to False due to ticket #370. versioned_id_col feature wont work in FB.(link)

      • [firebird] some execution fixes(link)

      • [firebird] new association proxy implementation, implementing complete proxies to list, dict and set-based relation collections(link)

      • [firebird] added orderinglist, a custom list class that synchronizes an object attribute with that object’s position in the list(link)

      • [firebird] small fix to SelectResultsExt to not bypass itself during select().(link)

      • [firebird] added filter(), filter_by() to assignmapper(link)

      misc

      • [engines] added detach() to Connection, allows underlying DBAPI connection to be detached from its pool, closing on dereference/close() instead of being reused by the pool.(link)

      • [engines] added invalidate() to Connection, immediately invalidates the Connection and its underlying DBAPI connection.(link)

      0.3.7

      Released: Sun Apr 29 2007

      orm

      • [orm] fixed critical issue when, after options(eagerload()) is used, the mapper would then always apply query “wrapping” behavior for all subsequent LIMIT/OFFSET/DISTINCT queries, even if no eager loading was applied on those subsequent queries.(link)

      • [orm] added query.with_parent(someinstance) method. searches for target instance using lazy join criterion from parent instance. takes optional string “property” to isolate the desired relation. also adds static Query.query_from_parent(instance, property) version.(link)

        References: #541

      • [orm] improved query.XXX_by(someprop=someinstance) querying to use similar methodology to with_parent, i.e. using the “lazy” clause which prevents adding the remote instance’s table to the SQL, thereby making more complex conditions possible(link)

        References: #554

      • [orm] added generative versions of aggregates, i.e. sum(), avg(), etc. to query. used via query.apply_max(), apply_sum(), etc. #552(link)

      • [orm] fix to using distinct() or distinct=True in combination with join() and similar(link)

      • [orm] corresponding to label/bindparam name generation, eager loaders generate deterministic names for the aliases they create using md5 hashes.(link)

      • [orm] improved/fixed custom collection classes when giving it “set”/ “sets.Set” classes or subclasses (was still looking for append() methods on them during lazy loads)(link)

      • [orm] restored old “column_property()” ORM function (used to be called “column()”) to force any column expression to be added as a property on a mapper, particularly those that aren’t present in the mapped selectable. this allows “scalar expressions” of any kind to be added as relations (though they have issues with eager loads).(link)

      • [orm] fix to many-to-many relationships targeting polymorphic mappers(link)

        References: #533

      • [orm] making progress with session.merge() as well as combining its usage with entity_name(link)

        References: #543

      • [orm] the usual adjustments to relationships between inheriting mappers, in this case establishing relation()s to subclass mappers where the join conditions come from the superclass’ table(link)

      sql

      • [sql] keys() of result set columns are not lowercased, come back exactly as they’re expressed in cursor.description. note this causes colnames to be all caps in oracle.(link)

      • [sql] preliminary support for unicode table names, column names and SQL statements added, for databases which can support them. Works with sqlite and postgres so far. Mysql mostly works except the has_table() function does not work. Reflection works too.(link)

      • [sql] the Unicode type is now a direct subclass of String, which now contains all the “convert_unicode” logic. This helps the variety of unicode situations that occur in db’s such as MS-SQL to be better handled and allows subclassing of the Unicode datatype.(link)

        References: #522

      • [sql] ClauseElements can be used in in_() clauses now, such as bind parameters, etc. #476(link)

      • [sql] reverse operators implemented for CompareMixin elements, allows expressions like “5 + somecolumn” etc. #474(link)

      • [sql] the “where” criterion of an update() and delete() now correlates embedded select() statements against the table being updated or deleted. this works the same as nested select() statement correlation, and can be disabled via the correlate=False flag on the embedded select().(link)

      • [sql] column labels are now generated in the compilation phase, which means their lengths are dialect-dependent. So on oracle a label that gets truncated to 30 chars will go out to 63 characters on postgres. Also, the true labelname is always attached as the accessor on the parent Selectable so there’s no need to be aware of the “truncated” label names.

        References: #512

      • [sql] column label and bind param “truncation” also generate deterministic names now, based on their ordering within the full statement being compiled. this means the same statement will produce the same string across application restarts and allowing DB query plan caching to work better.(link)

      • [sql] the “mini” column labels generated when using subqueries, which are to work around glitchy SQLite behavior that doesn’t understand “foo.id” as equivalent to “id”, are now only generated in the case that those named columns are selected from (part of #513)

        References: #513

      • [sql] the label() method on ColumnElement will properly propagate the TypeEngine of the base element out to the label, including a label() created from a scalar=True select() statement.(link)

      • [sql] MS-SQL better detects when a query is a subquery and knows not to generate ORDER BY phrases for those(link)

        References: #513

      • [sql] fix for fetchmany() “size” argument being positional in most dbapis(link)

        References: #505

      • [sql] sending None as an argument to func.<something> will produce an argument of NULL(link)

      • [sql] query strings in unicode URLs get keys encoded to ascii for **kwargs compat(link)

      • [sql] slight tweak to raw execute() change to also support tuples for positional parameters, not just lists(link)

        References: #523

      • [sql] fix to case() construct to propagate the type of the first WHEN condition as the return type of the case statement(link)

      mysql

      • [mysql] support for SSL arguments given as inline within URL query string, prefixed with “ssl_”, courtesy terjeros@gmail.com.(link)
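
        For example (paths and credentials are placeholders; any “ssl_”-prefixed key in the query string is passed along as an SSL argument):

          from sqlalchemy import create_engine

          engine = create_engine(
              'mysql://scott:tiger@dbhost/app'
              '?ssl_ca=/etc/mysql/ca.pem'
              '&ssl_cert=/etc/mysql/client-cert.pem'
              '&ssl_key=/etc/mysql/client-key.pem')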

      • [mysql] [<schemaname>] mysql uses “DESCRIBE.<tablename>”, catching exceptions if the table doesn’t exist, in order to determine if a table exists. this supports unicode table names as well as schema names. tested with MySQL5 but should work with the 4.1 series as well. (#557)

      sqlite

      • [sqlite] removed silly behavior where sqlite would reflect UNIQUE indexes as part of the primary key (?!)(link)

      mssql

      • [mssql] pyodbc is now the preferred DB-API for MSSQL, and if no module is specifically requested, will be loaded first on a module probe.(link)

      • [mssql] The @@SCOPE_IDENTITY is now used instead of @@IDENTITY. This behavior may be overridden with the engine_connect “use_scope_identity” keyword parameter, which may also be specified in the dburi.(link)

      oracle

      • [oracle] small fix to allow successive compiles of the same SELECT object which features LIMIT/OFFSET. oracle dialect needs to modify the object to have ROW_NUMBER OVER and wasn’t performing the full series of steps on successive compiles.(link)

      misc

      • [engines] warnings module used for issuing warnings (instead of logging)(link)

      • [engines] cleanup of DBAPI import strategies across all engines(link)

        References: #480

      • [engines] refactoring of engine internals which reduces complexity, number of codepaths; places more state inside of ExecutionContext to allow more dialect control of cursor handling, result sets. ResultProxy totally refactored and also has two versions of “buffered” result sets used for different purposes.(link)

      • [engines] server side cursor support fully functional in postgres.(link)

        References: #514

      • [engines] improved framework for auto-invalidation of connections that have lost their underlying database, via dialect-specific detection of exceptions corresponding to that database’s disconnect related error messages. Additionally, when a “connection no longer open” condition is detected, the entire connection pool is discarded and replaced with a new instance. #516(link)

      • [engines] the dialects within sqlalchemy.databases become a setuptools entry points. loading the built-in database dialects works the same as always, but if none found will fall back to trying pkg_resources to load an external module(link)

        References: #521

      • [engines] Engine contains a “url” attribute referencing the url.URL object used by create_engine().(link)

      • [informix] informix support added ! courtesy James Zhang, who put a ton of effort in.(link)

      • [extensions] big fix to AssociationProxy so that multiple AssociationProxy objects can be associated with a single association collection.(link)

      • [extensions] assign_mapper names methods according to their keys (i.e. __name__) #551(link)

      0.3.6

      Released: Fri Mar 23 2007

      orm

      • [orm] the full featureset of the SelectResults extension has been merged into a new set of methods available off of Query. These methods all provide “generative” behavior, whereby the Query is copied and a new one returned with additional criterion added. The new methods include:

        • filter() - applies select criterion to the query
        • filter_by() - applies “by”-style criterion to the query
        • avg() - return the avg() function on the given column
        • join() - join to a property (or across a list of properties)
        • outerjoin() - like join() but uses LEFT OUTER JOIN
        • limit()/offset() - apply LIMIT/OFFSET range-based access which applies limit/offset: session.query(Foo)[3:5]
        • distinct() - apply DISTINCT
        • list() - evaluate the criterion and return results

        no incompatible changes have been made to Query’s API and no methods have been deprecated. Existing methods like select(), select_by(), get(), get_by() all execute the query at once and return results like they always did. join_to()/join_via() are still there although the generative join()/outerjoin() methods are easier to use. A short usage sketch of the generative style follows this entry.

        (link)
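
        A brief usage sketch of the generative style (Foo and the filter criterion are hypothetical; assumes a configured Session of this era):

          q = session.query(Foo)
          q = q.filter_by(status='active')          # each call returns a new Query
          results = q.distinct().limit(10).offset(20).list()

          # range-based access applies LIMIT/OFFSET as well:
          page = session.query(Foo)[3:5]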

      • [orm] the return value for multiple mappers used with instances() now returns a cartesian product of the requested list of mappers, represented as a list of tuples. this corresponds to the documented behavior. So that instances match up properly, the “uniquing” is disabled when this feature is used.(link)

      • [orm] Query has add_entity() and add_column() generative methods. these will add the given mapper/class or ColumnElement to the query at compile time, and apply them to the instances() method. the user is responsible for constructing reasonable join conditions (otherwise you can get full cartesian products). result set is the list of tuples, non-uniqued.(link)

      • [orm] strings and columns can also be sent to the *args of instances() where those exact result columns will be part of the result tuples.(link)

      • [orm] a full select() construct can be passed to query.select() (which worked anyway), but also query.selectfirst(), query.selectone() which will be used as is (i.e. no query is compiled). works similarly to sending the results to instances().(link)

      • [orm] eager loading will not “aliasize” “order by” clauses that were placed in the select statement by something other than the eager loader itself, to fix the possibility of dupe columns as illustrated in #495. however, this means you have to be more careful that the columns placed in the “order by” of Query.select() are explicitly named in your criterion (i.e. you can’t rely on the eager loader adding them in for you)

        References: #495

      • [orm] added a handy multi-use “identity_key()” method to Session, allowing the generation of identity keys for primary key values, instances, and rows, courtesy Daniel Miller(link)

      • [orm] many-to-many table will be properly handled even for operations that occur on the “backref” side of the operation(link)

        References: #249

      • [orm] added “refresh-expire” cascade. allows refresh() and expire() calls to propagate along relationships.(link)

        References: #492

      • [orm] more fixes to polymorphic relations, involving proper lazy-clause generation on many-to-one relationships to polymorphic mappers. also fixes to detection of “direction”, more specific targeting of columns that belong to the polymorphic union vs. those that dont.(link)

        References: #493

      • [orm] some fixes to relationship calcs when using “viewonly=True” to pull in other tables into the join condition which arent parent of the relationship’s parent/child mappings(link)

      • [orm] flush fixes on cyclical-referential relationships that contain references to other instances outside of the cyclical chain, when some of the objects in the cycle are not actually part of the flush(link)

      • [orm] put an aggressive check for “flushing object A with a collection of B’s, but you put a C in the collection” error condition - even if C is a subclass of B, unless B’s mapper loads polymorphically. Otherwise, the collection will later load a “B” which should be a “C” (since its not polymorphic) which breaks in bi-directional relationships (i.e. C has its A, but A’s backref will lazyload it as a different instance of type “B”) This check is going to bite some of you who do this without issues, so the error message will also document a flag “enable_typechecks=False” to disable this checking. But be aware that bi-directional relationships in particular become fragile without this check.(link)

        References: #500

      sql

      • [sql] bindparam() names are now repeatable! specify two distinct bindparam()s with the same name in a single statement, and the key will be shared. proper positional/named args translate at compile time. for the old behavior of “aliasing” bind parameters with conflicting names, specify “unique=True” - this option is still used internally for all the auto-generated (value-based) bind parameters.
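
        For illustration, a sketch using the old-style table.select() construction (the table and values are hypothetical):

          from sqlalchemy import MetaData, Table, Column, Integer, bindparam, or_

          meta = MetaData()
          t = Table('t', meta, Column('a', Integer), Column('b', Integer))

          # the same 'val' key appears twice and is shared at execution time
          stmt = t.select(or_(t.c.a == bindparam('val'), t.c.b == bindparam('val')))
          # engine.execute(stmt, val=5)   -- the shared parameter is supplied once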

      • [sql] slightly better support for bind params as column clauses, either via bindparam() or via literal(), i.e. select([literal(‘foo’)])(link)

      • [sql] MetaData can bind to an engine either via “url” or “engine” kwargs to the constructor, or by using the connect() method. BoundMetaData is identical to MetaData except the engine_or_url param is required. DynamicMetaData is the same and provides thread-local connections by default.

      • [sql] exists() becomes useable as a standalone selectable, not just in a WHERE clause, i.e. exists([columns], criterion).select()(link)

      • [sql] correlated subqueries work inside of ORDER BY, GROUP BY(link)

      • [sql] fixed function execution with explicit connections, i.e. conn.execute(func.dosomething())(link)

      • [sql] use_labels flag on select() won’t auto-create labels for literal text column elements, since we can make no assumptions about the text. to create labels for literal columns, you can say “somecol AS somelabel”, or use literal_column(“somecol”).label(“somelabel”)
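
        For example (a sketch; the column text is arbitrary):

          from sqlalchemy import select, literal_column

          stmt = select([literal_column("somecol").label("somelabel")], use_labels=True)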

      • [sql] quoting wont occur for literal columns when they are “proxied” into the column collection for their selectable (is_literal flag is propagated). literal columns are specified via literal_column(“somestring”).(link)

      • [sql] added “fold_equivalents” boolean argument to Join.select(), which removes ‘duplicate’ columns from the resulting column clause that are known to be equivalent based on the join condition. this is of great usage when constructing subqueries of joins which Postgres complains about if duplicate column names are present.(link)

      • [sql] fixed use_alter flag on ForeignKeyConstraint(link)

        References: #503

      • [sql] fixed usage of 2.4-only “reversed” in topological.py(link)

        References: #506

      • [sql] for hackers, refactored the “visitor” system of ClauseElement and SchemaItem so that the traversal of items is controlled by the ClauseVisitor itself, using the method visitor.traverse(item). accept_visitor() methods can still be called directly but will not do any traversal of child items. ClauseElement/SchemaItem now have a configurable get_children() method to return the collection of child elements for each parent object. This allows the full traversal of items to be clear and unambiguous (as well as loggable), with an easy method of limiting a traversal (just pass flags which are picked up by appropriate get_children() methods).(link)

        References: #501

      • [sql] the “else_” parameter to the case statement now properly works when set to zero.(link)

      mysql

      • [mysql] added a catchall **kwargs to MSString, to help reflection of obscure types (like “varchar() binary” in MS 4.0)(link)

      • [mysql] added explicit MSTimeStamp type which takes effect when using types.TIMESTAMP.(link)

      oracle

      • [oracle] got binary working for any size input! cx_oracle works fine; it was my fault, as BINARY was being passed and not BLOB for setinputsizes (also the unit tests weren’t even setting input sizes).

      • [oracle] also fixed CLOB read/write on a separate changeset.(link)

      • [oracle] auto_setinputsizes defaults to True for Oracle, fixed cases where it improperly propagated bad types.(link)

      misc

      • [extensions] options() method on SelectResults now implemented “generatively” like the rest of the SelectResults methods. But you’re going to just use Query now anyway.(link)

        References: #472

      • [extensions] query() method is added by assignmapper. this helps with navigating to all the new generative methods on Query.(link)

      • [ms-sql]

        removed seconds input on DATE column types (probably
        should remove the time altogether)
        (link)

      • [ms-sql] null values in float fields no longer raise errors(link)

      • [ms-sql] LIMIT with OFFSET now raises an error (MS-SQL has no OFFSET support)(link)

      • [ms-sql] added a facility to use the MSSQL type VARCHAR(max) instead of TEXT for large unsized string fields. Use the new “text_as_varchar” flag to turn it on.

        References: #509

      • [ms-sql] ORDER BY clauses without a LIMIT are now stripped in subqueries, as MS-SQL forbids this usage(link)

      • [ms-sql] cleanup of module importing code; specifiable DB-API module; more explicit ordering of module preferences.(link)

        References: #480

      0.3.5

      Released: Thu Feb 22 2007

      orm

      • [orm] [bugs] another refactoring to relationship calculation. Allows more accurate ORM behavior with relationships from/to/between mappers, particularly polymorphic mappers, also their usage with Query, SelectResults. tickets include #441, #448, #439.

        References: #441, #448, #439

      • [orm] [bugs] removed deprecated method of specifying custom collections on classes; you must now use the “collection_class” option. the old way was beginning to produce conflicts when people used assign_mapper(), which now patches an “options” method, in conjunction with a relationship named “options”. (relationships take precedence over monkeypatched assign_mapper methods).(link)

      • [orm] [bugs] extension() query option propagates to Mapper._instance() method so that all loading-related methods get called(link)

        References: #454

      • [orm] [bugs] eager relation to an inheriting mapper won’t fail if no rows are returned for the relationship.

      • [orm] [bugs] eager relation loading bug fixed for eager relation on multiple descendant classes(link)

        References: #486

      • [orm] [bugs] fix for very large topological sorts, courtesy ants.aasma at gmail(link)

        References: #423

      • [orm] [bugs] eager loading is slightly more strict about detecting “self-referential” relationships, specifically between polymorphic mappers. this results in an “eager degrade” to lazy loading.(link)

      • [orm] [bugs] improved support for complex queries embedded into “where” criterion for query.select()(link)

        References: #449

      • [orm] [bugs] mapper options like eagerload(), lazyload(), deferred(), will work for “synonym()” relationships(link)

        References: #485

      • [orm] [bugs] fixed bug where cascade operations incorrectly included deleted collection items in the cascade(link)

        References: #445

      • [orm] [bugs] fixed relationship deletion error when one-to-many child item is moved to a new parent in a single unit of work(link)

        References: #478

      • [orm] [bugs] fixed relationship deletion error where parent/child with a single column as PK/FK on the child would raise a “blank out the primary key” error, if manually deleted or “delete” cascade without “delete-orphan” was used(link)

      • [orm] [bugs] fix to deferred so that the load operation doesn’t mistakenly occur when only PK col attributes are set

      • [orm] [enhancements] implemented foreign_keys argument to mapper. use in conjunction with primaryjoin/secondaryjoin arguments to specify/override foreign keys defined on the Table instance.(link)

        References: #385

      • [orm] [enhancements] contains_eager(‘foo’) automatically implies eagerload(‘foo’)(link)

      • [orm] [enhancements] added “alias” argument to contains_eager(). use it to specify the string name or Alias instance of an alias used in the query for the eagerly loaded child items. easier to use than “decorator”(link)

      • [orm] [enhancements] added “contains_alias()” option for result set mapping to an alias of the mapped table(link)

      • [orm] [enhancements] added support for py2.5 “with” statement with SessionTransaction(link)

        References: #468

      sql

      • [sql] the value of “case_sensitive” defaults to True now, regardless of the casing of the identifier, unless specifically set to False. this is because the object might be labeled as something else which does contain mixed case, and propagating “case_sensitive=False” breaks that. Other fixes to quoting when using labels and “fake” column objects

      • [sql] added a “supports_execution()” method to ClauseElement, so that individual kinds of clauses can express if they are appropriate for executing...such as, you can execute a “select”, but not a “Table” or a “Join”.(link)

      • [sql] fixed argument passing to straight textual execute() on engine, connection. can handle *args or a list instance for positional, **kwargs or a dict instance for named args, or a list of list or dicts to invoke executemany()(link)

      • [sql] small fix to BoundMetaData to accept unicode or string URLs(link)

      • [sql] fixed named PrimaryKeyConstraint generation courtesy andrija at gmail(link)

        References: #466

      • [sql] fixed generation of CHECK constraints on columns(link)

        References: #464

      • [sql] fixes to tometadata() operation to propagate Constraints at column and table level(link)

      mysql

      • [mysql] fix to reflection on older DB’s that might return array() type for “show variables like” statements(link)

      mssql

      • [mssql] preliminary support for pyodbc (Yay!)(link)

        References: #419

      • [mssql] better support for NVARCHAR types added(link)

        References: #298

      • [mssql] fix for commit logic on pymssql(link)

      • [mssql] fix for query.get() with schema(link)

        References: #456

      • [mssql] fix for non-integer relationships(link)

        References: #473

      • [mssql] DB-API module now selectable at run-time(link)

        References: #419

      • [mssql] now passes many more unit tests (#415, #422, #481)

      • [mssql] better unittest compatibility with ANSI functions(link)

        References: #479

      • [mssql] improved support for implicit sequence PK columns with auto-insert(link)

        References: #415

      • [mssql] fix for blank password in adodbapi(link)

        References: #371

      • [mssql] fixes to get unit tests working with pyodbc(link)

        References: #481

      • [mssql] fix to auto_identity_insert on db-url query(link)

      • [mssql] added query_timeout to db-url query parms. currently works only for pymssql(link)

      • [mssql] tested with pymssql 0.8.0 (which is now LGPL)(link)

      oracle

      • [oracle] when returning “rowid” as the ORDER BY column or in use with ROW_NUMBER OVER, oracle dialect checks the selectable it’s being applied to and will switch to table PK if not applicable, i.e. for a UNION. checking for DISTINCT, GROUP BY (other places that rowid is invalid) is still a TODO. allows polymorphic mappings to function.

        References: #436

      • [oracle] sequences on a non-pk column will properly fire off on INSERT(link)

      • [oracle] added PrefetchingResultProxy support to pre-fetch LOB columns when they are known to be present, fixes(link)

        References: #435

      • [oracle] implemented reflection of tables based on synonyms, including across dblinks(link)

        References: #379

      • [oracle] issues a log warning when a related table can’t be reflected due to certain permission errors

        References: #363

      misc

      • [postgres] better reflection of sequences for alternate-schema Tables(link)

        References: #442

      • [postgres] sequences on a non-pk column will properly fire off on INSERT(link)

      • [postgres] added PGInterval type, PGInet type(link)

        References: #460, #444

      • [extensions] added distinct() method to SelectResults. generally should only make a difference when using count().(link)

      • [extensions] added options() method to SelectResults, equivalent to query.options()(link)

        References: #472

      • [extensions] added optional __table_opts__ dictionary to ActiveMapper, will send kw options to Table objects(link)

        References: #462

      • [extensions] added selectfirst(), selectfirst_by() to assign_mapper(link)

        References: #467

      0.3.4

      Released: Tue Jan 23 2007

      general

      • [general] global “insure”->”ensure” change. in US English “insure” is actually largely interchangeable with “ensure” (so says the dictionary), so I’m not completely illiterate, but it’s definitely sub-optimal to “ensure”, which is non-ambiguous.

      orm

      • [orm] poked the first hole in the can of worms: saying query.select_by(somerelationname=someinstance) will create the join of the primary key columns represented by “somerelationname“‘s mapper to the actual primary key in “someinstance”.(link)

      • [orm] reworked how relations interact with “polymorphic” mappers, i.e. mappers that have a select_table as well as polymorphic flags. better determination of proper join conditions, interaction with user- defined join conditions, and support for self-referential polymorphic mappers.(link)

      • [orm] related to polymorphic mapping relations, some deeper error checking when compiling relations, to detect an ambiguous “primaryjoin” in the case that both sides of the relationship have foreign key references in the primary join condition. also tightened down conditions used to locate “relation direction”, associating the “foreignkey” of the relationship with the “primaryjoin”(link)

      • [orm] a little bit of improvement to the concept of a “concrete” inheritance mapping, though that concept is not well fleshed out yet (added test case to support concrete mappers on top of a polymorphic base).(link)

      • [orm] fix to “proxy=True” behavior on synonym()(link)

      • [orm] fixed bug where delete-orphan basically didn’t work with many-to-many relationships, backref presence generally hid the symptom(link)

        References: #427

      • [orm] added a mutex to the mapper compilation step. I've been reluctant to add any kind of threading to SA, but this is one spot where it's really needed, since mappers are typically “global”, and while their state does not change during normal operation, the initial compilation step does modify internal state significantly, and this step usually occurs not at module-level initialization time (unless you call compile()) but at first-request time(link)

      • [orm] basic idea of “session.merge()” actually implemented. needs more testing.(link)

      • [orm] added “compile_mappers()” function as a shortcut to compiling all mappers(link)

      • [orm] fix to MapperExtension create_instance so that entity_name properly associated with new instance(link)

      • [orm] speed enhancements to ORM object instantiation, eager loading of rows(link)

      • [orm] invalid options sent to ‘cascade’ string will raise an exception(link)

        References: #406

      • [orm] fixed bug in mapper refresh/expire whereby eager loaders didn't properly re-populate item lists(link)

        References: #407

      • [orm] fix to post_update to ensure rows are updated even for non insert/delete scenarios(link)

        References: #413

      • [orm] added an error message if you actually try to modify primary key values on an entity and then flush it(link)

        References: #412

      sql

      • [sql] added “fetchmany()” support to ResultProxy(link)

      • [sql] added support for column “key” attribute to be useable in row[<key>]/row.<key>(link)

      • [sql] changed “BooleanExpression” to subclass from “BinaryExpression”, so that boolean expressions can also follow column-clause behaviors (i.e. label(), etc).(link)

      • [sql] trailing underscores are trimmed from func.<xxx> calls, such as func.if_()(link)

      • [sql] fix to correlation of subqueries when the column list of the select statement is constructed with individual calls to append_column(); this fixes an ORM bug whereby nested select statements were not getting correlated with the main select generated by the Query object.(link)

      • [sql] another fix to subquery correlation so that a subquery which has only one FROM element will not correlate that single element, since at least one FROM element is required in a query.(link)

      • [sql] default “timezone” setting is now False. this corresponds to Python’s datetime behavior as well as Postgres’ timestamp/time types (which is the only timezone-sensitive dialect at the moment)(link)

        References: #414

      • [sql] the “op()” function is now treated as an “operation”, rather than a “comparison”. the difference is, an operation produces a BinaryExpression from which further operations can occur whereas comparison produces the more restrictive BooleanExpression(link)

      • [sql] trying to redefine a reflected primary key column as non-primary key raises an error(link)

      • [sql] type system slightly modified to support TypeDecorators that can be overridden by the dialect (ok, thats not very clear, it allows the mssql tweak below to be possible)(link)

      mysql

      • [mysql] mysql is inconsistent with what kinds of quotes it uses in foreign keys during a SHOW CREATE TABLE, reflection updated to accommodate all three styles(link)

        References: #420

      • [mysql] mysql table create options work on a generic passthru now, i.e. Table(..., mysql_engine=’InnoDB’, mysql_collate=”latin1_german2_ci”, mysql_auto_increment=”5”, mysql_<somearg>...), helps(link)

        References: #418
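
        A minimal sketch of the passthru keyword form (the table and column names here are illustrative, not from the changelog):

        from sqlalchemy import Table, Column, Integer, String, MetaData

        meta = MetaData()
        accounts = Table('accounts', meta,
            Column('id', Integer, primary_key=True),
            Column('name', String(50)),
            # any mysql_<option> keyword argument is passed through to the CREATE TABLE options
            mysql_engine='InnoDB',
            mysql_collate='latin1_german2_ci',
        )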

      mssql

      • [mssql] added an NVarchar type (produces NVARCHAR), also MSUnicode which provides Unicode-translation for the NVarchar regardless of dialect convert_unicode setting.(link)

      oracle

      • [oracle] slight support for binary, but still need to figure out how to insert reasonably large values (over 4K). requires auto_setinputsizes=True sent to create_engine(), rows must be fully fetched individually, etc.(link)

      firebird

      • [firebird] order of constraint creation puts primary key first before all other constraints; required for firebird, not a bad idea for others(link)

        References: #408

      • [firebird] Firebird fix to autoload multifield foreign keys(link)

        References: #409

      • [firebird] Firebird NUMERIC type properly handles a type without precision(link)

        References: #409

      misc

      • [postgres] fix to the initial checkfirst for tables to take current schema into account(link)

        References: #424

      • [postgres] postgres has an optional “server_side_cursors=True” flag which will utilize server side cursors. these are appropriate for fetching only partial results and are necessary for working with very large unbounded result sets. While we’d like this to be the default behavior, different environments seem to have different results and the causes have not been isolated so we are leaving the feature off by default for now. Uses an apparently undocumented psycopg2 behavior recently discovered on the psycopg mailing list.(link)

      • [postgres] added “BIGSERIAL” support for postgres table with PGBigInteger/autoincrement(link)

      • [postgres] fixes to postgres reflection to better handle when schema names are present; thanks to jason (at) ncsmags.com(link)

        References: #402

      • [extensions] added “validate=False” argument to assign_mapper, if True will ensure that only mapped attributes are named(link)

        References: #426

      • [extensions] assign_mapper gets “options”, “instances” functions added (i.e. MyClass.instances())(link)

      0.3.3

      Released: Fri Dec 15 2006
      • string-based FROM clauses fixed, i.e. select(..., from_obj=[“sometext”])(link)

      • fixes to passive_deletes flag, lazy=None (noload) flag(link)

      • added example/docs for dealing with large collections(link)

      • added object_session() method to sqlalchemy namespace(link)

      • fixed QueuePool bug whereby its better able to reconnect to a database that was not reachable (thanks to Sébastien Lelong), also fixed dispose() method(link)

      • patch that makes MySQL rowcount work correctly!(link)

        References: #396

      • fix to MySQL catch of 2006/2014 errors to properly re-raise OperationalError exception(link)

      0.3.2

      Released: Sun Dec 10 2006
      • major connection pool bug fixed. fixes MySQL out of sync errors, will also prevent transactions getting rolled back accidentally in all DBs(link)

        References: #387

      • major speed enhancements vs. 0.3.1, to bring speed back to 0.2.8 levels(link)

      • made conditional dozens of debug log calls that were time-intensive to generate(link)

      • fixed bug in cascade rules whereby the entire object graph could be unnecessarily cascaded on the save/update cascade(link)

      • various speedups in attributes module(link)

      • identity map in Session is by default no longer weak referencing. to have it be weak referencing, use create_session(weak_identity_map=True) fixes(link)

        References: #388

      • MySQL detects errors 2006 (server has gone away) and 2014 (commands out of sync) and invalidates the connection on which it occurred.(link)

      • MySQL bool type fix:(link)

        References: #307

      • postgres reflection fixes:(link)

        References: #382, #349

      • added keywords for EXCEPT, INTERSECT, EXCEPT ALL, INTERSECT ALL(link)

        References: #247

      • assign_mapper in assignmapper extension returns the created mapper(link)

        References: #2110

      • added label() function to Select class, when scalar=True is used to create a scalar subquery i.e. “select x, y, (select max(foo) from table) AS foomax from table”(link)

      • added onupdate and ondelete keyword arguments to ForeignKey; propagate to underlying ForeignKeyConstraint if present. (dont propagate in the other direction, however)(link)

      • fix to session.update() to preserve “dirty” status of incoming object(link)

      • sending a selectable to an IN via the in_() function no longer creates a “union” out of multiple selects; only one selectable to the in_() function is allowed now (make a union yourself if union is needed)(link)

      • improved support for disabling save-update cascade via cascade=”none” etc.(link)

      • added “remote_side” argument to relation(), used only with self-referential mappers to force the direction of the parent/child relationship. replaces the usage of the “foreignkey” parameter for “switching” the direction. “foreignkey” argument is deprecated for all uses and will eventually be replaced by an argument dedicated to ForeignKey specification on mappers.(link)
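
        A minimal sketch of a self-referential relation() using remote_side (the Node class, nodes table and import location are hypothetical and may differ by version):

        from sqlalchemy.orm import mapper, relation

        mapper(Node, nodes, properties={
            # remote_side names the columns on the "remote" (parent) side of the join,
            # forcing this relation to be the many-to-one back to the parent row
            'parent': relation(Node, remote_side=[nodes.c.id]),
        })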

      0.3.1

      Released: Mon Nov 13 2006

      orm

      • [orm] the “delete” cascade will load in all child objects, if they were not loaded already. this can be turned off (i.e. the old behavior) by setting passive_deletes=True on a relation().(link)

      • [orm] adjustments to reworked eager query generation to not fail on circular eager-loaded relationships (like backrefs)(link)

      • [orm] fixed bug where eagerload() (nor lazyload()) option didn’t properly instruct the Query whether or not to use “nesting” when producing a LIMIT query.(link)

      • [orm] fixed bug in circular dependency sorting at flush time; if object A contained a cyclical many-to-one relationship to object B, and object B was just attached to object A, but object B itself wasn't changed, the many-to-one synchronize of B’s primary key attribute to A’s foreign key attribute wouldn't occur.(link)

        References: #360

      • [orm] implemented from_obj argument for query.count, improves count function on selectresults(link)

        References: #325

      • [orm] added an assertion within the “cascade” step of ORM relationships to check that the class of object attached to a parent object is appropriate (i.e. if A.items stores B objects, raise an error if a C is appended to A.items)(link)

      • [orm] new extension sqlalchemy.ext.associationproxy, provides transparent “association object” mappings. new example examples/association/proxied_association.py illustrates.(link)

      • [orm] improvement to single table inheritance to load full hierarchies beneath the target class(link)

      • [orm] fix to subtle condition in topological sort where a node could appear twice, for(link)

        References: #362

      • [orm] additional rework to topological sort, refactoring, for(link)

        References: #365

      • [orm] “delete-orphan” for a certain type can be set on more than one parent class; the instance is an “orphan” only if its not attached to any of those parents(link)

      misc

      • [engine/pool] some new Pool utility classes, updated docs(link)

      • [engine/pool] “use_threadlocal” on Pool defaults to False (same as create_engine)(link)

      • [engine/pool] fixed direct execution of Compiled objects(link)

      • [engine/pool] create_engine() reworked to be strict about incoming **kwargs. all keyword arguments must be consumed by one of the dialect, connection pool, and engine constructors, else a TypeError is thrown which describes the full set of invalid kwargs in relation to the selected dialect/pool/engine configuration.(link)

      • [databases/types] MySQL catches exception on “describe” and reports as NoSuchTableError(link)

      • [databases/types] further fixes to sqlite booleans, weren’t working as defaults(link)

      • [databases/types] fix to postgres sequence quoting when using schemas(link)

      0.3.0

      Released: Sun Oct 22 2006

      general

      • [general] logging is now implemented via standard python “logging” module. “echo” keyword parameters are still functional but set/unset log levels for their respective classes/instances. all logging can be controlled directly through the Python API by setting INFO and DEBUG levels for loggers in the “sqlalchemy” namespace. class-level logging is under “sqlalchemy.<module>.<classname>”, instance-level logging under “sqlalchemy.<module>.<classname>.0x..<00-FF>”. Test suite includes “--log-info” and “--log-debug” arguments which work independently of --verbose/--quiet. Logging added to orm to allow tracking of mapper configurations, row iteration.(link)

      • [general] the documentation-generation system has been overhauled to be much simpler in design and more integrated with Markdown(link)

      orm

      • [orm] attribute tracking modified to be more intelligent about detecting changes, particularly with mutable types. TypeEngine objects now take a greater role in defining how to compare two scalar instances, including the addition of a MutableType mixin which is implemented by PickleType. unit-of-work now tracks the “dirty” list as an expression of all persistent objects where the attribute manager detects changes. The basic issue thats fixed is detecting changes on PickleType objects, but also generalizes type handling and “modified” object checking to be more complete and extensible.(link)

      • [orm] a wide refactoring to “attribute loader” and “options” architectures. ColumnProperty and PropertyLoader define their loading behavior via switchable “strategies”, and MapperOptions no longer use mapper/property copying in order to function; they are instead propagated via QueryContext and SelectionContext objects at query/instances time. All of the internal copying of mappers and properties that was used to handle inheritance as well as options() has been removed; the structure of mappers and properties is much simpler than before and is clearly laid out in the new ‘interfaces’ module.(link)

      • [orm] related to the mapper/property overhaul, internal refactoring to mapper instances() method to use a SelectionContext object to track state during the operation. SLIGHT API BREAKAGE: the append_result() and populate_instances() methods on MapperExtension have a slightly different method signature now as a result of the change; hoping that these methods are not in widespread use as of yet.(link)

      • [orm] instances() method moved to Query now, backwards-compatible version remains on Mapper.(link)

      • [orm] added contains_eager() MapperOption, used in conjunction with instances() to specify properties that should be eagerly loaded from the result set, using their plain column names by default, or translated given an custom row-translation function.(link)

      • [orm] more rearrangements of unit-of-work commit scheme to better allow dependencies within circular flushes to work properly...updated task traversal/logging implementation(link)

      • [orm] polymorphic mappers (i.e. using inheritance) now produces INSERT statements in order of tables across all inherited classes(link)

        References: #321

      • [orm] added an automatic “row switch” feature to mapping, which will detect a pending instance/deleted instance pair with the same identity key and convert the INSERT/DELETE to a single UPDATE(link)

      • [orm] “association” mappings simplified to take advantage of automatic “row switch” feature(link)

      • [orm] “custom list classes” is now implemented via the “collection_class” keyword argument to relation(). the old way still works but is deprecated(link)

        References: #212

      • [orm] added “viewonly” flag to relation(), allows construction of relations that have no effect on the flush() process.(link)

      • [orm] added “lockmode” argument to base Query select/get functions, including “with_lockmode” function to get a Query copy that has a default locking mode. Will translate “read”/”update” arguments into a for_update argument on the select side.(link)

        References: #292

      • [orm] implemented “version check” logic in Query/Mapper, used when version_id_col is in effect and query.with_lockmode() is used to get() an instance thats already loaded(link)

      • [orm] post_update behavior improved; does a better job at not updating too many rows, updates only required columns(link)

        References: #208

      • [orm] adjustments to eager loading so that its “eager chain” is kept separate from the normal mapper setup, thereby preventing conflicts with lazy loader operation, fixes(link)

        References: #308

      • [orm] fix to deferred group loading(link)

      • [orm] session.flush() won't close a connection it opened(link)

        References: #346

      • [orm] added “batch=True” flag to mapper; if False, save_obj will fully save one object at a time including calls to before_XXXX and after_XXXX(link)

      • [orm] added “column_prefix=None” argument to mapper; prepends the given string (typically ‘_’) to column-based attributes automatically set up from the mapper’s Table(link)

      • [orm] specifying joins in the from_obj argument of query.select() will replace the main table of the query, if the table is somewhere within the given from_obj. this makes it possible to produce custom joins and outerjoins in queries without the main table getting added twice.(link)

        References: #315

      • [orm] eagerloading is adjusted to more thoughtfully attach its LEFT OUTER JOINs to the given query, looking for custom “FROM” clauses that may have already been set up.(link)

      • [orm] added join_to and outerjoin_to transformative methods to SelectResults, to build up join/outerjoin conditions based on property names. also added select_from to explicitly set from_obj parameter.(link)

      • [orm] removed “is_primary” flag from mapper.(link)

      sql

      • [sql] [construction] changed “for_update” parameter to accept False/True/”nowait” and “read”, the latter two of which are interpreted only by Oracle and Mysql(link)

        References: #292
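
        For illustration (users is a hypothetical Table; the rendered SQL depends on the dialect):

        stmt = select([users], for_update='nowait')   # e.g. FOR UPDATE NOWAIT on Oracle
        stmt = select([users], for_update='read')     # e.g. LOCK IN SHARE MODE on MySQL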

      • [sql] [construction] added extract() function to sql dialect (SELECT extract(field FROM expr))(link)
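
        A short sketch, assuming extract() is exported from the sqlalchemy namespace (orders is a hypothetical Table):

        from sqlalchemy import select, extract

        stmt = select([extract('year', orders.c.order_date)])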

      • [sql] [construction] BooleanExpression includes new “negate” argument to specify the appropriate negation operator if one is available.(link)

      • [sql] [construction] calling a negation on an “IN” or “IS” clause will result in “NOT IN”, “IS NOT” (as opposed to NOT (x IN y)).(link)

      • [sql] [construction] Function objects know what to do in a FROM clause now. their behavior should be the same, except now you can also do things like select([‘*’], from_obj=[func.my_function()]) to get multiple columns from the result, or even use sql.column() constructs to name the return columns(link)

        References: #172

      schema

      • [schema] a fair amount of cleanup to the schema package, removal of ambiguous methods, methods that are no longer needed. slightly more constrained usage, greater emphasis on explicitness(link)

      • [schema] the “primary_key” attribute of Table and other selectables becomes a setlike ColumnCollection object; is ordered but not numerically indexed. a comparison clause between two pks that are derived from the same underlying tables (i.e. such as two Alias objects) can be generated via table1.primary_key==table2.primary_key(link)

      • [schema] ForeignKey(Constraint) supports “use_alter=True”, to create/drop a foreign key via ALTER. this allows circular foreign key relationships to be set up.(link)

      • [schema] append_item() methods removed from Table and Column; preferably construct Table/Column/related objects inline, but if needed use append_column(), append_foreign_key(), append_constraint(), etc.(link)

      • [schema] table.create() no longer returns the Table object, instead has no return value. the usual case is that tables are created via metadata, which is preferable since it will handle table dependencies.(link)

      • [schema] added UniqueConstraint (goes at Table level), CheckConstraint (goes at Table or Column level).(link)

      • [schema] index=False/unique=True on Column now creates a UniqueConstraint, index=True/unique=False creates a plain Index, index=True/unique=True on Column creates a unique Index. ‘index’ and ‘unique’ keyword arguments to column are now boolean only; for explicit names and groupings of indexes or unique constraints, use the UniqueConstraint/Index constructs explicitly.(link)
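
        For example:

        Column('email', String(50), unique=True)               # generates a UniqueConstraint
        Column('email', String(50), index=True)                # generates an Index
        Column('email', String(50), index=True, unique=True)   # generates a unique Index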

      • [schema] added autoincrement=True to Column; will disable schema generation of SERIAL/AUTO_INCREMENT/identity seq for postgres/mysql/mssql if explicitly set to False(link)

      • [schema] TypeEngine objects now have methods to deal with copying and comparing values of their specific type. Currently used by the ORM, see below.(link)

      • [schema] fixed condition that occurred during reflection when a primary key column was explicitly overridden, where the PrimaryKeyConstraint would get both the reflected and the programmatic column doubled up(link)

      • [schema] the “foreign_key” attribute on Column and ColumnElement in general is deprecated, in favor of the “foreign_keys” list/set-based attribute, which takes into account multiple foreign keys on one column. “foreign_key” will return the first element in the “foreign_keys” list/set or None if the list is empty.(link)

      sqlite

      • [sqlite] sqlite boolean datatype converts False/True to 0/1 by default(link)

      • [sqlite] fixes to Date/Time (SLDate/SLTime) types; works as good as postgres now(link)

        References: #335

      oracle

      • [oracle] Oracle has experimental support for cx_Oracle.TIMESTAMP, which requires a setinputsizes() call on the cursor that is now enabled via the ‘auto_setinputsizes’ flag to the oracle dialect.(link)

      firebird

      • [firebird] aliases do not use “AS”(link)

      • [firebird] correctly raises NoSuchTableError when reflecting non-existent table(link)

      misc

      • [ms-sql] fixes bug 261 (table reflection broken for MS-SQL case-sensitive databases)(link)

      • [ms-sql] can now specify port for pymssql(link)

      • [ms-sql] introduces new “auto_identity_insert” option for automatically switching to “SET IDENTITY_INSERT” mode when values are specified for IDENTITY columns(link)

      • [ms-sql] now supports multi-column foreign keys(link)

      • [ms-sql] fix to reflecting date/datetime columns(link)

      • [ms-sql] NCHAR and NVARCHAR type support added(link)

      • [connections/pooling/execution] connection pool tracks open cursors and automatically closes them if connection is returned to pool with cursors still opened. Can be affected by options which cause it to raise an error instead, or to do nothing. fixes issues with MySQL, others(link)

      • [connections/pooling/execution] fixed bug where Connection wouldn't lose its Transaction after commit/rollback(link)

      • [connections/pooling/execution] added scalar() method to ComposedSQLEngine, ResultProxy(link)

      • [connections/pooling/execution] ResultProxy will close() the underlying cursor when the ResultProxy itself is closed. this will auto-close cursors for ResultProxy objects that have had all their rows fetched (or had scalar() called).(link)

      • [connections/pooling/execution] ResultProxy.fetchall() internally uses DBAPI fetchall() for better efficiency, added to mapper iteration as well (courtesy Michael Twomey)(link)

SQLAlchemy-0.8.4/doc/changelog/changelog_04.html


      0.4 Changelog

      0.4.8

      Released: Sun Oct 12 2008

      orm

      • [orm] Fixed bug regarding inherit_condition passed with “A=B” versus “B=A” leading to errors(link)

        References: #1039

      • [orm] Changes made to new, dirty and deleted collections in SessionExtension.before_flush() will take effect for that flush.(link)

      • [orm] Added label() method to InstrumentedAttribute to establish forwards compatibility with 0.5.(link)

      sql

      • [sql] column.in_(someselect) can now be used as a columns-clause expression without the subquery bleeding into the FROM clause(link)

        References: #1074
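
        A rough sketch (users and addresses are hypothetical tables):

        stmt = select([
            users.c.name,
            # rendered as a column expression; "addresses" does not bleed into the FROM clause
            users.c.id.in_(select([addresses.c.user_id])).label('has_address'),
        ])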

      mysql

      • [mysql] Added MSMediumInteger type.(link)

        References: #1146

      oracle

      • [oracle] has_sequence() now takes schema name into account(link)

        References: #1155

      • [oracle] added BFILE to the list of reflected types(link)

        References: #1121

      0.4.7p1

      Released: Thu Jul 31 2008

      orm

      • [orm] Added “add()” and “add_all()” to scoped_session methods. Workaround for 0.4.7:

        from sqlalchemy.orm.scoping import ScopedSession, instrument
        setattr(ScopedSession, "add", instrument("add"))
        setattr(ScopedSession, "add_all", instrument("add_all"))
        (link)

      • [orm] Fixed non-2.3 compatible usage of set() and generator expression within relation().(link)

      0.4.7

      Released: Sat Jul 26 2008

      orm

      • [orm] The contains() operator when used with many-to-many will alias() the secondary (association) table so that multiple contains() calls will not conflict with each other(link)

        References: #1058

      • [orm] fixed bug preventing merge() from functioning in conjunction with a comparable_property()(link)

      • [orm] the enable_typechecks=False setting on relation() now only allows subtypes with inheriting mappers. Totally unrelated types, or subtypes not set up with mapper inheritance against the target mapper are still not allowed.(link)

      • [orm] Added is_active flag to Sessions to detect when a transaction is in progress. This flag is always True with a “transactional” (in 0.5 a non-“autocommit”) Session.(link)

        References: #976

      sql

      • [sql] Fixed bug when calling select([literal(‘foo’)]) or select([bindparam(‘foo’)]).(link)

      schema

      • [schema] create_all(), drop_all(), create(), drop() all raise an error if the table name or schema name contains more characters than that dialect’s configured character limit. Some DB’s can handle too-long table names during usage, and SQLA can handle this as well. But various reflection/ checkfirst-during-create scenarios fail since we are looking for the name within the DB’s catalog tables.(link)

        References: #571

      • [schema] The index name generated when you say “index=True” on a Column is truncated to the length appropriate for the dialect. Additionally, an Index with a too-long name cannot be explicitly dropped with Index.drop(), similar to the issue in the references below.(link)

        References: #571, #820

      mysql

      • [mysql] Added ‘CALL’ to the list of SQL keywords which return result rows.(link)

      oracle

      • [oracle] Oracle get_default_schema_name() “normalizes” the name before returning, meaning it returns a lower-case name when the identifier is detected as case insensitive.(link)

      • [oracle] creating/dropping tables takes schema name into account when searching for the existing table, so that tables in other owner namespaces with the same name do not conflict(link)

        References: #709

      • [oracle] Cursors now have “arraysize” set to 50 by default on them, the value of which is configurable using the “arraysize” argument to create_engine() with the Oracle dialect. This to account for cx_oracle’s default setting of “1”, which has the effect of many round trips being sent to Oracle. This actually works well in conjunction with BLOB/CLOB-bound cursors, of which there are any number available but only for the life of that row request (so BufferedColumnRow is still needed, but less so).(link)

        References: #1062

      • [oracle] [sqlite] add SLFloat type, which matches the SQLite REAL type affinity. Previously, only SLNumeric was provided which fulfills NUMERIC affinity, but that’s not the same as REAL.(link)

      misc

      • [postgres] Repaired server_side_cursors to properly detect text() clauses.(link)

      • [postgres] Added PGCidr type.(link)

        References: #1092

      0.4.6

      Released: Sat May 10 2008

      orm

      • [orm] Fix to the recent relation() refactoring which fixes exotic viewonly relations which join between local and remote table multiple times, with a common column shared between the joins.(link)

      • [orm] Also re-established viewonly relation() configurations that join across multiple tables.(link)

      • [orm] Added experimental relation() flag to help with primaryjoins across functions, etc., _local_remote_pairs=[tuples]. This complements a complex primaryjoin condition allowing you to provide the individual column pairs which comprise the relation’s local and remote sides. Also improved lazy load SQL generation to handle placing bind params inside of functions and other expressions. (partial progress towards)(link)

        References: #610

      • [orm] repaired single table inheritance such that you can single-table inherit from a joined-table inheriting mapper without issue.(link)

        References: #1036

      • [orm] Fixed “concatenate tuple” bug which could occur with Query.order_by() if clause adaption had taken place.(link)

        References: #1027

      • [orm] Removed ancient assertion that mapped selectables require “alias names” - the mapper creates its own alias now if none is present. Though in this case you need to use the class, not the mapped selectable, as the source of column attributes - so a warning is still issued.(link)

      • [orm] fixes to the “exists” function involving inheritance (any(), has(), ~contains()); the full target join will be rendered into the EXISTS clause for relations that link to subclasses.(link)

      • [orm] restored usage of append_result() extension method for primary query rows, when the extension is present and only a single-entity result is being returned.(link)

      • [orm] refined mapper._save_obj() which was unnecessarily calling __ne__() on scalar values during flush(link)

        References: #1015

      • [orm] added a feature to eager loading whereby subqueries set as column_property() with explicit label names (which is not necessary, btw) will have the label anonymized when the instance is part of the eager join, to prevent conflicts with a subquery or column of the same name on the parent object.(link)

        References: #1019

      • [orm] set-based collections |=, -=, ^= and &= are stricter about their operands and only operate on sets, frozensets or subclasses of the collection type. Previously, they would accept any duck-typed set.(link)

      • [orm] added an example dynamic_dict/dynamic_dict.py, illustrating a simple way to place dictionary behavior on top of a dynamic_loader.(link)

      orm declarative

      • [orm] [extension] [declarative] Joined table inheritance mappers use a slightly relaxed function to create the “inherit condition” to the parent table, so that other foreign keys to not-yet-declared Table objects don’t trigger an error.(link)

      • [orm] [extension] [declarative] fixed reentrant mapper compile hang when a declared attribute is used within ForeignKey, ie. ForeignKey(MyOtherClass.someattribute)(link)

      sql

      • [sql] Added COLLATE support via the .collate(<collation>) expression operator and collate(<expr>, <collation>) sql function.(link)
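
        A short sketch of both forms (the collation name and users table are illustrative; assumes collate is importable from the sqlalchemy namespace):

        from sqlalchemy import select, collate

        stmt = select([users.c.name.collate('latin1_german2_ci')])
        stmt = select([collate(users.c.name, 'latin1_german2_ci')])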

      • [sql] Fixed bug with union() when applied to non-Table connected select statements(link)

      • [sql] improved behavior of text() expressions when used as FROM clauses, such as select().select_from(text(“sometext”))(link)

        References: #1014

      • [sql] Column.copy() respects the value of “autoincrement”, fixes usage with Migrate(link)

        References: #1021

      mssql

      • [mssql] Added “odbc_autotranslate” parameter to engine / dburi parameters. Any given string will be passed through to the ODBC connection string as:

        “AutoTranslate=%s” % odbc_autotranslate
        (link)

        References: #1005

      • [mssql] Added “odbc_options” parameter to engine / dburi parameters. The given string is simply appended to the SQLAlchemy-generated odbc connection string.

        This should obviate the need of adding a myriad of ODBC options in the future.

        (link)

      firebird

      • [firebird] Handle the “SUBSTRING(:string FROM :start FOR :length)” builtin.(link)

      misc

      • [engines] Pool listeners can now be provided as a dictionary of callables or a (possibly partial) duck-type of PoolListener, your choice.(link)

      • [engines] added “rollback_returned” option to Pool which will disable the rollback() issued when connections are returned. This flag is only safe to use with a database which does not support transactions (i.e. MySQL/MyISAM).(link)

      • [ext] set-based association proxies |=, -=, ^= and &= are stricter about their operands and only operate on sets, frozensets or other association proxies. Previously, they would accept any duck-typed set.(link)

      0.4.5

      Released: Fri Apr 04 2008

      orm

      • [orm] A small change in behavior to session.merge() - existing objects are checked for based on primary key attributes, not necessarily _instance_key. So the widely requested capability, that:

        x = MyObject(id=1)
        x = sess.merge(x)

        will in fact load MyObject with id #1 from the database if present, is now available. merge() still copies the state of the given object to the persistent one, so an example like the above would typically have copied “None” from all attributes of “x” onto the persistent copy. These can be reverted using session.expire(x).

        (link)

      • [orm] Also fixed behavior in merge() whereby collection elements present on the destination but not the merged collection were not being removed from the destination.(link)

      • [orm] Added a more aggressive check for “uncompiled mappers”, helps particularly with declarative layer(link)

        References: #995

      • [orm] The methodology behind “primaryjoin”/”secondaryjoin” has been refactored. Behavior should be slightly more intelligent, primarily in terms of error messages which have been pared down to be more readable. In a slight number of scenarios it can better resolve the correct foreign key than before.(link)

      • [orm] Added comparable_property(), adds query Comparator behavior to regular, unmanaged Python properties(link)

      • [orm] the functionality of query.with_polymorphic() has been added to mapper() as a configuration option.

        It’s set via several forms:
        with_polymorphic='*'
        with_polymorphic=[mappers]
        with_polymorphic=('*', selectable)
        with_polymorphic=([mappers], selectable)

        This controls the default polymorphic loading strategy for inherited mappers. When a selectable is not given, outer joins are created for all joined-table inheriting mappers requested. Note that the auto-create of joins is not compatible with concrete table inheritance.

        The existing select_table flag on mapper() is now deprecated and is synonymous with with_polymorphic(‘*’, select_table). Note that the underlying “guts” of select_table have been completely removed and replaced with the newer, more flexible approach.

        The new approach also automatically allows eager loads to work for subclasses, if they are present, for example:

        sess.query(Company).options(
            eagerload_all(
                [Company.employees.of_type(Engineer), 'machines']
            )
        )

        to load Company objects, their employees, and the ‘machines’ collection of employees who happen to be Engineers. A “with_polymorphic” Query option should be introduced soon as well which would allow per-Query control of with_polymorphic() on relations.

        (link)

      • [orm] added two “experimental” features to Query, “experimental” in that their specific name/behavior is not carved in stone just yet: _values() and _from_self(). We’d like feedback on these.

        • _values(*columns) is given a list of column expressions, and returns a new Query that only returns those columns. When evaluated, the return value is a list of tuples just like when using add_column() or add_entity(), the only difference is that “entity zero”, i.e. the mapped class, is not included in the results. This means it finally makes sense to use group_by() and having() on Query, which have been sitting around uselessly until now.

          A future change to this method may include that its ability to join, filter and allow other options not related to a “resultset” are removed, so the feedback we’re looking for is how people want to use _values()...i.e. at the very end, or do people prefer to continue generating after it’s called.

        • _from_self() compiles the SELECT statement for the Query (minus any eager loaders), and returns a new Query that selects from that SELECT. So basically you can query from a Query without needing to extract the SELECT statement manually. This gives meaning to operations like query[3:5]._from_self().filter(some criterion). There’s not much controversial here except that you can quickly create highly nested queries that are less efficient, and we want feedback on the naming choice.
        (link)

      • [orm] query.order_by() and query.group_by() will accept multiple arguments using *args (like select() already does).(link)

      • [orm] Added some convenience descriptors to Query: query.statement returns the full SELECT construct, query.whereclause returns just the WHERE part of the SELECT construct.(link)

      • [orm] Fixed/covered case when using a False/0 value as a polymorphic discriminator.(link)

      • [orm] Fixed bug which was preventing synonym() attributes from being used with inheritance(link)

      • [orm] Fixed SQL function truncation of trailing underscores(link)

        References: #996

      • [orm] When attributes are expired on a pending instance, an error will not be raised when the “refresh” action is triggered and no result is found.(link)

      • [orm] Session.execute can now find binds from metadata(link)

      • [orm] Adjusted the definition of “self-referential” to be any two mappers with a common parent (this affects whether or not aliased=True is required when joining with Query).(link)

      • [orm] Made some fixes to the “from_joinpoint” argument to query.join() so that if the previous join was aliased and this one isn’t, the join still happens successfully.(link)

      • [orm]

        Assorted “cascade deletes” fixes:
        • Fixed “cascade delete” operation of dynamic relations, which had only been implemented for foreign-key nulling behavior in 0.4.2 and not actual cascading deletes
        • Delete cascade without delete-orphan cascade on a many-to-one will not delete orphans which were disconnected from the parent before session.delete() is called on the parent (one-to-many already had this).
        • Delete cascade with delete-orphan will delete orphans whether or not it remains attached to its also-deleted parent.
        • delete-orphan cascade is properly detected on relations that are present on superclasses when using inheritance.
        (link)

        References: #895

      • [orm] Fixed order_by calculation in Query to properly alias mapper-config’ed order_by when using select_from()(link)

      • [orm] Refactored the diffing logic that kicks in when replacing one collection with another into collections.bulk_replace, useful to anyone building multi-level collections.(link)

      • [orm] Cascade traversal algorithm converted from recursive to iterative to support deep object graphs.(link)

      orm declarative

      • [orm] [extension] [declarative] The “synonym” function is now directly usable with “declarative”. Pass in the decorated property using the “descriptor” keyword argument, e.g.: somekey = synonym(‘_somekey’, descriptor=property(g, s))(link)

      • [orm] [extension] [declarative] The “deferred” function is usable with “declarative”. Simplest usage is to declare deferred and Column together, e.g.: data = deferred(Column(Text))(link)

      • [orm] [extension] [declarative] Declarative also gained @synonym_for(...) and @comparable_using(...), front-ends for synonym and comparable_property.(link)

      • [orm] [extension] [declarative] Improvements to mapper compilation when using declarative; already-compiled mappers will still trigger compiles of other uncompiled mappers when used(link)

        References: #995

      • [orm] [extension] [declarative] Declarative will complete setup for Columns lacking names, allows a more DRY syntax.

        class Foo(Base):
            __tablename__ = 'foos'
            id = Column(Integer, primary_key=True)
        (link)

      • [orm] [extension] [declarative] inheritance in declarative can be disabled when sending “inherits=None” to __mapper_args__.(link)

      • [orm] [extension] [declarative] declarative_base() takes optional kwarg “mapper”, which is any callable/class/method that produces a mapper, such as declarative_base(mapper=scopedsession.mapper). This property can also be set on individual declarative classes using the “__mapper_cls__” property.(link)

      sql

      • [sql] schema-qualified tables now will place the schemaname ahead of the tablename in all column expressions as well as when generating column labels. This prevents cross-schema name collisions in all cases(link)

        References: #999

      • [sql] can now allow selects which correlate all FROM clauses and have no FROM themselves. These are typically used in a scalar context, i.e. SELECT x, (SELECT x WHERE y) FROM table. Requires explicit correlate() call.(link)

      • [sql] ‘name’ is no longer a required constructor argument for Column(). It (and .key) may now be deferred until the column is added to a Table.(link)

      • [sql] like(), ilike(), contains(), startswith(), endswith() take an optional keyword argument “escape=<somestring>”, which is set as the escape character using the syntax “x LIKE y ESCAPE ‘<somestring>’”.(link)

        References: #791, #993
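
        For example, matching names that contain the literal string “10%” by escaping the percent sign (users is a hypothetical table):

        stmt = select([users], users.c.name.like('%10/%%', escape='/'))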

      • [sql] random() is now a generic sql function and will compile to the database’s random implementation, if any.(link)

      • [sql] update().values() and insert().values() take keyword arguments.(link)
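
        For example (users is a hypothetical table):

        stmt = users.insert().values(name='ed', fullname='Ed Jones')
        stmt = users.update().where(users.c.name == 'jack').values(fullname='Jack Jones')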

      • [sql] Fixed an issue in select() regarding its generation of FROM clauses, in rare circumstances two clauses could be produced when one was intended to cancel out the other. Some ORM queries with lots of eager loads might have seen this symptom.(link)

      • [sql] The case() function now also takes a dictionary as its whens parameter. It also interprets the “THEN” expressions as values by default, meaning case([(x==y, “foo”)]) will interpret “foo” as a bound value, not a SQL expression. use text(expr) for literal SQL expressions in this case. For the criterion itself, these may be literal strings only if the “value” keyword is present, otherwise SA will force explicit usage of either text() or literal().(link)
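
        A short sketch of both forms (users is a hypothetical table):

        from sqlalchemy import case

        # list form: the "THEN" parts are treated as bound values by default
        expr = case([(users.c.name == 'wendy', 'W')], else_='E')

        # dictionary form: keys are literal values compared against the "value" expression
        expr = case({'wendy': 'W', 'jack': 'J'}, value=users.c.name, else_='E')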

      mysql

      • [mysql] The connection.info keys the dialect uses to cache server settings have changed and are now namespaced.(link)

      mssql

      • [mssql] Reflected tables will now automatically load other tables which are referenced by foreign keys in the auto-loaded table.(link)

        References: #979

      • [mssql] Added executemany check to skip identity fetch.(link)

        References: #916

      • [mssql] Added stubs for small date type.(link)

        References: #884

      • [mssql] Added a new ‘driver’ keyword parameter for the pyodbc dialect. Will substitute into the ODBC connection string if given, defaults to ‘SQL Server’.(link)

      • [mssql] Added a new ‘max_identifier_length’ keyword parameter for the pyodbc dialect.(link)

      • [mssql] Improvements to pyodbc + Unix. If you couldn’t get that combination to work before, please try again.(link)

      oracle

      • [oracle] The “owner” keyword on Table is now deprecated, and is exactly synonymous with the “schema” keyword. Tables can now be reflected with alternate “owner” attributes, explicitly stated on the Table object or not using “schema”.(link)

      • [oracle] All of the “magic” searching for synonyms, DBLINKs etc. during table reflection are disabled by default unless you specify “oracle_resolve_synonyms=True” on the Table object. Resolving synonyms necessarily leads to some messy guessing which we’d rather leave off by default. When the flag is set, tables and related tables will be resolved against synonyms in all cases, meaning if a synonym exists for a particular table, reflection will use it when reflecting related tables. This is stickier behavior than before which is why it’s off by default.(link)

      misc

      • [postgres] Got PG server side cursors back into shape, added fixed unit tests as part of the default test suite. Added better uniqueness to the cursor ID(link)

        References: #1001

      0.4.4

      Released: Wed Mar 12 2008

      orm

      • [orm] any(), has(), contains(), ~contains(), attribute level == and != now work properly with self-referential relations - the clause inside the EXISTS is aliased on the “remote” side to distinguish it from the parent table. This applies to single table self-referential as well as inheritance-based self-referential.(link)

      • [orm] Repaired behavior of == and != operators at the relation() level when compared against NULL for one-to-one relations(link)

        References: #985

      • [orm] Fixed bug whereby session.expire() attributes were not loading on an polymorphically-mapped instance mapped by a select_table mapper.(link)

      • [orm] Added query.with_polymorphic() - specifies a list of classes which descend from the base class, which will be added to the FROM clause of the query. Allows subclasses to be used within filter() criterion as well as eagerly loads the attributes of those subclasses.(link)
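
        A hedged sketch (Employee and Engineer are hypothetical mapped classes in a joined-table inheritance hierarchy):

        q = session.query(Employee).with_polymorphic([Engineer])
        # Engineer's joined table is now part of the FROM clause, so its columns
        # can be used directly in filter() criteria
        q = q.filter(Engineer.engineer_info == 'knows sqlalchemy')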

      • [orm] Your cries have been heard: removing a pending item from an attribute or collection with delete-orphan expunges the item from the session; no FlushError is raised. Note that if you session.save()’ed the pending item explicitly, the attribute/collection removal still knocks it out.(link)

      • [orm] session.refresh() and session.expire() raise an error when called on instances which are not persistent within the session(link)

      • [orm] Fixed potential generative bug when the same Query was used to generate multiple Query objects using join().(link)

      • [orm] Fixed bug which was introduced in 0.4.3, whereby loading an already-persistent instance mapped with joined table inheritance would trigger a useless “secondary” load from its joined table, when using the default “select” polymorphic_fetch. This was due to attributes being marked as expired during its first load and not getting unmarked from the previous “secondary” load. Attributes are now unexpired based on presence in __dict__ after any load or commit operation succeeds.(link)

      • [orm] Deprecated Query methods apply_sum(), apply_max(), apply_min(), apply_avg(). Better methodologies are coming....(link)

      • [orm] relation() can accept a callable for its first argument, which returns the class to be related. This is in place to assist declarative packages to define relations without classes yet being in place.(link)

      • [orm] Added a new “higher level” operator called “of_type()”: used in join() as well as with any() and has(), qualifies the subclass which will be used in filter criterion, e.g.:

        query.filter(Company.employees.of_type(Engineer).
        any(Engineer.name=='foo'))

        or

        query.join(Company.employees.of_type(Engineer)).
        filter(Engineer.name=='foo')
        (link)

      • [orm] Preventive code against a potential lost-reference bug in flush().(link)

      • [orm] Expressions used in filter(), filter_by() and others, when they make usage of a clause generated from a relation using the identity of a child object (e.g., filter(Parent.child==<somechild>)), evaluate the actual primary key value of <somechild> at execution time so that the autoflush step of the Query can complete, thereby populating the PK value of <somechild> in the case that <somechild> was pending.(link)

      • [orm] setting the relation()-level order by to a column in the many-to-many “secondary” table will now work with eager loading, previously the “order by” wasn’t aliased against the secondary table’s alias.(link)

      • [orm] Synonyms riding on top of existing descriptors are now full proxies to those descriptors.(link)

      sql

      • [sql] Can again create aliases of selects against textual FROM clauses.(link)

        References: #975

      • [sql] The value of a bindparam() can be a callable, in which case it’s evaluated at statement execution time to get the value.(link)

      • [sql] Added exception wrapping/reconnect support to result set fetching. Reconnect works for those databases that raise a catchable data error during results (i.e. doesn’t work on MySQL)(link)

        References: #978

      • [sql] Implemented two-phase API for “threadlocal” engine, via engine.begin_twophase(), engine.prepare()(link)

        References: #936

      • [sql] Fixed bug which was preventing UNIONS from being cloneable.(link)

        References: #986

      • [sql] Added “bind” keyword argument to insert(), update(), delete() and DDL(). The .bind property is now assignable on those statements as well as on select().(link)

      • [sql] Insert statements can now be compiled with extra “prefix” words between INSERT and INTO, for vendor extensions like MySQL’s INSERT IGNORE INTO table.(link)

      misc

      • [dialects] Invalid SQLite connection URLs now raise an error.(link)

      • [dialects] postgres TIMESTAMP renders correctly(link)

        References: #981

      • [dialects] postgres PGArray is a “mutable” type by default; when used with the ORM, mutable-style equality/ copy-on-write techniques are used to test for changes.(link)

      • [extensions] a new super-small “declarative” extension has been added, which allows Table and mapper() configuration to take place inline underneath a class declaration. This extension differs from ActiveMapper and Elixir in that it does not redefine any SQLAlchemy semantics at all; literal Column, Table and relation() constructs are used to define the class behavior and table definition.(link)

      0.4.3

      Released: Thu Feb 14 2008

      general

      • [general] Fixed a variety of hidden and some not-so-hidden compatibility issues for Python 2.3, thanks to new support for running the full test suite on 2.3.(link)

      • [general] Warnings are now issued as type exceptions.SAWarning.(link)

      orm

      • [orm] Every Session.begin() must now be accompanied by a corresponding commit() or rollback() unless the session is closed with Session.close(). This also includes the begin() which is implicit to a session created with transactional=True. The biggest change introduced here is that when a Session created with transactional=True raises an exception during flush(), you must call Session.rollback() or Session.close() in order for that Session to continue after an exception.(link)

      • [orm] Fixed merge() collection-doubling bug when merging transient entities with backref’ed collections.(link)

        References: #961

      • [orm] merge(dont_load=True) does not accept transient entities, this is in continuation with the fact that merge(dont_load=True) does not accept any “dirty” objects either.(link)

      • [orm] Added standalone “query” class attribute generated by a scoped_session. This provides MyClass.query without using Session.mapper. Use via:

        MyClass.query = Session.query_property()
        (link)

      • [orm] The proper error message is raised when trying to access expired instance attributes with no session present(link)

      • [orm] dynamic_loader() / lazy=”dynamic” now accepts and uses the order_by parameter in the same way in which it works with relation().(link)

      • [orm] Added expire_all() method to Session. Calls expire() for all persistent instances. This is handy in conjunction with...(link)

      • [orm] Instances which have been partially or fully expired will have their expired attributes populated during a regular Query operation which affects those objects, preventing a needless second SQL statement for each instance.(link)

      • [orm] Dynamic relations, when referenced, create a strong reference to the parent object so that the query still has a parent to call against even if the parent is only created (and otherwise dereferenced) within the scope of a single expression.(link)

        References: #938

      • [orm] Added a mapper() flag “eager_defaults”. When set to True, defaults that are generated during an INSERT or UPDATE operation are post-fetched immediately, instead of being deferred until later. This mimics the old 0.3 behavior.(link)

      • [orm] query.join() can now accept class-mapped attributes as arguments. These can be used in place or in any combination with strings. In particular this allows construction of joins to subclasses on a polymorphic relation, i.e.:

        query(Company).join(['employees', Engineer.name])
        (link)

      • [orm] query.join() can also accept tuples of attribute name/some selectable as arguments. This allows construction of joins from subclasses of a polymorphic relation, i.e.:

        query(Company).join(
            [('employees', people.join(engineer)), Engineer.name]
        )

        (link)

      • [orm] General improvements to the behavior of join() in conjunction with polymorphic mappers, i.e. joining from/to polymorphic mappers and properly applying aliases.(link)

      • [orm] Fixed/improved behavior when a mapper determines the natural “primary key” of a mapped join, it will more effectively reduce columns which are equivalent via foreign key relation. This affects how many arguments need to be sent to query.get(), among other things.(link)

        References: #933

      • [orm] The lazy loader can now handle a join condition where the “bound” column (i.e. the one that gets the parent id sent as a bind parameter) appears more than once in the join condition. Specifically this allows the common task of a relation() which contains a parent-correlated subquery, such as “select only the most recent child item”.(link)

        References: #946

      • [orm] Fixed bug in polymorphic inheritance where an incorrect exception is raised when base polymorphic_on column does not correspond to any columns within the local selectable of an inheriting mapper more than one level deep(link)

      • [orm] Fixed bug in polymorphic inheritance which made it difficult to set a working “order_by” on a polymorphic mapper.(link)

      • [orm] Fixed a rather expensive call in Query that was slowing down polymorphic queries.(link)

      • [orm] “Passive defaults” and other “inline” defaults can now be loaded during a flush() call if needed; in particular, this allows constructing relations() where a foreign key column references a server-side-generated, non-primary-key column.(link)

        References: #954

      • [orm]

        Additional Session transaction fixes/changes:
        • Fixed bug with session transaction management: parent transactions weren’t started on the connection when adding a connection to a nested transaction.
        • session.transaction now always refers to the innermost active transaction, even when commit/rollback are called directly on the session transaction object.
        • Two-phase transactions can now be prepared.
        • When preparing a two-phase transaction fails on one connection, all the connections are rolled back.
        • session.close() didn’t close all transactions when nested transactions were used.
        • rollback() previously erroneously set the current transaction directly to the parent of the transaction that could be rolled back to. Now it rolls back the next transaction up that can handle it, but sets the current transaction to its parent and inactivates the transactions in between. Inactive transactions can only be rolled back or closed; any other call results in an error.
        • autoflush for commit() wasn’t flushing for simple subtransactions.
        • unitofwork flush didn’t close the failed transaction when the session was not in a transaction and committing the transaction failed.
        (link)

      • [orm] Miscellaneous tickets:(link)

        References: #964, #940

      sql

      • [sql] Added “schema.DDL”, an executable free-form DDL statement. DDLs can be executed in isolation or attached to Table or MetaData instances and executed automatically when those objects are created and/or dropped.(link)
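
        As a hedged illustration of attaching a DDL statement so it runs after a table is created (the index name and users_table are invented, and execute_at() is stated here as an assumption about the attachment hook of this era):

        DDL("CREATE INDEX users_name_idx ON users (name)").execute_at(
            'after-create', users_table)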

      • [sql] Table columns and constraints can be overridden on an existing table (such as a table that was already reflected) using the ‘useexisting=True’ flag, which now takes into account the arguments passed along with it.(link)

      • [sql] Added a callable-based DDL events interface, adds hooks before and after Tables and MetaData create and drop.(link)

      • [sql] Added generative where(<criterion>) method to delete() and update() constructs which return a new object with criterion joined to existing criterion via AND, just like select().where().(link)
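
        For example (the users table is a hypothetical placeholder), successive where() calls AND the criteria together:

        stmt = users.delete().where(users.c.id > 5).where(users.c.name == 'ed')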

      • [sql] Added “ilike()” operator to column operations. Compiles to ILIKE on postgres, lower(x) LIKE lower(y) on all others.(link)

        References: #727
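
        A quick sketch against a hypothetical users table:

        select([users], users.c.name.ilike('%fred%'))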

      • [sql] Added “now()” as a generic function; on SQLite, Oracle and MSSQL compiles as “CURRENT_TIMESTAMP”; “now()” on all others.(link)

        References: #943

      • [sql] The startswith(), endswith(), and contains() operators now concatenate the wildcard operator with the given operand in SQL, i.e. “’%’ || <bindparam>” in all cases, and properly accept text(‘something’) operands(link)

        References: #962

      • [sql] cast() accepts text(‘something’) and other non-literal operands properly(link)

        References: #962

      • [sql] fixed bug in result proxy where anonymously generated column labels would not be accessible using their straight string name(link)

      • [sql] Deferrable constraints can now be defined.(link)

      • [sql] Added “autocommit=True” keyword argument to select() and text(), as well as generative autocommit() method on select(); for statements which modify the database through some user-defined means other than the usual INSERT/UPDATE/ DELETE etc. This flag will enable “autocommit” behavior during execution if no transaction is in progress.(link)

        References: #915
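
        A minimal sketch of flagging a textual statement that modifies data outside of the usual INSERT/UPDATE/DELETE (the procedure name and the engine variable are invented for illustration):

        engine.execute(text("EXEC refresh_report_tables", autocommit=True))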

      • [sql] The ‘.c.’ attribute on a selectable now gets an entry for every column expression in its columns clause. Previously, “unnamed” columns like functions and CASE statements weren’t getting put there. Now they will, using their full string representation if no ‘name’ is available.(link)

      • [sql] a CompositeSelect, i.e. any union(), union_all(), intersect(), etc. now asserts that each selectable contains the same number of columns. This conforms to the corresponding SQL requirement.(link)

      • [sql] The anonymous ‘label’ generated for otherwise unlabeled functions and expressions now propagates outwards at compile time for expressions like select([select([func.foo()])]).(link)

      • [sql] Building on the above ideas, CompositeSelects now build up their ”.c.” collection based on the names present in the first selectable only; corresponding_column() now works fully for all embedded selectables.(link)

      • [sql] Oracle and others properly encode SQL used for defaults like sequences, etc., even if no unicode idents are used since identifier preparer may return a cached unicode identifier.(link)

      • [sql] Column and clause comparisons to datetime objects on the left hand side of the expression now work (d < table.c.col). (datetimes on the RHS have always worked, the LHS exception is a quirk of the datetime implementation.)(link)

      misc

      • [dialects] Better support for schemas in SQLite (linked in by ATTACH DATABASE ... AS name). In some cases in the past, schema names were omitted from generated SQL for SQLite. This is no longer the case.(link)

      • [dialects] table_names on SQLite now picks up temporary tables as well.(link)

      • [dialects] Auto-detect an unspecified MySQL ANSI_QUOTES mode during reflection operations, support for changing the mode midstream. Manual mode setting is still required if no reflection is used.(link)

      • [dialects] Fixed reflection of TIME columns on SQLite.(link)

      • [dialects] Finally added PGMacAddr type to postgres(link)

        References: #580

      • [dialects] Reflect the sequence associated to a PK field (typically with a BEFORE INSERT trigger) under Firebird(link)

      • [dialects] Oracle assembles the correct columns in the result set column mapping when generating a LIMIT/OFFSET subquery, allows columns to map properly to result sets even if long-name truncation kicks in(link)

        References: #941

      • [dialects] MSSQL now includes EXEC in the _is_select regexp, which should allow row-returning stored procedures to be used.(link)

      • [dialects] MSSQL now includes an experimental implementation of LIMIT/OFFSET using the ANSI SQL row_number() function, so it requires MSSQL-2005 or higher. To enable the feature, add “has_window_funcs” to the keyword arguments for connect, or add ”?has_window_funcs=1” to your dburi query arguments.(link)

      • [ext] Changed ext.activemapper to use a non-transactional session for the objectstore.(link)

      • [ext] Fixed output order of “[‘a’] + obj.proxied” binary operation on association-proxied lists.(link)

      0.4.2p3

      Released: Wed Jan 09 2008

      general

      • [general] sub-version numbering scheme changed to suit setuptools version number rules; easy_install -u should now get this version over 0.4.2.(link)

      orm

      • [orm] fixed bug with session.dirty when using “mutable scalars” (such as PickleTypes)(link)

      • [orm] added a more descriptive error message when flushing on a relation() that has non-locally-mapped columns in its primary or secondary join condition(link)

      • [orm] suppressing all errors in InstanceState.__cleanup() now.(link)

      • [orm] fixed an attribute history bug whereby assigning a new collection to a collection-based attribute which already had pending changes would generate incorrect history(link)

        References: #922

      • [orm] fixed delete-orphan cascade bug whereby setting the same object twice to a scalar attribute could log it as an orphan(link)

        References: #925

      • [orm] Fixed cascades on a += assignment to a list-based relation.(link)

      • [orm] synonyms can now be created against props that don’t exist yet, which are later added via add_property(). This commonly includes backrefs. (i.e. you can make synonyms for backrefs without worrying about the order of operations)(link)

        References: #919

      • [orm] fixed bug which could occur with polymorphic “union” mapper which falls back to “deferred” loading of inheriting tables(link)

      • [orm] the “columns” collection on a mapper/mapped class (i.e. ‘c’) is against the mapped table, not the select_table in the case of polymorphic “union” loading (this shouldn’t be noticeable).(link)

      • [orm] fixed fairly critical bug whereby the same instance could be listed more than once in the unitofwork.new collection; most typically reproduced when using a combination of inheriting mappers and ScopedSession.mapper, as the multiple __init__ calls per instance could save() the object with distinct _state objects(link)

      • [orm] added very rudimentary yielding iterator behavior to Query. Call query.yield_per(<number of rows>) and evaluate the Query in an iterative context; every collection of N rows will be packaged up and yielded. Use this method with extreme caution since it does not attempt to reconcile eagerly loaded collections across result batch boundaries, nor will it behave nicely if the same instance occurs in more than one batch. This means that an eagerly loaded collection will get cleared out if it’s referenced in more than one batch, and in all cases attributes will be overwritten on instances that occur in more than one batch.(link)
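
        A cautious usage sketch, keeping the caveats above in mind (Widget and handle() are hypothetical names):

        for widget in session.query(Widget).yield_per(100):
            handle(widget)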

      • [orm] Fixed in-place set mutation operators for set collections and association proxied sets.(link)

        References: #920

      sql

      • [sql] Text type is properly exported now and does not raise a warning on DDL create; String types with no length only raise warnings during CREATE TABLE(link)

        References: #912

      • [sql] new UnicodeText type is added, to specify an encoded, unlengthed Text type(link)

      • [sql] fixed bug in union() so that select() statements which don’t derive from FromClause objects can be unioned(link)

      • [sql] changed name of TEXT to Text since it’s a “generic” type; the TEXT name is deprecated until 0.5. The “upgrading” behavior of String to Text when no length is present is also deprecated until 0.5; will issue a warning when used for CREATE TABLE statements (String with no length for SQL expression purposes is still fine)(link)

        References: #912

      • [sql] generative select.order_by(None) / group_by(None) was not managing to reset order by/group by criterion, fixed(link)

        References: #924

      misc

      • [dialects] Fixed reflection of mysql empty string column defaults.(link)

      • [ext] ‘+’, ‘*’, ‘+=’ and ‘*=’ support for association proxied lists.(link)

      • [dialects] mssql - narrowed down the test for “date”/”datetime” in MSDate/ MSDateTime subclasses so that incoming “datetime” objects don’t get mis-interpreted as “date” objects and vice versa.(link)

        References: #923

      • [dialects] Fixed the missing call to subtype result processor for the PGArray type.(link)

        References: #913

      0.4.2

      Released: Wed Jan 02 2008

      orm

      • [orm] a major behavioral change to collection-based backrefs: they no longer trigger lazy loads! “reverse” adds and removes are queued up and are merged with the collection when it is actually read from and loaded; but do not trigger a load beforehand. For users who have noticed this behavior, this should be much more convenient than using dynamic relations in some cases; for those who have not, you might notice your apps using a lot fewer queries than before in some situations.(link)

        References: #871

      • [orm] mutable primary key support is added. primary key columns can be changed freely, and the identity of the instance will change upon flush. In addition, update cascades of foreign key referents (primary key or not) along relations are supported, either in tandem with the database’s ON UPDATE CASCADE (required for DB’s like Postgres) or issued directly by the ORM in the form of UPDATE statements, by setting the flag “passive_cascades=False”.(link)

      • [orm] inheriting mappers now inherit the MapperExtensions of their parent mapper directly, so that all methods for a particular MapperExtension are called for subclasses as well. As always, any MapperExtension can return either EXT_CONTINUE to continue extension processing or EXT_STOP to stop processing. The order of mapper resolution is: <extensions declared on the class’s mapper> <extensions declared on the class’s parent mapper> <globally declared extensions>.

        Note that if you instantiate the same extension class separately and then apply it individually for two mappers in the same inheritance chain, the extension will be applied twice to the inheriting class, and each method will be called twice.

        To apply a mapper extension explicitly to each inheriting class but have each method called only once per operation, use the same instance of the extension for both mappers.

        (link)

        References: #490

      • [orm] MapperExtension.before_update() and after_update() are now called symmetrically; previously, an instance that had no modified column attributes (but had a relation() modification) could be called with before_update() but not after_update()(link)

        References: #907

      • [orm] columns which are missing from a Query’s select statement now get automatically deferred during load.(link)

      • [orm] mapped classes which extend “object” and do not provide an __init__() method will now raise TypeError if non-empty *args or **kwargs are present at instance construction time (and are not consumed by any extensions such as the scoped_session mapper), consistent with the behavior of normal Python classes(link)

        References: #908

      • [orm] fixed Query bug when filter_by() compares a relation against None(link)

        References: #899

      • [orm] improved support for pickling of mapped entities. Per-instance lazy/deferred/expired callables are now serializable so that they serialize and deserialize with _state.(link)

      • [orm] new synonym() behavior: an attribute will be placed on the mapped class, if one does not exist already, in all cases. if a property already exists on the class, the synonym will decorate the property with the appropriate comparison operators so that it can be used in column expressions just like any other mapped attribute (i.e. usable in filter(), etc.) the “proxy=True” flag is deprecated and no longer means anything. Additionally, the flag “map_column=True” will automatically generate a ColumnProperty corresponding to the name of the synonym, i.e.: ‘somename’:synonym(‘_somename’, map_column=True) will map the column named ‘somename’ to the attribute ‘_somename’. See the example in the mapper docs.(link)

        References: #801
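
        Restating the example from this entry in full mapper() form (MyClass and mytable are placeholder names):

        mapper(MyClass, mytable, properties={
            'somename': synonym('_somename', map_column=True)
        })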

      • [orm] Query.select_from() now replaces all existing FROM criterion with the given argument; the previous behavior of constructing a list of FROM clauses was generally not useful, as it required filter() calls to create join criterion, and new tables introduced within filter() already add themselves to the FROM clause. The new behavior allows not just joins from the main table, but select statements as well. Filter criterion, order bys, eager load clauses will be “aliased” against the given statement.(link)

      • [orm] this month’s refactoring of attribute instrumentation changes the “copy-on-load” behavior we’ve had since midway through 0.3 with “copy-on-modify” in most cases. This takes a sizable chunk of latency out of load operations and overall does less work as only attributes which are actually modified get their “committed state” copied. Only “mutable scalar” attributes (i.e. a pickled object or other mutable item), the reason for the copy-on-load change in the first place, retain the old behavior.(link)

      • [orm] a slight behavioral change to attributes is that del’ing an attribute does not cause the lazyloader of that attribute to fire off again; the “del” makes the effective value of the attribute “None”. To re-trigger the “loader” for an attribute, use session.expire(instance, [attrname]).(link)

      • [orm] query.filter(SomeClass.somechild == None), when comparing a many-to-one property to None, properly generates “id IS NULL” including that the NULL is on the right side.(link)

      • [orm] query.order_by() takes into account aliased joins, i.e. query.join(‘orders’, aliased=True).order_by(Order.id)(link)

      • [orm] eagerload(), lazyload(), eagerload_all() take an optional second class-or-mapper argument, which will select the mapper to apply the option towards. This can select among other mappers which were added using add_entity().(link)

      • [orm] eagerloading will work with mappers added via add_entity().(link)

      • [orm] added “cascade delete” behavior to “dynamic” relations just like that of regular relations. if passive_deletes flag (also just added) is not set, a delete of the parent item will trigger a full load of the child items so that they can be deleted or updated accordingly.(link)

      • [orm] also with dynamic, implemented correct count() behavior as well as other helper methods.(link)

      • [orm] fix to cascades on polymorphic relations, such that cascades from an object to a polymorphic collection continue cascading along the set of attributes specific to each element in the collection.(link)

      • [orm] query.get() and query.load() do not take existing filter or other criterion into account; these methods always look up the given id in the database or return the current instance from the identity map, disregarding any existing filter, join, group_by or other criterion which has been configured.(link)

        References: #893

      • [orm] added support for version_id_col in conjunction with inheriting mappers. version_id_col is typically set on the base mapper in an inheritance relationship where it takes effect for all inheriting mappers.(link)

        References: #883

      • [orm] relaxed rules on column_property() expressions having labels; any ColumnElement is accepted now, as the compiler auto-labels non-labeled ColumnElements now. a selectable, like a select() statement, still requires conversion to ColumnElement via as_scalar() or label().(link)

      • [orm] fixed backref bug where you could not del instance.attr if attr was None(link)

      • [orm] several ORM attributes have been removed or made private: mapper.get_attr_by_column(), mapper.set_attr_by_column(), mapper.pks_by_table, mapper.cascade_callable(), MapperProperty.cascade_callable(), mapper.canload(), mapper.save_obj(), mapper.delete_obj(), mapper._mapper_registry, attributes.AttributeManager(link)

      • [orm] Assigning an incompatible collection type to a relation attribute now raises TypeError instead of sqlalchemy’s ArgumentError.(link)

      • [orm] Bulk assignment of a MappedCollection now raises an error if a key in the incoming dictionary does not match the key that the collection’s keyfunc would use for that value.(link)

        References: #886

      • [orm] Custom collections can now specify a @converter method to translate objects used in “bulk” assignment into a stream of values, as in:

        obj.col = [newval1, newval2]
        # or
        obj.dictcol = {'foo': newval1, 'bar': newval2}

        The MappedCollection uses this hook to ensure that incoming key/value pairs are sane from the collection’s perspective.

        (link)

      • [orm] fixed endless loop issue when using lazy=”dynamic” on both sides of a bi-directional relationship(link)

        References: #872

      • [orm] more fixes to the LIMIT/OFFSET aliasing applied with Query + eagerloads, in this case when mapped against a select statement(link)

        References: #904

      • [orm] fix to self-referential eager loading such that if the same mapped instance appears in two or more distinct sets of columns in the same result set, its eagerly loaded collection will be populated regardless of whether or not all of the rows contain a set of “eager” columns for that collection. this would also show up as a KeyError when fetching results with join_depth turned on.(link)

      • [orm] fixed bug where Query would not apply a subquery to the SQL when LIMIT was used in conjunction with an inheriting mapper where the eager loader was only in the parent mapper.(link)

      • [orm] clarified the error message which occurs when you try to update() an instance with the same identity key as an instance already present in the session.(link)

      • [orm] some clarifications and fixes to merge(instance, dont_load=True). fixed bug where lazy loaders were getting disabled on returned instances. Also, we currently do not support merging an instance which has uncommitted changes on it, in the case that dont_load=True is used....this will now raise an error. This is due to complexities in merging the “committed state” of the given instance to correctly correspond to the newly copied instance, as well as other modified state. Since the use case for dont_load=True is caching, the given instances shouldn’t have any uncommitted changes on them anyway. We also copy the instances over without using any events now, so that the ‘dirty’ list on the new session remains unaffected.(link)

      • [orm] fixed bug which could arise when using session.begin_nested() in conjunction with more than one level deep of enclosing session.begin() statements(link)

      • [orm] fixed session.refresh() with instance that has custom entity_name(link)

        References: #914

      sql

      • [sql] generic functions! We introduce a database of known SQL functions, such as current_timestamp, coalesce, and create explicit function objects representing them. These objects have constrained argument lists, are type aware, and can compile in a dialect-specific fashion. So saying func.char_length(“foo”, “bar”) raises an error (too many args), func.coalesce(datetime.date(2007, 10, 5), datetime.date(2005, 10, 15)) knows that its return type is a Date. We only have a few functions represented so far but will continue to add to the system(link)

        References: #615

      • [sql] auto-reconnect support improved; a Connection can now automatically reconnect after its underlying connection is invalidated, without needing to connect() again from the engine. This allows an ORM session bound to a single Connection to not need a reconnect. Open transactions on the Connection must be rolled back after an invalidation of the underlying connection else an error is raised. Also fixed bug where disconnect detect was not being called for cursor(), rollback(), or commit().(link)

      • [sql] added new flag to String and create_engine(), assert_unicode=(True|False|’warn’|None). Defaults to False or None on create_engine() and String, ‘warn’ on the Unicode type. When True, results in all unicode conversion operations raising an exception when a non-unicode bytestring is passed as a bind parameter. ‘warn’ results in a warning. It is strongly advised that all unicode-aware applications make proper use of Python unicode objects (i.e. u’hello’ and not ‘hello’) so that data round trips accurately.(link)

      • [sql] generation of “unique” bind parameters has been simplified to use the same “unique identifier” mechanisms as everything else. This doesn’t affect user code, except any code that might have been hardcoded against the generated names. Generated bind params now have the form “<paramname>_<num>”, whereas before only the second bind of the same name would have this form.(link)

      • [sql] select().as_scalar() will raise an exception if the select does not have exactly one expression in its columns clause.(link)

      • [sql] bindparam() objects themselves can be used as keys for execute(), i.e. statement.execute({bind1:’foo’, bind2:’bar’})(link)

      • [sql] added new methods to TypeDecorator, process_bind_param() and process_result_value(), which automatically take advantage of the processing of the underlying type. Ideal for using with Unicode or Pickletype. TypeDecorator should now be the primary way to augment the behavior of any existing type including other TypeDecorator subclasses such as PickleType.(link)
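
        A hedged sketch of a TypeDecorator subclass using the new hooks (the whitespace-trimming behavior and the class name are invented purely for illustration):

        class TrimmedString(TypeDecorator):
            impl = String

            def process_bind_param(self, value, dialect):
                # strip whitespace before the underlying String type's own processing
                return value.strip() if value is not None else None

            def process_result_value(self, value, dialect):
                # pass result values through unchanged
                return value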

      • [sql] selectables (and others) will issue a warning when two columns in their exported columns collection conflict based on name.(link)

      • [sql] tables with schemas can still be used in sqlite, firebird, schema name just gets dropped(link)

        References: #890

      • [sql] changed the various “literal” generation functions to use an anonymous bind parameter. not much changes here except their labels now look like ”:param_1”, ”:param_2” instead of ”:literal”(link)

      • [sql] column labels in the form “tablename.columnname”, i.e. with a dot, are now supported.(link)

      • [sql] from_obj keyword argument to select() can be a scalar or a list.(link)

      firebird

      • [firebird] [backend] does properly reflect domains (partially fixing) and PassiveDefaults(link)

        References: #410

      • [firebird] [3562] [backend] reverted to use default poolclass (was set to SingletonThreadPool in 0.4.0 for test purposes)(link)

      • [firebird] [backend] map func.length() to ‘char_length’ (easily overridable with the UDF ‘strlen’ on old versions of Firebird)(link)

      misc

      • [dialects] sqlite SLDate type will not erroneously render “microseconds” portion of a datetime or time object.(link)

      • [dialects]

        oracle
        • added disconnect detection support for Oracle
        • some cleanup to binary/raw types so that cx_oracle.LOB is detected on an ad-hoc basis
        (link)

        References: #902

      • [dialects]

        MSSQL
        • PyODBC no longer has a global “set nocount on”.
        • Fix non-identity integer PKs on autoload
        • Better support for convert_unicode
        • Less strict date conversion for pyodbc/adodbapi
        • Schema-qualified tables / autoload
        (link)

        References: #824, #839, #842, #901

      0.4.1

      Released: Sun Nov 18 2007

      orm

      • [orm] eager loading with LIMIT/OFFSET applied no longer adds the primary table joined to a limited subquery of itself; the eager loads now join directly to the subquery which also provides the primary table’s columns to the result set. This eliminates a JOIN from all eager loads with LIMIT/OFFSET.(link)

        References: #843

      • [orm] session.refresh() and session.expire() now support an additional argument “attribute_names”, a list of individual attribute keynames to be refreshed or expired, allowing partial reloads of attributes on an already-loaded instance.(link)

        References: #802
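
        For instance (the user instance and attribute names here are hypothetical):

        session.expire(user, attribute_names=['name', 'addresses'])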

      • [orm] added op() operator to instrumented attributes; i.e. User.name.op(‘ilike’)(‘%somename%’)(link)

        References: #767

      • [orm] Mapped classes may now define __eq__, __hash__, and __nonzero__ methods with arbitrary semantics. The orm now handles all mapped instances on an identity-only basis. (e.g. ‘is’ vs ‘==’)(link)

        References: #676

      • [orm] the “properties” accessor on Mapper is removed; it now throws an informative exception explaining the usage of mapper.get_property() and mapper.iterate_properties(link)

      • [orm] added having() method to Query, applies HAVING to the generated statement in the same way as filter() appends to the WHERE clause.(link)

      • [orm] The behavior of query.options() is now fully based on paths, i.e. an option such as eagerload_all(‘x.y.z.y.x’) will apply eagerloading to only those paths, i.e. and not ‘x.y.x’; eagerload(‘children.children’) applies only to exactly two-levels deep, etc.(link)

        References: #777

      • [orm] PickleType will compare using == when set up with mutable=False, and not the is operator. To use is or any other comparator, send in a custom comparison function using PickleType(comparator=my_custom_comparator).(link)

      • [orm] query doesn’t throw an error if you use distinct() and an order_by() containing UnaryExpressions (or other) together(link)

        References: #848

      • [orm] order_by() expressions from joined tables are properly added to columns clause when using distinct()(link)

        References: #786

      • [orm] fixed error where Query.add_column() would not accept a class-bound attribute as an argument; Query also raises an error if an invalid argument was sent to add_column() (at instances() time)(link)

        References: #858

      • [orm] added a little more checking for garbage-collection dereferences in InstanceState.__cleanup() to reduce “gc ignored” errors on app shutdown(link)

      • [orm] The session API has been solidified:(link)

      • [orm] It’s an error to session.save() an object which is already persistent(link)

        References: #840

      • [orm] It’s an error to session.delete() an object which is not persistent.(link)

      • [orm] session.update() and session.delete() raise an error when updating or deleting an instance that is already in the session with a different identity.(link)

      • [orm] The session checks more carefully when determining “object X already in another session”; e.g. if you pickle a series of objects and unpickle (i.e. as in a Pylons HTTP session or similar), they can go into a new session without any conflict(link)

      • [orm] merge() includes a keyword argument “dont_load=True”. setting this flag will cause the merge operation to not load any data from the database in response to incoming detached objects, and will accept the incoming detached object as though it were already present in that session. Use this to merge detached objects from external caching systems into the session.(link)
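
        A small sketch of pulling a cached, detached object into the session without a database round trip (cached_user is a hypothetical detached instance):

        merged = session.merge(cached_user, dont_load=True)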

      • [orm] Deferred column attributes no longer trigger a load operation when the attribute is assigned to. In those cases, the newly assigned value will be present in the flushes’ UPDATE statement unconditionally.(link)

      • [orm] Fixed a truncation error when re-assigning a subset of a collection (obj.relation = obj.relation[1:])(link)

        References: #834

      • [orm] De-cruftified backref configuration code, backrefs which step on existing properties now raise an error(link)

        References: #832

      • [orm] Improved behavior of add_property() etc.; fixed issues involving synonym/deferred.(link)

        References: #831

      • [orm] Fixed clear_mappers() behavior to better clean up after itself.(link)

      • [orm] Fix to “row switch” behavior, i.e. when an INSERT/DELETE is combined into a single UPDATE; many-to-many relations on the parent object update properly.(link)

        References: #841

      • [orm] Fixed __hash__ for association proxy- these collections are unhashable, just like their mutable Python counterparts.(link)

      • [orm] Added proxying of save_or_update, __contains__ and __iter__ methods for scoped sessions.(link)

      • [orm] fixed very hard-to-reproduce issue whereby the FROM clause of Query could get polluted by certain generative calls(link)

        References: #852

      sql

      • [sql] the “shortname” keyword parameter on bindparam() has been deprecated.(link)

      • [sql] Added contains operator (generates a “LIKE %<other>%” clause).(link)

      • [sql] anonymous column expressions are automatically labeled. e.g. select([x * 5]) produces “SELECT x * 5 AS anon_1”. This allows the labelname to be present in the cursor.description which can then be appropriately matched to result-column processing rules. (we can’t reliably use positional tracking for result-column matches since text() expressions may represent multiple columns).(link)

      • [sql] operator overloading is now controlled by TypeEngine objects - the one built-in operator overload so far is String types overloading ‘+’ to be the string concatenation operator. User-defined types can also define their own operator overloading by overriding the adapt_operator(self, op) method.(link)

      • [sql] untyped bind parameters on the right side of a binary expression will be assigned the type of the left side of the operation, to better enable the appropriate bind parameter processing to take effect(link)

        References: #819

      • [sql] Removed regular expression step from most statement compilations. Also fixes(link)

        References: #833

      • [sql] Fixed empty (zero column) sqlite inserts, allowing inserts on autoincrementing single column tables.(link)

      • [sql] Fixed expression translation of text() clauses; this repairs various ORM scenarios where literal text is used for SQL expressions(link)

      • [sql] Removed ClauseParameters object; compiled.params returns a regular dictionary now, as well as result.last_inserted_params() / last_updated_params().(link)

      • [sql] Fixed INSERT statements w.r.t. primary key columns that have SQL-expression based default generators on them; SQL expression executes inline as normal but will not trigger a “postfetch” condition for the column, for those DB’s who provide it via cursor.lastrowid(link)

      • [sql] func. objects can be pickled/unpickled(link)

        References: #844

      • [sql] rewrote and simplified the system used to “target” columns across selectable expressions. On the SQL side this is represented by the “corresponding_column()” method. This method is used heavily by the ORM to “adapt” elements of an expression to similar, aliased expressions, as well as to target result set columns originally bound to a table or selectable to an aliased, “corresponding” expression. The new rewrite features completely consistent and accurate behavior.(link)

      • [sql] Added a field (“info”) for storing arbitrary data on schema items(link)

        References: #573

      • [sql] The “properties” collection on Connections has been renamed “info” to match schema’s writable collections. Access is still available via the “properties” name until 0.5.(link)

      • [sql] fixed the close() method on Transaction when using strategy=’threadlocal’(link)

      • [sql] fix to compiled bind parameters to not mistakenly populate None(link)

        References: #853

      • [sql] <Engine|Connection>._execute_clauseelement becomes a public method Connectable.execute_clauseelement(link)

      misc

      • [dialects] Added experimental support for MaxDB (versions >= 7.6.03.007 only).(link)

      • [dialects] oracle will now reflect “DATE” as an OracleDateTime column, not OracleDate(link)

      • [dialects] added awareness of schema name in oracle table_names() function, fixes metadata.reflect(schema=’someschema’)(link)

        References: #847

      • [dialects] MSSQL anonymous labels for selection of functions made deterministic(link)

      • [dialects] sqlite will reflect “DECIMAL” as a numeric column.(link)

      • [dialects] Made access dao detection more reliable(link)

        References: #828

      • [dialects] Renamed the Dialect attribute ‘preexecute_sequences’ to ‘preexecute_pk_sequences’. An attribute proxy is in place for out-of-tree dialects using the old name.(link)

      • [dialects] Added test coverage for unknown type reflection. Fixed sqlite/mysql handling of type reflection for unknown types.(link)

      • [dialects] Added REAL for mysql dialect (for folks exploiting the REAL_AS_FLOAT sql mode).(link)

      • [dialects] mysql Float, MSFloat and MSDouble constructed without arguments now produce no-argument DDL, e.g. ‘FLOAT’.(link)

      • [misc] Removed unused util.hash().(link)

      0.4.0

      Released: Wed Oct 17 2007
      • (see 0.4.0beta1 for the start of major changes against 0.3, as well as http://www.sqlalchemy.org/trac/wiki/WhatsNewIn04 )(link)

      • Added initial Sybase support (mxODBC so far)(link)

        References: #785

      • Added partial index support for PostgreSQL. Use the postgres_where keyword on the Index.(link)
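
        A hedged example of the keyword (the index, table, and column names are invented):

        Index('ix_orders_open', orders.c.id, postgres_where=orders.c.closed == False)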

      • string-based query param parsing/config file parser understands wider range of string values for booleans(link)

        References: #817

      • backref remove object operation doesn’t fail if the other-side collection doesn’t contain the item, supports noload collections(link)

        References: #813

      • removed __len__ from “dynamic” collection as it would require issuing a SQL “count()” operation, thus forcing all list evaluations to issue redundant SQL(link)

        References: #818

      • inline optimizations added to locate_dirty() which can greatly speed up repeated calls to flush(), as occurs with autoflush=True(link)

        References: #816

      • The IdentifierPreparer’s _requires_quotes test is now regex based. Any out-of-tree dialects that provide custom sets of legal_characters or illegal_initial_characters will need to move to regexes or override _requires_quotes.(link)

      • Firebird has supports_sane_rowcount and supports_sane_multi_rowcount set to False due to ticket #370 (right way).(link)

      • Improvements and fixes on Firebird reflection:
        • FBDialect now mimics OracleDialect, regarding case-sensitivity of TABLE and COLUMN names (see the ‘case_sensitive removal’ topic in this file).
        • FBDialect.table_names() no longer returns system tables (ticket:796).
        • FB now reflects Column’s nullable property correctly.
        (link)

      • Fixed SQL compiler’s awareness of top-level column labels as used in result-set processing; nested selects which contain the same column names don’t affect the result or conflict with result-column metadata.(link)

      • query.get() and related functions (like many-to-one lazyloading) use compile-time-aliased bind parameter names, to prevent name conflicts with bind parameters that already exist in the mapped selectable.(link)

      • Fixed three- and multi-level select and deferred inheritance loading (i.e. abc inheritance with no select_table).(link)

        References: #795

      • Ident passed to id_chooser in shard.py always a list.(link)

      • The no-arg ResultProxy._row_processor() is now the class attribute _process_row.(link)

      • Added support for returning values from inserts and updates for PostgreSQL 8.2+.(link)

        References: #797

      • PG reflection, upon seeing the default schema name being used explicitly as the “schema” argument in a Table, will assume that this is the user’s desired convention, and will explicitly set the “schema” argument in foreign-key-related reflected tables, thus making them match only with Table constructors that also use the explicit “schema” argument (even though its the default schema). In other words, SA assumes the user is being consistent in this usage.(link)

      • fixed sqlite reflection of BOOL/BOOLEAN(link)

        References: #808

      • Added support for UPDATE with LIMIT on mysql.(link)

      • null foreign key on a m2o doesn’t trigger a lazyload(link)

        References: #803

      • oracle does not implicitly convert to unicode for non-typed result sets (i.e. when no TypeEngine/String/Unicode type is even being used; previously it was detecting DBAPI types and converting regardless). should fix(link)

        References: #800

      • fix to anonymous label generation of long table/column names(link)

        References: #806

      • Firebird dialect now uses SingletonThreadPool as poolclass.(link)

      • Firebird now uses dialect.preparer to format sequences names(link)

      • Fixed breakage with postgres and multiple two-phase transactions. Two-phase commits and rollbacks didn’t automatically end up with a new transaction as the usual dbapi commits/rollbacks do.(link)

        References: #810

      • Added an option to the _ScopedExt mapper extension to not automatically save new objects to session on object initialization.(link)

      • fixed Oracle non-ansi join syntax(link)

      • PickleType and Interval types (on db not supporting it natively) are now slightly faster.(link)

      • Added Float and Time types to Firebird (FBFloat and FBTime). Fixed BLOB SUB_TYPE for TEXT and Binary types.(link)

      • Changed the API for the in_ operator. in_() now accepts a single argument that is a sequence of values or a selectable. The old API of passing in values as varargs still works but is deprecated.(link)
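
        Both forms of the new single-argument API, sketched against hypothetical users and addresses tables:

        users.c.id.in_([1, 2, 3])
        users.c.id.in_(select([addresses.c.user_id]))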

      0.4.0beta6

      Released: Thu Sep 27 2007
      • The Session identity map is now weak referencing by default, use weak_identity_map=False to use a regular dict. The weak dict we are using is customized to detect instances which are “dirty” and maintain a temporary strong reference to those instances until changes are flushed.(link)

      • Mapper compilation has been reorganized such that most compilation occurs upon mapper construction. This allows us to have fewer calls to mapper.compile() and also to allow class-based properties to force a compilation (i.e. User.addresses == 7 will compile all mappers; this is). The only caveat here is that an inheriting mapper now looks for its inherited mapper upon construction; so mappers within inheritance relationships need to be constructed in inheritance order (which should be the normal case anyway).(link)

        References: #758

      • added “FETCH” to the keywords detected by Postgres to indicate a result-row holding statement (i.e. in addition to “SELECT”).(link)

      • Added full list of SQLite reserved keywords so that they get escaped properly.(link)

      • Tightened up the relationship between the Query’s generation of “eager load” aliases, and Query.instances() which actually grabs the eagerly loaded rows. If the aliases were not specifically generated for that statement by EagerLoader, the EagerLoader will not take effect when the rows are fetched. This prevents columns from being grabbed accidentally as being part of an eager load when they were not meant for such, which can happen with textual SQL as well as some inheritance situations. It’s particularly important since the “anonymous aliasing” of columns uses simple integer counts now to generate labels.(link)

      • Removed “parameters” argument from clauseelement.compile(), replaced with “column_keys”. The parameters sent to execute() only interact with the insert/update statement compilation process in terms of the column names present but not the values for those columns. Produces more consistent execute/executemany behavior, simplifies things a bit internally.(link)

      • Added ‘comparator’ keyword argument to PickleType. By default, “mutable” PickleType does a “deep compare” of objects using their dumps() representation. But this doesn’t work for dictionaries. Pickled objects which provide an adequate __eq__() implementation can be set up with “PickleType(comparator=operator.eq)”(link)

        References: #560

      • Added session.is_modified(obj) method; performs the same “history” comparison operation as occurs within a flush operation; setting include_collections=False gives the same result as is used when the flush determines whether or not to issue an UPDATE for the instance’s row.(link)

      • Added “schema” argument to Sequence; use this with Postgres /Oracle when the sequence is located in an alternate schema. Implements part of, should fix.(link)

        References: #584, #761

      • Fixed reflection of the empty string for mysql enums.(link)

      • Changed MySQL dialect to use the older LIMIT <offset>, <limit> syntax instead of LIMIT <l> OFFSET <o> for folks using 3.23.(link)

        References: #794

      • Added ‘passive_deletes=”all”’ flag to relation(), disables all nulling-out of foreign key attributes during a flush where the parent object is deleted.(link)
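
        A minimal sketch of the flag on a relation() (Parent, parent_table, and Address are hypothetical names):

        mapper(Parent, parent_table, properties={
            'addresses': relation(Address, passive_deletes="all")
        })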

      • Column defaults and onupdates, executing inline, will add parenthesis for subqueries and other parenthesis-requiring expressions(link)

      • The behavior of String/Unicode types regarding that they auto-convert to TEXT/CLOB when no length is present now occurs only for an exact type of String or Unicode with no arguments. If you use VARCHAR or NCHAR (subclasses of String/Unicode) with no length, they will be interpreted by the dialect as VARCHAR/NCHAR; no “magic” conversion happens there. This is less surprising behavior and in particular this helps Oracle keep string-based bind parameters as VARCHARs and not CLOBs.(link)

        References: #793

      • Fixes to ShardedSession to work with deferred columns.(link)

        References: #771

      • User-defined shard_chooser() function must accept “clause=None” argument; this is the ClauseElement passed to session.execute(statement) and can be used to determine correct shard id (since execute() doesn’t take an instance.)(link)

      • Adjusted operator precedence of NOT to match ‘==’ and others, so that ~(x <operator> y) produces NOT (x <op> y), which is better compatible with older MySQL versions. This doesn’t apply to “~(x==y)” as it does in 0.3 since ~(x==y) compiles to “x != y”, but still applies to operators like BETWEEN.(link)

        References: #764

      • Other tickets:(link)

        References: #757, #768, #779, #728

      0.4.0beta5

      no release date
      • Connection pool fixes; the better performance of beta4 remains but fixes “connection overflow” and other bugs which were present (like).(link)

        References: #754

      • Fixed bugs in determining proper sync clauses from custom inherit conditions.(link)

        References: #769

      • Extended ‘engine_from_config’ coercion for QueuePool size / overflow.(link)

        References: #763

      • mysql views can be reflected again.(link)

        References: #748

      • AssociationProxy can now take custom getters and setters.(link)

      • Fixed malfunctioning BETWEEN in orm queries.(link)

      • Fixed OrderedProperties pickling(link)

        References: #762

      • SQL-expression defaults and sequences now execute “inline” for all non-primary key columns during an INSERT or UPDATE, and for all columns during an executemany()-style call. inline=True flag on any insert/update statement also forces the same behavior with a single execute(). result.postfetch_cols() is a collection of columns for which the previous single insert or update statement contained a SQL-side default expression.(link)

      • Fixed PG executemany() behavior.(link)

        References: #759

      • postgres reflects tables with autoincrement=False for primary key columns which have no defaults.(link)

      • postgres no longer wraps executemany() with individual execute() calls, instead favoring performance. “rowcount”/”concurrency” checks with deleted items (which use executemany) are disabled with PG since psycopg2 does not report proper rowcount for executemany().(link)

      • [tickets] [fixed] (link)

        References: #742

      • [tickets] [fixed] (link)

        References: #748

      • [tickets] [fixed] (link)

        References: #760

      • [tickets] [fixed] (link)

        References: #762

      • [tickets] [fixed] (link)

        References: #763

      0.4.0beta4

      Released: Wed Aug 22 2007
      • Tidied up what ends up in your namespace when you ‘from sqlalchemy import *’:(link)

      • ‘table’ and ‘column’ are no longer imported. They remain available by direct reference (as in ‘sql.table’ and ‘sql.column’) or a glob import from the sql package. It was too easy to accidentally use a sql.expressions.table instead of schema.Table when just starting out with SQLAlchemy, likewise column.(link)

      • Internal-ish classes like ClauseElement, FromClause, NullTypeEngine, etc., are also no longer imported into your namespace(link)

      • The ‘Smallinteger’ compatibility name (small i!) is no longer imported, but remains in schema.py for now. SmallInteger (big I!) is still imported.(link)

      • The connection pool uses a “threadlocal” strategy internally to return the same connection already bound to a thread, for “contextual” connections; these are the connections used when you do a “connectionless” execution like insert().execute(). This is like a “partial” version of the “threadlocal” engine strategy but without the thread-local transaction part of it. We’re hoping it reduces connection pool overhead as well as database usage. However, if it proves to impact stability in a negative way, we’ll roll it right back.(link)

      • Fix to bind param processing such that “False” values (like blank strings) still get processed/encoded.(link)

      • Fix to select() “generative” behavior, such that calling column(), select_from(), correlate(), and with_prefix() does not modify the original select object(link)

        References: #752

      • Added a “legacy” adapter to types, such that user-defined TypeEngine and TypeDecorator classes which define convert_bind_param() and/or convert_result_value() will continue to function. Also supports calling the super() version of those methods.(link)

      • Added session.prune(), trims away instances cached in a session that are no longer referenced elsewhere. (A utility for strong-ref identity maps).(link)

      • Added close() method to Transaction. Closes out a transaction using rollback if it’s the outermost transaction, otherwise just ends without affecting the outer transaction.(link)

      • Transactional and non-transactional Session integrates better with bound connection; a close() will ensure that connection transactional state is the same as that which existed on it before being bound to the Session.(link)

      • Modified SQL operator functions to be module-level operators, allowing SQL expressions to be pickleable.(link)

        References: #735

      • Small adjustment to mapper class.__init__ to allow for Py2.6 object.__init__() behavior.(link)

      • Fixed ‘prefix’ argument for select()(link)

      • Connection.begin() no longer accepts nested=True, this logic is now all in begin_nested().(link)

      • Fixes to new “dynamic” relation loader involving cascades(link)

      • [tickets] [fixed] (link)

        References: #735

      • [tickets] [fixed] (link)

        References: #752

      0.4.0beta3

      Released: Thu Aug 16 2007
      • SQL types optimization:(link)

      • New performance tests show a combined mass-insert/mass-select test as having 68% fewer function calls than the same test run against 0.3.(link)

      • General performance improvement of result set iteration is around 10-20%.(link)

      • In types.AbstractType, convert_bind_param() and convert_result_value() have migrated to callable-returning bind_processor() and result_processor() methods. If no callable is returned, no pre/post processing function is called.(link)

      • Hooks added throughout base/sql/defaults to optimize the calling of bind param/result processors so that method call overhead is minimized.(link)

      • Support added for executemany() scenarios such that unneeded “last row id” logic doesn’t kick in, parameters aren’t excessively traversed.(link)

      • Added ‘inherit_foreign_keys’ arg to mapper().(link)

      • Added support for string date passthrough in sqlite.(link)

      • [tickets] [fixed] (link)

        References: #738

      • [tickets] [fixed] (link)

        References: #739

      • [tickets] [fixed] (link)

        References: #743

      • [tickets] [fixed] (link)

        References: #744

      0.4.0beta2

      Released: Tue Aug 14 2007

      oracle

      • [oracle] [improvements.] Auto-commit after LOAD DATA INFILE for mysql.(link)

      • [oracle] [improvements.] A rudimental SessionExtension class has been added, allowing user-defined functionality to take place at flush(), commit(), and rollback() boundaries.(link)

      • [oracle] [improvements.] Added engine_from_config() function for helping to create_engine() from an .ini style config.(link)

      • [oracle] [improvements.] base_mapper() becomes a plain attribute.(link)

      • [oracle] [improvements.] session.execute() and scalar() can search for a Table with which to bind from using the given ClauseElement.(link)

      • [oracle] [improvements.] Session automatically extrapolates tables from mappers with binds, also uses base_mapper so that inheritance hierarchies bind automatically.(link)

      • [oracle] [improvements.] Moved ClauseVisitor traversal back to inlined non-recursive.(link)

      misc

      0.4.0beta1

      Released: Sun Aug 12 2007

      orm

      • [orm] Speed! Along with recent speedups to ResultProxy, total number of function calls significantly reduced for large loads.(link)

      • [orm] test/perf/masseagerload.py reports 0.4 as having the fewest number of function calls across all SA versions (0.1, 0.2, and 0.3).(link)

      • [orm] New collection_class api and implementation. Collections are now instrumented via decorations rather than proxying. You can now have collections that manage their own membership, and your class instance will be directly exposed on the relation property. The changes are transparent for most users.(link)

        References: #213

      • [orm] InstrumentedList (as it was) is removed, and relation properties no longer have ‘clear()’, ‘.data’, or any other added methods beyond those provided by the collection type. You are free, of course, to add them to a custom class.(link)

      • [orm] __setitem__-like assignments now fire remove events for the existing value, if any.(link)

      • [orm] dict-likes used as collection classes no longer need to change __iter__ semantics- itervalues() is used by default instead. This is a backwards incompatible change.(link)

      • [orm] Subclassing dict for a mapped collection is no longer needed in most cases. orm.collections provides canned implementations that key objects by a specified column or a custom function of your choice.(link)

      • [orm] Collection assignment now requires a compatible type- assigning None to clear a collection or assigning a list to a dict collection will now raise an argument error.(link)

      • [orm] AttributeExtension moved to interfaces, and .delete is now .remove The event method signature has also been swapped around.(link)

      • [orm] Major overhaul for Query:(link)

      • [orm] All selectXXX methods are deprecated. Generative methods are now the standard way to do things, i.e. filter(), filter_by(), all(), one(), etc. Deprecated methods are docstring’ed with their new replacements.(link)

      • [orm] Class-level properties are now usable as query elements... no more ‘.c.’! “Class.c.propname” is now superseded by “Class.propname”. All clause operators are supported, as well as higher level operators such as Class.prop==<some instance> for scalar attributes, Class.prop.contains(<some instance>) and Class.prop.any(<some expression>) for collection-based attributes (all are also negatable). Table-based column expressions as well as columns mounted on mapped classes via ‘c’ are of course still fully available and can be freely mixed with the new attributes.(link)

        References: #643

      • [orm] Removed ancient query.select_by_attributename() capability.(link)

      • [orm] The aliasing logic used by eager loading has been generalized, so that it also adds full automatic aliasing support to Query. It’s no longer necessary to create an explicit Alias to join to the same tables multiple times; even for self-referential relationships.

        • join() and outerjoin() take arguments “aliased=True”. This causes their joins to be built on aliased tables; subsequent calls to filter() and filter_by() will translate all table expressions (yes, real expressions using the original mapped Table) to be that of the Alias for the duration of that join() (i.e. until reset_joinpoint() or another join() is called).
        • join() and outerjoin() take arguments “id=<somestring>”. When used with “aliased=True”, the id can be referenced by add_entity(cls, id=<somestring>) so that you can select the joined instances even if they’re from an alias.
        • join() and outerjoin() now work with self-referential relationships! Using “aliased=True”, you can join as many levels deep as desired, i.e. query.join([‘children’, ‘children’], aliased=True); filter criterion will be against the rightmost joined table (see the sketch after this list).
        (link)
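
        A sketch of the aliased, self-referential join described in the list above; Node is a hypothetical mapped class with a self-referential relation() named “children”, and the session comes from the earlier sketch.

          session.query(Node).join(['children', 'children'], aliased=True) \
                 .filter(Node.name == 'grandchild 1').all()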

      • [orm] Added query.populate_existing(), marks the query to reload all attributes and collections of all instances touched in the query, including eagerly-loaded entities.(link)

        References: #660

      • [orm] Added eagerload_all(), allows eagerload_all(‘x.y.z’) to specify eager loading of all properties in the given path.(link)
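
        For example (a sketch; the Order class and the “items.keywords” path are hypothetical):

          from sqlalchemy.orm import eagerload_all

          session.query(Order).options(eagerload_all('items.keywords')).all()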

      • [orm] Major overhaul for Session:(link)

      • [orm] New function which “configures” a session called “sessionmaker()”. Send various keyword arguments to this function once; it returns a new class which creates a Session against that stereotype.(link)

      • [orm] SessionTransaction removed from “public” API. You now can call begin()/ commit()/rollback() on the Session itself.(link)

      • [orm] Session also supports SAVEPOINT transactions; call begin_nested().(link)

      • [orm] Session supports two-phase commit behavior when vertically or horizontally partitioning (i.e., using more than one engine). Use twophase=True.(link)

      • [orm] Session flag “transactional=True” produces a session which always places itself into a transaction when first used. Upon commit(), rollback() or close(), the transaction ends; but begins again on the next usage.(link)

      • [orm] Session supports “autoflush=True”. This issues a flush() before each query. Use in conjunction with transactional, and you can just save()/update() and then query; the new objects will be there. Use commit() at the end (or flush() if non-transactional) to flush remaining changes.(link)

      • [orm] New scoped_session() function replaces SessionContext and assignmapper. Builds onto “sessionmaker()” concept to produce a class whose Session() construction returns the thread-local session. Or, call all Session methods as class methods, i.e. Session.save(foo); Session.commit(), just like the old “objectstore” days.(link)
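
        A sketch combining sessionmaker(), the transactional/autoflush flags and scoped_session(), reusing the engine and User mapping from the earlier sketch:

          from sqlalchemy.orm import scoped_session, sessionmaker

          Session = scoped_session(
              sessionmaker(bind=engine, autoflush=True, transactional=True))

          Session.save(User())   # class-level call routed to the thread-local session
          Session.commit()       # save() is the 0.4 spelling; 0.5 deprecates it in favor of add()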

      • [orm] Added new “binds” argument to Session to support configuration of multiple binds with sessionmaker() function.(link)

      • [orm] A rudimental SessionExtension class has been added, allowing user-defined functionality to take place at flush(), commit(), and rollback() boundaries.(link)

      • [orm] Query-based relation()s available with dynamic_loader(). This is a writable collection (supporting append() and remove()) which is also a live Query object when accessed for reads. Ideal for dealing with very large collections where only partial loading is desired.(link)
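
        A sketch of dynamic_loader(); Post, Comment and their posts/comments tables are hypothetical:

          from sqlalchemy.orm import dynamic_loader

          mapper(Post, posts, properties={
              'comments': dynamic_loader(Comment)
          })

          post.comments.append(Comment())                            # writable collection
          recent = post.comments.filter(comments.c.id > 10).all()    # reads act as a live Query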

      • [orm] flush()-embedded inline INSERT/UPDATE expressions. Assign any SQL expression, like “sometable.c.column + 1”, to an instance’s attribute. Upon flush(), the mapper detects the expression and embeds it directly in the INSERT or UPDATE statement; the attribute gets deferred on the instance so it loads the new value the next time you access it.(link)
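
        For example, reusing the session, users table and User mapping from the earlier sketch:

          ed = User()
          ed.name = 'ed'
          session.save(ed)
          session.flush()

          ed.name = users.c.name + '_modified'   # SQL expression assigned to the attribute
          session.flush()                        # expression is embedded inline in the UPDATE
          ed.name                                # attribute was deferred; next access loads the new value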

      • [orm] A rudimental sharding (horizontal scaling) system is introduced. This system uses a modified Session which can distribute read and write operations among multiple databases, based on user-defined functions defining the “sharding strategy”. Instances and their dependents can be distributed and queried among multiple databases based on attribute values, round-robin approaches or any other user-defined system.(link)

        References: #618

      • [orm] Eager loading has been enhanced to allow even more joins in more places. It now functions at any arbitrary depth along self-referential and cyclical structures. When loading cyclical structures, specify “join_depth” on relation() indicating how many times you’d like the table to join to itself; each level gets a distinct table alias. The alias names themselves are generated at compile time using a simple counting scheme now and are a lot easier on the eyes, as well as of course completely deterministic.(link)

        References: #659

      • [orm] Added composite column properties. This allows you to create a type which is represented by more than one column, when using the ORM. Objects of the new type are fully functional in query expressions, comparisons, query.get() clauses, etc. and act as though they are regular single-column scalars... except they’re not! Use the function composite(cls, *columns) inside of the mapper’s “properties” dict, and instances of cls will be created/mapped to a single attribute, comprised of the values corresponding to *columns.(link)

        References: #211
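
        A sketch of composite(); the Point class follows the value-object protocol used by this feature, while the Vertex class and its vertices table with x1/y1 columns are hypothetical:

          from sqlalchemy.orm import composite

          class Point(object):
              def __init__(self, x, y):
                  self.x, self.y = x, y
              def __composite_values__(self):
                  return self.x, self.y
              def __eq__(self, other):
                  return isinstance(other, Point) and (other.x, other.y) == (self.x, self.y)

          # Vertex / vertices are hypothetical; the composite maps two columns to one attribute
          mapper(Vertex, vertices, properties={
              'start': composite(Point, vertices.c.x1, vertices.c.y1)
          })

          session.query(Vertex).filter(Vertex.start == Point(3, 4)).all()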

      • [orm] Improved support for custom column_property() attributes which feature correlated subqueries, works better with eager loading now.(link)

      • [orm] Primary key “collapse” behavior; the mapper will analyze all columns in its given selectable for primary key “equivalence”, that is, columns which are equivalent via foreign key relationship or via an explicit inherit_condition. Primarily for joined-table inheritance scenarios where differently named PK columns in inheriting tables should “collapse” into a single-valued (or fewer-valued) primary key. Fixes issues such as the one referenced below.(link)

        References: #611

      • [orm] Joined-table inheritance will now generate the primary key columns of all inherited classes against the root table of the join only. This implies that each row in the root table is distinct to a single instance. If for some rare reason this is not desirable, explicit primary_key settings on individual mappers will override it.(link)

      • [orm] When “polymorphic” flags are used with joined-table or single-table inheritance, all identity keys are generated against the root class of the inheritance hierarchy; this allows query.get() to work polymorphically using the same caching semantics as a non-polymorphic get. Note that this currently does not work with concrete inheritance.(link)

      • [orm] Secondary inheritance loading: polymorphic mappers can be constructed without a select_table argument. Inheriting mappers whose tables were not represented in the initial load will issue a second SQL query immediately, once per instance (i.e. not very efficient for large lists), in order to load the remaining columns.(link)

      • [orm] Secondary inheritance loading can also move its second query into a column-level “deferred” load, via the “polymorphic_fetch” argument, which can be set to ‘select’ or ‘deferred’(link)

      • [orm] It’s now possible to map only a subset of available selectable columns onto mapper properties, using include_columns/exclude_columns.(link)

        References: #696

      • [orm] Added undefer_group() MapperOption, sets a set of “deferred” columns joined by a “group” to load as “undeferred”.(link)
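
        For example (a sketch; the Item mapping, its items table and the “photos” group name are hypothetical):

          from sqlalchemy.orm import deferred, undefer_group

          mapper(Item, items, properties={
              'photo1': deferred(items.c.photo1, group='photos'),
              'photo2': deferred(items.c.photo2, group='photos'),
          })

          session.query(Item).options(undefer_group('photos')).all()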

      • [orm] Rewrite of the “deterministic alias name” logic to be part of the SQL layer, produces much simpler alias and label names more in the style of Hibernate(link)

      sql

      • [sql] Speed! Clause compilation as well as the mechanics of SQL constructs have been streamlined and simplified to a significant degree, yielding a 20-30% improvement over the statement construction/compilation overhead of 0.3.(link)

      • [sql] All “type” keyword arguments, such as those to bindparam(), column(), Column(), and func.<something>(), renamed to “type_”. Those objects still name their “type” attribute as “type”.(link)

      • [sql] case_sensitive=(True|False) setting removed from schema items, since checking this state added a lot of method call overhead and there was no decent reason to ever set it to False. Table and column names which are all lower case will be treated as case-insensitive (yes we adjust for Oracle’s UPPERCASE style too).(link)

      mysql

      • [mysql] Table and column names loaded via reflection are now Unicode.(link)

      • [mysql] All standard column types are now supported, including SET.(link)

      • [mysql] Table reflection can now be performed in as little as one round-trip.(link)

      • [mysql] ANSI and ANSI_QUOTES sql modes are now supported.(link)

      • [mysql] Indexes are now reflected.(link)

      oracle

      • [oracle] Very rudimental support for OUT parameters added; use sql.outparam(name, type) to set up an OUT parameter, just like bindparam(); after execution, values are available via result.out_parameters dictionary.(link)

        References: #507

      misc

      • [transactions] Added context manager (with statement) support for transactions.(link)
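
        A sketch of the “with” statement support, reusing the engine and users table from the earlier sketch; the transaction commits when the block completes and rolls back if it raises:

          conn = engine.connect()
          with conn.begin():
              conn.execute(users.insert(), name='wendy')
          conn.close()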

      • [transactions] Added support for two phase commit, works with mysql and postgres so far.(link)

      • [transactions] Added a subtransaction implementation that uses savepoints.(link)

      • [transactions] Added support for savepoints.(link)

      • [metadata] Tables can be reflected from the database en-masse without declaring them in advance. MetaData(engine, reflect=True) will load all tables present in the database, or use metadata.reflect() for finer control.(link)
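
        For example, reusing the engine from the earlier sketch:

          from sqlalchemy import MetaData

          meta = MetaData(engine, reflect=True)   # loads all tables present in the database
          # or, for finer control:
          meta2 = MetaData()
          meta2.reflect(bind=engine)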

      • [metadata] DynamicMetaData has been renamed to ThreadLocalMetaData(link)

      • [metadata] The ThreadLocalMetaData constructor now takes no arguments.(link)

      • [metadata] BoundMetaData has been removed- regular MetaData is equivalent(link)

      • [metadata] Numeric and Float types now have an “asdecimal” flag; defaults to True for Numeric, False for Float. When True, values are returned as decimal.Decimal objects; when False, values are returned as float(). The defaults of True/False are already the behavior for PG and MySQL’s DBAPI modules.(link)

        References: #646
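
        For example (column fragments only, as a sketch; the names are illustrative):

          from sqlalchemy import Column, Numeric, Float

          Column('price', Numeric(10, 2))          # returns decimal.Decimal by default
          Column('ratio', Float(asdecimal=True))   # opt a Float in to decimal.Decimal as well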

      • [metadata] New SQL operator implementation which removes all hardcoded operators from expression structures and moves them into compilation; allows greater flexibility of operator compilation; for example, “+” compiles to “||” when used in a string context, or “concat(a,b)” on MySQL; whereas in a numeric context it compiles to “+”. Fixes the issue referenced below.(link)

        References: #475

      • [metadata] “Anonymous” alias and label names are now generated at SQL compilation time in a completely deterministic fashion... no more random hex IDs(link)

      • [metadata] Significant architectural overhaul to SQL elements (ClauseElement). All elements share a common “mutability” framework which allows a consistent approach to in-place modifications of elements as well as generative behavior. Improves stability of the ORM which makes heavy usage of mutations to SQL expressions.(link)

      • [metadata] select() and union()’s now have “generative” behavior. Methods like order_by() and group_by() return a new instance - the original instance is left unchanged. Non-generative methods remain as well.(link)

      • [metadata] The internals of select/union vastly simplified- all decision making regarding “is subquery” and “correlation” pushed to SQL generation phase. select() elements are now never mutated by their enclosing containers or by any dialect’s compilation process(link)

        References: #569, #52

      • [metadata] select(scalar=True) argument is deprecated; use select(..).as_scalar(). The resulting object obeys the full “column” interface and plays better within expressions.(link)
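
        For example, reusing the users table from the earlier sketch:

          from sqlalchemy import select, func

          avg_id = select([func.avg(users.c.id)]).as_scalar()
          stmt = select([users.c.name, avg_id])   # usable anywhere a column expression is expected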

      • [metadata] Added select().with_prefix(‘foo’) allowing any set of keywords to be placed before the columns clause of the SELECT(link)

        References: #504

      • [metadata] Added array slice support to row[<index>](link)

        References: #686

      • [metadata] Result sets make a better attempt at matching the DBAPI types present in cursor.description to the TypeEngine objects defined by the dialect, which are then used for result-processing. Note this only takes effect for textual SQL; constructed SQL statements always have an explicit type map.(link)

      • [metadata] Result sets from CRUD operations close their underlying cursor immediately and will also autoclose the connection if defined for the operation; this allows more efficient usage of connections for successive CRUD operations with less chance of “dangling connections”.(link)

      • [metadata] Column defaults and onupdate Python functions (i.e. passed to ColumnDefault) may take zero or one arguments; the one argument is the ExecutionContext, from which you can call “context.parameters[someparam]” to access the other bind parameter values affixed to the statement. The connection used for the execution is available as well so that you can pre-execute statements.(link)

        References: #559

      • [metadata] Added “explicit” create/drop/execute support for sequences (i.e. you can pass a “connectable” to each of those methods on Sequence).(link)

      • [metadata] Better quoting of identifiers when manipulating schemas.(link)

      • [metadata] Standardized the behavior for table reflection where types can’t be located; NullType is substituted instead, warning is raised.(link)

      • [metadata] ColumnCollection (i.e. the ‘c’ attribute on tables) follows dictionary semantics for “__contains__”(link)

        References: #606

      • [engines] Speed! The mechanics of result processing and bind parameter processing have been overhauled, streamlined and optimized to issue as few method calls as possible. Bench tests for mass INSERT and mass rowset iteration both show 0.4 to be over twice as fast as 0.3, using 68% fewer function calls.(link)

      • [engines] You can now hook into the pool lifecycle and run SQL statements or other logic at each new DBAPI connection, pool check-out and check-in.(link)
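
        One way these hooks were exposed in this era was the PoolListener interface passed via the “listeners” argument; a sketch, with the PRAGMA chosen purely for illustration:

          from sqlalchemy import create_engine
          from sqlalchemy.interfaces import PoolListener

          class MyListener(PoolListener):
              def connect(self, dbapi_con, con_record):
                  # runs once for each newly created DBAPI connection
                  dbapi_con.execute("PRAGMA synchronous = OFF")

          listening_engine = create_engine('sqlite://', listeners=[MyListener()])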

      • [engines] Connections gain a .properties collection, with contents scoped to the lifetime of the underlying DBAPI connection(link)

      • [engines] Removed auto_close_cursors and disallow_open_cursors arguments from Pool; reduces overhead as cursors are normally closed by ResultProxy and Connection.(link)

      • [extensions] proxyengine is temporarily removed, pending an actually working replacement.(link)

      • [extensions] SelectResults has been replaced by Query. SelectResults / SelectResultsExt still exist but just return a slightly modified Query object for backwards-compatibility. The join_to() method from SelectResults isn’t present anymore; use join() instead.(link)

      • [postgres] Added PGArray datatype for using postgres array datatypes.(link)


      SQLAlchemy 0.8 Documentation

      Release: 0.8.4 | Release Date: December 8, 2013

      0.5 Changelog

      0.5.9

      no release date

      sql

      • [sql] Fixed erroneous self_group() call in expression package.(link)

        References: #1661

      0.5.8

      Released: Sat Jan 16 2010

      sql

      • [sql] The copy() method on Column now supports uninitialized, unnamed Column objects. This allows easy creation of declarative helpers which place common columns on multiple subclasses.(link)

      • [sql] Default generators like Sequence() translate correctly across a copy() operation.(link)

      • [sql] Sequence() and other DefaultGenerator objects are accepted as the value for the “default” and “onupdate” keyword arguments of Column, in addition to being accepted positionally.(link)

      • [sql] Fixed a column arithmetic bug that affected column correspondence for cloned selectables which contain free-standing column expressions. This bug is generally only noticeable when exercising newer ORM behavior only available in 0.6, but the fix is more correct at the SQL expression level as well.(link)

        References: #1568, #1617

      postgresql

      • [postgresql] The extract() function, which was slightly improved in 0.5.7, needed a lot more work to generate the correct typecast (the typecasts appear to be necessary in PG’s EXTRACT quite a lot of the time). The typecast is now generated using a rule dictionary based on PG’s documentation for date/time/interval arithmetic. It also accepts text() constructs again, which was broken in 0.5.7.(link)

        References: #1647

      firebird

      • [firebird] Recognize more errors as disconnections.(link)

        References: #1646

      0.5.7

      Released: Sat Dec 26 2009

      orm

      • [orm] contains_eager() now works with the automatically generated subquery that results when you say “query(Parent).join(Parent.somejoinedsubclass)”, i.e. when Parent joins to a joined-table-inheritance subclass. Previously contains_eager() would erroneously add the subclass table to the query separately producing a cartesian product. An example is in the ticket description.(link)

        References: #1543

      • [orm] query.options() now propagate to loaded objects for potential further sub-loads only for options where such behavior is relevant, keeping various unserializable options like those generated by contains_eager() out of individual instance states.(link)

        References: #1553

      • [orm] Session.execute() now locates table- and mapper-specific binds based on a passed in expression which is an insert()/update()/delete() construct.(link)

        References: #1054

      • [orm] Session.merge() now properly overwrites a many-to-one or uselist=False attribute to None if the attribute is also None in the given object to be merged.(link)

      • [orm] Fixed a needless select which would occur when merging transient objects that contained a null primary key identifier.(link)

        References: #1618

      • [orm] Mutable collection passed to the “extension” attribute of relation(), column_property() etc. will not be mutated or shared among multiple instrumentation calls, preventing duplicate extensions, such as backref populators, from being inserted into the list.(link)

        References: #1585

      • [orm] Fixed the call to get_committed_value() on CompositeProperty.(link)

        References: #1504

      • [orm] Fixed bug where Query would crash if a join() with no clear “left” side were called when a non-mapped column entity appeared in the columns list.(link)

        References: #1602

      • [orm] Fixed bug whereby composite columns wouldn’t load properly when configured on a joined-table subclass, introduced in version 0.5.6 as a result of the fix for the ticket referenced below. Thanks to Scott Torborg.(link)

        References: #1616, #1480

      • [orm] The “use get” behavior of many-to-one relations, i.e. that a lazy load will fallback to the possibly cached query.get() value, now works across join conditions where the two compared types are not exactly the same class, but share the same “affinity” - i.e. Integer and SmallInteger. Also allows combinations of reflected and non-reflected types to work with 0.5 style type reflection, such as PGText/Text (note 0.6 reflects types as their generic versions).(link)

        References: #1556

      • [orm] Fixed bug in query.update() when passing Cls.attribute as keys in the value dict and using synchronize_session=’expire’ (‘fetch’ in 0.6).(link)

        References: #1436

      sql

      • [sql] Fixed bug in two-phase transaction whereby commit() method didn’t set the full state which allows subsequent close() call to succeed.(link)

        References: #1603

      • [sql] Fixed the “numeric” paramstyle, which apparently is the default paramstyle used by Informixdb.(link)

      • [sql] Repeat expressions in the columns clause of a select are deduped based on the identity of each clause element, not the actual string. This allows positional elements to render correctly even if they all render identically, such as “qmark” style bind parameters.(link)

        References: #1574

      • [sql] The cursor associated with connection pool connections (i.e. _CursorFairy) now proxies __iter__() to the underlying cursor correctly.(link)

        References: #1632

      • [sql] Types now support an “affinity comparison” operation, i.e. that an Integer/SmallInteger are “compatible”, or a Text/String, PickleType/Binary, etc. Part of the fix referenced below.(link)

        References: #1556

      • [sql] Fixed bug preventing alias() of an alias() from being cloned or adapted (occurs frequently in ORM operations).(link)

        References: #1641

      postgresql

      • [postgresql] Added support for reflecting the DOUBLE PRECISION type, via a new postgres.PGDoublePrecision object. This is postgresql.DOUBLE_PRECISION in 0.6.(link)

        References: #1085

      • [postgresql] Added support for reflecting the INTERVAL YEAR TO MONTH and INTERVAL DAY TO SECOND syntaxes of the INTERVAL type.(link)

        References: #460

      • [postgresql] Corrected the “has_sequence” query to take current schema, or explicit sequence-stated schema, into account.(link)

        References: #1576

      • [postgresql] Fixed the behavior of extract() to apply operator precedence rules to the ”::” operator when applying the “timestamp” cast - ensures proper parenthesization.(link)

        References: #1611

      sqlite

      • [sqlite] sqlite dialect properly generates CREATE INDEX for a table that is in an alternate schema.(link)

        References: #1439

      mssql

      • [mssql] Changed the name of TrustedConnection to Trusted_Connection when constructing pyodbc connect arguments(link)

        References: #1561

      oracle

      • [oracle] The “table_names” dialect function, used by MetaData .reflect(), omits “index overflow tables”, a system table generated by Oracle when “index only tables” with overflow are used. These tables aren’t accessible via SQL and can’t be reflected.(link)

        References: #1637

      misc

      • [ext] A column can be added to a joined-table declarative superclass after the class has been constructed (i.e. via class-level attribute assignment), and the column will be propagated down to subclasses. This is the reverse of the situation fixed in 0.5.6.(link)

        References: #1570, #1523

      • [ext] Fixed a slight inaccuracy in the sharding example. Comparing equivalence of columns in the ORM is best accomplished using col1.shares_lineage(col2).(link)

        References: #1491

      • [ext] Removed unused load() method from ShardedQuery.(link)

        References: #1606

      0.5.6

      Released: Sat Sep 12 2009

      orm

      • [orm] Fixed bug whereby inheritance discriminator part of a composite primary key would fail on updates. Continuation of the fix referenced below.(link)

        References: #1300

      • [orm] Fixed bug which disallowed one side of a many-to-many bidirectional reference to declare itself as “viewonly”(link)

        References: #1507

      • [orm] Added an assertion that prevents a @validates function or other AttributeExtension from loading an unloaded collection such that internal state may be corrupted.(link)

        References: #1526

      • [orm] Fixed bug which prevented two entities from mutually replacing each other’s primary key values within a single flush() for some orderings of operations.(link)

        References: #1519

      • [orm] Fixed an obscure issue whereby a joined-table subclass with a self-referential eager load on the base class would populate the related object’s “subclass” table with data from the “subclass” table of the parent.(link)

        References: #1485

      • [orm] relations() now have greater ability to be “overridden”, meaning a subclass that explicitly specifies a relation() overriding that of the parent class will be honored during a flush. This is currently to support many-to-many relations from concrete inheritance setups. Outside of that use case, YMMV.(link)

        References: #1477

      • [orm] Squeezed a few more unnecessary “lazy loads” out of relation(). When a collection is mutated, many-to-one backrefs on the other side will not fire off to load the “old” value, unless “single_parent=True” is set. A direct assignment of a many-to-one still loads the “old” value in order to update backref collections on that value, which may be present in the session already, thus maintaining the 0.5 behavioral contract.(link)

        References: #1483

      • [orm] Fixed bug whereby a load/refresh of joined table inheritance attributes which were based on column_property() or similar would fail to evaluate.(link)

        References: #1480

      • [orm] Improved support for MapperProperty objects overriding that of an inherited mapper for non-concrete inheritance setups - attribute extensions won’t randomly collide with each other.(link)

        References: #1488

      • [orm] UPDATE and DELETE do not support ORDER BY, LIMIT, OFFSET, etc. in standard SQL. Query.update() and Query.delete() now raise an exception if any of limit(), offset(), order_by(), group_by(), or distinct() have been called.(link)

        References: #1487

      • [orm] Added AttributeExtension to sqlalchemy.orm.__all__(link)

      • [orm] Improved error message when query() is called with a non-SQL /entity expression.(link)

        References: #1476

      • [orm] Using False or 0 as a polymorphic discriminator now works on the base class as well as a subclass.(link)

        References: #1440

      • [orm] Added enable_assertions(False) to Query which disables the usual assertions for expected state - used by Query subclasses to engineer custom state. See http://www.sqlalchemy.org/trac/wiki/UsageRecipes/PreFilteredQuery for an example.(link)

        References: #1424

      • [orm] Fixed recursion issue which occurred if a mapped object’s __len__() or __nonzero__() method resulted in state changes.(link)

        References: #1501

      • [orm] Fixed incorrect exception raise in Weak/StrongIdentityMap.add()(link)

        References: #1506

      • [orm] Fixed the error message for “could not find a FROM clause” in query.join() which would fail to issue correctly if the query was against a pure SQL construct.(link)

        References: #1522

      • [orm] Fixed a somewhat hypothetical issue which would result in the wrong primary key being calculated for a mapper using the old polymorphic_union function - but this is old stuff.(link)

        References: #1486

      sql

      • [sql] Fixed column.copy() to copy defaults and onupdates.(link)

        References: #1373

      • [sql] Fixed a bug in extract() introduced in 0.5.4 whereby the string “field” argument was getting treated as a ClauseElement, causing various errors within more complex SQL transformations.(link)

      • [sql] Unary expressions such as DISTINCT propagate their type handling to result sets, allowing conversions like unicode and such to take place.(link)

        References: #1420

      • [sql] Fixed bug in Table and Column whereby passing empty dict for “info” argument would raise an exception.(link)

        References: #1482

      oracle

      • [oracle] Backported 0.6 fix for Oracle alias names not getting truncated.(link)

        References: #1309

      misc

      • [ext] The collection proxies produced by associationproxy are now pickleable. A user-defined proxy_factory however is still not pickleable unless it defines __getstate__ and __setstate__.(link)

        References: #1446

      • [ext] Declarative will raise an informative exception if __table_args__ is passed as a tuple with no dict argument. Improved documentation.(link)

        References: #1468

      • [ext] Table objects declared in the MetaData can now be used in string expressions sent to primaryjoin/secondaryjoin/ secondary - the name is pulled from the MetaData of the declarative base.(link)

        References: #1527

      • [ext] A column can be added to a joined-table subclass after the class has been constructed (i.e. via class-level attribute assignment). The column is added to the underlying Table as always, but now the mapper will rebuild its “join” to include the new column, instead of raising an error about “no such column, use column_property() instead”.(link)

        References: #1523

      • [test] Added examples into the test suite so they get exercised regularly and cleaned up a couple of deprecation warnings.(link)

      0.5.5

      Released: Mon Jul 13 2009

      general

      • [general] unit tests have been migrated from unittest to nose. See README.unittests for information on how to run the tests.(link)

        References: #970

      orm

      • [orm] The “foreign_keys” argument of relation() will now propagate automatically to the backref in the same way that primaryjoin and secondaryjoin do. For the extremely rare use case where the backref of a relation() has intentionally different “foreign_keys” configured, both sides now need to be configured explicitly (if they do in fact require this setting, see the next note...).(link)

      • [orm] ...the only known (and really, really rare) use case where a different foreign_keys setting was used on the forwards/backwards side, a composite foreign key that partially points to its own columns, has been enhanced such that the fk->itself aspect of the relation won’t be used to determine relation direction.(link)

      • [orm] Session.mapper is now deprecated.

        Call session.add() if you’d like a free-standing object to be part of your session. Otherwise, a DIY version of Session.mapper is now documented at http://www.sqlalchemy.org/trac/wiki/UsageRecipes/SessionAwareMapper The method will remain deprecated throughout 0.6.

        (link)

      • [orm] Fixed Query so that it can join() from individual columns of a joined-table subclass entity, i.e. query(SubClass.foo, SubClass.bar).join(<anything>). In most cases, an error “Could not find a FROM clause to join from” would be raised. In a few others, the result would be returned in terms of the base class rather than the subclass - so applications which relied on this erroneous result need to be adjusted.(link)

        References: #1431

      • [orm] Fixed a bug involving contains_eager(), which would apply itself to a secondary (i.e. lazy) load in a particular rare case, producing cartesian products. Improved the targeting of query.options() on secondary loads overall.(link)

        References: #1461

      • [orm] Fixed bug introduced in 0.5.4 whereby Composite types fail when default-holding columns are flushed.(link)

      • [orm] Fixed another 0.5.4 bug whereby mutable attributes (i.e. PickleType) wouldn’t be deserialized correctly when the whole object was serialized.(link)

        References: #1426

      • [orm] Fixed bug whereby session.is_modified() would raise an exception if any synonyms were in use.(link)

      • [orm] Fixed potential memory leak whereby previously pickled objects placed back in a session would not be fully garbage collected unless the Session were explicitly closed out.(link)

      • [orm] Fixed bug whereby list-based attributes, like pickletype and PGArray, failed to be merged() properly.(link)

      • [orm] Repaired non-working attributes.set_committed_value function.(link)

      • [orm] Trimmed the pickle format for InstanceState which should further reduce the memory footprint of pickled instances. The format should be backwards compatible with that of 0.5.4 and previous.(link)

      • [orm] sqlalchemy.orm.join and sqlalchemy.orm.outerjoin are now added to __all__ in sqlalchemy.orm.*.(link)

        References: #1463

      • [orm] Fixed bug where Query exception raise would fail when a too-short composite primary key value were passed to get().(link)

        References: #1458

      sql

      • [sql] Removed an obscure feature of execute() (including connection, engine, Session) whereby a bindparam() construct can be sent as a key to the params dictionary. This usage is undocumented and is at the core of an issue whereby the bindparam() object created implicitly by a text() construct may have the same hash value as a string placed in the params dictionary and may result in an inappropriate match when computing the final bind parameters. Internal checks for this condition would add significant latency to the critical task of parameter rendering, so the behavior is removed. This is a backwards incompatible change for any application that may have been using this feature, however the feature has never been documented.(link)

      misc

      • [engine/pool] Implemented recreate() for StaticPool.(link)

      0.5.4p2

      Released: Tue May 26 2009

      sql

      • [sql] Repaired the printing of SQL exceptions which are not based on parameters or are not executemany() style.(link)

      postgresql

      • [postgresql] Deprecated the hardcoded TIMESTAMP function, which when used as func.TIMESTAMP(value) would render “TIMESTAMP value”. This breaks on some platforms as PostgreSQL doesn’t allow bind parameters to be used in this context. The hard-coded uppercase is also inappropriate and there’s lots of other PG casts that we’d need to support. So instead, use text constructs i.e. select([“timestamp ‘12/05/09’”]).(link)

      0.5.4p1

      Released: Mon May 18 2009

      orm

      • [orm] Fixed an attribute error introduced in 0.5.4 which would occur when merge() was used with an incomplete object.(link)

      0.5.4

      Released: Sun May 17 2009

      orm

      • [orm] Significant performance enhancements regarding Sessions/flush() in conjunction with large mapper graphs, large numbers of objects:

        • Removed all* O(N) scanning behavior from the flush() process, i.e. operations that were scanning the full session, including an extremely expensive one that was erroneously assuming primary key values were changing when this was not the case.
          • one edge case remains which may invoke a full scan, if an existing primary key attribute is modified to a new value.
        • The Session’s “weak referencing” behavior is now full - no strong references whatsoever are made to a mapped object or related items/collections in its __dict__. Backrefs and other cycles in objects no longer affect the Session’s ability to lose all references to unmodified objects. Objects with pending changes still are maintained strongly until flush.

          The implementation also improves performance by moving the “resurrection” process of garbage collected items to only be relevant for mappings that map “mutable” attributes (i.e. PickleType, composite attrs). This removes overhead from the gc process and simplifies internal behavior.

          If a “mutable” attribute change is the sole change on an object which is then dereferenced, the mapper will not have access to other attribute state when the UPDATE is issued. This may present itself differently to some MapperExtensions.

          The change also affects the internal attribute API, but not the AttributeExtension interface nor any of the publically documented attribute functions.

        • The unit of work no longer generates a graph of “dependency” processors for the full graph of mappers during flush(), instead creating such processors only for those mappers which represent objects with pending changes. This saves a tremendous number of method calls in the context of a large interconnected graph of mappers.
        • Cached a wasteful “table sort” operation that previously occurred multiple times per flush, also removing significant method call count from flush().
        • Other redundant behaviors have been simplified in mapper._save_obj().
        (link)

        References: #1398

      • [orm] Modified query_cls on DynamicAttributeImpl to accept a full mixin version of the AppenderQuery, which allows subclassing the AppenderMixin.(link)

      • [orm] The “polymorphic discriminator” column may be part of a primary key, and it will be populated with the correct discriminator value.(link)

        References: #1300

      • [orm] Fixed the evaluator not being able to evaluate IS NULL clauses.(link)

      • [orm] Fixed the “set collection” function on “dynamic” relations to initiate events correctly. Previously a collection could only be assigned to a pending parent instance, otherwise modified events would not be fired correctly. Set collection is now compatible with merge(), fixes.(link)

        References: #1352

      • [orm] Allowed pickling of PropertyOption objects constructed with instrumented descriptors; previously, pickle errors would occur when pickling an object which was loaded with a descriptor-based option, such as query.options(eagerload(MyClass.foo)).(link)

      • [orm] Lazy loader will not use get() if the “lazy load” SQL clause matches the clause used by get(), but contains some parameters hardcoded. Previously the lazy strategy would fail with the get(). Ideally get() would be used with the hardcoded parameters but this would require further development.(link)

        References: #1357

      • [orm] MapperOptions and other state associated with query.options() is no longer bundled within callables associated with each lazy/deferred-loading attribute during a load. The options are now associated with the instance’s state object just once when it’s populated. This removes the need in most cases for per-instance/attribute loader objects, improving load speed and memory overhead for individual instances.(link)

        References: #1391

      • [orm] Fixed another location where autoflush was interfering with session.merge(). autoflush is disabled completely for the duration of merge() now.(link)

        References: #1360

      • [orm] Fixed bug which prevented “mutable primary key” dependency logic from functioning properly on a one-to-one relation().(link)

        References: #1406

      • [orm] Fixed bug in relation(), introduced in 0.5.3, whereby a self referential relation from a base class to a joined-table subclass would not configure correctly.(link)

      • [orm] Fixed obscure mapper compilation issue when inheriting mappers are used which would result in un-initialized attributes.(link)

      • [orm] Fixed documentation for session weak_identity_map - the default value is True, indicating a weak referencing map in use.(link)

      • [orm] Fixed a unit of work issue whereby the foreign key attribute on an item contained within a collection owned by an object being deleted would not be set to None if the relation() was self-referential.(link)

        References: #1376

      • [orm] Fixed Query.update() and Query.delete() failures with eagerloaded relations.(link)

        References: #1378

      • [orm] It is now an error to specify both columns of a binary primaryjoin condition in the foreign_keys or remote_side collection. Previously this was merely nonsensical, but would succeed in a non-deterministic way.(link)

      sql

      • [sql] Back-ported the “compiler” extension from SQLA 0.6. This is a standardized interface which allows the creation of custom ClauseElement subclasses and compilers. In particular it’s handy as an alternative to text() when you’d like to build a construct that has database-specific compilations. See the extension docs for details.(link)
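
        A sketch of the back-ported compiler extension; the “now_utc” construct and its rendering are hypothetical examples of a custom ClauseElement with its own compilation:

          from sqlalchemy.ext.compiler import compiles
          from sqlalchemy.sql.expression import ColumnClause

          class now_utc(ColumnClause):
              def __init__(self):
                  ColumnClause.__init__(self, 'now_utc')

          @compiles(now_utc)
          def _compile_now_utc(element, compiler, **kw):
              # default rendering; per-dialect variants can be registered the same way
              return "CURRENT_TIMESTAMP"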

      • [sql] Exception messages are truncated when the list of bound parameters is larger than 10, preventing enormous multi-page exceptions from filling up screens and logfiles for large executemany() statements.(link)

        References: #1413

      • [sql] sqlalchemy.extract() is now dialect sensitive and can extract components of timestamps idiomatically across the supported databases, including SQLite.(link)

      • [sql] Fixed __repr__() and other _get_colspec() methods on ForeignKey constructed from __clause_element__() style construct (i.e. declarative columns).(link)

        References: #1353

      schema

      • [schema] [1341] [ticket: 594] Added a quote_schema() method to the IdentifierPreparer class so that dialects can override how schemas get handled. This enables the MSSQL dialect to treat schemas as multipart identifiers, such as ‘database.owner’.(link)

      mysql

      • [mysql] Reflecting a FOREIGN KEY construct will take into account a dotted schema.tablename combination, if the foreign key references a table in a remote schema.(link)

        References: #1405

      sqlite

      • [sqlite] Corrected the SLBoolean type so that it properly treats only 1 as True.(link)

        References: #1402

      • [sqlite] Corrected the float type so that it correctly maps to a SLFloat type when being reflected.(link)

        References: #1273

      mssql

      • [mssql] Modified how savepoint logic works to prevent it from stepping on non-savepoint oriented routines. Savepoint support is still very experimental.(link)

      • [mssql] Added in reserved words for MSSQL that covers version 2008 and all prior versions.(link)

        References: #1310

      • [mssql] Corrected problem with information schema not working with a binary collation based database. Cleaned up information schema since it is only used by mssql now.(link)

        References: #1343

      misc

      • [extensions] Fixed adding of deferred or other column properties to a declarative class.(link)

        References: #1379

      0.5.3

      Released: Tue Mar 24 2009

      orm

      • [orm] The “objects” argument to session.flush() is deprecated. State which represents the linkage between a parent and child object does not support “flushed” status on one side of the link and not the other, so supporting this operation leads to misleading results.(link)

        References: #1315

      • [orm] Query now implements __clause_element__() which produces its selectable, which means a Query instance can be accepted in many SQL expressions, including col.in_(query), union(query1, query2), select([foo]).select_from(query), etc.(link)
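
        For example, reusing the session, User and users names from the earlier 0.4 sketches:

          subq = session.query(User.id).filter(User.name.like('e%'))
          stmt = users.select(users.c.id.in_(subq))   # the Query renders as its selectable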

      • [orm] Query.join() can now construct multiple FROM clauses, if needed. Such as, query(A, B).join(A.x).join(B.y) might say SELECT A.*, B.* FROM A JOIN X, B JOIN Y. Eager loading can also tack its joins onto those multiple FROM clauses.(link)

        References: #1337

      • [orm] Fixed bug in dynamic_loader() where append/remove events after construction time were not being propagated to the UOW to pick up on flush().(link)

        References: #1347

      • [orm] Fixed bug where column_prefix wasn’t being checked before not mapping an attribute that already had class-level name present.(link)

      • [orm] A session.expire() on a particular collection attribute will clear any pending backref additions as well, so that the next access correctly returns only what was present in the database. Presents some degree of a workaround for the issue referenced below, although we are considering removing the flush([objects]) feature altogether.(link)

        References: #1315

      • [orm] Session.scalar() now converts raw SQL strings to text() the same way Session.execute() does and accepts same alternative **kw args.(link)

      • [orm] Improvements to the “determine direction” logic of relation() such that the direction of tricky situations like mapper(A.join(B)) -> relation -> mapper(B) can be determined.(link)

      • [orm] When flushing partial sets of objects using session.flush([somelist]), pending objects which remain pending after the operation won’t inadvertently be added as persistent.(link)

        References: #1306

      • [orm] Added “post_configure_attribute” method to InstrumentationManager, so that the “listen_for_events.py” example works again.(link)

        References: #1314

      • [orm] a forward and complementing backwards reference which are both of the same direction, i.e. ONETOMANY or MANYTOONE, is now detected, and an error message is raised. Saves crazy CircularDependencyErrors later on.(link)

      • [orm] Fixed bugs in Query regarding simultaneous selection of multiple joined-table inheritance entities with common base classes:

        • previously the adaption applied to “B” on “A JOIN B” would be erroneously partially applied to “A”.
        • comparisons on relations (i.e. A.related==someb) were not getting adapted when they should.
        • Other filterings, like query(A).join(A.bs).filter(B.foo==’bar’), were erroneously adapting “B.foo” as though it were an “A”.
        (link)

      • [orm] Fixed adaptation of EXISTS clauses via any(), has(), etc. in conjunction with an aliased object on the left and of_type() on the right.(link)

        References: #1325

      • [orm] Added an attribute helper method set_committed_value in sqlalchemy.orm.attributes. Given an object, attribute name, and value, will set the value on the object as part of its “committed” state, i.e. state that is understood to have been loaded from the database. Helps with the creation of homegrown collection loaders and such.(link)

      • [orm] Query won’t fail with weakref error when a non-mapper/class instrumented descriptor is passed, raises “Invalid column expression”.(link)

      • [orm] Query.group_by() properly takes into account aliasing applied to the FROM clause, such as with select_from(), using with_polymorphic(), or using from_self().(link)

      sql

      • [sql] An alias() of a select() will convert to a “scalar subquery” when used in an unambiguously scalar context, i.e. it’s used in a comparison operation. This applies to the ORM when using query.subquery() as well.(link)

      • [sql] Fixed missing _label attribute on Function object, others when used in a select() with use_labels (such as when used in an ORM column_property()).(link)

        References: #1302

      • [sql] anonymous alias names now truncate down to the max length allowed by the dialect. More significant on DBs like Oracle with very small character limits.(link)

        References: #1309

      • [sql] the __selectable__() interface has been replaced entirely by __clause_element__().(link)

      • [sql] The per-dialect cache used by TypeEngine to cache dialect-specific types is now a WeakKeyDictionary. This to prevent dialect objects from being referenced forever for an application that creates an arbitrarily large number of engines or dialects. There is a small performance penalty which will be resolved in 0.6.(link)

        References: #1299

      postgresql

      • [postgresql] Index reflection won’t fail when an index with multiple expressions is encountered.(link)

      • [postgresql] Added PGUuid and PGBit types to sqlalchemy.databases.postgres.(link)

        References: #1327

      • [postgresql] Reflection of unknown PG types won’t crash when those types are specified within a domain.(link)

        References: #1327

      sqlite

      • [sqlite] Fixed SQLite reflection methods so that non-present cursor.description, which triggers an auto-cursor close, will be detected so that no results doesn’t fail on recent versions of pysqlite which raise an error when fetchone() called with no rows present.(link)

      mssql

      • [mssql] Preliminary support for pymssql 1.0.1(link)

      • [mssql] Corrected issue on mssql where max_identifier_length was not being respected.(link)

      misc

      • [extensions] Fixed a recursive pickling issue in serializer, triggered by an EXISTS or other embedded FROM construct.(link)

      • [extensions] Declarative locates the “inherits” class using a search through __bases__, to skip over mixins that are local to subclasses.(link)

      • [extensions] Declarative figures out joined-table inheritance primary join condition even if “inherits” mapper argument is given explicitly.(link)

      • [extensions] Declarative will properly interpret the “foreign_keys” argument on a backref() if it’s a string.(link)

      • [extensions] Declarative will accept a table-bound column as a property when used in conjunction with __table__, if the column is already present in __table__. The column will be remapped to the given key the same way as when added to the mapper() properties dict.(link)

      0.5.2

      Released: Sat Jan 24 2009

      orm

      • [orm] Further refined 0.5.1’s warning about delete-orphan cascade placed on a many-to-many relation. First, the bad news: the warning will apply to both many-to-many as well as many-to-one relations. This is necessary since in both cases, SQLA does not scan the full set of potential parents when determining “orphan” status - for a persistent object it only detects an in-python de-association event to establish the object as an “orphan”. Next, the good news: to support one-to-one via a foreign key or association table, or to support one-to-many via an association table, a new flag single_parent=True may be set which indicates objects linked to the relation are only meant to have a single parent. The relation will raise an error if multiple parent-association events occur within Python.(link)

      • [orm] Adjusted the attribute instrumentation change from 0.5.1 to fully establish instrumentation for subclasses where the mapper was created after the superclass had already been fully instrumented.(link)

        References: #1292

      • [orm] Fixed bug in delete-orphan cascade whereby two one-to-one relations from two different parent classes to the same target class would prematurely expunge the instance.(link)

      • [orm] Fixed an eager loading bug whereby self-referential eager loading would prevent other eager loads, self referential or not, from joining to the parent JOIN properly. Thanks to Alex K for creating a great test case.(link)

      • [orm] session.expire() and related methods will not expire() unloaded deferred attributes. This prevents them from being needlessly loaded when the instance is refreshed.(link)

      • [orm] query.join()/outerjoin() will now properly join an aliased() construct to the existing left side, even if query.from_self() or query.select_from(someselectable) has been called.(link)

        References: #1293

      sql

      • [sql]

        Further fixes to the “percent signs and spaces in column/table
        names” functionality.
        (link)

        References: #1284

      mssql

      • [mssql] Restored convert_unicode handling. Results were being passed on through without conversion.(link)

        References: #1291

      • [mssql] Really fixing the decimal handling this time..(link)

        References: #1282

      • [mssql] [Ticket:1289] Modified table reflection code to use only kwargs when constructing tables.(link)

      0.5.1

      Released: Sat Jan 17 2009

      orm

      • [orm] Removed an internal join cache which could potentially leak memory when issuing query.join() repeatedly to ad-hoc selectables.(link)

      • [orm] The “clear()”, “save()”, “update()”, “save_or_update()” Session methods have been deprecated, replaced by “expunge_all()” and “add()”. “expunge_all()” has also been added to ScopedSession.(link)

      • [orm] Modernized the “no mapped table” exception and added a more explicit __table__/__tablename__ exception to declarative.(link)

      • [orm] Concrete inheriting mappers now instrument attributes which are inherited from the superclass, but are not defined for the concrete mapper itself, with an InstrumentedAttribute that issues a descriptive error when accessed.(link)

        References: #1237

      • [orm] Added a new relation() keyword back_populates. This allows configuration of backreferences using explicit relations. This is required when creating bidirectional relations between a hierarchy of concrete mappers and another class.(link)

        References: #1237, #781

      • [orm] Test coverage added for relation() objects specified on concrete mappers.(link)

        References: #1237

      • [orm] Query.from_self() as well as query.subquery() both disable the rendering of eager joins inside the subquery produced. The “disable all eager joins” feature is available publically via a new query.enable_eagerloads() generative.(link)

        References: #1276

      • [orm] Added a rudimental series of set operations to Query that receive Query objects as arguments, including union(), union_all(), intersect(), except_(), intersect_all(), except_all(). See the API documentation for Query.union() for examples.(link)
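
        For example, reusing the session and User names from the earlier sketches:

          q1 = session.query(User).filter(User.name.like('e%'))
          q2 = session.query(User).filter(User.name.like('w%'))
          q1.union(q2).all()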

      • [orm] Fixed bug that prevented Query.join() and eagerloads from attaching to a query that selected from a union or aliased union.(link)

      • [orm] A short documentation example added for bidirectional relations specified on concrete mappers.(link)

        References: #1237

      • [orm] Mappers now instrument class attributes upon construction with the final InstrumentedAttribute object which remains persistent. The _CompileOnAttr/__getattribute__() methodology has been removed. The net effect is that Column-based mapped class attributes can now be used fully at the class level without invoking a mapper compilation operation, greatly simplifying typical usage patterns within declarative.(link)

        References: #1269

      • [orm] ColumnProperty (and front-end helpers such as deferred) no longer ignores unknown **keyword arguments.(link)

      • [orm] Fixed a bug with the unitofwork’s “row switch” mechanism, i.e. the conversion of INSERT/DELETE into an UPDATE, when combined with joined-table inheritance and an object which contained no defined values for the child table where an UPDATE with no SET clause would be rendered.(link)

      • [orm] Using delete-orphan on a many-to-many relation is deprecated. This produces misleading or erroneous results since SQLA does not retrieve the full list of “parents” for m2m. To get delete-orphan behavior with an m2m table, use an explicit association class so that the individual association row is treated as a parent.(link)

        References: #1281

      • [orm] delete-orphan cascade always requires delete cascade. Specifying delete-orphan without delete now raises a deprecation warning.(link)

        References: #1281

      orm declarative

      • [declarative] [orm] Can now specify Column objects on subclasses which have no table of their own (i.e. use single table inheritance). The columns will be appended to the base table, but only mapped by the subclass.(link)

      • [declarative] [orm] For both joined and single inheriting subclasses, the subclass will only map those columns which are already mapped on the superclass and those explicit on the subclass. Other columns that are present on the Table will be excluded from the mapping by default, which can be disabled by passing a blank exclude_properties collection to the __mapper_args__. This is so that single-inheriting classes which define their own columns are the only classes to map those columns. The effect is actually a more organized mapping than you’d normally get with explicit mapper() calls unless you set up the exclude_properties arguments explicitly.(link)

      • [declarative] [orm] It’s an error to add new Column objects to a declarative class that specified an existing table using __table__.(link)

      sql

      • [sql] Improved the methodology for handling percent signs in column names, continuing the fix referenced below. Added more tests. MySQL and PostgreSQL dialects still do not issue correct CREATE TABLE statements for identifiers with percent signs in them.(link)

        References: #1256

      schema

      • [schema] Index now accepts column-oriented InstrumentedAttributes (i.e. column-based mapped class attributes) as column arguments.(link)

        References: #1214

      • [schema] Column with no name (as in declarative) won’t raise a NoneType error when its string output is requested (such as in a stack trace).(link)

      • [schema] Fixed bug when overriding a Column with a ForeignKey on a reflected table, where derived columns (i.e. the “virtual” columns of a select, etc.) would inadvertently call upon schema-level cleanup logic intended only for the original column.(link)

        References: #1278

      mysql

      • [mysql] Added the missing keywords from MySQL 4.1 so they get escaped properly.(link)

      mssql

      • [mssql] Corrected handling of large decimal values with more robust tests. Removed string manipulation on floats.(link)

        References: #1280

      • [mssql] Modified the do_begin handling in mssql to use the Cursor not the Connection so it is DBAPI compatible.(link)

      • [mssql] Corrected SAVEPOINT support on adodbapi by changing the handling of savepoint_release, which is unsupported on mssql.(link)

      0.5.0

      Released: Tue Jan 06 2009

      general

      • [general] Documentation has been converted to Sphinx. In particular, the generated API documentation has been constructed into a full blown “API Reference” section which organizes editorial documentation combined with generated docstrings. Cross linking between sections and API docs are vastly improved, a javascript-powered search feature is provided, and a full index of all classes, functions and members is provided.(link)

      • [general] setup.py now imports setuptools only optionally. If not present, distutils is used. The new “pip” installer is recommended over easy_install as it installs in a more simplified way.(link)

      • [general] added an extremely basic illustration of a PostGIS integration to the examples folder.(link)

      orm

      • [orm] Query.with_polymorphic() now accepts a third argument “discriminator” which will replace the value of mapper.polymorphic_on for that query. Mappers themselves no longer require polymorphic_on to be set, even if the mapper has a polymorphic_identity. When not set, the mapper will load non-polymorphically by default. Together, these two features allow a non-polymorphic concrete inheritance setup to use polymorphic loading on a per-query basis, since concrete setups are prone to many issues when used polymorphically in all cases.(link)

      • [orm] dynamic_loader accepts a query_class= to customize the Query classes used for both the dynamic collection and the queries built from it.(link)

      • [orm] query.order_by() accepts None which will remove any pending order_by state from the query, as well as cancel out any mapper/relation configured ordering. This is primarily useful for overriding the ordering specified on a dynamic_loader().(link)

        References: #1079
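
        For example, assuming a hypothetical User mapping (possibly with a mapper- or relation-level ordering configured):

        q = session.query(User).order_by(User.name)
        q = q.order_by(None)   # cancels the pending ORDER BY and any configured ordering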

      • [orm] Exceptions raised during compile_mappers() are now preserved to provide “sticky behavior” - if a hasattr() call on a pre-compiled mapped attribute triggers a failing compile and suppresses the exception, subsequent compilation is blocked and the exception will be reiterated on the next compile() call. This issue occurs frequently when using declarative.(link)

      • [orm] property.of_type() is now recognized on a single-table inheriting target, when used in the context of prop.of_type(..).any()/has(), as well as query.join(prop.of_type(...)).(link)

      • [orm] query.join() raises an error when the target of the join doesn’t match the property-based attribute - while it’s unlikely anyone is doing this, the SQLAlchemy author was guilty of this particular loosey-goosey behavior.(link)

      • [orm] Fixed bug when using weak_instance_map=False where modified events would not be intercepted for a flush().(link)

        References: #1272

      • [orm] Fixed some deep “column correspondence” issues which could impact a Query made against a selectable containing multiple versions of the same table, as well as unions and similar which contained the same table columns in different column positions at different levels.(link)

        References: #1268

      • [orm] Custom comparator classes used in conjunction with column_property(), relation() etc. can define new comparison methods on the Comparator, which will become available via __getattr__() on the InstrumentedAttribute. In the case of synonym() or comparable_property(), attributes are resolved first on the user-defined descriptor, then on the user-defined comparator.(link)

      • [orm] Added ScopedSession.is_active accessor.(link)

        References: #976

      • [orm] Can pass mapped attributes and column objects as keys to query.update({}).(link)

        References: #1262
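
        For example, with a hypothetical mapped User class:

        # the mapped attribute (or the Column itself) may be used as the dict key
        session.query(User).filter(User.id == 5).update({User.name: 'ed'})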

      • [orm] Mapped attributes passed to the values() of an expression level insert() or update() will use the keys of the mapped columns, not that of the mapped attribute.(link)

      • [orm] Corrected problem with Query.delete() and Query.update() not working properly with bind parameters.(link)

        References: #1242

      • [orm] Query.select_from(), from_statement() ensure that the given argument is a FromClause, or Text/Select/Union, respectively.(link)

      • [orm] Query() can be passed a “composite” attribute as a column expression and it will be expanded.(link)

        References: #1253

      • [orm] Query() is a little more robust when passed various column expressions such as strings, clauselists, text() constructs (which may mean it just raises an error more nicely).(link)

      • [orm] first() works as expected with Query.from_statement().(link)

      • [orm] Fixed bug introduced in 0.5rc4 involving eager loading not functioning for properties which were added to a mapper post-compile using add_property() or equivalent.(link)

      • [orm] Fixed bug where many-to-many relation() with viewonly=True would not correctly reference the link between secondary->remote.(link)

      • [orm] Duplicate items in a list-based collection will be maintained when issuing INSERTs to a “secondary” table in a many-to-many relation. Assuming the m2m table has a unique or primary key constraint on it, this will raise the expected constraint violation instead of silently dropping the duplicate entries. Note that the old behavior remains for a one-to-many relation since collection entries in that case don’t result in INSERT statements and SQLA doesn’t manually police collections.(link)

        References: #1232

      • [orm] Query.add_column() can accept FromClause objects in the same manner as session.query() can.(link)

      • [orm] Comparison of many-to-one relation to NULL is properly converted to IS NOT NULL based on not_().(link)

      • [orm] Extra checks added to ensure explicit primaryjoin/secondaryjoin are ClauseElement instances, to prevent more confusing errors later on.(link)

        References: #1087

      • [orm] Improved mapper() check for non-class classes.(link)

        References: #1236

      • [orm] comparator_factory argument is now documented and supported by all MapperProperty types, including column_property(), relation(), backref(), and synonym().(link)

        References: #5051

      • [orm] Changed the name of PropertyLoader to RelationProperty, to be consistent with all the other names. PropertyLoader is still present as a synonym.(link)

      • [orm] fixed “double iter()” call causing bus errors in shard API, removed errant result.close() left over from the 0.4 version.(link)

        References: #1099, #1228

      • [orm] made Session.merge cascades not trigger autoflush. Fixes merged instances getting prematurely inserted with missing values.(link)

      • [orm] Two fixes to help prevent out-of-band columns from being rendered in polymorphic_union inheritance scenarios (which then causes extra tables to be rendered in the FROM clause causing cartesian products):

        • improvements to “column adaption” for a->b->c inheritance situations to better locate columns that are related to one another via multiple levels of indirection, rather than rendering the non-adapted column.
        • the “polymorphic discriminator” column is only rendered for the actual mapper being queried against. The column won’t be “pulled in” from a subclass or superclass mapper since it’s not needed.
        (link)

      • [orm] Fixed shard_id argument on ShardedSession.execute().(link)

        References: #1072

      orm declarative

      • [declarative] [orm] The full list of arguments accepted as string by backref() includes ‘primaryjoin’, ‘secondaryjoin’, ‘secondary’, ‘foreign_keys’, ‘remote_side’, ‘order_by’.(link)

      sql

      • [sql] RowProxy objects can be used in place of dictionary arguments sent to connection.execute() and friends.(link)

        References: #935
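
        For example, with hypothetical users/users_archive tables:

        row = connection.execute(users.select()).fetchone()
        # the RowProxy stands in for the usual dictionary of bind parameters
        connection.execute(users_archive.insert(), row)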

      • [sql] Columns can again contain percent signs within their names.(link)

        References: #1256

      • [sql] sqlalchemy.sql.expression.Function is now a public class. It can be subclassed to provide user-defined SQL functions in an imperative style, including with pre-established behaviors. The postgis.py example illustrates one usage of this.(link)

      • [sql] PickleType now favors == comparison by default, if the incoming object (such as a dict) implements __eq__(). If the object does not implement __eq__() and mutable=True, a deprecation warning is raised.(link)

      • [sql] Fixed the import weirdness in sqlalchemy.sql to not export __names__.(link)

        References: #1215

      • [sql] Using the same ForeignKey object repeatedly raises an error instead of silently failing later.(link)

        References: #1238

      • [sql] Added NotImplementedError for params() method on Insert/Update/Delete constructs. These items currently don’t support this functionality, which also would be a little misleading compared to values().(link)

      • [sql] Reflected foreign keys will properly locate their referenced column, even if the column was given a “key” attribute different from the reflected name. This is achieved via a new flag on ForeignKey/ForeignKeyConstraint called “link_to_name”, if True means the given name is the referred-to column’s name, not its assigned key.(link)

        References: #650
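
        A hypothetical illustration, where the referenced column’s Python-side key differs from its database name:

        from sqlalchemy import MetaData, Table, Column, Integer, ForeignKey

        meta = MetaData()
        parent = Table('parent', meta,
            # database name is 'id', Python-side key is 'pid'
            Column('id', Integer, key='pid', primary_key=True))
        child = Table('child', meta,
            # link_to_name=True: 'parent.id' refers to the column's name, not its key
            Column('parent_id', Integer,
                   ForeignKey('parent.id', link_to_name=True)))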

      • [sql] select() can accept a ClauseList as a column in the same way as a Table or other selectable and the interior expressions will be used as column elements.(link)

        References: #1253

      • [sql] the “passive” flag on session.is_modified() is correctly propagated to the attribute manager.(link)

      • [sql] union() and union_all() will not whack any order_by() that has been applied to the select()s inside. If you union() a select() with order_by() (presumably to support LIMIT/OFFSET), you should also call self_group() on it to apply parenthesis.(link)
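
        For example, with a hypothetical table t:

        from sqlalchemy import select, union
        # each ordered/limited select is parenthesized via self_group()
        s1 = select([t.c.x]).order_by(t.c.x).limit(5).self_group()
        s2 = select([t.c.x]).order_by(t.c.x.desc()).limit(5).self_group()
        u = union(s1, s2)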

      mysql

      • [mysql] “%” signs in text() constructs are automatically escaped to “%%”. Because of the backwards incompatible nature of this change, a warning is emitted if ‘%%’ is detected in the string.(link)

      • [mysql] Fixed bug in exception raise when FK columns not present during reflection.(link)

        References: #1241

      • [mysql] Fixed bug involving reflection of a remote-schema table with a foreign key ref to another table in that schema.(link)

      sqlite

      • [sqlite] Table reflection now stores the actual DefaultClause value for the column.(link)

        References: #1266

      • [sqlite] bugfixes, behavioral changes(link)

      mssql

      • [mssql] Added in a new MSGenericBinary type. This maps to the Binary type so it can implement the specialized behavior of treating length specified types as fixed-width Binary types and non-length types as an unbound variable length Binary type.(link)

      • [mssql] Added in new types: MSVarBinary and MSImage.(link)

        References: #1249

      • [mssql] Added in the MSReal, MSNText, MSSmallDateTime, MSTime, MSDateTimeOffset, and MSDateTime2 types(link)

      • [mssql] Refactored the Date/Time types. The smalldatetime data type no longer truncates to a date only, and will now be mapped to the MSSmallDateTime type.(link)

        References: #1254

      • [mssql] Corrected an issue with Numerics to accept an int.(link)

      • [mssql] Mapped char_length to the LEN() function.(link)

      • [mssql] If an INSERT includes a subselect the INSERT is converted from an INSERT INTO VALUES construct to an INSERT INTO SELECT construct.(link)

      • [mssql] If the column is part of a primary_key it will be NOT NULL since MSSQL doesn’t allow NULL in primary_key columns.(link)

      • [mssql] MSBinary now returns a BINARY instead of an IMAGE. This is a backwards incompatible change in that BINARY is a fixed length data type whereas IMAGE is a variable length data type.(link)

        References: #1249

      • [mssql] get_default_schema_name is now reflected from the database based on the user’s default schema. This only works with MSSQL 2005 and later.(link)

        References: #1258

      • [mssql] Added collation support through the use of a new collation argument. This is supported on the following types: char, nchar, varchar, nvarchar, text, ntext.(link)

        References: #1248

      • [mssql] Changes to the connection string parameters favor DSN as the default specification for pyodbc. See the mssql.py docstring for detailed usage instructions.(link)

      • [mssql] Added experimental support of savepoints. It currently does not work fully with sessions.(link)

      • [mssql] Support for three levels of column nullability: NULL, NOT NULL, and the database’s configured default. The default Column configuration (nullable=True) will now generate NULL in the DDL. Previously no specification was emitted and the database default would take effect (usually NULL, but not always). To explicitly request the database default, configure columns with nullable=None and no specification will be emitted in DDL. This is backwards incompatible behavior.(link)

        References: #1243

      oracle

      • [oracle] Adjusted the format of create_xid() to repair two-phase commit. We now have field reports of Oracle two-phase commit working properly with this change.(link)

      • [oracle] Added OracleNVarchar type, produces NVARCHAR2, and also subclasses Unicode so that convert_unicode=True by default. NVARCHAR2 reflects into this type automatically so these columns pass unicode on a reflected table with no explicit convert_unicode=True flags.(link)

        References: #1233

      • [oracle] Fixed bug which was preventing out params of certain types from being received; thanks a ton to huddlej at wwu.edu !(link)

        References: #1265

      misc

      • [dialect] Added a new description_encoding attribute on the dialect that is used for encoding the column name when processing the metadata. This usually defaults to utf-8.(link)

      • [engine/pool] Connection.invalidate() checks for closed status to avoid attribute errors.(link)

        References: #1246

      • [engine/pool] NullPool supports reconnect on failure behavior.(link)

        References: #1094

      • [engine/pool] Added a mutex for the initial pool creation when using pool.manage(dbapi). This prevents a minor case of “dogpile” behavior which would otherwise occur upon a heavy load startup.(link)

        References: #799

      • [engine/pool] _execute_clauseelement() goes back to being a private method. Subclassing Connection is not needed now that ConnectionProxy is available.(link)

      • [documentation] Tickets.(link)

        References: #1149, #1200

      • [documentation] Added note about create_session() defaults.(link)

      • [documentation] Added section about metadata.reflect().(link)

      • [documentation] Updated TypeDecorator section.(link)

      • [documentation] Rewrote the “threadlocal” strategy section of the docs due to recent confusion over this feature.(link)

      • [documentation] Removed badly out of date ‘polymorphic_fetch’ and ‘select_table’ docs from inheritance, reworked the second half of “joined table inheritance”.(link)

      • [documentation] Documented comparator_factory kwarg, added new doc section “Custom Comparators”.(link)

      • [postgres] “%” signs in text() constructs are automatically escaped to “%%”. Because of the backwards incompatible nature of this change, a warning is emitted if ‘%%’ is detected in the string.(link)

        References: #1267

      • [postgres] Calling alias.execute() in conjunction with server_side_cursors won’t raise AttributeError.(link)

      • [postgres] Added Index reflection support to PostgreSQL, using a great patch we long neglected, submitted by Ken Kuhlman.(link)

        References: #714

      • [associationproxy] The association proxy properties make themselves available at the class level, e.g. MyClass.aproxy. Previously this evaluated to None.(link)

      0.5.0rc4

      Released: Fri Nov 14 2008

      general

      • [general] global “propigate”->”propagate” change.(link)

      orm

      • [orm] Query.count() has been enhanced to do the “right thing” in a wider variety of cases. It can now count multiple-entity queries, as well as column-based queries. Note that this means if you say query(A, B).count() without any joining criterion, it’s going to count the cartesian product of A*B. Any query which is against column-based entities will automatically issue “SELECT count(1) FROM (SELECT...)” so that the real rowcount is returned, meaning a query such as query(func.count(A.name)).count() will return a value of one, since that query would return one row.(link)

      • [orm] Lots of performance tuning. A rough guesstimate over various ORM operations places it 10% faster over 0.5.0rc3, 25-30% over 0.4.8.(link)

      • [orm] bugfixes and behavioral changes(link)

      • [orm] Adjustments to the enhanced garbage collection on InstanceState to better guard against errors due to lost state.(link)

      • [orm] Query.get() returns a more informative error message when executed against multiple entities.(link)

        References: #1220

      • [orm] Restored NotImplementedError on Cls.relation.in_()(link)

        References: #1140, #1221

      • [orm] Fixed PendingDeprecationWarning involving order_by parameter on relation().(link)

        References: #1226

      sql

      • [sql] Removed the ‘properties’ attribute of the Connection object, Connection.info should be used.(link)

      • [sql] Restored “active rowcount” fetch before ResultProxy autocloses the cursor. This was removed in 0.5rc3.(link)

      • [sql] Rearranged the load_dialect_impl() method in TypeDecorator such that it will take effect even if the user-defined TypeDecorator uses another TypeDecorator as its impl.(link)

      mssql

      • [mssql] Lots of cleanup and fixes to correct problems with limit and offset.(link)

      • [mssql] Correct situation where subqueries as part of a binary expression need to be translated to use the IN and NOT IN syntax.(link)

      • [mssql] Fixed E Notation issue that prevented the ability to insert decimal values less than 1E-6.(link)

        References: #1216

      • [mssql] Corrected problems with reflection when dealing with schemas, particularly when those schemas are the default schema.(link)

        References: #1217

      • [mssql] Corrected problem with casting a zero length item to a varchar. It now correctly adjusts the CAST.(link)

      misc

      • [access] Added support for Currency type.(link)

      • [access] Functions were not returning their result.(link)

        References: #1017

      • [access] Corrected problem with joins. Access only supports LEFT OUTER and INNER joins, not plain JOIN by itself.(link)

        References: #1017

      • [ext] Can now use a custom “inherit_condition” in __mapper_args__ when using declarative.(link)

      • [ext] fixed string-based “remote_side”, “order_by” and others not propagating correctly when used in backref().(link)

      0.5.0rc3

      Released: Fri Nov 07 2008

      orm

      • [orm] Added two new hooks to SessionExtension: after_bulk_delete() and after_bulk_update(). after_bulk_delete() is called after a bulk delete() operation on a query. after_bulk_update() is called after a bulk update() operation on a query.(link)

      • [orm] “not equals” comparisons of simple many-to-one relation to an instance will not drop into an EXISTS clause and will compare foreign key columns instead.(link)

      • [orm] Removed not-really-working use cases of comparing a collection to an iterable. Use contains() to test for collection membership.(link)

      • [orm] Improved the behavior of aliased() objects such that they more accurately adapt the expressions generated, which helps particularly with self-referential comparisons.(link)

        References: #1171

      • [orm] Fixed bug involving primaryjoin/secondaryjoin conditions constructed from class-bound attributes (as often occurs when using declarative), which later would be inappropriately aliased by Query, particularly with the various EXISTS based comparators.(link)

      • [orm] Fixed bug when using multiple query.join() with an aliased-bound descriptor which would lose the left alias.(link)

      • [orm] Improved weakref identity map memory management to no longer require mutexing, resurrects garbage collected instance on a lazy basis for an InstanceState with pending changes.(link)

      • [orm] InstanceState object now removes circular references to itself upon disposal to keep it outside of cyclic garbage collection.(link)

      • [orm] relation() won’t hide unrelated ForeignKey errors inside of the “please specify primaryjoin” message when determining join condition.(link)

      • [orm] Fixed bug in Query involving order_by() in conjunction with multiple aliases of the same class (will add tests in)(link)

        References: #1218

      • [orm] When using Query.join() with an explicit clause for the ON clause, the clause will be aliased in terms of the left side of the join, allowing scenarios like query(Source).from_self().join((Dest, Source.id==Dest.source_id)) to work properly.(link)

      • [orm] polymorphic_union() function respects the “key” of each Column if they differ from the column’s name.(link)

      • [orm] Repaired support for “passive-deletes” on a many-to-one relation() with “delete” cascade.(link)

        References: #1183

      • [orm] Fixed bug in composite types which prevented a primary-key composite type from being mutated.(link)

        References: #1213

      • [orm] Added more granularity to internal attribute access, such that cascade and flush operations will not initialize unloaded attributes and collections, leaving them intact for a lazy-load later on. Backref events still initialize attributes and collections for pending instances.(link)

        References: #1202

      sql

      • [sql] SQL compiler optimizations and complexity reduction. The call count for compiling a typical select() construct is 20% less versus 0.5.0rc2.(link)

      • [sql] Dialects can now generate label names of adjustable length. Pass in the argument “label_length=<value>” to create_engine() to adjust how many characters max will be present in dynamically generated column labels, i.e. “somecolumn AS somelabel”. Any value less than 6 will result in a label of minimal size, consisting of an underscore and a numeric counter. The compiler uses the value of dialect.max_identifier_length as a default.(link)

        References: #1211
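
        For example:

        from sqlalchemy import create_engine
        # dynamically generated column labels will be at most 10 characters long
        engine = create_engine('sqlite://', label_length=10)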

      • [sql] Simplified the check for ResultProxy “autoclose without results” to be based solely on presence of cursor.description. All the regexp-based guessing about statements returning rows has been removed.(link)

        References: #1212

      • [sql] Direct execution of a union() construct will properly set up result-row processing.(link)

        References: #1194

      • [sql] The internal notion of an “OID” or “ROWID” column has been removed. It’s basically not used by any dialect, and the possibility of its usage with psycopg2’s cursor.lastrowid is basically gone now that INSERT..RETURNING is available.(link)

      • [sql] Removed “default_order_by()” method on all FromClause objects.(link)

      • [sql] Repaired the table.tometadata() method so that a passed-in schema argument is propagated to ForeignKey constructs.(link)

      • [sql] Slightly changed behavior of IN operator for comparing to empty collections. Now results in inequality comparison against self. More portable, but breaks with stored procedures that aren’t pure functions.(link)
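
        For example, with a hypothetical users table:

        expr = users.c.name.in_([])
        print(expr)
        # renders roughly as: users.name != users.name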

      mysql

      • [mysql] Fixed foreign key reflection in the edge case where a Table’s explicit schema= is the same as the schema (database) the connection is attached to.(link)

      • [mysql] No longer expects include_columns in table reflection to be lower case.(link)

      oracle

      • [oracle] Wrote a docstring for Oracle dialect. Apparently that Ohloh “few source code comments” label is starting to sting :).(link)

      • [oracle] Removed FIRST_ROWS() optimize flag when using LIMIT/OFFSET, can be reenabled with optimize_limits=True create_engine() flag.(link)

        References: #536

      • [oracle] bugfixes and behavioral changes(link)

      • [oracle] Setting the auto_convert_lobs to False on create_engine() will also instruct the OracleBinary type to return the cx_oracle LOB object unchanged.(link)

      misc

      • [ext] Added a new extension sqlalchemy.ext.serializer. Provides Serializer/Deserializer “classes” which mirror Pickle/Unpickle, as well as dumps() and loads(). This serializer implements an “external object” pickler which keeps key context-sensitive objects, including engines, sessions, metadata, Tables/Columns, and mappers, outside of the pickle stream, and can later restore the pickle using any engine/metadata/session provider. This is used not for pickling regular object instances, which are pickleable without any special logic, but for pickling expression objects and full Query objects, such that all mapper/engine/session dependencies can be restored at unpickle time.(link)
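
        A hypothetical usage sketch (the User class, metadata and Session names are assumed to exist elsewhere):

        from sqlalchemy.ext.serializer import dumps, loads

        query = Session.query(User).filter(User.name == 'ed')
        serialized = dumps(query)

        # later, with any engine/metadata/session provider available:
        query2 = loads(serialized, metadata, Session)
        print(query2.all())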

      • [ext] Fixed bug preventing declarative-bound “column” objects from being used in column_mapped_collection().(link)

        References: #1174

      • [misc] util.flatten_iterator() func doesn’t interpret strings with __iter__() methods as iterators, such as in pypy.(link)

        References: #1077

      0.5.0rc2

      Released: Sun Oct 12 2008

      orm

      • [orm] Fixed bug involving read/write relation()s that contain literal or other non-column expressions within their primaryjoin condition equated to a foreign key column.(link)

      • [orm] “non-batch” mode in mapper(), a feature which allows mapper extension methods to be called as each instance is updated/inserted, now honors the insert order of the objects given.(link)

      • [orm] Fixed RLock-related bug in mapper which could deadlock upon reentrant mapper compile() calls, something that occurs when using declarative constructs inside of ForeignKey objects.(link)

      • [orm] ScopedSession.query_property now accepts a query_cls factory, overriding the session’s configured query_cls.(link)

      • [orm] Fixed shared state bug interfering with ScopedSession.mapper’s ability to apply default __init__ implementations on object subclasses.(link)

      • [orm] Fixed up slices on Query (i.e. query[x:y]) to work properly for zero length slices, slices with None on either end.(link)

        References: #1177

      • [orm] Added an example illustrating Celko’s “nested sets” as a SQLA mapping.(link)

      • [orm] contains_eager() with an alias argument works even when the alias is embedded in a SELECT, as when sent to the Query via query.select_from().(link)

      • [orm] contains_eager() usage is now compatible with a Query that also contains a regular eager load and limit/offset, in that the columns are added to the Query-generated subquery.(link)

        References: #1180

      • [orm] session.execute() will execute a Sequence object passed to it (regression from 0.4).(link)

      • [orm] Removed the “raiseerror” keyword argument from object_mapper() and class_mapper(). These functions raise in all cases if the given class/instance is not mapped.(link)

      • [orm] Fixed session.transaction.commit() on a autocommit=False session not starting a new transaction.(link)

      • [orm] Some adjustments to Session.identity_map’s weak referencing behavior to reduce asynchronous GC side effects.(link)

      • [orm] Adjustment to Session’s post-flush accounting of newly “clean” objects to better protect against operating on objects as they’re asynchronously gc’ed.(link)

        References: #1182

      sql

      • [sql] column.in_(someselect) can now be used as a columns-clause expression without the subquery bleeding into the FROM clause(link)

        References: #1074

      mysql

      • [mysql] Temporary tables are now reflectable.(link)

      sqlite

      • [sqlite] Overhauled SQLite date/time bind/result processing to use regular expressions and format strings, rather than strptime/strftime, to generically support pre-1900 dates, dates with microseconds.(link)

        References: #968

      • [sqlite] String’s (and Unicode’s, UnicodeText’s, etc.) convert_unicode logic disabled in the sqlite dialect, to adjust for pysqlite 2.5.0’s new requirement that only Python unicode objects are accepted; http://itsystementwicklung.de/pipermail/list-pysqlite/2008-March/000018.html(link)

      oracle

      • [oracle] Oracle will detect string-based statements which contain comments at the front before a SELECT as SELECT statements.(link)

        References: #1187

      0.5.0rc1

      Released: Thu Sep 11 2008

      orm

      • [orm] Query now has delete() and update(values) methods, allowing bulk deletes/updates to be performed with the Query object.(link)

      • [orm] The RowTuple object returned by Query(*cols) now features keynames which prefer mapped attribute names over column keys, column keys over column names, i.e. Query(Class.foo, Class.bar) will have names “foo” and “bar” even if those are not the names of the underlying Column objects. Direct Column objects such as Query(table.c.col) will return the “key” attribute of the Column.(link)

      • [orm] Added scalar() and value() methods to Query, each return a single scalar value. scalar() takes no arguments and is roughly equivalent to first()[0], value() takes a single column expression and is roughly equivalent to values(expr).next()[0].(link)

      • [orm] Improved the determination of the FROM clause when placing SQL expressions in the query() list of entities. In particular scalar subqueries should not “leak” their inner FROM objects out into the enclosing query.(link)

      • [orm] Joins along a relation() from a mapped class to a mapped subclass, where the mapped subclass is configured with single table inheritance, will include an IN clause which limits the subtypes of the joined class to those requested, within the ON clause of the join. This takes effect for eager load joins as well as query.join(). Note that in some scenarios the IN clause will appear in the WHERE clause of the query as well since this discrimination has multiple trigger points.(link)

      • [orm] AttributeExtension has been refined such that the event is fired before the mutation actually occurs. Additionally, the append() and set() methods must now return the given value, which is used as the value to be used in the mutation operation. This allows creation of validating AttributeListeners which raise before the action actually occurs, and which can change the given value into something else before it’s used.(link)

      • [orm] column_property(), composite_property(), and relation() now accept a single or list of AttributeExtensions using the “extension” keyword argument.(link)

      • [orm] query.order_by().get() silently drops the “ORDER BY” from the query issued by GET but does not raise an exception.(link)

      • [orm] Added a Validator AttributeExtension, as well as a @validates decorator which is used in a similar fashion as @reconstructor, and marks a method as validating one or more mapped attributes.(link)
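
        For example, with a hypothetical declarative User class (the Base and Column/Integer/String imports are assumed):

        from sqlalchemy.orm import validates

        class User(Base):
            __tablename__ = 'user'
            id = Column(Integer, primary_key=True)
            email = Column(String(100))

            @validates('email')
            def validate_email(self, key, value):
                # invoked before the attribute is set
                assert '@' in value
                return value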

      • [orm] class.someprop.in_() raises NotImplementedError pending the implementation of “in_” for relation(link)

        References: #1140

      • [orm] Fixed primary key update for many-to-many collections where the collection had not been loaded yet(link)

        References: #1127

      • [orm] Fixed bug whereby deferred() columns with a group in conjunction with an otherwise unrelated synonym() would produce an AttributeError during deferred load.(link)

      • [orm] The before_flush() hook on SessionExtension takes place before the list of new/dirty/deleted is calculated for the final time, allowing routines within before_flush() to further change the state of the Session before the flush proceeds.(link)

        References: #1128

      • [orm] The “extension” argument to Session and others can now optionally be a list, supporting events sent to multiple SessionExtension instances. Session places SessionExtensions in Session.extensions.(link)

      • [orm] Reentrant calls to flush() raise an error. This also serves as a rudimentary, but not foolproof, check against concurrent calls to Session.flush().(link)

      • [orm] Improved the behavior of query.join() when joining to joined-table inheritance subclasses, using explicit join criteria (i.e. not on a relation).(link)

      • [orm] @orm.attributes.reconstitute and MapperExtension.reconstitute have been renamed to @orm.reconstructor and MapperExtension.reconstruct_instance(link)

      • [orm] Fixed @reconstructor hook for subclasses which inherit from a base class.(link)

        References: #1129

      • [orm] The composite() property type now supports a __set_composite_values__() method on the composite class which is required if the class represents state using attribute names other than the column’s keynames; default-generated values now get populated properly upon flush. Also, composites with attributes set to None compare correctly.(link)

        References: #1132

      • [orm] The 3-tuple of iterables returned by attributes.get_history() may now be a mix of lists and tuples. (Previously members were always lists.)(link)

      • [orm] Fixed bug whereby changing a primary key attribute on an entity where the attribute’s previous value had been expired would produce an error upon flush().(link)

        References: #1151

      • [orm] Fixed custom instrumentation bug whereby get_instance_dict() was not called for newly constructed instances not loaded by the ORM.(link)

      • [orm] Session.delete() adds the given object to the session if not already present. This was a regression bug from 0.4.(link)

        References: #1150

      • [orm] The echo_uow flag on Session is deprecated, and unit-of-work logging is now application-level only, not per-session level.(link)

      • [orm] Removed conflicting contains() operator from InstrumentedAttribute which didn’t accept an escape kwarg.(link)

        References: #1153

      orm declarative

      • [declarative] [orm] Fixed bug whereby mapper couldn’t initialize if a composite primary key referenced another table that was not defined yet.(link)

        References: #1161

      • [declarative] [orm] Fixed exception throw which would occur when string-based primaryjoin condition was used in conjunction with backref.(link)

      sql

      • [sql] Temporarily rolled back the “ORDER BY” enhancement. This feature is on hold pending further development.(link)

        References: #1068

      • [sql] The exists() construct won’t “export” its contained list of elements as FROM clauses, allowing them to be used more effectively in the columns clause of a SELECT.(link)

      • [sql] and_() and or_() now generate a ColumnElement, allowing boolean expressions as result columns, i.e. select([and_(1, 0)]).(link)

        References: #798

      • [sql] Bind params now subclass ColumnElement which allows them to be selectable by orm.query (they already had most ColumnElement semantics).(link)

      • [sql] Added select_from() method to exists() construct, which becomes more and more compatible with a regular select().(link)

      • [sql] Added func.min(), func.max(), func.sum() as “generic functions”, which basically allows for their return type to be determined automatically. Helps with dates on SQLite, decimal types, others.(link)

        References: #1160

      • [sql] added decimal.Decimal as an “auto-detect” type; bind parameters and generic functions will set their type to Numeric when a Decimal is used.(link)

      schema

      • [schema] Added “sorted_tables” accessor to MetaData, which returns Table objects sorted in order of dependency as a list. This deprecates the MetaData.table_iterator() method. The “reverse=False” keyword argument has also been removed from util.sort_tables(); use the Python ‘reversed’ function to reverse the results.(link)

        References: #1033
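
        For example:

        # tables come back ordered by foreign key dependency, parents first
        for table in metadata.sorted_tables:
            print(table.name)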

      • [schema] The ‘length’ argument to all Numeric types has been renamed to ‘scale’. ‘length’ is deprecated and is still accepted with a warning.(link)

      • [schema] Dropped 0.3-compatibility for user defined types (convert_result_value, convert_bind_param).(link)

      mysql

      • [mysql] The ‘length’ argument to MSInteger, MSBigInteger, MSTinyInteger, MSSmallInteger and MSYear has been renamed to ‘display_width’.(link)

      • [mysql] Added MSMediumInteger type.(link)

        References: #1146

      • [mysql] the function func.utc_timestamp() compiles to UTC_TIMESTAMP, without the parenthesis, which seems to get in the way when used in conjunction with executemany().(link)

      oracle

      • [oracle] limit/offset no longer uses ROW NUMBER OVER to limit rows, and instead uses subqueries in conjunction with a special Oracle optimization comment. Allows LIMIT/OFFSET to work in conjunction with DISTINCT.(link)

        References: #536

      • [oracle] has_sequence() now takes the current “schema” argument into account(link)

        References: #1155

      • [oracle] added BFILE to reflected type names(link)

        References: #1121

      0.5.0beta3

      Released: Mon Aug 04 2008

      orm

      • [orm] The “entity_name” feature of SQLAlchemy mappers has been removed. For rationale, see http://tinyurl.com/6nm2ne(link)

      • [orm] the “autoexpire” flag on Session, sessionmaker(), and scoped_session() has been renamed to “expire_on_commit”. It does not affect the expiration behavior of rollback().(link)

      • [orm] fixed endless loop bug which could occur within a mapper’s deferred load of inherited attributes.(link)

      • [orm] a legacy-support flag “_enable_transaction_accounting” added to Session which, when False, disables all transaction-level object accounting, including expire on rollback, expire on commit, new/deleted list maintenance, and autoflush on begin.(link)

      • [orm] The ‘cascade’ parameter to relation() accepts None as a value, which is equivalent to no cascades.(link)

      • [orm] A critical fix to dynamic relations allows the “modified” history to be properly cleared after a flush().(link)

      • [orm] user-defined @properties on a class are detected and left in place during mapper initialization. This means that a table-bound column of the same name will not be mapped at all if a @property is in the way (and the column is not remapped to a different name), nor will an instrumented attribute from an inherited class be applied. The same rules apply for names excluded using the include_properties/exclude_properties collections.(link)

      • [orm] Added a new SessionExtension hook called after_attach(). This is called at the point of attachment for objects via add(), add_all(), delete(), and merge().(link)

      • [orm] A mapper which inherits from another, when inheriting the columns of its inherited mapper, will use any reassigned property names specified in that inheriting mapper. Previously, if “Base” had reassigned “base_id” to the name “id”, “SubBase(Base)” would still get an attribute called “base_id”. This could be worked around by explicitly stating the column in each submapper as well but this is fairly unworkable and also impossible when using declarative.(link)

        References: #1111

      • [orm] Fixed a series of potential race conditions in Session whereby asynchronous GC could remove unmodified, no longer referenced items from the session as they were present in a list of items to be processed, typically during session.expunge_all() and dependent methods.(link)

      • [orm] Some improvements to the _CompileOnAttr mechanism which should reduce the probability of “Attribute x was not replaced during compile” warnings. (this generally applies to SQLA hackers, like Elixir devs).(link)

      • [orm] Fixed bug whereby the “unsaved, pending instance” FlushError raised for a pending orphan would not take superclass mappers into account when generating the list of relations responsible for the error.(link)

      sql

      • [sql] func.count() with no arguments renders as COUNT(*), equivalent to func.count(text(‘*’)).(link)

      • [sql] simple label names in ORDER BY expressions render as themselves, and not as a re-statement of their corresponding expression. This feature is currently enabled only for SQLite, MySQL, and PostgreSQL. It can be enabled on other dialects as each is shown to support this behavior.(link)

        References: #1068

      mysql

      • [mysql] Quoting of MSEnum values for use in CREATE TABLE is now optional & will be quoted on demand as required. (Quoting was always optional for use with existing tables.)(link)

        References: #1110

      misc

      • [ext] Class-bound attributes sent as arguments to relation()’s remote_side and foreign_keys parameters are now accepted, allowing them to be used with declarative. Additionally fixed bugs involving order_by being specified as a class-bound attribute in conjunction with eager loading.(link)

      • [ext] declarative initialization of Columns adjusted so that non-renamed columns initialize in the same way as a non declarative mapper. This allows an inheriting mapper to set up its same-named “id” columns in particular such that the parent “id” column is favored over the child column, reducing database round trips when this value is requested.(link)

      0.5.0beta2

      Released: Mon Jul 14 2008

      orm

      • [orm] In addition to expired attributes, deferred attributes also load if their data is present in the result set.(link)

        References: #870

      • [orm] session.refresh() raises an informative error message if the list of attributes does not include any column-based attributes.(link)

      • [orm] query() raises an informative error message if no columns or mappers are specified.(link)

      • [orm] lazy loaders now trigger autoflush before proceeding. This allows expire() of a collection or scalar relation to function properly in the context of autoflush.(link)

      • [orm] column_property() attributes which represent SQL expressions or columns that are not present in the mapped tables (such as those from views) are automatically expired after an INSERT or UPDATE, assuming they have not been locally modified, so that they are refreshed with the most recent data upon access.(link)

        References: #887

      • [orm] Fixed explicit, self-referential joins between two joined-table inheritance mappers when using query.join(cls, aliased=True).(link)

        References: #1082

      • [orm] Fixed query.join() when used in conjunction with a columns-only clause and an SQL-expression ON clause in the join.(link)

      • [orm] The “allow_column_override” flag from mapper() has been removed. This flag is virtually always misunderstood. Its specific functionality is available via the include_properties/exclude_properties mapper arguments.(link)

      • [orm] Repaired __str__() method on Query.(link)

        References: #1066

      • [orm] Session.bind gets used as a default even when table/mapper specific binds are defined.(link)

      sql

      • [sql] Added new match() operator that performs a full-text search. Supported on PostgreSQL, SQLite, MySQL, MS-SQL, and Oracle backends.(link)

      schema

      • [schema] Added prefixes option to Table that accepts a list of strings to insert after CREATE in the CREATE TABLE statement.(link)

        References: #1075
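
        For example:

        from sqlalchemy import MetaData, Table, Column, Integer

        meta = MetaData()
        scratch = Table('scratch', meta,
            Column('id', Integer, primary_key=True),
            prefixes=['TEMPORARY'])   # emits CREATE TEMPORARY TABLE scratch (...)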

      • [schema] Unicode, UnicodeText types now set “assert_unicode” and “convert_unicode” by default, but accept overriding **kwargs for these values.(link)

      sqlite

      • [sqlite] Modified SQLite’s representation of “microseconds” to match the output of str(somedatetime), i.e. in that the microseconds are represented as fractional seconds in string format. This makes SQLA’s SQLite date type compatible with datetimes that were saved directly using Pysqlite (which just calls str()). Note that this is incompatible with the existing microseconds values in a SQLA 0.4 generated SQLite database file.

        To get the old behavior globally:

        from sqlalchemy.databases.sqlite import DateTimeMixin
        DateTimeMixin.__legacy_microseconds__ = True

        To get the behavior on individual DateTime types:

        t = sqlite.SLDateTime()
        t.__legacy_microseconds__ = True

        Then use “t” as the type on the Column.

        (link)

        References: #1090

      • [sqlite] SQLite Date, DateTime, and Time types only accept Python datetime objects now, not strings. If you’d like to format dates as strings yourself with SQLite, use a String type. If you’d like them to return datetime objects anyway despite their accepting strings as input, make a TypeDecorator around String - SQLA doesn’t encourage this pattern.(link)

      misc

      • [extensions] Declarative supports a __table_args__ class variable, which is either a dictionary, or tuple of the form (arg1, arg2, ..., {kwarg1:value, ...}) which contains positional + kw arguments to be passed to the Table constructor.(link)

        References: #1096
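
        For example, with hypothetical declarative classes (the Base and usual column imports are assumed):

        from sqlalchemy import UniqueConstraint

        class Widget(Base):
            __tablename__ = 'widget'
            id = Column(Integer, primary_key=True)
            # dictionary form: keyword arguments for Table only
            __table_args__ = {'mysql_engine': 'InnoDB'}

        class Pair(Base):
            __tablename__ = 'pair'
            id = Column(Integer, primary_key=True)
            a = Column(Integer)
            b = Column(Integer)
            # tuple form: positional arguments first, keyword dict last
            __table_args__ = (UniqueConstraint('a', 'b'),
                              {'mysql_engine': 'InnoDB'})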

      0.5.0beta1

      Released: Thu Jun 12 2008

      general

      • [general] global “propigate”->”propagate” change.(link)

      orm

      • [orm] polymorphic_union() function respects the “key” of each Column if they differ from the column’s name.(link)

      • [orm] Fixed 0.4-only bug preventing composite columns from working properly with inheriting mappers(link)

        References: #1199

      • [orm] Fixed RLock-related bug in mapper which could deadlock upon reentrant mapper compile() calls, something that occurs when using declarative constructs inside of ForeignKey objects. Ported from 0.5.(link)

      • [orm] Fixed bug in composite types which prevented a primary-key composite type from being mutated.(link)

        References: #1213

      • [orm] Added ScopedSession.is_active accessor.(link)

        References: #976

      • [orm] Class-bound accessor can be used as the argument to relation() order_by.(link)

        References: #939

      • [orm] Fixed shard_id argument on ShardedSession.execute().(link)

        References: #1072

      sql

      • [sql] Connection.invalidate() checks for closed status to avoid attribute errors.(link)

        References: #1246

      • [sql] NullPool supports reconnect on failure behavior.(link)

        References: #1094

      • [sql] The per-dialect cache used by TypeEngine to cache dialect-specific types is now a WeakKeyDictionary. This to prevent dialect objects from being referenced forever for an application that creates an arbitrarily large number of engines or dialects. There is a small performance penalty which will be resolved in 0.6.(link)

        References: #1299

      • [sql] Fixed SQLite reflection methods so that a non-present cursor.description, which triggers an auto-cursor close, is detected; queries returning no results no longer fail on recent versions of pysqlite, which raise an error when fetchone() is called with no rows present.(link)

      mysql

      • [mysql] Fixed bug in exception raise when FK columns not present during reflection.(link)

        References: #1241

      oracle

      • [oracle] Fixed bug which was preventing out params of certain types from being received; thanks a ton to huddlej at wwu.edu !(link)

        References: #1265

      firebird

      • [firebird] Added support for returning values from inserts (2.0+ only), updates and deletes (2.1+ only).(link)

      misc

      • The “__init__” trigger/decorator added by mapper now attempts to exactly mirror the argument signature of the original __init__. The pass-through for ‘_sa_session’ is no longer implicit- you must allow for this keyword argument in your constructor.(link)

      • ClassState is renamed to ClassManager.(link)

      • Classes may supply their own InstrumentationManager by providing a __sa_instrumentation_manager__ property.(link)

      • Custom instrumentation may use any mechanism to associate a ClassManager with a class and an InstanceState with an instance. Attributes on those objects are still the default association mechanism used by SQLAlchemy’s native instrumentation.(link)

      • Moved entity_name, _sa_session_id, and _instance_key from the instance object to the instance state. These values are still available in the old way, which is now deprecated, using descriptors attached to the class. A deprecation warning will be issued when accessed.(link)

      • The _prepare_instrumentation alias for prepare_instrumentation has been removed.(link)

      • sqlalchemy.exceptions has been renamed to sqlalchemy.exc. The module may be imported under either name.(link)

      • ORM-related exceptions are now defined in sqlalchemy.orm.exc. ConcurrentModificationError, FlushError, and UnmappedColumnError compatibility aliases are installed in sqlalchemy.exc during the import of sqlalchemy.orm.(link)

      • sqlalchemy.logging has been renamed to sqlalchemy.log.(link)

      • The transitional sqlalchemy.log.SADeprecationWarning alias for the warning’s definition in sqlalchemy.exc has been removed.(link)

      • exc.AssertionError has been removed and usage replaced with Python’s built-in AssertionError.(link)

      • The behavior of MapperExtensions attached to multiple, entity_name= primary mappers for a single class has been altered. The first mapper() defined for a class is the only mapper eligible for the MapperExtension ‘instrument_class’, ‘init_instance’ and ‘init_failed’ events. This is backwards incompatible; previously the extensions of last mapper defined would receive these events.(link)

      • [postgres] Added Index reflection support to Postgres, using a great patch we long neglected, submitted by Ken Kuhlman.(link)

        References: #714

SQLAlchemy-0.8.4/doc/changelog/changelog_06.html0000644000076500000240000122271312251147461022062 0ustar classicstaff00000000000000

      0.6 Changelog

      0.6.9

      Released: Sat May 05 2012

      general

      • [general] Adjusted the “importlater” mechanism, which is used internally to resolve import cycles, such that the usage of __import__ is completed when the import of sqlalchemy or sqlalchemy.orm is done, thereby avoiding any usage of __import__ after the application starts new threads.(link)

        References: #2279

      orm

      • [orm] [bug] fixed inappropriate evaluation of user-mapped object in a boolean context within query.get().(link)

        References: #2310

      • [orm] [bug] Fixed the error formatting raised when a tuple is inadvertently passed to session.query().(link)

        References: #2297

      • [orm] Fixed bug whereby the source clause used by query.join() would be inconsistent if against a column expression that combined multiple entities together.(link)

        References: #2197

      • [orm] Fixed bug apparent only in Python 3 whereby sorting of persistent + pending objects during flush would produce an illegal comparison, if the persistent object primary key is not a single integer.(link)

        References: #2228

      • [orm] Fixed bug where query.join() + aliased=True from a joined-inh structure to itself on relationship() with join condition on the child table would convert the lead entity into the joined one inappropriately.(link)

        References: #2234

      • [orm] Fixed bug whereby mapper.order_by attribute would be ignored in the “inner” query within a subquery eager load.(link)

        References: #2287

      • [orm] Fixed bug whereby if a mapped class redefined __hash__() or __eq__() to something non-standard, which is a supported use case as SQLA should never consult these, the methods would be consulted if the class was part of a “composite” (i.e. non-single-entity) result set.(link)

        References: #2215

      • [orm] Fixed subtle bug that caused SQL to blow up if: column_property() against subquery + joinedload + LIMIT + order by the column property() occurred.(link)

        References: #2188

      • [orm] The join condition produced by with_parent as well as when using a “dynamic” relationship against a parent will generate unique bindparams, rather than incorrectly repeating the same bindparam.(link)

        References: #2207

      • [orm] Repaired the “no statement condition” assertion in Query which would attempt to raise if a generative method were called after from_statement() were called.(link)

        References: #2199

      • [orm] Cls.column.collate(“some collation”) now works.(link)

        References: #1776

      engine

      • [engine] Backported the fix introduced in 0.7.4, which ensures that the connection is in a valid state before attempting to call rollback()/prepare()/release() on savepoint and two-phase transactions.(link)

        References: #2317

      sql

      • [sql] Fixed two subtle bugs involving column correspondence in a selectable, one with the same labeled subquery repeated, the other when the label has been “grouped” and loses itself.(link)

        References: #2188

      • [sql] Fixed bug whereby “warn on unicode” flag would get set for the String type when used with certain dialects. This bug is not in 0.7.(link)

      • [sql] Fixed bug whereby with_only_columns() method of Select would fail if a selectable were passed. However, the FROM behavior is still incorrect here, so you need 0.7 in any case for this use case to be usable.(link)

        References: #2270

      schema

      • [schema] Added an informative error message when ForeignKeyConstraint refers to a column name in the parent that is not found.(link)

      postgresql

      • [postgresql] Fixed bug whereby the same modified index behavior in PG 9 affected primary key reflection on a renamed column.(link)

        References: #2291, #2141

      mysql

      • [mysql] Fixed OurSQL dialect to use ansi-neutral quote symbol “’” for XA commands instead of ‘”’.(link)

        References: #2186

      • [mysql] a CREATE TABLE will put the COLLATE option after CHARSET, which appears to be part of MySQL’s arbitrary rules regarding whether or not it will actually work.(link)

        References: #2225

      mssql

      • [mssql] [bug] Decode incoming values when retrieving list of index names and the names of columns within those indexes.(link)

        References: #2269

      oracle

      • [oracle] Added ORA-00028 to disconnect codes, use cx_oracle _Error.code to get at the code.(link)

        References: #2200

      • [oracle] repaired the oracle.RAW type which did not generate the correct DDL.(link)

        References: #2220

      • [oracle] added CURRENT to reserved word list.(link)

        References: #2212

      misc

      • [examples] Adjusted dictlike-polymorphic.py example to apply the CAST such that it works on PG, other databases.(link)

        References: #2266

      0.6.8

      Released: Sun Jun 05 2011

      orm

      • [orm] Calling query.get() against a column-based entity is invalid, this condition now raises a deprecation warning.(link)

        References: #2144

      • [orm] a non_primary mapper will inherit the _identity_class of the primary mapper. This so that a non_primary established against a class that’s normally in an inheritance mapping will produce results that are identity-map compatible with that of the primary mapper(link)

        References: #2151

      • [orm] Backported 0.7’s identity map implementation, which does not use a mutex around removal. This as some users were still getting deadlocks despite the adjustments in 0.6.7; the 0.7 approach that doesn’t use a mutex does not appear to produce “dictionary changed size” issues, the original rationale for the mutex.(link)

        References: #2148

      • [orm] Fixed the error message emitted for “can’t execute syncrule for destination column ‘q’; mapper ‘X’ does not map this column” to reference the correct mapper.(link)

        References: #2163

      • [orm] Fixed bug where determination of “self referential” relationship would fail with no workaround for joined-inh subclass related to itself, or joined-inh subclass related to a subclass of that with no cols in the sub-sub class in the join condition.(link)

        References: #2149

      • [orm] mapper() will ignore non-configured foreign keys to unrelated tables when determining inherit condition between parent and child class. This is equivalent to behavior already applied to declarative. Note that 0.7 has a more comprehensive solution to this, altering how join() itself determines an FK error.(link)

        References: #2153

      • [orm] Fixed bug whereby mapper mapped to an anonymous alias would fail if logging were used, due to unescaped % sign in the alias name.(link)

        References: #2171

      • [orm] Modify the text of the message which occurs when the “identity” key isn’t detected on flush, to include the common cause that the Column isn’t set up to detect auto-increment correctly.(link)

        References: #2170

      • [orm] Fixed bug where transaction-level “deleted” collection wouldn’t be cleared of expunged states, raising an error if they later became transient.(link)

        References: #2182

      engine

      • [engine] Adjusted the __contains__() method of a RowProxy result row such that no exception is thrown internally; NoSuchColumnError() also will generate its message regardless of whether or not the column construct can be coerced to a string.(link)

        References: #2178

      sql

      • [sql] Fixed bug whereby if FetchedValue was passed to column server_onupdate, it would not have its parent “column” assigned, added test coverage for all column default assignment patterns.(link)

        References: #2147

      • [sql] Fixed bug whereby nesting a label of a select() with another label in it would produce incorrect exported columns. Among other things this would break an ORM column_property() mapping against another column_property().(link)

        References: #2167

      postgresql

      • [postgresql] Fixed bug affecting PG 9 whereby index reflection would fail against a column whose name had changed.(link)

        References: #2141

      • [postgresql] Some unit test fixes regarding numeric arrays, MATCH operator. A potential floating-point inaccuracy issue was fixed, and certain tests of the MATCH operator only execute within an EN-oriented locale for now.(link)

        References: #2175

      mssql

      • [mssql] Fixed bug in MSSQL dialect whereby the aliasing applied to a schema-qualified table would leak into enclosing select statements.(link)

        References: #2169

      • [mssql] Fixed bug whereby DATETIME2 type would fail on the “adapt” step when used in result sets or bound parameters. This issue is not in 0.7.(link)

        References: #2159

      0.6.7

      Released: Wed Apr 13 2011

      orm

      • [orm] Tightened the iterate vs. remove mutex around the identity map iteration, attempting to reduce the chance of an (extremely rare) reentrant gc operation causing a deadlock. Might remove the mutex in 0.7.(link)

        References: #2087

      • [orm] Added a name argument to Query.subquery(), to allow a fixed name to be assigned to the alias object.(link)

        References: #2030
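
        As a rough sketch of the new argument (the session and User class here are hypothetical), the alias produced by subquery() can now be given a fixed name:

            subq = session.query(User.id).subquery(name="user_ids")
            # the alias renders as "user_ids" in the FROM clause rather than
            # an anonymous name such as "anon_1"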

      • [orm] A warning is emitted when a joined-table inheriting mapper has no primary keys on the locally mapped table (but has pks on the superclass table).(link)

        References: #2019

      • [orm] Fixed bug where “middle” class in a polymorphic hierarchy would have no ‘polymorphic_on’ column if it didn’t also specify a ‘polymorphic_identity’, leading to strange errors upon refresh, wrong class loaded when querying from that target. Also emits the correct WHERE criterion when using single table inheritance.(link)

        References: #2038

      • [orm] Fixed bug where a column with a SQL or server side default that was excluded from a mapping with include_properties or exclude_properties would result in UnmappedColumnError.(link)

        References: #1995

      • [orm] A warning is emitted in the unusual case that an append or similar event on a collection occurs after the parent object has been dereferenced, which prevents the parent from being marked as “dirty” in the session. This will be an exception in 0.7.(link)

        References: #2046

      • [orm] Fixed bug in query.options() whereby a path applied to a lazyload using string keys could overlap a same named attribute on the wrong entity. Note 0.7 has an updated version of this fix.(link)

        References: #2098

      • [orm] Reworded the exception raised when a flush is attempted of a subclass that is not polymorphic against the supertype.(link)

        References: #2063

      • [orm] Some fixes to the state handling regarding backrefs, typically when autoflush=False, where the back-referenced collection wouldn’t properly handle add/removes with no net change. Thanks to Richard Murri for the test case + patch.(link)

        References: #2123

      • [orm] a “having” clause would be copied from the inside to the outside query if from_self() were used.(link)

        References: #2130

      orm declarative

      • [declarative] [orm] Added an explicit check for the case that the name ‘metadata’ is used for a column attribute on a declarative class.(link)

        References: #2050

      • [declarative] [orm] Fix error message referencing old @classproperty name to reference @declared_attr(link)

        References: #2061

      • [declarative] [orm] Arguments in __mapper_args__ that aren’t “hashable” aren’t mistaken for always-hashable, possibly-column arguments.(link)

        References: #2091

      engine

      • [engine] Fixed bug in QueuePool, SingletonThreadPool whereby connections that were discarded via overflow or periodic cleanup() were not explicitly closed, leaving the task to garbage collection instead. This generally only affects non-reference-counting backends like Jython and Pypy. Thanks to Jaimy Azle for spotting this.(link)

        References: #2102

      sql

      • [sql] Column.copy(), as used in table.tometadata(), copies the ‘doc’ attribute.(link)

        References: #2028

      • [sql] Added some defs to the resultproxy.c extension so that the extension compiles and runs on Python 2.4.(link)

        References: #2023

      • [sql] The compiler extension now supports overriding the default compilation of expression._BindParamClause including that the auto-generated binds within the VALUES/SET clause of an insert()/update() statement will also use the new compilation rules.(link)

        References: #2042

      • [sql] Added accessors to ResultProxy “returns_rows”, “is_insert”(link)

        References: #2089
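
        A brief sketch of the new accessors, assuming an existing engine and a hypothetical users Table:

            conn = engine.connect()

            result = conn.execute("SELECT 1")
            print(result.returns_rows)   # True - a SELECT returns rows

            result = conn.execute(users.insert().values(name="ed"))
            print(result.is_insert)      # True - the statement was an INSERT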

      • [sql] The limit/offset keywords to select() as well as the value passed to select.limit()/offset() will be coerced to integer.(link)

        References: #2116

      postgresql

      • [postgresql] When explicit sequence execution derives the name of the auto-generated sequence of a SERIAL column, which currently only occurs if implicit_returning=False, it now accommodates the case where the table + column name is greater than 63 characters, using the same logic Postgresql uses.(link)

        References: #1083

      • [postgresql] Added an additional libpq message to the list of “disconnect” exceptions, “could not receive data from server”(link)

        References: #2044

      • [postgresql] Added RESERVED_WORDS for postgresql dialect.(link)

        References: #2092

      • [postgresql] Fixed the BIT type to allow a “length” parameter as well as a “varying” parameter. Reflection also fixed.(link)

        References: #2073

      mysql

      • [mysql] oursql dialect accepts the same “ssl” arguments in create_engine() as that of MySQLdb.(link)

        References: #2047

      sqlite

      • [sqlite] Fixed bug where reflection of foreign key created as “REFERENCES <tablename>” without col name would fail.(link)

        References: #2115

      mssql

      • [mssql] Rewrote the query used to get the definition of a view, typically when using the Inspector interface, to use sys.sql_modules instead of the information schema, thereby allowing view definitions longer than 4000 characters to be fully returned.(link)

        References: #2071

      oracle

      • [oracle] Column names that would require quotes for the column itself or for a name-generated bind parameter, such as names with special characters, underscores, or non-ascii characters, now properly translate bind parameter keys when talking to cx_oracle.(link)

        References: #2100

      • [oracle] Oracle dialect adds a use_binds_for_limits=False create_engine() flag, which will render the LIMIT/OFFSET values inline instead of as binds; this is reported to modify the execution plan used by Oracle.(link)

        References: #2116
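
        Hypothetical usage of the new flag (the DSN shown is a placeholder):

            from sqlalchemy import create_engine

            engine = create_engine(
                "oracle://scott:tiger@dsn",
                use_binds_for_limits=False,  # render LIMIT/OFFSET values inline
            )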

      firebird

      • [firebird] The “implicit_returning” flag on create_engine() is honored if set to False.(link)

        References: #2083

      misc

      • [informix] Added RESERVED_WORDS to the informix dialect.(link)

        References: #2092

      • [ext] The horizontal_shard ShardedSession class accepts the common Session argument “query_cls” as a constructor argument, to enable further subclassing of ShardedQuery.(link)

        References: #2090

      • [documentation] Documented SQLite DATE/TIME/DATETIME types.(link)

        References: #2029

      • [examples] The Beaker caching example allows a “query_cls” argument to the query_callable() function.(link)

        References: #2090

      0.6.6

      Released: Sat Jan 08 2011

      orm

      • [orm] Fixed bug whereby a non-“mutable” attribute modified event which occurred on an object that was clean except for preceding mutable attribute changes would fail to strongly reference itself in the identity map. This would cause the object to be garbage collected, losing track of any changes that weren’t previously saved in the “mutable changes” dictionary.(link)

      • [orm] Fixed bug whereby “passive_deletes=’all’” wasn’t passing the correct symbols to lazy loaders during flush, thereby causing an unwarranted load.(link)

        References: #2013

      • [orm] Fixed bug which prevented composite mapped attributes from being used on a mapped select statement. Note the workings of composite are slated to change significantly in 0.7.(link)

        References: #1997

      • [orm] active_history flag also added to composite(). The flag has no effect in 0.6, but is instead a placeholder flag for forwards compatibility, as it applies in 0.7 for composites.(link)

        References: #1976

      • [orm] Fixed uow bug whereby expired objects passed to Session.delete() would not have unloaded references or collections taken into account when deleting objects, despite passive_deletes remaining at its default of False.(link)

        References: #2002

      • [orm] A warning is emitted when version_id_col is specified on an inheriting mapper when the inherited mapper already has one, if those column expressions are not the same.(link)

        References: #1987

      • [orm] “innerjoin” flag doesn’t take effect along the chain of joinedload() joins if a previous join in that chain is an outer join, thus allowing primary rows without a referenced child row to be correctly returned in results.(link)

        References: #1954

      • [orm] Fixed bug regarding “subqueryload” strategy whereby strategy would fail if the entity was an aliased() construct.(link)

        References: #1964

      • [orm] Fixed bug regarding “subqueryload” strategy whereby the join would fail if using a multi-level load of the form from A->joined-subclass->C(link)

        References: #2014

      • [orm] Fixed indexing of Query objects by -1. It was erroneously transformed to the empty slice -1:0 that resulted in IndexError.(link)

        References: #1968

      • [orm] The mapper argument “primary_key” can be passed as a single column as well as a list or tuple. The documentation examples that illustrated it as a scalar value have been changed to lists.(link)

        References: #1971

      • [orm] Added active_history flag to relationship() and column_property(), forces attribute events to always load the “old” value, so that it’s available to attributes.get_history().(link)

        References: #1961
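
        A minimal sketch of the flag on both constructs; the User/Address classes and users_table here are hypothetical:

            from sqlalchemy.orm import mapper, relationship, column_property

            mapper(User, users_table, properties={
                # load the "old" value before it is replaced so that
                # attributes.get_history() can report it
                "status": column_property(users_table.c.status,
                                           active_history=True),
                "addresses": relationship(Address, active_history=True),
            })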

      • [orm] Query.get() will raise if the number of params in a composite key is too large, as well as too small.(link)

        References: #1977

      • [orm] Backport of “optimized get” fix from 0.7, improves the generation of joined-inheritance “load expired row” behavior.(link)

        References: #1992

      • [orm] A little more verbiage to the “primaryjoin” error, in an unusual condition that the join condition “works” for viewonly but doesn’t work for non-viewonly, and foreign_keys wasn’t used - adds “foreign_keys” to the suggestion. Also add “foreign_keys” to the suggestion for the generic “direction” error.(link)

      orm declarative

      • [declarative] [orm] An error is raised if __table_args__ is not in tuple or dict format, and is not None.(link)

        References: #1972

      engine

      • [engine] The “unicode warning” against non-unicode bind data is now raised only when the Unicode type is used explicitly; not when convert_unicode=True is used on the engine or String type.(link)

      • [engine] Fixed memory leak in C version of Decimal result processor.(link)

        References: #1978

      • [engine] Implemented sequence check capability for the C version of RowProxy, as well as 2.7 style “collections.Sequence” registration for RowProxy.(link)

        References: #1871

      • [engine] Threadlocal engine methods rollback(), commit(), prepare() won’t raise if no transaction is in progress; this was a regression introduced in 0.6.(link)

        References: #1998

      • [engine] Threadlocal engine returns itself upon begin(), begin_nested(); engine then implements contextmanager methods to allow the “with” statement.(link)

        References: #2004

      sql

      • [sql] Fixed operator precedence rules for multiple chains of a single non-associative operator. I.e. “x - (y - z)” will compile as “x - (y - z)” and not “x - y - z”. Also works with labels, i.e. “x - (y - z).label(‘foo’)”(link)

        References: #1984

      • [sql] The ‘info’ attribute of Column is copied during Column.copy(), i.e. as occurs when using columns in declarative mixins.(link)

        References: #1967

      • [sql] Added a bind processor for booleans which coerces to int, for DBAPIs such as pymssql that naively call str() on values.(link)

      • [sql] CheckConstraint will copy its ‘initially’, ‘deferrable’, and ‘_create_rule’ attributes within a copy()/tometadata()(link)

        References: #2000

      postgresql

      • [postgresql] Single element tuple expressions inside an IN clause parenthesize correctly.(link)

        References: #1984

      • [postgresql] Ensured that every numeric, float, and int code, both scalar and array, is recognized by psycopg2 and pg8000’s “numeric” base type.(link)

        References: #1955

      • [postgresql] Added as_uuid=True flag to the UUID type, will receive and return values as Python UUID() objects rather than strings. Currently, the UUID type is only known to work with psycopg2.(link)

        References: #1956
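
        Sketch of the flag, assuming the psycopg2 driver and a hypothetical table:

            from sqlalchemy import Table, Column, MetaData
            from sqlalchemy.dialects.postgresql import UUID

            metadata = MetaData()
            items = Table("items", metadata,
                          Column("id", UUID(as_uuid=True), primary_key=True))
            # values round-trip as Python uuid.UUID() objects rather than strings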

      • [postgresql] Fixed bug whereby KeyError would occur with non-ENUM supported PG versions after a pool dispose+recreate would occur.(link)

        References: #1989

      mysql

      • [mysql] Fixed error handling for Jython + zxjdbc, such that has_table() property works again. Regression from 0.6.3 (we don’t have a Jython buildbot, sorry)(link)

        References: #1960

      sqlite

      • [sqlite] The REFERENCES clause in a CREATE TABLE that includes a remote schema to another table with the same schema name now renders the remote name without the schema clause, as required by SQLite.(link)

        References: #1851

      • [sqlite] On the same theme, the REFERENCES clause in a CREATE TABLE that includes a remote schema to a different schema than that of the parent table doesn’t render at all, as cross-schema references do not appear to be supported.(link)

      mssql

      • [mssql] The rewrite of index reflection was unfortunately not tested correctly, and returned incorrect results. This regression is now fixed.(link)

        References: #1770

      oracle

      • [oracle] The cx_oracle “decimal detection” logic, which takes place for result set columns with ambiguous numeric characteristics, now uses the decimal point character determined by the locale/NLS_LANG setting, using an on-first-connect detection of this character. cx_oracle 5.0.3 or greater is also required when using a non-period-decimal-point NLS_LANG setting.(link)

        References: #1953

      firebird

      • [firebird] Firebird numeric type now checks for Decimal explicitly, lets float() pass right through, thereby allowing special values such as float(‘inf’).(link)

        References: #2012

      misc

      • [sqlsoup] Added “map_to()” method to SqlSoup, which is a “master” method which accepts explicit arguments for each aspect of the selectable and mapping, including a base class per mapping.(link)

        References: #1975

      • [sqlsoup] Mapped selectables used with the map(), with_labels(), join() methods no longer put the given argument into the internal “cache” dictionary. Particularly since the join() and select() objects are created in the method itself this was pretty much a pure memory leaking behavior.(link)

      • [examples] The versioning example now supports detection of changes in an associated relationship().(link)

      0.6.5

      Released: Sun Oct 24 2010

      orm

      • [orm] Added a new “lazyload” option “immediateload”. Issues the usual “lazy” load operation automatically as the object is populated. The use case here is when loading objects to be placed in an offline cache, or otherwise used after the session isn’t available, and straight ‘select’ loading, not ‘joined’ or ‘subquery’, is desired.(link)

        References: #1914
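
        A sketch of the option using hypothetical Parent/Child mappings; the collections are loaded up front via plain SELECTs so they remain usable once the session is gone:

            from sqlalchemy.orm import immediateload

            parents = (session.query(Parent)
                       .options(immediateload(Parent.children))
                       .all())
            session.close()
            # Parent.children collections were already populated at query time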

      • [orm] New Query methods: query.label(name), query.as_scalar(), return the query’s statement as a scalar subquery with/without a label; query.with_entities(*ent), replaces the SELECT list of the query with new entities. Roughly equivalent to a generative form of query.values() which accepts mapped entities as well as column expressions.(link)

        References: #1920
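
        Rough sketches of the new methods, assuming hypothetical User/Address mappings and a configured session:

            from sqlalchemy import func

            # replace the SELECT list of an existing query with new entities
            q = session.query(User).filter(User.name == "ed")
            q = q.with_entities(User.id, User.name)

            # use a query as a labeled scalar subquery inside another query
            address_count = (session.query(func.count(Address.id))
                             .filter(Address.user_id == User.id)
                             .label("address_count"))
            session.query(User, address_count).all()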

      • [orm] Fixed recursion bug which could occur when moving an object from one reference to another, with backrefs involved, where the initiating parent was a subclass (with its own mapper) of the previous parent.(link)

      • [orm] Fixed a regression in 0.6.4 which occurred if you passed an empty list to “include_properties” on mapper()(link)

        References: #1918

      • [orm] Fixed labeling bug in Query whereby the NamedTuple would mis-apply labels if any of the column expressions were un-labeled.(link)

      • [orm] Patched a case where query.join() would adapt the right side to the right side of the left’s join inappropriately(link)

        References: #1925

      • [orm] Query.select_from() has been beefed up to help ensure that a subsequent call to query.join() will use the select_from() entity, assuming it’s a mapped entity and not a plain selectable, as the default “left” side, not the first entity in the Query object’s list of entities.(link)

      • [orm] The exception raised by Session when it is used subsequent to a subtransaction rollback (which is what happens when a flush fails in autocommit=False mode) has now been reworded (this is the “inactive due to a rollback in a subtransaction” message). In particular, if the rollback was due to an exception during flush(), the message states this is the case, and reiterates the string form of the original exception that occurred during flush. If the session is closed due to explicit usage of subtransactions (not very common), the message just states this is the case.(link)

      • [orm] The exception raised by Mapper when repeated requests to its initialization are made after initialization already failed no longer assumes the “hasattr” case, since there are other scenarios in which this message gets emitted, and the message also does not compound onto itself multiple times - you get the same message for each attempt at usage. The misnomer “compiles” is being traded out for “initialize”.(link)

      • [orm] Fixed bug in query.update() where ‘evaluate’ or ‘fetch’ expiration would fail if the column expression key was a class attribute with a different keyname as the actual column name.(link)

        References: #1935

      • [orm] Added an assertion during flush which ensures that no NULL-holding identity keys were generated on “newly persistent” objects. This can occur when user defined code inadvertently triggers flushes on not-fully-loaded objects.(link)

      • [orm] lazy loads for relationship attributes now use the current state, not the “committed” state, of foreign and primary key attributes when issuing SQL, if a flush is not in process. Previously, only the database-committed state would be used. In particular, this would cause a many-to-one get()-on-lazyload operation to fail, as autoflush is not triggered on these loads when the attributes are determined and the “committed” state may not be available.(link)

        References: #1910

      • [orm] A new flag on relationship(), load_on_pending, allows the lazy loader to fire off on pending objects without a flush taking place, as well as a transient object that’s been manually “attached” to the session. Note that this flag blocks attribute events from taking place when an object is loaded, so backrefs aren’t available until after a flush. The flag is only intended for very specific use cases.(link)

      • [orm] Another new flag on relationship(), cascade_backrefs, disables the “save-update” cascade when the event was initiated on the “reverse” side of a bidirectional relationship. This is a cleaner behavior so that many-to-ones can be set on a transient object without it getting sucked into the child object’s session, while still allowing the forward collection to cascade. We might default this to False in 0.7.(link)

      • [orm] Slight improvement to the behavior of “passive_updates=False” when placed only on the many-to-one side of a relationship; documentation has been clarified that passive_updates=False should really be on the one-to-many side.(link)

      • [orm] Placing passive_deletes=True on a many-to-one emits a warning, since you probably intended to put it on the one-to-many side.(link)

      • [orm] Fixed bug that would prevent “subqueryload” from working correctly with single table inheritance for a relationship from a subclass - the “where type in (x, y, z)” only gets placed on the inside, instead of repeatedly.(link)

      • [orm] When using from_self() with single table inheritance, the “where type in (x, y, z)” is placed on the outside of the query only, instead of repeatedly. May make some more adjustments to this.(link)

      • [orm] scoped_session emits a warning when configure() is called if a Session is already present (checks only the current thread)(link)

        References: #1924

      • [orm] reworked the internals of mapper.cascade_iterator() to cut down method calls by about 9% in some circumstances.(link)

        References: #1932

      orm declarative

      • [declarative] [orm] @classproperty (soon/now @declared_attr) takes effect for __mapper_args__, __table_args__, __tablename__ on a base class that is not a mixin, as well as mixins.(link)

        References: #1922

      • [declarative] [orm] @classproperty ‘s official name/location for usage with declarative is sqlalchemy.ext.declarative.declared_attr. Same thing, but moving there since it is more of a “marker” that’s specific to declarative, not just an attribute technique.(link)

        References: #1915

      • [declarative] [orm] Fixed bug whereby columns on a mixin wouldn’t propagate correctly to a single-table, or joined-table, inheritance scheme where the attribute name is different than that of the column.(link)

        References: #1931, #1930

      • [declarative] [orm] A mixin can now specify a column that overrides a column of the same name associated with a superclass. Thanks to Oystein Haaland.(link)

      engine

      • [engine] Fixed a regression in 0.6.4 whereby the change that allowed cursor errors to be raised consistently broke the result.lastrowid accessor. Test coverage has been added for result.lastrowid. Note that lastrowid is only supported by Pysqlite and some MySQL drivers, so isn’t super-useful in the general case.(link)

      • [engine] the logging message emitted by the engine when a connection is first used is now “BEGIN (implicit)” to emphasize that DBAPI has no explicit begin().(link)

      • [engine] added “views=True” option to metadata.reflect(), will add the list of available views to those being reflected.(link)

        References: #1936
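
        Sketch, assuming an existing engine whose database contains views:

            from sqlalchemy import MetaData

            metadata = MetaData()
            metadata.reflect(bind=engine, views=True)
            # metadata.tables now also contains Table objects for the views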

      • [engine] engine_from_config() now accepts ‘debug’ for ‘echo’, ‘echo_pool’, ‘force’ for ‘convert_unicode’, boolean values for ‘use_native_unicode’.(link)

        References: #1899
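
        Illustrative configuration (the keys and URL are placeholders):

            from sqlalchemy import engine_from_config

            config = {
                "sqlalchemy.url": "postgresql://scott:tiger@localhost/test",
                "sqlalchemy.echo": "debug",             # string accepted for echo
                "sqlalchemy.convert_unicode": "force",  # string accepted as described
            }
            engine = engine_from_config(config, prefix="sqlalchemy.")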

      sql

      • [sql] Fixed bug in TypeDecorator whereby the dialect-specific type was getting pulled in to generate the DDL for a given type, which didn’t always return the correct result.(link)

      • [sql] TypeDecorator can now have a fully constructed type specified as its “impl”, in addition to a type class.(link)

      • [sql] TypeDecorator will now place itself as the resulting type for a binary expression where the type coercion rules would normally return its impl type - previously, a copy of the impl type would be returned which would have the TypeDecorator embedded into it as the “dialect” impl, this was probably an unintentional way of achieving the desired effect.(link)

      • [sql] TypeDecorator.load_dialect_impl() returns “self.impl” by default, i.e. not the dialect implementation type of “self.impl”. This to support compilation correctly. Behavior can be user-overridden in exactly the same way as before to the same effect.(link)

      • [sql] Added type_coerce(expr, type_) expression element. Treats the given expression as the given type when evaluating expressions and processing result rows, but does not affect the generation of SQL, other than an anonymous label.(link)
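
        A sketch, assuming a hypothetical logs table whose VARCHAR column actually contains unicode data:

            from sqlalchemy import select, Unicode
            from sqlalchemy.sql.expression import type_coerce

            stmt = select([type_coerce(logs.c.message, Unicode)])
            # the rendered SQL is unchanged apart from an anonymous label;
            # result rows are processed using the Unicode type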

      • [sql] Table.tometadata() now copies Index objects associated with the Table as well.(link)

      • [sql] Table.tometadata() issues a warning if the given Table is already present in the target MetaData - the existing Table object is returned.(link)

      • [sql] An informative error message is raised if a Column which has not yet been assigned a name, i.e. as in declarative, is used in a context where it is exported to the columns collection of an enclosing select() construct, or if any construct involving that column is compiled before its name is assigned.(link)

      • [sql] as_scalar(), label() can be called on a selectable which contains a Column that is not yet named.(link)

        References: #1862

      • [sql] Fixed recursion overflow which could occur when operating with two expressions both of type “NullType”, but not the singleton NULLTYPE instance.(link)

        References: #1907

      postgresql

      • [postgresql] Added “as_tuple” flag to ARRAY type, returns results as tuples instead of lists to allow hashing.(link)
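
        Sketch with a hypothetical table definition:

            from sqlalchemy import Table, Column, Integer, MetaData
            from sqlalchemy.dialects.postgresql import ARRAY

            metadata = MetaData()
            samples = Table("samples", metadata,
                            Column("id", Integer, primary_key=True),
                            Column("readings", ARRAY(Integer, as_tuple=True)))
            # array values come back as tuples, so result rows are hashable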

      • [postgresql] Fixed bug which prevented “domain” built from a custom type such as “enum” from being reflected.(link)

        References: #1933

      mysql

      • [mysql] Fixed bug involving reflection of CURRENT_TIMESTAMP default used with ON UPDATE clause, thanks to Taavi Burns(link)

        References: #1940

      mssql

      • [mssql] Fixed reflection bug which did not properly handle reflection of unknown types.(link)

        References: #1946

      • [mssql] Fixed bug where aliasing of tables with “schema” would fail to compile properly.(link)

        References: #1943

      • [mssql] Rewrote the reflection of indexes to use sys. catalogs, so that column names of any configuration (spaces, embedded commas, etc.) can be reflected. Note that reflection of indexes requires SQL Server 2005 or greater.(link)

        References: #1770

      • [mssql] mssql+pymssql dialect now honors the “port” portion of the URL instead of discarding it.(link)

        References: #1952

      oracle

      • [oracle] The implicit_returning argument to create_engine() is now honored regardless of detected version of Oracle. Previously, the flag would be forced to False if server version info was < 10.(link)

        References: #1878

      misc

      • [informix] Major cleanup / modernization of the Informix dialect for 0.6, courtesy Florian Apolloner.(link)

        References: #1906

      • [tests] the NoseSQLAlchemyPlugin has been moved to a new package “sqlalchemy_nose” which installs along with “sqlalchemy”. This so that the “nosetests” script works as always but also allows the --with-coverage option to turn on coverage before SQLAlchemy modules are imported, allowing coverage to work correctly.(link)

      • [misc] CircularDependencyError now has .cycles and .edges members, which are the set of elements involved in one or more cycles, and the set of edges as 2-tuples.(link)

        References: #1890

      0.6.4

      Released: Tue Sep 07 2010

      orm

      • [orm] The name ConcurrentModificationError has been changed to StaleDataError, and descriptive error messages have been revised to reflect exactly what the issue is. Both names will remain available for the foreseeable future for schemes that may be specifying ConcurrentModificationError in an “except:” clause.(link)

      • [orm] Added a mutex to the identity map which mutexes remove operations against iteration methods, which now pre-buffer before returning an iterable. This because asynchronous gc can remove items via the gc thread at any time.(link)

        References: #1891

      • [orm] The Session class is now present in sqlalchemy.orm.*. We’re moving away from the usage of create_session(), which has non-standard defaults, for those situations where a one-step Session constructor is desired. Most users should stick with sessionmaker() for general use, however.(link)

      • [orm] query.with_parent() now accepts transient objects and will use the non-persistent values of their pk/fk attributes in order to formulate the criterion. Docs are also clarified as to the purpose of with_parent().(link)

      • [orm] The include_properties and exclude_properties arguments to mapper() now accept Column objects as members in addition to strings. This so that same-named Column objects, such as those within a join(), can be disambiguated.(link)

      • [orm] A warning is now emitted if a mapper is created against a join or other single selectable that includes multiple columns with the same name in its .c. collection, and those columns aren’t explicitly named as part of the same or separate attributes (or excluded). In 0.7 this warning will be an exception. Note that this warning is not emitted when the combination occurs as a result of inheritance, so that attributes still allow being overridden naturally. In 0.7 this will be improved further.(link)

        References: #1896

      • [orm] The primary_key argument to mapper() can now specify a series of columns that are only a subset of the calculated “primary key” columns of the mapped selectable, without an error being raised. This helps for situations where a selectable’s effective primary key is simpler than the number of columns in the selectable that are actually marked as “primary_key”, such as a join against two tables on their primary key columns.(link)

        References: #1896

      • [orm] An object that’s been deleted now gets a flag ‘deleted’, which prohibits the object from being re-add()ed to the session, as previously the object would live in the identity map silently until its attributes were accessed. The make_transient() function now resets this flag along with the “key” flag.(link)

      • [orm] make_transient() can be safely called on an already transient instance.(link)

      • [orm] a warning is emitted in mapper() if the polymorphic_on column is not present either in direct or derived form in the mapped selectable or in the with_polymorphic selectable, instead of silently ignoring it. Look for this to become an exception in 0.7.(link)

      • [orm] Another pass through the series of error messages emitted when relationship() is configured with ambiguous arguments. The “foreign_keys” setting is no longer mentioned, as it is almost never needed and it is preferable users set up correct ForeignKey metadata, which is now the recommendation. If ‘foreign_keys’ is used and is incorrect, the message suggests the attribute is probably unnecessary. Docs for the attribute are beefed up. This because all confused relationship() users on the ML appear to be attempting to use foreign_keys due to the message, which only confuses them further since Table metadata is much clearer.(link)

      • [orm] If the “secondary” table has no ForeignKey metadata and no foreign_keys is set, even though the user is passing screwed up information, it is assumed that primary/secondaryjoin expressions should consider only and all cols in “secondary” to be foreign. It’s not possible with “secondary” for the foreign keys to be elsewhere in any case. A warning is now emitted instead of an error, and the mapping succeeds.(link)

        References: #1877

      • [orm] Moving an o2m object from one collection to another, or vice versa changing the referenced object by an m2o, where the foreign key is also a member of the primary key, will now be more carefully checked during flush if the change in value of the foreign key on the “many” side is the result of a change in the primary key of the “one” side, or if the “one” is just a different object. In one case, a cascade-capable DB would have cascaded the value already and we need to look at the “new” PK value to do an UPDATE, in the other we need to continue looking at the “old”. We now look at the “old”, assuming passive_updates=True, unless we know it was a PK switch that triggered the change.(link)

        References: #1856

      • [orm] The value of version_id_col can be changed manually, and this will result in an UPDATE of the row. Versioned UPDATEs and DELETEs now use the “committed” value of the version_id_col in the WHERE clause and not the pending changed value. The version generator is also bypassed if manual changes are present on the attribute.(link)

        References: #1857

      • [orm] Repaired the usage of merge() when used with concrete inheriting mappers. Such mappers frequently have so-called “concrete” attributes, which are subclass attributes that “disable” propagation from the parent - these needed to allow a merge() operation to pass through without effect.(link)

      • [orm] Specifying a non-column based argument for column_mapped_collection, including string, text() etc., will raise an error message that specifically asks for a column element, no longer misleads with incorrect information about text() or literal().(link)

        References: #1863

      • [orm] Similarly, for relationship(), foreign_keys, remote_side, order_by - all column-based expressions are enforced - lists of strings are explicitly disallowed since this is a very common error(link)

      • [orm] Dynamic attributes don’t support collection population - added an assertion for when set_committed_value() is called, as well as when joinedload() or subqueryload() options are applied to a dynamic attribute, instead of failure / silent failure.(link)

        References: #1864

      • [orm] Fixed bug whereby generating a Query derived from one which had the same column repeated with different label names, typically in some UNION situations, would fail to propagate the inner columns completely to the outer query.(link)

        References: #1852

      • [orm] object_session() raises the proper UnmappedInstanceError when presented with an unmapped instance.(link)

        References: #1881

      • [orm] Applied further memoizations to calculated Mapper properties, with significant (~90%) runtime mapper.py call count reduction in heavily polymorphic mapping configurations.(link)

      • [orm] mapper _get_col_to_prop private method used by the versioning example is deprecated; now use mapper.get_property_by_column() which will remain the public method for this.(link)

      • [orm] the versioning example works correctly now if versioning on a col that was formerly NULL.(link)

      orm declarative

      • [declarative] [orm] if @classproperty is used with a regular class-bound mapper property attribute, it will be called to get the actual attribute value during initialization. Currently, there’s no advantage to using @classproperty on a column or relationship attribute of a declarative class that isn’t a mixin - evaluation is at the same time as if @classproperty weren’t used. But here we at least allow it to function as expected.(link)

      • [declarative] [orm] Fixed bug where “Can’t add additional column” message would display the wrong name.(link)

      engine

      • [engine] Calling fetchone() or similar on a result that has already been exhausted, has been closed, or is not a result-returning result now raises ResourceClosedError, a subclass of InvalidRequestError, in all cases, regardless of backend. Previously, some DBAPIs would raise ProgrammingError (i.e. pysqlite), others would return None leading to downstream breakages (i.e. MySQL-python).(link)

      • [engine] Fixed bug in Connection whereby if a “disconnect” event occurred in the “initialize” phase of the first connection pool connect, an AttributeError would be raised when the Connection would attempt to invalidate the DBAPI connection.(link)

        References: #1894

      • [engine] Connection, ResultProxy, as well as Session use ResourceClosedError for all “this connection/transaction/result is closed” types of errors.(link)

      • [engine] Connection.invalidate() can be called more than once and subsequent calls do nothing.(link)

      sql

      • [sql] Calling execute() on an alias() construct is pending deprecation for 0.7, as it is not itself an “executable” construct. It currently “proxies” its inner element and is conditionally “executable” but this is not the kind of ambiguity we like these days.(link)

      • [sql] The execute() and scalar() methods of ClauseElement are now moved appropriately to the Executable subclass. ClauseElement.execute()/ scalar() are still present and are pending deprecation in 0.7, but note these would always raise an error anyway if you were not an Executable (unless you were an alias(), see previous note).(link)

      • [sql] Added basic math expression coercion for Numeric->Integer, so that resulting type is Numeric regardless of the direction of the expression.(link)

      • [sql] Changed the scheme used to generate truncated “auto” index names when using the “index=True” flag on Column. The truncation only takes place with the auto-generated name, not one that is user-defined (an error would be raised instead), and the truncation scheme itself is now based on a fragment of an md5 hash of the identifier name, so that multiple indexes on columns with similar names still have unique names.(link)

        References: #1855

      • [sql] The generated index name also is based on a “max index name length” attribute which is separate from the “max identifier length” - this to appease MySQL, which has a max length of 64 for index names, separate from its overall max length of 255.(link)

        References: #1412

      • [sql] the text() construct, if placed in a column oriented situation, will at least return NULLTYPE for its type instead of None, allowing it to be used a little more freely for ad-hoc column expressions than before. literal_column() is still the better choice, however.(link)

      • [sql] Added full description of parent table/column, target table/column in error message raised when ForeignKey can’t resolve target.(link)

      • [sql] Fixed bug whereby replacing composite foreign key columns in a reflected table would cause an attempt to remove the reflected constraint from the table a second time, raising a KeyError.(link)

        References: #1865

      • [sql] the _Label construct, i.e. the one that is produced whenever you say somecol.label(), now counts itself in its “proxy_set” unioned with that of its contained column’s proxy set, instead of directly returning that of the contained column. This allows column correspondence operations which depend on the identity of the _Labels themselves to return the correct result.(link)

      • [sql] fixes ORM bug.(link)

        References: #1852

      postgresql

      • [postgresql] Fixed the psycopg2 dialect to use its set_isolation_level() method instead of relying upon the base “SET SESSION ISOLATION” command, as psycopg2 resets the isolation level on each new transaction otherwise.(link)

      mssql

      • [mssql] Fixed “default schema” query to work with pymssql backend.(link)

      oracle

      • [oracle] Added ROWID type to the Oracle dialect, for those cases where an explicit CAST might be needed.(link)

        References: #1879

      • [oracle] Oracle reflection of indexes has been tuned so that indexes which include some or all primary key columns, but not the same set of columns as that of the primary key, are reflected. Indexes which contain the identical columns as that of the primary key are skipped within reflection, as the index in that case is assumed to be the auto-generated primary key index. Previously, any index with PK columns present would be skipped. Thanks to Kent Bower for the patch.(link)

        References: #1867

      • [oracle] Oracle now reflects the names of primary key constraints - also thanks to Kent Bower.(link)

        References: #1868

      firebird

      • [firebird] Fixed bug whereby a column default would fail to reflect if the “default” keyword were lower case.(link)

      misc

      • [informix] Applied patches to get basic Informix functionality up again. We rely upon end-user testing to ensure that Informix is working to some degree.(link)

        References: #1904

      • [documentation] The docs have been reorganized such that the “API Reference” section is gone - all the docstrings from there which were public API are moved into the context of the main doc section that talks about it. Main docs divided into “SQLAlchemy Core” and “SQLAlchemy ORM” sections, mapper/relationship docs have been broken out. Lots of sections rewritten and/or reorganized.(link)

      • [examples] The beaker_caching example has been reorganized such that the Session, cache manager, declarative_base are part of environment, and custom cache code is portable and now within “caching_query.py”. This allows the example to be easier to “drop in” to existing projects.(link)

      • [examples] the history_meta versioning recipe sets “unique=False” when copying columns, so that the versioning table handles multiple rows with repeating values.(link)

        References: #1887

      0.6.3

      Released: Thu Jul 15 2010

      orm

      • [orm] Removed errant many-to-many load in unitofwork which triggered unnecessarily on expired/unloaded collections. This load now takes place only if passive_updates is False and the parent primary key has changed, or if passive_deletes is False and a delete of the parent has occurred.(link)

        References: #1845

      • [orm] Column-entities (i.e. query(Foo.id)) copy their state more fully when queries are derived from themselves + a selectable (i.e. from_self(), union(), etc.), so that join() and such have the correct state to work from.(link)

        References: #1853

      • [orm] Fixed bug where Query.join() would fail if querying a non-ORM column then joining without an on clause when a FROM clause is already present; it now raises a checked exception the same way it does when the clause is not present.(link)

        References: #1853

      • [orm] Improved the check for an “unmapped class”, including the case where the superclass is mapped but the subclass is not. Any attempts to access cls._sa_class_manager.mapper now raise UnmappedClassError().(link)

        References: #1142

      • [orm] Added “column_descriptions” accessor to Query, returns a list of dictionaries containing naming/typing information about the entities the Query will return. Can be helpful for building GUIs on top of ORM queries.(link)

      mysql

      • [mysql] The _extract_error_code() method now works correctly with each MySQL dialect ( MySQL-python, OurSQL, MySQL-Connector-Python, PyODBC). Previously, the reconnect logic would fail for OperationalError conditions, however since MySQLdb and OurSQL have their own reconnect feature, there was no symptom for these drivers here unless one watched the logs.(link)

        References: #1848

      oracle

      • [oracle] More tweaks to cx_oracle Decimal handling. “Ambiguous” numerics with no decimal place are coerced to int at the connection handler level. The advantage here is that ints come back as ints without SQLA type objects being involved and without needless conversion to Decimal first.

        Unfortunately, some exotic subquery cases can even see different types between individual result rows, so the Numeric handler, when instructed to return Decimal, can’t take full advantage of “native decimal” mode and must run isinstance() on every value to check whether it is already a Decimal. This is a reopen of the issue referenced below.

        (link)

        References: #1840

      0.6.2

      Released: Tue Jul 06 2010

      orm

      • [orm] Query.join() will check for a call of the form query.join(target, clause_expression), i.e. missing the tuple, and raise an informative error message that this is the wrong calling form.(link)

      • [orm] Fixed bug regarding flushes on self-referential bi-directional many-to-many relationships, where two objects made to mutually reference each other in one flush would fail to insert a row for both sides. Regression from 0.5.(link)

        References: #1824

      • [orm] the post_update feature of relationship() has been reworked architecturally to integrate more closely with the new 0.6 unit of work. The motivation for the change is so that multiple “post update” calls, each affecting different foreign key columns of the same row, are executed in a single UPDATE statement, rather than one UPDATE statement per column per row. Multiple row updates are also batched into executemany()s as possible, while maintaining consistent row ordering.(link)

      • [orm] Query.statement, Query.subquery(), etc. now transfer the values of bind parameters, i.e. those specified by query.params(), into the resulting SQL expression. Previously the values would not be transferred and bind parameters would come out as None.(link)

      • [orm] Subquery-eager-loading now works with Query objects which include params(), as well as get() Queries.(link)

      • [orm] Can now call make_transient() on an instance that is referenced by parent objects via many-to-one, without the parent’s foreign key value getting temporarily set to None - this was a function of the “detect primary key switch” flush handler. It now ignores objects that are no longer in the “persistent” state, and the parent’s foreign key identifier is left unaffected.(link)

      • [orm] query.order_by() now accepts False, which cancels any existing order_by() state on the Query, allowing subsequent generative methods to be called which do not support ORDER BY. This is not the same as the already existing feature of passing None, which suppresses any existing order_by() settings, including those configured on the mapper. False will make it as though order_by() was never called, while None is an active setting.(link)
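
        A sketch of the distinction, using a hypothetical User mapping that has a default ordering configured on its mapper:

            q = session.query(User).order_by(User.name)
            q1 = q.order_by(None)   # actively suppresses all ordering, including mapper-level
            q2 = q.order_by(False)  # as though order_by() had never been called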

      • [orm] An instance which is moved to “transient”, has an incomplete or missing set of primary key attributes, and contains expired attributes, will raise an InvalidRequestError if an expired attribute is accessed, instead of getting a recursion overflow.(link)

      • [orm] The make_transient() function is now in the generated documentation.(link)

      • [orm] make_transient() removes all “loader” callables from the state being made transient, removing any “expired” state - all unloaded attributes reset back to undefined, None/empty on access.(link)

      orm declarative

      • [declarative] [orm] Added support for @classproperty to provide any kind of schema/mapping construct from a declarative mixin, including columns with foreign keys, relationships, column_property, deferred. This solves all such issues on declarative mixins. An error is raised if any MapperProperty subclass is specified on a mixin without using @classproperty.(link)

        References: #1805, #1796, #1751

      • [declarative] [orm] a mixin class can now define a column that matches one which is present on a __table__ defined on a subclass. It cannot, however, define one that is not present in the __table__, and the error message here now works.(link)

        References: #1821

      sql

      • [sql] The warning emitted by the Unicode and String types with convert_unicode=True no longer embeds the actual value passed. This so that the Python warning registry does not continue to grow in size, the warning is emitted once as per the warning filter settings, and large string values don’t pollute the output.(link)

        References: #1822

      • [sql] Fixed bug that would prevent overridden clause compilation from working for “annotated” expression elements, which are often generated by the ORM.(link)

      • [sql] The argument to “ESCAPE” of a LIKE operator or similar is passed through render_literal_value(), which may implement escaping of backslashes.(link)

        References: #1400

      • [sql] Fixed bug in Enum type which blew away native_enum flag when used with TypeDecorators or other adaption scenarios.(link)

      • [sql] Inspector hits bind.connect() when invoked to ensure initialize has been called. The internal name “.conn” is changed to “.bind”, since that’s what it is.(link)

      • [sql] Modified the internals of “column annotation” such that a custom Column subclass can safely override _constructor to return Column, for the purposes of making “configurational” column classes that aren’t involved in proxying, etc.(link)

      • [sql] Column.copy() takes along the “unique” attribute among others, fixes regarding declarative mixins(link)

        References: #1829

      postgresql

      • [postgresql] render_literal_value() is overridden which escapes backslashes, currently applies to the ESCAPE clause of LIKE and similar expressions. Ultimately this will have to detect the value of “standard_conforming_strings” for full behavior.(link)

        References: #1400

      • [postgresql] Won’t generate “CREATE TYPE” / “DROP TYPE” if using types.Enum on a PG version prior to 8.3 - the supports_native_enum flag is fully honored.(link)

        References: #1836

      mysql

      • [mysql] MySQL dialect doesn’t emit CAST() for MySQL version detected < 4.0.2. This allows the unicode check on connect to proceed.(link)

        References: #1826

      • [mysql] MySQL dialect now detects NO_BACKSLASH_ESCAPES sql mode, in addition to ANSI_QUOTES.(link)

      • [mysql] render_literal_value() is overridden which escapes backslashes, currently applies to the ESCAPE clause of LIKE and similar expressions. This behavior is derived from detecting the value of NO_BACKSLASH_ESCAPES.(link)

        References: #1400

      mssql

      • [mssql] If server_version_info is outside the usual range of (8, ), (9, ), (10, ), a warning is emitted which suggests checking that the FreeTDS version configuration is using 7.0 or 8.0, not 4.2.(link)

        References: #1825

      oracle

      • [oracle] Fixed ora-8 compatibility flags such that they don’t cache a stale value from before the first database connection actually occurs.(link)

        References: #1819

      • [oracle] Oracle’s “native decimal” metadata begins to return ambiguous typing information about numerics when columns are embedded in subqueries as well as when ROWNUM is consulted with subqueries, as we do for limit/offset. We’ve added these ambiguous conditions to the cx_oracle “convert to Decimal()” handler, so that we receive numerics as Decimal in more cases instead of as floats. These are then converted, if requested, into Integer or Float, or otherwise kept as the lossless Decimal.(link)

        References: #1840

      firebird

      • [firebird] Fixed incorrect signature in do_execute(), error introduced in 0.6.1.(link)

        References: #1823

      • [firebird] Firebird dialect adds CHAR, VARCHAR types which accept a “charset” flag, to support Firebird “CHARACTER SET” clause.(link)

        References: #1813

      misc

      • [extension] [compiler] The ‘default’ compiler is automatically copied over when overriding the compilation of a built in clause construct, so no KeyError is raised if the user-defined compiler is specific to certain backends and compilation for a different backend is invoked.(link)

        References: #1838

      • [documentation] Added documentation for the Inspector.(link)

        References: #1820

      • [documentation] Fixed @memoized_property and @memoized_instancemethod decorators so that Sphinx documentation picks up these attributes and methods, such as ResultProxy.inserted_primary_key.(link)

        References: #1830

      0.6.1

      Released: Mon May 31 2010

      orm

      • [orm] Fixed regression introduced in 0.6.0 involving improper history accounting on mutable attributes.(link)

        References: #1782

      • [orm] Fixed regression introduced in 0.6.0 unit of work refactor that broke updates for bi-directional relationship() with post_update=True.(link)

        References: #1807

      • [orm] session.merge() will not expire attributes on the returned instance if that instance is “pending”.(link)

        References: #1789

      • [orm] fixed __setstate__ method of CollectionAdapter to not fail during deserialize where parent InstanceState not yet unserialized.(link)

        References: #1802

      • [orm] Added internal warning in case an instance without a full PK happened to be expired and then was asked to refresh.(link)

        References: #1797

      • [orm] Added more aggressive caching to the mapper’s usage of UPDATE, INSERT, and DELETE expressions. Assuming the statement has no per-object SQL expressions attached, the expression objects are cached by the mapper after the first create, and their compiled form is stored persistently in a cache dictionary for the duration of the related Engine. The cache is an LRUCache for the rare case that a mapper receives an extremely high number of different column patterns as UPDATEs.(link)

      sql

      • [sql] expr.in_() now accepts a text() construct as the argument. Grouping parentheses are added automatically, i.e. usage is like col.in_(text(“select id from table”)).(link)

        References: #1793
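
        Sketch, with a hypothetical users table:

            from sqlalchemy import text

            stmt = users.c.id.in_(text("SELECT user_id FROM orders"))
            # renders as: users.id IN (SELECT user_id FROM orders)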

      • [sql] Columns of _Binary type (i.e. LargeBinary, BLOB, etc.) will coerce a “basestring” on the right side into a _Binary as well so that required DBAPI processing takes place.(link)

      • [sql] Added table.add_is_dependent_on(othertable), allows manual placement of dependency rules between two Table objects for use within create_all(), drop_all(), sorted_tables.(link)

        References: #1801
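
        Sketch with two hypothetical Table objects that share no ForeignKey:

            widgets.add_is_dependent_on(accounts)
            metadata.create_all(engine)
            # create_all() now guarantees "accounts" is created before "widgets",
            # and drop_all() drops them in the reverse order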

      • [sql] Fixed bug that prevented implicit RETURNING from functioning properly with composite primary key that contained zeroes.(link)

        References: #1778

      • [sql] Fixed errant space character when generating ADD CONSTRAINT for a named UNIQUE constraint.(link)

      • [sql] Fixed “table” argument on constructor of ForeignKeyConstraint.(link)

        References: #1571

      • [sql] Fixed bug in connection pool cursor wrapper whereby if a cursor threw an exception on close(), the logging of the message would fail.(link)

        References: #1786

      • [sql] the _make_proxy() method of ColumnClause and Column now use self.__class__ to determine the class of object to be returned instead of hardcoding to ColumnClause/Column, making it slightly easier to produce specific subclasses of these which work in alias/subquery situations.(link)

      • [sql] func.XXX() doesn’t inadvertently resolve to non-Function classes (e.g. fixes func.text()).(link)

        References: #1798

      mysql

      • [mysql] func.sysdate() emits “SYSDATE()”, i.e. with the ending parenthesis, on MySQL.(link)

        References: #1794

      sqlite

      • [sqlite] Fixed concatenation of constraints when “PRIMARY KEY” constraint gets moved to column level due to SQLite AUTOINCREMENT keyword being rendered.(link)

        References: #1812

      oracle

      • [oracle] Added a check for cx_oracle versions lower than version 5, in which case the incompatible “output type handler” won’t be used. This will impact decimal accuracy and some unicode handling issues.(link)

        References: #1775

      • [oracle] Fixed use_ansi=False mode, which was producing broken WHERE clauses in pretty much all cases.(link)

        References: #1790

      • [oracle] Re-established support for Oracle 8 with cx_oracle, including that use_ansi is set to False automatically, NVARCHAR2 and NCLOB are not rendered for Unicode, “native unicode” check doesn’t fail, cx_oracle “native unicode” mode is disabled, VARCHAR() is emitted with bytes count instead of char count.(link)

        References: #1808

      • [oracle] oracle_xe 5 doesn’t accept a Python unicode object in its connect string in normal Python 2.x mode - so we coerce to str() directly. Non-ASCII characters aren’t supported in connect strings here since we don’t know what encoding we could use.(link)

        References: #1670

      • [oracle] FOR UPDATE is emitted in the syntactically correct position when limit/offset is used, i.e. the ROWNUM subquery. However, Oracle can’t really handle FOR UPDATE with ORDER BY or with subqueries, so it’s still not very usable, but at least SQLA gets the SQL past the Oracle parser.(link)

        References: #1815

      firebird

      • [firebird] Added a label to the query used within has_table() and has_sequence() to work with older versions of Firebird that don’t provide labels for result columns.(link)

        References: #1521

      • [firebird] Added integer coercion to the “type_conv” attribute when passed via query string, so that it is properly interpreted by Kinterbasdb.(link)

        References: #1779

      • [firebird] Added ‘connection shutdown’ to the list of exception strings which indicate a dropped connection.(link)

        References: #1646

      misc

      • [engines] Fixed building the C extensions on Python 2.4.(link)

        References: #1781

      • [engines] Pool classes will reuse the same “pool_logging_name” setting after a dispose() occurs.(link)

      • [engines] Engine gains an “execution_options” argument and update_execution_options() method, which will apply to all connections generated by this engine.(link)
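
        A brief sketch of both forms described above; the option values chosen here are only illustrative:

            from sqlalchemy import create_engine

            # options passed here apply to every Connection produced by this Engine
            engine = create_engine("sqlite://", execution_options={"stream_results": False})

            # or update them later, affecting subsequently acquired connections
            engine.update_execution_options(autocommit=True)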

      • [sqlsoup] the SqlSoup constructor accepts a base argument which specifies the base class to use for mapped classes, the default being object.(link)

        References: #1783
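
        A small sketch, assuming an in-memory SQLite URL and a hypothetical base class; the sqlalchemy.ext.sqlsoup import path shown is the one from the 0.6/0.7 series (SQLSoup became a separate package as of 0.8):

            from sqlalchemy.ext.sqlsoup import SqlSoup

            class MyBase(object):
                """Hypothetical common base for SqlSoup-mapped classes."""
                def summary(self):
                    return "<%s>" % type(self).__name__

            # mapped classes generated by this SqlSoup derive from MyBase instead of object
            db = SqlSoup("sqlite://", base=MyBase)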

      0.6.0

      Released: Sun Apr 18 2010

      orm

      • [orm] Unit of work internals have been rewritten. Units of work with large numbers of interdependent objects can now be flushed without recursion overflows, as there is no longer reliance upon recursive calls. The number of internal structures now stays constant for a particular session state, regardless of how many relationships are present on mappings. The flow of events now corresponds to a linear list of steps, generated by the mappers and relationships based on actual work to be done, filtered through a single topological sort for correct ordering. Flush actions are assembled using far fewer steps and less memory.(link)

        References: #1742, #1081

      • [orm] Along with the UOW rewrite, this also removes an issue introduced in 0.6beta3 regarding topological cycle detection for units of work with long dependency cycles. We now use an algorithm written by Guido (thanks Guido!).(link)

      • [orm] one-to-many relationships now maintain a list of positive parent-child associations within the flush, preventing previous parents marked as deleted from cascading a delete or NULL foreign key set on those child objects, despite the end-user not removing the child from the old association.(link)

        References: #1764

      • [orm] A collection lazy load will switch off default eagerloading on the reverse many-to-one side, since that loading is by definition unnecessary.(link)

        References: #1495

      • [orm] Session.refresh() now does an equivalent expire() on the given instance first, so that the “refresh-expire” cascade is propagated. Previously, refresh() was not affected in any way by the presence of “refresh-expire” cascade. This is a change in behavior versus that of 0.6beta2, where the “lockmode” flag passed to refresh() would cause a version check to occur. Since the instance is first expired, refresh() always upgrades the object to the most recent version.(link)

      • [orm] The ‘refresh-expire’ cascade, when reaching a pending object, will expunge the object if the cascade also includes “delete-orphan”, or will simply detach it otherwise.(link)

        References: #1754

      • [orm] id(obj) is no longer used internally within topological.py, as the sorting functions now require hashable objects only.(link)

        References: #1756

      • [orm] The ORM will set the docstring of all generated descriptors to None by default. This can be overridden using ‘doc’ (or if using Sphinx, attribute docstrings work too).(link)

      • [orm] Added kw argument ‘doc’ to all mapper property callables as well as Column(). Will assemble the string ‘doc’ as the ‘__doc__’ attribute on the descriptor.(link)
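
        A short sketch of the ‘doc’ keyword on Column within a declarative mapping; the class and column names are hypothetical:

            from sqlalchemy import Column, Integer, String
            from sqlalchemy.ext.declarative import declarative_base

            Base = declarative_base()

            class User(Base):
                __tablename__ = 'user'
                id = Column(Integer, primary_key=True, doc="Surrogate primary key")
                name = Column(String(50), doc="Display name")

            # the string is assembled onto the generated descriptor
            print(User.name.__doc__)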

      • [orm] Usage of version_id_col on a backend that supports cursor.rowcount for execute() but not executemany() now works when a delete is issued (already worked for saves, since those don’t use executemany()). For a backend that doesn’t support cursor.rowcount at all, a warning is emitted the same as with saves.(link)

        References: #1761

      • [orm] The ORM now short-term caches the “compiled” form of insert() and update() constructs when flushing lists of objects of all the same class, thereby avoiding redundant compilation per individual INSERT/UPDATE within an individual flush() call.(link)

      • [orm] internal getattr(), setattr(), getcommitted() methods on ColumnProperty, CompositeProperty, RelationshipProperty have been underscored (i.e. are private), signature has changed.(link)

      sql

      • [sql] Restored some bind-labeling logic from 0.5 which ensures that tables with column names that overlap another column of the form “<tablename>_<columnname>” won’t produce errors if column._label is used as a bind name during an UPDATE. Test coverage which wasn’t present in 0.5 has been added.(link)

        References: #1755

      • [sql] somejoin.select(fold_equivalents=True) is no longer deprecated, and will eventually be rolled into a more comprehensive version of the feature.(link)

        References: #1729

      • [sql] the Numeric type raises an enormous warning when expected to convert floats to Decimal from a DBAPI that returns floats. This includes SQLite, Sybase, MS-SQL.(link)

        References: #1759

      • [sql] Fixed an error in expression typing which caused an endless loop for expressions with two NULL types.(link)

      • [sql] Fixed bug in execution_options() feature whereby the existing Transaction and other state information from the parent connection would not be propagated to the sub-connection.(link)

      • [sql] Added new ‘compiled_cache’ execution option. A dictionary where Compiled objects will be cached when the Connection compiles a clause expression into a dialect- and parameter- specific Compiled object. It is the user’s responsibility to manage the size of this dictionary, which will have keys corresponding to the dialect, clause element, the column names within the VALUES or SET clause of an INSERT or UPDATE, as well as the “batch” mode for an INSERT or UPDATE statement.(link)
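
        A minimal sketch of the above; the dictionary is supplied and managed by the caller:

            from sqlalchemy import Column, Integer, MetaData, Table, create_engine, select

            metadata = MetaData()
            t = Table('t', metadata, Column('id', Integer, primary_key=True))

            engine = create_engine("sqlite://")
            metadata.create_all(engine)

            cache = {}  # user-managed; keyed on dialect, clause element, parameter shape
            conn = engine.connect().execution_options(compiled_cache=cache)

            stmt = select([t])
            conn.execute(stmt)  # first execution compiles the statement and stores it in `cache`
            conn.execute(stmt)  # re-executing the same construct reuses the cached Compiled object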

      • [sql] Added get_pk_constraint() to reflection.Inspector, similar to get_primary_keys() except returns a dict that includes the name of the constraint, for supported backends (PG so far).(link)

        References: #1769
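
        An illustrative sketch; SQLite is used here only so the snippet runs standalone, while the constraint name is populated on backends that report it (Postgresql at this point):

            from sqlalchemy import Column, Integer, MetaData, Table, create_engine
            from sqlalchemy.engine import reflection

            engine = create_engine("sqlite://")
            metadata = MetaData()
            Table('thing', metadata, Column('id', Integer, primary_key=True))
            metadata.create_all(engine)

            insp = reflection.Inspector.from_engine(engine)
            # like get_primary_keys(), but returns a dict that can also carry the constraint name
            print(insp.get_pk_constraint('thing'))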

      • [sql] Table.create() and Table.drop() no longer apply metadata-level create/drop events.(link)

        References: #1771

      postgresql

      • [postgresql] Postgresql now reflects sequence names associated with SERIAL columns correctly, after the name of the sequence has been changed. Thanks to Kumar McMillan for the patch.(link)

        References: #1071

      • [postgresql] Repaired missing import in psycopg2._PGNumeric type when unknown numeric is received.(link)

      • [postgresql] psycopg2/pg8000 dialects now aware of REAL[], FLOAT[], DOUBLE_PRECISION[], NUMERIC[] return types without raising an exception.(link)

      • [postgresql] Postgresql reflects the name of primary key constraints, if one exists.(link)

        References: #1769

      oracle

      • [oracle] Now using cx_oracle output converters so that the DBAPI returns natively the kinds of values we prefer:(link)

      • [oracle] NUMBER values with positive precision + scale convert to cx_oracle.STRING and then to Decimal. This allows perfect precision for the Numeric type when using cx_oracle.(link)

        References: #1759

      • [oracle] STRING/FIXED_CHAR now convert to unicode natively. SQLAlchemy’s String types then don’t need to apply any kind of conversions.(link)

      firebird

      • [firebird] The functionality of result.rowcount can be disabled on a per-engine basis by setting ‘enable_rowcount=False’ on create_engine(). Normally, cursor.rowcount is called after any UPDATE or DELETE statement unconditionally, because the cursor is then closed and Firebird requires an open cursor in order to get a rowcount. This call is slightly expensive however so it can be disabled. To re-enable on a per-execution basis, the ‘enable_rowcount=True’ execution option may be used.(link)

      misc

      • [engines] The C extension now also works with DBAPIs which use custom sequences as rows (and not only tuples).(link)

        References: #1757

      • [ext] the compiler extension now allows @compiles decorators on base classes that extend to child classes, @compiles decorators on child classes that aren’t broken by a @compiles decorator on the base class.(link)

      • [ext] Declarative will raise an informative error message if a non-mapped class attribute is referenced in the string-based relationship() arguments.(link)

      • [ext] Further reworked the “mixin” logic in declarative to additionally allow __mapper_args__ as a @classproperty on a mixin, such as to dynamically assign polymorphic_identity.(link)

      • [examples] Updated attribute_shard.py example to use a more robust method of searching a Query for binary expressions which compare columns against literal values.(link)

      0.6beta3

      Released: Sun Mar 28 2010

      orm

      • [orm] Major feature: Added new “subquery” loading capability to relationship(). This is an eager loading option which generates a second SELECT for each collection represented in a query, across all parents at once. The query re-issues the original end-user query wrapped in a subquery, applies joins out to the target collection, and loads all those collections fully in one result, similar to “joined” eager loading but using all inner joins and not re-fetching full parent rows repeatedly (as most DBAPIs seem to do, even if columns are skipped). Subquery loading is available at mapper config level using “lazy=’subquery’” and at the query options level using “subqueryload(props..)”, “subqueryload_all(props...)”.(link)

        References: #1675
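
        Both spellings are sketched below against hypothetical User/Address classes (not part of the original changelog):

            from sqlalchemy import Column, ForeignKey, Integer, create_engine
            from sqlalchemy.ext.declarative import declarative_base
            from sqlalchemy.orm import relationship, sessionmaker, subqueryload

            Base = declarative_base()

            class User(Base):
                __tablename__ = 'user'
                id = Column(Integer, primary_key=True)
                addresses = relationship("Address", lazy='subquery')  # mapper-config form

            class Address(Base):
                __tablename__ = 'address'
                id = Column(Integer, primary_key=True)
                user_id = Column(Integer, ForeignKey('user.id'))

            engine = create_engine("sqlite://")
            Base.metadata.create_all(engine)
            session = sessionmaker(bind=engine)()

            # query-options form: one extra SELECT loads every 'addresses' collection at once
            users = session.query(User).options(subqueryload(User.addresses)).all()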

      • [orm] To accommodate the fact that there are now two kinds of eager loading available, the new names for eagerload() and eagerload_all() are joinedload() and joinedload_all(). The old names will remain as synonyms for the foreseeable future.(link)

      • [orm] The “lazy” flag on the relationship() function now accepts a string argument for all kinds of loading: “select”, “joined”, “subquery”, “noload” and “dynamic”, where the default is now “select”. The old values of True/ False/None still retain their usual meanings and will remain as synonyms for the foreseeable future.(link)

      • [orm] Added with_hint() method to Query() construct. This calls directly down to select().with_hint() and also accepts entities as well as tables and aliases. See with_hint() in the SQL section below.(link)

        References: #921

      • [orm] Fixed bug in Query whereby calling q.join(prop).from_self(...). join(prop) would fail to render the second join outside the subquery, when joining on the same criterion as was on the inside.(link)

      • [orm] Fixed bug in Query whereby the usage of aliased() constructs would fail if the underlying table (but not the actual alias) were referenced inside the subquery generated by q.from_self() or q.select_from().(link)

      • [orm] Fixed bug which affected all eagerload() and similar options such that “remote” eager loads, i.e. eagerloads off of a lazy load such as query(A).options(eagerload(A.b, B.c)) wouldn’t eagerload anything, but using eagerload(“b.c”) would work fine.(link)

      • [orm] Query gains an add_columns(*columns) method which is a multi-version of add_column(col). add_column(col) is future deprecated.(link)

      • [orm] Query.join() will detect if the end result will be “FROM A JOIN A”, and will raise an error if so.(link)

      • [orm] Query.join(Cls.propname, from_joinpoint=True) will check more carefully that “Cls” is compatible with the current joinpoint, and act the same way as Query.join(“propname”, from_joinpoint=True) in that regard.(link)

      orm declarative

      • [declarative] [orm] Using a mixin won’t break if the mixin implements an unpredictable __getattribute__(), i.e. Zope interfaces.(link)

        References: #1746

      • [declarative] [orm] Using @classdecorator and similar on mixins to define __tablename__, __table_args__, etc. now works if the method references attributes on the ultimate subclass.(link)

        References: #1749

      • [declarative] [orm] relationships and columns with foreign keys aren’t allowed on declarative mixins, sorry.(link)

        References: #1751

      sql

      • [sql] Added with_hint() method to select() construct. Specify a table/alias, hint text, and optional dialect name, and “hints” will be rendered in the appropriate place in the statement. Works for Oracle, Sybase, MySQL.(link)

        References: #921
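
        A small sketch; the hint text and index name are hypothetical, and the hint is rendered only when the statement is compiled against the named dialect:

            from sqlalchemy import Column, Integer, MetaData, Table, select

            metadata = MetaData()
            orders = Table('orders', metadata, Column('id', Integer, primary_key=True))

            # placed in the Oracle-appropriate position when compiled for that dialect
            stmt = select([orders]).with_hint(orders, "INDEX(orders ix_orders_id)", 'oracle')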

      • [sql] Fixed bug introduced in 0.6beta2 where column labels would render inside of column expressions already assigned a label.(link)

        References: #1747

      postgresql

      • [postgresql] The psycopg2 dialect will log NOTICE messages via the “sqlalchemy.dialects.postgresql” logger name.(link)

        References: #877

      • [postgresql] the TIME and TIMESTAMP types are now available from the postgresql dialect directly, which add the PG-specific argument ‘precision’ to both. ‘precision’ and ‘timezone’ are correctly reflected for both TIME and TIMESTAMP types.(link)

        References: #997

      mysql

      • [mysql] No longer guessing that TINYINT(1) should be BOOLEAN when reflecting - TINYINT(1) is returned. Use Boolean/BOOLEAN in table definition to get boolean conversion behavior.(link)

        References: #1752

      oracle

      • [oracle] The Oracle dialect will issue VARCHAR type definitions using character counts, i.e. VARCHAR2(50 CHAR), so that the column is sized in terms of characters and not bytes. Column reflection of character types will also use ALL_TAB_COLUMNS.CHAR_LENGTH instead of ALL_TAB_COLUMNS.DATA_LENGTH. Both of these behaviors take effect when the server version is 9 or higher - for version 8, the old behaviors are used.(link)

        References: #1744

      misc

      • [ext] The sqlalchemy.orm.shard module now becomes an extension, sqlalchemy.ext.horizontal_shard. The old import works with a deprecation warning.(link)

      0.6beta2

      Released: Sat Mar 20 2010

      orm

      • [orm] The official name for the relation() function is now relationship(), to eliminate confusion over the relational algebra term. relation() however will remain available in equal capacity for the foreseeable future.(link)

        References: #1740

      • [orm] Added “version_id_generator” argument to Mapper, this is a callable that, given the current value of the “version_id_col”, returns the next version number. Can be used for alternate versioning schemes such as uuid, timestamps.(link)

        References: #1692
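
        A sketch of a uuid-based scheme; the mapped class and column names are hypothetical:

            import uuid

            from sqlalchemy import Column, Integer, String
            from sqlalchemy.ext.declarative import declarative_base

            Base = declarative_base()

            class Document(Base):
                __tablename__ = 'document'
                id = Column(Integer, primary_key=True)
                version_uuid = Column(String(32))

                __mapper_args__ = {
                    'version_id_col': version_uuid,
                    # receives the current version value, returns the next one
                    'version_id_generator': lambda version: uuid.uuid4().hex,
                }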

      • [orm] added “lockmode” kw argument to Session.refresh(), will pass through the string value to Query the same as in with_lockmode(), will also do version check for a version_id_col-enabled mapping.(link)

      • [orm] Fixed bug whereby calling query(A).join(A.bs).add_entity(B) in a joined inheritance scenario would double-add B as a target and produce an invalid query.(link)

        References: #1188

      • [orm] Fixed bug in session.rollback() which involved not removing formerly “pending” objects from the session before re-integrating “deleted” objects, which typically occurred with natural primary keys. If there was a primary key conflict between them, the attach of the deleted would fail internally. The formerly “pending” objects are now expunged first.(link)

        References: #1674

      • [orm] Removed a lot of logging that nobody really cares about, logging that remains will respond to live changes in the log level. No significant overhead is added.(link)

        References: #1719

      • [orm] Fixed bug in session.merge() which prevented dict-like collections from merging.(link)

      • [orm] session.merge() works with relations that specifically don’t include “merge” in their cascade options - the target is ignored completely.(link)

      • [orm] session.merge() will not expire existing scalar attributes on an existing target if the target has a value for that attribute, even if the incoming merged doesn’t have a value for the attribute. This prevents unnecessary loads on existing items. Will still mark the attr as expired if the destination doesn’t have the attr, though, which fulfills some contracts of deferred cols.(link)

        References: #1681

      • [orm] The “allow_null_pks” flag is now called “allow_partial_pks”, defaults to True, acts like it did in 0.5 again. Except, it also is implemented within merge() such that a SELECT won’t be issued for an incoming instance with partially NULL primary key if the flag is False.(link)

        References: #1680

      • [orm] Fixed bug in 0.6-reworked “many-to-one” optimizations such that a many-to-one that is against a non-primary key column on the remote table (i.e. foreign key against a UNIQUE column) will pull the “old” value in from the database during a change, since if it’s in the session we will need it for proper history/backref accounting, and we can’t pull from the local identity map on a non-primary key column.(link)

        References: #1737

      • [orm] fixed internal error which would occur if calling has() or similar complex expression on a single-table inheritance relation().(link)

        References: #1731

      • [orm] query.one() no longer applies LIMIT to the query, this to ensure that it fully counts all object identities present in the result, even in the case where joins may conceal multiple identities for two or more rows. As a bonus, one() can now also be called with a query that issued from_statement() to start with since it no longer modifies the query.(link)

        References: #1688

      • [orm] query.get() now returns None if queried for an identifier that is present in the identity map with a different class than the one requested, i.e. when using polymorphic loading.(link)

        References: #1727

      • [orm] A major fix in query.join(), when the “on” clause is an attribute of an aliased() construct, but there is already an existing join made out to a compatible target, query properly joins to the right aliased() construct instead of sticking onto the right side of the existing join.(link)

        References: #1706

      • [orm] Slight improvement to the existing fix so that needless updates of the primary key column are not issued during a so-called “row switch” operation, i.e. add + delete of two objects with the same PK.(link)

        References: #1362

      • [orm] Now uses sqlalchemy.orm.exc.DetachedInstanceError when an attribute load or refresh action fails due to object being detached from any Session. UnboundExecutionError is specific to engines bound to sessions and statements.(link)

      • [orm] Query called in the context of an expression will render disambiguating labels in all cases. Note that this does not apply to the existing .statement and .subquery() accessor/method, which still honors the .with_labels() setting that defaults to False.(link)

      • [orm] Query.union() retains disambiguating labels within the returned statement, thus avoiding various SQL composition errors which can result from column name conflicts.(link)

        References: #1676

      • [orm] Fixed bug in attribute history that inadvertently invoked __eq__ on mapped instances.(link)

      • [orm] Some internal streamlining of object loading grants a small speedup for large results, estimates are around 10-15%. Gave the “state” internals a good solid cleanup with less complexity, datamembers, method calls, blank dictionary creates.(link)

      • [orm] Documentation clarification for query.delete()(link)

        References: #1689

      • [orm] Fixed cascade bug in many-to-one relation() when attribute was set to None, introduced in r6711 (cascade deleted items into session during add()).(link)

      • [orm] Calling query.order_by() or query.distinct() before calling query.select_from(), query.with_polymorphic(), or query.from_statement() raises an exception now instead of silently dropping those criteria.(link)

        References: #1736

      • [orm] query.scalar() now raises an exception if more than one row is returned. All other behavior remains the same.(link)

        References: #1735

      • [orm] Fixed bug which caused “row switch” logic, that is an INSERT and DELETE replaced by an UPDATE, to fail when version_id_col was in use.(link)

        References: #1692

      orm declarative

      • [declarative] [orm] DeclarativeMeta exclusively uses cls.__dict__ (not dict_) as the source of class information; _as_declarative exclusively uses the dict_ passed to it as the source of class information (which when using DeclarativeMeta is cls.__dict__). This should in theory make it easier for custom metaclasses to modify the state passed into _as_declarative.(link)

      • [declarative] [orm] declarative now accepts mixin classes directly, as a means to provide common functional and column-based elements on all subclasses, as well as a means to propagate a fixed set of __table_args__ or __mapper_args__ to subclasses. For custom combinations of __table_args__/__mapper_args__ from an inherited mixin to local, descriptors can now be used. New details are all up in the Declarative documentation. Thanks to Chris Withers for putting up with my strife on this.(link)

        References: #1707

      • [declarative] [orm] the __mapper_args__ dict is copied when propagating to a subclass, and is taken straight off the class __dict__ to avoid any propagation from the parent. mapper inheritance already propagates the things you want from the parent mapper.(link)

        References: #1393

      • [declarative] [orm] An exception is raised when a single-table subclass specifies a column that is already present on the base class.(link)

        References: #1732

      sql

      • [sql] join() will now simulate a NATURAL JOIN by default. Meaning, if the left side is a join, it will attempt to join the right side to the rightmost side of the left first, and not raise any exceptions about ambiguous join conditions if successful even if there are further join targets across the rest of the left.(link)

        References: #1714

      • [sql] The most common result-processor conversion functions were moved to the new “processors” module. Dialect authors are encouraged to use those functions whenever they correspond to their needs instead of implementing custom ones.(link)

      • [sql] SchemaType and subclasses Boolean, Enum are now serializable, including their ddl listener and other event callables.(link)

        References: #1694, #1698

      • [sql] Some platforms will now interpret certain literal values as non-bind parameters, rendered literally into the SQL statement. This to support strict SQL-92 rules that are enforced by some platforms including MS-SQL and Sybase. In this model, bind parameters aren’t allowed in the columns clause of a SELECT, nor are certain ambiguous expressions like ”?=?”. When this mode is enabled, the base compiler will render the binds as inline literals, but only across strings and numeric values. Other types such as dates will raise an error, unless the dialect subclass defines a literal rendering function for those. The bind parameter must have an embedded literal value already or an error is raised (i.e. won’t work with straight bindparam(‘x’)). Dialects can also expand upon the areas where binds are not accepted, such as within argument lists of functions (which don’t work on MS-SQL when native SQL binding is used).(link)

      • [sql] Added “unicode_errors” parameter to String, Unicode, etc. Behaves like the ‘errors’ keyword argument to the standard library’s string.decode() functions. This flag requires that convert_unicode is set to “force” - otherwise, SQLAlchemy is not guaranteed to handle the task of unicode conversion. Note that this flag adds significant performance overhead to row-fetching operations for backends that already return unicode objects natively (which most DBAPIs do). This flag should only be used as an absolute last resort for reading strings from a column with varied or corrupted encodings, which only applies to databases that accept invalid encodings in the first place (i.e. MySQL, not PG, SQLite, etc.)(link)

      • [sql] Added math negation operator support, -x.(link)

      • [sql] FunctionElement subclasses are now directly executable the same way any func.foo() construct is, with automatic SELECT being applied when passed to execute().(link)

      • [sql] The “type” and “bind” keyword arguments of a func.foo() construct are now local to “func.” constructs and are not part of the FunctionElement base class, allowing a “type” to be handled in a custom constructor or class-level variable.(link)

      • [sql] Restored the keys() method to ResultProxy.(link)

      • [sql] The type/expression system now does a more complete job of determining the return type from an expression as well as the adaptation of the Python operator into a SQL operator, based on the full left/right/operator of the given expression. In particular the date/time/interval system created for Postgresql EXTRACT has now been generalized into the type system. The previously frequent behavior of an expression “column + literal” forcing the type of “literal” to be the same as that of “column” will now usually not occur - the type of “literal” is first derived from the Python type of the literal, assuming standard native Python types + date types, before falling back to that of the known type on the other side of the expression. If the “fallback” type is compatible (i.e. CHAR from String), the literal side will use that. TypeDecorator types override this by default to coerce the “literal” side unconditionally, which can be changed by implementing the coerce_compared_value() method.(link)

        References: #1647, #1683

      • [sql] Made sqlalchemy.sql.expressions.Executable part of public API, used for any expression construct that can be sent to execute(). FunctionElement now inherits Executable so that it gains execution_options(), which are also propagated to the select() that’s generated within execute(). Executable in turn subclasses _Generative which marks any ClauseElement that supports the @_generative decorator - these may also become “public” for the benefit of the compiler extension at some point.(link)

      • [sql] A change to the bind-name conflict solution: an end-user defined bind parameter name that directly conflicts with a column-named bind generated directly from the SET or VALUES clause of an update/insert now generates a compile error. This reduces call counts and eliminates some cases where undesirable name conflicts could still occur.(link)

        References: #1579

      • [sql] Column() requires a type if it has no foreign keys (this is not new). An error is now raised if a Column() has no type and no foreign keys.(link)

        References: #1705

      • [sql] the “scale” argument of the Numeric() type is honored when coercing a returned floating point value into a string on its way to Decimal - this allows accuracy to function on SQLite, MySQL.(link)

        References: #1717

      • [sql] the copy() method of Column now copies over uninitialized “on table attach” events. Helps with the new declarative “mixin” capability.(link)

      mysql

      • [mysql] Fixed reflection bug whereby when COLLATE was present, nullable flag and server defaults would not be reflected.(link)

        References: #1655

      • [mysql] Fixed reflection of TINYINT(1) “boolean” columns defined with integer flags like UNSIGNED.(link)

      • [mysql] Further fixes for the mysql-connector dialect.(link)

        References: #1668

      • [mysql] Composite PK table on InnoDB where the “autoincrement” column isn’t first will emit an explicit “KEY” phrase within CREATE TABLE thereby avoiding errors.(link)

        References: #1496

      • [mysql] Added reflection/create table support for a wide range of MySQL keywords.(link)

        References: #1634

      • [mysql] Fixed import error which could occur reflecting tables on a Windows host.(link)

        References: #1580

      sqlite

      • [sqlite] Added “native_datetime=True” flag to create_engine(). This will cause the DATE and TIMESTAMP types to skip all bind parameter and result row processing, under the assumption that PARSE_DECLTYPES has been enabled on the connection. Note that this is not entirely compatible with the “func.current_date()”, which will be returned as a string.(link)

        References: #1685
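
        A sketch of enabling the flag together with PARSE_DECLTYPES on the underlying sqlite3 connection:

            import sqlite3

            from sqlalchemy import create_engine

            engine = create_engine(
                "sqlite://",
                native_datetime=True,  # skip SQLAlchemy's own DATE/TIMESTAMP string processing
                connect_args={"detect_types": sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES},
            )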

      mssql

      • [mssql] Re-established support for the pymssql dialect.(link)

      • [mssql] Various fixes for implicit returning, reflection, etc. - the MS-SQL dialects aren’t quite complete in 0.6 yet (but are close)(link)

      • [mssql] Added basic support for mxODBC.(link)

        References: #1710

      • [mssql] Removed the text_as_varchar option.(link)

      oracle

      • [oracle] “out” parameters require a type that is supported by cx_oracle. An error will be raised if no cx_oracle type can be found.(link)

      • [oracle] Oracle ‘DATE’ now does not perform any result processing, as the DATE type in Oracle stores full date+time objects, that’s what you’ll get. Note that the generic types.Date type will still call value.date() on incoming values, however. When reflecting a table, the reflected type will be ‘DATE’.(link)

      • [oracle] Added preliminary support for Oracle’s WITH_UNICODE mode. At the very least this establishes initial support for cx_Oracle with Python 3. When WITH_UNICODE mode is used in Python 2.x, a large and scary warning is emitted asking that the user seriously consider the usage of this difficult mode of operation.(link)

        References: #1670

      • [oracle] The except_() method now renders as MINUS on Oracle, which is more or less equivalent on that platform.(link)

        References: #1712

      • [oracle] Added support for rendering and reflecting TIMESTAMP WITH TIME ZONE, i.e. TIMESTAMP(timezone=True).(link)

        References: #651

      • [oracle] Oracle INTERVAL type can now be reflected.(link)

      misc

      • [py3k] Improved the installation/test setup regarding Python 3, now that Distribute runs on Py3k. distribute_setup.py is now included. See README.py3k for Python 3 installation/testing instructions.(link)

      • [engines] Added an optional C extension to speed up the sql layer by reimplementing RowProxy and the most common result processors. The actual speedups will depend heavily on your DBAPI and the mix of datatypes used in your tables, and can vary from a 30% improvement to more than 200%. It also provides a modest (~15-20%) indirect improvement to ORM speed for large queries. Note that it is not built/installed by default. See README for installation instructions.(link)

      • [engines] the execution sequence pulls all rowcount/last inserted ID info from the cursor before commit() is called on the DBAPI connection in an “autocommit” scenario. This helps mxodbc with rowcount and is probably a good idea overall.(link)

      • [engines] Opened up logging a bit such that isEnabledFor() is called more often, so that changes to the log level for engine/pool will be reflected on next connect. This adds a small amount of method call overhead. It’s negligible and will make life a lot easier for all those situations when logging just happens to be configured after create_engine() is called.(link)

        References: #1719

      • [engines] The assert_unicode flag is deprecated. SQLAlchemy will raise a warning in all cases where it is asked to encode a non-unicode Python string, as well as when a Unicode or UnicodeType type is explicitly passed a bytestring. The String type will do nothing for DBAPIs that already accept Python unicode objects.(link)

      • [engines] Bind parameters are sent as a tuple instead of a list. Some backend drivers will not accept bind parameters as a list.(link)

      • [engines] threadlocal engine wasn’t properly closing the connection upon close() - fixed that.(link)

      • [engines] Transaction object doesn’t rollback or commit if it isn’t “active”, allows more accurate nesting of begin/rollback/commit.(link)

      • [engines] Python unicode objects as binds result in the Unicode type, not string, thus eliminating a certain class of unicode errors on drivers that don’t support unicode binds.(link)

      • [engines] Added “logging_name” argument to create_engine(), Pool() constructor as well as “pool_logging_name” argument to create_engine() which filters down to that of Pool. Issues the given string name within the “name” field of logging messages instead of the default hex identifier string.(link)

        References: #1555

      • [engines] The visit_pool() method of Dialect is removed, and replaced with on_connect(). This method returns a callable which receives the raw DBAPI connection after each one is created. The callable is assembled into a first_connect/connect pool listener by the connection strategy if non-None. Provides a simpler interface for dialects.(link)

      • [engines] StaticPool now initializes, disposes and recreates without opening a new connection - the connection is only opened when first requested. dispose() also works on AssertionPool now.(link)

        References: #1728

      • [metadata] Added the ability to strip schema information when using “tometadata” by passing “schema=None” as an argument. If schema is not specified then the table’s schema is retained.(link)

        References: #1673
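
        A small sketch; the schema and table names are hypothetical:

            from sqlalchemy import Column, Integer, MetaData, Table

            source = MetaData()
            orders = Table('orders', source,
                           Column('id', Integer, primary_key=True),
                           schema='warehouse')

            dest = MetaData()
            copied = orders.tometadata(dest, schema=None)  # schema information is stripped
            print(copied.schema)  # None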

      • [sybase] Implemented a preliminary working dialect for Sybase, with sub-implementations for Python-Sybase as well as Pyodbc. Handles table creates/drops and basic round trip functionality. Does not yet include reflection or comprehensive support of unicode/special expressions/etc.(link)

      • [examples] Changed the beaker cache example a bit to have a separate RelationCache option for lazyload caching. This object does a lookup among any number of potential attributes more efficiently by grouping several into a common structure. Both FromCache and RelationCache are simpler individually.(link)

      • [documentation] Major cleanup work in the docs to link class, function, and method names into the API docs.(link)

        References: #1700

      0.6beta1

      Released: Wed Feb 03 2010

      orm

      • [orm]

        Changes to query.update() and query.delete():
        • the ‘expire’ option on query.update() has been renamed to ‘fetch’, thus matching that of query.delete(). ‘expire’ is deprecated and issues a warning.
        • query.update() and query.delete() both default to ‘evaluate’ for the synchronize strategy.
        • the ‘synchronize’ strategy for update() and delete() raises an error on failure. There is no implicit fallback onto “fetch”. Failure of evaluation is based on the structure of criteria, so success/failure is deterministic based on code structure.
        (link)

      • [orm]

        Enhancements on many-to-one relations:
        • many-to-one relations now fire off a lazyload in fewer cases, including that in most cases they will not fetch the “old” value when it is replaced with a new one.
        • many-to-one relation to a joined-table subclass now uses get() for a simple load (known as the “use_get” condition), i.e. Related->Sub(Base), without the need to redefine the primaryjoin condition in terms of the base table.
        • specifying a foreign key with a declarative column, i.e. ForeignKey(MyRelatedClass.id) doesn’t break the “use_get” condition from taking place
        • relation(), eagerload(), and eagerload_all() now feature an option called “innerjoin”. Specify True or False to control whether an eager join is constructed as an INNER or OUTER join. Default is False as always. The mapper options will override whichever setting is specified on relation(). Should generally be set for many-to-one, not nullable foreign key relations to allow improved join performance.
        • the behavior of eagerloading such that the main query is wrapped in a subquery when LIMIT/OFFSET are present now makes an exception for the case when all eager loads are many-to-one joins. In those cases, the eager joins are against the parent table directly along with the limit/offset without the extra overhead of a subquery, since a many-to-one join does not add rows to the result.
        (link)

        References: #1186, #1492, #1544

      • [orm] Enhancements / Changes on Session.merge():(link)

      • [orm] the “dont_load=True” flag on Session.merge() is deprecated and is now “load=False”.(link)

      • [orm] Session.merge() is performance optimized, using half the call counts for “load=False” mode compared to 0.5 and significantly fewer SQL queries in the case of collections for “load=True” mode.(link)

      • [orm] merge() will not issue a needless merge of attributes if the given instance is the same instance which is already present.(link)

      • [orm] merge() now also merges the “options” associated with a given state, i.e. those passed through query.options() which follow along with an instance, such as options to eagerly- or lazyily- load various attributes. This is essential for the construction of highly integrated caching schemes. This is a subtle behavioral change vs. 0.5.(link)

      • [orm] A bug was fixed regarding the serialization of the “loader path” present on an instance’s state, which is also necessary when combining the usage of merge() with serialized state and associated options that should be preserved.(link)

      • [orm] The all new merge() is showcased in a new comprehensive example of how to integrate Beaker with SQLAlchemy. See the notes in the “examples” note below.(link)

      • [orm] Primary key values can now be changed on a joined-table inheritance object, and ON UPDATE CASCADE will be taken into account when the flush happens. Set the new “passive_updates” flag to False on mapper() when using SQLite or MySQL/MyISAM.(link)

        References: #1362

      • [orm] flush() now detects when a primary key column was updated by an ON UPDATE CASCADE operation from another primary key, and can then locate the row for a subsequent UPDATE on the new PK value. This occurs when a relation() is there to establish the relationship as well as passive_updates=True.(link)

        References: #1671

      • [orm] the “save-update” cascade will now cascade the pending removed values from a scalar or collection attribute into the new session during an add() operation. This so that the flush() operation will also delete or modify rows of those disconnected items.(link)

      • [orm] Using a “dynamic” loader with a “secondary” table now produces a query where the “secondary” table is not aliased. This allows the secondary Table object to be used in the “order_by” attribute of the relation(), and also allows it to be used in filter criterion against the dynamic relation.(link)

        References: #1531

      • [orm] relation() with uselist=False will emit a warning when an eager or lazy load locates more than one valid value for the row. This may be due to primaryjoin/secondaryjoin conditions which aren’t appropriate for an eager LEFT OUTER JOIN or for other conditions.(link)

        References: #1643

      • [orm] an explicit check occurs when a synonym() is used with map_column=True, when a ColumnProperty (deferred or otherwise) exists separately in the properties dictionary sent to mapper with the same keyname. Instead of silently replacing the existing property (and possible options on that property), an error is raised.(link)

        References: #1633

      • [orm] a “dynamic” loader sets up its query criterion at construction time so that the actual query is returned from non-cloning accessors like “statement”.(link)

      • [orm] the “named tuple” objects returned when iterating a Query() are now pickleable.(link)

      • [orm] mapping to a select() construct now requires that you make an alias() out of it distinctly. This is to eliminate confusion over such issues as those in the referenced ticket.(link)

        References: #1542

      • [orm] query.join() has been reworked to provide more consistent behavior and more flexibility.(link)

        References: #1537

      • [orm] query.select_from() accepts multiple clauses to produce multiple comma separated entries within the FROM clause. Useful when selecting from multiple-homed join() clauses.(link)

      • [orm] query.select_from() also accepts mapped classes, aliased() constructs, and mappers as arguments. In particular this helps when querying from multiple joined-table classes to ensure the full join gets rendered.(link)

      • [orm] query.get() can be used with a mapping to an outer join where one or more of the primary key values are None.(link)

        References: #1135

      • [orm] query.from_self(), query.union(), others which do a “SELECT * from (SELECT...)” type of nesting will do a better job translating column expressions within the subquery to the columns clause of the outer query. This is potentially backwards incompatible with 0.5, in that this may break queries with literal expressions that do not have labels applied (i.e. literal(‘foo’), etc.)(link)

        References: #1568

      • [orm] relation primaryjoin and secondaryjoin now check that they are column-expressions, not just clause elements. this prohibits things like FROM expressions being placed there directly.(link)

        References: #1622

      • [orm] expression.null() is fully understood the same way None is when comparing an object/collection-referencing attribute within query.filter(), filter_by(), etc.(link)

        References: #1415

      • [orm] added “make_transient()” helper function which transforms a persistent/ detached instance into a transient one (i.e. deletes the instance_key and removes from any session.)(link)

        References: #1052
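
        A brief sketch with a hypothetical mapped class:

            from sqlalchemy import Column, Integer, create_engine
            from sqlalchemy.ext.declarative import declarative_base
            from sqlalchemy.orm import make_transient, sessionmaker

            Base = declarative_base()

            class Thing(Base):
                __tablename__ = 'thing'
                id = Column(Integer, primary_key=True)

            engine = create_engine("sqlite://")
            Base.metadata.create_all(engine)
            session = sessionmaker(bind=engine)()

            obj = Thing()
            session.add(obj)
            session.flush()       # obj is now persistent

            make_transient(obj)   # instance_key removed; obj no longer belongs to the session
            obj.id = None
            session.add(obj)
            session.flush()       # INSERTs a brand new row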

      • [orm] the allow_null_pks flag on mapper() is deprecated, and the feature is turned “on” by default. This means that a row which has a non-null value for any of its primary key columns will be considered an identity. The need for this scenario typically only occurs when mapping to an outer join.(link)

        References: #1339

      • [orm] the mechanics of “backref” have been fully merged into the finer grained “back_populates” system, and take place entirely within the _generate_backref() method of RelationProperty. This makes the initialization procedure of RelationProperty simpler and allows easier propagation of settings (such as from subclasses of RelationProperty) into the reverse reference. The internal BackRef() is gone and backref() returns a plain tuple that is understood by RelationProperty.(link)

      • [orm] The version_id_col feature on mapper() will raise a warning when used with dialects that don’t support “rowcount” adequately.(link)

        References: #1569

      • [orm] added “execution_options()” to Query, so that options can be passed to the resulting statement. Currently only Select-statements have these options, and the only option used is “stream_results”, and the only dialect which knows “stream_results” is psycopg2.(link)

      • [orm] Query.yield_per() will set the “stream_results” statement option automatically.(link)

      • [orm]

        Deprecated or removed:
        • ‘allow_null_pks’ flag on mapper() is deprecated. It does nothing now and the setting is “on” in all cases.
        • ‘transactional’ flag on sessionmaker() and others is removed. Use ‘autocommit=True’ to indicate ‘transactional=False’.
        • ‘polymorphic_fetch’ argument on mapper() is removed. Loading can be controlled using the ‘with_polymorphic’ option.
        • ‘select_table’ argument on mapper() is removed. Use ‘with_polymorphic=(“*”, <some selectable>)’ for this functionality.
        • ‘proxy’ argument on synonym() is removed. This flag did nothing throughout 0.5, as the “proxy generation” behavior is now automatic.
        • Passing a single list of elements to eagerload(), eagerload_all(), contains_eager(), lazyload(), defer(), and undefer() instead of multiple positional *args is deprecated.
        • Passing a single list of elements to query.order_by(), query.group_by(), query.join(), or query.outerjoin() instead of multiple positional *args is deprecated.
        • query.iterate_instances() is removed. Use query.instances().
        • Query.query_from_parent() is removed. Use the sqlalchemy.orm.with_parent() function to produce a “parent” clause, or alternatively query.with_parent().
        • query._from_self() is removed, use query.from_self() instead.
        • the “comparator” argument to composite() is removed. Use “comparator_factory”.
        • RelationProperty._get_join() is removed.
        • the ‘echo_uow’ flag on Session is removed. Use logging on the “sqlalchemy.orm.unitofwork” name.
        • session.clear() is removed. use session.expunge_all().
        • session.save(), session.update(), session.save_or_update() are removed. Use session.add() and session.add_all().
        • the “objects” flag on session.flush() remains deprecated.
        • the “dont_load=True” flag on session.merge() is deprecated in favor of “load=False”.
        • ScopedSession.mapper remains deprecated. See the usage recipe at http://www.sqlalchemy.org/trac/wiki/UsageRecipes/SessionAwareMapper
        • passing an InstanceState (internal SQLAlchemy state object) to attributes.init_collection() or attributes.get_history() is deprecated. These functions are public API and normally expect a regular mapped object instance.
        • the ‘engine’ parameter to declarative_base() is removed. Use the ‘bind’ keyword argument.
        (link)

      sql

      • [sql] the “autocommit” flag on select() and text() as well as select().autocommit() are deprecated - now call .execution_options(autocommit=True) on either of those constructs, also available directly on Connection and orm.Query.(link)
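
        The replacement spelling, sketched with a hypothetical statement:

            from sqlalchemy import text

            # replaces the deprecated autocommit flag / .autocommit() generative call
            stmt = text("UPDATE counters SET value = value + 1").execution_options(autocommit=True)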

      • [sql] the autoincrement flag on column now indicates the column which should be linked to cursor.lastrowid, if that method is used. See the API docs for details.(link)

      • [sql] an executemany() now requires that all bound parameter sets contain all keys that are present in the first bound parameter set. The structure and behavior of an insert/update statement is very much determined by the first parameter set, including which defaults are going to fire off, and a minimum of guesswork is performed with all the rest so that performance is not impacted. For this reason defaults would otherwise silently “fail” for missing parameters, so this is now guarded against.(link)

        References: #1566

      • [sql] returning() support is native to insert(), update(), delete(). Implementations of varying levels of functionality exist for Postgresql, Firebird, MSSQL and Oracle. returning() can be called explicitly with column expressions which are then returned in the resultset, usually via fetchone() or first().

        insert() constructs will also use RETURNING implicitly to get newly generated primary key values, if the database version in use supports it (a version number check is performed). This occurs if no end-user returning() was specified.

        (link)

      • [sql] union(), intersect(), except() and other “compound” types of statements have more consistent behavior w.r.t. parenthesizing. Each compound element embedded within another will now be grouped with parenthesis - previously, the first compound element in the list would not be grouped, as SQLite doesn’t like a statement to start with parenthesis. However, Postgresql in particular has precedence rules regarding INTERSECT, and it is more consistent for parenthesis to be applied equally to all sub-elements. So now, the workaround for SQLite is also what the workaround for PG was previously - when nesting compound elements, the first one usually needs ”.alias().select()” called on it to wrap it inside of a subquery.(link)

        References: #1665

      • [sql] insert() and update() constructs can now embed bindparam() objects using names that match the keys of columns. These bind parameters will circumvent the usual route to those keys showing up in the VALUES or SET clause of the generated SQL.(link)

        References: #1579
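
        A sketch of an executemany-style INSERT whose bind names match the column keys; the table and values are hypothetical:

            from sqlalchemy import Column, Integer, MetaData, String, Table, bindparam, create_engine

            metadata = MetaData()
            users = Table('users', metadata,
                          Column('id', Integer, primary_key=True),
                          Column('name', String(50)))

            engine = create_engine("sqlite://")
            metadata.create_all(engine)

            # the binds named 'id' and 'name' are used verbatim in the VALUES clause
            stmt = users.insert().values(id=bindparam('id'), name=bindparam('name'))
            engine.execute(stmt, [{'id': 1, 'name': 'ed'}, {'id': 2, 'name': 'wendy'}])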

      • [sql] the Binary type now returns data as a Python string (or a “bytes” type in Python 3), instead of the built-in “buffer” type. This allows symmetric round trips of binary data.(link)

        References: #1524

      • [sql] Added a tuple_() construct, allows sets of expressions to be compared to another set, typically with IN against composite primary keys or similar. Also accepts an IN with multiple columns. The “scalar select can have only one column” error message is removed - will rely upon the database to report problems with col mismatch.(link)
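
        A short sketch of an IN comparison against a composite key; the table is hypothetical:

            from sqlalchemy import Column, Integer, MetaData, Table, select, tuple_

            metadata = MetaData()
            versions = Table('versions', metadata,
                             Column('major', Integer, primary_key=True),
                             Column('minor', Integer, primary_key=True))

            stmt = select([versions]).where(
                tuple_(versions.c.major, versions.c.minor).in_([(1, 0), (1, 2), (2, 5)])
            )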

      • [sql] User-defined “default” and “onupdate” callables which accept a context should now call upon “context.current_parameters” to get at the dictionary of bind parameters currently being processed. This dict is available in the same way regardless of single-execute or executemany-style statement execution.(link)

      • [sql] multi-part schema names, i.e. with dots such as “dbo.master”, are now rendered in select() labels with underscores for dots, i.e. “dbo_master_table_column”. This is a “friendly” label that behaves better in result sets.(link)

        References: #1428

      • [sql] removed needless “counter” behavior with select() labelnames that match a column name in the table, i.e. generates “tablename_id” for “id”, instead of “tablename_id_1” in an attempt to avoid naming conflicts, when the table has a column actually named “tablename_id” - this is because the labeling logic is always applied to all columns so a naming conflict will never occur.(link)

      • [sql] calling expr.in_([]), i.e. with an empty list, emits a warning before issuing the usual “expr != expr” clause. The “expr != expr” can be very expensive, and it’s preferred that the user not issue in_() if the list is empty, instead simply not querying, or modifying the criterion as appropriate for more complex situations.(link)

        References: #1628

      • [sql] Added “execution_options()” to select()/text(), which set the default options for the Connection. See the note in “engines”.(link)

      • [sql]

        Deprecated or removed:
        • “scalar” flag on select() is removed, use select.as_scalar().
        • “shortname” attribute on bindparam() is removed.
        • postgres_returning, firebird_returning flags on insert(), update(), delete() are deprecated, use the new returning() method.
        • fold_equivalents flag on join is deprecated (will remain until a replacement is implemented)
        (link)

        References: #1131

      schema

      • [schema] the __contains__() method of MetaData now accepts strings or Table objects as arguments. If given a Table, the argument is converted to table.key first, i.e. “[schemaname.]<tablename>”(link)

        References: #1541

      • [schema] deprecated MetaData.connect() and ThreadLocalMetaData.connect() have been removed - send the “bind” attribute to bind a metadata.(link)

      • [schema] deprecated metadata.table_iterator() method removed (use sorted_tables)(link)

      • [schema] deprecated PassiveDefault - use DefaultClause.(link)

      • [schema] the “metadata” argument is removed from DefaultGenerator and subclasses, but remains locally present on Sequence, which is a standalone construct in DDL.(link)

      • [schema] Removed public mutability from Index and Constraint objects:

        • ForeignKeyConstraint.append_element()
        • Index.append_column()
        • UniqueConstraint.append_column()
        • PrimaryKeyConstraint.add()
        • PrimaryKeyConstraint.remove()

        These should be constructed declaratively (i.e. in one construction).

        (link)

      • [schema] The “start” and “increment” attributes on Sequence now generate “START WITH” and “INCREMENT BY” by default, on Oracle and Postgresql. Firebird doesn’t support these keywords right now.(link)

        References: #1545
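
        For example, a definition such as the following now renders START WITH 1000 / INCREMENT BY 10 on the supporting backends:

            from sqlalchemy import Sequence

            order_id_seq = Sequence('order_id_seq', start=1000, increment=10)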

      • [schema] UniqueConstraint, Index, PrimaryKeyConstraint all accept lists of column names or column objects as arguments.(link)
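
        A small sketch showing both string names and Column objects as arguments; the table is hypothetical:

            from sqlalchemy import Column, Index, Integer, MetaData, String, Table, UniqueConstraint

            metadata = MetaData()
            person = Table('person', metadata,
                           Column('id', Integer, primary_key=True),
                           Column('first', String(50)),
                           Column('last', String(50)),
                           UniqueConstraint('first', 'last', name='uq_person_name'))

            # Column objects are accepted as well as names
            Index('ix_person_last', person.c.last)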

      • [schema]

        Other removed things:
        • Table.key (no idea what this was for)
        • Table.primary_key is not assignable - use table.append_constraint(PrimaryKeyConstraint(...))
        • Column.bind (get via column.table.bind)
        • Column.metadata (get via column.table.metadata)
        • Column.sequence (use column.default)
        • ForeignKey(constraint=some_parent) (is now private _constraint)
        (link)

      • [schema] The use_alter flag on ForeignKey is now a shortcut option for operations that can be hand-constructed using the DDL() event system. A side effect of this refactor is that ForeignKeyConstraint objects with use_alter=True will not be emitted on SQLite, which does not support ALTER for foreign keys.(link)

      • [schema] ForeignKey and ForeignKeyConstraint objects now correctly copy() all their public keyword arguments.(link)

        References: #1605

      postgresql

      • [postgresql] New dialects: pg8000, zxjdbc, and pypostgresql on py3k.(link)

      • [postgresql] The “postgres” dialect is now named “postgresql” ! Connection strings look like:

        postgresql://scott:tiger@localhost/test
        postgresql+pg8000://scott:tiger@localhost/test

        The “postgres” name remains for backwards compatibility in the following ways:

        • There is a “postgres.py” dummy dialect which allows old URLs to work, i.e. postgres://scott:tiger@localhost/test
        • The “postgres” name can be imported from the old “databases” module, i.e. “from sqlalchemy.databases import postgres” as well as “dialects”, “from sqlalchemy.dialects.postgres import base as pg”, will send a deprecation warning.
        • Special expression arguments are now named “postgresql_returning” and “postgresql_where”, but the older “postgres_returning” and “postgres_where” names still work with a deprecation warning.
        (link)

      • [postgresql] “postgresql_where” now accepts SQL expressions which can also include literals, which will be quoted as needed.(link)

      • [postgresql] The psycopg2 dialect now uses psycopg2’s “unicode extension” on all new connections, which allows all String/Text/etc. types to skip the need to post-process bytestrings into unicode (an expensive step due to its volume). Other dialects which return unicode natively (pg8000, zxjdbc) also skip unicode post-processing.(link)

      • [postgresql] Added new ENUM type, which exists as a schema-level construct and extends the generic Enum type. Automatically associates itself with tables and their parent metadata to issue the appropriate CREATE TYPE/DROP TYPE commands as needed, supports unicode labels, supports reflection.(link)

        References: #1511
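
        A minimal sketch; the labels and type name are hypothetical:

            from sqlalchemy import Column, Integer, MetaData, Table
            from sqlalchemy.dialects.postgresql import ENUM

            metadata = MetaData()
            mood = ENUM('happy', 'sad', 'neutral', name='mood_type')

            person = Table('person', metadata,
                           Column('id', Integer, primary_key=True),
                           Column('mood', mood))
            # metadata.create_all(pg_engine) emits CREATE TYPE mood_type ... before CREATE TABLE,
            # and drop_all() emits the corresponding DROP TYPE.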

      • [postgresql] INTERVAL supports an optional “precision” argument corresponding to the argument that PG accepts.(link)

      • [postgresql] using new dialect.initialize() feature to set up version-dependent behavior.(link)

      • [postgresql] somewhat better support for % signs in table/column names; psycopg2 can’t handle a bind parameter name of %(foobar)s however and SQLA doesn’t want to add overhead just to treat that one non-existent use case.(link)

        References: #1279

      • [postgresql] Inserting NULL into a primary key + foreign key column will allow the “not null constraint” error to raise, not an attempt to execute a nonexistent “col_id_seq” sequence.(link)

        References: #1516

      • [postgresql] autoincrement SELECT statements, i.e. those which select from a procedure that modifies rows, now work with server-side cursor mode (the named cursor isn’t used for such statements.)(link)

      • [postgresql] postgresql dialect can properly detect pg “devel” version strings, i.e. “8.5devel”(link)

        References: #1636

      • [postgresql] The psycopg2 dialect now respects the statement option “stream_results”. This option overrides the connection setting “server_side_cursors”. If true, server side cursors will be used for the statement. If false, they will not be used, even if “server_side_cursors” is true on the connection.(link)

        References: #1619
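
        A minimal sketch of the statement-level option, assuming a running Postgresql database at the given URL (the URL and table are hypothetical):

        from sqlalchemy import create_engine, text

        engine = create_engine("postgresql+psycopg2://scott:tiger@localhost/test")
        conn = engine.connect()

        # use a server-side (named) cursor for just this statement
        result = conn.execution_options(stream_results=True).execute(
            text("SELECT * FROM big_table"))
        for row in result:
            pass  # rows are fetched from the server incrementally
        conn.close()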

      mysql

      • [mysql] New dialects: oursql, a new native dialect, MySQL Connector/Python, a native Python port of MySQLdb, and of course zxjdbc on Jython.(link)

      • [mysql] VARCHAR/NVARCHAR will not render without a length; an error is raised before the statement is passed to MySQL. This doesn’t impact CAST, since VARCHAR is not allowed in MySQL CAST anyway; the dialect renders CHAR/NCHAR in those cases.(link)

      • [mysql] all the _detect_XXX() functions now run once underneath dialect.initialize()(link)

      • [mysql] somewhat better support for % signs in table/column names; MySQLdb can’t handle % signs in SQL when executemany() is used, and SQLA doesn’t want to add overhead just to treat that one non-existent use case.(link)

        References: #1279

      • [mysql] the BINARY and MSBinary types now generate “BINARY” in all cases. Omitting the “length” parameter will generate “BINARY” with no length. Use BLOB to generate an unlengthed binary column.(link)

      • [mysql] the “quoting=’quoted’” argument to MSEnum/ENUM is deprecated. It’s best to rely upon the automatic quoting.(link)

      • [mysql] ENUM now subclasses the new generic Enum type, and also handles unicode values implicitly, if the given label names are unicode objects.(link)

      • [mysql] a column of type TIMESTAMP now defaults to NULL if “nullable=False” is not passed to Column(), and no default is present. This is now consistent with all other types, and in the case of TIMESTAMP explicitly renders “NULL” due to MySQL’s “switching” of default nullability for TIMESTAMP columns.(link)

        References: #1539

      sqlite

      • [sqlite] DATE, TIME and DATETIME types can now take optional storage_format and regexp arguments. storage_format can be used to store those types using a custom string format. regexp allows a custom regular expression to be used to match string values from the database.(link)

      • [sqlite] Time and DateTime types now use by default a stricter regular expression to match strings from the database. Use the regexp argument if you are using data stored in a legacy format.(link)

      • [sqlite] __legacy_microseconds__ on SQLite Time and DateTime types is not supported anymore. You should use the storage_format argument instead.(link)

      • [sqlite] Date, Time and DateTime types are now stricter in what they accept as bind parameters: Date type only accepts date objects (and datetime ones, because they inherit from date), Time only accepts time objects, and DateTime only accepts date and datetime objects.(link)

      • [sqlite] Table() supports a keyword argument “sqlite_autoincrement”, which applies the SQLite keyword “AUTOINCREMENT” to the single integer primary key column when generating DDL. Will prevent generation of a separate PRIMARY KEY constraint.(link)

        References: #1016
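
        A minimal sketch (the table is hypothetical):

        from sqlalchemy import MetaData, Table, Column, Integer, String, create_engine

        metadata = MetaData()
        note = Table(
            "note", metadata,
            Column("id", Integer, primary_key=True),
            Column("body", String),
            sqlite_autoincrement=True,
        )
        # on SQLite this renders DDL along the lines of:
        #   CREATE TABLE note (id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, body VARCHAR)
        metadata.create_all(create_engine("sqlite://"))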

      mssql

      • [mssql] MSSQL + Pyodbc + FreeTDS now works for the most part, with possible exceptions regarding binary data as well as unicode schema identifiers.(link)

      • [mssql] the “has_window_funcs” flag is removed. LIMIT/OFFSET usage will use ROW_NUMBER as always, and if on an older version of SQL Server, the operation fails. The behavior is exactly the same except the error is raised by SQL Server instead of the dialect, and no flag setting is required to enable it.(link)

      • [mssql] the “auto_identity_insert” flag is removed. This feature always takes effect when an INSERT statement overrides a column that is known to have a sequence on it. As with “has_window_funcs”, if the underlying driver doesn’t support this, then you can’t do this operation in any case, so there’s no point in having a flag.(link)

      • [mssql] using new dialect.initialize() feature to set up version-dependent behavior.(link)

      • [mssql] removed references to sequence which is no longer used. implicit identities in mssql work the same as implicit sequences on any other dialects. Explicit sequences are enabled through the use of “default=Sequence()”. See the MSSQL dialect documentation for more information.(link)

      oracle

      • [oracle] unit tests pass 100% with cx_oracle !(link)

      • [oracle] support for cx_Oracle’s “native unicode” mode which does not require NLS_LANG to be set. Use version 5.0.2 or later of cx_oracle.(link)

      • [oracle] an NCLOB type is added to the base types.(link)

      • [oracle] use_ansi=False won’t leak into the FROM/WHERE clause of a statement that’s selecting from a subquery that also uses JOIN/OUTERJOIN.(link)

      • [oracle] added native INTERVAL type to the dialect. This supports only the DAY TO SECOND interval type so far due to lack of support in cx_oracle for YEAR TO MONTH.(link)

        References: #1467

      • [oracle] usage of the CHAR type results in cx_oracle’s FIXED_CHAR dbapi type being bound to statements.(link)

      • [oracle] the Oracle dialect now features NUMBER which intends to act just like Oracle’s NUMBER type. It is the primary numeric type returned by table reflection and attempts to return Decimal()/float/int based on the precision/scale parameters.(link)

        References: #885

      • [oracle] func.char_length is a generic function for LENGTH(link)

      • [oracle] ForeignKey() which includes onupdate=<value> will emit a warning rather than ON UPDATE CASCADE, which is unsupported by Oracle(link)

      • [oracle] the keys() method of RowProxy() now returns the result column names normalized to be SQLAlchemy case insensitive names. This means they will be lower case for case insensitive names, whereas the DBAPI would normally return them as UPPERCASE names. This allows row keys() to be compatible with further SQLAlchemy operations.(link)

      • [oracle] using new dialect.initialize() feature to set up version-dependent behavior.(link)

      • [oracle] using types.BigInteger with Oracle will generate NUMBER(19)(link)

        References: #1125

      • [oracle] “case sensitivity” feature will detect an all-lowercase case-sensitive column name during reflect and add “quote=True” to the generated Column, so that proper quoting is maintained.(link)

      firebird

      • [firebird] the keys() method of RowProxy() now returns the result column names normalized to be SQLAlchemy case insensitive names. This means they will be lower case for case insensitive names, whereas the DBAPI would normally return them as UPPERCASE names. This allows row keys() to be compatible with further SQLAlchemy operations.(link)

      • [firebird] using new dialect.initialize() feature to set up version-dependent behavior.(link)

      • [firebird] “case sensitivity” feature will detect an all-lowercase case-sensitive column name during reflect and add “quote=True” to the generated Column, so that proper quoting is maintained.(link)

      misc

      • [release] [major] For the full set of feature descriptions, see http://www.sqlalchemy.org/trac/wiki/06Migration . This document is a work in progress.(link)

      • [release] [major] All bug fixes and feature enhancements from the most recent 0.5 version and below are also included within 0.6.(link)

      • [release] [major] Platforms targeted now include Python 2.4/2.5/2.6, Python 3.1, Jython 2.5.(link)

      • [engines] transaction isolation level may be specified with create_engine(... isolation_level=”...”); available on postgresql and sqlite.(link)

        References: #443
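
        A minimal sketch (the URL is hypothetical; supported level names depend on the backend):

        from sqlalchemy import create_engine

        engine = create_engine(
            "postgresql://scott:tiger@localhost/test",
            isolation_level="SERIALIZABLE",
        )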

      • [engines] Connection has execution_options(), a generative method which accepts keywords that affect how the statement is executed w.r.t. the DBAPI. Currently supports “stream_results”, which causes psycopg2 to use a server side cursor for that statement, as well as “autocommit”, which is the new location for the “autocommit” option from select() and text(). select() and text() also have .execution_options() as well as ORM Query().(link)
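
        A minimal sketch of the generative pattern with the “autocommit” option (the table in the statement is hypothetical):

        from sqlalchemy import text

        # a COMMIT is issued after execution when run outside an explicit transaction
        stmt = text("UPDATE account SET balance = balance + 1").execution_options(autocommit=True)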

      • [engines] fixed the import for entrypoint-driven dialects to not rely upon silly tb_info trick to determine import error status.(link)

        References: #1630

      • [engines] added first() method to ResultProxy, returns first row and closes result set immediately.(link)

      • [engines] RowProxy objects are now pickleable, i.e. the object returned by result.fetchone(), result.fetchall() etc.(link)

      • [engines] RowProxy no longer has a close() method, as the row no longer maintains a reference to the parent. Call close() on the parent ResultProxy instead, or use autoclose.(link)

      • [engines] ResultProxy internals have been overhauled to greatly reduce method call counts when fetching columns. Can provide a large speed improvement (up to more than 100%) when fetching large result sets. The improvement is larger when fetching columns that have no type-level processing applied and when using results as tuples (instead of as dictionaries). Many thanks to Elixir’s Gaëtan de Menten for this dramatic improvement !(link)

        References: #1586

      • [engines] Databases which rely upon postfetch of “last inserted id” to get at a generated sequence value (i.e. MySQL, MS-SQL) now work correctly when there is a composite primary key where the “autoincrement” column is not the first primary key column in the table.(link)

      • [engines] the last_inserted_ids() method has been renamed to the descriptor “inserted_primary_key”.(link)
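
        A minimal runnable sketch (the table is hypothetical):

        from sqlalchemy import create_engine, MetaData, Table, Column, Integer, String

        engine = create_engine("sqlite://")
        metadata = MetaData()
        user = Table(
            "user", metadata,
            Column("id", Integer, primary_key=True),
            Column("name", String(50)),
        )
        metadata.create_all(engine)

        result = engine.execute(user.insert().values(name="ed"))
        # replaces the old result.last_inserted_ids() call
        new_id = result.inserted_primary_key[0]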

      • [engines] setting echo=False on create_engine() now sets the loglevel to WARN instead of NOTSET. This is so that logging can be disabled for a particular engine even if logging for “sqlalchemy.engine” is enabled overall. Note that the default setting of “echo” is None.(link)

        References: #1554

      • [engines] ConnectionProxy now has wrapper methods for all transaction lifecycle events, including begin(), rollback(), commit(), begin_nested(), begin_prepared(), prepare(), release_savepoint(), etc.(link)

      • [engines] Connection pool logging now uses both INFO and DEBUG log levels for logging. INFO is for major events such as invalidated connections, DEBUG for all the acquire/return logging. echo_pool can be False, None, True or “debug” the same way as echo works.(link)

      • [engines] All pyodbc-dialects now support extra pyodbc-specific kw arguments ‘ansi’, ‘unicode_results’, ‘autocommit’.(link)

        References: #1621

      • [engines] the “threadlocal” engine has been rewritten and simplified and now supports SAVEPOINT operations.(link)

      • [engines]

        deprecated or removed
        • result.last_inserted_ids() is deprecated. Use result.inserted_primary_key
        • dialect.get_default_schema_name(connection) is now public via dialect.default_schema_name.
        • the “connection” argument from engine.transaction() and engine.run_callable() is removed - Connection itself now has those methods. All four methods accept *args and **kwargs which are passed to the given callable, as well as the operating connection.
        (link)

      • [reflection/inspection] Table reflection has been expanded and generalized into a new API called “sqlalchemy.engine.reflection.Inspector”. The Inspector object provides fine-grained information about a wide variety of schema information, with room for expansion, including table names, column names, view definitions, sequences, indexes, etc.(link)
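
        A minimal sketch of the Inspector API:

        from sqlalchemy import create_engine
        from sqlalchemy.engine import reflection

        engine = create_engine("sqlite://")
        insp = reflection.Inspector.from_engine(engine)
        print(insp.get_table_names())
        # other accessors include get_columns(tablename), get_view_names(),
        # get_indexes(tablename), get_foreign_keys(tablename), etc.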

      • [reflection/inspection] Views are now reflectable as ordinary Table objects. The same Table constructor is used, with the caveat that “effective” primary and foreign key constraints aren’t part of the reflection results; these have to be specified explicitly if desired.(link)

      • [reflection/inspection] The existing autoload=True system now uses Inspector underneath so that each dialect need only return “raw” data about tables and other objects - Inspector is the single place that information is compiled into Table objects so that consistency is at a maximum.(link)

      • [ddl] the DDL system has been greatly expanded. the DDL() class now extends the more generic DDLElement(), which forms the basis of many new constructs:

        • CreateTable()
        • DropTable()
        • AddConstraint()
        • DropConstraint()
        • CreateIndex()
        • DropIndex()
        • CreateSequence()
        • DropSequence()

        These support “on” and “execute_at()” just like plain DDL() does. User-defined DDLElement subclasses can be created and linked to a compiler using the sqlalchemy.ext.compiler extension.

        (link)
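
        A minimal sketch using two of these constructs (the table is hypothetical):

        from sqlalchemy import create_engine, MetaData, Table, Column, Integer
        from sqlalchemy.schema import CreateTable, DropTable

        engine = create_engine("sqlite://")
        metadata = MetaData()
        widget = Table("widget", metadata, Column("id", Integer, primary_key=True))

        print(CreateTable(widget))        # render the DDL as a string
        engine.execute(CreateTable(widget))
        engine.execute(DropTable(widget))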

      • [ddl] The signature of the “on” callable passed to DDL() and DDLElement() is revised as follows:

        • ddl - the DDLElement object itself
        • event - the string event name.
        • target - previously “schema_item”, the Table or MetaData object triggering the event.
        • connection - the Connection object in use for the operation.
        • **kw - keyword arguments. In the case of MetaData before/after create/drop, the list of Table objects for which CREATE/DROP DDL is to be issued is passed as the kw argument “tables”. This is necessary for metadata-level DDL that is dependent on the presence of specific tables.

        The “schema_item” attribute of DDL has been renamed to “target”.
        (link)

      • [dialect] [refactor] Dialect modules are now broken into database dialects plus DBAPI implementations. Connect URLs are now preferred to be specified using dialect+driver://..., i.e. “mysql+mysqldb://scott:tiger@localhost/test”. See the 0.6 documentation for examples.(link)

      • [dialect] [refactor] the setuptools entrypoint for external dialects is now called “sqlalchemy.dialects”.(link)

      • [dialect] [refactor] the “owner” keyword argument is removed from Table. Use “schema” to represent any namespaces to be prepended to the table name.(link)

      • [dialect] [refactor] server_version_info becomes a static attribute.(link)

      • [dialect] [refactor] dialects receive an initialize() event on initial connection to determine connection properties.(link)

      • [dialect] [refactor] dialects receive a visit_pool event and have an opportunity to establish pool listeners.(link)

      • [dialect] [refactor] cached TypeEngine classes are cached per-dialect class instead of per-dialect.(link)

      • [dialect] [refactor] new UserDefinedType should be used as a base class for new types, which preserves the 0.5 behavior of get_col_spec().(link)

      • [dialect] [refactor] The result_processor() method of all type classes now accepts a second argument “coltype”, which is the DBAPI type argument from cursor.description. This argument can help some types decide on the most efficient processing of result values.(link)

      • [dialect] [refactor] Deprecated Dialect.get_params() removed.(link)

      • [dialect] [refactor] Dialect.get_rowcount() has been renamed to a descriptor “rowcount”, and calls cursor.rowcount directly. Dialects which need to hardwire a rowcount in for certain calls should override the method to provide different behavior.(link)

      • [dialect] [refactor] DefaultRunner and subclasses have been removed. The job of this object has been simplified and moved into ExecutionContext. Dialects which support sequences should add a fire_sequence() method to their execution context implementation.(link)

        References: #1566

      • [dialect] [refactor] Functions and operators generated by the compiler now use (almost) regular dispatch functions of the form “visit_<opname>” and “visit_<funcname>_fn” to provide customized processing. This replaces the need to copy the “functions” and “operators” dictionaries in compiler subclasses with straightforward visitor methods, and also allows compiler subclasses complete control over rendering, as the full _Function or _BinaryExpression object is passed in.(link)

      • [types] The construction of types within dialects has been totally overhauled. Dialects now define publicly available types as UPPERCASE names exclusively, and internal implementation types using underscore identifiers (i.e. are private). The system by which types are expressed in SQL and DDL has been moved to the compiler system. This has the effect that there are much fewer type objects within most dialects. A detailed document on this architecture for dialect authors is in lib/sqlalchemy/dialects/type_migration_guidelines.txt .(link)

      • [types] Types no longer make any guesses as to default parameters. In particular, Numeric, Float, NUMERIC, FLOAT, DECIMAL don’t generate any length or scale unless specified.(link)

      • [types] types.Binary is renamed to types.LargeBinary, it only produces BLOB, BYTEA, or a similar “long binary” type. New base BINARY and VARBINARY types have been added to access these MySQL/MS-SQL specific types in an agnostic way.(link)

        References: #1664

      • [types] String/Text/Unicode types now skip the unicode() check on each result column value if the dialect has detected the DBAPI as returning Python unicode objects natively. This check is issued on first connect using “SELECT CAST ‘some text’ AS VARCHAR(10)” or equivalent, then checking if the returned object is a Python unicode. This allows vast performance increases for native-unicode DBAPIs, including pysqlite/sqlite3, psycopg2, and pg8000.(link)

      • [types] Most types result processors have been checked for possible speed improvements. Specifically, the following generic types have been optimized, resulting in varying speed improvements: Unicode, PickleType, Interval, TypeDecorator, Binary. Also the following dbapi-specific implementations have been improved: Time, Date and DateTime on Sqlite, ARRAY on Postgresql, Time on MySQL, Numeric(as_decimal=False) on MySQL, oursql and pypostgresql, DateTime on cx_oracle and LOB-based types on cx_oracle.(link)

      • [types] Reflection of types now returns the exact UPPERCASE type within types.py, or the UPPERCASE type within the dialect itself if the type is not a standard SQL type. This means reflection now returns more accurate information about reflected types.(link)

      • [types] Added a new Enum generic type. Enum is a schema-aware object to support databases which require specific DDL in order to use enum or equivalent; in the case of PG it handles the details of CREATE TYPE, and on other databases without native enum support will generate VARCHAR + an inline CHECK constraint to enforce the enum.(link)

        References: #1511, #1109
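
        A minimal sketch of the generic type (the labels and table are hypothetical):

        from sqlalchemy import MetaData, Table, Column, Integer, Enum

        metadata = MetaData()
        article = Table(
            "article", metadata,
            Column("id", Integer, primary_key=True),
            # emits CREATE TYPE on Postgresql; VARCHAR plus a CHECK constraint elsewhere
            Column("status", Enum("draft", "published", name="article_status")),
        )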

      • [types] The Interval type includes a “native” flag which controls whether native INTERVAL types (postgresql + oracle) are selected if available. “day_precision” and “second_precision” arguments are also added, which propagate as appropriate to these native types. Related to #1467.(link)

        References: #1467

      • [types] The Boolean type, when used on a backend that doesn’t have native boolean support, will generate a CHECK constraint “col IN (0, 1)” along with the int/smallint- based column type. This can be switched off if desired with create_constraint=False. Note that MySQL has no native boolean or CHECK constraint support so this feature isn’t available on that platform.(link)

        References: #1589
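
        A minimal sketch (the table is hypothetical):

        from sqlalchemy import MetaData, Table, Column, Integer, Boolean

        metadata = MetaData()
        flags = Table(
            "flags", metadata,
            Column("id", Integer, primary_key=True),
            # adds CHECK (active IN (0, 1)) on backends without native boolean support
            Column("active", Boolean()),
            # same column type, but no CHECK constraint is generated
            Column("hidden", Boolean(create_constraint=False)),
        )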

      • [types] PickleType now uses == for comparison of values when mutable=True, unless the “comparator” argument with a comparison function is specified to the type. Objects being pickled will be compared based on identity (which defeats the purpose of mutable=True) if __eq__() is not overridden or a comparison function is not provided.(link)

      • [types] The default “precision” and “scale” arguments of Numeric and Float have been removed and now default to None. NUMERIC and FLOAT will be rendered with no numeric arguments by default unless these values are provided.(link)

      • [types] AbstractType.get_search_list() is removed - the games it was used for are no longer necessary.(link)

      • [types] Added a generic BigInteger type, compiles to BIGINT or NUMBER(19).(link)

        References: #1125

      • [types] sqlsoup has been overhauled to explicitly support an 0.5 style session, using autocommit=False, autoflush=True. Default behavior of SQLSoup now requires the usual usage of commit() and rollback(), which have been added to its interface. An explicit Session or scoped_session can be passed to the constructor, allowing these arguments to be overridden.(link)

      • [types] sqlsoup db.<sometable>.update() and delete() now call query(cls).update() and delete(), respectively.(link)

      • [types] sqlsoup now has execute() and connection(), which call upon the Session methods of those names, ensuring that the bind is in terms of the SqlSoup object’s bind.(link)

      • [types] sqlsoup objects no longer have the ‘query’ attribute - it’s not needed for sqlsoup’s usage paradigm and it gets in the way of a column that is actually named ‘query’.(link)

      • [types] The signature of the proxy_factory callable passed to association_proxy is now (lazy_collection, creator, value_attr, association_proxy), adding a fourth argument that is the parent AssociationProxy argument. Allows serializability and subclassing of the built in collections.(link)

        References: #1259

      • [types] association_proxy now has basic comparator methods .any(), .has(), .contains(), ==, !=, thanks to Scott Torborg.(link)

        References: #1372


      0.7 Changelog

      0.7.11

      no release date

      orm

      • [orm] [bug] Fixed bug where list instrumentation would fail to represent a setslice of [0:0] correctly, which in particular could occur when using insert(0, item) with the association proxy. Due to some quirk in Python collections, the issue was much more likely with Python 3 rather than 2.(link)

        References: #2807

      • [orm] [bug] Fixed bug when a query of the form: query(SubClass).options(subqueryload(Baseclass.attrname)), where SubClass is a joined-inheritance subclass of BaseClass, would fail to apply the JOIN inside the subquery on the attribute load, producing a cartesian product. The populated results still tended to be correct as additional rows are just ignored, so this issue may be present as a performance degradation in applications that are otherwise working correctly.(link)

        References: #2699

      • [orm] [bug] Fixed bug in unit of work whereby a joined-inheritance subclass could insert the row for the “sub” table before the parent table, if the two tables had no ForeignKey constraints set up between them.(link)

        References: #2689

      • [orm] [bug] Improved the error message emitted when a “backref loop” is detected, that is when an attribute event triggers a bidirectional assignment between two other attributes with no end. This condition can occur not just when an object of the wrong type is assigned, but also when an attribute is mis-configured to backref into an existing backref pair.(link)

        References: #2674

      • [orm] [bug] A warning is emitted when a MapperProperty is assigned to a mapper that replaces an existing property, if the properties in question aren’t plain column-based properties. Replacement of relationship properties is rarely (ever?) what is intended and usually refers to a mapper mis-configuration. This will also warn if a backref configures itself on top of an existing one in an inheritance relationship (which is an error in 0.8).(link)

        References: #2674

      engine

      • [engine] [bug] The regexp used by the make_url() function now parses ipv6 addresses, e.g. surrounded by brackets.(link)

        References: #2851

      sql

      • [sql] [bug] Fixed regression dating back to 0.7.9 whereby the name of a CTE might not be properly quoted if it was referred to in multiple FROM clauses.(link)

        References: #2801

      • [sql] [bug] [cte] Fixed bug in common table expression system where if the CTE were used only as an alias() construct, it would not render using the WITH keyword.(link)

        References: #2783

      • [sql] [bug] Fixed bug in CheckConstraint DDL where the “quote” flag from a Column object would not be propagated.(link)

        References: #2784

      postgresql

      • [postgresql] [feature] Added support for Postgresql’s traditional SUBSTRING function syntax, renders as “SUBSTRING(x FROM y FOR z)” when regular func.substring() is used. Courtesy Gunnlaugur Þór Briem.(link)

        References: #2676

      mysql

      • [mysql] [bug] Updates to MySQL reserved words for versions 5.5, 5.6, courtesy Hanno Schlichting.(link)

        References: #2791

      misc

      • [bug] [tests] Fixed an import of “logging” in test_execute which was not working on some linux platforms.(link)

        References: #2669, pull request 41

      0.7.10

      Released: Thu Feb 7 2013

      orm

      • [orm] [bug] Fixed potential memory leak which could occur if an arbitrary number of sessionmaker objects were created. The anonymous subclass created by the sessionmaker, when dereferenced, would not be garbage collected due to remaining class-level references from the event package. This issue also applies to any custom system that made use of ad-hoc subclasses in conjunction with an event dispatcher.(link)

        References: #2650

      • [orm] [bug] Query.merge_result() can now load rows from an outer join where an entity may be None without throwing an error.(link)

        References: #2640

      • [orm] [bug] The MutableComposite type did not allow for the MutableBase.coerce() method to be used, even though the code seemed to indicate this intent, so this now works and a brief example is added. As a side-effect, the mechanics of this event handler have been changed so that new MutableComposite types no longer add per-type global event handlers. Also in 0.8.0b2.(link)

        References: #2624

      • [orm] [bug] Fixed Session accounting bug whereby replacing a deleted object in the identity map with another object of the same primary key would raise a “conflicting state” error on rollback(), if the replaced primary key were established either via non-unitofwork-established INSERT statement or by primary key switch of another instance.(link)

        References: #2583

      engine

      sql

      • [sql] [bug] Backported adjustment to __repr__ for TypeDecorator to 0.7, allows PickleType to produce a clean repr() to help with Alembic.(link)

        References: #2594, #2584

      • [sql] [bug] Fixed bug where Table.tometadata() would fail if a Column had both a foreign key as well as an alternate “.key” name for the column.(link)

        References: #2643

      • [sql] [bug] Fixed bug where using server_onupdate=<FetchedValue|DefaultClause> without passing the “for_update=True” flag would apply the default object to the server_default, blowing away whatever was there. The explicit for_update=True argument shouldn’t be needed with this usage (especially since the documentation shows an example without it being used) so it is now arranged internally using a copy of the given default object, if the flag isn’t set to what corresponds to that argument.(link)

        References: #2631

      • [sql] [gae] [mysql] Added a conditional import to the gaerdbms dialect which attempts to import rdbms_apiproxy vs. rdbms_googleapi to work on both dev and production platforms. Also now honors the instance attribute. Courtesy Sean Lynch. Also backported enhancements to allow username/password as well as fixing error code interpretation from 0.8.(link)

        References: #2649

      mysql

      • [mysql] [feature] Added “raise_on_warnings” flag to OurSQL dialect.(link)

        References: #2523

      • [mysql] [feature] Added “read_timeout” flag to MySQLdb dialect.(link)

        References: #2554

      sqlite

      • [sqlite] [bug] More adjustment to this SQLite related issue which was released in 0.7.9, to intercept legacy SQLite quoting characters when reflecting foreign keys. In addition to intercepting double quotes, other quoting characters such as brackets, backticks, and single quotes are now also intercepted.(link)

        References: #2568

      mssql

      • [mssql] [bug] Fixed bug whereby using “key” with Column in conjunction with “schema” for the owning Table would fail to locate result rows due to the MSSQL dialect’s “schema rendering” logic’s failure to take .key into account.(link)

      • [mssql] [bug] Added a Py3K conditional around unnecessary .decode() call in mssql information schema, fixes reflection in Py3k.(link)

        References: #2638

      oracle

      • [oracle] [bug] The Oracle LONG type, while an unbounded text type, does not appear to use the cx_Oracle.LOB type when result rows are returned, so the dialect has been repaired to exclude LONG from having cx_Oracle.LOB filtering applied.(link)

        References: #2620

      • [oracle] [bug] Repaired the usage of .prepare() in conjunction with cx_Oracle so that a return value of False will result in no call to connection.commit(), hence avoiding “no transaction” errors. Two-phase transactions have now been shown to work in a rudimentary fashion with SQLAlchemy and cx_oracle, however are subject to caveats observed with the driver; check the documentation for details.(link)

        References: #2611

      • [oracle] [bug] changed the list of cx_oracle types that are excluded from the setinputsizes() step to only include STRING and UNICODE; CLOB and NCLOB are removed. This is to work around cx_oracle behavior which is broken for the executemany() call. In 0.8, this same change is applied however it is also configurable via the exclude_setinputsizes argument.(link)

        References: #2561

      0.7.9

      Released: Mon Oct 01 2012

      orm

      • [orm] [bug] Fixed bug mostly local to new AbstractConcreteBase helper where the “type” attribute from the superclass would not be overridden on the subclass to produce the “reserved for base” error message, instead placing a do-nothing attribute there. This was inconsistent vs. using ConcreteBase as well as all the behavior of classical concrete mappings, where the “type” column from the polymorphic base would be explicitly disabled on subclasses, unless overridden explicitly.(link)

      • [orm] [bug] A warning is emitted when lazy=’dynamic’ is combined with uselist=False. This raises an exception in 0.8.(link)

      • [orm] [bug] Fixed bug whereby user error in related-object assignment could cause recursion overflow if the assignment triggered a backref of the same name as a bi-directional attribute on the incorrect class to the same target. An informative error is raised now.(link)

      • [orm] [bug] Fixed bug where incorrect type information would be passed when the ORM would bind the “version” column, when using the “version” feature. Tests courtesy Daniel Miller.(link)

        References: #2539

      • [orm] [bug] Extra logic has been added to the “flush” that occurs within Session.commit(), such that the extra state added by an after_flush() or after_flush_postexec() hook is also flushed in a subsequent flush, before the “commit” completes. Subsequent calls to flush() will continue until the after_flush hooks stop adding new state. An “overflow” counter of 100 is also in place, in the event of a broken after_flush() hook adding new content each time.(link)

        References: #2566

      engine

      • [engine] [feature] Dramatic improvement in memory usage of the event system; instance-level collections are no longer created for a particular type of event until instance-level listeners are established for that event.(link)

        References: #2516

      • [engine] [bug] Fixed bug whereby a disconnect detect + dispose that occurs when the QueuePool has threads waiting for connections would leave those threads waiting for the duration of the timeout on the old pool (or indefinitely if timeout was disabled). The fix now notifies those waiters with a special exception case and has them move onto the new pool.(link)

        References: #2522

      • [engine] [bug] Added gaerdbms import to mysql/__init__.py, the absence of which was preventing the new GAE dialect from being loaded.(link)

        References: #2529

      • [engine] [bug] Fixed cextension bug whereby the “ambiguous column error” would fail to function properly if the given index were a Column object and not a string. Note there are still some column-targeting issues here which are fixed in 0.8.(link)

        References: #2553

      • [engine] [bug] Fixed the repr() of Enum to include the “name” and “native_enum” flags. Helps Alembic autogenerate.(link)

      sql

      • [sql] [bug] Fixed the DropIndex construct to support an Index associated with a Table in a remote schema.(link)

        References: #2571

      • [sql] [bug] Fixed bug in over() construct whereby passing an empty list for either partition_by or order_by, as opposed to None, would fail to generate correctly. Courtesy Gunnlaugur Þór Briem.(link)

        References: #2574

      • [sql] [bug] Fixed CTE bug whereby positional bound parameters present in the CTEs themselves would corrupt the overall ordering of bound parameters. This primarily affected SQL Server as the platform with positional binds + CTE support.(link)

        References: #2521

      • [sql] [bug] Fixed more un-intuitivenesses in CTEs which prevented referring to a CTE in a union of itself without it being aliased. CTEs now render uniquely on name, rendering the outermost CTE of a given name only - all other references are rendered just as the name. This even includes other CTE/SELECTs that refer to different versions of the same CTE object, such as a SELECT or a UNION ALL of that SELECT. We are somewhat loosening the usual link between object identity and lexical identity in this case. A true name conflict between two unrelated CTEs now raises an error.(link)

      • [sql] [bug] quoting is applied to the column names inside the WITH RECURSIVE clause of a common table expression according to the quoting rules for the originating Column.(link)

        References: #2512

      • [sql] [bug] Fixed regression introduced in 0.7.6 whereby the FROM list of a SELECT statement could be incorrect in certain “clone+replace” scenarios.(link)

        References: #2518

      • [sql] [bug] Fixed bug whereby usage of a UNION or similar inside of an embedded subquery would interfere with result-column targeting, in the case that a result-column had the same ultimate name as a name inside the embedded UNION.(link)

        References: #2552

      • [sql] [bug] Fixed a regression since 0.6 regarding result-row targeting. It should be possible to use a select() statement with string based columns in it, that is select([‘id’, ‘name’]).select_from(‘mytable’), and have this statement be targetable by Column objects with those names; this is the mechanism by which query(MyClass).from_statement(some_statement) works. At some point the specific case of using select([‘id’]), which is equivalent to select([literal_column(‘id’)]), stopped working here, so this has been re-instated and of course tested.(link)

        References: #2558

      • [sql] [bug] Added missing operators is_(), isnot() to the ColumnOperators base, so that these long-available operators are present as methods like all the other operators.(link)

        References: #2544
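
        A minimal sketch (the table is hypothetical):

        from sqlalchemy import MetaData, Table, Column, Integer, String, select

        metadata = MetaData()
        user = Table("user", metadata,
                     Column("id", Integer, primary_key=True),
                     Column("name", String(50)))

        stmt = select([user]).where(user.c.name.is_(None))      # renders ... IS NULL
        stmt2 = select([user]).where(user.c.name.isnot(None))   # renders ... IS NOT NULL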

      postgresql

      • [postgresql] [bug] Columns in reflected primary key constraint are now returned in the order in which the constraint itself defines them, rather than how the table orders them. Courtesy Gunnlaugur Þór Briem.(link)

        References: #2531

      • [postgresql] [bug] Added ‘terminating connection’ to the list of messages we use to detect a disconnect with PG, which appears to be present in some versions when the server is restarted.(link)

        References: #2570

      mysql

      • [mysql] [bug] Updated mysqlconnector interface to use updated “client flag” and “charset” APIs, courtesy David McNelis.(link)

      sqlite

      • [sqlite] [feature] Added support for the localtimestamp() SQL function implemented in SQLite, courtesy Richard Mitchell.(link)

      • [sqlite] [bug] Adjusted a very old bugfix which attempted to work around a SQLite issue that itself was “fixed” as of sqlite 3.6.14, regarding quotes surrounding a table name when using the “foreign_key_list” pragma. The fix has been adjusted to not interfere with quotes that are actually in the name of a column or table, to as much a degree as possible; sqlite still doesn’t return the correct result for foreign_key_list() if the target table actually has quotes surrounding its name, as part of its name (i.e. """mytable""").(link)

        References: #2568

      • [sqlite] [bug] Adjusted column default reflection code to convert non-string values to string, to accommodate old SQLite versions that don’t deliver default info as a string.(link)

        References: #2265

      mssql

      • [mssql] [bug] Fixed compiler bug whereby using a correlated subquery within an ORDER BY would fail to render correctly if the statement also used LIMIT/OFFSET, due to mis-rendering within the ROW_NUMBER() OVER clause. Fix courtesy sayap(link)

        References: #2538

      • [mssql] [bug] Fixed compiler bug whereby a given select() would be modified if it had an “offset” attribute, causing the construct to not compile correctly a second time.(link)

        References: #2545

      • [mssql] [bug] Fixed bug where reflection of primary key constraint would double up columns if the same constraint/table existed in multiple schemas.(link)

      0.7.8

      Released: Sat Jun 16 2012

      orm

      • [orm] [feature] The ‘objects’ argument to flush() is no longer deprecated, as some valid use cases have been identified.(link)

      • [orm] [bug] Fixed bug whereby subqueryload() from a polymorphic mapping to a target would incur a new invocation of the query for each distinct class encountered in the polymorphic result.(link)

        References: #2480

      • [orm] [bug] Fixed bug in declarative whereby the precedence of columns in a joined-table, composite column (typically for id) would fail to be correct if the columns contained names distinct from their attribute names. This would cause things like primaryjoin conditions made against the entity attributes to be incorrect. Related to #1892, as this fix was supposed to be part of that issue.(link)

        References: #2491, #1892

      • [orm] [bug] Fixed identity_key() function which was not accepting a scalar argument for the identity.(link)

        References: #2508

      • [orm] [bug] Fixed bug whereby populate_existing option would not propagate to subquery eager loaders.(link)

        References: #2497

      engine

      • [engine] [bug] Fixed memory leak in C version of result proxy whereby DBAPIs which don’t deliver pure Python tuples for result rows would fail to decrement refcounts correctly. The most prominently affected DBAPI is pyodbc.(link)

        References: #2489

      • [engine] [bug] Fixed bug affecting Py3K whereby string positional parameters passed to engine/connection execute() would fail to be interpreted correctly, due to __iter__ being present on Py3K string.(link)

        References: #2503

      sql

      • [sql] [bug] added BIGINT to types.__all__, BIGINT, BINARY, VARBINARY to sqlalchemy module namespace, plus test to ensure this breakage doesn’t occur again.(link)

        References: #2499

      • [sql] [bug] Repaired common table expression rendering to function correctly when the SELECT statement contains UNION or other compound expressions, courtesy btbuilder.(link)

        References: #2490

      • [sql] [bug] Fixed bug whereby append_column() wouldn’t function correctly on a cloned select() construct, courtesy Gunnlaugur Þór Briem.(link)

        References: #2482

      postgresql

      • [postgresql] [bug] removed unnecessary table clause when reflecting enums. Courtesy Gunnlaugur Þór Briem.(link)

        References: #2510

      mysql

      • [mysql] [feature] Added a new dialect for Google App Engine. Courtesy Richie Foreman.(link)

        References: #2484

      oracle

      • [oracle] [bug] Added ROWID to oracle.*.(link)

        References: #2483

      0.7.7

      Released: Sat May 05 2012

      orm

      • [orm] [feature] Added prefix_with() method to Query, calls upon select().prefix_with() to allow placement of MySQL SELECT directives in statements. Courtesy Diana Clarke(link)

        References: #2443

      • [orm] [feature] Added new flag to @validates include_removes. When True, collection remove and attribute del events will also be sent to the validation function, which accepts an additional argument “is_remove” when this flag is used.(link)
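
        A minimal sketch of the new flag (the model is hypothetical):

        from sqlalchemy import Column, Integer, ForeignKey
        from sqlalchemy.ext.declarative import declarative_base
        from sqlalchemy.orm import relationship, validates

        Base = declarative_base()

        class Child(Base):
            __tablename__ = "child"
            id = Column(Integer, primary_key=True)
            parent_id = Column(Integer, ForeignKey("parent.id"))
            locked = Column(Integer, default=0)

        class Parent(Base):
            __tablename__ = "parent"
            id = Column(Integer, primary_key=True)
            children = relationship(Child)

            @validates("children", include_removes=True)
            def validate_children(self, key, child, is_remove):
                # is_remove is True when the item is being removed from the collection
                if is_remove and child.locked:
                    raise ValueError("cannot remove a locked child")
                return child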

      • [orm] [bug] Fixed issue in unit of work whereby setting a non-None self-referential many-to-one relationship to None would fail to persist the change if the former value was not already loaded.(link)

        References: #2477

      • [orm] [bug] Fixed bug in 0.7.6 introduced by the fix for #2409, whereby column_mapped_collection used against columns that were mapped as joins or other indirect selectables would fail to function.(link)

        References: #2409

      • [orm] [bug] Fixed bug whereby polymorphic_on column that’s not otherwise mapped on the class would be incorrectly included in a merge() operation, raising an error.(link)

        References: #2449

      • [orm] [bug] Fixed bug in expression annotation mechanics which could lead to incorrect rendering of SELECT statements with aliases and joins, particularly when using column_property().(link)

        References: #2453

      • [orm] [bug] Fixed bug which would prevent OrderingList from being pickleable. Courtesy Jeff Dairiki(link)

        References: #2454

      • [orm] [bug] Fixed bug in relationship comparisons whereby calling unimplemented methods like SomeClass.somerelationship.like() would produce a recursion overflow, instead of NotImplementedError.(link)

      sql

      • [sql] [feature] Added new connection event dbapi_error(). Is called for all DBAPI-level errors passing the original DBAPI exception before SQLAlchemy modifies the state of the cursor.(link)

      • [sql] [bug] Removed warning when Index is created with no columns; while this might not be what the user intended, it is a valid use case as an Index could be a placeholder for just an index of a certain name.(link)

      • [sql] [bug] If conn.begin() fails when calling “with engine.begin()”, the newly acquired Connection is closed explicitly before propagating the exception onward normally.(link)

      • [sql] [bug] Add BINARY, VARBINARY to types.__all__.(link)

        References: #2474

      postgresql

      • [postgresql] [feature] Added new for_update/with_lockmode() options for Postgresql: for_update=”read”/ with_lockmode(“read”), for_update=”read_nowait”/ with_lockmode(“read_nowait”). These emit “FOR SHARE” and “FOR SHARE NOWAIT”, respectively. Courtesy Diana Clarke(link)

        References: #2445

      • [postgresql] [bug] removed unnecessary table clause when reflecting domains.(link)

        References: #2473

      mysql

      • [mysql] [bug] Fixed bug whereby column name inside of “KEY” clause for autoincrement composite column with InnoDB would double quote a name that’s a reserved word. Courtesy Jeff Dairiki.(link)

        References: #2460

      • [mysql] [bug] Fixed bug whereby get_view_names() for “information_schema” schema would fail to retrieve views marked as “SYSTEM VIEW”. courtesy Matthew Turland.(link)

      • [mysql] [bug] Fixed bug whereby if cast() is used on a SQL expression whose type is not supported by cast() and therefore CAST isn’t rendered by the dialect, the order of evaluation could change if the casted expression required that it be grouped; grouping is now applied to those expressions.(link)

        References: #2467

      sqlite

      • [sqlite] [feature] Added SQLite execution option “sqlite_raw_colnames=True”, will bypass attempts to remove ”.” from column names returned by SQLite cursor.description.(link)

        References: #2475

      • [sqlite] [bug] When the primary key column of a Table is replaced, such as via extend_existing, the “auto increment” column used by insert() constructs is reset. Previously it would remain referring to the previous primary key column.(link)

        References: #2525

      mssql

      • [mssql] [feature] Added interim create_engine flag supports_unicode_binds to PyODBC dialect, to force whether or not the dialect passes Python unicode literals to PyODBC or not.(link)

      • [mssql] [bug] Repaired the use_scope_identity create_engine() flag when using the pyodbc dialect. Previously this flag would be ignored if set to False. When set to False, you’ll get “SELECT @@identity” after each INSERT to get at the last inserted ID, for those tables which have “implicit_returning” set to False.(link)

      • [mssql] [bug] UPDATE..FROM syntax with SQL Server requires that the updated table be present in the FROM clause when an alias of that table is also present in the FROM clause. The updated table is now always present in the FROM, when FROM is present in the first place. Courtesy sayap.(link)

        References: #2468

      0.7.6

      Released: Wed Mar 14 2012

      orm

      • [orm] [feature] Added “no_autoflush” context manager to Session; used with the “with:” statement, it will temporarily disable autoflush.(link)
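
        A minimal runnable sketch (the mapped class is hypothetical):

        from sqlalchemy import create_engine, Column, Integer
        from sqlalchemy.ext.declarative import declarative_base
        from sqlalchemy.orm import Session

        Base = declarative_base()

        class Thing(Base):
            __tablename__ = "thing"
            id = Column(Integer, primary_key=True)

        engine = create_engine("sqlite://")
        Base.metadata.create_all(engine)
        session = Session(engine)

        with session.no_autoflush:
            session.add(Thing())
            # this query would normally autoflush the pending Thing; inside the block it does not
            session.query(Thing).all()
        session.commit()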

      • [orm] [feature] Added cte() method to Query, invokes common table expression support from the Core (see below).(link)

        References: #1859

      • [orm] [feature] Added the ability to query for Table-bound column names when using query(sometable).filter_by(colname=value).(link)

        References: #2400

      • [orm] [bug] Fixed event registration bug which would primarily show up as events not being registered with sessionmaker() instances created after the event was associated with the Session class.(link)

        References: #2424

      • [orm] [bug] Fixed bug whereby a primaryjoin condition with a “literal” in it would raise an error on compile with certain kinds of deeply nested expressions which also needed to render the same bound parameter name more than once.(link)

        References: #2425

      • [orm] [bug] Removed the check for number of rows affected when doing a multi-delete against mapped objects. If an ON DELETE CASCADE exists between two rows, we can’t get an accurate rowcount from the DBAPI; this particular count is not supported on most DBAPIs in any case, and MySQLdb is the notable case where it is.(link)

        References: #2403

      • [orm] [bug] Fixed bug whereby objects using attribute_mapped_collection or column_mapped_collection could not be pickled.(link)

        References: #2409

      • [orm] [bug] Fixed bug whereby MappedCollection would not get the appropriate collection instrumentation if it were only used in a custom subclass that used @collection.internally_instrumented.(link)

        References: #2406

      • [orm] [bug] Fixed bug whereby SQL adaption mechanics would fail in a very nested scenario involving joined-inheritance, joinedload(), limit(), and a derived function in the columns clause.(link)

        References: #2419

      • [orm] [bug] Fixed the repr() for CascadeOptions to include refresh-expire. Also reworked CascadeOptions to be a frozenset.(link)

        References: #2417

      • [orm] [bug] Improved the “declarative reflection” example to support single-table inheritance, multiple calls to prepare(), tables that are present in alternate schemas, establishing only a subset of classes as reflected.(link)

      • [orm] [bug] Scaled back the test applied within flush() to check for UPDATE against partially NULL PK within one table to only actually happen if there’s really an UPDATE to occur.(link)

        References: #2390

      • [orm] [bug] Fixed bug whereby if a method name conflicted with a column name, a TypeError would be raised when the mapper tried to inspect the __get__() method on the method object.(link)

        References: #2352

      engine

      • [engine] [feature] Added “no_parameters=True” execution option for connections. If no parameters are present, will pass the statement as cursor.execute(statement), thereby invoking the DBAPIs behavior when no parameter collection is present; for psycopg2 and mysql-python, this means not interpreting % signs in the string. This only occurs with this option, and not just if the param list is blank, as otherwise this would produce inconsistent behavior of SQL expressions that normally escape percent signs (and while compiling, can’t know ahead of time if parameters will be present in some cases).(link)

        References: #2407

      • [engine] [feature] Added pool_reset_on_return argument to create_engine, allows control over “connection return” behavior. Also added new arguments ‘rollback’, ‘commit’, None to pool.reset_on_return to allow more control over connection return activity.(link)

        References: #2378

      • [engine] [feature] Added some decent context managers to Engine, Connection:

        with engine.begin() as conn:
            <work with conn in a transaction>

        and:

        with engine.connect() as conn:
            <work with conn>

        Both close out the connection when done; engine.begin() additionally commits the transaction, or rolls it back if an error is raised.

        (link)

      • [engine] [bug] Added execution_options() call to MockConnection (i.e., that used with strategy=”mock”) which acts as a pass through for arguments.(link)

      sql

      • [sql] [feature] Added support for SQL standard common table expressions (CTE), allowing SELECT objects as the CTE source (DML not yet supported). This is invoked via the cte() method on any select() construct.(link)

        References: #1859
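
        A minimal sketch of the Core API (the table is hypothetical):

        from sqlalchemy import MetaData, Table, Column, Integer, String, select, func

        metadata = MetaData()
        orders = Table("orders", metadata,
                       Column("region", String(50)),
                       Column("amount", Integer))

        regional = (select([orders.c.region,
                            func.sum(orders.c.amount).label("total")])
                    .group_by(orders.c.region)
                    .cte("regional_totals"))

        # renders SELECT ... FROM regional_totals WHERE regional_totals.total > 100,
        # preceded by WITH regional_totals AS (...)
        stmt = select([regional.c.region]).where(regional.c.total > 100)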

      • [sql] [bug] Fixed memory leak in core which would occur when C extensions were used with particular types of result fetches, in particular when orm query.count() were called.(link)

        References: #2427

      • [sql] [bug] Fixed issue whereby attribute-based column access on a row would raise AttributeError with non-C version, NoSuchColumnError with C version. Now raises AttributeError in both cases.(link)

        References: #2398

      • [sql] [bug] Added support for using the .key of a Column as a string identifier in a result set row. The .key is currently listed as an “alternate” name for a column, and is superseded by the name of a column which has that key value as its regular name. For the next major release of SQLAlchemy we may reverse this precedence so that .key takes precedence, but this is not decided on yet.(link)

        References: #2392

      • [sql] [bug] A warning is emitted when a not-present column is stated in the values() clause of an insert() or update() construct. Will move to an exception in 0.8.(link)

        References: #2413

      • [sql] [bug] A significant change to how labeling is applied to columns in SELECT statements allows “truncated” labels, that is label names that are generated in Python which exceed the maximum identifier length (note this is configurable via label_length on create_engine()), to be properly referenced when rendered inside of a subquery, as well as to be present in a result set row using their original in-Python names.(link)

        References: #2396

      • [sql] [bug] Fixed bug in new “autoload_replace” flag which would fail to preserve the primary key constraint of the reflected table.(link)

        References: #2402

      • [sql] [bug] Index will raise when arguments passed cannot be interpreted as columns or expressions. Will warn when Index is created with no columns at all.(link)

        References: #2380

      mysql

      • [mysql] [feature] Added support for MySQL index and primary key constraint types (i.e. USING) via new mysql_using parameter to Index and PrimaryKeyConstraint, courtesy Diana Clarke.(link)

        References: #2386
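
        A minimal sketch (the table is hypothetical):

        from sqlalchemy import MetaData, Table, Column, Integer, String, Index

        metadata = MetaData()
        lookup = Table("lookup", metadata,
                       Column("id", Integer, primary_key=True),
                       Column("code", String(30)))

        # adds "USING hash" to the CREATE INDEX statement on MySQL
        Index("ix_lookup_code", lookup.c.code, mysql_using="hash")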

      • [mysql] [feature] Added support for the “isolation_level” parameter to all MySQL dialects. Thanks to mu_mind for the patch here.(link)

        References: #2394

      sqlite

      • [sqlite] [bug] Fixed bug in C extensions whereby string format would not be applied to a Numeric value returned as integer; this affected primarily SQLite which does not maintain numeric scale settings.(link)

        References: #2432

      mssql

      • [mssql] [feature] Added support for MSSQL INSERT, UPDATE, and DELETE table hints, using new with_hint() method on UpdateBase.(link)

        References: #2430

      oracle

      • [oracle] [feature] Added a new create_engine() flag coerce_to_decimal=False, disables the precision numeric handling which can add lots of overhead by converting all numeric values to Decimal.(link)

        References: #2399

      • [oracle] [bug] Added missing compilation support for LONG(link)

        References: #2401

      • [oracle] [bug] Added ‘LEVEL’ to the list of reserved words for Oracle.(link)

        References: #2435

      misc

      • [bug] [examples] Altered _params_from_query() function in Beaker example to pull bindparams from the fully compiled statement, as a quick means to get everything including subqueries in the columns clause, etc.(link)

      0.7.5

      Released: Sat Jan 28 2012

      orm

      • [orm] [feature] Added “class_registry” argument to declarative_base(). Allows two or more declarative bases to share the same registry of class names.(link)

      • [orm] [feature] query.filter() accepts multiple criteria which will join via AND, i.e. query.filter(x==y, z>q, ...)(link)

      • [orm] [feature] Added new capability to relationship loader options to allow “default” loader strategies. Pass ‘*’ to any of joinedload(), lazyload(), subqueryload(), or noload() and that becomes the loader strategy used for all relationships, except for those explicitly stated in the Query. Thanks to up-and-coming contributor Kent Bower for an exhaustive and well written test suite !(link)

        References: #2351
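
        A minimal sketch (the model is hypothetical):

        from sqlalchemy import create_engine, Column, Integer, ForeignKey
        from sqlalchemy.ext.declarative import declarative_base
        from sqlalchemy.orm import relationship, Session, lazyload, joinedload

        Base = declarative_base()

        class Parent(Base):
            __tablename__ = "parent"
            id = Column(Integer, primary_key=True)
            children = relationship("Child")

        class Child(Base):
            __tablename__ = "child"
            id = Column(Integer, primary_key=True)
            parent_id = Column(Integer, ForeignKey("parent.id"))

        session = Session(create_engine("sqlite://"))
        # every relationship defaults to lazy loading, except Parent.children
        query = session.query(Parent).options(lazyload("*"), joinedload(Parent.children))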

      • [orm] [feature] New declarative reflection example added, illustrates how best to mix table reflection with declarative as well as uses some new features from #2356.(link)

        References: #2356

      • [orm] [bug] Fixed issue where modified session state established after a failed flush would be committed as part of the subsequent transaction that begins automatically after manual call to rollback(). The state of the session is checked within rollback(), and if new state is present, a warning is emitted and restore_snapshot() is called a second time, discarding those changes.(link)

        References: #2389

      • [orm] [bug] Fixed regression from 0.7.4 whereby using an already instrumented column from a superclass as “polymorphic_on” failed to resolve the underlying Column.(link)

        References: #2345

      • [orm] [bug] Raise an exception if xyzload_all() is used inappropriately with two non-connected relationships.(link)

        References: #2370

      • [orm] [bug] Fixed bug whereby event.listen(SomeClass) forced an entirely unnecessary compile of the mapper, making events very hard to set up at module import time (nobody noticed this ??)(link)

        References: #2367

      • [orm] [bug] Fixed bug whereby hybrid_property didn’t work as a kw arg in any(), has().(link)

      • [orm] [bug] ensure pickleability of all ORM exceptions for multiprocessing compatibility.(link)

        References: #2371

      • [orm] [bug] implemented standard “can’t set attribute” / “can’t delete attribute” AttributeError when setattr/delattr used on a hybrid that doesn’t define fset or fdel.(link)

        References: #2353

      • [orm] [bug] Fixed bug where unpickled object didn’t have enough of its state set up to work correctly within the unpickle() event established by the mutable object extension, if the object needed ORM attribute access within __eq__() or similar.(link)

        References: #2362

      • [orm] [bug] Fixed bug where “merge” cascade could mis-interpret an unloaded attribute, if the load_on_pending flag were used with relationship(). Thanks to Kent Bower for tests.(link)

        References: #2374

      • [orm] Fixed regression from 0.6 whereby if “load_on_pending” relationship() flag were used where a non-“get()” lazy clause needed to be emitted on a pending object, it would fail to load.(link)

      engine

      • [engine] [bug] Added __reduce__ to StatementError, DBAPIError, column errors so that exceptions are pickleable, as when using multiprocessing. However, not all DBAPIs support this yet, such as psycopg2.(link)

        References: #2371

      • [engine] [bug] Improved error messages when a non-string or invalid string is passed to any of the date/time processors used by SQLite, including C and Python versions.(link)

        References: #2382

      • [engine] [bug] Fixed bug whereby a table-bound Column object named “<a>_<b>” which matched a column labeled as “<tablename>_<colname>” could match inappropriately when targeting in a result set row.(link)

        References: #2377

      • [engine] [bug] Fixed bug in “mock” strategy whereby correct DDL visit method wasn’t called, resulting in “CREATE/DROP SEQUENCE” statements being duplicated(link)

        References: #2384

      sql

      • [sql] [feature] New reflection feature “autoload_replace”; when set to False on Table, the Table can be autoloaded without existing columns being replaced. Allows more flexible chains of Table construction/reflection to be constructed, including that it helps with combining Declarative with table reflection. See the new example on the wiki.(link)

        References: #2356

      • [sql] [feature] Added “false()” and “true()” expression constructs to sqlalchemy.sql namespace, though not part of __all__ as of yet.(link)
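
        A minimal sketch; the users table is an illustrative assumption:

            from sqlalchemy.sql import select, true, false

            # a WHERE clause that never matches; see the sqlite note below for how it renders there
            stmt = select([users.c.id]).where(false())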

      • [sql] [feature] Dialect-specific compilers now raise CompileError for all type/statement compilation issues, instead of InvalidRequestError or ArgumentError. The DDL for CREATE TABLE will re-raise CompileError to include table/column information for the problematic column.(link)

        References: #2361

      • [sql] [bug] Improved the API for add_column() such that if the same column is added to its own table, an error is not raised and the constraints don’t get doubled up. Also helps with some reflection/declarative patterns.(link)

        References: #2356

      • [sql] [bug] Fixed issue where the “required” exception would not be raised for bindparam() with required=True, if the statement were given no parameters at all.(link)

        References: #2381

      mysql

      • [mysql] [bug] fixed regexp that filters out warnings for non-reflected “PARTITION” directives, thanks to George Reilly(link)

        References: #2376

      sqlite

      • [sqlite] [bug] the “name” of an FK constraint in SQLite is reflected as “None”, not “0” or other integer value. SQLite does not appear to support constraint naming in any case.(link)

        References: #2364

      • [sqlite] [bug] sql.false() and sql.true() compile to 0 and 1, respectively in sqlite(link)

        References: #2368

      • [sqlite] [bug] removed an erroneous “raise” in the SQLite dialect when getting table names and view names, where logic is in place to fall back to an older version of SQLite that doesn’t have the “sqlite_temp_master” table.(link)

      mssql

      • [mssql] [bug] Adjusted the regexp used in the mssql.TIME type to ensure only six digits are received for the “microseconds” portion of the value, which is expected by Python’s datetime.time(). Note that support for sending microseconds doesn’t seem to be possible yet with pyodbc at least.(link)

        References: #2340

      • [mssql] [bug] Dropped the “30 char” limit on pymssql, based on reports that it’s doing things better these days. pymssql hasn’t been well tested and as the DBAPI is in flux it’s still not clear what the status is on this driver and how SQLAlchemy’s implementation should adapt.(link)

        References: #2347

      oracle

      • [oracle] [bug] Added ORA-03135 to the never-ending list of Oracle “connection lost” errors.(link)

        References: #2388

      misc

      • [feature] [examples] Simplified the versioning example a bit to use a declarative mixin as well as an event listener, instead of a metaclass + SessionExtension.(link)

        References: #2313

      • [bug] [core] Changed LRUCache, used by the mapper to cache INSERT/UPDATE/DELETE statements, to use an incrementing counter instead of a timestamp to track entries, for greater reliability versus using time.time(), which can cause test failures on some platforms.(link)

        References: #2379

      • [bug] [core] Added a boolean check for the “finalize” function within the pool connection proxy’s weakref callback before calling it, so that a warning isn’t emitted that this function is None when the application is exiting and gc has removed the function from the module before the weakref callback was invoked.(link)

        References: #2383

      • [bug] [py3k] Fixed inappropriate usage of util.py3k flag and renamed it to util.py3k_warning, since this flag is intended to detect the -3 flag series of import restrictions only.(link)

        References: #2348

      • [bug] [examples] Fixed large_collection.py to close the session before dropping tables.(link)

        References: #2346

      0.7.4

      Released: Fri Dec 09 2011

      orm

      • [orm] [feature] polymorphic_on now accepts many new kinds of values:

        • standalone expressions that aren’t otherwise mapped
        • column_property() objects
        • string names of any column_property() or attribute name of a mapped Column

        The docs include an example using the case() construct, which is likely to be a common construct used here.

        Standalone expressions in polymorphic_on propagate to single-table inheritance subclasses so that they are used in the WHERE/JOIN clause to limit rows to that subclass as is the usual behavior.

        (link)

        References: #2345, #2238
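
        A minimal declarative sketch along the lines of the case() example mentioned above; the Employee/Manager classes and the discriminator column are illustrative assumptions:

            from sqlalchemy import Column, Integer, String, case
            from sqlalchemy.ext.declarative import declarative_base

            Base = declarative_base()

            class Employee(Base):
                __tablename__ = "employee"
                id = Column(Integer, primary_key=True)
                discriminator = Column(String(50))

                __mapper_args__ = {
                    # a standalone expression used as the polymorphic discriminator
                    "polymorphic_on": case(
                        [(discriminator == "manager", "manager")],
                        else_="employee",
                    ),
                    "polymorphic_identity": "employee",
                }

            class Manager(Employee):
                __mapper_args__ = {"polymorphic_identity": "manager"}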

      • [orm] [feature] IdentitySet supports the - operator, behaving the same as difference(); handy when dealing with Session.dirty etc.(link)

        References: #2301

      • [orm] [feature] Added new value for Column autoincrement called “ignore_fk”, can be used to force autoincrement on a column that’s still part of a ForeignKeyConstraint. New example in the relationship docs illustrates its use.(link)

      • [orm] [bug] Fixed backref behavior when “popping” the value off of a many-to-one in response to a removal from a stale one-to-many - the operation is skipped, since the many-to-one has since been updated.(link)

        References: #2315

      • [orm] [bug] After some years of not doing this, added more granularity to the “is X a parent of Y” functionality, which is used when determining if the FK on “Y” needs to be “nulled out” as well as if “Y” should be deleted with delete-orphan cascade. The test now takes into account the Python identity of the parent as well its identity key, to see if the last known parent of Y is definitely X. If a decision can’t be made, a StaleDataError is raised. The conditions where this error is raised are fairly rare, requiring that the previous parent was garbage collected, and previously could very well inappropriately update/delete a record that’s since moved onto a new parent, though there may be some cases where “silent success” occurred previously that will now raise in the face of ambiguity. Expiring “Y” resets the “parent” tracker, meaning X.remove(Y) could then end up deleting Y even if X is stale, but this is the same behavior as before; it’s advised to expire X also in that case.(link)

        References: #2264

      • [orm] [bug] fixed inappropriate evaluation of user-mapped object in a boolean context within query.get(). Also in 0.6.9.(link)

        References: #2310

      • [orm] [bug] Added missing comma to PASSIVE_RETURN_NEVER_SET symbol(link)

        References: #2304

      • [orm] [bug] Cls.column.collate(“some collation”) now works. Also in 0.6.9(link)

        References: #1776

      • [orm] [bug] the value of a composite attribute is now expired after an insert or update operation, instead of regenerated in place. This ensures that a column value which is expired within a flush will be loaded first, before the composite is regenerated using that value.(link)

        References: #2309

      • [orm] [bug] The fix above also emits the “refresh” event when the composite value is loaded on access, even if all column values were already present, as is appropriate. This fixes the “mutable” extension, which relies upon the “load” event to ensure the _parents dictionary is up to date. Thanks to Scott Torborg for the test case here.(link)

        References: #2309, #2308

      • [orm] [bug] Fixed bug whereby a subclass of a subclass using concrete inheritance in conjunction with the new ConcreteBase or AbstractConcreteBase would fail to apply the subclasses deeper than one level to the “polymorphic loader” of each base(link)

        References: #2312

      • [orm] [bug] Fixed bug whereby a subclass of a subclass using the new AbstractConcreteBase would fail to acquire the correct “base_mapper” attribute when the “base” mapper was generated, thereby causing failures later on.(link)

        References: #2312

      • [orm] [bug] Fixed bug whereby column_property() created against ORM-level column could be treated as a distinct entity when producing certain kinds of joined-inh joins.(link)

        References: #2316

      • [orm] [bug] Fixed the error formatting raised when a tuple is inadvertently passed to session.query(). Also in 0.6.9.(link)

        References: #2297

      • [orm] [bug] Calls to query.join() to a single-table inheritance subclass are now tracked, and are used to eliminate the additional WHERE.. IN criterion normally tacked on with single table inheritance, since the join should accommodate it. This allows OUTER JOIN to a single table subclass to produce the correct results, and overall will produce fewer WHERE criterion when dealing with single table inheritance joins.(link)

        References: #2328

      • [orm] [bug] __table_args__ can now be passed as an empty tuple as well as an empty dict. Thanks to Fayaz Yusuf Khan for the patch.(link)

        References: #2339

      • [orm] [bug] Updated warning message when setting delete-orphan without delete to no longer refer to 0.6, as we never got around to upgrading this to an exception. Ideally this might be better as an exception but it’s not critical either way.(link)

        References: #2325

      • [orm] [bug] Fixed bug in get_history() when referring to a composite attribute that has no value; added coverage for get_history() regarding composites which is otherwise just a userland function.(link)

      engine

      • [engine] [bug] Fixed bug whereby transaction.rollback() would throw an error on an invalidated connection if the transaction were a two-phase or savepoint transaction. For plain transactions, rollback() is a no-op if the connection is invalidated, so while it wasn’t 100% clear if it should be a no-op, at least now the interface is consistent.(link)

        References: #2317

      sql

      • [sql] [feature] The update() construct can now accommodate multiple tables in the WHERE clause, which will render an “UPDATE..FROM” construct, recognized by Postgresql and MSSQL. When compiled on MySQL, will instead generate “UPDATE t1, t2, ..”. MySQL additionally can render against multiple tables in the SET clause, if Column objects are used as keys in the “values” parameter or generative method.(link)

        References: #2166, #1944
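
        A minimal sketch of a multi-table UPDATE; the users/addresses tables are illustrative assumptions:

            from sqlalchemy import MetaData, Table, Column, Integer, String, ForeignKey

            metadata = MetaData()
            users = Table("users", metadata,
                          Column("id", Integer, primary_key=True),
                          Column("name", String(50)))
            addresses = Table("addresses", metadata,
                              Column("id", Integer, primary_key=True),
                              Column("user_id", Integer, ForeignKey("users.id")),
                              Column("email", String(50)))

            # referencing a second table in the WHERE clause renders "UPDATE .. FROM"
            # on Postgresql/MSSQL, or "UPDATE users, addresses .." on MySQL
            stmt = (users.update()
                    .values(name="fred")
                    .where(users.c.id == addresses.c.user_id)
                    .where(addresses.c.email == "fred@example.com"))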

      • [sql] [feature] Added accessor to types called “python_type”, returns the rudimentary Python type object for a particular TypeEngine instance, if known, else raises NotImplementedError.(link)

        References: #77
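
        A minimal sketch of the new accessor:

            from sqlalchemy import Integer, DateTime

            Integer().python_type     # int
            DateTime().python_type    # datetime.datetime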

      • [sql] [bug] Made some adjustments to the earlier change regarding the “from” list on a select(). The _froms collection is no longer memoized, as this simplifies various use cases and removes the need for a “warning” if a column is attached to a table after it was already used in an expression - the select() construct will now always produce the correct expression. There’s probably no real-world performance hit here; select() objects are almost always made ad-hoc, and systems that wish to optimize the re-use of a select() would be using the “compiled_cache” feature. A hit which would occur when calling select.bind has been reduced, but the vast majority of users shouldn’t be using “bound metadata” anyway :).(link)

        References: #2316, #2261

      • [sql] [bug] A further tweak to the above fix, so that generative methods work a bit better off of cloned selects (this is almost a non-use case though). In particular this allows with_only_columns() to behave more consistently. Added additional documentation to with_only_columns() to clarify the expected behavior, which changed as a result of this fix.(link)

        References: #2261, #2319

      schema

      • [schema] [feature] Added new support for remote “schemas”:(link)

      • [schema] [feature] The “extend_existing” flag on Table now allows for the reflection process to take effect for a Table object that’s already been defined; when autoload=True and extend_existing=True are both set, the full set of columns will be reflected from the Table which will then overwrite those columns already present, rather than no activity occurring. Columns that are present directly in the autoload run will be used as always, however.(link)

        References: #1410
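
        A minimal sketch, assuming a users table that already exists in the target database and an engine pointing at it:

            from sqlalchemy import Table, Column, Integer, MetaData

            metadata = MetaData()

            # partial definition made earlier
            users = Table("users", metadata, Column("id", Integer, primary_key=True))

            # re-declare with reflection; the full set of reflected columns overwrites
            # those already present, while explicitly declared columns are used as-is
            users = Table("users", metadata,
                          autoload=True, autoload_with=engine,
                          extend_existing=True)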

      • [schema] [bug] Fixed bug whereby TypeDecorator would return a stale value for _type_affinity, when using a TypeDecorator that “switches” types, like the CHAR/UUID type.(link)

      • [schema] [bug] Fixed bug whereby “order_by=’foreign_key’” option to Inspector.get_table_names wasn’t implementing the sort properly, replaced with the existing sort algorithm(link)

      • [schema] [bug] the “name” of a column-level CHECK constraint, if present, is now rendered in the CREATE TABLE statement using “CONSTRAINT <name> CHECK <expression>”.(link)

        References: #2305

      • [schema] MetaData() accepts “schema” and “quote_schema” arguments, which will be applied to the same-named arguments of a Table or Sequence which leaves these at their default of None.(link)

      • [schema] Sequence accepts “quote_schema” argument(link)

      • [schema] tometadata() for Table will use the “schema” of the incoming MetaData for the new Table if the schema argument is explicitly “None”(link)

      • [schema] Added CreateSchema and DropSchema DDL constructs - these accept just the string name of a schema and a “quote” flag.(link)
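
        A minimal sketch, assuming an engine pointed at a backend that supports schemas; the schema name is illustrative:

            from sqlalchemy.schema import CreateSchema, DropSchema

            engine.execute(CreateSchema("archive"))
            engine.execute(DropSchema("archive"))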

      • [schema] When using default “schema” with MetaData, ForeignKey will also assume the “default” schema when locating remote table. This allows the “schema” argument on MetaData to be applied to any set of Table objects that otherwise don’t have a “schema”.(link)

      • [schema] a “has_schema” method has been implemented on dialect, but only works on Postgresql so far. Courtesy Manlio Perillo.(link)

        References: #1679

      postgresql

      • [postgresql] [feature] Added create_type constructor argument to pg.ENUM. When False, no CREATE/DROP or checking for the type will be performed as part of a table create/drop event; only the create()/drop() methods called directly will do this. Helps with Alembic “offline” scripts.(link)
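
        A minimal sketch; the enum values and type name are illustrative:

            from sqlalchemy.dialects.postgresql import ENUM

            # no CREATE TYPE / DROP TYPE is emitted at table create/drop time;
            # call status_type.create(engine) / status_type.drop(engine) explicitly
            status_type = ENUM("draft", "published", name="status", create_type=False)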

      • [postgresql] [bug] Postgresql dialect memoizes that an ENUM of a particular name was processed during a create/drop sequence. This allows a create/drop sequence to work without any calls to “checkfirst”, and also means with “checkfirst” turned on it only needs to check for the ENUM once.(link)

        References: #2311

      mysql

      • [mysql] [bug] Unicode adjustments allow latest pymysql (post 0.4) to pass 100% on Python 2.(link)

      mssql

      • [mssql] [feature] lifted the restriction on SAVEPOINT for SQL Server. All tests pass using it, it’s not known if there are deeper issues however.(link)

        References: #822

      • [mssql] [bug] repaired the with_hint() feature which wasn’t implemented correctly on MSSQL - usually used for the “WITH (NOLOCK)” hint (which you shouldn’t be using anyway ! use snapshot isolation instead :) )(link)

        References: #2336

      • [mssql] [bug] use new pyodbc version detection for _need_decimal_fix option.(link)

        References: #2318

      • [mssql] [bug] don’t cast “table name” as NVARCHAR on SQL Server 2000. Still mostly in the dark what incantations are needed to make PyODBC work fully with FreeTDS 0.91 here, however.(link)

        References: #2343

      • [mssql] [bug] Decode incoming values when retrieving list of index names and the names of columns within those indexes.(link)

        References: #2269

      misc

      • [feature] [ext] Added an example to the hybrid docs of a “transformer” - a hybrid that returns a query-transforming callable in combination with a custom comparator. Uses a new method on Query called with_transformation(). The use case here is fairly experimental, but only adds one line of code to Query.(link)

      • [bug] [pyodbc] pyodbc-based dialects now parse the pyodbc version string accurately, as far as observed pyodbc strings go, including such gems as “py3-3.0.1-beta4”.(link)

        References: #2318

      • [bug] [ext] the @compiles decorator raises an informative error message when no “default” compilation handler is present, rather than KeyError.(link)

      • [bug] [examples] Fixed bug in history_meta.py example where the “unique” flag was not removed from a single-table-inheritance subclass which generates columns to put up onto the base.(link)

      0.7.3

      Released: Sun Oct 16 2011

      general

      • [general] Adjusted the “importlater” mechanism, which is used internally to resolve import cycles, such that the usage of __import__ is completed when the import of sqlalchemy or sqlalchemy.orm is done, thereby avoiding any usage of __import__ after the application starts new threads, fixes. Also in 0.6.9.(link)

        References: #2279

      orm

      • [orm] Improved query.join() such that the “left” side can more flexibly be a non-ORM selectable, such as a subquery. A selectable placed in select_from() will now be used as the left side, favored over implicit usage of a mapped entity. If the join still fails based on lack of foreign keys, the error message includes this detail. Thanks to brianrhude on IRC for the test case.(link)

        References: #2298

      • [orm] Added after_soft_rollback() Session event. This event fires unconditionally whenever rollback() is called, regardless of if an actual DBAPI level rollback occurred. This event is specifically designed to allow operations with the Session to proceed after a rollback when the Session.is_active is True.(link)

        References: #2241

      • [orm] added “adapt_on_names” boolean flag to orm.aliased() construct. Allows an aliased() construct to link the ORM entity to a selectable that contains aggregates or other derived forms of a particular attribute, provided the name is the same as that of the entity mapped column.(link)

      • [orm] Added new flag expire_on_flush=False to column_property(), marks those properties that would otherwise be considered to be “readonly”, i.e. derived from SQL expressions, to retain their value after a flush has occurred, including if the parent object itself was involved in an update.(link)

      • [orm] Enhanced the instrumentation in the ORM to support Py3K’s new argument style of “required kw arguments”, i.e. fn(a, b, *, c, d), fn(a, b, *args, c, d). Argument signatures of mapped object’s __init__ method will be preserved, including required kw rules.(link)

        References: #2237

      • [orm] Fixed bug in unit of work whereby detection of “cycles” among classes in highly interlinked patterns would not produce a deterministic result; thereby sometimes missing some nodes that should be considered cycles and causing further issues down the road. Note this bug is in 0.6 also; not backported at the moment.(link)

        References: #2282

      • [orm] Fixed a variety of synonym()-related regressions from 0.6:

        • making a synonym against a synonym now works.
        • synonyms made against a relationship() can be passed to query.join(), options sent to query.options(), passed by name to query.with_parent().
        (link)

      • [orm] Fixed bug whereby mapper.order_by attribute would be ignored in the “inner” query within a subquery eager load. Also in 0.6.9.(link)

        References: #2287

      • [orm] Identity map .discard() uses dict.pop(key, None) internally instead of “del” to avoid KeyError/warning during a non-determinate gc teardown.(link)

        References: #2267

      • [orm] Fixed regression in new composite rewrite where deferred=True option failed due to missing import(link)

        References: #2253

      • [orm] Reinstated “comparator_factory” argument to composite(), removed when 0.7 was released.(link)

        References: #2248

      • [orm] Fixed bug in query.join() which would occur in a complex multiple-overlapping path scenario, where the same table could be joined to twice. Thanks much to Dave Vitek for the excellent fix here.(link)

        References: #2247

      • [orm] Query will convert an OFFSET of zero when slicing into None, so that needless OFFSET clauses are not invoked.(link)

      • [orm] Repaired edge case where mapper would fail to fully update internal state when a relationship on a new mapper would establish a backref on the first mapper.(link)

      • [orm] Fixed bug whereby if __eq__() was redefined, a relationship many-to-one lazyload would hit the __eq__() and fail. Does not apply to 0.6.9.(link)

        References: #2260

      • [orm] Calling class_mapper() and passing in an object that is not a “type” (i.e. a class that could potentially be mapped) now raises an informative ArgumentError, rather than UnmappedClassError.(link)

        References: #2196

      • [orm] New event hook, MapperEvents.after_configured(). Called after a configure() step has completed and mappers were in fact affected. Theoretically this event is called once per application, unless new mappings are constructed after existing ones have been used already.(link)

      • [orm] When an open Session is garbage collected, the objects within it which remain are considered detached again when they are add()-ed to a new Session. This is accomplished by an extra check that the previous “session_key” doesn’t actually exist among the pool of Sessions.(link)

        References: #2281

      • [orm] New declarative features:

        • __declare_last__() method, establishes an event listener for the class method that will be called when mappers are completed with the final “configure” step.
        • __abstract__ flag. The class will not be mapped at all when this flag is present on the class.
        • New helper classes ConcreteBase, AbstractConcreteBase. Allow concrete mappings using declarative which automatically set up the “polymorphic_union” when the “configure” mapper step is invoked.
        • The mapper itself has semi-private methods that allow the “with_polymorphic” selectable to be assigned to the mapper after it has already been configured.
        (link)

        References: #2239

      • [orm] Declarative will warn when a subclass’ base uses @declared_attr for a regular column - this attribute does not propagate to subclasses.(link)

        References: #2283

      • [orm] The integer “id” used to link a mapped instance with its owning Session is now generated by a sequence generation function rather than id(Session), to eliminate the possibility of recycled id() values causing an incorrect result; there is no longer a need to check that the object is actually in the session.(link)

        References: #2280

      • [orm] Behavioral improvement: empty conjunctions such as and_() and or_() will be flattened in the context of an enclosing conjunction, i.e. and_(x, or_()) will produce ‘X’ and not ‘X AND ()’.(link)

        References: #2257

      • [orm] Fixed bug regarding calculation of “from” list for a select() element. The “from” calc is now delayed, so that if the construct uses a Column object that is not yet attached to a Table, but is later associated with a Table, it generates SQL using the table as a FROM. This change impacted fairly deeply the mechanics of how the FROM list as well as the “correlates” collection is calculated, as some “clause adaption” schemes (these are used very heavily in the ORM) were relying upon the fact that the “froms” collection would typically be cached before the adaption completed. The rework allows it such that the “froms” collection can be cleared and re-generated at any time.(link)

        References: #2261

      • [orm] Fixed bug whereby with_only_columns() method of Select would fail if a selectable were passed. Also in 0.6.9.(link)

        References: #2270

      engine

      • [engine] The recreate() method in all pool classes uses self.__class__ to get at the type of pool to produce, in the case of subclassing. Note there’s no usual need to subclass pools.(link)

        References: #2254

      • [engine] Improvement to multi-param statement logging, long lists of bound parameter sets will be compressed with an informative indicator of the compression taking place. Exception messages use the same improved formatting.(link)

        References: #2243

      • [engine] Added optional “sa_pool_key” argument to pool.manage(dbapi).connect() so that serialization of args is not necessary.(link)

      • [engine] The entry point resolution supported by create_engine() now supports resolution of individual DBAPI drivers on top of a built-in or entry point-resolved dialect, using the standard ‘+’ notation - it’s converted to a ‘.’ before being resolved as an entry point.(link)

        References: #2286

      • [engine] Added an exception catch + warning for the “return unicode detection” step within connect, allows databases that crash on NVARCHAR to continue initializing, assuming no NVARCHAR type implemented.(link)

        References: #2299

      schema

      • [schema] Modified Column.copy() to use _constructor(), which defaults to self.__class__, in order to create the new object. This allows easier support of subclassing Column.(link)

        References: #2284

      • [schema] Added a slightly nicer __repr__() to SchemaItem classes. Note the repr here can’t fully support the “repr is the constructor” idea since schema items can be very deeply nested/cyclical, have late initialization of some things, etc.(link)

        References: #2223

      postgresql

      • [postgresql] Added “postgresql_using” argument to Index(), produces USING clause to specify index implementation for PG. Thanks to Ryan P. Kelly for the patch.(link)

        References: #2290

      • [postgresql] Added client_encoding parameter to create_engine() when the postgresql+psycopg2 dialect is used; calls the psycopg2 set_client_encoding() method with the value upon connect.(link)

        References: #1839
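
        A minimal sketch; the connection URL is an illustrative assumption:

            from sqlalchemy import create_engine

            engine = create_engine(
                "postgresql+psycopg2://scott:tiger@localhost/test",
                client_encoding="utf8",
            )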

      • [postgresql] Fixed bug whereby the same modified index behavior in PG 9 affected primary key reflection on a renamed column. Also in 0.6.9.(link)

        References: #2291, #2141

      • [postgresql] Reflection functions for Table and Sequence are no longer case insensitive. Names can differ only in case and will be correctly distinguished.(link)

        References: #2256

      • [postgresql] Use an atomic counter as the “random number” source for server side cursor names; conflicts have been reported in rare cases.(link)

      • [postgresql] Narrowed the assumption made when reflecting a foreign-key referenced table with schema in the current search path; an explicit schema will be applied to the referenced table only if it actually matches that of the referencing table, which also has an explicit schema. Previously it was assumed that “current” schema was synonymous with the full search_path.(link)

        References: #2249

      mysql

      • [mysql] a CREATE TABLE will put the COLLATE option after CHARSET, which appears to be part of MySQL’s arbitrary rules regarding if it will actually work or not. Also in 0.6.9.(link)

        References: #2225

      • [mysql] Added mysql_length parameter to Index construct, specifies “length” for indexes.(link)

        References: #2293
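
        A minimal sketch, assuming a Table mytable with a string column data:

            from sqlalchemy import Index

            # index only a 10-character prefix of the column
            Index("ix_mytable_data", mytable.c.data, mysql_length=10)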

      sqlite

      • [sqlite] Ensured that the same ValueError is raised for illegal date/time/datetime string parsed from the database regardless of whether C extensions are in use or not.(link)

      mssql

      • [mssql] Changes to attempt support of FreeTDS 0.91 with Pyodbc. This includes that string binds are sent as Python unicode objects when FreeTDS 0.91 is detected, and a CAST(? AS NVARCHAR) is used when we detect for a table. However, I’d continue to characterize Pyodbc + FreeTDS 0.91 behavior as pretty crappy, there are still many queries such as used in reflection which cause a core dump on Linux, and it is not really usable at all on OSX, MemoryErrors abound and just plain broken unicode support.(link)

        References: #2273

      • [mssql] The behavior of =/!= when comparing a scalar select to a value will no longer produce IN/NOT IN as of 0.8; this behavior is a little too heavy handed (use in_() if you want to emit IN) and now emits a deprecation warning. To get the 0.8 behavior immediately and remove the warning, a compiler recipe is given at http://www.sqlalchemy.org/docs/07/dialects/mssql.html#scalar-select-comparisons to override the behavior of visit_binary().(link)

        References: #2277

      • [mssql] “0” is accepted as an argument for limit() which will produce “TOP 0”.(link)

        References: #2222

      oracle

      • [oracle] Fixed ReturningResultProxy for zxjdbc dialect.. Regression from 0.6.(link)

        References: #2272

      • [oracle] The String type now generates VARCHAR2 on Oracle which is recommended as the default VARCHAR. Added an explicit VARCHAR2 and NVARCHAR2 to the Oracle dialect as well. Using NVARCHAR still generates “NVARCHAR2” - there is no “NVARCHAR” on Oracle - this remains a slight breakage of the “uppercase types always give exactly that” policy. VARCHAR still generates “VARCHAR”, keeping with the policy. If Oracle were to ever define “VARCHAR” as something different as they claim (IMHO this will never happen), the type would be available.(link)

        References: #2252

      misc

      • [types] Extra keyword arguments to the base Float type beyond “precision” and “asdecimal” are ignored; added a deprecation warning here and additional docs.(link)

        References: #2258

      • [ext] SQLSoup will not be included in version 0.8 of SQLAlchemy; while useful, we would like to keep SQLAlchemy itself focused on one ORM usage paradigm. SQLSoup will hopefully soon be superseded by a third party project.(link)

        References: #2262

      • [ext] Added local_attr, remote_attr, attr accessors to AssociationProxy, providing quick access to the proxied attributes at the class level.(link)

        References: #2236

      • [ext] Changed the update() method on association proxy dictionary to use a duck typing approach, i.e. checks for “keys”, to discern between update({}) and update((a, b)). Previously, passing a dictionary that had tuples as keys would be misinterpreted as a sequence.(link)

        References: #2275

      • [examples] Adjusted dictlike-polymorphic.py example to apply the CAST such that it works on PG, other databases. Also in 0.6.9.(link)

        References: #2266

      0.7.2

      Released: Sun Jul 31 2011

      orm

      • [orm] Feature enhancement: joined and subquery loading will now traverse already-present related objects and collections in search of unpopulated attributes throughout the scope of the eager load being defined, so that the eager loading that is specified via mappings or query options unconditionally takes place for the full depth, populating whatever is not already populated. Previously, this traversal would stop if a related object or collection were already present leading to inconsistent behavior (though would save on loads/cycles for an already-loaded graph). For a subqueryload, this means that the additional SELECT statements emitted by subqueryload will invoke unconditionally, no matter how much of the existing graph is already present (hence the controversy). The previous behavior of “stopping” is still in effect when a query is the result of an attribute-initiated lazyload, as otherwise an “N+1” style of collection iteration can become needlessly expensive when the same related object is encountered repeatedly. There’s also an as-yet-not-public generative Query method _with_invoke_all_eagers() which selects old/new behavior(link)

        References: #2213

      • [orm] A rework of “replacement traversal” within the ORM as it alters selectables to be against aliases of things (i.e. clause adaption) includes a fix for multiply-nested any()/has() constructs against a joined table structure.(link)

        References: #2195

      • [orm] Fixed bug where query.join() + aliased=True from a joined-inh structure to itself on relationship() with join condition on the child table would convert the lead entity into the joined one inappropriately. Also in 0.6.9.(link)

        References: #2234

      • [orm] Fixed regression from 0.6 where Session.add() against an object which contained None in a collection would raise an internal exception. Reverted this to 0.6’s behavior which is to accept the None but obviously nothing is persisted. Ideally, collections with None present or on append() should at least emit a warning, which is being considered for 0.8.(link)

        References: #2205

      • [orm] Load of a deferred() attribute on an object where row can’t be located raises ObjectDeletedError instead of failing later on; improved the message in ObjectDeletedError to include other conditions besides a simple “delete”.(link)

        References: #2191

      • [orm] Fixed regression from 0.6 where a get history operation on some relationship() based attributes would fail when a lazyload would emit; this could trigger within a flush() under certain conditions. Thanks to the user who submitted the great test for this.(link)

        References: #2224

      • [orm] Fixed bug apparent only in Python 3 whereby sorting of persistent + pending objects during flush would produce an illegal comparison, if the persistent object primary key is not a single integer. Also in 0.6.9(link)

        References: #2228

      • [orm] Fixed bug whereby the source clause used by query.join() would be inconsistent if against a column expression that combined multiple entities together. Also in 0.6.9(link)

        References: #2197

      • [orm] Fixed bug whereby if a mapped class redefined __hash__() or __eq__() to something non-standard, which is a supported use case as SQLA should never consult these, the methods would be consulted if the class was part of a “composite” (i.e. non-single-entity) result set. Also in 0.6.9.(link)

        References: #2215

      • [orm] Added public attribute ”.validators” to Mapper, an immutable dictionary view of all attributes that have been decorated with the @validates decorator. courtesy Stefano Fontanelli(link)

        References: #2240

      • [orm] Fixed subtle bug that caused SQL to blow up if: column_property() against subquery + joinedload + LIMIT + order by the column property() occurred. Also in 0.6.9.(link)

        References: #2188

      • [orm] The join condition produced by with_parent as well as when using a “dynamic” relationship against a parent will generate unique bindparams, rather than incorrectly repeating the same bindparam. Also in 0.6.9.(link)

        References: #2207

      • [orm] Added the same “columns-only” check to mapper.polymorphic_on as used when receiving user arguments to relationship.order_by, foreign_keys, remote_side, etc.(link)

      • [orm] Fixed bug whereby comparison of column expression to a Query() would not call as_scalar() on the underlying SELECT statement to produce a scalar subquery, in the way that occurs if you called it on Query().subquery().(link)

        References: #2190

      • [orm] Fixed declarative bug where a class inheriting from a superclass of the same name would fail due to an unnecessary lookup of the name in the _decl_class_registry.(link)

        References: #2194

      • [orm] Repaired the “no statement condition” assertion in Query which would attempt to raise if a generative method were called after from_statement() were called. Also in 0.6.9.(link)

        References: #2199

      engine

      • [engine] Context manager provided by Connection.begin() will issue rollback() if the commit() fails, not just if an exception occurs.(link)

      • [engine] Use urllib.parse_qsl() in Python 2.6 and above, no deprecation warning about cgi.parse_qsl()(link)

        References: #1682

      • [engine] Added mixin class sqlalchemy.ext.DontWrapMixin. User-defined exceptions of this type are never wrapped in StatementException when they occur in the context of a statement execution.(link)

      • [engine] StatementException wrapping will display the original exception class in the message.(link)

      • [engine] Failures on connect which raise dbapi.Error will forward the error to dialect.is_disconnect() and set the “connection_invalidated” flag if the dialect knows this to be a potentially “retryable” condition. Only Oracle ORA-01033 implemented for now.(link)

        References: #2201

      sql

      • [sql] Fixed two subtle bugs involving column correspondence in a selectable, one with the same labeled subquery repeated, the other when the label has been “grouped” and loses itself.(link)

        References: #2188

      schema

      • [schema] New feature: with_variant() method on all types. Produces an instance of Variant(), a special TypeDecorator which will select the usage of a different type based on the dialect in use.(link)

        References: #2187
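
        A minimal sketch, loosely following the documented pattern; the charset value is illustrative:

            from sqlalchemy import String
            from sqlalchemy.dialects import mysql

            # generic VARCHAR everywhere, the MySQL-specific VARCHAR only on the mysql dialect
            string_type = String(255).with_variant(
                mysql.VARCHAR(255, charset="utf8"), "mysql"
            )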

      • [schema] Added an informative error message when ForeignKeyConstraint refers to a column name in the parent that is not found. Also in 0.6.9.(link)

      • [schema] Fixed bug whereby adaptation of old append_ddl_listener() function was passing unexpected **kw through to the Table event. Table gets no kws, the MetaData event in 0.6 would get “tables=somecollection”, this behavior is preserved.(link)

        References: #2206

      • [schema] Fixed bug where “autoincrement” detection on Table would fail if the type had no “affinity” value, in particular this would occur when using the UUID example on the site that uses TypeEngine as the “impl”.(link)

      • [schema] Added an improved repr() to TypeEngine objects that will only display constructor args which are positional or kwargs that deviate from the default.(link)

        References: #2209

      postgresql

      • [postgresql] Added new “postgresql_ops” argument to Index, allows specification of PostgreSQL operator classes for indexed columns. Courtesy Filip Zyzniewski.(link)

        References: #2198
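
        A minimal sketch, assuming a Table mytable with a text column data; the operator class is illustrative:

            from sqlalchemy import Index

            Index("ix_mytable_data", mytable.c.data,
                  postgresql_ops={"data": "text_pattern_ops"})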

      mysql

      • [mysql] Fixed OurSQL dialect to use ansi-neutral quote symbol “’” for XA commands instead of ‘”’. Also in 0.6.9.(link)

        References: #2186

      sqlite

      • [sqlite] SQLite dialect no longer strips quotes off of reflected default value, allowing a round trip CREATE TABLE to work. This is consistent with other dialects that also maintain the exact form of the default.(link)

        References: #2189

      mssql

      • [mssql] Adjusted the pyodbc dialect such that bound values are passed as bytes and not unicode if the “Easysoft” unix drivers are detected. This is the same behavior as occurs with FreeTDS. Easysoft appears to segfault if Python unicodes are passed under certain circumstances.(link)

      oracle

      • [oracle] Added ORA-00028 to disconnect codes, use cx_oracle _Error.code to get at the code. Also in 0.6.9.(link)

        References: #2200

      • [oracle] Added ORA-01033 to disconnect codes, which can be caught during a connection event.(link)

        References: #2201

      • [oracle] repaired the oracle.RAW type which did not generate the correct DDL. Also in 0.6.9.(link)

        References: #2220

      • [oracle] added CURRENT to reserved word list. Also in 0.6.9.(link)

        References: #2212

      • [oracle] Fixed bug in the mutable extension whereby if the same type were used twice in one mapping, the attributes beyond the first would not get instrumented.(link)

      • [oracle] Fixed bug in the mutable extension whereby if None or a non-corresponding type were set, an error would be raised. None is now accepted which assigns None to all attributes, illegal values raise ValueError.(link)

      misc

      • [examples] Repaired the examples/versioning test runner to not rely upon SQLAlchemy test libs, nosetests must be run from within examples/versioning to get around setup.cfg breaking it.(link)

      • [examples] Tweak to examples/versioning to pick the correct foreign key in a multi-level inheritance situation.(link)

      • [examples] Fixed the attribute shard example to check for bind param callable correctly in 0.7 style.(link)

      0.7.1

      Released: Sun Jun 05 2011

      general

      • [general] Added a workaround for Python bug 7511 where failure of C extension build does not raise an appropriate exception on Windows 64 bit + VC express(link)

        References: #2184

      orm

      • [orm] “delete-orphan” cascade is now allowed on self-referential relationships - this since SQLA 0.7 no longer enforces “parent with no child” at the ORM level; this check is left up to foreign key nullability. Related to(link)

        References: #1912

      • [orm] Repaired new “mutable” extension to propagate events to subclasses correctly; don’t create multiple event listeners for subclasses either.(link)

        References: #2180

      • [orm] Modify the text of the message which occurs when the “identity” key isn’t detected on flush, to include the common cause that the Column isn’t set up to detect auto-increment correctly. Also in 0.6.8.(link)

        References: #2170

      • [orm] Fixed bug where transaction-level “deleted” collection wouldn’t be cleared of expunged states, raising an error if they later became transient. Also in 0.6.8.(link)

        References: #2182

      engine

      • [engine] Deprecate schema/SQL-oriented methods on Connection/Engine that were never well known and are redundant: reflecttable(), create(), drop(), text(), engine.func(link)

      • [engine] Adjusted the __contains__() method of a RowProxy result row such that no exception throw is generated internally; NoSuchColumnError() also will generate its message regardless of whether or not the column construct can be coerced to a string. Also in 0.6.8.(link)

        References: #2178

      sql

      • [sql] Fixed bug whereby metadata.reflect(bind) would close a Connection passed as a bind argument. Regression from 0.6.(link)

      • [sql] Streamlined the process by which a Select determines what’s in it’s ‘.c’ collection. Behaves identically, except that a raw ClauseList() passed to select([]) (which is not a documented case anyway) will now be expanded into its individual column elements instead of being ignored.(link)

      postgresql

      • [postgresql] Some unit test fixes regarding numeric arrays, MATCH operator. A potential floating-point inaccuracy issue was fixed, and certain tests of the MATCH operator only execute within an EN-oriented locale for now. Also in 0.6.8.(link)

        References: #2175

      mysql

      • [mysql] Unit tests pass 100% on MySQL installed on windows.(link)

      • [mysql] Removed the “adjust casing” step that would fail when reflecting a table on MySQL on windows with a mixed case name. After some experimenting with a windows MySQL server, it’s been determined that this step wasn’t really helping the situation much; MySQL does not return FK names with proper casing on non-windows platforms either, and removing the step at least allows the reflection to act more like it does on other OSes. A warning here has been considered but its difficult to determine under what conditions such a warning can be raised, so punted on that for now - added some docs instead.(link)

        References: #2181

      • [mysql] supports_sane_rowcount will be set to False if using MySQLdb and the DBAPI doesn’t provide the constants.CLIENT module.(link)

      sqlite

      • [sqlite] Accept None from cursor.fetchone() when “PRAGMA read_uncommitted” is called to determine current isolation mode at connect time and default to SERIALIZABLE; this to support SQLite versions pre-3.3.0 that did not have this feature.(link)

        References: #2173

      0.7.0

      Released: Fri May 20 2011

      orm

      • [orm] Fixed regression introduced in 0.7b4 (!) whereby query.options(someoption(“nonexistent name”)) would fail to raise an error. Also added additional error catching for cases where the option would try to build off a column-based element; further fixed up some of the tailored error messages.(link)

        References: #2069

      • [orm] query.count() emits “count(*)” instead of “count(1)”.(link)

        References: #2162

      • [orm] Fine tuning of Query clause adaptation when from_self(), union(), or other “select from myself” operation, such that plain SQL expression elements added to filter(), order_by() etc. which are present in the nested “from myself” query will be adapted in the same way an ORM expression element will, since these elements are otherwise not easily accessible.(link)

        References: #2155

      • [orm] Fixed bug where determination of “self referential” relationship would fail with no workaround for joined-inh subclass related to itself, or joined-inh subclass related to a subclass of that with no cols in the sub-sub class in the join condition. Also in 0.6.8.(link)

        References: #2149

      • [orm] mapper() will ignore non-configured foreign keys to unrelated tables when determining inherit condition between parent and child class, but will raise as usual for unresolved columns and table names regarding the inherited table. This is an enhanced generalization of behavior that was already applied to declarative previously. 0.6.8 has a more conservative version of this which doesn’t fundamentally alter how join conditions are determined.(link)

        References: #2153

      • [orm] It is an error to call query.get() when the given entity is not a single, full class entity or mapper (i.e. a column). This is a deprecation warning in 0.6.8.(link)

        References: #2144

      • [orm] Fixed a potential KeyError which under some circumstances could occur with the identity map.(link)

        References: #2148

      • [orm] added Query.with_session() method, switches Query to use a different session.(link)

      • [orm] horizontal shard query should use execution options per connection.(link)

        References: #2131

      • [orm] a non_primary mapper will inherit the _identity_class of the primary mapper. This so that a non_primary established against a class that’s normally in an inheritance mapping will produce results that are identity-map compatible with that of the primary mapper (also in 0.6.8)(link)

        References: #2151

      • [orm] Fixed the error message emitted for “can’t execute syncrule for destination column ‘q’; mapper ‘X’ does not map this column” to reference the correct mapper. Also in 0.6.8.(link)

        References: #2163

      • [orm] polymorphic_union() gets a “cast_nulls” option, disables the usage of CAST when it renders the labeled NULL columns.(link)

        References: #1502

      • [orm] polymorphic_union() renders the columns in their original table order, as according to the first table/selectable in the list of polymorphic unions in which they appear. (which is itself an unordered mapping unless you pass an OrderedDict).(link)

      • [orm] Fixed bug whereby mapper mapped to an anonymous alias would fail if logging were used, due to unescaped % sign in the alias name. Also in 0.6.8.(link)

        References: #2171

      sql

      • [sql] Fixed bug whereby nesting a label of a select() with another label in it would produce incorrect exported columns. Among other things this would break an ORM column_property() mapping against another column_property(). Also in 0.6.8.(link)

        References: #2167

      • [sql] Changed the handling in determination of join conditions such that foreign key errors are only considered between the two given tables. That is, t1.join(t2) will report FK errors that involve ‘t1’ or ‘t2’, but anything involving ‘t3’ will be skipped. This affects join(), as well as ORM relationship and inherit condition logic.(link)

      • [sql] Some improvements to error handling inside of the execute procedure to ensure auto-close connections are really closed when very unusual DBAPI errors occur.(link)

      • [sql] metadata.reflect() and reflection.Inspector() had some reliance on GC to close connections which were internally procured, fixed this.(link)

      • [sql] Added explicit check for when Column .name is assigned as blank string(link)

        References: #2140

      • [sql] Fixed bug whereby if FetchedValue was passed to column server_onupdate, it would not have its parent “column” assigned, added test coverage for all column default assignment patterns. also in 0.6.8(link)

        References: #2147

      postgresql

      • [postgresql] Fixed the psycopg2_version parsing in the psycopg2 dialect.(link)

      • [postgresql] Fixed bug affecting PG 9 whereby index reflection would fail if against a column whose name had changed. Also in 0.6.8.(link)

        References: #2141

      mssql

      • [mssql] Fixed bug in MSSQL dialect whereby the aliasing applied to a schema-qualified table would leak into enclosing select statements. Also in 0.6.8.(link)

        References: #2169

      misc

      • This section documents those changes from 0.7b4 to 0.7.0. For an overview of what’s new in SQLAlchemy 0.7, see http://www.sqlalchemy.org/trac/wiki/07Migration(link)

      • [documentation] Removed the usage of the “collections.MutableMapping” abc from the ext.mutable docs as it was being used incorrectly and makes the example more difficult to understand in any case.(link)

        References: #2152

      • [examples] removed the ancient “polymorphic association” examples and replaced with an updated set of examples that use declarative mixins, “generic_associations”. Each presents an alternative table layout.(link)

      • [ext] Fixed bugs in sqlalchemy.ext.mutable extension where None was not appropriately handled, replacement events were not appropriately handled.(link)

        References: #2143

      0.7.0b4

      Released: Sun Apr 17 2011

      general

      • [general] Changes to the format of CHANGES, this file. The format changes have been applied to the 0.7 releases.(link)

      • [general] The “-declarative” changes will now be listed directly under the “-orm” section, as these are closely related.(link)

      • [general] The 0.5 series changes have been moved to the file CHANGES_PRE_06 which replaces CHANGES_PRE_05.(link)

      • [general] The changelog for 0.6.7 and subsequent within the 0.6 series is now listed only in the CHANGES file within the 0.6 branch. In the 0.7 CHANGES file (i.e. this file), all the 0.6 changes are listed inline within the 0.7 section in which they were also applied (since all 0.6 changes are in 0.7 as well). Changes that apply to a 0.6 version are noted here, as are any differences in implementation/behavior.(link)

      orm

      • [orm] Some fixes to “evaluate” and “fetch” evaluation when query.update(), query.delete() are called. The retrieval of records is done after autoflush in all cases, and before update/delete is emitted, guarding against unflushed data present as well as expired objects failing during the evaluation.(link)

        References: #2122

      • [orm] Reworded the exception raised when a flush is attempted of a subclass that is not polymorphic against the supertype.(link)

        References: #2063

      • [orm] Still more wording adjustments when a query option can’t find the target entity. Explain that the path must be from one of the root entities.(link)

      • [orm] Some fixes to the state handling regarding backrefs, typically when autoflush=False, where the back-referenced collection wouldn’t properly handle add/removes with no net change. Thanks to Richard Murri for the test case + patch. (also in 0.6.7).(link)

        References: #2123

      • [orm] Added checks inside the UOW to detect the unusual condition of being asked to UPDATE or DELETE on a primary key value that contains NULL in it.(link)

        References: #2127

      • [orm] Some refinements to attribute history. More changes are pending possibly in 0.8, but for now history has been modified such that scalar history doesn’t have a “side effect” of populating None for a non-present value. This allows a slightly better ability to distinguish between a None set and no actual change.(link)

        References: #2127

      • [orm] a “having” clause would be copied from the inside to the outside query if from_self() were used; in particular this would break an 0.7 style count() query. (also in 0.6.7)(link)

        References: #2130

      • [orm] the Query.execution_options() method now passes those options to the Connection rather than the SELECT statement, so that all available options including isolation level and compiled cache may be used.(link)

        References: #2131

      engine

      • [engine] The C extension is now enabled by default on CPython 2.x with a fallback to pure python if it fails to compile.(link)

        References: #2129

      sql

      • [sql] The “compiled_cache” execution option now raises an error when passed to a SELECT statement rather than a Connection. Previously it was being ignored entirely. We may look into having this option work on a per-statement level at some point.(link)

        References: #2131

      • [sql] Restored the “catchall” constructor on the base TypeEngine class, with a deprecation warning. This so that code which does something like Integer(11) still succeeds.(link)

      • [sql] Fixed regression whereby MetaData() coming back from unpickling did not keep track of new things it keeps track of now, i.e. collection of Sequence objects, list of schema names.(link)

        References: #2104

      • [sql] The limit/offset keywords to select() as well as the value passed to select.limit()/offset() will be coerced to integer. (also in 0.6.7)(link)

        References: #2116

      • [sql] fixed bug where “from” clause gathering from an over() clause would be an itertools.chain() and not a list, causing “can only concatenate list” TypeError when combined with other clauses.(link)

      • [sql] Fixed incorrect usage of ”,” in over() clause being placed between the “partition” and “order by” clauses.(link)

        References: #2134

      • [sql] Before/after attach events for PrimaryKeyConstraint now function, tests added for before/after events on all constraint types.(link)

        References: #2105

      • [sql] Added explicit true()/false() constructs to expression lib - coercion rules will intercept “False”/”True” into these constructs. In 0.6, the constructs were typically converted straight to string, which was no longer accepted in 0.7.(link)

        References: #2117
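
        As a minimal sketch of the new constructs (the table and column names are hypothetical, and the 0.7/0.8-era list calling style of select() is assumed):

        from sqlalchemy.sql import select, table, column
        from sqlalchemy.sql.expression import true

        t = table("users", column("active"))

        # renders e.g. "SELECT ... WHERE users.active = true"; the coercion rules
        # also intercept the Python constants True/False into these constructs
        stmt = select([t]).where(t.c.active == true())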

      schema

      • [schema] The ‘useexisting’ flag on Table has been superseded by a new pair of flags ‘keep_existing’ and ‘extend_existing’. ‘extend_existing’ is equivalent to ‘useexisting’ - the existing Table is returned, and additional constructor elements are added. With ‘keep_existing’, the existing Table is returned, but additional constructor elements are not added - these elements are only applied when the Table is newly created.(link)

        References: #2109
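
        A short sketch of the difference between the two flags (the table and column names here are hypothetical):

        from sqlalchemy import MetaData, Table, Column, Integer, String

        metadata = MetaData()
        users = Table("users", metadata, Column("id", Integer, primary_key=True))

        # extend_existing=True: the already-defined Table is returned and the
        # additional column is added to it
        users_extended = Table("users", metadata,
                               Column("name", String(50)),
                               extend_existing=True)

        # keep_existing=True: the already-defined Table is returned unchanged;
        # the extra column would only apply if the Table were being newly created
        users_kept = Table("users", metadata,
                           Column("email", String(50)),
                           keep_existing=True)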

      postgresql

      • [postgresql] Psycopg2 for Python 3 is now supported.(link)

      • [postgresql] Fixed support for precision numerics when using pg8000.(link)

        References: #2132

      sqlite

      • [sqlite] Fixed bug where reflection of foreign key created as “REFERENCES <tablename>” without col name would fail. (also in 0.6.7)(link)

        References: #2115

      oracle

      • [oracle] Column names that would require quotes for the column itself or for a name-generated bind parameter, such as names with special characters, underscores, or non-ASCII characters, now have their bind parameter keys properly translated when talking to cx_oracle. (Also in 0.6.7)(link)

        References: #2100

      • [oracle] The Oracle dialect adds a use_binds_for_limits=False create_engine() flag, which renders the LIMIT/OFFSET values inline instead of as binds; this is reported to modify the execution plan used by Oracle. (Also in 0.6.7)(link)

        References: #2116

      misc

      • [types] REAL has been added to the core types. Supported by Postgresql, SQL Server, MySQL, SQLite. Note that the SQL Server and MySQL versions, which add extra arguments, are also still available from those dialects.(link)

        References: #2081

      • [types] Added the @event.listens_for() decorator; given a target + event name, it applies the decorated function as a listener.(link)

        References: #2106
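
        A minimal sketch of the decorator in use; the event name and the listener body are just illustrative:

        from sqlalchemy import create_engine, event

        engine = create_engine("sqlite://")

        # attaches the decorated function as a listener for the named event on the target
        @event.listens_for(engine, "before_cursor_execute")
        def log_sql(conn, cursor, statement, parameters, context, executemany):
            print("about to execute:", statement)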

      • [pool] AssertionPool now stores the traceback indicating where the currently checked out connection was acquired; this traceback is reported within the assertion raised upon a second concurrent checkout; courtesy Gunnlaugur Briem(link)

        References: #2103

      • [pool] The “pool.manage” feature doesn’t use pickle anymore to hash the arguments for each pool.(link)

      • [documentation] Documented SQLite DATE/TIME/DATETIME types. (also in 0.6.7)(link)

        References: #2029

      • [documentation] Fixed mutable extension docs to show the correct type-association methods.(link)

        References: #2118

      0.7.0b3

      Released: Sun Mar 20 2011

      general

      • [general] Lots of fixes to unit tests when run under Pypy (courtesy Alex Gaynor).(link)

      orm

      • [orm] Changed the underlying approach to query.count(). query.count() is now in all cases exactly:

        query.from_self(func.count(literal_column('1'))).scalar()

        That is, “select count(1) from (<full query>)”. This produces a subquery in all cases, but vastly simplifies all the guessing count() tried to do previously, which would still fail in many scenarios particularly when joined table inheritance and other joins were involved. If the subquery produced for an otherwise very simple count is really an issue, use query(func.count()) as an optimization.

        (link)

        References: #2093

      • [orm] some changes to the identity map regarding rare weakref callbacks during iterations. The mutex has been removed as it apparently can cause a reentrant (i.e. in one thread) deadlock, perhaps when gc collects objects at the point of iteration in order to gain more memory. It is hoped that “dictionary changed during iteration” will be exceedingly rare as iteration methods internally acquire the full list of objects in a single values() call. Note 0.6.7 has a more conservative fix here which still keeps the mutex in place.(link)

        References: #2087

      • [orm] A tweak to the unit of work causes it to order the flush along relationship() dependencies even if the given objects don’t have any inter-attribute references in memory, which was the behavior in 0.5 and earlier, so a flush of Parent/Child with only foreign key/primary key set will succeed. This is done while still maintaining the 0.6-and-above behavior of not generating a ton of useless internal dependency structures within the flush that don’t correspond to state actually present in the current flush.(link)

        References: #2082

      • [orm] Improvements to the error messages emitted when querying against column-only entities in conjunction with (typically incorrectly) using loader options, where the parent entity is not fully present.(link)

        References: #2069

      • [orm] Fixed bug in query.options() whereby a path applied to a lazyload using string keys could overlap a same named attribute on the wrong entity. Note 0.6.7 has a more conservative fix to this.(link)

        References: #2098

      orm declarative

      • [declarative] [orm] Arguments in __mapper_args__ that aren’t “hashable” aren’t mistaken for always-hashable, possibly-column arguments. (also in 0.6.7)(link)

        References: #2091

      engine

      • [engine] Fixed AssertionPool regression bug.(link)

        References: #2097

      • [engine] Changed exception raised to ArgumentError when an invalid dialect is specified.(link)

        References: #2060

      sql

      • [sql] Added a fully descriptive error message for the case where Column is subclassed and _make_proxy() fails to make a copy due to TypeError on the constructor. The method _constructor should be implemented in this case.(link)

      • [sql] Added new event “column_reflect” for Table objects. Receives the info dictionary about a Column before the object is generated within reflection, and allows modification to the dictionary for control over most aspects of the resulting Column including key, name, type, info dictionary.(link)

        References: #2095

      • [sql] To help with the “column_reflect” event being used with specific Table objects instead of all instances of Table, listeners can be added to a Table object inline with its construction using a new argument “listeners”, a list of tuples of the form (<eventname>, <fn>), which are applied to the Table before the reflection process begins.(link)
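
        A rough sketch of the inline “listeners” argument, assuming a pre-existing database to reflect from (the database URL, table name, and listener are hypothetical):

        from sqlalchemy import MetaData, Table, create_engine

        def uppercase_keys(inspector, table, column_info):
            # modify the reflected column info in place before the Column is built;
            # here the attribute key is upper-cased
            column_info["key"] = column_info["name"].upper()

        engine = create_engine("sqlite:///existing.db")
        metadata = MetaData()

        accounts = Table("accounts", metadata,
                         autoload=True, autoload_with=engine,
                         listeners=[("column_reflect", uppercase_keys)])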

      • [sql] Added new generic function “next_value()”, accepts a Sequence object as its argument and renders the appropriate “next value” generation string on the target platform, if supported. Also provides ”.next_value()” method on Sequence itself.(link)

        References: #2085
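
        For example (the sequence name is hypothetical; the 0.7/0.8 select() calling style is assumed):

        from sqlalchemy import Sequence, select

        user_id_seq = Sequence("user_id_seq")

        # renders the platform-appropriate "next value" expression,
        # e.g. nextval('user_id_seq') on Postgresql
        stmt = select([user_id_seq.next_value()])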

      • [sql] func.next_value() or other SQL expression can be embedded directly into an insert() construct, and if implicit or explicit “returning” is used in conjunction with a primary key column, the newly generated value will be present in result.inserted_primary_key.(link)

        References: #2084

      • [sql] Added accessors to ResultProxy “returns_rows”, “is_insert” (also in 0.6.7)(link)

        References: #2089

      postgresql

      • [postgresql] Added RESERVED_WORDS for postgresql dialect. (also in 0.6.7)(link)

        References: #2092

      • [postgresql] Fixed the BIT type to allow a “length” parameter as well as a “varying” parameter. Reflection is also fixed. (also in 0.6.7)(link)

        References: #2073

      mssql

      • [mssql] Rewrote the query used to get the definition of a view, typically when using the Inspector interface, to use sys.sql_modules instead of the information schema, thereby allowing view definitions longer than 4000 characters to be fully returned. (also in 0.6.7)(link)

        References: #2071

      firebird

      • [firebird] The “implicit_returning” flag on create_engine() is honored if set to False. (also in 0.6.7)(link)

        References: #2083

      misc

      • [informix] Added RESERVED_WORDS to the Informix dialect. (also in 0.6.7)(link)

        References: #2092

      • [ext] The horizontal_shard ShardedSession class accepts the common Session argument “query_cls” as a constructor argument, to enable further subclassing of ShardedQuery. (also in 0.6.7)(link)

        References: #2090

      • [examples] Updated the association, association proxy examples to use declarative, added a new example dict_of_sets_with_default.py, a “pushing the envelope” example of association proxy.(link)

      • [examples] The Beaker caching example allows a “query_cls” argument to the query_callable() function. (also in 0.6.7)(link)

        References: #2090

      0.7.0b2

      Released: Sat Feb 19 2011

      orm

      • [orm] Fixed bug whereby Session.merge() would call the load() event with one too few arguments.(link)

        References: #2053

      • [orm] Added logic which prevents the generation of events from a MapperExtension or SessionExtension from generating do-nothing events for all the methods not overridden.(link)

        References: #2052

      orm declarative

      • [declarative] [orm] Fixed regression whereby composite() with Column objects placed inline would fail to initialize. The Column objects can now be inline with the composite() or external and pulled in via name or object ref.(link)

        References: #2058

      • [declarative] [orm] Fix error message referencing old @classproperty name to reference @declared_attr (also in 0.6.7)(link)

        References: #2061

      • [declarative] [orm] the dictionary at the end of the __table_args__ tuple is now optional.(link)

        References: #1468

      sql

      • [sql] Renamed the EngineEvents event class to ConnectionEvents. As these classes are never accessed directly by end-user code, this strictly is a documentation change for end users. Also simplified how events get linked to engines and connections internally.(link)

        References: #2059

      • [sql] The Sequence() construct, when passed a MetaData() object via its ‘metadata’ argument, will be included in CREATE/DROP statements within metadata.create_all() and metadata.drop_all(), including “checkfirst” logic.(link)

        References: #2055

      • [sql] The Column.references() method now returns True if it has a foreign key referencing the given column exactly, not just its parent table.(link)

        References: #2064

      postgresql

      • [postgresql] Fixed regression from 0.6 where SMALLINT and BIGINT types would both generate SERIAL on an integer PK column, instead of SMALLINT and BIGSERIAL(link)

        References: #2065

      misc

      • [ext] Association proxy now has correct behavior for any(), has(), and contains() when proxying a many-to-one scalar attribute to a one-to-many collection (i.e. the reverse of the ‘typical’ association proxy use case)(link)

        References: #2054

      • [examples] Beaker example now takes into account ‘limit’ and ‘offset’, bind params within embedded FROM clauses (like when you use union() or from_self()) when generating a cache key.(link)

      0.7.0b1

      Released: Sat Feb 12 2011

      general

      • [general] New event system, supersedes all extensions, listeners, etc.(link)

        References: #1902

      • [general] Logging enhancements(link)

        References: #1926

      • [general] Setup no longer installs a Nose plugin(link)

        References: #1949

      • [general] The “sqlalchemy.exceptions” alias in sys.modules has been removed. Base SQLA exceptions are available via “from sqlalchemy import exc”. The “exceptions” alias for “exc” remains in “sqlalchemy” for now, it’s just not patched into sys.modules.(link)

      orm

      • [orm] More succinct form of query.join(target, onclause)(link)

        References: #1923

      • [orm] Hybrid Attributes, implements/supersedes synonym()(link)

        References: #1903

      • [orm] Rewrite of composites(link)

        References: #2008

      • [orm] Mutation Event Extension, supersedes “mutable=True”

        (link)

      • [orm] PickleType and ARRAY mutability turned off by default(link)

        References: #1980

      • [orm] Simplified polymorphic_on assignment(link)

        References: #1895

      • [orm] Flushing of Orphans that have no parent is allowed(link)

        References: #1912

      • [orm] Adjusted flush accounting step to occur before the commit in the case of autocommit=True. This allows autocommit=True to work appropriately with expire_on_commit=True, and also allows post-flush session hooks to operate in the same transactional context as when autocommit=False.(link)

        References: #2041

      • [orm] Warnings generated when collection members, scalar referents not part of the flush(link)

        References: #1973

      • [orm] Non-Table-derived constructs can be mapped(link)

        References: #1876

      • [orm] Tuple label names in Query Improved(link)

        References: #1942

      • [orm] Mapped column attributes reference the most specific column first(link)

        References: #1892

      • [orm] Mapping to joins with two or more same-named columns requires explicit declaration(link)

        References: #1896

      • [orm] Mapper requires that polymorphic_on column be present in the mapped selectable(link)

        References: #1875

      • [orm] compile_mappers() renamed configure_mappers(), simplified configuration internals(link)

        References: #1966

      • [orm] the aliased() function, if passed a SQL FromClause element (i.e. not a mapped class), will return element.alias() instead of raising an error on AliasedClass.(link)

        References: #2018

      • [orm] Session.merge() will check the version id of the incoming state against that of the database, assuming the mapping uses version ids and incoming state has a version_id assigned, and raise StaleDataError if they don’t match.(link)

        References: #2027

      • [orm] Session.connection(), Session.execute() accept ‘bind’, to allow execute/connection operations to participate in the open transaction of an engine explicitly.(link)

        References: #1996

      • [orm] Query.join(), Query.outerjoin(), eagerload(), eagerload_all(), others no longer allow lists of attributes as arguments (i.e. option([x, y, z]) form, deprecated since 0.5)(link)

      • [orm] ScopedSession.mapper is removed (deprecated since 0.5).(link)

      • [orm] Horizontal shard query places ‘shard_id’ in context.attributes where it’s accessible by the “load()” event.(link)

        References: #2031

      • [orm] A single contains_eager() call across multiple entities will indicate all collections along that path should load, instead of requiring distinct contains_eager() calls for each endpoint (which was never correctly documented).(link)

        References: #2032

      • [orm] The “name” field used in orm.aliased() now renders in the resulting SQL statement.(link)

      • [orm] Session weak_instance_dict=False is deprecated.(link)

        References: #1473

      • [orm] An exception is raised in the unusual case that an append or similar event on a collection occurs after the parent object has been dereferenced, which prevents the parent from being marked as “dirty” in the session. Was a warning in 0.6.6.(link)

        References: #2046

      • [orm] Query.distinct() now accepts column expressions as *args, interpreted by the Postgresql dialect as DISTINCT ON (<expr>).(link)

        References: #1069

      • [orm] Additional tuning to “many-to-one” relationship loads during a flush(). A change in version 0.6.6 ([ticket:2002]) meant that more “unnecessary” m2o loads could occur during a flush. Extra loading modes have been added so that the SQL emitted in this specific use case is trimmed back, while still retrieving the information the flush needs in order to not miss anything.(link)

        References: #2049

      • [orm] the value of “passive” as passed to attributes.get_history() should be one of the constants defined in the attributes package. Sending True or False is deprecated.(link)

      • [orm] Added a name argument to Query.subquery(), to allow a fixed name to be assigned to the alias object. (also in 0.6.7)(link)

        References: #2030

      • [orm] A warning is emitted when a joined-table inheriting mapper has no primary keys on the locally mapped table (but has pks on the superclass table). (also in 0.6.7)(link)

        References: #2019

      • [orm] Fixed bug where “middle” class in a polymorphic hierarchy would have no ‘polymorphic_on’ column if it didn’t also specify a ‘polymorphic_identity’, leading to strange errors upon refresh, wrong class loaded when querying from that target. Also emits the correct WHERE criterion when using single table inheritance. (also in 0.6.7)(link)

        References: #2038

      • [orm] Fixed bug where a column with a SQL or server side default that was excluded from a mapping with include_properties or exclude_properties would result in UnmappedColumnError. (also in 0.6.7)(link)

        References: #1995

      • [orm] A warning is emitted in the unusual case that an append or similar event on a collection occurs after the parent object has been dereferenced, which prevents the parent from being marked as “dirty” in the session. This will be an exception in 0.7. (also in 0.6.7)(link)

        References: #2046

      orm declarative

      • [declarative] [orm] Added an explicit check for the case that the name ‘metadata’ is used for a column attribute on a declarative class. (also in 0.6.7)(link)

        References: #2050

      sql

      • [sql] Added over() function, method to FunctionElement classes, produces the _Over() construct which in turn generates “window functions”, i.e. “<window function> OVER (PARTITION BY <partition by>, ORDER BY <order by>)”.(link)

        References: #1844
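
        A minimal sketch of the construct (the table and column names are hypothetical):

        from sqlalchemy import func, select
        from sqlalchemy.sql import table, column

        emp = table("employees", column("department"), column("salary"))

        # SELECT department, salary,
        #        row_number() OVER (PARTITION BY department ORDER BY salary)
        stmt = select([
            emp.c.department,
            emp.c.salary,
            func.row_number().over(partition_by=emp.c.department,
                                   order_by=emp.c.salary),
        ])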

      • [sql] LIMIT/OFFSET clauses now use bind parameters(link)

        References: #805

      • [sql] select.distinct() now accepts column expressions as *args, interpreted by the Postgresql dialect as DISTINCT ON (<expr>). Note this was already available via passing a list to the distinct keyword argument to select().(link)

        References: #1069
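
        A sketch of the new calling form, assuming the Postgresql dialect (the names are hypothetical):

        from sqlalchemy import select
        from sqlalchemy.sql import table, column

        readings = table("readings", column("location"), column("reported_at"))

        # on Postgresql:
        # SELECT DISTINCT ON (location) ... ORDER BY location, reported_at DESC
        stmt = (select([readings])
                .distinct(readings.c.location)
                .order_by(readings.c.location, readings.c.reported_at.desc()))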

      • [sql] select.prefix_with() accepts multiple expressions (i.e. *expr), ‘prefix’ keyword argument to select() accepts a list or tuple.(link)

      • [sql] Passing a string to the distinct keyword argument of select() for the purpose of emitting special MySQL keywords (DISTINCTROW etc.) is deprecated - use prefix_with() for this.(link)

      • [sql] TypeDecorator works with primary key columns(link)

        References: #2006, #2005

      • [sql] DDL() constructs now escape percent signs(link)

        References: #1897

      • [sql] Table.c / MetaData.tables refined a bit, don’t allow direct mutation(link)

        References: #1917, #1893

      • [sql] Callables passed to bindparam() don’t get evaluated(link)

        References: #1950

      • [sql] types.type_map is now private, types._type_map(link)

        References: #1870

      • [sql] Non-public Pool methods underscored(link)

        References: #1982

      • [sql] Added NULLS FIRST and NULLS LAST support. It’s implemented as an extension to the asc() and desc() operators, called nullsfirst() and nullslast().(link)

        References: #723
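
        For example (the names are hypothetical):

        from sqlalchemy import desc
        from sqlalchemy.sql import select, table, column
        from sqlalchemy.sql.expression import nullslast

        scores = table("scores", column("points"))

        # ORDER BY scores.points DESC NULLS LAST
        stmt = select([scores]).order_by(nullslast(desc(scores.c.points)))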

      • [sql] The Index() construct can be created inline with a Table definition, using strings as column names, as an alternative to the creation of the index outside of the Table.(link)
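
        A minimal sketch of the inline form (the table, column, and index names are hypothetical):

        from sqlalchemy import MetaData, Table, Column, Index, Integer, String

        metadata = MetaData()

        # the Index is declared inline within the Table, naming its columns as strings
        addresses = Table("addresses", metadata,
                          Column("id", Integer, primary_key=True),
                          Column("email", String(100)),
                          Index("ix_addresses_email", "email"))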

      • [sql] execution_options() on Connection accepts “isolation_level” argument, sets transaction isolation level for that connection only until returned to the connection pool, for those backends which support it (SQLite, Postgresql)(link)

        References: #2001
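
        Roughly, assuming a Postgresql database (the URL here is hypothetical):

        from sqlalchemy import create_engine

        engine = create_engine("postgresql://scott:tiger@localhost/test")

        conn = engine.connect().execution_options(isolation_level="SERIALIZABLE")
        try:
            result = conn.execute("select 1")
        finally:
            # the isolation level reverts once the connection is returned to the pool
            conn.close()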

      • [sql] A TypeDecorator of Integer can be used with a primary key column, and the “autoincrement” feature of various dialects as well as the “sqlite_autoincrement” flag will honor the underlying database type as being Integer-based.(link)

        References: #2005

      • [sql] Established consistency when server_default is present on an Integer PK column. SQLA doesn’t pre-fetch these, nor do they come back in cursor.lastrowid (DBAPI). Ensured all backends consistently return None in result.inserted_primary_key for these. Regarding reflection for this case, reflection of an int PK col with a server_default sets the “autoincrement” flag to False, except in the case of a PG SERIAL col where we detected a sequence default.(link)

        References: #2020, #2021

      • [sql] Result-row processors are applied to pre-executed SQL defaults, as well as cursor.lastrowid, when determining the contents of result.inserted_primary_key.(link)

        References: #2006

      • [sql] Bind parameters present in the “columns clause” of a select are now auto-labeled like other “anonymous” clauses, which among other things allows their “type” to be meaningful when the row is fetched, as in result row processors.(link)

      • [sql] TypeDecorator is present in the “sqlalchemy” import space.(link)

      • [sql] Non-DBAPI errors which occur in the scope of an execute() call are now wrapped in sqlalchemy.exc.StatementError, and the text of the SQL statement and repr() of params is included. This makes it easier to identify statement executions which fail before the DBAPI becomes involved.(link)

        References: #2015

      • [sql] The concept of associating a ”.bind” directly with a ClauseElement has been explicitly moved to Executable, i.e. the mixin that describes ClauseElements which represent engine-executable constructs. This change is an improvement to internal organization and is unlikely to affect any real-world usage.(link)

        References: #2048

      • [sql] Column.copy(), as used in table.tometadata(), copies the ‘doc’ attribute. (also in 0.6.7)(link)

        References: #2028

      • [sql] Added some defs to the resultproxy.c extension so that the extension compiles and runs on Python 2.4. (also in 0.6.7)(link)

        References: #2023

      • [sql] The compiler extension now supports overriding the default compilation of expression._BindParamClause including that the auto-generated binds within the VALUES/SET clause of an insert()/update() statement will also use the new compilation rules. (also in 0.6.7)(link)

        References: #2042

      • [sql] SQLite dialect now uses NullPool for file-based databases(link)

        References: #1921

      • [sql] The path given as the location of a sqlite database is now normalized via os.path.abspath(), so that directory changes within the process don’t affect the ultimate location of a relative file path.(link)

        References: #2036

      postgresql

      • [postgresql] When explicit sequence execution derives the name of the auto-generated sequence of a SERIAL column, which currently only occurs if implicit_returning=False, now accommodates if the table + column name is greater than 63 characters using the same logic Postgresql uses. (also in 0.6.7)(link)

        References: #1083

      • [postgresql] Added an additional libpq message to the list of “disconnect” exceptions, “could not receive data from server” (also in 0.6.7)(link)

        References: #2044

      mysql

      • [mysql] New DBAPI support for pymysql, a pure Python port of MySQL-python.(link)

        References: #1991

      • [mysql] oursql dialect accepts the same “ssl” arguments in create_engine() as that of MySQLdb. (also in 0.6.7)(link)

        References: #2047

      mssql

      • [mssql] the String/Unicode types, and their counterparts VARCHAR/NVARCHAR, emit “max” as the length when no length is specified, so that the default length, normally ‘1’ as per SQL Server documentation, is instead ‘unbounded’. This also occurs for the VARBINARY type.

        This behavior makes these types more closely compatible with Postgresql’s VARCHAR type which is similarly unbounded when no length is specified.

        (link)

        References: #1833

      firebird

      • [firebird] Some adjustments so that Interbase is supported as well. FB/Interbase version idents are parsed into a structure such as (8, 1, 1, ‘interbase’) or (2, 1, 588, ‘firebird’) so they can be distinguished.(link)

        References: #1885

      misc

      SQLAlchemy-0.8.4/doc/changelog/changelog_08.html0000644000076500000240000077455612251147463022106 0ustar classicstaff00000000000000 0.8 Changelog — SQLAlchemy 0.8 Documentation

      SQLAlchemy 0.8 Documentation

      Release: 0.8.4 | Release Date: December 8, 2013

      0.8 Changelog

      0.8.4

      Released: December 8, 2013

      orm

      • [orm] [bug] Fixed a regression introduced by #2818 where the EXISTS query being generated would produce a “columns being replaced” warning for a statement with two same-named columns, as the internal SELECT wouldn’t have use_labels set.(link)

        References: #2818

      engine

      • [engine] [bug] A DBAPI that raises an error on connect() which is not a subclass of dbapi.Error (such as TypeError, NotImplementedError, etc.) will propagate the exception unchanged. Previously, the error handling specific to the connect() routine would both inappropriately run the exception through the dialect’s Dialect.is_disconnect() routine as well as wrap it in a sqlalchemy.exc.DBAPIError. It is now propagated unchanged in the same way as occurs within the execute process.(link)

        References: #2881

      • [engine] [bug] [pool] The QueuePool has been enhanced to not block new connection attempts when an existing connection attempt is blocking. Previously, the production of new connections was serialized within the block that monitored overflow; the overflow counter is now altered within its own critical section outside of the connection process itself.(link)

        References: #2880

      • [engine] [bug] [pool] Made a slight adjustment to the logic which waits for a pooled connection to be available, such that for a connection pool with no timeout specified, it will break out of the wait every half second to check for the so-called “abort” flag, which allows the waiter to break out in case the whole connection pool was dumped; normally the waiter should break out due to a notify_all() but it’s possible this notify_all() is missed in very slim cases. This is an extension of logic first introduced in 0.8.0, and the issue has only been observed occasionally in stress tests.(link)

        References: #2522

      • [engine] [bug] Fixed bug where SQL statement would be improperly ASCII-encoded when a pre-DBAPI StatementError were raised within Connection.execute(), causing encoding errors for non-ASCII statements. The stringification now remains within Python unicode thus avoiding encoding errors.(link)

        References: #2871

      sql

      postgresql

      • [postgresql] [bug] Fixed bug where index reflection would mis-interpret indkey values when using the pypostgresql adapter, which returns these values as lists vs. psycopg2’s return type of string.(link)

        References: #2855

      mssql

      • [mssql] [bug] Fixed bug introduced in 0.8.0 where the DROP INDEX statement for an index in MSSQL would render incorrectly if the index were in an alternate schema; the schemaname/tablename would be reversed. The format has also been revised to match current MSSQL documentation. Courtesy Derek Harland.(link)

        References: pull request bitbucket:7

      oracle

      • [oracle] [bug] Added ORA-02396 “maximum idle time” error code to list of “is disconnect” codes with cx_oracle.(link)

        References: #2864

      • [oracle] [bug] Fixed bug where Oracle VARCHAR types given with no length (e.g. for a CAST or similar) would incorrectly render None CHAR or similar.(link)

        References: #2870

      misc

      • [bug] [ext] Fixed bug which prevented the serializer extension from working correctly with table or column names that contain non-ASCII characters.(link)

        References: #2869

      0.8.3

      Released: October 26, 2013

      orm

      • [orm] [feature] Added new option to relationship() distinct_target_key. This enables the subquery eager loader strategy to apply a DISTINCT to the innermost SELECT subquery, to assist in the case where duplicate rows are generated by the innermost query which corresponds to this relationship (there’s not yet a general solution to the issue of dupe rows within subquery eager loading, however, when joins outside of the innermost subquery produce dupes). When the flag is set to True, the DISTINCT is rendered unconditionally, and when it is set to None, DISTINCT is rendered if the innermost relationship targets columns that do not comprise a full primary key. The option defaults to False in 0.8 (e.g. off by default in all cases), None in 0.9 (e.g. automatic by default). Thanks to Alexander Koval for help with this.

        See also

        change_2836

        (link)

        References: #2836

      • [orm] [bug] Fixed bug where list instrumentation would fail to represent a setslice of [0:0] correctly, which in particular could occur when using insert(0, item) with the association proxy. Due to some quirk in Python collections, the issue was much more likely with Python 3 rather than 2.(link)

        This change is also backported to: 0.7.11

        References: #2807

      • [orm] [bug] Fixed bug where using an annotation such as remote() or foreign() on a Column before association with a parent Table could produce issues related to the parent table not rendering within joins, due to the inherent copy operation performed by an annotation.(link)

        References: #2813

      • [orm] [bug] Fixed bug where Query.exists() failed to work correctly without any WHERE criterion. Courtesy Vladimir Magamedov.(link)

        References: #2818

      • [orm] [bug] Backported a change from 0.9 whereby the iteration of a hierarchy of mappers used in polymorphic inheritance loads is sorted, which allows the SELECT statements generated for polymorphic queries to have deterministic rendering, which in turn helps with caching schemes that cache on the SQL string itself.(link)

        References: #2779

      • [orm] [bug] Fixed a potential issue in an ordered sequence implementation used by the ORM to iterate mapper hierarchies; under the Jython interpreter this implementation wasn’t ordered, even though cPython and Pypy maintained ordering.(link)

        References: #2794

      • [orm] [bug] Fixed bug in ORM-level event registration where the “raw” or “propagate” flags could potentially be mis-configured in some “unmapped base class” configurations.(link)

        References: #2786

      • [orm] [bug] A performance fix related to the usage of the defer() option when loading mapped entities. The function overhead of applying a per-object deferred callable to an instance at load time was significantly higher than that of just loading the data from the row (note that defer() is meant to reduce DB/network overhead, not necessarily function call count); the function call overhead is now less than that of loading data from the column in all cases. There is also a reduction in the number of “lazy callable” objects created per load from N (total deferred values in the result) to 1 (total number of deferred cols).(link)

        References: #2778

      • [orm] [bug] Fixed bug whereby attribute history functions would fail when an object was moved from “persistent” to “pending” using the make_transient() function, for operations involving collection-based backrefs.(link)

        References: #2773

      orm declarative

      • [feature] [orm] [declarative] Added a convenience class decorator as_declarative(), a wrapper for declarative_base() which allows an existing base class to be applied using a nifty class-decorated approach.(link)
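
        A minimal sketch of the decorator, mirroring the usual declarative_base() pattern (the class and table names are hypothetical):

        from sqlalchemy import Column, Integer, String
        from sqlalchemy.ext.declarative import as_declarative

        @as_declarative()
        class Base(object):
            id = Column(Integer, primary_key=True)

        class User(Base):
            __tablename__ = "users"
            name = Column(String(50))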

      engine

      • [engine] [feature] repr() for the URL of an Engine will now conceal the password using asterisks. Courtesy Gunnlaugur Þór Briem.(link)

        References: #2821

      • [engine] [bug] The regexp used by the make_url() function now parses ipv6 addresses, e.g. surrounded by brackets.(link)

        This change is also backported to: 0.7.11

        References: #2851

      • [engine] [bug] [oracle] Dialect.initialize() is not called a second time if an Engine is recreated, due to a disconnect error. This fixes a particular issue in the Oracle 8 dialect, but in general the dialect.initialize() phase should only occur once per dialect.(link)

        References: #2776

      • [engine] [bug] [pool] Fixed bug where QueuePool would lose the correct checked out count if an existing pooled connection failed to reconnect after an invalidate or recycle event.(link)

        References: #2772

      sql

      • [sql] [feature] Added new method to the insert() construct Insert.from_select(). Given a list of columns and a selectable, renders INSERT INTO (table) (columns) SELECT ...(link)

        References: #722
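
        For example (the table and column names are hypothetical):

        from sqlalchemy import select
        from sqlalchemy.sql import table, column

        users = table("users", column("id"), column("name"))
        archive = table("users_archive", column("id"), column("name"))

        # INSERT INTO users_archive (id, name) SELECT users.id, users.name FROM users
        stmt = archive.insert().from_select(["id", "name"],
                                            select([users.c.id, users.c.name]))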

      • [sql] [feature] The update(), insert(), and delete() constructs will now interpret ORM entities as target tables to be operated upon, e.g.:

        from sqlalchemy import insert, update, delete
        
        ins = insert(SomeMappedClass).values(x=5)
        
        del_ = delete(SomeMappedClass).where(SomeMappedClass.id == 5)
        
        upd = update(SomeMappedClass).where(SomeMappedClass.id == 5).values(name='ed')
        (link)

      • [sql] [bug] Fixed regression dating back to 0.7.9 whereby the name of a CTE might not be properly quoted if it was referred to in multiple FROM clauses.(link)

        This change is also backported to: 0.7.11

        References: #2801

      • [sql] [bug] [cte] Fixed bug in common table expression system where if the CTE were used only as an alias() construct, it would not render using the WITH keyword.(link)

        This change is also backported to: 0.7.11

        References: #2783

      • [sql] [bug] Fixed bug in CheckConstraint DDL where the “quote” flag from a Column object would not be propagated.(link)

        This change is also backported to: 0.7.11

        References: #2784

      • [sql] [bug] Fixed bug where type_coerce() would not interpret ORM elements with a __clause_element__() method properly.(link)

        References: #2849

      • [sql] [bug] The Enum and Boolean types now bypass any custom (e.g. TypeDecorator) type in use when producing the CHECK constraint for the “non native” type. This so that the custom type isn’t involved in the expression within the CHECK, since this expression is against the “impl” value and not the “decorated” value.(link)

        References: #2842

      • [sql] [bug] The .unique flag on Index could be produced as None if it was generated from a Column that didn’t specify unique (where it defaults to None). The flag will now always be True or False.(link)

        References: #2825

      • [sql] [bug] Fixed bug in default compiler plus those of postgresql, mysql, and mssql to ensure that any literal SQL expression values are rendered directly as literals, instead of as bound parameters, within a CREATE INDEX statement. This also changes the rendering scheme for other DDL such as constraints.(link)

        References: #2742

      • [sql] [bug] A select() that is made to refer to itself in its FROM clause, typically via in-place mutation, will raise an informative error message rather than causing a recursion overflow.(link)

        References: #2815

      • [sql] [bug] Non-working “schema” argument on ForeignKey is deprecated; raises a warning. Removed in 0.9.(link)

        References: #2831

      • [sql] [bug] Fixed bug where using the column_reflect event to change the .key of the incoming Column would prevent primary key constraints, indexes, and foreign key constraints from being correctly reflected.(link)

        References: #2811

      • [sql] [bug] The ColumnOperators.notin_() operator added in 0.8 now properly produces the negation of the expression “IN” returns when used against an empty collection.(link)

      • [sql] [bug] [postgresql] Fixed bug where the expression system relied upon the str() form of some expressions when referring to the .c collection on a select() construct, but the str() form isn’t available since the element relies on dialect-specific compilation constructs, notably the __getitem__() operator as used with a Postgresql ARRAY element. The fix also adds a new exception class UnsupportedCompilationError which is raised in those cases where a compiler is asked to compile something it doesn’t know how to.(link)

        References: #2780

      postgresql

      • [postgresql] [bug] Removed a 128-character truncation from the reflection of the server default for a column; this code was originally from PG system views, which truncated the string for readability.(link)

        References: #2844

      • [postgresql] [bug] Parenthesis will be applied to a compound SQL expression as rendered in the column list of a CREATE INDEX statement.(link)

        References: #2742

      • [postgresql] [bug] Fixed bug where Postgresql version strings that had a prefix preceding the words “Postgresql” or “EnterpriseDB” would not parse. Courtesy Scott Schaefer.(link)

        References: #2819

      mysql

      • [mysql] [bug] Updates to MySQL reserved words for versions 5.5, 5.6, courtesy Hanno Schlichting.(link)

        This change is also backported to: 0.7.11

        References: #2791

      • [mysql] [bug] The change in #2721, which is that the deferrable keyword of ForeignKeyConstraint is silently ignored on the MySQL backend, will be reverted as of 0.9; this keyword will now render again, raising errors on MySQL as it is not understood - the same behavior will also apply to the initially keyword. In 0.8, the keywords will remain ignored but a warning is emitted. Additionally, the match keyword now raises a CompileError on 0.9 and emits a warning on 0.8; this keyword is not only silently ignored by MySQL but also breaks the ON UPDATE/ON DELETE options.

        To use a ForeignKeyConstraint that does not render or renders differently on MySQL, use a custom compilation option. An example of this usage has been added to the documentation, see MySQL Foreign Key Options.

        (link)

        References: #2721, #2839

      • [mysql] [bug] MySQL-connector dialect now allows options in the create_engine query string to override those defaults set up in the connect, including “buffered” and “raise_on_warnings”.(link)

        References: #2515

      sqlite

      • [sqlite] [bug] The newly added SQLite DATETIME arguments storage_format and regexp apparently were not fully implemented correctly; while the arguments were accepted, in practice they would have no effect; this has been fixed.(link)

        References: #2781

      oracle

      • [oracle] [bug] Fixed bug where Oracle table reflection using synonyms would fail if the synonym and the table were in different remote schemas. Patch to fix courtesy Kyle Derr.(link)

        References: #2853

      misc

      • [feature] Added a new flag system=True to Column, which marks the column as a “system” column which is automatically made present by the database (such as Postgresql oid or xmin). The column will be omitted from the CREATE TABLE statement but will otherwise be available for querying. In addition, the CreateColumn construct can be applied to a custom compilation rule which allows skipping of columns, by producing a rule that returns None.(link)

      • [feature] [examples] Improved the examples in examples/generic_associations, including that discriminator_on_association.py makes use of single table inheritance to do the work with the “discriminator”. Also added a true “generic foreign key” example, which works similarly to other popular frameworks in that it uses an open-ended integer to point to any other table, foregoing traditional referential integrity. While we don’t recommend this pattern, information wants to be free.(link)

      • [bug] [examples] Added “autoincrement=False” to the history table created in the versioning example, as this table shouldn’t have autoinc on it in any case, courtesy Patrick Schmid.(link)

      0.8.2

      Released: July 3, 2013

      orm

      • [orm] [feature] Added a new method Query.select_entity_from() which will in 0.9 replace part of the functionality of Query.select_from(). In 0.8, the two methods perform the same function, so that code can be migrated to use the Query.select_entity_from() method as appropriate. See the 0.9 migration guide for details.(link)

        References: #2736

      • [orm] [bug] A warning is emitted when trying to flush an object of an inherited class where the polymorphic discriminator has been assigned to a value that is invalid for the class.(link)

        References: #2750

      • [orm] [bug] Fixed bug in polymorphic SQL generation where multiple joined-inheritance entities against the same base class joined to each other as well would not track columns on the base table independently of each other if the string of joins were more than two entities long.(link)

        References: #2759

      • [orm] [bug] Fixed bug where sending a composite attribute into Query.order_by() would produce a parenthesized expression not accepted by some databases.(link)

        References: #2754

      • [orm] [bug] Fixed the interaction between composite attributes and the aliased() function. Previously, composite attributes wouldn’t work correctly in comparison operations when aliasing was applied.(link)

        References: #2755

      • [orm] [bug] [ext] Fixed bug where MutableDict didn’t report a change event when clear() was called.(link)

        References: #2730

      • [orm] [bug] Fixed a regression caused by #2682 whereby the evaluation invoked by Query.update() and Query.delete() would hit upon unsupported True and False symbols which now appear due to the usage of IS.(link)

        References: #2737

      • [orm] [bug] Fixed a regression from 0.7 caused by this ticket, which made the check for recursion overflow in self-referential eager joining too loose, missing a particular circumstance where a subclass had lazy=”joined” or “subquery” configured and the load was a “with_polymorphic” against the base.(link)

        References: #2481

      • [orm] [bug] Fixed a regression from 0.7 where the contextmanager feature of Session.begin_nested() would fail to correctly roll back the transaction when a flush error occurred, instead raising its own exception while leaving the session still pending a rollback.(link)

        References: #2718

      orm declarative

      • [feature] [orm] [declarative] ORM descriptors such as hybrid properties can now be referenced by name in a string argument used with order_by, primaryjoin, or similar in relationship(), in addition to column-bound attributes.(link)

        References: #2761

      engine

      • [engine] [bug] Fixed bug where the reset_on_return argument to various Pool implementations would not be propagated when the pool was regenerated. Courtesy Eevee.(link)

        References: pull request github:6

      • [engine] [bug] [sybase] Fixed a bug where the routine to detect the correct kwargs being sent to create_engine() would fail in some cases, such as with the Sybase dialect.(link)

        References: #2732

      sql

      • [sql] [feature] Provided a new attribute for TypeDecorator called TypeDecorator.coerce_to_is_types, to make it easier to control how comparisons using == or != to None and boolean types go about producing an IS expression, or a plain equality expression with a bound parameter.(link)

        References: #2734, #2744

      • [sql] [bug] Multiple fixes to the correlation behavior of Select constructs, first introduced in 0.8.0:

        • To satisfy the use case where FROM entries should be correlated outwards to a SELECT that encloses another, which then encloses this one, correlation now works across multiple levels when explicit correlation is established via Select.correlate(), provided that the target select is somewhere along the chain contained by a WHERE/ORDER BY/columns clause, not just nested FROM clauses. This makes Select.correlate() act more compatibly to that of 0.7 again while still maintaining the new “smart” correlation.
        • When explicit correlation is not used, the usual “implicit” correlation limits its behavior to just the immediate enclosing SELECT, to maximize compatibility with 0.7 applications, and also prevents correlation across nested FROMs in this case, maintaining compatibility with 0.8.0/0.8.1.
        • The Select.correlate_except() method was not preventing the given FROM clauses from correlation in all cases, and also would cause FROM clauses to be incorrectly omitted entirely (more like what 0.7 would do), this has been fixed.
        • Calling select.correlate_except(None) will enter all FROM clauses into correlation as would be expected.
        (link)

        References: #2668, #2746

      • [sql] [bug] Fixed bug whereby joining a select() of a table “A” with multiple foreign key paths to a table “B”, to that table “B”, would fail to produce the “ambiguous join condition” error that would be reported if you join table “A” directly to “B”; it would instead produce a join condition with multiple criteria.(link)

        References: #2738

      • [sql] [bug] [reflection] Fixed bug whereby using MetaData.reflect() across a remote schema as well as a local schema could produce wrong results in the case where both schemas had a table of the same name.(link)

        References: #2728

      • [sql] [bug] Removed the “not implemented” __iter__() call from the base ColumnOperators class. While this was introduced in 0.8.0 to prevent an endless, memory-growing loop when one also implements a __getitem__() method on a custom operator and then erroneously calls list() on that object, it had the effect of causing column elements to report that they were in fact iterable types, which would then throw an error when you tried to iterate. There’s no real way to have both sides here, so we stick with Python best practices. Careful with implementing __getitem__() on your custom operators!(link)

        References: #2726

      • [sql] [bug] [mssql] Regression from this ticket caused the unsupported keyword “true” to render, added logic to convert this to 1/0 for SQL server.(link)

        References: #2682

      postgresql

      • [postgresql] [feature] Support for Postgresql 9.2 range types has been added. Currently, no type translation is provided, so works directly with strings or psycopg2 2.5 range extension types at the moment. Patch courtesy Chris Withers.(link)

      • [postgresql] [feature] Added support for “AUTOCOMMIT” isolation when using the psycopg2 DBAPI. The keyword is available via the isolation_level execution option. Patch courtesy Roman Podolyaka.(link)

        References: #2072

      • [postgresql] [bug] The behavior of extract() has been simplified on the Postgresql dialect to no longer inject a hardcoded ::timestamp or similar cast into the given expression, as this interfered with types such as timezone-aware datetimes, but also does not appear to be at all necessary with modern versions of psycopg2.(link)

        References: #2740

      • [postgresql] [bug] Fixed bug in HSTORE type where keys/values that contained backslashed quotes would not be escaped correctly when using the “non native” (i.e. non-psycopg2) means of translating HSTORE data. Patch courtesy Ryan Kelly.(link)

        References: #2766

      • [postgresql] [bug] Fixed bug where the order of columns in a multi-column Postgresql index would be reflected in the wrong order. Courtesy Roman Podolyaka.(link)

        References: #2767

      • [postgresql] [bug] Fixed the HSTORE type to correctly encode/decode for unicode. This is always on, as the hstore is a textual type, and matches the behavior of psycopg2 when using Python 3. Courtesy Dmitry Mugtasimov.(link)

        References: #2735, pull request github:2

      mysql

      • [mysql] [feature] The mysql_length parameter used with Index can now be passed as a dictionary of column names/lengths, for use with composite indexes. Big thanks to Roman Podolyaka for the patch.(link)

        References: #2704
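
        A sketch of the dictionary form (the table, column, and index names are hypothetical):

        from sqlalchemy import MetaData, Table, Column, Index, Integer, String

        metadata = MetaData()
        docs = Table("docs", metadata,
                     Column("id", Integer, primary_key=True),
                     Column("title", String(255)),
                     Column("body", String(255)))

        # per-column prefix lengths for a composite index; on MySQL this renders
        # roughly CREATE INDEX ix_docs_title_body ON docs (title(10), body(30))
        Index("ix_docs_title_body", docs.c.title, docs.c.body,
              mysql_length={"title": 10, "body": 30})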

      • [mysql] [bug] Fixed bug when using multi-table UPDATE where a supplemental table is a SELECT with its own bound parameters, where the positioning of the bound parameters would be reversed versus the statement itself when using MySQL’s special syntax.(link)

        References: #2768

      • [mysql] [bug] Added another conditional to the mysql+gaerdbms dialect to detect so-called “development” mode, where we should use the rdbms_mysqldb DBAPI. Patch courtesy Brett Slatkin.(link)

        References: #2715

      • [mysql] [bug] The deferrable keyword argument on ForeignKey and ForeignKeyConstraint will not render the DEFERRABLE keyword on the MySQL dialect. For a long time we left this in place because a non-deferrable foreign key would act very differently than a deferrable one, but some environments just disable FKs on MySQL, so we’ll be less opinionated here.(link)

        References: #2721

      • [mysql] [bug] Updated mysqlconnector dialect to check for disconnect based on the apparent string message sent in the exception; tested against mysqlconnector 1.0.9.(link)

      sqlite

      mssql

      • [mssql] [bug] When querying the information schema on SQL Server 2000, removed a CAST call that was added in 0.8.1 to help with driver issues, which apparently is not compatible on 2000. The CAST remains in place for SQL Server 2005 and greater.(link)

        References: #2747

      firebird

      • [firebird] [feature] Added new flag retaining=True to the kinterbasdb and fdb dialects. This controls the value of the retaining flag sent to the commit() and rollback() methods of the DBAPI connection. Due to historical concerns, this flag defaults to True in 0.8.2, however in 0.9.0b1 this flag defaults to False.(link)

        References: #2763

      • [firebird] [bug] Type lookup when reflecting the Firebird types LONG and INT64 has been fixed so that LONG is treated as INTEGER, INT64 treated as BIGINT, unless the type has a “precision” in which case it’s treated as NUMERIC. Patch courtesy Russell Stuart.(link)

        References: #2757

      misc

      • [bug] [ext] Fixed bug whereby if a composite type were set up with a function instead of a class, the mutable extension would trip up when it tried to check that column for being a MutableComposite (which it isn’t). Courtesy asldevi.(link)

      • [bug] [examples] Fixed an issue with the “versioning” recipe whereby a many-to-one reference could produce a meaningless version for the target, even though it was not changed, when backrefs were present. Patch courtesy Matt Chisholm.(link)

      • [bug] [examples] Fixed a small bug in the dogpile example where the generation of SQL cache keys wasn’t applying deduping labels to the statement the same way Query normally does.(link)

      • [requirements] The Python mock library is now required in order to run the unit test suite. While part of the standard library as of Python 3.3, previous Python installations will need to install this in order to run unit tests or to use the sqlalchemy.testing package for external dialects.(link)

      0.8.1

      Released: April 27, 2013

      orm

      • [orm] [feature] Added a convenience method to Query that turns a query into an EXISTS subquery of the form EXISTS (SELECT 1 FROM ... WHERE ...).(link)

        References: #2673
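
        A small self-contained sketch (the mapped class and the filter criteria are hypothetical):

        from sqlalchemy import Column, Integer, String, create_engine
        from sqlalchemy.ext.declarative import declarative_base
        from sqlalchemy.orm import Session

        Base = declarative_base()

        class User(Base):
            __tablename__ = "users"
            id = Column(Integer, primary_key=True)
            name = Column(String(50))

        engine = create_engine("sqlite://")
        Base.metadata.create_all(engine)
        session = Session(engine)

        # EXISTS (SELECT 1 FROM users WHERE users.name = :name_1)
        exists_q = session.query(User).filter(User.name == "ed").exists()
        print(session.query(exists_q).scalar())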

      • [orm] [bug] Fixed bug when a query of the form: query(SubClass).options(subqueryload(Baseclass.attrname)), where SubClass is a joined-inheritance subclass of BaseClass, would fail to apply the JOIN inside the subquery on the attribute load, producing a cartesian product. The populated results still tended to be correct as additional rows are just ignored, so this issue may be present as a performance degradation in applications that are otherwise working correctly.(link)

        This change is also backported to: 0.7.11

        References: #2699

      • [orm] [bug] Fixed bug in unit of work whereby a joined-inheritance subclass could insert the row for the “sub” table before the parent table, if the two tables had no ForeignKey constraints set up between them.(link)

        This change is also backported to: 0.7.11

        References: #2689

      • [orm] [bug] Fixes to the sqlalchemy.ext.serializer extension, including that the “id” passed from the pickler is turned into a string to prevent against bytes being parsed on Py3K, as well as that relationship() and orm.join() constructs are now properly serialized.(link)

        References: #2698

      • [orm] [bug] A significant improvement to the inner workings of query.join(), such that the decisionmaking involved on how to join has been dramatically simplified. New test cases now pass such as multiple joins extending from the middle of an already complex series of joins involving inheritance and such. Joining from deeply nested subquery structures is still complicated and not without caveats, but with these improvements the edge cases are hopefully pushed even farther out to the edges.(link)

        References: #2714

      • [orm] [bug] Added a conditional to the unpickling process for ORM mapped objects, such that if the reference to the object were lost when the object was pickled, we don’t erroneously try to set up _sa_instance_state - fixes a NoneType error.(link)

      • [orm] [bug] Fixed bug where many-to-many relationship with uselist=False would fail to delete the association row and raise an error if the scalar attribute were set to None. This was a regression introduced by the changes for #2229.(link)

        References: #2710

      • [orm] [bug] Improved the behavior of instance management regarding the creation of strong references within the Session; an object will no longer have an internal reference cycle created if it’s in the transient state or moves into the detached state - the strong ref is created only when the object is attached to a Session and is removed when the object is detached. This makes it somewhat safer for an object to have a __del__() method, even though this is not recommended, as relationships with backrefs produce cycles too. A warning has been added when a class with a __del__() method is mapped.(link)

        References: #2708

      • [orm] [bug] Fixed bug whereby ORM would run the wrong kind of query when refreshing an inheritance-mapped class where the superclass was mapped to a non-Table object, like a custom join() or a select(), running a query that assumed a hierarchy that’s mapped to individual Table-per-class.(link)

        References: #2697

      • [orm] [bug] Fixed __repr__() on mapper property constructs to work before the object is initialized, so that Sphinx builds with recent Sphinx versions can read them.(link)

      orm declarative

      • [bug] [orm] [declarative] Fixed indirect regression regarding has_inherited_table(), where since it considers the current class’ __table__, was sensitive to when it was called. This is 0.7’s behavior also, but in 0.7 things tended to “work out” within events like __mapper_args__(). has_inherited_table() now only considers superclasses, so should return the same answer regarding the current class no matter when it’s called (obviously assuming the state of the superclass).(link)

        References: #2656

      sql

      • [sql] [feature] Loosened the check on dialect-specific argument names passed to Table(); since we want to support external dialects and also want to support args without a certain dialect being installed, it only checks the format of the arg now, rather than looking for that dialect in sqlalchemy.dialects.(link)

      • [sql] [bug] [mysql] Fully implemented the IS and IS NOT operators with regards to the True/False constants. An expression like col.is_(True) will now render col IS true on the target platform, rather than converting the True/ False constant to an integer bound parameter. This allows the is_() operator to work on MySQL when given True/False constants.(link)

        References: #2682

      • [sql] [bug] A major fix to the way in which a select() object produces labeled columns when apply_labels() is used; this mode produces a SELECT where each column is labeled as in <tablename>_<columnname>, to remove column name collisions for a multiple table select. The fix is that if two labels collide when combined with the table name, i.e. “foo.bar_id” and “foo_bar.id”, anonymous aliasing will be applied to one of the dupes. This allows the ORM to handle both columns independently; previously, 0.7 would in some cases silently emit a second SELECT for the column that was “duped”, and in 0.8 an ambiguous column error would be emitted. The “keys” applied to the .c. collection of the select() will also be deduped, so that the “column being replaced” warning will no longer emit for any select() that specifies use_labels, though the dupe key will be given an anonymous label which isn’t generally user-friendly.(link)

        References: #2702

      • [sql] [bug] Fixed bug where disconnect detect on error would raise an attribute error if the error were being raised after the Connection object had already been closed.(link)

        References: #2691

      • [sql] [bug] Reworked internal exception raises that emit a rollback() before re-raising, so that the stack trace is preserved from sys.exc_info() before entering the rollback. This is so that the traceback is preserved when using coroutine frameworks which may have switched contexts before the rollback function returns.(link)

        References: #2703

      • [sql] [bug] [postgresql] The _Binary base type now converts values through the bytes() callable when run on Python 3; in particular psycopg2 2.5 with Python 3.3 seems to now be returning the “memoryview” type, so this is converted to bytes before return.(link)

      • [sql] [bug] Improvements to Connection auto-invalidation handling. If a non-disconnect error occurs, but leads to a delayed disconnect error within error handling (happens with MySQL), the disconnect condition is detected. The Connection can now also be closed when in an invalid state, meaning it will raise “closed” on next usage, and additionally the “close with result” feature will work even if the autorollback in an error handling routine fails and regardless of whether the condition is a disconnect or not.(link)

        References: #2695

      • [sql] [bug] Fixed bug whereby a DBAPI that can return “0” for cursor.lastrowid would not function correctly in conjunction with ResultProxy.inserted_primary_key.(link)

      postgresql

      • [postgresql] [bug] Opened up the checking for “disconnect” with psycopg2/libpq to check for all the various “disconnect” messages within the full exception hierarchy. Specifically the “closed the connection unexpectedly” message has now been seen in at least three different exception types. Courtesy Eli Collins.(link)

        References: #2712

      • [postgresql] [bug] The operators for the Postgresql ARRAY type now support input types of sets, generators, etc., even when a dimension is not specified, by turning the given iterable into a collection unconditionally.(link)

        References: #2681

      • [postgresql] [bug] Added missing HSTORE type to postgresql type names so that the type can be reflected.(link)

        References: #2680

      mysql

      • [mysql] [bug] Fixes to support the latest cymysql DBAPI, courtesy Hajime Nakagami.(link)

      • [mysql] [bug] Improvements to the operation of the pymysql dialect on Python 3, including some important decode/bytes steps. Issues remain with BLOB types due to driver issues. Courtesy Ben Trofatter.(link)

        References: #2663

      • [mysql] [bug] Updated a regexp to correctly extract error code on google app engine v1.7.5 and newer. Courtesy Dan Ring.(link)

      mssql

      • [mssql] [bug] Part of a longer series of fixes needed for pyodbc+mssql, a CAST to NVARCHAR(max) has been added to the bound parameter for the table name and schema name in all information schema queries to avoid the issue of comparing NVARCHAR to NTEXT, which seems to be rejected by the ODBC driver in some cases, such as FreeTDS (0.91 only?) plus unicode bound parameters being passed. The issue seems to be specific to the SQL Server information schema tables and the workaround is harmless for those cases where the problem doesn’t exist in the first place.(link)

        References: #2355

      • [mssql] [bug] Added support for additional “disconnect” messages to the pymssql dialect. Courtesy John Anderson.(link)

      • [mssql] [bug] Fixed Py3K bug regarding “binary” types and pymssql. Courtesy Marc Abramowitz.(link)

        References: #2683

      misc

      • [bug] [examples] Fixed a long-standing bug in the caching example, where the limit/offset parameter values wouldn’t be taken into account when computing the cache key. The _key_from_query() function has been simplified to work directly from the final compiled statement in order to get at both the full statement as well as the fully processed parameter list.(link)

      0.8.0

      Released: March 9, 2013

      Note

      There are some new behavioral changes as of 0.8.0 not present in 0.8.0b2. They are present in the migration document as follows:

      orm

      • [orm] [feature] A meaningful QueryableAttribute.info attribute is added, which proxies down to the .info attribute on either the schema.Column object if directly present, or the MapperProperty otherwise. The full behavior is documented and ensured by tests to remain stable.(link)

        References: #2675

      • [orm] [feature] Can set/change the “cascade” attribute on a relationship() construct after it’s been constructed already. This is not a pattern for normal use but we like to change the setting for demonstration purposes in tutorials.(link)
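
        A minimal sketch of the pattern described above; “Child” stands in for a hypothetical related class:

        from sqlalchemy.orm import relationship

        rel = relationship("Child")           # constructed normally
        rel.cascade = "all, delete-orphan"    # may now be reassigned after construction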

      • [orm] [feature] Added new helper function was_deleted(), returns True if the given object was the subject of a Session.delete() operation.(link)

        References: #2658

      • [orm] [feature] Extended the Runtime Inspection API system so that all Python descriptors associated with the ORM or its extensions can be retrieved. This fulfills the common request of being able to inspect all QueryableAttribute descriptors in addition to extension types such as hybrid_property and AssociationProxy. See Mapper.all_orm_descriptors.(link)
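
        For illustration, assuming a hypothetical mapped class User:

        from sqlalchemy import inspect

        insp = inspect(User)
        print(list(insp.all_orm_descriptors.keys()))
        # lists QueryableAttribute descriptors along with extension types
        # such as hybrid_property and association proxy objects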

      • [orm] [bug] Improved checking for an existing backref name conflict during mapper configuration; will now test for name conflicts on superclasses and subclasses, in addition to the current mapper, as these conflicts break things just as much. This is new for 0.8, but see below for a warning that will also be triggered in 0.7.11.(link)

        References: #2674

      • [orm] [bug] Improved the error message emitted when a “backref loop” is detected, that is when an attribute event triggers a bidirectional assignment between two other attributes with no end. This condition can occur not just when an object of the wrong type is assigned, but also when an attribute is mis-configured to backref into an existing backref pair. Also in 0.7.11.(link)

        References: #2674

      • [orm] [bug] A warning is emitted when a MapperProperty is assigned to a mapper that replaces an existing property, if the properties in question aren’t plain column-based properties. Replacement of relationship properties is rarely (ever?) what is intended and usually refers to a mapper mis-configuration. Also in 0.7.11.(link)

        References: #2674

      • [orm] [bug] A clear error message is emitted if an event handler attempts to emit SQL on a Session within the after_commit() handler, where there is not a viable transaction in progress.(link)

        References: #2662

      • [orm] [bug] Detection of a primary key change within the process of cascading a natural primary key update will succeed even if the key is composite and only some of the attributes have changed.(link)

        References: #2665

      • [orm] [bug] An object that’s deleted from a session will be de-associated with that session fully after the transaction is committed, that is the object_session() function will return None.(link)

        References: #2658

      • [orm] [bug] Fixed bug whereby Query.yield_per() would set the execution options incorrectly, thereby breaking subsequent usage of the Query.execution_options() method. Courtesy Ryan Kelly.(link)

        References: #2661

      • [orm] [bug] Fixed the consideration of the between() operator so that it works correctly with the new relationship local/remote system.(link)

        References: #1768

      • [orm] [bug] The consideration of a pending object as an “orphan” has been modified to more closely match the behavior of persistent objects, which is that the object is expunged from the Session as soon as it is de-associated from any of its orphan-enabled parents. Previously, the pending object would be expunged only if de-associated from all of its orphan-enabled parents. The new flag legacy_is_orphan is added to orm.mapper() which re-establishes the legacy behavior.

        See the change note and example case at The consideration of a “pending” object as an “orphan” has been made more aggressive for a detailed discussion of this change.

        (link)

        References: #2655

      • [orm] [bug] Fixed the (most likely never used) “@collection.link” collection method, which fires off each time the collection is associated or de-associated with a mapped object - the decorator was not tested or functional. The decorator method is now named collection.linker() though the name “link” remains for backwards compatibility. Courtesy Luca Wehrstedt.(link)

        References: #2653

      • [orm] [bug] Made some fixes to the system of producing custom instrumented collections, mainly that the usage of the @collection decorators will now honor the __mro__ of the given class, applying the logic of the sub-most classes’ version of a particular collection method. Previously, it wasn’t predictable when subclassing an existing instrumented class such as MappedCollection whether or not custom methods would resolve correctly.(link)

        References: #2654

      • [orm] [bug] Fixed potential memory leak which could occur if an arbitrary number of sessionmaker objects were created. The anonymous subclass created by the sessionmaker, when dereferenced, would not be garbage collected due to remaining class-level references from the event package. This issue also applies to any custom system that made use of ad-hoc subclasses in conjunction with an event dispatcher. Also in 0.7.10.(link)

        References: #2650

      • [orm] [bug] Query.merge_result() can now load rows from an outer join where an entity may be None without throwing an error. Also in 0.7.10.(link)

        References: #2640

      • [orm] [bug] Fixes to the “dynamic” loader on relationship(), includes that backrefs will work properly even when autoflush is disabled, history events are more accurate in scenarios where multiple add/remove of the same object occurs.(link)

        References: #2637

      • [orm] [removed] The undocumented (and hopefully unused) system of producing custom collections using an __instrumentation__ datastructure associated with the collection has been removed, as this was a complex and untested feature which was also essentially redundant versus the decorator approach. Other internal simplifications to the orm.collections module have been made as well.(link)

      sql

      • [sql] [feature] Added a new argument, inherit_schema, to Enum and its base SchemaType. When set to True, the type will set its schema attribute to that of the Table to which it is associated. This also occurs during a Table.tometadata() operation; the SchemaType is now copied in all cases when Table.tometadata() happens, and if inherit_schema=True, the type will take on the new schema name passed to the method. The schema is important when used with the Postgresql backend, as the type results in a CREATE TYPE statement.(link)

        References: #2657

      • [sql] [feature] Index now supports arbitrary SQL expressions and/or functions, in addition to straight columns. Common modifiers include using somecolumn.desc() for a descending index and func.lower(somecolumn) for a case-insensitive index, depending on the capabilities of the target backend.(link)

        References: #695
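
        A short sketch, assuming a hypothetical Table “sometable” with “name” and “id” columns:

        from sqlalchemy import Index, func

        Index("ix_sometable_name_lower", func.lower(sometable.c.name))  # functional index
        Index("ix_sometable_id_desc", sometable.c.id.desc())            # descending index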

      • [sql] [bug] The behavior of SELECT correlation has been improved such that the Select.correlate() and Select.correlate_except() methods, as well as their ORM analogues, will still retain “auto-correlation” behavior in that the FROM clause is modified only if the output would be legal SQL; that is, the FROM clause is left intact if the correlated SELECT is not used in the context of an enclosing SELECT inside of the WHERE, columns, or HAVING clause. The two methods now only specify conditions to the default “auto correlation”, rather than absolute FROM lists.(link)

        References: #2668

      • [sql] [bug] Fixed a bug regarding column annotations which in particular could impact some usages of the new orm.remote() and orm.local() annotation functions, where annotations could be lost when the column were used in a subsequent expression.(link)

        References: #1768, #2660

      • [sql] [bug] The ColumnOperators.in_() operator will now coerce values of None to null().(link)

        References: #2496
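
        For example, assuming a hypothetical “users” table:

        from sqlalchemy import select

        print(select([users]).where(users.c.name.in_(["ed", None])))
        # the None element now renders as NULL:
        # ... WHERE users.name IN (:name_1, NULL)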

      • [sql] [bug] Fixed bug where Table.tometadata() would fail if a Column had both a foreign key as well as an alternate ”.key” name for the column. Also in 0.7.10.(link)

        References: #2643

      • [sql] [bug] insert().returning() now raises an informative CompileError when compiled against a dialect that doesn’t support RETURNING.(link)

        References: #2629

      • [sql] [bug] Tweaked the “REQUIRED” symbol used by the compiler to identify INSERT/UPDATE bound parameters that need to be passed, so that it’s more easily identifiable when writing custom bind-handling code.(link)

        References: #2648

      schema

      postgresql

      • [postgresql] [feature] Added support for Postgresql’s traditional SUBSTRING function syntax, renders as “SUBSTRING(x FROM y FOR z)” when regular func.substring() is used. Courtesy Gunnlaugur Þór Briem.(link)

        This change is also backported to: 0.7.11

        References: #2676

      • [postgresql] [feature] Added postgresql.ARRAY.Comparator.any() and postgresql.ARRAY.Comparator.all() methods, as well as standalone expression constructs. Big thanks to Audrius Kažukauskas for the terrific work here.(link)

      • [postgresql] [bug] Fixed bug in array() construct whereby using it inside of an expression.insert() construct would produce an error regarding a parameter issue in the self_group() method.(link)

      mysql

      • [mysql] [feature] New dialect for CyMySQL added, courtesy Hajime Nakagami.(link)

      • [mysql] [feature] GAE dialect now accepts username/password arguments in the URL, courtesy Owen Nelson.(link)

      • [mysql] [bug] [gae] Added a conditional import to the gaerdbms dialect which attempts to import rdbms_apiproxy vs. rdbms_googleapi to work on both dev and production platforms. Also now honors the instance attribute. Courtesy Sean Lynch. Also in 0.7.10.(link)

        References: #2649

      • [mysql] [bug] GAE dialect won’t fail on None match if the error code can’t be extracted from the exception throw; courtesy Owen Nelson.(link)

      mssql

      • [mssql] [feature] Added mssql_include and mssql_clustered options to Index, renders the INCLUDE and CLUSTERED keywords, respectively. Courtesy Derek Harland.(link)
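
        A brief sketch of the new options, assuming a hypothetical “account” Table:

        from sqlalchemy import Index

        Index("my_index", account.c.name,
              mssql_clustered=True,         # renders CREATE CLUSTERED INDEX
              mssql_include=["balance"])    # renders INCLUDE (balance)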

      • [mssql] [feature] DDL for IDENTITY columns is now supported on non-primary key columns, by establishing a Sequence construct on any integer column. Courtesy Derek Harland.(link)

        References: #2644

      • [mssql] [bug] Added a py3K conditional around unnecessary .decode() call in mssql information schema, fixes reflection in Py3K. Also in 0.7.10.(link)

        References: #2638

      • [mssql] [bug] Fixed a regression whereby the “collation” parameter of the character types CHAR, NCHAR, etc. stopped working, as “collation” is now supported by the base string types. The TEXT, NCHAR, CHAR, VARCHAR types within the MSSQL dialect are now synonyms for the base types.(link)

      oracle

      • [oracle] [bug] The cx_oracle dialect will no longer run the bind parameter names through encode(), as this is not valid on Python 3, and prevented statements from functioning correctly on Python 3. We now encode only if supports_unicode_binds is False, which is not the case for cx_oracle when at least version 5 of cx_oracle is used.(link)

      misc

      • [bug] [tests] Fixed an import of “logging” in test_execute which was not working on some linux platforms. Also in 0.7.11.(link)

        References: #2669

      • [bug] [examples] Fixed a regression in the examples/dogpile_caching example which was due to the change in #2614.(link)

      0.8.0b2

      Released: December 14, 2012

      orm

      • [orm] [feature] Added KeyedTuple._asdict() and KeyedTuple._fields to the KeyedTuple class to provide some degree of compatibility with the Python standard library collections.namedtuple().(link)

        References: #2601
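
        A quick sketch; “User” is a hypothetical mapped class and “session” an active Session:

        row = session.query(User.id, User.name).first()
        print(row._fields)     # e.g. ('id', 'name')
        print(row._asdict())   # e.g. {'id': 1, 'name': 'ed'}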

      • [orm] [feature] Allow synonyms to be used when defining primary and secondary joins for relationships.(link)

      • [orm] [feature] [extensions] The sqlalchemy.ext.mutable extension now includes the example MutableDict class as part of the extension.(link)

      • [orm] [bug] The Query.select_from() method can now be used with an aliased() construct without it interfering with the entities being selected. Basically, a statement like this:

        ua = aliased(User)
        session.query(User.name).select_from(ua).join(User, User.name > ua.name)

        Will maintain the columns clause of the SELECT as coming from the unaliased “user”, as specified; the select_from only takes place in the FROM clause:

        SELECT users.name AS users_name FROM users AS users_1
        JOIN users ON users.name > users_1.name

        Note that this behavior is in contrast to the original, older use case for Query.select_from(), which is that of restating the mapped entity in terms of a different selectable:

        session.query(User.name).\
          select_from(user_table.select().where(user_table.c.id > 5))

        Which produces:

        SELECT anon_1.name AS anon_1_name FROM (SELECT users.id AS id,
        users.name AS name FROM users WHERE users.id > :id_1) AS anon_1

        It was the “aliasing” behavior of the latter use case that was getting in the way of the former use case. The method now specifically considers a SQL expression like expression.select() or expression.alias() separately from a mapped entity like an aliased() construct.

        (link)

        References: #2635

      • [orm] [bug] The MutableComposite type did not allow for the MutableBase.coerce() method to be used, even though the code seemed to indicate this intent, so this now works and a brief example is added. As a side-effect, the mechanics of this event handler have been changed so that new MutableComposite types no longer add per-type global event handlers. Also in 0.7.10.(link)

        References: #2624

      • [orm] [bug] A second overhaul of aliasing/internal pathing mechanics now allows two subclasses to have different relationships of the same name, supported with subquery or joined eager loading on both simultaneously when a full polymorphic load is used.(link)

        References: #2614

      • [orm] [bug] Fixed bug whereby a multi-hop subqueryload within a particular with_polymorphic load would produce a KeyError. Takes advantage of the same internal pathing overhaul as #2614.(link)

        References: #2617

      • [orm] [bug] Fixed regression where query.update() would produce an error if an object matched by the “fetch” synchronization strategy wasn’t locally present. Courtesy Scott Torborg.(link)

        References: #2602

      engine

      sql

      • [sql] [feature] The Insert construct now supports multi-valued inserts, that is, an INSERT that renders like “INSERT INTO table VALUES (...), (...), ...”. Supported by Postgresql, SQLite, and MySQL. Big thanks to Idan Kamara for doing the legwork on this one.(link)

        References: #2623
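
        A minimal sketch; “users” is a hypothetical Table with a “name” column and “conn” an open Connection:

        stmt = users.insert().values([
            {"name": "jack"},
            {"name": "wendy"},
            {"name": "ed"},
        ])
        conn.execute(stmt)
        # renders: INSERT INTO users (name) VALUES (...), (...), (...)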

      • [sql] [bug] Fixed bug where using server_onupdate=<FetchedValue|DefaultClause> without passing the “for_update=True” flag would apply the default object to the server_default, blowing away whatever was there. The explicit for_update=True argument shouldn’t be needed with this usage (especially since the documentation shows an example without it being used) so it is now arranged internally using a copy of the given default object, if the flag isn’t set to what corresponds to that argument.(link)

        This change is also backported to: 0.7.10

        References: #2631

      • [sql] [bug] Fixed a regression caused by #2410 whereby a CheckConstraint would apply itself back to the original table during a Table.tometadata() operation, as it would parse the SQL expression for a parent table. The operation now copies the given expression to correspond to the new table.(link)

        References: #2633

      • [sql] [bug] Fixed bug whereby using a label_length on dialect that was smaller than the size of actual column identifiers would fail to render the columns correctly in a SELECT statement.(link)

        References: #2610

      • [sql] [bug] The DECIMAL type now honors the “precision” and “scale” arguments when rendering DDL.(link)

        References: #2618

      • [sql] [bug] Made an adjustment to the “boolean” (i.e. __nonzero__) evaluation of binary expressions, i.e. x1 == x2, such that the “auto-grouping” applied by BinaryExpression in some cases won’t get in the way of this comparison. Previously, an expression like:

        expr1 = mycolumn > 2
        bool(expr1 == expr1)

        Would evaluate as False, even though this is an identity comparison, because mycolumn > 2 would be “grouped” before being placed into the BinaryExpression, thus changing its identity. BinaryExpression now keeps track of the “original” objects passed in. Additionally, the __nonzero__ method now only returns a result if the operator is == or !=; all others raise TypeError.

        (link)

        References: #2621

      • [sql] [bug] Fixed a gotcha where inadvertently calling list() on a ColumnElement would go into an endless loop, if ColumnOperators.__getitem__() were implemented. A new NotImplementedError is emitted via __iter__().(link)

      • [sql] [bug] Fixed bug in type_coerce() whereby typing information could be lost if the statement were used as a subquery inside of another statement, as well as other similar situations. Among other things, would cause typing information to be lost when the Oracle/mssql dialects would apply limit/offset wrappings.(link)

        References: #2603

      • [sql] [bug] Fixed bug whereby the ”.key” of a Column wasn’t being used when producing a “proxy” of the column against a selectable. This probably didn’t occur in 0.7 since 0.7 doesn’t respect the ”.key” in a wider range of scenarios.(link)

        References: #2597

      postgresql

      • [postgresql] [feature] HSTORE is now available in the Postgresql dialect. Will also use psycopg2’s extensions if available. Courtesy Audrius Kažukauskas.(link)

        References: #2606

      sqlite

      • [sqlite] [bug] More adjustment to this SQLite related issue which was released in 0.7.9, to intercept legacy SQLite quoting characters when reflecting foreign keys. In addition to intercepting double quotes, other quoting characters such as brackets, backticks, and single quotes are now also intercepted.(link)

        This change is also backported to: 0.7.10

        References: #2568

      mssql

      • [mssql] [feature] Support for reflection of the “name” of primary key constraints added, courtesy Dave Moore.(link)

        References: #2600

      • [mssql] [bug] Fixed bug whereby using “key” with Column in conjunction with “schema” for the owning Table would fail to locate result rows due to the MSSQL dialect’s “schema rendering” logic’s failure to take .key into account.(link)

        This change is also backported to: 0.7.10

      oracle

      • [oracle] [bug] Fixed table reflection for Oracle when accessing a synonym that refers to a DBLINK remote database; while the syntax has been present in the Oracle dialect for some time, up until now it has never been tested. The syntax has been tested against a sample database linking to itself, however there’s still some uncertainty as to what should be used for the “owner” when querying the remote database for table information. Currently, the value of “username” from user_db_links is used to match the “owner”.(link)

        References: #2619

      • [oracle] [bug] The Oracle LONG type, while an unbounded text type, does not appear to use the cx_Oracle.LOB type when result rows are returned, so the dialect has been repaired to exclude LONG from having cx_Oracle.LOB filtering applied. Also in 0.7.10.(link)

        References: #2620

      • [oracle] [bug] Repaired the usage of .prepare() in conjunction with cx_Oracle so that a return value of False will result in no call to connection.commit(), hence avoiding “no transaction” errors. Two-phase transactions have now been shown to work in a rudimental fashion with SQLAlchemy and cx_oracle, however are subject to caveats observed with the driver; check the documentation for details. Also in 0.7.10.(link)

        References: #2611

      firebird

      • [firebird] [bug] Added missing import for “fdb” to the experimental “firebird+fdb” dialect.(link)

        References: #2622

      misc

      • [feature] [sybase] Reflection support has been added to the Sybase dialect. Big thanks to Ben Trofatter for all the work developing and testing this.(link)

        References: #1753

      • [feature] [pool] The Pool will now log all connection.close() operations equally, including closes which occur for invalidated connections, detached connections, and connections beyond the pool capacity.(link)

      • [feature] [pool] The Pool now consults the Dialect for functionality regarding how the connection should be “auto rolled back”, as well as closed. This grants more control of transaction scope to the dialect, so that we will be better able to implement transactional workarounds like those potentially needed for pysqlite and cx_oracle.(link)

        References: #2611

      • [feature] [pool] Added new PoolEvents.reset() hook to capture the event before a connection is auto-rolled back, upon return to the pool. Together with ConnectionEvents.rollback() this allows all rollback events to be intercepted.(link)

      • [informix] Some cruft regarding informix transaction handling has been removed, including a feature that would skip calling commit()/rollback() as well as some hardcoded isolation level assumptions on begin(). The status of this dialect is not well understood as we don’t have any users working with it, nor any access to an Informix database. If someone with access to Informix wants to help test this dialect, please let us know.(link)

      0.8.0b1

      Released: October 30, 2012

      general

      • [general] [removed] The “sqlalchemy.exceptions” synonym for “sqlalchemy.exc” is removed fully.(link)

        References: #2433

      • [general] SQLAlchemy 0.8 now targets Python 2.5 and above. Python 2.4 is no longer supported.(link)

      orm

      • [orm] [feature] Major rewrite of relationship() internals now allow join conditions which include columns pointing to themselves within composite foreign keys. A new API for very specialized primaryjoin conditions is added, allowing conditions based on SQL functions, CAST, etc. to be handled by placing the annotation functions remote() and foreign() inline within the expression when necessary. Previous recipes using the semi-private _local_remote_pairs approach can be upgraded to this new approach.

        (link)

        References: #1401

      • [orm] [feature] New standalone function with_polymorphic() provides the functionality of query.with_polymorphic() in a standalone form. It can be applied to any entity within a query, including as the target of a join in place of the “of_type()” modifier.(link)

        References: #2333

      • [orm] [feature] The of_type() construct on attributes now accepts aliased() class constructs as well as with_polymorphic constructs, and works with query.join(), any(), has(), and also eager loaders subqueryload(), joinedload(), contains_eager().(link)

        References: #1106, #2438

      • [orm] [feature] Improvements to event listening for mapped classes allows that unmapped classes can be specified for instance- and mapper-events. The established events will be automatically set up on subclasses of that class when the propagate=True flag is passed, and the events will be set up for that class itself if and when it is ultimately mapped.(link)

        References: #2585

      • [orm] [feature] The “deferred declarative reflection” system has been moved into the declarative extension itself, using the new DeferredReflection class. This class is now tested with both single and joined table inheritance use cases.(link)

        References: #2485

      • [orm] [feature] Added new core function “inspect()”, which serves as a generic gateway to introspection into mappers, objects, others. The Mapper and InstanceState objects have been enhanced with a public API that allows inspection of mapped attributes, including filters for column-bound or relationship-bound properties, inspection of current object state, history of attributes, etc.(link)

        References: #2208

      • [orm] [feature] Calling rollback() within a session.begin_nested() will now only expire those objects that had net changes within the scope of that transaction, that is objects which were dirty or were modified on a flush. This allows the typical use case for begin_nested(), that of altering a small subset of objects, to leave in place the data from the larger enclosing set of objects that weren’t modified in that sub-transaction.(link)

        References: #2452

      • [orm] [feature] Added utility feature Session.enable_relationship_loading(), supersedes relationship.load_on_pending. Both features should be avoided, however.(link)

        References: #2372

      • [orm] [feature] Added support for .info dictionary argument to column_property(), relationship(), composite(). All MapperProperty classes have an auto-creating .info dict available overall.(link)

      • [orm] [feature] Adding/removing None from a mapped collection now generates attribute events. Previously, a None append would be ignored in some cases. Related to #2229.(link)

        References: #2229

      • [orm] [feature] The presence of None in a mapped collection now raises an error during flush. Previously, None values in collections would be silently ignored.(link)

        References: #2229

      • [orm] [feature] The Query.update() method is now more lenient as to the table being updated. Plain Table objects are better supported now, and additionally a joined-inheritance subclass may be used with update(); the subclass table will be the target of the update, and if the parent table is referenced in the WHERE clause, the compiler will call upon UPDATE..FROM syntax as allowed by the dialect to satisfy the WHERE clause. MySQL’s multi-table update feature is also supported if columns are specified by object in the “values” dictionary. PG’s DELETE..USING is not available in Core yet.(link)

      • [orm] [feature] New session events after_transaction_create and after_transaction_end allows tracking of new SessionTransaction objects. If the object is inspected, can be used to determine when a session first becomes active and when it deactivates.(link)

      • [orm] [feature] The Query can now load entity/scalar-mixed “tuple” rows that contain types which aren’t hashable, by setting the flag “hashable=False” on the corresponding TypeEngine object in use. Custom types that return unhashable types (typically lists) can set this flag to False.(link)

        References: #2592

      • [orm] [feature] Query now “auto correlates” by default in the same way as select() does. Previously, a Query used as a subquery in another would require the correlate() method be called explicitly in order to correlate a table on the inside to the outside. As always, correlate(None) disables correlation.(link)

        References: #2179

      • [orm] [feature] The after_attach event is now emitted after the object is established in Session.new or Session.identity_map upon Session.add(), Session.merge(), etc., so that the object is represented in these collections when the event is called. Added before_attach event to accommodate use cases that need autoflush with a pre-attached object.(link)

        References: #2464

      • [orm] [feature] The Session will produce warnings when unsupported methods are used inside the “execute” portion of the flush. These are the familiar methods add(), delete(), etc. as well as collection and related-object manipulations, as called within mapper-level flush events like after_insert(), after_update(), etc. It’s been prominently documented for a long time that SQLAlchemy cannot guarantee results when the Session is manipulated within the execution of the flush plan, however users are still doing it, so now there’s a warning. Maybe someday the Session will be enhanced to support these operations inside of the flush, but for now, results can’t be guaranteed.(link)

      • [orm] [feature] ORM entities can be passed to the core select() construct as well as to the select_from(), correlate(), and correlate_except() methods of select(), where they will be unwrapped into selectables.(link)

        References: #2245

      • [orm] [feature] Some support for auto-rendering of a relationship join condition based on the mapped attribute, with usage of core SQL constructs. E.g. select([SomeClass]).where(SomeClass.somerelationship) would render SELECT from “someclass” and use the primaryjoin of “somerelationship” as the WHERE clause. This changes the previous meaning of “SomeClass.somerelationship” when used in a core SQL context; previously, it would “resolve” to the parent selectable, which wasn’t generally useful. Also works with query.filter(). Related to #2245.(link)

        References: #2245

      • [orm] [feature] The registry of classes in declarative_base() is now a WeakValueDictionary. So subclasses of “Base” that are dereferenced will be garbage collected, if they are not referred to by any other mappers/superclass mappers. See the next note for this ticket.(link)

        References: #2526

      • [orm] [feature] Conflicts between columns on single-inheritance declarative subclasses, with or without using a mixin, can be resolved using a new @declared_attr usage described in the documentation.(link)

        References: #2472

      • [orm] [feature] declared_attr can now be used on non-mixin classes, even though this is generally only useful for single-inheritance subclass column conflict resolution.(link)

        References: #2472

      • [orm] [feature] declared_attr can now be used with attributes that are not Column or MapperProperty; including any user-defined value as well as association proxy objects.(link)

        References: #2517

      • [orm] [feature] Very limited support for inheriting mappers to be GC’ed when the class itself is dereferenced. The mapper must not have its own table (i.e. single table inheritance only) without polymorphic attributes in place. This allows for the use case of creating a temporary subclass of a declarative mapped class, with no table or mapping directives of its own, to be garbage collected when dereferenced by a unit test.(link)

        References: #2526

      • [orm] [feature] Declarative now maintains a registry of classes by string name as well as by full module-qualified name. Multiple classes with the same name can now be looked up based on a module-qualified string within relationship(). Simple class name lookups where more than one class shares the same name now raises an informative error message.(link)

        References: #2338

      • [orm] [feature] Can now provide class-bound attributes that override columns which are of any non-ORM type, not just descriptors.(link)

        References: #2535

      • [orm] [feature] Added with_labels and reduce_columns keyword arguments to Query.subquery(), to provide two alternate strategies for producing queries with uniquely-named columns.(link)

        References: #1729

      • [orm] [feature] A warning is emitted when a reference to an instrumented collection is no longer associated with the parent class due to expiration/attribute refresh/collection replacement, but an append or remove operation is received on the now-detached collection.(link)

        References: #2476

      • [orm] [bug] ORM will perform extra effort to determine that an FK dependency between two tables is not significant during flush if the tables are related via joined inheritance and the FK dependency is not part of the inherit_condition, saves the user a use_alter directive.(link)

        References: #2527

      • [orm] [bug] The instrumentation events class_instrument(), class_uninstrument(), and attribute_instrument() will now fire off only for descendant classes of the class assigned to listen(). Previously, an event listener would be assigned to listen for all classes in all cases regardless of the “target” argument passed.(link)

        References: #2590

      • [orm] [bug] with_polymorphic() produces JOINs in the correct order and with correct inheriting tables in the case of sending multi-level subclasses in an arbitrary order or with intermediary classes missing.(link)

        References: #1900

      • [orm] [bug] Improvements to joined/subquery eager loading dealing with chains of subclass entities sharing a common base, with no specific “join depth” provided. Will chain out to each subclass mapper individually before detecting a “cycle”, rather than considering the base class to be the source of the “cycle”.(link)

        References: #2481

      • [orm] [bug] The “passive” flag on Session.is_modified() no longer has any effect. is_modified() in all cases looks only at local in-memory modified flags and will not emit any SQL or invoke loader callables/initializers.(link)

        References: #2320

      • [orm] [bug] The warning emitted when using delete-orphan cascade with one-to-many or many-to-many without single-parent=True is now an error. The ORM would fail to function subsequent to this warning in any case.(link)

        References: #2405

      • [orm] [bug] Lazy loads emitted within flush events such as before_flush(), before_update(), etc. will now function as they would within non-event code, regarding consideration of the PK/FK values used in the lazy-emitted query. Previously, special flags would be established that would cause lazy loads to load related items based on the “previous” value of the parent PK/FK values specifically when called upon within a flush; the signal to load in this way is now localized to where the unit of work actually needs to load that way. Note that the UOW does sometimes load these collections before the before_update() event is called, so the usage of “passive_updates” or not can affect whether or not a collection will represent the “old” or “new” data, when accessed within a flush event, based on when the lazy load was emitted. The change is backwards incompatible in the exceedingly small chance that user event code depended on the old behavior.(link)

        References: #2350

      • [orm] [bug] Continuing regarding extra state post-flush due to event listeners; any states that are marked as “dirty” from an attribute perspective, usually via column-attribute set events within after_insert(), after_update(), etc., will get the “history” flag reset in all cases, instead of only those instances that were part of the flush. This has the effect that this “dirty” state doesn’t carry over after the flush and won’t result in UPDATE statements. A warning is emitted to this effect; the set_committed_state() method can be used to assign attributes on objects without producing history events.(link)

        References: #2582, #2566

      • [orm] [bug] Fixed a disconnect that slowly evolved between a @declared_attr Column and a directly-defined Column on a mixin. In both cases, the Column will be applied to the declared class’ table, but not to that of a joined inheritance subclass. Previously, the directly-defined Column would be placed on both the base and the sub table, which isn’t typically what’s desired.(link)

        References: #2565

      • [orm] [bug] Declarative can now propagate a column declared on a single-table inheritance subclass up to the parent class’ table, when the parent class is itself mapped to a join() or select() statement, directly or via joined inheritance, and not just a Table.(link)

        References: #2549

      • [orm] [bug] An error is emitted when uselist=False is combined with a “dynamic” loader. This is a warning in 0.7.9.(link)

      • [orm] [moved] The InstrumentationManager interface and the entire related system of alternate class implementation is now moved out to sqlalchemy.ext.instrumentation. This is a seldom used system that adds significant complexity and overhead to the mechanics of class instrumentation. The new architecture allows it to remain unused until InstrumentationManager is actually imported, at which point it is bootstrapped into the core.(link)

      • [orm] [removed] The legacy “mutable” system of the ORM, including the MutableType class as well as the mutable=True flag on PickleType and postgresql.ARRAY has been removed. In-place mutations are detected by the ORM using the sqlalchemy.ext.mutable extension, introduced in 0.7. The removal of MutableType and associated constructs removes a great deal of complexity from SQLAlchemy’s internals. The approach performed poorly as it would incur a scan of the full contents of the Session when in use.(link)

        References: #2442

      • [orm] [removed] Deprecated identifiers removed:

        • allow_null_pks mapper() argument (use allow_partial_pks)
        • _get_col_to_prop() mapper method (use get_property_by_column())
        • dont_load argument to Session.merge() (use load=True)
        • sqlalchemy.orm.shard module (use sqlalchemy.ext.horizontal_shard)
        (link)

      engine

      • [engine] [feature] Connection event listeners can now be associated with individual Connection objects, not just Engine objects.(link)

        References: #2511

      • [engine] [feature] The before_cursor_execute event fires off for so-called “_cursor_execute” events, which are usually special-case executions of primary-key bound sequences and default-generation SQL phrases that invoke separately when RETURNING is not used with INSERT.(link)

        References: #2459

      • [engine] [feature] The libraries used by the test suite have been moved around a bit so that they are part of the SQLAlchemy install again. In addition, a new suite of tests is present in the new sqlalchemy.testing.suite package. This is an under-development system that hopes to provide a universal testing suite for external dialects. Dialects which are maintained outside of SQLAlchemy can use the new test fixture as the framework for their own tests, and will get for free a “compliance” suite of dialect-focused tests, including an improved “requirements” system where specific capabilities and features can be enabled or disabled for testing.(link)

      • [engine] [feature] Added a new system for registration of new dialects in-process without using an entrypoint. See the docs for “Registering New Dialects”.(link)

        References: #2462
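
        Roughly, registration looks like the following; the dialect name, module path, and class name here are hypothetical:

        from sqlalchemy.dialects import registry

        registry.register("foodb", "mypackage.foodb_dialect", "FooDBDialect")
        # the dialect is then usable as create_engine("foodb://...")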

      • [engine] [feature] The “required” flag is set to True by default, if not passed explicitly, on bindparam() if the “value” or “callable” parameters are not passed. This will cause statement execution to check for the parameter being present in the final collection of bound parameters, rather than implicitly assigning None.(link)

        References: #2556

      • [engine] [feature] Various API tweaks to the “dialect” API to better support highly specialized systems such as the Akiban database, including more hooks to allow an execution context to access type processors.(link)

      • [engine] [feature] Inspector.get_primary_keys() is deprecated; use Inspector.get_pk_constraint(). Courtesy Diana Clarke.(link)

        References: #2422

      • [engine] [feature] New C extension module “utils” has been added for additional function speedups as we have time to implement.(link)

      • [engine] [bug] The Inspector.get_table_names() order_by=”foreign_key” feature now sorts tables by dependee first, to be consistent with util.sort_tables and metadata.sorted_tables.(link)

      • [engine] [bug] Fixed bug whereby if a database restart affected multiple connections, each connection would individually invoke a new disposal of the pool, even though only one disposal is needed.(link)

        References: #2522

      • [engine] [bug] The names of the columns on the .c. attribute of a select().apply_labels() is now based on <tablename>_<colkey> instead of <tablename>_<colname>, for those columns that have a distinctly named .key.(link)

        References: #2397

      • [engine] [bug] The autoload_replace flag on Table, when False, will cause any reflected foreign key constraints which refer to already-declared columns to be skipped, assuming that the in-Python declared column will take over the task of specifying in-Python ForeignKey or ForeignKeyConstraint declarations.(link)

      • [engine] [bug] The ResultProxy methods inserted_primary_key, last_updated_params(), last_inserted_params(), postfetch_cols(), prefetch_cols() all assert that the given statement is a compiled construct, and is an insert() or update() statement as is appropriate, else raise InvalidRequestError.(link)

        References: #2498

      • [engine] ResultProxy.last_inserted_ids is removed, replaced by inserted_primary_key.(link)

      sql

      • [sql] [feature] Added a new method Engine.execution_options() to Engine. This method works similarly to Connection.execution_options() in that it creates a copy of the parent object which will refer to the new set of options. The method can be used to build sharding schemes where each engine shares the same underlying pool of connections. The method has been tested against the horizontal shard recipe in the ORM as well.

        (link)
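
        A rough sketch of the sharding-style use case; “shard_id” is an arbitrary, user-defined option name:

        from sqlalchemy import create_engine

        primary = create_engine("postgresql://scott:tiger@localhost/test")
        shard1 = primary.execution_options(shard_id="shard1")
        shard2 = primary.execution_options(shard_id="shard2")
        # both copies share primary's connection pool but carry their own
        # execution options, e.g. for inspection within event handlers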

      • [sql] [feature] Major rework of operator system in Core, to allow redefinition of existing operators as well as addition of new operators at the type level. New types can be created from existing ones which add or redefine operations that are exported out to column expressions, in a similar manner to how the ORM has allowed comparator_factory. The new architecture moves this capability into the Core so that it is consistently usable in all cases, propagating cleanly using existing type propagation behavior.(link)

        References: #2547

      • [sql] [feature] To complement, types can now provide “bind expressions” and “column expressions” which allow compile-time injection of SQL expressions into statements on a per-column or per-bind level. This is to suit the use case of a type which needs to augment bind- and result- behavior at the SQL level, as opposed to in the Python level. Allows for schemes like transparent encryption/ decryption, usage of Postgis functions, etc.(link)

        References: #1534, #2547

      • [sql] [feature] The Core operator system now includes the getitem operator, i.e. the bracket operator in Python. This is used at first to provide index and slice behavior to the Postgresql ARRAY type, and also provides a hook for end-user definition of custom __getitem__ schemes which can be applied at the type level as well as within ORM-level custom operator schemes. lshift (<<) and rshift (>>) are also supported as optional operators.

        Note that this change has the effect that descriptor-based __getitem__ schemes used by the ORM in conjunction with synonym() or other “descriptor-wrapped” schemes will need to start using a custom comparator in order to maintain this behavior.

        (link)

      • [sql] [feature] Revised the rules used to determine the operator precedence for the user-defined operator, i.e. that granted using the op() method. Previously, the smallest precedence was applied in all cases; now the default precedence is zero, lower than all operators except “comma” (such as is used in the argument list of a func call) and “AS”, and is also customizable via the “precedence” argument on the op() method.(link)

        References: #2537

      • [sql] [feature] Added “collation” parameter to all String types. When present, renders as COLLATE <collation>. This to support the COLLATE keyword now supported by several databases including MySQL, SQLite, and Postgresql.(link)

        References: #2276
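
        For example, within a hypothetical Table definition (the collation name is backend-specific):

        from sqlalchemy import Column, String

        Column("name", String(50, collation="utf8_bin"))
        # DDL renders e.g.: name VARCHAR(50) COLLATE utf8_bin   (MySQL)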

      • [sql] [feature] Custom unary operators can now be used by combining operators.custom_op() with UnaryExpression().(link)

      • [sql] [feature] Enhanced GenericFunction and func.* to allow for user-defined GenericFunction subclasses to be available via the func.* namespace automatically by classname, optionally using a package name, as well as with the ability to have the rendered name different from the identified name in func.*.(link)

      • [sql] [feature] The cast() and extract() constructs will now be produced via the func.* accessor as well, as users naturally try to access these names from func.* they might as well do what’s expected, even though the returned object is not a FunctionElement.(link)

        References: #2562

      • [sql] [feature] The Inspector object can now be acquired using the new inspect() service, part of the new runtime inspection system.(link)

        References: #2208

      • [sql] [feature] The column_reflect event now accepts the Inspector object as the first argument, preceding “table”. Code which uses the 0.7 version of this very new event will need modification to add the “inspector” object as the first argument.(link)

        References: #2418

      • [sql] [feature] The behavior of column targeting in result sets is now case sensitive by default. SQLAlchemy for many years would run a case-insensitive conversion on these values, probably to alleviate early case sensitivity issues with dialects like Oracle and Firebird. These issues have been more cleanly solved in more modern versions so the performance hit of calling lower() on identifiers is removed. The case insensitive comparisons can be re-enabled by setting “case_insensitive=False” on create_engine().(link)

        References: #2423

      • [sql] [feature] The “unconsumed column names” warning emitted when keys are present in insert.values() or update.values() that aren’t in the target table is now an exception.(link)

        References: #2415

      • [sql] [feature] Added “MATCH” clause to ForeignKey, ForeignKeyConstraint, courtesy Ryan Kelly.(link)

        References: #2502
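
        For example, within a hypothetical child Table definition:

        from sqlalchemy import Column, Integer, ForeignKey

        Column("parent_id", Integer, ForeignKey("parent.id", match="FULL"))
        # DDL renders: ... REFERENCES parent (id) MATCH FULL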

      • [sql] [feature] Added support for DELETE and UPDATE from an alias of a table, which would presumably be related to itself elsewhere in the query, courtesy Ryan Kelly.(link)

        References: #2507

      • [sql] [feature] select() features a correlate_except() method, which auto-correlates all selectables except those passed.(link)

      • [sql] [feature] The prefix_with() method is now available on each of select(), insert(), update(), delete(), all with the same API, accepting multiple prefix calls, as well as a “dialect name” so that the prefix can be limited to one kind of dialect.(link)

        References: #2431

      • [sql] [feature] Added reduce_columns() method to select() construct, replaces columns inline using the util.reduce_columns utility function to remove equivalent columns. reduce_columns() also adds “with_only_synonyms” to limit the reduction just to those columns which have the same name. The deprecated fold_equivalents() feature is removed.(link)

        References: #1729

      • [sql] [feature] Reworked the startswith(), endswith(), contains() operators to do a better job with negation (NOT LIKE), and also to assemble them at compilation time so that their rendered SQL can be altered, such as in the case for Firebird STARTING WITH(link)

        References: #2470

      • [sql] [feature] Added a hook to the system of rendering CREATE TABLE that provides access to the render for each Column individually, by constructing a @compiles function against the new schema.CreateColumn construct.(link)

        References: #2463

      • [sql] [feature] “scalar” selects now have a WHERE method to help with generative building. Also, a slight adjustment regarding how scalar selects “correlate” columns; the new methodology no longer applies meaning to the underlying Table column being selected. This improves some fairly esoteric situations, and the logic that was there didn’t seem to have any purpose.(link)

      • [sql] [feature] An explicit error is raised when a ForeignKeyConstraint() that was constructed to refer to multiple remote tables is first used.(link)

        References: #2455

      • [sql] [feature] Added ColumnOperators.notin_(), ColumnOperators.notlike(), ColumnOperators.notilike() to ColumnOperators.(link)

        References: #2580
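
        A couple of quick examples, assuming a hypothetical “users” table:

        from sqlalchemy import select

        print(select([users]).where(users.c.name.notin_(["ed", "wendy"])))
        # ... WHERE users.name NOT IN (:name_1, :name_2)
        print(select([users]).where(users.c.name.notlike("%ed%")))
        # ... WHERE users.name NOT LIKE :name_1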

      • [sql] [bug] Fixed bug where keyword arguments passed to Compiler.process() wouldn’t get propagated to the column expressions present in the columns clause of a SELECT statement. In particular this would come up when used by custom compilation schemes that relied upon special flags.(link)

        References: #2593

      • [sql] [bug] [orm] The auto-correlation feature of select(), and by proxy that of Query, will not take effect for a SELECT statement that is being rendered directly in the FROM list of the enclosing SELECT. Correlation in SQL only applies to column expressions such as those in the WHERE, ORDER BY, columns clause.(link)

        References: #2595

      • [sql] [bug] A tweak to column precedence which moves the “concat” and “match” operators to be the same as that of “is”, “like”, and others; this helps with parenthesization rendering when used in conjunction with “IS”.(link)

        References: #2564

      • [sql] [bug] Applying a column expression to a select statement using a label with or without other modifying constructs will no longer “target” that expression to the underlying Column; this affects ORM operations that rely upon Column targeting in order to retrieve results. That is, a query like query(User.id, User.id.label(‘foo’)) will now track the value of each “User.id” expression separately instead of munging them together. It is not expected that any users will be impacted by this; however, a usage that uses select() in conjunction with query.from_statement() and attempts to load fully composed ORM entities may not function as expected if the select() named Column objects with arbitrary .label() names, as these will no longer target to the Column objects mapped by that entity.(link)

        References: #2591

      • [sql] [bug] Fixes to the interpretation of the Column “default” parameter as a callable to not pass ExecutionContext into a keyword argument parameter.(link)

        References: #2520

      • [sql] [bug] All of UniqueConstraint, ForeignKeyConstraint, CheckConstraint, and PrimaryKeyConstraint will attach themselves to their parent table automatically when they refer to a Table-bound Column object directly (i.e. not just string column name), and refer to one and only one Table. Prior to 0.8 this behavior occurred for UniqueConstraint and PrimaryKeyConstraint, but not ForeignKeyConstraint or CheckConstraint.(link)

        References: #2410

      • [sql] [bug] TypeDecorator now includes a generic repr() that works in terms of the “impl” type by default. This is a behavioral change for those TypeDecorator classes that specify a custom __init__ method; those types will need to re-define __repr__() if they need __repr__() to provide a faithful constructor representation.(link)

        References: #2594

      • [sql] [bug] column.label(None) now produces an anonymous label, instead of returning the column object itself, consistent with the behavior of label(column, None).(link)

        References: #2168

      • [sql] [changed] Most classes in expression.sql are no longer preceded with an underscore, i.e. Label, SelectBase, Generative, CompareMixin. _BindParamClause is also renamed to BindParameter. The old underscore names for these classes will remain available as synonyms for the foreseeable future.(link)

      • [sql] [removed] The long-deprecated and non-functional assert_unicode flag on create_engine() as well as String is removed.(link)

      • [sql] [change] The Text() type renders the length given to it, if a length was specified.(link)

      postgresql

      • [postgresql] [feature] postgresql.ARRAY features an optional “dimension” argument, will assign a specific number of dimensions to the array which will render in DDL as ARRAY[][]..., also improves performance of bind/result processing.(link)

        References: #2441

      • [postgresql] [feature] postgresql.ARRAY now supports indexing and slicing. The Python [] operator is available on all SQL expressions that are of type ARRAY; integer or simple slices can be passed. The slices can also be used on the assignment side in the SET clause of an UPDATE statement by passing them into Update.values(); see the docs for examples.
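
        A brief sketch of both ARRAY additions (table and column names are hypothetical; the multi-dimension keyword is assumed here to be dimensions):

        from sqlalchemy import Table, Column, Integer, MetaData, select
        from sqlalchemy.dialects import postgresql

        metadata = MetaData()
        mytable = Table('mytable', metadata,
            Column('id', Integer, primary_key=True),
            Column('data', postgresql.ARRAY(Integer, dimensions=2))
        )

        # index and slice access on the ARRAY-typed column
        stmt = select([mytable.c.data[5][6]])

        # slices may also be used on the SET clause of an UPDATE
        upd = mytable.update().values({mytable.c.data[2:7]: [1, 2, 3]})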

      • [postgresql] [feature] Added new “array literal” construct postgresql.array(). Basically a “tuple” that renders as ARRAY[1,2,3].

      • [postgresql] [feature] Added support for the Postgresql ONLY keyword, which can appear corresponding to a table in a SELECT, UPDATE, or DELETE statement. The phrase is established using with_hint(). Courtesy Ryan Kelly.

        References: #2506
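
        Usage is along these lines (a sketch; the table and connection names are hypothetical):

        result = conn.execute(
            mytable.select().with_hint(mytable, 'ONLY', 'postgresql')
        )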

      • [postgresql] [feature] The “ischema_names” dictionary of the Postgresql dialect is “unofficially” customizable. Meaning, new types such as PostGIS types can be added into this dictionary, and the PG type reflection code should be able to handle simple types with variable numbers of arguments. The functionality here is “unofficial” for three reasons:

        1. this is not an “official” API. Ideally an “official” API would allow custom type-handling callables at the dialect or global level in a generic way.
        2. This is only implemented for the PG dialect, in particular because PG has broad support for custom types vs. other database backends. A real API would be implemented at the default dialect level.
        3. The reflection code here is only tested against simple types and probably has issues with more compositional types.

        patch courtesy Éric Lemoine.


      mysql

      • [mysql] [feature] Added TIME type to mysql dialect, accepts “fsp” argument which is the new “fractional seconds” specifier for recent MySQL versions. The datatype will interpret a microseconds portion received from the driver, however note that at this time most/all MySQL DBAPIs do not support returning this value.

        References: #2534
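
        For example (a sketch; table and column names are hypothetical):

        from sqlalchemy import Table, Column, Integer, MetaData
        from sqlalchemy.dialects.mysql import TIME

        metadata = MetaData()
        events = Table('events', metadata,
            Column('id', Integer, primary_key=True),
            Column('duration', TIME(fsp=6))   # fractional seconds precision
        )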

      • [mysql] [bug] The dialect no longer emits the expensive server collations query, nor the server casing query, on first connect. These functions are still available as semi-private.

        References: #2404

      sqlite

      • [sqlite] [feature] The SQLite date and time types have been overhauled to support a more open-ended format for input and output, using name-based format strings and regexps. A new argument “microseconds” also provides the option to omit the “microseconds” portion of timestamps. Thanks to Nathan Wright for the work and tests on this.

        References: #2363
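
        A sketch of the new configuration (the storage_format and regexp keyword names, as well as the format shown, are illustrative assumptions):

        from sqlalchemy.dialects.sqlite import DATETIME

        dt = DATETIME(
            storage_format="%(year)04d/%(month)02d/%(day)02d "
                           "%(hour)02d:%(minute)02d:%(second)02d",
            regexp=r"(\d+)/(\d+)/(\d+) (\d+):(\d+):(\d+)",
        )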

      • [sqlite] Added types.NCHAR, types.NVARCHAR to the SQLite dialect’s list of recognized type names for reflection. SQLite returns the name originally given to a type as the reflected type name.

        References: rc3addcc9ffad

      mssql

      • [mssql] [feature] SQL Server dialect can be given database-qualified schema names, i.e. “schema='mydatabase.dbo'”; reflection operations will detect this, split the schema on the “.” to get the owner separately, and emit a “USE mydatabase” statement before reflecting targets within the “dbo” owner; the existing database returned from DB_NAME() is then restored.

      • [mssql] [feature] Updated support for the mxodbc driver; mxodbc 3.2.1 is recommended for full compatibility.

      • [mssql] [bug] Removed legacy behavior whereby a column comparison to a scalar SELECT via == would coerce to an IN with the SQL Server dialect. This is implicit behavior which fails in other scenarios, so it is removed. Code which relies on this needs to be modified to use column.in_(select) explicitly.

        References: #2277

      oracle

      • [oracle] [feature] The types of columns excluded from the setinputsizes() set can be customized by sending a list of string DBAPI type names to exclude, using the exclude_setinputsizes dialect parameter. This list was previously fixed. The list also now defaults to STRING, UNICODE, removing CLOB, NCLOB from the list.

        References: #2561

      • [oracle] [bug] Quoting information is now passed along from a Column with quote=True when generating a same-named bound parameter to the bindparam() object, as is the case in generated INSERT and UPDATE statements, so that unknown reserved names can be fully supported.

        References: #2437

      • [oracle] [bug] The CreateIndex construct in Oracle will now schema-qualify the name of the index to be that of the parent table. Previously this name was omitted which apparently creates the index in the default schema, rather than that of the table.

      firebird

      • [firebird] [feature] The “startswith()” operator renders as “STARTING WITH”, “~startswith()” renders as “NOT STARTING WITH”, using Firebird’s more efficient operator.

        References: #2470

      • [firebird] [feature] An experimental dialect for the fdb driver is added, but is untested as I cannot get the fdb package to build.

        References: #2504

      • [firebird] [bug] CompileError is now raised when an attempt is made to emit VARCHAR with no length, in the same way as MySQL.

        References: #2505

      • [firebird] [bug] Firebird now uses strict “ansi bind rules” so that bound parameters don’t render in the columns clause of a statement - they render literally instead.

      • [firebird] [bug] Support for passing datetime as date when using the DateTime type with Firebird; other dialects support this.

      misc

      • [feature] [access] The MS Access dialect has been moved to its own project on Bitbucket, taking advantage of the new SQLAlchemy dialect compliance suite. The dialect is still in very rough shape and probably not ready for general use yet, however it does have extremely rudimentary functionality now. https://bitbucket.org/zzzeek/sqlalchemy-access

      • [moved] [maxdb] The MaxDB dialect, which hasn’t been functional for several years, is moved out to a pending bitbucket project, https://bitbucket.org/zzzeek/sqlalchemy-maxdb.

      • [examples] The Beaker caching example has been converted to use dogpile.cache. This is a new caching library written by the same creator of Beaker’s caching internals, and represents a vastly improved, simplified, and modernized system of caching.

        See also

        Dogpile Caching


        References: #2589

       SQLAlchemy-0.8.4/doc/changelog/index.html
       Changes and Migration — SQLAlchemy 0.8 Documentation

       Release: 0.8.4 | Release Date: December 8, 2013

       SQLAlchemy-0.8.4/doc/changelog/migration_04.html

      What’s new in SQLAlchemy 0.4?


      About this Document

      This document describes changes between SQLAlchemy version 0.3, last released October 14, 2007, and SQLAlchemy version 0.4, last released October 12, 2008.

      Document date: March 21, 2008

      First Things First

      If you’re using any ORM features, make sure you import from sqlalchemy.orm:

      from sqlalchemy import *
      from sqlalchemy.orm import *

      Secondly, anywhere you used to say engine=, connectable=, bind_to=, something.engine, metadata.connect(), use bind:

      myengine = create_engine('sqlite://')
      
      meta = MetaData(myengine)
      
      meta2 = MetaData()
      meta2.bind = myengine
      
      session = create_session(bind=myengine)
      
      statement = select([table], bind=myengine)

       Got those? Good! You’re now (95%) 0.4 compatible. If you’re using 0.3.10, you can make these changes immediately; they’ll work there too.

      Module Imports

       In 0.3, “from sqlalchemy import *” would import all of sqlalchemy’s sub-modules into your namespace. Version 0.4 no longer imports sub-modules into the namespace. This may mean you need to add extra imports into your code.

      In 0.3, this code worked:

      from sqlalchemy import *
      
      class UTCDateTime(types.TypeDecorator):
          pass

      In 0.4, one must do:

      from sqlalchemy import *
      from sqlalchemy import types
      
      class UTCDateTime(types.TypeDecorator):
          pass

      Object Relational Mapping

      Querying

      New Query API

      Query is standardized on the generative interface (old interface is still there, just deprecated). While most of the generative interface is available in 0.3, the 0.4 Query has the inner guts to match the generative outside, and has a lot more tricks. All result narrowing is via filter() and filter_by(), limiting/offset is either through array slices or limit()/offset(), joining is via join() and outerjoin() (or more manually, through select_from() as well as manually-formed criteria).

       To avoid deprecation warnings, you must make some changes to your 0.3 code:

       User.query.get_by( **kwargs )   ->  User.query.filter_by(**kwargs).first()

       User.query.select_by( **kwargs )  ->  User.query.filter_by(**kwargs).all()

       User.query.select()  ->  User.query.filter(xxx).all()

      New Property-Based Expression Constructs

      By far the most palpable difference within the ORM is that you can now construct your query criterion using class-based attributes directly. The ”.c.” prefix is no longer needed when working with mapped classes:

      session.query(User).filter(and_(User.name == 'fred', User.id > 17))

      While simple column-based comparisons are no big deal, the class attributes have some new “higher level” constructs available, including what was previously only available in filter_by():

      # comparison of scalar relations to an instance
      filter(Address.user == user)
      
      # return all users who contain a particular address
      filter(User.addresses.contains(address))
      
      # return all users who *dont* contain the address
      filter(~User.address.contains(address))
      
      # return all users who contain a particular address with
      # the email_address like '%foo%'
      filter(User.addresses.any(Address.email_address.like('%foo%')))
      
      # same, email address equals 'foo@bar.com'.  can fall back to keyword
      # args for simple comparisons
      filter(User.addresses.any(email_address = 'foo@bar.com'))
      
      # return all Addresses whose user attribute has the username 'ed'
      filter(Address.user.has(name='ed'))
      
      # return all Addresses whose user attribute has the username 'ed'
      # and an id > 5 (mixing clauses with kwargs)
      filter(Address.user.has(User.id > 5, name='ed'))

      The Column collection remains available on mapped classes in the .c attribute. Note that property-based expressions are only available with mapped properties of mapped classes. .c is still used to access columns in regular tables and selectable objects produced from SQL Expressions.

      Automatic Join Aliasing

      We’ve had join() and outerjoin() for a while now:

      session.query(Order).join('items')...

      Now you can alias them:

       session.query(Order).join('items', aliased=True).\
          filter(Item.name == 'item 1').join('items', aliased=True).\
          filter(Item.name == 'item 3')

      The above will create two joins from orders->items using aliases. the filter() call subsequent to each will adjust its table criterion to that of the alias. To get at the Item objects, use add_entity() and target each join with an id:

       session.query(Order).join('items', id='j1', aliased=True).\
           filter(Item.name == 'item 1').join('items', aliased=True, id='j2').\
           filter(Item.name == 'item 3').add_entity(Item, id='j1').add_entity(Item, id='j2')

      Returns tuples in the form: (Order, Item, Item).

      Self-referential Queries

       So query.join() can make aliases now. What does that give us? Self-referential queries! Joins can be done without any Alias objects:

      # standard self-referential TreeNode mapper with backref
      mapper(TreeNode, tree_nodes, properties={
           'children':relation(TreeNode, backref=backref('parent', remote_side=tree_nodes.c.id))
      })
      
      # query for node with child containing "bar" two levels deep
      session.query(TreeNode).join(["children", "children"], aliased=True).filter_by(name='bar')

      To add criterion for each table along the way in an aliased join, you can use from_joinpoint to keep joining against the same line of aliases:

      # search for the treenode along the path "n1/n12/n122"
      
      # first find a Node with name="n122"
      q = sess.query(Node).filter_by(name='n122')
      
      # then join to parent with "n12"
      q = q.join('parent', aliased=True).filter_by(name='n12')
      
      # join again to the next parent with 'n1'.  use 'from_joinpoint'
      # so we join from the previous point, instead of joining off the
      # root table
      q = q.join('parent', aliased=True, from_joinpoint=True).filter_by(name='n1')
      
      node = q.first()

      query.populate_existing()

       The eager version of query.load() (or session.refresh()). Every instance loaded from the query, including all eagerly loaded items, gets refreshed immediately if already present in the session:

      session.query(Blah).populate_existing().all()

      Relations

      SQL Clauses Embedded in Updates/Inserts

      For inline execution of SQL clauses, embedded right in the UPDATE or INSERT, during a flush():

      myobject.foo = mytable.c.value + 1
      
      user.pwhash = func.md5(password)
      
      order.hash = text("select hash from hashing_table")

       The column-attribute is set up with a deferred loader after the operation, so that it issues the SQL to load the new value when you next access it.

      Self-referential and Cyclical Eager Loading

       Since our alias-fu has improved, relation() can join along the same table *any number of times*; you tell it how deep you want to go. Let’s show the self-referential TreeNode more clearly:

      nodes = Table('nodes', metadata,
           Column('id', Integer, primary_key=True),
           Column('parent_id', Integer, ForeignKey('nodes.id')),
           Column('name', String(30)))
      
      class TreeNode(object):
          pass
      
      mapper(TreeNode, nodes, properties={
          'children':relation(TreeNode, lazy=False, join_depth=3)
      })

      So what happens when we say:

      create_session().query(TreeNode).all()

      ? A join along aliases, three levels deep off the parent:

      SELECT
      nodes_3.id AS nodes_3_id, nodes_3.parent_id AS nodes_3_parent_id, nodes_3.name AS nodes_3_name,
      nodes_2.id AS nodes_2_id, nodes_2.parent_id AS nodes_2_parent_id, nodes_2.name AS nodes_2_name,
      nodes_1.id AS nodes_1_id, nodes_1.parent_id AS nodes_1_parent_id, nodes_1.name AS nodes_1_name,
      nodes.id AS nodes_id, nodes.parent_id AS nodes_parent_id, nodes.name AS nodes_name
      FROM nodes LEFT OUTER JOIN nodes AS nodes_1 ON nodes.id = nodes_1.parent_id
      LEFT OUTER JOIN nodes AS nodes_2 ON nodes_1.id = nodes_2.parent_id
      LEFT OUTER JOIN nodes AS nodes_3 ON nodes_2.id = nodes_3.parent_id
      ORDER BY nodes.oid, nodes_1.oid, nodes_2.oid, nodes_3.oid

       Notice the nice clean alias names too. The joining doesn’t care if it’s against the same immediate table or some other object which then cycles back to the beginning. Any kind of chain of eager loads can cycle back onto itself when join_depth is specified. When not present, eager loading automatically stops when it hits a cycle.

      Composite Types

       This is one from the Hibernate camp. Composite Types let you define a custom datatype that is composed of more than one column (or one column, if you wanted). Let’s define a new type, Point, which stores an x/y coordinate:

      class Point(object):
          def __init__(self, x, y):
              self.x = x
              self.y = y
          def __composite_values__(self):
              return self.x, self.y
          def __eq__(self, other):
              return other.x == self.x and other.y == self.y
          def __ne__(self, other):
              return not self.__eq__(other)

      The way the Point object is defined is specific to a custom type; constructor takes a list of arguments, and the __composite_values__() method produces a sequence of those arguments. The order will match up to our mapper, as we’ll see in a moment.

      Let’s create a table of vertices storing two points per row:

      vertices = Table('vertices', metadata,
          Column('id', Integer, primary_key=True),
          Column('x1', Integer),
          Column('y1', Integer),
          Column('x2', Integer),
          Column('y2', Integer),
          )

       Then, map it! We’ll create a Vertex object which stores two Point objects:

      class Vertex(object):
          def __init__(self, start, end):
              self.start = start
              self.end = end
      
      mapper(Vertex, vertices, properties={
          'start':composite(Point, vertices.c.x1, vertices.c.y1),
          'end':composite(Point, vertices.c.x2, vertices.c.y2)
      })

      Once you’ve set up your composite type, it’s usable just like any other type:

      v = Vertex(Point(3, 4), Point(26,15))
      session.save(v)
      session.flush()
      
      # works in queries too
      q = session.query(Vertex).filter(Vertex.start == Point(3, 4))

      If you’d like to define the way the mapped attributes generate SQL clauses when used in expressions, create your own sqlalchemy.orm.PropComparator subclass, defining any of the common operators (like __eq__(), __le__(), etc.), and send it in to composite(). Composite types work as primary keys too, and are usable in query.get():

      # a Document class which uses a composite Version
      # object as primary key
      document = query.get(Version(1, 'a'))
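
       Returning to the comparator idea above, a minimal sketch of a PropComparator for the Point type might look like the following (the prop.columns attribute and the exact keyword used to pass the class to composite() are assumptions, not confirmed by this document):

       from sqlalchemy import and_
       from sqlalchemy.orm import PropComparator

       class PointComparator(PropComparator):
           def __gt__(self, other):
               """redefine the 'greater than' operation for Point columns"""
               return and_(*[a > b for a, b in
                             zip(self.prop.columns,
                                 other.__composite_values__())])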

      dynamic_loader() relations

       A relation() that returns a live Query object for all read operations. Write operations are limited to just append() and remove(); changes to the collection are not visible until the session is flushed. This feature is particularly handy with an “autoflushing” session which will flush before each query.

      mapper(Foo, foo_table, properties={
          'bars':dynamic_loader(Bar, backref='foo', <other relation() opts>)
      })
      
      session = create_session(autoflush=True)
      foo = session.query(Foo).first()
      
      foo.bars.append(Bar(name='lala'))
      
      for bar in foo.bars.filter(Bar.name=='lala'):
          print bar
      
      session.commit()

      New Options: undefer_group(), eagerload_all()

      A couple of query options which are handy. undefer_group() marks a whole group of “deferred” columns as undeferred:

      mapper(Class, table, properties={
          'foo' : deferred(table.c.foo, group='group1'),
          'bar' : deferred(table.c.bar, group='group1'),
          'bat' : deferred(table.c.bat, group='group1'),
       })
      
      session.query(Class).options(undefer_group('group1')).filter(...).all()

      and eagerload_all() sets a chain of attributes to be eager in one pass:

      mapper(Foo, foo_table, properties={
         'bar':relation(Bar)
      })
      mapper(Bar, bar_table, properties={
         'bat':relation(Bat)
      })
      mapper(Bat, bat_table)
      
      # eager load bar and bat
      session.query(Foo).options(eagerload_all('bar.bat')).filter(...).all()

      New Collection API

       Collections are no longer proxied by an InstrumentedList proxy, and access to members, methods and attributes is direct. Decorators now intercept objects entering and leaving the collection, and it is now possible to easily write a custom collection class that manages its own membership. Flexible decorators also replace the named method interface of custom collections in 0.3, allowing any class to be easily adapted to use as a collection container.
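
       As a brief sketch (the class name is illustrative), a custom set-based collection using the decorator interface looks like this:

       from sqlalchemy.orm.collections import collection

       class SetCollection(set):
           """A set-based collection class using the new decorator API."""

           @collection.appender
           def append(self, item):
               self.add(item)

           @collection.remover
           def remove(self, item):
               set.remove(self, item)

           @collection.iterator
           def __iter__(self):
               return set.__iter__(self)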

       Dictionary-based collections are now much easier to use and fully dict-like. Changing __iter__ is no longer needed for dicts, and new built-in dict types cover many needs:

      # use a dictionary relation keyed by a column
      relation(Item, collection_class=column_mapped_collection(items.c.keyword))
      # or named attribute
      relation(Item, collection_class=attribute_mapped_collection('keyword'))
      # or any function you like
      relation(Item, collection_class=mapped_collection(lambda entity: entity.a + entity.b))

      Existing 0.3 dict-like and freeform object derived collection classes will need to be updated for the new API. In most cases this is simply a matter of adding a couple decorators to the class definition.

      Mapped Relations from External Tables/Subqueries

      This feature quietly appeared in 0.3 but has been improved in 0.4 thanks to better ability to convert subqueries against a table into subqueries against an alias of that table; this is key for eager loading, aliased joins in queries, etc. It reduces the need to create mappers against select statements when you just need to add some extra columns or subqueries:

      mapper(User, users, properties={
             'fullname': column_property((users.c.firstname + users.c.lastname).label('fullname')),
             'numposts': column_property(
                  select([func.count(1)], users.c.id==posts.c.user_id).correlate(users).label('posts')
             )
          })

      a typical query looks like:

      SELECT (SELECT count(1) FROM posts WHERE users.id = posts.user_id) AS count,
      users.firstname || users.lastname AS fullname,
      users.id AS users_id, users.firstname AS users_firstname, users.lastname AS users_lastname
      FROM users ORDER BY users.oid

      Horizontal Scaling (Sharding) API

       [browser:/sqlalchemy/trunk/examples/sharding/attribute_shard.py]

      Sessions

      New Session Create Paradigm; SessionContext, assignmapper Deprecated

      That’s right, the whole shebang is being replaced with two configurational functions. Using both will produce the most 0.1-ish feel we’ve had since 0.1 (i.e., the least amount of typing).

      Configure your own Session class right where you define your engine (or anywhere):

      from sqlalchemy import create_engine
      from sqlalchemy.orm import sessionmaker
      
      engine = create_engine('myengine://')
      Session = sessionmaker(bind=engine, autoflush=True, transactional=True)
      
      # use the new Session() freely
      sess = Session()
      sess.save(someobject)
      sess.flush()

      If you need to post-configure your Session, say with an engine, add it later with configure():

      Session.configure(bind=create_engine(...))

      All the behaviors of SessionContext and the query and __init__ methods of assignmapper are moved into the new scoped_session() function, which is compatible with both sessionmaker as well as create_session():

      from sqlalchemy.orm import scoped_session, sessionmaker
      
      Session = scoped_session(sessionmaker(autoflush=True, transactional=True))
      Session.configure(bind=engine)
      
      u = User(name='wendy')
      
      sess = Session()
      sess.save(u)
      sess.commit()
      
      # Session constructor is thread-locally scoped.  Everyone gets the same
      # Session in the thread when scope="thread".
      sess2 = Session()
      assert sess is sess2

       When using a thread-local Session, the returned class has all of Session’s interface implemented as classmethods, and “assignmapper”’s functionality is available using the mapper classmethod. Just like the old objectstore days....

      # "assignmapper"-like functionality available via ScopedSession.mapper
      Session.mapper(User, users_table)
      
      u = User(name='wendy')
      
      Session.commit()

      Sessions are again Weak Referencing By Default

       The weak_identity_map flag is now set to True by default on Session. Instances which are externally dereferenced and fall out of scope are removed from the session automatically. However, items which have “dirty” changes present will remain strongly referenced until those changes are flushed, at which point the object reverts to being weakly referenced (this works for ‘mutable’ types, like picklable attributes, as well). Setting weak_identity_map to False restores the old strong-referencing behavior for those of you using the session like a cache.

      Auto-Transactional Sessions

       As you might have noticed above, we are calling commit() on Session. The flag transactional=True means the Session is always in a transaction; commit() persists permanently.

      Auto-Flushing Sessions

      Also, autoflush=True means the Session will flush() before each query as well as when you call flush() or commit(). So now this will work:

      Session = sessionmaker(bind=engine, autoflush=True, transactional=True)
      
      u = User(name='wendy')
      
      sess = Session()
      sess.save(u)
      
      # wendy is flushed, comes right back from a query
      wendy = sess.query(User).filter_by(name='wendy').one()

      Transactional methods moved onto sessions

      commit() and rollback(), as well as begin() are now directly on Session. No more need to use SessionTransaction for anything (it remains in the background).

      Session = sessionmaker(autoflush=True, transactional=False)
      
      sess = Session()
      sess.begin()
      
      # use the session
      
      sess.commit() # commit transaction

      Sharing a Session with an enclosing engine-level (i.e. non-ORM) transaction is easy:

      Session = sessionmaker(autoflush=True, transactional=False)
      
      conn = engine.connect()
      trans = conn.begin()
      sess = Session(bind=conn)
      
      # ... session is transactional
      
      # commit the outermost transaction
      trans.commit()

      Nested Session Transactions with SAVEPOINT

      Available at the Engine and ORM level. ORM docs so far:

       http://www.sqlalchemy.org/docs/04/session.html#unitofwork_managing

      Two-Phase Commit Sessions

      Available at the Engine and ORM level. ORM docs so far:

       http://www.sqlalchemy.org/docs/04/session.html#unitofwork_managing

      Inheritance

      Polymorphic Inheritance with No Joins or Unions

       New docs for inheritance: http://www.sqlalchemy.org/docs/04/mappers.html#advdatamapping_mapper_inheritance_joined

      Better Polymorphic Behavior with get()

       All classes within a joined-table inheritance hierarchy get an _instance_key using the base class, i.e. (BaseClass, (1, ), None). That way, when you call get() on a Query against the base class, it can locate subclass instances in the current identity map without querying the database.

      Types

      Custom Subclasses of sqlalchemy.types.TypeDecorator

       There is a new API for subclassing a TypeDecorator. Using the 0.3 API causes compilation errors in some cases.

      SQL Expressions

      All New, Deterministic Label/Alias Generation

      All the “anonymous” labels and aliases use a simple <name>_<number> format now. SQL is much easier to read and is compatible with plan optimizer caches. Just check out some of the examples in the tutorials: http://www.sqlalchemy.org/docs/04/ormtutorial.html http://www.sqlalchemy.org/docs/04/sqlexpression.html

      Generative select() Constructs

       This is definitely the way to go with select(). See http://www.sqlalchemy.org/docs/04/sqlexpression.html#sql_transform.

      New Operator System

       SQL operators and more or less every SQL keyword there is are now abstracted into the compiler layer. They now act intelligently and are type/backend aware, see: http://www.sqlalchemy.org/docs/04/sqlexpression.html#sql_operators

      All type Keyword Arguments Renamed to type_

      Just like it says:

      b = bindparam('foo', type_=String)

      in_ Function Changed to Accept Sequence or Selectable

      The in_ function now takes a sequence of values or a selectable as its sole argument. The previous API of passing in values as positional arguments still works, but is now deprecated. This means that

       my_table.select(my_table.c.id.in_(1,2,3))
       my_table.select(my_table.c.id.in_(*listOfIds))

      should be changed to

       my_table.select(my_table.c.id.in_([1,2,3]))
       my_table.select(my_table.c.id.in_(listOfIds))

      Schema and Reflection

      MetaData, BoundMetaData, DynamicMetaData...

      In the 0.3.x series, BoundMetaData and DynamicMetaData were deprecated in favor of MetaData and ThreadLocalMetaData. The older names have been removed in 0.4. Updating is simple:

      +-------------------------------------+-------------------------+
      |If You Had                           | Now Use                 |
      +=====================================+=========================+
      | ``MetaData``                        | ``MetaData``            |
      +-------------------------------------+-------------------------+
      | ``BoundMetaData``                   | ``MetaData``            |
      +-------------------------------------+-------------------------+
      | ``DynamicMetaData`` (with one       | ``MetaData``            |
      | engine or threadlocal=False)        |                         |
      +-------------------------------------+-------------------------+
      | ``DynamicMetaData``                 | ``ThreadLocalMetaData`` |
      | (with different engines per thread) |                         |
      +-------------------------------------+-------------------------+

      The seldom-used name parameter to MetaData types has been removed. The ThreadLocalMetaData constructor now takes no arguments. Both types can now be bound to an Engine or a single Connection.

      One Step Multi-Table Reflection

      You can now load table definitions and automatically create Table objects from an entire database or schema in one pass:

      >>> metadata = MetaData(myengine, reflect=True)
      >>> metadata.tables.keys()
      ['table_a', 'table_b', 'table_c', '...']

      MetaData also gains a .reflect() method enabling finer control over the loading process, including specification of a subset of available tables to load.

      SQL Execution

      engine, connectable, and bind_to are all now bind

      Transactions, NestedTransactions and TwoPhaseTransactions

      Connection Pool Events

      The connection pool now fires events when new DB-API connections are created, checked out and checked back into the pool. You can use these to execute session-scoped SQL setup statements on fresh connections, for example.
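
       A sketch of the idea, assuming a listener interface at sqlalchemy.interfaces.PoolListener and a listeners argument to create_engine() (both names are assumptions and may differ by version):

       from sqlalchemy import create_engine
       from sqlalchemy.interfaces import PoolListener

       class ForeignKeysListener(PoolListener):
           def connect(self, dbapi_con, con_record):
               # runs once for each newly created DB-API connection
               cursor = dbapi_con.cursor()
               cursor.execute("PRAGMA foreign_keys=ON")
               cursor.close()

       engine = create_engine('sqlite://', listeners=[ForeignKeysListener()])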

      Oracle Engine Fixed

      In 0.3.11, there were bugs in the Oracle Engine on how Primary Keys are handled. These bugs could cause programs that worked fine with other engines, such as sqlite, to fail when using the Oracle Engine. In 0.4, the Oracle Engine has been reworked, fixing these Primary Key problems.

      Out Parameters for Oracle

       result = engine.execute(
           text("begin foo(:x, :y, :z); end;",
                bindparams=[bindparam('x', Numeric),
                            outparam('y', Numeric),
                            outparam('z', Numeric)]),
           x=5)
      assert result.out_parameters == {'y':10, 'z':75}

      Connection-bound MetaData, Sessions

      MetaData and Session can be explicitly bound to a connection:

      conn = engine.connect()
      sess = create_session(bind=conn)

      Faster, More Foolproof ResultProxy Objects

       SQLAlchemy-0.8.4/doc/changelog/migration_05.html

      What’s new in SQLAlchemy 0.5?

      About this Document

      This document describes changes between SQLAlchemy version 0.4, last released October 12, 2008, and SQLAlchemy version 0.5, last released January 16, 2010.

      Document date: August 4, 2009

      This guide documents API changes which affect users migrating their applications from the 0.4 series of SQLAlchemy to 0.5. It’s also recommended for those working from Essential SQLAlchemy, which only covers 0.4 and seems to even have some old 0.3isms in it. Note that SQLAlchemy 0.5 removes many behaviors which were deprecated throughout the span of the 0.4 series, and also deprecates more behaviors specific to 0.4.

      Major Documentation Changes

      Some sections of the documentation have been completely rewritten and can serve as an introduction to new ORM features. The Query and Session objects in particular have some distinct differences in API and behavior which fundamentally change many of the basic ways things are done, particularly with regards to constructing highly customized ORM queries and dealing with stale session state, commits and rollbacks.

      Deprecations Source

      Another source of information is documented within a series of unit tests illustrating up to date usages of some common Query patterns; this file can be viewed at [source:sqlalchemy/trunk/test/orm/test_deprecations.py].

      Requirements Changes

      • Python 2.4 or higher is required. The SQLAlchemy 0.4 line is the last version with Python 2.3 support.

      Object Relational Mapping

      • Column level expressions within Query. - as detailed in the tutorial, Query has the capability to create specific SELECT statements, not just those against full rows:

        session.query(User.name, func.count(Address.id).label("numaddresses")).join(Address).group_by(User.name)

        The tuples returned by any multi-column/entity query are named tuples:

        for row in session.query(User.name, func.count(Address.id).label('numaddresses')).join(Address).group_by(User.name):
           print "name", row.name, "number", row.numaddresses

        Query has a statement accessor, as well as a subquery() method which allow Query to be used to create more complex combinations:

        subq = session.query(Keyword.id.label('keyword_id')).filter(Keyword.name.in_(['beans', 'carrots'])).subquery()
        recipes = session.query(Recipe).filter(exists().
           where(Recipe.id==recipe_keywords.c.recipe_id).
           where(recipe_keywords.c.keyword_id==subq.c.keyword_id)
        )
      • Explicit ORM aliases are recommended for aliased joins - The aliased() function produces an “alias” of a class, which allows fine-grained control of aliases in conjunction with ORM queries. While a table-level alias (i.e. table.alias()) is still usable, an ORM level alias retains the semantics of the ORM mapped object which is significant for inheritance mappings, options, and other scenarios. E.g.:

        Friend = aliased(Person)
        session.query(Person, Friend).join((Friend, Person.friends)).all()
      • query.join() greatly enhanced. - You can now specify the target and ON clause for a join in multiple ways. A target class alone can be provided where SQLA will attempt to form a join to it via foreign key in the same way as table.join(someothertable). A target and an explicit ON condition can be provided, where the ON condition can be a relation() name, an actual class descriptor, or a SQL expression. Or the old way of just a relation() name or class descriptor works too. See the ORM tutorial, which has several examples, as well as the brief sketch below.
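
        A brief sketch of the various forms (class names are illustrative):

        # join via foreign key between User and Address
        session.query(User).join(Address)

        # explicit target plus ON clause, given as a tuple
        session.query(User).join((Address, User.id == Address.user_id))

        # a relation() name or class descriptor
        session.query(User).join('addresses')
        session.query(User).join(User.addresses)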

      • Declarative is recommended for applications which don’t require (and don’t prefer) abstraction between tables and mappers - The Declarative module (/docs/05/reference/ext/declarative.html), which is used to combine the expression of Table, mapper(), and user defined class objects together, is highly recommended as it simplifies application configuration, ensures the “one mapper per class” pattern, and allows the full range of configuration available to distinct mapper() calls. Separate mapper() and Table usage is now referred to as “classical SQLAlchemy usage” and of course is freely mixable with declarative.

      • The .c. attribute has been removed from classes (i.e. MyClass.c.somecolumn). As is the case in 0.4, class-level properties are usable as query elements, i.e. Class.c.propname is now superseded by Class.propname, and the c attribute continues to remain on Table objects where they indicate the namespace of Column objects present on the table.

        To get at the Table for a mapped class (if you didn’t keep it around already):

        table = class_mapper(someclass).mapped_table

        Iterate through columns:

        for col in table.c:
            print col

        Work with a specific column:

        table.c.somecolumn

        The class-bound descriptors support the full set of Column operators as well as the documented relation-oriented operators like has(), any(), contains(), etc.

        The reason for the hard removal of .c. is that in 0.5, class-bound descriptors carry potentially different meaning, as well as information regarding class mappings, versus plain Column objects - and there are use cases where you’d specifically want to use one or the other. Generally, using class-bound descriptors invokes a set of mapping/polymorphic aware translations, and using table-bound columns does not. In 0.4, these translations were applied across the board to all expressions, but 0.5 differentiates completely between columns and mapped descriptors, only applying translations to the latter. So in many cases, particularly when dealing with joined table inheritance configurations as well as when using query(<columns>), Class.propname and table.c.colname are not interchangeable.

        For example, session.query(users.c.id, users.c.name) is different versus session.query(User.id, User.name); in the latter case, the Query is aware of the mapper in use and further mapper-specific operations like query.join(<propname>), query.with_parent() etc. may be used, but in the former case cannot. Additionally, in polymorphic inheritance scenarios, the class-bound descriptors refer to the columns present in the polymorphic selectable in use, not necessarily the table column which directly corresponds to the descriptor. For example, a set of classes related by joined-table inheritance to the person table along the person_id column of each table will all have their Class.person_id attribute mapped to the person_id column in person, and not their subclass table. Version 0.4 would map this behavior onto table-bound Column objects automatically. In 0.5, this automatic conversion has been removed, so that you in fact can use table-bound columns as a means to override the translations which occur with polymorphic querying; this allows Query to be able to create optimized selects among joined-table or concrete-table inheritance setups, as well as portable subqueries, etc.

      • Session Now Synchronizes Automatically with Transactions. Session now synchronizes against the transaction automatically by default, including autoflush and autoexpire. A transaction is present at all times unless disabled using the autocommit option. When all three flags are set to their default, the Session recovers gracefully after rollbacks and it’s very difficult to get stale data into the session. See the new Session documentation for details.

      • Implicit Order By Is Removed. This will impact ORM users who rely upon SA’s “implicit ordering” behavior, which states that all Query objects which don’t have an order_by() will ORDER BY the “id” or “oid” column of the primary mapped table, and all lazy/eagerly loaded collections apply a similar ordering. In 0.5, automatic ordering must be explicitly configured on mapper() and relation() objects (if desired), or otherwise when using Query.

        To convert an 0.4 mapping to 0.5, such that its ordering behavior will be extremely similar to 0.4 or previous, use the order_by setting on mapper() and relation():

        mapper(User, users, properties={
            'addresses':relation(Address, order_by=addresses.c.id)
        }, order_by=users.c.id)

        To set ordering on a backref, use the backref() function:

        'keywords':relation(Keyword, secondary=item_keywords,
              order_by=keywords.c.name, backref=backref('items', order_by=items.c.id))

        Using declarative? To help with the new order_by requirement, order_by and friends can now be set using strings which are evaluated in Python later on (this works only with declarative, not plain mappers):

        class MyClass(MyDeclarativeBase):
            # ...
            addresses = relation("Address", order_by="Address.id")

        It’s generally a good idea to set order_by on relation()s which load list-based collections of items, since that ordering cannot otherwise be affected. Other than that, the best practice is to use Query.order_by() to control ordering of the primary entities being loaded.

      • Session is now autoflush=True/autoexpire=True/autocommit=False. - To set it up, just call sessionmaker() with no arguments. The name transactional=True is now autocommit=False. Flushes occur upon each query issued (disable with autoflush=False), within each commit() (as always), and before each begin_nested() (so rolling back to the SAVEPOINT is meaningful). All objects are expired after each commit() and after each rollback(). After rollback, pending objects are expunged, deleted objects move back to persistent. These defaults work together very nicely and there’s really no more need for old techniques like clear() (which is renamed to expunge_all() as well).

        P.S.: sessions are now reusable after a rollback(). Scalar and collection attribute changes, adds and deletes are all rolled back.

      • session.add() replaces session.save(), session.update(), session.save_or_update(). - the session.add(someitem) and session.add_all([list of items]) methods replace save(), update(), and save_or_update(). Those methods will remain deprecated throughout 0.5.

      • backref configuration made less verbose. - The backref() function now uses the primaryjoin and secondaryjoin arguments of the forwards-facing relation() when they are not explicitly stated. It’s no longer necessary to specify primaryjoin/secondaryjoin in both directions separately.

      • Simplified polymorphic options. - The ORM’s “polymorphic load” behavior has been simplified. In 0.4, mapper() had an argument called polymorphic_fetch which could be configured as select or deferred. This option is removed; the mapper will now just defer any columns which were not present in the SELECT statement. The actual SELECT statement used is controlled by the with_polymorphic mapper argument (which is also in 0.4 and replaces select_table), as well as the with_polymorphic() method on Query (also in 0.4).

        An improvement to the deferred loading of inheriting classes is that the mapper now produces the “optimized” version of the SELECT statement in all cases; that is, if class B inherits from A, and several attributes only present on class B have been expired, the refresh operation will only include B’s table in the SELECT statement and will not JOIN to A.

      • The execute() method on Session converts plain strings into text() constructs, so that bind parameters may all be specified as ”:bindname” without needing to call text() explicitly. If “raw” SQL is desired here, use session.connection().execute("raw text").
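
        For example (a sketch; the table and parameter names are illustrative):

        # bind parameters work directly, no explicit text() call required
        result = session.execute(
            "select * from users where name=:name", {'name': 'ed'})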

      • session.Query().iterate_instances() has been renamed to just instances(). The old instances() method returning a list instead of an iterator no longer exists. If you were relying on that behavior, you should use list(your_query.instances()).

      Extending the ORM

       In 0.5 we’re moving forward with more ways to modify and extend the ORM. Here’s a summary:

      • MapperExtension. - This is the classic extension class, which remains. Methods which should rarely be needed are create_instance() and populate_instance(). To control the initialization of an object when it’s loaded from the database, use the reconstruct_instance() method, or more easily the @reconstructor decorator described in the documentation.
      • SessionExtension. - This is an easy to use extension class for session events. In particular, it provides before_flush(), after_flush() and after_flush_postexec() methods. Its usage is recommended over MapperExtension.before_XXX in many cases since within before_flush() you can modify the flush plan of the session freely, something which cannot be done from within MapperExtension.
      • AttributeExtension. - This class is now part of the public API, and allows the interception of userland events on attributes, including attribute set and delete operations, and collection appends and removes. It also allows the value to be set or appended to be modified. The @validates decorator, described in the documentation, provides a quick way to mark any mapped attributes as being “validated” by a particular class method.
      • Attribute Instrumentation Customization. - An API is provided for ambitious efforts to entirely replace SQLAlchemy’s attribute instrumentation, or just to augment it in some cases. This API was produced for the purposes of the Trellis toolkit, but is available as a public API. Some examples are provided in the distribution in the /examples/custom_attributes directory.

      Schema/Types

      • String with no length no longer generates TEXT, it generates VARCHAR - The String type no longer magically converts into a Text type when specified with no length. This only has an effect when CREATE TABLE is issued, as it will issue VARCHAR with no length parameter, which is not valid on many (but not all) databases. To create a TEXT (or CLOB, i.e. unbounded string) column, use the Text type.

      • PickleType() with mutable=True requires an __eq__() method - The PickleType type needs to compare values when mutable=True. The method of comparing pickle.dumps() is inefficient and unreliable. If an incoming object does not implement __eq__() and is also not None, the dumps() comparison is used but a warning is raised. For types which implement __eq__() which includes all dictionaries, lists, etc., comparison will use == and is now reliable by default.

      • convert_bind_param() and convert_result_value() methods of TypeEngine/TypeDecorator are removed. - The O’Reilly book unfortunately documented these methods even though they were deprecated post 0.3. For a user-defined type which subclasses TypeEngine, the bind_processor() and result_processor() methods should be used for bind/result processing. Any user defined type, whether extending TypeEngine or TypeDecorator, which uses the old 0.3 style can be easily adapted to the new style using the following adapter:

        class AdaptOldConvertMethods(object):
            """A mixin which adapts 0.3-style convert_bind_param and
            convert_result_value methods
        
            """
            def bind_processor(self, dialect):
                def convert(value):
                    return self.convert_bind_param(value, dialect)
                return convert
        
            def result_processor(self, dialect):
                def convert(value):
                    return self.convert_result_value(value, dialect)
                return convert
        
            def convert_result_value(self, value, dialect):
                return value
        
            def convert_bind_param(self, value, dialect):
                return value

        To use the above mixin:

        class MyType(AdaptOldConvertMethods, TypeEngine):
           # ...
      • The quote flag on Column and Table as well as the quote_schema flag on Table now control quoting both positively and negatively. The default is None, meaning let regular quoting rules take effect. When True, quoting is forced on. When False, quoting is forced off.

      • Column DEFAULT value DDL can now be more conveniently specified with Column(..., server_default='val'), deprecating Column(..., PassiveDefault('val')). default= is now exclusively for Python-initiated default values, and can coexist with server_default. A new server_default=FetchedValue() replaces the PassiveDefault('') idiom for marking columns as subject to influence from external triggers and has no DDL side effects.
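
        For instance (column names are illustrative):

        from sqlalchemy import Column, String, DateTime
        from sqlalchemy.schema import FetchedValue

        Column('status', String(20), server_default='pending')

        # marks the column as populated externally (e.g. by a trigger);
        # no DDL is emitted for it
        Column('last_touched', DateTime, server_default=FetchedValue())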

      • SQLite’s DateTime, Time and Date types now only accept datetime objects, not strings as bind parameter input. If you’d like to create your own “hybrid” type which accepts strings and returns results as date objects (from whatever format you’d like), create a TypeDecorator that builds on String. If you only want string-based dates, just use String.

      • Additionally, the DateTime and Time types, when used with SQLite, now represent the “microseconds” field of the Python datetime.datetime object in the same manner as str(datetime) - as fractional seconds, not a count of microseconds. That is:

        dt = datetime.datetime(2008, 6, 27, 12, 0, 0, 125)  # 125 usec
        
        # old way
        '2008-06-27 12:00:00.125'
        
        # new way
        '2008-06-27 12:00:00.000125'

        So if an existing SQLite file-based database intends to be used across 0.4 and 0.5, you either have to upgrade the datetime columns to store the new format (NOTE: please test this, I’m pretty sure it’s correct):

        UPDATE mytable SET somedatecol =
          substr(somedatecol, 0, 19) || '.' || substr((substr(somedatecol, 21, -1) / 1000000), 3, -1);

        or, enable “legacy” mode as follows:

        from sqlalchemy.databases.sqlite import DateTimeMixin
        DateTimeMixin.__legacy_microseconds__ = True

      Connection Pool no longer threadlocal by default

      0.4 has an unfortunate default setting of “pool_threadlocal=True”, leading to surprise behavior when, for example, using multiple Sessions within a single thread. This flag is now off in 0.5. To re-enable 0.4’s behavior, specify pool_threadlocal=True to create_engine(), or alternatively use the “threadlocal” strategy via strategy="threadlocal".
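
       For example (the URL is illustrative):

       from sqlalchemy import create_engine

       # restore the 0.4 behavior explicitly
       engine = create_engine('postgresql://scott:tiger@localhost/test',
                              pool_threadlocal=True)

       # or use the threadlocal execution strategy
       engine = create_engine('postgresql://scott:tiger@localhost/test',
                              strategy='threadlocal')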

      *args Accepted, *args No Longer Accepted

       The policy with method(*args) vs. method([args]) is, if the method accepts a variable-length set of items which represent a fixed structure, it takes *args. If the method accepts a variable-length set of items that are data-driven, it takes [args].

      • The various Query.options() functions eagerload(), eagerload_all(), lazyload(), contains_eager(), defer(), undefer() all accept variable-length *keys as their argument now, which allows a path to be formulated using descriptors, ie.:

        query.options(eagerload_all(User.orders, Order.items, Item.keywords))

        A single array argument is still accepted for backwards compatibility.

      • Similarly, the Query.join() and Query.outerjoin() methods accept a variable length *args, with a single array accepted for backwards compatibility:

        query.join('orders', 'items')
        query.join(User.orders, Order.items)
      • the in_() method on columns and similar only accepts a list argument now. It no longer accepts *args.

      Removed

      • entity_name - This feature was always problematic and rarely used. 0.5’s more deeply fleshed out use cases revealed further issues with entity_name which led to its removal. If different mappings are required for a single class, break the class into separate subclasses and map them separately. An example of this is at [wiki:UsageRecipes/EntityName]. More information regarding rationale is described at http://groups.google.com/group/sqlalchemy/browse_thread/thread/9e23a0641a88b96d?hl=en.

      • get()/load() cleanup

        The load() method has been removed. Its functionality was kind of arbitrary and basically copied from Hibernate, where it’s also not a particularly meaningful method.

        To get equivalent functionality:

        x = session.query(SomeClass).populate_existing().get(7)

        Session.get(cls, id) and Session.load(cls, id) have been removed. Session.get() is redundant vs. session.query(cls).get(id).

        MapperExtension.get() is also removed (as is MapperExtension.load()). To override the functionality of Query.get(), use a subclass:

        class MyQuery(Query):
            def get(self, ident):
                # ...
        
        session = sessionmaker(query_cls=MyQuery)()
        
        ad1 = session.query(Address).get(1)
      • sqlalchemy.orm.relation()

        The following deprecated keyword arguments have been removed:

        foreignkey, association, private, attributeext, is_backref

        In particular, attributeext is replaced with extension - the AttributeExtension class is now in the public API.

      • session.Query()

        The following deprecated functions have been removed:

        list, scalar, count_by, select_whereclause, get_by, select_by, join_by, selectfirst, selectone, select, execute, select_statement, select_text, join_to, join_via, selectfirst_by, selectone_by, apply_max, apply_min, apply_avg, apply_sum

        Additionally, the id keyword argument to join(), outerjoin(), add_entity() and add_column() has been removed. To target table aliases in Query to result columns, use the aliased construct:

        from sqlalchemy.orm import aliased
        address_alias = aliased(Address)
        print session.query(User, address_alias).join((address_alias, User.addresses)).all()
      • sqlalchemy.orm.Mapper

        • instances()
        • get_session() - this method was not very noticeable, but had the effect of associating lazy loads with a particular session even if the parent object was entirely detached, when an extension such as scoped_session() or the old SessionContextExt was used. It’s possible that some applications which relied upon this behavior will no longer work as expected; but the better programming practice here is to always ensure objects are present within sessions if database access from their attributes are required.
      • mapper(MyClass, mytable)

        Mapped classes are no longer instrumented with a “c” class attribute; e.g. MyClass.c

      • sqlalchemy.orm.collections

        The _prepare_instrumentation alias for prepare_instrumentation has been removed.

      • sqlalchemy.orm

        Removed the EXT_PASS alias of EXT_CONTINUE.

      • sqlalchemy.engine

        The alias from DefaultDialect.preexecute_sequences to .preexecute_pk_sequences has been removed.

        The deprecated engine_descriptors() function has been removed.

      • sqlalchemy.ext.activemapper

        Module removed.

      • sqlalchemy.ext.assignmapper

        Module removed.

      • sqlalchemy.ext.associationproxy

        Pass-through of keyword args on the proxy’s .append(item, **kw) has been removed and is now simply .append(item)

      • sqlalchemy.ext.selectresults, sqlalchemy.mods.selectresults

        Modules removed.

      • sqlalchemy.ext.declarative

        declared_synonym() removed.

      • sqlalchemy.ext.sessioncontext

        Module removed.

      • sqlalchemy.log

        The SADeprecationWarning alias to sqlalchemy.exc.SADeprecationWarning has been removed.

      • sqlalchemy.exc

        exc.AssertionError has been removed and usage replaced by the Python built-in of the same name.

      • sqlalchemy.databases.mysql

        The deprecated get_version_info dialect method has been removed.

      Renamed or Moved

      • sqlalchemy.exceptions is now sqlalchemy.exc

        The module may still be imported under the old name until 0.6.

      • FlushError, ConcurrentModificationError, UnmappedColumnError -> sqlalchemy.orm.exc

        These exceptions moved to the orm package. Importing ‘sqlalchemy.orm’ will install aliases in sqlalchemy.exc for compatibility until 0.6.

      • sqlalchemy.logging -> sqlalchemy.log

        This internal module was renamed. No longer needs to be special cased when packaging SA with py2app and similar tools that scan imports.

      • session.Query().iterate_instances() -> session.Query().instances().

      Deprecated

      • Session.save(), Session.update(), Session.save_or_update()

        All three replaced by Session.add()

      • sqlalchemy.PassiveDefault

        Use Column(server_default=...); this translates to sqlalchemy.DefaultClause() under the hood.

      • session.Query().iterate_instances(). It has been renamed to instances().


      What’s New in SQLAlchemy 0.6?

      About this Document

      This document describes changes between SQLAlchemy version 0.5, last released January 16, 2010, and SQLAlchemy version 0.6, last released May 5, 2012.

      Document date: June 6, 2010

      This guide documents API changes which affect users migrating their applications from the 0.5 series of SQLAlchemy to 0.6. Note that SQLAlchemy 0.6 removes some behaviors which were deprecated throughout the span of the 0.5 series, and also deprecates more behaviors specific to 0.5.

      Platform Support

      • cPython versions 2.4 and upwards throughout the 2.xx series
      • Jython 2.5.1 - using the zxJDBC DBAPI included with Jython.
      • cPython 3.x - see [source:sqlalchemy/trunk/README.py3k] for information on how to build for python3.

      New Dialect System

      Dialect modules are now broken up into distinct subcomponents, within the scope of a single database backend. Dialect implementations are now in the sqlalchemy.dialects package. The sqlalchemy.databases package still exists as a placeholder to provide some level of backwards compatibility for simple imports.

      For each supported database, a sub-package exists within sqlalchemy.dialects where several files are contained. Each package contains a module called base.py which defines the specific SQL dialect used by that database. It also contains one or more “driver” modules, each one corresponding to a specific DBAPI - these files are named corresponding to the DBAPI itself, such as pysqlite, cx_oracle, or pyodbc. The classes used by SQLAlchemy dialects are first declared in the base.py module, defining all behavioral characteristics defined by the database. These include capability mappings, such as “supports sequences”, “supports returning”, etc., type definitions, and SQL compilation rules. Each “driver” module in turn provides subclasses of those classes as needed which override the default behavior to accommodate the additional features, behaviors, and quirks of that DBAPI. For DBAPIs that support multiple backends (pyodbc, zxJDBC, mxODBC), the dialect module will use mixins from the sqlalchemy.connectors package, which provide functionality common to that DBAPI across all backends, most typically dealing with connect arguments. This means that connecting using pyodbc, zxJDBC or mxODBC (when implemented) is extremely consistent across supported backends.

      The URL format used by create_engine() has been enhanced to handle any number of DBAPIs for a particular backend, using a scheme that is inspired by that of JDBC. The previous format still works, and will select a “default” DBAPI implementation, such as the Postgresql URL below that will use psycopg2:

      create_engine('postgresql://scott:tiger@localhost/test')

      However to specify a specific DBAPI backend such as pg8000, add it to the “protocol” section of the URL using a plus sign “+”:

      create_engine('postgresql+pg8000://scott:tiger@localhost/test')

      Important Dialect Links:

      Other notes regarding dialects:

      • the type system has been changed dramatically in SQLAlchemy 0.6. This has an impact on all dialects regarding naming conventions, behaviors, and implementations. See the section on “Types” below.
      • the ResultProxy object now offers a 2x speed improvement in some cases thanks to some refactorings.
      • the RowProxy, i.e. individual result row object, is now directly pickleable.
      • the setuptools entrypoint used to locate external dialects is now called sqlalchemy.dialects. An external dialect written against 0.4 or 0.5 will need to be modified to work with 0.6 in any case so this change does not add any additional difficulties.
      • dialects now receive an initialize() event on initial connection to determine connection properties.
      • Functions and operators generated by the compiler now use (almost) regular dispatch functions of the form “visit_<opname>” and “visit_<funcname>_fn” to provide customized processing. This replaces the need to copy the “functions” and “operators” dictionaries in compiler subclasses with straightforward visitor methods, and also allows compiler subclasses complete control over rendering, as the full _Function or _BinaryExpression object is passed in.

      Dialect Imports

      The import structure of dialects has changed. Each dialect now exports its base “dialect” class as well as the full set of SQL types supported on that dialect via sqlalchemy.dialects.<name>. For example, to import a set of PG types:

      from sqlalchemy.dialects.postgresql import INTEGER, BIGINT, SMALLINT,\
                                                  VARCHAR, MACADDR, DATE, BYTEA

      Above, INTEGER is actually the plain INTEGER type from sqlalchemy.types, but the PG dialect makes it available in the same way as those types which are specific to PG, such as BYTEA and MACADDR.

      Expression Language Changes

      An Important Expression Language Gotcha

      There’s one quite significant behavioral change to the expression language which may affect some applications. The boolean value of Python boolean expressions, i.e. ==, !=, and similar, now evaluates accurately with regards to the two clause objects being compared.

      As we know, comparing a ClauseElement to any other object returns another ClauseElement:

      >>> from sqlalchemy.sql import column
      >>> column('foo') == 5
      <sqlalchemy.sql.expression._BinaryExpression object at 0x1252490>

      This is so that Python expressions produce SQL expressions when converted to strings:

      >>> str(column('foo') == 5)
      'foo = :foo_1'

      But what happens if we say this?

      >>> if column('foo') == 5:
      ...     print "yes"
      ...

      In previous versions of SQLAlchemy, the returned _BinaryExpression was a plain Python object which evaluated to True. Now it evaluates to whether or not the actual ClauseElement should have the same hash value as the one to which it is being compared. Meaning:

      >>> bool(column('foo') == 5)
      False
      >>> bool(column('foo') == column('foo'))
      False
      >>> c = column('foo')
      >>> bool(c == c)
      True
      >>>

      That means code such as the following:

      if expression:
          print "the expression is:", expression

      Would not evaluate if expression was a binary clause. Since the above pattern should never be used, the base ClauseElement now raises an exception if called in a boolean context:

      >>> bool(c)
      Traceback (most recent call last):
        File "<stdin>", line 1, in <module>
        ...
          raise TypeError("Boolean value of this clause is not defined")
      TypeError: Boolean value of this clause is not defined

      Code that wants to check for the presence of a ClauseElement expression should instead say:

      if expression is not None:
          print "the expression is:", expression

      Keep in mind, this applies to Table and Column objects too.

      The rationale for the change is twofold:

      • Comparisons of the form if c1 == c2:  <do something> can actually be written now
      • Support for correct hashing of ClauseElement objects now works on alternate platforms, namely Jython. Up until this point SQLAlchemy relied heavily on the specific behavior of cPython in this regard (and still had occasional problems with it).

      Stricter “executemany” Behavior

      An “executemany” in SQLAlchemy corresponds to a call to execute(), passing along a collection of bind parameter sets:

      connection.execute(table.insert(), {'data':'row1'}, {'data':'row2'}, {'data':'row3'})

      When the Connection object sends off the given insert() construct for compilation, it passes to the compiler the keynames present in the first set of binds passed along to determine the construction of the statement’s VALUES clause. Users familiar with this construct will know that additional keys present in the remaining dictionaries don’t have any impact. What’s different now is that all subsequent dictionaries need to include at least every key that is present in the first dictionary. This means that a call like this no longer works:

      connection.execute(table.insert(),
                              {'timestamp':today, 'data':'row1'},
                              {'timestamp':today, 'data':'row2'},
                              {'data':'row3'})

      Because the third row does not specify the ‘timestamp’ column. Previous versions of SQLAlchemy would simply insert NULL for these missing columns. However, if the timestamp column in the above example contained a Python-side default value or function, it would not be used. This is because the “executemany” operation is optimized for maximum performance across huge numbers of parameter sets, and does not attempt to evaluate Python-side defaults for those missing keys. Because defaults are often implemented either as SQL expressions which are embedded inline with the INSERT statement, or are server side expressions which again are triggered based on the structure of the INSERT string, which by definition cannot fire off conditionally based on each parameter set, it would be inconsistent for Python side defaults to behave differently vs. SQL/server side defaults. (SQL expression based defaults are embedded inline as of the 0.5 series, again to minimize the impact of huge numbers of parameter sets).

      SQLAlchemy 0.6 therefore establishes predictable consistency by forbidding any subsequent parameter sets from leaving any fields blank. That way, there’s no more silent failure of Python side default values and functions, which additionally are allowed to remain consistent in their behavior versus SQL and server side defaults.
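
      For illustration, a minimal sketch of the 0.6-compatible form of the earlier call (same hypothetical table and today value): every parameter set supplies the same keys as the first one, computing any needed default value up front:

      # every parameter set now names 'timestamp' as well as 'data'
      connection.execute(table.insert(),
                              {'timestamp': today, 'data': 'row1'},
                              {'timestamp': today, 'data': 'row2'},
                              {'timestamp': today, 'data': 'row3'})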

      UNION and other “compound” constructs parenthesize consistently

      A rule that was designed to help SQLite has been removed: that of the first compound element within another compound (such as a union() inside of an except_()) not being parenthesized. This was inconsistent and produced the wrong results on Postgresql, which has precedence rules regarding INTERSECTION, and it’s generally a surprise. When using complex composites with SQLite, you now need to turn the first element into a subquery (which is also compatible on PG). A new example is in the SQL expression tutorial at the end of [http://www.sqlalchemy.org/docs/06/sqlexpression.html#unions-and-other-set-operations]. See #1665 and r6690 for more background.
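
      As a rough sketch of the workaround described above (the tables t1, t2, t3 are hypothetical and defined only to make the example self-contained), the inner compound is turned into a subquery before being placed inside the outer one, a form which works on both SQLite and Postgresql:

      from sqlalchemy import Table, Column, Integer, MetaData, select, union, except_

      metadata = MetaData()
      t1 = Table('t1', metadata, Column('x', Integer))
      t2 = Table('t2', metadata, Column('x', Integer))
      t3 = Table('t3', metadata, Column('x', Integer))

      # wrap the inner UNION as a subquery so that SQLite accepts it;
      # the same form also parenthesizes correctly on Postgresql
      inner = union(select([t1.c.x]), select([t2.c.x])).alias().select()
      stmt = except_(inner, select([t3.c.x]))
      print stmt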

      C Extensions for Result Fetching

      The ResultProxy and related elements, including most common “row processing” functions such as unicode conversion, numerical/boolean conversions and date parsing, have been re-implemented as optional C extensions for the purposes of performance. This represents the beginning of SQLAlchemy’s path to the “dark side” where we hope to continue improving performance by reimplementing critical sections in C. The extensions can be built by specifying --with-cextensions, i.e. python setup.py --with-cextensions install.

      The extensions have the most dramatic impact on result fetching using direct ResultProxy access, i.e. that which is returned by engine.execute(), connection.execute(), or session.execute(). Within results returned by an ORM Query object, result fetching is not as high a percentage of overhead, so ORM performance improves more modestly, and mostly in the realm of fetching large result sets. The performance improvements highly depend on the dbapi in use and on the syntax used to access the columns of each row (eg row['name'] is much faster than row.name). The current extensions have no impact on the speed of inserts/updates/deletes, nor do they improve the latency of SQL execution, that is, an application that spends most of its time executing many statements with very small result sets will not see much improvement.

      Performance has been improved in 0.6 versus 0.5 regardless of the extensions. A quick overview of what connecting and fetching 50,000 rows looks like with SQLite, using mostly direct SQLite access, a ResultProxy, and a simple mapped ORM object:

      sqlite select/native: 0.260s
      
      0.6 / C extension
      
      sqlalchemy.sql select: 0.360s
      sqlalchemy.orm fetch: 2.500s
      
      0.6 / Pure Python
      
      sqlalchemy.sql select: 0.600s
      sqlalchemy.orm fetch: 3.000s
      
      0.5 / Pure Python
      
      sqlalchemy.sql select: 0.790s
      sqlalchemy.orm fetch: 4.030s

      Above, the ORM fetches the rows 33% faster than 0.5 due to in-python performance enhancements. With the C extensions we get another 20%. However, ResultProxy fetches improve by 67% with the C extension compared to without it. Other tests report as much as a 200% speed improvement for some scenarios, such as those where lots of string conversions are occurring.

      New Schema Capabilities

      The sqlalchemy.schema package has received some long-needed attention. The most visible change is the newly expanded DDL system. In SQLAlchemy, it was possible since version 0.5 to create custom DDL strings and associate them with tables or metadata objects:

      from sqlalchemy.schema import DDL
      
      DDL('CREATE TRIGGER users_trigger ...').execute_at('after-create', metadata)

      Now the full suite of DDL constructs are available under the same system, including those for CREATE TABLE, ADD CONSTRAINT, etc.:

      from sqlalchemy.schema import CheckConstraint, AddConstraint
      
      AddConstraint(CheckConstraint("value > 5")).execute_at('after-create', mytable)

      Additionally, all the DDL objects are now regular ClauseElement objects just like any other SQLAlchemy expression object:

      from sqlalchemy.schema import CreateTable
      
      create = CreateTable(mytable)
      
      # dumps the CREATE TABLE as a string
      print create
      
      # executes the CREATE TABLE statement
      engine.execute(create)

      and using the sqlalchemy.ext.compiler extension you can make your own:

      from sqlalchemy.schema import DDLElement
      from sqlalchemy.ext.compiler import compiles
      
      class AlterColumn(DDLElement):
      
          def __init__(self, column, cmd):
              self.column = column
              self.cmd = cmd
      
      @compiles(AlterColumn)
      def visit_alter_column(element, compiler, **kw):
          return "ALTER TABLE %s ALTER COLUMN %s %s ..." % (
              element.column.table.name,
              element.column.name,
              element.cmd
          )
      
      engine.execute(AlterColumn(table.c.mycolumn, "SET DEFAULT 'test'"))

      Deprecated/Removed Schema Elements

      The schema package has also been greatly streamlined. Many options and methods which were deprecated throughout 0.5 have been removed. Other little known accessors and methods have also been removed.

      • the “owner” keyword argument is removed from Table. Use “schema” to represent any namespaces to be prepended to the table name.
      • deprecated MetaData.connect() and ThreadLocalMetaData.connect() have been removed - send the “bind” attribute to bind a metadata.
      • deprecated metadata.table_iterator() method removed (use sorted_tables)
      • the “metadata” argument is removed from DefaultGenerator and subclasses, but remains locally present on Sequence, which is a standalone construct in DDL.
      • deprecated PassiveDefault - use DefaultClause.
      • Removed public mutability from Index and Constraint objects:
        • ForeignKeyConstraint.append_element()
        • Index.append_column()
        • UniqueConstraint.append_column()
        • PrimaryKeyConstraint.add()
        • PrimaryKeyConstraint.remove()

      These should be constructed declaratively (i.e. in one construction).

      • Other removed things:
        • Table.key (no idea what this was for)
        • Column.bind (get via column.table.bind)
        • Column.metadata (get via column.table.metadata)
        • Column.sequence (use column.default)

      Other Behavioral Changes

      • UniqueConstraint, Index, PrimaryKeyConstraint all accept lists of column names or column objects as arguments.
      • The use_alter flag on ForeignKey is now a shortcut option for operations that can be hand-constructed using the DDL() event system. A side effect of this refactor is that ForeignKeyConstraint objects with use_alter=True will not be emitted on SQLite, which does not support ALTER for foreign keys. This has no effect on SQLite’s behavior since SQLite does not actually honor FOREIGN KEY constraints.
      • Table.primary_key is not assignable - use table.append_constraint(PrimaryKeyConstraint(...))
      • A Column definition with a ForeignKey and no type, e.g. Column(name, ForeignKey(sometable.c.somecol)) used to get the type of the referenced column. Now support for that automatic type inference is partial and may not work in all cases.

      Logging opened up

      At the expense of a few extra method calls here and there, you can set log levels for INFO and DEBUG after an engine, pool, or mapper has been created, and logging will commence. The isEnabledFor(INFO) method is now called per-Connection and isEnabledFor(DEBUG) per-ResultProxy if already enabled on the parent connection. Pool logging sends to log.info() and log.debug() with no check - note that pool checkout/checkin is typically once per transaction.
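
      As a brief sketch using the standard logging module (the logger names are the documented “sqlalchemy.engine” and “sqlalchemy.pool” channels), levels can be raised after the engine already exists:

      import logging

      logging.basicConfig()
      # INFO logs SQL statements; DEBUG additionally logs result rows
      logging.getLogger('sqlalchemy.engine').setLevel(logging.INFO)
      logging.getLogger('sqlalchemy.pool').setLevel(logging.DEBUG)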

      Reflection/Inspector API

      The reflection system, which allows reflection of table columns via Table('sometable', metadata, autoload=True) has been opened up into its own fine-grained API, which allows direct inspection of database elements such as tables, columns, constraints, indexes, and more. This API expresses return values as simple lists of strings, dictionaries, and TypeEngine objects. The internals of autoload=True now build upon this system such that the translation of raw database information into sqlalchemy.schema constructs is centralized and the contract of individual dialects greatly simplified, vastly reducing bugs and inconsistencies across different backends.

      To use an inspector:

      from sqlalchemy.engine.reflection import Inspector
      insp = Inspector.from_engine(my_engine)
      
      print insp.get_schema_names()

      The from_engine() method will in some cases provide a backend-specific inspector with additional capabilities, such as that of Postgresql, which provides a get_table_oid() method:

      my_engine = create_engine('postgresql://...')
      pg_insp = Inspector.from_engine(my_engine)
      
      print pg_insp.get_table_oid('my_table')

      RETURNING Support

      The insert(), update() and delete() constructs now support a returning() method, which corresponds to the SQL RETURNING clause as supported by Postgresql, Oracle, MS-SQL, and Firebird. It is not supported for any other backend at this time.

      Given a list of column expressions in the same manner as that of a select() construct, the values of these columns will be returned as a regular result set:

      result = connection.execute(
                  table.insert().values(data='some data').returning(table.c.id, table.c.timestamp)
              )
      row = result.first()
      print "ID:", row['id'], "Timestamp:", row['timestamp']

      The implementation of RETURNING across the four supported backends varies wildly, in the case of Oracle requiring an intricate usage of OUT parameters which are re-routed into a “mock” result set, and in the case of MS-SQL using an awkward SQL syntax. The usage of RETURNING is subject to limitations:

      • it does not work for any “executemany()” style of execution. This is a limitation of all supported DBAPIs.
      • Some backends, such as Oracle, only support RETURNING that returns a single row - this includes UPDATE and DELETE statements, meaning the update() or delete() construct must match only a single row, or an error is raised (by Oracle, not SQLAlchemy).

      RETURNING is also used automatically by SQLAlchemy, when available and when not otherwise specified by an explicit returning() call, to fetch newly generated primary key values for single-row INSERT statements. This means there’s no more “SELECT nextval(sequence)” pre-execution for insert statements where the primary key value is required. Truth be told, the implicit RETURNING feature does incur more method overhead than the old “select nextval()” system, which used a quick and dirty cursor.execute() to get at the sequence value, and in the case of Oracle requires additional binding of out parameters. So if method/protocol overhead is proving to be more expensive than additional database round trips, the feature can be disabled by specifying implicit_returning=False to create_engine().
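
      For example, to disable the feature engine-wide (a one-line sketch using the same connection URL style as the earlier examples):

      from sqlalchemy import create_engine

      engine = create_engine('postgresql://scott:tiger@localhost/test',
                             implicit_returning=False)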

      Type System Changes

      New Architecture

      The type system has been completely reworked behind the scenes to provide two goals:

      • Separate the handling of bind parameters and result row values, typically a DBAPI requirement, from the SQL specification of the type itself, which is a database requirement. This is consistent with the overall dialect refactor that separates database SQL behavior from DBAPI.
      • Establish a clear and consistent contract for generating DDL from a TypeEngine object and for constructing TypeEngine objects based on column reflection.

      Highlights of these changes include:

      • The construction of types within dialects has been totally overhauled. Dialects now define publicly available types as UPPERCASE names exclusively, and internal implementation types using underscore identifiers (i.e. are private). The system by which types are expressed in SQL and DDL has been moved to the compiler system. This has the effect that there are far fewer type objects within most dialects. A detailed document on this architecture for dialect authors is in [source:/lib/sqlalchemy/dialects/type_migration_guidelines.txt].
      • Reflection of types now returns the exact UPPERCASE type within types.py, or the UPPERCASE type within the dialect itself if the type is not a standard SQL type. This means reflection now returns more accurate information about reflected types.
      • User defined types that subclass TypeEngine and wish to provide get_col_spec() should now subclass UserDefinedType.
      • The result_processor() method on all type classes now accepts an additional argument coltype. This is the DBAPI type object attached to cursor.description, and should be used when applicable to make better decisions on what kind of result-processing callable should be returned. Ideally result processor functions would never need to use isinstance(), which is an expensive call at this level.

      Native Unicode Mode

      As more DBAPIs support returning Python unicode objects directly, the base dialect now performs a check upon the first connection which establishes whether or not the DBAPI returns a Python unicode object for a basic select of a VARCHAR value. If so, the String type and all subclasses (i.e. Text, Unicode, etc.) will skip the “unicode” check/conversion step when result rows are received. This offers a dramatic performance increase for large result sets. The “unicode mode” currently is known to work with:

      • sqlite3 / pysqlite
      • psycopg2 - SQLA 0.6 now uses the “UNICODE” type extension by default on each psycopg2 connection object
      • pg8000
      • cx_oracle (we use an output processor - nice feature !)

      Other types may choose to disable unicode processing as needed, such as the NVARCHAR type when used with MS-SQL.

      In particular, if porting an application based on a DBAPI that formerly returned non-unicode strings, the “native unicode” mode has a plainly different default behavior - columns that are declared as String or VARCHAR now return unicode by default whereas they would return strings before. This can break code which expects non-unicode strings. The psycopg2 “native unicode” mode can be disabled by passing use_native_unicode=False to create_engine().
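
      For example:

      from sqlalchemy import create_engine

      engine = create_engine('postgresql+psycopg2://scott:tiger@localhost/test',
                             use_native_unicode=False)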

      A more general solution for string columns that explicitly do not want a unicode object is to use a TypeDecorator that converts unicode back to utf-8, or whatever is desired:

      import sqlalchemy as sa
      from sqlalchemy.types import TypeDecorator
      
      class UTF8Encoded(TypeDecorator):
          """Unicode type which coerces to utf-8."""
      
          impl = sa.VARCHAR
      
          def process_result_value(self, value, dialect):
              if isinstance(value, unicode):
                  value = value.encode('utf-8')
              return value

      Note that the assert_unicode flag is now deprecated. SQLAlchemy allows the DBAPI and backend database in use to handle Unicode parameters when available, and does not add operational overhead by checking the incoming type; modern systems like sqlite and Postgresql will raise an encoding error on their end if invalid data is passed. In those cases where SQLAlchemy does need to coerce a bind parameter from Python Unicode to an encoded string, or when the Unicode type is used explicitly, a warning is raised if the object is a bytestring. This warning can be suppressed or converted to an exception using the Python warnings filter documented at: http://docs.python.org/library/warnings.html

      Generic Enum Type

      We now have an Enum in the types module. This is a string type that is given a collection of “labels” which constrain the possible values given to those labels. By default, this type generates a VARCHAR using the size of the largest label, and applies a CHECK constraint to the table within the CREATE TABLE statement. When using MySQL, the type by default uses MySQL’s ENUM type, and when using Postgresql the type will generate a user defined type using CREATE TYPE <mytype> AS ENUM. In order to create the type using Postgresql, the name parameter must be specified to the constructor. The type also accepts a native_enum=False option which will issue the VARCHAR/CHECK strategy for all databases. Note that Postgresql ENUM types currently don’t work with pg8000 or zxjdbc.
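
      A short sketch of the construct (the table, column and label names are hypothetical):

      from sqlalchemy import Table, Column, Integer, Enum, MetaData

      metadata = MetaData()
      employee = Table('employee', metadata,
          Column('id', Integer, primary_key=True),
          # VARCHAR + CHECK constraint on most backends; native ENUM on MySQL,
          # and CREATE TYPE employee_status AS ENUM on Postgresql (where the
          # "name" parameter is required)
          Column('status', Enum('active', 'on_leave', 'terminated',
                                name='employee_status'))
      )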

      Reflection Returns Dialect-Specific Types

      Reflection now returns the most specific type possible from the database. That is, if you create a table using String, then reflect it back, the reflected column will likely be VARCHAR. For dialects that support a more specific form of the type, that’s what you’ll get. So a Text type would come back as oracle.CLOB on Oracle, a LargeBinary might be a mysql.MEDIUMBLOB, etc. The obvious advantage here is that reflection preserves as much information as possible from what the database had to say.

      Some applications that deal heavily in table metadata may wish to compare types across reflected tables and/or non-reflected tables. There’s a semi-private accessor available on TypeEngine called _type_affinity and an associated comparison helper _compare_type_affinity. This accessor returns the “generic” types class which the type corresponds to:

      >>> String(50)._compare_type_affinity(postgresql.VARCHAR(50))
      True
      >>> Integer()._compare_type_affinity(mysql.REAL)
      False

      Miscellaneous API Changes

      The usual “generic” types are still the general system in use, i.e. String, Float, DateTime. There’s a few changes there:

      • Types no longer make any guesses as to default parameters. In particular, Numeric, Float, as well as subclasses NUMERIC, FLOAT, DECIMAL don’t generate any length or scale unless specified. This also continues to include the controversial String and VARCHAR types (although MySQL dialect will pre-emptively raise when asked to render VARCHAR with no length). No defaults are assumed, and if they are used in a CREATE TABLE statement, an error will be raised if the underlying database does not allow non-lengthed versions of these types.
      • the Binary type has been renamed to LargeBinary, for BLOB/BYTEA/similar types. For BINARY and VARBINARY, those are present directly as types.BINARY, types.VARBINARY, as well as in the MySQL and MS-SQL dialects.
      • PickleType now uses == for comparison of values when mutable=True, unless the “comparator” argument with a comparison function is specified to the type. If you are pickling a custom object you should implement an __eq__() method so that value-based comparisons are accurate.
      • The default “precision” and “scale” arguments of Numeric and Float have been removed and now default to None. NUMERIC and FLOAT will be rendered with no numeric arguments by default unless these values are provided.
      • DATE, TIME and DATETIME types on SQLite can now take optional “storage_format” and “regexp” arguments. “storage_format” can be used to store those types using a custom string format. “regexp” allows a custom regular expression to be used to match string values from the database.
      • __legacy_microseconds__ on SQLite Time and DateTime types is not supported anymore. You should use the new “storage_format” argument instead.
      • DateTime types on SQLite now use by default a stricter regular expression to match strings from the database. Use the new “regexp” argument if you are using data stored in a legacy format.

      ORM Changes

      Upgrading an ORM application from 0.5 to 0.6 should require little to no changes, as the ORM’s behavior remains almost identical. There are some default argument and name changes, and some loading behaviors have been improved.

      New Unit of Work

      The internals for the unit of work, primarily topological.py and unitofwork.py, have been completely rewritten and are vastly simplified. This should have no impact on usage, as all existing behavior during flush has been maintained exactly (or at least, as far as it is exercised by our testsuite and the handful of production environments which have tested it heavily). The performance of flush() now uses 20-30% fewer method calls and should also use less memory. The intent and flow of the source code should now be reasonably easy to follow, and the architecture of the flush is fairly open-ended at this point, creating room for potential new areas of sophistication. The flush process no longer has any reliance on recursion so flush plans of arbitrary size and complexity can be flushed. Additionally, the mapper’s “save” process, which issues INSERT and UPDATE statements, now caches the “compiled” form of the two statements so that callcounts are further dramatically reduced with very large flushes.

      Any changes in behavior observed with flush versus earlier versions of 0.6 or 0.5 should be reported to us ASAP - we’ll make sure no functionality is lost.

      Changes to query.update() and query.delete()

      • the ‘expire’ option on query.update() has been renamed to ‘fetch’, thus matching that of query.delete()
      • query.update() and query.delete() both default to ‘evaluate’ for the synchronize strategy.
      • the ‘synchronize’ strategy for update() and delete() raises an error on failure. There is no implicit fallback onto “fetch”. Failure of evaluation is based on the structure of criteria, so success/failure is deterministic based on code structure.

      relation() is officially named relationship()

      This is to solve the long-running issue that “relation” means a “table or derived table” in relational algebra terms. The relation() name, which is less typing, will hang around for the foreseeable future, so this change should be entirely painless.

      Subquery eager loading

      A new kind of eager loading is added called “subquery” loading. This is a load that emits a second SQL query immediately after the first which loads full collections for all the parents in the first query, joining upwards to the parent using INNER JOIN. Subquery loading is used similarly to the current joined-eager loading, using the subqueryload() and subqueryload_all() options as well as the lazy='subquery' setting on relationship(). The subquery load is usually much more efficient for loading many larger collections as it uses INNER JOIN unconditionally and also doesn’t re-load parent rows.
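
      A brief usage sketch, assuming a mapped Parent class with a children collection and an existing session:

      from sqlalchemy.orm import subqueryload

      # one query for the Parent rows, then a second query that loads the
      # children collections for all of those parents at once
      parents = session.query(Parent).options(subqueryload(Parent.children)).all()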

      eagerload(), eagerload_all() is now joinedload(), joinedload_all()

      To make room for the new subquery load feature, the existing eagerload()/eagerload_all() options are now superseded by joinedload() and joinedload_all(). The old names will hang around for the foreseeable future just like relation().

      lazy=False|None|True|'dynamic' now accepts lazy='noload'|'joined'|'subquery'|'select'|'dynamic'

      Continuing on the theme of loader strategies opened up, the standard keywords for the lazy option on relationship() are now select for lazy loading (via a SELECT issued on attribute access), joined for joined-eager loading, subquery for subquery-eager loading, noload for no loading at all, and dynamic for a “dynamic” relationship. The old True, False, None arguments are still accepted with the identical behavior as before.

      innerjoin=True on relation, joinedload

      Joined-eagerly loaded scalars and collections can now be instructed to use INNER JOIN instead of OUTER JOIN. On Postgresql this is observed to provide a 300-600% speedup on some queries. Set this flag for any many-to-one which is on a NOT NULLable foreign key, and similarly for any collection where related items are guaranteed to exist.

      At mapper level:

      mapper(Child, child)
      mapper(Parent, parent, properties={
          'child':relationship(Child, lazy='joined', innerjoin=True)
      })

      At query time level:

      session.query(Parent).options(joinedload(Parent.child, innerjoin=True)).all()

      The innerjoin=True flag at the relationship() level will also take effect for any joinedload() option which does not override the value.

      Many-to-one Enhancements

      • many-to-one relations now fire off a lazyload in fewer cases, and in most cases will not fetch the “old” value when it is replaced with a new one.

      • many-to-one relation to a joined-table subclass now uses get() for a simple load (known as the “use_get” condition), i.e. Related->Sub(Base), without the need to redefine the primaryjoin condition in terms of the base table. [ticket:1186]

      • specifying a foreign key with a declarative column, i.e. ForeignKey(MyRelatedClass.id) doesn’t break the “use_get” condition from taking place [ticket:1492]

      • relationship(), joinedload(), and joinedload_all() now feature an option called “innerjoin”. Specify True or False to control whether an eager join is constructed as an INNER or OUTER join. Default is False as always. The mapper options will override whichever setting is specified on relationship(). Should generally be set for many-to-one, not nullable foreign key relations to allow improved join performance. [ticket:1544]

      • the behavior of joined eager loading such that the main query is wrapped in a subquery when LIMIT/OFFSET are present now makes an exception for the case when all eager loads are many-to-one joins. In those cases, the eager joins are against the parent table directly along with the limit/offset without the extra overhead of a subquery, since a many-to-one join does not add rows to the result.

        For example, in 0.5 this query:

        session.query(Address).options(eagerload(Address.user)).limit(10)

        would produce SQL like:

        SELECT * FROM
          (SELECT * FROM addresses LIMIT 10) AS anon_1
          LEFT OUTER JOIN users AS users_1 ON users_1.id = anon_1.addresses_user_id

        This is because the presence of any eager loaders suggests that some or all of them may relate to multi-row collections, which would necessitate wrapping any kind of rowcount-sensitive modifiers like LIMIT inside of a subquery.

        In 0.6, that logic is more sensitive and can detect if all eager loaders represent many-to-ones, in which case the eager joins don’t affect the rowcount:

        SELECT * FROM addresses LEFT OUTER JOIN users AS users_1 ON users_1.id = addresses.user_id LIMIT 10

      Mutable Primary Keys with Joined Table Inheritance

      A joined table inheritance config where the child table has a PK that foreign keys to the parent PK can now be updated on a CASCADE-capable database like Postgresql. mapper() now has an option passive_updates=True which indicates this foreign key is updated automatically. If on a non-cascading database like SQLite or MySQL/MyISAM, set this flag to False. A future feature enhancement will try to get this flag to be auto-configuring based on dialect/table style in use.
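
      A minimal classical-mapping sketch (the Person/Engineer classes and tables are hypothetical); on a non-cascading backend the flag would be turned off so the ORM updates the dependent foreign key itself:

      from sqlalchemy.orm import mapper

      mapper(Person, person_table)
      mapper(Engineer, engineer_table, inherits=Person,
             # SQLite / MySQL-MyISAM won't cascade the PK change to the
             # child table, so disable passive updates here
             passive_updates=False)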

      Beaker Caching

      A promising new example of Beaker integration is in examples/beaker_caching. This is a straightforward recipe which applies a Beaker cache within the result-generation engine of Query. Cache parameters are provided via query.options(), and allows full control over the contents of the cache. SQLAlchemy 0.6 includes improvements to the Session.merge() method to support this and similar recipes, as well as to provide significantly improved performance in most scenarios.

      Other Changes

      • the “row tuple” object returned by Query when multiple column/entities are selected is now picklable as well as higher performing.
      • query.join() has been reworked to provide more consistent behavior and more flexibility (includes [ticket:1537])
      • query.select_from() accepts multiple clauses to produce multiple comma separated entries within the FROM clause. Useful when selecting from multiple-homed join() clauses.
      • the “dont_load=True” flag on Session.merge() is deprecated and is now “load=False”.
      • added “make_transient()” helper function which transforms a persistent/ detached instance into a transient one (i.e. deletes the instance_key and removes from any session.) [ticket:1052]
      • the allow_null_pks flag on mapper() is deprecated and has been renamed to allow_partial_pks. It is turned “on” by default. This means that a row which has a non-null value for any of its primary key columns will be considered an identity. The need for this scenario typically only occurs when mapping to an outer join. When set to False, a PK that has NULLs in it will not be considered a primary key - in particular this means a result row will come back as None (or not be filled into a collection), and new in 0.6 also indicates that session.merge() won’t issue a round trip to the database for such a PK value. [ticket:1680]
      • the mechanics of “backref” have been fully merged into the finer grained “back_populates” system, and take place entirely within the _generate_backref() method of RelationProperty. This makes the initialization procedure of RelationProperty simpler and allows easier propagation of settings (such as from subclasses of RelationProperty) into the reverse reference. The internal BackRef() is gone and backref() returns a plain tuple that is understood by RelationProperty.
      • the keys attribute of ResultProxy is now a method, so references to it (result.keys) must be changed to method invocations (result.keys())
      • ResultProxy.last_inserted_ids is now deprecated, use ResultProxy.inserted_primary_key instead.

      Deprecated/Removed ORM Elements

      Most elements that were deprecated throughout 0.5 and raised deprecation warnings have been removed (with a few exceptions). All elements that were marked “pending deprecation” are now deprecated and will raise a warning upon use.

      • ‘transactional’ flag on sessionmaker() and others is removed. Use ‘autocommit=True’ to indicate ‘transactional=False’.
      • ‘polymorphic_fetch’ argument on mapper() is removed. Loading can be controlled using the ‘with_polymorphic’ option.
      • ‘select_table’ argument on mapper() is removed. Use ‘with_polymorphic=(“*”, <some selectable>)’ for this functionality.
      • ‘proxy’ argument on synonym() is removed. This flag did nothing throughout 0.5, as the “proxy generation” behavior is now automatic.
      • Passing a single list of elements to joinedload(), joinedload_all(), contains_eager(), lazyload(), defer(), and undefer() instead of multiple positional *args is deprecated.
      • Passing a single list of elements to query.order_by(), query.group_by(), query.join(), or query.outerjoin() instead of multiple positional *args is deprecated.
      • query.iterate_instances() is removed. Use query.instances().
      • Query.query_from_parent() is removed. Use the sqlalchemy.orm.with_parent() function to produce a “parent” clause, or alternatively query.with_parent().
      • query._from_self() is removed, use query.from_self() instead.
      • the “comparator” argument to composite() is removed. Use “comparator_factory”.
      • RelationProperty._get_join() is removed.
      • the ‘echo_uow’ flag on Session is removed. Use logging on the “sqlalchemy.orm.unitofwork” name.
      • session.clear() is removed. use session.expunge_all().
      • session.save(), session.update(), session.save_or_update() are removed. Use session.add() and session.add_all().
      • the “objects” flag on session.flush() remains deprecated.
      • the “dont_load=True” flag on session.merge() is deprecated in favor of “load=False”.
      • ScopedSession.mapper remains deprecated. See the usage recipe at http://www.sqlalchemy.org/trac/wiki/UsageRecipes/SessionAwareMapper
      • passing an InstanceState (internal SQLAlchemy state object) to attributes.init_collection() or attributes.get_history() is deprecated. These functions are public API and normally expect a regular mapped object instance.
      • the ‘engine’ parameter to declarative_base() is removed. Use the ‘bind’ keyword argument.

      Extensions

      SQLSoup

      SQLSoup has been modernized and updated to reflect common 0.5/0.6 capabilities, including well defined session integration. Please read the new docs at [http://www.sqlalchemy.org/docs/06/reference/ext/sqlsoup.html].

      Declarative

      The DeclarativeMeta (default metaclass for declarative_base) previously allowed subclasses to modify dict_ to add class attributes (e.g. columns). This no longer works; the DeclarativeMeta constructor now ignores dict_. Instead, the class attributes should be assigned directly, e.g. cls.id = Column(...), or the MixIn class approach should be used instead of the metaclass approach.

      What’s New in SQLAlchemy 0.7?

      About this Document

      This document describes changes between SQLAlchemy version 0.6, last released May 5, 2012, and SQLAlchemy version 0.7, undergoing maintenance releases as of October, 2012.

      Document date: July 27, 2011

      Introduction

      This guide introduces what’s new in SQLAlchemy version 0.7, and also documents changes which affect users migrating their applications from the 0.6 series of SQLAlchemy to 0.7.

      To as great a degree as possible, changes are made in such a way as to not break compatibility with applications built for 0.6. The changes that are necessarily not backwards compatible are very few, and all but one, the change to mutable attribute defaults, should affect an exceedingly small portion of applications - many of the changes regard non-public APIs and undocumented hacks some users may have been attempting to use.

      A second, even smaller class of non-backwards-compatible changes is also documented. This class of change regards those features and behaviors that have been deprecated at least since version 0.5 and have been raising warnings since their deprecation. These changes would only affect applications that are still using 0.4- or early 0.5-style APIs. As the project matures, we have fewer and fewer of these kinds of changes with 0.x level releases, which is a product of our API having ever fewer features that are less than ideal for the use cases they were meant to solve.

      An array of existing functionalities has been superseded in SQLAlchemy 0.7. There’s not much difference between the terms “superseded” and “deprecated”, except that the former carries a much weaker suggestion that the old feature will ever be removed. In 0.7, features like synonym and comparable_property, as well as all the Extension and other event classes, have been superseded. But these “superseded” features have been re-implemented such that their implementations live mostly outside of core ORM code, so their continued “hanging around” doesn’t impact SQLAlchemy’s ability to further streamline and refine its internals, and we expect them to remain within the API for the foreseeable future.

      New Features

      New Event System

      SQLAlchemy started early with the MapperExtension class, which provided hooks into the persistence cycle of mappers. As SQLAlchemy quickly became more componentized, pushing mappers into a more focused configurational role, many more “extension”, “listener”, and “proxy” classes popped up to solve various activity-interception use cases in an ad-hoc fashion. Part of this was driven by the divergence of activities; ConnectionProxy objects wanted to provide a system of rewriting statements and parameters; AttributeExtension provided a system of replacing incoming values, and DDL objects had events that could be switched off of dialect-sensitive callables.

      0.7 re-implements virtually all of these plugin points with a new, unified approach, which retains all the functionalities of the different systems, provides more flexibility and less boilerplate, performs better, and eliminates the need to learn radically different APIs for each event subsystem. The pre-existing classes MapperExtension, SessionExtension, AttributeExtension, ConnectionProxy, PoolListener as well as the DDLElement.execute_at method are deprecated and now implemented in terms of the new system - these APIs remain fully functional and are expected to remain in place for the foreseeable future.

      The new approach uses named events and user-defined callables to associate activities with events. The API’s look and feel was driven by such diverse sources as JQuery, Blinker, and Hibernate, and was also modified further on several occasions during conferences with dozens of users on Twitter, which appears to have a much higher response rate than the mailing list for such questions.

      It also features an open-ended system of target specification that allows events to be associated with API classes, such as for all Session or Engine objects, with specific instances of API classes, such as for a specific Pool or Mapper, as well as for related objects like a user-defined class that’s mapped, or something as specific as a certain attribute on instances of a particular subclass of a mapped parent class. Individual listener subsystems can apply wrappers to incoming user-defined listener functions which modify how they are called - a mapper event can receive either the instance of the object being operated upon, or its underlying InstanceState object. An attribute event can opt whether or not to have the responsibility of returning a new value.

      Several systems now build upon the new event API, including the new “mutable attributes” API as well as composite attributes. The greater emphasis on events has also led to the introduction of a handful of new events, including attribute expiration and refresh operations, pickle loads/dumps operations, completed mapper construction operations.

      See also

      Events

      #1902

      Hybrid Attributes, implements/supersedes synonym(), comparable_property()

      The “derived attributes” example has now been turned into an official extension. The typical use case for synonym() is to provide descriptor access to a mapped column; the use case for comparable_property() is to be able to return a PropComparator from any descriptor. In practice, the approach of “derived” is easier to use, more extensible, is implemented in a few dozen lines of pure Python with almost no imports, and doesn’t require the ORM core to even be aware of it. The feature is now known as the “Hybrid Attributes” extension.

      synonym() and comparable_property() are still part of the ORM, though their implementations have been moved outwards, building on an approach that is similar to that of the hybrid extension, so that the core ORM mapper/query/property modules aren’t really aware of them otherwise.

      #1903

      Speed Enhancements

      As is customary with all major SQLA releases, a wide pass through the internals to reduce overhead and callcounts has been made which further reduces the work needed in common scenarios. Highlights of this release include:

      • The flush process will now bundle INSERT statements into batches fed to cursor.executemany(), for rows where the primary key is already present. In particular this usually applies to the “child” table on a joined table inheritance configuration, meaning the number of calls to cursor.execute for a large bulk insert of joined-table objects can be cut in half, allowing native DBAPI optimizations to take place for those statements passed to cursor.executemany() (such as re-using a prepared statement).
      • The codepath invoked when accessing a many-to-one reference to a related object that’s already loaded has been greatly simplified. The identity map is checked directly without the need to generate a new Query object first, which is expensive in the context of thousands of in-memory many-to-ones being accessed. The usage of constructed-per-call “loader” objects is also no longer used for the majority of lazy attribute loads.
      • The rewrite of composites allows a shorter codepath when mapper internals access mapped attributes within a flush.
      • New inlined attribute access functions replace the previous usage of “history” when the “save-update” and other cascade operations need to cascade among the full scope of datamembers associated with an attribute. This reduces the overhead of generating a new History object for this speed-critical operation.
      • The internals of the ExecutionContext, the object corresponding to a statement execution, have been inlined and simplified.
      • The bind_processor() and result_processor() callables generated by types for each statement execution are now cached (carefully, so as to avoid memory leaks for ad-hoc types and dialects) for the lifespan of that type, further reducing per-statement call overhead.
      • The collection of “bind processors” for a particular Compiled instance of a statement is also cached on the Compiled object, taking further advantage of the “compiled cache” used by the flush process to re-use the same compiled form of INSERT, UPDATE, DELETE statements.

      A demonstration of callcount reduction including a sample benchmark script is at http://techspot.zzzeek.org/2010/12/12/a-tale-of-three- profiles/

      Composites Rewritten

      The “composite” feature has been rewritten, like synonym() and comparable_property(), to use a lighter weight implementation based on descriptors and events, rather than building into the ORM internals. This allowed the removal of some latency from the mapper/unit of work internals, and simplifies the workings of composite. The composite attribute now no longer conceals the underlying columns it builds upon, which now remain as regular attributes. Composites can also act as a proxy for relationship() as well as Column() attributes.

      The major backwards-incompatible change of composites is that they no longer use the mutable=True system to detect in-place mutations. Please use the Mutation Tracking extension to establish in-place change events to existing composite usage.

      #2008 #2024

      More succinct form of query.join(target, onclause)

      The default method of issuing query.join() to a target with an explicit onclause is now:

      query.join(SomeClass, SomeClass.id==ParentClass.some_id)

      In 0.6, this usage was considered to be an error, because join() accepts multiple arguments corresponding to multiple JOIN clauses - the two-argument form needed to be in a tuple to disambiguate between single-argument and two-argument join targets. In the middle of 0.6 we added detection and an error message for this specific calling style, since it was so common. In 0.7, since we are detecting the exact pattern anyway, and since having to type out a tuple for no reason is extremely annoying, the non-tuple method now becomes the “normal” way to do it. The “multiple JOIN” use case is exceedingly rare compared to the single join case, and multiple joins these days are more clearly represented by multiple calls to join().

      The tuple form will remain for backwards compatibility.

      Note that all the other forms of query.join() remain unchanged:

      query.join(MyClass.somerelation)
      query.join("somerelation")
      query.join(MyTarget)
      # ... etc

      Querying with Joins

      #1923

      Mutation event extension, supersedes “mutable=True”

      A new extension, Mutation Tracking, provides a mechanism by which user-defined datatypes can provide change events back to the owning parent or parents. The extension includes an approach for scalar database values, such as those managed by PickleType, postgresql.ARRAY, or other custom MutableType classes, as well as an approach for ORM “composites”, those configured using composite().

      NULLS FIRST / NULLS LAST operators

      These are implemented as an extension to the asc() and desc() operators, called nullsfirst() and nullslast().
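
      For example (mytable is a hypothetical Table with a somecol column):

      from sqlalchemy import select, desc, nullslast

      # renders: ORDER BY mytable.somecol DESC NULLS LAST
      stmt = select([mytable]).order_by(nullslast(desc(mytable.c.somecol)))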

      #723

      select.distinct(), query.distinct() accepts *args for Postgresql DISTINCT ON

      This was already available by passing a list of expressions to the distinct keyword argument of select(); the distinct() method of select() and Query now accept positional arguments which are rendered as DISTINCT ON when a Postgresql backend is used.
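
      A short sketch against a hypothetical mapped User class and an existing session, on a Postgresql backend:

      # renders SELECT DISTINCT ON (users.name) ... on Postgresql
      q = session.query(User).distinct(User.name).order_by(User.name, User.id)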

      distinct()

      Query.distinct()

      #1069

      Index() can be placed inline inside of Table, __table_args__

      The Index() construct can be created inline with a Table definition, using strings as column names, as an alternative to the creation of the index outside of the Table. That is:

      Table('mytable', metadata,
              Column('id',Integer, primary_key=True),
              Column('name', String(50), nullable=False),
              Index('idx_name', 'name')
      )

      The primary rationale here is for the benefit of declarative __table_args__, particularly when used with mixins:

      class HasNameMixin(object):
          name = Column('name', String(50), nullable=False)
          @declared_attr
          def __table_args__(cls):
              return (Index('name'), {})
      
      class User(HasNameMixin, Base):
          __tablename__ = 'user'
          id = Column('id', Integer, primary_key=True)

      Indexes

      Window Function SQL Construct

      A “window function” provides to a statement information about the result set as it’s produced. This allows criteria against various things like “row number”, “rank” and so forth. They are known to be supported at least by Postgresql, SQL Server and Oracle, possibly others.

      The best introduction to window functions is on Postgresql’s site, where window functions have been supported since version 8.4:

      http://www.postgresql.org/docs/9.0/static/tutorial-window.html

      SQLAlchemy provides a simple construct typically invoked via an existing function clause, using the over() method, which accepts order_by and partition_by keyword arguments. Below we replicate the first example in PG’s tutorial:

      from sqlalchemy.sql import table, column, select, func
      
      empsalary = table('empsalary',
                      column('depname'),
                      column('empno'),
                      column('salary'))
      
      s = select([
              empsalary,
              func.avg(empsalary.c.salary).
                    over(partition_by=empsalary.c.depname).
                    label('avg')
          ])
      
      print s

      SQL:

      SELECT empsalary.depname, empsalary.empno, empsalary.salary,
      avg(empsalary.salary) OVER (PARTITION BY empsalary.depname) AS avg
      FROM empsalary

      sqlalchemy.sql.expression.over

      #1844

      execution_options() on Connection accepts “isolation_level” argument

      This sets the transaction isolation level for a single Connection, until that Connection is closed and its underlying DBAPI resource returned to the connection pool, upon which the isolation level is reset back to the default. The default isolation level is set using the isolation_level argument to create_engine().
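
      A minimal sketch; the connection URL is a placeholder:

      from sqlalchemy import create_engine

      # engine-wide default isolation level
      engine = create_engine("postgresql://scott:tiger@localhost/test",
                             isolation_level="READ COMMITTED")

      # per-connection override, reset when the connection is returned to the pool
      conn = engine.connect().execution_options(isolation_level="SERIALIZABLE")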

      Transaction isolation is currently only supported by the Postgresql and SQLite backends.

      execution_options()

      #2001

      TypeDecorator works with integer primary key columns

      A TypeDecorator which extends the behavior of Integer can be used with a primary key column. The “autoincrement” feature of Column will now recognize that the underlying database column is still an integer so that lastrowid mechanisms continue to function. The TypeDecorator itself will have its result value processor applied to newly generated primary keys, including those received by the DBAPI cursor.lastrowid accessor.
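
      A sketch of such a type; the class name and pass-through conversion are hypothetical:

      from sqlalchemy import Column, Integer, MetaData, Table
      from sqlalchemy.types import TypeDecorator

      class MyIntType(TypeDecorator):
          # hypothetical decorator around Integer; usable as a primary key
          impl = Integer

          def process_result_value(self, value, dialect):
              # also applied to newly generated primary key values
              return value

      metadata = MetaData()
      t = Table('t', metadata, Column('id', MyIntType, primary_key=True))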

      #2005 #2006

      TypeDecorator is present in the “sqlalchemy” import space

      No longer need to import this from sqlalchemy.types, it’s now mirrored in sqlalchemy.
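
      That is, both import forms now refer to the same class:

      from sqlalchemy import TypeDecorator
      from sqlalchemy.types import TypeDecorator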

      New Dialects

      Dialects have been added:

      • a MySQLdb driver for the Drizzle database:

        Drizzle

      • support for the pymysql DBAPI:

        pymysql Notes

      • psycopg2 now works with Python 3

      Behavioral Changes (Backwards Compatible)

      C Extensions Build by Default

      This is as of 0.7b4. The extensions will build if CPython 2.x is detected. If the build fails, such as on a Windows install, that condition is caught and the non-C install proceeds. The C extensions won’t build if Python 3 or PyPy is used.

      Query.count() simplified, should work virtually always

      The very old guesswork which occurred within Query.count() has been modernized to use .from_self(). That is, query.count() is now equivalent to:

      query.from_self(func.count(literal_column('1'))).scalar()

      Previously, internal logic attempted to rewrite the columns clause of the query itself, and upon detection of a “subquery” condition, such as a column-based query that might have aggregates in it, or a query with DISTINCT, would go through a convoluted process of rewriting the columns clause. This logic failed in complex conditions, particularly those involving joined table inheritance, and was long obsolete by the more comprehensive .from_self() call.

      The SQL emitted by query.count() is now always of the form:

      SELECT count(1) AS count_1 FROM (
          SELECT user.id AS user_id, user.name AS user_name FROM user
      ) AS anon_1

      that is, the original query is preserved entirely inside of a subquery, with no more guessing as to how count should be applied.

      #2093

      To emit a non-subquery form of count()

      MySQL users have already reported that the MyISAM engine not surprisingly falls over completely with this simple change. Note that for a simple count() that optimizes for DBs that can’t handle simple subqueries, func.count() should be used:

      from sqlalchemy import func
      session.query(func.count(MyClass.id)).scalar()

      or for count(*):

      from sqlalchemy import func, literal_column
      session.query(func.count(literal_column('*'))).select_from(MyClass).scalar()

      LIMIT/OFFSET clauses now use bind parameters

      The LIMIT and OFFSET clauses, or their backend equivalents (i.e. TOP, ROW NUMBER OVER, etc.), use bind parameters for the actual values, for all backends which support it (most, except for Sybase). This allows better query optimizer performance, as the textual string for multiple statements with differing LIMIT/OFFSET is now identical.
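
      For example, a simple limited SELECT now renders its values as bind parameters (table name hypothetical):

      from sqlalchemy import MetaData, Table, Column, Integer, select

      metadata = MetaData()
      t = Table('t', metadata, Column('id', Integer, primary_key=True))

      # renders roughly: SELECT t.id FROM t LIMIT :param_1 OFFSET :param_2
      stmt = select([t]).limit(10).offset(20)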

      #805

      Logging enhancements

      Vinay Sajip has provided a patch to our logging system such that the “hex string” embedded in logging statements for engines and pools is no longer needed to allow the echo flag to work correctly. A new system that uses filtered logging objects allows us to maintain our current behavior of echo being local to individual engines without the need for additional identifying strings local to those engines.

      #1926

      Simplified polymorphic_on assignment

      The population of the polymorphic_on column-mapped attribute, when used in an inheritance scenario, now occurs when the object is constructed, i.e. its __init__ method is called, using the init event. The attribute then behaves the same as any other column-mapped attribute. Previously, special logic would fire off during flush to populate this column, which prevented any user code from modifying its behavior. The new approach improves upon this in three ways: 1. the polymorphic identity is now present on the object as soon as its constructed; 2. the polymorphic identity can be changed by user code without any difference in behavior from any other column-mapped attribute; 3. the internals of the mapper during flush are simplified and no longer need to make special checks for this column.

      #1895

      contains_eager() chains across multiple paths (i.e. “all()”)

      The contains_eager() modifier now will chain itself for a longer path without the need to emit individual contains_eager() calls. Instead of:

      session.query(A).options(contains_eager(A.b), contains_eager(A.b, B.c))

      you can say:

      session.query(A).options(contains_eager(A.b, B.c))

      #2032

      Flushing of orphans that have no parent is allowed

      We’ve had a long standing behavior that checks for a so-called “orphan” during flush, that is, an object which is associated with a relationship() that specifies “delete-orphan” cascade, has been newly added to the session for an INSERT, and no parent relationship has been established. This check was added years ago to accommodate some test cases which tested the orphan behavior for consistency. In modern SQLA, this check is no longer needed on the Python side. The equivalent behavior of the “orphan check” is accomplished by making the foreign key reference to the object’s parent row NOT NULL, where the database does its job of establishing data consistency in the same way SQLA allows most other operations to do. If the object’s parent foreign key is nullable, then the row can be inserted. The “orphan” behavior runs when the object was persisted with a particular parent, and is then disassociated with that parent, leading to a DELETE statement emitted for it.

      #1912

      Warnings generated when collection members, scalar referents not part of the flush

      Warnings are now emitted when related objects referenced via a loaded relationship() on a parent object marked as “dirty” are not present in the current Session.

      The save-update cascade takes effect when objects are added to the Session, or when objects are first associated with a parent, so that an object and everything related to it are usually all present in the same Session. However, if save-update cascade is disabled for a particular relationship(), then this behavior does not occur, and the flush process does not try to correct for it, instead staying consistent to the configured cascade behavior. Previously, when such objects were detected during the flush, they were silently skipped. The new behavior is that a warning is emitted, for the purposes of alerting to a situation that more often than not is the source of unexpected behavior.

      #1973

      Setup no longer installs a Nose plugin

      Since we moved to nose we’ve used a plugin that installs via setuptools, so that the nosetests script would automatically run SQLA’s plugin code, necessary for our tests to have a full environment. In the middle of 0.6, we realized that the import pattern here meant that Nose’s “coverage” plugin would break, since “coverage” requires that it be started before any modules to be covered are imported; so in the middle of 0.6 we made the situation worse by adding a separate sqlalchemy-nose package to the build to overcome this.

      In 0.7 we’ve done away with trying to get nosetests to work automatically, since the SQLAlchemy module would produce a large number of nose configuration options for all usages of nosetests, not just the SQLAlchemy unit tests themselves, and the additional sqlalchemy-nose install was an even worse idea, producing an extra package in Python environments. The sqla_nose.py script in 0.7 is now the only way to run the tests with nose.

      #1949

      Non-Table-derived constructs can be mapped

      A construct that isn’t against any Table at all, like a function, can be mapped.

      from sqlalchemy import select, func
      from sqlalchemy.orm import mapper
      
      class Subset(object):
          pass
      selectable = select(["x", "y", "z"]).select_from(func.some_db_function()).alias()
      mapper(Subset, selectable, primary_key=[selectable.c.x])

      #1876

      aliased() accepts FromClause elements

      This is a convenience helper such that when a plain FromClause, such as a select(), Table or join, is passed to the orm.aliased() construct, it passes through to the .alias() method of that FROM construct rather than constructing an ORM-level AliasedClass.
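
      A sketch, assuming a Core table addresses (hypothetical):

      from sqlalchemy import MetaData, Table, Column, Integer, String, select
      from sqlalchemy.orm import aliased

      metadata = MetaData()
      addresses = Table('addresses', metadata,
              Column('id', Integer, primary_key=True),
              Column('email', String(50)))

      stmt = select([addresses]).where(addresses.c.email.like('%@example.com'))

      # passing the select() to aliased() is now equivalent to stmt.alias()
      a1 = aliased(stmt)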

      #2018

      Session.connection(), Session.execute() accept ‘bind’

      This is to allow execute/connection operations to participate in the open transaction of an engine explicitly. It also allows custom subclasses of Session that implement their own get_bind() method and arguments to use those custom arguments with both the execute() and connection() methods equally.
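
      A minimal sketch; the engine here is a placeholder:

      from sqlalchemy import create_engine
      from sqlalchemy.orm import Session

      some_engine = create_engine("sqlite://")
      session = Session()

      # execute a statement on an explicitly chosen bind
      result = session.execute("select 1", bind=some_engine)

      # or obtain a Connection participating in that engine's transaction
      conn = session.connection(bind=some_engine)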

      Session.connection Session.execute

      #1996

      Standalone bind parameters in columns clause auto-labeled.

      Bind parameters present in the “columns clause” of a select are now auto-labeled like other “anonymous” clauses, which among other things allows their “type” to be meaningful when the row is fetched, as in result row processors.
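
      For example, a typed bindparam() placed in the columns clause now receives an anonymous label, so the parameter’s type applies to the fetched value (a sketch):

      from sqlalchemy import select, bindparam, Integer

      # the parameter is labeled like any other anonymous column expression
      stmt = select([bindparam('x', value=5, type_=Integer)])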

      SQLite - relative file paths are normalized through os.path.abspath()

      This is so that a script which changes the current directory will continue to target the same location as subsequent SQLite connections are established.

      #2036

      MS-SQL - String/Unicode/VARCHAR/NVARCHAR/VARBINARY emit “max” for no length

      On the MS-SQL backend, the String/Unicode types, and their counterparts VARCHAR/NVARCHAR, as well as VARBINARY (#1833), emit “max” as the length when no length is specified. This makes it more compatible with Postgresql’s VARCHAR type, which is similarly unbounded when no length is specified. SQL Server defaults the length on these types to ‘1’ when no length is specified.

      Behavioral Changes (Backwards Incompatible)

      Note again, aside from the default mutability change, most of these changes are extremely minor and will not affect most users.

      PickleType and ARRAY mutability turned off by default

      This change refers to the default behavior of the ORM when mapping columns that have either the PickleType or postgresql.ARRAY datatypes. The mutable flag is now set to False by default. If an existing application uses these types and depends upon detection of in-place mutations, the type object must be constructed with mutable=True to restore the 0.6 behavior:

      Table('mytable', metadata,
          # ....
      
          Column('pickled_data', PickleType(mutable=True))
      )

      The mutable=True flag is being phased out, in favor of the new Mutation Tracking extension. This extension provides a mechanism by which user-defined datatypes can provide change events back to the owning parent or parents.

      The previous approach of using mutable=True does not provide for change events - instead, the ORM must scan through all mutable values present in a session and compare them against their original value for changes every time flush() is called, which is a very time consuming event. This is a holdover from the very early days of SQLAlchemy when flush() was not automatic and the history tracking system was not nearly as sophisticated as it is now.

      Existing applications which use PickleType, postgresql.ARRAY or other MutableType subclasses, and require in-place mutation detection, should migrate to the new mutation tracking system, as mutable=True is likely to be deprecated in the future.

      #1980

      Mutability detection of composite() requires the Mutation Tracking Extension

      So-called “composite” mapped attributes, those configured using the technique described at Composite Column Types, have been re-implemented such that the ORM internals are no longer aware of them (leading to shorter and more efficient codepaths in critical sections). While composite types are generally intended to be treated as immutable value objects, this was never enforced. For applications that use composites with mutability, the Mutation Tracking extension offers a base class which establishes a mechanism for user-defined composite types to send change event messages back to the owning parent or parents of each object.

      Applications which use composite types and rely upon in-place mutation detection of these objects should either migrate to the “mutation tracking” extension, or change the usage of the composite types such that in-place changes are no longer needed (i.e., treat them as immutable value objects).

      SQLite - the SQLite dialect now uses NullPool for file-based databases

      This change is 99.999% backwards compatible, unless you are using temporary tables across connection pool connections.

      A file-based SQLite connection is blazingly fast, and using NullPool means that each call to Engine.connect creates a new pysqlite connection.

      Previously, the SingletonThreadPool was used, which meant that all connections to a certain engine in a thread would be the same connection. It’s intended that the new approach is more intuitive, particularly when multiple connections are used.

      SingletonThreadPool is still the default pool used when a :memory: database is used.

      Note that this change breaks temporary tables used across Session commits, due to the way SQLite handles temp tables. See the note at http://www.sqlalchemy.org/docs/dialects/sqlite.html#using-temporary-tables-with-sqlite if temporary tables beyond the scope of one pool connection are desired.
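
      If the previous behavior is needed (for example, to keep temporary tables visible across pooled connections), the pool class can be specified explicitly; a sketch:

      from sqlalchemy import create_engine
      from sqlalchemy.pool import SingletonThreadPool

      # restore the pre-0.7 pooling behavior for a file-based database
      engine = create_engine('sqlite:///mydb.db',
                             poolclass=SingletonThreadPool)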

      #1921

      Session.merge() checks version ids for versioned mappers

      Session.merge() will check the version id of the incoming state against that of the database, assuming the mapping uses version ids and incoming state has a version_id assigned, and raise StaleDataError if they don’t match. This is the correct behavior, in that if incoming state contains a stale version id, it should be assumed the state is stale.

      If merging data into a versioned state, the version id attribute can be left undefined, and no version check will take place.

      This check was confirmed by examining what Hibernate does - both the merge() and the versioning features were originally adapted from Hibernate.

      #2027

      Tuple label names in Query Improved

      This improvement is potentially slightly backwards incompatible for an application that relied upon the old behavior.

      Given two mapped classes Foo and Bar each with a column spam:

      qa = session.query(Foo.spam)
      qb = session.query(Bar.spam)
      
      qu = qa.union(qb)

      The name given to the single column yielded by qu will be spam. Previously it would be something like foo_spam due to the way the union would combine things, which is inconsistent with the name spam in the case of a non-unioned query.

      #1942

      Mapped column attributes reference the most specific column first

      This is a change to the behavior involved when a mapped column attribute references multiple columns, specifically when dealing with an attribute on a joined-table subclass that has the same name as that of an attribute on the superclass.

      Using declarative, the scenario is this:

      class Parent(Base):
          __tablename__ = 'parent'
          id = Column(Integer, primary_key=True)
      
      class Child(Parent):
          __tablename__ = 'child'
          id = Column(Integer, ForeignKey('parent.id'), primary_key=True)

      Above, the attribute Child.id refers to both the child.id column as well as parent.id - this is due to the name of the attribute. If it were named differently on the class, such as Child.child_id, it then maps distinctly to child.id, with Child.id being the same attribute as Parent.id.

      When the id attribute is made to reference both parent.id and child.id, it stores them in an ordered list. An expression such as Child.id then refers to just one of those columns when rendered. Up until 0.6, this column would be parent.id. In 0.7, it is the less surprising child.id.

      The legacy of this behavior deals with behaviors and restrictions of the ORM that don’t really apply anymore; all that was needed was to reverse the order.

      A primary advantage of this approach is that it’s now easier to construct primaryjoin expressions that refer to the local column:

      class Child(Parent):
          __tablename__ = 'child'
          id = Column(Integer, ForeignKey('parent.id'), primary_key=True)
          some_related = relationship("SomeRelated",
                          primaryjoin="Child.id==SomeRelated.child_id")
      
      class SomeRelated(Base):
          __tablename__ = 'some_related'
          id = Column(Integer, primary_key=True)
          child_id = Column(Integer, ForeignKey('child.id'))

      Prior to 0.7 the Child.id expression would reference Parent.id, and it would be necessary to map child.id to a distinct attribute.

      It also means that a query like this one changes its behavior:

      session.query(Parent).filter(Child.id > 7)

      In 0.6, this would render:

      SELECT parent.id AS parent_id
      FROM parent
      WHERE parent.id > :id_1

      in 0.7, you get:

      SELECT parent.id AS parent_id
      FROM parent, child
      WHERE child.id > :id_1

      which you’ll note is a cartesian product - this behavior is now equivalent to that of any other attribute that is local to Child. The with_polymorphic() method, or a similar strategy of explicitly joining the underlying Table objects, is used to render a query against all Parent objects with criteria against Child, in the same manner as that of 0.5 and 0.6:

      print s.query(Parent).with_polymorphic([Child]).filter(Child.id > 7)

      Which on both 0.6 and 0.7 renders:

      SELECT parent.id AS parent_id, child.id AS child_id
      FROM parent LEFT OUTER JOIN child ON parent.id = child.id
      WHERE child.id > :id_1

      Another effect of this change is that a joined-inheritance load across two tables will populate from the child table’s value, not that of the parent table. An unusual case is that a query against “Parent” using with_polymorphic="*" issues a query against “parent”, with a LEFT OUTER JOIN to “child”. The row is located in “parent”, and its polymorphic identity indicates it corresponds to “Child”, but suppose the actual row in “child” has been deleted. Due to this corruption, the row comes in with all the columns corresponding to “child” set to NULL - this is now the value that gets populated, not the one in the parent table.

      #1892

      Mapping to joins with two or more same-named columns requires explicit declaration

      This is somewhat related to the previous change in #1892. When mapping to a join, same-named columns must be explicitly linked to mapped attributes, i.e. as described in Mapping a Class Against Multiple Tables.

      Given two tables foo and bar, each with a primary key column id, the following now produces an error:

      foobar = foo.join(bar, foo.c.id==bar.c.foo_id)
      mapper(FooBar, foobar)

      This is because the mapper() refuses to guess which column is the primary representation of FooBar.id - is it foo.c.id or is it bar.c.id? The attribute must be explicit:

      foobar = foo.join(bar, foo.c.id==bar.c.foo_id)
      mapper(FooBar, foobar, properties={
          'id':[foo.c.id, bar.c.id]
      })

      #1896

      Mapper requires that polymorphic_on column be present in the mapped selectable

      This is a warning in 0.6, now an error in 0.7. The column given for polymorphic_on must be in the mapped selectable. This is to prevent some occasional user errors such as:

      mapper(SomeClass, sometable, polymorphic_on=some_lookup_table.c.id)

      where above the polymorphic_on needs to be on a sometable column, in this case perhaps sometable.c.some_lookup_id. There are also some “polymorphic union” scenarios where similar mistakes sometimes occur.

      Such a configuration error has always been “wrong”, and the above mapping doesn’t work as specified - the column would be ignored. It is however potentially backwards incompatible in the rare case that an application has been unknowingly relying upon this behavior.

      #1875

      DDL() constructs now escape percent signs

      Previously, percent signs in DDL() strings would have to be escaped, i.e. %% depending on DBAPI, for those DBAPIs that accept pyformat or format binds (i.e. psycopg2, mysql-python), which was inconsistent versus text() constructs which did this automatically. The same escaping now occurs for DDL() as for text().

      #1897

      Table.c / MetaData.tables refined a bit, don’t allow direct mutation

      This is another area where some users were tinkering around in such a way that doesn’t actually work as expected, but which still left an exceedingly small chance that some application was relying upon this behavior: the construct returned by the .c attribute on Table and the .tables attribute on MetaData is explicitly non-mutable. The “mutable” version of the construct is now private. Adding columns to .c involves using the append_column() method of Table, which ensures things are associated with the parent Table in the appropriate way; similarly, MetaData.tables has a contract with the Table objects stored in this dictionary, as well as a little bit of new bookkeeping in that a set() of all schema names is tracked, which is satisfied only by using the public Table constructor as well as Table.tometadata().

      It is of course possible that the ColumnCollection and dict collections consulted by these attributes could someday implement events on all of their mutational methods such that the appropriate bookkeeping occurred upon direct mutation of the collections, but until someone has the motivation to implement all that along with dozens of new unit tests, narrowing the paths to mutation of these collections will ensure no application is attempting to rely upon usages that are currently not supported.

      #1893 #1917

      server_default consistently returns None for all inserted_primary_key values

      Established consistency when server_default is present on an Integer PK column. SQLA doesn’t pre-fetch these, nor do they come back in cursor.lastrowid (DBAPI). Ensured all backends consistently return None in result.inserted_primary_key for these - some backends may have returned a value previously. Using a server_default on a primary key column is extremely unusual. If a special function or SQL expression is used to generate primary key defaults, this should be established as a Python-side “default” instead of server_default.

      Regarding reflection for this case, reflection of an int PK col with a server_default sets the “autoincrement” flag to False, except in the case of a PG SERIAL col where we detected a sequence default.

      #2020 #2021

      The sqlalchemy.exceptions alias in sys.modules is removed

      For a few years we’ve added the string sqlalchemy.exceptions to sys.modules, so that a statement like “import sqlalchemy.exceptions” would work. The name of the core exceptions module has been exc for a long time now, so the recommended import for this module is:

      from sqlalchemy import exc

      The exceptions name is still present in “sqlalchemy” for applications which might have said from sqlalchemy import exceptions, but they should also start using the exc name.

      Query Timing Recipe Changes

      While not part of SQLAlchemy itself, it’s worth mentioning that the rework of the ConnectionProxy into the new event system means it is no longer appropriate for the “Timing all Queries” recipe. Please adjust query-timers to use the before_cursor_execute() and after_cursor_execute() events, demonstrated in the updated recipe UsageRecipes/Profiling.
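
      A sketch of such a timer using the new events; the attribute stored on the execution context is a made-up name:

      import time
      from sqlalchemy import create_engine, event

      engine = create_engine("sqlite://")

      @event.listens_for(engine, "before_cursor_execute")
      def before_cursor_execute(conn, cursor, statement,
                                parameters, context, executemany):
          context._query_start_time = time.time()

      @event.listens_for(engine, "after_cursor_execute")
      def after_cursor_execute(conn, cursor, statement,
                               parameters, context, executemany):
          total = time.time() - context._query_start_time
          print "Query complete in %f sec" % total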

      Deprecated API

      Default constructor on types will not accept arguments

      Simple types like Integer, Date etc. in the core types module don’t accept arguments. The default constructor that accepts/ignores a catchall *args, **kwargs is restored as of 0.7b4/0.7.0, but emits a deprecation warning.

      If arguments are being used with a core type like Integer, it may be that you intended to use a dialect specific type, such as sqlalchemy.dialects.mysql.INTEGER which does accept a “display_width” argument for example.
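
      For instance, a sketch using the MySQL-specific type:

      from sqlalchemy import Column
      from sqlalchemy.dialects.mysql import INTEGER

      # rather than the deprecated Integer(11):
      Column('x', INTEGER(display_width=11))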

      compile_mappers() renamed configure_mappers(), simplified configuration internals

      This system slowly morphed from something small, implemented local to an individual mapper and poorly named, into something that’s more of a global “registry”-level function which was also poorly named, so we’ve fixed both by moving the implementation out of Mapper altogether and renaming it to configure_mappers(). It is of course normally not needed for an application to call configure_mappers(), as this process occurs on an as-needed basis, as soon as the mappings are needed via attribute or query access.
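
      If an application does call it explicitly, for example to force configuration of all mappers up front, the change is only a rename; a sketch:

      from sqlalchemy.orm import configure_mappers

      # formerly: from sqlalchemy.orm import compile_mappers; compile_mappers()
      configure_mappers()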

      #1966

      Core listener/proxy superseded by event listeners

      PoolListener, ConnectionProxy, DDLElement.execute_at are superseded by event.listen(), using the PoolEvents, EngineEvents, DDLEvents dispatch targets, respectively.

      ORM extensions superseded by event listeners

      MapperExtension, AttributeExtension, SessionExtension are superseded by event.listen(), using the MapperEvents/InstanceEvents, AttributeEvents, SessionEvents, dispatch targets, respectively.

      Sending a string to ‘distinct’ in select() for MySQL should be done via prefixes

      This obscure feature allows this pattern with the MySQL backend:

      select([mytable], distinct='ALL', prefixes=['HIGH_PRIORITY'])

      The prefixes keyword or prefix_with() method should be used for non-standard or unusual prefixes:

      select([mytable]).prefix_with('HIGH_PRIORITY', 'ALL')

      useexisting superseded by extend_existing and keep_existing

      The useexisting flag on Table has been superseded by a new pair of flags keep_existing and extend_existing. extend_existing is equivalent to useexisting - the existing Table is returned, and additional constructor elements are added. With keep_existing, the existing Table is returned, but additional constructor elements are not added - these elements are only applied when the Table is newly created.
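
      A sketch of the two flags against an already-declared Table (names hypothetical):

      from sqlalchemy import MetaData, Table, Column, Integer, String

      metadata = MetaData()
      users = Table('users', metadata, Column('id', Integer, primary_key=True))

      # extend_existing: returns the existing Table and adds the new column
      users = Table('users', metadata,
              Column('name', String(50)),
              extend_existing=True)

      # keep_existing: returns the existing Table, ignoring the new elements
      users = Table('users', metadata,
              Column('email', String(50)),
              keep_existing=True)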

      Backwards Incompatible API Changes

      Callables passed to bindparam() don’t get evaluated - affects the Beaker example

      #1950

      Note this affects the Beaker caching example, where the workings of the _params_from_query() function needed a slight adjustment. If you’re using code from the Beaker example, this change should be applied.

      types.type_map is now private, types._type_map

      We noticed some users tapping into this dictionary inside of sqlalchemy.types as a shortcut to associating Python types with SQL types. We can’t guarantee the contents or format of this dictionary, and additionally the business of associating Python types in a one-to-one fashion has some grey areas that are best decided by individual applications, so we’ve underscored this attribute.

      #1870

      Renamed the alias keyword arg of standalone alias() function to name

      This so that the keyword argument name matches that of the alias() methods on all FromClause objects as well as the name argument on Query.subquery().

      Only code that uses the standalone alias() function, rather than the bound alias() methods, and that passes the alias name using the explicit keyword name alias rather than positionally, would need modification here.
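
      A sketch of the affected call (table name hypothetical):

      from sqlalchemy import MetaData, Table, Column, Integer, alias

      metadata = MetaData()
      users = Table('users', metadata, Column('id', Integer, primary_key=True))

      # 0.6: u = alias(users, alias='u')
      u = alias(users, name='u')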

      Non-public Pool methods underscored

      All methods of Pool and subclasses which are not intended for public use have been renamed with underscores. That they were not named this way previously was a bug.

      Pooling methods now underscored or removed:

      Pool.create_connection() -> Pool._create_connection()

      Pool.do_get() -> Pool._do_get()

      Pool.do_return_conn() -> Pool._do_return_conn()

      Pool.do_return_invalid() -> removed, was not used

      Pool.return_conn() -> Pool._return_conn()

      Pool.get() -> Pool._get(), public API is Pool.connect()

      SingletonThreadPool.cleanup() -> _cleanup()

      SingletonThreadPool.dispose_local() -> removed, use conn.invalidate()

      #1982

      Previously Deprecated, Now Removed

      Query.join(), Query.outerjoin(), eagerload(), eagerload_all(), others no longer allow lists of attributes as arguments

      Passing a list of attributes or attribute names to Query.join, eagerload(), and similar has been deprecated since 0.5:

      # old way, deprecated since 0.5
      session.query(Houses).join([Houses.rooms, Room.closets])
      session.query(Houses).options(eagerload_all([Houses.rooms, Room.closets]))

      These methods all accept *args as of the 0.5 series:

      # current way, in place since 0.5
      session.query(Houses).join(Houses.rooms, Room.closets)
      session.query(Houses).options(eagerload_all(Houses.rooms, Room.closets))

      ScopedSession.mapper is removed

      This feature provided a mapper extension which linked class-based functionality with a particular ScopedSession, in particular providing the behavior such that new object instances would be automatically associated with that session. The feature was overused by tutorials and frameworks which led to great user confusion due to its implicit behavior, and was deprecated in 0.5.5. Techniques for replicating its functionality are at [wiki:UsageRecipes/SessionAwareMapper]

      SQLAlchemy-0.8.4/doc/changelog/migration_08.html0000644000076500000240000047275612251147464022146 0ustar classicstaff00000000000000 What’s New in SQLAlchemy 0.8? — SQLAlchemy 0.8 Documentation

      SQLAlchemy 0.8 Documentation

      Release: 0.8.4 | Release Date: December 8, 2013

      What’s New in SQLAlchemy 0.8?

      About this Document

      This document describes changes between SQLAlchemy version 0.7, undergoing maintenance releases as of October, 2012, and SQLAlchemy version 0.8, which is expected for release in early 2013.

      Document date: October 25, 2012. Updated: March 9, 2013

      Introduction

      This guide introduces what’s new in SQLAlchemy version 0.8, and also documents changes which affect users migrating their applications from the 0.7 series of SQLAlchemy to 0.8.

      SQLAlchemy releases are closing in on 1.0, and each new version since 0.5 features fewer major usage changes. Most applications that are settled into modern 0.7 patterns should be movable to 0.8 with no changes. Applications that use 0.6 and even 0.5 patterns should be directly migratable to 0.8 as well, though larger applications may want to test with each interim version.

      Platform Support

      Targeting Python 2.5 and Up Now

      SQLAlchemy 0.8 will target Python 2.5 and forward; compatibility for Python 2.4 is being dropped.

      The internals will be able to make use of Python ternaries (that is, x if y else z), which will improve things versus the usage of y and x or z, which naturally has been the source of some bugs, as well as context managers (that is, with:) and perhaps in some cases try:/except:/else: blocks, which will help with code readability.

      SQLAlchemy will eventually drop 2.5 support as well - when 2.6 is reached as the baseline, SQLAlchemy will move to use 2.6/3.3 in-place compatibility, removing the usage of the 2to3 tool and maintaining a source base that works with Python 2 and 3 at the same time.

      New ORM Features

      Rewritten relationship() mechanics

      0.8 features a much improved and capable system regarding how relationship() determines how to join between two entities. The new system includes these features:

      • The primaryjoin argument is no longer needed when constructing a relationship() against a class that has multiple foreign key paths to the target. Only the foreign_keys argument is needed to specify those columns which should be included:

        class Parent(Base):
            __tablename__ = 'parent'
            id = Column(Integer, primary_key=True)
            child_id_one = Column(Integer, ForeignKey('child.id'))
            child_id_two = Column(Integer, ForeignKey('child.id'))
        
            child_one = relationship("Child", foreign_keys=child_id_one)
            child_two = relationship("Child", foreign_keys=child_id_two)
        
        class Child(Base):
            __tablename__ = 'child'
            id = Column(Integer, primary_key=True)
      • relationships against self-referential, composite foreign keys where a column points to itself are now supported. The canonical case is as follows:

        class Folder(Base):
            __tablename__ = 'folder'
            __table_args__ = (
              ForeignKeyConstraint(
                  ['account_id', 'parent_id'],
                  ['folder.account_id', 'folder.folder_id']),
            )
        
            account_id = Column(Integer, primary_key=True)
            folder_id = Column(Integer, primary_key=True)
            parent_id = Column(Integer)
            name = Column(String)
        
            parent_folder = relationship("Folder",
                                backref="child_folders",
                                remote_side=[account_id, folder_id]
                          )

        Above, the Folder refers to its parent Folder joining from account_id to itself, and parent_id to folder_id. When SQLAlchemy constructs an auto-join, no longer can it assume all columns on the “remote” side are aliased, and all columns on the “local” side are not - the account_id column is on both sides. So the internal relationship mechanics were totally rewritten to support an entirely different system whereby two copies of account_id are generated, each containing different annotations to determine their role within the statement. Note the join condition within a basic eager load:

        SELECT
            folder.account_id AS folder_account_id,
            folder.folder_id AS folder_folder_id,
            folder.parent_id AS folder_parent_id,
            folder.name AS folder_name,
            folder_1.account_id AS folder_1_account_id,
            folder_1.folder_id AS folder_1_folder_id,
            folder_1.parent_id AS folder_1_parent_id,
            folder_1.name AS folder_1_name
        FROM folder
            LEFT OUTER JOIN folder AS folder_1
            ON
                folder_1.account_id = folder.account_id
                AND folder.folder_id = folder_1.parent_id
        
        WHERE folder.folder_id = ? AND folder.account_id = ?
      • Previously difficult custom join conditions, like those involving functions and/or CASTing of types, will now function as expected in most cases:

        class HostEntry(Base):
            __tablename__ = 'host_entry'
        
            id = Column(Integer, primary_key=True)
            ip_address = Column(INET)
            content = Column(String(50))
        
            # relationship() using explicit foreign_keys, remote_side
            parent_host = relationship("HostEntry",
                                primaryjoin=ip_address == cast(content, INET),
                                foreign_keys=content,
                                remote_side=ip_address
                            )

        The new relationship() mechanics make use of a SQLAlchemy concept known as annotations. These annotations are also available to application code explicitly via the foreign() and remote() functions, either as a means to improve readability for advanced configurations or to directly inject an exact configuration, bypassing the usual join-inspection heuristics:

        from sqlalchemy.orm import foreign, remote
        
        class HostEntry(Base):
            __tablename__ = 'host_entry'
        
            id = Column(Integer, primary_key=True)
            ip_address = Column(INET)
            content = Column(String(50))
        
            # relationship() using explicit foreign() and remote() annotations
            # in lieu of separate arguments
            parent_host = relationship("HostEntry",
                                primaryjoin=remote(ip_address) == \
                                        cast(foreign(content), INET),
                            )

      See also

      Configuring how Relationship Joins - a newly revised section on relationship() detailing the latest techniques for customizing related attributes and collection access.

      #1401 #610

      New Class/Object Inspection System

      Lots of SQLAlchemy users are writing systems that require the ability to inspect the attributes of a mapped class, including being able to get at the primary key columns, object relationships, plain attributes, and so forth, typically for the purpose of building data-marshalling systems, like JSON/XML conversion schemes and of course form libraries galore.

      The Table and Column model were the original inspection points, which have a well-documented system. While SQLAlchemy ORM models are also fully introspectable, this has never been a fully stable and supported feature, and users tended to not have a clear idea how to get at this information.

      0.8 now provides a consistent, stable and fully documented API for this purpose, including an inspection system which works on mapped classes, instances, attributes, and other Core and ORM constructs. The entrypoint to this system is the core-level inspect() function. In most cases, the object being inspected is one already part of SQLAlchemy’s system, such as Mapper, InstanceState, Inspector. In some cases, new objects have been added with the job of providing the inspection API in certain contexts, such as AliasedInsp and AttributeState.

      A walkthrough of some key capabilities follows:

      >>> class User(Base):
      ...     __tablename__ = 'user'
      ...     id = Column(Integer, primary_key=True)
      ...     name = Column(String)
      ...     name_syn = synonym(name)
      ...     addresses = relationship("Address")
      ...
      
      >>> # universal entry point is inspect()
      >>> b = inspect(User)
      
      >>> # b in this case is the Mapper
      >>> b
      <Mapper at 0x101521950; User>
      
      >>> # Column namespace
      >>> b.columns.id
      Column('id', Integer(), table=<user>, primary_key=True, nullable=False)
      
      >>> # mapper's perspective of the primary key
      >>> b.primary_key
      (Column('id', Integer(), table=<user>, primary_key=True, nullable=False),)
      
      >>> # MapperProperties available from .attrs
      >>> b.attrs.keys()
      ['name_syn', 'addresses', 'id', 'name']
      
      >>> # .column_attrs, .relationships, etc. filter this collection
      >>> b.column_attrs.keys()
      ['id', 'name']
      
      >>> list(b.relationships)
      [<sqlalchemy.orm.properties.RelationshipProperty object at 0x1015212d0>]
      
      >>> # they are also namespaces
      >>> b.column_attrs.id
      <sqlalchemy.orm.properties.ColumnProperty object at 0x101525090>
      
      >>> b.relationships.addresses
      <sqlalchemy.orm.properties.RelationshipProperty object at 0x1015212d0>
      
      >>> # point inspect() at a mapped, class level attribute,
      >>> # returns the attribute itself
      >>> b = inspect(User.addresses)
      >>> b
      <sqlalchemy.orm.attributes.InstrumentedAttribute object at 0x101521fd0>
      
      >>> # From here we can get the mapper:
      >>> b.mapper
      <Mapper at 0x101525810; Address>
      
      >>> # the parent inspector, in this case a mapper
      >>> b.parent
      <Mapper at 0x101521950; User>
      
      >>> # an expression
      >>> print b.expression
      "user".id = address.user_id
      
      >>> # inspect works on instances
      >>> u1 = User(id=3, name='x')
      >>> b = inspect(u1)
      
      >>> # it returns the InstanceState
      >>> b
      <sqlalchemy.orm.state.InstanceState object at 0x10152bed0>
      
      >>> # similar attrs accessor refers to the per-attribute state objects
      >>> b.attrs.keys()
      ['id', 'name_syn', 'addresses', 'name']
      
      >>> # attribute interface - from attrs, you get a state object
      >>> b.attrs.id
      <sqlalchemy.orm.state.AttributeState object at 0x10152bf90>
      
      >>> # this object can give you, current value...
      >>> b.attrs.id.value
      3
      
      >>> # ... current history
      >>> b.attrs.id.history
      History(added=[3], unchanged=(), deleted=())
      
      >>> # InstanceState can also provide session state information
      >>> # lets assume the object is persistent
      >>> s = Session()
      >>> s.add(u1)
      >>> s.commit()
      
      >>> # now we can get primary key identity, always
      >>> # works in query.get()
      >>> b.identity
      (3,)
      
      >>> # the mapper level key
      >>> b.identity_key
      (<class '__main__.User'>, (3,))
      
      >>> # state within the session
      >>> b.persistent, b.transient, b.deleted, b.detached
      (True, False, False, False)
      
      >>> # owning session
      >>> b.session
      <sqlalchemy.orm.session.Session object at 0x101701150>

      #2208

      New with_polymorphic() feature, can be used anywhere

      The Query.with_polymorphic() method allows the user to specify which tables should be present when querying against a joined-table entity. Unfortunately the method is awkward and only applies to the first entity in the list, and otherwise has awkward behaviors both in usage as well as within the internals. A new enhancement to the aliased() construct has been added called with_polymorphic() which allows any entity to be “aliased” into a “polymorphic” version of itself, freely usable anywhere:

      from sqlalchemy.orm import with_polymorphic
      palias = with_polymorphic(Person, [Engineer, Manager])
      session.query(Company).\
                  join(palias, Company.employees).\
                  filter(or_(Engineer.language=='java', Manager.hair=='pointy'))

      See also

      Basic Control of Which Tables are Queried - newly updated documentation for polymorphic loading control.

      #2333

      of_type() works with alias(), with_polymorphic(), any(), has(), joinedload(), subqueryload(), contains_eager()

      The PropComparator.of_type() method is used to specify a specific subtype to use when constructing SQL expressions along a relationship() that has a polymorphic mapping as its target. This method can now be used to target any number of target subtypes, by combining it with the new with_polymorphic() function:

      # use eager loading in conjunction with with_polymorphic targets
      Job_P = with_polymorphic(Job, [SubJob, ExtraJob], aliased=True)
      q = s.query(DataContainer).\
                  join(DataContainer.jobs.of_type(Job_P)).\
                      options(contains_eager(DataContainer.jobs.of_type(Job_P)))

      The method now works equally well in most places a regular relationship attribute is accepted, including with loader functions like joinedload(), subqueryload(), contains_eager(), and comparison methods like PropComparator.any() and PropComparator.has():

      # use eager loading in conjunction with with_polymorphic targets
      Job_P = with_polymorphic(Job, [SubJob, ExtraJob], aliased=True)
      q = s.query(DataContainer).\
                  join(DataContainer.jobs.of_type(Job_P)).\
                      options(contains_eager(DataContainer.jobs.of_type(Job_P)))
      
      # pass subclasses to eager loads (implicitly applies with_polymorphic)
      q = s.query(ParentThing).\
                      options(
                          joinedload_all(
                              ParentThing.container,
                              DataContainer.jobs.of_type(SubJob)
                      ))
      
      # control self-referential aliasing with any()/has()
      Job_A = aliased(Job)
      q = s.query(Job).join(DataContainer.jobs).\
                      filter(
                          DataContainer.jobs.of_type(Job_A).\
                              any(and_(Job_A.id < Job.id, Job_A.type=='fred')
                          )
                      )

      #2438 #1106

      Events Can Be Applied to Unmapped Superclasses

      Mapper and instance events can now be associated with an unmapped superclass, where those events will be propagated to subclasses as those subclasses are mapped. The propagate=True flag should be used. This feature allows events to be associated with a declarative base class:

      from sqlalchemy.ext.declarative import declarative_base
      
      Base = declarative_base()
      
      @event.listens_for(Base, "load", propagate=True)
      def on_load(target, context):
          print "New instance loaded:", target
      
      # on_load() will be applied to SomeClass
      class SomeClass(Base):
          __tablename__ = 'sometable'
      
          # ...

      #2585

      Declarative Distinguishes Between Modules/Packages

      A key feature of Declarative is the ability to refer to other mapped classes using their string name. The registry of class names is now sensitive to the owning module and package of a given class. The classes can be referred to via dotted name in expressions:

      class Snack(Base):
          # ...
      
          peanuts = relationship("nuts.Peanut",
                  primaryjoin="nuts.Peanut.snack_id == Snack.id")

      The resolution allows that any full or partial disambiguating package name can be used. If the path to a particular class is still ambiguous, an error is raised.

      #2338

      New DeferredReflection Feature in Declarative

      The “deferred reflection” example has been moved to a supported feature within Declarative. This feature allows the construction of declarative mapped classes with only placeholder Table metadata, until a prepare() step is called, given an Engine with which to reflect fully all tables and establish actual mappings. The system supports overriding of columns, single and joined inheritance, as well as distinct bases-per-engine. A full declarative configuration can now be created against an existing table that is assembled upon engine creation time in one step:

      class ReflectedOne(DeferredReflection, Base):
          __abstract__ = True
      
      class ReflectedTwo(DeferredReflection, Base):
          __abstract__ = True
      
      class MyClass(ReflectedOne):
          __tablename__ = 'mytable'
      
      class MyOtherClass(ReflectedOne):
          __tablename__ = 'myothertable'
      
      class YetAnotherClass(ReflectedTwo):
          __tablename__ = 'yetanothertable'
      
      ReflectedOne.prepare(engine_one)
      ReflectedTwo.prepare(engine_two)

      #2485

      ORM Classes Now Accepted by Core Constructs

      While the SQL expressions used with Query.filter(), such as User.id == 5, have always been compatible for use with core constructs such as select(), the mapped class itself would not be recognized when passed to select(), Select.select_from(), or Select.correlate(). A new SQL registration system allows a mapped class to be accepted as a FROM clause within the core:

      from sqlalchemy import select
      
      stmt = select([User]).where(User.id == 5)

      Above, the mapped User class will expand into the Table to which User is mapped.

      #2245

      Query.update() supports UPDATE..FROM

      The new UPDATE..FROM mechanics work in query.update(). Below, we emit an UPDATE against SomeEntity, adding a FROM clause (or equivalent, depending on backend) against SomeOtherEntity:

      query(SomeEntity).\
          filter(SomeEntity.id==SomeOtherEntity.id).\
          filter(SomeOtherEntity.foo=='bar').\
          update({"data":"x"})

      In particular, updates to joined-inheritance entities are supported, provided the target of the UPDATE is local to the table being filtered on, or if the parent and child tables are mixed, they are joined explicitly in the query. Below, given Engineer as a joined subclass of Person:

      query(Engineer).\
              filter(Person.id==Engineer.id).\
              filter(Person.name=='dilbert').\
              update({"engineer_data":"java"})

      would produce:

      UPDATE engineer SET engineer_data='java' FROM person
      WHERE person.id=engineer.id AND person.name='dilbert'

      #2365

      rollback() will only roll back “dirty” objects from a begin_nested()

      A behavioral change that should improve efficiency for those users using SAVEPOINT via Session.begin_nested() - upon rollback(), only those objects that were made dirty since the last flush will be expired, the rest of the Session remains intact. This because a ROLLBACK to a SAVEPOINT does not terminate the containing transaction’s isolation, so no expiry is needed except for those changes that were not flushed in the current transaction.

      #2452

      Caching Example now uses dogpile.cache

      The caching example now uses dogpile.cache. Dogpile.cache is a rewrite of the caching portion of Beaker, featuring vastly simpler and faster operation, as well as support for distributed locking.

      Note that the SQLAlchemy APIs used by the Dogpile example as well as the previous Beaker example have changed slightly, in particular this change is needed as illustrated in the Beaker example:

      --- examples/beaker_caching/caching_query.py
      +++ examples/beaker_caching/caching_query.py
      @@ -222,7 +222,8 @@
      
               """
               if query._current_path:
      -            mapper, key = query._current_path[-2:]
      +            mapper, prop = query._current_path[-2:]
      +            key = prop.key
      
                   for cls in mapper.class_.__mro__:
                       if (cls, key) in self._relationship_options:

      See also

      dogpile_caching

      #2589

      New Core Features

      Fully extensible, type-level operator support in Core

      The Core has to date never had any system of adding support for new SQL operators to Column and other expression constructs, other than the ColumnOperators.op() method which is “just enough” to make things work. There has also never been any system in place for Core which allows the behavior of existing operators to be overridden. Up until now, the only way operators could be flexibly redefined was in the ORM layer, using column_property() given a comparator_factory argument. Third party libraries like GeoAlchemy therefore were forced to be ORM-centric and rely upon an array of hacks to apply new operations as well as to get them to propagate correctly.

      The new operator system in Core adds the one hook that’s been missing all along, which is to associate new and overridden operators with types. Since after all, it’s not really a column, CAST operator, or SQL function that really drives what kinds of operations are present, it’s the type of the expression. The implementation details are minimal - only a few extra methods are added to the core ColumnElement type so that it consults its TypeEngine object for an optional set of operators. New or revised operations can be associated with any type, either via subclassing of an existing type, by using TypeDecorator, or “globally across-the-board” by attaching a new TypeEngine.Comparator object to an existing type class.

      For example, to add logarithm support to Numeric types:

      from sqlalchemy.types import Numeric
      from sqlalchemy.sql import func
      
      class CustomNumeric(Numeric):
          class comparator_factory(Numeric.Comparator):
              def log(self, other):
                  return func.log(self.expr, other)

      The new type is usable like any other type:

      data = Table('data', metadata,
                Column('id', Integer, primary_key=True),
                Column('x', CustomNumeric(10, 5)),
                Column('y', CustomNumeric(10, 5))
           )
      
      stmt = select([data.c.x.log(data.c.y)]).where(data.c.x.log(2) < value)
      print conn.execute(stmt).fetchall()

      New features which have come from this immediately include support for Postgresql’s HSTORE type, as well as new operations associated with Postgresql’s ARRAY type. It also paves the way for existing types to acquire lots more operators that are specific to those types, such as more string, integer and date operators.

      #2547

      Type Expressions

      SQL expressions can now be associated with types. Historically, TypeEngine has always allowed Python-side functions which receive both bound parameters as well as result row values, passing them through a Python side conversion function on the way to/back from the database. The new feature allows similar functionality, except on the database side:

      from sqlalchemy.types import String
      from sqlalchemy import func, Table, Column, MetaData
      
      class LowerString(String):
          def bind_expression(self, bindvalue):
              return func.lower(bindvalue)
      
          def column_expression(self, col):
              return func.lower(col)
      
      metadata = MetaData()
      test_table = Table(
              'test_table',
              metadata,
              Column('data', LowerString)
      )

      Above, the LowerString type defines a SQL expression that will be emitted whenever the test_table.c.data column is rendered in the columns clause of a SELECT statement:

      >>> print select([test_table]).where(test_table.c.data == 'HI')
      SELECT lower(test_table.data) AS data
      FROM test_table
      WHERE test_table.data = lower(:data_1)

      This feature is also used heavily by the new release of GeoAlchemy, to embed PostGIS expressions inline in SQL based on type rules.

      #1534

      Core Inspection System

      The inspect() function introduced in New Class/Object Inspection System also applies to the core. Applied to an Engine it produces an Inspector object:

      from sqlalchemy import inspect
      from sqlalchemy import create_engine
      
      engine = create_engine("postgresql://scott:tiger@localhost/test")
      insp = inspect(engine)
      print insp.get_table_names()

      It can also be applied to any ClauseElement, which returns the ClauseElement itself, such as Table, Column, Select, etc. This allows it to work fluently between Core and ORM constructs.

      New Method Select.correlate_except()

      select() now has a method Select.correlate_except() which specifies “correlate on all FROM clauses except those specified”. It can be used for mapping scenarios where a related subquery should correlate normally, except against a particular target selectable:

      class SnortEvent(Base):
          __tablename__ = "event"
      
          id = Column(Integer, primary_key=True)
          signature = Column(Integer, ForeignKey("signature.id"))
      
          signatures = relationship("Signature", lazy=False)
      
      class Signature(Base):
          __tablename__ = "signature"
      
          id = Column(Integer, primary_key=True)
      
          sig_count = column_property(
                          select([func.count('*')]).\
                              where(SnortEvent.signature == id).
                              correlate_except(SnortEvent)
                      )

      Postgresql HSTORE type

      Support for Postgresql’s HSTORE type is now available as postgresql.HSTORE. This type makes great usage of the new operator system to provide a full range of operators for HSTORE types, including index access, concatenation, and containment methods such as has_key(), has_any(), and matrix():

      from sqlalchemy.dialects.postgresql import HSTORE
      
      data = Table('data_table', metadata,
              Column('id', Integer, primary_key=True),
              Column('hstore_data', HSTORE)
          )
      
      engine.execute(
          select([data.c.hstore_data['some_key']])
      ).scalar()
      
      engine.execute(
          select([data.c.hstore_data.matrix()])
      ).scalar()

      #2606

      Enhanced Postgresql ARRAY type

      The postgresql.ARRAY type will accept an optional “dimension” argument, pinning it to a fixed number of dimensions and greatly improving efficiency when retrieving results:

      # old way, still works since PG supports N-dimensions per row:
      Column("my_array", postgresql.ARRAY(Integer))
      
      # new way, will render ARRAY with correct number of [] in DDL,
      # will process binds and results more efficiently as we don't need
      # to guess how many levels deep to go
      Column("my_array", postgresql.ARRAY(Integer, dimensions=2))

      The type also introduces new operators, using the new type-specific operator framework. New operations include indexed access:

      result = conn.execute(
          select([mytable.c.arraycol[2]])
      )

      slice access in SELECT:

      result = conn.execute(
          select([mytable.c.arraycol[2:4]])
      )

      slice updates in UPDATE:

      conn.execute(
          mytable.update().values({mytable.c.arraycol[2:3]: [7, 8]})
      )

      freestanding array literals:

      >>> from sqlalchemy.dialects import postgresql
      >>> conn.scalar(
      ...    select([
      ...        postgresql.array([1, 2]) + postgresql.array([3, 4, 5])
      ...    ])
      ...  )
      [1, 2, 3, 4, 5]

      array concatenation, where below, the right side [4, 5, 6] is coerced into an array literal:

      select([mytable.c.arraycol + [4, 5, 6]])

      #2441

      New, configurable DATE, TIME types for SQLite

      SQLite has no built-in DATE, TIME, or DATETIME types, and instead provides some support for storage of date and time values either as strings or integers. The date and time types for SQLite are enhanced in 0.8 to be much more configurable as to the specific format, including that the “microseconds” portion is optional, as well as pretty much everything else.

      Column('sometimestamp', sqlite.DATETIME(truncate_microseconds=True))
      Column('sometimestamp', sqlite.DATETIME(
                          storage_format=(
                                      "%(year)04d%(month)02d%(day)02d"
                                      "%(hour)02d%(minute)02d%(second)02d%(microsecond)06d"
                          ),
                          regexp="(\d{4})(\d{2})(\d{2})(\d{2})(\d{2})(\d{2})(\d{6})"
                          )
                  )
      Column('somedate', sqlite.DATE(
                          storage_format="%(month)02d/%(day)02d/%(year)04d",
                          regexp="(?P<month>\d+)/(?P<day>\d+)/(?P<year>\d+)",
                      )
                  )

      Huge thanks to Nate Dub for the sprinting on this at Pycon 2012.

      #2363

      “COLLATE” supported across all dialects; in particular MySQL, Postgresql, SQLite

      The “collate” keyword, long accepted by the MySQL dialect, is now established on all String types and will render on any backend, including when features such as MetaData.create_all() and cast() are used:

      >>> stmt = select([cast(sometable.c.somechar, String(20, collation='utf8'))])
      >>> print stmt
      SELECT CAST(sometable.somechar AS VARCHAR(20) COLLATE "utf8") AS anon_1
      FROM sometable

      See also

      String

      #2276

      “Prefixes” now supported for update(), delete()

      Geared towards MySQL, a “prefix” can be rendered within any of these constructs. E.g.:

      stmt = table.delete().prefix_with("LOW_PRIORITY", dialect="mysql")
      
      
      stmt = table.update().prefix_with("LOW_PRIORITY", dialect="mysql")

      The method is new for update() and delete(), joining those which already existed on insert(), select() and Query.

      #2431

      Behavioral Changes

      The consideration of a “pending” object as an “orphan” has been made more aggressive

      This is a late add to the 0.8 series, however it is hoped that the new behavior is generally more consistent and intuitive in a wider variety of situations. The ORM has since at least version 0.4 included behavior such that an object that’s “pending”, meaning that it’s associated with a Session but hasn’t been inserted into the database yet, is automatically expunged from the Session when it becomes an “orphan”, which means it has been de-associated with a parent object that refers to it with delete-orphan cascade on the configured relationship(). This behavior is intended to approximately mirror the behavior of a persistent (that is, already inserted) object, where the ORM will emit a DELETE for such objects that become orphans based on the interception of detachment events.

      The behavioral change comes into play for objects that are referred to by multiple kinds of parents that each specify delete-orphan; the typical example is an association object that bridges two other kinds of objects in a many-to-many pattern. Previously, the behavior was such that the pending object would be expunged only when de-associated with all of its parents. With the behavioral change, the pending object is expunged as soon as it is de-associated from any of the parents that it was previously associated with. This behavior is intended to more closely match that of persistent objects, which are deleted as soon as they are de-associated from any parent.

      The rationale for the older behavior dates back at least to version 0.4, and was basically a defensive decision to try to alleviate confusion when an object was still being constructed for INSERT. But the reality is that the object is re-associated with the Session as soon as it is attached to any new parent in any case.

      It’s still possible to flush an object that is not associated with all of its required parents, if the object was either never associated with those parents in the first place, or if it was expunged and then re-associated with a Session via a subsequent attachment event, yet still isn’t fully associated. In this situation, it is expected that the database would emit an integrity error, as there are likely NOT NULL foreign key columns that are unpopulated. The ORM makes the decision to let these INSERT attempts occur, based on the judgment that an object which is only partially associated with its required parents, but has been actively associated with some of them, is more often than not a user error rather than an intentional omission which should be silently skipped - silently skipping the INSERT here would make user errors of this nature very hard to debug.

      The old behavior, for applications that might have been relying upon it, can be re-enabled for any Mapper by specifying the flag legacy_is_orphan as a mapper option.

      The new behavior allows the following test case to work:

      from sqlalchemy import Column, Integer, String, ForeignKey
      from sqlalchemy.orm import relationship, backref
      from sqlalchemy.ext.declarative import declarative_base
      
      Base = declarative_base()
      
      class User(Base):
          __tablename__ = 'user'
          id = Column(Integer, primary_key=True)
          name = Column(String(64))
      
      class UserKeyword(Base):
          __tablename__ = 'user_keyword'
          user_id = Column(Integer, ForeignKey('user.id'), primary_key=True)
          keyword_id = Column(Integer, ForeignKey('keyword.id'), primary_key=True)
      
          user = relationship(User,
                      backref=backref("user_keywords",
                                      cascade="all, delete-orphan")
                  )
      
          keyword = relationship("Keyword",
                      backref=backref("user_keywords",
                                      cascade="all, delete-orphan")
                  )
      
          # uncomment this to enable the old behavior
          # __mapper_args__ = {"legacy_is_orphan": True}
      
      class Keyword(Base):
          __tablename__ = 'keyword'
          id = Column(Integer, primary_key=True)
          keyword = Column('keyword', String(64))
      
      from sqlalchemy import create_engine
      from sqlalchemy.orm import Session
      
      # note we're using Postgresql to ensure that referential integrity
      # is enforced, for demonstration purposes.
      e = create_engine("postgresql://scott:tiger@localhost/test", echo=True)
      
      Base.metadata.drop_all(e)
      Base.metadata.create_all(e)
      
      session = Session(e)
      
      u1 = User(name="u1")
      k1 = Keyword(keyword="k1")
      
      session.add_all([u1, k1])
      
      uk1 = UserKeyword(keyword=k1, user=u1)
      
      # previously, if session.flush() were called here,
      # this operation would succeed, but if session.flush()
      # were not called here, the operation fails with an
      # integrity error.
      # session.flush()
      del u1.user_keywords[0]
      
      session.commit()

      #2655

      The after_attach event fires after the item is associated with the Session instead of before; before_attach added

      Event handlers which use after_attach can now assume the given instance is associated with the given session:

      @event.listens_for(Session, "after_attach")
      def after_attach(session, instance):
          assert instance in session

      Some use cases require that it work this way. However, other use cases require that the item is not yet part of the session, such as when a query, intended to load some state required for an instance, emits autoflush first and would otherwise prematurely flush the target object. Those use cases should use the new “before_attach” event:

      @event.listens_for(Session, "before_attach")
      def before_attach(session, instance):
          instance.some_necessary_attribute = session.query(Widget).\
                                                  filter_by(widget_name=instance.widget_name).\
                                                  first()

      #2464

      Query now auto-correlates like a select() does

      Previously it was necessary to call Query.correlate() in order to have a column- or WHERE-subquery correlate to the parent:

      subq = session.query(Entity.value).\
                      filter(Entity.id==Parent.entity_id).\
                      correlate(Parent).\
                      as_scalar()
      session.query(Parent).filter(subq=="some value")

      This was the opposite behavior of a plain select() construct which would assume auto-correlation by default. The above statement in 0.8 will correlate automatically:

      subq = session.query(Entity.value).\
                      filter(Entity.id==Parent.entity_id).\
                      as_scalar()
      session.query(Parent).filter(subq=="some value")

      Like in select(), correlation can be disabled by calling query.correlate(None), or it can be set manually by passing an entity, query.correlate(someentity).
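      
      For example, a sketch reusing the Entity/Parent names from above to turn auto-correlation off for the subquery:
      
      subq = session.query(Entity.value).\
                      filter(Entity.id==Parent.entity_id).\
                      correlate(None).\
                      as_scalar()
      session.query(Parent).filter(subq=="some value")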

      #2179

      Correlation is now always context-specific

      To allow a wider variety of correlation scenarios, the behavior of Select.correlate() and Query.correlate() has changed slightly such that the SELECT statement will omit the “correlated” target from the FROM clause only if the statement is actually used in that context. Additionally, it’s no longer possible for a SELECT statement that’s placed as a FROM in an enclosing SELECT statement to “correlate” (i.e. omit) a FROM clause.

      This change only makes things better as far as rendering SQL, in that it’s no longer possible to render illegal SQL where there are insufficient FROM objects relative to what’s being selected:

      from sqlalchemy.sql import table, column, select
      
      t1 = table('t1', column('x'))
      t2 = table('t2', column('y'))
      s = select([t1, t2]).correlate(t1)
      
      print(s)

      Prior to this change, the above would return:

      SELECT t1.x, t2.y FROM t2

      which is invalid SQL as “t1” is not referred to in any FROM clause.

      Now, in the absence of an enclosing SELECT, it returns:

      SELECT t1.x, t2.y FROM t1, t2

      Within a SELECT, the correlation takes effect as expected:

      s2 = select([t1, t2]).where(t1.c.x == t2.c.y).where(t1.c.x == s)
      
      print(s2)
      
      SELECT t1.x, t2.y FROM t1, t2
      WHERE t1.x = t2.y AND t1.x =
          (SELECT t1.x, t2.y FROM t2)

      This change is not expected to impact any existing applications, as the correlation behavior remains identical for properly constructed expressions. Only an application that relies, most likely within a testing scenario, on the invalid string output of a correlated SELECT used in a non-correlating context would see any change.

      #2668

      create_all() and drop_all() will now honor an empty list as such

      The methods MetaData.create_all() and MetaData.drop_all() will now accept a list of Table objects that is empty, and will not emit any CREATE or DROP statements. Previously, an empty list was interpreted the same as passing None for a collection, and CREATE/DROP would be emitted for all items unconditionally.

      This is a bug fix but some applications may have been relying upon the previous behavior.
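      
      A minimal sketch, assuming metadata and engine are already set up:
      
      # passing an explicitly empty "tables" collection now emits no DDL at all
      metadata.create_all(engine, tables=[])
      metadata.drop_all(engine, tables=[])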

      #2664

      Repaired the Event Targeting of InstrumentationEvents

      The InstrumentationEvents series of events has always documented that events will be fired off only according to the actual class passed as a target. Through 0.7, this wasn’t the case, and any event listener applied to InstrumentationEvents would be invoked for all classes mapped. In 0.8, additional logic has been added so that the events will only invoke for those classes sent in. The propagate flag here is set to True by default, as class instrumentation events are typically used to intercept classes that aren’t yet created.
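      
      A hedged sketch (Base here stands in for whichever class hierarchy you care about); under 0.8 the listener fires only for that class and its descendants:
      
      from sqlalchemy import event
      from sqlalchemy.ext.declarative import declarative_base
      
      Base = declarative_base()
      
      # invoked only for Base and classes descending from it in 0.8,
      # rather than for every mapped class as in 0.7
      @event.listens_for(Base, "class_instrument")
      def on_class_instrument(cls):
          print "instrumented class:", cls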

      #2590

      No more magic coercion of “=” to IN when comparing to subquery in MS-SQL

      We found a very old behavior in the MSSQL dialect which would attempt to rescue users from themselves when doing something like this:

      scalar_subq = select([someothertable.c.id]).where(someothertable.c.data=='foo')
      select([sometable]).where(sometable.c.id==scalar_subq)

      SQL Server doesn’t allow an equality comparison to a scalar SELECT, that is, “x = (SELECT something)”. The MSSQL dialect would convert this to an IN. The same thing would happen, however, upon a comparison like “(SELECT something) = x”, and overall this level of guessing is outside of SQLAlchemy’s usual scope, so the behavior is removed.
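      
      A sketch of the explicit spelling which works the same on SQL Server as everywhere else, using the same illustrative tables:
      
      scalar_subq = select([someothertable.c.id]).where(someothertable.c.data=='foo')
      
      # state the intent directly rather than relying on dialect-level guessing
      select([sometable]).where(sometable.c.id.in_(scalar_subq))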

      #2277

      Fixed the behavior of Session.is_modified()

      The Session.is_modified() method accepts an argument passive which basically should not be necessary; the argument in all cases should be the value True. When left at its default of False it would have the effect of hitting the database, often triggering autoflush which would itself change the results. In 0.8 the passive argument will have no effect, and unloaded attributes will never be checked for history, since by definition there can be no pending state change on an unloaded attribute.
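      
      A minimal sketch (the User class and session are assumed from elsewhere); the passive argument can simply be dropped in 0.8:
      
      someobject = session.query(User).first()
      someobject.name = "new name"
      
      # no need to pass passive=True any more; unloaded attributes are never checked
      assert session.is_modified(someobject)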

      #2320

      Column.key is honored in the Select.c attribute of select() with Select.apply_labels()

      Users of the expression system know that Select.apply_labels() prepends the table name to each column name, affecting the names that are available from Select.c:

      s = select([table1]).apply_labels()
      s.c.table1_col1
      s.c.table1_col2

      Before 0.8, if the Column had a different Column.key, this key would be ignored when Select.apply_labels() was used, inconsistently versus when it was not:

      # before 0.8
      table1 = Table('t1', metadata,
          Column('col1', Integer, key='column_one')
      )
      s = select([table1])
      s.c.column_one # would be accessible like this
      s.c.col1 # would raise AttributeError
      
      s = select([table1]).apply_labels()
      s.c.table1_column_one # would raise AttributeError
      s.c.table1_col1 # would be accessible like this

      In 0.8, Column.key is honored in both cases:

      # with 0.8
      table1 = Table('t1', metadata,
          Column('col1', Integer, key='column_one')
      )
      s = select([table1])
      s.c.column_one # works
      s.c.col1 # AttributeError
      
      s = select([table1]).apply_labels()
      s.c.table1_column_one # works
      s.c.table1_col1 # AttributeError

      All other behavior regarding “name” and “key” is the same, including that the rendered SQL will still use the form <tablename>_<colname> - the emphasis here was on preventing the Column.key contents from being rendered into the SELECT statement, so that there are no issues with special or non-ASCII characters used in the Column.key.

      #2397

      single_parent warning is now an error

      A relationship() that is many-to-one or many-to-many and specifies “cascade=’all, delete-orphan’”, which is an awkward but nonetheless supported use case (with restrictions), will now raise an error if the relationship does not specify the single_parent=True option. Previously it would only emit a warning, but a failure would follow almost immediately within the attribute system in any case.
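      
      A hedged sketch of the configuration the new error asks for, using illustrative Parent/Child classes (imports and the Parent class follow the pattern of the declarative examples above):
      
      class Child(Base):
          __tablename__ = 'child'
          id = Column(Integer, primary_key=True)
          parent_id = Column(Integer, ForeignKey('parent.id'))
      
          # a many-to-one with delete-orphan cascade now requires single_parent=True
          parent = relationship("Parent",
                          cascade="all, delete-orphan",
                          single_parent=True,
                          backref="children")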

      #2405

      Adding the inspector argument to the column_reflect event

      0.7 added a new event called column_reflect, provided so that the reflection of columns could be augmented as each one is reflected. We got this event slightly wrong in that the event gave no way to get at the current Inspector and Connection being used for the reflection, in the case that additional information from the database is needed. As this is a new event not widely used yet, we’re adding the inspector argument into it directly:

      @event.listens_for(Table, "column_reflect")
      def listen_for_col(inspector, table, column_info):
          # the current Inspector is now available here;
          # modify column_info in place as needed
          pass

      #2418

      Disabling auto-detect of collations, casing for MySQL

      The MySQL dialect does two calls, one very expensive, to load all possible collations from the database as well as information on casing, the first time an Engine connects. Neither of these collections is used for any SQLAlchemy functions, so these calls will be changed to no longer be emitted automatically. Applications that might have relied on these collections being present on engine.dialect will need to call upon _detect_collations() and _detect_casing() directly.
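      
      A sketch of invoking them directly; these are private, underscored APIs, so the assumption here is that each accepts a connection, as in 0.7:
      
      conn = engine.connect()
      engine.dialect._detect_collations(conn)
      engine.dialect._detect_casing(conn)
      conn.close()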

      #2404

      “Unconsumed column names” warning becomes an exception

      Referring to a non-existent column in an insert() or update() construct will raise an error instead of a warning:

      t1 = table('t1', column('x'))
      t1.insert().values(x=5, z=5) # raises "Unconsumed column names: z"

      #2415

      Inspector.get_primary_keys() is deprecated, use Inspector.get_pk_constraint

      These two methods on Inspector were redundant, where get_primary_keys() would return the same information as get_pk_constraint() minus the name of the constraint:

      >>> insp.get_primary_keys()
      ["a", "b"]
      
      >>> insp.get_pk_constraint()
      {"name":"pk_constraint", "constrained_columns":["a", "b"]}

      #2422

      Case-insensitive result row names will be disabled in most cases

      A very old behavior, the column names in RowProxy were always compared case-insensitively:

      >>> row = result.fetchone()
      >>> row['foo'] == row['FOO'] == row['Foo']
      True

      This was for the benefit of a few dialects which in the early days needed this, like Oracle and Firebird, but in modern usage we have more accurate ways of dealing with the case-insensitive behavior of these two platforms.

      Going forward, this behavior will be available only optionally, by passing the flag case_sensitive=False to create_engine(); otherwise, column names requested from the row must match with respect to casing.
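      
      A minimal sketch of opting back in to the old behavior (the URL is illustrative):
      
      from sqlalchemy import create_engine
      
      engine = create_engine(
          "postgresql://scott:tiger@localhost/test",
          case_sensitive=False
      )
      row = engine.execute("select 1 as foo").fetchone()
      assert row['FOO'] == row['foo']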

      #2423

      InstrumentationManager and alternate class instrumentation is now an extension

      The sqlalchemy.orm.interfaces.InstrumentationManager class is moved to sqlalchemy.ext.instrumentation.InstrumentationManager. The “alternate instrumentation” system was built for the benefit of a very small number of installations that needed to work with existing or unusual class instrumentation systems, and generally is very seldom used. The complexity of this system has been exported to an extension module. It remains unused until imported, typically when a third party library imports InstrumentationManager, at which point it is injected back into sqlalchemy.orm by replacing the default InstrumentationFactory with ExtendedInstrumentationRegistry.

      Removed

      SQLSoup

      SQLSoup is a handy package that presents an alternative interface on top of the SQLAlchemy ORM. SQLSoup is now moved into its own project and documented/released separately; see https://bitbucket.org/zzzeek/sqlsoup.

      SQLSoup is a very simple tool that could also benefit from contributors who are interested in its style of usage.

      #2262

      MutableType

      The older “mutable” system within the SQLAlchemy ORM has been removed. This refers to the MutableType interface which was applied to types such as PickleType and conditionally to TypeDecorator, and since very early SQLAlchemy versions has provided a way for the ORM to detect changes in so-called “mutable” data structures such as JSON structures and pickled objects. However, the implementation was never reasonable and forced a very inefficient mode of usage on the unit-of-work which caused an expensive scan of all objects to take place during flush. In 0.7, the sqlalchemy.ext.mutable extension was introduced so that user-defined datatypes can appropriately send events to the unit of work as changes occur.

      Today, usage of MutableType is expected to be low, as warnings have been in place for some years now regarding its inefficiency.
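      
      A brief sketch of the sqlalchemy.ext.mutable replacement (the Account class and its column are illustrative):
      
      from sqlalchemy import Column, Integer, PickleType
      from sqlalchemy.ext.declarative import declarative_base
      from sqlalchemy.ext.mutable import MutableDict
      
      Base = declarative_base()
      
      class Account(Base):
          __tablename__ = 'account'
          id = Column(Integer, primary_key=True)
      
          # MutableDict tracks in-place changes to the dictionary and notifies
          # the unit of work, replacing the old flush-time MutableType scan
          preferences = Column(MutableDict.as_mutable(PickleType))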

      #2442

      sqlalchemy.exceptions (has been sqlalchemy.exc for years)

      We had left in an alias sqlalchemy.exceptions to attempt to make it slightly easier for some very old libraries that hadn’t yet been upgraded to use sqlalchemy.exc. Some users are still being confused by it, however, so in 0.8 we’re taking it out entirely to eliminate any of that confusion.

      #2433

      SQLAlchemy-0.8.4/doc/contents.html0000644000076500000240000010422712251147465017536 0ustar classicstaff00000000000000 Table of Contents — SQLAlchemy 0.8 Documentation

      Table of Contents

      Full table of contents. For a high level overview of all documentation, see SQLAlchemy Documentation.

      Indices and tables

      SQLAlchemy-0.8.4/doc/copyright.html0000644000076500000240000001247512251147465017714 0ustar classicstaff00000000000000 Appendix: Copyright — SQLAlchemy 0.8 Documentation

      Appendix: Copyright

      SQLAlchemy-0.8.4/doc/core/0000755000076500000240000000000012251151573015730 5ustar classicstaff00000000000000SQLAlchemy-0.8.4/doc/core/compiler.html0000644000076500000240000014462212251147465020446 0ustar classicstaff00000000000000 Custom SQL Constructs and Compilation Extension — SQLAlchemy 0.8 Documentation

      Custom SQL Constructs and Compilation Extension

      Provides an API for creation of custom ClauseElements and compilers.

      Synopsis

      Usage involves the creation of one or more ClauseElement subclasses and one or more callables defining its compilation:

      from sqlalchemy.ext.compiler import compiles
      from sqlalchemy.sql.expression import ColumnClause
      
      class MyColumn(ColumnClause):
          pass
      
      @compiles(MyColumn)
      def compile_mycolumn(element, compiler, **kw):
          return "[%s]" % element.name

      Above, MyColumn extends ColumnClause, the base expression element for named column objects. The compiles decorator registers itself with the MyColumn class so that it is invoked when the object is compiled to a string:

      from sqlalchemy import select
      
      s = select([MyColumn('x'), MyColumn('y')])
      print str(s)

      Produces:

      SELECT [x], [y]

      Dialect-specific compilation rules

      Compilers can also be made dialect-specific. The appropriate compiler will be invoked for the dialect in use:

      from sqlalchemy.schema import DDLElement
      
      class AlterColumn(DDLElement):
      
          def __init__(self, column, cmd):
              self.column = column
              self.cmd = cmd
      
      @compiles(AlterColumn)
      def visit_alter_column(element, compiler, **kw):
          return "ALTER COLUMN %s ..." % element.column.name
      
      @compiles(AlterColumn, 'postgresql')
      def visit_alter_column(element, compiler, **kw):
          return "ALTER TABLE %s ALTER COLUMN %s ..." % (element.table.name, element.column.name)

      The second visit_alter_column will be invoked when any postgresql dialect is used.

      Compiling sub-elements of a custom expression construct

      The compiler argument is the Compiled object in use. This object can be inspected for any information about the in-progress compilation, including compiler.dialect, compiler.statement etc. The SQLCompiler and DDLCompiler both include a process() method which can be used for compilation of embedded attributes:

      from sqlalchemy.sql.expression import Executable, ClauseElement
      
      class InsertFromSelect(Executable, ClauseElement):
          def __init__(self, table, select):
              self.table = table
              self.select = select
      
      @compiles(InsertFromSelect)
      def visit_insert_from_select(element, compiler, **kw):
          return "INSERT INTO %s (%s)" % (
              compiler.process(element.table, asfrom=True),
              compiler.process(element.select)
          )
      
      insert = InsertFromSelect(t1, select([t1]).where(t1.c.x>5))
      print insert

      Produces:

      "INSERT INTO mytable (SELECT mytable.x, mytable.y, mytable.z FROM mytable WHERE mytable.x > :x_1)"

      Note

      The above InsertFromSelect construct is only an example; this actual functionality is already available using the Insert.from_select() method.
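      
      A sketch of that built-in equivalent (t1 as above; the column names are illustrative):
      
      ins = t1.insert().from_select(['x', 'y', 'z'], select([t1]).where(t1.c.x > 5))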

      Note

      The above InsertFromSelect construct probably wants to have “autocommit” enabled. See Enabling Autocommit on a Construct for this step.

      Cross Compiling between SQL and DDL compilers

      SQL and DDL constructs are each compiled using different base compilers - SQLCompiler and DDLCompiler. A common need is to access the compilation rules of SQL expressions from within a DDL expression. The DDLCompiler includes an accessor sql_compiler for this reason, such as below where we generate a CHECK constraint that embeds a SQL expression:

      @compiles(MyConstraint)
      def compile_my_constraint(constraint, ddlcompiler, **kw):
          return "CONSTRAINT %s CHECK (%s)" % (
              constraint.name,
              ddlcompiler.sql_compiler.process(constraint.expression)
          )

      Enabling Autocommit on a Construct

      Recall from the section Understanding Autocommit that the Engine, when asked to execute a construct in the absence of a user-defined transaction, detects if the given construct represents DML or DDL, that is, a data modification or data definition statement, which requires (or may require, in the case of DDL) that the transaction generated by the DBAPI be committed (recall that DBAPI always has a transaction going on regardless of what SQLAlchemy does). Checking for this is actually accomplished by checking for the “autocommit” execution option on the construct. When building a construct like an INSERT derivation, a new DDL type, or perhaps a stored procedure that alters data, the “autocommit” option needs to be set in order for the statement to function with “connectionless” execution (as described in Connectionless Execution, Implicit Execution).

      Currently a quick way to do this is to subclass Executable, then add the “autocommit” flag to the _execution_options dictionary (note this is a “frozen” dictionary which supplies a generative union() method):

      from sqlalchemy.sql.expression import Executable, ClauseElement
      
      class MyInsertThing(Executable, ClauseElement):
          _execution_options = \
              Executable._execution_options.union({'autocommit': True})

      More succinctly, if the construct is truly similar to an INSERT, UPDATE, or DELETE, UpdateBase can be used, which already is a subclass of Executable, ClauseElement and includes the autocommit flag:

      from sqlalchemy.sql.expression import UpdateBase
      
      class MyInsertThing(UpdateBase):
          def __init__(self, ...):
              ...

      DDL elements that subclass DDLElement already have the “autocommit” flag turned on.

      Changing the default compilation of existing constructs

      The compiler extension applies just as well to the existing constructs. When overriding the compilation of a built in SQL construct, the @compiles decorator is invoked upon the appropriate class (be sure to use the class, i.e. Insert or Select, instead of the creation function such as insert() or select()).

      Within the new compilation function, to get at the “original” compilation routine, use the appropriate visit_XXX method - this is because compiler.process() will call upon the overriding routine and cause an endless loop. For example, to add a “prefix” to all insert statements:

      from sqlalchemy.sql.expression import Insert
      
      @compiles(Insert)
      def prefix_inserts(insert, compiler, **kw):
          return compiler.visit_insert(insert.prefix_with("some prefix"), **kw)

      The above compiler will prefix all INSERT statements with “some prefix” when compiled.

      Changing Compilation of Types

      The compiler extension works for types, too, such as below where we implement the MS-SQL specific ‘max’ keyword for String/VARCHAR:

      @compiles(String, 'mssql')
      @compiles(VARCHAR, 'mssql')
      def compile_varchar(element, compiler, **kw):
          if element.length == 'max':
              return "VARCHAR('max')"
          else:
              return compiler.visit_VARCHAR(element, **kw)
      
      foo = Table('foo', metadata,
          Column('data', VARCHAR('max'))
      )

      Subclassing Guidelines

      A big part of using the compiler extension is subclassing SQLAlchemy expression constructs. To make this easier, the expression and schema packages feature a set of “bases” intended for common tasks. A synopsis is as follows:

      • ClauseElement - This is the root expression class. Any SQL expression can be derived from this base, and is probably the best choice for longer constructs such as specialized INSERT statements.

      • ColumnElement - The root of all “column-like” elements. Anything that you’d place in the “columns” clause of a SELECT statement (as well as order by and group by) can derive from this - the object will automatically have Python “comparison” behavior.

        ColumnElement classes want to have a type member which is the expression’s return type. This can be established at the instance level in the constructor, or at the class level if it’s generally constant:

        class timestamp(ColumnElement):
            type = TIMESTAMP()
      • FunctionElement - This is a hybrid of a ColumnElement and a “from clause” like object, and represents a SQL function or stored procedure type of call. Since most databases support statements along the lines of “SELECT FROM <some function>”, FunctionElement adds in the ability to be used in the FROM clause of a select() construct:

        from sqlalchemy.sql.expression import FunctionElement
        
        class coalesce(FunctionElement):
            name = 'coalesce'
        
        @compiles(coalesce)
        def compile(element, compiler, **kw):
            return "coalesce(%s)" % compiler.process(element.clauses)
        
        @compiles(coalesce, 'oracle')
        def compile(element, compiler, **kw):
            if len(element.clauses) > 2:
                raise TypeError("coalesce only supports two arguments on Oracle")
            return "nvl(%s)" % compiler.process(element.clauses)
      • DDLElement - The root of all DDL expressions, like CREATE TABLE, ALTER TABLE, etc. Compilation of DDLElement subclasses is issued by a DDLCompiler instead of a SQLCompiler. DDLElement also features Table and MetaData event hooks via the execute_at() method, allowing the construct to be invoked during CREATE TABLE and DROP TABLE sequences.

      • Executable - This is a mixin which should be used with any expression class that represents a “standalone” SQL statement that can be passed directly to an execute() method. It is already implicit within DDLElement and FunctionElement.

      Further Examples

      “UTC timestamp” function

      A function that works like “CURRENT_TIMESTAMP” except applies the appropriate conversions so that the time is in UTC time. Timestamps are best stored in relational databases as UTC, without time zones. UTC so that your database doesn’t think time has gone backwards in the hour when daylight savings ends, without timezones because timezones are like character encodings - they’re best applied only at the endpoints of an application (i.e. convert to UTC upon user input, re-apply desired timezone upon display).

      For Postgresql and Microsoft SQL Server:

      from sqlalchemy.sql import expression
      from sqlalchemy.ext.compiler import compiles
      from sqlalchemy.types import DateTime
      
      class utcnow(expression.FunctionElement):
          type = DateTime()
      
      @compiles(utcnow, 'postgresql')
      def pg_utcnow(element, compiler, **kw):
          return "TIMEZONE('utc', CURRENT_TIMESTAMP)"
      
      @compiles(utcnow, 'mssql')
      def ms_utcnow(element, compiler, **kw):
          return "GETUTCDATE()"

      Example usage:

      from sqlalchemy import (
                  Table, Column, Integer, String, DateTime, MetaData
              )
      metadata = MetaData()
      event = Table("event", metadata,
          Column("id", Integer, primary_key=True),
          Column("description", String(50), nullable=False),
          Column("timestamp", DateTime, server_default=utcnow())
      )

      “GREATEST” function

      The “GREATEST” function is given any number of arguments and returns the one that is of the highest value - it’s equivalent to Python’s max function. A SQL standard version versus a CASE based version which only accommodates two arguments:

      from sqlalchemy.sql import expression
      from sqlalchemy.ext.compiler import compiles
      from sqlalchemy.types import Numeric
      
      class greatest(expression.FunctionElement):
          type = Numeric()
          name = 'greatest'
      
      @compiles(greatest)
      def default_greatest(element, compiler, **kw):
          return compiler.visit_function(element)
      
      @compiles(greatest, 'sqlite')
      @compiles(greatest, 'mssql')
      @compiles(greatest, 'oracle')
      def case_greatest(element, compiler, **kw):
          arg1, arg2 = list(element.clauses)
          return "CASE WHEN %s > %s THEN %s ELSE %s END" % (
              compiler.process(arg1),
              compiler.process(arg2),
              compiler.process(arg1),
              compiler.process(arg2),
          )

      Example usage:

      Session.query(Account).\
              filter(
                  greatest(
                      Account.checking_balance,
                      Account.savings_balance) > 10000
              )

      “false” expression

      Render a “false” constant expression, rendering as “0” on platforms that don’t have a “false” constant:

      from sqlalchemy.sql import expression
      from sqlalchemy.ext.compiler import compiles
      
      class sql_false(expression.ColumnElement):
          pass
      
      @compiles(sql_false)
      def default_false(element, compiler, **kw):
          return "false"
      
      @compiles(sql_false, 'mssql')
      @compiles(sql_false, 'mysql')
      @compiles(sql_false, 'oracle')
      def int_false(element, compiler, **kw):
          return "0"

      Example usage:

      from sqlalchemy import select, union_all
      
      exp = union_all(
          select([users.c.name, sql_false().label("enrolled")]),
          select([customers.c.name, customers.c.enrolled])
      )
      
      sqlalchemy.ext.compiler.compiles(class_, *specs)

      Register a function as a compiler for a given ClauseElement type.

      sqlalchemy.ext.compiler.deregister(class_)

      Remove all custom compilers associated with a given ClauseElement type.

      SQLAlchemy-0.8.4/doc/core/connections.html0000644000076500000240000055653112251147465021164 0ustar classicstaff00000000000000 Working with Engines and Connections — SQLAlchemy 0.8 Documentation

      Working with Engines and Connections

      This section details direct usage of the Engine, Connection, and related objects. It’s important to note that when using the SQLAlchemy ORM, these objects are not generally accessed; instead, the Session object is used as the interface to the database. However, for applications that are built around direct usage of textual SQL statements and/or SQL expression constructs without involvement by the ORM’s higher level management services, the Engine and Connection are king (and queen?) - read on.

      Basic Usage

      Recall from Engine Configuration that an Engine is created via the create_engine() call:

      engine = create_engine('mysql://scott:tiger@localhost/test')

      The typical usage of create_engine() is once per particular database URL, held globally for the lifetime of a single application process. A single Engine manages many individual DBAPI connections on behalf of the process and is intended to be called upon in a concurrent fashion. The Engine is not synonymous to the DBAPI connect function, which represents just one connection resource - the Engine is most efficient when created just once at the module level of an application, not per-object or per-function call.

      For a multiple-process application that uses the os.fork system call, or for example the Python multiprocessing module, it’s usually required that a separate Engine be used for each child process. This is because the Engine maintains a reference to a connection pool that ultimately references DBAPI connections - these tend to not be portable across process boundaries. An Engine that is configured not to use pooling (which is achieved via the usage of NullPool) does not have this requirement.

      The engine can be used directly to issue SQL to the database. The most generic way is to first procure a connection resource, which you get via the Engine.connect() method:

      connection = engine.connect()
      result = connection.execute("select username from users")
      for row in result:
          print "username:", row['username']
      connection.close()

      The connection is an instance of Connection, which is a proxy object for an actual DBAPI connection. The DBAPI connection is retrieved from the connection pool at the point at which Connection is created.

      The returned result is an instance of ResultProxy, which references a DBAPI cursor and provides a largely compatible interface with that of the DBAPI cursor. The DBAPI cursor will be closed by the ResultProxy when all of its result rows (if any) are exhausted. A ResultProxy that returns no rows, such as that of an UPDATE statement (without any returned rows), releases cursor resources immediately upon construction.

      When the close() method is called, the referenced DBAPI connection is released to the connection pool. From the perspective of the database itself, nothing is actually “closed”, assuming pooling is in use. The pooling mechanism issues a rollback() call on the DBAPI connection so that any transactional state or locks are removed, and the connection is ready for its next usage.

      The above procedure can be performed in a shorthand way by using the execute() method of Engine itself:

      result = engine.execute("select username from users")
      for row in result:
          print "username:", row['username']

      Where above, the execute() method acquires a new Connection on its own, executes the statement with that object, and returns the ResultProxy. In this case, the ResultProxy contains a special flag known as close_with_result, which indicates that when its underlying DBAPI cursor is closed, the Connection object itself is also closed, which again returns the DBAPI connection to the connection pool, releasing transactional resources.

      If the ResultProxy potentially has rows remaining, it can be instructed to close out its resources explicitly:

      result.close()

      If the ResultProxy has pending rows remaining and is dereferenced by the application without being closed, Python garbage collection will ultimately close out the cursor as well as trigger a return of the pooled DBAPI connection resource to the pool (SQLAlchemy achieves this by the usage of weakref callbacks - never the __del__ method) - however it’s never a good idea to rely upon Python garbage collection to manage resources.

      Our example above illustrated the execution of a textual SQL string. The execute() method can of course accommodate more than that, including the variety of SQL expression constructs described in SQL Expression Language Tutorial.

      Using Transactions

      Note

      This section describes how to use transactions when working directly with Engine and Connection objects. When using the SQLAlchemy ORM, the public API for transaction control is via the Session object, which makes usage of the Transaction object internally. See Managing Transactions for further information.

      The Connection object provides a begin() method which returns a Transaction object. This object is usually used within a try/except clause so that it is guaranteed to invoke Transaction.rollback() or Transaction.commit():

      connection = engine.connect()
      trans = connection.begin()
      try:
          r1 = connection.execute(table1.select())
          connection.execute(table1.insert(), col1=7, col2='this is some data')
          trans.commit()
      except:
          trans.rollback()
          raise

      The above block can be created more succinctly using context managers, either given an Engine:

      # runs a transaction
      with engine.begin() as connection:
          r1 = connection.execute(table1.select())
          connection.execute(table1.insert(), col1=7, col2='this is some data')

      Or from the Connection, in which case the Transaction object is available as well:

      with connection.begin() as trans:
          r1 = connection.execute(table1.select())
          connection.execute(table1.insert(), col1=7, col2='this is some data')

      Nesting of Transaction Blocks

      The Transaction object also handles “nested” behavior by keeping track of the outermost begin/commit pair. In this example, two functions both issue a transaction on a Connection, but only the outermost Transaction object actually takes effect when it is committed.

      # method_a starts a transaction and calls method_b
      def method_a(connection):
          trans = connection.begin() # open a transaction
          try:
              method_b(connection)
              trans.commit()  # transaction is committed here
          except:
              trans.rollback() # this rolls back the transaction unconditionally
              raise
      
      # method_b also starts a transaction
      def method_b(connection):
          trans = connection.begin() # open a transaction - this runs in the context of method_a's transaction
          try:
              connection.execute("insert into mytable values ('bat', 'lala')")
              connection.execute(mytable.insert(), col1='bat', col2='lala')
              trans.commit()  # transaction is not committed yet
          except:
              trans.rollback() # this rolls back the transaction unconditionally
              raise
      
      # open a Connection and call method_a
      conn = engine.connect()
      method_a(conn)
      conn.close()

      Above, method_a is called first, which calls connection.begin(). Then it calls method_b. When method_b calls connection.begin(), it just increments a counter that is decremented when it calls commit(). If either method_a or method_b calls rollback(), the whole transaction is rolled back. The transaction is not committed until method_a calls the commit() method. This “nesting” behavior allows the creation of functions which “guarantee” that a transaction will be used if one was not already available, but will automatically participate in an enclosing transaction if one exists.

      Understanding Autocommit

      The previous transaction example illustrates how to use Transaction so that several executions can take part in the same transaction. What happens when we issue an INSERT, UPDATE or DELETE call without using Transaction? While some DBAPI implementations provide various special “non-transactional” modes, the core behavior of DBAPI per PEP-0249 is that a transaction is always in progress, providing only rollback() and commit() methods but no begin(). SQLAlchemy assumes this is the case for any given DBAPI.

      Given this requirement, SQLAlchemy implements its own “autocommit” feature which works completely consistently across all backends. This is achieved by detecting statements which represent data-changing operations, i.e. INSERT, UPDATE, DELETE, as well as data definition language (DDL) statements such as CREATE TABLE, ALTER TABLE, and then issuing a COMMIT automatically if no transaction is in progress. The detection is based on the presence of the autocommit=True execution option on the statement. If the statement is a text-only statement and the flag is not set, a regular expression is used to detect INSERT, UPDATE, DELETE, as well as a variety of other commands for a particular backend:

      conn = engine.connect()
      conn.execute("INSERT INTO users VALUES (1, 'john')")  # autocommits

      The “autocommit” feature is only in effect when no Transaction has otherwise been declared. This means the feature is not generally used with the ORM, as the Session object by default always maintains an ongoing Transaction.

      Full control of the “autocommit” behavior is available using the generative Connection.execution_options() method provided on Connection, Engine, Executable, using the “autocommit” flag which will turn on or off the autocommit for the selected scope. For example, a text() construct representing a stored procedure that commits might use it so that a SELECT statement will issue a COMMIT:

      engine.execute(text("SELECT my_mutating_procedure()").execution_options(autocommit=True))

      Connectionless Execution, Implicit Execution

      Recall from the first section we mentioned executing with and without explicit usage of Connection. “Connectionless” execution refers to the usage of the execute() method on an object which is not a Connection. This was illustrated using the execute() method of Engine:

      result = engine.execute("select username from users")
      for row in result:
          print "username:", row['username']

      In addition to “connectionless” execution, it is also possible to use the execute() method of any Executable construct, which is a marker for SQL expression objects that support execution. The SQL expression object itself references an Engine or Connection known as the bind, which it uses in order to provide so-called “implicit” execution services.

      Given a table as below:

      from sqlalchemy import MetaData, Table, Column, Integer, String
      
      meta = MetaData()
      users_table = Table('users', meta,
          Column('id', Integer, primary_key=True),
          Column('name', String(50))
      )

      Explicit execution delivers the SQL text or constructed SQL expression to the execute() method of Connection:

      engine = create_engine('sqlite:///file.db')
      connection = engine.connect()
      result = connection.execute(users_table.select())
      for row in result:
          # ....
      connection.close()

      Explicit, connectionless execution delivers the expression to the execute() method of Engine:

      engine = create_engine('sqlite:///file.db')
      result = engine.execute(users_table.select())
      for row in result:
          # ....
      result.close()

      Implicit execution is also connectionless, and makes usage of the execute() method on the expression itself. This method is provided as part of the Executable class, which refers to a SQL statement that is sufficient for being invoked against the database. The method makes usage of the assumption that either an Engine or Connection has been bound to the expression object. By “bound” we mean that the special attribute MetaData.bind has been used to associate a series of Table objects and all SQL constructs derived from them with a specific engine:

      engine = create_engine('sqlite:///file.db')
      meta.bind = engine
      result = users_table.select().execute()
      for row in result:
          # ....
      result.close()

      Above, we associate an Engine with a MetaData object using the special attribute MetaData.bind. The select() construct produced from the Table object has a method execute(), which will search for an Engine that’s “bound” to the Table.

      Overall, the usage of “bound metadata” has three general effects:

      • SQL statement objects gain an Executable.execute() method which automatically locates a “bind” with which to execute themselves.
      • The ORM Session object supports using “bound metadata” in order to establish which Engine should be used to invoke SQL statements on behalf of a particular mapped class, though the Session also features its own explicit system of establishing complex Engine/ mapped class configurations.
      • The MetaData.create_all(), MetaData.drop_all(), Table.create(), Table.drop(), and “autoload” features all make usage of the bound Engine automatically without the need to pass it explicitly.

      Note

      The concepts of “bound metadata” and “implicit execution” are not emphasized in modern SQLAlchemy. While they offer some convenience, they are no longer required by any API and are never necessary.

      In applications where multiple Engine objects are present, each one logically associated with a certain set of tables (i.e. vertical sharding), the “bound metadata” technique can be used so that individual Table can refer to the appropriate Engine automatically; in particular this is supported within the ORM via the Session object as a means to associate Table objects with an appropriate Engine, as an alternative to using the bind arguments accepted directly by the Session.

      However, the “implicit execution” technique is not at all appropriate for use with the ORM, as it bypasses the transactional context maintained by the Session.

      Overall, in the vast majority of cases, “bound metadata” and “implicit execution” are not useful. While “bound metadata” has a marginal level of usefulness with regards to ORM configuration, “implicit execution” is a very old usage pattern that in most cases is more confusing than it is helpful, and its usage is discouraged. Both patterns seem to encourage the overuse of expedient “short cuts” in application design which lead to problems later on.

      Modern SQLAlchemy usage, especially the ORM, places a heavy stress on working within the context of a transaction at all times; the “implicit execution” concept makes the job of associating statement execution with a particular transaction much more difficult. The Executable.execute() method on a particular SQL statement usually implies that the execution is not part of any particular transaction, which is usually not the desired effect.

      In both “connectionless” examples, the Connection is created behind the scenes; the ResultProxy returned by the execute() call references the Connection used to issue the SQL statement. When the ResultProxy is closed, the underlying Connection is closed for us, resulting in the DBAPI connection being returned to the pool with transactional resources removed.

      Using the Threadlocal Execution Strategy

      The “threadlocal” engine strategy is an optional feature which can be used by non-ORM applications to associate transactions with the current thread, such that all parts of the application can participate in that transaction implicitly without the need to explicitly reference a Connection.

      Note

      The “threadlocal” feature is generally discouraged. It’s designed for a particular pattern of usage which is generally considered as a legacy pattern. It has no impact on the “thread safety” of SQLAlchemy components or one’s application. It also should not be used when using an ORM Session object, as the Session itself represents an ongoing transaction and itself handles the job of maintaining connection and transactional resources.

      Enabling threadlocal is achieved as follows:

      db = create_engine('mysql://localhost/test', strategy='threadlocal')

      The above Engine will now acquire a Connection using connection resources derived from a thread-local variable whenever Engine.execute() or Engine.contextual_connect() is called. This connection resource is maintained as long as it is referenced, which allows multiple points of an application to share a transaction while using connectionless execution:

      def call_operation1():
          engine.execute("insert into users values (?, ?)", 1, "john")
      
      def call_operation2():
          users.update(users.c.user_id==5).execute(name='ed')
      
      db.begin()
      try:
          call_operation1()
          call_operation2()
          db.commit()
      except:
          db.rollback()

      Explicit execution can be mixed with connectionless execution by using the Engine.connect() method to acquire a Connection that is not part of the threadlocal scope:

      db.begin()
      conn = db.connect()
      try:
          conn.execute(log_table.insert(), message="Operation started")
          call_operation1()
          call_operation2()
          db.commit()
          conn.execute(log_table.insert(), message="Operation succeeded")
      except:
          db.rollback()
          conn.execute(log_table.insert(), message="Operation failed")
      finally:
          conn.close()

      To access the Connection that is bound to the threadlocal scope, call Engine.contextual_connect():

      conn = db.contextual_connect()
      call_operation3(conn)
      conn.close()

      Calling close() on the “contextual” connection does not release its resources until all other usages of that resource are closed as well, including that any ongoing transactions are rolled back or committed.

      Registering New Dialects

      The create_engine() function call locates the given dialect using setuptools entrypoints. These entry points can be established for third party dialects within the setup.py script. For example, to create a new dialect “foodialect://”, the steps are as follows:

      1. Create a package called foodialect.

      2. The package should have a module containing the dialect class, which is typically a subclass of sqlalchemy.engine.default.DefaultDialect. In this example let’s say it’s called FooDialect and its module is accessed via foodialect.dialect.

      3. The entry point can be established in setup.py as follows:

        entry_points="""
        [sqlalchemy.dialects]
        foodialect = foodialect.dialect:FooDialect
        """

      If the dialect is providing support for a particular DBAPI on top of an existing SQLAlchemy-supported database, the name can be given including a database-qualification. For example, if FooDialect were in fact a MySQL dialect, the entry point could be established like this:

      entry_points="""
      [sqlalchemy.dialects]
      mysql.foodialect = foodialect.dialect:FooDialect
      """

      The above entrypoint would then be accessed as create_engine("mysql+foodialect://").
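
      As a rough sketch, a complete setup.py for such a plugin might look like the following; the package name and version here are hypothetical:

      from setuptools import setup

      setup(
          name="foodialect",
          version="0.1",
          packages=["foodialect"],
          # register the dialect under the database-qualified name
          entry_points="""
          [sqlalchemy.dialects]
          mysql.foodialect = foodialect.dialect:FooDialect
          """,
      )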

      Registering Dialects In-Process

      SQLAlchemy also allows a dialect to be registered within the current process, bypassing the need for separate installation. Use the register() function as follows:

      from sqlalchemy.dialects import registry
      registry.register("mysql.foodialect", "myapp.dialect", "MyMySQLDialect")

      The above will respond to create_engine("mysql+foodialect://") and load the MyMySQLDialect class from the myapp.dialect module.

      New in version 0.8.

      Connection / Engine API

      class sqlalchemy.engine.Connection(engine, connection=None, close_with_result=False, _branch=False, _execution_options=None, _dispatch=None, _has_events=False)

      Bases: sqlalchemy.engine.Connectable

      Provides high-level functionality for a wrapped DB-API connection.

      Provides execution support for string-based SQL statements as well as ClauseElement, Compiled and DefaultGenerator objects. Provides a begin() method to return Transaction objects.

      The Connection object is not thread-safe. While a Connection can be shared among threads using properly synchronized access, it is still possible that the underlying DBAPI connection may not support shared access between threads. Check the DBAPI documentation for details.

      The Connection object represents a single DBAPI connection checked out from the connection pool. In this state, the connection pool has no effect upon the connection, including its expiration or timeout state. For the connection pool to properly manage connections, connections should be returned to the connection pool (i.e. connection.close()) whenever the connection is not in use.

      __init__(engine, connection=None, close_with_result=False, _branch=False, _execution_options=None, _dispatch=None, _has_events=False)

      Construct a new Connection.

      The constructor here is not public and is called only by an Engine. See the Engine.connect() and Engine.contextual_connect() methods.

      begin()

      Begin a transaction and return a transaction handle.

      The returned object is an instance of Transaction. This object represents the “scope” of the transaction, which completes when either the Transaction.rollback() or Transaction.commit() method is called.

      Nested calls to begin() on the same Connection will return new Transaction objects that represent an emulated transaction within the scope of the enclosing transaction, that is:

      trans = conn.begin()   # outermost transaction
      trans2 = conn.begin()  # "nested"
      trans2.commit()        # does nothing
      trans.commit()         # actually commits

      Calls to Transaction.commit() only have an effect when invoked via the outermost Transaction object, though the Transaction.rollback() method of any of the Transaction objects will roll back the transaction.

      See also:

      Connection.begin_nested() - use a SAVEPOINT

      Connection.begin_twophase() - use a two phase /XID transaction

      Engine.begin() - context manager available from Engine.

      begin_nested()

      Begin a nested transaction and return a transaction handle.

      The returned object is an instance of NestedTransaction.

      Nested transactions require SAVEPOINT support in the underlying database. Any transaction in the hierarchy may commit or roll back; however, the outermost transaction still controls the overall commit or rollback of the transaction as a whole.

      See also Connection.begin(), Connection.begin_twophase().
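
      A brief sketch of SAVEPOINT usage, assuming a backend with SAVEPOINT support and a hypothetical users table:

      trans = conn.begin()                 # outermost transaction
      conn.execute(users.insert(), name='outer')
      savepoint = conn.begin_nested()      # emits SAVEPOINT
      try:
          conn.execute(users.insert(), name='inner')
          savepoint.commit()               # releases the savepoint
      except:
          savepoint.rollback()             # rolls back to the savepoint only
      trans.commit()                       # outermost transaction issues the real COMMIT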

      begin_twophase(xid=None)

      Begin a two-phase or XA transaction and return a transaction handle.

      The returned object is an instance of TwoPhaseTransaction, which in addition to the methods provided by Transaction, also provides a prepare() method.

      Parameters:xid – the two phase transaction id. If not supplied, a random id will be generated.

      See also Connection.begin(), Connection.begin_twophase().
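
      A minimal sketch, assuming a backend and DBAPI with two-phase support and a hypothetical some_table:

      xa = conn.begin_twophase()
      conn.execute(some_table.insert(), data='value')
      xa.prepare()    # phase one - prepare the transaction
      xa.commit()     # phase two - commit the prepared transaction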

      close()

      Close this Connection.

      This results in a release of the underlying database resources, that is, the DBAPI connection referenced internally. The DBAPI connection is typically restored back to the connection-holding Pool referenced by the Engine that produced this Connection. Any transactional state present on the DBAPI connection is also unconditionally released via the DBAPI connection’s rollback() method, regardless of any Transaction object that may be outstanding with regards to this Connection.

      After close() is called, the Connection is permanently in a closed state, and will allow no further operations.

      closed

      Return True if this connection is closed.

      connect()

      Returns a branched version of this Connection.

      The Connection.close() method on the returned Connection can be called and this Connection will remain open.

      This method provides usage symmetry with Engine.connect(), including for usage with context managers.

      connection

      The underlying DB-API connection managed by this Connection.

      contextual_connect(**kwargs)

      Returns a branched version of this Connection.

      The Connection.close() method on the returned Connection can be called and this Connection will remain open.

      This method provides usage symmetry with Engine.contextual_connect(), including for usage with context managers.

      detach()

      Detach the underlying DB-API connection from its connection pool.

      This Connection instance will remain usable. When closed, the DB-API connection will be literally closed and not returned to its pool. The pool will typically lazily create a new connection to replace the detached connection.

      This method can be used to insulate the rest of an application from a modified state on a connection (such as a transaction isolation level or similar). Also see PoolListener for a mechanism to modify connection state when connections leave and return to their connection pool.
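
      A rough sketch of using detach() to keep per-connection state out of the pool; the SET statement shown is illustrative and backend-specific:

      conn = engine.connect()
      conn.detach()
      conn.execute("SET statement_timeout = 1000")   # connection-level state we don't want pooled
      # ... use the modified connection ...
      conn.close()   # the DBAPI connection is closed for real, not returned to the pool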

      execute(object, *multiparams, **params)

      Executes a SQL statement construct and returns a ResultProxy.

      Parameters:
      • object

        The statement to be executed. May be one of: a plain SQL string, a ClauseElement construct, a Compiled object, or a DefaultGenerator object.

      • *multiparams/**params

        represent bound parameter values to be used in the execution. Typically, the format is either a collection of one or more dictionaries passed to *multiparams:

        conn.execute(
            table.insert(),
            {"id":1, "value":"v1"},
            {"id":2, "value":"v2"}
        )

        ...or individual key/values interpreted by **params:

        conn.execute(
            table.insert(), id=1, value="v1"
        )

        In the case that a plain SQL string is passed, and the underlying DBAPI accepts positional bind parameters, a collection of tuples or individual values in *multiparams may be passed:

        conn.execute(
            "INSERT INTO table (id, value) VALUES (?, ?)",
            (1, "v1"), (2, "v2")
        )
        
        conn.execute(
            "INSERT INTO table (id, value) VALUES (?, ?)",
            1, "v1"
        )

        Note that the usage of a question mark “?” or other symbol above is contingent upon the “paramstyle” accepted by the DBAPI in use, which may be any of “qmark”, “named”, “pyformat”, “format”, or “numeric”. See pep-249 for details on paramstyle.

        To execute a textual SQL statement which uses bound parameters in a DBAPI-agnostic way, use the text() construct.

      execution_options(**opt)

      Set non-SQL options for the connection which take effect during execution.

      The method returns a copy of this Connection which references the same underlying DBAPI connection, but also defines the given execution options which will take effect for a call to execute(). As the new Connection references the same underlying resource, it’s usually a good idea to ensure that the copies are discarded immediately, which is implicit when used as follows:

      result = connection.execution_options(stream_results=True).\
                          execute(stmt)

      Note that any key/value can be passed to Connection.execution_options(), and it will be stored in the _execution_options dictionary of the Connection. It is suitable for usage by end-user schemes to communicate with event listeners, for example.

      The keywords that are currently recognized by SQLAlchemy itself include all those listed under Executable.execution_options(), as well as others that are specific to Connection.

      Parameters:
      • autocommit – Available on: Connection, statement. When True, a COMMIT will be invoked after execution when executed in ‘autocommit’ mode, i.e. when an explicit transaction is not begun on the connection. Note that DBAPI connections by default are always in a transaction - SQLAlchemy uses rules applied to different kinds of statements to determine if COMMIT will be invoked in order to provide its “autocommit” feature. Typically, all INSERT/UPDATE/DELETE statements as well as CREATE/DROP statements have autocommit behavior enabled; SELECT constructs do not. Use this option when invoking a SELECT or other specific SQL construct where COMMIT is desired (typically when calling stored procedures and such), and an explicit transaction is not in progress.
      • compiled_cache

        Available on: Connection. A dictionary where Compiled objects will be cached when the Connection compiles a clause expression into a Compiled object. It is the user’s responsibility to manage the size of this dictionary, which will have keys corresponding to the dialect, clause element, the column names within the VALUES or SET clause of an INSERT or UPDATE, as well as the “batch” mode for an INSERT or UPDATE statement. The format of this dictionary is not guaranteed to stay the same in future releases.

        Note that the ORM makes use of its own “compiled” caches for some operations, including flush operations. The caching used by the ORM internally supersedes a cache dictionary specified here.

      • isolation_level

        Available on: Connection. Set the transaction isolation level for the lifespan of this connection. Valid values include those string values accepted by the isolation_level parameter passed to create_engine(), and are database-specific, including those for SQLite and PostgreSQL - see those dialects’ documentation for further info.

        Note that this option necessarily affects the underlying DBAPI connection for the lifespan of the originating Connection, and is not per-execution. This setting is not removed until the underlying DBAPI connection is returned to the connection pool, i.e. the Connection.close() method is called.

      • no_parameters

        When True, if the final parameter list or dictionary is totally empty, will invoke the statement on the cursor as cursor.execute(statement), not passing the parameter collection at all. Some DBAPIs such as psycopg2 and mysql-python consider percent signs as significant only when parameters are present; this option allows code to generate SQL containing percent signs (and possibly other characters) that is neutral regarding whether it’s executed by the DBAPI or piped into a script that’s later invoked by command line tools.

        New in version 0.7.6.

      • stream_results – Available on: Connection, statement. Indicate to the dialect that results should be “streamed” and not pre-buffered, if possible. This is a limitation of many DBAPIs. The flag is currently understood only by the psycopg2 dialect.
      in_transaction()

      Return True if a transaction is in progress.

      info

      Info dictionary associated with the underlying DBAPI connection referred to by this Connection, allowing user-defined data to be associated with the connection.

      The data here will follow along with the DBAPI connection including after it is returned to the connection pool and used again in subsequent instances of Connection.

      invalidate(exception=None)

      Invalidate the underlying DBAPI connection associated with this Connection.

      The underlying DB-API connection is literally closed (if possible), and is discarded. Its source connection pool will typically lazily create a new connection to replace it.

      Upon the next usage, this Connection will attempt to reconnect to the pool with a new connection.

      Transactions in progress remain in an “opened” state (even though the actual transaction is gone); these must be explicitly rolled back before a reconnect on this Connection can proceed. This is to prevent applications from accidentally continuing their transactional operations in a non-transactional state.
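
      As a sketch of the recovery pattern described above, assuming a hypothetical users table:

      from sqlalchemy import exc

      trans = conn.begin()
      try:
          conn.execute(users.insert(), name='jack')
      except exc.DBAPIError as err:
          if err.connection_invalidated:
              trans.rollback()        # required before this Connection can be used again
              trans = conn.begin()    # proceeds on a newly acquired DBAPI connection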

      invalidated

      Return True if this connection was invalidated.

      run_callable(callable_, *args, **kwargs)

      Given a callable object or function, execute it, passing a Connection as the first argument.

      The given *args and **kwargs are passed subsequent to the Connection argument.

      This function, along with Engine.run_callable(), allows a function to be run with a Connection or Engine object without the need to know which one is being dealt with.

      scalar(object, *multiparams, **params)

      Executes and returns the first column of the first row.

      The underlying result/cursor is closed after execution.

      transaction(callable_, *args, **kwargs)

      Execute the given function within a transaction boundary.

      The function is passed this Connection as the first argument, followed by the given *args and **kwargs, e.g.:

      def do_something(conn, x, y):
          conn.execute("some statement", {'x':x, 'y':y})
      
      conn.transaction(do_something, 5, 10)

      The operations inside the function are all invoked within the context of a single Transaction. Upon success, the transaction is committed. If an exception is raised, the transaction is rolled back before propagating the exception.

      Note

      The transaction() method is superseded by the usage of the Python with: statement, which can be used with Connection.begin():

      with conn.begin():
          conn.execute("some statement", {'x':5, 'y':10})

      As well as with Engine.begin():

      with engine.begin() as conn:
          conn.execute("some statement", {'x':5, 'y':10})

      See also:

      Engine.begin() - engine-level transactional context

      Engine.transaction() - engine-level version of Connection.transaction()

      class sqlalchemy.engine.Connectable

      Interface for an object which supports execution of SQL constructs.

      The two implementations of Connectable are Connection and Engine.

      Connectable must also implement the ‘dialect’ member which references a Dialect instance.

      connect(**kwargs)

      Return a Connection object.

      Depending on context, this may be self if this object is already an instance of Connection, or a newly procured Connection if this object is an instance of Engine.

      contextual_connect()

      Return a Connection object which may be part of an ongoing context.

      Depending on context, this may be self if this object is already an instance of Connection, or a newly procured Connection if this object is an instance of Engine.

      create(entity, **kwargs)

      Emit CREATE statements for the given schema entity.

      Deprecated since version 0.7: Use the create() method on the given schema object directly, i.e. Table.create(), Index.create(), MetaData.create_all()

      drop(entity, **kwargs)

      Emit DROP statements for the given schema entity.

      Deprecated since version 0.7: Use the drop() method on the given schema object directly, i.e. Table.drop(), Index.drop(), MetaData.drop_all()

      execute(object, *multiparams, **params)

      Executes the given construct and returns a ResultProxy.

      scalar(object, *multiparams, **params)

      Executes and returns the first column of the first row.

      The underlying cursor is closed after execution.

      class sqlalchemy.engine.Engine(pool, dialect, url, logging_name=None, echo=None, proxy=None, execution_options=None)

      Bases: sqlalchemy.engine.Connectable, sqlalchemy.log.Identified

      Connects a Pool and Dialect together to provide a source of database connectivity and behavior.

      An Engine object is instantiated publicly using the create_engine() function.

      See also:

      Engine Configuration

      Working with Engines and Connections

      begin(close_with_result=False)

      Return a context manager delivering a Connection with a Transaction established.

      E.g.:

      with engine.begin() as conn:
          conn.execute("insert into table (x, y, z) values (1, 2, 3)")
          conn.execute("my_special_procedure(5)")

      Upon successful operation, the Transaction is committed. If an error is raised, the Transaction is rolled back.

      The close_with_result flag is normally False, and indicates that the Connection will be closed when the operation is complete. When set to True, it indicates the Connection is in “single use” mode, where the ResultProxy returned by the first call to Connection.execute() will close the Connection when that ResultProxy has exhausted all result rows.

      New in version 0.7.6.

      See also:

      Engine.connect() - procure a Connection from an Engine.

      Connection.begin() - start a Transaction for a particular Connection.

      connect(**kwargs)

      Return a new Connection object.

      The Connection object is a facade that uses a DBAPI connection internally in order to communicate with the database. This connection is procured from the connection-holding Pool referenced by this Engine. When the close() method of the Connection object is called, the underlying DBAPI connection is then returned to the connection pool, where it may be used again in a subsequent call to connect().
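
      Typical usage, shown as a brief sketch:

      conn = engine.connect()
      try:
          value = conn.execute("select 1").scalar()
      finally:
          conn.close()   # returns the DBAPI connection to the pool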

      contextual_connect(close_with_result=False, **kwargs)

      Return a Connection object which may be part of some ongoing context.

      By default, this method does the same thing as Engine.connect(). Subclasses of Engine may override this method to provide contextual behavior.

      Parameters:close_with_result – When True, the first ResultProxy created by the Connection will call the Connection.close() method of that connection as soon as any pending result rows are exhausted. This is used to supply the “connectionless execution” behavior provided by the Engine.execute() method.
      dispose()

      Dispose of the connection pool used by this Engine.

      A new connection pool is created immediately after the old one has been disposed. This new pool, like all SQLAlchemy connection pools, does not make any actual connections to the database until one is first requested.

      This method has two general use cases:

      • When a dropped connection is detected, it is assumed that all connections held by the pool are potentially dropped, and the entire pool is replaced.
      • An application may want to use dispose() within a test suite that is creating multiple engines.

      It is critical to note that dispose() does not guarantee that the application will release all open database connections - only those connections that are checked into the pool are closed. Connections which remain checked out or have been detached from the engine are not affected.
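
      A minimal sketch of the test-suite use case, assuming an in-memory SQLite engine:

      from sqlalchemy import create_engine

      engine = create_engine("sqlite://")
      # ... exercise the engine within a test ...
      engine.dispose()   # pooled connections are closed; a fresh, empty pool replaces the old one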

      driver

      Driver name of the Dialect in use by this Engine.

      execute(statement, *multiparams, **params)

      Executes the given construct and returns a ResultProxy.

      The arguments are the same as those used by Connection.execute().

      Here, a Connection is acquired using the contextual_connect() method, and the statement executed with that connection. The returned ResultProxy is flagged such that when the ResultProxy is exhausted and its underlying cursor is closed, the Connection created here will also be closed, which allows its associated DBAPI connection resource to be returned to the connection pool.

      execution_options(**opt)

      Return a new Engine that will provide Connection objects with the given execution options.

      The returned Engine remains related to the original Engine in that it shares the same connection pool and other state:

      • The Pool used by the new Engine is the same instance. The Engine.dispose() method will replace the connection pool instance for the parent engine as well as this one.
      • Event listeners are “cascaded” - meaning, the new Engine inherits the events of the parent, and new events can be associated with the new Engine individually.
      • The logging configuration and logging_name is copied from the parent Engine.

      The intent of the Engine.execution_options() method is to implement “sharding” schemes where multiple Engine objects refer to the same connection pool, but are differentiated by options that would be consumed by a custom event:

      primary_engine = create_engine("mysql://")
      shard1 = primary_engine.execution_options(shard_id="shard1")
      shard2 = primary_engine.execution_options(shard_id="shard2")

      Above, the shard1 engine serves as a factory for Connection objects that will contain the execution option shard_id=shard1, and shard2 will produce Connection objects that contain the execution option shard_id=shard2.

      An event handler can consume the above execution option to perform a schema switch or other operation, given a connection. Below we emit a MySQL use statement to switch databases, at the same time keeping track of which database we’ve established using the Connection.info dictionary, which gives us a persistent storage space that follows the DBAPI connection:

      from sqlalchemy import event
      from sqlalchemy.engine import Engine
      
      shards = {"default": "base", shard_1: "db1", "shard_2": "db2"}
      
      @event.listens_for(Engine, "before_cursor_execute")
      def _switch_shard(conn, cursor, stmt, params, context, executemany):
          shard_id = conn._execution_options.get('shard_id', "default")
          current_shard = conn.info.get("current_shard", None)
      
          if current_shard != shard_id:
              cursor.execute("use %s" % shards[shard_id])
              conn.info["current_shard"] = shard_id

      New in version 0.8.

      See also

      Connection.execution_options() - update execution options on a Connection object.

      Engine.update_execution_options() - update the execution options for a given Engine in place.

      name

      String name of the Dialect in use by this Engine.

      raw_connection()

      Return a “raw” DBAPI connection from the connection pool.

      The returned object is a proxied version of the DBAPI connection object used by the underlying driver in use. The object will have all the same behavior as the real DBAPI connection, except that its close() method will result in the connection being returned to the pool, rather than being closed for real.

      This method provides direct DBAPI connection access for special situations. In most situations, the Connection object should be used, which is procured using the Engine.connect() method.
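
      A brief sketch of raw DBAPI access; the SQL shown is arbitrary:

      raw = engine.raw_connection()
      try:
          cursor = raw.cursor()
          cursor.execute("select 1")
          cursor.fetchone()
          cursor.close()
      finally:
          raw.close()   # the proxied close() returns the connection to the pool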

      run_callable(callable_, *args, **kwargs)

      Given a callable object or function, execute it, passing a Connection as the first argument.

      The given *args and **kwargs are passed subsequent to the Connection argument.

      This function, along with Connection.run_callable(), allows a function to be run with a Connection or Engine object without the need to know which one is being dealt with.

      table_names(schema=None, connection=None)

      Return a list of all table names available in the database.

      Parameters:
      • schema – Optional, retrieve names from a non-default schema.
      • connection – Optional, use a specified connection. Default is the contextual_connect for this Engine.
      transaction(callable_, *args, **kwargs)

      Execute the given function within a transaction boundary.

      The function is passed a Connection newly procured from Engine.contextual_connect() as the first argument, followed by the given *args and **kwargs.

      e.g.:

      def do_something(conn, x, y):
          conn.execute("some statement", {'x':x, 'y':y})
      
      engine.transaction(do_something, 5, 10)

      The operations inside the function are all invoked within the context of a single Transaction. Upon success, the transaction is committed. If an exception is raised, the transaction is rolled back before propagating the exception.

      Note

      The transaction() method is superseded by the usage of the Python with: statement, which can be used with Engine.begin():

      with engine.begin() as conn:
          conn.execute("some statement", {'x':5, 'y':10})

      See also:

      Engine.begin() - engine-level transactional context

      Connection.transaction() - connection-level version of Engine.transaction()

      update_execution_options(**opt)

      Update the default execution_options dictionary of this Engine.

      The given keys/values in **opt are added to the default execution options that will be used for all connections. The initial contents of this dictionary can be sent via the execution_options parameter to create_engine().

      class sqlalchemy.engine.NestedTransaction(connection, parent)

      Bases: sqlalchemy.engine.base.Transaction

      Represent a ‘nested’, or SAVEPOINT transaction.

      A new NestedTransaction object may be procured using the Connection.begin_nested() method.

      The interface is the same as that of Transaction.

      class sqlalchemy.engine.ResultProxy(context)

      Wraps a DB-API cursor object to provide easier access to row columns.

      Individual columns may be accessed by their integer position, case-insensitive column name, or by schema.Column object. e.g.:

      row = fetchone()
      
      col1 = row[0]    # access via integer position
      
      col2 = row['col2']   # access via name
      
      col3 = row[mytable.c.mycol] # access via Column object.

      ResultProxy also handles post-processing of result column data using TypeEngine objects, which are referenced from the originating SQL statement that produced this result set.

      close(_autoclose_connection=True)

      Close this ResultProxy.

      Closes the underlying DBAPI cursor corresponding to the execution.

      Note that any data cached within this ResultProxy is still available. For some types of results, this may include buffered rows.

      If this ResultProxy was generated from an implicit execution, the underlying Connection will also be closed (returns the underlying DBAPI connection to the connection pool.)

      This method is called automatically when:

      • all result rows are exhausted using the fetchXXX() methods.
      • cursor.description is None.
      fetchall()

      Fetch all rows, just like DB-API cursor.fetchall().

      fetchmany(size=None)

      Fetch many rows, just like DB-API cursor.fetchmany(size=cursor.arraysize).

      If rows are present, the cursor remains open after this is called. Else the cursor is automatically closed and an empty list is returned.

      fetchone()

      Fetch one row, just like DB-API cursor.fetchone().

      If a row is present, the cursor remains open after this is called. Else the cursor is automatically closed and None is returned.

      first()

      Fetch the first row and then close the result set unconditionally.

      Returns None if no row is present.

      inserted_primary_key

      Return the primary key for the row just inserted.

      The return value is a list of scalar values corresponding to the list of primary key columns in the target table.

      This only applies to single row insert() constructs which did not explicitly specify Insert.returning().

      Note that primary key columns which specify a server_default clause, or otherwise do not qualify as “autoincrement” columns (see the notes at Column), and were generated using the database-side default, will appear in this list as None unless the backend supports “returning” and the insert statement was executed with “implicit returning” enabled.

      Raises InvalidRequestError if the executed statement is not a compiled expression construct or is not an insert() construct.
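
      For example, assuming a hypothetical users table with an autoincrementing integer primary key:

      result = conn.execute(users.insert(), name='jack')
      result.inserted_primary_key   # e.g. [1]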

      is_insert

      True if this ResultProxy is the result of executing a compiled expression language insert() construct.

      When True, this implies that the inserted_primary_key attribute is accessible, assuming the statement did not include a user defined “returning” construct.

      keys()

      Return the current set of string keys for rows.

      last_inserted_params()

      Return the collection of inserted parameters from this execution.

      Raises InvalidRequestError if the executed statement is not a compiled expression construct or is not an insert() construct.

      last_updated_params()

      Return the collection of updated parameters from this execution.

      Raises InvalidRequestError if the executed statement is not a compiled expression construct or is not an update() construct.

      lastrow_has_defaults()

      Return lastrow_has_defaults() from the underlying ExecutionContext.

      See ExecutionContext for details.

      lastrowid

      Return the ‘lastrowid’ accessor on the DBAPI cursor.

      This is a DBAPI-specific method and is only functional for those backends which support it, for statements where it is appropriate. Its behavior is not consistent across backends.

      Usage of this method is normally unnecessary when using insert() expression constructs; the inserted_primary_key attribute provides a tuple of primary key values for a newly inserted row, regardless of database backend.

      postfetch_cols()

      Return postfetch_cols() from the underlying ExecutionContext.

      See ExecutionContext for details.

      Raises InvalidRequestError if the executed statement is not a compiled expression construct or is not an insert() or update() construct.

      prefetch_cols()

      Return prefetch_cols() from the underlying ExecutionContext.

      See ExecutionContext for details.

      Raises InvalidRequestError if the executed statement is not a compiled expression construct or is not an insert() or update() construct.

      returns_rows

      True if this ResultProxy returns rows.

      I.e. if it is legal to call the methods fetchone(), fetchmany(), and fetchall().

      rowcount

      Return the ‘rowcount’ for this result.

      The ‘rowcount’ reports the number of rows matched by the WHERE criterion of an UPDATE or DELETE statement.

      Note

      Notes regarding ResultProxy.rowcount:

      • This attribute returns the number of rows matched, which is not necessarily the same as the number of rows that were actually modified - an UPDATE statement, for example, may have no net change on a given row if the SET values given are the same as those present in the row already. Such a row would be matched but not modified. On backends that feature both styles, such as MySQL, rowcount is configured by default to return the match count in all cases.
      • ResultProxy.rowcount is only useful in conjunction with an UPDATE or DELETE statement. Contrary to what the Python DBAPI says, it does not return the number of rows available from the results of a SELECT statement as DBAPIs cannot support this functionality when rows are unbuffered.
      • ResultProxy.rowcount may not be fully implemented by all dialects. In particular, most DBAPIs do not support an aggregate rowcount result from an executemany call. The ResultProxy.supports_sane_rowcount() and ResultProxy.supports_sane_multi_rowcount() methods will report from the dialect if each usage is known to be supported.
      • Statements that use RETURNING may not return a correct rowcount.
      scalar()

      Fetch the first column of the first row, and close the result set.

      Returns None if no row is present.

      supports_sane_multi_rowcount()

      Return supports_sane_multi_rowcount from the dialect.

      See ResultProxy.rowcount for background.

      supports_sane_rowcount()

      Return supports_sane_rowcount from the dialect.

      See ResultProxy.rowcount for background.

      class sqlalchemy.engine.RowProxy(parent, row, processors, keymap)

      Bases: sqlalchemy.engine.result.BaseRowProxy

      Proxy values from a single cursor row.

      Mostly follows “ordered dictionary” behavior, mapping result values to the string-based column name, the integer position of the result in the row, as well as Column instances which can be mapped to the original Columns that produced this result set (for results that correspond to constructed SQL expressions).

      has_key(key)

      Return True if this RowProxy contains the given key.

      items()

      Return a list of tuples, each tuple containing a key/value pair.

      keys()

      Return the list of keys as strings represented by this RowProxy.

      class sqlalchemy.engine.Transaction(connection, parent)

      Represent a database transaction in progress.

      The Transaction object is procured by calling the begin() method of Connection:

      from sqlalchemy import create_engine
      engine = create_engine("postgresql://scott:tiger@localhost/test")
      connection = engine.connect()
      trans = connection.begin()
      connection.execute("insert into x (a, b) values (1, 2)")
      trans.commit()

      The object provides rollback() and commit() methods in order to control transaction boundaries. It also implements a context manager interface so that the Python with statement can be used with the Connection.begin() method:

      with connection.begin():
          connection.execute("insert into x (a, b) values (1, 2)")

      The Transaction object is not threadsafe.

      See also: Connection.begin(), Connection.begin_twophase(), Connection.begin_nested().

      close()

      Close this Transaction.

      If this transaction is the base transaction in a begin/commit nesting, the transaction will rollback(). Otherwise, the method returns.

      This is used to cancel a Transaction without affecting the scope of an enclosing transaction.

      commit()

      Commit this Transaction.

      rollback()

      Roll back this Transaction.

      class sqlalchemy.engine.TwoPhaseTransaction(connection, xid)

      Bases: sqlalchemy.engine.base.Transaction

      Represent a two-phase transaction.

      A new TwoPhaseTransaction object may be procured using the Connection.begin_twophase() method.

      The interface is the same as that of Transaction with the addition of the prepare() method.

      prepare()

      Prepare this TwoPhaseTransaction.

      After a PREPARE, the transaction can be committed.


      Defining Constraints and Indexes

      This section will discuss SQL constraints and indexes. In SQLAlchemy the key classes include ForeignKeyConstraint and Index.

      Defining Foreign Keys

      A foreign key in SQL is a table-level construct that constrains one or more columns in that table to only allow values that are present in a different set of columns, typically but not always located on a different table. We call the columns which are constrained the foreign key columns and the columns which they are constrained towards the referenced columns. The referenced columns almost always define the primary key for their owning table, though there are exceptions to this. The foreign key is the “joint” that connects together pairs of rows which have a relationship with each other, and SQLAlchemy assigns very deep importance to this concept in virtually every area of its operation.

      In SQLAlchemy as well as in DDL, foreign key constraints can be defined as additional attributes within the table clause, or for single-column foreign keys they may optionally be specified within the definition of a single column. The single column foreign key is more common, and at the column level is specified by constructing a ForeignKey object as an argument to a Column object:

      user_preference = Table('user_preference', metadata,
          Column('pref_id', Integer, primary_key=True),
          Column('user_id', Integer, ForeignKey("user.user_id"), nullable=False),
          Column('pref_name', String(40), nullable=False),
          Column('pref_value', String(100))
      )

      Above, we define a new table user_preference for which each row must contain a value in the user_id column that also exists in the user table’s user_id column.

      The argument to ForeignKey is most commonly a string of the form <tablename>.<columnname>, or for a table in a remote schema or “owner” of the form <schemaname>.<tablename>.<columnname>. It may also be an actual Column object, which as we’ll see later is accessed from an existing Table object via its c collection:

      ForeignKey(user.c.user_id)

      The advantage to using a string is that the in-python linkage between user and user_preference is resolved only when first needed, so that table objects can be easily spread across multiple modules and defined in any order.

      Foreign keys may also be defined at the table level, using the ForeignKeyConstraint object. This object can describe a single- or multi-column foreign key. A multi-column foreign key is known as a composite foreign key, and almost always references a table that has a composite primary key. Below we define a table invoice which has a composite primary key:

      invoice = Table('invoice', metadata,
          Column('invoice_id', Integer, primary_key=True),
          Column('ref_num', Integer, primary_key=True),
          Column('description', String(60), nullable=False)
      )

      And then a table invoice_item with a composite foreign key referencing invoice:

      invoice_item = Table('invoice_item', metadata,
          Column('item_id', Integer, primary_key=True),
          Column('item_name', String(60), nullable=False),
          Column('invoice_id', Integer, nullable=False),
          Column('ref_num', Integer, nullable=False),
          ForeignKeyConstraint(['invoice_id', 'ref_num'], ['invoice.invoice_id', 'invoice.ref_num'])
      )

      It’s important to note that the ForeignKeyConstraint is the only way to define a composite foreign key. While we could also have placed individual ForeignKey objects on both the invoice_item.invoice_id and invoice_item.ref_num columns, SQLAlchemy would not be aware that these two values should be paired together - it would be two individual foreign key constraints instead of a single composite foreign key referencing two columns.

      Creating/Dropping Foreign Key Constraints via ALTER

      In all the above examples, the ForeignKey object causes the “REFERENCES” keyword to be added inline to a column definition within a “CREATE TABLE” statement when create_all() is issued, and ForeignKeyConstraint invokes the “CONSTRAINT” keyword inline with “CREATE TABLE”. There are some cases where this is undesirable, particularly when two tables reference each other mutually, each with a foreign key referencing the other. In such a situation at least one of the foreign key constraints must be generated after both tables have been built. To support such a scheme, ForeignKey and ForeignKeyConstraint offer the flag use_alter=True. When using this flag, the constraint will be generated using a definition similar to “ALTER TABLE <tablename> ADD CONSTRAINT <name> ...”. Since a name is required, the name attribute must also be specified. For example:

      node = Table('node', meta,
          Column('node_id', Integer, primary_key=True),
          Column('primary_element', Integer,
              ForeignKey('element.element_id', use_alter=True, name='fk_node_element_id')
          )
      )
      
      element = Table('element', meta,
          Column('element_id', Integer, primary_key=True),
          Column('parent_node_id', Integer),
          ForeignKeyConstraint(
              ['parent_node_id'],
              ['node.node_id'],
              use_alter=True,
              name='fk_element_parent_node_id'
          )
      )

      ON UPDATE and ON DELETE

      Most databases support cascading of foreign key values, that is, when a parent row is updated the new value is placed in child rows, or when the parent row is deleted all corresponding child rows are set to null or deleted. In data definition language these are specified using phrases like “ON UPDATE CASCADE”, “ON DELETE CASCADE”, and “ON DELETE SET NULL”, corresponding to foreign key constraints. The phrase after “ON UPDATE” or “ON DELETE” may also allow other phrases that are specific to the database in use. The ForeignKey and ForeignKeyConstraint objects support the generation of this clause via the onupdate and ondelete keyword arguments. The value is any string which will be output after the appropriate “ON UPDATE” or “ON DELETE” phrase:

      child = Table('child', meta,
          Column('id', Integer,
                  ForeignKey('parent.id', onupdate="CASCADE", ondelete="CASCADE"),
                  primary_key=True
          )
      )
      
      composite = Table('composite', meta,
          Column('id', Integer, primary_key=True),
          Column('rev_id', Integer),
          Column('note_id', Integer),
          ForeignKeyConstraint(
                      ['rev_id', 'note_id'],
                      ['revisions.id', 'revisions.note_id'],
                      onupdate="CASCADE", ondelete="SET NULL"
          )
      )

      Note that these clauses are not supported on SQLite, and require InnoDB tables when used with MySQL. They may also not be supported on other databases.

      UNIQUE Constraint

      Unique constraints can be created anonymously on a single column using the unique keyword on Column. Explicitly named unique constraints and/or those with multiple columns are created via the UniqueConstraint table-level construct.

      meta = MetaData()
      mytable = Table('mytable', meta,
      
          # per-column anonymous unique constraint
          Column('col1', Integer, unique=True),
      
          Column('col2', Integer),
          Column('col3', Integer),
      
          # explicit/composite unique constraint.  'name' is optional.
          UniqueConstraint('col2', 'col3', name='uix_1')
          )

      CHECK Constraint

      Check constraints can be named or unnamed and can be created at the Column or Table level, using the CheckConstraint construct. The text of the check constraint is passed directly through to the database, so there is limited “database independent” behavior. Column level check constraints generally should only refer to the column to which they are placed, while table level constraints can refer to any columns in the table.

      Note that some databases, such as MySQL, do not actively support check constraints.

      meta = MetaData()
      mytable = Table('mytable', meta,
      
          # per-column CHECK constraint
          Column('col1', Integer, CheckConstraint('col1>5')),
      
          Column('col2', Integer),
          Column('col3', Integer),
      
          # table level CHECK constraint.  'name' is optional.
          CheckConstraint('col2 > col3 + 5', name='check1')
          )
      
      mytable.create(engine)
      

      Setting up Constraints when using the Declarative ORM Extension

      The Table is the SQLAlchemy Core construct that allows one to define table metadata, which among other things can be used by the SQLAlchemy ORM as a target to map a class. The Declarative extension allows the Table object to be created automatically, given the contents of the table primarily as a mapping of Column objects.

      To apply table-level constraint objects such as ForeignKeyConstraint to a table defined using Declarative, use the __table_args__ attribute, described at Table Configuration.
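
      A minimal Declarative sketch follows; the class, table, column, and constraint names here are hypothetical:

      from sqlalchemy import Column, Integer, String, UniqueConstraint
      from sqlalchemy.ext.declarative import declarative_base

      Base = declarative_base()

      class Address(Base):
          __tablename__ = 'address'

          id = Column(Integer, primary_key=True)
          user_id = Column(Integer, nullable=False)
          email = Column(String(100))

          # table-level constraints go in __table_args__, just as they would
          # appear in a Table() definition
          __table_args__ = (
              UniqueConstraint('user_id', 'email', name='uq_user_email'),
          )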

      Constraints API

      class sqlalchemy.schema.Constraint(name=None, deferrable=None, initially=None, _create_rule=None, **kw)

      Bases: sqlalchemy.schema.SchemaItem

      A table-level SQL constraint.

      class sqlalchemy.schema.CheckConstraint(sqltext, name=None, deferrable=None, initially=None, table=None, _create_rule=None, _autoattach=True)

      Bases: sqlalchemy.schema.Constraint

      A table- or column-level CHECK constraint.

      Can be included in the definition of a Table or Column.

      class sqlalchemy.schema.ColumnCollectionConstraint(*columns, **kw)

      Bases: sqlalchemy.schema.ColumnCollectionMixin, sqlalchemy.schema.Constraint

      A constraint that proxies a ColumnCollection.

      class sqlalchemy.schema.ForeignKey(column, _constraint=None, use_alter=False, name=None, onupdate=None, ondelete=None, deferrable=None, schema=None, initially=None, link_to_name=False, match=None)

      Bases: sqlalchemy.schema.SchemaItem

      Defines a dependency between two columns.

      ForeignKey is specified as an argument to a Column object, e.g.:

      t = Table("remote_table", metadata,
          Column("remote_id", ForeignKey("main_table.id"))
      )

      Note that ForeignKey is only a marker object that defines a dependency between two columns. The actual constraint is in all cases represented by the ForeignKeyConstraint object. This object will be generated automatically when a ForeignKey is associated with a Column which in turn is associated with a Table. Conversely, when ForeignKeyConstraint is applied to a Table, ForeignKey markers are automatically generated to be present on each associated Column, which are also associated with the constraint object.

      Note that you cannot define a “composite” foreign key constraint, that is a constraint between a grouping of multiple parent/child columns, using ForeignKey objects. To define this grouping, the ForeignKeyConstraint object must be used, and applied to the Table. The associated ForeignKey objects are created automatically.

      The ForeignKey objects associated with an individual Column object are available in the foreign_keys collection of that column.

      Further examples of foreign key configuration are in metadata_foreignkeys.

      __init__(column, _constraint=None, use_alter=False, name=None, onupdate=None, ondelete=None, deferrable=None, schema=None, initially=None, link_to_name=False, match=None)

      Construct a column-level FOREIGN KEY.

      The ForeignKey object when constructed generates a ForeignKeyConstraint which is associated with the parent Table object’s collection of constraints.

      Parameters:
      • column

        A single target column for the key relationship. A Column object or a column name as a string: tablename.columnkey or schema.tablename.columnkey. columnkey is the key which has been assigned to the column (defaults to the column name itself), unless link_to_name is True in which case the rendered name of the column is used.

        New in version 0.7.4: Note that if the schema name is not included, and the underlying MetaData has a “schema”, that value will be used.

      • name – Optional string. An in-database name for the key if constraint is not provided.
      • onupdate – Optional string. If set, emit ON UPDATE <value> when issuing DDL for this constraint. Typical values include CASCADE, DELETE and RESTRICT.
      • ondelete – Optional string. If set, emit ON DELETE <value> when issuing DDL for this constraint. Typical values include CASCADE, DELETE and RESTRICT.
      • deferrable – Optional bool. If set, emit DEFERRABLE or NOT DEFERRABLE when issuing DDL for this constraint.
      • initially – Optional string. If set, emit INITIALLY <value> when issuing DDL for this constraint.
      • link_to_name – if True, the string name given in column is the rendered name of the referenced column, not its locally assigned key.
      • use_alter – passed to the underlying ForeignKeyConstraint to indicate the constraint should be generated/dropped externally from the CREATE TABLE / DROP TABLE statement. See that class’s constructor for details.
      • match – Optional string. If set, emit MATCH <value> when issuing DDL for this constraint. Typical values include SIMPLE, PARTIAL and FULL.
      • schema – Deprecated; this flag does nothing and will be removed in 0.9.
      column

      Return the target Column referenced by this ForeignKey.

      If this ForeignKey was created using a string-based target column specification, this attribute will on first access initiate a resolution process to locate the referenced remote Column. The resolution process traverses to the parent Column, Table, and MetaData to proceed - if any of these aren’t yet present, an error is raised.

      copy(schema=None)

      Produce a copy of this ForeignKey object.

      The new ForeignKey will not be bound to any Column.

      This method is usually used by the internal copy procedures of Column, Table, and MetaData.

      Parameters:schema – The returned ForeignKey will reference the original table and column name, qualified by the given string schema name.
      get_referent(table)

      Return the Column in the given Table referenced by this ForeignKey.

      Returns None if this ForeignKey does not reference the given Table.

      references(table)

      Return True if the given Table is referenced by this ForeignKey.

      target_fullname

      Return a string based ‘column specification’ for this ForeignKey.

      This is usually the equivalent of the string-based “tablename.colname” argument first passed to the object’s constructor.

      class sqlalchemy.schema.ForeignKeyConstraint(columns, refcolumns, name=None, onupdate=None, ondelete=None, deferrable=None, initially=None, use_alter=False, link_to_name=False, match=None, table=None)

      Bases: sqlalchemy.schema.Constraint

      A table-level FOREIGN KEY constraint.

      Defines a single column or composite FOREIGN KEY ... REFERENCES constraint. For a no-frills, single column foreign key, adding a ForeignKey to the definition of a Column is a shorthand equivalent for an unnamed, single column ForeignKeyConstraint.

      Examples of foreign key configuration are in metadata_foreignkeys.

      __init__(columns, refcolumns, name=None, onupdate=None, ondelete=None, deferrable=None, initially=None, use_alter=False, link_to_name=False, match=None, table=None)

      Construct a composite-capable FOREIGN KEY.

      Parameters:
      • columns – A sequence of local column names. The named columns must be defined and present in the parent Table. The names should match the key given to each column (defaults to the name) unless link_to_name is True.
      • refcolumns – A sequence of foreign column names or Column objects. The columns must all be located within the same Table.
      • name – Optional, the in-database name of the key.
      • onupdate – Optional string. If set, emit ON UPDATE <value> when issuing DDL for this constraint. Typical values include CASCADE, DELETE and RESTRICT.
      • ondelete – Optional string. If set, emit ON DELETE <value> when issuing DDL for this constraint. Typical values include CASCADE, DELETE and RESTRICT.
      • deferrable – Optional bool. If set, emit DEFERRABLE or NOT DEFERRABLE when issuing DDL for this constraint.
      • initially – Optional string. If set, emit INITIALLY <value> when issuing DDL for this constraint.
      • link_to_name – if True, the string name given in column is the rendered name of the referenced column, not its locally assigned key.
      • use_alter – If True, do not emit the DDL for this constraint as part of the CREATE TABLE definition. Instead, generate it via an ALTER TABLE statement issued after the full collection of tables have been created, and drop it via an ALTER TABLE statement before the full collection of tables are dropped. This is shorthand for the usage of AddConstraint and DropConstraint applied as “after-create” and “before-drop” events on the MetaData object. This is normally used to generate/drop constraints on objects that are mutually dependent on each other.
      • match – Optional string. If set, emit MATCH <value> when issuing DDL for this constraint. Typical values include SIMPLE, PARTIAL and FULL.
      class sqlalchemy.schema.PrimaryKeyConstraint(*columns, **kw)

      Bases: sqlalchemy.schema.ColumnCollectionConstraint

      A table-level PRIMARY KEY constraint.

      Defines a single column or composite PRIMARY KEY constraint. For a no-frills primary key, adding primary_key=True to one or more Column definitions is a shorthand equivalent for an unnamed single- or multiple-column PrimaryKeyConstraint.

      class sqlalchemy.schema.UniqueConstraint(*columns, **kw)

      Bases: sqlalchemy.schema.ColumnCollectionConstraint

      A table-level UNIQUE constraint.

      Defines a single column or composite UNIQUE constraint. For a no-frills, single column constraint, adding unique=True to the Column definition is a shorthand equivalent for an unnamed, single column UniqueConstraint.

      Indexes

      Indexes can be created anonymously (using an auto-generated name ix_<column label>) for a single column using the inline index keyword on Column, which also modifies the usage of unique to apply the uniqueness to the index itself, instead of adding a separate UNIQUE constraint. For indexes with specific names or which encompass more than one column, use the Index construct, which requires a name.

      Below we illustrate a Table with several Index objects associated. The DDL for “CREATE INDEX” is issued right after the create statements for the table:

      meta = MetaData()
      mytable = Table('mytable', meta,
          # an indexed column, with index "ix_mytable_col1"
          Column('col1', Integer, index=True),
      
          # a uniquely indexed column with index "ix_mytable_col2"
          Column('col2', Integer, index=True, unique=True),
      
          Column('col3', Integer),
          Column('col4', Integer),
      
          Column('col5', Integer),
          Column('col6', Integer),
          )
      
      # place an index on col3, col4
      Index('idx_col34', mytable.c.col3, mytable.c.col4)
      
      # place a unique index on col5, col6
      Index('myindex', mytable.c.col5, mytable.c.col6, unique=True)
      
      mytable.create(engine)
      

      Note that in the example above, the Index construct is created externally to the table to which it corresponds, using Column objects directly. Index also supports “inline” definition inside the Table, using string names to identify columns:

      meta = MetaData()
      mytable = Table('mytable', meta,
          Column('col1', Integer),
      
          Column('col2', Integer),
      
          Column('col3', Integer),
          Column('col4', Integer),
      
          # place an index on col1, col2
          Index('idx_col12', 'col1', 'col2'),
      
          # place a unique index on col3, col4
          Index('idx_col34', 'col3', 'col4', unique=True)
      )

      New in version 0.7: Support of “inline” definition inside the Table for Index.

      The Index object also supports its own create() method:

      i = Index('someindex', mytable.c.col5)
      i.create(engine)
      

      Functional Indexes

      Index supports SQL and function expressions, as supported by the target backend. To create an index against a column using a descending value, the ColumnElement.desc() modifier may be used:

      from sqlalchemy import Index
      
      Index('someindex', mytable.c.somecol.desc())

      Or with a backend that supports functional indexes such as Postgresql, a “case insensitive” index can be created using the lower() function:

      from sqlalchemy import func, Index
      
      Index('someindex', func.lower(mytable.c.somecol))

      New in version 0.8: Index supports SQL expressions and functions as well as plain columns.

      Index API

      class sqlalchemy.schema.Index(name, *expressions, **kw)

      Bases: sqlalchemy.schema.ColumnCollectionMixin, sqlalchemy.schema.SchemaItem

      A table-level INDEX.

      Defines a composite (one or more column) INDEX.

      E.g.:

      sometable = Table("sometable", metadata,
                      Column("name", String(50)),
                      Column("address", String(100))
                  )
      
      Index("some_index", sometable.c.name)

      For a no-frills, single-column index, the Column construct also supports index=True:

      sometable = Table("sometable", metadata,
                      Column("name", String(50), index=True)
                  )

      For a composite index, multiple columns can be specified:

      Index("some_index", sometable.c.name, sometable.c.address)

      Functional indexes are supported as well, keeping in mind that at least one Column must be present:

      Index("some_index", func.lower(sometable.c.name))

      New in version 0.8: support for functional and expression-based indexes.

      See also

      Indexes - General information on Index.

      Postgresql-Specific Index Options - PostgreSQL-specific options available for the Index construct.

      MySQL Specific Index Options - MySQL-specific options available for the Index construct.

      MSSQL-Specific Index Options - MSSQL-specific options available for the Index construct.

      __init__(name, *expressions, **kw)

      Construct an index object.

      Parameters:
      • name – The name of the index
      • *expressions – Column or SQL expressions.
      • unique – Defaults to False: create a unique index.
      • **kw – Other keyword arguments may be interpreted by specific dialects.
      bind

      Return the connectable associated with this Index.

      create(bind=None)

      Issue a CREATE statement for this Index, using the given Connectable for connectivity.

      drop(bind=None)

      Issue a DROP statement for this Index, using the given Connectable for connectivity.

      SQLAlchemy-0.8.4/doc/core/ddl.html0000644000076500000240000022005112251147465017366 0ustar classicstaff00000000000000 Customizing DDL — SQLAlchemy 0.8 Documentation


      Customizing DDL

      In the preceding sections we’ve discussed a variety of schema constructs including Table, ForeignKeyConstraint, CheckConstraint, and Sequence. Throughout, we’ve relied upon the create() and create_all() methods of Table and MetaData in order to issue data definition language (DDL) for all constructs. When issued, a pre-determined order of operations is invoked, and DDL to create each table is created unconditionally including all constraints and other objects associated with it. For more complex scenarios where database-specific DDL is required, SQLAlchemy offers two techniques which can be used to add any DDL based on any condition, either accompanying the standard generation of tables or by itself.

      Controlling DDL Sequences

      The sqlalchemy.schema package contains SQL expression constructs that provide DDL expressions. For example, to produce a CREATE TABLE statement:

      from sqlalchemy.schema import CreateTable
      engine.execute(CreateTable(mytable))
      

      Above, the CreateTable construct works like any other expression construct (such as select(), table.insert(), etc.). A full reference of available constructs is in DDL Expression Constructs API.

      The DDL constructs all extend a common base class which provides the capability to be associated with an individual Table or MetaData object, to be invoked upon create/drop events. Consider the example of a table which contains a CHECK constraint:

      users = Table('users', metadata,
                     Column('user_id', Integer, primary_key=True),
                     Column('user_name', String(40), nullable=False),
                     CheckConstraint('length(user_name) >= 8', name="cst_user_name_length")
                     )
      
      users.create(engine)
      

      The above table contains a column “user_name” which is subject to a CHECK constraint that validates that the length of the string is at least eight characters. When a create() is issued for this table, DDL for the CheckConstraint will also be issued inline within the table definition.

      The CheckConstraint construct can also be constructed externally and associated with the Table afterwards:

      constraint = CheckConstraint('length(user_name) >= 8', name="cst_user_name_length")
      users.append_constraint(constraint)

      So far, the effect is the same. However, if we create DDL elements corresponding to the creation and removal of this constraint, and associate them with the Table as events, these new events will take over the job of issuing DDL for the constraint. Additionally, the constraint will be added via ALTER:

      from sqlalchemy import event
      
      event.listen(
          users,
          "after_create",
          AddConstraint(constraint)
      )
      event.listen(
          users,
          "before_drop",
          DropConstraint(constraint)
      )
      
      users.create(engine)

      users.drop(engine)
      

      The real usefulness of the above becomes clearer once we illustrate the DDLElement.execute_if() method. This method returns a modified form of the DDL callable which will filter on criteria before responding to a received event. It accepts a parameter dialect, which is the string name of a dialect or a tuple of such, which will limit the execution of the item to just those dialects. It also accepts a callable_ parameter which may reference a Python callable which will be invoked upon event reception, returning True or False indicating if the event should proceed.

      If our CheckConstraint was only supported by Postgresql and not other databases, we could limit its usage to just that dialect:

      event.listen(
          users,
          'after_create',
          AddConstraint(constraint).execute_if(dialect='postgresql')
      )
      event.listen(
          users,
          'before_drop',
          DropConstraint(constraint).execute_if(dialect='postgresql')
      )

      Or to any set of dialects:

      event.listen(
          users,
          "after_create",
          AddConstraint(constraint).execute_if(dialect=('postgresql', 'mysql'))
      )
      event.listen(
          users,
          "before_drop",
          DropConstraint(constraint).execute_if(dialect=('postgresql', 'mysql'))
      )

      When using a callable, the callable is passed the ddl element, the Table or MetaData object whose “create” or “drop” event is in progress, and the Connection object being used for the operation, as well as additional information as keyword arguments. The callable can perform checks, such as whether or not a given item already exists. Below we define should_create() and should_drop() callables that check for the presence of our named constraint:

      def should_create(ddl, target, connection, **kw):
          row = connection.execute("select conname from pg_constraint where conname='%s'" % ddl.element.name).scalar()
          return not bool(row)
      
      def should_drop(ddl, target, connection, **kw):
          return not should_create(ddl, target, connection, **kw)
      
      event.listen(
          users,
          "after_create",
          AddConstraint(constraint).execute_if(callable_=should_create)
      )
      event.listen(
          users,
          "before_drop",
          DropConstraint(constraint).execute_if(callable_=should_drop)
      )
      
      users.create(engine)

      users.drop(engine)
      

      Custom DDL

      Custom DDL phrases are most easily achieved using the DDL construct. This construct works like all the other DDL elements except it accepts a string which is the text to be emitted:

      event.listen(
          metadata,
          "after_create",
          DDL("ALTER TABLE users ADD CONSTRAINT "
              "cst_user_name_length "
              " CHECK (length(user_name) >= 8)")
      )

      A more comprehensive method of creating libraries of DDL constructs is to use custom compilation - see Custom SQL Constructs and Compilation Extension for details.

      DDL Expression Constructs API

      class sqlalchemy.schema.DDLElement

      Bases: sqlalchemy.sql.expression.Executable, sqlalchemy.schema._DDLCompiles

      Base class for DDL expression constructs.

      This class is the base for the general purpose DDL class, as well as the various create/drop clause constructs such as CreateTable, DropTable, AddConstraint, etc.

      DDLElement integrates closely with SQLAlchemy events, introduced in Events. An instance of one is itself an event receiving callable:

      event.listen(
          users,
          'after_create',
          AddConstraint(constraint).execute_if(dialect='postgresql')
      )
      __call__(target, bind, **kw)

      Execute the DDL as a ddl_listener.

      against(target)

      Return a copy of this DDL against a specific schema item.

      bind
      callable_ = None
      dialect = None
      execute(bind=None, target=None)

      Execute this DDL immediately.

      Executes the DDL statement in isolation using the supplied Connectable, or the Connectable assigned to the .bind property if no bind is supplied. If the DDL has conditional “on” criteria, it will be invoked with None as the event.

      Parameters:
      • bind – Optional, an Engine or Connection. If not supplied, a valid Connectable must be present in the .bind property.
      • target – Optional, defaults to None. The target SchemaItem for the execute call. Will be passed to the on callable if any, and may also provide string expansion data for the statement. See execute_at for more information.
      execute_at(event_name, target)

      Link execution of this DDL to the DDL lifecycle of a SchemaItem.

      Deprecated since version 0.7: See DDLEvents, as well as DDLElement.execute_if().

      Links this DDLElement to a Table or MetaData instance, executing it when that schema item is created or dropped. The DDL statement will be executed using the same Connection and transactional context as the Table create/drop itself. The .bind property of this statement is ignored.

      Parameters:
      • event – One of the events defined in the schema item’s .ddl_events; e.g. ‘before-create’, ‘after-create’, ‘before-drop’ or ‘after-drop’
      • target – The Table or MetaData instance with which this DDLElement will be associated.

      A DDLElement instance can be linked to any number of schema items.

      execute_at builds on the append_ddl_listener interface of MetaData and Table objects.

      Caveat: Creating or dropping a Table in isolation will also trigger any DDL set to execute_at that Table’s MetaData. This may change in a future release.

      execute_if(dialect=None, callable_=None, state=None)

      Return a callable that will execute this DDLElement conditionally.

      Used to provide a wrapper for event listening:

      event.listen(
                  metadata,
                  'before_create',
                  DDL("my_ddl").execute_if(dialect='postgresql')
              )
      Parameters:
      • dialect

        May be a string, tuple or a callable predicate. If a string, it will be compared to the name of the executing database dialect:

        DDL('something').execute_if(dialect='postgresql')

        If a tuple, specifies multiple dialect names:

        DDL('something').execute_if(dialect=('postgresql', 'mysql'))
      • callable_

        A callable, which will be invoked with four positional arguments as well as optional keyword arguments:

        ddl: This DDL element.
        target: The Table or MetaData object which is the target of this event. May be None if the DDL is executed explicitly.
        bind: The Connection being used for DDL execution.
        tables: Optional keyword argument - a list of Table objects which are to be created/dropped within a MetaData.create_all() or drop_all() method call.
        state: Optional keyword argument - will be the state argument passed to this function.
        checkfirst: Keyword argument, will be True if the ‘checkfirst’ flag was set during the call to create(), create_all(), drop(), drop_all().

        If the callable returns a true value, the DDL statement will be executed.

      • state – any value which will be passed to the callable_ as the state keyword argument.

      See also

      DDLEvents

      Events

      on = None
      target = None
      class sqlalchemy.schema.DDL(statement, on=None, context=None, bind=None)

      Bases: sqlalchemy.schema.DDLElement

      A literal DDL statement.

      Specifies literal SQL DDL to be executed by the database. DDL objects function as DDL event listeners, and can be subscribed to those events listed in DDLEvents, using either Table or MetaData objects as targets. Basic templating support allows a single DDL instance to handle repetitive tasks for multiple tables.

      Examples:

      from sqlalchemy import event, DDL
      
      tbl = Table('users', metadata, Column('uid', Integer))
      event.listen(tbl, 'before_create', DDL('DROP TRIGGER users_trigger'))
      
      spow = DDL('ALTER TABLE %(table)s SET secretpowers TRUE')
      event.listen(tbl, 'after_create', spow.execute_if(dialect='somedb'))
      
      drop_spow = DDL('ALTER TABLE users SET secretpowers FALSE')
      connection.execute(drop_spow)

      When operating on Table events, the following statement string substitutions are available:

      %(table)s  - the Table name, with any required quoting applied
      %(schema)s - the schema name, with any required quoting applied
      %(fullname)s - the Table name including schema, quoted if needed

      The DDL’s “context”, if any, will be combined with the standard substitutions noted above. Keys present in the context will override the standard substitutions.

      __init__(statement, on=None, context=None, bind=None)

      Create a DDL statement.

      Parameters:
      • statement

        A string or unicode string to be executed. Statements will be processed with Python’s string formatting operator. See the context argument and the execute_at method.

        A literal ‘%’ in a statement must be escaped as ‘%%’.

        SQL bind parameters are not available in DDL statements.

      • on

        Deprecated. See DDLElement.execute_if().

        Optional filtering criteria. May be a string, tuple or a callable predicate. If a string, it will be compared to the name of the executing database dialect:

        DDL('something', on='postgresql')

        If a tuple, specifies multiple dialect names:

        DDL('something', on=('postgresql', 'mysql'))

        If a callable, it will be invoked with four positional arguments as well as optional keyword arguments:

        ddl: This DDL element.
        event: The name of the event that has triggered this DDL, such as ‘after-create’. Will be None if the DDL is executed explicitly.
        target: The Table or MetaData object which is the target of this event. May be None if the DDL is executed explicitly.
        connection: The Connection being used for DDL execution.
        tables: Optional keyword argument - a list of Table objects which are to be created/dropped within a MetaData.create_all() or drop_all() method call.

        If the callable returns a true value, the DDL statement will be executed.

      • context – Optional dictionary, defaults to None. These values will be available for use in string substitutions on the DDL statement.
      • bind – Optional. A Connectable, used by default when execute() is invoked without a bind argument.

      See also

      DDLEvents

      sqlalchemy.event

      class sqlalchemy.schema.CreateTable(element, on=None, bind=None)

      Bases: sqlalchemy.schema._CreateDropBase

      Represent a CREATE TABLE statement.

      __init__(element, on=None, bind=None)

      Create a CreateTable construct.

      Parameters:
      • element – a Table that’s the subject of the CREATE
      • on – See the description for ‘on’ in DDL.
      • bind – See the description for ‘bind’ in DDL.
      class sqlalchemy.schema.DropTable(element, on=None, bind=None)

      Bases: sqlalchemy.schema._CreateDropBase

      Represent a DROP TABLE statement.

      class sqlalchemy.schema.CreateColumn(element)

      Bases: sqlalchemy.schema._DDLCompiles

      Represent a Column as rendered in a CREATE TABLE statement, via the CreateTable construct.

      This is provided to support custom column DDL within the generation of CREATE TABLE statements, by using the compiler extension documented in Custom SQL Constructs and Compilation Extension to extend CreateColumn.

      Typical integration is to examine the incoming Column object, and to redirect compilation if a particular flag or condition is found:

      from sqlalchemy import schema
      from sqlalchemy.ext.compiler import compiles
      
      @compiles(schema.CreateColumn)
      def compile(element, compiler, **kw):
          column = element.element
      
          if "special" not in column.info:
              return compiler.visit_create_column(element, **kw)
      
          text = "%s SPECIAL DIRECTIVE %s" % (
                  column.name,
                  compiler.type_compiler.process(column.type)
              )
          default = compiler.get_column_default_string(column)
          if default is not None:
              text += " DEFAULT " + default
      
          if not column.nullable:
              text += " NOT NULL"
      
          if column.constraints:
              text += " ".join(
                          compiler.process(const)
                          for const in column.constraints)
          return text

      The above construct can be applied to a Table as follows:

      from sqlalchemy import Table, MetaData, Column, Integer, String
      from sqlalchemy import schema
      
      metadata = MetaData()
      
      table = Table('mytable', metadata,
              Column('x', Integer, info={"special":True}, primary_key=True),
              Column('y', String(50)),
              Column('z', String(20), info={"special":True})
          )
      
      metadata.create_all(conn)

      Above, the directives we’ve added to the Column.info collection will be detected by our custom compilation scheme:

      CREATE TABLE mytable (
              x SPECIAL DIRECTIVE INTEGER NOT NULL,
              y VARCHAR(50),
              z SPECIAL DIRECTIVE VARCHAR(20),
          PRIMARY KEY (x)
      )

      The CreateColumn construct can also be used to skip certain columns when producing a CREATE TABLE. This is accomplished by creating a compilation rule that conditionally returns None. This is essentially how to produce the same effect as using the system=True argument on Column, which marks a column as an implicitly-present “system” column.

      For example, suppose we wish to produce a Table which skips rendering of the Postgresql xmin column against the Postgresql backend, but on other backends does render it, in anticipation of a triggered rule. A conditional compilation rule could skip this name only on Postgresql:

      from sqlalchemy.schema import CreateColumn
      
      @compiles(CreateColumn, "postgresql")
      def skip_xmin(element, compiler, **kw):
          if element.element.name == 'xmin':
              return None
          else:
              return compiler.visit_create_column(element, **kw)
      
      
      my_table = Table('mytable', metadata,
                  Column('id', Integer, primary_key=True),
                  Column('xmin', Integer)
              )

      Above, a CreateTable construct will generate a CREATE TABLE which only includes the id column in the string; the xmin column will be omitted, but only against the Postgresql backend.

      New in version 0.8.3: The CreateColumn construct supports skipping of columns by returning None from a custom compilation rule.

      New in version 0.8: The CreateColumn construct was added to support custom column creation styles.

      class sqlalchemy.schema.CreateSequence(element, on=None, bind=None)

      Bases: sqlalchemy.schema._CreateDropBase

      Represent a CREATE SEQUENCE statement.

      class sqlalchemy.schema.DropSequence(element, on=None, bind=None)

      Bases: sqlalchemy.schema._CreateDropBase

      Represent a DROP SEQUENCE statement.

      class sqlalchemy.schema.CreateIndex(element, on=None, bind=None)

      Bases: sqlalchemy.schema._CreateDropBase

      Represent a CREATE INDEX statement.

      class sqlalchemy.schema.DropIndex(element, on=None, bind=None)

      Bases: sqlalchemy.schema._CreateDropBase

      Represent a DROP INDEX statement.

      class sqlalchemy.schema.AddConstraint(element, *args, **kw)

      Bases: sqlalchemy.schema._CreateDropBase

      Represent an ALTER TABLE ADD CONSTRAINT statement.

      class sqlalchemy.schema.DropConstraint(element, cascade=False, **kw)

      Bases: sqlalchemy.schema._CreateDropBase

      Represent an ALTER TABLE DROP CONSTRAINT statement.

      class sqlalchemy.schema.CreateSchema(name, quote=None, **kw)

      Bases: sqlalchemy.schema._CreateDropBase

      Represent a CREATE SCHEMA statement.

      New in version 0.7.4.

      The argument here is the string name of the schema.

      __init__(name, quote=None, **kw)

      Create a new CreateSchema construct.

      class sqlalchemy.schema.DropSchema(name, quote=None, cascade=False, **kw)

      Bases: sqlalchemy.schema._CreateDropBase

      Represent a DROP SCHEMA statement.

      The argument here is the string name of the schema.

      New in version 0.7.4.

      __init__(name, quote=None, cascade=False, **kw)

      Create a new DropSchema construct.
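
      As a brief illustration of both constructs, a hedged sketch in which the schema name and the engine variable are assumptions:

      from sqlalchemy.schema import CreateSchema, DropSchema

      # emit CREATE SCHEMA directly on an engine
      engine.execute(CreateSchema('myschema'))

      # ... later, remove the schema; cascade=True also drops contained objects
      # on backends that support DROP SCHEMA ... CASCADE
      engine.execute(DropSchema('myschema', cascade=True))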

      SQLAlchemy-0.8.4/doc/core/defaults.html0000644000076500000240000016663712251147466020456 0ustar classicstaff00000000000000 Column Insert/Update Defaults — SQLAlchemy 0.8 Documentation


      Column Insert/Update Defaults

      SQLAlchemy provides a very rich featureset regarding column level events which take place during INSERT and UPDATE statements. Options include:

      • Scalar values used as defaults during INSERT and UPDATE operations
      • Python functions which execute upon INSERT and UPDATE operations
      • SQL expressions which are embedded in INSERT statements (or in some cases execute beforehand)
      • SQL expressions which are embedded in UPDATE statements
      • Server side default values used during INSERT
      • Markers for server-side triggers used during UPDATE

      The general rule for all insert/update defaults is that they only take effect if no value for a particular column is passed as an execute() parameter; otherwise, the given value is used.

      Scalar Defaults

      The simplest kind of default is a scalar value used as the default value of a column:

      Table("mytable", meta,
          Column("somecolumn", Integer, default=12)
      )

      Above, the value “12” will be bound as the column value during an INSERT if no other value is supplied.
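
      For example, a minimal sketch of the “only if no value is passed” rule, assuming the table above is assigned to a variable t and a Connection conn is available:

      conn.execute(t.insert())                      # "somecolumn" receives the default of 12
      conn.execute(t.insert(), {"somecolumn": 7})   # the supplied value 7 is used instead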

      A scalar value may also be associated with an UPDATE statement, though this is not very common (as UPDATE statements are usually looking for dynamic defaults):

      Table("mytable", meta,
          Column("somecolumn", Integer, onupdate=25)
      )

      Python-Executed Functions

      The default and onupdate keyword arguments also accept Python functions. These functions are invoked at the time of insert or update if no other value for that column is supplied, and the value returned is used for the column’s value. Below illustrates a crude “sequence” that assigns an incrementing counter to a primary key column:

      # a function which counts upwards
      i = 0
      def mydefault():
          global i
          i += 1
          return i
      
      t = Table("mytable", meta,
          Column('id', Integer, primary_key=True, default=mydefault),
      )

      It should be noted that for real “incrementing sequence” behavior, the built-in capabilities of the database should normally be used, which may include sequence objects or other autoincrementing capabilities. For primary key columns, SQLAlchemy will in most cases use these capabilities automatically. See the API documentation for Column including the autoincrement flag, as well as the section on Sequence later in this chapter for background on standard primary key generation techniques.

      To illustrate onupdate, we assign the Python datetime function now to the onupdate attribute:

      import datetime
      
      t = Table("mytable", meta,
          Column('id', Integer, primary_key=True),
      
          # define 'last_updated' to be populated with datetime.now()
          Column('last_updated', DateTime, onupdate=datetime.datetime.now),
      )

      When an update statement executes and no value is passed for last_updated, the datetime.datetime.now() Python function is executed and its return value used as the value for last_updated. Notice that we provide now as the function itself without calling it (i.e. there are no parentheses following) - SQLAlchemy will execute the function at the time the statement executes.

      Context-Sensitive Default Functions

      The Python functions used by default and onupdate may also make use of the current statement’s context in order to determine a value. The context of a statement is an internal SQLAlchemy object which contains all information about the statement being executed, including its source expression, the parameters associated with it and the cursor. The typical use case for this context with regards to default generation is to have access to the other values being inserted or updated on the row. To access the context, provide a function that accepts a single context argument:

      def mydefault(context):
          return context.current_parameters['counter'] + 12
      
      t = Table('mytable', meta,
          Column('counter', Integer),
          Column('counter_plus_twelve', Integer, default=mydefault, onupdate=mydefault)
      )

      Above we illustrate a default function which will execute for all INSERT and UPDATE statements where a value for counter_plus_twelve was otherwise not provided, and the value will be that of whatever value is present in the execution for the counter column, plus the number 12.

      While the context object passed to the default function has many attributes, the current_parameters member is a special member provided only during the execution of a default function for the purposes of deriving defaults from its existing values. For a single statement that is executing many sets of bind parameters, the user-defined function is called for each set of parameters, and current_parameters will be provided with each individual parameter set for each execution.
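
      As an illustrative sketch of the above, assuming a Connection conn and the t table from the previous example, an “executemany” style call invokes mydefault() once per parameter set:

      conn.execute(t.insert(), [
          {"counter": 5},
          {"counter": 10},
          {"counter": 15},
      ])
      # counter_plus_twelve is computed per row: 17, 22 and 27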

      SQL Expressions

      The “default” and “onupdate” keywords may also be passed SQL expressions, including select statements or direct function calls:

      t = Table("mytable", meta,
          Column('id', Integer, primary_key=True),
      
          # define 'create_date' to default to now()
          Column('create_date', DateTime, default=func.now()),
      
          # define 'key' to pull its default from the 'keyvalues' table
          Column('key', String(20), default=keyvalues.select(keyvalues.c.type == 'type1', limit=1)),
      
          # define 'last_modified' to use the current_timestamp SQL function on update
          Column('last_modified', DateTime, onupdate=func.utc_timestamp())
          )

      Above, the create_date column will be populated with the result of the now() SQL function (which, depending on backend, compiles into NOW() or CURRENT_TIMESTAMP in most cases) during an INSERT statement, and the key column with the result of a SELECT subquery from another table. The last_modified column will be populated with the value of UTC_TIMESTAMP(), a function specific to MySQL, when an UPDATE statement is emitted for this table.

      Note that when using func functions, unlike with plain Python functions such as datetime.datetime.now, we do call the function, i.e. with parentheses “()” - this is because what we want in this case is the return value of the function, which is the SQL expression construct that will be rendered into the INSERT or UPDATE statement.

      The above SQL functions are usually executed “inline” with the INSERT or UPDATE statement being executed, meaning, a single statement is executed which embeds the given expressions or subqueries within the VALUES or SET clause of the statement. In some cases, however, the function is “pre-executed” in a SELECT statement of its own beforehand. This happens when all of the following are true:

      • the column is a primary key column
      • the database dialect does not support a usable cursor.lastrowid accessor (or equivalent); this currently includes PostgreSQL, Oracle, and Firebird, as well as some MySQL dialects.
      • the dialect does not support the “RETURNING” clause or similar, or the implicit_returning flag is set to False for the dialect. Dialects which support RETURNING currently include Postgresql, Oracle, Firebird, and MS-SQL.
      • the statement is a single execution, i.e. only supplies one set of parameters and doesn’t use “executemany” behavior
      • the inline=True flag is not set on the Insert() or Update() construct, and the statement has not defined an explicit returning() clause.

      Whether or not the default generation clause “pre-executes” is not something that normally needs to be considered, unless it is being addressed for performance reasons.

      When the statement is executed with a single set of parameters (that is, it is not an “executemany” style execution), the returned ResultProxy will contain a collection accessible via result.postfetch_cols() which contains a list of all Column objects which had an inline-executed default. Similarly, all parameters which were bound to the statement, including all Python and SQL expressions which were pre-executed, are present in the last_inserted_params() or last_updated_params() collections on ResultProxy. The inserted_primary_key collection contains a list of primary key values for the row inserted (a list so that single-column and composite-column primary keys are represented in the same format).
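
      A brief sketch of inspecting these collections after a single-row INSERT; this is hedged, with the table t and the Connection conn carried over as assumptions from the earlier examples:

      result = conn.execute(t.insert(), {"counter": 5})

      result.postfetch_cols()          # Column objects whose defaults were executed inline
      result.last_inserted_params()    # full parameter set, including pre-executed defaults
      result.inserted_primary_key      # list of primary key values for the inserted row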

      Server Side Defaults

      A variant on the SQL expression default is the server_default, which gets placed in the CREATE TABLE statement during a create() operation:

      t = Table('test', meta,
          Column('abc', String(20), server_default='abc'),
          Column('created_at', DateTime, server_default=text("sysdate"))
      )

      A create call for the above table will produce:

      CREATE TABLE test (
          abc varchar(20) default 'abc',
          created_at datetime default sysdate
      )

      The behavior of server_default is similar to that of a regular SQL default; if it’s placed on a primary key column for a database which doesn’t have a way to “postfetch” the ID, and the statement is not “inlined”, the SQL expression is pre-executed; otherwise, SQLAlchemy lets the default fire off on the database side normally.

      Triggered Columns

      Columns with values set by a database trigger or other external process may be called out using FetchedValue as a marker:

      t = Table('test', meta,
          Column('abc', String(20), server_default=FetchedValue()),
          Column('def', String(20), server_onupdate=FetchedValue())
      )

      Changed in version 0.8.0b2,0.7.10: The for_update argument on FetchedValue is set automatically when specified as the server_onupdate argument. If using an older version, specify the onupdate above as server_onupdate=FetchedValue(for_update=True).

      These markers do not emit a “default” clause when the table is created, however they do set the same internal flags as a static server_default clause, providing hints to higher-level tools that a “post-fetch” of these rows should be performed after an insert or update.

      Note

      It’s generally not appropriate to use FetchedValue in conjunction with a primary key column, particularly when using the ORM or any other scenario where the ResultProxy.inserted_primary_key attribute is required. This is because the “post-fetch” operation requires that the primary key value already be available, so that the row can be selected on its primary key.

      For a server-generated primary key value, all databases provide special accessors or other techniques in order to acquire the “last inserted primary key” column of a table. These mechanisms aren’t affected by the presence of FetchedValue. For special situations where triggers are used to generate primary key values, and the database in use does not support the RETURNING clause, it may be necessary to forego the usage of the trigger and instead apply the SQL expression or function as a “pre execute” expression:

      t = Table('test', meta,
              Column('abc', MyType, default=func.generate_new_value(), primary_key=True)
      )

      Where above, when Table.insert() is used, the func.generate_new_value() expression will be pre-executed in the context of a scalar SELECT statement, and the new value will be applied to the subsequent INSERT, while at the same time being made available to the ResultProxy.inserted_primary_key attribute.

      Defining Sequences

      SQLAlchemy represents database sequences using the Sequence object, which is considered to be a special case of “column default”. It only has an effect on databases which have explicit support for sequences, which currently includes Postgresql, Oracle, and Firebird. The Sequence object is otherwise ignored.

      The Sequence may be placed on any column as a “default” generator to be used during INSERT operations, and can also be configured to fire off during UPDATE operations if desired. It is most commonly used in conjunction with a single integer primary key column:

      table = Table("cartitems", meta,
          Column("cart_id", Integer, Sequence('cart_id_seq'), primary_key=True),
          Column("description", String(40)),
          Column("createdate", DateTime())
      )

      Where above, the table “cartitems” is associated with a sequence named “cart_id_seq”. When INSERT statements take place for “cartitems”, and no value is passed for the “cart_id” column, the “cart_id_seq” sequence will be used to generate a value.

      When the Sequence is associated with a table, CREATE and DROP statements issued for that table will also issue CREATE/DROP for the sequence object as well, thus “bundling” the sequence object with its parent table.

      The Sequence object also implements special functionality to accommodate Postgresql’s SERIAL datatype. The SERIAL type in PG automatically generates a sequence that is used implicitly during inserts. This means that if a Table object defines a Sequence on its primary key column so that it works with Oracle and Firebird, the Sequence would get in the way of the “implicit” sequence that PG would normally use. For this use case, add the flag optional=True to the Sequence object - this indicates that the Sequence should only be used if the database provides no other option for generating primary key identifiers.
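
      A hedged sketch of the optional flag just described; the table, column and sequence names are illustrative:

      widgets = Table("widgets", meta,
          # the sequence is used on Oracle and Firebird; on Postgresql the
          # implicit SERIAL sequence is used instead because optional=True
          Column("widget_id", Integer,
                 Sequence('widget_id_seq', optional=True),
                 primary_key=True),
          Column("name", String(40))
      )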

      The Sequence object also has the ability to be executed standalone like a SQL expression, which has the effect of calling its “next value” function:

      seq = Sequence('some_sequence')
      nextid = connection.execute(seq)

      Default Objects API

      class sqlalchemy.schema.ColumnDefault(arg, **kwargs)

      Bases: sqlalchemy.schema.DefaultGenerator

      A plain default value on a column.

      This could correspond to a constant, a callable function, or a SQL clause.

      ColumnDefault is generated automatically whenever the default, onupdate arguments of Column are used. A ColumnDefault can be passed positionally as well.

      For example, the following:

      Column('foo', Integer, default=50)

      Is equivalent to:

      Column('foo', Integer, ColumnDefault(50))
      class sqlalchemy.schema.DefaultClause(arg, for_update=False, _reflected=False)

      Bases: sqlalchemy.schema.FetchedValue

      A DDL-specified DEFAULT column value.

      DefaultClause is a FetchedValue that also generates a “DEFAULT” clause when “CREATE TABLE” is emitted.

      DefaultClause is generated automatically whenever the server_default, server_onupdate arguments of Column are used. A DefaultClause can be passed positionally as well.

      For example, the following:

      Column('foo', Integer, server_default="50")

      Is equivalent to:

      Column('foo', Integer, DefaultClause("50"))
      class sqlalchemy.schema.DefaultGenerator(for_update=False)

      Bases: sqlalchemy.schema._NotAColumnExpr, sqlalchemy.schema.SchemaItem

      Base class for column default values.

      class sqlalchemy.schema.FetchedValue(for_update=False)

      Bases: sqlalchemy.schema._NotAColumnExpr, sqlalchemy.events.SchemaEventTarget

      A marker for a transparent database-side default.

      Use FetchedValue when the database is configured to provide some automatic default for a column.

      E.g.:

      Column('foo', Integer, FetchedValue())

      Would indicate that some trigger or default generator will create a new value for the foo column during an INSERT.

      class sqlalchemy.schema.PassiveDefault(*arg, **kw)

      Bases: sqlalchemy.schema.DefaultClause

      A DDL-specified DEFAULT column value.

      Deprecated since version 0.6: PassiveDefault is deprecated. Use DefaultClause.

      class sqlalchemy.schema.Sequence(name, start=None, increment=None, schema=None, optional=False, quote=None, metadata=None, quote_schema=None, for_update=False)

      Bases: sqlalchemy.schema.DefaultGenerator

      Represents a named database sequence.

      The Sequence object represents the name and configurational parameters of a database sequence. It also represents a construct that can be “executed” by a SQLAlchemy Engine or Connection, rendering the appropriate “next value” function for the target database and returning a result.

      The Sequence is typically associated with a primary key column:

      some_table = Table('some_table', metadata,
          Column('id', Integer, Sequence('some_table_seq'), primary_key=True)
      )

      When CREATE TABLE is emitted for the above Table, if the target platform supports sequences, a CREATE SEQUENCE statement will be emitted as well. For platforms that don’t support sequences, the Sequence construct is ignored.

      __init__(name, start=None, increment=None, schema=None, optional=False, quote=None, metadata=None, quote_schema=None, for_update=False)

      Construct a Sequence object.

      Parameters:
      • name – The name of the sequence.
      • start – the starting index of the sequence. This value is used when the CREATE SEQUENCE command is emitted to the database as the value of the “START WITH” clause. If None, the clause is omitted, which on most platforms indicates a starting value of 1.
      • increment – the increment value of the sequence. This value is used when the CREATE SEQUENCE command is emitted to the database as the value of the “INCREMENT BY” clause. If None, the clause is omitted, which on most platforms indicates an increment of 1.
      • schema – Optional schema name for the sequence, if located in a schema other than the default.
      • optional – boolean value, when True, indicates that this Sequence object only needs to be explicitly generated on backends that don’t provide another way to generate primary key identifiers. Currently, it essentially means, “don’t create this sequence on the Postgresql backend, where the SERIAL keyword creates a sequence for us automatically”.
      • quote – boolean value, when True or False, explicitly forces quoting of the schema name on or off. When left at its default of None, normal quoting rules based on casing and reserved words take place.
      • metadata

        optional MetaData object which will be associated with this Sequence. A Sequence that is associated with a MetaData gains access to the bind of that MetaData, meaning the Sequence.create() and Sequence.drop() methods will make usage of that engine automatically.

        Changed in version 0.7: Additionally, the appropriate CREATE SEQUENCE/ DROP SEQUENCE DDL commands will be emitted corresponding to this Sequence when MetaData.create_all() and MetaData.drop_all() are invoked.

        Note that when a Sequence is applied to a Column, the Sequence is automatically associated with the MetaData object of that column’s parent Table, when that association is made. The Sequence will then be subject to automatic CREATE SEQUENCE/DROP SEQUENCE corresponding to when the Table object itself is created or dropped, rather than that of the MetaData object overall.

      • for_update – Indicates this Sequence, when associated with a Column, should be invoked for UPDATE statements on that column’s table, rather than for INSERT statements, when no value is otherwise present for that column in the statement.
      create(bind=None, checkfirst=True)

      Creates this sequence in the database.

      drop(bind=None, checkfirst=True)

      Drops this sequence from the database.

      next_value()

      Return a next_value function element which will render the appropriate increment function for this Sequence within any SQL expression.
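
      The next_value() element can be embedded in an ordinary SQL expression; a brief sketch, assuming an available Connection named connection:

      from sqlalchemy import select

      seq = Sequence('some_sequence')
      next_id = connection.execute(select([seq.next_value()])).scalar()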

      SQLAlchemy-0.8.4/doc/core/dml.html0000644000076500000240000046531312251147466017414 0ustar classicstaff00000000000000 Insert, Updates, Deletes — SQLAlchemy 0.8 Documentation


      Insert, Updates, Deletes

      INSERT, UPDATE and DELETE statements build on a hierarchy starting with UpdateBase. The Insert and Update constructs build on the intermediary ValuesBase.

      sqlalchemy.sql.expression.delete(table, whereclause=None, **kwargs)

      Represent a DELETE statement via the Delete SQL construct.

      Similar functionality is available via the delete() method on Table.
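
      E.g., a brief sketch assuming the users table used elsewhere on this page:

      from sqlalchemy import delete

      stmt = delete(users).where(users.c.id == 5)

      # equivalent form using the Table method:
      stmt = users.delete().where(users.c.id == 5)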

      Parameters:
      • table – The table to be deleted from.
      • whereclause – A ClauseElement describing the WHERE condition of the DELETE statement. Note that the where() generative method may be used instead.

      See also

      Deletes - SQL Expression Tutorial

      sqlalchemy.sql.expression.insert(table, values=None, inline=False, **kwargs)

      Represent an INSERT statement via the Insert SQL construct.

      Similar functionality is available via the insert() method on Table.
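
      E.g., a brief sketch assuming the users table used elsewhere on this page; the column name is an assumption:

      from sqlalchemy import insert

      stmt = insert(users).values(name='jack')

      # equivalent form using the Table method:
      stmt = users.insert().values(name='jack')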

      Parameters:
      • table – TableClause which is the subject of the insert.
      • values – collection of values to be inserted; see Insert.values() for a description of allowed formats here. Can be omitted entirely; an Insert construct will also dynamically render the VALUES clause at execution time based on the parameters passed to Connection.execute().
      • inline – if True, SQL defaults will be compiled ‘inline’ into the statement and not pre-executed.

      If both values and compile-time bind parameters are present, the compile-time bind parameters override the information specified within values on a per-key basis.

      The keys within values can be either Column objects or their string identifiers. Each key may reference one of:

      • a literal data value (i.e. string, number, etc.);
      • a Column object;
      • a SELECT statement.

      If a SELECT statement is specified which references this INSERT statement’s table, the statement will be correlated against the INSERT statement.

      See also

      Insert Expressions - SQL Expression Tutorial

      Inserts, Updates and Deletes - SQL Expression Tutorial

      sqlalchemy.sql.expression.update(table, whereclause=None, values=None, inline=False, **kwargs)

      Represent an UPDATE statement via the Update SQL construct.

      E.g.:

      from sqlalchemy import update
      
      stmt = update(users).where(users.c.id==5).\
              values(name='user #5')

      Similar functionality is available via the update() method on Table:

      stmt = users.update().\
                  where(users.c.id==5).\
                  values(name='user #5')
      Parameters:
      • table – A Table object representing the database table to be updated.
      • whereclause

        Optional SQL expression describing the WHERE condition of the UPDATE statement. Modern applications may prefer to use the generative where() method to specify the WHERE clause.

        The WHERE clause can refer to multiple tables. For databases which support this, an UPDATE FROM clause will be generated, or on MySQL, a multi-table update. The statement will fail on databases that don’t have support for multi-table update statements. A SQL-standard method of referring to additional tables in the WHERE clause is to use a correlated subquery:

        users.update().values(name='ed').where(
                users.c.name==select([addresses.c.email_address]).\
                            where(addresses.c.user_id==users.c.id).\
                            as_scalar()
                )

        Changed in version 0.7.4: The WHERE clause can refer to multiple tables.

      • values

        Optional dictionary which specifies the SET conditions of the UPDATE. If left as None, the SET conditions are determined from those parameters passed to the statement during the execution and/or compilation of the statement. When compiled standalone without any parameters, the SET clause is generated for all columns.

        Modern applications may prefer to use the generative Update.values() method to set the values of the UPDATE statement.

      • inline – if True, SQL defaults present on Column objects via the default keyword will be compiled ‘inline’ into the statement and not pre-executed. This means that their values will not be available in the dictionary returned from ResultProxy.last_updated_params().

      If both values and compile-time bind parameters are present, the compile-time bind parameters override the information specified within values on a per-key basis.

      The keys within values can be either Column objects or their string identifiers (specifically the “key” of the Column, normally but not necessarily equivalent to its “name”). Normally, the Column objects used here are expected to be part of the target Table that is the table to be updated. However when using MySQL, a multiple-table UPDATE statement can refer to columns from any of the tables referred to in the WHERE clause.

      The values referred to in values are typically:

      • a literal data value (i.e. string, number, etc.)
      • a SQL expression, such as a related Column, a scalar-returning select() construct, etc.

      When combining select() constructs within the values clause of an update() construct, the subquery represented by the select() should be correlated to the parent table, that is, providing criterion which links the table inside the subquery to the outer table being updated:

      users.update().values(
              name=select([addresses.c.email_address]).\
                      where(addresses.c.user_id==users.c.id).\
                      as_scalar()
          )

      See also

      Inserts, Updates and Deletes - SQL Expression Language Tutorial

      class sqlalchemy.sql.expression.Delete(table, whereclause, bind=None, returning=None, prefixes=None, **kwargs)

      Bases: sqlalchemy.sql.expression.UpdateBase

      Represent a DELETE construct.

      The Delete object is created using the delete() function.

      bind
      inherited from the bind attribute of UpdateBase

      Return a ‘bind’ linked to this UpdateBase or a Table associated with it.

      compare(other, **kw)
      inherited from the compare() method of ClauseElement

      Compare this ClauseElement to the given ClauseElement.

      Subclasses should override the default behavior, which is a straight identity comparison.

      **kw are arguments consumed by subclass compare() methods and may be used to modify the criteria for comparison. (see ColumnElement)

      compile(bind=None, dialect=None, **kw)
      inherited from the compile() method of ClauseElement

      Compile this SQL expression.

      The return value is a Compiled object. Calling str() or unicode() on the returned value will yield a string representation of the result. The Compiled object also can return a dictionary of bind parameter names and values using the params accessor.

      Parameters:
      • bind – An Engine or Connection from which a Compiled will be acquired. This argument takes precedence over this ClauseElement‘s bound engine, if any.
      • column_keys – Used for INSERT and UPDATE statements, a list of column names which should be present in the VALUES clause of the compiled statement. If None, all columns from the target table object are rendered.
      • dialect – A Dialect instance from which a Compiled will be acquired. This argument takes precedence over the bind argument as well as this ClauseElement‘s bound engine, if any.
      • inline – Used for INSERT statements, for a dialect which does not support inline retrieval of newly generated primary key columns, will force the expression used to create the new primary key value to be rendered inline within the INSERT statement’s VALUES clause. This typically refers to Sequence execution but may also refer to any server-side default generation function associated with a primary key Column.
      execute(*multiparams, **params)
      inherited from the execute() method of Executable

      Compile and execute this Executable.

      execution_options(**kw)
      inherited from the execution_options() method of Executable

      Set non-SQL options for the statement which take effect during execution.

      Execution options can be set on a per-statement or per Connection basis. Additionally, the Engine and ORM Query objects provide access to execution options which they in turn configure upon connections.

      The execution_options() method is generative. A new instance of this statement is returned that contains the options:

      statement = select([table.c.x, table.c.y])
      statement = statement.execution_options(autocommit=True)

      Note that only a subset of possible execution options can be applied to a statement - these include “autocommit” and “stream_results”, but not “isolation_level” or “compiled_cache”. See Connection.execution_options() for a full list of possible options.

      params(*arg, **kw)
      inherited from the params() method of UpdateBase

      Set the parameters for the statement.

      This method raises NotImplementedError on the base class, and is overridden by ValuesBase to provide the SET/VALUES clause of UPDATE and INSERT.

      prefix_with(*expr, **kw)
      inherited from the prefix_with() method of HasPrefixes

      Add one or more expressions following the statement keyword, i.e. SELECT, INSERT, UPDATE, or DELETE. Generative.

      This is used to support backend-specific prefix keywords such as those provided by MySQL.

      E.g.:

      stmt = table.insert().prefix_with("LOW_PRIORITY", dialect="mysql")

      Multiple prefixes can be specified by multiple calls to prefix_with().

      Parameters:
      • *expr – textual or ClauseElement construct which will be rendered following the INSERT, UPDATE, or DELETE keyword.
      • **kw – A single keyword ‘dialect’ is accepted. This is an optional string dialect name which will limit rendering of this prefix to only that dialect.
      returning(*cols)
      inherited from the returning() method of UpdateBase

      Add a RETURNING or equivalent clause to this statement.

      The given list of columns represent columns within the table that is the target of the INSERT, UPDATE, or DELETE. Each element can be any column expression. Table objects will be expanded into their individual columns.

      Upon compilation, a RETURNING clause, or database equivalent, will be rendered within the statement. For INSERT and UPDATE, the values are the newly inserted/updated values. For DELETE, the values are those of the rows which were deleted.

      Upon execution, the values of the columns to be returned are made available via the result set and can be iterated using fetchone() and similar. For DBAPIs which do not natively support returning values (i.e. cx_oracle), SQLAlchemy will approximate this behavior at the result level so that a reasonable amount of behavioral neutrality is provided.

      Note that not all databases/DBAPIs support RETURNING. For those backends with no support, an exception is raised upon compilation and/or execution. For those who do support it, the functionality across backends varies greatly, including restrictions on executemany() and other statements which return multiple rows. Please read the documentation notes for the database in use in order to determine the availability of RETURNING.
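
      For illustration, a hedged sketch of fetching a generated primary key via RETURNING on a backend that supports it; the table and column names are assumptions:

      stmt = users.insert().values(name='jack').returning(users.c.id)
      result = connection.execute(stmt)
      new_id = result.fetchone()[0]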

      scalar(*multiparams, **params)
      inherited from the scalar() method of Executable

      Compile and execute this Executable, returning the result’s scalar representation.

      self_group(against=None)
      inherited from the self_group() method of ClauseElement

      Apply a ‘grouping’ to this ClauseElement.

      This method is overridden by subclasses to return a “grouping” construct, i.e. parenthesis. In particular it’s used by “binary” expressions to provide a grouping around themselves when placed into a larger expression, as well as by select() constructs when placed into the FROM clause of another select(). (Note that subqueries should be normally created using the Select.alias() method, as many platforms require nested SELECT statements to be named).

      As expressions are composed together, the application of self_group() is automatic - end-user code should never need to use this method directly. Note that SQLAlchemy’s clause constructs take operator precedence into account - so parenthesis might not be needed, for example, in an expression like x OR (y AND z) - AND takes precedence over OR.

      The base self_group() method of ClauseElement just returns self.

      unique_params(*optionaldict, **kwargs)
      inherited from the unique_params() method of ClauseElement

      Return a copy with bindparam() elements replaced.

      Same functionality as params(), except adds unique=True to affected bind parameters so that multiple statements can be used.

      where(whereclause)

      Add the given WHERE clause to a newly returned delete construct.

      with_hint(text, selectable=None, dialect_name='*')
      inherited from the with_hint() method of UpdateBase

      Add a table hint for a single table to this INSERT/UPDATE/DELETE statement.

      Note

      UpdateBase.with_hint() currently applies only to Microsoft SQL Server. For MySQL INSERT/UPDATE/DELETE hints, use UpdateBase.prefix_with().

      The text of the hint is rendered in the appropriate location for the database backend in use, relative to the Table that is the subject of this statement, or optionally to that of the given Table passed as the selectable argument.

      The dialect_name option will limit the rendering of a particular hint to a particular backend. Such as, to add a hint that only takes effect for SQL Server:

      mytable.insert().with_hint("WITH (PAGLOCK)", dialect_name="mssql")

      New in version 0.7.6.

      Parameters:
      • text – Text of the hint.
      • selectable – optional Table that specifies an element of the FROM clause within an UPDATE or DELETE to be the subject of the hint - applies only to certain backends.
      • dialect_name – defaults to *, if specified as the name of a particular dialect, will apply these hints only when that dialect is in use.
      class sqlalchemy.sql.expression.Insert(table, values=None, inline=False, bind=None, prefixes=None, returning=None, **kwargs)

      Bases: sqlalchemy.sql.expression.ValuesBase

      Represent an INSERT construct.

      The Insert object is created using the insert() function.

      bind
      inherited from the bind attribute of UpdateBase

      Return a ‘bind’ linked to this UpdateBase or a Table associated with it.

      compare(other, **kw)
      inherited from the compare() method of ClauseElement

      Compare this ClauseElement to the given ClauseElement.

      Subclasses should override the default behavior, which is a straight identity comparison.

      **kw are arguments consumed by subclass compare() methods and may be used to modify the criteria for comparison. (see ColumnElement)

      compile(bind=None, dialect=None, **kw)
      inherited from the compile() method of ClauseElement

      Compile this SQL expression.

      The return value is a Compiled object. Calling str() or unicode() on the returned value will yield a string representation of the result. The Compiled object also can return a dictionary of bind parameter names and values using the params accessor.
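
      As a brief illustration, assuming a users Table, a statement can be compiled against a specific dialect and inspected without being executed:

      from sqlalchemy.dialects import postgresql

      stmt = users.insert().values(name='jack')
      compiled = stmt.compile(dialect=postgresql.dialect())

      print str(compiled)     # the statement as rendered for the Postgresql dialect
      print compiled.params   # {'name': 'jack'}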

      Parameters:
      • bind – An Engine or Connection from which a Compiled will be acquired. This argument takes precedence over this ClauseElement‘s bound engine, if any.
      • column_keys – Used for INSERT and UPDATE statements, a list of column names which should be present in the VALUES clause of the compiled statement. If None, all columns from the target table object are rendered.
      • dialect – A Dialect instance from which a Compiled will be acquired. This argument takes precedence over the bind argument as well as this ClauseElement‘s bound engine, if any.
      • inline – Used for INSERT statements, for a dialect which does not support inline retrieval of newly generated primary key columns, will force the expression used to create the new primary key value to be rendered inline within the INSERT statement’s VALUES clause. This typically refers to Sequence execution but may also refer to any server-side default generation function associated with a primary key Column.
      execute(*multiparams, **params)
      inherited from the execute() method of Executable

      Compile and execute this Executable.

      execution_options(**kw)
      inherited from the execution_options() method of Executable

      Set non-SQL options for the statement which take effect during execution.

      Execution options can be set on a per-statement or per Connection basis. Additionally, the Engine and ORM Query objects provide access to execution options which they in turn configure upon connections.

      The execution_options() method is generative. A new instance of this statement is returned that contains the options:

      statement = select([table.c.x, table.c.y])
      statement = statement.execution_options(autocommit=True)

      Note that only a subset of possible execution options can be applied to a statement - these include “autocommit” and “stream_results”, but not “isolation_level” or “compiled_cache”. See Connection.execution_options() for a full list of possible options.

      from_select(names, select)

      Return a new Insert construct which represents an INSERT...FROM SELECT statement.

      e.g.:

      sel = select([table1.c.a, table1.c.b]).where(table1.c.c > 5)
      ins = table2.insert().from_select(['a', 'b'], sel)
      Parameters:
      • names – a sequence of string column names or Column objects representing the target columns.
      • select – a select() construct, FromClause or other construct which resolves into a FromClause, such as an ORM Query object, etc. The order of columns returned from this FROM clause should correspond to the order of columns sent as the names parameter; while this is not checked before passing along to the database, the database would normally raise an exception if these column lists don’t correspond.

      Note

      Depending on backend, it may be necessary for the Insert statement to be constructed using the inline=True flag; this flag will prevent the implicit usage of RETURNING when the INSERT statement is rendered, which isn’t supported on a backend such as Oracle in conjunction with an INSERT..SELECT combination:

      sel = select([table1.c.a, table1.c.b]).where(table1.c.c > 5)
      ins = table2.insert(inline=True).from_select(['a', 'b'], sel)

      New in version 0.8.3.

      params(*arg, **kw)
      inherited from the params() method of UpdateBase

      Set the parameters for the statement.

      This method raises NotImplementedError on the base class, and is overridden by ValuesBase to provide the SET/VALUES clause of UPDATE and INSERT.

      prefix_with(*expr, **kw)
      inherited from the prefix_with() method of HasPrefixes

      Add one or more expressions following the statement keyword, i.e. SELECT, INSERT, UPDATE, or DELETE. Generative.

      This is used to support backend-specific prefix keywords such as those provided by MySQL.

      E.g.:

      stmt = table.insert().prefix_with("LOW_PRIORITY", dialect="mysql")

      Multiple prefixes can be specified by multiple calls to prefix_with().

      Parameters:
      • *expr – textual or ClauseElement construct which will be rendered following the INSERT, UPDATE, or DELETE keyword.
      • **kw – A single keyword ‘dialect’ is accepted. This is an optional string dialect name which will limit rendering of this prefix to only that dialect.
      returning(*cols)
      inherited from the returning() method of UpdateBase

      Add a RETURNING or equivalent clause to this statement.

      The given list of columns represents columns within the table that is the target of the INSERT, UPDATE, or DELETE. Each element can be any column expression. Table objects will be expanded into their individual columns.

      Upon compilation, a RETURNING clause, or database equivalent, will be rendered within the statement. For INSERT and UPDATE, the values are the newly inserted/updated values. For DELETE, the values are those of the rows which were deleted.

      Upon execution, the values of the columns to be returned are made available via the result set and can be iterated using fetchone() and similar. For DBAPIs which do not natively support returning values (e.g. cx_oracle), SQLAlchemy will approximate this behavior at the result level so that a reasonable amount of behavioral neutrality is provided.

      Note that not all databases/DBAPIs support RETURNING. For those backends with no support, an exception is raised upon compilation and/or execution. For those who do support it, the functionality across backends varies greatly, including restrictions on executemany() and other statements which return multiple rows. Please read the documentation notes for the database in use in order to determine the availability of RETURNING.
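
      For example, on a backend with native RETURNING support such as Postgresql, the value of a newly generated primary key can be retrieved - a brief sketch assuming a users Table and a Connection conn:

      result = conn.execute(
          users.insert().values(name='jack').returning(users.c.id)
      )
      new_id = result.fetchone()[0]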

      scalar(*multiparams, **params)
      inherited from the scalar() method of Executable

      Compile and execute this Executable, returning the result’s scalar representation.

      self_group(against=None)
      inherited from the self_group() method of ClauseElement

      Apply a ‘grouping’ to this ClauseElement.

      This method is overridden by subclasses to return a “grouping” construct, i.e. parentheses. In particular it’s used by “binary” expressions to provide a grouping around themselves when placed into a larger expression, as well as by select() constructs when placed into the FROM clause of another select(). (Note that subqueries should normally be created using the Select.alias() method, as many platforms require nested SELECT statements to be named).

      As expressions are composed together, the application of self_group() is automatic - end-user code should never need to use this method directly. Note that SQLAlchemy’s clause constructs take operator precedence into account - so parentheses might not be needed, for example, in an expression like x OR (y AND z) - AND takes precedence over OR.

      The base self_group() method of ClauseElement just returns self.

      unique_params(*optionaldict, **kwargs)
      inherited from the unique_params() method of ClauseElement

      Return a copy with bindparam() elements replaced.

      Same functionality as params(), except adds unique=True to affected bind parameters so that multiple statements can be used.

      values(*args, **kwargs)
      inherited from the values() method of ValuesBase

      Specify a fixed VALUES clause for an INSERT statement, or the SET clause for an UPDATE.

      Note that the Insert and Update constructs support per-execution time formatting of the VALUES and/or SET clauses, based on the arguments passed to Connection.execute(). However, the ValuesBase.values() method can be used to “fix” a particular set of parameters into the statement.

      Multiple calls to ValuesBase.values() will produce a new construct, each one with the parameter list modified to include the new parameters sent. In the typical case of a single dictionary of parameters, the newly passed keys will replace the same keys in the previous construct. In the case of a list-based “multiple values” construct, each new list of values is extended onto the existing list of values.

      Parameters:
      • **kwargs

        key value pairs representing the string key of a Column mapped to the value to be rendered into the VALUES or SET clause:

        users.insert().values(name="some name")
        
        users.update().where(users.c.id==5).values(name="some name")
      • *args

        Alternatively, a dictionary, tuple or list of dictionaries or tuples can be passed as a single positional argument in order to form the VALUES or SET clause of the statement. The single dictionary form works the same as the kwargs form:

        users.insert().values({"name": "some name"})

        If a tuple is passed, the tuple should contain the same number of columns as the target Table:

        users.insert().values((5, "some name"))

        The Insert construct also supports being rendered with a multiple-VALUES clause, for those backends which support this SQL syntax (SQLite, Postgresql, MySQL). This mode is indicated by passing a list of one or more dictionaries/tuples:

        users.insert().values([
                            {"name": "some name"},
                            {"name": "some other name"},
                            {"name": "yet another name"},
                        ])

        In the case of an Update construct, only the single dictionary/tuple form is accepted, else an exception is raised. It is also an exception case to attempt to mix the single-/multiple- value styles together, either through multiple ValuesBase.values() calls or by sending a list + kwargs at the same time.

        Note

        Passing a list of multiple values to ValuesBase.values() is not the same as passing a list of parameter sets to the Connection.execute() method. Passing a list of parameter sets to ValuesBase.values() produces a construct of this form:

        INSERT INTO table (col1, col2, col3) VALUES
                        (col1_0, col2_0, col3_0),
                        (col1_1, col2_1, col3_1),
                        ...

        whereas a list of parameter sets passed to Connection.execute() has the effect of using the DBAPI executemany() method, which provides a high-performance system of invoking a single-row INSERT statement many times against a series of parameter sets. The “executemany” style is supported by all database backends, as it does not depend on a special SQL syntax. (Both invocation styles are sketched in the example following this parameter listing.)

        New in version 0.8: Support for multiple-VALUES INSERT statements.
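
        As a rough sketch of the two invocation styles, assuming a users Table and a Connection conn (the multiple-VALUES form requires a backend which supports that syntax):

        # one INSERT statement with a multi-row VALUES clause
        conn.execute(
            users.insert().values([
                {"name": "some name"},
                {"name": "some other name"},
            ])
        )

        # DBAPI executemany(): a single-row INSERT statement is invoked
        # once for each parameter set
        conn.execute(
            users.insert(),
            [
                {"name": "some name"},
                {"name": "some other name"},
            ]
        )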

      See also

      Inserts, Updates and Deletes - SQL Expression Language Tutorial

      insert() - produce an INSERT statement

      update() - produce an UPDATE statement

      with_hint(text, selectable=None, dialect_name='*')
      inherited from the with_hint() method of UpdateBase

      Add a table hint for a single table to this INSERT/UPDATE/DELETE statement.

      Note

      UpdateBase.with_hint() currently applies only to Microsoft SQL Server. For MySQL INSERT/UPDATE/DELETE hints, use UpdateBase.prefix_with().

      The text of the hint is rendered in the appropriate location for the database backend in use, relative to the Table that is the subject of this statement, or optionally to that of the given Table passed as the selectable argument.

      The dialect_name option will limit the rendering of a particular hint to a particular backend. For example, to add a hint that only takes effect for SQL Server:

      mytable.insert().with_hint("WITH (PAGLOCK)", dialect_name="mssql")

      New in version 0.7.6.

      Parameters:
      • text – Text of the hint.
      • selectable – optional Table that specifies an element of the FROM clause within an UPDATE or DELETE to be the subject of the hint - applies only to certain backends.
      • dialect_name – defaults to *, if specified as the name of a particular dialect, will apply these hints only when that dialect is in use.
      class sqlalchemy.sql.expression.Update(table, whereclause, values=None, inline=False, bind=None, prefixes=None, returning=None, **kwargs)

      Bases: sqlalchemy.sql.expression.ValuesBase

      Represent an Update construct.

      The Update object is created using the update() function.

      bind
      inherited from the bind attribute of UpdateBase

      Return a ‘bind’ linked to this UpdateBase or a Table associated with it.

      compare(other, **kw)
      inherited from the compare() method of ClauseElement

      Compare this ClauseElement to the given ClauseElement.

      Subclasses should override the default behavior, which is a straight identity comparison.

      **kw are arguments consumed by subclass compare() methods and may be used to modify the criteria for comparison. (see ColumnElement)

      compile(bind=None, dialect=None, **kw)
      inherited from the compile() method of ClauseElement

      Compile this SQL expression.

      The return value is a Compiled object. Calling str() or unicode() on the returned value will yield a string representation of the result. The Compiled object also can return a dictionary of bind parameter names and values using the params accessor.

      Parameters:
      • bind – An Engine or Connection from which a Compiled will be acquired. This argument takes precedence over this ClauseElement‘s bound engine, if any.
      • column_keys – Used for INSERT and UPDATE statements, a list of column names which should be present in the VALUES clause of the compiled statement. If None, all columns from the target table object are rendered.
      • dialect – A Dialect instance from which a Compiled will be acquired. This argument takes precedence over the bind argument as well as this ClauseElement‘s bound engine, if any.
      • inline – Used for INSERT statements, for a dialect which does not support inline retrieval of newly generated primary key columns, will force the expression used to create the new primary key value to be rendered inline within the INSERT statement’s VALUES clause. This typically refers to Sequence execution but may also refer to any server-side default generation function associated with a primary key Column.
      execute(*multiparams, **params)
      inherited from the execute() method of Executable

      Compile and execute this Executable.

      execution_options(**kw)
      inherited from the execution_options() method of Executable

      Set non-SQL options for the statement which take effect during execution.

      Execution options can be set on a per-statement or per Connection basis. Additionally, the Engine and ORM Query objects provide access to execution options which they in turn configure upon connections.

      The execution_options() method is generative. A new instance of this statement is returned that contains the options:

      statement = select([table.c.x, table.c.y])
      statement = statement.execution_options(autocommit=True)

      Note that only a subset of possible execution options can be applied to a statement - these include “autocommit” and “stream_results”, but not “isolation_level” or “compiled_cache”. See Connection.execution_options() for a full list of possible options.

      params(*arg, **kw)
      inherited from the params() method of UpdateBase

      Set the parameters for the statement.

      This method raises NotImplementedError on the base class, and is overridden by ValuesBase to provide the SET/VALUES clause of UPDATE and INSERT.

      prefix_with(*expr, **kw)
      inherited from the prefix_with() method of HasPrefixes

      Add one or more expressions following the statement keyword, i.e. SELECT, INSERT, UPDATE, or DELETE. Generative.

      This is used to support backend-specific prefix keywords such as those provided by MySQL.

      E.g.:

      stmt = table.insert().prefix_with("LOW_PRIORITY", dialect="mysql")

      Multiple prefixes can be specified by multiple calls to prefix_with().

      Parameters:
      • *expr – textual or ClauseElement construct which will be rendered following the INSERT, UPDATE, or DELETE keyword.
      • **kw – A single keyword ‘dialect’ is accepted. This is an optional string dialect name which will limit rendering of this prefix to only that dialect.
      returning(*cols)
      inherited from the returning() method of UpdateBase

      Add a RETURNING or equivalent clause to this statement.

      The given list of columns represents columns within the table that is the target of the INSERT, UPDATE, or DELETE. Each element can be any column expression. Table objects will be expanded into their individual columns.

      Upon compilation, a RETURNING clause, or database equivalent, will be rendered within the statement. For INSERT and UPDATE, the values are the newly inserted/updated values. For DELETE, the values are those of the rows which were deleted.

      Upon execution, the values of the columns to be returned are made available via the result set and can be iterated using fetchone() and similar. For DBAPIs which do not natively support returning values (e.g. cx_oracle), SQLAlchemy will approximate this behavior at the result level so that a reasonable amount of behavioral neutrality is provided.

      Note that not all databases/DBAPIs support RETURNING. For those backends with no support, an exception is raised upon compilation and/or execution. For those who do support it, the functionality across backends varies greatly, including restrictions on executemany() and other statements which return multiple rows. Please read the documentation notes for the database in use in order to determine the availability of RETURNING.
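
      For example, an UPDATE which reports the primary key values of the rows it affects - a brief sketch assuming a users Table with a fullname column, a Connection conn, and a backend with native RETURNING support:

      stmt = users.update().\
          where(users.c.name == 'jack').\
          values(fullname='Jack Jones').\
          returning(users.c.id)
      result = conn.execute(stmt)
      updated_ids = [row[0] for row in result]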

      scalar(*multiparams, **params)
      inherited from the scalar() method of Executable

      Compile and execute this Executable, returning the result’s scalar representation.

      self_group(against=None)
      inherited from the self_group() method of ClauseElement

      Apply a ‘grouping’ to this ClauseElement.

      This method is overridden by subclasses to return a “grouping” construct, i.e. parentheses. In particular it’s used by “binary” expressions to provide a grouping around themselves when placed into a larger expression, as well as by select() constructs when placed into the FROM clause of another select(). (Note that subqueries should normally be created using the Select.alias() method, as many platforms require nested SELECT statements to be named).

      As expressions are composed together, the application of self_group() is automatic - end-user code should never need to use this method directly. Note that SQLAlchemy’s clause constructs take operator precedence into account - so parentheses might not be needed, for example, in an expression like x OR (y AND z) - AND takes precedence over OR.

      The base self_group() method of ClauseElement just returns self.

      unique_params(*optionaldict, **kwargs)
      inherited from the unique_params() method of ClauseElement

      Return a copy with bindparam() elements replaced.

      Same functionality as params(), except adds unique=True to affected bind parameters so that multiple statements can be used.

      values(*args, **kwargs)
      inherited from the values() method of ValuesBase

      Specify a fixed VALUES clause for an INSERT statement, or the SET clause for an UPDATE.

      Note that the Insert and Update constructs support per-execution time formatting of the VALUES and/or SET clauses, based on the arguments passed to Connection.execute(). However, the ValuesBase.values() method can be used to “fix” a particular set of parameters into the statement.

      Multiple calls to ValuesBase.values() will produce a new construct, each one with the parameter list modified to include the new parameters sent. In the typical case of a single dictionary of parameters, the newly passed keys will replace the same keys in the previous construct. In the case of a list-based “multiple values” construct, each new list of values is extended onto the existing list of values.

      Parameters:
      • **kwargs

        key value pairs representing the string key of a Column mapped to the value to be rendered into the VALUES or SET clause:

        users.insert().values(name="some name")
        
        users.update().where(users.c.id==5).values(name="some name")
      • *args

        Alternatively, a dictionary, tuple or list of dictionaries or tuples can be passed as a single positional argument in order to form the VALUES or SET clause of the statement. The single dictionary form works the same as the kwargs form:

        users.insert().values({"name": "some name"})

        If a tuple is passed, the tuple should contain the same number of columns as the target Table:

        users.insert().values((5, "some name"))

        The Insert construct also supports being rendered with a multiple-VALUES clause, for those backends which support this SQL syntax (SQLite, Postgresql, MySQL). This mode is indicated by passing a list of one or more dictionaries/tuples:

        users.insert().values([
                            {"name": "some name"},
                            {"name": "some other name"},
                            {"name": "yet another name"},
                        ])

        In the case of an Update construct, only the single dictionary/tuple form is accepted, else an exception is raised. It is also an exception case to attempt to mix the single-/multiple- value styles together, either through multiple ValuesBase.values() calls or by sending a list + kwargs at the same time.

        Note

        Passing a list of multiple values to ValuesBase.values() is not the same as passing a list of parameter sets to the Connection.execute() method. Passing a list of parameter sets to ValuesBase.values() produces a construct of this form:

        INSERT INTO table (col1, col2, col3) VALUES
                        (col1_0, col2_0, col3_0),
                        (col1_1, col2_1, col3_1),
                        ...

        whereas a list of parameter sets passed to Connection.execute() has the effect of using the DBAPI executemany() method, which provides a high-performance system of invoking a single-row INSERT statement many times against a series of parameter sets. The “executemany” style is supported by all database backends, as it does not depend on a special SQL syntax.

        New in version 0.8: Support for multiple-VALUES INSERT statements.

      See also

      Inserts, Updates and Deletes - SQL Expression Language Tutorial

      insert() - produce an INSERT statement

      update() - produce an UPDATE statement

      where(whereclause)

      Return a new update() construct with the given expression added to its WHERE clause, joined to the existing clause via AND, if any.
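
      E.g., a brief sketch assuming a users Table and a Connection conn; the two where() calls below are joined via AND:

      stmt = users.update().\
          values(name='ed').\
          where(users.c.name == 'jack').\
          where(users.c.id > 10)
      conn.execute(stmt)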

      with_hint(text, selectable=None, dialect_name='*')
      inherited from the with_hint() method of UpdateBase

      Add a table hint for a single table to this INSERT/UPDATE/DELETE statement.

      Note

      UpdateBase.with_hint() currently applies only to Microsoft SQL Server. For MySQL INSERT/UPDATE/DELETE hints, use UpdateBase.prefix_with().

      The text of the hint is rendered in the appropriate location for the database backend in use, relative to the Table that is the subject of this statement, or optionally to that of the given Table passed as the selectable argument.

      The dialect_name option will limit the rendering of a particular hint to a particular backend. For example, to add a hint that only takes effect for SQL Server:

      mytable.insert().with_hint("WITH (PAGLOCK)", dialect_name="mssql")

      New in version 0.7.6.

      Parameters:
      • text – Text of the hint.
      • selectable – optional Table that specifies an element of the FROM clause within an UPDATE or DELETE to be the subject of the hint - applies only to certain backends.
      • dialect_name – defaults to *, if specified as the name of a particular dialect, will apply these hints only when that dialect is in use.
      class sqlalchemy.sql.expression.UpdateBase

      Bases: sqlalchemy.sql.expression.HasPrefixes, sqlalchemy.sql.expression.Executable, sqlalchemy.sql.expression.ClauseElement

      Form the base for INSERT, UPDATE, and DELETE statements.

      bind

      Return a ‘bind’ linked to this UpdateBase or a Table associated with it.

      params(*arg, **kw)

      Set the parameters for the statement.

      This method raises NotImplementedError on the base class, and is overridden by ValuesBase to provide the SET/VALUES clause of UPDATE and INSERT.

      returning(*cols)

      Add a RETURNING or equivalent clause to this statement.

      The given list of columns represents columns within the table that is the target of the INSERT, UPDATE, or DELETE. Each element can be any column expression. Table objects will be expanded into their individual columns.

      Upon compilation, a RETURNING clause, or database equivalent, will be rendered within the statement. For INSERT and UPDATE, the values are the newly inserted/updated values. For DELETE, the values are those of the rows which were deleted.

      Upon execution, the values of the columns to be returned are made available via the result set and can be iterated using fetchone() and similar. For DBAPIs which do not natively support returning values (e.g. cx_oracle), SQLAlchemy will approximate this behavior at the result level so that a reasonable amount of behavioral neutrality is provided.

      Note that not all databases/DBAPIs support RETURNING. For those backends with no support, an exception is raised upon compilation and/or execution. For those who do support it, the functionality across backends varies greatly, including restrictions on executemany() and other statements which return multiple rows. Please read the documentation notes for the database in use in order to determine the availability of RETURNING.

      with_hint(text, selectable=None, dialect_name='*')

      Add a table hint for a single table to this INSERT/UPDATE/DELETE statement.

      Note

      UpdateBase.with_hint() currently applies only to Microsoft SQL Server. For MySQL INSERT/UPDATE/DELETE hints, use UpdateBase.prefix_with().

      The text of the hint is rendered in the appropriate location for the database backend in use, relative to the Table that is the subject of this statement, or optionally to that of the given Table passed as the selectable argument.

      The dialect_name option will limit the rendering of a particular hint to a particular backend. For example, to add a hint that only takes effect for SQL Server:

      mytable.insert().with_hint("WITH (PAGLOCK)", dialect_name="mssql")

      New in version 0.7.6.

      Parameters:
      • text – Text of the hint.
      • selectable – optional Table that specifies an element of the FROM clause within an UPDATE or DELETE to be the subject of the hint - applies only to certain backends.
      • dialect_name – defaults to *, if specified as the name of a particular dialect, will apply these hints only when that dialect is in use.
      class sqlalchemy.sql.expression.ValuesBase(table, values, prefixes)

      Bases: sqlalchemy.sql.expression.UpdateBase

      Supplies support for ValuesBase.values() to INSERT and UPDATE constructs.

      values(*args, **kwargs)

      Specify a fixed VALUES clause for an INSERT statement, or the SET clause for an UPDATE.

      Note that the Insert and Update constructs support per-execution time formatting of the VALUES and/or SET clauses, based on the arguments passed to Connection.execute(). However, the ValuesBase.values() method can be used to “fix” a particular set of parameters into the statement.

      Multiple calls to ValuesBase.values() will produce a new construct, each one with the parameter list modified to include the new parameters sent. In the typical case of a single dictionary of parameters, the newly passed keys will replace the same keys in the previous construct. In the case of a list-based “multiple values” construct, each new list of values is extended onto the existing list of values.

      Parameters:
      • **kwargs

        key value pairs representing the string key of a Column mapped to the value to be rendered into the VALUES or SET clause:

        users.insert().values(name="some name")
        
        users.update().where(users.c.id==5).values(name="some name")
      • *args

        Alternatively, a dictionary, tuple or list of dictionaries or tuples can be passed as a single positional argument in order to form the VALUES or SET clause of the statement. The single dictionary form works the same as the kwargs form:

        users.insert().values({"name": "some name"})

        If a tuple is passed, the tuple should contain the same number of columns as the target Table:

        users.insert().values((5, "some name"))

        The Insert construct also supports being rendered with a multiple-VALUES clause, for those backends which support this SQL syntax (SQLite, Postgresql, MySQL). This mode is indicated by passing a list of one or more dictionaries/tuples:

        users.insert().values([
                            {"name": "some name"},
                            {"name": "some other name"},
                            {"name": "yet another name"},
                        ])

        In the case of an Update construct, only the single dictionary/tuple form is accepted, else an exception is raised. It is also an exception case to attempt to mix the single-/multiple- value styles together, either through multiple ValuesBase.values() calls or by sending a list + kwargs at the same time.

        Note

        Passing a list of multiple values to ValuesBase.values() is not the same as passing a list of parameter sets to the Connection.execute() method. Passing a list of parameter sets to ValuesBase.values() produces a construct of this form:

        INSERT INTO table (col1, col2, col3) VALUES
                        (col1_0, col2_0, col3_0),
                        (col1_1, col2_1, col3_1),
                        ...

        whereas a list of parameter sets passed to Connection.execute() has the effect of using the DBAPI executemany() method, which provides a high-performance system of invoking a single-row INSERT statement many times against a series of parameter sets. The “executemany” style is supported by all database backends, as it does not depend on a special SQL syntax.

        New in version 0.8: Support for multiple-VALUES INSERT statements.

      See also

      Inserts, Updates and Deletes - SQL Expression Language Tutorial

      insert() - produce an INSERT statement

      update() - produce an UPDATE statement

      SQLAlchemy-0.8.4/doc/core/engines.html


      Engine Configuration

      The Engine is the starting point for any SQLAlchemy application. It’s “home base” for the actual database and its DBAPI, delivered to the SQLAlchemy application through a connection pool and a Dialect, which describes how to talk to a specific kind of database/DBAPI combination.

      The general structure can be illustrated as follows:

      [image: ../_images/sqla_engine_arch.png]

      Where above, an Engine references both a Dialect and a Pool, which together interpret the DBAPI’s module functions as well as the behavior of the database.

      Creating an engine is just a matter of issuing a single call, create_engine():

      from sqlalchemy import create_engine
      engine = create_engine('postgresql://scott:tiger@localhost:5432/mydatabase')

      The above engine creates a Dialect object tailored towards PostgreSQL, as well as a Pool object which will establish a DBAPI connection at localhost:5432 when a connection request is first received. Note that the Engine and its underlying Pool do not establish the first actual DBAPI connection until the Engine.connect() method is called, or an operation which is dependent on this method such as Engine.execute() is invoked. In this way, Engine and Pool can be said to have a lazy initialization behavior.
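
      As a brief sketch of this lazy behavior, continuing from the engine created above:

      # at this point no DBAPI connection has been established
      connection = engine.connect()   # the first actual DBAPI connection is made here
      result = connection.execute("select 'hello world'")
      print result.scalar()
      connection.close()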

      The Engine, once created, can either be used directly to interact with the database, or can be passed to a Session object to work with the ORM. This section covers the details of configuring an Engine. The next section, Working with Engines and Connections, will detail the usage API of the Engine and similar, typically for non-ORM applications.

      Supported Databases

      SQLAlchemy includes many Dialect implementations for various backends. Dialects for the most common databases are included with SQLAlchemy; a handful of others require an additional install of a separate dialect.

      See the section Dialects for information on the various backends available.

      Engine Creation API

      Keyword options can also be specified to create_engine(), following the string URL as follows:

      db = create_engine('postgresql://...', encoding='latin1', echo=True)
      sqlalchemy.create_engine(*args, **kwargs)

      Create a new Engine instance.

      The standard calling form is to send the URL as the first positional argument, usually a string that indicates database dialect and connection arguments. Additional keyword arguments may then follow it which establish various options on the resulting Engine and its underlying Dialect and Pool constructs.

      The string form of the URL is dialect+driver://user:password@host/dbname[?key=value..], where dialect is a database name such as mysql, oracle, postgresql, etc., and driver the name of a DBAPI, such as psycopg2, pyodbc, cx_oracle, etc. Alternatively, the URL can be an instance of URL.

      **kwargs takes a wide variety of options which are routed towards their appropriate components. Arguments may be specific to the Engine, the underlying Dialect, as well as the Pool. Specific dialects also accept keyword arguments that are unique to that dialect. Here, we describe the parameters that are common to most create_engine() usage.

      Once established, the newly resulting Engine will request a connection from the underlying Pool once Engine.connect() is called, or a method which depends on it such as Engine.execute() is invoked. The Pool in turn will establish the first actual DBAPI connection when this request is received. The create_engine() call itself does not establish any actual DBAPI connections directly.

      See also:

      Engine Configuration

      Working with Engines and Connections

      Parameters:
      • case_sensitive=True

        if False, result column names will match in a case-insensitive fashion, that is, row['SomeColumn'].

        Changed in version 0.8: By default, result row names match case-sensitively. In version 0.7 and prior, all matches were case-insensitive.

      • connect_args – a dictionary of options which will be passed directly to the DBAPI’s connect() method as additional keyword arguments. See the example at Custom DBAPI connect() arguments.
      • convert_unicode=False

        if set to True, sets the default behavior of convert_unicode on the String type to True, regardless of a setting of False on an individual String type, thus causing all String-based columns to accommodate Python unicode objects. This flag is useful as an engine-wide setting when using a DBAPI that does not natively support Python unicode objects and raises an error when one is received (such as pyodbc with FreeTDS).

        See String for further details on what this flag indicates.

      • creator – a callable which returns a DBAPI connection. This creation function will be passed to the underlying connection pool and will be used to create all new database connections. Usage of this function causes connection parameters specified in the URL argument to be bypassed.
      • echo=False – if True, the Engine will log all statements as well as a repr() of their parameter lists to the engine’s logger, which defaults to sys.stdout. The echo attribute of Engine can be modified at any time to turn logging on and off. If set to the string "debug", result rows will be printed to the standard output as well. This flag ultimately controls a Python logger; see Configuring Logging for information on how to configure logging directly.
      • echo_pool=False – if True, the connection pool will log all checkouts/checkins to the logging stream, which defaults to sys.stdout. This flag ultimately controls a Python logger; see Configuring Logging for information on how to configure logging directly.
      • encoding

        Defaults to utf-8. This is the string encoding used by SQLAlchemy for string encode/decode operations which occur within SQLAlchemy, outside of the DBAPI. Most modern DBAPIs feature some degree of direct support for Python unicode objects, what you see in Python 2 as a string of the form u'some string'. For those scenarios where the DBAPI is detected as not supporting a Python unicode object, this encoding is used to determine the source/destination encoding. It is not used for those cases where the DBAPI handles unicode directly.

        To properly configure a system to accommodate Python unicode objects, the DBAPI should be configured to handle unicode to the greatest degree as is appropriate - see the notes on unicode pertaining to the specific target database in use at Dialects.

        Areas where string encoding may need to be accommodated outside of the DBAPI include zero or more of:

        • the values passed to bound parameters, corresponding to the Unicode type or the String type when convert_unicode is True;
        • the values returned in result set columns corresponding to the Unicode type or the String type when convert_unicode is True;
        • the string SQL statement passed to the DBAPI’s cursor.execute() method;
        • the string names of the keys in the bound parameter dictionary passed to the DBAPI’s cursor.execute() as well as cursor.setinputsizes() methods;
        • the string column names retrieved from the DBAPI’s cursor.description attribute.

        When using Python 3, the DBAPI is required to support all of the above values as Python unicode objects, which in Python 3 are just known as str. In Python 2, the DBAPI does not specify unicode behavior at all, so SQLAlchemy must make decisions for each of the above values on a per-DBAPI basis - implementations are completely inconsistent in their behavior.

      • execution_options – Dictionary execution options which will be applied to all connections. See execution_options()
      • implicit_returning=True – When True, a RETURNING-compatible construct, if available, will be used to fetch newly generated primary key values when a single row INSERT statement is emitted with no existing returning() clause. This applies to those backends which support RETURNING or a compatible construct, including Postgresql, Firebird, Oracle, Microsoft SQL Server. Set this to False to disable the automatic usage of RETURNING.
      • label_length=None – optional integer value which limits the size of dynamically generated column labels to that many characters. If less than 6, labels are generated as “_(counter)”. If None, the value of dialect.max_identifier_length is used instead.
      • listeners – A list of one or more PoolListener objects which will receive connection pool events.
      • logging_name – String identifier which will be used within the “name” field of logging records generated within the “sqlalchemy.engine” logger. Defaults to a hexstring of the object’s id.
      • max_overflow=10 – the number of connections to allow in connection pool “overflow”, that is connections that can be opened above and beyond the pool_size setting, which defaults to five. This is only used with QueuePool.
      • module=None – reference to a Python module object (the module itself, not its string name). Specifies an alternate DBAPI module to be used by the engine’s dialect. Each sub-dialect references a specific DBAPI which will be imported before first connect. This parameter causes the import to be bypassed, and the given module to be used instead. Can be used for testing of DBAPIs as well as to inject “mock” DBAPI implementations into the Engine.
      • pool=None – an already-constructed instance of Pool, such as a QueuePool instance. If non-None, this pool will be used directly as the underlying connection pool for the engine, bypassing whatever connection parameters are present in the URL argument. For information on constructing connection pools manually, see Connection Pooling.
      • poolclass=None – a Pool subclass, which will be used to create a connection pool instance using the connection parameters given in the URL. Note this differs from pool in that you don’t actually instantiate the pool in this case, you just indicate what type of pool to be used.
      • pool_logging_name – String identifier which will be used within the “name” field of logging records generated within the “sqlalchemy.pool” logger. Defaults to a hexstring of the object’s id.
      • pool_size=5 – the number of connections to keep open inside the connection pool. This is used with QueuePool as well as SingletonThreadPool. With QueuePool, a pool_size setting of 0 indicates no limit; to disable pooling, set poolclass to NullPool instead. (A combined example of several pool-related parameters follows this listing.)
      • pool_recycle=-1 – this setting causes the pool to recycle connections after the given number of seconds has passed. It defaults to -1, or no timeout. For example, setting to 3600 means connections will be recycled after one hour. Note that MySQL in particular will disconnect automatically if no activity is detected on a connection for eight hours (although this is configurable with the MySQLDB connection itself and the server configuration as well).
      • pool_reset_on_return=’rollback’

        set the “reset on return” behavior of the pool, which is whether rollback(), commit(), or nothing is called upon connections being returned to the pool. See the docstring for reset_on_return at Pool.

        New in version 0.7.6.

      • pool_timeout=30 – number of seconds to wait before giving up on getting a connection from the pool. This is only used with QueuePool.
      • strategy=’plain’

        selects alternate engine implementations. Currently available are:

      • executor=None – a function taking arguments (sql, *multiparams, **params), to which the mock strategy will dispatch all statement execution. Used only by strategy='mock'.
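
      For example, a sketch combining several of the pool- and logging-related parameters described above (URL and values hypothetical):

      engine = create_engine(
          'mysql://scott:tiger@localhost/test',
          pool_size=10,        # keep up to ten connections open in the pool
          max_overflow=20,     # allow up to twenty additional "overflow" connections
          pool_recycle=3600,   # recycle connections after one hour
          echo=True            # log all SQL via the "sqlalchemy.engine" logger
      )
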
      sqlalchemy.engine_from_config(configuration, prefix='sqlalchemy.', **kwargs)

      Create a new Engine instance using a configuration dictionary.

      The dictionary is typically produced from a config file where keys are prefixed, such as sqlalchemy.url, sqlalchemy.echo, etc. The ‘prefix’ argument indicates the prefix to be searched for.

      A select set of keyword arguments will be “coerced” to their expected type based on string values. In a future release, this functionality will be expanded and include dialect-specific arguments.
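
      A minimal sketch, with hypothetical dictionary contents:

      from sqlalchemy import engine_from_config

      config = {
          'sqlalchemy.url': 'postgresql://scott:tiger@localhost/test',
          'sqlalchemy.echo': 'true',
          'sqlalchemy.pool_recycle': '3600',
      }
      engine = engine_from_config(config, prefix='sqlalchemy.')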

      Database Urls

      SQLAlchemy indicates the source of an Engine strictly via RFC-1738 style URLs, combined with optional keyword arguments to specify options for the Engine. The form of the URL is:

      dialect+driver://username:password@host:port/database

      Dialect names include the identifying name of the SQLAlchemy dialect, such as sqlite, mysql, postgresql, oracle, mssql, or firebird. The drivername is the name of the DBAPI to be used to connect to the database, in all lowercase letters. If not specified, a “default” DBAPI will be imported if available - this default is typically the most widely known driver available for that backend (e.g. cx_oracle, pysqlite/sqlite3, psycopg2, mysqldb). For Jython connections, specify the zxjdbc driver, which is the JDBC-DBAPI bridge included with Jython.

      sqlalchemy.engine.url.make_url(name_or_url)

      Given a string or unicode instance, produce a new URL instance.

      The given string is parsed according to the RFC 1738 spec. If an existing URL object is passed, just returns the object.
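
      For example, with a hypothetical URL:

      from sqlalchemy.engine.url import make_url

      url = make_url('postgresql+psycopg2://scott:tiger@localhost:5432/mydatabase')
      print url.drivername, url.username, url.host, url.database
      # postgresql+psycopg2 scott localhost mydatabase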

      Postgresql

      The Postgresql dialect uses psycopg2 as the default DBAPI:

      # default
      engine = create_engine('postgresql://scott:tiger@localhost/mydatabase')
      
      # psycopg2
      engine = create_engine('postgresql+psycopg2://scott:tiger@localhost/mydatabase')
      
      # pg8000
      engine = create_engine('postgresql+pg8000://scott:tiger@localhost/mydatabase')
      
      # Jython
      engine = create_engine('postgresql+zxjdbc://scott:tiger@localhost/mydatabase')

      More notes on connecting to Postgresql at PostgreSQL.

      MySQL

      The MySQL dialect uses mysql-python as the default DBAPI:

      # default
      engine = create_engine('mysql://scott:tiger@localhost/foo')
      
      # mysql-python
      engine = create_engine('mysql+mysqldb://scott:tiger@localhost/foo')
      
      # OurSQL
      engine = create_engine('mysql+oursql://scott:tiger@localhost/foo')

      More notes on connecting to MySQL at MySQL.

      Oracle

      cx_oracle is usually used here:

      engine = create_engine('oracle://scott:tiger@127.0.0.1:1521/sidname')
      
      engine = create_engine('oracle+cx_oracle://scott:tiger@tnsname')

      More notes on connecting to Oracle at Oracle.

      Microsoft SQL Server

      There are a few drivers for SQL Server, currently PyODBC is the most solid:

      engine = create_engine('mssql+pyodbc://mydsn')

      More notes on connecting to SQL Server at Microsoft SQL Server.

      SQLite

      SQLite connects to file-based databases. The same URL format is used, omitting the hostname and using the “file” portion as the filename of the database. This has the effect of four slashes being present for an absolute file path:

      # sqlite://<nohostname>/<path>
      # where <path> is relative:
      engine = create_engine('sqlite:///foo.db')
      
      # or absolute, starting with a slash:
      engine = create_engine('sqlite:////absolute/path/to/foo.db')

      To use a SQLite :memory: database, specify an empty URL:

      engine = create_engine('sqlite://')

      More notes on connecting to SQLite at SQLite.

      Others

      See Dialects, the top-level page for all dialect documentation.

      URL API

      class sqlalchemy.engine.url.URL(drivername, username=None, password=None, host=None, port=None, database=None, query=None)

      Represent the components of a URL used to connect to a database.

      This object is suitable to be passed directly to a create_engine() call. The fields of the URL are parsed from a string by the make_url() function. The string format of the URL is an RFC-1738-style string.

      All initialization parameters are available as public attributes.

      Parameters:
      • drivername – the name of the database backend. This name will correspond to a module in sqlalchemy/databases or a third party plug-in.
      • username – The user name.
      • password – database password.
      • host – The name of the host.
      • port – The port number.
      • database – The database name.
      • query – A dictionary of options to be passed to the dialect and/or the DBAPI upon connect.
      get_dialect()

      Return the SQLAlchemy database dialect class corresponding to this URL’s driver name.

      translate_connect_args(names=[], **kw)

      Translate url attributes into a dictionary of connection arguments.

      Returns attributes of this url (host, database, username, password, port) as a plain dictionary. The attribute names are used as the keys by default. Unset or false attributes are omitted from the final dictionary.
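
      For example, the username attribute can be renamed to suit a DBAPI connect() signature - a brief sketch:

      from sqlalchemy.engine.url import make_url

      url = make_url('postgresql://scott:tiger@localhost:5432/mydatabase')
      url.translate_connect_args(username='user')
      # {'user': 'scott', 'password': 'tiger', 'host': 'localhost',
      #  'port': 5432, 'database': 'mydatabase'}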

      Parameters:
      • **kw – Optional, alternate key names for url attributes.
      • names – Deprecated. Same purpose as the keyword-based alternate names, but correlates the name to the original positionally.

      Pooling

      The Engine will ask the connection pool for a connection when the connect() or execute() methods are called. The default connection pool, QueuePool, will open connections to the database on an as-needed basis. As concurrent statements are executed, QueuePool will grow its pool of connections to a default size of five, and will allow a default “overflow” of ten. Since the Engine is essentially “home base” for the connection pool, it follows that you should keep a single Engine per database established within an application, rather than creating a new one for each connection.

      Note

      QueuePool is not used by default for SQLite engines. See SQLite for details on SQLite connection pool usage.

      For more information on connection pooling, see Connection Pooling.

      Custom DBAPI connect() arguments

      Custom arguments used when issuing the connect() call to the underlying DBAPI may be issued in three distinct ways. String-based arguments can be passed directly from the URL string as query arguments:

      db = create_engine('postgresql://scott:tiger@localhost/test?argument1=foo&argument2=bar')

      If SQLAlchemy’s database connector is aware of a particular query argument, it may convert its type from string to its proper type.

      create_engine() also takes an argument connect_args which is an additional dictionary that will be passed to connect(). This can be used when arguments of a type other than string are required, and SQLAlchemy’s database connector has no type conversion logic present for that parameter:

      db = create_engine('postgresql://scott:tiger@localhost/test', connect_args = {'argument1':17, 'argument2':'bar'})

      The most customizable connection method of all is to pass a creator argument, which specifies a callable that returns a DBAPI connection:

      def connect():
          return psycopg.connect(user='scott', host='localhost')
      
      db = create_engine('postgresql://', creator=connect)

      Configuring Logging

      Python’s standard logging module is used to implement informational and debug log output with SQLAlchemy. This allows SQLAlchemy’s logging to integrate in a standard way with other applications and libraries. The echo and echo_pool flags that are present on create_engine(), as well as the echo_uow flag used on Session, all interact with regular loggers.

      This section assumes familiarity with the above linked logging module. All logging performed by SQLAlchemy exists underneath the sqlalchemy namespace, as used by logging.getLogger('sqlalchemy'). When logging has been configured (e.g. via logging.basicConfig()), the general namespace of SA loggers that can be turned on is as follows:

      • sqlalchemy.engine - controls SQL echoing. set to logging.INFO for SQL query output, logging.DEBUG for query + result set output.
      • sqlalchemy.dialects - controls custom logging for SQL dialects. See the documentation of individual dialects for details.
      • sqlalchemy.pool - controls connection pool logging. set to logging.INFO or lower to log connection pool checkouts/checkins.
      • sqlalchemy.orm - controls logging of various ORM functions. set to logging.INFO for information on mapper configurations.

      For example, to log SQL queries using Python logging instead of the echo=True flag:

      import logging
      
      logging.basicConfig()
      logging.getLogger('sqlalchemy.engine').setLevel(logging.INFO)

      By default, the log level is set to logging.WARN within the entire sqlalchemy namespace so that no log operations occur, even within an application that has logging enabled otherwise.

      The echo flags present as keyword arguments to create_engine() and others as well as the echo property on Engine, when set to True, will first attempt to ensure that logging is enabled. Unfortunately, the logging module provides no way of determining if output has already been configured (note we are referring to whether a logging configuration has been set up, not just that the logging level is set). For this reason, any echo=True flags will result in a call to logging.basicConfig() using sys.stdout as the destination. It also sets up a default format using the level name, timestamp, and logger name. Note that this configuration has the effect of being configured in addition to any existing logger configurations. Therefore, when using Python logging, ensure all echo flags are set to False at all times, to avoid getting duplicate log lines.

      The logger name of an instance such as an Engine or Pool defaults to a truncated hex identifier string. To set this to a specific name, use the “logging_name” and “pool_logging_name” keyword arguments with sqlalchemy.create_engine().
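
      For example - a brief sketch, names hypothetical:

      engine = create_engine(
          'sqlite://',
          logging_name='myengine',      # used in the "name" field of this engine's log records
          pool_logging_name='mypool'    # likewise for the connection pool's records
      )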

      Note

      The SQLAlchemy Engine conserves Python function call overhead by only emitting log statements when the current logging level is detected as logging.INFO or logging.DEBUG. It only checks this level when a new connection is procured from the connection pool. Therefore when changing the logging configuration for an already-running application, any Connection that’s currently active, or more commonly a Session object that’s active in a transaction, won’t log any SQL according to the new configuration until a new Connection is procured (in the case of Session, this is after the current transaction ends and a new one begins).

      SQLAlchemy-0.8.4/doc/core/event.html


      Events

      SQLAlchemy includes an event API which publishes a wide variety of hooks into the internals of both SQLAlchemy Core and ORM.

      New in version 0.7: The system supersedes the previous system of “extension”, “proxy”, and “listener” classes.

      Event Registration

      Subscribing to an event occurs through a single API point, the listen() function. This function accepts a user-defined listening function, a string identifier which identifies the event to be intercepted, and a target. Additional positional and keyword arguments may be supported by specific types of events, which may specify alternate interfaces for the given event function, or provide instructions regarding secondary event targets based on the given target.

      The name of an event and the argument signature of a corresponding listener function are derived from a class-bound specification method, which exists bound to a marker class that’s described in the documentation. For example, the documentation for PoolEvents.connect() indicates that the event name is "connect" and that a user-defined listener function should receive two positional arguments:

      from sqlalchemy.event import listen
      from sqlalchemy.pool import Pool
      
      def my_on_connect(dbapi_con, connection_record):
          print "New DBAPI connection:", dbapi_con
      
      listen(Pool, 'connect', my_on_connect)

      Targets

      The listen() function is very flexible regarding targets. It generally accepts classes, instances of those classes, and related classes or objects from which the appropriate target can be derived. For example, the above mentioned "connect" event accepts Engine classes and objects as well as Pool classes and objects:

      from sqlalchemy.event import listen
      from sqlalchemy.pool import Pool, QueuePool
      from sqlalchemy import create_engine
      from sqlalchemy.engine import Engine
      import psycopg2
      
      def connect():
    return psycopg2.connect(user='ed', host='127.0.0.1', dbname='test')
      
      my_pool = QueuePool(connect)
      my_engine = create_engine('postgresql://ed@localhost/test')
      
      # associate listener with all instances of Pool
      listen(Pool, 'connect', my_on_connect)
      
      # associate listener with all instances of Pool
      # via the Engine class
      listen(Engine, 'connect', my_on_connect)
      
      # associate listener with my_pool
      listen(my_pool, 'connect', my_on_connect)
      
      # associate listener with my_engine.pool
      listen(my_engine, 'connect', my_on_connect)

      Modifiers

Some listeners allow modifiers to be passed to listen(). These modifiers sometimes provide alternate calling signatures for listeners. For example, with ORM events, some event listeners can have a return value which modifies the subsequent handling. By default, no listener ever requires a return value, but by passing retval=True this value can be supported:

import re

def validate_phone(target, value, oldvalue, initiator):
    """Strip non-numeric characters from a phone number"""

    return re.sub(r'[^0-9]', '', value)
      
      # setup listener on UserContact.phone attribute, instructing
      # it to use the return value
      listen(UserContact.phone, 'set', validate_phone, retval=True)

      Event Reference

      Both SQLAlchemy Core and SQLAlchemy ORM feature a wide variety of event hooks:

      • Core Events - these are described in Core Events and include event hooks specific to connection pool lifecycle, SQL statement execution, transaction lifecycle, and schema creation and teardown.
      • ORM Events - these are described in ORM Events, and include event hooks specific to class and attribute instrumentation, object initialization hooks, attribute on-change hooks, session state, flush, and commit hooks, mapper initialization, object/result population, and per-instance persistence hooks.

      API Reference

      sqlalchemy.event.listen(target, identifier, fn, *args, **kw)

      Register a listener function for the given target.

      e.g.:

      from sqlalchemy import event
      from sqlalchemy.schema import UniqueConstraint
      
      def unique_constraint_name(const, table):
          const.name = "uq_%s_%s" % (
              table.name,
              list(const.columns)[0].name
          )
      event.listen(
              UniqueConstraint,
              "after_parent_attach",
              unique_constraint_name)
      sqlalchemy.event.listens_for(target, identifier, *args, **kw)

      Decorate a function as a listener for the given target + identifier.

      e.g.:

      from sqlalchemy import event
      from sqlalchemy.schema import UniqueConstraint
      
      @event.listens_for(UniqueConstraint, "after_parent_attach")
      def unique_constraint_name(const, table):
          const.name = "uq_%s_%s" % (
              table.name,
              list(const.columns)[0].name
          )

      Core Events

      This section describes the event interfaces provided in SQLAlchemy Core. For an introduction to the event listening API, see Events. ORM events are described in ORM Events.

New in version 0.7: The event system supersedes the previous system of “extension”, “listener”, and “proxy” classes.

      Connection Pool Events

      class sqlalchemy.events.PoolEvents

      Bases: sqlalchemy.event.Events

      Available events for Pool.

      The methods here define the name of an event as well as the names of members that are passed to listener functions.

      e.g.:

      from sqlalchemy import event
      
      def my_on_checkout(dbapi_conn, connection_rec, connection_proxy):
          "handle an on checkout event"
      
      event.listen(Pool, 'checkout', my_on_checkout)

      In addition to accepting the Pool class and Pool instances, PoolEvents also accepts Engine objects and the Engine class as targets, which will be resolved to the .pool attribute of the given engine or the Pool class:

      engine = create_engine("postgresql://scott:tiger@localhost/test")
      
      # will associate with engine.pool
      event.listen(engine, 'checkout', my_on_checkout)
      checkin(dbapi_connection, connection_record)

      Called when a connection returns to the pool.

      Note that the connection may be closed, and may be None if the connection has been invalidated. checkin will not be called for detached connections. (They do not return to the pool.)

      Parameters:
      • dbapi_con – A raw DB-API connection
      • con_record – The _ConnectionRecord that persistently manages the connection
      checkout(dbapi_connection, connection_record, connection_proxy)

      Called when a connection is retrieved from the Pool.

      Parameters:
      • dbapi_con – A raw DB-API connection
      • con_record – The _ConnectionRecord that persistently manages the connection
      • con_proxy – The _ConnectionFairy which manages the connection for the span of the current checkout.

      If you raise a DisconnectionError, the current connection will be disposed and a fresh connection retrieved. Processing of all checkout listeners will abort and restart using the new connection.
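As a rough sketch of that pattern (the "SELECT 1" probe and the use of an in-memory SQLite engine are our own illustration), a checkout listener can test the connection and raise DisconnectionError so that the pool replaces it:

from sqlalchemy import create_engine, event, exc

engine = create_engine('sqlite://')

@event.listens_for(engine, 'checkout')
def ping_connection(dbapi_connection, connection_record, connection_proxy):
    cursor = dbapi_connection.cursor()
    try:
        cursor.execute("SELECT 1")
    except Exception:
        # tell the pool to throw this connection away and retry
        # the checkout with a brand new connection
        raise exc.DisconnectionError()
    cursor.close()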

      connect(dbapi_connection, connection_record)

Called once for each new DB-API connection created by the Pool’s creator().

      Parameters:
      • dbapi_con – A newly connected raw DB-API connection (not a SQLAlchemy Connection wrapper).
      • con_record – The _ConnectionRecord that persistently manages the connection
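The connect event is a natural place for per-connection setup that should run once for every new DBAPI connection. A sketch using SQLite (the PRAGMA itself is incidental to the event mechanics):

from sqlalchemy import create_engine, event

engine = create_engine('sqlite://')

@event.listens_for(engine, 'connect')
def set_sqlite_pragma(dbapi_connection, connection_record):
    # runs once per newly created DBAPI connection
    cursor = dbapi_connection.cursor()
    cursor.execute("PRAGMA foreign_keys=ON")
    cursor.close()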
      first_connect(dbapi_connection, connection_record)

      Called exactly once for the first DB-API connection.

      Parameters:
      • dbapi_con – A newly connected raw DB-API connection (not a SQLAlchemy Connection wrapper).
      • con_record – The _ConnectionRecord that persistently manages the connection
      reset(dbapi_con, con_record)

      Called before the “reset” action occurs for a pooled connection.

This event represents when the rollback() method is called on the DBAPI connection before it is returned to the pool. The behavior of “reset” can be controlled, or disabled entirely, using the reset_on_return pool argument.

The PoolEvents.reset() event is usually followed by the PoolEvents.checkin() event, except in those cases where the connection is discarded immediately after reset.

      Parameters:
      • dbapi_con – A raw DB-API connection
      • con_record – The _ConnectionRecord that persistently manages the connection

      New in version 0.8.

      SQL Execution and Connection Events

      class sqlalchemy.events.ConnectionEvents

      Bases: sqlalchemy.event.Events

      Available events for Connectable, which includes Connection and Engine.

      The methods here define the name of an event as well as the names of members that are passed to listener functions.

      An event listener can be associated with any Connectable class or instance, such as an Engine, e.g.:

      from sqlalchemy import event, create_engine
      
      def before_cursor_execute(conn, cursor, statement, parameters, context,
                                                      executemany):
          log.info("Received statement: %s" % statement)
      
      engine = create_engine('postgresql://scott:tiger@localhost/test')
      event.listen(engine, "before_cursor_execute", before_cursor_execute)

      or with a specific Connection:

      with engine.begin() as conn:
          @event.listens_for(conn, 'before_cursor_execute')
          def before_cursor_execute(conn, cursor, statement, parameters,
                                          context, executemany):
              log.info("Received statement: %s" % statement)

      The before_execute() and before_cursor_execute() events can also be established with the retval=True flag, which allows modification of the statement and parameters to be sent to the database. The before_cursor_execute() event is particularly useful here to add ad-hoc string transformations, such as comments, to all executions:

      from sqlalchemy.engine import Engine
      from sqlalchemy import event
      
      @event.listens_for(Engine, "before_cursor_execute", retval=True)
      def comment_sql_calls(conn, cursor, statement, parameters,
                                          context, executemany):
          statement = statement + " -- some comment"
          return statement, parameters

      Note

      ConnectionEvents can be established on any combination of Engine, Connection, as well as instances of each of those classes. Events across all four scopes will fire off for a given instance of Connection. However, for performance reasons, the Connection object determines at instantiation time whether or not its parent Engine has event listeners established. Event listeners added to the Engine class or to an instance of Engine after the instantiation of a dependent Connection instance will usually not be available on that Connection instance. The newly added listeners will instead take effect for Connection instances created subsequent to those event listeners being established on the parent Engine class or instance.

      Parameters:retval=False – Applies to the before_execute() and before_cursor_execute() events only. When True, the user-defined event function must have a return value, which is a tuple of parameters that replace the given statement and parameters. See those methods for a description of specific return arguments.

      Changed in version 0.8: ConnectionEvents can now be associated with any Connectable including Connection, in addition to the existing support for Engine.

      after_cursor_execute(conn, cursor, statement, parameters, context, executemany)

      Intercept low-level cursor execute() events after execution.

      Parameters:
      • connConnection object
      • cursor – DBAPI cursor object. Will have results pending if the statement was a SELECT, but these should not be consumed as they will be needed by the ResultProxy.
      • statement – string SQL statement
      • parameters – Dictionary, tuple, or list of parameters being passed to the execute() or executemany() method of the DBAPI cursor. In some cases may be None.
      • contextExecutionContext object in use. May be None.
      • executemany – boolean, if True, this is an executemany() call, if False, this is an execute() call.
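A common use of this hook together with before_cursor_execute() is simple query timing. The sketch below stashes a start time on the ExecutionContext under an ad-hoc attribute (_query_start_time is our own name, not a documented attribute):

import logging
import time

from sqlalchemy import event
from sqlalchemy.engine import Engine

logger = logging.getLogger("myapp.sqltime")

@event.listens_for(Engine, "before_cursor_execute")
def before_cursor_execute(conn, cursor, statement, parameters,
                                context, executemany):
    context._query_start_time = time.time()

@event.listens_for(Engine, "after_cursor_execute")
def after_cursor_execute(conn, cursor, statement, parameters,
                                context, executemany):
    total = time.time() - context._query_start_time
    logger.debug("query complete in %f sec: %s", total, statement)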
      after_execute(conn, clauseelement, multiparams, params, result)

      Intercept high level execute() events after execute.

      Parameters:
      • connConnection object
      • clauseelement – SQL expression construct, Compiled instance, or string statement passed to Connection.execute().
      • multiparams – Multiple parameter sets, a list of dictionaries.
      • params – Single parameter set, a single dictionary.
      • resultResultProxy generated by the execution.
      before_cursor_execute(conn, cursor, statement, parameters, context, executemany)

      Intercept low-level cursor execute() events before execution, receiving the string SQL statement and DBAPI-specific parameter list to be invoked against a cursor.

      This event is a good choice for logging as well as late modifications to the SQL string. It’s less ideal for parameter modifications except for those which are specific to a target backend.

      This event can be optionally established with the retval=True flag. The statement and parameters arguments should be returned as a two-tuple in this case:

      @event.listens_for(Engine, "before_cursor_execute", retval=True)
      def before_cursor_execute(conn, cursor, statement,
                      parameters, context, executemany):
          # do something with statement, parameters
          return statement, parameters

      See the example at ConnectionEvents.

      Parameters:
      • connConnection object
      • cursor – DBAPI cursor object
      • statement – string SQL statement
      • parameters – Dictionary, tuple, or list of parameters being passed to the execute() or executemany() method of the DBAPI cursor. In some cases may be None.
      • contextExecutionContext object in use. May be None.
      • executemany – boolean, if True, this is an executemany() call, if False, this is an execute() call.

      See also:

      before_execute()

      after_cursor_execute()

      before_execute(conn, clauseelement, multiparams, params)

      Intercept high level execute() events, receiving uncompiled SQL constructs and other objects prior to rendering into SQL.

      This event is good for debugging SQL compilation issues as well as early manipulation of the parameters being sent to the database, as the parameter lists will be in a consistent format here.

      This event can be optionally established with the retval=True flag. The clauseelement, multiparams, and params arguments should be returned as a three-tuple in this case:

      @event.listens_for(Engine, "before_execute", retval=True)
def before_execute(conn, clauseelement, multiparams, params):
          # do something with clauseelement, multiparams, params
          return clauseelement, multiparams, params
      Parameters:
      • connConnection object
      • clauseelement – SQL expression construct, Compiled instance, or string statement passed to Connection.execute().
      • multiparams – Multiple parameter sets, a list of dictionaries.
      • params – Single parameter set, a single dictionary.

      See also:

      before_cursor_execute()

      begin(conn)

      Intercept begin() events.

      Parameters:connConnection object
      begin_twophase(conn, xid)

      Intercept begin_twophase() events.

      Parameters:
      • connConnection object
      • xid – two-phase XID identifier
      commit(conn)

      Intercept commit() events, as initiated by a Transaction.

      Note that the Pool may also “auto-commit” a DBAPI connection upon checkin, if the reset_on_return flag is set to the value 'commit'. To intercept this commit, use the PoolEvents.reset() hook.

      Parameters:connConnection object
      commit_twophase(conn, xid, is_prepared)

      Intercept commit_twophase() events.

      Parameters:
      dbapi_error(conn, cursor, statement, parameters, context, exception)

      Intercept a raw DBAPI error.

This event is called with the DBAPI exception instance received from the DBAPI itself, before SQLAlchemy wraps the exception with its own exception wrappers, and before any other operations are performed on the DBAPI cursor; the existing transaction remains in effect as well as any state on the cursor.

      The use case here is to inject low-level exception handling into an Engine, typically for logging and debugging purposes. In general, user code should not modify any state or throw any exceptions here as this will interfere with SQLAlchemy’s cleanup and error handling routines.

      Subsequent to this hook, SQLAlchemy may attempt any number of operations on the connection/cursor, including closing the cursor, rolling back of the transaction in the case of connectionless execution, and disposing of the entire connection pool if a “disconnect” was detected. The exception is then wrapped in a SQLAlchemy DBAPI exception wrapper and re-thrown.

      Parameters:
      • connConnection object
      • cursor – DBAPI cursor object
      • statement – string SQL statement
      • parameters – Dictionary, tuple, or list of parameters being passed to the execute() or executemany() method of the DBAPI cursor. In some cases may be None.
      • contextExecutionContext object in use. May be None.
      • exception – The unwrapped exception emitted directly from the DBAPI. The class here is specific to the DBAPI module in use.

      New in version 0.7.7.
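A minimal sketch of such a logging-only listener (the logger name is our own choice):

import logging

from sqlalchemy import event
from sqlalchemy.engine import Engine

logger = logging.getLogger("myapp.dbapi_errors")

@event.listens_for(Engine, "dbapi_error")
def receive_dbapi_error(conn, cursor, statement, parameters,
                                context, exception):
    # observe and log only; do not modify state or raise from here
    logger.error("DBAPI error running %r: %s", statement, exception)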

      prepare_twophase(conn, xid)

      Intercept prepare_twophase() events.

      Parameters:
      • connConnection object
      • xid – two-phase XID identifier
      release_savepoint(conn, name, context)

      Intercept release_savepoint() events.

      Parameters:
      rollback(conn)

      Intercept rollback() events, as initiated by a Transaction.

      Note that the Pool also “auto-rolls back” a DBAPI connection upon checkin, if the reset_on_return flag is set to its default value of 'rollback'. To intercept this rollback, use the PoolEvents.reset() hook.

      Parameters:connConnection object
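For example, a pair of listeners can record transaction boundaries on a particular Engine (a sketch; the logger name and in-memory database are our own illustration):

import logging

from sqlalchemy import create_engine, event

logger = logging.getLogger("myapp.tx")
engine = create_engine('sqlite://')

@event.listens_for(engine, 'commit')
def receive_commit(conn):
    logger.info("transaction committed on %r", conn)

@event.listens_for(engine, 'rollback')
def receive_rollback(conn):
    logger.info("transaction rolled back on %r", conn)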
      rollback_savepoint(conn, name, context)

      Intercept rollback_savepoint() events.

      Parameters:
      rollback_twophase(conn, xid, is_prepared)

      Intercept rollback_twophase() events.

      Parameters:
      savepoint(conn, name=None)

      Intercept savepoint() events.

      Parameters:
      • connConnection object
      • name – specified name used for the savepoint.

      Schema Events

      class sqlalchemy.events.DDLEvents

      Bases: sqlalchemy.event.Events

      Define event listeners for schema objects, that is, SchemaItem and SchemaEvent subclasses, including MetaData, Table, Column.

      MetaData and Table support events specifically regarding when CREATE and DROP DDL is emitted to the database.

      Attachment events are also provided to customize behavior whenever a child schema element is associated with a parent, such as, when a Column is associated with its Table, when a ForeignKeyConstraint is associated with a Table, etc.

      Example using the after_create event:

      from sqlalchemy import event
from sqlalchemy import Table, Column, MetaData, Integer
      
      m = MetaData()
      some_table = Table('some_table', m, Column('data', Integer))
      
      def after_create(target, connection, **kw):
          connection.execute("ALTER TABLE %s SET name=foo_%s" %
                                  (target.name, target.name))
      
      event.listen(some_table, "after_create", after_create)

      DDL events integrate closely with the DDL class and the DDLElement hierarchy of DDL clause constructs, which are themselves appropriate as listener callables:

      from sqlalchemy import DDL
      event.listen(
          some_table,
          "after_create",
          DDL("ALTER TABLE %(table)s SET name=foo_%(table)s")
      )

      The methods here define the name of an event as well as the names of members that are passed to listener functions.

      See also:

      after_create(target, connection, **kw)

Called after CREATE statements are emitted.

      Parameters:
      • target – the MetaData or Table object which is the target of the event.
      • connection – the Connection where the CREATE statement or statements have been emitted.
      • **kw – additional keyword arguments relevant to the event. The contents of this dictionary may vary across releases, and include the list of tables being generated for a metadata-level event, the checkfirst flag, and other elements used by internal events.
      after_drop(target, connection, **kw)

Called after DROP statements are emitted.

      Parameters:
      • target – the MetaData or Table object which is the target of the event.
      • connection – the Connection where the DROP statement or statements have been emitted.
      • **kw – additional keyword arguments relevant to the event. The contents of this dictionary may vary across releases, and include the list of tables being generated for a metadata-level event, the checkfirst flag, and other elements used by internal events.
      after_parent_attach(target, parent)

      Called after a SchemaItem is associated with a parent SchemaItem.

      Parameters:
      • target – the target object
      • parent – the parent to which the target is being attached.

      event.listen() also accepts a modifier for this event:

      Parameters:propagate=False – When True, the listener function will be established for any copies made of the target object, i.e. those copies that are generated when Table.tometadata() is used.
      before_create(target, connection, **kw)

Called before CREATE statements are emitted.

      Parameters:
      • target – the MetaData or Table object which is the target of the event.
      • connection – the Connection where the CREATE statement or statements will be emitted.
      • **kw – additional keyword arguments relevant to the event. The contents of this dictionary may vary across releases, and include the list of tables being generated for a metadata-level event, the checkfirst flag, and other elements used by internal events.
      before_drop(target, connection, **kw)

Called before DROP statements are emitted.

      Parameters:
      • target – the MetaData or Table object which is the target of the event.
      • connection – the Connection where the DROP statement or statements will be emitted.
      • **kw – additional keyword arguments relevant to the event. The contents of this dictionary may vary across releases, and include the list of tables being generated for a metadata-level event, the checkfirst flag, and other elements used by internal events.
      before_parent_attach(target, parent)

      Called before a SchemaItem is associated with a parent SchemaItem.

      Parameters:
      • target – the target object
      • parent – the parent to which the target is being attached.

      event.listen() also accepts a modifier for this event:

      Parameters:propagate=False – When True, the listener function will be established for any copies made of the target object, i.e. those copies that are generated when Table.tometadata() is used.
      column_reflect(inspector, table, column_info)

      Called for each unit of ‘column info’ retrieved when a Table is being reflected.

      The dictionary of column information as returned by the dialect is passed, and can be modified. The dictionary is that returned in each element of the list returned by reflection.Inspector.get_columns().

      The event is called before any action is taken against this dictionary, and the contents can be modified. The Column specific arguments info, key, and quote can also be added to the dictionary and will be passed to the constructor of Column.

Note that this event is only meaningful if it is either associated with the Table class across the board, e.g.:

      from sqlalchemy.schema import Table
      from sqlalchemy import event
      
      def listen_for_reflect(inspector, table, column_info):
          "receive a column_reflect event"
          # ...
      
      event.listen(
              Table,
              'column_reflect',
              listen_for_reflect)

      ...or with a specific Table instance using the listeners argument:

      def listen_for_reflect(inspector, table, column_info):
          "receive a column_reflect event"
          # ...
      
# assuming 'm' is an existing MetaData bound to an engine, so that
# autoload=True can reflect the table
t = Table(
    'sometable',
    m,
    autoload=True,
    listeners=[
        ('column_reflect', listen_for_reflect)
    ])

This is because the reflection process initiated by autoload=True completes within the scope of the constructor for Table.
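As an illustration of modifying the dictionary, the listener below assigns a 'key' for each reflected Column so that attribute access uses a lower-cased name (the lower-casing rule is our own example):

def listen_for_reflect(inspector, table, column_info):
    # give each reflected Column a lower-cased attribute key,
    # leaving the actual database column name untouched
    column_info['key'] = column_info['name'].lower()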

      class sqlalchemy.events.SchemaEventTarget

      Base class for elements that are the targets of DDLEvents events.

      This includes SchemaItem as well as SchemaType.


      Core Exceptions

      Exceptions used with SQLAlchemy.

      The base exception class is SQLAlchemyError. Exceptions which are raised as a result of DBAPI exceptions are all subclasses of DBAPIError.

      exception sqlalchemy.exc.AmbiguousForeignKeysError

      Raised when more than one foreign key matching can be located between two selectables during a join.

      exception sqlalchemy.exc.ArgumentError

      Raised when an invalid or conflicting function argument is supplied.

      This error generally corresponds to construction time state errors.

      exception sqlalchemy.exc.CircularDependencyError(message, cycles, edges, msg=None)

      Raised by topological sorts when a circular dependency is detected.

      There are two scenarios where this error occurs:

      exception sqlalchemy.exc.CompileError

      Raised when an error occurs during SQL compilation

      exception sqlalchemy.exc.DBAPIError(statement, params, orig, connection_invalidated=False)

      Raised when the execution of a database operation fails.

      Wraps exceptions raised by the DB-API underlying the database operation. Driver-specific implementations of the standard DB-API exception types are wrapped by matching sub-types of SQLAlchemy’s DBAPIError when possible. DB-API’s Error type maps to DBAPIError in SQLAlchemy, otherwise the names are identical. Note that there is no guarantee that different DB-API implementations will raise the same exception type for any given error condition.

      DBAPIError features statement and params attributes which supply context regarding the specifics of the statement which had an issue, for the typical case when the error was raised within the context of emitting a SQL statement.

      The wrapped exception object is available in the orig attribute. Its type and properties are DB-API implementation specific.
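A minimal sketch of inspecting these attributes (the table name is deliberately bogus so that the DBAPI raises):

from sqlalchemy import create_engine
from sqlalchemy.exc import DBAPIError

engine = create_engine('sqlite://')

try:
    engine.execute("SELECT * FROM no_such_table")
except DBAPIError as err:
    print(err.statement)   # the failing SQL string
    print(err.params)      # its parameters, if any
    print(err.orig)        # the original DB-API exception instance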

      exception sqlalchemy.exc.DataError(statement, params, orig, connection_invalidated=False)

      Wraps a DB-API DataError.

      exception sqlalchemy.exc.DatabaseError(statement, params, orig, connection_invalidated=False)

      Wraps a DB-API DatabaseError.

      exception sqlalchemy.exc.DisconnectionError

      A disconnect is detected on a raw DB-API connection.

      This error is raised and consumed internally by a connection pool. It can be raised by the PoolEvents.checkout() event so that the host pool forces a retry; the exception will be caught three times in a row before the pool gives up and raises InvalidRequestError regarding the connection attempt.

      class sqlalchemy.exc.DontWrapMixin

      A mixin class which, when applied to a user-defined Exception class, will not be wrapped inside of StatementError if the error is emitted within the process of executing a statement.

      E.g.:

      from sqlalchemy.exc import DontWrapMixin
      
      class MyCustomException(Exception, DontWrapMixin):
          pass
      
      class MySpecialType(TypeDecorator):
          impl = String
      
          def process_bind_param(self, value, dialect):
              if value == 'invalid':
                  raise MyCustomException("invalid!")
      exception sqlalchemy.exc.IdentifierError

      Raised when a schema name is beyond the max character limit

      exception sqlalchemy.exc.IntegrityError(statement, params, orig, connection_invalidated=False)

      Wraps a DB-API IntegrityError.

      exception sqlalchemy.exc.InterfaceError(statement, params, orig, connection_invalidated=False)

      Wraps a DB-API InterfaceError.

      exception sqlalchemy.exc.InternalError(statement, params, orig, connection_invalidated=False)

      Wraps a DB-API InternalError.

      exception sqlalchemy.exc.InvalidRequestError

      SQLAlchemy was asked to do something it can’t do.

      This error generally corresponds to runtime state errors.

      exception sqlalchemy.exc.NoForeignKeysError

      Raised when no foreign keys can be located between two selectables during a join.

      exception sqlalchemy.exc.NoInspectionAvailable

      A subject passed to sqlalchemy.inspection.inspect() produced no context for inspection.

      exception sqlalchemy.exc.NoReferenceError

      Raised by ForeignKey to indicate a reference cannot be resolved.

      exception sqlalchemy.exc.NoReferencedColumnError(message, tname, cname)

      Raised by ForeignKey when the referred Column cannot be located.

      exception sqlalchemy.exc.NoReferencedTableError(message, tname)

      Raised by ForeignKey when the referred Table cannot be located.

      exception sqlalchemy.exc.NoSuchColumnError

      A nonexistent column is requested from a RowProxy.

      exception sqlalchemy.exc.NoSuchTableError

      Table does not exist or is not visible to a connection.

      exception sqlalchemy.exc.NotSupportedError(statement, params, orig, connection_invalidated=False)

      Wraps a DB-API NotSupportedError.

      exception sqlalchemy.exc.OperationalError(statement, params, orig, connection_invalidated=False)

      Wraps a DB-API OperationalError.

      exception sqlalchemy.exc.ProgrammingError(statement, params, orig, connection_invalidated=False)

      Wraps a DB-API ProgrammingError.

      exception sqlalchemy.exc.ResourceClosedError

      An operation was requested from a connection, cursor, or other object that’s in a closed state.

      exception sqlalchemy.exc.SADeprecationWarning

      Issued once per usage of a deprecated API.

      exception sqlalchemy.exc.SAPendingDeprecationWarning

      Issued once per usage of a deprecated API.

      exception sqlalchemy.exc.SAWarning

      Issued at runtime.

      exception sqlalchemy.exc.SQLAlchemyError

      Generic error class.

      exception sqlalchemy.exc.StatementError(message, statement, params, orig)

      An error occurred during execution of a SQL statement.

      StatementError wraps the exception raised during execution, and features statement and params attributes which supply context regarding the specifics of the statement which had an issue.

      The wrapped exception object is available in the orig attribute.

      orig = None

      The DBAPI exception object.

      params = None

      The parameter list being used when this exception occurred.

      statement = None

      The string SQL statement being invoked when this exception occurred.

      exception sqlalchemy.exc.TimeoutError

      Raised when a connection pool times out on getting a connection.

      exception sqlalchemy.exc.UnboundExecutionError

      SQL was attempted without a database connection to execute it on.

      exception sqlalchemy.exc.UnsupportedCompilationError(compiler, element_type)

      Raised when an operation is not supported by the given compiler.

      New in version 0.8.3.


      SQL Statements and Expressions API

      This section presents the API reference for the SQL Expression Language. For a full introduction to its usage, see SQL Expression Language Tutorial.


      SQL and Generic Functions

      SQL functions which are known to SQLAlchemy with regards to database-specific rendering, return types and argument behavior. Generic functions are invoked like all SQL functions, using the func attribute:

      select([func.count()]).select_from(sometable)

      Note that any name not known to func generates the function name as is - there is no restriction on what SQL functions can be called, known or unknown to SQLAlchemy, built-in or user defined. The section here only describes those functions where SQLAlchemy already knows what argument and return types are in use.

      class sqlalchemy.sql.functions.AnsiFunction(**kwargs)

      Bases: sqlalchemy.sql.functions.GenericFunction

      identifier = 'AnsiFunction'
      name = 'AnsiFunction'
      class sqlalchemy.sql.functions.GenericFunction(*args, **kwargs)

      Bases: sqlalchemy.sql.expression.Function

      Define a ‘generic’ function.

      A generic function is a pre-established Function class that is instantiated automatically when called by name from the func attribute. Note that calling any name from func has the effect that a new Function instance is created automatically, given that name. The primary use case for defining a GenericFunction class is so that a function of a particular name may be given a fixed return type. It can also include custom argument parsing schemes as well as additional methods.

      Subclasses of GenericFunction are automatically registered under the name of the class. For example, a user-defined function as_utc() would be available immediately:

      from sqlalchemy.sql.functions import GenericFunction
      from sqlalchemy.types import DateTime
      
      class as_utc(GenericFunction):
          type = DateTime
      
      print select([func.as_utc()])

      User-defined generic functions can be organized into packages by specifying the “package” attribute when defining GenericFunction. Third party libraries containing many functions may want to use this in order to avoid name conflicts with other systems. For example, if our as_utc() function were part of a package “time”:

      class as_utc(GenericFunction):
          type = DateTime
          package = "time"

      The above function would be available from func using the package name time:

      print select([func.time.as_utc()])

      A final option is to allow the function to be accessed from one name in func but to render as a different name. The identifier attribute will override the name used to access the function as loaded from func, but will retain the usage of name as the rendered name:

      class GeoBuffer(GenericFunction):
          type = Geometry
          package = "geo"
          name = "ST_Buffer"
          identifier = "buffer"

      The above function will render as follows:

      >>> print func.geo.buffer()
      ST_Buffer()

      New in version 0.8: GenericFunction now supports automatic registration of new functions as well as package and custom naming support.

      Changed in version 0.8: The attribute name type is used to specify the function’s return type at the class level. Previously, the name __return_type__ was used. This name is still recognized for backwards-compatibility.

      coerce_arguments = True
      identifier = 'GenericFunction'
      name = 'GenericFunction'
      class sqlalchemy.sql.functions.ReturnTypeFromArgs(*args, **kwargs)

      Bases: sqlalchemy.sql.functions.GenericFunction

      Define a function whose return type is the same as its arguments.

      identifier = 'ReturnTypeFromArgs'
      name = 'ReturnTypeFromArgs'
      class sqlalchemy.sql.functions.char_length(arg, **kwargs)

      Bases: sqlalchemy.sql.functions.GenericFunction

      identifier = 'char_length'
      name = 'char_length'
      type

      alias of Integer

      class sqlalchemy.sql.functions.coalesce(*args, **kwargs)

      Bases: sqlalchemy.sql.functions.ReturnTypeFromArgs

      identifier = 'coalesce'
      name = 'coalesce'
      class sqlalchemy.sql.functions.concat(*args, **kwargs)

      Bases: sqlalchemy.sql.functions.GenericFunction

      identifier = 'concat'
      name = 'concat'
      type

      alias of String

      class sqlalchemy.sql.functions.count(expression=None, **kwargs)

      Bases: sqlalchemy.sql.functions.GenericFunction

      The ANSI COUNT aggregate function. With no arguments, emits COUNT *.

      identifier = 'count'
      name = 'count'
      type

      alias of Integer

      class sqlalchemy.sql.functions.current_date(**kwargs)

      Bases: sqlalchemy.sql.functions.AnsiFunction

      identifier = 'current_date'
      name = 'current_date'
      type

      alias of Date

      class sqlalchemy.sql.functions.current_time(**kwargs)

      Bases: sqlalchemy.sql.functions.AnsiFunction

      identifier = 'current_time'
      name = 'current_time'
      type

      alias of Time

      class sqlalchemy.sql.functions.current_timestamp(**kwargs)

      Bases: sqlalchemy.sql.functions.AnsiFunction

      identifier = 'current_timestamp'
      name = 'current_timestamp'
      type

      alias of DateTime

      class sqlalchemy.sql.functions.current_user(**kwargs)

      Bases: sqlalchemy.sql.functions.AnsiFunction

      identifier = 'current_user'
      name = 'current_user'
      type

      alias of String

      class sqlalchemy.sql.functions.localtime(**kwargs)

      Bases: sqlalchemy.sql.functions.AnsiFunction

      identifier = 'localtime'
      name = 'localtime'
      type

      alias of DateTime

      class sqlalchemy.sql.functions.localtimestamp(**kwargs)

      Bases: sqlalchemy.sql.functions.AnsiFunction

      identifier = 'localtimestamp'
      name = 'localtimestamp'
      type

      alias of DateTime

      class sqlalchemy.sql.functions.max(*args, **kwargs)

      Bases: sqlalchemy.sql.functions.ReturnTypeFromArgs

      identifier = 'max'
      name = 'max'
      class sqlalchemy.sql.functions.min(*args, **kwargs)

      Bases: sqlalchemy.sql.functions.ReturnTypeFromArgs

      identifier = 'min'
      name = 'min'
      class sqlalchemy.sql.functions.next_value(seq, **kw)

      Bases: sqlalchemy.sql.functions.GenericFunction

Represent the ‘next value’, given a Sequence as its single argument.

      Compiles into the appropriate function on each backend, or will raise NotImplementedError if used on a backend that does not provide support for sequences.

      identifier = 'next_value'
      name = 'next_value'
      type = Integer()
      class sqlalchemy.sql.functions.now(*args, **kwargs)

      Bases: sqlalchemy.sql.functions.GenericFunction

      identifier = 'now'
      name = 'now'
      type

      alias of DateTime

      class sqlalchemy.sql.functions.random(*args, **kwargs)

      Bases: sqlalchemy.sql.functions.GenericFunction

      identifier = 'random'
      name = 'random'
      sqlalchemy.sql.functions.register_function(identifier, fn, package='_default')

      Associate a callable with a particular func. name.

      This is normally called by _GenericMeta, but is also available by itself so that a non-Function construct can be associated with the func accessor (i.e. CAST, EXTRACT).

      class sqlalchemy.sql.functions.session_user(**kwargs)

      Bases: sqlalchemy.sql.functions.AnsiFunction

      identifier = 'session_user'
      name = 'session_user'
      type

      alias of String

      class sqlalchemy.sql.functions.sum(*args, **kwargs)

      Bases: sqlalchemy.sql.functions.ReturnTypeFromArgs

      identifier = 'sum'
      name = 'sum'
      class sqlalchemy.sql.functions.sysdate(**kwargs)

      Bases: sqlalchemy.sql.functions.AnsiFunction

      identifier = 'sysdate'
      name = 'sysdate'
      type

      alias of DateTime

      class sqlalchemy.sql.functions.user(**kwargs)

      Bases: sqlalchemy.sql.functions.AnsiFunction

      identifier = 'user'
      name = 'user'
      type

      alias of String


      SQLAlchemy Core

      The breadth of SQLAlchemy’s SQL rendering engine, DBAPI integration, transaction integration, and schema description services are documented here. In contrast to the ORM’s domain-centric mode of usage, the SQL Expression Language provides a schema-centric usage paradigm.


      Runtime Inspection API

      The inspection module provides the inspect() function, which delivers runtime information about a wide variety of SQLAlchemy objects, both within the Core as well as the ORM.

      The inspect() function is the entry point to SQLAlchemy’s public API for viewing the configuration and construction of in-memory objects. Depending on the type of object passed to inspect(), the return value will either be a related object which provides a known interface, or in many cases it will return the object itself.

      The rationale for inspect() is twofold. One is that it replaces the need to be aware of a large variety of “information getting” functions in SQLAlchemy, such as Inspector.from_engine(), orm.attributes.instance_state(), orm.class_mapper(), and others. The other is that the return value of inspect() is guaranteed to obey a documented API, thus allowing third party tools which build on top of SQLAlchemy configurations to be constructed in a forwards-compatible way.

      New in version 0.8: The inspect() system is introduced as of version 0.8.

      sqlalchemy.inspection.inspect(subject, raiseerr=True)

      Produce an inspection object for the given target.

      The returned value in some cases may be the same object as the one given, such as if a orm.Mapper object is passed. In other cases, it will be an instance of the registered inspection type for the given object, such as if a engine.Engine is passed, an engine.Inspector object is returned.

      Parameters:
      • subject – the subject to be inspected.
      • raiseerr – When True, if the given subject does not correspond to a known SQLAlchemy inspected type, sqlalchemy.exc.NoInspectionAvailable is raised. If False, None is returned.

      Available Inspection Targets

      Below is a listing of many of the most common inspection targets.


      Deprecated Event Interfaces

      This section describes the class-based core event interface introduced in SQLAlchemy 0.5. The ORM analogue is described at Deprecated ORM Event Interfaces.

      Deprecated since version 0.7: The new event system described in Events replaces the extension/proxy/listener system, providing a consistent interface to all events without the need for subclassing.

      Execution, Connection and Cursor Events

      class sqlalchemy.interfaces.ConnectionProxy

      Allows interception of statement execution by Connections.

      Note

      ConnectionProxy is deprecated. Please refer to ConnectionEvents.

      Either or both of the execute() and cursor_execute() may be implemented to intercept compiled statement and cursor level executions, e.g.:

      class MyProxy(ConnectionProxy):
          def execute(self, conn, execute, clauseelement,
                      *multiparams, **params):
              print "compiled statement:", clauseelement
              return execute(clauseelement, *multiparams, **params)
      
          def cursor_execute(self, execute, cursor, statement,
                             parameters, context, executemany):
              print "raw statement:", statement
              return execute(cursor, statement, parameters, context)

      The execute argument is a function that will fulfill the default execution behavior for the operation. The signature illustrated in the example should be used.

      The proxy is installed into an Engine via the proxy argument:

      e = create_engine('someurl://', proxy=MyProxy())
      begin(conn, begin)

      Intercept begin() events.

      begin_twophase(conn, begin_twophase, xid)

      Intercept begin_twophase() events.

      commit(conn, commit)

      Intercept commit() events.

      commit_twophase(conn, commit_twophase, xid, is_prepared)

      Intercept commit_twophase() events.

      cursor_execute(execute, cursor, statement, parameters, context, executemany)

      Intercept low-level cursor execute() events.

      execute(conn, execute, clauseelement, *multiparams, **params)

      Intercept high level execute() events.

      prepare_twophase(conn, prepare_twophase, xid)

      Intercept prepare_twophase() events.

      release_savepoint(conn, release_savepoint, name, context)

      Intercept release_savepoint() events.

      rollback(conn, rollback)

      Intercept rollback() events.

      rollback_savepoint(conn, rollback_savepoint, name, context)

      Intercept rollback_savepoint() events.

      rollback_twophase(conn, rollback_twophase, xid, is_prepared)

      Intercept rollback_twophase() events.

      savepoint(conn, savepoint, name=None)

      Intercept savepoint() events.

      Connection Pool Events

      class sqlalchemy.interfaces.PoolListener

      Hooks into the lifecycle of connections in a Pool.

      Note

      PoolListener is deprecated. Please refer to PoolEvents.

      Usage:

      class MyListener(PoolListener):
          def connect(self, dbapi_con, con_record):
              '''perform connect operations'''
          # etc.
      
      # create a new pool with a listener
      p = QueuePool(..., listeners=[MyListener()])
      
      # add a listener after the fact
      p.add_listener(MyListener())
      
      # usage with create_engine()
      e = create_engine("url://", listeners=[MyListener()])

      All of the standard connection Pool types can accept event listeners for key connection lifecycle events: creation, pool check-out and check-in. There are no events fired when a connection closes.

      For any given DB-API connection, there will be one connect event, n number of checkout events, and either n or n - 1 checkin events. (If a Connection is detached from its pool via the detach() method, it won’t be checked back in.)

      These are low-level events for low-level objects: raw Python DB-API connections, without the conveniences of the SQLAlchemy Connection wrapper, Dialect services or ClauseElement execution. If you execute SQL through the connection, explicitly closing all cursors and other resources is recommended.

      Events also receive a _ConnectionRecord, a long-lived internal Pool object that basically represents a “slot” in the connection pool. _ConnectionRecord objects have one public attribute of note: info, a dictionary whose contents are scoped to the lifetime of the DB-API connection managed by the record. You can use this shared storage area however you like.

      There is no need to subclass PoolListener to handle events. Any class that implements one or more of these methods can be used as a pool listener. The Pool will inspect the methods provided by a listener object and add the listener to one or more internal event queues based on its capabilities. In terms of efficiency and function call overhead, you’re much better off only providing implementations for the hooks you’ll be using.

      checkin(dbapi_con, con_record)

      Called when a connection returns to the pool.

      Note that the connection may be closed, and may be None if the connection has been invalidated. checkin will not be called for detached connections. (They do not return to the pool.)

      dbapi_con
      A raw DB-API connection
      con_record
      The _ConnectionRecord that persistently manages the connection
      checkout(dbapi_con, con_record, con_proxy)

      Called when a connection is retrieved from the Pool.

      dbapi_con
      A raw DB-API connection
      con_record
      The _ConnectionRecord that persistently manages the connection
      con_proxy
      The _ConnectionFairy which manages the connection for the span of the current checkout.

      If you raise an exc.DisconnectionError, the current connection will be disposed and a fresh connection retrieved. Processing of all checkout listeners will abort and restart using the new connection.

      connect(dbapi_con, con_record)

Called once for each new DB-API connection created by the Pool’s creator().

      dbapi_con
      A newly connected raw DB-API connection (not a SQLAlchemy Connection wrapper).
      con_record
      The _ConnectionRecord that persistently manages the connection
      first_connect(dbapi_con, con_record)

      Called exactly once for the first DB-API connection.

      dbapi_con
      A newly connected raw DB-API connection (not a SQLAlchemy Connection wrapper).
      con_record
      The _ConnectionRecord that persistently manages the connection


      Core Internals

      Some key internal constructs are listed here.

      class sqlalchemy.engine.interfaces.Compiled(dialect, statement, bind=None, compile_kwargs=immutabledict({}))

      Represent a compiled SQL or DDL expression.

      The __str__ method of the Compiled object should produce the actual text of the statement. Compiled objects are specific to their underlying database dialect, and also may or may not be specific to the columns referenced within a particular set of bind parameters. In no case should the Compiled object be dependent on the actual values of those bind parameters, even though it may reference those values as defaults.

      __init__(dialect, statement, bind=None, compile_kwargs=immutabledict({}))

      Construct a new Compiled object.

      Parameters:
      • dialectDialect to compile against.
      • statementClauseElement to be compiled.
      • bind – Optional Engine or Connection to compile this statement against.
      • compile_kwargs

        additional kwargs that will be passed to the initial call to Compiled.process().

        New in version 0.8.

      compile()

      Produce the internal string representation of this element.

      Deprecated since version 0.7: Compiled objects now compile within the constructor.

      construct_params(params=None)

      Return the bind params for this compiled object.

      Parameters:params – a dict of string/object pairs whose values will override bind values compiled in to the statement.
      execute(*multiparams, **params)

      Execute this compiled object.

      params

      Return the bind params for this compiled object.

      scalar(*multiparams, **params)

      Execute this compiled object and return the result’s scalar value.

      sql_compiler

      Return a Compiled that is capable of processing SQL expressions.

      If this compiler is one, it would likely just return ‘self’.
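To illustrate these accessors, a Core statement can be compiled against the default dialect and its string form and bind parameter defaults examined (a sketch):

from sqlalchemy import select
from sqlalchemy.sql import table, column

t = table('user', column('id'), column('name'))
stmt = select([t.c.name]).where(t.c.id == 5)

compiled = stmt.compile()     # a Compiled (SQLCompiler) instance
print(compiled)               # the rendered SQL string
print(compiled.params)        # bind parameter defaults, e.g. {'id_1': 5}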

      class sqlalchemy.sql.compiler.DDLCompiler(dialect, statement, bind=None, compile_kwargs=immutabledict({}))

      Bases: sqlalchemy.engine.interfaces.Compiled

      __init__(dialect, statement, bind=None, compile_kwargs=immutabledict({}))
      inherited from the __init__() method of Compiled

      Construct a new Compiled object.

      Parameters:
      • dialectDialect to compile against.
      • statementClauseElement to be compiled.
      • bind – Optional Engine or Connection to compile this statement against.
      • compile_kwargs

        additional kwargs that will be passed to the initial call to Compiled.process().

        New in version 0.8.

      compile()
      inherited from the compile() method of Compiled

      Produce the internal string representation of this element.

      Deprecated since version 0.7: Compiled objects now compile within the constructor.

      define_constraint_remote_table(constraint, table, preparer)

      Format the remote table clause of a CREATE CONSTRAINT clause.

      execute(*multiparams, **params)
      inherited from the execute() method of Compiled

      Execute this compiled object.

      params
      inherited from the params attribute of Compiled

      Return the bind params for this compiled object.

      scalar(*multiparams, **params)
      inherited from the scalar() method of Compiled

      Execute this compiled object and return the result’s scalar value.

      class sqlalchemy.engine.default.DefaultDialect(convert_unicode=False, encoding='utf-8', paramstyle=None, dbapi=None, implicit_returning=None, case_sensitive=True, label_length=None, **kwargs)

      Bases: sqlalchemy.engine.interfaces.Dialect

      Default implementation of Dialect

      create_xid()

      Create a random two-phase transaction ID.

      This id will be passed to do_begin_twophase(), do_rollback_twophase(), do_commit_twophase(). Its format is unspecified.

      denormalize_name(name)
      inherited from the denormalize_name() method of Dialect

      convert the given name to a case insensitive identifier for the backend if it is an all-lowercase name.

      this method is only used if the dialect defines requires_name_normalize=True.

      do_begin_twophase(connection, xid)
      inherited from the do_begin_twophase() method of Dialect

      Begin a two phase transaction on the given connection.

      Parameters:
      do_commit_twophase(connection, xid, is_prepared=True, recover=False)
      inherited from the do_commit_twophase() method of Dialect

      Commit a two phase transaction on the given connection.

      Parameters:
      do_prepare_twophase(connection, xid)
      inherited from the do_prepare_twophase() method of Dialect

      Prepare a two phase transaction on the given connection.

      Parameters:
      do_recover_twophase(connection)
      inherited from the do_recover_twophase() method of Dialect

Recover a list of uncommitted prepared two phase transaction identifiers on the given connection.

      Parameters:connection – a Connection.
      do_rollback_twophase(connection, xid, is_prepared=True, recover=False)
      inherited from the do_rollback_twophase() method of Dialect

      Rollback a two phase transaction on the given connection.

      Parameters:
      execute_sequence_format

      alias of tuple

      get_columns(connection, table_name, schema=None, **kw)
      inherited from the get_columns() method of Dialect

      Return information about columns in table_name.

      Given a Connection, a string table_name, and an optional string schema, return column information as a list of dictionaries with these keys:

      name
      the column’s name
      type
      [sqlalchemy.types#TypeEngine]
      nullable
      boolean
      default
      the column’s default value
      autoincrement
      boolean
      sequence
      a dictionary of the form
{'name': str, 'start': int, 'increment': int}

      Additional column attributes may be present.

      get_foreign_keys(connection, table_name, schema=None, **kw)
      inherited from the get_foreign_keys() method of Dialect

      Return information about foreign_keys in table_name.

      Given a Connection, a string table_name, and an optional string schema, return foreign key information as a list of dicts with these keys:

      name
      the constraint’s name
      constrained_columns
      a list of column names that make up the foreign key
      referred_schema
      the name of the referred schema
      referred_table
      the name of the referred table
      referred_columns
      a list of column names in the referred table that correspond to constrained_columns
      get_indexes(connection, table_name, schema=None, **kw)
      inherited from the get_indexes() method of Dialect

      Return information about indexes in table_name.

      Given a Connection, a string table_name and an optional string schema, return index information as a list of dictionaries with these keys:

      name
      the index’s name
      column_names
      list of column names in order
      unique
      boolean
      get_isolation_level(dbapi_conn)
      inherited from the get_isolation_level() method of Dialect

      Given a DBAPI connection, return its isolation level.

      get_pk_constraint(conn, table_name, schema=None, **kw)

      Compatibility method, adapts the result of get_primary_keys() for those dialects which don’t implement get_pk_constraint().

      get_primary_keys(connection, table_name, schema=None, **kw)
      inherited from the get_primary_keys() method of Dialect

      Return information about primary keys in table_name.

      Deprecated. This method is only called by the default implementation of Dialect.get_pk_constraint(). Dialects should instead implement Dialect.get_pk_constraint() directly.

      get_table_names(connection, schema=None, **kw)
      inherited from the get_table_names() method of Dialect

      Return a list of table names for schema.

      get_unique_constraints(table_name, schema=None, **kw)
      inherited from the get_unique_constraints() method of Dialect

      Return information about unique constraints in table_name.

      Given a string table_name and an optional string schema, return unique constraint information as a list of dicts with these keys:

      name
      the unique constraint’s name
      column_names
      list of column names in order
      get_view_definition(connection, view_name, schema=None, **kw)
      inherited from the get_view_definition() method of Dialect

      Return view definition.

      Given a Connection, a string view_name, and an optional string schema, return the view definition.

      get_view_names(connection, schema=None, **kw)
      inherited from the get_view_names() method of Dialect

      Return a list of all view names available in the database.

      schema:
      Optional, retrieve names from a non-default schema.
      has_sequence(connection, sequence_name, schema=None)
      inherited from the has_sequence() method of Dialect

      Check the existence of a particular sequence in the database.

      Given a Connection object and a string sequence_name, return True if the given sequence exists in the database, False otherwise.

      has_table(connection, table_name, schema=None)
      inherited from the has_table() method of Dialect

      Check the existence of a particular table in the database.

      Given a Connection object and a string table_name, return True if the given table (possibly within the specified schema) exists in the database, False otherwise.

      normalize_name(name)
      inherited from the normalize_name() method of Dialect

      convert the given name to lowercase if it is detected as case insensitive.

      this method is only used if the dialect defines requires_name_normalize=True.

      on_connect()

      return a callable which sets up a newly created DBAPI connection.

      This is used to set dialect-wide per-connection options such as isolation modes, unicode modes, etc.

      If a callable is returned, it will be assembled into a pool listener that receives the direct DBAPI connection, with all wrappers removed.

      If None is returned, no listener will be generated.
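
      As a minimal sketch only (MyDialect and the statement emitted are hypothetical, not part of any shipped dialect), an implementation might look like:

      from sqlalchemy.engine.default import DefaultDialect

      class MyDialect(DefaultDialect):
          def on_connect(self):
              def connect(dbapi_connection):
                  # hypothetical per-connection setup; the exact statement
                  # depends entirely on the target DBAPI and backend
                  cursor = dbapi_connection.cursor()
                  cursor.execute("SET TIME ZONE 'UTC'")
                  cursor.close()
              return connect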

      preparer

      alias of IdentifierPreparer

      set_isolation_level(dbapi_conn, level)
      inherited from the set_isolation_level() method of Dialect

      Given a DBAPI connection, set its isolation level.

      statement_compiler

      alias of SQLCompiler

      type_descriptor(typeobj)

      Provide a database-specific TypeEngine object, given the generic object which comes from the types module.

      This method looks for a dictionary called colspecs as a class or instance-level variable, and passes on to types.adapt_type().

      class sqlalchemy.engine.interfaces.Dialect

      Define the behavior of a specific database and DB-API combination.

      Any aspect of metadata definition, SQL query generation, execution, result-set handling, or anything else which varies between databases is defined under the general category of the Dialect. The Dialect acts as a factory for other database-specific object implementations including ExecutionContext, Compiled, DefaultGenerator, and TypeEngine.

      All Dialects implement the following attributes:

      name
      identifying name for the dialect from a DBAPI-neutral point of view (i.e. ‘sqlite’)
      driver
      identifying name for the dialect’s DBAPI
      positional
      True if the paramstyle for this Dialect is positional.
      paramstyle
      the paramstyle to be used (some DB-APIs support multiple paramstyles).
      convert_unicode
      True if Unicode conversion should be applied to all str types.
      encoding
      type of encoding to use for unicode, usually defaults to ‘utf-8’.
      statement_compiler
      a Compiled class used to compile SQL statements
      ddl_compiler
      a Compiled class used to compile DDL statements
      server_version_info
      a tuple containing a version number for the DB backend in use. This value is only available for supporting dialects, and is typically populated during the initial connection to the database.
      default_schema_name
      the name of the default schema. This value is only available for supporting dialects, and is typically populated during the initial connection to the database.
      execution_ctx_cls
      a ExecutionContext class used to handle statement execution
      execute_sequence_format
      either the ‘tuple’ or ‘list’ type, depending on what cursor.execute() accepts for the second argument (they vary).
      preparer
      a IdentifierPreparer class used to quote identifiers.
      supports_alter
      True if the database supports ALTER TABLE.
      max_identifier_length
      The maximum length of identifier names.
      supports_unicode_statements
      Indicate whether the DB-API can receive SQL statements as Python unicode strings
      supports_unicode_binds
      Indicate whether the DB-API can receive string bind parameters as Python unicode strings
      supports_sane_rowcount
      Indicate whether the dialect properly implements rowcount for UPDATE and DELETE statements.
      supports_sane_multi_rowcount
      Indicate whether the dialect properly implements rowcount for UPDATE and DELETE statements when executed via executemany.
      preexecute_autoincrement_sequences
      True if ‘implicit’ primary key functions must be executed separately in order to get their value. This is currently oriented towards Postgresql.
      implicit_returning
      use RETURNING or equivalent during INSERT execution in order to load newly generated primary keys and other column defaults in one execution, which are then available via inserted_primary_key. If an insert statement has returning() specified explicitly, the “implicit” functionality is not used and inserted_primary_key will not be available.
      dbapi_type_map

      A mapping of DB-API type objects present in this Dialect’s DB-API implementation mapped to TypeEngine implementations used by the dialect.

      This is used to apply types to result sets based on the DB-API types present in cursor.description; it only takes effect for result sets against textual statements where no explicit typemap was present.

      colspecs
      A dictionary of TypeEngine classes from sqlalchemy.types mapped to subclasses that are specific to the dialect class. This dictionary is class-level only and is not accessed from the dialect instance itself.
      supports_default_values
      Indicates if the construct INSERT INTO tablename DEFAULT VALUES is supported
      supports_sequences
      Indicates if the dialect supports CREATE SEQUENCE or similar.
      sequences_optional
      If True, indicates if the “optional” flag on the Sequence() construct should signal to not generate a CREATE SEQUENCE. Applies only to dialects that support sequences. Currently used only to allow Postgresql SERIAL to be used on a column that specifies Sequence() for usage on other backends.
      supports_native_enum
      Indicates if the dialect supports a native ENUM construct. This will prevent types.Enum from generating a CHECK constraint when that type is used.
      supports_native_boolean
      Indicates if the dialect supports a native boolean construct. This will prevent types.Boolean from generating a CHECK constraint when that type is used.
      connect()

      return a callable which sets up a newly created DBAPI connection.

      The callable accepts a single argument “conn” which is the DBAPI connection itself. It has no return value.

      This is used to set dialect-wide per-connection options such as isolation modes, unicode modes, etc.

      If a callable is returned, it will be assembled into a pool listener that receives the direct DBAPI connection, with all wrappers removed.

      If None is returned, no listener will be generated.

      create_connect_args(url)

      Build DB-API compatible connection arguments.

      Given a URL object, returns a tuple consisting of a *args/**kwargs suitable to send directly to the dbapi’s connect function.
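
      A minimal sketch, assuming a hypothetical dialect; URL.translate_connect_args() is commonly used for this translation:

      from sqlalchemy.engine.default import DefaultDialect

      class MyDialect(DefaultDialect):
          def create_connect_args(self, url):
              # map URL attributes to the keyword names the DBAPI expects;
              # the 'username' -> 'user' rename here is only an example
              opts = url.translate_connect_args(username='user')
              opts.update(url.query)
              return [], opts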

      create_xid()

      Create a two-phase transaction ID.

      This id will be passed to do_begin_twophase(), do_rollback_twophase(), do_commit_twophase(). Its format is unspecified.

      denormalize_name(name)

      convert the given name to a case insensitive identifier for the backend if it is an all-lowercase name.

      this method is only used if the dialect defines requires_name_normalize=True.

      do_begin(dbapi_connection)

      Provide an implementation of connection.begin(), given a DB-API connection.

      The DBAPI has no dedicated “begin” method and it is expected that transactions are implicit. This hook is provided for those DBAPIs that might need additional help in this area.

      Note that Dialect.do_begin() is not called unless a Transaction object is in use. The Dialect.do_autocommit() hook is provided for DBAPIs that need some extra commands emitted after a commit in order to enter the next transaction, when the SQLAlchemy Connection is used in its default “autocommit” mode.

      Parameters:dbapi_connection – a DBAPI connection, typically proxied within a ConnectionFairy.
      do_begin_twophase(connection, xid)

      Begin a two phase transaction on the given connection.

      Parameters:
      do_close(dbapi_connection)

      Provide an implementation of connection.close(), given a DBAPI connection.

      This hook is called by the Pool when a connection has been detached from the pool, or is being returned beyond the normal capacity of the pool.

      New in version 0.8.

      do_commit(dbapi_connection)

      Provide an implementation of connection.commit(), given a DB-API connection.

      Parameters:dbapi_connection – a DBAPI connection, typically proxied within a ConnectionFairy.
      do_commit_twophase(connection, xid, is_prepared=True, recover=False)

      Commit a two phase transaction on the given connection.

      Parameters:
      do_execute(cursor, statement, parameters, context=None)

      Provide an implementation of cursor.execute(statement, parameters).

      do_execute_no_params(cursor, statement, parameters, context=None)

      Provide an implementation of cursor.execute(statement).

      The parameter collection should not be sent.

      do_executemany(cursor, statement, parameters, context=None)

      Provide an implementation of cursor.executemany(statement, parameters).

      do_prepare_twophase(connection, xid)

      Prepare a two phase transaction on the given connection.

      Parameters:
      do_recover_twophase(connection)

      Recover a list of uncommitted prepared two phase transaction identifiers on the given connection.

      Parameters:connection – a Connection.
      do_release_savepoint(connection, name)

      Release the named savepoint on a connection.

      Parameters:
      • connection – a Connection.
      • name – savepoint name.
      do_rollback(dbapi_connection)

      Provide an implementation of connection.rollback(), given a DB-API connection.

      Parameters:dbapi_connection – a DBAPI connection, typically proxied within a ConnectionFairy.
      do_rollback_to_savepoint(connection, name)

      Rollback a connection to the named savepoint.

      Parameters:
      • connection – a Connection.
      • name – savepoint name.
      do_rollback_twophase(connection, xid, is_prepared=True, recover=False)

      Rollback a two phase transaction on the given connection.

      Parameters:
      do_savepoint(connection, name)

      Create a savepoint with the given name.

      Parameters:
      • connection – a Connection.
      • name – savepoint name.
      get_columns(connection, table_name, schema=None, **kw)

      Return information about columns in table_name.

      Given a Connection, a string table_name, and an optional string schema, return column information as a list of dictionaries with these keys:

      name
      the column’s name
      type
      [sqlalchemy.types#TypeEngine]
      nullable
      boolean
      default
      the column’s default value
      autoincrement
      boolean
      sequence
      a dictionary of the form
      {'name': str, 'start': int, 'increment': int}

      Additional column attributes may be present.

      get_foreign_keys(connection, table_name, schema=None, **kw)

      Return information about foreign_keys in table_name.

      Given a Connection, a string table_name, and an optional string schema, return foreign key information as a list of dicts with these keys:

      name
      the constraint’s name
      constrained_columns
      a list of column names that make up the foreign key
      referred_schema
      the name of the referred schema
      referred_table
      the name of the referred table
      referred_columns
      a list of column names in the referred table that correspond to constrained_columns
      get_indexes(connection, table_name, schema=None, **kw)

      Return information about indexes in table_name.

      Given a Connection, a string table_name and an optional string schema, return index information as a list of dictionaries with these keys:

      name
      the index’s name
      column_names
      list of column names in order
      unique
      boolean
      get_isolation_level(dbapi_conn)

      Given a DBAPI connection, return its isolation level.

      get_pk_constraint(connection, table_name, schema=None, **kw)

      Return information about the primary key constraint on table_name.

      Given a Connection, a string table_name, and an optional string schema, return primary key information as a dictionary with these keys:

      constrained_columns
      a list of column names that make up the primary key
      name
      optional name of the primary key constraint.
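
      As an illustration only, reflecting a table with a composite primary key might yield a dictionary along these lines (all names hypothetical):

      {'constrained_columns': ['invoice_id', 'item_id'], 'name': 'pk_invoice_item'}
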
      get_primary_keys(connection, table_name, schema=None, **kw)

      Return information about primary keys in table_name.

      Deprecated. This method is only called by the default implementation of Dialect.get_pk_constraint(). Dialects should instead implement Dialect.get_pk_constraint() directly.

      get_table_names(connection, schema=None, **kw)

      Return a list of table names for schema.

      get_unique_constraints(table_name, schema=None, **kw)

      Return information about unique constraints in table_name.

      Given a string table_name and an optional string schema, return unique constraint information as a list of dicts with these keys:

      name
      the unique constraint’s name
      column_names
      list of column names in order
      get_view_definition(connection, view_name, schema=None, **kw)

      Return view definition.

      Given a Connection, a string view_name, and an optional string schema, return the view definition.

      get_view_names(connection, schema=None, **kw)

      Return a list of all view names available in the database.

      schema:
      Optional, retrieve names from a non-default schema.
      has_sequence(connection, sequence_name, schema=None)

      Check the existence of a particular sequence in the database.

      Given a Connection object and a string sequence_name, return True if the given sequence exists in the database, False otherwise.

      has_table(connection, table_name, schema=None)

      Check the existence of a particular table in the database.

      Given a Connection object and a string table_name, return True if the given table (possibly within the specified schema) exists in the database, False otherwise.
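
      As a quick usage sketch (the URL and table name are hypothetical; most applications would use Engine.has_table() or the Inspector rather than calling the dialect directly):

      from sqlalchemy import create_engine

      engine = create_engine('sqlite:///:memory:')
      conn = engine.connect()
      print engine.dialect.has_table(conn, 'employees')   # False for an empty database
      conn.close()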

      initialize(connection)

      Called during strategized creation of the dialect with a connection.

      Allows dialects to configure options based on server version info or other properties.

      The connection passed here is a SQLAlchemy Connection object, with full capabilities.

      The initialize() method of the base dialect should be called via super().

      is_disconnect(e, connection, cursor)

      Return True if the given DB-API error indicates an invalid connection.

      normalize_name(name)

      convert the given name to lowercase if it is detected as case insensitive.

      this method is only used if the dialect defines requires_name_normalize=True.

      reflecttable(connection, table, include_columns=None)

      Load table description from the database.

      Given a Connection and a Table object, reflect its columns and properties from the database. If include_columns (a list or set) is specified, limit the autoload to the given column names.

      The default implementation uses the Inspector interface to provide the output, building upon the granular table/column/ constraint etc. methods of Dialect.

      reset_isolation_level(dbapi_conn)

      Given a DBAPI connection, revert its isolation to the default.

      set_isolation_level(dbapi_conn, level)

      Given a DBAPI connection, set its isolation level.

      classmethod type_descriptor(typeobj)

      Transform a generic type to a dialect-specific type.

      Dialect classes will usually use the types.adapt_type() function in the types module to accomplish this.

      The returned result is cached per dialect class so can contain no dialect-instance state.
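
      As a minimal sketch (MyDialect and MyString are hypothetical), a dialect typically achieves this by listing its specialized types in colspecs, which DefaultDialect.type_descriptor() feeds to types.adapt_type():

      from sqlalchemy import types
      from sqlalchemy.engine.default import DefaultDialect

      class MyString(types.String):
          """Hypothetical dialect-specific string type."""

      class MyDialect(DefaultDialect):
          colspecs = {types.String: MyString}

      # the generic String(50) is adapted to the dialect-specific MyString
      adapted = MyDialect().type_descriptor(types.String(50))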

      class sqlalchemy.engine.default.DefaultExecutionContext

      Bases: sqlalchemy.engine.interfaces.ExecutionContext

      get_lastrowid()

      return self.cursor.lastrowid, or equivalent, after an INSERT.

      This may involve calling special cursor functions, issuing a new SELECT on the cursor (or a new one), or returning a stored value that was calculated within post_exec().

      This function will only be called for dialects which support “implicit” primary key generation, keep preexecute_autoincrement_sequences set to False, and when no explicit id value was bound to the statement.

      The function is called once, directly after post_exec() and before the transaction is committed or ResultProxy is generated. If the post_exec() method assigns a value to self._lastrowid, the value is used in place of calling get_lastrowid().

      Note that this method is not equivalent to the lastrowid method on ResultProxy, which is a direct proxy to the DBAPI lastrowid accessor in all cases.

      get_result_processor(type_, colname, coltype)

      Return a ‘result processor’ for a given type as present in cursor.description.

      This has a default implementation that dialects can override for context-sensitive result type handling.

      set_input_sizes(translate=None, exclude_types=None)

      Given a cursor and ClauseParameters, call the appropriate style of setinputsizes() on the cursor, using DB-API types from the bind parameter’s TypeEngine objects.

      This method is only called by those dialects which require it, currently cx_oracle.

      class sqlalchemy.engine.interfaces.ExecutionContext

      A messenger object for a Dialect that corresponds to a single execution.

      ExecutionContext should have these data members:

      connection
      Connection object which can be freely used by default value generators to execute SQL. This Connection should reference the same underlying connection/transactional resources of root_connection.
      root_connection
      Connection object which is the source of this ExecutionContext. This Connection may have close_with_result=True set, in which case it can only be used once.
      dialect
      dialect which created this ExecutionContext.
      cursor
      DB-API cursor procured from the connection,
      compiled
      if passed to constructor, sqlalchemy.engine.base.Compiled object being executed,
      statement
      string version of the statement to be executed. Is either passed to the constructor, or must be created from the sql.Compiled object by the time pre_exec() has completed.
      parameters
      bind parameters passed to the execute() method. For compiled statements, this is a dictionary or list of dictionaries. For textual statements, it should be in a format suitable for the dialect’s paramstyle (i.e. dict or list of dicts for non positional, list or list of lists/tuples for positional).
      isinsert
      True if the statement is an INSERT.
      isupdate
      True if the statement is an UPDATE.
      should_autocommit
      True if the statement is a “committable” statement.
      prefetch_cols
      a list of Column objects for which a client-side default was fired off. Applies to inserts and updates.
      postfetch_cols
      a list of Column objects for which a server-side default or inline SQL expression value was fired off. Applies to inserts and updates.
      create_cursor()

      Return a new cursor generated from this ExecutionContext’s connection.

      Some dialects may wish to change the behavior of connection.cursor(), such as postgresql which may return a PG “server side” cursor.

      get_rowcount()

      Return the DBAPI cursor.rowcount value, or in some cases an interpreted value.

      See ResultProxy.rowcount for details on this.

      handle_dbapi_exception(e)

      Receive a DBAPI exception which occurred upon execute, result fetch, etc.

      lastrow_has_defaults()

      Return True if the last INSERT or UPDATE row contained inlined or database-side defaults.

      post_exec()

      Called after the execution of a compiled statement.

      If a compiled statement was passed to this ExecutionContext, the last_insert_ids, last_inserted_params, etc. data members should be available after this method completes.

      pre_exec()

      Called before an execution of a compiled statement.

      If a compiled statement was passed to this ExecutionContext, the statement and parameters data members must be initialized by the time this method completes.

      result()

      Return a result object corresponding to this ExecutionContext.

      Returns a ResultProxy.

      should_autocommit_text(statement)

      Parse the given textual statement and return True if it refers to a “committable” statement

      class sqlalchemy.sql.compiler.IdentifierPreparer(dialect, initial_quote='"', final_quote=None, escape_quote='"', omit_schema=False)

      Handle quoting and case-folding of identifiers based on options.

      __init__(dialect, initial_quote='"', final_quote=None, escape_quote='"', omit_schema=False)

      Construct a new IdentifierPreparer object.

      initial_quote
      Character that begins a delimited identifier.
      final_quote
      Character that ends a delimited identifier. Defaults to initial_quote.
      omit_schema
      Prevent prepending schema name. Useful for databases that do not support schemas.
      format_column(column, use_table=False, name=None, table_name=None)

      Prepare a quoted column name.

      format_schema(name, quote)

      Prepare a quoted schema name.

      format_table(table, use_schema=True, name=None)

      Prepare a quoted table and schema name.

      format_table_seq(table, use_schema=True)

      Format table name and schema as a tuple.

      quote_identifier(value)

      Quote an identifier.

      Subclasses should override this to provide database-dependent quoting behavior.

      quote_schema(schema, force)

      Quote a schema.

      Subclasses should override this to provide database-dependent quoting behavior.

      unformat_identifiers(identifiers)

      Unpack ‘schema.table.column’-like strings into components.
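
      A quick illustration using the preparer of the default dialect (output varies by dialect and quoting rules):

      from sqlalchemy.engine.default import DefaultDialect

      preparer = DefaultDialect().identifier_preparer
      print preparer.quote_identifier('table')                     # always quotes: "table"
      print preparer.unformat_identifiers('myschema.mytable.col')  # ['myschema', 'mytable', 'col']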

      class sqlalchemy.sql.compiler.SQLCompiler(dialect, statement, column_keys=None, inline=False, **kwargs)

      Bases: sqlalchemy.engine.interfaces.Compiled

      Default implementation of Compiled.

      Compiles ClauseElements into SQL strings. Uses a similar visit paradigm as visitors.ClauseVisitor but implements its own traversal.

      __init__(dialect, statement, column_keys=None, inline=False, **kwargs)

      Construct a new SQLCompiler object.

      dialect
      Dialect to be used
      statement
      ClauseElement to be compiled
      column_keys
      a list of column names to be compiled into an INSERT or UPDATE statement.
      ansi_bind_rules = False

      SQL 92 doesn’t allow bind parameters to be used in the columns clause of a SELECT, nor does it allow ambiguous expressions like "? = ?". A compiler subclass can set this flag to True if the target driver/DB enforces this behavior.

      construct_params(params=None, _group_number=None, _check=True)

      return a dictionary of bind parameter keys and values

      default_from()

      Called when a SELECT statement has no froms, and no FROM clause is to be appended.

      Gives Oracle a chance to tack on a FROM DUAL to the string output.

      escape_literal_column(text)

      provide escaping for the literal_column() construct.

      get_select_precolumns(select)

      Called when building a SELECT statement, position is just before column list.

      isdelete = False

      class-level defaults which can be set at the instance level to define if this Compiled instance represents INSERT/UPDATE/DELETE

      isinsert = False

      class-level defaults which can be set at the instance level to define if this Compiled instance represents INSERT/UPDATE/DELETE

      isupdate = False

      class-level defaults which can be set at the instance level to define if this Compiled instance represents INSERT/UPDATE/DELETE

      params

      Return the bind param dictionary embedded into this compiled object, for those values that are present.
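
      For example (a small, self-contained illustration):

      from sqlalchemy import MetaData, Table, Column, Integer, String, select

      meta = MetaData()
      users = Table('users', meta,
          Column('id', Integer, primary_key=True),
          Column('name', String(50))
      )

      stmt = select([users]).where(users.c.name == 'ed')
      compiled = stmt.compile()
      print compiled.params    # {'name_1': 'ed'}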

      render_literal_value(value, type_)

      Render the value of a bind parameter as a quoted literal.

      This is used for statement sections that do not accept bind parameters on the target driver/database.

      This should be implemented by subclasses using the quoting services of the DBAPI.

      render_table_with_column_in_update_from = False

      set to True classwide to indicate the SET clause in a multi-table UPDATE statement should qualify columns with the table name (i.e. MySQL only)

      returning = None

      holds the “returning” collection of columns if the statement is CRUD and defines returning columns either implicitly or explicitly

      returning_precedes_values = False

      set to True classwide to generate RETURNING clauses before the VALUES or WHERE clause (i.e. MSSQL)

      update_from_clause(update_stmt, from_table, extra_froms, from_hints, **kw)

      Provide a hook to override the generation of an UPDATE..FROM clause.

      MySQL and MSSQL override this.

      update_limit_clause(update_stmt)

      Provide a hook for MySQL to add LIMIT to the UPDATE

      update_tables_clause(update_stmt, from_table, extra_froms, **kw)

      Provide a hook to override the initial table clause in an UPDATE statement.

      MySQL overrides this.

      Describing Databases with MetaData

      This section discusses the fundamental Table, Column and MetaData objects.

      A collection of metadata entities is stored in an object aptly named MetaData:

      from sqlalchemy import *
      
      metadata = MetaData()

      MetaData is a container object that keeps together many different features of a database (or multiple databases) being described.

      To represent a table, use the Table class. Its two primary arguments are the table name, then the MetaData object which it will be associated with. The remaining positional arguments are mostly Column objects describing each column:

      user = Table('user', metadata,
          Column('user_id', Integer, primary_key = True),
          Column('user_name', String(16), nullable = False),
          Column('email_address', String(60)),
          Column('password', String(20), nullable = False)
      )

      Above, a table called user is described, which contains four columns. The primary key of the table consists of the user_id column. Multiple columns may be assigned the primary_key=True flag which denotes a multi-column primary key, known as a composite primary key.
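
      For example, a composite primary key might be declared as follows (table and column names here are only illustrative):

      invoice_item = Table('invoice_item', metadata,
          Column('invoice_id', Integer, primary_key=True),
          Column('item_id', Integer, primary_key=True),
          Column('quantity', Integer)
      )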

      Note also that each column describes its datatype using objects corresponding to genericized types, such as Integer and String. SQLAlchemy features dozens of types of varying levels of specificity as well as the ability to create custom types. Documentation on the type system can be found at types.

      Accessing Tables and Columns

      The MetaData object contains all of the schema constructs we’ve associated with it. It supports a few methods of accessing these table objects, such as the sorted_tables accessor which returns a list of each Table object in order of foreign key dependency (that is, each table is preceded by all tables which it references):

      >>> for t in metadata.sorted_tables:
      ...    print t.name
      user
      user_preference
      invoice
      invoice_item

      In most cases, individual Table objects have been explicitly declared, and these objects are typically accessed directly as module-level variables in an application. Once a Table has been defined, it has a full set of accessors which allow inspection of its properties. Given the following Table definition:

      employees = Table('employees', metadata,
          Column('employee_id', Integer, primary_key=True),
          Column('employee_name', String(60), nullable=False),
          Column('employee_dept', Integer, ForeignKey("departments.department_id"))
      )

      Note the ForeignKey object used in this table - this construct defines a reference to a remote table, and is fully described in metadata_foreignkeys. Methods of accessing information about this table include:

      # access the column "employee_id":
      employees.columns.employee_id
      
      # or just
      employees.c.employee_id
      
      # via string
      employees.c['employee_id']
      
      # iterate through all columns
      for c in employees.c:
          print c
      
      # get the table's primary key columns
      for primary_key in employees.primary_key:
          print primary_key
      
      # get the table's foreign key objects:
      for fkey in employees.foreign_keys:
          print fkey
      
      # access the table's MetaData:
      employees.metadata
      
      # access the table's bound Engine or Connection, if its MetaData is bound:
      employees.bind
      
      # access a column's name, type, nullable, primary key, foreign key
      employees.c.employee_id.name
      employees.c.employee_id.type
      employees.c.employee_id.nullable
      employees.c.employee_id.primary_key
      employees.c.employee_dept.foreign_keys
      
      # get the "key" of a column, which defaults to its name, but can
      # be any user-defined string:
      employees.c.employee_name.key
      
      # access a column's table:
      employees.c.employee_id.table is employees
      
      # get the table related by a foreign key
      list(employees.c.employee_dept.foreign_keys)[0].column.table

      Creating and Dropping Database Tables

      Once you’ve defined some Table objects, assuming you’re working with a brand new database one thing you might want to do is issue CREATE statements for those tables and their related constructs (as an aside, it’s also quite possible that you don’t want to do this, if you already have some preferred methodology such as tools included with your database or an existing scripting system - if that’s the case, feel free to skip this section - SQLAlchemy has no requirement that it be used to create your tables).

      The usual way to issue CREATE is to use create_all() on the MetaData object. This method will issue queries that first check for the existence of each individual table, and if not found will issue the CREATE statements:

      engine = create_engine('sqlite:///:memory:')
      
      metadata = MetaData()
      
      user = Table('user', metadata,
          Column('user_id', Integer, primary_key = True),
          Column('user_name', String(16), nullable = False),
          Column('email_address', String(60), key='email'),
          Column('password', String(20), nullable = False)
      )
      
      user_prefs = Table('user_prefs', metadata,
          Column('pref_id', Integer, primary_key=True),
          Column('user_id', Integer, ForeignKey("user.user_id"), nullable=False),
          Column('pref_name', String(40), nullable=False),
          Column('pref_value', String(100))
      )
      
      metadata.create_all(engine)
      

      create_all() creates foreign key constraints between tables usually inline with the table definition itself, and for this reason it also generates the tables in order of their dependency. There are options to change this behavior such that ALTER TABLE is used instead.
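
      One such option, shown here as a sketch with hypothetical table names, is the use_alter flag on ForeignKey (together with an explicit constraint name), which defers that constraint to an ALTER TABLE issued after the tables are created:

      node = Table('node', metadata,
          Column('node_id', Integer, primary_key=True),
          Column('primary_email_id', Integer,
              ForeignKey('email.email_id', use_alter=True, name='fk_node_email'))
      )

      email = Table('email', metadata,
          Column('email_id', Integer, primary_key=True),
          Column('node_id', Integer, ForeignKey('node.node_id'))
      )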

      Dropping all tables is similarly achieved using the drop_all() method. This method does the exact opposite of create_all() - the presence of each table is checked first, and tables are dropped in reverse order of dependency.

      Creating and dropping individual tables can be done via the create() and drop() methods of Table. These methods by default issue the CREATE or DROP regardless of the table being present:

      engine = create_engine('sqlite:///:memory:')
      
      meta = MetaData()
      
      employees = Table('employees', meta,
          Column('employee_id', Integer, primary_key=True),
          Column('employee_name', String(60), nullable=False, key='name'),
          Column('employee_dept', Integer, ForeignKey("departments.department_id"))
      )
      employees.create(engine)
      

      drop() method:

      employees.drop(engine)
      

      To enable the “check first for the table existing” logic, add the checkfirst=True argument to create() or drop():

      employees.create(engine, checkfirst=True)
      employees.drop(engine, checkfirst=False)

      Altering Schemas through Migrations

      While SQLAlchemy directly supports emitting CREATE and DROP statements for schema constructs, the ability to alter those constructs, usually via the ALTER statement as well as other database-specific constructs, is outside of the scope of SQLAlchemy itself. While it’s easy enough to emit ALTER statements and similar by hand, such as by passing a string to Connection.execute() or by using the DDL construct, it’s a common practice to automate the maintenance of database schemas in relation to application code using schema migration tools.

      There are two major migration tools available for SQLAlchemy:

      • Alembic - Written by the author of SQLAlchemy, Alembic features a highly customizable environment and a minimalistic usage pattern, supporting such features as transactional DDL, automatic generation of “candidate” migrations, an “offline” mode which generates SQL scripts, and support for branch resolution.
      • SQLAlchemy-Migrate - The original migration tool for SQLAlchemy, SQLAlchemy-Migrate is widely used and continues under active development. SQLAlchemy-Migrate includes features such as SQL script generation, ORM class generation, ORM model comparison, and extensive support for SQLite migrations.

      Specifying the Schema Name

      Some databases support the concept of multiple schemas. A Table can reference this by specifying the schema keyword argument:

      financial_info = Table('financial_info', meta,
          Column('id', Integer, primary_key=True),
          Column('value', String(100), nullable=False),
          schema='remote_banks'
      )

      Within the MetaData collection, this table will be identified by the combination of financial_info and remote_banks. If another table called financial_info is referenced without the remote_banks schema, it will refer to a different Table. ForeignKey objects can specify references to columns in this table using the form remote_banks.financial_info.id.
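
      For example (an illustrative referencing table):

      customer = Table('customer', meta,
          Column('id', Integer, primary_key=True),
          Column('financial_info_id', Integer,
              ForeignKey('remote_banks.financial_info.id'))
      )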

      The schema argument should be used for any name qualifiers required, including Oracle’s “owner” attribute and similar. It also can accommodate a dotted name for longer schemes:

      schema="dbo.scott"

      Backend-Specific Options

      Table supports database-specific options. For example, MySQL has different table backend types, including “MyISAM” and “InnoDB”. This can be expressed with Table using mysql_engine:

      addresses = Table('engine_email_addresses', meta,
          Column('address_id', Integer, primary_key = True),
          Column('remote_user_id', Integer, ForeignKey(users.c.user_id)),
          Column('email_address', String(20)),
          mysql_engine='InnoDB'
      )

      Other backends may support table-level options as well - these would be described in the individual documentation sections for each dialect.

      Column, Table, MetaData API

      class sqlalchemy.schema.Column(*args, **kwargs)

      Bases: sqlalchemy.schema.SchemaItem, sqlalchemy.sql.expression.ColumnClause

      Represents a column in a database table.

      __eq__(other)
      inherited from the __eq__() method of ColumnOperators

      Implement the == operator.

      In a column context, produces the clause a = b. If the target is None, produces a IS NULL.

      __init__(*args, **kwargs)

      Construct a new Column object.

      Parameters:
      • name

        The name of this column as represented in the database. This argument may be the first positional argument, or specified via keyword.

        Names which contain no upper case characters will be treated as case insensitive names, and will not be quoted unless they are a reserved word. Names with any number of upper case characters will be quoted and sent exactly. Note that this behavior applies even for databases which standardize upper case names as case insensitive such as Oracle.

        The name field may be omitted at construction time and applied later, at any time before the Column is associated with a Table. This is to support convenient usage within the declarative extension.

      • type_

        The column’s type, indicated using an instance which subclasses TypeEngine. If no arguments are required for the type, the class of the type can be sent as well, e.g.:

        # use a type with arguments
        Column('data', String(50))
        
        # use no arguments
        Column('level', Integer)

        The type argument may be the second positional argument or specified by keyword.

        There is partial support for automatic detection of the type based on that of a ForeignKey associated with this column, if the type is specified as None. However, this feature is not fully implemented and may not function in all cases.

      • *args – Additional positional arguments include various SchemaItem derived constructs which will be applied as options to the column. These include instances of Constraint, ForeignKey, ColumnDefault, and Sequence. In some cases an equivalent keyword argument is available such as server_default, default and unique.
      • autoincrement

        This flag may be set to False to indicate an integer primary key column that should not be considered to be the “autoincrement” column, that is the integer primary key column which generates values implicitly upon INSERT and whose value is usually returned via the DBAPI cursor.lastrowid attribute. It defaults to True to satisfy the common use case of a table with a single integer primary key column. If the table has a composite primary key consisting of more than one integer column, set this flag to True only on the column that should be considered “autoincrement”.

        The setting only has an effect for columns which are:

        • Integer derived (i.e. INT, SMALLINT, BIGINT).
        • Part of the primary key
        • Are not referenced by any foreign keys, unless the value is specified as 'ignore_fk'

          New in version 0.7.4.

        • have no server side or client side defaults (with the exception of Postgresql SERIAL).

        The setting has these two effects on columns that meet the above criteria:

        • DDL issued for the column will include database-specific keywords intended to signify this column as an “autoincrement” column, such as AUTO INCREMENT on MySQL, SERIAL on Postgresql, and IDENTITY on MS-SQL. It does not issue AUTOINCREMENT for SQLite since this is a special SQLite flag that is not required for autoincrementing behavior. See the SQLite dialect documentation for information on SQLite’s AUTOINCREMENT.
        • The column will be considered to be available as cursor.lastrowid or equivalent, for those dialects which “post fetch” newly inserted identifiers after a row has been inserted (SQLite, MySQL, MS-SQL). It does not have any effect in this regard for databases that use sequences to generate primary key identifiers (i.e. Firebird, Postgresql, Oracle).

        Changed in version 0.7.4: autoincrement accepts a special value 'ignore_fk' to indicate that autoincrement status should be applied regardless of foreign key references. This applies to certain composite foreign key setups, such as the one demonstrated in the ORM documentation at Rows that point to themselves / Mutually Dependent Rows.

      • default

        A scalar, Python callable, or ColumnElement expression representing the default value for this column, which will be invoked upon insert if this column is otherwise not specified in the VALUES clause of the insert. This is a shortcut to using ColumnDefault as a positional argument; see that class for full detail on the structure of the argument.

        Contrast this argument to server_default which creates a default generator on the database side.

      • doc – optional String that can be used by the ORM or similar to document attributes. This attribute does not render SQL comments (a future attribute ‘comment’ will achieve that).
      • key – An optional string identifier which will identify this Column object on the Table. When a key is provided, this is the only identifier referencing the Column within the application, including ORM attribute mapping; the name field is used only when rendering SQL (see the short example following this parameter list).
      • index – When True, indicates that the column is indexed. This is a shortcut for using a Index construct on the table. To specify indexes with explicit names or indexes that contain multiple columns, use the Index construct instead.
      • info – Optional data dictionary which will be populated into the SchemaItem.info attribute of this object.
      • nullable – If set to the default of True, indicates the column will be rendered as allowing NULL, else it’s rendered as NOT NULL. This parameter is only used when issuing CREATE TABLE statements.
      • onupdate – A scalar, Python callable, or ClauseElement representing a default value to be applied to the column within UPDATE statements, which will be invoked upon update if this column is not present in the SET clause of the update. This is a shortcut to using ColumnDefault as a positional argument with for_update=True.
      • primary_key – If True, marks this column as a primary key column. Multiple columns can have this flag set to specify composite primary keys. As an alternative, the primary key of a Table can be specified via an explicit PrimaryKeyConstraint object.
      • server_default

        A FetchedValue instance, str, Unicode or text() construct representing the DDL DEFAULT value for the column.

        String types will be emitted as-is, surrounded by single quotes:

        Column('x', Text, server_default="val")
        
        x TEXT DEFAULT 'val'

        A text() expression will be rendered as-is, without quotes:

        Column('y', DateTime, server_default=text('NOW()'))
        
        y DATETIME DEFAULT NOW()

        Strings and text() will be converted into a DefaultClause object upon initialization.

        Use FetchedValue to indicate that an already-existing column will generate a default value on the database side which will be available to SQLAlchemy for post-fetch after inserts. This construct does not specify any DDL and the implementation is left to the database, such as via a trigger.

      • server_onupdate – A FetchedValue instance representing a database-side default generation function. This indicates to SQLAlchemy that a newly generated value will be available after updates. This construct does not specify any DDL and the implementation is left to the database, such as via a trigger.
      • quote – Force quoting of this column’s name on or off, corresponding to True or False. When left at its default of None, the column identifier will be quoted according to whether the name is case sensitive (identifiers with at least one upper case character are treated as case sensitive), or if it’s a reserved word. This flag is only needed to force quoting of a reserved word which is not known by the SQLAlchemy dialect.
      • unique – When True, indicates that this column contains a unique constraint, or if index is True as well, indicates that the Index should be created with the unique flag. To specify multiple columns in the constraint/index or to specify an explicit name, use the UniqueConstraint or Index constructs explicitly.
      • system

        When True, indicates this is a “system” column, that is a column which is automatically made available by the database, and should not be included in the columns list for a CREATE TABLE statement.

        For more elaborate scenarios where columns should be conditionally rendered differently on different backends, consider custom compilation rules for CreateColumn.

        New in version 0.8.3: Added the system=True parameter to Column.
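
      The short example below illustrates the key parameter described above; the table and columns are hypothetical:

      from sqlalchemy import MetaData, Table, Column, Integer, String

      meta = MetaData()
      users = Table('users', meta,
          Column('id', Integer, primary_key=True),
          Column('user_name', String(50), key='name')
      )

      users.c.name        # the Column is addressed by its key...
      users.c['name']     # ...in all Python-facing APIs, while "user_name"
                          # is what appears in rendered SQL
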
      __le__(other)
      inherited from the __le__() method of ColumnOperators

      Implement the <= operator.

      In a column context, produces the clause a <= b.

      __lt__(other)
      inherited from the __lt__() method of ColumnOperators

      Implement the < operator.

      In a column context, produces the clause a < b.

      __ne__(other)
      inherited from the __ne__() method of ColumnOperators

      Implement the != operator.

      In a column context, produces the clause a != b. If the target is None, produces a IS NOT NULL.

      anon_label
      inherited from the anon_label attribute of ColumnElement

      provides a constant ‘anonymous label’ for this ColumnElement.

      This is a label() expression which will be named at compile time. The same label() is returned each time anon_label is called so that expressions can reference anon_label multiple times, producing the same label name at compile time.

      the compiler uses this function automatically at compile time for expressions that are known to be ‘unnamed’ like binary expressions and function calls.

      append_foreign_key(fk)
      asc()
      inherited from the asc() method of ColumnOperators

      Produce a asc() clause against the parent object.

      base_columns
      inherited from the base_columns attribute of ColumnElement
      between(cleft, cright)
      inherited from the between() method of ColumnOperators

      Produce a between() clause against the parent object, given the lower and upper range.

      bind = None
      collate(collation)
      inherited from the collate() method of ColumnOperators

      Produce a collate() clause against the parent object, given the collation string.

      comparator
      inherited from the comparator attribute of ColumnElement
      compare(other, use_proxies=False, equivalents=None, **kw)
      inherited from the compare() method of ColumnElement

      Compare this ColumnElement to another.

      Special arguments understood:

      Parameters:
      • use_proxies – when True, consider two columns that share a common base column as equivalent (i.e. shares_lineage())
      • equivalents – a dictionary of columns as keys mapped to sets of columns. If the given “other” column is present in this dictionary, if any of the columns in the corresponding set() pass the comparison test, the result is True. This is used to expand the comparison to other columns that may be known to be equivalent to this one via foreign key or other criterion.
      compile(bind=None, dialect=None, **kw)
      inherited from the compile() method of ClauseElement

      Compile this SQL expression.

      The return value is a Compiled object. Calling str() or unicode() on the returned value will yield a string representation of the result. The Compiled object also can return a dictionary of bind parameter names and values using the params accessor.

      Parameters:
      • bind – An Engine or Connection from which a Compiled will be acquired. This argument takes precedence over this ClauseElement‘s bound engine, if any.
      • column_keys – Used for INSERT and UPDATE statements, a list of column names which should be present in the VALUES clause of the compiled statement. If None, all columns from the target table object are rendered.
      • dialect – A Dialect instance from which a Compiled will be acquired. This argument takes precedence over the bind argument as well as this ClauseElement‘s bound engine, if any.
      • inline – Used for INSERT statements, for a dialect which does not support inline retrieval of newly generated primary key columns, will force the expression used to create the new primary key value to be rendered inline within the INSERT statement’s VALUES clause. This typically refers to Sequence execution but may also refer to any server-side default generation function associated with a primary key Column.
      concat(other)
      inherited from the concat() method of ColumnOperators

      Implement the ‘concat’ operator.

      In a column context, produces the clause a || b, or uses the concat() operator on MySQL.

      contains(other, **kwargs)
      inherited from the contains() method of ColumnOperators

      Implement the ‘contains’ operator.

      In a column context, produces the clause LIKE '%<other>%'

      copy(**kw)

      Create a copy of this Column, uninitialized.

      This is used in Table.tometadata.

      default = None
      desc()
      inherited from the desc() method of ColumnOperators

      Produce a desc() clause against the parent object.

      description
      inherited from the description attribute of ColumnClause
      dispatch

      alias of DDLEventsDispatch

      distinct()
      inherited from the distinct() method of ColumnOperators

      Produce a distinct() clause against the parent object.

      endswith(other, **kwargs)
      inherited from the endswith() method of ColumnOperators

      Implement the ‘endswith’ operator.

      In a column context, produces the clause LIKE '%<other>'

      expression
      inherited from the expression attribute of ColumnElement

      Return a column expression.

      Part of the inspection interface; returns self.

      foreign_keys = []
      get_children(schema_visitor=False, **kwargs)
      ilike(other, escape=None)
      inherited from the ilike() method of ColumnOperators

      Implement the ilike operator.

      In a column context, produces the clause a ILIKE other.

      E.g.:

      select([sometable]).where(sometable.c.column.ilike("%foobar%"))
      Parameters:
      • other – expression to be compared
      • escape

        optional escape character, renders the ESCAPE keyword, e.g.:

        somecolumn.ilike("foo/%bar", escape="/")
      in_(other)
      inherited from the in_() method of ColumnOperators

      Implement the in operator.

      In a column context, produces the clause a IN other. “other” may be a tuple/list of column expressions, or a select() construct.
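
      E.g. (following the pattern of the other operator examples; sometable is assumed to be a Table defined elsewhere):

      select([sometable]).where(sometable.c.column.in_([1, 2, 3]))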

      info
      inherited from the info attribute of SchemaItem

      Info dictionary associated with the object, allowing user-defined data to be associated with this SchemaItem.

      The dictionary is automatically generated when first accessed. It can also be specified in the constructor of some objects, such as Table and Column.

      is_(other)
      inherited from the is_() method of ColumnOperators

      Implement the IS operator.

      Normally, IS is generated automatically when comparing to a value of None, which resolves to NULL. However, explicit usage of IS may be desirable if comparing to boolean values on certain platforms.

      New in version 0.7.9.

      is_clause_element = True
      is_selectable = False
      isnot(other)
      inherited from the isnot() method of ColumnOperators

      Implement the IS NOT operator.

      Normally, IS NOT is generated automatically when comparing to a value of None, which resolves to NULL. However, explicit usage of IS NOT may be desirable if comparing to boolean values on certain platforms.

      New in version 0.7.9.

      label(name)
      inherited from the label() method of ColumnElement

      Produce a column label, i.e. <columnname> AS <name>.

      This is a shortcut to the label() function.

      if ‘name’ is None, an anonymous label name will be generated.

      like(other, escape=None)
      inherited from the like() method of ColumnOperators

      Implement the like operator.

      In a column context, produces the clause a LIKE other.

      E.g.:

      select([sometable]).where(sometable.c.column.like("%foobar%"))
      Parameters:
      • other – expression to be compared
      • escape

        optional escape character, renders the ESCAPE keyword, e.g.:

        somecolumn.like("foo/%bar", escape="/")
      match(other, **kwargs)
      inherited from the match() method of ColumnOperators

      Implements the ‘match’ operator.

      In a column context, this produces a MATCH clause, i.e. MATCH '<other>'. The allowed contents of other are database backend specific.

      notilike(other, escape=None)
      inherited from the notilike() method of ColumnOperators

      implement the NOT ILIKE operator.

      This is equivalent to using negation with ColumnOperators.ilike(), i.e. ~x.ilike(y).

      New in version 0.8.

      notin_(other)
      inherited from the notin_() method of ColumnOperators

      implement the NOT IN operator.

      This is equivalent to using negation with ColumnOperators.in_(), i.e. ~x.in_(y).

      New in version 0.8.

      notlike(other, escape=None)
      inherited from the notlike() method of ColumnOperators

      implement the NOT LIKE operator.

      This is equivalent to using negation with ColumnOperators.like(), i.e. ~x.like(y).

      New in version 0.8.

      nullsfirst()
      inherited from the nullsfirst() method of ColumnOperators

      Produce a nullsfirst() clause against the parent object.

      nullslast()
      inherited from the nullslast() method of ColumnOperators

      Produce a nullslast() clause against the parent object.

      onupdate = None
      op(opstring, precedence=0)
      inherited from the op() method of Operators

      produce a generic operator function.

      e.g.:

      somecolumn.op("*")(5)

      produces:

      somecolumn * 5

      This function can also be used to make bitwise operators explicit. For example:

      somecolumn.op('&')(0xff)

      is a bitwise AND of the value in somecolumn.

      Parameters:
      • operator – a string which will be output as the infix operator between this element and the expression passed to the generated function.
      • precedence

        precedence to apply to the operator, when parenthesizing expressions. A lower number will cause the expression to be parenthesized when applied against another operator with higher precedence. The default value of 0 is lower than all operators except for the comma (,) and AS operators. A value of 100 will be higher or equal to all operators, and -100 will be lower than or equal to all operators.

        New in version 0.8: added the ‘precedence’ argument.

      operate(op, *other, **kwargs)
      inherited from the operate() method of ColumnElement
      params(*optionaldict, **kwargs)
      inherited from the params() method of Immutable
      primary_key = False
      proxy_set
      inherited from the proxy_set attribute of ColumnElement
      quote = None
      references(column)

      Return True if this Column references the given column via foreign key.

      reverse_operate(op, other, **kwargs)
      inherited from the reverse_operate() method of ColumnElement
      self_group(against=None)
      inherited from the self_group() method of ClauseElement

      Apply a ‘grouping’ to this ClauseElement.

      This method is overridden by subclasses to return a “grouping” construct, i.e. parenthesis. In particular it’s used by “binary” expressions to provide a grouping around themselves when placed into a larger expression, as well as by select() constructs when placed into the FROM clause of another select(). (Note that subqueries should be normally created using the Select.alias() method, as many platforms require nested SELECT statements to be named).

      As expressions are composed together, the application of self_group() is automatic - end-user code should never need to use this method directly. Note that SQLAlchemy’s clause constructs take operator precedence into account - so parenthesis might not be needed, for example, in an expression like x OR (y AND z) - AND takes precedence over OR.

      The base self_group() method of ClauseElement just returns self.

      server_default = None
      server_onupdate = None
      shares_lineage(othercolumn)
      inherited from the shares_lineage() method of ColumnElement

      Return True if the given ColumnElement has a common ancestor to this ColumnElement.

      startswith(other, **kwargs)
      inherited from the startswith() method of ColumnOperators

      Implement the startswith operator.

      In a column context, produces the clause LIKE '<other>%'

      supports_execution = False
      table
      inherited from the table attribute of ColumnClause
      timetuple = None
      type
      inherited from the type attribute of ColumnElement
      unique_params(*optionaldict, **kwargs)
      inherited from the unique_params() method of Immutable
      class sqlalchemy.schema.MetaData(bind=None, reflect=False, schema=None, quote_schema=None)

      Bases: sqlalchemy.schema.SchemaItem

      A collection of Table objects and their associated schema constructs.

      Holds a collection of Table objects as well as an optional binding to an Engine or Connection. If bound, the Table objects in the collection and their columns may participate in implicit SQL execution.

      The Table objects themselves are stored in the metadata.tables dictionary.

      The bind property may be assigned to dynamically. A common pattern is to start unbound and then bind later when an engine is available:

      metadata = MetaData()
      # define tables
      Table('mytable', metadata, ...)
      # connect to an engine later, perhaps after loading a URL from a
      # configuration file
      metadata.bind = an_engine

      MetaData is a thread-safe object after tables have been explicitly defined or loaded via reflection.

      See also

      Describing Databases with MetaData - Introduction to database metadata

      __init__(bind=None, reflect=False, schema=None, quote_schema=None)

      Create a new MetaData object.

      Parameters:
      • bind – An Engine or Connection to bind to. May also be a string or URL instance, these are passed to create_engine() and this MetaData will be bound to the resulting engine.
      • reflect

        Optional, automatically load all tables from the bound database. Defaults to False. bind is required when this option is set.

        Deprecated since version 0.8: Please use the MetaData.reflect() method.

      • schema – The default schema to use for the Table, Sequence, and other objects associated with this MetaData. Defaults to None.
      • quote_schema – Sets the quote_schema flag for those Table, Sequence, and other objects which make use of the local schema name.

      New in version 0.7.4: schema and quote_schema parameters.

      append_ddl_listener(event_name, listener)

      Append a DDL event listener to this MetaData.

      Deprecated. See DDLEvents.

      bind

      An Engine or Connection to which this MetaData is bound.

      Typically, an Engine is assigned to this attribute so that “implicit execution” may be used, or alternatively as a means of providing engine binding information to an ORM Session object:

      engine = create_engine("someurl://")
      metadata.bind = engine

      See also

      Connectionless Execution, Implicit Execution - background on “bound metadata”

      clear()

      Clear all Table objects from this MetaData.

      create_all(bind=None, tables=None, checkfirst=True)

      Create all tables stored in this metadata.

      Conditional by default, will not attempt to recreate tables already present in the target database.

      Parameters:
      • bind – A Connectable used to access the database; if None, uses the existing bind on this MetaData, if any.
      • tables – Optional list of Table objects, which is a subset of the total tables in the MetaData (others are ignored).
      • checkfirst – Defaults to True, don’t issue CREATEs for tables already present in the target database.
      drop_all(bind=None, tables=None, checkfirst=True)

      Drop all tables stored in this metadata.

      Conditional by default, will not attempt to drop tables not present in the target database.

      Parameters:
      • bind – A Connectable used to access the database; if None, uses the existing bind on this MetaData, if any.
      • tables – Optional list of Table objects, which is a subset of the total tables in the MetaData (others are ignored).
      • checkfirst – Defaults to True, only issue DROPs for tables confirmed to be present in the target database.
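      A hedged usage sketch for the two methods above (an_engine is an assumption):

      metadata.create_all(bind=an_engine)                  # CREATE, skipping tables already present
      # ... application lifetime ...
      metadata.drop_all(bind=an_engine, checkfirst=True)   # DROP only tables confirmed to exist
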
      is_bound()

      True if this MetaData is bound to an Engine or Connection.

      reflect(bind=None, schema=None, views=False, only=None)

      Load all available table definitions from the database.

      Automatically creates Table entries in this MetaData for any table available in the database but not yet present in the MetaData. May be called multiple times to pick up tables recently added to the database, however no special action is taken if a table in this MetaData no longer exists in the database.

      Parameters:
      • bind – A Connectable used to access the database; if None, uses the existing bind on this MetaData, if any.
      • schema – Optional, query and reflect tables from an alternate schema. If None, the schema associated with this MetaData is used, if any.
      • views – If True, also reflect views.
      • only

        Optional. Load only a sub-set of available named tables. May be specified as a sequence of names or a callable.

        If a sequence of names is provided, only those tables will be reflected. An error is raised if a table is requested but not available. Named tables already present in this MetaData are ignored.

        If a callable is provided, it will be used as a boolean predicate to filter the list of potential table names. The callable is called with a table name and this MetaData instance as positional arguments and should return a true value for any table to reflect.
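      A minimal hedged sketch of the callable form of only (someengine and the table-name prefix are assumptions):

      meta = MetaData()
      meta.reflect(bind=someengine,
                   only=lambda table_name, meta: table_name.startswith("billing_"))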

      remove(table)

      Remove the given Table object from this MetaData.

      sorted_tables

      Returns a list of Table objects sorted in order of foreign key dependency.

      The sorting will place Table objects that have dependencies first, before the dependencies themselves, representing the order in which they can be created. To get the order in which the tables would be dropped, use the reversed() Python built-in.

      See also

      Inspector.sorted_tables()

      class sqlalchemy.schema.SchemaItem

      Bases: sqlalchemy.events.SchemaEventTarget, sqlalchemy.sql.visitors.Visitable

      Base class for items that define a database schema.

      get_children(**kwargs)

      used to allow SchemaVisitor access

      info

      Info dictionary associated with the object, allowing user-defined data to be associated with this SchemaItem.

      The dictionary is automatically generated when first accessed. It can also be specified in the constructor of some objects, such as Table and Column.

      class sqlalchemy.schema.Table(*args, **kw)

      Bases: sqlalchemy.schema.SchemaItem, sqlalchemy.sql.expression.TableClause

      Represent a table in a database.

      e.g.:

      mytable = Table("mytable", metadata,
                      Column('mytable_id', Integer, primary_key=True),
                      Column('value', String(50))
                 )

      The Table object constructs a unique instance of itself based on its name and optional schema name within the given MetaData object. Calling the Table constructor with the same name and same MetaData argument a second time will return the same Table object - in this way the Table constructor acts as a registry function.

      See also

      Describing Databases with MetaData - Introduction to database metadata

      Constructor arguments are as follows:

      Parameters:
      • name

        The name of this table as represented in the database.

        This property, along with the schema, indicates the singleton identity of this table in relation to its parent MetaData. Additional calls to Table with the same name, metadata, and schema name will return the same Table object.

        Names which contain no upper case characters will be treated as case insensitive names, and will not be quoted unless they are a reserved word. Names with any number of upper case characters will be quoted and sent exactly. Note that this behavior applies even for databases which standardize upper case names as case insensitive such as Oracle.

      • metadata – a MetaData object which will contain this table. The metadata is used as a point of association of this table with other tables which are referenced via foreign key. It also may be used to associate this table with a particular Connectable.
      • *args – Additional positional arguments are used primarily to add the list of Column objects contained within this table. Similar to the style of a CREATE TABLE statement, other SchemaItem constructs may be added here, including PrimaryKeyConstraint, and ForeignKeyConstraint.
      • autoload – Defaults to False. When set to True, the Column objects for this table are reflected from the database. Usually there will be no Column objects in the constructor if this option is set.
      • autoload_replace

        If True, when using autoload=True and extend_existing=True, replace Column objects already present in the Table that’s in the MetaData registry with what’s reflected. Otherwise, all existing columns will be excluded from the reflection process. Note that this does not impact Column objects specified in the same call to Table which includes autoload, those always take precedence. Defaults to True.

        New in version 0.7.5.

      • autoload_with – If autoload==True, this is an optional Engine or Connection instance to be used for the table reflection. If None, the underlying MetaData’s bound connectable will be used.
      • extend_existing

        When True, indicates that if this Table is already present in the given MetaData, apply further arguments within the constructor to the existing Table.

        If extend_existing or keep_existing are not set, an error is raised if additional table modifiers are specified when the given Table is already present in the MetaData.

        Changed in version 0.7.4: extend_existing will work in conjunction with autoload=True to run a new reflection operation against the database; new Column objects will be produced from database metadata to replace those existing with the same name, and additional Column objects not present in the Table will be added.

        As is always the case with autoload=True, Column objects can be specified in the same Table constructor, which will take precedence. I.e.:

        Table("mytable", metadata,
                    Column('y', Integer),
                    extend_existing=True,
                    autoload=True,
                    autoload_with=engine
                )

        The above will overwrite all columns within mytable which are present in the database, except for y which will be used as is from the above definition. If the autoload_replace flag is set to False, no existing columns will be replaced.

      • implicit_returning – True by default - indicates that RETURNING can be used by default to fetch newly inserted primary key values, for backends which support this. Note that create_engine() also provides an implicit_returning flag.
      • include_columns – A list of strings indicating a subset of columns to be loaded via the autoload operation; table columns who aren’t present in this list will not be represented on the resulting Table object. Defaults to None which indicates all columns should be reflected.
      • info – Optional data dictionary which will be populated into the SchemaItem.info attribute of this object.
      • keep_existing

        When True, indicates that if this Table is already present in the given MetaData, ignore further arguments within the constructor to the existing Table, and return the Table object as originally created. This is to allow a function that wishes to define a new Table on first call, but on subsequent calls will return the same Table, without any of the declarations (particularly constraints) being applied a second time. Also see extend_existing.

        If extend_existing or keep_existing are not set, an error is raised if additional table modifiers are specified when the given Table is already present in the MetaData.

      • listeners

        A list of tuples of the form (<eventname>, <fn>) which will be passed to event.listen() upon construction. This alternate hook to event.listen() allows the establishment of a listener function specific to this Table before the “autoload” process begins. Particularly useful for the DDLEvents.column_reflect() event:

        def listen_for_reflect(table, column_info):
            "handle the column reflection event"
            # ...
        
        t = Table(
            'sometable',
            autoload=True,
            listeners=[
                ('column_reflect', listen_for_reflect)
            ])
      • mustexist – When True, indicates that this Table must already be present in the given MetaData collection, else an exception is raised.
      • prefixes – A list of strings to insert after CREATE in the CREATE TABLE statement. They will be separated by spaces.
      • quote – Force quoting of this table’s name on or off, corresponding to True or False. When left at its default of None, the column identifier will be quoted according to whether the name is case sensitive (identifiers with at least one upper case character are treated as case sensitive), or if it’s a reserved word. This flag is only needed to force quoting of a reserved word which is not known by the SQLAlchemy dialect.
      • quote_schema – same as ‘quote’ but applies to the schema identifier.
      • schema – The schema name for this table, which is required if the table resides in a schema other than the default selected schema for the engine’s database connection. Defaults to None.
      • useexisting – Deprecated. Use extend_existing.
      __init__(*args, **kw)

      Constructor for Table.

      This method is a no-op. See the top-level documentation for Table for constructor arguments.

      add_is_dependent_on(table)

      Add a ‘dependency’ for this Table.

      This is another Table object which must be created first before this one can, or dropped after this one.

      Usually, dependencies between tables are determined via ForeignKey objects. However, for other situations that create dependencies outside of foreign keys (rules, inheriting), this method can manually establish such a link.
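      A short hedged example (table names are assumptions; no ForeignKey links the two tables):

      parent_side = Table('parent_side', metadata, Column('id', Integer, primary_key=True))
      child_side = Table('child_side', metadata, Column('id', Integer, primary_key=True))
      # parent_side must be created before, and dropped after, child_side
      child_side.add_is_dependent_on(parent_side)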

      alias(name=None)
      inherited from the alias() method of FromClause

      return an alias of this FromClause.

      This is shorthand for calling:

      from sqlalchemy import alias
      a = alias(self, name=name)

      See alias() for details.

      append_column(column)

      Append a Column to this Table.

      The “key” of the newly added Column, i.e. the value of its .key attribute, will then be available in the .c collection of this Table, and the column definition will be included in any CREATE TABLE, SELECT, UPDATE, etc. statements generated from this Table construct.

      Note that this does not change the definition of the table as it exists within any underlying database, assuming that table has already been created in the database. Relational databases support the addition of columns to existing tables using the SQL ALTER command, which would need to be emitted for an already-existing table that doesn’t contain the newly added column.
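      For instance, a hedged sketch (the column name is an assumption):

      mytable.append_column(Column('comments', String(200)))
      # mytable.c.comments is now available in SELECT, INSERT and UPDATE constructs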

      append_constraint(constraint)

      Append a Constraint to this Table.

      This has the effect of the constraint being included in any future CREATE TABLE statement, assuming specific DDL creation events have not been associated with the given Constraint object.

      Note that this does not produce the constraint within the relational database automatically, for a table that already exists in the database. To add a constraint to an existing relational database table, the SQL ALTER command must be used. SQLAlchemy also provides the AddConstraint construct which can produce this SQL when invoked as an executable clause.

      append_ddl_listener(event_name, listener)

      Append a DDL event listener to this Table.

      Deprecated. See DDLEvents.

      bind

      Return the connectable associated with this Table.

      c
      inherited from the c attribute of FromClause

      An alias for the columns attribute.

      columns
      inherited from the columns attribute of FromClause

      A named-based collection of ColumnElement objects maintained by this FromClause.

      The columns, or c collection, is the gateway to the construction of SQL expressions using table-bound or other selectable-bound columns:

      select([mytable]).where(mytable.c.somecolumn == 5)
      compare(other, **kw)
      inherited from the compare() method of ClauseElement

      Compare this ClauseElement to the given ClauseElement.

      Subclasses should override the default behavior, which is a straight identity comparison.

      **kw are arguments consumed by subclass compare() methods and may be used to modify the criteria for comparison. (see ColumnElement)

      compile(bind=None, dialect=None, **kw)
      inherited from the compile() method of ClauseElement

      Compile this SQL expression.

      The return value is a Compiled object. Calling str() or unicode() on the returned value will yield a string representation of the result. The Compiled object also can return a dictionary of bind parameter names and values using the params accessor.

      Parameters:
      • bind – An Engine or Connection from which a Compiled will be acquired. This argument takes precedence over this ClauseElement‘s bound engine, if any.
      • column_keys – Used for INSERT and UPDATE statements, a list of column names which should be present in the VALUES clause of the compiled statement. If None, all columns from the target table object are rendered.
      • dialect – A Dialect instance from which a Compiled will be acquired. This argument takes precedence over the bind argument as well as this ClauseElement‘s bound engine, if any.
      • inline – Used for INSERT statements, for a dialect which does not support inline retrieval of newly generated primary key columns, will force the expression used to create the new primary key value to be rendered inline within the INSERT statement’s VALUES clause. This typically refers to Sequence execution but may also refer to any server-side default generation function associated with a primary key Column.
      correspond_on_equivalents(column, equivalents)
      inherited from the correspond_on_equivalents() method of FromClause

      Return corresponding_column for the given column, or if None search for a match in the given dictionary.

      corresponding_column(column, require_embedded=False)
      inherited from the corresponding_column() method of FromClause

      Given a ColumnElement, return the exported ColumnElement object from this Selectable which corresponds to that original Column via a common ancestor column.

      Parameters:
      • column – the target ColumnElement to be matched
      • require_embedded – only return corresponding columns for the given ColumnElement, if the given ColumnElement is actually present within a sub-element of this FromClause. Normally the column will match if it merely shares a common ancestor with one of the exported columns of this FromClause.

      count(whereclause=None, **params)
      inherited from the count() method of TableClause

      return a SELECT COUNT generated against this TableClause.

      create(bind=None, checkfirst=False)

      Issue a CREATE statement for this Table, using the given Connectable for connectivity.

      delete(whereclause=None, **kwargs)
      inherited from the delete() method of TableClause

      Generate a delete() construct against this TableClause.

      E.g.:

      table.delete().where(table.c.id==7)

      See delete() for argument and usage information.

      description
      inherited from the description attribute of TableClause
      dispatch

      alias of DDLEventsDispatch

      drop(bind=None, checkfirst=False)

      Issue a DROP statement for this Table, using the given Connectable for connectivity.

      exists(bind=None)

      Return True if this table exists.

      foreign_keys
      inherited from the foreign_keys attribute of FromClause

      Return the collection of ForeignKey objects which this FromClause references.

      get_children(column_collections=True, schema_visitor=False, **kw)
      implicit_returning = False
      info
      inherited from the info attribute of SchemaItem

      Info dictionary associated with the object, allowing user-defined data to be associated with this SchemaItem.

      The dictionary is automatically generated when first accessed. It can also be specified in the constructor of some objects, such as Table and Column.

      insert(values=None, inline=False, **kwargs)
      inherited from the insert() method of TableClause

      Generate an insert() construct against this TableClause.

      E.g.:

      table.insert().values(name='foo')

      See insert() for argument and usage information.

      is_clause_element = True
      is_derived_from(fromclause)
      inherited from the is_derived_from() method of FromClause

      Return True if this FromClause is ‘derived’ from the given FromClause.

      An example would be an Alias of a Table is derived from that Table.

      is_selectable = True
      join(right, onclause=None, isouter=False)
      inherited from the join() method of FromClause

      return a join of this FromClause against another FromClause.

      key
      named_with_column = True
      outerjoin(right, onclause=None)
      inherited from the outerjoin() method of FromClause

      return an outer join of this FromClause against another FromClause.

      params(*optionaldict, **kwargs)
      inherited from the params() method of Immutable
      primary_key
      inherited from the primary_key attribute of FromClause

      Return the collection of Column objects which comprise the primary key of this FromClause.

      quote = None
      replace_selectable(old, alias)
      inherited from the replace_selectable() method of FromClause

      replace all occurrences of FromClause ‘old’ with the given Alias object, returning a copy of this FromClause.

      schema = None
      select(whereclause=None, **params)
      inherited from the select() method of FromClause

      return a SELECT of this FromClause.

      See also

      select() - general purpose method which allows for arbitrary column lists.

      selectable
      inherited from the selectable attribute of Selectable
      self_group(against=None)
      inherited from the self_group() method of ClauseElement

      Apply a ‘grouping’ to this ClauseElement.

      This method is overridden by subclasses to return a “grouping” construct, i.e. parenthesis. In particular it’s used by “binary” expressions to provide a grouping around themselves when placed into a larger expression, as well as by select() constructs when placed into the FROM clause of another select(). (Note that subqueries should be normally created using the Select.alias() method, as many platforms require nested SELECT statements to be named).

      As expressions are composed together, the application of self_group() is automatic - end-user code should never need to use this method directly. Note that SQLAlchemy’s clause constructs take operator precedence into account - so parenthesis might not be needed, for example, in an expression like x OR (y AND z) - AND takes precedence over OR.

      The base self_group() method of ClauseElement just returns self.

      supports_execution = False
      tometadata(metadata, schema=<symbol 'retain_schema'>)

      Return a copy of this Table associated with a different MetaData.

      E.g.:

      some_engine = create_engine("sqlite:///some.db")
      
      # create two metadata
      meta1 = MetaData()
      meta2 = MetaData()
      
      # load 'users' from the sqlite engine
      users_table = Table('users', meta1, autoload=True,
                              autoload_with=some_engine)
      
      # create the same Table object for the plain metadata
      users_table_2 = users_table.tometadata(meta2)
      Parameters:
      • metadata – Target MetaData object.
      • schema – Optional string name of a target schema, or None for no schema. The Table object will be given this schema name upon copy. Defaults to the special symbol RETAIN_SCHEMA which indicates no change should be made to the schema name of the resulting Table.
      unique_params(*optionaldict, **kwargs)
      inherited from the unique_params() method of Immutable
      update(whereclause=None, values=None, inline=False, **kwargs)
      inherited from the update() method of TableClause

      Generate an update() construct against this TableClause.

      E.g.:

      table.update().where(table.c.id==7).values(name='foo')

      See update() for argument and usage information.

      class sqlalchemy.schema.ThreadLocalMetaData

      Bases: sqlalchemy.schema.MetaData

      A MetaData variant that presents a different bind in every thread.

      Makes the bind property of the MetaData a thread-local value, allowing this collection of tables to be bound to different Engine implementations or connections in each thread.

      The ThreadLocalMetaData starts off bound to None in each thread. Binds must be made explicitly by assigning to the bind property or using connect(). You can also re-bind dynamically multiple times per thread, just like a regular MetaData.
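      A hedged sketch of per-thread binding (the URLs are placeholders):

      from sqlalchemy import create_engine
      from sqlalchemy.schema import ThreadLocalMetaData

      meta = ThreadLocalMetaData()

      # executed inside worker thread A
      meta.bind = create_engine('postgresql://scott:tiger@node_a/test')

      # executed inside worker thread B; thread A's bind is unaffected
      meta.bind = 'postgresql://scott:tiger@node_b/test'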

      __init__()

      Construct a ThreadLocalMetaData.

      bind

      The bound Engine or Connection for this thread.

      This property may be assigned an Engine or Connection, or assigned a string or URL to automatically create a basic Engine for this bind with create_engine().

      dispose()

      Dispose all bound engines, in all thread contexts.

      is_bound()

      True if there is a bind for this thread.

      SQLAlchemy-0.8.4/doc/core/pooling.html0000644000076500000240000017627512251147470020310 0ustar classicstaff00000000000000 Connection Pooling — SQLAlchemy 0.8 Documentation


      Connection Pooling

      A connection pool is a standard technique used to maintain long running connections in memory for efficient re-use, as well as to provide management for the total number of connections an application might use simultaneously.

      Particularly for server-side web applications, a connection pool is the standard way to maintain a “pool” of active database connections in memory which are reused across requests.

      SQLAlchemy includes several connection pool implementations which integrate with the Engine. They can also be used directly for applications that want to add pooling to an otherwise plain DBAPI approach.

      Connection Pool Configuration

      The Engine returned by the create_engine() function in most cases has a QueuePool integrated, pre-configured with reasonable pooling defaults. If you’re reading this section only to learn how to enable pooling - congratulations! You’re already done.

      The most common QueuePool tuning parameters can be passed directly to create_engine() as keyword arguments: pool_size, max_overflow, pool_recycle and pool_timeout. For example:

      engine = create_engine('postgresql://me@localhost/mydb',
                             pool_size=20, max_overflow=0)

      In the case of SQLite, the SingletonThreadPool or NullPool are selected by the dialect to provide greater compatibility with SQLite’s threading and locking model, as well as to provide a reasonable default behavior to SQLite “memory” databases, which maintain their entire dataset within the scope of a single connection.

      All SQLAlchemy pool implementations have in common that none of them “pre create” connections - all implementations wait until first use before creating a connection. At that point, if no additional concurrent checkout requests for more connections are made, no additional connections are created. This is why it’s perfectly fine for create_engine() to default to using a QueuePool of size five without regard to whether or not the application really needs five connections queued up - the pool would only grow to that size if the application actually used five connections concurrently, in which case the usage of a small pool is an entirely appropriate default behavior.

      Switching Pool Implementations

      The usual way to use a different kind of pool with create_engine() is to use the poolclass argument. This argument accepts a class imported from the sqlalchemy.pool module, and handles the details of building the pool for you. Common options include specifying QueuePool with SQLite:

      from sqlalchemy.pool import QueuePool
      engine = create_engine('sqlite:///file.db', poolclass=QueuePool)

      Disabling pooling using NullPool:

      from sqlalchemy.pool import NullPool
      engine = create_engine(
                'postgresql+psycopg2://scott:tiger@localhost/test',
                poolclass=NullPool)

      Using a Custom Connection Function

      All Pool classes accept an argument creator which is a callable that creates a new connection. create_engine() accepts this function to pass onto the pool via an argument of the same name:

      import sqlalchemy.pool as pool
      import psycopg2
      
      def getconn():
          c = psycopg2.connect(username='ed', host='127.0.0.1', dbname='test')
          # do things with 'c' to set up
          return c
      
      engine = create_engine('postgresql+psycopg2://', creator=getconn)

      For most “initialize on connection” routines, it’s more convenient to use the PoolEvents event hooks, so that the usual URL argument to create_engine() is still usable. creator is there as a last resort for when a DBAPI has some form of connect that is not at all supported by SQLAlchemy.

      Constructing a Pool

      To use a Pool by itself, the creator function is the only argument that’s required and is passed first, followed by any additional options:

      import sqlalchemy.pool as pool
      import psycopg2
      
      def getconn():
          c = psycopg2.connect(username='ed', host='127.0.0.1', dbname='test')
          return c
      
      mypool = pool.QueuePool(getconn, max_overflow=10, pool_size=5)

      DBAPI connections can then be procured from the pool using the Pool.connect() function. The return value of this method is a DBAPI connection that’s contained within a transparent proxy:

      # get a connection
      conn = mypool.connect()
      
      # use it
      cursor = conn.cursor()
      cursor.execute("select foo")

      The purpose of the transparent proxy is to intercept the close() call, such that instead of the DBAPI connection being closed, it’s returned to the pool:

      # "close" the connection.  Returns
      # it to the pool.
      conn.close()

      The proxy also returns its contained DBAPI connection to the pool when it is garbage collected, although it's not deterministic in Python that this occurs immediately (it is typical with cPython).

      The close() step also performs the important step of calling the rollback() method of the DBAPI connection. This is so that any existing transaction on the connection is removed, not only ensuring that no existing state remains on next usage, but also so that table and row locks are released as well as that any isolated data snapshots are removed. This behavior can be disabled using the reset_on_return option of Pool.
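      A minimal hedged illustration, reusing the getconn creator from the examples above, of disabling this reset for a backend with no transaction support:

      mypool = pool.QueuePool(getconn, reset_on_return=None)  # skip rollback() when connections are returned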

      A particular pre-created Pool can be shared with one or more engines by passing it to the pool argument of create_engine():

      e = create_engine('postgresql://', pool=mypool)

      Pool Events

      Connection pools support an event interface that allows hooks to execute upon first connect, upon each new connection, and upon checkout and checkin of connections. See PoolEvents for details.
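      As a hedged illustration (the PRAGMA shown is an assumption about an SQLite-oriented setup), a listener can run against every newly created DBAPI connection:

      from sqlalchemy import event
      from sqlalchemy.pool import Pool

      @event.listens_for(Pool, "connect")
      def on_connect(dbapi_connection, connection_record):
          cursor = dbapi_connection.cursor()
          cursor.execute("PRAGMA foreign_keys=ON")  # example per-connection setup
          cursor.close()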

      Dealing with Disconnects

      The connection pool has the ability to refresh individual connections as well as its entire set of connections, setting the previously pooled connections as “invalid”. A common use case is to allow the connection pool to gracefully recover when the database server has been restarted, and all previously established connections are no longer functional. There are two approaches to this.

      Disconnect Handling - Optimistic

      The most common approach is to let SQLAlchemy handle disconnects as they occur, at which point the pool is refreshed. This assumes the Pool is used in conjunction with a Engine. The Engine has logic which can detect disconnection events and refresh the pool automatically.

      When the Connection attempts to use a DBAPI connection, and an exception is raised that corresponds to a “disconnect” event, the connection is invalidated. The Connection then calls the Pool.recreate() method, effectively invalidating all connections not currently checked out so that they are replaced with new ones upon next checkout:

      from sqlalchemy import create_engine, exc
      e = create_engine(...)
      c = e.connect()
      
      try:
          # suppose the database has been restarted.
          c.execute("SELECT * FROM table")
          c.close()
      except exc.DBAPIError, e:
          # an exception is raised, Connection is invalidated.
          if e.connection_invalidated:
              print "Connection was invalidated!"
      
      # after the invalidate event, a new connection
      # starts with a new Pool
      c = e.connect()
      c.execute("SELECT * FROM table")

      The above example illustrates that no special intervention is needed; the pool continues normally after a disconnection event is detected. However, the exception itself is still raised. In a typical web application using an ORM Session, the above condition would correspond to a single request failing with a 500 error, then the web application continuing normally beyond that. Hence the approach is “optimistic” in that frequent database restarts are not anticipated.

      Setting Pool Recycle

      An additional setting that can augment the “optimistic” approach is to set the pool recycle parameter. This parameter prevents the pool from using a particular connection that has passed a certain age, and is appropriate for database backends such as MySQL that automatically close connections that have been stale after a particular period of time:

      from sqlalchemy import create_engine
      e = create_engine("mysql://scott:tiger@localhost/test", pool_recycle=3600)

      Above, any DBAPI connection that has been open for more than one hour will be invalidated and replaced, upon next checkout. Note that the invalidation only occurs during checkout - not on any connections that are held in a checked out state. pool_recycle is a function of the Pool itself, independent of whether or not an Engine is in use.

      Disconnect Handling - Pessimistic

      At the expense of some extra SQL emitted for each connection checked out from the pool, a “ping” operation established by a checkout event handler can detect an invalid connection before it’s used:

      from sqlalchemy import exc
      from sqlalchemy import event
      from sqlalchemy.pool import Pool
      
      @event.listens_for(Pool, "checkout")
      def ping_connection(dbapi_connection, connection_record, connection_proxy):
          cursor = dbapi_connection.cursor()
          try:
              cursor.execute("SELECT 1")
          except:
              # optional - dispose the whole pool
              # instead of invalidating one at a time
              # connection_proxy._pool.dispose()
      
              # raise DisconnectionError - pool will try
              # connecting again up to three times before raising.
              raise exc.DisconnectionError()
          cursor.close()

      Above, the Pool object specifically catches DisconnectionError and attempts to create a new DBAPI connection, up to three times, before giving up and then raising InvalidRequestError, failing the connection. This recipe will ensure that a new Connection will succeed even if connections in the pool have gone stale, provided that the database server is actually running. The expense is that of an additional execution performed per checkout. When using the ORM Session, there is one connection checkout per transaction, so the expense is fairly low. The ping approach above also works with straight connection pool usage, that is, even if no Engine were involved.

      The event handler can be tested using a script like the following, restarting the database server at the point at which the script pauses for input:

      from sqlalchemy import create_engine
      e = create_engine("mysql://scott:tiger@localhost/test", echo_pool=True)
      c1 = e.connect()
      c2 = e.connect()
      c3 = e.connect()
      c1.close()
      c2.close()
      c3.close()
      
      # pool size is now three.
      
      print "Restart the server"
      raw_input()
      
      for i in xrange(10):
          c = e.connect()
          print c.execute("select 1").fetchall()
          c.close()

      API Documentation - Available Pool Implementations

      class sqlalchemy.pool.Pool(creator, recycle=-1, echo=None, use_threadlocal=False, logging_name=None, reset_on_return=True, listeners=None, events=None, _dispatch=None, _dialect=None)

      Bases: sqlalchemy.log.Identified

      Abstract base class for connection pools.

      __init__(creator, recycle=-1, echo=None, use_threadlocal=False, logging_name=None, reset_on_return=True, listeners=None, events=None, _dispatch=None, _dialect=None)

      Construct a Pool.

      Parameters:
      • creator – a callable function that returns a DB-API connection object. The function will be called with no parameters.
      • recycle – If set to non -1, number of seconds between connection recycling, which means upon checkout, if this timeout is surpassed the connection will be closed and replaced with a newly opened connection. Defaults to -1.
      • logging_name – String identifier which will be used within the “name” field of logging records generated within the “sqlalchemy.pool” logger. Defaults to a hexstring of the object’s id.
      • echo – If True, connections being pulled and retrieved from the pool will be logged to the standard output, as well as pool sizing information. Echoing can also be achieved by enabling logging for the “sqlalchemy.pool” namespace. Defaults to False.
      • use_threadlocal – If set to True, repeated calls to connect() within the same application thread will be guaranteed to return the same connection object, if one has already been retrieved from the pool and has not been returned yet. Offers a slight performance advantage at the cost of individual transactions by default. The unique_connection() method is provided to bypass the threadlocal behavior installed into connect().
      • reset_on_return – Configures the action to take on connections as they are returned to the pool. See the argument description in QueuePool for more detail.
      • events – a list of 2-tuples, each of the form (callable, target) which will be passed to event.listen() upon construction. Provided here so that event listeners can be assigned via create_engine before dialect-level listeners are applied.
      • listeners – Deprecated. A list of PoolListener-like objects or dictionaries of callables that receive events when DB-API connections are created, checked out and checked in to the pool. This has been superseded by listen().
      connect()

      Return a DBAPI connection from the pool.

      The connection is instrumented such that when its close() method is called, the connection will be returned to the pool.

      dispose()

      Dispose of this pool.

      This method leaves the possibility of checked-out connections remaining open, as it only affects connections that are idle in the pool.

      See also the Pool.recreate() method.

      recreate()

      Return a new Pool, of the same class as this one and configured with identical creation arguments.

      This method is used in conjunction with dispose() to close out an entire Pool and create a new one in its place.

      unique_connection()

      Produce a DBAPI connection that is not referenced by any thread-local context.

      This method is different from Pool.connect() only if the use_threadlocal flag has been set to True.

      class sqlalchemy.pool.QueuePool(creator, pool_size=5, max_overflow=10, timeout=30, **kw)

      Bases: sqlalchemy.pool.Pool

      A Pool that imposes a limit on the number of open connections.

      QueuePool is the default pooling implementation used for all Engine objects, unless the SQLite dialect is in use.

      __init__(creator, pool_size=5, max_overflow=10, timeout=30, **kw)

      Construct a QueuePool.

      Parameters:
      • creator – a callable function that returns a DB-API connection object. The function will be called with no parameters.
      • pool_size – The size of the pool to be maintained, defaults to 5. This is the largest number of connections that will be kept persistently in the pool. Note that the pool begins with no connections; once this number of connections is requested, that number of connections will remain. pool_size can be set to 0 to indicate no size limit; to disable pooling, use a NullPool instead.
      • max_overflow – The maximum overflow size of the pool. When the number of checked-out connections reaches the size set in pool_size, additional connections will be returned up to this limit. When those additional connections are returned to the pool, they are disconnected and discarded. It follows then that the total number of simultaneous connections the pool will allow is pool_size + max_overflow, and the total number of “sleeping” connections the pool will allow is pool_size. max_overflow can be set to -1 to indicate no overflow limit; no limit will be placed on the total number of concurrent connections. Defaults to 10.
      • timeout – The number of seconds to wait before giving up on returning a connection. Defaults to 30.
      • recycle – If set to non -1, number of seconds between connection recycling, which means upon checkout, if this timeout is surpassed the connection will be closed and replaced with a newly opened connection. Defaults to -1.
      • echo – If True, connections being pulled and retrieved from the pool will be logged to the standard output, as well as pool sizing information. Echoing can also be achieved by enabling logging for the “sqlalchemy.pool” namespace. Defaults to False.
      • use_threadlocal – If set to True, repeated calls to connect() within the same application thread will be guaranteed to return the same connection object, if one has already been retrieved from the pool and has not been returned yet. Offers a slight performance advantage at the cost of individual transactions by default. The unique_connection() method is provided to bypass the threadlocal behavior installed into connect().
      • reset_on_return

        Determine steps to take on connections as they are returned to the pool. reset_on_return can have any of these values:

        • ‘rollback’ - call rollback() on the connection, to release locks and transaction resources. This is the default value. The vast majority of use cases should leave this value set.
        • True - same as ‘rollback’, this is here for backwards compatibility.
        • ‘commit’ - call commit() on the connection, to release locks and transaction resources. A commit here may be desirable for databases that cache query plans if a commit is emitted, such as Microsoft SQL Server. However, this value is more dangerous than ‘rollback’ because any data changes present on the transaction are committed unconditionally.
        • None - don’t do anything on the connection. This setting should only be made on a database that has no transaction support at all, namely MySQL MyISAM. By not doing anything, performance can be improved. This setting should never be selected for a database that supports transactions, as it will lead to deadlocks and stale state.
        • False - same as None, this is here for backwards compatibility.

        Changed in version 0.7.6: reset_on_return accepts values.

      • listeners – A list of PoolListener-like objects or dictionaries of callables that receive events when DB-API connections are created, checked out and checked in to the pool.
      connect()

      Return a DBAPI connection from the pool.

      The connection is instrumented such that when its close() method is called, the connection will be returned to the pool.

      unique_connection()

      Produce a DBAPI connection that is not referenced by any thread-local context.

      This method is different from Pool.connect() only if the use_threadlocal flag has been set to True.

      class sqlalchemy.pool.SingletonThreadPool(creator, pool_size=5, **kw)

      Bases: sqlalchemy.pool.Pool

      A Pool that maintains one connection per thread.

      Maintains one connection per thread, never moving a connection to a thread other than the one in which it was created.

      Options are the same as those of Pool, as well as:

      Parameters:pool_size – The number of threads in which to maintain connections at once. Defaults to five.

      SingletonThreadPool is used by the SQLite dialect automatically when a memory-based database is used. See SQLite.

      __init__(creator, pool_size=5, **kw)
      class sqlalchemy.pool.AssertionPool(*args, **kw)

      Bases: sqlalchemy.pool.Pool

      A Pool that allows at most one checked out connection at any given time.

      This will raise an exception if more than one connection is checked out at a time. Useful for debugging code that is using more connections than desired.

      Changed in version 0.7: AssertionPool also logs a traceback of where the original connection was checked out, and reports this in the assertion error raised.

      class sqlalchemy.pool.NullPool(creator, recycle=-1, echo=None, use_threadlocal=False, logging_name=None, reset_on_return=True, listeners=None, events=None, _dispatch=None, _dialect=None)

      Bases: sqlalchemy.pool.Pool

      A Pool which does not pool connections.

      Instead it literally opens and closes the underlying DB-API connection per each connection open/close.

      Reconnect-related functions such as recycle and connection invalidation are not supported by this Pool implementation, since no connections are held persistently.

      Changed in version 0.7: NullPool is used by the SQLite dialect automatically when a file-based database is used. See SQLite.

      class sqlalchemy.pool.StaticPool(creator, recycle=-1, echo=None, use_threadlocal=False, logging_name=None, reset_on_return=True, listeners=None, events=None, _dispatch=None, _dialect=None)

      Bases: sqlalchemy.pool.Pool

      A Pool of exactly one connection, used for all requests.

      Reconnect-related functions such as recycle and connection invalidation (which is also used to support auto-reconnect) are not currently supported by this Pool implementation but may be implemented in a future release.

      Pooling Plain DB-API Connections

      Any PEP 249 DB-API module can be “proxied” through the connection pool transparently. Usage of the DB-API is exactly as before, except the connect() method will consult the pool. Below we illustrate this with psycopg2:

      import sqlalchemy.pool as pool
      import psycopg2 as psycopg
      
      psycopg = pool.manage(psycopg)
      
      # then connect normally
      connection = psycopg.connect(database='test', username='scott',
                                   password='tiger')

      This produces a _DBProxy object which supports the same connect() function as the original DB-API module. Upon connection, a connection proxy object is returned, which delegates its calls to a real DB-API connection object. This connection object is stored persistently within a connection pool (an instance of Pool) that corresponds to the exact connection arguments sent to the connect() function.

      The connection proxy supports all of the methods on the original connection object, most of which are proxied via __getattr__(). The close() method will return the connection to the pool, and the cursor() method will return a proxied cursor object. Both the connection proxy and the cursor proxy will also return the underlying connection to the pool after they have both been garbage collected, which is detected via weakref callbacks (__del__ is not used).

      Additionally, when connections are returned to the pool, a rollback() is issued on the connection unconditionally. This is to release any locks still held by the connection that may have resulted from normal activity.

      By default, the connect() method will return the same connection that is already checked out in the current thread. This allows a particular connection to be used in a given thread without needing to pass it around between functions. To disable this behavior, specify use_threadlocal=False to the manage() function.
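      For example, a sketch following the psycopg2 usage above:

      import sqlalchemy.pool as pool
      import psycopg2 as psycopg

      psycopg = pool.manage(psycopg, use_threadlocal=False)
      # connect() may now return a distinct pooled connection per call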

      sqlalchemy.pool.manage(module, **params)

      Return a proxy for a DB-API module that automatically pools connections.

      Given a DB-API 2.0 module and pool management parameters, returns a proxy for the module that will automatically pool connections, creating new connection pools for each distinct set of connection arguments sent to the decorated module’s connect() function.

      Parameters:
      • module – a DB-API 2.0 database module
      • poolclass – the class used by the pool module to provide pooling. Defaults to QueuePool.
      • **params – will be passed through to poolclass
      sqlalchemy.pool.clear_managers()

      Remove all current DB-API 2.0 managers.

      All pools and connections are disposed.

      SQLAlchemy-0.8.4/doc/core/reflection.html0000644000076500000240000013364112251147470020761 0ustar classicstaff00000000000000 Reflecting Database Objects — SQLAlchemy 0.8 Documentation


      Reflecting Database Objects

      A Table object can be instructed to load information about itself from the corresponding database schema object already existing within the database. This process is called reflection. In the simplest case you need only specify the table name, a MetaData object, and the autoload=True flag. If the MetaData is not persistently bound, also add the autoload_with argument:

      >>> messages = Table('messages', meta, autoload=True, autoload_with=engine)
      >>> [c.name for c in messages.columns]
      ['message_id', 'message_name', 'date']

      The above operation will use the given engine to query the database for information about the messages table, and will then generate Column, ForeignKey, and other objects corresponding to this information as though the Table object were hand-constructed in Python.

      When tables are reflected, if a given table references another one via foreign key, a second Table object is created within the MetaData object representing the connection. Below, assume the table shopping_cart_items references a table named shopping_carts. Reflecting the shopping_cart_items table has the effect such that the shopping_carts table will also be loaded:

      >>> shopping_cart_items = Table('shopping_cart_items', meta, autoload=True, autoload_with=engine)
      >>> 'shopping_carts' in meta.tables
      True

      The MetaData has an interesting “singleton-like” behavior such that if you requested both tables individually, MetaData will ensure that exactly one Table object is created for each distinct table name. The Table constructor actually returns to you the already-existing Table object if one already exists with the given name. For example, below we can access the already generated shopping_carts table just by naming it:

      shopping_carts = Table('shopping_carts', meta)

      Of course, it’s a good idea to use autoload=True with the above table regardless. This is so that the table’s attributes will be loaded if they have not been already. The autoload operation only occurs for the table if it hasn’t already been loaded; once loaded, new calls to Table with the same name will not re-issue any reflection queries.

      Overriding Reflected Columns

      Individual columns can be overridden with explicit values when reflecting tables; this is handy for specifying custom datatypes, constraints such as primary keys that may not be configured within the database, etc.:

      >>> mytable = Table('mytable', meta,
      ... Column('id', Integer, primary_key=True),   # override reflected 'id' to have primary key
      ... Column('mydata', Unicode(50)),    # override reflected 'mydata' to be Unicode
      ... autoload=True)

      Reflecting Views

      The reflection system can also reflect views. Basic usage is the same as that of a table:

      my_view = Table("some_view", metadata, autoload=True)

      Above, my_view is a Table object with Column objects representing the names and types of each column within the view “some_view”.

      Usually, it’s desired to have at least a primary key constraint when reflecting a view, if not foreign keys as well. View reflection doesn’t extrapolate these constraints.

      Use the “override” technique for this, specifying explicitly those columns which are part of the primary key or have foreign key constraints:

      my_view = Table("some_view", metadata,
                      Column("view_id", Integer, primary_key=True),
                      Column("related_thing", Integer, ForeignKey("othertable.thing_id")),
                      autoload=True
      )

      Reflecting All Tables at Once

      The MetaData object can also get a listing of tables and reflect the full set. This is achieved by using the reflect() method. After calling it, all located tables are present within the MetaData object’s dictionary of tables:

      meta = MetaData()
      meta.reflect(bind=someengine)
      users_table = meta.tables['users']
      addresses_table = meta.tables['addresses']

      metadata.reflect() also provides a handy way to clear or delete all the rows in a database:

      meta = MetaData()
      meta.reflect(bind=someengine)
      for table in reversed(meta.sorted_tables):
          someengine.execute(table.delete())

      Fine Grained Reflection with Inspector

      A low level interface which provides a backend-agnostic system of loading lists of schema, table, column, and constraint descriptions from a given database is also available. This is known as the “Inspector”:

      from sqlalchemy import create_engine
      from sqlalchemy.engine import reflection
      engine = create_engine('...')
      insp = reflection.Inspector.from_engine(engine)
      print insp.get_table_names()
      class sqlalchemy.engine.reflection.Inspector(bind)

      Performs database schema inspection.

      The Inspector acts as a proxy to the reflection methods of the Dialect, providing a consistent interface as well as caching support for previously fetched metadata.

An Inspector object is usually created via the inspect() function:

      from sqlalchemy import inspect, create_engine
      engine = create_engine('...')
      insp = inspect(engine)

      The inspection method above is equivalent to using the Inspector.from_engine() method, i.e.:

      engine = create_engine('...')
      insp = Inspector.from_engine(engine)

      Where above, the Dialect may opt to return an Inspector subclass that provides additional methods specific to the dialect’s target database.

      __init__(bind)

      Initialize a new Inspector.

      Parameters:bind – a Connectable, which is typically an instance of Engine or Connection.

      For a dialect-specific instance of Inspector, see Inspector.from_engine()

      default_schema_name

      Return the default schema name presented by the dialect for the current engine’s database user.

      E.g. this is typically public for Postgresql and dbo for SQL Server.

      classmethod from_engine(bind)

      Construct a new dialect-specific Inspector object from the given engine or connection.

      Parameters:bind – a Connectable, which is typically an instance of Engine or Connection.

This method differs from a direct constructor call of Inspector in that the Dialect is given a chance to provide a dialect-specific Inspector instance, which may provide additional methods.

      See the example at Inspector.

      get_columns(table_name, schema=None, **kw)

      Return information about columns in table_name.

      Given a string table_name and an optional string schema, return column information as a list of dicts with these keys:

• name – the column’s name
• type – TypeEngine
• nullable – boolean
• default – the column’s default value
• attrs – dict containing optional column attributes
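
As a brief sketch of typical usage (the engine from the earlier example and a reflected table named 'users' are assumed):

insp = reflection.Inspector.from_engine(engine)
for col in insp.get_columns('users'):
    print col['name'], col['type'], col['nullable']
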
      get_foreign_keys(table_name, schema=None, **kw)

      Return information about foreign_keys in table_name.

      Given a string table_name, and an optional string schema, return foreign key information as a list of dicts with these keys:

• constrained_columns – a list of column names that make up the foreign key
• referred_schema – the name of the referred schema
• referred_table – the name of the referred table
• referred_columns – a list of column names in the referred table that correspond to constrained_columns
• name – optional name of the foreign key constraint
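
For example, a sketch using the insp object from above and the shopping_cart_items table described earlier:

for fk in insp.get_foreign_keys('shopping_cart_items'):
    print fk['constrained_columns'], '->', fk['referred_table'], fk['referred_columns']
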
      get_indexes(table_name, schema=None, **kw)

      Return information about indexes in table_name.

      Given a string table_name and an optional string schema, return index information as a list of dicts with these keys:

• name – the index’s name
• column_names – list of column names in order
• unique – boolean
      get_pk_constraint(table_name, schema=None, **kw)

      Return information about primary key constraint on table_name.

      Given a string table_name, and an optional string schema, return primary key information as a dictionary with these keys:

• constrained_columns – a list of column names that make up the primary key
• name – optional name of the primary key constraint
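
A short sketch, again assuming the insp object and a reflected 'users' table:

pk = insp.get_pk_constraint('users')
print pk['constrained_columns']
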
      get_primary_keys(table_name, schema=None, **kw)

      Return information about primary keys in table_name.

      Deprecated since version 0.7: Call to deprecated method get_primary_keys. Use get_pk_constraint instead.

      Given a string table_name, and an optional string schema, return primary key information as a list of column names.

      get_schema_names()

      Return all schema names.

      get_table_names(schema=None, order_by=None)

Return all table names referred to within a particular schema.

      The names are expected to be real tables only, not views. Views are instead returned using the Inspector.get_view_names() method.

      Parameters:
      • schema – Schema name. If schema is left at None, the database’s default schema is used, else the named schema is searched. If the database does not support named schemas, behavior is undefined if schema is not passed as None.
      • order_by

        Optional, may be the string “foreign_key” to sort the result on foreign key dependencies.

        Changed in version 0.8: the “foreign_key” sorting sorts tables in order of dependee to dependent; that is, in creation order, rather than in drop order. This is to maintain consistency with similar features such as MetaData.sorted_tables and util.sort_tables().

      get_table_options(table_name, schema=None, **kw)

      Return a dictionary of options specified when the table of the given name was created.

      This currently includes some options that apply to MySQL tables.

      get_unique_constraints(table_name, schema=None, **kw)

      Return information about unique constraints in table_name.

      Given a string table_name and an optional string schema, return unique constraint information as a list of dicts with these keys:

• name – the unique constraint’s name
• column_names – list of column names in order

      New in version 0.8.4.

      get_view_definition(view_name, schema=None)

      Return definition for view_name.

      Parameters:schema – Optional, retrieve names from a non-default schema.
      get_view_names(schema=None)

      Return all view names in schema.

      Parameters:schema – Optional, retrieve names from a non-default schema.
      reflecttable(table, include_columns, exclude_columns=())

      Given a Table object, load its internal constructs based on introspection.

      This is the underlying method used by most dialects to produce table reflection. Direct usage is like:

from sqlalchemy import create_engine, MetaData, Table
from sqlalchemy.engine import reflection

engine = create_engine('...')
meta = MetaData()
user_table = Table('user', meta)
insp = reflection.Inspector.from_engine(engine)
insp.reflecttable(user_table, None)
      Parameters:
      • table – a Table instance.
      • include_columns – a list of string column names to include in the reflection process. If None, all columns are reflected.

      Limitations of Reflection

      It’s important to note that the reflection process recreates Table metadata using only information which is represented in the relational database. This process by definition cannot restore aspects of a schema that aren’t actually stored in the database. State which is not available from reflection includes but is not limited to:

      • Client side defaults, either Python functions or SQL expressions defined using the default keyword of Column (note this is separate from server_default, which specifically is what’s available via reflection).
      • Column information, e.g. data that might have been placed into the Column.info dictionary
      • The value of the .quote setting for Column or Table
• The association of a particular Sequence with a given Column

The relational database also in many cases reports on table metadata in a different format than what was specified in SQLAlchemy. The Table objects returned from reflection cannot always be relied upon to produce the identical DDL as the original Python-defined Table objects. Areas where this occurs include server defaults, column-associated sequences and various idiosyncrasies regarding constraints and datatypes. Server side defaults may be returned with cast directives (typically Postgresql will include a ::<type> cast) or different quoting patterns than originally specified.

      Another category of limitation includes schema structures for which reflection is only partially or not yet defined. Recent improvements to reflection allow things like views, indexes and foreign key options to be reflected. As of this writing, structures like CHECK constraints, table comments, and triggers are not reflected.
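
Where such in-Python state is needed on a reflected Table, it can be re-applied using the override technique shown earlier. The sketch below (the log_entries table and created_at column names are hypothetical) re-establishes a client-side default that reflection alone cannot recover:

from datetime import datetime
from sqlalchemy import Table, Column, DateTime

log_table = Table('log_entries', meta,
    Column('created_at', DateTime, default=datetime.utcnow),   # client-side default; not recoverable via reflection
    autoload=True, autoload_with=engine)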

      SQLAlchemy-0.8.4/doc/core/schema.html0000644000076500000240000002026212251147470020061 0ustar classicstaff00000000000000 Schema Definition Language — SQLAlchemy 0.8 Documentation

      Schema Definition Language

      This section references SQLAlchemy schema metadata, a comprehensive system of describing and inspecting database schemas.

      The core of SQLAlchemy’s query and object mapping operations are supported by database metadata, which is comprised of Python objects that describe tables and other schema-level objects. These objects are at the core of three major types of operations - issuing CREATE and DROP statements (known as DDL), constructing SQL queries, and expressing information about structures that already exist within the database.

      Database metadata can be expressed by explicitly naming the various components and their properties, using constructs such as Table, Column, ForeignKey and Sequence, all of which are imported from the sqlalchemy.schema package. It can also be generated by SQLAlchemy using a process called reflection, which means you start with a single object such as Table, assign it a name, and then instruct SQLAlchemy to load all the additional information related to that name from a particular engine source.

      A key feature of SQLAlchemy’s database metadata constructs is that they are designed to be used in a declarative style which closely resembles that of real DDL. They are therefore most intuitive to those who have some background in creating real schema generation scripts.

      SQLAlchemy-0.8.4/doc/core/selectable.html0000644000076500000240000142662512251147471020743 0ustar classicstaff00000000000000 Selectables, Tables, FROM objects — SQLAlchemy 0.8 Documentation

      Selectables, Tables, FROM objects

      The term “selectable” refers to any object that rows can be selected from; in SQLAlchemy, these objects descend from FromClause and their distinguishing feature is their FromClause.c attribute, which is a namespace of all the columns contained within the FROM clause (these elements are themselves ColumnElement subclasses).

      sqlalchemy.sql.expression.alias(selectable, name=None)

      Return an Alias object.

      An Alias represents any FromClause with an alternate name assigned within SQL, typically using the AS clause when generated, e.g. SELECT * FROM table AS aliasname.

      Similar functionality is available via the alias() method available on all FromClause subclasses.

      When an Alias is created from a Table object, this has the effect of the table being rendered as tablename AS aliasname in a SELECT statement.

      For select() objects, the effect is that of creating a named subquery, i.e. (select ...) AS aliasname.

      The name parameter is optional, and provides the name to use in the rendered SQL. If blank, an “anonymous” name will be deterministically generated at compile time. Deterministic means the name is guaranteed to be unique against other constructs used in the same statement, and will also be the same name for each successive compilation of the same statement object.

      Parameters:
      • selectable – any FromClause subclass, such as a table, select statement, etc.
      • name – string name to be assigned as the alias. If None, a name will be deterministically generated at compile time.
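
As a brief sketch (a user_table Table with an id column is assumed), aliasing allows the same table to appear more than once in a single statement:

from sqlalchemy import alias, select

user_alias = alias(user_table, name='user_2')
stmt = select([user_table.c.id, user_alias.c.id]).where(user_table.c.id != user_alias.c.id)
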
      sqlalchemy.sql.expression.except_(*selects, **kwargs)

      Return an EXCEPT of multiple selectables.

      The returned object is an instance of CompoundSelect.

      *selects
      a list of Select instances.
      **kwargs
      available keyword arguments are the same as those of select().
      sqlalchemy.sql.expression.except_all(*selects, **kwargs)

      Return an EXCEPT ALL of multiple selectables.

      The returned object is an instance of CompoundSelect.

      *selects
      a list of Select instances.
      **kwargs
      available keyword arguments are the same as those of select().
      sqlalchemy.sql.expression.exists(*args, **kwargs)

      Return an EXISTS clause as applied to a Select object.

      Calling styles are of the following forms:

      # use on an existing select()
      s = select([table.c.col1]).where(table.c.col2==5)
      s = exists(s)
      
      # construct a select() at once
      exists(['*'], **select_arguments).where(criterion)
      
      # columns argument is optional, generates "EXISTS (SELECT *)"
      # by default.
      exists().where(table.c.col2==5)
      sqlalchemy.sql.expression.intersect(*selects, **kwargs)

      Return an INTERSECT of multiple selectables.

      The returned object is an instance of CompoundSelect.

      *selects
      a list of Select instances.
      **kwargs
      available keyword arguments are the same as those of select().
      sqlalchemy.sql.expression.intersect_all(*selects, **kwargs)

      Return an INTERSECT ALL of multiple selectables.

      The returned object is an instance of CompoundSelect.

      *selects
      a list of Select instances.
      **kwargs
      available keyword arguments are the same as those of select().
      sqlalchemy.sql.expression.join(left, right, onclause=None, isouter=False)

      Return a JOIN clause element (regular inner join).

      The returned object is an instance of Join.

      Similar functionality is also available via the join() method on any FromClause.

      Parameters:
      • left – The left side of the join.
      • right – The right side of the join.
• onclause – Optional criterion for the ON clause; if omitted, it is derived from the foreign key relationships established between left and right.

      To chain joins together, use the FromClause.join() or FromClause.outerjoin() methods on the resulting Join object.
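
A short sketch (users and addresses tables with the indicated columns are assumed):

from sqlalchemy import join, select

j = join(users, addresses, users.c.id == addresses.c.user_id)
stmt = select([users.c.name, addresses.c.email]).select_from(j)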

      sqlalchemy.sql.expression.outerjoin(left, right, onclause=None)

      Return an OUTER JOIN clause element.

      The returned object is an instance of Join.

      Similar functionality is also available via the outerjoin() method on any FromClause.

      Parameters:
      • left – The left side of the join.
      • right – The right side of the join.
• onclause – Optional criterion for the ON clause; if omitted, it is derived from the foreign key relationships established between left and right.

      To chain joins together, use the FromClause.join() or FromClause.outerjoin() methods on the resulting Join object.

      sqlalchemy.sql.expression.select(columns=None, whereclause=None, from_obj=[], **kwargs)

      Returns a SELECT clause element.

      Similar functionality is also available via the select() method on any FromClause.

      The returned object is an instance of Select.

      All arguments which accept ClauseElement arguments also accept string arguments, which will be converted as appropriate into either text() or literal_column() constructs.

      See also

      Selecting - Core Tutorial description of select().

      Parameters:
      • columns

        A list of ClauseElement objects, typically ColumnElement objects or subclasses, which will form the columns clause of the resulting statement. For all members which are instances of Selectable, the individual ColumnElement members of the Selectable will be added individually to the columns clause. For example, specifying a Table instance will result in all the contained Column objects within to be added to the columns clause.

        This argument is not present on the form of select() available on Table.

      • whereclause – A ClauseElement expression which will be used to form the WHERE clause.
• from_obj – A list of ClauseElement objects which will be added to the FROM clause of the resulting statement. Note that “from” objects are automatically located within the columns and whereclause ClauseElements. Use this parameter to explicitly specify “from” objects which are not automatically locatable. This could include Table objects that aren’t otherwise present, or Join objects whose presence will supersede that of the Table objects already located in the other clauses.
      • autocommit – Deprecated. Use .execution_options(autocommit=<True|False>) to set the autocommit option.
      • bind=None – an Engine or Connection instance to which the resulting Select object will be bound. The Select object will otherwise automatically bind to whatever Connectable instances can be located within its contained ClauseElement members.
      • correlate=True – indicates that this Select object should have its contained FromClause elements “correlated” to an enclosing Select object. This means that any ClauseElement instance within the “froms” collection of this Select which is also present in the “froms” collection of an enclosing select will not be rendered in the FROM clause of this select statement.
      • distinct=False

        when True, applies a DISTINCT qualifier to the columns clause of the resulting statement.

        The boolean argument may also be a column expression or list of column expressions - this is a special calling form which is understood by the Postgresql dialect to render the DISTINCT ON (<columns>) syntax.

        distinct is also available via the distinct() generative method.

      • for_update=False

        when True, applies FOR UPDATE to the end of the resulting statement.

        Certain database dialects also support alternate values for this parameter:

        • With the MySQL dialect, the value "read" translates to LOCK IN SHARE MODE.
        • With the Oracle and Postgresql dialects, the value "nowait" translates to FOR UPDATE NOWAIT.
        • With the Postgresql dialect, the values “read” and "read_nowait" translate to FOR SHARE and FOR SHARE NOWAIT, respectively.

          New in version 0.7.7.

      • group_by – a list of ClauseElement objects which will comprise the GROUP BY clause of the resulting select.
      • having – a ClauseElement that will comprise the HAVING clause of the resulting select when GROUP BY is used.
      • limit=None – a numerical value which usually compiles to a LIMIT expression in the resulting select. Databases that don’t support LIMIT will attempt to provide similar functionality.
      • offset=None – a numeric value which usually compiles to an OFFSET expression in the resulting select. Databases that don’t support OFFSET will attempt to provide similar functionality.
      • order_by – a scalar or list of ClauseElement objects which will comprise the ORDER BY clause of the resulting select.
      • use_labels=False

        when True, the statement will be generated using labels for each column in the columns clause, which qualify each column with its parent table’s (or aliases) name so that name conflicts between columns in different tables don’t occur. The format of the label is <tablename>_<column>. The “c” collection of the resulting Select object will use these names as well for targeting column members.

        use_labels is also available via the apply_labels() generative method.

      sqlalchemy.sql.expression.subquery(alias, *args, **kwargs)

      Return an Alias object derived from a Select.

Parameters:
• alias – the alias name
• *args, **kwargs – all other arguments are delivered to the select() function.
      sqlalchemy.sql.expression.table(name, *columns)

      Represent a textual table clause.

      The object returned is an instance of TableClause, which represents the “syntactical” portion of the schema-level Table object. It may be used to construct lightweight table constructs.

      Note that the table() function is not part of the sqlalchemy namespace. It must be imported from the sql package:

      from sqlalchemy.sql import table, column
      Parameters:
      • name – Name of the table.
      • columns – A collection of column() constructs.

      See TableClause for further examples.
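
For example, a lightweight sketch that builds a statement without any MetaData involvement:

from sqlalchemy.sql import table, column, select

user = table('user', column('id'), column('name'))
stmt = select([user.c.id]).where(user.c.name == 'ed')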

      sqlalchemy.sql.expression.union(*selects, **kwargs)

      Return a UNION of multiple selectables.

      The returned object is an instance of CompoundSelect.

      A similar union() method is available on all FromClause subclasses.

      *selects
      a list of Select instances.
      **kwargs
      available keyword arguments are the same as those of select().
      sqlalchemy.sql.expression.union_all(*selects, **kwargs)

      Return a UNION ALL of multiple selectables.

      The returned object is an instance of CompoundSelect.

      A similar union_all() method is available on all FromClause subclasses.

      *selects
      a list of Select instances.
      **kwargs
      available keyword arguments are the same as those of select().
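
As a sketch (a users table with a name column is assumed), combining two SELECT statements:

from sqlalchemy import select, union_all

stmt = union_all(
    select([users.c.name]).where(users.c.name == 'ed'),
    select([users.c.name]).where(users.c.name == 'wendy')
)
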
      class sqlalchemy.sql.expression.Alias(selectable, name=None)

      Bases: sqlalchemy.sql.expression.FromClause

Represents a table or selectable alias (AS).

      Represents an alias, as typically applied to any table or sub-select within a SQL statement using the AS keyword (or without the keyword on certain databases such as Oracle).

      This object is constructed from the alias() module level function as well as the FromClause.alias() method available on all FromClause subclasses.

      alias(name=None)
      inherited from the alias() method of FromClause

      return an alias of this FromClause.

      This is shorthand for calling:

      from sqlalchemy import alias
      a = alias(self, name=name)

      See alias() for details.

      c
      inherited from the c attribute of FromClause

      An alias for the columns attribute.

      columns
      inherited from the columns attribute of FromClause

A name-based collection of ColumnElement objects maintained by this FromClause.

      The columns, or c collection, is the gateway to the construction of SQL expressions using table-bound or other selectable-bound columns:

      select([mytable]).where(mytable.c.somecolumn == 5)
      compare(other, **kw)
      inherited from the compare() method of ClauseElement

      Compare this ClauseElement to the given ClauseElement.

      Subclasses should override the default behavior, which is a straight identity comparison.

      **kw are arguments consumed by subclass compare() methods and may be used to modify the criteria for comparison. (see ColumnElement)

      compile(bind=None, dialect=None, **kw)
      inherited from the compile() method of ClauseElement

      Compile this SQL expression.

      The return value is a Compiled object. Calling str() or unicode() on the returned value will yield a string representation of the result. The Compiled object also can return a dictionary of bind parameter names and values using the params accessor.

      Parameters:
      • bind – An Engine or Connection from which a Compiled will be acquired. This argument takes precedence over this ClauseElement‘s bound engine, if any.
      • column_keys – Used for INSERT and UPDATE statements, a list of column names which should be present in the VALUES clause of the compiled statement. If None, all columns from the target table object are rendered.
      • dialect – A Dialect instance from which a Compiled will be acquired. This argument takes precedence over the bind argument as well as this ClauseElement‘s bound engine, if any.
      • inline – Used for INSERT statements, for a dialect which does not support inline retrieval of newly generated primary key columns, will force the expression used to create the new primary key value to be rendered inline within the INSERT statement’s VALUES clause. This typically refers to Sequence execution but may also refer to any server-side default generation function associated with a primary key Column.
      correspond_on_equivalents(column, equivalents)
      inherited from the correspond_on_equivalents() method of FromClause

      Return corresponding_column for the given column, or if None search for a match in the given dictionary.

      corresponding_column(column, require_embedded=False)
      inherited from the corresponding_column() method of FromClause

      Given a ColumnElement, return the exported ColumnElement object from this Selectable which corresponds to that original Column via a common ancestor column.

      Parameters:
      • column – the target ColumnElement to be matched
• require_embedded – only return corresponding columns for the given ColumnElement, if the given ColumnElement is actually present within a sub-element of this FromClause. Normally the column will match if it merely shares a common ancestor with one of the exported columns of this FromClause.

      count(whereclause=None, **params)
      inherited from the count() method of FromClause

      return a SELECT COUNT generated against this FromClause.

      foreign_keys
      inherited from the foreign_keys attribute of FromClause

      Return the collection of ForeignKey objects which this FromClause references.

      join(right, onclause=None, isouter=False)
      inherited from the join() method of FromClause

      return a join of this FromClause against another FromClause.

      outerjoin(right, onclause=None)
      inherited from the outerjoin() method of FromClause

      return an outer join of this FromClause against another FromClause.

      params(*optionaldict, **kwargs)
      inherited from the params() method of ClauseElement

      Return a copy with bindparam() elements replaced.

      Returns a copy of this ClauseElement with bindparam() elements replaced with values taken from the given dictionary:

      >>> clause = column('x') + bindparam('foo')
      >>> print clause.compile().params
      {'foo':None}
      >>> print clause.params({'foo':7}).compile().params
      {'foo':7}
      primary_key
      inherited from the primary_key attribute of FromClause

      Return the collection of Column objects which comprise the primary key of this FromClause.

      replace_selectable(old, alias)
      inherited from the replace_selectable() method of FromClause

      replace all occurrences of FromClause ‘old’ with the given Alias object, returning a copy of this FromClause.

      select(whereclause=None, **params)
      inherited from the select() method of FromClause

      return a SELECT of this FromClause.

      See also

      select() - general purpose method which allows for arbitrary column lists.

      self_group(against=None)
      inherited from the self_group() method of ClauseElement

      Apply a ‘grouping’ to this ClauseElement.

      This method is overridden by subclasses to return a “grouping” construct, i.e. parenthesis. In particular it’s used by “binary” expressions to provide a grouping around themselves when placed into a larger expression, as well as by select() constructs when placed into the FROM clause of another select(). (Note that subqueries should be normally created using the Select.alias() method, as many platforms require nested SELECT statements to be named).

      As expressions are composed together, the application of self_group() is automatic - end-user code should never need to use this method directly. Note that SQLAlchemy’s clause constructs take operator precedence into account - so parenthesis might not be needed, for example, in an expression like x OR (y AND z) - AND takes precedence over OR.

      The base self_group() method of ClauseElement just returns self.

      unique_params(*optionaldict, **kwargs)
      inherited from the unique_params() method of ClauseElement

      Return a copy with bindparam() elements replaced.

      Same functionality as params(), except adds unique=True to affected bind parameters so that multiple statements can be used.

      class sqlalchemy.sql.expression.CompoundSelect(keyword, *selects, **kwargs)

      Bases: sqlalchemy.sql.expression.SelectBase

      Forms the basis of UNION, UNION ALL, and other
      SELECT-based set operations.
      alias(name=None)
      inherited from the alias() method of FromClause

      return an alias of this FromClause.

      This is shorthand for calling:

      from sqlalchemy import alias
      a = alias(self, name=name)

      See alias() for details.

      append_group_by(*clauses)
      inherited from the append_group_by() method of SelectBase

      Append the given GROUP BY criterion applied to this selectable.

      The criterion will be appended to any pre-existing GROUP BY criterion.

      This is an in-place mutation method; the group_by() method is preferred, as it provides standard method chaining.

      append_order_by(*clauses)
      inherited from the append_order_by() method of SelectBase

      Append the given ORDER BY criterion applied to this selectable.

      The criterion will be appended to any pre-existing ORDER BY criterion.

      This is an in-place mutation method; the order_by() method is preferred, as it provides standard method chaining.

      apply_labels()
      inherited from the apply_labels() method of SelectBase

      return a new selectable with the ‘use_labels’ flag set to True.

      This will result in column expressions being generated using labels against their table name, such as “SELECT somecolumn AS tablename_somecolumn”. This allows selectables which contain multiple FROM clauses to produce a unique set of column names regardless of name conflicts among the individual FROM clauses.
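
A brief sketch (users and addresses tables are assumed, each with an id column):

stmt = select([users, addresses]).apply_labels()
# columns are now addressable via labeled names, e.g. stmt.c.users_id, stmt.c.addresses_id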

      as_scalar()
      inherited from the as_scalar() method of SelectBase

      return a ‘scalar’ representation of this selectable, which can be used as a column expression.

      Typically, a select statement which has only one column in its columns clause is eligible to be used as a scalar expression.

      The returned object is an instance of ScalarSelect.
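
A sketch (users and addresses tables assumed) of using the scalar form as a column expression within an enclosing SELECT:

scalar_email = select([addresses.c.email]).\
    where(addresses.c.user_id == users.c.id).\
    as_scalar()
stmt = select([users.c.name, scalar_email.label('email')])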

      autocommit()
      inherited from the autocommit() method of SelectBase

return a new selectable with the ‘autocommit’ flag set to True.

Deprecated since version 0.6: autocommit() is deprecated. Use Executable.execution_options() with the ‘autocommit’ flag.

      c
      inherited from the c attribute of FromClause

      An alias for the columns attribute.

      columns
      inherited from the columns attribute of FromClause

A name-based collection of ColumnElement objects maintained by this FromClause.

      The columns, or c collection, is the gateway to the construction of SQL expressions using table-bound or other selectable-bound columns:

      select([mytable]).where(mytable.c.somecolumn == 5)
      compare(other, **kw)
      inherited from the compare() method of ClauseElement

      Compare this ClauseElement to the given ClauseElement.

      Subclasses should override the default behavior, which is a straight identity comparison.

      **kw are arguments consumed by subclass compare() methods and may be used to modify the criteria for comparison. (see ColumnElement)

      compile(bind=None, dialect=None, **kw)
      inherited from the compile() method of ClauseElement

      Compile this SQL expression.

      The return value is a Compiled object. Calling str() or unicode() on the returned value will yield a string representation of the result. The Compiled object also can return a dictionary of bind parameter names and values using the params accessor.

      Parameters:
      • bind – An Engine or Connection from which a Compiled will be acquired. This argument takes precedence over this ClauseElement‘s bound engine, if any.
      • column_keys – Used for INSERT and UPDATE statements, a list of column names which should be present in the VALUES clause of the compiled statement. If None, all columns from the target table object are rendered.
      • dialect – A Dialect instance from which a Compiled will be acquired. This argument takes precedence over the bind argument as well as this ClauseElement‘s bound engine, if any.
      • inline – Used for INSERT statements, for a dialect which does not support inline retrieval of newly generated primary key columns, will force the expression used to create the new primary key value to be rendered inline within the INSERT statement’s VALUES clause. This typically refers to Sequence execution but may also refer to any server-side default generation function associated with a primary key Column.
      correspond_on_equivalents(column, equivalents)
      inherited from the correspond_on_equivalents() method of FromClause

      Return corresponding_column for the given column, or if None search for a match in the given dictionary.

      corresponding_column(column, require_embedded=False)
      inherited from the corresponding_column() method of FromClause

      Given a ColumnElement, return the exported ColumnElement object from this Selectable which corresponds to that original Column via a common ancestor column.

      Parameters:
      • column – the target ColumnElement to be matched
• require_embedded – only return corresponding columns for the given ColumnElement, if the given ColumnElement is actually present within a sub-element of this FromClause. Normally the column will match if it merely shares a common ancestor with one of the exported columns of this FromClause.

      count(whereclause=None, **params)
      inherited from the count() method of FromClause

      return a SELECT COUNT generated against this FromClause.

      cte(name=None, recursive=False)
      inherited from the cte() method of SelectBase

      Return a new CTE, or Common Table Expression instance.

      Common table expressions are a SQL standard whereby SELECT statements can draw upon secondary statements specified along with the primary statement, using a clause called “WITH”. Special semantics regarding UNION can also be employed to allow “recursive” queries, where a SELECT statement can draw upon the set of rows that have previously been selected.

      SQLAlchemy detects CTE objects, which are treated similarly to Alias objects, as special elements to be delivered to the FROM clause of the statement as well as to a WITH clause at the top of the statement.

      New in version 0.7.6.

      Parameters:
• name – name given to the common table expression. Like FromClause.alias(), the name can be left as None in which case an anonymous symbol will be used at query compile time.
      • recursive – if True, will render WITH RECURSIVE. A recursive common table expression is intended to be used in conjunction with UNION ALL in order to derive rows from those already selected.

The following illustrate two examples from Postgresql’s documentation at http://www.postgresql.org/docs/8.4/static/queries-with.html.

      Example 1, non recursive:

      from sqlalchemy import Table, Column, String, Integer, MetaData, \
          select, func
      
      metadata = MetaData()
      
      orders = Table('orders', metadata,
          Column('region', String),
          Column('amount', Integer),
          Column('product', String),
          Column('quantity', Integer)
      )
      
      regional_sales = select([
                          orders.c.region,
                          func.sum(orders.c.amount).label('total_sales')
                      ]).group_by(orders.c.region).cte("regional_sales")
      
      
      top_regions = select([regional_sales.c.region]).\
              where(
                  regional_sales.c.total_sales >
                  select([
                      func.sum(regional_sales.c.total_sales)/10
                  ])
              ).cte("top_regions")
      
      statement = select([
                  orders.c.region,
                  orders.c.product,
                  func.sum(orders.c.quantity).label("product_units"),
                  func.sum(orders.c.amount).label("product_sales")
          ]).where(orders.c.region.in_(
              select([top_regions.c.region])
          )).group_by(orders.c.region, orders.c.product)
      
      result = conn.execute(statement).fetchall()

      Example 2, WITH RECURSIVE:

      from sqlalchemy import Table, Column, String, Integer, MetaData, \
          select, func
      
      metadata = MetaData()
      
      parts = Table('parts', metadata,
          Column('part', String),
          Column('sub_part', String),
          Column('quantity', Integer),
      )
      
      included_parts = select([
                          parts.c.sub_part,
                          parts.c.part,
                          parts.c.quantity]).\
                          where(parts.c.part=='our part').\
                          cte(recursive=True)
      
      
      incl_alias = included_parts.alias()
      parts_alias = parts.alias()
      included_parts = included_parts.union_all(
          select([
              parts_alias.c.part,
              parts_alias.c.sub_part,
              parts_alias.c.quantity
          ]).
              where(parts_alias.c.part==incl_alias.c.sub_part)
      )
      
statement = select([
            included_parts.c.sub_part,
            func.sum(included_parts.c.quantity).
              label('total_quantity')
        ]).\
        select_from(included_parts.join(parts,
            included_parts.c.part==parts.c.part)).\
        group_by(included_parts.c.sub_part)
      
      result = conn.execute(statement).fetchall()

      See also

      orm.query.Query.cte() - ORM version of SelectBase.cte().

      description
      inherited from the description attribute of FromClause

      a brief description of this FromClause.

      Used primarily for error message formatting.

      execute(*multiparams, **params)
      inherited from the execute() method of Executable

      Compile and execute this Executable.

      execution_options(**kw)
      inherited from the execution_options() method of Executable

      Set non-SQL options for the statement which take effect during execution.

      Execution options can be set on a per-statement or per Connection basis. Additionally, the Engine and ORM Query objects provide access to execution options which they in turn configure upon connections.

      The execution_options() method is generative. A new instance of this statement is returned that contains the options:

      statement = select([table.c.x, table.c.y])
      statement = statement.execution_options(autocommit=True)

      Note that only a subset of possible execution options can be applied to a statement - these include “autocommit” and “stream_results”, but not “isolation_level” or “compiled_cache”. See Connection.execution_options() for a full list of possible options.

      foreign_keys
      inherited from the foreign_keys attribute of FromClause

      Return the collection of ForeignKey objects which this FromClause references.

      group_by(*clauses)
      inherited from the group_by() method of SelectBase

      return a new selectable with the given list of GROUP BY criterion applied.

      The criterion will be appended to any pre-existing GROUP BY criterion.

      join(right, onclause=None, isouter=False)
      inherited from the join() method of FromClause

      return a join of this FromClause against another FromClause.

      label(name)
      inherited from the label() method of SelectBase

      return a ‘scalar’ representation of this selectable, embedded as a subquery with a label.

      See also

      as_scalar().

      limit(limit)
      inherited from the limit() method of SelectBase

      return a new selectable with the given LIMIT criterion applied.

      offset(offset)
      inherited from the offset() method of SelectBase

      return a new selectable with the given OFFSET criterion applied.

      order_by(*clauses)
      inherited from the order_by() method of SelectBase

      return a new selectable with the given list of ORDER BY criterion applied.

      The criterion will be appended to any pre-existing ORDER BY criterion.

      outerjoin(right, onclause=None)
      inherited from the outerjoin() method of FromClause

      return an outer join of this FromClause against another FromClause.

      params(*optionaldict, **kwargs)
      inherited from the params() method of ClauseElement

      Return a copy with bindparam() elements replaced.

      Returns a copy of this ClauseElement with bindparam() elements replaced with values taken from the given dictionary:

      >>> clause = column('x') + bindparam('foo')
      >>> print clause.compile().params
      {'foo':None}
      >>> print clause.params({'foo':7}).compile().params
      {'foo':7}
      primary_key
      inherited from the primary_key attribute of FromClause

      Return the collection of Column objects which comprise the primary key of this FromClause.

      replace_selectable(old, alias)
      inherited from the replace_selectable() method of FromClause

      replace all occurrences of FromClause ‘old’ with the given Alias object, returning a copy of this FromClause.

      scalar(*multiparams, **params)
      inherited from the scalar() method of Executable

      Compile and execute this Executable, returning the result’s scalar representation.

      select(whereclause=None, **params)
      inherited from the select() method of FromClause

      return a SELECT of this FromClause.

      See also

      select() - general purpose method which allows for arbitrary column lists.

      unique_params(*optionaldict, **kwargs)
      inherited from the unique_params() method of ClauseElement

      Return a copy with bindparam() elements replaced.

      Same functionality as params(), except adds unique=True to affected bind parameters so that multiple statements can be used.

      class sqlalchemy.sql.expression.CTE(selectable, name=None, recursive=False, _cte_alias=None, _restates=frozenset([]))

      Bases: sqlalchemy.sql.expression.Alias

      Represent a Common Table Expression.

      The CTE object is obtained using the SelectBase.cte() method from any selectable. See that method for complete examples.

      New in version 0.7.6.

      c
      inherited from the c attribute of FromClause

      An alias for the columns attribute.

      columns
      inherited from the columns attribute of FromClause

A name-based collection of ColumnElement objects maintained by this FromClause.

      The columns, or c collection, is the gateway to the construction of SQL expressions using table-bound or other selectable-bound columns:

      select([mytable]).where(mytable.c.somecolumn == 5)
      compare(other, **kw)
      inherited from the compare() method of ClauseElement

      Compare this ClauseElement to the given ClauseElement.

      Subclasses should override the default behavior, which is a straight identity comparison.

      **kw are arguments consumed by subclass compare() methods and may be used to modify the criteria for comparison. (see ColumnElement)

      compile(bind=None, dialect=None, **kw)
      inherited from the compile() method of ClauseElement

      Compile this SQL expression.

      The return value is a Compiled object. Calling str() or unicode() on the returned value will yield a string representation of the result. The Compiled object also can return a dictionary of bind parameter names and values using the params accessor.

      Parameters:
      • bind – An Engine or Connection from which a Compiled will be acquired. This argument takes precedence over this ClauseElement‘s bound engine, if any.
      • column_keys – Used for INSERT and UPDATE statements, a list of column names which should be present in the VALUES clause of the compiled statement. If None, all columns from the target table object are rendered.
      • dialect – A Dialect instance from which a Compiled will be acquired. This argument takes precedence over the bind argument as well as this ClauseElement‘s bound engine, if any.
      • inline – Used for INSERT statements, for a dialect which does not support inline retrieval of newly generated primary key columns, will force the expression used to create the new primary key value to be rendered inline within the INSERT statement’s VALUES clause. This typically refers to Sequence execution but may also refer to any server-side default generation function associated with a primary key Column.
      correspond_on_equivalents(column, equivalents)
      inherited from the correspond_on_equivalents() method of FromClause

      Return corresponding_column for the given column, or if None search for a match in the given dictionary.

      corresponding_column(column, require_embedded=False)
      inherited from the corresponding_column() method of FromClause

      Given a ColumnElement, return the exported ColumnElement object from this Selectable which corresponds to that original Column via a common ancestor column.

      Parameters:
      • column – the target ColumnElement to be matched
• require_embedded – only return corresponding columns for the given ColumnElement, if the given ColumnElement is actually present within a sub-element of this FromClause. Normally the column will match if it merely shares a common ancestor with one of the exported columns of this FromClause.

      count(whereclause=None, **params)
      inherited from the count() method of FromClause

      return a SELECT COUNT generated against this FromClause.

      foreign_keys
      inherited from the foreign_keys attribute of FromClause

      Return the collection of ForeignKey objects which this FromClause references.

      join(right, onclause=None, isouter=False)
      inherited from the join() method of FromClause

      return a join of this FromClause against another FromClause.

      outerjoin(right, onclause=None)
      inherited from the outerjoin() method of FromClause

      return an outer join of this FromClause against another FromClause.

      params(*optionaldict, **kwargs)
      inherited from the params() method of ClauseElement

      Return a copy with bindparam() elements replaced.

      Returns a copy of this ClauseElement with bindparam() elements replaced with values taken from the given dictionary:

      >>> clause = column('x') + bindparam('foo')
      >>> print clause.compile().params
      {'foo':None}
      >>> print clause.params({'foo':7}).compile().params
      {'foo':7}
      primary_key
      inherited from the primary_key attribute of FromClause

      Return the collection of Column objects which comprise the primary key of this FromClause.

      replace_selectable(old, alias)
      inherited from the replace_selectable() method of FromClause

      replace all occurrences of FromClause ‘old’ with the given Alias object, returning a copy of this FromClause.

      select(whereclause=None, **params)
      inherited from the select() method of FromClause

      return a SELECT of this FromClause.

      See also

      select() - general purpose method which allows for arbitrary column lists.

      self_group(against=None)
      inherited from the self_group() method of ClauseElement

      Apply a ‘grouping’ to this ClauseElement.

      This method is overridden by subclasses to return a “grouping” construct, i.e. parenthesis. In particular it’s used by “binary” expressions to provide a grouping around themselves when placed into a larger expression, as well as by select() constructs when placed into the FROM clause of another select(). (Note that subqueries should be normally created using the Select.alias() method, as many platforms require nested SELECT statements to be named).

      As expressions are composed together, the application of self_group() is automatic - end-user code should never need to use this method directly. Note that SQLAlchemy’s clause constructs take operator precedence into account - so parenthesis might not be needed, for example, in an expression like x OR (y AND z) - AND takes precedence over OR.

      The base self_group() method of ClauseElement just returns self.

      unique_params(*optionaldict, **kwargs)
      inherited from the unique_params() method of ClauseElement

      Return a copy with bindparam() elements replaced.

      Same functionality as params(), except adds unique=True to affected bind parameters so that multiple statements can be used.

      class sqlalchemy.sql.expression.Executable

      Bases: sqlalchemy.sql.expression.Generative

      Mark a ClauseElement as supporting execution.

      Executable is a superclass for all “statement” types of objects, including select(), delete(), update(), insert(), text().

      bind

      Returns the Engine or Connection to which this Executable is bound, or None if none found.

      This is a traversal which checks locally, then checks among the “from” clauses of associated objects until a bound engine or connection is found.

      execute(*multiparams, **params)

      Compile and execute this Executable.

      execution_options(**kw)

      Set non-SQL options for the statement which take effect during execution.

      Execution options can be set on a per-statement or per Connection basis. Additionally, the Engine and ORM Query objects provide access to execution options which they in turn configure upon connections.

      The execution_options() method is generative. A new instance of this statement is returned that contains the options:

      statement = select([table.c.x, table.c.y])
      statement = statement.execution_options(autocommit=True)

      Note that only a subset of possible execution options can be applied to a statement - these include “autocommit” and “stream_results”, but not “isolation_level” or “compiled_cache”. See Connection.execution_options() for a full list of possible options.

      scalar(*multiparams, **params)

      Compile and execute this Executable, returning the result’s scalar representation.

      class sqlalchemy.sql.expression.FromClause

      Bases: sqlalchemy.sql.expression.Selectable

      Represent an element that can be used within the FROM clause of a SELECT statement.

      The most common forms of FromClause are the Table and the select() constructs. Key features common to all FromClause objects include:

      alias(name=None)

      return an alias of this FromClause.

      This is shorthand for calling:

      from sqlalchemy import alias
      a = alias(self, name=name)

      See alias() for details.

      c

      An alias for the columns attribute.

      columns

A name-based collection of ColumnElement objects maintained by this FromClause.

      The columns, or c collection, is the gateway to the construction of SQL expressions using table-bound or other selectable-bound columns:

      select([mytable]).where(mytable.c.somecolumn == 5)
      correspond_on_equivalents(column, equivalents)

      Return corresponding_column for the given column, or if None search for a match in the given dictionary.

      corresponding_column(column, require_embedded=False)

      Given a ColumnElement, return the exported ColumnElement object from this Selectable which corresponds to that original Column via a common ancestor column.

      Parameters:
      • column – the target ColumnElement to be matched
• require_embedded – only return corresponding columns for the given ColumnElement, if the given ColumnElement is actually present within a sub-element of this FromClause. Normally the column will match if it merely shares a common ancestor with one of the exported columns of this FromClause.

      count(whereclause=None, **params)

      return a SELECT COUNT generated against this FromClause.

      description

      a brief description of this FromClause.

      Used primarily for error message formatting.

      foreign_keys

      Return the collection of ForeignKey objects which this FromClause references.

      is_derived_from(fromclause)

      Return True if this FromClause is ‘derived’ from the given FromClause.

For example, an Alias of a Table is derived from that Table.
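
A minimal sketch (a users Table assumed):

a = users.alias()
assert a.is_derived_from(users)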

      join(right, onclause=None, isouter=False)

      return a join of this FromClause against another FromClause.

      outerjoin(right, onclause=None)

      return an outer join of this FromClause against another FromClause.

      primary_key

      Return the collection of Column objects which comprise the primary key of this FromClause.

      replace_selectable(old, alias)

      replace all occurrences of FromClause ‘old’ with the given Alias object, returning a copy of this FromClause.

      select(whereclause=None, **params)

      return a SELECT of this FromClause.

      See also

      select() - general purpose method which allows for arbitrary column lists.

      class sqlalchemy.sql.expression.Join(left, right, onclause=None, isouter=False)

      Bases: sqlalchemy.sql.expression.FromClause

      represent a JOIN construct between two FromClause elements.

      The public constructor function for Join is the module-level join() function, as well as the join() method available off all FromClause subclasses.

      __init__(left, right, onclause=None, isouter=False)

      Construct a new Join.

      The usual entrypoint here is the join() function or the FromClause.join() method of any FromClause object.

      alias(name=None)

      return an alias of this Join.

      Used against a Join object, alias() calls the select() method first so that a subquery against a select() construct is generated. the select() construct also has the correlate flag set to False and will not auto-correlate inside an enclosing select() construct.

      The equivalent long-hand form, given a Join object j, is:

      from sqlalchemy import select, alias
      j = alias(
          select([j.left, j.right]).\
              select_from(j).\
              with_labels(True).\
              correlate(False),
          name=name
      )

      See alias() for further details on aliases.

      c
      inherited from the c attribute of FromClause

      An alias for the columns attribute.

      columns
      inherited from the columns attribute of FromClause

A name-based collection of ColumnElement objects maintained by this FromClause.

      The columns, or c collection, is the gateway to the construction of SQL expressions using table-bound or other selectable-bound columns:

      select([mytable]).where(mytable.c.somecolumn == 5)
      compare(other, **kw)
      inherited from the compare() method of ClauseElement

      Compare this ClauseElement to the given ClauseElement.

      Subclasses should override the default behavior, which is a straight identity comparison.

      **kw are arguments consumed by subclass compare() methods and may be used to modify the criteria for comparison. (see ColumnElement)

      compile(bind=None, dialect=None, **kw)
      inherited from the compile() method of ClauseElement

      Compile this SQL expression.

      The return value is a Compiled object. Calling str() or unicode() on the returned value will yield a string representation of the result. The Compiled object also can return a dictionary of bind parameter names and values using the params accessor.

      Parameters:
      • bind – An Engine or Connection from which a Compiled will be acquired. This argument takes precedence over this ClauseElement‘s bound engine, if any.
      • column_keys – Used for INSERT and UPDATE statements, a list of column names which should be present in the VALUES clause of the compiled statement. If None, all columns from the target table object are rendered.
      • dialect – A Dialect instance from which a Compiled will be acquired. This argument takes precedence over the bind argument as well as this ClauseElement‘s bound engine, if any.
      • inline – Used for INSERT statements, for a dialect which does not support inline retrieval of newly generated primary key columns, will force the expression used to create the new primary key value to be rendered inline within the INSERT statement’s VALUES clause. This typically refers to Sequence execution but may also refer to any server-side default generation function associated with a primary key Column.
      correspond_on_equivalents(column, equivalents)
      inherited from the correspond_on_equivalents() method of FromClause

      Return corresponding_column for the given column, or if None search for a match in the given dictionary.

      corresponding_column(column, require_embedded=False)
      inherited from the corresponding_column() method of FromClause

      Given a ColumnElement, return the exported ColumnElement object from this Selectable which corresponds to that original Column via a common ancestor column.

      Parameters:
      • column – the target ColumnElement to be matched
      • require_embedded – only return corresponding columns for

      the given ColumnElement, if the given ColumnElement is actually present within a sub-element of this FromClause. Normally the column will match if it merely shares a common ancestor with one of the exported columns of this FromClause.
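
A minimal sketch with illustrative names: corresponding_column() locates the exported column of a derived selectable (here an Alias) that descends from a given column of the original table:

from sqlalchemy.sql import table, column

t = table('t', column('x'))
a = t.alias('t_a')

print(a.corresponding_column(t.c.x))   # t_a.x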

      count(whereclause=None, **params)
      inherited from the count() method of FromClause

      return a SELECT COUNT generated against this FromClause.

      foreign_keys
      inherited from the foreign_keys attribute of FromClause

      Return the collection of ForeignKey objects which this FromClause references.

      join(right, onclause=None, isouter=False)
      inherited from the join() method of FromClause

      return a join of this FromClause against another FromClause.

      outerjoin(right, onclause=None)
      inherited from the outerjoin() method of FromClause

      return an outer join of this FromClause against another FromClause.

      params(*optionaldict, **kwargs)
      inherited from the params() method of ClauseElement

      Return a copy with bindparam() elements replaced.

      Returns a copy of this ClauseElement with bindparam() elements replaced with values taken from the given dictionary:

      >>> clause = column('x') + bindparam('foo')
      >>> print clause.compile().params
      {'foo':None}
      >>> print clause.params({'foo':7}).compile().params
      {'foo':7}
      primary_key
      inherited from the primary_key attribute of FromClause

      Return the collection of Column objects which comprise the primary key of this FromClause.

      replace_selectable(old, alias)
      inherited from the replace_selectable() method of FromClause

      replace all occurrences of FromClause ‘old’ with the given Alias object, returning a copy of this FromClause.

      select(whereclause=None, **kwargs)

      Create a Select from this Join.

      The equivalent long-hand form, given a Join object j, is:

      from sqlalchemy import select
      j = select([j.left, j.right], **kw).\
                  where(whereclause).\
                  select_from(j)
      Parameters:
      • whereclause – the WHERE criterion that will be sent to the select() function
      • **kwargs – all other kwargs are sent to the underlying select() function.
      unique_params(*optionaldict, **kwargs)
      inherited from the unique_params() method of ClauseElement

      Return a copy with bindparam() elements replaced.

      Same functionality as params(), except adds unique=True to affected bind parameters so that multiple statements can be used.

      class sqlalchemy.sql.expression.ScalarSelect(element)

      Bases: sqlalchemy.sql.expression.Generative, sqlalchemy.sql.expression.Grouping

      where(crit)

      Apply a WHERE clause to the SELECT statement referred to by this ScalarSelect.

      class sqlalchemy.sql.expression.Select(columns, whereclause=None, from_obj=None, distinct=False, having=None, correlate=True, prefixes=None, **kwargs)

      Bases: sqlalchemy.sql.expression.HasPrefixes, sqlalchemy.sql.expression.SelectBase

      Represents a SELECT statement.

      See also

      select() - the function which creates a Select object.

      Selecting - Core Tutorial description of select().

      __init__(columns, whereclause=None, from_obj=None, distinct=False, having=None, correlate=True, prefixes=None, **kwargs)

      Construct a Select object.

      The public constructor for Select is the select() function; see that function for argument descriptions.

      Additional generative and mutator methods are available on the SelectBase superclass.

      alias(name=None)
      inherited from the alias() method of FromClause

      return an alias of this FromClause.

      This is shorthand for calling:

      from sqlalchemy import alias
      a = alias(self, name=name)

      See alias() for details.

      append_column(column)

      append the given column expression to the columns clause of this select() construct.

      This is an in-place mutation method; the column() method is preferred, as it provides standard method chaining.

      append_correlation(fromclause)

      append the given correlation expression to this select() construct.

      This is an in-place mutation method; the correlate() method is preferred, as it provides standard method chaining.

      append_from(fromclause)

      append the given FromClause expression to this select() construct’s FROM clause.

      This is an in-place mutation method; the select_from() method is preferred, as it provides standard method chaining.

      append_group_by(*clauses)
      inherited from the append_group_by() method of SelectBase

      Append the given GROUP BY criterion applied to this selectable.

      The criterion will be appended to any pre-existing GROUP BY criterion.

      This is an in-place mutation method; the group_by() method is preferred, as it provides standard method chaining.

      append_having(having)

      append the given expression to this select() construct’s HAVING criterion.

      The expression will be joined to existing HAVING criterion via AND.

      This is an in-place mutation method; the having() method is preferred, as it provides standard method chaining.

      append_order_by(*clauses)
      inherited from the append_order_by() method of SelectBase

      Append the given ORDER BY criterion applied to this selectable.

      The criterion will be appended to any pre-existing ORDER BY criterion.

      This is an in-place mutation method; the order_by() method is preferred, as it provides standard method chaining.

      append_prefix(clause)

      append the given columns clause prefix expression to this select() construct.

      This is an in-place mutation method; the prefix_with() method is preferred, as it provides standard method chaining.

      append_whereclause(whereclause)

      append the given expression to this select() construct’s WHERE criterion.

      The expression will be joined to existing WHERE criterion via AND.

      This is an in-place mutation method; the where() method is preferred, as it provides standard method chaining.

      apply_labels()
      inherited from the apply_labels() method of SelectBase

      return a new selectable with the ‘use_labels’ flag set to True.

      This will result in column expressions being generated using labels against their table name, such as “SELECT somecolumn AS tablename_somecolumn”. This allows selectables which contain multiple FROM clauses to produce a unique set of column names regardless of name conflicts among the individual FROM clauses.
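
A brief sketch with illustrative names, showing how apply_labels() disambiguates same-named columns coming from different FROM elements (output approximate):

from sqlalchemy.sql import table, column, select

t1 = table('t1', column('id'))
t2 = table('t2', column('id'))

print(select([t1, t2]).apply_labels())
# SELECT t1.id AS t1_id, t2.id AS t2_id FROM t1, t2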

      as_scalar()
      inherited from the as_scalar() method of SelectBase

      return a ‘scalar’ representation of this selectable, which can be used as a column expression.

      Typically, a select statement which has only one column in its columns clause is eligible to be used as a scalar expression.

      The returned object is an instance of ScalarSelect.
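
For example, a hedged sketch with illustrative names, where a one-column SELECT is used as a scalar value inside a comparison (output approximate):

from sqlalchemy.sql import table, column, select

t1 = table('t1', column('x'), column('y'))
t2 = table('t2', column('y'), column('z'))

# the scalar subquery auto-correlates against t1 in the enclosing SELECT
scalar_q = select([t2.c.z]).where(t2.c.y == t1.c.y).as_scalar()
print(select([t1.c.x]).where(scalar_q > 5))
# SELECT t1.x FROM t1
# WHERE (SELECT t2.z FROM t2 WHERE t2.y = t1.y) > :param_1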

      autocommit()
      inherited from the autocommit() method of SelectBase

return a new selectable with the ‘autocommit’ flag set to True.

Deprecated since version 0.6: autocommit() is deprecated. Use Executable.execution_options() with the ‘autocommit’ flag.

      c
      inherited from the c attribute of FromClause

      An alias for the columns attribute.

      column(column)

      return a new select() construct with the given column expression added to its columns clause.

      columns
      inherited from the columns attribute of FromClause

      A named-based collection of ColumnElement objects maintained by this FromClause.

      The columns, or c collection, is the gateway to the construction of SQL expressions using table-bound or other selectable-bound columns:

      select([mytable]).where(mytable.c.somecolumn == 5)
      compare(other, **kw)
      inherited from the compare() method of ClauseElement

      Compare this ClauseElement to the given ClauseElement.

      Subclasses should override the default behavior, which is a straight identity comparison.

      **kw are arguments consumed by subclass compare() methods and may be used to modify the criteria for comparison. (see ColumnElement)

      compile(bind=None, dialect=None, **kw)
      inherited from the compile() method of ClauseElement

      Compile this SQL expression.

      The return value is a Compiled object. Calling str() or unicode() on the returned value will yield a string representation of the result. The Compiled object also can return a dictionary of bind parameter names and values using the params accessor.

      Parameters:
      • bind – An Engine or Connection from which a Compiled will be acquired. This argument takes precedence over this ClauseElement‘s bound engine, if any.
      • column_keys – Used for INSERT and UPDATE statements, a list of column names which should be present in the VALUES clause of the compiled statement. If None, all columns from the target table object are rendered.
      • dialect – A Dialect instance from which a Compiled will be acquired. This argument takes precedence over the bind argument as well as this ClauseElement‘s bound engine, if any.
      • inline – Used for INSERT statements, for a dialect which does not support inline retrieval of newly generated primary key columns, will force the expression used to create the new primary key value to be rendered inline within the INSERT statement’s VALUES clause. This typically refers to Sequence execution but may also refer to any server-side default generation function associated with a primary key Column.
      correlate(*fromclauses)

      return a new Select which will correlate the given FROM clauses to that of an enclosing Select.

      Calling this method turns off the Select object’s default behavior of “auto-correlation”. Normally, FROM elements which appear in a Select that encloses this one via its WHERE clause, ORDER BY, HAVING or columns clause will be omitted from this Select object’s FROM clause. Setting an explicit correlation collection using the Select.correlate() method provides a fixed list of FROM objects that can potentially take place in this process.

      When Select.correlate() is used to apply specific FROM clauses for correlation, the FROM elements become candidates for correlation regardless of how deeply nested this Select object is, relative to an enclosing Select which refers to the same FROM object. This is in contrast to the behavior of “auto-correlation” which only correlates to an immediate enclosing Select. Multi-level correlation ensures that the link between enclosed and enclosing Select is always via at least one WHERE/ORDER BY/HAVING/columns clause in order for correlation to take place.

      If None is passed, the Select object will correlate none of its FROM entries, and all will render unconditionally in the local FROM clause.

Parameters:*fromclauses – a list of one or more FromClause constructs, or other compatible constructs (i.e. ORM-mapped classes) to become part of the correlate collection.

      Changed in version 0.8.0: ORM-mapped classes are accepted by Select.correlate().

      Changed in version 0.8.0: The Select.correlate() method no longer unconditionally removes entries from the FROM clause; instead, the candidate FROM entries must also be matched by a FROM entry located in an enclosing Select, which ultimately encloses this one as present in the WHERE clause, ORDER BY clause, HAVING clause, or columns clause of an enclosing Select().

      Changed in version 0.8.2: explicit correlation takes place via any level of nesting of Select objects; in previous 0.8 versions, correlation would only occur relative to the immediate enclosing Select construct.
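
A minimal sketch of explicit correlation with illustrative names; in this simple case auto-correlation would produce the same statement, so the correlate() call just makes the intent explicit (output approximate):

from sqlalchemy.sql import table, column, select

users = table('users', column('id'))
addresses = table('addresses', column('user_id'))

# scalar subquery which correlates the users table to the enclosing SELECT
subq = select([addresses.c.user_id]).\
        where(addresses.c.user_id == users.c.id).\
        correlate(users).\
        as_scalar()

print(select([users.c.id, subq]))
# SELECT users.id, (SELECT addresses.user_id FROM addresses
#     WHERE addresses.user_id = users.id) AS anon_1
# FROM users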

      correlate_except(*fromclauses)

      return a new Select which will omit the given FROM clauses from the auto-correlation process.

      Calling Select.correlate_except() turns off the Select object’s default behavior of “auto-correlation” for the given FROM elements. An element specified here will unconditionally appear in the FROM list, while all other FROM elements remain subject to normal auto-correlation behaviors.

      Changed in version 0.8.2: The Select.correlate_except() method was improved to fully prevent FROM clauses specified here from being omitted from the immediate FROM clause of this Select.

      If None is passed, the Select object will correlate all of its FROM entries.

      Changed in version 0.8.2: calling correlate_except(None) will correctly auto-correlate all FROM clauses.

      Parameters:*fromclauses – a list of one or more FromClause constructs, or other compatible constructs (i.e. ORM-mapped classes) to become part of the correlate-exception collection.
      correspond_on_equivalents(column, equivalents)
      inherited from the correspond_on_equivalents() method of FromClause

      Return corresponding_column for the given column, or if None search for a match in the given dictionary.

      corresponding_column(column, require_embedded=False)
      inherited from the corresponding_column() method of FromClause

      Given a ColumnElement, return the exported ColumnElement object from this Selectable which corresponds to that original Column via a common ancestor column.

      Parameters:
      • column – the target ColumnElement to be matched
      • require_embedded – only return corresponding columns for

      the given ColumnElement, if the given ColumnElement is actually present within a sub-element of this FromClause. Normally the column will match if it merely shares a common ancestor with one of the exported columns of this FromClause.

      count(whereclause=None, **params)
      inherited from the count() method of FromClause

      return a SELECT COUNT generated against this FromClause.

      cte(name=None, recursive=False)
      inherited from the cte() method of SelectBase

      Return a new CTE, or Common Table Expression instance.

      Common table expressions are a SQL standard whereby SELECT statements can draw upon secondary statements specified along with the primary statement, using a clause called “WITH”. Special semantics regarding UNION can also be employed to allow “recursive” queries, where a SELECT statement can draw upon the set of rows that have previously been selected.

      SQLAlchemy detects CTE objects, which are treated similarly to Alias objects, as special elements to be delivered to the FROM clause of the statement as well as to a WITH clause at the top of the statement.

      New in version 0.7.6.

      Parameters:
• name – name given to the common table expression. Like FromClause.alias(), the name can be left as None in which case an anonymous symbol will be used at query compile time.
      • recursive – if True, will render WITH RECURSIVE. A recursive common table expression is intended to be used in conjunction with UNION ALL in order to derive rows from those already selected.

The following two examples are adapted from Postgresql’s documentation at http://www.postgresql.org/docs/8.4/static/queries-with.html.

      Example 1, non recursive:

      from sqlalchemy import Table, Column, String, Integer, MetaData, \
          select, func
      
      metadata = MetaData()
      
      orders = Table('orders', metadata,
          Column('region', String),
          Column('amount', Integer),
          Column('product', String),
          Column('quantity', Integer)
      )
      
      regional_sales = select([
                          orders.c.region,
                          func.sum(orders.c.amount).label('total_sales')
                      ]).group_by(orders.c.region).cte("regional_sales")
      
      
      top_regions = select([regional_sales.c.region]).\
              where(
                  regional_sales.c.total_sales >
                  select([
                      func.sum(regional_sales.c.total_sales)/10
                  ])
              ).cte("top_regions")
      
      statement = select([
                  orders.c.region,
                  orders.c.product,
                  func.sum(orders.c.quantity).label("product_units"),
                  func.sum(orders.c.amount).label("product_sales")
          ]).where(orders.c.region.in_(
              select([top_regions.c.region])
          )).group_by(orders.c.region, orders.c.product)
      
      result = conn.execute(statement).fetchall()

      Example 2, WITH RECURSIVE:

      from sqlalchemy import Table, Column, String, Integer, MetaData, \
          select, func
      
      metadata = MetaData()
      
      parts = Table('parts', metadata,
          Column('part', String),
          Column('sub_part', String),
          Column('quantity', Integer),
      )
      
      included_parts = select([
                          parts.c.sub_part,
                          parts.c.part,
                          parts.c.quantity]).\
                          where(parts.c.part=='our part').\
                          cte(recursive=True)
      
      
      incl_alias = included_parts.alias()
      parts_alias = parts.alias()
      included_parts = included_parts.union_all(
          select([
              parts_alias.c.part,
              parts_alias.c.sub_part,
              parts_alias.c.quantity
          ]).
              where(parts_alias.c.part==incl_alias.c.sub_part)
      )
      
statement = select([
            included_parts.c.sub_part,
            func.sum(included_parts.c.quantity).
              label('total_quantity')
        ]).\
        select_from(included_parts.join(parts,
            included_parts.c.part==parts.c.part)).\
        group_by(included_parts.c.sub_part)
      
      result = conn.execute(statement).fetchall()

      See also

      orm.query.Query.cte() - ORM version of SelectBase.cte().

      description
      inherited from the description attribute of FromClause

      a brief description of this FromClause.

      Used primarily for error message formatting.

      distinct(*expr)

      Return a new select() construct which will apply DISTINCT to its columns clause.

Parameters:*expr – optional column expressions. When present, the Postgresql dialect will render a DISTINCT ON (<expressions>) construct.
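
A short sketch with illustrative names; on the default dialect this renders a plain SELECT DISTINCT:

from sqlalchemy.sql import table, column, select

t = table('t', column('a'), column('b'))

print(select([t.c.a]).distinct())
# SELECT DISTINCT t.a FROM t
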
      except_(other, **kwargs)

      return a SQL EXCEPT of this select() construct against the given selectable.

      except_all(other, **kwargs)

      return a SQL EXCEPT ALL of this select() construct against the given selectable.

      execute(*multiparams, **params)
      inherited from the execute() method of Executable

      Compile and execute this Executable.

      execution_options(**kw)
      inherited from the execution_options() method of Executable

      Set non-SQL options for the statement which take effect during execution.

      Execution options can be set on a per-statement or per Connection basis. Additionally, the Engine and ORM Query objects provide access to execution options which they in turn configure upon connections.

      The execution_options() method is generative. A new instance of this statement is returned that contains the options:

      statement = select([table.c.x, table.c.y])
      statement = statement.execution_options(autocommit=True)

      Note that only a subset of possible execution options can be applied to a statement - these include “autocommit” and “stream_results”, but not “isolation_level” or “compiled_cache”. See Connection.execution_options() for a full list of possible options.

      foreign_keys
      inherited from the foreign_keys attribute of FromClause

      Return the collection of ForeignKey objects which this FromClause references.

      froms

      Return the displayed list of FromClause elements.

      get_children(column_collections=True, **kwargs)

      return child elements as per the ClauseElement specification.

      group_by(*clauses)
      inherited from the group_by() method of SelectBase

      return a new selectable with the given list of GROUP BY criterion applied.

      The criterion will be appended to any pre-existing GROUP BY criterion.

      having(having)

      return a new select() construct with the given expression added to its HAVING clause, joined to the existing clause via AND, if any.
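
A brief sketch combining group_by() and having(), with illustrative names (output approximate; bind parameter names may differ):

from sqlalchemy.sql import table, column, select, func

orders = table('orders', column('region'), column('amount'))

stmt = select([orders.c.region, func.sum(orders.c.amount).label('total')]).\
        group_by(orders.c.region).\
        having(func.sum(orders.c.amount) > 100)
print(stmt)
# SELECT orders.region, sum(orders.amount) AS total
# FROM orders GROUP BY orders.region
# HAVING sum(orders.amount) > :param_1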

      inner_columns

      an iterator of all ColumnElement expressions which would be rendered into the columns clause of the resulting SELECT statement.

      intersect(other, **kwargs)

      return a SQL INTERSECT of this select() construct against the given selectable.

      intersect_all(other, **kwargs)

      return a SQL INTERSECT ALL of this select() construct against the given selectable.

      join(right, onclause=None, isouter=False)
      inherited from the join() method of FromClause

      return a join of this FromClause against another FromClause.

      label(name)
      inherited from the label() method of SelectBase

      return a ‘scalar’ representation of this selectable, embedded as a subquery with a label.

      See also

      as_scalar().

      limit(limit)
      inherited from the limit() method of SelectBase

      return a new selectable with the given LIMIT criterion applied.

      locate_all_froms

      return a Set of all FromClause elements referenced by this Select.

      This set is a superset of that returned by the froms property, which is specifically for those FromClause elements that would actually be rendered.

      offset(offset)
      inherited from the offset() method of SelectBase

      return a new selectable with the given OFFSET criterion applied.
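
A small sketch of LIMIT/OFFSET-style pagination with illustrative names; the exact SQL rendered for the LIMIT and OFFSET clauses depends on the dialect in use:

from sqlalchemy.sql import table, column, select

t = table('t', column('id'))

print(select([t.c.id]).order_by(t.c.id).limit(10).offset(20))
# SELECT t.id FROM t ORDER BY t.id LIMIT ... OFFSET ...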

      order_by(*clauses)
      inherited from the order_by() method of SelectBase

      return a new selectable with the given list of ORDER BY criterion applied.

      The criterion will be appended to any pre-existing ORDER BY criterion.

      outerjoin(right, onclause=None)
      inherited from the outerjoin() method of FromClause

      return an outer join of this FromClause against another FromClause.

      params(*optionaldict, **kwargs)
      inherited from the params() method of ClauseElement

      Return a copy with bindparam() elements replaced.

      Returns a copy of this ClauseElement with bindparam() elements replaced with values taken from the given dictionary:

      >>> clause = column('x') + bindparam('foo')
      >>> print clause.compile().params
      {'foo':None}
      >>> print clause.params({'foo':7}).compile().params
      {'foo':7}
      prefix_with(*expr, **kw)
      inherited from the prefix_with() method of HasPrefixes

      Add one or more expressions following the statement keyword, i.e. SELECT, INSERT, UPDATE, or DELETE. Generative.

      This is used to support backend-specific prefix keywords such as those provided by MySQL.

      E.g.:

      stmt = table.insert().prefix_with("LOW_PRIORITY", dialect="mysql")

      Multiple prefixes can be specified by multiple calls to prefix_with().

      Parameters:
      • *expr – textual or ClauseElement construct which will be rendered following the INSERT, UPDATE, or DELETE keyword.
      • **kw – A single keyword ‘dialect’ is accepted. This is an optional string dialect name which will limit rendering of this prefix to only that dialect.
      primary_key
      inherited from the primary_key attribute of FromClause

      Return the collection of Column objects which comprise the primary key of this FromClause.

      reduce_columns(only_synonyms=True)

Return a new select() construct with redundantly named, equivalently-valued columns removed from the columns clause.

      “Redundant” here means two columns where one refers to the other either based on foreign key, or via a simple equality comparison in the WHERE clause of the statement. The primary purpose of this method is to automatically construct a select statement with all uniquely-named columns, without the need to use table-qualified labels as apply_labels() does.

When columns are omitted based on foreign key, the referred-to column is the one that’s kept. When columns are omitted based on WHERE equivalence, the first column in the columns clause is the one that’s kept.

      Parameters:only_synonyms – when True, limit the removal of columns to those which have the same name as the equivalent. Otherwise, all columns that are equivalent to another are removed.

      New in version 0.8.
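
A hedged sketch with illustrative Table objects: b.id both shares a name with a.id and references it via foreign key, so the redundant copy is dropped from the columns clause (output approximate):

from sqlalchemy import Table, Column, Integer, ForeignKey, MetaData, select

metadata = MetaData()
a = Table('a', metadata,
          Column('id', Integer, primary_key=True),
          Column('x', Integer))
b = Table('b', metadata,
          Column('id', Integer, ForeignKey('a.id'), primary_key=True))

stmt = select([a, b]).where(a.c.id == b.c.id)
print(stmt.reduce_columns())
# SELECT a.id, a.x FROM a, b WHERE a.id = b.id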

      replace_selectable(old, alias)
      inherited from the replace_selectable() method of FromClause

      replace all occurrences of FromClause ‘old’ with the given Alias object, returning a copy of this FromClause.

      scalar(*multiparams, **params)
      inherited from the scalar() method of Executable

      Compile and execute this Executable, returning the result’s scalar representation.

      select(whereclause=None, **params)
      inherited from the select() method of FromClause

      return a SELECT of this FromClause.

      See also

      select() - general purpose method which allows for arbitrary column lists.

      select_from(fromclause)

      return a new select() construct with the given FROM expression merged into its list of FROM objects.

      E.g.:

      table1 = table('t1', column('a'))
      table2 = table('t2', column('b'))
      s = select([table1.c.a]).\
          select_from(
              table1.join(table2, table1.c.a==table2.c.b)
          )

      The “from” list is a unique set on the identity of each element, so adding an already present Table or other selectable will have no effect. Passing a Join that refers to an already present Table or other selectable will have the effect of concealing the presence of that selectable as an individual element in the rendered FROM list, instead rendering it into a JOIN clause.

      While the typical purpose of Select.select_from() is to replace the default, derived FROM clause with a join, it can also be called with individual table elements, multiple times if desired, in the case that the FROM clause cannot be fully derived from the columns clause:

      select([func.count('*')]).select_from(table1)
      self_group(against=None)

      return a ‘grouping’ construct as per the ClauseElement specification.

      This produces an element that can be embedded in an expression. Note that this method is called automatically as needed when constructing expressions and should not require explicit use.

      union(other, **kwargs)

      return a SQL UNION of this select() construct against the given selectable.

      union_all(other, **kwargs)

      return a SQL UNION ALL of this select() construct against the given selectable.
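
A short sketch of the method form of union_all(), with illustrative names (output approximate):

from sqlalchemy.sql import table, column, select

t = table('t', column('a'))

s1 = select([t.c.a]).where(t.c.a == 1)
s2 = select([t.c.a]).where(t.c.a == 2)
print(s1.union_all(s2))
# SELECT t.a FROM t WHERE t.a = :a_1
# UNION ALL
# SELECT t.a FROM t WHERE t.a = :a_2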

      unique_params(*optionaldict, **kwargs)
      inherited from the unique_params() method of ClauseElement

      Return a copy with bindparam() elements replaced.

      Same functionality as params(), except adds unique=True to affected bind parameters so that multiple statements can be used.

      where(whereclause)

      return a new select() construct with the given expression added to its WHERE clause, joined to the existing clause via AND, if any.

      with_hint(selectable, text, dialect_name='*')

      Add an indexing hint for the given selectable to this Select.

      The text of the hint is rendered in the appropriate location for the database backend in use, relative to the given Table or Alias passed as the selectable argument. The dialect implementation typically uses Python string substitution syntax with the token %(name)s to render the name of the table or alias. E.g. when using Oracle, the following:

      select([mytable]).\
          with_hint(mytable, "+ index(%(name)s ix_mytable)")

      Would render SQL as:

      select /*+ index(mytable ix_mytable) */ ... from mytable

      The dialect_name option will limit the rendering of a particular hint to a particular backend. Such as, to add hints for both Oracle and Sybase simultaneously:

      select([mytable]).\
          with_hint(mytable, "+ index(%(name)s ix_mytable)", 'oracle').\
          with_hint(mytable, "WITH INDEX ix_mytable", 'sybase')
      with_only_columns(columns)

      Return a new select() construct with its columns clause replaced with the given columns.

      Changed in version 0.7.3: Due to a bug fix, this method has a slight behavioral change as of version 0.7.3. Prior to version 0.7.3, the FROM clause of a select() was calculated upfront and as new columns were added; in 0.7.3 and later it’s calculated at compile time, fixing an issue regarding late binding of columns to parent tables. This changes the behavior of Select.with_only_columns() in that FROM clauses no longer represented in the new list are dropped, but this behavior is more consistent in that the FROM clauses are consistently derived from the current columns clause. The original intent of this method is to allow trimming of the existing columns list to be fewer columns than originally present; the use case of replacing the columns list with an entirely different one hadn’t been anticipated until 0.7.3 was released; the usage guidelines below illustrate how this should be done.

      This method is exactly equivalent to as if the original select() had been called with the given columns clause. I.e. a statement:

      s = select([table1.c.a, table1.c.b])
      s = s.with_only_columns([table1.c.b])

      should be exactly equivalent to:

      s = select([table1.c.b])

      This means that FROM clauses which are only derived from the column list will be discarded if the new column list no longer contains that FROM:

      >>> table1 = table('t1', column('a'), column('b'))
      >>> table2 = table('t2', column('a'), column('b'))
      >>> s1 = select([table1.c.a, table2.c.b])
      >>> print s1
      SELECT t1.a, t2.b FROM t1, t2
      >>> s2 = s1.with_only_columns([table2.c.b])
      >>> print s2
      SELECT t2.b FROM t1

      The preferred way to maintain a specific FROM clause in the construct, assuming it won’t be represented anywhere else (i.e. not in the WHERE clause, etc.) is to set it using Select.select_from():

      >>> s1 = select([table1.c.a, table2.c.b]).\
      ...         select_from(table1.join(table2,
      ...                 table1.c.a==table2.c.a))
      >>> s2 = s1.with_only_columns([table2.c.b])
      >>> print s2
      SELECT t2.b FROM t1 JOIN t2 ON t1.a=t2.a

      Care should also be taken to use the correct set of column objects passed to Select.with_only_columns(). Since the method is essentially equivalent to calling the select() construct in the first place with the given columns, the columns passed to Select.with_only_columns() should usually be a subset of those which were passed to the select() construct, not those which are available from the .c collection of that select(). That is:

      s = select([table1.c.a, table1.c.b]).select_from(table1)
      s = s.with_only_columns([table1.c.b])

      and not:

      # usually incorrect
      s = s.with_only_columns([s.c.b])

      The latter would produce the SQL:

      SELECT b
      FROM (SELECT t1.a AS a, t1.b AS b
      FROM t1), t1

This is because the select() construct is essentially being asked to select both from table1 as well as from itself.

      class sqlalchemy.sql.expression.Selectable

      Bases: sqlalchemy.sql.expression.ClauseElement

      mark a class as being selectable

      class sqlalchemy.sql.expression.SelectBase(use_labels=False, for_update=False, limit=None, offset=None, order_by=None, group_by=None, bind=None, autocommit=None)

      Bases: sqlalchemy.sql.expression.Executable, sqlalchemy.sql.expression.FromClause

      Base class for Select and CompoundSelect.

      append_group_by(*clauses)

      Append the given GROUP BY criterion applied to this selectable.

      The criterion will be appended to any pre-existing GROUP BY criterion.

      This is an in-place mutation method; the group_by() method is preferred, as it provides standard method chaining.

      append_order_by(*clauses)

      Append the given ORDER BY criterion applied to this selectable.

      The criterion will be appended to any pre-existing ORDER BY criterion.

      This is an in-place mutation method; the order_by() method is preferred, as it provides standard method chaining.

      apply_labels()

      return a new selectable with the ‘use_labels’ flag set to True.

      This will result in column expressions being generated using labels against their table name, such as “SELECT somecolumn AS tablename_somecolumn”. This allows selectables which contain multiple FROM clauses to produce a unique set of column names regardless of name conflicts among the individual FROM clauses.

      as_scalar()

      return a ‘scalar’ representation of this selectable, which can be used as a column expression.

      Typically, a select statement which has only one column in its columns clause is eligible to be used as a scalar expression.

      The returned object is an instance of ScalarSelect.

      autocommit()

return a new selectable with the ‘autocommit’ flag set to True.

Deprecated since version 0.6: autocommit() is deprecated. Use Executable.execution_options() with the ‘autocommit’ flag.

      cte(name=None, recursive=False)

      Return a new CTE, or Common Table Expression instance.

      Common table expressions are a SQL standard whereby SELECT statements can draw upon secondary statements specified along with the primary statement, using a clause called “WITH”. Special semantics regarding UNION can also be employed to allow “recursive” queries, where a SELECT statement can draw upon the set of rows that have previously been selected.

      SQLAlchemy detects CTE objects, which are treated similarly to Alias objects, as special elements to be delivered to the FROM clause of the statement as well as to a WITH clause at the top of the statement.

      New in version 0.7.6.

      Parameters:
• name – name given to the common table expression. Like FromClause.alias(), the name can be left as None in which case an anonymous symbol will be used at query compile time.
      • recursive – if True, will render WITH RECURSIVE. A recursive common table expression is intended to be used in conjunction with UNION ALL in order to derive rows from those already selected.

The following two examples are adapted from Postgresql’s documentation at http://www.postgresql.org/docs/8.4/static/queries-with.html.

      Example 1, non recursive:

      from sqlalchemy import Table, Column, String, Integer, MetaData, \
          select, func
      
      metadata = MetaData()
      
      orders = Table('orders', metadata,
          Column('region', String),
          Column('amount', Integer),
          Column('product', String),
          Column('quantity', Integer)
      )
      
      regional_sales = select([
                          orders.c.region,
                          func.sum(orders.c.amount).label('total_sales')
                      ]).group_by(orders.c.region).cte("regional_sales")
      
      
      top_regions = select([regional_sales.c.region]).\
              where(
                  regional_sales.c.total_sales >
                  select([
                      func.sum(regional_sales.c.total_sales)/10
                  ])
              ).cte("top_regions")
      
      statement = select([
                  orders.c.region,
                  orders.c.product,
                  func.sum(orders.c.quantity).label("product_units"),
                  func.sum(orders.c.amount).label("product_sales")
          ]).where(orders.c.region.in_(
              select([top_regions.c.region])
          )).group_by(orders.c.region, orders.c.product)
      
      result = conn.execute(statement).fetchall()

      Example 2, WITH RECURSIVE:

      from sqlalchemy import Table, Column, String, Integer, MetaData, \
          select, func
      
      metadata = MetaData()
      
      parts = Table('parts', metadata,
          Column('part', String),
          Column('sub_part', String),
          Column('quantity', Integer),
      )
      
      included_parts = select([
                          parts.c.sub_part,
                          parts.c.part,
                          parts.c.quantity]).\
                          where(parts.c.part=='our part').\
                          cte(recursive=True)
      
      
      incl_alias = included_parts.alias()
      parts_alias = parts.alias()
      included_parts = included_parts.union_all(
          select([
              parts_alias.c.part,
              parts_alias.c.sub_part,
              parts_alias.c.quantity
          ]).
              where(parts_alias.c.part==incl_alias.c.sub_part)
      )
      
statement = select([
            included_parts.c.sub_part,
            func.sum(included_parts.c.quantity).
              label('total_quantity')
        ]).\
        select_from(included_parts.join(parts,
            included_parts.c.part==parts.c.part)).\
        group_by(included_parts.c.sub_part)
      
      result = conn.execute(statement).fetchall()

      See also

      orm.query.Query.cte() - ORM version of SelectBase.cte().

      group_by(*clauses)

      return a new selectable with the given list of GROUP BY criterion applied.

      The criterion will be appended to any pre-existing GROUP BY criterion.

      label(name)

      return a ‘scalar’ representation of this selectable, embedded as a subquery with a label.

      See also

      as_scalar().

      limit(limit)

      return a new selectable with the given LIMIT criterion applied.

      offset(offset)

      return a new selectable with the given OFFSET criterion applied.

      order_by(*clauses)

      return a new selectable with the given list of ORDER BY criterion applied.

      The criterion will be appended to any pre-existing ORDER BY criterion.

      class sqlalchemy.sql.expression.TableClause(name, *columns)

      Bases: sqlalchemy.sql.expression.Immutable, sqlalchemy.sql.expression.FromClause

      Represents a minimal “table” construct.

      The constructor for TableClause is the table() function. This produces a lightweight table object that has only a name and a collection of columns, which are typically produced by the column() function:

      from sqlalchemy.sql import table, column
      
      user = table("user",
              column("id"),
              column("name"),
              column("description"),
      )

      The TableClause construct serves as the base for the more commonly used Table object, providing the usual set of FromClause services including the .c. collection and statement generation methods.

      It does not provide all the additional schema-level services of Table, including constraints, references to other tables, or support for MetaData-level services. It’s useful on its own as an ad-hoc construct used to generate quick SQL statements when a more fully fledged Table is not on hand.

      alias(name=None)
      inherited from the alias() method of FromClause

      return an alias of this FromClause.

      This is shorthand for calling:

      from sqlalchemy import alias
      a = alias(self, name=name)

      See alias() for details.

      c
      inherited from the c attribute of FromClause

      An alias for the columns attribute.

      columns
      inherited from the columns attribute of FromClause

      A named-based collection of ColumnElement objects maintained by this FromClause.

      The columns, or c collection, is the gateway to the construction of SQL expressions using table-bound or other selectable-bound columns:

      select([mytable]).where(mytable.c.somecolumn == 5)
      compare(other, **kw)
      inherited from the compare() method of ClauseElement

      Compare this ClauseElement to the given ClauseElement.

      Subclasses should override the default behavior, which is a straight identity comparison.

      **kw are arguments consumed by subclass compare() methods and may be used to modify the criteria for comparison. (see ColumnElement)

      compile(bind=None, dialect=None, **kw)
      inherited from the compile() method of ClauseElement

      Compile this SQL expression.

      The return value is a Compiled object. Calling str() or unicode() on the returned value will yield a string representation of the result. The Compiled object also can return a dictionary of bind parameter names and values using the params accessor.

      Parameters:
      • bind – An Engine or Connection from which a Compiled will be acquired. This argument takes precedence over this ClauseElement‘s bound engine, if any.
      • column_keys – Used for INSERT and UPDATE statements, a list of column names which should be present in the VALUES clause of the compiled statement. If None, all columns from the target table object are rendered.
      • dialect – A Dialect instance from which a Compiled will be acquired. This argument takes precedence over the bind argument as well as this ClauseElement‘s bound engine, if any.
      • inline – Used for INSERT statements, for a dialect which does not support inline retrieval of newly generated primary key columns, will force the expression used to create the new primary key value to be rendered inline within the INSERT statement’s VALUES clause. This typically refers to Sequence execution but may also refer to any server-side default generation function associated with a primary key Column.
      correspond_on_equivalents(column, equivalents)
      inherited from the correspond_on_equivalents() method of FromClause

      Return corresponding_column for the given column, or if None search for a match in the given dictionary.

      corresponding_column(column, require_embedded=False)
      inherited from the corresponding_column() method of FromClause

      Given a ColumnElement, return the exported ColumnElement object from this Selectable which corresponds to that original Column via a common ancestor column.

      Parameters:
      • column – the target ColumnElement to be matched
      • require_embedded – only return corresponding columns for

      the given ColumnElement, if the given ColumnElement is actually present within a sub-element of this FromClause. Normally the column will match if it merely shares a common ancestor with one of the exported columns of this FromClause.

      count(whereclause=None, **params)

      return a SELECT COUNT generated against this TableClause.

      delete(whereclause=None, **kwargs)

      Generate a delete() construct against this TableClause.

      E.g.:

      table.delete().where(table.c.id==7)

      See delete() for argument and usage information.

      foreign_keys
      inherited from the foreign_keys attribute of FromClause

      Return the collection of ForeignKey objects which this FromClause references.

      implicit_returning = False

TableClause doesn’t support having a primary key or column-level defaults, so implicit returning doesn’t apply.

      insert(values=None, inline=False, **kwargs)

      Generate an insert() construct against this TableClause.

      E.g.:

      table.insert().values(name='foo')

      See insert() for argument and usage information.

      is_derived_from(fromclause)
      inherited from the is_derived_from() method of FromClause

      Return True if this FromClause is ‘derived’ from the given FromClause.

      An example would be an Alias of a Table is derived from that Table.

      join(right, onclause=None, isouter=False)
      inherited from the join() method of FromClause

      return a join of this FromClause against another FromClause.

      outerjoin(right, onclause=None)
      inherited from the outerjoin() method of FromClause

      return an outer join of this FromClause against another FromClause.

      primary_key
      inherited from the primary_key attribute of FromClause

      Return the collection of Column objects which comprise the primary key of this FromClause.

      replace_selectable(old, alias)
      inherited from the replace_selectable() method of FromClause

      replace all occurrences of FromClause ‘old’ with the given Alias object, returning a copy of this FromClause.

      select(whereclause=None, **params)
      inherited from the select() method of FromClause

      return a SELECT of this FromClause.

      See also

      select() - general purpose method which allows for arbitrary column lists.

      self_group(against=None)
      inherited from the self_group() method of ClauseElement

      Apply a ‘grouping’ to this ClauseElement.

This method is overridden by subclasses to return a “grouping” construct, i.e. parentheses. In particular it’s used by “binary” expressions to provide a grouping around themselves when placed into a larger expression, as well as by select() constructs when placed into the FROM clause of another select(). (Note that subqueries should normally be created using the Select.alias() method, as many platforms require nested SELECT statements to be named).

As expressions are composed together, the application of self_group() is automatic - end-user code should never need to use this method directly. Note that SQLAlchemy’s clause constructs take operator precedence into account - so parentheses might not be needed, for example, in an expression like x OR (y AND z) - AND takes precedence over OR.

      The base self_group() method of ClauseElement just returns self.

      update(whereclause=None, values=None, inline=False, **kwargs)

      Generate an update() construct against this TableClause.

      E.g.:

      table.update().where(table.c.id==7).values(name='foo')

      See update() for argument and usage information.

      SQLAlchemy-0.8.4/doc/core/serializer.html0000644000076500000240000002415512251147471021000 0ustar classicstaff00000000000000 Expression Serializer Extension — SQLAlchemy 0.8 Documentation

      Expression Serializer Extension

      Serializer/Deserializer objects for usage with SQLAlchemy query structures, allowing “contextual” deserialization.

      Any SQLAlchemy query structure, either based on sqlalchemy.sql.* or sqlalchemy.orm.* can be used. The mappers, Tables, Columns, Session etc. which are referenced by the structure are not persisted in serialized form, but are instead re-associated with the query structure when it is deserialized.

      Usage is nearly the same as that of the standard Python pickle module:

      from sqlalchemy.ext.serializer import loads, dumps
      metadata = MetaData(bind=some_engine)
      Session = scoped_session(sessionmaker())
      
      # ... define mappers
      
      query = Session.query(MyClass).filter(MyClass.somedata=='foo').order_by(MyClass.sortkey)
      
      # pickle the query
      serialized = dumps(query)
      
      # unpickle.  Pass in metadata + scoped_session
      query2 = loads(serialized, metadata, Session)
      
      print query2.all()

Similar restrictions as when using raw pickle apply; mapped classes must themselves be pickleable, meaning they are importable from a module-level namespace.

      The serializer module is only appropriate for query structures. It is not needed for:

      • instances of user-defined classes. These contain no references to engines, sessions or expression constructs in the typical case and can be serialized directly.
      • Table metadata that is to be loaded entirely from the serialized structure (i.e. is not already declared in the application). Regular pickle.loads()/dumps() can be used to fully dump any MetaData object, typically one which was reflected from an existing database at some previous point in time. The serializer module is specifically for the opposite case, where the Table metadata is already present in memory.
      sqlalchemy.ext.serializer.Serializer(*args, **kw)
      sqlalchemy.ext.serializer.Deserializer(file, metadata=None, scoped_session=None, engine=None)
      sqlalchemy.ext.serializer.dumps(obj, protocol=0)
      sqlalchemy.ext.serializer.loads(data, metadata=None, scoped_session=None, engine=None)
      SQLAlchemy-0.8.4/doc/core/sqlelement.html0000644000076500000240000130101512251147472020773 0ustar classicstaff00000000000000 Column Elements and Expressions — SQLAlchemy 0.8 Documentation


      Column Elements and Expressions

      The most fundamental part of the SQL expression API are the “column elements”, which allow for basic SQL expression support. The core of all SQL expression constructs is the ClauseElement, which is the base for several sub-branches. The ColumnElement class is the fundamental unit used to construct any kind of typed SQL expression.

      sqlalchemy.sql.expression.and_(*clauses)

      Join a list of clauses together using the AND operator.

      The & operator is also overloaded on all ColumnElement subclasses to produce the same result.
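
For example, a brief sketch with illustrative names; the function form and the overloaded & operator produce the same expression:

from sqlalchemy.sql import table, column, select, and_

t = table('t', column('a'), column('b'))

print(select([t]).where(and_(t.c.a == 1, t.c.b == 2)))
print(select([t]).where((t.c.a == 1) & (t.c.b == 2)))   # renders the same WHERE clause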

      sqlalchemy.sql.expression.asc(column)

      Return an ascending ORDER BY clause element.

      e.g.:

      someselect.order_by(asc(table1.mycol))

      produces:

      ORDER BY mycol ASC
      sqlalchemy.sql.expression.between(ctest, cleft, cright)

      Return a BETWEEN predicate clause.

      Equivalent of SQL clausetest BETWEEN clauseleft AND clauseright.

      The between() method on all ColumnElement subclasses provides similar functionality.
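
A brief sketch with illustrative names, showing the function form and the equivalent column method (output approximate):

from sqlalchemy.sql import table, column, select, between

t = table('t', column('qty'))

print(select([t]).where(between(t.c.qty, 5, 10)))
print(select([t]).where(t.c.qty.between(5, 10)))   # equivalent
# ... WHERE t.qty BETWEEN :qty_1 AND :qty_2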

sqlalchemy.sql.expression.bindparam(key, value=<symbol 'NO_ARG'>, type_=None, unique=False, required=<symbol 'NO_ARG'>, quote=None, callable_=None)

      Create a bind parameter clause with the given key.

      Parameters:
      • key – the key for this bind param. Will be used in the generated SQL statement for dialects that use named parameters. This value may be modified when part of a compilation operation, if other BindParameter objects exist with the same key, or if its length is too long and truncation is required.
      • value

        Initial value for this bind param. This value may be overridden by the dictionary of parameters sent to statement compilation/execution.

        Defaults to None, however if neither value nor callable are passed explicitly, the required flag will be set to True which has the effect of requiring a value be present when the statement is actually executed.

        Changed in version 0.8: The required flag is set to True automatically if value or callable is not passed.

      • callable_ – A callable function that takes the place of “value”. The function will be called at statement execution time to determine the ultimate value. Used for scenarios where the actual bind value cannot be determined at the point at which the clause construct is created, but embedded bind values are still desirable.
      • type_ – A TypeEngine object that will be used to pre-process the value corresponding to this BindParameter at execution time.
      • unique – if True, the key name of this BindParamClause will be modified if another BindParameter of the same name already has been located within the containing ClauseElement.
      • required

  If True, a value is required at execution time. If not passed, it is set to True or False based on whether or not one of value or callable was passed.

        Changed in version 0.8: If the required flag is not specified, it will be set automatically to True or False depending on whether or not the value or callable parameters were specified.

      • quote – True if this parameter name requires quoting and is not currently known as a SQLAlchemy reserved word; this currently only applies to the Oracle backend.
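
A minimal sketch of an explicitly named bind parameter whose value is supplied at execution time; the table, connection and parameter names here are illustrative assumptions:

from sqlalchemy.sql import table, column, select, bindparam

users = table('users', column('name'))

stmt = select([users]).where(users.c.name == bindparam('username'))
print(stmt)   # SELECT users.name FROM users WHERE users.name = :username

# the value is supplied when the statement is executed, e.g.:
# conn.execute(stmt, username='ed')
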
      sqlalchemy.sql.expression.case(whens, value=None, else_=None)

      Produce a CASE statement.

      whens
      A sequence of pairs, or alternatively a dict, to be translated into “WHEN / THEN” clauses.
      value
      Optional for simple case statements, produces a column expression as in “CASE <expr> WHEN ...”
      else_
      Optional as well, for case defaults produces the “ELSE” portion of the “CASE” statement.

      The expressions used for THEN and ELSE, when specified as strings, will be interpreted as bound values. To specify textual SQL expressions for these, use the literal_column() construct.

      The expressions used for the WHEN criterion may only be literal strings when “value” is present, i.e. CASE table.somecol WHEN “x” THEN “y”. Otherwise, literal strings are not accepted in this position, and either the text(<string>) or literal(<string>) constructs must be used to interpret raw string values.

      Usage examples:

      case([(orderline.c.qty > 100, item.c.specialprice),
            (orderline.c.qty > 10, item.c.bulkprice)
          ], else_=item.c.regularprice)
      case(value=emp.c.type, whens={
              'engineer': emp.c.salary * 1.1,
              'manager':  emp.c.salary * 3,
          })

      Using literal_column(), to allow for databases that do not support bind parameters in the then clause. The type can be specified which determines the type of the case() construct overall:

      case([(orderline.c.qty > 100,
              literal_column("'greaterthan100'", String)),
            (orderline.c.qty > 10, literal_column("'greaterthan10'",
              String))
    ], else_=literal_column("'lessthan10'", String))
      sqlalchemy.sql.expression.cast(clause, totype, **kwargs)

      Return a CAST function.

      Equivalent of SQL CAST(clause AS totype).

      Use with a TypeEngine subclass, i.e:

      cast(table.c.unit_price * table.c.qty, Numeric(10,4))

      or:

      cast(table.c.timestamp, DATE)
      sqlalchemy.sql.expression.column(text, type_=None)

      Return a textual column clause, as would be in the columns clause of a SELECT statement.

      The object returned is an instance of ColumnClause, which represents the “syntactical” portion of the schema-level Column object. It is often used directly within select() constructs or with lightweight table() constructs.

      Note that the column() function is not part of the sqlalchemy namespace. It must be imported from the sql package:

      from sqlalchemy.sql import table, column
      Parameters:
      • text – the name of the column. Quoting rules will be applied to the clause like any other column name. For textual column constructs that are not to be quoted, use the literal_column() function.
      • type_ – an optional TypeEngine object which will provide result-set translation for this column.

      See ColumnClause for further examples.

      sqlalchemy.sql.expression.collate(expression, collation)

      Return the clause expression COLLATE collation.

      e.g.:

      collate(mycolumn, 'utf8_bin')

      produces:

      mycolumn COLLATE utf8_bin
      sqlalchemy.sql.expression.desc(column)

      Return a descending ORDER BY clause element.

      e.g.:

      someselect.order_by(desc(table1.mycol))

      produces:

      ORDER BY mycol DESC
      sqlalchemy.sql.expression.distinct(expr)

      Return a DISTINCT clause.

      e.g.:

      distinct(a)

      renders:

      DISTINCT a
      sqlalchemy.sql.expression.extract(field, expr)

      Return the clause extract(field FROM expr).

      sqlalchemy.sql.expression.false()

      Return a False_ object, which compiles to false, or the boolean equivalent for the target dialect.

      sqlalchemy.sql.expression.func = <sqlalchemy.sql.expression._FunctionGenerator object at 0x102a77e90>

      Generate SQL function expressions.

      func is a special object instance which generates SQL functions based on name-based attributes, e.g.:

      >>> print func.count(1)
      count(:param_1)

      The element is a column-oriented SQL element like any other, and is used in that way:

      >>> print select([func.count(table.c.id)])
      SELECT count(sometable.id) FROM sometable

      Any name can be given to func. If the function name is unknown to SQLAlchemy, it will be rendered exactly as is. For common SQL functions which SQLAlchemy is aware of, the name may be interpreted as a generic function which will be compiled appropriately to the target database:

      >>> print func.current_timestamp()
      CURRENT_TIMESTAMP

      To call functions which are present in dot-separated packages, specify them in the same manner:

      >>> print func.stats.yield_curve(5, 10)
      stats.yield_curve(:yield_curve_1, :yield_curve_2)

      SQLAlchemy can be made aware of the return type of functions to enable type-specific lexical and result-based behavior. For example, to ensure that a string-based function returns a Unicode value and is similarly treated as a string in expressions, specify Unicode as the type:

      >>> print func.my_string(u'hi', type_=Unicode) + ' ' + \
      ... func.my_string(u'there', type_=Unicode)
      my_string(:my_string_1) || :my_string_2 || my_string(:my_string_3)

      The object returned by a func call is usually an instance of Function. This object meets the “column” interface, including comparison and labeling functions. The object can also be passed to the execute() method of a Connection or Engine, where it will be wrapped inside a SELECT statement first:

      print connection.execute(func.current_timestamp()).scalar()

      In a few exceptional cases, the func accessor will redirect a name to a built-in expression such as cast() or extract(), as these names have well-known meaning but are not exactly the same as “functions” from a SQLAlchemy perspective.

      New in version 0.8: func can return non-function expression constructs for common quasi-functional names like cast() and extract().

      Functions which are interpreted as “generic” functions know how to calculate their return type automatically. For a listing of known generic functions, see SQL and Generic Functions.

      sqlalchemy.sql.expression.label(name, obj)

      Return a Label object for the given ColumnElement.

      A label changes the name of an element in the columns clause of a SELECT statement, typically via the AS SQL keyword.

      This functionality is more conveniently available via the label() method on ColumnElement.
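
      For example, a brief sketch assuming an "orders" table (the table and column names are hypothetical):

      from sqlalchemy.sql import select, label

      stmt = select([label("line_total", orders.c.qty * orders.c.price)])

      which would render the column as orders.qty * orders.price AS line_total.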

      name
      label name
      obj
      a ColumnElement.
      sqlalchemy.sql.expression.literal(value, type_=None)

      Return a literal clause, bound to a bind parameter.

      Literal clauses are created automatically when non-ClauseElement objects (such as strings, ints, dates, etc.) are used in a comparison operation with a ColumnElement subclass, such as a Column object. Use this function to force the generation of a literal clause, which will be created as a BindParameter with a bound value.
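
      For example, a brief sketch assuming a "users" table (hypothetical):

      from sqlalchemy import literal

      expr = literal("ed") == users.c.name

      Here the left side is rendered as a bound parameter, e.g. :param_1 = users.name, rather than being coerced relative to the right-hand column.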

      Parameters:
      • value – the value to be bound. Can be any Python object supported by the underlying DB-API, or is translatable via the given type argument.
      • type_ – an optional TypeEngine which will provide bind-parameter translation for this literal.
      sqlalchemy.sql.expression.literal_column(text, type_=None)

      Return a textual column expression, as would be in the columns clause of a SELECT statement.

      The object returned supports further expressions in the same way as any other column object, including comparison, math and string operations. The type_ parameter is important for determining proper expression behavior (such as whether ‘+’ means string concatenation or numerical addition, based on the type).

      Parameters:
      • text – the text of the expression; can be any SQL expression. Quoting rules will not be applied. To specify a column-name expression which should be subject to quoting rules, use the column() function.
      • type_ – an optional TypeEngine object which will provide result-set translation and additional expression semantics for this column. If left as None the type will be NullType.
      sqlalchemy.sql.expression.not_(clause)

      Return a negation of the given clause, i.e. NOT(clause).

      The ~ operator is also overloaded on all ColumnElement subclasses to produce the same result.
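
      For example, a brief sketch assuming a "users" table (hypothetical); this is equivalent to ~(users.c.name == 'ed'):

      from sqlalchemy import select, not_

      stmt = select([users]).where(not_(users.c.name == 'ed'))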

      sqlalchemy.sql.expression.null()

      Return a Null object, which compiles to NULL.

      sqlalchemy.sql.expression.nullsfirst(column)

      Return a NULLS FIRST ORDER BY clause element.

      e.g.:

      someselect.order_by(desc(table1.mycol).nullsfirst())

      produces:

      ORDER BY mycol DESC NULLS FIRST
      sqlalchemy.sql.expression.nullslast(column)

      Return a NULLS LAST ORDER BY clause element.

      e.g.:

      someselect.order_by(desc(table1.mycol).nullslast())

      produces:

      ORDER BY mycol DESC NULLS LAST
      sqlalchemy.sql.expression.or_(*clauses)

      Join a list of clauses together using the OR operator.

      The | operator is also overloaded on all ColumnElement subclasses to produce the same result.
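
      For example, a brief sketch assuming a "users" table (hypothetical):

      from sqlalchemy import select, or_

      stmt = select([users]).where(
          or_(users.c.name == 'ed', users.c.name == 'wendy')
      )

      which renders the two criteria joined by OR within the WHERE clause.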

      sqlalchemy.sql.expression.outparam(key, type_=None)

      Create an ‘OUT’ parameter for usage in functions (stored procedures), for databases which support them.

      The outparam can be used like a regular function parameter. The “output” value will be available from the ResultProxy object via its out_parameters attribute, which returns a dictionary containing the values.
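
      A minimal sketch, assuming an Oracle-style stored procedure my_proc that populates an OUT parameter (the procedure and parameter names here are hypothetical):

      from sqlalchemy import Integer
      from sqlalchemy.sql import text, outparam

      stmt = text("BEGIN my_proc(:x_in, :x_out); END;",
                  bindparams=[outparam('x_out', Integer)])
      result = connection.execute(stmt, x_in=5)
      print result.out_parameters['x_out']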

      sqlalchemy.sql.expression.over(func, partition_by=None, order_by=None)

      Produce an OVER clause against a function.

      Used against aggregate or so-called “window” functions, for database backends that support window functions.

      E.g.:

      from sqlalchemy import over
      over(func.row_number(), order_by='x')

      Would produce “ROW_NUMBER() OVER(ORDER BY x)”.

      Parameters:
      • func – a FunctionElement construct, typically generated by func.
      • partition_by – a column element or string, or a list of such, that will be used as the PARTITION BY clause of the OVER construct.
      • order_by – a column element or string, or a list of such, that will be used as the ORDER BY clause of the OVER construct.

      This function is also available from the func construct itself via the FunctionElement.over() method.

      New in version 0.7.

      sqlalchemy.sql.expression.text(text, bind=None, *args, **kwargs)

      Create a SQL construct that is represented by a literal string.

      E.g.:

      t = text("SELECT * FROM users")
      result = connection.execute(t)

      The advantages text() provides over a plain string are backend-neutral support for bind parameters, per-statement execution options, and bind parameter and result-column typing behavior, allowing SQLAlchemy type constructs to play a role when executing a statement that is specified literally.

      Bind parameters are specified by name, using the format :name. E.g.:

      t = text("SELECT * FROM users WHERE id=:user_id")
      result = connection.execute(t, user_id=12)

      To invoke SQLAlchemy typing logic for bind parameters, the bindparams list allows specification of bindparam() constructs which specify the type for a given name:

      t = text("SELECT id FROM users WHERE updated_at>:updated",
                  bindparams=[bindparam('updated', DateTime())]
              )

      Typing during result row processing is also an important concern. Result column types are specified using the typemap dictionary, where the keys match the names of columns. These names are taken from what the DBAPI returns as cursor.description:

      t = text("SELECT id, name FROM users",
              typemap={
                  'id':Integer,
                  'name':Unicode
              }
      )

      The text() construct is used internally for most cases when a literal string is specified for part of a larger query, such as within select(), update(), insert() or delete(). In those cases, the same bind parameter syntax is applied:

      s = select([users.c.id, users.c.name]).where("id=:user_id")
      result = connection.execute(s, user_id=12)

      Using text() explicitly usually implies the construction of a full, standalone statement. As such, SQLAlchemy refers to it as an Executable object, and it supports the Executable.execution_options() method. For example, a text() construct that should be subject to “autocommit” can be set explicitly to do so using the autocommit option:

      t = text("EXEC my_procedural_thing()").\
              execution_options(autocommit=True)

      Note that SQLAlchemy’s usual “autocommit” behavior applies to text() constructs - that is, statements which begin with a phrase such as INSERT, UPDATE, DELETE, or a variety of other phrases specific to certain backends, will be eligible for autocommit if no transaction is in progress.

      Parameters:
      • text – the text of the SQL statement to be created. Use :<param> to specify bind parameters; they will be compiled to their engine-specific format.
      • autocommit – Deprecated. Use .execution_options(autocommit=<True|False>) to set the autocommit option.
      • bind – an optional connection or engine to be used for this text query.
      • bindparams – a list of bindparam() instances which can be used to define the types and/or initial values for the bind parameters within the textual statement; the keynames of the bindparams must match those within the text of the statement. The types will be used for pre-processing on bind values.
      • typemap – a dictionary mapping the names of columns represented in the columns clause of a SELECT statement to type objects, which will be used to perform post-processing on columns within the result set. This argument applies to any expression that returns result sets.
      sqlalchemy.sql.expression.true()

      Return a True_ object, which compiles to true, or the boolean equivalent for the target dialect.

      sqlalchemy.sql.expression.tuple_(*expr)

      Return a SQL tuple.

      Main usage is to produce a composite IN construct:

      tuple_(table.c.col1, table.c.col2).in_(
          [(1, 2), (5, 12), (10, 19)]
      )

      Warning

      The composite IN construct is not supported by all backends, and is currently known to work on Postgresql and MySQL, but not SQLite. Unsupported backends will raise a subclass of DBAPIError when such an expression is invoked.

      sqlalchemy.sql.expression.type_coerce(expr, type_)

      Coerce the given expression into the given type, on the Python side only.

      type_coerce() is roughly similar to cast(), except no “CAST” expression is rendered - the given type is only applied towards expression typing and against received result values.

      e.g.:

      from sqlalchemy.types import TypeDecorator
      import uuid
      
      class AsGuid(TypeDecorator):
          impl = String
      
          def process_bind_param(self, value, dialect):
              if value is not None:
                  return str(value)
              else:
                  return None
      
          def process_result_value(self, value, dialect):
              if value is not None:
                  return uuid.UUID(value)
              else:
                  return None
      
      conn.execute(
          select([type_coerce(mytable.c.ident, AsGuid)]).\
                  where(
                      type_coerce(mytable.c.ident, AsGuid) ==
                      uuid.uuid3(uuid.NAMESPACE_URL, 'bar')
                  )
      )
      class sqlalchemy.sql.expression.BinaryExpression(left, right, operator, type_=None, negate=None, modifiers=None)

      Bases: sqlalchemy.sql.expression.ColumnElement

      Represent an expression that is LEFT <operator> RIGHT.

      A BinaryExpression is generated automatically whenever two column expressions are used in a Python binary expression:

      >>> from sqlalchemy.sql import column
      >>> column('a') + column('b')
      <sqlalchemy.sql.expression.BinaryExpression object at 0x101029dd0>
      >>> print column('a') + column('b')
      a + b
       
      compare(other, **kw)

      Compare this BinaryExpression against the given BinaryExpression.

      class sqlalchemy.sql.expression.BindParameter(key, value, type_=None, unique=False, callable_=None, isoutparam=False, required=False, quote=None, _compared_to_operator=None, _compared_to_type=None)

      Bases: sqlalchemy.sql.expression.ColumnElement

      Represent a bind parameter.

      Public constructor is the bindparam() function.

      __eq__(other)
      inherited from the __eq__() method of ColumnOperators

      Implement the == operator.

      In a column context, produces the clause a = b. If the target is None, produces a IS NULL.

      __init__(key, value, type_=None, unique=False, callable_=None, isoutparam=False, required=False, quote=None, _compared_to_operator=None, _compared_to_type=None)

      Construct a BindParameter.

      Parameters:
      • key – the key for this bind param. Will be used in the generated SQL statement for dialects that use named parameters. This value may be modified when part of a compilation operation, if other BindParameter objects exist with the same key, or if its length is too long and truncation is required.
      • value – Initial value for this bind param. This value may be overridden by the dictionary of parameters sent to statement compilation/execution.
      • callable_ – A callable function that takes the place of “value”. The function will be called at statement execution time to determine the ultimate value. Used for scenarios where the actual bind value cannot be determined at the point at which the clause construct is created, but embedded bind values are still desirable.
      • type_ – A TypeEngine object that will be used to pre-process the value corresponding to this BindParameter at execution time.
      • unique – if True, the key name of this BindParameter will be modified if another BindParameter of the same name has already been located within the containing ClauseElement.
      • quote – True if this parameter name requires quoting and is not currently known as a SQLAlchemy reserved word; this currently only applies to the Oracle backend.
      • required – a value is required at execution time.
      • isoutparam – if True, the parameter should be treated like a stored procedure “OUT” parameter.
      __le__(other)
      inherited from the __le__() method of ColumnOperators

      Implement the <= operator.

      In a column context, produces the clause a <= b.

      __lt__(other)
      inherited from the __lt__() method of ColumnOperators

      Implement the < operator.

      In a column context, produces the clause a < b.

      __ne__(other)
      inherited from the __ne__() method of ColumnOperators

      Implement the != operator.

      In a column context, produces the clause a != b. If the target is None, produces a IS NOT NULL.

      anon_label
      inherited from the anon_label attribute of ColumnElement

      provides a constant ‘anonymous label’ for this ColumnElement.

      This is a label() expression which will be named at compile time. The same label() is returned each time anon_label is called so that expressions can reference anon_label multiple times, producing the same label name at compile time.

      the compiler uses this function automatically at compile time for expressions that are known to be ‘unnamed’ like binary expressions and function calls.

      asc()
      inherited from the asc() method of ColumnOperators

      Produce an asc() clause against the parent object.

      between(cleft, cright)
      inherited from the between() method of ColumnOperators

      Produce a between() clause against the parent object, given the lower and upper range.

      collate(collation)
      inherited from the collate() method of ColumnOperators

      Produce a collate() clause against the parent object, given the collation string.

      compare(other, **kw)

      Compare this BindParameter to the given clause.

      compile(bind=None, dialect=None, **kw)
      inherited from the compile() method of ClauseElement

      Compile this SQL expression.

      The return value is a Compiled object. Calling str() or unicode() on the returned value will yield a string representation of the result. The Compiled object also can return a dictionary of bind parameter names and values using the params accessor.

      Parameters:
      • bind – An Engine or Connection from which a Compiled will be acquired. This argument takes precedence over this ClauseElement‘s bound engine, if any.
      • column_keys – Used for INSERT and UPDATE statements, a list of column names which should be present in the VALUES clause of the compiled statement. If None, all columns from the target table object are rendered.
      • dialect – A Dialect instance from which a Compiled will be acquired. This argument takes precedence over the bind argument as well as this ClauseElement‘s bound engine, if any.
      • inline – Used for INSERT statements, for a dialect which does not support inline retrieval of newly generated primary key columns, will force the expression used to create the new primary key value to be rendered inline within the INSERT statement’s VALUES clause. This typically refers to Sequence execution but may also refer to any server-side default generation function associated with a primary key Column.
      concat(other)
      inherited from the concat() method of ColumnOperators

      Implement the ‘concat’ operator.

      In a column context, produces the clause a || b, or uses the concat() operator on MySQL.

      contains(other, **kwargs)
      inherited from the contains() method of ColumnOperators

      Implement the ‘contains’ operator.

      In a column context, produces the clause LIKE '%<other>%'

      desc()
      inherited from the desc() method of ColumnOperators

      Produce a desc() clause against the parent object.

      distinct()
      inherited from the distinct() method of ColumnOperators

      Produce a distinct() clause against the parent object.

      effective_value

      Return the value of this bound parameter, taking into account if the callable parameter was set.

      The callable value will be evaluated and returned if present, else value.
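
      For example, a brief sketch using a deferred callable:

      from sqlalchemy import bindparam

      p = bindparam('x', callable_=lambda: 10)
      p.effective_value   # 10, taken from the callable rather than .value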

      endswith(other, **kwargs)
      inherited from the endswith() method of ColumnOperators

      Implement the ‘endswith’ operator.

      In a column context, produces the clause LIKE '%<other>'

      expression
      inherited from the expression attribute of ColumnElement

      Return a column expression.

      Part of the inspection interface; returns self.

      get_children(**kwargs)
      inherited from the get_children() method of ClauseElement

      Return immediate child elements of this ClauseElement.

      This is used for visit traversal.

      **kwargs may contain flags that change the collection that is returned, for example to return a subset of items in order to cut down on larger traversals, or to return child items from a different context (such as schema-level collections instead of clause-level).

      ilike(other, escape=None)
      inherited from the ilike() method of ColumnOperators

      Implement the ilike operator.

      In a column context, produces the clause a ILIKE other.

      E.g.:

      select([sometable]).where(sometable.c.column.ilike("%foobar%"))
      Parameters:
      • other – expression to be compared
      • escape

        optional escape character, renders the ESCAPE keyword, e.g.:

        somecolumn.ilike("foo/%bar", escape="/")
      in_(other)
      inherited from the in_() method of ColumnOperators

      Implement the in operator.

      In a column context, produces the clause a IN other. “other” may be a tuple/list of column expressions, or a select() construct.

      is_(other)
      inherited from the is_() method of ColumnOperators

      Implement the IS operator.

      Normally, IS is generated automatically when comparing to a value of None, which resolves to NULL. However, explicit usage of IS may be desirable if comparing to boolean values on certain platforms.

      New in version 0.7.9.

      isnot(other)
      inherited from the isnot() method of ColumnOperators

      Implement the IS NOT operator.

      Normally, IS NOT is generated automatically when comparing to a value of None, which resolves to NULL. However, explicit usage of IS NOT may be desirable if comparing to boolean values on certain platforms.

      New in version 0.7.9.

      label(name)
      inherited from the label() method of ColumnElement

      Produce a column label, i.e. <columnname> AS <name>.

      This is a shortcut to the label() function.

      if ‘name’ is None, an anonymous label name will be generated.

      like(other, escape=None)
      inherited from the like() method of ColumnOperators

      Implement the like operator.

      In a column context, produces the clause a LIKE other.

      E.g.:

      select([sometable]).where(sometable.c.column.like("%foobar%"))
      Parameters:
      • other – expression to be compared
      • escape

        optional escape character, renders the ESCAPE keyword, e.g.:

        somecolumn.like("foo/%bar", escape="/")
      match(other, **kwargs)
      inherited from the match() method of ColumnOperators

      Implements the ‘match’ operator.

      In a column context, this produces a MATCH clause, i.e. MATCH '<other>'. The allowed contents of other are database backend specific.

      notilike(other, escape=None)
      inherited from the notilike() method of ColumnOperators

      implement the NOT ILIKE operator.

      This is equivalent to using negation with ColumnOperators.ilike(), i.e. ~x.ilike(y).

      New in version 0.8.

      notin_(other)
      inherited from the notin_() method of ColumnOperators

      implement the NOT IN operator.

      This is equivalent to using negation with ColumnOperators.in_(), i.e. ~x.in_(y).

      New in version 0.8.

      notlike(other, escape=None)
      inherited from the notlike() method of ColumnOperators

      implement the NOT LIKE operator.

      This is equivalent to using negation with ColumnOperators.like(), i.e. ~x.like(y).

      New in version 0.8.

      nullsfirst()
      inherited from the nullsfirst() method of ColumnOperators

      Produce a nullsfirst() clause against the parent object.

      nullslast()
      inherited from the nullslast() method of ColumnOperators

      Produce a nullslast() clause against the parent object.

      op(opstring, precedence=0)
      inherited from the op() method of Operators

      produce a generic operator function.

      e.g.:

      somecolumn.op("*")(5)

      produces:

      somecolumn * 5

      This function can also be used to make bitwise operators explicit. For example:

      somecolumn.op('&')(0xff)

      is a bitwise AND of the value in somecolumn.

      Parameters:
      • opstring – a string which will be output as the infix operator between this element and the expression passed to the generated function.
      • precedence

        precedence to apply to the operator, when parenthesizing expressions. A lower number will cause the expression to be parenthesized when applied against another operator with higher precedence. The default value of 0 is lower than all operators except for the comma (,) and AS operators. A value of 100 will be higher or equal to all operators, and -100 will be lower than or equal to all operators.

        New in version 0.8: - added the ‘precedence’ argument.

      params(*optionaldict, **kwargs)
      inherited from the params() method of ClauseElement

      Return a copy with bindparam() elements replaced.

      Returns a copy of this ClauseElement with bindparam() elements replaced with values taken from the given dictionary:

      >>> clause = column('x') + bindparam('foo')
      >>> print clause.compile().params
      {'foo':None}
      >>> print clause.params({'foo':7}).compile().params
      {'foo':7}
      self_group(against=None)
      inherited from the self_group() method of ClauseElement

      Apply a ‘grouping’ to this ClauseElement.

      This method is overridden by subclasses to return a “grouping” construct, i.e. parenthesis. In particular it’s used by “binary” expressions to provide a grouping around themselves when placed into a larger expression, as well as by select() constructs when placed into the FROM clause of another select(). (Note that subqueries should be normally created using the Select.alias() method, as many platforms require nested SELECT statements to be named).

      As expressions are composed together, the application of self_group() is automatic - end-user code should never need to use this method directly. Note that SQLAlchemy’s clause constructs take operator precedence into account - so parenthesis might not be needed, for example, in an expression like x OR (y AND z) - AND takes precedence over OR.

      The base self_group() method of ClauseElement just returns self.

      shares_lineage(othercolumn)
      inherited from the shares_lineage() method of ColumnElement

      Return True if the given ColumnElement has a common ancestor to this ColumnElement.

      startswith(other, **kwargs)
      inherited from the startswith() method of ColumnOperators

      Implement the startswith operator.

      In a column context, produces the clause LIKE '<other>%'

      unique_params(*optionaldict, **kwargs)
      inherited from the unique_params() method of ClauseElement

      Return a copy with bindparam() elements replaced.

      Same functionality as params(), except adds unique=True to affected bind parameters so that multiple statements can be used.

      class sqlalchemy.sql.expression.Case(whens, value=None, else_=None)

      Bases: sqlalchemy.sql.expression.ColumnElement

      class sqlalchemy.sql.expression.Cast(clause, totype, **kwargs)

      Bases: sqlalchemy.sql.expression.ColumnElement

      class sqlalchemy.sql.expression.ClauseElement

      Bases: sqlalchemy.sql.visitors.Visitable

      Base class for elements of a programmatically constructed SQL expression.

      compare(other, **kw)

      Compare this ClauseElement to the given ClauseElement.

      Subclasses should override the default behavior, which is a straight identity comparison.

      **kw are arguments consumed by subclass compare() methods and may be used to modify the criteria for comparison. (see ColumnElement)

      compile(bind=None, dialect=None, **kw)

      Compile this SQL expression.

      The return value is a Compiled object. Calling str() or unicode() on the returned value will yield a string representation of the result. The Compiled object also can return a dictionary of bind parameter names and values using the params accessor.

      Parameters:
      • bind – An Engine or Connection from which a Compiled will be acquired. This argument takes precedence over this ClauseElement‘s bound engine, if any.
      • column_keys – Used for INSERT and UPDATE statements, a list of column names which should be present in the VALUES clause of the compiled statement. If None, all columns from the target table object are rendered.
      • dialect – A Dialect instance from which a Compiled will be acquired. This argument takes precedence over the bind argument as well as this ClauseElement‘s bound engine, if any.
      • inline – Used for INSERT statements, for a dialect which does not support inline retrieval of newly generated primary key columns, will force the expression used to create the new primary key value to be rendered inline within the INSERT statement’s VALUES clause. This typically refers to Sequence execution but may also refer to any server-side default generation function associated with a primary key Column.
      get_children(**kwargs)

      Return immediate child elements of this ClauseElement.

      This is used for visit traversal.

      **kwargs may contain flags that change the collection that is returned, for example to return a subset of items in order to cut down on larger traversals, or to return child items from a different context (such as schema-level collections instead of clause-level).

      params(*optionaldict, **kwargs)

      Return a copy with bindparam() elements replaced.

      Returns a copy of this ClauseElement with bindparam() elements replaced with values taken from the given dictionary:

      >>> clause = column('x') + bindparam('foo')
      >>> print clause.compile().params
      {'foo':None}
      >>> print clause.params({'foo':7}).compile().params
      {'foo':7}
      self_group(against=None)

      Apply a ‘grouping’ to this ClauseElement.

      This method is overridden by subclasses to return a “grouping” construct, i.e. parenthesis. In particular it’s used by “binary” expressions to provide a grouping around themselves when placed into a larger expression, as well as by select() constructs when placed into the FROM clause of another select(). (Note that subqueries should be normally created using the Select.alias() method, as many platforms require nested SELECT statements to be named).

      As expressions are composed together, the application of self_group() is automatic - end-user code should never need to use this method directly. Note that SQLAlchemy’s clause constructs take operator precedence into account - so parenthesis might not be needed, for example, in an expression like x OR (y AND z) - AND takes precedence over OR.

      The base self_group() method of ClauseElement just returns self.

      unique_params(*optionaldict, **kwargs)

      Return a copy with bindparam() elements replaced.

      Same functionality as params(), except adds unique=True to affected bind parameters so that multiple statements can be used.

      class sqlalchemy.sql.expression.ClauseList(*clauses, **kwargs)

      Bases: sqlalchemy.sql.expression.ClauseElement

      Describe a list of clauses, separated by an operator.

      By default, it is comma-separated, such as a column listing.
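
      For example, a brief sketch (ClauseList is mostly used internally, but can be rendered directly):

      from sqlalchemy.sql import column
      from sqlalchemy.sql.expression import ClauseList

      print ClauseList(column('a'), column('b'), column('c'))
      # a, b, c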

      compare(other, **kw)

      Compare this ClauseList to the given ClauseList, including a comparison of all the clause items.

      class sqlalchemy.sql.expression.ColumnClause(text, selectable=None, type_=None, is_literal=False)

      Bases: sqlalchemy.sql.expression.Immutable, sqlalchemy.sql.expression.ColumnElement

      Represents a generic column expression from any textual string.

      This includes columns associated with tables, aliases and select statements, but also any arbitrary text. May or may not be bound to an underlying Selectable.

      ColumnClause is constructed by itself typically via the column() function. It may be placed directly into constructs such as select() constructs:

      from sqlalchemy.sql import column, select
      
      c1, c2 = column("c1"), column("c2")
      s = select([c1, c2]).where(c1==5)

      There is also a variant on column() known as literal_column() - the difference is that in the latter case, the string value is assumed to be an exact expression, rather than a column name, so that no quoting rules or similar are applied:

      from sqlalchemy.sql import literal_column, select
      
      s = select([literal_column("5 + 7")])

      ColumnClause can also be used in a table-like fashion by combining the column() function with the table() function, to produce a “lightweight” form of table metadata:

      from sqlalchemy.sql import table, column
      
      user = table("user",
              column("id"),
              column("name"),
              column("description"),
      )

      The above construct can be created in an ad-hoc fashion and is not associated with any schema.MetaData, unlike its more full-fledged schema.Table counterpart.

      Parameters:
      • text – the text of the element.
      • selectable – parent selectable.
      • type – a types.TypeEngine object which can associate this ColumnClause with a type.
      • is_literal – if True, the ColumnClause is assumed to be an exact expression that will be delivered to the output with no quoting rules applied regardless of case sensitive settings. the literal_column() function is usually used to create such a ColumnClause.
      __eq__(other)
      inherited from the __eq__() method of ColumnOperators

      Implement the == operator.

      In a column context, produces the clause a = b. If the target is None, produces a IS NULL.

      __le__(other)
      inherited from the __le__() method of ColumnOperators

      Implement the <= operator.

      In a column context, produces the clause a <= b.

      __lt__(other)
      inherited from the __lt__() method of ColumnOperators

      Implement the < operator.

      In a column context, produces the clause a < b.

      __ne__(other)
      inherited from the __ne__() method of ColumnOperators

      Implement the != operator.

      In a column context, produces the clause a != b. If the target is None, produces a IS NOT NULL.

      anon_label
      inherited from the anon_label attribute of ColumnElement

      provides a constant ‘anonymous label’ for this ColumnElement.

      This is a label() expression which will be named at compile time. The same label() is returned each time anon_label is called so that expressions can reference anon_label multiple times, producing the same label name at compile time.

      the compiler uses this function automatically at compile time for expressions that are known to be ‘unnamed’ like binary expressions and function calls.

      asc()
      inherited from the asc() method of ColumnOperators

      Produce an asc() clause against the parent object.

      between(cleft, cright)
      inherited from the between() method of ColumnOperators

      Produce a between() clause against the parent object, given the lower and upper range.

      collate(collation)
      inherited from the collate() method of ColumnOperators

      Produce a collate() clause against the parent object, given the collation string.

      compare(other, use_proxies=False, equivalents=None, **kw)
      inherited from the compare() method of ColumnElement

      Compare this ColumnElement to another.

      Special arguments understood:

      Parameters:
      • use_proxies – when True, consider two columns that share a common base column as equivalent (i.e. shares_lineage())
      • equivalents – a dictionary of columns as keys mapped to sets of columns. If the given “other” column is present in this dictionary, and any of the columns in the corresponding set pass the comparison test, the result is True. This is used to expand the comparison to other columns that may be known to be equivalent to this one via foreign key or other criterion.
      compile(bind=None, dialect=None, **kw)
      inherited from the compile() method of ClauseElement

      Compile this SQL expression.

      The return value is a Compiled object. Calling str() or unicode() on the returned value will yield a string representation of the result. The Compiled object also can return a dictionary of bind parameter names and values using the params accessor.

      Parameters:
      • bind – An Engine or Connection from which a Compiled will be acquired. This argument takes precedence over this ClauseElement‘s bound engine, if any.
      • column_keys – Used for INSERT and UPDATE statements, a list of column names which should be present in the VALUES clause of the compiled statement. If None, all columns from the target table object are rendered.
      • dialect – A Dialect instance from which a Compiled will be acquired. This argument takes precedence over the bind argument as well as this ClauseElement‘s bound engine, if any.
      • inline – Used for INSERT statements, for a dialect which does not support inline retrieval of newly generated primary key columns, will force the expression used to create the new primary key value to be rendered inline within the INSERT statement’s VALUES clause. This typically refers to Sequence execution but may also refer to any server-side default generation function associated with a primary key Column.
      concat(other)
      inherited from the concat() method of ColumnOperators

      Implement the ‘concat’ operator.

      In a column context, produces the clause a || b, or uses the concat() operator on MySQL.

      contains(other, **kwargs)
      inherited from the contains() method of ColumnOperators

      Implement the ‘contains’ operator.

      In a column context, produces the clause LIKE '%<other>%'

      desc()
      inherited from the desc() method of ColumnOperators

      Produce a desc() clause against the parent object.

      distinct()
      inherited from the distinct() method of ColumnOperators

      Produce a distinct() clause against the parent object.

      endswith(other, **kwargs)
      inherited from the endswith() method of ColumnOperators

      Implement the ‘endswith’ operator.

      In a column context, produces the clause LIKE '%<other>'

      expression
      inherited from the expression attribute of ColumnElement

      Return a column expression.

      Part of the inspection interface; returns self.

      get_children(**kwargs)
      inherited from the get_children() method of ClauseElement

      Return immediate child elements of this ClauseElement.

      This is used for visit traversal.

      **kwargs may contain flags that change the collection that is returned, for example to return a subset of items in order to cut down on larger traversals, or to return child items from a different context (such as schema-level collections instead of clause-level).

      ilike(other, escape=None)
      inherited from the ilike() method of ColumnOperators

      Implement the ilike operator.

      In a column context, produces the clause a ILIKE other.

      E.g.:

      select([sometable]).where(sometable.c.column.ilike("%foobar%"))
      Parameters:
      • other – expression to be compared
      • escape

        optional escape character, renders the ESCAPE keyword, e.g.:

        somecolumn.ilike("foo/%bar", escape="/")
      in_(other)
      inherited from the in_() method of ColumnOperators

      Implement the in operator.

      In a column context, produces the clause a IN other. “other” may be a tuple/list of column expressions, or a select() construct.

      is_(other)
      inherited from the is_() method of ColumnOperators

      Implement the IS operator.

      Normally, IS is generated automatically when comparing to a value of None, which resolves to NULL. However, explicit usage of IS may be desirable if comparing to boolean values on certain platforms.

      New in version 0.7.9.

      isnot(other)
      inherited from the isnot() method of ColumnOperators

      Implement the IS NOT operator.

      Normally, IS NOT is generated automatically when comparing to a value of None, which resolves to NULL. However, explicit usage of IS NOT may be desirable if comparing to boolean values on certain platforms.

      New in version 0.7.9.

      label(name)
      inherited from the label() method of ColumnElement

      Produce a column label, i.e. <columnname> AS <name>.

      This is a shortcut to the label() function.

      if ‘name’ is None, an anonymous label name will be generated.

      like(other, escape=None)
      inherited from the like() method of ColumnOperators

      Implement the like operator.

      In a column context, produces the clause a LIKE other.

      E.g.:

      select([sometable]).where(sometable.c.column.like("%foobar%"))
      Parameters:
      • other – expression to be compared
      • escape

        optional escape character, renders the ESCAPE keyword, e.g.:

        somecolumn.like("foo/%bar", escape="/")
      match(other, **kwargs)
      inherited from the match() method of ColumnOperators

      Implements the ‘match’ operator.

      In a column context, this produces a MATCH clause, i.e. MATCH '<other>'. The allowed contents of other are database backend specific.

      notilike(other, escape=None)
      inherited from the notilike() method of ColumnOperators

      implement the NOT ILIKE operator.

      This is equivalent to using negation with ColumnOperators.ilike(), i.e. ~x.ilike(y).

      New in version 0.8.

      notin_(other)
      inherited from the notin_() method of ColumnOperators

      implement the NOT IN operator.

      This is equivalent to using negation with ColumnOperators.in_(), i.e. ~x.in_(y).

      New in version 0.8.

      notlike(other, escape=None)
      inherited from the notlike() method of ColumnOperators

      implement the NOT LIKE operator.

      This is equivalent to using negation with ColumnOperators.like(), i.e. ~x.like(y).

      New in version 0.8.

      nullsfirst()
      inherited from the nullsfirst() method of ColumnOperators

      Produce a nullsfirst() clause against the parent object.

      nullslast()
      inherited from the nullslast() method of ColumnOperators

      Produce a nullslast() clause against the parent object.

      op(opstring, precedence=0)
      inherited from the op() method of Operators

      produce a generic operator function.

      e.g.:

      somecolumn.op("*")(5)

      produces:

      somecolumn * 5

      This function can also be used to make bitwise operators explicit. For example:

      somecolumn.op('&')(0xff)

      is a bitwise AND of the value in somecolumn.

      Parameters:
      • opstring – a string which will be output as the infix operator between this element and the expression passed to the generated function.
      • precedence

        precedence to apply to the operator, when parenthesizing expressions. A lower number will cause the expression to be parenthesized when applied against another operator with higher precedence. The default value of 0 is lower than all operators except for the comma (,) and AS operators. A value of 100 will be higher or equal to all operators, and -100 will be lower than or equal to all operators.

        New in version 0.8: - added the ‘precedence’ argument.

      self_group(against=None)
      inherited from the self_group() method of ClauseElement

      Apply a ‘grouping’ to this ClauseElement.

      This method is overridden by subclasses to return a “grouping” construct, i.e. parenthesis. In particular it’s used by “binary” expressions to provide a grouping around themselves when placed into a larger expression, as well as by select() constructs when placed into the FROM clause of another select(). (Note that subqueries should be normally created using the Select.alias() method, as many platforms require nested SELECT statements to be named).

      As expressions are composed together, the application of self_group() is automatic - end-user code should never need to use this method directly. Note that SQLAlchemy’s clause constructs take operator precedence into account - so parenthesis might not be needed, for example, in an expression like x OR (y AND z) - AND takes precedence over OR.

      The base self_group() method of ClauseElement just returns self.

      shares_lineage(othercolumn)
      inherited from the shares_lineage() method of ColumnElement

      Return True if the given ColumnElement has a common ancestor to this ColumnElement.

      startswith(other, **kwargs)
      inherited from the startswith() method of ColumnOperators

      Implement the startswith operator.

      In a column context, produces the clause LIKE '<other>%'

      class sqlalchemy.sql.expression.ColumnCollection(*cols)

      Bases: sqlalchemy.util._collections.OrderedProperties

      An ordered dictionary that stores a list of ColumnElement instances.

      Overrides the __eq__() method to produce SQL clauses between sets of correlated columns.

      add(column)

      Add a column to this collection.

      The key attribute of the column will be used as the hash key for this dictionary.

      replace(column)

      add the given column to this collection, removing unaliased versions of this column as well as existing columns with the same key.

      e.g.:

      t = Table('sometable', metadata, Column('col1', Integer))
      t.columns.replace(Column('col1', Integer, key='columnone'))

      will remove the original ‘col1’ from the collection, and add the new column under the name ‘columnone’.

      Used by schema.Column to override columns during table reflection.

      class sqlalchemy.sql.expression.ColumnElement

      Bases: sqlalchemy.sql.expression.ClauseElement, sqlalchemy.sql.operators.ColumnOperators

      Represent a column-oriented SQL expression suitable for usage in the “columns” clause, WHERE clause etc. of a statement.

      While the most familiar kind of ColumnElement is the Column object, ColumnElement serves as the basis for any unit that may be present in a SQL expression, including the expressions themselves, SQL functions, bound parameters, literal expressions, keywords such as NULL, etc. ColumnElement is the ultimate base class for all such elements.

      A ColumnElement provides the ability to generate new ColumnElement objects using Python expressions. This means that Python operators such as ==, != and < are overloaded to mimic SQL operations, and allow the instantiation of further ColumnElement instances which are composed from other, more fundamental ColumnElement objects. For example, two ColumnClause objects can be added together with the addition operator + to produce a BinaryExpression. Both ColumnClause and BinaryExpression are subclasses of ColumnElement:

      >>> from sqlalchemy.sql import column
      >>> column('a') + column('b')
      <sqlalchemy.sql.expression.BinaryExpression object at 0x101029dd0>
      >>> print column('a') + column('b')
      a + b

      ColumnElement supports the ability to be a proxy element, which indicates that the ColumnElement may be associated with a Selectable which was derived from another Selectable. An example of a “derived” Selectable is an Alias of a Table. For the ambitious, an in-depth discussion of this concept can be found at Expression Transformations.

      __eq__(other)
      inherited from the __eq__() method of ColumnOperators

      Implement the == operator.

      In a column context, produces the clause a = b. If the target is None, produces a IS NULL.

      __init__
      inherited from the __init__ attribute of object

      x.__init__(...) initializes x; see help(type(x)) for signature

      __le__(other)
      inherited from the __le__() method of ColumnOperators

      Implement the <= operator.

      In a column context, produces the clause a <= b.

      __lt__(other)
      inherited from the __lt__() method of ColumnOperators

      Implement the < operator.

      In a column context, produces the clause a < b.

      __ne__(other)
      inherited from the __ne__() method of ColumnOperators

      Implement the != operator.

      In a column context, produces the clause a != b. If the target is None, produces a IS NOT NULL.

      anon_label

      provides a constant ‘anonymous label’ for this ColumnElement.

      This is a label() expression which will be named at compile time. The same label() is returned each time anon_label is called so that expressions can reference anon_label multiple times, producing the same label name at compile time.

      the compiler uses this function automatically at compile time for expressions that are known to be ‘unnamed’ like binary expressions and function calls.

      asc()
      inherited from the asc() method of ColumnOperators

      Produce an asc() clause against the parent object.

      base_columns
      between(cleft, cright)
      inherited from the between() method of ColumnOperators

      Produce a between() clause against the parent object, given the lower and upper range.

      bind = None
      collate(collation)
      inherited from the collate() method of ColumnOperators

      Produce a collate() clause against the parent object, given the collation string.

      comparator
      compare(other, use_proxies=False, equivalents=None, **kw)

      Compare this ColumnElement to another.

      Special arguments understood:

      Parameters:
      • use_proxies – when True, consider two columns that share a common base column as equivalent (i.e. shares_lineage())
      • equivalents – a dictionary of columns as keys mapped to sets of columns. If the given “other” column is present in this dictionary, and any of the columns in the corresponding set pass the comparison test, the result is True. This is used to expand the comparison to other columns that may be known to be equivalent to this one via foreign key or other criterion.
      compile(bind=None, dialect=None, **kw)
      inherited from the compile() method of ClauseElement

      Compile this SQL expression.

      The return value is a Compiled object. Calling str() or unicode() on the returned value will yield a string representation of the result. The Compiled object also can return a dictionary of bind parameter names and values using the params accessor.

      Parameters:
      • bind – An Engine or Connection from which a Compiled will be acquired. This argument takes precedence over this ClauseElement‘s bound engine, if any.
      • column_keys – Used for INSERT and UPDATE statements, a list of column names which should be present in the VALUES clause of the compiled statement. If None, all columns from the target table object are rendered.
      • dialect – A Dialect instance from which a Compiled will be acquired. This argument takes precedence over the bind argument as well as this ClauseElement‘s bound engine, if any.
      • inline – Used for INSERT statements, for a dialect which does not support inline retrieval of newly generated primary key columns, will force the expression used to create the new primary key value to be rendered inline within the INSERT statement’s VALUES clause. This typically refers to Sequence execution but may also refer to any server-side default generation function associated with a primary key Column.
      concat(other)
      inherited from the concat() method of ColumnOperators

      Implement the ‘concat’ operator.

      In a column context, produces the clause a || b, or uses the concat() operator on MySQL.

      contains(other, **kwargs)
      inherited from the contains() method of ColumnOperators

      Implement the ‘contains’ operator.

      In a column context, produces the clause LIKE '%<other>%'

      desc()
      inherited from the desc() method of ColumnOperators

      Produce a desc() clause against the parent object.

      distinct()
      inherited from the distinct() method of ColumnOperators

      Produce a distinct() clause against the parent object.

      endswith(other, **kwargs)
      inherited from the endswith() method of ColumnOperators

      Implement the ‘endswith’ operator.

      In a column context, produces the clause LIKE '%<other>'

      expression

      Return a column expression.

      Part of the inspection interface; returns self.

      foreign_keys = []
      get_children(**kwargs)
      inherited from the get_children() method of ClauseElement

      Return immediate child elements of this ClauseElement.

      This is used for visit traversal.

      **kwargs may contain flags that change the collection that is returned, for example to return a subset of items in order to cut down on larger traversals, or to return child items from a different context (such as schema-level collections instead of clause-level).

      ilike(other, escape=None)
      inherited from the ilike() method of ColumnOperators

      Implement the ilike operator.

      In a column context, produces the clause a ILIKE other.

      E.g.:

      select([sometable]).where(sometable.c.column.ilike("%foobar%"))
      Parameters:
      • other – expression to be compared
      • escape

        optional escape character, renders the ESCAPE keyword, e.g.:

        somecolumn.ilike("foo/%bar", escape="/")
      in_(other)
      inherited from the in_() method of ColumnOperators

      Implement the in operator.

      In a column context, produces the clause a IN other. “other” may be a tuple/list of column expressions, or a select() construct.

      is_(other)
      inherited from the is_() method of ColumnOperators

      Implement the IS operator.

      Normally, IS is generated automatically when comparing to a value of None, which resolves to NULL. However, explicit usage of IS may be desirable if comparing to boolean values on certain platforms.

      New in version 0.7.9.

      is_clause_element = True
      is_selectable = False
      isnot(other)
      inherited from the isnot() method of ColumnOperators

      Implement the IS NOT operator.

      Normally, IS NOT is generated automatically when comparing to a value of None, which resolves to NULL. However, explicit usage of IS NOT may be desirable if comparing to boolean values on certain platforms.

      New in version 0.7.9.

      label(name)

      Produce a column label, i.e. <columnname> AS <name>.

      This is a shortcut to the label() function.

      if ‘name’ is None, an anonymous label name will be generated.

      like(other, escape=None)
      inherited from the like() method of ColumnOperators

      Implement the like operator.

      In a column context, produces the clause a LIKE other.

      E.g.:

      select([sometable]).where(sometable.c.column.like("%foobar%"))
      Parameters:
      • other – expression to be compared
      • escape

        optional escape character, renders the ESCAPE keyword, e.g.:

        somecolumn.like("foo/%bar", escape="/")
      match(other, **kwargs)
      inherited from the match() method of ColumnOperators

      Implements the ‘match’ operator.

      In a column context, this produces a MATCH clause, i.e. MATCH '<other>'. The allowed contents of other are database backend specific.

      notilike(other, escape=None)
      inherited from the notilike() method of ColumnOperators

      implement the NOT ILIKE operator.

      This is equivalent to using negation with ColumnOperators.ilike(), i.e. ~x.ilike(y).

      New in version 0.8.

      notin_(other)
      inherited from the notin_() method of ColumnOperators

      implement the NOT IN operator.

      This is equivalent to using negation with ColumnOperators.in_(), i.e. ~x.in_(y).

      New in version 0.8.

      notlike(other, escape=None)
      inherited from the notlike() method of ColumnOperators

      implement the NOT LIKE operator.

      This is equivalent to using negation with ColumnOperators.like(), i.e. ~x.like(y).

      New in version 0.8.

      nullsfirst()
      inherited from the nullsfirst() method of ColumnOperators

      Produce a nullsfirst() clause against the parent object.

      nullslast()
      inherited from the nullslast() method of ColumnOperators

      Produce a nullslast() clause against the parent object.

      op(opstring, precedence=0)
      inherited from the op() method of Operators

      produce a generic operator function.

      e.g.:

      somecolumn.op("*")(5)

      produces:

      somecolumn * 5

      This function can also be used to make bitwise operators explicit. For example:

      somecolumn.op('&')(0xff)

      is a bitwise AND of the value in somecolumn.

      Parameters:
      • opstring – a string which will be output as the infix operator between this element and the expression passed to the generated function.
      • precedence

        precedence to apply to the operator, when parenthesizing expressions. A lower number will cause the expression to be parenthesized when applied against another operator with higher precedence. The default value of 0 is lower than all operators except for the comma (,) and AS operators. A value of 100 will be higher or equal to all operators, and -100 will be lower than or equal to all operators.

        New in version 0.8: - added the ‘precedence’ argument.

      operate(op, *other, **kwargs)
      params(*optionaldict, **kwargs)
      inherited from the params() method of ClauseElement

      Return a copy with bindparam() elements replaced.

      Returns a copy of this ClauseElement with bindparam() elements replaced with values taken from the given dictionary:

      >>> clause = column('x') + bindparam('foo')
      >>> print clause.compile().params
      {'foo':None}
      >>> print clause.params({'foo':7}).compile().params
      {'foo':7}
      primary_key = False
      proxy_set
      quote = None
      reverse_operate(op, other, **kwargs)
      self_group(against=None)
      inherited from the self_group() method of ClauseElement

      Apply a ‘grouping’ to this ClauseElement.

      This method is overridden by subclasses to return a “grouping” construct, i.e. parentheses. In particular it’s used by “binary” expressions to provide a grouping around themselves when placed into a larger expression, as well as by select() constructs when placed into the FROM clause of another select(). (Note that subqueries should normally be created using the Select.alias() method, as many platforms require nested SELECT statements to be named).

      As expressions are composed together, the application of self_group() is automatic - end-user code should never need to use this method directly. Note that SQLAlchemy’s clause constructs take operator precedence into account - so parentheses might not be needed, for example, in an expression like x OR (y AND z) - AND takes precedence over OR.

      The base self_group() method of ClauseElement just returns self.

      shares_lineage(othercolumn)

      Return True if the given ColumnElement has a common ancestor to this ColumnElement.

      startswith(other, **kwargs)
      inherited from the startswith() method of ColumnOperators

      Implement the startswith operator.

      In a column context, produces the clause LIKE '<other>%'

      supports_execution = False
      timetuple = None
      type
      unique_params(*optionaldict, **kwargs)
      inherited from the unique_params() method of ClauseElement

      Return a copy with bindparam() elements replaced.

      Same functionality as params(), except adds unique=True to affected bind parameters so that multiple statements can be used.

      class sqlalchemy.sql.operators.ColumnOperators

      Bases: sqlalchemy.sql.operators.Operators

      Defines boolean, comparison, and other operators for ColumnElement expressions.

      By default, all methods call down to operate() or reverse_operate(), passing in the appropriate operator function from the Python builtin operator module or a SQLAlchemy-specific operator function from sqlalchemy.sql.operators. For example the __eq__ function:

      def __eq__(self, other):
          return self.operate(operators.eq, other)

      Where operators.eq is essentially:

      def eq(a, b):
          return a == b

      The core column expression unit ColumnElement overrides Operators.operate() and others to return further ColumnElement constructs, so that the == operation above is replaced by a clause construct.

      See also:

      Redefining and Creating New Operators

      TypeEngine.comparator_factory

      ColumnOperators

      PropComparator

      __add__(other)

      Implement the + operator.

      In a column context, produces the clause a + b if the parent object has non-string affinity. If the parent object has a string affinity, produces the concatenation operator, a || b - see ColumnOperators.concat().

      __and__(other)
      inherited from the __and__() method of Operators

      Implement the & operator.

      When used with SQL expressions, results in an AND operation, equivalent to and_(), that is:

      a & b

      is equivalent to:

      from sqlalchemy import and_
      and_(a, b)

      Care should be taken when using & regarding operator precedence; & binds more tightly than comparison operators such as ==. The operands should be enclosed in parentheses if they contain further sub expressions:

      (a == 2) & (b == 4)
      __delattr__
      inherited from the __delattr__ attribute of object

      x.__delattr__(‘name’) <==> del x.name

      __div__(other)

      Implement the / operator.

      In a column context, produces the clause a / b.

      __eq__(other)

      Implement the == operator.

      In a column context, produces the clause a = b. If the target is None, produces a IS NULL.

      __format__()
      inherited from the __format__() method of object

      default object formatter

      __ge__(other)

      Implement the >= operator.

      In a column context, produces the clause a >= b.

      __getattribute__
      inherited from the __getattribute__ attribute of object

      x.__getattribute__(‘name’) <==> x.name

      __getitem__(index)

      Implement the [] operator.

      This can be used by some database-specific types such as Postgresql ARRAY and HSTORE.

      __gt__(other)

      Implement the > operator.

      In a column context, produces the clause a > b.

      __hash__

      x.__hash__() <==> hash(x)

      __init__
      inherited from the __init__ attribute of object

      x.__init__(...) initializes x; see help(type(x)) for signature

      __invert__()
      inherited from the __invert__() method of Operators

      Implement the ~ operator.

      When used with SQL expressions, results in a NOT operation, equivalent to not_(), that is:

      ~a

      is equivalent to:

      from sqlalchemy import not_
      not_(a)
      __le__(other)

      Implement the <= operator.

      In a column context, produces the clause a <= b.

      __lshift__(other)

      implement the << operator.

      Not used by SQLAlchemy core, this is provided for custom operator systems which want to use << as an extension point.

      __lt__(other)

      Implement the < operator.

      In a column context, produces the clause a < b.

      __mod__(other)

      Implement the % operator.

      In a column context, produces the clause a % b.

      __mul__(other)

      Implement the * operator.

      In a column context, produces the clause a * b.

      __ne__(other)

      Implement the != operator.

      In a column context, produces the clause a != b. If the target is None, produces a IS NOT NULL.

      __neg__()

      Implement the - operator.

      In a column context, produces the clause -a.

      static __new__(S, ...) → a new object with type S, a subtype of T
      inherited from the __new__() method of object
      __or__(other)
      inherited from the __or__() method of Operators

      Implement the | operator.

      When used with SQL expressions, results in an OR operation, equivalent to or_(), that is:

      a | b

      is equivalent to:

      from sqlalchemy import or_
      or_(a, b)

      Care should be taken when using | regarding operator precedence; | binds more tightly than comparison operators such as ==. The operands should be enclosed in parentheses if they contain further sub expressions:

      (a == 2) | (b == 4)
      __radd__(other)

      Implement the + operator in reverse.

      See ColumnOperators.__add__().

      __rdiv__(other)

      Implement the / operator in reverse.

      See ColumnOperators.__div__().

      __reduce__()
      inherited from the __reduce__() method of object

      helper for pickle

      __reduce_ex__()
      inherited from the __reduce_ex__() method of object

      helper for pickle

      __repr__
      inherited from the __repr__ attribute of object

      x.__repr__() <==> repr(x)

      __rmul__(other)

      Implement the * operator in reverse.

      See ColumnOperators.__mul__().

      __rshift__(other)

      implement the >> operator.

      Not used by SQLAlchemy core, this is provided for custom operator systems which want to use >> as an extension point.

      __rsub__(other)

      Implement the - operator in reverse.

      See ColumnOperators.__sub__().

      __rtruediv__(other)

      Implement the / (true division) operator in reverse.

      See ColumnOperators.__truediv__().

      __setattr__
      inherited from the __setattr__ attribute of object

      x.__setattr__(‘name’, value) <==> x.name = value

      __sizeof__() → int
      inherited from the __sizeof__() method of object

      size of object in memory, in bytes

      __str__
      inherited from the __str__ attribute of object

      x.__str__() <==> str(x)

      __sub__(other)

      Implement the - operator.

      In a column context, produces the clause a - b.

      static __subclasshook__()
      inherited from the __subclasshook__() method of object

      Abstract classes can override this to customize issubclass().

      This is invoked early on by abc.ABCMeta.__subclasscheck__(). It should return True, False or NotImplemented. If it returns NotImplemented, the normal algorithm is used. Otherwise, it overrides the normal algorithm (and the outcome is cached).

      __truediv__(other)

      Implement the / (true division) operator.

      In a column context, produces the clause a / b.

      __weakref__
      inherited from the __weakref__ attribute of Operators

      list of weak references to the object (if defined)

      asc()

      Produce a asc() clause against the parent object.

      between(cleft, cright)

      Produce a between() clause against the parent object, given the lower and upper range.
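
      A quick sketch using an ad-hoc column() (the bounds are rendered as bind parameters):

      >>> from sqlalchemy.sql import column
      >>> print column('x').between(5, 10)
      x BETWEEN :x_1 AND :x_2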

      collate(collation)

      Produce a collate() clause against the parent object, given the collation string.
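
      For example, a minimal sketch; 'NOCASE' is just an illustrative collation name:

      >>> from sqlalchemy.sql import column
      >>> print column('x').collate('NOCASE')
      x COLLATE NOCASE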

      concat(other)

      Implement the ‘concat’ operator.

      In a column context, produces the clause a || b, or uses the concat() operator on MySQL.
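
      A quick sketch of the default rendering, using ad-hoc column() constructs:

      >>> from sqlalchemy.sql import column
      >>> print column('x').concat(column('y'))
      x || y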

      contains(other, **kwargs)

      Implement the ‘contains’ operator.

      In a column context, produces the clause LIKE '%<other>%'

      desc()

      Produce a desc() clause against the parent object.

      distinct()

      Produce a distinct() clause against the parent object.

      endswith(other, **kwargs)

      Implement the ‘endswith’ operator.

      In a column context, produces the clause LIKE '%<other>'

      ilike(other, escape=None)

      Implement the ilike operator.

      In a column context, produces the clause a ILIKE other.

      E.g.:

      select([sometable]).where(sometable.c.column.ilike("%foobar%"))
      Parameters:
      • other – expression to be compared
      • escape

        optional escape character, renders the ESCAPE keyword, e.g.:

        somecolumn.ilike("foo/%bar", escape="/")
      in_(other)

      Implement the in operator.

      In a column context, produces the clause a IN other. “other” may be a tuple/list of column expressions, or a select() construct.
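
      For example, a minimal sketch using an ad-hoc column(); each element of the list becomes a bind parameter:

      >>> from sqlalchemy.sql import column
      >>> print column('x').in_([1, 2, 3])
      x IN (:x_1, :x_2, :x_3)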

      is_(other)

      Implement the IS operator.

      Normally, IS is generated automatically when comparing to a value of None, which resolves to NULL. However, explicit usage of IS may be desirable if comparing to boolean values on certain platforms.

      New in version 0.7.9.

      isnot(other)

      Implement the IS NOT operator.

      Normally, IS NOT is generated automatically when comparing to a value of None, which resolves to NULL. However, explicit usage of IS NOT may be desirable if comparing to boolean values on certain platforms.

      New in version 0.7.9.

      like(other, escape=None)

      Implement the like operator.

      In a column context, produces the clause a LIKE other.

      E.g.:

      select([sometable]).where(sometable.c.column.like("%foobar%"))
      Parameters:
      • other – expression to be compared
      • escape

        optional escape character, renders the ESCAPE keyword, e.g.:

        somecolumn.like("foo/%bar", escape="/")
      match(other, **kwargs)

      Implements the ‘match’ operator.

      In a column context, this produces a MATCH clause, i.e. MATCH '<other>'. The allowed contents of other are database backend specific.

      notilike(other, escape=None)

      implement the NOT ILIKE operator.

      This is equivalent to using negation with ColumnOperators.ilike(), i.e. ~x.ilike(y).

      New in version 0.8.

      notin_(other)

      implement the NOT IN operator.

      This is equivalent to using negation with ColumnOperators.in_(), i.e. ~x.in_(y).

      New in version 0.8.

      notlike(other, escape=None)

      implement the NOT LIKE operator.

      This is equivalent to using negation with ColumnOperators.like(), i.e. ~x.like(y).

      New in version 0.8.

      nullsfirst()

      Produce a nullsfirst() clause against the parent object.

      nullslast()

      Produce a nullslast() clause against the parent object.

      op(opstring, precedence=0)
      inherited from the op() method of Operators

      produce a generic operator function.

      e.g.:

      somecolumn.op("*")(5)

      produces:

      somecolumn * 5

      This function can also be used to make bitwise operators explicit. For example:

      somecolumn.op('&')(0xff)

      is a bitwise AND of the value in somecolumn.

      Parameters:
      • operator – a string which will be output as the infix operator between this element and the expression passed to the generated function.
      • precedence

        precedence to apply to the operator, when parenthesizing expressions. A lower number will cause the expression to be parenthesized when applied against another operator with higher precedence. The default value of 0 is lower than all operators except for the comma (,) and AS operators. A value of 100 will be higher or equal to all operators, and -100 will be lower than or equal to all operators.

        New in version 0.8: - added the ‘precedence’ argument.

      operate(op, *other, **kwargs)
      inherited from the operate() method of Operators

      Operate on an argument.

      This is the lowest level of operation, raises NotImplementedError by default.

      Overriding this on a subclass can allow common behavior to be applied to all operations. For example, overriding ColumnOperators to apply func.lower() to the left and right side:

      class MyComparator(ColumnOperators):
          def operate(self, op, other):
              return op(func.lower(self), func.lower(other))
      Parameters:
      • op – Operator callable.
      • *other – the ‘other’ side of the operation. Will be a single scalar for most operations.
      • **kwargs – modifiers. These may be passed by special operators such as ColumnOperators.contains().
      reverse_operate(op, other, **kwargs)
      inherited from the reverse_operate() method of Operators

      Reverse operate on an argument.

      Usage is the same as operate().

      startswith(other, **kwargs)

      Implement the startswith operator.

      In a column context, produces the clause LIKE '<other>%'

      timetuple = None

      Hack, allows datetime objects to be compared on the LHS.

      class sqlalchemy.sql.expression.Extract(field, expr, **kwargs)

      Bases: sqlalchemy.sql.expression.ColumnElement

      class sqlalchemy.sql.expression.False_

      Bases: sqlalchemy.sql.expression.ColumnElement

      Represent the false keyword in a SQL statement.

      Public constructor is the false() function.

      class sqlalchemy.sql.expression.Label(name, element, type_=None)

      Bases: sqlalchemy.sql.expression.ColumnElement

      Represents a column label (AS).

      Represent a label, as typically applied to any column-level element using the AS sql keyword.

      This object is constructed from the label() module level function as well as the label() method available on all ColumnElement subclasses.

      class sqlalchemy.sql.expression.Null

      Bases: sqlalchemy.sql.expression.ColumnElement

      Represent the NULL keyword in a SQL statement.

      Public constructor is the null() function.

      class sqlalchemy.sql.expression.Over(func, partition_by=None, order_by=None)

      Bases: sqlalchemy.sql.expression.ColumnElement

      Represent an OVER clause.

      This is a special operator against a so-called “window” function, as well as any aggregate function, which produces results relative to the result set itself. It’s supported only by certain database backends.
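
      For example, a rough sketch using the row_number() window function against an ad-hoc column() (names are illustrative; actual support depends on the backend):

      >>> from sqlalchemy import func
      >>> from sqlalchemy.sql import column
      >>> print func.row_number().over(order_by=column('x'))
      row_number() OVER (ORDER BY x)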

      class sqlalchemy.sql.expression.TextClause(text='', bind=None, bindparams=None, typemap=None, autocommit=None)

      Bases: sqlalchemy.sql.expression.Executable, sqlalchemy.sql.expression.ClauseElement

      Represent a literal SQL text fragment.

      Public constructor is the text() function.

      class sqlalchemy.sql.expression.Tuple(*clauses, **kw)

      Bases: sqlalchemy.sql.expression.ClauseList, sqlalchemy.sql.expression.ColumnElement

      class sqlalchemy.sql.expression.True_

      Bases: sqlalchemy.sql.expression.ColumnElement

      Represent the true keyword in a SQL statement.

      Public constructor is the true() function.

      class sqlalchemy.sql.operators.custom_op(opstring, precedence=0)

      Represent a ‘custom’ operator.

      custom_op is normally instantiated when the ColumnOperators.op() method is used to create a custom operator callable. The class can also be used directly when programmatically constructing expressions. E.g. to represent the “factorial” operation:

      from sqlalchemy.sql import UnaryExpression
      from sqlalchemy.sql import operators
      from sqlalchemy import Numeric
      
      unary = UnaryExpression(table.c.somecolumn,
              modifier=operators.custom_op("!"),
              type_=Numeric)
      class sqlalchemy.sql.operators.Operators

      Base of comparison and logical operators.

      Implements base methods operate() and reverse_operate(), as well as __and__(), __or__(), __invert__().

      Usually is used via its most common subclass ColumnOperators.

      __and__(other)

      Implement the & operator.

      When used with SQL expressions, results in an AND operation, equivalent to and_(), that is:

      a & b

      is equivalent to:

      from sqlalchemy import and_
      and_(a, b)

      Care should be taken when using & regarding operator precedence; & binds more tightly than comparison operators such as ==. The operands should be enclosed in parentheses if they contain further sub expressions:

      (a == 2) & (b == 4)
      __invert__()

      Implement the ~ operator.

      When used with SQL expressions, results in a NOT operation, equivalent to not_(), that is:

      ~a

      is equivalent to:

      from sqlalchemy import not_
      not_(a)
      __or__(other)

      Implement the | operator.

      When used with SQL expressions, results in an OR operation, equivalent to or_(), that is:

      a | b

      is equivalent to:

      from sqlalchemy import or_
      or_(a, b)

      Care should be taken when using | regarding operator precedence; | binds more tightly than comparison operators such as ==. The operands should be enclosed in parentheses if they contain further sub expressions:

      (a == 2) | (b == 4)
      __weakref__

      list of weak references to the object (if defined)

      op(opstring, precedence=0)

      produce a generic operator function.

      e.g.:

      somecolumn.op("*")(5)

      produces:

      somecolumn * 5

      This function can also be used to make bitwise operators explicit. For example:

      somecolumn.op('&')(0xff)

      is a bitwise AND of the value in somecolumn.

      Parameters:
      • operator – a string which will be output as the infix operator between this element and the expression passed to the generated function.
      • precedence

        precedence to apply to the operator, when parenthesizing expressions. A lower number will cause the expression to be parenthesized when applied against another operator with higher precedence. The default value of 0 is lower than all operators except for the comma (,) and AS operators. A value of 100 will be higher or equal to all operators, and -100 will be lower than or equal to all operators.

        New in version 0.8: - added the ‘precedence’ argument.

      operate(op, *other, **kwargs)

      Operate on an argument.

      This is the lowest level of operation, raises NotImplementedError by default.

      Overriding this on a subclass can allow common behavior to be applied to all operations. For example, overriding ColumnOperators to apply func.lower() to the left and right side:

      class MyComparator(ColumnOperators):
          def operate(self, op, other):
              return op(func.lower(self), func.lower(other))
      Parameters:
      • op – Operator callable.
      • *other – the ‘other’ side of the operation. Will be a single scalar for most operations.
      • **kwargs – modifiers. These may be passed by special operators such as ColumnOperators.contains().
      reverse_operate(op, other, **kwargs)

      Reverse operate on an argument.

      Usage is the same as operate().

      class sqlalchemy.sql.expression.UnaryExpression(element, operator=None, modifier=None, type_=None, negate=None)

      Bases: sqlalchemy.sql.expression.ColumnElement

      Define a ‘unary’ expression.

      A unary expression has a single column expression and an operator. The operator can be placed on the left (where it is called the ‘operator’) or right (where it is called the ‘modifier’) of the column expression.

      compare(other, **kw)

      Compare this UnaryExpression against the given ClauseElement.

      SQLAlchemy-0.8.4/doc/core/tutorial.html

      SQL Expression Language Tutorial — SQLAlchemy 0.8 Documentation

      Release: 0.8.4 | Release Date: December 8, 2013

      SQL Expression Language Tutorial

      The SQLAlchemy Expression Language presents a system of representing relational database structures and expressions using Python constructs. These constructs are modeled to resemble those of the underlying database as closely as possible, while providing a modicum of abstraction of the various implementation differences between database backends. While the constructs attempt to represent equivalent concepts between backends with consistent structures, they do not conceal useful concepts that are unique to particular subsets of backends. The Expression Language therefore presents a method of writing backend-neutral SQL expressions, but does not attempt to enforce that expressions are backend-neutral.

      The Expression Language is in contrast to the Object Relational Mapper, which is a distinct API that builds on top of the Expression Language. Whereas the ORM, introduced in Object Relational Tutorial, presents a high level and abstracted pattern of usage, which itself is an example of applied usage of the Expression Language, the Expression Language presents a system of representing the primitive constructs of the relational database directly without opinion.

      While there is overlap among the usage patterns of the ORM and the Expression Language, the similarities are more superficial than they may at first appear. One approaches the structure and content of data from the perspective of a user-defined domain model which is transparently persisted and refreshed from its underlying storage model. The other approaches it from the perspective of literal schema and SQL expression representations which are explicitly composed into messages consumed individually by the database.

      A successful application may be constructed using the Expression Language exclusively, though the application will need to define its own system of translating application concepts into individual database messages and from individual database result sets. Alternatively, an application constructed with the ORM may, in advanced scenarios, make occasional usage of the Expression Language directly in certain areas where specific database interactions are required.

      The following tutorial is in doctest format, meaning each >>> line represents something you can type at a Python command prompt, and the following text represents the expected return value. The tutorial has no prerequisites.

      Version Check

      A quick check to verify that we are on at least version 0.8 of SQLAlchemy:

      >>> import sqlalchemy
      >>> sqlalchemy.__version__ 
      0.8.0

      Connecting

      For this tutorial we will use an in-memory-only SQLite database. This is an easy way to test things without needing to have an actual database defined anywhere. To connect we use create_engine():

      >>> from sqlalchemy import create_engine
      >>> engine = create_engine('sqlite:///:memory:', echo=True)

      The echo flag is a shortcut to setting up SQLAlchemy logging, which is accomplished via Python’s standard logging module. With it enabled, we’ll see all the generated SQL produced. If you are working through this tutorial and want less output generated, set it to False. This tutorial will format the SQL behind a popup window so it doesn’t get in our way; just click the “SQL” links to see what’s being generated.

      Define and Create Tables

      The SQL Expression Language constructs its expressions in most cases against table columns. In SQLAlchemy, a column is most often represented by an object called Column, and in all cases a Column is associated with a Table. A collection of Table objects and their associated child objects is referred to as database metadata. In this tutorial we will explicitly lay out several Table objects, but note that SA can also “import” whole sets of Table objects automatically from an existing database (this process is called table reflection).

      We define our tables all within a catalog called MetaData, using the Table construct, which resembles regular SQL CREATE TABLE statements. We’ll make two tables, one of which represents “users” in an application, and another which represents zero or more “email addresses” for each row in the “users” table:

      >>> from sqlalchemy import Table, Column, Integer, String, MetaData, ForeignKey
      >>> metadata = MetaData()
      >>> users = Table('users', metadata,
      ...     Column('id', Integer, primary_key=True),
      ...     Column('name', String),
      ...     Column('fullname', String),
      ... )
      
      >>> addresses = Table('addresses', metadata,
      ...   Column('id', Integer, primary_key=True),
      ...   Column('user_id', None, ForeignKey('users.id')),
      ...   Column('email_address', String, nullable=False)
      ...  )

      All about how to define Table objects, as well as how to create them from an existing database automatically, is described in Describing Databases with MetaData.

      Next, to tell the MetaData we’d actually like to create our selection of tables for real inside the SQLite database, we use create_all(), passing it the engine instance which points to our database. This will check for the presence of each table first before creating, so it’s safe to call multiple times:

      >>> metadata.create_all(engine)
      

      Note

      Users familiar with the syntax of CREATE TABLE may notice that the VARCHAR columns were generated without a length; on SQLite and Postgresql, this is a valid datatype, but on others, it’s not allowed. So if running this tutorial on one of those databases, and you wish to use SQLAlchemy to issue CREATE TABLE, a “length” may be provided to the String type as below:

      Column('name', String(50))

      The length field on String, as well as similar precision/scale fields available on Integer, Numeric, etc. are not referenced by SQLAlchemy other than when creating tables.

      Additionally, Firebird and Oracle require sequences to generate new primary key identifiers, and SQLAlchemy doesn’t generate or assume these without being instructed. For that, you use the Sequence construct:

      from sqlalchemy import Sequence
      Column('id', Integer, Sequence('user_id_seq'), primary_key=True)

      A full, foolproof Table is therefore:

      users = Table('users', metadata,
         Column('id', Integer, Sequence('user_id_seq'), primary_key=True),
         Column('name', String(50)),
         Column('fullname', String(50)),
         Column('password', String(12))
      )

      We include this more verbose Table construct separately to highlight the difference between a minimal construct geared primarily towards in-Python usage only, versus one that will be used to emit CREATE TABLE statements on a particular set of backends with more stringent requirements.

      Insert Expressions

      The first SQL expression we’ll create is the Insert construct, which represents an INSERT statement. This is typically created relative to its target table:

      >>> ins = users.insert()

      To see a sample of the SQL this construct produces, use the str() function:

      >>> str(ins)
      'INSERT INTO users (id, name, fullname) VALUES (:id, :name, :fullname)'

      Notice above that the INSERT statement names every column in the users table. This can be limited by using the values() method, which establishes the VALUES clause of the INSERT explicitly:

      >>> ins = users.insert().values(name='jack', fullname='Jack Jones')
      >>> str(ins)
      'INSERT INTO users (name, fullname) VALUES (:name, :fullname)'

      Above, while the values method limited the VALUES clause to just two columns, the actual data we placed in values didn’t get rendered into the string; instead we got named bind parameters. As it turns out, our data is stored within our Insert construct, but it typically only comes out when the statement is actually executed; since the data consists of literal values, SQLAlchemy automatically generates bind parameters for them. We can peek at this data for now by looking at the compiled form of the statement:

      >>> ins.compile().params 
      {'fullname': 'Jack Jones', 'name': 'jack'}

      Executing

      The interesting part of an Insert is executing it. In this tutorial, we will generally focus on the most explicit method of executing a SQL construct, and later touch upon some “shortcut” ways to do it. The engine object we created is a repository for database connections capable of issuing SQL to the database. To acquire a connection, we use the connect() method:

      >>> conn = engine.connect()
      >>> conn 
      <sqlalchemy.engine.base.Connection object at 0x...>

      The Connection object represents an actively checked out DBAPI connection resource. Let’s feed it our Insert object and see what happens:

      >>> result = conn.execute(ins)
      
      INSERT INTO users (name, fullname) VALUES (?, ?) ('jack', 'Jack Jones') COMMIT

      So the INSERT statement was now issued to the database, although we got positional “qmark” bind parameters instead of “named” bind parameters in the output. How come ? Because when executed, the Connection used the SQLite dialect to help generate the statement; when we use the str() function, the statement isn’t aware of this dialect, and falls back onto a default which uses named parameters. We can view this manually as follows:

      >>> ins.bind = engine
      >>> str(ins)
      'INSERT INTO users (name, fullname) VALUES (?, ?)'

      What about the result variable we got when we called execute() ? As the SQLAlchemy Connection object references a DBAPI connection, the result, known as a ResultProxy object, is analogous to the DBAPI cursor object. In the case of an INSERT, we can get important information from it, such as the primary key values which were generated from our statement:

      >>> result.inserted_primary_key
      [1]

      The value of 1 was automatically generated by SQLite, but only because we did not specify the id column in our Insert statement; otherwise, our explicit value would have been used. In either case, SQLAlchemy always knows how to get at a newly generated primary key value, even though the method of generating them is different across different databases; each database’s Dialect knows the specific steps needed to determine the correct value (or values; note that inserted_primary_key returns a list so that it supports composite primary keys).

      Executing Multiple Statements

      Our insert example above was intentionally a little drawn out to show some various behaviors of expression language constructs. In the usual case, an Insert statement is compiled against the parameters sent to the execute() method on Connection, so that there’s no need to use the values keyword with Insert. Let’s create a generic Insert statement again and use it in the “normal” way:

      >>> ins = users.insert()
      >>> conn.execute(ins, id=2, name='wendy', fullname='Wendy Williams') 
      
      INSERT INTO users (id, name, fullname) VALUES (?, ?, ?) (2, 'wendy', 'Wendy Williams') COMMIT
      <sqlalchemy.engine.result.ResultProxy object at 0x...>

      Above, because we specified all three columns in the execute() method, the compiled Insert included all three columns. The Insert statement is compiled at execution time based on the parameters we specified; if we specified fewer parameters, the Insert would have fewer entries in its VALUES clause.

      To issue many inserts using DBAPI’s executemany() method, we can send in a list of dictionaries each containing a distinct set of parameters to be inserted, as we do here to add some email addresses:

      >>> conn.execute(addresses.insert(), [ 
      ...    {'user_id': 1, 'email_address' : 'jack@yahoo.com'},
      ...    {'user_id': 1, 'email_address' : 'jack@msn.com'},
      ...    {'user_id': 2, 'email_address' : 'www@www.org'},
      ...    {'user_id': 2, 'email_address' : 'wendy@aol.com'},
      ... ])
      
      INSERT INTO addresses (user_id, email_address) VALUES (?, ?) ((1, 'jack@yahoo.com'), (1, 'jack@msn.com'), (2, 'www@www.org'), (2, 'wendy@aol.com')) COMMIT
      <sqlalchemy.engine.result.ResultProxy object at 0x...>

      Above, we again relied upon SQLite’s automatic generation of primary key identifiers for each addresses row.

      When executing multiple sets of parameters, each dictionary must have the same set of keys; i.e. you can’t have fewer keys in some dictionaries than others. This is because the Insert statement is compiled against the first dictionary in the list, and it’s assumed that all subsequent argument dictionaries are compatible with that statement.

      Selecting

      We began with inserts just so that our test database had some data in it. The more interesting part of the data is selecting it ! We’ll cover UPDATE and DELETE statements later. The primary construct used to generate SELECT statements is the select() function:

      >>> from sqlalchemy.sql import select
      >>> s = select([users])
      >>> result = conn.execute(s)  
      
      SELECT users.id, users.name, users.fullname FROM users ()

      Above, we issued a basic select() call, placing the users table within the COLUMNS clause of the select, and then executing. SQLAlchemy expanded the users table into the set of each of its columns, and also generated a FROM clause for us. The result returned is again a ResultProxy object, which acts much like a DBAPI cursor, including methods such as fetchone() and fetchall(). The easiest way to get rows from it is to just iterate:

      >>> for row in result:
      ...     print row
      (1, u'jack', u'Jack Jones')
      (2, u'wendy', u'Wendy Williams')

      Above, we see that printing each row produces a simple tuple-like result. We have more options for accessing the data in each row. One very common way is through dictionary access, using the string names of columns:

      >>> result = conn.execute(s)
      >>> row = result.fetchone()
      >>> print "name:", row['name'], "; fullname:", row['fullname']
      name: jack ; fullname: Jack Jones

      Integer indexes work as well:

      >>> row = result.fetchone()
      >>> print "name:", row[1], "; fullname:", row[2]
      name: wendy ; fullname: Wendy Williams

      But another way, whose usefulness will become apparent later on, is to use the Column objects directly as keys:

      >>> for row in conn.execute(s):
      ...     print "name:", row[users.c.name], "; fullname:", row[users.c.fullname]
      name: jack ; fullname: Jack Jones
      name: wendy ; fullname: Wendy Williams

      Result sets which have pending rows remaining should be explicitly closed before discarding. While the cursor and connection resources referenced by the ResultProxy will be respectively closed and returned to the connection pool when the object is garbage collected, it’s better to make it explicit as some database APIs are very picky about such things:

      >>> result.close()

      If we’d like to more carefully control the columns which are placed in the COLUMNS clause of the select, we reference individual Column objects from our Table. These are available as named attributes off the c attribute of the Table object:

      >>> s = select([users.c.name, users.c.fullname])
      >>> result = conn.execute(s)
      >>> for row in result:  
      ...     print row
      (u'jack', u'Jack Jones')
      (u'wendy', u'Wendy Williams')

      Let’s observe something interesting about the FROM clause. Whereas the generated statement contains two distinct sections, a “SELECT columns” part and a “FROM table” part, our select() construct only has a list containing columns. How does this work ? Let’s try putting two tables into our select() statement:

      >>> for row in conn.execute(select([users, addresses])):
      ...     print row  
      (1, u'jack', u'Jack Jones', 1, 1, u'jack@yahoo.com')
      (1, u'jack', u'Jack Jones', 2, 1, u'jack@msn.com')
      (1, u'jack', u'Jack Jones', 3, 2, u'www@www.org')
      (1, u'jack', u'Jack Jones', 4, 2, u'wendy@aol.com')
      (2, u'wendy', u'Wendy Williams', 1, 1, u'jack@yahoo.com')
      (2, u'wendy', u'Wendy Williams', 2, 1, u'jack@msn.com')
      (2, u'wendy', u'Wendy Williams', 3, 2, u'www@www.org')
      (2, u'wendy', u'Wendy Williams', 4, 2, u'wendy@aol.com')

      It placed both tables into the FROM clause. But also, it made a real mess. Those who are familiar with SQL joins know that this is a Cartesian product; each row from the users table is produced against each row from the addresses table. So to put some sanity into this statement, we need a WHERE clause. We do that using Select.where():

      >>> s = select([users, addresses]).where(users.c.id == addresses.c.user_id)
      >>> for row in conn.execute(s):
      ...     print row  
      (1, u'jack', u'Jack Jones', 1, 1, u'jack@yahoo.com')
      (1, u'jack', u'Jack Jones', 2, 1, u'jack@msn.com')
      (2, u'wendy', u'Wendy Williams', 3, 2, u'www@www.org')
      (2, u'wendy', u'Wendy Williams', 4, 2, u'wendy@aol.com')

      So that looks a lot better: we added an expression to our select() which had the effect of adding WHERE users.id = addresses.user_id to our statement, and our results were managed down so that the join of users and addresses rows made sense. But let’s look at that expression. It’s using just a Python equality operator between two different Column objects. It should be clear that something is up. Saying 1 == 1 produces True, and 1 == 2 produces False, not a WHERE clause. So let’s see exactly what that expression is doing:

      >>> users.c.id == addresses.c.user_id 
      <sqlalchemy.sql.expression.BinaryExpression object at 0x...>

      Wow, surprise ! This is neither a True nor a False. Well what is it ?

      >>> str(users.c.id == addresses.c.user_id)
      'users.id = addresses.user_id'

      As you can see, the == operator is producing an object that is very much like the Insert and select() objects we’ve made so far, thanks to Python’s __eq__() builtin; you call str() on it and it produces SQL. By now, one can see that everything we are working with is ultimately the same type of object. SQLAlchemy terms the base class of all of these expressions as ColumnElement.

      Operators

      Since we’ve stumbled upon SQLAlchemy’s operator paradigm, let’s go through some of its capabilities. We’ve seen how to equate two columns to each other:

      >>> print users.c.id == addresses.c.user_id
      users.id = addresses.user_id

      If we use a literal value (a literal meaning, not a SQLAlchemy clause object), we get a bind parameter:

      >>> print users.c.id == 7
      users.id = :id_1

      The 7 literal is embedded in the resulting ColumnElement; we can use the same trick we did with the Insert object to see it:

      >>> (users.c.id == 7).compile().params
      {u'id_1': 7}

      Most Python operators, as it turns out, produce a SQL expression here, like equals, not equals, etc.:

      >>> print users.c.id != 7
      users.id != :id_1
      
      >>> # None converts to IS NULL
      >>> print users.c.name == None
      users.name IS NULL
      
      >>> # reverse works too
      >>> print 'fred' > users.c.name
      users.name < :name_1

      If we add two integer columns together, we get an addition expression:

      >>> print users.c.id + addresses.c.id
      users.id + addresses.id

      Interestingly, the type of the Column is important! If we use + with two string based columns (recall we put types like Integer and String on our Column objects at the beginning), we get something different:

      >>> print users.c.name + users.c.fullname
      users.name || users.fullname

      Where || is the string concatenation operator used on most databases. But not all of them. MySQL users, fear not:

      >>> print (users.c.name + users.c.fullname).\
      ...      compile(bind=create_engine('mysql://'))
      concat(users.name, users.fullname)

      The above illustrates the SQL that’s generated for an Engine that’s connected to a MySQL database; the || operator now compiles as MySQL’s concat() function.

      If you have come across an operator which really isn’t available, you can always use the ColumnOperators.op() method; this generates whatever operator you need:

      >>> print users.c.name.op('tiddlywinks')('foo')
      users.name tiddlywinks :name_1

      This function can also be used to make bitwise operators explicit. For example:

      somecolumn.op('&')(0xff)

      is a bitwise AND of the value in somecolumn.

      Operator Customization

      While ColumnOperators.op() is handy to get at a custom operator in a hurry, the Core supports fundamental customization and extension of the operator system at the type level. The behavior of existing operators can be modified on a per-type basis, and new operations can be defined which become available for all column expressions that are part of that particular type. See the section Redefining and Creating New Operators for a description.

      Conjunctions

      We’d like to show off some of our operators inside of select() constructs. But we need to lump them together a little more, so let’s first introduce some conjunctions. Conjunctions are those little words like AND and OR that put things together. We’ll also hit upon NOT. AND, OR and NOT can work from the corresponding and_(), or_() and not_() functions SQLAlchemy provides (notice we also throw in a like()):

      >>> from sqlalchemy.sql import and_, or_, not_
      >>> print and_(
      ...         users.c.name.like('j%'),
      ...         users.c.id == addresses.c.user_id, 
      ...         or_(
      ...              addresses.c.email_address == 'wendy@aol.com',
      ...              addresses.c.email_address == 'jack@yahoo.com'
      ...         ),
      ...         not_(users.c.id > 5)
      ...       )
      users.name LIKE :name_1 AND users.id = addresses.user_id AND
      (addresses.email_address = :email_address_1
         OR addresses.email_address = :email_address_2)
      AND users.id <= :id_1

      And you can also use the re-jiggered bitwise AND, OR and NOT operators, although because of Python operator precedence you have to watch your parentheses:

      >>> print users.c.name.like('j%') & (users.c.id == addresses.c.user_id) &  \
      ...     (
      ...       (addresses.c.email_address == 'wendy@aol.com') | \
      ...       (addresses.c.email_address == 'jack@yahoo.com')
      ...     ) \
      ...     & ~(users.c.id>5) 
      users.name LIKE :name_1 AND users.id = addresses.user_id AND
      (addresses.email_address = :email_address_1
          OR addresses.email_address = :email_address_2)
      AND users.id <= :id_1

      So with all of this vocabulary, let’s select all users who have an email address at AOL or MSN, whose name starts with a letter between “m” and “z”, and we’ll also generate a column containing their full name combined with their email address. We will add two new constructs to this statement, between() and label(). between() produces a BETWEEN clause, and label() is used in a column expression to produce labels using the AS keyword; it’s recommended when selecting from expressions that otherwise would not have a name:

      >>> s = select([(users.c.fullname +
      ...               ", " + addresses.c.email_address).
      ...                label('title')]).\
      ...        where(
      ...           and_(
      ...               users.c.id == addresses.c.user_id,
      ...               users.c.name.between('m', 'z'),
      ...               or_(
      ...                  addresses.c.email_address.like('%@aol.com'),
      ...                  addresses.c.email_address.like('%@msn.com')
      ...               )
      ...           )
      ...        )
      >>> conn.execute(s).fetchall() 
      SELECT users.fullname || ? || addresses.email_address AS title
      FROM users, addresses
      WHERE users.id = addresses.user_id AND users.name BETWEEN ? AND ? AND
      (addresses.email_address LIKE ? OR addresses.email_address LIKE ?)
      (', ', 'm', 'z', '%@aol.com', '%@msn.com')
      [(u'Wendy Williams, wendy@aol.com',)]

      Once again, SQLAlchemy figured out the FROM clause for our statement. In fact it will determine the FROM clause based on all of its other bits; the columns clause, the where clause, and also some other elements which we haven’t covered yet, which include ORDER BY, GROUP BY, and HAVING.

      A shortcut to using and_() is to chain together multiple where() clauses. The above can also be written as:

      >>> s = select([(users.c.fullname +
      ...               ", " + addresses.c.email_address).
      ...                label('title')]).\
      ...        where(users.c.id == addresses.c.user_id).\
      ...        where(users.c.name.between('m', 'z')).\
      ...        where(
      ...               or_(
      ...                  addresses.c.email_address.like('%@aol.com'),
      ...                  addresses.c.email_address.like('%@msn.com')
      ...               )
      ...        )
      >>> conn.execute(s).fetchall() 
      SELECT users.fullname || ? || addresses.email_address AS title
      FROM users, addresses
      WHERE users.id = addresses.user_id AND users.name BETWEEN ? AND ? AND
      (addresses.email_address LIKE ? OR addresses.email_address LIKE ?)
      (', ', 'm', 'z', '%@aol.com', '%@msn.com')
      [(u'Wendy Williams, wendy@aol.com',)]

      The way that we can build up a select() construct through successive method calls is called method chaining.

      Using Text

      Our last example really became a handful to type. Going from what one understands to be a textual SQL expression into a Python construct which groups components together in a programmatic style can be hard. That’s why SQLAlchemy lets you just use strings too. The text() construct represents any textual statement, in a backend-agnostic way. To use bind parameters with text(), always use the named colon format. For example, below we create a text() and execute it, feeding in the bind parameters to the execute() method:

      >>> from sqlalchemy.sql import text
      >>> s = text(
      ...     "SELECT users.fullname || ', ' || addresses.email_address AS title "
      ...         "FROM users, addresses "
      ...         "WHERE users.id = addresses.user_id "
      ...         "AND users.name BETWEEN :x AND :y "
      ...         "AND (addresses.email_address LIKE :e1 "
      ...             "OR addresses.email_address LIKE :e2)")
      >>> conn.execute(s, x='m', y='z', e1='%@aol.com', e2='%@msn.com').fetchall()
      [(u'Wendy Williams, wendy@aol.com',)]

      To gain a “hybrid” approach, the select() construct accepts strings for most of its arguments. Below we combine the usage of strings with our constructed select() object, by using the select() object to structure the statement, and strings to provide all the content within the structure. For this example, SQLAlchemy is not given any Column or Table objects in any of its expressions, so it cannot generate a FROM clause. So we also use the select_from() method, which accepts a FromClause or string expression to be placed within the FROM clause:

      >>> s = select([
      ...            "users.fullname || ', ' || addresses.email_address AS title"
      ...          ]).\
      ...           where(
      ...              and_(
      ...                 "users.id = addresses.user_id",
      ...                 "users.name BETWEEN 'm' AND 'z'",
      ...                 "(addresses.email_address LIKE :x OR addresses.email_address LIKE :y)"
      ...             )
      ...           ).select_from('users, addresses')
      >>> conn.execute(s, x='%@aol.com', y='%@msn.com').fetchall()
      [(u'Wendy Williams, wendy@aol.com',)]

      Going from constructed SQL to text, we lose some capabilities. We lose the capability for SQLAlchemy to compile our expression to a specific target database; above, our expression won’t work with MySQL since it has no || construct. It also becomes more tedious for SQLAlchemy to be made aware of the datatypes in use; for example, if our bind parameters required UTF-8 encoding before going in, or conversion from a Python datetime into a string (as is required with SQLite), we would have to add extra information to our text() construct. Similar issues arise on the result set side, where SQLAlchemy also performs type-specific data conversion in some cases; still more information can be added to text() to work around this. But what we really lose from our statement is the ability to manipulate it, transform it, and analyze it. These features are critical when using the ORM, which makes heavy usage of relational transformations. To show off what we mean, we’ll first introduce the ALIAS construct and the JOIN construct, just so we have some juicier bits to play with.

      Using Aliases

      The alias in SQL corresponds to a “renamed” version of a table or SELECT statement, which occurs anytime you say “SELECT .. FROM sometable AS someothername”. The AS creates a new name for the table. Aliases are a key construct as they allow any table or subquery to be referenced by a unique name. In the case of a table, this allows the same table to be named in the FROM clause multiple times. In the case of a SELECT statement, it provides a parent name for the columns represented by the statement, allowing them to be referenced relative to this name.

      In SQLAlchemy, any Table, select() construct, or other selectable can be turned into an alias using the FromClause.alias() method, which produces an Alias construct. As an example, suppose we know that our user jack has two particular email addresses. How can we locate jack based on the combination of those two addresses? To accomplish this, we’d use a join to the addresses table, once for each address. We create two Alias constructs against addresses, and then use them both within a select() construct:

      >>> a1 = addresses.alias()
      >>> a2 = addresses.alias()
      >>> s = select([users]).\
      ...        where(and_(
      ...            users.c.id == a1.c.user_id,
      ...            users.c.id == a2.c.user_id,
      ...            a1.c.email_address == 'jack@msn.com',
      ...            a2.c.email_address == 'jack@yahoo.com'
      ...        ))
      >>> conn.execute(s).fetchall()
      [(1, u'jack', u'Jack Jones')]

      Note that the Alias construct generated the names addresses_1 and addresses_2 in the final SQL result. The generation of these names is determined by the position of the construct within the statement. If we created a query using only the second a2 alias, the name would come out as addresses_1. The generation of the names is also deterministic, meaning the same SQLAlchemy statement construct will produce the identical SQL string each time it is rendered for a particular dialect.

      Since on the outside, we refer to the alias using the Alias construct itself, we don’t need to be concerned about the generated name. However, for the purposes of debugging, it can be specified by passing a string name to the FromClause.alias() method:

      >>> a1 = addresses.alias('a1')

      Aliases can of course be used for anything which you can SELECT from, including SELECT statements themselves. We can self-join the users table back to the select() we’ve created by making an alias of the entire statement. The correlate(None) directive is to avoid SQLAlchemy’s attempt to “correlate” the inner users table with the outer one:

      >>> a1 = s.correlate(None).alias()
      >>> s = select([users.c.name]).where(users.c.id == a1.c.id)
      >>> conn.execute(s).fetchall()
      [(u'jack',)]

      Using Joins

      We’re halfway along to being able to construct any SELECT expression. The next cornerstone of the SELECT is the JOIN expression. We’ve already been doing joins in our examples, by just placing two tables in either the columns clause or the where clause of the select() construct. But if we want to make a real “JOIN” or “OUTERJOIN” construct, we use the join() and outerjoin() methods, most commonly accessed from the left table in the join:

      >>> print users.join(addresses)
      users JOIN addresses ON users.id = addresses.user_id

      The alert reader will see more surprises; SQLAlchemy figured out how to JOIN the two tables ! The ON condition of the join, as it’s called, was automatically generated based on the ForeignKey object which we placed on the addresses table way at the beginning of this tutorial. Already the join() construct is looking like a much better way to join tables.

      Of course you can join on whatever expression you want, such as if we want to join on all users who use the same name in their email address as their username:

      >>> print users.join(addresses,
      ...                 addresses.c.email_address.like(users.c.name + '%')
      ...             )
      users JOIN addresses ON addresses.email_address LIKE (users.name || :name_1)

      When we create a select() construct, SQLAlchemy looks around at the tables we’ve mentioned and then places them in the FROM clause of the statement. When we use JOINs however, we know what FROM clause we want, so here we make use of the select_from() method:

      >>> s = select([users.c.fullname]).select_from(
      ...    users.join(addresses,
      ...             addresses.c.email_address.like(users.c.name + '%'))
      ...    )
      >>> conn.execute(s).fetchall()
      [(u'Jack Jones',), (u'Jack Jones',), (u'Wendy Williams',)]

      The outerjoin() method creates LEFT OUTER JOIN constructs, and is used in the same way as join():

      >>> s = select([users.c.fullname]).select_from(users.outerjoin(addresses))
      >>> print s  
      SELECT users.fullname
          FROM users
          LEFT OUTER JOIN addresses ON users.id = addresses.user_id

      That’s the output outerjoin() produces, unless, of course, you’re stuck in a gig using Oracle prior to version 9, and you’ve set up your engine (which would be using OracleDialect) to use Oracle-specific SQL:

      >>> from sqlalchemy.dialects.oracle import dialect as OracleDialect
      >>> print s.compile(dialect=OracleDialect(use_ansi=False))  
      SELECT users.fullname
      FROM users, addresses
      WHERE users.id = addresses.user_id(+)

      If you don’t know what that SQL means, don’t worry ! The secret tribe of Oracle DBAs don’t want their black magic being found out ;).

      Everything Else

      The concepts of creating SQL expressions have been introduced. What’s left are more variants of the same themes. So now we’ll catalog the rest of the important things we’ll need to know.

      Bind Parameter Objects

      Throughout all these examples, SQLAlchemy is busy creating bind parameters wherever literal expressions occur. You can also specify your own bind parameters with your own names, and use the same statement repeatedly. The database dialect converts to the appropriate named or positional style, as here where it converts to positional for SQLite:

      >>> from sqlalchemy.sql import bindparam
      >>> s = users.select(users.c.name == bindparam('username'))
      sql>>> conn.execute(s, username='wendy').fetchall() 
      [(2, u'wendy', u'Wendy Williams')]

      Another important aspect of bind parameters is that they may be assigned a type. The type of the bind parameter will determine its behavior within expressions and also how the data bound to it is processed before being sent off to the database:

      >>> s = users.select(users.c.name.like(bindparam('username', type_=String) + text("'%'")))
      sql>>> conn.execute(s, username='wendy').fetchall() 
      [(2, u'wendy', u'Wendy Williams')]

      Bind parameters of the same name can also be used multiple times, where only a single named value is needed in the execute parameters:

      >>> s = select([users, addresses]).\
      ...     where(
      ...        or_(
      ...          users.c.name.like(
      ...                 bindparam('name', type_=String) + text("'%'")),
      ...          addresses.c.email_address.like(
      ...                 bindparam('name', type_=String) + text("'@%'"))
      ...        )
      ...     ).\
      ...     select_from(users.outerjoin(addresses)).\
      ...     order_by(addresses.c.id)
      sql>>> conn.execute(s, name='jack').fetchall() 
      [(1, u'jack', u'Jack Jones', 1, 1, u'jack@yahoo.com'), (1, u'jack', u'Jack Jones', 2, 1, u'jack@msn.com')]

      Functions

      SQL functions are created using the func keyword, which generates functions using attribute access:

      >>> from sqlalchemy.sql import func
      >>> print func.now()
      now()
      
      >>> print func.concat('x', 'y')
      concat(:param_1, :param_2)

      By “generates”, we mean that any SQL function is created based on the word you choose:

      >>> print func.xyz_my_goofy_function() 
      xyz_my_goofy_function()

Certain function names are known by SQLAlchemy, allowing special behavioral rules to be applied. Some for example are “ANSI” functions, which means they don’t get parentheses added after them, such as CURRENT_TIMESTAMP:

      >>> print func.current_timestamp()
      CURRENT_TIMESTAMP

      Functions are most typically used in the columns clause of a select statement, and can also be labeled as well as given a type. Labeling a function is recommended so that the result can be targeted in a result row based on a string name, and assigning it a type is required when you need result-set processing to occur, such as for Unicode conversion and date conversions. Below, we use the result function scalar() to just read the first column of the first row and then close the result; the label, even though present, is not important in this case:

      >>> conn.execute(
      ...     select([
      ...            func.max(addresses.c.email_address, type_=String).
      ...                label('maxemail')
      ...           ])
      ...     ).scalar() 
      
      SELECT max(addresses.email_address) AS maxemail FROM addresses ()
      u'www@www.org'

Databases such as PostgreSQL and Oracle support functions that return whole result sets; these functions can be assembled into selectable units, which can be used in statements. For example, given a database function calculate() which takes the parameters x and y and returns three columns which we’d like to name q, z and r, we can construct it using “lexical” column objects as well as bind parameters:

      >>> from sqlalchemy.sql import column
      >>> calculate = select([column('q'), column('z'), column('r')]).\
      ...        select_from(
      ...             func.calculate(
      ...                    bindparam('x'),
      ...                    bindparam('y')
      ...                )
      ...             )
      >>> calc = calculate.alias()
      >>> print select([users]).where(users.c.id > calc.c.z) 
      SELECT users.id, users.name, users.fullname
      FROM users, (SELECT q, z, r
      FROM calculate(:x, :y)) AS anon_1
      WHERE users.id > anon_1.z

      If we wanted to use our calculate statement twice with different bind parameters, the unique_params() function will create copies for us, and mark the bind parameters as “unique” so that conflicting names are isolated. Note we also make two separate aliases of our selectable:

      >>> calc1 = calculate.alias('c1').unique_params(x=17, y=45)
      >>> calc2 = calculate.alias('c2').unique_params(x=5, y=12)
      >>> s = select([users]).\
      ...         where(users.c.id.between(calc1.c.z, calc2.c.z))
      >>> print s 
      SELECT users.id, users.name, users.fullname
      FROM users,
          (SELECT q, z, r FROM calculate(:x_1, :y_1)) AS c1,
          (SELECT q, z, r FROM calculate(:x_2, :y_2)) AS c2
      WHERE users.id BETWEEN c1.z AND c2.z
      
      >>> s.compile().params
      {u'x_2': 5, u'y_2': 12, u'y_1': 45, u'x_1': 17}

      Window Functions

      Any FunctionElement, including functions generated by func, can be turned into a “window function”, that is an OVER clause, using the over() method:

      >>> s = select([
      ...         users.c.id,
      ...         func.row_number().over(order_by=users.c.name)
      ...     ])
      >>> print s 
      SELECT users.id, row_number() OVER (ORDER BY users.name) AS anon_1
      FROM users

      Unions and Other Set Operations

      Unions come in two flavors, UNION and UNION ALL, which are available via module level functions union() and union_all():

      >>> from sqlalchemy.sql import union
      >>> u = union(
      ...     addresses.select().
      ...             where(addresses.c.email_address == 'foo@bar.com'),
      ...    addresses.select().
      ...             where(addresses.c.email_address.like('%@yahoo.com')),
      ... ).order_by(addresses.c.email_address)
      
      sql>>> conn.execute(u).fetchall() 
      [(1, 1, u'jack@yahoo.com')]

      Also available, though not supported on all databases, are intersect(), intersect_all(), except_(), and except_all():

      >>> from sqlalchemy.sql import except_
      >>> u = except_(
      ...    addresses.select().
      ...             where(addresses.c.email_address.like('%@%.com')),
      ...    addresses.select().
      ...             where(addresses.c.email_address.like('%@msn.com'))
      ... )
      
      sql>>> conn.execute(u).fetchall() 
      [(1, 1, u'jack@yahoo.com'), (4, 2, u'wendy@aol.com')]

A common issue with so-called “compound” selectables arises due to the fact that they nest with parentheses. SQLite in particular doesn’t like a statement that starts with parentheses. So when nesting a “compound” inside a “compound”, it’s often necessary to apply .alias().select() to the first element of the outermost compound, if that element is also a compound. For example, to nest a “union” and a “select” inside of “except_”, SQLite will want the “union” to be stated as a subquery:

      >>> u = except_(
      ...    union(
      ...         addresses.select().
      ...             where(addresses.c.email_address.like('%@yahoo.com')),
      ...         addresses.select().
      ...             where(addresses.c.email_address.like('%@msn.com'))
      ...     ).alias().select(),   # apply subquery here
      ...    addresses.select(addresses.c.email_address.like('%@msn.com'))
      ... )
      sql>>> conn.execute(u).fetchall()   
      [(1, 1, u'jack@yahoo.com')]

      Scalar Selects

      A scalar select is a SELECT that returns exactly one row and one column. It can then be used as a column expression. A scalar select is often a correlated subquery, which relies upon the enclosing SELECT statement in order to acquire at least one of its FROM clauses.

      The select() construct can be modified to act as a column expression by calling either the as_scalar() or label() method:

      >>> stmt = select([func.count(addresses.c.id)]).\
      ...             where(users.c.id == addresses.c.user_id).\
      ...             as_scalar()

      The above construct is now a ScalarSelect object, and is no longer part of the FromClause hierarchy; it instead is within the ColumnElement family of expression constructs. We can place this construct the same as any other column within another select():

      >>> conn.execute(select([users.c.name, stmt])).fetchall()  
      
      SELECT users.name, (SELECT count(addresses.id) AS count_1 FROM addresses WHERE users.id = addresses.user_id) AS anon_1 FROM users ()
      [(u'jack', 2), (u'wendy', 2)]

      To apply a non-anonymous column name to our scalar select, we create it using SelectBase.label() instead:

      >>> stmt = select([func.count(addresses.c.id)]).\
      ...             where(users.c.id == addresses.c.user_id).\
      ...             label("address_count")
      >>> conn.execute(select([users.c.name, stmt])).fetchall()  
      
      SELECT users.name, (SELECT count(addresses.id) AS count_1 FROM addresses WHERE users.id = addresses.user_id) AS address_count FROM users ()
      [(u'jack', 2), (u'wendy', 2)]

      Correlated Subqueries

Notice that in the examples on Scalar Selects, each embedded select did not include the users table in its FROM clause. This is because SQLAlchemy automatically correlates embedded FROM objects to those of an enclosing query, if present, and if the inner SELECT statement would still have at least one FROM clause of its own. For example:

      >>> stmt = select([addresses.c.user_id]).\
      ...             where(addresses.c.user_id == users.c.id).\
      ...             where(addresses.c.email_address == 'jack@yahoo.com')
      >>> enclosing_stmt = select([users.c.name]).where(users.c.id == stmt)
      >>> conn.execute(enclosing_stmt).fetchall()  
      
      SELECT users.name FROM users WHERE users.id = (SELECT addresses.user_id FROM addresses WHERE addresses.user_id = users.id AND addresses.email_address = ?) ('jack@yahoo.com',)
      [(u'jack',)]

      Auto-correlation will usually do what’s expected, however it can also be controlled. For example, if we wanted a statement to correlate only to the addresses table but not the users table, even if both were present in the enclosing SELECT, we use the correlate() method to specify those FROM clauses that may be correlated:

      >>> stmt = select([users.c.id]).\
      ...             where(users.c.id == addresses.c.user_id).\
      ...             where(users.c.name == 'jack').\
      ...             correlate(addresses)
      >>> enclosing_stmt = select(
      ...         [users.c.name, addresses.c.email_address]).\
      ...     select_from(users.join(addresses)).\
      ...     where(users.c.id == stmt)
      >>> conn.execute(enclosing_stmt).fetchall()  
      
      SELECT users.name, addresses.email_address FROM users JOIN addresses ON users.id = addresses.user_id WHERE users.id = (SELECT users.id FROM users WHERE users.id = addresses.user_id AND users.name = ?) ('jack',)
      [(u'jack', u'jack@yahoo.com'), (u'jack', u'jack@msn.com')]

      To entirely disable a statement from correlating, we can pass None as the argument:

      >>> stmt = select([users.c.id]).\
      ...             where(users.c.name == 'wendy').\
      ...             correlate(None)
      >>> enclosing_stmt = select([users.c.name]).\
      ...     where(users.c.id == stmt)
      >>> conn.execute(enclosing_stmt).fetchall()  
      
      SELECT users.name FROM users WHERE users.id = (SELECT users.id FROM users WHERE users.name = ?) ('wendy',)
      [(u'wendy',)]

      We can also control correlation via exclusion, using the Select.correlate_except() method. Such as, we can write our SELECT for the users table by telling it to correlate all FROM clauses except for users:

      >>> stmt = select([users.c.id]).\
      ...             where(users.c.id == addresses.c.user_id).\
      ...             where(users.c.name == 'jack').\
      ...             correlate_except(users)
      >>> enclosing_stmt = select(
      ...         [users.c.name, addresses.c.email_address]).\
      ...     select_from(users.join(addresses)).\
      ...     where(users.c.id == stmt)
      >>> conn.execute(enclosing_stmt).fetchall()  
      
      SELECT users.name, addresses.email_address FROM users JOIN addresses ON users.id = addresses.user_id WHERE users.id = (SELECT users.id FROM users WHERE users.id = addresses.user_id AND users.name = ?) ('jack',)
      [(u'jack', u'jack@yahoo.com'), (u'jack', u'jack@msn.com')]

      Ordering, Grouping, Limiting, Offset...ing...

      Ordering is done by passing column expressions to the order_by() method:

      >>> stmt = select([users.c.name]).order_by(users.c.name)
      >>> conn.execute(stmt).fetchall()  
      
      SELECT users.name FROM users ORDER BY users.name ()
      [(u'jack',), (u'wendy',)]

      Ascending or descending can be controlled using the asc() and desc() modifiers:

      >>> stmt = select([users.c.name]).order_by(users.c.name.desc())
      >>> conn.execute(stmt).fetchall()  
      
      SELECT users.name FROM users ORDER BY users.name DESC ()
      [(u'wendy',), (u'jack',)]

      Grouping refers to the GROUP BY clause, and is usually used in conjunction with aggregate functions to establish groups of rows to be aggregated. This is provided via the group_by() method:

      >>> stmt = select([users.c.name, func.count(addresses.c.id)]).\
      ...             select_from(users.join(addresses)).\
      ...             group_by(users.c.name)
      >>> conn.execute(stmt).fetchall()  
      
      SELECT users.name, count(addresses.id) AS count_1 FROM users JOIN addresses ON users.id = addresses.user_id GROUP BY users.name ()
      [(u'jack', 2), (u'wendy', 2)]

      HAVING can be used to filter results on an aggregate value, after GROUP BY has been applied. It’s available here via the having() method:

      >>> stmt = select([users.c.name, func.count(addresses.c.id)]).\
      ...             select_from(users.join(addresses)).\
      ...             group_by(users.c.name).\
      ...             having(func.length(users.c.name) > 4)
      >>> conn.execute(stmt).fetchall()  
      
      SELECT users.name, count(addresses.id) AS count_1 FROM users JOIN addresses ON users.id = addresses.user_id GROUP BY users.name HAVING length(users.name) > ? (4,)
      [(u'wendy', 2)]

A common system of dealing with duplicates in composed SELECT statements is the DISTINCT modifier. A simple DISTINCT clause can be added using the Select.distinct() method:

      >>> stmt = select([users.c.name]).\
      ...             where(addresses.c.email_address.
      ...                    contains(users.c.name)).\
      ...             distinct()
      >>> conn.execute(stmt).fetchall()  
      
      SELECT DISTINCT users.name FROM users, addresses WHERE addresses.email_address LIKE '%%' || users.name || '%%' ()
      [(u'jack',), (u'wendy',)]

      Most database backends support a system of limiting how many rows are returned, and the majority also feature a means of starting to return rows after a given “offset”. While common backends like Postgresql, MySQL and SQLite support LIMIT and OFFSET keywords, other backends need to refer to more esoteric features such as “window functions” and row ids to achieve the same effect. The limit() and offset() methods provide an easy abstraction into the current backend’s methodology:

      >>> stmt = select([users.c.name, addresses.c.email_address]).\
      ...             select_from(users.join(addresses)).\
      ...             limit(1).offset(1)
      >>> conn.execute(stmt).fetchall()  
      
      SELECT users.name, addresses.email_address FROM users JOIN addresses ON users.id = addresses.user_id LIMIT ? OFFSET ? (1, 1)
      [(u'jack', u'jack@msn.com')]

      Inserts, Updates and Deletes

We’ve seen insert() demonstrated earlier in this tutorial. Where insert() produces INSERT, the update() method produces UPDATE. Both of these constructs feature a method called values() which specifies the VALUES or SET clause of the statement.

      The values() method accommodates any column expression as a value:

      >>> stmt = users.update().\
      ...             values(fullname="Fullname: " + users.c.name)
      >>> conn.execute(stmt) 
      
      UPDATE users SET fullname=(? || users.name) ('Fullname: ',) COMMIT
      <sqlalchemy.engine.result.ResultProxy object at 0x...>

      When using insert() or update() in an “execute many” context, we may also want to specify named bound parameters which we can refer to in the argument list. The two constructs will automatically generate bound placeholders for any column names passed in the dictionaries sent to execute() at execution time. However, if we wish to use explicitly targeted named parameters with composed expressions, we need to use the bindparam() construct. When using bindparam() with insert() or update(), the names of the table’s columns themselves are reserved for the “automatic” generation of bind names. We can combine the usage of implicitly available bind names and explicitly named parameters as in the example below:

      >>> stmt = users.insert().\
      ...         values(name=bindparam('_name') + " .. name")
      >>> conn.execute(stmt, [               
      ...        {'id':4, '_name':'name1'},
      ...        {'id':5, '_name':'name2'},
      ...        {'id':6, '_name':'name3'},
      ...     ])
      
      INSERT INTO users (id, name) VALUES (?, (? || ?)) ((4, 'name1', ' .. name'), (5, 'name2', ' .. name'), (6, 'name3', ' .. name')) COMMIT

      An UPDATE statement is emitted using the update() construct. This works much like an INSERT, except there is an additional WHERE clause that can be specified:

      >>> stmt = users.update().\
      ...             where(users.c.name == 'jack').\
      ...             values(name='ed')
      
      >>> conn.execute(stmt) 
      
      UPDATE users SET name=? WHERE users.name = ? ('ed', 'jack') COMMIT
      <sqlalchemy.engine.result.ResultProxy object at 0x...>

      When using update() in an “execute many” context, we may wish to also use explicitly named bound parameters in the WHERE clause. Again, bindparam() is the construct used to achieve this:

      >>> stmt = users.update().\
      ...             where(users.c.name == bindparam('oldname')).\
      ...             values(name=bindparam('newname'))
      >>> conn.execute(stmt, [
      ...     {'oldname':'jack', 'newname':'ed'},
      ...     {'oldname':'wendy', 'newname':'mary'},
      ...     {'oldname':'jim', 'newname':'jake'},
      ...     ]) 
      
      UPDATE users SET name=? WHERE users.name = ? (('ed', 'jack'), ('mary', 'wendy'), ('jake', 'jim')) COMMIT
      <sqlalchemy.engine.result.ResultProxy object at 0x...>

      Correlated Updates

      A correlated update lets you update a table using selection from another table, or the same table:

      >>> stmt = select([addresses.c.email_address]).\
      ...             where(addresses.c.user_id == users.c.id).\
      ...             limit(1)
      >>> conn.execute(users.update().values(fullname=stmt)) 
      
      UPDATE users SET fullname=(SELECT addresses.email_address FROM addresses WHERE addresses.user_id = users.id LIMIT ? OFFSET ?) (1, 0) COMMIT
      <sqlalchemy.engine.result.ResultProxy object at 0x...>

      Multiple Table Updates

      New in version 0.7.4.

      The Postgresql, Microsoft SQL Server, and MySQL backends all support UPDATE statements that refer to multiple tables. For PG and MSSQL, this is the “UPDATE FROM” syntax, which updates one table at a time, but can reference additional tables in an additional “FROM” clause that can then be referenced in the WHERE clause directly. On MySQL, multiple tables can be embedded into a single UPDATE statement separated by a comma. The SQLAlchemy update() construct supports both of these modes implicitly, by specifying multiple tables in the WHERE clause:

      stmt = users.update().\
              values(name='ed wood').\
              where(users.c.id == addresses.c.id).\
              where(addresses.c.email_address.startswith('ed%'))
      conn.execute(stmt)

      The resulting SQL from the above statement would render as:

      UPDATE users SET name=:name FROM addresses
      WHERE users.id = addresses.id AND
      addresses.email_address LIKE :email_address_1 || '%%'

      When using MySQL, columns from each table can be assigned to in the SET clause directly, using the dictionary form passed to Update.values():

      stmt = users.update().\
              values({
                  users.c.name:'ed wood',
                  addresses.c.email_address:'ed.wood@foo.com'
              }).\
              where(users.c.id == addresses.c.id).\
              where(addresses.c.email_address.startswith('ed%'))

      The tables are referenced explicitly in the SET clause:

      UPDATE users, addresses SET addresses.email_address=%s,
              users.name=%s WHERE users.id = addresses.id
              AND addresses.email_address LIKE concat(%s, '%%')

SQLAlchemy doesn’t do anything special when these constructs are used on a non-supporting database. The UPDATE FROM syntax is generated by default when multiple tables are present, and the statement will be rejected by the database if this syntax is not supported.

      Deletes

      Finally, a delete. This is accomplished easily enough using the delete() construct:

      >>> conn.execute(addresses.delete()) 
      
      DELETE FROM addresses () COMMIT
<sqlalchemy.engine.result.ResultProxy object at 0x...>
>>> conn.execute(users.delete().where(users.c.name > 'm'))
      DELETE FROM users WHERE users.name > ? ('m',) COMMIT
      <sqlalchemy.engine.result.ResultProxy object at 0x...>

      Matched Row Counts

Both update() and delete() are associated with matched row counts. This is a number indicating the number of rows that were matched by the WHERE clause. Note that by “matched”, this includes rows where no UPDATE actually took place. The value is available as rowcount:

      >>> result = conn.execute(users.delete()) 
      
      DELETE FROM users () COMMIT
>>> result.rowcount
1

      Further Reference

      Expression Language Reference: SQL Statements and Expressions API

      Database Metadata Reference: Describing Databases with MetaData

      Engine Reference: Engine Configuration

      Connection Reference: Working with Engines and Connections

      Types Reference: Column and Data Types

      SQLAlchemy-0.8.4/doc/core/types.html0000644000076500000240000063743612251147474020012 0ustar classicstaff00000000000000 Column and Data Types — SQLAlchemy 0.8 Documentation


      Column and Data Types

      SQLAlchemy provides abstractions for most common database data types, and a mechanism for specifying your own custom data types.

      The methods and attributes of type objects are rarely used directly. Type objects are supplied to Table definitions and can be supplied as type hints to functions for occasions where the database driver returns an incorrect type.

>>> users = Table('users', metadata,
...               Column('id', Integer, primary_key=True),
...               Column('login', String(32))
...              )

      SQLAlchemy will use the Integer and String(32) type information when issuing a CREATE TABLE statement and will use it again when reading back rows SELECTed from the database. Functions that accept a type (such as Column()) will typically accept a type class or instance; Integer is equivalent to Integer() with no construction arguments in this case.
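For example, a minimal sketch (the table and column names here are hypothetical, not part of this documentation's examples) showing that passing the type class and a no-argument instance are interchangeable:

from sqlalchemy import Table, Column, Integer, MetaData

metadata = MetaData()

# hypothetical table: Integer (the class) and Integer() (an instance with
# no construction arguments) specify the same column type
accounts = Table('accounts', metadata,
                 Column('id', Integer, primary_key=True),
                 Column('balance', Integer()))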

      Generic Types

Generic types specify a column that can read, write and store a particular type of Python data. SQLAlchemy will choose the best database column type available on the target database when issuing a CREATE TABLE statement. For complete control over which column type is emitted in CREATE TABLE, such as VARCHAR, see SQL Standard Types and the other sections of this chapter.

      class sqlalchemy.types.BigInteger(*args, **kwargs)

      Bases: sqlalchemy.types.Integer

      A type for bigger int integers.

      Typically generates a BIGINT in DDL, and otherwise acts like a normal Integer on the Python side.

      class sqlalchemy.types.Boolean(create_constraint=True, name=None)

      Bases: sqlalchemy.types.TypeEngine, sqlalchemy.types.SchemaType

      A bool datatype.

      Boolean typically uses BOOLEAN or SMALLINT on the DDL side, and on the Python side deals in True or False.

      __init__(create_constraint=True, name=None)

      Construct a Boolean.

      Parameters:
      • create_constraint – defaults to True. If the boolean is generated as an int/smallint, also create a CHECK constraint on the table that ensures 1 or 0 as a value.
      • name – if a CHECK constraint is generated, specify the name of the constraint.
      class sqlalchemy.types.Date(*args, **kwargs)

      Bases: sqlalchemy.types._DateAffinity, sqlalchemy.types.TypeEngine

      A type for datetime.date() objects.

      class sqlalchemy.types.DateTime(timezone=False)

      Bases: sqlalchemy.types._DateAffinity, sqlalchemy.types.TypeEngine

      A type for datetime.datetime() objects.

      Date and time types return objects from the Python datetime module. Most DBAPIs have built in support for the datetime module, with the noted exception of SQLite. In the case of SQLite, date and time types are stored as strings which are then converted back to datetime objects when rows are returned.

      __init__(timezone=False)

      Construct a new DateTime.

      Parameters:timezone – boolean. If True, and supported by the

      backend, will produce ‘TIMESTAMP WITH TIMEZONE’. For backends that don’t support timezone aware timestamps, has no effect.

      class sqlalchemy.types.Enum(*enums, **kw)

      Bases: sqlalchemy.types.String, sqlalchemy.types.SchemaType

      Generic Enum Type.

      The Enum type provides a set of possible string values which the column is constrained towards.

      By default, uses the backend’s native ENUM type if available, else uses VARCHAR + a CHECK constraint.
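As a brief sketch (the table, column, and type name below are hypothetical), an Enum column might be declared like this; the name parameter matters on backends such as Postgresql that create the enum as a named schema construct:

from sqlalchemy import Table, Column, Integer, Enum, MetaData

metadata = MetaData()

# hypothetical table using a named Enum; non-native backends fall back
# to VARCHAR plus a CHECK constraint
tickets = Table('tickets', metadata,
    Column('id', Integer, primary_key=True),
    Column('status', Enum('new', 'open', 'closed', name='ticket_status'))
)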

      See also

      ENUM - PostgreSQL-specific type, which has additional functionality.

      __init__(*enums, **kw)

      Construct an enum.

      Keyword arguments which don’t apply to a specific backend are ignored by that backend.

      Parameters:
      • *enums – string or unicode enumeration labels. If unicode labels are present, the convert_unicode flag is auto-enabled.
      • convert_unicode – Enable unicode-aware bind parameter and result-set processing for this Enum’s data. This is set automatically based on the presence of unicode label strings.
• metadata – Associate this type directly with a MetaData object. For types that exist on the target database as an independent schema construct (Postgresql), this type will be created and dropped within create_all() and drop_all() operations. If the type is not associated with any MetaData object, it will associate itself with each Table in which it is used, and will be created when any of those individual tables are created, after a check is performed for its existence. The type is only dropped when drop_all() is called for that Table object’s metadata, however.
      • name – The name of this type. This is required for Postgresql and any future supported database which requires an explicitly named type, or an explicitly named constraint in order to generate the type and/or a table that uses it.
      • native_enum – Use the database’s native ENUM type when available. Defaults to True. When False, uses VARCHAR + check constraint for all backends.
      • schema

        Schema name of this type. For types that exist on the target database as an independent schema construct (Postgresql), this parameter specifies the named schema in which the type is present.

        Note

        The schema of the Enum type does not by default make use of the schema established on the owning Table. If this behavior is desired, set the inherit_schema flag to True.

      • quote – Force quoting to be on or off on the type’s name. If left as the default of None, the usual schema-level “case sensitive”/”reserved name” rules are used to determine if this type’s name should be quoted.
      • inherit_schema

        When True, the “schema” from the owning Table will be copied to the “schema” attribute of this Enum, replacing whatever value was passed for the schema attribute. This also takes effect when using the Table.tometadata() operation.

        New in version 0.8.

      create(bind=None, checkfirst=False)

      Issue CREATE ddl for this type, if applicable.

      drop(bind=None, checkfirst=False)

      Issue DROP ddl for this type, if applicable.

      class sqlalchemy.types.Float(precision=None, asdecimal=False, **kwargs)

      Bases: sqlalchemy.types.Numeric

      A type for float numbers.

      Returns Python float objects by default, applying conversion as needed.

      __init__(precision=None, asdecimal=False, **kwargs)

      Construct a Float.

      Parameters:
      • precision – the numeric precision for use in DDL CREATE TABLE.
      • asdecimal – the same flag as that of Numeric, but defaults to False. Note that setting this flag to True results in floating point conversion.
      • **kwargs – deprecated. Additional arguments here are ignored by the default Float type. For database specific floats that support additional arguments, see that dialect’s documentation for details, such as sqlalchemy.dialects.mysql.FLOAT.
      class sqlalchemy.types.Integer(*args, **kwargs)

      Bases: sqlalchemy.types._DateAffinity, sqlalchemy.types.TypeEngine

      A type for int integers.

      class sqlalchemy.types.Interval(native=True, second_precision=None, day_precision=None)

      Bases: sqlalchemy.types._DateAffinity, sqlalchemy.types.TypeDecorator

      A type for datetime.timedelta() objects.

      The Interval type deals with datetime.timedelta objects. In PostgreSQL, the native INTERVAL type is used; for others, the value is stored as a date which is relative to the “epoch” (Jan. 1, 1970).

      Note that the Interval type does not currently provide date arithmetic operations on platforms which do not support interval types natively. Such operations usually require transformation of both sides of the expression (such as, conversion of both sides into integer epoch values first) which currently is a manual procedure (such as via func).
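A minimal sketch of declaring and populating an Interval column (table and values are hypothetical):

import datetime
from sqlalchemy import Table, Column, Integer, Interval, MetaData

metadata = MetaData()

jobs = Table('jobs', metadata,
    Column('id', Integer, primary_key=True),
    Column('duration', Interval))

# values are bound and returned as datetime.timedelta objects
ins = jobs.insert().values(duration=datetime.timedelta(hours=2))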

      __init__(native=True, second_precision=None, day_precision=None)

      Construct an Interval object.

      Parameters:
      • native – when True, use the actual INTERVAL type provided by the database, if supported (currently Postgresql, Oracle). Otherwise, represent the interval data as an epoch value regardless.
      • second_precision – For native interval types which support a “fractional seconds precision” parameter, i.e. Oracle and Postgresql
      • day_precision – for native interval types which support a “day precision” parameter, i.e. Oracle.
      coerce_compared_value(op, value)

      See TypeEngine.coerce_compared_value() for a description.

      impl

      alias of DateTime

      class sqlalchemy.types.LargeBinary(length=None)

      Bases: sqlalchemy.types._Binary

      A type for large binary byte data.

      The Binary type generates BLOB or BYTEA when tables are created, and also converts incoming values using the Binary callable provided by each DB-API.

      __init__(length=None)

      Construct a LargeBinary type.

      Parameters:length – optional, a length for the column for use in DDL statements, for those BLOB types that accept a length (i.e. MySQL). It does not produce a small BINARY/VARBINARY type - use the BINARY/VARBINARY types specifically for those. May be safely omitted if no CREATE TABLE will be issued. Certain databases may require a length for use in DDL, and will raise an exception when the CREATE TABLE DDL is issued.
      class sqlalchemy.types.Numeric(precision=None, scale=None, asdecimal=True)

      Bases: sqlalchemy.types._DateAffinity, sqlalchemy.types.TypeEngine

      A type for fixed precision numbers.

      Typically generates DECIMAL or NUMERIC. Returns decimal.Decimal objects by default, applying conversion as needed.

      Note

      The cdecimal library is a high performing alternative to Python’s built-in decimal.Decimal type, which performs very poorly in high volume situations. SQLAlchemy 0.7 is tested against cdecimal and supports it fully. The type is not necessarily supported by DBAPI implementations however, most of which contain an import for plain decimal in their source code, even though some such as psycopg2 provide hooks for alternate adapters. SQLAlchemy imports decimal globally as well. The most straightforward and foolproof way to use “cdecimal” given current DBAPI and Python support is to patch it directly into sys.modules before anything else is imported:

      import sys
      import cdecimal
      sys.modules["decimal"] = cdecimal

      While the global patch is a little ugly, it’s particularly important to use just one decimal library at a time since Python Decimal and cdecimal Decimal objects are not currently compatible with each other:

      >>> import cdecimal
      >>> import decimal
      >>> decimal.Decimal("10") == cdecimal.Decimal("10")
      False

      SQLAlchemy will provide more natural support of cdecimal if and when it becomes a standard part of Python installations and is supported by all DBAPIs.

      __init__(precision=None, scale=None, asdecimal=True)

      Construct a Numeric.

      Parameters:
      • precision – the numeric precision for use in DDL CREATE TABLE.
      • scale – the numeric scale for use in DDL CREATE TABLE.
      • asdecimal – default True. Return whether or not values should be sent as Python Decimal objects, or as floats. Different DBAPIs send one or the other based on datatypes - the Numeric type will ensure that return values are one or the other across DBAPIs consistently.

When using the Numeric type, care should be taken to ensure that the asdecimal setting is appropriate for the DBAPI in use - when Numeric applies a conversion from Decimal->float or float->Decimal, this conversion incurs an additional performance overhead for all result columns received.

DBAPIs that return Decimal natively (e.g. psycopg2) will have better accuracy and higher performance with a setting of True, as the native translation to Decimal reduces the amount of floating-point issues at play, and the Numeric type itself doesn’t need to apply any further conversions. However, another DBAPI which returns floats natively will incur an additional conversion overhead, and is still subject to floating point data loss - in which case asdecimal=False will at least remove the extra conversion overhead.
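As a small illustrative sketch (table and column names hypothetical), a column that opts out of Decimal conversion:

from sqlalchemy import Table, Column, Integer, Numeric, MetaData

metadata = MetaData()

# asdecimal=False asks for plain floats in result rows, avoiding a
# Decimal conversion step for DBAPIs that already return floats
measurements = Table('measurements', metadata,
    Column('id', Integer, primary_key=True),
    Column('reading', Numeric(precision=10, scale=4, asdecimal=False)))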

      class sqlalchemy.types.PickleType(protocol=2, pickler=None, comparator=None)

      Bases: sqlalchemy.types.TypeDecorator

      Holds Python objects, which are serialized using pickle.

      PickleType builds upon the Binary type to apply Python’s pickle.dumps() to incoming objects, and pickle.loads() on the way out, allowing any pickleable Python object to be stored as a serialized binary field.

      To allow ORM change events to propagate for elements associated with PickleType, see Mutation Tracking.
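A short sketch (table and values hypothetical) of storing an arbitrary pickleable object:

from sqlalchemy import Table, Column, Integer, PickleType, MetaData

metadata = MetaData()

sessions = Table('sessions', metadata,
    Column('id', Integer, primary_key=True),
    Column('data', PickleType))

# any pickleable value may be bound; it is pickled on the way in and
# unpickled on the way out
ins = sessions.insert().values(data={'cart': [1, 2, 3]})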

      __init__(protocol=2, pickler=None, comparator=None)

      Construct a PickleType.

      Parameters:
      • protocol – defaults to pickle.HIGHEST_PROTOCOL.
• pickler – defaults to cPickle.pickle or pickle.pickle if cPickle is not available. May be any object with pickle-compatible dumps and loads methods.
      • comparator – a 2-arg callable predicate used to compare values of this type. If left as None, the Python “equals” operator is used to compare values.
      impl

      alias of LargeBinary

      class sqlalchemy.types.SchemaType(**kw)

      Bases: sqlalchemy.events.SchemaEventTarget

      Mark a type as possibly requiring schema-level DDL for usage.

Supports types that must be explicitly created/dropped (i.e. PG ENUM type) as well as types that are complemented by table or schema level constraints, triggers, and other rules.

      SchemaType classes can also be targets for the DDLEvents.before_parent_attach() and DDLEvents.after_parent_attach() events, where the events fire off surrounding the association of the type object with a parent Column.

      See also

      Enum

      Boolean

      adapt(impltype, **kw)
      bind
      copy(**kw)
      create(bind=None, checkfirst=False)

      Issue CREATE ddl for this type, if applicable.

      drop(bind=None, checkfirst=False)

      Issue DROP ddl for this type, if applicable.

      class sqlalchemy.types.SmallInteger(*args, **kwargs)

      Bases: sqlalchemy.types.Integer

      A type for smaller int integers.

      Typically generates a SMALLINT in DDL, and otherwise acts like a normal Integer on the Python side.

      class sqlalchemy.types.String(length=None, collation=None, convert_unicode=False, unicode_error=None, _warn_on_bytestring=False)

      Bases: sqlalchemy.types.Concatenable, sqlalchemy.types.TypeEngine

      The base for all string and character types.

      In SQL, corresponds to VARCHAR. Can also take Python unicode objects and encode to the database’s encoding in bind params (and the reverse for result sets.)

      The length field is usually required when the String type is used within a CREATE TABLE statement, as VARCHAR requires a length on most databases.

      __init__(length=None, collation=None, convert_unicode=False, unicode_error=None, _warn_on_bytestring=False)

      Create a string-holding type.

      Parameters:
      • length – optional, a length for the column for use in DDL and CAST expressions. May be safely omitted if no CREATE TABLE will be issued. Certain databases may require a length for use in DDL, and will raise an exception when the CREATE TABLE DDL is issued if a VARCHAR with no length is included. Whether the value is interpreted as bytes or characters is database specific.
      • collation

        Optional, a column-level collation for use in DDL and CAST expressions. Renders using the COLLATE keyword supported by SQLite, MySQL, and Postgresql. E.g.:

        >>> from sqlalchemy import cast, select, String
        >>> print select([cast('some string', String(collation='utf8'))])
        SELECT CAST(:param_1 AS VARCHAR COLLATE utf8) AS anon_1

        New in version 0.8: Added support for COLLATE to all string types.

      • convert_unicode

        When set to True, the String type will assume that input is to be passed as Python unicode objects, and results returned as Python unicode objects. If the DBAPI in use does not support Python unicode (which is fewer and fewer these days), SQLAlchemy will encode/decode the value, using the value of the encoding parameter passed to create_engine() as the encoding.

        When using a DBAPI that natively supports Python unicode objects, this flag generally does not need to be set. For columns that are explicitly intended to store non-ASCII data, the Unicode or UnicodeText types should be used regardless, which feature the same behavior of convert_unicode but also indicate an underlying column type that directly supports unicode, such as NVARCHAR.

        For the extremely rare case that Python unicode is to be encoded/decoded by SQLAlchemy on a backend that does natively support Python unicode, the value force can be passed here which will cause SQLAlchemy’s encode/decode services to be used unconditionally.

      • unicode_error – Optional, a method to use to handle Unicode conversion errors. Behaves like the errors keyword argument to the standard library’s string.decode() functions. This flag requires that convert_unicode is set to force - otherwise, SQLAlchemy is not guaranteed to handle the task of unicode conversion. Note that this flag adds significant performance overhead to row-fetching operations for backends that already return unicode objects natively (which most DBAPIs do). This flag should only be used as a last resort for reading strings from a column with varied or corrupted encodings.
      class sqlalchemy.types.Text(length=None, collation=None, convert_unicode=False, unicode_error=None, _warn_on_bytestring=False)

      Bases: sqlalchemy.types.String

      A variably sized string type.

      In SQL, usually corresponds to CLOB or TEXT. Can also take Python unicode objects and encode to the database’s encoding in bind params (and the reverse for result sets.) In general, TEXT objects do not have a length; while some databases will accept a length argument here, it will be rejected by others.

      class sqlalchemy.types.Time(timezone=False)

      Bases: sqlalchemy.types._DateAffinity, sqlalchemy.types.TypeEngine

      A type for datetime.time() objects.

      class sqlalchemy.types.Unicode(length=None, **kwargs)

      Bases: sqlalchemy.types.String

      A variable length Unicode string type.

      The Unicode type is a String subclass that assumes input and output as Python unicode data, and in that regard is equivalent to the usage of the convert_unicode flag with the String type. However, unlike plain String, it also implies an underlying column type that is explicitly supporting of non-ASCII data, such as NVARCHAR on Oracle and SQL Server. This can impact the output of CREATE TABLE statements and CAST functions at the dialect level, and can also affect the handling of bound parameters in some specific DBAPI scenarios.

      The encoding used by the Unicode type is usually determined by the DBAPI itself; most modern DBAPIs feature support for Python unicode objects as bound values and result set values, and the encoding should be configured as detailed in the notes for the target DBAPI in the Dialects section.

      For those DBAPIs which do not support, or are not configured to accommodate Python unicode objects directly, SQLAlchemy does the encoding and decoding outside of the DBAPI. The encoding in this scenario is determined by the encoding flag passed to create_engine().

      When using the Unicode type, it is only appropriate to pass Python unicode objects, and not plain str. If a plain str is passed under Python 2, a warning is emitted. If you notice your application emitting these warnings but you’re not sure of the source of them, the Python warnings filter, documented at http://docs.python.org/library/warnings.html, can be used to turn these warnings into exceptions which will illustrate a stack trace:

      import warnings
      warnings.simplefilter('error')

      For an application that wishes to pass plain bytestrings and Python unicode objects to the Unicode type equally, the bytestrings must first be decoded into unicode. The recipe at Coercing Encoded Strings to Unicode illustrates how this is done.

      See also:

      UnicodeText - unlengthed textual counterpart to Unicode.
      __init__(length=None, **kwargs)

      Create a Unicode object.

      Parameters are the same as that of String, with the exception that convert_unicode defaults to True.

      class sqlalchemy.types.UnicodeText(length=None, **kwargs)

      Bases: sqlalchemy.types.Text

      An unbounded-length Unicode string type.

      See Unicode for details on the unicode behavior of this object.

Like Unicode, usage of the UnicodeText type implies a unicode-capable type being used on the backend, such as NCLOB, NTEXT.

      __init__(length=None, **kwargs)

      Create a Unicode-converting Text type.

      Parameters are the same as that of Text, with the exception that convert_unicode defaults to True.

      SQL Standard Types

      The SQL standard types always create database column types of the same name when CREATE TABLE is issued. Some types may not be supported on all databases.
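For instance, a hedged sketch (table name hypothetical) using the uppercase standard types, which render verbatim in DDL:

from sqlalchemy import Table, Column, MetaData
from sqlalchemy.types import INTEGER, VARCHAR, CHAR

metadata = MetaData()

# CREATE TABLE will emit INTEGER, VARCHAR(100) and CHAR(2) as written
countries = Table('countries', metadata,
    Column('id', INTEGER, primary_key=True),
    Column('name', VARCHAR(100)),
    Column('code', CHAR(2)))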

      class sqlalchemy.types.BIGINT(*args, **kwargs)

      Bases: sqlalchemy.types.BigInteger

      The SQL BIGINT type.

      class sqlalchemy.types.BINARY(length=None)

      Bases: sqlalchemy.types._Binary

      The SQL BINARY type.

      class sqlalchemy.types.BLOB(length=None)

      Bases: sqlalchemy.types.LargeBinary

      The SQL BLOB type.

      class sqlalchemy.types.BOOLEAN(create_constraint=True, name=None)

      Bases: sqlalchemy.types.Boolean

      The SQL BOOLEAN type.

      class sqlalchemy.types.CHAR(length=None, collation=None, convert_unicode=False, unicode_error=None, _warn_on_bytestring=False)

      Bases: sqlalchemy.types.String

      The SQL CHAR type.

      class sqlalchemy.types.CLOB(length=None, collation=None, convert_unicode=False, unicode_error=None, _warn_on_bytestring=False)

      Bases: sqlalchemy.types.Text

      The CLOB type.

      This type is found in Oracle and Informix.

      class sqlalchemy.types.DATE(*args, **kwargs)

      Bases: sqlalchemy.types.Date

      The SQL DATE type.

      class sqlalchemy.types.DATETIME(timezone=False)

      Bases: sqlalchemy.types.DateTime

      The SQL DATETIME type.

      class sqlalchemy.types.DECIMAL(precision=None, scale=None, asdecimal=True)

      Bases: sqlalchemy.types.Numeric

      The SQL DECIMAL type.

      class sqlalchemy.types.FLOAT(precision=None, asdecimal=False, **kwargs)

      Bases: sqlalchemy.types.Float

      The SQL FLOAT type.

      sqlalchemy.types.INT

      alias of INTEGER

      class sqlalchemy.types.INTEGER(*args, **kwargs)

      Bases: sqlalchemy.types.Integer

      The SQL INT or INTEGER type.

      class sqlalchemy.types.NCHAR(length=None, **kwargs)

      Bases: sqlalchemy.types.Unicode

      The SQL NCHAR type.

      class sqlalchemy.types.NVARCHAR(length=None, **kwargs)

      Bases: sqlalchemy.types.Unicode

      The SQL NVARCHAR type.

      class sqlalchemy.types.NUMERIC(precision=None, scale=None, asdecimal=True)

      Bases: sqlalchemy.types.Numeric

      The SQL NUMERIC type.

      class sqlalchemy.types.REAL(precision=None, asdecimal=False, **kwargs)

      Bases: sqlalchemy.types.Float

      The SQL REAL type.

      class sqlalchemy.types.SMALLINT(*args, **kwargs)

      Bases: sqlalchemy.types.SmallInteger

      The SQL SMALLINT type.

      class sqlalchemy.types.TEXT(length=None, collation=None, convert_unicode=False, unicode_error=None, _warn_on_bytestring=False)

      Bases: sqlalchemy.types.Text

      The SQL TEXT type.

      class sqlalchemy.types.TIME(timezone=False)

      Bases: sqlalchemy.types.Time

      The SQL TIME type.

      class sqlalchemy.types.TIMESTAMP(timezone=False)

      Bases: sqlalchemy.types.DateTime

      The SQL TIMESTAMP type.

      class sqlalchemy.types.VARBINARY(length=None)

      Bases: sqlalchemy.types._Binary

      The SQL VARBINARY type.

      class sqlalchemy.types.VARCHAR(length=None, collation=None, convert_unicode=False, unicode_error=None, _warn_on_bytestring=False)

      Bases: sqlalchemy.types.String

      The SQL VARCHAR type.

      Vendor-Specific Types

      Database-specific types are also available for import from each database’s dialect module. See the Dialects reference for the database you’re interested in.

      For example, MySQL has a BIGINT type and PostgreSQL has an INET type. To use these, import them from the module explicitly:

      from sqlalchemy.dialects import mysql
      
      table = Table('foo', metadata,
          Column('id', mysql.BIGINT),
          Column('enumerates', mysql.ENUM('a', 'b', 'c'))
      )

      Or some PostgreSQL types:

      from sqlalchemy.dialects import postgresql
      
      table = Table('foo', metadata,
          Column('ipaddress', postgresql.INET),
          Column('elements', postgresql.ARRAY(String))
      )

      Each dialect provides the full set of typenames supported by that backend within its __all__ collection, so that a simple import * or similar will import all supported types as implemented for that backend:

      from sqlalchemy.dialects.postgresql import *
      
      t = Table('mytable', metadata,
                 Column('id', INTEGER, primary_key=True),
                 Column('name', VARCHAR(300)),
                 Column('inetaddr', INET)
      )

      Where above, the INTEGER and VARCHAR types are ultimately from sqlalchemy.types, and INET is specific to the Postgresql dialect.

      Some dialect level types have the same name as the SQL standard type, but also provide additional arguments. For example, MySQL implements the full range of character and string types including additional arguments such as collation and charset:

      from sqlalchemy.dialects.mysql import VARCHAR, TEXT
      
      table = Table('foo', meta,
          Column('col1', VARCHAR(200, collation='binary')),
          Column('col2', TEXT(charset='latin1'))
      )

      Custom Types

      A variety of methods exist to redefine the behavior of existing types as well as to provide new ones.

      Overriding Type Compilation

A frequent need is to force the “string” version of a type, that is the one rendered in a CREATE TABLE statement or other SQL function like CAST, to be changed. For example, an application may want to force the rendering of BINARY for all platforms except for one, in which it wants BLOB to be rendered. Usage of an existing generic type, in this case LargeBinary, is preferred for most use cases. But to control types more accurately, a compilation directive that is per-dialect can be associated with any type:

      from sqlalchemy.ext.compiler import compiles
      from sqlalchemy.types import BINARY
      
      @compiles(BINARY, "sqlite")
      def compile_binary_sqlite(type_, compiler, **kw):
          return "BLOB"

      The above code allows the usage of types.BINARY, which will produce the string BINARY against all backends except SQLite, in which case it will produce BLOB.

      See the section Changing Compilation of Types, a subsection of Custom SQL Constructs and Compilation Extension, for additional examples.

      Augmenting Existing Types

      The TypeDecorator allows the creation of custom types which add bind-parameter and result-processing behavior to an existing type object. It is used when additional in-Python marshaling of data to and from the database is required.

      Note

      The bind- and result-processing of TypeDecorator is in addition to the processing already performed by the hosted type, which is customized by SQLAlchemy on a per-DBAPI basis to perform processing specific to that DBAPI. To change the DBAPI-level processing for an existing type, see the section Replacing the Bind/Result Processing of Existing Types.

      class sqlalchemy.types.TypeDecorator(*args, **kwargs)

      Bases: sqlalchemy.types.TypeEngine

      Allows the creation of types which add additional functionality to an existing type.

      This method is preferred to direct subclassing of SQLAlchemy’s built-in types as it ensures that all required functionality of the underlying type is kept in place.

      Typical usage:

      import sqlalchemy.types as types
      
      class MyType(types.TypeDecorator):
          '''Prefixes Unicode values with "PREFIX:" on the way in and
          strips it off on the way out.
          '''
      
          impl = types.Unicode
      
          def process_bind_param(self, value, dialect):
              return "PREFIX:" + value
      
          def process_result_value(self, value, dialect):
              return value[7:]
      
          def copy(self):
              return MyType(self.impl.length)

      The class-level “impl” attribute is required, and can reference any TypeEngine class. Alternatively, the load_dialect_impl() method can be used to provide different type classes based on the dialect given; in this case, the “impl” variable can reference TypeEngine as a placeholder.
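For example, a rough sketch of a load_dialect_impl() override that picks a dialect-specific implementation (the GUID type below is illustrative, not part of this page):

import sqlalchemy.types as types
from sqlalchemy.dialects import postgresql

class GUID(types.TypeDecorator):
    '''Illustrative only: use Postgresql's UUID type where available,
    fall back to CHAR(32) on other backends.'''

    impl = types.CHAR

    def load_dialect_impl(self, dialect):
        if dialect.name == 'postgresql':
            return dialect.type_descriptor(postgresql.UUID())
        else:
            return dialect.type_descriptor(types.CHAR(32))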

      Types that receive a Python type that isn’t similar to the ultimate type used may want to define the TypeDecorator.coerce_compared_value() method. This is used to give the expression system a hint when coercing Python objects into bind parameters within expressions. Consider this expression:

      mytable.c.somecol + datetime.date(2009, 5, 15)

      Above, if “somecol” is an Integer variant, it makes sense that we’re doing date arithmetic, where above is usually interpreted by databases as adding a number of days to the given date. The expression system does the right thing by not attempting to coerce the “date()” value into an integer-oriented bind parameter.

      However, in the case of TypeDecorator, we are usually changing an incoming Python type to something new - TypeDecorator by default will “coerce” the non-typed side to be the same type as itself. Such as below, we define an “epoch” type that stores a date value as an integer:

      class MyEpochType(types.TypeDecorator):
          impl = types.Integer
      
          epoch = datetime.date(1970, 1, 1)
      
          def process_bind_param(self, value, dialect):
              return (value - self.epoch).days
      
          def process_result_value(self, value, dialect):
              return self.epoch + timedelta(days=value)

      Our expression of somecol + date with the above type will coerce the “date” on the right side to also be treated as MyEpochType.

      This behavior can be overridden via the coerce_compared_value() method, which returns a type that should be used for the value of the expression. Below we set it such that an integer value will be treated as an Integer, and any other value is assumed to be a date and will be treated as a MyEpochType:

      def coerce_compared_value(self, op, value):
          if isinstance(value, int):
              return Integer()
          else:
              return self
      __init__(*args, **kwargs)

      Construct a TypeDecorator.

      Arguments sent here are passed to the constructor of the class assigned to the impl class level attribute, assuming the impl is a callable, and the resulting object is assigned to the self.impl instance attribute (thus overriding the class attribute of the same name).

      If the class level impl is not a callable (the unusual case), it will be assigned to the same instance attribute ‘as-is’, ignoring those arguments passed to the constructor.

      Subclasses can override this to customize the generation of self.impl entirely.

      adapt(cls, **kw)
      inherited from the adapt() method of TypeEngine

      Produce an “adapted” form of this type, given an “impl” class to work with.

      This method is used internally to associate generic types with “implementation” types that are specific to a particular dialect.

      bind_expression(bindvalue)
      inherited from the bind_expression() method of TypeEngine

Given a bind value (i.e. a BindParameter instance), return a SQL expression in its place.

      This is typically a SQL function that wraps the existing bound parameter within the statement. It is used for special data types that require literals being wrapped in some special database function in order to coerce an application-level value into a database-specific format. It is the SQL analogue of the TypeEngine.bind_processor() method.

      The method is evaluated at statement compile time, as opposed to statement construction time.

      Note that this method, when implemented, should always return the exact same structure, without any conditional logic, as it may be used in an executemany() call against an arbitrary number of bound parameter sets.

      See also:

      Applying SQL-level Bind/Result Processing
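As a rough sketch of this hook (shown here on a UserDefinedType, which shares the same bind_expression() hook; the type and SQL function below are illustrative only):

import sqlalchemy.types as types
from sqlalchemy import func

class LowerCasedString(types.UserDefinedType):
    '''Illustrative only: wraps each bound parameter in lower() in the SQL.'''

    def get_col_spec(self):
        return "VARCHAR(100)"

    def bind_expression(self, bindvalue):
        # wrap the bound parameter expression in the SQL lower() function
        return func.lower(bindvalue, type_=self)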

      bind_processor(dialect)

      Provide a bound value processing function for the given Dialect.

      This is the method that fulfills the TypeEngine contract for bound value conversion. TypeDecorator will wrap a user-defined implementation of process_bind_param() here.

User-defined code can override this method directly, though it’s likely best to use process_bind_param() so that the processing provided by self.impl is maintained.

      Parameters:dialect – Dialect instance in use.

      This method is the reverse counterpart to the result_processor() method of this class.

      coerce_compared_value(op, value)

      Suggest a type for a ‘coerced’ Python value in an expression.

      By default, returns self. This method is called by the expression system when an object using this type is on the left or right side of an expression against a plain Python object which does not yet have a SQLAlchemy type assigned:

      expr = table.c.somecolumn + 35

      Where above, if somecolumn uses this type, this method will be called with the value operator.add and 35. The return value is whatever SQLAlchemy type should be used for 35 for this particular operation.

      coerce_to_is_types = (<type 'NoneType'>,)

      Specify those Python types which should be coerced at the expression level to “IS <constant>” when compared using == (and same for IS NOT in conjunction with !=).

      For most SQLAlchemy types, this includes NoneType, as well as bool.

      TypeDecorator modifies this list to only include NoneType, as typedecorator implementations that deal with boolean types are common.

      Custom TypeDecorator classes can override this attribute to return an empty tuple, in which case no values will be coerced to constants.

      New in version 0.8.2: Added TypeDecorator.coerce_to_is_types to allow for easier control of __eq__() and __ne__() operations.
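
      For example, a minimal sketch of disabling this coercion entirely, using a hypothetical decorator (the class name here is illustrative, not part of SQLAlchemy):

      from sqlalchemy.types import TypeDecorator, Boolean

      class LiteralBoolean(TypeDecorator):
          """Hypothetical type: comparisons against True/False render as
          bound parameters rather than being coerced to IS / IS NOT."""

          impl = Boolean

          # an empty tuple disables the "IS <constant>" coercion entirely
          coerce_to_is_types = ()
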
      column_expression(colexpr)
      inherited from the column_expression() method of TypeEngine

      Given a SELECT column expression, return a wrapping SQL expression.

      This is typically a SQL function that wraps a column expression as rendered in the columns clause of a SELECT statement. It is used for special data types that require columns to be wrapped in some special database function in order to coerce the value before being sent back to the application. It is the SQL analogue of the TypeEngine.result_processor() method.

      The method is evaluated at statement compile time, as opposed to statement construction time.

      See also:

      Applying SQL-level Bind/Result Processing

      compare_values(x, y)

      Given two values, compare them for equality.

      By default this calls upon TypeEngine.compare_values() of the underlying “impl”, which in turn usually uses the Python equals operator ==.

      This function is used by the ORM to compare an original-loaded value with an intercepted “changed” value, to determine if a net change has occurred.

      compile(dialect=None)
      inherited from the compile() method of TypeEngine

      Produce a string-compiled form of this TypeEngine.

      When called with no arguments, uses a “default” dialect to produce a string result.

      Parameters:dialect – a Dialect instance.
      copy()

      Produce a copy of this TypeDecorator instance.

      This is a shallow copy and is provided to fulfill part of the TypeEngine contract. It usually does not need to be overridden unless the user-defined TypeDecorator has local state that should be deep-copied.

      dialect_impl(dialect)
      inherited from the dialect_impl() method of TypeEngine

      Return a dialect-specific implementation for this TypeEngine.

      get_dbapi_type(dbapi)

      Return the DBAPI type object represented by this TypeDecorator.

      By default this calls upon TypeEngine.get_dbapi_type() of the underlying “impl”.

      load_dialect_impl(dialect)

      Return a TypeEngine object corresponding to a dialect.

      This is an end-user override hook that can be used to provide differing types depending on the given dialect. It is used by the TypeDecorator implementation of type_engine() to help determine what type should ultimately be returned for a given TypeDecorator.

      By default returns self.impl.

      process_bind_param(value, dialect)

      Receive a bound parameter value to be converted.

      Subclasses override this method to return the value that should be passed along to the underlying TypeEngine object, and from there to the DBAPI execute() method.

      The operation could be anything desired to perform custom behavior, such as transforming or serializing data. This could also be used as a hook for validating logic.

      This operation should be designed with the reverse operation in mind, which would be the process_result_value method of this class.

      Parameters:
      • value – Data to operate upon, of any type expected by this method in the subclass. Can be None.
      • dialect – the Dialect in use.
      process_result_value(value, dialect)

      Receive a result-row column value to be converted.

      Subclasses should implement this method to operate on data fetched from the database.

      Subclasses override this method to return the value that should be passed back to the application, given a value that is already processed by the underlying TypeEngine object, originally from the DBAPI cursor method fetchone() or similar.

      The operation could be anything desired to perform custom behavior, such as transforming or serializing data. This could also be used as a hook for validating logic.

      Parameters:
      • value – Data to operate upon, of any type expected by this method in the subclass. Can be None.
      • dialect – the Dialect in use.

      This operation should be designed to be reversible by the “process_bind_param” method of this class.

      python_type
      inherited from the python_type attribute of TypeEngine

      Return the Python type object expected to be returned by instances of this type, if known.

      Basically, for those types which enforce a return type, or are known across the board to do such for all common DBAPIs (like int for example), this will return that type.

      If a return type is not defined, raises NotImplementedError.

      Note that any type also accommodates NULL in SQL which means you can also get back None from any type in practice.
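
      As a quick sketch, for a type such as Integer that defines a return type:

      from sqlalchemy import Integer

      Integer().python_type   # -> int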

      result_processor(dialect, coltype)

      Provide a result value processing function for the given Dialect.

      This is the method that fulfills the TypeEngine contract for result value conversion. TypeDecorator will wrap a user-defined implementation of process_result_value() here.

      User-defined code can override this method directly, though it’s likely best to use process_result_value() so that the processing provided by self.impl is maintained.

      Parameters:
      • dialect – Dialect instance in use.
      • coltype – An SQLAlchemy data type

      This method is the reverse counterpart to the bind_processor() method of this class.

      type_engine(dialect)

      Return a dialect-specific TypeEngine instance for this TypeDecorator.

      In most cases this returns a dialect-adapted form of the TypeEngine type represented by self.impl. Makes usage of dialect_impl() but also traverses into wrapped TypeDecorator instances. Behavior can be customized here by overriding load_dialect_impl().

      with_variant(type_, dialect_name)
      inherited from the with_variant() method of TypeEngine

      Produce a new type object that will utilize the given type when applied to the dialect of the given name.

      e.g.:

      from sqlalchemy.types import String
      from sqlalchemy.dialects import mysql
      
      s = String()
      
      s = s.with_variant(mysql.VARCHAR(collation='foo'), 'mysql')

      The construction of TypeEngine.with_variant() is always from the “fallback” type to that which is dialect specific. The returned type is an instance of Variant, which itself provides a with_variant() that can be called repeatedly.

      Parameters:
      • type – a TypeEngine that will be selected as a variant from the originating type, when a dialect of the given name is in use.
      • dialect_name – base name of the dialect which uses this type. (i.e. 'postgresql', 'mysql', etc.)

      New in version 0.7.2.

      TypeDecorator Recipes

      A few key TypeDecorator recipes follow.

      Coercing Encoded Strings to Unicode

      A common source of confusion regarding the Unicode type is that it is intended to deal only with Python unicode objects on the Python side, meaning values passed to it as bind parameters must be of the form u'some string' if using Python 2 and not 3. The encoding/decoding functions it performs are only to suit what the DBAPI in use requires, and are primarily a private implementation detail.

      The use case of a type that can safely receive Python bytestrings, that is strings that contain non-ASCII characters and are not u'' objects in Python 2, can be achieved using a TypeDecorator which coerces as needed:

      from sqlalchemy.types import TypeDecorator, Unicode
      
      class CoerceUTF8(TypeDecorator):
          """Safely coerce Python bytestrings to Unicode
          before passing off to the database."""
      
          impl = Unicode
      
          def process_bind_param(self, value, dialect):
              if isinstance(value, str):
                  value = value.decode('utf-8')
              return value
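
      A hypothetical usage sketch (table and column names are illustrative); the column accepts both unicode values and utf-8 encoded bytestrings:

      from sqlalchemy import Table, Column, Integer, MetaData

      metadata = MetaData()

      users = Table('users', metadata,
          Column('id', Integer, primary_key=True),
          Column('name', CoerceUTF8(50))
      )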

      Rounding Numerics

      Some database connectors like those of SQL Server choke if a Decimal is passed with too many decimal places. Here’s a recipe that rounds them down:

      from sqlalchemy.types import TypeDecorator, Numeric
      from decimal import Decimal
      
      class SafeNumeric(TypeDecorator):
          """Adds quantization to Numeric."""
      
          impl = Numeric
      
          def __init__(self, *arg, **kw):
              TypeDecorator.__init__(self, *arg, **kw)
              # quantize to the column's scale (digits after the decimal point)
              self.quantize_int = -self.impl.scale
              self.quantize = Decimal(10) ** self.quantize_int
      
          def process_bind_param(self, value, dialect):
              if isinstance(value, Decimal) and \
                  value.as_tuple()[2] < self.quantize_int:
                  value = value.quantize(self.quantize)
              return value
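
      To illustrate the effect, here is a sketch that exercises the bind-time hook directly, assuming the class above:

      from decimal import Decimal

      t = SafeNumeric(10, 2)

      # a Decimal with too many places is quantized down to the column's scale
      print t.process_bind_param(Decimal("3.14159"), dialect=None)   # 3.14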

      Backend-agnostic GUID Type

      Receives and returns Python uuid() objects. Uses the PG UUID type when using Postgresql, CHAR(32) on other backends, storing them in stringified hex format. Can be modified to store binary in CHAR(16) if desired:

      from sqlalchemy.types import TypeDecorator, CHAR
      from sqlalchemy.dialects.postgresql import UUID
      import uuid
      
      class GUID(TypeDecorator):
          """Platform-independent GUID type.
      
          Uses Postgresql's UUID type, otherwise uses
          CHAR(32), storing as stringified hex values.
      
          """
          impl = CHAR
      
          def load_dialect_impl(self, dialect):
              if dialect.name == 'postgresql':
                  return dialect.type_descriptor(UUID())
              else:
                  return dialect.type_descriptor(CHAR(32))
      
          def process_bind_param(self, value, dialect):
              if value is None:
                  return value
              elif dialect.name == 'postgresql':
                  return str(value)
              else:
                  if not isinstance(value, uuid.UUID):
                      # e.g. a string was passed; coerce to a UUID first
                      return "%.32x" % uuid.UUID(value).int
                  else:
                      # already a uuid.UUID; render as a 32-char hex string
                      return "%.32x" % value.int
      
          def process_result_value(self, value, dialect):
              if value is None:
                  return value
              else:
                  return uuid.UUID(value)
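
      A usage sketch (the table here is hypothetical); a callable default generates a new GUID for each INSERT:

      import uuid
      from sqlalchemy import Table, Column, String, MetaData

      metadata = MetaData()

      widgets = Table('widgets', metadata,
          Column('id', GUID(), primary_key=True, default=uuid.uuid4),
          Column('name', String(50))
      )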

      Marshal JSON Strings

      This type uses Python’s builtin json module to marshal Python data structures to/from JSON. It can be modified to use simplejson or another JSON encoder:

      from sqlalchemy.types import TypeDecorator, VARCHAR
      import json
      
      class JSONEncodedDict(TypeDecorator):
          """Represents an immutable structure as a json-encoded string.
      
          Usage::
      
              JSONEncodedDict(255)
      
          """
      
          impl = VARCHAR
      
          def process_bind_param(self, value, dialect):
              if value is not None:
                  value = json.dumps(value)
      
              return value
      
          def process_result_value(self, value, dialect):
              if value is not None:
                  value = json.loads(value)
              return value

      Note that the ORM by default will not detect “mutability” on such a type - meaning, in-place changes to values will not be detected and will not be flushed. Without further steps, you would instead need to replace the existing value with a new one on each parent object to detect changes. Note that there’s nothing wrong with this, as many applications may not require that the values are ever mutated once created. For those which do have this requirement, support for mutability is best applied using the sqlalchemy.ext.mutable extension - see the example in Mutation Tracking.
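
      As a sketch of that approach, the following mirrors the example in the Mutation Tracking documentation: a dict subclass that emits change events, associated with JSONEncodedDict so that in-place mutations become visible to the ORM:

      from sqlalchemy.ext.mutable import Mutable

      class MutableDict(Mutable, dict):
          @classmethod
          def coerce(cls, key, value):
              """Convert plain dictionaries to MutableDict."""
              if not isinstance(value, MutableDict):
                  if isinstance(value, dict):
                      return MutableDict(value)
                  # this call will raise ValueError
                  return Mutable.coerce(key, value)
              else:
                  return value

          def __setitem__(self, key, value):
              """Detect dictionary set events and emit change events."""
              dict.__setitem__(self, key, value)
              self.changed()

          def __delitem__(self, key):
              """Detect dictionary del events and emit change events."""
              dict.__delitem__(self, key)
              self.changed()

      MutableDict.associate_with(JSONEncodedDict)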

      Replacing the Bind/Result Processing of Existing Types

      Most augmentation of type behavior at the bind/result level is achieved using TypeDecorator. For the rare scenario where the specific processing applied by SQLAlchemy at the DBAPI level needs to be replaced, the SQLAlchemy type can be subclassed directly, and the bind_processor() or result_processor() methods can be overridden. Doing so requires that the adapt() method also be overridden. This method is the mechanism by which SQLAlchemy produces DBAPI-specific type behavior during statement execution. Overriding it allows a copy of the custom type to be used in lieu of a DBAPI-specific type. Below we subclass the types.TIME type to have custom result processing behavior. The process() function will receive value from the DBAPI cursor directly:

      class MySpecialTime(TIME):
          def __init__(self, special_argument):
              super(MySpecialTime, self).__init__()
              self.special_argument = special_argument
      
          def result_processor(self, dialect, coltype):
              import datetime
              time = datetime.time
              def process(value):
                  if value is not None:
                      microseconds = value.microseconds
                      seconds = value.seconds
                      minutes = seconds / 60
                      return time(
                                minutes / 60,
                                minutes % 60,
                                seconds - minutes * 60,
                                microseconds)
                  else:
                      return None
              return process
      
          def adapt(self, impltype):
              return MySpecialTime(self.special_argument)

      Applying SQL-level Bind/Result Processing

      As seen in the sections Augmenting Existing Types and Replacing the Bind/Result Processing of Existing Types, SQLAlchemy allows Python functions to be invoked both when parameters are sent to a statement, as well as when result rows are loaded from the database, to apply transformations to the values as they are sent to or from the database. It is also possible to define these transformations at the SQL level. The rationale here is for cases where only the relational database contains a particular series of functions that are necessary to coerce incoming and outgoing data between an application and persistence format. Examples include using database-defined encryption/decryption functions, as well as stored procedures that handle geographic data. The Postgis extension to Postgresql includes an extensive array of SQL functions that are necessary for coercing data into particular formats.

      Any TypeEngine, UserDefinedType or TypeDecorator subclass can include implementations of TypeEngine.bind_expression() and/or TypeEngine.column_expression(), which when defined to return a non-None value should return a ColumnElement expression to be injected into the SQL statement, either surrounding bound parameters or a column expression. For example, to build a Geometry type which will apply the Postgis function ST_GeomFromText to all outgoing values and the function ST_AsText to all incoming data, we can create our own subclass of UserDefinedType which provides these methods in conjunction with func:

      from sqlalchemy import func
      from sqlalchemy.types import UserDefinedType
      
      class Geometry(UserDefinedType):
          def get_col_spec(self):
              return "GEOMETRY"
      
          def bind_expression(self, bindvalue):
              return func.ST_GeomFromText(bindvalue, type_=self)
      
          def column_expression(self, col):
              return func.ST_AsText(col, type_=self)

      We can apply the Geometry type into Table metadata and use it in a select() construct:

      geometry = Table('geometry', metadata,
                    Column('geom_id', Integer, primary_key=True),
                    Column('geom_data', Geometry)
                  )
      
      print select([geometry]).where(
        geometry.c.geom_data == 'LINESTRING(189412 252431,189631 259122)')

      The resulting SQL embeds both functions as appropriate. ST_AsText is applied to the columns clause so that the return value is run through the function before passing into a result set, and ST_GeomFromText is run on the bound parameter so that the passed-in value is converted:

      SELECT geometry.geom_id, ST_AsText(geometry.geom_data) AS geom_data_1
      FROM geometry
      WHERE geometry.geom_data = ST_GeomFromText(:geom_data_2)

      The TypeEngine.column_expression() method interacts with the mechanics of the compiler such that the SQL expression does not interfere with the labeling of the wrapped expression. Such as, if we rendered a select() against a label() of our expression, the string label is moved to the outside of the wrapped expression:

      print select([geometry.c.geom_data.label('my_data')])

      Output:

      SELECT ST_AsText(geometry.geom_data) AS my_data
      FROM geometry

      For an example of subclassing a built in type directly, we subclass postgresql.BYTEA to provide a PGPString, which will make use of the Postgresql pgcrypto extension to encrypt/decrypt values transparently:

      from sqlalchemy import create_engine, String, select, func, \
              MetaData, Table, Column, type_coerce
      
      from sqlalchemy.dialects.postgresql import BYTEA
      
      class PGPString(BYTEA):
          def __init__(self, passphrase, length=None):
              super(PGPString, self).__init__(length)
              self.passphrase = passphrase
      
          def bind_expression(self, bindvalue):
              # convert the bind's type from PGPString to
              # String, so that it's passed to psycopg2 as is without
              # a dbapi.Binary wrapper
              bindvalue = type_coerce(bindvalue, String)
              return func.pgp_sym_encrypt(bindvalue, self.passphrase)
      
          def column_expression(self, col):
              return func.pgp_sym_decrypt(col, self.passphrase)
      
      metadata = MetaData()
      message = Table('message', metadata,
                      Column('username', String(50)),
                      Column('message',
                          PGPString("this is my passphrase", length=1000)),
                  )
      
      engine = create_engine("postgresql://scott:tiger@localhost/test", echo=True)
      with engine.begin() as conn:
          metadata.create_all(conn)
      
          conn.execute(message.insert(), username="some user",
                                      message="this is my message")
      
          print conn.scalar(
                  select([message.c.message]).\
                      where(message.c.username == "some user")
              )

      The pgp_sym_encrypt and pgp_sym_decrypt functions are applied to the INSERT and SELECT statements:

      INSERT INTO message (username, message)
        VALUES (%(username)s, pgp_sym_encrypt(%(message)s, %(pgp_sym_encrypt_1)s))
        {'username': 'some user', 'message': 'this is my message',
          'pgp_sym_encrypt_1': 'this is my passphrase'}
      
      SELECT pgp_sym_decrypt(message.message, %(pgp_sym_decrypt_1)s) AS message_1
        FROM message
        WHERE message.username = %(username_1)s
        {'pgp_sym_decrypt_1': 'this is my passphrase', 'username_1': 'some user'}

      New in version 0.8: Added the TypeEngine.bind_expression() and TypeEngine.column_expression() methods.

      See also:

      PostGIS Integration

      Redefining and Creating New Operators

      SQLAlchemy Core defines a fixed set of expression operators available to all column expressions. Some of these operations have the effect of overloading Python’s built in operators; examples of such operators include ColumnOperators.__eq__() (table.c.somecolumn == 'foo'), ColumnOperators.__invert__() (~table.c.flag), and ColumnOperators.__add__() (table.c.x + table.c.y). Other operators are exposed as explicit methods on column expressions, such as ColumnOperators.in_() (table.c.value.in_(['x', 'y'])) and ColumnOperators.like() (table.c.value.like('%ed%')).

      The Core expression constructs in all cases consult the type of the expression in order to determine the behavior of existing operators, as well as to locate additional operators that aren’t part of the built in set. The TypeEngine base class defines a root “comparison” implementation TypeEngine.Comparator, and many specific types provide their own sub-implementations of this class. User-defined TypeEngine.Comparator implementations can be built directly into a simple subclass of a particular type in order to override or define new operations. Below, we create an Integer subclass which overrides the ColumnOperators.__add__() operator:

      from sqlalchemy import Integer
      
      class MyInt(Integer):
          class comparator_factory(Integer.Comparator):
              def __add__(self, other):
                  return self.op("goofy")(other)

      The above configuration creates a new class MyInt, which establishes the TypeEngine.comparator_factory attribute as referring to a new class, subclassing the TypeEngine.Comparator class associated with the Integer type.

      Usage:

      >>> sometable = Table("sometable", metadata, Column("data", MyInt))
      >>> print sometable.c.data + 5
      sometable.data goofy :data_1

      The implementation for ColumnOperators.__add__() is consulted by an owning SQL expression, by instantiating the TypeEngine.Comparator with itself as the expr attribute. The mechanics of the expression system are such that operations continue recursively until an expression object produces a new SQL expression construct. Above, we could just as well have said self.expr.op("goofy")(other) instead of self.op("goofy")(other).

      New methods added to a TypeEngine.Comparator are exposed on an owning SQL expression using a __getattr__ scheme, which exposes methods added to TypeEngine.Comparator onto the owning ColumnElement. For example, to add a log() function to integers:

      from sqlalchemy import Integer, func
      
      class MyInt(Integer):
          class comparator_factory(Integer.Comparator):
              def log(self, other):
                  return func.log(self.expr, other)

      Using the above type:

      >>> print sometable.c.data.log(5)
      log(:log_1, :log_2)

      Unary operations are also possible. For example, to add an implementation of the Postgresql factorial operator, we combine the UnaryExpression construct along with a custom_op to produce the factorial expression:

      from sqlalchemy import Integer
      from sqlalchemy.sql.expression import UnaryExpression
      from sqlalchemy.sql import operators
      
      class MyInteger(Integer):
          class comparator_factory(Integer.Comparator):
              def factorial(self):
                  return UnaryExpression(self.expr,
                              modifier=operators.custom_op("!"),
                              type_=MyInteger)

      Using the above type:

      >>> from sqlalchemy.sql import column
      >>> print column('x', MyInteger).factorial()
      x !

      See also:

      TypeEngine.comparator_factory

      New in version 0.8: The expression system was enhanced to support customization of operators on a per-type level.

      Creating New Types

      The UserDefinedType class is provided as a simple base class for defining entirely new database types. Use this to represent native database types not known by SQLAlchemy. If only Python translation behavior is needed, use TypeDecorator instead.

      class sqlalchemy.types.UserDefinedType(*args, **kwargs)

      Bases: sqlalchemy.types.TypeEngine

      Base for user defined types.

      This should be the base of new types. Note that for most cases, TypeDecorator is probably more appropriate:

      import sqlalchemy.types as types
      
      class MyType(types.UserDefinedType):
          def __init__(self, precision = 8):
              self.precision = precision
      
          def get_col_spec(self):
              return "MYTYPE(%s)" % self.precision
      
          def bind_processor(self, dialect):
              def process(value):
                  return value
              return process
      
          def result_processor(self, dialect, coltype):
              def process(value):
                  return value
              return process

      Once the type is made, it’s immediately usable:

      table = Table('foo', meta,
          Column('id', Integer, primary_key=True),
          Column('data', MyType(16))
          )

      coerce_compared_value(op, value)

      Suggest a type for a ‘coerced’ Python value in an expression.

      Default behavior for UserDefinedType is the same as that of TypeDecorator; by default it returns self, assuming the compared value should be coerced into the same type as this one. See TypeDecorator.coerce_compared_value() for more detail.

      Changed in version 0.8: UserDefinedType.coerce_compared_value() now returns self by default, rather than falling onto the more fundamental behavior of TypeEngine.coerce_compared_value().

      Base Type API

      class sqlalchemy.types.AbstractType

      Bases: sqlalchemy.sql.visitors.Visitable

      Base for all types - not needed except for backwards compatibility.

      class sqlalchemy.types.TypeEngine(*args, **kwargs)

      Bases: sqlalchemy.types.AbstractType

      Base for built-in types.

      class Comparator(expr)

      Bases: sqlalchemy.sql.expression._DefaultColumnComparator

      Base class for custom comparison operations defined at the type level. See TypeEngine.comparator_factory.

      The public base class for TypeEngine.Comparator is ColumnOperators.

      TypeEngine.__init__(*args, **kwargs)

      Support implementations that were passing arguments

      TypeEngine.adapt(cls, **kw)

      Produce an “adapted” form of this type, given an “impl” class to work with.

      This method is used internally to associate generic types with “implementation” types that are specific to a particular dialect.

      TypeEngine.bind_expression(bindvalue)

      Given a bind value (i.e. a BindParameter instance), return a SQL expression in its place.

      This is typically a SQL function that wraps the existing bound parameter within the statement. It is used for special data types that require literals being wrapped in some special database function in order to coerce an application-level value into a database-specific format. It is the SQL analogue of the TypeEngine.bind_processor() method.

      The method is evaluated at statement compile time, as opposed to statement construction time.

      Note that this method, when implemented, should always return the exact same structure, without any conditional logic, as it may be used in an executemany() call against an arbitrary number of bound parameter sets.

      See also:

      Applying SQL-level Bind/Result Processing

      TypeEngine.bind_processor(dialect)

      Return a conversion function for processing bind values.

      Returns a callable which will receive a bind parameter value as the sole positional argument and will return a value to send to the DB-API.

      If processing is not necessary, the method should return None.

      Parameters:dialect – Dialect instance in use.
      TypeEngine.coerce_compared_value(op, value)

      Suggest a type for a ‘coerced’ Python value in an expression.

      Given an operator and value, gives the type a chance to return a type which the value should be coerced into.

      The default behavior here is conservative; if the right-hand side is already coerced into a SQL type based on its Python type, it is usually left alone.

      End-user functionality extension here should generally be via TypeDecorator, which provides more liberal behavior in that it defaults to coercing the other side of the expression into this type, thus applying special Python conversions above and beyond those needed by the DBAPI to both sides. It also provides the public method TypeDecorator.coerce_compared_value() which is intended for end-user customization of this behavior.

      TypeEngine.column_expression(colexpr)

      Given a SELECT column expression, return a wrapping SQL expression.

      This is typically a SQL function that wraps a column expression as rendered in the columns clause of a SELECT statement. It is used for special data types that require columns to be wrapped in some special database function in order to coerce the value before being sent back to the application. It is the SQL analogue of the TypeEngine.result_processor() method.

      The method is evaluated at statement compile time, as opposed to statement construction time.

      See also:

      Applying SQL-level Bind/Result Processing

      TypeEngine.comparator_factory

      Bases: sqlalchemy.sql.expression._DefaultColumnComparator

      A TypeEngine.Comparator class which will apply to operations performed by owning ColumnElement objects.

      The comparator_factory attribute is a hook consulted by the core expression system when column and SQL expression operations are performed. When a TypeEngine.Comparator class is associated with this attribute, it allows custom re-definition of all existing operators, as well as definition of new operators. Existing operators include those provided by Python operator overloading such as operators.ColumnOperators.__add__() and operators.ColumnOperators.__eq__(), as well as those provided as standard attributes of operators.ColumnOperators such as operators.ColumnOperators.like() and operators.ColumnOperators.in_().

      Rudimentary usage of this hook is allowed through simple subclassing of existing types, or alternatively by using TypeDecorator. See the documentation section Redefining and Creating New Operators for examples.

      New in version 0.8: The expression system was enhanced to support customization of operators on a per-type level.

      alias of Comparator

      TypeEngine.compare_values(x, y)

      Compare two values for equality.

      TypeEngine.compile(dialect=None)

      Produce a string-compiled form of this TypeEngine.

      When called with no arguments, uses a “default” dialect to produce a string result.

      Parameters:dialect – a Dialect instance.
      TypeEngine.dialect_impl(dialect)

      Return a dialect-specific implementation for this TypeEngine.

      TypeEngine.get_dbapi_type(dbapi)

      Return the corresponding type object from the underlying DB-API, if any.

      This can be useful for calling setinputsizes(), for example.
      TypeEngine.hashable = True

      Flag, if False, means values from this type aren’t hashable.

      Used by the ORM when uniquing result lists.
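
      For example, a hypothetical decorator whose Python-side values are dictionaries (which cannot be hashed) might disable uniquing as follows (sketch only):

      from sqlalchemy.types import TypeDecorator, VARCHAR

      class JSONishType(TypeDecorator):
          impl = VARCHAR

          # result values are dicts, which the ORM should not try to hash
          hashable = False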

      TypeEngine.python_type

      Return the Python type object expected to be returned by instances of this type, if known.

      Basically, for those types which enforce a return type, or are known across the board to do such for all common DBAPIs (like int for example), this will return that type.

      If a return type is not defined, raises NotImplementedError.

      Note that any type also accommodates NULL in SQL which means you can also get back None from any type in practice.

      TypeEngine.result_processor(dialect, coltype)

      Return a conversion function for processing result row values.

      Returns a callable which will receive a result row column value as the sole positional argument and will return a value to return to the user.

      If processing is not necessary, the method should return None.

      Parameters:
      • dialect – Dialect instance in use.
      • coltype – DBAPI coltype argument received in cursor.description.
      TypeEngine.with_variant(type_, dialect_name)

      Produce a new type object that will utilize the given type when applied to the dialect of the given name.

      e.g.:

      from sqlalchemy.types import String
      from sqlalchemy.dialects import mysql
      
      s = String()
      
      s = s.with_variant(mysql.VARCHAR(collation='foo'), 'mysql')

      The construction of TypeEngine.with_variant() is always from the “fallback” type to that which is dialect specific. The returned type is an instance of Variant, which itself provides a with_variant() that can be called repeatedly.

      Parameters:
      • type – a TypeEngine that will be selected as a variant from the originating type, when a dialect of the given name is in use.
      • dialect_name – base name of the dialect which uses this type. (i.e. 'postgresql', 'mysql', etc.)

      New in version 0.7.2.

      class sqlalchemy.types.Concatenable

      A mixin that marks a type as supporting ‘concatenation’, typically strings.

      __init__
      inherited from the __init__ attribute of object

      x.__init__(...) initializes x; see help(type(x)) for signature

      class sqlalchemy.types.NullType(*args, **kwargs)

      Bases: sqlalchemy.types.TypeEngine

      An unknown type.

      NullTypes will stand in if Table reflection encounters a column data type unknown to SQLAlchemy. The resulting columns are nearly fully usable: the DB-API adapter will handle all translation to and from the database data type.

      NullType does not have sufficient information to participate in a CREATE TABLE statement and will raise an exception if encountered during a create() operation.

      class sqlalchemy.types.Variant(base, mapping)

      Bases: sqlalchemy.types.TypeDecorator

      A wrapping type that selects among a variety of implementations based on dialect in use.

      The Variant type is typically constructed using the TypeEngine.with_variant() method.

      New in version 0.7.2.

      Members :with_variant, __init__
      SQLAlchemy-0.8.4/doc/dialects/0000755000076500000240000000000012251151573016570 5ustar classicstaff00000000000000SQLAlchemy-0.8.4/doc/dialects/drizzle.html0000644000076500000240000007047212251147474021160 0ustar classicstaff00000000000000 Drizzle — SQLAlchemy 0.8 Documentation


      Drizzle

      Support for the Drizzle database.

      DBAPI Support

      The following dialect/DBAPI options are available. Please refer to individual DBAPI sections for connect information.

      Drizzle is a variant of MySQL. Unlike MySQL, Drizzle’s default storage engine is InnoDB (transactions, foreign-keys) rather than MyISAM. For more Notable Differences, visit the Drizzle Documentation.

      The SQLAlchemy Drizzle dialect leans heavily on the MySQL dialect, so much of the SQLAlchemy MySQL documentation is also relevant.

      Drizzle Data Types

      As with all SQLAlchemy dialects, all UPPERCASE types that are known to be valid with Drizzle are importable from the top level dialect:

      from sqlalchemy.dialects.drizzle import \
              BIGINT, BINARY, BLOB, BOOLEAN, CHAR, DATE, DATETIME,
              DECIMAL, DOUBLE, ENUM, FLOAT, INT, INTEGER,
              NUMERIC, TEXT, TIME, TIMESTAMP, VARBINARY, VARCHAR

      Types which are specific to Drizzle, or have Drizzle-specific construction arguments, are as follows:

      class sqlalchemy.dialects.drizzle.BIGINT(**kw)

      Bases: sqlalchemy.types.BIGINT

      Drizzle BIGINTEGER type.

      __init__(**kw)

      Construct a BIGINTEGER.

      class sqlalchemy.dialects.drizzle.CHAR(length=None, **kwargs)

      Bases: sqlalchemy.dialects.drizzle.base._StringType, sqlalchemy.types.CHAR

      Drizzle CHAR type, for fixed-length character data.

      __init__(length=None, **kwargs)

      Construct a CHAR.

      Parameters:
      • length – Maximum data length, in characters.
      • binary – Optional, use the default binary collation for the national character set. This does not affect the type of data stored, use a BINARY type for binary data.
      • collation – Optional, request a particular collation. Must be compatible with the national character set.
      class sqlalchemy.dialects.drizzle.DECIMAL(precision=None, scale=None, asdecimal=True, **kw)

      Bases: sqlalchemy.dialects.drizzle.base._NumericType, sqlalchemy.types.DECIMAL

      Drizzle DECIMAL type.

      __init__(precision=None, scale=None, asdecimal=True, **kw)

      Construct a DECIMAL.

      Parameters:
      • precision – Total digits in this number. If scale and precision are both None, values are stored to limits allowed by the server.
      • scale – The number of digits after the decimal point.
      class sqlalchemy.dialects.drizzle.DOUBLE(precision=None, scale=None, asdecimal=True, **kw)

      Bases: sqlalchemy.dialects.drizzle.base._FloatType

      Drizzle DOUBLE type.

      __init__(precision=None, scale=None, asdecimal=True, **kw)

      Construct a DOUBLE.

      Parameters:
      • precision – Total digits in this number. If scale and precision are both None, values are stored to limits allowed by the server.
      • scale – The number of digits after the decimal point.
      class sqlalchemy.dialects.drizzle.ENUM(*enums, **kw)

      Bases: sqlalchemy.dialects.mysql.base.ENUM

      Drizzle ENUM type.

      __init__(*enums, **kw)

      Construct an ENUM.

      Example:

      Column('myenum', ENUM("foo", "bar", "baz"))
      Parameters:
      • enums – The range of valid values for this ENUM. Values will be quoted when generating the schema according to the quoting flag (see below).
      • strict – Defaults to False: ensure that a given value is in this ENUM’s range of permissible values when inserting or updating rows. Note that Drizzle will not raise a fatal error if you attempt to store an out-of-range value; an alternate value will be stored instead. (See Drizzle ENUM documentation.)
      • collation – Optional, a column-level collation for this string value. Takes precedence to ‘binary’ short-hand.
      • binary – Defaults to False: short-hand, pick the binary collation type that matches the column’s character set. Generates BINARY in schema. This does not affect the type of data stored, only the collation of character data.
      • quoting

        Defaults to ‘auto’: automatically determine enum value quoting. If all enum values are surrounded by the same quoting character, then use ‘quoted’ mode. Otherwise, use ‘unquoted’ mode.

        ‘quoted’: values in enums are already quoted, they will be used directly when generating the schema - this usage is deprecated.

        ‘unquoted’: values in enums are not quoted, they will be escaped and surrounded by single quotes when generating the schema.

        Previous versions of this type always required manually quoted values to be supplied; future versions will always quote the string literals for you. This is a transitional option.

      class sqlalchemy.dialects.drizzle.FLOAT(precision=None, scale=None, asdecimal=False, **kw)

      Bases: sqlalchemy.dialects.drizzle.base._FloatType, sqlalchemy.types.FLOAT

      Drizzle FLOAT type.

      __init__(precision=None, scale=None, asdecimal=False, **kw)

      Construct a FLOAT.

      Parameters:
      • precision – Total digits in this number. If scale and precision are both None, values are stored to limits allowed by the server.
      • scale – The number of digits after the decimal point.
      class sqlalchemy.dialects.drizzle.INTEGER(**kw)

      Bases: sqlalchemy.types.INTEGER

      Drizzle INTEGER type.

      __init__(**kw)

      Construct an INTEGER.

      class sqlalchemy.dialects.drizzle.NUMERIC(precision=None, scale=None, asdecimal=True, **kw)

      Bases: sqlalchemy.dialects.drizzle.base._NumericType, sqlalchemy.types.NUMERIC

      Drizzle NUMERIC type.

      __init__(precision=None, scale=None, asdecimal=True, **kw)

      Construct a NUMERIC.

      Parameters:
      • precision – Total digits in this number. If scale and precision are both None, values are stored to limits allowed by the server.
      • scale – The number of digits after the decimal point.
      class sqlalchemy.dialects.drizzle.REAL(precision=None, scale=None, asdecimal=True, **kw)

      Bases: sqlalchemy.dialects.drizzle.base._FloatType, sqlalchemy.types.REAL

      Drizzle REAL type.

      __init__(precision=None, scale=None, asdecimal=True, **kw)

      Construct a REAL.

      Parameters:
      • precision – Total digits in this number. If scale and precision are both None, values are stored to limits allowed by the server.
      • scale – The number of digits after the decimal point.
      class sqlalchemy.dialects.drizzle.TEXT(length=None, **kw)

      Bases: sqlalchemy.dialects.drizzle.base._StringType, sqlalchemy.types.TEXT

      Drizzle TEXT type, for text up to 2^16 characters.

      __init__(length=None, **kw)

      Construct a TEXT.

      Parameters:
      • length – Optional, if provided the server may optimize storage by substituting the smallest TEXT type sufficient to store length characters.
      • collation – Optional, a column-level collation for this string value. Takes precedence to ‘binary’ short-hand.
      • binary – Defaults to False: short-hand, pick the binary collation type that matches the column’s character set. Generates BINARY in schema. This does not affect the type of data stored, only the collation of character data.
      class sqlalchemy.dialects.drizzle.TIMESTAMP(timezone=False)

      Bases: sqlalchemy.types.TIMESTAMP

      Drizzle TIMESTAMP type.

      __init__(timezone=False)

      Construct a new DateTime.

      Parameters:timezone – boolean. If True, and supported by the backend, will produce ‘TIMESTAMP WITH TIMEZONE’. For backends that don’t support timezone aware timestamps, has no effect.

      class sqlalchemy.dialects.drizzle.VARCHAR(length=None, **kwargs)

      Bases: sqlalchemy.dialects.drizzle.base._StringType, sqlalchemy.types.VARCHAR

      Drizzle VARCHAR type, for variable-length character data.

      __init__(length=None, **kwargs)

      Construct a VARCHAR.

      Parameters:
      • collation – Optional, a column-level collation for this string value. Takes precedence to ‘binary’ short-hand.
      • binary – Defaults to False: short-hand, pick the binary collation type that matches the column’s character set. Generates BINARY in schema. This does not affect the type of data stored, only the collation of character data.

      MySQL-Python

      Support for the Drizzle database via the MySQL-Python driver.

      DBAPI

      Documentation and download information (if applicable) for MySQL-Python is available at: http://sourceforge.net/projects/mysql-python

      Connecting

      Connect String:

      drizzle+mysqldb://<user>:<password>@<host>[:<port>]/<dbname>

      SQLAlchemy-0.8.4/doc/dialects/firebird.html


      Firebird

      Support for the Firebird database.

      DBAPI Support

      The following dialect/DBAPI options are available. Please refer to individual DBAPI sections for connect information.

      Firebird Dialects

      Firebird offers two distinct dialects (not to be confused with a SQLAlchemy Dialect):

      dialect 1
      This is the old syntax and behaviour, inherited from Interbase pre-6.0.
      dialect 3
      This is the newer and supported syntax, introduced in Interbase 6.0.

      The SQLAlchemy Firebird dialect detects these versions and adjusts its representation of SQL accordingly. However, support for dialect 1 is not well tested and probably has incompatibilities.

      Locking Behavior

      Firebird locks tables aggressively. For this reason, a DROP TABLE may hang until other transactions are released. SQLAlchemy does its best to release transactions as quickly as possible. The most common cause of hanging transactions is a non-fully consumed result set, i.e.:

      result = engine.execute("select * from table")
      row = result.fetchone()
      return

      Where above, the ResultProxy has not been fully consumed. The connection will be returned to the pool and the transactional state rolled back once the Python garbage collector reclaims the objects which hold onto the connection, which often occurs asynchronously. The above use case can be alleviated by calling first() on the ResultProxy which will fetch the first row and immediately close all remaining cursor/connection resources.
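
      For example, the following hypothetical rewrite consumes the result immediately:

      # first() fetches a single row and closes the remaining
      # cursor/connection resources right away
      row = engine.execute("select * from table").first()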

      RETURNING support

      Firebird 2.0 supports returning a result set from inserts, and 2.1 extends that to deletes and updates. This is generically exposed by the SQLAlchemy returning() method, such as:

      # INSERT..RETURNING
      result = table.insert().returning(table.c.col1, table.c.col2).\
                     values(name='foo')
      print result.fetchall()
      
      # UPDATE..RETURNING
      raises = empl.update().returning(empl.c.id, empl.c.salary).\
                    where(empl.c.sales>100).\
                    values(dict(salary=empl.c.salary * 1.1))
      print raises.fetchall()

      kinterbasdb

      Support for the Firebird database via the kinterbasdb driver.

      DBAPI

      Documentation and download information (if applicable) for kinterbasdb is available at: http://firebirdsql.org/index.php?op=devel&sub=python

      Connecting

      Connect String:

      firebird+kinterbasdb://user:password@host:port/path/to/db[?key=value&key=value...]

      Arguments

      The Kinterbasdb backend accepts the enable_rowcount and retaining arguments accepted by the sqlalchemy.dialects.firebird.fdb dialect. In addition, it also accepts the following:

      • type_conv - select the kind of mapping done on the types: by default SQLAlchemy uses 200 with Unicode, datetime and decimal support. See the linked documents below for further information.
      • concurrency_level - set the backend policy with regard to threading issues: by default SQLAlchemy uses policy 1. See the linked documents below for further information.

      fdb

      Support for the Firebird database via the fdb driver.

      fdb is a kinterbasdb compatible DBAPI for Firebird.

      New in version 0.8: Support for the fdb Firebird driver.

      DBAPI

      Documentation and download information (if applicable) for fdb is available at: http://pypi.python.org/pypi/fdb/

      Connecting

      Connect String:

      firebird+fdb://user:password@host:port/path/to/db[?key=value&key=value...]

      Status

      The fdb dialect is new and not yet tested (can’t get fdb to build).

      Arguments

      The fdb dialect is based on the sqlalchemy.dialects.firebird.kinterbasdb dialect; however, it does not accept every argument that Kinterbasdb does.

      • enable_rowcount - True by default, setting this to False disables the usage of “cursor.rowcount” with the Kinterbasdb dialect, which SQLAlchemy ordinarily calls upon automatically after any UPDATE or DELETE statement. When disabled, SQLAlchemy’s ResultProxy will return -1 for result.rowcount. The rationale here is that Kinterbasdb requires a second round trip to the database when .rowcount is called - since SQLA’s resultproxy automatically closes the cursor after a non-result-returning statement, rowcount must be called, if at all, before the result object is returned. Additionally, cursor.rowcount may not return correct results with older versions of Firebird, and setting this flag to False will also cause the SQLAlchemy ORM to ignore its usage. The behavior can also be controlled on a per-execution basis using the enable_rowcount option with Connection.execution_options():

        conn = engine.connect().execution_options(enable_rowcount=True)
        r = conn.execute(stmt)
        print r.rowcount
      • retaining - True by default. Leaving this on True will pass the retaining=True keyword argument to the .commit() and .rollback() methods of the DBAPI connection, which can improve performance in some situations, but apparently with significant caveats. Please read the fdb and/or kinterbasdb DBAPI documentation in order to understand the implications of this flag.

        New in version 0.8.2: Added the retaining keyword argument, specifying transaction retaining behavior. This flag will default to False in 0.9.

        See also

        http://pythonhosted.org/fdb/usage-guide.html#retaining-transactions - information on the “retaining” flag.

      SQLAlchemy-0.8.4/doc/dialects/index.html


      Dialects

      The dialect is the system SQLAlchemy uses to communicate with various types of DBAPI implementations and databases. The sections that follow contain reference documentation and notes specific to the usage of each backend, as well as notes for the various DBAPIs.

      All dialects require that an appropriate DBAPI driver is installed.

      External Dialects

      Changed in version 0.8: As of SQLAlchemy 0.8, several dialects have been moved to external projects, and dialects for new databases will also be published as external projects. The rationale here is to keep the base SQLAlchemy install and test suite from growing inordinately large.

      The “classic” dialects such as SQLite, MySQL, Postgresql, Oracle, SQL Server, and Firebird will remain in the Core for the time being.

      Current external dialect projects for SQLAlchemy include:

      Production Ready

      Experimental / Incomplete

      SQLAlchemy-0.8.4/doc/dialects/informix.html


      Informix

      Support for the Informix database.

      DBAPI Support

      The following dialect/DBAPI options are available. Please refer to individual DBAPI sections for connect information.

      Note

      The Informix dialect functions on current SQLAlchemy versions but is not regularly tested, and may have many issues and caveats not currently handled.

      informixdb

      Support for the Informix database via the informixdb driver.

      DBAPI

      Documentation and download information (if applicable) for informixdb is available at: http://informixdb.sourceforge.net/

      Connecting

      Connect String:

      informix+informixdb://user:password@host/dbname

      SQLAlchemy-0.8.4/doc/dialects/mssql.html


      Microsoft SQL Server

      Support for the Microsoft SQL Server database.

      DBAPI Support

      The following dialect/DBAPI options are available. Please refer to individual DBAPI sections for connect information.

      Auto Increment Behavior

      IDENTITY columns are supported by using SQLAlchemy schema.Sequence() objects. In other words:

      from sqlalchemy import Table, Integer, Sequence, Column
      
      Table('test', metadata,
             Column('id', Integer,
                    Sequence('blah',100,10), primary_key=True),
             Column('name', String(20))
           ).create(some_engine)

      would yield:

      CREATE TABLE test (
        id INTEGER NOT NULL IDENTITY(100,10) PRIMARY KEY,
        name VARCHAR(20) NULL,
        )

      Note that the start and increment values for sequences are optional and will default to 1,1.

      Implicit autoincrement behavior works the same in MSSQL as it does in other dialects and results in an IDENTITY column.

      • Support for SET IDENTITY_INSERT ON mode (automagic on / off for INSERT s)
      • Support for auto-fetching of @@IDENTITY/@@SCOPE_IDENTITY() on INSERT

      Collation Support

      Character collations are supported by the base string types, specified by the string argument “collation”:

      from sqlalchemy import VARCHAR
      Column('login', VARCHAR(32, collation='Latin1_General_CI_AS'))

      When such a column is associated with a Table, the CREATE TABLE statement for this column will yield:

      login VARCHAR(32) COLLATE Latin1_General_CI_AS NULL

      New in version 0.8: Character collations are now part of the base string types.

      LIMIT/OFFSET Support

      MSSQL has no support for the LIMIT or OFFSET keywords. LIMIT is supported directly through the TOP Transact-SQL keyword:

      select.limit

      will yield:

      SELECT TOP n

      If using SQL Server 2005 or above, LIMIT with OFFSET support is available through the ROW_NUMBER OVER construct. For versions below 2005, LIMIT with OFFSET usage will fail.
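
      For example (a sketch; the table is hypothetical), a plain LIMIT renders using TOP:

      from sqlalchemy import select

      stmt = select([some_table]).limit(5)
      # renders on MSSQL roughly as:
      # SELECT TOP 5 ... FROM some_table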

      Nullability

      MSSQL has support for three levels of column nullability. The default nullability allows nulls and is explicit in the CREATE TABLE construct:

      name VARCHAR(20) NULL

      If nullable=None is specified then no specification is made. In other words the database’s configured default is used. This will render:

      name VARCHAR(20)

      If nullable is True or False then the column will be NULL or NOT NULL respectively.
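
      For example (sketch), to defer to the database’s configured default for a column:

      from sqlalchemy import Column, String

      # renders "name VARCHAR(20)" with no nullability clause
      Column('name', String(20), nullable=None)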

      Date / Time Handling

      DATE and TIME are supported. Bind parameters are converted to datetime.datetime() objects as required by most MSSQL drivers, and results are processed from strings if needed. The DATE and TIME types are not available for MSSQL 2005 and previous - if a server version below 2008 is detected, DDL for these types will be issued as DATETIME.

      MSSQL-Specific Index Options

      The MSSQL dialect supports special options for Index.

      CLUSTERED

      The mssql_clustered option adds the CLUSTERED keyword to the index:

      Index("my_index", table.c.x, mssql_clustered=True)

      would render the index as CREATE CLUSTERED INDEX my_index ON table (x)

      New in version 0.8.

      INCLUDE

      The mssql_include option renders INCLUDE(colname) for the given string names:

      Index("my_index", table.c.x, mssql_include=['y'])

      would render the index as CREATE INDEX my_index ON table (x) INCLUDE (y)

      New in version 0.8.

      Index ordering

      Index ordering is available via functional expressions, such as:

      Index("my_index", table.c.x.desc())

      would render the index as CREATE INDEX my_index ON table (x DESC)

      New in version 0.8.

      Compatibility Levels

      MSSQL supports the notion of setting compatibility levels at the database level. This allows, for instance, a database that is compatible with SQL2000 to run on a SQL2005 database server. server_version_info will always return the database server version information (in this case SQL2005) and not the compatibility level information. Because of this, if running under a backwards compatibility mode, SQLAlchemy may attempt to use T-SQL statements that are unable to be parsed by the database server.

      Triggers

      SQLAlchemy by default uses OUTPUT INSERTED to get at newly generated primary key values via IDENTITY columns or other server side defaults. MS-SQL does not allow the usage of OUTPUT INSERTED on tables that have triggers. To disable the usage of OUTPUT INSERTED on a per-table basis, specify implicit_returning=False for each Table which has triggers:

      Table('mytable', metadata,
          Column('id', Integer, primary_key=True),
          # ...,
          implicit_returning=False
      )

      Declarative form:

      class MyClass(Base):
          # ...
          __table_args__ = {'implicit_returning':False}

      This option can also be specified engine-wide using the implicit_returning=False argument on create_engine().
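
      For example (the connection URL here is illustrative only):

      from sqlalchemy import create_engine

      # disable OUTPUT INSERTED across the whole engine
      engine = create_engine(
          "mssql+pyodbc://user:pass@mydsn",
          implicit_returning=False
      )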

      Enabling Snapshot Isolation

      Not necessarily specific to SQLAlchemy, SQL Server has a default transaction isolation mode that locks entire tables, and causes even mildly concurrent applications to have long held locks and frequent deadlocks. Enabling snapshot isolation for the database as a whole is recommended for modern levels of concurrency support. This is accomplished via the following ALTER DATABASE commands executed at the SQL prompt:

      ALTER DATABASE MyDatabase SET ALLOW_SNAPSHOT_ISOLATION ON
      
      ALTER DATABASE MyDatabase SET READ_COMMITTED_SNAPSHOT ON

      Background on SQL Server snapshot isolation is available at http://msdn.microsoft.com/en-us/library/ms175095.aspx.

      Known Issues

      • No support for more than one IDENTITY column per table
      • reflection of indexes does not work with versions older than SQL Server 2005

      SQL Server Data Types

      As with all SQLAlchemy dialects, all UPPERCASE types that are known to be valid with SQL Server are importable from the top level dialect, whether they originate from sqlalchemy.types or from the local dialect:

      from sqlalchemy.dialects.mssql import \
          BIGINT, BINARY, BIT, CHAR, DATE, DATETIME, DATETIME2, \
          DATETIMEOFFSET, DECIMAL, FLOAT, IMAGE, INTEGER, MONEY, \
          NCHAR, NTEXT, NUMERIC, NVARCHAR, REAL, SMALLDATETIME, \
          SMALLINT, SMALLMONEY, SQL_VARIANT, TEXT, TIME, \
          TIMESTAMP, TINYINT, UNIQUEIDENTIFIER, VARBINARY, VARCHAR

      Types which are specific to SQL Server, or have SQL Server-specific construction arguments, are as follows:

      class sqlalchemy.dialects.mssql.BIT(*args, **kwargs)

      Bases: sqlalchemy.types.TypeEngine

      __init__(*args, **kwargs)

      Support implementations that were passing arguments

      class sqlalchemy.dialects.mssql.CHAR(length=None, collation=None, convert_unicode=False, unicode_error=None, _warn_on_bytestring=False)

      Bases: sqlalchemy.types.String

      The SQL CHAR type.

      __init__(length=None, collation=None, convert_unicode=False, unicode_error=None, _warn_on_bytestring=False)

      Create a string-holding type.

      Parameters:
      • length – optional, a length for the column for use in DDL and CAST expressions. May be safely omitted if no CREATE TABLE will be issued. Certain databases may require a length for use in DDL, and will raise an exception when the CREATE TABLE DDL is issued if a VARCHAR with no length is included. Whether the value is interpreted as bytes or characters is database specific.
      • collation

        Optional, a column-level collation for use in DDL and CAST expressions. Renders using the COLLATE keyword supported by SQLite, MySQL, and Postgresql. E.g.:

        >>> from sqlalchemy import cast, select, String
        >>> print select([cast('some string', String(collation='utf8'))])
        SELECT CAST(:param_1 AS VARCHAR COLLATE utf8) AS anon_1

        New in version 0.8: Added support for COLLATE to all string types.

      • convert_unicode

        When set to True, the String type will assume that input is to be passed as Python unicode objects, and results returned as Python unicode objects. If the DBAPI in use does not support Python unicode (which is fewer and fewer these days), SQLAlchemy will encode/decode the value, using the value of the encoding parameter passed to create_engine() as the encoding.

        When using a DBAPI that natively supports Python unicode objects, this flag generally does not need to be set. For columns that are explicitly intended to store non-ASCII data, the Unicode or UnicodeText types should be used regardless, which feature the same behavior of convert_unicode but also indicate an underlying column type that directly supports unicode, such as NVARCHAR.

        For the extremely rare case that Python unicode is to be encoded/decoded by SQLAlchemy on a backend that does natively support Python unicode, the value force can be passed here which will cause SQLAlchemy’s encode/decode services to be used unconditionally.

      • unicode_error – Optional, a method to use to handle Unicode conversion errors. Behaves like the errors keyword argument to the standard library’s string.decode() functions. This flag requires that convert_unicode is set to force - otherwise, SQLAlchemy is not guaranteed to handle the task of unicode conversion. Note that this flag adds significant performance overhead to row-fetching operations for backends that already return unicode objects natively (which most DBAPIs do). This flag should only be used as a last resort for reading strings from a column with varied or corrupted encodings.
      class sqlalchemy.dialects.mssql.DATETIME2(precision=None, **kw)

      Bases: sqlalchemy.dialects.mssql.base._DateTimeBase, sqlalchemy.types.DateTime

      class sqlalchemy.dialects.mssql.DATETIMEOFFSET(precision=None, **kwargs)

      Bases: sqlalchemy.types.TypeEngine

      class sqlalchemy.dialects.mssql.IMAGE(length=None)

      Bases: sqlalchemy.types.LargeBinary

      __init__(length=None)

      Construct a LargeBinary type.

      Parameters: length – optional, a length for the column for use in DDL statements, for those BLOB types that accept a length (i.e. MySQL). It does not produce a small BINARY/VARBINARY type - use the BINARY/VARBINARY types specifically for those. May be safely omitted if no CREATE TABLE will be issued. Certain databases may require a length for use in DDL, and will raise an exception when the CREATE TABLE DDL is issued.
      class sqlalchemy.dialects.mssql.MONEY(*args, **kwargs)

      Bases: sqlalchemy.types.TypeEngine

      __init__(*args, **kwargs)

      Support implementations that were passing arguments

      class sqlalchemy.dialects.mssql.NCHAR(length=None, **kwargs)

      Bases: sqlalchemy.types.Unicode

      The SQL NCHAR type.

      __init__(length=None, **kwargs)

      Create a Unicode object.

      Parameters are the same as that of String, with the exception that convert_unicode defaults to True.

      class sqlalchemy.dialects.mssql.NTEXT(length=None, **kwargs)

      Bases: sqlalchemy.types.UnicodeText

      MSSQL NTEXT type, for variable-length unicode text up to 2^30 characters.

      __init__(length=None, **kwargs)

      Create a Unicode-converting Text type.

      Parameters are the same as that of Text, with the exception that convert_unicode defaults to True.

      class sqlalchemy.dialects.mssql.NVARCHAR(length=None, **kwargs)

      Bases: sqlalchemy.types.Unicode

      The SQL NVARCHAR type.

      __init__(length=None, **kwargs)

      Create a Unicode object.

      Parameters are the same as that of String, with the exception that convert_unicode defaults to True.

      class sqlalchemy.dialects.mssql.REAL(**kw)

      Bases: sqlalchemy.types.REAL

      class sqlalchemy.dialects.mssql.SMALLDATETIME(timezone=False)

      Bases: sqlalchemy.dialects.mssql.base._DateTimeBase, sqlalchemy.types.DateTime

      __init__(timezone=False)

      Construct a new DateTime.

      Parameters: timezone – boolean. If True, and supported by the backend, will produce ‘TIMESTAMP WITH TIMEZONE’. For backends that don’t support timezone aware timestamps, has no effect.

      class sqlalchemy.dialects.mssql.SMALLMONEY(*args, **kwargs)

      Bases: sqlalchemy.types.TypeEngine

      __init__(*args, **kwargs)

      Support implementations that were passing arguments

      class sqlalchemy.dialects.mssql.SQL_VARIANT(*args, **kwargs)

      Bases: sqlalchemy.types.TypeEngine

      __init__(*args, **kwargs)

      Support implementations that were passing arguments

      class sqlalchemy.dialects.mssql.TEXT(length=None, collation=None, convert_unicode=False, unicode_error=None, _warn_on_bytestring=False)

      Bases: sqlalchemy.types.Text

      The SQL TEXT type.

      __init__(length=None, collation=None, convert_unicode=False, unicode_error=None, _warn_on_bytestring=False)

      Create a string-holding type.

      Parameters:
      • length – optional, a length for the column for use in DDL and CAST expressions. May be safely omitted if no CREATE TABLE will be issued. Certain databases may require a length for use in DDL, and will raise an exception when the CREATE TABLE DDL is issued if a VARCHAR with no length is included. Whether the value is interpreted as bytes or characters is database specific.
      • collation

        Optional, a column-level collation for use in DDL and CAST expressions. Renders using the COLLATE keyword supported by SQLite, MySQL, and Postgresql. E.g.:

        >>> from sqlalchemy import cast, select, String
        >>> print select([cast('some string', String(collation='utf8'))])
        SELECT CAST(:param_1 AS VARCHAR COLLATE utf8) AS anon_1

        New in version 0.8: Added support for COLLATE to all string types.

      • convert_unicode

        When set to True, the String type will assume that input is to be passed as Python unicode objects, and results returned as Python unicode objects. If the DBAPI in use does not support Python unicode (which is fewer and fewer these days), SQLAlchemy will encode/decode the value, using the value of the encoding parameter passed to create_engine() as the encoding.

        When using a DBAPI that natively supports Python unicode objects, this flag generally does not need to be set. For columns that are explicitly intended to store non-ASCII data, the Unicode or UnicodeText types should be used regardless, which feature the same behavior of convert_unicode but also indicate an underlying column type that directly supports unicode, such as NVARCHAR.

        For the extremely rare case that Python unicode is to be encoded/decoded by SQLAlchemy on a backend that does natively support Python unicode, the value force can be passed here which will cause SQLAlchemy’s encode/decode services to be used unconditionally.

      • unicode_error – Optional, a method to use to handle Unicode conversion errors. Behaves like the errors keyword argument to the standard library’s string.decode() functions. This flag requires that convert_unicode is set to force - otherwise, SQLAlchemy is not guaranteed to handle the task of unicode conversion. Note that this flag adds significant performance overhead to row-fetching operations for backends that already return unicode objects natively (which most DBAPIs do). This flag should only be used as a last resort for reading strings from a column with varied or corrupted encodings.
      class sqlalchemy.dialects.mssql.TIME(precision=None, **kwargs)

      Bases: sqlalchemy.types.TIME

      class sqlalchemy.dialects.mssql.TINYINT(*args, **kwargs)

      Bases: sqlalchemy.types.Integer

      __init__(*args, **kwargs)

      Support implementations that were passing arguments

      class sqlalchemy.dialects.mssql.UNIQUEIDENTIFIER(*args, **kwargs)

      Bases: sqlalchemy.types.TypeEngine

      __init__(*args, **kwargs)

      Support implementations that were passing arguments

      class sqlalchemy.dialects.mssql.VARCHAR(length=None, collation=None, convert_unicode=False, unicode_error=None, _warn_on_bytestring=False)

      Bases: sqlalchemy.types.String

      The SQL VARCHAR type.

      __init__(length=None, collation=None, convert_unicode=False, unicode_error=None, _warn_on_bytestring=False)

      Create a string-holding type.

      Parameters:
      • length – optional, a length for the column for use in DDL and CAST expressions. May be safely omitted if no CREATE TABLE will be issued. Certain databases may require a length for use in DDL, and will raise an exception when the CREATE TABLE DDL is issued if a VARCHAR with no length is included. Whether the value is interpreted as bytes or characters is database specific.
      • collation

        Optional, a column-level collation for use in DDL and CAST expressions. Renders using the COLLATE keyword supported by SQLite, MySQL, and Postgresql. E.g.:

        >>> from sqlalchemy import cast, select, String
        >>> print select([cast('some string', String(collation='utf8'))])
        SELECT CAST(:param_1 AS VARCHAR COLLATE utf8) AS anon_1

        New in version 0.8: Added support for COLLATE to all string types.

      • convert_unicode

        When set to True, the String type will assume that input is to be passed as Python unicode objects, and results returned as Python unicode objects. If the DBAPI in use does not support Python unicode (which is fewer and fewer these days), SQLAlchemy will encode/decode the value, using the value of the encoding parameter passed to create_engine() as the encoding.

        When using a DBAPI that natively supports Python unicode objects, this flag generally does not need to be set. For columns that are explicitly intended to store non-ASCII data, the Unicode or UnicodeText types should be used regardless, which feature the same behavior of convert_unicode but also indicate an underlying column type that directly supports unicode, such as NVARCHAR.

        For the extremely rare case that Python unicode is to be encoded/decoded by SQLAlchemy on a backend that does natively support Python unicode, the value force can be passed here which will cause SQLAlchemy’s encode/decode services to be used unconditionally.

      • unicode_error – Optional, a method to use to handle Unicode conversion errors. Behaves like the errors keyword argument to the standard library’s string.decode() functions. This flag requires that convert_unicode is set to force - otherwise, SQLAlchemy is not guaranteed to handle the task of unicode conversion. Note that this flag adds significant performance overhead to row-fetching operations for backends that already return unicode objects natively (which most DBAPIs do). This flag should only be used as a last resort for reading strings from a column with varied or corrupted encodings.

      PyODBC

      Support for the Microsoft SQL Server database via the PyODBC driver.

      DBAPI

      Documentation and download information (if applicable) for PyODBC is available at: http://pypi.python.org/pypi/pyodbc/

      Connecting

      Connect String:

      mssql+pyodbc://<username>:<password>@<dsnname>

      Additional Connection Examples

      Examples of pyodbc connection string URLs:

      • mssql+pyodbc://mydsn - connects using the specified DSN named mydsn. The connection string that is created will appear like:

        dsn=mydsn;Trusted_Connection=Yes
      • mssql+pyodbc://user:pass@mydsn - connects using the DSN named mydsn passing in the UID and PWD information. The connection string that is created will appear like:

        dsn=mydsn;UID=user;PWD=pass
      • mssql+pyodbc://user:pass@mydsn/?LANGUAGE=us_english - connects using the DSN named mydsn passing in the UID and PWD information, plus the additional connection configuration option LANGUAGE. The connection string that is created will appear like:

        dsn=mydsn;UID=user;PWD=pass;LANGUAGE=us_english
      • mssql+pyodbc://user:pass@host/db - connects using a connection that would appear like:

        DRIVER={SQL Server};Server=host;Database=db;UID=user;PWD=pass
      • mssql+pyodbc://user:pass@host:123/db - connects using a connection string which includes the port information using the comma syntax. This will create the following connection string:

        DRIVER={SQL Server};Server=host,123;Database=db;UID=user;PWD=pass
      • mssql+pyodbc://user:pass@host/db?port=123 - connects using a connection string that includes the port information as a separate port keyword. This will create the following connection string:

        DRIVER={SQL Server};Server=host;Database=db;UID=user;PWD=pass;port=123
      • mssql+pyodbc://user:pass@host/db?driver=MyDriver - connects using a connection string that includes a custom ODBC driver name. This will create the following connection string:

        DRIVER={MyDriver};Server=host;Database=db;UID=user;PWD=pass

      If you require a connection string that is outside the options presented above, use the odbc_connect keyword to pass in a urlencoded connection string. What gets passed in will be urldecoded and passed directly.

      For example:

      mssql+pyodbc:///?odbc_connect=dsn%3Dmydsn%3BDatabase%3Ddb

      would create the following connection string:

      dsn=mydsn;Database=db

      Encoding your connection string can be easily accomplished through the python shell. For example:

      >>> import urllib
      >>> urllib.quote_plus('dsn=mydsn;Database=db')
      'dsn%3Dmydsn%3BDatabase%3Ddb'

      Unicode Binds

      The current state of PyODBC on a unix backend with FreeTDS and/or EasySoft is poor regarding unicode; different OS platforms and versions of UnixODBC versus IODBC versus FreeTDS/EasySoft versus PyODBC itself dramatically alter how strings are received. The PyODBC dialect attempts to use all the information it knows to determine whether or not a Python unicode literal can be passed directly to the PyODBC driver; while SQLAlchemy can encode these to bytestrings first, some users have reported that PyODBC mis-handles bytestrings for certain encodings and requires a Python unicode object, while the author has observed widespread cases where a Python unicode is completely misinterpreted by PyODBC, particularly when dealing with the information schema tables used in table reflection, and the value must first be encoded to a bytestring.

      It is for this reason that whether or not unicode literals for bound parameters are sent to PyODBC can be controlled using the supports_unicode_binds parameter to create_engine(). When left at its default of None, the PyODBC dialect will use its best guess as to whether or not the driver deals with unicode literals well. When False, unicode literals will be encoded first, and when True, unicode literals will be passed straight through. This is an interim flag that hopefully should not be needed once the unicode situation stabilizes for unix + PyODBC.

      New in version 0.7.7: supports_unicode_binds parameter to create_engine().
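
      A minimal sketch (the connection URL is hypothetical); here unicode literals are encoded to bytestrings before being sent to PyODBC:

      from sqlalchemy import create_engine

      # force encoding of unicode bind values to bytestrings
      engine = create_engine('mssql+pyodbc://user:pass@mydsn',
                             supports_unicode_binds=False)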

      mxODBC

      Support for the Microsoft SQL Server database via the mxODBC driver.

      DBAPI

      Documentation and download information (if applicable) for mxODBC is available at: http://www.egenix.com/

      Connecting

      Connect String:

      mssql+mxodbc://<username>:<password>@<dsnname>

      Execution Modes

      mxODBC features two styles of statement execution, using the cursor.execute() and cursor.executedirect() methods (the second being an extension to the DBAPI specification). The former makes use of a particular API call specific to the SQL Server Native Client ODBC driver known as SQLDescribeParam, while the latter does not.

      mxODBC apparently only makes repeated use of a single prepared statement when SQLDescribeParam is used. The advantage to prepared statement reuse is one of performance. The disadvantage is that SQLDescribeParam has a limited set of scenarios in which bind parameters are understood, including that they cannot be placed within the argument lists of function calls, anywhere outside the FROM, or even within subqueries within the FROM clause - making the usage of bind parameters within SELECT statements impossible for all but the most simplistic statements.

      For this reason, the mxODBC dialect uses the “native” mode by default only for INSERT, UPDATE, and DELETE statements, and uses the escaped string mode for all other statements.

      This behavior can be controlled via execution_options() using the native_odbc_execute flag with a value of True or False, where a value of True will unconditionally use native bind parameters and a value of False will unconditionally use string-escaped parameters.
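
      A minimal sketch (assuming an mxODBC Engine named engine and a Table named mytable already exist); here string-escaped parameters are forced for a single statement:

      # execution_options() returns a copy of the Connection with the
      # option applied; the original connection is unaffected
      conn = engine.connect().execution_options(native_odbc_execute=False)
      result = conn.execute(mytable.select())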

      pymssql

      Support for the Microsoft SQL Server database via the pymssql driver.

      DBAPI

      Documentation and download information (if applicable) for pymssql is available at: http://pymssql.sourceforge.net/

      Connecting

      Connect String:

      mssql+pymssql://<username>:<password>@<freetds_name>?charset=utf8

      Limitations

      pymssql inherits a lot of limitations from FreeTDS, including:

      • no support for multibyte schema identifiers
      • poor support for large decimals
      • poor support for binary fields
      • poor support for VARCHAR/CHAR fields over 255 characters

      Please consult the pymssql documentation for further information.

      zxjdbc

      Support for the Microsoft SQL Server database via the zxJDBC for Jython driver.

      DBAPI

      Drivers for this database are available at: http://jtds.sourceforge.net/

      Connecting

      Connect String:

      mssql+zxjdbc://user:pass@host:port/dbname[?key=value&key=value...]

      AdoDBAPI

      Support for the Microsoft SQL Server database via the adodbapi driver.

      DBAPI

      Documentation and download information (if applicable) for adodbapi is available at: http://adodbapi.sourceforge.net/

      Connecting

      Connect String:

      mssql+adodbapi://<username>:<password>@<dsnname>

      Note

      The adodbapi dialect is not implemented for SQLAlchemy versions 0.6 and above at this time.

      SQLAlchemy-0.8.4/doc/dialects/mysql.html0000644000076500000240000033536412251147474020646 0ustar classicstaff00000000000000 MySQL — SQLAlchemy 0.8 Documentation

      MySQL

      Support for the MySQL database.

      DBAPI Support

      The following dialect/DBAPI options are available. Please refer to individual DBAPI sections for connect information.

      Supported Versions and Features

      SQLAlchemy supports MySQL starting with version 4.1 through modern releases. However, no heroic measures are taken to work around major missing SQL features - if your server version does not support sub-selects, for example, they won’t work in SQLAlchemy either.

      See the official MySQL documentation for detailed information about features supported in any given server release.

      Connection Timeouts

      MySQL features an automatic connection close behavior for connections that have been idle for eight hours or more. To avoid this issue, use the pool_recycle option, which controls the maximum age of any connection:

      engine = create_engine('mysql+mysqldb://...', pool_recycle=3600)

      Storage Engines

      Most MySQL server installations have a default table type of MyISAM, a non-transactional table type. During a transaction, non-transactional storage engines do not participate and continue to store table changes in autocommit mode. For fully atomic transactions as well as support for foreign key constraints, all participating tables must use a transactional engine such as InnoDB, Falcon, SolidDB, PBXT, etc.

      Storage engines can be selected when creating tables in SQLAlchemy by supplying a mysql_engine='whatever' argument to the Table constructor. Any MySQL table creation option can be specified in this syntax:

      Table('mytable', metadata,
            Column('data', String(32)),
            mysql_engine='InnoDB',
            mysql_charset='utf8'
           )

      See also

      The InnoDB Storage Engine - on the MySQL website.

      Case Sensitivity and Table Reflection

      MySQL has inconsistent support for case-sensitive identifier names, basing support on specific details of the underlying operating system. However, it has been observed that no matter what case sensitivity behavior is present, the names of tables in foreign key declarations are always received from the database as all-lower case, making it impossible to accurately reflect a schema where inter-related tables use mixed-case identifier names.

      Therefore it is strongly advised that table names be declared as all lower case both within SQLAlchemy and on the MySQL database itself, especially if database reflection features are to be used.

      Transaction Isolation Level

      create_engine() accepts an isolation_level parameter which results in the command SET SESSION TRANSACTION ISOLATION LEVEL <level> being invoked for every new connection. Valid values for this parameter are READ COMMITTED, READ UNCOMMITTED, REPEATABLE READ, and SERIALIZABLE:

      engine = create_engine(
                      "mysql://scott:tiger@localhost/test",
                      isolation_level="READ UNCOMMITTED"
                  )

      New in version 0.7.6.

      Keys

      Not all MySQL storage engines support foreign keys. For MyISAM and similar engines, the information loaded by table reflection will not include foreign keys. For these tables, you may supply a ForeignKeyConstraint at reflection time:

      Table('mytable', metadata,
            ForeignKeyConstraint(['other_id'], ['othertable.other_id']),
            autoload=True
           )

      When creating tables, SQLAlchemy will automatically set AUTO_INCREMENT on an integer primary key column:

      >>> t = Table('mytable', metadata,
      ...   Column('mytable_id', Integer, primary_key=True)
      ... )
      >>> t.create()
      CREATE TABLE mytable (
              mytable_id INTEGER NOT NULL AUTO_INCREMENT,
              PRIMARY KEY (mytable_id)
      )

      You can disable this behavior by supplying autoincrement=False to the Column. This flag can also be used to enable auto-increment on a secondary column in a multi-column key for some storage engines:

      Table('mytable', metadata,
            Column('gid', Integer, primary_key=True, autoincrement=False),
            Column('id', Integer, primary_key=True)
           )

      Ansi Quoting Style

      MySQL features two varieties of identifier “quoting style”, one using backticks and the other using quotes, e.g. `some_identifier` vs. "some_identifier". All MySQL dialects detect which version is in use by checking the value of sql_mode when a connection is first established with a particular Engine. This quoting style comes into play when rendering table and column names as well as when reflecting existing database structures. The detection is entirely automatic and no special configuration is needed to use either quoting style.

      Changed in version 0.6: detection of ANSI quoting style is entirely automatic, there’s no longer any end-user create_engine() options in this regard.

      MySQL SQL Extensions

      Many of the MySQL SQL extensions are handled through SQLAlchemy’s generic function and operator support:

      table.select(table.c.password==func.md5('plaintext'))
      table.select(table.c.username.op('regexp')('^[a-d]'))

      And of course any valid MySQL statement can be executed as a string as well.

      Some limited direct support for MySQL extensions to SQL is currently available.

      • SELECT pragma:

        select(..., prefixes=['HIGH_PRIORITY', 'SQL_SMALL_RESULT'])
      • UPDATE with LIMIT:

        update(..., mysql_limit=10)

      rowcount Support

      SQLAlchemy standardizes the DBAPI cursor.rowcount attribute to be the usual definition of “number of rows matched by an UPDATE or DELETE” statement. This is in contradiction to the default setting on most MySQL DBAPI drivers, which is “number of rows actually modified/deleted”. For this reason, the SQLAlchemy MySQL dialects always set the constants.CLIENT.FOUND_ROWS flag, or whatever is equivalent for the DBAPI in use, on connect, unless the flag value is overridden using DBAPI-specific options (such as client_flag for the MySQL-Python driver, found_rows for the OurSQL driver).

      See also:

      ResultProxy.rowcount
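
      For example, a minimal sketch (assuming a Table named users and a Connection named conn already exist); with FOUND_ROWS in effect, rowcount reports the rows matched by the WHERE clause, even for rows whose values are unchanged:

      result = conn.execute(
          users.update().
              where(users.c.name == 'jack').
              values(fullname='Jack Jones')
      )
      print(result.rowcount)   # rows matched, not merely rows modified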

      CAST Support

      MySQL documents the CAST operator as available in version 4.0.2. When using the SQLAlchemy cast() function, SQLAlchemy will not render the CAST token on MySQL before this version, based on server version detection, instead rendering the internal expression directly.

      CAST may still not be desirable on an early MySQL version post-4.0.2, as it didn’t add all datatype support until 4.1.1. If your application falls into this narrow area, the behavior of CAST can be controlled using the Custom SQL Constructs and Compilation Extension system, as per the recipe below:

      from sqlalchemy.sql.expression import Cast
      from sqlalchemy.ext.compiler import compiles
      
      @compiles(Cast, 'mysql')
      def _check_mysql_version(element, compiler, **kw):
          if compiler.dialect.server_version_info < (4, 1, 0):
              return compiler.process(element.clause, **kw)
          else:
              return compiler.visit_cast(element, **kw)

      The above function, which only needs to be declared once within an application, overrides the compilation of the cast() construct to check for version 4.1.0 before fully rendering CAST; else the internal element of the construct is rendered directly.

      MySQL Specific Index Options

      MySQL-specific extensions to the Index construct are available.

      Index Length

      MySQL provides an option to create index entries with a certain length, where “length” refers to the number of characters or bytes in each value which will become part of the index. SQLAlchemy provides this feature via the mysql_length parameter:

      Index('my_index', my_table.c.data, mysql_length=10)
      
      Index('a_b_idx', my_table.c.a, my_table.c.b, mysql_length={'a': 4, 'b': 9})

      Prefix lengths are given in characters for nonbinary string types and in bytes for binary string types. The value passed to the keyword argument must be either an integer (and, thus, specify the same prefix length value for all columns of the index) or a dict in which keys are column names and values are prefix length values for corresponding columns. MySQL only allows a length for a column of an index if it is for a CHAR, VARCHAR, TEXT, BINARY, VARBINARY or BLOB column.

      New in version 0.8.2: mysql_length may now be specified as a dictionary for use with composite indexes.

      Index Types

      Some MySQL storage engines permit you to specify an index type when creating an index or primary key constraint. SQLAlchemy provides this feature via the mysql_using parameter on Index:

      Index('my_index', my_table.c.data, mysql_using='hash')

      As well as the mysql_using parameter on PrimaryKeyConstraint:

      PrimaryKeyConstraint("data", mysql_using='hash')

      The value passed to the keyword argument will be simply passed through to the underlying CREATE INDEX or PRIMARY KEY clause, so it must be a valid index type for your MySQL storage engine.

      More information can be found at:

      http://dev.mysql.com/doc/refman/5.0/en/create-index.html

      http://dev.mysql.com/doc/refman/5.0/en/create-table.html

      MySQL Foreign Key Options

      MySQL does not support the foreign key arguments “DEFERRABLE”, “INITIALLY”, or “MATCH”. Using the deferrable or initially keyword argument with ForeignKeyConstraint or ForeignKey will have the effect of these keywords being ignored in the DDL expression, along with a warning; this behavior will change in a future release.

      In order to use these keywords on a foreign key while having them ignored on a MySQL backend, use a custom compile rule:

      from sqlalchemy.ext.compiler import compiles
      from sqlalchemy.schema import ForeignKeyConstraint
      
      @compiles(ForeignKeyConstraint, "mysql")
      def process(element, compiler, **kw):
          element.deferrable = element.initially = None
          return compiler.visit_foreign_key_constraint(element, **kw)

      Changed in version 0.8.3: - the MySQL backend will emit a warning when the deferrable or initially keyword arguments of ForeignKeyConstraint and ForeignKey are used. The arguments will no longer be ignored in 0.9.

      The “MATCH” keyword is in fact more insidious, and in a future release will be explicitly disallowed by SQLAlchemy in conjunction with the MySQL backend. This argument is silently ignored by MySQL, but in addition has the effect of ON UPDATE and ON DELETE options also being ignored by the backend. Therefore MATCH should never be used with the MySQL backend; as is the case with DEFERRABLE and INITIALLY, custom compilation rules can be used to correct a MySQL ForeignKeyConstraint at DDL definition time.

      New in version 0.8.3: - the MySQL backend will emit a warning when the match keyword is used with ForeignKeyConstraint or ForeignKey. This will be a CompileError in 0.9.

      MySQL Data Types

      As with all SQLAlchemy dialects, all UPPERCASE types that are known to be valid with MySQL are importable from the top level dialect:

      from sqlalchemy.dialects.mysql import \
              BIGINT, BINARY, BIT, BLOB, BOOLEAN, CHAR, DATE, \
              DATETIME, DECIMAL, DECIMAL, DOUBLE, ENUM, FLOAT, INTEGER, \
              LONGBLOB, LONGTEXT, MEDIUMBLOB, MEDIUMINT, MEDIUMTEXT, NCHAR, \
              NUMERIC, NVARCHAR, REAL, SET, SMALLINT, TEXT, TIME, TIMESTAMP, \
              TINYBLOB, TINYINT, TINYTEXT, VARBINARY, VARCHAR, YEAR

      Types which are specific to MySQL, or have MySQL-specific construction arguments, are as follows:

      class sqlalchemy.dialects.mysql.BIGINT(display_width=None, **kw)

      Bases: sqlalchemy.dialects.mysql.base._IntegerType, sqlalchemy.types.BIGINT

      MySQL BIGINTEGER type.

      __init__(display_width=None, **kw)

      Construct a BIGINTEGER.

      Parameters:
      • display_width – Optional, maximum display width for this number.
      • unsigned – a boolean, optional.
      • zerofill – Optional. If true, values will be stored as strings left-padded with zeros. Note that this does not affect the values returned by the underlying database API, which continue to be numeric.
      class sqlalchemy.dialects.mysql.BINARY(length=None)

      Bases: sqlalchemy.types._Binary

      The SQL BINARY type.

      class sqlalchemy.dialects.mysql.BIT(length=None)

      Bases: sqlalchemy.types.TypeEngine

      MySQL BIT type.

      This type is for MySQL 5.0.3 or greater for MyISAM, and 5.0.5 or greater for MyISAM, MEMORY, InnoDB and BDB. For older versions, use a MSTinyInteger() type.

      __init__(length=None)

      Construct a BIT.

      Parameters: length – Optional, number of bits.
      class sqlalchemy.dialects.mysql.BLOB(length=None)

      Bases: sqlalchemy.types.LargeBinary

      The SQL BLOB type.

      __init__(length=None)

      Construct a LargeBinary type.

      Parameters: length – optional, a length for the column for use in DDL statements, for those BLOB types that accept a length (i.e. MySQL). It does not produce a small BINARY/VARBINARY type - use the BINARY/VARBINARY types specifically for those. May be safely omitted if no CREATE TABLE will be issued. Certain databases may require a length for use in DDL, and will raise an exception when the CREATE TABLE DDL is issued.
      class sqlalchemy.dialects.mysql.BOOLEAN(create_constraint=True, name=None)

      Bases: sqlalchemy.types.Boolean

      The SQL BOOLEAN type.

      __init__(create_constraint=True, name=None)

      Construct a Boolean.

      Parameters:
      • create_constraint – defaults to True. If the boolean is generated as an int/smallint, also create a CHECK constraint on the table that ensures 1 or 0 as a value.
      • name – if a CHECK constraint is generated, specify the name of the constraint.
      class sqlalchemy.dialects.mysql.CHAR(length=None, **kwargs)

      Bases: sqlalchemy.dialects.mysql.base._StringType, sqlalchemy.types.CHAR

      MySQL CHAR type, for fixed-length character data.

      __init__(length=None, **kwargs)

      Construct a CHAR.

      Parameters:
      • length – Maximum data length, in characters.
      • binary – Optional, use the default binary collation for the national character set. This does not affect the type of data stored, use a BINARY type for binary data.
      • collation – Optional, request a particular collation. Must be compatible with the national character set.
      class sqlalchemy.dialects.mysql.DATE(*args, **kwargs)

      Bases: sqlalchemy.types.Date

      The SQL DATE type.

      __init__(*args, **kwargs)

      Support implementations that were passing arguments

      class sqlalchemy.dialects.mysql.DATETIME(timezone=False)

      Bases: sqlalchemy.types.DateTime

      The SQL DATETIME type.

      __init__(timezone=False)

      Construct a new DateTime.

      Parameters: timezone – boolean. If True, and supported by the backend, will produce ‘TIMESTAMP WITH TIMEZONE’. For backends that don’t support timezone aware timestamps, has no effect.

      class sqlalchemy.dialects.mysql.DECIMAL(precision=None, scale=None, asdecimal=True, **kw)

      Bases: sqlalchemy.dialects.mysql.base._NumericType, sqlalchemy.types.DECIMAL

      MySQL DECIMAL type.

      __init__(precision=None, scale=None, asdecimal=True, **kw)

      Construct a DECIMAL.

      Parameters:
      • precision – Total digits in this number. If scale and precision are both None, values are stored to limits allowed by the server.
      • scale – The number of digits after the decimal point.
      • unsigned – a boolean, optional.
      • zerofill – Optional. If true, values will be stored as strings left-padded with zeros. Note that this does not affect the values returned by the underlying database API, which continue to be numeric.
      class sqlalchemy.dialects.mysql.DOUBLE(precision=None, scale=None, asdecimal=True, **kw)

      Bases: sqlalchemy.dialects.mysql.base._FloatType

      MySQL DOUBLE type.

      __init__(precision=None, scale=None, asdecimal=True, **kw)

      Construct a DOUBLE.

      Parameters:
      • precision – Total digits in this number. If scale and precision are both None, values are stored to limits allowed by the server.
      • scale – The number of digits after the decimal point.
      • unsigned – a boolean, optional.
      • zerofill – Optional. If true, values will be stored as strings left-padded with zeros. Note that this does not affect the values returned by the underlying database API, which continue to be numeric.
      class sqlalchemy.dialects.mysql.ENUM(*enums, **kw)

      Bases: sqlalchemy.types.Enum, sqlalchemy.dialects.mysql.base._StringType

      MySQL ENUM type.

      __init__(*enums, **kw)

      Construct an ENUM.

      Example:

      Column('myenum', MSEnum("foo", "bar", "baz"))
      Parameters:
      • enums – The range of valid values for this ENUM. Values will be quoted when generating the schema according to the quoting flag (see below).
      • strict – Defaults to False: ensure that a given value is in this ENUM’s range of permissible values when inserting or updating rows. Note that MySQL will not raise a fatal error if you attempt to store an out of range value- an alternate value will be stored instead. (See MySQL ENUM documentation.)
      • charset – Optional, a column-level character set for this string value. Takes precedence over ‘ascii’ or ‘unicode’ short-hand.
      • collation – Optional, a column-level collation for this string value. Takes precedence over ‘binary’ short-hand.
      • ascii – Defaults to False: short-hand for the latin1 character set, generates ASCII in schema.
      • unicode – Defaults to False: short-hand for the ucs2 character set, generates UNICODE in schema.
      • binary – Defaults to False: short-hand, pick the binary collation type that matches the column’s character set. Generates BINARY in schema. This does not affect the type of data stored, only the collation of character data.
      • quoting

        Defaults to ‘auto’: automatically determine enum value quoting. If all enum values are surrounded by the same quoting character, then use ‘quoted’ mode. Otherwise, use ‘unquoted’ mode.

        ‘quoted’: values in enums are already quoted, they will be used directly when generating the schema - this usage is deprecated.

        ‘unquoted’: values in enums are not quoted, they will be escaped and surrounded by single quotes when generating the schema.

        Previous versions of this type always required manually quoted values to be supplied; future versions will always quote the string literals for you. This is a transitional option.

      class sqlalchemy.dialects.mysql.FLOAT(precision=None, scale=None, asdecimal=False, **kw)

      Bases: sqlalchemy.dialects.mysql.base._FloatType, sqlalchemy.types.FLOAT

      MySQL FLOAT type.

      __init__(precision=None, scale=None, asdecimal=False, **kw)

      Construct a FLOAT.

      Parameters:
      • precision – Total digits in this number. If scale and precision are both None, values are stored to limits allowed by the server.
      • scale – The number of digits after the decimal point.
      • unsigned – a boolean, optional.
      • zerofill – Optional. If true, values will be stored as strings left-padded with zeros. Note that this does not affect the values returned by the underlying database API, which continue to be numeric.
      class sqlalchemy.dialects.mysql.INTEGER(display_width=None, **kw)

      Bases: sqlalchemy.dialects.mysql.base._IntegerType, sqlalchemy.types.INTEGER

      MySQL INTEGER type.

      __init__(display_width=None, **kw)

      Construct an INTEGER.

      Parameters:
      • display_width – Optional, maximum display width for this number.
      • unsigned – a boolean, optional.
      • zerofill – Optional. If true, values will be stored as strings left-padded with zeros. Note that this does not affect the values returned by the underlying database API, which continue to be numeric.
      class sqlalchemy.dialects.mysql.LONGBLOB(length=None)

      Bases: sqlalchemy.types._Binary

      MySQL LONGBLOB type, for binary data up to 2^32 bytes.

      class sqlalchemy.dialects.mysql.LONGTEXT(**kwargs)

      Bases: sqlalchemy.dialects.mysql.base._StringType

      MySQL LONGTEXT type, for text up to 2^32 characters.

      __init__(**kwargs)

      Construct a LONGTEXT.

      Parameters:
      • charset – Optional, a column-level character set for this string value. Takes precedence over ‘ascii’ or ‘unicode’ short-hand.
      • collation – Optional, a column-level collation for this string value. Takes precedence over ‘binary’ short-hand.
      • ascii – Defaults to False: short-hand for the latin1 character set, generates ASCII in schema.
      • unicode – Defaults to False: short-hand for the ucs2 character set, generates UNICODE in schema.
      • national – Optional. If true, use the server’s configured national character set.
      • binary – Defaults to False: short-hand, pick the binary collation type that matches the column’s character set. Generates BINARY in schema. This does not affect the type of data stored, only the collation of character data.
      class sqlalchemy.dialects.mysql.MEDIUMBLOB(length=None)

      Bases: sqlalchemy.types._Binary

      MySQL MEDIUMBLOB type, for binary data up to 2^24 bytes.

      class sqlalchemy.dialects.mysql.MEDIUMINT(display_width=None, **kw)

      Bases: sqlalchemy.dialects.mysql.base._IntegerType

      MySQL MEDIUMINTEGER type.

      __init__(display_width=None, **kw)

      Construct a MEDIUMINTEGER

      Parameters:
      • display_width – Optional, maximum display width for this number.
      • unsigned – a boolean, optional.
      • zerofill – Optional. If true, values will be stored as strings left-padded with zeros. Note that this does not affect the values returned by the underlying database API, which continue to be numeric.
      class sqlalchemy.dialects.mysql.MEDIUMTEXT(**kwargs)

      Bases: sqlalchemy.dialects.mysql.base._StringType

      MySQL MEDIUMTEXT type, for text up to 2^24 characters.

      __init__(**kwargs)

      Construct a MEDIUMTEXT.

      Parameters:
      • charset – Optional, a column-level character set for this string value. Takes precedence over ‘ascii’ or ‘unicode’ short-hand.
      • collation – Optional, a column-level collation for this string value. Takes precedence over ‘binary’ short-hand.
      • ascii – Defaults to False: short-hand for the latin1 character set, generates ASCII in schema.
      • unicode – Defaults to False: short-hand for the ucs2 character set, generates UNICODE in schema.
      • national – Optional. If true, use the server’s configured national character set.
      • binary – Defaults to False: short-hand, pick the binary collation type that matches the column’s character set. Generates BINARY in schema. This does not affect the type of data stored, only the collation of character data.
      class sqlalchemy.dialects.mysql.NCHAR(length=None, **kwargs)

      Bases: sqlalchemy.dialects.mysql.base._StringType, sqlalchemy.types.NCHAR

      MySQL NCHAR type.

      For fixed-length character data in the server’s configured national character set.

      __init__(length=None, **kwargs)

      Construct an NCHAR.

      Parameters:
      • length – Maximum data length, in characters.
      • binary – Optional, use the default binary collation for the national character set. This does not affect the type of data stored, use a BINARY type for binary data.
      • collation – Optional, request a particular collation. Must be compatible with the national character set.
      class sqlalchemy.dialects.mysql.NUMERIC(precision=None, scale=None, asdecimal=True, **kw)

      Bases: sqlalchemy.dialects.mysql.base._NumericType, sqlalchemy.types.NUMERIC

      MySQL NUMERIC type.

      __init__(precision=None, scale=None, asdecimal=True, **kw)

      Construct a NUMERIC.

      Parameters:
      • precision – Total digits in this number. If scale and precision are both None, values are stored to limits allowed by the server.
      • scale – The number of digits after the decimal point.
      • unsigned – a boolean, optional.
      • zerofill – Optional. If true, values will be stored as strings left-padded with zeros. Note that this does not affect the values returned by the underlying database API, which continue to be numeric.
      class sqlalchemy.dialects.mysql.NVARCHAR(length=None, **kwargs)

      Bases: sqlalchemy.dialects.mysql.base._StringType, sqlalchemy.types.NVARCHAR

      MySQL NVARCHAR type.

      For variable-length character data in the server’s configured national character set.

      __init__(length=None, **kwargs)

      Construct an NVARCHAR.

      Parameters:
      • length – Maximum data length, in characters.
      • binary – Optional, use the default binary collation for the national character set. This does not affect the type of data stored, use a BINARY type for binary data.
      • collation – Optional, request a particular collation. Must be compatible with the national character set.
      class sqlalchemy.dialects.mysql.REAL(precision=None, scale=None, asdecimal=True, **kw)

      Bases: sqlalchemy.dialects.mysql.base._FloatType, sqlalchemy.types.REAL

      MySQL REAL type.

      __init__(precision=None, scale=None, asdecimal=True, **kw)

      Construct a REAL.

      Parameters:
      • precision – Total digits in this number. If scale and precision are both None, values are stored to limits allowed by the server.
      • scale – The number of digits after the decimal point.
      • unsigned – a boolean, optional.
      • zerofill – Optional. If true, values will be stored as strings left-padded with zeros. Note that this does not affect the values returned by the underlying database API, which continue to be numeric.
      class sqlalchemy.dialects.mysql.SET(*values, **kw)

      Bases: sqlalchemy.dialects.mysql.base._StringType

      MySQL SET type.

      __init__(*values, **kw)

      Construct a SET.

      Example:

      Column('myset', MSSet("'foo'", "'bar'", "'baz'"))
      Parameters:
      • values – The range of valid values for this SET. Values will be used exactly as they appear when generating schemas. Strings must be quoted, as in the example above. Single-quotes are suggested for ANSI compatibility and are required for portability to servers with ANSI_QUOTES enabled.
      • charset – Optional, a column-level character set for this string value. Takes precedence over ‘ascii’ or ‘unicode’ short-hand.
      • collation – Optional, a column-level collation for this string value. Takes precedence over ‘binary’ short-hand.
      • ascii – Defaults to False: short-hand for the latin1 character set, generates ASCII in schema.
      • unicode – Defaults to False: short-hand for the ucs2 character set, generates UNICODE in schema.
      • binary – Defaults to False: short-hand, pick the binary collation type that matches the column’s character set. Generates BINARY in schema. This does not affect the type of data stored, only the collation of character data.
      class sqlalchemy.dialects.mysql.SMALLINT(display_width=None, **kw)

      Bases: sqlalchemy.dialects.mysql.base._IntegerType, sqlalchemy.types.SMALLINT

      MySQL SMALLINTEGER type.

      __init__(display_width=None, **kw)

      Construct a SMALLINTEGER.

      Parameters:
      • display_width – Optional, maximum display width for this number.
      • unsigned – a boolean, optional.
      • zerofill – Optional. If true, values will be stored as strings left-padded with zeros. Note that this does not affect the values returned by the underlying database API, which continue to be numeric.
      class sqlalchemy.dialects.mysql.TEXT(length=None, **kw)

      Bases: sqlalchemy.dialects.mysql.base._StringType, sqlalchemy.types.TEXT

      MySQL TEXT type, for text up to 2^16 characters.

      __init__(length=None, **kw)

      Construct a TEXT.

      Parameters:
      • length – Optional, if provided the server may optimize storage by substituting the smallest TEXT type sufficient to store length characters.
      • charset – Optional, a column-level character set for this string value. Takes precedence over ‘ascii’ or ‘unicode’ short-hand.
      • collation – Optional, a column-level collation for this string value. Takes precedence over ‘binary’ short-hand.
      • ascii – Defaults to False: short-hand for the latin1 character set, generates ASCII in schema.
      • unicode – Defaults to False: short-hand for the ucs2 character set, generates UNICODE in schema.
      • national – Optional. If true, use the server’s configured national character set.
      • binary – Defaults to False: short-hand, pick the binary collation type that matches the column’s character set. Generates BINARY in schema. This does not affect the type of data stored, only the collation of character data.
      class sqlalchemy.dialects.mysql.TIME(timezone=False, fsp=None)

      Bases: sqlalchemy.types.TIME

      MySQL TIME type.

      Recent versions of MySQL add support for fractional seconds precision. While the mysql.TIME type now supports this, note that many DBAPI drivers may not yet include support.

      __init__(timezone=False, fsp=None)

      Construct a MySQL TIME type.

      Parameters:
      • timezone – not used by the MySQL dialect.
      • fsp – fractional seconds precision value. MySQL 5.6 supports storage of fractional seconds; this parameter will be used when emitting DDL for the TIME type. Note that many DBAPI drivers may not yet have support for fractional seconds, however.

      New in version 0.8: The MySQL-specific TIME type as well as fractional seconds support.
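
      A minimal sketch (the table and column names are hypothetical); on MySQL 5.6 and above, the DDL for the column below includes the fractional seconds precision:

      from sqlalchemy import Table, Column, Integer, MetaData
      from sqlalchemy.dialects.mysql import TIME

      metadata = MetaData()

      timings = Table('timings', metadata,
          Column('id', Integer, primary_key=True),
          Column('elapsed', TIME(fsp=3))   # renders TIME(3) on MySQL 5.6+
      )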

      class sqlalchemy.dialects.mysql.TIMESTAMP(timezone=False)

      Bases: sqlalchemy.types.TIMESTAMP

      MySQL TIMESTAMP type.

      __init__(timezone=False)

      Construct a new DateTime.

      Parameters: timezone – boolean. If True, and supported by the backend, will produce ‘TIMESTAMP WITH TIMEZONE’. For backends that don’t support timezone aware timestamps, has no effect.

      class sqlalchemy.dialects.mysql.TINYBLOB(length=None)

      Bases: sqlalchemy.types._Binary

      MySQL TINYBLOB type, for binary data up to 2^8 bytes.

      class sqlalchemy.dialects.mysql.TINYINT(display_width=None, **kw)

      Bases: sqlalchemy.dialects.mysql.base._IntegerType

      MySQL TINYINT type.

      __init__(display_width=None, **kw)

      Construct a TINYINT.

      Parameters:
      • display_width – Optional, maximum display width for this number.
      • unsigned – a boolean, optional.
      • zerofill – Optional. If true, values will be stored as strings left-padded with zeros. Note that this does not affect the values returned by the underlying database API, which continue to be numeric.
      class sqlalchemy.dialects.mysql.TINYTEXT(**kwargs)

      Bases: sqlalchemy.dialects.mysql.base._StringType

      MySQL TINYTEXT type, for text up to 2^8 characters.

      __init__(**kwargs)

      Construct a TINYTEXT.

      Parameters:
      • charset – Optional, a column-level character set for this string value. Takes precedence over ‘ascii’ or ‘unicode’ short-hand.
      • collation – Optional, a column-level collation for this string value. Takes precedence over ‘binary’ short-hand.
      • ascii – Defaults to False: short-hand for the latin1 character set, generates ASCII in schema.
      • unicode – Defaults to False: short-hand for the ucs2 character set, generates UNICODE in schema.
      • national – Optional. If true, use the server’s configured national character set.
      • binary – Defaults to False: short-hand, pick the binary collation type that matches the column’s character set. Generates BINARY in schema. This does not affect the type of data stored, only the collation of character data.
      class sqlalchemy.dialects.mysql.VARBINARY(length=None)

      Bases: sqlalchemy.types._Binary

      The SQL VARBINARY type.

      class sqlalchemy.dialects.mysql.VARCHAR(length=None, **kwargs)

      Bases: sqlalchemy.dialects.mysql.base._StringType, sqlalchemy.types.VARCHAR

      MySQL VARCHAR type, for variable-length character data.

      __init__(length=None, **kwargs)

      Construct a VARCHAR.

      Parameters:
      • charset – Optional, a column-level character set for this string value. Takes precedence over ‘ascii’ or ‘unicode’ short-hand.
      • collation – Optional, a column-level collation for this string value. Takes precedence over ‘binary’ short-hand.
      • ascii – Defaults to False: short-hand for the latin1 character set, generates ASCII in schema.
      • unicode – Defaults to False: short-hand for the ucs2 character set, generates UNICODE in schema.
      • national – Optional. If true, use the server’s configured national character set.
      • binary – Defaults to False: short-hand, pick the binary collation type that matches the column’s character set. Generates BINARY in schema. This does not affect the type of data stored, only the collation of character data.
      class sqlalchemy.dialects.mysql.YEAR(display_width=None)

      Bases: sqlalchemy.types.TypeEngine

      MySQL YEAR type, for single byte storage of years 1901-2155.

      MySQL-Python

      Support for the MySQL database via the MySQL-Python driver.

      DBAPI

      Documentation and download information (if applicable) for MySQL-Python is available at: http://sourceforge.net/projects/mysql-python

      Connecting

      Connect String:

      mysql+mysqldb://<user>:<password>@<host>[:<port>]/<dbname>

      Unicode

      MySQLdb will accommodate Python unicode objects if the use_unicode=1 parameter, or the charset parameter, is passed as a connection argument.

      Without this setting, many MySQL server installations default to a latin1 encoding for client connections, which has the effect of all data being converted into latin1, even if you have utf8 or another character set configured on your tables and columns. With versions 4.1 and higher, you can change the connection character set either through server configuration or by including the charset parameter. The charset parameter as received by MySQL-Python also has the side-effect of enabling use_unicode=1:

      # set client encoding to utf8; all strings come back as unicode
      create_engine('mysql+mysqldb:///mydb?charset=utf8')

      Manually configuring use_unicode=0 will cause MySQL-python to return encoded strings:

      # set client encoding to utf8; all strings come back as utf8 str
      create_engine('mysql+mysqldb:///mydb?charset=utf8&use_unicode=0')

      Known Issues

      MySQL-python version 1.2.2 has a serious memory leak related to unicode conversion, a feature which is disabled via use_unicode=0. It is strongly advised to use the latest version of MySQL-Python.

      OurSQL

      Support for the MySQL database via the OurSQL driver.

      DBAPI

      Documentation and download information (if applicable) for OurSQL is available at: http://packages.python.org/oursql/

      Connecting

      Connect String:

      mysql+oursql://<user>:<password>@<host>[:<port>]/<dbname>

      Unicode

      oursql defaults to using utf8 as the connection charset, but other encodings may be used instead. Like the MySQL-Python driver, unicode support can be completely disabled:

      # oursql sets the connection charset to utf8 automatically; all strings come
      # back as utf8 str
      create_engine('mysql+oursql:///mydb?use_unicode=0')

      To not automatically use utf8 and instead use whatever the connection defaults to, there is a separate parameter:

      # use the default connection charset; all strings come back as unicode
      create_engine('mysql+oursql:///mydb?default_charset=1')
      
      # use latin1 as the connection charset; all strings come back as unicode
      create_engine('mysql+oursql:///mydb?charset=latin1')

      pymysql

      Support for the MySQL database via the PyMySQL driver.

      DBAPI

      Documentation and download information (if applicable) for PyMySQL is available at: http://code.google.com/p/pymysql/

      Connecting

      Connect String:

      mysql+pymysql://<username>:<password>@<host>/<dbname>[?<options>]

      MySQL-Python Compatibility

      The pymysql DBAPI is a pure Python port of the MySQL-python (MySQLdb) driver, and targets 100% compatibility. Most behavioral notes for MySQL-python apply to the pymysql driver as well.
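
      A minimal connection sketch, assuming a hypothetical local database named mydb and user scott:

      from sqlalchemy import create_engine
      
      # hypothetical credentials and database name
      engine = create_engine("mysql+pymysql://scott:tiger@localhost/mydb?charset=utf8")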

      MySQL-Connector

      Support for the MySQL database via the MySQL Connector/Python driver.

      DBAPI

      Documentation and download information (if applicable) for MySQL Connector/Python is available at: https://launchpad.net/myconnpy

      Connecting

      Connect String:

      mysql+mysqlconnector://<user>:<password>@<host>[:<port>]/<dbname>

      cymysql

      Support for the MySQL database via the CyMySQL driver.

      DBAPI

      Documentation and download information (if applicable) for CyMySQL is available at: https://github.com/nakagami/CyMySQL

      Connecting

      Connect String:

      mysql+cymysql://<username>:<password>@<host>/<dbname>[?<options>]

      Google App Engine

      Support for the MySQL database via the Google Cloud SQL driver.

      This dialect is based primarily on the mysql.mysqldb dialect with minimal changes.

      New in version 0.7.8.

      DBAPI

      Documentation and download information (if applicable) for Google Cloud SQL is available at: https://developers.google.com/appengine/docs/python/cloud-sql/developers-guide

      Connecting

      Connect String:

      mysql+gaerdbms:///<dbname>?instance=<instancename>

      Pooling

      Google App Engine connections appear to be randomly recycled, so the dialect does not pool connections. The NullPool implementation is installed within the Engine by default.

      pyodbc

      Support for the MySQL database via the PyODBC driver.

      DBAPI

      Documentation and download information (if applicable) for PyODBC is available at: http://pypi.python.org/pypi/pyodbc/

      Connecting

      Connect String:

      mysql+pyodbc://<username>:<password>@<dsnname>

      Limitations

      The mysql-pyodbc dialect is subject to unresolved character encoding issues which exist within the current ODBC drivers available. (see http://code.google.com/p/pyodbc/issues/detail?id=25). Consider usage of OurSQL, MySQLdb, or MySQL-connector/Python.

      zxjdbc

      Support for the MySQL database via the zxjdbc for Jython driver.

      DBAPI

      Drivers for this database are available at: http://dev.mysql.com/downloads/connector/j/

      Connecting

      Connect String:

      mysql+zxjdbc://<user>:<password>@<hostname>[:<port>]/<database>

      Character Sets

      SQLAlchemy zxjdbc dialects pass unicode straight through to the zxjdbc/JDBC layer. To allow multiple character sets to be sent from the MySQL Connector/J JDBC driver, by default SQLAlchemy sets its characterEncoding connection property to UTF-8. It may be overridden via a create_engine URL parameter.
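
      A sketch of such an override; the host, database, and encoding value below are hypothetical:

      from sqlalchemy import create_engine
      
      # pass a different characterEncoding through to Connector/J
      engine = create_engine(
              "mysql+zxjdbc://scott:tiger@localhost/test?characterEncoding=ISO-8859-1")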

      Oracle

      Support for the Oracle database.

      DBAPI Support

      The following dialect/DBAPI options are available. Please refer to individual DBAPI sections for connect information.

      Connect Arguments

      The dialect supports several create_engine() arguments which affect the behavior of the dialect regardless of driver in use.

      • use_ansi - Use ANSI JOIN constructs (see the section on Oracle 8). Defaults to True. If False, Oracle-8 compatible constructs are used for joins.
      • optimize_limits - defaults to False. See the section on LIMIT/OFFSET.
      • use_binds_for_limits - defaults to True. See the section on LIMIT/OFFSET.

      Auto Increment Behavior

      SQLAlchemy Table objects which include integer primary keys are usually assumed to have “autoincrementing” behavior, meaning they can generate their own primary key values upon INSERT. Since Oracle has no “autoincrement” feature, SQLAlchemy relies upon sequences to produce these values. With the Oracle dialect, a sequence must always be explicitly specified to enable autoincrement. This is divergent with the majority of documentation examples which assume the usage of an autoincrement-capable database. To specify sequences, use the sqlalchemy.schema.Sequence object which is passed to a Column construct:

      t = Table('mytable', metadata,
            Column('id', Integer, Sequence('id_seq'), primary_key=True),
            Column(...), ...
      )

      This step is also required when using table reflection, i.e. autoload=True:

      t = Table('mytable', metadata,
            Column('id', Integer, Sequence('id_seq'), primary_key=True),
            autoload=True
      )

      Identifier Casing

      In Oracle, the data dictionary represents all case insensitive identifier names using UPPERCASE text. SQLAlchemy on the other hand considers an all-lower case identifier name to be case insensitive. The Oracle dialect converts all case insensitive identifiers to and from those two formats during schema level communication, such as reflection of tables and indexes. Using an UPPERCASE name on the SQLAlchemy side indicates a case sensitive identifier, and SQLAlchemy will quote the name - this will cause mismatches against data dictionary data received from Oracle, so unless identifier names have been truly created as case sensitive (i.e. using quoted names), all lowercase names should be used on the SQLAlchemy side.
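
      A brief sketch of the distinction, using hypothetical table names:

      from sqlalchemy import Table, Column, Integer, MetaData
      
      metadata = MetaData()
      
      # all-lowercase name: treated as case insensitive and matched against
      # Oracle's UPPERCASE data dictionary representation during reflection
      t1 = Table('mytable', metadata, Column('id', Integer, primary_key=True))
      
      # UPPERCASE name: treated as case sensitive and quoted in generated SQL,
      # so it only matches a table actually created with a quoted name
      t2 = Table('MYTABLE', metadata, Column('id', Integer, primary_key=True))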

      Unicode

      Changed in version 0.6: SQLAlchemy uses the “native unicode” mode provided as of cx_oracle 5. cx_oracle 5.0.2 or greater is recommended for support of NCLOB. If not using cx_oracle 5, the NLS_LANG environment variable needs to be set in order for the oracle client library to use proper encoding, such as “AMERICAN_AMERICA.UTF8”.

      Also note that Oracle supports unicode data through the NVARCHAR and NCLOB data types. When using the SQLAlchemy Unicode and UnicodeText types, these DDL types will be used within CREATE TABLE statements. Usage of VARCHAR2 and CLOB with unicode text still requires NLS_LANG to be set.
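
      As a short illustration, a hypothetical table using the Unicode and UnicodeText types should render NVARCHAR2 and NCLOB in its CREATE TABLE statement on the Oracle dialect:

      from sqlalchemy import Table, Column, Integer, MetaData, Unicode, UnicodeText
      
      metadata = MetaData()
      
      # hypothetical table; 'title' should render as NVARCHAR2(200),
      # 'body' as NCLOB when emitted against the Oracle dialect
      documents = Table('documents', metadata,
              Column('id', Integer, primary_key=True),
              Column('title', Unicode(200)),
              Column('body', UnicodeText)
          )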

      LIMIT/OFFSET Support

      Oracle has no support for the LIMIT or OFFSET keywords. SQLAlchemy uses a wrapped subquery approach in conjunction with ROWNUM. The exact methodology is taken from http://www.oracle.com/technology/oramag/oracle/06-sep/o56asktom.html .

      There are two options which affect its behavior:

      • the “FIRST ROWS()” optimization keyword is not used by default. To enable the usage of this optimization directive, specify optimize_limits=True to create_engine().
      • the values passed for the limit/offset are sent as bound parameters. Some users have observed that Oracle produces a poor query plan when the values are sent as binds and not rendered literally. To render the limit/offset values literally within the SQL statement, specify use_binds_for_limits=False to create_engine().

      Some users have reported better performance when the entirely different approach of a window query is used, i.e. ROW_NUMBER() OVER (ORDER BY), to provide LIMIT/OFFSET (note that the majority of users don’t observe this). To suit this case the method used for LIMIT/OFFSET can be replaced entirely. See the recipe at http://www.sqlalchemy.org/trac/wiki/UsageRecipes/WindowFunctionsByDefault which installs a select compiler that overrides the generation of limit/offset with a window function.
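
      A sketch combining the two create_engine() flags described above; the DSN is hypothetical:

      from sqlalchemy import create_engine
      
      # enable the FIRST ROWS optimization hint and render limit/offset literally
      engine = create_engine(
              "oracle+cx_oracle://scott:tiger@dsn",
              optimize_limits=True,
              use_binds_for_limits=False
          )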

      ON UPDATE CASCADE

      Oracle doesn’t have native ON UPDATE CASCADE functionality. A trigger based solution is available at http://asktom.oracle.com/tkyte/update_cascade/index.html .

      When using the SQLAlchemy ORM, the ORM has limited ability to manually issue cascading updates - specify ForeignKey objects using the “deferrable=True, initially=’deferred’” keyword arguments, and specify “passive_updates=False” on each relationship().
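
      A rough sketch of that ORM configuration, using hypothetical parent/child classes:

      from sqlalchemy import Column, ForeignKey, Integer
      from sqlalchemy.ext.declarative import declarative_base
      from sqlalchemy.orm import relationship
      
      Base = declarative_base()
      
      class Parent(Base):
          __tablename__ = 'parent'
          id = Column(Integer, primary_key=True)
          # have the ORM cascade primary key changes to child rows itself
          children = relationship("Child", passive_updates=False)
      
      class Child(Base):
          __tablename__ = 'child'
          id = Column(Integer, primary_key=True)
          parent_id = Column(
              Integer,
              ForeignKey('parent.id', deferrable=True, initially='deferred')
          )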

      Oracle 8 Compatibility

      When Oracle 8 is detected, the dialect internally configures itself to the following behaviors:

      • the use_ansi flag is set to False. This has the effect of converting all JOIN phrases into the WHERE clause, and in the case of LEFT OUTER JOIN makes use of Oracle’s (+) operator.
      • the NVARCHAR2 and NCLOB datatypes are no longer generated as DDL when the Unicode type is used - VARCHAR2 and CLOB are issued instead. This is because these types don’t seem to work correctly on Oracle 8 even though they are available. The NVARCHAR and NCLOB types will always generate NVARCHAR2 and NCLOB.
      • the “native unicode” mode is disabled when using cx_oracle, i.e. SQLAlchemy encodes all Python unicode objects to “string” before passing in as bind parameters.

      Oracle Data Types

      As with all SQLAlchemy dialects, all UPPERCASE types that are known to be valid with Oracle are importable from the top level dialect, whether they originate from sqlalchemy.types or from the local dialect:

      from sqlalchemy.dialects.oracle import \
                  BFILE, BLOB, CHAR, CLOB, DATE, DATETIME, \
                  DOUBLE_PRECISION, FLOAT, INTERVAL, LONG, NCLOB, \
                  NUMBER, NVARCHAR, NVARCHAR2, RAW, TIMESTAMP, VARCHAR, \
                  VARCHAR2

      Types which are specific to Oracle, or have Oracle-specific construction arguments, are as follows:

      class sqlalchemy.dialects.oracle.BFILE(length=None)

      Bases: sqlalchemy.types.LargeBinary

      __init__(length=None)

      Construct a LargeBinary type.

      Parameters:length – optional, a length for the column for use in DDL statements, for those BLOB types that accept a length (i.e. MySQL). It does not produce a small BINARY/VARBINARY type - use the BINARY/VARBINARY types specifically for those. May be safely omitted if no CREATE TABLE will be issued. Certain databases may require a length for use in DDL, and will raise an exception when the CREATE TABLE DDL is issued.
      class sqlalchemy.dialects.oracle.DOUBLE_PRECISION(precision=None, scale=None, asdecimal=None)

      Bases: sqlalchemy.types.Numeric

      class sqlalchemy.dialects.oracle.INTERVAL(day_precision=None, second_precision=None)

      Bases: sqlalchemy.types.TypeEngine

      __init__(day_precision=None, second_precision=None)

      Construct an INTERVAL.

      Note that only DAY TO SECOND intervals are currently supported. This is due to a lack of support for YEAR TO MONTH intervals within available DBAPIs (cx_oracle and zxjdbc).

      Parameters:
      • day_precision – the day precision value. this is the number of digits to store for the day field. Defaults to “2”
      • second_precision – the second precision value. this is the number of digits to store for the fractional seconds field. Defaults to “6”.
      class sqlalchemy.dialects.oracle.NCLOB(length=None, collation=None, convert_unicode=False, unicode_error=None, _warn_on_bytestring=False)

      Bases: sqlalchemy.types.Text

      __init__(length=None, collation=None, convert_unicode=False, unicode_error=None, _warn_on_bytestring=False)

      Create a string-holding type.

      Parameters:
      • length – optional, a length for the column for use in DDL and CAST expressions. May be safely omitted if no CREATE TABLE will be issued. Certain databases may require a length for use in DDL, and will raise an exception when the CREATE TABLE DDL is issued if a VARCHAR with no length is included. Whether the value is interpreted as bytes or characters is database specific.
      • collation

        Optional, a column-level collation for use in DDL and CAST expressions. Renders using the COLLATE keyword supported by SQLite, MySQL, and Postgresql. E.g.:

        >>> from sqlalchemy import cast, select, String
        >>> print select([cast('some string', String(collation='utf8'))])
        SELECT CAST(:param_1 AS VARCHAR COLLATE utf8) AS anon_1

        New in version 0.8: Added support for COLLATE to all string types.

      • convert_unicode

        When set to True, the String type will assume that input is to be passed as Python unicode objects, and results returned as Python unicode objects. If the DBAPI in use does not support Python unicode (which is fewer and fewer these days), SQLAlchemy will encode/decode the value, using the value of the encoding parameter passed to create_engine() as the encoding.

        When using a DBAPI that natively supports Python unicode objects, this flag generally does not need to be set. For columns that are explicitly intended to store non-ASCII data, the Unicode or UnicodeText types should be used regardless, which feature the same behavior of convert_unicode but also indicate an underlying column type that directly supports unicode, such as NVARCHAR.

        For the extremely rare case that Python unicode is to be encoded/decoded by SQLAlchemy on a backend that does natively support Python unicode, the value force can be passed here which will cause SQLAlchemy’s encode/decode services to be used unconditionally.

      • unicode_error – Optional, a method to use to handle Unicode conversion errors. Behaves like the errors keyword argument to the standard library’s string.decode() functions. This flag requires that convert_unicode is set to force - otherwise, SQLAlchemy is not guaranteed to handle the task of unicode conversion. Note that this flag adds significant performance overhead to row-fetching operations for backends that already return unicode objects natively (which most DBAPIs do). This flag should only be used as a last resort for reading strings from a column with varied or corrupted encodings.
      class sqlalchemy.dialects.oracle.NUMBER(precision=None, scale=None, asdecimal=None)

      Bases: sqlalchemy.types.Numeric, sqlalchemy.types.Integer

      class sqlalchemy.dialects.oracle.LONG(length=None, collation=None, convert_unicode=False, unicode_error=None, _warn_on_bytestring=False)

      Bases: sqlalchemy.types.Text

      __init__(length=None, collation=None, convert_unicode=False, unicode_error=None, _warn_on_bytestring=False)

      Create a string-holding type.

      Parameters:
      • length – optional, a length for the column for use in DDL and CAST expressions. May be safely omitted if no CREATE TABLE will be issued. Certain databases may require a length for use in DDL, and will raise an exception when the CREATE TABLE DDL is issued if a VARCHAR with no length is included. Whether the value is interpreted as bytes or characters is database specific.
      • collation

        Optional, a column-level collation for use in DDL and CAST expressions. Renders using the COLLATE keyword supported by SQLite, MySQL, and Postgresql. E.g.:

        >>> from sqlalchemy import cast, select, String
        >>> print select([cast('some string', String(collation='utf8'))])
        SELECT CAST(:param_1 AS VARCHAR COLLATE utf8) AS anon_1

        New in version 0.8: Added support for COLLATE to all string types.

      • convert_unicode

        When set to True, the String type will assume that input is to be passed as Python unicode objects, and results returned as Python unicode objects. If the DBAPI in use does not support Python unicode (which is fewer and fewer these days), SQLAlchemy will encode/decode the value, using the value of the encoding parameter passed to create_engine() as the encoding.

        When using a DBAPI that natively supports Python unicode objects, this flag generally does not need to be set. For columns that are explicitly intended to store non-ASCII data, the Unicode or UnicodeText types should be used regardless, which feature the same behavior of convert_unicode but also indicate an underlying column type that directly supports unicode, such as NVARCHAR.

        For the extremely rare case that Python unicode is to be encoded/decoded by SQLAlchemy on a backend that does natively support Python unicode, the value force can be passed here which will cause SQLAlchemy’s encode/decode services to be used unconditionally.

      • unicode_error – Optional, a method to use to handle Unicode conversion errors. Behaves like the errors keyword argument to the standard library’s string.decode() functions. This flag requires that convert_unicode is set to force - otherwise, SQLAlchemy is not guaranteed to handle the task of unicode conversion. Note that this flag adds significant performance overhead to row-fetching operations for backends that already return unicode objects natively (which most DBAPIs do). This flag should only be used as a last resort for reading strings from a column with varied or corrupted encodings.
      class sqlalchemy.dialects.oracle.RAW(length=None)

      Bases: sqlalchemy.types._Binary

      cx_Oracle

      Support for the Oracle database via the cx-Oracle driver.

      DBAPI

      Documentation and download information (if applicable) for cx-Oracle is available at: http://cx-oracle.sourceforge.net/

      Connecting

      Connect String:

      oracle+cx_oracle://user:pass@host:port/dbname[?key=value&key=value...]

      Additional Connect Arguments

      When connecting with dbname present, the host, port, and dbname tokens are converted to a TNS name using the cx_oracle makedsn() function. Otherwise, the host token is taken directly as a TNS name.

      Additional arguments which may be specified either as query string arguments on the URL, or as keyword arguments to create_engine() are:

      • allow_twophase - enable two-phase transactions. Defaults to True.

      • arraysize - set the cx_oracle.arraysize value on cursors, in SQLAlchemy it defaults to 50. See the section on “LOB Objects” below.

      • auto_convert_lobs - defaults to True, see the section on LOB objects.

      • auto_setinputsizes - the cx_oracle.setinputsizes() call is issued for all bind parameters. This is required for LOB datatypes but can be disabled to reduce overhead. Defaults to True. Specific types can be excluded from this process using the exclude_setinputsizes parameter.

      • exclude_setinputsizes - a tuple or list of string DBAPI type names to be excluded from the “auto setinputsizes” feature. The type names here must match DBAPI types that are found in the “cx_Oracle” module namespace, such as cx_Oracle.UNICODE, cx_Oracle.NCLOB, etc. Defaults to (STRING, UNICODE).

        New in version 0.8: specific DBAPI types can be excluded from the auto_setinputsizes feature via the exclude_setinputsizes attribute.

      • mode - This is given the string value of SYSDBA or SYSOPER, or alternatively an integer value. This value is only available as a URL query string argument.

      • threaded - enable multithreaded access to cx_oracle connections. Defaults to True. Note that this is the opposite default of the cx_Oracle DBAPI itself. A combined sketch of several of these arguments follows this list.
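
      As a combined sketch, a few of these arguments can be passed as keyword arguments to create_engine(); the DSN below is hypothetical:

      from sqlalchemy import create_engine
      
      engine = create_engine(
              "oracle+cx_oracle://scott:tiger@dsn",
              arraysize=500,              # larger cursor arraysize
              auto_convert_lobs=False,    # leave cx_oracle LOB objects unconverted
              threaded=False              # disable the threaded connect flag
          )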

      Unicode

      cx_oracle 5 fully supports Python unicode objects. SQLAlchemy will pass all unicode strings directly to cx_oracle, and additionally uses an output handler so that all string based result values are returned as unicode as well. Generally, the NLS_LANG environment variable determines the nature of the encoding to be used.

      Note that this behavior is disabled when Oracle 8 is detected, as it has been observed that issues remain when passing Python unicodes to cx_oracle with Oracle 8.

      LOB Objects

      cx_oracle returns oracle LOBs using the cx_oracle.LOB object. SQLAlchemy converts these to strings so that the interface of the Binary type is consistent with that of other backends, and so that the linkage to a live cursor is not needed in scenarios like result.fetchmany() and result.fetchall(). This means that by default, LOB objects are fully fetched unconditionally by SQLAlchemy, and the linkage to a live cursor is broken.

      To disable this processing, pass auto_convert_lobs=False to create_engine().

      Two Phase Transaction Support

      Two Phase transactions are implemented using XA transactions, and are known to work in a rudimental fashion with recent versions of cx_Oracle as of SQLAlchemy 0.8.0b2, 0.7.10. However, the mechanism is not yet considered to be robust and should still be regarded as experimental.

      In particular, the cx_Oracle DBAPI as recently as 5.1.2 has a bug regarding two phase which prevents a particular DBAPI connection from being consistently usable in both prepared transactions as well as traditional DBAPI usage patterns; therefore once a particular connection is used via Connection.begin_prepared(), all subsequent usages of the underlying DBAPI connection must be within the context of prepared transactions.

      The default behavior of Engine is to maintain a pool of DBAPI connections. Therefore, due to the above glitch, a DBAPI connection that has been used in a two-phase operation, and is then returned to the pool, will not be usable in a non-two-phase context. To avoid this situation, the application can make one of several choices:

      • Disable connection pooling using NullPool
      • Ensure that the particular Engine in use is only used for two-phase operations. An Engine bound to an ORM Session which includes twophase=True will consistently use the two-phase transaction style (see the sketch below).
      • For ad-hoc two-phase operations without disabling pooling, the DBAPI connection in use can be evicted from the connection pool using the Connection.detach method.

      Changed in version 0.8.0b2,0.7.10: Support for cx_oracle prepared transactions has been implemented and tested.
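
      A minimal sketch of the second option above, assuming a hypothetical DSN:

      from sqlalchemy import create_engine
      from sqlalchemy.orm import sessionmaker
      
      # this Engine is dedicated to two-phase usage only
      twophase_engine = create_engine("oracle+cx_oracle://scott:tiger@dsn")
      
      # sessions produced here use prepared (two-phase) transactions throughout
      Session = sessionmaker(bind=twophase_engine, twophase=True)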

      Precision Numerics

      The SQLAlchemy dialect goes through a lot of steps to ensure that decimal numbers are sent and received with full accuracy. An “outputtypehandler” callable is associated with each cx_oracle connection object which detects numeric types and receives them as string values, instead of receiving a Python float directly, which is then passed to the Python Decimal constructor. The Numeric and Float types under the cx_oracle dialect are aware of this behavior, and will coerce the Decimal to float if the asdecimal flag is False (default on Float, optional on Numeric).

      Because the handler coerces to Decimal in all cases first, the feature can detract significantly from performance. If precision numerics aren’t required, the decimal handling can be disabled by passing the flag coerce_to_decimal=False to create_engine():

      engine = create_engine("oracle+cx_oracle://dsn",
                          coerce_to_decimal=False)

      New in version 0.7.6: Add the coerce_to_decimal flag.

      Another alternative to performance is to use the cdecimal library; see Numeric for additional notes.

      The handler attempts to use the “precision” and “scale” attributes of the result set column to best determine if subsequent incoming values should be received as Decimal as opposed to int (in which case no processing is added). There are several scenarios where OCI does not provide unambiguous data as to the numeric type, including some situations where individual rows may return a combination of floating point and integer values. Certain values for “precision” and “scale” have been observed to determine this scenario. When it occurs, the outputtypehandler receives as string and then passes off to a processing function which detects, for each returned value, if a decimal point is present, and if so converts to Decimal, otherwise to int. The intention is that simple int-based statements like “SELECT my_seq.nextval() FROM DUAL” continue to return ints and not Decimal objects, and that any kind of floating point value is received as a string so that there is no floating point loss of precision.

      The “decimal point is present” logic itself is also sensitive to locale. Under OCI, this is controlled by the NLS_LANG environment variable. Upon first connection, the dialect runs a test to determine the current “decimal” character, which can be a comma ”,” for european locales. From that point forward the outputtypehandler uses that character to represent a decimal point. Note that cx_oracle 5.0.3 or greater is required when dealing with numerics with locale settings that don’t use a period ”.” as the decimal character.

      Changed in version 0.6.6: The outputtypehandler supports the case where the locale uses a comma ”,” character to represent a decimal point.

      zxjdbc

      Support for the Oracle database via the zxJDBC for Jython driver.

      DBAPI

      Drivers for this database are available at: http://www.oracle.com/technology/software/tech/java/sqlj_jdbc/index.html.

      Connecting

      Connect String:

      oracle+zxjdbc://user:pass@host/dbname

      PostgreSQL

      Support for the PostgreSQL database.

      DBAPI Support

      The following dialect/DBAPI options are available. Please refer to individual DBAPI sections for connect information.

      Sequences/SERIAL

      PostgreSQL supports sequences, and SQLAlchemy uses these as the default means of creating new primary key values for integer-based primary key columns. When creating tables, SQLAlchemy will issue the SERIAL datatype for integer-based primary key columns, which generates a sequence and server side default corresponding to the column.

      To specify a specific named sequence to be used for primary key generation, use the Sequence() construct:

      Table('sometable', metadata,
              Column('id', Integer, Sequence('some_id_seq'), primary_key=True)
          )

      When SQLAlchemy issues a single INSERT statement, to fulfill the contract of having the “last insert identifier” available, a RETURNING clause is added to the INSERT statement which specifies the primary key columns should be returned after the statement completes. The RETURNING functionality only takes place if Postgresql 8.2 or later is in use. As a fallback approach, the sequence, whether specified explicitly or implicitly via SERIAL, is executed independently beforehand, the returned value to be used in the subsequent insert. Note that when an insert() construct is executed using “executemany” semantics, the “last inserted identifier” functionality does not apply; no RETURNING clause is emitted nor is the sequence pre-executed in this case.

      To turn off the usage of RETURNING by default, specify the flag implicit_returning=False to create_engine().
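
      A one-line sketch, using a hypothetical URL:

      from sqlalchemy import create_engine
      
      # disable RETURNING for single-row INSERT statements on this engine
      engine = create_engine(
              "postgresql://scott:tiger@localhost/test",
              implicit_returning=False
          )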

      Transaction Isolation Level

      All Postgresql dialects support setting of transaction isolation level both via a dialect-specific parameter isolation_level accepted by create_engine(), as well as the isolation_level argument as passed to Connection.execution_options(). When using a non-psycopg2 dialect, this feature works by issuing the command SET SESSION CHARACTERISTICS AS TRANSACTION ISOLATION LEVEL <level> for each new connection.

      To set isolation level using create_engine():

      engine = create_engine(
                      "postgresql+pg8000://scott:tiger@localhost/test",
                      isolation_level="READ UNCOMMITTED"
                  )

      To set using per-connection execution options:

      connection = engine.connect()
      connection = connection.execution_options(isolation_level="READ COMMITTED")

      Valid values for isolation_level include:

      • READ COMMITTED
      • READ UNCOMMITTED
      • REPEATABLE READ
      • SERIALIZABLE

      The psycopg2 dialect also offers the special level AUTOCOMMIT. See Psycopg2 Transaction Isolation Level for details.

      Remote / Cross-Schema Table Introspection

      Tables can be introspected from any accessible schema, including inter-schema foreign key relationships. However, care must be taken when specifying the “schema” argument for a given Table, when the given schema is also present in PostgreSQL’s search_path variable for the current connection.

      If a FOREIGN KEY constraint reports that the remote table’s schema is within the current search_path, the “schema” attribute of the resulting Table will be set to None, unless the actual schema of the remote table matches that of the referencing table, and the “schema” argument was explicitly stated on the referencing table.

      The best practice here is to not use the schema argument on Table for any schemas that are present in search_path. search_path defaults to “public”, but care should be taken to inspect the actual value using:

      SHOW search_path;

      Changed in version 0.7.3: Prior to this version, cross-schema foreign keys when the schemas were also in the search_path could make an incorrect assumption if the schemas were explicitly stated on each Table.

      Background on PG’s search_path is at: http://www.postgresql.org/docs/9.0/static/ddl-schemas.html#DDL-SCHEMAS-PATH

      INSERT/UPDATE...RETURNING

      The dialect supports PG 8.2’s INSERT..RETURNING, UPDATE..RETURNING and DELETE..RETURNING syntaxes. INSERT..RETURNING is used by default for single-row INSERT statements in order to fetch newly generated primary key identifiers. To specify an explicit RETURNING clause, use the _UpdateBase.returning() method on a per-statement basis:

      # INSERT..RETURNING
      result = table.insert().returning(table.c.col1, table.c.col2).\
          values(name='foo')
      print result.fetchall()
      
      # UPDATE..RETURNING
      result = table.update().returning(table.c.col1, table.c.col2).\
          where(table.c.name=='foo').values(name='bar')
      print result.fetchall()
      
      # DELETE..RETURNING
      result = table.delete().returning(table.c.col1, table.c.col2).\
          where(table.c.name=='foo')
      print result.fetchall()

      FROM ONLY ...

      The dialect supports PostgreSQL’s ONLY keyword for targeting only a particular table in an inheritance hierarchy. This can be used to produce the SELECT ... FROM ONLY, UPDATE ONLY ..., and DELETE FROM ONLY ... syntaxes. It uses SQLAlchemy’s hints mechanism:

      # SELECT ... FROM ONLY ...
      result = table.select().with_hint(table, 'ONLY', 'postgresql')
      print result.fetchall()
      
      # UPDATE ONLY ...
      table.update(values=dict(foo='bar')).with_hint('ONLY',
                                                     dialect_name='postgresql')
      
      # DELETE FROM ONLY ...
      table.delete().with_hint('ONLY', dialect_name='postgresql')

      Postgresql-Specific Index Options

      Several extensions to the Index construct are available, specific to the PostgreSQL dialect.

      Partial Indexes

      Partial indexes add criterion to the index definition so that the index is applied to a subset of rows. These can be specified on Index using the postgresql_where keyword argument:

      Index('my_index', my_table.c.id, postgresql_where=tbl.c.value > 10)

      Operator Classes

      PostgreSQL allows the specification of an operator class for each column of an index (see http://www.postgresql.org/docs/8.3/interactive/indexes-opclass.html). The Index construct allows these to be specified via the postgresql_ops keyword argument:

      Index('my_index', my_table.c.id, my_table.c.data,
                              postgresql_ops={
                                  'data': 'text_pattern_ops',
                                  'id': 'int4_ops'
                              })

      New in version 0.7.2: postgresql_ops keyword argument to Index construct.

      Note that the keys in the postgresql_ops dictionary are the “key” name of the Column, i.e. the name used to access it from the .c collection of Table, which can be configured to be different than the actual name of the column as expressed in the database.

      Index Types

      PostgreSQL provides several index types: B-Tree, Hash, GiST, and GIN, as well as the ability for users to create their own (see http://www.postgresql.org/docs/8.3/static/indexes-types.html). These can be specified on Index using the postgresql_using keyword argument:

      Index('my_index', my_table.c.data, postgresql_using='gin')

      The value passed to the keyword argument will be simply passed through to the underlying CREATE INDEX command, so it must be a valid index type for your version of PostgreSQL.

      PostgreSQL Data Types

      As with all SQLAlchemy dialects, all UPPERCASE types that are known to be valid with Postgresql are importable from the top level dialect, whether they originate from sqlalchemy.types or from the local dialect:

      from sqlalchemy.dialects.postgresql import \
          ARRAY, BIGINT, BIT, BOOLEAN, BYTEA, CHAR, CIDR, DATE, \
          DOUBLE_PRECISION, ENUM, FLOAT, HSTORE, INET, INTEGER, \
          INTERVAL, MACADDR, NUMERIC, REAL, SMALLINT, TEXT, TIME, \
          TIMESTAMP, UUID, VARCHAR, INT4RANGE, INT8RANGE, NUMRANGE, \
          DATERANGE, TSRANGE, TSTZRANGE

      Types which are specific to PostgreSQL, or have PostgreSQL-specific construction arguments, are as follows:

      class sqlalchemy.dialects.postgresql.array(clauses, **kw)

      Bases: sqlalchemy.sql.expression.Tuple

      A Postgresql ARRAY literal.

      This is used to produce ARRAY literals in SQL expressions, e.g.:

      from sqlalchemy.dialects.postgresql import array
      from sqlalchemy.dialects import postgresql
      from sqlalchemy import select, func
      
      stmt = select([
                      array([1,2]) + array([3,4,5])
                  ])
      
      print stmt.compile(dialect=postgresql.dialect())

      Produces the SQL:

      SELECT ARRAY[%(param_1)s, %(param_2)s] ||
          ARRAY[%(param_3)s, %(param_4)s, %(param_5)s] AS anon_1

      An instance of array will always have the datatype ARRAY. The “inner” type of the array is inferred from the values present, unless the type_ keyword argument is passed:

      array(['foo', 'bar'], type_=CHAR)

      New in version 0.8: Added the array literal type.

      See also:

      postgresql.ARRAY

      class sqlalchemy.dialects.postgresql.ARRAY(item_type, as_tuple=False, dimensions=None)

      Bases: sqlalchemy.types.Concatenable, sqlalchemy.types.TypeEngine

      Postgresql ARRAY type.

      Represents values as Python lists.

      An ARRAY type is constructed given the “type” of element:

      mytable = Table("mytable", metadata,
              Column("data", ARRAY(Integer))
          )

      The above type represents an N-dimensional array, meaning Postgresql will interpret values with any number of dimensions automatically. To produce an INSERT construct that passes in a 1-dimensional array of integers:

      connection.execute(
              mytable.insert(),
              data=[1,2,3]
      )

      The ARRAY type can be constructed given a fixed number of dimensions:

      mytable = Table("mytable", metadata,
              Column("data", ARRAY(Integer, dimensions=2))
          )

      This has the effect of the ARRAY type specifying that number of bracketed blocks when a Table is used in a CREATE TABLE statement, or when the type is used within an expression.cast() construct; it also causes the bind parameter and result set processing of the type to optimize itself to expect exactly that number of dimensions. Note that Postgresql itself still allows N dimensions with such a type.

      SQL expressions of type ARRAY have support for “index” and “slice” behavior. The Python [] operator works normally here, given integer indexes or slices. Note that Postgresql arrays default to 1-based indexing. The operator produces binary expression constructs which will produce the appropriate SQL, both for SELECT statements:

      select([mytable.c.data[5], mytable.c.data[2:7]])

      as well as UPDATE statements when the Update.values() method is used:

      mytable.update().values({
          mytable.c.data[5]: 7,
          mytable.c.data[2:7]: [1, 2, 3]
      })

      ARRAY provides special methods for containment operations, e.g.:

      mytable.c.data.contains([1, 2])

      For a full list of special methods see ARRAY.Comparator.

      New in version 0.8: Added support for index and slice operations to the ARRAY type, including support for UPDATE statements, and special array containment operations.

      The ARRAY type may not be supported on all DBAPIs. It is known to work on psycopg2 and not pg8000.

      See also:

      postgresql.array - produce a literal array value.

      class Comparator(expr)

      Bases: sqlalchemy.types.Comparator

      Define comparison operations for ARRAY.

      all(other, operator=<built-in function eq>)

      Return other operator ALL (array) clause.

      Argument places are switched, because ALL requires array expression to be on the right hand-side.

      E.g.:

      from sqlalchemy.sql import operators
      
      conn.execute(
          select([table.c.data]).where(
                  table.c.data.all(7, operator=operators.lt)
              )
      )
      Parameters:
      • other – expression to be compared
      • operator – an operator object from the sqlalchemy.sql.operators package, defaults to operators.eq().
      any(other, operator=<built-in function eq>)

      Return other operator ANY (array) clause.

      Argument places are switched, because ANY requires array expression to be on the right hand-side.

      E.g.:

      from sqlalchemy.sql import operators
      
      conn.execute(
          select([table.c.data]).where(
                  table.c.data.any(7, operator=operators.lt)
              )
      )
      Parameters:
      • other – expression to be compared
      • operator – an operator object from the sqlalchemy.sql.operators package, defaults to operators.eq().
      contained_by(other)

      Boolean expression. Test if elements are a proper subset of the elements of the argument array expression.

      contains(other, **kwargs)

      Boolean expression. Test if elements are a superset of the elements of the argument array expression.

      overlap(other)

      Boolean expression. Test if array has elements in common with an argument array expression.

      ARRAY.__init__(item_type, as_tuple=False, dimensions=None)

      Construct an ARRAY.

      E.g.:

      Column('myarray', ARRAY(Integer))

      Arguments are:

      Parameters:
      • item_type – The data type of items of this array. Note that dimensionality is irrelevant here, so multi-dimensional arrays like INTEGER[][], are constructed as ARRAY(Integer), not as ARRAY(ARRAY(Integer)) or such.
      • as_tuple=False – Specify whether return results should be converted to tuples from lists. DBAPIs such as psycopg2 return lists by default. When tuples are returned, the results are hashable.
      • dimensions – if non-None, the ARRAY will assume a fixed number of dimensions. This will cause the DDL emitted for this ARRAY to include the exact number of bracket clauses [], and will also optimize the performance of the type overall. Note that PG arrays are always implicitly “non-dimensioned”, meaning they can store any number of dimensions no matter how they were declared.
      class sqlalchemy.dialects.postgresql.Any(left, right, operator=<built-in function eq>)

      Bases: sqlalchemy.sql.expression.ColumnElement

      Represent the clause left operator ANY (right). right must be an array expression.

      See also

      postgresql.ARRAY

      postgresql.ARRAY.Comparator.any() - ARRAY-bound method

      class sqlalchemy.dialects.postgresql.All(left, right, operator=<built-in function eq>)

      Bases: sqlalchemy.sql.expression.ColumnElement

      Represent the clause left operator ALL (right). right must be an array expression.

      See also

      postgresql.ARRAY

      postgresql.ARRAY.Comparator.all() - ARRAY-bound method

      class sqlalchemy.dialects.postgresql.BIT(length=None, varying=False)

      Bases: sqlalchemy.types.TypeEngine

      class sqlalchemy.dialects.postgresql.BYTEA(length=None)

      Bases: sqlalchemy.types.LargeBinary

      __init__(length=None)

      Construct a LargeBinary type.

      Parameters:length – optional, a length for the column for use in DDL statements, for those BLOB types that accept a length (i.e. MySQL). It does not produce a small BINARY/VARBINARY type - use the BINARY/VARBINARY types specifically for those. May be safely omitted if no CREATE TABLE will be issued. Certain databases may require a length for use in DDL, and will raise an exception when the CREATE TABLE DDL is issued.
      class sqlalchemy.dialects.postgresql.CIDR(*args, **kwargs)

      Bases: sqlalchemy.types.TypeEngine

      __init__(*args, **kwargs)

      Support implementations that were passing arguments

      class sqlalchemy.dialects.postgresql.DOUBLE_PRECISION(precision=None, asdecimal=False, **kwargs)

      Bases: sqlalchemy.types.Float

      __init__(precision=None, asdecimal=False, **kwargs)

      Construct a Float.

      Parameters:
      • precision – the numeric precision for use in DDL CREATE TABLE.
      • asdecimal – the same flag as that of Numeric, but defaults to False. Note that setting this flag to True results in floating point conversion.
      • **kwargs – deprecated. Additional arguments here are ignored by the default Float type. For database specific floats that support additional arguments, see that dialect’s documentation for details, such as sqlalchemy.dialects.mysql.FLOAT.
      class sqlalchemy.dialects.postgresql.ENUM(*enums, **kw)

      Bases: sqlalchemy.types.Enum

      Postgresql ENUM type.

      This is a subclass of types.Enum which includes support for PG’s CREATE TYPE.

      ENUM is used automatically when using the types.Enum type on PG assuming the native_enum is left as True. However, the ENUM class can also be instantiated directly in order to access some additional Postgresql-specific options, namely finer control over whether or not CREATE TYPE should be emitted.

      Note that both types.Enum as well as ENUM feature create/drop methods; the base types.Enum type ultimately delegates to the create() and drop() methods present here.

      __init__(*enums, **kw)

      Construct an ENUM.

      Arguments are the same as that of types.Enum, but also including the following parameters.

      Parameters:create_type

      Defaults to True. Indicates that CREATE TYPE should be emitted, after optionally checking for the presence of the type, when the parent table is being created; and additionally that DROP TYPE is called when the table is dropped. When False, no check will be performed and no CREATE TYPE or DROP TYPE is emitted, unless create() or drop() are called directly. Setting to False is helpful when invoking a creation scheme to a SQL file without access to the actual database - the create() and drop() methods can be used to emit SQL to a target bind.

      New in version 0.7.4.

      create(bind=None, checkfirst=True)

      Emit CREATE TYPE for this ENUM.

      If the underlying dialect does not support Postgresql CREATE TYPE, no action is taken.

      Parameters:
      • bind – a connectable Engine, Connection, or similar object to emit SQL.
      • checkfirst – if True, a query against the PG catalog will be first performed to see if the type does not exist already before creating.
      drop(bind=None, checkfirst=True)

      Emit DROP TYPE for this ENUM.

      If the underlying dialect does not support Postgresql DROP TYPE, no action is taken.

      Parameters:
      • bind – a connectable Engine, Connection, or similar object to emit SQL.
      • checkfirst – if True, a query against the PG catalog will be first performed to see if the type actually exists before dropping.
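
      A sketch of managing CREATE TYPE manually via create_type=False; the type, table, and connection URL below are hypothetical:

      from sqlalchemy import Table, Column, Integer, MetaData, create_engine
      from sqlalchemy.dialects.postgresql import ENUM
      
      metadata = MetaData()
      
      # no CREATE TYPE / DROP TYPE is emitted automatically for this type
      mood = ENUM('sad', 'ok', 'happy', name='mood', create_type=False)
      
      t = Table('person', metadata,
              Column('id', Integer, primary_key=True),
              Column('current_mood', mood)
          )
      
      engine = create_engine("postgresql://scott:tiger@localhost/test")
      mood.create(engine, checkfirst=True)   # emit CREATE TYPE explicitly
      metadata.create_all(engine)            # then emit CREATE TABLE
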
      class sqlalchemy.dialects.postgresql.HSTORE(*args, **kwargs)

      Bases: sqlalchemy.types.Concatenable, sqlalchemy.types.TypeEngine

      Represent the Postgresql HSTORE type.

      The HSTORE type stores dictionaries containing strings, e.g.:

      data_table = Table('data_table', metadata,
          Column('id', Integer, primary_key=True),
          Column('data', HSTORE)
      )
      
      with engine.connect() as conn:
          conn.execute(
              data_table.insert(),
              data = {"key1": "value1", "key2": "value2"}
          )

      HSTORE provides for a wide range of operations, including:

      • Index operations:

        data_table.c.data['some key'] == 'some value'
      • Containment operations:

        data_table.c.data.has_key('some key')
        
        data_table.c.data.has_all(['one', 'two', 'three'])
      • Concatenation:

        data_table.c.data + {"k1": "v1"}

      For a full list of special methods see HSTORE.comparator_factory.

      For usage with the SQLAlchemy ORM, it may be desirable to combine the usage of HSTORE with MutableDict dictionary now part of the sqlalchemy.ext.mutable extension. This extension will allow “in-place” changes to the dictionary, e.g. addition of new keys or replacement/removal of existing keys to/from the current dictionary, to produce events which will be detected by the unit of work:

      from sqlalchemy.ext.mutable import MutableDict
      
      class MyClass(Base):
          __tablename__ = 'data_table'
      
          id = Column(Integer, primary_key=True)
          data = Column(MutableDict.as_mutable(HSTORE))
      
      my_object = session.query(MyClass).one()
      
      # in-place mutation, requires Mutable extension
      # in order for the ORM to detect
      my_object.data['some_key'] = 'some value'
      
      session.commit()

      When the sqlalchemy.ext.mutable extension is not used, the ORM will not be alerted to any changes to the contents of an existing dictionary, unless that dictionary value is re-assigned to the HSTORE-attribute itself, thus generating a change event.

      New in version 0.8.

      See also

      hstore - render the Postgresql hstore() function.

      class comparator_factory(expr)

      Bases: sqlalchemy.types.Comparator

      Define comparison operations for HSTORE.

      array()

      Text array expression. Returns array of alternating keys and values.

      contained_by(other)

      Boolean expression. Test if keys are a proper subset of the keys of the argument hstore expression.

      contains(other, **kwargs)

      Boolean expression. Test if keys are a superset of the keys of the argument hstore expression.

      defined(key)

      Boolean expression. Test for presence of a non-NULL value for the key. Note that the key may be a SQLA expression.

      delete(key)

      HStore expression. Returns the contents of this hstore with the given key deleted. Note that the key may be a SQLA expression.

      has_all(other)

      Boolean expression. Test for presence of all keys in the PG array.

      has_any(other)

      Boolean expression. Test for presence of any key in the PG array.

      has_key(other)

      Boolean expression. Test for presence of a key. Note that the key may be a SQLA expression.

      keys()

      Text array expression. Returns array of keys.

      matrix()

      Text array expression. Returns array of [key, value] pairs.

      slice(array)

      HStore expression. Returns a subset of an hstore defined by array of keys.

      vals()

      Text array expression. Returns array of values.

      class sqlalchemy.dialects.postgresql.hstore(*args, **kwargs)

      Bases: sqlalchemy.sql.functions.GenericFunction

      Construct an hstore value within a SQL expression using the Postgresql hstore() function.

      The hstore function accepts one or two arguments as described in the Postgresql documentation.

      E.g.:

      from sqlalchemy.dialects.postgresql import array, hstore
      
      select([hstore('key1', 'value1')])
      
      select([
              hstore(
                  array(['key1', 'key2', 'key3']),
                  array(['value1', 'value2', 'value3'])
              )
          ])

      New in version 0.8.

      See also

      HSTORE - the Postgresql HSTORE datatype.

      type

      alias of HSTORE

      class sqlalchemy.dialects.postgresql.INET(*args, **kwargs)

      Bases: sqlalchemy.types.TypeEngine

      __init__(*args, **kwargs)

      Support implementations that were passing arguments

      class sqlalchemy.dialects.postgresql.INTERVAL(precision=None)

      Bases: sqlalchemy.types.TypeEngine

      Postgresql INTERVAL type.

      The INTERVAL type may not be supported on all DBAPIs. It is known to work on psycopg2 and not pg8000 or zxjdbc.

      class sqlalchemy.dialects.postgresql.MACADDR(*args, **kwargs)

      Bases: sqlalchemy.types.TypeEngine

      __init__(*args, **kwargs)

      Support implementations that were passing arguments

      class sqlalchemy.dialects.postgresql.REAL(precision=None, asdecimal=False, **kwargs)

      Bases: sqlalchemy.types.Float

      The SQL REAL type.

      __init__(precision=None, asdecimal=False, **kwargs)

      Construct a Float.

      Parameters:
      • precision – the numeric precision for use in DDL CREATE TABLE.
      • asdecimal – the same flag as that of Numeric, but defaults to False. Note that setting this flag to True results in floating point conversion.
      • **kwargs – deprecated. Additional arguments here are ignored by the default Float type. For database specific floats that support additional arguments, see that dialect’s documentation for details, such as sqlalchemy.dialects.mysql.FLOAT.
      class sqlalchemy.dialects.postgresql.UUID(as_uuid=False)

      Bases: sqlalchemy.types.TypeEngine

      Postgresql UUID type.

      Represents the UUID column type, interpreting data either as natively returned by the DBAPI or as Python uuid objects.

      The UUID type may not be supported on all DBAPIs. It is known to work on psycopg2 and not pg8000.

      __init__(as_uuid=False)

      Construct a UUID type.

      Parameters:as_uuid=False – if True, values will be interpreted as Python uuid objects, converting to/from string via the DBAPI.
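
      A brief sketch using a hypothetical table, with values round-tripping as Python uuid.UUID objects:

      import uuid
      
      from sqlalchemy import Table, Column, MetaData
      from sqlalchemy.dialects.postgresql import UUID
      
      metadata = MetaData()
      
      tokens = Table('tokens', metadata,
              Column('id', UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
          )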

      Range Types

      The new range column types found in PostgreSQL 9.2 onwards are catered for by the following types:

      class sqlalchemy.dialects.postgresql.INT4RANGE(*args, **kwargs)

      Bases: sqlalchemy.dialects.postgresql.ranges.RangeOperators, sqlalchemy.types.TypeEngine

      Represent the Postgresql INT4RANGE type.

      New in version 0.8.2.

      class sqlalchemy.dialects.postgresql.INT8RANGE(*args, **kwargs)

      Bases: sqlalchemy.dialects.postgresql.ranges.RangeOperators, sqlalchemy.types.TypeEngine

      Represent the Postgresql INT8RANGE type.

      New in version 0.8.2.

      class sqlalchemy.dialects.postgresql.NUMRANGE(*args, **kwargs)

      Bases: sqlalchemy.dialects.postgresql.ranges.RangeOperators, sqlalchemy.types.TypeEngine

      Represent the Postgresql NUMRANGE type.

      New in version 0.8.2.

      class sqlalchemy.dialects.postgresql.DATERANGE(*args, **kwargs)

      Bases: sqlalchemy.dialects.postgresql.ranges.RangeOperators, sqlalchemy.types.TypeEngine

      Represent the Postgresql DATERANGE type.

      New in version 0.8.2.

      class sqlalchemy.dialects.postgresql.TSRANGE(*args, **kwargs)

      Bases: sqlalchemy.dialects.postgresql.ranges.RangeOperators, sqlalchemy.types.TypeEngine

      Represent the Postgresql TSRANGE type.

      New in version 0.8.2.

      class sqlalchemy.dialects.postgresql.TSTZRANGE(*args, **kwargs)

      Bases: sqlalchemy.dialects.postgresql.ranges.RangeOperators, sqlalchemy.types.TypeEngine

      Represent the Postgresql TSTZRANGE type.

      New in version 0.8.2.

      The types above get most of their functionality from the following mixin:

      class sqlalchemy.dialects.postgresql.ranges.RangeOperators

      This mixin provides functionality for the Range Operators listed in Table 9-44 of the postgres documentation for Range Functions and Operators. It is used by all the range types provided in the postgres dialect and can likely be used for any range types you create yourself.

      No extra support is provided for the Range Functions listed in Table 9-45 of the postgres documentation. For these, the normal func() object should be used.

      New in version 0.8.2: Support for Postgresql RANGE operations.

      class comparator_factory(expr)

      Bases: sqlalchemy.types.Comparator

      Define comparison operations for range types.

      __ne__(other)

      Boolean expression. Returns true if two ranges are not equal

      adjacent_to(other)

      Boolean expression. Returns true if the range in the column is adjacent to the range in the operand.

      contained_by(other)

      Boolean expression. Returns true if the column is contained within the right hand operand.

      contains(other, **kw)

      Boolean expression. Returns true if the right hand operand, which can be an element or a range, is contained within the column.

      not_extend_left_of(other)

      Boolean expression. Returns true if the range in the column does not extend left of the range in the operand.

      not_extend_right_of(other)

      Boolean expression. Returns true if the range in the column does not extend right of the range in the operand.

      overlaps(other)

      Boolean expression. Returns true if the column overlaps (has points in common with) the right hand operand.

      strictly_left_of(other)

      Boolean expression. Returns true if the column is strictly left of the right hand operand.

      strictly_right_of(other)

      Boolean expression. Returns true if the column is strictly right of the right hand operand.

      Warning

      The range type DDL support should work with any Postgres DBAPI driver, however the data types returned may vary. If you are using psycopg2, it’s recommended to upgrade to version 2.5 or later before using these column types.
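
      A brief usage sketch of a range column together with one of the comparator methods above; the table and values are hypothetical:

      import datetime
      
      from sqlalchemy import Table, Column, Integer, MetaData, select
      from sqlalchemy.dialects.postgresql import TSRANGE
      
      metadata = MetaData()
      
      booking = Table('booking', metadata,
              Column('id', Integer, primary_key=True),
              Column('during', TSRANGE)
          )
      
      # select rows whose range contains a given timestamp
      stmt = select([booking]).where(
              booking.c.during.contains(datetime.datetime(2013, 12, 8, 12, 0))
          )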

      PostgreSQL Constraint Types

      SQLAlchemy supports Postgresql EXCLUDE constraints via the ExcludeConstraint class:

      class sqlalchemy.dialects.postgresql.ExcludeConstraint(*elements, **kw)

      Bases: sqlalchemy.schema.ColumnCollectionConstraint

      A table-level EXCLUDE constraint.

      Defines an EXCLUDE constraint as described in the postgres documentation.

      __init__(*elements, **kw)
      Parameters:
      • *elements – A sequence of two tuples of the form (column, operator) where column must be a column name or Column object and operator must be a string containing the operator to use.
      • name – Optional, the in-database name of this constraint.
      • deferrable – Optional bool. If set, emit DEFERRABLE or NOT DEFERRABLE when issuing DDL for this constraint.
      • initially – Optional string. If set, emit INITIALLY <value> when issuing DDL for this constraint.
      • using – Optional string. If set, emit USING <index_method> when issuing DDL for this constraint. Defaults to ‘gist’.
      • where – Optional string. If set, emit WHERE <predicate> when issuing DDL for this constraint.

      For example:

      from sqlalchemy.dialects.postgresql import ExcludeConstraint, TSRANGE
      
      class RoomBookings(Base):
      
          room = Column(Integer(), primary_key=True)
          during = Column(TSRANGE())
      
          __table_args__ = (
              ExcludeConstraint(('room', '='), ('during', '&&')),
          )

      psycopg2

      Support for the PostgreSQL database via the psycopg2 driver.

      DBAPI

      Documentation and download information (if applicable) for psycopg2 is available at: http://pypi.python.org/pypi/psycopg2/

      Connecting

      Connect String:

      postgresql+psycopg2://user:password@host:port/dbname[?key=value&key=value...]

      psycopg2 Connect Arguments

      psycopg2-specific keyword arguments which are accepted by create_engine() are:

      • server_side_cursors: Enable the usage of “server side cursors” for SQL statements which support this feature. What this essentially means from a psycopg2 point of view is that the cursor is created using a name, e.g. connection.cursor('some name'), which has the effect that result rows are not immediately pre-fetched and buffered after statement execution, but are instead left on the server and only retrieved as needed. SQLAlchemy’s ResultProxy uses special row-buffering behavior when this feature is enabled, such that groups of 100 rows at a time are fetched over the wire to reduce conversational overhead. Note that the stream_results=True execution option is a more targeted way of enabling this mode on a per-execution basis.
      • use_native_unicode: Enable the usage of Psycopg2 “native unicode” mode per connection. True by default.
• isolation_level: This option, available for all Postgresql dialects, includes the AUTOCOMMIT isolation level when using the psycopg2 dialect. See Psycopg2 Transaction Isolation Level.

      Unix Domain Connections

      psycopg2 supports connecting via Unix domain connections. When the host portion of the URL is omitted, SQLAlchemy passes None to psycopg2, which specifies Unix-domain communication rather than TCP/IP communication:

      create_engine("postgresql+psycopg2://user:password@/dbname")

By default, the socket file used is the Unix-domain socket in /tmp, or whatever socket directory was specified when PostgreSQL was built. This value can be overridden by passing a pathname to psycopg2, using host as an additional keyword argument:

      create_engine("postgresql+psycopg2://user:password@/dbname?host=/var/lib/postgresql")

      See also:

      PQconnectdbParams

      Per-Statement/Connection Execution Options

      The following DBAPI-specific options are respected when used with Connection.execution_options(), Executable.execution_options(), Query.execution_options(), in addition to those not specific to DBAPIs:

      • isolation_level - Set the transaction isolation level for the lifespan of a Connection (can only be set on a connection, not a statement or query). See Psycopg2 Transaction Isolation Level.
      • stream_results - Enable or disable usage of psycopg2 server side cursors - this feature makes use of “named” cursors in combination with special result handling methods so that result rows are not fully buffered. If None or not set, the server_side_cursors option of the Engine is used.
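
      For example, stream_results can be enabled for a single statement; a minimal sketch follows, in which the engine, the table name, and the process() handler are hypothetical:

      # assumes an Engine created against the psycopg2 dialect
      conn = engine.connect()
      result = conn.execution_options(stream_results=True).execute(
          "SELECT * FROM very_large_table")
      for row in result:
          process(row)   # hypothetical per-row handler
      conn.close()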

      Unicode

      By default, the psycopg2 driver uses the psycopg2.extensions.UNICODE extension, such that the DBAPI receives and returns all strings as Python Unicode objects directly - SQLAlchemy passes these values through without change. Psycopg2 here will encode/decode string values based on the current “client encoding” setting; by default this is the value in the postgresql.conf file, which often defaults to SQL_ASCII. Typically, this can be changed to utf-8, as a more useful default:

      #client_encoding = sql_ascii # actually, defaults to database
                                   # encoding
      client_encoding = utf8

      A second way to affect the client encoding is to set it within Psycopg2 locally. SQLAlchemy will call psycopg2’s set_client_encoding() method (see: http://initd.org/psycopg/docs/connection.html#connection.set_client_encoding) on all new connections based on the value passed to create_engine() using the client_encoding parameter:

      engine = create_engine("postgresql://user:pass@host/dbname", client_encoding='utf8')

      This overrides the encoding specified in the Postgresql client configuration.

      New in version 0.7.3: The psycopg2-specific client_encoding parameter to create_engine().

SQLAlchemy can also be instructed to skip the usage of the psycopg2 UNICODE extension and to instead utilize its own unicode encode/decode services, which are normally reserved only for those DBAPIs that don’t fully support unicode directly. Passing use_native_unicode=False to create_engine() will disable usage of psycopg2.extensions.UNICODE. SQLAlchemy will instead encode data itself into Python bytestrings on the way in and coerce from bytes on the way back, using the value of the create_engine() encoding parameter, which defaults to utf-8. SQLAlchemy’s own unicode encode/decode functionality is steadily becoming obsolete as more DBAPIs support unicode fully along with the approach of Python 3; in modern usage psycopg2 should be relied upon to handle unicode.
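
      A minimal sketch of that configuration (the connection URL is a placeholder):

      from sqlalchemy import create_engine

      # let SQLAlchemy perform encode/decode instead of psycopg2
      engine = create_engine(
          "postgresql+psycopg2://user:pass@host/dbname",
          use_native_unicode=False,
          encoding='utf-8')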

      Transactions

      The psycopg2 dialect fully supports SAVEPOINT and two-phase commit operations.

      Psycopg2 Transaction Isolation Level

      As discussed in Transaction Isolation Level, all Postgresql dialects support setting of transaction isolation level both via the isolation_level parameter passed to create_engine(), as well as the isolation_level argument used by Connection.execution_options(). When using the psycopg2 dialect, these options make use of psycopg2’s set_isolation_level() connection method, rather than emitting a Postgresql directive; this is because psycopg2’s API-level setting is always emitted at the start of each transaction in any case.

      The psycopg2 dialect supports these constants for isolation level:

      • READ COMMITTED
      • READ UNCOMMITTED
      • REPEATABLE READ
      • SERIALIZABLE
      • AUTOCOMMIT

      New in version 0.8.2: support for AUTOCOMMIT isolation level when using psycopg2.
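
      As a sketch, the isolation level can be set engine-wide via create_engine(), or per connection via Connection.execution_options(); the URL below is a placeholder:

      from sqlalchemy import create_engine

      # engine-wide default
      engine = create_engine(
          "postgresql+psycopg2://scott:tiger@localhost/test",
          isolation_level="REPEATABLE READ")

      # per-connection override, including psycopg2's AUTOCOMMIT
      conn = engine.connect()
      autocommit_conn = conn.execution_options(isolation_level="AUTOCOMMIT")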

      NOTICE logging

      The psycopg2 dialect will log Postgresql NOTICE messages via the sqlalchemy.dialects.postgresql logger:

      import logging
      logging.getLogger('sqlalchemy.dialects.postgresql').setLevel(logging.INFO)

      HSTORE type

      The psycopg2 dialect will make use of the psycopg2.extensions.register_hstore() extension when using the HSTORE type. This replaces SQLAlchemy’s pure-Python HSTORE coercion which takes effect for other DBAPIs.
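
      A minimal sketch of declaring and populating an HSTORE column; the data_table name is hypothetical, and the hstore extension must be installed in the target database:

      from sqlalchemy import MetaData, Table, Column, Integer
      from sqlalchemy.dialects.postgresql import HSTORE

      metadata = MetaData()

      data_table = Table('data_table', metadata,
          Column('id', Integer, primary_key=True),
          Column('data', HSTORE))

      # with psycopg2, values round-trip as plain Python dictionaries
      ins = data_table.insert().values(data={"key1": "value1", "key2": "value2"})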

      py-postgresql

      Support for the PostgreSQL database via the py-postgresql driver.

      DBAPI

      Documentation and download information (if applicable) for py-postgresql is available at: http://python.projects.pgfoundry.org/

      Connecting

      Connect String:

      postgresql+pypostgresql://user:password@host:port/dbname[?key=value&key=value...]

      pg8000

      Support for the PostgreSQL database via the pg8000 driver.

      DBAPI

      Documentation and download information (if applicable) for pg8000 is available at: http://pybrary.net/pg8000/

      Connecting

      Connect String:

      postgresql+pg8000://user:password@host:port/dbname[?key=value&key=value...]

      Unicode

      pg8000 requires that the postgresql client encoding be configured in the postgresql.conf file in order to use encodings other than ascii. Set this value to the same value as the “encoding” parameter on create_engine(), usually “utf-8”.
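
      A minimal sketch, assuming the client encoding has been set to UTF-8 in postgresql.conf and the URL is a placeholder:

      from sqlalchemy import create_engine

      engine = create_engine(
          "postgresql+pg8000://user:password@host/dbname",
          encoding='utf-8')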

      Interval

Passing data from/to the Interval type is not yet supported.

      zxjdbc

      Support for the PostgreSQL database via the zxJDBC for Jython driver.

      DBAPI

      Drivers for this database are available at: http://jdbc.postgresql.org/

      Connecting

      Connect String:

      postgresql+zxjdbc://scott:tiger@localhost/db

SQLAlchemy-0.8.4/doc/dialects/sqlite.html

      SQLite

      Support for the SQLite database.

      DBAPI Support

      The following dialect/DBAPI options are available. Please refer to individual DBAPI sections for connect information.

      Date and Time Types

SQLite does not have built-in DATE, TIME, or DATETIME types, and pysqlite does not provide out-of-the-box functionality for translating values between Python datetime objects and a SQLite-supported format. SQLAlchemy’s own DateTime and related types provide date formatting and parsing functionality when SQLite is used. The implementation classes are DATETIME, DATE and TIME. These types represent dates and times as ISO formatted strings, which also nicely support ordering. There’s no reliance on typical “libc” internals for these functions so historical dates are fully supported.

      Auto Incrementing Behavior

      Background on SQLite’s autoincrement is at: http://sqlite.org/autoinc.html

      Two things to note:

      • The AUTOINCREMENT keyword is not required for SQLite tables to generate primary key values automatically. AUTOINCREMENT only means that the algorithm used to generate ROWID values should be slightly different.
      • SQLite does not generate primary key (i.e. ROWID) values, even for one column, if the table has a composite (i.e. multi-column) primary key. This is regardless of the AUTOINCREMENT keyword being present or not.

      To specifically render the AUTOINCREMENT keyword on the primary key column when rendering DDL, add the flag sqlite_autoincrement=True to the Table construct:

      Table('sometable', metadata,
              Column('id', Integer, primary_key=True),
              sqlite_autoincrement=True)

      Transaction Isolation Level

      create_engine() accepts an isolation_level parameter which results in the command PRAGMA read_uncommitted <level> being invoked for every new connection. Valid values for this parameter are SERIALIZABLE and READ UNCOMMITTED corresponding to a value of 0 and 1, respectively. See the section Serializable Transaction Isolation for an important workaround when using serializable isolation with Pysqlite.
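
      A minimal sketch (the file name is a placeholder):

      from sqlalchemy import create_engine

      # PRAGMA read_uncommitted = 1 is emitted for each new connection
      engine = create_engine(
          "sqlite:///myfile.db",
          isolation_level='READ UNCOMMITTED')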

      Database Locking Behavior / Concurrency

      Note that SQLite is not designed for a high level of concurrency. The database itself, being a file, is locked completely during write operations and within transactions, meaning exactly one connection has exclusive access to the database during this period - all other connections will be blocked during this time.

      The Python DBAPI specification also calls for a connection model that is always in a transaction; there is no BEGIN method, only commit and rollback. This implies that a SQLite DBAPI driver would technically allow only serialized access to a particular database file at all times. The pysqlite driver attempts to ameliorate this by deferring the actual BEGIN statement until the first DML (INSERT, UPDATE, or DELETE) is received within a transaction. While this breaks serializable isolation, it at least delays the exclusive locking inherent in SQLite’s design.

      SQLAlchemy’s default mode of usage with the ORM is known as “autocommit=False”, which means the moment the Session begins to be used, a transaction is begun. As the Session is used, the autoflush feature, also on by default, will flush out pending changes to the database before each query. The effect of this is that a Session used in its default mode will often emit DML early on, long before the transaction is actually committed. This again will have the effect of serializing access to the SQLite database. If highly concurrent reads are desired against the SQLite database, it is advised that the autoflush feature be disabled, and potentially even that autocommit be re-enabled, which has the effect of each SQL statement and flush committing changes immediately.
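
      A minimal sketch of a Session configured without autoflush, assuming an engine already bound to the SQLite database:

      from sqlalchemy.orm import sessionmaker

      Session = sessionmaker(bind=engine, autoflush=False)
      session = Session()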

      For more information on SQLite’s lack of concurrency by design, please see Situations Where Another RDBMS May Work Better - High Concurrency near the bottom of the page.

      Foreign Key Support

SQLite supports FOREIGN KEY syntax when emitting CREATE statements for tables; however, by default these constraints have no effect on the operation of the table.

      Constraint checking on SQLite has three prerequisites:

      • At least version 3.6.19 of SQLite must be in use
• The SQLite library must be compiled without the SQLITE_OMIT_FOREIGN_KEY or SQLITE_OMIT_TRIGGER symbols.
      • The PRAGMA foreign_keys = ON statement must be emitted on all connections before use.

      SQLAlchemy allows for the PRAGMA statement to be emitted automatically for new connections through the usage of events:

      from sqlalchemy.engine import Engine
      from sqlalchemy import event
      
      @event.listens_for(Engine, "connect")
      def set_sqlite_pragma(dbapi_connection, connection_record):
          cursor = dbapi_connection.cursor()
          cursor.execute("PRAGMA foreign_keys=ON")
          cursor.close()

      See also

      SQLite Foreign Key Support - on the SQLite web site.

      Events - SQLAlchemy event API.

      SQLite Data Types

      As with all SQLAlchemy dialects, all UPPERCASE types that are known to be valid with SQLite are importable from the top level dialect, whether they originate from sqlalchemy.types or from the local dialect:

      from sqlalchemy.dialects.sqlite import \
                  BLOB, BOOLEAN, CHAR, DATE, DATETIME, DECIMAL, FLOAT, \
                  INTEGER, NUMERIC, SMALLINT, TEXT, TIME, TIMESTAMP, \
                  VARCHAR
      class sqlalchemy.dialects.sqlite.DATETIME(*args, **kwargs)

      Bases: sqlalchemy.dialects.sqlite.base._DateTimeMixin, sqlalchemy.types.DateTime

      Represent a Python datetime object in SQLite using a string.

      The default string storage format is:

      "%(year)04d-%(month)02d-%(day)02d %(hour)02d:%(min)02d:%(second)02d.%(microsecond)06d"

      e.g.:

      2011-03-15 12:05:57.10558

      The storage format can be customized to some degree using the storage_format and regexp parameters, such as:

      import re
      from sqlalchemy.dialects.sqlite import DATETIME
      
      dt = DATETIME(
    storage_format="%(year)04d/%(month)02d/%(day)02d %(hour)02d:%(minute)02d:%(second)02d",
    regexp=r"(\d+)/(\d+)/(\d+) (\d+):(\d+):(\d+)"
      )
      Parameters:
      • storage_format – format string which will be applied to the dict with keys year, month, day, hour, minute, second, and microsecond.
• regexp – regular expression which will be applied to incoming result rows. If the regexp contains named groups, the resulting match dict is applied to the Python datetime() constructor as keyword arguments. Otherwise, if positional groups are used, the datetime() constructor is called with positional arguments via *map(int, match_obj.groups(0)).
      class sqlalchemy.dialects.sqlite.DATE(storage_format=None, regexp=None, **kw)

      Bases: sqlalchemy.dialects.sqlite.base._DateTimeMixin, sqlalchemy.types.Date

      Represent a Python date object in SQLite using a string.

      The default string storage format is:

      "%(year)04d-%(month)02d-%(day)02d"

      e.g.:

      2011-03-15

      The storage format can be customized to some degree using the storage_format and regexp parameters, such as:

      import re
      from sqlalchemy.dialects.sqlite import DATE
      
      d = DATE(
              storage_format="%(month)02d/%(day)02d/%(year)04d",
              regexp=re.compile("(?P<month>\d+)/(?P<day>\d+)/(?P<year>\d+)")
          )
      Parameters:
      • storage_format – format string which will be applied to the dict with keys year, month, and day.
• regexp – regular expression which will be applied to incoming result rows. If the regexp contains named groups, the resulting match dict is applied to the Python date() constructor as keyword arguments. Otherwise, if positional groups are used, the date() constructor is called with positional arguments via *map(int, match_obj.groups(0)).
      class sqlalchemy.dialects.sqlite.TIME(*args, **kwargs)

      Bases: sqlalchemy.dialects.sqlite.base._DateTimeMixin, sqlalchemy.types.Time

      Represent a Python time object in SQLite using a string.

      The default string storage format is:

      "%(hour)02d:%(minute)02d:%(second)02d.%(microsecond)06d"

      e.g.:

      12:05:57.10558

      The storage format can be customized to some degree using the storage_format and regexp parameters, such as:

      import re
      from sqlalchemy.dialects.sqlite import TIME
      
      t = TIME(
          storage_format="%(hour)02d-%(minute)02d-%(second)02d-%(microsecond)06d",
    regexp=re.compile("(\d+)-(\d+)-(\d+)(?:-(\d+))?")
      )
      Parameters:
      • storage_format – format string which will be applied to the dict with keys hour, minute, second, and microsecond.
• regexp – regular expression which will be applied to incoming result rows. If the regexp contains named groups, the resulting match dict is applied to the Python time() constructor as keyword arguments. Otherwise, if positional groups are used, the time() constructor is called with positional arguments via *map(int, match_obj.groups(0)).

      Pysqlite

      Support for the SQLite database via the pysqlite driver.

      Note that pysqlite is the same driver as the sqlite3 module included with the Python distribution.

      DBAPI

      Documentation and download information (if applicable) for pysqlite is available at: http://docs.python.org/library/sqlite3.html

      Connecting

      Connect String:

      sqlite+pysqlite:///file_path

      Driver

      When using Python 2.5 and above, the built in sqlite3 driver is already installed and no additional installation is needed. Otherwise, the pysqlite2 driver needs to be present. This is the same driver as sqlite3, just with a different name.

      The pysqlite2 driver will be loaded first, and if not found, sqlite3 is loaded. This allows an explicitly installed pysqlite driver to take precedence over the built in one. As with all dialects, a specific DBAPI module may be provided to create_engine() to control this explicitly:

      from sqlite3 import dbapi2 as sqlite
      e = create_engine('sqlite+pysqlite:///file.db', module=sqlite)

      Connect Strings

      The file specification for the SQLite database is taken as the “database” portion of the URL. Note that the format of a SQLAlchemy url is:

      driver://user:pass@host/database

      This means that the actual filename to be used starts with the characters to the right of the third slash. So connecting to a relative filepath looks like:

      # relative path
      e = create_engine('sqlite:///path/to/database.db')

      An absolute path, which is denoted by starting with a slash, means you need four slashes:

      # absolute path
      e = create_engine('sqlite:////path/to/database.db')

      To use a Windows path, regular drive specifications and backslashes can be used. Double backslashes are probably needed:

      # absolute path on Windows
      e = create_engine('sqlite:///C:\\path\\to\\database.db')

      The sqlite :memory: identifier is the default if no filepath is present. Specify sqlite:// and nothing else:

      # in-memory database
      e = create_engine('sqlite://')

      Compatibility with sqlite3 “native” date and datetime types

The pysqlite driver includes the sqlite3.PARSE_DECLTYPES and sqlite3.PARSE_COLNAMES options, which have the effect that any column or expression explicitly cast as “date” or “timestamp” will be converted to a Python date or datetime object. The date and datetime types provided with the pysqlite dialect are not currently compatible with these options, since they render the ISO date/datetime including microseconds, which pysqlite’s driver does not. Additionally, SQLAlchemy does not at this time automatically render the “cast” syntax required for the freestanding functions “current_timestamp” and “current_date” to return datetime/date types natively. Unfortunately, pysqlite does not provide the standard DBAPI types in cursor.description, leaving SQLAlchemy with no way to detect these types on the fly without expensive per-row type checks.

Keeping in mind that pysqlite’s parsing option is neither recommended nor necessary for use with SQLAlchemy, usage of PARSE_DECLTYPES can nonetheless be forced by configuring “native_datetime=True” on create_engine():

      engine = create_engine('sqlite://',
          connect_args={'detect_types': sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES},
          native_datetime=True
      )

      With this flag enabled, the DATE and TIMESTAMP types (but note - not the DATETIME or TIME types...confused yet ?) will not perform any bind parameter or result processing. Execution of “func.current_date()” will return a string. “func.current_timestamp()” is registered as returning a DATETIME type in SQLAlchemy, so this function still receives SQLAlchemy-level result processing.

      Threading/Pooling Behavior

      Pysqlite’s default behavior is to prohibit the usage of a single connection in more than one thread. This is originally intended to work with older versions of SQLite that did not support multithreaded operation under various circumstances. In particular, older SQLite versions did not allow a :memory: database to be used in multiple threads under any circumstances.

Pysqlite does include a now-undocumented flag known as check_same_thread which will disable this check, however note that pysqlite connections are still not safe to use concurrently in multiple threads. In particular, any statement execution calls would need to be externally mutexed, as Pysqlite does not provide for thread-safe propagation of error messages among other things. So while even :memory: databases can be shared among threads in modern SQLite, Pysqlite doesn’t provide enough thread-safety to make this usage worth it.

      SQLAlchemy sets up pooling to work with Pysqlite’s default behavior:

• When a :memory: SQLite database is specified, the dialect by default will use SingletonThreadPool. This pool maintains a single connection per thread, so that all access to the engine within the current thread uses the same :memory: database - other threads would access a different :memory: database.

      • When a file-based database is specified, the dialect will use NullPool as the source of connections. This pool closes and discards connections which are returned to the pool immediately. SQLite file-based connections have extremely low overhead, so pooling is not necessary. The scheme also prevents a connection from being used again in a different thread and works best with SQLite’s coarse-grained file locking.

  Changed in version 0.7: Default selection of NullPool for SQLite file-based databases. Previous versions selected SingletonThreadPool by default for all SQLite databases.

      Using a Memory Database in Multiple Threads

      To use a :memory: database in a multithreaded scenario, the same connection object must be shared among threads, since the database exists only within the scope of that connection. The StaticPool implementation will maintain a single connection globally, and the check_same_thread flag can be passed to Pysqlite as False:

      from sqlalchemy.pool import StaticPool
      engine = create_engine('sqlite://',
                          connect_args={'check_same_thread':False},
                          poolclass=StaticPool)

      Note that using a :memory: database in multiple threads requires a recent version of SQLite.

      Using Temporary Tables with SQLite

Due to the way SQLite deals with temporary tables, if you wish to use a temporary table in a file-based SQLite database across multiple checkouts from the connection pool, such as when using an ORM Session where the temporary table should continue to remain after commit() or rollback() is called, a pool which maintains a single connection must be used. Use SingletonThreadPool if the scope is only needed within the current thread, or StaticPool if the scope is needed within multiple threads:

      # maintain the same connection per thread
      from sqlalchemy.pool import SingletonThreadPool
      engine = create_engine('sqlite:///mydb.db',
                          poolclass=SingletonThreadPool)
      
      
      # maintain the same connection across all threads
      from sqlalchemy.pool import StaticPool
      engine = create_engine('sqlite:///mydb.db',
                          poolclass=StaticPool)

Note that SingletonThreadPool should be configured for the number of threads that are to be used; beyond that number, connections will be closed out in a non-deterministic way.

      Unicode

The pysqlite driver only returns Python unicode objects in result sets, never plain strings, and accommodates unicode objects within bound parameter values in all cases. Regardless of the SQLAlchemy string type in use, string-based result values will be Python unicode in Python 2. The Unicode type should still be used to indicate those columns that require unicode, however, so that non-unicode values passed inadvertently will emit a warning. Pysqlite will emit an error if a non-unicode string is passed containing non-ASCII characters.

      Serializable Transaction Isolation

      The pysqlite DBAPI driver has a long-standing bug in which transactional state is not begun until the first DML statement, that is INSERT, UPDATE or DELETE, is emitted. A SELECT statement will not cause transactional state to begin. While this mode of usage is fine for typical situations and has the advantage that the SQLite database file is not prematurely locked, it breaks serializable transaction isolation, which requires that the database file be locked upon any SQL being emitted.

      To work around this issue, the BEGIN keyword can be emitted at the start of each transaction. The following recipe establishes a ConnectionEvents.begin() handler to achieve this:

      from sqlalchemy import create_engine, event
      
      engine = create_engine("sqlite:///myfile.db", isolation_level='SERIALIZABLE')
      
      @event.listens_for(engine, "begin")
      def do_begin(conn):
          conn.execute("BEGIN")
SQLAlchemy-0.8.4/doc/dialects/sybase.html

      Sybase

      Support for the Sybase database.

      DBAPI Support

      The following dialect/DBAPI options are available. Please refer to individual DBAPI sections for connect information.

      Note

      The Sybase dialect functions on current SQLAlchemy versions but is not regularly tested, and may have many issues and caveats not currently handled.

      python-sybase

      Support for the Sybase database via the Python-Sybase driver.

      DBAPI

      Documentation and download information (if applicable) for Python-Sybase is available at: http://python-sybase.sourceforge.net/

      Connecting

      Connect String:

      sybase+pysybase://<username>:<password>@<dsn>/[database name]

      Unicode Support

      The python-sybase driver does not appear to support non-ASCII strings of any kind at this time.

      pyodbc

      Support for the Sybase database via the PyODBC driver.

      DBAPI

      Documentation and download information (if applicable) for PyODBC is available at: http://pypi.python.org/pypi/pyodbc/

      Connecting

      Connect String:

      sybase+pyodbc://<username>:<password>@<dsnname>[/<database>]

      Unicode Support

      The pyodbc driver currently supports usage of these Sybase types with Unicode or multibyte strings:

      CHAR
      NCHAR
      NVARCHAR
      TEXT
      VARCHAR

      Currently not supported are:

      UNICHAR
      UNITEXT
      UNIVARCHAR

      mxodbc

      Support for the Sybase database via the mxODBC driver.

      DBAPI

      Documentation and download information (if applicable) for mxODBC is available at: http://www.egenix.com/

      Connecting

      Connect String:

      sybase+mxodbc://<username>:<password>@<dsnname>

      Note

      This dialect is a stub only and is likely non functional at this time.

SQLAlchemy-0.8.4/doc/faq.html

      Frequently Asked Questions


      Connections / Engines

      How do I configure logging?

      See Configuring Logging.

      How do I pool database connections? Are my connections pooled?

SQLAlchemy performs application-level connection pooling automatically in most cases. With the exception of SQLite, an Engine object refers to a QueuePool as a source of connectivity.
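
      As a sketch, pool behavior can be tuned through create_engine() parameters such as pool_size and max_overflow; the URL below is a placeholder:

      from sqlalchemy import create_engine

      engine = create_engine(
          "postgresql://scott:tiger@localhost/test",
          pool_size=10,       # connections kept persistently in the pool
          max_overflow=20,    # extra connections allowed beyond pool_size
          pool_timeout=30)    # seconds to wait for a connection before raising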

      For more detail, see Engine Configuration and Connection Pooling.

      How do I pass custom connect arguments to my database API?

      The create_engine() call accepts additional arguments either directly via the connect_args keyword argument:

      e = create_engine("mysql://scott:tiger@localhost/test",
                                              connect_args={"encoding": "utf8"})

      Or for basic string and integer arguments, they can usually be specified in the query string of the URL:

      e = create_engine("mysql://scott:tiger@localhost/test?encoding=utf8")

      “MySQL Server has gone away”

      There are two major causes for this error:

1. The MySQL server closes connections which have been idle for a set period of time, defaulting to eight hours. This can be avoided by using the pool_recycle setting with create_engine(), described at Connection Timeouts and shown in the example after this list.

      2. Usage of the MySQLdb DBAPI, or a similar DBAPI, in a non-threadsafe manner, or in an otherwise inappropriate way. The MySQLdb connection object is not threadsafe - this expands out to any SQLAlchemy system that links to a single connection, which includes the ORM Session. For background on how Session should be used in a multithreaded environment, see Is the session thread-safe?.
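
      A minimal sketch of the pool_recycle setting mentioned in point 1 (the URL is a placeholder):

      from sqlalchemy import create_engine

      # recycle connections after one hour, well under MySQL's eight-hour default
      engine = create_engine(
          "mysql://scott:tiger@localhost/test",
          pool_recycle=3600)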

      Why does SQLAlchemy issue so many ROLLBACKs?

      SQLAlchemy currently assumes DBAPI connections are in “non-autocommit” mode - this is the default behavior of the Python database API, meaning it must be assumed that a transaction is always in progress. The connection pool issues connection.rollback() when a connection is returned. This is so that any transactional resources remaining on the connection are released. On a database like Postgresql or MSSQL where table resources are aggressively locked, this is critical so that rows and tables don’t remain locked within connections that are no longer in use. An application can otherwise hang. It’s not just for locks, however, and is equally critical on any database that has any kind of transaction isolation, including MySQL with InnoDB. Any connection that is still inside an old transaction will return stale data, if that data was already queried on that connection within isolation. For background on why you might see stale data even on MySQL, see http://dev.mysql.com/doc/refman/5.1/en/innodb-transaction-model.html

      I’m on MyISAM - how do I turn it off?

      The behavior of the connection pool’s connection return behavior can be configured using reset_on_return:

      from sqlalchemy import create_engine
      from sqlalchemy.pool import QueuePool
      
      engine = create_engine('mysql://scott:tiger@localhost/myisam_database', pool=QueuePool(reset_on_return=False))

      I’m on SQL Server - how do I turn those ROLLBACKs into COMMITs?

      reset_on_return accepts the values commit, rollback in addition to True, False, and None. Setting to commit will cause a COMMIT as any connection is returned to the pool:

      engine = create_engine('mssql://scott:tiger@mydsn', pool=QueuePool(reset_on_return='commit'))

      I am using multiple connections with a SQLite database (typically to test transaction operation), and my test program is not working!

      If using a SQLite :memory: database, or a version of SQLAlchemy prior to version 0.7, the default connection pool is the SingletonThreadPool, which maintains exactly one SQLite connection per thread. So two connections in use in the same thread will actually be the same SQLite connection. Make sure you’re not using a :memory: database and use NullPool, which is the default for non-memory databases in current SQLAlchemy versions.

      See also

      Threading/Pooling Behavior - info on PySQLite’s behavior.

      How do I get at the raw DBAPI connection when using an Engine?

      With a regular SA engine-level Connection, you can get at a pool-proxied version of the DBAPI connection via the Connection.connection attribute on Connection, and for the really-real DBAPI connection you can call the ConnectionFairy.connection attribute on that - but there should never be any need to access the non-pool-proxied DBAPI connection, as all methods are proxied through:

      engine = create_engine(...)
      conn = engine.connect()
      conn.connection.<do DBAPI things>
      cursor = conn.connection.cursor(<DBAPI specific arguments..>)

      You must ensure that you revert any isolation level settings or other operation-specific settings on the connection back to normal before returning it to the pool.

      As an alternative to reverting settings, you can call the Connection.detach() method on either Connection or the proxied connection, which will de-associate the connection from the pool such that it will be closed and discarded when Connection.close() is called:

      conn = engine.connect()
      conn.detach()  # detaches the DBAPI connection from the connection pool
      conn.connection.<go nuts>
      conn.close()  # connection is closed for real, the pool replaces it with a new connection

      MetaData / Schema

      My program is hanging when I say table.drop() / metadata.drop_all()

This usually corresponds to two conditions: 1. using PostgreSQL, which is really strict about table locks, and 2. you have a connection still open which contains locks on the table and is distinct from the connection being used for the DROP statement. Here’s the most minimal version of the pattern:

      connection = engine.connect()
      result = connection.execute(mytable.select())
      
      mytable.drop(engine)

Above, a connection pool connection is still checked out; furthermore, the result object above also maintains a link to this connection. If “implicit execution” is used, the result will hold this connection open until the result object is closed or all rows are exhausted.

      The call to mytable.drop(engine) attempts to emit DROP TABLE on a second connection procured from the Engine which will lock.

      The solution is to close out all connections before emitting DROP TABLE:

      connection = engine.connect()
      result = connection.execute(mytable.select())
      
      # fully read result sets
      result.fetchall()
      
      # close connections
      connection.close()
      
      # now locks are removed
      mytable.drop(engine)

      Does SQLAlchemy support ALTER TABLE, CREATE VIEW, CREATE TRIGGER, Schema Upgrade Functionality?

      General ALTER support isn’t present in SQLAlchemy directly. For special DDL on an ad-hoc basis, the DDL and related constructs can be used. See Customizing DDL for a discussion on this subject.

      A more comprehensive option is to use schema migration tools, such as Alembic or SQLAlchemy-Migrate; see Altering Schemas through Migrations for discussion on this.

      How can I sort Table objects in order of their dependency?

This is available via the MetaData.sorted_tables attribute:

      metadata = MetaData()
      # ... add Table objects to metadata
ti = metadata.sorted_tables
      for t in ti:
          print t

      How can I get the CREATE TABLE/ DROP TABLE output as a string?

      Modern SQLAlchemy has clause constructs which represent DDL operations. These can be rendered to strings like any other SQL expression:

      from sqlalchemy.schema import CreateTable
      
      print CreateTable(mytable)

      To get the string specific to a certain engine:

      print CreateTable(mytable).compile(engine)

      There’s also a special form of Engine that can let you dump an entire metadata creation sequence, using this recipe:

      def dump(sql, *multiparams, **params):
          print sql.compile(dialect=engine.dialect)
      engine = create_engine('postgresql://', strategy='mock', executor=dump)
      metadata.create_all(engine, checkfirst=False)

      The Alembic tool also supports an “offline” SQL generation mode that renders database migrations as SQL scripts.

      How can I subclass Table/Column to provide certain behaviors/configurations?

      Table and Column are not good targets for direct subclassing. However, there are simple ways to get on-construction behaviors using creation functions, and behaviors related to the linkages between schema objects such as constraint conventions or naming conventions using attachment events. An example of many of these techniques can be seen at Naming Conventions.

      SQL Expressions

      Why does .col.in_([]) Produce col != col? Why not 1=0?

A little introduction to the issue. The IN operator in SQL, given a list of elements to compare against a column, generally does not accept an empty list; that is, while it is valid to say:

      column IN (1, 2, 3)

      it’s not valid to say:

      column IN ()

      SQLAlchemy’s Operators.in_() operator, when given an empty list, produces this expression:

      column != column

      As of version 0.6, it also produces a warning stating that a less efficient comparison operation will be rendered. This expression is the only one that is both database agnostic and produces correct results.

      For example, the naive approach of “just evaluate to false, by comparing 1=0 or 1!=1”, does not handle nulls properly. An expression like:

      NOT column != column

      will not return a row when “column” is null, but an expression which does not take the column into account:

      NOT 1=0

      will.

      Closer to the mark is the following CASE expression:

      CASE WHEN column IS NOT NULL THEN 1=0 ELSE NULL END

We don’t use this expression due to its verbosity, and it’s also not typically accepted by Oracle within a WHERE clause - depending on how you phrase it, you’ll either get “ORA-00905: missing keyword” or “ORA-00920: invalid relational operator”. It’s also still less efficient than just rendering SQL without the clause altogether (or not issuing the SQL at all, if the statement is just a simple search).

      The best approach therefore is to avoid the usage of IN given an argument list of zero length. Instead, don’t emit the Query in the first place, if no rows should be returned. The warning is best promoted to a full error condition using the Python warnings filter (see http://docs.python.org/library/warnings.html).
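
      A minimal sketch of promoting the warning to an error via the standard library warnings filter; note that this simple form escalates all SQLAlchemy warnings, so a message-based filter may be preferable in practice:

      import warnings
      from sqlalchemy import exc

      # raise an exception instead of emitting SAWarning
      warnings.simplefilter("error", exc.SAWarning)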

      ORM Configuration

      How do I map a table that has no primary key?

In almost all cases, a table does have a so-called candidate key, which is a column or series of columns that uniquely identify a row. If a table truly doesn’t have this, and has actual fully duplicate rows, the table does not correspond to first normal form and cannot be mapped. Otherwise, whatever columns comprise the best candidate key can be applied directly to the mapper:

      class SomeClass(Base):
              __table__ = some_table_with_no_pk
              __mapper_args__ = {
                      'primary_key':[some_table_with_no_pk.c.uid, some_table_with_no_pk.c.bar]
              }

      Better yet is when using fully declared table metadata, use the primary_key=True flag on those columns:

      class SomeClass(Base):
              __tablename__ = "some_table_with_no_pk"
      
              uid = Column(Integer, primary_key=True)
              bar = Column(String, primary_key=True)

      All tables in a relational database should have primary keys. Even a many-to-many association table - the primary key would be the composite of the two association columns:

      CREATE TABLE my_association (
        user_id INTEGER REFERENCES user(id),
        account_id INTEGER REFERENCES account(id),
        PRIMARY KEY (user_id, account_id)
      )

      How do I configure a Column that is a Python reserved word or similar?

      Column-based attributes can be given any name desired in the mapping. See Naming Columns Distinctly from Attribute Names.

      How do I get a list of all columns, relationships, mapped attributes, etc. given a mapped class?

      This information is all available from the Mapper object.

      To get at the Mapper for a particular mapped class, call the inspect() function on it:

      from sqlalchemy import inspect
      
      mapper = inspect(MyClass)

      From there, all information about the class can be acquired using such methods as:

      I’m using Declarative and setting primaryjoin/secondaryjoin using an and_() or or_(), and I am getting an error message about foreign keys.

      Are you doing this?:

      class MyClass(Base):
          # ....
      
          foo = relationship("Dest", primaryjoin=and_("MyClass.id==Dest.foo_id", "MyClass.foo==Dest.bar"))

      That’s an and_() of two string expressions, which SQLAlchemy cannot apply any mapping towards. Declarative allows relationship() arguments to be specified as strings, which are converted into expression objects using eval(). But this doesn’t occur inside of an and_() expression - it’s a special operation declarative applies only to the entirety of what’s passed to primaryjoin or other arguments as a string:

      class MyClass(Base):
          # ....
      
          foo = relationship("Dest", primaryjoin="and_(MyClass.id==Dest.foo_id, MyClass.foo==Dest.bar)")

      Or if the objects you need are already available, skip the strings:

      class MyClass(Base):
          # ....
      
          foo = relationship(Dest, primaryjoin=and_(MyClass.id==Dest.foo_id, MyClass.foo==Dest.bar))

      The same idea applies to all the other arguments, such as foreign_keys:

      # wrong !
      foo = relationship(Dest, foreign_keys=["Dest.foo_id", "Dest.bar_id"])
      
      # correct !
      foo = relationship(Dest, foreign_keys="[Dest.foo_id, Dest.bar_id]")
      
      # also correct !
      foo = relationship(Dest, foreign_keys=[Dest.foo_id, Dest.bar_id])
      
      # if you're using columns from the class that you're inside of, just use the column objects !
      class MyClass(Base):
          foo_id = Column(...)
          bar_id = Column(...)
          # ...
      
          foo = relationship(Dest, foreign_keys=[foo_id, bar_id])

      Sessions / Queries

      “This Session’s transaction has been rolled back due to a previous exception during flush.” (or similar)

      This is an error that occurs when a Session.flush() raises an exception, rolls back the transaction, but further commands upon the Session are called without an explicit call to Session.rollback() or Session.close().

      It usually corresponds to an application that catches an exception upon Session.flush() or Session.commit() and does not properly handle the exception. For example:

      from sqlalchemy import create_engine, Column, Integer
      from sqlalchemy.orm import sessionmaker
      from sqlalchemy.ext.declarative import declarative_base
      
      Base = declarative_base(create_engine('sqlite://'))
      
      class Foo(Base):
          __tablename__ = 'foo'
          id = Column(Integer, primary_key=True)
      
      Base.metadata.create_all()
      
      session = sessionmaker()()
      
      # constraint violation
      session.add_all([Foo(id=1), Foo(id=1)])
      
      try:
          session.commit()
      except:
          # ignore error
          pass
      
      # continue using session without rolling back
      session.commit()

      The usage of the Session should fit within a structure similar to this:

      try:
          <use session>
          session.commit()
      except:
         session.rollback()
         raise
      finally:
         session.close()  # optional, depends on use case

      Many things can cause a failure within the try/except besides flushes. You should always have some kind of “framing” of your session operations so that connection and transaction resources have a definitive boundary, otherwise your application doesn’t really have its usage of resources under control. This is not to say that you need to put try/except blocks all throughout your application - on the contrary, this would be a terrible idea. You should architect your application such that there is one (or few) point(s) of “framing” around session operations.

      For a detailed discussion on how to organize usage of the Session, please see When do I construct a Session, when do I commit it, and when do I close it?.

      But why does flush() insist on issuing a ROLLBACK?

It would be great if Session.flush() could partially complete and then not roll back, however this is beyond its current capabilities since its internal bookkeeping would have to be modified such that it can be halted at any time and be exactly consistent with what’s been flushed to the database. While this is theoretically possible, the usefulness of the enhancement is greatly decreased by the fact that many database operations require a ROLLBACK in any case. Postgres in particular has operations which, once they fail, do not allow the transaction to continue:

      test=> create table foo(id integer primary key);
      NOTICE:  CREATE TABLE / PRIMARY KEY will create implicit index "foo_pkey" for table "foo"
      CREATE TABLE
      test=> begin;
      BEGIN
      test=> insert into foo values(1);
      INSERT 0 1
      test=> commit;
      COMMIT
      test=> begin;
      BEGIN
      test=> insert into foo values(1);
      ERROR:  duplicate key value violates unique constraint "foo_pkey"
      test=> insert into foo values(2);
      ERROR:  current transaction is aborted, commands ignored until end of transaction block

      What SQLAlchemy offers that solves both issues is support of SAVEPOINT, via Session.begin_nested(). Using Session.begin_nested(), you can frame an operation that may potentially fail within a transaction, and then “roll back” to the point before its failure while maintaining the enclosing transaction.
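
      A minimal sketch of this pattern, assuming an existing session and a hypothetical some_object whose INSERT may violate a constraint:

      from sqlalchemy.exc import IntegrityError

      session.begin_nested()        # establishes a SAVEPOINT
      try:
          session.add(some_object)
          session.commit()          # releases the SAVEPOINT
      except IntegrityError:
          session.rollback()        # rolls back to the SAVEPOINT; enclosing transaction intact
      session.commit()              # commits the enclosing transaction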

      But why isn’t the one automatic call to ROLLBACK enough? Why must I ROLLBACK again?

This is again a matter of the Session providing a consistent interface and refusing to guess about the context in which it’s being used. For example, the Session supports “framing”, as above, within multiple levels. Suppose you had a decorator @with_session(), which did this:

      def with_session(fn):
         def go(*args, **kw):
             session.begin(subtransactions=True)
             try:
                 ret = fn(*args, **kw)
                 session.commit()
                 return ret
             except:
                 session.rollback()
                 raise
         return go

The above decorator begins a transaction if one does not exist already, and then commits it, if it was the creator. The “subtransactions” flag means that if Session.begin() were already called by an enclosing function, nothing happens except a counter is incremented - this counter is decremented when Session.commit() is called and only when it goes back to zero does the actual COMMIT happen. It allows this usage pattern:

      @with_session
      def one():
         # do stuff
         two()
      
      
      @with_session
      def two():
         # etc.
      
      one()
      
      two()

      one() can call two(), or two() can be called by itself, and the @with_session decorator ensures the appropriate “framing” - the transaction boundaries stay on the outermost call level. As you can see, if two() calls flush() which throws an exception and then issues a rollback(), there will always be a second rollback() performed by the decorator, and possibly a third corresponding to two levels of decorator. If the flush() pushed the rollback() all the way out to the top of the stack, and then we said that all remaining rollback() calls are moot, there is some silent behavior going on there. A poorly written enclosing method might suppress the exception, and then call commit() assuming nothing is wrong, and then you have a silent failure condition. The main reason people get this error in fact is because they didn’t write clean “framing” code and they would have had other problems down the road.

If you think the above use case is a little exotic, the same kind of thing comes into play if you want to use SAVEPOINTs - you might call begin_nested() several times, and the commit()/rollback() calls each resolve the most recent begin_nested(). The meaning of rollback() or commit() depends upon which enclosing block it is called in, and you might have any sequence of rollback()/commit() in any order, and it’s the level of nesting that determines their behavior.

      In both of the above cases, if flush() broke the nesting of transaction blocks, the behavior is, depending on scenario, anywhere from “magic” to silent failure to blatant interruption of code flow.

      flush() makes its own “subtransaction”, so that a transaction is started up regardless of the external transactional state, and when complete it calls commit(), or rollback() upon failure - but that rollback() corresponds to its own subtransaction - it doesn’t want to guess how you’d like to handle the external “framing” of the transaction, which could be nested many levels with any combination of subtransactions and real SAVEPOINTs. The job of starting/ending the “frame” is kept consistently with the code external to the flush(), and we made a decision that this was the most consistent approach.

      I’m inserting 400,000 rows with the ORM and it’s really slow!

The SQLAlchemy ORM uses the unit of work pattern when synchronizing changes to the database. This pattern goes far beyond simple “inserts” of data. It includes that attributes assigned on objects are received using an attribute instrumentation system which tracks changes on objects as they are made; that all rows inserted are tracked in an identity map, which has the effect that for each row SQLAlchemy must retrieve its “last inserted id” if not already given; and that rows to be inserted are scanned and sorted for dependencies as needed. Objects are also subject to a fair degree of bookkeeping in order to keep all of this running, which for a very large number of rows at once can create an inordinate amount of time spent with large data structures, hence it’s best to chunk these.

      Basically, unit of work is a large degree of automation in order to automate the task of persisting a complex object graph into a relational database with no explicit persistence code, and this automation has a price.

      ORMs are basically not intended for high-performance bulk inserts - this is the whole reason SQLAlchemy offers the Core in addition to the ORM as a first-class component.

      For the use case of fast bulk inserts, the SQL generation and execution system that the ORM builds on top of is part of the Core. Using this system directly, we can produce an INSERT that is competitive with using the raw database API directly.

      The example below illustrates time-based tests for four different methods of inserting rows, going from the most automated to the least. With cPython 2.7, runtimes observed:

      classics-MacBook-Pro:sqlalchemy classic$ python test.py
      SQLAlchemy ORM: Total time for 100000 records 14.3528850079 secs
      SQLAlchemy ORM pk given: Total time for 100000 records 10.0164160728 secs
      SQLAlchemy Core: Total time for 100000 records 0.775382995605 secs
      sqlite3: Total time for 100000 records 0.676795005798 sec

      We can reduce the time by a factor of three using recent versions of Pypy:

      classics-MacBook-Pro:sqlalchemy classic$ /usr/local/src/pypy-2.1-beta2-osx64/bin/pypy test.py
      SQLAlchemy ORM: Total time for 100000 records 5.88369488716 secs
      SQLAlchemy ORM pk given: Total time for 100000 records 3.52294301987 secs
      SQLAlchemy Core: Total time for 100000 records 0.613556146622 secs
      sqlite3: Total time for 100000 records 0.442467927933 sec

      Script:

      import time
      import sqlite3
      
      from sqlalchemy.ext.declarative import declarative_base
      from sqlalchemy import Column, Integer, String,  create_engine
      from sqlalchemy.orm import scoped_session, sessionmaker
      
      Base = declarative_base()
      DBSession = scoped_session(sessionmaker())
      engine = None
      
      class Customer(Base):
          __tablename__ = "customer"
          id = Column(Integer, primary_key=True)
          name = Column(String(255))
      
      def init_sqlalchemy(dbname='sqlite:///sqlalchemy.db'):
          global engine
          engine = create_engine(dbname, echo=False)
          DBSession.remove()
          DBSession.configure(bind=engine, autoflush=False, expire_on_commit=False)
          Base.metadata.drop_all(engine)
          Base.metadata.create_all(engine)
      
      def test_sqlalchemy_orm(n=100000):
          init_sqlalchemy()
          t0 = time.time()
          for i in range(n):
              customer = Customer()
              customer.name = 'NAME ' + str(i)
              DBSession.add(customer)
              if i % 1000 == 0:
                  DBSession.flush()
          DBSession.commit()
          print("SQLAlchemy ORM: Total time for " + str(n) +
                      " records " + str(time.time() - t0) + " secs")
      
      def test_sqlalchemy_orm_pk_given(n=100000):
          init_sqlalchemy()
          t0 = time.time()
          for i in range(n):
              customer = Customer(id=i+1, name="NAME " + str(i))
              DBSession.add(customer)
              if i % 1000 == 0:
                  DBSession.flush()
          DBSession.commit()
          print("SQLAlchemy ORM pk given: Total time for " + str(n) +
              " records " + str(time.time() - t0) + " secs")
      
      def test_sqlalchemy_core(n=100000):
          init_sqlalchemy()
          t0 = time.time()
          engine.execute(
              Customer.__table__.insert(),
              [{"name": 'NAME ' + str(i)} for i in range(n)]
          )
          print("SQLAlchemy Core: Total time for " + str(n) +
              " records " + str(time.time() - t0) + " secs")
      
      def init_sqlite3(dbname):
          conn = sqlite3.connect(dbname)
          c = conn.cursor()
          c.execute("DROP TABLE IF EXISTS customer")
          c.execute("CREATE TABLE customer (id INTEGER NOT NULL, "
                                      "name VARCHAR(255), PRIMARY KEY(id))")
          conn.commit()
          return conn
      
      def test_sqlite3(n=100000, dbname='sqlite3.db'):
          conn = init_sqlite3(dbname)
          c = conn.cursor()
          t0 = time.time()
          for i in range(n):
              row = ('NAME ' + str(i),)
              c.execute("INSERT INTO customer (name) VALUES (?)", row)
          conn.commit()
          print("sqlite3: Total time for " + str(n) +
              " records " + str(time.time() - t0) + " sec")
      
      if __name__ == '__main__':
          test_sqlalchemy_orm(100000)
          test_sqlalchemy_orm_pk_given(100000)
          test_sqlalchemy_core(100000)
          test_sqlite3(100000)

      How do I make a Query that always adds a certain filter to every query?

      See the recipe at PreFilteredQuery.

      I’ve created a mapping against an Outer Join, and while the query returns rows, no objects are returned. Why not?

      Rows returned by an outer join may contain NULL for part of the primary key, as the primary key is the composite of both tables. The Query object ignores incoming rows that don’t have an acceptable primary key. Based on the setting of the allow_partial_pks flag on mapper(), a primary key is accepted if the value has at least one non-NULL value, or alternatively if the value has no NULL values. See allow_partial_pks at mapper().
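
      A minimal sketch of tightening this behavior in a declarative mapping against a hypothetical outer_join_selectable:

      class SomeOuterJoinClass(Base):
          __table__ = outer_join_selectable      # e.g. a join() construct
          __mapper_args__ = {
              # require every primary key column to be non-NULL
              # before a row is accepted (the default is True)
              'allow_partial_pks': False
          }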

      I’m using joinedload() or lazy=False to create a JOIN/OUTER JOIN and SQLAlchemy is not constructing the correct query when I try to add a WHERE, ORDER BY, LIMIT, etc. (which relies upon the (OUTER) JOIN)

      The joins generated by joined eager loading are only used to fully load related collections, and are designed to have no impact on the primary results of the query. Since they are anonymously aliased, they cannot be referenced directly.

For detail on this behavior, see Relationship Loading Techniques.

      Query has no __len__(), why not?

      The Python __len__() magic method applied to an object allows the len() builtin to be used to determine the length of the collection. It’s intuitive that a SQL query object would link __len__() to the Query.count() method, which emits a SELECT COUNT. The reason this is not possible is because evaluating the query as a list would incur two SQL calls instead of one:

      class Iterates(object):
          def __len__(self):
              print("LEN!")
              return 5
      
          def __iter__(self):
              print("ITER!")
              return iter([1, 2, 3, 4, 5])
      
      list(Iterates())

      output:

      ITER!
      LEN!
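
      The explicit spellings therefore remain the way to go. A short sketch, assuming the usual session and a mapped User class from the tutorial:

      # emit SELECT COUNT only, when just the number is needed
      num = session.query(User).count()
      
      # or fetch the rows once and use len() on the result list
      users = session.query(User).all()
      num = len(users)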

      How do I use textual SQL with ORM queries?

      See Using Textual SQL for using ad-hoc textual blocks with Query, and the Session documentation for ad-hoc connection and statement execution with the Session.

      I’m calling Session.delete(myobject) and it isn’t removed from the parent collection!

      See Deleting from Collections for a description of this behavior.
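
      In short, assuming Parent/Child mapped classes whose Parent.children relationship is configured with cascade="all, delete-orphan" (names invented for this sketch), the two operations behave differently:

      # deleting the object directly removes the row, but the stale instance
      # remains inside parent.children until that collection is expired/refreshed
      session.delete(child)
      session.commit()
      
      # removing it from the owning collection instead marks it as an orphan,
      # and the delete-orphan cascade deletes the row at flush time
      parent.children.remove(child)
      session.commit()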

      why isn't my __init__() called when I load objects?

      See Constructors and Object Initialization for a description of this behavior.
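
      When per-instance setup is also needed on loaded objects, the documented hook is the reconstructor() decorator. A minimal sketch (the mapped class and its stack attribute are invented for illustration):

      from sqlalchemy import Column, Integer, String
      from sqlalchemy.orm import reconstructor
      from sqlalchemy.ext.declarative import declarative_base
      
      Base = declarative_base()
      
      class MyMappedClass(Base):
          __tablename__ = 'mything'
          id = Column(Integer, primary_key=True)
          data = Column(String(50))
      
          def __init__(self, data):
              self.data = data
              self.stack = []          # runs only for brand-new objects
      
          @reconstructor
          def init_on_load(self):
              self.stack = []          # runs each time an instance is loaded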

      how do I use ON DELETE CASCADE with SA’s ORM?

      SQLAlchemy will always issue UPDATE or DELETE statements for dependent rows which are currently loaded in the Session. For rows which are not loaded, it will by default issue SELECT statements to load those rows and update/delete those as well; in other words it assumes there is no ON DELETE CASCADE configured. To configure SQLAlchemy to cooperate with ON DELETE CASCADE, see Using Passive Deletes.
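
      A minimal sketch of that configuration (Parent/Child are placeholder names): the foreign key declares ON DELETE CASCADE on the database side, and passive_deletes=True on the relationship tells the ORM not to load and delete unloaded children itself:

      from sqlalchemy import Column, Integer, ForeignKey
      from sqlalchemy.orm import relationship
      from sqlalchemy.ext.declarative import declarative_base
      
      Base = declarative_base()
      
      class Parent(Base):
          __tablename__ = 'parent'
          id = Column(Integer, primary_key=True)
          # don't SELECT or UPDATE/DELETE unloaded children; the database's
          # ON DELETE CASCADE removes them when the parent row is deleted
          children = relationship("Child", cascade="all, delete-orphan",
                                  passive_deletes=True)
      
      class Child(Base):
          __tablename__ = 'child'
          id = Column(Integer, primary_key=True)
          parent_id = Column(Integer,
                             ForeignKey('parent.id', ondelete='CASCADE'),
                             nullable=False)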

      I set the “foo_id” attribute on my instance to “7”, but the “foo” attribute is still None - shouldn’t it have loaded Foo with id #7?

      The ORM is not constructed to support immediate population of relationships driven from foreign key attribute changes - it is designed to work the other way around: foreign key attributes are handled by the ORM behind the scenes, while the end user sets up object relationships naturally. Therefore, the recommended way to set o.foo is to do just that - set it!:

      foo = Session.query(Foo).get(7)
      o.foo = foo
      Session.commit()

      Manipulation of foreign key attributes is of course entirely legal. However, setting a foreign-key attribute to a new value currently does not trigger an “expire” event of the relationship() in which it’s involved (this may be implemented in the future). This means that for the following sequence:

      o = Session.query(SomeClass).first()
      assert o.foo is None
      o.foo_id = 7

      o.foo was loaded with its effective value of None at the point we checked it. Setting o.foo_id = 7 leaves the value “7” pending, but no flush has occurred, so o.foo is still None.

      For o.foo to load based on the foreign key mutation is usually achieved naturally after the commit, which both flushes the new foreign key value and expires all state:

      Session.commit()
      assert o.foo.id == 7   # o.foo is now the Foo object with id 7

      A more minimal operation is to expire the attribute individually. The Session.flush() is also needed if the object is pending (hasn’t been INSERTed yet), or if the relationship is many-to-one prior to 0.6.5:

      Session.expire(o, ['foo'])
      
      Session.flush()
      
      assert o.foo.id == 7   # the expired attribute lazy loads on next access

      Where above, expiring the attribute triggers a lazy load on the next access of o.foo.

      The object does not “autoflush” on access of o.foo if the object is pending, since it is usually desirable that a pending object doesn’t autoflush prematurely and/or excessively, while its state is still being populated.

      Also see the recipe ExpireRelationshipOnFKChange, which features a mechanism to actually achieve this behavior to a reasonable degree in simple situations.

      Is there a way to automagically have only unique keywords (or other kinds of objects) without doing a query for the keyword and getting a reference to the row containing that keyword?

      When people read the many-to-many example in the docs, they get hit with the fact that if you create the same Keyword twice, it gets put in the DB twice, which is somewhat inconvenient.

      This UniqueObject recipe was created to address this issue.
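
      In spirit, that recipe is a "get or create" lookup performed before constructing a new object. A simplified sketch (not the wiki recipe verbatim), assuming a Keyword class mapped with a unique name column and an item with a keywords collection:

      def get_or_create_keyword(session, name):
          """Return the existing Keyword for `name`, creating it only if absent."""
          # memoize per session so repeated calls in one unit of work don't re-query
          cache = getattr(session, '_keyword_cache', None)
          if cache is None:
              cache = session._keyword_cache = {}
          if name in cache:
              return cache[name]
          with session.no_autoflush:
              keyword = session.query(Keyword).filter_by(name=name).first()
              if keyword is None:
                  keyword = Keyword(name=name)
                  session.add(keyword)
          cache[name] = keyword
          return keyword
      
      item.keywords.append(get_or_create_keyword(session, 'python'))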

      SQLAlchemy-0.8.4/doc/genindex.html0000644000076500000240000116241312251147504017476 0ustar classicstaff00000000000000 Index — SQLAlchemy 0.8 Documentation

      Index


      _

      __add__() (sqlalchemy.sql.operators.ColumnOperators method)
      __and__() (sqlalchemy.sql.operators.ColumnOperators method)
      (sqlalchemy.sql.operators.Operators method)
      __call__() (sqlalchemy.orm.scoping.scoped_session method)
      (sqlalchemy.orm.session.sessionmaker method)
      (sqlalchemy.orm.state.InstanceState method)
      (sqlalchemy.schema.DDLElement method)
      __delattr__ (sqlalchemy.sql.operators.ColumnOperators attribute)
      __div__() (sqlalchemy.sql.operators.ColumnOperators method)
      __eq__() (sqlalchemy.orm.properties.RelationshipProperty.Comparator method)
      (sqlalchemy.schema.Column method)
      (sqlalchemy.sql.expression.BindParameter method)
      (sqlalchemy.sql.expression.ColumnClause method)
      (sqlalchemy.sql.expression.ColumnElement method)
      (sqlalchemy.sql.operators.ColumnOperators method)
      __format__() (sqlalchemy.sql.operators.ColumnOperators method)
      __ge__() (sqlalchemy.sql.operators.ColumnOperators method)
      __getattribute__ (sqlalchemy.sql.operators.ColumnOperators attribute)
      __getitem__() (sqlalchemy.sql.operators.ColumnOperators method)
      __gt__() (sqlalchemy.sql.operators.ColumnOperators method)
      __hash__ (sqlalchemy.sql.operators.ColumnOperators attribute)
      __init__ (sqlalchemy.sql.expression.ColumnElement attribute)
      (sqlalchemy.sql.operators.ColumnOperators attribute)
      (sqlalchemy.types.Concatenable attribute)
      __init__() (sqlalchemy.dialects.drizzle.BIGINT method)
      (sqlalchemy.dialects.drizzle.CHAR method)
      (sqlalchemy.dialects.drizzle.DECIMAL method)
      (sqlalchemy.dialects.drizzle.DOUBLE method)
      (sqlalchemy.dialects.drizzle.ENUM method)
      (sqlalchemy.dialects.drizzle.FLOAT method)
      (sqlalchemy.dialects.drizzle.INTEGER method)
      (sqlalchemy.dialects.drizzle.NUMERIC method)
      (sqlalchemy.dialects.drizzle.REAL method)
      (sqlalchemy.dialects.drizzle.TEXT method)
      (sqlalchemy.dialects.drizzle.TIMESTAMP method)
      (sqlalchemy.dialects.drizzle.VARCHAR method)
      (sqlalchemy.dialects.mssql.BIT method)
      (sqlalchemy.dialects.mssql.CHAR method)
      (sqlalchemy.dialects.mssql.IMAGE method)
      (sqlalchemy.dialects.mssql.MONEY method)
      (sqlalchemy.dialects.mssql.NCHAR method)
      (sqlalchemy.dialects.mssql.NTEXT method)
      (sqlalchemy.dialects.mssql.NVARCHAR method)
      (sqlalchemy.dialects.mssql.SMALLDATETIME method)
      (sqlalchemy.dialects.mssql.SMALLMONEY method)
      (sqlalchemy.dialects.mssql.SQL_VARIANT method)
      (sqlalchemy.dialects.mssql.TEXT method)
      (sqlalchemy.dialects.mssql.TINYINT method)
      (sqlalchemy.dialects.mssql.UNIQUEIDENTIFIER method)
      (sqlalchemy.dialects.mssql.VARCHAR method)
      (sqlalchemy.dialects.mysql.BIGINT method)
      (sqlalchemy.dialects.mysql.BIT method)
      (sqlalchemy.dialects.mysql.BLOB method)
      (sqlalchemy.dialects.mysql.BOOLEAN method)
      (sqlalchemy.dialects.mysql.CHAR method)
      (sqlalchemy.dialects.mysql.DATE method)
      (sqlalchemy.dialects.mysql.DATETIME method)
      (sqlalchemy.dialects.mysql.DECIMAL method)
      (sqlalchemy.dialects.mysql.DOUBLE method)
      (sqlalchemy.dialects.mysql.ENUM method)
      (sqlalchemy.dialects.mysql.FLOAT method)
      (sqlalchemy.dialects.mysql.INTEGER method)
      (sqlalchemy.dialects.mysql.LONGTEXT method)
      (sqlalchemy.dialects.mysql.MEDIUMINT method)
      (sqlalchemy.dialects.mysql.MEDIUMTEXT method)
      (sqlalchemy.dialects.mysql.NCHAR method)
      (sqlalchemy.dialects.mysql.NUMERIC method)
      (sqlalchemy.dialects.mysql.NVARCHAR method)
      (sqlalchemy.dialects.mysql.REAL method)
      (sqlalchemy.dialects.mysql.SET method)
      (sqlalchemy.dialects.mysql.SMALLINT method)
      (sqlalchemy.dialects.mysql.TEXT method)
      (sqlalchemy.dialects.mysql.TIME method)
      (sqlalchemy.dialects.mysql.TIMESTAMP method)
      (sqlalchemy.dialects.mysql.TINYINT method)
      (sqlalchemy.dialects.mysql.TINYTEXT method)
      (sqlalchemy.dialects.mysql.VARCHAR method)
      (sqlalchemy.dialects.oracle.BFILE method)
      (sqlalchemy.dialects.oracle.INTERVAL method)
      (sqlalchemy.dialects.oracle.LONG method)
      (sqlalchemy.dialects.oracle.NCLOB method)
      (sqlalchemy.dialects.postgresql.ARRAY method)
      (sqlalchemy.dialects.postgresql.BYTEA method)
      (sqlalchemy.dialects.postgresql.CIDR method)
      (sqlalchemy.dialects.postgresql.DOUBLE_PRECISION method)
      (sqlalchemy.dialects.postgresql.ENUM method)
      (sqlalchemy.dialects.postgresql.ExcludeConstraint method)
      (sqlalchemy.dialects.postgresql.INET method)
      (sqlalchemy.dialects.postgresql.MACADDR method)
      (sqlalchemy.dialects.postgresql.REAL method)
      (sqlalchemy.dialects.postgresql.UUID method)
      (sqlalchemy.engine.Connection method)
      (sqlalchemy.engine.interfaces.Compiled method)
      (sqlalchemy.engine.reflection.Inspector method)
      (sqlalchemy.ext.associationproxy.AssociationProxy method)
      (sqlalchemy.ext.horizontal_shard.ShardedSession method)
      (sqlalchemy.ext.hybrid.hybrid_method method)
      (sqlalchemy.ext.hybrid.hybrid_property method)
      (sqlalchemy.ext.orderinglist.OrderingList method)
      (sqlalchemy.orm.collections.MappedCollection method)
      (sqlalchemy.orm.mapper.Mapper method)
      (sqlalchemy.orm.properties.ColumnProperty method)
      (sqlalchemy.orm.properties.RelationshipProperty.Comparator method)
      (sqlalchemy.orm.scoping.scoped_session method)
      (sqlalchemy.orm.session.Session method)
      (sqlalchemy.orm.session.sessionmaker method)
      (sqlalchemy.pool.Pool method)
      (sqlalchemy.pool.QueuePool method)
      (sqlalchemy.pool.SingletonThreadPool method)
      (sqlalchemy.schema.Column method)
      (sqlalchemy.schema.CreateSchema method)
      (sqlalchemy.schema.CreateTable method)
      (sqlalchemy.schema.DDL method)
      (sqlalchemy.schema.DropSchema method)
      (sqlalchemy.schema.ForeignKey method)
      (sqlalchemy.schema.ForeignKeyConstraint method)
      (sqlalchemy.schema.Index method)
      (sqlalchemy.schema.MetaData method)
      (sqlalchemy.schema.Sequence method)
      (sqlalchemy.schema.Table method)
      (sqlalchemy.schema.ThreadLocalMetaData method)
      (sqlalchemy.sql.compiler.DDLCompiler method)
      (sqlalchemy.sql.compiler.IdentifierPreparer method)
      (sqlalchemy.sql.compiler.SQLCompiler method)
      (sqlalchemy.sql.expression.BindParameter method)
      (sqlalchemy.sql.expression.Join method)
      (sqlalchemy.sql.expression.Select method)
      (sqlalchemy.types.Boolean method)
      (sqlalchemy.types.DateTime method)
      (sqlalchemy.types.Enum method)
      (sqlalchemy.types.Float method)
      (sqlalchemy.types.Interval method)
      (sqlalchemy.types.LargeBinary method)
      (sqlalchemy.types.Numeric method)
      (sqlalchemy.types.PickleType method)
      (sqlalchemy.types.String method)
      (sqlalchemy.types.TypeDecorator method)
      (sqlalchemy.types.TypeEngine method)
      (sqlalchemy.types.Unicode method)
      (sqlalchemy.types.UnicodeText method)
      (sqlalchemy.util.ScopedRegistry method)
      __invert__() (sqlalchemy.sql.operators.ColumnOperators method)
      (sqlalchemy.sql.operators.Operators method)
      __le__() (sqlalchemy.schema.Column method)
      (sqlalchemy.sql.expression.BindParameter method)
      (sqlalchemy.sql.expression.ColumnClause method)
      (sqlalchemy.sql.expression.ColumnElement method)
      (sqlalchemy.sql.operators.ColumnOperators method)
      __lshift__() (sqlalchemy.sql.operators.ColumnOperators method)
      __lt__() (sqlalchemy.schema.Column method)
      (sqlalchemy.sql.expression.BindParameter method)
      (sqlalchemy.sql.expression.ColumnClause method)
      (sqlalchemy.sql.expression.ColumnElement method)
      (sqlalchemy.sql.operators.ColumnOperators method)
      __mod__() (sqlalchemy.sql.operators.ColumnOperators method)
      __mul__() (sqlalchemy.sql.operators.ColumnOperators method)
      __ne__() (sqlalchemy.dialects.postgresql.ranges.RangeOperators.comparator_factory method)
      (sqlalchemy.orm.properties.RelationshipProperty.Comparator method)
      (sqlalchemy.schema.Column method)
      (sqlalchemy.sql.expression.BindParameter method)
      (sqlalchemy.sql.expression.ColumnClause method)
      (sqlalchemy.sql.expression.ColumnElement method)
      (sqlalchemy.sql.operators.ColumnOperators method)
      __neg__() (sqlalchemy.sql.operators.ColumnOperators method)
      __new__() (sqlalchemy.sql.operators.ColumnOperators static method)
      __or__() (sqlalchemy.sql.operators.ColumnOperators method)
      (sqlalchemy.sql.operators.Operators method)
      __radd__() (sqlalchemy.sql.operators.ColumnOperators method)
      __rdiv__() (sqlalchemy.sql.operators.ColumnOperators method)
      __reduce__() (sqlalchemy.sql.operators.ColumnOperators method)
      __reduce_ex__() (sqlalchemy.sql.operators.ColumnOperators method)
      __repr__ (sqlalchemy.sql.operators.ColumnOperators attribute)
      __rmul__() (sqlalchemy.sql.operators.ColumnOperators method)
      __rshift__() (sqlalchemy.sql.operators.ColumnOperators method)
      __rsub__() (sqlalchemy.sql.operators.ColumnOperators method)
      __rtruediv__() (sqlalchemy.sql.operators.ColumnOperators method)
      __setattr__ (sqlalchemy.sql.operators.ColumnOperators attribute)
      __sizeof__() (sqlalchemy.sql.operators.ColumnOperators method)
      __str__ (sqlalchemy.sql.operators.ColumnOperators attribute)
      __sub__() (sqlalchemy.sql.operators.ColumnOperators method)
      __subclasshook__() (sqlalchemy.sql.operators.ColumnOperators static method)
      __truediv__() (sqlalchemy.sql.operators.ColumnOperators method)
      __weakref__ (sqlalchemy.sql.operators.ColumnOperators attribute)
      (sqlalchemy.sql.operators.Operators attribute)
      _asdict() (sqlalchemy.util.KeyedTuple method)
      _declarative_constructor() (in module sqlalchemy.ext.declarative.api)
      _fields (sqlalchemy.util.KeyedTuple attribute)
      _InspectionAttr (class in sqlalchemy.orm.interfaces)
      _parents (sqlalchemy.ext.mutable.MutableBase attribute)

      A

      AbstractConcreteBase (class in sqlalchemy.ext.declarative)
      AbstractType (class in sqlalchemy.types)
      ACID
      ACID model
      active_history (sqlalchemy.orm.interfaces.AttributeExtension attribute)
      adapt() (sqlalchemy.types.SchemaType method)
      (sqlalchemy.types.TypeDecorator method)
      (sqlalchemy.types.TypeEngine method)
      adapted() (sqlalchemy.orm.interfaces.PropComparator method)
      (sqlalchemy.orm.properties.RelationshipProperty.Comparator method)
      add() (sqlalchemy.orm.session.Session method)
      (sqlalchemy.sql.expression.ColumnCollection method)
      add_all() (sqlalchemy.orm.session.Session method)
      add_column() (sqlalchemy.orm.query.Query method)
      add_columns() (sqlalchemy.orm.query.Query method)
      add_entity() (sqlalchemy.orm.query.Query method)
      add_is_dependent_on() (sqlalchemy.schema.Table method)
      add_properties() (sqlalchemy.orm.mapper.Mapper method)
      add_property() (sqlalchemy.orm.mapper.Mapper method)
      AddConstraint (class in sqlalchemy.schema)
      adds() (sqlalchemy.orm.collections.collection static method)
      adjacency_list (module)
      adjacent_to() (sqlalchemy.dialects.postgresql.ranges.RangeOperators.comparator_factory method)
      after_attach() (sqlalchemy.orm.events.SessionEvents method)
      (sqlalchemy.orm.interfaces.SessionExtension method)
      after_begin() (sqlalchemy.orm.events.SessionEvents method)
      (sqlalchemy.orm.interfaces.SessionExtension method)
      after_bulk_delete() (sqlalchemy.orm.events.SessionEvents method)
      (sqlalchemy.orm.interfaces.SessionExtension method)
      after_bulk_update() (sqlalchemy.orm.events.SessionEvents method)
      (sqlalchemy.orm.interfaces.SessionExtension method)
      after_commit() (sqlalchemy.orm.events.SessionEvents method)
      (sqlalchemy.orm.interfaces.SessionExtension method)
      after_configured() (sqlalchemy.orm.events.MapperEvents method)
      after_create() (sqlalchemy.events.DDLEvents method)
      after_cursor_execute() (sqlalchemy.events.ConnectionEvents method)
      after_delete() (sqlalchemy.orm.events.MapperEvents method)
      (sqlalchemy.orm.interfaces.MapperExtension method)
      after_drop() (sqlalchemy.events.DDLEvents method)
      after_execute() (sqlalchemy.events.ConnectionEvents method)
      after_flush() (sqlalchemy.orm.events.SessionEvents method)
      (sqlalchemy.orm.interfaces.SessionExtension method)
      after_flush_postexec() (sqlalchemy.orm.events.SessionEvents method)
      (sqlalchemy.orm.interfaces.SessionExtension method)
      after_insert() (sqlalchemy.orm.events.MapperEvents method)
      (sqlalchemy.orm.interfaces.MapperExtension method)
      after_parent_attach() (sqlalchemy.events.DDLEvents method)
      after_rollback() (sqlalchemy.orm.events.SessionEvents method)
      (sqlalchemy.orm.interfaces.SessionExtension method)
      after_soft_rollback() (sqlalchemy.orm.events.SessionEvents method)
      after_transaction_create() (sqlalchemy.orm.events.SessionEvents method)
      after_transaction_end() (sqlalchemy.orm.events.SessionEvents method)
      after_update() (sqlalchemy.orm.events.MapperEvents method)
      (sqlalchemy.orm.interfaces.MapperExtension method)
      against() (sqlalchemy.schema.DDLElement method)
      Alias (class in sqlalchemy.sql.expression)
      alias() (in module sqlalchemy.sql.expression)
      (sqlalchemy.schema.Table method)
      (sqlalchemy.sql.expression.Alias method)
      (sqlalchemy.sql.expression.CompoundSelect method)
      (sqlalchemy.sql.expression.FromClause method)
      (sqlalchemy.sql.expression.Join method)
      (sqlalchemy.sql.expression.Select method)
      (sqlalchemy.sql.expression.TableClause method)
      aliased() (in module sqlalchemy.orm)
      AliasedClass (class in sqlalchemy.orm.util)
      AliasedInsp (class in sqlalchemy.orm.util)
      All (class in sqlalchemy.dialects.postgresql)
      all() (sqlalchemy.dialects.postgresql.ARRAY.Comparator method)
      (sqlalchemy.orm.query.Query method)
      all_orm_descriptors (sqlalchemy.orm.mapper.Mapper attribute)
      AmbiguousForeignKeysError
      and_() (in module sqlalchemy.sql.expression)
      annotations
      anon_label (sqlalchemy.schema.Column attribute)
      (sqlalchemy.sql.expression.BindParameter attribute)
      (sqlalchemy.sql.expression.ColumnClause attribute)
      (sqlalchemy.sql.expression.ColumnElement attribute)
      ansi_bind_rules (sqlalchemy.sql.compiler.SQLCompiler attribute)
      AnsiFunction (class in sqlalchemy.sql.functions)
      Any (class in sqlalchemy.dialects.postgresql)
      any() (sqlalchemy.dialects.postgresql.ARRAY.Comparator method)
      (sqlalchemy.ext.associationproxy.AssociationProxy method)
      (sqlalchemy.orm.interfaces.PropComparator method)
      (sqlalchemy.orm.properties.RelationshipProperty.Comparator method)
      append() (sqlalchemy.ext.orderinglist.OrderingList method)
      (sqlalchemy.orm.events.AttributeEvents method)
      (sqlalchemy.orm.interfaces.AttributeExtension method)
      append_column() (sqlalchemy.schema.Table method)
      (sqlalchemy.sql.expression.Select method)
      append_constraint() (sqlalchemy.schema.Table method)
      append_correlation() (sqlalchemy.sql.expression.Select method)
      append_ddl_listener() (sqlalchemy.schema.MetaData method)
      (sqlalchemy.schema.Table method)
      append_foreign_key() (sqlalchemy.schema.Column method)
      append_from() (sqlalchemy.sql.expression.Select method)
      append_group_by() (sqlalchemy.sql.expression.CompoundSelect method)
      (sqlalchemy.sql.expression.Select method)
      (sqlalchemy.sql.expression.SelectBase method)
      append_having() (sqlalchemy.sql.expression.Select method)
      append_order_by() (sqlalchemy.sql.expression.CompoundSelect method)
      (sqlalchemy.sql.expression.Select method)
      (sqlalchemy.sql.expression.SelectBase method)
      append_prefix() (sqlalchemy.sql.expression.Select method)
      append_result() (sqlalchemy.orm.events.MapperEvents method)
      (sqlalchemy.orm.interfaces.MapperExtension method)
      append_whereclause() (sqlalchemy.sql.expression.Select method)
      appender() (sqlalchemy.orm.collections.collection static method)
      apply_labels() (sqlalchemy.sql.expression.CompoundSelect method)
      (sqlalchemy.sql.expression.Select method)
      (sqlalchemy.sql.expression.SelectBase method)
      ArgumentError
      array (class in sqlalchemy.dialects.postgresql)
      ARRAY (class in sqlalchemy.dialects.postgresql)
      array() (sqlalchemy.dialects.postgresql.HSTORE.comparator_factory method)
      ARRAY.Comparator (class in sqlalchemy.dialects.postgresql)
      as_declarative() (in module sqlalchemy.ext.declarative)
      as_mutable() (sqlalchemy.ext.mutable.Mutable class method)
      as_scalar() (sqlalchemy.orm.query.Query method)
      (sqlalchemy.sql.expression.CompoundSelect method)
      (sqlalchemy.sql.expression.Select method)
      (sqlalchemy.sql.expression.SelectBase method)
      asc() (in module sqlalchemy.sql.expression)
      (sqlalchemy.schema.Column method)
      (sqlalchemy.sql.expression.BindParameter method)
      (sqlalchemy.sql.expression.ColumnClause method)
      (sqlalchemy.sql.expression.ColumnElement method)
      (sqlalchemy.sql.operators.ColumnOperators method)
      AssertionPool (class in sqlalchemy.pool)
      associate_with() (sqlalchemy.ext.mutable.Mutable class method)
      associate_with_attribute() (sqlalchemy.ext.mutable.Mutable class method)
      association (module)
      ASSOCIATION_PROXY (in module sqlalchemy.ext.associationproxy)
      association_proxy() (in module sqlalchemy.ext.associationproxy)
      AssociationProxy (class in sqlalchemy.ext.associationproxy)
      atomicity
      attr (sqlalchemy.ext.associationproxy.AssociationProxy attribute)
      attribute_instrument() (sqlalchemy.orm.events.InstrumentationEvents method)
      attribute_mapped_collection() (in module sqlalchemy.orm.collections)
      AttributeEvents (class in sqlalchemy.orm.events)
      AttributeExtension (class in sqlalchemy.orm.interfaces)
      AttributeState (class in sqlalchemy.orm.state)
      attrs (sqlalchemy.orm.mapper.Mapper attribute)
      (sqlalchemy.orm.state.InstanceState attribute)
      autocommit() (sqlalchemy.sql.expression.CompoundSelect method)
      (sqlalchemy.sql.expression.Select method)
      (sqlalchemy.sql.expression.SelectBase method)
      autoflush() (sqlalchemy.orm.query.Query method)

      B

      backref() (in module sqlalchemy.orm)
      base_columns (sqlalchemy.schema.Column attribute)
      (sqlalchemy.sql.expression.ColumnElement attribute)
      base_mapper (sqlalchemy.orm.mapper.Mapper attribute)
      before_attach() (sqlalchemy.orm.events.SessionEvents method)
      before_commit() (sqlalchemy.orm.events.SessionEvents method)
      (sqlalchemy.orm.interfaces.SessionExtension method)
      before_create() (sqlalchemy.events.DDLEvents method)
      before_cursor_execute() (sqlalchemy.events.ConnectionEvents method)
      before_delete() (sqlalchemy.orm.events.MapperEvents method)
      (sqlalchemy.orm.interfaces.MapperExtension method)
      before_drop() (sqlalchemy.events.DDLEvents method)
      before_execute() (sqlalchemy.events.ConnectionEvents method)
      before_flush() (sqlalchemy.orm.events.SessionEvents method)
      (sqlalchemy.orm.interfaces.SessionExtension method)
      before_insert() (sqlalchemy.orm.events.MapperEvents method)
      (sqlalchemy.orm.interfaces.MapperExtension method)
      before_parent_attach() (sqlalchemy.events.DDLEvents method)
      before_update() (sqlalchemy.orm.events.MapperEvents method)
      (sqlalchemy.orm.interfaces.MapperExtension method)
      begin() (sqlalchemy.engine.Connection method)
      (sqlalchemy.engine.Engine method)
      (sqlalchemy.events.ConnectionEvents method)
      (sqlalchemy.interfaces.ConnectionProxy method)
      (sqlalchemy.orm.session.Session method)
      begin_nested() (sqlalchemy.engine.Connection method)
      (sqlalchemy.orm.session.Session method)
      begin_twophase() (sqlalchemy.engine.Connection method)
      (sqlalchemy.events.ConnectionEvents method)
      (sqlalchemy.interfaces.ConnectionProxy method)
      between() (in module sqlalchemy.sql.expression)
      (sqlalchemy.schema.Column method)
      (sqlalchemy.sql.expression.BindParameter method)
      (sqlalchemy.sql.expression.ColumnClause method)
      (sqlalchemy.sql.expression.ColumnElement method)
      (sqlalchemy.sql.operators.ColumnOperators method)
      BFILE (class in sqlalchemy.dialects.oracle)
      BIGINT (class in sqlalchemy.dialects.drizzle)
      (class in sqlalchemy.dialects.mysql)
      (class in sqlalchemy.types)
      BigInteger (class in sqlalchemy.types)
      BINARY (class in sqlalchemy.dialects.mysql)
      (class in sqlalchemy.types)
      BinaryExpression (class in sqlalchemy.sql.expression)
      bind (sqlalchemy.schema.Column attribute)
      (sqlalchemy.schema.DDLElement attribute)
      (sqlalchemy.schema.Index attribute)
      (sqlalchemy.schema.MetaData attribute)
      (sqlalchemy.schema.Table attribute)
      (sqlalchemy.schema.ThreadLocalMetaData attribute)
      (sqlalchemy.sql.expression.ColumnElement attribute)
      (sqlalchemy.sql.expression.Delete attribute)
      (sqlalchemy.sql.expression.Executable attribute)
      (sqlalchemy.sql.expression.Insert attribute)
      (sqlalchemy.sql.expression.Update attribute)
      (sqlalchemy.sql.expression.UpdateBase attribute)
      (sqlalchemy.types.SchemaType attribute)
      bind_expression() (sqlalchemy.types.TypeDecorator method)
      (sqlalchemy.types.TypeEngine method)
      bind_mapper() (sqlalchemy.orm.session.Session method)
      bind_processor() (sqlalchemy.types.TypeDecorator method)
      (sqlalchemy.types.TypeEngine method)
      bind_table() (sqlalchemy.orm.session.Session method)
      bindparam() (in module sqlalchemy.sql.expression)
      BindParameter (class in sqlalchemy.sql.expression)
      BIT (class in sqlalchemy.dialects.mssql)
      (class in sqlalchemy.dialects.mysql)
      (class in sqlalchemy.dialects.postgresql)
      BLOB (class in sqlalchemy.dialects.mysql)
      (class in sqlalchemy.types)
      BOOLEAN (class in sqlalchemy.dialects.mysql)
      Boolean (class in sqlalchemy.types)
      BOOLEAN (class in sqlalchemy.types)
      bulk_replace() (in module sqlalchemy.orm.collections)
      BYTEA (class in sqlalchemy.dialects.postgresql)

      C

      c (sqlalchemy.orm.mapper.Mapper attribute)
      (sqlalchemy.schema.Table attribute)
      (sqlalchemy.sql.expression.Alias attribute)
      (sqlalchemy.sql.expression.CTE attribute)
      (sqlalchemy.sql.expression.CompoundSelect attribute)
      (sqlalchemy.sql.expression.FromClause attribute)
      (sqlalchemy.sql.expression.Join attribute)
      (sqlalchemy.sql.expression.Select attribute)
      (sqlalchemy.sql.expression.TableClause attribute)
      callable_ (sqlalchemy.schema.DDLElement attribute)
      cascade (sqlalchemy.orm.interfaces.MapperProperty attribute)
      (sqlalchemy.orm.properties.RelationshipProperty attribute)
      cascade_iterator() (sqlalchemy.orm.interfaces.MapperProperty method)
      (sqlalchemy.orm.mapper.Mapper method)
      Case (class in sqlalchemy.sql.expression)
      case() (in module sqlalchemy.sql.expression)
      Cast (class in sqlalchemy.sql.expression)
      cast() (in module sqlalchemy.sql.expression)
      changed() (sqlalchemy.ext.mutable.Mutable method)
      CHAR (class in sqlalchemy.dialects.drizzle)
      (class in sqlalchemy.dialects.mssql)
      (class in sqlalchemy.dialects.mysql)
      (class in sqlalchemy.types)
      char_length (class in sqlalchemy.sql.functions)
      CheckConstraint (class in sqlalchemy.schema)
      checkin() (sqlalchemy.events.PoolEvents method)
      (sqlalchemy.interfaces.PoolListener method)
      checkout() (sqlalchemy.events.PoolEvents method)
      (sqlalchemy.interfaces.PoolListener method)
      CIDR (class in sqlalchemy.dialects.postgresql)
      CircularDependencyError
      class_ (sqlalchemy.orm.mapper.Mapper attribute)
      class_attribute (sqlalchemy.orm.interfaces.MapperProperty attribute)
      class_instrument() (sqlalchemy.orm.events.InstrumentationEvents method)
      class_manager (sqlalchemy.orm.mapper.Mapper attribute)
      class_mapper() (in module sqlalchemy.orm)
      class_uninstrument() (sqlalchemy.orm.events.InstrumentationEvents method)
      ClassManager (class in sqlalchemy.orm.instrumentation)
      ClauseElement (class in sqlalchemy.sql.expression)
      ClauseList (class in sqlalchemy.sql.expression)
      clear() (sqlalchemy.orm.collections.MappedCollection method)
      (sqlalchemy.schema.MetaData method)
      (sqlalchemy.util.ScopedRegistry method)
      clear_managers() (in module sqlalchemy.pool)
      clear_mappers() (in module sqlalchemy.orm)
      CLOB (class in sqlalchemy.types)
      close() (sqlalchemy.engine.Connection method)
      (sqlalchemy.engine.ResultProxy method)
      (sqlalchemy.engine.Transaction method)
      (sqlalchemy.orm.session.Session method)
      close_all() (sqlalchemy.orm.session.Session class method)
      (sqlalchemy.orm.session.sessionmaker class method)
      closed (sqlalchemy.engine.Connection attribute)
      coalesce (class in sqlalchemy.sql.functions)
      coerce() (sqlalchemy.ext.mutable.MutableBase class method)
      coerce_arguments (sqlalchemy.sql.functions.GenericFunction attribute)
      coerce_compared_value() (sqlalchemy.types.Interval method)
      (sqlalchemy.types.TypeDecorator method)
      (sqlalchemy.types.TypeEngine method)
      (sqlalchemy.types.UserDefinedType method)
      coerce_to_is_types (sqlalchemy.types.TypeDecorator attribute)
      collate() (in module sqlalchemy.sql.expression)
      (sqlalchemy.schema.Column method)
      (sqlalchemy.sql.expression.BindParameter method)
      (sqlalchemy.sql.expression.ColumnClause method)
      (sqlalchemy.sql.expression.ColumnElement method)
      (sqlalchemy.sql.operators.ColumnOperators method)
      collection (class in sqlalchemy.orm.collections)
      collection_adapter() (in module sqlalchemy.orm.collections)
      CollectionAdapter (class in sqlalchemy.orm.collections)
      Column (class in sqlalchemy.schema)
      column (sqlalchemy.schema.ForeignKey attribute)
      column() (in module sqlalchemy.sql.expression)
      (sqlalchemy.sql.expression.Select method)
      column_attrs (sqlalchemy.orm.mapper.Mapper attribute)
      column_descriptions (sqlalchemy.orm.query.Query attribute)
      column_expression() (sqlalchemy.types.TypeDecorator method)
      (sqlalchemy.types.TypeEngine method)
      column_mapped_collection() (in module sqlalchemy.orm.collections)
      column_property() (in module sqlalchemy.orm)
      column_reflect() (sqlalchemy.events.DDLEvents method)
      ColumnClause (class in sqlalchemy.sql.expression)
      ColumnCollection (class in sqlalchemy.sql.expression)
      ColumnCollectionConstraint (class in sqlalchemy.schema)
      ColumnComparator (sqlalchemy.orm.properties.ColumnProperty attribute)
      ColumnDefault (class in sqlalchemy.schema)
      ColumnElement (class in sqlalchemy.sql.expression)
      ColumnOperators (class in sqlalchemy.sql.operators)
      ColumnProperty (class in sqlalchemy.orm.properties)
      ColumnProperty.Comparator (class in sqlalchemy.orm.properties)
      columns (sqlalchemy.orm.mapper.Mapper attribute)
      (sqlalchemy.schema.Table attribute)
      (sqlalchemy.sql.expression.Alias attribute)
      (sqlalchemy.sql.expression.CTE attribute)
      (sqlalchemy.sql.expression.CompoundSelect attribute)
      (sqlalchemy.sql.expression.FromClause attribute)
      (sqlalchemy.sql.expression.Join attribute)
      (sqlalchemy.sql.expression.Select attribute)
      (sqlalchemy.sql.expression.TableClause attribute)
      columns clause
      commit() (sqlalchemy.engine.Transaction method)
      (sqlalchemy.events.ConnectionEvents method)
      (sqlalchemy.interfaces.ConnectionProxy method)
      (sqlalchemy.orm.session.Session method)
      commit_twophase() (sqlalchemy.events.ConnectionEvents method)
      (sqlalchemy.interfaces.ConnectionProxy method)
      common_parent() (sqlalchemy.orm.mapper.Mapper method)
      comparable_using() (in module sqlalchemy.ext.declarative)
      Comparator (class in sqlalchemy.ext.hybrid)
      comparator (sqlalchemy.schema.Column attribute)
      (sqlalchemy.sql.expression.ColumnElement attribute)
      comparator() (sqlalchemy.ext.hybrid.hybrid_property method)
      comparator_factory (sqlalchemy.types.TypeEngine attribute)
      compare() (sqlalchemy.orm.interfaces.MapperProperty method)
      (sqlalchemy.schema.Column method)
      (sqlalchemy.schema.Table method)
      (sqlalchemy.sql.expression.Alias method)
      (sqlalchemy.sql.expression.BinaryExpression method)
      (sqlalchemy.sql.expression.BindParameter method)
      (sqlalchemy.sql.expression.CTE method)
      (sqlalchemy.sql.expression.ClauseElement method)
      (sqlalchemy.sql.expression.ClauseList method)
      (sqlalchemy.sql.expression.ColumnClause method)
      (sqlalchemy.sql.expression.ColumnElement method)
      (sqlalchemy.sql.expression.CompoundSelect method)
      (sqlalchemy.sql.expression.Delete method)
      (sqlalchemy.sql.expression.Insert method)
      (sqlalchemy.sql.expression.Join method)
      (sqlalchemy.sql.expression.Select method)
      (sqlalchemy.sql.expression.TableClause method)
      (sqlalchemy.sql.expression.UnaryExpression method)
      (sqlalchemy.sql.expression.Update method)
      compare_values() (sqlalchemy.types.TypeDecorator method)
      (sqlalchemy.types.TypeEngine method)
      compile() (sqlalchemy.engine.interfaces.Compiled method)
      (sqlalchemy.orm.mapper.Mapper method)
      (sqlalchemy.schema.Column method)
      (sqlalchemy.schema.Table method)
      (sqlalchemy.sql.compiler.DDLCompiler method)
      (sqlalchemy.sql.expression.Alias method)
      (sqlalchemy.sql.expression.BindParameter method)
      (sqlalchemy.sql.expression.CTE method)
      (sqlalchemy.sql.expression.ClauseElement method)
      (sqlalchemy.sql.expression.ColumnClause method)
      (sqlalchemy.sql.expression.ColumnElement method)
      (sqlalchemy.sql.expression.CompoundSelect method)
      (sqlalchemy.sql.expression.Delete method)
      (sqlalchemy.sql.expression.Insert method)
      (sqlalchemy.sql.expression.Join method)
      (sqlalchemy.sql.expression.Select method)
      (sqlalchemy.sql.expression.TableClause method)
      (sqlalchemy.sql.expression.Update method)
      (sqlalchemy.types.TypeDecorator method)
      (sqlalchemy.types.TypeEngine method)
      Compiled (class in sqlalchemy.engine.interfaces)
      compiled (sqlalchemy.orm.mapper.Mapper attribute)
      CompileError
      compiles() (in module sqlalchemy.ext.compiler)
      composite() (in module sqlalchemy.orm)
      CompositeProperty (class in sqlalchemy.orm.descriptor_props)
      CompositeProperty.Comparator (class in sqlalchemy.orm.descriptor_props)
      composites (sqlalchemy.orm.mapper.Mapper attribute)
      CompoundSelect (class in sqlalchemy.sql.expression)
      concat (class in sqlalchemy.sql.functions)
      concat() (sqlalchemy.schema.Column method)
      (sqlalchemy.sql.expression.BindParameter method)
      (sqlalchemy.sql.expression.ColumnClause method)
      (sqlalchemy.sql.expression.ColumnElement method)
      (sqlalchemy.sql.operators.ColumnOperators method)
      Concatenable (class in sqlalchemy.types)
      concrete (sqlalchemy.orm.mapper.Mapper attribute)
      ConcreteBase (class in sqlalchemy.ext.declarative)
      ConcurrentModificationError (in module sqlalchemy.orm.exc)
      configure() (sqlalchemy.orm.scoping.scoped_session method)
      (sqlalchemy.orm.session.sessionmaker method)
      configure_mappers() (in module sqlalchemy.orm)
      configured (sqlalchemy.orm.mapper.Mapper attribute)
      connect() (sqlalchemy.engine.Connectable method)
      (sqlalchemy.engine.Connection method)
      (sqlalchemy.engine.Engine method)
      (sqlalchemy.engine.interfaces.Dialect method)
      (sqlalchemy.events.PoolEvents method)
      (sqlalchemy.interfaces.PoolListener method)
      (sqlalchemy.pool.Pool method)
      (sqlalchemy.pool.QueuePool method)
      Connectable (class in sqlalchemy.engine)
      Connection (class in sqlalchemy.engine)
      connection (sqlalchemy.engine.Connection attribute)
      connection() (sqlalchemy.orm.session.Session method)
      ConnectionEvents (class in sqlalchemy.events)
      ConnectionProxy (class in sqlalchemy.interfaces)
      consistency
      Constraint (class in sqlalchemy.schema)
      construct_params() (sqlalchemy.engine.interfaces.Compiled method)
      (sqlalchemy.sql.compiler.SQLCompiler method)
      contained_by() (sqlalchemy.dialects.postgresql.ARRAY.Comparator method)
      (sqlalchemy.dialects.postgresql.HSTORE.comparator_factory method)
      (sqlalchemy.dialects.postgresql.ranges.RangeOperators.comparator_factory method)
      contains() (sqlalchemy.dialects.postgresql.ARRAY.Comparator method)
      (sqlalchemy.dialects.postgresql.HSTORE.comparator_factory method)
      (sqlalchemy.dialects.postgresql.ranges.RangeOperators.comparator_factory method)
      (sqlalchemy.ext.associationproxy.AssociationProxy method)
      (sqlalchemy.orm.properties.RelationshipProperty.Comparator method)
      (sqlalchemy.schema.Column method)
      (sqlalchemy.sql.expression.BindParameter method)
      (sqlalchemy.sql.expression.ColumnClause method)
      (sqlalchemy.sql.expression.ColumnElement method)
      (sqlalchemy.sql.operators.ColumnOperators method)
      contains_alias() (in module sqlalchemy.orm)
      contains_eager() (in module sqlalchemy.orm)
      contextual_connect() (sqlalchemy.engine.Connectable method)
      (sqlalchemy.engine.Connection method)
      (sqlalchemy.engine.Engine method)
      converter() (sqlalchemy.orm.collections.collection static method)
      copy() (sqlalchemy.schema.Column method)
      (sqlalchemy.schema.ForeignKey method)
      (sqlalchemy.types.SchemaType method)
      (sqlalchemy.types.TypeDecorator method)
      correlate() (sqlalchemy.orm.query.Query method)
      (sqlalchemy.sql.expression.Select method)
      correlate_except() (sqlalchemy.sql.expression.Select method)
      correlated subqueries
      correlated subquery
      correlates
      correspond_on_equivalents() (sqlalchemy.schema.Table method)
      (sqlalchemy.sql.expression.Alias method)
      (sqlalchemy.sql.expression.CTE method)
      (sqlalchemy.sql.expression.CompoundSelect method)
      (sqlalchemy.sql.expression.FromClause method)
      (sqlalchemy.sql.expression.Join method)
      (sqlalchemy.sql.expression.Select method)
      (sqlalchemy.sql.expression.TableClause method)
      corresponding_column() (sqlalchemy.schema.Table method)
      (sqlalchemy.sql.expression.Alias method)
      (sqlalchemy.sql.expression.CTE method)
      (sqlalchemy.sql.expression.CompoundSelect method)
      (sqlalchemy.sql.expression.FromClause method)
      (sqlalchemy.sql.expression.Join method)
      (sqlalchemy.sql.expression.Select method)
      (sqlalchemy.sql.expression.TableClause method)
      count (class in sqlalchemy.sql.functions)
      count() (sqlalchemy.orm.query.Query method)
      (sqlalchemy.schema.Table method)
      (sqlalchemy.sql.expression.Alias method)
      (sqlalchemy.sql.expression.CTE method)
      (sqlalchemy.sql.expression.CompoundSelect method)
      (sqlalchemy.sql.expression.FromClause method)
      (sqlalchemy.sql.expression.Join method)
      (sqlalchemy.sql.expression.Select method)
      (sqlalchemy.sql.expression.TableClause method)
      count_from_0() (in module sqlalchemy.ext.orderinglist)
      count_from_1() (in module sqlalchemy.ext.orderinglist)
      count_from_n_factory() (in module sqlalchemy.ext.orderinglist)
      create() (sqlalchemy.dialects.postgresql.ENUM method)
      (sqlalchemy.engine.Connectable method)
      (sqlalchemy.schema.Index method)
      (sqlalchemy.schema.Sequence method)
      (sqlalchemy.schema.Table method)
      (sqlalchemy.types.Enum method)
      (sqlalchemy.types.SchemaType method)
      create_all() (sqlalchemy.schema.MetaData method)
      create_connect_args() (sqlalchemy.engine.interfaces.Dialect method)
      create_cursor() (sqlalchemy.engine.interfaces.ExecutionContext method)
      create_engine() (in module sqlalchemy)
      create_instance() (sqlalchemy.orm.events.MapperEvents method)
      (sqlalchemy.orm.interfaces.MapperExtension method)
      create_row_processor() (sqlalchemy.orm.interfaces.MapperProperty method)
      create_xid() (sqlalchemy.engine.default.DefaultDialect method)
      (sqlalchemy.engine.interfaces.Dialect method)
      CreateColumn (class in sqlalchemy.schema)
      CreateIndex (class in sqlalchemy.schema)
      CreateSchema (class in sqlalchemy.schema)
      CreateSequence (class in sqlalchemy.schema)
      CreateTable (class in sqlalchemy.schema)
      CTE (class in sqlalchemy.sql.expression)
      cte() (sqlalchemy.orm.query.Query method)
      (sqlalchemy.sql.expression.CompoundSelect method)
      (sqlalchemy.sql.expression.Select method)
      (sqlalchemy.sql.expression.SelectBase method)
      current_date (class in sqlalchemy.sql.functions)
      current_time (class in sqlalchemy.sql.functions)
      current_timestamp (class in sqlalchemy.sql.functions)
      current_user (class in sqlalchemy.sql.functions)
      cursor_execute() (sqlalchemy.interfaces.ConnectionProxy method)
      custom_attributes (module)
      custom_op (class in sqlalchemy.sql.operators)

      D

      DatabaseError
      DataError
      DATE (class in sqlalchemy.dialects.mysql)
      (class in sqlalchemy.dialects.sqlite)
      Date (class in sqlalchemy.types)
      DATE (class in sqlalchemy.types)
      DATERANGE (class in sqlalchemy.dialects.postgresql)
      DATETIME (class in sqlalchemy.dialects.mysql)
      (class in sqlalchemy.dialects.sqlite)
      DateTime (class in sqlalchemy.types)
      DATETIME (class in sqlalchemy.types)
      DATETIME2 (class in sqlalchemy.dialects.mssql)
      DATETIMEOFFSET (class in sqlalchemy.dialects.mssql)
      DBAPI
      dbapi_error() (sqlalchemy.events.ConnectionEvents method)
      DBAPIError
      DDL (class in sqlalchemy.schema)
      DDLCompiler (class in sqlalchemy.sql.compiler)
      DDLElement (class in sqlalchemy.schema)
      DDLEvents (class in sqlalchemy.events)
      DECIMAL (class in sqlalchemy.dialects.drizzle)
      (class in sqlalchemy.dialects.mysql)
      (class in sqlalchemy.types)
      declarative_base() (in module sqlalchemy.ext.declarative)
      declared_attr (class in sqlalchemy.ext.declarative)
      default (sqlalchemy.schema.Column attribute)
      default_from() (sqlalchemy.sql.compiler.SQLCompiler method)
      default_schema_name (sqlalchemy.engine.reflection.Inspector attribute)
      DefaultClause (class in sqlalchemy.schema)
      DefaultDialect (class in sqlalchemy.engine.default)
      DefaultExecutionContext (class in sqlalchemy.engine.default)
      DefaultGenerator (class in sqlalchemy.schema)
      defer() (in module sqlalchemy.orm)
      deferred() (in module sqlalchemy.orm)
      DeferredReflection (class in sqlalchemy.ext.declarative)
      define_constraint_remote_table() (sqlalchemy.sql.compiler.DDLCompiler method)
      defined() (sqlalchemy.dialects.postgresql.HSTORE.comparator_factory method)
      del_attribute() (in module sqlalchemy.orm.attributes)
      Delete (class in sqlalchemy.sql.expression)
      delete() (in module sqlalchemy.sql.expression)
      (sqlalchemy.dialects.postgresql.HSTORE.comparator_factory method)
      (sqlalchemy.orm.query.Query method)
      (sqlalchemy.orm.session.Session method)
      (sqlalchemy.schema.Table method)
      (sqlalchemy.sql.expression.TableClause method)
      deleted (sqlalchemy.orm.session.Session attribute)
      deleter() (sqlalchemy.ext.hybrid.hybrid_property method)
      denormalize_name() (sqlalchemy.engine.default.DefaultDialect method)
      (sqlalchemy.engine.interfaces.Dialect method)
      deregister() (in module sqlalchemy.ext.compiler)
      desc() (in module sqlalchemy.sql.expression)
      (sqlalchemy.schema.Column method)
      (sqlalchemy.sql.expression.BindParameter method)
      (sqlalchemy.sql.expression.ColumnClause method)
      (sqlalchemy.sql.expression.ColumnElement method)
      (sqlalchemy.sql.operators.ColumnOperators method)
      description (sqlalchemy.schema.Column attribute)
      (sqlalchemy.schema.Table attribute)
      (sqlalchemy.sql.expression.CompoundSelect attribute)
      (sqlalchemy.sql.expression.FromClause attribute)
      (sqlalchemy.sql.expression.Select attribute)
      descriptor
      descriptors
      Deserializer() (in module sqlalchemy.ext.serializer)
      detach() (sqlalchemy.engine.Connection method)
      detached (sqlalchemy.orm.state.InstanceState attribute)
      DetachedInstanceError
      Dialect (class in sqlalchemy.engine.interfaces)
      dialect (sqlalchemy.schema.DDLElement attribute)
      dialect_impl() (sqlalchemy.types.TypeDecorator method)
      (sqlalchemy.types.TypeEngine method)
      dict_getter() (sqlalchemy.ext.instrumentation.InstrumentationManager method)
      dirty (sqlalchemy.orm.session.Session attribute)
      DisconnectionError
      discriminator
      dispatch (sqlalchemy.schema.Column attribute)
      (sqlalchemy.schema.Table attribute)
      dispose() (sqlalchemy.engine.Engine method)
      (sqlalchemy.ext.instrumentation.InstrumentationManager method)
      (sqlalchemy.orm.instrumentation.ClassManager method)
      (sqlalchemy.pool.Pool method)
      (sqlalchemy.schema.ThreadLocalMetaData method)
      distinct() (in module sqlalchemy.sql.expression)
      (sqlalchemy.orm.query.Query method)
      (sqlalchemy.schema.Column method)
      (sqlalchemy.sql.expression.BindParameter method)
      (sqlalchemy.sql.expression.ColumnClause method)
      (sqlalchemy.sql.expression.ColumnElement method)
      (sqlalchemy.sql.expression.Select method)
      (sqlalchemy.sql.operators.ColumnOperators method)
      do_begin() (sqlalchemy.engine.interfaces.Dialect method)
      do_begin_twophase() (sqlalchemy.engine.default.DefaultDialect method)
      (sqlalchemy.engine.interfaces.Dialect method)
      do_close() (sqlalchemy.engine.interfaces.Dialect method)
      do_commit() (sqlalchemy.engine.interfaces.Dialect method)
      do_commit_twophase() (sqlalchemy.engine.default.DefaultDialect method)
      (sqlalchemy.engine.interfaces.Dialect method)
      do_execute() (sqlalchemy.engine.interfaces.Dialect method)
      do_execute_no_params() (sqlalchemy.engine.interfaces.Dialect method)
      do_executemany() (sqlalchemy.engine.interfaces.Dialect method)
      do_init() (sqlalchemy.orm.descriptor_props.CompositeProperty method)
      (sqlalchemy.orm.interfaces.MapperProperty method)
      do_prepare_twophase() (sqlalchemy.engine.default.DefaultDialect method)
      (sqlalchemy.engine.interfaces.Dialect method)
      do_recover_twophase() (sqlalchemy.engine.default.DefaultDialect method)
      (sqlalchemy.engine.interfaces.Dialect method)
      do_release_savepoint() (sqlalchemy.engine.interfaces.Dialect method)
      do_rollback() (sqlalchemy.engine.interfaces.Dialect method)
      do_rollback_to_savepoint() (sqlalchemy.engine.interfaces.Dialect method)
      do_rollback_twophase() (sqlalchemy.engine.default.DefaultDialect method)
      (sqlalchemy.engine.interfaces.Dialect method)
      do_savepoint() (sqlalchemy.engine.interfaces.Dialect method)
      dogpile_caching (module)
      DontWrapMixin (class in sqlalchemy.exc)
      DOUBLE (class in sqlalchemy.dialects.drizzle)
      (class in sqlalchemy.dialects.mysql)
      DOUBLE_PRECISION (class in sqlalchemy.dialects.oracle)
      (class in sqlalchemy.dialects.postgresql)
      driver (sqlalchemy.engine.Engine attribute)
      drop() (sqlalchemy.dialects.postgresql.ENUM method)
      (sqlalchemy.engine.Connectable method)
      (sqlalchemy.schema.Index method)
      (sqlalchemy.schema.Sequence method)
      (sqlalchemy.schema.Table method)
      (sqlalchemy.types.Enum method)
      (sqlalchemy.types.SchemaType method)
      drop_all() (sqlalchemy.schema.MetaData method)
      DropConstraint (class in sqlalchemy.schema)
      DropIndex (class in sqlalchemy.schema)
      DropSchema (class in sqlalchemy.schema)
      DropSequence (class in sqlalchemy.schema)
      DropTable (class in sqlalchemy.schema)
      dumps() (in module sqlalchemy.ext.serializer)
      durability
      dynamic_dict (module)
      dynamic_loader() (in module sqlalchemy.orm)

      E

      eagerload() (in module sqlalchemy.orm)
      eagerload_all() (in module sqlalchemy.orm)
      effective_value (sqlalchemy.sql.expression.BindParameter attribute)
      elementtree (module)
      empty() (sqlalchemy.orm.attributes.History method)
      enable_assertions() (sqlalchemy.orm.query.Query method)
      enable_eagerloads() (sqlalchemy.orm.query.Query method)
      enable_relationship_loading() (sqlalchemy.orm.session.Session method)
      endswith() (sqlalchemy.schema.Column method)
      (sqlalchemy.sql.expression.BindParameter method)
      (sqlalchemy.sql.expression.ColumnClause method)
      (sqlalchemy.sql.expression.ColumnElement method)
      (sqlalchemy.sql.operators.ColumnOperators method)
      Engine (class in sqlalchemy.engine)
      engine_from_config() (in module sqlalchemy)
      entity (sqlalchemy.orm.mapper.Mapper attribute)
      ENUM (class in sqlalchemy.dialects.drizzle)
      (class in sqlalchemy.dialects.mysql)
      (class in sqlalchemy.dialects.postgresql)
      Enum (class in sqlalchemy.types)
      escape_literal_column() (sqlalchemy.sql.compiler.SQLCompiler method)
      except_() (in module sqlalchemy.sql.expression)
      (sqlalchemy.orm.query.Query method)
      (sqlalchemy.sql.expression.Select method)
      except_all() (in module sqlalchemy.sql.expression)
      (sqlalchemy.orm.query.Query method)
      (sqlalchemy.sql.expression.Select method)
      ExcludeConstraint (class in sqlalchemy.dialects.postgresql)
      Executable (class in sqlalchemy.sql.expression)
      execute() (sqlalchemy.engine.Connectable method)
      (sqlalchemy.engine.Connection method)
      (sqlalchemy.engine.Engine method)
      (sqlalchemy.engine.interfaces.Compiled method)
      (sqlalchemy.interfaces.ConnectionProxy method)
      (sqlalchemy.orm.session.Session method)
      (sqlalchemy.schema.DDLElement method)
      (sqlalchemy.sql.compiler.DDLCompiler method)
      (sqlalchemy.sql.expression.CompoundSelect method)
      (sqlalchemy.sql.expression.Delete method)
      (sqlalchemy.sql.expression.Executable method)
      (sqlalchemy.sql.expression.Insert method)
      (sqlalchemy.sql.expression.Select method)
      (sqlalchemy.sql.expression.Update method)
      execute_at() (sqlalchemy.schema.DDLElement method)
      execute_if() (sqlalchemy.schema.DDLElement method)
      execute_sequence_format (sqlalchemy.engine.default.DefaultDialect attribute)
      execution_options() (sqlalchemy.engine.Connection method)
      (sqlalchemy.engine.Engine method)
      (sqlalchemy.orm.query.Query method)
      (sqlalchemy.sql.expression.CompoundSelect method)
      (sqlalchemy.sql.expression.Delete method)
      (sqlalchemy.sql.expression.Executable method)
      (sqlalchemy.sql.expression.Insert method)
      (sqlalchemy.sql.expression.Select method)
      (sqlalchemy.sql.expression.Update method)
      ExecutionContext (class in sqlalchemy.engine.interfaces)
      exists() (in module sqlalchemy.sql.expression)
      (sqlalchemy.orm.query.Query method)
      (sqlalchemy.schema.Table method)
      expire() (sqlalchemy.orm.events.InstanceEvents method)
      (sqlalchemy.orm.session.Session method)
      expire_all() (sqlalchemy.orm.session.Session method)
      expired_attributes (sqlalchemy.orm.state.InstanceState attribute)
      expression (sqlalchemy.orm.properties.ColumnProperty attribute)
      (sqlalchemy.schema.Column attribute)
      (sqlalchemy.sql.expression.BindParameter attribute)
      (sqlalchemy.sql.expression.ColumnClause attribute)
      (sqlalchemy.sql.expression.ColumnElement attribute)
      expression() (sqlalchemy.ext.hybrid.hybrid_method method)
      (sqlalchemy.ext.hybrid.hybrid_property method)
      expunge() (sqlalchemy.orm.session.Session method)
      expunge_all() (sqlalchemy.orm.session.Session method)
      ExtendedInstrumentationRegistry (class in sqlalchemy.ext.instrumentation)
      extension_type (sqlalchemy.ext.associationproxy.AssociationProxy attribute)
      (sqlalchemy.orm.interfaces._InspectionAttr attribute)
      Extract (class in sqlalchemy.sql.expression)
      extract() (in module sqlalchemy.sql.expression)

      F

      false() (in module sqlalchemy.sql.expression)
      False_ (class in sqlalchemy.sql.expression)
      fetchall() (sqlalchemy.engine.ResultProxy method)
      FetchedValue (class in sqlalchemy.schema)
      fetchmany() (sqlalchemy.engine.ResultProxy method)
      fetchone() (sqlalchemy.engine.ResultProxy method)
      filter() (sqlalchemy.orm.query.Query method)
      filter_by() (sqlalchemy.orm.query.Query method)
      first() (sqlalchemy.engine.ResultProxy method)
      (sqlalchemy.orm.query.Query method)
      first_connect() (sqlalchemy.events.PoolEvents method)
      (sqlalchemy.interfaces.PoolListener method)
      first_init() (sqlalchemy.orm.events.InstanceEvents method)
      flag_modified() (in module sqlalchemy.orm.attributes)
      FLOAT (class in sqlalchemy.dialects.drizzle)
      (class in sqlalchemy.dialects.mysql)
      (class in sqlalchemy.types)
      Float (class in sqlalchemy.types)
      flush() (sqlalchemy.orm.session.Session method)
      FlushError
      foreign() (in module sqlalchemy.orm)
      foreign_keys (sqlalchemy.schema.Column attribute)
      (sqlalchemy.schema.Table attribute)
      (sqlalchemy.sql.expression.Alias attribute)
      (sqlalchemy.sql.expression.CTE attribute)
      (sqlalchemy.sql.expression.ColumnElement attribute)
      (sqlalchemy.sql.expression.CompoundSelect attribute)
      (sqlalchemy.sql.expression.FromClause attribute)
      (sqlalchemy.sql.expression.Join attribute)
      (sqlalchemy.sql.expression.Select attribute)
      (sqlalchemy.sql.expression.TableClause attribute)
      ForeignKey (class in sqlalchemy.schema)
      ForeignKeyConstraint (class in sqlalchemy.schema)
      format_column() (sqlalchemy.sql.compiler.IdentifierPreparer method)
      format_schema() (sqlalchemy.sql.compiler.IdentifierPreparer method)
      format_table() (sqlalchemy.sql.compiler.IdentifierPreparer method)
      format_table_seq() (sqlalchemy.sql.compiler.IdentifierPreparer method)
      FROM clause
      from_engine() (sqlalchemy.engine.reflection.Inspector class method)
      from_select() (sqlalchemy.sql.expression.Insert method)
      from_self() (sqlalchemy.orm.query.Query method)
      from_statement() (sqlalchemy.orm.query.Query method)
      FromClause (class in sqlalchemy.sql.expression)
      froms (sqlalchemy.sql.expression.Select attribute)
      func (in module sqlalchemy.sql.expression)

      G

      generative
      generic_associations (module)
      GenericFunction (class in sqlalchemy.sql.functions)
      get() (sqlalchemy.orm.query.Query method)
      get_attribute() (in module sqlalchemy.orm.attributes)
      get_bind() (sqlalchemy.orm.session.Session method)
      get_children() (sqlalchemy.schema.Column method)
      (sqlalchemy.schema.SchemaItem method)
      (sqlalchemy.schema.Table method)
      (sqlalchemy.sql.expression.BindParameter method)
      (sqlalchemy.sql.expression.ClauseElement method)
      (sqlalchemy.sql.expression.ColumnClause method)
      (sqlalchemy.sql.expression.ColumnElement method)
      (sqlalchemy.sql.expression.Select method)
      get_columns() (sqlalchemy.engine.default.DefaultDialect method)
      (sqlalchemy.engine.interfaces.Dialect method)
      (sqlalchemy.engine.reflection.Inspector method)
      get_dbapi_type() (sqlalchemy.types.TypeDecorator method)
      (sqlalchemy.types.TypeEngine method)
      get_dialect() (sqlalchemy.engine.url.URL method)
      get_foreign_keys() (sqlalchemy.engine.default.DefaultDialect method)
      (sqlalchemy.engine.interfaces.Dialect method)
      (sqlalchemy.engine.reflection.Inspector method)
      get_history() (in module sqlalchemy.orm.attributes)
      (sqlalchemy.orm.descriptor_props.CompositeProperty method)
      get_indexes() (sqlalchemy.engine.default.DefaultDialect method)
      (sqlalchemy.engine.interfaces.Dialect method)
      (sqlalchemy.engine.reflection.Inspector method)
      get_instance_dict() (sqlalchemy.ext.instrumentation.InstrumentationManager method)
      get_isolation_level() (sqlalchemy.engine.default.DefaultDialect method)
      (sqlalchemy.engine.interfaces.Dialect method)
      get_lastrowid() (sqlalchemy.engine.default.DefaultExecutionContext method)
      get_pk_constraint() (sqlalchemy.engine.default.DefaultDialect method)
      (sqlalchemy.engine.interfaces.Dialect method)
      (sqlalchemy.engine.reflection.Inspector method)
      get_primary_keys() (sqlalchemy.engine.default.DefaultDialect method)
      (sqlalchemy.engine.interfaces.Dialect method)
      (sqlalchemy.engine.reflection.Inspector method)
      get_property() (sqlalchemy.orm.mapper.Mapper method)
      get_property_by_column() (sqlalchemy.orm.mapper.Mapper method)
      get_referent() (sqlalchemy.schema.ForeignKey method)
      get_result_processor() (sqlalchemy.engine.default.DefaultExecutionContext method)
      get_rowcount() (sqlalchemy.engine.interfaces.ExecutionContext method)
      get_schema_names() (sqlalchemy.engine.reflection.Inspector method)
      get_select_precolumns() (sqlalchemy.sql.compiler.SQLCompiler method)
      get_table_names() (sqlalchemy.engine.default.DefaultDialect method)
      (sqlalchemy.engine.interfaces.Dialect method)
      (sqlalchemy.engine.reflection.Inspector method)
      get_table_options() (sqlalchemy.engine.reflection.Inspector method)
      get_unique_constraints() (sqlalchemy.engine.default.DefaultDialect method)
      (sqlalchemy.engine.interfaces.Dialect method)
      (sqlalchemy.engine.reflection.Inspector method)
      get_view_definition() (sqlalchemy.engine.default.DefaultDialect method)
      (sqlalchemy.engine.interfaces.Dialect method)
      (sqlalchemy.engine.reflection.Inspector method)
      get_view_names() (sqlalchemy.engine.default.DefaultDialect method)
      (sqlalchemy.engine.interfaces.Dialect method)
      (sqlalchemy.engine.reflection.Inspector method)
      graphs (module)
      group_by() (sqlalchemy.orm.query.Query method)
      (sqlalchemy.sql.expression.CompoundSelect method)
      (sqlalchemy.sql.expression.Select method)
      (sqlalchemy.sql.expression.SelectBase method)

      H

      handle_dbapi_exception() (sqlalchemy.engine.interfaces.ExecutionContext method)
      has() (sqlalchemy.ext.associationproxy.AssociationProxy method)
      (sqlalchemy.orm.interfaces.PropComparator method)
      (sqlalchemy.orm.properties.RelationshipProperty.Comparator method)
      (sqlalchemy.util.ScopedRegistry method)
      has_all() (sqlalchemy.dialects.postgresql.HSTORE.comparator_factory method)
      has_any() (sqlalchemy.dialects.postgresql.HSTORE.comparator_factory method)
      has_changes() (sqlalchemy.orm.attributes.History method)
      has_identity (sqlalchemy.orm.state.InstanceState attribute)
      has_inherited_table() (in module sqlalchemy.ext.declarative)
      has_key() (sqlalchemy.dialects.postgresql.HSTORE.comparator_factory method)
      (sqlalchemy.engine.RowProxy method)
      has_parent() (sqlalchemy.orm.instrumentation.ClassManager method)
      has_sequence() (sqlalchemy.engine.default.DefaultDialect method)
      (sqlalchemy.engine.interfaces.Dialect method)
      has_table() (sqlalchemy.engine.default.DefaultDialect method)
      (sqlalchemy.engine.interfaces.Dialect method)
      hashable (sqlalchemy.types.TypeEngine attribute)
      having() (sqlalchemy.orm.query.Query method)
      (sqlalchemy.sql.expression.Select method)
      History (class in sqlalchemy.orm.attributes)
      history (sqlalchemy.orm.state.AttributeState attribute)
      HSTORE (class in sqlalchemy.dialects.postgresql)
      hstore (class in sqlalchemy.dialects.postgresql)
      HSTORE.comparator_factory (class in sqlalchemy.dialects.postgresql)
      hybrid_method (class in sqlalchemy.ext.hybrid)
      HYBRID_METHOD (in module sqlalchemy.ext.hybrid)
      hybrid_property (class in sqlalchemy.ext.hybrid)
      HYBRID_PROPERTY (in module sqlalchemy.ext.hybrid)

      I

      identifier (sqlalchemy.sql.functions.AnsiFunction attribute)
      (sqlalchemy.sql.functions.GenericFunction attribute)
      (sqlalchemy.sql.functions.ReturnTypeFromArgs attribute)
      (sqlalchemy.sql.functions.char_length attribute)
      (sqlalchemy.sql.functions.coalesce attribute)
      (sqlalchemy.sql.functions.concat attribute)
      (sqlalchemy.sql.functions.count attribute)
      (sqlalchemy.sql.functions.current_date attribute)
      (sqlalchemy.sql.functions.current_time attribute)
      (sqlalchemy.sql.functions.current_timestamp attribute)
      (sqlalchemy.sql.functions.current_user attribute)
      (sqlalchemy.sql.functions.localtime attribute)
      (sqlalchemy.sql.functions.localtimestamp attribute)
      (sqlalchemy.sql.functions.max attribute)
      (sqlalchemy.sql.functions.min attribute)
      (sqlalchemy.sql.functions.next_value attribute)
      (sqlalchemy.sql.functions.now attribute)
      (sqlalchemy.sql.functions.random attribute)
      (sqlalchemy.sql.functions.session_user attribute)
      (sqlalchemy.sql.functions.sum attribute)
      (sqlalchemy.sql.functions.sysdate attribute)
      (sqlalchemy.sql.functions.user attribute)
      IdentifierError
      IdentifierPreparer (class in sqlalchemy.sql.compiler)
      identity (sqlalchemy.orm.state.InstanceState attribute)
      identity map
      identity_key (sqlalchemy.orm.state.InstanceState attribute)
      identity_key() (in module sqlalchemy.orm.util)
      (sqlalchemy.orm.session.Session class method)
      (sqlalchemy.orm.session.sessionmaker class method)
      identity_key_from_instance() (sqlalchemy.orm.mapper.Mapper method)
      identity_key_from_primary_key() (sqlalchemy.orm.mapper.Mapper method)
      identity_key_from_row() (sqlalchemy.orm.mapper.Mapper method)
      identity_map (sqlalchemy.orm.session.Session attribute)
      ilike() (sqlalchemy.schema.Column method)
      (sqlalchemy.sql.expression.BindParameter method)
      (sqlalchemy.sql.expression.ColumnClause method)
      (sqlalchemy.sql.expression.ColumnElement method)
      (sqlalchemy.sql.operators.ColumnOperators method)
      IMAGE (class in sqlalchemy.dialects.mssql)
      immediateload() (in module sqlalchemy.orm)
      impl (sqlalchemy.types.Interval attribute)
      (sqlalchemy.types.PickleType attribute)
      implicit_returning (sqlalchemy.schema.Table attribute)
      (sqlalchemy.sql.expression.TableClause attribute)
      in_() (sqlalchemy.orm.properties.RelationshipProperty.Comparator method)
      (sqlalchemy.schema.Column method)
      (sqlalchemy.sql.expression.BindParameter method)
      (sqlalchemy.sql.expression.ColumnClause method)
      (sqlalchemy.sql.expression.ColumnElement method)
      (sqlalchemy.sql.operators.ColumnOperators method)
      in_transaction() (sqlalchemy.engine.Connection method)
      Index (class in sqlalchemy.schema)
      INET (class in sqlalchemy.dialects.postgresql)
      info (sqlalchemy.engine.Connection attribute)
      (sqlalchemy.orm.attributes.QueryableAttribute attribute)
      (sqlalchemy.orm.interfaces.MapperProperty attribute)
      (sqlalchemy.schema.Column attribute)
      (sqlalchemy.schema.SchemaItem attribute)
      (sqlalchemy.schema.Table attribute)
      inheritance (module)
      inherits (sqlalchemy.orm.mapper.Mapper attribute)
      init() (sqlalchemy.orm.events.InstanceEvents method)
      (sqlalchemy.orm.interfaces.MapperProperty method)
      init_collection() (in module sqlalchemy.orm.attributes)
      init_failed() (sqlalchemy.orm.interfaces.MapperExtension method)
      init_failure() (sqlalchemy.orm.events.InstanceEvents method)
      init_instance() (sqlalchemy.orm.interfaces.MapperExtension method)
      initialize() (sqlalchemy.engine.interfaces.Dialect method)
      initialize_instance_dict() (sqlalchemy.ext.instrumentation.InstrumentationManager method)
      inner_columns (sqlalchemy.sql.expression.Select attribute)
      Insert (class in sqlalchemy.sql.expression)
      insert() (in module sqlalchemy.sql.expression)
      (sqlalchemy.ext.orderinglist.OrderingList method)
      (sqlalchemy.schema.Table method)
      (sqlalchemy.sql.expression.TableClause method)
      inserted_primary_key (sqlalchemy.engine.ResultProxy attribute)
      inspect() (in module sqlalchemy.inspection)
      Inspector (class in sqlalchemy.engine.reflection)
      install_descriptor() (sqlalchemy.ext.instrumentation.InstrumentationManager method)
      install_member() (sqlalchemy.ext.instrumentation.InstrumentationManager method)
      install_state() (sqlalchemy.ext.instrumentation.InstrumentationManager method)
      instance_state() (in module sqlalchemy.orm.attributes)
      InstanceEvents (class in sqlalchemy.orm.events)
      instances() (sqlalchemy.orm.query.Query method)
      InstanceState (class in sqlalchemy.orm.state)
      instrument_attribute() (sqlalchemy.ext.instrumentation.InstrumentationManager method)
      instrument_class() (sqlalchemy.orm.events.MapperEvents method)
      (sqlalchemy.orm.interfaces.MapperExtension method)
      instrument_collection_class() (sqlalchemy.ext.instrumentation.InstrumentationManager method)
      instrument_declarative() (in module sqlalchemy.ext.declarative)
      instrumentation
      instrumentation_finders (in module sqlalchemy.ext.instrumentation)
      INSTRUMENTATION_MANAGER (in module sqlalchemy.ext.instrumentation)
      InstrumentationEvents (class in sqlalchemy.orm.events)
      InstrumentationManager (class in sqlalchemy.ext.instrumentation)
      instrumented
      InstrumentedAttribute (class in sqlalchemy.orm.attributes)
      InstrumentedDict (class in sqlalchemy.orm.collections)
      InstrumentedList (class in sqlalchemy.orm.collections)
      InstrumentedSet (class in sqlalchemy.orm.collections)
      INT (in module sqlalchemy.types)
      INT4RANGE (class in sqlalchemy.dialects.postgresql)
      INT8RANGE (class in sqlalchemy.dialects.postgresql)
      INTEGER (class in sqlalchemy.dialects.drizzle)
      (class in sqlalchemy.dialects.mysql)
      Integer (class in sqlalchemy.types)
      INTEGER (class in sqlalchemy.types)
      IntegrityError
      InterfaceError
      InternalError
      internally_instrumented() (sqlalchemy.orm.collections.collection static method)
      intersect() (in module sqlalchemy.sql.expression)
      (sqlalchemy.orm.query.Query method)
      (sqlalchemy.sql.expression.Select method)
      intersect_all() (in module sqlalchemy.sql.expression)
      (sqlalchemy.orm.query.Query method)
      (sqlalchemy.sql.expression.Select method)
      INTERVAL (class in sqlalchemy.dialects.oracle)
      (class in sqlalchemy.dialects.postgresql)
      Interval (class in sqlalchemy.types)
      invalidate() (sqlalchemy.engine.Connection method)
      invalidated (sqlalchemy.engine.Connection attribute)
      InvalidRequestError
      is_() (sqlalchemy.schema.Column method)
      (sqlalchemy.sql.expression.BindParameter method)
      (sqlalchemy.sql.expression.ColumnClause method)
      (sqlalchemy.sql.expression.ColumnElement method)
      (sqlalchemy.sql.operators.ColumnOperators method)
      is_active (sqlalchemy.orm.session.Session attribute)
      is_aliased_class (sqlalchemy.orm.interfaces._InspectionAttr attribute)
      is_attribute (sqlalchemy.ext.associationproxy.AssociationProxy attribute)
      (sqlalchemy.orm.interfaces._InspectionAttr attribute)
      is_bound() (sqlalchemy.schema.MetaData method)
      (sqlalchemy.schema.ThreadLocalMetaData method)
      is_clause_element (sqlalchemy.orm.interfaces._InspectionAttr attribute)
      (sqlalchemy.schema.Column attribute)
      (sqlalchemy.schema.Table attribute)
      (sqlalchemy.sql.expression.ColumnElement attribute)
      is_derived_from() (sqlalchemy.schema.Table method)
      (sqlalchemy.sql.expression.FromClause method)
      (sqlalchemy.sql.expression.TableClause method)
      is_disconnect() (sqlalchemy.engine.interfaces.Dialect method)
      is_insert (sqlalchemy.engine.ResultProxy attribute)
      is_instance (sqlalchemy.orm.interfaces._InspectionAttr attribute)
      is_instrumented() (in module sqlalchemy.orm.instrumentation)
      is_mapper (sqlalchemy.orm.interfaces._InspectionAttr attribute)
      (sqlalchemy.orm.mapper.Mapper attribute)
      is_modified() (sqlalchemy.orm.session.Session method)
      is_primary() (sqlalchemy.orm.interfaces.MapperProperty method)
      is_property (sqlalchemy.orm.interfaces._InspectionAttr attribute)
      is_selectable (sqlalchemy.orm.interfaces._InspectionAttr attribute)
      (sqlalchemy.schema.Column attribute)
      (sqlalchemy.schema.Table attribute)
      (sqlalchemy.sql.expression.ColumnElement attribute)
      isa() (sqlalchemy.orm.mapper.Mapper method)
      isdelete (sqlalchemy.sql.compiler.SQLCompiler attribute)
      isinsert (sqlalchemy.sql.compiler.SQLCompiler attribute)
      isnot() (sqlalchemy.schema.Column method)
      (sqlalchemy.sql.expression.BindParameter method)
      (sqlalchemy.sql.expression.ColumnClause method)
      (sqlalchemy.sql.expression.ColumnElement method)
      (sqlalchemy.sql.operators.ColumnOperators method)
      isolated
      isolation
      isupdate (sqlalchemy.sql.compiler.SQLCompiler attribute)
      items() (sqlalchemy.engine.RowProxy method)
      iterate_properties (sqlalchemy.orm.mapper.Mapper attribute)
      iterator() (sqlalchemy.orm.collections.collection static method)

      J

      Join (class in sqlalchemy.sql.expression)
      join() (in module sqlalchemy.orm)
      (in module sqlalchemy.sql.expression)
      (sqlalchemy.orm.query.Query method)
      (sqlalchemy.schema.Table method)
      (sqlalchemy.sql.expression.Alias method)
      (sqlalchemy.sql.expression.CTE method)
      (sqlalchemy.sql.expression.CompoundSelect method)
      (sqlalchemy.sql.expression.FromClause method)
      (sqlalchemy.sql.expression.Join method)
      (sqlalchemy.sql.expression.Select method)
      (sqlalchemy.sql.expression.TableClause method)
      joinedload() (in module sqlalchemy.orm)
      joinedload_all() (in module sqlalchemy.orm)

      K

      key (sqlalchemy.schema.Table attribute)
      KeyedTuple (class in sqlalchemy.util)
      keys() (sqlalchemy.dialects.postgresql.HSTORE.comparator_factory method)
      (sqlalchemy.engine.ResultProxy method)
      (sqlalchemy.engine.RowProxy method)
      (sqlalchemy.util.KeyedTuple method)

      L

      Label (class in sqlalchemy.sql.expression)
      label() (in module sqlalchemy.sql.expression)
      (sqlalchemy.orm.query.Query method)
      (sqlalchemy.schema.Column method)
      (sqlalchemy.sql.expression.BindParameter method)
      (sqlalchemy.sql.expression.ColumnClause method)
      (sqlalchemy.sql.expression.ColumnElement method)
      (sqlalchemy.sql.expression.CompoundSelect method)
      (sqlalchemy.sql.expression.Select method)
      (sqlalchemy.sql.expression.SelectBase method)
      large_collection (module)
      LargeBinary (class in sqlalchemy.types)
      last_inserted_params() (sqlalchemy.engine.ResultProxy method)
      last_updated_params() (sqlalchemy.engine.ResultProxy method)
      lastrow_has_defaults() (sqlalchemy.engine.interfaces.ExecutionContext method)
      (sqlalchemy.engine.ResultProxy method)
      lastrowid (sqlalchemy.engine.ResultProxy attribute)
      lazy load
      lazy loads
      lazyload() (in module sqlalchemy.orm)
      like() (sqlalchemy.schema.Column method)
      (sqlalchemy.sql.expression.BindParameter method)
      (sqlalchemy.sql.expression.ColumnClause method)
      (sqlalchemy.sql.expression.ColumnElement method)
      (sqlalchemy.sql.operators.ColumnOperators method)
      limit() (sqlalchemy.orm.query.Query method)
      (sqlalchemy.sql.expression.CompoundSelect method)
      (sqlalchemy.sql.expression.Select method)
      (sqlalchemy.sql.expression.SelectBase method)
      link() (sqlalchemy.orm.collections.collection static method)
      linker() (sqlalchemy.orm.collections.collection static method)
      listen() (in module sqlalchemy.event)
      listens_for() (in module sqlalchemy.event)
      literal() (in module sqlalchemy.sql.expression)
      literal_column() (in module sqlalchemy.sql.expression)
      load() (sqlalchemy.orm.events.InstanceEvents method)
      load_dialect_impl() (sqlalchemy.types.TypeDecorator method)
      loaded_value (sqlalchemy.orm.state.AttributeState attribute)
      loads() (in module sqlalchemy.ext.serializer)
      local_attr (sqlalchemy.ext.associationproxy.AssociationProxy attribute)
      local_table (sqlalchemy.orm.mapper.Mapper attribute)
      localtime (class in sqlalchemy.sql.functions)
      localtimestamp (class in sqlalchemy.sql.functions)
      locate_all_froms (sqlalchemy.sql.expression.Select attribute)
      LONG (class in sqlalchemy.dialects.oracle)
      LONGBLOB (class in sqlalchemy.dialects.mysql)
      LONGTEXT (class in sqlalchemy.dialects.mysql)

      M

      MACADDR (class in sqlalchemy.dialects.postgresql)
      make_transient() (in module sqlalchemy.orm.session)
      make_url() (in module sqlalchemy.engine.url)
      manage() (in module sqlalchemy.pool)
      (sqlalchemy.ext.instrumentation.InstrumentationManager method)
      (sqlalchemy.orm.instrumentation.ClassManager method)
      manager_getter() (sqlalchemy.ext.instrumentation.InstrumentationManager method)
      mapped
      mapped_collection() (in module sqlalchemy.orm.collections)
      mapped_table (sqlalchemy.orm.mapper.Mapper attribute)
      MappedCollection (class in sqlalchemy.orm.collections)
      Mapper (class in sqlalchemy.orm.mapper)
      mapper (sqlalchemy.orm.mapper.Mapper attribute)
      (sqlalchemy.orm.properties.RelationshipProperty attribute)
      (sqlalchemy.orm.properties.RelationshipProperty.Comparator attribute)
      (sqlalchemy.orm.state.InstanceState attribute)
      mapper() (in module sqlalchemy.orm)
      mapper_configured() (sqlalchemy.orm.events.MapperEvents method)
      MapperEvents (class in sqlalchemy.orm.events)
      MapperExtension (class in sqlalchemy.orm.interfaces)
      MapperProperty (class in sqlalchemy.orm.interfaces)
      mapping
      match() (sqlalchemy.schema.Column method)
      (sqlalchemy.sql.expression.BindParameter method)
      (sqlalchemy.sql.expression.ColumnClause method)
      (sqlalchemy.sql.expression.ColumnElement method)
      (sqlalchemy.sql.operators.ColumnOperators method)
      matrix() (sqlalchemy.dialects.postgresql.HSTORE.comparator_factory method)
      max (class in sqlalchemy.sql.functions)
      MEDIUMBLOB (class in sqlalchemy.dialects.mysql)
      MEDIUMINT (class in sqlalchemy.dialects.mysql)
      MEDIUMTEXT (class in sqlalchemy.dialects.mysql)
      merge() (sqlalchemy.orm.interfaces.MapperProperty method)
      (sqlalchemy.orm.session.Session method)
      merge_result() (sqlalchemy.orm.query.Query method)
      MetaData (class in sqlalchemy.schema)
      method chaining
      min (class in sqlalchemy.sql.functions)
      MONEY (class in sqlalchemy.dialects.mssql)
      MultipleResultsFound
      Mutable (class in sqlalchemy.ext.mutable)
      MutableBase (class in sqlalchemy.ext.mutable)
      MutableComposite (class in sqlalchemy.ext.mutable)
      MutableDict (class in sqlalchemy.ext.mutable)

      N

      N plus one problem
      name (sqlalchemy.engine.Engine attribute)
      (sqlalchemy.sql.functions.AnsiFunction attribute)
      (sqlalchemy.sql.functions.GenericFunction attribute)
      (sqlalchemy.sql.functions.ReturnTypeFromArgs attribute)
      (sqlalchemy.sql.functions.char_length attribute)
      (sqlalchemy.sql.functions.coalesce attribute)
      (sqlalchemy.sql.functions.concat attribute)
      (sqlalchemy.sql.functions.count attribute)
      (sqlalchemy.sql.functions.current_date attribute)
      (sqlalchemy.sql.functions.current_time attribute)
      (sqlalchemy.sql.functions.current_timestamp attribute)
      (sqlalchemy.sql.functions.current_user attribute)
      (sqlalchemy.sql.functions.localtime attribute)
      (sqlalchemy.sql.functions.localtimestamp attribute)
      (sqlalchemy.sql.functions.max attribute)
      (sqlalchemy.sql.functions.min attribute)
      (sqlalchemy.sql.functions.next_value attribute)
      (sqlalchemy.sql.functions.now attribute)
      (sqlalchemy.sql.functions.random attribute)
      (sqlalchemy.sql.functions.session_user attribute)
      (sqlalchemy.sql.functions.sum attribute)
      (sqlalchemy.sql.functions.sysdate attribute)
      (sqlalchemy.sql.functions.user attribute)
      named_with_column (sqlalchemy.schema.Table attribute)
      NCHAR (class in sqlalchemy.dialects.mssql)
      (class in sqlalchemy.dialects.mysql)
      (class in sqlalchemy.types)
      NCLOB (class in sqlalchemy.dialects.oracle)
      nested_sets (module)
      NestedTransaction (class in sqlalchemy.engine)
      new (sqlalchemy.orm.session.Session attribute)
      next_value (class in sqlalchemy.sql.functions)
      next_value() (sqlalchemy.schema.Sequence method)
      no_autoflush (sqlalchemy.orm.session.Session attribute)
      NO_STATE (in module sqlalchemy.orm.exc)
      NoForeignKeysError
      NoInspectionAvailable
      noload() (in module sqlalchemy.orm)
      non_added() (sqlalchemy.orm.attributes.History method)
      non_deleted() (sqlalchemy.orm.attributes.History method)
      non_primary (sqlalchemy.orm.mapper.Mapper attribute)
      NoReferencedColumnError
      NoReferencedTableError
      NoReferenceError
      NoResultFound
      normalize_name() (sqlalchemy.engine.default.DefaultDialect method)
      (sqlalchemy.engine.interfaces.Dialect method)
      NoSuchColumnError
      NoSuchTableError
      not_() (in module sqlalchemy.sql.expression)
      not_extend_left_of() (sqlalchemy.dialects.postgresql.ranges.RangeOperators.comparator_factory method)
      not_extend_right_of() (sqlalchemy.dialects.postgresql.ranges.RangeOperators.comparator_factory method)
      NOT_EXTENSION (in module sqlalchemy.orm.interfaces)
      notilike() (sqlalchemy.schema.Column method)
      (sqlalchemy.sql.expression.BindParameter method)
      (sqlalchemy.sql.expression.ColumnClause method)
      (sqlalchemy.sql.expression.ColumnElement method)
      (sqlalchemy.sql.operators.ColumnOperators method)
      notin_() (sqlalchemy.schema.Column method)
      (sqlalchemy.sql.expression.BindParameter method)
      (sqlalchemy.sql.expression.ColumnClause method)
      (sqlalchemy.sql.expression.ColumnElement method)
      (sqlalchemy.sql.operators.ColumnOperators method)
      notlike() (sqlalchemy.schema.Column method)
      (sqlalchemy.sql.expression.BindParameter method)
      (sqlalchemy.sql.expression.ColumnClause method)
      (sqlalchemy.sql.expression.ColumnElement method)
      (sqlalchemy.sql.operators.ColumnOperators method)
      NotSupportedError
      now (class in sqlalchemy.sql.functions)
      NTEXT (class in sqlalchemy.dialects.mssql)
      Null (class in sqlalchemy.sql.expression)
      null() (in module sqlalchemy.sql.expression)
      NullPool (class in sqlalchemy.pool)
      nullsfirst() (in module sqlalchemy.sql.expression)
      (sqlalchemy.schema.Column method)
      (sqlalchemy.sql.expression.BindParameter method)
      (sqlalchemy.sql.expression.ColumnClause method)
      (sqlalchemy.sql.expression.ColumnElement method)
      (sqlalchemy.sql.operators.ColumnOperators method)
      nullslast() (in module sqlalchemy.sql.expression)
      (sqlalchemy.schema.Column method)
      (sqlalchemy.sql.expression.BindParameter method)
      (sqlalchemy.sql.expression.ColumnClause method)
      (sqlalchemy.sql.expression.ColumnElement method)
      (sqlalchemy.sql.operators.ColumnOperators method)
      NullType (class in sqlalchemy.types)
      NUMBER (class in sqlalchemy.dialects.oracle)
      NUMERIC (class in sqlalchemy.dialects.drizzle)
      (class in sqlalchemy.dialects.mysql)
      Numeric (class in sqlalchemy.types)
      NUMERIC (class in sqlalchemy.types)
      NUMRANGE (class in sqlalchemy.dialects.postgresql)
      NVARCHAR (class in sqlalchemy.dialects.mssql)
      (class in sqlalchemy.dialects.mysql)
      (class in sqlalchemy.types)

      O

      object (sqlalchemy.orm.state.InstanceState attribute)
      object_mapper() (in module sqlalchemy.orm)
      object_session() (in module sqlalchemy.orm.session)
      (sqlalchemy.orm.session.Session class method)
      (sqlalchemy.orm.session.sessionmaker class method)
      object_state() (in module sqlalchemy.orm.util)
      ObjectDeletedError
      ObjectDereferencedError
      of_type() (sqlalchemy.orm.interfaces.PropComparator method)
      (sqlalchemy.orm.properties.RelationshipProperty.Comparator method)
      offset() (sqlalchemy.orm.query.Query method)
      (sqlalchemy.sql.expression.CompoundSelect method)
      (sqlalchemy.sql.expression.Select method)
      (sqlalchemy.sql.expression.SelectBase method)
      on (sqlalchemy.schema.DDLElement attribute)
      on_connect() (sqlalchemy.engine.default.DefaultDialect method)
      one() (sqlalchemy.orm.query.Query method)
      onupdate (sqlalchemy.schema.Column attribute)
      op() (sqlalchemy.schema.Column method)
      (sqlalchemy.sql.expression.BindParameter method)
      (sqlalchemy.sql.expression.ColumnClause method)
      (sqlalchemy.sql.expression.ColumnElement method)
      (sqlalchemy.sql.operators.ColumnOperators method)
      (sqlalchemy.sql.operators.Operators method)
      operate() (sqlalchemy.schema.Column method)
      (sqlalchemy.sql.expression.ColumnElement method)
      (sqlalchemy.sql.operators.ColumnOperators method)
      (sqlalchemy.sql.operators.Operators method)
      OperationalError
      Operators (class in sqlalchemy.sql.operators)
      options() (sqlalchemy.orm.query.Query method)
      or_() (in module sqlalchemy.sql.expression)
      order_by() (sqlalchemy.orm.query.Query method)
      (sqlalchemy.sql.expression.CompoundSelect method)
      (sqlalchemy.sql.expression.Select method)
      (sqlalchemy.sql.expression.SelectBase method)
      ordering_list() (in module sqlalchemy.ext.orderinglist)
      OrderingList (class in sqlalchemy.ext.orderinglist)
      orig (sqlalchemy.exc.StatementError attribute)
      original_init (sqlalchemy.orm.instrumentation.ClassManager attribute)
      outerjoin() (in module sqlalchemy.orm)
      (in module sqlalchemy.sql.expression)
      (sqlalchemy.orm.query.Query method)
      (sqlalchemy.schema.Table method)
      (sqlalchemy.sql.expression.Alias method)
      (sqlalchemy.sql.expression.CTE method)
      (sqlalchemy.sql.expression.CompoundSelect method)
      (sqlalchemy.sql.expression.FromClause method)
      (sqlalchemy.sql.expression.Join method)
      (sqlalchemy.sql.expression.Select method)
      (sqlalchemy.sql.expression.TableClause method)
      outparam() (in module sqlalchemy.sql.expression)
      Over (class in sqlalchemy.sql.expression)
      over() (in module sqlalchemy.sql.expression)
      overlap() (sqlalchemy.dialects.postgresql.ARRAY.Comparator method)
      overlaps() (sqlalchemy.dialects.postgresql.ranges.RangeOperators.comparator_factory method)

      P

      params (sqlalchemy.engine.interfaces.Compiled attribute)
      (sqlalchemy.exc.StatementError attribute)
      (sqlalchemy.sql.compiler.DDLCompiler attribute)
      (sqlalchemy.sql.compiler.SQLCompiler attribute)
      params() (sqlalchemy.orm.query.Query method)
      (sqlalchemy.schema.Column method)
      (sqlalchemy.schema.Table method)
      (sqlalchemy.sql.expression.Alias method)
      (sqlalchemy.sql.expression.BindParameter method)
      (sqlalchemy.sql.expression.CTE method)
      (sqlalchemy.sql.expression.ClauseElement method)
      (sqlalchemy.sql.expression.ColumnElement method)
      (sqlalchemy.sql.expression.CompoundSelect method)
      (sqlalchemy.sql.expression.Delete method)
      (sqlalchemy.sql.expression.Insert method)
      (sqlalchemy.sql.expression.Join method)
      (sqlalchemy.sql.expression.Select method)
      (sqlalchemy.sql.expression.Update method)
      (sqlalchemy.sql.expression.UpdateBase method)
      parent (sqlalchemy.orm.attributes.QueryableAttribute attribute)
      PassiveDefault (class in sqlalchemy.schema)
      pending (sqlalchemy.orm.state.InstanceState attribute)
      persistent (sqlalchemy.orm.state.InstanceState attribute)
      pickle() (sqlalchemy.orm.events.InstanceEvents method)
      PickleType (class in sqlalchemy.types)
      polymorphic
      polymorphic_identity (sqlalchemy.orm.mapper.Mapper attribute)
      polymorphic_iterator() (sqlalchemy.orm.mapper.Mapper method)
      polymorphic_map (sqlalchemy.orm.mapper.Mapper attribute)
      polymorphic_on (sqlalchemy.orm.mapper.Mapper attribute)
      polymorphic_union() (in module sqlalchemy.orm.util)
      polymorphically
      Pool (class in sqlalchemy.pool)
      PoolEvents (class in sqlalchemy.events)
      PoolListener (class in sqlalchemy.interfaces)
      pop() (sqlalchemy.ext.orderinglist.OrderingList method)
      (sqlalchemy.orm.collections.MappedCollection method)
      popitem() (sqlalchemy.orm.collections.MappedCollection method)
      populate_existing() (sqlalchemy.orm.query.Query method)
      populate_instance() (sqlalchemy.orm.events.MapperEvents method)
      (sqlalchemy.orm.interfaces.MapperExtension method)
      post_configure_attribute() (sqlalchemy.ext.instrumentation.InstrumentationManager method)
      post_exec() (sqlalchemy.engine.interfaces.ExecutionContext method)
      post_instrument_class() (sqlalchemy.orm.interfaces.MapperProperty method)
      postfetch_cols() (sqlalchemy.engine.ResultProxy method)
      postgis (module)
      pre_exec() (sqlalchemy.engine.interfaces.ExecutionContext method)
      prefetch_cols() (sqlalchemy.engine.ResultProxy method)
      prefix_with() (sqlalchemy.orm.query.Query method)
      (sqlalchemy.sql.expression.Delete method)
      (sqlalchemy.sql.expression.Insert method)
      (sqlalchemy.sql.expression.Select method)
      (sqlalchemy.sql.expression.Update method)
      prepare() (sqlalchemy.engine.TwoPhaseTransaction method)
      (sqlalchemy.orm.session.Session method)
      prepare_instrumentation() (in module sqlalchemy.orm.collections)
      prepare_twophase() (sqlalchemy.events.ConnectionEvents method)
      (sqlalchemy.interfaces.ConnectionProxy method)
      preparer (sqlalchemy.engine.default.DefaultDialect attribute)
      primary_key (sqlalchemy.orm.mapper.Mapper attribute)
      (sqlalchemy.schema.Column attribute)
      (sqlalchemy.schema.Table attribute)
      (sqlalchemy.sql.expression.Alias attribute)
      (sqlalchemy.sql.expression.CTE attribute)
      (sqlalchemy.sql.expression.ColumnElement attribute)
      (sqlalchemy.sql.expression.CompoundSelect attribute)
      (sqlalchemy.sql.expression.FromClause attribute)
      (sqlalchemy.sql.expression.Join attribute)
      (sqlalchemy.sql.expression.Select attribute)
      (sqlalchemy.sql.expression.TableClause attribute)
      primary_key_from_instance() (sqlalchemy.orm.mapper.Mapper method)
      primary_mapper() (sqlalchemy.orm.mapper.Mapper method)
      PrimaryKeyConstraint (class in sqlalchemy.schema)
      process_bind_param() (sqlalchemy.types.TypeDecorator method)
      process_result_value() (sqlalchemy.types.TypeDecorator method)
      ProgrammingError
      PropComparator (class in sqlalchemy.orm.interfaces)
      property (sqlalchemy.orm.attributes.QueryableAttribute attribute)
      proxy_set (sqlalchemy.schema.Column attribute)
      (sqlalchemy.sql.expression.ColumnElement attribute)
      prune() (sqlalchemy.orm.session.Session method)
      Python Enhancement Proposals
      PEP 249
      python_type (sqlalchemy.types.TypeDecorator attribute)
      (sqlalchemy.types.TypeEngine attribute)

      Q

      Query (class in sqlalchemy.orm.query)
      query() (sqlalchemy.orm.session.Session method)
      query_property() (sqlalchemy.orm.scoping.scoped_session method)
      QueryableAttribute (class in sqlalchemy.orm.attributes)
      QueryContext (class in sqlalchemy.orm.query)
      QueuePool (class in sqlalchemy.pool)
      quote (sqlalchemy.schema.Column attribute)
      (sqlalchemy.schema.Table attribute)
      (sqlalchemy.sql.expression.ColumnElement attribute)
      quote_identifier() (sqlalchemy.sql.compiler.IdentifierPreparer method)
      quote_schema() (sqlalchemy.sql.compiler.IdentifierPreparer method)

      R

      random (class in sqlalchemy.sql.functions)
      RangeOperators (class in sqlalchemy.dialects.postgresql.ranges)
      RangeOperators.comparator_factory (class in sqlalchemy.dialects.postgresql.ranges)
      RAW (class in sqlalchemy.dialects.oracle)
      raw_connection() (sqlalchemy.engine.Engine method)
      REAL (class in sqlalchemy.dialects.drizzle)
      (class in sqlalchemy.dialects.mssql)
      (class in sqlalchemy.dialects.mysql)
      (class in sqlalchemy.dialects.postgresql)
      (class in sqlalchemy.types)
      reconstruct_instance() (sqlalchemy.orm.interfaces.MapperExtension method)
      reconstructor() (in module sqlalchemy.orm)
      recreate() (sqlalchemy.pool.Pool method)
      reduce_columns() (sqlalchemy.sql.expression.Select method)
      references() (sqlalchemy.schema.Column method)
      (sqlalchemy.schema.ForeignKey method)
      reflect() (sqlalchemy.schema.MetaData method)
      reflecttable() (sqlalchemy.engine.interfaces.Dialect method)
      (sqlalchemy.engine.reflection.Inspector method)
      refresh() (sqlalchemy.orm.events.InstanceEvents method)
      (sqlalchemy.orm.session.Session method)
      register_function() (in module sqlalchemy.sql.functions)
      relation() (in module sqlalchemy.orm)
      relationship() (in module sqlalchemy.orm)
      RelationshipProperty (class in sqlalchemy.orm.properties)
      RelationshipProperty.Comparator (class in sqlalchemy.orm.properties)
      relationships (sqlalchemy.orm.mapper.Mapper attribute)
      release
      release_savepoint() (sqlalchemy.events.ConnectionEvents method)
      (sqlalchemy.interfaces.ConnectionProxy method)
      released
      releases
      remote() (in module sqlalchemy.orm)
      remote_attr (sqlalchemy.ext.associationproxy.AssociationProxy attribute)
      remove() (sqlalchemy.ext.orderinglist.OrderingList method)
      (sqlalchemy.orm.collections.MappedCollection method)
      (sqlalchemy.orm.events.AttributeEvents method)
      (sqlalchemy.orm.interfaces.AttributeExtension method)
      (sqlalchemy.orm.scoping.scoped_session method)
      (sqlalchemy.schema.MetaData method)
      remove_state() (sqlalchemy.ext.instrumentation.InstrumentationManager method)
      remover() (sqlalchemy.orm.collections.collection static method)
      removes() (sqlalchemy.orm.collections.collection static method)
      removes_return() (sqlalchemy.orm.collections.collection static method)
      render_literal_value() (sqlalchemy.sql.compiler.SQLCompiler method)
      render_table_with_column_in_update_from (sqlalchemy.sql.compiler.SQLCompiler attribute)
      reorder() (sqlalchemy.ext.orderinglist.OrderingList method)
      replace() (sqlalchemy.sql.expression.ColumnCollection method)
      replace_selectable() (sqlalchemy.schema.Table method)
      (sqlalchemy.sql.expression.Alias method)
      (sqlalchemy.sql.expression.CTE method)
      (sqlalchemy.sql.expression.CompoundSelect method)
      (sqlalchemy.sql.expression.FromClause method)
      (sqlalchemy.sql.expression.Join method)
      (sqlalchemy.sql.expression.Select method)
      (sqlalchemy.sql.expression.TableClause method)
      replaces() (sqlalchemy.orm.collections.collection static method)
      reset() (sqlalchemy.events.PoolEvents method)
      reset_isolation_level() (sqlalchemy.engine.interfaces.Dialect method)
      reset_joinpoint() (sqlalchemy.orm.query.Query method)
      ResourceClosedError
      result() (sqlalchemy.engine.interfaces.ExecutionContext method)
      result_processor() (sqlalchemy.types.TypeDecorator method)
      (sqlalchemy.types.TypeEngine method)
      ResultProxy (class in sqlalchemy.engine)
      resurrect() (sqlalchemy.orm.events.InstanceEvents method)
      RETURNING
      returning (sqlalchemy.sql.compiler.SQLCompiler attribute)
      returning() (sqlalchemy.sql.expression.Delete method)
      (sqlalchemy.sql.expression.Insert method)
      (sqlalchemy.sql.expression.Update method)
      (sqlalchemy.sql.expression.UpdateBase method)
      returning_precedes_values (sqlalchemy.sql.compiler.SQLCompiler attribute)
      returns_rows (sqlalchemy.engine.ResultProxy attribute)
      ReturnTypeFromArgs (class in sqlalchemy.sql.functions)
      reverse_operate() (sqlalchemy.schema.Column method)
      (sqlalchemy.sql.expression.ColumnElement method)
      (sqlalchemy.sql.operators.ColumnOperators method)
      (sqlalchemy.sql.operators.Operators method)
      rollback() (sqlalchemy.engine.Transaction method)
      (sqlalchemy.events.ConnectionEvents method)
      (sqlalchemy.interfaces.ConnectionProxy method)
      (sqlalchemy.orm.session.Session method)
      rollback_savepoint() (sqlalchemy.events.ConnectionEvents method)
      (sqlalchemy.interfaces.ConnectionProxy method)
      rollback_twophase() (sqlalchemy.events.ConnectionEvents method)
      (sqlalchemy.interfaces.ConnectionProxy method)
      rowcount (sqlalchemy.engine.ResultProxy attribute)
      RowProxy (class in sqlalchemy.engine)
      run_callable() (sqlalchemy.engine.Connection method)
      (sqlalchemy.engine.Engine method)

      S

      SADeprecationWarning
      SAPendingDeprecationWarning
      savepoint() (sqlalchemy.events.ConnectionEvents method)
      (sqlalchemy.interfaces.ConnectionProxy method)
      SAWarning
      scalar (sqlalchemy.ext.associationproxy.AssociationProxy attribute)
      scalar() (sqlalchemy.engine.Connectable method)
      (sqlalchemy.engine.Connection method)
      (sqlalchemy.engine.ResultProxy method)
      (sqlalchemy.engine.interfaces.Compiled method)
      (sqlalchemy.orm.query.Query method)
      (sqlalchemy.orm.session.Session method)
      (sqlalchemy.sql.compiler.DDLCompiler method)
      (sqlalchemy.sql.expression.CompoundSelect method)
      (sqlalchemy.sql.expression.Delete method)
      (sqlalchemy.sql.expression.Executable method)
      (sqlalchemy.sql.expression.Insert method)
      (sqlalchemy.sql.expression.Select method)
      (sqlalchemy.sql.expression.Update method)
      ScalarSelect (class in sqlalchemy.sql.expression)
      schema (sqlalchemy.schema.Table attribute)
      SchemaEventTarget (class in sqlalchemy.events)
      SchemaItem (class in sqlalchemy.schema)
      SchemaType (class in sqlalchemy.types)
      scoped_session (class in sqlalchemy.orm.scoping)
      ScopedRegistry (class in sqlalchemy.util)
      Select (class in sqlalchemy.sql.expression)
      select() (in module sqlalchemy.sql.expression)
      (sqlalchemy.schema.Table method)
      (sqlalchemy.sql.expression.Alias method)
      (sqlalchemy.sql.expression.CTE method)
      (sqlalchemy.sql.expression.CompoundSelect method)
      (sqlalchemy.sql.expression.FromClause method)
      (sqlalchemy.sql.expression.Join method)
      (sqlalchemy.sql.expression.Select method)
      (sqlalchemy.sql.expression.TableClause method)
      select_entity_from() (sqlalchemy.orm.query.Query method)
      select_from() (sqlalchemy.orm.query.Query method)
      (sqlalchemy.sql.expression.Select method)
      Selectable (class in sqlalchemy.sql.expression)
      selectable (sqlalchemy.orm.mapper.Mapper attribute)
      (sqlalchemy.orm.query.Query attribute)
      (sqlalchemy.schema.Table attribute)
      SelectBase (class in sqlalchemy.sql.expression)
      self_and_descendants (sqlalchemy.orm.mapper.Mapper attribute)
      self_group() (sqlalchemy.schema.Column method)
      (sqlalchemy.schema.Table method)
      (sqlalchemy.sql.expression.Alias method)
      (sqlalchemy.sql.expression.BindParameter method)
      (sqlalchemy.sql.expression.CTE method)
      (sqlalchemy.sql.expression.ClauseElement method)
      (sqlalchemy.sql.expression.ColumnClause method)
      (sqlalchemy.sql.expression.ColumnElement method)
      (sqlalchemy.sql.expression.Delete method)
      (sqlalchemy.sql.expression.Insert method)
      (sqlalchemy.sql.expression.Select method)
      (sqlalchemy.sql.expression.TableClause method)
      (sqlalchemy.sql.expression.Update method)
      Sequence (class in sqlalchemy.schema)
      Serializer() (in module sqlalchemy.ext.serializer)
      server_default (sqlalchemy.schema.Column attribute)
      server_onupdate (sqlalchemy.schema.Column attribute)
      Session
      (class in sqlalchemy.orm.session)
      session (sqlalchemy.orm.state.InstanceState attribute)
      session_user (class in sqlalchemy.sql.functions)
      SessionEvents (class in sqlalchemy.orm.events)
      SessionExtension (class in sqlalchemy.orm.interfaces)
      sessionmaker (class in sqlalchemy.orm.session)
      SessionTransaction (class in sqlalchemy.orm.session)
      SET (class in sqlalchemy.dialects.mysql)
      set() (sqlalchemy.orm.collections.MappedCollection method)
      (sqlalchemy.orm.events.AttributeEvents method)
      (sqlalchemy.orm.interfaces.AttributeExtension method)
      (sqlalchemy.util.ScopedRegistry method)
      set_attribute() (in module sqlalchemy.orm.attributes)
      set_committed_value() (in module sqlalchemy.orm.attributes)
      set_input_sizes() (sqlalchemy.engine.default.DefaultExecutionContext method)
      set_isolation_level() (sqlalchemy.engine.default.DefaultDialect method)
      (sqlalchemy.engine.interfaces.Dialect method)
      set_shard() (sqlalchemy.ext.horizontal_shard.ShardedQuery method)
      setdefault() (sqlalchemy.orm.collections.MappedCollection method)
      setter() (sqlalchemy.ext.hybrid.hybrid_property method)
      setup() (sqlalchemy.orm.interfaces.MapperProperty method)
      ShardedQuery (class in sqlalchemy.ext.horizontal_shard)
      ShardedSession (class in sqlalchemy.ext.horizontal_shard)
      sharding (module)
      shares_lineage() (sqlalchemy.schema.Column method)
      (sqlalchemy.sql.expression.BindParameter method)
      (sqlalchemy.sql.expression.ColumnClause method)
      (sqlalchemy.sql.expression.ColumnElement method)
      should_autocommit_text() (sqlalchemy.engine.interfaces.ExecutionContext method)
      single (sqlalchemy.orm.mapper.Mapper attribute)
      SingletonThreadPool (class in sqlalchemy.pool)
      slice() (sqlalchemy.dialects.postgresql.HSTORE.comparator_factory method)
      (sqlalchemy.orm.query.Query method)
      SMALLDATETIME (class in sqlalchemy.dialects.mssql)
      SMALLINT (class in sqlalchemy.dialects.mysql)
      (class in sqlalchemy.types)
      SmallInteger (class in sqlalchemy.types)
      SMALLMONEY (class in sqlalchemy.dialects.mssql)
      sorted_tables (sqlalchemy.schema.MetaData attribute)
      sql_compiler (sqlalchemy.engine.interfaces.Compiled attribute)
      SQL_VARIANT (class in sqlalchemy.dialects.mssql)
      sqlalchemy.dialects.drizzle.base (module)
      sqlalchemy.dialects.drizzle.mysqldb (module)
      sqlalchemy.dialects.firebird.base (module)
      sqlalchemy.dialects.firebird.fdb (module)
      sqlalchemy.dialects.firebird.kinterbasdb (module)
      sqlalchemy.dialects.informix.base (module)
      sqlalchemy.dialects.informix.informixdb (module)
      sqlalchemy.dialects.mssql.adodbapi (module)
      sqlalchemy.dialects.mssql.base (module)
      sqlalchemy.dialects.mssql.mxodbc (module)
      sqlalchemy.dialects.mssql.pymssql (module)
      sqlalchemy.dialects.mssql.pyodbc (module)
      sqlalchemy.dialects.mssql.zxjdbc (module)
      sqlalchemy.dialects.mysql.base (module)
      sqlalchemy.dialects.mysql.cymysql (module)
      sqlalchemy.dialects.mysql.gaerdbms (module)
      sqlalchemy.dialects.mysql.mysqlconnector (module)
      sqlalchemy.dialects.mysql.mysqldb (module)
      sqlalchemy.dialects.mysql.oursql (module)
      sqlalchemy.dialects.mysql.pymysql (module)
      sqlalchemy.dialects.mysql.pyodbc (module)
      sqlalchemy.dialects.mysql.zxjdbc (module)
      sqlalchemy.dialects.oracle.base (module)
      sqlalchemy.dialects.oracle.cx_oracle (module)
      sqlalchemy.dialects.oracle.zxjdbc (module)
      sqlalchemy.dialects.postgresql.base (module)
      sqlalchemy.dialects.postgresql.pg8000 (module)
      sqlalchemy.dialects.postgresql.psycopg2 (module)
      sqlalchemy.dialects.postgresql.pypostgresql (module)
      sqlalchemy.dialects.postgresql.zxjdbc (module)
      sqlalchemy.dialects.sqlite (module)
      sqlalchemy.dialects.sqlite.base (module)
      sqlalchemy.dialects.sqlite.pysqlite (module)
      sqlalchemy.dialects.sybase.base (module)
      sqlalchemy.dialects.sybase.mxodbc (module)
      sqlalchemy.dialects.sybase.pyodbc (module)
      sqlalchemy.dialects.sybase.pysybase (module)
      sqlalchemy.engine (module)
      sqlalchemy.exc (module)
      sqlalchemy.ext.associationproxy (module)
      sqlalchemy.ext.compiler (module)
      sqlalchemy.ext.declarative (module)
      sqlalchemy.ext.horizontal_shard (module)
      sqlalchemy.ext.hybrid (module)
      sqlalchemy.ext.instrumentation (module)
      sqlalchemy.ext.mutable (module)
      sqlalchemy.ext.orderinglist (module)
      sqlalchemy.ext.serializer (module)
      sqlalchemy.inspection (module)
      sqlalchemy.interfaces (module)
      sqlalchemy.orm (module)
      sqlalchemy.orm.exc (module)
      sqlalchemy.orm.interfaces (module)
      sqlalchemy.orm.session (module)
      sqlalchemy.pool (module)
      sqlalchemy.schema (module)
      sqlalchemy.sql.expression (module)
      sqlalchemy.sql.functions (module)
      sqlalchemy.types (module)
      SQLAlchemyError
      SQLCompiler (class in sqlalchemy.sql.compiler)
      StaleDataError
      startswith() (sqlalchemy.schema.Column method)
      (sqlalchemy.sql.expression.BindParameter method)
      (sqlalchemy.sql.expression.ColumnClause method)
      (sqlalchemy.sql.expression.ColumnElement method)
      (sqlalchemy.sql.operators.ColumnOperators method)
      state_getter() (sqlalchemy.ext.instrumentation.InstrumentationManager method)
      (sqlalchemy.orm.instrumentation.ClassManager class method)
      statement (sqlalchemy.exc.StatementError attribute)
      (sqlalchemy.orm.query.Query attribute)
      statement_compiler (sqlalchemy.engine.default.DefaultDialect attribute)
      StatementError
      StaticPool (class in sqlalchemy.pool)
      strictly_left_of() (sqlalchemy.dialects.postgresql.ranges.RangeOperators.comparator_factory method)
      strictly_right_of() (sqlalchemy.dialects.postgresql.ranges.RangeOperators.comparator_factory method)
      String (class in sqlalchemy.types)
      subquery
      subquery() (in module sqlalchemy.sql.expression)
      (sqlalchemy.orm.query.Query method)
      subqueryload() (in module sqlalchemy.orm)
      subqueryload_all() (in module sqlalchemy.orm)
      sum (class in sqlalchemy.sql.functions)
      sum() (sqlalchemy.orm.attributes.History method)
      supports_execution (sqlalchemy.schema.Column attribute)
      (sqlalchemy.schema.Table attribute)
      (sqlalchemy.sql.expression.ColumnElement attribute)
      supports_sane_multi_rowcount() (sqlalchemy.engine.ResultProxy method)
      supports_sane_rowcount() (sqlalchemy.engine.ResultProxy method)
      synonym() (in module sqlalchemy.orm)
      synonym_for() (in module sqlalchemy.ext.declarative)
      SynonymProperty (class in sqlalchemy.orm.descriptor_props)
      synonyms (sqlalchemy.orm.mapper.Mapper attribute)
      sysdate (class in sqlalchemy.sql.functions)

      T

      Table (class in sqlalchemy.schema)
      table (sqlalchemy.orm.properties.RelationshipProperty attribute)
      (sqlalchemy.schema.Column attribute)
      table() (in module sqlalchemy.sql.expression)
      table_names() (sqlalchemy.engine.Engine method)
      TableClause (class in sqlalchemy.sql.expression)
      tables (sqlalchemy.orm.mapper.Mapper attribute)
      target (sqlalchemy.schema.DDLElement attribute)
      target_class (sqlalchemy.ext.associationproxy.AssociationProxy attribute)
      target_fullname (sqlalchemy.schema.ForeignKey attribute)
      TEXT (class in sqlalchemy.dialects.drizzle)
      (class in sqlalchemy.dialects.mssql)
      (class in sqlalchemy.dialects.mysql)
      Text (class in sqlalchemy.types)
      TEXT (class in sqlalchemy.types)
      text() (in module sqlalchemy.sql.expression)
      TextClause (class in sqlalchemy.sql.expression)
      thread safety
      Connection
      Transaction
      transactions
      ThreadLocalMetaData (class in sqlalchemy.schema)
      ThreadLocalRegistry (class in sqlalchemy.util)
      TIME (class in sqlalchemy.dialects.mssql)
      (class in sqlalchemy.dialects.mysql)
      (class in sqlalchemy.dialects.sqlite)
      (class in sqlalchemy.types)
      Time (class in sqlalchemy.types)
      TimeoutError
      TIMESTAMP (class in sqlalchemy.dialects.drizzle)
      (class in sqlalchemy.dialects.mysql)
      (class in sqlalchemy.types)
      timetuple (sqlalchemy.schema.Column attribute)
      (sqlalchemy.sql.expression.ColumnElement attribute)
      (sqlalchemy.sql.operators.ColumnOperators attribute)
      TINYBLOB (class in sqlalchemy.dialects.mysql)
      TINYINT (class in sqlalchemy.dialects.mssql)
      (class in sqlalchemy.dialects.mysql)
      TINYTEXT (class in sqlalchemy.dialects.mysql)
      tometadata() (sqlalchemy.schema.Table method)
      Transaction (class in sqlalchemy.engine)
      transaction (sqlalchemy.orm.session.Session attribute)
      transaction() (sqlalchemy.engine.Connection method)
      (sqlalchemy.engine.Engine method)
      transient (sqlalchemy.orm.state.InstanceState attribute)
      translate_connect_args() (sqlalchemy.engine.url.URL method)
      translate_row() (sqlalchemy.orm.events.MapperEvents method)
      (sqlalchemy.orm.interfaces.MapperExtension method)
      true() (in module sqlalchemy.sql.expression)
      True_ (class in sqlalchemy.sql.expression)
      TSRANGE (class in sqlalchemy.dialects.postgresql)
      TSTZRANGE (class in sqlalchemy.dialects.postgresql)
      Tuple (class in sqlalchemy.sql.expression)
      tuple_() (in module sqlalchemy.sql.expression)
      TwoPhaseTransaction (class in sqlalchemy.engine)
      type (sqlalchemy.dialects.postgresql.hstore attribute)
      (sqlalchemy.schema.Column attribute)
      (sqlalchemy.sql.expression.ColumnElement attribute)
      (sqlalchemy.sql.functions.char_length attribute)
      (sqlalchemy.sql.functions.concat attribute)
      (sqlalchemy.sql.functions.count attribute)
      (sqlalchemy.sql.functions.current_date attribute)
      (sqlalchemy.sql.functions.current_time attribute)
      (sqlalchemy.sql.functions.current_timestamp attribute)
      (sqlalchemy.sql.functions.current_user attribute)
      (sqlalchemy.sql.functions.localtime attribute)
      (sqlalchemy.sql.functions.localtimestamp attribute)
      (sqlalchemy.sql.functions.next_value attribute)
      (sqlalchemy.sql.functions.now attribute)
      (sqlalchemy.sql.functions.session_user attribute)
      (sqlalchemy.sql.functions.sysdate attribute)
      (sqlalchemy.sql.functions.user attribute)
      type_coerce() (in module sqlalchemy.sql.expression)
      type_descriptor() (sqlalchemy.engine.default.DefaultDialect method)
      (sqlalchemy.engine.interfaces.Dialect class method)
      type_engine() (sqlalchemy.types.TypeDecorator method)
      TypeDecorator (class in sqlalchemy.types)
      TypeEngine (class in sqlalchemy.types)
      TypeEngine.Comparator (class in sqlalchemy.types)

      U

      UnaryExpression (class in sqlalchemy.sql.expression)
      UnboundExecutionError
      undefer() (in module sqlalchemy.orm)
      undefer_group() (in module sqlalchemy.orm)
      unformat_identifiers() (sqlalchemy.sql.compiler.IdentifierPreparer method)
      Unicode (class in sqlalchemy.types)
      UnicodeText (class in sqlalchemy.types)
      uninstall_descriptor() (sqlalchemy.ext.instrumentation.InstrumentationManager method)
      uninstall_member() (sqlalchemy.ext.instrumentation.InstrumentationManager method)
      union() (in module sqlalchemy.sql.expression)
      (sqlalchemy.orm.query.Query method)
      (sqlalchemy.sql.expression.Select method)
      union_all() (in module sqlalchemy.sql.expression)
      (sqlalchemy.orm.query.Query method)
      (sqlalchemy.sql.expression.Select method)
      unique_connection() (sqlalchemy.pool.Pool method)
      (sqlalchemy.pool.QueuePool method)
      unique_params() (sqlalchemy.schema.Column method)
      (sqlalchemy.schema.Table method)
      (sqlalchemy.sql.expression.Alias method)
      (sqlalchemy.sql.expression.BindParameter method)
      (sqlalchemy.sql.expression.CTE method)
      (sqlalchemy.sql.expression.ClauseElement method)
      (sqlalchemy.sql.expression.ColumnElement method)
      (sqlalchemy.sql.expression.CompoundSelect method)
      (sqlalchemy.sql.expression.Delete method)
      (sqlalchemy.sql.expression.Insert method)
      (sqlalchemy.sql.expression.Join method)
      (sqlalchemy.sql.expression.Select method)
      (sqlalchemy.sql.expression.Update method)
      UniqueConstraint (class in sqlalchemy.schema)
      UNIQUEIDENTIFIER (class in sqlalchemy.dialects.mssql)
      unit of work
      unloaded (sqlalchemy.orm.state.InstanceState attribute)
      UnmappedClassError
      UnmappedColumnError
      UnmappedError
      UnmappedInstanceError
      unmodified (sqlalchemy.orm.state.InstanceState attribute)
      unmodified_intersection() (sqlalchemy.orm.state.InstanceState method)
      unpickle() (sqlalchemy.orm.events.InstanceEvents method)
      unregister() (sqlalchemy.orm.instrumentation.ClassManager method)
      UnsupportedCompilationError
      Update (class in sqlalchemy.sql.expression)
      update() (in module sqlalchemy.sql.expression)
      (sqlalchemy.orm.collections.MappedCollection method)
      (sqlalchemy.orm.query.Query method)
      (sqlalchemy.schema.Table method)
      (sqlalchemy.sql.expression.TableClause method)
      update_execution_options() (sqlalchemy.engine.Engine method)
      update_from_clause() (sqlalchemy.sql.compiler.SQLCompiler method)
      update_limit_clause() (sqlalchemy.sql.compiler.SQLCompiler method)
      update_tables_clause() (sqlalchemy.sql.compiler.SQLCompiler method)
      UpdateBase (class in sqlalchemy.sql.expression)
      URL (class in sqlalchemy.engine.url)
      user (class in sqlalchemy.sql.functions)
      UserDefinedType (class in sqlalchemy.types)
      UUID (class in sqlalchemy.dialects.postgresql)

      V

      validates() (in module sqlalchemy.orm)
      validators (sqlalchemy.orm.mapper.Mapper attribute)
      vals() (sqlalchemy.dialects.postgresql.HSTORE.comparator_factory method)
      value (sqlalchemy.orm.state.AttributeState attribute)
      value() (sqlalchemy.orm.query.Query method)
      values() (sqlalchemy.orm.query.Query method)
      (sqlalchemy.sql.expression.Insert method)
      (sqlalchemy.sql.expression.Update method)
      (sqlalchemy.sql.expression.ValuesBase method)
      ValuesBase (class in sqlalchemy.sql.expression)
      VARBINARY (class in sqlalchemy.dialects.mysql)
      (class in sqlalchemy.types)
      VARCHAR (class in sqlalchemy.dialects.drizzle)
      (class in sqlalchemy.dialects.mssql)
      (class in sqlalchemy.dialects.mysql)
      (class in sqlalchemy.types)
      Variant (class in sqlalchemy.types)
      versioning (module)
      vertical (module)

      W

      WHERE clause
      where() (sqlalchemy.sql.expression.Delete method)
      (sqlalchemy.sql.expression.ScalarSelect method)
      (sqlalchemy.sql.expression.Select method)
      (sqlalchemy.sql.expression.Update method)
      whereclause (sqlalchemy.orm.query.Query attribute)
      with_entities() (sqlalchemy.orm.query.Query method)
      with_hint() (sqlalchemy.orm.query.Query method)
      (sqlalchemy.sql.expression.Delete method)
      (sqlalchemy.sql.expression.Insert method)
      (sqlalchemy.sql.expression.Select method)
      (sqlalchemy.sql.expression.Update method)
      (sqlalchemy.sql.expression.UpdateBase method)
      with_labels() (sqlalchemy.orm.query.Query method)
      with_lockmode() (sqlalchemy.orm.query.Query method)
      with_only_columns() (sqlalchemy.sql.expression.Select method)
      with_parent() (in module sqlalchemy.orm)
      (sqlalchemy.orm.query.Query method)
      with_polymorphic() (in module sqlalchemy.orm)
      (sqlalchemy.orm.query.Query method)
      with_polymorphic_mappers (sqlalchemy.orm.mapper.Mapper attribute)
      with_session() (sqlalchemy.orm.query.Query method)
      with_transformation() (sqlalchemy.orm.query.Query method)
      with_variant() (sqlalchemy.types.TypeDecorator method)
      (sqlalchemy.types.TypeEngine method)

      Y

      YEAR (class in sqlalchemy.dialects.mysql)
      yield_per() (sqlalchemy.orm.query.Query method)
SQLAlchemy-0.8.4/doc/glossary.html: Glossary — SQLAlchemy 0.8 Documentation

      SQLAlchemy 0.8 Documentation

      Release: 0.8.4 | Release Date: December 8, 2013

      Glossary

      Note

The Glossary is a brand new addition to the documentation. While sparse at the moment, we hope to fill it up with plenty of new terms soon!

      ACID
      ACID model

      An acronym for “Atomicity, Consistency, Isolation, Durability”; a set of properties that guarantee that database transactions are processed reliably. (via Wikipedia)

      annotations

      Annotations are a concept used internally by SQLAlchemy in order to store additional information along with ClauseElement objects. A Python dictionary is associated with a copy of the object, which contains key/value pairs significant to various internal systems, mostly within the ORM:

      some_column = Column('some_column', Integer)
      some_column_annotated = some_column._annotate({"entity": User})

      The annotation system differs from the public dictionary Column.info in that the above annotation operation creates a copy of the new Column, rather than considering all annotation values to be part of a single unit. The ORM creates copies of expression objects in order to apply annotations that are specific to their context, such as to differentiate columns that should render themselves as relative to a joined-inheritance entity versus those which should render relative to their immediate parent table alone, as well as to differentiate columns within the “join condition” of a relationship where the column in some cases needs to be expressed in terms of one particular table alias or another, based on its position within the join expression.

      atomicity

      Atomicity is one of the components of the ACID model, and requires that each transaction is “all or nothing”: if one part of the transaction fails, the entire transaction fails, and the database state is left unchanged. An atomic system must guarantee atomicity in each and every situation, including power failures, errors, and crashes. (via Wikipedia)

      columns clause

      The portion of the SELECT statement which enumerates the SQL expressions to be returned in the result set. The expressions follow the SELECT keyword directly and are a comma-separated list of individual expressions.

      E.g.:

      SELECT user_account.name, user_account.email
      FROM user_account WHERE user_account.name = 'fred'

Above, the list of columns user_account.name, user_account.email is the columns clause of the SELECT.

      consistency

Consistency is one of the components of the ACID model, and ensures that any transaction will bring the database from one valid state to another. Any data written to the database must be valid according to all defined rules, including but not limited to constraints, cascades, triggers, and any combination thereof. (via Wikipedia)

      correlates
      correlated subquery
      correlated subqueries

      A subquery is correlated if it depends on data in the enclosing SELECT.

      Below, a subquery selects the aggregate value MIN(a.id) from the email_address table, such that it will be invoked for each value of user_account.id, correlating the value of this column against the email_address.user_account_id column:

      SELECT user_account.name, email_address.email
       FROM user_account
       JOIN email_address ON user_account.id=email_address.user_account_id
       WHERE email_address.id = (
          SELECT MIN(a.id) FROM email_address AS a
          WHERE a.user_account_id=user_account.id
       )

The above subquery refers to the user_account table, which is not itself in the FROM clause of this nested query. Instead, the user_account table is received from the enclosing query, where each row selected from user_account results in a distinct execution of the subquery.

      A correlated subquery is in most cases present in the WHERE clause or columns clause of the immediately enclosing SELECT statement, as well as in the ORDER BY or HAVING clause.

      In less common cases, a correlated subquery may be present in the FROM clause of an enclosing SELECT; in these cases the correlation is typically due to the enclosing SELECT itself being enclosed in the WHERE, ORDER BY, columns or HAVING clause of another SELECT, such as:

      SELECT parent.id FROM parent
      WHERE EXISTS (
          SELECT * FROM (
              SELECT child.id AS id, child.parent_id AS parent_id, child.pos AS pos
              FROM child
              WHERE child.parent_id = parent.id ORDER BY child.pos
          LIMIT 3)
      WHERE id = 7)

      Correlation from one SELECT directly to one which encloses the correlated query via its FROM clause is not possible, because the correlation can only proceed once the original source rows from the enclosing statement’s FROM clause are available.

      DBAPI

DBAPI is shorthand for the phrase “Python Database API Specification”. This is a widely used specification within Python to define common usage patterns for all database connection packages. The DBAPI is a “low level” API which is typically the lowest level system used in a Python application to talk to a database. SQLAlchemy’s dialect system is constructed around the operation of the DBAPI, providing individual dialect classes which service a specific DBAPI on top of a specific database engine; for example, the create_engine() URL postgresql+psycopg2://@localhost/test refers to the psycopg2 DBAPI/dialect combination, whereas the URL mysql+mysqldb://@localhost/test refers to the MySQL for Python DBAPI/dialect combination.
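For illustration, a minimal sketch of creating engines from the two URLs mentioned above (this assumes the corresponding DBAPI packages are installed locally):

from sqlalchemy import create_engine

# each URL names a dialect+DBAPI pair; the DBAPI module is located when the engine is created
pg_engine = create_engine("postgresql+psycopg2://@localhost/test")
mysql_engine = create_engine("mysql+mysqldb://@localhost/test")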

      descriptor
      descriptors

      In Python, a descriptor is an object attribute with “binding behavior”, one whose attribute access has been overridden by methods in the descriptor protocol. Those methods are __get__(), __set__(), and __delete__(). If any of those methods are defined for an object, it is said to be a descriptor.

      In SQLAlchemy, descriptors are used heavily in order to provide attribute behavior on mapped classes. When a class is mapped as such:

      class MyClass(Base):
          __tablename__ = 'foo'
      
          id = Column(Integer, primary_key=True)
          data = Column(String)

      The MyClass class will be mapped when its definition is complete, at which point the id and data attributes, starting out as Column objects, will be replaced by the instrumentation system with instances of InstrumentedAttribute, which are descriptors that provide the above mentioned __get__(), __set__() and __delete__() methods. The InstrumentedAttribute will generate a SQL expression when used at the class level:

      >>> print MyClass.data == 5
      data = :data_1

      and at the instance level, keeps track of changes to values, and also lazy loads unloaded attributes from the database:

      >>> m1 = MyClass()
      >>> m1.id = 5
      >>> m1.data = "some data"
      
      >>> from sqlalchemy import inspect
      >>> inspect(m1).attrs.data.history.added
      "some data"
      discriminator

      A result-set column which is used during polymorphic loading to determine what kind of mapped class should be applied to a particular incoming result row. In SQLAlchemy, the classes are always part of a hierarchy mapping using inheritance mapping.
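As a brief sketch (the Employee/Engineer names are illustrative and assume the usual declarative imports), a discriminator column is typically configured via the polymorphic_on mapper argument:

class Employee(Base):
    __tablename__ = 'employee'
    id = Column(Integer, primary_key=True)
    type = Column(String(20))   # the discriminator column
    __mapper_args__ = {'polymorphic_on': type,
                       'polymorphic_identity': 'employee'}

class Engineer(Employee):
    __tablename__ = 'engineer'
    id = Column(Integer, ForeignKey('employee.id'), primary_key=True)
    __mapper_args__ = {'polymorphic_identity': 'engineer'}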

      durability

      Durability is a property of the ACID model which means that once a transaction has been committed, it will remain so, even in the event of power loss, crashes, or errors. In a relational database, for instance, once a group of SQL statements execute, the results need to be stored permanently (even if the database crashes immediately thereafter). (via Wikipedia)

      FROM clause

The portion of the SELECT statement which indicates the initial source of rows.

      A simple SELECT will feature one or more table names in its FROM clause. Multiple sources are separated by a comma:

      SELECT user.name, address.email_address
      FROM user, address
      WHERE user.id=address.user_id

      The FROM clause is also where explicit joins are specified. We can rewrite the above SELECT using a single FROM element which consists of a JOIN of the two tables:

      SELECT user.name, address.email_address
      FROM user JOIN address ON user.id=address.user_id
      generative
A term that SQLAlchemy uses to refer to what’s normally known as method chaining; see that term for details.
      identity map

      A mapping between Python objects and their database identities. The identity map is a collection that’s associated with an ORM session object, and maintains a single instance of every database object keyed to its identity. The advantage to this pattern is that all operations which occur for a particular database identity are transparently coordinated onto a single object instance. When using an identity map in conjunction with an isolated transaction, having a reference to an object that’s known to have a particular primary key can be considered from a practical standpoint to be a proxy to the actual database row.
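A minimal sketch of the effect, assuming a mapped User class and an active Session:

>>> u1 = session.query(User).get(5)
>>> u2 = session.query(User).get(5)   # found in the identity map; no second SELECT is emitted
>>> u1 is u2
True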

      See also

      Martin Fowler - Identity Map - http://martinfowler.com/eaaCatalog/identityMap.html

      instrumentation
      instrumented
Instrumentation refers to the process of augmenting the functionality and attribute set of a particular class. Ideally, the behavior of the class should remain close to a regular class, except that additional behaviors and features are made available. The SQLAlchemy mapping process, among other things, adds database-enabled descriptors to a mapped class which each represent a particular database column or relationship to a related class.
      isolation
      isolated

      The isolation property of the ACID model ensures that the concurrent execution of transactions results in a system state that would be obtained if transactions were executed serially, i.e. one after the other. Each transaction must execute in total isolation i.e. if T1 and T2 execute concurrently then each should remain independent of the other. (via Wikipedia)

      lazy load
      lazy loads

      In object relational mapping, a “lazy load” refers to an attribute that does not contain its database-side value for some period of time, typically when the object is first loaded. Instead, the attribute receives a memoization that causes it to go out to the database and load its data when it’s first used. Using this pattern, the complexity and time spent within object fetches can sometimes be reduced, in that attributes for related tables don’t need to be addressed immediately.
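For example, assuming a mapped User class with a lazily loaded addresses collection (the names are illustrative):

user = session.query(User).first()    # SELECT for the user row only
addresses = user.addresses            # first access emits a second SELECT for the related rows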

      mapping
      mapped
      We say a class is “mapped” when it has been passed through the orm.mapper() function. This process associates the class with a database table or other selectable construct, so that instances of it can be persisted using a Session as well as loaded using a Query.
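
A minimal classical-mapping sketch of this process, for illustration only:

from sqlalchemy import Table, Column, Integer, String, MetaData
from sqlalchemy.orm import mapper

metadata = MetaData()
user_table = Table('user', metadata,
        Column('id', Integer, primary_key=True),
        Column('name', String(50)))

class User(object):
    pass

mapper(User, user_table)   # User is now a "mapped" class
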
      method chaining

An object-oriented technique whereby the state of an object is constructed by calling methods on the object. The object features any number of methods, each of which returns a new object (or in some cases the same object) with additional state added to the object.

      The two SQLAlchemy objects that make the most use of method chaining are the Select object and the Query object. For example, a Select object can be assigned two expressions to its WHERE clause as well as an ORDER BY clause by calling upon the where() and order_by() methods:

      stmt = select([user.c.name]).\
                  where(user.c.id > 5).\
            where(user.c.name.like('e%')).\
                  order_by(user.c.name)

      Each method call above returns a copy of the original Select object with additional qualifiers added.

      See also

      generative

      N plus one problem

      The N plus one problem is a common side effect of the lazy load pattern, whereby an application wishes to iterate through a related attribute or collection on each member of a result set of objects, where that attribute or collection is set to be loaded via the lazy load pattern. The net result is that a SELECT statement is emitted to load the initial result set of parent objects; then, as the application iterates through each member, an additional SELECT statement is emitted for each member in order to load the related attribute or collection for that member. The end result is that for a result set of N parent objects, there will be N + 1 SELECT statements emitted.

      The N plus one problem is alleviated using eager loading.
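As a sketch of the fix, assuming a User class with an addresses relationship, a joined eager load collapses the 1 + N statements into a single SELECT:

from sqlalchemy.orm import joinedload

# one SELECT with a LEFT OUTER JOIN, rather than one additional SELECT per parent row
users = session.query(User).options(joinedload(User.addresses)).all()
for user in users:
    print user.addresses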

      polymorphic
      polymorphically

      Refers to a function that handles several types at once. In SQLAlchemy, the term is usually applied to the concept of an ORM mapped class whereby a query operation will return different subclasses based on information in the result set, typically by checking the value of a particular column in the result known as the discriminator.

Polymorphic loading in SQLAlchemy implies that one or a combination of three different schemes is used to map a hierarchy of classes: “joined”, “single”, and “concrete”. The section Mapping Class Inheritance Hierarchies describes inheritance mapping fully.

      release
      releases
      released

      In the context of SQLAlchemy, the term “released” refers to the process of ending the usage of a particular database connection. SQLAlchemy features the usage of connection pools, which allows configurability as to the lifespan of database connections. When using a pooled connection, the process of “closing” it, i.e. invoking a statement like connection.close(), may have the effect of the connection being returned to an existing pool, or it may have the effect of actually shutting down the underlying TCP/IP connection referred to by that connection - which one takes place depends on configuration as well as the current state of the pool. So we used the term released instead, to mean “do whatever it is you do with connections when we’re done using them”.

The term will sometimes be used in the phrase, “release transactional resources”, to indicate more explicitly that what we are actually “releasing” is any transactional state which has accumulated upon the connection. In most situations, the process of selecting from tables, emitting updates, etc. acquires isolated state upon that connection as well as potential row or table locks. This state is all local to a particular transaction on the connection, and is released when we emit a rollback. An important feature of the connection pool is that when we return a connection to the pool, the connection.rollback() method of the DBAPI is called as well, so that as the connection is set up to be used again, it’s in a “clean” state with no references held to the previous series of operations.
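A short sketch of the typical pattern, assuming an existing Engine and a Table named some_table:

conn = engine.connect()
try:
    result = conn.execute(some_table.select())
finally:
    conn.close()   # the connection is "released": returned to the pool with its transactional state rolled back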

      RETURNING

This is a non-standard SQL clause, provided in various forms by certain backends, which provides the service of returning a result set upon execution of an INSERT, UPDATE or DELETE statement. Any set of columns from the matched rows can be returned, as though they were produced from a SELECT statement.

      The RETURNING clause provides both a dramatic performance boost to common update/select scenarios, including retrieval of inline- or default- generated primary key values and defaults at the moment they were created, as well as a way to get at server-generated default values in an atomic way.

      An example of RETURNING, idiomatic to Postgresql, looks like:

      INSERT INTO user_account (name) VALUES ('new name') RETURNING id, timestamp

      Above, the INSERT statement will provide upon execution a result set which includes the values of the columns user_account.id and user_account.timestamp, which above should have been generated as default values as they are not included otherwise (but note any series of columns or SQL expressions can be placed into RETURNING, not just default-value columns).

The backends that currently support RETURNING or a similar construct are Postgresql, SQL Server, Oracle, and Firebird. The Postgresql and Firebird implementations are generally full featured, whereas the implementations of SQL Server and Oracle have caveats. On SQL Server, the clause is known as “OUTPUT INSERTED” for INSERT and UPDATE statements and “OUTPUT DELETED” for DELETE statements; the key caveat is that triggers are not supported in conjunction with this keyword. On Oracle, it is known as “RETURNING...INTO”, and requires that the value be placed into an OUT parameter, meaning not only is the syntax awkward, but it can also only be used for one row at a time.

      SQLAlchemy’s UpdateBase.returning() system provides a layer of abstraction on top of the RETURNING systems of these backends to provide a consistent interface for returning columns. The ORM also includes many optimizations that make use of RETURNING when available.
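As a sketch, assuming a user_account Table on a backend that supports RETURNING:

stmt = user_account.insert().values(name='new name').returning(user_account.c.id)
result = conn.execute(stmt)
new_id = result.scalar()   # server-generated primary key, with no follow-up SELECT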

      Session

      The container or scope for ORM database operations. Sessions load instances from the database, track changes to mapped instances and persist changes in a single unit of work when flushed.

      subquery

      Refers to a SELECT statement that is embedded within an enclosing SELECT.

A subquery comes in two general flavors, one known as a “scalar select” which specifically must return exactly one row and one column, and the other form which acts as a “derived table” and serves as a source of rows for the FROM clause of another select. A scalar select is eligible to be placed in the WHERE clause, columns clause, ORDER BY clause or HAVING clause of the enclosing select, whereas the derived table form is eligible to be placed in the FROM clause of the enclosing SELECT.

      Examples:

1. a scalar subquery placed in the columns clause of an enclosing SELECT. The subquery in this example is a correlated subquery because the rows it selects from are constrained by a value supplied from the enclosing statement.

        SELECT id, (SELECT name FROM address WHERE address.user_id=user.id)
        FROM user
2. a scalar subquery placed in the WHERE clause of an enclosing SELECT. The subquery in this example is not correlated, as it selects a fixed result.

        SELECT id, name FROM user
        WHERE status=(SELECT status_id FROM status_code WHERE code='C')
      3. a derived table subquery placed in the FROM clause of an enclosing SELECT. Such a subquery is almost always given an alias name.

        SELECT user.id, user.name, ad_subq.email_address
        FROM
            user JOIN
            (select user_id, email_address FROM address WHERE address_type='Q') AS ad_subq
            ON user.id = ad_subq.user_id
      unit of work

      This pattern is where the system transparently keeps track of changes to objects and periodically flushes all those pending changes out to the database. SQLAlchemy’s Session implements this pattern fully in a manner similar to that of Hibernate.
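A short sketch, assuming a mapped User class and a configured Session:

session.add(User(name='ed'))
session.add(User(name='wendy'))
session.commit()   # both pending INSERTs are flushed together as a single unit of work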

      WHERE clause

      The portion of the SELECT statement which indicates criteria by which rows should be filtered. It is a single SQL expression which follows the keyword WHERE.

      SELECT user_account.name, user_account.email
      FROM user_account
      WHERE user_account.name = 'fred' AND user_account.status = 'E'

      Above, the phrase WHERE user_account.name = 'fred' AND user_account.status = 'E' comprises the WHERE clause of the SELECT.

SQLAlchemy-0.8.4/doc/index.html: SQLAlchemy 0.8 Documentation

      SQLAlchemy 0.8 Documentation

      Release: 0.8.4 | Release Date: December 8, 2013

      SQLAlchemy Documentation

      Getting Started

      A high level view and getting set up.

      Overview | Installation Guide | Frequently Asked Questions | Migration from 0.7 | Glossary | Changelog catalog

      SQLAlchemy ORM

      Here, the Object Relational Mapper is introduced and fully described. If you want to work with higher-level SQL which is constructed automatically for you, as well as automated persistence of Python objects, proceed first to the tutorial.

      SQLAlchemy Core

The breadth of SQLAlchemy’s SQL rendering engine, DBAPI integration, transaction integration, and schema description services is documented here. In contrast to the ORM’s domain-centric mode of usage, the SQL Expression Language provides a schema-centric usage paradigm.

      Dialect Documentation

      The dialect is the system SQLAlchemy uses to communicate with various types of DBAPIs and databases. This section describes notes, options, and usage patterns regarding individual dialects.

      Index of all Dialects

SQLAlchemy-0.8.4/doc/intro.html: Overview — SQLAlchemy 0.8 Documentation

      SQLAlchemy 0.8 Documentation

      Release: 0.8.4 | Release Date: December 8, 2013

      Overview

      Overview

The SQLAlchemy SQL Toolkit and Object Relational Mapper is a comprehensive set of tools for working with databases and Python. It has several distinct areas of functionality which can be used individually or combined together. Its major components are illustrated below, with component dependencies organized into layers:

      _images/sqla_arch_small.png

      Above, the two most significant front-facing portions of SQLAlchemy are the Object Relational Mapper and the SQL Expression Language. SQL Expressions can be used independently of the ORM. When using the ORM, the SQL Expression language remains part of the public facing API as it is used within object-relational configurations and queries.

      Documentation Overview

      The documentation is separated into three sections: SQLAlchemy ORM, SQLAlchemy Core, and Dialects.

      In SQLAlchemy ORM, the Object Relational Mapper is introduced and fully described. New users should begin with the Object Relational Tutorial. If you want to work with higher-level SQL which is constructed automatically for you, as well as management of Python objects, proceed to this tutorial.

In SQLAlchemy Core, the breadth of SQLAlchemy’s SQL and database integration and description services is documented, the core of which is the SQL Expression Language. The SQL Expression Language is a toolkit all its own, independent of the ORM package, and provides a system of constructing manipulable SQL expressions which can be programmatically built, modified, and executed, returning cursor-like result sets. In contrast to the ORM’s domain-centric mode of usage, the expression language provides a schema-centric usage paradigm. New users should begin here with SQL Expression Language Tutorial. SQLAlchemy engine, connection, and pooling services are also described in SQLAlchemy Core.

      In Dialects, reference documentation for all provided database and DBAPI backends is provided.

      Code Examples

      Working code examples, mostly regarding the ORM, are included in the SQLAlchemy distribution. A description of all the included example applications is at Examples.

      There is also a wide variety of examples involving both core SQLAlchemy constructs as well as the ORM on the wiki. See Theatrum Chemicum.

      Installation Guide

      Supported Platforms

      SQLAlchemy has been tested against the following platforms:

      • cPython since version 2.5, through the 2.xx series
      • cPython version 3, throughout all 3.xx series
      • Jython 2.5 or greater
      • Pypy 1.5 or greater

      Changed in version 0.8: Python 2.5 is now the minimum Python version supported.

      Supported Installation Methods

      SQLAlchemy supports installation using standard Python “distutils” or “setuptools” methodologies. An overview of potential setups is as follows:

      • Plain Python Distutils - SQLAlchemy can be installed with a clean Python install using the services provided via Python Distutils, using the setup.py script. The C extensions as well as Python 3 builds are supported.
      • Standard Setuptools - When using setuptools, SQLAlchemy can be installed via setup.py or easy_install, and the C extensions are supported. setuptools is not supported on Python 3 at the time of this writing.
      • Distribute - With distribute, SQLAlchemy can be installed via setup.py or easy_install, and the C extensions as well as Python 3 builds are supported.
      • pip - pip is an installer that rides on top of setuptools or distribute, replacing the usage of easy_install. It is often preferred for its simpler mode of usage.

      Install via easy_install or pip

      When easy_install or pip is available, the distribution can be downloaded from Pypi and installed in one step:

      easy_install SQLAlchemy

      Or with pip:

      pip install SQLAlchemy

      This command will download the latest version of SQLAlchemy from the Python Cheese Shop and install it to your system.

      Installing using setup.py

      Otherwise, you can install from the distribution using the setup.py script:

      python setup.py install

      Installing the C Extensions

      SQLAlchemy includes C extensions which provide an extra speed boost for dealing with result sets. Currently, the extensions are only supported on the 2.xx series of cPython, not Python 3 or Pypy.

setup.py will automatically build the extensions if an appropriate platform is detected. If the build of the C extensions fails due to a missing compiler or other issue, the setup process will output a warning message, re-run the build without the C extensions, and report final status upon completion.

      To run the build/install without even attempting to compile the C extensions, pass the flag --without-cextensions to the setup.py script:

      python setup.py --without-cextensions install

      Or with pip:

      pip install --global-option='--without-cextensions' SQLAlchemy

      Note

      The --without-cextensions flag is available only if setuptools or distribute is installed. It is not available on a plain Python distutils installation. The library will still install without the C extensions if they cannot be built, however.

      Installing on Python 3

      SQLAlchemy ships as Python 2 code. For Python 3 usage, the setup.py script will invoke the Python 2to3 tool on the build, plugging in an extra “preprocessor” as well. The 2to3 step works with Python distutils (part of the standard Python install) and Distribute - it will not work with a non-Distribute setuptools installation.

      Installing a Database API

      SQLAlchemy is designed to operate with a DBAPI implementation built for a particular database, and includes support for the most popular databases. The individual database sections in Dialects enumerate the available DBAPIs for each database, including external links.

      Checking the Installed SQLAlchemy Version

      This documentation covers SQLAlchemy version 0.8. If you’re working on a system that already has SQLAlchemy installed, check the version from your Python prompt like this:

      >>> import sqlalchemy
      >>> sqlalchemy.__version__ 
      0.8.0

      0.7 to 0.8 Migration

Notes on what’s changed from 0.7 to 0.8 are available at What’s New in SQLAlchemy 0.8?.

SQLAlchemy-0.8.4/doc/orm/collections.html: Collection Configuration and Techniques — SQLAlchemy 0.8 Documentation

      SQLAlchemy 0.8 Documentation

      Release: 0.8.4 | Release Date: December 8, 2013
      SQLAlchemy 0.8 Documentation » SQLAlchemy ORM » Collection Configuration and Techniques

      Collection Configuration and Techniques

      Collection Configuration and Techniques

      The relationship() function defines a linkage between two classes. When the linkage defines a one-to-many or many-to-many relationship, it’s represented as a Python collection when objects are loaded and manipulated. This section presents additional information about collection configuration and techniques.

      Working with Large Collections

      The default behavior of relationship() is to fully load the collection of items in, as according to the loading strategy of the relationship. Additionally, the Session by default only knows how to delete objects which are actually present within the session. When a parent instance is marked for deletion and flushed, the Session loads its full list of child items in so that they may either be deleted as well, or have their foreign key value set to null; this is to avoid constraint violations. For large collections of child items, there are several strategies to bypass full loading of child items both at load time as well as deletion time.

      Dynamic Relationship Loaders

      A key feature to enable management of a large collection is the so-called “dynamic” relationship. This is an optional form of relationship() which returns a Query object in place of a collection when accessed. filter() criterion may be applied as well as limits and offsets, either explicitly or via array slices:

      class User(Base):
          __tablename__ = 'user'
      
          posts = relationship(Post, lazy="dynamic")
      
      jack = session.query(User).get(id)
      
      # filter Jack's blog posts
      posts = jack.posts.filter(Post.headline=='this is a post')
      
      # apply array slices
      posts = jack.posts[5:20]

      The dynamic relationship supports limited write operations, via the append() and remove() methods:

      oldpost = jack.posts.filter(Post.headline=='old post').one()
      jack.posts.remove(oldpost)
      
      jack.posts.append(Post('new post'))

      Since the read side of the dynamic relationship always queries the database, changes to the underlying collection will not be visible until the data has been flushed. However, as long as “autoflush” is enabled on the Session in use, this will occur automatically each time the collection is about to emit a query.

      To place a dynamic relationship on a backref, use the backref() function in conjunction with lazy='dynamic':

      class Post(Base):
          __table__ = posts_table
      
          user = relationship(User,
                      backref=backref('posts', lazy='dynamic')
                  )

Note that eager/lazy loading options cannot be used in conjunction with dynamic relationships at this time.

      Note

      The dynamic_loader() function is essentially the same as relationship() with the lazy='dynamic' argument specified.

      Warning

      The “dynamic” loader applies to collections only. It is not valid to use “dynamic” loaders with many-to-one, one-to-one, or uselist=False relationships. Newer versions of SQLAlchemy emit warnings or exceptions in these cases.

      Setting Noload

      A “noload” relationship never loads from the database, even when accessed. It is configured using lazy='noload':

      class MyClass(Base):
          __tablename__ = 'some_table'
      
          children = relationship(MyOtherClass, lazy='noload')

      Above, the children collection is fully writeable, and changes to it will be persisted to the database as well as locally available for reading at the time they are added. However when instances of MyClass are freshly loaded from the database, the children collection stays empty.

      Using Passive Deletes

      Use passive_deletes=True to disable child object loading on a DELETE operation, in conjunction with “ON DELETE (CASCADE|SET NULL)” on your database to automatically cascade deletes to child objects:

      class MyClass(Base):
          __tablename__ = 'mytable'
          id = Column(Integer, primary_key=True)
          children = relationship("MyOtherClass",
                          cascade="all, delete-orphan",
                          passive_deletes=True)
      
      class MyOtherClass(Base):
          __tablename__ = 'myothertable'
          id = Column(Integer, primary_key=True)
          parent_id = Column(Integer,
                      ForeignKey('mytable.id', ondelete='CASCADE')
                          )

      Note

      To use “ON DELETE CASCADE”, the underlying database engine must support foreign keys.

      • When using MySQL, an appropriate storage engine must be selected. See Storage Engines for details.
      • When using SQLite, foreign key support must be enabled explicitly. See Foreign Key Support for details.

      When passive_deletes is applied, the children relationship will not be loaded into memory when an instance of MyClass is marked for deletion. The cascade="all, delete-orphan" will take effect for instances of MyOtherClass which are currently present in the session; however for instances of MyOtherClass which are not loaded, SQLAlchemy assumes that “ON DELETE CASCADE” rules will ensure that those rows are deleted by the database.

      Customizing Collection Access

      Mapping a one-to-many or many-to-many relationship results in a collection of values accessible through an attribute on the parent instance. By default, this collection is a list:

      class Parent(Base):
          __tablename__ = 'parent'
          parent_id = Column(Integer, primary_key=True)
      
          children = relationship(Child)
      
      parent = Parent()
      parent.children.append(Child())
      print parent.children[0]

      Collections are not limited to lists. Sets, mutable sequences and almost any other Python object that can act as a container can be used in place of the default list, by specifying the collection_class option on relationship():

      class Parent(Base):
          __tablename__ = 'parent'
          parent_id = Column(Integer, primary_key=True)
      
          # use a set
          children = relationship(Child, collection_class=set)
      
      parent = Parent()
      child = Child()
      parent.children.add(child)
      assert child in parent.children

      Dictionary Collections

A little extra detail is needed when using a dictionary as a collection. This is because objects are always loaded from the database as lists, and a key-generation strategy must be available to populate the dictionary correctly. The attribute_mapped_collection() function is by far the most common way to achieve a simple dictionary collection. It produces a dictionary class that will apply a particular attribute of the mapped class as a key. Below we map an Item class containing a dictionary of Note items keyed to the Note.keyword attribute:

      from sqlalchemy import Column, Integer, String, ForeignKey
      from sqlalchemy.orm import relationship
      from sqlalchemy.orm.collections import attribute_mapped_collection
      from sqlalchemy.ext.declarative import declarative_base
      
      Base = declarative_base()
      
      class Item(Base):
          __tablename__ = 'item'
          id = Column(Integer, primary_key=True)
          notes = relationship("Note",
                      collection_class=attribute_mapped_collection('keyword'),
                      cascade="all, delete-orphan")
      
      class Note(Base):
          __tablename__ = 'note'
          id = Column(Integer, primary_key=True)
          item_id = Column(Integer, ForeignKey('item.id'), nullable=False)
          keyword = Column(String)
          text = Column(String)
      
          def __init__(self, keyword, text):
              self.keyword = keyword
              self.text = text

      Item.notes is then a dictionary:

      >>> item = Item()
      >>> item.notes['a'] = Note('a', 'atext')
      >>> item.notes.items()
      {'a': <__main__.Note object at 0x2eaaf0>}

attribute_mapped_collection() will ensure that the .keyword attribute of each Note complies with the key in the dictionary. For example, when assigning to Item.notes, the dictionary key we supply must match that of the actual Note object:

      item = Item()
      item.notes = {
                  'a': Note('a', 'atext'),
                  'b': Note('b', 'btext')
              }

      The attribute which attribute_mapped_collection() uses as a key does not need to be mapped at all! Using a regular Python @property allows virtually any detail or combination of details about the object to be used as the key, as below when we establish it as a tuple of Note.keyword and the first ten letters of the Note.text field:

      class Item(Base):
          __tablename__ = 'item'
          id = Column(Integer, primary_key=True)
          notes = relationship("Note",
                      collection_class=attribute_mapped_collection('note_key'),
                      backref="item",
                      cascade="all, delete-orphan")
      
      class Note(Base):
          __tablename__ = 'note'
          id = Column(Integer, primary_key=True)
          item_id = Column(Integer, ForeignKey('item.id'), nullable=False)
          keyword = Column(String)
          text = Column(String)
      
          @property
          def note_key(self):
              return (self.keyword, self.text[0:10])
      
          def __init__(self, keyword, text):
              self.keyword = keyword
              self.text = text

      Above we added a Note.item backref. Assigning to this reverse relationship, the Note is added to the Item.notes dictionary and the key is generated for us automatically:

      >>> item = Item()
      >>> n1 = Note("a", "atext")
      >>> n1.item = item
      >>> item.notes
      {('a', 'atext'): <__main__.Note object at 0x2eaaf0>}

Other built-in dictionary types include column_mapped_collection(), which is almost like attribute_mapped_collection() except it is given the Column object directly:

      from sqlalchemy.orm.collections import column_mapped_collection
      
      class Item(Base):
          __tablename__ = 'item'
          id = Column(Integer, primary_key=True)
          notes = relationship("Note",
                      collection_class=column_mapped_collection(Note.__table__.c.keyword),
                      cascade="all, delete-orphan")

      as well as mapped_collection() which is passed any callable function. Note that it’s usually easier to use attribute_mapped_collection() along with a @property as mentioned earlier:

      from sqlalchemy.orm.collections import mapped_collection
      
      class Item(Base):
          __tablename__ = 'item'
          id = Column(Integer, primary_key=True)
          notes = relationship("Note",
                      collection_class=mapped_collection(lambda note: note.text[0:10]),
                      cascade="all, delete-orphan")

      Dictionary mappings are often combined with the “Association Proxy” extension to produce streamlined dictionary views. See Proxying to Dictionary Based Collections and Composite Association Proxies for examples.

      sqlalchemy.orm.collections.attribute_mapped_collection(attr_name)

      A dictionary-based collection type with attribute-based keying.

      Returns a MappedCollection factory with a keying based on the ‘attr_name’ attribute of entities in the collection, where attr_name is the string name of the attribute.

      The key value must be immutable for the lifetime of the object. You can not, for example, map on foreign key values if those key values will change during the session, i.e. from None to a database-assigned integer after a session flush.

      sqlalchemy.orm.collections.column_mapped_collection(mapping_spec)

      A dictionary-based collection type with column-based keying.

      Returns a MappedCollection factory with a keying function generated from mapping_spec, which may be a Column or a sequence of Columns.

      The key value must be immutable for the lifetime of the object. You can not, for example, map on foreign key values if those key values will change during the session, i.e. from None to a database-assigned integer after a session flush.

      sqlalchemy.orm.collections.mapped_collection(keyfunc)

      A dictionary-based collection type with arbitrary keying.

      Returns a MappedCollection factory with a keying function generated from keyfunc, a callable that takes an entity and returns a key value.

      The key value must be immutable for the lifetime of the object. You can not, for example, map on foreign key values if those key values will change during the session, i.e. from None to a database-assigned integer after a session flush.

      Custom Collection Implementations

You can use your own types for collections as well. In simple cases, inheriting from list or set, adding custom behavior, is all that’s needed. In other cases, special decorators are needed to tell SQLAlchemy more detail about how the collection operates.

      Do I need a custom collection implementation?

In most cases not at all! The most common use case for a “custom” collection is one that validates or marshals incoming values into a new form, such as a string that becomes a class instance, or one which goes a step beyond and represents the data internally in some fashion, presenting a “view” of that data on the outside in a different form.

      For the first use case, the orm.validates() decorator is by far the simplest way to intercept incoming values in all cases for the purposes of validation and simple marshaling. See Simple Validators for an example of this.

      For the second use case, the Association Proxy extension is a well-tested, widely used system that provides a read/write “view” of a collection in terms of some attribute present on the target object. As the target attribute can be a @property that returns virtually anything, a wide array of “alternative” views of a collection can be constructed with just a few functions. This approach leaves the underlying mapped collection unaffected and avoids the need to carefully tailor collection behavior on a method-by-method basis.
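For instance, a hedged sketch in which an Item’s collection of Note objects is exposed as a simple collection of their .text strings (the note_texts name is illustrative, and the declarative setup from the earlier examples is assumed):

from sqlalchemy.ext.associationproxy import association_proxy

class Item(Base):
    __tablename__ = 'item'
    id = Column(Integer, primary_key=True)
    notes = relationship("Note")

    # a "view" of the notes collection in terms of each Note's .text attribute
    note_texts = association_proxy('notes', 'text')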

      Customized collections are useful when the collection needs to have special behaviors upon access or mutation operations that can’t otherwise be modeled externally to the collection. They can of course be combined with the above two approaches.

      Collections in SQLAlchemy are transparently instrumented. Instrumentation means that normal operations on the collection are tracked and result in changes being written to the database at flush time. Additionally, collection operations can fire events which indicate some secondary operation must take place. Examples of a secondary operation include saving the child item in the parent’s Session (i.e. the save-update cascade), as well as synchronizing the state of a bi-directional relationship (i.e. a backref()).

      The collections package understands the basic interface of lists, sets and dicts and will automatically apply instrumentation to those built-in types and their subclasses. Object-derived types that implement a basic collection interface are detected and instrumented via duck-typing:

      class ListLike(object):
          def __init__(self):
              self.data = []
          def append(self, item):
              self.data.append(item)
          def remove(self, item):
              self.data.remove(item)
          def extend(self, items):
              self.data.extend(items)
          def __iter__(self):
              return iter(self.data)
          def foo(self):
              return 'foo'

      append, remove, and extend are known list-like methods, and will be instrumented automatically. __iter__ is not a mutator method and won’t be instrumented, and foo won’t be either.

      Duck-typing (i.e. guesswork) isn’t rock-solid, of course, so you can be explicit about the interface you are implementing by providing an __emulates__ class attribute:

      class SetLike(object):
          __emulates__ = set
      
          def __init__(self):
              self.data = set()
          def append(self, item):
              self.data.add(item)
          def remove(self, item):
              self.data.remove(item)
          def __iter__(self):
              return iter(self.data)

This class looks list-like because of append, but __emulates__ forces it to be treated as set-like. remove is known to be part of the set interface and will be instrumented.

      But this class won’t work quite yet: a little glue is needed to adapt it for use by SQLAlchemy. The ORM needs to know which methods to use to append, remove and iterate over members of the collection. When using a type like list or set, the appropriate methods are well-known and used automatically when present. This set-like class does not provide the expected add method, so we must supply an explicit mapping for the ORM via a decorator.

      Annotating Custom Collections via Decorators

      Decorators can be used to tag the individual methods the ORM needs to manage collections. Use them when your class doesn’t quite meet the regular interface for its container type, or when you otherwise would like to use a different method to get the job done.

      from sqlalchemy.orm.collections import collection
      
      class SetLike(object):
          __emulates__ = set
      
          def __init__(self):
              self.data = set()
      
          @collection.appender
          def append(self, item):
              self.data.add(item)
      
          def remove(self, item):
              self.data.remove(item)
      
          def __iter__(self):
              return iter(self.data)

      And that’s all that’s needed to complete the example. SQLAlchemy will add instances via the append method. remove and __iter__ are the default methods for sets and will be used for removing and iteration. Default methods can be changed as well:

      from sqlalchemy.orm.collections import collection
      
      class MyList(list):
          @collection.remover
          def zark(self, item):
              # do something special...
      
          @collection.iterator
          def hey_use_this_instead_for_iteration(self):
              # ...

      There is no requirement to be list-, or set-like at all. Collection classes can be any shape, so long as they have the append, remove and iterate interface marked for SQLAlchemy’s use. Append and remove methods will be called with a mapped entity as the single argument, and iterator methods are called with no arguments and must return an iterator.
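As a brief sketch of an arbitrarily shaped collection (illustrative only), the decorated trio below is all the ORM requires:

from sqlalchemy.orm.collections import collection

class Bag(object):
    """A container of no particular shape; only the decorated interface matters."""

    def __init__(self):
        self._members = []

    @collection.appender
    def _append(self, member):
        self._members.append(member)

    @collection.remover
    def _remove(self, member):
        self._members.remove(member)

    @collection.iterator
    def _iterate(self):
        return iter(self._members)

# ... relationship(..., collection_class=Bag)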

      class sqlalchemy.orm.collections.collection

      Decorators for entity collection classes.

      The decorators fall into two groups: annotations and interception recipes.

      The annotating decorators (appender, remover, iterator, linker, converter, internally_instrumented) indicate the method’s purpose and take no arguments. They are not written with parens:

      @collection.appender
      def append(self, append): ...

      The recipe decorators all require parens, even those that take no arguments:

      @collection.adds('entity')
      def insert(self, position, entity): ...
      
      @collection.removes_return()
      def popitem(self): ...
      static adds(arg)

      Mark the method as adding an entity to the collection.

      Adds “add to collection” handling to the method. The decorator argument indicates which method argument holds the SQLAlchemy-relevant value. Arguments can be specified positionally (i.e. integer) or by name:

      @collection.adds(1)
      def push(self, item): ...
      
      @collection.adds('entity')
      def do_stuff(self, thing, entity=None): ...
      static appender(fn)

      Tag the method as the collection appender.

      The appender method is called with one positional argument: the value to append. The method will be automatically decorated with ‘adds(1)’ if not already decorated:

      @collection.appender
      def add(self, append): ...
      
      # or, equivalently
      @collection.appender
      @collection.adds(1)
      def add(self, append): ...
      
      # for mapping type, an 'append' may kick out a previous value
      # that occupies that slot.  consider d['a'] = 'foo'- any previous
      # value in d['a'] is discarded.
      @collection.appender
      @collection.replaces(1)
      def add(self, entity):
          key = some_key_func(entity)
          previous = None
          if key in self:
              previous = self[key]
          self[key] = entity
          return previous

      If the value to append is not allowed in the collection, you may raise an exception. Something to remember is that the appender will be called for each object mapped by a database query. If the database contains rows that violate your collection semantics, you will need to get creative to fix the problem, as access via the collection will not work.

      If the appender method is internally instrumented, you must also receive the keyword argument ‘_sa_initiator’ and ensure its promulgation to collection events.

      static converter(fn)

      Tag the method as the collection converter.

      This optional method will be called when a collection is being replaced entirely, as in:

      myobj.acollection = [newvalue1, newvalue2]

The converter method will receive the object being assigned and should return an iterable of values suitable for use by the appender method. A converter must not assign values or mutate the collection; its sole job is to adapt the value the user provides into an iterable of values for the ORM’s use.

The default converter implementation will use duck-typing to do the conversion. A dict-like collection will be converted into an iterable of dictionary values, and other types will simply be iterated:

      @collection.converter
      def convert(self, other): ...

      If the duck-typing of the object does not match the type of this collection, a TypeError is raised.

      Supply an implementation of this method if you want to expand the range of possible types that can be assigned in bulk or perform validation on the values about to be assigned.

      static internally_instrumented(fn)

      Tag the method as instrumented.

      This tag will prevent any decoration from being applied to the method. Use this if you are orchestrating your own calls to collection_adapter() in one of the basic SQLAlchemy interface methods, or to prevent an automatic ABC method decoration from wrapping your implementation:

      # normally an 'extend' method on a list-like class would be
      # automatically intercepted and re-implemented in terms of
      # SQLAlchemy events and append().  your implementation will
      # never be called, unless:
      @collection.internally_instrumented
      def extend(self, items): ...
      static iterator(fn)

Tag the method as the collection iterator.

      The iterator method is called with no arguments. It is expected to return an iterator over all collection members:

      @collection.iterator
      def __iter__(self): ...

static link(fn)

Deprecated; synonym for collection.linker().

      static linker(fn)

      Tag the method as a “linked to attribute” event handler.

      This optional event handler will be called when the collection class is linked to or unlinked from the InstrumentedAttribute. It is invoked immediately after the ‘_sa_adapter’ property is set on the instance. A single argument is passed: the collection adapter that has been linked, or None if unlinking.

      static remover(fn)

      Tag the method as the collection remover.

      The remover method is called with one positional argument: the value to remove. The method will be automatically decorated with removes_return() if not already decorated:

      @collection.remover
      def zap(self, entity): ...
      
      # or, equivalently
      @collection.remover
      @collection.removes_return()
def zap(self): ...

      If the value to remove is not present in the collection, you may raise an exception or return None to ignore the error.

      If the remove method is internally instrumented, you must also receive the keyword argument ‘_sa_initiator’ and ensure its promulgation to collection events.

      static removes(arg)

      Mark the method as removing an entity in the collection.

      Adds “remove from collection” handling to the method. The decorator argument indicates which method argument holds the SQLAlchemy-relevant value to be removed. Arguments can be specified positionally (i.e. integer) or by name:

      @collection.removes(1)
      def zap(self, item): ...

      For methods where the value to remove is not known at call-time, use collection.removes_return.

      static removes_return()

      Mark the method as removing an entity in the collection.

      Adds “remove from collection” handling to the method. The return value of the method, if any, is considered the value to remove. The method arguments are not inspected:

      @collection.removes_return()
      def pop(self): ...

For methods where the value to remove is known at call-time, use collection.removes.

      static replaces(arg)

      Mark the method as replacing an entity in the collection.

Adds “add to collection” and “remove from collection” handling to the method. The decorator argument indicates which method argument holds the SQLAlchemy-relevant value to be added, and the return value, if any, will be considered the value to remove.

      Arguments can be specified positionally (i.e. integer) or by name:

      @collection.replaces(2)
      def __setitem__(self, index, item): ...

      Custom Dictionary-Based Collections

      The MappedCollection class can be used as a base class for your custom types or as a mix-in to quickly add dict collection support to other classes. It uses a keying function to delegate to __setitem__ and __delitem__:

      from sqlalchemy.util import OrderedDict
      from sqlalchemy.orm.collections import MappedCollection
      
      class NodeMap(OrderedDict, MappedCollection):
          """Holds 'Node' objects, keyed by the 'name' attribute with insert order maintained."""
      
          def __init__(self, *args, **kw):
              MappedCollection.__init__(self, keyfunc=lambda node: node.name)
              OrderedDict.__init__(self, *args, **kw)

When subclassing MappedCollection, user-defined versions of __setitem__() or __delitem__() should be decorated with collection.internally_instrumented(), if they call down to those same methods on MappedCollection. This is because the methods on MappedCollection are already instrumented; calling them from within an already instrumented call can cause events to be fired off repeatedly, or inappropriately, leading to internal state corruption in rare cases:

      from sqlalchemy.orm.collections import MappedCollection,\
                                          collection
      
      class MyMappedCollection(MappedCollection):
          """Use @internally_instrumented when your methods
          call down to already-instrumented methods.
      
          """
      
          @collection.internally_instrumented
          def __setitem__(self, key, value, _sa_initiator=None):
              # do something with key, value
              super(MyMappedCollection, self).__setitem__(key, value, _sa_initiator)
      
          @collection.internally_instrumented
          def __delitem__(self, key, _sa_initiator=None):
              # do something with key
              super(MyMappedCollection, self).__delitem__(key, _sa_initiator)

The ORM understands the dict interface just like lists and sets, and will automatically instrument all dict-like methods if you choose to subclass dict or provide dict-like collection behavior in a duck-typed class. You must decorate appender and remover methods, however; there are no compatible methods in the basic dictionary interface for SQLAlchemy to use by default. Iteration will go through itervalues() unless otherwise decorated.

      Note

      Due to a bug in MappedCollection prior to version 0.7.6, this workaround usually needs to be called before a custom subclass of MappedCollection which uses collection.internally_instrumented() can be used:

      from sqlalchemy.orm.collections import _instrument_class, MappedCollection
      _instrument_class(MappedCollection)

      This will ensure that the MappedCollection has been properly initialized with custom __setitem__() and __delitem__() methods before it is used in a custom subclass.

      class sqlalchemy.orm.collections.MappedCollection(keyfunc)

      Bases: __builtin__.dict

      A basic dictionary-based collection class.

      Extends dict with the minimal bag semantics that collection classes require. set and remove are implemented in terms of a keying function: any callable that takes an object and returns an object for use as a dictionary key.

      __init__(keyfunc)

      Create a new collection with keying provided by keyfunc.

      keyfunc may be any callable that takes an object and returns an object for use as a dictionary key.

      The keyfunc will be called every time the ORM needs to add a member by value-only (such as when loading instances from the database) or remove a member. The usual cautions about dictionary keying apply: keyfunc(object) should return the same output for the life of the collection. Keying based on mutable properties can result in unreachable instances “lost” in the collection.
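
      A sketch of supplying a keyfunc-based collection to relationship(); for the common case of keying by a fixed attribute, the attribute_mapped_collection() helper can be used instead. The Note class and its 'keyword' attribute are hypothetical:

      from sqlalchemy.orm import relationship
      from sqlalchemy.orm.collections import MappedCollection

      # collection_class accepts any no-arg callable producing the collection
      notes = relationship(
          "Note",
          collection_class=lambda: MappedCollection(lambda note: note.keyword))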

      clear() → None. Remove all items from D.
      pop(k[, d]) → v, remove specified key and return the corresponding value.

      If key is not found, d is returned if given, otherwise KeyError is raised

      popitem() → (k, v), remove and return some (key, value) pair as a 2-tuple; but raise KeyError if D is empty.

      remove(value, _sa_initiator=None)

      Remove an item by value, consulting the keyfunc for the key.

      set(value, _sa_initiator=None)

      Add an item by value, consulting the keyfunc for the key.

      setdefault(k[, d]) → D.get(k,d), also set D[k]=d if k not in D
      update([E], **F) → None. Update D from dict/iterable E and F.

      If E present and has a .keys() method, does: for k in E: D[k] = E[k] If E present and lacks .keys() method, does: for (k, v) in E: D[k] = v In either case, this is followed by: for k in F: D[k] = F[k]

      Instrumentation and Custom Types

      Many custom types and existing library classes can be used as an entity collection type as-is without further ado. However, it is important to note that the instrumentation process will modify the type, adding decorators around methods automatically.

      The decorations are lightweight and no-op outside of relationships, but they do add unneeded overhead when triggered elsewhere. When using a library class as a collection, it can be good practice to use the “trivial subclass” trick to restrict the decorations to just your usage in relationships. For example:

      class MyAwesomeList(some.great.library.AwesomeList):
          pass
      
      # ... relationship(..., collection_class=MyAwesomeList)

      The ORM uses this approach for built-ins, quietly substituting a trivial subclass when a list, set or dict is used directly.

      Collection Internals

      Various internal methods.

      sqlalchemy.orm.collections.bulk_replace(values, existing_adapter, new_adapter)

      Load a new collection, firing events based on prior like membership.

      Appends instances in values onto the new_adapter. Events will be fired for any instance not present in the existing_adapter. Any instances in existing_adapter not present in values will have remove events fired upon them.

      Parameters:
      • values – An iterable of collection member instances
      • existing_adapter – A CollectionAdapter of instances to be replaced
      • new_adapter – An empty CollectionAdapter to load with values
      class sqlalchemy.orm.collections.collection

      Decorators for entity collection classes.

      The decorators fall into two groups: annotations and interception recipes.

      The annotating decorators (appender, remover, iterator, linker, converter, internally_instrumented) indicate the method’s purpose and take no arguments. They are not written with parens:

      @collection.appender
      def append(self, append): ...

      The recipe decorators all require parens, even those that take no arguments:

      @collection.adds('entity')
      def insert(self, position, entity): ...
      
      @collection.removes_return()
      def popitem(self): ...
      sqlalchemy.orm.collections.collection_adapter(collection)

      Fetch the CollectionAdapter for a collection.

      class sqlalchemy.orm.collections.CollectionAdapter(attr, owner_state, data)

      Bridges between the ORM and arbitrary Python collections.

      Proxies base-level collection operations (append, remove, iterate) to the underlying Python collection, and emits add/remove events for entities entering or leaving the collection.

      The ORM uses CollectionAdapter exclusively for interaction with entity collections.

      The usage of getattr()/setattr() is currently to allow injection of custom methods, such as to unwrap Zope security proxies.

      class sqlalchemy.orm.collections.InstrumentedDict

      Bases: __builtin__.dict

      An instrumented version of the built-in dict.

      class sqlalchemy.orm.collections.InstrumentedList

      Bases: __builtin__.list

      An instrumented version of the built-in list.

      class sqlalchemy.orm.collections.InstrumentedSet

      Bases: __builtin__.set

      An instrumented version of the built-in set.

      sqlalchemy.orm.collections.prepare_instrumentation(factory)

      Prepare a callable for future use as a collection class factory.

      Given a collection class factory (either a type or no-arg callable), return another factory that will produce compatible instances when called.

      This function is responsible for converting collection_class=list into the run-time behavior of collection_class=InstrumentedList.
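
      A small sketch of this run-time substitution, assuming a hypothetical Parent mapping whose 'children' relationship() uses the default list-based collection:

      from sqlalchemy.orm.collections import InstrumentedList

      parent = Parent()              # hypothetical mapped class
      parent.children.append(child)  # hypothetical list-based relationship
      assert isinstance(parent.children, InstrumentedList)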

      Deprecated ORM Event Interfaces

      This section describes the class-based ORM event interface which first existed in SQLAlchemy 0.1, which progressed with more kinds of events up until SQLAlchemy 0.5. The non-ORM analogue is described at Deprecated Event Interfaces.

      Deprecated since version 0.7: As of SQLAlchemy 0.7, the new event system described in Events replaces the extension/proxy/listener system, providing a consistent interface to all events without the need for subclassing.

      Mapper Events

      class sqlalchemy.orm.interfaces.MapperExtension

      Base implementation for Mapper event hooks.

      Note

      MapperExtension is deprecated. Please refer to event.listen() as well as MapperEvents.

      New extension classes subclass MapperExtension and are specified using the extension argument to mapper(), which accepts a single MapperExtension or a list of such:

      from sqlalchemy.orm.interfaces import MapperExtension
      
      class MyExtension(MapperExtension):
          def before_insert(self, mapper, connection, instance):
              print "instance %s before insert !" % instance
      
      m = mapper(User, users_table, extension=MyExtension())

      A single mapper can maintain a chain of MapperExtension objects. When a particular mapping event occurs, the corresponding method on each MapperExtension is invoked serially, and each method has the ability to halt the chain from proceeding further:

      m = mapper(User, users_table, extension=[ext1, ext2, ext3])

      Each MapperExtension method returns the symbol EXT_CONTINUE by default. This symbol generally means “move to the next MapperExtension for processing”. For methods that return objects like translated rows or new object instances, EXT_CONTINUE means the result of the method should be ignored. In some cases it’s required for a default mapper activity to be performed, such as adding a new instance to a result list.

      The symbol EXT_STOP has significance within a chain of MapperExtension objects: when it is returned, the chain stops at that point. Like EXT_CONTINUE, it also carries additional significance in some cases, indicating that a default mapper activity will not be performed.
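
      To illustrate, a sketch of a two-extension chain where the first extension returns EXT_STOP from before_insert(), so the second extension's before_insert() is never consulted; the extension classes and the created_by attribute are hypothetical:

      from sqlalchemy.orm import mapper
      from sqlalchemy.orm.interfaces import MapperExtension, EXT_CONTINUE, EXT_STOP

      class AuditExt(MapperExtension):
          def before_insert(self, mapper, connection, instance):
              instance.created_by = 'system'   # hypothetical attribute
              return EXT_STOP                  # halt the chain here

      class NeverReachedExt(MapperExtension):
          def before_insert(self, mapper, connection, instance):
              # not invoked for inserts, since AuditExt returned EXT_STOP
              return EXT_CONTINUE

      m = mapper(User, users_table, extension=[AuditExt(), NeverReachedExt()])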

      after_delete(mapper, connection, instance)

      Receive an object instance after that instance is deleted.

      The return value is only significant within the MapperExtension chain; the parent mapper’s behavior isn’t modified by this method.

      after_insert(mapper, connection, instance)

      Receive an object instance after that instance is inserted.

      The return value is only significant within the MapperExtension chain; the parent mapper’s behavior isn’t modified by this method.

      after_update(mapper, connection, instance)

      Receive an object instance after that instance is updated.

      The return value is only significant within the MapperExtension chain; the parent mapper’s behavior isn’t modified by this method.

      append_result(mapper, selectcontext, row, instance, result, **flags)

      Receive an object instance before that instance is appended to a result list.

      If this method returns EXT_CONTINUE, result appending will proceed normally. If this method returns any other value or None, result appending will not proceed for this instance, giving this extension an opportunity to do the appending itself, if desired.

      • mapper – The mapper doing the operation.
      • selectcontext – The QueryContext generated from the Query.
      • row – The result row from the database.
      • instance – The object instance to be appended to the result.
      • result – List to which results are being appended.
      • **flags – extra information about the row, same as criterion in the create_row_processor() method of MapperProperty.
      before_delete(mapper, connection, instance)

      Receive an object instance before that instance is deleted.

      Note that no changes to the overall flush plan can be made here, and manipulation of the Session will not have the desired effect. To manipulate the Session within an extension, use SessionExtension.

      The return value is only significant within the MapperExtension chain; the parent mapper’s behavior isn’t modified by this method.

      before_insert(mapper, connection, instance)

      Receive an object instance before that instance is inserted into its table.

      This is a good place to set up primary key values and such that aren’t handled otherwise.

      Column-based attributes can be modified within this method which will result in the new value being inserted. However no changes to the overall flush plan can be made, and manipulation of the Session will not have the desired effect. To manipulate the Session within an extension, use SessionExtension.

      The return value is only significant within the MapperExtension chain; the parent mapper’s behavior isn’t modified by this method.

      before_update(mapper, connection, instance)

      Receive an object instance before that instance is updated.

      Note that this method is called for all instances that are marked as “dirty”, even those which have no net changes to their column-based attributes. An object is marked as dirty when any of its column-based attributes have a “set attribute” operation called or when any of its collections are modified. If, at update time, no column-based attributes have any net changes, no UPDATE statement will be issued. This means that an instance being sent to before_update is not a guarantee that an UPDATE statement will be issued (although you can affect the outcome here).

      To detect if the column-based attributes on the object have net changes, and will therefore generate an UPDATE statement, use object_session(instance).is_modified(instance, include_collections=False).

      Column-based attributes can be modified within this method which will result in the new value being updated. However no changes to the overall flush plan can be made, and manipulation of the Session will not have the desired effect. To manipulate the Session within an extension, use SessionExtension.

      The return value is only significant within the MapperExtension chain; the parent mapper’s behavior isn’t modified by this method.
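
      A sketch of the is_modified() check described above, used inside a MapperExtension; the updated_at attribute is hypothetical:

      import datetime

      from sqlalchemy.orm import object_session
      from sqlalchemy.orm.interfaces import MapperExtension, EXT_CONTINUE

      class TimestampExt(MapperExtension):
          def before_update(self, mapper, connection, instance):
              session = object_session(instance)
              if session.is_modified(instance, include_collections=False):
                  # an UPDATE will be emitted; stamp the row
                  instance.updated_at = datetime.datetime.utcnow()
              return EXT_CONTINUE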

      create_instance(mapper, selectcontext, row, class_)

      Receive a row when a new object instance is about to be created from that row.

      The method can choose to create the instance itself, or it can return EXT_CONTINUE to indicate normal object creation should take place.

      • mapper – The mapper doing the operation.
      • selectcontext – The QueryContext generated from the Query.
      • row – The result row from the database.
      • class_ – The class we are mapping.
      • return value – A new object instance, or EXT_CONTINUE.
      init_failed(mapper, class_, oldinit, instance, args, kwargs)

      Receive an instance when its constructor has been called and has raised an exception.

      This method is only called during a userland construction of an object. It is not called when an object is loaded from the database.

      The return value is only significant within the MapperExtension chain; the parent mapper’s behavior isn’t modified by this method.

      init_instance(mapper, class_, oldinit, instance, args, kwargs)

      Receive an instance when its constructor is called.

      This method is only called during a userland construction of an object. It is not called when an object is loaded from the database.

      The return value is only significant within the MapperExtension chain; the parent mapper’s behavior isn’t modified by this method.

      instrument_class(mapper, class_)

      Receive a class when the mapper is first constructed, and has applied instrumentation to the mapped class.

      The return value is only significant within the MapperExtension chain; the parent mapper’s behavior isn’t modified by this method.

      populate_instance(mapper, selectcontext, row, instance, **flags)

      Receive an instance before that instance has its attributes populated.

      This usually corresponds to a newly loaded instance but may also correspond to an already-loaded instance which has unloaded attributes to be populated. The method may be called many times for a single instance, as multiple result rows are used to populate eagerly loaded collections.

      If this method returns EXT_CONTINUE, instance population will proceed normally. If any other value or None is returned, instance population will not proceed, giving this extension an opportunity to populate the instance itself, if desired.

      Deprecated since version 0.5: Most usages of this hook are obsolete. For a generic “object has been newly created from a row” hook, use reconstruct_instance(), or the @orm.reconstructor decorator.

      reconstruct_instance(mapper, instance)

      Receive an object instance after it has been created via __new__, and after initial attribute population has occurred.

      This typically occurs when the instance is created based on incoming result rows, and is only called once for that instance’s lifetime.

      Note that during a result-row load, this method is called upon the first row received for this instance. Note that some attributes and collections may or may not be loaded or even initialized, depending on what’s present in the result rows.

      The return value is only significant within the MapperExtension chain; the parent mapper’s behavior isn’t modified by this method.

      translate_row(mapper, context, row)

      Perform pre-processing on the given result row and return a new row instance.

      This is called when the mapper first receives a row, before the object identity or the instance itself has been derived from that row. The given row may or may not be a RowProxy object - it will always be a dictionary-like object which contains mapped columns as keys. The returned object should also be a dictionary-like object which recognizes mapped columns as keys.

      If the ultimate return value is EXT_CONTINUE, the row is not translated.

      Session Events

      class sqlalchemy.orm.interfaces.SessionExtension

      Base implementation for Session event hooks.

      Note

      SessionExtension is deprecated. Please refer to event.listen() as well as SessionEvents.

      Subclasses may be installed into a Session (or sessionmaker) using the extension keyword argument:

      from sqlalchemy.orm.interfaces import SessionExtension
      
      class MySessionExtension(SessionExtension):
          def before_commit(self, session):
              print "before commit!"
      
      Session = sessionmaker(extension=MySessionExtension())

      The same SessionExtension instance can be used with any number of sessions.

      after_attach(session, instance)

      Execute after an instance is attached to a session.

      This is called after an add, delete or merge.

      after_begin(session, transaction, connection)

      Execute after a transaction is begun on a connection

      transaction is the SessionTransaction. This method is called after an engine level transaction is begun on a connection.

      after_bulk_delete(session, query, query_context, result)

      Execute after a bulk delete operation to the session.

      This is called after a session.query(...).delete()

      query is the query object that this delete operation was called on. query_context is the query context object. result is the result object returned from the bulk operation.

      after_bulk_update(session, query, query_context, result)

      Execute after a bulk update operation to the session.

      This is called after a session.query(...).update()

      query is the query object that this update operation was called on. query_context is the query context object. result is the result object returned from the bulk operation.

      after_commit(session)

      Execute after a commit has occurred.

      Note that this may not be per-flush if a longer running transaction is ongoing.

      after_flush(session, flush_context)

      Execute after flush has completed, but before commit has been called.

      Note that the session’s state is still in pre-flush, i.e. ‘new’, ‘dirty’, and ‘deleted’ lists still show pre-flush state as well as the history settings on instance attributes.

      after_flush_postexec(session, flush_context)

      Execute after flush has completed, and after the post-exec state occurs.

      This will be when the ‘new’, ‘dirty’, and ‘deleted’ lists are in their final state. An actual commit() may or may not have occurred, depending on whether or not the flush started its own transaction or participated in a larger transaction.

      after_rollback(session)

      Execute after a rollback has occurred.

      Note that this may not be per-flush if a longer running transaction is ongoing.

      before_commit(session)

      Execute right before commit is called.

      Note that this may not be per-flush if a longer running transaction is ongoing.

      before_flush(session, flush_context, instances)

      Execute before flush process has started.

      instances is an optional list of objects which were passed to the flush() method.

      Attribute Events

      class sqlalchemy.orm.interfaces.AttributeExtension

      Base implementation for AttributeImpl event hooks, events that fire upon attribute mutations in user code.

      Note

      AttributeExtension is deprecated. Please refer to event.listen() as well as AttributeEvents.

      AttributeExtension is used to listen for set, remove, and append events on individual mapped attributes. It is established on an individual mapped attribute using the extension argument, available on column_property(), relationship(), and others:

      from sqlalchemy.orm.interfaces import AttributeExtension
      from sqlalchemy.orm import mapper, relationship, column_property
      
      class MyAttrExt(AttributeExtension):
          def append(self, state, value, initiator):
              print "append event !"
              return value
      
          def set(self, state, value, oldvalue, initiator):
              print "set event !"
              return value
      
      mapper(SomeClass, sometable, properties={
          'foo':column_property(sometable.c.foo, extension=MyAttrExt()),
          'bar':relationship(Bar, extension=MyAttrExt())
      })

      Note that the AttributeExtension methods append() and set() need to return the value parameter. The returned value is used as the effective value, and allows the extension to change what is ultimately persisted.

      AttributeExtension is assembled within the descriptors associated with a mapped class.

      active_history = True

      indicates that the set() method would like to receive the ‘old’ value, even if it means firing lazy callables.

      Note that active_history can also be set directly via column_property() and relationship().

      append(state, value, initiator)

      Receive a collection append event.

      The returned value will be used as the actual value to be appended.

      remove(state, value, initiator)

      Receive a remove event.

      No return value is defined.

      set(state, value, oldvalue, initiator)

      Receive a set event.

      The returned value will be used as the actual value to be set.

      ORM Events

      The ORM includes a wide variety of hooks available for subscription.

      New in version 0.7: The event system supersedes the previous system of “extension” classes.

      For an introduction to the event API, see Events. Non-ORM events such as those regarding connections and low-level statement execution are described in Core Events.

      Attribute Events

      class sqlalchemy.orm.events.AttributeEvents

      Bases: sqlalchemy.event.Events

      Define events for object attributes.

      These are typically defined on the class-bound descriptor for the target class.

      e.g.:

      from sqlalchemy import event
      
      def my_append_listener(target, value, initiator):
          print "received append event for target: %s" % target
      
      event.listen(MyClass.collection, 'append', my_append_listener)

      Listeners have the option to return a possibly modified version of the value, when the retval=True flag is passed to listen():

      import re

      def validate_phone(target, value, oldvalue, initiator):
          "Strip non-numeric characters from a phone number"

          return re.sub(r'\D', '', value)

      # set up a listener on the UserContact.phone attribute,
      # instructing it to use the return value
      event.listen(UserContact.phone, 'set', validate_phone, retval=True)

      A validation function like the above can also raise an exception such as ValueError to halt the operation.
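
      For example, a sketch of a listener which rejects a value outright rather than correcting it; the length check is arbitrary:

      def validate_phone_strict(target, value, oldvalue, initiator):
          if len(value) < 7:
              raise ValueError("phone number too short: %r" % value)
          return value

      event.listen(UserContact.phone, 'set', validate_phone_strict, retval=True)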

      Several modifiers are available to the listen() function.

      Parameters:
      • active_history=False – When True, indicates that the “set” event would like to receive the “old” value being replaced unconditionally, even if this requires firing off database loads. Note that active_history can also be set directly via column_property() and relationship().
      • propagate=False – When True, the listener function will be established not just for the class attribute given, but for attributes of the same name on all current subclasses of that class, as well as all future subclasses of that class, using an additional listener that listens for instrumentation events.
      • raw=False – When True, the “target” argument to the event will be the InstanceState management object, rather than the mapped instance itself.
      • retval=False – when True, the user-defined event listening function must return the “value” argument from the function. This gives the listening function the opportunity to change the value that is ultimately used for a “set” or “append” event.
      append(target, value, initiator)

      Receive a collection append event.

      Parameters:
      • target – the object instance receiving the event. If the listener is registered with raw=True, this will be the InstanceState object.
      • value – the value being appended. If this listener is registered with retval=True, the listener function must return this value, or a new value which replaces it.
      • initiator – the attribute implementation object which initiated this event.
      Returns:

      if the event was registered with retval=True, the given value, or a new effective value, should be returned.

      remove(target, value, initiator)

      Receive a collection remove event.

      Parameters:
      • target – the object instance receiving the event. If the listener is registered with raw=True, this will be the InstanceState object.
      • value – the value being removed.
      • initiator – the attribute implementation object which initiated this event.
      Returns:

      No return value is defined for this event.

      set(target, value, oldvalue, initiator)

      Receive a scalar set event.

      Parameters:
      • target – the object instance receiving the event. If the listener is registered with raw=True, this will be the InstanceState object.
      • value – the value being set. If this listener is registered with retval=True, the listener function must return this value, or a new value which replaces it.
      • oldvalue – the previous value being replaced. This may also be the symbol NEVER_SET or NO_VALUE. If the listener is registered with active_history=True, the previous value of the attribute will be loaded from the database if the existing value is currently unloaded or expired.
      • initiator – the attribute implementation object which initiated this event.
      Returns:

      if the event was registered with retval=True, the given value, or a new effective value, should be returned.

      Mapper Events

      class sqlalchemy.orm.events.MapperEvents

      Bases: sqlalchemy.event.Events

      Define events specific to mappings.

      e.g.:

      from sqlalchemy import event
      
      def my_before_insert_listener(mapper, connection, target):
          # execute a stored procedure upon INSERT,
          # apply the value to the row to be inserted
          target.calculated_value = connection.scalar(
                                      "select my_special_function(%d)"
                                      % target.special_number)
      
      # associate the listener function with SomeClass,
      # to execute during the "before_insert" hook
      event.listen(
          SomeClass, 'before_insert', my_before_insert_listener)

      Available targets include:

      • mapped classes
      • unmapped superclasses of mapped or to-be-mapped classes (using the propagate=True flag)
      • Mapper objects
      • the Mapper class itself and the mapper() function indicate listening for all mappers.

      Changed in version 0.8.0: mapper events can be associated with unmapped superclasses of mapped classes.

      Mapper events provide hooks into critical sections of the mapper, including those related to object instrumentation, object loading, and object persistence. In particular, the persistence methods before_insert(), and before_update() are popular places to augment the state being persisted - however, these methods operate with several significant restrictions. The user is encouraged to evaluate the SessionEvents.before_flush() and SessionEvents.after_flush() methods as more flexible and user-friendly hooks in which to apply additional database state during a flush.
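
      As a sketch of the more flexible approach suggested above, a SessionEvents.before_flush() listener can examine and adjust the full set of pending objects; the last_modified_by attribute is hypothetical:

      from sqlalchemy import event
      from sqlalchemy.orm import Session

      @event.listens_for(Session, "before_flush")
      def receive_before_flush(session, flush_context, instances):
          for obj in session.dirty:
              if session.is_modified(obj, include_collections=False):
                  # hypothetical bookkeeping attribute
                  obj.last_modified_by = 'system'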

      When using MapperEvents, several modifiers are available to the event.listen() function.

      Parameters:
      • propagate=False – When True, the event listener should be applied to all inheriting mappers and/or the mappers of inheriting classes, as well as any mapper which is the target of this listener.
      • raw=False – When True, the “target” argument passed to applicable event listener functions will be the instance’s InstanceState management object, rather than the mapped instance itself.
      • retval=False

        when True, the user-defined event function must have a return value, the purpose of which is either to control subsequent event propagation, or to otherwise alter the operation in progress by the mapper. Possible return values are:

        • sqlalchemy.orm.interfaces.EXT_CONTINUE - continue event processing normally.
        • sqlalchemy.orm.interfaces.EXT_STOP - cancel all subsequent event handlers in the chain.
        • other values - the return value specified by specific listeners, such as translate_row() or create_instance().
      after_configured()

      Called after a series of mappers have been configured.

      This corresponds to the orm.configure_mappers() call, which is usually invoked automatically as mappings are first used.

      Theoretically this event is called once per application, but is actually called any time new mappers have been affected by an orm.configure_mappers() call. If new mappings are constructed after existing ones have already been used, this event can be called again.

      after_delete(mapper, connection, target)

      Receive an object instance after a DELETE statement has been emitted corresponding to that instance.

      This event is used to emit additional SQL statements on the given connection as well as to perform application specific bookkeeping related to a deletion event.

      The event is often called for a batch of objects of the same class after their DELETE statements have been emitted at once in a previous step.

      Warning

      Mapper-level flush events are designed to operate on attributes local to the immediate object being handled and via SQL operations with the given Connection only. Handlers here should not make alterations to the state of the Session overall, and in general should not affect any relationship() -mapped attributes, as session cascade rules will not function properly, nor is it always known if the related class has already been handled. Operations that are not supported in mapper events include:

      • Session.add()
      • Session.delete()
      • Mapped collection append, add, remove, delete, discard, etc.
      • Mapped relationship attribute set/del events, i.e. someobject.related = someotherobject

      Operations which manipulate the state of the object relative to other objects are better handled within SessionEvents.before_flush().

      Parameters:
      • mapper – the Mapper which is the target of this event.
      • connection – the Connection being used to emit DELETE statements for this instance. This provides a handle into the current transaction on the target database specific to this instance.
      • target – the mapped instance being deleted. If the event is configured with raw=True, this will instead be the InstanceState state-management object associated with the instance.
      Returns:

      No return value is supported by this event.

      after_insert(mapper, connection, target)

      Receive an object instance after an INSERT statement is emitted corresponding to that instance.

      This event is used to modify in-Python-only state on the instance after an INSERT occurs, as well as to emit additional SQL statements on the given connection.

      The event is often called for a batch of objects of the same class after their INSERT statements have been emitted at once in a previous step. In the extremely rare case that this is not desirable, the mapper() can be configured with batch=False, which will cause batches of instances to be broken up into individual (and more poorly performing) event->persist->event steps.

      Warning

      Mapper-level flush events are designed to operate on attributes local to the immediate object being handled and via SQL operations with the given Connection only. Handlers here should not make alterations to the state of the Session overall, and in general should not affect any relationship() -mapped attributes, as session cascade rules will not function properly, nor is it always known if the related class has already been handled. Operations that are not supported in mapper events include:

      • Session.add()
      • Session.delete()
      • Mapped collection append, add, remove, delete, discard, etc.
      • Mapped relationship attribute set/del events, i.e. someobject.related = someotherobject

      Operations which manipulate the state of the object relative to other objects are better handled within SessionEvents.before_flush().

      Parameters:
      • mapper – the Mapper which is the target of this event.
      • connection – the Connection being used to emit INSERT statements for this instance. This provides a handle into the current transaction on the target database specific to this instance.
      • target – the mapped instance being persisted. If the event is configured with raw=True, this will instead be the InstanceState state-management object associated with the instance.
      Returns:

      No return value is supported by this event.

      after_update(mapper, connection, target)

      Receive an object instance after an UPDATE statement is emitted corresponding to that instance.

      This event is used to modify in-Python-only state on the instance after an UPDATE occurs, as well as to emit additional SQL statements on the given connection.

      This method is called for all instances that are marked as “dirty”, even those which have no net changes to their column-based attributes, and for which no UPDATE statement has proceeded. An object is marked as dirty when any of its column-based attributes have a “set attribute” operation called or when any of its collections are modified. If, at update time, no column-based attributes have any net changes, no UPDATE statement will be issued. This means that an instance being sent to after_update() is not a guarantee that an UPDATE statement has been issued.

      To detect if the column-based attributes on the object have net changes, and therefore resulted in an UPDATE statement, use object_session(instance).is_modified(instance, include_collections=False).

      The event is often called for a batch of objects of the same class after their UPDATE statements have been emitted at once in a previous step. In the extremely rare case that this is not desirable, the mapper() can be configured with batch=False, which will cause batches of instances to be broken up into individual (and more poorly performing) event->persist->event steps.

      Warning

      Mapper-level flush events are designed to operate on attributes local to the immediate object being handled and via SQL operations with the given Connection only. Handlers here should not make alterations to the state of the Session overall, and in general should not affect any relationship() -mapped attributes, as session cascade rules will not function properly, nor is it always known if the related class has already been handled. Operations that are not supported in mapper events include:

      • Session.add()
      • Session.delete()
      • Mapped collection append, add, remove, delete, discard, etc.
      • Mapped relationship attribute set/del events, i.e. someobject.related = someotherobject

      Operations which manipulate the state of the object relative to other objects are better handled within SessionEvents.before_flush().

      Parameters:
      • mapper – the Mapper which is the target of this event.
      • connection – the Connection being used to emit UPDATE statements for this instance. This provides a handle into the current transaction on the target database specific to this instance.
      • target – the mapped instance being persisted. If the event is configured with raw=True, this will instead be the InstanceState state-management object associated with the instance.
      Returns:

      No return value is supported by this event.

      append_result(mapper, context, row, target, result, **flags)

      Receive an object instance before that instance is appended to a result list.

      This is a rarely used hook which can be used to alter the construction of a result list returned by Query.

      Parameters:
      • mapper – the Mapper which is the target of this event.
      • context – the QueryContext, which includes a handle to the current Query in progress as well as additional state information.
      • row – the result row being handled. This may be an actual RowProxy or may be a dictionary containing Column objects as keys.
      • target – the mapped instance being populated. If the event is configured with raw=True, this will instead be the InstanceState state-management object associated with the instance.
      • result – a list-like object where results are being appended.
      • **flags – Additional state information about the current handling of the row.
      Returns:

      If this method is registered with retval=True, a return value of EXT_STOP will prevent the instance from being appended to the given result list, whereas a return value of EXT_CONTINUE will result in the default behavior of appending the value to the result list.

      before_delete(mapper, connection, target)

      Receive an object instance before a DELETE statement is emitted corresponding to that instance.

      This event is used to emit additional SQL statements on the given connection as well as to perform application specific bookkeeping related to a deletion event.

      The event is often called for a batch of objects of the same class before their DELETE statements are emitted at once in a later step.

      Warning

      Mapper-level flush events are designed to operate on attributes local to the immediate object being handled and via SQL operations with the given Connection only. Handlers here should not make alterations to the state of the Session overall, and in general should not affect any relationship() -mapped attributes, as session cascade rules will not function properly, nor is it always known if the related class has already been handled. Operations that are not supported in mapper events include:

      • Session.add()
      • Session.delete()
      • Mapped collection append, add, remove, delete, discard, etc.
      • Mapped relationship attribute set/del events, i.e. someobject.related = someotherobject

      Operations which manipulate the state of the object relative to other objects are better handled within SessionEvents.before_flush().

      Parameters:
      • mapper – the Mapper which is the target of this event.
      • connection – the Connection being used to emit DELETE statements for this instance. This provides a handle into the current transaction on the target database specific to this instance.
      • target – the mapped instance being deleted. If the event is configured with raw=True, this will instead be the InstanceState state-management object associated with the instance.
      Returns:

      No return value is supported by this event.

      before_insert(mapper, connection, target)

      Receive an object instance before an INSERT statement is emitted corresponding to that instance.

      This event is used to modify local, non-object related attributes on the instance before an INSERT occurs, as well as to emit additional SQL statements on the given connection.

      The event is often called for a batch of objects of the same class before their INSERT statements are emitted at once in a later step. In the extremely rare case that this is not desirable, the mapper() can be configured with batch=False, which will cause batches of instances to be broken up into individual (and more poorly performing) event->persist->event steps.

      Warning

      Mapper-level flush events are designed to operate on attributes local to the immediate object being handled and via SQL operations with the given Connection only. Handlers here should not make alterations to the state of the Session overall, and in general should not affect any relationship() -mapped attributes, as session cascade rules will not function properly, nor is it always known if the related class has already been handled. Operations that are not supported in mapper events include:

      • Session.add()
      • Session.delete()
      • Mapped collection append, add, remove, delete, discard, etc.
      • Mapped relationship attribute set/del events, i.e. someobject.related = someotherobject

      Operations which manipulate the state of the object relative to other objects are better handled within SessionEvents.before_flush().

      Parameters:
      • mapper – the Mapper which is the target of this event.
      • connection – the Connection being used to emit INSERT statements for this instance. This provides a handle into the current transaction on the target database specific to this instance.
      • target – the mapped instance being persisted. If the event is configured with raw=True, this will instead be the InstanceState state-management object associated with the instance.
      Returns:

      No return value is supported by this event.

      before_update(mapper, connection, target)

      Receive an object instance before an UPDATE statement is emitted corresponding to that instance.

      This event is used to modify local, non-object related attributes on the instance before an UPDATE occurs, as well as to emit additional SQL statements on the given connection.

      This method is called for all instances that are marked as “dirty”, even those which have no net changes to their column-based attributes. An object is marked as dirty when any of its column-based attributes have a “set attribute” operation called or when any of its collections are modified. If, at update time, no column-based attributes have any net changes, no UPDATE statement will be issued. This means that an instance being sent to before_update() is not a guarantee that an UPDATE statement will be issued, although you can affect the outcome here by modifying attributes so that a net change in value does exist.

      To detect if the column-based attributes on the object have net changes, and will therefore generate an UPDATE statement, use object_session(instance).is_modified(instance, include_collections=False).

      The event is often called for a batch of objects of the same class before their UPDATE statements are emitted at once in a later step. In the extremely rare case that this is not desirable, the mapper() can be configured with batch=False, which will cause batches of instances to be broken up into individual (and more poorly performing) event->persist->event steps.

      Warning

      Mapper-level flush events are designed to operate on attributes local to the immediate object being handled and via SQL operations with the given Connection only. Handlers here should not make alterations to the state of the Session overall, and in general should not affect any relationship() -mapped attributes, as session cascade rules will not function properly, nor is it always known if the related class has already been handled. Operations that are not supported in mapper events include:

      • Session.add()
      • Session.delete()
      • Mapped collection append, add, remove, delete, discard, etc.
      • Mapped relationship attribute set/del events, i.e. someobject.related = someotherobject

      Operations which manipulate the state of the object relative to other objects are better handled within SessionEvents.before_flush().

      Parameters:
      • mapper – the Mapper which is the target of this event.
      • connection – the Connection being used to emit UPDATE statements for this instance. This provides a handle into the current transaction on the target database specific to this instance.
      • target – the mapped instance being persisted. If the event is configured with raw=True, this will instead be the InstanceState state-management object associated with the instance.
      Returns:

      No return value is supported by this event.

      create_instance(mapper, context, row, class_)

      Receive a row when a new object instance is about to be created from that row.

      The method can choose to create the instance itself, or it can return EXT_CONTINUE to indicate normal object creation should take place. This listener is typically registered with retval=True.

      Parameters:
      • mapper – the Mapper which is the target of this event.
      • context – the QueryContext, which includes a handle to the current Query in progress as well as additional state information.
      • row – the result row being handled. This may be an actual RowProxy or may be a dictionary containing Column objects as keys.
      • class_ – the mapped class.
      Returns:

      When configured with retval=True, the return value should be a newly created instance of the mapped class, or EXT_CONTINUE indicating that default object construction should take place.

      instrument_class(mapper, class_)

      Receive a class when the mapper is first constructed, before instrumentation is applied to the mapped class.

      This event is the earliest phase of mapper construction. Most attributes of the mapper are not yet initialized.

      This listener can either be applied to the Mapper class overall, or to any un-mapped class which serves as a base for classes that will be mapped (using the propagate=True flag):

      Base = declarative_base()
      
      @event.listens_for(Base, "instrument_class", propagate=True)
      def on_new_class(mapper, cls_):
          " ... "
      Parameters:
      • mapper – the Mapper which is the target of this event.
      • class_ – the mapped class.
      mapper_configured(mapper, class_)

      Called when the mapper for the class is fully configured.

      This event is the latest phase of mapper construction, and is invoked when the mapped classes are first used, so that relationships between mappers can be resolved. When the event is called, the mapper should be in its final state.

      While the configuration event normally occurs automatically, it can be forced to occur ahead of time, in the case where the event is needed before any actual mapper usage, by using the configure_mappers() function.

      Parameters:
      • mapper – the Mapper which is the target of this event.
      • class_ – the mapped class.
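
      A sketch of forcing the configuration step so that a mapper_configured listener fires before any actual mapper usage; SomeClass is a hypothetical mapped class:

      from sqlalchemy import event
      from sqlalchemy.orm import configure_mappers

      @event.listens_for(SomeClass, "mapper_configured")
      def receive_mapper_configured(mapper, class_):
          print("mapper for %s is ready" % class_.__name__)

      # force configuration now, rather than waiting for first use
      configure_mappers()
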
      populate_instance(mapper, context, row, target, **flags)

      Receive an instance before that instance has its attributes populated.

      This usually corresponds to a newly loaded instance but may also correspond to an already-loaded instance which has unloaded attributes to be populated. The method may be called many times for a single instance, as multiple result rows are used to populate eagerly loaded collections.

      Most usages of this hook are obsolete. For a generic “object has been newly created from a row” hook, use InstanceEvents.load().

      Parameters:
      • mapper – the Mapper which is the target of this event.
      • context – the QueryContext, which includes a handle to the current Query in progress as well as additional state information.
      • row – the result row being handled. This may be an actual RowProxy or may be a dictionary containing Column objects as keys.
      • target – the mapped instance. If the event is configured with raw=True, this will instead be the InstanceState state-management object associated with the instance.
      Returns:

      When configured with retval=True, a return value of EXT_STOP will bypass instance population by the mapper. A value of EXT_CONTINUE indicates that default instance population should take place.

      translate_row(mapper, context, row)

      Perform pre-processing on the given result row and return a new row instance.

      This listener is typically registered with retval=True. It is called when the mapper first receives a row, before the object identity or the instance itself has been derived from that row. The given row may or may not be a RowProxy object - it will always be a dictionary-like object which contains mapped columns as keys. The returned object should also be a dictionary-like object which recognizes mapped columns as keys.

      Parameters:
      • mapper – the Mapper which is the target of this event.
      • context – the QueryContext, which includes a handle to the current Query in progress as well as additional state information.
      • row – the result row being handled. This may be an actual RowProxy or may be a dictionary containing Column objects as keys.
      Returns:

      When configured with retval=True, the function should return a dictionary-like row object, or EXT_CONTINUE, indicating the original row should be used.

      Instance Events

      class sqlalchemy.orm.events.InstanceEvents

      Bases: sqlalchemy.event.Events

      Define events specific to object lifecycle.

      e.g.:

      from sqlalchemy import event
      
      def my_load_listener(target, context):
          print "on load!"
      
      event.listen(SomeClass, 'load', my_load_listener)

      Available targets include:

      • mapped classes
      • unmapped superclasses of mapped or to-be-mapped classes (using the propagate=True flag)
      • Mapper objects
      • the Mapper class itself and the mapper() function indicate listening for all mappers.

      Changed in version 0.8.0: instance events can be associated with unmapped superclasses of mapped classes.

      Instance events are closely related to mapper events, but are more specific to the instance and its instrumentation, rather than its system of persistence.

      When using InstanceEvents, several modifiers are available to the event.listen() function.

      Parameters:
      • propagate=False – When True, the event listener should be applied to all inheriting classes as well as the class which is the target of this listener.
      • raw=False – When True, the “target” argument passed to applicable event listener functions will be the instance’s InstanceState management object, rather than the mapped instance itself.
      expire(target, attrs)

      Receive an object instance after its attributes or some subset have been expired.

      attrs is an iterable of attribute names. If None, the entire state was expired.

      Parameters:
      • target – the mapped instance. If the event is configured with raw=True, this will instead be the InstanceState state-management object associated with the instance.
      • attrs – iterable collection of attribute names which were expired, or None if all attributes were expired.
      first_init(manager, cls)

      Called when the first instance of a particular mapping is constructed.

      init(target, args, kwargs)

      Receive an instance when its constructor is called.

      This method is only called during a userland construction of an object. It is not called when an object is loaded from the database.

      init_failure(target, args, kwargs)

      Receive an instance when its constructor has been called and has raised an exception.

      This method is only called during a userland construction of an object. It is not called when an object is loaded from the database.

      load(target, context)

      Receive an object instance after it has been created via __new__, and after initial attribute population has occurred.

      This typically occurs when the instance is created based on incoming result rows, and is only called once for that instance’s lifetime.

      Note that during a result-row load, this method is called upon the first row received for this instance. Note that some attributes and collections may or may not be loaded or even initialized, depending on what’s present in the result rows.

      Parameters:
      • target – the mapped instance. If the event is configured with raw=True, this will instead be the InstanceState state-management object associated with the instance.
      • context – the QueryContext corresponding to the current Query in progress. This argument may be None if the load does not correspond to a Query, such as during Session.merge().
      pickle(target, state_dict)

      Receive an object instance when its associated state is being pickled.

      Parameters:
      • target – the mapped instance. If the event is configured with raw=True, this will instead be the InstanceState state-management object associated with the instance.
      • state_dict – the dictionary returned by InstanceState.__getstate__, containing the state to be pickled.
      refresh(target, context, attrs)

      Receive an object instance after one or more attributes have been refreshed from a query.

      Parameters:
      • target – the mapped instance. If the event is configured with raw=True, this will instead be the InstanceState state-management object associated with the instance.
      • context – the QueryContext corresponding to the current Query in progress.
      • attrs – iterable collection of attribute names which were populated, or None if all column-mapped, non-deferred attributes were populated.
      resurrect(target)

      Receive an object instance as it is ‘resurrected’ from garbage collection, which occurs when a “dirty” state falls out of scope.

      Parameters:
      • target – the mapped instance. If the event is configured with raw=True, this will instead be the InstanceState state-management object associated with the instance.
      unpickle(target, state_dict)

      Receive an object instance after its associated state has been unpickled.

      Parameters:
      • target – the mapped instance. If the event is configured with raw=True, this will instead be the InstanceState state-management object associated with the instance.
      • state_dict – the dictionary sent to InstanceState.__setstate__, containing the state dictionary which was pickled.

      Session Events

      class sqlalchemy.orm.events.SessionEvents

      Bases: sqlalchemy.event.Events

      Define events specific to Session lifecycle.

      e.g.:

      from sqlalchemy import event
      from sqlalchemy.orm import sessionmaker
      
      def my_before_commit(session):
          print "before commit!"
      
      Session = sessionmaker()
      
      event.listen(Session, "before_commit", my_before_commit)

      The listen() function will accept Session objects as well as the return result of sessionmaker() and scoped_session().

      Additionally, it accepts the Session class which will apply listeners to all Session instances globally.

      after_attach(session, instance)

      Execute after an instance is attached to a session.

      This is called after an add, delete or merge.

      Note

      As of 0.8, this event fires off after the item has been fully associated with the session, which is different than previous releases. For event handlers that require the object not yet be part of session state (such as handlers which may autoflush while the target object is not yet complete), consider the new before_attach() event.

      See also

      before_attach()

      after_begin(session, transaction, connection)

      Execute after a transaction is begun on a connection

      Parameters:
      • session – The target Session.
      • transaction – The SessionTransaction.
      • connection – The Connection on which the transaction was begun.
      after_bulk_delete(session, query, query_context, result)

      Execute after a bulk delete operation to the session.

      This is called as a result of the Query.delete() method.

      Parameters:
      • query – the Query object that this update operation was called upon.
      • query_context – The QueryContext object, corresponding to the invocation of an ORM query.
      • result – the ResultProxy returned as a result of the bulk DELETE operation.
      after_bulk_update(session, query, query_context, result)

      Execute after a bulk update operation to the session.

      This is called as a result of the Query.update() method.

      Parameters:
      • query – the Query object that this update operation was called upon.
      • query_context – The QueryContext object, corresponding to the invocation of an ORM query.
      • result – the ResultProxy returned as a result of the bulk UPDATE operation.
      after_commit(session)

      Execute after a commit has occurred.

      Note

      The after_commit() hook is not per-flush, that is, the Session can emit SQL to the database many times within the scope of a transaction. For interception of these events, use the before_flush(), after_flush(), or after_flush_postexec() events.

      Note

      The Session is not in an active transaction when the after_commit() event is invoked, and therefore cannot emit SQL. To emit SQL corresponding to every transaction, use the before_commit() event.

      Parameters:session – The target Session.
      after_flush(session, flush_context)

      Execute after flush has completed, but before commit has been called.

      Note that the session’s state is still in pre-flush, i.e. the ‘new’, ‘dirty’, and ‘deleted’ lists still show pre-flush state, as do the history settings on instance attributes.

      Parameters:
      • session – The target Session.
      • flush_context – Internal UOWTransaction object which handles the details of the flush.
      after_flush_postexec(session, flush_context)

      Execute after flush has completed, and after the post-exec state occurs.

      This will be when the ‘new’, ‘dirty’, and ‘deleted’ lists are in their final state. An actual commit() may or may not have occurred, depending on whether or not the flush started its own transaction or participated in a larger transaction.

      Parameters:
      • session – The target Session.
      • flush_context – Internal UOWTransaction object which handles the details of the flush.
      after_rollback(session)

      Execute after a real DBAPI rollback has occurred.

      Note that this event only fires when the actual rollback against the database occurs - it does not fire each time the Session.rollback() method is called, if the underlying DBAPI transaction has already been rolled back. In many cases, the Session will not be in an “active” state during this event, as the current transaction is not valid. To acquire a Session which is active after the outermost rollback has proceeded, use the SessionEvents.after_soft_rollback() event, checking the Session.is_active flag.

      Parameters:session – The target Session.
      after_soft_rollback(session, previous_transaction)

      Execute after any rollback has occurred, including “soft” rollbacks that don’t actually emit at the DBAPI level.

      This corresponds to both nested and outer rollbacks, i.e. the innermost rollback that calls the DBAPI’s rollback() method, as well as the enclosing rollback calls that only pop themselves from the transaction stack.

      The given Session can be used to invoke SQL and Session.query() operations after an outermost rollback by first checking the Session.is_active flag:

      @event.listens_for(Session, "after_soft_rollback")
      def do_something(session, previous_transaction):
          if session.is_active:
              session.execute("select * from some_table")
      Parameters:
      • session – The target Session.
      • previous_transaction – The SessionTransaction transactional marker object which was just closed. The current SessionTransaction for the given Session is available via the Session.transaction attribute.

      New in version 0.7.3.

      after_transaction_create(session, transaction)

      Execute when a new SessionTransaction is created.

      This event differs from after_begin() in that it occurs for each SessionTransaction overall, as opposed to when transactions are begun on individual database connections. It is also invoked for nested transactions and subtransactions, and is always matched by a corresponding after_transaction_end() event (assuming normal operation of the Session).

      Parameters:
      • session – the target Session.
      • transaction – the target SessionTransaction.

      New in version 0.8.

      after_transaction_end(session, transaction)

      Execute when the span of a SessionTransaction ends.

      This event differs from after_commit() in that it corresponds to all SessionTransaction objects in use, including those for nested transactions and subtransactions, and is always matched by a corresponding after_transaction_create() event.

      Parameters:
      • session – the target Session.
      • transaction – the target SessionTransaction.

      New in version 0.8.

      before_attach(session, instance)

      Execute before an instance is attached to a session.

      This is called before an add, delete or merge causes the object to be part of the session.

      New in version 0.8: Note that after_attach() now fires off after the item is part of the session. before_attach() is provided for those cases where the item should not yet be part of the session state.

      See also

      after_attach()

      before_commit(session)

      Execute before commit is called.

      Note

      The before_commit() hook is not per-flush, that is, the Session can emit SQL to the database many times within the scope of a transaction. For interception of these events, use the before_flush(), after_flush(), or after_flush_postexec() events.

      Parameters:session – The target Session.
      before_flush(session, flush_context, instances)

      Execute before flush process has started.

      Parameters:
      • session – The target Session.
      • flush_context – Internal UOWTransaction object which handles the details of the flush.
      • instances – Usually None, this is the collection of objects which can be passed to the Session.flush() method (note this usage is deprecated).
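
      As a hedged sketch, a before_flush() handler can examine the session's pending state before it is written; Session below refers to the sessionmaker() result from the earlier example, and the handler body is illustrative:

      from sqlalchemy import event

      @event.listens_for(Session, "before_flush")
      def receive_before_flush(session, flush_context, instances):
          # objects pending INSERT
          for obj in session.new:
              print "pending insert: %r" % obj
          # objects pending UPDATE; is_modified() filters out false positives
          for obj in session.dirty:
              if session.is_modified(obj, include_collections=False):
                  print "pending update: %r" % obj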

      Instrumentation Events

      class sqlalchemy.orm.events.InstrumentationEvents

      Bases: sqlalchemy.event.Events

      Events related to class instrumentation.

      The listeners here support being established against any new-style class, that is, any object that is a subclass of ‘type’. Events will then be fired off for events against that class. If the “propagate=True” flag is passed to event.listen(), the event will fire off for subclasses of that class as well.

      The Python type builtin is also accepted as a target, which when used has the effect of events being emitted for all classes.

      Note the “propagate” flag here is defaulted to True, unlike the other class level events where it defaults to False. This means that new subclasses will also be the subject of these events, when a listener is established on a superclass.

      Changed in version 0.8: Events here will emit based on comparing the incoming class to the type of class passed to event.listen(). Previously, the event would fire for any class unconditionally regardless of what class was sent for listening, despite documentation which stated the contrary.
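
      As a rough sketch of usage, a listener can be established against a particular base class, or against the type builtin in order to receive events for all classes; Base below is assumed to be a declarative base defined elsewhere, and the handler bodies are illustrative:

      from sqlalchemy import event

      # propagate defaults to True for these events, so subclasses of Base
      # are included
      @event.listens_for(Base, "class_instrument")
      def receive_class_instrument(cls):
          print "instrumented class: %s" % cls

      # using the type builtin as the target emits for all classes
      @event.listens_for(type, "attribute_instrument")
      def receive_attribute_instrument(cls, key, inst):
          print "instrumented attribute %s on %s" % (key, cls)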

      attribute_instrument(cls, key, inst)

      Called when an attribute is instrumented.

      class_instrument(cls)

      Called after the given class is instrumented.

      To get at the ClassManager, use manager_of_class().

      class_uninstrument(cls)

      Called before the given class is uninstrumented.

      To get at the ClassManager, use manager_of_class().


      Examples

      The SQLAlchemy distribution includes a variety of code examples illustrating a select set of patterns, some typical and some not so typical. All are runnable and can be found in the /examples directory of the distribution. Each example contains a README in its __init__.py file, each of which is listed below.

      Additional SQLAlchemy examples, some user contributed, are available on the wiki at http://www.sqlalchemy.org/trac/wiki/UsageRecipes.

      Adjacency List

      Location: /examples/adjacency_list/

      An example of a dictionary-of-dictionaries structure mapped using an adjacency list model.

      E.g.:

      node = TreeNode('rootnode')
      node.append('node1')
      node.append('node3')
      session.add(node)
      session.commit()
      
      dump_tree(node)

      Associations

      Location: /examples/association/

      Examples illustrating the usage of the “association object” pattern, where an intermediary class mediates the relationship between two classes that are associated in a many-to-many pattern.

      This directory includes the following examples:

      • basic_association.py - illustrate a many-to-many relationship between an “Order” and a collection of “Item” objects, associating a purchase price with each via an association object called “OrderItem”
      • proxied_association.py - same example as basic_association, adding in usage of sqlalchemy.ext.associationproxy to make explicit references to “OrderItem” optional.
      • dict_of_sets_with_default.py - an advanced association proxy example which illustrates nesting of association proxies to produce multi-level Python collections, in this case a dictionary with string keys and sets of integers as values, which conceal the underlying mapped classes.

      Attribute Instrumentation

      Location: /examples/custom_attributes/

      Two examples illustrating modifications to SQLAlchemy’s attribute management system.

      listen_for_events.py illustrates the usage of AttributeExtension to intercept attribute events. It additionally illustrates a way to automatically attach these listeners to all class attributes using an InstrumentationManager.

      custom_management.py illustrates much deeper usage of InstrumentationManager as well as collection adaptation, to completely change the underlying method used to store state on an object. This example was developed to illustrate techniques which would be used by other third party object instrumentation systems to interact with SQLAlchemy’s event system and is only intended for very intricate framework integrations.

      Dogpile Caching

      Location: /examples/dogpile_caching/

      Illustrates how to embed dogpile.cache functionality within the Query object, allowing full cache control as well as the ability to pull “lazy loaded” attributes from long-term cache.

      Changed in version 0.8: The example was modernized to use dogpile.cache, replacing Beaker as the caching library in use.

      In this demo, the following techniques are illustrated:

      • Using custom subclasses of Query
      • Basic technique of circumventing Query to pull from a custom cache source instead of the database.
      • Rudimental caching with dogpile.cache, using “regions” which allow global control over a fixed set of configurations.
      • Using custom MapperOption objects to configure options on a Query, including the ability to invoke the options deep within an object graph when lazy loads occur.

      E.g.:

      # query for Person objects, specifying cache
      q = Session.query(Person).options(FromCache("default"))
      
      # specify that each Person's "addresses" collection comes from
      # cache too
      q = q.options(RelationshipCache(Person.addresses, "default"))
      
      # query
      print q.all()

      To run, both SQLAlchemy and dogpile.cache must be installed or on the current PYTHONPATH. The demo will create a local directory for datafiles, insert initial data, and run. Running the demo a second time will utilize the cache files already present, and exactly one SQL statement against two tables will be emitted - the displayed result however will utilize dozens of lazyloads that all pull from cache.

      The demo scripts themselves, in order of complexity, are run as Python modules so that relative imports work:

      python -m examples.dogpile_caching.helloworld
      
      python -m examples.dogpile_caching.relationship_caching
      
      python -m examples.dogpile_caching.advanced
      
      python -m examples.dogpile_caching.local_session_caching

      Listing of files:

      environment.py - Establish the Session, a dictionary of “regions”, a sample cache region against a .dbm file, data / cache file paths, and configurations, bootstrap fixture data if necessary.

      caching_query.py - Represent functions and classes which allow the usage of Dogpile caching with SQLAlchemy. Introduces a query option called FromCache.

      model.py - The datamodel, which represents Person that has multiple Address objects, each with PostalCode, City, Country

      fixture_data.py - creates demo PostalCode, Address, Person objects in the database.

      helloworld.py - the basic idea.

      relationship_caching.py - Illustrates how to add cache options on relationship endpoints, so that lazyloads load from cache.

      advanced.py - Further examples of how to use FromCache. Combines techniques from the first two scripts.

      local_session_caching.py - Grok everything so far? This example creates a new dogpile.cache backend that will persist data in a dictionary which is local to the current session. remove() the session and the cache is gone.

      Directed Graphs

      Location: /examples/graphs/

      An example of persistence for a directed graph structure. The graph is stored as a collection of edges, each referencing both a “lower” and an “upper” node in a table of nodes. Basic persistence and querying for lower- and upper- neighbors are illustrated:

      n2 = Node(2)
      n5 = Node(5)
      n2.add_neighbor(n5)
      print n2.higher_neighbors()

      Dynamic Relations as Dictionaries

      Location: /examples/dynamic_dict/

      Illustrates how to place a dictionary-like facade on top of a “dynamic” relation, so that dictionary operations (assuming simple string keys) can operate upon a large collection without loading the full collection at once.
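
      A minimal sketch of the idea (not the example's actual code), assuming a Parent.children relationship configured with lazy='dynamic' and a Child class with a string "name" column:

      class ChildDict(object):
          """Dictionary-style facade over a 'dynamic' relationship."""

          def __init__(self, parent):
              self.parent = parent

          def __getitem__(self, key):
              # emits a filtered SELECT rather than loading the whole collection
              return self.parent.children.filter_by(name=key).one()

          def __setitem__(self, key, child):
              child.name = key
              self.parent.children.append(child)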

      Generic Associations

      Location: /examples/generic_associations

      Illustrates various methods of associating multiple types of parents with a particular child object.

      The examples all use the declarative extension along with declarative mixins. Each one presents the identical use case at the end - two classes, Customer and Supplier, both subclassing the HasAddresses mixin, which ensures that the parent class is provided with an addresses collection which contains Address objects.

      The configurations include:

      • table_per_related.py - illustrates a distinct table per related collection.
      • table_per_association.py - illustrates a shared collection table, using a table per association.
      • discriminator_on_association.py - shared collection table and shared association table, including a discriminator column.
      • generic_fk.py - imitates the approach taken by popular frameworks such as Django and Ruby on Rails to create a so-called “generic foreign key”.

      The discriminator_on_association.py and generic_fk.py scripts are modernized versions of recipes presented in the 2007 blog post Polymorphic Associations with SQLAlchemy.

      Horizontal Sharding

      Location: /examples/sharding

      A basic example of using the SQLAlchemy Sharding API. Sharding refers to horizontally scaling data across multiple databases.

      The basic components of a “sharded” mapping are:

      • multiple databases, each assigned a ‘shard id’
      • a function which can return a single shard id, given an instance to be saved; this is called “shard_chooser”
      • a function which can return a list of shard ids which apply to a particular instance identifier; this is called “id_chooser”. If it returns all shard ids, all shards will be searched.
      • a function which can return a list of shard ids to try, given a particular Query (“query_chooser”). If it returns all shard ids, all shards will be queried and the results joined together.

      In this example, four sqlite databases will store information about weather data on a database-per-continent basis. We provide example shard_chooser, id_chooser and query_chooser functions. The query_chooser illustrates inspection of the SQL expression element in order to attempt to determine a single shard being requested.
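
      As a rough sketch of how these pieces fit together (the engines, chooser bodies, and session construction below are illustrative assumptions, not the example's actual code):

      from sqlalchemy import create_engine
      from sqlalchemy.ext.horizontal_shard import ShardedSession

      engines = {
          'north_america': create_engine('sqlite://'),
          'asia': create_engine('sqlite://'),
      }

      def shard_chooser(mapper, instance, clause=None):
          # return a single shard id for an instance being persisted
          return 'north_america'

      def id_chooser(query, ident):
          # given a primary key identity, return the shard ids to search
          return list(engines)

      def query_chooser(query):
          # return the shard ids a Query should run against
          return list(engines)

      session = ShardedSession(
          shard_chooser=shard_chooser,
          id_chooser=id_chooser,
          query_chooser=query_chooser,
          shards=engines,
      )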

      The construction of generic sharding routines is an ambitious approach to the issue of organizing instances among multiple databases. For a more plain-spoken alternative, the “distinct entity” approach is a simple method of assigning objects to different tables (and potentially database nodes) in an explicit way - described on the wiki at EntityName.

      Inheritance Mappings

      Location: /examples/inheritance/

      Working examples of single-table, joined-table, and concrete-table inheritance as described in datamapping_inheritance.

      Large Collections

      Location: /examples/large_collection/

      Large collection example.

      Illustrates the options to use with relationship() when the list of related objects is very large, including:

      • “dynamic” relationships which query slices of data as accessed
      • how to use ON DELETE CASCADE in conjunction with passive_deletes=True to greatly improve the performance of related collection deletion.
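
      A hedged sketch of the ON DELETE CASCADE approach, using illustrative Parent/Child classes rather than the example's actual ones:

      from sqlalchemy import Column, Integer, ForeignKey
      from sqlalchemy.orm import relationship
      from sqlalchemy.ext.declarative import declarative_base

      Base = declarative_base()

      class Parent(Base):
          __tablename__ = 'parent'
          id = Column(Integer, primary_key=True)

          # passive_deletes=True lets the database cascade the DELETE to
          # child rows, instead of loading the full collection to delete it
          children = relationship("Child", passive_deletes=True,
                                  cascade="all, delete-orphan")

      class Child(Base):
          __tablename__ = 'child'
          id = Column(Integer, primary_key=True)
          parent_id = Column(Integer,
                             ForeignKey('parent.id', ondelete='CASCADE'))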

      Nested Sets

      Location: /examples/nested_sets/

      Illustrates a rudimentary way to implement the “nested sets” pattern for hierarchical data using the SQLAlchemy ORM.

      Polymorphic Associations

      See Generic Associations for a modern version of polymorphic associations.

      PostGIS Integration

      Location: /examples/postgis

      A naive example illustrating techniques to help embed PostGIS functionality.

      This example was originally developed in the hopes that it would be extrapolated into a comprehensive PostGIS integration layer. We are pleased to announce that this has come to fruition as GeoAlchemy.

      The example illustrates:

      • a DDL extension which allows CREATE/DROP to work in conjunction with AddGeometryColumn/DropGeometryColumn
      • a Geometry type, as well as a few subtypes, which convert result row values to a GIS-aware object, and also integrates with the DDL extension.
      • a GIS-aware object which stores a raw geometry value and provides a factory for functions such as AsText().
      • an ORM comparator which can override standard column methods on mapped objects to produce GIS operators.
      • an attribute event listener that intercepts strings and converts to GeomFromText().
      • a standalone operator example.

      The implementation is limited to only public, well known and simple to use extension points.

      E.g.:

      print session.query(Road).filter(Road.road_geom.intersects(r1.road_geom)).all()

      Versioned Objects

      Location: /examples/versioning

      Illustrates an extension which creates version tables for entities and stores records for each change. The same idea as Elixir’s versioned extension, but more efficient (uses attribute API to get history) and handles class inheritance. The given extensions generate an anonymous “history” class which represents historical versions of the target object.

      Usage is illustrated via a unit test module test_versioning.py, which can be run via nose:

      cd examples/versioning
      nosetests -v

      A fragment of example usage, using declarative:

      from history_meta import Versioned, versioned_session
      
      Base = declarative_base()
      
      class SomeClass(Versioned, Base):
          __tablename__ = 'sometable'
      
          id = Column(Integer, primary_key=True)
          name = Column(String(50))
      
          def __eq__(self, other):
              assert type(other) is SomeClass and other.id == self.id
      
      Session = sessionmaker(bind=engine)
      versioned_session(Session)
      
      sess = Session()
      sc = SomeClass(name='sc1')
      sess.add(sc)
      sess.commit()
      
      sc.name = 'sc1modified'
      sess.commit()
      
      assert sc.version == 2
      
      SomeClassHistory = SomeClass.__history_mapper__.class_
      
      assert sess.query(SomeClassHistory).\
                  filter(SomeClassHistory.version == 1).\
                  all() \
                  == [SomeClassHistory(version=1, name='sc1')]

      The Versioned mixin is designed to work with declarative. To use the extension with classical mappers, the _history_mapper function can be applied:

      from history_meta import _history_mapper
      
      m = mapper(SomeClass, sometable)
      _history_mapper(m)
      
      SomeHistoryClass = SomeClass.__history_mapper__.class_

      Vertical Attribute Mapping

      Location: /examples/vertical

      Illustrates “vertical table” mappings.

      A “vertical table” refers to a technique where individual attributes of an object are stored as distinct rows in a table. The “vertical table” technique is used to persist objects which can have a varied set of attributes, at the expense of simple query control and brevity. It is commonly found in content/document management systems in order to represent user-created structures flexibly.

      Two variants on the approach are given. In the second, each row references a “datatype” which contains information about the type of information stored in the attribute, such as integer, string, or date.

      Example:

      shrew = Animal(u'shrew')
      shrew[u'cuteness'] = 5
      shrew[u'weasel-like'] = False
      shrew[u'poisonous'] = True
      
      session.add(shrew)
      session.flush()
      
      q = (session.query(Animal).
           filter(Animal.facts.any(
             and_(AnimalFact.key == u'weasel-like',
                  AnimalFact.value == True))))
      print 'weasel-like animals', q.all()

      XML Persistence

      Location: /examples/elementtree/

      Illustrates three strategies for persisting and querying XML documents as represented by ElementTree in a relational database. The techniques do not apply any mappings to the ElementTree objects directly, so are compatible with the native cElementTree as well as lxml, and can be adapted to suit any kind of DOM representation system. Querying along xpath-like strings is illustrated as well.

      In order of complexity:

      • pickle.py - Quick and dirty, serialize the whole DOM into a BLOB column. While the example is very brief, it has very limited functionality.
      • adjacency_list.py - Each DOM node is stored in an individual table row, with attributes represented in a separate table. The nodes are associated in a hierarchy using an adjacency list structure. A query function is introduced which can search for nodes along any path with a given structure of attributes, basically a (very narrow) subset of xpath.
      • optimized_al.py - Uses the same strategy as adjacency_list.py, but associates each DOM row with its owning document row, so that a full document of DOM nodes can be loaded using O(1) queries - the construction of the “hierarchy” is performed after the load in a non-recursive fashion and is much more efficient.

      E.g.:

      # parse an XML file and persist in the database
      doc = ElementTree.parse("test.xml")
      session.add(Document(file, doc))
      session.commit()
      
      # locate documents with a certain path/attribute structure
      for document in find_document('/somefile/header/field2[@attr=foo]'):
          # dump the XML
          print document

      ORM Exceptions

      SQLAlchemy ORM exceptions.

      sqlalchemy.orm.exc.ConcurrentModificationError

      alias of StaleDataError

      exception sqlalchemy.orm.exc.DetachedInstanceError

      An attempt to access unloaded attributes on a mapped instance that is detached.

      exception sqlalchemy.orm.exc.FlushError

      An invalid condition was detected during flush().

      exception sqlalchemy.orm.exc.MultipleResultsFound

      A single database result was required but more than one were found.

      sqlalchemy.orm.exc.NO_STATE = (<type 'exceptions.AttributeError'>, <type 'exceptions.KeyError'>)

      Exception types that may be raised by instrumentation implementations.

      exception sqlalchemy.orm.exc.NoResultFound

      A database result was required but none was found.

      exception sqlalchemy.orm.exc.ObjectDeletedError(state, msg=None)

      A refresh operation failed to retrieve the database row corresponding to an object’s known primary key identity.

      A refresh operation proceeds when an expired attribute is accessed on an object, or when Query.get() is used to retrieve an object which is, upon retrieval, detected as expired. A SELECT is emitted for the target row based on primary key; if no row is returned, this exception is raised.

      The true meaning of this exception is simply that no row exists for the primary key identifier associated with a persistent object. The row may have been deleted, or in some cases the primary key updated to a new value, outside of the ORM’s management of the target object.

      exception sqlalchemy.orm.exc.ObjectDereferencedError

      An operation cannot complete due to an object being garbage collected.

      exception sqlalchemy.orm.exc.StaleDataError

      An operation encountered database state that is unaccounted for.

      Conditions which cause this to happen include:

      • A flush may have attempted to update or delete rows and an unexpected number of rows were matched during the UPDATE or DELETE statement. Note that when version_id_col is used, rows in UPDATE or DELETE statements are also matched against the current known version identifier.

      • A mapped object with version_id_col was refreshed, and the version number coming back from the database does not match that of the object itself.

      • An object is detached from its parent object; however, the object was previously attached to a different parent identity which was garbage collected, and a decision cannot be made as to whether the new parent was really the most recent “parent”.

        New in version 0.7.4.
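
      As a brief, hedged illustration of the first condition, a mapping can be configured with a version counter via version_id_col (class and column names here are illustrative):

      from sqlalchemy import Column, Integer, String
      from sqlalchemy.ext.declarative import declarative_base
      from sqlalchemy.orm.exc import StaleDataError

      Base = declarative_base()

      class Widget(Base):
          __tablename__ = 'widget'
          id = Column(Integer, primary_key=True)
          version_id = Column(Integer, nullable=False)
          name = Column(String(50))

          __mapper_args__ = {'version_id_col': version_id}

      # if a concurrent transaction has already updated or deleted the row,
      # the flush matches zero rows against the expected version and
      # StaleDataError is raised; a typical response is to roll back:
      #
      #   try:
      #       session.commit()
      #   except StaleDataError:
      #       session.rollback()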

      exception sqlalchemy.orm.exc.UnmappedClassError(cls, msg=None)

      A mapping operation was requested for an unknown class.

      exception sqlalchemy.orm.exc.UnmappedColumnError

      Mapping operation was requested on an unknown column.

      exception sqlalchemy.orm.exc.UnmappedError

      Base for exceptions that involve expected mappings not present.

      exception sqlalchemy.orm.exc.UnmappedInstanceError(obj, msg=None)

      A mapping operation was requested for an unknown instance.


      Association Proxy

      associationproxy is used to create a read/write view of a target attribute across a relationship. It essentially conceals the usage of a “middle” attribute between two endpoints, and can be used to cherry-pick fields from a collection of related objects or to reduce the verbosity of using the association object pattern. Applied creatively, the association proxy allows the construction of sophisticated collections and dictionary views of virtually any geometry, persisted to the database using standard, transparently configured relational patterns.

      Simplifying Scalar Collections

      Consider a many-to-many mapping between two classes, User and Keyword. Each User can have any number of Keyword objects, and vice-versa (the many-to-many pattern is described at Many To Many):

      from sqlalchemy import Column, Integer, String, ForeignKey, Table
      from sqlalchemy.orm import relationship
      from sqlalchemy.ext.declarative import declarative_base
      
      Base = declarative_base()
      
      class User(Base):
          __tablename__ = 'user'
          id = Column(Integer, primary_key=True)
          name = Column(String(64))
          kw = relationship("Keyword", secondary=lambda: userkeywords_table)
      
          def __init__(self, name):
              self.name = name
      
      class Keyword(Base):
          __tablename__ = 'keyword'
          id = Column(Integer, primary_key=True)
          keyword = Column('keyword', String(64))
      
          def __init__(self, keyword):
              self.keyword = keyword
      
      userkeywords_table = Table('userkeywords', Base.metadata,
          Column('user_id', Integer, ForeignKey("user.id"),
                 primary_key=True),
          Column('keyword_id', Integer, ForeignKey("keyword.id"),
                 primary_key=True)
      )

      Reading and manipulating the collection of “keyword” strings associated with User requires traversal from each collection element to the .keyword attribute, which can be awkward:

      >>> user = User('jek')
      >>> user.kw.append(Keyword('cheese inspector'))
      >>> print(user.kw)
      [<__main__.Keyword object at 0x12bf830>]
      >>> print(user.kw[0].keyword)
      cheese inspector
      >>> print([keyword.keyword for keyword in user.kw])
      ['cheese inspector']

      The association_proxy is applied to the User class to produce a “view” of the kw relationship, which only exposes the string value of .keyword associated with each Keyword object:

      from sqlalchemy.ext.associationproxy import association_proxy
      
      class User(Base):
          __tablename__ = 'user'
          id = Column(Integer, primary_key=True)
          name = Column(String(64))
          kw = relationship("Keyword", secondary=lambda: userkeywords_table)
      
          def __init__(self, name):
              self.name = name
      
          # proxy the 'keyword' attribute from the 'kw' relationship
          keywords = association_proxy('kw', 'keyword')

      We can now reference the .keywords collection as a listing of strings, which is both readable and writable. New Keyword objects are created for us transparently:

      >>> user = User('jek')
      >>> user.keywords.append('cheese inspector')
      >>> user.keywords
      ['cheese inspector']
      >>> user.keywords.append('snack ninja')
      >>> user.kw
      [<__main__.Keyword object at 0x12cdd30>, <__main__.Keyword object at 0x12cde30>]

      The AssociationProxy object produced by the association_proxy() function is an instance of a Python descriptor. It is always declared with the user-defined class being mapped, regardless of whether Declarative or classical mappings via the mapper() function are used.

      The proxy functions by operating upon the underlying mapped attribute or collection in response to operations, and changes made via the proxy are immediately apparent in the mapped attribute, as well as vice versa. The underlying attribute remains fully accessible.

      When first accessed, the association proxy performs introspection operations on the target collection so that its behavior corresponds correctly. Details such as whether the locally proxied attribute is a collection (as is typical) or a scalar reference, as well as whether the collection acts like a set, list, or dictionary, are taken into account, so that the proxy acts just like the underlying collection or attribute does.

      Creation of New Values

      When a list append() event (or set add(), dictionary __setitem__(), or scalar assignment event) is intercepted by the association proxy, it instantiates a new instance of the “intermediary” object using its constructor, passing as a single argument the given value. In our example above, an operation like:

      user.keywords.append('cheese inspector')

      Is translated by the association proxy into the operation:

      user.kw.append(Keyword('cheese inspector'))

      The example works here because we have designed the constructor for Keyword to accept a single positional argument, keyword. For those cases where a single-argument constructor isn’t feasible, the association proxy’s creational behavior can be customized using the creator argument, which references a callable (i.e. Python function) that will produce a new object instance given the singular argument. Below we illustrate this using a lambda as is typical:

      class User(Base):
          # ...
      
          # use Keyword(keyword=kw) on append() events
          keywords = association_proxy('kw', 'keyword',
                          creator=lambda kw: Keyword(keyword=kw))

      The creator function accepts a single argument in the case of a list- or set- based collection, or a scalar attribute. In the case of a dictionary-based collection, it accepts two arguments, “key” and “value”. An example of this is below in Proxying to Dictionary Based Collections.

      Simplifying Association Objects

      The “association object” pattern is an extended form of a many-to-many relationship, and is described at Association Object. Association proxies are useful for keeping “association objects” out of the way during regular use.

      Suppose our userkeywords table above had additional columns which we’d like to map explicitly, but in most cases we don’t require direct access to these attributes. Below, we illustrate a new mapping which introduces the UserKeyword class, which is mapped to the userkeywords table illustrated earlier. This class adds an additional column special_key, a value which we occasionally want to access, but not in the usual case. We create an association proxy on the User class called keywords, which will bridge the gap from the user_keywords collection of User to the .keyword attribute present on each UserKeyword:

      from sqlalchemy import Column, Integer, String, ForeignKey
      from sqlalchemy.orm import relationship, backref
      
      from sqlalchemy.ext.associationproxy import association_proxy
      from sqlalchemy.ext.declarative import declarative_base
      
      Base = declarative_base()
      
      class User(Base):
          __tablename__ = 'user'
          id = Column(Integer, primary_key=True)
          name = Column(String(64))
      
          # association proxy of "user_keywords" collection
          # to "keyword" attribute
          keywords = association_proxy('user_keywords', 'keyword')
      
          def __init__(self, name):
              self.name = name
      
      class UserKeyword(Base):
          __tablename__ = 'user_keyword'
          user_id = Column(Integer, ForeignKey('user.id'), primary_key=True)
          keyword_id = Column(Integer, ForeignKey('keyword.id'), primary_key=True)
          special_key = Column(String(50))
      
          # bidirectional attribute/collection of "user"/"user_keywords"
          user = relationship(User,
                      backref=backref("user_keywords",
                                      cascade="all, delete-orphan")
                  )
      
          # reference to the "Keyword" object
          keyword = relationship("Keyword")
      
          def __init__(self, keyword=None, user=None, special_key=None):
              self.user = user
              self.keyword = keyword
              self.special_key = special_key
      
      class Keyword(Base):
          __tablename__ = 'keyword'
          id = Column(Integer, primary_key=True)
          keyword = Column('keyword', String(64))
      
          def __init__(self, keyword):
              self.keyword = keyword
      
          def __repr__(self):
              return 'Keyword(%s)' % repr(self.keyword)

      With the above configuration, we can operate upon the .keywords collection of each User object, and the usage of UserKeyword is concealed:

      >>> user = User('log')
      >>> for kw in (Keyword('new_from_blammo'), Keyword('its_big')):
      ...     user.keywords.append(kw)
      ...
      >>> print(user.keywords)
      [Keyword('new_from_blammo'), Keyword('its_big')]

      Where above, each .keywords.append() operation is equivalent to:

      >>> user.user_keywords.append(UserKeyword(Keyword('its_heavy')))

      The UserKeyword association object has two attributes here which are populated; the .keyword attribute is populated directly as a result of passing the Keyword object as the first argument. The .user argument is then assigned as the UserKeyword object is appended to the User.user_keywords collection, where the bidirectional relationship configured between User.user_keywords and UserKeyword.user results in a population of the UserKeyword.user attribute. The special_key argument above is left at its default value of None.

      For those cases where we do want special_key to have a value, we create the UserKeyword object explicitly. Below we assign all three attributes, where the assignment of .user has the effect of the UserKeyword being appended to the User.user_keywords collection:

      >>> UserKeyword(Keyword('its_wood'), user, special_key='my special key')

      The association proxy returns to us a collection of Keyword objects represented by all these operations:

      >>> user.keywords
      [Keyword('new_from_blammo'), Keyword('its_big'), Keyword('its_heavy'), Keyword('its_wood')]

      Proxying to Dictionary Based Collections

      The association proxy can proxy to dictionary based collections as well. SQLAlchemy mappings usually use the attribute_mapped_collection() collection type to create dictionary collections, as well as the extended techniques described in Custom Dictionary-Based Collections.

      The association proxy adjusts its behavior when it detects the usage of a dictionary-based collection. When new values are added to the dictionary, the association proxy instantiates the intermediary object by passing two arguments to the creation function instead of one, the key and the value. As always, this creation function defaults to the constructor of the intermediary class, and can be customized using the creator argument.

      Below, we modify our UserKeyword example such that the User.user_keywords collection will now be mapped using a dictionary, where the UserKeyword.special_key argument will be used as the key for the dictionary. We then apply a creator argument to the User.keywords proxy so that these values are assigned appropriately when new elements are added to the dictionary:

      from sqlalchemy import Column, Integer, String, ForeignKey
      from sqlalchemy.orm import relationship, backref
      from sqlalchemy.ext.associationproxy import association_proxy
      from sqlalchemy.ext.declarative import declarative_base
      from sqlalchemy.orm.collections import attribute_mapped_collection
      
      Base = declarative_base()
      
      class User(Base):
          __tablename__ = 'user'
          id = Column(Integer, primary_key=True)
          name = Column(String(64))
      
          # proxy to 'user_keywords', instantiating UserKeyword
          # assigning the new key to 'special_key', values to
          # 'keyword'.
          keywords = association_proxy('user_keywords', 'keyword',
                          creator=lambda k, v:
                                      UserKeyword(special_key=k, keyword=v)
                      )
      
          def __init__(self, name):
              self.name = name
      
      class UserKeyword(Base):
          __tablename__ = 'user_keyword'
          user_id = Column(Integer, ForeignKey('user.id'), primary_key=True)
          keyword_id = Column(Integer, ForeignKey('keyword.id'), primary_key=True)
          special_key = Column(String)
      
          # bidirectional user/user_keywords relationships, mapping
          # user_keywords with a dictionary against "special_key" as key.
          user = relationship(User, backref=backref(
                          "user_keywords",
                          collection_class=attribute_mapped_collection("special_key"),
                          cascade="all, delete-orphan"
                          )
                      )
          keyword = relationship("Keyword")
      
      class Keyword(Base):
          __tablename__ = 'keyword'
          id = Column(Integer, primary_key=True)
          keyword = Column('keyword', String(64))
      
          def __init__(self, keyword):
              self.keyword = keyword
      
          def __repr__(self):
              return 'Keyword(%s)' % repr(self.keyword)

      We illustrate the .keywords collection as a dictionary, mapping the UserKeyword.special_key value to Keyword objects:

      >>> user = User('log')
      
      >>> user.keywords['sk1'] = Keyword('kw1')
      >>> user.keywords['sk2'] = Keyword('kw2')
      
      >>> print(user.keywords)
      {'sk1': Keyword('kw1'), 'sk2': Keyword('kw2')}

      Composite Association Proxies

      Given our previous examples of proxying from relationship to scalar attribute, proxying across an association object, and proxying dictionaries, we can combine all three techniques together to give User a keywords dictionary that deals strictly with the string value of special_key mapped to the string keyword. Both the UserKeyword and Keyword classes are entirely concealed. This is achieved by building an association proxy on User that refers to an association proxy present on UserKeyword:

      from sqlalchemy import Column, Integer, String, ForeignKey
      from sqlalchemy.orm import relationship, backref
      
      from sqlalchemy.ext.associationproxy import association_proxy
      from sqlalchemy.ext.declarative import declarative_base
      from sqlalchemy.orm.collections import attribute_mapped_collection
      
      Base = declarative_base()
      
      class User(Base):
          __tablename__ = 'user'
          id = Column(Integer, primary_key=True)
          name = Column(String(64))
      
          # the same 'user_keywords'->'keyword' proxy as in
          # the basic dictionary example
          keywords = association_proxy(
                      'user_keywords',
                      'keyword',
                      creator=lambda k, v:
                                  UserKeyword(special_key=k, keyword=v)
                      )
      
          def __init__(self, name):
              self.name = name
      
      class UserKeyword(Base):
          __tablename__ = 'user_keyword'
          user_id = Column(Integer, ForeignKey('user.id'), primary_key=True)
          keyword_id = Column(Integer, ForeignKey('keyword.id'),
                                                          primary_key=True)
          special_key = Column(String)
          user = relationship(User, backref=backref(
                  "user_keywords",
                  collection_class=attribute_mapped_collection("special_key"),
                  cascade="all, delete-orphan"
                  )
              )
      
          # the relationship to Keyword is now called
          # 'kw'
          kw = relationship("Keyword")
      
          # 'keyword' is changed to be a proxy to the
          # 'keyword' attribute of 'Keyword'
          keyword = association_proxy('kw', 'keyword')
      
      class Keyword(Base):
          __tablename__ = 'keyword'
          id = Column(Integer, primary_key=True)
          keyword = Column('keyword', String(64))
      
          def __init__(self, keyword):
              self.keyword = keyword

      User.keywords is now a dictionary of string to string, where UserKeyword and Keyword objects are created and removed for us transparently using the association proxy. In the example below, we illustrate usage of the assignment operator, also appropriately handled by the association proxy, to apply a dictionary value to the collection at once:

      >>> user = User('log')
      >>> user.keywords = {
      ...     'sk1':'kw1',
      ...     'sk2':'kw2'
      ... }
      >>> print(user.keywords)
      {'sk1': 'kw1', 'sk2': 'kw2'}
      
      >>> user.keywords['sk3'] = 'kw3'
      >>> del user.keywords['sk2']
      >>> print(user.keywords)
      {'sk1': 'kw1', 'sk3': 'kw3'}
      
      >>> # illustrate un-proxied usage
      ... print(user.user_keywords['sk3'].kw)
      <__main__.Keyword object at 0x12ceb90>

      One caveat with our example above is that because Keyword objects are created for each dictionary set operation, the example fails to maintain uniqueness for the Keyword objects on their string name, which is a typical requirement for a tagging scenario such as this one. For this use case the recipe UniqueObject, or a comparable creational strategy, is recommended, which will apply a “lookup first, then create” strategy to the constructor of the Keyword class, so that an already existing Keyword is returned if the given name is already present.
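
      A minimal sketch of such a "lookup first, then create" approach, applied here through the creator argument rather than the Keyword constructor itself; the session handling is simplified and purely illustrative, with the rest of the mapping above unchanged:

      def _create_user_keyword(special_key, value):
          # reuse an existing Keyword row for this string if one is present;
          # "session" is assumed to be available from the enclosing scope
          keyword = session.query(Keyword).filter_by(keyword=value).first()
          if keyword is None:
              keyword = Keyword(value)
          return UserKeyword(special_key=special_key, kw=keyword)

      class User(Base):
          # ...
          keywords = association_proxy('user_keywords', 'keyword',
                              creator=_create_user_keyword)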

      Querying with Association Proxies

      The AssociationProxy features simple SQL construction capabilities which relate down to the underlying relationship() in use as well as the target attribute. For example, the RelationshipProperty.Comparator.any() and RelationshipProperty.Comparator.has() operations are available, and will produce a “nested” EXISTS clause, such as in our basic association object example:

      >>> print(session.query(User).filter(User.keywords.any(keyword='jek')))
      SELECT user.id AS user_id, user.name AS user_name
      FROM user
      WHERE EXISTS (SELECT 1
      FROM user_keyword
      WHERE user.id = user_keyword.user_id AND (EXISTS (SELECT 1
      FROM keyword
      WHERE keyword.id = user_keyword.keyword_id AND keyword.keyword = :keyword_1)))

      For a proxy to a scalar attribute, __eq__() is supported:

      >>> print(session.query(UserKeyword).filter(UserKeyword.keyword == 'jek'))
      SELECT user_keyword.*
      FROM user_keyword
      WHERE EXISTS (SELECT 1
          FROM keyword
          WHERE keyword.id = user_keyword.keyword_id AND keyword.keyword = :keyword_1)

      and .contains() is available for a proxy to a scalar collection:

      >>> print(session.query(User).filter(User.keywords.contains('jek')))
      SELECT user.*
      FROM user
      WHERE EXISTS (SELECT 1
      FROM userkeywords, keyword
      WHERE user.id = userkeywords.user_id
          AND keyword.id = userkeywords.keyword_id
          AND keyword.keyword = :keyword_1)

      AssociationProxy can be used with Query.join() somewhat manually using the attr attribute in a star-args context:

      q = session.query(User).join(*User.keywords.attr)

      New in version 0.7.3: attr attribute in a star-args context.

      attr is composed of AssociationProxy.local_attr and AssociationProxy.remote_attr, which are just synonyms for the actual proxied attributes, and can also be used for querying:

      uka = aliased(UserKeyword)
      ka = aliased(Keyword)
      q = session.query(User).\
              join(uka, User.keywords.local_attr).\
              join(ka, User.keywords.remote_attr)

      New in version 0.7.3: AssociationProxy.local_attr and AssociationProxy.remote_attr, synonyms for the actual proxied attributes, and usable for querying.

      API Documentation

      sqlalchemy.ext.associationproxy.association_proxy(target_collection, attr, **kw)

      Return a Python property implementing a view of a target attribute which references an attribute on members of the target.

      The returned value is an instance of AssociationProxy.

      Implements a Python property representing a relationship as a collection of simpler values, or a scalar value. The proxied property will mimic the collection type of the target (list, dict or set), or, in the case of a one to one relationship, a simple scalar value.

      Parameters:
      • target_collection – Name of the attribute we’ll proxy to. This attribute is typically mapped by relationship() to link to a target collection, but can also be a many-to-one or non-scalar relationship.
      • attr

        Attribute on the associated instance or instances we’ll proxy for.

        For example, given a target collection of [obj1, obj2], a list created by this proxy property would look like [getattr(obj1, attr), getattr(obj2, attr)]

        If the relationship is one-to-one or otherwise uselist=False, then simply: getattr(obj, attr)

      • creator

        optional.

        When new items are added to this proxied collection, new instances of the class collected by the target collection will be created. For list and set collections, the target class constructor will be called with the ‘value’ for the new instance. For dict types, two arguments are passed: key and value.

        If you want to construct instances differently, supply a creator function that takes arguments as above and returns instances.

        For scalar relationships, creator() will be called if the target is None. If the target is present, set operations are proxied to setattr() on the associated object.

        If you have an associated object with multiple attributes, you may set up multiple association proxies mapping to different attributes. See the unit tests for examples, and for examples of how creator() functions can be used to construct the scalar relationship on-demand in this situation.

      • **kw – Passes along any other keyword arguments to AssociationProxy.
      class sqlalchemy.ext.associationproxy.AssociationProxy(target_collection, attr, creator=None, getset_factory=None, proxy_factory=None, proxy_bulk_set=None)

      Bases: sqlalchemy.orm.interfaces._InspectionAttr

      A descriptor that presents a read/write view of an object attribute.

      __init__(target_collection, attr, creator=None, getset_factory=None, proxy_factory=None, proxy_bulk_set=None)

      Construct a new AssociationProxy.

      The association_proxy() function is provided as the usual entrypoint here, though AssociationProxy can be instantiated and/or subclassed directly.

      Parameters:
      • target_collection – Name of the collection we’ll proxy to, usually created with relationship().
      • attr – Attribute on the collected instances we’ll proxy for. For example, given a target collection of [obj1, obj2], a list created by this proxy property would look like [getattr(obj1, attr), getattr(obj2, attr)]
      • creator

        Optional. When new items are added to this proxied collection, new instances of the class collected by the target collection will be created. For list and set collections, the target class constructor will be called with the ‘value’ for the new instance. For dict types, two arguments are passed: key and value.

        If you want to construct instances differently, supply a ‘creator’ function that takes arguments as above and returns instances.

      • getset_factory

        Optional. Proxied attribute access is automatically handled by routines that get and set values based on the attr argument for this proxy.

        If you would like to customize this behavior, you may supply a getset_factory callable that produces a tuple of getter and setter functions. The factory is called with two arguments, the abstract type of the underlying collection and this proxy instance.

      • proxy_factory – Optional. The type of collection to emulate is determined by sniffing the target collection. If your collection type can’t be determined by duck typing or you’d like to use a different collection implementation, you may supply a factory function to produce those collections. Only applicable to non-scalar relationships.
      • proxy_bulk_set – Optional, use with proxy_factory. See the _set() method for details.
      any(criterion=None, **kwargs)

      Produce a proxied ‘any’ expression using EXISTS.

      This expression will be a composed product using the RelationshipProperty.Comparator.any() and/or RelationshipProperty.Comparator.has() operators of the underlying proxied attributes.

      attr

      Return a tuple of (local_attr, remote_attr).

      This attribute is convenient when specifying a join using Query.join() across two relationships:

      sess.query(Parent).join(*Parent.proxied.attr)

      New in version 0.7.3.

      See also:

      AssociationProxy.local_attr

      AssociationProxy.remote_attr

      contains(obj)

      Produce a proxied ‘contains’ expression using EXISTS.

      This expression will be a composed product using the RelationshipProperty.Comparator.any() , RelationshipProperty.Comparator.has(), and/or RelationshipProperty.Comparator.contains() operators of the underlying proxied attributes.

      extension_type = <symbol 'ASSOCIATION_PROXY'>
      has(criterion=None, **kwargs)

      Produce a proxied ‘has’ expression using EXISTS.

      This expression will be a composed product using the RelationshipProperty.Comparator.any() and/or RelationshipProperty.Comparator.has() operators of the underlying proxied attributes.

      is_attribute = False
      local_attr

      The ‘local’ MapperProperty referenced by this AssociationProxy.

      New in version 0.7.3.

      See also:

      AssociationProxy.attr

      AssociationProxy.remote_attr

      remote_attr

      The ‘remote’ MapperProperty referenced by this AssociationProxy.

      New in version 0.7.3.

      See also:

      AssociationProxy.attr

      AssociationProxy.local_attr

      scalar

      Return True if this AssociationProxy proxies a scalar relationship on the local side.

      target_class

      The intermediary class handled by this AssociationProxy.

      Intercepted append/set/assignment events will result in the generation of new instances of this class.

      sqlalchemy.ext.associationproxy.ASSOCIATION_PROXY = <symbol 'ASSOCIATION_PROXY'>

      Declarative

      Synopsis

      SQLAlchemy object-relational configuration involves the combination of Table, mapper(), and class objects to define a mapped class. declarative allows all three to be expressed at once within the class declaration. As much as possible, regular SQLAlchemy schema and ORM constructs are used directly, so that configuration between “classical” ORM usage and declarative remains highly similar.

      As a simple example:

      from sqlalchemy.ext.declarative import declarative_base
      
      Base = declarative_base()
      
      class SomeClass(Base):
          __tablename__ = 'some_table'
          id = Column(Integer, primary_key=True)
          name =  Column(String(50))

      Above, the declarative_base() callable returns a new base class from which all mapped classes should inherit. When the class definition is completed, a new Table and mapper() will have been generated.

      The resulting table and mapper are accessible via __table__ and __mapper__ attributes on the SomeClass class:

      # access the mapped Table
      SomeClass.__table__
      
      # access the Mapper
      SomeClass.__mapper__

      Defining Attributes

      In the previous example, the Column objects are automatically named with the name of the attribute to which they are assigned.

      To name columns explicitly with a name distinct from their mapped attribute, just give the column a name. Below, column “some_table_id” is mapped to the “id” attribute of SomeClass, but in SQL will be represented as “some_table_id”:

      class SomeClass(Base):
          __tablename__ = 'some_table'
          id = Column("some_table_id", Integer, primary_key=True)

      Attributes may be added to the class after its construction, and they will be added to the underlying Table and mapper() definitions as appropriate:

      SomeClass.data = Column('data', Unicode)
      SomeClass.related = relationship(RelatedInfo)

      Classes which are constructed using declarative can interact freely with classes that are mapped explicitly with mapper().

      It is recommended, though not required, that all tables share the same underlying MetaData object, so that string-configured ForeignKey references can be resolved without issue.

      Accessing the MetaData

      The declarative_base() base class contains a MetaData object where newly defined Table objects are collected. This object is intended to be accessed directly for MetaData-specific operations, such as issuing CREATE statements for all tables:

      engine = create_engine('sqlite://')
      Base.metadata.create_all(engine)

      declarative_base() can also receive a pre-existing MetaData object, which allows a declarative setup to be associated with an already existing traditional collection of Table objects:

      mymetadata = MetaData()
      Base = declarative_base(metadata=mymetadata)

      Configuring Relationships

      Relationships to other classes are done in the usual way, with the added feature that the class specified to relationship() may be a string name. The “class registry” associated with Base is used at mapper compilation time to resolve the name into the actual class object, which is expected to have been defined once the mapper configuration is used:

      class User(Base):
          __tablename__ = 'users'
      
          id = Column(Integer, primary_key=True)
          name = Column(String(50))
          addresses = relationship("Address", backref="user")
      
      class Address(Base):
          __tablename__ = 'addresses'
      
          id = Column(Integer, primary_key=True)
          email = Column(String(50))
          user_id = Column(Integer, ForeignKey('users.id'))

      Column constructs, since they are just that, are immediately usable, as below where we define a primary join condition on the Address class using them:

      class Address(Base):
          __tablename__ = 'addresses'
      
          id = Column(Integer, primary_key=True)
          email = Column(String(50))
          user_id = Column(Integer, ForeignKey('users.id'))
          user = relationship(User, primaryjoin=user_id == User.id)

      In addition to the main argument for relationship(), other arguments which depend upon the columns present on an as-yet undefined class may also be specified as strings. These strings are evaluated as Python expressions. The full namespace available within this evaluation includes all classes mapped for this declarative base, as well as the contents of the sqlalchemy package, including expression functions like desc() and func:

      class User(Base):
          # ....
          addresses = relationship("Address",
                               order_by="desc(Address.email)",
                               primaryjoin="Address.user_id==User.id")

      For the case where more than one module contains a class of the same name, string class names can also be specified as module-qualified paths within any of these string expressions:

      class User(Base):
          # ....
          addresses = relationship("myapp.model.address.Address",
                               order_by="desc(myapp.model.address.Address.email)",
                               primaryjoin="myapp.model.address.Address.user_id=="
                                              "myapp.model.user.User.id")

      The qualified path can be any partial path that removes ambiguity between the names. For example, to disambiguate between myapp.model.address.Address and myapp.model.lookup.Address, we can specify address.Address or lookup.Address:

      class User(Base):
          # ....
          addresses = relationship("address.Address",
                               order_by="desc(address.Address.email)",
                               primaryjoin="address.Address.user_id=="
                                              "User.id")

      New in version 0.8: module-qualified paths can be used when specifying string arguments with Declarative, in order to specify specific modules.

      Two alternatives also exist to using string-based attributes. A lambda can also be used, which will be evaluated after all mappers have been configured:

      class User(Base):
          # ...
          addresses = relationship(lambda: Address,
                               order_by=lambda: desc(Address.email),
                               primaryjoin=lambda: Address.user_id==User.id)

      Or, the relationship can be added to the class explicitly after the classes are available:

      User.addresses = relationship(Address,
                                primaryjoin=Address.user_id==User.id)

      Configuring Many-to-Many Relationships

      Many-to-many relationships are also declared in the same way with declarative as with traditional mappings. The secondary argument to relationship() is as usual passed a Table object, which is typically declared in the traditional way. The Table usually shares the MetaData object used by the declarative base:

      keywords = Table(
          'keywords', Base.metadata,
          Column('author_id', Integer, ForeignKey('authors.id')),
          Column('keyword_id', Integer, ForeignKey('keywords.id'))
          )
      
      class Author(Base):
          __tablename__ = 'authors'
          id = Column(Integer, primary_key=True)
          keywords = relationship("Keyword", secondary=keywords)

      Like other relationship() arguments, a string is accepted as well, passing the string name of the table as defined in the Base.metadata.tables collection:

      class Author(Base):
          __tablename__ = 'authors'
          id = Column(Integer, primary_key=True)
          keywords = relationship("Keyword", secondary="keywords")

      As with traditional mapping, it’s generally not a good idea to use a Table as the “secondary” argument which is also mapped to a class, unless the relationship() is declared with viewonly=True. Otherwise, the unit-of-work system may attempt duplicate INSERT and DELETE statements against the underlying table.
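As a brief, hypothetical sketch, assuming the keywords table above were also mapped to a class of its own elsewhere, the many-to-many relationship would then be marked viewonly=True:

      class Author(Base):
          __tablename__ = 'authors'
          id = Column(Integer, primary_key=True)

          # viewonly=True: the unit of work will not emit INSERT or DELETE
          # statements against "keywords" through this relationship
          keywords = relationship("Keyword", secondary=keywords, viewonly=True)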

      Defining SQL Expressions

      See SQL Expressions as Mapped Attributes for examples on declaratively mapping attributes to SQL expressions.
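As a minimal illustration only (the linked section covers this in full), a SQL expression can be mapped declaratively using column_property(); the column names below are assumptions made for the sake of the example:

      from sqlalchemy.orm import column_property

      class User(Base):
          __tablename__ = 'users'

          id = Column(Integer, primary_key=True)
          firstname = Column(String(50))
          lastname = Column(String(50))

          # a mapped SQL expression, rendered as part of the SELECT for User
          fullname = column_property(firstname + " " + lastname)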

      Table Configuration

      Table arguments other than the name, metadata, and mapped Column arguments are specified using the __table_args__ class attribute. This attribute accommodates both positional as well as keyword arguments that are normally sent to the Table constructor. The attribute can be specified in one of two forms. One is as a dictionary:

      class MyClass(Base):
          __tablename__ = 'sometable'
          __table_args__ = {'mysql_engine':'InnoDB'}

      The other, a tuple, where each argument is positional (usually constraints):

      class MyClass(Base):
          __tablename__ = 'sometable'
          __table_args__ = (
                  ForeignKeyConstraint(['id'], ['remote_table.id']),
                  UniqueConstraint('foo'),
                  )

      Keyword arguments can be specified with the above form by specifying the last argument as a dictionary:

      class MyClass(Base):
          __tablename__ = 'sometable'
          __table_args__ = (
                  ForeignKeyConstraint(['id'], ['remote_table.id']),
                  UniqueConstraint('foo'),
                  {'autoload':True}
                  )

      Using a Hybrid Approach with __table__

      As an alternative to __tablename__, a direct Table construct may be used. The Column objects, which in this case require their names, will be added to the mapping just like a regular mapping to a table:

      class MyClass(Base):
          __table__ = Table('my_table', Base.metadata,
              Column('id', Integer, primary_key=True),
              Column('name', String(50))
          )

      __table__ provides a more focused point of control for establishing table metadata, while still getting most of the benefits of using declarative. An application that uses reflection might want to load table metadata elsewhere and pass it to declarative classes:

      from sqlalchemy.ext.declarative import declarative_base
      
      Base = declarative_base()
      Base.metadata.reflect(some_engine)
      
      class User(Base):
          __table__ = Base.metadata.tables['user']
      
      class Address(Base):
          __table__ = Base.metadata.tables['address']

      Some configuration schemes may find it more appropriate to use __table__, such as those which already take advantage of the data-driven nature of Table to customize and/or automate schema definition.

      Note that when the __table__ approach is used, the object is immediately usable as a plain Table within the class declaration body itself, as a Python class is only another syntactical block. Below this is illustrated by using the id column in the primaryjoin condition of a relationship():

      class MyClass(Base):
          __table__ = Table('my_table', Base.metadata,
              Column('id', Integer, primary_key=True),
              Column('name', String(50))
          )
      
          widgets = relationship(Widget,
                      primaryjoin=Widget.myclass_id==__table__.c.id)

      Similarly, mapped attributes which refer to __table__ can be placed inline, as below where we assign the name column to the attribute _name, generating a synonym for name:

      from sqlalchemy.ext.declarative import synonym_for
      
      class MyClass(Base):
          __table__ = Table('my_table', Base.metadata,
              Column('id', Integer, primary_key=True),
              Column('name', String(50))
          )
      
          _name = __table__.c.name
      
          @synonym_for("_name")
          def name(self):
              return "Name: %s" % _name

      Using Reflection with Declarative

      It’s easy to set up a Table that uses autoload=True in conjunction with a mapped class:

      class MyClass(Base):
          __table__ = Table('mytable', Base.metadata,
                          autoload=True, autoload_with=some_engine)

      However, one improvement that can be made here is to not require the Engine to be available when classes are being first declared. To achieve this, use the DeferredReflection mixin, which sets up mappings only after a special prepare(engine) step is called:

      from sqlalchemy.ext.declarative import declarative_base, DeferredReflection
      
      Base = declarative_base(cls=DeferredReflection)
      
      class Foo(Base):
          __tablename__ = 'foo'
          bars = relationship("Bar")
      
      class Bar(Base):
          __tablename__ = 'bar'
      
          # illustrate overriding of "bar.foo_id" to have
          # a foreign key constraint otherwise not
          # reflected, such as when using MySQL
          foo_id = Column(Integer, ForeignKey('foo.id'))
      
      Base.prepare(e)

      New in version 0.8: Added DeferredReflection.

      Mapper Configuration

      Declarative makes use of the mapper() function internally when it creates the mapping to the declared table. The options for mapper() are passed directly through via the __mapper_args__ class attribute. As always, arguments which reference locally mapped columns can reference them directly from within the class declaration:

      from datetime import datetime
      
      class Widget(Base):
          __tablename__ = 'widgets'
      
          id = Column(Integer, primary_key=True)
          timestamp = Column(DateTime, nullable=False)
      
          __mapper_args__ = {
                          'version_id_col': timestamp,
                          'version_id_generator': lambda v:datetime.now()
                      }

      Inheritance Configuration

      Declarative supports all three forms of inheritance as intuitively as possible. The inherits mapper keyword argument is not needed as declarative will determine this from the class itself. The various “polymorphic” keyword arguments are specified using __mapper_args__.

      Joined Table Inheritance

      Joined table inheritance is defined as a subclass that defines its own table:

      class Person(Base):
          __tablename__ = 'people'
          id = Column(Integer, primary_key=True)
          discriminator = Column('type', String(50))
          __mapper_args__ = {'polymorphic_on': discriminator}
      
      class Engineer(Person):
          __tablename__ = 'engineers'
          __mapper_args__ = {'polymorphic_identity': 'engineer'}
          id = Column(Integer, ForeignKey('people.id'), primary_key=True)
          primary_language = Column(String(50))

      Note that above, the Engineer.id attribute, since it shares the same attribute name as the Person.id attribute, will in fact represent the people.id and engineers.id columns together, with the “Engineer.id” column taking precedence if queried directly. To provide the Engineer class with an attribute that represents only the engineers.id column, give it a different attribute name:

      class Engineer(Person):
          __tablename__ = 'engineers'
          __mapper_args__ = {'polymorphic_identity': 'engineer'}
          engineer_id = Column('id', Integer, ForeignKey('people.id'),
                                                      primary_key=True)
          primary_language = Column(String(50))

      Changed in version 0.7: joined table inheritance favors the subclass column over that of the superclass, such as querying above for Engineer.id. Prior to 0.7 this was the reverse.

      Single Table Inheritance

      Single table inheritance is defined as a subclass that does not have its own table; you just leave out the __table__ and __tablename__ attributes:

      class Person(Base):
          __tablename__ = 'people'
          id = Column(Integer, primary_key=True)
          discriminator = Column('type', String(50))
          __mapper_args__ = {'polymorphic_on': discriminator}
      
      class Engineer(Person):
          __mapper_args__ = {'polymorphic_identity': 'engineer'}
          primary_language = Column(String(50))

      When the above mappers are configured, the Person class is mapped to the people table before the primary_language column is defined, and this column will not be included in its own mapping. When Engineer then defines the primary_language column, the column is added to the people table so that it is included in the mapping for Engineer and is also part of the table’s full set of columns. Columns which are not mapped to Person are also excluded from any other single or joined inheriting classes using the exclude_properties mapper argument. Below, Manager will have all the attributes of Person and Manager but not the primary_language attribute of Engineer:

      class Manager(Person):
          __mapper_args__ = {'polymorphic_identity': 'manager'}
          golf_swing = Column(String(50))

      The attribute exclusion logic is provided by the exclude_properties mapper argument, and declarative’s default behavior can be disabled by passing an explicit exclude_properties collection (empty or otherwise) to the __mapper_args__.
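For example, a sketch of passing the collection explicitly; an empty collection disables the automatic exclusion entirely, so that Manager would also map columns contributed by sibling classes:

      class Manager(Person):
          __mapper_args__ = {
              'polymorphic_identity': 'manager',

              # an explicit (here empty) collection overrides declarative's
              # automatic exclusion of sibling-class columns
              'exclude_properties': []
          }
          golf_swing = Column(String(50))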

      Resolving Column Conflicts

      Note above that the primary_language and golf_swing columns are “moved up” to be applied to Person.__table__, as a result of their declaration on a subclass that has no table of its own. A tricky case comes up when two subclasses want to specify the same column, as below:

      class Person(Base):
          __tablename__ = 'people'
          id = Column(Integer, primary_key=True)
          discriminator = Column('type', String(50))
          __mapper_args__ = {'polymorphic_on': discriminator}
      
      class Engineer(Person):
          __mapper_args__ = {'polymorphic_identity': 'engineer'}
          start_date = Column(DateTime)
      
      class Manager(Person):
          __mapper_args__ = {'polymorphic_identity': 'manager'}
          start_date = Column(DateTime)

      Above, the start_date column declared on both Engineer and Manager will result in an error:

      sqlalchemy.exc.ArgumentError: Column 'start_date' on class
      <class '__main__.Manager'> conflicts with existing
      column 'people.start_date'

      In a situation like this, Declarative can’t be sure of the intent, especially if the start_date columns had, for example, different types. A situation like this can be resolved by using declared_attr to define the Column conditionally, taking care to return the existing column via the parent __table__ if it already exists:

      from sqlalchemy.ext.declarative import declared_attr
      
      class Person(Base):
          __tablename__ = 'people'
          id = Column(Integer, primary_key=True)
          discriminator = Column('type', String(50))
          __mapper_args__ = {'polymorphic_on': discriminator}
      
      class Engineer(Person):
          __mapper_args__ = {'polymorphic_identity': 'engineer'}
      
          @declared_attr
          def start_date(cls):
              "Start date column, if not present already."
              return Person.__table__.c.get('start_date', Column(DateTime))
      
      class Manager(Person):
          __mapper_args__ = {'polymorphic_identity': 'manager'}
      
          @declared_attr
          def start_date(cls):
              "Start date column, if not present already."
              return Person.__table__.c.get('start_date', Column(DateTime))

      Above, when Manager is mapped, the start_date column is already present on the Person class. Declarative lets us return that Column as a result in this case, where it knows to skip re-assigning the same column. If the mapping is mis-configured such that the start_date column is accidentally re-assigned to a different table (such as, if we changed Manager to be joined inheritance without fixing start_date), an error is raised which indicates an existing Column is trying to be re-assigned to a different owning Table.

      New in version 0.8: declared_attr can be used on a non-mixin class, and the returned Column or other mapped attribute will be applied to the mapping as any other attribute. Previously, the resulting attribute would be ignored, and also result in a warning being emitted when a subclass was created.

      New in version 0.8: declared_attr, when used either with a mixin or non-mixin declarative class, can return an existing Column already assigned to the parent Table, to indicate that the re-assignment of the Column should be skipped; the attribute will nonetheless still be mapped on the target class, in order to resolve duplicate column conflicts.

      The same concept can be used with mixin classes (see Mixin and Custom Base Classes):

      class Person(Base):
          __tablename__ = 'people'
          id = Column(Integer, primary_key=True)
          discriminator = Column('type', String(50))
          __mapper_args__ = {'polymorphic_on': discriminator}
      
      class HasStartDate(object):
          @declared_attr
          def start_date(cls):
              return cls.__table__.c.get('start_date', Column(DateTime))
      
      class Engineer(HasStartDate, Person):
          __mapper_args__ = {'polymorphic_identity': 'engineer'}
      
      class Manager(HasStartDate, Person):
          __mapper_args__ = {'polymorphic_identity': 'manager'}

      The above mixin checks the local __table__ attribute for the column. Because we’re using single table inheritance, we’re sure that in this case, cls.__table__ refers to Person.__table__. If we were mixing joined- and single-table inheritance, we might want our mixin to check more carefully if cls.__table__ is really the Table we’re looking for.
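One possible sketch of such a check is below; it assumes the mixin should only reuse the parent table's column when the class at hand does not declare a table of its own:

      from sqlalchemy.ext.declarative import declared_attr

      class HasStartDate(object):
          @declared_attr
          def start_date(cls):
              # if this class declares its own table (joined table
              # inheritance), give it a fresh column rather than reusing
              # the parent's
              if '__tablename__' in cls.__dict__ or '__table__' in cls.__dict__:
                  return Column(DateTime)
              # single table inheritance: reuse the parent table's column
              return cls.__table__.c.get('start_date', Column(DateTime))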

      Concrete Table Inheritance

      Concrete table inheritance is defined as a subclass which has its own table and sets the concrete keyword argument to True:

      class Person(Base):
          __tablename__ = 'people'
          id = Column(Integer, primary_key=True)
          name = Column(String(50))
      
      class Engineer(Person):
          __tablename__ = 'engineers'
          __mapper_args__ = {'concrete':True}
          id = Column(Integer, primary_key=True)
          primary_language = Column(String(50))
          name = Column(String(50))

      Usage of an abstract base class is a little less straightforward as it requires usage of polymorphic_union(), which needs to be created with the Table objects before the class is built:

      engineers = Table('engineers', Base.metadata,
                      Column('id', Integer, primary_key=True),
                      Column('name', String(50)),
                      Column('primary_language', String(50))
                  )
      managers = Table('managers', Base.metadata,
                      Column('id', Integer, primary_key=True),
                      Column('name', String(50)),
                      Column('golf_swing', String(50))
                  )
      
      punion = polymorphic_union({
          'engineer':engineers,
          'manager':managers
      }, 'type', 'punion')
      
      class Person(Base):
          __table__ = punion
          __mapper_args__ = {'polymorphic_on':punion.c.type}
      
      class Engineer(Person):
          __table__ = engineers
          __mapper_args__ = {'polymorphic_identity':'engineer', 'concrete':True}
      
      class Manager(Person):
          __table__ = managers
          __mapper_args__ = {'polymorphic_identity':'manager', 'concrete':True}

      Using the Concrete Helpers

      Helper classes provide a simpler pattern for concrete inheritance. With these objects, the __declare_last__ helper is used to configure the “polymorphic” loader for the mapper after all subclasses have been declared.

      New in version 0.7.3.

      An abstract base can be declared using the AbstractConcreteBase class:

      from sqlalchemy.ext.declarative import AbstractConcreteBase
      
      class Employee(AbstractConcreteBase, Base):
          pass

      To have a concrete employee table, use ConcreteBase instead:

      from sqlalchemy.ext.declarative import ConcreteBase
      
      class Employee(ConcreteBase, Base):
          __tablename__ = 'employee'
          employee_id = Column(Integer, primary_key=True)
          name = Column(String(50))
          __mapper_args__ = {
                          'polymorphic_identity':'employee',
                          'concrete':True}

      Either Employee base can be used in the normal fashion:

      class Manager(Employee):
          __tablename__ = 'manager'
          employee_id = Column(Integer, primary_key=True)
          name = Column(String(50))
          manager_data = Column(String(40))
          __mapper_args__ = {
                          'polymorphic_identity':'manager',
                          'concrete':True}
      
      class Engineer(Employee):
          __tablename__ = 'engineer'
          employee_id = Column(Integer, primary_key=True)
          name = Column(String(50))
          engineer_info = Column(String(40))
          __mapper_args__ = {'polymorphic_identity':'engineer',
                          'concrete':True}

      Mixin and Custom Base Classes

      A common need when using declarative is to share some functionality, such as a set of common columns, some common table options, or other mapped properties, across many classes. The standard Python idiom for this is to have the classes inherit from a base which includes these common features.

      When using declarative, this idiom is allowed via the usage of a custom declarative base class, as well as a “mixin” class which is inherited from in addition to the primary base. Declarative includes several helper features to make this work in terms of how mappings are declared. An example of some commonly mixed-in idioms is below:

      from sqlalchemy.ext.declarative import declared_attr
      
      class MyMixin(object):
      
          @declared_attr
          def __tablename__(cls):
              return cls.__name__.lower()
      
          __table_args__ = {'mysql_engine': 'InnoDB'}
          __mapper_args__= {'always_refresh': True}
      
          id =  Column(Integer, primary_key=True)
      
      class MyModel(MyMixin, Base):
          name = Column(String(1000))

      Where above, the class MyModel will contain an “id” column as the primary key, a __tablename__ attribute that derives from the name of the class itself, as well as __table_args__ and __mapper_args__ defined by the MyMixin mixin class.

      There’s no fixed convention over whether MyMixin precedes Base or not. Normal Python method resolution rules apply, and the above example would work just as well with:

      class MyModel(Base, MyMixin):
          name = Column(String(1000))

      This works because Base here doesn’t define any of the variables that MyMixin defines, i.e. __tablename__, __table_args__, id, etc. If the Base did define an attribute of the same name, the class placed first in the inherits list would determine which attribute is used on the newly defined class.
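To illustrate, here is a sketch with hypothetical names, using the cls argument of declarative_base() described in the next section; the class listed first among the bases determines which __table_args__ is used:

      from sqlalchemy.ext.declarative import declarative_base

      class DefaultSettings(object):
          __table_args__ = {'mysql_engine': 'MyISAM'}

      Base = declarative_base(cls=DefaultSettings)

      class MyMixin(object):
          __table_args__ = {'mysql_engine': 'InnoDB'}

      class ModelA(MyMixin, Base):
          # MyMixin is first in the MRO, so InnoDB is used
          __tablename__ = 'model_a'
          id = Column(Integer, primary_key=True)

      class ModelB(Base, MyMixin):
          # Base (and with it DefaultSettings) is first, so MyISAM is used
          __tablename__ = 'model_b'
          id = Column(Integer, primary_key=True)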

      Augmenting the Base

      In addition to using a pure mixin, most of the techniques in this section can also be applied to the base class itself, for patterns that should apply to all classes derived from a particular base. This is achieved using the cls argument of the declarative_base() function:

      from sqlalchemy.ext.declarative import declared_attr
      
      class Base(object):
          @declared_attr
          def __tablename__(cls):
              return cls.__name__.lower()
      
          __table_args__ = {'mysql_engine': 'InnoDB'}
      
          id =  Column(Integer, primary_key=True)
      
      from sqlalchemy.ext.declarative import declarative_base
      
      Base = declarative_base(cls=Base)
      
      class MyModel(Base):
          name = Column(String(1000))

      Where above, MyModel and all other classes that derive from Base will have a table name derived from the class name, an id primary key column, as well as the “InnoDB” engine for MySQL.

      Mixing in Columns

      The most basic way to specify a column on a mixin is by simple declaration:

      class TimestampMixin(object):
          created_at = Column(DateTime, default=func.now())
      
      class MyModel(TimestampMixin, Base):
          __tablename__ = 'test'
      
          id =  Column(Integer, primary_key=True)
          name = Column(String(1000))

      Where above, all declarative classes that include TimestampMixin will also have a column created_at that applies a timestamp to all row insertions.

      Those familiar with the SQLAlchemy expression language know that the object identity of clause elements defines their role in a schema. Two Table objects a and b may both have a column called id, but the way these are differentiated is that a.c.id and b.c.id are two distinct Python objects, referencing their parent tables a and b respectively.

      In the case of the mixin column, it seems that only one Column object is explicitly created, yet the ultimate created_at column above must exist as a distinct Python object for each separate destination class. To accomplish this, the declarative extension creates a copy of each Column object encountered on a class that is detected as a mixin.

      This copy mechanism is limited to simple columns that have no foreign keys, as a ForeignKey itself contains references to columns which can’t be properly recreated at this level. For columns that have foreign keys, as well as for the variety of mapper-level constructs that require destination-explicit context, the declared_attr decorator is provided so that patterns common to many classes can be defined as callables:

      from sqlalchemy.ext.declarative import declared_attr
      
      class ReferenceAddressMixin(object):
          @declared_attr
          def address_id(cls):
              return Column(Integer, ForeignKey('address.id'))
      
      class User(ReferenceAddressMixin, Base):
          __tablename__ = 'user'
          id = Column(Integer, primary_key=True)

      Where above, the address_id class-level callable is executed at the point at which the User class is constructed, and the declarative extension can use the resulting Column object as returned by the method without the need to copy it.

      Changed in version 0.6.5: sqlalchemy.util.classproperty was renamed to declared_attr.

      Columns generated by declared_attr can also be referenced by __mapper_args__ to a limited degree, currently by polymorphic_on and version_id_col, by specifying the declared_attr callable itself in the dictionary - the declarative extension will resolve them at class construction time:

      class MyMixin:
          @declared_attr
          def type_(cls):
              return Column(String(50))
      
          __mapper_args__= {'polymorphic_on':type_}
      
      class MyModel(MyMixin, Base):
          __tablename__='test'
          id =  Column(Integer, primary_key=True)

      Mixing in Relationships

      Relationships created by relationship() are provided with declarative mixin classes exclusively using the declared_attr approach, eliminating any ambiguity which could arise when copying a relationship and its possibly column-bound contents. Below is an example which combines a foreign key column and a relationship so that two classes Foo and Bar can both be configured to reference a common target class via many-to-one:

      class RefTargetMixin(object):
          @declared_attr
          def target_id(cls):
              return Column('target_id', ForeignKey('target.id'))
      
          @declared_attr
          def target(cls):
              return relationship("Target")
      
      class Foo(RefTargetMixin, Base):
          __tablename__ = 'foo'
          id = Column(Integer, primary_key=True)
      
      class Bar(RefTargetMixin, Base):
          __tablename__ = 'bar'
          id = Column(Integer, primary_key=True)
      
      class Target(Base):
          __tablename__ = 'target'
          id = Column(Integer, primary_key=True)

      relationship() definitions which require explicit primaryjoin, order_by etc. expressions should use the string forms for these arguments, so that they are evaluated as late as possible. To reference the mixin class in these expressions, use the given cls to get its name:

      class RefTargetMixin(object):
          @declared_attr
          def target_id(cls):
              return Column('target_id', ForeignKey('target.id'))
      
          @declared_attr
          def target(cls):
              return relationship("Target",
                  primaryjoin="Target.id==%s.target_id" % cls.__name__
              )

      Mixing in deferred(), column_property(), and other MapperProperty classes

      Like relationship(), all MapperProperty subclasses such as deferred(), column_property(), etc. ultimately involve references to columns, and therefore, when used with declarative mixins, have the declared_attr requirement so that no reliance on copying is needed:

      class SomethingMixin(object):
      
          @declared_attr
          def dprop(cls):
              return deferred(Column(Integer))
      
      class Something(SomethingMixin, Base):
          __tablename__ = "something"

      Mixing in Association Proxy and Other Attributes

      Mixins can specify user-defined attributes as well as other extension units such as association_proxy(). The usage of declared_attr is required in those cases where the attribute must be tailored specifically to the target subclass. An example is when constructing multiple association_proxy() attributes which each target a different type of child object. Below is an association_proxy() / mixin example which provides a scalar list of string values to an implementing class:

      from sqlalchemy import Column, Integer, ForeignKey, String
      from sqlalchemy.orm import relationship
      from sqlalchemy.ext.associationproxy import association_proxy
      from sqlalchemy.ext.declarative import declarative_base, declared_attr
      
      Base = declarative_base()
      
      class HasStringCollection(object):
          @declared_attr
          def _strings(cls):
              class StringAttribute(Base):
                  __tablename__ = cls.string_table_name
                  id = Column(Integer, primary_key=True)
                  value = Column(String(50), nullable=False)
                  parent_id = Column(Integer,
                                  ForeignKey('%s.id' % cls.__tablename__),
                                  nullable=False)
                  def __init__(self, value):
                      self.value = value
      
              return relationship(StringAttribute)
      
          @declared_attr
          def strings(cls):
              return association_proxy('_strings', 'value')
      
      class TypeA(HasStringCollection, Base):
          __tablename__ = 'type_a'
          string_table_name = 'type_a_strings'
          id = Column(Integer(), primary_key=True)
      
      class TypeB(HasStringCollection, Base):
          __tablename__ = 'type_b'
          string_table_name = 'type_b_strings'
          id = Column(Integer(), primary_key=True)

      Above, the HasStringCollection mixin produces a relationship() which refers to a newly generated class called StringAttribute. The StringAttribute class is generated with its own Table definition which is local to the parent class making use of the HasStringCollection mixin. It also produces an association_proxy() object which proxies references to the strings attribute onto the value attribute of each StringAttribute instance.

      TypeA or TypeB can be instantiated given the constructor argument strings, a list of strings:

      ta = TypeA(strings=['foo', 'bar'])
      tb = TypeB(strings=['bat', 'bar'])

      This list will generate a collection of StringAttribute objects, which are persisted into the table local to each class, either type_a_strings or type_b_strings:

      >>> print ta._strings
      [<__main__.StringAttribute object at 0x10151cd90>,
          <__main__.StringAttribute object at 0x10151ce10>]

      When constructing the association_proxy(), the declared_attr decorator must be used so that a distinct association_proxy() object is created for each of the TypeA and TypeB classes.

      New in version 0.8: declared_attr is usable with non-mapped attributes, including user-defined attributes as well as association_proxy().

      Controlling table inheritance with mixins

      The __tablename__ attribute in conjunction with the hierarchy of classes involved in a declarative mixin scenario controls what type of table inheritance, if any, is configured by the declarative extension.

      If the __tablename__ is computed by a mixin, you may need to control which classes get the computed attribute in order to get the type of table inheritance you require.

      For example, if you had a mixin that computes __tablename__ but where you wanted to use that mixin in a single table inheritance hierarchy, you can explicitly specify __tablename__ as None to indicate that the class should not have a table mapped:

      from sqlalchemy.ext.declarative import declared_attr
      
      class Tablename:
          @declared_attr
          def __tablename__(cls):
              return cls.__name__.lower()
      
      class Person(Tablename, Base):
          id = Column(Integer, primary_key=True)
          discriminator = Column('type', String(50))
          __mapper_args__ = {'polymorphic_on': discriminator}
      
      class Engineer(Person):
          __tablename__ = None
          __mapper_args__ = {'polymorphic_identity': 'engineer'}
          primary_language = Column(String(50))

      Alternatively, you can make the mixin intelligent enough to only return a __tablename__ in the event that no table is already mapped in the inheritance hierarchy. To help with this, a has_inherited_table() helper function is provided that returns True if a parent class already has a mapped table.

      As an example, here’s a mixin that will only allow single table inheritance:

      from sqlalchemy.ext.declarative import declared_attr
      from sqlalchemy.ext.declarative import has_inherited_table
      
      class Tablename(object):
          @declared_attr
          def __tablename__(cls):
              if has_inherited_table(cls):
                  return None
              return cls.__name__.lower()
      
      class Person(Tablename, Base):
          id = Column(Integer, primary_key=True)
          discriminator = Column('type', String(50))
          __mapper_args__ = {'polymorphic_on': discriminator}
      
      class Engineer(Person):
          primary_language = Column(String(50))
          __mapper_args__ = {'polymorphic_identity': 'engineer'}

      If you want to use a similar pattern with a mix of single and joined table inheritance, you would need a slightly different mixin and use it on any joined table child classes in addition to their parent classes:

      from sqlalchemy.ext.declarative import declared_attr
      from sqlalchemy.ext.declarative import has_inherited_table
      
      class Tablename(object):
          @declared_attr
          def __tablename__(cls):
              if (has_inherited_table(cls) and
                  Tablename not in cls.__bases__):
                  return None
              return cls.__name__.lower()
      
      class Person(Tablename, Base):
          id = Column(Integer, primary_key=True)
          discriminator = Column('type', String(50))
          __mapper_args__ = {'polymorphic_on': discriminator}
      
      # This is single table inheritance
      class Engineer(Person):
          primary_language = Column(String(50))
          __mapper_args__ = {'polymorphic_identity': 'engineer'}
      
      # This is joined table inheritance
      class Manager(Tablename, Person):
          id = Column(Integer, ForeignKey('person.id'), primary_key=True)
          preferred_recreation = Column(String(50))
          __mapper_args__ = {'polymorphic_identity': 'manager'}

      Combining Table/Mapper Arguments from Multiple Mixins

      In the case of __table_args__ or __mapper_args__ specified with declarative mixins, you may want to combine some parameters from several mixins with those you wish to define on the class itself. The declared_attr decorator can be used here to create user-defined routines that pull parameters from multiple collections:

      from sqlalchemy.ext.declarative import declared_attr
      
      class MySQLSettings(object):
          __table_args__ = {'mysql_engine':'InnoDB'}
      
      class MyOtherMixin(object):
          __table_args__ = {'info':'foo'}
      
      class MyModel(MySQLSettings, MyOtherMixin, Base):
          __tablename__='my_model'
      
          @declared_attr
          def __table_args__(cls):
              args = dict()
              args.update(MySQLSettings.__table_args__)
              args.update(MyOtherMixin.__table_args__)
              return args
      
          id =  Column(Integer, primary_key=True)
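
      If one of the sources needs the tuple form, the same idea applies; the sketch below (the UniqueConstraint is an illustrative assumption) returns a tuple whose last element is the merged dictionary:

      from sqlalchemy import UniqueConstraint

      class MyOtherModel(MySQLSettings, MyOtherMixin, Base):
          __tablename__ = 'my_other_model'

          @declared_attr
          def __table_args__(cls):
              args = dict()
              args.update(MySQLSettings.__table_args__)
              args.update(MyOtherMixin.__table_args__)
              # positional constraints first, merged keyword arguments last
              return (
                  UniqueConstraint('name'),
                  args
              )

          id = Column(Integer, primary_key=True)
          name = Column(String(50))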

      Creating Indexes with Mixins

      To define a named, potentially multicolumn Index that applies to all tables derived from a mixin, use the “inline” form of Index and establish it as part of __table_args__:

      class MyMixin(object):
          a =  Column(Integer)
          b =  Column(Integer)
      
          @declared_attr
          def __table_args__(cls):
              return (Index('test_idx_%s' % cls.__tablename__, 'a', 'b'),)
      
      class MyModel(MyMixin, Base):
          __tablename__ = 'atable'
          c =  Column(Integer,primary_key=True)

      Special Directives

      __declare_last__()

      The __declare_last__() hook allows definition of a class level function that is automatically called by the MapperEvents.after_configured() event, which occurs after mappings are assumed to be completed and the ‘configure’ step has finished:

      class MyClass(Base):
          @classmethod
          def __declare_last__(cls):
              ""
              # do something with mappings

      New in version 0.7.3.
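A hypothetical sketch, using the hook simply to inspect the completed configuration:

      class Gadget(Base):
          __tablename__ = 'gadget'
          id = Column(Integer, primary_key=True)
          name = Column(String(50))

          @classmethod
          def __declare_last__(cls):
              # runs once after all mappers have been configured; here we
              # only report on the finished mapping
              print("Gadget maps columns: %s" % cls.__table__.c.keys())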

      __abstract__

      __abstract__ causes declarative to skip the production of a table or mapper for the class entirely. A class can be added within a hierarchy in the same way as mixin (see Mixin and Custom Base Classes), allowing subclasses to extend just from the special class:

      class SomeAbstractBase(Base):
          __abstract__ = True
      
          def some_helpful_method(self):
              ""
      
          @declared_attr
          def __mapper_args__(cls):
              return {"helpful mapper arguments":True}
      
      class MyMappedClass(SomeAbstractBase):
          ""

      One possible use of __abstract__ is to use a distinct MetaData for different bases:

      Base = declarative_base()
      
      class DefaultBase(Base):
          __abstract__ = True
          metadata = MetaData()
      
      class OtherBase(Base):
          __abstract__ = True
          metadata = MetaData()

      Above, classes which inherit from DefaultBase will use one MetaData as the registry of tables, and those which inherit from OtherBase will use a different one. The tables themselves can then be created perhaps within distinct databases:

      DefaultBase.metadata.create_all(some_engine)
      OtherBase.metadata.create_all(some_other_engine)

      New in version 0.7.3.

      Class Constructor

      As a convenience feature, the declarative_base() sets a default constructor on classes which takes keyword arguments, and assigns them to the named attributes:

      e = Engineer(primary_language='python')

      Sessions

      Note that declarative does nothing special with sessions, and is only intended as an easier way to configure mappers and Table objects. A typical application setup using scoped_session might look like:

      engine = create_engine('postgresql://scott:tiger@localhost/test')
      Session = scoped_session(sessionmaker(autocommit=False,
                                            autoflush=False,
                                            bind=engine))
      Base = declarative_base()

      Mapped instances then make use of Session in the usual way.
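For example, a minimal sketch continuing from the SomeClass mapping shown earlier, assuming Base.metadata.create_all(engine) has been called to create the tables:

      session = Session()

      someobject = SomeClass(name='foo')
      session.add(someobject)
      session.commit()

      # query in the usual way
      loaded = session.query(SomeClass).filter_by(name='foo').first()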

      API Reference

      sqlalchemy.ext.declarative.declarative_base(bind=None, metadata=None, mapper=None, cls=<type 'object'>, name='Base', constructor=<function __init__ at 0x10d34b8c0>, class_registry=None, metaclass=<class 'sqlalchemy.ext.declarative.api.DeclarativeMeta'>)

      Construct a base class for declarative class definitions.

      The new base class will be given a metaclass that produces appropriate Table objects and makes the appropriate mapper() calls based on the information provided declaratively in the class and any subclasses of the class.

      Parameters:
      • bind – An optional Connectable, will be assigned the bind attribute on the MetaData instance.
      • metadata – An optional MetaData instance. All Table objects implicitly declared by subclasses of the base will share this MetaData. A MetaData instance will be created if none is provided. The MetaData instance will be available via the metadata attribute of the generated declarative base class.
      • mapper – An optional callable, defaults to mapper(). Will be used to map subclasses to their Tables.
      • cls – Defaults to object. A type to use as the base for the generated declarative base class. May be a class or tuple of classes.
      • name – Defaults to Base. The display name for the generated class. Customizing this is not required, but can improve clarity in tracebacks and debugging.
      • constructor – Defaults to _declarative_constructor(), an __init__ implementation that assigns **kwargs for declared fields and relationships to an instance. If None is supplied, no __init__ will be provided and construction will fall back to cls.__init__ by way of the normal Python semantics.
      • class_registry – optional dictionary that will serve as the registry of class names-> mapped classes when string names are used to identify classes inside of relationship() and others. Allows two or more declarative base classes to share the same registry of class names for simplified inter-base relationships.
      • metaclass – Defaults to DeclarativeMeta. A metaclass or __metaclass__ compatible callable to use as the meta type of the generated declarative base class.

      See also

      as_declarative()

      sqlalchemy.ext.declarative.as_declarative(**kw)

      Class decorator for declarative_base().

      Provides a syntactical shortcut to the cls argument sent to declarative_base(), allowing the base class to be converted in-place to a “declarative” base:

      from sqlalchemy.ext.declarative import as_declarative
      
      @as_declarative()
      class Base(object):
          @declared_attr
          def __tablename__(cls):
              return cls.__name__.lower()
          id = Column(Integer, primary_key=True)
      
      class MyMappedClass(Base):
          # ...

      All keyword arguments passed to as_declarative() are passed along to declarative_base().

      New in version 0.8.3.

      class sqlalchemy.ext.declarative.declared_attr(fget, *arg, **kw)

      Bases: sqlalchemy.orm.interfaces._MappedAttribute, __builtin__.property

      Mark a class-level method as representing the definition of a mapped property or special declarative member name.

      @declared_attr turns the attribute into a scalar-like property that can be invoked from the uninstantiated class. Declarative treats attributes specifically marked with @declared_attr as returning a construct that is specific to mapping or declarative table configuration. The attribute receives the name that a non-dynamic version of the attribute would have had.

      @declared_attr is more often than not applicable to mixins, to define relationships that are to be applied to different implementors of the class:

      class ProvidesUser(object):
          "A mixin that adds a 'user' relationship to classes."
      
          @declared_attr
          def user(self):
              return relationship("User")

      It also can be applied to mapped classes, such as to provide a “polymorphic” scheme for inheritance:

      class Employee(Base):
          id = Column(Integer, primary_key=True)
          type = Column(String(50), nullable=False)
      
          @declared_attr
          def __tablename__(cls):
              return cls.__name__.lower()
      
          @declared_attr
          def __mapper_args__(cls):
              if cls.__name__ == 'Employee':
                  return {
                          "polymorphic_on":cls.type,
                          "polymorphic_identity":"Employee"
                  }
              else:
                  return {"polymorphic_identity":cls.__name__}

      Changed in version 0.8: declared_attr can be used with non-ORM or extension attributes, such as user-defined attributes or association_proxy() objects, which will be assigned to the class at class construction time.

      sqlalchemy.ext.declarative.api._declarative_constructor(self, **kwargs)

      A simple constructor that allows initialization from kwargs.

      Sets attributes on the constructed instance using the names and values in kwargs.

      Only keys that are present as attributes of the instance’s class are allowed. These could be, for example, any mapped columns or relationships.
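A short sketch of the behavior, reusing the SomeClass mapping from the synopsis; the invalid keyword name is of course made up:

      ok = SomeClass(name='ed')      # "name" is a mapped attribute

      SomeClass(nonexistent='ed')    # raises TypeError: "nonexistent" is
                                     # not an attribute of SomeClass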

      sqlalchemy.ext.declarative.has_inherited_table(cls)

      Given a class, return True if any of the classes it inherits from has a mapped table, otherwise return False.

      sqlalchemy.ext.declarative.synonym_for(name, map_column=False)

      Decorator, make a Python @property a query synonym for a column.

      A decorator version of synonym(). The function being decorated is passed as the ‘descriptor’; remaining arguments are passed through to synonym():

      @synonym_for('col')
      @property
      def prop(self):
          return 'special sauce'

      The regular synonym() is also usable directly in a declarative setting and may be convenient for read/write properties:

      prop = synonym('col', descriptor=property(_read_prop, _write_prop))

      sqlalchemy.ext.declarative.comparable_using(comparator_factory)

      Decorator, allow a Python @property to be used in query criteria.

      This is a decorator front end to comparable_property() that passes through the comparator_factory and the function being decorated:

      @comparable_using(MyComparatorType)
      @property
      def prop(self):
          return 'special sauce'

      The regular comparable_property() is also usable directly in a declarative setting and may be convenient for read/write properties:

      prop = comparable_property(MyComparatorType)

      sqlalchemy.ext.declarative.instrument_declarative(cls, registry, metadata)

      Given a class, configure the class declaratively, using the given registry, which can be any dictionary, and MetaData object.
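A minimal sketch of applying it to a plain class outside of any declarative base; the names here are illustrative:

      from sqlalchemy import Column, Integer, String, MetaData
      from sqlalchemy.ext.declarative import instrument_declarative

      class Plain(object):
          __tablename__ = 'plain'
          id = Column(Integer, primary_key=True)
          name = Column(String(50))

      registry = {}
      metadata = MetaData()

      # configure Plain declaratively against the given registry and MetaData
      instrument_declarative(Plain, registry, metadata)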

      class sqlalchemy.ext.declarative.AbstractConcreteBase

      Bases: sqlalchemy.ext.declarative.api.ConcreteBase

      A helper class for ‘concrete’ declarative mappings.

      AbstractConcreteBase will use the polymorphic_union() function automatically, against all tables mapped as a subclass to this class. The function is called via the __declare_last__() function, which is essentially a hook for the MapperEvents.after_configured() event.

      AbstractConcreteBase does not produce a mapped table for the class itself. Compare to ConcreteBase, which does.

      Example:

      from sqlalchemy.ext.declarative import AbstractConcreteBase
      
      class Employee(AbstractConcreteBase, Base):
          pass
      
      class Manager(Employee):
          __tablename__ = 'manager'
          employee_id = Column(Integer, primary_key=True)
          name = Column(String(50))
          manager_data = Column(String(40))
          __mapper_args__ = {
                          'polymorphic_identity':'manager',
                          'concrete':True}

      class sqlalchemy.ext.declarative.ConcreteBase

      A helper class for ‘concrete’ declarative mappings.

      ConcreteBase will use the polymorphic_union() function automatically, against all tables mapped as a subclass to this class. The function is called via the __declare_last__() function, which is essentially a hook for the MapperEvents.after_configured() event.

      ConcreteBase produces a mapped table for the class itself. Compare to AbstractConcreteBase, which does not.

      Example:

      from sqlalchemy.ext.declarative import ConcreteBase
      
      class Employee(ConcreteBase, Base):
          __tablename__ = 'employee'
          employee_id = Column(Integer, primary_key=True)
          name = Column(String(50))
          __mapper_args__ = {
                          'polymorphic_identity':'employee',
                          'concrete':True}
      
      class Manager(Employee):
          __tablename__ = 'manager'
          employee_id = Column(Integer, primary_key=True)
          name = Column(String(50))
          manager_data = Column(String(40))
          __mapper_args__ = {
                          'polymorphic_identity':'manager',
                          'concrete':True}

      class sqlalchemy.ext.declarative.DeferredReflection

      A helper class for construction of mappings based on a deferred reflection step.

      Normally, declarative can be used with reflection by setting a Table object using autoload=True as the __table__ attribute on a declarative class. The caveat is that the Table must be fully reflected, or at the very least have a primary key column, at the point at which a normal declarative mapping is constructed, meaning the Engine must be available at class declaration time.

      The DeferredReflection mixin moves the construction of mappers to be at a later point, after a specific method is called which first reflects all Table objects created so far. Classes can define it as such:

      from sqlalchemy.ext.declarative import declarative_base
      from sqlalchemy.ext.declarative import DeferredReflection
      Base = declarative_base()
      
      class MyClass(DeferredReflection, Base):
          __tablename__ = 'mytable'

      Above, MyClass is not yet mapped. After a series of classes have been defined in the above fashion, all tables can be reflected and mappings created using DeferredReflection.prepare():

      engine = create_engine("someengine://...")
      DeferredReflection.prepare(engine)

      The DeferredReflection mixin can be applied to individual classes, used as the base for the declarative base itself, or used in a custom abstract class. Using an abstract base allows only a subset of classes to be prepared for a particular prepare step, which is necessary for applications that use more than one engine. For example, if an application has two engines, you might use two bases, and prepare each separately, e.g.:

      class ReflectedOne(DeferredReflection, Base):
          __abstract__ = True
      
      class ReflectedTwo(DeferredReflection, Base):
          __abstract__ = True
      
      class MyClass(ReflectedOne):
          __tablename__ = 'mytable'
      
      class MyOtherClass(ReflectedOne):
          __tablename__ = 'myothertable'
      
      class YetAnotherClass(ReflectedTwo):
          __tablename__ = 'yetanothertable'
      
      # ... etc.

      Above, the class hierarchies for ReflectedOne and ReflectedTwo can be configured separately:

      ReflectedOne.prepare(engine_one)
      ReflectedTwo.prepare(engine_two)

      New in version 0.8.

      SQLAlchemy-0.8.4/doc/orm/extensions/horizontal_shard.html

      Horizontal Sharding — SQLAlchemy 0.8 Documentation

      SQLAlchemy 0.8 Documentation

      Release: 0.8.4 | Release Date: December 8, 2013

      Horizontal Sharding

      Horizontal sharding support.

      Defines a rudimentary ‘horizontal sharding’ system which allows a Session to distribute queries and persistence operations across multiple databases.

      For a usage example, see the Horizontal Sharding example included in the source distribution.

      API Documentation

      class sqlalchemy.ext.horizontal_shard.ShardedSession(shard_chooser, id_chooser, query_chooser, shards=None, query_cls=<class 'sqlalchemy.ext.horizontal_shard.ShardedQuery'>, **kwargs)

      Bases: sqlalchemy.orm.session.Session

      __init__(shard_chooser, id_chooser, query_chooser, shards=None, query_cls=<class 'sqlalchemy.ext.horizontal_shard.ShardedQuery'>, **kwargs)

      Construct a ShardedSession.

      Parameters:
      • shard_chooser – A callable which, passed a Mapper, a mapped instance, and possibly a SQL clause, returns a shard ID. This id may be based off of the attributes present within the object, or on some round-robin scheme. If the scheme is based on a selection, it should set whatever state on the instance to mark it in the future as participating in that shard.
      • id_chooser – A callable, passed a query and a tuple of identity values, which should return a list of shard ids where the ID might reside. The databases will be queried in the order of this listing.
      • query_chooser – For a given Query, returns the list of shard_ids where the query should be issued. Results from all shards returned will be combined together into a single listing.
      • shards – A dictionary of string shard names to Engine objects.
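
      Given the parameters above, a minimal construction sketch might look like the following; the engine URLs and chooser logic are placeholder assumptions only:

      from sqlalchemy import create_engine
      from sqlalchemy.ext.horizontal_shard import ShardedSession

      shards = {
          'shard_a': create_engine('sqlite:///shard_a.db'),
          'shard_b': create_engine('sqlite:///shard_b.db')
      }

      def shard_chooser(mapper, instance, clause=None):
          # naive placeholder: send all new objects to one shard
          return 'shard_a'

      def id_chooser(query, ident):
          # an identity could live on any shard; search both, in order
          return ['shard_a', 'shard_b']

      def query_chooser(query):
          # issue queries against all shards; results are combined
          return ['shard_a', 'shard_b']

      session = ShardedSession(
          shard_chooser=shard_chooser,
          id_chooser=id_chooser,
          query_chooser=query_chooser,
          shards=shards
      )
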
      class sqlalchemy.ext.horizontal_shard.ShardedQuery(*args, **kwargs)

      Bases: sqlalchemy.orm.query.Query

      set_shard(shard_id)

      return a new query, limited to a single shard ID.

      all subsequent operations with the returned query will be against the single shard regardless of other state.
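A brief sketch, where session is a ShardedSession as above and SomeClass is any mapped class in this hypothetical setup:

      # pin the query to a single shard; all subsequent operations on "q"
      # hit only that shard
      q = session.query(SomeClass).set_shard("shard_a")
      results = q.all()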

      SQLAlchemy-0.8.4/doc/orm/extensions/hybrid.html

      Hybrid Attributes — SQLAlchemy 0.8 Documentation

      SQLAlchemy 0.8 Documentation

      Release: 0.8.4 | Release Date: December 8, 2013

      Hybrid Attributes

      Define attributes on ORM-mapped classes that have “hybrid” behavior.

      “hybrid” means the attribute has distinct behaviors defined at the class level and at the instance level.

      The hybrid extension provides a special form of method decorator, is around 50 lines of code, and has almost no dependencies on the rest of SQLAlchemy. It can, in theory, work with any descriptor-based expression system.

      Consider a mapping Interval, representing integer start and end values. We can define higher level functions on mapped classes that produce SQL expressions at the class level, and Python expression evaluation at the instance level. Below, each function decorated with hybrid_method or hybrid_property may receive self as an instance of the class, or as the class itself:

      from sqlalchemy import Column, Integer
      from sqlalchemy.ext.declarative import declarative_base
      from sqlalchemy.orm import Session, aliased
      from sqlalchemy.ext.hybrid import hybrid_property, hybrid_method
      
      Base = declarative_base()
      
      class Interval(Base):
          __tablename__ = 'interval'
      
          id = Column(Integer, primary_key=True)
          start = Column(Integer, nullable=False)
          end = Column(Integer, nullable=False)
      
          def __init__(self, start, end):
              self.start = start
              self.end = end
      
          @hybrid_property
          def length(self):
              return self.end - self.start
      
          @hybrid_method
          def contains(self,point):
              return (self.start <= point) & (point < self.end)
      
          @hybrid_method
          def intersects(self, other):
              return self.contains(other.start) | self.contains(other.end)

      Above, the length property returns the difference between the end and start attributes. With an instance of Interval, this subtraction occurs in Python, using normal Python descriptor mechanics:

      >>> i1 = Interval(5, 10)
      >>> i1.length
      5

      When dealing with the Interval class itself, the hybrid_property descriptor evaluates the function body given the Interval class as the argument, which when evaluated with SQLAlchemy expression mechanics returns a new SQL expression:

      >>> print Interval.length
      interval."end" - interval.start
      
      >>> print Session().query(Interval).filter(Interval.length > 10)
      SELECT interval.id AS interval_id, interval.start AS interval_start,
      interval."end" AS interval_end
      FROM interval
      WHERE interval."end" - interval.start > :param_1

      ORM methods such as filter_by() generally use getattr() to locate attributes, so can also be used with hybrid attributes:

      >>> print Session().query(Interval).filter_by(length=5)
      SELECT interval.id AS interval_id, interval.start AS interval_start,
      interval."end" AS interval_end
      FROM interval
      WHERE interval."end" - interval.start = :param_1

      The Interval class example also illustrates two methods, contains() and intersects(), decorated with hybrid_method. This decorator applies the same idea to methods that hybrid_property applies to attributes. The methods return boolean values, and take advantage of the Python | and & bitwise operators to produce equivalent instance-level and SQL expression-level boolean behavior:

      >>> i1.contains(6)
      True
      >>> i1.contains(15)
      False
      >>> i1.intersects(Interval(7, 18))
      True
      >>> i1.intersects(Interval(25, 29))
      False
      
      >>> print Session().query(Interval).filter(Interval.contains(15))
      SELECT interval.id AS interval_id, interval.start AS interval_start,
      interval."end" AS interval_end
      FROM interval
      WHERE interval.start <= :start_1 AND interval."end" > :end_1
      
      >>> ia = aliased(Interval)
      >>> print Session().query(Interval, ia).filter(Interval.intersects(ia))
      SELECT interval.id AS interval_id, interval.start AS interval_start,
      interval."end" AS interval_end, interval_1.id AS interval_1_id,
      interval_1.start AS interval_1_start, interval_1."end" AS interval_1_end
      FROM interval, interval AS interval_1
      WHERE interval.start <= interval_1.start
          AND interval."end" > interval_1.start
          OR interval.start <= interval_1."end"
          AND interval."end" > interval_1."end"

      Defining Expression Behavior Distinct from Attribute Behavior

      Our usage of the & and | bitwise operators above was fortunate, considering our functions operated on two boolean values to return a new one. In many cases, the construction of an in-Python function and a SQLAlchemy SQL expression have enough differences that two separate Python expressions should be defined. The hybrid decorators define the hybrid_property.expression() modifier for this purpose. As an example we’ll define the radius of the interval, which requires the usage of the absolute value function:

      from sqlalchemy import func
      
      class Interval(object):
          # ...
      
          @hybrid_property
          def radius(self):
              return abs(self.length) / 2
      
          @radius.expression
          def radius(cls):
              return func.abs(cls.length) / 2

Above, the Python function abs() is used for instance-level operations, while the SQL function ABS() is used via the func object for class-level expressions:

      >>> i1.radius
      2
      
      >>> print Session().query(Interval).filter(Interval.radius > 5)
      SELECT interval.id AS interval_id, interval.start AS interval_start,
          interval."end" AS interval_end
      FROM interval
      WHERE abs(interval."end" - interval.start) / :abs_1 > :param_1

      Defining Setters

Hybrid properties can also define setter methods. Suppose we wanted length above, when set, to modify the endpoint value:

      class Interval(object):
          # ...
      
          @hybrid_property
          def length(self):
              return self.end - self.start
      
          @length.setter
          def length(self, value):
              self.end = self.start + value

      The length(self, value) method is now called upon set:

      >>> i1 = Interval(5, 10)
      >>> i1.length
      5
      >>> i1.length = 12
      >>> i1.end
      17

      Working with Relationships

There’s no essential difference when creating hybrids that work with related objects as opposed to column-based data. The need for distinct expressions tends to be greater. The two variants we’ll illustrate are the “join-dependent” hybrid and the “correlated subquery” hybrid.

      Join-Dependent Relationship Hybrid

      Consider the following declarative mapping which relates a User to a SavingsAccount:

      from sqlalchemy import Column, Integer, ForeignKey, Numeric, String
      from sqlalchemy.orm import relationship
      from sqlalchemy.ext.declarative import declarative_base
      from sqlalchemy.ext.hybrid import hybrid_property
      
      Base = declarative_base()
      
      class SavingsAccount(Base):
          __tablename__ = 'account'
          id = Column(Integer, primary_key=True)
          user_id = Column(Integer, ForeignKey('user.id'), nullable=False)
          balance = Column(Numeric(15, 5))
      
      class User(Base):
          __tablename__ = 'user'
          id = Column(Integer, primary_key=True)
          name = Column(String(100), nullable=False)
      
          accounts = relationship("SavingsAccount", backref="owner")
      
          @hybrid_property
          def balance(self):
              if self.accounts:
                  return self.accounts[0].balance
              else:
                  return None
      
          @balance.setter
          def balance(self, value):
              if not self.accounts:
            account = SavingsAccount(owner=self)
              else:
                  account = self.accounts[0]
              account.balance = value
      
          @balance.expression
          def balance(cls):
              return SavingsAccount.balance

      The above hybrid property balance works with the first SavingsAccount entry in the list of accounts for this user. The in-Python getter/setter methods can treat accounts as a Python list available on self.

      However, at the expression level, it’s expected that the User class will be used in an appropriate context such that an appropriate join to SavingsAccount will be present:

      >>> print Session().query(User, User.balance).\
      ...     join(User.accounts).filter(User.balance > 5000)
      SELECT "user".id AS user_id, "user".name AS user_name,
      account.balance AS account_balance
      FROM "user" JOIN account ON "user".id = account.user_id
      WHERE account.balance > :balance_1

Note, however, that while the instance-level accessors need to worry about whether self.accounts is even present, this issue expresses itself differently at the SQL expression level, where we basically would use an outer join:

      >>> from sqlalchemy import or_
      >>> print (Session().query(User, User.balance).outerjoin(User.accounts).
      ...         filter(or_(User.balance < 5000, User.balance == None)))
      SELECT "user".id AS user_id, "user".name AS user_name,
      account.balance AS account_balance
      FROM "user" LEFT OUTER JOIN account ON "user".id = account.user_id
WHERE account.balance < :balance_1 OR account.balance IS NULL

      Correlated Subquery Relationship Hybrid

      We can, of course, forego being dependent on the enclosing query’s usage of joins in favor of the correlated subquery, which can portably be packed into a single column expression. A correlated subquery is more portable, but often performs more poorly at the SQL level. Using the same technique illustrated at Using column_property, we can adjust our SavingsAccount example to aggregate the balances for all accounts, and use a correlated subquery for the column expression:

      from sqlalchemy import Column, Integer, ForeignKey, Numeric, String
      from sqlalchemy.orm import relationship
      from sqlalchemy.ext.declarative import declarative_base
      from sqlalchemy.ext.hybrid import hybrid_property
      from sqlalchemy import select, func
      
      Base = declarative_base()
      
      class SavingsAccount(Base):
          __tablename__ = 'account'
          id = Column(Integer, primary_key=True)
          user_id = Column(Integer, ForeignKey('user.id'), nullable=False)
          balance = Column(Numeric(15, 5))
      
      class User(Base):
          __tablename__ = 'user'
          id = Column(Integer, primary_key=True)
          name = Column(String(100), nullable=False)
      
          accounts = relationship("SavingsAccount", backref="owner")
      
          @hybrid_property
          def balance(self):
              return sum(acc.balance for acc in self.accounts)
      
          @balance.expression
          def balance(cls):
              return select([func.sum(SavingsAccount.balance)]).\
                      where(SavingsAccount.user_id==cls.id).\
                      label('total_balance')

      The above recipe will give us the balance column which renders a correlated SELECT:

>>> print Session().query(User).filter(User.balance > 400)
      SELECT "user".id AS user_id, "user".name AS user_name
      FROM "user"
      WHERE (SELECT sum(account.balance) AS sum_1
      FROM account
      WHERE account.user_id = "user".id) > :param_1

      Building Custom Comparators

      The hybrid property also includes a helper that allows construction of custom comparators. A comparator object allows one to customize the behavior of each SQLAlchemy expression operator individually. They are useful when creating custom types that have some highly idiosyncratic behavior on the SQL side.

      The example class below allows case-insensitive comparisons on the attribute named word_insensitive:

      from sqlalchemy.ext.hybrid import Comparator, hybrid_property
      from sqlalchemy import func, Column, Integer, String
      from sqlalchemy.orm import Session
      from sqlalchemy.ext.declarative import declarative_base
      
      Base = declarative_base()
      
      class CaseInsensitiveComparator(Comparator):
          def __eq__(self, other):
              return func.lower(self.__clause_element__()) == func.lower(other)
      
      class SearchWord(Base):
          __tablename__ = 'searchword'
          id = Column(Integer, primary_key=True)
          word = Column(String(255), nullable=False)
      
          @hybrid_property
          def word_insensitive(self):
              return self.word.lower()
      
          @word_insensitive.comparator
          def word_insensitive(cls):
              return CaseInsensitiveComparator(cls.word)

      Above, SQL expressions against word_insensitive will apply the LOWER() SQL function to both sides:

      >>> print Session().query(SearchWord).filter_by(word_insensitive="Trucks")
      SELECT searchword.id AS searchword_id, searchword.word AS searchword_word
      FROM searchword
      WHERE lower(searchword.word) = lower(:lower_1)

      The CaseInsensitiveComparator above implements part of the ColumnOperators interface. A “coercion” operation like lowercasing can be applied to all comparison operations (i.e. eq, lt, gt, etc.) using Operators.operate():

      class CaseInsensitiveComparator(Comparator):
          def operate(self, op, other):
              return op(func.lower(self.__clause_element__()), func.lower(other))

      Hybrid Value Objects

      Note in our previous example, if we were to compare the word_insensitive attribute of a SearchWord instance to a plain Python string, the plain Python string would not be coerced to lower case - the CaseInsensitiveComparator we built, being returned by @word_insensitive.comparator, only applies to the SQL side.

      A more comprehensive form of the custom comparator is to construct a Hybrid Value Object. This technique applies the target value or expression to a value object which is then returned by the accessor in all cases. The value object allows control of all operations upon the value as well as how compared values are treated, both on the SQL expression side as well as the Python value side. Replacing the previous CaseInsensitiveComparator class with a new CaseInsensitiveWord class:

      class CaseInsensitiveWord(Comparator):
          "Hybrid value representing a lower case representation of a word."
      
          def __init__(self, word):
              if isinstance(word, basestring):
                  self.word = word.lower()
              elif isinstance(word, CaseInsensitiveWord):
                  self.word = word.word
              else:
                  self.word = func.lower(word)
      
          def operate(self, op, other):
              if not isinstance(other, CaseInsensitiveWord):
                  other = CaseInsensitiveWord(other)
              return op(self.word, other.word)
      
          def __clause_element__(self):
              return self.word
      
          def __str__(self):
              return self.word
      
          key = 'word'
          "Label to apply to Query tuple results"

      Above, the CaseInsensitiveWord object represents self.word, which may be a SQL function, or may be a Python native. By overriding operate() and __clause_element__() to work in terms of self.word, all comparison operations will work against the “converted” form of word, whether it be SQL side or Python side. Our SearchWord class can now deliver the CaseInsensitiveWord object unconditionally from a single hybrid call:

      class SearchWord(Base):
          __tablename__ = 'searchword'
          id = Column(Integer, primary_key=True)
          word = Column(String(255), nullable=False)
      
          @hybrid_property
          def word_insensitive(self):
              return CaseInsensitiveWord(self.word)

      The word_insensitive attribute now has case-insensitive comparison behavior universally, including SQL expression vs. Python expression (note the Python value is converted to lower case on the Python side here):

      >>> print Session().query(SearchWord).filter_by(word_insensitive="Trucks")
      SELECT searchword.id AS searchword_id, searchword.word AS searchword_word
      FROM searchword
      WHERE lower(searchword.word) = :lower_1

      SQL expression versus SQL expression:

      >>> sw1 = aliased(SearchWord)
      >>> sw2 = aliased(SearchWord)
      >>> print Session().query(
      ...                    sw1.word_insensitive,
      ...                    sw2.word_insensitive).\
      ...                        filter(
      ...                            sw1.word_insensitive > sw2.word_insensitive
      ...                        )
      SELECT lower(searchword_1.word) AS lower_1,
      lower(searchword_2.word) AS lower_2
      FROM searchword AS searchword_1, searchword AS searchword_2
      WHERE lower(searchword_1.word) > lower(searchword_2.word)

      Python only expression:

      >>> ws1 = SearchWord(word="SomeWord")
      >>> ws1.word_insensitive == "sOmEwOrD"
      True
      >>> ws1.word_insensitive == "XOmEwOrX"
      False
      >>> print ws1.word_insensitive
      someword

      The Hybrid Value pattern is very useful for any kind of value that may have multiple representations, such as timestamps, time deltas, units of measurement, currencies and encrypted passwords.
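As an additional, hedged sketch of this idea (all names below are hypothetical and not part of SQLAlchemy; the declarative Base is assumed from the earlier examples), a value object might normalize a monetary amount to integer cents so that comparisons behave the same on the Python side and the SQL side:

from decimal import Decimal

from sqlalchemy import Column, Integer, Numeric
from sqlalchemy.ext.hybrid import Comparator, hybrid_property

class MoneyValue(Comparator):
    "Hybrid value representing a monetary amount normalized to integer cents."

    def __init__(self, amount):
        if isinstance(amount, MoneyValue):
            self.cents = amount.cents
        elif isinstance(amount, (int, float, Decimal)):
            # plain Python number: convert to cents in Python
            self.cents = int(round(amount * 100))
        else:
            # SQL expression: perform the conversion in SQL
            self.cents = amount * 100

    def operate(self, op, other):
        if not isinstance(other, MoneyValue):
            other = MoneyValue(other)
        return op(self.cents, other.cents)

    def __clause_element__(self):
        return self.cents

    key = 'cents'

class LineItem(Base):
    __tablename__ = 'line_item'
    id = Column(Integer, primary_key=True)
    amount = Column(Numeric(10, 2), nullable=False)

    @hybrid_property
    def amount_value(self):
        return MoneyValue(self.amount)

An instance comparison such as item.amount_value == 19.99 compares integer cents on the Python side, while LineItem.amount_value == 19.99 renders the same normalization arithmetic in SQL.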

      See also

      Hybrids and Value Agnostic Types - on the techspot.zzzeek.org blog

      Value Agnostic Types, Part II - on the techspot.zzzeek.org blog

      Building Transformers

      A transformer is an object which can receive a Query object and return a new one. The Query object includes a method with_transformation() that returns a new Query transformed by the given function.
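Before combining this with the Comparator class below, a minimal sketch of with_transformation() using a plain function (reusing the Interval mapping from earlier in this chapter) illustrates the mechanism on its own:

def only_long_intervals(q):
    # a trivial transformer: add a criterion and return the new Query
    return q.filter(Interval.length > 10)

session = Session()
q = session.query(Interval).with_transformation(only_long_intervals)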

      We can combine this with the Comparator class to produce one type of recipe which can both set up the FROM clause of a query as well as assign filtering criterion.

Consider a mapped class Node, which assembles itself into a hierarchical tree pattern using an adjacency list relationship:

      from sqlalchemy import Column, Integer, ForeignKey
      from sqlalchemy.orm import relationship
      from sqlalchemy.ext.declarative import declarative_base
      Base = declarative_base()
      
      class Node(Base):
          __tablename__ = 'node'
    id = Column(Integer, primary_key=True)
          parent_id = Column(Integer, ForeignKey('node.id'))
          parent = relationship("Node", remote_side=id)

      Suppose we wanted to add an accessor grandparent. This would return the parent of Node.parent. When we have an instance of Node, this is simple:

      from sqlalchemy.ext.hybrid import hybrid_property
      
      class Node(Base):
          # ...
      
          @hybrid_property
          def grandparent(self):
              return self.parent.parent

      For the expression, things are not so clear. We’d need to construct a Query where we join() twice along Node.parent to get to the grandparent. We can instead return a transforming callable that we’ll combine with the Comparator class to receive any Query object, and return a new one that’s joined to the Node.parent attribute and filtered based on the given criterion:

      from sqlalchemy.ext.hybrid import Comparator
      
      class GrandparentTransformer(Comparator):
          def operate(self, op, other):
              def transform(q):
                  cls = self.__clause_element__()
                  parent_alias = aliased(cls)
                  return q.join(parent_alias, cls.parent).\
                              filter(op(parent_alias.parent, other))
              return transform
      
      Base = declarative_base()
      
      class Node(Base):
          __tablename__ = 'node'
    id = Column(Integer, primary_key=True)
          parent_id = Column(Integer, ForeignKey('node.id'))
          parent = relationship("Node", remote_side=id)
      
          @hybrid_property
          def grandparent(self):
              return self.parent.parent
      
          @grandparent.comparator
          def grandparent(cls):
              return GrandparentTransformer(cls)

The GrandparentTransformer overrides the core Operators.operate() method at the base of the Comparator hierarchy to return a query-transforming callable, which then runs the given comparison operation in a particular context. For example, in the usage below, the operate() method is called with the Operators.eq callable as well as the right side of the comparison, Node(id=5). A function transform is then returned which will transform a Query: first joining to Node.parent, then comparing parent_alias using Operators.eq against the left and right sides, passing the result into Query.filter():

      >>> from sqlalchemy.orm import Session
      >>> session = Session()
>>> session.query(Node).\
      ...        with_transformation(Node.grandparent==Node(id=5)).\
      ...        all()
      

      We can modify the pattern to be more verbose but flexible by separating the “join” step from the “filter” step. The tricky part here is ensuring that successive instances of GrandparentTransformer use the same AliasedClass object against Node. Below we use a simple memoizing approach that associates a GrandparentTransformer with each class:

      class Node(Base):
      
          # ...
      
          @grandparent.comparator
          def grandparent(cls):
              # memoize a GrandparentTransformer
              # per class
              if '_gp' not in cls.__dict__:
                  cls._gp = GrandparentTransformer(cls)
              return cls._gp
      
      class GrandparentTransformer(Comparator):
      
          def __init__(self, cls):
              self.parent_alias = aliased(cls)
      
          @property
          def join(self):
              def go(q):
                  return q.join(self.parent_alias, Node.parent)
              return go
      
          def operate(self, op, other):
              return op(self.parent_alias.parent, other)

>>> session.query(Node).\
      ...            with_transformation(Node.grandparent.join).\
      ...            filter(Node.grandparent==Node(id=5))
      

      The “transformer” pattern is an experimental pattern that starts to make usage of some functional programming paradigms. While it’s only recommended for advanced and/or patient developers, there’s probably a whole lot of amazing things it can be used for.

      API Reference

      class sqlalchemy.ext.hybrid.hybrid_method(func, expr=None)

      Bases: sqlalchemy.orm.interfaces._InspectionAttr

      A decorator which allows definition of a Python object method with both instance-level and class-level behavior.

      __init__(func, expr=None)

      Create a new hybrid_method.

      Usage is typically via decorator:

      from sqlalchemy.ext.hybrid import hybrid_method
      
      class SomeClass(object):
          @hybrid_method
          def value(self, x, y):
              return self._value + x + y
      
          @value.expression
          def value(self, x, y):
              return func.some_function(self._value, x, y)
      expression(expr)

      Provide a modifying decorator that defines a SQL-expression producing method.

      class sqlalchemy.ext.hybrid.hybrid_property(fget, fset=None, fdel=None, expr=None)

      Bases: sqlalchemy.orm.interfaces._InspectionAttr

      A decorator which allows definition of a Python descriptor with both instance-level and class-level behavior.

      __init__(fget, fset=None, fdel=None, expr=None)

      Create a new hybrid_property.

      Usage is typically via decorator:

      from sqlalchemy.ext.hybrid import hybrid_property
      
      class SomeClass(object):
          @hybrid_property
          def value(self):
              return self._value
      
          @value.setter
          def value(self, value):
              self._value = value
      comparator(comparator)

      Provide a modifying decorator that defines a custom comparator producing method.

      The return value of the decorated method should be an instance of Comparator.

      deleter(fdel)

      Provide a modifying decorator that defines a value-deletion method.
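
Usage mirrors that of setter(); a brief sketch extending the SomeClass example shown above:

class SomeClass(object):
    @hybrid_property
    def value(self):
        return self._value

    @value.deleter
    def value(self):
        del self._value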

      expression(expr)

      Provide a modifying decorator that defines a SQL-expression producing method.

      setter(fset)

      Provide a modifying decorator that defines a value-setter method.

      class sqlalchemy.ext.hybrid.Comparator(expression)

      Bases: sqlalchemy.orm.interfaces.PropComparator

      A helper class that allows easy construction of custom PropComparator classes for usage with hybrids.

sqlalchemy.ext.hybrid.HYBRID_METHOD = <symbol 'HYBRID_METHOD'>
sqlalchemy.ext.hybrid.HYBRID_PROPERTY = <symbol 'HYBRID_PROPERTY'>
SQLAlchemy-0.8.4/doc/orm/extensions/index.html: ORM Extensions — SQLAlchemy 0.8 Documentation


      ORM Extensions

      SQLAlchemy has a variety of ORM extensions available, which add additional functionality to the core behavior.

      The extensions build almost entirely on public core and ORM APIs and users should be encouraged to read their source code to further their understanding of their behavior. In particular the “Horizontal Sharding”, “Hybrid Attributes”, and “Mutation Tracking” extensions are very succinct.

SQLAlchemy-0.8.4/doc/orm/extensions/instrumentation.html: Alternate Class Instrumentation — SQLAlchemy 0.8 Documentation


Alternate Class Instrumentation

      Extensible class instrumentation.

      The sqlalchemy.ext.instrumentation package provides for alternate systems of class instrumentation within the ORM. Class instrumentation refers to how the ORM places attributes on the class which maintain data and track changes to that data, as well as event hooks installed on the class.

      Note

      The extension package is provided for the benefit of integration with other object management packages, which already perform their own instrumentation. It is not intended for general use.

      For examples of how the instrumentation extension is used, see the example Attribute Instrumentation.

Changed in version 0.8: The sqlalchemy.orm.instrumentation module was split out so that all functionality having to do with non-standard instrumentation was moved out to sqlalchemy.ext.instrumentation. When imported, the module installs itself within sqlalchemy.orm.instrumentation so that it takes effect, including recognition of __sa_instrumentation_manager__ on mapped classes, as well as instrumentation_finders being used to determine class instrumentation resolution.

      API Reference

      sqlalchemy.ext.instrumentation.INSTRUMENTATION_MANAGER = '__sa_instrumentation_manager__'

An attribute which, when present on a mapped class, elects custom instrumentation for that class.

      Allows a class to specify a slightly or wildly different technique for tracking changes made to mapped attributes and collections.

      Only one instrumentation implementation is allowed in a given object inheritance hierarchy.

      The value of this attribute must be a callable and will be passed a class object. The callable must return one of:

      • An instance of an InstrumentationManager or subclass
      • An object implementing all or some of InstrumentationManager (TODO)
      • A dictionary of callables, implementing all or some of the above (TODO)
      • An instance of a ClassManager or subclass

      This attribute is consulted by SQLAlchemy instrumentation resolution, once the sqlalchemy.ext.instrumentation module has been imported. If custom finders are installed in the global instrumentation_finders list, they may or may not choose to honor this attribute.
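
A brief sketch of electing custom instrumentation this way; the manager subclass shown is hypothetical and does nothing beyond the defaults:

from sqlalchemy.ext.instrumentation import InstrumentationManager

class MyClassManager(InstrumentationManager):
    # override hooks such as instrument_attribute() or
    # install_descriptor() here as needed
    pass

class MyClass(object):
    # the class is itself a callable which, passed the mapped class,
    # returns an InstrumentationManager instance, satisfying the
    # contract described above
    __sa_instrumentation_manager__ = MyClassManager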

      class sqlalchemy.ext.instrumentation.InstrumentationManager(class_)

      User-defined class instrumentation extension.

      InstrumentationManager can be subclassed in order to change how class instrumentation proceeds. This class exists for the purposes of integration with other object management frameworks which would like to entirely modify the instrumentation methodology of the ORM, and is not intended for regular usage. For interception of class instrumentation events, see InstrumentationEvents.

      The API for this class should be considered as semi-stable, and may change slightly with new releases.

      Changed in version 0.8: InstrumentationManager was moved from sqlalchemy.orm.instrumentation to sqlalchemy.ext.instrumentation.

      dict_getter(class_)
      dispose(class_, manager)
      get_instance_dict(class_, instance)
      initialize_instance_dict(class_, instance)
      install_descriptor(class_, key, inst)
      install_member(class_, key, implementation)
      install_state(class_, instance, state)
      instrument_attribute(class_, key, inst)
      instrument_collection_class(class_, key, collection_class)
      manage(class_, manager)
      manager_getter(class_)
      post_configure_attribute(class_, key, inst)
      remove_state(class_, instance)
      state_getter(class_)
      uninstall_descriptor(class_, key)
      uninstall_member(class_, key)
      sqlalchemy.ext.instrumentation.instrumentation_finders = [<function find_native_user_instrumentation_hook at 0x10dae8668>]

      An extensible sequence of callables which return instrumentation implementations

      When a class is registered, each callable will be passed a class object. If None is returned, the next finder in the sequence is consulted. Otherwise the return must be an instrumentation factory that follows the same guidelines as sqlalchemy.ext.instrumentation.INSTRUMENTATION_MANAGER.

      By default, the only finder is find_native_user_instrumentation_hook, which searches for INSTRUMENTATION_MANAGER. If all finders return None, standard ClassManager instrumentation is used.
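
A sketch of registering a custom finder; the attribute name consulted here is made up for the example:

from sqlalchemy.ext.instrumentation import instrumentation_finders

def find_alternate_hook(cls):
    # return an instrumentation factory, or None to fall through
    # to the next finder in the sequence
    return getattr(cls, '_alternate_instrumentation_', None)

instrumentation_finders.insert(0, find_alternate_hook)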

      class sqlalchemy.ext.instrumentation.ExtendedInstrumentationRegistry

      Bases: sqlalchemy.orm.instrumentation.InstrumentationFactory

      Extends InstrumentationFactory with additional bookkeeping, to accommodate multiple types of class managers.

SQLAlchemy-0.8.4/doc/orm/extensions/mutable.html: Mutation Tracking — SQLAlchemy 0.8 Documentation


      Mutation Tracking

      Provide support for tracking of in-place changes to scalar values, which are propagated into ORM change events on owning parent objects.

      New in version 0.7: sqlalchemy.ext.mutable replaces SQLAlchemy’s legacy approach to in-place mutations of scalar values; see Mutation event extension, supersedes “mutable=True”.

      Establishing Mutability on Scalar Column Values

      A typical example of a “mutable” structure is a Python dictionary. Following the example introduced in Column and Data Types, we begin with a custom type that marshals Python dictionaries into JSON strings before being persisted:

      from sqlalchemy.types import TypeDecorator, VARCHAR
      import json
      
      class JSONEncodedDict(TypeDecorator):
          "Represents an immutable structure as a json-encoded string."
      
          impl = VARCHAR
      
          def process_bind_param(self, value, dialect):
              if value is not None:
                  value = json.dumps(value)
              return value
      
          def process_result_value(self, value, dialect):
              if value is not None:
                  value = json.loads(value)
              return value

      The usage of json is only for the purposes of example. The sqlalchemy.ext.mutable extension can be used with any type whose target Python type may be mutable, including PickleType, postgresql.ARRAY, etc.

When using the sqlalchemy.ext.mutable extension, the value itself tracks all parents which reference it. Below, we illustrate a simple version of the MutableDict dictionary object, which applies the Mutable mixin to a plain Python dictionary:

      import collections
      from sqlalchemy.ext.mutable import Mutable
      
      class MutableDict(Mutable, dict):
          @classmethod
          def coerce(cls, key, value):
              "Convert plain dictionaries to MutableDict."
      
              if not isinstance(value, MutableDict):
                  if isinstance(value, dict):
                      return MutableDict(value)
      
                  # this call will raise ValueError
                  return Mutable.coerce(key, value)
              else:
                  return value
      
          def __setitem__(self, key, value):
              "Detect dictionary set events and emit change events."
      
              dict.__setitem__(self, key, value)
              self.changed()
      
          def __delitem__(self, key):
              "Detect dictionary del events and emit change events."
      
              dict.__delitem__(self, key)
              self.changed()

      The above dictionary class takes the approach of subclassing the Python built-in dict to produce a dict subclass which routes all mutation events through __setitem__. There are variants on this approach, such as subclassing UserDict.UserDict or collections.MutableMapping; the part that’s important to this example is that the Mutable.changed() method is called whenever an in-place change to the datastructure takes place.

We also redefine the Mutable.coerce() method which will be used to convert any values that are not instances of MutableDict, such as the plain dictionaries returned by the json module, into the appropriate type. Defining this method is optional; we could just as well have created our JSONEncodedDict such that it always returns an instance of MutableDict, and additionally ensured that all calling code uses MutableDict explicitly. When Mutable.coerce() is not overridden, any values applied to a parent object which are not instances of the mutable type will raise a ValueError.

Our new MutableDict type offers a class method as_mutable() which we can use within column metadata to associate with types. This method grabs the given type object or class and associates a listener that will detect all future mappings of this type, applying event listening instrumentation to the mapped attribute. For example, with classical table metadata:

      from sqlalchemy import Table, Column, Integer
      
      my_data = Table('my_data', metadata,
          Column('id', Integer, primary_key=True),
          Column('data', MutableDict.as_mutable(JSONEncodedDict))
      )

      Above, as_mutable() returns an instance of JSONEncodedDict (if the type object was not an instance already), which will intercept any attributes which are mapped against this type. Below we establish a simple mapping against the my_data table:

from sqlalchemy.orm import mapper
      
      class MyDataClass(object):
          pass
      
      # associates mutation listeners with MyDataClass.data
      mapper(MyDataClass, my_data)

      The MyDataClass.data member will now be notified of in place changes to its value.

      There’s no difference in usage when using declarative:

      from sqlalchemy.ext.declarative import declarative_base
      
      Base = declarative_base()
      
      class MyDataClass(Base):
          __tablename__ = 'my_data'
          id = Column(Integer, primary_key=True)
          data = Column(MutableDict.as_mutable(JSONEncodedDict))

      Any in-place changes to the MyDataClass.data member will flag the attribute as “dirty” on the parent object:

      >>> from sqlalchemy.orm import Session
      
      >>> sess = Session()
      >>> m1 = MyDataClass(data={'value1':'foo'})
      >>> sess.add(m1)
      >>> sess.commit()
      
      >>> m1.data['value1'] = 'bar'
>>> m1 in sess.dirty
True

      The MutableDict can be associated with all future instances of JSONEncodedDict in one step, using associate_with(). This is similar to as_mutable() except it will intercept all occurrences of MutableDict in all mappings unconditionally, without the need to declare it individually:

      MutableDict.associate_with(JSONEncodedDict)
      
      class MyDataClass(Base):
          __tablename__ = 'my_data'
          id = Column(Integer, primary_key=True)
          data = Column(JSONEncodedDict)

      Supporting Pickling

The sqlalchemy.ext.mutable extension relies upon the placement of a weakref.WeakKeyDictionary upon the value object, which stores a mapping of parent mapped objects keyed to the attribute name under which they are associated with this value. WeakKeyDictionary objects are not picklable, due to the fact that they contain weakrefs and function callbacks. In our case, this is a good thing, since if this dictionary were picklable, it could lead to an excessively large pickle size for our value objects that are pickled by themselves outside of the context of the parent. The developer responsibility here is only to provide a __getstate__ method that excludes the _parents() collection from the pickle stream:

      class MyMutableType(Mutable):
          def __getstate__(self):
              d = self.__dict__.copy()
              d.pop('_parents', None)
              return d

      With our dictionary example, we need to return the contents of the dict itself (and also restore them on __setstate__):

      class MutableDict(Mutable, dict):
          # ....
      
          def __getstate__(self):
              return dict(self)
      
          def __setstate__(self, state):
              self.update(state)

      In the case that our mutable value object is pickled as it is attached to one or more parent objects that are also part of the pickle, the Mutable mixin will re-establish the Mutable._parents collection on each value object as the owning parents themselves are unpickled.

      Establishing Mutability on Composites

      Composites are a special ORM feature which allow a single scalar attribute to be assigned an object value which represents information “composed” from one or more columns from the underlying mapped table. The usual example is that of a geometric “point”, and is introduced in Composite Column Types.

      Changed in version 0.7: The internals of orm.composite() have been greatly simplified and in-place mutation detection is no longer enabled by default; instead, the user-defined value must detect changes on its own and propagate them to all owning parents. The sqlalchemy.ext.mutable extension provides the helper class MutableComposite, which is a slight variant on the Mutable class.

      As is the case with Mutable, the user-defined composite class subclasses MutableComposite as a mixin, and detects and delivers change events to its parents via the MutableComposite.changed() method. In the case of a composite class, the detection is usually via the usage of Python descriptors (i.e. @property), or alternatively via the special Python method __setattr__(). Below we expand upon the Point class introduced in Composite Column Types to subclass MutableComposite and to also route attribute set events via __setattr__ to the MutableComposite.changed() method:

      from sqlalchemy.ext.mutable import MutableComposite
      
      class Point(MutableComposite):
          def __init__(self, x, y):
              self.x = x
              self.y = y
      
          def __setattr__(self, key, value):
              "Intercept set events"
      
              # set the attribute
              object.__setattr__(self, key, value)
      
              # alert all parents to the change
              self.changed()
      
          def __composite_values__(self):
              return self.x, self.y
      
          def __eq__(self, other):
              return isinstance(other, Point) and \
                  other.x == self.x and \
                  other.y == self.y
      
          def __ne__(self, other):
              return not self.__eq__(other)

      The MutableComposite class uses a Python metaclass to automatically establish listeners for any usage of orm.composite() that specifies our Point type. Below, when Point is mapped to the Vertex class, listeners are established which will route change events from Point objects to each of the Vertex.start and Vertex.end attributes:

      from sqlalchemy.orm import composite, mapper
      from sqlalchemy import Table, Column
      
      vertices = Table('vertices', metadata,
          Column('id', Integer, primary_key=True),
          Column('x1', Integer),
          Column('y1', Integer),
          Column('x2', Integer),
          Column('y2', Integer),
          )
      
      class Vertex(object):
          pass
      
      mapper(Vertex, vertices, properties={
          'start': composite(Point, vertices.c.x1, vertices.c.y1),
          'end': composite(Point, vertices.c.x2, vertices.c.y2)
      })

      Any in-place changes to the Vertex.start or Vertex.end members will flag the attribute as “dirty” on the parent object:

      >>> from sqlalchemy.orm import Session
      
      >>> sess = Session()
      >>> v1 = Vertex(start=Point(3, 4), end=Point(12, 15))
      >>> sess.add(v1)
      >>> sess.commit()
      
      >>> v1.end.x = 8
>>> v1 in sess.dirty
True

      Coercing Mutable Composites

      The MutableBase.coerce() method is also supported on composite types. In the case of MutableComposite, the MutableBase.coerce() method is only called for attribute set operations, not load operations. Overriding the MutableBase.coerce() method is essentially equivalent to using a validates() validation routine for all attributes which make use of the custom composite type:

      class Point(MutableComposite):
          # other Point methods
          # ...
      
    @classmethod
    def coerce(cls, key, value):
              if isinstance(value, tuple):
                  value = Point(*value)
              elif not isinstance(value, Point):
                  raise ValueError("tuple or Point expected")
              return value

New in version 0.7.10, 0.8.0b2: Support for the MutableBase.coerce() method in conjunction with objects of type MutableComposite.

      Supporting Pickling

      As is the case with Mutable, the MutableComposite helper class uses a weakref.WeakKeyDictionary available via the MutableBase._parents() attribute which isn’t picklable. If we need to pickle instances of Point or its owning class Vertex, we at least need to define a __getstate__ that doesn’t include the _parents dictionary. Below we define both a __getstate__ and a __setstate__ that package up the minimal form of our Point class:

      class Point(MutableComposite):
          # ...
      
          def __getstate__(self):
              return self.x, self.y
      
          def __setstate__(self, state):
              self.x, self.y = state

      As with Mutable, the MutableComposite augments the pickling process of the parent’s object-relational state so that the MutableBase._parents() collection is restored to all Point objects.

      API Reference

      class sqlalchemy.ext.mutable.MutableBase

      Common base class to Mutable and MutableComposite.

      _parents

      Dictionary of parent object->attribute name on the parent.

      This attribute is a so-called “memoized” property. It initializes itself with a new weakref.WeakKeyDictionary the first time it is accessed, returning the same object upon subsequent access.

      classmethod coerce(key, value)

      Given a value, coerce it into the target type.

      Can be overridden by custom subclasses to coerce incoming data into a particular type.

      By default, raises ValueError.

      This method is called in different scenarios depending on if the parent class is of type Mutable or of type MutableComposite. In the case of the former, it is called for both attribute-set operations as well as during ORM loading operations. For the latter, it is only called during attribute-set operations; the mechanics of the composite() construct handle coercion during load operations.

      Parameters:
      • key – string name of the ORM-mapped attribute being set.
      • value – the incoming value.
      Returns:

      the method should return the coerced value, or raise ValueError if the coercion cannot be completed.

      class sqlalchemy.ext.mutable.Mutable

      Bases: sqlalchemy.ext.mutable.MutableBase

      Mixin that defines transparent propagation of change events to a parent object.

      See the example in Establishing Mutability on Scalar Column Values for usage information.

      classmethod as_mutable(sqltype)

      Associate a SQL type with this mutable Python type.

      This establishes listeners that will detect ORM mappings against the given type, adding mutation event trackers to those mappings.

      The type is returned, unconditionally as an instance, so that as_mutable() can be used inline:

      Table('mytable', metadata,
          Column('id', Integer, primary_key=True),
          Column('data', MyMutableType.as_mutable(PickleType))
      )

      Note that the returned type is always an instance, even if a class is given, and that only columns which are declared specifically with that type instance receive additional instrumentation.
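
For example, the returned instance can be captured and reused so that several columns share a single instrumented type instance; a hedged sketch based on the MutableDict / JSONEncodedDict example from the narrative documentation:

MutableJSONDict = MutableDict.as_mutable(JSONEncodedDict)

Table('table_one', metadata,
    Column('id', Integer, primary_key=True),
    Column('data', MutableJSONDict)
)

Table('table_two', metadata,
    Column('id', Integer, primary_key=True),
    Column('data', MutableJSONDict)
)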

      To associate a particular mutable type with all occurrences of a particular type, use the Mutable.associate_with() classmethod of the particular Mutable() subclass to establish a global association.

      Warning

      The listeners established by this method are global to all mappers, and are not garbage collected. Only use as_mutable() for types that are permanent to an application, not with ad-hoc types else this will cause unbounded growth in memory usage.

      classmethod associate_with(sqltype)

      Associate this wrapper with all future mapped columns of the given type.

      This is a convenience method that calls associate_with_attribute automatically.

      Warning

      The listeners established by this method are global to all mappers, and are not garbage collected. Only use associate_with() for types that are permanent to an application, not with ad-hoc types else this will cause unbounded growth in memory usage.

      classmethod associate_with_attribute(attribute)

      Establish this type as a mutation listener for the given mapped descriptor.
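
A one-line sketch, assuming the MyDataClass mapping from the narrative documentation above:

# establish mutation listening on one specific mapped attribute only
MutableDict.associate_with_attribute(MyDataClass.data)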

      changed()

      Subclasses should call this method whenever change events occur.

      class sqlalchemy.ext.mutable.MutableComposite

      Bases: sqlalchemy.ext.mutable.MutableBase

      Mixin that defines transparent propagation of change events on a SQLAlchemy “composite” object to its owning parent or parents.

      See the example in Establishing Mutability on Composites for usage information.

      class sqlalchemy.ext.mutable.MutableDict

      Bases: sqlalchemy.ext.mutable.Mutable, __builtin__.dict

      A dictionary type that implements Mutable.

      New in version 0.8.

SQLAlchemy-0.8.4/doc/orm/extensions/orderinglist.html: Ordering List — SQLAlchemy 0.8 Documentation


      Ordering List

      A custom list that manages index/position information for contained elements.

Author: Jason Kirtland

      orderinglist is a helper for mutable ordered relationships. It will intercept list operations performed on a relationship()-managed collection and automatically synchronize changes in list position onto a target scalar attribute.

      Example: A slide table, where each row refers to zero or more entries in a related bullet table. The bullets within a slide are displayed in order based on the value of the position column in the bullet table. As entries are reordered in memory, the value of the position attribute should be updated to reflect the new sort order:

      Base = declarative_base()
      
      class Slide(Base):
          __tablename__ = 'slide'
      
          id = Column(Integer, primary_key=True)
          name = Column(String)
      
          bullets = relationship("Bullet", order_by="Bullet.position")
      
      class Bullet(Base):
          __tablename__ = 'bullet'
          id = Column(Integer, primary_key=True)
          slide_id = Column(Integer, ForeignKey('slide.id'))
          position = Column(Integer)
          text = Column(String)

      The standard relationship mapping will produce a list-like attribute on each Slide containing all related Bullet objects, but coping with changes in ordering is not handled automatically. When appending a Bullet into Slide.bullets, the Bullet.position attribute will remain unset until manually assigned. When the Bullet is inserted into the middle of the list, the following Bullet objects will also need to be renumbered.

      The OrderingList object automates this task, managing the position attribute on all Bullet objects in the collection. It is constructed using the ordering_list() factory:

      from sqlalchemy.ext.orderinglist import ordering_list
      
      Base = declarative_base()
      
      class Slide(Base):
          __tablename__ = 'slide'
      
          id = Column(Integer, primary_key=True)
          name = Column(String)
      
          bullets = relationship("Bullet", order_by="Bullet.position",
                                  collection_class=ordering_list('position'))
      
      class Bullet(Base):
          __tablename__ = 'bullet'
          id = Column(Integer, primary_key=True)
          slide_id = Column(Integer, ForeignKey('slide.id'))
          position = Column(Integer)
          text = Column(String)

      With the above mapping the Bullet.position attribute is managed:

>>> s = Slide()
>>> s.bullets.append(Bullet())
>>> s.bullets.append(Bullet())
>>> s.bullets[1].position
1
>>> s.bullets.insert(1, Bullet())
>>> s.bullets[2].position
2

      The OrderingList construct only works with changes to a collection, and not the initial load from the database, and requires that the list be sorted when loaded. Therefore, be sure to specify order_by on the relationship() against the target ordering attribute, so that the ordering is correct when first loaded.

      Warning

      OrderingList only provides limited functionality when a primary key column or unique column is the target of the sort. Since changing the order of entries often means that two rows must trade values, this is not possible when the value is constrained by a primary key or unique constraint, since one of the rows would temporarily have to point to a third available value so that the other row could take its old value. OrderingList doesn’t do any of this for you, nor does SQLAlchemy itself.

      ordering_list() takes the name of the related object’s ordering attribute as an argument. By default, the zero-based integer index of the object’s position in the ordering_list() is synchronized with the ordering attribute: index 0 will get position 0, index 1 position 1, etc. To start numbering at 1 or some other integer, provide count_from=1.
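
For example, a 1-based ordering on the bullet example would look like:

bullets = relationship("Bullet", order_by="Bullet.position",
                        collection_class=ordering_list('position', count_from=1))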

      API Reference

      sqlalchemy.ext.orderinglist.ordering_list(attr, count_from=None, **kw)

      Prepares an OrderingList factory for use in mapper definitions.

      Returns an object suitable for use as an argument to a Mapper relationship’s collection_class option. e.g.:

      from sqlalchemy.ext.orderinglist import ordering_list
      
      class Slide(Base):
          __tablename__ = 'slide'
      
          id = Column(Integer, primary_key=True)
          name = Column(String)
      
          bullets = relationship("Bullet", order_by="Bullet.position",
                                  collection_class=ordering_list('position'))
      Parameters:
      • attr – Name of the mapped attribute to use for storage and retrieval of ordering information
      • count_from – Set up an integer-based ordering, starting at count_from. For example, ordering_list('pos', count_from=1) would create a 1-based list in SQL, storing the value in the ‘pos’ column. Ignored if ordering_func is supplied.

      Additional arguments are passed to the OrderingList constructor.

      sqlalchemy.ext.orderinglist.count_from_0(index, collection)

      Numbering function: consecutive integers starting at 0.

      sqlalchemy.ext.orderinglist.count_from_1(index, collection)

      Numbering function: consecutive integers starting at 1.

      sqlalchemy.ext.orderinglist.count_from_n_factory(start)

      Numbering function: consecutive integers starting at arbitrary start.
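
These numbering functions are passed to OrderingList via the ordering_func keyword, which ordering_list() forwards along; a brief sketch:

from sqlalchemy.ext.orderinglist import ordering_list, count_from_n_factory

# number the 'position' column starting at 10
bullets = relationship("Bullet", order_by="Bullet.position",
                        collection_class=ordering_list(
                            'position', ordering_func=count_from_n_factory(10)))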

      class sqlalchemy.ext.orderinglist.OrderingList(ordering_attr=None, ordering_func=None, reorder_on_append=False)

      Bases: __builtin__.list

      A custom list that manages position information for its children.

      The OrderingList object is normally set up using the ordering_list() factory function, used in conjunction with the relationship() function.

      __init__(ordering_attr=None, ordering_func=None, reorder_on_append=False)

      A custom list that manages position information for its children.

      OrderingList is a collection_class list implementation that syncs position in a Python list with a position attribute on the mapped objects.

      This implementation relies on the list starting in the proper order, so be sure to put an order_by on your relationship.

      Parameters:
      • ordering_attr – Name of the attribute that stores the object’s order in the relationship.
      • ordering_func

        Optional. A function that maps the position in the Python list to a value to store in the ordering_attr. Values returned are usually (but need not be!) integers.

        An ordering_func is called with two positional parameters: the index of the element in the list, and the list itself.

        If omitted, Python list indexes are used for the attribute values. Two basic pre-built numbering functions are provided in this module: count_from_0 and count_from_1. For more exotic examples like stepped numbering, alphabetical and Fibonacci numbering, see the unit tests.

      • reorder_on_append

        Default False. When appending an object with an existing (non-None) ordering value, that value will be left untouched unless reorder_on_append is true. This is an optimization to avoid a variety of dangerous unexpected database writes.

        SQLAlchemy will add instances to the list via append() when your object loads. If for some reason the result set from the database skips a step in the ordering (say, row ‘1’ is missing but you get ‘2’, ‘3’, and ‘4’), reorder_on_append=True would immediately renumber the items to ‘1’, ‘2’, ‘3’. If you have multiple sessions making changes, any of whom happen to load this collection even in passing, all of the sessions would try to “clean up” the numbering in their commits, possibly causing all but one to fail with a concurrent modification error.

  It is recommended to leave this set to the default of False, and to call reorder() if you’re doing append() operations with previously ordered instances or when doing some housekeeping after manual SQL operations.

      append(entity)

      L.append(object) – append object to end

      insert(index, entity)

      L.insert(index, object) – insert object before index

      pop([index]) → item -- remove and return item at index (default last).

      Raises IndexError if list is empty or index is out of range.

      remove(entity)

      L.remove(value) – remove first occurrence of value. Raises ValueError if the value is not present.

      reorder()

      Synchronize ordering for the entire collection.

      Sweeps through the list and ensures that each object has accurate ordering information set.

SQLAlchemy-0.8.4/doc/orm/index.html: SQLAlchemy ORM — SQLAlchemy 0.8 Documentation


      SQLAlchemy ORM

      Here, the Object Relational Mapper is introduced and fully described. If you want to work with higher-level SQL which is constructed automatically for you, as well as automated persistence of Python objects, proceed first to the tutorial.

SQLAlchemy-0.8.4/doc/orm/inheritance.html: Mapping Class Inheritance Hierarchies — SQLAlchemy 0.8 Documentation


Mapping Class Inheritance Hierarchies

      SQLAlchemy supports three forms of inheritance: single table inheritance, where several types of classes are represented by a single table, concrete table inheritance, where each type of class is represented by independent tables, and joined table inheritance, where the class hierarchy is broken up among dependent tables, each class represented by its own table that only includes those attributes local to that class.

      The most common forms of inheritance are single and joined table, while concrete inheritance presents more configurational challenges.

      When mappers are configured in an inheritance relationship, SQLAlchemy has the ability to load elements polymorphically, meaning that a single query can return objects of multiple types.

      Joined Table Inheritance

In joined table inheritance, each class along a particular class’s list of parents is represented by a unique table. The total set of attributes for a particular instance is represented as a join along all tables in its inheritance path. Here, we first define the Employee class; its table will contain a primary key column (or columns), and a column for each attribute that’s represented by Employee. In this case it’s just name:

      class Employee(Base):
          __tablename__ = 'employee'
          id = Column(Integer, primary_key=True)
          name = Column(String(50))
          type = Column(String(50))
      
          __mapper_args__ = {
              'polymorphic_identity':'employee',
              'polymorphic_on':type
          }

      The mapped table also has a column called type. The purpose of this column is to act as the discriminator, and stores a value which indicates the type of object represented within the row. The column may be of any datatype, though string and integer are the most common.

      The discriminator column is only needed if polymorphic loading is desired, as is usually the case. It is not strictly necessary that it be present directly on the base mapped table, and can instead be defined on a derived select statement that’s used when the class is queried; however, this is a much more sophisticated configuration scenario.

      The mapping receives additional arguments via the __mapper_args__ dictionary. Here the type column is explicitly stated as the discriminator column, and the polymorphic identity of employee is also given; this is the value that will be stored in the polymorphic discriminator column for instances of this class.

      We next define Engineer and Manager subclasses of Employee. Each contains columns that represent the attributes unique to the subclass they represent. Each table also must contain a primary key column (or columns), and in most cases a foreign key reference to the parent table:

      class Engineer(Employee):
          __tablename__ = 'engineer'
          id = Column(Integer, ForeignKey('employee.id'), primary_key=True)
          engineer_name = Column(String(30))
      
          __mapper_args__ = {
              'polymorphic_identity':'engineer',
          }
      
      class Manager(Employee):
          __tablename__ = 'manager'
          id = Column(Integer, ForeignKey('employee.id'), primary_key=True)
          manager_name = Column(String(30))
      
          __mapper_args__ = {
              'polymorphic_identity':'manager',
          }

      It is standard practice that the same column is used for both the role of primary key as well as foreign key to the parent table, and that the column is also named the same as that of the parent table. However, both of these practices are optional. Separate columns may be used for primary key and parent-relationship, the column may be named differently than that of the parent, and even a custom join condition can be specified between parent and child tables instead of using a foreign key.

      Joined inheritance primary keys

      One natural effect of the joined table inheritance configuration is that the identity of any mapped object can be determined entirely from the base table. This has obvious advantages, so SQLAlchemy always considers the primary key columns of a joined inheritance class to be those of the base table only. In other words, the id columns of both the engineer and manager tables are not used to locate Engineer or Manager objects - only the value in employee.id is considered. engineer.id and manager.id are still of course critical to the proper operation of the pattern overall as they are used to locate the joined row, once the parent row has been determined within a statement.

      With the joined inheritance mapping complete, querying against Employee will return a combination of Employee, Engineer and Manager objects. Newly saved Engineer, Manager, and Employee objects will automatically populate the employee.type column with engineer, manager, or employee, as appropriate.
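
As a brief sketch of what this means in practice (the key value 5 is arbitrary and illustrative only):

# both lookups resolve identity against the value in employee.id;
# engineer.id / manager.id are only consulted to load the joined row
someone = session.query(Employee).get(5)   # may be an Employee, Engineer or Manager
manager = session.query(Manager).get(5)    # same identity; typically None if row 5 is not a manager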

      Basic Control of Which Tables are Queried

The orm.with_polymorphic() function and the with_polymorphic() method of Query affect the specific tables which the Query selects from. Normally, a query such as this:

      session.query(Employee).all()

      ...selects only from the employee table. When loading fresh from the database, our joined-table setup will query from the parent table only, using SQL such as this:

SELECT employee.id AS employee_id, employee.name AS employee_name,
       employee.type AS employee_type
FROM employee
[]

      As attributes are requested from those Employee objects which are represented in either the engineer or manager child tables, a second load is issued for the columns in that related row, if the data was not already loaded. So above, after accessing the objects you’d see further SQL issued along the lines of:

SELECT manager.id AS manager_id, manager.manager_data AS manager_manager_data
FROM manager
WHERE ? = manager.id
[5]

SELECT engineer.id AS engineer_id, engineer.engineer_info AS engineer_engineer_info
FROM engineer
WHERE ? = engineer.id
[2]

This behavior works well when issuing searches for small numbers of items, such as when using Query.get(), since the full range of joined tables is not pulled into the SQL statement unnecessarily. But when querying a larger span of rows which are known to be of many types, you may want to actively join to some or all of the joined tables. The with_polymorphic feature provides this.

To tell our query to load Engineer and Manager objects polymorphically, we can use the orm.with_polymorphic() function to create a new aliased class which represents a select of the base table combined with outer joins to each of the inheriting tables:

      from sqlalchemy.orm import with_polymorphic
      
      eng_plus_manager = with_polymorphic(Employee, [Engineer, Manager])
      
      query = session.query(eng_plus_manager)

      The above produces a query which joins the employee table to both the engineer and manager tables like the following:

      query.all()
      
SELECT employee.id AS employee_id, engineer.id AS engineer_id, manager.id AS manager_id,
       employee.name AS employee_name, employee.type AS employee_type,
       engineer.engineer_info AS engineer_engineer_info,
       manager.manager_data AS manager_manager_data
FROM employee
    LEFT OUTER JOIN engineer ON employee.id = engineer.id
    LEFT OUTER JOIN manager ON employee.id = manager.id
[]

      The entity returned by orm.with_polymorphic() is an AliasedClass object, which can be used in a Query like any other alias, including named attributes for those attributes on the Employee class. In our example, eng_plus_manager becomes the entity that we use to refer to the three-way outer join above. It also includes namespaces for each class named in the list of classes, so that attributes specific to those subclasses can be called upon as well. The following example illustrates calling upon attributes specific to Engineer as well as Manager in terms of eng_plus_manager:

      eng_plus_manager = with_polymorphic(Employee, [Engineer, Manager])
      query = session.query(eng_plus_manager).filter(
                      or_(
                          eng_plus_manager.Engineer.engineer_info=='x',
                          eng_plus_manager.Manager.manager_data=='y'
                      )
                  )

      orm.with_polymorphic() accepts a single class or mapper, a list of classes/mappers, or the string '*' to indicate all subclasses:

      # join to the engineer table
      entity = with_polymorphic(Employee, Engineer)
      
      # join to the engineer and manager tables
      entity = with_polymorphic(Employee, [Engineer, Manager])
      
      # join to all subclass tables
      entity = with_polymorphic(Employee, '*')
      
      # use with Query
      session.query(entity).all()

      It also accepts a second argument selectable which replaces the automatic join creation and instead selects directly from the selectable given. This feature is normally used with “concrete” inheritance, described later, but can be used with any kind of inheritance setup in the case that specialized SQL should be used to load polymorphically:

      # custom selectable
      employee = Employee.__table__
      manager = Manager.__table__
      engineer = Engineer.__table__
      entity = with_polymorphic(
                  Employee,
                  [Engineer, Manager],
                  employee.outerjoin(manager).outerjoin(engineer)
              )
      
      # use with Query
      session.query(entity).all()

      Note that if you only need to load a single subtype, such as just the Engineer objects, orm.with_polymorphic() is not needed since you would query against the Engineer class directly.

Query.with_polymorphic() has the same purpose as orm.with_polymorphic(), except that it is not as flexible in its usage patterns, in that it only applies to the first full mapping, which then impacts all occurrences of that class or the target subclasses within the Query. For simple cases it might be considered to be more succinct:

      session.query(Employee).with_polymorphic([Engineer, Manager]).\
          filter(or_(Engineer.engineer_info=='w', Manager.manager_data=='q'))

New in version 0.8: orm.with_polymorphic(), an improved version of the Query.with_polymorphic() method.

      The mapper also accepts with_polymorphic as a configurational argument so that the joined-style load will be issued automatically. This argument may be the string '*', a list of classes, or a tuple consisting of either, followed by a selectable:

      class Employee(Base):
          __tablename__ = 'employee'
          id = Column(Integer, primary_key=True)
          type = Column(String(20))
      
          __mapper_args__ = {
              'polymorphic_on':type,
              'polymorphic_identity':'employee',
              'with_polymorphic':'*'
          }
      
      class Engineer(Employee):
          __tablename__ = 'engineer'
          id = Column(Integer, ForeignKey('employee.id'), primary_key=True)
          __mapper_args__ = {'polymorphic_identity':'engineer'}
      
      class Manager(Employee):
          __tablename__ = 'manager'
          id = Column(Integer, ForeignKey('employee.id'), primary_key=True)
          __mapper_args__ = {'polymorphic_identity':'manager'}

      The above mapping will produce a query similar to that of with_polymorphic('*') for every query of Employee objects.

      Using orm.with_polymorphic() or Query.with_polymorphic() will override the mapper-level with_polymorphic setting.

      sqlalchemy.orm.with_polymorphic(base, classes, selectable=False, polymorphic_on=None, aliased=False, innerjoin=False, _use_mapper_path=False)

      Produce an AliasedClass construct which specifies columns for descendant mappers of the given base.

      New in version 0.8: orm.with_polymorphic() is in addition to the existing Query method Query.with_polymorphic(), which has the same purpose but is not as flexible in its usage.

      Using this method will ensure that each descendant mapper’s tables are included in the FROM clause, and will allow filter() criterion to be used against those tables. The resulting instances will also have those columns already loaded so that no “post fetch” of those columns will be required.

      See the examples at Basic Control of Which Tables are Queried.

      Parameters:
      • base – Base class to be aliased.
      • classes – a single class or mapper, or list of class/mappers, which inherit from the base class. Alternatively, it may also be the string '*', in which case all descending mapped classes will be added to the FROM clause.
• aliased – when True, the selectable will be wrapped in an alias, that is (SELECT * FROM <fromclauses>) AS anon_1. This can be important when using with_polymorphic() to create the target of a JOIN on a backend that does not support parenthesized joins, such as SQLite and older versions of MySQL.
      • selectable – a table or select() statement that will be used in place of the generated FROM clause. This argument is required if any of the desired classes use concrete table inheritance, since SQLAlchemy currently cannot generate UNIONs among tables automatically. If used, the selectable argument must represent the full set of tables and columns mapped by every mapped class. Otherwise, the unaccounted mapped columns will result in their table being appended directly to the FROM clause which will usually lead to incorrect results.
• polymorphic_on – a column to be used as the “discriminator” column for the given selectable. If not given, the polymorphic_on attribute of the base class’s mapper will be used, if any. This is useful for mappings that don’t have polymorphic loading behavior by default.
• innerjoin – if True, an INNER JOIN will be used. This should only be specified if querying for one specific subtype.

      Advanced Control of Which Tables are Queried

The with_polymorphic functions work well for simple scenarios. However, sometimes direct control of table rendering is called for, such as when one wants to render only the subclass table and not the parent table.

This use case can be achieved by using the mapped Table objects directly. For example, to query the name of employees with particular criteria:

      engineer = Engineer.__table__
      manager = Manager.__table__
      
      session.query(Employee.name).\
          outerjoin((engineer, engineer.c.id==Employee.id)).\
          outerjoin((manager, manager.c.id==Employee.id)).\
          filter(or_(Engineer.engineer_info=='w', Manager.manager_data=='q'))

The base table, in this case the “employee” table, isn’t always necessary. A SQL query is always more efficient with fewer joins. Here, if we wanted to load only information specific to manager or engineer, we can instruct Query to use only those tables. The FROM clause is determined by what’s specified in the Session.query(), Query.filter(), or Query.select_from() methods:

      session.query(Manager.manager_data).select_from(manager)
      
      session.query(engineer.c.id).\
              filter(engineer.c.engineer_info==manager.c.manager_data)

      Creating Joins to Specific Subtypes

The of_type() method is a helper which allows the construction of joins along relationship() paths while narrowing the criterion to specific subclasses. Suppose the employee table represents a collection of employees which are associated with a Company object. We’ll add a company_id column to the employee table and a new company table:

      class Company(Base):
          __tablename__ = 'company'
          id = Column(Integer, primary_key=True)
          name = Column(String(50))
          employees = relationship("Employee",
                          backref='company',
                          cascade='all, delete-orphan')
      
      class Employee(Base):
          __tablename__ = 'employee'
          id = Column(Integer, primary_key=True)
          type = Column(String(20))
          company_id = Column(Integer, ForeignKey('company.id'))
          __mapper_args__ = {
              'polymorphic_on':type,
              'polymorphic_identity':'employee',
              'with_polymorphic':'*'
          }
      
      class Engineer(Employee):
          __tablename__ = 'engineer'
          id = Column(Integer, ForeignKey('employee.id'), primary_key=True)
          engineer_info = Column(String(50))
          __mapper_args__ = {'polymorphic_identity':'engineer'}
      
      class Manager(Employee):
          __tablename__ = 'manager'
          id = Column(Integer, ForeignKey('employee.id'), primary_key=True)
          manager_data = Column(String(50))
          __mapper_args__ = {'polymorphic_identity':'manager'}

      When querying from Company onto the Employee relationship, the join() method as well as the any() and has() operators will create a join from company to employee, without including engineer or manager in the mix. If we wish to have criterion which is specifically against the Engineer class, we can tell those methods to join or subquery against the joined table representing the subclass using the of_type() operator:

      session.query(Company).\
          join(Company.employees.of_type(Engineer)).\
          filter(Engineer.engineer_info=='someinfo')

      A longhand version of this would involve spelling out the full target selectable within a 2-tuple:

      employee = Employee.__table__
      engineer = Engineer.__table__
      
      session.query(Company).\
          join((employee.join(engineer), Company.employees)).\
          filter(Engineer.engineer_info=='someinfo')

      of_type() accepts a single class argument. More flexibility can be achieved either by joining to an explicit join as above, or by using the orm.with_polymorphic() function to create a polymorphic selectable:

      manager_and_engineer = with_polymorphic(
                                  Employee, [Manager, Engineer],
                                  aliased=True)
      
      session.query(Company).\
          join(manager_and_engineer, Company.employees).\
          filter(
              or_(manager_and_engineer.Engineer.engineer_info=='someinfo',
                  manager_and_engineer.Manager.manager_data=='somedata')
          )

Above, we use the aliased=True argument with orm.with_polymorphic() so that the right hand side of the join between Company and manager_and_engineer is converted into an aliased subquery. Some backends, such as SQLite and older versions of MySQL, can’t handle a FROM clause of the following form:

      FROM x JOIN (y JOIN z ON <onclause>) ON <onclause>

      Using aliased=True instead renders it more like:

      FROM x JOIN (SELECT * FROM y JOIN z ON <onclause>) AS anon_1 ON <onclause>

      The above join can also be expressed more succinctly by combining of_type() with the polymorphic construct:

      manager_and_engineer = with_polymorphic(
                                  Employee, [Manager, Engineer],
                                  aliased=True)
      
      session.query(Company).\
          join(Company.employees.of_type(manager_and_engineer)).\
          filter(
              or_(manager_and_engineer.Engineer.engineer_info=='someinfo',
                  manager_and_engineer.Manager.manager_data=='somedata')
          )

      The any() and has() operators also can be used with of_type() when the embedded criterion is in terms of a subclass:

      session.query(Company).\
              filter(
                  Company.employees.of_type(Engineer).
                      any(Engineer.engineer_info=='someinfo')
                  ).all()

Note that the any() and has() operators are both shorthand for a correlated EXISTS query. Building one by hand looks like the following:

company = Company.__table__
employee = Employee.__table__
engineer = Engineer.__table__

session.query(Company).filter(
    exists([1],
        and_(Engineer.engineer_info=='someinfo',
            employee.c.company_id==company.c.id),
        from_obj=employee.join(engineer)
    )
).all()

The EXISTS subquery above selects from the join of the employee table to the engineer table, and also specifies criterion which correlates the EXISTS subselect back to the parent company table.

      New in version 0.8: of_type() accepts orm.aliased() and orm.with_polymorphic() constructs in conjunction with Query.join(), any() and has().

      Eager Loading of Specific Subtypes

      The joinedload() and subqueryload() options also support paths which make use of of_type(). Below we load Company rows while eagerly loading related Engineer objects, querying the employee and engineer tables simultaneously:

      session.query(Company).\
          options(subqueryload_all(Company.employees.of_type(Engineer),
                          Engineer.machines))

      New in version 0.8: joinedload() and subqueryload() support paths that are qualified with of_type().

      Single Table Inheritance

      Single table inheritance is where the attributes of the base class as well as all subclasses are represented within a single table. A column is present in the table for every attribute mapped to the base class and all subclasses; the columns which correspond to a single subclass are nullable. This configuration looks much like joined-table inheritance except there’s only one table. In this case, a type column is required, as there would be no other way to discriminate between classes. The table is specified in the base mapper only; for the inheriting classes, leave their table parameter blank:

      class Employee(Base):
          __tablename__ = 'employee'
          id = Column(Integer, primary_key=True)
          name = Column(String(50))
          manager_data = Column(String(50))
          engineer_info = Column(String(50))
          type = Column(String(20))
      
          __mapper_args__ = {
              'polymorphic_on':type,
              'polymorphic_identity':'employee'
          }
      
      class Manager(Employee):
          __mapper_args__ = {
              'polymorphic_identity':'manager'
          }
      
      class Engineer(Employee):
          __mapper_args__ = {
              'polymorphic_identity':'engineer'
          }

      Note that the mappers for the derived classes Manager and Engineer omit the __tablename__, indicating they do not have a mapped table of their own.
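
As a sketch of what this produces at query time (the SQL shown is approximate), querying one of the subclasses automatically adds a criterion against the discriminator column:

session.query(Manager).all()

SELECT employee.id AS employee_id, employee.name AS employee_name,
       employee.manager_data AS employee_manager_data,
       employee.engineer_info AS employee_engineer_info,
       employee.type AS employee_type
FROM employee
WHERE employee.type IN ('manager')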

      Concrete Table Inheritance

      Note

This section currently uses classical mappings. The Declarative system fully supports concrete inheritance, however. See the links below for more information on using declarative with concrete table inheritance.

      This form of inheritance maps each class to a distinct table, as below:

      employees_table = Table('employees', metadata,
          Column('employee_id', Integer, primary_key=True),
          Column('name', String(50)),
      )
      
      managers_table = Table('managers', metadata,
          Column('employee_id', Integer, primary_key=True),
          Column('name', String(50)),
          Column('manager_data', String(50)),
      )
      
      engineers_table = Table('engineers', metadata,
          Column('employee_id', Integer, primary_key=True),
          Column('name', String(50)),
          Column('engineer_info', String(50)),
      )

      Notice in this case there is no type column. If polymorphic loading is not required, there’s no advantage to using inherits here; you just define a separate mapper for each class.

      mapper(Employee, employees_table)
      mapper(Manager, managers_table)
      mapper(Engineer, engineers_table)

To load polymorphically, the with_polymorphic argument is required, along with a selectable indicating how rows should be loaded. In this case we must construct a UNION of all three tables. SQLAlchemy includes a helper function for this, polymorphic_union(), which will map all the different columns into a structure of selects with the same numbers and names of columns, and also generate a virtual type column for each subselect:

      pjoin = polymorphic_union({
          'employee': employees_table,
          'manager': managers_table,
          'engineer': engineers_table
      }, 'type', 'pjoin')
      
      employee_mapper = mapper(Employee, employees_table,
                                          with_polymorphic=('*', pjoin),
                                          polymorphic_on=pjoin.c.type,
                                          polymorphic_identity='employee')
      manager_mapper = mapper(Manager, managers_table,
                                          inherits=employee_mapper,
                                          concrete=True,
                                          polymorphic_identity='manager')
      engineer_mapper = mapper(Engineer, engineers_table,
                                          inherits=employee_mapper,
                                          concrete=True,
                                          polymorphic_identity='engineer')

      Upon select, the polymorphic union produces a query like this:

      session.query(Employee).all()
      
SELECT pjoin.type AS pjoin_type, pjoin.manager_data AS pjoin_manager_data,
       pjoin.employee_id AS pjoin_employee_id, pjoin.name AS pjoin_name,
       pjoin.engineer_info AS pjoin_engineer_info
FROM (
    SELECT employees.employee_id AS employee_id,
           CAST(NULL AS VARCHAR(50)) AS manager_data,
           employees.name AS name,
           CAST(NULL AS VARCHAR(50)) AS engineer_info,
           'employee' AS type
    FROM employees
    UNION ALL
    SELECT managers.employee_id AS employee_id,
           managers.manager_data AS manager_data,
           managers.name AS name,
           CAST(NULL AS VARCHAR(50)) AS engineer_info,
           'manager' AS type
    FROM managers
    UNION ALL
    SELECT engineers.employee_id AS employee_id,
           CAST(NULL AS VARCHAR(50)) AS manager_data,
           engineers.name AS name,
           engineers.engineer_info AS engineer_info,
           'engineer' AS type
    FROM engineers
) AS pjoin
[]

      Concrete Inheritance with Declarative

      New in version 0.7.3: The Declarative module includes helpers for concrete inheritance. See Using the Concrete Helpers for more information.

      Using Relationships with Inheritance

Both joined-table and single table inheritance scenarios produce mappings which are usable in relationship() functions; that is, it’s possible to map a parent object to a child object which is polymorphic. Similarly, inheriting mappers can have relationship() objects of their own at any level, which are inherited by each child class. The only requirement for relationships is that there is a table relationship between parent and child. An example is the following modification to the joined table inheritance example, which sets a bi-directional relationship between Employee and Company:

      employees_table = Table('employees', metadata,
          Column('employee_id', Integer, primary_key=True),
          Column('name', String(50)),
          Column('company_id', Integer, ForeignKey('companies.company_id'))
      )
      
      companies = Table('companies', metadata,
         Column('company_id', Integer, primary_key=True),
         Column('name', String(50)))
      
      class Company(object):
          pass
      
      mapper(Company, companies, properties={
         'employees': relationship(Employee, backref='company')
      })

      Relationships with Concrete Inheritance

In a concrete inheritance scenario, mapping relationships is more challenging since the distinct classes do not share a table. In this case, you can establish a relationship from parent to child as long as a join condition can be constructed from parent to child, i.e. each child table contains a foreign key to the parent:

      companies = Table('companies', metadata,
         Column('id', Integer, primary_key=True),
         Column('name', String(50)))
      
      employees_table = Table('employees', metadata,
          Column('employee_id', Integer, primary_key=True),
          Column('name', String(50)),
          Column('company_id', Integer, ForeignKey('companies.id'))
      )
      
      managers_table = Table('managers', metadata,
          Column('employee_id', Integer, primary_key=True),
          Column('name', String(50)),
          Column('manager_data', String(50)),
          Column('company_id', Integer, ForeignKey('companies.id'))
      )
      
      engineers_table = Table('engineers', metadata,
          Column('employee_id', Integer, primary_key=True),
          Column('name', String(50)),
          Column('engineer_info', String(50)),
          Column('company_id', Integer, ForeignKey('companies.id'))
      )
      
      employee_mapper = mapper(Employee, employees_table,
                      with_polymorphic=('*', pjoin),
                      polymorphic_on=pjoin.c.type,
                      polymorphic_identity='employee')
      
      mapper(Manager, managers_table,
                      inherits=employee_mapper,
                      concrete=True,
                      polymorphic_identity='manager')
      
      mapper(Engineer, engineers_table,
                      inherits=employee_mapper,
                      concrete=True,
                      polymorphic_identity='engineer')
      
      mapper(Company, companies, properties={
          'employees': relationship(Employee)
      })

The big limitation with concrete table inheritance is that relationship() objects placed on each concrete mapper do not propagate to child mappers. If you want to have the same relationship() objects set up on all concrete mappers, they must be configured manually on each. To configure back references in such a configuration, the back_populates keyword may be used instead of backref, as below where both A(object) and B(A) bidirectionally reference C:

      ajoin = polymorphic_union({
              'a':a_table,
              'b':b_table
          }, 'type', 'ajoin')
      
      mapper(A, a_table, with_polymorphic=('*', ajoin),
          polymorphic_on=ajoin.c.type, polymorphic_identity='a',
          properties={
              'some_c':relationship(C, back_populates='many_a')
      })
      mapper(B, b_table, inherits=A, concrete=True,
          polymorphic_identity='b',
          properties={
              'some_c':relationship(C, back_populates='many_a')
      })
      mapper(C, c_table, properties={
          'many_a':relationship(A, collection_class=set,
                                      back_populates='some_c'),
      })

      Using Inheritance with Declarative

      Declarative makes inheritance configuration more intuitive. See the docs at Inheritance Configuration.

SQLAlchemy-0.8.4/doc/orm/internals.html: ORM Internals — SQLAlchemy 0.8 Documentation

      ORM Internals

      Key ORM constructs, not otherwise covered in other sections, are listed here.

      class sqlalchemy.orm.state.AttributeState(state, key)

      Provide an inspection interface corresponding to a particular attribute on a particular mapped object.

      The AttributeState object is accessed via the InstanceState.attrs collection of a particular InstanceState:

      from sqlalchemy import inspect
      
      insp = inspect(some_mapped_object)
      attr_state = insp.attrs.some_attribute
       
      history

      Return the current pre-flush change history for this attribute, via the History interface.

      loaded_value

      The current value of this attribute as loaded from the database.

      If the value has not been loaded, or is otherwise not present in the object’s dictionary, returns NO_VALUE.

      value

      Return the value of this attribute.

      This operation is equivalent to accessing the object’s attribute directly or via getattr(), and will fire off any pending loader callables if needed.
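
Continuing the snippet above (some_mapped_object and some_attribute are placeholder names, as in that snippet), the resulting AttributeState can then be interrogated:

attr_state.value                   # current value; fires pending loaders if needed
attr_state.loaded_value            # value present in the object's dictionary, or NO_VALUE
attr_state.history.has_changes()   # pre-flush change history for this attribute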

      class sqlalchemy.orm.instrumentation.ClassManager(class_)

      Bases: __builtin__.dict

      tracks state information at the class level.

       
      dispose()

Disassociate this manager from its class.

      has_parent(state, key, optimistic=False)

      TODO

      manage()

      Mark this instance as the manager for its class.

      original_init

      x.__init__(...) initializes x; see help(type(x)) for signature

      classmethod state_getter()

      Return a (instance) -> InstanceState callable.

      “state getter” callables should raise either KeyError or AttributeError if no InstanceState could be found for the instance.

      unregister()

      remove all instrumentation established by this ClassManager.

      class sqlalchemy.orm.properties.ColumnProperty(*columns, **kwargs)

      Bases: sqlalchemy.orm.interfaces.StrategizedProperty

      Describes an object attribute that corresponds to a table column.

      Public constructor is the orm.column_property() function.

       
      ColumnComparator

      alias of Comparator

      class Comparator(prop, parentmapper, adapter=None)

      Bases: sqlalchemy.orm.interfaces.PropComparator

      Produce boolean, comparison, and other operators for ColumnProperty attributes.

      See the documentation for PropComparator for a brief overview.

      See also:

      PropComparator

      ColumnOperators

      Redefining and Creating New Operators

      TypeEngine.comparator_factory

      ColumnProperty.__init__(*columns, **kwargs)

      Construct a ColumnProperty.

      Note the public constructor is the orm.column_property() function.

      Parameters:
      • *columns – The list of columns describes a single object property. If there are multiple tables joined together for the mapper, this list represents the equivalent column as it appears across each table.
      • group
      • deferred
      • comparator_factory
      • descriptor
      • expire_on_flush
      • extension
      • info – Optional data dictionary which will be populated into the info attribute of this object.
      ColumnProperty.expression

      Return the primary column or expression for this ColumnProperty.

      class sqlalchemy.orm.descriptor_props.CompositeProperty(class_, *attrs, **kwargs)

      Bases: sqlalchemy.orm.descriptor_props.DescriptorProperty

      Defines a “composite” mapped attribute, representing a collection of columns as one attribute.

      CompositeProperty is constructed using the composite() function.

      See also:

      Composite Column Types

      class Comparator(prop, parentmapper, adapter=None)

      Bases: sqlalchemy.orm.interfaces.PropComparator

      Produce boolean, comparison, and other operators for CompositeProperty attributes.

See the example in Redefining Comparison Operations for Composites for an overview of usage, as well as the documentation for PropComparator.

      See also:

      PropComparator

      ColumnOperators

      Redefining and Creating New Operators

      TypeEngine.comparator_factory

      CompositeProperty.do_init()

      Initialization which occurs after the CompositeProperty has been associated with its parent mapper.

CompositeProperty.get_history(state, dict_, passive=<symbol 'PASSIVE_OFF'>)

      Provided for userland code that uses attributes.get_history().

      class sqlalchemy.orm.interfaces._InspectionAttr

      A base class applied to all ORM objects that can be returned by the inspect() function.

      The attributes defined here allow the usage of simple boolean checks to test basic facts about the object returned.

While the boolean checks here are basically the same as using the Python isinstance() function, the flags can be used without the need to import all of these classes, and they also allow the SQLAlchemy class hierarchy to change while leaving the flags intact for forward compatibility.

extension_type = <symbol 'NOT_EXTENSION'>

      The extension type, if any. Defaults to interfaces.NOT_EXTENSION

      New in version 0.8.0.

      is_aliased_class = False

      True if this object is an instance of AliasedClass.

      is_attribute = False

      True if this object is a Python descriptor.

This can refer to one of many types. Usually it is a QueryableAttribute, which handles attribute events on behalf of a MapperProperty, but it can also be an extension type such as AssociationProxy or hybrid_property. The _InspectionAttr.extension_type attribute will refer to a constant identifying the specific subtype.

      is_clause_element = False

      True if this object is an instance of ClauseElement.

      is_instance = False

      True if this object is an instance of InstanceState.

      is_mapper = False

      True if this object is an instance of Mapper.

      is_property = False

      True if this object is an instance of MapperProperty.

      is_selectable = False

True if this object is an instance of Selectable.
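
A short sketch of these flags in use (User and some_user are hypothetical mapped names, as in other examples in this documentation):

from sqlalchemy import inspect

inspect(User).is_mapper            # True - inspect() returned a Mapper
inspect(User.name).is_attribute    # True - a QueryableAttribute descriptor
inspect(some_user).is_instance     # True - an InstanceState for a mapped instance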

      class sqlalchemy.orm.state.InstanceState(obj, manager)

      Bases: sqlalchemy.orm.interfaces._InspectionAttr

      tracks state information at the instance level.

      __call__(state, passive)

      __call__ allows the InstanceState to act as a deferred callable for loading expired attributes, which is also serializable (picklable).

      attrs

      Return a namespace representing each attribute on the mapped object, including its current value and history.

      The returned object is an instance of AttributeState.

      detached

      Return true if the object is detached.

      expired_attributes

      Return the set of keys which are ‘expired’ to be loaded by the manager’s deferred scalar loader, assuming no pending changes.

See also the unmodified collection, which is intersected against this set when a refresh operation occurs.

      has_identity

      Return True if this object has an identity key.

      This should always have the same value as the expression state.persistent or state.detached.

      identity

      Return the mapped identity of the mapped object. This is the primary key identity as persisted by the ORM which can always be passed directly to Query.get().

      Returns None if the object has no primary key identity.

      Note

      An object which is transient or pending does not have a mapped identity until it is flushed, even if its attributes include primary key values.
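
As a brief sketch (some_user and User are hypothetical mapped names), the identity can be passed straight back to Query.get():

from sqlalchemy import inspect

ident = inspect(some_user).identity      # e.g. (5,) for a single-column primary key
if ident is not None:
    same_user = session.query(User).get(ident)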

      identity_key

      Return the identity key for the mapped object.

This is the key used to locate the object within the Session.identity_map mapping. It contains the identity as returned by identity.

      mapper

Return the Mapper used for this mapped object.

      object

      Return the mapped object represented by this InstanceState.

      pending

      Return true if the object is pending.

      persistent

      Return true if the object is persistent.

      session

      Return the owning Session for this instance, or None if none available.

      transient

      Return true if the object is transient.

      unloaded

      Return the set of keys which do not have a loaded value.

      This includes expired attributes and any other attribute that was never populated or modified.

      unmodified

      Return the set of keys which have no uncommitted changes

      unmodified_intersection(keys)

      Return self.unmodified.intersection(keys).

      class sqlalchemy.orm.attributes.InstrumentedAttribute(class_, key, impl=None, comparator=None, parententity=None, of_type=None)

      Bases: sqlalchemy.orm.attributes.QueryableAttribute

      Class bound instrumented attribute which adds basic descriptor methods.

      See QueryableAttribute for a description of most features.

      class sqlalchemy.orm.interfaces.MapperProperty

      Bases: sqlalchemy.orm.interfaces._MappedAttribute, sqlalchemy.orm.interfaces._InspectionAttr

      Manage the relationship of a Mapper to a single class attribute, as well as that attribute as it appears on individual instances of the class, including attribute instrumentation, attribute access, loading behavior, and dependency calculations.

      The most common occurrences of MapperProperty are the mapped Column, which is represented in a mapping as an instance of ColumnProperty, and a reference to another class produced by relationship(), represented in the mapping as an instance of RelationshipProperty.

      cascade = frozenset([])

      The set of ‘cascade’ attribute names.

      This collection is checked before the ‘cascade_iterator’ method is called.

      cascade_iterator(type_, state, visited_instances=None, halt_on=None)

      Iterate through instances related to the given instance for a particular ‘cascade’, starting with this MapperProperty.

Return an iterator of 3-tuples (instance, mapper, state).

      Note that the ‘cascade’ collection on this MapperProperty is checked first for the given type before cascade_iterator is called.

      See PropertyLoader for the related instance implementation.

      class_attribute

      Return the class-bound descriptor corresponding to this MapperProperty.

      This is basically a getattr() call:

      return getattr(self.parent.class_, self.key)

      I.e. if this MapperProperty were named addresses, and the class to which it is mapped is User, this sequence is possible:

      >>> from sqlalchemy import inspect
      >>> mapper = inspect(User)
      >>> addresses_property = mapper.attrs.addresses
      >>> addresses_property.class_attribute is User.addresses
      True
      >>> User.addresses.property is addresses_property
      True
      compare(operator, value, **kw)

      Return a compare operation for the columns represented by this MapperProperty to the given value, which may be a column value or an instance. ‘operator’ is an operator from the operators module, or from sql.Comparator.

      By default uses the PropComparator attached to this MapperProperty under the attribute name “comparator”.

      create_row_processor(context, path, mapper, row, adapter)

Return a 3-tuple of row processing functions.

      do_init()

      Perform subclass-specific initialization post-mapper-creation steps.

      This is a template method called by the MapperProperty object’s init() method.

      info

      Info dictionary associated with the object, allowing user-defined data to be associated with this MapperProperty.

      The dictionary is generated when first accessed. Alternatively, it can be specified as a constructor argument to the column_property(), relationship(), or composite() functions.

      New in version 0.8: Added support for .info to all MapperProperty subclasses.
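
As a brief sketch (the Widget mapping below is hypothetical), the dictionary can be supplied up front and read back via the mapper's attrs namespace:

from sqlalchemy import Column, Integer, String, inspect
from sqlalchemy.orm import column_property

class Widget(Base):
    __tablename__ = 'widget'
    id = Column(Integer, primary_key=True)
    name = column_property(Column("name", String(50)), info={"editable": False})

inspect(Widget).attrs.name.info["editable"]   # -> False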

      init()

      Called after all mappers are created to assemble relationships between mappers and perform other post-mapper-creation initialization steps.

      is_primary()

Return True if this MapperProperty’s mapper is the primary mapper for its class.

      This flag is used to indicate that the MapperProperty can define attribute instrumentation for the class at the class level (as opposed to the individual instance level).

      merge(session, source_state, source_dict, dest_state, dest_dict, load, _recursive)

Merge the attribute represented by this MapperProperty from the source to the destination object.

      post_instrument_class(mapper)

      Perform instrumentation adjustments that need to occur after init() has completed.

      setup(context, entity, path, adapter, **kwargs)

      Called by Query for the purposes of constructing a SQL statement.

      Each MapperProperty associated with the target mapper processes the statement referenced by the query context, adding columns and/or criterion as appropriate.

sqlalchemy.orm.interfaces.NOT_EXTENSION = <symbol 'NOT_EXTENSION'>
      class sqlalchemy.orm.interfaces.PropComparator(prop, parentmapper, adapter=None)

      Bases: sqlalchemy.sql.operators.ColumnOperators

      Defines boolean, comparison, and other operators for MapperProperty objects.

      SQLAlchemy allows for operators to be redefined at both the Core and ORM level. PropComparator is the base class of operator redefinition for ORM-level operations, including those of ColumnProperty, RelationshipProperty, and CompositeProperty.

      Note

      With the advent of Hybrid properties introduced in SQLAlchemy 0.7, as well as Core-level operator redefinition in SQLAlchemy 0.8, the use case for user-defined PropComparator instances is extremely rare. See Hybrid Attributes as well as Redefining and Creating New Operators.

      User-defined subclasses of PropComparator may be created. The built-in Python comparison and math operator methods, such as operators.ColumnOperators.__eq__(), operators.ColumnOperators.__lt__(), and operators.ColumnOperators.__add__(), can be overridden to provide new operator behavior. The custom PropComparator is passed to the MapperProperty instance via the comparator_factory argument. In each case, the appropriate subclass of PropComparator should be used:

      # definition of custom PropComparator subclasses
      
      from sqlalchemy.orm.properties import \
                              ColumnProperty,\
                              CompositeProperty,\
                              RelationshipProperty
      
      class MyColumnComparator(ColumnProperty.Comparator):
          def __eq__(self, other):
              return self.__clause_element__() == other
      
      class MyRelationshipComparator(RelationshipProperty.Comparator):
          def any(self, expression):
              "define the 'any' operation"
              # ...
      
      class MyCompositeComparator(CompositeProperty.Comparator):
          def __gt__(self, other):
              "redefine the 'greater than' operation"
      
              return sql.and_(*[a>b for a, b in
                                zip(self.__clause_element__().clauses,
                                    other.__composite_values__())])
      
      
      # application of custom PropComparator subclasses
      
      from sqlalchemy.orm import column_property, relationship, composite
      from sqlalchemy import Column, String
      
      class SomeMappedClass(Base):
          some_column = column_property(Column("some_column", String),
                              comparator_factory=MyColumnComparator)
      
          some_relationship = relationship(SomeOtherClass,
                              comparator_factory=MyRelationshipComparator)
      
          some_composite = composite(
                  Column("a", String), Column("b", String),
                  comparator_factory=MyCompositeComparator
              )

      Note that for column-level operator redefinition, it’s usually simpler to define the operators at the Core level, using the TypeEngine.comparator_factory attribute. See Redefining and Creating New Operators for more detail.

      See also:

      ColumnProperty.Comparator

      RelationshipProperty.Comparator

      CompositeProperty.Comparator

      ColumnOperators

      Redefining and Creating New Operators

      TypeEngine.comparator_factory

       
      adapted(adapter)

      Return a copy of this PropComparator which will use the given adaption function on the local side of generated expressions.

      any(criterion=None, **kwargs)

      Return true if this collection contains any member that meets the given criterion.

      The usual implementation of any() is RelationshipProperty.Comparator.any().

      Parameters:
      • criterion – an optional ClauseElement formulated against the member class’ table or attributes.
      • **kwargs – key/value pairs corresponding to member class attribute names which will be compared via equality to the corresponding values.
      has(criterion=None, **kwargs)

      Return true if this element references a member which meets the given criterion.

      The usual implementation of has() is RelationshipProperty.Comparator.has().

      Parameters:
      • criterion – an optional ClauseElement formulated against the member class’ table or attributes.
      • **kwargs – key/value pairs corresponding to member class attribute names which will be compared via equality to the corresponding values.
      of_type(class_)

      Redefine this object in terms of a polymorphic subclass.

      Returns a new PropComparator from which further criterion can be evaluated.

      e.g.:

      query.join(Company.employees.of_type(Engineer)).\
         filter(Engineer.name=='foo')
Parameters:
• class_ – a class or mapper indicating that criterion will be against this specific subclass.
      class sqlalchemy.orm.properties.RelationshipProperty(argument, secondary=None, primaryjoin=None, secondaryjoin=None, foreign_keys=None, uselist=None, order_by=False, backref=None, back_populates=None, post_update=False, cascade=False, extension=None, viewonly=False, lazy=True, collection_class=None, passive_deletes=False, passive_updates=True, remote_side=None, enable_typechecks=True, join_depth=None, comparator_factory=None, single_parent=False, innerjoin=False, distinct_target_key=False, doc=None, active_history=False, cascade_backrefs=True, load_on_pending=False, strategy_class=None, _local_remote_pairs=None, query_class=None, info=None)

      Bases: sqlalchemy.orm.interfaces.StrategizedProperty

      Describes an object property that holds a single item or list of items that correspond to a related database table.

      Public constructor is the orm.relationship() function.

      See also:

      Relationship Configuration

       
      class Comparator(prop, parentmapper, of_type=None, adapter=None)

      Bases: sqlalchemy.orm.interfaces.PropComparator

      Produce boolean, comparison, and other operators for RelationshipProperty attributes.

      See the documentation for PropComparator for a brief overview of ORM level operator definition.

      See also:

      PropComparator

      ColumnProperty.Comparator

      ColumnOperators

      Redefining and Creating New Operators

      TypeEngine.comparator_factory

      __eq__(other)

      Implement the == operator.

      In a many-to-one context, such as:

      MyClass.some_prop == <some object>

      this will typically produce a clause such as:

      mytable.related_id == <some id>

      Where <some id> is the primary key of the given object.

The == operator provides partial functionality for non-many-to-one comparisons:

      • Comparisons against collections are not supported. Use contains().
      • Compared to a scalar one-to-many, will produce a clause that compares the target columns in the parent to the given target.
      • Compared to a scalar many-to-many, an alias of the association table will be rendered as well, forming a natural join that is part of the main body of the query. This will not work for queries that go beyond simple AND conjunctions of comparisons, such as those which use OR. Use explicit joins, outerjoins, or has() for more comprehensive non-many-to-one scalar membership tests.
      • Comparisons against None given in a one-to-many or many-to-many context produce a NOT EXISTS clause.
      __init__(prop, parentmapper, of_type=None, adapter=None)

      Construction of RelationshipProperty.Comparator is internal to the ORM’s attribute mechanics.

      __ne__(other)

      Implement the != operator.

      In a many-to-one context, such as:

      MyClass.some_prop != <some object>

      This will typically produce a clause such as:

      mytable.related_id != <some id>

      Where <some id> is the primary key of the given object.

The != operator provides partial functionality for non-many-to-one comparisons:

      • Comparisons against collections are not supported. Use contains() in conjunction with not_().
      • Compared to a scalar one-to-many, will produce a clause that compares the target columns in the parent to the given target.
      • Compared to a scalar many-to-many, an alias of the association table will be rendered as well, forming a natural join that is part of the main body of the query. This will not work for queries that go beyond simple AND conjunctions of comparisons, such as those which use OR. Use explicit joins, outerjoins, or has() in conjunction with not_() for more comprehensive non-many-to-one scalar membership tests.
      • Comparisons against None given in a one-to-many or many-to-many context produce an EXISTS clause.
      adapted(adapter)

      Return a copy of this PropComparator which will use the given adaption function on the local side of generated expressions.

      any(criterion=None, **kwargs)

      Produce an expression that tests a collection against particular criterion, using EXISTS.

      An expression like:

      session.query(MyClass).filter(
          MyClass.somereference.any(SomeRelated.x==2)
      )

      Will produce a query like:

      SELECT * FROM my_table WHERE
      EXISTS (SELECT 1 FROM related WHERE related.my_id=my_table.id
      AND related.x=2)

      Because any() uses a correlated subquery, its performance is not nearly as good when compared against large target tables as that of using a join.

      any() is particularly useful for testing for empty collections:

      session.query(MyClass).filter(
          ~MyClass.somereference.any()
      )

      will produce:

      SELECT * FROM my_table WHERE
      NOT EXISTS (SELECT 1 FROM related WHERE
      related.my_id=my_table.id)

      any() is only valid for collections, i.e. a relationship() that has uselist=True. For scalar references, use has().

      contains(other, **kwargs)

      Return a simple expression that tests a collection for containment of a particular item.

      contains() is only valid for a collection, i.e. a relationship() that implements one-to-many or many-to-many with uselist=True.

      When used in a simple one-to-many context, an expression like:

      MyClass.contains(other)

      Produces a clause like:

      mytable.id == <some id>

      Where <some id> is the value of the foreign key attribute on other which refers to the primary key of its parent object. From this it follows that contains() is very useful when used with simple one-to-many operations.

      For many-to-many operations, the behavior of contains() has more caveats. The association table will be rendered in the statement, producing an “implicit” join, that is, includes multiple tables in the FROM clause which are equated in the WHERE clause:

      query(MyClass).filter(MyClass.contains(other))

      Produces a query like:

      SELECT * FROM my_table, my_association_table AS
      my_association_table_1 WHERE
      my_table.id = my_association_table_1.parent_id
      AND my_association_table_1.child_id = <some id>

      Where <some id> would be the primary key of other. From the above, it is clear that contains() will not work with many-to-many collections when used in queries that move beyond simple AND conjunctions, such as multiple contains() expressions joined by OR. In such cases subqueries or explicit “outer joins” will need to be used instead. See any() for a less-performant alternative using EXISTS, or refer to Query.outerjoin() as well as Querying with Joins for more details on constructing outer joins.

      has(criterion=None, **kwargs)

      Produce an expression that tests a scalar reference against particular criterion, using EXISTS.

      An expression like:

      session.query(MyClass).filter(
          MyClass.somereference.has(SomeRelated.x==2)
      )

      Will produce a query like:

      SELECT * FROM my_table WHERE
      EXISTS (SELECT 1 FROM related WHERE
      related.id==my_table.related_id AND related.x=2)

      Because has() uses a correlated subquery, its performance is not nearly as good when compared against large target tables as that of using a join.

      has() is only valid for scalar references, i.e. a relationship() that has uselist=False. For collection references, use any().

      in_(other)

      Produce an IN clause - this is not implemented for relationship()-based attributes at this time.

      mapper

The target Mapper referred to by this RelationshipProperty.Comparator.

      This is the “target” or “remote” side of the relationship().

      of_type(cls)

      Produce a construct that represents a particular ‘subtype’ of attribute for the parent class.

      Currently this is usable in conjunction with Query.join() and Query.outerjoin().

      RelationshipProperty.cascade

      Return the current cascade setting for this RelationshipProperty.

      RelationshipProperty.mapper

      Return the targeted Mapper for this RelationshipProperty.

      This is a lazy-initializing static attribute.

RelationshipProperty.table

Return the selectable linked to this RelationshipProperty object’s target Mapper.

Deprecated since version 0.7: Use .target

      class sqlalchemy.orm.descriptor_props.SynonymProperty(name, map_column=None, descriptor=None, comparator_factory=None, doc=None)

      Bases: sqlalchemy.orm.descriptor_props.DescriptorProperty

      class sqlalchemy.orm.query.QueryContext(query)
      class sqlalchemy.orm.attributes.QueryableAttribute(class_, key, impl=None, comparator=None, parententity=None, of_type=None)

      Bases: sqlalchemy.orm.interfaces._MappedAttribute, sqlalchemy.orm.interfaces._InspectionAttr, sqlalchemy.orm.interfaces.PropComparator

      Base class for descriptor objects that intercept attribute events on behalf of a MapperProperty object. The actual MapperProperty is accessible via the QueryableAttribute.property attribute.

      info

      Return the ‘info’ dictionary for the underlying SQL element.

      The behavior here is as follows:

      New in version 0.8.0.

      parent

      Return an inspection instance representing the parent.

      This will be either an instance of Mapper or AliasedInsp, depending upon the nature of the parent entity which this attribute is associated with.

      property

      Return the MapperProperty associated with this QueryableAttribute.

      Return values here will commonly be instances of ColumnProperty or RelationshipProperty.

      SQLAlchemy-0.8.4/doc/orm/loading.html

      Relationship Loading Techniques

      A big part of SQLAlchemy is providing a wide range of control over how related objects get loaded when querying. This behavior can be configured at mapper construction time using the lazy parameter to the relationship() function, as well as by using options with the Query object.

      Using Loader Strategies: Lazy Loading, Eager Loading

      By default, all inter-object relationships are lazy loading. The scalar or collection attribute associated with a relationship() contains a trigger which fires the first time the attribute is accessed. This trigger, in all but one case, issues a SQL call at the point of access in order to load the related object or objects:

      >>> jack.addresses
      [<Address(u'jack@google.com')>, <Address(u'j25@yahoo.com')>]

      The one case where SQL is not emitted is for a simple many-to-one relationship, when the related object can be identified by its primary key alone and that object is already present in the current Session.

      This default behavior of “load upon attribute access” is known as “lazy” or “select” loading - the name “select” because a “SELECT” statement is typically emitted when the attribute is first accessed.

      In the Object Relational Tutorial, we introduced the concept of Eager Loading. We used an option in conjunction with the Query object in order to indicate that a relationship should be loaded at the same time as the parent, within a single SQL query. This option, known as joinedload(), connects a JOIN (by default a LEFT OUTER join) to the statement and populates the scalar/collection from the same result set as that of the parent:

      >>> jack = session.query(User).\
      ... options(joinedload('addresses')).\
      ... filter_by(name='jack').all() 
      

      In addition to “joined eager loading”, a second option for eager loading exists, called “subquery eager loading”. This kind of eager loading emits an additional SQL statement for each collection requested, aggregated across all parent objects:

      >>> jack = session.query(User).\
      ... options(subqueryload('addresses')).\
      ... filter_by(name='jack').all()
      

      The default loader strategy for any relationship() is configured by the lazy keyword argument, which defaults to select - this indicates a “select” statement. Below we set it to joined so that the children relationship is eager loaded, using a join:

      # load the 'children' collection using LEFT OUTER JOIN
      mapper(Parent, parent_table, properties={
          'children': relationship(Child, lazy='joined')
      })

      We can also set it to eagerly load using a second query for all collections, using subquery:

      # load the 'children' attribute using a join to a subquery
      mapper(Parent, parent_table, properties={
          'children': relationship(Child, lazy='subquery')
      })

      When querying, all three choices of loader strategy are available on a per-query basis, using the joinedload(), subqueryload() and lazyload() query options:

      # set children to load lazily
      session.query(Parent).options(lazyload('children')).all()
      
      # set children to load eagerly with a join
      session.query(Parent).options(joinedload('children')).all()
      
      # set children to load eagerly with a second statement
      session.query(Parent).options(subqueryload('children')).all()

      To reference a relationship that is deeper than one level, separate the names by periods:

      session.query(Parent).options(joinedload('foo.bar.bat')).all()

      When using dot-separated names with joinedload() or subqueryload(), the option applies only to the actual attribute named, and not its ancestors. For example, suppose a mapping from A to B to C, where the relationships, named atob and btoc, are both lazy-loading. A statement like the following:

      session.query(A).options(joinedload('atob.btoc')).all()

      will load only A objects to start. When the atob attribute on each A is accessed, the returned B objects will eagerly load their C objects.

      Therefore, to modify the eager load to load both atob as well as btoc, place joinedloads for both:

      session.query(A).options(joinedload('atob'), joinedload('atob.btoc')).all()

      or more succinctly just use joinedload_all() or subqueryload_all():

      session.query(A).options(joinedload_all('atob.btoc')).all()

      There are two other loader strategies available, dynamic loading and no loading; these are described in Working with Large Collections.
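
      For reference, a minimal sketch of how those two strategies might be configured, following the mapper() style used above (the 'tags' relationship is purely illustrative):

      # 'dynamic' returns a Query-producing collection rather than loading it;
      # 'noload' never loads the collection at all
      mapper(Parent, parent_table, properties={
          'children': relationship(Child, lazy='dynamic'),
          'tags': relationship(Tag, lazy='noload')
      })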

      Default Loading Strategies

      New in version 0.7.5: Default loader strategies as a new feature.

      Each of joinedload(), subqueryload(), lazyload(), and noload() can be used to set the default style of relationship() loading for a particular query, affecting all relationship() -mapped attributes not otherwise specified in the Query. This feature is available by passing the string '*' as the argument to any of these options:

      session.query(MyClass).options(lazyload('*'))

      Above, the lazyload('*') option will supersede the lazy setting of all relationship() constructs in use for that query, except for those which use the 'dynamic' style of loading. If some relationships specify lazy='joined' or lazy='subquery', for example, using lazyload('*') will unilaterally cause all those relationships to use 'select' loading, e.g. emit a SELECT statement when each attribute is accessed.

      The option does not supersede loader options stated in the query, such as eagerload(), subqueryload(), etc. The query below will still use joined loading for the widget relationship:

      session.query(MyClass).options(
                                  lazyload('*'),
                                  joinedload(MyClass.widget)
                              )

      If multiple '*' options are passed, the last one overrides those previously passed.
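
      A minimal sketch of that precedence, reusing the hypothetical MyClass mapping from above:

      # both wildcard options are present; the later lazyload('*') wins,
      # so all relationships fall back to lazy "select" loading
      session.query(MyClass).options(
          subqueryload('*'),
          lazyload('*')
      ).all()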

      The Zen of Eager Loading

      The philosophy behind loader strategies is that any set of loading schemes can be applied to a particular query, and the results don’t change - only the number of SQL statements required to fully load related objects and collections changes. A particular query might start out using all lazy loads. After using it in context, it might be revealed that particular attributes or collections are always accessed, and that it would be more efficient to change the loader strategy for these. The strategy can be changed with no other modifications to the query, the results will remain identical, but fewer SQL statements would be emitted. In theory (and pretty much in practice), nothing you can do to the Query would make it load a different set of primary or related objects based on a change in loader strategy.

      How joinedload() in particular achieves this result of not impacting entity rows returned in any way is that it creates an anonymous alias of the joins it adds to your query, so that they can’t be referenced by other parts of the query. For example, the query below uses joinedload() to create a LEFT OUTER JOIN from users to addresses, however the ORDER BY added against Address.email_address is not valid - the Address entity is not named in the query:

      >>> jack = session.query(User).\
      ... options(joinedload(User.addresses)).\
      ... filter(User.name=='jack').\
      ... order_by(Address.email_address).all()
      
      SELECT addresses_1.id AS addresses_1_id,
             addresses_1.email_address AS addresses_1_email_address,
             addresses_1.user_id AS addresses_1_user_id,
             users.id AS users_id, users.name AS users_name,
             users.fullname AS users_fullname, users.password AS users_password
      FROM users LEFT OUTER JOIN addresses AS addresses_1
          ON users.id = addresses_1.user_id
      WHERE users.name = ?
      ORDER BY addresses.email_address  <-- this part is wrong !
      ['jack']

      Above, ORDER BY addresses.email_address is not valid since addresses is not in the FROM list. The correct way to load the User records and order by email address is to use Query.join():

      >>> jack = session.query(User).\
      ... join(User.addresses).\
      ... filter(User.name=='jack').\
      ... order_by(Address.email_address).all()
      
      SELECT users.id AS users_id, users.name AS users_name,
             users.fullname AS users_fullname, users.password AS users_password
      FROM users JOIN addresses ON users.id = addresses.user_id
      WHERE users.name = ?
      ORDER BY addresses.email_address
      ['jack']

      The statement above is of course not the same as the previous one, in that the columns from addresses are not included in the result at all. We can add joinedload() back in, so that there are two joins - one is that which we are ordering on, the other is used anonymously to load the contents of the User.addresses collection:

      >>> jack = session.query(User).\
      ... join(User.addresses).\
      ... options(joinedload(User.addresses)).\
      ... filter(User.name=='jack').\
      ... order_by(Address.email_address).all()
      
      SELECT addresses_1.id AS addresses_1_id,
             addresses_1.email_address AS addresses_1_email_address,
             addresses_1.user_id AS addresses_1_user_id,
             users.id AS users_id, users.name AS users_name,
             users.fullname AS users_fullname, users.password AS users_password
      FROM users JOIN addresses ON users.id = addresses.user_id
          LEFT OUTER JOIN addresses AS addresses_1
          ON users.id = addresses_1.user_id
      WHERE users.name = ?
      ORDER BY addresses.email_address
      ['jack']

      What we see above is that our usage of Query.join() is to supply JOIN clauses we’d like to use in subsequent query criterion, whereas our usage of joinedload() only concerns itself with the loading of the User.addresses collection, for each User in the result. In this case, the two joins most probably appear redundant - which they are. If we wanted to use just one JOIN for collection loading as well as ordering, we use the contains_eager() option, described in Routing Explicit Joins/Statements into Eagerly Loaded Collections below. But to see why joinedload() does what it does, consider if we were filtering on a particular Address:

      >>> jack = session.query(User).\
      ... join(User.addresses).\
      ... options(joinedload(User.addresses)).\
      ... filter(User.name=='jack').\
      ... filter(Address.email_address=='someaddress@foo.com').\
      ... all()
      
      SELECT addresses_1.id AS addresses_1_id,
             addresses_1.email_address AS addresses_1_email_address,
             addresses_1.user_id AS addresses_1_user_id,
             users.id AS users_id, users.name AS users_name,
             users.fullname AS users_fullname, users.password AS users_password
      FROM users JOIN addresses ON users.id = addresses.user_id
          LEFT OUTER JOIN addresses AS addresses_1
          ON users.id = addresses_1.user_id
      WHERE users.name = ? AND addresses.email_address = ?
      ['jack', 'someaddress@foo.com']

      Above, we can see that the two JOINs have very different roles. One will match exactly one row, that of the join of User and Address where Address.email_address=='someaddress@foo.com'. The other LEFT OUTER JOIN will match all Address rows related to User, and is only used to populate the User.addresses collection, for those User objects that are returned.

      By changing the usage of joinedload() to another style of loading, we can change how the collection is loaded completely independently of SQL used to retrieve the actual User rows we want. Below we change joinedload() into subqueryload():

      >>> jack = session.query(User).\
      ... join(User.addresses).\
      ... options(subqueryload(User.addresses)).\
      ... filter(User.name=='jack').\
      ... filter(Address.email_address=='someaddress@foo.com').\
      ... all()
      
      SELECT users.id AS users_id, users.name AS users_name,
             users.fullname AS users_fullname, users.password AS users_password
      FROM users JOIN addresses ON users.id = addresses.user_id
      WHERE users.name = ? AND addresses.email_address = ?
      ['jack', 'someaddress@foo.com']
      # ... subqueryload() emits a SELECT in order
      # to load all address records ...

      When using joined eager loading, if the query contains a modifier that impacts the rows returned externally to the joins, such as when using DISTINCT, LIMIT, OFFSET or equivalent, the completed statement is first wrapped inside a subquery, and the joins used specifically for joined eager loading are applied to the subquery. SQLAlchemy’s joined eager loading goes the extra mile, and then ten miles further, to absolutely ensure that it does not affect the end result of the query, only the way collections and related objects are loaded, no matter what the format of the query is.

      What Kind of Loading to Use?

      Which type of loading to use typically comes down to optimizing the tradeoff between number of SQL executions, complexity of SQL emitted, and amount of data fetched. Let’s take two examples, a relationship() which references a collection, and a relationship() that references a scalar many-to-one reference.

      • One to Many Collection
      • When using the default lazy loading, if you load 100 objects, and then access a collection on each of them, a total of 101 SQL statements will be emitted, although each statement will typically be a simple SELECT without any joins.
      • When using joined loading, the load of 100 objects and their collections will emit only one SQL statement. However, the total number of rows fetched will be equal to the sum of the size of all the collections, plus one extra row for each parent object that has an empty collection. Each row will also contain the full set of columns represented by the parents, repeated for each collection item - SQLAlchemy does not re-fetch these columns other than those of the primary key, however most DBAPIs (with some exceptions) will transmit the full data of each parent over the wire to the client connection in any case. Therefore joined eager loading only makes sense when the size of the collections are relatively small. The LEFT OUTER JOIN can also be performance intensive compared to an INNER join.
      • When using subquery loading, the load of 100 objects will emit two SQL statements. The second statement will fetch a total number of rows equal to the sum of the size of all collections. An INNER JOIN is used, and a minimum of parent columns are requested, only the primary keys. So a subquery load makes sense when the collections are larger.
      • When multiple levels of depth are used with joined or subquery loading, loading collections-within-collections will multiply the total number of rows fetched in a Cartesian fashion. Both forms of eager loading always join from the original parent class.
      • Many to One Reference
      • When using the default lazy loading, a load of 100 objects will, like in the case of the collection, emit as many as 101 SQL statements. However - there is a significant exception to this, in that if the many-to-one reference is a simple foreign key reference to the target’s primary key, each reference will be checked first in the current identity map using Query.get(). So here, if the collection of objects references a relatively small set of target objects, or the full set of possible target objects have already been loaded into the session and are strongly referenced, using the default of lazy='select' is by far the most efficient way to go.
      • When using joined loading, the load of 100 objects will emit only one SQL statement. The join will be a LEFT OUTER JOIN, and the total number of rows will be equal to 100 in all cases. If you know that each parent definitely has a child (i.e. the foreign key reference is NOT NULL), the joined load can be configured with innerjoin=True, which is usually specified within the relationship(); a configuration sketch follows this list. For a load of objects where there are many possible target references which may have not been loaded already, joined loading with an INNER JOIN is extremely efficient.
      • Subquery loading will issue a second load for all the child objects, so for a load of 100 objects there would be two SQL statements emitted. There’s probably not much advantage here over joined loading, however, except perhaps that subquery loading can use an INNER JOIN in all cases whereas joined loading requires that the foreign key is NOT NULL.

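      As referenced in the many-to-one bullets above, a minimal configuration sketch (assuming a Child.parent many-to-one whose foreign key column is NOT NULL):

      # joined eager loading with an INNER JOIN; appropriate because the
      # foreign key to Parent is NOT NULL, so no Child rows can be lost
      mapper(Child, child_table, properties={
          'parent': relationship(Parent, lazy='joined', innerjoin=True)
      })
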
      Routing Explicit Joins/Statements into Eagerly Loaded Collections

      The behavior of joinedload() is such that joins are created automatically, using anonymous aliases as targets, the results of which are routed into collections and scalar references on loaded objects. It is often the case that a query already includes the necessary joins which represent a particular collection or scalar reference, and the joins added by the joinedload feature are redundant - yet you’d still like the collections/references to be populated.

      For this SQLAlchemy supplies the contains_eager() option. This option is used in the same manner as the joinedload() option except it is assumed that the Query will specify the appropriate joins explicitly. Below it’s used with a from_statement load:

      # mapping is the users->addresses mapping
      mapper(User, users_table, properties={
          'addresses': relationship(Address)
      })
      
      # define a query on USERS with an outer join to ADDRESSES
      statement = users_table.outerjoin(addresses_table).select().apply_labels()
      
      # construct a Query object which expects the "addresses" results
      query = session.query(User).options(contains_eager('addresses'))
      
      # get results normally
      r = query.from_statement(statement)

      It works just as well with an inline Query.join() or Query.outerjoin():

      session.query(User).outerjoin(User.addresses).options(contains_eager(User.addresses)).all()

      If the “eager” portion of the statement is “aliased”, the alias keyword argument to contains_eager() may be used to indicate it. This is a string alias name or reference to an actual Alias (or other selectable) object:

      # use an alias of the Address entity
      adalias = aliased(Address)
      
      # construct a Query object which expects the "addresses" results
      query = session.query(User).\
          outerjoin(adalias, User.addresses).\
          options(contains_eager(User.addresses, alias=adalias))
      
      # get results normally
      r = query.all()
      

      The alias argument is used only as a source of columns to match up to the result set. You can use it to match up the result to arbitrary label names in a string SQL statement, by passing a select() which links those labels to the mapped Table:

      # label the columns of the addresses table
      eager_columns = select([
                          addresses.c.address_id.label('a1'),
                          addresses.c.email_address.label('a2'),
                          addresses.c.user_id.label('a3')])
      
      # select from a raw SQL statement which uses those label names for the
      # addresses table.  contains_eager() matches them up.
      query = session.query(User).\
          from_statement("select users.*, addresses.address_id as a1, "
                  "addresses.email_address as a2, addresses.user_id as a3 "
                  "from users left outer join addresses on users.user_id=addresses.user_id").\
          options(contains_eager(User.addresses, alias=eager_columns))

      The path given as the argument to contains_eager() needs to be a full path from the starting entity. For example if we were loading Users->orders->Order->items->Item, the string version would look like:

      query(User).options(contains_eager('orders', 'items'))

      Or using the class-bound descriptor:

      query(User).options(contains_eager(User.orders, Order.items))

      Relation Loader API

      sqlalchemy.orm.contains_alias(alias)

      Return a MapperOption that will indicate to the query that the main table has been aliased.

      This is used in the very rare case that contains_eager() is being used in conjunction with a user-defined SELECT statement that aliases the parent table. E.g.:

      # define an aliased UNION called 'ulist'
      statement = users.select(users.c.user_id==7).\
                      union(users.select(users.c.user_id>7)).\
                      alias('ulist')
      
      # add on an eager load of "addresses"
      statement = statement.outerjoin(addresses).\
                      select().apply_labels()
      
      # create query, indicating "ulist" will be an
      # alias for the main table, "addresses"
      # property should be eager loaded
      query = session.query(User).options(
                              contains_alias('ulist'),
                              contains_eager('addresses'))
      
      # then get results via the statement
      results = query.from_statement(statement).all()

      Parameters: alias – the string name of an alias, or an Alias object representing the alias.
      sqlalchemy.orm.contains_eager(*keys, **kwargs)

      Return a MapperOption that will indicate to the query that the given attribute should be eagerly loaded from columns currently in the query.

      Used with options().

      The option is used in conjunction with an explicit join that loads the desired rows, i.e.:

      sess.query(Order).\
              join(Order.user).\
              options(contains_eager(Order.user))

      The above query would join from the Order entity to its related User entity, and the returned Order objects would have the Order.user attribute pre-populated.

      contains_eager() also accepts an alias argument, which is the string name of an alias, an alias() construct, or an aliased() construct. Use this when the eagerly-loaded rows are to come from an aliased table:

      user_alias = aliased(User)
      sess.query(Order).\
              join((user_alias, Order.user)).\
              options(contains_eager(Order.user, alias=user_alias))

      See also eagerload() for the “automatic” version of this functionality.

      For additional examples of contains_eager() see Routing Explicit Joins/Statements into Eagerly Loaded Collections.

      sqlalchemy.orm.eagerload(*args, **kwargs)

      A synonym for joinedload().

      sqlalchemy.orm.eagerload_all(*args, **kwargs)

      A synonym for joinedload_all()

      sqlalchemy.orm.immediateload(*keys)

      Return a MapperOption that will convert the property of the given name or series of mapped attributes into an immediate load.

      The “immediate” load means the attribute will be fetched with a separate SELECT statement per parent in the same way as lazy loading - except the loader is guaranteed to be called at load time before the parent object is returned in the result.

      The normal behavior of lazy loading applies - if the relationship is a simple many-to-one, and the child object is already present in the Session, no SELECT statement will be emitted.

      Used with options().

      See also: lazyload(), eagerload(), subqueryload()

      New in version 0.6.5.
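
      A short usage sketch, assuming the User.addresses relationship used elsewhere in this chapter:

      from sqlalchemy.orm import immediateload

      # each User row returned has its "addresses" collection loaded up
      # front, via a separate SELECT per parent
      session.query(User).options(immediateload(User.addresses)).all()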

      sqlalchemy.orm.joinedload(*keys, **kw)

      Return a MapperOption that will convert the property of the given name or series of mapped attributes into a joined eager load.

      Changed in version 0.6beta3: This function is known as eagerload() in all versions of SQLAlchemy prior to version 0.6beta3, including the 0.5 and 0.4 series. eagerload() will remain available for the foreseeable future in order to enable cross-compatibility.

      Used with options().

      examples:

      # joined-load the "orders" collection on "User"
      query(User).options(joinedload(User.orders))
      
      # joined-load the "keywords" collection on each "Item",
      # but not the "items" collection on "Order" - those
      # remain lazily loaded.
      query(Order).options(joinedload(Order.items, Item.keywords))
      
      # to joined-load across both, use joinedload_all()
      query(Order).options(joinedload_all(Order.items, Item.keywords))
      
      # set the default strategy to be 'joined'
      query(Order).options(joinedload('*'))

      joinedload() also accepts a keyword argument innerjoin=True which indicates using an inner join instead of an outer:

      query(Order).options(joinedload(Order.user, innerjoin=True))

      Note

      The join created by joinedload() is anonymously aliased such that it does not affect the query results. A Query.order_by() or Query.filter() call cannot reference these aliased tables - so-called “user space” joins are constructed using Query.join(). The rationale for this is that joinedload() is only applied in order to affect how related objects or collections are loaded as an optimizing detail - it can be added or removed with no impact on actual results. See the section The Zen of Eager Loading for a detailed description of how this is used, including how to use a single explicit JOIN for filtering/ordering and eager loading simultaneously.

      See also: subqueryload(), lazyload()

      sqlalchemy.orm.joinedload_all(*keys, **kw)

      Return a MapperOption that will convert all properties along the given dot-separated path or series of mapped attributes into a joined eager load.

      Changed in version 0.6beta3: This function is known as eagerload_all() in all versions of SQLAlchemy prior to version 0.6beta3, including the 0.5 and 0.4 series. eagerload_all() will remain available for the foreseeable future in order to enable cross-compatibility.

      Used with options().

      For example:

      query.options(joinedload_all('orders.items.keywords'))...

      will set all of orders, orders.items, and orders.items.keywords to load in one joined eager load.

      Individual descriptors are accepted as arguments as well:

      query.options(joinedload_all(User.orders, Order.items, Item.keywords))

      The keyword arguments accept a flag innerjoin=True|False which will override the value of the innerjoin flag specified on the relationship().

      See also: subqueryload_all(), lazyload()

      sqlalchemy.orm.lazyload(*keys)

      Return a MapperOption that will convert the property of the given name or series of mapped attributes into a lazy load.

      Used with options().

      See also: eagerload(), subqueryload(), immediateload()
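
      A short usage sketch, mirroring the per-query examples earlier in this chapter (the User.orders relationship is assumed):

      from sqlalchemy.orm import lazyload

      # force "orders" back to lazy "select" loading for this query only
      session.query(User).options(lazyload(User.orders)).all()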

      sqlalchemy.orm.noload(*keys)

      Return a MapperOption that will convert the property of the given name or series of mapped attributes into a non-load.

      Used with options().

      See also: lazyload(), eagerload(), subqueryload(), immediateload()
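
      A brief sketch, again assuming a User.addresses relationship; the collection simply comes back empty rather than triggering a lazy load:

      from sqlalchemy.orm import noload

      # "addresses" is never loaded; accessing it yields an empty
      # collection instead of emitting SQL
      users = session.query(User).options(noload(User.addresses)).all()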

      sqlalchemy.orm.subqueryload(*keys)

      Return a MapperOption that will convert the property of the given name or series of mapped attributes into a subquery eager load.

      Used with options().

      examples:

      # subquery-load the "orders" collection on "User"
      query(User).options(subqueryload(User.orders))
      
      # subquery-load the "keywords" collection on each "Item",
      # but not the "items" collection on "Order" - those
      # remain lazily loaded.
      query(Order).options(subqueryload(Order.items, Item.keywords))
      
      # to subquery-load across both, use subqueryload_all()
      query(Order).options(subqueryload_all(Order.items, Item.keywords))
      
      # set the default strategy to be 'subquery'
      query(Order).options(subqueryload('*'))

      See also: joinedload(), lazyload()

      sqlalchemy.orm.subqueryload_all(*keys)

      Return a MapperOption that will convert all properties along the given dot-separated path or series of mapped attributes into a subquery eager load.

      Used with options().

      For example:

      query.options(subqueryload_all('orders.items.keywords'))...

      will set all of orders, orders.items, and orders.items.keywords to load in one subquery eager load.

      Individual descriptors are accepted as arguments as well:

      query.options(subqueryload_all(User.orders, Order.items,
      Item.keywords))

      See also: joinedload_all(), lazyload(), immediateload()

      SQLAlchemy-0.8.4/doc/orm/mapper_config.html

      Mapper Configuration

      This section describes a variety of configurational patterns that are usable with mappers. It assumes you’ve worked through Object Relational Tutorial and know how to construct and use rudimentary mappers and relationships.

      Classical Mappings

      A Classical Mapping refers to the configuration of a mapped class using the mapper() function, without using the Declarative system. As an example, start with the declarative mapping introduced in Object Relational Tutorial:

      class User(Base):
          __tablename__ = 'users'
      
          id = Column(Integer, primary_key=True)
          name = Column(String)
          fullname = Column(String)
          password = Column(String)

      In “classical” form, the table metadata is created separately with the Table construct, then associated with the User class via the mapper() function:

      from sqlalchemy import Table, MetaData, Column, ForeignKey, Integer, String
      from sqlalchemy.orm import mapper
      
      metadata = MetaData()
      
      user = Table('user', metadata,
                  Column('id', Integer, primary_key=True),
                  Column('name', String(50)),
                  Column('fullname', String(50)),
                  Column('password', String(12))
              )
      
      class User(object):
          def __init__(self, name, fullname, password):
              self.name = name
              self.fullname = fullname
              self.password = password
      
      mapper(User, user)

      Information about mapped attributes, such as relationships to other classes, is provided via the properties dictionary. The example below illustrates a second Table object, mapped to a class called Address, then linked to User via relationship():

      address = Table('address', metadata,
                  Column('id', Integer, primary_key=True),
                  Column('user_id', Integer, ForeignKey('user.id')),
                  Column('email_address', String(50))
                  )
      
      mapper(User, user, properties={
          'addresses' : relationship(Address, backref='user', order_by=address.c.id)
      })
      
      mapper(Address, address)

      When using classical mappings, classes must be provided directly without the benefit of the “string lookup” system provided by Declarative. SQL expressions are typically specified in terms of the Table objects, i.e. address.c.id above for the Address relationship, and not Address.id, as Address may not yet be linked to table metadata, nor can we specify a string here.

      Some examples in the documentation still use the classical approach, but note that the classical as well as Declarative approaches are fully interchangeable. Both systems ultimately create the same configuration, consisting of a Table, user-defined class, linked together with a mapper(). When we talk about “the behavior of mapper()”, this includes when using the Declarative system as well - it’s still used, just behind the scenes.

      Customizing Column Properties

      The default behavior of mapper() is to assemble all the columns in the mapped Table into mapped object attributes, each of which is named according to the name of the column itself (specifically, the key attribute of Column). This behavior can be modified in several ways.

      Naming Columns Distinctly from Attribute Names

      A mapping by default shares the same name for a Column as that of the mapped attribute - specifically it matches the Column.key attribute on Column, which by default is the same as the Column.name.

      The name assigned to the Python attribute which maps to Column can be different from either Column.name or Column.key just by assigning it that way, as we illustrate here in a Declarative mapping:

      class User(Base):
          __tablename__ = 'user'
          id = Column('user_id', Integer, primary_key=True)
          name = Column('user_name', String(50))

      Where above User.id resolves to a column named user_id and User.name resolves to a column named user_name.

      When mapping to an existing table, the Column object can be referenced directly:

      class User(Base):
          __table__ = user_table
          id = user_table.c.user_id
          name = user_table.c.user_name

      Or in a classical mapping, placed in the properties dictionary with the desired key:

      mapper(User, user_table, properties={
         'id': user_table.c.user_id,
         'name': user_table.c.user_name,
      })

      In the next section we’ll examine the usage of .key more closely.

      Automating Column Naming Schemes from Reflected Tables

      In the previous section Naming Columns Distinctly from Attribute Names, we showed how a Column explicitly mapped to a class can have a different attribute name than the column. But what if we aren’t listing out Column objects explicitly, and instead are automating the production of Table objects using reflection (e.g. as described in Reflecting Database Objects)? In this case we can make use of the DDLEvents.column_reflect() event to intercept the production of Column objects and provide them with the Column.key of our choice:

      @event.listens_for(Table, "column_reflect")
      def column_reflect(inspector, table, column_info):
          # set column.key = "attr_<lower_case_name>"
          column_info['key'] = "attr_%s" % column_info['name'].lower()

      With the above event, the reflection of Column objects will be intercepted with our event that adds a new ”.key” element, such as in a mapping as below:

      class MyClass(Base):
          __table__ = Table("some_table", Base.metadata,
                      autoload=True, autoload_with=some_engine)

      If we want to qualify our event to only react for the specific MetaData object above, we can check for it in our event:

      @event.listens_for(Table, "column_reflect")
      def column_reflect(inspector, table, column_info):
          if table.metadata is Base.metadata:
              # set column.key = "attr_<lower_case_name>"
              column_info['key'] = "attr_%s" % column_info['name'].lower()

      Naming All Columns with a Prefix

      A quick approach to prefix column names, typically when mapping to an existing Table object, is to use column_prefix:

      class User(Base):
          __table__ = user_table
          __mapper_args__ = {'column_prefix':'_'}

      The above will place attribute names such as _user_id, _user_name, _password etc. on the mapped User class.

      This approach is uncommon in modern usage. For dealing with reflected tables, a more flexible approach is to use that described in Automating Column Naming Schemes from Reflected Tables.

      Using column_property for column level options

      Options can be specified when mapping a Column using the column_property() function. This function explicitly creates the ColumnProperty used by the mapper() to keep track of the Column; normally, the mapper() creates this automatically. Using column_property(), we can pass additional arguments about how we’d like the Column to be mapped. Below, we pass an option active_history, which specifies that a change to this column’s value should result in the former value being loaded first:

      from sqlalchemy.orm import column_property
      
      class User(Base):
          __tablename__ = 'user'
      
          id = Column(Integer, primary_key=True)
          name = column_property(Column(String(50)), active_history=True)

      column_property() is also used to map a single attribute to multiple columns. This use case arises when mapping to a join() which has attributes which are equated to each other:

      class User(Base):
          __table__ = user.join(address)
      
          # assign "user.id", "address.user_id" to the
          # "id" attribute
          id = column_property(user.c.id, address.c.user_id)

      For more examples featuring this usage, see Mapping a Class against Multiple Tables.

      Another place where column_property() is needed is to specify SQL expressions as mapped attributes, such as below where we create an attribute fullname that is the string concatenation of the firstname and lastname columns:

      class User(Base):
          __tablename__ = 'user'
          id = Column(Integer, primary_key=True)
          firstname = Column(String(50))
          lastname = Column(String(50))
          fullname = column_property(firstname + " " + lastname)

      See examples of this usage at SQL Expressions as Mapped Attributes.

      sqlalchemy.orm.column_property(*cols, **kw)

      Provide a column-level property for use with a Mapper.

      Column-based properties can normally be applied to the mapper’s properties dictionary using the Column element directly. Use this function when the given column is not directly present within the mapper’s selectable; examples include SQL expressions, functions, and scalar SELECT queries.

      Columns that aren’t present in the mapper’s selectable won’t be persisted by the mapper and are effectively “read-only” attributes.

      Parameters:
      • *cols – list of Column objects to be mapped.
      • active_history=False

        When True, indicates that the “previous” value for a scalar attribute should be loaded when replaced, if not already loaded. Normally, history tracking logic for simple non-primary-key scalar values only needs to be aware of the “new” value in order to perform a flush. This flag is available for applications that make use of attributes.get_history() or Session.is_modified() which also need to know the “previous” value of the attribute.

        New in version 0.6.6.

      • comparator_factory – a class which extends ColumnProperty.Comparator which provides custom SQL clause generation for comparison operations.
      • group – a group name for this property when marked as deferred.
      • deferred – when True, the column property is “deferred”, meaning that it does not load immediately, and is instead loaded when the attribute is first accessed on an instance. See also deferred().
      • doc – optional string that will be applied as the doc on the class-bound descriptor.
      • expire_on_flush=True

        Disable expiry on flush. A column_property() which refers to a SQL expression (and not a single table-bound column) is considered to be a “read only” property; populating it has no effect on the state of data, and it can only return database state. For this reason a column_property()’s value is expired whenever the parent object is involved in a flush, that is, has any kind of “dirty” state within a flush. Setting this parameter to False will have the effect of leaving any existing value present after the flush proceeds. Note however that the Session with default expiration settings still expires all attributes after a Session.commit() call.

        New in version 0.7.3.

      • info

        Optional data dictionary which will be populated into the MapperProperty.info attribute of this object.

        New in version 0.8.

      • extension – an AttributeExtension instance, or list of extensions, which will be prepended to the list of attribute listeners for the resulting descriptor placed on the class. Deprecated. Please see AttributeEvents.

      Mapping a Subset of Table Columns

      Sometimes, a Table object was made available using the reflection process described at Reflecting Database Objects to load the table’s structure from the database. For such a table that has lots of columns that don’t need to be referenced in the application, the include_properties or exclude_properties arguments can specify that only a subset of columns should be mapped. For example:

      class User(Base):
          __table__ = user_table
          __mapper_args__ = {
              'include_properties' :['user_id', 'user_name']
          }

      ...will map the User class to the user_table table, only including the user_id and user_name columns - the rest are not referenced. Similarly:

      class Address(Base):
          __table__ = address_table
          __mapper_args__ = {
              'exclude_properties' : ['street', 'city', 'state', 'zip']
          }

      ...will map the Address class to the address_table table, including all columns present except street, city, state, and zip.

      When this mapping is used, the columns that are not included will not be referenced in any SELECT statements emitted by Query, nor will there be any mapped attribute on the mapped class which represents the column; assigning an attribute of that name will have no effect beyond that of a normal Python attribute assignment.

      In some cases, multiple columns may have the same name, such as when mapping to a join of two or more tables that share some column name. include_properties and exclude_properties can also accommodate Column objects to more accurately describe which columns should be included or excluded:

      class UserAddress(Base):
          __table__ = user_table.join(addresses_table)
          __mapper_args__ = {
              'exclude_properties' :[address_table.c.id],
              'primary_key' : [user_table.c.id]
          }

      Note

      insert and update defaults configured on individual Column objects, i.e. those described at Column Insert/Update Defaults, including those configured by the default, update, server_default and server_onupdate arguments, will continue to function normally even if those Column objects are not mapped. This is because in the case of default and update, the Column object is still present on the underlying Table, thus allowing the default functions to take place when the ORM emits an INSERT or UPDATE, and in the case of server_default and server_onupdate, the relational database itself maintains these functions.

      Deferred Column Loading

      This feature allows particular columns of a table to be loaded only upon direct access, instead of when the entity is queried using Query. This feature is useful when one wants to avoid loading a large text or binary field into memory when it’s not needed. Individual columns can be lazy loaded by themselves or placed into groups that lazy-load together, using the orm.deferred() function to mark them as “deferred”. In the example below, we define a mapping that will load each of .excerpt and .photo in separate, individual-row SELECT statements when each attribute is first referenced on the individual object instance:

      from sqlalchemy.orm import deferred
      from sqlalchemy import Integer, String, Text, Binary, Column
      
      class Book(Base):
          __tablename__ = 'book'
      
          book_id = Column(Integer, primary_key=True)
          title = Column(String(200), nullable=False)
          summary = Column(String(2000))
          excerpt = deferred(Column(Text))
          photo = deferred(Column(Binary))

      Classical mappings as always place the usage of orm.deferred() in the properties dictionary against the table-bound Column:

      mapper(Book, book_table, properties={
          'photo':deferred(book_table.c.photo)
      })

      Deferred columns can be associated with a “group” name, so that they load together when any of them are first accessed. The example below defines a mapping with a photos deferred group. When one .photo is accessed, all three photos will be loaded in one SELECT statement. The .excerpt will be loaded separately when it is accessed:

      class Book(Base):
          __tablename__ = 'book'
      
          book_id = Column(Integer, primary_key=True)
          title = Column(String(200), nullable=False)
          summary = Column(String(2000))
          excerpt = deferred(Column(Text))
          photo1 = deferred(Column(Binary), group='photos')
          photo2 = deferred(Column(Binary), group='photos')
          photo3 = deferred(Column(Binary), group='photos')

      You can defer or undefer columns at the Query level using the orm.defer() and orm.undefer() query options:

      from sqlalchemy.orm import defer, undefer
      
      query = session.query(Book)
      query.options(defer('summary')).all()
      query.options(undefer('excerpt')).all()

      And an entire “deferred group”, i.e. which uses the group keyword argument to orm.deferred(), can be undeferred using orm.undefer_group(), sending in the group name:

      from sqlalchemy.orm import undefer_group
      
      query = session.query(Book)
      query.options(undefer_group('photos')).all()

      Column Deferral API

      sqlalchemy.orm.deferred(*columns, **kwargs)

      Return a DeferredColumnProperty, which indicates that this object attribute should only be loaded from its corresponding table column when first accessed.

      Used with the “properties” dictionary sent to mapper().

      See also:

      Deferred Column Loading

      sqlalchemy.orm.defer(*key)

      Return a MapperOption that will convert the column property of the given name into a deferred load.

      Used with Query.options().

      e.g.:

      from sqlalchemy.orm import defer
      
      query(MyClass).options(defer("attribute_one"),
                          defer("attribute_two"))

      A class bound descriptor is also accepted:

      query(MyClass).options(
                          defer(MyClass.attribute_one),
                          defer(MyClass.attribute_two))

      A “path” can be specified onto a related or collection object using a dotted name. The orm.defer() option will be applied to that object when loaded:

      query(MyClass).options(
                          defer("related.attribute_one"),
                          defer("related.attribute_two"))

      To specify a path via class, send multiple arguments:

      query(MyClass).options(
                          defer(MyClass.related, MyOtherClass.attribute_one),
                          defer(MyClass.related, MyOtherClass.attribute_two))

      See also:

      Deferred Column Loading

      Parameters: *key – A key representing an individual path. Multiple entries are accepted to allow a multiple-token path for a single target, not multiple targets.
      sqlalchemy.orm.undefer(*key)

      Return a MapperOption that will convert the column property of the given name into a non-deferred (regular column) load.

      Used with Query.options().

      e.g.:

      from sqlalchemy.orm import undefer
      
      query(MyClass).options(
                  undefer("attribute_one"),
                  undefer("attribute_two"))

      A class bound descriptor is also accepted:

      query(MyClass).options(
                  undefer(MyClass.attribute_one),
                  undefer(MyClass.attribute_two))

      A “path” can be specified onto a related or collection object using a dotted name. The orm.undefer() option will be applied to that object when loaded:

      query(MyClass).options(
                  undefer("related.attribute_one"),
                  undefer("related.attribute_two"))

      To specify a path via class, send multiple arguments:

      query(MyClass).options(
                  undefer(MyClass.related, MyOtherClass.attribute_one),
                  undefer(MyClass.related, MyOtherClass.attribute_two))

      See also:

      orm.undefer_group() as a means to “undefer” a group of attributes at once.

      Deferred Column Loading

      Parameters: *key – A key representing an individual path. Multiple entries are accepted to allow a multiple-token path for a single target, not multiple targets.
      sqlalchemy.orm.undefer_group(name)

      Return a MapperOption that will convert the given group of deferred column properties into a non-deferred (regular column) load.

      Used with Query.options().

      e.g.:

      query(MyClass).options(undefer_group("group_one"))

      See also:

      Deferred Column Loading

      Parameters: name – String name of the deferred group. This name is established using the group argument to the orm.deferred() configurational function.

      SQL Expressions as Mapped Attributes

      Attributes on a mapped class can be linked to SQL expressions, which can be used in queries.

      Using a Hybrid

      The easiest and most flexible way to link relatively simple SQL expressions to a class is to use a so-called “hybrid attribute”, described in the section Hybrid Attributes. The hybrid provides for an expression that works at both the Python level as well as at the SQL expression level. For example, below we map a class User, containing attributes firstname and lastname, and include a hybrid that will provide for us the fullname, which is the string concatenation of the two:

      from sqlalchemy.ext.hybrid import hybrid_property
      
      class User(Base):
          __tablename__ = 'user'
          id = Column(Integer, primary_key=True)
          firstname = Column(String(50))
          lastname = Column(String(50))
      
          @hybrid_property
          def fullname(self):
              return self.firstname + " " + self.lastname

      Above, the fullname attribute is interpreted at both the instance and class level, so that it is available from an instance:

      some_user = session.query(User).first()
      print some_user.fullname

      as well as usable within queries:

      some_user = session.query(User).filter(User.fullname == "John Smith").first()

      The string concatenation example is a simple one, where the Python expression can be dual purposed at the instance and class level. Often, the SQL expression must be distinguished from the Python expression, which can be achieved using hybrid_property.expression(). Below we illustrate the case where a conditional needs to be present inside the hybrid, using the if statement in Python and the sql.expression.case() construct for SQL expressions:

      from sqlalchemy.ext.hybrid import hybrid_property
      from sqlalchemy.sql import case
      
      class User(Base):
          __tablename__ = 'user'
          id = Column(Integer, primary_key=True)
          firstname = Column(String(50))
          lastname = Column(String(50))
      
          @hybrid_property
          def fullname(self):
              if self.firstname is not None:
                  return self.firstname + " " + self.lastname
              else:
                  return self.lastname
      
          @fullname.expression
          def fullname(cls):
              return case([
                  (cls.firstname != None, cls.firstname + " " + cls.lastname),
              ], else_ = cls.lastname)

      Using column_property

      The orm.column_property() function can be used to map a SQL expression in a manner similar to a regularly mapped Column. With this technique, the attribute is loaded along with all other column-mapped attributes at load time. This is in some cases an advantage over the usage of hybrids, as the value can be loaded up front at the same time as the parent row of the object, particularly if the expression is one which links to other tables (typically as a correlated subquery) to access data that wouldn’t normally be available on an already loaded object.

      Disadvantages to using orm.column_property() for SQL expressions include that the expression must be compatible with the SELECT statement emitted for the class as a whole, and there are also some configurational quirks which can occur when using orm.column_property() from declarative mixins.

      Our “fullname” example can be expressed using orm.column_property() as follows:

      from sqlalchemy.orm import column_property
      
      class User(Base):
          __tablename__ = 'user'
          id = Column(Integer, primary_key=True)
          firstname = Column(String(50))
          lastname = Column(String(50))
          fullname = column_property(firstname + " " + lastname)

      Correlated subqueries may be used as well. Below we use the select() construct to create a SELECT that links together the count of Address objects available for a particular User:

      from sqlalchemy.orm import column_property
      from sqlalchemy import select, func
      from sqlalchemy import Column, Integer, String, ForeignKey
      
      from sqlalchemy.ext.declarative import declarative_base
      
      Base = declarative_base()
      
      class Address(Base):
          __tablename__ = 'address'
          id = Column(Integer, primary_key=True)
          user_id = Column(Integer, ForeignKey('user.id'))
      
      class User(Base):
          __tablename__ = 'user'
          id = Column(Integer, primary_key=True)
          address_count = column_property(
              select([func.count(Address.id)]).\
                  where(Address.user_id==id).\
                  correlate_except(Address)
          )

      In the above example, we define a select() construct like the following:

      select([func.count(Address.id)]).\
          where(Address.user_id==id).\
          correlate_except(Address)

      The meaning of the above statement is, select the count of Address.id rows where the Address.user_id column is equated to id, which in the context of the User class is the Column named id (note that id is also the name of a Python built in function, which is not what we want to use here - if we were outside of the User class definition, we’d use User.id).

      The select.correlate_except() directive indicates that each element in the FROM clause of this select() may be omitted from the FROM list (that is, correlated to the enclosing SELECT statement against User) except for the one corresponding to Address. This isn’t strictly necessary, but prevents Address from being inadvertently omitted from the FROM list in the case of a long string of joins between User and Address tables where SELECT statements against Address are nested.

      If import issues prevent the column_property() from being defined inline with the class, it can be assigned to the class after both are configured. In Declarative this has the effect of calling Mapper.add_property() to add an additional property after the fact:

      User.address_count = column_property(
              select([func.count(Address.id)]).\
                  where(Address.user_id==User.id)
          )

      For many-to-many relationships, use and_() to join the fields of the association table to both tables in a relation, illustrated here with a classical mapping:

      from sqlalchemy import and_
      
      mapper(Author, authors, properties={
          'book_count': column_property(
                              select([func.count(books.c.id)],
                                  and_(
                                      book_authors.c.author_id==authors.c.id,
                                      book_authors.c.book_id==books.c.id
                                  )))
          })

      Using a plain descriptor

      In cases where a SQL query more elaborate than what orm.column_property() or hybrid_property can provide must be emitted, a regular Python function accessed as an attribute can be used, assuming the expression only needs to be available on an already-loaded instance. The function is decorated with Python’s own @property decorator to mark it as a read-only attribute. Within the function, object_session() is used to locate the Session corresponding to the current object, which is then used to emit a query:

      from sqlalchemy.orm import object_session
      from sqlalchemy import select, func
      
      class User(Base):
          __tablename__ = 'user'
          id = Column(Integer, primary_key=True)
          firstname = Column(String(50))
          lastname = Column(String(50))
      
          @property
          def address_count(self):
              return object_session(self).\
                  scalar(
                      select([func.count(Address.id)]).\
                          where(Address.user_id==self.id)
                  )

      The plain descriptor approach is useful as a last resort, but is less performant in the usual case than both the hybrid and column property approaches, in that it needs to emit a SQL query upon each access.

      Changing Attribute Behavior

      Simple Validators

      A quick way to add a “validation” routine to an attribute is to use the validates() decorator. An attribute validator can raise an exception, halting the process of mutating the attribute’s value, or can change the given value into something different. Validators, like all attribute extensions, are only called by normal userland code; they are not issued when the ORM is populating the object:

      from sqlalchemy.orm import validates
      
      class EmailAddress(Base):
          __tablename__ = 'address'
      
          id = Column(Integer, primary_key=True)
          email = Column(String)
      
          @validates('email')
          def validate_email(self, key, address):
              assert '@' in address
              return address

      Validators also receive collection events, when items are added to a collection:

      from sqlalchemy.orm import validates
      
      class User(Base):
          # ...
      
          addresses = relationship("Address")
      
          @validates('addresses')
          def validate_address(self, key, address):
              assert '@' in address.email
              return address

      Note that the validates() decorator is a convenience function built on top of attribute events. An application that requires more control over configuration of attribute change behavior can make use of this system, described at AttributeEvents.

      sqlalchemy.orm.validates(*names, **kw)

      Decorate a method as a ‘validator’ for one or more named properties.

      Designates a method as a validator, a method which receives the name of the attribute as well as a value to be assigned, or in the case of a collection, the value to be added to the collection. The function can then raise validation exceptions to halt the process from continuing (where Python’s built-in ValueError and AssertionError exceptions are reasonable choices), or can modify or replace the value before proceeding. The function should otherwise return the given value.

      Note that a validator for a collection cannot issue a load of that collection within the validation routine - this usage raises an assertion to avoid recursion overflows. This is a reentrant condition which is not supported.

      Parameters:
      • *names – list of attribute names to be validated.
      • include_removes

        if True, “remove” events will be sent as well - the validation function must accept an additional argument “is_remove” which will be a boolean.

        New in version 0.7.7.
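
      As a minimal sketch of the include_removes flag described above (reusing the User/Address mapping from the earlier examples), the validator receives one extra boolean argument distinguishing removals from additions:

      from sqlalchemy.orm import validates

      class User(Base):
          # ...

          addresses = relationship("Address")

          @validates('addresses', include_removes=True)
          def validate_address(self, key, address, is_remove):
              # removals pass through unchanged; additions are checked
              if not is_remove:
                  assert '@' in address.email
              return address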

      Using Descriptors and Hybrids

      A more comprehensive way to produce modified behavior for an attribute is to use descriptors. These are commonly used in Python using the property() function. The standard SQLAlchemy technique for descriptors is to create a plain descriptor, and to have it read/write from a mapped attribute with a different name. Below we illustrate this using Python 2.6-style properties:

      class EmailAddress(Base):
          __tablename__ = 'email_address'
      
          id = Column(Integer, primary_key=True)
      
          # name the attribute with an underscore,
          # different from the column name
          _email = Column("email", String)
      
          # then create an ".email" attribute
          # to get/set "._email"
          @property
          def email(self):
              return self._email
      
          @email.setter
          def email(self, email):
              self._email = email

      The approach above will work, but there’s more we can add. While our EmailAddress object will shuttle the value through the email descriptor and into the _email mapped attribute, the class level EmailAddress.email attribute does not have the usual expression semantics usable with Query. To provide these, we instead use the hybrid extension as follows:

      from sqlalchemy.ext.hybrid import hybrid_property
      
      class EmailAddress(Base):
          __tablename__ = 'email_address'
      
          id = Column(Integer, primary_key=True)
      
          _email = Column("email", String)
      
          @hybrid_property
          def email(self):
              return self._email
      
          @email.setter
          def email(self, email):
              self._email = email

      The .email attribute, in addition to providing getter/setter behavior when we have an instance of EmailAddress, also provides a SQL expression when used at the class level, that is, from the EmailAddress class directly:

      from sqlalchemy.orm import Session
      session = Session()
      
      address = session.query(EmailAddress).\
                      filter(EmailAddress.email == 'address@example.com').\
                      one()

      address.email = 'otheraddress@example.com'
      session.commit()
      

      The hybrid_property also allows us to change the behavior of the attribute, including defining separate behaviors when the attribute is accessed at the instance level versus at the class/expression level, using the hybrid_property.expression() modifier. For example, if we wanted to add a host name automatically, we might define two sets of string manipulation logic:

      class EmailAddress(Base):
          __tablename__ = 'email_address'
      
          id = Column(Integer, primary_key=True)
      
          _email = Column("email", String)
      
          @hybrid_property
          def email(self):
              """Return the value of _email up until the last twelve
              characters."""
      
              return self._email[:-12]
      
          @email.setter
          def email(self, email):
              """Set the value of _email, tacking on the twelve character
              value @example.com."""
      
              self._email = email + "@example.com"
      
          @email.expression
          def email(cls):
              """Produce a SQL expression that represents the value
              of the _email column, minus the last twelve characters."""
      
              return func.substr(cls._email, 0, func.length(cls._email) - 12)

      Above, accessing the email property of an instance of EmailAddress will return the value of the _email attribute, removing or adding the hostname @example.com from the value. When we query against the email attribute, a SQL function is rendered which produces the same effect:

      address = session.query(EmailAddress).filter(EmailAddress.email == 'address').one()
      

      Read more about Hybrids at Hybrid Attributes.

      Synonyms

      Synonyms are a mapper-level construct that applies expression behavior to a descriptor based attribute.

      Changed in version 0.7: The functionality of synonym is superseded as of 0.7 by hybrid attributes.

      sqlalchemy.orm.synonym(name, map_column=False, descriptor=None, comparator_factory=None, doc=None)

      Denote an attribute name as a synonym to a mapped property.

      Changed in version 0.7: synonym() is superseded by the hybrid extension. See the documentation for hybrids at Hybrid Attributes.

      Used with the properties dictionary sent to mapper():

      class MyClass(object):
          def _get_status(self):
              return self._status
          def _set_status(self, value):
              self._status = value
          status = property(_get_status, _set_status)
      
      mapper(MyClass, sometable, properties={
          "status":synonym("_status", map_column=True)
      })

      Above, the status attribute of MyClass will produce expression behavior against the table column named status, using the Python attribute _status on the mapped class to represent the underlying value.

      Parameters:
      • name – the name of the existing mapped property, which can be any other MapperProperty including column-based properties and relationships.
      • map_column – if True, an additional ColumnProperty is created on the mapper automatically, using the synonym’s name as the keyname of the property, and the keyname of this synonym() as the name of the column to map.

      Operator Customization

      The “operators” used by the SQLAlchemy ORM and Core expression language are fully customizable. For example, the comparison expression User.name == 'ed' makes usage of an operator built into Python itself called operator.eq - the actual SQL construct which SQLAlchemy associates with such an operator can be modified. New operations can be associated with column expressions as well. The operators which take place for column expressions are most directly redefined at the type level - see the section Redefining and Creating New Operators for a description.

      ORM level functions like column_property(), relationship(), and composite() also provide for operator redefinition at the ORM level, by passing a PropComparator subclass to the comparator_factory argument of each function. Customization of operators at this level is a rare use case. See the documentation at PropComparator for an overview.
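
      As a brief, hedged sketch (the class and attribute names below are invented for illustration), a PropComparator subclass can be passed to column_property() via comparator_factory to redefine an operator at the ORM level; here, equality against the mapped column is made case-insensitive:

      from sqlalchemy import Column, Integer, String, func
      from sqlalchemy.orm import column_property
      from sqlalchemy.orm.properties import ColumnProperty

      class CaseInsensitiveComparator(ColumnProperty.Comparator):
          def __eq__(self, other):
              # compare lower-cased values on the SQL side
              return func.lower(self.__clause_element__()) == func.lower(other)

      class SearchWord(Base):
          __tablename__ = 'search_word'

          id = Column(Integer, primary_key=True)
          word = column_property(
                      Column('word', String(255)),
                      comparator_factory=CaseInsensitiveComparator
                  )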

      Composite Column Types

      Sets of columns can be associated with a single user-defined datatype. The ORM provides a single attribute which represents the group of columns using the class you provide.

      Changed in version 0.7: Composites have been simplified such that they no longer “conceal” the underlying column based attributes. Additionally, in-place mutation is no longer automatic; see the section below on enabling mutability to support tracking of in-place changes.

      A simple example represents pairs of columns as a Point object. Point represents such a pair as .x and .y:

      class Point(object):
          def __init__(self, x, y):
              self.x = x
              self.y = y
      
          def __composite_values__(self):
              return self.x, self.y
      
          def __repr__(self):
              return "Point(x=%r, y=%r)" % (self.x, self.y)
      
          def __eq__(self, other):
              return isinstance(other, Point) and \
                  other.x == self.x and \
                  other.y == self.y
      
          def __ne__(self, other):
              return not self.__eq__(other)

      The requirements for the custom datatype class are that it have a constructor which accepts positional arguments corresponding to its column format, and also provides a method __composite_values__() which returns the state of the object as a list or tuple, in order of its column-based attributes. It also should supply adequate __eq__() and __ne__() methods which test the equality of two instances.

      We will create a mapping to a table vertice, which represents two points as x1/y1 and x2/y2. These are created normally as Column objects. Then, the composite() function is used to assign new attributes that will represent sets of columns via the Point class:

      from sqlalchemy import Column, Integer
      from sqlalchemy.orm import composite
      from sqlalchemy.ext.declarative import declarative_base
      
      Base = declarative_base()
      
      class Vertex(Base):
          __tablename__ = 'vertice'
      
          id = Column(Integer, primary_key=True)
          x1 = Column(Integer)
          y1 = Column(Integer)
          x2 = Column(Integer)
          y2 = Column(Integer)
      
          start = composite(Point, x1, y1)
          end = composite(Point, x2, y2)

      A classical mapping of the above would define each composite() against the existing table:

      mapper(Vertex, vertice_table, properties={
          'start':composite(Point, vertice_table.c.x1, vertice_table.c.y1),
          'end':composite(Point, vertice_table.c.x2, vertice_table.c.y2),
      })

      We can now persist and use Vertex instances, as well as query for them, using the .start and .end attributes against ad-hoc Point instances:

      >>> v = Vertex(start=Point(3, 4), end=Point(5, 6))
      >>> session.add(v)
      >>> q = session.query(Vertex).filter(Vertex.start == Point(3, 4))
      >>> print q.first().start
      Point(x=3, y=4)

      sqlalchemy.orm.composite(class_, *cols, **kwargs)

      Return a composite column-based property for use with a Mapper.

      See the mapping documentation section Composite Column Types for a full usage example.

      The MapperProperty returned by composite() is the CompositeProperty.

      Parameters:
      • class_ – The “composite type” class.
      • *cols – List of Column objects to be mapped.
      • active_history=False

        When True, indicates that the “previous” value for a scalar attribute should be loaded when replaced, if not already loaded. See the same flag on column_property().

        Changed in version 0.7: This flag specifically becomes meaningful - previously it was a placeholder.

      • group – A group name for this property when marked as deferred.
      • deferred – When True, the column property is “deferred”, meaning that it does not load immediately, and is instead loaded when the attribute is first accessed on an instance. See also deferred().
      • comparator_factory – a class which extends CompositeProperty.Comparator which provides custom SQL clause generation for comparison operations.
      • doc – optional string that will be applied as the doc on the class-bound descriptor.
      • info

        Optional data dictionary which will be populated into the MapperProperty.info attribute of this object.

        New in version 0.8.

      • extension – an AttributeExtension instance, or list of extensions, which will be prepended to the list of attribute listeners for the resulting descriptor placed on the class. Deprecated. Please see AttributeEvents.

      Tracking In-Place Mutations on Composites

      In-place changes to an existing composite value are not tracked automatically. Instead, the composite class needs to provide events to its parent object explicitly. This task is largely automated via the usage of the MutableComposite mixin, which uses events to associate each user-defined composite object with all parent associations. Please see the example in Establishing Mutability on Composites.

      Changed in version 0.7: In-place changes to an existing composite value are no longer tracked automatically; the functionality is superseded by the MutableComposite class.
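
      As a condensed sketch of the MutableComposite approach (see Establishing Mutability on Composites for the full example), the composite class subclasses MutableComposite and calls self.changed() whenever its state is mutated in place:

      from sqlalchemy.ext.mutable import MutableComposite

      class Point(MutableComposite):
          def __init__(self, x, y):
              self.x = x
              self.y = y

          def __setattr__(self, key, value):
              "Intercept attribute set events and alert parent objects."
              object.__setattr__(self, key, value)
              self.changed()

          def __composite_values__(self):
              return self.x, self.y

          def __eq__(self, other):
              return isinstance(other, Point) and \
                  other.x == self.x and \
                  other.y == self.y

          def __ne__(self, other):
              return not self.__eq__(other)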

      Redefining Comparison Operations for Composites

      The “equals” comparison operation by default produces an AND of all corresponding columns equated to one another. This can be changed using the comparator_factory argument to composite(), where we specify a custom CompositeProperty.Comparator class to define existing or new operations. Below we illustrate the “greater than” operator, implementing the same expression that the base “greater than” does:

      from sqlalchemy.orm.properties import CompositeProperty
      from sqlalchemy import sql
      
      class PointComparator(CompositeProperty.Comparator):
          def __gt__(self, other):
              """redefine the 'greater than' operation"""
      
              return sql.and_(*[a>b for a, b in
                                zip(self.__clause_element__().clauses,
                                    other.__composite_values__())])
      
      class Vertex(Base):
          __tablename__ = 'vertice'
      
          id = Column(Integer, primary_key=True)
          x1 = Column(Integer)
          y1 = Column(Integer)
          x2 = Column(Integer)
          y2 = Column(Integer)
      
          start = composite(Point, x1, y1,
                              comparator_factory=PointComparator)
          end = composite(Point, x2, y2,
                              comparator_factory=PointComparator)

      Mapping a Class against Multiple Tables

      Mappers can be constructed against arbitrary relational units (called selectables) in addition to plain tables. For example, the join() function creates a selectable unit comprised of multiple tables, complete with its own composite primary key, which can be mapped in the same way as a Table:

      from sqlalchemy import Table, Column, Integer, \
              String, MetaData, join, ForeignKey
      from sqlalchemy.ext.declarative import declarative_base
      from sqlalchemy.orm import column_property
      
      metadata = MetaData()
      
      # define two Table objects
      user_table = Table('user', metadata,
                  Column('id', Integer, primary_key=True),
                  Column('name', String),
              )
      
      address_table = Table('address', metadata,
                  Column('id', Integer, primary_key=True),
                  Column('user_id', Integer, ForeignKey('user.id')),
                  Column('email_address', String)
                  )
      
      # define a join between them.  This
      # takes place across the user.id and address.user_id
      # columns.
      user_address_join = join(user_table, address_table)
      
      Base = declarative_base()
      
      # map to it
      class AddressUser(Base):
          __table__ = user_address_join
      
          id = column_property(user_table.c.id, address_table.c.user_id)
          address_id = address_table.c.id

      In the example above, the join expresses columns for both the user and the address table. The user.id and address.user_id columns are equated by foreign key, so in the mapping they are defined as one attribute, AddressUser.id, using column_property() to indicate a specialized column mapping. Based on this part of the configuration, the mapping will copy new primary key values from user.id into the address.user_id column when a flush occurs.

      Additionally, the address.id column is mapped explicitly to an attribute named address_id. This is to disambiguate the mapping of the address.id column from the same-named AddressUser.id attribute, which here has been assigned to refer to the user table combined with the address.user_id foreign key.

      The natural primary key of the above mapping is the composite of (user.id, address.id), as these are the primary key columns of the user and address table combined together. The identity of an AddressUser object will be in terms of these two values, and is represented from an AddressUser object as (AddressUser.id, AddressUser.address_id).
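
      A brief usage sketch of the above mapping (illustrative only): querying AddressUser yields objects whose attributes span both tables:

      q = session.query(AddressUser).\
              filter(AddressUser.name == 'jack')

      for au in q:
          print au.id, au.address_id, au.email_address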

      Mapping a Class against Arbitrary Selects

      Similar to mapping against a join, a plain select() object can be used with a mapper as well. The example fragment below illustrates mapping a class called Customer to a select() which includes a join to a subquery:

      from sqlalchemy import select, func
      
      subq = select([
                  func.count(orders.c.id).label('order_count'),
                  func.max(orders.c.price).label('highest_order'),
                  orders.c.customer_id
                  ]).group_by(orders.c.customer_id).alias()
      
      customer_select = select([customers, subq]).\
                  select_from(
                      join(customers, subq,
                              customers.c.id == subq.c.customer_id)
                  ).alias()
      
      class Customer(Base):
          __table__ = customer_select

      Above, the full row represented by customer_select will be all the columns of the customers table, in addition to those columns exposed by the subq subquery, which are order_count, highest_order, and customer_id. Mapping the Customer class to this selectable then creates a class which will contain those attributes.

      When the ORM persists new instances of Customer, only the customers table will actually receive an INSERT. This is because the primary key of the orders table is not represented in the mapping; the ORM will only emit an INSERT into a table for which it has mapped the primary key.

      Note

      The practice of mapping to arbitrary SELECT statements, especially complex ones as above, is almost never needed; it necessarily tends to produce complex queries which are often less efficient than that which would be produced by direct query construction. The practice is to some degree based on the very early history of SQLAlchemy where the mapper() construct was meant to represent the primary querying interface; in modern usage, the Query object can be used to construct virtually any SELECT statement, including complex composites, and should be favored over the “map-to-selectable” approach.

      Multiple Mappers for One Class

      In modern SQLAlchemy, a particular class is only mapped by one mapper() at a time. The rationale here is that the mapper() modifies the class itself, not only persisting it towards a particular Table, but also instrumenting attributes upon the class which are structured specifically according to the table metadata.

      One potential use case for another mapper to exist at the same time is if we wanted to load instances of our class not just from the immediate Table to which it is mapped, but from another selectable that is a derivation of that Table. While there technically is a way to create such a mapper(), using the non_primary=True option, this approach is virtually never needed. Instead, we use the functionality of the Query object to achieve this, using a method such as Query.select_from() or Query.from_statement() to specify a derived selectable.
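
      For example (a minimal sketch, assuming a mapped User class with a name column as in earlier examples), a derived SELECT can be applied at query time rather than through a second mapper:

      from sqlalchemy import select

      stmt = select([User.__table__]).\
                  where(User.__table__.c.name.like('e%'))

      for user in session.query(User).from_statement(stmt):
          print user.name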

      Another potential use is if we genuinely want instances of our class to be persisted into different tables at different times; certain kinds of data sharding configurations may persist a particular class into tables that are identical in structure except for their name. For this kind of pattern, Python offers a better approach than the complexity of mapping the same class multiple times, which is to instead create new mapped classes for each target table. SQLAlchemy refers to this as the “entity name” pattern, which is described as a recipe at Entity Name.

      Constructors and Object Initialization

      Mapping imposes no restrictions or requirements on the constructor (__init__) method for the class. You are free to require any arguments for the function that you wish, assign attributes to the instance that are unknown to the ORM, and generally do anything else you would normally do when writing a constructor for a Python class.

      The SQLAlchemy ORM does not call __init__ when recreating objects from database rows. The ORM’s process is somewhat akin to the Python standard library’s pickle module, invoking the low level __new__ method and then quietly restoring attributes directly on the instance rather than calling __init__.

      If you need to do some setup on database-loaded instances before they’re ready to use, you can use the @reconstructor decorator to tag a method as the ORM counterpart to __init__. SQLAlchemy will call this method with no arguments every time it loads or reconstructs one of your instances. This is useful for recreating transient properties that are normally assigned in your __init__:

      from sqlalchemy import orm
      
      class MyMappedClass(object):
          def __init__(self, data):
              self.data = data
              # we need stuff on all instances, but not in the database.
              self.stuff = []
      
          @orm.reconstructor
          def init_on_load(self):
              self.stuff = []

      When obj = MyMappedClass() is executed, Python calls the __init__ method as normal and the data argument is required. When instances are loaded during a Query operation as in query(MyMappedClass).one(), init_on_load is called.

      Any method may be tagged as the reconstructor(), even the __init__ method. SQLAlchemy will call the reconstructor method with no arguments. Scalar (non-collection) database-mapped attributes of the instance will be available for use within the function. Eagerly-loaded collections are generally not yet available and will usually only contain the first element. ORM state changes made to objects at this stage will not be recorded for the next flush() operation, so the activity within a reconstructor should be conservative.

      reconstructor() is a shortcut into a larger system of “instance level” events, which can be subscribed to using the event API - see InstanceEvents for the full API description of these events.

      sqlalchemy.orm.reconstructor(fn)

      Decorate a method as the ‘reconstructor’ hook.

      Designates a method as the “reconstructor”, an __init__-like method that will be called by the ORM after the instance has been loaded from the database or otherwise reconstituted.

      The reconstructor will be invoked with no arguments. Scalar (non-collection) database-mapped attributes of the instance will be available for use within the function. Eagerly-loaded collections are generally not yet available and will usually only contain the first element. ORM state changes made to objects at this stage will not be recorded for the next flush() operation, so the activity within a reconstructor should be conservative.

      Configuring a Version Counter

      The Mapper supports management of a version id column, which is a single table column that increments or otherwise updates its value each time an UPDATE to the mapped table occurs. This value is checked each time the ORM emits an UPDATE or DELETE against the row to ensure that the value held in memory matches the database value.

      The purpose of this feature is to detect when two concurrent transactions are modifying the same row at roughly the same time, or alternatively to provide a guard against the usage of a “stale” row in a system that might be re-using data from a previous transaction without refreshing (e.g. if one sets expire_on_commit=False with a Session, it is possible to re-use the data from a previous transaction).

      Concurrent transaction updates

      When detecting concurrent updates within transactions, it is typically the case that the database’s transaction isolation level is below the level of repeatable read; otherwise, the transaction will not be exposed to a new row value created by a concurrent update which conflicts with the locally updated value. In this case, the SQLAlchemy versioning feature will typically not be useful for in-transaction conflict detection, though it still can be used for cross-transaction staleness detection.

      A database that enforces repeatable reads will typically either have locked the target row against a concurrent update, or will employ some form of multi-version concurrency control such that it emits an error when the transaction is committed. SQLAlchemy’s version_id_col is an alternative which allows version tracking to occur for specific tables within a transaction that otherwise might not have this isolation level set.

      See also

      Repeatable Read Isolation Level - Postgresql’s implementation of repeatable read, including a description of the error condition.

      Simple Version Counting

      The most straightforward way to track versions is to add an integer column to the mapped table, then establish it as the version_id_col within the mapper options:

      class User(Base):
          __tablename__ = 'user'
      
          id = Column(Integer, primary_key=True)
          version_id = Column(Integer, nullable=False)
          name = Column(String(50), nullable=False)
      
          __mapper_args__ = {
              "version_id_col": version_id
          }

      Above, the User mapping tracks integer versions using the column version_id. When an object of type User is first flushed, the version_id column will be given a value of “1”. Then, an UPDATE of the table later on will always be emitted in a manner similar to the following:

      UPDATE user SET version_id=:version_id, name=:name
      WHERE user.id = :user_id AND user.version_id = :user_version_id
      {"name": "new name", "version_id": 2, "user_id": 1, "user_version_id": 1}

      The above UPDATE statement updates the row that not only matches user.id = 1, but also requires that user.version_id = 1, where “1” is the last version identifier we know to be associated with this object. If a transaction elsewhere has modified the row independently, this version id will no longer match, and the UPDATE statement will report that no rows matched; this is the condition that SQLAlchemy tests: that exactly one row matched our UPDATE (or DELETE) statement. If zero rows match, our version of the data is stale, and a StaleDataError is raised.
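
      A hedged sketch of how an application might respond to this condition (the recovery policy shown is purely illustrative):

      from sqlalchemy.orm.exc import StaleDataError

      try:
          session.commit()
      except StaleDataError:
          # our in-memory version no longer matches the row; discard
          # local changes and let the caller reload and retry, or
          # surface the conflict to the user
          session.rollback()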

      Custom Version Counters / Types

      Other kinds of values or counters can be used for versioning. Common types include dates and GUIDs. When using an alternate type or counter scheme, SQLAlchemy provides a hook for this scheme using the version_id_generator argument, which accepts a version generation callable. This callable is passed the value of the current known version, and is expected to return the subsequent version.

      For example, if we wanted to track the versioning of our User class using a randomly generated GUID, we could do this (note that some backends support a native GUID type, but we illustrate here using a simple string):

      import uuid
      
      class User(Base):
          __tablename__ = 'user'
      
          id = Column(Integer, primary_key=True)
          version_uuid = Column(String(32))
          name = Column(String(50), nullable=False)
      
          __mapper_args__ = {
              'version_id_col':version_uuid,
              'version_id_generator':lambda version: uuid.uuid4().hex
          }

      The persistence engine will call upon uuid.uuid4() each time a User object is subject to an INSERT or an UPDATE. In this case, our version generation function can disregard the incoming value of version, as the uuid4() function generates identifiers without any prerequisite value. If we were using a sequential versioning scheme such as numeric or a special character system, we could make use of the given version in order to help determine the subsequent value.
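
      As an illustrative sketch of a scheme that does consult the incoming value (the class and column names below are hypothetical), a numeric generator might simply increment the previous version, mirroring the default integer counting scheme:

      class Widget(Base):
          __tablename__ = 'widget'

          id = Column(Integer, primary_key=True)
          version_num = Column(Integer, nullable=False)

          __mapper_args__ = {
              'version_id_col': version_num,
              # "version" is None on INSERT, the current value on UPDATE
              'version_id_generator': lambda version: (version or 0) + 1
          }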

      Class Mapping API

      sqlalchemy.orm.mapper(class_, local_table=None, *args, **params)

      Return a new Mapper object.

      This function is typically used behind the scenes via the Declarative extension. When using Declarative, many of the usual mapper() arguments are handled by the Declarative extension itself, including class_, local_table, properties, and inherits. Other options are passed to mapper() using the __mapper_args__ class variable:

      class MyClass(Base):
          __tablename__ = 'my_table'
          id = Column(Integer, primary_key=True)
          type = Column(String(50))
          alt = Column("some_alt", Integer)
      
          __mapper_args__ = {
              'polymorphic_on' : type
          }

      Explicit use of mapper() is often referred to as classical mapping. The above declarative example is equivalent in classical form to:

      my_table = Table("my_table", metadata,
          Column('id', Integer, primary_key=True),
          Column('type', String(50)),
          Column("some_alt", Integer)
      )
      
      class MyClass(object):
          pass
      
      mapper(MyClass, my_table,
          polymorphic_on=my_table.c.type,
          properties={
              'alt':my_table.c.some_alt
          })

      See also:

      Classical Mappings - discussion of direct usage of mapper()

      Parameters:
      • class_ – The class to be mapped. When using Declarative, this argument is automatically passed as the declared class itself.
      • local_table – The Table or other selectable to which the class is mapped. May be None if this mapper inherits from another mapper using single-table inheritance. When using Declarative, this argument is automatically passed by the extension, based on what is configured via the __table__ argument or via the Table produced as a result of the __tablename__ and Column arguments present.
      • always_refresh – If True, all query operations for this mapped class will overwrite all data within object instances that already exist within the session, erasing any in-memory changes with whatever information was loaded from the database. Usage of this flag is highly discouraged; as an alternative, see the method Query.populate_existing().
      • allow_partial_pks – Defaults to True. Indicates that a composite primary key with some NULL values should be considered as possibly existing within the database. This affects whether a mapper will assign an incoming row to an existing identity, as well as if Session.merge() will check the database first for a particular primary key value. A “partial primary key” can occur if one has mapped to an OUTER JOIN, for example.
      • batch – Defaults to True, indicating that save operations of multiple entities can be batched together for efficiency. Setting to False indicates that an instance will be fully saved before saving the next instance. This is used in the extremely rare case that a MapperEvents listener requires being called in between individual row persistence operations.
      • column_prefix

        A string which will be prepended to the mapped attribute name when Column objects are automatically assigned as attributes to the mapped class. Does not affect explicitly specified column-based properties.

        See the section Naming All Columns with a Prefix for an example.

      • concrete

        If True, indicates this mapper should use concrete table inheritance with its parent mapper.

        See the section Concrete Table Inheritance for an example.

      • eager_defaults – if True, the ORM will immediately fetch the value of server-generated default values after an INSERT or UPDATE, rather than leaving them as expired to be fetched on next access. This can be used for event schemes where the server-generated values are needed immediately before the flush completes. Note that this scheme emits an individual SELECT statement per row inserted or updated, which can add significant performance overhead.
      • exclude_properties

        A list or set of string column names to be excluded from mapping.

        See Mapping a Subset of Table Columns for an example.

      • extension – A MapperExtension instance or list of MapperExtension instances which will be applied to all operations by this Mapper. Deprecated. Please see MapperEvents.
      • include_properties

        An inclusive list or set of string column names to map.

        See Mapping a Subset of Table Columns for an example.

      • inherits

        A mapped class, or the corresponding Mapper of one, indicating a superclass from which this Mapper should inherit. The mapped class here must be a subclass of the other mapper’s class. When using Declarative, this argument is passed automatically as a result of the natural class hierarchy of the declared classes.

      • inherit_condition – For joined table inheritance, a SQL expression which will define how the two tables are joined; defaults to a natural join between the two tables.
      • inherit_foreign_keys – When inherit_condition is used and the columns present are missing a ForeignKey configuration, this parameter can be used to specify which columns are “foreign”. In most cases can be left as None.
      • legacy_is_orphan

        Boolean, defaults to False. When True, specifies that “legacy” orphan consideration is to be applied to objects mapped by this mapper, which means that a pending (that is, not persistent) object is auto-expunged from an owning Session only when it is de-associated from all parents that specify a delete-orphan cascade towards this mapper. The new default behavior is that the object is auto-expunged when it is de-associated with any of its parents that specify delete-orphan cascade. This behavior is more consistent with that of a persistent object, and allows behavior to be consistent in more scenarios independently of whether or not an orphanable object has been flushed yet.

        See the change note and example at The consideration of a “pending” object as an “orphan” has been made more aggressive for more detail on this change.

        New in version 0.8: - the consideration of a pending object as an “orphan” has been modified to more closely match the behavior as that of persistent objects, which is that the object is expunged from the Session as soon as it is de-associated from any of its orphan-enabled parents. Previously, the pending object would be expunged only if de-associated from all of its orphan-enabled parents. The new flag legacy_is_orphan is added to orm.mapper() which re-establishes the legacy behavior.

      • non_primary

        Specify that this Mapper is in addition to the “primary” mapper, that is, the one used for persistence. The Mapper created here may be used for ad-hoc mapping of the class to an alternate selectable, for loading only.

        The non_primary feature is rarely needed with modern usage.

      • order_by – A single Column or list of Column objects for which selection operations should use as the default ordering for entities. By default mappers have no pre-defined ordering.
      • passive_updates

        Indicates UPDATE behavior of foreign key columns when a primary key column changes on a joined-table inheritance mapping. Defaults to True.

        When True, it is assumed that ON UPDATE CASCADE is configured on the foreign key in the database, and that the database will handle propagation of an UPDATE from a source column to dependent columns on joined-table rows.

        When False, it is assumed that the database does not enforce referential integrity and will not be issuing its own CASCADE operation for an update. The Mapper here will emit an UPDATE statement for the dependent columns during a primary key change.

        See also

        Mutable Primary Keys / Update Cascades - description of a similar feature as used with relationship()

      • polymorphic_on

        Specifies the column, attribute, or SQL expression used to determine the target class for an incoming row, when inheriting classes are present.

        This value is commonly a Column object that’s present in the mapped Table:

        class Employee(Base):
            __tablename__ = 'employee'
        
            id = Column(Integer, primary_key=True)
            discriminator = Column(String(50))
        
            __mapper_args__ = {
                "polymorphic_on":discriminator,
                "polymorphic_identity":"employee"
            }

        It may also be specified as a SQL expression, as in this example where we use the case() construct to provide a conditional approach:

        class Employee(Base):
            __tablename__ = 'employee'
        
            id = Column(Integer, primary_key=True)
            discriminator = Column(String(50))
        
            __mapper_args__ = {
                "polymorphic_on":case([
                    (discriminator == "EN", "engineer"),
                    (discriminator == "MA", "manager"),
                ], else_="employee"),
                "polymorphic_identity":"employee"
            }

        It may also refer to any attribute configured with column_property(), or to the string name of one:

        class Employee(Base):
            __tablename__ = 'employee'
        
            id = Column(Integer, primary_key=True)
            discriminator = Column(String(50))
            employee_type = column_property(
                case([
                    (discriminator == "EN", "engineer"),
                    (discriminator == "MA", "manager"),
                ], else_="employee")
            )
        
            __mapper_args__ = {
                "polymorphic_on":employee_type,
                "polymorphic_identity":"employee"
            }

        Changed in version 0.7.4: polymorphic_on may be specified as a SQL expression, or refer to any attribute configured with column_property(), or to the string name of one.

        When setting polymorphic_on to reference an attribute or expression that’s not present in the locally mapped Table, yet the value of the discriminator should be persisted to the database, the value of the discriminator is not automatically set on new instances; this must be handled by the user, either through manual means or via event listeners. A typical approach to establishing such a listener looks like:

        from sqlalchemy import event
        from sqlalchemy.orm import object_mapper
        
        @event.listens_for(Employee, "init", propagate=True)
        def set_identity(instance, *arg, **kw):
            mapper = object_mapper(instance)
            instance.discriminator = mapper.polymorphic_identity

        Where above, we assign the value of polymorphic_identity for the mapped class to the discriminator attribute, thus persisting the value to the discriminator column in the database.

      • polymorphic_identity – Specifies the value which identifies this particular class as returned by the column expression referred to by the polymorphic_on setting. As rows are received, the value corresponding to the polymorphic_on column expression is compared to this value, indicating which subclass should be used for the newly reconstructed object.
      • properties – A dictionary mapping the string names of object attributes to MapperProperty instances, which define the persistence behavior of that attribute. Note that Column objects present in the mapped Table are automatically placed into ColumnProperty instances upon mapping, unless overridden. When using Declarative, this argument is passed automatically, based on all those MapperProperty instances declared in the declared class body.
      • primary_key – A list of Column objects which define the primary key to be used against this mapper’s selectable unit. This is normally simply the primary key of the local_table, but can be overridden here.
      • version_id_col

        A Column that will be used to keep a running version id of rows in the table. This is used to detect concurrent updates or the presence of stale data in a flush. If an UPDATE statement does not match the last known version id, a StaleDataError exception is thrown. By default, the column must be of Integer type, unless version_id_generator specifies an alternative version generator.

        See also

        Configuring a Version Counter - discussion of version counting and rationale.

      • version_id_generator

        Define how new version ids should be generated. Defaults to None, which indicates that a simple integer counting scheme be employed. To provide a custom versioning scheme, provide a callable function of the form:

        def generate_version(version):
            return next_version

      • with_polymorphic

        A tuple in the form (<classes>, <selectable>) indicating the default style of “polymorphic” loading, that is, which tables are queried at once. <classes> is any single or list of mappers and/or classes indicating the inherited classes that should be loaded at once. The special value '*' may be used to indicate all descending classes should be loaded immediately. The second tuple argument <selectable> indicates a selectable that will be used to query for multiple classes.

      sqlalchemy.orm.object_mapper(instance)

      Given an object, return the primary Mapper associated with the object instance.

      Raises sqlalchemy.orm.exc.UnmappedInstanceError if no mapping is configured.

      This function is available via the inspection system as:

      inspect(instance).mapper

      Using the inspection system will raise sqlalchemy.exc.NoInspectionAvailable if the instance is not part of a mapping.

      sqlalchemy.orm.class_mapper(class_, configure=True)

      Given a class, return the primary Mapper associated with the class.

      Raises UnmappedClassError if no mapping is configured on the given class, or ArgumentError if a non-class object is passed.

      Equivalent functionality is available via the inspect() function as:

      inspect(some_mapped_class)

      Using the inspection system will raise sqlalchemy.exc.NoInspectionAvailable if the class is not mapped.

      sqlalchemy.orm.configure_mappers()

      Initialize the inter-mapper relationships of all mappers that have been constructed thus far.

      This function can be called any number of times, but in most cases is handled internally.
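
      For example, an application may call it once at startup so that configuration errors surface immediately rather than at the first query:

      from sqlalchemy.orm import configure_mappers

      configure_mappers()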

      sqlalchemy.orm.clear_mappers()

      Remove all mappers from all classes.

      This function removes all instrumentation from classes and disposes of their associated mappers. Once called, the classes are unmapped and can be later re-mapped with new mappers.

      clear_mappers() is not for normal use, as there is literally no valid usage for it outside of very specific testing scenarios. Normally, mappers are permanent structural components of user-defined classes, and are never discarded independently of their class. If a mapped class itself is garbage collected, its mapper is automatically disposed of as well. As such, clear_mappers() is only for usage in test suites that re-use the same classes with different mappings, which is itself an extremely rare use case - the only such use case is in fact SQLAlchemy’s own test suite, and possibly the test suites of other ORM extension libraries which intend to test various combinations of mapper construction upon a fixed set of classes.

      sqlalchemy.orm.util.identity_key(*args, **kwargs)

      Generate “identity key” tuples, as are used as keys in the Session.identity_map dictionary.

      This function has several call styles:

      • identity_key(class, ident)

        This form receives a mapped class and a primary key scalar or tuple as an argument.

        E.g.:

        >>> identity_key(MyClass, (1, 2))
        (<class '__main__.MyClass'>, (1, 2))

        param class: mapped class (must be a positional argument)
        param ident: primary key, may be a scalar or tuple argument.
      • identity_key(instance=instance)

        This form will produce the identity key for a given instance. The instance need not be persistent, only that its primary key attributes are populated (else the key will contain None for those missing values).

        E.g.:

        >>> instance = MyClass(1, 2)
        >>> identity_key(instance=instance)
        (<class '__main__.MyClass'>, (1, 2))

        In this form, the given instance is ultimately run through Mapper.identity_key_from_instance(), which will have the effect of performing a database check for the corresponding row if the object is expired.

        param instance: object instance (must be given as a keyword arg)
      • identity_key(class, row=row)

        This form is similar to the class/tuple form, except that it is passed a database result row as a RowProxy object.

        E.g.:

        >>> row = engine.execute("select * from table where a=1 and b=2").first()
        >>> identity_key(MyClass, row=row)
        (<class '__main__.MyClass'>, (1, 2))

        param class: mapped class (must be a positional argument)
        param row: RowProxy row returned by a ResultProxy (must be given as a keyword arg)

      sqlalchemy.orm.util.polymorphic_union(table_map, typecolname, aliasname='p_union', cast_nulls=True)

      Create a UNION statement used by a polymorphic mapper.

      See Concrete Table Inheritance for an example of how this is used.

      Parameters:
      • table_map – mapping of polymorphic identities to Table objects.
      • typecolname – string name of a “discriminator” column, which will be derived from the query, producing the polymorphic identity for each row. If None, no polymorphic discriminator is generated.
      • aliasname – name of the alias() construct generated.
      • cast_nulls – if True, non-existent columns, which are represented as labeled NULLs, will be passed into CAST. This is a legacy behavior that is problematic on some backends such as Oracle - in which case it can be set to False.
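
      A short hedged sketch of typical usage (the table names are illustrative), as employed with concrete table inheritance:

      from sqlalchemy.orm import polymorphic_union

      # engineers_table and managers_table are assumed to be structurally
      # similar Table objects, one per concrete subclass
      pjoin = polymorphic_union({
          'engineer': engineers_table,
          'manager': managers_table,
      }, 'type', 'pjoin')

      The resulting selectable includes a “type” column carrying the polymorphic identity of each row, and can then be used as the selectable against which the base class of the hierarchy is mapped.
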
      class sqlalchemy.orm.mapper.Mapper(class_, local_table, properties=None, primary_key=None, non_primary=False, inherits=None, inherit_condition=None, inherit_foreign_keys=None, extension=None, order_by=False, always_refresh=False, version_id_col=None, version_id_generator=None, polymorphic_on=None, _polymorphic_map=None, polymorphic_identity=None, concrete=False, with_polymorphic=None, allow_partial_pks=True, batch=True, column_prefix=None, include_properties=None, exclude_properties=None, passive_updates=True, eager_defaults=False, legacy_is_orphan=False, _compiled_cache_size=100)

      Bases: sqlalchemy.orm.interfaces._InspectionAttr

      Define the correlation of class attributes to database table columns.

      The Mapper object is instantiated using the mapper() function. For information about instantiating new Mapper objects, see that function’s documentation.

      When mapper() is used explicitly to link a user defined class with table metadata, this is referred to as classical mapping. Modern SQLAlchemy usage tends to favor the sqlalchemy.ext.declarative extension for class configuration, which makes usage of mapper() behind the scenes.

      Given a particular class known to be mapped by the ORM, the Mapper which maintains it can be acquired using the inspect() function:

      from sqlalchemy import inspect
      
      mapper = inspect(MyClass)

      A class which was mapped by the sqlalchemy.ext.declarative extension will also have its mapper available via the __mapper__ attribute.

      __init__(class_, local_table, properties=None, primary_key=None, non_primary=False, inherits=None, inherit_condition=None, inherit_foreign_keys=None, extension=None, order_by=False, always_refresh=False, version_id_col=None, version_id_generator=None, polymorphic_on=None, _polymorphic_map=None, polymorphic_identity=None, concrete=False, with_polymorphic=None, allow_partial_pks=True, batch=True, column_prefix=None, include_properties=None, exclude_properties=None, passive_updates=True, eager_defaults=False, legacy_is_orphan=False, _compiled_cache_size=100)

      Construct a new mapper.

      Mappers are normally constructed via the mapper() function. See that function’s documentation for details.

      add_properties(dict_of_properties)

      Add the given dictionary of properties to this mapper, using add_property.

      add_property(key, prop)

      Add an individual MapperProperty to this mapper.

      If the mapper has not been configured yet, just adds the property to the initial properties dictionary sent to the constructor. If this Mapper has already been configured, then the given MapperProperty is configured immediately.
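
      A minimal sketch (reusing the User and Address classes from earlier sections) of adding a relationship to an already-configured mapper:

      from sqlalchemy import inspect
      from sqlalchemy.orm import relationship

      # equivalent to assigning the attribute on the declarative class
      inspect(User).add_property("addresses", relationship(Address))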

      all_orm_descriptors

      A namespace of all _InspectionAttr attributes associated with the mapped class.

      These attributes are in all cases Python descriptors associated with the mapped class or its superclasses.

      This namespace includes attributes that are mapped to the class as well as attributes declared by extension modules. It includes any Python descriptor type that inherits from _InspectionAttr. This includes QueryableAttribute, as well as extension types such as hybrid_property, hybrid_method and AssociationProxy.

      To distinguish between mapped attributes and extension attributes, the attribute _InspectionAttr.extension_type will refer to a constant that distinguishes between different extension types.

      When dealing with a QueryableAttribute, the QueryableAttribute.property attribute refers to the MapperProperty property, which is what you get when referring to the collection of mapped properties via Mapper.attrs.

      New in version 0.8.0.

      See also

      Mapper.attrs

      attrs

      A namespace of all MapperProperty objects associated with this mapper.

      This is an object that provides each property based on its key name. For instance, the mapper for a User class which has User.name attribute would provide mapper.attrs.name, which would be the ColumnProperty representing the name column. The namespace object can also be iterated, which would yield each MapperProperty.

      Mapper has several pre-filtered views of this attribute which limit the types of properties returned, including synonyms, column_attrs, relationships, and composites.
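
      A brief sketch of this namespace in use (assuming the mapped User class from earlier examples):

      from sqlalchemy import inspect

      mapper = inspect(User)

      # the ColumnProperty representing the "name" column
      name_prop = mapper.attrs.name

      # iterate all MapperProperty objects maintained by this mapper
      for prop in mapper.attrs:
          print prop.key, prop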

      base_mapper = None

      The base-most Mapper in an inheritance chain.

      In a non-inheriting scenario, this attribute will always be this Mapper. In an inheritance scenario, it references the Mapper which is parent to all other Mapper objects in the inheritance chain.

      This is a read only attribute determined during mapper construction. Behavior is undefined if directly modified.

      c = None

      A synonym for columns.

      cascade_iterator(type_, state, halt_on=None)

      Iterate each element and its mapper in an object graph, for all relationships that meet the given cascade rule.

      Parameters:
      • type – The name of the cascade rule (i.e. save-update, delete, etc.)
      • state – The lead InstanceState. child items will be processed per the relationships defined for this object’s mapper.

      The return value consists of object instances; this provides a strong reference so that they don’t fall out of scope immediately.

      class_ = None

      The Python class which this Mapper maps.

      This is a read only attribute determined during mapper construction. Behavior is undefined if directly modified.

      class_manager = None

      The ClassManager which maintains event listeners and class-bound descriptors for this Mapper.

      This is a read only attribute determined during mapper construction. Behavior is undefined if directly modified.

      column_attrs

      Return a namespace of all ColumnProperty properties maintained by this Mapper.

      See also

      Mapper.attrs - namespace of all MapperProperty objects.

      columns = None

      A collection of Column or other scalar expression objects maintained by this Mapper.

      The collection behaves the same as that of the c attribute on any Table object, except that only those columns included in this mapping are present, and are keyed based on the attribute name defined in the mapping, not necessarily the key attribute of the Column itself. Additionally, scalar expressions mapped by column_property() are also present here.

      This is a read only attribute determined during mapper construction. Behavior is undefined if directly modified.

      common_parent(other)

      Return true if the given mapper shares a common inherited parent with this mapper.

      compile()

      Initialize the inter-mapper relationships of all mappers that have been constructed thus far.

      Deprecated since version 0.7: Mapper.compile() is replaced by configure_mappers()

      compiled

      Deprecated since version 0.7: Mapper.compiled is replaced by Mapper.configured

      composites

      Return a namespace of all CompositeProperty properties maintained by this Mapper.

      See also

      Mapper.attrs - namespace of all MapperProperty objects.

      concrete = None

      Represent True if this Mapper is a concrete inheritance mapper.

      This is a read only attribute determined during mapper construction. Behavior is undefined if directly modified.

      configured = None

      Represent True if this Mapper has been configured.

      This is a read only attribute determined during mapper construction. Behavior is undefined if directly modified.

      entity

      Part of the inspection API.

      Returns self.class_.

      get_property(key, _configure_mappers=True)

      return a MapperProperty associated with the given key.

      get_property_by_column(column)

      Given a Column object, return the MapperProperty which maps this column.
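
      A hypothetical illustration of both lookup styles, assuming a declaratively mapped User class with a name column:

      from sqlalchemy import inspect

      mapper = inspect(User)

      # lookup by attribute key
      name_prop = mapper.get_property('name')

      # lookup by the mapped Column object
      same_prop = mapper.get_property_by_column(User.__table__.c.name)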

      identity_key_from_instance(instance)

      Return the identity key for the given instance, based on its primary key attributes.

      If the instance’s state is expired, calling this method will result in a database check to see if the object has been deleted. If the row no longer exists, ObjectDeletedError is raised.

      This value is typically also found on the instance state under the attribute name key.
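
      A hedged sketch assuming a mapped User class with a single integer primary key; some_user is a hypothetical persistent instance and the exact tuple layout depends on the mapping:

      from sqlalchemy import inspect

      # typically the mapped class paired with a tuple of
      # primary key values, e.g. (User, (5,))
      key = inspect(User).identity_key_from_instance(some_user)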

      identity_key_from_primary_key(primary_key)

      Return an identity-map key for use in storing/retrieving an item from an identity map.

      Parameters:primary_key – A list of values indicating the identifier.
      identity_key_from_row(row, adapter=None)

      Return an identity-map key for use in storing/retrieving an item from the identity map.

      Parameters:row – A RowProxy instance. The columns which are mapped by this Mapper should be locatable in the row, preferably via the Column object directly (as is the case when a select() construct is executed), or via string names of the form <tablename>_<colname>.
      inherits = None

      References the Mapper which this Mapper inherits from, if any.

      This is a read only attribute determined during mapper construction. Behavior is undefined if directly modified.

      is_mapper = True

      Part of the inspection API.

      isa(other)

      Return True if this mapper inherits from the given mapper.

      iterate_properties

      return an iterator of all MapperProperty objects.

      local_table = None

      The Selectable which this Mapper manages.

      Typically is an instance of Table or Alias. May also be None.

      The “local” table is the selectable that the Mapper is directly responsible for managing from an attribute access and flush perspective. For non-inheriting mappers, the local table is the same as the “mapped” table. For joined-table inheritance mappers, local_table will be the particular sub-table of the overall “join” which this Mapper represents. If this mapper is a single-table inheriting mapper, local_table will be None.

      See also

      mapped_table.

      mapped_table = None

      The Selectable to which this Mapper is mapped.

      Typically an instance of Table, Join, or Alias.

      The “mapped” table is the selectable that the mapper selects from during queries. For non-inheriting mappers, the mapped table is the same as the “local” table. For joined-table inheritance mappers, mapped_table references the full Join representing full rows for this particular subclass. For single-table inheritance mappers, mapped_table references the base table.

      See also

      local_table.

      mapper

      Part of the inspection API.

      Returns self.

      non_primary = None

      Represent True if this Mapper is a “non-primary” mapper, e.g. a mapper that is used only to select rows but not for persistence management.

      This is a read only attribute determined during mapper construction. Behavior is undefined if directly modified.

      polymorphic_identity = None

      Represent an identifier which is matched against the polymorphic_on column during result row loading.

      Used only with inheritance, this object can be of any type which is comparable to the type of column represented by polymorphic_on.

      This is a read only attribute determined during mapper construction. Behavior is undefined if directly modified.

      polymorphic_iterator()

      Iterate through the collection including this mapper and all descendant mappers.

      This includes not just the immediately inheriting mappers but all their inheriting mappers as well.

      To iterate through an entire hierarchy, use mapper.base_mapper.polymorphic_iterator().
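
      For instance, a minimal sketch assuming a joined-inheritance hierarchy rooted at a mapped Employee class:

      from sqlalchemy import inspect

      employee_mapper = inspect(Employee)

      # walk the entire hierarchy from the base-most mapper downward
      for sub_mapper in employee_mapper.base_mapper.polymorphic_iterator():
          print(sub_mapper.class_.__name__)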

      polymorphic_map = None

      A mapping of “polymorphic identity” identifiers mapped to Mapper instances, within an inheritance scenario.

      The identifiers can be of any type which is comparable to the type of column represented by polymorphic_on.

      An inheritance chain of mappers will all reference the same polymorphic map object. The object is used to correlate incoming result rows to target mappers.

      This is a read only attribute determined during mapper construction. Behavior is undefined if directly modified.

      polymorphic_on = None

      The Column or SQL expression specified as the polymorphic_on argument for this Mapper, within an inheritance scenario.

      This attribute is normally a Column instance but may also be an expression, such as one derived from cast().

      This is a read only attribute determined during mapper construction. Behavior is undefined if directly modified.

      primary_key = None

      An iterable containing the collection of Column objects which comprise the ‘primary key’ of the mapped table, from the perspective of this Mapper.

      This list is against the selectable in mapped_table. In the case of inheriting mappers, some columns may be managed by a superclass mapper. For example, in the case of a Join, the primary key is determined by all of the primary key columns across all tables referenced by the Join.

      The list is also not necessarily the same as the primary key column collection associated with the underlying tables; the Mapper features a primary_key argument that can override what the Mapper considers as primary key columns.

      This is a read only attribute determined during mapper construction. Behavior is undefined if directly modified.

      primary_key_from_instance(instance)

      Return the list of primary key values for the given instance.

      If the instance’s state is expired, calling this method will result in a database check to see if the object has been deleted. If the row no longer exists, ObjectDeletedError is raised.

      primary_mapper()

      Return the primary mapper corresponding to this mapper’s class key (class).

      relationships

      Return a namespace of all RelationshipProperty properties maintained by this Mapper.

      See also

      Mapper.attrs - namespace of all MapperProperty objects.

      selectable

      The select() construct this Mapper selects from by default.

      Normally, this is equivalent to mapped_table, unless the with_polymorphic feature is in use, in which case the full “polymorphic” selectable is returned.

      self_and_descendants

      The collection including this mapper and all descendant mappers.

      This includes not just the immediately inheriting mappers but all their inheriting mappers as well.

      single = None

      Represent True if this Mapper is a single table inheritance mapper.

      local_table will be None if this flag is set.

      This is a read only attribute determined during mapper construction. Behavior is undefined if directly modified.

      synonyms

      Return a namespace of all SynonymProperty properties maintained by this Mapper.

      See also

      Mapper.attrs - namespace of all MapperProperty objects.

      tables = None

      An iterable containing the collection of Table objects which this Mapper is aware of.

      If the mapper is mapped to a Join, or an Alias representing a Select, the individual Table objects that comprise the full construct will be represented here.

      This is a read only attribute determined during mapper construction. Behavior is undefined if directly modified.

      validators = None

      An immutable dictionary of attributes which have been decorated using the validates() decorator.

      The dictionary contains string attribute names as keys mapped to the actual validation method.

      with_polymorphic_mappers

      The list of Mapper objects included in the default “polymorphic” query.

      SQLAlchemy-0.8.4/doc/orm/query.html0000644000076500000240000056444212251147502017644 0ustar classicstaff00000000000000 Querying — SQLAlchemy 0.8 Documentation

      SQLAlchemy 0.8 Documentation

      Release: 0.8.4 | Release Date: December 8, 2013

      Querying

      This section provides API documentation for the Query object and related constructs.

      For an in-depth introduction to querying with the SQLAlchemy ORM, please see the Object Relational Tutorial.

      The Query Object

      Query is produced in terms of a given Session, using the query() function:

      q = session.query(SomeMappedClass)

      Following is the full interface for the Query object.

      class sqlalchemy.orm.query.Query(entities, session=None)

      ORM-level SQL construction object.

      Query is the source of all SELECT statements generated by the ORM, both those formulated by end-user query operations as well as by high level internal operations such as related collection loading. It features a generative interface whereby successive calls return a new Query object, a copy of the former with additional criteria and options associated with it.

      Query objects are normally initially generated using the query() method of Session. For a full walkthrough of Query usage, see the Object Relational Tutorial.

      add_column(column)

      Add a column expression to the list of result columns to be returned.

      Pending deprecation: add_column() will be superseded by add_columns().

      add_columns(*column)

      Add one or more column expressions to the list of result columns to be returned.
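
      e.g., a hypothetical query returning User entities along with two extra column expressions:

      q = session.query(User).add_columns(User.id, User.name)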

      add_entity(entity, alias=None)

      add a mapped entity to the list of result columns to be returned.

      all()

      Return the results represented by this Query as a list.

      This results in an execution of the underlying query.

      as_scalar()

      Return the full SELECT statement represented by this Query, converted to a scalar subquery.

      Analogous to sqlalchemy.sql.expression.SelectBase.as_scalar().

      New in version 0.6.5.

      autoflush(setting)

      Return a Query with a specific ‘autoflush’ setting.

      Note that a Session with autoflush=False will not autoflush, even if this flag is set to True at the Query level. Therefore this flag is usually used only to disable autoflush for a specific Query.
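
      A hypothetical example that disables autoflush for a single query against a mapped User class:

      q = session.query(User).autoflush(False).filter(User.name == 'ed')
      results = q.all()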

      column_descriptions

      Return metadata about the columns which would be returned by this Query.

      Format is a list of dictionaries:

      user_alias = aliased(User, name='user2')
      q = sess.query(User, User.id, user_alias)
      
      # this expression:
      q.column_descriptions
      
      # would return:
      [
          {
              'name':'User',
              'type':User,
              'aliased':False,
              'expr':User,
          },
          {
              'name':'id',
              'type':Integer(),
              'aliased':False,
              'expr':User.id,
          },
          {
              'name':'user2',
              'type':User,
              'aliased':True,
              'expr':user_alias
          }
      ]
      correlate(*args)

      Return a Query construct which will correlate the given FROM clauses to that of an enclosing Query or select().

      The method here accepts mapped classes, aliased() constructs, and mapper() constructs as arguments, which are resolved into expression constructs, in addition to appropriate expression constructs.

      The correlation arguments are ultimately passed to Select.correlate() after coercion to expression constructs.

      The correlation arguments take effect in such cases as when Query.from_self() is used, or when a subquery as returned by Query.subquery() is embedded in another select() construct.

      count()

      Return a count of rows this Query would return.

      This generates the SQL for this Query as follows:

      SELECT count(1) AS count_1 FROM (
          SELECT <rest of query follows...>
      ) AS anon_1

      Changed in version 0.7: The above scheme is newly refined as of 0.7b3.

      For fine grained control over specific columns to count, to skip the usage of a subquery or otherwise control of the FROM clause, or to use other aggregate functions, use func expressions in conjunction with query(), i.e.:

      from sqlalchemy import func
      
      # count User records, without
      # using a subquery.
      session.query(func.count(User.id))
      
      # return count of user "id" grouped
      # by "name"
      session.query(func.count(User.id)).\
              group_by(User.name)
      
      from sqlalchemy import distinct
      
      # count distinct "name" values
      session.query(func.count(distinct(User.name)))
      cte(name=None, recursive=False)

      Return the full SELECT statement represented by this Query represented as a common table expression (CTE).

      New in version 0.7.6.

      Parameters and usage are the same as those of the SelectBase.cte() method; see that method for further details.

      Here is the Postgresql WITH RECURSIVE example. Note that, in this example, the included_parts cte and the incl_alias alias of it are Core selectables, which means the columns are accessed via the .c. attribute. The parts_alias object is an orm.aliased() instance of the Part entity, so column-mapped attributes are available directly:

      from sqlalchemy.orm import aliased
      
      class Part(Base):
          __tablename__ = 'part'
          part = Column(String, primary_key=True)
          sub_part = Column(String, primary_key=True)
          quantity = Column(Integer)
      
      included_parts = session.query(
                      Part.sub_part,
                      Part.part,
                      Part.quantity).\
                          filter(Part.part=="our part").\
                          cte(name="included_parts", recursive=True)
      
      incl_alias = aliased(included_parts, name="pr")
      parts_alias = aliased(Part, name="p")
      included_parts = included_parts.union_all(
          session.query(
              parts_alias.part,
              parts_alias.sub_part,
              parts_alias.quantity).\
                  filter(parts_alias.part==incl_alias.c.sub_part)
          )
      
      q = session.query(
              included_parts.c.sub_part,
              func.sum(included_parts.c.quantity).
                  label('total_quantity')
          ).\
          group_by(included_parts.c.sub_part)

      See also:

      SelectBase.cte()

      delete(synchronize_session='evaluate')

      Perform a bulk delete query.

      Deletes rows matched by this query from the database.

      Parameters:synchronize_session

      chooses the strategy for the removal of matched objects from the session. Valid values are:

      False - don’t synchronize the session. This option is the most efficient and is reliable once the session is expired, which typically occurs after a commit(), or explicitly using expire_all(). Before the expiration, objects may still remain in the session which were in fact deleted which can lead to confusing results if they are accessed via get() or already loaded collections.

      'fetch' - performs a select query before the delete to find objects that are matched by the delete query and need to be removed from the session. Matched objects are removed from the session.

      'evaluate' - Evaluate the query’s criteria in Python straight on the objects in the session. If evaluation of the criteria isn’t implemented, an error is raised. In that case you probably want to use the ‘fetch’ strategy as a fallback.

      The expression evaluator currently doesn’t account for differing string collations between the database and Python.

      Returns:the count of rows matched as returned by the database’s “row count” feature.
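
      A hypothetical bulk delete against a mapped User class, using the 'fetch' strategy to remove matched objects from the session:

      num_deleted = session.query(User).\
          filter(User.name == 'ed').\
          delete(synchronize_session='fetch')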

      This method has several key caveats:

      • The method does not offer in-Python cascading of relationships - it is assumed that ON DELETE CASCADE/SET NULL/etc. is configured for any foreign key references which require it, otherwise the database may emit an integrity violation if foreign key references are being enforced.

        After the DELETE, dependent objects in the Session which were impacted by an ON DELETE may not contain the current state, or may have been deleted. This issue is resolved once the Session is expired, which normally occurs upon Session.commit() or can be forced by using Session.expire_all(). Accessing an expired object whose row has been deleted will invoke a SELECT to locate the row; when the row is not found, an ObjectDeletedError is raised.

      • The MapperEvents.before_delete() and MapperEvents.after_delete() events are not invoked from this method. Instead, the SessionEvents.after_bulk_delete() method is provided to act upon a mass DELETE of entity rows.

      See also

      Query.update()

      Inserts, Updates and Deletes - Core SQL tutorial

      distinct(*criterion)

      Apply a DISTINCT to the query and return the newly resulting Query.

      Parameters:*expr – optional column expressions. When present, the Postgresql dialect will render a DISTINCT ON (<expressions>) construct.
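
      A hypothetical DISTINCT ON example against a mapped User class (Postgresql only; the leading ORDER BY expressions should match the DISTINCT ON expressions):

      q = session.query(User).\
          distinct(User.name).\
          order_by(User.name, User.id)
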
      enable_assertions(value)

      Control whether assertions are generated.

      When set to False, the returned Query will not assert its state before certain operations, including that LIMIT/OFFSET has not been applied when filter() is called, no criterion exists when get() is called, and no “from_statement()” exists when filter()/order_by()/group_by() etc. is called. This more permissive mode is used by custom Query subclasses to specify criterion or other modifiers outside of the usual usage patterns.

      Care should be taken to ensure that the usage pattern is even possible. A statement applied by from_statement() will override any criterion set by filter() or order_by(), for example.

      enable_eagerloads(value)

      Control whether or not eager joins and subqueries are rendered.

      When set to False, the returned Query will not render eager joins regardless of joinedload(), subqueryload() options or mapper-level lazy='joined'/lazy='subquery' configurations.

      This is used primarily when nesting the Query’s statement into a subquery or other selectable.

      except_(*q)

      Produce an EXCEPT of this Query against one or more queries.

      Works the same way as union(). See that method for usage examples.

      except_all(*q)

      Produce an EXCEPT ALL of this Query against one or more queries.

      Works the same way as union(). See that method for usage examples.

      execution_options(**kwargs)

      Set non-SQL options which take effect during execution.

      The options are the same as those accepted by Connection.execution_options().

      Note that the stream_results execution option is enabled automatically if the yield_per() method is used.
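
      For example, a hedged sketch that requests streamed results on dialects that support it:

      q = session.query(User).execution_options(stream_results=True)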

      exists()

      A convenience method that turns a query into an EXISTS subquery of the form EXISTS (SELECT 1 FROM ... WHERE ...).

      e.g.:

      q = session.query(User).filter(User.name == 'fred')
      session.query(q.exists())

      Producing SQL similar to:

      SELECT EXISTS (
          SELECT 1 FROM users WHERE users.name = :name_1
      ) AS anon_1

      New in version 0.8.1.

      filter(*criterion)

      apply the given filtering criterion to a copy of this Query, using SQL expressions.

      e.g.:

      session.query(MyClass).filter(MyClass.name == 'some name')

      Multiple criteria are joined together by AND:

      session.query(MyClass).\
          filter(MyClass.name == 'some name', MyClass.id > 5)

      The criterion is any SQL expression object applicable to the WHERE clause of a select. String expressions are coerced into SQL expression constructs via the text() construct.

      Changed in version 0.7.5: Multiple criteria joined by AND.

      See also:

      Query.filter_by() - filter on keyword expressions.

      filter_by(**kwargs)

      apply the given filtering criterion to a copy of this Query, using keyword expressions.

      e.g.:

      session.query(MyClass).filter_by(name = 'some name')

      Multiple criteria are joined together by AND:

      session.query(MyClass).\
          filter_by(name = 'some name', id = 5)

      The keyword expressions are extracted from the primary entity of the query, or the last entity that was the target of a call to Query.join().

      See also:

      Query.filter() - filter on SQL expressions.

      first()

      Return the first result of this Query or None if the result doesn’t contain any row.

      first() applies a limit of one within the generated SQL, so that only one primary entity row is generated on the server side (note this may consist of multiple result rows if join-loaded collections are present).

      Calling first() results in an execution of the underlying query.

      from_self(*entities)

      return a Query that selects from this Query’s SELECT statement.

      *entities - optional list of entities which will replace those being selected.

      from_statement(statement)

      Execute the given SELECT statement and return results.

      This method bypasses all internal statement compilation, and the statement is executed without modification.

      The statement argument is either a string, a select() construct, or a text() construct, and should return the set of columns appropriate to the entity class represented by this Query.
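
      A hypothetical example mapping rows from a textual SELECT onto the User entity; the users table and its columns are assumed:

      users = session.query(User).\
          from_statement("SELECT * FROM users WHERE name = :name").\
          params(name='ed').all()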

      get(ident)

      Return an instance based on the given primary key identifier, or None if not found.

      E.g.:

      my_user = session.query(User).get(5)
      
      some_object = session.query(VersionedFoo).get((5, 10))

      get() is special in that it provides direct access to the identity map of the owning Session. If the given primary key identifier is present in the local identity map, the object is returned directly from this collection and no SQL is emitted, unless the object has been marked fully expired. If not present, a SELECT is performed in order to locate the object.

      get() also will perform a check if the object is present in the identity map and marked as expired - a SELECT is emitted to refresh the object as well as to ensure that the row is still present. If not, ObjectDeletedError is raised.

      get() is only used to return a single mapped instance, not multiple instances or individual column constructs, and strictly on a single primary key value. The originating Query must be constructed in this way, i.e. against a single mapped entity, with no additional filtering criterion. Loading options via options() may be applied however, and will be used if the object is not yet locally present.

      A lazy-loading, many-to-one attribute configured by relationship(), using a simple foreign-key-to-primary-key criterion, will also use an operation equivalent to get() in order to retrieve the target value from the local identity map before querying the database. See Relationship Loading Techniques for further details on relationship loading.

      Parameters:ident – A scalar or tuple value representing the primary key. For a composite primary key, the order of identifiers corresponds in most cases to that of the mapped Table object’s primary key columns. For a mapper() that was given the primary key argument during construction, the order of identifiers corresponds to the elements present in this collection.
      Returns:The object instance, or None.
      group_by(*criterion)

      apply one or more GROUP BY criterion to the query and return the newly resulting Query

      having(criterion)

      apply a HAVING criterion to the query and return the newly resulting Query.

      having() is used in conjunction with group_by().

      HAVING criterion makes it possible to use filters on aggregate functions like COUNT, SUM, AVG, MAX, and MIN, eg.:

      q = session.query(User.id).\
                  join(User.addresses).\
                  group_by(User.id).\
                  having(func.count(Address.id) > 2)
      instances(cursor, _Query__context=None)

      Given a ResultProxy cursor as returned by connection.execute(), return an ORM result as an iterator.

      e.g.:

      result = engine.execute("select * from users")
      for u in session.query(User).instances(result):
          print u
      intersect(*q)

      Produce an INTERSECT of this Query against one or more queries.

      Works the same way as union(). See that method for usage examples.

      intersect_all(*q)

      Produce an INTERSECT ALL of this Query against one or more queries.

      Works the same way as union(). See that method for usage examples.

      join(*props, **kwargs)

      Create a SQL JOIN against this Query object’s criterion and apply generatively, returning the newly resulting Query.

      Simple Relationship Joins

      Consider a mapping between two classes User and Address, with a relationship User.addresses representing a collection of Address objects associated with each User. The most common usage of join() is to create a JOIN along this relationship, using the User.addresses attribute as an indicator for how this should occur:

      q = session.query(User).join(User.addresses)

      Where above, the call to join() along User.addresses will result in SQL equivalent to:

      SELECT user.* FROM user JOIN address ON user.id = address.user_id

      In the above example we refer to User.addresses as passed to join() as the on clause, that is, it indicates how the “ON” portion of the JOIN should be constructed. For a single-entity query such as the one above (i.e. we start by selecting only from User and nothing else), the relationship can also be specified by its string name:

      q = session.query(User).join("addresses")

      join() can also accommodate multiple “on clause” arguments to produce a chain of joins, such as below where a join across four related entities is constructed:

      q = session.query(User).join("orders", "items", "keywords")

      The above would be shorthand for three separate calls to join(), each using an explicit attribute to indicate the source entity:

      q = session.query(User).\
              join(User.orders).\
              join(Order.items).\
              join(Item.keywords)

      Joins to a Target Entity or Selectable

      A second form of join() allows any mapped entity or core selectable construct as a target. In this usage, join() will attempt to create a JOIN along the natural foreign key relationship between two entities:

      q = session.query(User).join(Address)

      The above calling form of join() will raise an error if either there are no foreign keys between the two entities, or if there are multiple foreign key linkages between them. In the above calling form, join() is called upon to create the “on clause” automatically for us. The target can be any mapped entity or selectable, such as a Table:

      q = session.query(User).join(addresses_table)

      Joins to a Target with an ON Clause

      The third calling form allows both the target entity as well as the ON clause to be passed explicitly. Suppose for example we wanted to join to Address twice, using an alias the second time. We use aliased() to create a distinct alias of Address, and join to it using the target, onclause form, so that the alias can be specified explicitly as the target along with the relationship to instruct how the ON clause should proceed:

      a_alias = aliased(Address)
      
      q = session.query(User).\
              join(User.addresses).\
              join(a_alias, User.addresses).\
              filter(Address.email_address=='ed@foo.com').\
              filter(a_alias.email_address=='ed@bar.com')

      Where above, the generated SQL would be similar to:

      SELECT user.* FROM user
          JOIN address ON user.id = address.user_id
          JOIN address AS address_1 ON user.id=address_1.user_id
          WHERE address.email_address = :email_address_1
          AND address_1.email_address = :email_address_2

      The two-argument calling form of join() also allows us to construct arbitrary joins with SQL-oriented “on clause” expressions, not relying upon configured relationships at all. Any SQL expression can be passed as the ON clause when using the two-argument form, which should refer to the target entity in some way as well as an applicable source entity:

      q = session.query(User).join(Address, User.id==Address.user_id)

      Changed in version 0.7: In SQLAlchemy 0.6 and earlier, the two argument form of join() requires the usage of a tuple: query(User).join((Address, User.id==Address.user_id)). This calling form is accepted in 0.7 and further, though is not necessary unless multiple join conditions are passed to a single join() call, which itself is also not generally necessary as it is now equivalent to multiple calls (this wasn’t always the case).

      Advanced Join Targeting and Adaption

      There is a lot of flexibility in what the “target” can be when using join(). As noted previously, it also accepts Table constructs and other selectables such as alias() and select() constructs, with either the one or two-argument forms:

      addresses_q = select([Address.user_id]).\
                  where(Address.email_address.endswith("@bar.com")).\
                  alias()
      
      q = session.query(User).\
                  join(addresses_q, addresses_q.c.user_id==User.id)

      join() also features the ability to adapt a relationship() -driven ON clause to the target selectable. Below we construct a JOIN from User to a subquery against Address, allowing the relationship denoted by User.addresses to adapt itself to the altered target:

      address_subq = session.query(Address).\
                      filter(Address.email_address == 'ed@foo.com').\
                      subquery()
      
      q = session.query(User).join(address_subq, User.addresses)

      Producing SQL similar to:

      SELECT user.* FROM user
          JOIN (
              SELECT address.id AS id,
                      address.user_id AS user_id,
                      address.email_address AS email_address
              FROM address
              WHERE address.email_address = :email_address_1
          ) AS anon_1 ON user.id = anon_1.user_id

      The above form allows one to fall back onto an explicit ON clause at any time:

      q = session.query(User).\
              join(address_subq, User.id==address_subq.c.user_id)

      Controlling what to Join From

      While join() exclusively deals with the “right” side of the JOIN, we can also control the “left” side, in those cases where it’s needed, using select_from(). Below we construct a query against Address but can still make usage of User.addresses as our ON clause by instructing the Query to select first from the User entity:

      q = session.query(Address).select_from(User).\
                      join(User.addresses).\
                      filter(User.name == 'ed')

      Which will produce SQL similar to:

      SELECT address.* FROM user
          JOIN address ON user.id=address.user_id
          WHERE user.name = :name_1

      Constructing Aliases Anonymously

      join() can construct anonymous aliases using the aliased=True flag. This feature is useful when a query is being joined algorithmically, such as when querying self-referentially to an arbitrary depth:

      q = session.query(Node).\
              join("children", "children", aliased=True)

      When aliased=True is used, the actual “alias” construct is not explicitly available. To work with it, methods such as Query.filter() will adapt the incoming entity to the last join point:

      q = session.query(Node).\
              join("children", "children", aliased=True).\
              filter(Node.name == 'grandchild 1')

      When using automatic aliasing, the from_joinpoint=True argument can allow a multi-node join to be broken into multiple calls to join(), so that each path along the way can be further filtered:

      q = session.query(Node).\
              join("children", aliased=True).\
              filter(Node.name == 'child 1').\
              join("children", aliased=True, from_joinpoint=True).\
              filter(Node.name == 'grandchild 1')

      The filtering aliases above can then be reset back to the original Node entity using reset_joinpoint():

      q = session.query(Node).\
              join("children", "children", aliased=True).\
              filter(Node.name == 'grandchild 1').\
              reset_joinpoint().\
              filter(Node.name == 'parent 1')

      For an example of aliased=True, see the distribution example XML Persistence which illustrates an XPath-like query system using algorithmic joins.

      Parameters:
      • *props – A collection of one or more join conditions, each consisting of a relationship-bound attribute or string relationship name representing an “on clause”, or a single target entity, or a tuple in the form of (target, onclause). A special two-argument calling form of the form target, onclause is also accepted.
      • aliased=False – If True, indicate that the JOIN target should be anonymously aliased. Subsequent calls to filter and similar will adapt the incoming criterion to the target alias, until reset_joinpoint() is called.
      • from_joinpoint=False – When using aliased=True, a setting of True here will cause the join to be from the most recent joined target, rather than starting back from the original FROM clauses of the query.

      See also:

      Querying with Joins in the ORM tutorial.

      Mapping Class Inheritance Hierarchies for details on how join() is used for inheritance relationships.

      orm.join() - a standalone ORM-level join function, used internally by Query.join(), which in previous SQLAlchemy versions was the primary ORM-level joining interface.

      label(name)

      Return the full SELECT statement represented by this Query, converted to a scalar subquery with a label of the given name.

      Analogous to sqlalchemy.sql.expression.SelectBase.label().

      New in version 0.6.5.

      limit(limit)

      Apply a LIMIT to the query and return the newly resulting

      Query.

      merge_result(iterator, load=True)

      Merge a result into this Query object’s Session.

      Given an iterator returned by a Query of the same structure as this one, return an identical iterator of results, with all mapped instances merged into the session using Session.merge(). This is an optimized method which will merge all mapped instances, preserving the structure of the result rows and unmapped columns with less method overhead than that of calling Session.merge() explicitly for each value.

      The structure of the results is determined based on the column list of this Query - if these do not correspond, unchecked errors will occur.

      The ‘load’ argument is the same as that of Session.merge().

      For an example of how merge_result() is used, see the source code for the example Dogpile Caching, where merge_result() is used to efficiently restore state from a cache back into a target Session.

      offset(offset)

      Apply an OFFSET to the query and return the newly resulting Query.
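
      A hypothetical paging example combining limit() (documented above) and offset() against a mapped User class:

      page = session.query(User).\
          order_by(User.id).\
          limit(10).offset(20).all()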

      one()

      Return exactly one result or raise an exception.

      Raises sqlalchemy.orm.exc.NoResultFound if the query selects no rows. Raises sqlalchemy.orm.exc.MultipleResultsFound if multiple object identities are returned, or if multiple rows are returned for a query that does not return object identities.

      Note that an entity query, that is, one which selects one or more mapped classes as opposed to individual column attributes, may ultimately represent many rows but only one row of unique entity or entities - this is a successful result for one().

      Calling one() results in an execution of the underlying query.

      Changed in version 0.6: one() fully fetches all results instead of applying any kind of limit, so that the “unique”-ing of entities does not conceal multiple object identities.

      options(*args)

      Return a new Query object, applying the given list of mapper options.

      Most supplied options regard changing how column- and relationship-mapped attributes are loaded. See the sections Deferred Column Loading and Relationship Loading Techniques for reference documentation.

      order_by(*criterion)

      apply one or more ORDER BY criterion to the query and return the newly resulting Query

      All existing ORDER BY settings can be suppressed by passing None - this will suppress any ORDER BY configured on mappers as well.

      Alternatively, an existing ORDER BY setting on the Query object can be entirely cancelled by passing False as the value - use this before calling methods where an ORDER BY is invalid.
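
      e.g., hypothetical usages against a mapped User class:

      # ascending order on a column attribute
      q = session.query(User).order_by(User.name)

      # cancel all ORDER BY, including mapper-configured ordering,
      # e.g. before wrapping the query in a UNION or subquery
      q = session.query(User).order_by(None)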

      outerjoin(*props, **kwargs)

      Create a left outer join against this Query object’s criterion and apply generatively, returning the newly resulting Query.

      Usage is the same as the join() method.

      params(*args, **kwargs)

      add values for bind parameters which may have been specified in filter().

      parameters may be specified using **kwargs, or optionally a single dictionary as the first positional argument. The reason for both is that **kwargs is convenient, however some parameter dictionaries contain unicode keys in which case **kwargs cannot be used.
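
      A hypothetical example binding a parameter referenced in a text()-based filter:

      from sqlalchemy import text

      q = session.query(User).\
          filter(text("users.name = :name")).\
          params(name='ed')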

      populate_existing()

      Return a Query that will expire and refresh all instances as they are loaded, or reused from the current Session.

      populate_existing() does not improve behavior when the ORM is used normally - the Session object’s usual behavior of maintaining a transaction and expiring all attributes after rollback or commit handles object state automatically. This method is not intended for general use.

      prefix_with(*prefixes)

      Apply the prefixes to the query and return the newly resulting Query.

      Parameters:*prefixes – optional prefixes, typically strings, not using any commas. This is particularly useful for MySQL keywords.

      e.g.:

      query = sess.query(User.name).\
          prefix_with('HIGH_PRIORITY').\
          prefix_with('SQL_SMALL_RESULT', 'ALL')

      Would render:

      SELECT HIGH_PRIORITY SQL_SMALL_RESULT ALL users.name AS users_name
      FROM users

      New in version 0.7.7.

      reset_joinpoint()

      Return a new Query, where the “join point” has been reset back to the base FROM entities of the query.

      This method is usually used in conjunction with the aliased=True feature of the join() method. See the example in join() for how this is used.

      scalar()

      Return the first element of the first result or None if no rows present. If multiple rows are returned, raises MultipleResultsFound.

      >>> session.query(Item).scalar()
      <Item>
      >>> session.query(Item.id).scalar()
      1
      >>> session.query(Item.id).filter(Item.id < 0).scalar()
      None
      >>> session.query(Item.id, Item.name).scalar()
      1
      >>> session.query(func.count(Parent.id)).scalar()
      20

      This results in an execution of the underlying query.

      select_entity_from(from_obj)

      Set the FROM clause of this Query to a core selectable, applying it as a replacement FROM clause for corresponding mapped entities.

      This method is currently equivalent to the Query.select_from() method, but in 0.9 these two methods will diverge in functionality.

      In addition to changing the FROM list, the method will also apply the given selectable to replace the FROM which the selected entities would normally select from.

      The given from_obj must be an instance of a FromClause, e.g. a select() or Alias construct.

      An example would be a Query that selects User entities, but uses Query.select_entity_from() to have the entities selected from a select() construct instead of the base user table:

      select_stmt = select([User]).where(User.id == 7)
      
      q = session.query(User).\
              select_entity_from(select_stmt).\
              filter(User.name == 'ed')

      The query generated will select User entities directly from the given select() construct, and will be:

      SELECT anon_1.id AS anon_1_id, anon_1.name AS anon_1_name
      FROM (SELECT "user".id AS id, "user".name AS name
      FROM "user"
      WHERE "user".id = :id_1) AS anon_1
      WHERE anon_1.name = :name_1

      Notice above that even the WHERE criterion was “adapted” such that the anon_1 subquery effectively replaces all references to the user table, except for the one that it refers to internally.

      Compare this to Query.select_from(), which as of version 0.9, does not affect existing entities. The statement below:

      q = session.query(User).\
              select_from(select_stmt).\
              filter(User.name == 'ed')

      Produces SQL where both the user table as well as the select_stmt construct are present as separate elements in the FROM clause. No “adaptation” of the user table is applied:

      SELECT "user".id AS user_id, "user".name AS user_name
      FROM "user", (SELECT "user".id AS id, "user".name AS name
      FROM "user"
      WHERE "user".id = :id_1) AS anon_1
      WHERE "user".name = :name_1

      Query.select_entity_from() maintains an older behavior of Query.select_from(). In modern usage, similar results can also be achieved using aliased():

      select_stmt = select([User]).where(User.id == 7)
      user_from_select = aliased(User, select_stmt.alias())
      
      q = session.query(user_from_select)
      Parameters:from_obj – a FromClause object that will replace the FROM clause of this Query.

      New in version 0.8.2: Query.select_entity_from() was added to specify the specific behavior of entity replacement, however the Query.select_from() maintains this behavior as well until 0.9.

      select_from(*from_obj)

      Set the FROM clause of this Query explicitly.

      Query.select_from() is often used in conjunction with Query.join() in order to control which entity is selected from on the “left” side of the join.

      The entity or selectable object here effectively replaces the “left edge” of any calls to join(), when no joinpoint is otherwise established - usually, the default “join point” is the leftmost entity in the Query object’s list of entities to be selected.

      A typical example:

      q = session.query(Address).select_from(User).\
          join(User.addresses).\
          filter(User.name == 'ed')

      Which produces SQL equivalent to:

      SELECT address.* FROM user
      JOIN address ON user.id=address.user_id
      WHERE user.name = :name_1
      Parameters:*from_obj – collection of one or more entities to apply to the FROM clause. Entities can be mapped classes, AliasedClass objects, Mapper objects as well as core FromClause elements like subqueries.

      Note

      Query.select_from() features a deprecated behavior whereby when passed a FromClause element, such as a select construct, it will apply that select construct to replace the FROM clause that an existing entity is joined from. This behavior is being removed in SQLAlchemy 0.9, to be replaced with the Query.select_entity_from() method. Applications which rely on this behavior to re-base query entities to an arbitrary selectable should transition to this method before upgrading to 0.9.

      selectable

      Return the Select object emitted by this Query.

      Used for inspect() compatibility, this is equivalent to:

      query.enable_eagerloads(False).with_labels().statement
      slice(start, stop)

      apply LIMIT/OFFSET to the Query based on a range and return the newly resulting Query.
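
      For instance, a hypothetical query selecting rows 5 through 9 of an ordered result against a mapped User class:

      q = session.query(User).order_by(User.id).slice(5, 10)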

      statement

      The full SELECT statement represented by this Query.

      The statement by default will not have disambiguating labels applied to the construct unless with_labels(True) is called first.

      subquery(name=None, with_labels=False, reduce_columns=False)

      return the full SELECT statement represented by this Query, embedded within an Alias.

      Eager JOIN generation within the query is disabled.

      Parameters:
      • name – string name to be assigned as the alias; this is passed through to FromClause.alias(). If None, a name will be deterministically generated at compile time.
      • with_labels – if True, with_labels() will be called on the Query first to apply table-qualified labels to all columns.
      • reduce_columns

        if True, Select.reduce_columns() will be called on the resulting select() construct, to remove same-named columns where one also refers to the other via foreign key or WHERE clause equivalence.

        Changed in version 0.8: the with_labels and reduce_columns keyword arguments were added.
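
      A hypothetical example embedding a Query as a subquery within an enclosing query, assuming mapped User and Address classes:

      from sqlalchemy import func

      address_count = session.query(
              Address.user_id,
              func.count(Address.id).label('count')).\
          group_by(Address.user_id).\
          subquery()

      q = session.query(User, address_count.c.count).\
          join(address_count, User.id == address_count.c.user_id)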

      union(*q)

      Produce a UNION of this Query against one or more queries.

      e.g.:

      q1 = sess.query(SomeClass).filter(SomeClass.foo=='bar')
      q2 = sess.query(SomeClass).filter(SomeClass.bar=='foo')
      
      q3 = q1.union(q2)

      The method accepts multiple Query objects so as to control the level of nesting. A series of union() calls such as:

      x.union(y).union(z).all()

      will nest on each union(), and produces:

      SELECT * FROM (SELECT * FROM (SELECT * FROM X UNION
                      SELECT * FROM y) UNION SELECT * FROM Z)

      Whereas:

      x.union(y, z).all()

      produces:

      SELECT * FROM (SELECT * FROM X UNION SELECT * FROM y UNION
                      SELECT * FROM Z)

      Note that many database backends do not allow ORDER BY to be rendered on a query called within UNION, EXCEPT, etc. To disable all ORDER BY clauses including those configured on mappers, issue query.order_by(None) - the resulting Query object will not render ORDER BY within its SELECT statement.

      union_all(*q)

      Produce a UNION ALL of this Query against one or more queries.

      Works the same way as union(). See that method for usage examples.

      update(values, synchronize_session='evaluate')

      Perform a bulk update query.

      Updates rows matched by this query in the database.

      Parameters:
      • values – a dictionary with attributes names as keys and literal values or sql expressions as values.
      • synchronize_session

        chooses the strategy to update the attributes on objects in the session. Valid values are:

        False - don’t synchronize the session. This option is the most efficient and is reliable once the session is expired, which typically occurs after a commit(), or explicitly using expire_all(). Before the expiration, updated objects may still remain in the session with stale values on their attributes, which can lead to confusing results.

        'fetch' - performs a select query before the update to find objects that are matched by the update query. The updated attributes are expired on matched objects.

        'evaluate' - Evaluate the Query’s criteria in Python straight on the objects in the session. If evaluation of the criteria isn’t implemented, an exception is raised.

        The expression evaluator currently doesn’t account for differing string collations between the database and Python.

      Returns:

      the count of rows matched as returned by the database’s “row count” feature.

      This method has several key caveats:

      • The method does not offer in-Python cascading of relationships - it is assumed that ON UPDATE CASCADE is configured for any foreign key references which require it, otherwise the database may emit an integrity violation if foreign key references are being enforced.

        After the UPDATE, dependent objects in the Session which were impacted by an ON UPDATE CASCADE may not contain the current state; this issue is resolved once the Session is expired, which normally occurs upon Session.commit() or can be forced by using Session.expire_all().

      • As of 0.8, this method will support multiple table updates, as detailed in Multiple Table Updates, and this behavior does extend to support updates of joined-inheritance and other multiple table mappings. However, the join condition of an inheritance mapper is currently not automatically rendered. Care must be taken in any multiple-table update to explicitly include the joining condition between those tables, even in mappings where this is normally automatic. E.g. if a class Engineer subclasses Employee, an UPDATE of the Engineer local table using criteria against the Employee local table might look like:

        session.query(Engineer).\
            filter(Engineer.id == Employee.id).\
            filter(Employee.name == 'dilbert').\
            update({"engineer_type": "programmer"})
      • The MapperEvents.before_update() and MapperEvents.after_update() events are not invoked from this method. Instead, the SessionEvents.after_bulk_update() method is provided to act upon a mass UPDATE of entity rows.

      See also

      Query.delete()

      Inserts, Updates and Deletes - Core SQL tutorial

      value(column)

      Return a scalar result corresponding to the given column expression.

      values(*columns)

      Return an iterator yielding result tuples corresponding to the given list of columns
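
      Hypothetical examples against a mapped User class; values() iterates tuples without constructing full entities, while the preceding value() method returns a single scalar:

      # iterate (id, name) tuples
      for user_id, name in session.query(User).values(User.id, User.name):
          print(user_id, name)

      # single scalar from the first matching row
      name = session.query(User).filter(User.id == 5).value(User.name)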

      whereclause

      A readonly attribute which returns the current WHERE criterion for this Query.

      This returned value is a SQL expression construct, or None if no criterion has been established.

      with_entities(*entities)

      Return a new Query replacing the SELECT list with the given entities.

      e.g.:

      # Users, filtered on some arbitrary criterion
      # and then ordered by related email address
      q = session.query(User).\
                  join(User.address).\
                  filter(User.name.like('%ed%')).\
                  order_by(Address.email)
      
      # given *only* User.id==5, Address.email, and 'q', what
      # would the *next* User in the result be ?
      subq = q.with_entities(Address.email).\
                  order_by(None).\
                  filter(User.id==5).\
                  subquery()
      q = q.join((subq, subq.c.email < Address.email)).\
                  limit(1)

      New in version 0.6.5.

      with_hint(selectable, text, dialect_name='*')

      Add an indexing hint for the given entity or selectable to this Query.

      Functionality is passed straight through to with_hint(), with the addition that selectable can be a Table, Alias, or ORM entity / mapped class, etc.

      with_labels()

      Apply column labels to the return value of Query.statement.

      Indicates that this Query’s statement accessor should return a SELECT statement that applies labels to all columns in the form <tablename>_<columnname>; this is commonly used to disambiguate columns from multiple tables which have the same name.

      When the Query actually issues SQL to load rows, it always uses column labeling.
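
      A hypothetical example retrieving a labeled SELECT, e.g. for embedding into Core constructs, assuming mapped User and Address classes:

      stmt = session.query(User, Address).\
          join(User.addresses).\
          with_labels().statement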

      with_lockmode(mode)

      Return a new Query object with the specified locking mode.

      Parameters:mode

      a string representing the desired locking mode. A corresponding value is passed to the for_update parameter of select() when the query is executed. Valid values are:

      'update' - passes for_update=True, which translates to FOR UPDATE (standard SQL, supported by most dialects)

      'update_nowait' - passes for_update='nowait', which translates to FOR UPDATE NOWAIT (supported by Oracle, PostgreSQL 8.1 upwards)

      'read' - passes for_update='read', which translates to LOCK IN SHARE MODE (for MySQL), and FOR SHARE (for PostgreSQL)

      'read_nowait' - passes for_update='read_nowait', which translates to FOR SHARE NOWAIT (supported by PostgreSQL).

      New in version 0.7.7: FOR SHARE and FOR SHARE NOWAIT (PostgreSQL).
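
      A hypothetical example emitting SELECT ... FOR UPDATE for a single User row:

      user = session.query(User).\
          filter(User.id == 5).\
          with_lockmode('update').one()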

      with_parent(instance, property=None)

      Add filtering criterion that relates the given instance to a child object or collection, using its attribute state as well as an established relationship() configuration.

      The method uses the with_parent() function to generate the clause, the result of which is passed to Query.filter().

      Parameters are the same as with_parent(), with the exception that the given property can be None, in which case a search is performed against this Query object’s target mapper.
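
      A hypothetical example selecting Address rows related to a particular persistent User instance along the User.addresses relationship; some_user is assumed:

      addresses = session.query(Address).\
          with_parent(some_user, User.addresses).all()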

      with_polymorphic(cls_or_mappers, selectable=None, polymorphic_on=None)

      Load columns for inheriting classes.

      Query.with_polymorphic() applies transformations to the “main” mapped class represented by this Query. The “main” mapped class here means the Query object’s first argument is a full class, i.e. session.query(SomeClass). These transformations allow additional tables to be present in the FROM clause so that columns for a joined-inheritance subclass are available in the query, both for the purposes of load-time efficiency as well as the ability to use these columns at query time.

      See the documentation section Basic Control of Which Tables are Queried for details on how this method is used.

      Changed in version 0.8: A new and more flexible function orm.with_polymorphic() supersedes Query.with_polymorphic(), as it can apply the equivalent functionality to any set of columns or classes in the Query, not just the “zero mapper”. See that function for a description of arguments.

      with_session(session)

      Return a Query that will use the given Session.
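
      For example, a hedged sketch re-executing an existing Query against a different Session; other_session is assumed:

      q = session.query(User).filter(User.name == 'ed')
      results = q.with_session(other_session).all()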

      with_transformation(fn)

      Return a new Query object transformed by the given function.

      E.g.:

      def filter_something(criterion):
          def transform(q):
              return q.filter(criterion)
          return transform
      
      q = q.with_transformation(filter_something(x==5))

      This allows ad-hoc recipes to be created for Query objects. See the example at Building Transformers.

      New in version 0.7.4.

      yield_per(count)

      Yield only count rows at a time.

      WARNING: use this method with caution; if the same instance is present in more than one batch of rows, end-user changes to attributes will be overwritten.

      In particular, it’s usually impossible to use this setting with eagerly loaded collections (i.e. any lazy=’joined’ or ‘subquery’) since those collections will be cleared for a new load when encountered in a subsequent result batch. In the case of ‘subquery’ loading, the full result for all rows is fetched which generally defeats the purpose of yield_per().

      Also note that while yield_per() will set the stream_results execution option to True, currently this is only understood by psycopg2 dialect which will stream results using server side cursors instead of pre-buffer all rows for this query. Other DBAPIs pre-buffer all rows before making them available.
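
      A hypothetical example streaming a large result set in batches of 100 rows; process() is a placeholder for application logic:

      for user in session.query(User).yield_per(100):
          process(user)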

      ORM-Specific Query Constructs

      sqlalchemy.orm.aliased(element, alias=None, name=None, adapt_on_names=False)

      Produce an alias of the given element, usually an AliasedClass instance.

      E.g.:

      my_alias = aliased(MyClass)
      
      session.query(MyClass, my_alias).filter(MyClass.id > my_alias.id)

      The aliased() function is used to create an ad-hoc mapping of a mapped class to a new selectable. By default, a selectable is generated from the normally mapped selectable (typically a Table) using the FromClause.alias() method. However, aliased() can also be used to link the class to a new select() statement. Also, the with_polymorphic() function is a variant of aliased() that is intended to specify a so-called “polymorphic selectable”, that corresponds to the union of several joined-inheritance subclasses at once.

      For convenience, the aliased() function also accepts plain FromClause constructs, such as a Table or select() construct. In those cases, the FromClause.alias() method is called on the object and the new Alias object returned. The returned Alias is not ORM-mapped in this case.

      Parameters:
      • element – element to be aliased. Is normally a mapped class, but for convenience can also be a FromClause element.
      • alias – Optional selectable unit to map the element to. This should normally be a Alias object corresponding to the Table to which the class is mapped, or to a select() construct that is compatible with the mapping. By default, a simple anonymous alias of the mapped table is generated.
      • name – optional string name to use for the alias, if not specified by the alias parameter. The name, among other things, forms the attribute name that will be accessible via tuples returned by a Query object.
      • adapt_on_names

        if True, more liberal “matching” will be used when mapping the mapped columns of the ORM entity to those of the given selectable - a name-based match will be performed if the given selectable doesn’t otherwise have a column that corresponds to one on the entity. The use case for this is when associating an entity with some derived selectable such as one that uses aggregate functions:

        class UnitPrice(Base):
            __tablename__ = 'unit_price'
            ...
            unit_id = Column(Integer)
            price = Column(Numeric)
        
        aggregated_unit_price = Session.query(
                                    func.sum(UnitPrice.price).label('price')
                                ).group_by(UnitPrice.unit_id).subquery()
        
        aggregated_unit_price = aliased(UnitPrice,
                    alias=aggregated_unit_price, adapt_on_names=True)

        Above, functions on aggregated_unit_price which refer to .price will return the func.sum(UnitPrice.price).label('price') column, as it is matched on the name “price”. Ordinarily, the “price” function wouldn’t have any “column correspondence” to the actual UnitPrice.price column as it is not a proxy of the original.

        New in version 0.7.3.

      class sqlalchemy.orm.util.AliasedClass(cls, alias=None, name=None, adapt_on_names=False, with_polymorphic_mappers=(), with_polymorphic_discriminator=None, base_alias=None, use_mapper_path=False)

      Represents an “aliased” form of a mapped class for usage with Query.

      The ORM equivalent of a sqlalchemy.sql.expression.alias() construct, this object mimics the mapped class using a __getattr__ scheme and maintains a reference to a real Alias object.

      Usage is via the orm.aliased() function, or alternatively via the orm.with_polymorphic() function.

      Usage example:

      # find all pairs of users with the same name
      user_alias = aliased(User)
      session.query(User, user_alias).\
                      join((user_alias, User.id > user_alias.id)).\
                      filter(User.name==user_alias.name)

      The resulting object is an instance of AliasedClass. This object implements an attribute scheme which produces the same attribute and method interface as the original mapped class, allowing AliasedClass to be compatible with any attribute technique which works on the original class, including hybrid attributes (see Hybrid Attributes).

      The AliasedClass can be inspected for its underlying Mapper, aliased selectable, and other information using inspect():

      from sqlalchemy import inspect
      my_alias = aliased(MyClass)
      insp = inspect(my_alias)

      The resulting inspection object is an instance of AliasedInsp.

      See aliased() and with_polymorphic() for construction argument descriptions.

      class sqlalchemy.orm.util.AliasedInsp(entity, mapper, selectable, name, with_polymorphic_mappers, polymorphic_on, _base_alias, _use_mapper_path)

      Bases: sqlalchemy.orm.interfaces._InspectionAttr

      Provide an inspection interface for an AliasedClass object.

      The AliasedInsp object is returned given an AliasedClass using the inspect() function:

      from sqlalchemy import inspect
      from sqlalchemy.orm import aliased
      
      my_alias = aliased(MyMappedClass)
      insp = inspect(my_alias)

      Attributes on AliasedInsp include:

      • entity - the AliasedClass represented.
      • mapper - the Mapper mapping the underlying class.
      • selectable - the Alias construct which ultimately represents an aliased Table or Select construct.
      • name - the name of the alias. Also is used as the attribute name when returned in a result tuple from Query.
      • with_polymorphic_mappers - collection of Mapper objects indicating all those mappers expressed in the select construct for the AliasedClass.
      • polymorphic_on - an alternate column or SQL expression which will be used as the “discriminator” for a polymorphic load.
      class sqlalchemy.util.KeyedTuple

      Bases: __builtin__.tuple

      tuple subclass that adds labeled names.

      E.g.:

      >>> k = KeyedTuple([1, 2, 3], labels=["one", "two", "three"])
      >>> k.one
      1
      >>> k.two
      2

      Result rows returned by Query that contain multiple ORM entities and/or column expressions make use of this class to return rows.
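
      For example, a Query against multiple entities or columns returns KeyedTuple rows whose elements can be accessed positionally or by label (assuming the User/Address mapping used elsewhere in this documentation):

      row = session.query(User.name, Address.email).join(Address).first()
      print row.name, row.email   # access by label
      print row[0], row[1]        # ordinary tuple access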

      The KeyedTuple exhibits similar behavior to the collections.namedtuple() construct provided in the Python standard library; however, it is architected very differently. Unlike collections.namedtuple(), KeyedTuple does not rely on the creation of custom subtypes in order to represent a new series of keys; instead, each KeyedTuple instance receives its list of keys in place. The subtype approach of collections.namedtuple() introduces significant complexity and performance overhead, which is not necessary for the Query object’s use case.

      Changed in version 0.8: Compatibility methods with collections.namedtuple() have been added including KeyedTuple._fields and KeyedTuple._asdict().

      See also

      Querying

      _asdict()

      Return the contents of this KeyedTuple as a dictionary.

      This method provides compatibility with collections.namedtuple(), with the exception that the dictionary returned is not ordered.

      New in version 0.8.
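
      Continuing the KeyedTuple example above, a brief sketch (the ordering of keys in the returned dictionary may vary):

      >>> k._asdict()
      {'three': 3, 'two': 2, 'one': 1}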

      _fields

      Return a tuple of string key names for this KeyedTuple.

      This method provides compatibility with collections.namedtuple().

      New in version 0.8.

      keys()

      Return a list of string key names for this KeyedTuple.

      sqlalchemy.orm.join(left, right, onclause=None, isouter=False, join_to_left=None)

      Produce an inner join between left and right clauses.

      orm.join() is an extension to the core join interface provided by sql.expression.join(), where the left and right selectables may be not only core selectable objects such as Table, but also mapped classes or AliasedClass instances. The “on” clause can be a SQL expression, or an attribute or string name referencing a configured relationship().

      orm.join() is not commonly needed in modern usage, as its functionality is encapsulated within that of the Query.join() method, which features a significant amount of automation beyond orm.join() by itself. Explicit usage of orm.join() with Query involves usage of the Query.select_from() method, as in:

      from sqlalchemy.orm import join
      session.query(User).\
          select_from(join(User, Address, User.addresses)).\
          filter(Address.email_address=='foo@bar.com')

      In modern SQLAlchemy the above join can be written more succinctly as:

      session.query(User).\
              join(User.addresses).\
              filter(Address.email_address=='foo@bar.com')

      See Query.join() for information on modern usage of ORM level joins.

      Changed in version 0.8.1: The join_to_left parameter is no longer used, and is deprecated.

      sqlalchemy.orm.outerjoin(left, right, onclause=None, join_to_left=None)

      Produce a left outer join between left and right clauses.

      This is the “outer join” version of the orm.join() function, featuring the same behavior except that an OUTER JOIN is generated. See that function’s documentation for other usage details.
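
      As a sketch, the orm.join() example above can be written as an outer join instead, using the same User/Address mapping:

      from sqlalchemy.orm import outerjoin

      session.query(User).\
          select_from(outerjoin(User, Address, User.addresses)).\
          filter(Address.email_address == None)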

      sqlalchemy.orm.with_parent(instance, prop)

      Create filtering criterion that relates this query’s primary entity to the given related instance, using established relationship() configuration.

      The SQL rendered is the same as that rendered when a lazy loader would fire off from the given parent on that attribute, meaning that the appropriate state is taken from the parent object in Python without the need to render joins to the parent table in the rendered statement.

      Changed in version 0.6.4: This method accepts parent instances in all persistence states, including transient, persistent, and detached. Only the requisite primary key/foreign key attributes need to be populated. Previous versions didn’t work with transient instances.

      Parameters:
      • instance – An instance which has some relationship().
      • property – String property name, or class-bound attribute, which indicates what relationship from the instance should be used to reconcile the parent/child relationship.
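
      For example, to select Address objects which belong to a particular User instance (a sketch, using the User/Address mapping seen elsewhere in this documentation):

      from sqlalchemy.orm import with_parent

      someuser = session.query(User).first()
      session.query(Address).filter(with_parent(someuser, User.addresses))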

      Relationship Configuration

      This section describes the relationship() function and in depth discussion of its usage. The reference material here continues into the next section, Collection Configuration and Techniques, which has additional detail on configuration of collections via relationship().

      Basic Relational Patterns

      A quick walkthrough of the basic relational patterns.

      The imports used for each of the following sections are as follows:

      from sqlalchemy import Table, Column, Integer, ForeignKey
      from sqlalchemy.orm import relationship, backref
      from sqlalchemy.ext.declarative import declarative_base
      
      Base = declarative_base()

      One To Many

      A one to many relationship places a foreign key on the child table referencing the parent. relationship() is then specified on the parent, as referencing a collection of items represented by the child:

      class Parent(Base):
          __tablename__ = 'parent'
          id = Column(Integer, primary_key=True)
          children = relationship("Child")
      
      class Child(Base):
          __tablename__ = 'child'
          id = Column(Integer, primary_key=True)
          parent_id = Column(Integer, ForeignKey('parent.id'))

      To establish a bidirectional relationship in one-to-many, where the “reverse” side is a many to one, specify the backref option:

      class Parent(Base):
          __tablename__ = 'parent'
          id = Column(Integer, primary_key=True)
          children = relationship("Child", backref="parent")
      
      class Child(Base):
          __tablename__ = 'child'
          id = Column(Integer, primary_key=True)
          parent_id = Column(Integer, ForeignKey('parent.id'))

      Child will get a parent attribute with many-to-one semantics.
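
      A brief sketch of the resulting in-Python behavior:

      >>> p = Parent()
      >>> c = Child()
      >>> p.children.append(c)
      >>> c.parent is p
      True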

      Many To One

      Many to one places a foreign key in the parent table referencing the child. relationship() is declared on the parent, where a new scalar-holding attribute will be created:

      class Parent(Base):
          __tablename__ = 'parent'
          id = Column(Integer, primary_key=True)
          child_id = Column(Integer, ForeignKey('child.id'))
          child = relationship("Child")
      
      class Child(Base):
          __tablename__ = 'child'
          id = Column(Integer, primary_key=True)

      Bidirectional behavior is achieved by specifying backref="parents", which will place a one-to-many collection on the Child class:

      class Parent(Base):
          __tablename__ = 'parent'
          id = Column(Integer, primary_key=True)
          child_id = Column(Integer, ForeignKey('child.id'))
          child = relationship("Child", backref="parents")

      One To One

      One To One is essentially a bidirectional relationship with a scalar attribute on both sides. To achieve this, the uselist=False flag indicates the placement of a scalar attribute instead of a collection on the “many” side of the relationship. To convert one-to-many into one-to-one:

      class Parent(Base):
          __tablename__ = 'parent'
          id = Column(Integer, primary_key=True)
          child = relationship("Child", uselist=False, backref="parent")
      
      class Child(Base):
          __tablename__ = 'child'
          id = Column(Integer, primary_key=True)
          parent_id = Column(Integer, ForeignKey('parent.id'))

      Or to turn a one-to-many backref into one-to-one, use the backref() function to provide arguments for the reverse side:

      class Parent(Base):
          __tablename__ = 'parent'
          id = Column(Integer, primary_key=True)
          child_id = Column(Integer, ForeignKey('child.id'))
          child = relationship("Child", backref=backref("parent", uselist=False))
      
      class Child(Base):
          __tablename__ = 'child'
          id = Column(Integer, primary_key=True)

      Many To Many

      Many to Many adds an association table between two classes. The association table is indicated by the secondary argument to relationship(). Usually, the Table uses the MetaData object associated with the declarative base class, so that the ForeignKey directives can locate the remote tables with which to link:

      association_table = Table('association', Base.metadata,
          Column('left_id', Integer, ForeignKey('left.id')),
          Column('right_id', Integer, ForeignKey('right.id'))
      )
      
      class Parent(Base):
          __tablename__ = 'left'
          id = Column(Integer, primary_key=True)
          children = relationship("Child",
                          secondary=association_table)
      
      class Child(Base):
          __tablename__ = 'right'
          id = Column(Integer, primary_key=True)

      For a bidirectional relationship, both sides of the relationship contain a collection. The backref keyword will automatically use the same secondary argument for the reverse relationship:

      association_table = Table('association', Base.metadata,
          Column('left_id', Integer, ForeignKey('left.id')),
          Column('right_id', Integer, ForeignKey('right.id'))
      )
      
      class Parent(Base):
          __tablename__ = 'left'
          id = Column(Integer, primary_key=True)
          children = relationship("Child",
                          secondary=association_table,
                          backref="parents")
      
      class Child(Base):
          __tablename__ = 'right'
          id = Column(Integer, primary_key=True)

      The secondary argument of relationship() also accepts a callable that returns the ultimate argument, which is evaluated only when mappers are first used. Using this, we can define the association_table at a later point, as long as it’s available to the callable after all module initialization is complete:

      class Parent(Base):
          __tablename__ = 'left'
          id = Column(Integer, primary_key=True)
          children = relationship("Child",
                          secondary=lambda: association_table,
                          backref="parents")

      With the declarative extension in use, the traditional “string name of the table” is accepted as well, matching the name of the table as stored in Base.metadata.tables:

      class Parent(Base):
          __tablename__ = 'left'
          id = Column(Integer, primary_key=True)
          children = relationship("Child",
                          secondary="association",
                          backref="parents")

      Deleting Rows from the Many to Many Table

      A behavior which is unique to the secondary argument to relationship() is that the Table which is specified here is automatically subject to INSERT and DELETE statements, as objects are added or removed from the collection. There is no need to delete from this table manually. The act of removing a record from the collection will have the effect of the row being deleted on flush:

      # row will be deleted from the "secondary" table
      # automatically
      myparent.children.remove(somechild)

      A question which often arises is how the row in the “secondary” table can be deleted when the child object is handed directly to Session.delete():

      session.delete(somechild)

      There are several possibilities here:

      • If there is a relationship() from Parent to Child, but there is not a reverse-relationship that links a particular Child to each Parent, SQLAlchemy will not have any awareness that when deleting this particular Child object, it needs to maintain the “secondary” table that links it to the Parent. No delete of the “secondary” table will occur.
      • If there is a relationship that links a particular Child to each Parent, suppose it’s called Child.parents, SQLAlchemy by default will load in the Child.parents collection to locate all Parent objects, and remove each row from the “secondary” table which establishes this link. Note that this relationship does not need to be bidirectional; SQLAlchemy is strictly looking at every relationship() associated with the Child object being deleted.
      • A higher performing option here is to use ON DELETE CASCADE directives with the foreign keys used by the database. Assuming the database supports this feature, the database itself can be made to automatically delete rows in the “secondary” table as referencing rows in “child” are deleted. SQLAlchemy can be instructed to forego actively loading in the Child.parents collection in this case using the passive_deletes=True directive on relationship(); see Using Passive Deletes for more details on this, and the configuration sketch below.
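
      A minimal sketch of this last approach, assuming the many-to-many Parent/Child mapping from earlier in this section, with ON DELETE CASCADE applied to the association table’s foreign keys (the precise DDL support is database-dependent):

      from sqlalchemy.orm import backref

      association_table = Table('association', Base.metadata,
          Column('left_id', Integer,
                 ForeignKey('left.id', ondelete="CASCADE")),
          Column('right_id', Integer,
                 ForeignKey('right.id', ondelete="CASCADE"))
      )

      class Parent(Base):
          __tablename__ = 'left'
          id = Column(Integer, primary_key=True)

          # passive_deletes=True on the "parents" side instructs SQLAlchemy
          # not to load Child.parents when a Child is deleted, letting the
          # database's ON DELETE CASCADE remove the association rows
          children = relationship("Child",
                          secondary=association_table,
                          backref=backref("parents", passive_deletes=True))

      class Child(Base):
          __tablename__ = 'right'
          id = Column(Integer, primary_key=True)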

      Note again, these behaviors are only relevant to the secondary option used with relationship(). If dealing with association tables that are mapped explicitly and are not present in the secondary option of a relevant relationship(), cascade rules can be used instead to automatically delete entities in reaction to a related entity being deleted - see Cascades for information on this feature.

      Association Object

      The association object pattern is a variant on many-to-many: it’s used when your association table contains additional columns beyond those which are foreign keys to the left and right tables. Instead of using the secondary argument, you map a new class directly to the association table. The left side of the relationship references the association object via one-to-many, and the association class references the right side via many-to-one. Below we illustrate an association table mapped to the Association class which includes a column called extra_data, which is a string value that is stored along with each association between Parent and Child:

      class Association(Base):
          __tablename__ = 'association'
          left_id = Column(Integer, ForeignKey('left.id'), primary_key=True)
          right_id = Column(Integer, ForeignKey('right.id'), primary_key=True)
          extra_data = Column(String(50))
          child = relationship("Child")
      
      class Parent(Base):
          __tablename__ = 'left'
          id = Column(Integer, primary_key=True)
          children = relationship("Association")
      
      class Child(Base):
          __tablename__ = 'right'
          id = Column(Integer, primary_key=True)

      The bidirectional version adds backrefs to both relationships:

      class Association(Base):
          __tablename__ = 'association'
          left_id = Column(Integer, ForeignKey('left.id'), primary_key=True)
          right_id = Column(Integer, ForeignKey('right.id'), primary_key=True)
          extra_data = Column(String(50))
          child = relationship("Child", backref="parent_assocs")
      
      class Parent(Base):
          __tablename__ = 'left'
          id = Column(Integer, primary_key=True)
          children = relationship("Association", backref="parent")
      
      class Child(Base):
          __tablename__ = 'right'
          id = Column(Integer, primary_key=True)

      Working with the association pattern in its direct form requires that child objects are associated with an association instance before being appended to the parent; similarly, access from parent to child goes through the association object:

      # create parent, append a child via association
      p = Parent()
      a = Association(extra_data="some data")
      a.child = Child()
      p.children.append(a)
      
      # iterate through child objects via association, including association
      # attributes
      for assoc in p.children:
          print assoc.extra_data
          print assoc.child

      To enhance the association object pattern such that direct access to the Association object is optional, SQLAlchemy provides the Association Proxy extension. This extension allows the configuration of attributes which will access two “hops” with a single access, one “hop” to the associated object, and a second to a target attribute.
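
      As a brief sketch of this extension (the attribute name child_objects below is illustrative, not part of the pattern itself):

      from sqlalchemy.ext.associationproxy import association_proxy

      class Parent(Base):
          __tablename__ = 'left'
          id = Column(Integer, primary_key=True)
          children = relationship("Association", backref="parent")

          # "child_objects" reads through Parent.children -> Association.child
          # in a single access
          child_objects = association_proxy("children", "child")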

      Note

      When using the association object pattern, it is advisable that the association-mapped table not be used as the secondary argument on a relationship() elsewhere, unless that relationship() contains the option viewonly=True. SQLAlchemy otherwise may attempt to emit redundant INSERT and DELETE statements on the same table, if similar state is detected on the related attribute as well as the associated object.

      Adjacency List Relationships

      The adjacency list pattern is a common relational pattern whereby a table contains a foreign key reference to itself. This is the most common way to represent hierarchical data in flat tables. Other methods include nested sets, sometimes called “modified preorder”, as well as materialized path. Despite the appeal that modified preorder has when evaluated for its fluency within SQL queries, the adjacency list model is probably the most appropriate pattern for the large majority of hierarchical storage needs, for reasons of concurrency, reduced complexity, and that modified preorder has little advantage over an application which can fully load subtrees into the application space.

      In this example, we’ll work with a single mapped class called Node, representing a tree structure:

      class Node(Base):
          __tablename__ = 'node'
          id = Column(Integer, primary_key=True)
          parent_id = Column(Integer, ForeignKey('node.id'))
          data = Column(String(50))
          children = relationship("Node")

      With this structure, a graph such as the following:

      root --+---> child1
             +---> child2 --+--> subchild1
             |              +--> subchild2
             +---> child3

      Would be represented with data such as:

      id       parent_id     data
      ---      -------       ----
      1        NULL          root
      2        1             child1
      3        1             child2
      4        3             subchild1
      5        3             subchild2
      6        1             child3

      The relationship() configuration here works in the same way as a “normal” one-to-many relationship, with the exception that the “direction”, i.e. whether the relationship is one-to-many or many-to-one, is assumed by default to be one-to-many. To establish the relationship as many-to-one, an extra directive is added known as remote_side, which is a Column or collection of Column objects that indicate those which should be considered to be “remote”:

      class Node(Base):
          __tablename__ = 'node'
          id = Column(Integer, primary_key=True)
          parent_id = Column(Integer, ForeignKey('node.id'))
          data = Column(String(50))
          parent = relationship("Node", remote_side=[id])

      Where above, the id column is applied as the remote_side of the parent relationship(), thus establishing parent_id as the “local” side, and the relationship then behaves as a many-to-one.

      As always, both directions can be combined into a bidirectional relationship using the backref() function:

      class Node(Base):
          __tablename__ = 'node'
          id = Column(Integer, primary_key=True)
          parent_id = Column(Integer, ForeignKey('node.id'))
          data = Column(String(50))
          children = relationship("Node",
                      backref=backref('parent', remote_side=[id])
                  )

      There are several examples included with SQLAlchemy illustrating self-referential strategies; these include Adjacency List and XML Persistence.

      Composite Adjacency Lists

      A sub-category of the adjacency list relationship is the rare case where a particular column is present on both the “local” and “remote” side of the join condition. An example is the Folder class below; using a composite primary key, the account_id column refers to itself, to indicate sub folders which are within the same account as that of the parent; while folder_id refers to a specific folder within that account:

      class Folder(Base):
          __tablename__ = 'folder'
          __table_args__ = (
            ForeignKeyConstraint(
                ['account_id', 'parent_id'],
                ['folder.account_id', 'folder.folder_id']),
          )
      
          account_id = Column(Integer, primary_key=True)
          folder_id = Column(Integer, primary_key=True)
          parent_id = Column(Integer)
          name = Column(String)
      
          parent_folder = relationship("Folder",
                              backref="child_folders",
                              remote_side=[account_id, folder_id]
                        )

      Above, we pass account_id into the remote_side list. relationship() recognizes that the account_id column here is on both sides, and aligns the “remote” column along with the folder_id column, which it recognizes as uniquely present on the “remote” side.

      New in version 0.8: Support for self-referential composite keys in relationship() where a column points to itself.

      Self-Referential Query Strategies

      Querying of self-referential structures works like any other query:

      # get all nodes named 'child2'
      session.query(Node).filter(Node.data=='child2')

      However extra care is needed when attempting to join along the foreign key from one level of the tree to the next. In SQL, a join from a table to itself requires that at least one side of the expression be “aliased” so that it can be unambiguously referred to.

      Recall from Using Aliases in the ORM tutorial that the orm.aliased() construct is normally used to provide an “alias” of an ORM entity. Joining from Node to itself using this technique looks like:

      from sqlalchemy.orm import aliased
      
      nodealias = aliased(Node)
      session.query(Node).filter(Node.data=='subchild1').\
                      join(nodealias, Node.parent).\
                      filter(nodealias.data=="child2").\
                      all()
      

      Query.join() also includes a feature known as aliased=True that can shorten the verbosity of self-referential joins, at the expense of query flexibility. This feature performs a similar “aliasing” step to that above, without the need for an explicit entity. Calls to Query.filter() and similar subsequent to the aliased join will adapt the Node entity to be that of the alias:

      session.query(Node).filter(Node.data=='subchild1').\
              join(Node.parent, aliased=True).\
              filter(Node.data=='child2').\
              all()
      

      To add criterion to multiple points along a longer join, add from_joinpoint=True to the additional join() calls:

      # get all nodes named 'subchild1' with a
      # parent named 'child2' and a grandparent 'root'
      session.query(Node).\
              filter(Node.data=='subchild1').\
              join(Node.parent, aliased=True).\
              filter(Node.data=='child2').\
              join(Node.parent, aliased=True, from_joinpoint=True).\
              filter(Node.data=='root').\
              all()
      

      Query.reset_joinpoint() will also remove the “aliasing” from filtering calls:

      session.query(Node).\
              join(Node.children, aliased=True).\
              filter(Node.data == 'foo').\
              reset_joinpoint().\
              filter(Node.data == 'bar')

      For an example of using aliased=True to arbitrarily join along a chain of self-referential nodes, see XML Persistence.

      Configuring Self-Referential Eager Loading

      Eager loading of relationships occurs using joins or outerjoins from parent to child table during a normal query operation, such that the parent and its immediate child collection or reference can be populated from a single SQL statement, or a second statement for all immediate child collections. SQLAlchemy’s joined and subquery eager loading use aliased tables in all cases when joining to related items, so are compatible with self-referential joining. However, to use eager loading with a self-referential relationship, SQLAlchemy needs to be told how many levels deep it should join and/or query; otherwise the eager load will not take place at all. This depth setting is configured via join_depth:

      class Node(Base):
          __tablename__ = 'node'
          id = Column(Integer, primary_key=True)
          parent_id = Column(Integer, ForeignKey('node.id'))
          data = Column(String(50))
          children = relationship("Node",
                          lazy="joined",
                          join_depth=2)
      
      session.query(Node).all()
      

      Linking Relationships with Backref

      The backref keyword argument was first introduced in Object Relational Tutorial, and has been mentioned throughout many of the examples here. What does it actually do? Let’s start with the canonical User and Address scenario:

      from sqlalchemy import Integer, ForeignKey, String, Column
      from sqlalchemy.ext.declarative import declarative_base
      from sqlalchemy.orm import relationship
      
      Base = declarative_base()
      
      class User(Base):
          __tablename__ = 'user'
          id = Column(Integer, primary_key=True)
          name = Column(String)
      
          addresses = relationship("Address", backref="user")
      
      class Address(Base):
          __tablename__ = 'address'
          id = Column(Integer, primary_key=True)
          email = Column(String)
          user_id = Column(Integer, ForeignKey('user.id'))

      The above configuration establishes a collection of Address objects on User called User.addresses. It also establishes a .user attribute on Address which will refer to the parent User object.

      In fact, the backref keyword is only a common shortcut for placing a second relationship onto the Address mapping, including the establishment of an event listener on both sides which will mirror attribute operations in both directions. The above configuration is equivalent to:

      from sqlalchemy import Integer, ForeignKey, String, Column
      from sqlalchemy.ext.declarative import declarative_base
      from sqlalchemy.orm import relationship
      
      Base = declarative_base()
      
      class User(Base):
          __tablename__ = 'user'
          id = Column(Integer, primary_key=True)
          name = Column(String)
      
          addresses = relationship("Address", back_populates="user")
      
      class Address(Base):
          __tablename__ = 'address'
          id = Column(Integer, primary_key=True)
          email = Column(String)
          user_id = Column(Integer, ForeignKey('user.id'))
      
          user = relationship("User", back_populates="addresses")

      Above, we add a .user relationship to Address explicitly. On both relationships, the back_populates directive tells each relationship about the other one, indicating that they should establish “bidirectional” behavior between each other. The primary effect of this configuration is that the relationship adds event handlers to both attributes which have the behavior of “when an append or set event occurs here, set ourselves onto the incoming attribute using this particular attribute name”. The behavior is illustrated as follows. Start with a User and an Address instance. The .addresses collection is empty, and the .user attribute is None:

      >>> u1 = User()
      >>> a1 = Address()
      >>> u1.addresses
      []
      >>> print a1.user
      None

      However, once the Address is appended to the u1.addresses collection, both the collection and the scalar attribute have been populated:

      >>> u1.addresses.append(a1)
      >>> u1.addresses
      [<__main__.Address object at 0x12a6ed0>]
      >>> a1.user
      <__main__.User object at 0x12a6590>

      This behavior of course works in reverse for removal operations, as well as for equivalent operations on both sides; for example, when .user is set back to None, the Address object is removed from the reverse collection:

      >>> a1.user = None
      >>> u1.addresses
      []

      The manipulation of the .addresses collection and the .user attribute occurs entirely in Python without any interaction with the SQL database. Without this behavior, the proper state would be apparent on both sides once the data has been flushed to the database, and later reloaded after a commit or expiration operation occurs. The backref/back_populates behavior has the advantage that common bidirectional operations can reflect the correct state without requiring a database round trip.

      Remember, when the backref keyword is used on a single relationship, it’s exactly the same as if the above two relationships were created individually using back_populates on each.

      Backref Arguments

      We’ve established that the backref keyword is merely a shortcut for building two individual relationship() constructs that refer to each other. Part of the behavior of this shortcut is that certain configurational arguments applied to the relationship() will also be applied to the other direction - namely those arguments that describe the relationship at a schema level, and are unlikely to be different in the reverse direction. The usual case here is a many-to-many relationship() that has a secondary argument, or a one-to-many or many-to-one which has a primaryjoin argument (the primaryjoin argument is discussed in Specifying Alternate Join Conditions). Such as if we limited the list of Address objects to those which start with “tony”:

      from sqlalchemy import Integer, ForeignKey, String, Column
      from sqlalchemy.ext.declarative import declarative_base
      from sqlalchemy.orm import relationship
      
      Base = declarative_base()
      
      class User(Base):
          __tablename__ = 'user'
          id = Column(Integer, primary_key=True)
          name = Column(String)
      
          addresses = relationship("Address",
                          primaryjoin="and_(User.id==Address.user_id, "
                              "Address.email.startswith('tony'))",
                          backref="user")
      
      class Address(Base):
          __tablename__ = 'address'
          id = Column(Integer, primary_key=True)
          email = Column(String)
          user_id = Column(Integer, ForeignKey('user.id'))

      We can observe, by inspecting the resulting property, that both sides of the relationship have this join condition applied:

      >>> print User.addresses.property.primaryjoin
      "user".id = address.user_id AND address.email LIKE :email_1 || '%%'
      >>>
      >>> print Address.user.property.primaryjoin
      "user".id = address.user_id AND address.email LIKE :email_1 || '%%'
      >>>

      This reuse of arguments should pretty much do the “right thing” - it uses only arguments that are applicable, and in the case of a many-to-many relationship, will reverse the usage of primaryjoin and secondaryjoin to correspond to the other direction (see the example in Self-Referential Many-to-Many Relationship for this).

      It’s very often the case however that we’d like to specify arguments that are specific to just the side where we happened to place the “backref”. This includes relationship() arguments like lazy, remote_side, cascade and cascade_backrefs. For this case we use the backref() function in place of a string:

      # <other imports>
      from sqlalchemy.orm import backref
      
      class User(Base):
          __tablename__ = 'user'
          id = Column(Integer, primary_key=True)
          name = Column(String)
      
          addresses = relationship("Address",
                          backref=backref("user", lazy="joined"))

      Where above, we placed a lazy="joined" directive only on the Address.user side, indicating that when a query against Address is made, a join to the User entity should be made automatically which will populate the .user attribute of each returned Address. The backref() function formatted the arguments we gave it into a form that is interpreted by the receiving relationship() as additional arguments to be applied to the new relationship it creates.

      One Way Backrefs

      An unusual case is that of the “one way backref”. This is where the “back-populating” behavior of the backref is only desirable in one direction. An example of this is a collection which contains a filtering primaryjoin condition. We’d like to append items to this collection as needed, and have them populate the “parent” object on the incoming object. However, we’d also like to have items that are not part of the collection, but still have the same “parent” association - these items should never be in the collection.

      Taking our previous example, where we established a primaryjoin that limited the collection only to Address objects whose email address started with the word tony, the usual backref behavior is that all items populate in both directions. We wouldn’t want this behavior for a case like the following:

      >>> u1 = User()
      >>> a1 = Address(email='mary')
      >>> a1.user = u1
      >>> u1.addresses
      [<__main__.Address object at 0x1411910>]

      Above, the Address object that doesn’t match the criterion of “starts with ‘tony’” is present in the addresses collection of u1. After these objects are flushed, the transaction committed and their attributes expired for a re-load, the addresses collection will hit the database on next access and no longer have this Address object present, due to the filtering condition. But we can do away with this unwanted side of the “backref” behavior on the Python side by using two separate relationship() constructs, placing back_populates only on one side:

      from sqlalchemy import Integer, ForeignKey, String, Column
      from sqlalchemy.ext.declarative import declarative_base
      from sqlalchemy.orm import relationship
      
      Base = declarative_base()
      
      class User(Base):
          __tablename__ = 'user'
          id = Column(Integer, primary_key=True)
          name = Column(String)
          addresses = relationship("Address",
                          primaryjoin="and_(User.id==Address.user_id, "
                              "Address.email.startswith('tony'))",
                          back_populates="user")
      
      class Address(Base):
          __tablename__ = 'address'
          id = Column(Integer, primary_key=True)
          email = Column(String)
          user_id = Column(Integer, ForeignKey('user.id'))
          user = relationship("User")

      With the above scenario, appending an Address object to the .addresses collection of a User will always establish the .user attribute on that Address:

      >>> u1 = User()
      >>> a1 = Address(email='tony')
      >>> u1.addresses.append(a1)
      >>> a1.user
      <__main__.User object at 0x1411850>

      However, applying a User to the .user attribute of an Address, will not append the Address object to the collection:

      >>> a2 = Address(email='mary')
      >>> a2.user = u1
      >>> a2 in u1.addresses
      False

      Of course, we’ve disabled some of the usefulness of backref here, in that when we do append an Address that corresponds to the criteria of email.startswith('tony'), it won’t show up in the User.addresses collection until the session is flushed, and the attributes reloaded after a commit or expire operation. While we could consider an attribute event that checks this criterion in Python, this starts to cross the line of duplicating too much SQL behavior in Python. The backref behavior itself is only a slight transgression of this philosophy - SQLAlchemy tries to keep these to a minimum overall.

      Configuring how Relationship Joins

      relationship() will normally create a join between two tables by examining the foreign key relationship between the two tables to determine which columns should be compared. There are a variety of situations where this behavior needs to be customized.

      Handling Multiple Join Paths

      One of the most common situations to deal with is when there are more than one foreign key path between two tables.

      Consider a Customer class that contains two foreign keys to an Address class:

      from sqlalchemy import Integer, ForeignKey, String, Column
      from sqlalchemy.ext.declarative import declarative_base
      from sqlalchemy.orm import relationship
      
      Base = declarative_base()
      
      class Customer(Base):
          __tablename__ = 'customer'
          id = Column(Integer, primary_key=True)
          name = Column(String)
      
          billing_address_id = Column(Integer, ForeignKey("address.id"))
          shipping_address_id = Column(Integer, ForeignKey("address.id"))
      
          billing_address = relationship("Address")
          shipping_address = relationship("Address")
      
      class Address(Base):
          __tablename__ = 'address'
          id = Column(Integer, primary_key=True)
          street = Column(String)
          city = Column(String)
          state = Column(String)
          zip = Column(String)

      The above mapping, when we attempt to use it, will produce the error:

      sqlalchemy.exc.AmbiguousForeignKeysError: Could not determine join
      condition between parent/child tables on relationship
      Customer.billing_address - there are multiple foreign key
      paths linking the tables.  Specify the 'foreign_keys' argument,
      providing a list of those columns which should be
      counted as containing a foreign key reference to the parent table.

      The above message is pretty long. There are many potential messages that relationship() can return, which have been carefully tailored to detect a variety of common configurational issues; most will suggest the additional configuration that’s needed to resolve the ambiguity or other missing information.

      In this case, the message wants us to qualify each relationship() by instructing for each one which foreign key column should be considered, and the appropriate form is as follows:

      class Customer(Base):
          __tablename__ = 'customer'
          id = Column(Integer, primary_key=True)
          name = Column(String)
      
          billing_address_id = Column(Integer, ForeignKey("address.id"))
          shipping_address_id = Column(Integer, ForeignKey("address.id"))
      
          billing_address = relationship("Address", foreign_keys=[billing_address_id])
          shipping_address = relationship("Address", foreign_keys=[shipping_address_id])

      Above, we specify the foreign_keys argument, which is a Column or list of Column objects which indicate those columns to be considered “foreign”, or in other words, the columns that contain a value referring to a parent table. Loading the Customer.billing_address relationship from a Customer object will use the value present in billing_address_id in order to identify the row in Address to be loaded; similarly, shipping_address_id is used for the shipping_address relationship. The linkage of the two columns also plays a role during persistence; the newly generated primary key of a just-inserted Address object will be copied into the appropriate foreign key column of an associated Customer object during a flush.

      When specifying foreign_keys with Declarative, we can also use string names; however, if using a list, it is important that the list is part of the string:

      billing_address = relationship("Address", foreign_keys="[Customer.billing_address_id]")

      In this specific example, the list is not necessary in any case as there’s only one Column we need:

      billing_address = relationship("Address", foreign_keys="Customer.billing_address_id")

      Changed in version 0.8: relationship() can resolve ambiguity between foreign key targets on the basis of the foreign_keys argument alone; the primaryjoin argument is no longer needed in this situation.

      Specifying Alternate Join Conditions

      The default behavior of relationship() when constructing a join is that it equates the value of primary key columns on one side to that of foreign-key-referring columns on the other. We can change this criterion to be anything we’d like using the primaryjoin argument, as well as the secondaryjoin argument in the case when a “secondary” table is used.

      In the example below, using the User class as well as an Address class which stores a street address, we create a relationship boston_addresses which will only load those Address objects which specify a city of “Boston”:

      from sqlalchemy import Integer, ForeignKey, String, Column
      from sqlalchemy.ext.declarative import declarative_base
      from sqlalchemy.orm import relationship
      
      Base = declarative_base()
      
      class User(Base):
          __tablename__ = 'user'
          id = Column(Integer, primary_key=True)
          name = Column(String)
          addresses = relationship("Address",
                          primaryjoin="and_(User.id==Address.user_id, "
                              "Address.city=='Boston')")
      
      class Address(Base):
          __tablename__ = 'address'
          id = Column(Integer, primary_key=True)
          user_id = Column(Integer, ForeignKey('user.id'))
      
          street = Column(String)
          city = Column(String)
          state = Column(String)
          zip = Column(String)

      Within this string SQL expression, we made use of the and_() conjunction construct to establish two distinct predicates for the join condition - joining both the User.id and Address.user_id columns to each other, as well as limiting rows in Address to just city='Boston'. When using Declarative, rudimentary SQL functions like and_() are automatically available in the evaluated namespace of a string relationship() argument.

      The custom criteria we use in a primaryjoin is generally only significant when SQLAlchemy is rendering SQL in order to load or represent this relationship. That is, it’s used in the SQL statement that’s emitted in order to perform a per-attribute lazy load, or when a join is constructed at query time, such as via Query.join(), or via the eager “joined” or “subquery” styles of loading. When in-memory objects are being manipulated, we can place any Address object we’d like into the boston_addresses collection, regardless of what the value of the .city attribute is. The objects will remain present in the collection until the attribute is expired and re-loaded from the database where the criterion is applied. When a flush occurs, the objects inside of boston_addresses will be flushed unconditionally, assigning value of the primary key user.id column onto the foreign-key-holding address.user_id column for each row. The city criteria has no effect here, as the flush process only cares about synchronizing primary key values into referencing foreign key values.

      Creating Custom Foreign Conditions

      Another element of the primary join condition is how those columns considered “foreign” are determined. Usually, some subset of Column objects will specify ForeignKey, or otherwise be part of a ForeignKeyConstraint that’s relevant to the join condition. relationship() looks to this foreign key status as it decides how it should load and persist data for this relationship. However, the primaryjoin argument can be used to create a join condition that doesn’t involve any “schema” level foreign keys. We can combine primaryjoin along with foreign_keys and remote_side explicitly in order to establish such a join.

      Below, a class HostEntry joins to itself, equating the string content column to the ip_address column, which is a Postgresql type called INET. We need to use cast() in order to cast one side of the join to the type of the other:

      from sqlalchemy import cast, String, Column, Integer
      from sqlalchemy.orm import relationship
      from sqlalchemy.dialects.postgresql import INET
      
      from sqlalchemy.ext.declarative import declarative_base
      
      Base = declarative_base()
      
      class HostEntry(Base):
          __tablename__ = 'host_entry'
      
          id = Column(Integer, primary_key=True)
          ip_address = Column(INET)
          content = Column(String(50))
      
          # relationship() using explicit foreign_keys, remote_side
          parent_host = relationship("HostEntry",
                              primaryjoin=ip_address == cast(content, INET),
                              foreign_keys=content,
                              remote_side=ip_address
                          )

      The above relationship will produce a join like:

      SELECT host_entry.id, host_entry.ip_address, host_entry.content
      FROM host_entry JOIN host_entry AS host_entry_1
      ON host_entry_1.ip_address = CAST(host_entry.content AS INET)

      An alternative syntax to the above is to use the foreign() and remote() annotations, inline within the primaryjoin expression. This syntax represents the annotations that relationship() normally applies by itself to the join condition given the foreign_keys and remote_side arguments; the functions are provided in the API in the rare case that relationship() can’t determine the exact location of these features on its own:

      from sqlalchemy.orm import foreign, remote
      
      class HostEntry(Base):
          __tablename__ = 'host_entry'
      
          id = Column(Integer, primary_key=True)
          ip_address = Column(INET)
          content = Column(String(50))
      
          # relationship() using explicit foreign() and remote() annotations
          # in lieu of separate arguments
          parent_host = relationship("HostEntry",
                              primaryjoin=remote(ip_address) == \
                                      cast(foreign(content), INET),
                          )

      Self-Referential Many-to-Many Relationship

      Many to many relationships can be customized by one or both of primaryjoin and secondaryjoin - the latter is significant for a relationship that specifies a many-to-many reference using the secondary argument. A common situation which involves the usage of primaryjoin and secondaryjoin is when establishing a many-to-many relationship from a class to itself, as shown below:

      from sqlalchemy import Integer, ForeignKey, String, Column, Table
      from sqlalchemy.ext.declarative import declarative_base
      from sqlalchemy.orm import relationship
      
      Base = declarative_base()
      
      node_to_node = Table("node_to_node", Base.metadata,
          Column("left_node_id", Integer, ForeignKey("node.id"), primary_key=True),
          Column("right_node_id", Integer, ForeignKey("node.id"), primary_key=True)
      )
      
      class Node(Base):
          __tablename__ = 'node'
          id = Column(Integer, primary_key=True)
          label = Column(String)
          right_nodes = relationship("Node",
                              secondary=node_to_node,
                              primaryjoin=id==node_to_node.c.left_node_id,
                              secondaryjoin=id==node_to_node.c.right_node_id,
                              backref="left_nodes"
          )

      Where above, SQLAlchemy can’t know automatically which columns should connect to which for the right_nodes and left_nodes relationships. The primaryjoin and secondaryjoin arguments establish how we’d like to join to the association table. In the Declarative form above, as we are declaring these conditions within the Python block that corresponds to the Node class, the id variable is available directly as the Column object we wish to join with.

      A classical mapping situation here is similar, where node_to_node can be joined to node.c.id:

      from sqlalchemy import Integer, ForeignKey, String, Column, Table, MetaData
      from sqlalchemy.orm import relationship, mapper
      
      metadata = MetaData()
      
      node_to_node = Table("node_to_node", metadata,
          Column("left_node_id", Integer, ForeignKey("node.id"), primary_key=True),
          Column("right_node_id", Integer, ForeignKey("node.id"), primary_key=True)
      )
      
      node = Table("node", metadata,
          Column('id', Integer, primary_key=True),
          Column('label', String)
      )
      class Node(object):
          pass
      
      mapper(Node, node, properties={
          'right_nodes':relationship(Node,
                              secondary=node_to_node,
                              primaryjoin=node.c.id==node_to_node.c.left_node_id,
                              secondaryjoin=node.c.id==node_to_node.c.right_node_id,
                              backref="left_nodes"
                          )})

      Note that in both examples, the backref keyword specifies a left_nodes backref - when relationship() creates the second relationship in the reverse direction, it’s smart enough to reverse the primaryjoin and secondaryjoin arguments.

      Building Query-Enabled Properties

      Very ambitious custom join conditions may fail to be directly persistable, and in some cases may not even load correctly. To remove the persistence part of the equation, use the flag viewonly=True on the relationship(), which establishes it as a read-only attribute (data written to the collection will be ignored on flush()). However, in extreme cases, consider using a regular Python property in conjunction with Query as follows:

      class User(Base):
          __tablename__ = 'user'
          id = Column(Integer, primary_key=True)
      
          def _get_addresses(self):
              return object_session(self).query(Address).with_parent(self).filter(...).all()
          addresses = property(_get_addresses)

      Rows that point to themselves / Mutually Dependent Rows

      This is a very specific case where relationship() must perform an INSERT and a second UPDATE in order to properly populate a row (and vice versa an UPDATE and DELETE in order to delete without violating foreign key constraints). The two use cases are:

      • A table contains a foreign key to itself, and a single row will have a foreign key value pointing to its own primary key.
      • Two tables each contain a foreign key referencing the other table, with a row in each table referencing the other.

      For example:

                user
      ---------------------------------
      user_id    name   related_user_id
         1       'ed'          1

      Or:

                   widget                                                  entry
      -------------------------------------------             ---------------------------------
      widget_id     name        favorite_entry_id             entry_id      name      widget_id
         1       'somewidget'          5                         5       'someentry'     1

      In the first case, a row points to itself. Technically, a database that uses sequences such as PostgreSQL or Oracle can INSERT the row at once using a previously generated value, but databases which rely upon autoincrement-style primary key identifiers cannot. The relationship() always assumes a “parent/child” model of row population during flush, so unless you are populating the primary key/foreign key columns directly, relationship() needs to use two statements.

      In the second case, the “widget” row must be inserted before any referring “entry” rows, but then the “favorite_entry_id” column of that “widget” row cannot be set until the “entry” rows have been generated. In this case, it’s typically impossible to insert the “widget” and “entry” rows using just two INSERT statements; an UPDATE must be performed in order to keep foreign key constraints fulfilled. The exception is if the foreign keys are configured as “deferred until commit” (a feature some databases support) and if the identifiers were populated manually (again essentially bypassing relationship()).

      To enable the usage of a supplementary UPDATE statement, we use the post_update option of relationship(). This specifies that the linkage between the two rows should be created using an UPDATE statement after both rows have been INSERTED; it also causes the rows to be de-associated with each other via UPDATE before a DELETE is emitted. The flag should be placed on just one of the relationships, preferably the many-to-one side. Below we illustrate a complete example, including two ForeignKey constructs, one which specifies use_alter=True to help with emitting CREATE TABLE statements:

      from sqlalchemy import Integer, ForeignKey, String, Column
      from sqlalchemy.ext.declarative import declarative_base
      from sqlalchemy.orm import relationship
      
      Base = declarative_base()
      
      class Entry(Base):
          __tablename__ = 'entry'
          entry_id = Column(Integer, primary_key=True)
          widget_id = Column(Integer, ForeignKey('widget.widget_id'))
          name = Column(String(50))
      
      class Widget(Base):
          __tablename__ = 'widget'
      
          widget_id = Column(Integer, primary_key=True)
          favorite_entry_id = Column(Integer,
                                  ForeignKey('entry.entry_id',
                                  use_alter=True,
                                  name="fk_favorite_entry"))
          name = Column(String(50))
      
          entries = relationship(Entry, primaryjoin=
                                          widget_id==Entry.widget_id)
          favorite_entry = relationship(Entry,
                                      primaryjoin=
                                          favorite_entry_id==Entry.entry_id,
                                      post_update=True)

      When a structure against the above configuration is flushed, the “widget” row will be INSERTed minus the “favorite_entry_id” value, then all the “entry” rows will be INSERTed referencing the parent “widget” row, and then an UPDATE statement will populate the “favorite_entry_id” column of the “widget” table (it’s one row at a time for the time being):

      >>> w1 = Widget(name='somewidget')
      >>> e1 = Entry(name='someentry')
      >>> w1.favorite_entry = e1
      >>> w1.entries = [e1]
      >>> session.add_all([w1, e1])
      >>> session.commit()
      

      An additional configuration we can specify is to supply a more comprehensive foreign key constraint on Widget, such that it’s guaranteed that favorite_entry_id refers to an Entry that also refers to this Widget. We can use a composite foreign key, as illustrated below:

      from sqlalchemy import Integer, ForeignKey, String, \
              Column, UniqueConstraint, ForeignKeyConstraint
      from sqlalchemy.ext.declarative import declarative_base
      from sqlalchemy.orm import relationship
      
      Base = declarative_base()
      
      class Entry(Base):
          __tablename__ = 'entry'
          entry_id = Column(Integer, primary_key=True)
          widget_id = Column(Integer, ForeignKey('widget.widget_id'))
          name = Column(String(50))
          __table_args__ = (
              UniqueConstraint("entry_id", "widget_id"),
          )
      
      class Widget(Base):
          __tablename__ = 'widget'
      
          widget_id = Column(Integer, autoincrement='ignore_fk', primary_key=True)
          favorite_entry_id = Column(Integer)
      
          name = Column(String(50))
      
          __table_args__ = (
              ForeignKeyConstraint(
                  ["widget_id", "favorite_entry_id"],
                  ["entry.widget_id", "entry.entry_id"],
                  name="fk_favorite_entry", use_alter=True
              ),
          )
      
          entries = relationship(Entry, primaryjoin=
                                          widget_id==Entry.widget_id,
                                          foreign_keys=Entry.widget_id)
          favorite_entry = relationship(Entry,
                                      primaryjoin=
                                          favorite_entry_id==Entry.entry_id,
                                      foreign_keys=favorite_entry_id,
                                      post_update=True)

      The above mapping features a composite ForeignKeyConstraint bridging the widget_id and favorite_entry_id columns. To ensure that Widget.widget_id remains an “autoincrementing” column we specify autoincrement='ignore_fk' on Column, and additionally on each relationship() we must limit those columns considered as part of the foreign key for the purposes of joining and cross-population.

      New in version 0.7.4: autoincrement='ignore_fk' on Column.

      Mutable Primary Keys / Update Cascades

      When the primary key of an entity changes, related items which reference the primary key must be updated as well. For databases which enforce referential integrity, it’s required to use the database’s ON UPDATE CASCADE functionality in order to propagate primary key changes to referenced foreign keys - the values cannot be out of sync at any moment.

      For databases that don’t support this, such as SQLite and MySQL without their referential integrity options turned on, the passive_updates flag can be set to False, most preferably on a one-to-many or many-to-many relationship(), which instructs SQLAlchemy to issue UPDATE statements individually for objects referenced in the collection, loading them into memory if not already locally present. The passive_updates flag can also be False in conjunction with ON UPDATE CASCADE functionality, although in that case the unit of work will be issuing extra SELECT and UPDATE statements unnecessarily.

      A typical mutable primary key setup might look like:

      class User(Base):
          __tablename__ = 'user'
      
          username = Column(String(50), primary_key=True)
          fullname = Column(String(100))
      
          # passive_updates=False *only* needed if the database
          # does not implement ON UPDATE CASCADE
          addresses = relationship("Address", passive_updates=False)
      
      class Address(Base):
          __tablename__ = 'address'
      
          email = Column(String(50), primary_key=True)
          username = Column(String(50),
                      ForeignKey('user.username', onupdate="cascade")
                  )

      passive_updates is set to True by default, indicating that ON UPDATE CASCADE is expected to be in place in the usual case for foreign keys that expect to have a mutating parent key.

      passive_updates=False may be configured on any direction of relationship, i.e. one-to-many, many-to-one, and many-to-many, although it is much more effective when placed just on the one-to-many or many-to-many side. Configuring the passive_updates=False only on the many-to-one side will have only a partial effect, as the unit of work searches only through the current identity map for objects that may be referencing the one with a mutating primary key, not throughout the database.
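
      As a rough sketch of the effect, using the mapping above, a primary key change is simply assigned and flushed; whether SQLAlchemy or the database updates the referring Address rows depends on the passive_updates setting:

      jack = session.query(User).filter_by(username='jack').one()

      # mutate the primary key; referring Address.username values must follow
      jack.username = 'jack2'

      # with passive_updates=False, SQLAlchemy emits UPDATE statements for the
      # related Address rows itself; with the default of True, ON UPDATE CASCADE
      # in the database is expected to propagate the change
      session.commit()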

      Relationships API

      sqlalchemy.orm.relationship(argument, secondary=None, **kwargs)

      Provide a relationship of a primary Mapper to a secondary Mapper.

      This corresponds to a parent-child or associative table relationship. The constructed class is an instance of RelationshipProperty.

      A typical relationship(), used in a classical mapping:

      mapper(Parent, properties={
        'children': relationship(Child)
      })

      Some arguments accepted by relationship() optionally accept a callable function, which when called produces the desired value. The callable is invoked by the parent Mapper at “mapper initialization” time, which happens only when mappers are first used, and is assumed to be after all mappings have been constructed. This can be used to resolve order-of-declaration and other dependency issues, such as if Child is declared below Parent in the same file:

      mapper(Parent, properties={
          "children":relationship(lambda: Child,
                              order_by=lambda: Child.id)
      })

      When using the Declarative extension, the Declarative initializer allows string arguments to be passed to relationship(). These string arguments are converted into callables that evaluate the string as Python code, using the Declarative class-registry as a namespace. This allows the lookup of related classes to be automatic via their string name, and removes the need to import related classes at all into the local module space:

      from sqlalchemy.ext.declarative import declarative_base
      
      Base = declarative_base()
      
      class Parent(Base):
          __tablename__ = 'parent'
          id = Column(Integer, primary_key=True)
          children = relationship("Child", order_by="Child.id")

      A full array of examples and reference documentation regarding relationship() is at Relationship Configuration.

      Parameters:
      • argument

        a mapped class, or actual Mapper instance, representing the target of the relationship.

        argument may also be passed as a callable function which is evaluated at mapper initialization time, and may be passed as a Python-evaluable string when using Declarative.

      • secondary

        for a many-to-many relationship, specifies the intermediary table, and is an instance of Table. The secondary keyword argument should generally only be used for a table that is not otherwise expressed in any class mapping, unless this relationship is declared as view only, otherwise conflicting persistence operations can occur.

        secondary may also be passed as a callable function which is evaluated at mapper initialization time.

      • active_history=False – When True, indicates that the “previous” value for a many-to-one reference should be loaded when replaced, if not already loaded. Normally, history tracking logic for simple many-to-ones only needs to be aware of the “new” value in order to perform a flush. This flag is available for applications that make use of attributes.get_history() which also need to know the “previous” value of the attribute.
      • backref – indicates the string name of a property to be placed on the related mapper’s class that will handle this relationship in the other direction. The other property will be created automatically when the mappers are configured. Can also be passed as a backref() object to control the configuration of the new relationship.
      • back_populates – Takes a string name and has the same meaning as backref, except the complementing property is not created automatically, and instead must be configured explicitly on the other mapper. The complementing property should also indicate back_populates to this relationship to ensure proper functioning.
      • cascade
        a comma-separated list of cascade rules which determines how Session operations should be “cascaded” from parent to child. This defaults to False, which means that the default cascade of "save-update, merge" should be used.

        Available cascades are:

        • save-update - cascade the Session.add() operation. This cascade applies both to future and past calls to add(), meaning new items added to a collection or scalar relationship get placed into the same session as that of the parent, and also applies to items which have been removed from this relationship but are still part of unflushed history.
        • merge - cascade the merge() operation
        • expunge - cascade the Session.expunge() operation
        • delete - cascade the Session.delete() operation
        • delete-orphan - if an item of the child’s type is detached from its parent, mark it for deletion.

          Changed in version 0.7: This option does not prevent a new instance of the child object from being persisted without a parent to start with; to constrain against that case, ensure the child’s foreign key column(s) is configured as NOT NULL

        • refresh-expire - cascade the Session.expire() and refresh() operations
        • all - shorthand for “save-update, merge, refresh-expire, expunge, delete”

        See the section Cascades for more background on configuring cascades.

      • cascade_backrefs=True

        a boolean value indicating if the save-update cascade should operate along an assignment event intercepted by a backref. When set to False, the attribute managed by this relationship will not cascade an incoming transient object into the session of a persistent parent, if the event is received via backref.

        That is:

        mapper(A, a_table, properties={
            'bs':relationship(B, backref="a", cascade_backrefs=False)
        })

        If an A() is present in the session, assigning it to the “a” attribute on a transient B() will not place the B() into the session. To set the flag in the other direction, i.e. so that A().bs.append(B()) won’t add a transient A() into the session for a persistent B():

        mapper(A, a_table, properties={
            'bs':relationship(B,
                    backref=backref("a", cascade_backrefs=False)
                )
        })

        See the section Cascades for more background on configuring cascades.

      • collection_class – a class or callable that returns a new list-holding object, which will be used in place of a plain list for storing elements. Behavior of this attribute is described in detail at Customizing Collection Access.
      • comparator_factory – a class which extends RelationshipProperty.Comparator which provides custom SQL clause generation for comparison operations.
      • distinct_target_key=False

        Indicate if a “subquery” eager load should apply the DISTINCT keyword to the innermost SELECT statement. When set to None, the DISTINCT keyword will be applied in those cases when the target columns do not comprise the full primary key of the target table. When set to True, the DISTINCT keyword is applied to the innermost SELECT unconditionally.

        This flag defaults to False in 0.8 but will default to None in 0.9. It may be desirable to set this flag to False when the DISTINCT reduces performance of the innermost subquery more than the presence of duplicate innermost rows would.

        New in version 0.8.3: distinct_target_key allows the subquery eager loader to apply a DISTINCT modifier to the innermost SELECT.

      • doc – docstring which will be applied to the resulting descriptor.
      • extension – an AttributeExtension instance, or list of extensions, which will be prepended to the list of attribute listeners for the resulting descriptor placed on the class. Deprecated. Please see AttributeEvents.
      • foreign_keys

        a list of columns which are to be used as “foreign key” columns, or columns which refer to the value in a remote column, within the context of this relationship() object’s primaryjoin condition. That is, if the primaryjoin condition of this relationship() is a.id == b.a_id, and the values in b.a_id are required to be present in a.id, then the “foreign key” column of this relationship() is b.a_id.

        In normal cases, the foreign_keys parameter is not required. relationship() will automatically determine which columns in the primaryjoin condition are to be considered “foreign key” columns based on those Column objects that specify ForeignKey, or are otherwise listed as referencing columns in a ForeignKeyConstraint construct. foreign_keys is only needed when:

        1. There is more than one way to construct a join from the local table to the remote table, as there are multiple foreign key references present. Setting foreign_keys will limit the relationship() to consider just those columns specified here as “foreign”.

          Changed in version 0.8: A multiple-foreign key join ambiguity can be resolved by setting the foreign_keys parameter alone, without the need to explicitly set primaryjoin as well.

        2. The Table being mapped does not actually have ForeignKey or ForeignKeyConstraint constructs present, often because the table was reflected from a database that does not support foreign key reflection (MySQL MyISAM).
        3. The primaryjoin argument is used to construct a non-standard join condition, which makes use of columns or expressions that do not normally refer to their “parent” column, such as a join condition expressed by a complex comparison using a SQL function.

        The relationship() construct will raise informative error messages that suggest the use of the foreign_keys parameter when presented with an ambiguous condition. In typical cases, if relationship() doesn’t raise any exceptions, the foreign_keys parameter is usually not needed.

        foreign_keys may also be passed as a callable function which is evaluated at mapper initialization time, and may be passed as a Python-evaluable string when using Declarative.

        See also

        Handling Multiple Join Paths

        Creating Custom Foreign Conditions

        foreign() - allows direct annotation of the “foreign” columns within a primaryjoin condition.

        New in version 0.8: The foreign() annotation can also be applied directly to the primaryjoin expression, which is an alternate, more specific system of describing which columns in a particular primaryjoin should be considered “foreign”.

      • info

        Optional data dictionary which will be populated into the MapperProperty.info attribute of this object.

        New in version 0.8.

      • innerjoin=False

        when True, joined eager loads will use an inner join to join against related tables instead of an outer join. The purpose of this option is generally one of performance, as inner joins generally perform better than outer joins. Another reason can be the use of with_lockmode, which does not support outer joins.

        This flag can be set to True when the relationship references an object via many-to-one using local foreign keys that are not nullable, or when the reference is one-to-one or a collection that is guaranteed to have at least one entry.

      • join_depth – when non-None, an integer value indicating how many levels deep “eager” loaders should join on a self-referring or cyclical relationship. The number counts how many times the same Mapper shall be present in the loading condition along a particular join branch. When left at its default of None, eager loaders will stop chaining when they encounter the same target mapper which is already higher up in the chain. This option applies both to joined- and subquery- eager loaders.
      • lazy='select'

        specifies how the related items should be loaded. Default value is select. Values include:

        • select - items should be loaded lazily when the property is first accessed, using a separate SELECT statement, or identity map fetch for simple many-to-one references.
        • immediate - items should be loaded as the parents are loaded, using a separate SELECT statement, or identity map fetch for simple many-to-one references.

          New in version 0.6.5.

        • joined - items should be loaded “eagerly” in the same query as that of the parent, using a JOIN or LEFT OUTER JOIN. Whether the join is “outer” or not is determined by the innerjoin parameter.
        • subquery - items should be loaded “eagerly” as the parents are loaded, using one additional SQL statement, which issues a JOIN to a subquery of the original statement, for each collection requested.
        • noload - no loading should occur at any time. This is to support “write-only” attributes, or attributes which are populated in some manner specific to the application.
        • dynamic - the attribute will return a pre-configured Query object for all read operations, onto which further filtering operations can be applied before iterating the results. See the section Dynamic Relationship Loaders for more details.
        • True - a synonym for ‘select’
        • False - a synonym for ‘joined’
        • None - a synonym for ‘noload’

        Detailed discussion of loader strategies is at Relationship Loading Techniques.

      • load_on_pending=False

        Indicates loading behavior for transient or pending parent objects.

        Changed in version 0.8: load_on_pending is superseded by Session.enable_relationship_loading().

        When set to True, causes the lazy-loader to issue a query for a parent object that is not persistent, meaning it has never been flushed. This may take effect for a pending object when autoflush is disabled, or for a transient object that has been “attached” to a Session but is not part of its pending collection.

        The load_on_pending flag does not improve behavior when the ORM is used normally - object references should be constructed at the object level, not at the foreign key level, so that they are present in an ordinary way before flush() proceeds. This flag is not intended for general use.

        New in version 0.6.5.

      • order_by

        indicates the ordering that should be applied when loading these items. order_by is expected to refer to one of the Column objects to which the target class is mapped, or the attribute itself bound to the target class which refers to the column.

        order_by may also be passed as a callable function which is evaluated at mapper initialization time, and may be passed as a Python-evaluable string when using Declarative.

      • passive_deletes=False

        Indicates loading behavior during delete operations.

        A value of True indicates that unloaded child items should not be loaded during a delete operation on the parent. Normally, when a parent item is deleted, all child items are loaded so that they can either be marked as deleted, or have their foreign key to the parent set to NULL. Marking this flag as True usually implies an ON DELETE <CASCADE|SET NULL> rule is in place which will handle updating/deleting child rows on the database side.

        Additionally, setting the flag to the string value ‘all’ will disable the “nulling out” of the child foreign keys, when there is no delete or delete-orphan cascade enabled. This is typically used when a trigger or error-raising scenario is in place on the database side. Note that the foreign key attributes on in-session child objects will not be changed after a flush occurs, so this is a very special use-case setting.

      • passive_updates=True

        Indicates loading and INSERT/UPDATE/DELETE behavior when the source of a foreign key value changes (i.e. an “on update” cascade), which are typically the primary key columns of the source row.

        When True, it is assumed that ON UPDATE CASCADE is configured on the foreign key in the database, and that the database will handle propagation of an UPDATE from a source column to dependent rows. Note that with databases which enforce referential integrity (i.e. PostgreSQL, MySQL with InnoDB tables), ON UPDATE CASCADE is required for this operation. The relationship() will update the value of the attribute on related items which are locally present in the session during a flush.

        When False, it is assumed that the database does not enforce referential integrity and will not be issuing its own CASCADE operation for an update. The relationship() will issue the appropriate UPDATE statements to the database in response to the change of a referenced key, and items locally present in the session during a flush will also be refreshed.

        This flag should probably be set to False if primary key changes are expected and the database in use doesn’t support CASCADE (i.e. SQLite, MySQL MyISAM tables).

        Also see the passive_updates flag on mapper().

        A future SQLAlchemy release will provide a “detect” feature for this flag.

      • post_update – this indicates that the relationship should be handled by a second UPDATE statement after an INSERT or before a DELETE. Currently, it also will issue an UPDATE after the instance was UPDATEd as well, although this technically should be improved. This flag is used to handle saving bi-directional dependencies between two individual rows (i.e. each row references the other), where it would otherwise be impossible to INSERT or DELETE both rows fully since one row exists before the other. Use this flag when a particular mapping arrangement will incur two rows that are dependent on each other, such as a table that has a one-to-many relationship to a set of child rows, and also has a column that references a single child row within that list (i.e. both tables contain a foreign key to each other). If a flush() operation returns an error that a “cyclical dependency” was detected, this is a cue that you might want to use post_update to “break” the cycle.
      • primaryjoin

        a SQL expression that will be used as the primary join of this child object against the parent object, or in a many-to-many relationship the join of the primary object to the association table. By default, this value is computed based on the foreign key relationships of the parent and child tables (or association table).

        primaryjoin may also be passed as a callable function which is evaluated at mapper initialization time, and may be passed as a Python-evaluable string when using Declarative.

      • remote_side

        used for self-referential relationships, indicates the column or list of columns that form the “remote side” of the relationship.

        remote_side may also be passed as a callable function which is evaluated at mapper initialization time, and may be passed as a Python-evaluable string when using Declarative.

        Changed in version 0.8: The remote() annotation can also be applied directly to the primaryjoin expression, which is an alternate, more specific system of describing which columns in a particular primaryjoin should be considered “remote”.

      • query_class – a Query subclass that will be used as the base of the “appender query” returned by a “dynamic” relationship, that is, a relationship that specifies lazy="dynamic" or was otherwise constructed using the orm.dynamic_loader() function.
      • secondaryjoin

        a SQL expression that will be used as the join of an association table to the child object. By default, this value is computed based on the foreign key relationships of the association and child tables.

        secondaryjoin may also be passed as a callable function which is evaluated at mapper initialization time, and may be passed as a Python-evaluable string when using Declarative.

      • single_parent=(True|False) – when True, installs a validator which will prevent objects from being associated with more than one parent at a time. This is used for many-to-one or many-to-many relationships that should be treated either as one-to-one or one-to-many. Its usage is optional unless delete-orphan cascade is also set on this relationship(), in which case it’s required.
      • uselist=(True|False) – a boolean that indicates if this property should be loaded as a list or a scalar. In most cases, this value is determined automatically by relationship(), based on the type and direction of the relationship - one to many forms a list, many to one forms a scalar, many to many is a list. If a scalar is desired where normally a list would be present, such as a bi-directional one-to-one relationship, set uselist to False (a short sketch follows this parameter list).
      • viewonly=False – when set to True, the relationship is used only for loading objects within the relationship, and has no effect on the unit-of-work flush process. Relationships with viewonly can specify any kind of join conditions to provide additional views of related objects onto a parent object. Note that the functionality of a viewonly relationship has its limits - complicated join conditions may not compile into eager or lazy loaders properly. If this is the case, use an alternative method.
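
      As a brief illustration of the uselist parameter described above, below is a minimal sketch of a bi-directional one-to-one; the Parent and Child names are hypothetical:

      class Parent(Base):
          __tablename__ = 'parent'
          id = Column(Integer, primary_key=True)

          # without uselist=False, Parent.child would be detected as a collection
          child = relationship("Child", uselist=False, backref="parent")

      class Child(Base):
          __tablename__ = 'child'
          id = Column(Integer, primary_key=True)
          parent_id = Column(Integer, ForeignKey('parent.id'))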

      Changed in version 0.6: relationship() was renamed from its previous name relation().

      sqlalchemy.orm.backref(name, **kwargs)

      Create a back reference with explicit keyword arguments, which are the same arguments one can send to relationship().

      Used with the backref keyword argument to relationship() in place of a string argument, e.g.:

      'items':relationship(SomeItem, backref=backref('parent', lazy='subquery'))

      sqlalchemy.orm.relation(*arg, **kw)

      A synonym for relationship().

      sqlalchemy.orm.dynamic_loader(argument, **kw)

      Construct a dynamically-loading mapper property.

      This is essentially the same as using the lazy='dynamic' argument with relationship():

      dynamic_loader(SomeClass)
      
      # is the same as
      
      relationship(SomeClass, lazy="dynamic")

      See the section Dynamic Relationship Loaders for more details on dynamic loading.

      sqlalchemy.orm.foreign(expr)

      Annotate a portion of a primaryjoin expression with a ‘foreign’ annotation.

      See the section Creating Custom Foreign Conditions for a description of use.

      New in version 0.8.

      sqlalchemy.orm.remote(expr)

      Annotate a portion of a primaryjoin expression with a ‘remote’ annotation.

      See the section Creating Custom Foreign Conditions for a description of use.

      New in version 0.8.


      Using the Session

      The orm.mapper() function and declarative extensions are the primary configurational interface for the ORM. Once mappings are configured, the primary usage interface for persistence operations is the Session.

      What does the Session do ?

      In the most general sense, the Session establishes all conversations with the database and represents a “holding zone” for all the objects which you’ve loaded or associated with it during its lifespan. It provides the entrypoint to acquire a Query object, which sends queries to the database using the Session object’s current database connection, populating result rows into objects that are then stored in the Session, inside a structure called the Identity Map - a data structure that maintains unique copies of each object, where “unique” means “only one object with a particular primary key”.

      The Session begins in an essentially stateless form. Once queries are issued or other objects are persisted with it, it requests a connection resource from an Engine that is associated either with the Session itself or with the mapped Table objects being operated upon. This connection represents an ongoing transaction, which remains in effect until the Session is instructed to commit or roll back its pending state.

      All changes to objects maintained by a Session are tracked - before the database is queried again or before the current transaction is committed, it flushes all pending changes to the database. This is known as the Unit of Work pattern.

      When using a Session, it’s important to note that the objects which are associated with it are proxy objects to the transaction being held by the Session - there are a variety of events that will cause objects to re-access the database in order to keep synchronized. It is possible to “detach” objects from a Session, and to continue using them, though this practice has its caveats. It’s intended that usually, you’d re-associate detached objects with another Session when you want to work with them again, so that they can resume their normal task of representing database state.

      Getting a Session

      Session is a regular Python class which can be directly instantiated. However, to standardize how sessions are configured and acquired, the sessionmaker class is normally used to create a top level Session configuration which can then be used throughout an application without the need to repeat the configurational arguments.

      The usage of sessionmaker is illustrated below:

      from sqlalchemy import create_engine
      from sqlalchemy.orm import sessionmaker
      
      # an Engine, which the Session will use for connection
      # resources
      some_engine = create_engine('postgresql://scott:tiger@localhost/')
      
      # create a configured "Session" class
      Session = sessionmaker(bind=some_engine)
      
      # create a Session
      session = Session()
      
      # work with the session
      myobject = MyObject('foo', 'bar')
      session.add(myobject)
      session.commit()

      Above, the sessionmaker call creates a factory for us, which we assign to the name Session. This factory, when called, will create a new Session object using the configurational arguments we’ve given the factory. In this case, as is typical, we’ve configured the factory to specify a particular Engine for connection resources.

      A typical setup will associate the sessionmaker with an Engine, so that each Session generated will use this Engine to acquire connection resources. This association can be set up as in the example above, using the bind argument.

      When you write your application, place the sessionmaker factory at the global level. This factory can then be used by the rest of the application as the source of new Session instances, keeping the configuration for how Session objects are constructed in one place.

      The sessionmaker factory can also be used in conjunction with other helpers, which are passed a user-defined sessionmaker that is then maintained by the helper. Some of these helpers are discussed in the section When do I construct a Session, when do I commit it, and when do I close it?.

      Adding Additional Configuration to an Existing sessionmaker()

      A common scenario is where the sessionmaker is invoked at module import time, however the generation of one or more Engine instances to be associated with the sessionmaker has not yet proceeded. For this use case, the sessionmaker construct offers the sessionmaker.configure() method, which will place additional configuration directives into an existing sessionmaker that will take place when the construct is invoked:

      from sqlalchemy.orm import sessionmaker
      from sqlalchemy import create_engine
      
      # configure Session class with desired options
      Session = sessionmaker()
      
      # later, we create the engine
      engine = create_engine('postgresql://...')
      
      # associate it with our custom Session class
      Session.configure(bind=engine)
      
      # work with the session
      session = Session()

      Creating Ad-Hoc Session Objects with Alternate Arguments

      For the use case where an application needs to create a new Session with special arguments that deviate from what is normally used throughout the application, such as a Session that binds to an alternate source of connectivity, or a Session that should have other arguments such as expire_on_commit established differently from what most of the application wants, specific arguments can be passed to the sessionmaker factory’s sessionmaker.__call__() method. These arguments will override whatever configurations have already been placed, such as below, where a new Session is constructed against a specific Connection:

      # at the module level, the global sessionmaker,
      # bound to a specific Engine
      Session = sessionmaker(bind=engine)
      
      # later, some unit of code wants to create a
      # Session that is bound to a specific Connection
      conn = engine.connect()
      session = Session(bind=conn)

      The typical rationale for the association of a Session with a specific Connection is that of a test fixture that maintains an external transaction - see Joining a Session into an External Transaction for an example of this.

      Using the Session

      Quickie Intro to Object States

      It’s helpful to know the states which an instance can have within a session:

      • Transient - an instance that’s not in a session, and is not saved to the database; i.e. it has no database identity. The only relationship such an object has to the ORM is that its class has a mapper() associated with it.
      • Pending - when you add() a transient instance, it becomes pending. It still wasn’t actually flushed to the database yet, but it will be when the next flush occurs.
      • Persistent - An instance which is present in the session and has a record in the database. You get persistent instances by either flushing so that the pending instances become persistent, or by querying the database for existing instances (or moving persistent instances from other sessions into your local session).
      • Detached - an instance which has a record in the database, but is not in any session. There’s nothing wrong with this, and you can use objects normally when they’re detached, except they will not be able to issue any SQL in order to load collections or attributes which are not yet loaded, or were marked as “expired”.

      Knowing these states is important, since the Session tries to be strict about ambiguous operations (such as trying to save the same object to two different sessions at the same time).
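
      A rough sketch of these transitions, assuming a configured Session factory and a mapped User class:

      user = User(name='ed')    # transient: not in a session, no database identity

      session = Session()
      session.add(user)         # pending: will be INSERTed at the next flush

      session.commit()          # flushed and committed: the object is now persistent

      session.expunge(user)     # detached: has a database record, but no session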

      Session Frequently Asked Questions

      When do I make a sessionmaker?

      Just one time, somewhere in your application’s global scope. It should be looked upon as part of your application’s configuration. If your application has three .py files in a package, you could, for example, place the sessionmaker line in your __init__.py file; from that point on your other modules say “from mypackage import Session”. That way, everyone else just uses Session(), and the configuration of that session is controlled by that central point.

      If your application starts up, does imports, but does not know what database it’s going to be connecting to, you can bind the Session at the “class” level to the engine later on, using sessionmaker.configure().
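
      A hypothetical sketch of that layout, with the factory living in mypackage/__init__.py and the engine bound later at startup via sessionmaker.configure():

      # mypackage/__init__.py
      from sqlalchemy.orm import sessionmaker

      Session = sessionmaker()

      # somewhere at application startup, once the database URL is known
      from sqlalchemy import create_engine
      from mypackage import Session

      engine = create_engine('postgresql://scott:tiger@localhost/mydb')
      Session.configure(bind=engine)

      # any other module that needs to talk to the database
      from mypackage import Session

      def do_something():
          session = Session()
          # ... work with the session ...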

      In the examples in this section, we will frequently show the sessionmaker being created right above the line where we actually invoke Session. But that’s just for example’s sake! In reality, the sessionmaker would be somewhere at the module level. The calls to instantiate Session would then be placed at the point in the application where database conversations begin.

      When do I construct a Session, when do I commit it, and when do I close it?

      tl;dr;

      As a general rule, keep the lifecycle of the session separate and external from functions and objects that access and/or manipulate database data.

      A Session is typically constructed at the beginning of a logical operation where database access is potentially anticipated.

      The Session, whenever it is used to talk to the database, begins a database transaction as soon as it starts communicating. Assuming the autocommit flag is left at its recommended default of False, this transaction remains in progress until the Session is rolled back, committed, or closed. The Session will begin a new transaction if it is used again, subsequent to the previous transaction ending; from this it follows that the Session is capable of having a lifespan across many transactions, though only one at a time. We refer to these two concepts as transaction scope and session scope.

      The implication here is that the SQLAlchemy ORM is encouraging the developer to establish these two scopes in their application, including not only when the scopes begin and end, but also the expanse of those scopes, for example should a single Session instance be local to the execution flow within a function or method, should it be a global object used by the entire application, or somewhere in between these two.

      The burden placed on the developer to determine this scope is one area where the SQLAlchemy ORM necessarily has a strong opinion about how the database should be used. The unit of work pattern is specifically one of accumulating changes over time and flushing them periodically, keeping in-memory state in sync with what’s known to be present in a local transaction. This pattern is only effective when meaningful transaction scopes are in place.

      It’s usually not very hard to determine the best points at which to begin and end the scope of a Session, though the wide variety of application architectures possible can introduce challenging situations.

      A common choice is to tear down the Session at the same time the transaction ends, meaning the transaction and session scopes are the same. This is a great choice to start out with as it removes the need to consider session scope as separate from transaction scope.

      While there’s no one-size-fits-all recommendation for how transaction scope should be determined, there are common patterns. Especially if one is writing a web application, the choice is pretty much established.

      A web application is the easiest case because such an application is already constructed around a single, consistent scope - this is the request, which represents an incoming request from a browser, the processing of that request to formulate a response, and finally the delivery of that response back to the client. Integrating web applications with the Session is then the straightforward task of linking the scope of the Session to that of the request. The Session can be established as the request begins, or using a lazy initialization pattern which establishes one as soon as it is needed. The request then proceeds, with some system in place where application logic can access the current Session in a manner associated with how the actual request object is accessed. As the request ends, the Session is torn down as well, usually through the usage of event hooks provided by the web framework. The transaction used by the Session may also be committed at this point, or alternatively the application may opt for an explicit commit pattern, only committing for those requests where one is warranted, but still always tearing down the Session unconditionally at the end.

      Most web frameworks include infrastructure to establish a single Session, associated with the request, which is correctly constructed and torn down at the end of a request. Such infrastructure pieces include products such as Flask-SQLAlchemy, for usage in conjunction with the Flask web framework, and Zope-SQLAlchemy, for usage in conjunction with the Pyramid and Zope frameworks. SQLAlchemy strongly recommends that these products be used as available.

      In those situations where integration libraries are not available, SQLAlchemy includes its own “helper” class known as scoped_session. A tutorial on the usage of this object is at Contextual/Thread-local Sessions. It provides both a quick way to associate a Session with the current thread, as well as patterns to associate Session objects with other kinds of scopes.

      As mentioned before, for non-web applications there is no one clear pattern, as applications themselves don’t have just one pattern of architecture. The best strategy is to attempt to demarcate “operations”, points at which a particular thread begins to perform a series of operations for some period of time, which can be committed at the end. Some examples:

      • A background daemon which spawns off child forks would want to create a Session local to each child process, work with that Session through the life of the “job” that the fork is handling, then tear it down when the job is completed.
      • For a command-line script, the application would create a single, global Session that is established when the program begins to do its work, and commits it right as the program is completing its task.
      • For a GUI interface-driven application, the scope of the Session may best be within the scope of a user-generated event, such as a button push. Or, the scope may correspond to explicit user interaction, such as the user “opening” a series of records, then “saving” them.

      As a general rule, the application should manage the lifecycle of the session externally to functions that deal with specific data. This is a fundamental separation of concerns which keeps data-specific operations agnostic of the context in which they access and manipulate that data.

      E.g. don’t do this:

      ### this is the **wrong way to do it** ###
      
      class ThingOne(object):
          def go(self):
              session = Session()
              try:
                  session.query(FooBar).update({"x": 5})
                  session.commit()
              except:
                  session.rollback()
                  raise
      
      class ThingTwo(object):
          def go(self):
              session = Session()
              try:
                  session.query(Widget).update({"q": 18})
                  session.commit()
              except:
                  session.rollback()
                  raise
      
      def run_my_program():
          ThingOne().go()
          ThingTwo().go()

      Keep the lifecycle of the session (and usually the transaction) separate and external:

      ### this is a **better** (but not the only) way to do it ###
      
      class ThingOne(object):
          def go(self, session):
              session.query(FooBar).update({"x": 5})
      
      class ThingTwo(object):
          def go(self, session):
              session.query(Widget).update({"q": 18})
      
      def run_my_program():
          session = Session()
          try:
              ThingOne().go(session)
              ThingTwo().go(session)
      
              session.commit()
          except:
              session.rollback()
              raise
          finally:
              session.close()

      The advanced developer will try to keep the details of session, transaction and exception management as far as possible from the details of the program doing its work. For example, we can further separate concerns using a context manager:

      ### another way (but again *not the only way*) to do it ###
      
      from contextlib import contextmanager
      
      @contextmanager
      def session_scope():
          """Provide a transactional scope around a series of operations."""
          session = Session()
          try:
              yield session
              session.commit()
          except:
              session.rollback()
              raise
          finally:
              session.close()
      
      
      def run_my_program():
          with session_scope() as session:
              ThingOne().go(session)
              ThingTwo().go(session)

      Is the Session a cache?

      Yeee...no. It’s somewhat used as a cache, in that it implements the identity map pattern, and stores objects keyed to their primary key. However, it doesn’t do any kind of query caching. This means, if you say session.query(Foo).filter_by(name='bar'), even if Foo(name='bar') is right there, in the identity map, the session has no idea about that. It has to issue SQL to the database, get the rows back, and then when it sees the primary key in the row, then it can look in the local identity map and see that the object is already there. It’s only when you say query.get({some primary key}) that the Session doesn’t have to issue a query.
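
      In other words (a small sketch, assuming a mapped Foo class; some_primary_key stands in for an actual key value):

      # emits a SELECT even if a matching Foo is already in the identity map
      foo = session.query(Foo).filter_by(name='bar').first()

      # consults the identity map first; no SQL is emitted if the object with
      # this primary key is already present and unexpired
      foo = session.query(Foo).get(some_primary_key)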

      Additionally, the Session stores object instances using a weak reference by default. This also defeats the purpose of using the Session as a cache.

      The Session is not designed to be a global object from which everyone consults as a “registry” of objects. That’s more the job of a second level cache. SQLAlchemy provides a pattern for implementing second level caching using dogpile.cache, via the Dogpile Caching example.

      How can I get the Session for a certain object?

      Use the object_session() classmethod available on Session:

      session = Session.object_session(someobject)

      The newer Runtime Inspection API system can also be used:

      from sqlalchemy import inspect
      session = inspect(object).session

      Is the session thread-safe?

      The Session is very much intended to be used in a non-concurrent fashion, which usually means in only one thread at a time.

      The Session should be used in such a way that one instance exists for a single series of operations within a single transaction. One expedient way to get this effect is by associating a Session with the current thread (see Contextual/Thread-local Sessions for background). Another is to use a pattern where the Session is passed between functions and is otherwise not shared with other threads.

      The bigger point is that you should not want to use the session with multiple concurrent threads. That would be like having everyone at a restaurant all eat from the same plate. The session is a local “workspace” that you use for a specific set of tasks; you don’t want to, or need to, share that session with other threads who are doing some other task.

      Making sure the Session is only used in a single concurrent thread at a time is called a “share nothing” approach to concurrency. But actually, not sharing the Session implies a more significant pattern; it means not just the Session object itself, but also all objects that are associated with that Session, must be kept within the scope of a single concurrent thread. The set of mapped objects associated with a Session are essentially proxies for data within database rows accessed over a database connection, and so just like the Session itself, the whole set of objects is really just a large-scale proxy for a database connection (or connections). Ultimately, it’s mostly the DBAPI connection itself that we’re keeping away from concurrent access; but since the Session and all the objects associated with it are all proxies for that DBAPI connection, the entire graph is essentially not safe for concurrent access.

      If there are in fact multiple threads participating in the same task, then you may consider sharing the session and its objects between those threads; however, in this extremely unusual scenario the application would need to ensure that a proper locking scheme is implemented so that there isn’t concurrent access to the Session or its state. A more common approach to this situation is to maintain a single Session per concurrent thread, but to instead copy objects from one Session to another, often using the Session.merge() method to copy the state of an object into a new object local to a different Session.
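
      A minimal sketch of that more common approach, with hypothetical names; the original object is left untouched and unassociated with the worker’s Session:

      # each worker thread maintains its own Session
      worker_session = Session()

      # copy the state of an object produced elsewhere into this Session;
      # further work proceeds against the returned local copy
      local_obj = worker_session.merge(incoming_obj)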

      Querying

      The query() function takes one or more entities and returns a new Query object which will issue mapper queries within the context of this Session. An entity is defined as a mapped class, a Mapper object, an orm-enabled descriptor, or an AliasedClass object:

      # query from a class
      session.query(User).filter_by(name='ed').all()
      
      # query with multiple classes, returns tuples
      session.query(User, Address).join('addresses').filter_by(name='ed').all()
      
      # query using orm-enabled descriptors
      session.query(User.name, User.fullname).all()
      
      # query from a mapper
      user_mapper = class_mapper(User)
      session.query(user_mapper)

      When Query returns results, each object instantiated is stored within the identity map. When a row matches an object which is already present, the same object is returned. In the latter case, whether or not the row is populated onto an existing object depends upon whether the attributes of the instance have been expired or not. A default-configured Session automatically expires all instances along transaction boundaries, so that with a normally isolated transaction, there shouldn’t be any issue of instances representing data which is stale with regards to the current transaction.

      The Query object is introduced in great detail in Object Relational Tutorial, and further documented in Querying.

      Adding New or Existing Items

      add() is used to place instances in the session. For transient (i.e. brand new) instances, this will have the effect of an INSERT taking place for those instances upon the next flush. For instances which are persistent (i.e. were loaded by this session), they are already present and do not need to be added. Instances which are detached (i.e. have been removed from a session) may be re-associated with a session using this method:

      user1 = User(name='user1')
      user2 = User(name='user2')
      session.add(user1)
      session.add(user2)
      
      session.commit()     # write changes to the database

      To add a list of items to the session at once, use add_all():

      session.add_all([item1, item2, item3])

      The add() operation cascades along the save-update cascade. For more details see the section Cascades.

      Merging

      merge() transfers state from an outside object into a new or already existing instance within a session. It also reconciles the incoming data against the state of the database, producing a history stream which will be applied towards the next flush, or alternatively can be made to produce a simple “transfer” of state without producing change history or accessing the database. Usage is as follows:

      merged_object = session.merge(existing_object)

      When given an instance, it follows these steps:

      • It examines the primary key of the instance. If it’s present, it attempts to locate that instance in the local identity map. If the load=True flag is left at its default, it also checks the database for this primary key if not located locally.

      • If the given instance has no primary key, or if no instance can be found with the primary key given, a new instance is created.

      • The state of the given instance is then copied onto the located/newly created instance. For attributes which are present on the source instance, the value is transferred to the target instance. For mapped attributes which aren’t present on the source, the attribute is expired on the target instance, discarding its existing value.

        If the load=True flag is left at its default, this copy process emits events and will load the target object’s unloaded collections for each attribute present on the source object, so that the incoming state can be reconciled against what’s present in the database. If load is passed as False, the incoming data is “stamped” directly without producing any history.

      • The operation is cascaded to related objects and collections, as indicated by the merge cascade (see Cascades).

      • The new instance is returned.

      With merge(), the given “source” instance is not modified nor is it associated with the target Session, and remains available to be merged with any number of other Session objects. merge() is useful for taking the state of any kind of object structure without regard for its origins or current session associations and copying its state into a new session. Here are some examples:

      • An application which reads an object structure from a file and wishes to save it to the database might parse the file, build up the structure, and then use merge() to save it to the database, ensuring that the data within the file is used to formulate the primary key of each element of the structure. Later, when the file has changed, the same process can be re-run, producing a slightly different object structure, which can then be merged in again, and the Session will automatically update the database to reflect those changes, loading each object from the database by primary key and then updating its state with the new state given.

      • An application is storing objects in an in-memory cache, shared by many Session objects simultaneously. merge() is used each time an object is retrieved from the cache to create a local copy of it in each Session which requests it. The cached object remains detached; only its state is moved into copies of itself that are local to individual Session objects.

        In the caching use case, it’s common that the load=False flag is used to remove the overhead of reconciling the object’s state with the database. There’s also a “bulk” version of merge() called merge_result() that was designed to work with cache-extended Query objects - see the section Dogpile Caching.

      • An application wants to transfer the state of a series of objects into a Session maintained by a worker thread or other concurrent system. merge() makes a copy of each object to be placed into this new Session. At the end of the operation, the parent thread/process maintains the objects it started with, and the thread/worker can proceed with local copies of those objects.

        In the “transfer between threads/processes” use case, the application may want to use the load=False flag as well to avoid overhead and redundant SQL queries as the data is transferred.
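
      A minimal sketch of the load=False pattern described above, assuming cached_user is a detached object whose state is already known to be current:

      # "stamp" the cached object's state into the local session without
      # emitting SELECT statements or producing change history
      local_user = session.merge(cached_user, load=False)

      # the cached object itself stays detached and may be merged into
      # any number of other sessions
      assert local_user is not cached_user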

      Merge Tips

      merge() is an extremely useful method for many purposes. However, it deals with the intricate border between objects that are transient/detached and those that are persistent, as well as the automated transference of state. The wide variety of scenarios that can present themselves here often require a more careful approach to the state of objects. Common problems with merge usually involve some unexpected state regarding the object being passed to merge().

      Let’s use the canonical example of the User and Address objects:

      class User(Base):
          __tablename__ = 'user'
      
          id = Column(Integer, primary_key=True)
          name = Column(String(50), nullable=False)
          addresses = relationship("Address", backref="user")
      
      class Address(Base):
          __tablename__ = 'address'
      
          id = Column(Integer, primary_key=True)
          email_address = Column(String(50), nullable=False)
          user_id = Column(Integer, ForeignKey('user.id'), nullable=False)

      Assume a User object with one Address, already persistent:

      >>> u1 = User(name='ed', addresses=[Address(email_address='ed@ed.com')])
      >>> session.add(u1)
      >>> session.commit()

      We now create a1, an object outside the session, which we’d like to merge on top of the existing Address:

      >>> existing_a1 = u1.addresses[0]
      >>> a1 = Address(id=existing_a1.id)

      A surprise would occur if we said this:

      >>> a1.user = u1
      >>> a1 = session.merge(a1)
      >>> session.commit()
      sqlalchemy.orm.exc.FlushError: New instance <Address at 0x1298f50>
      with identity key (<class '__main__.Address'>, (1,)) conflicts with
      persistent instance <Address at 0x12a25d0>

      Why is that? We weren’t careful with our cascades. The assignment of a1.user to a persistent object cascaded to the backref of User.addresses and made our a1 object pending, as though we had added it. Now we have two Address objects in the session:

      >>> a1 = Address()
      >>> a1.user = u1
      >>> a1 in session
      True
      >>> existing_a1 in session
      True
      >>> a1 is existing_a1
      False

      Above, our a1 is already pending in the session. The subsequent merge() operation essentially does nothing. Cascade can be configured via the cascade option on relationship(), although in this case it would mean removing the save-update cascade from the User.addresses relationship - and usually, that behavior is extremely convenient. The solution here would usually be to not assign a1.user to an object already persistent in the target session.

      The cascade_backrefs=False option of relationship() will also prevent the Address from being added to the session via the a1.user = u1 assignment.

      Further detail on cascade operation is at Cascades.

      Another example of unexpected state:

      >>> a1 = Address(id=existing_a1.id, user_id=u1.id)
      >>> a1.user is None
      True
      >>> a1 = session.merge(a1)
      >>> session.commit()
      sqlalchemy.exc.IntegrityError: (IntegrityError) address.user_id
      may not be NULL

      Here, we accessed a1.user, which returned its default value of None, which, as a result of this access, has been placed in the __dict__ of our object a1. Normally, this operation creates no change event, so the user_id attribute takes precedence during a flush. But when we merge the Address object into the session, the operation is equivalent to:

      >>> existing_a1.id = existing_a1.id
      >>> existing_a1.user_id = u1.id
      >>> existing_a1.user = None

      Where above, both user_id and user are assigned to, and change events are emitted for both. The user association takes precedence, and None is applied to user_id, causing a failure.

      Most merge() issues can be examined by first checking: is the object prematurely in the session?

      >>> a1 = Address(id=existing_a1.id, user_id=u1.id)
      >>> assert a1 not in session
      >>> a1 = session.merge(a1)

      Or is there state on the object that we don’t want? Examining __dict__ is a quick way to check:

      >>> a1 = Address(id=existing_a1.id, user_id=u1.id)
      >>> a1.user
      >>> a1.__dict__
      {'_sa_instance_state': <sqlalchemy.orm.state.InstanceState object at 0x1298d10>,
          'user_id': 1,
          'id': 1,
          'user': None}
      >>> # we don't want user=None merged, remove it
      >>> del a1.user
      >>> a1 = session.merge(a1)
      >>> # success
      >>> session.commit()

      Deleting

      The delete() method places an instance into the Session’s list of objects to be marked as deleted:

      # mark two objects to be deleted
      session.delete(obj1)
      session.delete(obj2)
      
      # commit (or flush)
      session.commit()

      Deleting from Collections

      A common point of confusion regarding delete() arises when objects which are members of a collection are being deleted. While the collection member is marked for deletion from the database, this does not impact the collection itself in memory until the collection is expired. Below, we illustrate that even after an Address object is marked for deletion, it’s still present in the collection associated with the parent User, even after a flush:

      >>> address = user.addresses[1]
      >>> session.delete(address)
      >>> session.flush()
      >>> address in user.addresses
      True

      When the above session is committed, all attributes are expired. The next access of user.addresses will re-load the collection, revealing the desired state:

      >>> session.commit()
      >>> address in user.addresses
      False

      The usual practice of deleting items within collections is to forego the usage of delete() directly, and instead use cascade behavior to automatically invoke the deletion as a result of removing the object from the parent collection. The delete-orphan cascade accomplishes this, as illustrated in the example below:

      mapper(User, users_table, properties={
          'addresses':relationship(Address, cascade="all, delete, delete-orphan")
      })
      del user.addresses[1]
      session.flush()

      Where above, upon removing the Address object from the User.addresses collection, the delete-orphan cascade has the effect of marking the Address object for deletion in the same way as passing it to delete().

      See also Cascades for detail on cascades.

      Deleting based on Filter Criterion

      The caveat with Session.delete() is that you need to have an object handy already in order to delete it. The Query includes a delete() method which deletes based on filtering criteria:

      session.query(User).filter(User.id==7).delete()

      The Query.delete() method includes functionality to “expire” objects already in the session which match the criteria. However, it does have some caveats, including that “delete” and “delete-orphan” cascades won’t be fully expressed for collections which are already loaded. See the API docs for delete() for more details.
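
      For example, the synchronize_session parameter of Query.delete() controls how objects already present in the session are matched against the criteria; a brief sketch of two of the available strategies:

      # skip searching the session for matching objects entirely; objects
      # already loaded may remain in a stale state until expired
      session.query(User).filter(User.id == 7).delete(synchronize_session=False)

      # or, select the matching rows first, so that matched objects in the
      # session can be handled accurately
      session.query(User).filter(User.id == 7).delete(synchronize_session='fetch')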

      Flushing

      When the Session is used with its default configuration, the flush step is nearly always done transparently. Specifically, the flush occurs before any individual Query is issued, as well as within the commit() call before the transaction is committed. It also occurs before a SAVEPOINT is issued when begin_nested() is used.

      Regardless of the autoflush setting, a flush can always be forced by issuing flush():

      session.flush()

      The “flush-on-Query” aspect of the behavior can be disabled by constructing sessionmaker with the flag autoflush=False:

      Session = sessionmaker(autoflush=False)

      Additionally, autoflush can be temporarily disabled by setting the autoflush flag at any time:

      mysession = Session()
      mysession.autoflush = False

      Some autoflush-disable recipes are available at DisableAutoFlush.

      The flush process always occurs within a transaction, even if the Session has been configured with autocommit=True, a setting that disables the session’s persistent transactional state. If no transaction is present, flush() creates its own transaction and commits it. Any failures during flush will always result in a rollback of whatever transaction is present. If the Session is not in autocommit=True mode, an explicit call to rollback() is required after a flush fails, even though the underlying transaction will have been rolled back already - this is so that the overall nesting pattern of so-called “subtransactions” is consistently maintained.

      Committing

      commit() is used to commit the current transaction. It always issues flush() beforehand to flush any remaining state to the database; this is independent of the “autoflush” setting. If no transaction is present, it raises an error. Note that the default behavior of the Session is that a “transaction” is always present; this behavior can be disabled by setting autocommit=True. In autocommit mode, a transaction can be initiated by calling the begin() method.

      Note

      The term “transaction” here refers to a transactional construct within the Session itself which may be maintaining zero or more actual database (DBAPI) transactions. An individual DBAPI connection begins participation in the “transaction” as it is first used to execute a SQL statement, then remains present until the session-level “transaction” is completed. See Managing Transactions for further detail.

      Another behavior of commit() is that by default it expires the state of all instances present after the commit is complete. This is so that when the instances are next accessed, either through attribute access or by them being present in a Query result set, they receive the most recent state. To disable this behavior, configure sessionmaker with expire_on_commit=False.
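
      For example, a sketch of disabling this expiration, useful when the loaded attribute values are known to remain valid after the commit (assuming an engine object is available):

      # objects retain their loaded attribute values after commit(), rather
      # than being expired and re-loaded upon next access
      Session = sessionmaker(bind=engine, expire_on_commit=False)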

      Normally, instances loaded into the Session are never changed by subsequent queries; the assumption is that the current transaction is isolated so the state most recently loaded is correct as long as the transaction continues. Setting autocommit=True works against this model to some degree since the Session behaves in exactly the same way with regard to attribute state, except no transaction is present.

      Rolling Back

      rollback() rolls back the current transaction. With a default configured session, the post-rollback state of the session is as follows:

      • All transactions are rolled back and all connections returned to the connection pool, unless the Session was bound directly to a Connection, in which case the connection is still maintained (but still rolled back).
      • Objects which were initially in the pending state when they were added to the Session within the lifespan of the transaction are expunged, corresponding to their INSERT statement being rolled back. The state of their attributes remains unchanged.
      • Objects which were marked as deleted within the lifespan of the transaction are promoted back to the persistent state, corresponding to their DELETE statement being rolled back. Note that if those objects were first pending within the transaction, that operation takes precedence instead.
      • All objects not expunged are fully expired.

      With that state understood, the Session may safely continue usage after a rollback occurs.

      When a flush() fails, typically for reasons like primary key, foreign key, or “not nullable” constraint violations, a rollback() is issued automatically (it’s currently not possible for a flush to continue after a partial failure). However, the flush process always uses its own transactional demarcator called a subtransaction, which is described more fully in the docstrings for Session. What it means here is that even though the database transaction has been rolled back, the end user must still issue rollback() to fully reset the state of the Session.
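
      A minimal sketch of that pattern, assuming a pending object some_object that violates a constraint:

      from sqlalchemy.exc import IntegrityError

      session.add(some_object)
      try:
          session.flush()
      except IntegrityError:
          # the underlying database transaction has already been rolled
          # back, but an explicit rollback() is still required before the
          # Session can be used further
          session.rollback()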

      Expunging

      Expunge removes an object from the Session, sending persistent instances to the detached state, and pending instances to the transient state:

      session.expunge(obj1)

      To remove all items, call expunge_all() (this method was formerly known as clear()).

      Closing

      The close() method issues an expunge_all(), and releases any transactional/connection resources. When connections are returned to the connection pool, transactional state is rolled back as well.

      Refreshing / Expiring

      The Session normally works in the context of an ongoing transaction (with the default setting of autocommit=False). Most databases offer “isolated” transactions - this refers to a series of behaviors that allow the work within a transaction to remain consistent as time passes, regardless of the activities outside of that transaction. A key feature of a high degree of transaction isolation is that emitting the same SELECT statement twice will return the same results as when it was called the first time, even if the data has been modified in another transaction.

      For this reason, the Session gains very efficient behavior by loading the attributes of each instance only once. Subsequent reads of the same row in the same transaction are assumed to have the same value. The user application also gains directly from this assumption, in that the transaction is regarded as a temporary shield against concurrent changes - a good application will ensure that isolation levels are set appropriately such that this assumption can be made, given the kind of data being worked with.

      To clear out the currently loaded state on an instance, the instance or its individual attributes can be marked as “expired”, which causes a reload to occur upon next access of any of the instance’s attributes. The instance can also be immediately reloaded from the database. The expire() and refresh() methods achieve this:

      # immediately re-load attributes on obj1, obj2
      session.refresh(obj1)
      session.refresh(obj2)
      
      # expire objects obj1, obj2, attributes will be reloaded
      # on the next access:
      session.expire(obj1)
      session.expire(obj2)

      When an expired object reloads, all non-deferred column-based attributes are loaded in one query. Current behavior for expired relationship-based attributes is that they load individually upon access - this behavior may be enhanced in a future release. When a refresh is invoked on an object, the ultimate operation is equivalent to a Query.get(), so any relationships configured with eager loading should also load within the scope of the refresh operation.

      refresh() and expire() also support being passed a list of individual attribute names to be refreshed or expired. These names can refer to any attribute, column-based or relationship-based:

      # immediately re-load the attributes 'hello', 'world' on obj1, obj2
      session.refresh(obj1, ['hello', 'world'])
      session.refresh(obj2, ['hello', 'world'])
      
      # expire the attributes 'hello', 'world' on obj1, obj2; they will be reloaded
      # on the next access:
      session.expire(obj1, ['hello', 'world'])
      session.expire(obj2, ['hello', 'world'])

      The full contents of the session may be expired at once using expire_all():

      session.expire_all()

      Note that expire_all() is called automatically whenever commit() or rollback() are called. If using the session in its default mode of autocommit=False and with a well-isolated transactional environment (which is provided by most backends with the notable exception of MySQL MyISAM), there is virtually no reason to ever call expire_all() directly - plenty of state will remain on the current transaction until it is rolled back or committed or otherwise removed.

      refresh() and expire() similarly are usually only necessary when an UPDATE or DELETE has been issued manually within the transaction using Session.execute().
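
      For example, a sketch of expiring a single attribute after a manual UPDATE issued within the same transaction (assuming a users_table Table and a loaded user object):

      # issue a manual UPDATE within the session's transaction
      session.execute(
          users_table.update().
              where(users_table.c.id == user.id).
              values(name='new name')
      )

      # the in-memory object is now stale; expire the attribute so that the
      # new value is loaded upon next access
      session.expire(user, ['name'])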

      Session Attributes

      The Session itself acts somewhat like a set-like collection. All items present may be accessed using the iterator interface:

      for obj in session:
          print obj

      And presence may be tested for using regular “contains” semantics:

      if obj in session:
          print "Object is present"

      The session also keeps track of all newly created (i.e. pending) objects, all objects which have had changes since they were last loaded or saved (i.e. “dirty”), and everything that’s been marked as deleted:

      # pending objects recently added to the Session
      session.new
      
      # persistent objects which currently have changes detected
      # (this collection is now created on the fly each time the property is called)
      session.dirty
      
      # persistent objects that have been marked as deleted via session.delete(obj)
      session.deleted
      
      # dictionary of all persistent objects, keyed on their
      # identity key
      session.identity_map

      (Documentation: Session.new, Session.dirty, Session.deleted, Session.identity_map).

      Note that objects within the session are by default weakly referenced. This means that when they are dereferenced in the outside application, they fall out of scope from within the Session as well and are subject to garbage collection by the Python interpreter. The exceptions to this include objects which are pending, objects which are marked as deleted, or persistent objects which have pending changes on them. After a full flush, these collections are all empty, and all objects are again weakly referenced. To disable the weak referencing behavior and force all objects within the session to remain until explicitly expunged, configure sessionmaker with the weak_identity_map=False setting.
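
      A sketch of disabling the weak-referencing behavior:

      # objects remain in the Session until expunged explicitly, even if no
      # other references to them exist in the application
      Session = sessionmaker(weak_identity_map=False)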

      Cascades

      Mappers support the concept of configurable cascade behavior on relationship() constructs. This refers to how operations performed on a parent object relative to a particular Session should be propagated to items referred to by that relationship. The default cascade behavior is usually suitable for most situations, and the option is normally invoked explicitly in order to enable delete and delete-orphan cascades, which refer to how the relationship should be treated when the parent is marked for deletion as well as when a child is de-associated from its parent.

      Cascade behavior is configured by setting the cascade keyword argument on relationship():

      class Order(Base):
          __tablename__ = 'order'
      
          items = relationship("Item", cascade="all, delete-orphan")
          customer = relationship("User", secondary=user_orders_table,
                                      cascade="save-update")

      To set cascades on a backref, the same flag can be used with the backref() function, which ultimately feeds its arguments back into relationship():

      class Item(Base):
          __tablename__ = 'item'
      
          order = relationship("Order",
                          backref=backref("items", cascade="all, delete-orphan")
                      )

      The default value of cascade is save-update, merge. The all symbol in the cascade options indicates that all cascade flags should be enabled, with the exception of delete-orphan. Typically, cascade is left at its default, or configured as all, delete-orphan, indicating that the child objects should be treated as “owned” by the parent.

      The list of available values which can be specified in cascade are as follows:

      • save-update - Indicates that when an object is placed into a Session via Session.add(), all the objects associated with it via this relationship() should also be added to that same Session. Additionally, if this object is already present in a Session, child objects will be added to that session as they are associated with this parent, i.e. as they are appended to lists, added to sets, or otherwise associated with the parent.

        save-update cascade also cascades the pending history of the target attribute, meaning that objects which were removed from a scalar or collection attribute whose changes have not yet been flushed are also placed into the target session. This is because they may have foreign key attributes present which will need to be updated to no longer refer to the parent.

        The save-update cascade is on by default, and it’s common to not even be aware of it. It’s customary that only a single call to Session.add() against the lead object of a structure has the effect of placing the full structure of objects into the Session at once.

        However, it can be turned off, which would imply that objects associated with a parent would need to be placed individually using Session.add() calls for each one.

        Another default behavior of save-update cascade is that it will take effect in the reverse direction, that is, associating a child with a parent when a backref is present means both relationships are affected; the parent will be added to the child’s session. To disable this somewhat indirect session addition, use the cascade_backrefs=False option described below in Controlling Cascade on Backrefs.

      • delete - This cascade indicates that when the parent object is marked for deletion, the related objects should also be marked for deletion. Without this cascade present, SQLAlchemy will set the foreign key on a one-to-many relationship to NULL when the parent object is deleted. When enabled, the row is instead deleted.

        delete cascade is often used in conjunction with delete-orphan cascade, as is appropriate for an object whose foreign key is not intended to be nullable. On some backends, it’s also a good idea to set ON DELETE on the foreign key itself; see the section Using Passive Deletes for more details.

        Note that for many-to-many relationships which make usage of the secondary argument to relationship(), SQLAlchemy always emits a DELETE for the association row in between “parent” and “child”, when the parent is deleted or whenever the linkage between a particular parent and child is broken.

      • delete-orphan - This cascade adds behavior to the delete cascade, such that a child object will be marked for deletion when it is de-associated from the parent, not just when the parent is marked for deletion. This is a common feature when dealing with a related object that is “owned” by its parent, with a NOT NULL foreign key, so that removal of the item from the parent collection results in its deletion.

        delete-orphan cascade implies that each child object can only have one parent at a time, so is configured in the vast majority of cases on a one-to-many relationship. Setting it on a many-to-one or many-to-many relationship is more awkward; for this use case, SQLAlchemy requires that the relationship() be configured with the single_parent=True argument, which establishes Python-side validation ensuring that the object is associated with only one parent at a time (see the sketch following this list).

      • merge - This cascade indicates that the Session.merge() operation should be propagated from a parent that’s the subject of the Session.merge() call down to referred objects. This cascade is also on by default.

      • refresh-expire - A less common option, indicates that the Session.expire() operation should be propagated from a parent down to referred objects. When using Session.refresh(), the referred objects are expired only, but not actually refreshed.

      • expunge - Indicates that when the parent object is removed from the Session using Session.expunge(), the operation should be propagated down to referred objects.
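
      As referenced in the delete-orphan bullet above, a sketch of configuring delete-orphan on a many-to-one relationship using single_parent=True (the Preference class here is hypothetical):

      class Preference(Base):
          __tablename__ = 'preference'

          id = Column(Integer, primary_key=True)
          user_id = Column(Integer, ForeignKey('user.id'), nullable=False)

          # delete-orphan configured on the many-to-one side requires
          # single_parent=True, which adds Python-side validation that a
          # Preference is associated with at most one User at a time
          user = relationship("User",
                          single_parent=True,
                          cascade="all, delete-orphan")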

      Controlling Cascade on Backrefs

      The save-update cascade takes place on backrefs by default. This means that, given a mapping such as this:

      mapper(Order, order_table, properties={
          'items' : relationship(Item, backref='order')
      })

      If an Order is already in the session, and is assigned to the order attribute of an Item, the backref appends the Order to the items collection of that Order, resulting in the save-update cascade taking place:

      >>> o1 = Order()
      >>> session.add(o1)
      >>> o1 in session
      True
      
      >>> i1 = Item()
      >>> i1.order = o1
      >>> i1 in o1.items
      True
      >>> i1 in session
      True

      This behavior can be disabled using the cascade_backrefs flag:

      mapper(Order, order_table, properties={
          'items' : relationship(Item, backref='order',
                                      cascade_backrefs=False)
      })

      So above, the assignment of i1.order = o1 will append i1 to the items collection of o1, but will not add i1 to the session. You can, of course, add() i1 to the session at a later point. This option may be helpful for situations where an object needs to be kept out of a session until its construction is completed, but still needs to be given associations to objects which are already persistent in the target session.

      Managing Transactions

      A newly constructed Session may be said to be in the “begin” state. In this state, the Session has not established any connection or transactional state with any of the Engine objects that may be associated with it.

      The Session then receives requests to operate upon a database connection. Typically, this means it is called upon to execute SQL statements using a particular Engine, which may be via Session.query(), Session.execute(), or within a flush operation of pending data, which occurs when such state exists and Session.commit() or Session.flush() is called.

      As these requests are received, each new Engine encountered is associated with an ongoing transactional state maintained by the Session. When the first Engine is operated upon, the Session can be said to have left the “begin” state and entered “transactional” state. For each Engine encountered, a Connection is associated with it, which is acquired via the Engine.contextual_connect() method. If a Connection was directly associated with the Session (see Joining a Session into an External Transaction for an example of this), it is added to the transactional state directly.

      For each Connection, the Session also maintains a Transaction object, which is acquired by calling Connection.begin() on each Connection, or if the Session object has been established using the flag twophase=True, a TwoPhaseTransaction object acquired via Connection.begin_twophase(). These transactions are all committed or rolled back corresponding to the invocation of the Session.commit() and Session.rollback() methods. A commit operation will also call the TwoPhaseTransaction.prepare() method on all transactions if applicable.

      When the transactional state is completed after a rollback or commit, the Session releases all Transaction and Connection resources, and goes back to the “begin” state, which will again invoke new Connection and Transaction objects as new requests to emit SQL statements are received.

      The example below illustrates this lifecycle:

      engine = create_engine("...")
      Session = sessionmaker(bind=engine)
      
      # new session.   no connections are in use.
      session = Session()
      try:
          # first query.  a Connection is acquired
          # from the Engine, and a Transaction
          # started.
          item1 = session.query(Item).get(1)
      
          # second query.  the same Connection/Transaction
          # are used.
          item2 = session.query(Item).get(2)
      
          # pending changes are created.
          item1.foo = 'bar'
          item2.bar = 'foo'
      
          # commit.  The pending changes above
          # are flushed via flush(), the Transaction
          # is committed, the Connection object closed
          # and discarded, the underlying DBAPI connection
          # returned to the connection pool.
          session.commit()
      except:
          # on rollback, the same closure of state
          # as that of commit proceeds.
          session.rollback()
          raise

      Using SAVEPOINT

      SAVEPOINT transactions, if supported by the underlying engine, may be delineated using the begin_nested() method:

      Session = sessionmaker()
      session = Session()
      session.add(u1)
      session.add(u2)
      
      session.begin_nested() # establish a savepoint
      session.add(u3)
      session.rollback()  # rolls back u3, keeps u1 and u2
      
      session.commit() # commits u1 and u2

      begin_nested() may be called any number of times, which will issue a new SAVEPOINT with a unique identifier for each call. For each begin_nested() call, a corresponding rollback() or commit() must be issued.

      When begin_nested() is called, a flush() is unconditionally issued (regardless of the autoflush setting). This is so that when a rollback() occurs, the full state of the session is expired, thus causing all subsequent attribute/instance access to reference the full state of the Session right before begin_nested() was called.

      begin_nested(), in the same manner as the less often used begin() method, returns a transactional object which also works as a context manager. It can be succinctly used around individual record inserts in order to catch things like unique constraint exceptions:

      for record in records:
          try:
              with session.begin_nested():
                  session.merge(record)
          except:
              print "Skipped record %s" % record
      session.commit()

      Autocommit Mode

      The example of Session transaction lifecycle illustrated at the start of Managing Transactions applies to a Session configured in the default mode of autocommit=False. Constructing a Session with autocommit=True produces a Session placed into “autocommit” mode, where each SQL statement invoked by a Session.query() or Session.execute() occurs using a new connection from the connection pool, discarding it after results have been iterated. The Session.flush() operation still occurs within the scope of a single transaction, though this transaction is closed out after the Session.flush() operation completes.

      Warning

      “autocommit” mode should not be considered for general use. If used, it should always be combined with the usage of Session.begin() and Session.commit(), to ensure a transaction demarcation.

      Executing queries outside of a demarcated transaction is a legacy mode of usage, and can in some cases lead to concurrent connection checkouts.

      In the absence of a demarcated transaction, the Session cannot make appropriate decisions as to when autoflush should occur nor when auto-expiration should occur, so these features should be disabled with autoflush=False, expire_on_commit=False.

      Modern usage of “autocommit” is for framework integrations that need to control specifically when the “begin” state occurs. A session which is configured with autocommit=True may be placed into the “begin” state using the Session.begin() method. After the cycle completes upon Session.commit() or Session.rollback(), connection and transaction resources are released and the Session goes back into “autocommit” mode, until Session.begin() is called again:

      Session = sessionmaker(bind=engine, autocommit=True)
      session = Session()
      session.begin()
      try:
          item1 = session.query(Item).get(1)
          item2 = session.query(Item).get(2)
          item1.foo = 'bar'
          item2.bar = 'foo'
          session.commit()
      except:
          session.rollback()
          raise

      The Session.begin() method also returns a transactional token which is compatible with the Python 2.6 with statement:

      Session = sessionmaker(bind=engine, autocommit=True)
      session = Session()
      with session.begin():
          item1 = session.query(Item).get(1)
          item2 = session.query(Item).get(2)
          item1.foo = 'bar'
          item2.bar = 'foo'

      Using Subtransactions with Autocommit

      A subtransaction indicates usage of the Session.begin() method in conjunction with the subtransactions=True flag. This produces a non-transactional, delimiting construct that allows nesting of calls to begin() and commit(). Its purpose is to allow the construction of code that can function within a transaction both independently of any external code that starts a transaction, as well as within a block that has already demarcated a transaction.

      subtransactions=True is generally only useful in conjunction with autocommit, and is equivalent to the pattern described at Nesting of Transaction Blocks, where any number of functions can call Connection.begin() and Transaction.commit() as though they are the initiator of the transaction, but in fact may be participating in an already ongoing transaction:

      # method_a starts a transaction and calls method_b
      def method_a(session):
          session.begin(subtransactions=True)
          try:
              method_b(session)
              session.commit()  # transaction is committed here
          except:
              session.rollback() # rolls back the transaction
              raise
      
      # method_b also starts a transaction, but when
      # called from method_a participates in the ongoing
      # transaction.
      def method_b(session):
          session.begin(subtransactions=True)
          try:
              session.add(SomeObject('bat', 'lala'))
              session.commit()  # transaction is not committed yet
          except:
              session.rollback() # rolls back the transaction, in this case
                                 # the one that was initiated in method_a().
              raise
      
      # create a Session and call method_a
      session = Session(autocommit=True)
      method_a(session)
      session.close()

      Subtransactions are used by the Session.flush() process to ensure that the flush operation takes place within a transaction, regardless of autocommit. When autocommit is disabled, it is still useful in that it forces the Session into a “pending rollback” state, as a failed flush cannot be resumed in mid-operation, while the end user still maintains the “scope” of the transaction overall.

      Enabling Two-Phase Commit

      For backends which support two-phase operation (currently MySQL and PostgreSQL), the session can be instructed to use two-phase commit semantics. This will coordinate the committing of transactions across databases so that the transaction is either committed or rolled back in all databases. You can also prepare() the session for interacting with transactions not managed by SQLAlchemy. To use two-phase transactions, set the flag twophase=True on the session:

      engine1 = create_engine('postgresql://db1')
      engine2 = create_engine('postgresql://db2')
      
      Session = sessionmaker(twophase=True)
      
      # bind User operations to engine 1, Account operations to engine 2
      Session.configure(binds={User:engine1, Account:engine2})
      
      session = Session()
      
      # .... work with accounts and users
      
      # commit.  session will issue a flush to all DBs, and a prepare step to all DBs,
      # before committing both transactions
      session.commit()

      Embedding SQL Insert/Update Expressions into a Flush

      This feature allows the value of a database column to be set to a SQL expression instead of a literal value. It’s especially useful for atomic updates, calling stored procedures, etc. All you do is assign an expression to an attribute:

      class SomeClass(object):
          pass
      mapper(SomeClass, some_table)
      
      someobject = session.query(SomeClass).get(5)
      
      # set 'value' attribute to a SQL expression adding one
      someobject.value = some_table.c.value + 1
      
      # issues "UPDATE some_table SET value=value+1"
      session.commit()

      This technique works both for INSERT and UPDATE statements. After the flush/commit operation, the value attribute on someobject above is expired, so that when next accessed the newly generated value will be loaded from the database.
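
      A sketch of the INSERT case, assuming SomeClass also maps a created_at column (a hypothetical addition to some_table); the expression is rendered inline within the INSERT statement:

      from sqlalchemy import func

      newobject = SomeClass()

      # assign a SQL function expression rather than a literal value; the
      # INSERT will include now() (or the backend's equivalent) for created_at
      newobject.created_at = func.now()

      session.add(newobject)
      session.commit()

      # the attribute was expired by the flush; this access loads the
      # database-generated value
      print newobject.created_at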

      Using SQL Expressions with Sessions

      SQL expressions and strings can be executed via the Session within its transactional context. This is most easily accomplished using the execute() method, which returns a ResultProxy in the same manner as an Engine or Connection:

      Session = sessionmaker(bind=engine)
      session = Session()
      
      # execute a string statement
      result = session.execute("select * from table where id=:id", {'id':7})
      
      # execute a SQL expression construct
      result = session.execute(select([mytable]).where(mytable.c.id==7))

      The current Connection held by the Session is accessible using the connection() method:

      connection = session.connection()

      The examples above deal with a Session that’s bound to a single Engine or Connection. To execute statements using a Session which is bound either to multiple engines, or none at all (i.e. relies upon bound metadata), both execute() and connection() accept a mapper keyword argument, which is passed a mapped class or Mapper instance, which is used to locate the proper context for the desired engine:

      Session = sessionmaker()
      session = Session()
      
      # need to specify mapper or class when executing
      result = session.execute("select * from table where id=:id", {'id':7}, mapper=MyMappedClass)
      
      result = session.execute(select([mytable], mytable.c.id==7), mapper=MyMappedClass)
      
      connection = session.connection(MyMappedClass)

      Joining a Session into an External Transaction

      If a Connection is being used which is already in a transactional state (i.e. has a Transaction established), a Session can be made to participate within that transaction by just binding the Session to that Connection. The usual rationale for this is a test suite that allows ORM code to work freely with a Session, including the ability to call Session.commit(), where afterwards the entire database interaction is rolled back:

      from sqlalchemy.orm import sessionmaker
      from sqlalchemy import create_engine
      from unittest import TestCase
      
      # global application scope.  create Session class, engine
      Session = sessionmaker()
      
      engine = create_engine('postgresql://...')
      
      class SomeTest(TestCase):
          def setUp(self):
              # connect to the database
              self.connection = engine.connect()
      
              # begin a non-ORM transaction
              self.trans = self.connection.begin()
      
              # bind an individual Session to the connection
              self.session = Session(bind=self.connection)
      
          def test_something(self):
              # use the session in tests.
      
              self.session.add(Foo())
              self.session.commit()
      
          def tearDown(self):
              # rollback - everything that happened with the
              # Session above (including calls to commit())
              # is rolled back.
              self.trans.rollback()
              self.session.close()
      
              # return connection to the Engine
              self.connection.close()

      Above, we issue Session.commit() as well as Transaction.rollback(). This is an example of where we take advantage of the Connection object’s ability to maintain subtransactions, or nested begin/commit-or-rollback pairs where only the outermost begin/commit pair actually commits the transaction, or if the outermost block rolls back, everything is rolled back.

      Contextual/Thread-local Sessions

      Recall from the section When do I construct a Session, when do I commit it, and when do I close it? that the concept of “session scopes” was introduced, with an emphasis on web applications and the practice of linking the scope of a Session with that of a web request. Most modern web frameworks include integration tools so that the scope of the Session can be managed automatically, and these tools should be used as they are available.

      SQLAlchemy includes its own helper object, which helps with the establishment of user-defined Session scopes. It is also used by third-party integration systems to help construct their integration schemes.

      The object is the scoped_session object, and it represents a registry of Session objects. If you’re not familiar with the registry pattern, a good introduction can be found in Patterns of Enterprise Application Architecture.

      Note

      The scoped_session object is a very popular and useful object used by many SQLAlchemy applications. However, it is important to note that it presents only one approach to the issue of Session management. If you’re new to SQLAlchemy, and especially if the term “thread-local variable” seems strange to you, we recommend that if possible you first familiarize yourself with an off-the-shelf integration system such as Flask-SQLAlchemy or zope.sqlalchemy.

      A scoped_session is constructed by passing it a factory which can create new Session objects. A factory is just something that produces a new object when called, and in the case of Session, the most common factory is the sessionmaker, introduced earlier in this section. Below we illustrate this usage:

      >>> from sqlalchemy.orm import scoped_session
      >>> from sqlalchemy.orm import sessionmaker
      
      >>> session_factory = sessionmaker(bind=some_engine)
      >>> Session = scoped_session(session_factory)

      The scoped_session object we’ve created will now call upon the sessionmaker when we “call” the registry:

      >>> some_session = Session()

      Above, some_session is an instance of Session, which we can now use to talk to the database. This same Session is also present within the scoped_session registry we’ve created. If we call upon the registry a second time, we get back the same Session:

      >>> some_other_session = Session()
      >>> some_session is some_other_session
      True

      This pattern allows disparate sections of the application to call upon a global scoped_session, so that all those areas may share the same session without the need to pass it explicitly. The Session we’ve established in our registry will remain, until we explicitly tell our registry to dispose of it, by calling scoped_session.remove():

      >>> Session.remove()

      The scoped_session.remove() method first calls Session.close() on the current Session, which has the effect of releasing any connection/transactional resources owned by the Session first, then discarding the Session itself. “Releasing” here means that connections are returned to their connection pool and any transactional state is rolled back, ultimately using the rollback() method of the underlying DBAPI connection.

      At this point, the scoped_session object is “empty”, and will create a new Session when called again. As illustrated below, this is not the same Session we had before:

      >>> new_session = Session()
      >>> new_session is some_session
      False

      The above series of steps illustrates the idea of the “registry” pattern in a nutshell. With that basic idea in hand, we can discuss some of the details of how this pattern proceeds.

      Implicit Method Access

      The job of the scoped_session is simple: hold onto a Session for all who ask for it. As a means of producing more transparent access to this Session, the scoped_session also includes proxy behavior, meaning that the registry itself can be treated just like a Session directly; when methods are called on this object, they are proxied to the underlying Session being maintained by the registry:

      Session = scoped_session(some_factory)
      
      # equivalent to:
      #
      # session = Session()
      # print session.query(MyClass).all()
      #
      print Session.query(MyClass).all()

      The above code accomplishes the same task as that of acquiring the current Session by calling upon the registry, then using that Session.

      Thread-Local Scope

      Users who are familiar with multithreaded programming will note that representing anything as a global variable is usually a bad idea, as it implies that the global object will be accessed by many threads concurrently. The Session object is entirely designed to be used in a non-concurrent fashion, which in terms of multithreading means “only in one thread at a time”. So our above example of scoped_session usage, where the same Session object is maintained across multiple calls, suggests that some process needs to be in place such that multiple calls across many threads don’t actually get a handle to the same session. We call this notion thread local storage, which means that a special object is used that will maintain a distinct object for each application thread. Python provides this via the threading.local() construct. The scoped_session object by default uses this object as storage, so that a single Session is maintained for all who call upon the scoped_session registry, but only within the scope of a single thread. Callers who call upon the registry in a different thread get a Session instance that is local to that other thread.

      Using this technique, the scoped_session provides a quick and relatively simple (if one is familiar with thread-local storage) way of providing a single, global object in an application that is safe to be called upon from multiple threads.

      The scoped_session.remove() method, as always, removes the current Session associated with the thread, if any. However, one advantage of the threading.local() object is that if the application thread itself ends, the “storage” for that thread is also garbage collected. So it is in fact “safe” to use thread local scope with an application that spawns and tears down threads, without the need to call scoped_session.remove(). However, the scope of transactions themselves, i.e. ending them via Session.commit() or Session.rollback(), will usually still be something that must be explicitly arranged for at the appropriate time, unless the application actually ties the lifespan of a thread to the lifespan of a transaction.
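
      A brief sketch illustrating the per-thread behavior, using the Session registry from the earlier examples:

      import threading

      sessions_seen = {}

      def work():
          # each thread receives its own Session from the same registry
          sessions_seen[threading.current_thread().name] = Session()
          Session.remove()

      t1 = threading.Thread(target=work, name='t1')
      t2 = threading.Thread(target=work, name='t2')
      t1.start(); t2.start()
      t1.join(); t2.join()

      assert sessions_seen['t1'] is not sessions_seen['t2']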

      Using Thread-Local Scope with Web Applications

      As discussed in the section When do I construct a Session, when do I commit it, and when do I close it?, a web application is architected around the concept of a web request, and integrating such an application with the Session usually implies that the Session will be associated with that request. As it turns out, most Python web frameworks, with notable exceptions such as the asynchronous frameworks Twisted and Tornado, use threads in a simple way, such that a particular web request is received, processed, and completed within the scope of a single worker thread. When the request ends, the worker thread is released to a pool of workers where it is available to handle another request.

      This simple correspondence of web request and thread means that to associate a Session with a thread implies it is also associated with the web request running within that thread, and vice versa, provided that the Session is created only after the web request begins and torn down just before the web request ends. So it is a common practice to use scoped_session as a quick way to integrate the Session with a web application. The sequence diagram below illustrates this flow:

      Web Server          Web Framework        SQLAlchemy ORM Code
      --------------      --------------       ------------------------------
      startup        ->   Web framework        # Session registry is established
                          initializes          Session = scoped_session(sessionmaker())
      
      incoming
      web request    ->   web request     ->   # The registry is *optionally*
                          starts               # called upon explicitly to create
                                               # a Session local to the thread and/or request
                                               Session()
      
                                               # the Session registry can otherwise
                                               # be used at any time, creating the
                                               # request-local Session() if not present,
                                               # or returning the existing one
                                               Session.query(MyClass) # ...
      
                                               Session.add(some_object) # ...
      
                                               # if data was modified, commit the
                                               # transaction
                                               Session.commit()
      
                          web request ends  -> # the registry is instructed to
                                               # remove the Session
                                               Session.remove()
      
                          sends output      <-
      outgoing web    <-
      response

      Using the above flow, the process of integrating the Session with the web application has exactly two requirements:

      1. Create a single scoped_session registry when the web application first starts, ensuring that this object is accessible by the rest of the application.
      2. Ensure that scoped_session.remove() is called when the web request ends, usually by integrating with the web framework’s event system to establish an “on request end” event.

      As noted earlier, the above pattern is just one potential way to integrate a Session with a web framework, one which in particular makes the significant assumption that the web framework associates web requests with application threads. It is however strongly recommended that the integration tools provided with the web framework itself be used, if available, instead of scoped_session.

      In particular, while using a thread local can be convenient, it is preferable that the Session be associated directly with the request, rather than with the current thread. The next section on custom scopes details a more advanced configuration which can combine the usage of scoped_session with direct request based scope, or any kind of scope.

      Using Custom Created Scopes

      The scoped_session object’s default behavior of “thread local” scope is only one of many options on how to “scope” a Session. A custom scope can be defined based on any existing system of getting at “the current thing we are working with”.

      Suppose a web framework defines a library function get_current_request(). An application built using this framework can call this function at any time, and the result will be some kind of Request object that represents the current request being processed. If the Request object is hashable, then this function can be easily integrated with scoped_session to associate the Session with the request. Below we illustrate this in conjunction with a hypothetical event marker provided by the web framework on_request_end, which allows code to be invoked whenever a request ends:

      from my_web_framework import get_current_request, on_request_end
      from sqlalchemy.orm import scoped_session, sessionmaker
      
      Session = scoped_session(sessionmaker(bind=some_engine), scopefunc=get_current_request)
      
      @on_request_end
      def remove_session(req):
          Session.remove()

      Above, we instantiate scoped_session in the usual way, except that we pass our request-returning function as the “scopefunc”. This instructs scoped_session to use this function to generate a dictionary key whenever the registry is called upon to return the current Session. In this case it is particularly important that we ensure a reliable “remove” system is implemented, as this dictionary is not otherwise self-managed.

      Contextual Session API

      class sqlalchemy.orm.scoping.scoped_session(session_factory, scopefunc=None)

      Provides scoped management of Session objects.

      See Contextual/Thread-local Sessions for a tutorial.

      __call__(**kw)

      Return the current Session, creating it using the session factory if not present.

      Parameters: **kw – Keyword arguments will be passed to the session factory callable, if an existing Session is not present. If the Session is present and keyword arguments have been passed, InvalidRequestError is raised.
      __init__(session_factory, scopefunc=None)

      Construct a new scoped_session.

      Parameters:
      • session_factory – a factory to create new Session instances. This is usually, but not necessarily, an instance of sessionmaker.
      • scopefunc – optional function which defines the current scope. If not passed, the scoped_session object assumes “thread-local” scope, and will use a Python threading.local() in order to maintain the current Session. If passed, the function should return a hashable token; this token will be used as the key in a dictionary in order to store and retrieve the current Session.
      configure(**kwargs)

      reconfigure the sessionmaker used by this scoped_session.

      See sessionmaker.configure().

      query_property(query_cls=None)

      return a class property which produces a Query object against the class and the current Session when called.

      e.g.:

      Session = scoped_session(sessionmaker())
      
      class MyClass(object):
          query = Session.query_property()
      
      # after mappers are defined
      result = MyClass.query.filter(MyClass.name=='foo').all()

      Produces instances of the session’s configured query class by default. To override and use a custom implementation, provide a query_cls callable. The callable will be invoked with the class’s mapper as a positional argument and a session keyword argument.

      There is no limit to the number of query properties placed on a class.
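
      For example, a sketch of supplying a custom Query subclass via query_cls (the undeleted() helper and the deleted column are hypothetical):

      from sqlalchemy.orm import Query

      class FilteringQuery(Query):
          def undeleted(self):
              # assumes the mapped class has a boolean 'deleted' column
              return self.filter_by(deleted=False)

      class MyClass(object):
          query = Session.query_property(query_cls=FilteringQuery)

      # after mappers are defined
      result = MyClass.query.undeleted().all()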

      remove()

      Dispose of the current Session, if present.

      This will first call Session.close() method on the current Session, which releases any existing transactional/connection resources still being held; transactions specifically are rolled back. The Session is then discarded. Upon next usage within the same scope, the scoped_session will produce a new Session object.

      class sqlalchemy.util.ScopedRegistry(createfunc, scopefunc)

      A Registry that can store one or multiple instances of a single class on the basis of a “scope” function.

      The object implements __call__ as the “getter”, so by calling myregistry() the contained object is returned for the current scope.

      Parameters:
      • createfunc – a callable that returns a new object to be placed in the registry
      • scopefunc – a callable that will return a key to store/retrieve an object.
      __init__(createfunc, scopefunc)

      Construct a new ScopedRegistry.

      Parameters:
      • createfunc – A creation function that will generate a new value for the current scope, if none is present.
      • scopefunc – A function that returns a hashable token representing the current scope (such as, current thread identifier).
      clear()

      Clear the current scope, if any.

      has()

      Return True if an object is present in the current scope.

      set(obj)

      Set the value for the current scope.

      class sqlalchemy.util.ThreadLocalRegistry(createfunc)

      Bases: sqlalchemy.util._collections.ScopedRegistry

      A ScopedRegistry that uses a threading.local() variable for storage.

      Partitioning Strategies

      Simple Vertical Partitioning

      Vertical partitioning places different kinds of objects, or different tables, across multiple databases:

      engine1 = create_engine('postgresql://db1')
      engine2 = create_engine('postgresql://db2')
      
      Session = sessionmaker(twophase=True)
      
      # bind User operations to engine 1, Account operations to engine 2
      Session.configure(binds={User:engine1, Account:engine2})
      
      session = Session()

      Above, operations against either class will make usage of the Engine linked to that class. Upon a flush operation, similar rules take place to ensure each class is written to the right database.

      The transactions among the multiple databases can optionally be coordinated via two phase commit, if the underlying backend supports it. See Enabling Two-Phase Commit for an example.

      Custom Vertical Partitioning

      More comprehensive rule-based class-level partitioning can be built by overriding the Session.get_bind() method. Below we illustrate a custom Session which delivers the following rules:

      1. Flush operations are delivered to the engine named master.
      2. Operations on objects that subclass MyOtherClass all occur on the other engine.
      3. Read operations for all other classes occur on a random choice of the slave1 or slave2 database.
      engines = {
          'master':create_engine("sqlite:///master.db"),
          'other':create_engine("sqlite:///other.db"),
          'slave1':create_engine("sqlite:///slave1.db"),
          'slave2':create_engine("sqlite:///slave2.db"),
      }
      
      from sqlalchemy.orm import Session, sessionmaker
      import random
      
      class RoutingSession(Session):
          def get_bind(self, mapper=None, clause=None):
              if mapper and issubclass(mapper.class_, MyOtherClass):
                  return engines['other']
              elif self._flushing:
                  return engines['master']
              else:
                  return engines[
                      random.choice(['slave1','slave2'])
                  ]

      The above Session class is plugged in using the class_ argument to sessionmaker:

      Session = sessionmaker(class_=RoutingSession)

      This approach can be combined with multiple MetaData objects, using an approach such as that of using the declarative __abstract__ keyword, described at __abstract__.
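
      A hedged sketch of that combination, assuming two __abstract__ bases that each carry their own MetaData:

      from sqlalchemy import Column, Integer, MetaData, create_engine
      from sqlalchemy.orm import sessionmaker
      from sqlalchemy.ext.declarative import declarative_base

      Base = declarative_base()

      class DefaultBase(Base):
          __abstract__ = True
          metadata = MetaData()       # tables for the first database

      class OtherBase(Base):
          __abstract__ = True
          metadata = MetaData()       # tables for the second database

      class User(DefaultBase):
          __tablename__ = 'user'
          id = Column(Integer, primary_key=True)

      class Account(OtherBase):
          __tablename__ = 'account'
          id = Column(Integer, primary_key=True)

      engine1 = create_engine('sqlite:///db1.db')
      engine2 = create_engine('sqlite:///db2.db')
      DefaultBase.metadata.create_all(engine1)
      OtherBase.metadata.create_all(engine2)

      # route each class to the engine holding its tables
      Session = sessionmaker(binds={User: engine1, Account: engine2})
      session = Session()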

      Horizontal Partitioning

      Horizontal partitioning partitions the rows of a single table (or a set of tables) across multiple databases.

      See the “sharding” example: Horizontal Sharding.

      Sessions API

      Session and sessionmaker()

      class sqlalchemy.orm.session.sessionmaker(bind=None, class_=<class 'sqlalchemy.orm.session.Session'>, autoflush=True, autocommit=False, expire_on_commit=True, **kw)

      Bases: sqlalchemy.orm.session._SessionClassMethods

      A configurable Session factory.

      The sessionmaker factory generates new Session objects when called, creating them given the configurational arguments established here.

      e.g.:

      # global scope
      Session = sessionmaker(autoflush=False)
      
      # later, in a local scope, create and use a session:
      sess = Session()

      Any keyword arguments sent to the constructor itself will override the “configured” keywords:

      Session = sessionmaker()
      
      # bind an individual session to a connection
      sess = Session(bind=connection)

      The class also includes a method configure(), which can be used to specify additional keyword arguments to the factory, which will take effect for subsequent Session objects generated. This is usually used to associate one or more Engine objects with an existing sessionmaker factory before it is first used:

      # application starts
      Session = sessionmaker()
      
      # ... later
      engine = create_engine('sqlite:///foo.db')
      Session.configure(bind=engine)
      
      sess = Session()
      __call__(**local_kw)

      Produce a new Session object using the configuration established in this sessionmaker.

      In Python, the __call__ method is invoked on an object when it is “called” in the same way as a function:

      Session = sessionmaker()
      session = Session()  # invokes sessionmaker.__call__()
      __init__(bind=None, class_=<class 'sqlalchemy.orm.session.Session'>, autoflush=True, autocommit=False, expire_on_commit=True, **kw)

      Construct a new sessionmaker.

      All arguments here except for class_ correspond to arguments accepted by Session directly. See the Session.__init__() docstring for more details on parameters.

      Parameters:
      • bind – an Engine or other Connectable with which newly created Session objects will be associated.
      • class_ – class to use in order to create new Session objects. Defaults to Session.
      • autoflush – The autoflush setting to use with newly created Session objects.
      • autocommit – The autocommit setting to use with newly created Session objects.
      • expire_on_commit=True – the expire_on_commit setting to use with newly created Session objects.
      • **kw – all other keyword arguments are passed to the constructor of newly created Session objects.
      classmethod close_all()
      inherited from the close_all() method of _SessionClassMethods

      Close all sessions in memory.

      configure(**new_kw)

      (Re)configure the arguments for this sessionmaker.

      e.g.:

      Session = sessionmaker()
      
      Session.configure(bind=create_engine('sqlite://'))
      classmethod identity_key(*args, **kwargs)
      inherited from the identity_key() method of _SessionClassMethods

      Return an identity key.

      This is an alias of util.identity_key().

      classmethod object_session(instance)
      inherited from the object_session() method of _SessionClassMethods

      Return the Session to which an object belongs.

      This is an alias of object_session().

      class sqlalchemy.orm.session.Session(bind=None, autoflush=True, expire_on_commit=True, _enable_transaction_accounting=True, autocommit=False, twophase=False, weak_identity_map=True, binds=None, extension=None, query_cls=<class 'sqlalchemy.orm.query.Query'>)

      Bases: sqlalchemy.orm.session._SessionClassMethods

      Manages persistence operations for ORM-mapped objects.

      The Session’s usage paradigm is described at Using the Session.

      __init__(bind=None, autoflush=True, expire_on_commit=True, _enable_transaction_accounting=True, autocommit=False, twophase=False, weak_identity_map=True, binds=None, extension=None, query_cls=<class 'sqlalchemy.orm.query.Query'>)

      Construct a new Session.

      See also the sessionmaker function which is used to generate a Session-producing callable with a given set of arguments.

      Parameters:
      • autocommit

        Warning

        The autocommit flag is not for general use, and if it is used, queries should only be invoked within the span of a Session.begin() / Session.commit() pair. Executing queries outside of a demarcated transaction is a legacy mode of usage, and can in some cases lead to concurrent connection checkouts.

        Defaults to False. When True, the Session does not keep a persistent transaction running, and will acquire connections from the engine on an as-needed basis, returning them immediately after their use. Flushes will begin and commit (or possibly rollback) their own transaction if no transaction is present. When using this mode, the Session.begin() method is used to explicitly start transactions.

        See also

        Autocommit Mode

      • autoflush – When True, all query operations will issue a flush() call to this Session before proceeding. This is a convenience feature so that flush() need not be called repeatedly in order for database queries to retrieve results. It’s typical that autoflush is used in conjunction with autocommit=False. In this scenario, explicit calls to flush() are rarely needed; you usually only need to call commit() (which flushes) to finalize changes.
      • bind – An optional Engine or Connection to which this Session should be bound. When specified, all SQL operations performed by this session will execute via this connectable.
      • binds
        An optional dictionary which contains more granular
        “bind” information than the bind parameter provides. This dictionary can map individual Table instances as well as Mapper instances to individual Engine or Connection objects. Operations which proceed relative to a particular Mapper will consult this dictionary for the direct Mapper instance as well as the mapper’s mapped_table attribute in order to locate a connectable to use. The full resolution is described in the get_bind() method of Session. Usage looks like:
        Session = sessionmaker(binds={
            SomeMappedClass: create_engine('postgresql://engine1'),
            somemapper: create_engine('postgresql://engine2'),
            some_table: create_engine('postgresql://engine3'),
            })

        Also see the Session.bind_mapper() and Session.bind_table() methods.

      • class_ – Specify an alternate class other than sqlalchemy.orm.session.Session which should be used by the returned class. This is the only argument that is local to the sessionmaker() function, and is not sent directly to the constructor for Session.
      • _enable_transaction_accounting – Defaults to True. A legacy-only flag which when False disables all 0.5-style object accounting on transaction boundaries, including auto-expiry of instances on rollback and commit, maintenance of the “new” and “deleted” lists upon rollback, and autoflush of pending changes upon begin(), all of which are interdependent.
      • expire_on_commit – Defaults to True. When True, all instances will be fully expired after each commit(), so that all attribute/object access subsequent to a completed transaction will load from the most recent database state.
      • extension – An optional SessionExtension instance, or a list of such instances, which will receive pre- and post- commit and flush events, as well as a post-rollback event. Deprecated. Please see SessionEvents.
      • query_cls – Class which should be used to create new Query objects, as returned by the query() method. Defaults to Query.
      • twophase – When True, all transactions will be started as a “two phase” transaction, i.e. using the “two phase” semantics of the database in use along with an XID. During a commit(), after flush() has been issued for all attached databases, the prepare() method on each database’s TwoPhaseTransaction will be called. This allows each database to roll back the entire transaction, before each transaction is committed.
      • weak_identity_map – Defaults to True - when set to False, objects placed in the Session will be strongly referenced until explicitly removed or the Session is closed. Deprecated - this option is obsolete.
      add(instance, _warn=True)

      Place an object in the Session.

      Its state will be persisted to the database on the next flush operation.

      Repeated calls to add() will be ignored. The opposite of add() is expunge().

      add_all(instances)

      Add the given collection of instances to this Session.

      begin(subtransactions=False, nested=False)

      Begin a transaction on this Session.

      If this Session is already within a transaction, either a plain transaction or nested transaction, an error is raised, unless subtransactions=True or nested=True is specified.

      The subtransactions=True flag indicates that this begin() can create a subtransaction if a transaction is already in progress. For documentation on subtransactions, please see Using Subtransactions with Autocommit.

      The nested flag begins a SAVEPOINT transaction and is equivalent to calling begin_nested(). For documentation on SAVEPOINT transactions, please see Using SAVEPOINT.

      begin_nested()

      Begin a nested transaction on this Session.

      The target database(s) must support SQL SAVEPOINTs or a SQLAlchemy-supported vendor implementation of the idea.

      For documentation on SAVEPOINT transactions, please see Using SAVEPOINT.
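
      A brief sketch of the SAVEPOINT pattern; session is an existing Session and u1, u2 stand for any mapped instances (the backend must support SAVEPOINT):

      session.add(u1)

      session.begin_nested()        # establish a SAVEPOINT
      session.add(u2)
      session.rollback()            # rolls back to the SAVEPOINT; u2 is discarded

      session.commit()              # commits u1 and the enclosing transaction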

      bind_mapper(mapper, bind)

      Bind operations for a mapper to a Connectable.

      mapper
      A mapper instance or mapped class
      bind
      Any Connectable: an Engine or Connection.

      All subsequent operations involving this mapper will use the given bind.

      bind_table(table, bind)

      Bind operations on a Table to a Connectable.

      table
      A Table instance
      bind
      Any Connectable: an Engine or Connection.

      All subsequent operations involving this Table will use the given bind.

      close()

      Close this Session.

      This clears all items and ends any transaction in progress.

      If this session were created with autocommit=False, a new transaction is immediately begun. Note that this new transaction does not use any connection resources until they are first needed.

      classmethod close_all()
      inherited from the close_all() method of _SessionClassMethods

      Close all sessions in memory.

      commit()

      Flush pending changes and commit the current transaction.

      If no transaction is in progress, this method raises an InvalidRequestError.

      By default, the Session also expires all database loaded state on all ORM-managed attributes after transaction commit. This is so that subsequent operations load the most recent data from the database. This behavior can be disabled using the expire_on_commit=False option to sessionmaker or the Session constructor.

      If a subtransaction is in effect (which occurs when begin() is called multiple times), the subtransaction will be closed, and the next call to commit() will operate on the enclosing transaction.

      When using the Session in its default mode of autocommit=False, a new transaction will be begun immediately after the commit, but note that the newly begun transaction does not use any connection resources until the first SQL is actually emitted.

      See also

      Committing

      connection(mapper=None, clause=None, bind=None, close_with_result=False, **kw)

      Return a Connection object corresponding to this Session object’s transactional state.

      If this Session is configured with autocommit=False, either the Connection corresponding to the current transaction is returned, or if no transaction is in progress, a new one is begun and the Connection returned (note that no transactional state is established with the DBAPI until the first SQL statement is emitted).

      Alternatively, if this Session is configured with autocommit=True, an ad-hoc Connection is returned using Engine.contextual_connect() on the underlying Engine.

      Ambiguity in multi-bind or unbound Session objects can be resolved through any of the optional keyword arguments. This ultimately makes usage of the get_bind() method for resolution.

      Parameters:
      • bind – Optional Engine to be used as the bind. If this engine is already involved in an ongoing transaction, that connection will be used. This argument takes precedence over mapper, clause.
      • mapper – Optional mapper() mapped class, used to identify the appropriate bind. This argument takes precedence over clause.
      • clause – A ClauseElement (i.e. select(), text(), etc.) which will be used to locate a bind, if a bind cannot otherwise be identified.
      • close_with_result – Passed to Engine.connect(), indicating the Connection should be considered “single use”, automatically closing when the first result set is closed. This flag only has an effect if this Session is configured with autocommit=True and does not already have a transaction in progress.
      • **kw – Additional keyword arguments are sent to get_bind(), allowing additional arguments to be passed to custom implementations of get_bind().
      delete(instance)

      Mark an instance as deleted.

      The database delete operation occurs upon flush().

      deleted

      The set of all instances marked as ‘deleted’ within this Session

      dirty

      The set of all persistent instances considered dirty.

      E.g.:

      some_mapped_object in session.dirty

      Instances are considered dirty when they were modified but not deleted.

      Note that this ‘dirty’ calculation is ‘optimistic’; most attribute-setting or collection modification operations will mark an instance as ‘dirty’ and place it in this set, even if there is no net change to the attribute’s value. At flush time, the value of each attribute is compared to its previously saved value, and if there’s no net change, no SQL operation will occur (this is a more expensive operation so it’s only done at flush time).

      To check if an instance has actionable net changes to its attributes, use the Session.is_modified() method.

      enable_relationship_loading(obj)

      Associate an object with this Session for related object loading.

      Warning

      enable_relationship_loading() exists to serve special use cases and is not recommended for general use.

      Accesses of attributes mapped with relationship() will attempt to load a value from the database using this Session as the source of connectivity. The values will be loaded based on foreign key values present on this object - it follows that this functionality generally only works for many-to-one relationships.

      The object will be attached to this session, but will not participate in any persistence operations; its state for almost all purposes will remain either “transient” or “detached”, except for the case of relationship loading.

      Also note that backrefs will often not work as expected. Altering a relationship-bound attribute on the target object may not fire off a backref event, if the effective value is what was already loaded from a foreign-key-holding value.

      The Session.enable_relationship_loading() method supersedes the load_on_pending flag on relationship(). Unlike that flag, Session.enable_relationship_loading() allows an object to remain transient while still being able to load related items.

      To make a transient object associated with a Session via Session.enable_relationship_loading() pending, add it to the Session using Session.add() normally.

      Session.enable_relationship_loading() does not improve behavior when the ORM is used normally - object references should be constructed at the object level, not at the foreign key level, so that they are present in an ordinary way before flush() proceeds. This method is not intended for general use.

      New in version 0.8.
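
      A minimal sketch under the assumption of a simple User/Address mapping; the transient Address loads its parent via its foreign key value:

      from sqlalchemy import Column, ForeignKey, Integer, String, create_engine
      from sqlalchemy.orm import relationship, sessionmaker
      from sqlalchemy.ext.declarative import declarative_base

      Base = declarative_base()

      class User(Base):
          __tablename__ = 'users'
          id = Column(Integer, primary_key=True)
          name = Column(String(50))

      class Address(Base):
          __tablename__ = 'addresses'
          id = Column(Integer, primary_key=True)
          user_id = Column(Integer, ForeignKey('users.id'))
          user = relationship(User)

      engine = create_engine('sqlite://')
      Base.metadata.create_all(engine)
      session = sessionmaker(bind=engine)()

      session.add(User(id=5, name='ed'))
      session.commit()

      a1 = Address(user_id=5)                   # transient; never added
      session.enable_relationship_loading(a1)
      assert a1.user.name == 'ed'               # many-to-one loads via this Session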

      execute(clause, params=None, mapper=None, bind=None, **kw)

      Execute a SQL expression construct or string statement within the current transaction.

      Returns a ResultProxy representing results of the statement execution, in the same manner as that of an Engine or Connection.

      E.g.:

      result = session.execute(
                  user_table.select().where(user_table.c.id == 5)
              )

      execute() accepts any executable clause construct, such as select(), insert(), update(), delete(), and text(). Plain SQL strings can be passed as well, which in the case of Session.execute() only will be interpreted the same as if it were passed via a text() construct. That is, the following usage:

      result = session.execute(
                  "SELECT * FROM user WHERE id=:param",
                  {"param":5}
              )

      is equivalent to:

      from sqlalchemy import text
      result = session.execute(
                  text("SELECT * FROM user WHERE id=:param"),
                  {"param":5}
              )

      The second positional argument to Session.execute() is an optional parameter set. Similar to that of Connection.execute(), whether this is passed as a single dictionary, or a list of dictionaries, determines whether the DBAPI cursor’s execute() or executemany() is used to execute the statement. An INSERT construct may be invoked for a single row:

      result = session.execute(users.insert(), {"id": 7, "name": "somename"})

      or for multiple rows:

      result = session.execute(users.insert(), [
                              {"id": 7, "name": "somename7"},
                              {"id": 8, "name": "somename8"},
                              {"id": 9, "name": "somename9"}
                          ])

      The statement is executed within the current transactional context of this Session. The Connection which is used to execute the statement can also be acquired directly by calling the Session.connection() method. Both methods use a rule-based resolution scheme in order to determine the Connection, which in the average case is derived directly from the “bind” of the Session itself, and in other cases can be based on the mapper() and Table objects passed to the method; see the documentation for Session.get_bind() for a full description of this scheme.

      The Session.execute() method does not invoke autoflush.

      The ResultProxy returned by the Session.execute() method is returned with the “close_with_result” flag set to true; the significance of this flag is that if this Session is autocommitting and does not have a transaction-dedicated Connection available, a temporary Connection is established for the statement execution, which is closed (meaning, returned to the connection pool) when the ResultProxy has consumed all available data. This applies only when the Session is configured with autocommit=True and no transaction has been started.

      Parameters:
      • clause – An executable statement (i.e. an Executable expression such as expression.select()) or string SQL statement to be executed.
      • params – Optional dictionary, or list of dictionaries, containing bound parameter values. If a single dictionary, single-row execution occurs; if a list of dictionaries, an “executemany” will be invoked. The keys in each dictionary must correspond to parameter names present in the statement.
      • mapper – Optional mapper() or mapped class, used to identify the appropriate bind. This argument takes precedence over clause when locating a bind. See Session.get_bind() for more details.
      • bind – Optional Engine to be used as the bind. If this engine is already involved in an ongoing transaction, that connection will be used. This argument takes precedence over mapper and clause when locating a bind.
      • **kw – Additional keyword arguments are sent to Session.get_bind() to allow extensibility of “bind” schemes.

      See also

      SQL Expression Language Tutorial - Tutorial on using Core SQL constructs.

      Working with Engines and Connections - Further information on direct statement execution.

      Connection.execute() - core level statement execution method, which Session.execute() ultimately uses in order to execute the statement.

      expire(instance, attribute_names=None)

      Expire the attributes on an instance.

      Marks the attributes of an instance as out of date. When an expired attribute is next accessed, a query will be issued to the Session object’s current transactional context in order to load all expired attributes for the given instance. Note that a highly isolated transaction will return the same values as were previously read in that same transaction, regardless of changes in database state outside of that transaction.

      To expire all objects in the Session simultaneously, use Session.expire_all().

      The Session object’s default behavior is to expire all state whenever the Session.rollback() or Session.commit() methods are called, so that new state can be loaded for the new transaction. For this reason, calling Session.expire() only makes sense for the specific case that a non-ORM SQL statement was emitted in the current transaction.

      Parameters:
      • instance – The instance to be refreshed.
      • attribute_names – optional list of string attribute names indicating a subset of attributes to be expired.
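
      For instance, a brief sketch; obj stands in for any persistent instance already present in the Session:

      session.expire(obj)                # expire all attributes
      session.expire(obj, ['name'])      # expire only the 'name' attribute
      # the next access of an expired attribute emits a SELECT to re-load it
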
      expire_all()

      Expires all persistent instances within this Session.

      When any attribute on a persistent instance is next accessed, a query will be issued using the Session object’s current transactional context in order to load all expired attributes for the given instance. Note that a highly isolated transaction will return the same values as were previously read in that same transaction, regardless of changes in database state outside of that transaction.

      To expire individual objects and individual attributes on those objects, use Session.expire().

      The Session object’s default behavior is to expire all state whenever the Session.rollback() or Session.commit() methods are called, so that new state can be loaded for the new transaction. For this reason, calling Session.expire_all() should not be needed when autocommit is False, assuming the transaction is isolated.

      expunge(instance)

      Remove the instance from this Session.

      This will free all internal references to the instance. Cascading will be applied according to the expunge cascade rule.

      expunge_all()

      Remove all object instances from this Session.

      This is equivalent to calling expunge(obj) on all objects in this Session.

      flush(objects=None)

      Flush all the object changes to the database.

      Writes out all pending object creations, deletions and modifications to the database as INSERTs, DELETEs, UPDATEs, etc. Operations are automatically ordered by the Session’s unit of work dependency solver.

      Database operations will be issued in the current transactional context and do not affect the state of the transaction, unless an error occurs, in which case the entire transaction is rolled back. You may flush() as often as you like within a transaction to move changes from Python to the database’s transaction buffer.

      For autocommit Sessions with no active manual transaction, flush() will create a transaction on the fly that surrounds the entire set of operations in the flush.

      Parameters:objects

      Optional; restricts the flush operation to operate only on elements that are in the given collection.

      This feature is for an extremely narrow set of use cases where particular objects may need to be operated upon before the full flush() occurs. It is not intended for general use.

      get_bind(mapper=None, clause=None)

      Return a “bind” to which this Session is bound.

      The “bind” is usually an instance of Engine, except in the case where the Session has been explicitly bound directly to a Connection.

      For a multiply-bound or unbound Session, the mapper or clause arguments are used to determine the appropriate bind to return.

      Note that the “mapper” argument is usually present when Session.get_bind() is called via an ORM operation such as a Session.query(), each individual INSERT/UPDATE/DELETE operation within a Session.flush() call, etc.

      The order of resolution is:

      1. if mapper given and session.binds is present, locate a bind based on mapper.
      2. if clause given and session.binds is present, locate a bind based on Table objects found in the given clause present in session.binds.
      3. if session.bind is present, return that.
      4. if clause given, attempt to return a bind linked to the MetaData ultimately associated with the clause.
      5. if mapper given, attempt to return a bind linked to the MetaData ultimately associated with the Table or other selectable to which the mapper is mapped.
      6. No bind can be found, UnboundExecutionError is raised.
      Parameters:
      • mapper – Optional mapper() mapped class or instance of Mapper. The bind can be derived from a Mapper first by consulting the “binds” map associated with this Session, and secondly by consulting the MetaData associated with the Table to which the Mapper is mapped for a bind.
      • clause – A ClauseElement (i.e. select(), text(), etc.). If the mapper argument is not present or could not produce a bind, the given expression construct will be searched for a bound element, typically a Table associated with bound MetaData.
      classmethod identity_key(*args, **kwargs)
      inherited from the identity_key() method of _SessionClassMethods

      Return an identity key.

      This is an alias of util.identity_key().

      identity_map = None

      A mapping of object identities to objects themselves.

      Iterating through Session.identity_map.values() provides access to the full set of persistent objects (i.e., those that have row identity) currently in the session.

      See also

      identity_key() - helper function to produce the keys used in this dictionary.

      is_active

      True if this Session is in “transaction mode” and is not in “partial rollback” state.

      The Session in its default mode of autocommit=False is essentially always in “transaction mode”, in that a SessionTransaction is associated with it as soon as it is instantiated. This SessionTransaction is immediately replaced with a new one as soon as it is ended, due to a rollback, commit, or close operation.

      “Transaction mode” does not indicate whether or not actual database connection resources are in use; the SessionTransaction object coordinates among zero or more actual database transactions, and starts out with none, accumulating individual DBAPI connections as different data sources are used within its scope. The best way to track when a particular Session has actually begun to use DBAPI resources is to implement a listener using the SessionEvents.after_begin() method, which will deliver both the Session as well as the target Connection to a user-defined event listener.

      The “partial rollback” state refers to when an “inner” transaction, typically used during a flush, encounters an error and emits a rollback of the DBAPI connection. At this point, the Session is in “partial rollback” and awaits a call to Session.rollback() in order to close out the transaction stack. It is in this “partial rollback” period that the is_active flag returns False. After the call to Session.rollback(), the SessionTransaction is replaced with a new one and is_active returns True again.

      When a Session is used in autocommit=True mode, the SessionTransaction is only instantiated within the scope of a flush call, or when Session.begin() is called. So is_active will always be False outside of a flush or Session.begin() block in this mode, and will be True within the Session.begin() block as long as it doesn’t enter “partial rollback” state.

      From all the above, it follows that the only purpose of this flag is for application frameworks that wish to detect whether a “rollback” is necessary within a generic error handling routine, for Session objects that would otherwise be in “partial rollback” mode. In a typical integration case, this is also not necessary as it is standard practice to emit Session.rollback() unconditionally within the outermost exception catch.

      To track the transactional state of a Session fully, use event listeners, primarily the SessionEvents.after_begin(), SessionEvents.after_commit(), SessionEvents.after_rollback() and related events.

      is_modified(instance, include_collections=True, passive=True)

      Return True if the given instance has locally modified attributes.

      This method retrieves the history for each instrumented attribute on the instance and performs a comparison of the current value to its previously committed value, if any.

      It is in effect a more expensive and accurate version of checking for the given instance in the Session.dirty collection; a full test for each attribute’s net “dirty” status is performed.

      E.g.:

      return session.is_modified(someobject)

      Changed in version 0.8: When using SQLAlchemy 0.7 and earlier, the passive flag should always be explicitly set to True, else SQL loads/autoflushes may proceed which can affect the modified state itself: session.is_modified(someobject, passive=True). In 0.8 and above, the behavior is corrected and this flag is ignored.

      A few caveats to this method apply:

      • Instances present in the Session.dirty collection may report False when tested with this method. This is because the object may have received change events via attribute mutation, thus placing it in Session.dirty, but ultimately the state is the same as that loaded from the database, resulting in no net change here.

      • Scalar attributes may not have recorded the previously set value when a new value was applied, if the attribute was not loaded, or was expired, at the time the new value was received - in these cases, the attribute is assumed to have a change, even if there is ultimately no net change against its database value. SQLAlchemy in most cases does not need the “old” value when a set event occurs, so it skips the expense of a SQL call if the old value isn’t present, based on the assumption that an UPDATE of the scalar value is usually needed, and in those few cases where it isn’t, is less expensive on average than issuing a defensive SELECT.

        The “old” value is fetched unconditionally upon set only if the attribute container has the active_history flag set to True. This flag is set typically for primary key attributes and scalar object references that are not a simple many-to-one. To set this flag for any arbitrary mapped column, use the active_history argument with column_property().

      Parameters:
      • instance – mapped instance to be tested for pending changes.
      • include_collections – Indicates if multivalued collections should be included in the operation. Setting this to False is a way to detect only local-column based properties (i.e. scalar columns or many-to-one foreign keys) that would result in an UPDATE for this instance upon flush.
      • passive

        Changed in version 0.8: Ignored for backwards compatibility. When using SQLAlchemy 0.7 and earlier, this flag should always be set to True.

      merge(instance, load=True)

      Copy the state of a given instance into a corresponding instance within this Session.

      Session.merge() examines the primary key attributes of the source instance, and attempts to reconcile it with an instance of the same primary key in the session. If not found locally, it attempts to load the object from the database based on primary key, and if none can be located, creates a new instance. The state of each attribute on the source instance is then copied to the target instance. The resulting target instance is then returned by the method; the original source instance is left unmodified, and un-associated with the Session if not already.

      This operation cascades to associated instances if the association is mapped with cascade="merge".

      See Merging for a detailed discussion of merging.

      Parameters:
      • instance – Instance to be merged.
      • load

        Boolean, when False, merge() switches into a “high performance” mode which causes it to forego emitting history events as well as all database access. This flag is used for cases such as transferring graphs of objects into a Session from a second level cache, or to transfer just-loaded objects into the Session owned by a worker thread or process without re-querying the database.

        The load=False use case adds the caveat that the given object has to be in a “clean” state, that is, has no pending changes to be flushed - even if the incoming object is detached from any Session. This is so that when the merge operation populates local attributes and cascades to related objects and collections, the values can be “stamped” onto the target object as is, without generating any history or attribute events, and without the need to reconcile the incoming data with any existing related objects or collections that might not be loaded. The resulting objects from load=False are always produced as “clean”, so it is only appropriate that the given objects should be “clean” as well, else this suggests a mis-use of the method.
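
      For instance, a brief sketch; detached_user is assumed to be an instance loaded by a previous Session and now detached:

      managed_user = session.merge(detached_user)
      assert managed_user in session    # the returned copy is managed by this Session
      session.commit()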

      new

      The set of all instances marked as ‘new’ within this Session.

      no_autoflush

      Return a context manager that disables autoflush.

      e.g.:

      with session.no_autoflush:
      
          some_object = SomeClass()
          session.add(some_object)
          # won't autoflush
          some_object.related_thing = session.query(SomeRelated).first()

      Operations that proceed within the with: block will not be subject to flushes occurring upon query access. This is useful when initializing a series of objects which involve existing database queries, where the uncompleted object should not yet be flushed.

      New in version 0.7.6.

      classmethod object_session(instance)
      inherited from the object_session() method of _SessionClassMethods

      Return the Session to which an object belongs.

      This is an alias of object_session().

      prepare()

      Prepare the current transaction in progress for two phase commit.

      If no transaction is in progress, this method raises an InvalidRequestError.

      Only root transactions of two phase sessions can be prepared. If the current transaction is not such, an InvalidRequestError is raised.

      prune()

      Remove unreferenced instances cached in the identity map.

      Deprecated since version 0.7: The non-weak-referencing identity map feature is no longer needed.

      Note that this method is only meaningful if “weak_identity_map” is set to False. The default weak identity map is self-pruning.

      Removes any object in this Session’s identity map that is not referenced in user code, modified, new or scheduled for deletion. Returns the number of objects pruned.

      query(*entities, **kwargs)

      Return a new Query object corresponding to this Session.

      refresh(instance, attribute_names=None, lockmode=None)

      Expire and refresh the attributes on the given instance.

      A query will be issued to the database and all attributes will be refreshed with their current database value.

      Lazy-loaded relational attributes will remain lazily loaded, so that the instance-wide refresh operation will be followed immediately by the lazy load of that attribute.

      Eagerly-loaded relational attributes will eagerly load within the single refresh operation.

      Note that a highly isolated transaction will return the same values as were previously read in that same transaction, regardless of changes in database state outside of that transaction - usage of refresh() usually only makes sense if non-ORM SQL statements were emitted in the ongoing transaction, or if autocommit mode is turned on.

      Parameters:
      • attribute_names – optional. An iterable collection of string attribute names indicating a subset of attributes to be refreshed.
      • lockmode – Passed to the Query as used by with_lockmode().
      rollback()

      Rollback the current transaction in progress.

      If no transaction is in progress, this method is a pass-through.

      This method rolls back the current transaction or nested transaction regardless of subtransactions being in effect. All subtransactions up to the first real transaction are closed. Subtransactions occur when begin() is called multiple times.

      See also

      Rolling Back

      scalar(clause, params=None, mapper=None, bind=None, **kw)

      Like execute() but return a scalar result.

      transaction = None

      The current active or inactive SessionTransaction.

      class sqlalchemy.orm.session.SessionTransaction(session, parent=None, nested=False)

      A Session-level transaction.

      SessionTransaction is a mostly behind-the-scenes object not normally referenced directly by application code. It coordinates among multiple Connection objects, maintaining a database transaction for each one individually, committing or rolling them back all at once. It also provides optional two-phase commit behavior which can augment this coordination operation.

      The Session.transaction attribute of Session refers to the current SessionTransaction object in use, if any.

      A SessionTransaction is associated with a Session in its default mode of autocommit=False immediately, associated with no database connections. As the Session is called upon to emit SQL on behalf of various Engine or Connection objects, a corresponding Connection and associated Transaction is added to a collection within the SessionTransaction object, becoming one of the connection/transaction pairs maintained by the SessionTransaction.

      The lifespan of the SessionTransaction ends when the Session.commit(), Session.rollback() or Session.close() methods are called. At this point, the SessionTransaction removes its association with its parent Session. A Session that is in autocommit=False mode will create a new SessionTransaction to replace it immediately, whereas a Session that’s in autocommit=True mode will remain without a SessionTransaction until the Session.begin() method is called.

      Another detail of SessionTransaction behavior is that it is capable of “nesting”. This means that the Session.begin() method can be called while an existing SessionTransaction is already present, producing a new SessionTransaction that temporarily replaces the parent SessionTransaction. When a SessionTransaction is produced as nested, it assigns itself to the Session.transaction attribute. When it is ended via Session.commit() or Session.rollback(), it restores its parent SessionTransaction back onto the Session.transaction attribute. The behavior is effectively a stack, where Session.transaction refers to the current head of the stack.

      The purpose of this stack is to allow nesting of Session.rollback() or Session.commit() calls in context with various flavors of Session.begin(). This nesting behavior applies when Session.begin_nested() is used to emit a SAVEPOINT transaction, and is also used to produce a so-called “subtransaction” which allows a block of code to use a begin/rollback/commit sequence regardless of whether or not its enclosing code block has begun a transaction. The flush() method, whether called explicitly or via autoflush, is the primary consumer of the “subtransaction” feature, in that it wishes to guarantee that it works within a transaction block regardless of whether or not the Session is in transactional mode when the method is called.

      See also:

      Session.rollback()

      Session.commit()

      Session.begin()

      Session.begin_nested()

      Session.is_active

      SessionEvents.after_commit()

      SessionEvents.after_rollback()

      SessionEvents.after_soft_rollback()

      Session Utilities

      sqlalchemy.orm.session.make_transient(instance)

      Make the given instance ‘transient’.

      This will remove its association with any session and additionally will remove its “identity key”, such that it’s as though the object were newly constructed, except retaining its values. It also resets the “deleted” flag on the state if this object had been explicitly deleted by its session.

      Attributes which were “expired” or deferred at the instance level are reverted to undefined, and will not trigger any loads.
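
      For instance, a brief sketch; some_user is assumed to be a persistent or detached mapped instance:

      from sqlalchemy.orm.session import make_transient

      make_transient(some_user)
      # some_user now has no identity key and no Session association; it
      # behaves like a newly constructed object that retains its attribute values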

      sqlalchemy.orm.session.object_session(instance)

      Return the Session to which instance belongs.

      If the instance is not a mapped instance, an error is raised.

      Attribute and State Management Utilities

      These functions are provided by the SQLAlchemy attribute instrumentation API to provide a detailed interface for dealing with instances, attribute values, and history. Some of them are useful when constructing event listener functions, such as those described in ORM Events.

      sqlalchemy.orm.util.object_state(instance)

      Given an object, return the InstanceState associated with the object.

      Raises sqlalchemy.orm.exc.UnmappedInstanceError if no mapping is configured.

      Equivalent functionality is available via the inspect() function as:

      inspect(instance)

      Using the inspection system will raise sqlalchemy.exc.NoInspectionAvailable if the instance is not part of a mapping.

      sqlalchemy.orm.attributes.del_attribute(instance, key)

      Delete the value of an attribute, firing history events.

      This function may be used regardless of instrumentation applied directly to the class, i.e. no descriptors are required. Custom attribute management schemes will need to make usage of this method to establish attribute state as understood by SQLAlchemy.

      sqlalchemy.orm.attributes.get_attribute(instance, key)

      Get the value of an attribute, firing any callables required.

      This function may be used regardless of instrumentation applied directly to the class, i.e. no descriptors are required. Custom attribute management schemes will need to use this method to make use of attribute state as understood by SQLAlchemy.

      sqlalchemy.orm.attributes.get_history(obj, key, passive=<symbol 'PASSIVE_OFF'>)

      Return a History record for the given object and attribute key.

      Parameters:
      • obj – an object whose class is instrumented by the attributes package.
      • key – string attribute name.
      • passive – indicates loading behavior for the attribute if the value is not already present. This is a bitflag attribute, which defaults to the symbol PASSIVE_OFF indicating all necessary SQL should be emitted.
      sqlalchemy.orm.attributes.init_collection(obj, key)

      Initialize a collection attribute and return the collection adapter.

      This function is used to provide direct access to collection internals for a previously unloaded attribute. e.g.:

      collection_adapter = init_collection(someobject, 'elements')
      for elem in values:
          collection_adapter.append_without_event(elem)

      For an easier way to do the above, see set_committed_value().

      obj is an instrumented object instance. An InstanceState is accepted directly for backwards compatibility but this usage is deprecated.

      sqlalchemy.orm.attributes.flag_modified(instance, key)

      Mark an attribute on an instance as ‘modified’.

      This sets the ‘modified’ flag on the instance and establishes an unconditional change event for the given attribute.

      sqlalchemy.orm.attributes.instance_state()

      Return the InstanceState for a given mapped object.

      This function is the internal version of object_state(). The object_state() and/or the inspect() function are preferred here, as they each emit an informative exception if the given object is not mapped.

      sqlalchemy.orm.instrumentation.is_instrumented(instance, key)

      Return True if the given attribute on the given instance is instrumented by the attributes package.

      This function may be used regardless of instrumentation applied directly to the class, i.e. no descriptors are required.

      sqlalchemy.orm.attributes.set_attribute(instance, key, value)

      Set the value of an attribute, firing history events.

      This function may be used regardless of instrumentation applied directly to the class, i.e. no descriptors are required. Custom attribute management schemes will need to make usage of this method to establish attribute state as understood by SQLAlchemy.

      sqlalchemy.orm.attributes.set_committed_value(instance, key, value)

      Set the value of an attribute with no history events.

      Cancels any previous history present. The value should be a scalar value for scalar-holding attributes, or an iterable for any collection-holding attribute.

      This is the same underlying method used when a lazy loader fires off and loads additional data from the database. In particular, this method can be used by application code which has loaded additional attributes or collections through separate queries, which can then be attached to an instance as though it were part of its original loaded state.
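
      For instance, a brief sketch; user_obj is a mapped instance and addresses is a list loaded by a separate query:

      from sqlalchemy.orm.attributes import set_committed_value

      # attach the separately-loaded collection as though it were part of the
      # original loaded state; no history or change events are generated
      set_committed_value(user_obj, 'addresses', addresses)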

      class sqlalchemy.orm.attributes.History

      Bases: sqlalchemy.orm.attributes.History

      A 3-tuple of added, unchanged and deleted values, representing the changes which have occurred on an instrumented attribute.

      The easiest way to get a History object for a particular attribute on an object is to use the inspect() function:

      from sqlalchemy import inspect
      
      hist = inspect(myobject).attrs.myattribute.history

      Each tuple member is an iterable sequence:

      • added - the collection of items added to the attribute (the first tuple element).
      • unchanged - the collection of items that have not changed on the attribute (the second tuple element).
      • deleted - the collection of items that have been removed from the attribute (the third tuple element).
      empty()

      Return True if this History has no changes and no existing, unchanged state.

      has_changes()

      Return True if this History has changes.

      non_added()

      Return a collection of unchanged + deleted.

      non_deleted()

      Return a collection of added + unchanged.

      sum()

      Return a collection of added + unchanged + deleted.
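
      For instance, a brief sketch using get_history(); some_user is a persistent instance whose name attribute has been changed in memory but not yet flushed:

      from sqlalchemy.orm.attributes import get_history

      hist = get_history(some_user, 'name')
      hist.has_changes()     # True
      hist.added             # e.g. ['new name']
      hist.deleted           # e.g. ['old name']
      hist.unchanged         # ()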

      Object Relational Tutorial

      The SQLAlchemy Object Relational Mapper presents a method of associating user-defined Python classes with database tables, and instances of those classes (objects) with rows in their corresponding tables. It includes a system that transparently synchronizes all changes in state between objects and their related rows, called a unit of work, as well as a system for expressing database queries in terms of the user defined classes and their defined relationships between each other.

      The ORM is in contrast to the SQLAlchemy Expression Language, upon which the ORM is constructed. Whereas the SQL Expression Language, introduced in SQL Expression Language Tutorial, presents a system of representing the primitive constructs of the relational database directly without opinion, the ORM presents a high level and abstracted pattern of usage, which itself is an example of applied usage of the Expression Language.

      While there is overlap among the usage patterns of the ORM and the Expression Language, the similarities are more superficial than they may at first appear. One approaches the structure and content of data from the perspective of a user-defined domain model which is transparently persisted and refreshed from its underlying storage model. The other approaches it from the perspective of literal schema and SQL expression representations which are explicitly composed into messages consumed individually by the database.

      A successful application may be constructed using the Object Relational Mapper exclusively. In advanced situations, an application constructed with the ORM may make occasional usage of the Expression Language directly in certain areas where specific database interactions are required.

      The following tutorial is in doctest format, meaning each >>> line represents something you can type at a Python command prompt, and the following text represents the expected return value.

      Version Check

      A quick check to verify that we are on at least version 0.8 of SQLAlchemy:

      >>> import sqlalchemy
      >>> sqlalchemy.__version__ 
      0.8.0

      Connecting

      For this tutorial we will use an in-memory-only SQLite database. To connect we use create_engine():

      >>> from sqlalchemy import create_engine
      >>> engine = create_engine('sqlite:///:memory:', echo=True)

      The echo flag is a shortcut to setting up SQLAlchemy logging, which is accomplished via Python’s standard logging module. With it enabled, we’ll see all the generated SQL produced. If you are working through this tutorial and want less output generated, set it to False. This tutorial will format the SQL behind a popup window so it doesn’t get in our way; just click the “SQL” links to see what’s being generated.

      The return value of create_engine() is an instance of Engine, and it represents the core interface to the database, adapted through a dialect that handles the details of the database and DBAPI in use. In this case the SQLite dialect will interpret instructions to the Python built-in sqlite3 module.

      The Engine has not actually tried to connect to the database yet; that happens only the first time it is asked to perform a task against the database. We can illustrate this by asking it to perform a simple SELECT statement:

      >>> engine.execute("select 1").scalar()
      1

      As the Engine.execute() method is called, the Engine establishes a connection to the SQLite database, which is then used to emit the SQL. The connection is then returned to an internal connection pool where it will be reused on subsequent statement executions. While we illustrate direct usage of the Engine here, this isn’t typically necessary when using the ORM, where the Engine, once created, is used behind the scenes by the ORM as we’ll see shortly.

      Declare a Mapping

      When using the ORM, the configurational process starts by describing the database tables we’ll be dealing with, and then by defining our own classes which will be mapped to those tables. In modern SQLAlchemy, these two tasks are usually performed together, using a system known as Declarative, which allows us to create classes that include directives to describe the actual database table they will be mapped to.

      Classes mapped using the Declarative system are defined in terms of a base class which maintains a catalog of classes and tables relative to that base - this is known as the declarative base class. Our application will usually have just one instance of this base in a commonly imported module. We create the base class using the declarative_base() function, as follows:

      >>> from sqlalchemy.ext.declarative import declarative_base
      
      >>> Base = declarative_base()

      Now that we have a “base”, we can define any number of mapped classes in terms of it. We will start with just a single table called users, which will store records for the end-users using our application. A new class called User will be the class to which we map this table. The imports we’ll need to accomplish this include objects that represent the components of our table, including the Column class which represents a database column, as well as the Integer and String classes that represent basic datatypes used in columns:

      >>> from sqlalchemy import Column, Integer, String
      >>> class User(Base):
      ...     __tablename__ = 'users'
      ...
      ...     id = Column(Integer, primary_key=True)
      ...     name = Column(String)
      ...     fullname = Column(String)
      ...     password = Column(String)
      ...
      ...     def __init__(self, name, fullname, password):
      ...         self.name = name
      ...         self.fullname = fullname
      ...         self.password = password
      ...
      ...     def __repr__(self):
      ...        return "<User('%s','%s', '%s')>" % (self.name, self.fullname, self.password)

      The above User class establishes details about the table being mapped, including the name of the table denoted by the __tablename__ attribute, a set of columns id, name, fullname and password, where the id column will also be the primary key of the table. While it’s certainly possible that some database tables don’t have primary key columns (as is also the case with views, which can also be mapped), the ORM, in order to actually map to a particular table, needs there to be at least one column denoted as a primary key column; multiple-column, i.e. composite, primary keys are of course entirely feasible as well.

      We define a constructor via __init__() and also a __repr__() method - both are optional. The class of course can have any number of other methods and attributes as required by the application, as it’s basically just a plain Python class. Inheriting from Base is also only a requirement of the declarative configurational system, which itself is optional and relatively open ended; at its core, the SQLAlchemy ORM only requires that a class be a so-called “new style class”, that is, it inherits from object in Python 2, in order to be mapped. All classes in Python 3 are “new style” classes.

      The Non Opinionated Philosophy

      In our User mapping example, it was required that we identify the name of the table in use, as well as the names and characteristics of all columns which we care about, including which column or columns represent the primary key, as well as some basic information about the types in use. SQLAlchemy never makes assumptions about these decisions - the developer must always be explicit about specific conventions in use. However, that doesn’t mean the task can’t be automated. While this tutorial will keep things explicit, developers are encouraged to make use of helper functions as well as “Declarative Mixins” to automate their tasks in large scale applications. The section Mixin and Custom Base Classes introduces many of these techniques.

      With our User class constructed via the Declarative system, we have defined information about our table, known as table metadata, as well as a user-defined class which is linked to this table, known as a mapped class. Declarative has provided for us a shorthand system for what in SQLAlchemy is called a “Classical Mapping”, which specifies these two units separately and is discussed in Classical Mappings. The table is actually represented by a datastructure known as Table, and the mapping represented by a Mapper object generated by a function called mapper(). Declarative performs both of these steps for us, making available the Table it has created via the __table__ attribute:

      >>> User.__table__ 
      Table('users', MetaData(None),
                  Column('id', Integer(), table=<users>, primary_key=True, nullable=False),
                  Column('name', String(), table=<users>),
                  Column('fullname', String(), table=<users>),
                  Column('password', String(), table=<users>), schema=None)

      and while rarely needed, making available the Mapper object via the __mapper__ attribute:

      >>> User.__mapper__ 
      <Mapper at 0x...; User>
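
      For comparison, here is a rough sketch of what the equivalent “Classical Mapping” mentioned above might look like, with the Table and the mapper() call spelled out separately. It is shown purely for illustration; the tutorial continues with the declarative User class:

      from sqlalchemy import Table, MetaData, Column, Integer, String
      from sqlalchemy.orm import mapper

      metadata = MetaData()

      users_table = Table('users', metadata,
          Column('id', Integer, primary_key=True),
          Column('name', String),
          Column('fullname', String),
          Column('password', String)
      )

      class User(object):
          def __init__(self, name, fullname, password):
              self.name = name
              self.fullname = fullname
              self.password = password

      # associate the class with the table
      mapper(User, users_table)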

      The Declarative base class also contains a catalog of all the Table objects that have been defined called MetaData, available via the .metadata attribute. In this example, we are defining new tables that have yet to be created in our SQLite database, so one helpful feature the MetaData object offers is the ability to issue CREATE TABLE statements to the database for all tables that don’t yet exist. We illustrate this by calling the MetaData.create_all() method, passing in our Engine as a source of database connectivity. We will see that special commands are first emitted to check for the presence of the users table, and following that the actual CREATE TABLE statement:

      >>> Base.metadata.create_all(engine) 
      
      PRAGMA table_info("users")
      ()
      CREATE TABLE users (
          id INTEGER NOT NULL,
          name VARCHAR,
          fullname VARCHAR,
          password VARCHAR,
          PRIMARY KEY (id)
      )
      ()
      COMMIT

      Minimal Table Descriptions vs. Full Descriptions

      Users familiar with the syntax of CREATE TABLE may notice that the VARCHAR columns were generated without a length; on SQLite and Postgresql, this is a valid datatype, but on others, it’s not allowed. So if you are running this tutorial on one of those databases, and you wish to use SQLAlchemy to issue CREATE TABLE, a “length” may be provided to the String type as below:

      Column(String(50))

      The length field on String, as well as the similar precision/scale fields available on Numeric and other numeric types, are not referenced by SQLAlchemy other than when creating tables.
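
      For example, a precision and scale may be given to the Numeric type in the same way as the length on String; the column below is hypothetical and not part of the users table:

      from sqlalchemy import Numeric
      Column(Numeric(10, 2))   # precision/scale used only when emitting CREATE TABLE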

      Additionally, Firebird and Oracle require sequences to generate new primary key identifiers, and SQLAlchemy doesn’t generate or assume these without being instructed. For that, you use the Sequence construct:

      from sqlalchemy import Sequence
      Column(Integer, Sequence('user_id_seq'), primary_key=True)

      A full, foolproof Table generated via our declarative mapping is therefore:

      class User(Base):
          __tablename__ = 'users'
          id = Column(Integer, Sequence('user_id_seq'), primary_key=True)
          name = Column(String(50))
          fullname = Column(String(50))
          password = Column(String(12))
      
          def __init__(self, name, fullname, password):
              self.name = name
              self.fullname = fullname
              self.password = password
      
          def __repr__(self):
              return "<User('%s','%s', '%s')>" % (self.name, self.fullname, self.password)

      We include this more verbose table definition separately to highlight the difference between a minimal construct geared primarily towards in-Python usage only, versus one that will be used to emit CREATE TABLE statements on a particular set of backends with more stringent requirements.

      Create an Instance of the Mapped Class

      With mappings complete, let’s now create and inspect a User object:

      >>> ed_user = User('ed', 'Ed Jones', 'edspassword')
      >>> ed_user.name
      'ed'
      >>> ed_user.password
      'edspassword'
      >>> str(ed_user.id)
      'None'

      The id attribute, while not defined by our __init__() method, nonetheless exists with a value of None on our User instance due to the id column we declared in our mapping. By default, the ORM creates class attributes for all columns present in the table being mapped. These class attributes exist as descriptors, and define instrumentation for the mapped class. The functionality of this instrumentation includes the ability to fire on change events, track modifications, and to automatically load new data from the database when needed.

      Since we have not yet told SQLAlchemy to persist Ed Jones within the database, its id is None. When we persist the object later, this attribute will be populated with a newly generated value.

      The default __init__() method

      Note that in our User example we supplied an __init__() method, which receives name, fullname and password as positional arguments. The Declarative system supplies for us a default constructor if one is not already present, which accepts keyword arguments of the same name as that of the mapped attributes. Below we define User without specifying a constructor:

      class User(Base):
          __tablename__ = 'users'
          id = Column(Integer, primary_key=True)
          name = Column(String)
          fullname = Column(String)
          password = Column(String)

      Our User class above will make use of the default constructor, which accepts id, name, fullname, and password as keyword arguments:

      u1 = User(name='ed', fullname='Ed Jones', password='foobar')

      Creating a Session

      We’re now ready to start talking to the database. The ORM’s “handle” to the database is the Session. When we first set up the application, at the same level as our create_engine() statement, we define a Session class which will serve as a factory for new Session objects:

      >>> from sqlalchemy.orm import sessionmaker
      >>> Session = sessionmaker(bind=engine)

      In the case where your application does not yet have an Engine when you define your module-level objects, just set it up like this:

      >>> Session = sessionmaker()

      Later, when you create your engine with create_engine(), connect it to the Session using configure():

      >>> Session.configure(bind=engine)  # once engine is available

      This custom-made Session class will create new Session objects which are bound to our database. Other transactional characteristics may be defined when calling sessionmaker() as well; these are described in a later chapter. Then, whenever you need to have a conversation with the database, you instantiate a Session:

      >>> session = Session()

      The above Session is associated with our SQLite-enabled Engine, but it hasn’t opened any connections yet. When it’s first used, it retrieves a connection from a pool of connections maintained by the Engine, and holds onto it until we commit all changes and/or close the session object.

      Session Creational Patterns

      The business of acquiring a Session has a good deal of variety based on the variety of types of applications and frameworks out there. Keep in mind the Session is just a workspace for your objects, local to a particular database connection - if you think of an application thread as a guest at a dinner party, the Session is the guest’s plate and the objects it holds are the food (and the database...the kitchen?)! Hints on how Session is integrated into an application are at Session Frequently Asked Questions.

      Adding New Objects

      To persist our User object, we add() it to our Session:

      >>> ed_user = User('ed', 'Ed Jones', 'edspassword')
      >>> session.add(ed_user)

      At this point, we say that the instance is pending; no SQL has yet been issued and the object is not yet represented by a row in the database. The Session will issue the SQL to persist Ed Jones as soon as is needed, using a process known as a flush. If we query the database for Ed Jones, all pending information will first be flushed, and the query is issued immediately thereafter.

      For example, below we create a new Query object which loads instances of User. We “filter by” the name attribute of ed, and indicate that we’d like only the first result in the full list of rows. A User instance is returned which is equivalent to that which we’ve added:

      sql>>> our_user = session.query(User).filter_by(name='ed').first() 
      >>> our_user
      <User('ed','Ed Jones', 'edspassword')>

      In fact, the Session has identified that the row returned is the same row as one already represented within its internal map of objects, so we actually got back the identical instance as that which we just added:

      >>> ed_user is our_user
      True

      The ORM concept at work here is known as an identity map and ensures that all operations upon a particular row within a Session operate upon the same set of data. Once an object with a particular primary key is present in the Session, all SQL queries on that Session will always return the same Python object for that particular primary key; it also will raise an error if an attempt is made to place a second, already-persisted object with the same primary key within the session.

      We can add more User objects at once using add_all():

      >>> session.add_all([
      ...     User('wendy', 'Wendy Williams', 'foobar'),
      ...     User('mary', 'Mary Contrary', 'xxg527'),
      ...     User('fred', 'Fred Flinstone', 'blah')])

      Also, we’ve decided the password for Ed isn’t too secure, so let’s change it:

      >>> ed_user.password = 'f8s7ccs'

      The Session is paying attention. It knows, for example, that Ed Jones has been modified:

      >>> session.dirty
      IdentitySet([<User('ed','Ed Jones', 'f8s7ccs')>])

      and that three new User objects are pending:

      >>> session.new  
      IdentitySet([<User('wendy','Wendy Williams', 'foobar')>,
      <User('mary','Mary Contrary', 'xxg527')>,
      <User('fred','Fred Flinstone', 'blah')>])

      We tell the Session that we’d like to issue all remaining changes to the database and commit the transaction, which has been in progress throughout. We do this via commit():

      sql>>> session.commit()
      

      commit() flushes the remaining changes to the database, and commits the transaction. The connection resources referenced by the session are now returned to the connection pool. Subsequent operations with this session will occur in a new transaction, which will again re-acquire connection resources when first needed.

      If we look at Ed’s id attribute, which earlier was None, it now has a value:

      sql>>> ed_user.id 
      1

      After the Session inserts new rows in the database, all newly generated identifiers and database-generated defaults become available on the instance, either immediately or via load-on-first-access. In this case, the entire row was re-loaded on access because a new transaction was begun after we issued commit(). SQLAlchemy by default refreshes data from a previous transaction the first time it’s accessed within a new transaction, so that the most recent state is available. The level of reloading is configurable as is described in Using the Session.
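
      As a minimal sketch of one such configuration point, the expire_on_commit flag of sessionmaker() disables this automatic expiration, assuming that behavior is not desired:

      from sqlalchemy.orm import sessionmaker

      # attributes will not be expired, and hence not re-loaded from the
      # database, after commit()
      Session = sessionmaker(bind=engine, expire_on_commit=False)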

      Session Object States

      As our User object moved from being outside the Session, to inside the Session without a primary key, to actually being inserted, it moved between three out of four available “object states” - transient, pending, and persistent. Being aware of these states and what they mean is always a good idea - be sure to read Quickie Intro to Object States for a quick overview.

      Rolling Back

      Since the Session works within a transaction, we can roll back changes made too. Let’s make two changes that we’ll revert; ed_user’s user name gets set to Edwardo:

      >>> ed_user.name = 'Edwardo'

      and we’ll add another erroneous user, fake_user:

      >>> fake_user = User('fakeuser', 'Invalid', '12345')
      >>> session.add(fake_user)

      Querying the session, we can see that they’re flushed into the current transaction:

      sql>>> session.query(User).filter(User.name.in_(['Edwardo', 'fakeuser'])).all() 
      [<User('Edwardo','Ed Jones', 'f8s7ccs')>, <User('fakeuser','Invalid', '12345')>]

      Rolling back, we can see that ed_user’s name is back to ed, and fake_user has been kicked out of the session:

      sql>>> session.rollback()
      
      sql>>> ed_user.name 
      u'ed'
      >>> fake_user in session
      False

      issuing a SELECT illustrates the changes made to the database:

      sql>>> session.query(User).filter(User.name.in_(['ed', 'fakeuser'])).all() 
      [<User('ed','Ed Jones', 'f8s7ccs')>]

      Querying

      A Query object is created using the query() method on Session. This function takes a variable number of arguments, which can be any combination of classes and class-instrumented descriptors. Below, we indicate a Query which loads User instances. When evaluated in an iterative context, the list of User objects present is returned:

      sql>>> for instance in session.query(User).order_by(User.id): 
      ...     print instance.name, instance.fullname
      ed Ed Jones
      wendy Wendy Williams
      mary Mary Contrary
      fred Fred Flinstone

      The Query also accepts ORM-instrumented descriptors as arguments. Any time multiple class entities or column-based entities are expressed as arguments to the query() function, the return result is expressed as tuples:

      sql>>> for name, fullname in session.query(User.name, User.fullname): 
      ...     print name, fullname
      ed Ed Jones
      wendy Wendy Williams
      mary Mary Contrary
      fred Fred Flinstone

      The tuples returned by Query are named tuples, supplied by the KeyedTuple class, and can be treated much like an ordinary Python object. The names are the same as the attribute’s name for an attribute, and the class name for a class:

      sql>>> for row in session.query(User, User.name).all(): 
      ...    print row.User, row.name
      <User('ed','Ed Jones', 'f8s7ccs')> ed
      <User('wendy','Wendy Williams', 'foobar')> wendy
      <User('mary','Mary Contrary', 'xxg527')> mary
      <User('fred','Fred Flinstone', 'blah')> fred

      You can control the names of individual column expressions using the label() construct, which is available from any ColumnElement-derived object, as well as any class attribute which is mapped to one (such as User.name):

      sql>>> for row in session.query(User.name.label('name_label')).all(): 
      ...    print(row.name_label)
      ed
      wendy
      mary
      fred

      The name given to a full entity such as User, assuming that multiple entities are present in the call to query(), can be controlled using aliased():

      >>> from sqlalchemy.orm import aliased
      >>> user_alias = aliased(User, name='user_alias')
      
      sql>>> for row in session.query(user_alias, user_alias.name).all(): 
      ...    print row.user_alias
      <User('ed','Ed Jones', 'f8s7ccs')>
      <User('wendy','Wendy Williams', 'foobar')>
      <User('mary','Mary Contrary', 'xxg527')>
      <User('fred','Fred Flinstone', 'blah')>

      Basic operations with Query include issuing LIMIT and OFFSET, most conveniently using Python array slices and typically in conjunction with ORDER BY:

      sql>>> for u in session.query(User).order_by(User.id)[1:3]: 
      ...    print u
      <User('wendy','Wendy Williams', 'foobar')>
      <User('mary','Mary Contrary', 'xxg527')>

      and filtering results, which is accomplished either with filter_by(), which uses keyword arguments:

      sql>>> for name, in session.query(User.name).\
      ...             filter_by(fullname='Ed Jones'): 
      ...    print name
      ed

      ...or filter(), which uses more flexible SQL expression language constructs. These allow you to use regular Python operators with the class-level attributes on your mapped class:

      sql>>> for name, in session.query(User.name).\
      ...             filter(User.fullname=='Ed Jones'): 
      ...    print name
      ed

      The Query object is fully generative, meaning that most method calls return a new Query object upon which further criteria may be added. For example, to query for users named “ed” with a full name of “Ed Jones”, you can call filter() twice, which joins criteria using AND:

      sql>>> for user in session.query(User).\
      ...          filter(User.name=='ed').\
      ...          filter(User.fullname=='Ed Jones'): 
      ...    print user
      <User('ed','Ed Jones', 'f8s7ccs')>

      Common Filter Operators

      Here’s a rundown of some of the most common operators used in filter():

      • equals:

        query.filter(User.name == 'ed')
      • not equals:

        query.filter(User.name != 'ed')
      • LIKE:

        query.filter(User.name.like('%ed%'))
      • IN:

        query.filter(User.name.in_(['ed', 'wendy', 'jack']))
        
        # works with query objects too:
        
        query.filter(User.name.in_(session.query(User.name).filter(User.name.like('%ed%'))))
      • NOT IN:

        query.filter(~User.name.in_(['ed', 'wendy', 'jack']))
      • IS NULL:

        filter(User.name == None)
      • IS NOT NULL:

        filter(User.name != None)
      • AND:

        from sqlalchemy import and_
        filter(and_(User.name == 'ed', User.fullname == 'Ed Jones'))
        
        # or call filter()/filter_by() multiple times
        filter(User.name == 'ed').filter(User.fullname == 'Ed Jones')
      • OR:

        from sqlalchemy import or_
        filter(or_(User.name == 'ed', User.name == 'wendy'))
      • match:

        query.filter(User.name.match('wendy'))
        The contents of the match parameter are database backend specific.

      Returning Lists and Scalars

      The all(), one(), and first() methods of Query immediately issue SQL and return a non-iterator value. all() returns a list:

      >>> query = session.query(User).filter(User.name.like('%ed')).order_by(User.id)
      sql>>> query.all() 
      [<User('ed','Ed Jones', 'f8s7ccs')>, <User('fred','Fred Flinstone', 'blah')>]

      first() applies a limit of one and returns the first result as a scalar:

      sql>>> query.first() 
      <User('ed','Ed Jones', 'f8s7ccs')>

      one() fully fetches all rows, and if not exactly one object identity or composite row is present in the result, raises an error:

      sql>>> from sqlalchemy.orm.exc import MultipleResultsFound
      >>> try: 
      ...     user = query.one()
      ... except MultipleResultsFound, e:
      ...     print e
      Multiple rows were found for one()
      sql>>> from sqlalchemy.orm.exc import NoResultFound
      >>> try: 
      ...     user = query.filter(User.id == 99).one()
      ... except NoResultFound, e:
      ...     print e
      No row was found for one()

      Using Literal SQL

      Literal strings can be used flexibly with Query. Most methods accept strings in addition to SQLAlchemy clause constructs. For example, filter() and order_by():

      sql>>> for user in session.query(User).\
      ...             filter("id<224").\
      ...             order_by("id").all(): 
      ...     print user.name
      ed
      wendy
      mary
      fred

      Bind parameters can be specified with string-based SQL, using a colon. To specify the values, use the params() method:

      sql>>> session.query(User).filter("id<:value and name=:name").\
      ...     params(value=224, name='fred').order_by(User.id).one()
      <User('fred','Fred Flinstone', 'blah')>

      To use an entirely string-based statement, use from_statement(); just ensure that the columns clause of the statement contains the column names normally used by the mapper (below illustrated using an asterisk):

      sql>>> session.query(User).from_statement(
      ...                     "SELECT * FROM users where name=:name").\
      ...                     params(name='ed').all()
      [<User('ed','Ed Jones', 'f8s7ccs')>]

      You can use from_statement() to go completely “raw”, using string names to identify desired columns:

      sql>>> session.query("id", "name", "thenumber12").\
      ...         from_statement("SELECT id, name, 12 as "
      ...                 "thenumber12 FROM users where name=:name").\
      ...                 params(name='ed').all()
      [(1, u'ed', 12)]

      Pros and Cons of Literal SQL

      Query is constructed like the rest of SQLAlchemy, in that it tries to always allow “falling back” to a less automated, lower level approach to things. Accepting strings for all SQL fragments is a big part of that, so that you can bypass the need to organize SQL constructs if you know specifically what string output you’d like. But when using literal strings, the Query no longer knows anything about that part of the SQL construct being emitted, and has no ability to transform it to adapt to new contexts.

      For example, suppose we selected User objects and ordered by the name column, using a string to indicate name:

      >>> q = session.query(User.id, User.name)
      sql>>> q.order_by("name").all()
      [(1, u'ed'), (4, u'fred'), (3, u'mary'), (2, u'wendy')]

      Perfectly fine. But suppose, before we got a hold of the Query, some sophisticated transformations were applied to it, such as below where we use from_self(), a particularly advanced method, to retrieve pairs of user names with different numbers of characters:

      >>> from sqlalchemy import func
      >>> ua = aliased(User)
      >>> q = q.from_self(User.id, User.name, ua.name).\
      ...     filter(User.name < ua.name).\
      ...     filter(func.length(ua.name) != func.length(User.name))

      The Query now represents a select from a subquery, where User is represented twice both inside and outside of the subquery. Telling the Query to order by “name” doesn’t really give us much of a guarantee as to which “name” it’s going to order on. In this case it assumes “name” is against the outer “aliased” User construct:

      sql>>> q.order_by("name").all() 
      [(1, u'ed', u'fred'), (1, u'ed', u'mary'), (1, u'ed', u'wendy'), (3, u'mary', u'wendy'), (4, u'fred', u'wendy')]

      Only if we use the SQL element directly, in this case User.name or ua.name, do we give Query enough information to know for sure which “name” we’d like to order on, where we can see we get different results for each:

      sql>>> q.order_by(ua.name).all() 
      [(1, u'ed', u'fred'), (1, u'ed', u'mary'), (1, u'ed', u'wendy'), (3, u'mary', u'wendy'), (4, u'fred', u'wendy')]
      
      sql>>> q.order_by(User.name).all() 
      [(1, u'ed', u'wendy'), (1, u'ed', u'mary'), (1, u'ed', u'fred'), (4, u'fred', u'wendy'), (3, u'mary', u'wendy')]

      Counting

      Query includes a convenience method for counting called count():

      sql>>> session.query(User).filter(User.name.like('%ed')).count() 
      2

      The count() method is used to determine how many rows the SQL statement would return. Looking at the generated SQL above, SQLAlchemy always places whatever it is we are querying into a subquery, then counts the rows from that. In some cases this can be reduced to a simpler SELECT count(*) FROM table, however modern versions of SQLAlchemy don’t try to guess when this is appropriate, as the exact SQL can be emitted using more explicit means.

      For situations where the “thing to be counted” needs to be indicated specifically, we can specify the “count” function directly using the expression func.count(), available from the func construct. Below we use it to return the count of each distinct user name:

      >>> from sqlalchemy import func
      sql>>> session.query(func.count(User.name), User.name).group_by(User.name).all()  
      [(1, u'ed'), (1, u'fred'), (1, u'mary'), (1, u'wendy')]

      To achieve our simple SELECT count(*) FROM table, we can apply it as:

      sql>>> session.query(func.count('*')).select_from(User).scalar()
      4

      The usage of select_from() can be removed if we express the count in terms of the User primary key directly:

      sql>>> session.query(func.count(User.id)).scalar() 
      4

      Building a Relationship

      Let’s consider how a second table, related to User, can be mapped and queried. Users in our system can store any number of email addresses associated with their username. This implies a basic one-to-many association from the users to a new table which stores email addresses, which we will call addresses. Using declarative, we define this table along with its mapped class, Address:

      >>> from sqlalchemy import ForeignKey
      >>> from sqlalchemy.orm import relationship, backref
      
      >>> class Address(Base):
      ...     __tablename__ = 'addresses'
      ...     id = Column(Integer, primary_key=True)
      ...     email_address = Column(String, nullable=False)
      ...     user_id = Column(Integer, ForeignKey('users.id'))
      ...
      ...     user = relationship("User", backref=backref('addresses', order_by=id))
      ...
      ...     def __init__(self, email_address):
      ...         self.email_address = email_address
      ...
      ...     def __repr__(self):
      ...         return "<Address('%s')>" % self.email_address

      The above class introduces the ForeignKey construct, which is a directive applied to Column that indicates that values in this column should be constrained to be values present in the named remote column. This is a core feature of relational databases, and is the “glue” that transforms an otherwise unconnected collection of tables to have rich overlapping relationships. The ForeignKey above expresses that values in the addresses.user_id column should be constrained to those values in the users.id column, i.e. its primary key.

      A second directive, known as relationship(), tells the ORM that the Address class itself should be linked to the User class, using the attribute Address.user. relationship() uses the foreign key relationships between the two tables to determine the nature of this linkage, determining that Address.user will be many-to-one. A subdirective of relationship() called backref() is placed inside of relationship(), providing details about the relationship as expressed in reverse, that of a collection of Address objects on User referenced by User.addresses. The reverse side of a many-to-one relationship is always one-to-many. A full catalog of available relationship() configurations is at Basic Relational Patterns.

      The two complementing relationships Address.user and User.addresses are referred to as a bidirectional relationship, which is a key feature of the SQLAlchemy ORM. The section Linking Relationships with Backref discusses the “backref” feature in detail.

      Arguments to relationship() which concern the remote class can be specified using strings, assuming the Declarative system is in use. Once all mappings are complete, these strings are evaluated as Python expressions in order to produce the actual argument, in the above case the User class. The names which are allowed during this evaluation include, among other things, the names of all classes which have been created in terms of the declared base. Below we illustrate creation of the same “addresses/user” bidirectional relationship in terms of User instead of Address:

      class User(Base):
          # ....
          addresses = relationship("Address", order_by="Address.id", backref="user")

      See the docstring for relationship() for more detail on argument style.

      Did you know?

      • a FOREIGN KEY constraint in most (though not all) relational databases can only link to a primary key column, or a column that has a UNIQUE constraint.
      • a FOREIGN KEY constraint that refers to a multiple column primary key, and itself has multiple columns, is known as a “composite foreign key”. It can also reference a subset of those columns. A short sketch appears after this list.
      • FOREIGN KEY columns can automatically update themselves, in response to a change in the referenced column or row. This is known as the CASCADE referential action, and is a built-in feature of the relational database.
      • FOREIGN KEY can refer to its own table. This is referred to as a “self-referential” foreign key.
      • Read more about foreign keys at Foreign Key - Wikipedia.
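
      As a brief sketch of the composite foreign key mentioned in the list above - the invoice tables here are hypothetical and unrelated to this tutorial’s schema:

      from sqlalchemy import Table, Column, Integer, ForeignKeyConstraint

      invoices = Table('invoices', Base.metadata,
          Column('invoice_id', Integer, primary_key=True),
          Column('ref_num', Integer, primary_key=True)
      )

      invoice_items = Table('invoice_items', Base.metadata,
          Column('item_id', Integer, primary_key=True),
          Column('invoice_id', Integer),
          Column('ref_num', Integer),
          # the two columns together reference the composite primary key of "invoices"
          ForeignKeyConstraint(
              ['invoice_id', 'ref_num'],
              ['invoices.invoice_id', 'invoices.ref_num'])
      )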

      We’ll need to create the addresses table in the database, so we will issue another CREATE from our metadata, which will skip over tables which have already been created:

      sql>>> Base.metadata.create_all(engine) 
      

      Querying with Joins

      Now that we have two tables, we can show some more features of Query, specifically how to create queries that deal with both tables at the same time. The Wikipedia page on SQL JOIN offers a good introduction to join techniques, several of which we’ll illustrate here.

      To construct a simple implicit join between User and Address, we can use Query.filter() to equate their related columns together. Below we load the User and Address entities at once using this method:

      sql>>> for u, a in session.query(User, Address).\
      ...                     filter(User.id==Address.user_id).\
      ...                     filter(Address.email_address=='jack@google.com').\
      ...                     all():   
      ...     print u, a
      <User('jack','Jack Bean', 'gjffdd')> <Address('jack@google.com')>

      The actual SQL JOIN syntax, on the other hand, is most easily achieved using the Query.join() method:

      sql>>> session.query(User).join(Address).\
      ...         filter(Address.email_address=='jack@google.com').\
      ...         all() 
      [<User('jack','Jack Bean', 'gjffdd')>]

      Query.join() knows how to join between User and Address because there’s only one foreign key between them. If there were no foreign keys, or several, Query.join() works better when one of the following forms is used:

      query.join(Address, User.id==Address.user_id)    # explicit condition
      query.join(User.addresses)                       # specify relationship from left to right
      query.join(Address, User.addresses)              # same, with explicit target
      query.join('addresses')                          # same, using a string

      As you would expect, the same idea is used for “outer” joins, using the outerjoin() function:

      query.outerjoin(User.addresses)   # LEFT OUTER JOIN

      The reference documentation for join() contains detailed information and examples of the calling styles accepted by this method; join() is an important method at the center of usage for any SQL-fluent application.

      Using Aliases

      When querying across multiple tables, if the same table needs to be referenced more than once, SQL typically requires that the table be aliased with another name, so that it can be distinguished from other occurrences of that table. The Query supports this most explicitly using the aliased construct. Below we join to the Address entity twice, to locate a user who has two distinct email addresses at the same time:

      >>> from sqlalchemy.orm import aliased
      >>> adalias1 = aliased(Address)
      >>> adalias2 = aliased(Address)
      sql>>> for username, email1, email2 in \
      ...     session.query(User.name, adalias1.email_address, adalias2.email_address).\
      ...     join(adalias1, User.addresses).\
      ...     join(adalias2, User.addresses).\
      ...     filter(adalias1.email_address=='jack@google.com').\
      ...     filter(adalias2.email_address=='j25@yahoo.com'):
      ...     print username, email1, email2      
      jack jack@google.com j25@yahoo.com

      Using Subqueries

      The Query is suitable for generating statements which can be used as subqueries. Suppose we wanted to load User objects along with a count of how many Address records each user has. The best way to generate SQL like this is to get the count of addresses grouped by user ids, and JOIN to the parent. In this case we use a LEFT OUTER JOIN so that we get rows back for those users who don’t have any addresses, e.g.:

      SELECT users.*, adr_count.address_count FROM users LEFT OUTER JOIN
          (SELECT user_id, count(*) AS address_count
              FROM addresses GROUP BY user_id) AS adr_count
          ON users.id=adr_count.user_id

      Using the Query, we build a statement like this from the inside out. The statement accessor returns a SQL expression representing the statement generated by a particular Query - this is an instance of a select() construct, which is described in SQL Expression Language Tutorial:

      >>> from sqlalchemy.sql import func
      >>> stmt = session.query(Address.user_id, func.count('*').\
      ...         label('address_count')).\
      ...         group_by(Address.user_id).subquery()

      The func keyword generates SQL functions, and the subquery() method on Query produces a SQL expression construct representing a SELECT statement embedded within an alias (it’s actually shorthand for query.statement.alias()).

      Once we have our statement, it behaves like a Table construct, such as the one we created for users at the start of this tutorial. The columns on the statement are accessible through an attribute called c:

      sql>>> for u, count in session.query(User, stmt.c.address_count).\
      ...     outerjoin(stmt, User.id==stmt.c.user_id).order_by(User.id): 
      ...     print u, count
      <User('ed','Ed Jones', 'f8s7ccs')> None
      <User('wendy','Wendy Williams', 'foobar')> None
      <User('mary','Mary Contrary', 'xxg527')> None
      <User('fred','Fred Flinstone', 'blah')> None
      <User('jack','Jack Bean', 'gjffdd')> 2

      Selecting Entities from Subqueries

      Above, we just selected a result that included a column from a subquery. What if we wanted our subquery to map to an entity? For this we use aliased() to associate an “alias” of a mapped class to a subquery:

      sql>>> stmt = session.query(Address).\
      ...                 filter(Address.email_address != 'j25@yahoo.com').\
      ...                 subquery()
      >>> adalias = aliased(Address, stmt)
      >>> for user, address in session.query(User, adalias).\
      ...         join(adalias, User.addresses): 
      ...     print user, address
      <User('jack','Jack Bean', 'gjffdd')> <Address('jack@google.com')>

      Using EXISTS

      The EXISTS keyword in SQL is a boolean operator which returns True if the given expression contains any rows. It may be used in many scenarios in place of joins, and is also useful for locating rows which do not have a corresponding row in a related table.

      There is an explicit EXISTS construct, which looks like this:

      >>> from sqlalchemy.sql import exists
      >>> stmt = exists().where(Address.user_id==User.id)
      sql>>> for name, in session.query(User.name).filter(stmt):   
      ...     print name
      jack

      The Query features several operators which make use of EXISTS automatically. Above, the statement can be expressed along the User.addresses relationship using any():

      sql>>> for name, in session.query(User.name).\
      ...         filter(User.addresses.any()):   
      ...     print name
      jack

      any() takes criterion as well, to limit the rows matched:

      sql>>> for name, in session.query(User.name).\
      ...     filter(User.addresses.any(Address.email_address.like('%google%'))):   
      ...     print name
      jack

      has() is the same operator as any() for many-to-one relationships (note the ~ operator here too, which means “NOT”):

      sql>>> session.query(Address).\
      ...         filter(~Address.user.has(User.name=='jack')).all() 
      []

      Common Relationship Operators

      Here are all the operators which build on relationships - each one is linked to its API documentation, which includes full details on usage and behavior:

      • __eq__() (many-to-one “equals” comparison):

        query.filter(Address.user == someuser)
      • __ne__() (many-to-one “not equals” comparison):

        query.filter(Address.user != someuser)
      • IS NULL (many-to-one comparison, also uses __eq__()):

        query.filter(Address.user == None)
      • contains() (used for one-to-many collections):

        query.filter(User.addresses.contains(someaddress))
      • any() (used for collections):

        query.filter(User.addresses.any(Address.email_address == 'bar'))
        
        # also takes keyword arguments:
        query.filter(User.addresses.any(email_address='bar'))
      • has() (used for scalar references):

        query.filter(Address.user.has(name='ed'))
      • Query.with_parent() (used for any relationship):

        session.query(Address).with_parent(someuser, 'addresses')

      Eager Loading

      Recall earlier that we illustrated a lazy loading operation, when we accessed the User.addresses collection of a User and SQL was emitted. To reduce the number of queries (dramatically, in many cases), we can apply an eager load to the query operation. SQLAlchemy offers three types of eager loading, two of which are automatic, and a third which involves custom criterion. All three are usually invoked via functions known as query options, which give additional instructions to the Query on how we would like various attributes to be loaded, via the Query.options() method.

      Subquery Load

      In this case we’d like to indicate that User.addresses should load eagerly. A good choice for loading a set of objects as well as their related collections is the orm.subqueryload() option, which emits a second SELECT statement that fully loads the collections associated with the results just loaded. The name “subquery” originates from the fact that the SELECT statement constructed directly via the Query is re-used, embedded as a subquery into a SELECT against the related table. This is a little elaborate but very easy to use:

      >>> from sqlalchemy.orm import subqueryload
      sql>>> jack = session.query(User).\
      ...                 options(subqueryload(User.addresses)).\
      ...                 filter_by(name='jack').one() 
      >>> jack
      <User('jack','Jack Bean', 'gjffdd')>
      
      >>> jack.addresses
      [<Address('jack@google.com')>, <Address('j25@yahoo.com')>]

      Joined Load

      The other automatic eager loading function is better known and is called orm.joinedload(). This style of loading emits a JOIN, by default a LEFT OUTER JOIN, so that the lead object as well as the related object or collection is loaded in one step. We illustrate loading the same addresses collection in this way - note that even though the User.addresses collection on jack is actually populated right now, the query will emit the extra join regardless:

      >>> from sqlalchemy.orm import joinedload
      
      sql>>> jack = session.query(User).\
      ...                        options(joinedload(User.addresses)).\
      ...                        filter_by(name='jack').one() 
      >>> jack
      <User('jack','Jack Bean', 'gjffdd')>
      
      >>> jack.addresses
      [<Address('jack@google.com')>, <Address('j25@yahoo.com')>]

      Note that even though the OUTER JOIN resulted in two rows, we still only got one instance of User back. This is because Query applies a “uniquing” strategy, based on object identity, to the returned entities. This is specifically so that joined eager loading can be applied without affecting the query results.

      While joinedload() has been around for a long time, subqueryload() is a newer form of eager loading. subqueryload() tends to be more appropriate for loading related collections while joinedload() tends to be better suited for many-to-one relationships, due to the fact that only one row is loaded for both the lead and the related object.

      joinedload() is not a replacement for join()

      The join created by joinedload() is anonymously aliased such that it does not affect the query results. A Query.order_by() or Query.filter() call cannot reference these aliased tables - so-called “user space” joins are constructed using Query.join(). The rationale for this is that joinedload() is only applied in order to affect how related objects or collections are loaded as an optimizing detail - it can be added or removed with no impact on actual results. See the section The Zen of Eager Loading for a detailed description of how this is used.
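
      As a minimal sketch of that distinction, using the entities from this tutorial, the filtering join is stated with Query.join(), while the joinedload() option only controls how User.addresses is loaded:

      from sqlalchemy.orm import joinedload

      # join() supplies the "user space" join used for filtering; the
      # joinedload() option affects only how the collection is populated
      session.query(User).\
          join(Address).\
          filter(Address.email_address == 'jack@google.com').\
          options(joinedload(User.addresses)).\
          all()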

      Explicit Join + Eagerload

      A third style of eager loading is when we are constructing a JOIN explicitly in order to locate the primary rows, and would like to additionally apply the extra table to a related object or collection on the primary object. This feature is supplied via the orm.contains_eager() function, and is most typically useful for pre-loading the many-to-one object on a query that needs to filter on that same object. Below we illustrate loading an Address row as well as the related User object, filtering on the User named “jack” and using orm.contains_eager() to apply the “user” columns to the Address.user attribute:

      >>> from sqlalchemy.orm import contains_eager
      sql>>> jacks_addresses = session.query(Address).\
      ...                             join(Address.user).\
      ...                             filter(User.name=='jack').\
      ...                             options(contains_eager(Address.user)).\
      ...                             all() 
      >>> jacks_addresses
      [<Address('jack@google.com')>, <Address('j25@yahoo.com')>]
      
      >>> jacks_addresses[0].user
      <User('jack','Jack Bean', 'gjffdd')>

      For more information on eager loading, including how to configure various forms of loading by default, see the section Relationship Loading Techniques.

      Deleting

      Let’s try to delete jack and see how that goes. We’ll mark it as deleted in the session, then we’ll issue a count query to see that no rows remain:

      >>> session.delete(jack)
      sql>>> session.query(User).filter_by(name='jack').count() 
      0

      So far, so good. How about Jack’s Address objects?

      sql>>> session.query(Address).filter(
      ...     Address.email_address.in_(['jack@google.com', 'j25@yahoo.com'])
      ...  ).count() 
      2

      Uh oh, they’re still there! Analyzing the flush SQL, we can see that the user_id column of each address was set to NULL, but the rows weren’t deleted. SQLAlchemy doesn’t assume that deletes cascade; you have to tell it to do so.

      Configuring delete/delete-orphan Cascade

      We will configure cascade options on the User.addresses relationship to change the behavior. While SQLAlchemy allows you to add new attributes and relationships to mappings at any point in time, in this case the existing relationship needs to be removed, so we need to tear down the mappings completely and start again - we’ll close the Session:

      >>> session.close()

      and use a new declarative_base():

      >>> Base = declarative_base()

      Next we’ll declare the User class, adding in the addresses relationship including the cascade configuration (we’ll leave the constructor out too):

      >>> class User(Base):
      ...     __tablename__ = 'users'
      ...
      ...     id = Column(Integer, primary_key=True)
      ...     name = Column(String)
      ...     fullname = Column(String)
      ...     password = Column(String)
      ...
      ...     addresses = relationship("Address", backref='user', cascade="all, delete, delete-orphan")
      ...
      ...     def __repr__(self):
      ...        return "<User('%s','%s', '%s')>" % (self.name, self.fullname, self.password)

      Then we recreate Address, noting that in this case we’ve created the Address.user relationship via the User class already:

      >>> class Address(Base):
      ...     __tablename__ = 'addresses'
      ...     id = Column(Integer, primary_key=True)
      ...     email_address = Column(String, nullable=False)
      ...     user_id = Column(Integer, ForeignKey('users.id'))
      ...
      ...     def __repr__(self):
      ...         return "<Address('%s')>" % self.email_address

      Now when we load the user jack (below using get(), which loads by primary key), removing an address from the corresponding addresses collection will result in that Address being deleted:

      # load Jack by primary key
      sql>>> jack = session.query(User).get(5)    
      
      # remove one Address (lazy load fires off)
      sql>>> del jack.addresses[1] 
      
      # only one address remains
      sql>>> session.query(Address).filter(
      ...     Address.email_address.in_(['jack@google.com', 'j25@yahoo.com'])
      ... ).count() 
      1

      Deleting Jack will delete both Jack and the remaining Address associated with the user:

      >>> session.delete(jack)
      
      sql>>> session.query(User).filter_by(name='jack').count() 
      0
      
      sql>>> session.query(Address).filter(
      ...    Address.email_address.in_(['jack@google.com', 'j25@yahoo.com'])
      ... ).count() 
      0

      More on Cascades

      Further detail on configuration of cascades is at Cascades. The cascade functionality can also integrate smoothly with the ON DELETE CASCADE functionality of the relational database. See Using Passive Deletes for details.
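
      As a minimal sketch of that integration, the relationship can additionally be marked with passive_deletes=True, assuming the addresses.user_id foreign key was created with ON DELETE CASCADE at the database level:

      class User(Base):
          # ....
          addresses = relationship("Address", backref='user',
                          cascade="all, delete, delete-orphan",
                          # assumes ON DELETE CASCADE is present on addresses.user_id
                          passive_deletes=True)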

      Building a Many To Many Relationship

      We’re moving into the bonus round here, but let’s show off a many-to-many relationship. We’ll sneak in some other features too, just to take a tour. We’ll make our application a blog application, where users can write BlogPost items, which have Keyword items associated with them.

      For a plain many-to-many, we need to create an un-mapped Table construct to serve as the association table. This looks like the following:

      >>> from sqlalchemy import Table, Text
      >>> # association table
      >>> post_keywords = Table('post_keywords', Base.metadata,
      ...     Column('post_id', Integer, ForeignKey('posts.id')),
      ...     Column('keyword_id', Integer, ForeignKey('keywords.id'))
      ... )

      Above, we can see declaring a Table directly is a little different from declaring a mapped class. Table is a constructor function, so each individual Column argument is separated by a comma. The Column object is also given its name explicitly, rather than it being taken from an assigned attribute name.

      Next we define BlogPost and Keyword, with a relationship() linked via the post_keywords table:

      >>> class BlogPost(Base):
      ...     __tablename__ = 'posts'
      ...
      ...     id = Column(Integer, primary_key=True)
      ...     user_id = Column(Integer, ForeignKey('users.id'))
      ...     headline = Column(String(255), nullable=False)
      ...     body = Column(Text)
      ...
      ...     # many to many BlogPost<->Keyword
      ...     keywords = relationship('Keyword', secondary=post_keywords, backref='posts')
      ...
      ...     def __init__(self, headline, body, author):
      ...         self.author = author
      ...         self.headline = headline
      ...         self.body = body
      ...
      ...     def __repr__(self):
      ...         return "BlogPost(%r, %r, %r)" % (self.headline, self.body, self.author)
      
      
      >>> class Keyword(Base):
      ...     __tablename__ = 'keywords'
      ...
      ...     id = Column(Integer, primary_key=True)
      ...     keyword = Column(String(50), nullable=False, unique=True)
      ...
      ...     def __init__(self, keyword):
      ...         self.keyword = keyword

      Above, the many-to-many relationship is BlogPost.keywords. The defining feature of a many-to-many relationship is the secondary keyword argument which references a Table object representing the association table. This table only contains columns which reference the two sides of the relationship; if it has any other columns, such as its own primary key, or foreign keys to other tables, SQLAlchemy requires a different usage pattern called the “association object”, described at Association Object.

      We would also like our BlogPost class to have an author field. We will add this as another bidirectional relationship; one issue we’ll have, however, is that a single user might have lots of blog posts. When we access User.posts, we’d like to be able to filter results further so as not to load the entire collection. For this we use a setting accepted by relationship() called lazy='dynamic', which configures an alternate loader strategy on the attribute. To use it on the “reverse” side of a relationship(), we use the backref() function:

      >>> from sqlalchemy.orm import backref
      >>> # "dynamic" loading relationship to User
      >>> BlogPost.author = relationship(User, backref=backref('posts', lazy='dynamic'))

      Create new tables:

      sql>>> Base.metadata.create_all(engine) 
      

      Usage is not too different from what we’ve been doing. Let’s give Wendy some blog posts:

      sql>>> wendy = session.query(User).\
      ...                 filter_by(name='wendy').\
      ...                 one() 
      >>> post = BlogPost("Wendy's Blog Post", "This is a test", wendy)
      >>> session.add(post)

      We’re storing keywords uniquely in the database, but we know that we don’t have any yet, so we can just create them:

      >>> post.keywords.append(Keyword('wendy'))
      >>> post.keywords.append(Keyword('firstpost'))

      We can now look up all blog posts with the keyword ‘firstpost’. We’ll use the any operator to locate “blog posts where any of its keywords has the keyword string ‘firstpost’”:

      sql>>> session.query(BlogPost).\
      ...             filter(BlogPost.keywords.any(keyword='firstpost')).\
      ...             all() 
      [BlogPost("Wendy's Blog Post", 'This is a test', <User('wendy','Wendy Williams', 'foobar')>)]

      If we want to look up posts owned by the user wendy, we can tell the query to narrow down to that User object as a parent:

      sql>>> session.query(BlogPost).\
      ...             filter(BlogPost.author==wendy).\
      ...             filter(BlogPost.keywords.any(keyword='firstpost')).\
      ...             all() 
      [BlogPost("Wendy's Blog Post", 'This is a test', <User('wendy','Wendy Williams', 'foobar')>)]

      Or we can use Wendy’s own posts relationship, which is a “dynamic” relationship, to query straight from there:

      sql>>> wendy.posts.\
      ...         filter(BlogPost.keywords.any(keyword='firstpost')).\
      ...         all() 
      [BlogPost("Wendy's Blog Post", 'This is a test', <User('wendy','Wendy Williams', 'foobar')>)]

      Further Reference

      Query Reference: Querying

      Mapper Reference: Mapper Configuration

      Relationship Reference: Relationship Configuration

      Session Reference: Using the Session

l:[1,35,19,48],immutabledict:27,mapperopt:[38,42,23,7,34,36],dropsequ:[56,10,40],apply_min:[8,23],remap:7,passive_cascad:23,sysop:66,spot:[38,40],mapperev:[37,19,28,72,32,34,57],spow:56,date:[1,38,39,40,19,23,7,35,65,8,67,28],suck:40,data:[0,2,4,14,15,18,26,27,33,39,40,19,23,7,35,47,54,50,55,56,58,8,67,28,74],stress:[35,26],stdin:67,int64:35,callabl:[40,19,23,7,35,67],odbc_autotransl:23,ordinarili:[37,59],anon_1_users_nam:61,smalldatetim:[9,7],detect_typ:73,my_funct:38,mappedcollect:[35,3,23,19],overhaul:[1,38,39,40,23,7,35,67],node_data:21,torn:49,tort:24,palpabl:65,mapper:[1,38,39,40,19,23,7,35,65,8,67],exot:[46,40,23,43],nation:[5,38,11],modestli:67,mycustomexcept:17,homegrown:7,therebi:[38,35,40,19,23],message_1:25,"7b4":[19,28],append_without_ev:49,"7b3":37,returntypefromarg:41,didn:[37,38,40,19,23,7,5,35,46,8,33],revert:[61,19,23,65,35,46,27,49],separ:[0,1,2,21,5,9,70,42,26,27,30,33,34,36,37,38,39,40,23,7,35,48,49,55,58,61,69,64,8,67,28,72,75],isolation_:69,resultset:[40,23],compil:[1,38,39,40,19,23,7,35,65,67,28,18],inhert:[3,23],dramat:[61,40,19,69,35,67,9],current_tim:41,aproxi:7,joinedload:[35,40,19],expedi:[26,49],sqldescribeparam:9,association_proxi:[12,40,72,31],durabl:69,append_result:[38,32,57,23],client_encod:[51,19],unserializ:7,envelop:19,latin1_german2_ci:38,passive_off:[31,49],columncollectionconstraint:[51,2],"byte":[39,40,19,25,5,35,66,51,9,30],unpredict:40,reusabl:8,setdefault:3,modest:40,recov:[14,27,8],punt:19,neglect:7,oper:[1,38,39,40,19,23,7,35],spoken:42,onc:[1,38,39,40,19,23,7,65,35,26,27,48,54,68,50,67,55,58,14,56,17],memoized_instancemethod:40,supports_native_boolean:27,reopen:40,atabl:72,submit:[19,7],symmetri:[35,26],open:[1,38,40,19,23,35],posts_id:61,breviti:42,connam:56,teardown:[29,19,49],bite:38,some_db_funct:28,citi:[21,34,42],paramt:69,conveni:[37,61,23,16,35,26,8,49,68,30,72,12,14,28,34,55],uka:12,someengin:[0,72],nakagami:[5,35],setattr:[12,40,3,23,19],uk1:58,prefix_insert:74,sessiontran:39,sai:[38,39,40,23,7,26,65,67,28,33],nicer:19,not_:[33,30,31,7],argument:[1,38,39,40,19,23,7,35],grandparenttransform:64,sap:71,sat:[1,38,40,19,23,7],"__builtin__":[37,3,43,72,31,16],note:[0,2,3,4,5,8,9,10,11,14,28,17,19,20,21,26,27,30,31,32,33,34,37,38,40,41,23,7,35,48,49,50,51,55,56,57,58,61,60,69,16,64,65,66,67,68,25,70,71,72,73,74,75,76],"_detect_xxx":40,take:[1,3,4,5,7,10,11,12,14,16,21,26,27,30,32,33,63,36,37,38,39,40,19,23,43,35,46,48,49,50,51,55,56,57,58,61,69,34,64,65,8,67,28,25,72,73,76],noth:[37,38,39,2,19,69,26,48,49,25,46,67,72,73,40,36],mycomparatortyp:72,"0x2eaaf0":3,buffer:[37,38,39,40,41,35,26,49,51],compress:19,do_savepoint:27,"_type_map":19,some_object:[37,49],asdecim:[19,23,25,5,66,51,11],contained_bi:51,prefilteredqueri:[46,7],allevi:[58,59,35,69],pythonhost:59,someschema:23,drive:[58,73],identifierpreprar:23,fk_node_element_id:2,new_from_blammo:12,sum_1:64,xml:[58,18],slow:[38,39,23],mydata:0,transact:[1,38,39,40,19,23,7,35],activ:[1,21,39,2,19,23,7,35,48,49,28,55,75,58,40,14,33,34,32,57],pysybas:22,martinfowl:69,jtd:9,requir:[1,38,40,19,23,7,35],item_nam:2,twelv:34,though:[0,1,2,8,10,12,14,26,27,33,34,37,38,39,40,19,23,7,35,49,58,61,69,66,28,25,72,75],autload:23,where:[0,2,4,5,8,9,10,14,15,17,19,26,27,30,33,1,38,39,40,41,23,7,35,48,50,51,55,56,58,59,65,66,67,28,25,73,74,76],get_table_opt:0,"_cleanup":28,folder_1:58,log_tabl:26,streamlin:[40,3,23,67,28,19],assumpt:[38,40,19,61,35,26,49,51],grandpar:[21,64],screen:7,uncondition:[58,38,39,40,19,23,21,16,35,26,66,49,64,25,9,67,31,14,56,57,76],return_conn:28,refman:[5,46],cycl:[1,38,40,19,7,35,65
,49,28,21,17],somechild:[21,23],mani:[1,38,39,40,19,23,7,35,65,8],after_drop:50,save_obj:[1,38,23],link_to_nam:[2,7],photo2:34,photo3:34,photo1:34,unitext:22,needlessli:[19,7],its_heavi:12,extension_typ:[12,34,31],gist:51,thousand:28,resolut:[58,2,19,23,35,49,72,55,63],fluenci:21,"_base_alia":37,eeve:35,"0x1298f50":49,former:[37,19,34,35,8,28,9,16],mcneli:19,get_property_by_column:[40,35,34],masseagerload:23,engineer_typ:37,after_delet:[37,32,57],trelli:8,canon:[58,21,49],blah:[61,9,65],sayap:19,"_hide_from":38,myapp:[26,72],ascii:[58,38,22,19,25,5,35,66,51,9,73,40],fdel:[64,19],"__init__":[0,2,5,9,10,11,14,26,27,30,39,19,23,7,35,51,55,56,65,66,67,28,25,74,76],filter_bi:[37,38,40,19,23,61,64,65,58,49,36],binari:[38,39,40,19,23,4,5,35,7,66,67,51,9,30,11,55,34,25,76],inconsist:[58,38,40,19,5,48,67,28],selectresultsext:[38,23],clause_express:40,utf8:[58,39,25,5,46,66,51,9],expirerelationshiponfkchang:46,uowtask:1,composedsqlengin:[1,38],extern:[38,40,19,23,7,35],applcat:49,clobber:39,idx_nam:28,dereferenc:[40,19,23,7,35,26,49],rest:[37,1,64,40,38,26,58,49,61,33,34],linestr:25,passphras:25,bfile:[66,23,7],strongidentitymap:7,linker:[35,3],littl:[38,40,3,23,7,46,72,67,28,25,61,21,33,19],instrument:[1,19,23,7,35,8],exercis:[67,7],begin_nest:[35,40,8,23],rshift:35,around:[3,4,5,14,26,30,33,1,38,40,19,23,7,35,46,49,55,61,69,64,8,67,28,73,76],libc:73,dark:[19,67],iodbc:9,world:[19,49],create_inst:[38,32,8,57],make_url:[35,19,48],sessionextens:[19,23,7,8,49,28,32],some_funct:64,integ:[1,38,39,40,19,23,7,35,65,67],primaryjoin:[58,38,40,19,23,7,35,8,67,28],inter:[19,5,51,72,34,36],manag:[26,3,21,70,14,28,18,23,7,30,31,33,63,38,40,19,42,43,35,50,57,58,60,62,34,65,68,72,75],supplementari:21,inherit_schema:[25,35],myproxi:68,kamara:35,dilbert:[37,58],sql_ascii:51,goosei:7,assert_unicod:[35,40,67,23,7],first_row:7,pickler:[25,35,7],assertionerror:[34,8,7],noon:1,definit:[0,1,2,19,23,38,74,65,7,26,27,58,54,28,67,40,55,56,35,18],evolv:35,exit:19,ddl:[40,19,23,7,35,8,67],notabl:[35,49,11,19],refer:[1,38,39,40,19,23,7,35,8,67],schaefer:35,power:[69,7],standpoint:69,"__name__":[1,38,46,72],acc:64,use_ansi:[39,33,40,66],stone:23,referred_column:[0,27],addconstraint:[40,55,2,67,56],user_version_id:34,neighbor:42,act:[0,37,39,40,3,23,21,69,65,35,27,49,28,25,31,12,55,33,75,19],convert_unicod:[38,39,40,23,7,27,66,48,25,9],r6690:67,"0x101701150":58,sessionmak:[40,19,42,7,62,65,35,46,23,8,67,72,32,15,57],do_commit_twophas:27,availbl:[40,7],hex:[40,23,48,28,25,34],owner_st:3,hey_use_this_instead_for_iter:3,sid:1,bundl:[28,10,7],adjacency_list:42,"_generate_backref":[40,67],congratul:14,behaivor:38,pull:[38,39,40,19,42,7,35,10,72,14,75],dirti:[38,39,40,19,23,7,35,65,67,28],myint:25,"_dispatch":[14,26],gone:[1,38,40,7,26,67,14,74],uid:[9,46,56],creat:[1,38,39,40,19,23,7,35],certain:[4,5,9,14,26,30,33,1,38,40,19,23,7,35,51,55,56,58,66,28,25,76],deferenc:[35,65],"_autoattach":2,getutcd:74,collid:[35,7],googl:[35,19,8],collin:35,connectionev:[50,35,73,19,68],join_bi:8,collis:[38,35,39,23],writabl:[12,23],freestand:[58,73],version_id_col:[38,60,40,23,72,34],"_sa_adapt":3,genuin:34,requires_name_norm:27,"_has_ev":26,vocabulari:33,mysess:49,tricki:[64,72,7],mimic:[37,12,30,23],mass:[37,1,23],nelson:35,legacysess:39,pks_by_tabl:23,illustr:[2,21,10,12,14,23,26,33,34,37,38,39,40,19,42,7,46,48,49,56,58,61,16,64,8,68,25,70,72,75,76],improperli:[38,35],code:[4,5,14,18,26,30,1,38,39,40,19,23,7,35,46,50,55,58,69,65,67,28,25,76],jason:[38,43],attr_:34,introduc:[38,70,12,13,28,20,23,31,33,34,36,37,21,40,19,42,7,35,49,59,56,58,61,62,16,6
8],has_window_func:[40,23],col_id_seq:40,checkin:[50,14,48,67,68],pjoin_typ:75,candid:[55,46,76],element_typ:17,sc1:42,strang:[40,19,49],condition:[58,40,67,72,55,56],polymorphic_iter:34,helloworld:42,nulltyp:[25,30,40,23],adjust:[1,38,40,19,23,7,35,64,65,28,59,31,12],small:[1,38,39,40,58,23,7,5,35,66,67,28,51,9,25,75,14,36],enterpris:49,sync:[21,38,23,49,43],past:[21,40,23],uuid4:34,uuid3:30,pass:[1,38,39,40,19,23,7,35,65,8,67],useexist:[19,23],trick:[39,40,3,65,67,33],deleg:[51,14,3],richard:[40,19],sqlite_autoincr:[40,19,73],section:[2,3,5,6,8,9,10,11,14,28,19,20,22,26,27,31,32,33,34,36,37,21,39,40,41,7,35,47,48,49,50,51,52,55,56,57,58,59,61,69,66,67,68,25,70,71,72,73,74,75],delet:[1,38,39,40,19,23,7,35,8],primary_kei:[0,26,2,3,21,5,9,10,12,23,7,30,33,34,37,38,40,42,43,46,49,51,55,56,58,61,69,16,64,65,66,67,28,25,72,73,74,75,76],succinct:19,method:[1,38,39,40,19,23,7,35],contrast:[20,61,35,54,70,55,33,76],hasn:[0,58,19,61,35,46],full:[0,2,3,4,8,21,18,23,26,27,30,33,34,36,37,38,39,40,19,42,7,35,46,47,49,51,55,56,58,69,66,67,28,25,72,75,76],hash:[38,40,19,23,7,5,65,67,51,30],postgres_return:40,unmodifi:[31,49,7],inher:[35,73],palia:58,ineffici:[58,8],utc_timestamp:[10,7],prior:[58,40,3,7,35,46,48,28,50,51,72,33,36,76],pop_sess:39,pick:[38,40,19,23,7,5,11,12,55],action:[61,40,24,23,7,49,50,51,14,55],tkyte:66,via:[1,38,39,40,19,23,7,35,65,8,67],depart:55,financial_info:55,company_id:75,ansi_bind_rul:27,decrement:[1,46,26,19],coercion:[40,19,23],select:[1,38,39,40,19,23,7,35],stdout:48,database_system:69,regional_sal:76,more:[1,38,39,40,19,23,7,35],reachabl:38,"_requires_quot":23,compani:[58,75,23,31],cach:[38,40,19,23,7,35,65],delattr:19,endpoint:[12,74,64,19,42],ix_mytable_col2:2,before_cr:[50,56],ix_mytable_col1:2,sldatetim:7,midstream:23,assignmapp:[1,38,23],"__mapper_args__":[58,40,19,23,7,35,46,72,34,75],eras:34,prompt:[61,9,33,70],scan:[58,7,35,46,8,28],challeng:[75,49],registr:[58,35,40,19,18],accept:[38,40,19,23,7,35],huge:[58,67],interconnect:7,freshli:3,fruition:42,columnkei:2,classproperti:[40,19,72],simpl:[0,2,3,12,23,25,30,31,33,16,36,40,19,42,7,35,46,57,58,62,64,65,66,67,28,69,72,75,76],conditit:21,updatebas:[69,74,19,4],referenc:[1,38,40,19,23,7,35],bind_express:[58,25],unixodbc:9,variant:[37,21,19,42,10,64,25,30,11,55,33,16],as_mut:[51,16],no_backslash_escap:40,my_data:[25,16],circumst:[1,40,19,23,35,73],visit_cast:5,node_parent_id:[21,64],django:42,non_ad:49,ordering_list:43,trade:[40,43],scott:[58,46,40,19,7,5,35,26,48,49,50,51,25,67,72,14,55],cextens:[70,19,67],all_:39,"__names__":7,nifti:35,filip:19,is_clause_el:[55,30,31],html:[69,19,23,7,5,46,65,66,67,8,59,51,25,73,28,76],bypass:[37,38,40,3,58,21,35,26,48,61,14,57,19],update_execution_opt:[26,40],parent_node_id:2,tri:[1,21,19,61,35,49],amelior:73,address_subq:37,on_load:58,achiev:[0,37,46,40,3,7,56,26,72,49,25,61,75,21,73,12,14,33,34,55,36],val:[51,55,8],found:[3,5,23,30,31,33,34,37,38,40,19,42,49,51,55,56,58,61,60,66,25,73,76],procedur:[40,19,23,7,26,49,25,30,67,2,74,57],realli:[58,38,40,19,7,8,28,14,33],msinteg:7,reduct:[35,28,40,7],reconstitut:[34,7],existing_object:49,is_properti:31,right_node_id:21,sort_tabl:[0,35,7],occurr:[61,43,31,55,16,75,76],qualifi:[37,21,39,2,19,23,69,35,26,27,72,40,55,34,75,76],postgr:[1,38,39,40,23,7,46,51],proxi:[1,38,39,40,19,23,7,35,65,8,67],value_attr:40,join_to_left:37,quicki:62,driven:[37,40,46,8,49,28,72],major:[1,38,39,40,19,23,35],lastnam:[65,34],number:[4,5,7,8,10,11,12,14,28,21,26,27,29,30,32,33,34,36,1,38,39,40,19,23,43,35,46,48,49,50,51,55,56,57,58,61,60,69,65,66,67,68,25,73,7
4,75],"_return_conn":28,load_dialect_impl:[25,40,7],compile_varchar:74,default_charset:5,guess:[58,61,40,19,23,7,46,67,28,9],someabstractbas:72,guest:61,vararg:23,illeg:[58,40,19],jek:12,propertyload:[1,38,39,31,7],odbc_opt:23,left_id:21,consult:[58,40,3,35,49,28,25,9,14,63,19],reus:[37,38,40,21,9,61,14],prefetchingresultproxi:38,reinstat:19,arrang:[21,35,19,49],accomod:[38,40],comput:[21,35,72,7],beforehand:[51,39,10,23,49],packag:[1,38,39,40,19,23,7,35,8,67,28],requisit:37,sell:24,myawesomelist:3,joinedload_al:40,equival:[0,2,3,4,5,10,21,12,27,30,31,33,34,36,37,38,39,40,19,23,7,35,49,55,58,61,16,64,8,28,25,74,76],mapepd:31,self:[1,38,39,40,19,23,7,35],also:[0,1,2,3,4,5,7,8,9,10,11,12,13,14,28,19,23,21,26,27,30,31,32,33,34,36,37,38,39,40,41,42,43,35,46,48,49,50,51,52,55,56,57,58,59,60,61,69,16,64,65,66,67,68,25,70,71,72,73,74,75,76],thenumber12:61,analogu:[25,32,35,68],loggabl:38,plai:[58,21,23,5,46,25,30,33],local_session_cach:42,plan:[38,40,19,35,65,66,67,8,14,32],msntext:7,max_identifier_length:[27,48,23,7],exc:[40,19,7,35,8,28],as_declar:[35,72],listens_for:[58,19,26,50,29,73,14,34,57],cover:[61,23,7,65,48,8,70,31,28,33],ext:[3,21,5,12,15,23,7,34,63,35,38,40,19,42,43,45,46,51,56,58,61,16,64,8,67,28,25,72,74],supertyp:[40,19],exp:74,jython2:40,microsoft:[74,18,4],use_alt:[38,40,21,35,67,2,17],affix:23,becau:10,session:[1,38,39,40,19,23,7,35],affin:[30,19,23,7],impact:[37,1,75,46,40,19,23,7,35,26,58,49,8,25,61,67,55,28,36],do_autocommit:27,indexerror:[40,43],visit_alter_column:[74,67],solut:[40,35,46,66,67,49],symptom:[38,40,23],factor:[1,46,39],use_schema:27,oraclenvarchar:7,mainten:[58,55,28,49,7],liabl:24,ambiti:[21,30,8,42],table_iter:[40,67,7],pwhash:65,ansifunct:41,has_inherited_t:[35,72],mylisten:68,rfc:[1,48],cri:23,bytea:[51,40,67,25],manytoon:7,set:[1,38,39,40,19,23,7,35,26,65,58,67,8,74,28,18],use_mapper_path:37,seq:[38,10,41],creator:[1,40,35,46,48,68,50,12,14],startup:[49,7],after_flush_postexec:[32,19,57,8],"_branch":26,see:[0,1,2,3,4,5,7,8,10,11,12,14,16,17,18,23,21,26,27,30,31,33,34,35,36,37,38,40,19,42,43,45,46,47,48,49,50,51,55,56,57,58,59,61,69,63,65,66,67,28,25,70,72,73,74,75,76],sec:46,clear_mapp:[34,23],sel:4,outward:[35,23,28],analog:[37,33],ohloh:7,mutex:[1,38,40,19,7,73],statement_compil:27,compliment:25,signatur:[58,38,40,19,23,7,68,25,29,30,31],identity_insert:[38,9],javascript:7,disallow:[5,1,40,7],querycontext:[38,32,57,31],frozenset:[23,19,31,76],ctest:30,bodi:[61,64,34,72,31],is_liter:[38,30],last:[39,40,19,23,7,65,8,67],noresultfound:[37,61,60],sqlite_omit_foreign_kei:73,author_id:[34,72],whole:[46,42,7,35,26,65,49,64,9,14,33,34],load:[1,38,39,40,19,23,7,35],markdown:[38,39],schedul:49,apply_avg:[8,23],"__rsub__":30,lelong:38,pycon:58,has_al:51,implicit:[38,40,19,23,7,35,8,67,28,74,18],addressus:34,unambigu:[38,66,21,7],create_d:10,nonexist:[40,17,19],fire:[1,38,39,40,19,23,7,35,65,67,28],great:[58,38,3,7,35,46,49,28,19],fund:37,someotherobject:57,"__lshift__":30,autoinc:[35,73],straight:[37,1,40,19,23,4,5,35,61,9,30,38,14,55,76],erron:[1,61,40,19,23,7,35],histor:[58,35,42,73],oldvalu:[29,32,57],durat:[40,19,23,7],formatt:30,error:[1,38,40,19,23,7,35,65,67,28],flusherror:[60,8,23,49,7],robin:[45,23],real:[0,5,9,10,11,14,26,33,37,40,19,23,7,35,46,49,51,52,56,57,67,25],pgfoundri:51,owen:35,someentri:21,autoload_replac:[55,35,19],pref_nam:[55,2],mockconnect:19,decor:[38,39,40,19,23,7,35,46,65,8,29,14,74],folder_nam:58,useag:38,obsolet:[51,32,57,49,28],seek:[1,39],shorten:21,shorter:28,funni:38,decod:[39,40,19,25,35,66,48,51,9],use_upd:39,dozen:[38,55,42,28],"
_get_address":21,rev_id:2,"_do_get":28,alert:[51,33,16,28],"__or__":30,my_mutating_procedur:26,stack:[7,35,46,49,25,57],recent:[0,37,46,40,61,23,38,5,35,7,66,49,60,67,73,55],beahvior:46,noninfring:24,"_warn":49,kelli:[35,19],elem:49,pickl:[1,38,40,19,42,7,35,23,58,67,8,25,30,28,15,57],person:[58,72,24,42,8],user_alias_password:61,picki:[1,33],root_connect:27,mysql:[1,39],mapped_t:[46,34,8,49],skip_xmin:56,r6711:40,unit_id:37,"_parent":[16,19],eager:[1,38,39,40,19,23,7,35],input:[38,7,35,66,8,25,9,14,74],email_address_1:[37,33],sessioncontext:[1,23],posts_headlin:61,mapperproperti:[1,58,40,19,23,7,35,46,12,13,32],format:[0,4,8,10,26,27,30,33,34,36,37,21,39,40,19,23,7,35,48,50,56,58,61,65,66,67,28,25,73,76],unexpir:23,intuit:[58,19,46,28,52,72,75],abound:19,"__gt__":[30,34,31],falcon:5,basicconfig:48,colexpr:25,bulkpric:30,encount:[37,21,60,19,7,49,25,72],myobject:[39,65,23],sampl:[35,33,42,28],somecollect:19,order_t:49,word_insensit:64,sprint:58,stament:19,firebird_return:40,nodes_2_id:65,recognit:63,machin:[75,23],object:[1,38,39,40,19,23,7,35],server_side_cursor:[51,38,40,23,7],identity_map:[35,34,31,49,7],deferredcolumnproperti:34,prerequisit:[33,34,73],widget_id:21,coexist:8,materi:21,regular:[3,21,10,12,15,26,30,33,63,38,40,19,23,7,35,46,48,49,55,58,61,69,34,65,8,67,28,72,73,76],stuart:35,polymorphic_fetch:[67,40,8,23,7],reset_joinpoint:[37,38,23,21],repair:[35,40,19,23,7],letter:[33,3,48],primarili:[37,61,2,19,23,7,5,49,25,67,55,33,76],manager_of_class:57,geomfromtext:42,contributor:[58,19,24],anon_1_users_id:[61,36],before_xxxx:38,occupi:3,span:[8,67,68,50,75,49,57],spam:28,textual:[38,23,4,35,26,27,28,25,30,33,76],custom:[1,38,39,40,19,23,7,35],suit:[38,19,42,7,35,26,23,66,49,25,61,67,34,71],optionaldict:[76,55,30,4],xpath:[37,42],utf8_bin:30,enable_eagerload:[37,7],recipe_id:8,link:[1,3,4,70,12,18,31,33,34,36,37,38,39,40,19,23,7,35,46,48,59,55,56,62,67,28,75,76],atom:[5,69,19,49],atob:36,hasstartd:72,line:[21,64,19,61,26,65,48,49,8,67,28,74,33,75],after_transaction_cr:[35,57],union_al:[37,23,7,74,33,76],expess:7,secondaryjoin:[38,40,8,23,7],execute_at:[74,56,67,28],someus:61,lazyload:[38,39,40,19,42,23,8,67,36],doc:[21,5,23,31,34,1,38,39,40,19,42,7,35,46,49,51,55,65,8,67,28,25,73,75,76],impl:[40,19,7,35,67,25,30,31,16,17],backrefer:[39,7],parser:[40,23],"char":[38,39,22,19,7,5,35,66,51,9,25,11,73,40],"02migrat":1,invalid:[37,1,61,39,40,19,23,38,7,26,27,58,67,68,50,46,60,35,14,28,17],pg_insp:67,edwardo:61,active_histori:[21,40,49,31,32,34,57],pguuid:7,"0x12bf830":12,getcommit:40,ago:28,algorithm:[37,1,39,40,19,23,30,73],inaccuraci:[40,19,7],discrimin:[37,42,23,7,69,35,72,34,75],fresh:[50,65,75,68],pg_schema:1,hello:[23,49],users_firstnam:65,usagerecip:[66,40,42,7,8,28],extrapol:[0,23,42],partial:[0,1,2,19,23,38,7,58,67,40,55],"__mapper__":[61,34,72],define_constraint_remote_t:27,columnnam:[37,40,35,30,2,55],unmappedcolumnerror:[8,60,40,19,7],objectdeletederror:[37,60,34,19],procur:[46,19,26,27,48,14],asktom:66,my_special_funct:57,send:[38,40,19,23,35,65,67],cascade_backref:[21,40,31,49],stream_result:[37,40,4,26,51,76],sens:[25,33,23,36,49],sent:[4,5,9,14,26,27,30,32,33,34,38,40,19,23,7,35,49,50,55,57,58,66,25,72,76],gjffdd:61,"_instrument_class":3,engineer_mapp:75,real_as_float:23,node_1_id:21,recip:[35,40,19,67,8],magic:[8,23],fewer:[58,40,19,23,66,67,28,25,9,75,33,36,76],"try":[1,38,19,23,35,26,58,67,28,14,33],somecol:[38,40,67,25,30,2],race:[38,7],tablename_colnam:39,impli:[38,24,23,21,69,26,49,25,61,30,73],"__metaclass__":72,use_t:27,natur:[37,58,40,72,23,7,35,46,66,25,61,31,34,
75],mysqldb:[40,19,5,46,48,28,69,11],index:[1,38,39,40,19,23,7,35,67],consistency_:69,after_soft_rollback:[57,19,49],led:[8,28],len:[74,46,7],defaultdialect:[26,27,8],let:[58,38,46,40,61,21,35,26,65,8,49,10,72,14,33,36],get_view_nam:[0,27,19],unmodified_intersect:31,some_sequ:10,int_fals:74,technolog:66,cte:[37,35,19,76],grandchild:37,defeat:[37,40,49],opinion:[35,33,49],"_make_proxi":[40,19],zip:[21,34,31],commun:[20,26,66,49,51,71],run_cal:[26,40],doubl:[38,39,40,19,23,7,5,35,11,73],upgrad:[1,58,40,19,23,35,8,67,51],next:[1,10,14,19,26,27,32,33,63,37,21,39,40,41,23,7,35,46,48,49,61,34,65,75],encrpyt:25,pickled_data:28,echo_uow:[40,48,67,7],commut:38,comparison:[4,30,31,38,39,40,19,23,7,35,46,51,55,58,62,64,65,8,67,25,74,76],initialize_instance_dict:63,idiosyncrat:64,merged_object:49,polymorphic_ident:[40,19,7,72,34,75],weaker:28,process:[1,38,40,19,23,7,35,65,58,67,8,74,28],lock:[58,38,14,26,76],slim:35,high:[37,20,46,40,4,26,49,68,50,25,61,67,73,33,18],somethingmixin:72,delai:[38,35,19,73],type_b_str:72,some_mapped_class:34,overridden:[0,1,39,38,19,23,4,5,16,7,58,51,30,69,31,40,55,34,25,76],singular:12,await:[39,49],requst:7,class_:[58,42,34,49,31,32,63,74,57],stringattribut:72,schematyp:[50,25,35,40],decisionmak:35,essenti:[21,40,3,10,16,35,64,48,49,8,51,30,72,12,56,76],loglevel:40,seriou:5,counter:[1,46,40,19,7,62,35,26,48,10,18],expired_attribut:31,element:[1,38,39,40,19,23,7,35,8],issu:[0,2,6,10,14,17,26,27,33,1,38,39,40,19,23,7,35,48,50,52,55,56,58,59,65,8,67,28,25,74,76],addresses_2_email_address:61,allow:[1,38,39,40,19,23,7,35,65,8,67],retval:[50,29,57],jigger:33,tribe:33,move:[1,38,39,40,19,23,7,35],comma:[37,21,40,19,61,69,35,66,67,9,30,55,33],ambiguousforeignkeyserror:[21,17],perfect:40,instance_kei:[40,67],interlink:19,strategizedproperti:31,infrastructur:49,therefor:[37,58,19,43,5,46,66,67,48,52,61,57,72,32,33,36],my_on_connect:29,recept:56,python:[1,38,39,40,19,23,7,35,8,67,28],overal:[1,21,75,39,40,19,23,7,10,35,26,58,49,51,30,67,32,34,57],innermost:[21,35,57,23],strateg:27,close_with_result:[26,27,49],"__delitem__":[16,3],multiprocess:[26,19],anyth:[1,38,40,3,23,7,65,27,49,25,61,21,14,33,34,74,19],instrumentedattribut:[58,3,23,7,69,31],truth:67,create_typ:[51,19],beneath:38,subset:[40,19,42,4,62,65,35,23,28,51,30,72,55,33,57,76],some_view:0,listlik:3,"static":[38,40,3,10,28,51,30,31,76],variabl:[38,40,72,21,7,5,35,26,27,66,49,8,51,9,61,11,55,33,34,25],matrix:[58,51],bag:3,m2m:[38,7],m2o:[40,19,23],auto_close_cursor:23,shut:69,high_prior:[37,5,28],unboundexecutionerror:[40,17,49],always_refresh:[39,34,72],proxied_associ:[38,42],b_tabl:75,could:[26,2,21,10,7,31,16,1,38,40,19,23,43,35,46,49,51,56,58,34,65,8,28,25,72,76],put:[1,38,40,19,23,43,46,33],pg_utcnow:74,david:19,scari:40,length:[1,38,39,40,19,23,7,35,8,67],enforc:[37,21,40,19,27,58,28,25,33,34],outsid:[37,38,60,40,3,58,7,16,35,65,48,49,28,25,9,61,55,33,34,19],timezon:[1,38,40,5,35,25,9,11,74],respond:[56,26,40],select_entity_from:[37,35],expire_on_commit:[46,34,19,49,7],blown:[39,7],scene:[1,61,26,46,49,67,34],some_id:28,owner:[40,23,7,35,64,67,2,55],philosophi:[21,36],start:[4,5,9,10,14,26,27,33,1,38,40,19,23,7,35,46,48,51,52,55,69,65,28,73],licens:24,system:[1,38,39,40,19,23,7,35],termin:[58,19],zaric:39,storage_format:[58,35,40,67,73],gotcha:35,activemapp:[1,38,39,8,23],exclus:[37,61,40,3,8,67,72,73,33],folder_1_parent_id:58,accompani:[56,23],haven:33,get_histori:[21,40,19,7,49,67,31,34],unmap:[35,40],pysqlite1:[38,39],shipping_address_id:21,compoundselect:76,stricter:[40,23],imho:19,"06migrat":40,terribl:46,st_geo
mfromtext:25,clearli:[38,39,65,28],mydataclass:16,messeng:27,accuraci:[25,40,66],courtesi:[1,38,39,40,19,35],"0x102a77e90":30,myrelationshipcompar:31,oracl:[1,39],valuesbas:4,placement:[21,16,40,19],latin1:[5,25,48],polymorphic_map:34,face:[70,19,8],type_conv:[59,40],server_onupd:[40,19,35,10,55,34],fact:[37,21,46,40,19,42,7,5,16,35,26,23,8,49,68,61,31,72,33,34],dbm:42,dbo:[0,35,40,55],dba:33,bring:[69,38,23],rough:[35,7],trivial:3,redirect:[30,56],"__table_opts__":38,should:[1,38,39,40,19,23,7,35,65,8,67],jan:[38,40,19,23,7,25],suppos:[37,1,64,19,21,46,49,28,61,75,12,14,33,56,36],hope:[58,38,19,23,69,35,67,42],"_params_from_queri":[19,28],meant:[35,28,34,23,7],familiar:[61,35,48,49,30,67,72,33],autom:[20,43,62,46,72,55],monetdb:71,expres:30,"0x1015212d0":58,db1:[26,49],db2:[26,71,49],deactiv:35,lala:[26,65,49],y_2:33,y_1:33,stuff:[38,46,39,34,7],do_return_invalid:28,attributeimpl:32,somemappedclass:[37,35,31,49],btw:23,frame:46,qty:30,temporarili:[40,19,23,7,43,49],dynamic_dict:[23,42],polymorph:[1,38,39,40,19,23,7,35],wire:[51,36],process_result_valu:[25,30,16,67,23],someexpr:38,next_valu:[10,41,19],paglock:4,email:[37,21,61,69,65,72,55,33,34,36],memoized_properti:40,sess2:65,parent_alia:64,dbo_master_table_column:40,checkfirst:[1,38,19,23,25,46,50,51,10,55,56],oracle_x:40,engineer_nam:75,msmediumtext:1,etc:[0,4,5,8,15,27,28,30,33,1,38,39,40,19,23,7,35,48,50,55,56,58,65,66,67,68,25,74,76],chemicum:70,detachedinstanceerror:[60,40],post_instrument_class:31,bidrect:21,anon_1_address_count:61,insuffici:58,immedi:[3,21,61,12,14,19,26,30,34,36,38,40,41,23,43,35,46,49,50,51,56,57,58,59,69,65,67,25,72,73,76],fset:[64,19],bindparamclaus:30,droptabl:[56,40],togeth:[2,4,70,12,23,26,30,31,33,34,35,37,38,40,19,42,7,45,48,55,61,8,72,76],some_sess:49,employees_t:75,strongli:[1,40,23,7,5,65,49,36],apppropri:25,intro:62,purchas:42,orderinglist:[38,19,43],"__reduce__":[30,19],site:[73,19,28],incom:[3,21,8,32,16,37,38,40,19,23,7,35,46,49,56,57,69,34,66,67,28,25,73],surprisingli:28,nullpool:[19,7],mutat:[38,35,19,23,7],rearrang:[1,38,7],referenti:[1,38,40,19,23,7,35],matthew:19,"__selectable__":7,competit:46,spell:75,append_order_bi:76,expans:[56,40,49],upon:[0,4,8,10,14,26,27,33,1,38,39,40,19,23,7,35,48,50,51,55,56,58,59,66,67,28,25,73,74,76],myspecialtyp:17,php:[59,24],expand:[58,46,40,3,4,7,48,67,30,55,33,16,19],off:[1,38,39,40,19,23,7,35,65,8,67],"_pgnumer":40,someotherent:58,exampl:[1,38,39,40,19,23,7,35,65,8,67],command:[38,46,40,19,61,5,26,27,49,51,9,10,73,55,33,70],is_primari:[1,38,31],hybrid_method:[64,34,31],paus:14,flesh:[38,8],changset:39,"__lt__":[55,30,31],glue:[61,3],no_autoflush:[19,49],"ka\u017eukauska":35,wed:[39,40,19,23],weak_identity_map:[38,65,23,49,7],circumv:[5,40,42],account_id:[58,21,46],minimalist:55,dest:[46,7],crud:[27,23],piec:49,five:[14,48],as_tupl:[51,40,25],password:[1,38,39,22,19,61,5,6,35,64,65,48,59,51,9,11,14,33,34,55,36],recurs:[37,39,40,19,42,7,35,23,67,25,34,76],desc:[2,19,35,28,9,30,72,55,33],resid:[45,55],snack_id:58,tinytext:5,bind_mapp:49,listen_for_ev:[42,7],cdecim:[25,66],allow_partial_pk:[40,35,46,34,67],nodealia:21,daylight:74,flush:[1,38,39,40,19,23,7,35],raiseerror:7,guarante:[21,40,61,69,35,26,66,49,28,67,9,25,57,13,14,76,17,36,32],"__setattr__":[30,16],folder_account_id:58,avoid:[38,46,40,41,43,35,7,65,66,49,48,28,33,34,19,3],"__getattr__":[37,25,14,7],execute_sequence_format:27,"_group":38,inner_column:76,pg_constraint:56,empno:28,stage:34,joinpoint:[37,40],pypostgresql:[51,35,40],oldpost:3,theyr:1,mutabletyp:[38,35,28],sub_part:[37,76],three:[9,70,12,14,
17,42,26,31,33,34,36,37,38,23,7,35,46,48,49,50,51,52,61,69,65,8,28,72,73,75],mere:[21,55,76],merg:[38,40,19,23,7,35,67],somearg:38,sql2005:9,much:[58,38,39,40,19,42,21,65,35,26,23,72,49,68,75,61,67,11,28,33,36],deliveri:49,ordering_attr:43,do_executemani:27,rollback_return:23,select_whereclaus:8,select_argu:76,search_path:[51,19],"_compare_type_affin":67,gaynor:19,python_typ:[25,19],halt_on:[34,31],count:[1,38,39,40,19,23,7,35,65,8],writeabl:3,meta:[0,2,26,65,25,10,72,55],is_disconnect:[35,27,19],otherwis:[1,3,21,5,8,9,10,11,12,14,17,24,26,27,30,31,32,33,63,36,37,38,39,40,19,23,7,35,46,48,49,55,57,58,61,69,34,66,67,28,25,70,72,73,75,76],problem:[38,46,40,3,7,69,35,26,65,49,67],"int":[1,40,19,7,5,27,66,49,28,25,30,11,73,55],updated_at:30,inh:[35,40,19],ini:23,inf:40,ing:[54,23],keynam:[39,40,23,7,67,30,34],in_:[38,40,19,23,7,35],nonetheless:58,set_committed_valu:[40,49,7],add_listen:68,expr1:35,lookup:[21,40,19,35,72,12,34],varieti:[26,21,5,70,13,23,7,29,30,34,1,38,40,19,42,43,48,49,53,56,57,58,61,25,72],repeat:[1,38,40,19,23,7,5,49,51,9,14,34,36],highest_ord:34,rule:[58,38,39,40,19,23,7,35,8,67,18],message_nam:0,base_mapp:[34,19,23],discriminator_on_associ:[35,42],fbfloat:23,lgpl:38,"const":[29,56],truncate_microsecond:58,deviat:[19,49],somewidget:21,whack:7,uninstanti:72,ddl_listen:56,lowerstr:58,reorgniz:40,"_execution_opt":[74,26],"0x1252490":67,some_index:2,somestr:[38,23],cmd:[74,67],engine_descriptor:8,unmanag:23,pgcidr:23,callcount:[67,28],timetupl:[55,30],set_input_s:27,j25:[61,36],before_delet:[37,32,57],panda:71,"_declarative_constructor":72,user_pref:55,item2:49,item3:49,item1:49,update_cascad:66,nonetyp:[25,35,7],topolog:[1,38,39,40,67,17],told:[21,67,61],internalerror:17,reenabl:7,datetim:[5,8,9,10,11,19,30,33,1,38,39,40,41,23,7,35,59,55,58,66,67,25,74],keymap:26,postalcod:42,flinston:61,mitchel:19,node_2_id:21,total:[1,38,39,40,23,5,35,26,58,67,69,46,75,11,14,55,36],beginin:65,highli:[40,19,23,35,64,8,49,67,72,73,55,34],bookkeep:[46,63,57,28],allow_null_pk:[1,35,40,67],aaron:1,bleed:[23,7],"_functiongener":30,unescap:[40,19],attribute_instru:[35,57],setattr_clean:39,opstr:[55,30],word:[1,38,40,19,23,7,10,35,9,30,55,33],illegal_initial_charact:23,restor:[0,37,40,19,23,38,16,7,26,65,49,28,1,34,35],work:[1,38,39,40,19,23,7,35,65,8],worm:38,coalesc:[74,41,23],unnam:[55,30,2,23,7],indic:[1,39,40,19,23,7,35,8,67],somefil:42,liter:[4,5,9,11,14,26,27,30,33,34,37,38,39,40,19,23,7,35,46,49,51,56,58,62,66,25],"_extract_error_cod":40,ordinari:[21,40,49,61],evaul:[35,19],verifi:[61,33],anon_1:[37,58,23,61,35,66,67,28,51,9,25,75,33,36],recogn:[58,21,40,41,7,35,26,28,32,57,19],hasprefix:[76,4],earlier:[37,61,39,3,49,67,12,33,19],stefano:19,demonstr:[58,35,33,55,28],column_express:[58,25],zzzeek:[58,35,64,28],my_us:37,mechan:[40,19,23,7,35,67,28],order:[0,2,4,10,18,19,26,27,30,1,38,39,40,41,23,7,35,54,55,56,58,65,8,67,28,74,76],its_wood:12,softwar:[66,24],offici:[35,39,40],zen:[62,18],incic:69,demarc:49,flexibl:[37,1,40,19,42,21,65,64,23,58,67,28,29,61,75,34,57],inadvert:[40,19,7,35,73,34],wtihin:34,mydb:[5,14,73],them:[1,3,21,5,8,14,26,30,33,34,36,37,38,39,40,19,23,7,35,49,56,58,61,69,16,65,66,67,28,25,72],thei:[0,1,2,3,4,5,8,9,10,11,14,15,16,17,21,26,27,28,30,33,63,36,37,38,39,40,19,23,7,35,46,49,50,51,52,55,58,61,69,34,64,65,66,67,68,25,70,72,73,74,75,76],test_sqlalchemy_orm:46,safe:[1,38,46,40,23,25,5,26,66,51,9,73,55,33],"break":[1,38,40,19,7,35,8,67,28,21,73],band:7,db_name:35,savepoint_releas:7,objectstor:[1,39,65,23],your_queri:8,appengin:5,connectionfairi:[1,46,27],sajip:28,ommit:23,
append_from:76,network:35,xyzload_al:19,posts_user_id:61,daniel:[38,19],mypackag:49,forth:[58,49,28],connection_rec:50,standard:[4,10,14,28,17,15,33,1,38,39,40,19,23,7,35,48,54,55,56,65,67,68,74,76],test_someth:49,post_id:61,"__setitem__":[12,16,3,23],reconfigur:49,created:10,traceback:[14,35,19,67,72],coerce_argu:41,createt:[56,46,40,67],regress:[35,40,19,7],guesswork:[40,3,28],subtl:[38,40,19],construct_new:39,render:[2,4,5,9,10,19,20,27,30,31,33,34,37,21,40,41,23,7,35,46,54,50,51,55,56,58,69,64,66,67,28,25,73,74,75,76],thoughtfulli:38,independ:[38,2,25,69,35,49,51,70,75,14,34,36],all_orm_descriptor:[35,46,34,31],mycompar:30,sadeprecationwarn:[8,17,7],mutablemap:[16,19],user_account_id:69,hexstr:[25,14,48],cartesian:[38,19,7,35,28,33,36],daterang:51,common_par:34,bind2:23,bind1:23,my_view:0,john:[35,26,34],addcontraint:67,target:[38,39,40,19,23,7,35,65,8],provid:[0,2,4,5,8,10,11,13,14,28,19,24,26,27,29,30,33,1,38,40,41,23,7,35,48,54,50,51,55,56,58,66,67,68,25,73,74,76],minut:[58,25,73],bernson:1,is_aliased_class:31,pessimist:54,manner:[21,7,69,35,46,8,49,28,30,67,34,36],"__main__":[58,21,3,46,49,72,12,34],inherit_foreign_kei:[34,23],realm:67,"__new__":[39,32,30,34,57],terrif:35,unhash:[35,23],latter:[38,21,35,8,49,9,30,16,76],thingon:49,yetanotherclass:[58,72],transmit:36,usernam:[21,22,19,61,5,35,26,65,9,48,25,29,14,33],lexic:[33,39,30,19],phase:[38,39,40,19,23,7,35],passthru:38,hanno:[35,19],databasenot:67,bracket:[51,35,19],wildcard:23,udf:23,notion:[9,49,7],opposit:[58,55,15,66,49],freeform:65,buildbot:40,identifi:[2,4,5,9,10,14,19,26,27,29,33,1,38,40,41,23,7,35,48,50,55,67,28],involv:[21,70,14,26,27,34,37,38,40,19,23,7,35,46,49,58,61,60,28,72,74,75],nobodi:[40,19],just:[0,1,3,4,7,21,12,26,27,30,33,34,36,37,38,39,40,19,23,43,35,46,48,49,55,56,57,58,61,69,16,65,8,67,28,25,72,73,74,75,76],latenc:[28,67,23,7],likewis:23,target_id:72,relatedinfo:72,insertfromselect:74,pg8000:[40,19,67,48],legacy_is_orphan:[58,35,34],useraddress:34,sql_fals:74,mylist:3,emb:[58,40,42,23,25,10,74],cleanli:35,emp:30,odbc_connect:9,session_scop:49,auto_incr:[5,38],some_relationship:31,awar:[2,21,5,66,9,11,13,42,30,33,34,1,38,40,23,35,48,49,61,65,8,28,25],lalchemi:65,posts_tabl:3,yet:[0,1,2,3,4,5,14,26,33,34,36,37,38,40,19,23,7,35,46,49,59,51,55,57,58,61,66,25,72,73],drawn:33,awai:[38,39,40,19,23,35,28],accord:[58,38,3,5,48,49,69,11,55,34,19],newnam:33,xrang:14,preprocessor:70,kitchen:61,some_lookup_id:28,param_5:51,param_4:51,howev:[3,4,5,10,21,14,28,23,26,30,32,33,34,36,37,38,40,19,42,7,35,46,49,50,51,55,56,57,58,59,60,61,64,65,66,67,68,25,70,72,73,75],param_1:[23,25,64,66,51,9,30,33],param_3:51,param_2:[51,33,23],prop1:39,py3k:[35,40,19,67],com:[37,38,22,7,5,46,65,66,49,69,9,61,33,34,36],col:[58,38,39,40,19,23,4,35,7,8,28,25,30],msdatetim:23,toni:21,populate_exist:[19,23],trunk:[65,8,67],wider:[58,35,23,7],guid:[1,35],sortkei:15,strong:[23,7,35,65,49,34],convolut:28,dict_gett:63,someothert:[58,8],accept_visitor:38,subscrib:[56,29,34],"__len__":[23,7],"_set_statu":34,objectdereferencederror:60,ident:[0,1,4,9,21,14,17,23,30,31,32,33,34,35,36,37,38,39,40,19,42,7,45,46,49,55,57,58,61,60,69,65,8,67,28,72,75,76],create_cursor:27,advdatamapping_mapper_inheritance_join:65,engine3:49,engine2:49,smartproperti:39,properti:[1,38,39,40,19,23,7,35],sourceforg:[22,5,6,66,59,9,11],publicli:26,execution_ctx_cl:27,sting:7,mutabledict:[51,35,16],conf:51,somekei:23,conn:[14,28,26,27,30,33,38,40,19,35,46,49,50,51,56,58,59,65,68,25,73,76],incorrectli:[38,35,40,19],perform:[0,3,4,7,9,10,21,12,14,28,23,26,30,31,32,33,63,36,37,38,39
,40,19,42,43,35,46,48,49,50,51,56,57,59,61,69,34,64,66,67,68,25,73],mymappedcollect:3,amount:[37,38,40,4,46,65,25,36,76],do_execut:[40,27],descend:[38,39,2,23,35,30,33,34,75,76],doctest:[1,61,33,70,36],pool_logging_nam:[40,48],fragil:38,hang:[1,59,67,23,28],hand:[0,21,40,19,23,61,5,66,49,48,51,25,67,11,28,55,33,75,76],rais:[1,2,3,4,5,7,8,9,61,11,13,14,28,17,19,21,26,30,31,32,34,37,38,39,40,41,23,43,35,46,48,49,50,51,55,57,58,59,60,16,66,67,68,25,72,74],kept:[38,40,46,49,25,14,76],undesir:[40,2],scenario:[21,8,9,10,12,17,30,33,34,38,40,19,23,7,35,46,48,49,55,56,58,61,69,16,66,67,28,25,72,73,75],thx:7,thu:[1,38,39,40,19,23,7,5,35,48,49,51,10,67,21,13,34,25],kyle:35,client:[0,19,5,46,27,66,49,51,9,55,36],wherebi:[37,38,40,19,23,7,69,35,58,21,76],thi:[1,38,39,40,19,23,7,35],concurrentmodificationerror:[60,40,8,7],native_odbc_execut:9,account_bal:64,mydriv:9,previous_transact:57,engineers_t:75,photo:34,farther:35,unbuff:26,after_commit:[32,57,35,49],wright:35,aasma:38,spread:2,board:[50,58,8,25],do_rollback_to_savepoint:27,"_constraint":[40,2],mayb:35,monei:[38,9],local_kw:49,scopedsess:[40,19,23,7,65,67],aram:23,noinspectionavail:[13,34,17,49],deprec:[1,38,39,40,19,23,7,35],reassign:7,astext:42,manual:[58,38,21,40,19,23,43,5,7,65,66,49,48,25,11,12,55,33,34,75],percentag:67,flatten:19,primary_mapp:34,overcom:28,query_choos:[45,42],my_tabl:[72,5,34,65,67,51,31,56],peek:33,plu:[40,19,69,35,46,67,9,10,36],someclass:[37,21,64,42,23,35,46,58,49,8,72,32,28,57,19],confer:28,some_composit:31,repositori:33,post:[3,5,10,17,23,26,30,31,32,39,40,19,42,7,35,49,55,57,61,65,8,75],obj:[39,40,23,49,60,30,31,12,15,34],invoice_id:2,fromcach:[40,42],"__mul__":30,pool_timeout:[38,14,48],queuepool:[1,38,40,19,23,35,46,48,68,29,14],subchild1:21,subchild2:21,"float":[38,40,19,23,7,5,66,67,51,9,25,11,73],bound:[1,38,39,40,19,23,7,35],verbiag:40,unique_connect:[14,39],samem:39,opportun:[32,40,57],auto_setinputs:[38,66],accordingli:[59,23],jdbc:[5,51,48,67],wai:[0,2,10,13,14,26,30,33,1,38,39,40,19,23,7,35,48,51,55,58,65,8,67,28,25,73,74,76],frill:2,users_id:[61,65,36],msdatetime2:7,interv:[38,40,23,7,66,25],lowest:[69,30],isupd:27,mycol:[30,39,26],somehow:39,petursson:39,sqlalchemi:[1,38,40,19,23,7,44,35],"true":[1,38,39,40,19,23,7,35,65,8],reset:[37,38,40,19,23,35,48,49,28,50],absens:[58,19,49],maximum:[40,19,5,35,27,67,11,14],has_par:31,absenc:74,emit:[35,40,19,67,7],shardedsess:[45,40,19,23,7],request:[0,38,60,40,19,42,7,5,35,26,23,48,49,55,75,58,11,21,14,17,36],"abstract":[61,69,65,8,25,30,72,12,14,33],myotherclass:[58,3,23,49,72,34],inact:[40,23,49],encrypt:[25,35,64],elementtre:42,refactor:[1,38,39,40,23,7,67],jona:39,entrypoint:[58,40,35,26,49,67,12,76],jone:[61,33],notlik:[35,30,55],test:[5,6,8,9,10,14,22,26,29,30,33,1,38,39,40,19,23,7,35,48,50,51,55,58,59,66,67,28,25,71],hasnamemixin:28,realiti:[58,49],node_1_data:21,configu:7,"__clause_element__":[35,64,34,31,7],flag:[0,1,2,4,5,8,9,10,11,14,17,21,26,27,30,31,32,33,34,36,37,38,39,40,19,23,7,35,46,48,49,50,51,55,56,57,58,59,61,16,65,66,67,28,25,70,73,74,76],tinyint:[5,1,9,39,40],pathnam:51,original_init:31,concept:[58,38,39,2,19,23,61,69,26,49,30,72,55,33,36],broke:[46,39,40],consum:[38,61,23,4,26,49,28,50,30,59,55,33,17,76],interepret:58,unicode_result:40,impltyp:25,supplement:35,visit_varchar:74,subcompon:67,"_switch_shard":26,middl:[40,19,43,35,28,12],zone:[1,74,40,49],flask:49,graph:[38,19,23,7,46,18],rightmost:[40,23],isinst:[40,34,64,67,25,31,16],zhang:38,octob:[58,35,65,8,28],ip_address:[58,21],gui:[40,49],reconcil:[37,39,23,49],"_create_rul":[40,2],gut:[65
,23],declarative_bas:[58,21,64,40,3,42,43,16,35,46,23,67,61,72,12,34,57,19],upper:[55,30,42],pickleabl:[40,19,23,7,67,25,15],htt:65,univarchar:22,lemoin:35,"5devel":40,appeas:[1,40],appear:[38,22,19,23,7,5,35,26,65,28,9,61,31,40,33,36,76],pbxt:5,appeal:21,hardwir:40,global_connect:[1,39],gener:[1,39],satisfi:[35,55,28],left_node_id:21,weakref:[14,26,16,19,7],behav:[38,40,19,23,21,66,49,28,25,9,61,67,34],pysqlit:[1,40,23,7,35,48,67,28],preexecute_autoincrement_sequ:27,regardless:[0,21,9,12,26,30,35,37,38,40,19,23,45,46,48,49,55,57,61,66,67,25,73,74,76],extra:[3,70,14,27,32,33,36,1,21,39,40,19,23,7,35,51,58,61,65,67,28,25],email_1:21,marker:[40,26,49,29,10,2,57],regex:23,"___tablename__":34,prove:[67,23],py3k_warn:19,lrucach:[40,19],live:[40,23,65,66,28,68],sequence_nam:27,execute_if:56,unseri:40,type_compil:56,logarithm:58,ibm:71,prepar:[58,40,3,23,43,35,26,27,66,49,28,50,9,72,19],cap:38,focu:33,mycolumn:[35,30,74,67],can:[1,38,39,40,19,23,7,35,65,8,67],boilerpl:28,user_db_link:35,related_th:[0,49],raiseerr:13,topic:23,heard:23,abort:[50,35,46,68],"__sizeof__":30,occur:[0,1,3,21,8,61,14,17,23,27,29,31,32,33,34,37,38,39,40,19,42,7,35,46,48,49,50,57,58,59,69,16,64,66,67,28,25,72,76],dropschema:[56,19],lxml:42,multipl:[1,38,39,40,19,23,7,35,8,67],write:[0,38,46,3,23,43,70,35,7,65,58,49,25,61,21,73,12,72,33,34],criterion:[1,26,3,38,12,4,30,31,32,36,37,21,40,19,23,7,35,51,55,64,65,75,76],sysdba:66,product:[38,19,7,35,67,28,33,18,76],aol:33,setinputs:[38,19,35,27,48,66,25],latin1_general_ci_a:9,allow_column_overrid:7,explicit:[1,38,39,40,19,23,7,35,8,67],marshal:58,column_prefix:[38,34,7],"ga\u00ebtan":40,clausetest:30,"__rdiv__":30,still:[5,8,14,19,26,30,33,1,38,39,40,41,23,7,35,51,58,65,66,67,28,25,73],dynam:[35,40,19,23,7],conjunct:[58,38,39,19,23,4,7,26,8,54,55,10,14,35,18,76],addres:33,window:[39,40,19],non:[1,38,39,40,19,23,7,35,65,67],loaded_valu:31,sqlani:71,halt:[32,46,34,57],enable_rowcount:[59,40],col6:[56,2],col4:[56,2],importlat:[40,19],col2:[39,2,7,56,26,59,51,30,4,25,76],col3:[56,26,2,4],col1:[58,39,2,7,56,26,59,51,30,4,25,76],half:[1,35,28,40,7],nov:[38,23,7],superset:[51,76],discuss:[21,2,61,56,35,46,49,51,30,55,34],nor:[38,40,19,23,43,35,7,27,49,28,51,30,67,73,33,34,57],introduct:8,wont:38,total_quant:[37,76],drop:[1,38,39,40,19,23,7,35,26,58,74],datamemb:[40,27,28],unmappedinstanceerror:[40,60,34,49],include_column:[0,55,27,23,7],januari:[8,67],domain:[38,40,19,23,7,54,33],init_sqlite3:46,arg2:[74,7],backport:[35,40,19,7],significantli:[38,40,23,35,66,67],year:[38,40,19,7,35,28],operand:[51,30,23],happen:[1,38,39,40,19,23,43,35,26,65,58,49,60,61,46,10,67,21,33],notify_al:35,shown:[21,35,19,7],accomplish:[61,19,7,27,49,28,9,72,74,33,56],lastrow_has_default:[26,27],space:[1,40,19,7],select_text:8,rational:[58,61,40,7,8,49,28,59,25,67,13,34,71,36],large_collect:[19,42],"_sessionclassmethod":49,carv:23,american_america:66,parent_host:[58,21],default_from:27,dest_dict:31,care:[37,38,40,61,21,35,65,49,51,30,72,25,76],fixture_data:42,reserved_word:[40,19],catalog:[20,40,23,61,51,33],reorder_on_append:43,coroutin:35,lambda:[21,3,65,72,12,34],user_mapp:49,directli:[1,2,3,4,5,8,9,11,12,14,15,23,21,26,27,30,31,32,33,34,37,38,40,19,42,7,35,46,48,49,50,51,55,57,58,61,69,65,66,67,28,25,72,74,75,76],qmark:[33,26,7],r2556:38,particp:25,yourself:[38,51,7],constrained_column:[0,58,27],test_idx_:72,ring:35,zap:3,size:[1,38,40,23,7,35,26,48,49,25,30,67,14,16,36],silent:[1,58,40,19,7,5,35,46,67,28],imit:42,caught:[28,17,19],largebinari:[40,25,5,66,67,51,9],type_engin:25,numrang:51,manager_id:75,sear
chword_1:64,friend:[8,7],searchword_2:64,uniqueconstraint:[38,40,21,35,67,29,72,2,55],especi:[19,5,35,26,49,72,34],mostli:[38,39,19,69,26,49,28,70,67,55],than:[0,1,2,21,9,10,11,14,17,26,30,31,33,34,36,37,38,39,40,19,23,7,35,46,48,49,51,55,57,58,61,60,69,65,8,67,28,25,72,73,75,76],instrumentation_find:63,relationcach:40,nodes_parent_id:65,dogpile_cach:[58,35,42],auto_identity_insert:[38,40],optimist:54,calculated_valu:57,browser:[65,49],rowset:23,"__getattribute__":[30,40,7],anywher:65,delin:49,deliv:[64,19,26,48,49,30,13,16,76],enable_relationship_load:[21,35,49],notimplementederror:[41,7,35,4,25,30,19],ambigu:[1,38,40,19,21,35,27,58,49,72],rollback_savepoint:[50,68],uncomplet:49,pgp_sym_decrypt_1:25,engin:[1,39],map_column:[40,34,23,31,72],sqlachemi:65,slave1:49,slave2:49,begin:[21,70,14,26,27,30,33,34,1,38,39,40,19,23,7,35,46,48,49,50,55,61,16,65,67,68,25,73],nvarchar2:[7,40,66,19],changes_pre_05:19,price:[37,46,34,42],my_model:72,validate_address:34,renam:[38,40,19,23,7,35],rescu:58,extendedinstrumentationregistri:[58,63],canload:23,shortnam:[40,23],concurr:[19,23,7,26,48,9,14],relationship_cach:42,read_uncommit:[19,73],vertice_y1:34,vertice_y2:34,postgres1:39,onli:[1,38,39,40,19,23,7,35,65,8,67,28],snack:[12,58],outerjoin:[38,40,19,23,7,65,8,67],overwritten:[37,38,23],hajim:35,some_key_func:3,searchword_id:64,cannot:[0,2,3,9,70,17,26,30,33,16,36,37,21,40,19,23,35,46,49,61,60,34,8,67,75,76],normalize_nam:27,myepochtyp:25,connect_arg:[1,46,48,73],truli:[1,74,46,66],exclude_typ:27,seldom:[58,35,65],arrays:[26,66,23],table_per_associ:42,handle_dbapi_except:27,n122:65,target_collect:12,sql_compil:[74,27],sometest:49,concern:[61,35,49,30,33,36],top_region:76,between:[1,38,39,40,19,23,7,35,65,8,67,28],"import":[1,38,40,19,23,7,35],iterate_properti:[34,23],visit_insert_from_select:74,supports_sequ:27,pertain:48,aggregated_unit_pric:37,tutori:[1,4,65,35,26,47,8,54,28,67,18,76],evict:66,address_id:[55,34,72,36],runar:39,overview:[20,67,19,18],thing_id:0,dispatch:[40,19,35,48,67,28,55],after_transaction_end:[35,57],exploit:23,featur:[38,39,40,19,23,7,35,65,8,67],effective_valu:30,damag:24,save:[1,38,39,40,3,42,7,65,35,23,8,49,28,67,21,74,34,75,19],sess:[37,42,23,65,49,12,16,36],clarif:[40,23],harmless:35,rebuild:7,local_timestamp:39,defaultbas:72,derefer:[38,23],anon_1_id:[37,61],emphas:[26,40],rubi:42,passive_return_never_set:19,query_cl:[40,19,7,45,8,49],stereotyp:23,log_2:25,log_1:25,composite_properti:7,pgpstring:25,henc:[14,46,35,19],"_get_statu":34,worri:[1,33,23,64],tinyurl:7,searchword_word:64,uncom:58,develop:[61,42,7,5,35,64,49,55,16,71],food:61,epoch:25,execute_clauseel:23,document:[1,38,39,40,19,7,44,35],finish:72,someon:[35,39,28],instrumentation_manag:63,cls_:57,disassoci:28,deferr:[40,23,5,35,66,51,62,2],unflush:[21,19],is_prepar:[50,27,68],touch:[33,23],speed:[1,38,39,40,23,7,67],slfloat:[23,7],versu:[58,61,40,19,23,7,69,35,64,8,67,28,9,74,33,34],exclude_setinputs:[35,66,19],escape_literal_column:27,bigger:[25,49],versa:[12,21,40,23,49],earli:[58,5,35,28,50,30,73,34],"_email":34,read:[3,4,5,9,21,12,14,20,26,33,34,37,38,39,40,19,23,7,35,46,49,59,51,53,61,65,66,67,25,72,73,76],outermost:[26,19,23,46,65,49,33,57],detract:66,test_execut:[35,19],some_engin:[49,9,72,55,15,34],threadsaf:[46,26],appic:49,benefit:[58,40,34,28,72,63],cascad:[1,38,2,19,23,7,35,26,58,67,28,40,56,18],output:[58,39,2,23,7,35,27,66,67,48,25,9,30,40,14,33,55],coerce_compared_valu:[25,40],returningresultproxi:19,andrija:[38,39],audriu:35,orderlin:30,myothermixin:72,typecolnam:34,ondelet:[38,2,3],session_us:41,reset
_on_return:[19,35,46,48,50,14],column_info:[50,58,55,34],asap:67,sqlalc:67,"throw":[38,39,40,19,23,7,35,46,50,33],src:46,central:[67,49],greatli:[58,40,42,23,7,16,46,8,67,28,4],cloneabl:23,schemanam:[40,38,35,2,23],adapt_typ:27,degre:[1,40,19,23,7,35,46,48,49,28,72,73,34],processor:[40,19,23,7,35,26,27,67,28],unregist:31,your:[3,21,5,9,70,12,7,33,34,36,1,38,40,23,43,35,46,49,51,55,61,65,8,67,25,74],lob:[38,40,19,23,7,35],log:[38,39,40,19,23,7,35],area:[0,61,2,5,27,48,49,68,70,67,28,40,33],aren:[0,38,2,19,23,7,35,66,49,28,25,10,58,40,32,34,55,76],needless:[40,19,23,7],msmediuminteg:[23,7],low:[0,58,69,68,50,73,14,34,57],lot:[37,58,39,40,19,23,7,64,65,66,67,9,61,33,34],data_t:[58,51],apollon:40,sqla_nos:28,"__get__":[69,19],"default":[1,38,39,40,19,23,7,35],class_uninstru:[35,57],data_1:[58,69,25],foreignkei:[0,2,3,38,5,8,12,17,7,33,34,1,21,40,19,23,43,35,49,52,55,58,61,64,65,66,67,28,72,75,76],select_t:[1,38,40,23,7,8,67],multibyt:[9,22],myregistri:49,decreas:46,opnam:[40,67],max_overflow:[38,14,48],proxyengin:[1,39,23],add_neighbor:42,prepend:[58,21,40,38,27,67,34],valid:[3,5,8,9,11,25,26,31,33,16,36,38,40,19,7,35,46,51,56,57,69,66,62,73],ignor:[3,5,10,7,32,1,21,40,19,23,43,35,46,49,59,51,55,56,58,67,28,25,72],you:[0,1,2,3,21,5,7,9,70,11,12,14,28,20,25,26,32,33,34,36,37,38,39,40,19,23,43,35,46,48,49,50,51,52,55,57,58,62,65,8,67,68,69,72,73,74,75],poor:[9,66],event_nam:[55,56],registri:[58,21,40,19,35,26,49,28,72,55],docstr:[21,40,23,7,48,49,61],pool:[1,38,39,40,19,23,7,35],reduc:[1,38,40,19,23,7,69,35,46,65,66,67,28,51,61,21,12,25],bulk:[37,3,23,7,46,49,28,32,57],msdate:23,rangeoper:51,columnproperti:[58,38,40,23,7,31,13,34],myinteg:25,breadth:[20,70,54],messi:23,instrument_attribut:63,month:[58,38,40,23,7,66,73],correl:[38,35,65,19,23],autoexpir:[8,7],count_bi:[1,8],id_choos:[45,23,42],mymixin:72,myinsertth:74,tname:17,rowproxi:[58,40,19,7,26,67,32,34,17,57],veri:[1,2,21,10,23,26,29,31,33,34,36,37,38,40,19,42,7,35,46,49,53,58,61,64,8,67,28,25,72],special_argu:25,cright:[55,30],emul:[12,26],node1:42,informix:[38,40,19,35,25,71,18],node3:42,superced:[19,23,67,50,29,57,34,36,76],unique_param:[76,33,55,30,4],dimens:[58,51,35],unmark:23,shard_choos:[45,23,42],connection_invalid:[14,17,19],nodes_1_nam:65,consecut:43,localtim:[39,41],jpellerin:39,defaultrunn:40,folder_id:[58,21],modifc:1,excess:[46,39,16,23],rudimentari:[21,19,23,7,25,42,34],modifi:[1,38,39,2,19,24,4,7,26,23,48,67,8,40,28,56,35,18],twomei:38,extend_exist:19,set_client_encod:[51,19],ahead:[57,19,23],foreginkeyconstraint:40,"__composite_values__":[16,34,65,31],arent:38,drivernam:48,keep_exist:19,get_bind:[49,28],onupd:[38,39,40,23,7,10,21,2,55],glitch:[39,66],famili:33,dangl:23,aggress:[38,35,39,40,23],khan:19,formul:[37,38,40,8,49,31],taken:[37,61,40,23,42,25,5,35,66,50,51,30,73,12,55,76],create_constraint:[5,25,40],manager_and_engin:75,status_id:69,hurri:33,parse_colnam:73,attr_nam:3,histori:[1,21,39,40,19,42,7,69,35,23,58,49,28,31,13,32,34,57],integrityerror:[17,49],templat:[56,31],child3:21,unreli:8,oid_column:38,phrase:[38,40,69,35,46,66,30,2,56],uncheck:37,postgresql_wher:[51,40],raise_on_warn:[35,19],anoth:[0,1,3,4,5,8,10,21,30,31,33,34,36,37,38,39,40,19,23,7,35,49,55,57,61,69,66,67,28,25,72,73,76],mapped_collect:[65,3],mediumtext:5,of_typ:[35,23,7],after_configur:[19,57,72],reject:[25,35,33],type_b:72,type_a:72,noreferencedtableerror:17,passive_upd:[21,40,35,66,67,31,34],sneak:61,unlink:3,"_annot":69,stabil:[9,23],lifetim:[3,23,26,68,32,57],my_seq:66,group_on:34,longblob:[5,38],help:[21,23,26,27,30,31,33,34,1,38,39,40,19,42
,7,35,49,51,58,61,8,67,25,72],btbuilder:19,soon:[58,61,40,19,23,69,35,26,49,28,34],held:[69,26,49,9,14,34],hierarchi:[38,64,40,19,42,4,62,65,7,26,23,50,51,69,72,33,63,35,18],paramet:[1,38,39,40,19,23,7,35],psycopg2:[1,38,40,19,23,7,35,26,48,67,28,25,29,14],psycopg1:39,segfault:19,bar_tabl:65,finer:[51,67,40,65,23],pool_threadloc:8,msunicod:38,if_:38,ninja:12,parenthesi:[38,39,40,23,4,10,35,7,30,55,33,76],make_transi:[35,40,67,49],concurrency_level:59,use_native_unicod:[51,40,67],foo_:50,fulli:[1,38,39,40,19,23,7,35,65,67,28],intervent:14,savingsaccount:64,utf8encod:67,heavi:[39,19,23,7,26,33],except_al:[37,76,33,7],supports_default_valu:27,mapperextens:[1,38,39,19,23,7,8,28,32,34],beyond:[37,21,3,23,35,46,27,48,28,25,31,73,14,34,17,19],todo:[1,38,63,31],event:[39,40,19,23,7,35],safeti:[26,73],subqueryload_al:[75,40,67,36],publish:[29,71,24],mytarget:28,view_id:0,asc:[33,55,30,19,28],reason:[1,4,5,9,10,21,14,34,37,38,40,23,43,35,46,48,49,50,55,58,59,8,67,28,74],base:[1,38,39,40,19,23,7,35],multifield:38,ask:[39,40,19,35,48,67,74,17,76],earliest:57,heroic:5,basi:[21,46,40,3,42,4,7,23,48,49,28,59,51,9,30,25,33,36,76],selectbas:[37,35,33,76],round:[67,40,19,23,7],undergo:[58,28],assign:[40,19,23,7,35,67],datamodel:42,singleton:[0,55,40],obviou:[75,67],appenderqueri:7,placehold:[1,58,40,19,67,25,33,34],ignore_fk:[21,55,19],isnot:[55,30,19],implementor:72,miss:[1,38,39,40,19,23,43,5,35,7,58,67,46,70,21,34],join_depth:[21,65,23,31],namedtupl:[37,35,40],order_bi:[0,1,15,7,30,31,33,34,36,37,21,39,40,19,23,43,35,61,69,8,67,28,72,76],scheme:[1,38,2,41,23,25,62,45,26,58,67,51,69,72,73,40,55,56,35,36,19],schema:[1,39],translate_row:[32,39,57],behind:[1,61,46,23,26,49,67,33,34,36],col1_0:4,col1_1:4,getter:[23,64,49,31,12,34],bower:[40,19],get_index:[0,27],settattr_clean:39,local_t:[46,34],jqueri:28,bridg:[12,21,3,58,48],str:[61,46,40,4,5,35,7,27,48,67,8,25,30,55,33,74,76],pgp_sym_encrypt_1:25,toward:[58,61,39,2,23,46,27,48,49,25,30,33,34],grei:28,randomli:[5,34,7],wasnt:[1,38],yeee:49,connectionprovid:1,"null":[1,38,40,19,23,7,35,67],edspassword:61,lib:[1,51,40,19,67],newval2:23,newval1:23,useless:[19,23],selectcontext:32,node_id:[21,64,2],compositeproperti:[40,34,31,7],literal_column:[38,40,19,27,28,30,76],dbapi_error:[50,19],somerefer:31,clear:[0,37,64,40,3,23,38,35,7,58,49,8,67,31,55,33,19],salari:[59,30,28],adapt_on_nam:[37,19],clean:[1,46,40,19,23,43,69,7,65,49,70],bidirect:[21,40,19,7,35,61,12],usual:[0,1,2,3,21,5,7,10,12,13,14,26,27,30,31,32,33,34,36,37,38,39,40,19,23,43,35,46,48,49,50,51,55,57,58,61,69,16,66,67,28,25,72,75,76],parentent:31,inserted_primary_kei:[35,40,19,67],entry_point:26,shard_id:[45,26,19,7],shard_2:26,coerc:[58,40,19,35,48,67,30],pretti:[1,21,40,19,58,49,8,36],less:[37,61,40,72,23,7,69,35,46,65,48,49,8,50,67,31,28,33,34],queu:[14,23],coltyp:[40,27,67,25],nativ:[35,40,23,7],simplist:[1,9,75],myclass_id:72,abstractconcretebas:[19,72],close:[5,14,28,17,26,27,33,34,1,38,40,19,23,7,35,46,50,52,56,57,58,59,62,68,69,73],whatsoev:[1,7],pgcrypto:25,wow:33,particip:[19,5,45,26,49,28,32,55,57],folder_parent_id:58,won:[21,40,3,7,5,35,48,49,68,67,28,33,34,76],last_insert_id:27,web:[62,14,73],numer:[1,38,40,19,23,7,35,26,65,58,67,29,30,74,33,76],fluentli:58,isol:[58,38,40,19,23,35,26,27,28,14,33,56],lowercas:[38,40,64,27,66,48],distinguish:[19,23],relationshipcach:42,both:[0,2,3,4,5,8,70,11,12,13,14,28,17,23,21,26,29,30,31,33,34,36,37,38,39,40,19,42,7,35,46,48,49,51,57,58,61,69,16,64,65,66,67,68,25,72,74,75,76],delimit:[27,49],target_fullnam:2,attr_stat:31,nodes_id:65,standard_conforming_st
r:40,jeff:[39,19],instrumentedlist:[38,65,3,23],header:42,linux:[35,19],stamp:[1,49],empti:[38,40,19,23,7,35],safenumer:25,double_precis:[51,40,66,7],clauseparamet:[39,27,23],imag:[9,7],coordin:[69,65,49],on_request_end:49,look:[3,21,10,12,27,33,34,36,37,38,40,19,23,35,49,61,69,65,67,28,72,73,75],kwarg:[1,4,5,9,10,11,12,19,21,26,27,30,31,32,34,35,36,37,38,39,40,41,23,7,45,48,49,51,55,57,65,28,25,72,73,76],nls_lang:[40,66],"while":[58,30,2,19,4,5,7,26,65,8,28,25,9,10,38,73,40,55,33,35,76],match:[58,38,40,19,23,7,35,26,65,48,54,28,30,67,2,55,17,76],case_sensit:[1,38,23,27,58,48],guido:40,fixtur:[35,49,42],loos:35,loop:[1,40,19,23,7,35,74],pack:64,"_compileonattr":7,readi:[35,26,18],"_state":23,supports_sane_rowcount:[38,26,27,19,23],"_statu":34,"0x10152bed0":58,grant:[35,40,24],belong:[38,49],zope:[40,3,49],conflict:[38,40,19,23,7,35,76,33,17,41],unlength:[25,40,23],usec:8,optim:[37,38,40,19,23,43,5,7,65,66,67,8,51,61,69,11,28,36],widget_nam:58,address_typ:69,temporari:[35,28,23,7],user:[0,1,2,3,4,5,6,8,9,10,11,12,15,16,17,19,23,21,26,29,30,31,32,33,34,36,37,38,39,40,41,42,7,35,46,48,49,50,51,53,55,56,57,58,59,61,69,63,64,65,66,67,28,25,70,72,73,74,76],"__add__":[25,30,31],flatten_iter:7,sqlexpress:[65,67],els:[0,1,19,23,4,74,7,26,27,58,54,30,38,55,56,35,18,76],older:[38,35,40,19,23],prepare_twophas:[50,68],commonli:[37,61,2,23,42,72,48,10,31,33,34,76],create_connect_arg:27,append_item:38,"_collect":[30,49],match_obj:73,node_1_parent_id:21,uselessli:23,pgdoubleprecis:7,nvl:74,before_attach:35,users_table_2:55,shortcut:[21,40,61,38,67,28,30,72,55,33,34],unintent:40,invalidrequesterror:[40,19,35,26,49,14,17],subsequ:[21,10,26,29,33,34,35,36,37,38,40,19,23,7,45,49,50,51,55,57,58,61,16,65,66,67,28],march:[58,35,65,23,7],game:40,characterist:[51,40,67,61],found_row:5,someent:58,signal:[35,27],resolv:[38,46,40,19,23,4,35,7,50,30,2,55,17,57],strife:40,popular:[35,70,57,49,42],interestingli:33,defaultexecutioncontext:27,engine_on:[58,72],select_by_attributenam:23,greaterthan100:30,creation:[1,38,39,19,7,35,26,58,28,74,56,18],some:[0,1,2,3,4,5,7,8,9,10,21,13,14,15,23,26,27,29,30,31,32,33,34,35,36,37,38,39,40,19,42,43,45,46,48,49,50,51,52,55,57,58,59,60,61,69,63,64,65,66,67,28,25,72,73,74,75],join_to:[38,8,23],slash:[48,73],cgi:[39,19],special_kei:12,run:[3,9,70,14,23,26,32,33,34,1,38,39,40,19,42,7,35,46,48,49,55,61,64,66,67,28,25],reverse_oper:[55,30],stem:1,funcnam:[40,67],step:[1,38,39,40,19,23,7,35],subtract:64,faith:35,wouldnt:38,bind_tabl:49,podolyaka:35,featureset:[38,10],exc_info:35,block:[1,58,35,40,7],torborg:[35,40,19,7],syncrul:[40,19],"_configure_mapp":34,within:[0,1,2,3,4,5,7,8,9,10,21,13,14,16,17,23,26,27,30,31,32,33,34,35,36,37,38,39,40,19,42,43,44,45,46,48,49,50,51,52,55,56,57,58,61,69,63,65,66,67,28,25,70,72,73,74,75,76],ensur:[0,1,26,3,21,5,8,11,14,23,7,30,34,36,37,38,40,19,42,43,35,46,48,49,58,61,69,16,64,66,28,25,75,76],uncompil:[50,23],install_descriptor:63,mapper_configur:57,captur:35,non_primari:[40,34,19],beaker_cach:[58,40,67],clear_manag:14,outparam:[30,65,23],properli:[1,38,46,40,3,23,7,35,26,27,48,72,21,58,57,19],pwd:9,newer:[61,39,3,23,7,35,49,59],class_attribut:31,info:[0,21,26,27,31,34,1,38,40,19,23,7,35,46,48,50,51,55,56,69,67,68,72],fdb:35,utf:[39,7,5,27,48,67,51,25,33],"_scopedext":23,from_joinpoint:[37,21,40,65,23],customer_select:34,preexecute_pk_sequ:[8,23],similar:[0,2,4,5,10,15,26,27,30,33,38,40,19,23,7,35,48,51,55,58,8,67,28,25,74,76],"__sa_instrumentation_manager__":[63,7],obviat:23,doesn:[0,3,21,10,7,27,33,16,37,38,40,19,23,43,35,46,49,55,58,61,65,66,67,28,25,
72,73,74,76],repres:[0,2,3,4,8,10,21,12,13,28,19,23,26,27,30,31,33,34,36,37,38,40,41,42,7,35,46,48,49,50,51,55,56,61,69,16,64,66,67,68,25,72,73,74,75,76],count_from_1:43,count_from_0:43,incomplet:[40,18,7],supports_sane_multi_rowcount:[26,27,23],target_class:12,shopping_cart:0,client_flag:5,hybrid_properti:[64,19,35,46,31,34],titl:[33,34],"__format__":30,postgresql_return:40,incl_alia:[37,76],mymetadata:72,draw:76,fewest:23,referenceaddressmixin:72,william:[61,33],eval:46,"5rc3":7,"5rc4":7,register_hstor:51,svn:39,visit_:[40,67],smallmonei:[38,9],redundantli:76,format_column:27,depth:[37,21,19,23,35,30,36],unconnect:61,parentth:58,get_table_oid:67,current_us:41,friendli:[35,40,57],"_gp":64,aris:[21,24,23,49,72,33,34],identitymap:[69,39],datafil:42,misnom:40,rollback:[1,38,39,40,19,23,7,35,65,8],book_count:34,after_upd:[37,32,57,35,23],appendermixin:7,runnabl:42,someotherclass:31,fromclaus:[19,23,7],button:49,michael:[38,24],ryan:[35,19],unquot:[5,11],selectioncontext:38,lazyili:40,othercolumn:[55,30],comparator_factori:[58,21,40,19,7,35,72,67,51,30,31,34,25],c_tabl:75,download:[22,5,6,66,59,51,9,70,11,73],click:[61,33],haaland:40,poke:38,fetchxxx:26,createsequ:[56,10,40],item_keyword:8,experiment:[38,39,19,23,7,35,18],vinai:28,shard_1:26,otherbas:72,becom:[38,39,40,19,23,7,35,28],accessor:[4,10,19,26,27,30,37,38,39,40,41,23,7,35,55,58,61,64,8,67,28,74,76],createcolumn:[35,56,55],visit_insert:74,roombook:51,convers:[2,3,5,8,9,27,33,1,38,39,40,23,7,35,48,49,51,58,61,66,67,25,74],lockmod:[38,40,49],contains_eag:[38,40,19,7,35,8,67],chang:[1,38,39,40,19,23,7,35],chanc:[0,40,23,35,27,28,25],my_string_3:30,clark:[35,19],danger:[14,43],wil:55,"boolean":[0,21,5,9,10,11,27,30,31,34,1,38,39,40,19,23,7,35,49,50,51,55,61,64,67,25,73,76],cloud:5,implic:[59,49],reflectt:[0,27,19],expiri:[58,34,49],mismatch:[40,66],subjob:58,identity_key_from_primary_kei:34,providesus:72,retriev:[0,4,7,14,26,27,30,34,36,37,39,40,19,43,35,46,48,49,50,51,55,58,61,60,69,68,76],a_tabl:[21,75],ride:[70,23],visited_inst:31,with_lockmod:[37,38,40,19,21,49],"0x10dae8668":63,meet:[55,30,34,3,31],pqconnectdbparam:51,control:[1,38,40,19,23,35,26,65,58,67,8],o2m:40,opensourc:24,job_a:58,georg:19,job_p:58,circular:[1,38,39,17,7],columndefault:[38,39,10,55,23],theatrum:70,do_release_savepoint:27,callable_:[30,26,56],add_al:[58,21,40,23,7,46,8,49,61,67],ref_num:2,with_transform:[37,64,19],comparable_properti:[23,7],"02d":[58,73],charset:[5,25,9,40,19],some_table_with_no_pk:46,primarykeyconstraint:[1,38,2,19,5,35,67,40,55],outputtypehandl:66,outer:[1,38,40,19,23,4,35,7,65,66,67,28,58,55,33,76],tometadata:[38,40,19,7,35,28,50,25,55],maxemail:33,handl:[1,38,39,40,19,23,7,65,35,26,27,48,54,68,50,29,67,55,28,56],auto:[1,38,39,40,19,23,7,35],handi:[0,38,19,23,7,65,58,49,33],front:[70,34,72,23,7],manager_mapp:75,somewher:[38,35,49],slide:43,mydefault:10,mode:[38,40,19,23,7,35,8],autoincr:[38,40,19,23,21,5,35,26,27,66,28,9,10,73,55],myconnpi:5,upward:[37,10,67],table_b:65,table_c:65,table_a:65,chunk:[46,23],dialect_impl:25,tablename_somecolumn:76,drizzl:[18,28],"_current_path":58,special:[3,4,5,9,10,12,14,26,27,30,33,40,19,7,35,46,51,55,56,58,8,28,25,74,76],influenc:8,suitabl:[61,3,43,26,27,48,49,30],user_t:[0,35,34,49],"_default":41,user_alia:[37,61,36],manipul:[21,39,3,23,7,35,46,49,50,70,12,32,33,34,57],slatkin:35,higher_neighbor:42,shrew:42,has_chang:49,unwant:21,timer:28,keep:[2,12,26,27,34,21,39,19,23,7,35,46,48,49,55,61,69,65,8,67,73,71],counterpart:[19,23,28,25,30,34],as_scalar:[37,58,40,19,23,4,33,76],append_correl:76,geometri:[12,25,4
1,42],with_prefix:23,mustexist:55,use_get:[40,67],perfectli:[61,14],data_length:40,wrapper:[40,16,35,27,68,50,25,28,56],attach:[1,38,39,40,19,42,7,35,46,23,58,49,50,60,67,31,21,32,16,57],eager_default:[34,23],connection_record:[50,14,29,73],"final":[38,46,41,23,7,35,26,48,49,70,32,33,57,19],prone:7,deregist:[1,74],pool_siz:[14,48],methodolog:[1,38,39,23,7,34,35,66,70,55,33,63],fetchmani:[38,26,66],userkeyword:[12,58],class_instru:[35,57],association_t:21,query_properti:[23,49,7],exactli:[0,21,5,14,23,30,33,34,36,37,38,40,19,42,7,46,49,50,51,55,61,69,67,68,73,76],succinctli:[37,26,49,36,74,75],ben:35,cpython:[19,35,46,67,28,70,14],mydeclarativebas:8,state_dict:57,many_a:75,do_clos:27,default_greatest:74,exhibit:37,"function":[1,38,39,40,19,23,7,35],result_processor:[40,23,8,67,28,25],py2:[1,38,23],py3:19,identifierprepar:[27,7],typedecor:[38,39,40,19,23,7,35],tabl:[1,38,39,40,19,23,7,35],need:[0,2,4,5,8,9,10,13,14,15,17,26,27,28,30,33,1,38,39,40,19,23,7,35,46,48,50,51,55,56,58,69,65,66,67,68,25,73,74,76],border:49,users_password:[61,36],product_sal:76,fluent:61,engine_connect:38,truck:64,singl:[2,3,4,5,9,10,11,12,14,18,19,23,26,27,29,30,32,33,1,38,39,40,41,42,7,35,46,48,50,51,52,55,56,57,58,60,69,65,8,67,28,73,76],subtransact:[46,40,57,23],discov:38,untest:35,url:[1,38,40,23,35,26,67,18],setlevel:[51,48],uri:[38,39],native_enum:[51,40,19,67,25],snapshot:[14,19],table_map:34,fixed_char:40,constrain:[21,2,23,38,43,67,25,61,17],datafram:71,case_greatest:74,anywai:[1,38,40,19,23,7,28],asyncrhon:40,launchpad:5,hadn:[58,76],forev:7,zyzniewski:19,users_1_nam:61,opclass:51,is_mapp:[34,31],"__tablename__":[37,21,46,40,3,42,43,69,16,7,23,58,49,28,64,51,61,72,12,34,75],obj1:[12,39,49],obj2:[12,39,49],adalias_email_address:36,tsrang:51,joint:2,tbl:[51,56],werent:[1,38,39],endless:[1,40,23,7,35,74],enabl:[1,38,39,40,19,23,7,35,65,58,67,8,18],gunnlaugur:[35,19],contain:[0,2,4,10,14,15,19,26,27,30,33,1,38,40,41,23,7,35,51,55,56,58,65,8,67,28,25,73,71,76],grab:[16,23],legaci:[40,19,23,7,16,35,26,8,49,28,67,34],orphan:[1,38,40,19,23,7,35],distinct_target_kei:[21,35,31],strictly_left_of:51,statu:[1,38,40,19,7,35,55],bindnam:8,correctli:[1,38,39,40,3,23,7,35,66,49,28,21,12,58,76,19],tend:[58,61,64,19,35,26,34],state:[0,3,21,9,13,14,16,17,23,25,26,27,29,31,32,33,63,35,36,37,38,40,19,42,7,45,46,50,51,56,57,58,59,60,62,34,8,67,28,69,73,75],"__bases__":[72,7],version_id_gener:[40,34,72],neither:[58,33,30],msbinari:[40,7],dict_of_sets_with_default:[19,42],ken:7,sole:[1,25,65,3,7],blinker:28,kei:[1,38,39,40,19,23,7,35,65,8],itertool:19,state_gett:[63,31],disconnect:[38,40,19,23,7,35,48,54,50,17,18],thank:[1,38,39,40,19,23,7,35,65,58,67,33],update_nowait:37,jsonencodeddict:[25,16],"_check_mysql_vers":5,polici:[59,19,8],install_mod:39,update_from_claus:27,unimpl:19,column_attr:[58,46,34],after_bulk_delet:[37,32,57,7],pymssql:[1,38,40,19,7,35],orig:17,quit:[1,40,3,7,67,55],slowli:[35,28],sqlalchemy_nos:40,addition:[3,4,8,10,21,14,23,33,34,38,40,19,42,7,35,49,59,51,56,57,58,61,16,66,67,28,73,76],some_attribut:[13,31],some_mapped_object:[31,49],yai:38,disableautoflush:49,slide_id:43,referred_schema:[0,27],lower_case_nam:34,catchal:[38,19,28],"_binari":[5,25,35,40,66],replic:28,mutltipl:49,compile_mapp:[38,19,7],pjoin_manager_data:75,contextu:[1,23,62,26,15,18],class_registri:[19,72],"3dmydsn":9,dbapi2:73,f8s7cc:61,myqueri:8,demo:42,techspot:[64,28],insertsect_al:7,revis:[58,40,35,2],load_on_pend:[21,40,19,35,49,31],generic:[55,39],parti:[58,61,41,42,26,48,49,13,19],began:[39,33],install_memb:63,micromanag:1,do_prepare_tw
ophas:27,billing_address_id:21,http:[5,6,8,9,11,22,23,24,1,40,19,42,7,35,46,59,51,58,69,65,66,67,28,25,73,76],effect:[0,3,4,5,8,9,10,11,12,14,19,21,26,27,30,32,33,63,37,38,40,41,23,7,35,46,48,49,50,51,55,56,57,58,69,34,66,67,28,25,73,75,76],logfil:7,distutil:[70,7],find_native_user_instrumentation_hook:63,transaction:39,bufferedcolumnrow:23,well:[0,1,2,3,4,5,8,9,10,21,12,13,14,16,19,20,23,25,26,29,30,31,32,33,34,36,37,38,39,40,41,42,7,35,46,48,49,50,51,55,56,57,58,59,61,62,63,64,65,66,67,28,69,70,71,72,74,75,76],"__div__":30,adjacent_to:51,undefin:[0,40,49,28,72,34],backref:[1,38,39,40,3,23,7,62,35,64,65,58,67,8,75,31,12,72,34,18,19],"_label":[38,40,7],mistaken:[40,19],detail:[1,2,3,5,12,14,26,27,31,34,36,37,21,40,19,7,35,46,48,49,51,55,56,57,58,61,69,8,67,25,76],current_timestamp:[39,40,41,23,10,30,73,74,33],comment_sql_cal:50,nullabl:[0,38,39,2,19,23,7,27,58,67,28,40,55,33,56,74],logger:[38,14,40,48,51],warrant:49,"__autoload__":1,nate:58,existing_adapt:3,replace_select:[55,76],burden:49,loss:[69,25,66],"_sa_session_id":7,lost:[38,3,23,7,35,67,19],windowfunctionsbydefault:66,ix_myt:76,book_tabl:34,necessari:[4,10,21,23,26,33,34,36,37,38,40,19,42,7,35,49,58,61,8,28,25,72,73,75],lose:[38,40,19,7,35,33],page:[1,61,39,7,48,73,18],related_id:31,revers:[0,38,40,3,23,7,35,72,49,28,25,61,30,67,21,55,33,19],twitter:28,"04d":[58,73],contin:42,reconstruct_inst:[32,8,7],home:[40,48,67],librari:[37,58,40,41,42,56,35,46,66,49,48,25,9,70,67,73,34,3],wendi:[61,33,65],"__contains__":[40,19,23],broad:35,overlap:[51,33,40,19,61],outgo:[25,49],encourag:[61,40,7,26,49,53,57],usag:[1,38,40,19,23,7,35,58,67,8,74,28,18],nutshel:49,offset:[1,38,39,40,19,23,7,35,65,67],user_id:[2,4,12,26,30,33,34,36,37,21,46,49,55,56,58,61,69,64,65,67,28,72],empsalari:28,testsuit:67,"_save_obj":[23,7],postfetch_col:[26,35,10,27,23],due:[38,40,19,23,7,35,66,67,28,73,33,76],compile_kwarg:27,inner:[37,1,75,40,19,7,35,65,49,51,67,21,33,36,76],murri:[40,19],start_1:64,neutral:[40,19,4,26,27,30,33],gain:[40,19,23,26,65,49,10,33],mugtasimov:35,overflow:[1,38,40,19,23,7,35,48,14,34],highest:[74,30],eat:49,succe:[58,40,19,23,7,35,14],as_decim:40,dml:[74,19,73],displai:[40,19,42,43,5,72,74,76],asynchron:[59,49,7],limit:[1,38,39,40,19,23,7,35,65,67],indefinit:19,default_order_bi:7,not_extend_left_of:51,instantit:30,evalu:[38,40,19,23,7,35,8,67],twist:49,contextlib:49,"2to3":[58,70],futur:[21,46,40,3,23,25,5,16,45,26,48,49,28,55,67,57,11,14,56,35,36],rememb:[38,3,21],conting:26,nodes_3_id:65,stat:30,star:12,row_numb:[38,39,19,23,66,9,30,33],with_sess:[37,46,19],unichar:22,supports_native_enum:[40,27],remove_sess:49,stai:[1,26,40,3,46,28],sphinx:[35,40,7],portion:[37,21,40,19,24,69,35,23,48,28,51,30,73,58,70,36,76],ajoin:75,decemb:35,hasstringcollect:72,classwid:27,secondli:[65,49],browse_thread:8,createindex:[56,35,40],whose:[37,21,40,41,23,69,16,27,49,68,76,65,55,33,56,19],accur:[58,38,40,19,23,43,5,35,7,49,25,67,34],aug:[1,23,7],deferredreflect:35,sorri:40,swap:23,selet:34,"__hash__":[30,40,19,23],downstream:40,"_dialect":14,appar:[38,40,19,7,35,59,9,21,12,33],msdatetimeoffset:7,solver:49,vast:[14,26,40,19,49],upd:35,enable_assert:[37,7],special_issue_concurr:59,aggreg:[37,1,64,41,38,69,26,28,30,33,36,19],unsupportedcompilationerror:[35,17],after_flush:[32,19,57,8],even:[26,3,21,5,8,9,70,14,7,27,31,32,33,34,37,38,39,40,19,23,43,35,46,48,49,55,57,58,61,69,16,64,66,28,25,73,75],child_fold:[58,21],libpq:[35,40,19],asid:[55,28],"new":[1,38,39,40,19,23,7,44,35],net:[69,22,19,7,5,6,35,26,66,49,59,51,9,25,11,40,32,57],ever:[19,23,35,49,28,25,29],m
etadata:[1,38,40,19,23,7,35],elimin:[58,40,19,23,28,72],centric:[58,20,39,70,54],abov:[0,2,3,21,5,8,9,10,12,14,16,19,24,26,29,30,31,33,63,36,37,38,41,23,43,35,46,48,49,59,51,55,56,57,58,61,69,34,64,65,66,67,28,25,70,72,73,74,75],never:[3,4,5,14,26,30,31,34,21,40,19,23,7,35,46,49,55,58,61,67,28,73,76],restat:35,get_search_list:40,"_read_prop":72,interpret:[38,2,19,23,7,35,26,27,66,49,48,61,51,9,30,21,40,34,25],dry:23,fkei:55,galor:58,jame:[1,38],permit:[5,24],contextmanag:[35,40,49],entity_id:58,type_descriptor:[25,27],overhead:[37,40,3,23,7,35,66,49,68,51,9,25,67,73,48,28,34,19],recommend:[46,40,19,43,35,7,66,49,68,64,51,9,8,28,72,73,12,33],noforeignkeyserror:17,calc:[38,33,19],type:[1,38,39,40,19,23,7,35],tell:[1,21,3,61,65,49,33,75],readm:[40,67,42,7],warn:[38,40,19,23,7,35,65,8,67],"__iter__":[3,23,7,35,46,65,19],kumar:40,uninstru:57,room:[51,40,67,28],some_t:[3,49,50,10,72,34,57],setup:[38,40,19,23,7,65,8,67],worth:[73,28],some_c:75,akin:34,root:[38,19,23,21,65,49,25,74],defer:[1,38,40,19,23,7,35,65,66,67,8,57,73,58,18],myspecialtim:25,give:[58,38,39,19,23,61,65,26,27,48,64,25,72,12,14,17,57,32],everi:[40,23,5,35,65,67,28,59,73,2,55,33],unsign:[5,40],quot:[0,1,40,19,23,38,10,35,7,27,8,50,25,30,11,55,56],polic:7,relationshipproperti:[58,21,40,46,31,12,13,34],pointcompar:34,abstracttyp:[25,40,23],answer:35,config:[67,40,48,23],updat:[1,38,39,40,19,23,7,35],subsecond:38,timedelta:[25,39],selectxxx:23,with_only_column:[40,19,76],synchronize_sess:[37,7],twofold:[13,67],guesstim:7,attempt:[1,4,5,8,9,70,11,14,17,23,26,33,37,21,40,19,42,7,35,46,48,49,50,55,58,61,60,66,67,28,25,72,73,76],third:[37,58,46,41,42,43,7,26,48,49,61,67,73,13,19],minim:[58,38,3,23,7,5,46,67,33,16,76],maintain:[0,1,3,12,14,26,32,63,37,21,39,40,19,23,7,35,46,49,55,58,61,69,34,66,67,28,25,73,76],borgstr:39,mycompositecompar:31,get_unique_constraint:[0,35,27],switchabl:38,"_bindparamclaus":[35,40,19],smallint:[40,19,25,5,67,51,9,73,55],"__class__":[40,19],before_xxx:8,better:[1,38,39,40,19,23,7,35],"_history_mapp":42,sqltext:2,persist:[0,3,14,15,18,20,26,28,29,32,33,1,38,39,40,19,23,7,35,46,50,57,58,69,65,8,67,68,25],sessiontransact:[1,38,23,35,65,49,32,57],"_user_nam":34,anim:42,cpickl:25,promin:[35,19],escape_quot:27,after_attach:[35,7],promis:[38,67],subqueri:[38,39,40,19,23,7,35],column_collect:[55,76],foo_pkei:46,bonu:[61,40],luca:35,use_binds_for_limit:[40,66,19],sorted_t:[0,40,7,35,46,67,55,17],side:[58,38,39,40,19,23,7,35,26,8,67,28],mean:[3,4,7,8,10,21,13,14,15,26,30,32,33,34,36,37,38,40,19,23,43,35,46,48,49,51,52,55,57,58,61,60,69,64,65,66,67,28,25,72,73,75,76],enorm:[40,7],crit:[38,76],forgot:1,extract:[37,38,40,41,23,7,35,30],after_execut:50,unbound:[38,19,7,35,8,49,28,25,55,16],typecast:7,foo_bar:35,"_gener":40,content:[58,19,23,35,67,28],rewrit:[1,58,40,19,23,69,35,28],reader:33,is_:[55,35,30,19],linear:40,mytyp:[25,10,8,67],situat:[2,21,9,10,12,26,38,40,19,23,7,35,46,49,59,55,58,61,69,66,28,25,72,73],infil:23,infix:[55,30],convent:[61,46,67,23,72],isinsert:27,after_begin:[32,57,49],ish:[65,23],iso:73,isn:[40,19,23,4,35,28,25,33],isa:34,setlik:[38,3],users_1:[61,35,67],right_id:21,"0x101521950":58,isolation_level:[35,40,19],"_mapper_registri":23,adalia:[61,36],thereof:69,get_properti:[34,23],hook:[58,55,19,23,7,74,63,35,27,49,68,50,25,29,72,14,28,34,32,57],unlik:[37,21,19,7,10,49,25,30,11,57],"06d":[58,73],oracled:[38,23],hood:8,genericfunct:[51,35,41],sometim:[21,19,69,35,28,29,34],sql_oper:65,table1_column_on:58,clauseleft:30,oracle8:39,namespac:[21,66,14,15,30,31,34,1,38,40,19,23,35,46,48,58,65,8,67,72,75,7
6],proxy_factori:[12,40,7],somewhat:[39,40,19,7,35,46,49,28,12,34],local_tim:39,hadnt:39,peculiar:39,get_view_definit:[0,27],silli:[38,40],discourag:[26,34],keyword:[1,38,39,40,19,23,7,35],fk_favorite_entri:21,from_self:[37,61,40,19,7,67,28],matter:[19,5,35,46,65,48,51,36],perillo:19,ibm_db_sa:71,has_ident:31,issubclass:[1,30,49],mini:38,rowtupl:7,low_prior:[58,76,4],modern:[37,58,46,40,42,4,5,7,26,48,49,28,51,9,61,67,73,34,35,25],mind:[61,73,2,67,25],my_associ:46,seen:[25,35,33,23,46],seem:[1,38,40,19,7,35,26,66,49,8,72],"_sa_instance_st":[35,49],minu:[58,40,34,23,21],tablename_id_1:40,string_kei:12,"__getstate__":[16,57,7],node_to_nod:21,current_d:[40,41,73],scope_ident:[38,9],myset:5,my_arrai:58,unknowingli:28,scopedregistri:49,"__cleanup":23,prematur:[58,46,73,49,7],tradit:[21,35,66,72,19],don:[38,40,19,23,7,35,8,67],dom:42,pointi:58,doe:[1,38,39,40,19,23,7,35,65,8,67,28],dot:[58,40,23,7,30,55,34,36],use_scope_ident:[38,19],has_kei:[58,51,26],unmappedclasserror:[40,60,34,19],visitor:[38,40,27,67,25,30,55],esoter:[35,33],probe:38,speedup:[38,35,40,67,23],syntax:[1,21,39,19,23,4,5,35,7,61,67,59,51,9,30,69,73,33,76],object_mapp:[34,7],self_and_descend:34,init_collect:[40,67,49],despit:[1,21,40,57,7],fontanelli:19,acquir:[58,61,46,40,19,4,10,35,26,49,69,30,55,33,34,57,76],libari:73,field2:42,init_on_load:34,explain:[19,23],users_lastnam:65,explic:7,defaultgener:[39,40,7,26,27,67,10],"__call__":[56,31,49],folder:[58,21,7],my_load_listen:57,stop:[37,1,2,19,23,21,56,35,64,65,61,32,33,34,36],compli:3,punion:72,bat:[26,65,36,49,72],caching_queri:[58,40,42],bar:[37,21,58,23,7,5,46,65,48,49,28,51,61,30,72,11,32,33,16,55,36],baz:[5,11],reload:[21,39,23,49,61],bad:[38,49,7],submapp:7,datatyp:[0,20,40,23,42,38,5,35,65,66,28,51,61,10,75,58,1,55,33,34,25],subject:[30,19,24,4,5,56,35,46,8,49,25,10,67,21,13,34,17,57,76],said:[69,46,48,49,28,25],durability_:69,simplest:[10,3,23],attribut:[1,38,39,40,19,23,7,35,65,8,67],"_get":[1,28],lazi:[1,38,39,40,19,23,7,35,65,8],upfront:76,"_integertyp":5,paramnam:23,against:[2,4,8,18,26,27,30,33,1,38,39,40,19,23,7,35,50,51,55,56,58,65,66,67,28,25,73,76],somelist:7,uncertainti:35,some_kei:[58,51],anon_label:[55,30],allow_twophas:66,foobar:[61,40,49,28,30,55],loader:[1,38,40,19,23,7,35,65,58,67,28,18],some_table_seq:10,with_polymorhp:75,reciev:69,liabil:24,implicit_return:[40,19,27,48,67,51,9,10,55,76],insenst:23,behvior:69,trigger:[58,39,40,19,23,7,35,26,8,67,56],interest:[0,58,33,25],basic:[1,38,39,40,23,7,35,58,67,8,18],threw:40,deeper:[38,36,42,19],suppress:[37,40,23,7,46,67],multithread:[46,66,49,73],disconnectionerror:[50,14,17,68],lethan10:30,dynamic_load:[23,7],execution_opt:[35,40,19],ugli:25,servic:[20,25,69,35,26,27,66,54,68,51,9,70,76],calcul:[38,40,19,23,7,27,49,30,31,33,76],occas:[25,28],spawn:49,"_type_affin":[19,67],msyear:7,reflectedon:[58,72],cant:[38,39,33],type_map:19,sever:[2,3,8,70,14,26,30,33,34,37,21,40,23,35,46,51,57,61,69,66,67,28,71,72,75],receiv:[3,5,9,14,28,27,29,30,32,34,37,21,39,40,19,7,35,46,48,49,50,51,56,57,58,61,69,16,64,66,67,68,25,72,73,75],make:[0,5,9,10,26,27,30,33,1,38,39,40,19,23,7,35,48,51,55,58,65,66,67,28,25,73,74],stickier:23,dbapi:[1,38,40,19,23,4,7,26,67,28,74,35,18],elli:39,assumedli:35,qualif:26,inherit:[1,38,39,40,19,23,7,35],weakli:[65,49],programm:37,paradigm:[40,19],left:[0,4,5,9,10,21,12,7,30,33,34,36,37,38,39,40,19,23,43,35,49,51,55,58,61,69,64,65,66,67,28,25,75,76],pginet:38,protocol:[69,25,15,67],firstnam:[65,34],some_helpful_method:72,parent_fold:[58,21],"_asdict":[37,35],"__dict__":[40,23,7,64,49,16],correlate_
except:35,mysql_length:[5,35,19],tuple_:[30,40],character:19,get_children:[38,55,30,76],dblink:[38,35,23],correpond:23,opt:[0,26,65,49,28],applic:[1,4,5,6,8,9,70,11,12,14,15,22,21,25,26,30,31,33,34,37,38,40,19,23,7,35,46,48,59,51,55,57,58,62,16,66,67,28,69,72,73,74,76],pgarrai:[23,7],autotransl:23,background:[21,46,26,65,52,49,51,9,10,67,73,55],elabor:[61,55,34],illiter:38,daemon:49,do_return_conn:28,alltest:39,negat:[38,40,23,35,30,55],remove_st:63,unnecessari:[38,40,19,7,35,26],kwaarg:7,wwu:7,www:[1,22,42,24,7,65,23,66,67,28,76,51,9,40,33,19],deal:[1,38,24,23,7,35,65,58,54,8,67,28,18,19],addgeometrycolumn:42,maxim:35,intern:[1,38,39,40,19,23,7,35,8,67],favorite_entry_id:21,interf:[35,7],named_with_column:55,insensit:[35,40,19,23],trace:[25,35,7],yield_per:[37,35,40,23],generer:[38,7],tracker:[16,19],localhost:[58,69,46,40,25,5,26,48,49,50,51,29,67,72,14],idiomat:[69,7],burn:40,someinfo:75,promot:[46,49],"super":[40,27,3,23,25],unsaf:1,postgresql:23,conceal:[37,40,42,35,28,12,33,34,76],object_st:49,simul:[38,40],unsav:7,commit:[1,38,39,40,19,23,7,35],mysqlconnector:[5,35,19],rudiment:[1,42,23,7,45,66,35,19],subq:[37,58,34,8],down:[38,39,40,3,23,7,69,35,46,49,25,61,30,12,33,36,19],formerli:[1,40,67,49],lieu:[58,21,39,25],overus:[26,28],attribute_mapped_collect:[12,65,3,19],fraction:[7,5,35,8,66,25],fork:[26,49],form:[40,19,23,7,35,65,8,67],forc:[3,4,8,9,10,17,27,30,37,38,39,40,19,23,49,51,55,57,58,66,25,73,76],engineer_data:58,query2:[15,7],query1:7,"__declare_last__":19,bugfix:[1,19,7],glitchi:38,note_kei:3,zark:3,mutablecomposit:[16,35,34,19],"_somenam":23,unrel:[40,19,23,7],mixabl:8,scalar_subq:58,classic:[19,42,62,46,8,75,72,12,16,71,18],sale:59,thunk:38,format_schema:27,ship:70,sticki:7,somenam:[23,49],excel:[1,19],dbapi_con:[50,29,68],executioncontext:[38,40,23,35,26,27,28,50],matur:28,dialect_nam:[37,51,76,25,4],stringif:35,transferr:49,visit_binari:19,mediuminteg:5,furthermor:46,pref_valu:[55,2],some_alt:34,fsp:5,fst:35,get_attr_by_column:23,lastrowid:[38,39,40,19,23,7,35,26,27,28,10,55],skip:[37,58,46,40,19,23,43,70,35,7,49,28,51,61,67,72,55,33,56],mile:36,customari:[49,28],hierarch:[21,64,42],depend:[0,2,4,10,13,17,18,26,27,30,1,38,39,40,19,23,7,35,48,50,55,58,67,28,25],restore_snapshot:19,slboolean:7,editori:7,aspx:9,string:[1,38,39,40,19,23,7,35,65,8,67],undoc:31,lynch:[35,19],join:[1,38,39,40,19,23,7,35],unwork:7,parent_t:36,dii:7,did:[1,38,39,40,19,35,26,67,28,46,72,73,33],dif:23,iter:[3,4,21,31,33,34,37,38,39,40,19,23,7,35,46,49,55,57,61,69,8,76],item:[38,40,19,23,7,35,65,8,67],expunge_al:[67,40,8,49,7],compileerror:[5,35,17,19],collection_adapt:[3,49],msfloat:23,"_createdropbas":56,porxi:23,pgp_sym_encrypt:25,pre_exec:27,wors:28,"__instrumentation__":35,display_width:[5,28,7],assertionpool:[14,40,19],ping_connect:14,slot:[3,68],deriv:[38,40,19,23,7,65,67],stmt:[58,61,4,69,26,59,51,33,76],identity_kei:[58,38,19,49,31,34],wait:[14,35,19,48],box:73,"__repr__":[61,19,7,35,30,12,34],extrem:[21,40,23,7,35,66,49,8,67,9,25,57,31,73,28,34,36],bob:38,else_:[38,30,34],parentmapp:31,overrid:[1,38,40,41,23,4,74,7,26,27,58,54,8,67,55,56,35,19],elect:[5,63],"_cursorfairi":7,passthrough:[38,23],modul:[1,38,39,40,19,23,7,35],no_valu:[57,31],backtick:[5,35,19],include_properti:[40,34,19,7],perf:23,from_engin:[0,13,67],sake:49,query_timeout:38,univers:[58,35,39,64],visit:[19,27,25,30,11,55],connector:[25,35,40,48,67],bindparam:[1,38,40,19,23,7,35,65],dict:[0,38,40,3,23,7,5,65,35,27,72,28,59,51,30,31,73,12,16,19],thru:39,checkout:[19,48,49,68,50,67,73,14,17],notilik:[35,30,55],tandem:23,examin:
[21,56,34,49,28],total_sal:76,effort:[1,38,35,8],fly:[49,73],dburi:[38,23],nextval:[66,67],uniqu:[38,39,40,19,23,7,35],imper:7,fget:[64,72],claim:[19,24],turland:19,predict:[35,67],createschema:[56,19],menten:40,crazi:7,isdelet:27,nvarchar:[38,40,19,7,35,67],get:[1,38,39,40,19,23,7,35],ping:14,idl:[5,14,46,35],pure:[40,19,7,5,67,28,51,72],map:[1,38,39,40,19,23,7,35],mar:[38,39,40,19,23,7],max:[1,38,40,19,7,35],usabl:[58,40,19,23,72,35,26,65,66,8,25,10,31,12,14,34,75],membership:[65,23,3,31,7],myrelatedclass:[40,67],udpat:46,mai:[0,1,2,3,4,5,6,8,9,10,11,12,13,14,28,16,19,22,21,26,27,29,30,31,32,33,34,35,36,37,38,39,40,41,23,7,45,46,48,49,50,51,55,56,57,58,59,60,61,69,63,64,65,66,67,68,25,72,73,74,75,76],underscor:[38,40,19,23,7,35,67],grow:[14,40,35,71,48],selectone_bi:[38,39,8],autoload_with:[0,55,34,72],"_pool":14,before_insert:[32,57],tale:28,"switch":[38,39,40,19,23,7,35,26,54,28,18],columnclaus:[1,39,40,30,55,74],longhand:75,talk:[61,40,19,69,48,49,34],cute:42,shield:49,host_entry_1:21,freetd:[9,35,40,19,48],entiti:[26,3,21,23,7,31,34,36,37,38,39,40,19,42,43,35,49,55,58,62,65,8,67,69,75],group:[1,38,2,19,23,4,35,7,65,8,54,30,40,55,74,76],monitor:35,dbapi_type_map:27,fayaz:19,is_attribut:[12,31],init_inst:[32,7],dont_load:[35,40,67,23],nolock:19,mail:[38,28],main:[37,38,40,72,23,44,46,67,30,31,36],initd:51,synopsi:18,savepoint:[40,19,23,7],statementerror:[35,17,19],initi:[0,26,2,3,5,8,14,18,23,25,7,27,29,30,31,32,16,1,38,39,40,19,42,43,35,46,48,50,51,55,57,62,66,67,69,72],sltime:38,close_al:49,myattrext:32,getitem:35,rock:3,myobj:3,exclude_properti:[40,34,19,72,7],cascadeopt:19,attributeerror:[58,39,40,19,7,60,31],email_addresses_1:36,continu:[21,46,40,19,23,7,5,35,26,66,49,8,25,67,73,14,28,34,55,57],indkei:35,interval_1_start:64,baselin:58,default_fals:74,oracle_resolve_synonym:[66,23],tablenam:[37,38,2,19,23,7,35,27,58,72,1,40,34,76],case_insensit:35,pjoin_engineer_info:75,correct:[58,38,40,19,23,7,5,35,26,8,67,28,59,33,76],after:[1,38,39,40,19,23,7,35,65,8,67],resourceclosederror:[40,17],headlin:[61,3],orm:[1,39],bayer:24,lump:33,"_string":72,org:[5,9,22,23,24,33,1,40,19,42,7,35,46,59,51,58,69,64,65,66,67,28,25,73,76],ora:[35,46,40,19],or_:[58,33,30,19,7],somename7:49,befor:[1,38,39,40,19,23,7,35,65,8,67,28],waiter:[35,19],visit_pool:40,somename8:49,somename9:49,thing:[38,39,40,19,23,7,35],firstpost:61,think:[61,74,46],frequent:[38,40,7,25,9,14],first:[1,38,39,40,19,23,7,35],caseinsensitivecompar:64,reimplement:[40,67],multipleresultsfound:[37,61,60],carri:[35,8],housekeep:43,fast:[46,23,28],oppos:[37,38,19,35,64,66,25,31,57],someselect:[30,23,7],"_fn":[40,67],workaround:[40,3,23,7,35,73,19],someprop:[38,7],averag:49,posgtresql:51,my_str:30,pendingdeprecationwarn:7,email_address_2:[37,33],users_t:[0,26,65,49,32,55,36],redefinit:[35,34,31],were:[0,1,4,5,9,10,21,14,19,26,30,31,32,33,34,36,37,38,40,41,23,7,35,46,48,49,51,57,58,61,60,69,16,64,65,8,67,28,25,72,76],has_ani:[58,51],ext_pass:8,advis:[5,21,19,23,73],interior:7,proxy_bulk_set:12,employee_mapp:75,type_a_str:72,normal:[38,39,40,19,23,7,35,67],track:[1,38,39,40,19,23,35],fakeus:61,sublicens:24,pair:[37,38,2,3,23,4,69,35,26,27,49,28,51,61,30,31,34,19],method_b:[26,49],method_a:[26,49],synonym:[1,38,39,40,19,23,7,35,67],release_savepoint:[50,40,68],on_connect:[40,27],"_genericmeta":41,gracefulli:[14,8],shop:70,serializ:[5,51,40,19,23],show:[1,38,40,19,23,21,35,65,49,51,61,32,33,34,57],ansi_quot:[5,40,23],outerjoin_to:38,undefer_group:23,"_detect_cas":58,enough:[58,55,33,19,73],black:33,extra_data:21,post_configure_attribut:[63,7],st
artswith:[1,21,23,35,30,55,33],uniqueidentifi:[38,9],nearli:[38,49,28,25,31,15],variou:[0,3,20,23,26,33,34,1,38,40,19,42,7,35,48,49,52,55,56,61,69,8,28,72,73,71,76],engineer_info:[72,75],table_per_rel:42,mung:[38,35],repr:[12,35,30,19,48],secondari:[1,21,39,40,3,23,7,5,35,8,49,29,61,31,12,72,76],schemaev:50,get_schema_nam:[0,67],geo:41,gem:19,remote_bank:55,its_big:12,yield:[37,23,4,49,28,9,30,55,34,76],message_id:0,stupid:38,mediat:42,summari:[34,8],wiki:[1,40,42,23,7,69,66,67,8,70,28,19],datetimemixin:[8,7],instrumenteddict:3,spars:69,sean:[35,19],adr_count:61,onetomani:7,testcas:49,somelabel:[38,7],enumer:[69,25,70],snortev:58,label:[1,38,39,40,19,23,7,35],savings_bal:74,identity_key_from_inst:34,keyword_id:[12,58,8,72,61],across:[1,38,40,19,23,7,35,8,67],"_user_id":34,august:8,parent:[1,38,39,40,19,23,7,35,65,8,67],change_2836:35,unpopul:[58,19],declared_synonym:8,tue:[1,38,40,23,7],engine_or_url:38,tour:61,basestr:[64,40],improv:[1,38,39,40,19,23,7,35,65,8,67],ischema_nam:35,remote_user_id:55,improp:40,among:[37,61,2,19,42,7,69,35,26,23,8,49,28,25,73,40,33,75,76],undocu:[38,35,28,73,7],attribute_two:34,unittest:[1,38,39,49,7],compare_self:38,cancel:[37,40,23,7,26,49,57],ultim:[37,38,40,19,21,35,26,48,49,51,30,72,25,32,33,34,57,76],cursor_execut:68,marc:35,mari:[21,33,61],mark:[3,26,31,32,33,34,35,37,21,40,19,23,7,45,46,49,55,56,57,61,65,8,67,28,25,72,76],dict_of_properti:34,those:[0,2,4,5,8,9,14,19,26,27,29,30,33,38,39,40,41,23,7,35,48,50,51,52,55,56,58,65,66,67,28,25,73,76],default_metadata:1,"__mod__":[39,30],invok:[2,3,4,5,10,21,17,41,26,30,32,34,37,38,40,19,42,7,35,48,49,50,51,55,56,57,58,61,69,8,28,25,70,72,73,74],some_necessary_attribut:58,outcom:[32,30,57],invoc:[67,57,19,49],"0x12ceb90":12,margin:[39,26],tiddlywink:33,advantag:[38,39,2,23,21,69,35,64,49,28,75,9,30,67,72,73,40,14,34,36],destin:[40,19,23,48,72,31],cluster:35,"_polymorphic_map":34,dest_stat:31,on_new_class:57,prefix_with:[37,58,19,4,35,28,74,76],sig_count:58,intermediari:[12,21,35,42,4],same:[1,38,39,40,19,23,7,35,65,8,67],fragment:[40,30,34,42,61],pad:5,pai:61,exhaust:[46,26,40,19],assist:[35,23],test_deprec:8,capabl:[39,40,19,23,35,8],some_lookup_t:28,prefetch_col:[35,26,27],ipaddress:25,somerelationship:[35,19],appropri:[2,3,4,10,21,12,14,15,41,26,27,29,30,31,33,16,36,37,38,40,19,23,35,46,48,49,50,51,58,61,64,28,25,70,71,72,74,75,76],sql_small_result:[37,5],argument2:48,multicolumn:72,argument1:48,check1:2,populate_inst:[38,32,39,8,57],roughli:[40,30,34,7],routingsess:49,execut:[1,38,39,40,19,23,7,35],"_constructor":[40,19],aspect:[0,40,19,7,27,49,33],mxodbc:[35,40,67,23],flavor:[69,33,49],pickletyp:[38,39,40,19,23,7,35,8,67],set_isolation_level:[40,27,51],parse_decltyp:[40,73],bind_processor:[25,8,23,28],jake:33,choic:[38,40,23,61,66,49,50,74,34,36],mon:[1,38,39,40,19,7],oldnam:33,mod:[1,38,39,8],shares_lineag:[55,30,7],server:[58,38,40,19,23,35,67,28,74,18],either:[0,3,4,5,8,21,13,15,26,27,28,30,31,33,34,37,38,39,40,19,23,7,35,46,48,49,50,51,56,57,58,61,65,66,67,68,25,72,75,76],fulfil:[21,40,23,25,35,68,51],ascend:[33,30],adequ:[40,34,23],user_nam:[37,34,64,28,12,55,56,36],sqlite_omit_trigg:73,comparable_us:[72,23],simplefilt:25,confirm:[55,28],columncollect:[38,30,2,23,28],read_nowait:[37,19,76],foomax:38,inject:[58,3,35,48,50,25],searchword:64,postfetch:[10,40,23],pgp_sym_decrypt:25,anon_1_user_id:61,freetds_nam:9,broken:[37,1,40,19,38,7,66,49,75,67,57],regexp:[1,58,40,19,23,7,5,35,67,73],cornerston:33,solidifi:23,weak_instance_dict:19,removes_return:3,sqlite_temp_mast:19,mstinyinteg:[5,1,7],road:[46,19,42]
,prohibit:[40,73],quietli:[65,34,3],asfrom:74,strip:[38,40,19,25,29,57],passive_delet:[38,40,3,23,21,31,42],complianc:35,cartitem:10,overwrit:[55,39,34,19,7],tlengin:1,ipv6:[35,19],watkin:39,manager_manager_data:75,"_use_mapper_path":[37,75],isenabledfor:[40,67],pool_reset_on_return:[19,48],possibl:[1,3,4,7,21,14,17,26,27,31,33,34,35,36,37,38,39,40,19,23,43,45,46,49,59,55,57,58,61,69,65,8,67,28,25,72,75,76],format_t:27,"_compiled_cache_s":34,"__le__":[55,30,65],extra_from:27,embed:[38,39,40,19,23,7],deadlock:[9,7,40,14,19],threadloc:[1,38,40,23,7,65],deep:[58,21,2,42,23,7,65,25],file:[38,19,23,7,8,67],oeprat:35,rdbms_apiproxi:[35,19],fill:[69,67,7],denot:[37,61,55,34,73],hybrid:[35,19,8],field:[1,38,8,40,3,23,7,66,67,48,61,25,9,30,72,12,14,33,34,55,19],cleanup:[1,38,40,23,7,8,28,50],hibern:[1,23,69,65,8,28],architectur:[38,40,23,35,67,49],custom_attribut:[8,42],sequenc:[1,38,39,40,19,23,7,35],vertex:[16,34,65],main_tabl:2,ansi:[38,39,40,41,23,35,33,19],is_select:[55,30,31],versionad:[25,55],concat:[41,23,35,30,55,33],contextual_connect:[35,26,49],vertice_t:34,unaryexpress:[25,35,30,23],descript:[0,2,4,10,21,14,20,26,27,30,31,33,34,36,37,38,40,19,23,7,46,48,49,50,54,55,56,67,25,70,73,74,76],source_dict:31,append_clean:39,escap:[1,38,40,19,23,7,35],pybrari:51,register_funct:41,represent:[61,64,42,23,4,35,7,27,28,59,30,55,33,76],init_sqlalchemi:46,dropconstraint:[40,2,56],my_association_t:31,dicitionari:35,children:[37,21,3,23,43,65,36],yetanothert:[58,72],rewrot:[1,40,19,23,7],strptime:7,altercolumn:[74,67],ws1:64,vertice_id:34,straightforward:[40,67,25,49,72,34],fals:[38,39,40,19,23,7,35,65,8],append_ddl_listen:[55,56,19],offlin:[55,46,40,19],util:[0,37,3,42,38,62,35,7,23,51,30,72,34,25,19],attrbut:[49,7],begin_prepar:[40,66],fall:[37,1,40,3,23,38,5,65,49,28,25,61,72,33,34,57,19],bottleneck:1,scopefunc:49,create_row_processor:[32,31],host_entri:[58,21],"__truediv__":30,further:[1,38,39,40,19,7,8,67,28,18],pyodbc:[38,40,19,23,7,35,48,67],is_derived_from:[55,76],abc:[30,10,3,23,19],abl:[58,38,7,35,8,49,28,61,33],day_precis:[25,40,66],intersect_al:[37,33,76],"_cursor_execut":35,"public":[38,40,19,23,7,35,8,67],schemaitem:[1,38,2,19,50,10,31,55,56],append_el:[40,67],dict_:[40,67,31],component:28,sophist:[12,61,75,67,28],rlock:[1,7],local_attr:[12,19],valu:[1,38,39,40,19,23,7,35,65,8,67],search:[0,37,46,40,19,42,38,65,7,26,23,66,49,48,75,21,55,63,18,76],install_st:63,has_sequ:[40,27,23,7],keyvalu:10,narrow:[61,42,23,5,65,49,28,75,19],primit:[61,33],transit:[37,5,11,7],parentclass:28,inappropri:[40,3,7,35,46,19],establish:[3,5,14,41,23,26,32,33,1,38,39,40,19,42,7,35,48,50,55,57,58,67,28,25,72,73,74,76],geoalchemi:[58,42],distinct:[1,38,39,40,19,23,7,35,8,67],liber:[37,25],regist:[1,74,35,19,18],two:[1,38,39,40,19,23,7,35],type_migration_guidelin:[40,67],engineer_engineer_info:75,desir:[0,21,5,10,14,26,30,32,34,36,37,38,40,23,35,46,49,51,55,57,61,8,67,28,25,73,74,75,76],calc1:33,calc2:33,mstinytext:1,merge_result:[37,35,19,49],particular:[38,40,19,23,7,35,65,8,67,28],none:[1,38,39,40,19,23,7,35,65,8],col2_0:4,vladimir:35,hous:28,"9e23a0641a88b96d":8,dev:[5,35,46,19,7],remain:[26,21,8,70,12,14,7,33,36,37,38,40,19,23,43,35,46,49,50,55,58,61,69,65,66,67,28,59,72,73,71,76],del:[58,61,19,23,49,30,12,16,57],addresses_1_id:[61,36],ddlevent:[34,28,50,25,55,56],dec:[38,19,7],def:[3,5,10,12,14,28,17,26,29,30,31,32,34,37,21,39,40,19,42,46,48,49,50,55,56,57,58,61,16,64,65,8,67,68,25,72,73,74],share:[37,38,19,42,7,65,35,26,23,49,68,30,72,73,14,34,55,75,76],shard:[35,40,19,23,7],minimum:[21,70,40,36],compars:40,
identity_key_from_row:34,prodc:33,strlen:23,never_set:57,whichev:[40,67],awkward:[12,58,67,69,49],secur:[61,3],programmat:[33,30,70,38],namespace_url:30,"0x1298d10":49,regener:[35,19],name_or_url:48,declarativemeta:[40,72,67],associ:[1,38,39,40,19,23,7,35,8,67,28],signfic:23,"_key_from_queri":35,created_at:[10,72],mislead:[40,7],ddlcompil:[74,27],my_engin:[29,67],startwith:[55,30],geom_data:25,autoload:[0,1,40,19,23,38,5,26,27,66,67,50,72,55,34],through:[38,39,40,19,23,7,35,65,8],current_shard:26,itervalu:[3,23],treat:[38,39,40,19,23,7,35,64,72,49,28,25,61,30,21,55,76],acid_model:69,dbname:[29,5,6,46,66,48,51,9,39,11,14],late:[58,39,19,50,72,76],pend:[38,40,19,23,7,35,8,67],dairiki:19,good:[0,38,46,40,3,7,26,65,8,49,50,61,72,31,32,16],aliasedinsp:[37,13,58,31],timestamp:[38,39,40,19,23,7,35,67],pollut:[40,23],"_valu":[64,23],compound:[38,35,40,19],detach:[37,1,46,40,19,23,38,35,26,27,66,49,68,50,60,67,31,21,58,8],complain:38,goofi:25,easili:[1,61,39,2,19,23,35,65,8,49,9,33,56],token:[39,5,66,49,34,76],compatibl:[40,23],with_polymorphic_discrimin:37,dump_tre:42,harm:38,name_1:[37,33],hard:[58,19,23,7,8,49,33],idea:[0,1,39,40,19,42,38,26,23,58,49,8,64,46,61,67,72,28],some_prop:31,connect:[1,38,39,40,19,23,7,35],orient:[37,1,40,19,7,69,27,8,25,30],inactiv:23,add_is_dependent_on:[55,40],maxdb:[35,71,23],print:[0,3,8,9,12,14,15,26,28,29,30,32,33,34,37,21,41,42,7,46,48,49,59,51,55,57,58,61,69,64,65,66,67,68,25,72,74,76],difficulti:67,oid:[1,38,39,7,35,65,8],committ:27,intric:[67,49,42],item_id:[2,3],prepare_instrument:[8,3,7],jacks_address:61,schemaeventtarget:[50,25,55,10],ms175095:9,omit:[58,40,43,5,56,7,4,66,48,51,9,10,75,55,34,35,25,76],wither:[35,40],perman:[69,16,26,34,65],label_length:[48,35,27,19,7],dont:[1,38,65],smaller:[25,35,28],done:[65,40,19,8],autoclos:[40,23,7],stabl:[58,35,63],is_bound:55,column_kei:[23,4,27,30,55,76],construct:[1,38,39,40,19,23,7,35],statement:[2,4,10,17,18,26,29,1,38,39,40,19,23,7,35,48,50,56,58,65,8,67,28,74],colnam:[58,38,2,19,35,27,8,9,34],for_upd:[37,38,39,19,35,10,55,76],pare:23,asguid:30,parm:38,part:[1,38,39,40,19,23,7,35,8],pars:[1,41,42,35,23,27,48,49,9,67,73,19],myclass:[3,9,13,15,31,34,36,37,38,39,19,23,7,46,49,51,57,58,69,8,28,72],contrari:[61,46,26,57],cyclic:[38,19,23,7],adapt_oper:23,horizont:[35,19,23],strategy_class:31,get_dialect:48,pysqlite2:73,mssql_cluster:[9,35],"_recurs":31,mypool:14,built:[26,2,3,10,19,20,7,30,31,16,38,39,40,41,23,43,49,51,55,58,61,34,64,65,8,67,28,25,70,72,73,74],product_unit:76,build:[1,38,39,40,19,23,7,35,8,67],someurl:[55,68],proxy_set:[55,30,40],distribut:[37,58,40,24,23,45,8,70,73,42],passwd:39,unitofwork_ma:65,previou:[4,5,9,11,15,26,29,1,39,40,19,23,7,35,50,58,65,8,67,28,73,76],most:[1,38,40,19,23,7,35,65,67],assoc:[0,21,7],myisam:[40,23,5,67,28,11,14,55],interval_end:64,charg:24,bdb:5,dimension:51,baseclass:[35,65,19],carefulli:[21,40,3,23,28,72,33],particularli:[1,38,2,19,23,7,61,65,8,49,28,50,25,9,10,31,40,14,34,55],sqla:[40,19,23,7,8,67,28,59,51],fine:[38,40,19,23,65,8,54,67,14],find:[37,39,19,23,7,65,72],coerceutf8:25,equilval:39,a_id:21,unus:[58,35,23,7],baserowproxi:26,bindvalu:[58,25],do_begin:[73,27,7],restart:[1,38,19,35,68,50,14],do_init:[1,31],common:[2,3,10,13,14,25,30,31,33,34,37,21,40,19,23,7,35,48,49,59,51,55,56,62,16,65,8,67,28,69,72,74,75,76],eligbl:69,cart_id_seq:10,clauselist:[30,19,7],reserv:[1,38,40,19,23,7,10,35,51,30,55,33,25],user_alias_nam:61,users_fullnam:[61,36],someth:[3,10,17,33,34,38,40,19,23,7,35,49,50,56,58,61,65,8,67,28,25,72],stringifi:[38,25],smallest:[5,35,11],myriad:23,subscr
ipt:57,experi:19,instrument_class:[32,57,7],altern:[38,40,19,23,7,35,8,67,28],popup:[61,33],complement:[21,35,61,23,7],getset_factori:12,some_par:40,instrument_declar:72,some_other_engin:72,entitynam:[8,42],popul:[3,21,10,12,27,29,31,32,34,36,37,38,39,40,19,23,7,35,46,49,55,57,61,28,75],parse_qsl:[39,19],alon:[21,69,8,25,17,36],foreign:[1,38,39,40,19,23,7,35,58,67,8,28],simpli:[60,40,3,23,5,65,8,67,51,12,34],point:[5,7,11,13,14,15,17,18,25,26,27,29,30,33,34,36,37,38,39,40,19,42,43,35,46,48,51,55,58,62,16,64,65,66,67,28,69,72],instanti:[38,39,41,23,61,26,48,49,50,51,30,72,12,34,25],param:[1,4,14,17,26,27,30,33,34,37,38,39,40,19,23,7,46,48,49,50,55,61,68,25,76],shutdown:[40,23],suppli:[1,3,4,5,7,10,11,12,17,26,34,36,37,21,39,23,43,56,61,25,72,74],throughout:[1,21,40,19,23,7,70,46,8,49,61,67,33,56],backend:[58,2,41,23,4,65,7,26,27,48,54,28,50,10,67,40,56,35,19],comparemixin:[38,35],addresses_1_user_id:[61,36],secret:33,after_bulk_upd:[37,32,57,7],"0x101525090":58,convert:[1,3,21,5,8,9,23,27,33,34,36,37,38,39,40,19,42,7,35,46,48,51,55,58,16,64,65,66,67,25,72,73,74,75,76],unnecessarili:[38,40,75,23,21],gap:12,related_user_id:21,understand:[38,74,23,19,18],repetit:56,"__radd__":30,employee_dept:55,solid:[40,3,48],bill:1,twophas:[23,66,49],executemani:[38,40,19,23,7],unifi:28,anonym:[37,61,2,19,42,7,65,35,46,23,28,30,40,55,33,36,76],everyon:[65,49],awesomelist:3,declarar:40,propag:[1,38,75,40,19,23,7,16,35,26,58,49,50,25,67,21,73,34,57],ordereddict:[3,19],key3:51,key2:51,key1:51,sql_modul:[40,19],arraycol:58,apply_label:35,nosesqlalchemyplugin:40,lenient:[1,35],unwarr:40,moment:[1,38,19,21,69,35,65,73],remote_t:[2,72],recreat:[0,61,40,19,7,35,72,14,34,55],enable_typecheck:[38,23,31],travers:[1,38,2,19,23,27,25,30,12,76],task:[1,38,46,40,23,43,56,35,7,66,49,25,9,61,74,34],remote_attr:[12,19],somet:[58,2,41,42,25,23,67,28,50,51,30,72,73,40,32,33,34,55,19],parenthes:[35,40,7],"__neg__":30,somed:58,spend:67,someb:7,append_foreign_kei:[38,55],existing_a1:49,colspec:27,userdict:16,iterate_inst:[40,8,67],obscur:[38,28,7],shape:[35,3,23],eager_column:36,rundown:61,cut:[30,26,40,28],somedatetim:7,apr:[38,39,40,19,23],cue:21,approxim:[58,4],myarrai:51,bin:46,varchar:[38,40,19,23,7,35,8,67],"_get_join":[40,67],bit:[38,35,40,19,23],knock:23,semi:[35,63,19,67],resultproxi:[1,38,39,40,19,23,7,35],flux:19,get_pk_constraint:[35,40],"_over":19,declared_attr:[35,40,19,72,28],brett:35,transgress:21,dbengin:67,often:[37,21,64,40,3,43,70,7,58,49,28,59,51,30,57,72,73,67,33,34,36],get_column:[0,50,27],back:[1,38,39,40,19,23,7,35,65,8,67,28],mirror:[58,21,28,7],chararct:38,scale:[35,40,19,23,7],get_col_spec:[25,40,67],pep:[69,14,26],per:[2,4,5,9,13,14,17,26,27,29,30,33,1,38,39,40,19,23,7,35,48,59,55,58,65,8,67,28,25,76],substitut:[38,3,23,5,11,56,76],fire_sequ:40,reproduc:23,connectionproxi:[28,7,40,68],nose:[19,7],patient:64,"__mapper_cls__":23,feb:[38,39,40,19,23],impos:[14,34],constraint:[1,38,40,19,23,7,35,58,67,74,18],user_from_select:37,visit_xxx:74,rowcount:[1,38,40,19,23,7,26,27,67,59,33],string_table_nam:72,myindex:2,pythonpath:[1,42],keyfunc:[3,23],nclob:[25,35,40,66,19],with_polymorph:[40,19,23,7,35,8,67,28],inclus:34,back_popul:[21,40,7,67,31,75],modicum:33,juicier:33,includ:[0,2,4,10,13,14,18,19,24,26,27,29,30,33,1,38,39,40,41,23,7,35,48,50,55,56,58,65,8,67,28,25,74,76],forward:[58,38,40,19,23,7,66,8,31,13],paren:[38,3],errant:[40,7],whereclaus:[37,55,76,23,4],reorgan:[1,40,23],incld:34,append_prefix:76,bigseri:[38,19],translat:[37,1,39,40,19,23,38,35,7,27,48,67,8,25,30,73,12,32,33,76],select_stat:8,ulist:36
,concaten:[58,40,19,23,25,51,30,33,34],trusted_connect:[9,7],interbas:[59,19],constant:[40,19,25,5,35,51,30,31,10,55,34,74],do_recover_twophas:27,utilit:62,iterator3:31,not_extend_right_of:51,unalias:[35,30],sequenti:34,eaacatalog:69,deseri:[15,40,23,7],utterli:1,benchmark:28,cst_user_name_length:56,trans2:26,totyp:30,echo_pool:[1,14,40,48],cater:51,with_only_synonym:35,unaffect:[40,3,23],ansicompil:1,do_get:28,tradeoff:36,set_sqlite_pragma:73,source_id:7,pluggabl:39,optimized_:42,queri:[1,38,39,40,19,23,7,35],last_upd:10,privat:[39,40,19,23,7,35,8,67],sensit:[58,38,40,23,7,35,67,28],peanut:58,elsewher:[21,40,3,23,35,72,34],lazy_collect:40,granular:[27,19,49,7],becam:[33,40,19,28],fatal:[5,11],passiv:[58,38,19,23,7,35,46],huddlej:7,queen:26,"_sa_initi":3,volum:[25,40],untouch:43,implicitli:[58,40,23,7,35,26,27,51,10,72,55,33,56],clement:1,somejoin:40,geograph:25,book_id:34,queryableattribut:[13,35,34,31],fortun:64,orchestr:3,annot:[58,69,35,40,19],user_prefer:[55,2],textclaus:30,append:[38,40,3,42,43,65,35,7,27,8,49,23,61,75,21,12,32,76,55,57,19],resembl:[52,33],dedup:[35,7],access:[2,4,10,13,18,19,26,27,1,38,39,40,41,23,7,35,54,58,65,8,67,28,74],versioned_sess:42,"_identity_class":[40,19],sybas:[40,23,35,28,18,71,76],timeouterror:[1,38,17],closur:49,intercept:[3,12,14,28,7,29,34,31,63,21,19,42,43,35,50,57,58,16,65,8,68,25],instanceev:[34,57,28],safer:35,vertic:[26,65,23,18],sinc:[0,1,2,3,21,7,8,10,14,28,26,27,31,32,33,34,36,37,38,39,40,19,23,43,35,46,48,49,59,55,56,58,61,16,65,66,67,68,25,70,72,73,74,75,76],jointli:71,node_2_data:21,remark:1,unsiz:38,eesh:1,resurrect:[57,7],id_1:[37,35,33,28],implement:[1,38,39,40,19,23,7,35,65,8,67],honor:[35,40,19,67,7],configur:[38,39,40,19,23,7,35,65,8,67],supports_alt:27,st_astext:25,"__abstract__":[58,19],gatewai:[35,55,76],a_alia:37,refcolumn:2,ed_us:61,zerofil:5,sapendingdeprecationwarn:17,websit:5,append_group_bi:76,customnumer:58,trail:[38,23],"transient":[37,21,40,19,23,7,35,58,49,61,67,31,34],name2:33,name3:33,name1:33,after_cursor_execut:[50,28],account:[37,38,46,40,19,23,4,35,7,49,64,30,21,12,55,74,76],cymysql:35,alia:[38,40,19,23,7,35],get_foreign_kei:[0,27],carrot:8,obvious:[35,19],department_id:55,didnt:[1,38],fetch:[38,39,40,19,23,7,35],employe:[37,58,72,23,31,55,34,75],sqlite:[1,39],attributeev:[21,32,34,57,28],"_detect_col":58,msbiginteg:7,serial:[1,38,40,19,23,7,35,27,54,28,10,55,56,18],forese:[35,40,67,36,28],filepath:73,varchar2:[40,66,19],nodes_2_nam:65,special_numb:57,should_autocommit_text:27,append_whereclaus:76,lower_1:64,commit_twophas:[50,68],test_sqlalchemy_orm_pk_given:46,insp:[0,37,67,58,31],dynamicmetadata:[1,38,23],inst:[63,57],redund:[1,21,40,19,23,7,35,58,49,8,36,76],defaultclaus:[40,19,7,35,8,67,10,55],check_same_thread:73,physic:39,our_us:61,bind:[1,38,39,40,19,23,7,35],correspond:[0,1,2,3,4,5,10,21,12,13,14,17,26,27,29,30,31,32,33,34,37,38,39,40,19,23,7,35,46,48,49,51,55,56,57,61,60,8,67,28,25,73,75,76],fallback:[37,40,19,7,67,51,25],furnish:24,some_table_id:72,utcnow:74,effici:[37,38,46,40,23,42,35,26,58,49,68,75,67,14,28,34,36],bunch:39,movabl:58,ext3:32,ext2:32,ext1:32,status_cod:69,blatant:46,typemap:[38,30,27],greater:[38,40,19,23,7,5,35,64,66,28,70,31,14,34],"__getitem__":[35,30],zxjdbc:[40,19,67,48],dan:35,dao:23,dai:[58,38,40,19,23,7,65,66,28,25,9,73],mention:[21,40,3,69,26,49,28,29,33],mssql_includ:[9,35],mymutabletyp:16,ddleventsdispatch:55,autoflush:[37,21,40,3,23,7,35,46,65,58,49,8,72,73,57,19],disregard:[34,23],view_nam:[0,27],intellig:[38,39,65,72,23],sql2000:9,matt:35,compiled_cach:[76,26,40,19,4],req:
49,ret:46,idosyncrasi:0,stub:[22,23],typenam:25,rel:19,unique_constraint_nam:29,ref:[35,19,23,7],defens:[58,49],math:[30,40,31],clarifi:[39,40,19,23],insid:[38,40,19,23,7,35,67],schema_item:40,standalon:[38,39,40,19,23,35,67],pjoin:75,releas:[21,5,14,26,27,63,1,38,39,40,19,23,7,35,46,48,49,50,56,57,58,59,69,65,8,67,28,76],afterward:[56,49],pgbiginteg:38,expire_on_flush:[34,19,31],grok:42,cname:17,retain:[40,41,23,35,8,49,28,59,19],"_ddlcompil":56,do_rollback_twophas:27,depende:[0,35],facil:[1,38],ancient:[19,23],"_restat":76,messag:[0,1,35,40,19,23,38,7,26,28,51,25,73,33,17,76],gear:[58,61,33],primary_el:2,pjoin_employee_id:75,basic_associ:42,prune:[23,49],morph:28,nodes_1_parent_id:65,structur:[0,1,21,5,15,23,33,16,37,38,40,19,42,35,46,48,49,52,55,58,61,34,8,67,25,75],charact:[1,38,40,19,23,7,74,35,26,27,9,48,25,29,30,11,55,56,17,58],coerce_to_decim:[66,19],before_flush:[23,7,35,8,32,57],"__rtruediv__":30,plaintext:5,thereaft:[69,61],"_mappedattribut":[72,31],have:[1,38,39,40,19,23,7,35,65,8,67],tidi:23,idan:35,update_stmt:27,min:[37,1,41,7,69,73],mid:49,mix:[1,38,40,3,23,4,5,7,26,65,58,35,19],builtin:[23,46,25,30,33,57],mit:24,uppercas:[1,40,19,23,7,5,66,67,51,9,11,73],unless:[1,26,2,3,21,5,8,10,14,7,27,31,33,34,37,38,39,40,19,23,43,35,49,51,55,66,67,28,25,72],preliminari:[38,39,40,7],subtyp:[58,42,23,7,62,30],synonym_for:[72,23],eight:[5,46,56,48],child_two:58,before_cursor_execut:[50,35,26,28],gather:19,hardcod:[35,40,23,7],psycopg:[38,14,48,51],occasion:[38,61,35,67,28,12,33],"32x":25,sa_pool_kei:19,text:[2,4,10,18,26,27,30,1,38,40,19,23,7,35,54,55,56,65,8,67,28,76],nage:65,interval_1:64,geobuff:41,setter:[12,23],name_syn:58,type_coerc:[25,35,30,40],notsupportederror:17,user_id_seq:[61,33],kinterbasdb:[35,40],typeobj:27,pave:58,regularli:[6,22,34,7],bean:[61,8],some_us:34,with_ent:[37,40],increas:[40,67],enclosing_stmt:33,reluct:38,organ:[1,61,41,42,7,46,70,19],integr:[58,20,40,23,38,44,35,7,48,54,50,25,70,67,14,56,18],instrumentationmanag:[35,7],from_hint:27,conform:23,dbsession:46,users_user_nam:36,instrumentedset:3,malfunct:23,bytestr:[40,23,25,67,51,9],email_address:[37,61,4,69,65,49,55,33,34,36],pattern:[0,1,8,12,18,20,26,33,34,37,39,40,19,42,7,35,46,55,58,62,64,66,67,28,69,72,75],boundari:[39,26,23,49,46],picklabl:[65,16,67,31],unusu:[58,21,40,19,49,28,25],progress:[1,38,39,40,23,61,35,26,49,46,30,32,56,74,57],autorollback:35,nowait:[37,1,76,19,38],del_attribut:49,in_transact:26,format_table_seq:27,plugin:[1,19],equal:[58,61,40,23,7,35,46,65,28,51,30,25,31,55,33,34,36,76],"_compared_to_oper":30,instanc:[0,4,5,9,13,14,15,18,19,26,27,29,30,33,1,38,39,40,41,23,7,35,48,50,51,55,56,58,65,8,67,28,25,74,76],equat:[21,38,7,61,31,33,34],futher:59,freeli:[58,40,23,27,65,8,49,72],strftime:[23,7],comment:[0,50,55,7],nocount:23,someothernam:33,guidelin:18,before_commit:[32,57],boston_address:21,commenc:67,unreferenc:49,columnam:23,json:58,assert:[37,38,39,40,3,42,7,16,35,46,23,58,49,65,14,34,19],untyp:23,generate_new_valu:10,determinist:[1,38,40,19,23,7,35],multi:[38,39,40,19,23,7,35],plain:[58,38,40,19,23,35,26,48,54,8,10,67,2,55,28,18],defin:[58,38,39,40,19,23,7,35,26,65,8,67,28,74,18],"__sub__":30,get_attribut:49,helper:[40,19,23,7,35,67,28,30],almost:[58,38,64,40,3,69,46,49,28,53,67,2,34,19],post_exec:27,substanti:24,execute_compil:39,inspector:[35,40,19],unneed:[3,23],"6nm2ne":7,colkei:35,codepath:[38,28],"__version__":[38,33,70,61],fetchon:[58,40,19,7,26,59,25,33,4],infer:[51,67],tighten:[38,40,23],hostnam:[5,39,34,48],again:[39,40,19,23,7,35],specialpric:30,brad:1,column_nam:[0,27],som
eitem:[21,8],judgment:58,exceedingli:[35,19,28],center:61,holdov:28,explciitli:38,mysql_engin:[5,38,55,72],eqival:76,choos:[37,3,67,25,32,33,63,57],position:[28,48,10,3,7],dogpil:[35,7],latest:[58,39,40,19,5,35,70,57],unari:[25,35,30,7],tablename_id:40,expung:[1,21,39,40,19,23,7,62,35,58,8,34],interim:[58,38,9,19],surpass:14,shuttl:34,nextid:10,after_xxxx:38,convert_bind_param:[8,23,7],"_get_col_to_prop":[35,40],onward:[51,19],historyarrayset:[1,39],bench:23,parent_id:[58,21,3,69,64,65,72,28,31],add:[0,2,4,5,8,9,10,14,27,30,33,1,38,39,40,19,23,7,35,50,51,55,56,58,65,66,67,68,25,73,74,76],mysql_charset:5,ado:3,smart:[21,35],"_sa_class_manag":40,realiz:28,create_sess:[38,40,65,7],substit:56,folder_1_nam:58,"_enable_transaction_account":[49,7],test_sqlalchemy_cor:46,insert:[1,38,39,40,19,23,7,35],like:[1,38,39,40,19,23,7,35,65,8,67,28],success:[37,38,64,40,19,23,61,26,49,67,33,76],ad1:8,opensql:[61,33,75,36],dbapierror:[1,19,35,30,14,17],compile_my_constraint:74,flag_modifi:49,foreign_key_list:19,dictlik:[40,19],soft:57,unreach:3,unformat_identifi:27,datamapping_inherit:42,hair:58,begin_twophas:[50,26,23,49,68],proper:[1,38,39,40,19,23,43,7,66,49,48,51,30,21,75],"_reflect":10,tmp:51,incant:19,interfaceerror:17,add_properti:[1,34,23,7],columnon:30,aliasnam:[34,76],slight:[1,38,40,19,23,7,35,28,21,14,16,76],version_id:[39,34,19,28],host:[1,29,40,25,5,6,66,48,59,51,9,39,11,73,14,34,17],arg1:[74,7],although:[38,7,48,49,67,10,57,21,32,33,36],has_tabl:[40,38,27],simpler:[1,38,39,40,72,23,61,58,67,70,31,12],beaker:[35,40,19],about:[1,38,40,19,23,7,35],actual:[0,1,2,3,21,5,12,14,26,27,30,31,32,33,34,36,37,38,40,19,23,7,35,46,48,49,51,55,57,58,61,69,8,67,28,25,72,73,74,76],socket:51,lifecycl:[40,23,49,68,29,56,57],discard:[38,46,40,3,76,26,49,50,73,14,33,34,57,19],noreferencedcolumnerror:17,mysql_auto_incr:38,easy_instal:[23,18,7],dataset:14,mymodel:72,guard:[40,34,19,7],doesnt:[1,38,39],column_reflect:[35,19],cart_id:10,rc3addcc9ffad:35,mcmillan:40,listofid:65,quote_plu:9,biggest:23,int8rang:51,"_warn_on_bytestr":[25,9,66],only_synonym:76,unexpect:[28,60,19,49,43],relationproperti:[40,67,7],brand:[69,55,49],keyerror:[39,40,3,23,35,60,31,19],"_password":34,inlin:[38,40,19,23,35,65,67],bug:[1,38,39,40,3,23,7,35,65,66,67,28,73,58,76,19],deassoci:17,wish:[21,19,61,69,56,27,49,25,67,72,73,55,33,34,75],unload:[58,21,60,40,19,7,69,49,31,32,57],"_dbproxi":14,foreignkeyconstraint:[1,38,2,19,7,5,35,58,67,50,72,21,40,55,56,17],pin:58,dure:[4,5,10,14,17,26,27,30,38,40,19,23,7,35,51,56,58,65,66,67,28,25,73,74,76],pip:[18,7],probabl:[37,38,40,19,7,35,64,59,25,21,73,74,36],manlio:19,"__set_composite_values__":7,virtual:7,associationproxi:[72,38,40,19,42,7,35,46,23,8,31,12,34],myisam_databas:46,nest_on:39,uowtransact:57,xomeworx:64,unset:[38,48,43],someparam:23,tinyblob:[5,38],soliddb:5,derr:35,poorli:[64,35,46,28,25,57],sleep:14,utcdatetim:65,testtyp:39,consider:35,undef:[40,23,65,8,67,34],kuhlman:7,spec:[38,74,48],msreal:7,"_somekei":23,concret:[38,40,42,23,7,69,8,18,19],under:[8,9,19,26,27,30,31,16,1,38,40,41,23,7,35,46,55,34,66,67,25,73],merchant:24,onclaus:19,thsoe:19,vastli:[58,19,23,7,35,67],quantiz:25,jack:[61,33,3,36],engine1:49,"__nonzero__":[35,23,7],employee_typ:[34,75],"_process_row":23,drop_al:[1,35,40,19,23],poolev:[35,68,50,29,14,28,17],naiv:[46,40,42],direct:[1,38,40,19,23,7,35,65,8,67],street:[21,34],after_parent_attach:[50,25,29],vitek:19,hide:7,introspect:[0,58,35],poison:42,supplier:42,symmetr:[40,23],corresponding_column:[38,55,23,76],subtre:21,identifiererror:17,path:[1,40,19,23,35,65,8,67],col
umncollectionmixin:2,enrol:74,anymor:[1,40,19,23,67,28],midwai:23,precis:[1,38,40,19,7,5,35,67,25,9,11,33],"__ge__":30,other_id:5,attributeext:8,portabl:[64,40,7,5,26,8],tremend:7,book_author:34,next_vers:34,thingtwo:49,notin_:[35,30,55],user_:39,describ:[58,38,2,41,4,35,26,65,48,54,68,50,29,8,67,74,28,17,18,19],would:[1,2,3,4,7,9,10,21,12,14,19,23,26,27,30,31,32,33,63,36,37,38,39,40,41,42,43,35,46,49,55,57,58,61,69,34,64,65,8,67,28,25,72,73,75,76],regsitri:49,harland:35,spike:1,locate_all_from:76,acollect:3,start_dat:72,phone:[29,57],mycolumncompar:31,pkg_resourc:38,selectfirst:[38,39,8],must:[2,5,8,9,11,15,26,27,30,33,38,39,40,19,23,7,35,48,50,51,55,56,58,59,65,66,67,28,25,73,76],schemavisitor:55,render_literal_valu:[40,27],"3ism":8,loosen:[35,19],loosei:7,characterencod:5,endswith:[37,1,23,35,30,55],mayhem:1,fake_us:61,end:[1,4,5,7,23,26,27,30,34,36,37,38,40,19,42,43,35,46,48,49,55,57,61,69,16,64,65,67,28,25,72,74,76],ent:40,widespread:[38,9],ancestor:[55,30,36,76],collaps:23,"_sa_sess":7,dialect:[1,38,40,19,23,7,35,8],mess:[38,33],mysessionextens:32,badli:[1,7],koval:35,posts_bodi:61,bootstrap:[35,42],exclud:[40,19,7,16,35,66,51,72,55,34],an_engin:55,scoped_sess:[40,23,7,46,65,8,49,72,15,57],environ:[38,40,42,35,46,66,49,28,67,55],enter:[65,35,27,3,49],plagu:39,smallinteg:[5,25,23,7],engine_from_config:[40,48,23],partition_bi:[30,19,28],macaddr:[51,67],composit:[1,38,39,40,19,23,7,35],somedata:[15,75],over:[1,3,4,8,9,21,23,30,33,34,36,37,38,39,40,19,42,7,35,49,51,55,56,61,65,66,67,28,25,72,73,76],becaus:[3,21,9,10,12,14,26,31,33,34,36,38,39,40,7,35,46,49,50,51,58,61,69,66,67,28,72,74],get_instance_dict:[63,7],nulltypeengin:23,osx64:46,do_execute_no_param:27,compare_valu:25,fetchal:[58,38,46,40,26,66,59,51,14,33,76],btext:3,comprehens:[21,64,40,42,56,46,49,28,52,70,31,34],"_query__context":37,subbas:7,nodes_nam:65,"_autoclose_connect":26,manager_gett:63,circulardependencyerror:[40,17,7],mytabl:[0,2,3,4,5,8,9,10,26,30,31,16,19,46,49,51,55,56,58,65,66,67,28,25,72,74,76],alex:[19,7],odbc:[5,9,35,23],unpickl:[38,19,23,7,35,15,16,57],kwarg1:7,each:[0,1,2,3,4,5,7,8,9,10,21,12,14,28,16,17,19,23,26,30,31,32,33,34,36,37,38,39,40,41,42,43,35,46,48,49,50,51,55,56,57,58,61,69,63,64,65,66,67,68,25,70,71,72,73,74,75,76],sawarn:[17,23],validate_phon:[29,57],preferred_recr:72,unicodetyp:40,rdbms_googleapi:[35,19],diana:[35,19],versionedfoo:37,brianrhud:19,"0x10d34b8c0":72,goe:[38,3,7,35,46,66,49,61,21,36],newli:[4,9,14,28,26,27,30,32,33,34,37,21,40,19,23,7,35,48,49,50,51,55,57,58,61,67,68,72,75,76],laid:38,sane:23,descriptorproperti:31,got:[1,38,39,19,23,61,65,58,33],"_write_prop":72,rail:42,free:[61,24,23,7,35,49,55,34],fred:[37,58,61,69,65,33],interval_1_id:64,connection_proxi:[50,14],do_stuff:3,reftargetmixin:72,filter:[58,38,15,40,19,23,7,35,65,8,67,28,25,55,33,56,74],isoutparam:30,convert_result_valu:[8,23,7],get_param:[39,40],onto:[39,40,19,23,7],get_primary_kei:[35,40],rang:[58,38,39,40,23,5,35,8,25,30,11,55],collectionadapt:[40,3],rank:28,restrict:[58,38,2,41,24,4,49,28,15,34,19,57,3],foo_1:67,datastructur:[61,35,16],alreadi:[0,1,3,21,5,8,9,10,11,12,14,15,16,19,23,26,30,32,33,63,36,37,38,40,41,42,7,35,46,48,49,51,52,55,56,57,58,61,34,65,66,67,28,25,70,72,73,74,75,76],primari:[1,38,39,40,19,23,7,35,65,8],pgmacaddr:23,read_committed_snapshot:9,rewritten:[1,35,40,8,67],wildli:[63,67],top:[5,9,70,11,13,23,26,33,34,38,40,19,42,46,48,49,51,55,58,69,66,28,73,76],ton:[38,19,7],too:[38,40,19,42,7,65,35,23,8,67,25,61,30,21,74,33],classstat:7,concurrencyerror:39,billing_address:21,tool:[58,46,26,8,49,10,13
,55,70],setuptool:[1,38,40,23,7,26,67,28,70],took:33,sqlalchemyerror:17,folder_1_account_id:58,horizontal_shard:[45,35,40,19],incur:[21,19,35,46,67,25],conserv:[25,34,19,48],expr:[37,38,64,40,19,23,4,7,58,51,30,21,25,76],zero:[37,38,46,40,19,23,43,5,35,7,48,49,33,34],sqlengin:[1,39],index_method:51,fashion:[3,42,7,35,26,23,66,49,48,30,72,28,36,19],"0x101029dd0":30,"_notacolumnexpr":10,hypothet:[49,7],raw:[38,39,40,19,23,7,35,26,66,67,68,50,30,15,8,17],pylon:23,unresolv:[5,19],get_committed_valu:7,meta1:55,meta2:[55,65],derek:35,"_binaryexpress":[40,67],cleft:[55,30],insur:38,plenti:[69,49],iteself:72,glob:23,monkeypatch:[1,38],attributeextens:[21,42,23,7,8,28,32,34],insul:26,flow:[46,40,67,49],visit_create_column:56,dissasoci:31,declar:[1,38,39],long_str:38,changeset:[38,39],somechar:58,random:[41,23,26,27,49,19],radiu:64,dupe:[38,35],stringent:[61,33],xxg527:61,radic:28,select_from:[37,38,40,41,23,7,35,65,58,67,28,76,61,33,34,75,19],oci:66,absolut:[40,35,64,48,73,36],with_polymorphic_mapp:[37,34],createfunc:49,oct:[38,40,19,23,7],use_proxi:[55,30],watch:[61,33,40],"__subclasshook__":30,fetchedvalu:[40,19,35,8,10,55],popitem:3,report:[0,1,40,19,23,38,7,26,66,49,28,51,9,70,67,14,34,35],reconstruct:34,slnumer:23,collector:59,caseinsensitiveword:64,rdbms_mysqldb:35,twice:[37,38,64,19,23,61,46,49,33],unlabel:23,oursql:[40,19,48],unicode_error:[25,9,40,66],get_default_schema_nam:[40,23,7],setslic:[35,19],nut:[58,46],generic_fk:42,num:23,corrupt:[40,3,7,66,28,25,9,19],eventnam:[55,19],hopefulli:[1,9,35,19],databas:[1,38,39,40,19,23,7,35,65,8,67],urllib:[9,19],outstand:26,approach:[40,3,42,35,46,23,66,67,28,51,14,33,19],"0x12cdd30":12,weak:[38,23,7],group_bi:[37,61,40,23,7,8,67,33,34,76],protect:7,mildli:9,uninstall_memb:63,fault:38,interfer:[50,25,19],nchar:[38,22,23,7,5,35,25,9,40],end_1:64,"_dateaffin":25,"__legacy_microseconds__":[40,8,67,7],save_or_upd:[1,40,23,7,8,67],noload:[38,40,19,23],beef:40,nonsens:7,been:[1,38,39,40,19,23,7,35,65,8,67,28],accumul:[69,49],quickli:[59,3,23,28],session_kei:19,yusuf:19,xxx:[38,40,65],inherit_condit:[35,34,23,7],uncommon:34,kw1:12,mysql_us:[5,19],kw3:12,kw2:12,column_on:58,"catch":[38,14,46,19,49],my_object:51,mssmalldatetim:7,blazingli:28,dist_doc:59,weren:[38,40,23,61,35,49],before_drop:[50,56],coerce_to_is_typ:[25,35],reflectedtwo:[58,72],bitflag:49,pyramid:49,shebang:65,ms_utcnow:74,tediou:33,suggest:[21,40,5,67,28,25,49],complex:[37,38,39,40,19,42,7,69,56,35,26,23,58,67,8,46,21,28,34,36],"_connectionfairi":[50,68],"_with_invoke_all_eag":19,complet:[3,21,5,9,70,23,26,27,31,32,34,36,1,38,39,40,19,42,7,46,48,49,50,51,57,61,60,69,16,8,67,28,25,72,73,75,76],yield_curv:30,screw:[1,40],get_result_processor:27,my_on_checkout:50,employee_nam:[55,75],redefin:[1,38,40,19,23,35,58,67,30,55,33],some_id_seq:51,instrument_collection_class:63,with_unicod:40,shortli:61,everyth:[58,39,19,23,54,28,18],user_acount:69,shorthand:[37,38,2,21,69,26,61,55,75,76],eli:35,expos:[12,59,34,23,25],road_geom:42,columnel:[58,38,2,61,23,4,35,7,51,30,55,33,74,25,76],"__delattr__":30,instrumentationfactori:[58,63],cherri:12,gave:[58,21,40],initial_quot:27,managers_t:75,is_remov:[34,19],id_seq:66,apart:39,arbitrari:[40,3,23,43,62,35,8,67,25,30,18,55,76,36,19],contradict:5,"_inspectionattr":[37,12,64,34,31],simlarli:67,omit_schema:27,makedsn:[1,66],excerpt:34,indirect:[35,40,19,49,7],successfulli:23,cooper:46,query_context:[32,57],shard2:26,shard1:26,operationalerror:[38,40,17],core:[35,39,40,19,7],generic_associ:[35,19,42],msimag:7,init_failur:57,after_cr:[50,56],nodes_1:65,nodes_3:65,no
des_2:65,easysoft:[9,19],selectresult:[1,38,39,8,23],pgtext:7,assign_mapp:38,chapter:[61,10,25],ucs2:5,alexand:35,steadili:51,choke:25,surround:[19,5,35,49,25,11,55],unfortun:[58,73,40,48,8],instance_st:[13,49],update_tables_claus:27,produc:[0,2,4,5,8,9,10,11,13,14,17,26,27,30,33,38,39,40,19,23,7,35,48,51,55,56,58,65,66,67,28,25,74,76],encod:[38,39,40,23,7,35,27,48,67,74,33],mutablebas:[35,16,19],"__and__":30,storag:[58,33,26,11,68],othert:[0,5,40],transform:[38,40,19,7,27,67,50,25,30,33],why:[1,14,33,23],gig:33,stuck:33,anderson:35,gid:5,gin:51,head:49,substut:56,altogeth:[28,38,46,7],fold_equival:[38,35,40],labelnam:[38,40,23],cidr:51,attr:[0,58,39,40,3,42,43,69,7,23,49,46,31,12,34,57,19],default_schema_nam:[0,40,27],fundament:[19,8,49,25,30,55,33],nohostnam:48,"0x101525810":58,trim:[38,76,19,23,7],inordin:[46,71],all_tab_column:40,read_timeout:19,propig:[1,38,7],check:[1,38,39,40,19,23,7,35,65,67],assembl:[58,40,23,35,64,27,31,32,33,34],translate_connect_arg:48,readonli:[37,19],locate_dirti:23,tip:67,classdecor:[40,72],is_modifi:[35,23,7],node:[37,38,2,3,42,21,64,65,19],consid:[3,5,10,12,26,30,63,36,37,21,40,19,7,35,49,55,56,57,61,69,34,64,66,67,28,25,75],sql:[1,39],sessionawaremapp:[28,40,67,7],optimize_limit:[66,7],bitbucket:[58,35],longer:[1,38,40,19,23,7,35,65],bullet:43,bidirection:75,reconstructor:[32,34,8,7],get_lastrowid:27,serious:40,golf_sw:72,backward:[38,40,23,7,35,8,67],mypkg:39,foo_spam:28,snif:12,addresses_2:[61,33],addresses_1:[61,33,36],focus:[35,19,72,28],is_act:[57,23,19,49,7],functionel:[40,19,35,26,30,74,33],signific:[21,8,9,70,26,32,34,36,37,38,40,19,23,7,35,49,59,57,69,66,67,25],stumbl:33,xxx_by:38,bind_to:38,row:[1,38,39,40,19,23,7,35,65,8,67,28],"__rshift__":30,readabl:[12,58,35,23],"_row_processor":23,quasi:30,base_alia:37,dropindex:[56,40,19],my_append_listen:57,sourc:[38,40,19,23,7,35],addresses_q:37,col2_1:4,addresses_t:[0,37,34,36],feasibl:[12,61],"_datetimebas":9,hour:[58,5,46,48,73,14,74],level:[38,40,19,23,7,35,65,8,67,28],necessit:67,sessioncontextext:8,add_ent:[37,38,40,23,65,8],quick:[21,19,42,61,8,49,67,74,33,34,76],"__rmul__":30,group1:65,port:[1,38,40,19,7,5,66,67,48,59,51,9,11],explictli:40,retain_schema:55,folder_folder_id:58,learn:[14,28],someopt:19,normalize_whitespac:[61,33,36],do_someth:[26,57],fbdialect:23,preorder:21,bat_tabl:65,numaddress:8,tranasct:57,overriden:5,atext:3,weird:7,someobject:[65,57,49],guilti:7,semant:[1,21,3,23,7,8,49,51,30,72,34,76],cruftifi:23,tweak:[1,38,39,40,19,35],and_:[58,19,7,65,30,33],visibl:[61,65,3,67,17],append_constraint:[38,55,40,67,56],memori:[33,40,19,7,5,35,48,67,28,39,30,13,14,15,55],todai:[58,67],someinst:38,msn:33,handler:[58,21,40,3,35,26,66,73,14,57,19],criteria:[37,21,40,19,4,69,35,7,65,49,28,61,30,67,72,55,56,76],msg:[60,17],akiban:[35,71],plug:[70,48,49],flush_context:[32,57],con_record:[50,68],user_address_join:34,get_column_default_str:56,compoment:69,purpos:[1,3,21,10,14,24,16,31,33,63,37,38,40,19,23,35,48,49,50,55,56,57,58,34,64,8,67,28,75,76],stream:[37,23,7,26,48,49,16],count_from_n_factori:43,backslash:[35,40,73],do_begin_twophas:27,addresses_user_id:[61,67,36],firebirdsql:59,alwai:[1,38,39,40,19,23,7,35,65,8,67],differenti:[69,26,8,72],dinner:61,hasaddress:42,anyon:[23,7],fourth:40,multiparam:[46,4,26,27,48,68,50,76],dbapi_conn:[50,27],clone:[40,19,7],mixin:[38,40,19,42,7,35,8,67,28,51,25,74,17],get_version_info:8,practic:[3,25,69,35,8,49,28,51,75,55,34,36],predic:[21,25,51,30,55,56],"_is_select":23,inform:[0,1,2,3,4,5,6,7,8,9,10,11,13,14,22,23,26,27,31,32,33,34,37,21,40,19,42,43,35,46,48,49
,50,51,52,55,56,57,58,59,61,69,16,66,67,28,25,72,73,74,75,76],preced:[38,40,19,23,4,5,35,7,72,49,30,67,11,73,55,33,56,76],combin:[3,4,8,70,12,23,27,30,33,1,38,40,19,42,7,35,46,48,50,51,55,56,58,69,66,28,25],should_autocommit:27,find_docu:42,anticip:[14,56,49,76],ymmv:7,mainli:35,join_via:[38,8],do_rollback:27,wehrstedt:35,underneath:[40,48,23],term:[37,38,40,3,42,7,69,35,64,23,72,49,68,61,67,31,28,33,34,75,76],should_creat:56,native_datetim:[40,73],name:[1,38,39,40,19,23,7,35,65,8],advent:31,propnam:[1,38,40,8,23],typeengin:[0,38,40,19,23,7,5,35,26,27,66,67,8,51,9,30,31,55,25,58],cascade_iter:[40,34,31],clauseright:30,opert:58,mysql4:39,mysql5:38,individu:[0,1,2,3,4,5,6,9,10,11,14,20,22,23,21,26,31,32,33,34,36,37,38,40,19,42,7,35,46,48,49,59,51,55,56,57,61,69,16,64,66,67,28,25,70,72,73,76],username_1:25,begun:[61,26,49,73,32,57],dispos:[1,38,40,19,7,63,35,26,49,68,50,31,14,34,55],foreign_kei:[0,38,2,19,21,7,35,46,27,58,30,31,73,40,55,76],hacker:[38,7],urlencod:9,profil:28,kent:[40,19],factori:[61,3,42,43,7,26,27,49,25,30,12,63],adaptoldconvertmethod:8,migrat:[1,35,23,7],sequences_opt:27,mysql_col:38,metadata_default:34,theori:[64,40,36],enterprisedb:35,int4_op:51,server_version_info:[5,40,9,27],synchron:[37,38,39,40,3,43,35,26,8,49,46,61,67,21],refus:[46,28],mysql_:38,turn:[1,38,39,40,19,23,35,67],place:[1,38,40,19,23,7,35,67],jaimi:40,imposs:[37,21,7,5,9,1],origin:[0,1,2,21,9,14,23,26,30,36,37,38,39,40,19,42,7,35,48,49,51,55,57,58,61,69,66,28,25,73,74,76],boundmetadata:[1,38,23],dictcol:23,arrai:[1,38,39,40,19,23,35,65,8],walkthrough:[37,21,58],"_function":[40,67],except_:[37,40,7,67,33,76],engineev:[19,28],given:[0,2,4,5,9,10,11,13,14,28,17,19,26,27,29,30,33,1,38,39,40,41,23,7,35,48,50,51,55,56,58,66,67,68,25,74,76],reli:[0,1,40,19,58,38,7,26,66,67,8,51,28,33,56,35],assort:23,necessarili:[23,4,35,26,8,49,28,25,9,34],cope:43,copi:[2,4,21,24,26,30,31,33,34,37,38,40,19,23,7,35,49,50,55,56,58,69,16,8,67,25,72,76],specifi:[2,4,10,19,26,27,29,1,38,39,40,41,23,7,35,48,54,50,56,58,65,8,67,28],engine_two:[58,72],github:[5,35],enclos:[37,58,46,40,19,23,7,69,35,26,65,49,64,30,33,34,57,76],pragma:[61,19,5,73,55,33],holder:24,serv:[61,7,69,35,26,8,49,30,72,13,57,76],wide:[58,38,40,3,23,69,27,29,49,48,51,9,70,57,13,55,28,36],nathan:35,last_inserted_param:[26,35,10,27,23],custom_op:[25,35,30],balanc:64,posit:[1,3,4,7,12,26,27,29,30,33,34,37,38,40,19,23,43,35,48,49,55,56,61,69,65,8,67,28,25,72,73],seri:[4,70,26,36,37,38,39,40,19,23,7,35,46,49,57,58,69,65,8,67,28,25,72],pre:[4,7,10,21,14,17,19,26,30,31,32,34,36,37,38,39,40,41,23,43,35,49,59,51,56,57,61,67,28,72,76],filter_someth:37,pro:46,ani:[1,38,39,40,19,23,7,35,65,8,67,28],ant:38,create_xid:[27,7],executedirect:9,bitwis:[33,55,30,64],techniqu:[0,20,46,40,23,69,26,58,8,10,14,28,56,18],cascade_cal:23,ideal:[19,23,7,69,35,67,28,50],nosuchtableerror:[38,17],sure:[1,61,39,43,46,65,8,49,25,67,72,74],multipli:[19,36,49,4],clearer:[1,56,40],set_attr_by_column:23,later:[2,21,10,26,33,34,38,39,40,19,23,7,49,51,55,57,61,65,8,72,75,76],quantiti:[37,76],create_al:[1,35,40,19,23],uncondit:49,pursuant:39,excludeconstraint:51,my_index:[5,51,9],permiss:[37,5,24,11,38],hack:[58,30,28],"_from":19,cx_oracl:[1,38,40,19,23,4,35,7,27,48,67],explicitli:[0,1,2,3,21,5,8,9,10,12,28,26,27,30,33,34,36,37,38,39,40,19,23,7,35,49,51,52,55,56,58,61,69,16,65,66,67,68,25,72,73,75,76],localtimestamp:[39,41,19],wood:33,analyz:[61,33,23],element_id:2,unicodetext:[25,9,66,23,7],from_stat:[37,61,40,19,7,35,34,36],sysdat:[10,40,41],ssl:[38,40,19],tailor:[21,48,3,72,19],lighter:28,self_group:
[7,35,4,30,55,76],pipermail:[23,7],reveal:[36,49,8],"_create_connect":28,"07migrat":19,current_paramet:[10,40],n12:65,abs_1:64,detect:[1,38,40,19,23,7,35,67],"__all__":[25,19,7],construct_param:27,hist:49,getattr:[39,40,3,64,31,12],refect:7,"_connectionrecord":[50,68],myengin:65,get_select_precolumn:27,come:[58,38,39,40,19,42,5,65,35,46,27,67,28,69,23,60,72,33,36],reaction:21,region:[42,76],quiet:38,contract:[40,24,7,67,28,51,25],"_do_return_conn":28,trofatt:35,reset_isolation_level:27,tnsname:48,inspir:67,period:[40,69,46,66,49,73,14,36],pop:[39,3,43,28,16,57,19],colon:[61,33],coupl:[65,7],geom_id:25,valueerror:[16,34,19,57,43],post_upd:[38,39,40,21,31,17],manager_nam:75,decrypt:[25,35],ormtutori:65,tiger:[58,46,40,25,5,26,48,49,50,51,67,72,14],workspac:[61,49],"__eq__":[61,40,19,42,7,16,64,23,8,67,25,30,65,31,12,55,33,34],hasattr:[40,7],quote_identifi:27,"case":[1,38,39,40,19,23,7,35,65,8,67,28],myself:19,mount:23,xyz_my_goofy_funct:33,cast:[0,1,39,40,41,23,7,35,58,25,9,30,19],shipping_address:21,anytim:33,base_column:[55,30],cascade_mapp:1,someword:64,datacontain:58,author:[61,40,24,7,43,67,9,72,55,34],alphabet:43,trip:[21,40,19,23,7,35,67,59],use_threadloc:[38,14],eventu:[58,38,40],nest:[1,38,39,40,19,23,7,35],driver:[1,40,19,23,25,5,6,35,26,27,66,67,48,59,51,9,11,28,17,71],columnoper:[58,19,35,64,25,30,31,55,33],"__reduce_ex__":30,footprint:7,somecolumn:[38,7,10,35,48,8,25,30,55,33,76],ntext:[25,9,35,7],"3bdatabas":9,yhi:23,facad:[26,42],"0x12a6ed0":21,without:[4,5,8,10,14,28,17,24,26,33,1,38,39,40,19,23,7,35,51,55,56,65,66,67,68,25,73,74,76],model:[58,21,40,3,42,61,69,46,49,72,73,14,33,55],set_attribut:49,clob:[38,19,23,35,8,67,66,25],when:[1,38,39,40,19,23,7,35,65,8,67],noreferenceerror:17,halfwai:33,kill:1,version_uuid:34,get_dbapi_typ:25,blow:[35,40,19],miscellan:23,hint:[37,61,40,19,4,51,10,25,76],"0x10151ce10":72,except:[1,38,39,40,19,23,7,35,8,67],blog:[61,64,3,42],right_nod:21,blob:[38,40,23,42,25,5,35,66,67,51,9,11,73],table_nam:[0,26,27,23,7],vertice_x2:34,vertice_x1:34,cast_nul:[34,19],saniti:33,engine_email_address:55,patrick:35,btoc:36,getconn:14,cubrid:71,argumenterror:[72,34,17,23,19],"_relationship_opt":58,column_mapped_collect:[65,7,40,3,19],staledataerror:[40,60,34,19,28],rowid:[38,73,40,19,7],egenix:[9,22],intact:[58,35,31,7],slice:[37,58,40,3,23,7,35,65,51,61,42,19],name_label:61,adalias_address_id:36,legal:[35,26,46],moor:35,moot:46,complic:[21,35,39],inetaddr:25,garbag:[1,60,40,19,23,7,16,35,26,49,59,14,33,34,57],inspect:[35,40,19,67],immut:[3,34,28,25,30,55,76,16,19],"_execute_clauseel":[23,7],passivedefault:[1,39,40,23,8,67,10],firebird:[1,39],stand:[25,35,28,73,7],alemb:[55,46,19],routin:[42,7,16,35,49,50,72,12,14,34,74],adv_param_conv_dynamic_type_transl:59,compos:[61,4,35,46,65,30,12,55,33,16,76],weakkeydictionari:[16,7],supersed:[35,19,8],not_extens:31,strict:[38,40,23,5,35,46,49,11],interfac:[1,38,39,40,19,23,7,35,26,65,58,54,50,29,13,56,18],instancest:[58,40,23,7,35,49,28,67,31,13,34,57],addresses_1_email_address:[61,36],strictli:[37,21,19,48,51,12,34,75],morrison:39,"__invert__":[25,30],myattribut:49,tupl:[38,40,19,23,7,35,65,8,67],regard:[0,1,21,5,8,9,10,14,17,19,20,26,29,30,37,38,39,40,41,23,7,35,49,50,55,57,58,59,66,67,28,25,70,76],jun:[1,38,40,19,7],jul:[1,38,40,19,23,7],"_flush":49,some_column:[69,31],faster:[1,39,23,7],notat:[19,7],reduce_column:[37,35,76],query_cal:[40,19],p_union:34,sub_typ:23,encompass:2,russel:35,incorrect:[1,38,40,19,23,7,51,25,75,76],count_1:[37,61,33,28],idiom:[8,72],rdbm:73,"__table__":[35,46,40,3,7],manager_data:[72,75],com
positeselect:23,symbol:[64,40,19,35,26,49,30,31,73,12,32,55,57,76],validate_email:34,postgresql_us:[51,19],orderedproperti:[30,23],allow_snapshot_isol:9,directori:[51,28,19,42,8],taavi:40,potenti:[1,21,46,40,19,42,7,69,35,26,23,8,49,28,70,67,72,73,55,34,76],binaryexpress:[33,35,30,38],invoic:[55,2],degrad:[38,35,19],threadlocalmetadata:[38,40,23,65,67,55],all:[1,38,39,40,19,23,7,35],lack:[40,3,23,66,73,19],"__table_args__":[40,19,7],scalar:[38,39,40,19,23,7,35,65,8,67],abil:[10,14,23,30,32,33,37,38,40,19,42,7,35,49,51,55,58,61,65,66,28,74,75],follow:[3,4,5,6,7,8,9,10,11,21,14,16,22,23,24,26,27,31,33,63,36,37,38,40,41,42,43,35,46,48,49,50,51,55,56,58,59,61,69,34,64,66,67,28,25,70,71,73,74,75,76],alt:[38,34],init:[1,27,28,31,34,57],program:[65,8],crappi:19,neglig:40,hemi:67,sometimestamp:58,far:[1,38,40,3,42,61,56,46,23,58,49,67,65,72,33,34,36,19],faq:48,no_stat:60,failur:[58,40,19,7,69,35,46,49,67],unoffici:35,ticket:[38,40,19,23,7,35,67],longtext:5,list:[1,38,39,40,19,23,7,35,65,8,67],entity_nam:[1,38,39,23,7,8],datetimeoffset:9,recipe_keyword:8,user_account:69,ten:[3,36,48],my_special_procedur:26,aptli:55,st_buffer:41,rate:28,design:[38,46,19,42,26,49,25,52,70,67,73,12,34,57],what:[1,38,39,40,19,23,7,44,35],sub:[38,40,19,23,7,35,65,67,18],briem:[35,19],sun:[38,40,19,23,7],sum:[37,38,41,7,64,49,36,76],brief:[35,76,31,42,19],overload:[25,30,23],version:[1,38,39,40,19,23,7,35,65,8,67],intersect:[37,38,40,23,42,7,64,67,31,33,76],visit_alter_t:74,deepli:[35,76,19,8],nullsfirst:[55,30,19,28],discern:19,contains_alia:[38,36],uniqueobject:[12,46],themselv:[4,13,15,17,18,23,26,30,33,34,1,39,40,19,42,7,35,46,50,55,57,58,62,16,28,69,72,76],append_hav:76,dispose_loc:28,behaviour:59,shouldn:[35,19,23],some_other_sess:49,ddl_event:56,misinterpret:[9,19],text_pattern_op:51,observ:[21,19,5,35,46,66,67,9,33],"_need_decimal_fix":19,nosetest:[40,19,42,28],filenam:[1,48,73],heurist:58,suddenli:61,biginteg:[5,25,40,11],proceed:[32,34,57,49,7],certainli:61,coverag:[28,40,19,23,7],con_proxi:[50,68],paramstyl:[26,27,7],minor:[28,39,7],flat:21,abspath:19,someclasshistori:42,some_identifi:5,stick:[35,40],known:[0,1,2,41,23,38,7,26,48,67,28,40,25,52,30,11,13,55,33,58,19],post_keyword:61,jim:33,someindex:2,"\u00e9ric":35,caveat:[37,22,19,23,69,6,35,72,49,59,31,12,40,56],primary_languag:72,aliasedclass:[37,19,64,49,28,31,13,75],dual:[27,34,66],useabl:38,remote_sid:[58,21,64,40,19,38,7,65,31],"__return_type__":41,cours:[0,21,46,40,3,23,61,5,26,58,49,8,64,75,28,33,36,19],goal:67,divid:40,rather:[0,1,10,11,26,30,34,37,38,40,19,23,7,35,48,49,51,57,58,61,69,28,25],resourc:[61,46,69,26,27,49,68,59,28,14,33],algebra:[40,67],itsystementwicklung:[23,7],reflect:[1,38,39,40,19,23,7,35],user_alias_fullnam:61,"short":[5,26,40,11,7],hstore:35,rollback_twophas:[50,68],caus:[1,2,3,21,5,9,7,30,16,36,37,38,39,40,19,23,43,35,46,48,49,59,51,55,57,58,60,69,65,66,25,72,73,74],callback:[14,26,16,19],informixdb:7,isout:[37,55,76],"_local_remote_pair":[35,23,31],isnt:1,subdirect:61,reconnect:[1,38,39,40,23,7,35,26,14],pginterv:38,"__weakref__":30,style:[1,38,40,19,23,4,7,26,27,48,67,8,9,10,52,58,28,55,33,56,76],call:[0,2,4,8,9,10,14,28,19,26,27,29,30,33,1,38,39,40,41,23,7,35,48,50,51,52,55,56,58,59,65,66,67,68,25,73,74,76],lai:33,generate_vers:34,depnam:28,resort:[40,66,25,9,14,34],select_stmt:37,primary_engin:26,might:[0,4,21,14,26,27,30,34,35,36,37,38,40,19,23,7,45,46,49,55,58,61,65,67,28,72,75,76],alter:[38,40,19,7,35,26,67,74],wouldn:[37,21,40,19,7,35,67,34],somejoinedsubclass:7,"return":[1,38,39,40,19,23,7,35,65,8],framework:[58,38,42,23
,61,35,49,28,63],tb_info:40,oystein:40,treenod:[65,42],refresh:[37,38,39,40,19,23,7,62,35,65,8,28,60,31,21,14,33,34,57],innodb:[38,2,19,21,5,46,72,11,40,55],compris:[21,23,69,35,46,52,55,34,76],unicod:[1,38,39,40,19,23,7,35],truncat:[38,40,19,23,7,35,48,30],memoryerror:19,weight:28,order_count:34,unilater:36,sidnam:48,linkag:[37,21,2,3,7,46,66,49,61],expect:[0,3,4,21,27,33,16,36,38,39,40,19,7,35,48,49,51,58,61,60,34,64,8,67,28,25,72],mistakenli:[38,23],adalias_user_id:36,foolproof:7,"_from_self":[40,67,23],unmappederror:60,as_utc:41,denormalize_nam:27,mstime:7,get_sess:[1,39,8],uncommit:[23,5,27,51,31,73],advanc:[58,20,42,23,62,64,33],nodes_3_parent_id:65,differ:[0,2,3,21,8,9,11,12,14,16,17,19,23,26,30,33,63,36,37,38,39,40,41,42,7,35,46,48,49,51,55,57,58,61,60,69,34,64,65,66,67,28,25,72,73,74,75,76],asldevi:35,thrown:[1,38,39,34,50],thread:[1,38,39,40,19,23,26,65,66,8,59,14,28,55,18],cls_or_mapp:37,perhap:[58,19,28,72,55,74,36],implicit_retunr:40,feed:[1,33,49],notifi:[16,19],feel:[55,65,28],mysql_limit:5,least:[0,38,2,19,21,16,35,46,65,58,67,28,25,61,72,73,40,55,33,56,76],blank:[1,38,40,19,23,7,67,61,75,76],script:[1,46,40,19,42,26,49,28,52,70,14,55],interact:[1,38,3,42,21,35,23,48,49,51,61,72,33,25],reentrant:[1,40,19,23,7,34],configure_mapp:19,store:[0,26,21,5,8,9,11,14,23,7,27,30,33,34,38,40,19,42,43,49,51,55,57,61,69,16,65,66,67,28,25,74,75],option:[1,38,39,40,19,23,7,35],propertyopt:7,king:26,kind:[58,30,8,22,19,24,38,35,26,65,66,67,48,59,10,40,14,28,18],celementtre:42,whenev:[58,61,40,19,10,16,26,49,50,30,34],remot:[58,38,40,19,23,7,35,27,66,2,55],remov:[1,38,39,40,19,23,7,35,65],cost:14,architect:[37,46,49],stale:[37,1,39,40,19,46,8,49,28,14,34],cleaner:[38,40],strengthen:39,keyword_1:12,expir:[37,38,39,40,19,23,7,62,35,26,58,67,8,46,60,31,21,28,34,57],dedic:[38,27,49],entireti:46,relationship:[1,38,39,40,19,23,35],florian:40,violat:[37,21,3,7,46,49],notset:40,foreman:19,calchipan:71,someaddress:[61,36],exec:[39,32,30,57,23],reach:[58,14,40],"_prepare_instrument":[8,7],react:34,parent_assoc:21,server_default:[35,19,8],promulg:3,address_alia:8,amaz:64,"_get_colspec":7,datetime2:[9,40],selecton:[38,39,8],"0x1411910":21,email2:61,email1:61,penalti:7,address_t:34,my_id:31,hit:[1,21,40,19,35,46,65,58,33],use_unicod:5,hid:38,sizabl:23,a_b_idx:5,instancenam:5,address_1:37,wrote:7,forego:[21,35,10,49,64],dump:[38,19,23,7,35,46,8,67,28,25,42,15,16],mutabl:[38,40,19,23,7,35,65,8],dropgeometrycolumn:42,signifi:55,arg:[1,38,39,40,19,23,7,35,65],disadvantag:[9,34],false_:30,lower_2:64,inconveni:46,tableclaus:[76,55,39,4],logging_nam:[14,26,40,48],idx_col34:2,subcclass:7,syntact:[30,40,72,76],"0x10151cd90":72,mymysqldialect:26,"0x12cde30":12,class_mapp:[19,7,8,49,13,34],succeed:[1,26],unitofwork:[1,39,40,19,23,7,67],solv:[35,46,40,67,28],classnam:[38,35],verbos:[38,64,61,46,8,21,12,33],orderitem:42,psycopg2_vers:19,roman:35,operar:49,context:[1,39,40,19,23,7,35,67,28],attrnam:[35,19,23],animalfact:42,foodialect:26,sweep:43,arbitrarili:[21,7],weasel:42,mistak:28,msgenericbinari:7,duh:1,java:[58,66],dub:58,msdoubl:23,whom:[24,43],adalias2:61,adalias1:61,strategi:[1,38,40,19,23,7,35,8,67,28,18],my_pool:29,parts_alia:[37,76],demand:[12,7],my_web_framework:49,frozen:74,batch:[37,38,40,23,7,26,28,34,57],kick:[61,3,23],behavior:[1,38,39,40,19,23,7,35],sw1:64,sw2:64,select_bi:[1,38,65,8],employee_id:[55,72,75],myothert:[58,3,72],set_ident:34,description_encod:7,get_refer:2,remote_id:2,associate_with:16,distinctrow:19,sqlite3:[1,40,48,67],agnost:[0,33,26,40],higher:[20,64,40,23,21,5,35,26,65,8,67,28,30,10
,62,55,70,25],somedatecol:8,text_as_varchar:[38,40],painless:67,wherea:[37,1,40,23,4,69,7,49,67,61,57,38,33,36],process_default:39,robust:[40,66,7],wherev:33,stateless:49,lower:[1,58,2,23,42,7,5,35,64,66,48,61,30,72,40,55,34],nullslast:[55,30,19,28],expire_al:[37,23,49],compile_mycolumn:74,chees:[12,70],init_fail:[32,7],programmingerror:[40,17],listen_for_col:58,relianc:[73,40,19,67,72],new_kw:49,from_obj:[37,38,75,23,76],theoret:[46,19,57],"_defaultcolumncompar":25,mydatabas:[9,35,48],finder:63,dataerror:17,vendor:[54,23],non_delet:49,newvalue2:3,table2:[38,76,4],table1:[58,38,4,26,30,76],user_alias_id:61,uninstall_descriptor:63,table_info:[61,55,33],newvalue1:3,use_label:[38,35,76,7],collect:[1,38,40,19,23,7,35],query_class:[21,31,7],msvarbinari:7,empl:59,global:[1,38,39,19,42,7,70,16,35,26,23,58,49,28,25,46,10,73,63,57],understood:[37,40,7,35,26,49,9,30,67,55,76],unspecifi:[27,23],child_id:[21,31,28],whydontforeignkeysloaddata:1,first_init:57,prop:[37,1,39,40,72,23,7,58,31,34],retri:17,object_sess:[38,40,21,35,49,32,34,57],address_email:34,leftmost:37,martin:69,viabl:35,tinker:28,squeez:7,elixir:[40,23,42,7],my_before_commit:57,internally_instru:[3,19],varbinari:[38,40,19,67],emphasi:[58,38,49,28],statment:[50,33],ellipsi:[61,33],ix_:2,announc:42,create_engin:[0,5,8,9,14,28,26,29,33,1,38,39,40,19,23,7,35,46,48,49,50,51,55,58,61,69,65,66,67,68,25,72,73],unit_pric:[37,30],fledg:[30,76],question:[35,26,19,28],"long":[3,21,9,14,28,23,26,30,34,38,40,19,42,7,35,49,58,61,66,67,68,73,76],adjac:[1,51,18],arithmet:[25,7],magamedov:35,with_par:[37,38,40,19,61,8,67,21],repeatedli:[40,3,7,49,25,33,19],numpost:65,delta:64,row1:67,row2:67,row3:67,consist:[1,38,39,40,19,23,7,35],caller:49,lifespan:[69,51,26,49,28],reorder:43,attributemanag:23,highlight:[61,33,67,28],invoice_item:[55,2],user2:[37,49],user1:49,after_insert:[1,32,35,57],sldate:[38,23],attribute_shard:[65,40],disallow_open_cursor:23,mslongtext:1,mock:[1,19,35,46,48,67],nice:[1,23,7,65,8,67,73],note_id:2,descriptor_prop:31,meaning:[19,35,8,49,28,50,34],"_stringtyp":[5,11],ternari:58,vice:[12,21,40,23,49],included_part:[37,76],casacd:23,apply_max:[38,8,23],edg:[37,40,19,42,7,35,17],nodes_3_nam:65,pymysql:[35,19,28],gang:39,edu:7,superfici:[61,33],child_id_two:58,estim:40,with_label:[37,35,40,76],distribute_setup:40,mymappedclass:[37,34,72,49],include_remov:[34,19],foo_tabl:65,relev:[21,39,3,7,50,11],"_as_declar":40,py2app:8,mssmallinteg:7,dealt:26,databaseerror:17,pleas:[4,5,6,8,9,11,28,22,42,32,34,37,21,23,7,35,46,49,59,51,55,66,67,68,73],dmitri:35,last_updated_param:[26,35,10,23,4],cfg:19,somedb:56,fold:27,total_bal:64,greaterthan10:30,fullnam:[21,61,56,65,49,33,34,36],acid:69,folk:23,compat:[1,38,39,40,19,23,7,35,65,8,67],listen_for_reflect:[50,55],before_parent_attach:[50,25],compar:[38,40,19,23,7,35,8,67,28],subselect:[1,75,7],before_execut:50,extrajob:58,my_association_table_1:31,"enum":[35,40,19,23],sqlsession:39,larger:[1,58,39,40,4,35,7,67,75,30,57,32,34,55,36,76],"_name":[33,72],typic:[0,2,4,10,14,15,17,26,27,30,33,38,40,19,23,7,35,48,50,51,55,56,58,65,67,28,25,73,76],weakvaluedictionari:[1,35],eagerload_al:[40,19,23],forbid:[38,67],appli:[1,38,40,19,23,7,35,8,67,28],app:[35,19,23],inequ:7,api:[38,40,19,23,7,35],duck:[12,3,23,19],fed:28,from:[1,38,39,40,19,23,7,35],idx_col12:2,supports_unicode_bind:[9,35,27,19],correspond_on_equival:[55,76],call_operation1:26,few:[58,3,23,7,46,48,49,28,25,30,67,42,55,19],call_operation3:26,call_operation2:26,usr:46,inet:[58,51,25,21],sort:[0,1,39,40,19,38,35,7,55,17],pymsql:28,with_hint:[37,40,19,4,
35,51,76],quote_schema:[1,19,7,27,8,10,55],augment:[58,39,23,35,8,14],raw_input:14,critic:[38,46,19,23,7,35,26,67,28,75,33,57],annoi:28,executor:[46,48],tap:28,atomicity_:69,tag:[12,1,34,3],entry_id:21,xmin:[35,56],subsystem:28,sqltype:[38,16],sit:23,six:[1,19],memoiz:[69,16,64,40,19],subdirectori:1,instead:[1,38,39,40,19,23,7,35,65,8,67,28],chri:[35,40],msdn:9,express:[1,38,39,40,19,23,7,35],dynamicattributeimpl:7,tack:[27,34,19,7],attent:[61,67],dontwrapmixin:[17,19],urldecod:9,get_isolation_level:27,attributest:[58,13,31],postgi:[58,25,35,18,7],run_my_program:49,clauseel:[58,38,40,19,23,4,69,7,26,27,49,68,50,30,67,31,13,55,74,76],elif:[25,64,16,49],elig:[69,76,30,7],reilli:[19,8],yield_curve_2:30,yield_curve_1:30,msstring:38,"_updatebas":51,crash:[69,19,7],devel:[59,40],my_before_insert_listen:57,warranti:24,trac:[1,40,42,23,7,66,67,19],tran:[1,26,65,49],oracledialect:[33,23],schlicht:[35,19],table1_col1:58,table1_col2:58,returns_row:[26,40,19],our:[37,21,75,64,41,61,56,26,65,49,28,25,67,72,16,12,33,34,36,76],out:[1,38,39,40,19,23,7,35],forseeabl:40,subqueryload:[35,40,19,67],selectfirst_bi:[38,39,8],categori:[0,21,27],metaclass:[16,40,19,67,72],sql_mode:5,node_2:21,node_1:[21,64],dictionari:[0,4,5,8,14,28,18,26,27,30,33,38,39,40,19,23,7,35,48,50,51,55,56,69,65,66,67,68,74,76],my_ddl:56,blogpost:61,mstimestamp:38,"__delete__":69,ext_stop:[32,57,23],echo:[58,38,26,40,61,46,48,28,25,14,33],priamri:1,plainli:67,child_on:58,auto_convert_lob:[38,66,7],"_numerictyp":[5,11],foo_id:28,conjunect:14,unknown:[60,40,41,23,7,35,25,30,34],sep:[1,40,66,23,7],capac:[40,35,27],misunderstood:7,userkeywords_t:12,shell:9,"__del__":[1,14,26,35],shelf:49,juli:[35,28],shallow:25,chisholm:35,sqlsoup:[39,40,19],after_rollback:[32,57,49],innerjoin:40,as_uuid:[51,40],diverg:[37,66,28],rout:[40,62,48,67,16,18],which:[0,2,4,5,8,9,10,11,13,14,15,17,19,26,27,28,29,30,33,1,38,39,40,41,23,7,35,48,50,51,52,55,56,58,59,65,66,67,68,25,73,74,76],divers:28,defaultcompil:27,msenum:[5,1,40,7],who:[58,38,40,19,23,4,65,8,49,52,61,55,33],sql_transf:65,"class":[1,38,39,40,19,23,7,35,65,8,67,28],sc1modifi:42,connectionless:[74,23,18],test_vers:42,apply_sum:[38,8,23],pipe:26,abovement:1,determin:[1,4,5,9,10,11,12,14,23,21,26,30,33,63,37,38,40,19,42,7,35,46,48,49,50,55,56,58,61,69,34,66,67,25,72,75],fowler:69,users_user_id:36,"\u00fe\u00f3r":[35,19],primary_key_from_inst:34,o56asktom:66,dbapi_connect:[50,14,27,73],tree_nod:65,"__emulates__":3,fear:33,sqlcompil:[74,27],identityset:[61,19],locat:[0,2,4,10,21,17,23,26,30,31,33,34,37,38,40,19,42,7,35,49,61,64,65,67,28,25,75,76],test_tabl:58,local:[1,2,3,21,9,12,14,18,23,25,26,31,34,37,38,39,40,19,42,7,35,46,51,55,57,58,62,65,66,67,28,69,72,73,75,76],nested_set:42,"_cte_alia":76,contribut:[39,42],polymorphic_on:[40,19,23,7],ralston:1,pypi:[40,19,7,5,35,46,28,59,51,9,70,22],true_:30,notimpl:30,lossless:40,pyformat:[26,28],pk_constraint:58,include_collect:[32,57,23,49],partit:[19,23,62,28,30,18],view:[39,40,19,23,7,35,27,8,54,13,55],modulo:38,clausevisitor:[38,27,23],referred_t:[0,27],multipart:7,gmail:38,closer:46,closet:28,favor:[1,38,40,19,23,7,64,65,67,28,72,34],celko:7,crude:10,sqlj_jdbc:66,"_compared_to_typ":30,addresses_email_address:[61,36],somedai:[1,35,28],typeprocess:39,job:[1,38,46,40,3,35,26,58,49,28,56],entir:[1,3,4,5,7,8,9,21,12,14,15,26,33,63,37,38,39,40,19,43,35,46,48,49,50,53,57,58,61,69,34,65,66,67,28,25,72,75,76],addit:[0,2,4,14,19,26,27,29,30,33,38,39,40,41,23,7,35,48,50,52,55,56,58,59,67,28,25,76],polymorphic_union:[1,38,19,7,72,34,75],mediumint:[5,1],fk_element_parent
_node_id:2,april:35,grain:[67,40,8,54],visit_funct:74,get_table_nam:[0,58,35,27,19],myconstraint:74,no_paramet:[26,19],my_alia:37,schema_visitor:55,lshift:35,respect:[38,40,19,23,7,35,28,51,9,72,73,33,76],platform:[35,40,19,7],decent:[19,23],"__set__":69,trademark:24,ssl_:38,compon:[1,61,46,42,7,69,26,27,48,52,70,13,33,34],besid:[46,39,19],hashing_t:65,pref_id:[55,2],mike:1,presenc:[1,38,40,61,23,7,56,35,26,49,51,10,67,55,33,34,25,76],postgresql_op:[51,19],x_2:33,x_1:[74,33],present:[1,38,40,19,23,7,35,65,8,67],fbtime:23,somemapp:49,align:21,cursor:[1,38,40,19,23,7,35,26,48,54,28,50,10,67,17,18],singletonthreadpool:[1,40,23,46,48,28,73,14],richi:19,last_inserted_id:[38,35,40,67],dsnname:[5,9,22],layer:[58,40,42,23,5,65,69,70],customiz:[55,35,34,48],"__import__":[40,19],bigint:[38,40,19,25,5,35,67,51,9,11,55],motiv:[40,28],uq_:29,lightweight:[30,3,76],recal:[61,21,26,49,74,33],avg:[37,38,28],timestampmixin:72,scalarselect:[33,76],uniti:55,justlik:40,cross:[40,23,7],member:[65,40,19,7],from_select:[35,74,4],largest:[14,67],somerel:[31,49,28],raw_connect:26,difficult:[58,40,19,23,26,8],col5:[56,2],supports_unicode_stat:27,interval_id:64,sqalchemi:9,bastien:38,collat:[38,40,19,23,7,35],surpris:[23,8,67,28,49,33],retryabl:19,english:38,methodlog:1,users_nam:[37,61,35,36],obtain:[69,24,76],tcp:[69,51],"_instanc":38,heavili:[58,38,40,19,23,69,67,11],simultan:[7,35,49,36,14,75,76],ncsmag:38,booleanexpress:38,smith:34,before_upd:[37,32,57,35,23],book:[34,8],jython:[40,5,35,66,67,48,51,9,70],branch:[21,19,35,26,30,55],mypar:21,know:[58,38,39,40,3,23,21,35,26,49,9,30,67,72,33,34,19,36,41],python3:67,set_committed_st:35,sk1:12,sk3:12,sk2:12,unord:19,"0xff":[33,55,30],is_backref:8,us_english:9,exclude_column:[0,23],growth:16,"export":[58,39,40,19,23,7,35,67,55,76],superclass:[38,40,19,23,7,35,28],smoothli:61,lead:[37,61,46,40,3,23,7,16,35,26,8,49,28,14,34,75,19],leak:[39,40,19,7,5,35,28],lean:11,hstore_data:58,leav:[1,38,40,3,23,43,7,26,65,72,67,59,61,31,73,14,34,35,75,19],settl:58,unitpric:37,distinctli:[62,35,46,40,28],customer_id:34,oracledatetim:23,acronym:69,dosometh:38,bindparamet:[25,35,30],"_floattyp":[5,11],obei:[13,23],interdepend:[40,49],rare:[21,40,3,23,7,35,66,49,8,25,9,61,57,31,28,34,36,19],anon_1_email_address:61,column:[1,38,39,40,19,23,7,35,65,8,67],trustedconnect:7,"0x12a6590":21,checking_bal:74,constructor:[1,38,40,19,23,7,35,65,67],disabl:[38,40,19,23,7,35,8,67,28],use_information_schema:1,own:[1,2,3,8,10,13,16,23,26,27,31,32,33,63,37,21,39,40,19,42,7,35,46,49,50,51,57,58,61,34,65,66,67,28,25,70,72,73,75,76],automat:[1,38,40,19,23,7,35],pool_recycl:[5,1,14,46,48],weather:42,automag:9,oraclebinari:[38,7],explcit:[38,40,23,7],transfer:[40,49],intention:[33,7],appl:35,keyedtupl:[37,61,35],replac:[1,38,55,40,19,23,4,7,26,65,58,67,68,50,30,13,14,28,8,35,76],"var":51,checkconstraint:[38,40,19,35,67,2,56],information_schema:[39,19],simplejson:25,unwrap:[50,35,3],classmanag:[63,34,57,31,7],eng_plus_manag:75,addresses_id:[61,36],made:[1,38,39,40,19,23,7,35,8,28],history_meta:[40,19,42],temp:28,whether:[1,21,8,9,10,12,14,24,26,27,30,32,34,37,38,40,19,23,35,48,49,50,51,55,56,57,64,66,67,28,25,72,73],ext_continu:[32,57,23,8],record:[37,1,19,42,21,46,48,49,68,61,14,34,36],below:[0,2,3,21,5,9,10,11,12,13,14,26,33,34,36,37,38,40,19,42,35,46,49,59,56,58,61,69,16,64,66,67,28,25,70,72,74,75,76],query_from_par:[38,40,67],propcompar:[58,64,65,28,30,31,34],oldinit:32,meaningless:35,set_shard:45,mutual:[40,7,62,2,55,17,18],preexecute_sequ:[8,23],percent:[19,7],other:[1,38,39,40,19,23,7,35,65,8],bool:[38,2
,23,25,35,67,51,56],addresses_properti:31,process_bind_param:[25,30,16,17,23],my_custom_compar:23,some_column_annot:69,june:67,nodes_1_id:65,reliabl:[37,19,23,69,8,49],with_vari:[25,19],getlogg:[51,48]},objtypes:{"0":"py:module","1":"py:class","2":"py:method","3":"py:function","4":"py:attribute","5":"py:exception","6":"py:classmethod","7":"py:data","8":"py:staticmethod"},objnames:{"0":["py","module","Python module"],"1":["py","class","Python class"],"2":["py","method","Python method"],"3":["py","function","Python function"],"4":["py","attribute","Python attribute"],"5":["py","exception","Python exception"],"6":["py","classmethod","Python class method"],"7":["py","data","Python data"],"8":["py","staticmethod","Python static method"]},filenames:["core/reflection","changelog/changelog_02","core/constraints","orm/collections","core/dml","dialects/mysql","dialects/informix","changelog/changelog_05","changelog/migration_05","dialects/mssql","core/defaults","dialects/drizzle","orm/extensions/associationproxy","core/inspection","core/pooling","core/serializer","orm/extensions/mutable","core/exceptions","contents","changelog/changelog_07","index","orm/relationships","dialects/sybase","changelog/changelog_04","copyright","core/types","core/connections","core/internals","changelog/migration_07","core/event","core/sqlelement","orm/internals","orm/deprecated","core/tutorial","orm/mapper_config","changelog/changelog_08","orm/loading","orm/query","changelog/changelog_03","changelog/changelog_01","changelog/changelog_06","core/functions","orm/examples","orm/extensions/orderinglist","changelog/index","orm/extensions/horizontal_shard","faq","core/expression_api","core/engines","orm/session","core/events","dialects/postgresql","core/schema","orm/extensions/index","core/index","core/metadata","core/ddl","orm/events","changelog/migration_08","dialects/firebird","orm/exceptions","orm/tutorial","orm/index","orm/extensions/instrumentation","orm/extensions/hybrid","changelog/migration_04","dialects/oracle","changelog/migration_06","core/interfaces","glossary","intro","dialects/index","orm/extensions/declarative","dialects/sqlite","core/compiler","orm/inheritance","core/selectable"],titles:["Reflecting Database Objects","0.2 Changelog","Defining Constraints and Indexes","Collection Configuration and Techniques","Insert, Updates, Deletes","MySQL","Informix","0.5 Changelog","What’s new in SQLAlchemy 0.5?","Microsoft SQL Server","Column Insert/Update Defaults","Drizzle","Association Proxy","Runtime Inspection API","Connection Pooling","Expression Serializer Extension","Mutation Tracking","Core Exceptions","Table of Contents","0.7 Changelog","SQLAlchemy Documentation","Relationship Configuration","Sybase","0.4 Changelog","Appendix: Copyright","Column and Data Types","Working with Engines and Connections","Core Internals","What’s New in SQLAlchemy 0.7?","Events","Column Elements and Expressions","ORM Internals","Deprecated ORM Event Interfaces","SQL Expression Language Tutorial","Mapper Configuration","0.8 Changelog","Relationship Loading Techniques","Querying","0.3 Changelog","0.1 Changelog","0.6 Changelog","SQL and Generic Functions","Examples","Ordering List","Changes and Migration","Horizontal Sharding","Frequently Asked Questions","SQL Statements and Expressions API","Engine Configuration","Using the Session","Core Events","PostgreSQL","Schema Definition Language","ORM Extensions","SQLAlchemy Core","Describing Databases with MetaData","Customizing DDL","ORM Events","What’s New in SQLAlchemy 0.8?","Firebird","ORM 
Exceptions","Object Relational Tutorial","SQLAlchemy ORM","Alternate Class Instrumentation","Hybrid Attributes","What’s new in SQLAlchemy 0.4?","Oracle","What’s New in SQLAlchemy 0.6?","Deprecated Event Interfaces","Glossary","Overview","Dialects","Declarative","SQLite","Custom SQL Constructs and Compilation Extension","Mapping Class Inheritance Hierarchies","Selectables, Tables, FROM objects"],objects:{"":{dogpile_caching:[42,0,1,""],generic_associations:[42,0,1,""],vertical:[42,0,1,""],inheritance:[42,0,1,""],nested_sets:[42,0,1,""],versioning:[42,0,1,""],postgis:[42,0,1,""],adjacency_list:[42,0,1,""],graphs:[42,0,1,""],large_collection:[42,0,1,""],custom_attributes:[42,0,1,""],dynamic_dict:[42,0,1,""],sharding:[42,0,1,""],elementtree:[42,0,1,""],association:[42,0,1,""]},"sqlalchemy.sql.expression.TableClause":{count:[76,2,1,""],insert:[76,2,1,""],compare:[76,2,1,""],join:[76,2,1,""],primary_key:[76,4,1,""],is_derived_from:[76,2,1,""],c:[76,4,1,""],compile:[76,2,1,""],correspond_on_equivalents:[76,2,1,""],update:[76,2,1,""],alias:[76,2,1,""],outerjoin:[76,2,1,""],select:[76,2,1,""],foreign_keys:[76,4,1,""],columns:[76,4,1,""],self_group:[76,2,1,""],implicit_returning:[76,4,1,""],replace_selectable:[76,2,1,""],corresponding_column:[76,2,1,""],"delete":[76,2,1,""]},"sqlalchemy.sql.expression.ColumnElement":{compare:[30,2,1,""],shares_lineage:[30,2,1,""],startswith:[30,2,1,""],notin_:[30,2,1,""],endswith:[30,2,1,""],isnot:[30,2,1,""],op:[30,2,1,""],"__lt__":[30,2,1,""],reverse_operate:[30,2,1,""],"__init__":[30,4,1,""],proxy_set:[30,4,1,""],notilike:[30,2,1,""],is_selectable:[30,4,1,""],in_:[30,2,1,""],primary_key:[30,4,1,""],distinct:[30,2,1,""],"__eq__":[30,2,1,""],contains:[30,2,1,""],ilike:[30,2,1,""],get_children:[30,2,1,""],label:[30,2,1,""],notlike:[30,2,1,""],quote:[30,4,1,""],foreign_keys:[30,4,1,""],params:[30,2,1,""],collate:[30,2,1,""],timetuple:[30,4,1,""],between:[30,2,1,""],nullsfirst:[30,2,1,""],type:[30,4,1,""],match:[30,2,1,""],"__ne__":[30,2,1,""],is_:[30,2,1,""],supports_execution:[30,4,1,""],asc:[30,2,1,""],self_group:[30,2,1,""],base_columns:[30,4,1,""],is_clause_element:[30,4,1,""],concat:[30,2,1,""],desc:[30,2,1,""],nullslast:[30,2,1,""],like:[30,2,1,""],comparator:[30,4,1,""],unique_params:[30,2,1,""],bind:[30,4,1,""],compile:[30,2,1,""],"__le__":[30,2,1,""],operate:[30,2,1,""],expression:[30,4,1,""],anon_label:[30,4,1,""]},"sqlalchemy.schema.DDL":{"__init__":[56,2,1,""]},"sqlalchemy.engine.Connection":{info:[26,4,1,""],invalidate:[26,2,1,""],begin:[26,2,1,""],transaction:[26,2,1,""],execution_options:[26,2,1,""],contextual_connect:[26,2,1,""],in_transaction:[26,2,1,""],connection:[26,4,1,""],execute:[26,2,1,""],invalidated:[26,4,1,""],"__init__":[26,2,1,""],scalar:[26,2,1,""],connect:[26,2,1,""],closed:[26,4,1,""],begin_nested:[26,2,1,""],close:[26,2,1,""],detach:[26,2,1,""],run_callable:[26,2,1,""],begin_twophase:[26,2,1,""]},"sqlalchemy.dialects.postgresql.ranges.RangeOperators":{comparator_factory:[51,1,1,""]},"sqlalchemy.engine.interfaces.Dialect":{reset_isolation_level:[27,2,1,""],do_recover_twophase:[27,2,1,""],do_prepare_twophase:[27,2,1,""],get_view_definition:[27,2,1,""],denormalize_name:[27,2,1,""],connect:[27,2,1,""],create_xid:[27,2,1,""],do_commit:[27,2,1,""],type_descriptor:[27,6,1,""],do_savepoint:[27,2,1,""],get_table_names:[27,2,1,""],has_table:[27,2,1,""],do_begin_twophase:[27,2,1,""],get_unique_constraints:[27,2,1,""],do_rollback_twophase:[27,2,1,""],do_execute_no_params:[27,2,1,""],get_columns:[27,2,1,""],do_rollback_to_savepoint:[27,2,1,""],do_e
xecute:[27,2,1,""],do_rollback:[27,2,1,""],set_isolation_level:[27,2,1,""],get_pk_constraint:[27,2,1,""],do_executemany:[27,2,1,""],do_begin:[27,2,1,""],initialize:[27,2,1,""],get_foreign_keys:[27,2,1,""],do_release_savepoint:[27,2,1,""],create_connect_args:[27,2,1,""],normalize_name:[27,2,1,""],has_sequence:[27,2,1,""],get_isolation_level:[27,2,1,""],get_indexes:[27,2,1,""],do_commit_twophase:[27,2,1,""],do_close:[27,2,1,""],is_disconnect:[27,2,1,""],get_view_names:[27,2,1,""],reflecttable:[27,2,1,""],get_primary_keys:[27,2,1,""]},"sqlalchemy.orm.query":{QueryContext:[31,1,1,""],Query:[37,1,1,""]},"sqlalchemy.orm.properties.ColumnProperty":{ColumnComparator:[31,4,1,""],expression:[31,4,1,""],"__init__":[31,2,1,""],Comparator:[31,1,1,""]},"sqlalchemy.types.Interval":{coerce_compared_value:[25,2,1,""],"__init__":[25,2,1,""],impl:[25,4,1,""]},"sqlalchemy.interfaces":{PoolListener:[68,1,1,""],ConnectionProxy:[68,1,1,""]},"sqlalchemy.sql.functions":{random:[41,1,1,""],session_user:[41,1,1,""],current_timestamp:[41,1,1,""],current_time:[41,1,1,""],now:[41,1,1,""],min:[41,1,1,""],sum:[41,1,1,""],localtimestamp:[41,1,1,""],char_length:[41,1,1,""],coalesce:[41,1,1,""],current_date:[41,1,1,""],current_user:[41,1,1,""],localtime:[41,1,1,""],count:[41,1,1,""],ReturnTypeFromArgs:[41,1,1,""],max:[41,1,1,""],user:[41,1,1,""],next_value:[41,1,1,""],concat:[41,1,1,""],sysdate:[41,1,1,""],GenericFunction:[41,1,1,""],AnsiFunction:[41,1,1,""],register_function:[41,3,1,""]},"sqlalchemy.dialects.drizzle.DOUBLE":{"__init__":[11,2,1,""]},"sqlalchemy.pool.SingletonThreadPool":{"__init__":[14,2,1,""]},"sqlalchemy.dialects.drizzle.REAL":{"__init__":[11,2,1,""]},"sqlalchemy.types.Numeric":{"__init__":[25,2,1,""]},"sqlalchemy.dialects.sybase":{mxodbc:[22,0,1,""],pysybase:[22,0,1,""],base:[22,0,1,""],pyodbc:[22,0,1,""]},"sqlalchemy.orm.collections.MappedCollection":{set:[3,2,1,""],setdefault:[3,2,1,""],clear:[3,2,1,""],update:[3,2,1,""],pop:[3,2,1,""],remove:[3,2,1,""],popitem:[3,2,1,""],"__init__":[3,2,1,""]},"sqlalchemy.schema.CreateTable":{"__init__":[56,2,1,""]},"sqlalchemy.sql.expression.ClauseList":{compare:[30,2,1,""]},"sqlalchemy.inspection":{inspect:[13,3,1,""]},"sqlalchemy.orm.util":{AliasedClass:[37,1,1,""],object_state:[49,3,1,""],AliasedInsp:[37,1,1,""],polymorphic_union:[34,3,1,""],identity_key:[34,3,1,""]},"sqlalchemy.types.LargeBinary":{"__init__":[25,2,1,""]},"sqlalchemy.engine.url.URL":{get_dialect:[48,2,1,""],translate_connect_args:[48,2,1,""]},"sqlalchemy.types.TypeDecorator":{bind_expression:[25,2,1,""],type_engine:[25,2,1,""],python_type:[25,4,1,""],process_bind_param:[25,2,1,""],compare_values:[25,2,1,""],result_processor:[25,2,1,""],with_variant:[25,2,1,""],dialect_impl:[25,2,1,""],coerce_to_is_types:[25,4,1,""],compile:[25,2,1,""],bind_processor:[25,2,1,""],get_dbapi_type:[25,2,1,""],adapt:[25,2,1,""],load_dialect_impl:[25,2,1,""],column_expression:[25,2,1,""],copy:[25,2,1,""],coerce_compared_value:[25,2,1,""],"__init__":[25,2,1,""],process_result_value:[25,2,1,""]},"sqlalchemy.ext.instrumentation":{ExtendedInstrumentationRegistry:[63,1,1,""],instrumentation_finders:[63,7,1,""],InstrumentationManager:[63,1,1,""],INSTRUMENTATION_MANAGER:[63,7,1,""]},"sqlalchemy.orm.mapper":{Mapper:[34,1,1,""]},"sqlalchemy.orm.events":{InstrumentationEvents:[57,1,1,""],MapperEvents:[57,1,1,""],SessionEvents:[57,1,1,""],InstanceEvents:[57,1,1,""],AttributeEvents:[57,1,1,""]},"sqlalchemy.dialects.drizzle.CHAR":{"__init__":[11,2,1,""]},"sqlalchemy.schema.ForeignKey":{get_referent:[2,2,1,""],column:[2,4,1,""],referen
ces:[2,2,1,""],copy:[2,2,1,""],"__init__":[2,2,1,""],target_fullname:[2,4,1,""]},"sqlalchemy.pool":{NullPool:[14,1,1,""],clear_managers:[14,3,1,""],manage:[14,3,1,""],StaticPool:[14,1,1,""],SingletonThreadPool:[14,1,1,""],Pool:[14,1,1,""],AssertionPool:[14,1,1,""],QueuePool:[14,1,1,""]},"sqlalchemy.orm.attributes.History":{non_deleted:[49,2,1,""],sum:[49,2,1,""],non_added:[49,2,1,""],has_changes:[49,2,1,""],empty:[49,2,1,""]},"sqlalchemy.dialects.postgresql.INET":{"__init__":[51,2,1,""]},"sqlalchemy.dialects.postgresql.ranges":{RangeOperators:[51,1,1,""]},"sqlalchemy.dialects.mysql.SMALLINT":{"__init__":[5,2,1,""]},"sqlalchemy.orm.attributes.QueryableAttribute":{info:[31,4,1,""],property:[31,4,1,""],parent:[31,4,1,""]},"sqlalchemy.orm.interfaces._InspectionAttr":{is_selectable:[31,4,1,""],is_attribute:[31,4,1,""],extension_type:[31,4,1,""],is_instance:[31,4,1,""],is_aliased_class:[31,4,1,""],is_property:[31,4,1,""],is_mapper:[31,4,1,""],is_clause_element:[31,4,1,""]},"sqlalchemy.sql.expression.FromClause":{count:[76,2,1,""],is_derived_from:[76,2,1,""],join:[76,2,1,""],primary_key:[76,4,1,""],c:[76,4,1,""],columns:[76,4,1,""],correspond_on_equivalents:[76,2,1,""],alias:[76,2,1,""],foreign_keys:[76,4,1,""],corresponding_column:[76,2,1,""],outerjoin:[76,2,1,""],replace_selectable:[76,2,1,""],select:[76,2,1,""],description:[76,4,1,""]},"sqlalchemy.dialects.drizzle.TIMESTAMP":{"__init__":[11,2,1,""]},"sqlalchemy.ext.associationproxy":{ASSOCIATION_PROXY:[12,7,1,""],AssociationProxy:[12,1,1,""],association_proxy:[12,3,1,""]},"sqlalchemy.sql.functions.current_date":{identifier:[41,4,1,""],type:[41,4,1,""],name:[41,4,1,""]},"sqlalchemy.dialects.postgresql.ARRAY.Comparator":{all:[51,2,1,""],contains:[51,2,1,""],contained_by:[51,2,1,""],overlap:[51,2,1,""],any:[51,2,1,""]},"sqlalchemy.sql.functions.char_length":{identifier:[41,4,1,""],type:[41,4,1,""],name:[41,4,1,""]},"sqlalchemy.dialects.mysql.DATE":{"__init__":[5,2,1,""]},"sqlalchemy.sql.functions.current_time":{identifier:[41,4,1,""],type:[41,4,1,""],name:[41,4,1,""]},"sqlalchemy.orm.events.MapperEvents":{before_delete:[57,2,1,""],append_result:[57,2,1,""],create_instance:[57,2,1,""],translate_row:[57,2,1,""],after_delete:[57,2,1,""],mapper_configured:[57,2,1,""],before_update:[57,2,1,""],before_insert:[57,2,1,""],populate_instance:[57,2,1,""],after_insert:[57,2,1,""],after_update:[57,2,1,""],instrument_class:[57,2,1,""],after_configured:[57,2,1,""]},"sqlalchemy.sql.functions.current_user":{identifier:[41,4,1,""],type:[41,4,1,""],name:[41,4,1,""]},"sqlalchemy.orm.session.sessionmaker":{close_all:[49,6,1,""],object_session:[49,6,1,""],configure:[49,2,1,""],identity_key:[49,6,1,""],"__call__":[49,2,1,""],"__init__":[49,2,1,""]},"sqlalchemy.dialects.postgresql.hstore":{type:[51,4,1,""]},"sqlalchemy.ext.horizontal_shard":{ShardedQuery:[45,1,1,""],ShardedSession:[45,1,1,""]},"sqlalchemy.sql.expression.Alias":{count:[76,2,1,""],compare:[76,2,1,""],join:[76,2,1,""],primary_key:[76,4,1,""],unique_params:[76,2,1,""],foreign_keys:[76,4,1,""],c:[76,4,1,""],correspond_on_equivalents:[76,2,1,""],compile:[76,2,1,""],self_group:[76,2,1,""],select:[76,2,1,""],alias:[76,2,1,""],params:[76,2,1,""],columns:[76,4,1,""],outerjoin:[76,2,1,""],replace_selectable:[76,2,1,""],corresponding_column:[76,2,1,""]},"sqlalchemy.ext.mutable.MutableBase":{coerce:[16,6,1,""],"_parents":[16,4,1,""]},"sqlalchemy.ext.declarative.api":{"_declarative_constructor":[72,3,1,""]},"sqlalchemy.orm.interfaces":{NOT_EXTENSION:[31,7,1,""],PropComparator:[31,1,1,""],MapperExtension:[32,1,1,""],"_
InspectionAttr":[31,1,1,""],MapperProperty:[31,1,1,""],SessionExtension:[32,1,1,""],AttributeExtension:[32,1,1,""]},"sqlalchemy.sql.functions.max":{identifier:[41,4,1,""],name:[41,4,1,""]},"sqlalchemy.orm.scoping.scoped_session":{"__call__":[49,2,1,""],query_property:[49,2,1,""],configure:[49,2,1,""],"__init__":[49,2,1,""],remove:[49,2,1,""]},"sqlalchemy.types.TypeEngine":{hashable:[25,4,1,""],bind_expression:[25,2,1,""],result_processor:[25,2,1,""],python_type:[25,4,1,""],Comparator:[25,1,1,""],compare_values:[25,2,1,""],with_variant:[25,2,1,""],dialect_impl:[25,2,1,""],compile:[25,2,1,""],comparator_factory:[25,4,1,""],bind_processor:[25,2,1,""],get_dbapi_type:[25,2,1,""],adapt:[25,2,1,""],coerce_compared_value:[25,2,1,""],column_expression:[25,2,1,""],"__init__":[25,2,1,""]},"sqlalchemy.sql.functions.min":{identifier:[41,4,1,""],name:[41,4,1,""]},"sqlalchemy.orm.events.InstrumentationEvents":{class_instrument:[57,2,1,""],class_uninstrument:[57,2,1,""],attribute_instrument:[57,2,1,""]},"sqlalchemy.dialects.informix":{informixdb:[6,0,1,""],base:[6,0,1,""]},"sqlalchemy.dialects.mysql.ENUM":{"__init__":[5,2,1,""]},"sqlalchemy.orm.descriptor_props.CompositeProperty":{get_history:[31,2,1,""],Comparator:[31,1,1,""],do_init:[31,2,1,""]},"sqlalchemy.dialects.mssql.SQL_VARIANT":{"__init__":[9,2,1,""]},"sqlalchemy.types.Boolean":{"__init__":[25,2,1,""]},"sqlalchemy.dialects.mysql.FLOAT":{"__init__":[5,2,1,""]},"sqlalchemy.sql.expression.Delete":{execute:[4,2,1,""],execution_options:[4,2,1,""],unique_params:[4,2,1,""],bind:[4,4,1,""],returning:[4,2,1,""],compare:[4,2,1,""],compile:[4,2,1,""],with_hint:[4,2,1,""],scalar:[4,2,1,""],params:[4,2,1,""],self_group:[4,2,1,""],prefix_with:[4,2,1,""],where:[4,2,1,""]},"sqlalchemy.dialects.mssql.VARCHAR":{"__init__":[9,2,1,""]},"sqlalchemy.sql.operators":{custom_op:[30,1,1,""],Operators:[30,1,1,""],ColumnOperators:[30,1,1,""]},"sqlalchemy.pool.Pool":{unique_connection:[14,2,1,""],recreate:[14,2,1,""],dispose:[14,2,1,""],connect:[14,2,1,""],"__init__":[14,2,1,""]},"sqlalchemy.dialects.drizzle.BIGINT":{"__init__":[11,2,1,""]},"sqlalchemy.ext.hybrid.hybrid_method":{expression:[64,2,1,""],"__init__":[64,2,1,""]},"sqlalchemy.ext.orderinglist.OrderingList":{insert:[43,2,1,""],remove:[43,2,1,""],pop:[43,2,1,""],reorder:[43,2,1,""],append:[43,2,1,""],"__init__":[43,2,1,""]},"sqlalchemy.schema.Index":{bind:[2,4,1,""],create:[2,2,1,""],drop:[2,2,1,""],"__init__":[2,2,1,""]},"sqlalchemy.sql.operators.ColumnOperators":{"__rtruediv__":[30,2,1,""],"__lshift__":[30,2,1,""],"__ne__":[30,2,1,""],"__str__":[30,4,1,""],notin_:[30,2,1,""],"__getattribute__":[30,4,1,""],"__radd__":[30,2,1,""],"__rmul__":[30,2,1,""],"__truediv__":[30,2,1,""],"__rsub__":[30,2,1,""],isnot:[30,2,1,""],"__rdiv__":[30,2,1,""],"__and__":[30,2,1,""],contains:[30,2,1,""],"__sizeof__":[30,2,1,""],reverse_operate:[30,2,1,""],"__lt__":[30,2,1,""],"__init__":[30,4,1,""],"__sub__":[30,2,1,""],"__rshift__":[30,2,1,""],notilike:[30,2,1,""],"__setattr__":[30,4,1,""],in_:[30,2,1,""],"__getitem__":[30,2,1,""],distinct:[30,2,1,""],"__weakref__":[30,4,1,""],"__format__":[30,2,1,""],ilike:[30,2,1,""],notlike:[30,2,1,""],collate:[30,2,1,""],timetuple:[30,4,1,""],between:[30,2,1,""],nullsfirst:[30,2,1,""],match:[30,2,1,""],"__repr__":[30,4,1,""],startswith:[30,2,1,""],is_:[30,2,1,""],"__invert__":[30,2,1,""],"__reduce__":[30,2,1,""],asc:[30,2,1,""],"__reduce_ex__":[30,2,1,""],"__or__":[30,2,1,""],"__add__":[30,2,1,""],"__gt__":[30,2,1,""],"__eq__":[30,2,1,""],concat:[30,2,1,""],desc:[30,2,1,""],"__subclasshook__":[30,8,
1,""],nullslast:[30,2,1,""],like:[30,2,1,""],"__mod__":[30,2,1,""],"__neg__":[30,2,1,""],endswith:[30,2,1,""],"__delattr__":[30,4,1,""],"__div__":[30,2,1,""],"__le__":[30,2,1,""],"__mul__":[30,2,1,""],"__hash__":[30,4,1,""],"__new__":[30,8,1,""],operate:[30,2,1,""],"__ge__":[30,2,1,""],op:[30,2,1,""]},"sqlalchemy.orm.collections":{CollectionAdapter:[3,1,1,""],InstrumentedSet:[3,1,1,""],InstrumentedDict:[3,1,1,""],collection:[3,1,1,""],column_mapped_collection:[3,3,1,""],InstrumentedList:[3,1,1,""],bulk_replace:[3,3,1,""],prepare_instrumentation:[3,3,1,""],MappedCollection:[3,1,1,""],mapped_collection:[3,3,1,""],collection_adapter:[3,3,1,""],attribute_mapped_collection:[3,3,1,""]},"sqlalchemy.schema":{Index:[2,1,1,""],DropConstraint:[56,1,1,""],CreateSchema:[56,1,1,""],CheckConstraint:[2,1,1,""],DDL:[56,1,1,""],DefaultGenerator:[10,1,1,""],ForeignKeyConstraint:[2,1,1,""],CreateTable:[56,1,1,""],CreateIndex:[56,1,1,""],PassiveDefault:[10,1,1,""],ForeignKey:[2,1,1,""],DefaultClause:[10,1,1,""],DDLElement:[56,1,1,""],DropIndex:[56,1,1,""],DropSequence:[56,1,1,""],FetchedValue:[10,1,1,""],MetaData:[55,1,1,""],ColumnDefault:[10,1,1,""],ThreadLocalMetaData:[55,1,1,""],UniqueConstraint:[2,1,1,""],Constraint:[2,1,1,""],Column:[55,1,1,""],CreateSequence:[56,1,1,""],ColumnCollectionConstraint:[2,1,1,""],CreateColumn:[56,1,1,""],DropTable:[56,1,1,""],Sequence:[10,1,1,""],AddConstraint:[56,1,1,""],DropSchema:[56,1,1,""],Table:[55,1,1,""],PrimaryKeyConstraint:[2,1,1,""],SchemaItem:[55,1,1,""]},"sqlalchemy.engine.Transaction":{close:[26,2,1,""],rollback:[26,2,1,""],commit:[26,2,1,""]},"sqlalchemy.dialects.drizzle.ENUM":{"__init__":[11,2,1,""]},"sqlalchemy.sql.expression.CTE":{count:[76,2,1,""],compare:[76,2,1,""],join:[76,2,1,""],primary_key:[76,4,1,""],unique_params:[76,2,1,""],columns:[76,4,1,""],correspond_on_equivalents:[76,2,1,""],c:[76,4,1,""],compile:[76,2,1,""],outerjoin:[76,2,1,""],foreign_keys:[76,4,1,""],params:[76,2,1,""],select:[76,2,1,""],self_group:[76,2,1,""],replace_selectable:[76,2,1,""],corresponding_column:[76,2,1,""]},"sqlalchemy.engine.Engine":{contextual_connect:[26,2,1,""],table_names:[26,2,1,""],transaction:[26,2,1,""],execution_options:[26,2,1,""],name:[26,4,1,""],raw_connection:[26,2,1,""],driver:[26,4,1,""],begin:[26,2,1,""],dispose:[26,2,1,""],execute:[26,2,1,""],update_execution_options:[26,2,1,""],connect:[26,2,1,""],run_callable:[26,2,1,""]},"sqlalchemy.engine.url":{URL:[48,1,1,""],make_url:[48,3,1,""]},"sqlalchemy.sql.functions.next_value":{identifier:[41,4,1,""],type:[41,4,1,""],name:[41,4,1,""]},"sqlalchemy.dialects.mssql":{DATETIMEOFFSET:[9,1,1,""],REAL:[9,1,1,""],SMALLDATETIME:[9,1,1,""],TEXT:[9,1,1,""],CHAR:[9,1,1,""],TINYINT:[9,1,1,""],mxodbc:[9,0,1,""],pyodbc:[9,0,1,""],VARCHAR:[9,1,1,""],IMAGE:[9,1,1,""],DATETIME2:[9,1,1,""],BIT:[9,1,1,""],NTEXT:[9,1,1,""],zxjdbc:[9,0,1,""],adodbapi:[9,0,1,""],base:[9,0,1,""],pymssql:[9,0,1,""],NVARCHAR:[9,1,1,""],NCHAR:[9,1,1,""],UNIQUEIDENTIFIER:[9,1,1,""],SQL_VARIANT:[9,1,1,""],MONEY:[9,1,1,""],SMALLMONEY:[9,1,1,""],TIME:[9,1,1,""]},"sqlalchemy.dialects.drizzle.NUMERIC":{"__init__":[11,2,1,""]},"sqlalchemy.types.Float":{"__init__":[25,2,1,""]},"sqlalchemy.sql.functions.now":{identifier:[41,4,1,""],type:[41,4,1,""],name:[41,4,1,""]},"sqlalchemy.dialects.mysql.BLOB":{"__init__":[5,2,1,""]},"sqlalchemy.dialects":{sqlite:[73,0,1,""]},"sqlalchemy.orm.events.AttributeEvents":{set:[57,2,1,""],append:[57,2,1,""],remove:[57,2,1,""]},"sqlalchemy.sql.functions.sysdate":{identifier:[41,4,1,""],type:[41,4,1,""],name:[41,4,1,""]},"sqlalchemy.
engine":{Engine:[26,1,1,""],Transaction:[26,1,1,""],RowProxy:[26,1,1,""],Connectable:[26,1,1,""],TwoPhaseTransaction:[26,1,1,""],Connection:[26,1,1,""],NestedTransaction:[26,1,1,""],ResultProxy:[26,1,1,""]},"sqlalchemy.schema.Table":{append_column:[55,2,1,""],compare:[55,2,1,""],drop:[55,2,1,""],exists:[55,2,1,""],named_with_column:[55,4,1,""],dispatch:[55,4,1,""],"__init__":[55,2,1,""],append_constraint:[55,2,1,""],is_selectable:[55,4,1,""],primary_key:[55,4,1,""],create:[55,2,1,""],get_children:[55,2,1,""],add_is_dependent_on:[55,2,1,""],params:[55,2,1,""],select:[55,2,1,""],quote:[55,4,1,""],outerjoin:[55,2,1,""],implicit_returning:[55,4,1,""],replace_selectable:[55,2,1,""],corresponding_column:[55,2,1,""],schema:[55,4,1,""],count:[55,2,1,""],correspond_on_equivalents:[55,2,1,""],description:[55,4,1,""],supports_execution:[55,4,1,""],is_derived_from:[55,2,1,""],update:[55,2,1,""],key:[55,4,1,""],self_group:[55,2,1,""],selectable:[55,4,1,""],is_clause_element:[55,4,1,""],info:[55,4,1,""],insert:[55,2,1,""],c:[55,4,1,""],join:[55,2,1,""],unique_params:[55,2,1,""],bind:[55,4,1,""],columns:[55,4,1,""],compile:[55,2,1,""],alias:[55,2,1,""],foreign_keys:[55,4,1,""],tometadata:[55,2,1,""],append_ddl_listener:[55,2,1,""],"delete":[55,2,1,""]},"sqlalchemy.engine.default.DefaultDialect":{do_recover_twophase:[27,2,1,""],do_prepare_twophase:[27,2,1,""],get_view_definition:[27,2,1,""],denormalize_name:[27,2,1,""],preparer:[27,4,1,""],create_xid:[27,2,1,""],has_table:[27,2,1,""],type_descriptor:[27,2,1,""],set_isolation_level:[27,2,1,""],get_table_names:[27,2,1,""],statement_compiler:[27,4,1,""],execute_sequence_format:[27,4,1,""],get_unique_constraints:[27,2,1,""],do_rollback_twophase:[27,2,1,""],get_columns:[27,2,1,""],get_pk_constraint:[27,2,1,""],get_foreign_keys:[27,2,1,""],normalize_name:[27,2,1,""],has_sequence:[27,2,1,""],get_isolation_level:[27,2,1,""],get_indexes:[27,2,1,""],do_commit_twophase:[27,2,1,""],on_connect:[27,2,1,""],do_begin_twophase:[27,2,1,""],get_view_names:[27,2,1,""],get_primary_keys:[27,2,1,""]},"sqlalchemy.orm.session":{sessionmaker:[49,1,1,""],object_session:[49,3,1,""],Session:[49,1,1,""],SessionTransaction:[49,1,1,""],make_transient:[49,3,1,""]},"sqlalchemy.orm.interfaces.MapperProperty":{info:[31,4,1,""],compare:[31,2,1,""],merge:[31,2,1,""],is_primary:[31,2,1,""],setup:[31,2,1,""],cascade_iterator:[31,2,1,""],create_row_processor:[31,2,1,""],cascade:[31,4,1,""],init:[31,2,1,""],post_instrument_class:[31,2,1,""],class_attribute:[31,4,1,""],do_init:[31,2,1,""]},"sqlalchemy.dialects.mysql.TIMESTAMP":{"__init__":[5,2,1,""]},"sqlalchemy.util":{KeyedTuple:[37,1,1,""],ScopedRegistry:[49,1,1,""],ThreadLocalRegistry:[49,1,1,""]},"sqlalchemy.engine.interfaces":{Dialect:[27,1,1,""],ExecutionContext:[27,1,1,""],Compiled:[27,1,1,""]},"sqlalchemy.sql.expression.Join":{count:[76,2,1,""],correspond_on_equivalents:[76,2,1,""],join:[76,2,1,""],primary_key:[76,4,1,""],unique_params:[76,2,1,""],c:[76,4,1,""],columns:[76,4,1,""],compare:[76,2,1,""],corresponding_column:[76,2,1,""],compile:[76,2,1,""],alias:[76,2,1,""],foreign_keys:[76,4,1,""],params:[76,2,1,""],"__init__":[76,2,1,""],outerjoin:[76,2,1,""],replace_selectable:[76,2,1,""],select:[76,2,1,""]},"sqlalchemy.schema.CreateSchema":{"__init__":[56,2,1,""]},"sqlalchemy.dialects.mysql.TEXT":{"__init__":[5,2,1,""]},"sqlalchemy.sql.expression.SelectBase":{autocommit:[76,2,1,""],order_by:[76,2,1,""],apply_labels:[76,2,1,""],append_group_by:[76,2,1,""],label:[76,2,1,""],cte:[76,2,1,""],group_by:[76,2,1,""],limit:[76,2,1,""],offset:[76,2,1
,""],append_order_by:[76,2,1,""],as_scalar:[76,2,1,""]},"sqlalchemy.sql.expression.ColumnClause":{compare:[30,2,1,""],shares_lineage:[30,2,1,""],startswith:[30,2,1,""],notin_:[30,2,1,""],endswith:[30,2,1,""],isnot:[30,2,1,""],"__lt__":[30,2,1,""],notilike:[30,2,1,""],in_:[30,2,1,""],distinct:[30,2,1,""],contains:[30,2,1,""],ilike:[30,2,1,""],get_children:[30,2,1,""],label:[30,2,1,""],notlike:[30,2,1,""],collate:[30,2,1,""],between:[30,2,1,""],nullsfirst:[30,2,1,""],match:[30,2,1,""],"__ne__":[30,2,1,""],is_:[30,2,1,""],asc:[30,2,1,""],self_group:[30,2,1,""],"__eq__":[30,2,1,""],concat:[30,2,1,""],desc:[30,2,1,""],nullslast:[30,2,1,""],like:[30,2,1,""],compile:[30,2,1,""],"__le__":[30,2,1,""],anon_label:[30,4,1,""],expression:[30,4,1,""],op:[30,2,1,""]},"sqlalchemy.dialects.postgresql.ranges.RangeOperators.comparator_factory":{overlaps:[51,2,1,""],"__ne__":[51,2,1,""],contains:[51,2,1,""],contained_by:[51,2,1,""],adjacent_to:[51,2,1,""],not_extend_right_of:[51,2,1,""],strictly_right_of:[51,2,1,""],not_extend_left_of:[51,2,1,""],strictly_left_of:[51,2,1,""]},"sqlalchemy.ext.compiler":{deregister:[74,3,1,""],compiles:[74,3,1,""]},"sqlalchemy.event":{listens_for:[29,3,1,""],listen:[29,3,1,""]},"sqlalchemy.ext.horizontal_shard.ShardedSession":{"__init__":[45,2,1,""]},"sqlalchemy.ext.mutable.Mutable":{associate_with_attribute:[16,6,1,""],as_mutable:[16,6,1,""],changed:[16,2,1,""],associate_with:[16,6,1,""]},"sqlalchemy.dialects.postgresql":{REAL:[51,1,1,""],ExcludeConstraint:[51,1,1,""],INT8RANGE:[51,1,1,""],DOUBLE_PRECISION:[51,1,1,""],array:[51,1,1,""],MACADDR:[51,1,1,""],psycopg2:[51,0,1,""],INTERVAL:[51,1,1,""],TSTZRANGE:[51,1,1,""],pg8000:[51,0,1,""],ARRAY:[51,1,1,""],INET:[51,1,1,""],zxjdbc:[51,0,1,""],ENUM:[51,1,1,""],NUMRANGE:[51,1,1,""],base:[51,0,1,""],INT4RANGE:[51,1,1,""],TSRANGE:[51,1,1,""],CIDR:[51,1,1,""],BIT:[51,1,1,""],BYTEA:[51,1,1,""],UUID:[51,1,1,""],All:[51,1,1,""],hstore:[51,1,1,""],HSTORE:[51,1,1,""],DATERANGE:[51,1,1,""],pypostgresql:[51,0,1,""],Any:[51,1,1,""]},"sqlalchemy.orm":{contains_eager:[36,3,1,""],defer:[34,3,1,""],subqueryload_all:[36,3,1,""],aliased:[37,3,1,""],dynamic_loader:[21,3,1,""],validates:[34,3,1,""],object_mapper:[34,3,1,""],session:[49,0,1,""],relation:[21,3,1,""],immediateload:[36,3,1,""],clear_mappers:[34,3,1,""],contains_alias:[36,3,1,""],joinedload:[36,3,1,""],undefer_group:[34,3,1,""],configure_mappers:[34,3,1,""],outerjoin:[37,3,1,""],eagerload:[36,3,1,""],with_polymorphic:[75,3,1,""],column_property:[34,3,1,""],relationship:[21,3,1,""],composite:[34,3,1,""],join:[37,3,1,""],interfaces:[32,0,1,""],class_mapper:[34,3,1,""],subqueryload:[36,3,1,""],with_parent:[37,3,1,""],joinedload_all:[36,3,1,""],noload:[36,3,1,""],mapper:[34,3,1,""],remote:[21,3,1,""],synonym:[34,3,1,""],exc:[60,0,1,""],foreign:[21,3,1,""],undefer:[34,3,1,""],deferred:[34,3,1,""],reconstructor:[34,3,1,""],lazyload:[36,3,1,""],eagerload_all:[36,3,1,""],backref:[21,3,1,""]},"sqlalchemy.sql.expression.Update":{execute:[4,2,1,""],prefix_with:[4,2,1,""],unique_params:[4,2,1,""],bind:[4,4,1,""],returning:[4,2,1,""],compare:[4,2,1,""],compile:[4,2,1,""],with_hint:[4,2,1,""],scalar:[4,2,1,""],values:[4,2,1,""],params:[4,2,1,""],self_group:[4,2,1,""],execution_options:[4,2,1,""],where:[4,2,1,""]},"sqlalchemy.ext":{instrumentation:[63,0,1,""],associationproxy:[12,0,1,""],orderinglist:[43,0,1,""],hybrid:[64,0,1,""],horizontal_shard:[45,0,1,""],declarative:[72,0,1,""],mutable:[16,0,1,""],serializer:[15,0,1,""],compiler:[74,0,1,""]},"sqlalchemy.schema.DDLElement":{dialect:[56,4,1,""],execut
e:[56,2,1,""],target:[56,4,1,""],execute_if:[56,2,1,""],bind:[56,4,1,""],execute_at:[56,2,1,""],against:[56,2,1,""],"__call__":[56,2,1,""],on:[56,4,1,""],callable_:[56,4,1,""]},"sqlalchemy.types.Unicode":{"__init__":[25,2,1,""]},"sqlalchemy.dialects.mysql.REAL":{"__init__":[5,2,1,""]},"sqlalchemy.dialects.postgresql.REAL":{"__init__":[51,2,1,""]},"sqlalchemy.types.UserDefinedType":{coerce_compared_value:[25,2,1,""]},"sqlalchemy.engine.default":{DefaultDialect:[27,1,1,""],DefaultExecutionContext:[27,1,1,""]},"sqlalchemy.dialects.postgresql.MACADDR":{"__init__":[51,2,1,""]},"sqlalchemy.dialects.mssql.NCHAR":{"__init__":[9,2,1,""]},"sqlalchemy.schema.Column":{compare:[55,2,1,""],shares_lineage:[55,2,1,""],"__ne__":[55,2,1,""],copy:[55,2,1,""],notin_:[55,2,1,""],operate:[55,2,1,""],dispatch:[55,4,1,""],endswith:[55,2,1,""],append_foreign_key:[55,2,1,""],references:[55,2,1,""],isnot:[55,2,1,""],table:[55,4,1,""],"__lt__":[55,2,1,""],reverse_operate:[55,2,1,""],"__init__":[55,2,1,""],proxy_set:[55,4,1,""],notilike:[55,2,1,""],is_selectable:[55,4,1,""],server_onupdate:[55,4,1,""],primary_key:[55,4,1,""],distinct:[55,2,1,""],is_clause_element:[55,4,1,""],contains:[55,2,1,""],ilike:[55,2,1,""],get_children:[55,2,1,""],label:[55,2,1,""],onupdate:[55,4,1,""],notlike:[55,2,1,""],"__le__":[55,2,1,""],params:[55,2,1,""],collate:[55,2,1,""],timetuple:[55,4,1,""],quote:[55,4,1,""],nullsfirst:[55,2,1,""],type:[55,4,1,""],match:[55,2,1,""],startswith:[55,2,1,""],is_:[55,2,1,""],description:[55,4,1,""],supports_execution:[55,4,1,""],between:[55,2,1,""],asc:[55,2,1,""],in_:[55,2,1,""],server_default:[55,4,1,""],self_group:[55,2,1,""],base_columns:[55,4,1,""],"__eq__":[55,2,1,""],concat:[55,2,1,""],desc:[55,2,1,""],info:[55,4,1,""],nullslast:[55,2,1,""],like:[55,2,1,""],comparator:[55,4,1,""],unique_params:[55,2,1,""],"default":[55,4,1,""],bind:[55,4,1,""],compile:[55,2,1,""],foreign_keys:[55,4,1,""],op:[55,2,1,""],expression:[55,4,1,""],anon_label:[55,4,1,""]},"sqlalchemy.dialects.postgresql.CIDR":{"__init__":[51,2,1,""]},"sqlalchemy.types":{REAL:[25,1,1,""],FLOAT:[25,1,1,""],TypeEngine:[25,1,1,""],CLOB:[25,1,1,""],UnicodeText:[25,1,1,""],TEXT:[25,1,1,""],Time:[25,1,1,""],NullType:[25,1,1,""],CHAR:[25,1,1,""],BIGINT:[25,1,1,""],DATE:[25,1,1,""],VARBINARY:[25,1,1,""],NCHAR:[25,1,1,""],AbstractType:[25,1,1,""],SMALLINT:[25,1,1,""],VARCHAR:[25,1,1,""],String:[25,1,1,""],Enum:[25,1,1,""],Variant:[25,1,1,""],NUMERIC:[25,1,1,""],Concatenable:[25,1,1,""],TypeDecorator:[25,1,1,""],Boolean:[25,1,1,""],Date:[25,1,1,""],Integer:[25,1,1,""],SchemaType:[25,1,1,""],LargeBinary:[25,1,1,""],UserDefinedType:[25,1,1,""],SmallInteger:[25,1,1,""],DECIMAL:[25,1,1,""],Interval:[25,1,1,""],DATETIME:[25,1,1,""],PickleType:[25,1,1,""],INTEGER:[25,1,1,""],DateTime:[25,1,1,""],NVARCHAR:[25,1,1,""],Text:[25,1,1,""],BINARY:[25,1,1,""],BigInteger:[25,1,1,""],INT:[25,4,1,""],TIMESTAMP:[25,1,1,""],Float:[25,1,1,""],Numeric:[25,1,1,""],BOOLEAN:[25,1,1,""],BLOB:[25,1,1,""],Unicode:[25,1,1,""],TIME:[25,1,1,""]},"sqlalchemy.sql.expression.ScalarSelect":{where:[76,2,1,""]},"sqlalchemy.orm.instrumentation":{ClassManager:[31,1,1,""],is_instrumented:[49,3,1,""]},"sqlalchemy.util.ScopedRegistry":{has:[49,2,1,""],clear:[49,2,1,""],set:[49,2,1,""],"__init__":[49,2,1,""]},"sqlalchemy.sql.functions.count":{identifier:[41,4,1,""],type:[41,4,1,""],name:[41,4,1,""]},"sqlalchemy.dialects.mysql.TINYINT":{"__init__":[5,2,1,""]},"sqlalchemy.sql.expression.UnaryExpression":{compare:[30,2,1,""]},"sqlalchemy.ext.associationproxy.AssociationProxy":{attr:[12,4,1,""]
,remote_attr:[12,4,1,""],is_attribute:[12,4,1,""],contains:[12,2,1,""],extension_type:[12,4,1,""],local_attr:[12,4,1,""],scalar:[12,4,1,""],target_class:[12,4,1,""],has:[12,2,1,""],any:[12,2,1,""],"__init__":[12,2,1,""]},"sqlalchemy.dialects.mysql.NVARCHAR":{"__init__":[5,2,1,""]},"sqlalchemy.dialects.mysql":{REAL:[5,1,1,""],FLOAT:[5,1,1,""],mysqldb:[5,0,1,""],TEXT:[5,1,1,""],pymysql:[5,0,1,""],CHAR:[5,1,1,""],TINYINT:[5,1,1,""],BIGINT:[5,1,1,""],DATE:[5,1,1,""],VARBINARY:[5,1,1,""],pyodbc:[5,0,1,""],SMALLINT:[5,1,1,""],MEDIUMBLOB:[5,1,1,""],SET:[5,1,1,""],VARCHAR:[5,1,1,""],NUMERIC:[5,1,1,""],MEDIUMINT:[5,1,1,""],INTEGER:[5,1,1,""],BIT:[5,1,1,""],zxjdbc:[5,0,1,""],DECIMAL:[5,1,1,""],ENUM:[5,1,1,""],DATETIME:[5,1,1,""],base:[5,0,1,""],YEAR:[5,1,1,""],TINYBLOB:[5,1,1,""],NVARCHAR:[5,1,1,""],NCHAR:[5,1,1,""],BINARY:[5,1,1,""],cymysql:[5,0,1,""],LONGTEXT:[5,1,1,""],TIMESTAMP:[5,1,1,""],TINYTEXT:[5,1,1,""],MEDIUMTEXT:[5,1,1,""],BOOLEAN:[5,1,1,""],BLOB:[5,1,1,""],TIME:[5,1,1,""],oursql:[5,0,1,""],gaerdbms:[5,0,1,""],DOUBLE:[5,1,1,""],mysqlconnector:[5,0,1,""],LONGBLOB:[5,1,1,""]},"sqlalchemy.dialects.mysql.TINYTEXT":{"__init__":[5,2,1,""]},"sqlalchemy.sql.expression.CompoundSelect":{compare:[76,2,1,""],offset:[76,2,1,""],scalar:[76,2,1,""],select:[76,2,1,""],autocommit:[76,2,1,""],execution_options:[76,2,1,""],primary_key:[76,4,1,""],apply_labels:[76,2,1,""],cte:[76,2,1,""],label:[76,2,1,""],group_by:[76,2,1,""],params:[76,2,1,""],outerjoin:[76,2,1,""],replace_selectable:[76,2,1,""],columns:[76,4,1,""],correspond_on_equivalents:[76,2,1,""],description:[76,4,1,""],execute:[76,2,1,""],append_group_by:[76,2,1,""],append_order_by:[76,2,1,""],order_by:[76,2,1,""],count:[76,2,1,""],c:[76,4,1,""],join:[76,2,1,""],unique_params:[76,2,1,""],compile:[76,2,1,""],alias:[76,2,1,""],foreign_keys:[76,4,1,""],limit:[76,2,1,""],corresponding_column:[76,2,1,""],as_scalar:[76,2,1,""]},"sqlalchemy.util.KeyedTuple":{keys:[37,2,1,""],"_fields":[37,4,1,""],"_asdict":[37,2,1,""]},"sqlalchemy.ext.horizontal_shard.ShardedQuery":{set_shard:[45,2,1,""]},"sqlalchemy.sql":{functions:[41,0,1,""],expression:[30,0,1,""]},"sqlalchemy.dialects.oracle":{INTERVAL:[66,1,1,""],BFILE:[66,1,1,""],zxjdbc:[66,0,1,""],cx_oracle:[66,0,1,""],NUMBER:[66,1,1,""],LONG:[66,1,1,""],RAW:[66,1,1,""],base:[66,0,1,""],DOUBLE_PRECISION:[66,1,1,""],NCLOB:[66,1,1,""]},"sqlalchemy.dialects.mysql.VARCHAR":{"__init__":[5,2,1,""]},"sqlalchemy.dialects.postgresql.BYTEA":{"__init__":[51,2,1,""]},"sqlalchemy.dialects.postgresql.HSTORE.comparator_factory":{contains:[51,2,1,""],keys:[51,2,1,""],matrix:[51,2,1,""],defined:[51,2,1,""],has_all:[51,2,1,""],contained_by:[51,2,1,""],vals:[51,2,1,""],has_key:[51,2,1,""],slice:[51,2,1,""],has_any:[51,2,1,""],array:[51,2,1,""],"delete":[51,2,1,""]},"sqlalchemy.engine.reflection":{Inspector:[0,1,1,""]},"sqlalchemy.types.String":{"__init__":[25,2,1,""]},"sqlalchemy.dialects.postgresql.ENUM":{create:[51,2,1,""],drop:[51,2,1,""],"__init__":[51,2,1,""]},"sqlalchemy.orm.collections.collection":{replaces:[3,8,1,""],adds:[3,8,1,""],converter:[3,8,1,""],internally_instrumented:[3,8,1,""],linker:[3,8,1,""],removes:[3,8,1,""],remover:[3,8,1,""],removes_return:[3,8,1,""],link:[3,8,1,""],iterator:[3,8,1,""],appender:[3,8,1,""]},"sqlalchemy.dialects.firebird":{kinterbasdb:[59,0,1,""],base:[59,0,1,""],fdb:[59,0,1,""]},"sqlalchemy.orm.mapper.Mapper":{inherits:[34,4,1,""],self_and_descendants:[34,4,1,""],configured:[34,4,1,""],primary_key_from_instance:[34,2,1,""],add_properties:[34,2,1,""],entity:[34,4,1,""],synonyms:[34,4,1,""],identi
ty_key_from_instance:[34,2,1,""],attrs:[34,4,1,""],validators:[34,4,1,""],primary_mapper:[34,2,1,""],identity_key_from_row:[34,2,1,""],"__init__":[34,2,1,""],relationships:[34,4,1,""],iterate_properties:[34,4,1,""],common_parent:[34,2,1,""],primary_key:[34,4,1,""],with_polymorphic_mappers:[34,4,1,""],local_table:[34,4,1,""],mapped_table:[34,4,1,""],class_manager:[34,4,1,""],add_property:[34,2,1,""],non_primary:[34,4,1,""],columns:[34,4,1,""],identity_key_from_primary_key:[34,2,1,""],polymorphic_on:[34,4,1,""],tables:[34,4,1,""],compiled:[34,4,1,""],base_mapper:[34,4,1,""],cascade_iterator:[34,2,1,""],class_:[34,4,1,""],is_mapper:[34,4,1,""],selectable:[34,4,1,""],composites:[34,4,1,""],all_orm_descriptors:[34,4,1,""],mapper:[34,4,1,""],c:[34,4,1,""],get_property_by_column:[34,2,1,""],get_property:[34,2,1,""],polymorphic_iterator:[34,2,1,""],column_attrs:[34,4,1,""],single:[34,4,1,""],compile:[34,2,1,""],concrete:[34,4,1,""],polymorphic_identity:[34,4,1,""],isa:[34,2,1,""],polymorphic_map:[34,4,1,""]},"sqlalchemy.orm.interfaces.MapperExtension":{before_delete:[32,2,1,""],append_result:[32,2,1,""],create_instance:[32,2,1,""],translate_row:[32,2,1,""],after_delete:[32,2,1,""],init_failed:[32,2,1,""],after_insert:[32,2,1,""],init_instance:[32,2,1,""],before_update:[32,2,1,""],populate_instance:[32,2,1,""],after_update:[32,2,1,""],before_insert:[32,2,1,""],reconstruct_instance:[32,2,1,""],instrument_class:[32,2,1,""]},"sqlalchemy.sql.functions.AnsiFunction":{identifier:[41,4,1,""],name:[41,4,1,""]},"sqlalchemy.dialects.drizzle.VARCHAR":{"__init__":[11,2,1,""]},"sqlalchemy.dialects.postgresql.DOUBLE_PRECISION":{"__init__":[51,2,1,""]},"sqlalchemy.engine.RowProxy":{keys:[26,2,1,""],items:[26,2,1,""],has_key:[26,2,1,""]},"sqlalchemy.ext.orderinglist":{count_from_1:[43,3,1,""],count_from_0:[43,3,1,""],count_from_n_factory:[43,3,1,""],OrderingList:[43,1,1,""],ordering_list:[43,3,1,""]},"sqlalchemy.dialects.mssql.BIT":{"__init__":[9,2,1,""]},"sqlalchemy.sql.functions.localtime":{identifier:[41,4,1,""],type:[41,4,1,""],name:[41,4,1,""]},"sqlalchemy.orm.scoping":{scoped_session:[49,1,1,""]},"sqlalchemy.sql.operators.Operators":{"__invert__":[30,2,1,""],"__weakref__":[30,4,1,""],"__or__":[30,2,1,""],"__and__":[30,2,1,""],operate:[30,2,1,""],reverse_operate:[30,2,1,""],op:[30,2,1,""]},"sqlalchemy.dialects.mssql.NVARCHAR":{"__init__":[9,2,1,""]},"sqlalchemy.sql.functions.coalesce":{identifier:[41,4,1,""],name:[41,4,1,""]},"sqlalchemy.dialects.oracle.INTERVAL":{"__init__":[66,2,1,""]},"sqlalchemy.dialects.mysql.SET":{"__init__":[5,2,1,""]},"sqlalchemy.interfaces.ConnectionProxy":{execute:[68,2,1,""],rollback:[68,2,1,""],commit_twophase:[68,2,1,""],prepare_twophase:[68,2,1,""],release_savepoint:[68,2,1,""],savepoint:[68,2,1,""],begin:[68,2,1,""],cursor_execute:[68,2,1,""],rollback_twophase:[68,2,1,""],commit:[68,2,1,""],rollback_savepoint:[68,2,1,""],begin_twophase:[68,2,1,""]},"sqlalchemy.dialects.postgresql.UUID":{"__init__":[51,2,1,""]},"sqlalchemy.orm.interfaces.SessionExtension":{after_bulk_delete:[32,2,1,""],after_begin:[32,2,1,""],after_flush_postexec:[32,2,1,""],before_commit:[32,2,1,""],after_attach:[32,2,1,""],after_rollback:[32,2,1,""],before_flush:[32,2,1,""],after_flush:[32,2,1,""],after_bulk_update:[32,2,1,""],after_commit:[32,2,1,""]},"sqlalchemy.sql.functions.localtimestamp":{identifier:[41,4,1,""],type:[41,4,1,""],name:[41,4,1,""]},"sqlalchemy.dialects.postgresql.ExcludeConstraint":{"__init__":[51,2,1,""]},"sqlalchemy.ext.instrumentation.InstrumentationManager":{initialize_instance_dict:[63,
2,1,""],install_descriptor:[63,2,1,""],instrument_attribute:[63,2,1,""],uninstall_member:[63,2,1,""],dispose:[63,2,1,""],post_configure_attribute:[63,2,1,""],manage:[63,2,1,""],install_member:[63,2,1,""],uninstall_descriptor:[63,2,1,""],install_state:[63,2,1,""],instrument_collection_class:[63,2,1,""],state_getter:[63,2,1,""],remove_state:[63,2,1,""],get_instance_dict:[63,2,1,""],manager_getter:[63,2,1,""],dict_getter:[63,2,1,""]},"sqlalchemy.dialects.mysql.INTEGER":{"__init__":[5,2,1,""]},"sqlalchemy.sql.functions.current_timestamp":{identifier:[41,4,1,""],type:[41,4,1,""],name:[41,4,1,""]},"sqlalchemy.dialects.mysql.BIGINT":{"__init__":[5,2,1,""]},"sqlalchemy.events.PoolEvents":{reset:[50,2,1,""],first_connect:[50,2,1,""],checkin:[50,2,1,""],checkout:[50,2,1,""],connect:[50,2,1,""]},"sqlalchemy.dialects.mssql.MONEY":{"__init__":[9,2,1,""]},sqlalchemy:{engine:[26,0,1,""],exc:[17,0,1,""],create_engine:[48,3,1,""],engine_from_config:[48,3,1,""],interfaces:[68,0,1,""],orm:[21,0,1,""],types:[25,0,1,""],inspection:[13,0,1,""],pool:[14,0,1,""],schema:[52,0,1,""]},"sqlalchemy.orm.descriptor_props":{SynonymProperty:[31,1,1,""],CompositeProperty:[31,1,1,""]},"sqlalchemy.events.ConnectionEvents":{dbapi_error:[50,2,1,""],rollback:[50,2,1,""],commit_twophase:[50,2,1,""],before_execute:[50,2,1,""],release_savepoint:[50,2,1,""],savepoint:[50,2,1,""],prepare_twophase:[50,2,1,""],begin:[50,2,1,""],after_cursor_execute:[50,2,1,""],rollback_twophase:[50,2,1,""],commit:[50,2,1,""],after_execute:[50,2,1,""],rollback_savepoint:[50,2,1,""],before_cursor_execute:[50,2,1,""],begin_twophase:[50,2,1,""]},"sqlalchemy.dialects.drizzle":{REAL:[11,1,1,""],FLOAT:[11,1,1,""],VARCHAR:[11,1,1,""],DOUBLE:[11,1,1,""],TIMESTAMP:[11,1,1,""],DECIMAL:[11,1,1,""],ENUM:[11,1,1,""],BIGINT:[11,1,1,""],NUMERIC:[11,1,1,""],CHAR:[11,1,1,""],mysqldb:[11,0,1,""],base:[11,0,1,""],TEXT:[11,1,1,""],INTEGER:[11,1,1,""]},"sqlalchemy.events.DDLEvents":{after_create:[50,2,1,""],before_parent_attach:[50,2,1,""],after_drop:[50,2,1,""],after_parent_attach:[50,2,1,""],before_create:[50,2,1,""],before_drop:[50,2,1,""],column_reflect:[50,2,1,""]},"sqlalchemy.dialects.mssql.UNIQUEIDENTIFIER":{"__init__":[9,2,1,""]},"sqlalchemy.sql.functions.concat":{identifier:[41,4,1,""],type:[41,4,1,""],name:[41,4,1,""]},"sqlalchemy.orm.events.InstanceEvents":{load:[57,2,1,""],first_init:[57,2,1,""],init_failure:[57,2,1,""],refresh:[57,2,1,""],resurrect:[57,2,1,""],init:[57,2,1,""],unpickle:[57,2,1,""],expire:[57,2,1,""],pickle:[57,2,1,""]},"sqlalchemy.pool.QueuePool":{unique_connection:[14,2,1,""],connect:[14,2,1,""],"__init__":[14,2,1,""]},"sqlalchemy.dialects.mysql.BIT":{"__init__":[5,2,1,""]},"sqlalchemy.dialects.mssql.TINYINT":{"__init__":[9,2,1,""]},"sqlalchemy.orm.interfaces.PropComparator":{of_type:[31,2,1,""],has:[31,2,1,""],adapted:[31,2,1,""],any:[31,2,1,""]},"sqlalchemy.events":{PoolEvents:[50,1,1,""],ConnectionEvents:[50,1,1,""],SchemaEventTarget:[50,1,1,""],DDLEvents:[50,1,1,""]},"sqlalchemy.orm.interfaces.AttributeExtension":{set:[32,2,1,""],active_history:[32,4,1,""],append:[32,2,1,""],remove:[32,2,1,""]},"sqlalchemy.dialects.drizzle.FLOAT":{"__init__":[11,2,1,""]},"sqlalchemy.dialects.mssql.TEXT":{"__init__":[9,2,1,""]},"sqlalchemy.sql.expression":{ColumnClause:[30,1,1,""],Executable:[76,1,1,""],"false":[30,3,1,""],exists:[76,3,1,""],cast:[30,3,1,""],TableClause:[76,1,1,""],text:[30,3,1,""],over:[30,3,1,""],ColumnElement:[30,1,1,""],collate:[30,3,1,""],UpdateBase:[4,1,1,""],Label:[30,1,1,""],CTE:[76,1,1,""],and_:[30,3,1,""],ClauseList:[30,1,1,""],Cl
auseElement:[30,1,1,""],except_:[76,3,1,""],except_all:[76,3,1,""],"null":[30,3,1,""],extract:[30,3,1,""],select:[76,3,1,""],tuple_:[30,3,1,""],union_all:[76,3,1,""],ValuesBase:[4,1,1,""],SelectBase:[76,1,1,""],table:[76,3,1,""],Tuple:[30,1,1,""],union:[76,3,1,""],not_:[30,3,1,""],Update:[4,1,1,""],label:[30,3,1,""],distinct:[30,3,1,""],"case":[30,3,1,""],literal:[30,3,1,""],bindparam:[30,3,1,""],intersect:[76,3,1,""],between:[30,3,1,""],outerjoin:[76,3,1,""],Null:[30,1,1,""],nullsfirst:[30,3,1,""],Delete:[4,1,1,""],intersect_all:[76,3,1,""],outparam:[30,3,1,""],Case:[30,1,1,""],Insert:[4,1,1,""],Join:[76,1,1,""],type_coerce:[30,3,1,""],or_:[30,3,1,""],update:[4,3,1,""],asc:[30,3,1,""],Alias:[76,1,1,""],alias:[76,3,1,""],func:[30,7,1,""],join:[76,3,1,""],"true":[30,3,1,""],Selectable:[76,1,1,""],desc:[30,3,1,""],ScalarSelect:[76,1,1,""],insert:[4,3,1,""],nullslast:[30,3,1,""],UnaryExpression:[30,1,1,""],FromClause:[76,1,1,""],literal_column:[30,3,1,""],Cast:[30,1,1,""],column:[30,3,1,""],CompoundSelect:[76,1,1,""],Over:[30,1,1,""],False_:[30,1,1,""],ColumnCollection:[30,1,1,""],BindParameter:[30,1,1,""],True_:[30,1,1,""],subquery:[76,3,1,""],TextClause:[30,1,1,""],BinaryExpression:[30,1,1,""],Extract:[30,1,1,""],Select:[76,1,1,""],"delete":[4,3,1,""]},"sqlalchemy.dialects.mysql.DOUBLE":{"__init__":[5,2,1,""]},"sqlalchemy.dialects.postgresql.HSTORE":{comparator_factory:[51,1,1,""]},"sqlalchemy.sql.expression.UpdateBase":{bind:[4,4,1,""],params:[4,2,1,""],with_hint:[4,2,1,""],returning:[4,2,1,""]},"sqlalchemy.dialects.oracle.NCLOB":{"__init__":[66,2,1,""]},"sqlalchemy.dialects.drizzle.DECIMAL":{"__init__":[11,2,1,""]},"sqlalchemy.ext.mutable":{Mutable:[16,1,1,""],MutableComposite:[16,1,1,""],MutableBase:[16,1,1,""],MutableDict:[16,1,1,""]},"sqlalchemy.exc":{CircularDependencyError:[17,5,1,""],InternalError:[17,5,1,""],NoSuchColumnError:[17,5,1,""],DisconnectionError:[17,5,1,""],AmbiguousForeignKeysError:[17,5,1,""],NoInspectionAvailable:[17,5,1,""],SQLAlchemyError:[17,5,1,""],UnboundExecutionError:[17,5,1,""],UnsupportedCompilationError:[17,5,1,""],NoReferencedColumnError:[17,5,1,""],DontWrapMixin:[17,1,1,""],DataError:[17,5,1,""],TimeoutError:[17,5,1,""],IdentifierError:[17,5,1,""],NoReferencedTableError:[17,5,1,""],NotSupportedError:[17,5,1,""],IntegrityError:[17,5,1,""],CompileError:[17,5,1,""],StatementError:[17,5,1,""],SAPendingDeprecationWarning:[17,5,1,""],InvalidRequestError:[17,5,1,""],OperationalError:[17,5,1,""],SADeprecationWarning:[17,5,1,""],ResourceClosedError:[17,5,1,""],ProgrammingError:[17,5,1,""],SAWarning:[17,5,1,""],InterfaceError:[17,5,1,""],NoReferenceError:[17,5,1,""],DBAPIError:[17,5,1,""],DatabaseError:[17,5,1,""],ArgumentError:[17,5,1,""],NoForeignKeysError:[17,5,1,""],NoSuchTableError:[17,5,1,""]},"sqlalchemy.engine.interfaces.ExecutionContext":{lastrow_has_defaults:[27,2,1,""],create_cursor:[27,2,1,""],get_rowcount:[27,2,1,""],should_autocommit_text:[27,2,1,""],pre_exec:[27,2,1,""],post_exec:[27,2,1,""],result:[27,2,1,""],handle_dbapi_exception:[27,2,1,""]},"sqlalchemy.orm.properties":{ColumnProperty:[31,1,1,""],RelationshipProperty:[31,1,1,""]},"sqlalchemy.engine.Connectable":{contextual_connect:[26,2,1,""],execute:[26,2,1,""],create:[26,2,1,""],drop:[26,2,1,""],scalar:[26,2,1,""],connect:[26,2,1,""]},"sqlalchemy.engine.interfaces.Compiled":{execute:[27,2,1,""],sql_compiler:[27,4,1,""],construct_params:[27,2,1,""],compile:[27,2,1,""],scalar:[27,2,1,""],params:[27,4,1,""],"__init__":[27,2,1,""]},"sqlalchemy.types.SchemaType":{bind:[25,4,1,""],create:[25,2,1,""],dr
op:[25,2,1,""],copy:[25,2,1,""],adapt:[25,2,1,""]},"sqlalchemy.dialects.postgresql.ARRAY":{Comparator:[51,1,1,""],"__init__":[51,2,1,""]},"sqlalchemy.sql.expression.BindParameter":{compare:[30,2,1,""],shares_lineage:[30,2,1,""],"__ne__":[30,2,1,""],notin_:[30,2,1,""],endswith:[30,2,1,""],isnot:[30,2,1,""],"__lt__":[30,2,1,""],"__init__":[30,2,1,""],notilike:[30,2,1,""],effective_value:[30,4,1,""],in_:[30,2,1,""],distinct:[30,2,1,""],contains:[30,2,1,""],ilike:[30,2,1,""],get_children:[30,2,1,""],label:[30,2,1,""],notlike:[30,2,1,""],params:[30,2,1,""],collate:[30,2,1,""],between:[30,2,1,""],nullsfirst:[30,2,1,""],match:[30,2,1,""],startswith:[30,2,1,""],is_:[30,2,1,""],asc:[30,2,1,""],self_group:[30,2,1,""],"__eq__":[30,2,1,""],concat:[30,2,1,""],desc:[30,2,1,""],nullslast:[30,2,1,""],like:[30,2,1,""],unique_params:[30,2,1,""],compile:[30,2,1,""],"__le__":[30,2,1,""],op:[30,2,1,""],expression:[30,4,1,""],anon_label:[30,4,1,""]},"sqlalchemy.ext.hybrid":{hybrid_property:[64,1,1,""],HYBRID_METHOD:[64,7,1,""],HYBRID_PROPERTY:[64,7,1,""],Comparator:[64,1,1,""],hybrid_method:[64,1,1,""]},"sqlalchemy.schema.ForeignKeyConstraint":{"__init__":[2,2,1,""]},"sqlalchemy.interfaces.PoolListener":{first_connect:[68,2,1,""],checkin:[68,2,1,""],checkout:[68,2,1,""],connect:[68,2,1,""]},"sqlalchemy.dialects.drizzle.TEXT":{"__init__":[11,2,1,""]},"sqlalchemy.orm.session.Session":{add_all:[49,2,1,""],no_autoflush:[49,4,1,""],expunge_all:[49,2,1,""],scalar:[49,2,1,""],flush:[49,2,1,""],close:[49,2,1,""],query:[49,2,1,""],"__init__":[49,2,1,""],close_all:[49,6,1,""],prepare:[49,2,1,""],add:[49,2,1,""],"new":[49,4,1,""],begin:[49,2,1,""],rollback:[49,2,1,""],identity_key:[49,6,1,""],deleted:[49,4,1,""],is_active:[49,4,1,""],enable_relationship_loading:[49,2,1,""],execute:[49,2,1,""],is_modified:[49,2,1,""],identity_map:[49,4,1,""],expire:[49,2,1,""],bind_table:[49,2,1,""],object_session:[49,6,1,""],transaction:[49,4,1,""],bind_mapper:[49,2,1,""],prune:[49,2,1,""],expunge:[49,2,1,""],expire_all:[49,2,1,""],refresh:[49,2,1,""],merge:[49,2,1,""],connection:[49,2,1,""],dirty:[49,4,1,""],begin_nested:[49,2,1,""],commit:[49,2,1,""],get_bind:[49,2,1,""],"delete":[49,2,1,""]},"sqlalchemy.sql.functions.random":{identifier:[41,4,1,""],name:[41,4,1,""]},"sqlalchemy.engine.reflection.Inspector":{get_table_names:[0,2,1,""],get_foreign_keys:[0,2,1,""],get_indexes:[0,2,1,""],get_unique_constraints:[0,2,1,""],from_engine:[0,6,1,""],get_view_names:[0,2,1,""],get_view_definition:[0,2,1,""],get_table_options:[0,2,1,""],get_primary_keys:[0,2,1,""],get_pk_constraint:[0,2,1,""],default_schema_name:[0,4,1,""],reflecttable:[0,2,1,""],get_columns:[0,2,1,""],"__init__":[0,2,1,""],get_schema_names:[0,2,1,""]},"sqlalchemy.ext.declarative":{DeferredReflection:[72,1,1,""],as_declarative:[72,3,1,""],comparable_using:[72,3,1,""],synonym_for:[72,3,1,""],instrument_declarative:[72,3,1,""],AbstractConcreteBase:[72,1,1,""],has_inherited_table:[72,3,1,""],declarative_base:[72,3,1,""],declared_attr:[72,1,1,""],ConcreteBase:[72,1,1,""]},"sqlalchemy.dialects.mysql.LONGTEXT":{"__init__":[5,2,1,""]},"sqlalchemy.dialects.mssql.SMALLMONEY":{"__init__":[9,2,1,""]},"sqlalchemy.dialects.mysql.DECIMAL":{"__init__":[5,2,1,""]},"sqlalchemy.schema.MetaData":{is_bound:[55,2,1,""],bind:[55,4,1,""],clear:[55,2,1,""],remove:[55,2,1,""],reflect:[55,2,1,""],create_all:[55,2,1,""],sorted_tables:[55,4,1,""],append_ddl_listener:[55,2,1,""],"__init__":[55,2,1,""],drop_all:[55,2,1,""]},"sqlalchemy.orm.properties.RelationshipProperty":{table:[31,4,1,""],cascade:[31,4,1,""],C
omparator:[31,1,1,""],mapper:[31,4,1,""]},"sqlalchemy.sql.expression.Select":{append_column:[76,2,1,""],compare:[76,2,1,""],order_by:[76,2,1,""],intersect_all:[76,2,1,""],offset:[76,2,1,""],except_all:[76,2,1,""],self_group:[76,2,1,""],scalar:[76,2,1,""],froms:[76,4,1,""],correlate_except:[76,2,1,""],except_:[76,2,1,""],append_from:[76,2,1,""],"__init__":[76,2,1,""],union_all:[76,2,1,""],execution_options:[76,2,1,""],primary_key:[76,4,1,""],apply_labels:[76,2,1,""],cte:[76,2,1,""],union:[76,2,1,""],select_from:[76,2,1,""],get_children:[76,2,1,""],label:[76,2,1,""],with_only_columns:[76,2,1,""],group_by:[76,2,1,""],params:[76,2,1,""],select:[76,2,1,""],intersect:[76,2,1,""],outerjoin:[76,2,1,""],reduce_columns:[76,2,1,""],replace_selectable:[76,2,1,""],columns:[76,4,1,""],distinct:[76,2,1,""],autocommit:[76,2,1,""],correspond_on_equivalents:[76,2,1,""],description:[76,4,1,""],execute:[76,2,1,""],append_prefix:[76,2,1,""],corresponding_column:[76,2,1,""],with_hint:[76,2,1,""],alias:[76,2,1,""],append_group_by:[76,2,1,""],append_order_by:[76,2,1,""],prefix_with:[76,2,1,""],count:[76,2,1,""],c:[76,4,1,""],join:[76,2,1,""],correlate:[76,2,1,""],unique_params:[76,2,1,""],column:[76,2,1,""],append_having:[76,2,1,""],append_correlation:[76,2,1,""],compile:[76,2,1,""],inner_columns:[76,4,1,""],foreign_keys:[76,4,1,""],limit:[76,2,1,""],append_whereclause:[76,2,1,""],locate_all_froms:[76,4,1,""],where:[76,2,1,""],having:[76,2,1,""],as_scalar:[76,2,1,""]},"sqlalchemy.ext.hybrid.hybrid_property":{setter:[64,2,1,""],expression:[64,2,1,""],"__init__":[64,2,1,""],deleter:[64,2,1,""],comparator:[64,2,1,""]},"sqlalchemy.dialects.mysql.DATETIME":{"__init__":[5,2,1,""]},"sqlalchemy.schema.SchemaItem":{info:[55,4,1,""],get_children:[55,2,1,""]},"sqlalchemy.sql.compiler.DDLCompiler":{execute:[27,2,1,""],define_constraint_remote_table:[27,2,1,""],compile:[27,2,1,""],scalar:[27,2,1,""],params:[27,4,1,""],"__init__":[27,2,1,""]},"sqlalchemy.types.Enum":{create:[25,2,1,""],drop:[25,2,1,""],"__init__":[25,2,1,""]},"sqlalchemy.types.UnicodeText":{"__init__":[25,2,1,""]},"sqlalchemy.sql.compiler":{DDLCompiler:[27,1,1,""],SQLCompiler:[27,1,1,""],IdentifierPreparer:[27,1,1,""]},"sqlalchemy.schema.DropSchema":{"__init__":[56,2,1,""]},"sqlalchemy.dialects.oracle.LONG":{"__init__":[66,2,1,""]},"sqlalchemy.dialects.sqlite":{DATE:[73,1,1,""],base:[73,0,1,""],TIME:[73,1,1,""],pysqlite:[73,0,1,""],DATETIME:[73,1,1,""]},"sqlalchemy.engine.TwoPhaseTransaction":{prepare:[26,2,1,""]},"sqlalchemy.dialects.mysql.BOOLEAN":{"__init__":[5,2,1,""]},"sqlalchemy.dialects.mysql.CHAR":{"__init__":[5,2,1,""]},"sqlalchemy.engine.ResultProxy":{fetchall:[26,2,1,""],last_updated_params:[26,2,1,""],is_insert:[26,4,1,""],keys:[26,2,1,""],fetchone:[26,2,1,""],supports_sane_rowcount:[26,2,1,""],returns_rows:[26,4,1,""],last_inserted_params:[26,2,1,""],lastrow_has_defaults:[26,2,1,""],prefetch_cols:[26,2,1,""],rowcount:[26,4,1,""],scalar:[26,2,1,""],supports_sane_multi_rowcount:[26,2,1,""],close:[26,2,1,""],lastrowid:[26,4,1,""],inserted_primary_key:[26,4,1,""],fetchmany:[26,2,1,""],postfetch_cols:[26,2,1,""],first:[26,2,1,""]},"sqlalchemy.dialects.drizzle.INTEGER":{"__init__":[11,2,1,""]},"sqlalchemy.sql.functions.user":{identifier:[41,4,1,""],type:[41,4,1,""],name:[41,4,1,""]},"sqlalchemy.dialects.mssql.SMALLDATETIME":{"__init__":[9,2,1,""]},"sqlalchemy.dialects.oracle.BFILE":{"__init__":[66,2,1,""]},"sqlalchemy.orm.instrumentation.ClassManager":{state_getter:[31,6,1,""],unregister:[31,2,1,""],manage:[31,2,1,""],dispose:[31,2,1,""],has_parent:[31,2
,1,""],original_init:[31,4,1,""]},"sqlalchemy.sql.expression.Executable":{bind:[76,4,1,""],scalar:[76,2,1,""],execution_options:[76,2,1,""],execute:[76,2,1,""]},"sqlalchemy.schema.ThreadLocalMetaData":{is_bound:[55,2,1,""],dispose:[55,2,1,""],"__init__":[55,2,1,""],bind:[55,4,1,""]},"sqlalchemy.orm.properties.RelationshipProperty.Comparator":{mapper:[31,4,1,""],in_:[31,2,1,""],"__ne__":[31,2,1,""],adapted:[31,2,1,""],contains:[31,2,1,""],of_type:[31,2,1,""],has:[31,2,1,""],"__eq__":[31,2,1,""],any:[31,2,1,""],"__init__":[31,2,1,""]},"sqlalchemy.sql.expression.BinaryExpression":{compare:[30,2,1,""]},"sqlalchemy.sql.functions.session_user":{identifier:[41,4,1,""],type:[41,4,1,""],name:[41,4,1,""]},"sqlalchemy.dialects.mssql.CHAR":{"__init__":[9,2,1,""]},"sqlalchemy.dialects.mssql.NTEXT":{"__init__":[9,2,1,""]},"sqlalchemy.orm.exc":{ObjectDeletedError:[60,5,1,""],DetachedInstanceError:[60,5,1,""],MultipleResultsFound:[60,5,1,""],UnmappedClassError:[60,5,1,""],ObjectDereferencedError:[60,5,1,""],NoResultFound:[60,5,1,""],StaleDataError:[60,5,1,""],NO_STATE:[60,7,1,""],UnmappedInstanceError:[60,5,1,""],UnmappedColumnError:[60,5,1,""],FlushError:[60,5,1,""],UnmappedError:[60,5,1,""],ConcurrentModificationError:[60,4,1,""]},"sqlalchemy.types.Concatenable":{"__init__":[25,4,1,""]},"sqlalchemy.exc.StatementError":{params:[17,4,1,""],statement:[17,4,1,""],orig:[17,4,1,""]},"sqlalchemy.orm.state.AttributeState":{loaded_value:[31,4,1,""],value:[31,4,1,""],history:[31,4,1,""]},"sqlalchemy.types.DateTime":{"__init__":[25,2,1,""]},"sqlalchemy.schema.Sequence":{create:[10,2,1,""],drop:[10,2,1,""],next_value:[10,2,1,""],"__init__":[10,2,1,""]},"sqlalchemy.sql.compiler.IdentifierPreparer":{format_table:[27,2,1,""],quote_identifier:[27,2,1,""],format_column:[27,2,1,""],unformat_identifiers:[27,2,1,""],format_schema:[27,2,1,""],format_table_seq:[27,2,1,""],quote_schema:[27,2,1,""],"__init__":[27,2,1,""]},"sqlalchemy.orm.query.Query":{all:[37,2,1,""],select_entity_from:[37,2,1,""],exists:[37,2,1,""],intersect_all:[37,2,1,""],filter_by:[37,2,1,""],instances:[37,2,1,""],scalar:[37,2,1,""],from_statement:[37,2,1,""],value:[37,2,1,""],except_:[37,2,1,""],except_all:[37,2,1,""],select_from:[37,2,1,""],enable_assertions:[37,2,1,""],add_entity:[37,2,1,""],union_all:[37,2,1,""],autoflush:[37,2,1,""],enable_eagerloads:[37,2,1,""],slice:[37,2,1,""],execution_options:[37,2,1,""],cte:[37,2,1,""],union:[37,2,1,""],yield_per:[37,2,1,""],label:[37,2,1,""],with_entities:[37,2,1,""],whereclause:[37,4,1,""],group_by:[37,2,1,""],params:[37,2,1,""],intersect:[37,2,1,""],outerjoin:[37,2,1,""],limit:[37,2,1,""],with_polymorphic:[37,2,1,""],populate_existing:[37,2,1,""],with_transformation:[37,2,1,""],statement:[37,4,1,""],get:[37,2,1,""],update:[37,2,1,""],with_lockmode:[37,2,1,""],with_session:[37,2,1,""],add_column:[37,2,1,""],column_descriptions:[37,4,1,""],merge_result:[37,2,1,""],with_labels:[37,2,1,""],offset:[37,2,1,""],prefix_with:[37,2,1,""],selectable:[37,4,1,""],one:[37,2,1,""],count:[37,2,1,""],join:[37,2,1,""],correlate:[37,2,1,""],with_hint:[37,2,1,""],values:[37,2,1,""],having:[37,2,1,""],from_self:[37,2,1,""],add_columns:[37,2,1,""],reset_joinpoint:[37,2,1,""],filter:[37,2,1,""],distinct:[37,2,1,""],subquery:[37,2,1,""],with_parent:[37,2,1,""],first:[37,2,1,""],order_by:[37,2,1,""],options:[37,2,1,""],as_scalar:[37,2,1,""],"delete":[37,2,1,""]},"sqlalchemy.sql.functions.sum":{identifier:[41,4,1,""],name:[41,4,1,""]},"sqlalchemy.ext.serializer":{dumps:[15,3,1,""],Deserializer:[15,3,1,""],loads:[15,3,1,""],Serializer:[
15,3,1,""]},"sqlalchemy.sql.compiler.SQLCompiler":{update_from_clause:[27,2,1,""],update_tables_clause:[27,2,1,""],update_limit_clause:[27,2,1,""],returning:[27,4,1,""],construct_params:[27,2,1,""],ansi_bind_rules:[27,4,1,""],isinsert:[27,4,1,""],returning_precedes_values:[27,4,1,""],get_select_precolumns:[27,2,1,""],escape_literal_column:[27,2,1,""],params:[27,4,1,""],render_literal_value:[27,2,1,""],isdelete:[27,4,1,""],default_from:[27,2,1,""],isupdate:[27,4,1,""],render_table_with_column_in_update_from:[27,4,1,""],"__init__":[27,2,1,""]},"sqlalchemy.dialects.mysql.TIME":{"__init__":[5,2,1,""]},"sqlalchemy.orm.state.InstanceState":{identity:[31,4,1,""],mapper:[31,4,1,""],has_identity:[31,4,1,""],identity_key:[31,4,1,""],unmodified_intersection:[31,2,1,""],unmodified:[31,4,1,""],object:[31,4,1,""],persistent:[31,4,1,""],"transient":[31,4,1,""],session:[31,4,1,""],unloaded:[31,4,1,""],expired_attributes:[31,4,1,""],"__call__":[31,2,1,""],detached:[31,4,1,""],pending:[31,4,1,""],attrs:[31,4,1,""]},"sqlalchemy.dialects.mysql.MEDIUMINT":{"__init__":[5,2,1,""]},"sqlalchemy.sql.functions.ReturnTypeFromArgs":{identifier:[41,4,1,""],name:[41,4,1,""]},"sqlalchemy.types.PickleType":{impl:[25,4,1,""],"__init__":[25,2,1,""]},"sqlalchemy.sql.expression.ValuesBase":{values:[4,2,1,""]},"sqlalchemy.orm.attributes":{flag_modified:[49,3,1,""],init_collection:[49,3,1,""],QueryableAttribute:[31,1,1,""],get_attribute:[49,3,1,""],set_committed_value:[49,3,1,""],InstrumentedAttribute:[31,1,1,""],set_attribute:[49,3,1,""],get_history:[49,3,1,""],del_attribute:[49,3,1,""],instance_state:[49,3,1,""],History:[49,1,1,""]},"sqlalchemy.orm.state":{AttributeState:[31,1,1,""],InstanceState:[31,1,1,""]},"sqlalchemy.sql.expression.ClauseElement":{compare:[30,2,1,""],unique_params:[30,2,1,""],get_children:[30,2,1,""],compile:[30,2,1,""],params:[30,2,1,""],self_group:[30,2,1,""]},"sqlalchemy.sql.expression.Insert":{execute:[4,2,1,""],execution_options:[4,2,1,""],unique_params:[4,2,1,""],values:[4,2,1,""],returning:[4,2,1,""],from_select:[4,2,1,""],compare:[4,2,1,""],compile:[4,2,1,""],with_hint:[4,2,1,""],scalar:[4,2,1,""],params:[4,2,1,""],bind:[4,4,1,""],self_group:[4,2,1,""],prefix_with:[4,2,1,""]},"sqlalchemy.sql.functions.GenericFunction":{coerce_arguments:[41,4,1,""],identifier:[41,4,1,""],name:[41,4,1,""]},"sqlalchemy.dialects.mssql.IMAGE":{"__init__":[9,2,1,""]},"sqlalchemy.engine.default.DefaultExecutionContext":{set_input_sizes:[27,2,1,""],get_result_processor:[27,2,1,""],get_lastrowid:[27,2,1,""]},"sqlalchemy.orm.events.SessionEvents":{before_attach:[57,2,1,""],after_bulk_delete:[57,2,1,""],before_flush:[57,2,1,""],after_begin:[57,2,1,""],after_flush_postexec:[57,2,1,""],before_commit:[57,2,1,""],after_transaction_create:[57,2,1,""],after_soft_rollback:[57,2,1,""],after_attach:[57,2,1,""],after_rollback:[57,2,1,""],after_transaction_end:[57,2,1,""],after_flush:[57,2,1,""],after_bulk_update:[57,2,1,""],after_commit:[57,2,1,""]},"sqlalchemy.sql.expression.ColumnCollection":{add:[30,2,1,""],replace:[30,2,1,""]},"sqlalchemy.dialects.mysql.NCHAR":{"__init__":[5,2,1,""]},"sqlalchemy.dialects.mysql.NUMERIC":{"__init__":[5,2,1,""]},"sqlalchemy.dialects.mysql.MEDIUMTEXT":{"__init__":[5,2,1,""]}},titleterms:{all:[0,58,46,65,28,34],code:70,partial:51,consider:58,chain:28,queri:[37,58,21,46,65,49,28,61,67,12,75],"__table_args__":28,prefix:[58,34,28],correl:[58,33,64],informix:6,subclass:[74,46,65],implement:[14,3,28],bind_to:65,dynamic_load:65,row:[58,21,46,33],privat:28,depend:[21,46,64],deferredreflect:58,sensit:[5,10],gra
ph:42,send:28,program:46,present:28,passiv:3,sourc:8,onclaus:28,string:[25,46,73,28],fals:[74,46,67],adodbapi:9,util:49,mechan:58,pymysql:5,affect:28,recip:[25,28],magic:58,fromclaus:28,did:61,list:[58,21,42,61,46,43,28],scalar:[61,28,10,12,33,16],drop_al:[58,46],"try":46,item:[58,49],session:[58,61,46,65,49,28,72,32,57],refer:[61,64,43,16,46,28,29,72,33,63],round:25,direct:[72,42,28],sign:28,across:[58,28],pass:[46,28],innerjoin:67,further:[61,74,33],pyodbc:[5,9,22],useexist:28,compat:[5,9,73,66,28],index:[2,5,28,51,9,72],what:[58,65,8,67,28,49,36],sub:74,compar:[58,64],defin:[33,10,2,72,64],introspect:51,current:44,delet:[58,21,2,3,4,46,49,61,67,33],version:[61,42,5,28,70,33,34],succinct:28,pg8000:51,method:[58,61,65,49,28,70],metadata:[55,46,65,72,28],sybas:22,themselv:21,deriv:28,gener:[38,40,41,23,7,35,65,67,28,25,42,19],shouldn:46,coerc:[25,16],column_properti:[34,72],path:[21,28],noload:[3,67],modifi:29,implicit:[26,49],valu:[12,64,16,28],"0b4":19,unconsum:58,undefer_group:65,precis:66,datetim:73,orm:[37,20,46,2,19,23,38,62,35,7,58,67,8,60,53,31,40,32,28,57],contains_eag:[58,28],opinion:61,chang:[58,44,65,8,67,28,74,34],keep_exist:28,overrid:[0,25],via:[70,2,3,28],eagerload_al:[65,67,28],appli:[58,25],"__abstract__":72,offset:[9,33,66,28],ask:[46,49],unix:51,api:[2,10,12,13,14,26,29,34,16,36,21,43,45,46,47,48,49,55,56,63,64,65,67,28,25,70,72],uniqu:[46,2],instal:[70,28],coercion:58,unit:67,aggress:58,from:[58,21,64,61,46,65,49,51,72,34,76],describ:55,memori:73,distinct:[64,28],regist:26,two:[65,66,49,28],connector:5,concret:[72,75],call:46,bindparam:28,scope:49,type:[58,3,25,5,65,66,67,8,51,9,11,73,74,28,34],more:[58,61,65,28],sort:46,abspath:28,claus:[65,28],notic:51,enhanc:[58,67,28],warn:[58,28],appendix:24,indic:18,particular:58,known:[5,9],cach:[58,67,49,42],must:46,none:[46,67,28],join:[21,64,61,46,65,49,28,75,67,72,33,36],cymysql:5,augment:[25,72],alia:[58,65,28],setup:[70,28],work:[58,61,26,3,64,46,67,28],annot:3,descriptor:34,can:[58,46,49,28],typedecor:[25,65,28],assignmapp:65,fetch:67,control:[56,75,49,72],defer:[34,72],sqlite:[58,38,40,19,23,7,35,46,48,28,73],process:[25,26],lock:[59,73],registr:29,shard:[45,65,42],accept:[58,65,8,67,28],liter:61,recycl:14,rollback:[58,46],serial:[51,15],nvarchar:28,paradigm:65,alwai:[58,46,28],multipl:[21,46,28,72,73,33,34],hoc:49,turn:[46,28],get:[20,46,65,49,28],of_typ:58,how:[21,46,49],criterion:49,instead:58,simpl:[34,49],updat:[58,21,2,4,65,66,49,51,10,67,33,34],map:[61,42,46,65,8,28,34,75],product:71,referenc:65,max:28,after:58,"7p1":23,reflect:[0,5,65,66,67,72,34],befor:58,dynamicmetadata:65,mixin:72,date:[58,9,73],googl:5,underscor:28,philosophi:61,data:[25,5,66,51,9,11,73],hstore:[58,51],bind:[25,9,33,65,28],counter:34,explicit:[61,36,28],element:[74,30,67,28],issu:[5,9,46],alias:[61,33,65,28],type_:65,combin:72,allow:28,subtyp:75,callabl:28,eagerload:[61,67,28],comparable_properti:28,order:[46,9,33,43],get_primary_kei:58,composit:[21,34,65,28,12,16],cx_oracl:66,repair:58,offici:67,move:[65,8],zen:36,boundmetadata:65,isnt:46,through:[55,28],hierarchi:75,still:46,dynam:[67,3,42],paramet:[33,65,28],style:5,outer:46,psycopg2:51,fix:[58,65],better:65,platform:[58,70,67],window:[33,28],pend:58,persist:42,alter:[55,46,2],non:[61,28],"return":[61,46,67,28,59,51],zxjdbc:[5,51,9,66],python:[58,22,5,46,10,11,70],auto:[58,65,66,28,9,73],savepoint:[65,49],supersed:28,initi:34,disabl:58,apply_label:58,after_attach:58,compound:67,now:[58,65,67,28],group:33,introduct:[58,28],subqueri:[58,61,64,65,67,28,33],name:[58,55,34,67,28],changelog:[1,38
,39,40,19,23,7,35],level:[58,25,5,51,9,73,34],refresh:49,mode:[9,67,49],timeout:5,fulli:58,unicod:[22,25,5,66,67,28,51,9,73],side:10,compil:[25,74],domain:51,dialect:[58,20,26,67,28,59,74,71],replac:[61,25],realli:46,drizzl:11,connect:[59,46,22,61,5,6,26,65,66,68,50,51,9,8,11,73,28,14,33,48],year:58,event:[58,65,68,50,29,14,28,32,57],special:72,out:65,joinedload:[58,61,46,67],subqueryload:58,foolproof:65,content:18,rel:28,merg:[49,28],correct:46,integr:42,proxi:[12,72,28],insid:28,advanc:75,migrat:[44,55,70],hang:46,standard:25,extend_exist:28,standalon:28,base:[3,65,49,28,25,72,12],dictionari:[12,3,42],postgi:42,log:[44,46,48,67,28,51],or_:46,thread:[49,73],instrumentationev:58,filter:[61,46,49],thing:65,length:[5,28],place:[34,28],"4p2":7,"4p1":7,app:5,onto:65,assign:28,frequent:[46,49],first:[65,28],oper:[58,61,25,46,65,28,51,33,34],major:8,isolation_level:28,onc:0,arrai:[58,28],refin:28,misc:[38,40,19,23,7,35],done:28,messag:46,open:67,primari:[21,46,75,67,28],given:46,rewritten:[58,28],associ:[12,21,58,42,72],system:[58,65,67,28],construct:[37,58,46,65,49,28,67,14,56,74],configure_mapp:28,statement:[51,33,47,36],scheme:34,listen:28,archictur:67,option:[5,65,51,9,55,34],relationship:[58,21,64,3,61,46,67,75,72,36],gotcha:67,specifi:[21,55],part:28,kind:[46,36],unmap:58,target:[58,13,29,28],keyword:[46,65,28],cyclic:65,remot:51,remov:[58,46,8,67,28],sqlsoup:[58,67],horizont:[45,65,49,42],becom:58,optimist:14,stricter:67,schema:[38,40,19,23,7,35,46,65,8,67,50,51,52,55],sai:46,pro:61,argument:[58,21,46,65,66,49,48,59,51,72,28],packag:58,pessimist:14,expir:49,have:[46,28],tabl:[0,21,61,5,46,65,67,28,51,75,72,73,55,33,34,18,76],need:3,incompat:28,dbapi:[22,5,6,46,66,48,59,51,9,11,73],engin:[38,46,40,19,5,35,26,65,48],single_par:58,self:[21,65],"switch":14,mix:72,exampl:[58,42,28,9,70,74],build:[28,21,64,61],which:[46,75],singl:[72,75],simplifi:[12,28],subtransact:49,"enum":67,normal:28,track:[16,34,28],object:[0,37,64,58,42,21,46,65,66,49,8,61,10,12,33,34,76],oracl:[38,40,19,23,7,35,65,66,48],most:[58,28],server_default:28,myisam:46,phase:[65,66,49],"class":[58,61,34,46,51,72,63,75],marshal:25,don:28,connectionless:26,secondaryjoin:46,url:48,doe:[58,46,49],declar:[58,61,40,19,23,7,35,46,67,28,72,2,75],snapshot:9,create_al:58,pattern:[21,61],microsoft:[9,48],serializ:73,text:33,inserted_primary_kei:28,concurr:[34,73],longer:[8,28],fine:0,setter:64,xml:42,access:[55,3,49,72],onli:[58,51,46],slow:46,synonym:[34,66,28],copyright:24,transact:[26,5,46,65,66,49,51,73,34],configur:[58,21,3,61,46,48,49,28,72,14,34],enough:46,should:28,experiment:71,outerjoin:28,local:49,kinterbasdb:59,polymorphic_on:28,distinctli:34,express:[58,33,64,10,56,46,47,49,67,30,65,72,74,15,34],autom:34,nativ:[67,73],made:58,"new":[58,61,26,65,8,49,28,25,67,12],requir:[8,28],dirti:58,mapper:[32,34,72,57,28],enabl:[21,9,74,49],eagerli:36,"public":28,provid:46,common:61,partit:49,contain:46,oursql:5,where:46,view:[0,46],"2p3":23,set:[2,3,42,5,46,14,33],foo_id:46,orphan:[58,61,28],mutabl:[21,16,67,28],full:61,result:[58,25,67],arg:[8,28],reserv:46,close:49,charact:5,polymorph:[65,42],statu:59,detect:[58,28],extend:8,databas:[0,46,48,28,70,73,55],label:[65,28],state:[61,49],between:[58,74],"import":[65,67,28],awai:46,entiti:61,approach:72,instrumentationmanag:58,speed:28,attribut:[58,64,42,46,49,28,72,32,34,57],altern:[58,21,63,49],bound:65,kei:[58,21,2,5,46,67,28,73,75],numer:[25,66],weak:65,conjunct:33,isol:[5,51,9,73],extens:[58,2,5,67,28,70,53,74,15],lazi:[46,67,36],distinguish:58,disconnect:14,addit:[9,66,49],last:28,plugin:
28,against:[46,34],foreign:[5,21,46,2,73],etc:46,tutori:[61,33],grain:0,context:[58,10],improv:28,col:46,con:61,load:[21,61,46,65,67,75,34,36],point:21,guidelin:74,"_type_map":28,insist:46,loader:[3,36],pymssql:9,guid:[44,25,70],backend:[25,55],vertic:[49,42],creat:[21,2,61,46,65,49,25,72,55,33,75],eager:[21,61,65,67,36,75],union:[33,65,67],due:46,been:[58,46],json:25,trigger:[9,10,46],basic:[21,26,75],"0rc3":7,"0rc2":7,"0rc1":7,"__len__":46,"0rc4":7,fire:58,"__table__":72,ani:58,understand:26,rang:51,driver:73,execution_opt:28,those:46,"case":[5,58,66],determinist:65,multi:65,compile_mapp:28,raw:46,plain:[14,34],properti:[21,34,65],contextu:49,cursor:68,cast:5,"while":46,executemani:67,behavior:[58,64,46,65,66,67,28,59,9,73,34],error:[58,46],glossari:69,increment:[9,66,73],helper:72,"0beta5":23,readi:71,cluster:9,type_map:28,synopsi:[74,72],inspector:[0,58,67],"__init__":[61,46],parent:[46,28],nullpool:28,decor:3,"null":28,mutat:[16,34,28],"0beta2":[23,7],minim:61,make:[46,49],referenti:[21,65],cross:[51,74],same:28,member:28,handl:[21,9,14],instanc:[61,46,57],timestamp:74,sqlite3:73,document:[58,20,45,65,8,67,28,70,12,14],conflict:72,safe:49,twophasetransact:65,nest:[26,65,42],pysqlit:73,upon:46,capabl:67,temporari:73,mani:[21,46,72,67,61],extern:[65,71,49],joinedload_al:67,collat:[58,9],typic:46,techniqu:[3,36],redefin:[25,34],off:[46,28],older:44,quicki:49,parenthes:67,inherit:[75,65,67,42,72],honor:58,pickl:16,without:46,greatest:74,thi:[58,46,65,8,67,28],deferr:34,everyth:33,dogpil:[58,42],rout:36,identifi:66,execut:[26,65,68,50,51,9,10,28,33],hybrid:[64,34,72,28],expung:49,mysql:[58,38,40,19,23,7,5,35,46,48,28,11],correlate_except:58,pickletyp:28,languag:[52,33,67],previous:28,web:49,miscellan:67,before_attach:58,nullabl:9,percent:28,populate_exist:65,except:[58,46,28,17,60],dblink:66,instrument:[58,57,63,3,42],add:46,other:[46,48,67,28,72,33],overview:70,els:33,begin_nest:58,sessioncontext:65,modul:[58,65,28],match:33,"0beta4":23,applic:49,"0beta6":23,varchar:28,"0beta1":[23,7],mapperproperti:72,"0beta3":[23,7],agnost:25,is_modifi:58,know:61,bit:28,scopedsess:28,insert:[4,46,65,49,51,10,33],like:58,specif:[37,58,25,5,67,28,51,9,55,74,75],deprec:[58,65,8,67,68,32,28],arbitrari:34,and_:46,integ:28,server:[9,10,48,46],collect:[3,42,46,65,49,28,12,36],primaryjoin:46,cascad:[21,46,66,49,61],output:46,manag:49,drop:[55,46,2],deal:14,interv:51,creation:[12,61,48],"0b3":19,back:[58,61,46,49],resolv:72,intern:[27,3,31,28],previou:46,resultproxi:65,flush:[46,65,49,28],autocommit:[74,26,49],scale:65,informixdb:6,establish:16,definit:52,per:51,when:[58,46,2,49,28],larg:[3,42],select:[58,61,65,67,28,33,34,76],ddl:[74,56,28],condit:21,foo:46,nose:28,core:[58,20,27,54,28,50,17],get_pk_constraint:58,word:46,inspect:[58,13],usag:26,step:65,isn:46,nestedtransact:65,mssql:[38,40,19,23,7,35,9],beaker:[67,28],comparison:34,about:[58,46,65,8,67,28],postgresql:[58,40,19,7,35,48,28,51],firebird:[38,40,19,23,7,35,59],constraint:[51,2],column:[0,58,10,16,46,28,25,30,72,55,34],varbinari:28,rowcount:5,constructor:[34,72,28],commit:[46,65,49],produc:46,block:26,everi:46,subset:34,mutabletyp:58,backref:[21,49],encod:25,easy_instal:70,automat:[46,65],upgrad:46,myobject:46,empti:58,sessionmak:49,column_reflect:58,storag:5,automag:46,with_polymorph:58,lob:66,"6beta2":40,"6beta3":40,wai:[21,46],"6beta1":40,support:[58,22,5,6,46,66,67,48,59,51,9,70,11,73,16],question:[46,49],transform:64,textual:46,custom:[21,64,3,56,46,65,48,49,25,72,14,33,34,74],avail:[13,14],start:20,reli:46,interfac:[32,68],includ:[9,71],"functio
n":[2,41,46,65,28,10,14,33,74],form:28,tupl:28,sqlalchemi:[58,20,62,46,65,8,54,28,70,67],link:21,renam:[65,8,28],inlin:28,"true":[67,28],exc:58,count:[61,33,34,28],utc:74,fdb:59,"0b1":[35,19],"__declare_last__":72,consist:[67,28],"0b2":[35,19],"default":[61,65,8,28,10,74,36],limit:[0,5,46,66,28,9,33],embed:[65,49],strategi:[21,26,36,49],similar:46,emit:28,superclass:58,gone:46,featur:[5,58,28],evalu:28,classic:34,certain:[46,49],dure:46,threadloc:[26,8],utilit:49,incomplet:71,intro:49,exist:[61,74,49,25],file:28,pip:70,ing:33,check:[70,61,33,2,28],again:[46,65],mutual:21,in_:[46,65],quot:5,tip:49,virtual:28,vendor:25,valid:34,test:46,you:61,roll:[58,61,46,49],runtim:13,relat:[21,42,61,65,8,67,36],mxodbc:[9,22],sequenc:[51,10,56,65],why:46,ansi:5,insensit:58,adjac:[21,42],sql:[5,9,10,19,33,34,38,40,41,23,7,35,46,47,48,49,50,58,61,65,28,25,72,74],pool:[5,46,65,48,68,50,8,73,14,28],faster:65,anywher:58,space:28,descript:61,rule:74,time:[58,9,73,28],escap:28,backward:28}})SQLAlchemy-0.8.4/examples/0000755000076500000240000000000012251151573016051 5ustar classicstaff00000000000000SQLAlchemy-0.8.4/examples/__init__.py0000644000076500000240000000000012251147171020147 0ustar classicstaff00000000000000SQLAlchemy-0.8.4/examples/adjacency_list/0000755000076500000240000000000012251151573021025 5ustar classicstaff00000000000000SQLAlchemy-0.8.4/examples/adjacency_list/__init__.py0000644000076500000240000000040012251150015023116 0ustar classicstaff00000000000000""" An example of a dictionary-of-dictionaries structure mapped using an adjacency list model. E.g.:: node = TreeNode('rootnode') node.append('node1') node.append('node3') session.add(node) session.commit() dump_tree(node) """ SQLAlchemy-0.8.4/examples/adjacency_list/adjacency_list.py0000644000076500000240000000736012251150015024347 0ustar classicstaff00000000000000from sqlalchemy import MetaData, Table, Column, Sequence, ForeignKey,\ Integer, String, create_engine from sqlalchemy.orm import sessionmaker, relationship, backref,\ joinedload_all from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm.collections import attribute_mapped_collection Base = declarative_base() class TreeNode(Base): __tablename__ = 'tree' id = Column(Integer, primary_key=True) parent_id = Column(Integer, ForeignKey(id)) name = Column(String(50), nullable=False) children = relationship("TreeNode", # cascade deletions cascade="all", # many to one + adjacency list - remote_side # is required to reference the 'remote' # column in the join condition. backref=backref("parent", remote_side=id), # children will be represented as a dictionary # on the "name" attribute. collection_class=attribute_mapped_collection('name'), ) def __init__(self, name, parent=None): self.name = name self.parent = parent def __repr__(self): return "TreeNode(name=%r, id=%r, parent_id=%r)" % ( self.name, self.id, self.parent_id ) def dump(self, _indent=0): return " " * _indent + repr(self) + \ "\n" + \ "".join([ c.dump(_indent +1) for c in self.children.values()] ) if __name__ == '__main__': engine = create_engine('sqlite://', echo=True) def msg(msg, *args): msg = msg % args print "\n\n\n" + "-" * len(msg.split("\n")[0]) print msg print "-" * len(msg.split("\n")[0]) msg("Creating Tree Table:") Base.metadata.create_all(engine) # session. using expire_on_commit=False # so that the session's contents are not expired # after each transaction commit. 
session = sessionmaker(engine, expire_on_commit=False)() node = TreeNode('rootnode') TreeNode('node1', parent=node) TreeNode('node3', parent=node) node2 = TreeNode('node2') TreeNode('subnode1', parent=node2) node.children['node2'] = node2 TreeNode('subnode2', parent=node.children['node2']) msg("Created new tree structure:\n%s", node.dump()) msg("flush + commit:") session.add(node) session.commit() msg("Tree After Save:\n %s", node.dump()) TreeNode('node4', parent=node) TreeNode('subnode3', parent=node.children['node4']) TreeNode('subnode4', parent=node.children['node4']) TreeNode('subsubnode1', parent=node.children['node4'].children['subnode3']) # mark node1 as deleted and remove session.delete(node.children['node1']) msg("Removed node1. flush + commit:") session.commit() # expire the "children" collection so that # it reflects the deletion of "node1". session.expire(node, ['children']) msg("Tree after save:\n %s", node.dump()) msg("Emptying out the session entirely, " "selecting tree on root, using eager loading to join four levels deep.") session.expunge_all() node = session.query(TreeNode).\ options(joinedload_all("children", "children", "children", "children")).\ filter(TreeNode.name=="rootnode").\ first() msg("Full Tree:\n%s", node.dump()) msg( "Marking root node as deleted, flush + commit:" ) session.delete(node) session.commit() SQLAlchemy-0.8.4/examples/association/0000755000076500000240000000000012251151573020365 5ustar classicstaff00000000000000SQLAlchemy-0.8.4/examples/association/__init__.py0000644000076500000240000000163212251150015022466 0ustar classicstaff00000000000000""" Examples illustrating the usage of the "association object" pattern, where an intermediary class mediates the relationship between two classes that are associated in a many-to-many pattern. This directory includes the following examples: * basic_association.py - illustrate a many-to-many relationship between an "Order" and a collection of "Item" objects, associating a purchase price with each via an association object called "OrderItem" * proxied_association.py - same example as basic_association, adding in usage of :mod:`sqlalchemy.ext.associationproxy` to make explicit references to "OrderItem" optional. * dict_of_sets_with_default.py - an advanced association proxy example which illustrates nesting of association proxies to produce multi-level Python collections, in this case a dictionary with string keys and sets of integers as values, which conceal the underlying mapped classes. """SQLAlchemy-0.8.4/examples/association/basic_association.py0000644000076500000240000000602012251150015024400 0ustar classicstaff00000000000000"""A basic example of using the association object pattern. The association object pattern is a form of many-to-many which associates additional data with each association between parent/child. The example illustrates an "order", referencing a collection of "items", with a particular price paid associated with each "item". 
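As a minimal sketch of the pattern, using the classes defined in this
module (the customer name and catalog items below are arbitrary), an order
is built up by appending "OrderItem" associations, each carrying its own
price which defaults to the item's catalog price when not given::

    order = Order('jack')
    order.order_items.append(OrderItem(Item('SA Mug', 6.50)))
    order.order_items.append(OrderItem(Item('SA Hat', 8.99), 7.99))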
""" from datetime import datetime from sqlalchemy import (create_engine, MetaData, Table, Column, Integer, String, DateTime, Float, ForeignKey, and_) from sqlalchemy.orm import mapper, relationship, Session from sqlalchemy.ext.declarative import declarative_base Base = declarative_base() class Order(Base): __tablename__ = 'order' order_id = Column(Integer, primary_key=True) customer_name = Column(String(30), nullable=False) order_date = Column(DateTime, nullable=False, default=datetime.now()) order_items = relationship("OrderItem", cascade="all, delete-orphan", backref='order') def __init__(self, customer_name): self.customer_name = customer_name class Item(Base): __tablename__ = 'item' item_id = Column(Integer, primary_key=True) description = Column(String(30), nullable=False) price = Column(Float, nullable=False) def __init__(self, description, price): self.description = description self.price = price def __repr__(self): return 'Item(%r, %r)' % ( self.description, self.price ) class OrderItem(Base): __tablename__ = 'orderitem' order_id = Column(Integer, ForeignKey('order.order_id'), primary_key=True) item_id = Column(Integer, ForeignKey('item.item_id'), primary_key=True) price = Column(Float, nullable=False) def __init__(self, item, price=None): self.item = item self.price = price or item.price item = relationship(Item, lazy='joined') if __name__ == '__main__': engine = create_engine('sqlite://') Base.metadata.create_all(engine) session = Session(engine) # create catalog tshirt, mug, hat, crowbar = ( Item('SA T-Shirt', 10.99), Item('SA Mug', 6.50), Item('SA Hat', 8.99), Item('MySQL Crowbar', 16.99) ) session.add_all([tshirt, mug, hat, crowbar]) session.commit() # create an order order = Order('john smith') # add three OrderItem associations to the Order and save order.order_items.append(OrderItem(mug)) order.order_items.append(OrderItem(crowbar, 10.99)) order.order_items.append(OrderItem(hat)) session.add(order) session.commit() # query the order, print items order = session.query(Order).filter_by(customer_name='john smith').one() print [(order_item.item.description, order_item.price) for order_item in order.order_items] # print customers who bought 'MySQL Crowbar' on sale q = session.query(Order).join('order_items', 'item') q = q.filter(and_(Item.description == 'MySQL Crowbar', Item.price > OrderItem.price)) print [order.customer_name for order in q] SQLAlchemy-0.8.4/examples/association/dict_of_sets_with_default.py0000644000076500000240000000477712251150015026150 0ustar classicstaff00000000000000"""Illustrate a 'dict of sets of integers' model. This is a three table model which represents a parent table referencing a dictionary of string keys and sets as values, where each set stores a collection of integers. The association proxy extension is used to hide the details of this persistence. The dictionary also generates new collections upon access of a non-existent key, in the same manner as Python's "collections.defaultdict" object. 
""" from sqlalchemy import String, Integer, Column, create_engine, ForeignKey from sqlalchemy.orm import relationship, Session from sqlalchemy.orm.collections import MappedCollection from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.ext.associationproxy import association_proxy import operator class Base(object): id = Column(Integer, primary_key=True) Base = declarative_base(cls=Base) class GenDefaultCollection(MappedCollection): def __missing__(self, key): self[key] = b = B(key) return b class A(Base): __tablename__ = "a" associations = relationship("B", collection_class=lambda: GenDefaultCollection(operator.attrgetter("key")) ) collections = association_proxy("associations", "values") """Bridge the association from 'associations' over to the 'values' association proxy of B. """ class B(Base): __tablename__ = "b" a_id = Column(Integer, ForeignKey("a.id"), nullable=False) elements = relationship("C", collection_class=set) key = Column(String) values = association_proxy("elements", "value") """Bridge the association from 'elements' over to the 'value' element of C.""" def __init__(self, key, values=None): self.key = key if values: self.values = values class C(Base): __tablename__ = "c" b_id = Column(Integer, ForeignKey("b.id"), nullable=False) value = Column(Integer) def __init__(self, value): self.value = value if __name__ == '__main__': engine = create_engine('sqlite://', echo=True) Base.metadata.create_all(engine) session = Session(engine) # only "A" is referenced explicitly. Using "collections", # we deal with a dict of key/sets of integers directly. session.add_all([ A(collections={ "1": set([1, 2, 3]), }) ]) session.commit() a1 = session.query(A).first() print a1.collections["1"] a1.collections["1"].add(4) session.commit() a1.collections["2"].update([7, 8, 9]) session.commit() print a1.collections["2"] SQLAlchemy-0.8.4/examples/association/proxied_association.py0000644000076500000240000000627612251150015025006 0ustar classicstaff00000000000000"""An extension to the basic_association.py example, which illustrates the usage of sqlalchemy.ext.associationproxy. 
""" from datetime import datetime from sqlalchemy import (create_engine, MetaData, Table, Column, Integer, String, DateTime, Float, ForeignKey, and_) from sqlalchemy.orm import mapper, relationship, Session from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.ext.associationproxy import association_proxy Base = declarative_base() class Order(Base): __tablename__ = 'order' order_id = Column(Integer, primary_key=True) customer_name = Column(String(30), nullable=False) order_date = Column(DateTime, nullable=False, default=datetime.now()) order_items = relationship("OrderItem", cascade="all, delete-orphan", backref='order') items = association_proxy("order_items", "item") def __init__(self, customer_name): self.customer_name = customer_name class Item(Base): __tablename__ = 'item' item_id = Column(Integer, primary_key=True) description = Column(String(30), nullable=False) price = Column(Float, nullable=False) def __init__(self, description, price): self.description = description self.price = price def __repr__(self): return 'Item(%r, %r)' % ( self.description, self.price ) class OrderItem(Base): __tablename__ = 'orderitem' order_id = Column(Integer, ForeignKey('order.order_id'), primary_key=True) item_id = Column(Integer, ForeignKey('item.item_id'), primary_key=True) price = Column(Float, nullable=False) def __init__(self, item, price=None): self.item = item self.price = price or item.price item = relationship(Item, lazy='joined') if __name__ == '__main__': engine = create_engine('sqlite://') Base.metadata.create_all(engine) session = Session(engine) # create catalog tshirt, mug, hat, crowbar = ( Item('SA T-Shirt', 10.99), Item('SA Mug', 6.50), Item('SA Hat', 8.99), Item('MySQL Crowbar', 16.99) ) session.add_all([tshirt, mug, hat, crowbar]) session.commit() # create an order order = Order('john smith') # add items via the association proxy. # the OrderItem is created automatically. order.items.append(mug) order.items.append(hat) # add an OrderItem explicitly. order.order_items.append(OrderItem(crowbar, 10.99)) session.add(order) session.commit() # query the order, print items order = session.query(Order).filter_by(customer_name='john smith').one() # print items based on the OrderItem collection directly print [(assoc.item.description, assoc.price, assoc.item.price) for assoc in order.order_items] # print items based on the "proxied" items collection print [(item.description, item.price) for item in order.items] # print customers who bought 'MySQL Crowbar' on sale orders = session.query(Order).\ join('order_items', 'item').\ filter(Item.description == 'MySQL Crowbar').\ filter(Item.price > OrderItem.price) print [o.customer_name for o in orders] SQLAlchemy-0.8.4/examples/custom_attributes/0000755000076500000240000000000012251151573021631 5ustar classicstaff00000000000000SQLAlchemy-0.8.4/examples/custom_attributes/__init__.py0000644000076500000240000000144712251150015023736 0ustar classicstaff00000000000000""" Two examples illustrating modifications to SQLAlchemy's attribute management system. ``listen_for_events.py`` illustrates the usage of :class:`~sqlalchemy.orm.interfaces.AttributeExtension` to intercept attribute events. It additionally illustrates a way to automatically attach these listeners to all class attributes using a :class:`.InstrumentationManager`. ``custom_management.py`` illustrates much deeper usage of :class:`.InstrumentationManager` as well as collection adaptation, to completely change the underlying method used to store state on an object. 
This example was developed to illustrate techniques which would be used by other third party object instrumentation systems to interact with SQLAlchemy's event system and is only intended for very intricate framework integrations. """SQLAlchemy-0.8.4/examples/custom_attributes/custom_management.py0000644000076500000240000000573412251147171025721 0ustar classicstaff00000000000000"""Illustrates customized class instrumentation, using the :mod:`sqlalchemy.ext.instrumentation` extension package. In this example, mapped classes are modified to store their state in a dictionary attached to an attribute named "_goofy_dict", instead of using __dict__. this example illustrates how to replace SQLAlchemy's class descriptors with a user-defined system. """ from sqlalchemy import create_engine, MetaData, Table, Column, Integer, Text,\ ForeignKey from sqlalchemy.orm import mapper, relationship, Session from sqlalchemy.orm.attributes import set_attribute, get_attribute, \ del_attribute from sqlalchemy.orm.instrumentation import is_instrumented from sqlalchemy.ext.instrumentation import InstrumentationManager class MyClassState(InstrumentationManager): def get_instance_dict(self, class_, instance): return instance._goofy_dict def initialize_instance_dict(self, class_, instance): instance.__dict__['_goofy_dict'] = {} def install_state(self, class_, instance, state): instance.__dict__['_goofy_dict']['state'] = state def state_getter(self, class_): def find(instance): return instance.__dict__['_goofy_dict']['state'] return find class MyClass(object): __sa_instrumentation_manager__ = MyClassState def __init__(self, **kwargs): for k in kwargs: setattr(self, k, kwargs[k]) def __getattr__(self, key): if is_instrumented(self, key): return get_attribute(self, key) else: try: return self._goofy_dict[key] except KeyError: raise AttributeError(key) def __setattr__(self, key, value): if is_instrumented(self, key): set_attribute(self, key, value) else: self._goofy_dict[key] = value def __delattr__(self, key): if is_instrumented(self, key): del_attribute(self, key) else: del self._goofy_dict[key] if __name__ == '__main__': engine = create_engine('sqlite://') meta = MetaData() table1 = Table('table1', meta, Column('id', Integer, primary_key=True), Column('name', Text)) table2 = Table('table2', meta, Column('id', Integer, primary_key=True), Column('name', Text), Column('t1id', Integer, ForeignKey('table1.id'))) meta.create_all(engine) class A(MyClass): pass class B(MyClass): pass mapper(A, table1, properties={ 'bs': relationship(B) }) mapper(B, table2) a1 = A(name='a1', bs=[B(name='b1'), B(name='b2')]) assert a1.name == 'a1' assert a1.bs[0].name == 'b1' sess = Session(engine) sess.add(a1) sess.commit() a1 = sess.query(A).get(a1.id) assert a1.name == 'a1' assert a1.bs[0].name == 'b1' a1.bs.remove(a1.bs[0]) sess.commit() a1 = sess.query(A).get(a1.id) assert len(a1.bs) == 1 SQLAlchemy-0.8.4/examples/custom_attributes/listen_for_events.py0000644000076500000240000000400212251150015025715 0ustar classicstaff00000000000000""" Illustrates how to attach events to all instrumented attributes and listen for change events. 
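The key steps, condensed from the code below, are::

    from sqlalchemy import event

    def configure_listener(class_, key, inst):
        def set_(instance, value, oldvalue, initiator):
            instance.receive_change_event("set", key, value, oldvalue)
        event.listen(inst, 'set', set_)

    # apply the listener factory to every attribute of every
    # class descending from Base
    event.listen(Base, 'attribute_instrument', configure_listener)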
""" from sqlalchemy import event def configure_listener(class_, key, inst): def append(instance, value, initiator): instance.receive_change_event("append", key, value, None) def remove(instance, value, initiator): instance.receive_change_event("remove", key, value, None) def set_(instance, value, oldvalue, initiator): instance.receive_change_event("set", key, value, oldvalue) event.listen(inst, 'append', append) event.listen(inst, 'remove', remove) event.listen(inst, 'set', set_) if __name__ == '__main__': from sqlalchemy import Column, Integer, String, ForeignKey from sqlalchemy.orm import relationship from sqlalchemy.ext.declarative import declarative_base class Base(object): def receive_change_event(self, verb, key, value, oldvalue): s = "Value '%s' %s on attribute '%s', " % (value, verb, key) if oldvalue: s += "which replaced the value '%s', " % oldvalue s += "on object %s" % self print s Base = declarative_base(cls=Base) event.listen(Base, 'attribute_instrument', configure_listener) class MyMappedClass(Base): __tablename__ = "mytable" id = Column(Integer, primary_key=True) data = Column(String(50)) related_id = Column(Integer, ForeignKey("related.id")) related = relationship("Related", backref="mapped") def __str__(self): return "MyMappedClass(data=%r)" % self.data class Related(Base): __tablename__ = "related" id = Column(Integer, primary_key=True) data = Column(String(50)) def __str__(self): return "Related(data=%r)" % self.data # classes are instrumented. Demonstrate the events ! m1 = MyMappedClass(data='m1', related=Related(data='r1')) m1.data = 'm1mod' m1.related.mapped.append(MyMappedClass(data='m2')) del m1.data SQLAlchemy-0.8.4/examples/dogpile_caching/0000755000076500000240000000000012251151573021150 5ustar classicstaff00000000000000SQLAlchemy-0.8.4/examples/dogpile_caching/__init__.py0000644000076500000240000000577712251150015023267 0ustar classicstaff00000000000000""" Illustrates how to embed `dogpile.cache `_ functionality within the :class:`.Query` object, allowing full cache control as well as the ability to pull "lazy loaded" attributes from long term cache as well. .. versionchanged:: 0.8 The example was modernized to use dogpile.cache, replacing Beaker as the caching library in use. In this demo, the following techniques are illustrated: * Using custom subclasses of :class:`.Query` * Basic technique of circumventing Query to pull from a custom cache source instead of the database. * Rudimental caching with dogpile.cache, using "regions" which allow global control over a fixed set of configurations. * Using custom :class:`.MapperOption` objects to configure options on a Query, including the ability to invoke the options deep within an object graph when lazy loads occur. E.g.:: # query for Person objects, specifying cache q = Session.query(Person).options(FromCache("default")) # specify that each Person's "addresses" collection comes from # cache too q = q.options(RelationshipCache(Person.addresses, "default")) # query print q.all() To run, both SQLAlchemy and dogpile.cache must be installed or on the current PYTHONPATH. The demo will create a local directory for datafiles, insert initial data, and run. Running the demo a second time will utilize the cache files already present, and exactly one SQL statement against two tables will be emitted - the displayed result however will utilize dozens of lazyloads that all pull from cache. 
The demo scripts themselves, in order of complexity, are run as Python modules so that relative imports work:: python -m examples.dogpile_caching.helloworld python -m examples.dogpile_caching.relationship_caching python -m examples.dogpile_caching.advanced python -m examples.dogpile_caching.local_session_caching Listing of files: environment.py - Establish the Session, a dictionary of "regions", a sample cache region against a .dbm file, data / cache file paths, and configurations, bootstrap fixture data if necessary. caching_query.py - Represent functions and classes which allow the usage of Dogpile caching with SQLAlchemy. Introduces a query option called FromCache. model.py - The datamodel, which represents Person that has multiple Address objects, each with PostalCode, City, Country fixture_data.py - creates demo PostalCode, Address, Person objects in the database. helloworld.py - the basic idea. relationship_caching.py - Illustrates how to add cache options on relationship endpoints, so that lazyloads load from cache. advanced.py - Further examples of how to use FromCache. Combines techniques from the first two scripts. local_session_caching.py - Grok everything so far ? This example creates a new dogpile.cache backend that will persist data in a dictionary which is local to the current session. remove() the session and the cache is gone. """ SQLAlchemy-0.8.4/examples/dogpile_caching/advanced.py0000644000076500000240000000557112251150015023265 0ustar classicstaff00000000000000"""advanced.py Illustrate usage of Query combined with the FromCache option, including front-end loading, cache invalidation and collection caching. """ from .environment import Session from .model import Person, cache_address_bits from .caching_query import FromCache, RelationshipCache def load_name_range(start, end, invalidate=False): """Load Person objects on a range of names. start/end are integers, range is then "person " - "person ". The cache option we set up is called "name_range", indicating a range of names for the Person class. The `Person.addresses` collections are also cached. Its basically another level of tuning here, as that particular cache option can be transparently replaced with joinedload(Person.addresses). The effect is that each Person and their Address collection is cached either together or separately, affecting the kind of SQL that emits for unloaded Person objects as well as the distribution of data within the cache. """ q = Session.query(Person).\ filter(Person.name.between("person %.2d" % start, "person %.2d" % end)).\ options(cache_address_bits).\ options(FromCache("default", "name_range")) # have the "addresses" collection cached separately # each lazyload of Person.addresses loads from cache. q = q.options(RelationshipCache(Person.addresses, "default")) # alternatively, eagerly load the "addresses" collection, so that they'd # be cached together. This issues a bigger SQL statement and caches # a single, larger value in the cache per person rather than two # separate ones. #q = q.options(joinedload(Person.addresses)) # if requested, invalidate the cache on current criterion. 
if invalidate: q.invalidate() return q.all() print "two through twelve, possibly from cache:\n" print ", ".join([p.name for p in load_name_range(2, 12)]) print "\ntwenty five through forty, possibly from cache:\n" print ", ".join([p.name for p in load_name_range(25, 40)]) # loading them again, no SQL is emitted print "\ntwo through twelve, from the cache:\n" print ", ".join([p.name for p in load_name_range(2, 12)]) # but with invalidate, they are print "\ntwenty five through forty, invalidate first:\n" print ", ".join([p.name for p in load_name_range(25, 40, True)]) # illustrate the address loading from either cache/already # on the Person print "\n\nPeople plus addresses, two through twelve, addresses possibly from cache" for p in load_name_range(2, 12): print p.format_full() # illustrate the address loading from either cache/already # on the Person print "\n\nPeople plus addresses, two through twelve, addresses from cache" for p in load_name_range(2, 12): print p.format_full() print "\n\nIf this was the first run of advanced.py, try "\ "a second run. Only one SQL statement will be emitted." SQLAlchemy-0.8.4/examples/dogpile_caching/caching_query.py0000644000076500000240000002047312251150015024337 0ustar classicstaff00000000000000"""caching_query.py Represent persistence structures which allow the usage of dogpile.cache caching with SQLAlchemy. The three new concepts introduced here are: * CachingQuery - a Query subclass that caches and retrieves results in/from dogpile.cache. * FromCache - a query option that establishes caching parameters on a Query * RelationshipCache - a variant of FromCache which is specific to a query invoked during a lazy load. * _params_from_query - extracts value parameters from a Query. The rest of what's here are standard SQLAlchemy and dogpile.cache constructs. """ from sqlalchemy.orm.interfaces import MapperOption from sqlalchemy.orm.query import Query from sqlalchemy.sql import visitors from dogpile.cache.api import NO_VALUE class CachingQuery(Query): """A Query subclass which optionally loads full results from a dogpile cache region. The CachingQuery optionally stores additional state that allows it to consult a dogpile.cache cache before accessing the database, in the form of a FromCache or RelationshipCache object. Each of these objects refer to the name of a :class:`dogpile.cache.Region` that's been configured and stored in a lookup dictionary. When such an object has associated itself with the CachingQuery, the corresponding :class:`dogpile.cache.Region` is used to locate a cached result. If none is present, then the Query is invoked normally, the results being cached. The FromCache and RelationshipCache mapper options below represent the "public" method of configuring this state upon the CachingQuery. """ def __init__(self, regions, *args, **kw): self.cache_regions = regions Query.__init__(self, *args, **kw) def __iter__(self): """override __iter__ to pull results from dogpile if particular attributes have been configured. Note that this approach does *not* detach the loaded objects from the current session. If the cache backend is an in-process cache (like "memory") and lives beyond the scope of the current session's transaction, those objects may be expired. The method here can be modified to first expunge() each loaded item from the current session before returning the list of items, so that the items in the cache are not the same ones in the current Session. 
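A sketch of that modification (not part of the recipe as given) might be::

    def __iter__(self):
        if hasattr(self, '_cache_region'):
            def create():
                items = list(Query.__iter__(self))
                # detach loaded items before they go into the cache, so
                # the cached objects are independent of this Session
                for obj in items:
                    self.session.expunge(obj)
                return items
            return self.get_value(createfunc=create)
        else:
            return Query.__iter__(self)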
""" if hasattr(self, '_cache_region'): return self.get_value(createfunc=lambda: list(Query.__iter__(self))) else: return Query.__iter__(self) def _get_cache_plus_key(self): """Return a cache region plus key.""" dogpile_region = self.cache_regions[self._cache_region.region] if self._cache_region.cache_key: key = self._cache_region.cache_key else: key = _key_from_query(self) return dogpile_region, key def invalidate(self): """Invalidate the cache value represented by this Query.""" dogpile_region, cache_key = self._get_cache_plus_key() dogpile_region.delete(cache_key) def get_value(self, merge=True, createfunc=None, expiration_time=None, ignore_expiration=False): """Return the value from the cache for this query. Raise KeyError if no value present and no createfunc specified. """ dogpile_region, cache_key = self._get_cache_plus_key() # ignore_expiration means, if the value is in the cache # but is expired, return it anyway. This doesn't make sense # with createfunc, which says, if the value is expired, generate # a new value. assert not ignore_expiration or not createfunc, \ "Can't ignore expiration and also provide createfunc" if ignore_expiration or not createfunc: cached_value = dogpile_region.get(cache_key, expiration_time=expiration_time, ignore_expiration=ignore_expiration) else: cached_value = dogpile_region.get_or_create( cache_key, createfunc, expiration_time=expiration_time ) if cached_value is NO_VALUE: raise KeyError(cache_key) if merge: cached_value = self.merge_result(cached_value, load=False) return cached_value def set_value(self, value): """Set the value in the cache for this query.""" dogpile_region, cache_key = self._get_cache_plus_key() dogpile_region.set(cache_key, value) def query_callable(regions, query_cls=CachingQuery): def query(*arg, **kw): return query_cls(regions, *arg, **kw) return query def _key_from_query(query, qualifier=None): """Given a Query, create a cache key. There are many approaches to this; here we use the simplest, which is to create an md5 hash of the text of the SQL statement, combined with stringified versions of all the bound parameters within it. There's a bit of a performance hit with compiling out "query.statement" here; other approaches include setting up an explicit cache key with a particular Query, then combining that with the bound parameter values. """ stmt = query.with_labels().statement compiled = stmt.compile() params = compiled.params # here we return the key as a long string. our "key mangler" # set up with the region will boil it down to an md5. return " ".join( [unicode(compiled)] + [unicode(params[k]) for k in sorted(params)]) class FromCache(MapperOption): """Specifies that a Query should load results from a cache.""" propagate_to_loaders = False def __init__(self, region="default", cache_key=None): """Construct a new FromCache. :param region: the cache region. Should be a region configured in the dictionary of dogpile regions. :param cache_key: optional. A string cache key that will serve as the key to the query. Use this if your query has a huge amount of parameters (such as when using in_()) which correspond more simply to some other identifier. 
""" self.region = region self.cache_key = cache_key def process_query(self, query): """Process a Query during normal loading operation.""" query._cache_region = self class RelationshipCache(MapperOption): """Specifies that a Query as called within a "lazy load" should load results from a cache.""" propagate_to_loaders = True def __init__(self, attribute, region="default", cache_key=None): """Construct a new RelationshipCache. :param attribute: A Class.attribute which indicates a particular class relationship() whose lazy loader should be pulled from the cache. :param region: name of the cache region. :param cache_key: optional. A string cache key that will serve as the key to the query, bypassing the usual means of forming a key from the Query itself. """ self.region = region self.cache_key = cache_key self._relationship_options = { (attribute.property.parent.class_, attribute.property.key): self } def process_query_conditionally(self, query): """Process a Query that is used within a lazy loader. (the process_query_conditionally() method is a SQLAlchemy hook invoked only within lazyload.) """ if query._current_path: mapper, prop = query._current_path[-2:] key = prop.key for cls in mapper.class_.__mro__: if (cls, key) in self._relationship_options: relationship_option = self._relationship_options[(cls, key)] query._cache_region = relationship_option break def and_(self, option): """Chain another RelationshipCache option to this one. While many RelationshipCache objects can be specified on a single Query separately, chaining them together allows for a more efficient lookup during load. """ self._relationship_options.update(option._relationship_options) return self SQLAlchemy-0.8.4/examples/dogpile_caching/environment.py0000644000076500000240000000451412251150015024060 0ustar classicstaff00000000000000"""environment.py Establish data / cache file paths, and configurations, bootstrap fixture data if necessary. """ import caching_query from sqlalchemy import create_engine from sqlalchemy.orm import scoped_session, sessionmaker from sqlalchemy.ext.declarative import declarative_base from dogpile.cache.region import make_region import os from hashlib import md5 import sys py2k = sys.version_info < (3, 0) if py2k: input = raw_input # dogpile cache regions. A home base for cache configurations. regions = {} # scoped_session. Apply our custom CachingQuery class to it, # using a callable that will associate the dictionary # of regions with the Query. Session = scoped_session( sessionmaker( query_cls=caching_query.query_callable(regions) ) ) # global declarative base class. Base = declarative_base() root = "./dogpile_data/" if not os.path.exists(root): raw_input("Will create datafiles in %r.\n" "To reset the cache + database, delete this directory.\n" "Press enter to continue.\n" % root ) os.makedirs(root) dbfile = os.path.join(root, "dogpile_demo.db") engine = create_engine('sqlite:///%s' % dbfile, echo=True) Session.configure(bind=engine) def md5_key_mangler(key): """Receive cache keys as long concatenated strings; distill them into an md5 hash. """ return md5(key.encode('ascii')).hexdigest() # configure the "default" cache region. regions['default'] = make_region( # the "dbm" backend needs # string-encoded keys key_mangler=md5_key_mangler ).configure( # using type 'file' to illustrate # serialized persistence. Normally # memcached or similar is a better choice # for caching. 
'dogpile.cache.dbm', expiration_time=3600, arguments={ "filename": os.path.join(root, "cache.dbm") } ) # optional; call invalidate() on the region # once created so that all data is fresh when # the app is restarted. Good for development, # on a production system needs to be used carefully # regions['default'].invalidate() installed = False def bootstrap(): global installed import fixture_data if not os.path.exists(dbfile): fixture_data.install() installed = TrueSQLAlchemy-0.8.4/examples/dogpile_caching/fixture_data.py0000644000076500000240000000321412251150015024167 0ustar classicstaff00000000000000"""fixture_data.py Installs some sample data. Here we have a handful of postal codes for a few US/ Canadian cities. Then, 100 Person records are installed, each with a randomly selected postal code. """ from environment import Session, Base from model import City, Country, PostalCode, Person, Address import random def install(): Base.metadata.create_all(Session().bind) data = [ ('Chicago', 'United States', ('60601', '60602', '60603', '60604')), ('Montreal', 'Canada', ('H2S 3K9', 'H2B 1V4', 'H7G 2T8')), ('Edmonton', 'Canada', ('T5J 1R9', 'T5J 1Z4', 'T5H 1P6')), ('New York', 'United States', ('10001', '10002', '10003', '10004', '10005', '10006')), ('San Francisco', 'United States', ('94102', '94103', '94104', '94105', '94107', '94108')) ] countries = {} all_post_codes = [] for city, country, postcodes in data: try: country = countries[country] except KeyError: countries[country] = country = Country(country) city = City(city, country) pc = [PostalCode(code, city) for code in postcodes] Session.add_all(pc) all_post_codes.extend(pc) for i in xrange(1, 51): person = Person( "person %.2d" % i, Address( street="street %.2d" % i, postal_code=all_post_codes[ random.randint(0, len(all_post_codes) - 1)] ) ) Session.add(person) Session.commit() # start the demo fresh Session.remove()SQLAlchemy-0.8.4/examples/dogpile_caching/helloworld.py0000644000076500000240000000471112251150015023666 0ustar classicstaff00000000000000"""helloworld.py Illustrate how to load some data, and cache the results. """ from environment import Session from model import Person from caching_query import FromCache # load Person objects. cache the result in the "default" cache region print("loading people....") people = Session.query(Person).options(FromCache("default")).all() # remove the Session. next query starts from scratch. Session.remove() # load again, using the same FromCache option. now they're cached, # so no SQL is emitted. print("loading people....again!") people = Session.query(Person).options(FromCache("default")).all() # Specifying a different query produces a different cache key, so # these results are independently cached. print("loading people two through twelve") people_two_through_twelve = Session.query(Person).\ options(FromCache("default")).\ filter(Person.name.between("person 02", "person 12")).\ all() # the data is cached under string structure of the SQL statement, *plus* # the bind parameters of the query. So this query, having # different literal parameters under "Person.name.between()" than the # previous one, issues new SQL... print("loading people five through fifteen") people_five_through_fifteen = Session.query(Person).\ options(FromCache("default")).\ filter(Person.name.between("person 05", "person 15")).\ all() # ... 
but using the same params as are already cached, no SQL print("loading people two through twelve...again!") people_two_through_twelve = Session.query(Person).\ options(FromCache("default")).\ filter(Person.name.between("person 02", "person 12")).\ all() # invalidate the cache for the three queries we've done. Recreate # each Query, which includes at the very least the same FromCache, # same list of objects to be loaded, and the same parameters in the # same order, then call invalidate(). print("invalidating everything") Session.query(Person).options(FromCache("default")).invalidate() Session.query(Person).\ options(FromCache("default")).\ filter(Person.name.between("person 02", "person 12")).invalidate() Session.query(Person).\ options(FromCache("default", "people_on_range")).\ filter(Person.name.between("person 05", "person 15")).invalidate() SQLAlchemy-0.8.4/examples/dogpile_caching/local_session_caching.py0000644000076500000240000000614012251150015026022 0ustar classicstaff00000000000000"""local_session_caching.py Create a new Dogpile cache backend that will store cached data local to the current Session. This is an advanced example which assumes familiarity with the basic operation of CachingQuery. """ from dogpile.cache.api import CacheBackend, NO_VALUE from dogpile.cache.region import register_backend class ScopedSessionBackend(CacheBackend): """A dogpile backend which will cache objects locally on the current session. When used with the query_cache system, the effect is that the objects in the cache are the same as that within the session - the merge() is a formality that doesn't actually create a second instance. This makes it safe to use for updates of data from an identity perspective (still not ideal for deletes though). When the session is removed, the cache is gone too, so the cache is automatically disposed upon session.remove(). """ def __init__(self, arguments): self.scoped_session = arguments['scoped_session'] def get(self, key): return self._cache_dictionary.get(key, NO_VALUE) def set(self, key, value): self._cache_dictionary[key] = value def delete(self, key): self._cache_dictionary.pop(key, None) @property def _cache_dictionary(self): """Return the cache dictionary linked to the current Session.""" sess = self.scoped_session() try: cache_dict = sess._cache_dictionary except AttributeError: sess._cache_dictionary = cache_dict = {} return cache_dict register_backend("sqlalchemy.session", __name__, "ScopedSessionBackend") if __name__ == '__main__': from environment import Session, regions from caching_query import FromCache from dogpile.cache import make_region # set up a region based on the ScopedSessionBackend, # pointing to the scoped_session declared in the example # environment. regions['local_session'] = make_region().configure( 'sqlalchemy.session', arguments={ "scoped_session": Session } ) from model import Person # query to load Person by name, with criterion # of "person 10" q = Session.query(Person).\ options(FromCache("local_session")).\ filter(Person.name == "person 10") # load from DB person10 = q.one() # next call, the query is cached. person10 = q.one() # clear out the Session. The "_cache_dictionary" dictionary # disappears with it. Session.remove() # query calls from DB again person10 = q.one() # identity is preserved - person10 is the *same* object that's # ultimately inside the cache. 
So it is safe to manipulate # the not-queried-for attributes of objects when using such a # cache without the need to invalidate - however, any change # that would change the results of a cached query, such as # inserts, deletes, or modification to attributes that are # part of query criterion, still require careful invalidation. cache, key = q._get_cache_plus_key() assert person10 is cache.get(key)[0] SQLAlchemy-0.8.4/examples/dogpile_caching/model.py0000644000076500000240000000577112251150015022622 0ustar classicstaff00000000000000"""Model. We are modeling Person objects with a collection of Address objects. Each Address has a PostalCode, which in turn references a City and then a Country: Person --(1..n)--> Address Address --(has a)--> PostalCode PostalCode --(has a)--> City City --(has a)--> Country """ from sqlalchemy import Column, Integer, String, ForeignKey from sqlalchemy.orm import relationship from caching_query import FromCache, RelationshipCache from environment import Base, bootstrap class Country(Base): __tablename__ = 'country' id = Column(Integer, primary_key=True) name = Column(String(100), nullable=False) def __init__(self, name): self.name = name class City(Base): __tablename__ = 'city' id = Column(Integer, primary_key=True) name = Column(String(100), nullable=False) country_id = Column(Integer, ForeignKey('country.id'), nullable=False) country = relationship(Country) def __init__(self, name, country): self.name = name self.country = country class PostalCode(Base): __tablename__ = 'postal_code' id = Column(Integer, primary_key=True) code = Column(String(10), nullable=False) city_id = Column(Integer, ForeignKey('city.id'), nullable=False) city = relationship(City) @property def country(self): return self.city.country def __init__(self, code, city): self.code = code self.city = city class Address(Base): __tablename__ = 'address' id = Column(Integer, primary_key=True) person_id = Column(Integer, ForeignKey('person.id'), nullable=False) street = Column(String(200), nullable=False) postal_code_id = Column(Integer, ForeignKey('postal_code.id')) postal_code = relationship(PostalCode) @property def city(self): return self.postal_code.city @property def country(self): return self.postal_code.country def __str__(self): return "%s\t"\ "%s, %s\t"\ "%s" % (self.street, self.city.name, self.postal_code.code, self.country.name) class Person(Base): __tablename__ = 'person' id = Column(Integer, primary_key=True) name = Column(String(100), nullable=False) addresses = relationship(Address, collection_class=set) def __init__(self, name, *addresses): self.name = name self.addresses = set(addresses) def __str__(self): return self.name def __repr__(self): return "Person(name=%r)" % self.name def format_full(self): return "\t".join([str(x) for x in [self] + list(self.addresses)]) # Caching options. A set of three RelationshipCache options # which can be applied to Query(), causing the "lazy load" # of these attributes to be loaded from cache. cache_address_bits = RelationshipCache(PostalCode.city, "default").\ and_( RelationshipCache(City.country, "default") ).and_( RelationshipCache(Address.postal_code, "default") ) bootstrap()SQLAlchemy-0.8.4/examples/dogpile_caching/relation_caching.py0000644000076500000240000000165612251150015025011 0ustar classicstaff00000000000000"""relationship_caching.py Load a set of Person and Address objects, specifying that related PostalCode, City, Country objects should be pulled from long term cache. 
""" from environment import Session, root from model import Person, cache_address_bits from sqlalchemy.orm import joinedload import os for p in Session.query(Person).options(joinedload(Person.addresses), cache_address_bits): print p.format_full() print "\n\nIf this was the first run of relationship_caching.py, SQL was likely emitted to "\ "load postal codes, cities, countries.\n"\ "If run a second time, assuming the cache is still valid, "\ "only a single SQL statement will run - all "\ "related data is pulled from cache.\n"\ "To clear the cache, delete the file %r. \n"\ "This will cause a re-load of cities, postal codes and countries on "\ "the next run.\n"\ % os.path.join(root, 'cache.dbm') SQLAlchemy-0.8.4/examples/dynamic_dict/0000755000076500000240000000000012251151573020500 5ustar classicstaff00000000000000SQLAlchemy-0.8.4/examples/dynamic_dict/__init__.py0000644000076500000240000000034712251150015022603 0ustar classicstaff00000000000000""" Illustrates how to place a dictionary-like facade on top of a "dynamic" relation, so that dictionary operations (assuming simple string keys) can operate upon a large collection without loading the full collection at once. """SQLAlchemy-0.8.4/examples/dynamic_dict/dynamic_dict.py0000644000076500000240000000473112251150015023474 0ustar classicstaff00000000000000class ProxyDict(object): def __init__(self, parent, collection_name, childclass, keyname): self.parent = parent self.collection_name = collection_name self.childclass = childclass self.keyname = keyname @property def collection(self): return getattr(self.parent, self.collection_name) def keys(self): descriptor = getattr(self.childclass, self.keyname) return [x[0] for x in self.collection.values(descriptor)] def __getitem__(self, key): x = self.collection.filter_by(**{self.keyname:key}).first() if x: return x else: raise KeyError(key) def __setitem__(self, key, value): try: existing = self[key] self.collection.remove(existing) except KeyError: pass self.collection.append(value) from sqlalchemy.ext.declarative import declarative_base from sqlalchemy import create_engine, Column, Integer, String, ForeignKey from sqlalchemy.orm import sessionmaker, relationship engine = create_engine('sqlite://', echo=True) Base = declarative_base(engine) class Parent(Base): __tablename__ = 'parent' id = Column(Integer, primary_key=True) name = Column(String(50)) _collection = relationship("Child", lazy="dynamic", cascade="all, delete-orphan") @property def child_map(self): return ProxyDict(self, '_collection', Child, 'key') class Child(Base): __tablename__ = 'child' id = Column(Integer, primary_key=True) key = Column(String(50)) parent_id = Column(Integer, ForeignKey('parent.id')) def __repr__(self): return "Child(key=%r)" % self.key Base.metadata.create_all() sess = sessionmaker()() p1 = Parent(name='p1') sess.add(p1) print "\n---------begin setting nodes, autoflush occurs\n" p1.child_map['k1'] = Child(key='k1') p1.child_map['k2'] = Child(key='k2') # this will autoflush the current map. 
# ['k1', 'k2'] print "\n---------print keys - flushes first\n" print p1.child_map.keys() # k1 print "\n---------print 'k1' node\n" print p1.child_map['k1'] print "\n---------update 'k2' node - must find existing, and replace\n" p1.child_map['k2'] = Child(key='k2') print "\n---------print 'k2' key - flushes first\n" # k2 print p1.child_map['k2'] print "\n---------print all child nodes\n" # [k1, k2b] print sess.query(Child).all() SQLAlchemy-0.8.4/examples/elementtree/0000755000076500000240000000000012251151573020362 5ustar classicstaff00000000000000SQLAlchemy-0.8.4/examples/elementtree/__init__.py0000644000076500000240000000314112251150015022460 0ustar classicstaff00000000000000""" Illustrates three strategies for persisting and querying XML documents as represented by ElementTree in a relational database. The techniques do not apply any mappings to the ElementTree objects directly, so are compatible with the native cElementTree as well as lxml, and can be adapted to suit any kind of DOM representation system. Querying along xpath-like strings is illustrated as well. In order of complexity: * ``pickle.py`` - Quick and dirty, serialize the whole DOM into a BLOB column. While the example is very brief, it has very limited functionality. * ``adjacency_list.py`` - Each DOM node is stored in an individual table row, with attributes represented in a separate table. The nodes are associated in a hierarchy using an adjacency list structure. A query function is introduced which can search for nodes along any path with a given structure of attributes, basically a (very narrow) subset of xpath. * ``optimized_al.py`` - Uses the same strategy as ``adjacency_list.py``, but associates each DOM row with its owning document row, so that a full document of DOM nodes can be loaded using O(1) queries - the construction of the "hierarchy" is performed after the load in a non-recursive fashion and is much more efficient. E.g.:: # parse an XML file and persist in the database doc = ElementTree.parse("test.xml") session.add(Document(file, doc)) session.commit() # locate documents with a certain path/attribute structure for document in find_document('/somefile/header/field2[@attr=foo]'): # dump the XML print document """SQLAlchemy-0.8.4/examples/elementtree/adjacency_list.py0000644000076500000240000001661012251150015023702 0ustar classicstaff00000000000000"""illustrates an explicit way to persist an XML document expressed using ElementTree. This example explicitly marshals/unmarshals the ElementTree document into mapped entities which have their own tables. Compare to pickle.py which uses pickle to accomplish the same task. Note that the usage of both styles of persistence are identical, as is the structure of the main Document class. """ ################################# PART I - Imports/Coniguration #################################### from sqlalchemy import (MetaData, Table, Column, Integer, String, ForeignKey, Unicode, and_, create_engine) from sqlalchemy.orm import mapper, relationship, Session, lazyload import sys, os, StringIO, re from xml.etree import ElementTree e = create_engine('sqlite://') meta = MetaData() ################################# PART II - Table Metadata ######################################### # stores a top level record of an XML document. documents = Table('documents', meta, Column('document_id', Integer, primary_key=True), Column('filename', String(30), unique=True), Column('element_id', Integer, ForeignKey('elements.element_id')) ) # stores XML nodes in an adjacency list model. 
This corresponds to # Element and SubElement objects. elements = Table('elements', meta, Column('element_id', Integer, primary_key=True), Column('parent_id', Integer, ForeignKey('elements.element_id')), Column('tag', Unicode(30), nullable=False), Column('text', Unicode), Column('tail', Unicode) ) # stores attributes. This corresponds to the dictionary of attributes # stored by an Element or SubElement. attributes = Table('attributes', meta, Column('element_id', Integer, ForeignKey('elements.element_id'), primary_key=True), Column('name', Unicode(100), nullable=False, primary_key=True), Column('value', Unicode(255))) meta.create_all(e) #################################### PART III - Model ############################################# # our document class. contains a string name, # and the ElementTree root element. class Document(object): def __init__(self, name, element): self.filename = name self.element = element def __str__(self): buf = StringIO.StringIO() self.element.write(buf) return buf.getvalue() #################################### PART IV - Persistence Mapping ################################# # Node class. a non-public class which will represent # the DB-persisted Element/SubElement object. We cannot create mappers for # ElementTree elements directly because they are at the very least not new-style # classes, and also may be backed by native implementations. # so here we construct an adapter. class _Node(object): pass # Attribute class. also internal, this will represent the key/value attributes stored for # a particular Node. class _Attribute(object): def __init__(self, name, value): self.name = name self.value = value # setup mappers. Document will eagerly load a list of _Node objects. mapper(Document, documents, properties={ '_root':relationship(_Node, lazy='joined', cascade="all") }) mapper(_Node, elements, properties={ 'children':relationship(_Node, cascade="all"), # eagerly load attributes 'attributes':relationship(_Attribute, lazy='joined', cascade="all, delete-orphan"), }) mapper(_Attribute, attributes) # define marshalling functions that convert from _Node/_Attribute to/from ElementTree objects. # this will set the ElementTree element as "document._element", and append the root _Node # object to the "_root" mapped collection. class ElementTreeMarshal(object): def __get__(self, document, owner): if document is None: return self if hasattr(document, '_element'): return document._element def traverse(node, parent=None): if parent is not None: elem = ElementTree.SubElement(parent, node.tag) else: elem = ElementTree.Element(node.tag) elem.text = node.text elem.tail = node.tail for attr in node.attributes: elem.attrib[attr.name] = attr.value for child in node.children: traverse(child, parent=elem) return elem document._element = ElementTree.ElementTree(traverse(document._root)) return document._element def __set__(self, document, element): def traverse(node): n = _Node() n.tag = unicode(node.tag) n.text = unicode(node.text) n.tail = unicode(node.tail) n.children = [traverse(n2) for n2 in node] n.attributes = [_Attribute(unicode(k), unicode(v)) for k, v in node.attrib.iteritems()] return n document._root = traverse(element.getroot()) document._element = element def __delete__(self, document): del document._element document._root = [] # override Document's "element" attribute with the marshaller. 
Document.element = ElementTreeMarshal() ########################################### PART V - Basic Persistence Example ##################### line = "\n--------------------------------------------------------" # save to DB session = Session(e) # get ElementTree documents for file in ('test.xml', 'test2.xml', 'test3.xml'): filename = os.path.join(os.path.dirname(__file__), file) doc = ElementTree.parse(filename) session.add(Document(file, doc)) print "\nSaving three documents...", line session.commit() print "Done." print "\nFull text of document 'text.xml':", line document = session.query(Document).filter_by(filename="test.xml").first() print document ############################################ PART VI - Searching for Paths ######################### # manually search for a document which contains "/somefile/header/field1:hi" d = session.query(Document).join('_root', aliased=True).filter(_Node.tag==u'somefile').\ join('children', aliased=True, from_joinpoint=True).filter(_Node.tag==u'header').\ join('children', aliased=True, from_joinpoint=True).filter( and_(_Node.tag==u'field1', _Node.text==u'hi')).one() print d # generalize the above approach into an extremely impoverished xpath function: def find_document(path, compareto): j = documents prev_elements = None query = session.query(Document) attribute = '_root' for i, match in enumerate(re.finditer(r'/([\w_]+)(?:\[@([\w_]+)(?:=(.*))?\])?', path)): (token, attrname, attrvalue) = match.group(1, 2, 3) query = query.join(attribute, aliased=True, from_joinpoint=True).filter(_Node.tag==token) attribute = 'children' if attrname: if attrvalue: query = query.join('attributes', aliased=True, from_joinpoint=True).filter( and_(_Attribute.name==attrname, _Attribute.value==attrvalue)) else: query = query.join('attributes', aliased=True, from_joinpoint=True).filter( _Attribute.name==attrname) return query.options(lazyload('_root')).filter(_Node.text==compareto).all() for path, compareto in ( (u'/somefile/header/field1', u'hi'), (u'/somefile/field1', u'hi'), (u'/somefile/header/field2', u'there'), (u'/somefile/header/field2[@attr=foo]', u'there') ): print "\nDocuments containing '%s=%s':" % (path, compareto), line print [d.filename for d in find_document(path, compareto)] SQLAlchemy-0.8.4/examples/elementtree/optimized_al.py0000644000076500000240000001773512251150015023417 0ustar classicstaff00000000000000"""This script duplicates adjacency_list.py, but optimizes the loading of XML nodes to be based on a "flattened" datamodel. Any number of XML documents, each of arbitrary complexity, can be loaded in their entirety via a single query which joins on only three tables. """ ##################### PART I - Imports/Configuration ######################### from sqlalchemy import (MetaData, Table, Column, Integer, String, ForeignKey, Unicode, and_, create_engine) from sqlalchemy.orm import mapper, relationship, Session, lazyload import sys, os, StringIO, re from xml.etree import ElementTree e = create_engine('sqlite://', echo=True) meta = MetaData() ####################### PART II - Table Metadata ############################# # stores a top level record of an XML document. documents = Table('documents', meta, Column('document_id', Integer, primary_key=True), Column('filename', String(30), unique=True), ) # stores XML nodes in an adjacency list model. This corresponds to # Element and SubElement objects. 
elements = Table('elements', meta, Column('element_id', Integer, primary_key=True), Column('parent_id', Integer, ForeignKey('elements.element_id')), Column('document_id', Integer, ForeignKey('documents.document_id')), Column('tag', Unicode(30), nullable=False), Column('text', Unicode), Column('tail', Unicode) ) # stores attributes. This corresponds to the dictionary of attributes # stored by an Element or SubElement. attributes = Table('attributes', meta, Column('element_id', Integer, ForeignKey('elements.element_id'), primary_key=True), Column('name', Unicode(100), nullable=False, primary_key=True), Column('value', Unicode(255))) meta.create_all(e) ########################### PART III - Model ################################# # our document class. contains a string name, # and the ElementTree root element. class Document(object): def __init__(self, name, element): self.filename = name self.element = element def __str__(self): buf = StringIO.StringIO() self.element.write(buf) return buf.getvalue() ########################## PART IV - Persistence Mapping ##################### # Node class. a non-public class which will represent # the DB-persisted Element/SubElement object. We cannot create mappers for # ElementTree elements directly because they are at the very least not new-style # classes, and also may be backed by native implementations. # so here we construct an adapter. class _Node(object): pass # Attribute class. also internal, this will represent the key/value attributes stored for # a particular Node. class _Attribute(object): def __init__(self, name, value): self.name = name self.value = value # setup mappers. Document will eagerly load a list of _Node objects. # they will be ordered in primary key/insert order, so that we can reconstruct # an ElementTree structure from the list. mapper(Document, documents, properties={ '_nodes':relationship(_Node, lazy='joined', cascade="all, delete-orphan") }) # the _Node objects change the way they load so that a list of _Nodes will organize # themselves hierarchically using the ElementTreeMarshal. this depends on the ordering of # nodes being hierarchical as well; relationship() always applies at least ROWID/primary key # ordering to rows which will suffice. mapper(_Node, elements, properties={ 'children':relationship(_Node, lazy=None), # doesnt load; used only for the save relationship 'attributes':relationship(_Attribute, lazy='joined', cascade="all, delete-orphan"), # eagerly load attributes }) mapper(_Attribute, attributes) # define marshalling functions that convert from _Node/_Attribute to/from ElementTree objects. # this will set the ElementTree element as "document._element", and append the root _Node # object to the "_nodes" mapped collection. 
class ElementTreeMarshal(object): def __get__(self, document, owner): if document is None: return self if hasattr(document, '_element'): return document._element nodes = {} root = None for node in document._nodes: if node.parent_id is not None: parent = nodes[node.parent_id] elem = ElementTree.SubElement(parent, node.tag) nodes[node.element_id] = elem else: parent = None elem = root = ElementTree.Element(node.tag) nodes[node.element_id] = root for attr in node.attributes: elem.attrib[attr.name] = attr.value elem.text = node.text elem.tail = node.tail document._element = ElementTree.ElementTree(root) return document._element def __set__(self, document, element): def traverse(node): n = _Node() n.tag = unicode(node.tag) n.text = unicode(node.text) n.tail = unicode(node.tail) document._nodes.append(n) n.children = [traverse(n2) for n2 in node] n.attributes = [_Attribute(unicode(k), unicode(v)) for k, v in node.attrib.iteritems()] return n traverse(element.getroot()) document._element = element def __delete__(self, document): del document._element document._nodes = [] # override Document's "element" attribute with the marshaller. Document.element = ElementTreeMarshal() ###################### PART V - Basic Persistence Example #################### line = "\n--------------------------------------------------------" # save to DB session = Session(e) # get ElementTree documents for file in ('test.xml', 'test2.xml', 'test3.xml'): filename = os.path.join(os.path.dirname(__file__), file) doc = ElementTree.parse(filename) session.add(Document(file, doc)) print "\nSaving three documents...", line session.commit() print "Done." print "\nFull text of document 'text.xml':", line document = session.query(Document).filter_by(filename="test.xml").first() print document ######################## PART VI - Searching for Paths ####################### # manually search for a document which contains "/somefile/header/field1:hi" print "\nManual search for /somefile/header/field1=='hi':", line d = session.query(Document).join('_nodes', aliased=True).\ filter(and_(_Node.parent_id==None, _Node.tag==u'somefile')).\ join('children', aliased=True, from_joinpoint=True).\ filter(_Node.tag==u'header').\ join('children', aliased=True, from_joinpoint=True).\ filter(and_(_Node.tag==u'field1', _Node.text==u'hi')).\ one() print d # generalize the above approach into an extremely impoverished xpath function: def find_document(path, compareto): j = documents prev_elements = None query = session.query(Document) first = True for i, match in enumerate(re.finditer(r'/([\w_]+)(?:\[@([\w_]+)(?:=(.*))?\])?', path)): (token, attrname, attrvalue) = match.group(1, 2, 3) if first: query = query.join('_nodes', aliased=True).filter(_Node.parent_id==None) first = False else: query = query.join('children', aliased=True, from_joinpoint=True) query = query.filter(_Node.tag==token) if attrname: query = query.join('attributes', aliased=True, from_joinpoint=True) if attrvalue: query = query.filter(and_(_Attribute.name==attrname, _Attribute.value==attrvalue)) else: query = query.filter(_Attribute.name==attrname) return query.options(lazyload('_nodes')).filter(_Node.text==compareto).all() for path, compareto in ( (u'/somefile/header/field1', u'hi'), (u'/somefile/field1', u'hi'), (u'/somefile/header/field2', u'there'), (u'/somefile/header/field2[@attr=foo]', u'there') ): print "\nDocuments containing '%s=%s':" % (path, compareto), line print [d.filename for d in find_document(path, compareto)] 
SQLAlchemy-0.8.4/examples/elementtree/pickle.py0000644000076500000240000000343512251147171022207 0ustar classicstaff00000000000000"""illustrates a quick and dirty way to persist an XML document expressed using ElementTree and pickle. This is a trivial example using PickleType to marshal/unmarshal the ElementTree document into a binary column. Compare to explicit.py which stores the individual components of the ElementTree structure in distinct rows using two additional mapped entities. Note that the usage of both styles of persistence are identical, as is the structure of the main Document class. """ from sqlalchemy import (create_engine, MetaData, Table, Column, Integer, String, PickleType) from sqlalchemy.orm import mapper, Session import sys, os from xml.etree import ElementTree e = create_engine('sqlite://') meta = MetaData() # setup a comparator for the PickleType since it's a mutable # element. def are_elements_equal(x, y): return x == y # stores a top level record of an XML document. # the "element" column will store the ElementTree document as a BLOB. documents = Table('documents', meta, Column('document_id', Integer, primary_key=True), Column('filename', String(30), unique=True), Column('element', PickleType(comparator=are_elements_equal)) ) meta.create_all(e) # our document class. contains a string name, # and the ElementTree root element. class Document(object): def __init__(self, name, element): self.filename = name self.element = element # setup mapper. mapper(Document, documents) ###### time to test ! ######### # get ElementTree document filename = os.path.join(os.path.dirname(__file__), "test.xml") doc = ElementTree.parse(filename) # save to DB session = Session(e) session.add(Document("test.xml", doc)) session.commit() # restore document = session.query(Document).filter_by(filename="test.xml").first() # print document.element.write(sys.stdout) SQLAlchemy-0.8.4/examples/elementtree/test.xml0000644000076500000240000000037112251147171022063 0ustar classicstaff00000000000000 This is somefile.
      hi there Some additional text within the header.
      Some more text within somefile.
      SQLAlchemy-0.8.4/examples/elementtree/test2.xml0000644000076500000240000000011112251147171022135 0ustar classicstaff00000000000000 hi there SQLAlchemy-0.8.4/examples/elementtree/test3.xml0000644000076500000240000000025112251147171022143 0ustar classicstaff00000000000000 test3
      one there
      SQLAlchemy-0.8.4/examples/generic_associations/0000755000076500000240000000000012251151573022244 5ustar classicstaff00000000000000SQLAlchemy-0.8.4/examples/generic_associations/__init__.py0000644000076500000240000000230012251150015024336 0ustar classicstaff00000000000000""" Illustrates various methods of associating multiple types of parents with a particular child object. The examples all use the declarative extension along with declarative mixins. Each one presents the identical use case at the end - two classes, ``Customer`` and ``Supplier``, both subclassing the ``HasAddresses`` mixin, which ensures that the parent class is provided with an ``addresses`` collection which contains ``Address`` objects. The configurations include: * ``table_per_related.py`` - illustrates a distinct table per related collection. * ``table_per_association.py`` - illustrates a shared collection table, using a table per association. * ``discriminator_on_association.py`` - shared collection table and shared association table, including a discriminator column. * ``generic_fk.py`` - imitates the approach taken by popular frameworks such as Django and Ruby on Rails to create a so-called "generic foreign key". The ``discriminator_on_association.py`` and ``generic_fk.py`` scripts are modernized versions of recipes presented in the 2007 blog post `Polymorphic Associations with SQLAlchemy `_. . """SQLAlchemy-0.8.4/examples/generic_associations/discriminator_on_association.py0000644000076500000240000001040112251150015030537 0ustar classicstaff00000000000000"""discriminator_on_related.py The HasAddresses mixin will provide a relationship to the fixed Address table based on a fixed association table. The association table contains a "discriminator" which determines what type of parent object associates to the Address row. SQLAlchemy's single-table-inheritance feature is used to target different association types. This is a "polymorphic association". Even though a "discriminator" that refers to a particular table is present, the extra association table is used so that traditional foreign key constraints may be used. This configuration attempts to simulate a so-called "generic foreign key" as closely as possible without actually foregoing the use of real foreign keys. Unlike table-per-related and table-per-association, it uses a fixed number of tables to serve any number of potential parent objects, but is also slightly more complex. """ from sqlalchemy.ext.declarative import as_declarative, declared_attr from sqlalchemy import create_engine, Integer, Column, \ String, ForeignKey from sqlalchemy.orm import Session, relationship, backref from sqlalchemy.ext.associationproxy import association_proxy @as_declarative() class Base(object): """Base class which provides automated table name and surrogate primary key column. """ @declared_attr def __tablename__(cls): return cls.__name__.lower() id = Column(Integer, primary_key=True) class AddressAssociation(Base): """Associates a collection of Address objects with a particular parent. """ __tablename__ = "address_association" discriminator = Column(String) """Refers to the type of parent.""" __mapper_args__ = {"polymorphic_on": discriminator} class Address(Base): """The Address class. This represents all address records in a single table. 
""" association_id = Column(Integer, ForeignKey("address_association.id")) street = Column(String) city = Column(String) zip = Column(String) association = relationship("AddressAssociation", backref="addresses") parent = association_proxy("association", "parent") def __repr__(self): return "%s(street=%r, city=%r, zip=%r)" % \ (self.__class__.__name__, self.street, self.city, self.zip) class HasAddresses(object): """HasAddresses mixin, creates a relationship to the address_association table for each parent. """ @declared_attr def address_association_id(cls): return Column(Integer, ForeignKey("address_association.id")) @declared_attr def address_association(cls): name = cls.__name__ discriminator = name.lower() assoc_cls = type( "%sAddressAssociation" % name, (AddressAssociation, ), dict( __mapper_args__={ "polymorphic_identity": discriminator } ) ) cls.addresses = association_proxy( "address_association", "addresses", creator=lambda addresses: assoc_cls(addresses=addresses) ) return relationship(assoc_cls, backref=backref("parent", uselist=False)) class Customer(HasAddresses, Base): name = Column(String) class Supplier(HasAddresses, Base): company_name = Column(String) engine = create_engine('sqlite://', echo=True) Base.metadata.create_all(engine) session = Session(engine) session.add_all([ Customer( name='customer 1', addresses=[ Address( street='123 anywhere street', city="New York", zip="10110"), Address( street='40 main street', city="San Francisco", zip="95732") ] ), Supplier( company_name="Ace Hammers", addresses=[ Address( street='2569 west elm', city="Detroit", zip="56785") ] ), ]) session.commit() for customer in session.query(Customer): for address in customer.addresses: print address print address.parent SQLAlchemy-0.8.4/examples/generic_associations/generic_fk.py0000644000076500000240000001042212251150015024677 0ustar classicstaff00000000000000"""generic_fk.py This example will emulate key aspects of the system used by popular frameworks such as Django, ROR, etc. It approaches the issue by bypassing standard referential integrity practices, and producing a so-called "generic foreign key", which means a database column that is not constrained to refer to any particular table. In-application logic is used to determine which table is referenced. This approach is not in line with SQLAlchemy's usual style, as foregoing foreign key integrity means that the tables can easily contain invalid references and also have no ability to use in-database cascade functionality. However, due to the popularity of these systems, as well as that it uses the fewest number of tables (which doesn't really offer any "advantage", though seems to be comforting to many) this recipe remains in high demand, so in the interests of having an easy StackOverflow answer queued up, here it is. The author recommends "table_per_related" or "table_per_association" instead of this approach. .. versionadded:: 0.8.3 """ from sqlalchemy.ext.declarative import as_declarative, declared_attr from sqlalchemy import create_engine, Integer, Column, \ String, and_ from sqlalchemy.orm import Session, relationship, foreign, remote, backref from sqlalchemy import event @as_declarative() class Base(object): """Base class which provides automated table name and surrogate primary key column. """ @declared_attr def __tablename__(cls): return cls.__name__.lower() id = Column(Integer, primary_key=True) class Address(Base): """The Address class. This represents all address records in a single table. 
""" street = Column(String) city = Column(String) zip = Column(String) discriminator = Column(String) """Refers to the type of parent.""" parent_id = Column(Integer) """Refers to the primary key of the parent. This could refer to any table. """ @property def parent(self): """Provides in-Python access to the "parent" by choosing the appropriate relationship. """ return getattr(self, "parent_%s" % self.discriminator) def __repr__(self): return "%s(street=%r, city=%r, zip=%r)" % \ (self.__class__.__name__, self.street, self.city, self.zip) class HasAddresses(object): """HasAddresses mixin, creates a relationship to the address_association table for each parent. """ @event.listens_for(HasAddresses, "mapper_configured", propagate=True) def setup_listener(mapper, class_): name = class_.__name__ discriminator = name.lower() class_.addresses = relationship(Address, primaryjoin=and_( class_.id == foreign(remote(Address.parent_id)), Address.discriminator == discriminator ), backref=backref( "parent_%s" % discriminator, primaryjoin=remote(class_.id) == foreign(Address.parent_id) ) ) @event.listens_for(class_.addresses, "append") def append_address(target, value, initiator): value.discriminator = discriminator class Customer(HasAddresses, Base): name = Column(String) class Supplier(HasAddresses, Base): company_name = Column(String) engine = create_engine('sqlite://', echo=True) Base.metadata.create_all(engine) session = Session(engine) session.add_all([ Customer( name='customer 1', addresses=[ Address( street='123 anywhere street', city="New York", zip="10110"), Address( street='40 main street', city="San Francisco", zip="95732") ] ), Supplier( company_name="Ace Hammers", addresses=[ Address( street='2569 west elm', city="Detroit", zip="56785") ] ), ]) session.commit() for customer in session.query(Customer): for address in customer.addresses: print(address) print(address.parent)SQLAlchemy-0.8.4/examples/generic_associations/table_per_association.py0000644000076500000240000000554412251150015027145 0ustar classicstaff00000000000000"""table_per_association.py The HasAddresses mixin will provide a new "address_association" table for each parent class. The "address" table will be shared for all parents. This configuration has the advantage that all Address rows are in one table, so that the definition of "Address" can be maintained in one place. The association table contains the foreign key to Address so that Address has no dependency on the system. """ from sqlalchemy.ext.declarative import as_declarative, declared_attr from sqlalchemy import create_engine, Integer, Column, \ String, ForeignKey, Table from sqlalchemy.orm import Session, relationship @as_declarative() class Base(object): """Base class which provides automated table name and surrogate primary key column. """ @declared_attr def __tablename__(cls): return cls.__name__.lower() id = Column(Integer, primary_key=True) class Address(Base): """The Address class. This represents all address records in a single table. """ street = Column(String) city = Column(String) zip = Column(String) def __repr__(self): return "%s(street=%r, city=%r, zip=%r)" % \ (self.__class__.__name__, self.street, self.city, self.zip) class HasAddresses(object): """HasAddresses mixin, creates a new address_association table for each parent. 
""" @declared_attr def addresses(cls): address_association = Table( "%s_addresses" % cls.__tablename__, cls.metadata, Column("address_id", ForeignKey("address.id"), primary_key=True), Column("%s_id" % cls.__tablename__, ForeignKey("%s.id" % cls.__tablename__), primary_key=True), ) return relationship(Address, secondary=address_association) class Customer(HasAddresses, Base): name = Column(String) class Supplier(HasAddresses, Base): company_name = Column(String) engine = create_engine('sqlite://', echo=True) Base.metadata.create_all(engine) session = Session(engine) session.add_all([ Customer( name='customer 1', addresses=[ Address( street='123 anywhere street', city="New York", zip="10110"), Address( street='40 main street', city="San Francisco", zip="95732") ] ), Supplier( company_name="Ace Hammers", addresses=[ Address( street='2569 west elm', city="Detroit", zip="56785") ] ), ]) session.commit() for customer in session.query(Customer): for address in customer.addresses: print address # no parent hereSQLAlchemy-0.8.4/examples/generic_associations/table_per_related.py0000644000076500000240000000626112251150015026246 0ustar classicstaff00000000000000"""table_per_related.py The HasAddresses mixin will provide a new "address" table for each parent class, as well as a distinct "Address" subclass. This configuration has the advantage that each type of parent maintains its "Address" rows separately, so that collection size for one type of parent will have no impact on other types of parent. Navigation between parent and "Address" is simple, direct, and bidirectional. This recipe is the most efficient (speed wise and storage wise) and simple of all of them. The creation of many related tables may seem at first like an issue but there really isn't any - the management and targeting of these tables is completely automated. """ from sqlalchemy.ext.declarative import as_declarative, declared_attr from sqlalchemy import create_engine, Integer, Column, String, ForeignKey from sqlalchemy.orm import Session, relationship @as_declarative() class Base(object): """Base class which provides automated table name and surrogate primary key column. """ @declared_attr def __tablename__(cls): return cls.__name__.lower() id = Column(Integer, primary_key=True) class Address(object): """Define columns that will be present in each 'Address' table. This is a declarative mixin, so additional mapped attributes beyond simple columns specified here should be set up using @declared_attr. """ street = Column(String) city = Column(String) zip = Column(String) def __repr__(self): return "%s(street=%r, city=%r, zip=%r)" % \ (self.__class__.__name__, self.street, self.city, self.zip) class HasAddresses(object): """HasAddresses mixin, creates a new Address class for each parent. 
""" @declared_attr def addresses(cls): cls.Address = type( "%sAddress" % cls.__name__, (Address, Base,), dict( __tablename__="%s_address" % cls.__tablename__, parent_id=Column(Integer, ForeignKey("%s.id" % cls.__tablename__)), parent=relationship(cls) ) ) return relationship(cls.Address) class Customer(HasAddresses, Base): name = Column(String) class Supplier(HasAddresses, Base): company_name = Column(String) engine = create_engine('sqlite://', echo=True) Base.metadata.create_all(engine) session = Session(engine) session.add_all([ Customer( name='customer 1', addresses=[ Customer.Address( street='123 anywhere street', city="New York", zip="10110"), Customer.Address( street='40 main street', city="San Francisco", zip="95732") ] ), Supplier( company_name="Ace Hammers", addresses=[ Supplier.Address( street='2569 west elm', city="Detroit", zip="56785") ] ), ]) session.commit() for customer in session.query(Customer): for address in customer.addresses: print address print address.parent SQLAlchemy-0.8.4/examples/graphs/0000755000076500000240000000000012251151573017335 5ustar classicstaff00000000000000SQLAlchemy-0.8.4/examples/graphs/__init__.py0000644000076500000240000000054712251150015021442 0ustar classicstaff00000000000000"""An example of persistence for a directed graph structure. The graph is stored as a collection of edges, each referencing both a "lower" and an "upper" node in a table of nodes. Basic persistence and querying for lower- and upper- neighbors are illustrated:: n2 = Node(2) n5 = Node(5) n2.add_neighbor(n5) print n2.higher_neighbors() """SQLAlchemy-0.8.4/examples/graphs/directed_graph.py0000644000076500000240000000437112251147171022657 0ustar classicstaff00000000000000"""a directed graph example.""" from sqlalchemy import MetaData, Table, Column, Integer, ForeignKey, \ create_engine from sqlalchemy.orm import mapper, relationship, sessionmaker from sqlalchemy.ext.declarative import declarative_base Base = declarative_base() class Node(Base): __tablename__ = 'node' node_id = Column(Integer, primary_key=True) def __init__(self, id): self.node_id = id def add_neighbors(self, *nodes): for node in nodes: Edge(self, node) return self def higher_neighbors(self): return [x.higher_node for x in self.lower_edges] def lower_neighbors(self): return [x.lower_node for x in self.higher_edges] class Edge(Base): __tablename__ = 'edge' lower_id = Column(Integer, ForeignKey('node.node_id'), primary_key=True) higher_id = Column(Integer, ForeignKey('node.node_id'), primary_key=True) lower_node = relationship(Node, primaryjoin=lower_id==Node.node_id, backref='lower_edges') higher_node = relationship(Node, primaryjoin=higher_id==Node.node_id, backref='higher_edges') # here we have lower.node_id <= higher.node_id def __init__(self, n1, n2): if n1.node_id < n2.node_id: self.lower_node = n1 self.higher_node = n2 else: self.lower_node = n2 self.higher_node = n1 engine = create_engine('sqlite://', echo=True) Base.metadata.create_all(engine) session = sessionmaker(engine)() # create a directed graph like this: # n1 -> n2 -> n5 # -> n7 # -> n3 -> n6 n1 = Node(1) n2 = Node(2) n3 = Node(3) n4 = Node(4) n5 = Node(5) n6 = Node(6) n7 = Node(7) n2.add_neighbors(n5, n1) n3.add_neighbors(n6) n7.add_neighbors(n2) n1.add_neighbors(n3) session.add_all([n1, n2, n3, n4, n5, n6, n7]) session.commit() assert [x.node_id for x in n3.higher_neighbors()] == [6] assert [x.node_id for x in n3.lower_neighbors()] == [1] assert [x.node_id for x in n2.lower_neighbors()] == [1] assert [x.node_id for x in n2.higher_neighbors()] == 
[5,7] SQLAlchemy-0.8.4/examples/inheritance/0000755000076500000240000000000012251151573020342 5ustar classicstaff00000000000000SQLAlchemy-0.8.4/examples/inheritance/__init__.py0000644000076500000240000000020612251150015022437 0ustar classicstaff00000000000000"""Working examples of single-table, joined-table, and concrete-table inheritance as described in :ref:`datamapping_inheritance`. """SQLAlchemy-0.8.4/examples/inheritance/concrete.py0000644000076500000240000000405212251150015022505 0ustar classicstaff00000000000000from sqlalchemy import create_engine, MetaData, Table, Column, Integer, \ String from sqlalchemy.orm import mapper, sessionmaker, polymorphic_union metadata = MetaData() managers_table = Table('managers', metadata, Column('employee_id', Integer, primary_key=True), Column('name', String(50)), Column('manager_data', String(40)) ) engineers_table = Table('engineers', metadata, Column('employee_id', Integer, primary_key=True), Column('name', String(50)), Column('engineer_info', String(40)) ) engine = create_engine('sqlite:///', echo=True) metadata.create_all(engine) class Employee(object): def __init__(self, name): self.name = name def __repr__(self): return self.__class__.__name__ + " " + self.name class Manager(Employee): def __init__(self, name, manager_data): self.name = name self.manager_data = manager_data def __repr__(self): return self.__class__.__name__ + " " + \ self.name + " " + self.manager_data class Engineer(Employee): def __init__(self, name, engineer_info): self.name = name self.engineer_info = engineer_info def __repr__(self): return self.__class__.__name__ + " " + \ self.name + " " + self.engineer_info pjoin = polymorphic_union({ 'manager':managers_table, 'engineer':engineers_table }, 'type', 'pjoin') employee_mapper = mapper(Employee, pjoin, polymorphic_on=pjoin.c.type) manager_mapper = mapper(Manager, managers_table, inherits=employee_mapper, concrete=True, polymorphic_identity='manager') engineer_mapper = mapper(Engineer, engineers_table, inherits=employee_mapper, concrete=True, polymorphic_identity='engineer') session = sessionmaker(engine)() m1 = Manager("pointy haired boss", "manager1") e1 = Engineer("wally", "engineer1") e2 = Engineer("dilbert", "engineer2") session.add(m1) session.add(e1) session.add(e2) session.commit() print session.query(Employee).all() SQLAlchemy-0.8.4/examples/inheritance/joined.py0000644000076500000240000000755612251150015022167 0ustar classicstaff00000000000000"""this example illustrates a polymorphic load of two classes""" from sqlalchemy import Table, Column, Integer, String, \ ForeignKey, create_engine, inspect, or_ from sqlalchemy.orm import relationship, Session, with_polymorphic from sqlalchemy.ext.declarative import declarative_base Base = declarative_base() class Company(Base): __tablename__ = 'company' id = Column(Integer, primary_key=True) name = Column(String(50)) employees = relationship("Person", backref='company', cascade='all, delete-orphan') def __repr__(self): return "Company %s" % self.name class Person(Base): __tablename__ = 'person' id = Column(Integer, primary_key=True) company_id = Column(Integer, ForeignKey('company.id')) name = Column(String(50)) type = Column(String(50)) __mapper_args__ = { 'polymorphic_identity':'person', 'polymorphic_on':type } def __repr__(self): return "Ordinary person %s" % self.name class Engineer(Person): __tablename__ = 'engineer' id = Column(Integer, ForeignKey('person.id'), primary_key=True) status = Column(String(30)) engineer_name = Column(String(30)) primary_language = 
Column(String(30)) __mapper_args__ = { 'polymorphic_identity':'engineer', } def __repr__(self): return "Engineer %s, status %s, engineer_name %s, "\ "primary_language %s" % \ (self.name, self.status, self.engineer_name, self.primary_language) class Manager(Person): __tablename__ = 'manager' id = Column(Integer, ForeignKey('person.id'), primary_key=True) status = Column(String(30)) manager_name = Column(String(30)) __mapper_args__ = { 'polymorphic_identity':'manager', } def __repr__(self): return "Manager %s, status %s, manager_name %s" % \ (self.name, self.status, self.manager_name) engine = create_engine('sqlite://', echo=True) Base.metadata.create_all(engine) session = Session(engine) c = Company(name='company1', employees=[ Manager( name='pointy haired boss', status='AAB', manager_name='manager1'), Engineer(name='dilbert', status='BBA', engineer_name='engineer1', primary_language='java'), Person(name='joesmith'), Engineer(name='wally', status='CGG', engineer_name='engineer2', primary_language='python'), Manager(name='jsmith', status='ABA', manager_name='manager2') ]) session.add(c) session.commit() c = session.query(Company).get(1) for e in c.employees: print e, inspect(e).key, e.company assert set([e.name for e in c.employees]) == set(['pointy haired boss', 'dilbert', 'joesmith', 'wally', 'jsmith']) print "\n" dilbert = session.query(Person).filter_by(name='dilbert').one() dilbert2 = session.query(Engineer).filter_by(name='dilbert').one() assert dilbert is dilbert2 dilbert.engineer_name = 'hes dilbert!' session.commit() c = session.query(Company).get(1) for e in c.employees: print e # query using with_polymorphic. eng_manager = with_polymorphic(Person, [Engineer, Manager], aliased=True) print session.query(eng_manager).\ filter( or_(eng_manager.Engineer.engineer_name=='engineer1', eng_manager.Manager.manager_name=='manager2' ) ).all() # illustrate join from Company, # We use aliased=True # to help when the selectable is used as the target of a join. 
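# (roughly speaking, aliased=True renders the person/engineer/manager
# outer join as an anonymous subquery, which is what allows it to be
# used as the right-hand side of the JOIN in the query below)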
eng_manager = with_polymorphic(Person, [Engineer, Manager], aliased=True) print session.query(Company).\ join( eng_manager, Company.employees ).filter( or_(eng_manager.Engineer.engineer_name=='engineer1', eng_manager.Manager.manager_name=='manager2') ).all() session.commit() SQLAlchemy-0.8.4/examples/inheritance/single.py0000644000076500000240000000627612251150015022176 0ustar classicstaff00000000000000from sqlalchemy import MetaData, Table, Column, Integer, String, \ ForeignKey, create_engine from sqlalchemy.orm import mapper, relationship, sessionmaker metadata = MetaData() # a table to store companies companies = Table('companies', metadata, Column('company_id', Integer, primary_key=True), Column('name', String(50))) employees_table = Table('employees', metadata, Column('employee_id', Integer, primary_key=True), Column('company_id', Integer, ForeignKey('companies.company_id')), Column('name', String(50)), Column('type', String(20)), Column('status', String(20)), Column('engineer_name', String(50)), Column('primary_language', String(50)), Column('manager_name', String(50)) ) class Person(object): def __init__(self, **kwargs): for key, value in kwargs.iteritems(): setattr(self, key, value) def __repr__(self): return "Ordinary person %s" % self.name class Engineer(Person): def __repr__(self): return "Engineer %s, status %s, engineer_name %s, "\ "primary_language %s" % \ (self.name, self.status, self.engineer_name, self.primary_language) class Manager(Person): def __repr__(self): return "Manager %s, status %s, manager_name %s" % \ (self.name, self.status, self.manager_name) class Company(object): def __init__(self, **kwargs): for key, value in kwargs.iteritems(): setattr(self, key, value) def __repr__(self): return "Company %s" % self.name person_mapper = mapper(Person, employees_table, polymorphic_on=employees_table.c.type, polymorphic_identity='person') manager_mapper = mapper(Manager, inherits=person_mapper, polymorphic_identity='manager') engineer_mapper = mapper(Engineer, inherits=person_mapper, polymorphic_identity='engineer') mapper(Company, companies, properties={ 'employees': relationship(Person, lazy=True, backref='company') }) engine = create_engine('sqlite:///', echo=True) metadata.create_all(engine) session = sessionmaker(engine)() c = Company(name='company1') c.employees.append(Manager(name='pointy haired boss', status='AAB', manager_name='manager1')) c.employees.append(Engineer(name='dilbert', status='BBA', engineer_name='engineer1', primary_language='java')) c.employees.append(Person(name='joesmith', status='HHH')) c.employees.append(Engineer(name='wally', status='CGG', engineer_name='engineer2', primary_language='python' )) c.employees.append(Manager(name='jsmith', status='ABA', manager_name='manager2')) session.add(c) session.commit() c = session.query(Company).get(1) for e in c.employees: print e, e.company print "\n" dilbert = session.query(Person).filter_by(name='dilbert').one() dilbert2 = session.query(Engineer).filter_by(name='dilbert').one() assert dilbert is dilbert2 dilbert.engineer_name = 'hes dibert!' session.flush() session.expunge_all() c = session.query(Company).get(1) for e in c.employees: print e session.delete(c) session.commit() SQLAlchemy-0.8.4/examples/large_collection/0000755000076500000240000000000012251151573021356 5ustar classicstaff00000000000000SQLAlchemy-0.8.4/examples/large_collection/__init__.py0000644000076500000240000000057612251150015023465 0ustar classicstaff00000000000000"""Large collection example. 
Illustrates the options to use with :func:`~sqlalchemy.orm.relationship()` when the list of related objects is very large, including: * "dynamic" relationships which query slices of data as accessed * how to use ON DELETE CASCADE in conjunction with ``passive_deletes=True`` to greatly improve the performance of related collection deletion. """ SQLAlchemy-0.8.4/examples/large_collection/large_collection.py0000644000076500000240000000632312251150015025227 0ustar classicstaff00000000000000 from sqlalchemy import (MetaData, Table, Column, Integer, String, ForeignKey, create_engine) from sqlalchemy.orm import (mapper, relationship, sessionmaker) meta = MetaData() org_table = Table('organizations', meta, Column('org_id', Integer, primary_key=True), Column('org_name', String(50), nullable=False, key='name'), mysql_engine='InnoDB') member_table = Table('members', meta, Column('member_id', Integer, primary_key=True), Column('member_name', String(50), nullable=False, key='name'), Column('org_id', Integer, ForeignKey('organizations.org_id', ondelete="CASCADE")), mysql_engine='InnoDB') class Organization(object): def __init__(self, name): self.name = name class Member(object): def __init__(self, name): self.name = name mapper(Organization, org_table, properties = { 'members' : relationship(Member, # Organization.members will be a Query object - no loading # of the entire collection occurs unless requested lazy="dynamic", # Member objects "belong" to their parent, are deleted when # removed from the collection cascade="all, delete-orphan", # "delete, delete-orphan" cascade does not load in objects on delete, # allows ON DELETE CASCADE to handle it. # this only works with a database that supports ON DELETE CASCADE - # *not* sqlite or MySQL with MyISAM passive_deletes=True, ) }) mapper(Member, member_table) if __name__ == '__main__': engine = create_engine("postgresql://scott:tiger@localhost/test", echo=True) meta.create_all(engine) # expire_on_commit=False means the session contents # will not get invalidated after commit. sess = sessionmaker(engine, expire_on_commit=False)() # create org with some members org = Organization('org one') org.members.append(Member('member one')) org.members.append(Member('member two')) org.members.append(Member('member three')) sess.add(org) print "-------------------------\nflush one - save org + 3 members\n" sess.commit() # the 'members' collection is a Query. it issues # SQL as needed to load subsets of the collection. print "-------------------------\nload subset of members\n" members = org.members.filter(member_table.c.name.like('%member t%')).all() print members # new Members can be appended without any # SQL being emitted to load the full collection org.members.append(Member('member four')) org.members.append(Member('member five')) org.members.append(Member('member six')) print "-------------------------\nflush two - save 3 more members\n" sess.commit() # delete the object. Using ON DELETE CASCADE # SQL is only emitted for the head row - the Member rows # disappear automatically without the need for additional SQL. sess.delete(org) print "-------------------------\nflush three - delete org, delete members in one statement\n" sess.commit() print "-------------------------\nno Member rows should remain:\n" print sess.query(Member).count() sess.close() print "------------------------\ndone. dropping tables." 
meta.drop_all(engine)SQLAlchemy-0.8.4/examples/nested_sets/0000755000076500000240000000000012251151573020371 5ustar classicstaff00000000000000SQLAlchemy-0.8.4/examples/nested_sets/__init__.py0000644000076500000240000000017512251150015022473 0ustar classicstaff00000000000000""" Illustrates a rudimentary way to implement the "nested sets" pattern for hierarchical data using the SQLAlchemy ORM. """SQLAlchemy-0.8.4/examples/nested_sets/nested_sets.py0000644000076500000240000000667512251147171023300 0ustar classicstaff00000000000000"""Celko's "Nested Sets" Tree Structure. http://www.intelligententerprise.com/001020/celko.jhtml """ from sqlalchemy import (create_engine, Column, Integer, String, select, case, func) from sqlalchemy.orm import Session, aliased from sqlalchemy.ext.declarative import declarative_base from sqlalchemy import event Base = declarative_base() class Employee(Base): __tablename__ = 'personnel' __mapper_args__ = { 'batch': False # allows extension to fire for each # instance before going to the next. } parent = None emp = Column(String, primary_key=True) left = Column("lft", Integer, nullable=False) right = Column("rgt", Integer, nullable=False) def __repr__(self): return "Employee(%s, %d, %d)" % (self.emp, self.left, self.right) @event.listens_for(Employee, "before_insert") def before_insert(mapper, connection, instance): if not instance.parent: instance.left = 1 instance.right = 2 else: personnel = mapper.mapped_table right_most_sibling = connection.scalar( select([personnel.c.rgt]). where(personnel.c.emp == instance.parent.emp) ) connection.execute( personnel.update( personnel.c.rgt >= right_most_sibling).values( lft=case( [(personnel.c.lft > right_most_sibling, personnel.c.lft + 2)], else_=personnel.c.lft ), rgt=case( [(personnel.c.rgt >= right_most_sibling, personnel.c.rgt + 2)], else_=personnel.c.rgt ) ) ) instance.left = right_most_sibling instance.right = right_most_sibling + 1 # before_update() would be needed to support moving of nodes # after_delete() would be needed to support removal of nodes. engine = create_engine('sqlite://', echo=True) Base.metadata.create_all(engine) session = Session(bind=engine) albert = Employee(emp='Albert') bert = Employee(emp='Bert') chuck = Employee(emp='Chuck') donna = Employee(emp='Donna') eddie = Employee(emp='Eddie') fred = Employee(emp='Fred') bert.parent = albert chuck.parent = albert donna.parent = chuck eddie.parent = chuck fred.parent = chuck # the order of "add" is important here. elements must be added in # the order in which they should be INSERTed. session.add_all([albert, bert, chuck, donna, eddie, fred]) session.commit() print(session.query(Employee).all()) # 1. Find an employee and all their supervisors, no matter how deep the tree. ealias = aliased(Employee) print(session.query(Employee).\ filter(ealias.left.between(Employee.left, Employee.right)).\ filter(ealias.emp == 'Eddie').all()) #2. Find the employee and all their subordinates. # (This query has a nice symmetry with the first query.) print(session.query(Employee).\ filter(Employee.left.between(ealias.left, ealias.right)).\ filter(ealias.emp == 'Chuck').all()) #3. Find the level of each node, so you can print the tree # as an indented listing. 
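# (each node's depth is the number of [lft, rgt] ranges that enclose it,
#  counted by the query below; subtracting one excludes the node itself,
#  so top-level nodes print with no indentation)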
for indentation, employee in session.query( func.count(Employee.emp).label('indentation') - 1, ealias).\ filter(ealias.left.between(Employee.left, Employee.right)).\ group_by(ealias.emp).\ order_by(ealias.left): print(" " * indentation + str(employee)) SQLAlchemy-0.8.4/examples/postgis/0000755000076500000240000000000012251151573017541 5ustar classicstaff00000000000000SQLAlchemy-0.8.4/examples/postgis/__init__.py0000644000076500000240000000216612251150015021645 0ustar classicstaff00000000000000"""A naive example illustrating techniques to help embed PostGIS functionality. This example was originally developed in the hopes that it would be extrapolated into a comprehensive PostGIS integration layer. We are pleased to announce that this has come to fruition as `GeoAlchemy `_. The example illustrates: * a DDL extension which allows CREATE/DROP to work in conjunction with AddGeometryColumn/DropGeometryColumn * a Geometry type, as well as a few subtypes, which convert result row values to a GIS-aware object, and also integrates with the DDL extension. * a GIS-aware object which stores a raw geometry value and provides a factory for functions such as AsText(). * an ORM comparator which can override standard column methods on mapped objects to produce GIS operators. * an attribute event listener that intercepts strings and converts to GeomFromText(). * a standalone operator example. The implementation is limited to only public, well known and simple to use extension points. E.g.:: print session.query(Road).filter(Road.road_geom.intersects(r1.road_geom)).all() """ SQLAlchemy-0.8.4/examples/postgis/postgis.py0000644000076500000240000002145212251150015021575 0ustar classicstaff00000000000000from sqlalchemy.types import UserDefinedType, _Binary, TypeDecorator from sqlalchemy.sql import expression, type_coerce from sqlalchemy import event, Table import binascii # Python datatypes class GisElement(object): """Represents a geometry value.""" def __str__(self): return self.desc def __repr__(self): return "<%s at 0x%x; %r>" % (self.__class__.__name__, id(self), self.desc) class BinaryGisElement(GisElement, expression.Function): """Represents a Geometry value expressed as binary.""" def __init__(self, data): self.data = data expression.Function.__init__(self, "ST_GeomFromEWKB", data, type_=Geometry(coerce_="binary")) @property def desc(self): return self.as_hex @property def as_hex(self): return binascii.hexlify(self.data) class TextualGisElement(GisElement, expression.Function): """Represents a Geometry value expressed as text.""" def __init__(self, desc, srid=-1): self.desc = desc expression.Function.__init__(self, "ST_GeomFromText", desc, srid, type_=Geometry) # SQL datatypes. class Geometry(UserDefinedType): """Base PostGIS Geometry column type.""" name = "GEOMETRY" def __init__(self, dimension=None, srid=-1, coerce_="text"): self.dimension = dimension self.srid = srid self.coerce = coerce_ class comparator_factory(UserDefinedType.Comparator): """Define custom operations for geometry types.""" # override the __eq__() operator def __eq__(self, other): return self.op('~=')(other) # add a custom operator def intersects(self, other): return self.op('&&')(other) # any number of GIS operators can be overridden/added here # using the techniques above. 
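        # (an illustration only, not part of the original example: a
        #  bounding-box "contains" operator could be added the same way,
        #  e.g. "def contains_(self, other): return self.op('~')(other)" -
        #  check operator names against the PostGIS documentation)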
def _coerce_compared_value(self, op, value): return self def get_col_spec(self): return self.name def bind_expression(self, bindvalue): if self.coerce == "text": return TextualGisElement(bindvalue) elif self.coerce == "binary": return BinaryGisElement(bindvalue) else: assert False def column_expression(self, col): if self.coerce == "text": return func.ST_AsText(col, type_=self) elif self.coerce == "binary": return func.ST_AsBinary(col, type_=self) else: assert False def bind_processor(self, dialect): def process(value): if isinstance(value, GisElement): return value.desc else: return value return process def result_processor(self, dialect, coltype): if self.coerce == "text": fac = TextualGisElement elif self.coerce == "binary": fac = BinaryGisElement else: assert False def process(value): if value is not None: return fac(value) else: return value return process def adapt(self, impltype): return impltype(dimension=self.dimension, srid=self.srid, coerce_=self.coerce) # other datatypes can be added as needed. class Point(Geometry): name = 'POINT' class Curve(Geometry): name = 'CURVE' class LineString(Curve): name = 'LINESTRING' # ... etc. # DDL integration # Postgis historically has required AddGeometryColumn/DropGeometryColumn # and other management methods in order to create Postgis columns. Newer # versions don't appear to require these special steps anymore. However, # here we illustrate how to set up these features in any case. def setup_ddl_events(): @event.listens_for(Table, "before_create") def before_create(target, connection, **kw): dispatch("before-create", target, connection) @event.listens_for(Table, "after_create") def after_create(target, connection, **kw): dispatch("after-create", target, connection) @event.listens_for(Table, "before_drop") def before_drop(target, connection, **kw): dispatch("before-drop", target, connection) @event.listens_for(Table, "after_drop") def after_drop(target, connection, **kw): dispatch("after-drop", target, connection) def dispatch(event, table, bind): if event in ('before-create', 'before-drop'): regular_cols = [c for c in table.c if not isinstance(c.type, Geometry)] gis_cols = set(table.c).difference(regular_cols) table.info["_saved_columns"] = table.c # temporarily patch a set of columns not including the # Geometry columns table.columns = expression.ColumnCollection(*regular_cols) if event == 'before-drop': for c in gis_cols: bind.execute( select([ func.DropGeometryColumn( 'public', table.name, c.name)], autocommit=True) ) elif event == 'after-create': table.columns = table.info.pop('_saved_columns') for c in table.c: if isinstance(c.type, Geometry): bind.execute( select([ func.AddGeometryColumn( table.name, c.name, c.type.srid, c.type.name, c.type.dimension)], autocommit=True) ) elif event == 'after-drop': table.columns = table.info.pop('_saved_columns') setup_ddl_events() # illustrate usage if __name__ == '__main__': from sqlalchemy import (create_engine, MetaData, Column, Integer, String, func, select) from sqlalchemy.orm import sessionmaker from sqlalchemy.ext.declarative import declarative_base engine = create_engine('postgresql://scott:tiger@localhost/test', echo=True) metadata = MetaData(engine) Base = declarative_base(metadata=metadata) class Road(Base): __tablename__ = 'roads' road_id = Column(Integer, primary_key=True) road_name = Column(String) road_geom = Column(Geometry(2)) metadata.drop_all() metadata.create_all() session = sessionmaker(bind=engine)() # Add objects. We can use strings... 
session.add_all([ Road(road_name='Jeff Rd', road_geom='LINESTRING(191232 243118,191108 243242)'), Road(road_name='Geordie Rd', road_geom='LINESTRING(189141 244158,189265 244817)'), Road(road_name='Paul St', road_geom='LINESTRING(192783 228138,192612 229814)'), Road(road_name='Graeme Ave', road_geom='LINESTRING(189412 252431,189631 259122)'), Road(road_name='Phil Tce', road_geom='LINESTRING(190131 224148,190871 228134)'), ]) # or use an explicit TextualGisElement (similar to saying func.GeomFromText()) r = Road(road_name='Dave Cres', road_geom=TextualGisElement('LINESTRING(198231 263418,198213 268322)', -1)) session.add(r) # pre flush, the TextualGisElement represents the string we sent. assert str(r.road_geom) == 'LINESTRING(198231 263418,198213 268322)' session.commit() # after flush and/or commit, all the TextualGisElements become PersistentGisElements. assert str(r.road_geom) == "LINESTRING(198231 263418,198213 268322)" r1 = session.query(Road).filter(Road.road_name == 'Graeme Ave').one() # illustrate the overridden __eq__() operator. # strings come in as TextualGisElements r2 = session.query(Road).filter(Road.road_geom == 'LINESTRING(189412 252431,189631 259122)').one() r3 = session.query(Road).filter(Road.road_geom == r1.road_geom).one() assert r1 is r2 is r3 # core usage just fine: road_table = Road.__table__ stmt = select([road_table]).where(road_table.c.road_geom.intersects(r1.road_geom)) print session.execute(stmt).fetchall() # TODO: for some reason the auto-generated labels have the internal replacement # strings exposed, even though PG doesn't complain # look up the hex binary version, using SQLAlchemy casts as_binary = session.scalar(select([type_coerce(r.road_geom, Geometry(coerce_="binary"))])) assert as_binary.as_hex == \ '01020000000200000000000000b832084100000000e813104100000000283208410000000088601041' # back again, same method ! as_text = session.scalar(select([type_coerce(as_binary, Geometry(coerce_="text"))])) assert as_text.desc == "LINESTRING(198231 263418,198213 268322)" session.rollback() metadata.drop_all() SQLAlchemy-0.8.4/examples/sharding/0000755000076500000240000000000012251151573017650 5ustar classicstaff00000000000000SQLAlchemy-0.8.4/examples/sharding/__init__.py0000644000076500000240000000271212251150015021751 0ustar classicstaff00000000000000"""A basic example of using the SQLAlchemy Sharding API. Sharding refers to horizontally scaling data across multiple databases. The basic components of a "sharded" mapping are: * multiple databases, each assigned a 'shard id' * a function which can return a single shard id, given an instance to be saved; this is called "shard_chooser" * a function which can return a list of shard ids which apply to a particular instance identifier; this is called "id_chooser". If it returns all shard ids, all shards will be searched. * a function which can return a list of shard ids to try, given a particular Query ("query_chooser"). If it returns all shard ids, all shards will be queried and the results joined together. In this example, four sqlite databases will store information about weather data on a database-per-continent basis. We provide example shard_chooser, id_chooser and query_chooser functions. The query_chooser illustrates inspection of the SQL expression element in order to attempt to determine a single shard being requested. The construction of generic sharding routines is an ambitious approach to the issue of organizing instances among multiple databases. 
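
As a rough sketch (signatures taken from ``attribute_shard.py`` in this
directory), the three functions look like::

    def shard_chooser(mapper, instance, clause=None):
        "return a single shard id for an instance being persisted"

    def id_chooser(query, ident):
        "return a list of shard ids to search for a given primary key"

    def query_chooser(query):
        "return a list of shard ids for an arbitrary Query"
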
For a more plain-spoken alternative, the "distinct entity" approach is a simple method of assigning objects to different tables (and potentially database nodes) in an explicit way - described on the wiki at `EntityName `_. """ SQLAlchemy-0.8.4/examples/sharding/attribute_shard.py0000644000076500000240000002237212251147171023413 0ustar classicstaff00000000000000 # step 1. imports from sqlalchemy import (create_engine, MetaData, Table, Column, Integer, String, ForeignKey, Float, DateTime, event) from sqlalchemy.orm import sessionmaker, mapper, relationship from sqlalchemy.ext.horizontal_shard import ShardedSession from sqlalchemy.sql import operators, visitors import datetime # step 2. databases. # db1 is used for id generation. The "pool_threadlocal" # causes the id_generator() to use the same connection as that # of an ongoing transaction within db1. echo = True db1 = create_engine('sqlite://', echo=echo, pool_threadlocal=True) db2 = create_engine('sqlite://', echo=echo) db3 = create_engine('sqlite://', echo=echo) db4 = create_engine('sqlite://', echo=echo) # step 3. create session function. this binds the shard ids # to databases within a ShardedSession and returns it. create_session = sessionmaker(class_=ShardedSession) create_session.configure(shards={ 'north_america':db1, 'asia':db2, 'europe':db3, 'south_america':db4 }) # step 4. table setup. meta = MetaData() # we need a way to create identifiers which are unique across all # databases. one easy way would be to just use a composite primary key, where one # value is the shard id. but here, we'll show something more "generic", an # id generation function. we'll use a simplistic "id table" stored in database # #1. Any other method will do just as well; UUID, hilo, application-specific, etc. ids = Table('ids', meta, Column('nextid', Integer, nullable=False)) def id_generator(ctx): # in reality, might want to use a separate transaction for this. c = db1.connect() nextid = c.execute(ids.select(for_update=True)).scalar() c.execute(ids.update(values={ids.c.nextid : ids.c.nextid + 1})) return nextid # table setup. we'll store a lead table of continents/cities, # and a secondary table storing locations. # a particular row will be placed in the database whose shard id corresponds to the # 'continent'. in this setup, secondary rows in 'weather_reports' will # be placed in the same DB as that of the parent, but this can be changed # if you're willing to write more complex sharding functions. weather_locations = Table("weather_locations", meta, Column('id', Integer, primary_key=True, default=id_generator), Column('continent', String(30), nullable=False), Column('city', String(50), nullable=False) ) weather_reports = Table("weather_reports", meta, Column('id', Integer, primary_key=True), Column('location_id', Integer, ForeignKey('weather_locations.id')), Column('temperature', Float), Column('report_time', DateTime, default=datetime.datetime.now), ) # create tables for db in (db1, db2, db3, db4): meta.drop_all(db) meta.create_all(db) # establish initial "id" in db1 db1.execute(ids.insert(), nextid=1) # step 5. define sharding functions. # we'll use a straight mapping of a particular set of "country" # attributes to shard id. shard_lookup = { 'North America':'north_america', 'Asia':'asia', 'Europe':'europe', 'South America':'south_america' } def shard_chooser(mapper, instance, clause=None): """shard chooser. 
looks at the given instance and returns a shard id note that we need to define conditions for the WeatherLocation class, as well as our secondary Report class which will point back to its WeatherLocation via its 'location' attribute. """ if isinstance(instance, WeatherLocation): return shard_lookup[instance.continent] else: return shard_chooser(mapper, instance.location) def id_chooser(query, ident): """id chooser. given a primary key, returns a list of shards to search. here, we don't have any particular information from a pk so we just return all shard ids. often, youd want to do some kind of round-robin strategy here so that requests are evenly distributed among DBs. """ return ['north_america', 'asia', 'europe', 'south_america'] def query_chooser(query): """query chooser. this also returns a list of shard ids, which can just be all of them. but here we'll search into the Query in order to try to narrow down the list of shards to query. """ ids = [] # we'll grab continent names as we find them # and convert to shard ids for column, operator, value in _get_query_comparisons(query): # "shares_lineage()" returns True if both columns refer to the same # statement column, adjusting for any annotations present. # (an annotation is an internal clone of a Column object # and occur when using ORM-mapped attributes like # "WeatherLocation.continent"). A simpler comparison, though less accurate, # would be "column.key == 'continent'". if column.shares_lineage(weather_locations.c.continent): if operator == operators.eq: ids.append(shard_lookup[value]) elif operator == operators.in_op: ids.extend(shard_lookup[v] for v in value) if len(ids) == 0: return ['north_america', 'asia', 'europe', 'south_america'] else: return ids def _get_query_comparisons(query): """Search an orm.Query object for binary expressions. Returns expressions which match a Column against one or more literal values as a list of tuples of the form (column, operator, values). "values" is a single value or tuple of values depending on the operator. """ binds = {} clauses = set() comparisons = [] def visit_bindparam(bind): # visit a bind parameter. # check in _params for it first if bind.key in query._params: value = query._params[bind.key] elif bind.callable: # some ORM functions (lazy loading) # place the bind's value as a # callable for deferred evaulation. value = bind.callable() else: # just use .value value = bind.value binds[bind] = value def visit_column(column): clauses.add(column) def visit_binary(binary): # special handling for "col IN (params)" if binary.left in clauses and \ binary.operator == operators.in_op and \ hasattr(binary.right, 'clauses'): comparisons.append( (binary.left, binary.operator, tuple(binds[bind] for bind in binary.right.clauses) ) ) elif binary.left in clauses and binary.right in binds: comparisons.append( (binary.left, binary.operator,binds[binary.right]) ) elif binary.left in binds and binary.right in clauses: comparisons.append( (binary.right, binary.operator,binds[binary.left]) ) # here we will traverse through the query's criterion, searching # for SQL constructs. We will place simple column comparisons # into a list. if query._criterion is not None: visitors.traverse_depthfirst(query._criterion, {}, {'bindparam':visit_bindparam, 'binary':visit_binary, 'column':visit_column } ) return comparisons # further configure create_session to use these functions create_session.configure( shard_chooser=shard_chooser, id_chooser=id_chooser, query_chooser=query_chooser ) # step 6. mapped classes. 
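# (WeatherLocation carries the 'continent' attribute that shard_chooser()
#  above maps to a shard id; Report objects don't carry it themselves, so
#  shard_chooser() recurses onto their parent .location instead)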
class WeatherLocation(object): def __init__(self, continent, city): self.continent = continent self.city = city class Report(object): def __init__(self, temperature): self.temperature = temperature # step 7. mappers mapper(WeatherLocation, weather_locations, properties={ 'reports':relationship(Report, backref='location') }) mapper(Report, weather_reports) # step 8 (optional), events. The "shard_id" is placed # in the QueryContext where it can be intercepted and associated # with objects, if needed. def add_shard_id(instance, ctx): instance.shard_id = ctx.attributes["shard_id"] event.listen(WeatherLocation, "load", add_shard_id) event.listen(Report, "load", add_shard_id) # save and load objects! tokyo = WeatherLocation('Asia', 'Tokyo') newyork = WeatherLocation('North America', 'New York') toronto = WeatherLocation('North America', 'Toronto') london = WeatherLocation('Europe', 'London') dublin = WeatherLocation('Europe', 'Dublin') brasilia = WeatherLocation('South America', 'Brasila') quito = WeatherLocation('South America', 'Quito') tokyo.reports.append(Report(80.0)) newyork.reports.append(Report(75)) quito.reports.append(Report(85)) sess = create_session() for c in [tokyo, newyork, toronto, london, dublin, brasilia, quito]: sess.add(c) sess.commit() tokyo_id = tokyo.id sess.close() t = sess.query(WeatherLocation).get(tokyo_id) assert t.city == tokyo.city assert t.reports[0].temperature == 80.0 north_american_cities = sess.query(WeatherLocation).filter(WeatherLocation.continent == 'North America') assert [c.city for c in north_american_cities] == ['New York', 'Toronto'] asia_and_europe = sess.query(WeatherLocation).filter(WeatherLocation.continent.in_(['Europe', 'Asia'])) assert set([c.city for c in asia_and_europe]) == set(['Tokyo', 'London', 'Dublin']) SQLAlchemy-0.8.4/examples/versioning/0000755000076500000240000000000012251151573020234 5ustar classicstaff00000000000000SQLAlchemy-0.8.4/examples/versioning/__init__.py0000644000076500000240000000325712251150015022342 0ustar classicstaff00000000000000""" Illustrates an extension which creates version tables for entities and stores records for each change. The same idea as Elixir's versioned extension, but more efficient (uses attribute API to get history) and handles class inheritance. The given extensions generate an anonymous "history" class which represents historical versions of the target object. Usage is illustrated via a unit test module ``test_versioning.py``, which can be run via nose:: cd examples/versioning nosetests -v A fragment of example usage, using declarative:: from history_meta import Versioned, versioned_session Base = declarative_base() class SomeClass(Versioned, Base): __tablename__ = 'sometable' id = Column(Integer, primary_key=True) name = Column(String(50)) def __eq__(self, other): assert type(other) is SomeClass and other.id == self.id Session = sessionmaker(bind=engine) versioned_session(Session) sess = Session() sc = SomeClass(name='sc1') sess.add(sc) sess.commit() sc.name = 'sc1modified' sess.commit() assert sc.version == 2 SomeClassHistory = SomeClass.__history_mapper__.class_ assert sess.query(SomeClassHistory).\\ filter(SomeClassHistory.version == 1).\\ all() \\ == [SomeClassHistory(version=1, name='sc1')] The ``Versioned`` mixin is designed to work with declarative. 
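
The history class generated by ``history_meta.py`` is mapped to a table
named after the original with a ``_history`` suffix, e.g.::

    SomeClass.__history_mapper__.local_table.name  # 'sometable_history'
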
To use the extension with classical mappers, the ``_history_mapper`` function can be applied:: from history_meta import _history_mapper m = mapper(SomeClass, sometable) _history_mapper(m) SomeHistoryClass = SomeClass.__history_mapper__.class_ """SQLAlchemy-0.8.4/examples/versioning/_lib.py0000644000076500000240000000541712251150015021510 0ustar classicstaff00000000000000"""copy of ComparableEntity and eq_() from test.lib. This is just to support running the example outside of the SQLA testing environment which is no longer part of SQLAlchemy as of 0.7. """ import sqlalchemy as sa from sqlalchemy import exc as sa_exc def eq_(a, b, msg=None): """Assert a == b, with repr messaging on failure.""" assert a == b, msg or "%r != %r" % (a, b) _repr_stack = set() class BasicEntity(object): def __init__(self, **kw): for key, value in kw.iteritems(): setattr(self, key, value) def __repr__(self): if id(self) in _repr_stack: return object.__repr__(self) _repr_stack.add(id(self)) try: return "%s(%s)" % ( (self.__class__.__name__), ', '.join(["%s=%r" % (key, getattr(self, key)) for key in sorted(self.__dict__.keys()) if not key.startswith('_')])) finally: _repr_stack.remove(id(self)) _recursion_stack = set() class ComparableEntity(BasicEntity): def __hash__(self): return hash(self.__class__) def __ne__(self, other): return not self.__eq__(other) def __eq__(self, other): """'Deep, sparse compare. Deeply compare two entities, following the non-None attributes of the non-persisted object, if possible. """ if other is self: return True elif not self.__class__ == other.__class__: return False if id(self) in _recursion_stack: return True _recursion_stack.add(id(self)) try: # pick the entity thats not SA persisted as the source try: self_key = sa.orm.attributes.instance_state(self).key except sa.orm.exc.NO_STATE: self_key = None if other is None: a = self b = other elif self_key is not None: a = other b = self else: a = self b = other for attr in a.__dict__.keys(): if attr.startswith('_'): continue value = getattr(a, attr) try: # handle lazy loader errors battr = getattr(b, attr) except (AttributeError, sa_exc.UnboundExecutionError): return False if hasattr(value, '__iter__'): if list(value) != list(battr): return False else: if value is not None and value != battr: return False return True finally: _recursion_stack.remove(id(self)) SQLAlchemy-0.8.4/examples/versioning/history_meta.py0000644000076500000240000001467312251150015023316 0ustar classicstaff00000000000000from sqlalchemy.ext.declarative import declared_attr from sqlalchemy.orm import mapper, class_mapper, attributes, object_mapper from sqlalchemy.orm.exc import UnmappedClassError, UnmappedColumnError from sqlalchemy import Table, Column, ForeignKeyConstraint, Integer from sqlalchemy import event from sqlalchemy.orm.properties import RelationshipProperty def col_references_table(col, table): for fk in col.foreign_keys: if fk.references(table): return True return False def _history_mapper(local_mapper): cls = local_mapper.class_ # set the "active_history" flag # on on column-mapped attributes so that the old version # of the info is always loaded (currently sets it on all attributes) for prop in local_mapper.iterate_properties: getattr(local_mapper.class_, prop.key).impl.active_history = True super_mapper = local_mapper.inherits super_history_mapper = getattr(cls, '__history_mapper__', None) polymorphic_on = None super_fks = [] if not super_mapper or local_mapper.local_table is not super_mapper.local_table: cols = [] for column in 
local_mapper.local_table.c: if column.name == 'version': continue col = column.copy() col.unique = False if super_mapper and col_references_table(column, super_mapper.local_table): super_fks.append((col.key, list(super_history_mapper.local_table.primary_key)[0])) cols.append(col) if column is local_mapper.polymorphic_on: polymorphic_on = col if super_mapper: super_fks.append(('version', super_history_mapper.base_mapper.local_table.c.version)) cols.append(Column('version', Integer, primary_key=True, autoincrement=False)) else: cols.append(Column('version', Integer, primary_key=True, autoincrement=False)) if super_fks: cols.append(ForeignKeyConstraint(*zip(*super_fks))) table = Table(local_mapper.local_table.name + '_history', local_mapper.local_table.metadata, *cols ) else: # single table inheritance. take any additional columns that may have # been added and add them to the history table. for column in local_mapper.local_table.c: if column.key not in super_history_mapper.local_table.c: col = column.copy() col.unique = False super_history_mapper.local_table.append_column(col) table = None if super_history_mapper: bases = (super_history_mapper.class_,) else: bases = local_mapper.base_mapper.class_.__bases__ versioned_cls = type.__new__(type, "%sHistory" % cls.__name__, bases, {}) m = mapper( versioned_cls, table, inherits=super_history_mapper, polymorphic_on=polymorphic_on, polymorphic_identity=local_mapper.polymorphic_identity ) cls.__history_mapper__ = m if not super_history_mapper: local_mapper.local_table.append_column( Column('version', Integer, default=1, nullable=False) ) local_mapper.add_property("version", local_mapper.local_table.c.version) class Versioned(object): @declared_attr def __mapper_cls__(cls): def map(cls, *arg, **kw): mp = mapper(cls, *arg, **kw) _history_mapper(mp) return mp return map def versioned_objects(iter): for obj in iter: if hasattr(obj, '__history_mapper__'): yield obj def create_version(obj, session, deleted = False): obj_mapper = object_mapper(obj) history_mapper = obj.__history_mapper__ history_cls = history_mapper.class_ obj_state = attributes.instance_state(obj) attr = {} obj_changed = False for om, hm in zip(obj_mapper.iterate_to_root(), history_mapper.iterate_to_root()): if hm.single: continue for hist_col in hm.local_table.c: if hist_col.key == 'version': continue obj_col = om.local_table.c[hist_col.key] # get the value of the # attribute based on the MapperProperty related to the # mapped column. this will allow usage of MapperProperties # that have a different keyname than that of the mapped column. try: prop = obj_mapper.get_property_by_column(obj_col) except UnmappedColumnError: # in the case of single table inheritance, there may be # columns on the mapped table intended for the subclass only. # the "unmapped" status of the subclass column on the # base class is a feature of the declarative module as of sqla 0.5.2. continue # expired object attributes and also deferred cols might not be in the # dict. force it to load no matter what by using getattr(). if prop.key not in obj_state.dict: getattr(obj, prop.key) a, u, d = attributes.get_history(obj, prop.key) if d: attr[hist_col.key] = d[0] obj_changed = True elif u: attr[hist_col.key] = u[0] else: # if the attribute had no value. attr[hist_col.key] = a[0] obj_changed = True if not obj_changed: # not changed, but we have relationships. 
OK # check those too for prop in obj_mapper.iterate_properties: if isinstance(prop, RelationshipProperty) and \ attributes.get_history(obj, prop.key).has_changes(): for p in prop.local_columns: if p.foreign_keys: obj_changed = True break if obj_changed is True: break if not obj_changed and not deleted: return attr['version'] = obj.version hist = history_cls() for key, value in attr.iteritems(): setattr(hist, key, value) session.add(hist) obj.version += 1 def versioned_session(session): @event.listens_for(session, 'before_flush') def before_flush(session, flush_context, instances): for obj in versioned_objects(session.dirty): create_version(obj, session) for obj in versioned_objects(session.deleted): create_version(obj, session, deleted = True) SQLAlchemy-0.8.4/examples/versioning/test_versioning.py0000644000076500000240000002646612251150015024034 0ustar classicstaff00000000000000from unittest import TestCase from sqlalchemy.ext.declarative import declarative_base from history_meta import Versioned, versioned_session from sqlalchemy import create_engine, Column, Integer, String, ForeignKey from sqlalchemy.orm import clear_mappers, Session, deferred, relationship from _lib import ComparableEntity, eq_ engine = None def setup(): global engine engine = create_engine('sqlite://', echo=True) class TestVersioning(TestCase): def setUp(self): self.session = Session(engine) self.Base = declarative_base() versioned_session(self.session) def tearDown(self): self.session.close() clear_mappers() self.Base.metadata.drop_all(engine) def create_tables(self): self.Base.metadata.create_all(engine) def test_plain(self): class SomeClass(Versioned, self.Base, ComparableEntity): __tablename__ = 'sometable' id = Column(Integer, primary_key=True) name = Column(String(50)) self.create_tables() sess = self.session sc = SomeClass(name='sc1') sess.add(sc) sess.commit() sc.name = 'sc1modified' sess.commit() assert sc.version == 2 SomeClassHistory = SomeClass.__history_mapper__.class_ eq_( sess.query(SomeClassHistory).filter(SomeClassHistory.version == 1).all(), [SomeClassHistory(version=1, name='sc1')] ) sc.name = 'sc1modified2' eq_( sess.query(SomeClassHistory).order_by(SomeClassHistory.version).all(), [ SomeClassHistory(version=1, name='sc1'), SomeClassHistory(version=2, name='sc1modified') ] ) assert sc.version == 3 sess.commit() sc.name = 'temp' sc.name = 'sc1modified2' sess.commit() eq_( sess.query(SomeClassHistory).order_by(SomeClassHistory.version).all(), [ SomeClassHistory(version=1, name='sc1'), SomeClassHistory(version=2, name='sc1modified') ] ) sess.delete(sc) sess.commit() eq_( sess.query(SomeClassHistory).order_by(SomeClassHistory.version).all(), [ SomeClassHistory(version=1, name='sc1'), SomeClassHistory(version=2, name='sc1modified'), SomeClassHistory(version=3, name='sc1modified2') ] ) def test_from_null(self): class SomeClass(Versioned, self.Base, ComparableEntity): __tablename__ = 'sometable' id = Column(Integer, primary_key=True) name = Column(String(50)) self.create_tables() sess = self.session sc = SomeClass() sess.add(sc) sess.commit() sc.name = 'sc1' sess.commit() assert sc.version == 2 def test_deferred(self): """test versioning of unloaded, deferred columns.""" class SomeClass(Versioned, self.Base, ComparableEntity): __tablename__ = 'sometable' id = Column(Integer, primary_key=True) name = Column(String(50)) data = deferred(Column(String(25))) self.create_tables() sess = self.session sc = SomeClass(name='sc1', data='somedata') sess.add(sc) sess.commit() sess.close() sc = 
sess.query(SomeClass).first() assert 'data' not in sc.__dict__ sc.name = 'sc1modified' sess.commit() assert sc.version == 2 SomeClassHistory = SomeClass.__history_mapper__.class_ eq_( sess.query(SomeClassHistory).filter(SomeClassHistory.version == 1).all(), [SomeClassHistory(version=1, name='sc1', data='somedata')] ) def test_joined_inheritance(self): class BaseClass(Versioned, self.Base, ComparableEntity): __tablename__ = 'basetable' id = Column(Integer, primary_key=True) name = Column(String(50)) type = Column(String(20)) __mapper_args__ = {'polymorphic_on':type, 'polymorphic_identity':'base'} class SubClassSeparatePk(BaseClass): __tablename__ = 'subtable1' id = Column(Integer, primary_key=True) base_id = Column(Integer, ForeignKey('basetable.id')) subdata1 = Column(String(50)) __mapper_args__ = {'polymorphic_identity':'sep'} class SubClassSamePk(BaseClass): __tablename__ = 'subtable2' id = Column(Integer, ForeignKey('basetable.id'), primary_key=True) subdata2 = Column(String(50)) __mapper_args__ = {'polymorphic_identity':'same'} self.create_tables() sess = self.session sep1 = SubClassSeparatePk(name='sep1', subdata1='sep1subdata') base1 = BaseClass(name='base1') same1 = SubClassSamePk(name='same1', subdata2='same1subdata') sess.add_all([sep1, base1, same1]) sess.commit() base1.name = 'base1mod' same1.subdata2 = 'same1subdatamod' sep1.name ='sep1mod' sess.commit() BaseClassHistory = BaseClass.__history_mapper__.class_ SubClassSeparatePkHistory = SubClassSeparatePk.__history_mapper__.class_ SubClassSamePkHistory = SubClassSamePk.__history_mapper__.class_ eq_( sess.query(BaseClassHistory).order_by(BaseClassHistory.id).all(), [ SubClassSeparatePkHistory(id=1, name=u'sep1', type=u'sep', version=1), BaseClassHistory(id=2, name=u'base1', type=u'base', version=1), SubClassSamePkHistory(id=3, name=u'same1', type=u'same', version=1) ] ) same1.subdata2 = 'same1subdatamod2' eq_( sess.query(BaseClassHistory).order_by(BaseClassHistory.id, BaseClassHistory.version).all(), [ SubClassSeparatePkHistory(id=1, name=u'sep1', type=u'sep', version=1), BaseClassHistory(id=2, name=u'base1', type=u'base', version=1), SubClassSamePkHistory(id=3, name=u'same1', type=u'same', version=1), SubClassSamePkHistory(id=3, name=u'same1', type=u'same', version=2) ] ) base1.name = 'base1mod2' eq_( sess.query(BaseClassHistory).order_by(BaseClassHistory.id, BaseClassHistory.version).all(), [ SubClassSeparatePkHistory(id=1, name=u'sep1', type=u'sep', version=1), BaseClassHistory(id=2, name=u'base1', type=u'base', version=1), BaseClassHistory(id=2, name=u'base1mod', type=u'base', version=2), SubClassSamePkHistory(id=3, name=u'same1', type=u'same', version=1), SubClassSamePkHistory(id=3, name=u'same1', type=u'same', version=2) ] ) def test_single_inheritance(self): class BaseClass(Versioned, self.Base, ComparableEntity): __tablename__ = 'basetable' id = Column(Integer, primary_key=True) name = Column(String(50)) type = Column(String(50)) __mapper_args__ = {'polymorphic_on':type, 'polymorphic_identity':'base'} class SubClass(BaseClass): subname = Column(String(50), unique=True) __mapper_args__ = {'polymorphic_identity':'sub'} self.create_tables() sess = self.session b1 = BaseClass(name='b1') sc = SubClass(name='s1', subname='sc1') sess.add_all([b1, sc]) sess.commit() b1.name='b1modified' BaseClassHistory = BaseClass.__history_mapper__.class_ SubClassHistory = SubClass.__history_mapper__.class_ eq_( sess.query(BaseClassHistory).order_by(BaseClassHistory.id, BaseClassHistory.version).all(), [BaseClassHistory(id=1, name=u'b1', 
type=u'base', version=1)] ) sc.name ='s1modified' b1.name='b1modified2' eq_( sess.query(BaseClassHistory).order_by(BaseClassHistory.id, BaseClassHistory.version).all(), [ BaseClassHistory(id=1, name=u'b1', type=u'base', version=1), BaseClassHistory(id=1, name=u'b1modified', type=u'base', version=2), SubClassHistory(id=2, name=u's1', type=u'sub', version=1) ] ) # test the unique constraint on the subclass # column sc.name ="modifyagain" sess.flush() def test_unique(self): class SomeClass(Versioned, self.Base, ComparableEntity): __tablename__ = 'sometable' id = Column(Integer, primary_key=True) name = Column(String(50), unique=True) data = Column(String(50)) self.create_tables() sess = self.session sc = SomeClass(name='sc1', data='sc1') sess.add(sc) sess.commit() sc.data = 'sc1modified' sess.commit() assert sc.version == 2 sc.data = 'sc1modified2' sess.commit() assert sc.version == 3 def test_relationship(self): class SomeRelated(self.Base, ComparableEntity): __tablename__ = 'somerelated' id = Column(Integer, primary_key=True) class SomeClass(Versioned, self.Base, ComparableEntity): __tablename__ = 'sometable' id = Column(Integer, primary_key=True) name = Column(String(50)) related_id = Column(Integer, ForeignKey('somerelated.id')) related = relationship("SomeRelated", backref='classes') SomeClassHistory = SomeClass.__history_mapper__.class_ self.create_tables() sess = self.session sc = SomeClass(name='sc1') sess.add(sc) sess.commit() assert sc.version == 1 sr1 = SomeRelated() sc.related = sr1 sess.commit() assert sc.version == 2 eq_( sess.query(SomeClassHistory).filter(SomeClassHistory.version == 1).all(), [SomeClassHistory(version=1, name='sc1', related_id=None)] ) sc.related = None eq_( sess.query(SomeClassHistory).order_by(SomeClassHistory.version).all(), [ SomeClassHistory(version=1, name='sc1', related_id=None), SomeClassHistory(version=2, name='sc1', related_id=sr1.id) ] ) assert sc.version == 3 def test_backref_relationship(self): class SomeRelated(self.Base, ComparableEntity): __tablename__ = 'somerelated' id = Column(Integer, primary_key=True) name = Column(String(50)) related_id = Column(Integer, ForeignKey('sometable.id')) related = relationship("SomeClass", backref='related') class SomeClass(Versioned, self.Base, ComparableEntity): __tablename__ = 'sometable' id = Column(Integer, primary_key=True) self.create_tables() sess = self.session sc = SomeClass() sess.add(sc) sess.commit() assert sc.version == 1 sr = SomeRelated(name='sr', related=sc) sess.add(sr) sess.commit() assert sc.version == 1 sr.name = 'sr2' sess.commit() assert sc.version == 1 sess.delete(sr) sess.commit() assert sc.version == 1 SQLAlchemy-0.8.4/examples/vertical/0000755000076500000240000000000012251151573017662 5ustar classicstaff00000000000000SQLAlchemy-0.8.4/examples/vertical/__init__.py0000644000076500000240000000200312251150015021754 0ustar classicstaff00000000000000""" Illustrates "vertical table" mappings. A "vertical table" refers to a technique where individual attributes of an object are stored as distinct rows in a table. The "vertical table" technique is used to persist objects which can have a varied set of attributes, at the expense of simple query control and brevity. It is commonly found in content/document management systems in order to represent user-created structures flexibly. Two variants on the approach are given. In the second, each row references a "datatype" which contains information about the type of information stored in the attribute, such as integer, string, or date. 
Example:: shrew = Animal(u'shrew') shrew[u'cuteness'] = 5 shrew[u'weasel-like'] = False shrew[u'poisonous'] = True session.add(shrew) session.flush() q = (session.query(Animal). filter(Animal.facts.any( and_(AnimalFact.key == u'weasel-like', AnimalFact.value == True)))) print 'weasel-like animals', q.all() """SQLAlchemy-0.8.4/examples/vertical/dictlike-polymorphic.py0000644000076500000240000002104112251150015024353 0ustar classicstaff00000000000000"""Mapping a polymorphic-valued vertical table as a dictionary. This example illustrates accessing and modifying a "vertical" (or "properties", or pivoted) table via a dict-like interface. The 'dictlike.py' example explains the basics of vertical tables and the general approach. This example adds a twist- the vertical table holds several "value" columns, one for each type of data that can be stored. For example:: Table('properties', metadata Column('owner_id', Integer, ForeignKey('owner.id'), primary_key=True), Column('key', UnicodeText), Column('type', Unicode(16)), Column('int_value', Integer), Column('char_value', UnicodeText), Column('bool_value', Boolean), Column('decimal_value', Numeric(10,2))) For any given properties row, the value of the 'type' column will point to the '_value' column active for that row. This example approach uses exactly the same dict mapping approach as the 'dictlike' example. It only differs in the mapping for vertical rows. Here, we'll use a @hybrid_property to build a smart '.value' attribute that wraps up reading and writing those various '_value' columns and keeps the '.type' up to date. Class decorators are used, so Python 2.6 or greater is required. """ from sqlalchemy.orm.interfaces import PropComparator from sqlalchemy.orm import comparable_property from sqlalchemy.ext.hybrid import hybrid_property # Using the VerticalPropertyDictMixin from the base example from dictlike import VerticalPropertyDictMixin class PolymorphicVerticalProperty(object): """A key/value pair with polymorphic value storage. Supplies a smart 'value' attribute that provides convenient read/write access to the row's current value without the caller needing to worry about the 'type' attribute or multiple columns. The 'value' attribute can also be used for basic comparisons in queries, allowing the row's logical value to be compared without foreknowledge of which column it might be in. This is not going to be a very efficient operation on the database side, but it is possible. If you're mapping to an existing database and you have some rows with a value of str('1') and others of int(1), then this could be useful. Subclasses must provide a 'type_map' class attribute with the following form:: type_map = { : ('type column value', 'column name'), # ... } For example,:: type_map = { int: ('integer', 'integer_value'), str: ('varchar', 'varchar_value'), } Would indicate that a Python int value should be stored in the 'integer_value' column and the .type set to 'integer'. Conversely, if the value of '.type' is 'integer, then the 'integer_value' column is consulted for the current value. 
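    As a rough illustration (these particular values are not part of the
    original docstring; ``AnimalFact`` and its columns come from the demo
    code at the bottom of this module)::

        fact = AnimalFact(u'cuteness', 5)
        fact.type           # u'integer'
        fact.int_value      # 5
        fact.char_value     # None

        fact.value = u'very cute'
        fact.type           # u'char'
        fact.char_value     # u'very cute'
        fact.int_value      # None

    Assigning a value whose Python type is not present in ``type_map``
    raises ``TypeError``.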
""" type_map = { type(None): (None, None), } def __init__(self, key, value=None): self.key = key self.value = value @hybrid_property def value(self): for discriminator, field in self.type_map.values(): if self.type == discriminator: return getattr(self, field) return None @value.setter def value(self, value): py_type = type(value) if py_type not in self.type_map: raise TypeError(py_type) for field_type in self.type_map: discriminator, field = self.type_map[field_type] field_value = None if py_type == field_type: self.type = discriminator field_value = value if field is not None: setattr(self, field, field_value) @value.deleter def value(self): self._set_value(None) @value.comparator class value(PropComparator): """A comparator for .value, builds a polymorphic comparison via CASE. """ def __init__(self, cls): self.cls = cls def _case(self): whens = [(text("'%s'" % p[0]), cast(getattr(self.cls, p[1]), String)) for p in self.cls.type_map.values() if p[1] is not None] return case(whens, self.cls.type, null()) def __eq__(self, other): return self._case() == cast(other, String) def __ne__(self, other): return self._case() != cast(other, String) def __repr__(self): return '<%s %r=%r>' % (self.__class__.__name__, self.key, self.value) if __name__ == '__main__': from sqlalchemy import (MetaData, Table, Column, Integer, Unicode, ForeignKey, UnicodeText, and_, not_, or_, String, Boolean, cast, text, null, case, create_engine) from sqlalchemy.orm import mapper, relationship, Session from sqlalchemy.orm.collections import attribute_mapped_collection metadata = MetaData() animals = Table('animal', metadata, Column('id', Integer, primary_key=True), Column('name', Unicode(100))) chars = Table('facts', metadata, Column('animal_id', Integer, ForeignKey('animal.id'), primary_key=True), Column('key', Unicode(64), primary_key=True), Column('type', Unicode(16), default=None), Column('int_value', Integer, default=None), Column('char_value', UnicodeText, default=None), Column('boolean_value', Boolean, default=None)) class AnimalFact(PolymorphicVerticalProperty): type_map = { int: (u'integer', 'int_value'), unicode: (u'char', 'char_value'), bool: (u'boolean', 'boolean_value'), type(None): (None, None), } class Animal(VerticalPropertyDictMixin): """An animal. 
Animal facts are available via the 'facts' property or by using dict-like accessors on an Animal instance:: cat['color'] = 'calico' # or, equivalently: cat.facts['color'] = AnimalFact('color', 'calico') """ _property_type = AnimalFact _property_mapping = 'facts' def __init__(self, name): self.name = name def __repr__(self): return '<%s %r>' % (self.__class__.__name__, self.name) mapper(Animal, animals, properties={ 'facts': relationship( AnimalFact, backref='animal', collection_class=attribute_mapped_collection('key')), }) mapper(AnimalFact, chars) engine = create_engine('sqlite://', echo=True) metadata.create_all(engine) session = Session(engine) stoat = Animal(u'stoat') stoat[u'color'] = u'red' stoat[u'cuteness'] = 7 stoat[u'weasel-like'] = True session.add(stoat) session.commit() critter = session.query(Animal).filter(Animal.name == u'stoat').one() print critter[u'color'] print critter[u'cuteness'] print "changing cuteness value and type:" critter[u'cuteness'] = u'very cute' session.commit() marten = Animal(u'marten') marten[u'cuteness'] = 5 marten[u'weasel-like'] = True marten[u'poisonous'] = False session.add(marten) shrew = Animal(u'shrew') shrew[u'cuteness'] = 5 shrew[u'weasel-like'] = False shrew[u'poisonous'] = True session.add(shrew) session.commit() q = (session.query(Animal). filter(Animal.facts.any( and_(AnimalFact.key == u'weasel-like', AnimalFact.value == True)))) print 'weasel-like animals', q.all() # Save some typing by wrapping that up in a function: with_characteristic = lambda key, value: and_(AnimalFact.key == key, AnimalFact.value == value) q = (session.query(Animal). filter(Animal.facts.any( with_characteristic(u'weasel-like', True)))) print 'weasel-like animals again', q.all() q = (session.query(Animal). filter(Animal.facts.any(with_characteristic(u'poisonous', False)))) print 'animals with poisonous=False', q.all() q = (session.query(Animal). filter(or_(Animal.facts.any( with_characteristic(u'poisonous', False)), not_(Animal.facts.any(AnimalFact.key == u'poisonous'))))) print 'non-poisonous animals', q.all() q = (session.query(Animal). filter(Animal.facts.any(AnimalFact.value == 5))) print 'any animal with a .value of 5', q.all() # Facts can be queried as well. q = (session.query(AnimalFact). filter(with_characteristic(u'cuteness', u'very cute'))) print q.all() SQLAlchemy-0.8.4/examples/vertical/dictlike.py0000644000076500000240000001702012251150015022012 0ustar classicstaff00000000000000"""Mapping a vertical table as a dictionary. This example illustrates accessing and modifying a "vertical" (or "properties", or pivoted) table via a dict-like interface. These are tables that store free-form object properties as rows instead of columns. For example, instead of:: # A regular ("horizontal") table has columns for 'species' and 'size' Table('animal', metadata, Column('id', Integer, primary_key=True), Column('species', Unicode), Column('size', Unicode)) A vertical table models this as two tables: one table for the base or parent entity, and another related table holding key/value pairs:: Table('animal', metadata, Column('id', Integer, primary_key=True)) # The properties table will have one row for a 'species' value, and # another row for the 'size' value. Table('properties', metadata Column('animal_id', Integer, ForeignKey('animal.id'), primary_key=True), Column('key', UnicodeText), Column('value', UnicodeText)) Because the key/value pairs in a vertical scheme are not fixed in advance, accessing them like a Python dict can be very convenient. 
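As a quick sketch of what that access pattern looks like (``Animal`` and
``AnimalFact`` here are the demonstration classes defined at the bottom of
this module)::

    stoat = Animal(u'stoat')
    stoat[u'color'] = u'reddish'   # creates an AnimalFact(u'color', u'reddish')
    stoat[u'color']                # u'reddish'
    u'color' in stoat              # True
    del stoat[u'color']            # drops the entry from the 'facts' collection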
The example below can be used with many common vertical schemas as-is or with minor adaptations. """ class VerticalProperty(object): """A key/value pair. This class models rows in the vertical table. """ def __init__(self, key, value): self.key = key self.value = value def __repr__(self): return '<%s %r=%r>' % (self.__class__.__name__, self.key, self.value) class VerticalPropertyDictMixin(object): """Adds obj[key] access to a mapped class. This is a mixin class. It can be inherited from directly, or included with multiple inheritence. Classes using this mixin must define two class properties:: _property_type: The mapped type of the vertical key/value pair instances. Will be invoked with two positional arugments: key, value _property_mapping: A string, the name of the Python attribute holding a dict-based relationship of _property_type instances. Using the VerticalProperty class above as an example,:: class MyObj(VerticalPropertyDictMixin): _property_type = VerticalProperty _property_mapping = 'props' mapper(MyObj, sometable, properties={ 'props': relationship(VerticalProperty, collection_class=attribute_mapped_collection('key'))}) Dict-like access to MyObj is proxied through to the 'props' relationship:: myobj['key'] = 'value' # ...is shorthand for: myobj.props['key'] = VerticalProperty('key', 'value') myobj['key'] = 'updated value'] # ...is shorthand for: myobj.props['key'].value = 'updated value' print myobj['key'] # ...is shorthand for: print myobj.props['key'].value """ _property_type = VerticalProperty _property_mapping = None __map = property(lambda self: getattr(self, self._property_mapping)) def __getitem__(self, key): return self.__map[key].value def __setitem__(self, key, value): property = self.__map.get(key, None) if property is None: self.__map[key] = self._property_type(key, value) else: property.value = value def __delitem__(self, key): del self.__map[key] def __contains__(self, key): return key in self.__map # Implement other dict methods to taste. Here are some examples: def keys(self): return self.__map.keys() def values(self): return [prop.value for prop in self.__map.values()] def items(self): return [(key, prop.value) for key, prop in self.__map.items()] def __iter__(self): return iter(self.keys()) if __name__ == '__main__': from sqlalchemy import (MetaData, Table, Column, Integer, Unicode, ForeignKey, UnicodeText, and_, not_, create_engine) from sqlalchemy.orm import mapper, relationship, Session from sqlalchemy.orm.collections import attribute_mapped_collection metadata = MetaData() # Here we have named animals, and a collection of facts about them. animals = Table('animal', metadata, Column('id', Integer, primary_key=True), Column('name', Unicode(100))) facts = Table('facts', metadata, Column('animal_id', Integer, ForeignKey('animal.id'), primary_key=True), Column('key', Unicode(64), primary_key=True), Column('value', UnicodeText, default=None),) class AnimalFact(VerticalProperty): """A fact about an animal.""" class Animal(VerticalPropertyDictMixin): """An animal. 
Animal facts are available via the 'facts' property or by using dict-like accessors on an Animal instance:: cat['color'] = 'calico' # or, equivalently: cat.facts['color'] = AnimalFact('color', 'calico') """ _property_type = AnimalFact _property_mapping = 'facts' def __init__(self, name): self.name = name def __repr__(self): return '<%s %r>' % (self.__class__.__name__, self.name) mapper(Animal, animals, properties={ 'facts': relationship( AnimalFact, backref='animal', collection_class=attribute_mapped_collection('key')), }) mapper(AnimalFact, facts) engine = create_engine("sqlite://") metadata.create_all(engine) session = Session(bind=engine) stoat = Animal(u'stoat') stoat[u'color'] = u'reddish' stoat[u'cuteness'] = u'somewhat' # dict-like assignment transparently creates entries in the # stoat.facts collection: print stoat.facts[u'color'] session.add(stoat) session.commit() critter = session.query(Animal).filter(Animal.name == u'stoat').one() print critter[u'color'] print critter[u'cuteness'] critter[u'cuteness'] = u'very' print 'changing cuteness:' engine.echo = True session.commit() engine.echo = False marten = Animal(u'marten') marten[u'color'] = u'brown' marten[u'cuteness'] = u'somewhat' session.add(marten) shrew = Animal(u'shrew') shrew[u'cuteness'] = u'somewhat' shrew[u'poisonous-part'] = u'saliva' session.add(shrew) loris = Animal(u'slow loris') loris[u'cuteness'] = u'fairly' loris[u'poisonous-part'] = u'elbows' session.add(loris) session.commit() q = (session.query(Animal). filter(Animal.facts.any( and_(AnimalFact.key == u'color', AnimalFact.value == u'reddish')))) print 'reddish animals', q.all() # Save some typing by wrapping that up in a function: with_characteristic = lambda key, value: and_(AnimalFact.key == key, AnimalFact.value == value) q = (session.query(Animal). filter(Animal.facts.any( with_characteristic(u'color', u'brown')))) print 'brown animals', q.all() q = (session.query(Animal). filter(not_(Animal.facts.any( with_characteristic(u'poisonous-part', u'elbows'))))) print 'animals without poisonous-part == elbows', q.all() q = (session.query(Animal). filter(Animal.facts.any(AnimalFact.value == u'somewhat'))) print 'any animal with any .value of "somewhat"', q.all() # Facts can be queried as well. q = (session.query(AnimalFact). filter(with_characteristic(u'cuteness', u'very'))) print 'just the facts', q.all() SQLAlchemy-0.8.4/ez_setup.py0000644000076500000240000002377112251150015016443 0ustar classicstaff00000000000000#!python """Bootstrap setuptools installation If you want to use setuptools in your package's setup.py, just include this file in the same directory with it, and add this to the top of your setup.py:: from ez_setup import use_setuptools use_setuptools() If you want to require a specific version of setuptools, set a download mirror, or use an alternate download directory, you can do so by supplying the appropriate options to ``use_setuptools()``. This file can also be run as a script to install or upgrade setuptools. 
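For example (the keyword arguments below are the ones accepted by
``use_setuptools()`` later in this file; the mirror URL is only a
placeholder)::

    from ez_setup import use_setuptools
    use_setuptools(
        version="0.6c11",                              # version to require
        download_base="http://my.mirror/setuptools/",  # must end with '/'
        to_dir="downloads",                            # where the egg is saved
        download_delay=15,                             # pause before download
    )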
""" import sys DEFAULT_VERSION = "0.6c11" DEFAULT_URL = "http://pypi.python.org/packages/%s/s/setuptools/" % sys.version[:3] md5_data = { 'setuptools-0.6b1-py2.3.egg': '8822caf901250d848b996b7f25c6e6ca', 'setuptools-0.6b1-py2.4.egg': 'b79a8a403e4502fbb85ee3f1941735cb', 'setuptools-0.6b2-py2.3.egg': '5657759d8a6d8fc44070a9d07272d99b', 'setuptools-0.6b2-py2.4.egg': '4996a8d169d2be661fa32a6e52e4f82a', 'setuptools-0.6b3-py2.3.egg': 'bb31c0fc7399a63579975cad9f5a0618', 'setuptools-0.6b3-py2.4.egg': '38a8c6b3d6ecd22247f179f7da669fac', 'setuptools-0.6b4-py2.3.egg': '62045a24ed4e1ebc77fe039aa4e6f7e5', 'setuptools-0.6b4-py2.4.egg': '4cb2a185d228dacffb2d17f103b3b1c4', 'setuptools-0.6c1-py2.3.egg': 'b3f2b5539d65cb7f74ad79127f1a908c', 'setuptools-0.6c1-py2.4.egg': 'b45adeda0667d2d2ffe14009364f2a4b', 'setuptools-0.6c10-py2.3.egg': 'ce1e2ab5d3a0256456d9fc13800a7090', 'setuptools-0.6c10-py2.4.egg': '57d6d9d6e9b80772c59a53a8433a5dd4', 'setuptools-0.6c10-py2.5.egg': 'de46ac8b1c97c895572e5e8596aeb8c7', 'setuptools-0.6c10-py2.6.egg': '58ea40aef06da02ce641495523a0b7f5', 'setuptools-0.6c11-py2.3.egg': '2baeac6e13d414a9d28e7ba5b5a596de', 'setuptools-0.6c11-py2.4.egg': 'bd639f9b0eac4c42497034dec2ec0c2b', 'setuptools-0.6c11-py2.5.egg': '64c94f3bf7a72a13ec83e0b24f2749b2', 'setuptools-0.6c11-py2.6.egg': 'bfa92100bd772d5a213eedd356d64086', 'setuptools-0.6c2-py2.3.egg': 'f0064bf6aa2b7d0f3ba0b43f20817c27', 'setuptools-0.6c2-py2.4.egg': '616192eec35f47e8ea16cd6a122b7277', 'setuptools-0.6c3-py2.3.egg': 'f181fa125dfe85a259c9cd6f1d7b78fa', 'setuptools-0.6c3-py2.4.egg': 'e0ed74682c998bfb73bf803a50e7b71e', 'setuptools-0.6c3-py2.5.egg': 'abef16fdd61955514841c7c6bd98965e', 'setuptools-0.6c4-py2.3.egg': 'b0b9131acab32022bfac7f44c5d7971f', 'setuptools-0.6c4-py2.4.egg': '2a1f9656d4fbf3c97bf946c0a124e6e2', 'setuptools-0.6c4-py2.5.egg': '8f5a052e32cdb9c72bcf4b5526f28afc', 'setuptools-0.6c5-py2.3.egg': 'ee9fd80965da04f2f3e6b3576e9d8167', 'setuptools-0.6c5-py2.4.egg': 'afe2adf1c01701ee841761f5bcd8aa64', 'setuptools-0.6c5-py2.5.egg': 'a8d3f61494ccaa8714dfed37bccd3d5d', 'setuptools-0.6c6-py2.3.egg': '35686b78116a668847237b69d549ec20', 'setuptools-0.6c6-py2.4.egg': '3c56af57be3225019260a644430065ab', 'setuptools-0.6c6-py2.5.egg': 'b2f8a7520709a5b34f80946de5f02f53', 'setuptools-0.6c7-py2.3.egg': '209fdf9adc3a615e5115b725658e13e2', 'setuptools-0.6c7-py2.4.egg': '5a8f954807d46a0fb67cf1f26c55a82e', 'setuptools-0.6c7-py2.5.egg': '45d2ad28f9750e7434111fde831e8372', 'setuptools-0.6c8-py2.3.egg': '50759d29b349db8cfd807ba8303f1902', 'setuptools-0.6c8-py2.4.egg': 'cba38d74f7d483c06e9daa6070cce6de', 'setuptools-0.6c8-py2.5.egg': '1721747ee329dc150590a58b3e1ac95b', 'setuptools-0.6c9-py2.3.egg': 'a83c4020414807b496e4cfbe08507c03', 'setuptools-0.6c9-py2.4.egg': '260a2be2e5388d66bdaee06abec6342a', 'setuptools-0.6c9-py2.5.egg': 'fe67c3e5a17b12c0e7c541b7ea43a8e6', 'setuptools-0.6c9-py2.6.egg': 'ca37b1ff16fa2ede6e19383e7b59245a', } import sys, os try: from hashlib import md5 except ImportError: from md5 import md5 def _validate_md5(egg_name, data): if egg_name in md5_data: digest = md5(data).hexdigest() if digest != md5_data[egg_name]: print >>sys.stderr, ( "md5 validation of %s failed! 
(Possible download problem?)" % egg_name ) sys.exit(2) return data def use_setuptools( version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir, download_delay=15 ): """Automatically find/download setuptools and make it available on sys.path `version` should be a valid setuptools version number that is available as an egg for download under the `download_base` URL (which should end with a '/'). `to_dir` is the directory where setuptools will be downloaded, if it is not already available. If `download_delay` is specified, it should be the number of seconds that will be paused before initiating a download, should one be required. If an older version of setuptools is installed, this routine will print a message to ``sys.stderr`` and raise SystemExit in an attempt to abort the calling script. """ was_imported = 'pkg_resources' in sys.modules or 'setuptools' in sys.modules def do_download(): egg = download_setuptools(version, download_base, to_dir, download_delay) sys.path.insert(0, egg) import setuptools; setuptools.bootstrap_install_from = egg try: import pkg_resources except ImportError: return do_download() try: pkg_resources.require("setuptools>="+version); return except pkg_resources.VersionConflict, e: if was_imported: print >>sys.stderr, ( "The required version of setuptools (>=%s) is not available, and\n" "can't be installed while this script is running. Please install\n" " a more recent version first, using 'easy_install -U setuptools'." "\n\n(Currently using %r)" ) % (version, e.args[0]) sys.exit(2) except pkg_resources.DistributionNotFound: pass del pkg_resources, sys.modules['pkg_resources'] # reload ok return do_download() def download_setuptools( version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir, delay = 15 ): """Download setuptools from a specified location and return its filename `version` should be a valid setuptools version number that is available as an egg for download under the `download_base` URL (which should end with a '/'). `to_dir` is the directory where the egg will be downloaded. `delay` is the number of seconds to pause before an actual download attempt. """ import urllib2, shutil egg_name = "setuptools-%s-py%s.egg" % (version,sys.version[:3]) url = download_base + egg_name saveto = os.path.join(to_dir, egg_name) src = dst = None if not os.path.exists(saveto): # Avoid repeated downloads try: from distutils import log if delay: log.warn(""" --------------------------------------------------------------------------- This script requires setuptools version %s to run (even to display help). I will attempt to download it for you (from %s), but you may need to enable firewall access for this script first. I will start the download in %d seconds. (Note: if this machine does not have network access, please obtain the file %s and place it in this directory before rerunning this script.) ---------------------------------------------------------------------------""", version, download_base, delay, url ); from time import sleep; sleep(delay) log.warn("Downloading %s", url) src = urllib2.urlopen(url) # Read/write all in one block, so we don't create a corrupt file # if the download is interrupted. 
data = _validate_md5(egg_name, src.read()) dst = open(saveto,"wb"); dst.write(data) finally: if src: src.close() if dst: dst.close() return os.path.realpath(saveto) def main(argv, version=DEFAULT_VERSION): """Install or upgrade setuptools and EasyInstall""" try: import setuptools except ImportError: egg = None try: egg = download_setuptools(version, delay=0) sys.path.insert(0,egg) from setuptools.command.easy_install import main return main(list(argv)+[egg]) # we're done here finally: if egg and os.path.exists(egg): os.unlink(egg) else: if setuptools.__version__ == '0.0.1': print >>sys.stderr, ( "You have an obsolete version of setuptools installed. Please\n" "remove it from your system entirely before rerunning this script." ) sys.exit(2) req = "setuptools>="+version import pkg_resources try: pkg_resources.require(req) except pkg_resources.VersionConflict: try: from setuptools.command.easy_install import main except ImportError: from easy_install import main main(list(argv)+[download_setuptools(delay=0)]) sys.exit(0) # try to force an exit else: if argv: from setuptools.command.easy_install import main main(argv) else: print "Setuptools version",version,"or greater has been installed." print '(Run "ez_setup.py -U setuptools" to reinstall or upgrade.)' def update_md5(filenames): """Update our built-in md5 registry""" import re for name in filenames: base = os.path.basename(name) f = open(name,'rb') md5_data[base] = md5(f.read()).hexdigest() f.close() data = [" %r: %r,\n" % it for it in md5_data.items()] data.sort() repl = "".join(data) import inspect srcfile = inspect.getsourcefile(sys.modules[__name__]) f = open(srcfile, 'rb'); src = f.read(); f.close() match = re.search("\nmd5_data = {\n([^}]+)}", src) if not match: print >>sys.stderr, "Internal error!" 
sys.exit(2) src = src[:match.start(1)] + repl + src[match.end(1):] f = open(srcfile,'w') f.write(src) f.close() if __name__=='__main__': if len(sys.argv)>2 and sys.argv[1]=='--md5update': update_md5(sys.argv[2:]) else: main(sys.argv[1:]) SQLAlchemy-0.8.4/lib/0000755000076500000240000000000012251151573015001 5ustar classicstaff00000000000000SQLAlchemy-0.8.4/lib/sqlalchemy/0000755000076500000240000000000012251151573017143 5ustar classicstaff00000000000000SQLAlchemy-0.8.4/lib/sqlalchemy/__init__.py0000644000076500000240000000367012251150015021250 0ustar classicstaff00000000000000# sqlalchemy/__init__.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php import inspect as _inspect import sys from .sql import ( alias, and_, asc, between, bindparam, case, cast, collate, delete, desc, distinct, except_, except_all, exists, extract, func, insert, intersect, intersect_all, join, literal, literal_column, modifier, not_, null, or_, outerjoin, outparam, over, select, subquery, text, tuple_, type_coerce, union, union_all, update, ) from .types import ( BIGINT, BINARY, BLOB, BOOLEAN, BigInteger, Binary, Boolean, CHAR, CLOB, DATE, DATETIME, DECIMAL, Date, DateTime, Enum, FLOAT, Float, INT, INTEGER, Integer, Interval, LargeBinary, NCHAR, NVARCHAR, NUMERIC, Numeric, PickleType, REAL, SMALLINT, SmallInteger, String, TEXT, TIME, TIMESTAMP, Text, Time, TypeDecorator, Unicode, UnicodeText, VARBINARY, VARCHAR, ) from .schema import ( CheckConstraint, Column, ColumnDefault, Constraint, DDL, DefaultClause, FetchedValue, ForeignKey, ForeignKeyConstraint, Index, MetaData, PassiveDefault, PrimaryKeyConstraint, Sequence, Table, ThreadLocalMetaData, UniqueConstraint, ) from .inspection import inspect from .engine import create_engine, engine_from_config __all__ = sorted(name for name, obj in locals().items() if not (name.startswith('_') or _inspect.ismodule(obj))) __version__ = '0.8.4' del _inspect, sys from . import util as _sa_util _sa_util.importlater.resolve_all() SQLAlchemy-0.8.4/lib/sqlalchemy/cextension/0000755000076500000240000000000012251151573021322 5ustar classicstaff00000000000000SQLAlchemy-0.8.4/lib/sqlalchemy/cextension/processors.c0000644000076500000240000003513412251150015023664 0ustar classicstaff00000000000000/* processors.c Copyright (C) 2010-2013 the SQLAlchemy authors and contributors Copyright (C) 2010 Gaetan de Menten gdementen@gmail.com This module is part of SQLAlchemy and is released under the MIT License: http://www.opensource.org/licenses/mit-license.php */ #include #include #if PY_VERSION_HEX < 0x02050000 && !defined(PY_SSIZE_T_MIN) typedef int Py_ssize_t; #define PY_SSIZE_T_MAX INT_MAX #define PY_SSIZE_T_MIN INT_MIN #endif static PyObject * int_to_boolean(PyObject *self, PyObject *arg) { long l = 0; PyObject *res; if (arg == Py_None) Py_RETURN_NONE; l = PyInt_AsLong(arg); if (l == 0) { res = Py_False; } else if (l == 1) { res = Py_True; } else if ((l == -1) && PyErr_Occurred()) { /* -1 can be either the actual value, or an error flag. 
*/ return NULL; } else { PyErr_SetString(PyExc_ValueError, "int_to_boolean only accepts None, 0 or 1"); return NULL; } Py_INCREF(res); return res; } static PyObject * to_str(PyObject *self, PyObject *arg) { if (arg == Py_None) Py_RETURN_NONE; return PyObject_Str(arg); } static PyObject * to_float(PyObject *self, PyObject *arg) { if (arg == Py_None) Py_RETURN_NONE; return PyNumber_Float(arg); } static PyObject * str_to_datetime(PyObject *self, PyObject *arg) { const char *str; unsigned int year, month, day, hour, minute, second, microsecond = 0; PyObject *err_repr; if (arg == Py_None) Py_RETURN_NONE; str = PyString_AsString(arg); if (str == NULL) { err_repr = PyObject_Repr(arg); if (err_repr == NULL) return NULL; PyErr_Format( PyExc_ValueError, "Couldn't parse datetime string '%.200s' " "- value is not a string.", PyString_AsString(err_repr)); Py_DECREF(err_repr); return NULL; } /* microseconds are optional */ /* TODO: this is slightly less picky than the Python version which would not accept "2000-01-01 00:00:00.". I don't know which is better, but they should be coherent. */ if (sscanf(str, "%4u-%2u-%2u %2u:%2u:%2u.%6u", &year, &month, &day, &hour, &minute, &second, µsecond) < 6) { err_repr = PyObject_Repr(arg); if (err_repr == NULL) return NULL; PyErr_Format( PyExc_ValueError, "Couldn't parse datetime string: %.200s", PyString_AsString(err_repr)); Py_DECREF(err_repr); return NULL; } return PyDateTime_FromDateAndTime(year, month, day, hour, minute, second, microsecond); } static PyObject * str_to_time(PyObject *self, PyObject *arg) { const char *str; unsigned int hour, minute, second, microsecond = 0; PyObject *err_repr; if (arg == Py_None) Py_RETURN_NONE; str = PyString_AsString(arg); if (str == NULL) { err_repr = PyObject_Repr(arg); if (err_repr == NULL) return NULL; PyErr_Format( PyExc_ValueError, "Couldn't parse time string '%.200s' - value is not a string.", PyString_AsString(err_repr)); Py_DECREF(err_repr); return NULL; } /* microseconds are optional */ /* TODO: this is slightly less picky than the Python version which would not accept "00:00:00.". I don't know which is better, but they should be coherent. 
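      (Illustrative note, not part of the original comment: with the
      sscanf() format used below, "15:30:00" parses with microsecond left
      at its initialized value of 0, "15:30:00.500000" fills in all four
      fields, and a trailing bare "." as in "00:00:00." is tolerated here
      even though the Python-level parser rejects it.)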
*/ if (sscanf(str, "%2u:%2u:%2u.%6u", &hour, &minute, &second, µsecond) < 3) { err_repr = PyObject_Repr(arg); if (err_repr == NULL) return NULL; PyErr_Format( PyExc_ValueError, "Couldn't parse time string: %.200s", PyString_AsString(err_repr)); Py_DECREF(err_repr); return NULL; } return PyTime_FromTime(hour, minute, second, microsecond); } static PyObject * str_to_date(PyObject *self, PyObject *arg) { const char *str; unsigned int year, month, day; PyObject *err_repr; if (arg == Py_None) Py_RETURN_NONE; str = PyString_AsString(arg); if (str == NULL) { err_repr = PyObject_Repr(arg); if (err_repr == NULL) return NULL; PyErr_Format( PyExc_ValueError, "Couldn't parse date string '%.200s' - value is not a string.", PyString_AsString(err_repr)); Py_DECREF(err_repr); return NULL; } if (sscanf(str, "%4u-%2u-%2u", &year, &month, &day) != 3) { err_repr = PyObject_Repr(arg); if (err_repr == NULL) return NULL; PyErr_Format( PyExc_ValueError, "Couldn't parse date string: %.200s", PyString_AsString(err_repr)); Py_DECREF(err_repr); return NULL; } return PyDate_FromDate(year, month, day); } /*********** * Structs * ***********/ typedef struct { PyObject_HEAD PyObject *encoding; PyObject *errors; } UnicodeResultProcessor; typedef struct { PyObject_HEAD PyObject *type; PyObject *format; } DecimalResultProcessor; /************************** * UnicodeResultProcessor * **************************/ static int UnicodeResultProcessor_init(UnicodeResultProcessor *self, PyObject *args, PyObject *kwds) { PyObject *encoding, *errors = NULL; static char *kwlist[] = {"encoding", "errors", NULL}; if (!PyArg_ParseTupleAndKeywords(args, kwds, "S|S:__init__", kwlist, &encoding, &errors)) return -1; Py_INCREF(encoding); self->encoding = encoding; if (errors) { Py_INCREF(errors); } else { errors = PyString_FromString("strict"); if (errors == NULL) return -1; } self->errors = errors; return 0; } static PyObject * UnicodeResultProcessor_process(UnicodeResultProcessor *self, PyObject *value) { const char *encoding, *errors; char *str; Py_ssize_t len; if (value == Py_None) Py_RETURN_NONE; if (PyString_AsStringAndSize(value, &str, &len)) return NULL; encoding = PyString_AS_STRING(self->encoding); errors = PyString_AS_STRING(self->errors); return PyUnicode_Decode(str, len, encoding, errors); } static void UnicodeResultProcessor_dealloc(UnicodeResultProcessor *self) { Py_XDECREF(self->encoding); Py_XDECREF(self->errors); self->ob_type->tp_free((PyObject*)self); } static PyMethodDef UnicodeResultProcessor_methods[] = { {"process", (PyCFunction)UnicodeResultProcessor_process, METH_O, "The value processor itself."}, {NULL} /* Sentinel */ }; static PyTypeObject UnicodeResultProcessorType = { PyObject_HEAD_INIT(NULL) 0, /* ob_size */ "sqlalchemy.cprocessors.UnicodeResultProcessor", /* tp_name */ sizeof(UnicodeResultProcessor), /* tp_basicsize */ 0, /* tp_itemsize */ (destructor)UnicodeResultProcessor_dealloc, /* tp_dealloc */ 0, /* tp_print */ 0, /* tp_getattr */ 0, /* tp_setattr */ 0, /* tp_compare */ 0, /* tp_repr */ 0, /* tp_as_number */ 0, /* tp_as_sequence */ 0, /* tp_as_mapping */ 0, /* tp_hash */ 0, /* tp_call */ 0, /* tp_str */ 0, /* tp_getattro */ 0, /* tp_setattro */ 0, /* tp_as_buffer */ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /* tp_flags */ "UnicodeResultProcessor objects", /* tp_doc */ 0, /* tp_traverse */ 0, /* tp_clear */ 0, /* tp_richcompare */ 0, /* tp_weaklistoffset */ 0, /* tp_iter */ 0, /* tp_iternext */ UnicodeResultProcessor_methods, /* tp_methods */ 0, /* tp_members */ 0, /* tp_getset */ 0, /* tp_base */ 0, /* 
tp_dict */ 0, /* tp_descr_get */ 0, /* tp_descr_set */ 0, /* tp_dictoffset */ (initproc)UnicodeResultProcessor_init, /* tp_init */ 0, /* tp_alloc */ 0, /* tp_new */ }; /************************** * DecimalResultProcessor * **************************/ static int DecimalResultProcessor_init(DecimalResultProcessor *self, PyObject *args, PyObject *kwds) { PyObject *type, *format; if (!PyArg_ParseTuple(args, "OS", &type, &format)) return -1; Py_INCREF(type); self->type = type; Py_INCREF(format); self->format = format; return 0; } static PyObject * DecimalResultProcessor_process(DecimalResultProcessor *self, PyObject *value) { PyObject *str, *result, *args; if (value == Py_None) Py_RETURN_NONE; args = PyTuple_Pack(1, value); if (args == NULL) return NULL; str = PyString_Format(self->format, args); Py_DECREF(args); if (str == NULL) return NULL; result = PyObject_CallFunctionObjArgs(self->type, str, NULL); Py_DECREF(str); return result; } static void DecimalResultProcessor_dealloc(DecimalResultProcessor *self) { Py_XDECREF(self->type); Py_XDECREF(self->format); self->ob_type->tp_free((PyObject*)self); } static PyMethodDef DecimalResultProcessor_methods[] = { {"process", (PyCFunction)DecimalResultProcessor_process, METH_O, "The value processor itself."}, {NULL} /* Sentinel */ }; static PyTypeObject DecimalResultProcessorType = { PyObject_HEAD_INIT(NULL) 0, /* ob_size */ "sqlalchemy.DecimalResultProcessor", /* tp_name */ sizeof(DecimalResultProcessor), /* tp_basicsize */ 0, /* tp_itemsize */ (destructor)DecimalResultProcessor_dealloc, /* tp_dealloc */ 0, /* tp_print */ 0, /* tp_getattr */ 0, /* tp_setattr */ 0, /* tp_compare */ 0, /* tp_repr */ 0, /* tp_as_number */ 0, /* tp_as_sequence */ 0, /* tp_as_mapping */ 0, /* tp_hash */ 0, /* tp_call */ 0, /* tp_str */ 0, /* tp_getattro */ 0, /* tp_setattro */ 0, /* tp_as_buffer */ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /* tp_flags */ "DecimalResultProcessor objects", /* tp_doc */ 0, /* tp_traverse */ 0, /* tp_clear */ 0, /* tp_richcompare */ 0, /* tp_weaklistoffset */ 0, /* tp_iter */ 0, /* tp_iternext */ DecimalResultProcessor_methods, /* tp_methods */ 0, /* tp_members */ 0, /* tp_getset */ 0, /* tp_base */ 0, /* tp_dict */ 0, /* tp_descr_get */ 0, /* tp_descr_set */ 0, /* tp_dictoffset */ (initproc)DecimalResultProcessor_init, /* tp_init */ 0, /* tp_alloc */ 0, /* tp_new */ }; #ifndef PyMODINIT_FUNC /* declarations for DLL import/export */ #define PyMODINIT_FUNC void #endif static PyMethodDef module_methods[] = { {"int_to_boolean", int_to_boolean, METH_O, "Convert an integer to a boolean."}, {"to_str", to_str, METH_O, "Convert any value to its string representation."}, {"to_float", to_float, METH_O, "Convert any value to its floating point representation."}, {"str_to_datetime", str_to_datetime, METH_O, "Convert an ISO string to a datetime.datetime object."}, {"str_to_time", str_to_time, METH_O, "Convert an ISO string to a datetime.time object."}, {"str_to_date", str_to_date, METH_O, "Convert an ISO string to a datetime.date object."}, {NULL, NULL, 0, NULL} /* Sentinel */ }; PyMODINIT_FUNC initcprocessors(void) { PyObject *m; UnicodeResultProcessorType.tp_new = PyType_GenericNew; if (PyType_Ready(&UnicodeResultProcessorType) < 0) return; DecimalResultProcessorType.tp_new = PyType_GenericNew; if (PyType_Ready(&DecimalResultProcessorType) < 0) return; m = Py_InitModule3("cprocessors", module_methods, "Module containing C versions of data processing functions."); if (m == NULL) return; PyDateTime_IMPORT; Py_INCREF(&UnicodeResultProcessorType); 
PyModule_AddObject(m, "UnicodeResultProcessor", (PyObject *)&UnicodeResultProcessorType); Py_INCREF(&DecimalResultProcessorType); PyModule_AddObject(m, "DecimalResultProcessor", (PyObject *)&DecimalResultProcessorType); } SQLAlchemy-0.8.4/lib/sqlalchemy/cextension/resultproxy.c0000644000076500000240000004366512251150015024112 0ustar classicstaff00000000000000/* resultproxy.c Copyright (C) 2010-2013 the SQLAlchemy authors and contributors Copyright (C) 2010 Gaetan de Menten gdementen@gmail.com This module is part of SQLAlchemy and is released under the MIT License: http://www.opensource.org/licenses/mit-license.php */ #include #if PY_VERSION_HEX < 0x02050000 && !defined(PY_SSIZE_T_MIN) typedef int Py_ssize_t; #define PY_SSIZE_T_MAX INT_MAX #define PY_SSIZE_T_MIN INT_MIN typedef Py_ssize_t (*lenfunc)(PyObject *); #define PyInt_FromSsize_t(x) PyInt_FromLong(x) typedef intargfunc ssizeargfunc; #endif /*********** * Structs * ***********/ typedef struct { PyObject_HEAD PyObject *parent; PyObject *row; PyObject *processors; PyObject *keymap; } BaseRowProxy; /**************** * BaseRowProxy * ****************/ static PyObject * safe_rowproxy_reconstructor(PyObject *self, PyObject *args) { PyObject *cls, *state, *tmp; BaseRowProxy *obj; if (!PyArg_ParseTuple(args, "OO", &cls, &state)) return NULL; obj = (BaseRowProxy *)PyObject_CallMethod(cls, "__new__", "O", cls); if (obj == NULL) return NULL; tmp = PyObject_CallMethod((PyObject *)obj, "__setstate__", "O", state); if (tmp == NULL) { Py_DECREF(obj); return NULL; } Py_DECREF(tmp); if (obj->parent == NULL || obj->row == NULL || obj->processors == NULL || obj->keymap == NULL) { PyErr_SetString(PyExc_RuntimeError, "__setstate__ for BaseRowProxy subclasses must set values " "for parent, row, processors and keymap"); Py_DECREF(obj); return NULL; } return (PyObject *)obj; } static int BaseRowProxy_init(BaseRowProxy *self, PyObject *args, PyObject *kwds) { PyObject *parent, *row, *processors, *keymap; if (!PyArg_UnpackTuple(args, "BaseRowProxy", 4, 4, &parent, &row, &processors, &keymap)) return -1; Py_INCREF(parent); self->parent = parent; if (!PySequence_Check(row)) { PyErr_SetString(PyExc_TypeError, "row must be a sequence"); return -1; } Py_INCREF(row); self->row = row; if (!PyList_CheckExact(processors)) { PyErr_SetString(PyExc_TypeError, "processors must be a list"); return -1; } Py_INCREF(processors); self->processors = processors; if (!PyDict_CheckExact(keymap)) { PyErr_SetString(PyExc_TypeError, "keymap must be a dict"); return -1; } Py_INCREF(keymap); self->keymap = keymap; return 0; } /* We need the reduce method because otherwise the default implementation * does very weird stuff for pickle protocol 0 and 1. It calls * BaseRowProxy.__new__(RowProxy_instance) upon *pickling*. 
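 *
 * Added note for clarity (not in the original comment): __reduce__ as
 * implemented below returns (rowproxy_reconstructor, (cls, state)), with
 * state taken from self.__getstate__() and rowproxy_reconstructor looked up
 * in sqlalchemy.engine.result.  Unpickling therefore calls that
 * reconstructor with (cls, state), which recreates the object via
 * cls.__new__() and restores it with __setstate__() -- the same protocol
 * handled by safe_rowproxy_reconstructor() above.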
*/ static PyObject * BaseRowProxy_reduce(PyObject *self) { PyObject *method, *state; PyObject *module, *reconstructor, *cls; method = PyObject_GetAttrString(self, "__getstate__"); if (method == NULL) return NULL; state = PyObject_CallObject(method, NULL); Py_DECREF(method); if (state == NULL) return NULL; module = PyImport_ImportModule("sqlalchemy.engine.result"); if (module == NULL) return NULL; reconstructor = PyObject_GetAttrString(module, "rowproxy_reconstructor"); Py_DECREF(module); if (reconstructor == NULL) { Py_DECREF(state); return NULL; } cls = PyObject_GetAttrString(self, "__class__"); if (cls == NULL) { Py_DECREF(reconstructor); Py_DECREF(state); return NULL; } return Py_BuildValue("(N(NN))", reconstructor, cls, state); } static void BaseRowProxy_dealloc(BaseRowProxy *self) { Py_XDECREF(self->parent); Py_XDECREF(self->row); Py_XDECREF(self->processors); Py_XDECREF(self->keymap); self->ob_type->tp_free((PyObject *)self); } static PyObject * BaseRowProxy_processvalues(PyObject *values, PyObject *processors, int astuple) { Py_ssize_t num_values, num_processors; PyObject **valueptr, **funcptr, **resultptr; PyObject *func, *result, *processed_value, *values_fastseq; num_values = PySequence_Length(values); num_processors = PyList_Size(processors); if (num_values != num_processors) { PyErr_Format(PyExc_RuntimeError, "number of values in row (%d) differ from number of column " "processors (%d)", (int)num_values, (int)num_processors); return NULL; } if (astuple) { result = PyTuple_New(num_values); } else { result = PyList_New(num_values); } if (result == NULL) return NULL; values_fastseq = PySequence_Fast(values, "row must be a sequence"); if (values_fastseq == NULL) return NULL; valueptr = PySequence_Fast_ITEMS(values_fastseq); funcptr = PySequence_Fast_ITEMS(processors); resultptr = PySequence_Fast_ITEMS(result); while (--num_values >= 0) { func = *funcptr; if (func != Py_None) { processed_value = PyObject_CallFunctionObjArgs(func, *valueptr, NULL); if (processed_value == NULL) { Py_DECREF(values_fastseq); Py_DECREF(result); return NULL; } *resultptr = processed_value; } else { Py_INCREF(*valueptr); *resultptr = *valueptr; } valueptr++; funcptr++; resultptr++; } Py_DECREF(values_fastseq); return result; } static PyListObject * BaseRowProxy_values(BaseRowProxy *self) { return (PyListObject *)BaseRowProxy_processvalues(self->row, self->processors, 0); } static PyObject * BaseRowProxy_iter(BaseRowProxy *self) { PyObject *values, *result; values = BaseRowProxy_processvalues(self->row, self->processors, 1); if (values == NULL) return NULL; result = PyObject_GetIter(values); Py_DECREF(values); if (result == NULL) return NULL; return result; } static Py_ssize_t BaseRowProxy_length(BaseRowProxy *self) { return PySequence_Length(self->row); } static PyObject * BaseRowProxy_subscript(BaseRowProxy *self, PyObject *key) { PyObject *processors, *values; PyObject *processor, *value, *processed_value; PyObject *row, *record, *result, *indexobject; PyObject *exc_module, *exception, *cstr_obj; char *cstr_key; long index; int key_fallback = 0; int tuple_check = 0; if (PyInt_CheckExact(key)) { index = PyInt_AS_LONG(key); } else if (PyLong_CheckExact(key)) { index = PyLong_AsLong(key); if ((index == -1) && PyErr_Occurred()) /* -1 can be either the actual value, or an error flag. 
*/ return NULL; } else if (PySlice_Check(key)) { values = PyObject_GetItem(self->row, key); if (values == NULL) return NULL; processors = PyObject_GetItem(self->processors, key); if (processors == NULL) { Py_DECREF(values); return NULL; } result = BaseRowProxy_processvalues(values, processors, 1); Py_DECREF(values); Py_DECREF(processors); return result; } else { record = PyDict_GetItem((PyObject *)self->keymap, key); if (record == NULL) { record = PyObject_CallMethod(self->parent, "_key_fallback", "O", key); if (record == NULL) return NULL; key_fallback = 1; } indexobject = PyTuple_GetItem(record, 2); if (indexobject == NULL) return NULL; if (key_fallback) { Py_DECREF(record); } if (indexobject == Py_None) { exc_module = PyImport_ImportModule("sqlalchemy.exc"); if (exc_module == NULL) return NULL; exception = PyObject_GetAttrString(exc_module, "InvalidRequestError"); Py_DECREF(exc_module); if (exception == NULL) return NULL; // wow. this seems quite excessive. cstr_obj = PyObject_Str(key); if (cstr_obj == NULL) return NULL; cstr_key = PyString_AsString(cstr_obj); if (cstr_key == NULL) { Py_DECREF(cstr_obj); return NULL; } Py_DECREF(cstr_obj); PyErr_Format(exception, "Ambiguous column name '%.200s' in result set! " "try 'use_labels' option on select statement.", cstr_key); return NULL; } index = PyInt_AsLong(indexobject); if ((index == -1) && PyErr_Occurred()) /* -1 can be either the actual value, or an error flag. */ return NULL; } processor = PyList_GetItem(self->processors, index); if (processor == NULL) return NULL; row = self->row; if (PyTuple_CheckExact(row)) { value = PyTuple_GetItem(row, index); tuple_check = 1; } else { value = PySequence_GetItem(row, index); tuple_check = 0; } if (value == NULL) return NULL; if (processor != Py_None) { processed_value = PyObject_CallFunctionObjArgs(processor, value, NULL); if (!tuple_check) { Py_DECREF(value); } return processed_value; } else { if (tuple_check) { Py_INCREF(value); } return value; } } static PyObject * BaseRowProxy_getitem(PyObject *self, Py_ssize_t i) { return BaseRowProxy_subscript((BaseRowProxy*)self, PyInt_FromSsize_t(i)); } static PyObject * BaseRowProxy_getattro(BaseRowProxy *self, PyObject *name) { PyObject *tmp; if (!(tmp = PyObject_GenericGetAttr((PyObject *)self, name))) { if (!PyErr_ExceptionMatches(PyExc_AttributeError)) return NULL; PyErr_Clear(); } else return tmp; tmp = BaseRowProxy_subscript(self, name); if (tmp == NULL && PyErr_ExceptionMatches(PyExc_KeyError)) { PyErr_Format( PyExc_AttributeError, "Could not locate column in row for column '%.200s'", PyString_AsString(name) ); return NULL; } return tmp; } /*********************** * getters and setters * ***********************/ static PyObject * BaseRowProxy_getparent(BaseRowProxy *self, void *closure) { Py_INCREF(self->parent); return self->parent; } static int BaseRowProxy_setparent(BaseRowProxy *self, PyObject *value, void *closure) { PyObject *module, *cls; if (value == NULL) { PyErr_SetString(PyExc_TypeError, "Cannot delete the 'parent' attribute"); return -1; } module = PyImport_ImportModule("sqlalchemy.engine.result"); if (module == NULL) return -1; cls = PyObject_GetAttrString(module, "ResultMetaData"); Py_DECREF(module); if (cls == NULL) return -1; if (PyObject_IsInstance(value, cls) != 1) { PyErr_SetString(PyExc_TypeError, "The 'parent' attribute value must be an instance of " "ResultMetaData"); return -1; } Py_DECREF(cls); Py_XDECREF(self->parent); Py_INCREF(value); self->parent = value; return 0; } static PyObject * BaseRowProxy_getrow(BaseRowProxy 
*self, void *closure) { Py_INCREF(self->row); return self->row; } static int BaseRowProxy_setrow(BaseRowProxy *self, PyObject *value, void *closure) { if (value == NULL) { PyErr_SetString(PyExc_TypeError, "Cannot delete the 'row' attribute"); return -1; } if (!PySequence_Check(value)) { PyErr_SetString(PyExc_TypeError, "The 'row' attribute value must be a sequence"); return -1; } Py_XDECREF(self->row); Py_INCREF(value); self->row = value; return 0; } static PyObject * BaseRowProxy_getprocessors(BaseRowProxy *self, void *closure) { Py_INCREF(self->processors); return self->processors; } static int BaseRowProxy_setprocessors(BaseRowProxy *self, PyObject *value, void *closure) { if (value == NULL) { PyErr_SetString(PyExc_TypeError, "Cannot delete the 'processors' attribute"); return -1; } if (!PyList_CheckExact(value)) { PyErr_SetString(PyExc_TypeError, "The 'processors' attribute value must be a list"); return -1; } Py_XDECREF(self->processors); Py_INCREF(value); self->processors = value; return 0; } static PyObject * BaseRowProxy_getkeymap(BaseRowProxy *self, void *closure) { Py_INCREF(self->keymap); return self->keymap; } static int BaseRowProxy_setkeymap(BaseRowProxy *self, PyObject *value, void *closure) { if (value == NULL) { PyErr_SetString(PyExc_TypeError, "Cannot delete the 'keymap' attribute"); return -1; } if (!PyDict_CheckExact(value)) { PyErr_SetString(PyExc_TypeError, "The 'keymap' attribute value must be a dict"); return -1; } Py_XDECREF(self->keymap); Py_INCREF(value); self->keymap = value; return 0; } static PyGetSetDef BaseRowProxy_getseters[] = { {"_parent", (getter)BaseRowProxy_getparent, (setter)BaseRowProxy_setparent, "ResultMetaData", NULL}, {"_row", (getter)BaseRowProxy_getrow, (setter)BaseRowProxy_setrow, "Original row tuple", NULL}, {"_processors", (getter)BaseRowProxy_getprocessors, (setter)BaseRowProxy_setprocessors, "list of type processors", NULL}, {"_keymap", (getter)BaseRowProxy_getkeymap, (setter)BaseRowProxy_setkeymap, "Key to (processor, index) dict", NULL}, {NULL} }; static PyMethodDef BaseRowProxy_methods[] = { {"values", (PyCFunction)BaseRowProxy_values, METH_NOARGS, "Return the values represented by this BaseRowProxy as a list."}, {"__reduce__", (PyCFunction)BaseRowProxy_reduce, METH_NOARGS, "Pickle support method."}, {NULL} /* Sentinel */ }; static PySequenceMethods BaseRowProxy_as_sequence = { (lenfunc)BaseRowProxy_length, /* sq_length */ 0, /* sq_concat */ 0, /* sq_repeat */ (ssizeargfunc)BaseRowProxy_getitem, /* sq_item */ 0, /* sq_slice */ 0, /* sq_ass_item */ 0, /* sq_ass_slice */ 0, /* sq_contains */ 0, /* sq_inplace_concat */ 0, /* sq_inplace_repeat */ }; static PyMappingMethods BaseRowProxy_as_mapping = { (lenfunc)BaseRowProxy_length, /* mp_length */ (binaryfunc)BaseRowProxy_subscript, /* mp_subscript */ 0 /* mp_ass_subscript */ }; static PyTypeObject BaseRowProxyType = { PyObject_HEAD_INIT(NULL) 0, /* ob_size */ "sqlalchemy.cresultproxy.BaseRowProxy", /* tp_name */ sizeof(BaseRowProxy), /* tp_basicsize */ 0, /* tp_itemsize */ (destructor)BaseRowProxy_dealloc, /* tp_dealloc */ 0, /* tp_print */ 0, /* tp_getattr */ 0, /* tp_setattr */ 0, /* tp_compare */ 0, /* tp_repr */ 0, /* tp_as_number */ &BaseRowProxy_as_sequence, /* tp_as_sequence */ &BaseRowProxy_as_mapping, /* tp_as_mapping */ 0, /* tp_hash */ 0, /* tp_call */ 0, /* tp_str */ (getattrofunc)BaseRowProxy_getattro,/* tp_getattro */ 0, /* tp_setattro */ 0, /* tp_as_buffer */ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /* tp_flags */ "BaseRowProxy is a abstract base class for RowProxy", /* 
tp_doc */ 0, /* tp_traverse */ 0, /* tp_clear */ 0, /* tp_richcompare */ 0, /* tp_weaklistoffset */ (getiterfunc)BaseRowProxy_iter, /* tp_iter */ 0, /* tp_iternext */ BaseRowProxy_methods, /* tp_methods */ 0, /* tp_members */ BaseRowProxy_getseters, /* tp_getset */ 0, /* tp_base */ 0, /* tp_dict */ 0, /* tp_descr_get */ 0, /* tp_descr_set */ 0, /* tp_dictoffset */ (initproc)BaseRowProxy_init, /* tp_init */ 0, /* tp_alloc */ 0 /* tp_new */ }; #ifndef PyMODINIT_FUNC /* declarations for DLL import/export */ #define PyMODINIT_FUNC void #endif static PyMethodDef module_methods[] = { {"safe_rowproxy_reconstructor", safe_rowproxy_reconstructor, METH_VARARGS, "reconstruct a RowProxy instance from its pickled form."}, {NULL, NULL, 0, NULL} /* Sentinel */ }; PyMODINIT_FUNC initcresultproxy(void) { PyObject *m; BaseRowProxyType.tp_new = PyType_GenericNew; if (PyType_Ready(&BaseRowProxyType) < 0) return; m = Py_InitModule3("cresultproxy", module_methods, "Module containing C versions of core ResultProxy classes."); if (m == NULL) return; Py_INCREF(&BaseRowProxyType); PyModule_AddObject(m, "BaseRowProxy", (PyObject *)&BaseRowProxyType); } SQLAlchemy-0.8.4/lib/sqlalchemy/cextension/utils.c0000644000076500000240000001130612251150015022615 0ustar classicstaff00000000000000/* utils.c Copyright (C) 2012-2013 the SQLAlchemy authors and contributors This module is part of SQLAlchemy and is released under the MIT License: http://www.opensource.org/licenses/mit-license.php */ #include /* Given arguments from the calling form *multiparams, **params, return a list of bind parameter structures, usually a list of dictionaries. In the case of 'raw' execution which accepts positional parameters, it may be a list of tuples or lists. */ static PyObject * distill_params(PyObject *self, PyObject *args) { PyObject *multiparams, *params; PyObject *enclosing_list, *double_enclosing_list; PyObject *zero_element, *zero_element_item; Py_ssize_t multiparam_size, zero_element_length; if (!PyArg_UnpackTuple(args, "_distill_params", 2, 2, &multiparams, ¶ms)) { return NULL; } if (multiparams != Py_None) { multiparam_size = PyTuple_Size(multiparams); if (multiparam_size < 0) { return NULL; } } else { multiparam_size = 0; } if (multiparam_size == 0) { if (params != Py_None && PyDict_Size(params) != 0) { enclosing_list = PyList_New(1); if (enclosing_list == NULL) { return NULL; } Py_INCREF(params); if (PyList_SetItem(enclosing_list, 0, params) == -1) { Py_DECREF(params); Py_DECREF(enclosing_list); return NULL; } } else { enclosing_list = PyList_New(0); if (enclosing_list == NULL) { return NULL; } } return enclosing_list; } else if (multiparam_size == 1) { zero_element = PyTuple_GetItem(multiparams, 0); if (PyTuple_Check(zero_element) || PyList_Check(zero_element)) { zero_element_length = PySequence_Length(zero_element); if (zero_element_length != 0) { zero_element_item = PySequence_GetItem(zero_element, 0); if (zero_element_item == NULL) { return NULL; } } else { zero_element_item = NULL; } if (zero_element_length == 0 || ( PyObject_HasAttrString(zero_element_item, "__iter__") && !PyObject_HasAttrString(zero_element_item, "strip") ) ) { /* * execute(stmt, [{}, {}, {}, ...]) * execute(stmt, [(), (), (), ...]) */ Py_XDECREF(zero_element_item); Py_INCREF(zero_element); return zero_element; } else { /* * execute(stmt, ("value", "value")) */ Py_XDECREF(zero_element_item); enclosing_list = PyList_New(1); if (enclosing_list == NULL) { return NULL; } Py_INCREF(zero_element); if (PyList_SetItem(enclosing_list, 0, zero_element) == -1) { 
Py_DECREF(zero_element); Py_DECREF(enclosing_list); return NULL; } return enclosing_list; } } else if (PyObject_HasAttrString(zero_element, "keys")) { /* * execute(stmt, {"key":"value"}) */ enclosing_list = PyList_New(1); if (enclosing_list == NULL) { return NULL; } Py_INCREF(zero_element); if (PyList_SetItem(enclosing_list, 0, zero_element) == -1) { Py_DECREF(zero_element); Py_DECREF(enclosing_list); return NULL; } return enclosing_list; } else { enclosing_list = PyList_New(1); if (enclosing_list == NULL) { return NULL; } double_enclosing_list = PyList_New(1); if (double_enclosing_list == NULL) { Py_DECREF(enclosing_list); return NULL; } Py_INCREF(zero_element); if (PyList_SetItem(enclosing_list, 0, zero_element) == -1) { Py_DECREF(zero_element); Py_DECREF(enclosing_list); Py_DECREF(double_enclosing_list); return NULL; } if (PyList_SetItem(double_enclosing_list, 0, enclosing_list) == -1) { Py_DECREF(zero_element); Py_DECREF(enclosing_list); Py_DECREF(double_enclosing_list); return NULL; } return double_enclosing_list; } } else { zero_element = PyTuple_GetItem(multiparams, 0); if (PyObject_HasAttrString(zero_element, "__iter__") && !PyObject_HasAttrString(zero_element, "strip") ) { Py_INCREF(multiparams); return multiparams; } else { enclosing_list = PyList_New(1); if (enclosing_list == NULL) { return NULL; } Py_INCREF(multiparams); if (PyList_SetItem(enclosing_list, 0, multiparams) == -1) { Py_DECREF(multiparams); Py_DECREF(enclosing_list); return NULL; } return enclosing_list; } } } #ifndef PyMODINIT_FUNC /* declarations for DLL import/export */ #define PyMODINIT_FUNC void #endif static PyMethodDef module_methods[] = { {"_distill_params", distill_params, METH_VARARGS, "Distill an execute() parameter structure."}, {NULL, NULL, 0, NULL} /* Sentinel */ }; PyMODINIT_FUNC initcutils(void) { PyObject *m; m = Py_InitModule3("cutils", module_methods, "Internal utility functions."); if (m == NULL) return; } SQLAlchemy-0.8.4/lib/sqlalchemy/connectors/0000755000076500000240000000000012251151573021320 5ustar classicstaff00000000000000SQLAlchemy-0.8.4/lib/sqlalchemy/connectors/__init__.py0000644000076500000240000000042412251147171023430 0ustar classicstaff00000000000000# connectors/__init__.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php class Connector(object): pass SQLAlchemy-0.8.4/lib/sqlalchemy/connectors/mxodbc.py0000644000076500000240000001236212251150015023140 0ustar classicstaff00000000000000# connectors/mxodbc.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """ Provide an SQLALchemy connector for the eGenix mxODBC commercial Python adapter for ODBC. This is not a free product, but eGenix provides SQLAlchemy with a license for use in continuous integration testing. This has been tested for use with mxODBC 3.1.2 on SQL Server 2005 and 2008, using the SQL Server Native driver. However, it is possible for this to be used on other database platforms. For more info on mxODBC, see http://www.egenix.com/ """ import sys import re import warnings from . 
import Connector class MxODBCConnector(Connector): driver = 'mxodbc' supports_sane_multi_rowcount = False supports_unicode_statements = True supports_unicode_binds = True supports_native_decimal = True @classmethod def dbapi(cls): # this classmethod will normally be replaced by an instance # attribute of the same name, so this is normally only called once. cls._load_mx_exceptions() platform = sys.platform if platform == 'win32': from mx.ODBC import Windows as module # this can be the string "linux2", and possibly others elif 'linux' in platform: from mx.ODBC import unixODBC as module elif platform == 'darwin': from mx.ODBC import iODBC as module else: raise ImportError("Unrecognized platform for mxODBC import") return module @classmethod def _load_mx_exceptions(cls): """ Import mxODBC exception classes into the module namespace, as if they had been imported normally. This is done here to avoid requiring all SQLAlchemy users to install mxODBC. """ global InterfaceError, ProgrammingError from mx.ODBC import InterfaceError from mx.ODBC import ProgrammingError def on_connect(self): def connect(conn): conn.stringformat = self.dbapi.MIXED_STRINGFORMAT conn.datetimeformat = self.dbapi.PYDATETIME_DATETIMEFORMAT conn.decimalformat = self.dbapi.DECIMAL_DECIMALFORMAT conn.errorhandler = self._error_handler() return connect def _error_handler(self): """ Return a handler that adjusts mxODBC's raised Warnings to emit Python standard warnings. """ from mx.ODBC.Error import Warning as MxOdbcWarning def error_handler(connection, cursor, errorclass, errorvalue): if issubclass(errorclass, MxOdbcWarning): errorclass.__bases__ = (Warning,) warnings.warn(message=str(errorvalue), category=errorclass, stacklevel=2) else: raise errorclass, errorvalue return error_handler def create_connect_args(self, url): """ Return a tuple of *args,**kwargs for creating a connection. The mxODBC 3.x connection constructor looks like this: connect(dsn, user='', password='', clear_auto_commit=1, errorhandler=None) This method translates the values in the provided uri into args and kwargs needed to instantiate an mxODBC Connection. The arg 'errorhandler' is not used by SQLAlchemy and will not be populated. """ opts = url.translate_connect_args(username='user') opts.update(url.query) args = opts.pop('host') opts.pop('port', None) opts.pop('database', None) return (args,), opts def is_disconnect(self, e, connection, cursor): # TODO: eGenix recommends checking connection.closed here # Does that detect dropped connections ? 
if isinstance(e, self.dbapi.ProgrammingError): return "connection already closed" in str(e) elif isinstance(e, self.dbapi.Error): return '[08S01]' in str(e) else: return False def _get_server_version_info(self, connection): # eGenix suggests using conn.dbms_version instead # of what we're doing here dbapi_con = connection.connection version = [] r = re.compile('[.\-]') # 18 == pyodbc.SQL_DBMS_VER for n in r.split(dbapi_con.getinfo(18)[1]): try: version.append(int(n)) except ValueError: version.append(n) return tuple(version) def _get_direct(self, context): if context: native_odbc_execute = context.execution_options.\ get('native_odbc_execute', 'auto') # default to direct=True in all cases, is more generally # compatible especially with SQL Server return False if native_odbc_execute is True else True else: return True def do_executemany(self, cursor, statement, parameters, context=None): cursor.executemany( statement, parameters, direct=self._get_direct(context)) def do_execute(self, cursor, statement, parameters, context=None): cursor.execute(statement, parameters, direct=self._get_direct(context)) SQLAlchemy-0.8.4/lib/sqlalchemy/connectors/mysqldb.py0000644000076500000240000001266412251150015023344 0ustar classicstaff00000000000000"""Define behaviors common to MySQLdb dialects. Currently includes MySQL and Drizzle. """ from . import Connector from ..engine import base as engine_base, default from ..sql import operators as sql_operators from .. import exc, log, schema, sql, types as sqltypes, util, processors import re # the subclassing of Connector by all classes # here is not strictly necessary class MySQLDBExecutionContext(Connector): @property def rowcount(self): if hasattr(self, '_rowcount'): return self._rowcount else: return self.cursor.rowcount class MySQLDBCompiler(Connector): def visit_mod_binary(self, binary, operator, **kw): return self.process(binary.left, **kw) + " %% " + \ self.process(binary.right, **kw) def post_process_text(self, text): return text.replace('%', '%%') class MySQLDBIdentifierPreparer(Connector): def _escape_identifier(self, value): value = value.replace(self.escape_quote, self.escape_to_quote) return value.replace("%", "%%") class MySQLDBConnector(Connector): driver = 'mysqldb' supports_unicode_statements = False supports_sane_rowcount = True supports_sane_multi_rowcount = True supports_native_decimal = True default_paramstyle = 'format' @classmethod def dbapi(cls): # is overridden when pymysql is used return __import__('MySQLdb') def do_executemany(self, cursor, statement, parameters, context=None): rowcount = cursor.executemany(statement, parameters) if context is not None: context._rowcount = rowcount def create_connect_args(self, url): opts = url.translate_connect_args(database='db', username='user', password='passwd') opts.update(url.query) util.coerce_kw_type(opts, 'compress', bool) util.coerce_kw_type(opts, 'connect_timeout', int) util.coerce_kw_type(opts, 'read_timeout', int) util.coerce_kw_type(opts, 'client_flag', int) util.coerce_kw_type(opts, 'local_infile', int) # Note: using either of the below will cause all strings to be returned # as Unicode, both in raw SQL operations and with column types like # String and MSString. util.coerce_kw_type(opts, 'use_unicode', bool) util.coerce_kw_type(opts, 'charset', str) # Rich values 'cursorclass' and 'conv' are not supported via # query string. 
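            # For illustration only (hypothetical URL and values, not part of the
            # original source): a URL such as
            #     mysql+mysqldb://user:pass@host/db?charset=utf8&use_unicode=1&connect_timeout=10
            # passes through the coercions above and yields keyword arguments
            # roughly like
            #     {'user': 'user', 'passwd': 'pass', 'host': 'host', 'db': 'db',
            #      'charset': 'utf8', 'use_unicode': True, 'connect_timeout': 10}
            # before the ssl and client_flag handling below is applied.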
ssl = {} keys = ['ssl_ca', 'ssl_key', 'ssl_cert', 'ssl_capath', 'ssl_cipher'] for key in keys: if key in opts: ssl[key[4:]] = opts[key] util.coerce_kw_type(ssl, key[4:], str) del opts[key] if ssl: opts['ssl'] = ssl # FOUND_ROWS must be set in CLIENT_FLAGS to enable # supports_sane_rowcount. client_flag = opts.get('client_flag', 0) if self.dbapi is not None: try: CLIENT_FLAGS = __import__( self.dbapi.__name__ + '.constants.CLIENT' ).constants.CLIENT client_flag |= CLIENT_FLAGS.FOUND_ROWS except (AttributeError, ImportError): self.supports_sane_rowcount = False opts['client_flag'] = client_flag return [[], opts] def _get_server_version_info(self, connection): dbapi_con = connection.connection version = [] r = re.compile('[.\-]') for n in r.split(dbapi_con.get_server_info()): try: version.append(int(n)) except ValueError: version.append(n) return tuple(version) def _extract_error_code(self, exception): return exception.args[0] def _detect_charset(self, connection): """Sniff out the character set in use for connection results.""" # Note: MySQL-python 1.2.1c7 seems to ignore changes made # on a connection via set_character_set() if self.server_version_info < (4, 1, 0): try: return connection.connection.character_set_name() except AttributeError: # < 1.2.1 final MySQL-python drivers have no charset support. # a query is needed. pass # Prefer 'character_set_results' for the current connection over the # value in the driver. SET NAMES or individual variable SETs will # change the charset without updating the driver's view of the world. # # If it's decided that issuing that sort of SQL leaves you SOL, then # this can prefer the driver value. rs = connection.execute("SHOW VARIABLES LIKE 'character_set%%'") opts = dict([(row[0], row[1]) for row in self._compat_fetchall(rs)]) if 'character_set_results' in opts: return opts['character_set_results'] try: return connection.connection.character_set_name() except AttributeError: # Still no charset on < 1.2.1 final... if 'character_set' in opts: return opts['character_set'] else: util.warn( "Could not detect the connection character set with this " "combination of MySQL server and MySQL-python. " "MySQL-python >= 1.2.2 is recommended. Assuming latin1.") return 'latin1' SQLAlchemy-0.8.4/lib/sqlalchemy/connectors/pyodbc.py0000644000076500000240000001342312251150015023143 0ustar classicstaff00000000000000# connectors/pyodbc.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php from . 
import Connector from ..util import asbool import sys import re import urllib class PyODBCConnector(Connector): driver = 'pyodbc' supports_sane_multi_rowcount = False # PyODBC unicode is broken on UCS-4 builds supports_unicode = sys.maxunicode == 65535 supports_unicode_statements = supports_unicode supports_native_decimal = True default_paramstyle = 'named' # for non-DSN connections, this should # hold the desired driver name pyodbc_driver_name = None # will be set to True after initialize() # if the freetds.so is detected freetds = False # will be set to the string version of # the FreeTDS driver if freetds is detected freetds_driver_version = None # will be set to True after initialize() # if the libessqlsrv.so is detected easysoft = False def __init__(self, supports_unicode_binds=None, **kw): super(PyODBCConnector, self).__init__(**kw) self._user_supports_unicode_binds = supports_unicode_binds @classmethod def dbapi(cls): return __import__('pyodbc') def create_connect_args(self, url): opts = url.translate_connect_args(username='user') opts.update(url.query) keys = opts query = url.query connect_args = {} for param in ('ansi', 'unicode_results', 'autocommit'): if param in keys: connect_args[param] = asbool(keys.pop(param)) if 'odbc_connect' in keys: connectors = [urllib.unquote_plus(keys.pop('odbc_connect'))] else: dsn_connection = 'dsn' in keys or \ ('host' in keys and 'database' not in keys) if dsn_connection: connectors = ['dsn=%s' % (keys.pop('host', '') or \ keys.pop('dsn', ''))] else: port = '' if 'port' in keys and not 'port' in query: port = ',%d' % int(keys.pop('port')) connectors = ["DRIVER={%s}" % keys.pop('driver', self.pyodbc_driver_name), 'Server=%s%s' % (keys.pop('host', ''), port), 'Database=%s' % keys.pop('database', '')] user = keys.pop("user", None) if user: connectors.append("UID=%s" % user) connectors.append("PWD=%s" % keys.pop('password', '')) else: connectors.append("Trusted_Connection=Yes") # if set to 'Yes', the ODBC layer will try to automagically # convert textual data from your database encoding to your # client encoding. This should obviously be set to 'No' if # you query a cp1253 encoded database from a latin1 client... if 'odbc_autotranslate' in keys: connectors.append("AutoTranslate=%s" % keys.pop("odbc_autotranslate")) connectors.extend(['%s=%s' % (k, v) for k, v in keys.iteritems()]) return [[";".join(connectors)], connect_args] def is_disconnect(self, e, connection, cursor): if isinstance(e, self.dbapi.ProgrammingError): return "The cursor's connection has been closed." in str(e) or \ 'Attempt to use a closed connection.' in str(e) elif isinstance(e, self.dbapi.Error): return '[08S01]' in str(e) else: return False def initialize(self, connection): # determine FreeTDS first. can't issue SQL easily # without getting unicode_statements/binds set up. pyodbc = self.dbapi dbapi_con = connection.connection _sql_driver_name = dbapi_con.getinfo(pyodbc.SQL_DRIVER_NAME) self.freetds = bool(re.match(r".*libtdsodbc.*\.so", _sql_driver_name )) self.easysoft = bool(re.match(r".*libessqlsrv.*\.so", _sql_driver_name )) if self.freetds: self.freetds_driver_version = dbapi_con.getinfo( pyodbc.SQL_DRIVER_VER) # the "Py2K only" part here is theoretical. # have not tried pyodbc + python3.1 yet. 
# Py2K self.supports_unicode_statements = ( not self.freetds and not self.easysoft) if self._user_supports_unicode_binds is not None: self.supports_unicode_binds = self._user_supports_unicode_binds else: self.supports_unicode_binds = ( not self.freetds or self.freetds_driver_version >= '0.91' ) and not self.easysoft # end Py2K # run other initialization which asks for user name, etc. super(PyODBCConnector, self).initialize(connection) def _dbapi_version(self): if not self.dbapi: return () return self._parse_dbapi_version(self.dbapi.version) def _parse_dbapi_version(self, vers): m = re.match( r'(?:py.*-)?([\d\.]+)(?:-(\w+))?', vers ) if not m: return () vers = tuple([int(x) for x in m.group(1).split(".")]) if m.group(2): vers += (m.group(2),) return vers def _get_server_version_info(self, connection): dbapi_con = connection.connection version = [] r = re.compile('[.\-]') for n in r.split(dbapi_con.getinfo(self.dbapi.SQL_DBMS_VER)): try: version.append(int(n)) except ValueError: version.append(n) return tuple(version) SQLAlchemy-0.8.4/lib/sqlalchemy/connectors/zxJDBC.py0000644000076500000240000000353212251147171022760 0ustar classicstaff00000000000000# connectors/zxJDBC.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php import sys from . import Connector class ZxJDBCConnector(Connector): driver = 'zxjdbc' supports_sane_rowcount = False supports_sane_multi_rowcount = False supports_unicode_binds = True supports_unicode_statements = sys.version > '2.5.0+' description_encoding = None default_paramstyle = 'qmark' jdbc_db_name = None jdbc_driver_name = None @classmethod def dbapi(cls): from com.ziclix.python.sql import zxJDBC return zxJDBC def _driver_kwargs(self): """Return kw arg dict to be sent to connect().""" return {} def _create_jdbc_url(self, url): """Create a JDBC url from a :class:`~sqlalchemy.engine.url.URL`""" return 'jdbc:%s://%s%s/%s' % (self.jdbc_db_name, url.host, url.port is not None and ':%s' % url.port or '', url.database) def create_connect_args(self, url): opts = self._driver_kwargs() opts.update(url.query) return [ [self._create_jdbc_url(url), url.username, url.password, self.jdbc_driver_name], opts] def is_disconnect(self, e, connection, cursor): if not isinstance(e, self.dbapi.ProgrammingError): return False e = str(e) return 'connection is closed' in e or 'cursor is closed' in e def _get_server_version_info(self, connection): # use connection.connection.dbversion, and parse appropriately # to get a tuple raise NotImplementedError() SQLAlchemy-0.8.4/lib/sqlalchemy/databases/0000755000076500000240000000000012251151573021072 5ustar classicstaff00000000000000SQLAlchemy-0.8.4/lib/sqlalchemy/databases/__init__.py0000644000076500000240000000166212251150015023176 0ustar classicstaff00000000000000# databases/__init__.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Include imports from the sqlalchemy.dialects package for backwards compatibility with pre 0.6 versions. 
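For example, a pre-0.6 style import continues to work::

    from sqlalchemy.databases import postgres

with ``postgres`` provided as an alias for the PostgreSQL dialect module
imported below.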
""" from ..dialects.sqlite import base as sqlite from ..dialects.postgresql import base as postgresql postgres = postgresql from ..dialects.mysql import base as mysql from ..dialects.drizzle import base as drizzle from ..dialects.oracle import base as oracle from ..dialects.firebird import base as firebird from ..dialects.informix import base as informix from ..dialects.mssql import base as mssql from ..dialects.sybase import base as sybase __all__ = ( 'drizzle', 'firebird', 'informix', 'mssql', 'mysql', 'postgresql', 'sqlite', 'oracle', 'sybase', ) SQLAlchemy-0.8.4/lib/sqlalchemy/dialects/0000755000076500000240000000000012251151573020733 5ustar classicstaff00000000000000SQLAlchemy-0.8.4/lib/sqlalchemy/dialects/__init__.py0000644000076500000240000000202512251150015023031 0ustar classicstaff00000000000000# dialects/__init__.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php __all__ = ( 'drizzle', 'firebird', # 'informix', 'mssql', 'mysql', 'oracle', 'postgresql', 'sqlite', 'sybase', ) from .. import util def _auto_fn(name): """default dialect importer. plugs into the :class:`.PluginLoader` as a first-hit system. """ if "." in name: dialect, driver = name.split(".") else: dialect = name driver = "base" try: module = __import__('sqlalchemy.dialects.%s' % (dialect, )).dialects except ImportError: return None module = getattr(module, dialect) if hasattr(module, driver): module = getattr(module, driver) return lambda: module.dialect else: return None registry = util.PluginLoader("sqlalchemy.dialects", auto_fn=_auto_fn) SQLAlchemy-0.8.4/lib/sqlalchemy/dialects/drizzle/0000755000076500000240000000000012251151573022416 5ustar classicstaff00000000000000SQLAlchemy-0.8.4/lib/sqlalchemy/dialects/drizzle/__init__.py0000644000076500000240000000107512251147171024531 0ustar classicstaff00000000000000from sqlalchemy.dialects.drizzle import base, mysqldb base.dialect = mysqldb.dialect from sqlalchemy.dialects.drizzle.base import \ BIGINT, BINARY, BLOB, \ BOOLEAN, CHAR, DATE, \ DATETIME, DECIMAL, DOUBLE, \ ENUM, FLOAT, INTEGER, \ NUMERIC, REAL, TEXT, \ TIME, TIMESTAMP, VARBINARY, \ VARCHAR, dialect __all__ = ( 'BIGINT', 'BINARY', 'BLOB', 'BOOLEAN', 'CHAR', 'DATE', 'DATETIME', 'DECIMAL', 'DOUBLE', 'ENUM', 'FLOAT', 'INTEGER', 'NUMERIC', 'REAL', 'TEXT', 'TIME', 'TIMESTAMP', 'VARBINARY', 'VARCHAR', 'dialect' ) SQLAlchemy-0.8.4/lib/sqlalchemy/dialects/drizzle/base.py0000644000076500000240000003524012251150015023674 0ustar classicstaff00000000000000# drizzle/base.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # Copyright (C) 2010-2011 Monty Taylor # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """ .. dialect:: drizzle :name: Drizzle Drizzle is a variant of MySQL. Unlike MySQL, Drizzle's default storage engine is InnoDB (transactions, foreign-keys) rather than MyISAM. For more `Notable Differences `_, visit the `Drizzle Documentation `_. The SQLAlchemy Drizzle dialect leans heavily on the MySQL dialect, so much of the :doc:`SQLAlchemy MySQL ` documentation is also relevant. 
""" from sqlalchemy import exc from sqlalchemy import log from sqlalchemy import types as sqltypes from sqlalchemy.engine import reflection from sqlalchemy.dialects.mysql import base as mysql_dialect from sqlalchemy.types import DATE, DATETIME, BOOLEAN, TIME, \ BLOB, BINARY, VARBINARY class _NumericType(object): """Base for Drizzle numeric types.""" def __init__(self, **kw): super(_NumericType, self).__init__(**kw) class _FloatType(_NumericType, sqltypes.Float): def __init__(self, precision=None, scale=None, asdecimal=True, **kw): if isinstance(self, (REAL, DOUBLE)) and \ ( (precision is None and scale is not None) or (precision is not None and scale is None) ): raise exc.ArgumentError( "You must specify both precision and scale or omit " "both altogether.") super(_FloatType, self).__init__(precision=precision, asdecimal=asdecimal, **kw) self.scale = scale class _StringType(mysql_dialect._StringType): """Base for Drizzle string types.""" def __init__(self, collation=None, binary=False, **kw): kw['national'] = False super(_StringType, self).__init__(collation=collation, binary=binary, **kw) class NUMERIC(_NumericType, sqltypes.NUMERIC): """Drizzle NUMERIC type.""" __visit_name__ = 'NUMERIC' def __init__(self, precision=None, scale=None, asdecimal=True, **kw): """Construct a NUMERIC. :param precision: Total digits in this number. If scale and precision are both None, values are stored to limits allowed by the server. :param scale: The number of digits after the decimal point. """ super(NUMERIC, self).__init__(precision=precision, scale=scale, asdecimal=asdecimal, **kw) class DECIMAL(_NumericType, sqltypes.DECIMAL): """Drizzle DECIMAL type.""" __visit_name__ = 'DECIMAL' def __init__(self, precision=None, scale=None, asdecimal=True, **kw): """Construct a DECIMAL. :param precision: Total digits in this number. If scale and precision are both None, values are stored to limits allowed by the server. :param scale: The number of digits after the decimal point. """ super(DECIMAL, self).__init__(precision=precision, scale=scale, asdecimal=asdecimal, **kw) class DOUBLE(_FloatType): """Drizzle DOUBLE type.""" __visit_name__ = 'DOUBLE' def __init__(self, precision=None, scale=None, asdecimal=True, **kw): """Construct a DOUBLE. :param precision: Total digits in this number. If scale and precision are both None, values are stored to limits allowed by the server. :param scale: The number of digits after the decimal point. """ super(DOUBLE, self).__init__(precision=precision, scale=scale, asdecimal=asdecimal, **kw) class REAL(_FloatType, sqltypes.REAL): """Drizzle REAL type.""" __visit_name__ = 'REAL' def __init__(self, precision=None, scale=None, asdecimal=True, **kw): """Construct a REAL. :param precision: Total digits in this number. If scale and precision are both None, values are stored to limits allowed by the server. :param scale: The number of digits after the decimal point. """ super(REAL, self).__init__(precision=precision, scale=scale, asdecimal=asdecimal, **kw) class FLOAT(_FloatType, sqltypes.FLOAT): """Drizzle FLOAT type.""" __visit_name__ = 'FLOAT' def __init__(self, precision=None, scale=None, asdecimal=False, **kw): """Construct a FLOAT. :param precision: Total digits in this number. If scale and precision are both None, values are stored to limits allowed by the server. :param scale: The number of digits after the decimal point. 
""" super(FLOAT, self).__init__(precision=precision, scale=scale, asdecimal=asdecimal, **kw) def bind_processor(self, dialect): return None class INTEGER(sqltypes.INTEGER): """Drizzle INTEGER type.""" __visit_name__ = 'INTEGER' def __init__(self, **kw): """Construct an INTEGER.""" super(INTEGER, self).__init__(**kw) class BIGINT(sqltypes.BIGINT): """Drizzle BIGINTEGER type.""" __visit_name__ = 'BIGINT' def __init__(self, **kw): """Construct a BIGINTEGER.""" super(BIGINT, self).__init__(**kw) class TIME(mysql_dialect.TIME): """Drizzle TIME type.""" class TIMESTAMP(sqltypes.TIMESTAMP): """Drizzle TIMESTAMP type.""" __visit_name__ = 'TIMESTAMP' class TEXT(_StringType, sqltypes.TEXT): """Drizzle TEXT type, for text up to 2^16 characters.""" __visit_name__ = 'TEXT' def __init__(self, length=None, **kw): """Construct a TEXT. :param length: Optional, if provided the server may optimize storage by substituting the smallest TEXT type sufficient to store ``length`` characters. :param collation: Optional, a column-level collation for this string value. Takes precedence to 'binary' short-hand. :param binary: Defaults to False: short-hand, pick the binary collation type that matches the column's character set. Generates BINARY in schema. This does not affect the type of data stored, only the collation of character data. """ super(TEXT, self).__init__(length=length, **kw) class VARCHAR(_StringType, sqltypes.VARCHAR): """Drizzle VARCHAR type, for variable-length character data.""" __visit_name__ = 'VARCHAR' def __init__(self, length=None, **kwargs): """Construct a VARCHAR. :param collation: Optional, a column-level collation for this string value. Takes precedence to 'binary' short-hand. :param binary: Defaults to False: short-hand, pick the binary collation type that matches the column's character set. Generates BINARY in schema. This does not affect the type of data stored, only the collation of character data. """ super(VARCHAR, self).__init__(length=length, **kwargs) class CHAR(_StringType, sqltypes.CHAR): """Drizzle CHAR type, for fixed-length character data.""" __visit_name__ = 'CHAR' def __init__(self, length=None, **kwargs): """Construct a CHAR. :param length: Maximum data length, in characters. :param binary: Optional, use the default binary collation for the national character set. This does not affect the type of data stored, use a BINARY type for binary data. :param collation: Optional, request a particular collation. Must be compatible with the national character set. """ super(CHAR, self).__init__(length=length, **kwargs) class ENUM(mysql_dialect.ENUM): """Drizzle ENUM type.""" def __init__(self, *enums, **kw): """Construct an ENUM. Example: Column('myenum', ENUM("foo", "bar", "baz")) :param enums: The range of valid values for this ENUM. Values will be quoted when generating the schema according to the quoting flag (see below). :param strict: Defaults to False: ensure that a given value is in this ENUM's range of permissible values when inserting or updating rows. Note that Drizzle will not raise a fatal error if you attempt to store an out of range value- an alternate value will be stored instead. (See Drizzle ENUM documentation.) :param collation: Optional, a column-level collation for this string value. Takes precedence to 'binary' short-hand. :param binary: Defaults to False: short-hand, pick the binary collation type that matches the column's character set. Generates BINARY in schema. This does not affect the type of data stored, only the collation of character data. 
:param quoting: Defaults to 'auto': automatically determine enum value quoting. If all enum values are surrounded by the same quoting character, then use 'quoted' mode. Otherwise, use 'unquoted' mode. 'quoted': values in enums are already quoted, they will be used directly when generating the schema - this usage is deprecated. 'unquoted': values in enums are not quoted, they will be escaped and surrounded by single quotes when generating the schema. Previous versions of this type always required manually quoted values to be supplied; future versions will always quote the string literals for you. This is a transitional option. """ super(ENUM, self).__init__(*enums, **kw) class _DrizzleBoolean(sqltypes.Boolean): def get_dbapi_type(self, dbapi): return dbapi.NUMERIC colspecs = { sqltypes.Numeric: NUMERIC, sqltypes.Float: FLOAT, sqltypes.Time: TIME, sqltypes.Enum: ENUM, sqltypes.Boolean: _DrizzleBoolean, } # All the types we have in Drizzle ischema_names = { 'BIGINT': BIGINT, 'BINARY': BINARY, 'BLOB': BLOB, 'BOOLEAN': BOOLEAN, 'CHAR': CHAR, 'DATE': DATE, 'DATETIME': DATETIME, 'DECIMAL': DECIMAL, 'DOUBLE': DOUBLE, 'ENUM': ENUM, 'FLOAT': FLOAT, 'INT': INTEGER, 'INTEGER': INTEGER, 'NUMERIC': NUMERIC, 'TEXT': TEXT, 'TIME': TIME, 'TIMESTAMP': TIMESTAMP, 'VARBINARY': VARBINARY, 'VARCHAR': VARCHAR, } class DrizzleCompiler(mysql_dialect.MySQLCompiler): def visit_typeclause(self, typeclause): type_ = typeclause.type.dialect_impl(self.dialect) if isinstance(type_, sqltypes.Integer): return 'INTEGER' else: return super(DrizzleCompiler, self).visit_typeclause(typeclause) def visit_cast(self, cast, **kwargs): type_ = self.process(cast.typeclause) if type_ is None: return self.process(cast.clause) return 'CAST(%s AS %s)' % (self.process(cast.clause), type_) class DrizzleDDLCompiler(mysql_dialect.MySQLDDLCompiler): pass class DrizzleTypeCompiler(mysql_dialect.MySQLTypeCompiler): def _extend_numeric(self, type_, spec): return spec def _extend_string(self, type_, defaults, spec): """Extend a string-type declaration with standard SQL COLLATE annotations and Drizzle specific extensions. """ def attr(name): return getattr(type_, name, defaults.get(name)) if attr('collation'): collation = 'COLLATE %s' % type_.collation elif attr('binary'): collation = 'BINARY' else: collation = None return ' '.join([c for c in (spec, collation) if c is not None]) def visit_NCHAR(self, type): raise NotImplementedError("Drizzle does not support NCHAR") def visit_NVARCHAR(self, type): raise NotImplementedError("Drizzle does not support NVARCHAR") def visit_FLOAT(self, type_): if type_.scale is not None and type_.precision is not None: return "FLOAT(%s, %s)" % (type_.precision, type_.scale) else: return "FLOAT" def visit_BOOLEAN(self, type_): return "BOOLEAN" def visit_BLOB(self, type_): return "BLOB" class DrizzleExecutionContext(mysql_dialect.MySQLExecutionContext): pass class DrizzleIdentifierPreparer(mysql_dialect.MySQLIdentifierPreparer): pass class DrizzleDialect(mysql_dialect.MySQLDialect): """Details of the Drizzle dialect. Not used directly in application code. 
""" name = 'drizzle' _supports_cast = True supports_sequences = False supports_native_boolean = True supports_views = False default_paramstyle = 'format' colspecs = colspecs statement_compiler = DrizzleCompiler ddl_compiler = DrizzleDDLCompiler type_compiler = DrizzleTypeCompiler ischema_names = ischema_names preparer = DrizzleIdentifierPreparer def on_connect(self): """Force autocommit - Drizzle Bug#707842 doesn't set this properly""" def connect(conn): conn.autocommit(False) return connect @reflection.cache def get_table_names(self, connection, schema=None, **kw): """Return a Unicode SHOW TABLES from a given schema.""" if schema is not None: current_schema = schema else: current_schema = self.default_schema_name charset = 'utf8' rp = connection.execute("SHOW TABLES FROM %s" % self.identifier_preparer.quote_identifier(current_schema)) return [row[0] for row in self._compat_fetchall(rp, charset=charset)] @reflection.cache def get_view_names(self, connection, schema=None, **kw): raise NotImplementedError def _detect_casing(self, connection): """Sniff out identifier case sensitivity. Cached per-connection. This value can not change without a server restart. """ return 0 def _detect_collations(self, connection): """Pull the active COLLATIONS list from the server. Cached per-connection. """ collations = {} charset = self._connection_charset rs = connection.execute( 'SELECT CHARACTER_SET_NAME, COLLATION_NAME FROM' ' data_dictionary.COLLATIONS') for row in self._compat_fetchall(rs, charset): collations[row[0]] = row[1] return collations def _detect_ansiquotes(self, connection): """Detect and adjust for the ANSI_QUOTES sql mode.""" self._server_ansiquotes = False self._backslash_escapes = False log.class_logger(DrizzleDialect) SQLAlchemy-0.8.4/lib/sqlalchemy/dialects/drizzle/mysqldb.py0000644000076500000240000000236612251147171024451 0ustar classicstaff00000000000000""" .. 
dialect:: drizzle+mysqldb :name: MySQL-Python :dbapi: mysqldb :connectstring: drizzle+mysqldb://:@[:]/ :url: http://sourceforge.net/projects/mysql-python """ from sqlalchemy.dialects.drizzle.base import ( DrizzleDialect, DrizzleExecutionContext, DrizzleCompiler, DrizzleIdentifierPreparer) from sqlalchemy.connectors.mysqldb import ( MySQLDBExecutionContext, MySQLDBCompiler, MySQLDBIdentifierPreparer, MySQLDBConnector) class DrizzleExecutionContext_mysqldb(MySQLDBExecutionContext, DrizzleExecutionContext): pass class DrizzleCompiler_mysqldb(MySQLDBCompiler, DrizzleCompiler): pass class DrizzleIdentifierPreparer_mysqldb(MySQLDBIdentifierPreparer, DrizzleIdentifierPreparer): pass class DrizzleDialect_mysqldb(MySQLDBConnector, DrizzleDialect): execution_ctx_cls = DrizzleExecutionContext_mysqldb statement_compiler = DrizzleCompiler_mysqldb preparer = DrizzleIdentifierPreparer_mysqldb def _detect_charset(self, connection): """Sniff out the character set in use for connection results.""" return 'utf8' dialect = DrizzleDialect_mysqldb SQLAlchemy-0.8.4/lib/sqlalchemy/dialects/firebird/0000755000076500000240000000000012251151573022521 5ustar classicstaff00000000000000SQLAlchemy-0.8.4/lib/sqlalchemy/dialects/firebird/__init__.py0000644000076500000240000000123612251150015024622 0ustar classicstaff00000000000000# firebird/__init__.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php from sqlalchemy.dialects.firebird import base, kinterbasdb, fdb base.dialect = kinterbasdb.dialect from sqlalchemy.dialects.firebird.base import \ SMALLINT, BIGINT, FLOAT, FLOAT, DATE, TIME, \ TEXT, NUMERIC, FLOAT, TIMESTAMP, VARCHAR, CHAR, BLOB,\ dialect __all__ = ( 'SMALLINT', 'BIGINT', 'FLOAT', 'FLOAT', 'DATE', 'TIME', 'TEXT', 'NUMERIC', 'FLOAT', 'TIMESTAMP', 'VARCHAR', 'CHAR', 'BLOB', 'dialect' ) SQLAlchemy-0.8.4/lib/sqlalchemy/dialects/firebird/base.py0000644000076500000240000006556212251150015024011 0ustar classicstaff00000000000000# firebird/base.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """ .. dialect:: firebird :name: Firebird Firebird Dialects ----------------- Firebird offers two distinct dialects_ (not to be confused with a SQLAlchemy ``Dialect``): dialect 1 This is the old syntax and behaviour, inherited from Interbase pre-6.0. dialect 3 This is the newer and supported syntax, introduced in Interbase 6.0. The SQLAlchemy Firebird dialect detects these versions and adjusts its representation of SQL accordingly. However, support for dialect 1 is not well tested and probably has incompatibilities. Locking Behavior ---------------- Firebird locks tables aggressively. For this reason, a DROP TABLE may hang until other transactions are released. SQLAlchemy does its best to release transactions as quickly as possible. The most common cause of hanging transactions is a non-fully consumed result set, i.e.:: result = engine.execute("select * from table") row = result.fetchone() return Where above, the ``ResultProxy`` has not been fully consumed. The connection will be returned to the pool and the transactional state rolled back once the Python garbage collector reclaims the objects which hold onto the connection, which often occurs asynchronously. 
The above use case can be alleviated by calling ``first()`` on the ``ResultProxy`` which will fetch the first row and immediately close all remaining cursor/connection resources. RETURNING support ----------------- Firebird 2.0 supports returning a result set from inserts, and 2.1 extends that to deletes and updates. This is generically exposed by the SQLAlchemy ``returning()`` method, such as:: # INSERT..RETURNING result = table.insert().returning(table.c.col1, table.c.col2).\\ values(name='foo') print result.fetchall() # UPDATE..RETURNING raises = empl.update().returning(empl.c.id, empl.c.salary).\\ where(empl.c.sales>100).\\ values(dict(salary=empl.c.salary * 1.1)) print raises.fetchall() .. _dialects: http://mc-computing.com/Databases/Firebird/SQL_Dialect.html """ import datetime from sqlalchemy import schema as sa_schema from sqlalchemy import exc, types as sqltypes, sql, util from sqlalchemy.sql import expression from sqlalchemy.engine import base, default, reflection from sqlalchemy.sql import compiler from sqlalchemy.types import (BIGINT, BLOB, DATE, FLOAT, INTEGER, NUMERIC, SMALLINT, TEXT, TIME, TIMESTAMP, Integer) RESERVED_WORDS = set([ "active", "add", "admin", "after", "all", "alter", "and", "any", "as", "asc", "ascending", "at", "auto", "avg", "before", "begin", "between", "bigint", "bit_length", "blob", "both", "by", "case", "cast", "char", "character", "character_length", "char_length", "check", "close", "collate", "column", "commit", "committed", "computed", "conditional", "connect", "constraint", "containing", "count", "create", "cross", "cstring", "current", "current_connection", "current_date", "current_role", "current_time", "current_timestamp", "current_transaction", "current_user", "cursor", "database", "date", "day", "dec", "decimal", "declare", "default", "delete", "desc", "descending", "disconnect", "distinct", "do", "domain", "double", "drop", "else", "end", "entry_point", "escape", "exception", "execute", "exists", "exit", "external", "extract", "fetch", "file", "filter", "float", "for", "foreign", "from", "full", "function", "gdscode", "generator", "gen_id", "global", "grant", "group", "having", "hour", "if", "in", "inactive", "index", "inner", "input_type", "insensitive", "insert", "int", "integer", "into", "is", "isolation", "join", "key", "leading", "left", "length", "level", "like", "long", "lower", "manual", "max", "maximum_segment", "merge", "min", "minute", "module_name", "month", "names", "national", "natural", "nchar", "no", "not", "null", "numeric", "octet_length", "of", "on", "only", "open", "option", "or", "order", "outer", "output_type", "overflow", "page", "pages", "page_size", "parameter", "password", "plan", "position", "post_event", "precision", "primary", "privileges", "procedure", "protected", "rdb$db_key", "read", "real", "record_version", "recreate", "recursive", "references", "release", "reserv", "reserving", "retain", "returning_values", "returns", "revoke", "right", "rollback", "rows", "row_count", "savepoint", "schema", "second", "segment", "select", "sensitive", "set", "shadow", "shared", "singular", "size", "smallint", "snapshot", "some", "sort", "sqlcode", "stability", "start", "starting", "starts", "statistics", "sub_type", "sum", "suspend", "table", "then", "time", "timestamp", "to", "trailing", "transaction", "trigger", "trim", "uncommitted", "union", "unique", "update", "upper", "user", "using", "value", "values", "varchar", "variable", "varying", "view", "wait", "when", "where", "while", "with", "work", "write", "year", ]) class 
_StringType(sqltypes.String): """Base for Firebird string types.""" def __init__(self, charset=None, **kw): self.charset = charset super(_StringType, self).__init__(**kw) class VARCHAR(_StringType, sqltypes.VARCHAR): """Firebird VARCHAR type""" __visit_name__ = 'VARCHAR' def __init__(self, length=None, **kwargs): super(VARCHAR, self).__init__(length=length, **kwargs) class CHAR(_StringType, sqltypes.CHAR): """Firebird CHAR type""" __visit_name__ = 'CHAR' def __init__(self, length=None, **kwargs): super(CHAR, self).__init__(length=length, **kwargs) class _FBDateTime(sqltypes.DateTime): def bind_processor(self, dialect): def process(value): if type(value) == datetime.date: return datetime.datetime(value.year, value.month, value.day) else: return value return process colspecs = { sqltypes.DateTime: _FBDateTime } ischema_names = { 'SHORT': SMALLINT, 'LONG': INTEGER, 'QUAD': FLOAT, 'FLOAT': FLOAT, 'DATE': DATE, 'TIME': TIME, 'TEXT': TEXT, 'INT64': BIGINT, 'DOUBLE': FLOAT, 'TIMESTAMP': TIMESTAMP, 'VARYING': VARCHAR, 'CSTRING': CHAR, 'BLOB': BLOB, } # TODO: date conversion types (should be implemented as _FBDateTime, # _FBDate, etc. as bind/result functionality is required) class FBTypeCompiler(compiler.GenericTypeCompiler): def visit_boolean(self, type_): return self.visit_SMALLINT(type_) def visit_datetime(self, type_): return self.visit_TIMESTAMP(type_) def visit_TEXT(self, type_): return "BLOB SUB_TYPE 1" def visit_BLOB(self, type_): return "BLOB SUB_TYPE 0" def _extend_string(self, type_, basic): charset = getattr(type_, 'charset', None) if charset is None: return basic else: return '%s CHARACTER SET %s' % (basic, charset) def visit_CHAR(self, type_): basic = super(FBTypeCompiler, self).visit_CHAR(type_) return self._extend_string(type_, basic) def visit_VARCHAR(self, type_): if not type_.length: raise exc.CompileError( "VARCHAR requires a length on dialect %s" % self.dialect.name) basic = super(FBTypeCompiler, self).visit_VARCHAR(type_) return self._extend_string(type_, basic) class FBCompiler(sql.compiler.SQLCompiler): """Firebird specific idiosyncrasies""" ansi_bind_rules = True #def visit_contains_op_binary(self, binary, operator, **kw): # cant use CONTAINING b.c. it's case insensitive. #def visit_notcontains_op_binary(self, binary, operator, **kw): # cant use NOT CONTAINING b.c. it's case insensitive. 
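    # As a rough guide to the overrides below: e.g. col.startswith('x') is
    # compiled as "col STARTING WITH :param", and col % 5 as
    # "mod(col, :param)" (parameter names here are illustrative).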
def visit_now_func(self, fn, **kw): return "CURRENT_TIMESTAMP" def visit_startswith_op_binary(self, binary, operator, **kw): return '%s STARTING WITH %s' % ( binary.left._compiler_dispatch(self, **kw), binary.right._compiler_dispatch(self, **kw)) def visit_notstartswith_op_binary(self, binary, operator, **kw): return '%s NOT STARTING WITH %s' % ( binary.left._compiler_dispatch(self, **kw), binary.right._compiler_dispatch(self, **kw)) def visit_mod_binary(self, binary, operator, **kw): return "mod(%s, %s)" % ( self.process(binary.left, **kw), self.process(binary.right, **kw)) def visit_alias(self, alias, asfrom=False, **kwargs): if self.dialect._version_two: return super(FBCompiler, self).\ visit_alias(alias, asfrom=asfrom, **kwargs) else: # Override to not use the AS keyword which FB 1.5 does not like if asfrom: alias_name = isinstance(alias.name, expression._truncated_label) and \ self._truncated_identifier("alias", alias.name) or alias.name return self.process( alias.original, asfrom=asfrom, **kwargs) + \ " " + \ self.preparer.format_alias(alias, alias_name) else: return self.process(alias.original, **kwargs) def visit_substring_func(self, func, **kw): s = self.process(func.clauses.clauses[0]) start = self.process(func.clauses.clauses[1]) if len(func.clauses.clauses) > 2: length = self.process(func.clauses.clauses[2]) return "SUBSTRING(%s FROM %s FOR %s)" % (s, start, length) else: return "SUBSTRING(%s FROM %s)" % (s, start) def visit_length_func(self, function, **kw): if self.dialect._version_two: return "char_length" + self.function_argspec(function) else: return "strlen" + self.function_argspec(function) visit_char_length_func = visit_length_func def function_argspec(self, func, **kw): # TODO: this probably will need to be # narrowed to a fixed list, some no-arg functions # may require parens - see similar example in the oracle # dialect if func.clauses is not None and len(func.clauses): return self.process(func.clause_expr, **kw) else: return "" def default_from(self): return " FROM rdb$database" def visit_sequence(self, seq): return "gen_id(%s, 1)" % self.preparer.format_sequence(seq) def get_select_precolumns(self, select): """Called when building a ``SELECT`` statement, position is just before column list Firebird puts the limit and offset right after the ``SELECT``... 
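        For example, a query with a limit of 10 and an offset of 5 is emitted
        conceptually as::

            SELECT FIRST 10 SKIP 5 ... FROM ...

        (in the implementation below the values are passed through
        ``sql.literal()`` rather than inlined).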
""" result = "" if select._limit: result += "FIRST %s " % self.process(sql.literal(select._limit)) if select._offset: result += "SKIP %s " % self.process(sql.literal(select._offset)) if select._distinct: result += "DISTINCT " return result def limit_clause(self, select): """Already taken care of in the `get_select_precolumns` method.""" return "" def returning_clause(self, stmt, returning_cols): columns = [ self._label_select_column(None, c, True, False, {}) for c in expression._select_iterables(returning_cols) ] return 'RETURNING ' + ', '.join(columns) class FBDDLCompiler(sql.compiler.DDLCompiler): """Firebird syntactic idiosyncrasies""" def visit_create_sequence(self, create): """Generate a ``CREATE GENERATOR`` statement for the sequence.""" # no syntax for these # http://www.firebirdsql.org/manual/generatorguide-sqlsyntax.html if create.element.start is not None: raise NotImplemented( "Firebird SEQUENCE doesn't support START WITH") if create.element.increment is not None: raise NotImplemented( "Firebird SEQUENCE doesn't support INCREMENT BY") if self.dialect._version_two: return "CREATE SEQUENCE %s" % \ self.preparer.format_sequence(create.element) else: return "CREATE GENERATOR %s" % \ self.preparer.format_sequence(create.element) def visit_drop_sequence(self, drop): """Generate a ``DROP GENERATOR`` statement for the sequence.""" if self.dialect._version_two: return "DROP SEQUENCE %s" % \ self.preparer.format_sequence(drop.element) else: return "DROP GENERATOR %s" % \ self.preparer.format_sequence(drop.element) class FBIdentifierPreparer(sql.compiler.IdentifierPreparer): """Install Firebird specific reserved words.""" reserved_words = RESERVED_WORDS def __init__(self, dialect): super(FBIdentifierPreparer, self).__init__(dialect, omit_schema=True) class FBExecutionContext(default.DefaultExecutionContext): def fire_sequence(self, seq, type_): """Get the next value from the sequence using ``gen_id()``.""" return self._execute_scalar( "SELECT gen_id(%s, 1) FROM rdb$database" % self.dialect.identifier_preparer.format_sequence(seq), type_ ) class FBDialect(default.DefaultDialect): """Firebird dialect""" name = 'firebird' max_identifier_length = 31 supports_sequences = True sequences_optional = False supports_default_values = True postfetch_lastrowid = False supports_native_boolean = False requires_name_normalize = True supports_empty_insert = False statement_compiler = FBCompiler ddl_compiler = FBDDLCompiler preparer = FBIdentifierPreparer type_compiler = FBTypeCompiler execution_ctx_cls = FBExecutionContext colspecs = colspecs ischema_names = ischema_names # defaults to dialect ver. 
3, # will be autodetected off upon # first connect _version_two = True def initialize(self, connection): super(FBDialect, self).initialize(connection) self._version_two = ('firebird' in self.server_version_info and \ self.server_version_info >= (2, ) ) or \ ('interbase' in self.server_version_info and \ self.server_version_info >= (6, ) ) if not self._version_two: # TODO: whatever other pre < 2.0 stuff goes here self.ischema_names = ischema_names.copy() self.ischema_names['TIMESTAMP'] = sqltypes.DATE self.colspecs = { sqltypes.DateTime: sqltypes.DATE } self.implicit_returning = self._version_two and \ self.__dict__.get('implicit_returning', True) def normalize_name(self, name): # Remove trailing spaces: FB uses a CHAR() type, # that is padded with spaces name = name and name.rstrip() if name is None: return None elif name.upper() == name and \ not self.identifier_preparer._requires_quotes(name.lower()): return name.lower() else: return name def denormalize_name(self, name): if name is None: return None elif name.lower() == name and \ not self.identifier_preparer._requires_quotes(name.lower()): return name.upper() else: return name def has_table(self, connection, table_name, schema=None): """Return ``True`` if the given table exists, ignoring the `schema`.""" tblqry = """ SELECT 1 AS has_table FROM rdb$database WHERE EXISTS (SELECT rdb$relation_name FROM rdb$relations WHERE rdb$relation_name=?) """ c = connection.execute(tblqry, [self.denormalize_name(table_name)]) return c.first() is not None def has_sequence(self, connection, sequence_name, schema=None): """Return ``True`` if the given sequence (generator) exists.""" genqry = """ SELECT 1 AS has_sequence FROM rdb$database WHERE EXISTS (SELECT rdb$generator_name FROM rdb$generators WHERE rdb$generator_name=?) """ c = connection.execute(genqry, [self.denormalize_name(sequence_name)]) return c.first() is not None @reflection.cache def get_table_names(self, connection, schema=None, **kw): s = """ SELECT DISTINCT rdb$relation_name FROM rdb$relation_fields WHERE rdb$system_flag=0 AND rdb$view_context IS NULL """ return [self.normalize_name(row[0]) for row in connection.execute(s)] @reflection.cache def get_view_names(self, connection, schema=None, **kw): s = """ SELECT distinct rdb$view_name FROM rdb$view_relations """ return [self.normalize_name(row[0]) for row in connection.execute(s)] @reflection.cache def get_view_definition(self, connection, view_name, schema=None, **kw): qry = """ SELECT rdb$view_source AS view_source FROM rdb$relations WHERE rdb$relation_name=? """ rp = connection.execute(qry, [self.denormalize_name(view_name)]) row = rp.first() if row: return row['view_source'] else: return None @reflection.cache def get_pk_constraint(self, connection, table_name, schema=None, **kw): # Query to extract the PK/FK constrained fields of the given table keyqry = """ SELECT se.rdb$field_name AS fname FROM rdb$relation_constraints rc JOIN rdb$index_segments se ON rc.rdb$index_name=se.rdb$index_name WHERE rc.rdb$constraint_type=? AND rc.rdb$relation_name=? 
""" tablename = self.denormalize_name(table_name) # get primary key fields c = connection.execute(keyqry, ["PRIMARY KEY", tablename]) pkfields = [self.normalize_name(r['fname']) for r in c.fetchall()] return {'constrained_columns': pkfields, 'name': None} @reflection.cache def get_column_sequence(self, connection, table_name, column_name, schema=None, **kw): tablename = self.denormalize_name(table_name) colname = self.denormalize_name(column_name) # Heuristic-query to determine the generator associated to a PK field genqry = """ SELECT trigdep.rdb$depended_on_name AS fgenerator FROM rdb$dependencies tabdep JOIN rdb$dependencies trigdep ON tabdep.rdb$dependent_name=trigdep.rdb$dependent_name AND trigdep.rdb$depended_on_type=14 AND trigdep.rdb$dependent_type=2 JOIN rdb$triggers trig ON trig.rdb$trigger_name=tabdep.rdb$dependent_name WHERE tabdep.rdb$depended_on_name=? AND tabdep.rdb$depended_on_type=0 AND trig.rdb$trigger_type=1 AND tabdep.rdb$field_name=? AND (SELECT count(*) FROM rdb$dependencies trigdep2 WHERE trigdep2.rdb$dependent_name = trigdep.rdb$dependent_name) = 2 """ genr = connection.execute(genqry, [tablename, colname]).first() if genr is not None: return dict(name=self.normalize_name(genr['fgenerator'])) @reflection.cache def get_columns(self, connection, table_name, schema=None, **kw): # Query to extract the details of all the fields of the given table tblqry = """ SELECT r.rdb$field_name AS fname, r.rdb$null_flag AS null_flag, t.rdb$type_name AS ftype, f.rdb$field_sub_type AS stype, f.rdb$field_length/ COALESCE(cs.rdb$bytes_per_character,1) AS flen, f.rdb$field_precision AS fprec, f.rdb$field_scale AS fscale, COALESCE(r.rdb$default_source, f.rdb$default_source) AS fdefault FROM rdb$relation_fields r JOIN rdb$fields f ON r.rdb$field_source=f.rdb$field_name JOIN rdb$types t ON t.rdb$type=f.rdb$field_type AND t.rdb$field_name='RDB$FIELD_TYPE' LEFT JOIN rdb$character_sets cs ON f.rdb$character_set_id=cs.rdb$character_set_id WHERE f.rdb$system_flag=0 AND r.rdb$relation_name=? ORDER BY r.rdb$field_position """ # get the PK, used to determine the eventual associated sequence pk_constraint = self.get_pk_constraint(connection, table_name) pkey_cols = pk_constraint['constrained_columns'] tablename = self.denormalize_name(table_name) # get all of the fields for this table c = connection.execute(tblqry, [tablename]) cols = [] while True: row = c.fetchone() if row is None: break name = self.normalize_name(row['fname']) orig_colname = row['fname'] # get the data type colspec = row['ftype'].rstrip() coltype = self.ischema_names.get(colspec) if coltype is None: util.warn("Did not recognize type '%s' of column '%s'" % (colspec, name)) coltype = sqltypes.NULLTYPE elif issubclass(coltype, Integer) and row['fprec'] != 0: coltype = NUMERIC( precision=row['fprec'], scale=row['fscale'] * -1) elif colspec in ('VARYING', 'CSTRING'): coltype = coltype(row['flen']) elif colspec == 'TEXT': coltype = TEXT(row['flen']) elif colspec == 'BLOB': if row['stype'] == 1: coltype = TEXT() else: coltype = BLOB() else: coltype = coltype() # does it have a default value? 
defvalue = None if row['fdefault'] is not None: # the value comes down as "DEFAULT 'value'": there may be # more than one whitespace around the "DEFAULT" keyword # and it may also be lower case # (see also http://tracker.firebirdsql.org/browse/CORE-356) defexpr = row['fdefault'].lstrip() assert defexpr[:8].rstrip().upper() == \ 'DEFAULT', "Unrecognized default value: %s" % \ defexpr defvalue = defexpr[8:].strip() if defvalue == 'NULL': # Redundant defvalue = None col_d = { 'name': name, 'type': coltype, 'nullable': not bool(row['null_flag']), 'default': defvalue, 'autoincrement': defvalue is None } if orig_colname.lower() == orig_colname: col_d['quote'] = True # if the PK is a single field, try to see if its linked to # a sequence thru a trigger if len(pkey_cols) == 1 and name == pkey_cols[0]: seq_d = self.get_column_sequence(connection, tablename, name) if seq_d is not None: col_d['sequence'] = seq_d cols.append(col_d) return cols @reflection.cache def get_foreign_keys(self, connection, table_name, schema=None, **kw): # Query to extract the details of each UK/FK of the given table fkqry = """ SELECT rc.rdb$constraint_name AS cname, cse.rdb$field_name AS fname, ix2.rdb$relation_name AS targetrname, se.rdb$field_name AS targetfname FROM rdb$relation_constraints rc JOIN rdb$indices ix1 ON ix1.rdb$index_name=rc.rdb$index_name JOIN rdb$indices ix2 ON ix2.rdb$index_name=ix1.rdb$foreign_key JOIN rdb$index_segments cse ON cse.rdb$index_name=ix1.rdb$index_name JOIN rdb$index_segments se ON se.rdb$index_name=ix2.rdb$index_name AND se.rdb$field_position=cse.rdb$field_position WHERE rc.rdb$constraint_type=? AND rc.rdb$relation_name=? ORDER BY se.rdb$index_name, se.rdb$field_position """ tablename = self.denormalize_name(table_name) c = connection.execute(fkqry, ["FOREIGN KEY", tablename]) fks = util.defaultdict(lambda: { 'name': None, 'constrained_columns': [], 'referred_schema': None, 'referred_table': None, 'referred_columns': [] }) for row in c: cname = self.normalize_name(row['cname']) fk = fks[cname] if not fk['name']: fk['name'] = cname fk['referred_table'] = self.normalize_name(row['targetrname']) fk['constrained_columns'].append( self.normalize_name(row['fname'])) fk['referred_columns'].append( self.normalize_name(row['targetfname'])) return fks.values() @reflection.cache def get_indexes(self, connection, table_name, schema=None, **kw): qry = """ SELECT ix.rdb$index_name AS index_name, ix.rdb$unique_flag AS unique_flag, ic.rdb$field_name AS field_name FROM rdb$indices ix JOIN rdb$index_segments ic ON ix.rdb$index_name=ic.rdb$index_name LEFT OUTER JOIN rdb$relation_constraints ON rdb$relation_constraints.rdb$index_name = ic.rdb$index_name WHERE ix.rdb$relation_name=? 
AND ix.rdb$foreign_key IS NULL AND rdb$relation_constraints.rdb$constraint_type IS NULL ORDER BY index_name, field_name """ c = connection.execute(qry, [self.denormalize_name(table_name)]) indexes = util.defaultdict(dict) for row in c: indexrec = indexes[row['index_name']] if 'name' not in indexrec: indexrec['name'] = self.normalize_name(row['index_name']) indexrec['column_names'] = [] indexrec['unique'] = bool(row['unique_flag']) indexrec['column_names'].append( self.normalize_name(row['field_name'])) return indexes.values() SQLAlchemy-0.8.4/lib/sqlalchemy/dialects/firebird/fdb.py0000644000076500000240000001004412251150015023613 0ustar classicstaff00000000000000# firebird/fdb.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """ .. dialect:: firebird+fdb :name: fdb :dbapi: pyodbc :connectstring: firebird+fdb://user:password@host:port/path/to/db[?key=value&key=value...] :url: http://pypi.python.org/pypi/fdb/ fdb is a kinterbasdb compatible DBAPI for Firebird. .. versionadded:: 0.8 - Support for the fdb Firebird driver. Status ------ The fdb dialect is new and not yet tested (can't get fdb to build). Arguments ---------- The ``fdb`` dialect is based on the :mod:`sqlalchemy.dialects.firebird.kinterbasdb` dialect, however does not accept every argument that Kinterbasdb does. * ``enable_rowcount`` - True by default, setting this to False disables the usage of "cursor.rowcount" with the Kinterbasdb dialect, which SQLAlchemy ordinarily calls upon automatically after any UPDATE or DELETE statement. When disabled, SQLAlchemy's ResultProxy will return -1 for result.rowcount. The rationale here is that Kinterbasdb requires a second round trip to the database when .rowcount is called - since SQLA's resultproxy automatically closes the cursor after a non-result-returning statement, rowcount must be called, if at all, before the result object is returned. Additionally, cursor.rowcount may not return correct results with older versions of Firebird, and setting this flag to False will also cause the SQLAlchemy ORM to ignore its usage. The behavior can also be controlled on a per-execution basis using the ``enable_rowcount`` option with :meth:`.Connection.execution_options`:: conn = engine.connect().execution_options(enable_rowcount=True) r = conn.execute(stmt) print r.rowcount * ``retaining`` - True by default. Leaving this on True will pass the ``retaining=True`` keyword argument to the ``.commit()`` and ``.rollback()`` methods of the DBAPI connection, which can improve performance in some situations, but apparently with significant caveats. Please read the fdb and/or kinterbasdb DBAPI documentation in order to understand the implications of this flag. .. versionadded:: 0.8.2 - ``retaining`` keyword argument specifying transaction retaining behavior. This flag will **default to False** in 0.9. .. seealso:: http://pythonhosted.org/fdb/usage-guide.html#retaining-transactions - information on the "retaining" flag. """ from .kinterbasdb import FBDialect_kinterbasdb from ... 
import util class FBDialect_fdb(FBDialect_kinterbasdb): def __init__(self, enable_rowcount=True, retaining=True, **kwargs): super(FBDialect_fdb, self).__init__( enable_rowcount=enable_rowcount, retaining=retaining, **kwargs) @classmethod def dbapi(cls): return __import__('fdb') def create_connect_args(self, url): opts = url.translate_connect_args(username='user') if opts.get('port'): opts['host'] = "%s/%s" % (opts['host'], opts['port']) del opts['port'] opts.update(url.query) util.coerce_kw_type(opts, 'type_conv', int) return ([], opts) def _get_server_version_info(self, connection): """Get the version of the Firebird server used by a connection. Returns a tuple of (`major`, `minor`, `build`), three integers representing the version of the attached server. """ # This is the simpler approach (the other uses the services api), # that for backward compatibility reasons returns a string like # LI-V6.3.3.12981 Firebird 2.0 # where the first version is a fake one resembling the old # Interbase signature. isc_info_firebird_version = 103 fbconn = connection.connection version = fbconn.db_info(isc_info_firebird_version) return self._parse_version_info(version) dialect = FBDialect_fdb SQLAlchemy-0.8.4/lib/sqlalchemy/dialects/firebird/kinterbasdb.py0000644000076500000240000001374712251150015025365 0ustar classicstaff00000000000000# firebird/kinterbasdb.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """ .. dialect:: firebird+kinterbasdb :name: kinterbasdb :dbapi: kinterbasdb :connectstring: firebird+kinterbasdb://user:password@host:port/path/to/db[?key=value&key=value...] :url: http://firebirdsql.org/index.php?op=devel&sub=python Arguments ---------- The Kinterbasdb backend accepts the ``enable_rowcount`` and ``retaining`` arguments accepted by the :mod:`sqlalchemy.dialects.firebird.fdb` dialect. In addition, it also accepts the following: * ``type_conv`` - select the kind of mapping done on the types: by default SQLAlchemy uses 200 with Unicode, datetime and decimal support. See the linked documents below for further information. * ``concurrency_level`` - set the backend policy with regards to threading issues: by default SQLAlchemy uses policy 1. See the linked documents below for futher information. .. seealso:: http://sourceforge.net/projects/kinterbasdb http://kinterbasdb.sourceforge.net/dist_docs/usage.html#adv_param_conv_dynamic_type_translation http://kinterbasdb.sourceforge.net/dist_docs/usage.html#special_issue_concurrency """ from .base import FBDialect, FBExecutionContext from ... 
import util, types as sqltypes from re import match import decimal class _FBNumeric_kinterbasdb(sqltypes.Numeric): def bind_processor(self, dialect): def process(value): if isinstance(value, decimal.Decimal): return str(value) else: return value return process class FBExecutionContext_kinterbasdb(FBExecutionContext): @property def rowcount(self): if self.execution_options.get('enable_rowcount', self.dialect.enable_rowcount): return self.cursor.rowcount else: return -1 class FBDialect_kinterbasdb(FBDialect): driver = 'kinterbasdb' supports_sane_rowcount = False supports_sane_multi_rowcount = False execution_ctx_cls = FBExecutionContext_kinterbasdb supports_native_decimal = True colspecs = util.update_copy( FBDialect.colspecs, { sqltypes.Numeric: _FBNumeric_kinterbasdb, } ) def __init__(self, type_conv=200, concurrency_level=1, enable_rowcount=True, retaining=True, **kwargs): super(FBDialect_kinterbasdb, self).__init__(**kwargs) self.enable_rowcount = enable_rowcount self.type_conv = type_conv self.concurrency_level = concurrency_level self.retaining = retaining if enable_rowcount: self.supports_sane_rowcount = True @classmethod def dbapi(cls): return __import__('kinterbasdb') def do_execute(self, cursor, statement, parameters, context=None): # kinterbase does not accept a None, but wants an empty list # when there are no arguments. cursor.execute(statement, parameters or []) def do_rollback(self, dbapi_connection): dbapi_connection.rollback(self.retaining) def do_commit(self, dbapi_connection): dbapi_connection.commit(self.retaining) def create_connect_args(self, url): opts = url.translate_connect_args(username='user') if opts.get('port'): opts['host'] = "%s/%s" % (opts['host'], opts['port']) del opts['port'] opts.update(url.query) util.coerce_kw_type(opts, 'type_conv', int) type_conv = opts.pop('type_conv', self.type_conv) concurrency_level = opts.pop('concurrency_level', self.concurrency_level) if self.dbapi is not None: initialized = getattr(self.dbapi, 'initialized', None) if initialized is None: # CVS rev 1.96 changed the name of the attribute: # http://kinterbasdb.cvs.sourceforge.net/viewvc/kinterbasdb/ # Kinterbasdb-3.0/__init__.py?r1=1.95&r2=1.96 initialized = getattr(self.dbapi, '_initialized', False) if not initialized: self.dbapi.init(type_conv=type_conv, concurrency_level=concurrency_level) return ([], opts) def _get_server_version_info(self, connection): """Get the version of the Firebird server used by a connection. Returns a tuple of (`major`, `minor`, `build`), three integers representing the version of the attached server. """ # This is the simpler approach (the other uses the services api), # that for backward compatibility reasons returns a string like # LI-V6.3.3.12981 Firebird 2.0 # where the first version is a fake one resembling the old # Interbase signature. 
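# As an illustration only (the sample strings below are hypothetical, not
# taken from the driver documentation), _parse_version_info() defined below
# turns such version strings into dialect version tuples:
#   "LI-V6.3.3.12981 Firebird 2.0"  ->  (2, 0, 12981, 'firebird')
#   "WI-V6.3.5.4926"                ->  (6, 3, 5, 'interbase')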
fbconn = connection.connection version = fbconn.server_version return self._parse_version_info(version) def _parse_version_info(self, version): m = match('\w+-V(\d+)\.(\d+)\.(\d+)\.(\d+)( \w+ (\d+)\.(\d+))?', version) if not m: raise AssertionError( "Could not determine version from string '%s'" % version) if m.group(5) != None: return tuple([int(x) for x in m.group(6, 7, 4)] + ['firebird']) else: return tuple([int(x) for x in m.group(1, 2, 3)] + ['interbase']) def is_disconnect(self, e, connection, cursor): if isinstance(e, (self.dbapi.OperationalError, self.dbapi.ProgrammingError)): msg = str(e) return ('Unable to complete network request to host' in msg or 'Invalid connection state' in msg or 'Invalid cursor state' in msg or 'connection shutdown' in msg) else: return False dialect = FBDialect_kinterbasdb SQLAlchemy-0.8.4/lib/sqlalchemy/dialects/informix/0000755000076500000240000000000012251151573022566 5ustar classicstaff00000000000000SQLAlchemy-0.8.4/lib/sqlalchemy/dialects/informix/__init__.py0000644000076500000240000000051412251150015024665 0ustar classicstaff00000000000000# informix/__init__.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php from sqlalchemy.dialects.informix import base, informixdb base.dialect = informixdb.dialect SQLAlchemy-0.8.4/lib/sqlalchemy/dialects/informix/base.py0000644000076500000240000006233512251150015024051 0ustar classicstaff00000000000000# informix/base.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # coding: gbk # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """ .. dialect:: informix :name: Informix .. note:: The Informix dialect functions on current SQLAlchemy versions but is not regularly tested, and may have many issues and caveats not currently handled. 
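A typical connection, shown here as an illustrative sketch only (the host, user, password and database names are placeholders), goes through the ``informixdb`` DBAPI documented later in this package::

    from sqlalchemy import create_engine

    engine = create_engine("informix+informixdb://user:password@host/dbname")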
""" import datetime from sqlalchemy import sql, schema, exc, pool, util from sqlalchemy.sql import compiler, text from sqlalchemy.engine import default, reflection from sqlalchemy import types as sqltypes RESERVED_WORDS = set( ["abs", "absolute", "access", "access_method", "acos", "active", "add", "address", "add_months", "admin", "after", "aggregate", "alignment", "all", "allocate", "all_rows", "alter", "and", "ansi", "any", "append", "array", "as", "asc", "ascii", "asin", "at", "atan", "atan2", "attach", "attributes", "audit", "authentication", "authid", "authorization", "authorized", "auto", "autofree", "auto_reprepare", "auto_stat_mode", "avg", "avoid_execute", "avoid_fact", "avoid_full", "avoid_hash", "avoid_index", "avoid_index_sj", "avoid_multi_index", "avoid_nl", "avoid_star_join", "avoid_subqf", "based", "before", "begin", "between", "bigint", "bigserial", "binary", "bitand", "bitandnot", "bitnot", "bitor", "bitxor", "blob", "blobdir", "boolean", "both", "bound_impl_pdq", "buffered", "builtin", "by", "byte", "cache", "call", "cannothash", "cardinality", "cascade", "case", "cast", "ceil", "char", "character", "character_length", "char_length", "check", "class", "class_origin", "client", "clob", "clobdir", "close", "cluster", "clustersize", "cobol", "codeset", "collation", "collection", "column", "columns", "commit", "committed", "commutator", "component", "components", "concat", "concurrent", "connect", "connection", "connection_name", "connect_by_iscycle", "connect_by_isleaf", "connect_by_rootconst", "constraint", "constraints", "constructor", "context", "continue", "copy", "cos", "costfunc", "count", "crcols", "create", "cross", "current", "current_role", "currval", "cursor", "cycle", "database", "datafiles", "dataskip", "date", "datetime", "day", "dba", "dbdate", "dbinfo", "dbpassword", "dbsecadm", "dbservername", "deallocate", "debug", "debugmode", "debug_env", "dec", "decimal", "declare", "decode", "decrypt_binary", "decrypt_char", "dec_t", "default", "default_role", "deferred", "deferred_prepare", "define", "delay", "delete", "deleting", "delimited", "delimiter", "deluxe", "desc", "describe", "descriptor", "detach", "diagnostics", "directives", "dirty", "disable", "disabled", "disconnect", "disk", "distinct", "distributebinary", "distributesreferences", "distributions", "document", "domain", "donotdistribute", "dormant", "double", "drop", "dtime_t", "each", "elif", "else", "enabled", "encryption", "encrypt_aes", "encrypt_tdes", "end", "enum", "environment", "error", "escape", "exception", "exclusive", "exec", "execute", "executeanywhere", "exemption", "exists", "exit", "exp", "explain", "explicit", "express", "expression", "extdirectives", "extend", "extent", "external", "fact", "false", "far", "fetch", "file", "filetoblob", "filetoclob", "fillfactor", "filtering", "first", "first_rows", "fixchar", "fixed", "float", "floor", "flush", "for", "force", "forced", "force_ddl_exec", "foreach", "foreign", "format", "format_units", "fortran", "found", "fraction", "fragment", "fragments", "free", "from", "full", "function", "general", "get", "gethint", "global", "go", "goto", "grant", "greaterthan", "greaterthanorequal", "group", "handlesnulls", "hash", "having", "hdr", "hex", "high", "hint", "hold", "home", "hour", "idslbacreadarray", "idslbacreadset", "idslbacreadtree", "idslbacrules", "idslbacwritearray", "idslbacwriteset", "idslbacwritetree", "idssecuritylabel", "if", "ifx_auto_reprepare", "ifx_batchedread_table", "ifx_int8_t", "ifx_lo_create_spec_t", "ifx_lo_stat_t", "immediate", 
"implicit", "implicit_pdq", "in", "inactive", "increment", "index", "indexes", "index_all", "index_sj", "indicator", "informix", "init", "initcap", "inline", "inner", "inout", "insert", "inserting", "instead", "int", "int8", "integ", "integer", "internal", "internallength", "interval", "into", "intrvl_t", "is", "iscanonical", "isolation", "item", "iterator", "java", "join", "keep", "key", "label", "labeleq", "labelge", "labelglb", "labelgt", "labelle", "labellt", "labellub", "labeltostring", "language", "last", "last_day", "leading", "left", "length", "lessthan", "lessthanorequal", "let", "level", "like", "limit", "list", "listing", "load", "local", "locator", "lock", "locks", "locopy", "loc_t", "log", "log10", "logn", "long", "loop", "lotofile", "low", "lower", "lpad", "ltrim", "lvarchar", "matched", "matches", "max", "maxerrors", "maxlen", "maxvalue", "mdy", "median", "medium", "memory", "memory_resident", "merge", "message_length", "message_text", "middle", "min", "minute", "minvalue", "mod", "mode", "moderate", "modify", "module", "money", "month", "months_between", "mounting", "multiset", "multi_index", "name", "nchar", "negator", "new", "next", "nextval", "next_day", "no", "nocache", "nocycle", "nomaxvalue", "nomigrate", "nominvalue", "none", "non_dim", "non_resident", "noorder", "normal", "not", "notemplatearg", "notequal", "null", "nullif", "numeric", "numrows", "numtodsinterval", "numtoyminterval", "nvarchar", "nvl", "octet_length", "of", "off", "old", "on", "online", "only", "opaque", "opclass", "open", "optcompind", "optical", "optimization", "option", "or", "order", "ordered", "out", "outer", "output", "override", "page", "parallelizable", "parameter", "partition", "pascal", "passedbyvalue", "password", "pdqpriority", "percaltl_cos", "pipe", "pli", "pload", "policy", "pow", "power", "precision", "prepare", "previous", "primary", "prior", "private", "privileges", "procedure", "properties", "public", "put", "raise", "range", "raw", "read", "real", "recordend", "references", "referencing", "register", "rejectfile", "relative", "release", "remainder", "rename", "reoptimization", "repeatable", "replace", "replication", "reserve", "resolution", "resource", "restart", "restrict", "resume", "retain", "retainupdatelocks", "return", "returned_sqlstate", "returning", "returns", "reuse", "revoke", "right", "robin", "role", "rollback", "rollforward", "root", "round", "routine", "row", "rowid", "rowids", "rows", "row_count", "rpad", "rtrim", "rule", "sameas", "samples", "sampling", "save", "savepoint", "schema", "scroll", "seclabel_by_comp", "seclabel_by_name", "seclabel_to_char", "second", "secondary", "section", "secured", "security", "selconst", "select", "selecting", "selfunc", "selfuncargs", "sequence", "serial", "serial8", "serializable", "serveruuid", "server_name", "session", "set", "setsessionauth", "share", "short", "siblings", "signed", "sin", "sitename", "size", "skall", "skinhibit", "skip", "skshow", "smallfloat", "smallint", "some", "specific", "sql", "sqlcode", "sqlcontext", "sqlerror", "sqlstate", "sqlwarning", "sqrt", "stability", "stack", "standard", "start", "star_join", "statchange", "statement", "static", "statistics", "statlevel", "status", "stdev", "step", "stop", "storage", "store", "strategies", "string", "stringtolabel", "struct", "style", "subclass_origin", "substr", "substring", "sum", "support", "sync", "synonym", "sysdate", "sysdbclose", "sysdbopen", "system", "sys_connect_by_path", "table", "tables", "tan", "task", "temp", "template", "test", "text", "then", 
"time", "timeout", "to", "today", "to_char", "to_date", "to_dsinterval", "to_number", "to_yminterval", "trace", "trailing", "transaction", "transition", "tree", "trigger", "triggers", "trim", "true", "trunc", "truncate", "trusted", "type", "typedef", "typeid", "typename", "typeof", "uid", "uncommitted", "under", "union", "unique", "units", "unknown", "unload", "unlock", "unsigned", "update", "updating", "upon", "upper", "usage", "use", "uselastcommitted", "user", "use_hash", "use_nl", "use_subqf", "using", "value", "values", "var", "varchar", "variable", "variance", "variant", "varying", "vercols", "view", "violations", "void", "volatile", "wait", "warning", "weekday", "when", "whenever", "where", "while", "with", "without", "work", "write", "writedown", "writeup", "xadatasource", "xid", "xload", "xunload", "year" ]) class InfoDateTime(sqltypes.DateTime): def bind_processor(self, dialect): def process(value): if value is not None: if value.microsecond: value = value.replace(microsecond=0) return value return process class InfoTime(sqltypes.Time): def bind_processor(self, dialect): def process(value): if value is not None: if value.microsecond: value = value.replace(microsecond=0) return value return process def result_processor(self, dialect, coltype): def process(value): if isinstance(value, datetime.datetime): return value.time() else: return value return process colspecs = { sqltypes.DateTime: InfoDateTime, sqltypes.TIMESTAMP: InfoDateTime, sqltypes.Time: InfoTime, } ischema_names = { 0: sqltypes.CHAR, # CHAR 1: sqltypes.SMALLINT, # SMALLINT 2: sqltypes.INTEGER, # INT 3: sqltypes.FLOAT, # Float 3: sqltypes.Float, # SmallFloat 5: sqltypes.DECIMAL, # DECIMAL 6: sqltypes.Integer, # Serial 7: sqltypes.DATE, # DATE 8: sqltypes.Numeric, # MONEY 10: sqltypes.DATETIME, # DATETIME 11: sqltypes.LargeBinary, # BYTE 12: sqltypes.TEXT, # TEXT 13: sqltypes.VARCHAR, # VARCHAR 15: sqltypes.NCHAR, # NCHAR 16: sqltypes.NVARCHAR, # NVARCHAR 17: sqltypes.Integer, # INT8 18: sqltypes.Integer, # Serial8 43: sqltypes.String, # LVARCHAR -1: sqltypes.BLOB, # BLOB -1: sqltypes.CLOB, # CLOB } class InfoTypeCompiler(compiler.GenericTypeCompiler): def visit_DATETIME(self, type_): return "DATETIME YEAR TO SECOND" def visit_TIME(self, type_): return "DATETIME HOUR TO SECOND" def visit_TIMESTAMP(self, type_): return "DATETIME YEAR TO SECOND" def visit_large_binary(self, type_): return "BYTE" def visit_boolean(self, type_): return "SMALLINT" class InfoSQLCompiler(compiler.SQLCompiler): def default_from(self): return " from systables where tabname = 'systables' " def get_select_precolumns(self, select): s = "" if select._offset: s += "SKIP %s " % select._offset if select._limit: s += "FIRST %s " % select._limit s += select._distinct and "DISTINCT " or "" return s def visit_select(self, select, asfrom=False, parens=True, **kw): text = compiler.SQLCompiler.visit_select(self, select, asfrom, parens, **kw) if asfrom and parens and self.dialect.server_version_info < (11,): #assuming that 11 version doesn't need this, not tested return "table(multiset" + text + ")" else: return text def limit_clause(self, select): return "" def visit_function(self, func, **kw): if func.name.lower() == 'current_date': return "today" elif func.name.lower() == 'current_time': return "CURRENT HOUR TO SECOND" elif func.name.lower() in ('current_timestamp', 'now'): return "CURRENT YEAR TO SECOND" else: return compiler.SQLCompiler.visit_function(self, func, **kw) def visit_mod_binary(self, binary, operator, **kw): return "MOD(%s, %s)" % 
(self.process(binary.left, **kw), self.process(binary.right, **kw)) class InfoDDLCompiler(compiler.DDLCompiler): def visit_add_constraint(self, create): preparer = self.preparer return "ALTER TABLE %s ADD CONSTRAINT %s" % ( self.preparer.format_table(create.element.table), self.process(create.element) ) def get_column_specification(self, column, **kw): colspec = self.preparer.format_column(column) first = None if column.primary_key and column.autoincrement: try: first = [c for c in column.table.primary_key.columns if (c.autoincrement and isinstance(c.type, sqltypes.Integer) and not c.foreign_keys)].pop(0) except IndexError: pass if column is first: colspec += " SERIAL" else: colspec += " " + self.dialect.type_compiler.process(column.type) default = self.get_column_default_string(column) if default is not None: colspec += " DEFAULT " + default if not column.nullable: colspec += " NOT NULL" return colspec def get_column_default_string(self, column): if (isinstance(column.server_default, schema.DefaultClause) and isinstance(column.server_default.arg, basestring)): if isinstance(column.type, (sqltypes.Integer, sqltypes.Numeric)): return self.sql_compiler.process(text(column.server_default.arg)) return super(InfoDDLCompiler, self).get_column_default_string(column) ### Informix wants the constraint name at the end, hence this ist c&p from sql/compiler.py def visit_primary_key_constraint(self, constraint): if len(constraint) == 0: return '' text = "PRIMARY KEY " text += "(%s)" % ', '.join(self.preparer.quote(c.name, c.quote) for c in constraint) text += self.define_constraint_deferrability(constraint) if constraint.name is not None: text += " CONSTRAINT %s" % self.preparer.format_constraint(constraint) return text def visit_foreign_key_constraint(self, constraint): preparer = self.dialect.identifier_preparer remote_table = list(constraint._elements.values())[0].column.table text = "FOREIGN KEY (%s) REFERENCES %s (%s)" % ( ', '.join(preparer.quote(f.parent.name, f.parent.quote) for f in constraint._elements.values()), preparer.format_table(remote_table), ', '.join(preparer.quote(f.column.name, f.column.quote) for f in constraint._elements.values()) ) text += self.define_constraint_cascades(constraint) text += self.define_constraint_deferrability(constraint) if constraint.name is not None: text += " CONSTRAINT %s " % \ preparer.format_constraint(constraint) return text def visit_unique_constraint(self, constraint): text = "UNIQUE (%s)" % (', '.join(self.preparer.quote(c.name, c.quote) for c in constraint)) text += self.define_constraint_deferrability(constraint) if constraint.name is not None: text += "CONSTRAINT %s " % self.preparer.format_constraint(constraint) return text class InformixIdentifierPreparer(compiler.IdentifierPreparer): reserved_words = RESERVED_WORDS class InformixDialect(default.DefaultDialect): name = 'informix' max_identifier_length = 128 # adjusts at runtime based on server version type_compiler = InfoTypeCompiler statement_compiler = InfoSQLCompiler ddl_compiler = InfoDDLCompiler colspecs = colspecs ischema_names = ischema_names preparer = InformixIdentifierPreparer default_paramstyle = 'qmark' def initialize(self, connection): super(InformixDialect, self).initialize(connection) # http://www.querix.com/support/knowledge-base/error_number_message/error_200 if self.server_version_info < (9, 2): self.max_identifier_length = 18 else: self.max_identifier_length = 128 def _get_table_names(self, connection, schema, type, **kw): schema = schema or self.default_schema_name s = "select 
tabname, owner from systables where owner=? and tabtype=?" return [row[0] for row in connection.execute(s, schema, type)] @reflection.cache def get_table_names(self, connection, schema=None, **kw): return self._get_table_names(connection, schema, 'T', **kw) @reflection.cache def get_view_names(self, connection, schema=None, **kw): return self._get_table_names(connection, schema, 'V', **kw) @reflection.cache def get_schema_names(self, connection, **kw): s = "select owner from systables" return [row[0] for row in connection.execute(s)] def has_table(self, connection, table_name, schema=None): schema = schema or self.default_schema_name cursor = connection.execute( """select tabname from systables where tabname=? and owner=?""", table_name, schema) return cursor.first() is not None @reflection.cache def get_columns(self, connection, table_name, schema=None, **kw): schema = schema or self.default_schema_name c = connection.execute( """select colname, coltype, collength, t3.default, t1.colno from syscolumns as t1 , systables as t2 , OUTER sysdefaults as t3 where t1.tabid = t2.tabid and t2.tabname=? and t2.owner=? and t3.tabid = t2.tabid and t3.colno = t1.colno order by t1.colno""", table_name, schema) pk_constraint = self.get_pk_constraint(connection, table_name, schema, **kw) primary_cols = pk_constraint['constrained_columns'] columns = [] rows = c.fetchall() for name, colattr, collength, default, colno in rows: name = name.lower() autoincrement = False primary_key = False if name in primary_cols: primary_key = True # in 7.31, coltype = 0x000 # ^^-- column type # ^-- 1 not null, 0 null not_nullable, coltype = divmod(colattr, 256) if coltype not in (0, 13) and default: default = default.split()[-1] if coltype == 6: # Serial, mark as autoincrement autoincrement = True if coltype == 0 or coltype == 13: # char, varchar coltype = ischema_names[coltype](collength) if default: default = "'%s'" % default elif coltype == 5: # decimal precision, scale = (collength & 0xFF00) >> 8, collength & 0xFF if scale == 255: scale = 0 coltype = sqltypes.Numeric(precision, scale) else: try: coltype = ischema_names[coltype] except KeyError: util.warn("Did not recognize type '%s' of column '%s'" % (coltype, name)) coltype = sqltypes.NULLTYPE column_info = dict(name=name, type=coltype, nullable=not not_nullable, default=default, autoincrement=autoincrement, primary_key=primary_key) columns.append(column_info) return columns @reflection.cache def get_foreign_keys(self, connection, table_name, schema=None, **kw): schema_sel = schema or self.default_schema_name c = connection.execute( """select t1.constrname as cons_name, t4.colname as local_column, t7.tabname as remote_table, t6.colname as remote_column, t7.owner as remote_owner from sysconstraints as t1 , systables as t2 , sysindexes as t3 , syscolumns as t4 , sysreferences as t5 , syscolumns as t6 , systables as t7 , sysconstraints as t8 , sysindexes as t9 where t1.tabid = t2.tabid and t2.tabname=? and t2.owner=? 
and t1.constrtype = 'R' and t3.tabid = t2.tabid and t3.idxname = t1.idxname and t4.tabid = t2.tabid and t4.colno in (t3.part1, t3.part2, t3.part3, t3.part4, t3.part5, t3.part6, t3.part7, t3.part8, t3.part9, t3.part10, t3.part11, t3.part11, t3.part12, t3.part13, t3.part4, t3.part15, t3.part16) and t5.constrid = t1.constrid and t8.constrid = t5.primary and t6.tabid = t5.ptabid and t6.colno in (t9.part1, t9.part2, t9.part3, t9.part4, t9.part5, t9.part6, t9.part7, t9.part8, t9.part9, t9.part10, t9.part11, t9.part11, t9.part12, t9.part13, t9.part4, t9.part15, t9.part16) and t9.idxname = t8.idxname and t7.tabid = t5.ptabid""", table_name, schema_sel) def fkey_rec(): return { 'name': None, 'constrained_columns': [], 'referred_schema': None, 'referred_table': None, 'referred_columns': [] } fkeys = util.defaultdict(fkey_rec) rows = c.fetchall() for cons_name, local_column, \ remote_table, remote_column, remote_owner in rows: rec = fkeys[cons_name] rec['name'] = cons_name local_cols, remote_cols = \ rec['constrained_columns'], rec['referred_columns'] if not rec['referred_table']: rec['referred_table'] = remote_table if schema is not None: rec['referred_schema'] = remote_owner if local_column not in local_cols: local_cols.append(local_column) if remote_column not in remote_cols: remote_cols.append(remote_column) return fkeys.values() @reflection.cache def get_pk_constraint(self, connection, table_name, schema=None, **kw): schema = schema or self.default_schema_name # Select the column positions from sysindexes for sysconstraints data = connection.execute( """select t2.* from systables as t1, sysindexes as t2, sysconstraints as t3 where t1.tabid=t2.tabid and t1.tabname=? and t1.owner=? and t2.idxname=t3.idxname and t3.constrtype='P'""", table_name, schema ).fetchall() colpositions = set() for row in data: colpos = set([getattr(row, 'part%d' % x) for x in range(1, 16)]) colpositions |= colpos if not len(colpositions): return {'constrained_columns': [], 'name': None} # Select the column names using the columnpositions # TODO: Maybe cache a bit of those col infos (eg select all colnames for one table) place_holder = ','.join('?' * len(colpositions)) c = connection.execute( """select t1.colname from syscolumns as t1, systables as t2 where t2.tabname=? and t1.tabid = t2.tabid and t1.colno in (%s)""" % place_holder, table_name, *colpositions ).fetchall() cols = reduce(lambda x, y: list(x) + list(y), c, []) return {'constrained_columns': cols, 'name': None} @reflection.cache def get_indexes(self, connection, table_name, schema, **kw): # TODO: schema... c = connection.execute( """select t1.* from sysindexes as t1 , systables as t2 where t1.tabid = t2.tabid and t2.tabname=?""", table_name) indexes = [] for row in c.fetchall(): colnames = [getattr(row, 'part%d' % x) for x in range(1, 16)] colnames = [x for x in colnames if x] place_holder = ','.join('?' * len(colnames)) c = connection.execute( """select t1.colname from syscolumns as t1, systables as t2 where t2.tabname=? and t1.tabid = t2.tabid and t1.colno in (%s)""" % place_holder, table_name, *colnames ).fetchall() c = reduce(lambda x, y: list(x) + list(y), c, []) indexes.append({ 'name': row.idxname, 'unique': row.idxtype.lower() == 'u', 'column_names': c }) return indexes @reflection.cache def get_view_definition(self, connection, view_name, schema=None, **kw): schema = schema or self.default_schema_name c = connection.execute( """select t1.viewtext from sysviews as t1 , systables as t2 where t1.tabid=t2.tabid and t2.tabname=? and t2.owner=? 
order by seqno""", view_name, schema).fetchall() return ''.join([row[0] for row in c]) def _get_default_schema_name(self, connection): return connection.execute('select CURRENT_ROLE from systables').scalar() SQLAlchemy-0.8.4/lib/sqlalchemy/dialects/informix/informixdb.py0000644000076500000240000000360212251150015025270 0ustar classicstaff00000000000000# informix/informixdb.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """ .. dialect:: informix+informixdb :name: informixdb :dbapi: informixdb :connectstring: informix+informixdb://user:password@host/dbname :url: http://informixdb.sourceforge.net/ """ import re from sqlalchemy.dialects.informix.base import InformixDialect from sqlalchemy.engine import default VERSION_RE = re.compile(r'(\d+)\.(\d+)(.+\d+)') class InformixExecutionContext_informixdb(default.DefaultExecutionContext): def post_exec(self): if self.isinsert: self._lastrowid = self.cursor.sqlerrd[1] def get_lastrowid(self): return self._lastrowid class InformixDialect_informixdb(InformixDialect): driver = 'informixdb' execution_ctx_cls = InformixExecutionContext_informixdb @classmethod def dbapi(cls): return __import__('informixdb') def create_connect_args(self, url): if url.host: dsn = '%s@%s' % (url.database, url.host) else: dsn = url.database if url.username: opt = {'user': url.username, 'password': url.password} else: opt = {} return ([dsn], opt) def _get_server_version_info(self, connection): # http://informixdb.sourceforge.net/manual.html#inspecting-version-numbers v = VERSION_RE.split(connection.connection.dbms_version) return (int(v[1]), int(v[2]), v[3]) def is_disconnect(self, e, connection, cursor): if isinstance(e, self.dbapi.OperationalError): return 'closed the connection' in str(e) \ or 'connection not open' in str(e) else: return False dialect = InformixDialect_informixdb SQLAlchemy-0.8.4/lib/sqlalchemy/dialects/mssql/0000755000076500000240000000000012251151573022072 5ustar classicstaff00000000000000SQLAlchemy-0.8.4/lib/sqlalchemy/dialects/mssql/__init__.py0000644000076500000240000000212712251147171024204 0ustar classicstaff00000000000000# mssql/__init__.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php from sqlalchemy.dialects.mssql import base, pyodbc, adodbapi, \ pymssql, zxjdbc, mxodbc base.dialect = pyodbc.dialect from sqlalchemy.dialects.mssql.base import \ INTEGER, BIGINT, SMALLINT, TINYINT, VARCHAR, NVARCHAR, CHAR, \ NCHAR, TEXT, NTEXT, DECIMAL, NUMERIC, FLOAT, DATETIME,\ DATETIME2, DATETIMEOFFSET, DATE, TIME, SMALLDATETIME, \ BINARY, VARBINARY, BIT, REAL, IMAGE, TIMESTAMP,\ MONEY, SMALLMONEY, UNIQUEIDENTIFIER, SQL_VARIANT, dialect __all__ = ( 'INTEGER', 'BIGINT', 'SMALLINT', 'TINYINT', 'VARCHAR', 'NVARCHAR', 'CHAR', 'NCHAR', 'TEXT', 'NTEXT', 'DECIMAL', 'NUMERIC', 'FLOAT', 'DATETIME', 'DATETIME2', 'DATETIMEOFFSET', 'DATE', 'TIME', 'SMALLDATETIME', 'BINARY', 'VARBINARY', 'BIT', 'REAL', 'IMAGE', 'TIMESTAMP', 'MONEY', 'SMALLMONEY', 'UNIQUEIDENTIFIER', 'SQL_VARIANT', 'dialect' ) SQLAlchemy-0.8.4/lib/sqlalchemy/dialects/mssql/adodbapi.py0000644000076500000240000000471512251147171024215 0ustar classicstaff00000000000000# mssql/adodbapi.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the 
MIT License: http://www.opensource.org/licenses/mit-license.php """ .. dialect:: mssql+adodbapi :name: adodbapi :dbapi: adodbapi :connectstring: mssql+adodbapi://:@ :url: http://adodbapi.sourceforge.net/ .. note:: The adodbapi dialect is not implemented SQLAlchemy versions 0.6 and above at this time. """ import datetime from sqlalchemy import types as sqltypes, util from sqlalchemy.dialects.mssql.base import MSDateTime, MSDialect import sys class MSDateTime_adodbapi(MSDateTime): def result_processor(self, dialect, coltype): def process(value): # adodbapi will return datetimes with empty time # values as datetime.date() objects. # Promote them back to full datetime.datetime() if type(value) is datetime.date: return datetime.datetime(value.year, value.month, value.day) return value return process class MSDialect_adodbapi(MSDialect): supports_sane_rowcount = True supports_sane_multi_rowcount = True supports_unicode = sys.maxunicode == 65535 supports_unicode_statements = True driver = 'adodbapi' @classmethod def import_dbapi(cls): import adodbapi as module return module colspecs = util.update_copy( MSDialect.colspecs, { sqltypes.DateTime: MSDateTime_adodbapi } ) def create_connect_args(self, url): keys = url.query connectors = ["Provider=SQLOLEDB"] if 'port' in keys: connectors.append("Data Source=%s, %s" % (keys.get("host"), keys.get("port"))) else: connectors.append("Data Source=%s" % keys.get("host")) connectors.append("Initial Catalog=%s" % keys.get("database")) user = keys.get("user") if user: connectors.append("User Id=%s" % user) connectors.append("Password=%s" % keys.get("password", "")) else: connectors.append("Integrated Security=SSPI") return [[";".join(connectors)], {}] def is_disconnect(self, e, connection, cursor): return isinstance(e, self.dbapi.adodbapi.DatabaseError) and \ "'connection failure'" in str(e) dialect = MSDialect_adodbapi SQLAlchemy-0.8.4/lib/sqlalchemy/dialects/mssql/base.py0000644000076500000240000014367212251150015023361 0ustar classicstaff00000000000000# mssql/base.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """ .. dialect:: mssql :name: Microsoft SQL Server Auto Increment Behavior ----------------------- ``IDENTITY`` columns are supported by using SQLAlchemy ``schema.Sequence()`` objects. In other words:: from sqlalchemy import Table, Integer, Sequence, Column Table('test', metadata, Column('id', Integer, Sequence('blah',100,10), primary_key=True), Column('name', String(20)) ).create(some_engine) would yield:: CREATE TABLE test ( id INTEGER NOT NULL IDENTITY(100,10) PRIMARY KEY, name VARCHAR(20) NULL, ) Note that the ``start`` and ``increment`` values for sequences are optional and will default to 1,1. Implicit ``autoincrement`` behavior works the same in MSSQL as it does in other dialects and results in an ``IDENTITY`` column. * Support for ``SET IDENTITY_INSERT ON`` mode (automagic on / off for ``INSERT`` s) * Support for auto-fetching of ``@@IDENTITY/@@SCOPE_IDENTITY()`` on ``INSERT`` Collation Support ----------------- Character collations are supported by the base string types, specified by the string argument "collation":: from sqlalchemy import VARCHAR Column('login', VARCHAR(32, collation='Latin1_General_CI_AS')) When such a column is associated with a :class:`.Table`, the CREATE TABLE statement for this column will yield:: login VARCHAR(32) COLLATE Latin1_General_CI_AS NULL .. 
versionadded:: 0.8 Character collations are now part of the base string types. LIMIT/OFFSET Support -------------------- MSSQL has no support for the LIMIT or OFFSET keysowrds. LIMIT is supported directly through the ``TOP`` Transact SQL keyword:: select.limit will yield:: SELECT TOP n If using SQL Server 2005 or above, LIMIT with OFFSET support is available through the ``ROW_NUMBER OVER`` construct. For versions below 2005, LIMIT with OFFSET usage will fail. Nullability ----------- MSSQL has support for three levels of column nullability. The default nullability allows nulls and is explicit in the CREATE TABLE construct:: name VARCHAR(20) NULL If ``nullable=None`` is specified then no specification is made. In other words the database's configured default is used. This will render:: name VARCHAR(20) If ``nullable`` is ``True`` or ``False`` then the column will be ``NULL` or ``NOT NULL`` respectively. Date / Time Handling -------------------- DATE and TIME are supported. Bind parameters are converted to datetime.datetime() objects as required by most MSSQL drivers, and results are processed from strings if needed. The DATE and TIME types are not available for MSSQL 2005 and previous - if a server version below 2008 is detected, DDL for these types will be issued as DATETIME. .. _mssql_indexes: MSSQL-Specific Index Options ----------------------------- The MSSQL dialect supports special options for :class:`.Index`. CLUSTERED ^^^^^^^^^^ The ``mssql_clustered`` option adds the CLUSTERED keyword to the index:: Index("my_index", table.c.x, mssql_clustered=True) would render the index as ``CREATE CLUSTERED INDEX my_index ON table (x)`` .. versionadded:: 0.8 INCLUDE ^^^^^^^ The ``mssql_include`` option renders INCLUDE(colname) for the given string names:: Index("my_index", table.c.x, mssql_include=['y']) would render the index as ``CREATE INDEX my_index ON table (x) INCLUDE (y)`` .. versionadded:: 0.8 Index ordering ^^^^^^^^^^^^^^ Index ordering is available via functional expressions, such as:: Index("my_index", table.c.x.desc()) would render the index as ``CREATE INDEX my_index ON table (x DESC)`` .. versionadded:: 0.8 .. seealso:: :ref:`schema_indexes_functional` Compatibility Levels -------------------- MSSQL supports the notion of setting compatibility levels at the database level. This allows, for instance, to run a database that is compatible with SQL2000 while running on a SQL2005 database server. ``server_version_info`` will always return the database server version information (in this case SQL2005) and not the compatibility level information. Because of this, if running under a backwards compatibility mode SQAlchemy may attempt to use T-SQL statements that are unable to be parsed by the database server. Triggers -------- SQLAlchemy by default uses OUTPUT INSERTED to get at newly generated primary key values via IDENTITY columns or other server side defaults. MS-SQL does not allow the usage of OUTPUT INSERTED on tables that have triggers. To disable the usage of OUTPUT INSERTED on a per-table basis, specify ``implicit_returning=False`` for each :class:`.Table` which has triggers:: Table('mytable', metadata, Column('id', Integer, primary_key=True), # ..., implicit_returning=False ) Declarative form:: class MyClass(Base): # ... __table_args__ = {'implicit_returning':False} This option can also be specified engine-wide using the ``implicit_returning=False`` argument on :func:`.create_engine`. 
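As an illustrative sketch (the connection URL below is a placeholder), the engine-wide form looks like::

    from sqlalchemy import create_engine

    engine = create_engine(
        "mssql+pyodbc://user:password@some_dsn",
        implicit_returning=False,
    )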
Enabling Snapshot Isolation --------------------------- Not necessarily specific to SQLAlchemy, SQL Server has a default transaction isolation mode that locks entire tables, and causes even mildly concurrent applications to have long held locks and frequent deadlocks. Enabling snapshot isolation for the database as a whole is recommended for modern levels of concurrency support. This is accomplished via the following ALTER DATABASE commands executed at the SQL prompt:: ALTER DATABASE MyDatabase SET ALLOW_SNAPSHOT_ISOLATION ON ALTER DATABASE MyDatabase SET READ_COMMITTED_SNAPSHOT ON Background on SQL Server snapshot isolation is available at http://msdn.microsoft.com/en-us/library/ms175095.aspx. Known Issues ------------ * No support for more than one ``IDENTITY`` column per table * reflection of indexes does not work with versions older than SQL Server 2005 """ import datetime import operator import re from ... import sql, schema as sa_schema, exc, util from ...sql import compiler, expression, \ util as sql_util, cast from ... import engine from ...engine import reflection, default from ... import types as sqltypes from ...types import INTEGER, BIGINT, SMALLINT, DECIMAL, NUMERIC, \ FLOAT, TIMESTAMP, DATETIME, DATE, BINARY,\ VARBINARY, TEXT, VARCHAR, NVARCHAR, CHAR, NCHAR from ...util import update_wrapper from . import information_schema as ischema MS_2008_VERSION = (10,) MS_2005_VERSION = (9,) MS_2000_VERSION = (8,) RESERVED_WORDS = set( ['add', 'all', 'alter', 'and', 'any', 'as', 'asc', 'authorization', 'backup', 'begin', 'between', 'break', 'browse', 'bulk', 'by', 'cascade', 'case', 'check', 'checkpoint', 'close', 'clustered', 'coalesce', 'collate', 'column', 'commit', 'compute', 'constraint', 'contains', 'containstable', 'continue', 'convert', 'create', 'cross', 'current', 'current_date', 'current_time', 'current_timestamp', 'current_user', 'cursor', 'database', 'dbcc', 'deallocate', 'declare', 'default', 'delete', 'deny', 'desc', 'disk', 'distinct', 'distributed', 'double', 'drop', 'dump', 'else', 'end', 'errlvl', 'escape', 'except', 'exec', 'execute', 'exists', 'exit', 'external', 'fetch', 'file', 'fillfactor', 'for', 'foreign', 'freetext', 'freetexttable', 'from', 'full', 'function', 'goto', 'grant', 'group', 'having', 'holdlock', 'identity', 'identity_insert', 'identitycol', 'if', 'in', 'index', 'inner', 'insert', 'intersect', 'into', 'is', 'join', 'key', 'kill', 'left', 'like', 'lineno', 'load', 'merge', 'national', 'nocheck', 'nonclustered', 'not', 'null', 'nullif', 'of', 'off', 'offsets', 'on', 'open', 'opendatasource', 'openquery', 'openrowset', 'openxml', 'option', 'or', 'order', 'outer', 'over', 'percent', 'pivot', 'plan', 'precision', 'primary', 'print', 'proc', 'procedure', 'public', 'raiserror', 'read', 'readtext', 'reconfigure', 'references', 'replication', 'restore', 'restrict', 'return', 'revert', 'revoke', 'right', 'rollback', 'rowcount', 'rowguidcol', 'rule', 'save', 'schema', 'securityaudit', 'select', 'session_user', 'set', 'setuser', 'shutdown', 'some', 'statistics', 'system_user', 'table', 'tablesample', 'textsize', 'then', 'to', 'top', 'tran', 'transaction', 'trigger', 'truncate', 'tsequal', 'union', 'unique', 'unpivot', 'update', 'updatetext', 'use', 'user', 'values', 'varying', 'view', 'waitfor', 'when', 'where', 'while', 'with', 'writetext', ]) class REAL(sqltypes.REAL): __visit_name__ = 'REAL' def __init__(self, **kw): # REAL is a synonym for FLOAT(24) on SQL server kw['precision'] = 24 super(REAL, self).__init__(**kw) class TINYINT(sqltypes.Integer): 
__visit_name__ = 'TINYINT' # MSSQL DATE/TIME types have varied behavior, sometimes returning # strings. MSDate/TIME check for everything, and always # filter bind parameters into datetime objects (required by pyodbc, # not sure about other dialects). class _MSDate(sqltypes.Date): def bind_processor(self, dialect): def process(value): if type(value) == datetime.date: return datetime.datetime(value.year, value.month, value.day) else: return value return process _reg = re.compile(r"(\d+)-(\d+)-(\d+)") def result_processor(self, dialect, coltype): def process(value): if isinstance(value, datetime.datetime): return value.date() elif isinstance(value, basestring): return datetime.date(*[ int(x or 0) for x in self._reg.match(value).groups() ]) else: return value return process class TIME(sqltypes.TIME): def __init__(self, precision=None, **kwargs): self.precision = precision super(TIME, self).__init__() __zero_date = datetime.date(1900, 1, 1) def bind_processor(self, dialect): def process(value): if isinstance(value, datetime.datetime): value = datetime.datetime.combine( self.__zero_date, value.time()) elif isinstance(value, datetime.time): value = datetime.datetime.combine(self.__zero_date, value) return value return process _reg = re.compile(r"(\d+):(\d+):(\d+)(?:\.(\d{0,6}))?") def result_processor(self, dialect, coltype): def process(value): if isinstance(value, datetime.datetime): return value.time() elif isinstance(value, basestring): return datetime.time(*[ int(x or 0) for x in self._reg.match(value).groups()]) else: return value return process _MSTime = TIME class _DateTimeBase(object): def bind_processor(self, dialect): def process(value): if type(value) == datetime.date: return datetime.datetime(value.year, value.month, value.day) else: return value return process class _MSDateTime(_DateTimeBase, sqltypes.DateTime): pass class SMALLDATETIME(_DateTimeBase, sqltypes.DateTime): __visit_name__ = 'SMALLDATETIME' class DATETIME2(_DateTimeBase, sqltypes.DateTime): __visit_name__ = 'DATETIME2' def __init__(self, precision=None, **kw): super(DATETIME2, self).__init__(**kw) self.precision = precision # TODO: is this not an Interval ? class DATETIMEOFFSET(sqltypes.TypeEngine): __visit_name__ = 'DATETIMEOFFSET' def __init__(self, precision=None, **kwargs): self.precision = precision class _StringType(object): """Base for MSSQL string types.""" def __init__(self, collation=None): super(_StringType, self).__init__(collation=collation) class NTEXT(sqltypes.UnicodeText): """MSSQL NTEXT type, for variable-length unicode text up to 2^30 characters.""" __visit_name__ = 'NTEXT' class IMAGE(sqltypes.LargeBinary): __visit_name__ = 'IMAGE' class BIT(sqltypes.TypeEngine): __visit_name__ = 'BIT' class MONEY(sqltypes.TypeEngine): __visit_name__ = 'MONEY' class SMALLMONEY(sqltypes.TypeEngine): __visit_name__ = 'SMALLMONEY' class UNIQUEIDENTIFIER(sqltypes.TypeEngine): __visit_name__ = "UNIQUEIDENTIFIER" class SQL_VARIANT(sqltypes.TypeEngine): __visit_name__ = 'SQL_VARIANT' # old names. 
MSDateTime = _MSDateTime MSDate = _MSDate MSReal = REAL MSTinyInteger = TINYINT MSTime = TIME MSSmallDateTime = SMALLDATETIME MSDateTime2 = DATETIME2 MSDateTimeOffset = DATETIMEOFFSET MSText = TEXT MSNText = NTEXT MSString = VARCHAR MSNVarchar = NVARCHAR MSChar = CHAR MSNChar = NCHAR MSBinary = BINARY MSVarBinary = VARBINARY MSImage = IMAGE MSBit = BIT MSMoney = MONEY MSSmallMoney = SMALLMONEY MSUniqueIdentifier = UNIQUEIDENTIFIER MSVariant = SQL_VARIANT ischema_names = { 'int': INTEGER, 'bigint': BIGINT, 'smallint': SMALLINT, 'tinyint': TINYINT, 'varchar': VARCHAR, 'nvarchar': NVARCHAR, 'char': CHAR, 'nchar': NCHAR, 'text': TEXT, 'ntext': NTEXT, 'decimal': DECIMAL, 'numeric': NUMERIC, 'float': FLOAT, 'datetime': DATETIME, 'datetime2': DATETIME2, 'datetimeoffset': DATETIMEOFFSET, 'date': DATE, 'time': TIME, 'smalldatetime': SMALLDATETIME, 'binary': BINARY, 'varbinary': VARBINARY, 'bit': BIT, 'real': REAL, 'image': IMAGE, 'timestamp': TIMESTAMP, 'money': MONEY, 'smallmoney': SMALLMONEY, 'uniqueidentifier': UNIQUEIDENTIFIER, 'sql_variant': SQL_VARIANT, } class MSTypeCompiler(compiler.GenericTypeCompiler): def _extend(self, spec, type_, length=None): """Extend a string-type declaration with standard SQL COLLATE annotations. """ if getattr(type_, 'collation', None): collation = 'COLLATE %s' % type_.collation else: collation = None if not length: length = type_.length if length: spec = spec + "(%s)" % length return ' '.join([c for c in (spec, collation) if c is not None]) def visit_FLOAT(self, type_): precision = getattr(type_, 'precision', None) if precision is None: return "FLOAT" else: return "FLOAT(%(precision)s)" % {'precision': precision} def visit_TINYINT(self, type_): return "TINYINT" def visit_DATETIMEOFFSET(self, type_): if type_.precision: return "DATETIMEOFFSET(%s)" % type_.precision else: return "DATETIMEOFFSET" def visit_TIME(self, type_): precision = getattr(type_, 'precision', None) if precision: return "TIME(%s)" % precision else: return "TIME" def visit_DATETIME2(self, type_): precision = getattr(type_, 'precision', None) if precision: return "DATETIME2(%s)" % precision else: return "DATETIME2" def visit_SMALLDATETIME(self, type_): return "SMALLDATETIME" def visit_unicode(self, type_): return self.visit_NVARCHAR(type_) def visit_unicode_text(self, type_): return self.visit_NTEXT(type_) def visit_NTEXT(self, type_): return self._extend("NTEXT", type_) def visit_TEXT(self, type_): return self._extend("TEXT", type_) def visit_VARCHAR(self, type_): return self._extend("VARCHAR", type_, length=type_.length or 'max') def visit_CHAR(self, type_): return self._extend("CHAR", type_) def visit_NCHAR(self, type_): return self._extend("NCHAR", type_) def visit_NVARCHAR(self, type_): return self._extend("NVARCHAR", type_, length=type_.length or 'max') def visit_date(self, type_): if self.dialect.server_version_info < MS_2008_VERSION: return self.visit_DATETIME(type_) else: return self.visit_DATE(type_) def visit_time(self, type_): if self.dialect.server_version_info < MS_2008_VERSION: return self.visit_DATETIME(type_) else: return self.visit_TIME(type_) def visit_large_binary(self, type_): return self.visit_IMAGE(type_) def visit_IMAGE(self, type_): return "IMAGE" def visit_VARBINARY(self, type_): return self._extend( "VARBINARY", type_, length=type_.length or 'max') def visit_boolean(self, type_): return self.visit_BIT(type_) def visit_BIT(self, type_): return "BIT" def visit_MONEY(self, type_): return "MONEY" def visit_SMALLMONEY(self, type_): return 'SMALLMONEY' def 
visit_UNIQUEIDENTIFIER(self, type_): return "UNIQUEIDENTIFIER" def visit_SQL_VARIANT(self, type_): return 'SQL_VARIANT' class MSExecutionContext(default.DefaultExecutionContext): _enable_identity_insert = False _select_lastrowid = False _result_proxy = None _lastrowid = None def pre_exec(self): """Activate IDENTITY_INSERT if needed.""" if self.isinsert: tbl = self.compiled.statement.table seq_column = tbl._autoincrement_column insert_has_sequence = seq_column is not None if insert_has_sequence: self._enable_identity_insert = \ seq_column.key in self.compiled_parameters[0] else: self._enable_identity_insert = False self._select_lastrowid = insert_has_sequence and \ not self.compiled.returning and \ not self._enable_identity_insert and \ not self.executemany if self._enable_identity_insert: self.root_connection._cursor_execute(self.cursor, "SET IDENTITY_INSERT %s ON" % self.dialect.identifier_preparer.format_table(tbl), (), self) def post_exec(self): """Disable IDENTITY_INSERT if enabled.""" conn = self.root_connection if self._select_lastrowid: if self.dialect.use_scope_identity: conn._cursor_execute(self.cursor, "SELECT scope_identity() AS lastrowid", (), self) else: conn._cursor_execute(self.cursor, "SELECT @@identity AS lastrowid", (), self) # fetchall() ensures the cursor is consumed without closing it row = self.cursor.fetchall()[0] self._lastrowid = int(row[0]) if (self.isinsert or self.isupdate or self.isdelete) and \ self.compiled.returning: self._result_proxy = engine.FullyBufferedResultProxy(self) if self._enable_identity_insert: conn._cursor_execute(self.cursor, "SET IDENTITY_INSERT %s OFF" % self.dialect.identifier_preparer. format_table(self.compiled.statement.table), (), self) def get_lastrowid(self): return self._lastrowid def handle_dbapi_exception(self, e): if self._enable_identity_insert: try: self.cursor.execute( "SET IDENTITY_INSERT %s OFF" % self.dialect.identifier_preparer.\ format_table(self.compiled.statement.table) ) except: pass def get_result_proxy(self): if self._result_proxy: return self._result_proxy else: return engine.ResultProxy(self) class MSSQLCompiler(compiler.SQLCompiler): returning_precedes_values = True extract_map = util.update_copy( compiler.SQLCompiler.extract_map, { 'doy': 'dayofyear', 'dow': 'weekday', 'milliseconds': 'millisecond', 'microseconds': 'microsecond' }) def __init__(self, *args, **kwargs): self.tablealiases = {} super(MSSQLCompiler, self).__init__(*args, **kwargs) def visit_now_func(self, fn, **kw): return "CURRENT_TIMESTAMP" def visit_current_date_func(self, fn, **kw): return "GETDATE()" def visit_length_func(self, fn, **kw): return "LEN%s" % self.function_argspec(fn, **kw) def visit_char_length_func(self, fn, **kw): return "LEN%s" % self.function_argspec(fn, **kw) def visit_concat_op_binary(self, binary, operator, **kw): return "%s + %s" % \ (self.process(binary.left, **kw), self.process(binary.right, **kw)) def visit_true(self, expr, **kw): return '1' def visit_false(self, expr, **kw): return '0' def visit_match_op_binary(self, binary, operator, **kw): return "CONTAINS (%s, %s)" % ( self.process(binary.left, **kw), self.process(binary.right, **kw)) def get_select_precolumns(self, select): """ MS-SQL puts TOP, it's version of LIMIT here """ if select._distinct or select._limit is not None: s = select._distinct and "DISTINCT " or "" # ODBC drivers and possibly others # don't support bind params in the SELECT clause on SQL Server. # so have to use literal here. 
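# Illustration: a statement such as select([some_table]).limit(10) with no
# offset therefore renders its limit inline, e.g. "SELECT TOP 10 ...",
# rather than as a bound parameter.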
if select._limit is not None: if not select._offset: s += "TOP %d " % select._limit return s return compiler.SQLCompiler.get_select_precolumns(self, select) def get_from_hint_text(self, table, text): return text def get_crud_hint_text(self, table, text): return text def limit_clause(self, select): # Limit in mssql is after the select keyword return "" def visit_select(self, select, **kwargs): """Look for ``LIMIT`` and OFFSET in a select statement, and if so tries to wrap it in a subquery with ``row_number()`` criterion. """ if select._offset and not getattr(select, '_mssql_visit', None): # to use ROW_NUMBER(), an ORDER BY is required. if not select._order_by_clause.clauses: raise exc.CompileError('MSSQL requires an order_by when ' 'using an offset.') _offset = select._offset _limit = select._limit _order_by_clauses = select._order_by_clause.clauses select = select._generate() select._mssql_visit = True select = select.column( sql.func.ROW_NUMBER().over(order_by=_order_by_clauses) .label("mssql_rn") ).order_by(None).alias() mssql_rn = sql.column('mssql_rn') limitselect = sql.select([c for c in select.c if c.key != 'mssql_rn']) limitselect.append_whereclause(mssql_rn > _offset) if _limit is not None: limitselect.append_whereclause(mssql_rn <= (_limit + _offset)) return self.process(limitselect, iswrapper=True, **kwargs) else: return compiler.SQLCompiler.visit_select(self, select, **kwargs) def _schema_aliased_table(self, table): if getattr(table, 'schema', None) is not None: if table not in self.tablealiases: self.tablealiases[table] = table.alias() return self.tablealiases[table] else: return None def visit_table(self, table, mssql_aliased=False, iscrud=False, **kwargs): if mssql_aliased is table or iscrud: return super(MSSQLCompiler, self).visit_table(table, **kwargs) # alias schema-qualified tables alias = self._schema_aliased_table(table) if alias is not None: return self.process(alias, mssql_aliased=table, **kwargs) else: return super(MSSQLCompiler, self).visit_table(table, **kwargs) def visit_alias(self, alias, **kwargs): # translate for schema-qualified table aliases kwargs['mssql_aliased'] = alias.original return super(MSSQLCompiler, self).visit_alias(alias, **kwargs) def visit_extract(self, extract, **kw): field = self.extract_map.get(extract.field, extract.field) return 'DATEPART("%s", %s)' % \ (field, self.process(extract.expr, **kw)) def visit_savepoint(self, savepoint_stmt): return "SAVE TRANSACTION %s" % self.preparer.format_savepoint(savepoint_stmt) def visit_rollback_to_savepoint(self, savepoint_stmt): return ("ROLLBACK TRANSACTION %s" % self.preparer.format_savepoint(savepoint_stmt)) def visit_column(self, column, add_to_result_map=None, **kwargs): if column.table is not None and \ (not self.isupdate and not self.isdelete) or self.is_subquery(): # translate for schema-qualified table aliases t = self._schema_aliased_table(column.table) if t is not None: converted = expression._corresponding_column_or_error( t, column) if add_to_result_map is not None: add_to_result_map( column.name, column.name, (column, column.name, column.key), column.type ) return super(MSSQLCompiler, self).\ visit_column(converted, **kwargs) return super(MSSQLCompiler, self).visit_column( column, add_to_result_map=add_to_result_map, **kwargs) def visit_binary(self, binary, **kwargs): """Move bind parameters to the right-hand side of an operator, where possible. 
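For example (an illustrative sketch), a criterion constructed as ``literal(5) == sometable.c.somecolumn`` is emitted with the column on the left and the bound value on the right, i.e. as ``sometable.somecolumn = ?`` under a qmark paramstyle, rather than ``? = sometable.somecolumn``.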
""" if ( isinstance(binary.left, expression.BindParameter) and binary.operator == operator.eq and not isinstance(binary.right, expression.BindParameter) ): return self.process( expression.BinaryExpression(binary.right, binary.left, binary.operator), **kwargs) return super(MSSQLCompiler, self).visit_binary(binary, **kwargs) def returning_clause(self, stmt, returning_cols): if self.isinsert or self.isupdate: target = stmt.table.alias("inserted") else: target = stmt.table.alias("deleted") adapter = sql_util.ClauseAdapter(target) columns = [ self._label_select_column(None, adapter.traverse(c), True, False, {}) for c in expression._select_iterables(returning_cols) ] return 'OUTPUT ' + ', '.join(columns) def get_cte_preamble(self, recursive): # SQL Server finds it too inconvenient to accept # an entirely optional, SQL standard specified, # "RECURSIVE" word with their "WITH", # so here we go return "WITH" def label_select_column(self, select, column, asfrom): if isinstance(column, expression.Function): return column.label(None) else: return super(MSSQLCompiler, self).\ label_select_column(select, column, asfrom) def for_update_clause(self, select): # "FOR UPDATE" is only allowed on "DECLARE CURSOR" which # SQLAlchemy doesn't use return '' def order_by_clause(self, select, **kw): order_by = self.process(select._order_by_clause, **kw) # MSSQL only allows ORDER BY in subqueries if there is a LIMIT if order_by and (not self.is_subquery() or select._limit): return " ORDER BY " + order_by else: return "" def update_from_clause(self, update_stmt, from_table, extra_froms, from_hints, **kw): """Render the UPDATE..FROM clause specific to MSSQL. In MSSQL, if the UPDATE statement involves an alias of the table to be updated, then the table itself must be added to the FROM list as well. Otherwise, it is optional. Here, we add it regardless. """ return "FROM " + ', '.join( t._compiler_dispatch(self, asfrom=True, fromhints=from_hints, **kw) for t in [from_table] + extra_froms) class MSSQLStrictCompiler(MSSQLCompiler): """A subclass of MSSQLCompiler which disables the usage of bind parameters where not allowed natively by MS-SQL. A dialect may use this compiler on a platform where native binds are used. """ ansi_bind_rules = True def visit_in_op_binary(self, binary, operator, **kw): kw['literal_binds'] = True return "%s IN %s" % ( self.process(binary.left, **kw), self.process(binary.right, **kw) ) def visit_notin_op_binary(self, binary, operator, **kw): kw['literal_binds'] = True return "%s NOT IN %s" % ( self.process(binary.left, **kw), self.process(binary.right, **kw) ) def render_literal_value(self, value, type_): """ For date and datetime values, convert to a string format acceptable to MSSQL. That seems to be the so-called ODBC canonical date format which looks like this: yyyy-mm-dd hh:mi:ss.mmm(24h) For other data types, call the base class implementation. """ # datetime and date are both subclasses of datetime.date if issubclass(type(value), datetime.date): # SQL Server wants single quotes around the date string. 
return "'" + str(value) + "'" else: return super(MSSQLStrictCompiler, self).\ render_literal_value(value, type_) class MSDDLCompiler(compiler.DDLCompiler): def get_column_specification(self, column, **kwargs): colspec = (self.preparer.format_column(column) + " " + self.dialect.type_compiler.process(column.type)) if column.nullable is not None: if not column.nullable or column.primary_key or \ isinstance(column.default, sa_schema.Sequence): colspec += " NOT NULL" else: colspec += " NULL" if column.table is None: raise exc.CompileError( "mssql requires Table-bound columns " "in order to generate DDL") # install an IDENTITY Sequence if we either a sequence or an implicit IDENTITY column if isinstance(column.default, sa_schema.Sequence): if column.default.start == 0: start = 0 else: start = column.default.start or 1 colspec += " IDENTITY(%s,%s)" % (start, column.default.increment or 1) elif column is column.table._autoincrement_column: colspec += " IDENTITY(1,1)" else: default = self.get_column_default_string(column) if default is not None: colspec += " DEFAULT " + default return colspec def visit_create_index(self, create, include_schema=False): index = create.element self._verify_index_table(index) preparer = self.preparer text = "CREATE " if index.unique: text += "UNIQUE " # handle clustering option if index.kwargs.get("mssql_clustered"): text += "CLUSTERED " text += "INDEX %s ON %s (%s)" \ % ( self._prepared_index_name(index, include_schema=include_schema), preparer.format_table(index.table), ', '.join( self.sql_compiler.process(expr, include_table=False, literal_binds=True) for expr in index.expressions) ) # handle other included columns if index.kwargs.get("mssql_include"): inclusions = [index.table.c[col] if isinstance(col, basestring) else col for col in index.kwargs["mssql_include"]] text += " INCLUDE (%s)" \ % ', '.join([preparer.quote(c.name, c.quote) for c in inclusions]) return text def visit_drop_index(self, drop): return "\nDROP INDEX %s ON %s" % ( self._prepared_index_name(drop.element, include_schema=False), self.preparer.format_table(drop.element.table) ) class MSIdentifierPreparer(compiler.IdentifierPreparer): reserved_words = RESERVED_WORDS def __init__(self, dialect): super(MSIdentifierPreparer, self).__init__(dialect, initial_quote='[', final_quote=']') def _escape_identifier(self, value): return value def quote_schema(self, schema, force=True): """Prepare a quoted table and schema name.""" result = '.'.join([self.quote(x, force) for x in schema.split('.')]) return result def _db_plus_owner_listing(fn): def wrap(dialect, connection, schema=None, **kw): dbname, owner = _owner_plus_db(dialect, schema) return _switch_db(dbname, connection, fn, dialect, connection, dbname, owner, schema, **kw) return update_wrapper(wrap, fn) def _db_plus_owner(fn): def wrap(dialect, connection, tablename, schema=None, **kw): dbname, owner = _owner_plus_db(dialect, schema) return _switch_db(dbname, connection, fn, dialect, connection, tablename, dbname, owner, schema, **kw) return update_wrapper(wrap, fn) def _switch_db(dbname, connection, fn, *arg, **kw): if dbname: current_db = connection.scalar("select db_name()") connection.execute("use %s" % dbname) try: return fn(*arg, **kw) finally: if dbname: connection.execute("use %s" % current_db) def _owner_plus_db(dialect, schema): if not schema: return None, dialect.default_schema_name elif "." 
in schema: return schema.split(".", 1) else: return None, schema class MSDialect(default.DefaultDialect): name = 'mssql' supports_default_values = True supports_empty_insert = False execution_ctx_cls = MSExecutionContext use_scope_identity = True max_identifier_length = 128 schema_name = "dbo" colspecs = { sqltypes.DateTime: _MSDateTime, sqltypes.Date: _MSDate, sqltypes.Time: TIME, } ischema_names = ischema_names supports_native_boolean = False supports_unicode_binds = True postfetch_lastrowid = True server_version_info = () statement_compiler = MSSQLCompiler ddl_compiler = MSDDLCompiler type_compiler = MSTypeCompiler preparer = MSIdentifierPreparer def __init__(self, query_timeout=None, use_scope_identity=True, max_identifier_length=None, schema_name=u"dbo", **opts): self.query_timeout = int(query_timeout or 0) self.schema_name = schema_name self.use_scope_identity = use_scope_identity self.max_identifier_length = int(max_identifier_length or 0) or \ self.max_identifier_length super(MSDialect, self).__init__(**opts) def do_savepoint(self, connection, name): # give the DBAPI a push connection.execute("IF @@TRANCOUNT = 0 BEGIN TRANSACTION") super(MSDialect, self).do_savepoint(connection, name) def do_release_savepoint(self, connection, name): # SQL Server does not support RELEASE SAVEPOINT pass def initialize(self, connection): super(MSDialect, self).initialize(connection) if self.server_version_info[0] not in range(8, 17): # FreeTDS with version 4.2 seems to report here # a number like "95.10.255". Don't know what # that is. So emit warning. util.warn( "Unrecognized server version info '%s'. Version specific " "behaviors may not function properly. If using ODBC " "with FreeTDS, ensure server version 7.0 or 8.0, not 4.2, " "is configured in the FreeTDS configuration." 
% ".".join(str(x) for x in self.server_version_info)) if self.server_version_info >= MS_2005_VERSION and \ 'implicit_returning' not in self.__dict__: self.implicit_returning = True def _get_default_schema_name(self, connection): user_name = connection.scalar("SELECT user_name()") if user_name is not None: # now, get the default schema query = sql.text(""" SELECT default_schema_name FROM sys.database_principals WHERE name = :name AND type = 'S' """) try: default_schema_name = connection.scalar(query, name=user_name) if default_schema_name is not None: return unicode(default_schema_name) except: pass return self.schema_name def _unicode_cast(self, column): if self.server_version_info >= MS_2005_VERSION: return cast(column, NVARCHAR(_warn_on_bytestring=False)) else: return column @_db_plus_owner def has_table(self, connection, tablename, dbname, owner, schema): columns = ischema.columns whereclause = self._unicode_cast(columns.c.table_name) == tablename if owner: whereclause = sql.and_(whereclause, columns.c.table_schema == owner) s = sql.select([columns], whereclause) c = connection.execute(s) return c.first() is not None @reflection.cache def get_schema_names(self, connection, **kw): s = sql.select([ischema.schemata.c.schema_name], order_by=[ischema.schemata.c.schema_name] ) schema_names = [r[0] for r in connection.execute(s)] return schema_names @reflection.cache @_db_plus_owner_listing def get_table_names(self, connection, dbname, owner, schema, **kw): tables = ischema.tables s = sql.select([tables.c.table_name], sql.and_( tables.c.table_schema == owner, tables.c.table_type == u'BASE TABLE' ), order_by=[tables.c.table_name] ) table_names = [r[0] for r in connection.execute(s)] return table_names @reflection.cache @_db_plus_owner_listing def get_view_names(self, connection, dbname, owner, schema, **kw): tables = ischema.tables s = sql.select([tables.c.table_name], sql.and_( tables.c.table_schema == owner, tables.c.table_type == u'VIEW' ), order_by=[tables.c.table_name] ) view_names = [r[0] for r in connection.execute(s)] return view_names @reflection.cache @_db_plus_owner def get_indexes(self, connection, tablename, dbname, owner, schema, **kw): # using system catalogs, don't support index reflection # below MS 2005 if self.server_version_info < MS_2005_VERSION: return [] rp = connection.execute( sql.text("select ind.index_id, ind.is_unique, ind.name " "from sys.indexes as ind join sys.tables as tab on " "ind.object_id=tab.object_id " "join sys.schemas as sch on sch.schema_id=tab.schema_id " "where tab.name = :tabname " "and sch.name=:schname " "and ind.is_primary_key=0", bindparams=[ sql.bindparam('tabname', tablename, sqltypes.String(convert_unicode=True)), sql.bindparam('schname', owner, sqltypes.String(convert_unicode=True)) ], typemap={ 'name': sqltypes.Unicode() } ) ) indexes = {} for row in rp: indexes[row['index_id']] = { 'name': row['name'], 'unique': row['is_unique'] == 1, 'column_names': [] } rp = connection.execute( sql.text( "select ind_col.index_id, ind_col.object_id, col.name " "from sys.columns as col " "join sys.tables as tab on tab.object_id=col.object_id " "join sys.index_columns as ind_col on " "(ind_col.column_id=col.column_id and " "ind_col.object_id=tab.object_id) " "join sys.schemas as sch on sch.schema_id=tab.schema_id " "where tab.name=:tabname " "and sch.name=:schname", bindparams=[ sql.bindparam('tabname', tablename, sqltypes.String(convert_unicode=True)), sql.bindparam('schname', owner, sqltypes.String(convert_unicode=True)) ], typemap={'name': 
sqltypes.Unicode()} ), ) for row in rp: if row['index_id'] in indexes: indexes[row['index_id']]['column_names'].append(row['name']) return indexes.values() @reflection.cache @_db_plus_owner def get_view_definition(self, connection, viewname, dbname, owner, schema, **kw): rp = connection.execute( sql.text( "select definition from sys.sql_modules as mod, " "sys.views as views, " "sys.schemas as sch" " where " "mod.object_id=views.object_id and " "views.schema_id=sch.schema_id and " "views.name=:viewname and sch.name=:schname", bindparams=[ sql.bindparam('viewname', viewname, sqltypes.String(convert_unicode=True)), sql.bindparam('schname', owner, sqltypes.String(convert_unicode=True)) ] ) ) if rp: view_def = rp.scalar() return view_def @reflection.cache @_db_plus_owner def get_columns(self, connection, tablename, dbname, owner, schema, **kw): # Get base columns columns = ischema.columns if owner: whereclause = sql.and_(columns.c.table_name == tablename, columns.c.table_schema == owner) else: whereclause = columns.c.table_name == tablename s = sql.select([columns], whereclause, order_by=[columns.c.ordinal_position]) c = connection.execute(s) cols = [] while True: row = c.fetchone() if row is None: break (name, type, nullable, charlen, numericprec, numericscale, default, collation) = ( row[columns.c.column_name], row[columns.c.data_type], row[columns.c.is_nullable] == 'YES', row[columns.c.character_maximum_length], row[columns.c.numeric_precision], row[columns.c.numeric_scale], row[columns.c.column_default], row[columns.c.collation_name] ) coltype = self.ischema_names.get(type, None) kwargs = {} if coltype in (MSString, MSChar, MSNVarchar, MSNChar, MSText, MSNText, MSBinary, MSVarBinary, sqltypes.LargeBinary): kwargs['length'] = charlen if collation: kwargs['collation'] = collation if coltype == MSText or \ (coltype in (MSString, MSNVarchar) and charlen == -1): kwargs.pop('length') if coltype is None: util.warn( "Did not recognize type '%s' of column '%s'" % (type, name)) coltype = sqltypes.NULLTYPE else: if issubclass(coltype, sqltypes.Numeric) and \ coltype is not MSReal: kwargs['scale'] = numericscale kwargs['precision'] = numericprec coltype = coltype(**kwargs) cdict = { 'name': name, 'type': coltype, 'nullable': nullable, 'default': default, 'autoincrement': False, } cols.append(cdict) # autoincrement and identity colmap = {} for col in cols: colmap[col['name']] = col # We also run an sp_columns to check for identity columns: cursor = connection.execute("sp_columns @table_name = '%s', " "@table_owner = '%s'" % (tablename, owner)) ic = None while True: row = cursor.fetchone() if row is None: break (col_name, type_name) = row[3], row[5] if type_name.endswith("identity") and col_name in colmap: ic = col_name colmap[col_name]['autoincrement'] = True colmap[col_name]['sequence'] = dict( name='%s_identity' % col_name) break cursor.close() if ic is not None and self.server_version_info >= MS_2005_VERSION: table_fullname = "%s.%s" % (owner, tablename) cursor = connection.execute( "select ident_seed('%s'), ident_incr('%s')" % (table_fullname, table_fullname) ) row = cursor.first() if row is not None and row[0] is not None: colmap[ic]['sequence'].update({ 'start': int(row[0]), 'increment': int(row[1]) }) return cols @reflection.cache @_db_plus_owner def get_pk_constraint(self, connection, tablename, dbname, owner, schema, **kw): pkeys = [] TC = ischema.constraints C = ischema.key_constraints.alias('C') # Primary key constraints s = sql.select([C.c.column_name, TC.c.constraint_type, 
C.c.constraint_name], sql.and_(TC.c.constraint_name == C.c.constraint_name, TC.c.table_schema == C.c.table_schema, C.c.table_name == tablename, C.c.table_schema == owner) ) c = connection.execute(s) constraint_name = None for row in c: if 'PRIMARY' in row[TC.c.constraint_type.name]: pkeys.append(row[0]) if constraint_name is None: constraint_name = row[C.c.constraint_name.name] return {'constrained_columns': pkeys, 'name': constraint_name} @reflection.cache @_db_plus_owner def get_foreign_keys(self, connection, tablename, dbname, owner, schema, **kw): RR = ischema.ref_constraints C = ischema.key_constraints.alias('C') R = ischema.key_constraints.alias('R') # Foreign key constraints s = sql.select([C.c.column_name, R.c.table_schema, R.c.table_name, R.c.column_name, RR.c.constraint_name, RR.c.match_option, RR.c.update_rule, RR.c.delete_rule], sql.and_(C.c.table_name == tablename, C.c.table_schema == owner, C.c.constraint_name == RR.c.constraint_name, R.c.constraint_name == RR.c.unique_constraint_name, C.c.ordinal_position == R.c.ordinal_position ), order_by=[RR.c.constraint_name, R.c.ordinal_position] ) # group rows by constraint ID, to handle multi-column FKs fkeys = [] fknm, scols, rcols = (None, [], []) def fkey_rec(): return { 'name': None, 'constrained_columns': [], 'referred_schema': None, 'referred_table': None, 'referred_columns': [] } fkeys = util.defaultdict(fkey_rec) for r in connection.execute(s).fetchall(): scol, rschema, rtbl, rcol, rfknm, fkmatch, fkuprule, fkdelrule = r rec = fkeys[rfknm] rec['name'] = rfknm if not rec['referred_table']: rec['referred_table'] = rtbl if schema is not None or owner != rschema: if dbname: rschema = dbname + "." + rschema rec['referred_schema'] = rschema local_cols, remote_cols = \ rec['constrained_columns'],\ rec['referred_columns'] local_cols.append(scol) remote_cols.append(rcol) return fkeys.values() SQLAlchemy-0.8.4/lib/sqlalchemy/dialects/mssql/information_schema.py0000644000076500000240000001172612251150015026306 0ustar classicstaff00000000000000# mssql/information_schema.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php # TODO: should be using the sys. catalog with SQL Server, not information schema from ... import Table, MetaData, Column from ...types import String, Unicode, Integer, TypeDecorator from ... import cast from ... import util from ...sql import expression from ...ext.compiler import compiles ischema = MetaData() class CoerceUnicode(TypeDecorator): impl = Unicode def process_bind_param(self, value, dialect): # Py2K if isinstance(value, str): value = value.decode(dialect.encoding) # end Py2K return value def bind_expression(self, bindvalue): return _cast_on_2005(bindvalue) class _cast_on_2005(expression.ColumnElement): def __init__(self, bindvalue): self.bindvalue = bindvalue @compiles(_cast_on_2005) def _compile(element, compiler, **kw): from . 
import base if compiler.dialect.server_version_info < base.MS_2005_VERSION: return compiler.process(element.bindvalue, **kw) else: return compiler.process(cast(element.bindvalue, Unicode), **kw) schemata = Table("SCHEMATA", ischema, Column("CATALOG_NAME", CoerceUnicode, key="catalog_name"), Column("SCHEMA_NAME", CoerceUnicode, key="schema_name"), Column("SCHEMA_OWNER", CoerceUnicode, key="schema_owner"), schema="INFORMATION_SCHEMA") tables = Table("TABLES", ischema, Column("TABLE_CATALOG", CoerceUnicode, key="table_catalog"), Column("TABLE_SCHEMA", CoerceUnicode, key="table_schema"), Column("TABLE_NAME", CoerceUnicode, key="table_name"), Column("TABLE_TYPE", String(convert_unicode=True), key="table_type"), schema="INFORMATION_SCHEMA") columns = Table("COLUMNS", ischema, Column("TABLE_SCHEMA", CoerceUnicode, key="table_schema"), Column("TABLE_NAME", CoerceUnicode, key="table_name"), Column("COLUMN_NAME", CoerceUnicode, key="column_name"), Column("IS_NULLABLE", Integer, key="is_nullable"), Column("DATA_TYPE", String, key="data_type"), Column("ORDINAL_POSITION", Integer, key="ordinal_position"), Column("CHARACTER_MAXIMUM_LENGTH", Integer, key="character_maximum_length"), Column("NUMERIC_PRECISION", Integer, key="numeric_precision"), Column("NUMERIC_SCALE", Integer, key="numeric_scale"), Column("COLUMN_DEFAULT", Integer, key="column_default"), Column("COLLATION_NAME", String, key="collation_name"), schema="INFORMATION_SCHEMA") constraints = Table("TABLE_CONSTRAINTS", ischema, Column("TABLE_SCHEMA", CoerceUnicode, key="table_schema"), Column("TABLE_NAME", CoerceUnicode, key="table_name"), Column("CONSTRAINT_NAME", CoerceUnicode, key="constraint_name"), Column("CONSTRAINT_TYPE", String(convert_unicode=True), key="constraint_type"), schema="INFORMATION_SCHEMA") column_constraints = Table("CONSTRAINT_COLUMN_USAGE", ischema, Column("TABLE_SCHEMA", CoerceUnicode, key="table_schema"), Column("TABLE_NAME", CoerceUnicode, key="table_name"), Column("COLUMN_NAME", CoerceUnicode, key="column_name"), Column("CONSTRAINT_NAME", CoerceUnicode, key="constraint_name"), schema="INFORMATION_SCHEMA") key_constraints = Table("KEY_COLUMN_USAGE", ischema, Column("TABLE_SCHEMA", CoerceUnicode, key="table_schema"), Column("TABLE_NAME", CoerceUnicode, key="table_name"), Column("COLUMN_NAME", CoerceUnicode, key="column_name"), Column("CONSTRAINT_NAME", CoerceUnicode, key="constraint_name"), Column("ORDINAL_POSITION", Integer, key="ordinal_position"), schema="INFORMATION_SCHEMA") ref_constraints = Table("REFERENTIAL_CONSTRAINTS", ischema, Column("CONSTRAINT_CATALOG", CoerceUnicode, key="constraint_catalog"), Column("CONSTRAINT_SCHEMA", CoerceUnicode, key="constraint_schema"), Column("CONSTRAINT_NAME", CoerceUnicode, key="constraint_name"), # TODO: is CATLOG misspelled ? 
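            # editorial note: the standard INFORMATION_SCHEMA spelling is
            # UNIQUE_CONSTRAINT_CATALOG; the reflection queries in base.py
            # never select this column, so the misspelling below appears
            # to be harmless.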
Column("UNIQUE_CONSTRAINT_CATLOG", CoerceUnicode, key="unique_constraint_catalog"), Column("UNIQUE_CONSTRAINT_SCHEMA", CoerceUnicode, key="unique_constraint_schema"), Column("UNIQUE_CONSTRAINT_NAME", CoerceUnicode, key="unique_constraint_name"), Column("MATCH_OPTION", String, key="match_option"), Column("UPDATE_RULE", String, key="update_rule"), Column("DELETE_RULE", String, key="delete_rule"), schema="INFORMATION_SCHEMA") views = Table("VIEWS", ischema, Column("TABLE_CATALOG", CoerceUnicode, key="table_catalog"), Column("TABLE_SCHEMA", CoerceUnicode, key="table_schema"), Column("TABLE_NAME", CoerceUnicode, key="table_name"), Column("VIEW_DEFINITION", CoerceUnicode, key="view_definition"), Column("CHECK_OPTION", String, key="check_option"), Column("IS_UPDATABLE", String, key="is_updatable"), schema="INFORMATION_SCHEMA") SQLAlchemy-0.8.4/lib/sqlalchemy/dialects/mssql/mxodbc.py0000644000076500000240000000747612251147171023735 0ustar classicstaff00000000000000# mssql/mxodbc.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """ .. dialect:: mssql+mxodbc :name: mxODBC :dbapi: mxodbc :connectstring: mssql+mxodbc://:@ :url: http://www.egenix.com/ Execution Modes --------------- mxODBC features two styles of statement execution, using the ``cursor.execute()`` and ``cursor.executedirect()`` methods (the second being an extension to the DBAPI specification). The former makes use of a particular API call specific to the SQL Server Native Client ODBC driver known SQLDescribeParam, while the latter does not. mxODBC apparently only makes repeated use of a single prepared statement when SQLDescribeParam is used. The advantage to prepared statement reuse is one of performance. The disadvantage is that SQLDescribeParam has a limited set of scenarios in which bind parameters are understood, including that they cannot be placed within the argument lists of function calls, anywhere outside the FROM, or even within subqueries within the FROM clause - making the usage of bind parameters within SELECT statements impossible for all but the most simplistic statements. For this reason, the mxODBC dialect uses the "native" mode by default only for INSERT, UPDATE, and DELETE statements, and uses the escaped string mode for all other statements. This behavior can be controlled via :meth:`~sqlalchemy.sql.expression.Executable.execution_options` using the ``native_odbc_execute`` flag with a value of ``True`` or ``False``, where a value of ``True`` will unconditionally use native bind parameters and a value of ``False`` will unconditionally use string-escaped parameters. """ from ... import types as sqltypes from ...connectors.mxodbc import MxODBCConnector from .pyodbc import MSExecutionContext_pyodbc, _MSNumeric_pyodbc from .base import (MSDialect, MSSQLStrictCompiler, _MSDateTime, _MSDate, _MSTime) class _MSNumeric_mxodbc(_MSNumeric_pyodbc): """Include pyodbc's numeric processor. 
""" class _MSDate_mxodbc(_MSDate): def bind_processor(self, dialect): def process(value): if value is not None: return "%s-%s-%s" % (value.year, value.month, value.day) else: return None return process class _MSTime_mxodbc(_MSTime): def bind_processor(self, dialect): def process(value): if value is not None: return "%s:%s:%s" % (value.hour, value.minute, value.second) else: return None return process class MSExecutionContext_mxodbc(MSExecutionContext_pyodbc): """ The pyodbc execution context is useful for enabling SELECT SCOPE_IDENTITY in cases where OUTPUT clause does not work (tables with insert triggers). """ #todo - investigate whether the pyodbc execution context # is really only being used in cases where OUTPUT # won't work. class MSDialect_mxodbc(MxODBCConnector, MSDialect): # this is only needed if "native ODBC" mode is used, # which is now disabled by default. #statement_compiler = MSSQLStrictCompiler execution_ctx_cls = MSExecutionContext_mxodbc # flag used by _MSNumeric_mxodbc _need_decimal_fix = True colspecs = { sqltypes.Numeric: _MSNumeric_mxodbc, sqltypes.DateTime: _MSDateTime, sqltypes.Date: _MSDate_mxodbc, sqltypes.Time: _MSTime_mxodbc, } def __init__(self, description_encoding=None, **params): super(MSDialect_mxodbc, self).__init__(**params) self.description_encoding = description_encoding dialect = MSDialect_mxodbc SQLAlchemy-0.8.4/lib/sqlalchemy/dialects/mssql/pymssql.py0000644000076500000240000000600212251147171024151 0ustar classicstaff00000000000000# mssql/pymssql.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """ .. dialect:: mssql+pymssql :name: pymssql :dbapi: pymssql :connectstring: mssql+pymssql://:@?charset=utf8 :url: http://pymssql.sourceforge.net/ Limitations ----------- pymssql inherits a lot of limitations from FreeTDS, including: * no support for multibyte schema identifiers * poor support for large decimals * poor support for binary fields * poor support for VARCHAR/CHAR fields over 255 characters Please consult the pymssql documentation for further information. """ from .base import MSDialect from ... import types as sqltypes, util, processors import re class _MSNumeric_pymssql(sqltypes.Numeric): def result_processor(self, dialect, type_): if not self.asdecimal: return processors.to_float else: return sqltypes.Numeric.result_processor(self, dialect, type_) class MSDialect_pymssql(MSDialect): supports_sane_rowcount = False driver = 'pymssql' colspecs = util.update_copy( MSDialect.colspecs, { sqltypes.Numeric: _MSNumeric_pymssql, sqltypes.Float: sqltypes.Float, } ) @classmethod def dbapi(cls): module = __import__('pymssql') # pymmsql doesn't have a Binary method. we use string # TODO: monkeypatching here is less than ideal module.Binary = lambda x: x if hasattr(x, 'decode') else str(x) client_ver = tuple(int(x) for x in module.__version__.split(".")) if client_ver < (1, ): util.warn("The pymssql dialect expects at least " "the 1.0 series of the pymssql DBAPI.") return module def __init__(self, **params): super(MSDialect_pymssql, self).__init__(**params) self.use_scope_identity = True def _get_server_version_info(self, connection): vers = connection.scalar("select @@version") m = re.match( r"Microsoft SQL Server.*? 
- (\d+).(\d+).(\d+).(\d+)", vers) if m: return tuple(int(x) for x in m.group(1, 2, 3, 4)) else: return None def create_connect_args(self, url): opts = url.translate_connect_args(username='user') opts.update(url.query) port = opts.pop('port', None) if port and 'host' in opts: opts['host'] = "%s:%s" % (opts['host'], port) return [[], opts] def is_disconnect(self, e, connection, cursor): for msg in ( "Adaptive Server connection timed out", "message 20003", # connection timeout "Error 10054", "Not connected to any MS SQL server", "Connection is closed" ): if msg in str(e): return True else: return False dialect = MSDialect_pymssql SQLAlchemy-0.8.4/lib/sqlalchemy/dialects/mssql/pyodbc.py0000644000076500000240000002203412251150015023713 0ustar classicstaff00000000000000# mssql/pyodbc.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """ .. dialect:: mssql+pyodbc :name: PyODBC :dbapi: pyodbc :connectstring: mssql+pyodbc://:@ :url: http://pypi.python.org/pypi/pyodbc/ Additional Connection Examples ------------------------------- Examples of pyodbc connection string URLs: * ``mssql+pyodbc://mydsn`` - connects using the specified DSN named ``mydsn``. The connection string that is created will appear like:: dsn=mydsn;Trusted_Connection=Yes * ``mssql+pyodbc://user:pass@mydsn`` - connects using the DSN named ``mydsn`` passing in the ``UID`` and ``PWD`` information. The connection string that is created will appear like:: dsn=mydsn;UID=user;PWD=pass * ``mssql+pyodbc://user:pass@mydsn/?LANGUAGE=us_english`` - connects using the DSN named ``mydsn`` passing in the ``UID`` and ``PWD`` information, plus the additional connection configuration option ``LANGUAGE``. The connection string that is created will appear like:: dsn=mydsn;UID=user;PWD=pass;LANGUAGE=us_english * ``mssql+pyodbc://user:pass@host/db`` - connects using a connection that would appear like:: DRIVER={SQL Server};Server=host;Database=db;UID=user;PWD=pass * ``mssql+pyodbc://user:pass@host:123/db`` - connects using a connection string which includes the port information using the comma syntax. This will create the following connection string:: DRIVER={SQL Server};Server=host,123;Database=db;UID=user;PWD=pass * ``mssql+pyodbc://user:pass@host/db?port=123`` - connects using a connection string that includes the port information as a separate ``port`` keyword. This will create the following connection string:: DRIVER={SQL Server};Server=host;Database=db;UID=user;PWD=pass;port=123 * ``mssql+pyodbc://user:pass@host/db?driver=MyDriver`` - connects using a connection string that includes a custom ODBC driver name. This will create the following connection string:: DRIVER={MyDriver};Server=host;Database=db;UID=user;PWD=pass If you require a connection string that is outside the options presented above, use the ``odbc_connect`` keyword to pass in a urlencoded connection string. What gets passed in will be urldecoded and passed directly. For example:: mssql+pyodbc:///?odbc_connect=dsn%3Dmydsn%3BDatabase%3Ddb would create the following connection string:: dsn=mydsn;Database=db Encoding your connection string can be easily accomplished through the python shell. 
For example:: >>> import urllib >>> urllib.quote_plus('dsn=mydsn;Database=db') 'dsn%3Dmydsn%3BDatabase%3Ddb' Unicode Binds ------------- The current state of PyODBC on a unix backend with FreeTDS and/or EasySoft is poor regarding unicode; different OS platforms and versions of UnixODBC versus IODBC versus FreeTDS/EasySoft versus PyODBC itself dramatically alter how strings are received. The PyODBC dialect attempts to use all the information it knows to determine whether or not a Python unicode literal can be passed directly to the PyODBC driver or not; while SQLAlchemy can encode these to bytestrings first, some users have reported that PyODBC mis-handles bytestrings for certain encodings and requires a Python unicode object, while the author has observed widespread cases where a Python unicode is completely misinterpreted by PyODBC, particularly when dealing with the information schema tables used in table reflection, and the value must first be encoded to a bytestring. It is for this reason that whether or not unicode literals for bound parameters be sent to PyODBC can be controlled using the ``supports_unicode_binds`` parameter to ``create_engine()``. When left at its default of ``None``, the PyODBC dialect will use its best guess as to whether or not the driver deals with unicode literals well. When ``False``, unicode literals will be encoded first, and when ``True`` unicode literals will be passed straight through. This is an interim flag that hopefully should not be needed when the unicode situation stabilizes for unix + PyODBC. .. versionadded:: 0.7.7 ``supports_unicode_binds`` parameter to ``create_engine()``\ . """ from .base import MSExecutionContext, MSDialect from ...connectors.pyodbc import PyODBCConnector from ... import types as sqltypes, util import decimal class _MSNumeric_pyodbc(sqltypes.Numeric): """Turns Decimals with adjusted() < 0 or > 7 into strings. The routines here are needed for older pyodbc versions as well as current mxODBC versions. """ def bind_processor(self, dialect): super_process = super(_MSNumeric_pyodbc, self).\ bind_processor(dialect) if not dialect._need_decimal_fix: return super_process def process(value): if self.asdecimal and \ isinstance(value, decimal.Decimal): adjusted = value.adjusted() if adjusted < 0: return self._small_dec_to_string(value) elif adjusted > 7: return self._large_dec_to_string(value) if super_process: return super_process(value) else: return value return process # these routines needed for older versions of pyodbc. # as of 2.1.8 this logic is integrated. def _small_dec_to_string(self, value): return "%s0.%s%s" % ( (value < 0 and '-' or ''), '0' * (abs(value.adjusted()) - 1), "".join([str(nint) for nint in value.as_tuple()[1]])) def _large_dec_to_string(self, value): _int = value.as_tuple()[1] if 'E' in str(value): result = "%s%s%s" % ( (value < 0 and '-' or ''), "".join([str(s) for s in _int]), "0" * (value.adjusted() - (len(_int) - 1))) else: if (len(_int) - 1) > value.adjusted(): result = "%s%s.%s" % ( (value < 0 and '-' or ''), "".join( [str(s) for s in _int][0:value.adjusted() + 1]), "".join( [str(s) for s in _int][value.adjusted() + 1:])) else: result = "%s%s" % ( (value < 0 and '-' or ''), "".join( [str(s) for s in _int][0:value.adjusted() + 1])) return result class MSExecutionContext_pyodbc(MSExecutionContext): _embedded_scope_identity = False def pre_exec(self): """where appropriate, issue "select scope_identity()" in the same statement. 
Background on why "scope_identity()" is preferable to "@@identity": http://msdn.microsoft.com/en-us/library/ms190315.aspx Background on why we attempt to embed "scope_identity()" into the same statement as the INSERT: http://code.google.com/p/pyodbc/wiki/FAQs#How_do_I_retrieve_autogenerated/identity_values? """ super(MSExecutionContext_pyodbc, self).pre_exec() # don't embed the scope_identity select into an # "INSERT .. DEFAULT VALUES" if self._select_lastrowid and \ self.dialect.use_scope_identity and \ len(self.parameters[0]): self._embedded_scope_identity = True self.statement += "; select scope_identity()" def post_exec(self): if self._embedded_scope_identity: # Fetch the last inserted id from the manipulated statement # We may have to skip over a number of result sets with # no data (due to triggers, etc.) while True: try: # fetchall() ensures the cursor is consumed # without closing it (FreeTDS particularly) row = self.cursor.fetchall()[0] break except self.dialect.dbapi.Error, e: # no way around this - nextset() consumes the previous set # so we need to just keep flipping self.cursor.nextset() self._lastrowid = int(row[0]) else: super(MSExecutionContext_pyodbc, self).post_exec() class MSDialect_pyodbc(PyODBCConnector, MSDialect): execution_ctx_cls = MSExecutionContext_pyodbc pyodbc_driver_name = 'SQL Server' colspecs = util.update_copy( MSDialect.colspecs, { sqltypes.Numeric: _MSNumeric_pyodbc } ) def __init__(self, description_encoding=None, **params): super(MSDialect_pyodbc, self).__init__(**params) self.description_encoding = description_encoding self.use_scope_identity = self.use_scope_identity and \ self.dbapi and \ hasattr(self.dbapi.Cursor, 'nextset') self._need_decimal_fix = self.dbapi and \ self._dbapi_version() < (2, 1, 8) dialect = MSDialect_pyodbc SQLAlchemy-0.8.4/lib/sqlalchemy/dialects/mssql/zxjdbc.py0000644000076500000240000000421012251147171023724 0ustar classicstaff00000000000000# mssql/zxjdbc.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """ .. dialect:: mssql+zxjdbc :name: zxJDBC for Jython :dbapi: zxjdbc :connectstring: mssql+zxjdbc://user:pass@host:port/dbname[?key=value&key=value...] :driverurl: http://jtds.sourceforge.net/ """ from ...connectors.zxJDBC import ZxJDBCConnector from .base import MSDialect, MSExecutionContext from ... 
import engine class MSExecutionContext_zxjdbc(MSExecutionContext): _embedded_scope_identity = False def pre_exec(self): super(MSExecutionContext_zxjdbc, self).pre_exec() # scope_identity after the fact returns null in jTDS so we must # embed it if self._select_lastrowid and self.dialect.use_scope_identity: self._embedded_scope_identity = True self.statement += "; SELECT scope_identity()" def post_exec(self): if self._embedded_scope_identity: while True: try: row = self.cursor.fetchall()[0] break except self.dialect.dbapi.Error: self.cursor.nextset() self._lastrowid = int(row[0]) if (self.isinsert or self.isupdate or self.isdelete) and \ self.compiled.returning: self._result_proxy = engine.FullyBufferedResultProxy(self) if self._enable_identity_insert: table = self.dialect.identifier_preparer.format_table( self.compiled.statement.table) self.cursor.execute("SET IDENTITY_INSERT %s OFF" % table) class MSDialect_zxjdbc(ZxJDBCConnector, MSDialect): jdbc_db_name = 'jtds:sqlserver' jdbc_driver_name = 'net.sourceforge.jtds.jdbc.Driver' execution_ctx_cls = MSExecutionContext_zxjdbc def _get_server_version_info(self, connection): return tuple( int(x) for x in connection.connection.dbversion.split('.') ) dialect = MSDialect_zxjdbc SQLAlchemy-0.8.4/lib/sqlalchemy/dialects/mysql/0000755000076500000240000000000012251151573022100 5ustar classicstaff00000000000000SQLAlchemy-0.8.4/lib/sqlalchemy/dialects/mysql/__init__.py0000644000076500000240000000226112251147171024211 0ustar classicstaff00000000000000# mysql/__init__.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php from . import base, mysqldb, oursql, \ pyodbc, zxjdbc, mysqlconnector, pymysql,\ gaerdbms, cymysql # default dialect base.dialect = mysqldb.dialect from .base import \ BIGINT, BINARY, BIT, BLOB, BOOLEAN, CHAR, DATE, DATETIME, \ DECIMAL, DOUBLE, ENUM, DECIMAL,\ FLOAT, INTEGER, INTEGER, LONGBLOB, LONGTEXT, MEDIUMBLOB, \ MEDIUMINT, MEDIUMTEXT, NCHAR, \ NVARCHAR, NUMERIC, SET, SMALLINT, REAL, TEXT, TIME, TIMESTAMP, \ TINYBLOB, TINYINT, TINYTEXT,\ VARBINARY, VARCHAR, YEAR, dialect __all__ = ( 'BIGINT', 'BINARY', 'BIT', 'BLOB', 'BOOLEAN', 'CHAR', 'DATE', 'DATETIME', 'DECIMAL', 'DOUBLE', 'ENUM', 'DECIMAL', 'FLOAT', 'INTEGER', 'INTEGER', 'LONGBLOB', 'LONGTEXT', 'MEDIUMBLOB', 'MEDIUMINT', 'MEDIUMTEXT', 'NCHAR', 'NVARCHAR', 'NUMERIC', 'SET', 'SMALLINT', 'REAL', 'TEXT', 'TIME', 'TIMESTAMP', 'TINYBLOB', 'TINYINT', 'TINYTEXT', 'VARBINARY', 'VARCHAR', 'YEAR', 'dialect' ) SQLAlchemy-0.8.4/lib/sqlalchemy/dialects/mysql/base.py0000644000076500000240000031375112251150015023364 0ustar classicstaff00000000000000# mysql/base.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """ .. dialect:: mysql :name: MySQL Supported Versions and Features ------------------------------- SQLAlchemy supports MySQL starting with version 4.1 through modern releases. However, no heroic measures are taken to work around major missing SQL features - if your server version does not support sub-selects, for example, they won't work in SQLAlchemy either. See the official MySQL documentation for detailed information about features supported in any given server release. .. 
_mysql_connection_timeouts: Connection Timeouts ------------------- MySQL features an automatic connection close behavior, for connections that have been idle for eight hours or more. To circumvent having this issue, use the ``pool_recycle`` option which controls the maximum age of any connection:: engine = create_engine('mysql+mysqldb://...', pool_recycle=3600) .. _mysql_storage_engines: Storage Engines --------------- Most MySQL server installations have a default table type of ``MyISAM``, a non-transactional table type. During a transaction, non-transactional storage engines do not participate and continue to store table changes in autocommit mode. For fully atomic transactions as well as support for foreign key constraints, all participating tables must use a transactional engine such as ``InnoDB``, ``Falcon``, ``SolidDB``, `PBXT`, etc. Storage engines can be elected when creating tables in SQLAlchemy by supplying a ``mysql_engine='whatever'`` to the ``Table`` constructor. Any MySQL table creation option can be specified in this syntax:: Table('mytable', metadata, Column('data', String(32)), mysql_engine='InnoDB', mysql_charset='utf8' ) .. seealso:: `The InnoDB Storage Engine `_ - on the MySQL website. Case Sensitivity and Table Reflection ------------------------------------- MySQL has inconsistent support for case-sensitive identifier names, basing support on specific details of the underlying operating system. However, it has been observed that no matter what case sensitivity behavior is present, the names of tables in foreign key declarations are *always* received from the database as all-lower case, making it impossible to accurately reflect a schema where inter-related tables use mixed-case identifier names. Therefore it is strongly advised that table names be declared as all lower case both within SQLAlchemy as well as on the MySQL database itself, especially if database reflection features are to be used. Transaction Isolation Level --------------------------- :func:`.create_engine` accepts an ``isolation_level`` parameter which results in the command ``SET SESSION TRANSACTION ISOLATION LEVEL `` being invoked for every new connection. Valid values for this parameter are ``READ COMMITTED``, ``READ UNCOMMITTED``, ``REPEATABLE READ``, and ``SERIALIZABLE``:: engine = create_engine( "mysql://scott:tiger@localhost/test", isolation_level="READ UNCOMMITTED" ) .. versionadded:: 0.7.6 Keys ---- Not all MySQL storage engines support foreign keys. For ``MyISAM`` and similar engines, the information loaded by table reflection will not include foreign keys. For these tables, you may supply a :class:`~sqlalchemy.ForeignKeyConstraint` at reflection time:: Table('mytable', metadata, ForeignKeyConstraint(['other_id'], ['othertable.other_id']), autoload=True ) When creating tables, SQLAlchemy will automatically set ``AUTO_INCREMENT`` on an integer primary key column:: >>> t = Table('mytable', metadata, ... Column('mytable_id', Integer, primary_key=True) ... ) >>> t.create() CREATE TABLE mytable ( id INTEGER NOT NULL AUTO_INCREMENT, PRIMARY KEY (id) ) You can disable this behavior by supplying ``autoincrement=False`` to the :class:`~sqlalchemy.Column`. 
This flag can also be used to enable auto-increment on a secondary column in a multi-column key for some storage engines:: Table('mytable', metadata, Column('gid', Integer, primary_key=True, autoincrement=False), Column('id', Integer, primary_key=True) ) Ansi Quoting Style ------------------ MySQL features two varieties of identifier "quoting style", one using backticks and the other using quotes, e.g. ```some_identifier``` vs. ``"some_identifier"``. All MySQL dialects detect which version is in use by checking the value of ``sql_mode`` when a connection is first established with a particular :class:`.Engine`. This quoting style comes into play when rendering table and column names as well as when reflecting existing database structures. The detection is entirely automatic and no special configuration is needed to use either quoting style. .. versionchanged:: 0.6 detection of ANSI quoting style is entirely automatic, there's no longer any end-user ``create_engine()`` options in this regard. MySQL SQL Extensions -------------------- Many of the MySQL SQL extensions are handled through SQLAlchemy's generic function and operator support:: table.select(table.c.password==func.md5('plaintext')) table.select(table.c.username.op('regexp')('^[a-d]')) And of course any valid MySQL statement can be executed as a string as well. Some limited direct support for MySQL extensions to SQL is currently available. * SELECT pragma:: select(..., prefixes=['HIGH_PRIORITY', 'SQL_SMALL_RESULT']) * UPDATE with LIMIT:: update(..., mysql_limit=10) rowcount Support ---------------- SQLAlchemy standardizes the DBAPI ``cursor.rowcount`` attribute to be the usual definition of "number of rows matched by an UPDATE or DELETE" statement. This is in contradiction to the default setting on most MySQL DBAPI drivers, which is "number of rows actually modified/deleted". For this reason, the SQLAlchemy MySQL dialects always set the ``constants.CLIENT.FOUND_ROWS`` flag, or whatever is equivalent for the DBAPI in use, on connect, unless the flag value is overridden using DBAPI-specific options (such as ``client_flag`` for the MySQL-Python driver, ``found_rows`` for the OurSQL driver). See also: :attr:`.ResultProxy.rowcount` CAST Support ------------ MySQL documents the CAST operator as available in version 4.0.2. When using the SQLAlchemy :func:`.cast` function, SQLAlchemy will not render the CAST token on MySQL before this version, based on server version detection, instead rendering the internal expression directly. CAST may still not be desirable on an early MySQL version post-4.0.2, as it didn't add all datatype support until 4.1.1. If your application falls into this narrow area, the behavior of CAST can be controlled using the :ref:`sqlalchemy.ext.compiler_toplevel` system, as per the recipe below:: from sqlalchemy.sql.expression import Cast from sqlalchemy.ext.compiler import compiles @compiles(Cast, 'mysql') def _check_mysql_version(element, compiler, **kw): if compiler.dialect.server_version_info < (4, 1, 0): return compiler.process(element.clause, **kw) else: return compiler.visit_cast(element, **kw) The above function, which only needs to be declared once within an application, overrides the compilation of the :func:`.cast` construct to check for version 4.1.0 before fully rendering CAST; else the internal element of the construct is rendered directly. .. _mysql_indexes: MySQL Specific Index Options ---------------------------- MySQL-specific extensions to the :class:`.Index` construct are available. 
Index Length ~~~~~~~~~~~~~ MySQL provides an option to create index entries with a certain length, where "length" refers to the number of characters or bytes in each value which will become part of the index. SQLAlchemy provides this feature via the ``mysql_length`` parameter:: Index('my_index', my_table.c.data, mysql_length=10) Index('a_b_idx', my_table.c.a, my_table.c.b, mysql_length={'a': 4, 'b': 9}) Prefix lengths are given in characters for nonbinary string types and in bytes for binary string types. The value passed to the keyword argument *must* be either an integer (and, thus, specify the same prefix length value for all columns of the index) or a dict in which keys are column names and values are prefix length values for corresponding columns. MySQL only allows a length for a column of an index if it is for a CHAR, VARCHAR, TEXT, BINARY, VARBINARY and BLOB. .. versionadded:: 0.8.2 ``mysql_length`` may now be specified as a dictionary for use with composite indexes. Index Types ~~~~~~~~~~~~~ Some MySQL storage engines permit you to specify an index type when creating an index or primary key constraint. SQLAlchemy provides this feature via the ``mysql_using`` parameter on :class:`.Index`:: Index('my_index', my_table.c.data, mysql_using='hash') As well as the ``mysql_using`` parameter on :class:`.PrimaryKeyConstraint`:: PrimaryKeyConstraint("data", mysql_using='hash') The value passed to the keyword argument will be simply passed through to the underlying CREATE INDEX or PRIMARY KEY clause, so it *must* be a valid index type for your MySQL storage engine. More information can be found at: http://dev.mysql.com/doc/refman/5.0/en/create-index.html http://dev.mysql.com/doc/refman/5.0/en/create-table.html .. _mysql_foreign_keys: MySQL Foreign Key Options ------------------------- MySQL does not support the foreign key arguments "DEFERRABLE", "INITIALLY", or "MATCH". Using the ``deferrable`` or ``initially`` keyword argument with :class:`.ForeignKeyConstraint` or :class:`.ForeignKey` will have the effect of these keywords being ignored in a DDL expression along with a warning, however this behavior **will change** in a future release. In order to use these keywords on a foreign key while having them ignored on a MySQL backend, use a custom compile rule:: from sqlalchemy.ext.compiler import compiles from sqlalchemy.schema import ForeignKeyConstraint @compiles(ForeignKeyConstraint, "mysql") def process(element, compiler, **kw): element.deferrable = element.initially = None return compiler.visit_foreign_key_constraint(element, **kw) .. versionchanged:: 0.8.3 - the MySQL backend will emit a warning when the the ``deferrable`` or ``initially`` keyword arguments of :class:`.ForeignKeyConstraint` and :class:`.ForeignKey` are used. The arguments will no longer be ignored in 0.9. The "MATCH" keyword is in fact more insidious, and in a future release will be explicitly disallowed by SQLAlchemy in conjunction with the MySQL backend. This argument is silently ignored by MySQL, but in addition has the effect of ON UPDATE and ON DELETE options also being ignored by the backend. Therefore MATCH should never be used with the MySQL backend; as is the case with DEFERRABLE and INITIALLY, custom compilation rules can be used to correct a MySQL ForeignKeyConstraint at DDL definition time. .. versionadded:: 0.8.3 - the MySQL backend will emit a warning when the ``match`` keyword is used with :class:`.ForeignKeyConstraint` or :class:`.ForeignKey`. This will be a :class:`.CompileError` in 0.9. 
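As a sketch only (an editorial illustration, not part of the original
documentation), the same compilation-rule approach shown above for
``deferrable`` and ``initially`` could also be used to strip the ``match``
keyword, assuming the constraint object exposes a ``match`` attribute::

    from sqlalchemy.ext.compiler import compiles
    from sqlalchemy.schema import ForeignKeyConstraint

    @compiles(ForeignKeyConstraint, "mysql")
    def process(element, compiler, **kw):
        # hypothetical: clear MATCH along with DEFERRABLE / INITIALLY
        # before the constraint DDL is rendered on the MySQL backend
        element.deferrable = element.initially = element.match = None
        return compiler.visit_foreign_key_constraint(element, **kw)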
""" import datetime import inspect import re import sys from ... import schema as sa_schema from ... import exc, log, sql, util from ...sql import compiler from array import array as _array from ...engine import reflection from ...engine import default from ... import types as sqltypes from ...util import topological from ...types import DATE, DATETIME, BOOLEAN, TIME, \ BLOB, BINARY, VARBINARY RESERVED_WORDS = set( ['accessible', 'add', 'all', 'alter', 'analyze', 'and', 'as', 'asc', 'asensitive', 'before', 'between', 'bigint', 'binary', 'blob', 'both', 'by', 'call', 'cascade', 'case', 'change', 'char', 'character', 'check', 'collate', 'column', 'condition', 'constraint', 'continue', 'convert', 'create', 'cross', 'current_date', 'current_time', 'current_timestamp', 'current_user', 'cursor', 'database', 'databases', 'day_hour', 'day_microsecond', 'day_minute', 'day_second', 'dec', 'decimal', 'declare', 'default', 'delayed', 'delete', 'desc', 'describe', 'deterministic', 'distinct', 'distinctrow', 'div', 'double', 'drop', 'dual', 'each', 'else', 'elseif', 'enclosed', 'escaped', 'exists', 'exit', 'explain', 'false', 'fetch', 'float', 'float4', 'float8', 'for', 'force', 'foreign', 'from', 'fulltext', 'grant', 'group', 'having', 'high_priority', 'hour_microsecond', 'hour_minute', 'hour_second', 'if', 'ignore', 'in', 'index', 'infile', 'inner', 'inout', 'insensitive', 'insert', 'int', 'int1', 'int2', 'int3', 'int4', 'int8', 'integer', 'interval', 'into', 'is', 'iterate', 'join', 'key', 'keys', 'kill', 'leading', 'leave', 'left', 'like', 'limit', 'linear', 'lines', 'load', 'localtime', 'localtimestamp', 'lock', 'long', 'longblob', 'longtext', 'loop', 'low_priority', 'master_ssl_verify_server_cert', 'match', 'mediumblob', 'mediumint', 'mediumtext', 'middleint', 'minute_microsecond', 'minute_second', 'mod', 'modifies', 'natural', 'not', 'no_write_to_binlog', 'null', 'numeric', 'on', 'optimize', 'option', 'optionally', 'or', 'order', 'out', 'outer', 'outfile', 'precision', 'primary', 'procedure', 'purge', 'range', 'read', 'reads', 'read_only', 'read_write', 'real', 'references', 'regexp', 'release', 'rename', 'repeat', 'replace', 'require', 'restrict', 'return', 'revoke', 'right', 'rlike', 'schema', 'schemas', 'second_microsecond', 'select', 'sensitive', 'separator', 'set', 'show', 'smallint', 'spatial', 'specific', 'sql', 'sqlexception', 'sqlstate', 'sqlwarning', 'sql_big_result', 'sql_calc_found_rows', 'sql_small_result', 'ssl', 'starting', 'straight_join', 'table', 'terminated', 'then', 'tinyblob', 'tinyint', 'tinytext', 'to', 'trailing', 'trigger', 'true', 'undo', 'union', 'unique', 'unlock', 'unsigned', 'update', 'usage', 'use', 'using', 'utc_date', 'utc_time', 'utc_timestamp', 'values', 'varbinary', 'varchar', 'varcharacter', 'varying', 'when', 'where', 'while', 'with', 'write', 'x509', 'xor', 'year_month', 'zerofill', # 5.0 'columns', 'fields', 'privileges', 'soname', 'tables', # 4.1 'accessible', 'linear', 'master_ssl_verify_server_cert', 'range', 'read_only', 'read_write', # 5.1 'general', 'ignore_server_ids', 'master_heartbeat_period', 'maxvalue', 'resignal', 'signal', 'slow', # 5.5 'get', 'io_after_gtids', 'io_before_gtids', 'master_bind', 'one_shot', 'partition', 'sql_after_gtids', 'sql_before_gtids', # 5.6 ]) AUTOCOMMIT_RE = re.compile( r'\s*(?:UPDATE|INSERT|CREATE|DELETE|DROP|ALTER|LOAD +DATA|REPLACE)', re.I | re.UNICODE) SET_RE = re.compile( r'\s*SET\s+(?:(?:GLOBAL|SESSION)\s+)?\w', re.I | re.UNICODE) class _NumericType(object): """Base for MySQL numeric types.""" def __init__(self, 
unsigned=False, zerofill=False, **kw): self.unsigned = unsigned self.zerofill = zerofill super(_NumericType, self).__init__(**kw) class _FloatType(_NumericType, sqltypes.Float): def __init__(self, precision=None, scale=None, asdecimal=True, **kw): if isinstance(self, (REAL, DOUBLE)) and \ ( (precision is None and scale is not None) or (precision is not None and scale is None) ): raise exc.ArgumentError( "You must specify both precision and scale or omit " "both altogether.") super(_FloatType, self).__init__(precision=precision, asdecimal=asdecimal, **kw) self.scale = scale class _IntegerType(_NumericType, sqltypes.Integer): def __init__(self, display_width=None, **kw): self.display_width = display_width super(_IntegerType, self).__init__(**kw) class _StringType(sqltypes.String): """Base for MySQL string types.""" def __init__(self, charset=None, collation=None, ascii=False, binary=False, national=False, **kw): self.charset = charset # allow collate= or collation= kw.setdefault('collation', kw.pop('collate', collation)) self.ascii = ascii # We have to munge the 'unicode' param strictly as a dict # otherwise 2to3 will turn it into str. self.__dict__['unicode'] = kw.get('unicode', False) # sqltypes.String does not accept the 'unicode' arg at all. if 'unicode' in kw: del kw['unicode'] self.binary = binary self.national = national super(_StringType, self).__init__(**kw) class NUMERIC(_NumericType, sqltypes.NUMERIC): """MySQL NUMERIC type.""" __visit_name__ = 'NUMERIC' def __init__(self, precision=None, scale=None, asdecimal=True, **kw): """Construct a NUMERIC. :param precision: Total digits in this number. If scale and precision are both None, values are stored to limits allowed by the server. :param scale: The number of digits after the decimal point. :param unsigned: a boolean, optional. :param zerofill: Optional. If true, values will be stored as strings left-padded with zeros. Note that this does not effect the values returned by the underlying database API, which continue to be numeric. """ super(NUMERIC, self).__init__(precision=precision, scale=scale, asdecimal=asdecimal, **kw) class DECIMAL(_NumericType, sqltypes.DECIMAL): """MySQL DECIMAL type.""" __visit_name__ = 'DECIMAL' def __init__(self, precision=None, scale=None, asdecimal=True, **kw): """Construct a DECIMAL. :param precision: Total digits in this number. If scale and precision are both None, values are stored to limits allowed by the server. :param scale: The number of digits after the decimal point. :param unsigned: a boolean, optional. :param zerofill: Optional. If true, values will be stored as strings left-padded with zeros. Note that this does not effect the values returned by the underlying database API, which continue to be numeric. """ super(DECIMAL, self).__init__(precision=precision, scale=scale, asdecimal=asdecimal, **kw) class DOUBLE(_FloatType): """MySQL DOUBLE type.""" __visit_name__ = 'DOUBLE' def __init__(self, precision=None, scale=None, asdecimal=True, **kw): """Construct a DOUBLE. :param precision: Total digits in this number. If scale and precision are both None, values are stored to limits allowed by the server. :param scale: The number of digits after the decimal point. :param unsigned: a boolean, optional. :param zerofill: Optional. If true, values will be stored as strings left-padded with zeros. Note that this does not effect the values returned by the underlying database API, which continue to be numeric. 
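For example (an editorial illustration), ``DOUBLE(10, 2)`` stores ten
        total digits with two digits after the decimal point.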
""" super(DOUBLE, self).__init__(precision=precision, scale=scale, asdecimal=asdecimal, **kw) class REAL(_FloatType, sqltypes.REAL): """MySQL REAL type.""" __visit_name__ = 'REAL' def __init__(self, precision=None, scale=None, asdecimal=True, **kw): """Construct a REAL. :param precision: Total digits in this number. If scale and precision are both None, values are stored to limits allowed by the server. :param scale: The number of digits after the decimal point. :param unsigned: a boolean, optional. :param zerofill: Optional. If true, values will be stored as strings left-padded with zeros. Note that this does not effect the values returned by the underlying database API, which continue to be numeric. """ super(REAL, self).__init__(precision=precision, scale=scale, asdecimal=asdecimal, **kw) class FLOAT(_FloatType, sqltypes.FLOAT): """MySQL FLOAT type.""" __visit_name__ = 'FLOAT' def __init__(self, precision=None, scale=None, asdecimal=False, **kw): """Construct a FLOAT. :param precision: Total digits in this number. If scale and precision are both None, values are stored to limits allowed by the server. :param scale: The number of digits after the decimal point. :param unsigned: a boolean, optional. :param zerofill: Optional. If true, values will be stored as strings left-padded with zeros. Note that this does not effect the values returned by the underlying database API, which continue to be numeric. """ super(FLOAT, self).__init__(precision=precision, scale=scale, asdecimal=asdecimal, **kw) def bind_processor(self, dialect): return None class INTEGER(_IntegerType, sqltypes.INTEGER): """MySQL INTEGER type.""" __visit_name__ = 'INTEGER' def __init__(self, display_width=None, **kw): """Construct an INTEGER. :param display_width: Optional, maximum display width for this number. :param unsigned: a boolean, optional. :param zerofill: Optional. If true, values will be stored as strings left-padded with zeros. Note that this does not effect the values returned by the underlying database API, which continue to be numeric. """ super(INTEGER, self).__init__(display_width=display_width, **kw) class BIGINT(_IntegerType, sqltypes.BIGINT): """MySQL BIGINTEGER type.""" __visit_name__ = 'BIGINT' def __init__(self, display_width=None, **kw): """Construct a BIGINTEGER. :param display_width: Optional, maximum display width for this number. :param unsigned: a boolean, optional. :param zerofill: Optional. If true, values will be stored as strings left-padded with zeros. Note that this does not effect the values returned by the underlying database API, which continue to be numeric. """ super(BIGINT, self).__init__(display_width=display_width, **kw) class MEDIUMINT(_IntegerType): """MySQL MEDIUMINTEGER type.""" __visit_name__ = 'MEDIUMINT' def __init__(self, display_width=None, **kw): """Construct a MEDIUMINTEGER :param display_width: Optional, maximum display width for this number. :param unsigned: a boolean, optional. :param zerofill: Optional. If true, values will be stored as strings left-padded with zeros. Note that this does not effect the values returned by the underlying database API, which continue to be numeric. """ super(MEDIUMINT, self).__init__(display_width=display_width, **kw) class TINYINT(_IntegerType): """MySQL TINYINT type.""" __visit_name__ = 'TINYINT' def __init__(self, display_width=None, **kw): """Construct a TINYINT. :param display_width: Optional, maximum display width for this number. :param unsigned: a boolean, optional. :param zerofill: Optional. 
If true, values will be stored as strings left-padded with zeros. Note that this does not effect the values returned by the underlying database API, which continue to be numeric. """ super(TINYINT, self).__init__(display_width=display_width, **kw) class SMALLINT(_IntegerType, sqltypes.SMALLINT): """MySQL SMALLINTEGER type.""" __visit_name__ = 'SMALLINT' def __init__(self, display_width=None, **kw): """Construct a SMALLINTEGER. :param display_width: Optional, maximum display width for this number. :param unsigned: a boolean, optional. :param zerofill: Optional. If true, values will be stored as strings left-padded with zeros. Note that this does not effect the values returned by the underlying database API, which continue to be numeric. """ super(SMALLINT, self).__init__(display_width=display_width, **kw) class BIT(sqltypes.TypeEngine): """MySQL BIT type. This type is for MySQL 5.0.3 or greater for MyISAM, and 5.0.5 or greater for MyISAM, MEMORY, InnoDB and BDB. For older versions, use a MSTinyInteger() type. """ __visit_name__ = 'BIT' def __init__(self, length=None): """Construct a BIT. :param length: Optional, number of bits. """ self.length = length def result_processor(self, dialect, coltype): """Convert a MySQL's 64 bit, variable length binary string to a long. TODO: this is MySQL-db, pyodbc specific. OurSQL and mysqlconnector already do this, so this logic should be moved to those dialects. """ def process(value): if value is not None: v = 0L for i in map(ord, value): v = v << 8 | i return v return value return process class TIME(sqltypes.TIME): """MySQL TIME type. Recent versions of MySQL add support for fractional seconds precision. While the :class:`.mysql.TIME` type now supports this, note that many DBAPI drivers may not yet include support. """ __visit_name__ = 'TIME' def __init__(self, timezone=False, fsp=None): """Construct a MySQL TIME type. :param timezone: not used by the MySQL dialect. :param fsp: fractional seconds precision value. MySQL 5.6 supports storage of fractional seconds; this parameter will be used when emitting DDL for the TIME type. Note that many DBAPI drivers may not yet have support for fractional seconds, however. .. versionadded:: 0.8 The MySQL-specific TIME type as well as fractional seconds support. """ super(TIME, self).__init__(timezone=timezone) self.fsp = fsp def result_processor(self, dialect, coltype): time = datetime.time def process(value): # convert from a timedelta value if value is not None: microseconds = value.microseconds seconds = value.seconds minutes = seconds // 60 return time(minutes // 60, minutes % 60, seconds - minutes * 60, microsecond=microseconds) else: return None return process class TIMESTAMP(sqltypes.TIMESTAMP): """MySQL TIMESTAMP type.""" __visit_name__ = 'TIMESTAMP' class YEAR(sqltypes.TypeEngine): """MySQL YEAR type, for single byte storage of years 1901-2155.""" __visit_name__ = 'YEAR' def __init__(self, display_width=None): self.display_width = display_width class TEXT(_StringType, sqltypes.TEXT): """MySQL TEXT type, for text up to 2^16 characters.""" __visit_name__ = 'TEXT' def __init__(self, length=None, **kw): """Construct a TEXT. :param length: Optional, if provided the server may optimize storage by substituting the smallest TEXT type sufficient to store ``length`` characters. :param charset: Optional, a column-level character set for this string value. Takes precedence to 'ascii' or 'unicode' short-hand. :param collation: Optional, a column-level collation for this string value. 
Takes precedence to 'binary' short-hand. :param ascii: Defaults to False: short-hand for the ``latin1`` character set, generates ASCII in schema. :param unicode: Defaults to False: short-hand for the ``ucs2`` character set, generates UNICODE in schema. :param national: Optional. If true, use the server's configured national character set. :param binary: Defaults to False: short-hand, pick the binary collation type that matches the column's character set. Generates BINARY in schema. This does not affect the type of data stored, only the collation of character data. """ super(TEXT, self).__init__(length=length, **kw) class TINYTEXT(_StringType): """MySQL TINYTEXT type, for text up to 2^8 characters.""" __visit_name__ = 'TINYTEXT' def __init__(self, **kwargs): """Construct a TINYTEXT. :param charset: Optional, a column-level character set for this string value. Takes precedence to 'ascii' or 'unicode' short-hand. :param collation: Optional, a column-level collation for this string value. Takes precedence to 'binary' short-hand. :param ascii: Defaults to False: short-hand for the ``latin1`` character set, generates ASCII in schema. :param unicode: Defaults to False: short-hand for the ``ucs2`` character set, generates UNICODE in schema. :param national: Optional. If true, use the server's configured national character set. :param binary: Defaults to False: short-hand, pick the binary collation type that matches the column's character set. Generates BINARY in schema. This does not affect the type of data stored, only the collation of character data. """ super(TINYTEXT, self).__init__(**kwargs) class MEDIUMTEXT(_StringType): """MySQL MEDIUMTEXT type, for text up to 2^24 characters.""" __visit_name__ = 'MEDIUMTEXT' def __init__(self, **kwargs): """Construct a MEDIUMTEXT. :param charset: Optional, a column-level character set for this string value. Takes precedence to 'ascii' or 'unicode' short-hand. :param collation: Optional, a column-level collation for this string value. Takes precedence to 'binary' short-hand. :param ascii: Defaults to False: short-hand for the ``latin1`` character set, generates ASCII in schema. :param unicode: Defaults to False: short-hand for the ``ucs2`` character set, generates UNICODE in schema. :param national: Optional. If true, use the server's configured national character set. :param binary: Defaults to False: short-hand, pick the binary collation type that matches the column's character set. Generates BINARY in schema. This does not affect the type of data stored, only the collation of character data. """ super(MEDIUMTEXT, self).__init__(**kwargs) class LONGTEXT(_StringType): """MySQL LONGTEXT type, for text up to 2^32 characters.""" __visit_name__ = 'LONGTEXT' def __init__(self, **kwargs): """Construct a LONGTEXT. :param charset: Optional, a column-level character set for this string value. Takes precedence to 'ascii' or 'unicode' short-hand. :param collation: Optional, a column-level collation for this string value. Takes precedence to 'binary' short-hand. :param ascii: Defaults to False: short-hand for the ``latin1`` character set, generates ASCII in schema. :param unicode: Defaults to False: short-hand for the ``ucs2`` character set, generates UNICODE in schema. :param national: Optional. If true, use the server's configured national character set. :param binary: Defaults to False: short-hand, pick the binary collation type that matches the column's character set. Generates BINARY in schema. 
This does not affect the type of data stored, only the collation of character data. """ super(LONGTEXT, self).__init__(**kwargs) class VARCHAR(_StringType, sqltypes.VARCHAR): """MySQL VARCHAR type, for variable-length character data.""" __visit_name__ = 'VARCHAR' def __init__(self, length=None, **kwargs): """Construct a VARCHAR. :param charset: Optional, a column-level character set for this string value. Takes precedence to 'ascii' or 'unicode' short-hand. :param collation: Optional, a column-level collation for this string value. Takes precedence to 'binary' short-hand. :param ascii: Defaults to False: short-hand for the ``latin1`` character set, generates ASCII in schema. :param unicode: Defaults to False: short-hand for the ``ucs2`` character set, generates UNICODE in schema. :param national: Optional. If true, use the server's configured national character set. :param binary: Defaults to False: short-hand, pick the binary collation type that matches the column's character set. Generates BINARY in schema. This does not affect the type of data stored, only the collation of character data. """ super(VARCHAR, self).__init__(length=length, **kwargs) class CHAR(_StringType, sqltypes.CHAR): """MySQL CHAR type, for fixed-length character data.""" __visit_name__ = 'CHAR' def __init__(self, length=None, **kwargs): """Construct a CHAR. :param length: Maximum data length, in characters. :param binary: Optional, use the default binary collation for the national character set. This does not affect the type of data stored, use a BINARY type for binary data. :param collation: Optional, request a particular collation. Must be compatible with the national character set. """ super(CHAR, self).__init__(length=length, **kwargs) class NVARCHAR(_StringType, sqltypes.NVARCHAR): """MySQL NVARCHAR type. For variable-length character data in the server's configured national character set. """ __visit_name__ = 'NVARCHAR' def __init__(self, length=None, **kwargs): """Construct an NVARCHAR. :param length: Maximum data length, in characters. :param binary: Optional, use the default binary collation for the national character set. This does not affect the type of data stored, use a BINARY type for binary data. :param collation: Optional, request a particular collation. Must be compatible with the national character set. """ kwargs['national'] = True super(NVARCHAR, self).__init__(length=length, **kwargs) class NCHAR(_StringType, sqltypes.NCHAR): """MySQL NCHAR type. For fixed-length character data in the server's configured national character set. """ __visit_name__ = 'NCHAR' def __init__(self, length=None, **kwargs): """Construct an NCHAR. :param length: Maximum data length, in characters. :param binary: Optional, use the default binary collation for the national character set. This does not affect the type of data stored, use a BINARY type for binary data. :param collation: Optional, request a particular collation. Must be compatible with the national character set. 
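For example, a table using these national-character types might be declared as follows (an illustrative sketch only; the table and column names are placeholders)::

            from sqlalchemy import Column, MetaData, Table
            from sqlalchemy.dialects.mysql import NCHAR, NVARCHAR

            metadata = MetaData()
            labels = Table('labels', metadata,
                Column('code', NCHAR(4)),
                Column('title', NVARCHAR(200))
            )
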
""" kwargs['national'] = True super(NCHAR, self).__init__(length=length, **kwargs) class TINYBLOB(sqltypes._Binary): """MySQL TINYBLOB type, for binary data up to 2^8 bytes.""" __visit_name__ = 'TINYBLOB' class MEDIUMBLOB(sqltypes._Binary): """MySQL MEDIUMBLOB type, for binary data up to 2^24 bytes.""" __visit_name__ = 'MEDIUMBLOB' class LONGBLOB(sqltypes._Binary): """MySQL LONGBLOB type, for binary data up to 2^32 bytes.""" __visit_name__ = 'LONGBLOB' class ENUM(sqltypes.Enum, _StringType): """MySQL ENUM type.""" __visit_name__ = 'ENUM' def __init__(self, *enums, **kw): """Construct an ENUM. Example: Column('myenum', MSEnum("foo", "bar", "baz")) :param enums: The range of valid values for this ENUM. Values will be quoted when generating the schema according to the quoting flag (see below). :param strict: Defaults to False: ensure that a given value is in this ENUM's range of permissible values when inserting or updating rows. Note that MySQL will not raise a fatal error if you attempt to store an out of range value- an alternate value will be stored instead. (See MySQL ENUM documentation.) :param charset: Optional, a column-level character set for this string value. Takes precedence to 'ascii' or 'unicode' short-hand. :param collation: Optional, a column-level collation for this string value. Takes precedence to 'binary' short-hand. :param ascii: Defaults to False: short-hand for the ``latin1`` character set, generates ASCII in schema. :param unicode: Defaults to False: short-hand for the ``ucs2`` character set, generates UNICODE in schema. :param binary: Defaults to False: short-hand, pick the binary collation type that matches the column's character set. Generates BINARY in schema. This does not affect the type of data stored, only the collation of character data. :param quoting: Defaults to 'auto': automatically determine enum value quoting. If all enum values are surrounded by the same quoting character, then use 'quoted' mode. Otherwise, use 'unquoted' mode. 'quoted': values in enums are already quoted, they will be used directly when generating the schema - this usage is deprecated. 'unquoted': values in enums are not quoted, they will be escaped and surrounded by single quotes when generating the schema. Previous versions of this type always required manually quoted values to be supplied; future versions will always quote the string literals for you. This is a transitional option. """ self.quoting = kw.pop('quoting', 'auto') if self.quoting == 'auto' and len(enums): # What quoting character are we using? q = None for e in enums: if len(e) == 0: self.quoting = 'unquoted' break elif q is None: q = e[0] if e[0] != q or e[-1] != q: self.quoting = 'unquoted' break else: self.quoting = 'quoted' if self.quoting == 'quoted': util.warn_deprecated( 'Manually quoting ENUM value literals is deprecated. 
Supply ' 'unquoted values and use the quoting= option in cases of ' 'ambiguity.') enums = self._strip_enums(enums) self.strict = kw.pop('strict', False) length = max([len(v) for v in enums] + [0]) kw.pop('metadata', None) kw.pop('schema', None) kw.pop('name', None) kw.pop('quote', None) kw.pop('native_enum', None) kw.pop('inherit_schema', None) _StringType.__init__(self, length=length, **kw) sqltypes.Enum.__init__(self, *enums) @classmethod def _strip_enums(cls, enums): strip_enums = [] for a in enums: if a[0:1] == '"' or a[0:1] == "'": # strip enclosing quotes and unquote interior a = a[1:-1].replace(a[0] * 2, a[0]) strip_enums.append(a) return strip_enums def bind_processor(self, dialect): super_convert = super(ENUM, self).bind_processor(dialect) def process(value): if self.strict and value is not None and value not in self.enums: raise exc.InvalidRequestError('"%s" not a valid value for ' 'this enum' % value) if super_convert: return super_convert(value) else: return value return process def adapt(self, impltype, **kw): kw['strict'] = self.strict return sqltypes.Enum.adapt(self, impltype, **kw) class SET(_StringType): """MySQL SET type.""" __visit_name__ = 'SET' def __init__(self, *values, **kw): """Construct a SET. Example:: Column('myset', MSSet("'foo'", "'bar'", "'baz'")) :param values: The range of valid values for this SET. Values will be used exactly as they appear when generating schemas. Strings must be quoted, as in the example above. Single-quotes are suggested for ANSI compatibility and are required for portability to servers with ANSI_QUOTES enabled. :param charset: Optional, a column-level character set for this string value. Takes precedence to 'ascii' or 'unicode' short-hand. :param collation: Optional, a column-level collation for this string value. Takes precedence to 'binary' short-hand. :param ascii: Defaults to False: short-hand for the ``latin1`` character set, generates ASCII in schema. :param unicode: Defaults to False: short-hand for the ``ucs2`` character set, generates UNICODE in schema. :param binary: Defaults to False: short-hand, pick the binary collation type that matches the column's character set. Generates BINARY in schema. This does not affect the type of data stored, only the collation of character data. """ self._ddl_values = values strip_values = [] for a in values: if a[0:1] == '"' or a[0:1] == "'": # strip enclosing quotes and unquote interior a = a[1:-1].replace(a[0] * 2, a[0]) strip_values.append(a) self.values = strip_values kw.setdefault('length', max([len(v) for v in strip_values] + [0])) super(SET, self).__init__(**kw) def result_processor(self, dialect, coltype): def process(value): # The good news: # No ',' quoting issues- commas aren't allowed in SET values # The bad news: # Plenty of driver inconsistencies here. 
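            # For example (illustrative values): a stored SET such as 'dq,sq'
            # may arrive from the driver as set(['dq', 'sq']), as a legacy
            # sets.Set(['dq', 'sq']), or as the raw string 'dq,sq'; the
            # branches below normalize each of these forms to a built-in set.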
if isinstance(value, util.set_types): # ..some versions convert '' to an empty set if not value: value.add('') # ..some return sets.Set, even for pythons # that have __builtin__.set if not isinstance(value, set): value = set(value) return value # ...and some versions return strings if value is not None: return set(value.split(',')) else: return value return process def bind_processor(self, dialect): super_convert = super(SET, self).bind_processor(dialect) def process(value): if value is None or isinstance(value, (int, long, basestring)): pass else: if None in value: value = set(value) value.remove(None) value.add('') value = ','.join(value) if super_convert: return super_convert(value) else: return value return process # old names MSTime = TIME MSSet = SET MSEnum = ENUM MSLongBlob = LONGBLOB MSMediumBlob = MEDIUMBLOB MSTinyBlob = TINYBLOB MSBlob = BLOB MSBinary = BINARY MSVarBinary = VARBINARY MSNChar = NCHAR MSNVarChar = NVARCHAR MSChar = CHAR MSString = VARCHAR MSLongText = LONGTEXT MSMediumText = MEDIUMTEXT MSTinyText = TINYTEXT MSText = TEXT MSYear = YEAR MSTimeStamp = TIMESTAMP MSBit = BIT MSSmallInteger = SMALLINT MSTinyInteger = TINYINT MSMediumInteger = MEDIUMINT MSBigInteger = BIGINT MSNumeric = NUMERIC MSDecimal = DECIMAL MSDouble = DOUBLE MSReal = REAL MSFloat = FLOAT MSInteger = INTEGER colspecs = { sqltypes.Numeric: NUMERIC, sqltypes.Float: FLOAT, sqltypes.Time: TIME, sqltypes.Enum: ENUM, } # Everything 3.23 through 5.1 excepting OpenGIS types. ischema_names = { 'bigint': BIGINT, 'binary': BINARY, 'bit': BIT, 'blob': BLOB, 'boolean': BOOLEAN, 'char': CHAR, 'date': DATE, 'datetime': DATETIME, 'decimal': DECIMAL, 'double': DOUBLE, 'enum': ENUM, 'fixed': DECIMAL, 'float': FLOAT, 'int': INTEGER, 'integer': INTEGER, 'longblob': LONGBLOB, 'longtext': LONGTEXT, 'mediumblob': MEDIUMBLOB, 'mediumint': MEDIUMINT, 'mediumtext': MEDIUMTEXT, 'nchar': NCHAR, 'nvarchar': NVARCHAR, 'numeric': NUMERIC, 'set': SET, 'smallint': SMALLINT, 'text': TEXT, 'time': TIME, 'timestamp': TIMESTAMP, 'tinyblob': TINYBLOB, 'tinyint': TINYINT, 'tinytext': TINYTEXT, 'varbinary': VARBINARY, 'varchar': VARCHAR, 'year': YEAR, } class MySQLExecutionContext(default.DefaultExecutionContext): def should_autocommit_text(self, statement): return AUTOCOMMIT_RE.match(statement) class MySQLCompiler(compiler.SQLCompiler): render_table_with_column_in_update_from = True """Overridden from base SQLCompiler value""" extract_map = compiler.SQLCompiler.extract_map.copy() extract_map.update({'milliseconds': 'millisecond'}) def visit_random_func(self, fn, **kw): return "rand%s" % self.function_argspec(fn) def visit_utc_timestamp_func(self, fn, **kw): return "UTC_TIMESTAMP" def visit_sysdate_func(self, fn, **kw): return "SYSDATE()" def visit_concat_op_binary(self, binary, operator, **kw): return "concat(%s, %s)" % (self.process(binary.left), self.process(binary.right)) def visit_match_op_binary(self, binary, operator, **kw): return "MATCH (%s) AGAINST (%s IN BOOLEAN MODE)" % \ (self.process(binary.left), self.process(binary.right)) def get_from_hint_text(self, table, text): return text def visit_typeclause(self, typeclause): type_ = typeclause.type.dialect_impl(self.dialect) if isinstance(type_, sqltypes.Integer): if getattr(type_, 'unsigned', False): return 'UNSIGNED INTEGER' else: return 'SIGNED INTEGER' elif isinstance(type_, sqltypes.TIMESTAMP): return 'DATETIME' elif isinstance(type_, (sqltypes.DECIMAL, sqltypes.DateTime, sqltypes.Date, sqltypes.Time)): return self.dialect.type_compiler.process(type_) elif isinstance(type_, 
sqltypes.Text): return 'CHAR' elif (isinstance(type_, sqltypes.String) and not isinstance(type_, (ENUM, SET))): if getattr(type_, 'length'): return 'CHAR(%s)' % type_.length else: return 'CHAR' elif isinstance(type_, sqltypes._Binary): return 'BINARY' elif isinstance(type_, sqltypes.NUMERIC): return self.dialect.type_compiler.process( type_).replace('NUMERIC', 'DECIMAL') else: return None def visit_cast(self, cast, **kwargs): # No cast until 4, no decimals until 5. if not self.dialect._supports_cast: return self.process(cast.clause.self_group()) type_ = self.process(cast.typeclause) if type_ is None: return self.process(cast.clause.self_group()) return 'CAST(%s AS %s)' % (self.process(cast.clause), type_) def render_literal_value(self, value, type_): value = super(MySQLCompiler, self).render_literal_value(value, type_) if self.dialect._backslash_escapes: value = value.replace('\\', '\\\\') return value def get_select_precolumns(self, select): """Add special MySQL keywords in place of DISTINCT. .. note:: this usage is deprecated. :meth:`.Select.prefix_with` should be used for special keywords at the start of a SELECT. """ if isinstance(select._distinct, basestring): return select._distinct.upper() + " " elif select._distinct: return "DISTINCT " else: return "" def visit_join(self, join, asfrom=False, **kwargs): # 'JOIN ... ON ...' for inner joins isn't available until 4.0. # Apparently < 3.23.17 requires theta joins for inner joins # (but not outer). Not generating these currently, but # support can be added, preferably after dialects are # refactored to be version-sensitive. return ''.join( (self.process(join.left, asfrom=True, **kwargs), (join.isouter and " LEFT OUTER JOIN " or " INNER JOIN "), self.process(join.right, asfrom=True, **kwargs), " ON ", self.process(join.onclause, **kwargs))) def for_update_clause(self, select): if select.for_update == 'read': return ' LOCK IN SHARE MODE' else: return super(MySQLCompiler, self).for_update_clause(select) def limit_clause(self, select): # MySQL supports: # LIMIT # LIMIT , # and in server versions > 3.3: # LIMIT OFFSET # The latter is more readable for offsets but we're stuck with the # former until we can refine dialects by server revision. limit, offset = select._limit, select._offset if (limit, offset) == (None, None): return '' elif offset is not None: # As suggested by the MySQL docs, need to apply an # artificial limit if one wasn't provided # http://dev.mysql.com/doc/refman/5.0/en/select.html if limit is None: # hardwire the upper limit. Currently # needed by OurSQL with Python 3 # (https://bugs.launchpad.net/oursql/+bug/686232), # but also is consistent with the usage of the upper # bound as part of MySQL's "syntax" for OFFSET with # no LIMIT return ' \n LIMIT %s, %s' % ( self.process(sql.literal(offset)), "18446744073709551615") else: return ' \n LIMIT %s, %s' % ( self.process(sql.literal(offset)), self.process(sql.literal(limit))) else: # No offset provided, so just use the limit return ' \n LIMIT %s' % (self.process(sql.literal(limit)),) def update_limit_clause(self, update_stmt): limit = update_stmt.kwargs.get('%s_limit' % self.dialect.name, None) if limit: return "LIMIT %s" % limit else: return None def update_tables_clause(self, update_stmt, from_table, extra_froms, **kw): return ', '.join(t._compiler_dispatch(self, asfrom=True, **kw) for t in [from_table] + list(extra_froms)) def update_from_clause(self, update_stmt, from_table, extra_froms, from_hints, **kw): return None # ug. 
"InnoDB needs indexes on foreign keys and referenced keys [...]. # Starting with MySQL 4.1.2, these indexes are created automatically. # In older versions, the indexes must be created explicitly or the # creation of foreign key constraints fails." class MySQLDDLCompiler(compiler.DDLCompiler): def create_table_constraints(self, table): """Get table constraints.""" constraint_string = super( MySQLDDLCompiler, self).create_table_constraints(table) engine_key = '%s_engine' % self.dialect.name is_innodb = table.kwargs.has_key(engine_key) and \ table.kwargs[engine_key].lower() == 'innodb' auto_inc_column = table._autoincrement_column if is_innodb and \ auto_inc_column is not None and \ auto_inc_column is not list(table.primary_key)[0]: if constraint_string: constraint_string += ", \n\t" constraint_string += "KEY %s (%s)" % ( self.preparer.quote( "idx_autoinc_%s" % auto_inc_column.name, None ), self.preparer.format_column(auto_inc_column) ) return constraint_string def get_column_specification(self, column, **kw): """Builds column DDL.""" colspec = [self.preparer.format_column(column), self.dialect.type_compiler.process(column.type) ] default = self.get_column_default_string(column) if default is not None: colspec.append('DEFAULT ' + default) is_timestamp = isinstance(column.type, sqltypes.TIMESTAMP) if not column.nullable and not is_timestamp: colspec.append('NOT NULL') elif column.nullable and is_timestamp and default is None: colspec.append('NULL') if column is column.table._autoincrement_column and \ column.server_default is None: colspec.append('AUTO_INCREMENT') return ' '.join(colspec) def post_create_table(self, table): """Build table-level CREATE options like ENGINE and COLLATE.""" table_opts = [] opts = dict( ( k[len(self.dialect.name) + 1:].upper(), v ) for k, v in table.kwargs.items() if k.startswith('%s_' % self.dialect.name) ) for opt in topological.sort([ ('DEFAULT_CHARSET', 'COLLATE'), ('DEFAULT_CHARACTER_SET', 'COLLATE') ], opts): arg = opts[opt] if opt in _options_of_type_string: arg = "'%s'" % arg.replace("\\", "\\\\").replace("'", "''") if opt in ('DATA_DIRECTORY', 'INDEX_DIRECTORY', 'DEFAULT_CHARACTER_SET', 'CHARACTER_SET', 'DEFAULT_CHARSET', 'DEFAULT_COLLATE'): opt = opt.replace('_', ' ') joiner = '=' if opt in ('TABLESPACE', 'DEFAULT CHARACTER SET', 'CHARACTER SET', 'COLLATE'): joiner = ' ' table_opts.append(joiner.join((opt, arg))) return ' '.join(table_opts) def visit_create_index(self, create): index = create.element self._verify_index_table(index) preparer = self.preparer table = preparer.format_table(index.table) columns = [self.sql_compiler.process(expr, include_table=False, literal_binds=True) for expr in index.expressions] name = self._prepared_index_name(index) text = "CREATE " if index.unique: text += "UNIQUE " text += "INDEX %s ON %s " % (name, table) if 'mysql_length' in index.kwargs: length = index.kwargs['mysql_length'] if isinstance(length, dict): # length value can be a (column_name --> integer value) mapping # specifying the prefix length for each column of the index columns = ', '.join( ('%s(%d)' % (col, length[col]) if col in length else '%s' % col) for col in columns ) else: # or can be an integer value specifying the same # prefix length for all columns of the index columns = ', '.join( '%s(%d)' % (col, length) for col in columns ) else: columns = ', '.join(columns) text += '(%s)' % columns if 'mysql_using' in index.kwargs: using = index.kwargs['mysql_using'] text += " USING %s" % (preparer.quote(using, index.quote)) return text def 
visit_primary_key_constraint(self, constraint): text = super(MySQLDDLCompiler, self).\ visit_primary_key_constraint(constraint) if "mysql_using" in constraint.kwargs: using = constraint.kwargs['mysql_using'] text += " USING %s" % ( self.preparer.quote(using, constraint.quote)) return text def visit_drop_index(self, drop): index = drop.element return "\nDROP INDEX %s ON %s" % ( self._prepared_index_name(index, include_schema=False), self.preparer.format_table(index.table)) def visit_drop_constraint(self, drop): constraint = drop.element if isinstance(constraint, sa_schema.ForeignKeyConstraint): qual = "FOREIGN KEY " const = self.preparer.format_constraint(constraint) elif isinstance(constraint, sa_schema.PrimaryKeyConstraint): qual = "PRIMARY KEY " const = "" elif isinstance(constraint, sa_schema.UniqueConstraint): qual = "INDEX " const = self.preparer.format_constraint(constraint) else: qual = "" const = self.preparer.format_constraint(constraint) return "ALTER TABLE %s DROP %s%s" % \ (self.preparer.format_table(constraint.table), qual, const) def define_constraint_deferrability(self, constraint): if constraint.deferrable is not None: util.warn("The 'deferrable' keyword will no longer be ignored by the " "MySQL backend in 0.9 - please adjust so that this keyword is " "not used in conjunction with MySQL.") if constraint.initially is not None: util.warn("The 'initially' keyword will no longer be ignored by the " "MySQL backend in 0.9 - please adjust so that this keyword is " "not used in conjunction with MySQL.") return "" def define_constraint_match(self, constraint): if constraint.match is not None: util.warn("MySQL ignores the 'MATCH' keyword while at the same time " "causes ON UPDATE/ON DELETE clauses to be ignored - " "this will be an exception in 0.9.") return "" class MySQLTypeCompiler(compiler.GenericTypeCompiler): def _extend_numeric(self, type_, spec): "Extend a numeric-type declaration with MySQL specific extensions." if not self._mysql_type(type_): return spec if type_.unsigned: spec += ' UNSIGNED' if type_.zerofill: spec += ' ZEROFILL' return spec def _extend_string(self, type_, defaults, spec): """Extend a string-type declaration with standard SQL CHARACTER SET / COLLATE annotations and MySQL specific extensions. """ def attr(name): return getattr(type_, name, defaults.get(name)) if attr('charset'): charset = 'CHARACTER SET %s' % attr('charset') elif attr('ascii'): charset = 'ASCII' elif attr('unicode'): charset = 'UNICODE' else: charset = None if attr('collation'): collation = 'COLLATE %s' % type_.collation elif attr('binary'): collation = 'BINARY' else: collation = None if attr('national'): # NATIONAL (aka NCHAR/NVARCHAR) trumps charsets. 
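            # For example (illustrative): with national=True, a VARCHAR(30)
            # spec renders roughly as 'NATIONAL VARCHAR(30)', optionally
            # followed by a COLLATE clause; the non-national branch below
            # appends CHARACTER SET / COLLATE clauses to the spec instead.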
return ' '.join([c for c in ('NATIONAL', spec, collation) if c is not None]) return ' '.join([c for c in (spec, charset, collation) if c is not None]) def _mysql_type(self, type_): return isinstance(type_, (_StringType, _NumericType)) def visit_NUMERIC(self, type_): if type_.precision is None: return self._extend_numeric(type_, "NUMERIC") elif type_.scale is None: return self._extend_numeric(type_, "NUMERIC(%(precision)s)" % {'precision': type_.precision}) else: return self._extend_numeric(type_, "NUMERIC(%(precision)s, %(scale)s)" % {'precision': type_.precision, 'scale': type_.scale}) def visit_DECIMAL(self, type_): if type_.precision is None: return self._extend_numeric(type_, "DECIMAL") elif type_.scale is None: return self._extend_numeric(type_, "DECIMAL(%(precision)s)" % {'precision': type_.precision}) else: return self._extend_numeric(type_, "DECIMAL(%(precision)s, %(scale)s)" % {'precision': type_.precision, 'scale': type_.scale}) def visit_DOUBLE(self, type_): if type_.precision is not None and type_.scale is not None: return self._extend_numeric(type_, "DOUBLE(%(precision)s, %(scale)s)" % {'precision': type_.precision, 'scale': type_.scale}) else: return self._extend_numeric(type_, 'DOUBLE') def visit_REAL(self, type_): if type_.precision is not None and type_.scale is not None: return self._extend_numeric(type_, "REAL(%(precision)s, %(scale)s)" % {'precision': type_.precision, 'scale': type_.scale}) else: return self._extend_numeric(type_, 'REAL') def visit_FLOAT(self, type_): if self._mysql_type(type_) and \ type_.scale is not None and \ type_.precision is not None: return self._extend_numeric(type_, "FLOAT(%s, %s)" % (type_.precision, type_.scale)) elif type_.precision is not None: return self._extend_numeric(type_, "FLOAT(%s)" % (type_.precision,)) else: return self._extend_numeric(type_, "FLOAT") def visit_INTEGER(self, type_): if self._mysql_type(type_) and type_.display_width is not None: return self._extend_numeric(type_, "INTEGER(%(display_width)s)" % {'display_width': type_.display_width}) else: return self._extend_numeric(type_, "INTEGER") def visit_BIGINT(self, type_): if self._mysql_type(type_) and type_.display_width is not None: return self._extend_numeric(type_, "BIGINT(%(display_width)s)" % {'display_width': type_.display_width}) else: return self._extend_numeric(type_, "BIGINT") def visit_MEDIUMINT(self, type_): if self._mysql_type(type_) and type_.display_width is not None: return self._extend_numeric(type_, "MEDIUMINT(%(display_width)s)" % {'display_width': type_.display_width}) else: return self._extend_numeric(type_, "MEDIUMINT") def visit_TINYINT(self, type_): if self._mysql_type(type_) and type_.display_width is not None: return self._extend_numeric(type_, "TINYINT(%s)" % type_.display_width) else: return self._extend_numeric(type_, "TINYINT") def visit_SMALLINT(self, type_): if self._mysql_type(type_) and type_.display_width is not None: return self._extend_numeric(type_, "SMALLINT(%(display_width)s)" % {'display_width': type_.display_width} ) else: return self._extend_numeric(type_, "SMALLINT") def visit_BIT(self, type_): if type_.length is not None: return "BIT(%s)" % type_.length else: return "BIT" def visit_DATETIME(self, type_): return "DATETIME" def visit_DATE(self, type_): return "DATE" def visit_TIME(self, type_): if getattr(type_, 'fsp', None): return "TIME(%d)" % type_.fsp else: return "TIME" def visit_TIMESTAMP(self, type_): return 'TIMESTAMP' def visit_YEAR(self, type_): if type_.display_width is None: return "YEAR" else: return "YEAR(%s)" % 
type_.display_width def visit_TEXT(self, type_): if type_.length: return self._extend_string(type_, {}, "TEXT(%d)" % type_.length) else: return self._extend_string(type_, {}, "TEXT") def visit_TINYTEXT(self, type_): return self._extend_string(type_, {}, "TINYTEXT") def visit_MEDIUMTEXT(self, type_): return self._extend_string(type_, {}, "MEDIUMTEXT") def visit_LONGTEXT(self, type_): return self._extend_string(type_, {}, "LONGTEXT") def visit_VARCHAR(self, type_): if type_.length: return self._extend_string(type_, {}, "VARCHAR(%d)" % type_.length) else: raise exc.CompileError( "VARCHAR requires a length on dialect %s" % self.dialect.name) def visit_CHAR(self, type_): if type_.length: return self._extend_string(type_, {}, "CHAR(%(length)s)" % {'length': type_.length}) else: return self._extend_string(type_, {}, "CHAR") def visit_NVARCHAR(self, type_): # We'll actually generate the equiv. "NATIONAL VARCHAR" instead # of "NVARCHAR". if type_.length: return self._extend_string(type_, {'national': True}, "VARCHAR(%(length)s)" % {'length': type_.length}) else: raise exc.CompileError( "NVARCHAR requires a length on dialect %s" % self.dialect.name) def visit_NCHAR(self, type_): # We'll actually generate the equiv. # "NATIONAL CHAR" instead of "NCHAR". if type_.length: return self._extend_string(type_, {'national': True}, "CHAR(%(length)s)" % {'length': type_.length}) else: return self._extend_string(type_, {'national': True}, "CHAR") def visit_VARBINARY(self, type_): return "VARBINARY(%d)" % type_.length def visit_large_binary(self, type_): return self.visit_BLOB(type_) def visit_enum(self, type_): if not type_.native_enum: return super(MySQLTypeCompiler, self).visit_enum(type_) else: return self.visit_ENUM(type_) def visit_BLOB(self, type_): if type_.length: return "BLOB(%d)" % type_.length else: return "BLOB" def visit_TINYBLOB(self, type_): return "TINYBLOB" def visit_MEDIUMBLOB(self, type_): return "MEDIUMBLOB" def visit_LONGBLOB(self, type_): return "LONGBLOB" def visit_ENUM(self, type_): quoted_enums = [] for e in type_.enums: quoted_enums.append("'%s'" % e.replace("'", "''")) return self._extend_string(type_, {}, "ENUM(%s)" % ",".join(quoted_enums)) def visit_SET(self, type_): return self._extend_string(type_, {}, "SET(%s)" % ",".join(type_._ddl_values)) def visit_BOOLEAN(self, type): return "BOOL" class MySQLIdentifierPreparer(compiler.IdentifierPreparer): reserved_words = RESERVED_WORDS def __init__(self, dialect, server_ansiquotes=False, **kw): if not server_ansiquotes: quote = "`" else: quote = '"' super(MySQLIdentifierPreparer, self).__init__( dialect, initial_quote=quote, escape_quote=quote) def _quote_free_identifiers(self, *ids): """Unilaterally identifier-quote any number of strings.""" return tuple([self.quote_identifier(i) for i in ids if i is not None]) class MySQLDialect(default.DefaultDialect): """Details of the MySQL dialect. Not used directly in application code.""" name = 'mysql' supports_alter = True # identifiers are 64, however aliases can be 255... max_identifier_length = 255 max_index_name_length = 64 supports_native_enum = True supports_sane_rowcount = True supports_sane_multi_rowcount = False supports_multivalues_insert = True default_paramstyle = 'format' colspecs = colspecs statement_compiler = MySQLCompiler ddl_compiler = MySQLDDLCompiler type_compiler = MySQLTypeCompiler ischema_names = ischema_names preparer = MySQLIdentifierPreparer # default SQL compilation settings - # these are modified upon initialize(), # i.e. 
first connect _backslash_escapes = True _server_ansiquotes = False def __init__(self, isolation_level=None, **kwargs): kwargs.pop('use_ansiquotes', None) # legacy default.DefaultDialect.__init__(self, **kwargs) self.isolation_level = isolation_level def on_connect(self): if self.isolation_level is not None: def connect(conn): self.set_isolation_level(conn, self.isolation_level) return connect else: return None _isolation_lookup = set(['SERIALIZABLE', 'READ UNCOMMITTED', 'READ COMMITTED', 'REPEATABLE READ']) def set_isolation_level(self, connection, level): level = level.replace('_', ' ') if level not in self._isolation_lookup: raise exc.ArgumentError( "Invalid value '%s' for isolation_level. " "Valid isolation levels for %s are %s" % (level, self.name, ", ".join(self._isolation_lookup)) ) cursor = connection.cursor() cursor.execute("SET SESSION TRANSACTION ISOLATION LEVEL %s" % level) cursor.execute("COMMIT") cursor.close() def get_isolation_level(self, connection): cursor = connection.cursor() cursor.execute('SELECT @@tx_isolation') val = cursor.fetchone()[0] cursor.close() if util.py3k and isinstance(val, bytes): val = val.decode() return val.upper().replace("-", " ") def do_commit(self, dbapi_connection): """Execute a COMMIT.""" # COMMIT/ROLLBACK were introduced in 3.23.15. # Yes, we have at least one user who has to talk to these old versions! # # Ignore commit/rollback if support isn't present, otherwise even basic # operations via autocommit fail. try: dbapi_connection.commit() except: if self.server_version_info < (3, 23, 15): args = sys.exc_info()[1].args if args and args[0] == 1064: return raise def do_rollback(self, dbapi_connection): """Execute a ROLLBACK.""" try: dbapi_connection.rollback() except: if self.server_version_info < (3, 23, 15): args = sys.exc_info()[1].args if args and args[0] == 1064: return raise def do_begin_twophase(self, connection, xid): connection.execute(sql.text("XA BEGIN :xid"), xid=xid) def do_prepare_twophase(self, connection, xid): connection.execute(sql.text("XA END :xid"), xid=xid) connection.execute(sql.text("XA PREPARE :xid"), xid=xid) def do_rollback_twophase(self, connection, xid, is_prepared=True, recover=False): if not is_prepared: connection.execute(sql.text("XA END :xid"), xid=xid) connection.execute(sql.text("XA ROLLBACK :xid"), xid=xid) def do_commit_twophase(self, connection, xid, is_prepared=True, recover=False): if not is_prepared: self.do_prepare_twophase(connection, xid) connection.execute(sql.text("XA COMMIT :xid"), xid=xid) def do_recover_twophase(self, connection): resultset = connection.execute("XA RECOVER") return [row['data'][0:row['gtrid_length']] for row in resultset] def is_disconnect(self, e, connection, cursor): if isinstance(e, self.dbapi.OperationalError): return self._extract_error_code(e) in \ (2006, 2013, 2014, 2045, 2055) elif isinstance(e, self.dbapi.InterfaceError): # if underlying connection is closed, # this is the error you get return "(0, '')" in str(e) else: return False def _compat_fetchall(self, rp, charset=None): """Proxy result rows to smooth over MySQL-Python driver inconsistencies.""" return [_DecodingRowProxy(row, charset) for row in rp.fetchall()] def _compat_fetchone(self, rp, charset=None): """Proxy a result row to smooth over MySQL-Python driver inconsistencies.""" return _DecodingRowProxy(rp.fetchone(), charset) def _compat_first(self, rp, charset=None): """Proxy a result row to smooth over MySQL-Python driver inconsistencies.""" return _DecodingRowProxy(rp.first(), charset) def 
_extract_error_code(self, exception): raise NotImplementedError() def _get_default_schema_name(self, connection): return connection.execute('SELECT DATABASE()').scalar() def has_table(self, connection, table_name, schema=None): # SHOW TABLE STATUS LIKE and SHOW TABLES LIKE do not function properly # on macosx (and maybe win?) with multibyte table names. # # TODO: if this is not a problem on win, make the strategy swappable # based on platform. DESCRIBE is slower. # [ticket:726] # full_name = self.identifier_preparer.format_table(table, # use_schema=True) full_name = '.'.join(self.identifier_preparer._quote_free_identifiers( schema, table_name)) st = "DESCRIBE %s" % full_name rs = None try: try: rs = connection.execute(st) have = rs.fetchone() is not None rs.close() return have except exc.DBAPIError, e: if self._extract_error_code(e.orig) == 1146: return False raise finally: if rs: rs.close() def initialize(self, connection): default.DefaultDialect.initialize(self, connection) self._connection_charset = self._detect_charset(connection) self._detect_ansiquotes(connection) if self._server_ansiquotes: # if ansiquotes == True, build a new IdentifierPreparer # with the new setting self.identifier_preparer = self.preparer(self, server_ansiquotes=self._server_ansiquotes) @property def _supports_cast(self): return self.server_version_info is None or \ self.server_version_info >= (4, 0, 2) @reflection.cache def get_schema_names(self, connection, **kw): rp = connection.execute("SHOW schemas") return [r[0] for r in rp] @reflection.cache def get_table_names(self, connection, schema=None, **kw): """Return a Unicode SHOW TABLES from a given schema.""" if schema is not None: current_schema = schema else: current_schema = self.default_schema_name charset = self._connection_charset if self.server_version_info < (5, 0, 2): rp = connection.execute("SHOW TABLES FROM %s" % self.identifier_preparer.quote_identifier(current_schema)) return [row[0] for row in self._compat_fetchall(rp, charset=charset)] else: rp = connection.execute("SHOW FULL TABLES FROM %s" % self.identifier_preparer.quote_identifier(current_schema)) return [row[0] for row in self._compat_fetchall(rp, charset=charset) if row[1] == 'BASE TABLE'] @reflection.cache def get_view_names(self, connection, schema=None, **kw): if self.server_version_info < (5, 0, 2): raise NotImplementedError if schema is None: schema = self.default_schema_name if self.server_version_info < (5, 0, 2): return self.get_table_names(connection, schema) charset = self._connection_charset rp = connection.execute("SHOW FULL TABLES FROM %s" % self.identifier_preparer.quote_identifier(schema)) return [row[0] for row in self._compat_fetchall(rp, charset=charset) if row[1] in ('VIEW', 'SYSTEM VIEW')] @reflection.cache def get_table_options(self, connection, table_name, schema=None, **kw): parsed_state = self._parsed_state_or_create( connection, table_name, schema, **kw) return parsed_state.table_options @reflection.cache def get_columns(self, connection, table_name, schema=None, **kw): parsed_state = self._parsed_state_or_create( connection, table_name, schema, **kw) return parsed_state.columns @reflection.cache def get_pk_constraint(self, connection, table_name, schema=None, **kw): parsed_state = self._parsed_state_or_create( connection, table_name, schema, **kw) for key in parsed_state.keys: if key['type'] == 'PRIMARY': # There can be only one. 
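                # For example, a reflected PRIMARY KEY over columns
                # (id, version) is returned below as
                # {'constrained_columns': ['id', 'version'], 'name': None};
                # the column names here are purely illustrative.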
cols = [s[0] for s in key['columns']] return {'constrained_columns': cols, 'name': None} return {'constrained_columns': [], 'name': None} @reflection.cache def get_foreign_keys(self, connection, table_name, schema=None, **kw): parsed_state = self._parsed_state_or_create( connection, table_name, schema, **kw) default_schema = None fkeys = [] for spec in parsed_state.constraints: # only FOREIGN KEYs ref_name = spec['table'][-1] ref_schema = len(spec['table']) > 1 and spec['table'][-2] or schema if not ref_schema: if default_schema is None: default_schema = \ connection.dialect.default_schema_name if schema == default_schema: ref_schema = schema loc_names = spec['local'] ref_names = spec['foreign'] con_kw = {} for opt in ('name', 'onupdate', 'ondelete'): if spec.get(opt, False): con_kw[opt] = spec[opt] fkey_d = { 'name': spec['name'], 'constrained_columns': loc_names, 'referred_schema': ref_schema, 'referred_table': ref_name, 'referred_columns': ref_names, 'options': con_kw } fkeys.append(fkey_d) return fkeys @reflection.cache def get_indexes(self, connection, table_name, schema=None, **kw): parsed_state = self._parsed_state_or_create( connection, table_name, schema, **kw) indexes = [] for spec in parsed_state.keys: unique = False flavor = spec['type'] if flavor == 'PRIMARY': continue if flavor == 'UNIQUE': unique = True elif flavor in (None, 'FULLTEXT', 'SPATIAL'): pass else: self.logger.info( "Converting unknown KEY type %s to a plain KEY" % flavor) pass index_d = {} index_d['name'] = spec['name'] index_d['column_names'] = [s[0] for s in spec['columns']] index_d['unique'] = unique index_d['type'] = flavor indexes.append(index_d) return indexes @reflection.cache def get_unique_constraints(self, connection, table_name, schema=None, **kw): parsed_state = self._parsed_state_or_create( connection, table_name, schema, **kw) return [ { 'name': key['name'], 'column_names': [col[0] for col in key['columns']] } for key in parsed_state.keys if key['type'] == 'UNIQUE' ] @reflection.cache def get_view_definition(self, connection, view_name, schema=None, **kw): charset = self._connection_charset full_name = '.'.join(self.identifier_preparer._quote_free_identifiers( schema, view_name)) sql = self._show_create_table(connection, None, charset, full_name=full_name) return sql def _parsed_state_or_create(self, connection, table_name, schema=None, **kw): return self._setup_parser( connection, table_name, schema, info_cache=kw.get('info_cache', None) ) @util.memoized_property def _tabledef_parser(self): """return the MySQLTableDefinitionParser, generate if needed. The deferred creation ensures that the dialect has retrieved server version information first. """ if (self.server_version_info < (4, 1) and self._server_ansiquotes): # ANSI_QUOTES doesn't affect SHOW CREATE TABLE on < 4.1 preparer = self.preparer(self, server_ansiquotes=False) else: preparer = self.identifier_preparer return MySQLTableDefinitionParser(self, preparer) @reflection.cache def _setup_parser(self, connection, table_name, schema=None, **kw): charset = self._connection_charset parser = self._tabledef_parser full_name = '.'.join(self.identifier_preparer._quote_free_identifiers( schema, table_name)) sql = self._show_create_table(connection, None, charset, full_name=full_name) if sql.startswith('CREATE ALGORITHM'): # Adapt views to something table-like. 
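            # SHOW CREATE TABLE against a view typically yields a statement
            # beginning with 'CREATE ALGORITHM=...', which carries no
            # column-level detail this parser can use; fall back to DESCRIBE
            # and synthesize a CREATE TABLE string via _describe_to_create().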
columns = self._describe_table(connection, None, charset, full_name=full_name) sql = parser._describe_to_create(table_name, columns) return parser.parse(sql, charset) def _detect_charset(self, connection): raise NotImplementedError() def _detect_casing(self, connection): """Sniff out identifier case sensitivity. Cached per-connection. This value can not change without a server restart. """ # http://dev.mysql.com/doc/refman/5.0/en/name-case-sensitivity.html charset = self._connection_charset row = self._compat_first(connection.execute( "SHOW VARIABLES LIKE 'lower_case_table_names'"), charset=charset) if not row: cs = 0 else: # 4.0.15 returns OFF or ON according to [ticket:489] # 3.23 doesn't, 4.0.27 doesn't.. if row[1] == 'OFF': cs = 0 elif row[1] == 'ON': cs = 1 else: cs = int(row[1]) return cs def _detect_collations(self, connection): """Pull the active COLLATIONS list from the server. Cached per-connection. """ collations = {} if self.server_version_info < (4, 1, 0): pass else: charset = self._connection_charset rs = connection.execute('SHOW COLLATION') for row in self._compat_fetchall(rs, charset): collations[row[0]] = row[1] return collations def _detect_ansiquotes(self, connection): """Detect and adjust for the ANSI_QUOTES sql mode.""" row = self._compat_first( connection.execute("SHOW VARIABLES LIKE 'sql_mode'"), charset=self._connection_charset) if not row: mode = '' else: mode = row[1] or '' # 4.0 if mode.isdigit(): mode_no = int(mode) mode = (mode_no | 4 == mode_no) and 'ANSI_QUOTES' or '' self._server_ansiquotes = 'ANSI_QUOTES' in mode # as of MySQL 5.0.1 self._backslash_escapes = 'NO_BACKSLASH_ESCAPES' not in mode def _show_create_table(self, connection, table, charset=None, full_name=None): """Run SHOW CREATE TABLE for a ``Table``.""" if full_name is None: full_name = self.identifier_preparer.format_table(table) st = "SHOW CREATE TABLE %s" % full_name rp = None try: rp = connection.execute(st) except exc.DBAPIError, e: if self._extract_error_code(e.orig) == 1146: raise exc.NoSuchTableError(full_name) else: raise row = self._compat_first(rp, charset=charset) if not row: raise exc.NoSuchTableError(full_name) return row[1].strip() return sql def _describe_table(self, connection, table, charset=None, full_name=None): """Run DESCRIBE for a ``Table`` and return processed rows.""" if full_name is None: full_name = self.identifier_preparer.format_table(table) st = "DESCRIBE %s" % full_name rp, rows = None, None try: try: rp = connection.execute(st) except exc.DBAPIError, e: if self._extract_error_code(e.orig) == 1146: raise exc.NoSuchTableError(full_name) else: raise rows = self._compat_fetchall(rp, charset=charset) finally: if rp: rp.close() return rows class ReflectedState(object): """Stores raw information about a SHOW CREATE TABLE statement.""" def __init__(self): self.columns = [] self.table_options = {} self.table_name = None self.keys = [] self.constraints = [] class MySQLTableDefinitionParser(object): """Parses the results of a SHOW CREATE TABLE statement.""" def __init__(self, dialect, preparer): self.dialect = dialect self.preparer = preparer self._prep_regexes() def parse(self, show_create, charset): state = ReflectedState() state.charset = charset for line in re.split(r'\r?\n', show_create): if line.startswith(' ' + self.preparer.initial_quote): self._parse_column(line, state) # a regular table options line elif line.startswith(') '): self._parse_table_options(line, state) # an ANSI-mode table options line elif line == ')': pass elif line.startswith('CREATE '): 
self._parse_table_name(line, state) # Not present in real reflection, but may be if # loading from a file. elif not line: pass else: type_, spec = self._parse_constraints(line) if type_ is None: util.warn("Unknown schema content: %r" % line) elif type_ == 'key': state.keys.append(spec) elif type_ == 'constraint': state.constraints.append(spec) else: pass return state def _parse_constraints(self, line): """Parse a KEY or CONSTRAINT line. :param line: A line of SHOW CREATE TABLE output """ # KEY m = self._re_key.match(line) if m: spec = m.groupdict() # convert columns into name, length pairs spec['columns'] = self._parse_keyexprs(spec['columns']) return 'key', spec # CONSTRAINT m = self._re_constraint.match(line) if m: spec = m.groupdict() spec['table'] = \ self.preparer.unformat_identifiers(spec['table']) spec['local'] = [c[0] for c in self._parse_keyexprs(spec['local'])] spec['foreign'] = [c[0] for c in self._parse_keyexprs(spec['foreign'])] return 'constraint', spec # PARTITION and SUBPARTITION m = self._re_partition.match(line) if m: # Punt! return 'partition', line # No match. return (None, line) def _parse_table_name(self, line, state): """Extract the table name. :param line: The first line of SHOW CREATE TABLE """ regex, cleanup = self._pr_name m = regex.match(line) if m: state.table_name = cleanup(m.group('name')) def _parse_table_options(self, line, state): """Build a dictionary of all reflected table-level options. :param line: The final line of SHOW CREATE TABLE output. """ options = {} if not line or line == ')': pass else: rest_of_line = line[:] for regex, cleanup in self._pr_options: m = regex.search(rest_of_line) if not m: continue directive, value = m.group('directive'), m.group('val') if cleanup: value = cleanup(value) options[directive.lower()] = value rest_of_line = regex.sub('', rest_of_line) for nope in ('auto_increment', 'data directory', 'index directory'): options.pop(nope, None) for opt, val in options.items(): state.table_options['%s_%s' % (self.dialect.name, opt)] = val def _parse_column(self, line, state): """Extract column details. Falls back to a 'minimal support' variant if full parse fails. :param line: Any column-bearing line from SHOW CREATE TABLE """ spec = None m = self._re_column.match(line) if m: spec = m.groupdict() spec['full'] = True else: m = self._re_column_loose.match(line) if m: spec = m.groupdict() spec['full'] = False if not spec: util.warn("Unknown column definition %r" % line) return if not spec['full']: util.warn("Incomplete reflection of column definition %r" % line) name, type_, args, notnull = \ spec['name'], spec['coltype'], spec['arg'], spec['notnull'] try: col_type = self.dialect.ischema_names[type_] except KeyError: util.warn("Did not recognize type '%s' of column '%s'" % (type_, name)) col_type = sqltypes.NullType # Column type positional arguments eg. 
varchar(32)
        if args is None or args == '':
            type_args = []
        elif args[0] == "'" and args[-1] == "'":
            type_args = self._re_csv_str.findall(args)
        else:
            type_args = [int(v) for v in self._re_csv_int.findall(args)]

        # Column type keyword options
        type_kw = {}
        for kw in ('unsigned', 'zerofill'):
            if spec.get(kw, False):
                type_kw[kw] = True
        for kw in ('charset', 'collate'):
            if spec.get(kw, False):
                type_kw[kw] = spec[kw]

        if type_ == 'enum':
            type_args = ENUM._strip_enums(type_args)

        type_instance = col_type(*type_args, **type_kw)

        col_args, col_kw = [], {}

        # NOT NULL
        col_kw['nullable'] = True
        if spec.get('notnull', False):
            col_kw['nullable'] = False

        # AUTO_INCREMENT
        if spec.get('autoincr', False):
            col_kw['autoincrement'] = True
        elif issubclass(col_type, sqltypes.Integer):
            col_kw['autoincrement'] = False

        # DEFAULT
        default = spec.get('default', None)

        if default == 'NULL':
            # eliminates the need to deal with this later.
            default = None

        col_d = dict(name=name, type=type_instance, default=default)
        col_d.update(col_kw)
        state.columns.append(col_d)

    def _describe_to_create(self, table_name, columns):
        """Re-format DESCRIBE output as a SHOW CREATE TABLE string.

        DESCRIBE is a much simpler reflection and is sufficient for
        reflecting views for runtime use.  This method formats DDL
        for columns only- keys are omitted.

        :param columns: A sequence of DESCRIBE or SHOW COLUMNS 6-tuples.
          SHOW FULL COLUMNS FROM rows must be rearranged for use with
          this function.

        """
        buffer = []
        for row in columns:
            (name, col_type, nullable, default, extra) = \
                [row[i] for i in (0, 1, 2, 4, 5)]

            line = ['  ']
            line.append(self.preparer.quote_identifier(name))
            line.append(col_type)
            if not nullable:
                line.append('NOT NULL')
            if default:
                if 'auto_increment' in default:
                    pass
                elif (col_type.startswith('timestamp') and
                      default.startswith('C')):
                    line.append('DEFAULT')
                    line.append(default)
                elif default == 'NULL':
                    line.append('DEFAULT')
                    line.append(default)
                else:
                    line.append('DEFAULT')
                    line.append("'%s'" % default.replace("'", "''"))
            if extra:
                line.append(extra)

            buffer.append(' '.join(line))

        return ''.join([('CREATE TABLE %s (\n' %
                         self.preparer.quote_identifier(table_name)),
                        ',\n'.join(buffer),
                        '\n) '])

    def _parse_keyexprs(self, identifiers):
        """Unpack '"col"(2),"col" ASC'-ish strings into components."""

        return self._re_keyexprs.findall(identifiers)

    def _prep_regexes(self):
        """Pre-compile regular expressions."""

        self._re_columns = []
        self._pr_options = []

        _final = self.preparer.final_quote

        quotes = dict(zip(('iq', 'fq', 'esc_fq'),
                      [re.escape(s) for s in
                       (self.preparer.initial_quote,
                        _final,
                        self.preparer._escape_identifier(_final))]))

        self._pr_name = _pr_compile(
            r'^CREATE (?:\w+ +)?TABLE +'
            r'%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +\($' % quotes,
            self.preparer._unescape_identifier)

        # `col`,`col2`(32),`col3`(15) DESC
        #
        # Note: ASC and DESC aren't reflected, so we'll punt...
        self._re_keyexprs = _re_compile(
            r'(?:'
            r'(?:%(iq)s((?:%(esc_fq)s|[^%(fq)s])+)%(fq)s)'
            r'(?:\((\d+)\))?(?=\,|$))+' % quotes)

        # 'foo' or 'foo','bar' or 'fo,o','ba''a''r'
        self._re_csv_str = _re_compile(r'\x27(?:\x27\x27|[^\x27])*\x27')

        # 123 or 123,456
        self._re_csv_int = _re_compile(r'\d+')

        # `colname` <type> [type opts]
        #  (NOT NULL | NULL)
        #   DEFAULT ('value' | CURRENT_TIMESTAMP...)
        #   COMMENT 'comment'
        #  COLUMN_FORMAT (FIXED|DYNAMIC|DEFAULT)
        #  STORAGE (DISK|MEMORY)
        self._re_column = _re_compile(
            r'  '
            r'%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +'
            r'(?P<coltype>\w+)'
            r'(?:\((?P<arg>(?:\d+|\d+,\d+|'
            r'(?:\x27(?:\x27\x27|[^\x27])*\x27,?)+))\))?'
            r'(?: +(?P<unsigned>UNSIGNED))?'
            r'(?: +(?P<zerofill>ZEROFILL))?'
            r'(?: +CHARACTER SET +(?P<charset>[\w_]+))?'
            r'(?: +COLLATE +(?P<collate>[\w_]+))?'
            r'(?: +(?P<notnull>NOT NULL))?'
            r'(?: +DEFAULT +(?P<default>'
            r'(?:NULL|\x27(?:\x27\x27|[^\x27])*\x27|\w+'
            r'(?: +ON UPDATE \w+)?)'
            r'))?'
            r'(?: +COMMENT +(?P<comment>(?:\x27\x27|[^\x27])+))?'
            r'(?: +(?P<autoincr>AUTO_INCREMENT))?'
            r'(?: +COLUMN_FORMAT +(?P<colformat>\w+))?'
            r'(?: +STORAGE +(?P<storage>\w+))?'
            r'(?: +(?P<extra>.*))?'
            r',?$' % quotes
            )

        # Fallback, try to parse as little as possible
        self._re_column_loose = _re_compile(
            r'  '
            r'%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +'
            r'(?P<coltype>\w+)'
            r'(?:\((?P<arg>(?:\d+|\d+,\d+|\x27(?:\x27\x27|[^\x27])+\x27))\))?'
            r'.*?(?P<notnull>NOT NULL)?' % quotes
            )

        # (PRIMARY|UNIQUE|FULLTEXT|SPATIAL) INDEX `name` (USING (BTREE|HASH))?
        # (`col` (ASC|DESC)?, `col` (ASC|DESC)?)
        # KEY_BLOCK_SIZE size | WITH PARSER name
        self._re_key = _re_compile(
            r'  '
            r'(?:(?P<type>\S+) )?KEY'
            r'(?: +%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s)?'
            r'(?: +USING +(?P<using_pre>\S+))?'
            r' +\((?P<columns>.+?)\)'
            r'(?: +USING +(?P<using_post>\S+))?'
            r'(?: +KEY_BLOCK_SIZE +(?P<keyblock>\S+))?'
            r'(?: +WITH PARSER +(?P<parser>\S+))?'
            r',?$' % quotes
            )

        # CONSTRAINT `name` FOREIGN KEY (`local_col`)
        # REFERENCES `remote` (`remote_col`)
        # MATCH FULL | MATCH PARTIAL | MATCH SIMPLE
        # ON DELETE CASCADE ON UPDATE RESTRICT
        #
        # unique constraints come back as KEYs
        kw = quotes.copy()
        kw['on'] = 'RESTRICT|CASCADE|SET NULL|NOACTION'
        self._re_constraint = _re_compile(
            r'  '
            r'CONSTRAINT +'
            r'%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +'
            r'FOREIGN KEY +'
            r'\((?P<local>[^\)]+?)\) REFERENCES +'
            r'(?P<table>%(iq)s[^%(fq)s]+%(fq)s(?:\.%(iq)s[^%(fq)s]+%(fq)s)?) +'
            r'\((?P<foreign>[^\)]+?)\)'
            r'(?: +(?P<match>MATCH \w+))?'
            r'(?: +ON DELETE (?P<ondelete>%(on)s))?'
            r'(?: +ON UPDATE (?P<onupdate>%(on)s))?' % kw
            )

        # PARTITION
        #
        # punt!
        self._re_partition = _re_compile(r'(?:.*)(?:SUB)?PARTITION(?:.*)')

        # Table-level options (COLLATE, ENGINE, etc.)
        # Do the string options first, since they have quoted
        # strings we need to get rid of.
        for option in _options_of_type_string:
            self._add_option_string(option)

        for option in ('ENGINE', 'TYPE', 'AUTO_INCREMENT', 'AVG_ROW_LENGTH',
                       'CHARACTER SET', 'DEFAULT CHARSET', 'CHECKSUM',
                       'COLLATE', 'DELAY_KEY_WRITE', 'INSERT_METHOD',
                       'MAX_ROWS', 'MIN_ROWS', 'PACK_KEYS', 'ROW_FORMAT',
                       'KEY_BLOCK_SIZE'):
            self._add_option_word(option)

        self._add_option_regex('UNION', r'\([^\)]+\)')
        self._add_option_regex('TABLESPACE', r'.*? STORAGE DISK')
        self._add_option_regex(
            'RAID_TYPE',
            r'\w+\s+RAID_CHUNKS\s*\=\s*\w+RAID_CHUNKSIZE\s*=\s*\w+')

    _optional_equals = r'(?:\s*(?:=\s*)|\s+)'

    def _add_option_string(self, directive):
        regex = (r'(?P<directive>%s)%s'
                 r"'(?P<val>(?:[^']|'')*?)'(?!')" %
                 (re.escape(directive), self._optional_equals))
        self._pr_options.append(_pr_compile(
            regex, lambda v: v.replace("\\\\", "\\").replace("''", "'")))

    def _add_option_word(self, directive):
        regex = (r'(?P<directive>%s)%s'
                 r'(?P<val>\w+)' %
                 (re.escape(directive), self._optional_equals))
        self._pr_options.append(_pr_compile(regex))

    def _add_option_regex(self, directive, regex):
        regex = (r'(?P<directive>%s)%s'
                 r'(?P<val>%s)' %
                 (re.escape(directive), self._optional_equals, regex))
        self._pr_options.append(_pr_compile(regex))

_options_of_type_string = ('COMMENT', 'DATA DIRECTORY', 'INDEX DIRECTORY',
                           'PASSWORD', 'CONNECTION')

log.class_logger(MySQLTableDefinitionParser)
log.class_logger(MySQLDialect)


class _DecodingRowProxy(object):
    """Return unicode-decoded values based on type inspection.

    Smooth over data type issues (esp. with alpha driver versions) and
    normalize strings as Unicode regardless of user-configured driver
    encoding settings.
""" # Some MySQL-python versions can return some columns as # sets.Set(['value']) (seriously) but thankfully that doesn't # seem to come up in DDL queries. def __init__(self, rowproxy, charset): self.rowproxy = rowproxy self.charset = charset def __getitem__(self, index): item = self.rowproxy[index] if isinstance(item, _array): item = item.tostring() # Py2K if self.charset and isinstance(item, str): # end Py2K # Py3K #if self.charset and isinstance(item, bytes): return item.decode(self.charset) else: return item def __getattr__(self, attr): item = getattr(self.rowproxy, attr) if isinstance(item, _array): item = item.tostring() # Py2K if self.charset and isinstance(item, str): # end Py2K # Py3K #if self.charset and isinstance(item, bytes): return item.decode(self.charset) else: return item def _pr_compile(regex, cleanup=None): """Prepare a 2-tuple of compiled regex and callable.""" return (_re_compile(regex), cleanup) def _re_compile(regex): """Compile a string to regex, I and UNICODE.""" return re.compile(regex, re.I | re.UNICODE) SQLAlchemy-0.8.4/lib/sqlalchemy/dialects/mysql/cymysql.py0000644000076500000240000000367712251150015024156 0ustar classicstaff00000000000000# mysql/cymysql.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """ .. dialect:: mysql+cymysql :name: CyMySQL :dbapi: cymysql :connectstring: mysql+cymysql://:@/[?] :url: https://github.com/nakagami/CyMySQL """ from .mysqldb import MySQLDialect_mysqldb from .base import (BIT, MySQLDialect) from ... import util class _cymysqlBIT(BIT): def result_processor(self, dialect, coltype): """Convert a MySQL's 64 bit, variable length binary string to a long. """ def process(value): if value is not None: # Py2K v = 0L for i in map(ord, value): v = v << 8 | i # end Py2K # Py3K #v = 0 #for i in value: # v = v << 8 | i return v return value return process class MySQLDialect_cymysql(MySQLDialect_mysqldb): driver = 'cymysql' description_encoding = None supports_sane_rowcount = False colspecs = util.update_copy( MySQLDialect.colspecs, { BIT: _cymysqlBIT, } ) @classmethod def dbapi(cls): return __import__('cymysql') def _extract_error_code(self, exception): return exception.errno def is_disconnect(self, e, connection, cursor): if isinstance(e, self.dbapi.OperationalError): return self._extract_error_code(e) in \ (2006, 2013, 2014, 2045, 2055) elif isinstance(e, self.dbapi.InterfaceError): # if underlying connection is closed, # this is the error you get return True else: return False dialect = MySQLDialect_cymysql SQLAlchemy-0.8.4/lib/sqlalchemy/dialects/mysql/gaerdbms.py0000644000076500000240000000524412251147171024242 0ustar classicstaff00000000000000# mysql/gaerdbms.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """ .. dialect:: mysql+gaerdbms :name: Google Cloud SQL :dbapi: rdbms :connectstring: mysql+gaerdbms:///?instance= :url: https://developers.google.com/appengine/docs/python/cloud-sql/developers-guide This dialect is based primarily on the :mod:`.mysql.mysqldb` dialect with minimal changes. .. versionadded:: 0.7.8 Pooling ------- Google App Engine connections appear to be randomly recycled, so the dialect does not pool connections. The :class:`.NullPool` implementation is installed within the :class:`.Engine` by default. 
""" import os from .mysqldb import MySQLDialect_mysqldb from ...pool import NullPool import re def _is_dev_environment(): return os.environ.get('SERVER_SOFTWARE', '').startswith('Development/') class MySQLDialect_gaerdbms(MySQLDialect_mysqldb): @classmethod def dbapi(cls): # from django: # http://code.google.com/p/googleappengine/source/ # browse/trunk/python/google/storage/speckle/ # python/django/backend/base.py#118 # see also [ticket:2649] # see also http://stackoverflow.com/q/14224679/34549 from google.appengine.api import apiproxy_stub_map if _is_dev_environment(): from google.appengine.api import rdbms_mysqldb return rdbms_mysqldb elif apiproxy_stub_map.apiproxy.GetStub('rdbms'): from google.storage.speckle.python.api import rdbms_apiproxy return rdbms_apiproxy else: from google.storage.speckle.python.api import rdbms_googleapi return rdbms_googleapi @classmethod def get_pool_class(cls, url): # Cloud SQL connections die at any moment return NullPool def create_connect_args(self, url): opts = url.translate_connect_args() if not _is_dev_environment(): # 'dsn' and 'instance' are because we are skipping # the traditional google.api.rdbms wrapper opts['dsn'] = '' opts['instance'] = url.query['instance'] return [], opts def _extract_error_code(self, exception): match = re.compile(r"^(\d+)L?:|^\((\d+)L?,").match(str(exception)) # The rdbms api will wrap then re-raise some types of errors # making this regex return no matches. code = match.group(1) or match.group(2) if match else None if code: return int(code) dialect = MySQLDialect_gaerdbms SQLAlchemy-0.8.4/lib/sqlalchemy/dialects/mysql/mysqlconnector.py0000644000076500000240000000736112251147171025540 0ustar classicstaff00000000000000# mysql/mysqlconnector.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """ .. dialect:: mysql+mysqlconnector :name: MySQL Connector/Python :dbapi: myconnpy :connectstring: mysql+mysqlconnector://:@[:]/ :url: https://launchpad.net/myconnpy """ from .base import (MySQLDialect, MySQLExecutionContext, MySQLCompiler, MySQLIdentifierPreparer, BIT) from ... 
import util class MySQLExecutionContext_mysqlconnector(MySQLExecutionContext): def get_lastrowid(self): return self.cursor.lastrowid class MySQLCompiler_mysqlconnector(MySQLCompiler): def visit_mod_binary(self, binary, operator, **kw): return self.process(binary.left, **kw) + " %% " + \ self.process(binary.right, **kw) def post_process_text(self, text): return text.replace('%', '%%') class MySQLIdentifierPreparer_mysqlconnector(MySQLIdentifierPreparer): def _escape_identifier(self, value): value = value.replace(self.escape_quote, self.escape_to_quote) return value.replace("%", "%%") class _myconnpyBIT(BIT): def result_processor(self, dialect, coltype): """MySQL-connector already converts mysql bits, so.""" return None class MySQLDialect_mysqlconnector(MySQLDialect): driver = 'mysqlconnector' supports_unicode_statements = True supports_unicode_binds = True supports_sane_rowcount = True supports_sane_multi_rowcount = True supports_native_decimal = True default_paramstyle = 'format' execution_ctx_cls = MySQLExecutionContext_mysqlconnector statement_compiler = MySQLCompiler_mysqlconnector preparer = MySQLIdentifierPreparer_mysqlconnector colspecs = util.update_copy( MySQLDialect.colspecs, { BIT: _myconnpyBIT, } ) @classmethod def dbapi(cls): from mysql import connector return connector def create_connect_args(self, url): opts = url.translate_connect_args(username='user') opts.update(url.query) util.coerce_kw_type(opts, 'buffered', bool) util.coerce_kw_type(opts, 'raise_on_warnings', bool) opts.setdefault('buffered', True) opts.setdefault('raise_on_warnings', True) # FOUND_ROWS must be set in ClientFlag to enable # supports_sane_rowcount. if self.dbapi is not None: try: from mysql.connector.constants import ClientFlag client_flags = opts.get('client_flags', ClientFlag.get_default()) client_flags |= ClientFlag.FOUND_ROWS opts['client_flags'] = client_flags except: pass return [[], opts] def _get_server_version_info(self, connection): dbapi_con = connection.connection version = dbapi_con.get_server_version() return tuple(version) def _detect_charset(self, connection): return connection.connection.charset def _extract_error_code(self, exception): return exception.errno def is_disconnect(self, e, connection, cursor): errnos = (2006, 2013, 2014, 2045, 2055, 2048) exceptions = (self.dbapi.OperationalError, self.dbapi.InterfaceError) if isinstance(e, exceptions): return e.errno in errnos or \ "MySQL Connection not available." in str(e) else: return False def _compat_fetchall(self, rp, charset=None): return rp.fetchall() def _compat_fetchone(self, rp, charset=None): return rp.fetchone() dialect = MySQLDialect_mysqlconnector SQLAlchemy-0.8.4/lib/sqlalchemy/dialects/mysql/mysqldb.py0000644000076500000240000000511512251147171024126 0ustar classicstaff00000000000000# mysql/mysqldb.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """ .. dialect:: mysql+mysqldb :name: MySQL-Python :dbapi: mysqldb :connectstring: mysql+mysqldb://:@[:]/ :url: http://sourceforge.net/projects/mysql-python Unicode ------- MySQLdb will accommodate Python ``unicode`` objects if the ``use_unicode=1`` parameter, or the ``charset`` parameter, is passed as a connection argument. 
Without this setting, many MySQL server installations default to a ``latin1`` encoding for client connections, which has the effect of all data being converted into ``latin1``, even if you have ``utf8`` or another character set configured on your tables and columns. With versions 4.1 and higher, you can change the connection character set either through server configuration or by including the ``charset`` parameter. The ``charset`` parameter as received by MySQL-Python also has the side-effect of enabling ``use_unicode=1``:: # set client encoding to utf8; all strings come back as unicode create_engine('mysql+mysqldb:///mydb?charset=utf8') Manually configuring ``use_unicode=0`` will cause MySQL-python to return encoded strings:: # set client encoding to utf8; all strings come back as utf8 str create_engine('mysql+mysqldb:///mydb?charset=utf8&use_unicode=0') Known Issues ------------- MySQL-python version 1.2.2 has a serious memory leak related to unicode conversion, a feature which is disabled via ``use_unicode=0``. It is strongly advised to use the latest version of MySQL-Python. """ from .base import (MySQLDialect, MySQLExecutionContext, MySQLCompiler, MySQLIdentifierPreparer) from ...connectors.mysqldb import ( MySQLDBExecutionContext, MySQLDBCompiler, MySQLDBIdentifierPreparer, MySQLDBConnector ) class MySQLExecutionContext_mysqldb(MySQLDBExecutionContext, MySQLExecutionContext): pass class MySQLCompiler_mysqldb(MySQLDBCompiler, MySQLCompiler): pass class MySQLIdentifierPreparer_mysqldb(MySQLDBIdentifierPreparer, MySQLIdentifierPreparer): pass class MySQLDialect_mysqldb(MySQLDBConnector, MySQLDialect): execution_ctx_cls = MySQLExecutionContext_mysqldb statement_compiler = MySQLCompiler_mysqldb preparer = MySQLIdentifierPreparer_mysqldb dialect = MySQLDialect_mysqldb SQLAlchemy-0.8.4/lib/sqlalchemy/dialects/mysql/oursql.py0000644000076500000240000002073012251150015023767 0ustar classicstaff00000000000000# mysql/oursql.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """ .. dialect:: mysql+oursql :name: OurSQL :dbapi: oursql :connectstring: mysql+oursql://:@[:]/ :url: http://packages.python.org/oursql/ Unicode ------- oursql defaults to using ``utf8`` as the connection charset, but other encodings may be used instead. Like the MySQL-Python driver, unicode support can be completely disabled:: # oursql sets the connection charset to utf8 automatically; all strings come # back as utf8 str create_engine('mysql+oursql:///mydb?use_unicode=0') To not automatically use ``utf8`` and instead use whatever the connection defaults to, there is a separate parameter:: # use the default connection charset; all strings come back as unicode create_engine('mysql+oursql:///mydb?default_charset=1') # use latin1 as the connection charset; all strings come back as unicode create_engine('mysql+oursql:///mydb?charset=latin1') """ import re from .base import (BIT, MySQLDialect, MySQLExecutionContext) from ... 
import types as sqltypes, util class _oursqlBIT(BIT): def result_processor(self, dialect, coltype): """oursql already converts mysql bits, so.""" return None class MySQLExecutionContext_oursql(MySQLExecutionContext): @property def plain_query(self): return self.execution_options.get('_oursql_plain_query', False) class MySQLDialect_oursql(MySQLDialect): driver = 'oursql' # Py2K supports_unicode_binds = True supports_unicode_statements = True # end Py2K supports_native_decimal = True supports_sane_rowcount = True supports_sane_multi_rowcount = True execution_ctx_cls = MySQLExecutionContext_oursql colspecs = util.update_copy( MySQLDialect.colspecs, { sqltypes.Time: sqltypes.Time, BIT: _oursqlBIT, } ) @classmethod def dbapi(cls): return __import__('oursql') def do_execute(self, cursor, statement, parameters, context=None): """Provide an implementation of *cursor.execute(statement, parameters)*.""" if context and context.plain_query: cursor.execute(statement, plain_query=True) else: cursor.execute(statement, parameters) def do_begin(self, connection): connection.cursor().execute('BEGIN', plain_query=True) def _xa_query(self, connection, query, xid): # Py2K arg = connection.connection._escape_string(xid) # end Py2K # Py3K # charset = self._connection_charset # arg = connection.connection._escape_string(xid.encode(charset)).decode(charset) arg = "'%s'" % arg connection.execution_options(_oursql_plain_query=True).execute(query % arg) # Because mysql is bad, these methods have to be # reimplemented to use _PlainQuery. Basically, some queries # refuse to return any data if they're run through # the parameterized query API, or refuse to be parameterized # in the first place. def do_begin_twophase(self, connection, xid): self._xa_query(connection, 'XA BEGIN %s', xid) def do_prepare_twophase(self, connection, xid): self._xa_query(connection, 'XA END %s', xid) self._xa_query(connection, 'XA PREPARE %s', xid) def do_rollback_twophase(self, connection, xid, is_prepared=True, recover=False): if not is_prepared: self._xa_query(connection, 'XA END %s', xid) self._xa_query(connection, 'XA ROLLBACK %s', xid) def do_commit_twophase(self, connection, xid, is_prepared=True, recover=False): if not is_prepared: self.do_prepare_twophase(connection, xid) self._xa_query(connection, 'XA COMMIT %s', xid) # Q: why didn't we need all these "plain_query" overrides earlier ? # am i on a newer/older version of OurSQL ? 
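    # Note for readers: each reflection entry point below defers to the
    # corresponding MySQLDialect implementation, but first tags the
    # connection with execution_options(_oursql_plain_query=True) so that
    # do_execute() (defined above) routes the statement through oursql's
    # plain, non-parameterized query path.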
def has_table(self, connection, table_name, schema=None): return MySQLDialect.has_table( self, connection.connect().execution_options(_oursql_plain_query=True), table_name, schema ) def get_table_options(self, connection, table_name, schema=None, **kw): return MySQLDialect.get_table_options( self, connection.connect().execution_options(_oursql_plain_query=True), table_name, schema=schema, **kw ) def get_columns(self, connection, table_name, schema=None, **kw): return MySQLDialect.get_columns( self, connection.connect().execution_options(_oursql_plain_query=True), table_name, schema=schema, **kw ) def get_view_names(self, connection, schema=None, **kw): return MySQLDialect.get_view_names( self, connection.connect().execution_options(_oursql_plain_query=True), schema=schema, **kw ) def get_table_names(self, connection, schema=None, **kw): return MySQLDialect.get_table_names( self, connection.connect().execution_options(_oursql_plain_query=True), schema ) def get_schema_names(self, connection, **kw): return MySQLDialect.get_schema_names( self, connection.connect().execution_options(_oursql_plain_query=True), **kw ) def initialize(self, connection): return MySQLDialect.initialize( self, connection.execution_options(_oursql_plain_query=True) ) def _show_create_table(self, connection, table, charset=None, full_name=None): return MySQLDialect._show_create_table( self, connection.contextual_connect(close_with_result=True). execution_options(_oursql_plain_query=True), table, charset, full_name ) def is_disconnect(self, e, connection, cursor): if isinstance(e, self.dbapi.ProgrammingError): return e.errno is None and 'cursor' not in e.args[1] and e.args[1].endswith('closed') else: return e.errno in (2006, 2013, 2014, 2045, 2055) def create_connect_args(self, url): opts = url.translate_connect_args(database='db', username='user', password='passwd') opts.update(url.query) util.coerce_kw_type(opts, 'port', int) util.coerce_kw_type(opts, 'compress', bool) util.coerce_kw_type(opts, 'autoping', bool) util.coerce_kw_type(opts, 'raise_on_warnings', bool) util.coerce_kw_type(opts, 'default_charset', bool) if opts.pop('default_charset', False): opts['charset'] = None else: util.coerce_kw_type(opts, 'charset', str) opts['use_unicode'] = opts.get('use_unicode', True) util.coerce_kw_type(opts, 'use_unicode', bool) # FOUND_ROWS must be set in CLIENT_FLAGS to enable # supports_sane_rowcount. 
opts.setdefault('found_rows', True) ssl = {} for key in ['ssl_ca', 'ssl_key', 'ssl_cert', 'ssl_capath', 'ssl_cipher']: if key in opts: ssl[key[4:]] = opts[key] util.coerce_kw_type(ssl, key[4:], str) del opts[key] if ssl: opts['ssl'] = ssl return [[], opts] def _get_server_version_info(self, connection): dbapi_con = connection.connection version = [] r = re.compile('[.\-]') for n in r.split(dbapi_con.server_info): try: version.append(int(n)) except ValueError: version.append(n) return tuple(version) def _extract_error_code(self, exception): return exception.errno def _detect_charset(self, connection): """Sniff out the character set in use for connection results.""" return connection.connection.charset def _compat_fetchall(self, rp, charset=None): """oursql isn't super-broken like MySQLdb, yaaay.""" return rp.fetchall() def _compat_fetchone(self, rp, charset=None): """oursql isn't super-broken like MySQLdb, yaaay.""" return rp.fetchone() def _compat_first(self, rp, charset=None): return rp.first() dialect = MySQLDialect_oursql SQLAlchemy-0.8.4/lib/sqlalchemy/dialects/mysql/pymysql.py0000644000076500000240000000231312251147171024166 0ustar classicstaff00000000000000# mysql/pymysql.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """ .. dialect:: mysql+pymysql :name: PyMySQL :dbapi: pymysql :connectstring: mysql+pymysql://:@/[?] :url: http://code.google.com/p/pymysql/ MySQL-Python Compatibility -------------------------- The pymysql DBAPI is a pure Python port of the MySQL-python (MySQLdb) driver, and targets 100% compatibility. Most behavioral notes for MySQL-python apply to the pymysql driver as well. """ from .mysqldb import MySQLDialect_mysqldb from ...util import py3k class MySQLDialect_pymysql(MySQLDialect_mysqldb): driver = 'pymysql' description_encoding = None if py3k: supports_unicode_statements = True @classmethod def dbapi(cls): return __import__('pymysql') if py3k: def _extract_error_code(self, exception): if isinstance(exception.args[0], Exception): exception = exception.args[0] return exception.args[0] dialect = MySQLDialect_pymysql SQLAlchemy-0.8.4/lib/sqlalchemy/dialects/mysql/pyodbc.py0000644000076500000240000000507112251147171023734 0ustar classicstaff00000000000000# mysql/pyodbc.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """ .. dialect:: mysql+pyodbc :name: PyODBC :dbapi: pyodbc :connectstring: mysql+pyodbc://:@ :url: http://pypi.python.org/pypi/pyodbc/ Limitations ----------- The mysql-pyodbc dialect is subject to unresolved character encoding issues which exist within the current ODBC drivers available. (see http://code.google.com/p/pyodbc/issues/detail?id=25). Consider usage of OurSQL, MySQLdb, or MySQL-connector/Python. """ from .base import MySQLDialect, MySQLExecutionContext from ...connectors.pyodbc import PyODBCConnector from ... 
import util import re class MySQLExecutionContext_pyodbc(MySQLExecutionContext): def get_lastrowid(self): cursor = self.create_cursor() cursor.execute("SELECT LAST_INSERT_ID()") lastrowid = cursor.fetchone()[0] cursor.close() return lastrowid class MySQLDialect_pyodbc(PyODBCConnector, MySQLDialect): supports_unicode_statements = False execution_ctx_cls = MySQLExecutionContext_pyodbc pyodbc_driver_name = "MySQL" def __init__(self, **kw): # deal with http://code.google.com/p/pyodbc/issues/detail?id=25 kw.setdefault('convert_unicode', True) super(MySQLDialect_pyodbc, self).__init__(**kw) def _detect_charset(self, connection): """Sniff out the character set in use for connection results.""" # Prefer 'character_set_results' for the current connection over the # value in the driver. SET NAMES or individual variable SETs will # change the charset without updating the driver's view of the world. # # If it's decided that issuing that sort of SQL leaves you SOL, then # this can prefer the driver value. rs = connection.execute("SHOW VARIABLES LIKE 'character_set%%'") opts = dict([(row[0], row[1]) for row in self._compat_fetchall(rs)]) for key in ('character_set_connection', 'character_set'): if opts.get(key, None): return opts[key] util.warn("Could not detect the connection character set. Assuming latin1.") return 'latin1' def _extract_error_code(self, exception): m = re.compile(r"\((\d+)\)").search(str(exception.args)) c = m.group(1) if c: return int(c) else: return None dialect = MySQLDialect_pyodbc SQLAlchemy-0.8.4/lib/sqlalchemy/dialects/mysql/zxjdbc.py0000644000076500000240000000730212251150015023726 0ustar classicstaff00000000000000# mysql/zxjdbc.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """ .. dialect:: mysql+zxjdbc :name: zxjdbc for Jython :dbapi: zxjdbc :connectstring: mysql+zxjdbc://:@[:]/ :driverurl: http://dev.mysql.com/downloads/connector/j/ Character Sets -------------- SQLAlchemy zxjdbc dialects pass unicode straight through to the zxjdbc/JDBC layer. To allow multiple character sets to be sent from the MySQL Connector/J JDBC driver, by default SQLAlchemy sets its ``characterEncoding`` connection property to ``UTF-8``. It may be overriden via a ``create_engine`` URL parameter. """ import re from ... import types as sqltypes, util from ...connectors.zxJDBC import ZxJDBCConnector from .base import BIT, MySQLDialect, MySQLExecutionContext class _ZxJDBCBit(BIT): def result_processor(self, dialect, coltype): """Converts boolean or byte arrays from MySQL Connector/J to longs.""" def process(value): if value is None: return value if isinstance(value, bool): return int(value) v = 0L for i in value: v = v << 8 | (i & 0xff) value = v return value return process class MySQLExecutionContext_zxjdbc(MySQLExecutionContext): def get_lastrowid(self): cursor = self.create_cursor() cursor.execute("SELECT LAST_INSERT_ID()") lastrowid = cursor.fetchone()[0] cursor.close() return lastrowid class MySQLDialect_zxjdbc(ZxJDBCConnector, MySQLDialect): jdbc_db_name = 'mysql' jdbc_driver_name = 'com.mysql.jdbc.Driver' execution_ctx_cls = MySQLExecutionContext_zxjdbc colspecs = util.update_copy( MySQLDialect.colspecs, { sqltypes.Time: sqltypes.Time, BIT: _ZxJDBCBit } ) def _detect_charset(self, connection): """Sniff out the character set in use for connection results.""" # Prefer 'character_set_results' for the current connection over the # value in the driver. 
SET NAMES or individual variable SETs will # change the charset without updating the driver's view of the world. # # If it's decided that issuing that sort of SQL leaves you SOL, then # this can prefer the driver value. rs = connection.execute("SHOW VARIABLES LIKE 'character_set%%'") opts = dict((row[0], row[1]) for row in self._compat_fetchall(rs)) for key in ('character_set_connection', 'character_set'): if opts.get(key, None): return opts[key] util.warn("Could not detect the connection character set. Assuming latin1.") return 'latin1' def _driver_kwargs(self): """return kw arg dict to be sent to connect().""" return dict(characterEncoding='UTF-8', yearIsDateType='false') def _extract_error_code(self, exception): # e.g.: DBAPIError: (Error) Table 'test.u2' doesn't exist # [SQLCode: 1146], [SQLState: 42S02] 'DESCRIBE `u2`' () m = re.compile(r"\[SQLCode\: (\d+)\]").search(str(exception.args)) c = m.group(1) if c: return int(c) def _get_server_version_info(self, connection): dbapi_con = connection.connection version = [] r = re.compile('[.\-]') for n in r.split(dbapi_con.dbversion): try: version.append(int(n)) except ValueError: version.append(n) return tuple(version) dialect = MySQLDialect_zxjdbc SQLAlchemy-0.8.4/lib/sqlalchemy/dialects/oracle/0000755000076500000240000000000012251151573022200 5ustar classicstaff00000000000000SQLAlchemy-0.8.4/lib/sqlalchemy/dialects/oracle/__init__.py0000644000076500000240000000144112251147171024310 0ustar classicstaff00000000000000# oracle/__init__.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php from sqlalchemy.dialects.oracle import base, cx_oracle, zxjdbc base.dialect = cx_oracle.dialect from sqlalchemy.dialects.oracle.base import \ VARCHAR, NVARCHAR, CHAR, DATE, DATETIME, NUMBER,\ BLOB, BFILE, CLOB, NCLOB, TIMESTAMP, RAW,\ FLOAT, DOUBLE_PRECISION, LONG, dialect, INTERVAL,\ VARCHAR2, NVARCHAR2, ROWID, dialect __all__ = ( 'VARCHAR', 'NVARCHAR', 'CHAR', 'DATE', 'DATETIME', 'NUMBER', 'BLOB', 'BFILE', 'CLOB', 'NCLOB', 'TIMESTAMP', 'RAW', 'FLOAT', 'DOUBLE_PRECISION', 'LONG', 'dialect', 'INTERVAL', 'VARCHAR2', 'NVARCHAR2', 'ROWID' ) SQLAlchemy-0.8.4/lib/sqlalchemy/dialects/oracle/base.py0000644000076500000240000013057112251150015023461 0ustar classicstaff00000000000000# oracle/base.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """ .. dialect:: oracle :name: Oracle Oracle version 8 through current (11g at the time of this writing) are supported. Connect Arguments ----------------- The dialect supports several :func:`~sqlalchemy.create_engine()` arguments which affect the behavior of the dialect regardless of driver in use. * *use_ansi* - Use ANSI JOIN constructs (see the section on Oracle 8). Defaults to ``True``. If ``False``, Oracle-8 compatible constructs are used for joins. * *optimize_limits* - defaults to ``False``. see the section on LIMIT/OFFSET. * *use_binds_for_limits* - defaults to ``True``. see the section on LIMIT/OFFSET. Auto Increment Behavior ----------------------- SQLAlchemy Table objects which include integer primary keys are usually assumed to have "autoincrementing" behavior, meaning they can generate their own primary key values upon INSERT. 
Since Oracle has no "autoincrement" feature, SQLAlchemy relies upon sequences to produce these values. With the Oracle dialect, *a sequence must always be explicitly specified to enable autoincrement*. This is divergent with the majority of documentation examples which assume the usage of an autoincrement-capable database. To specify sequences, use the sqlalchemy.schema.Sequence object which is passed to a Column construct:: t = Table('mytable', metadata, Column('id', Integer, Sequence('id_seq'), primary_key=True), Column(...), ... ) This step is also required when using table reflection, i.e. autoload=True:: t = Table('mytable', metadata, Column('id', Integer, Sequence('id_seq'), primary_key=True), autoload=True ) Identifier Casing ----------------- In Oracle, the data dictionary represents all case insensitive identifier names using UPPERCASE text. SQLAlchemy on the other hand considers an all-lower case identifier name to be case insensitive. The Oracle dialect converts all case insensitive identifiers to and from those two formats during schema level communication, such as reflection of tables and indexes. Using an UPPERCASE name on the SQLAlchemy side indicates a case sensitive identifier, and SQLAlchemy will quote the name - this will cause mismatches against data dictionary data received from Oracle, so unless identifier names have been truly created as case sensitive (i.e. using quoted names), all lowercase names should be used on the SQLAlchemy side. Unicode ------- .. versionchanged:: 0.6 SQLAlchemy uses the "native unicode" mode provided as of cx_oracle 5. cx_oracle 5.0.2 or greater is recommended for support of NCLOB. If not using cx_oracle 5, the NLS_LANG environment variable needs to be set in order for the oracle client library to use proper encoding, such as "AMERICAN_AMERICA.UTF8". Also note that Oracle supports unicode data through the NVARCHAR and NCLOB data types. When using the SQLAlchemy Unicode and UnicodeText types, these DDL types will be used within CREATE TABLE statements. Usage of VARCHAR2 and CLOB with unicode text still requires NLS_LANG to be set. LIMIT/OFFSET Support -------------------- Oracle has no support for the LIMIT or OFFSET keywords. SQLAlchemy uses a wrapped subquery approach in conjunction with ROWNUM. The exact methodology is taken from http://www.oracle.com/technology/oramag/oracle/06-sep/o56asktom.html . There are two options which affect its behavior: * the "FIRST ROWS()" optimization keyword is not used by default. To enable the usage of this optimization directive, specify ``optimize_limits=True`` to :func:`.create_engine`. * the values passed for the limit/offset are sent as bound parameters. Some users have observed that Oracle produces a poor query plan when the values are sent as binds and not rendered literally. To render the limit/offset values literally within the SQL statement, specify ``use_binds_for_limits=False`` to :func:`.create_engine`. Some users have reported better performance when the entirely different approach of a window query is used, i.e. ROW_NUMBER() OVER (ORDER BY), to provide LIMIT/OFFSET (note that the majority of users don't observe this). To suit this case the method used for LIMIT/OFFSET can be replaced entirely. See the recipe at http://www.sqlalchemy.org/trac/wiki/UsageRecipes/WindowFunctionsByDefault which installs a select compiler that overrides the generation of limit/offset with a window function. ON UPDATE CASCADE ----------------- Oracle doesn't have native ON UPDATE CASCADE functionality. 
A trigger based solution is available at http://asktom.oracle.com/tkyte/update_cascade/index.html . When using the SQLAlchemy ORM, the ORM has limited ability to manually issue cascading updates - specify ForeignKey objects using the "deferrable=True, initially='deferred'" keyword arguments, and specify "passive_updates=False" on each relationship(). Oracle 8 Compatibility ---------------------- When Oracle 8 is detected, the dialect internally configures itself to the following behaviors: * the use_ansi flag is set to False. This has the effect of converting all JOIN phrases into the WHERE clause, and in the case of LEFT OUTER JOIN makes use of Oracle's (+) operator. * the NVARCHAR2 and NCLOB datatypes are no longer generated as DDL when the :class:`~sqlalchemy.types.Unicode` is used - VARCHAR2 and CLOB are issued instead. This because these types don't seem to work correctly on Oracle 8 even though they are available. The :class:`~sqlalchemy.types.NVARCHAR` and :class:`~sqlalchemy.dialects.oracle.NCLOB` types will always generate NVARCHAR2 and NCLOB. * the "native unicode" mode is disabled when using cx_oracle, i.e. SQLAlchemy encodes all Python unicode objects to "string" before passing in as bind parameters. Synonym/DBLINK Reflection ------------------------- When using reflection with Table objects, the dialect can optionally search for tables indicated by synonyms, either in local or remote schemas or accessed over DBLINK, by passing the flag oracle_resolve_synonyms=True as a keyword argument to the Table construct. If synonyms are not in use this flag should be left off. """ import re from sqlalchemy import util, sql from sqlalchemy.engine import default, base, reflection from sqlalchemy.sql import compiler, visitors, expression from sqlalchemy.sql import operators as sql_operators, functions as sql_functions from sqlalchemy import types as sqltypes from sqlalchemy.types import VARCHAR, NVARCHAR, CHAR, DATE, DATETIME, \ BLOB, CLOB, TIMESTAMP, FLOAT RESERVED_WORDS = \ set('SHARE RAW DROP BETWEEN FROM DESC OPTION PRIOR LONG THEN '\ 'DEFAULT ALTER IS INTO MINUS INTEGER NUMBER GRANT IDENTIFIED '\ 'ALL TO ORDER ON FLOAT DATE HAVING CLUSTER NOWAIT RESOURCE '\ 'ANY TABLE INDEX FOR UPDATE WHERE CHECK SMALLINT WITH DELETE '\ 'BY ASC REVOKE LIKE SIZE RENAME NOCOMPRESS NULL GROUP VALUES '\ 'AS IN VIEW EXCLUSIVE COMPRESS SYNONYM SELECT INSERT EXISTS '\ 'NOT TRIGGER ELSE CREATE INTERSECT PCTFREE DISTINCT USER '\ 'CONNECT SET MODE OF UNIQUE VARCHAR2 VARCHAR LOCK OR CHAR '\ 'DECIMAL UNION PUBLIC AND START UID COMMENT CURRENT LEVEL'.split()) NO_ARG_FNS = set('UID CURRENT_DATE SYSDATE USER ' 'CURRENT_TIME CURRENT_TIMESTAMP'.split()) class RAW(sqltypes._Binary): __visit_name__ = 'RAW' OracleRaw = RAW class NCLOB(sqltypes.Text): __visit_name__ = 'NCLOB' class VARCHAR2(VARCHAR): __visit_name__ = 'VARCHAR2' NVARCHAR2 = NVARCHAR class NUMBER(sqltypes.Numeric, sqltypes.Integer): __visit_name__ = 'NUMBER' def __init__(self, precision=None, scale=None, asdecimal=None): if asdecimal is None: asdecimal = bool(scale and scale > 0) super(NUMBER, self).__init__(precision=precision, scale=scale, asdecimal=asdecimal) def adapt(self, impltype): ret = super(NUMBER, self).adapt(impltype) # leave a hint for the DBAPI handler ret._is_oracle_number = True return ret @property def _type_affinity(self): if bool(self.scale and self.scale > 0): return sqltypes.Numeric else: return sqltypes.Integer class DOUBLE_PRECISION(sqltypes.Numeric): __visit_name__ = 'DOUBLE_PRECISION' def __init__(self, precision=None, scale=None, 
asdecimal=None): if asdecimal is None: asdecimal = False super(DOUBLE_PRECISION, self).__init__(precision=precision, scale=scale, asdecimal=asdecimal) class BFILE(sqltypes.LargeBinary): __visit_name__ = 'BFILE' class LONG(sqltypes.Text): __visit_name__ = 'LONG' class INTERVAL(sqltypes.TypeEngine): __visit_name__ = 'INTERVAL' def __init__(self, day_precision=None, second_precision=None): """Construct an INTERVAL. Note that only DAY TO SECOND intervals are currently supported. This is due to a lack of support for YEAR TO MONTH intervals within available DBAPIs (cx_oracle and zxjdbc). :param day_precision: the day precision value. this is the number of digits to store for the day field. Defaults to "2" :param second_precision: the second precision value. this is the number of digits to store for the fractional seconds field. Defaults to "6". """ self.day_precision = day_precision self.second_precision = second_precision @classmethod def _adapt_from_generic_interval(cls, interval): return INTERVAL(day_precision=interval.day_precision, second_precision=interval.second_precision) @property def _type_affinity(self): return sqltypes.Interval class ROWID(sqltypes.TypeEngine): """Oracle ROWID type. When used in a cast() or similar, generates ROWID. """ __visit_name__ = 'ROWID' class _OracleBoolean(sqltypes.Boolean): def get_dbapi_type(self, dbapi): return dbapi.NUMBER colspecs = { sqltypes.Boolean: _OracleBoolean, sqltypes.Interval: INTERVAL, } ischema_names = { 'VARCHAR2': VARCHAR, 'NVARCHAR2': NVARCHAR, 'CHAR': CHAR, 'DATE': DATE, 'NUMBER': NUMBER, 'BLOB': BLOB, 'BFILE': BFILE, 'CLOB': CLOB, 'NCLOB': NCLOB, 'TIMESTAMP': TIMESTAMP, 'TIMESTAMP WITH TIME ZONE': TIMESTAMP, 'INTERVAL DAY TO SECOND': INTERVAL, 'RAW': RAW, 'FLOAT': FLOAT, 'DOUBLE PRECISION': DOUBLE_PRECISION, 'LONG': LONG, } class OracleTypeCompiler(compiler.GenericTypeCompiler): # Note: # Oracle DATE == DATETIME # Oracle does not allow milliseconds in DATE # Oracle does not support TIME columns def visit_datetime(self, type_): return self.visit_DATE(type_) def visit_float(self, type_): return self.visit_FLOAT(type_) def visit_unicode(self, type_): if self.dialect._supports_nchar: return self.visit_NVARCHAR2(type_) else: return self.visit_VARCHAR2(type_) def visit_INTERVAL(self, type_): return "INTERVAL DAY%s TO SECOND%s" % ( type_.day_precision is not None and "(%d)" % type_.day_precision or "", type_.second_precision is not None and "(%d)" % type_.second_precision or "", ) def visit_LONG(self, type_): return "LONG" def visit_TIMESTAMP(self, type_): if type_.timezone: return "TIMESTAMP WITH TIME ZONE" else: return "TIMESTAMP" def visit_DOUBLE_PRECISION(self, type_): return self._generate_numeric(type_, "DOUBLE PRECISION") def visit_NUMBER(self, type_, **kw): return self._generate_numeric(type_, "NUMBER", **kw) def _generate_numeric(self, type_, name, precision=None, scale=None): if precision is None: precision = type_.precision if scale is None: scale = getattr(type_, 'scale', None) if precision is None: return name elif scale is None: n = "%(name)s(%(precision)s)" return n % {'name': name, 'precision': precision} else: n = "%(name)s(%(precision)s, %(scale)s)" return n % {'name': name, 'precision': precision, 'scale': scale} def visit_string(self, type_): return self.visit_VARCHAR2(type_) def visit_VARCHAR2(self, type_): return self._visit_varchar(type_, '', '2') def visit_NVARCHAR2(self, type_): return self._visit_varchar(type_, 'N', '2') visit_NVARCHAR = visit_NVARCHAR2 def visit_VARCHAR(self, type_): return self._visit_varchar(type_, 
'', '') def _visit_varchar(self, type_, n, num): if not type_.length: return "%(n)sVARCHAR%(two)s" % {'two': num, 'n': n} elif not n and self.dialect._supports_char_length: varchar = "VARCHAR%(two)s(%(length)s CHAR)" return varchar % {'length': type_.length, 'two': num} else: varchar = "%(n)sVARCHAR%(two)s(%(length)s)" return varchar % {'length': type_.length, 'two': num, 'n': n} def visit_text(self, type_): return self.visit_CLOB(type_) def visit_unicode_text(self, type_): if self.dialect._supports_nchar: return self.visit_NCLOB(type_) else: return self.visit_CLOB(type_) def visit_large_binary(self, type_): return self.visit_BLOB(type_) def visit_big_integer(self, type_): return self.visit_NUMBER(type_, precision=19) def visit_boolean(self, type_): return self.visit_SMALLINT(type_) def visit_RAW(self, type_): if type_.length: return "RAW(%(length)s)" % {'length': type_.length} else: return "RAW" def visit_ROWID(self, type_): return "ROWID" class OracleCompiler(compiler.SQLCompiler): """Oracle compiler modifies the lexical structure of Select statements to work under non-ANSI configured Oracle databases, if the use_ansi flag is False. """ compound_keywords = util.update_copy( compiler.SQLCompiler.compound_keywords, { expression.CompoundSelect.EXCEPT: 'MINUS' } ) def __init__(self, *args, **kwargs): self.__wheres = {} self._quoted_bind_names = {} super(OracleCompiler, self).__init__(*args, **kwargs) def visit_mod_binary(self, binary, operator, **kw): return "mod(%s, %s)" % (self.process(binary.left, **kw), self.process(binary.right, **kw)) def visit_now_func(self, fn, **kw): return "CURRENT_TIMESTAMP" def visit_char_length_func(self, fn, **kw): return "LENGTH" + self.function_argspec(fn, **kw) def visit_match_op_binary(self, binary, operator, **kw): return "CONTAINS (%s, %s)" % (self.process(binary.left), self.process(binary.right)) def visit_true(self, expr, **kw): return '1' def visit_false(self, expr, **kw): return '0' def get_select_hint_text(self, byfroms): return " ".join( "/*+ %s */" % text for table, text in byfroms.items() ) def function_argspec(self, fn, **kw): if len(fn.clauses) > 0 or fn.name.upper() not in NO_ARG_FNS: return compiler.SQLCompiler.function_argspec(self, fn, **kw) else: return "" def default_from(self): """Called when a ``SELECT`` statement has no froms, and no ``FROM`` clause is to be appended. The Oracle compiler tacks a "FROM DUAL" to the statement. 
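        For example, a statement with no FROM objects, such as
        ``select([literal_column('1')])``, will have ``FROM DUAL``
        appended, rendering approximately as ``SELECT 1 FROM DUAL``
        on this dialect.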
""" return " FROM DUAL" def visit_join(self, join, **kwargs): if self.dialect.use_ansi: return compiler.SQLCompiler.visit_join(self, join, **kwargs) else: kwargs['asfrom'] = True return self.process(join.left, **kwargs) + \ ", " + self.process(join.right, **kwargs) def _get_nonansi_join_whereclause(self, froms): clauses = [] def visit_join(join): if join.isouter: def visit_binary(binary): if binary.operator == sql_operators.eq: if binary.left.table is join.right: binary.left = _OuterJoinColumn(binary.left) elif binary.right.table is join.right: binary.right = _OuterJoinColumn(binary.right) clauses.append(visitors.cloned_traverse(join.onclause, {}, {'binary': visit_binary})) else: clauses.append(join.onclause) for j in join.left, join.right: if isinstance(j, expression.Join): visit_join(j) for f in froms: if isinstance(f, expression.Join): visit_join(f) if not clauses: return None else: return sql.and_(*clauses) def visit_outer_join_column(self, vc): return self.process(vc.column) + "(+)" def visit_sequence(self, seq): return self.dialect.identifier_preparer.format_sequence(seq) + ".nextval" def visit_alias(self, alias, asfrom=False, ashint=False, **kwargs): """Oracle doesn't like ``FROM table AS alias``. Is the AS standard SQL??""" if asfrom or ashint: alias_name = isinstance(alias.name, expression._truncated_label) and \ self._truncated_identifier("alias", alias.name) or alias.name if ashint: return alias_name elif asfrom: return self.process(alias.original, asfrom=asfrom, **kwargs) + \ " " + self.preparer.format_alias(alias, alias_name) else: return self.process(alias.original, **kwargs) def returning_clause(self, stmt, returning_cols): columns = [] binds = [] for i, column in enumerate(expression._select_iterables(returning_cols)): if column.type._has_column_expression: col_expr = column.type.column_expression(column) else: col_expr = column outparam = sql.outparam("ret_%d" % i, type_=column.type) self.binds[outparam.key] = outparam binds.append(self.bindparam_string(self._truncate_bindparam(outparam))) columns.append(self.process(col_expr, within_columns_clause=False)) self.result_map[outparam.key] = ( outparam.key, (column, getattr(column, 'name', None), getattr(column, 'key', None)), column.type ) return 'RETURNING ' + ', '.join(columns) + " INTO " + ", ".join(binds) def _TODO_visit_compound_select(self, select): """Need to determine how to get ``LIMIT``/``OFFSET`` into a ``UNION`` for Oracle.""" pass def visit_select(self, select, **kwargs): """Look for ``LIMIT`` and OFFSET in a select statement, and if so tries to wrap it in a subquery with ``rownum`` criterion. """ if not getattr(select, '_oracle_visit', None): if not self.dialect.use_ansi: froms = self._display_froms_for_select( select, kwargs.get('asfrom', False)) whereclause = self._get_nonansi_join_whereclause(froms) if whereclause is not None: select = select.where(whereclause) select._oracle_visit = True if select._limit is not None or select._offset is not None: # See http://www.oracle.com/technology/oramag/oracle/06-sep/o56asktom.html # # Generalized form of an Oracle pagination query: # select ... from ( # select /*+ FIRST_ROWS(N) */ ...., rownum as ora_rn from ( # select distinct ... where ... order by ... # ) where ROWNUM <= :limit+:offset # ) where ora_rn > :offset # Outer select and "ROWNUM as ora_rn" can be dropped if limit=0 # TODO: use annotations instead of clone + attr set ? 
select = select._generate() select._oracle_visit = True # Wrap the middle select and add the hint limitselect = sql.select([c for c in select.c]) if select._limit and self.dialect.optimize_limits: limitselect = limitselect.prefix_with("/*+ FIRST_ROWS(%d) */" % select._limit) limitselect._oracle_visit = True limitselect._is_wrapper = True # If needed, add the limiting clause if select._limit is not None: max_row = select._limit if select._offset is not None: max_row += select._offset if not self.dialect.use_binds_for_limits: max_row = sql.literal_column("%d" % max_row) limitselect.append_whereclause( sql.literal_column("ROWNUM") <= max_row) # If needed, add the ora_rn, and wrap again with offset. if select._offset is None: limitselect.for_update = select.for_update select = limitselect else: limitselect = limitselect.column( sql.literal_column("ROWNUM").label("ora_rn")) limitselect._oracle_visit = True limitselect._is_wrapper = True offsetselect = sql.select( [c for c in limitselect.c if c.key != 'ora_rn']) offsetselect._oracle_visit = True offsetselect._is_wrapper = True offset_value = select._offset if not self.dialect.use_binds_for_limits: offset_value = sql.literal_column("%d" % offset_value) offsetselect.append_whereclause( sql.literal_column("ora_rn") > offset_value) offsetselect.for_update = select.for_update select = offsetselect kwargs['iswrapper'] = getattr(select, '_is_wrapper', False) return compiler.SQLCompiler.visit_select(self, select, **kwargs) def limit_clause(self, select): return "" def for_update_clause(self, select): if self.is_subquery(): return "" elif select.for_update == "nowait": return " FOR UPDATE NOWAIT" else: return super(OracleCompiler, self).for_update_clause(select) class OracleDDLCompiler(compiler.DDLCompiler): def define_constraint_cascades(self, constraint): text = "" if constraint.ondelete is not None: text += " ON DELETE %s" % constraint.ondelete # oracle has no ON UPDATE CASCADE - # its only available via triggers http://asktom.oracle.com/tkyte/update_cascade/index.html if constraint.onupdate is not None: util.warn( "Oracle does not contain native UPDATE CASCADE " "functionality - onupdates will not be rendered for foreign keys. 
" "Consider using deferrable=True, initially='deferred' or triggers.") return text def visit_create_index(self, create, **kw): return super(OracleDDLCompiler, self).\ visit_create_index(create, include_schema=True) class OracleIdentifierPreparer(compiler.IdentifierPreparer): reserved_words = set([x.lower() for x in RESERVED_WORDS]) illegal_initial_characters = set(xrange(0, 10)).union(["_", "$"]) def _bindparam_requires_quotes(self, value): """Return True if the given identifier requires quoting.""" lc_value = value.lower() return (lc_value in self.reserved_words or value[0] in self.illegal_initial_characters or not self.legal_characters.match(unicode(value)) ) def format_savepoint(self, savepoint): name = re.sub(r'^_+', '', savepoint.ident) return super(OracleIdentifierPreparer, self).format_savepoint(savepoint, name) class OracleExecutionContext(default.DefaultExecutionContext): def fire_sequence(self, seq, type_): return self._execute_scalar("SELECT " + self.dialect.identifier_preparer.format_sequence(seq) + ".nextval FROM DUAL", type_) class OracleDialect(default.DefaultDialect): name = 'oracle' supports_alter = True supports_unicode_statements = False supports_unicode_binds = False max_identifier_length = 30 supports_sane_rowcount = True supports_sane_multi_rowcount = False supports_sequences = True sequences_optional = False postfetch_lastrowid = False default_paramstyle = 'named' colspecs = colspecs ischema_names = ischema_names requires_name_normalize = True supports_default_values = False supports_empty_insert = False statement_compiler = OracleCompiler ddl_compiler = OracleDDLCompiler type_compiler = OracleTypeCompiler preparer = OracleIdentifierPreparer execution_ctx_cls = OracleExecutionContext reflection_options = ('oracle_resolve_synonyms', ) def __init__(self, use_ansi=True, optimize_limits=False, use_binds_for_limits=True, **kwargs): default.DefaultDialect.__init__(self, **kwargs) self.use_ansi = use_ansi self.optimize_limits = optimize_limits self.use_binds_for_limits = use_binds_for_limits def initialize(self, connection): super(OracleDialect, self).initialize(connection) self.implicit_returning = self.__dict__.get( 'implicit_returning', self.server_version_info > (10, ) ) if self._is_oracle_8: self.colspecs = self.colspecs.copy() self.colspecs.pop(sqltypes.Interval) self.use_ansi = False @property def _is_oracle_8(self): return self.server_version_info and \ self.server_version_info < (9, ) @property def _supports_char_length(self): return not self._is_oracle_8 @property def _supports_nchar(self): return not self._is_oracle_8 def do_release_savepoint(self, connection, name): # Oracle does not support RELEASE SAVEPOINT pass def has_table(self, connection, table_name, schema=None): if not schema: schema = self.default_schema_name cursor = connection.execute( sql.text("SELECT table_name FROM all_tables " "WHERE table_name = :name AND owner = :schema_name"), name=self.denormalize_name(table_name), schema_name=self.denormalize_name(schema)) return cursor.first() is not None def has_sequence(self, connection, sequence_name, schema=None): if not schema: schema = self.default_schema_name cursor = connection.execute( sql.text("SELECT sequence_name FROM all_sequences " "WHERE sequence_name = :name AND sequence_owner = :schema_name"), name=self.denormalize_name(sequence_name), schema_name=self.denormalize_name(schema)) return cursor.first() is not None def normalize_name(self, name): if name is None: return None # Py2K if isinstance(name, str): name = name.decode(self.encoding) # 
end Py2K if name.upper() == name and \ not self.identifier_preparer._requires_quotes(name.lower()): return name.lower() else: return name def denormalize_name(self, name): if name is None: return None elif name.lower() == name and not self.identifier_preparer._requires_quotes(name.lower()): name = name.upper() # Py2K if not self.supports_unicode_binds: name = name.encode(self.encoding) else: name = unicode(name) # end Py2K return name def _get_default_schema_name(self, connection): return self.normalize_name(connection.execute(u'SELECT USER FROM DUAL').scalar()) def _resolve_synonym(self, connection, desired_owner=None, desired_synonym=None, desired_table=None): """search for a local synonym matching the given desired owner/name. if desired_owner is None, attempts to locate a distinct owner. returns the actual name, owner, dblink name, and synonym name if found. """ q = "SELECT owner, table_owner, table_name, db_link, "\ "synonym_name FROM all_synonyms WHERE " clauses = [] params = {} if desired_synonym: clauses.append("synonym_name = :synonym_name") params['synonym_name'] = desired_synonym if desired_owner: clauses.append("owner = :desired_owner") params['desired_owner'] = desired_owner if desired_table: clauses.append("table_name = :tname") params['tname'] = desired_table q += " AND ".join(clauses) result = connection.execute(sql.text(q), **params) if desired_owner: row = result.first() if row: return row['table_name'], row['table_owner'], row['db_link'], row['synonym_name'] else: return None, None, None, None else: rows = result.fetchall() if len(rows) > 1: raise AssertionError("There are multiple tables visible to the schema, you must specify owner") elif len(rows) == 1: row = rows[0] return row['table_name'], row['table_owner'], row['db_link'], row['synonym_name'] else: return None, None, None, None @reflection.cache def _prepare_reflection_args(self, connection, table_name, schema=None, resolve_synonyms=False, dblink='', **kw): if resolve_synonyms: actual_name, owner, dblink, synonym = self._resolve_synonym( connection, desired_owner=self.denormalize_name(schema), desired_synonym=self.denormalize_name(table_name) ) else: actual_name, owner, dblink, synonym = None, None, None, None if not actual_name: actual_name = self.denormalize_name(table_name) if dblink: # using user_db_links here since all_db_links appears # to have more restricted permissions. # http://docs.oracle.com/cd/B28359_01/server.111/b28310/ds_admin005.htm # will need to hear from more users if we are doing # the right thing here. 
See [ticket:2619] owner = connection.scalar( sql.text("SELECT username FROM user_db_links " "WHERE db_link=:link"), link=dblink) dblink = "@" + dblink elif not owner: owner = self.denormalize_name(schema or self.default_schema_name) return (actual_name, owner, dblink or '', synonym) @reflection.cache def get_schema_names(self, connection, **kw): s = "SELECT username FROM all_users ORDER BY username" cursor = connection.execute(s,) return [self.normalize_name(row[0]) for row in cursor] @reflection.cache def get_table_names(self, connection, schema=None, **kw): schema = self.denormalize_name(schema or self.default_schema_name) # note that table_names() isnt loading DBLINKed or synonym'ed tables if schema is None: schema = self.default_schema_name s = sql.text( "SELECT table_name FROM all_tables " "WHERE nvl(tablespace_name, 'no tablespace') NOT IN ('SYSTEM', 'SYSAUX') " "AND OWNER = :owner " "AND IOT_NAME IS NULL") cursor = connection.execute(s, owner=schema) return [self.normalize_name(row[0]) for row in cursor] @reflection.cache def get_view_names(self, connection, schema=None, **kw): schema = self.denormalize_name(schema or self.default_schema_name) s = sql.text("SELECT view_name FROM all_views WHERE owner = :owner") cursor = connection.execute(s, owner=self.denormalize_name(schema)) return [self.normalize_name(row[0]) for row in cursor] @reflection.cache def get_columns(self, connection, table_name, schema=None, **kw): """ kw arguments can be: oracle_resolve_synonyms dblink """ resolve_synonyms = kw.get('oracle_resolve_synonyms', False) dblink = kw.get('dblink', '') info_cache = kw.get('info_cache') (table_name, schema, dblink, synonym) = \ self._prepare_reflection_args(connection, table_name, schema, resolve_synonyms, dblink, info_cache=info_cache) columns = [] if self._supports_char_length: char_length_col = 'char_length' else: char_length_col = 'data_length' params = {"table_name": table_name} text = "SELECT column_name, data_type, %(char_length_col)s, "\ "data_precision, data_scale, "\ "nullable, data_default FROM ALL_TAB_COLUMNS%(dblink)s "\ "WHERE table_name = :table_name" if schema is not None: params['owner'] = schema text += " AND owner = :owner " text += " ORDER BY column_id" text = text % {'dblink': dblink, 'char_length_col': char_length_col} c = connection.execute(sql.text(text), **params) for row in c: (colname, orig_colname, coltype, length, precision, scale, nullable, default) = \ (self.normalize_name(row[0]), row[0], row[1], row[2], row[3], row[4], row[5] == 'Y', row[6]) if coltype == 'NUMBER': coltype = NUMBER(precision, scale) elif coltype in ('VARCHAR2', 'NVARCHAR2', 'CHAR'): coltype = self.ischema_names.get(coltype)(length) elif 'WITH TIME ZONE' in coltype: coltype = TIMESTAMP(timezone=True) else: coltype = re.sub(r'\(\d+\)', '', coltype) try: coltype = self.ischema_names[coltype] except KeyError: util.warn("Did not recognize type '%s' of column '%s'" % (coltype, colname)) coltype = sqltypes.NULLTYPE cdict = { 'name': colname, 'type': coltype, 'nullable': nullable, 'default': default, 'autoincrement': default is None } if orig_colname.lower() == orig_colname: cdict['quote'] = True columns.append(cdict) return columns @reflection.cache def get_indexes(self, connection, table_name, schema=None, resolve_synonyms=False, dblink='', **kw): info_cache = kw.get('info_cache') (table_name, schema, dblink, synonym) = \ self._prepare_reflection_args(connection, table_name, schema, resolve_synonyms, dblink, info_cache=info_cache) indexes = [] params = {'table_name': table_name} 
text = \ "SELECT a.index_name, a.column_name, b.uniqueness "\ "\nFROM ALL_IND_COLUMNS%(dblink)s a, "\ "\nALL_INDEXES%(dblink)s b "\ "\nWHERE "\ "\na.index_name = b.index_name "\ "\nAND a.table_owner = b.table_owner "\ "\nAND a.table_name = b.table_name "\ "\nAND a.table_name = :table_name " if schema is not None: params['schema'] = schema text += "AND a.table_owner = :schema " text += "ORDER BY a.index_name, a.column_position" text = text % {'dblink': dblink} q = sql.text(text) rp = connection.execute(q, **params) indexes = [] last_index_name = None pk_constraint = self.get_pk_constraint( connection, table_name, schema, resolve_synonyms=resolve_synonyms, dblink=dblink, info_cache=kw.get('info_cache')) pkeys = pk_constraint['constrained_columns'] uniqueness = dict(NONUNIQUE=False, UNIQUE=True) oracle_sys_col = re.compile(r'SYS_NC\d+\$', re.IGNORECASE) def upper_name_set(names): return set([i.upper() for i in names]) pk_names = upper_name_set(pkeys) def remove_if_primary_key(index): # don't include the primary key index if index is not None and \ upper_name_set(index['column_names']) == pk_names: indexes.pop() index = None for rset in rp: if rset.index_name != last_index_name: remove_if_primary_key(index) index = dict(name=self.normalize_name(rset.index_name), column_names=[]) indexes.append(index) index['unique'] = uniqueness.get(rset.uniqueness, False) # filter out Oracle SYS_NC names. could also do an outer join # to the all_tab_columns table and check for real col names there. if not oracle_sys_col.match(rset.column_name): index['column_names'].append(self.normalize_name(rset.column_name)) last_index_name = rset.index_name remove_if_primary_key(index) return indexes @reflection.cache def _get_constraint_data(self, connection, table_name, schema=None, dblink='', **kw): params = {'table_name': table_name} text = \ "SELECT"\ "\nac.constraint_name,"\ "\nac.constraint_type,"\ "\nloc.column_name AS local_column,"\ "\nrem.table_name AS remote_table,"\ "\nrem.column_name AS remote_column,"\ "\nrem.owner AS remote_owner,"\ "\nloc.position as loc_pos,"\ "\nrem.position as rem_pos"\ "\nFROM all_constraints%(dblink)s ac,"\ "\nall_cons_columns%(dblink)s loc,"\ "\nall_cons_columns%(dblink)s rem"\ "\nWHERE ac.table_name = :table_name"\ "\nAND ac.constraint_type IN ('R','P')" if schema is not None: params['owner'] = schema text += "\nAND ac.owner = :owner" text += \ "\nAND ac.owner = loc.owner"\ "\nAND ac.constraint_name = loc.constraint_name"\ "\nAND ac.r_owner = rem.owner(+)"\ "\nAND ac.r_constraint_name = rem.constraint_name(+)"\ "\nAND (rem.position IS NULL or loc.position=rem.position)"\ "\nORDER BY ac.constraint_name, loc.position" text = text % {'dblink': dblink} rp = connection.execute(sql.text(text), **params) constraint_data = rp.fetchall() return constraint_data @reflection.cache def get_pk_constraint(self, connection, table_name, schema=None, **kw): resolve_synonyms = kw.get('oracle_resolve_synonyms', False) dblink = kw.get('dblink', '') info_cache = kw.get('info_cache') (table_name, schema, dblink, synonym) = \ self._prepare_reflection_args(connection, table_name, schema, resolve_synonyms, dblink, info_cache=info_cache) pkeys = [] constraint_name = None constraint_data = self._get_constraint_data(connection, table_name, schema, dblink, info_cache=kw.get('info_cache')) for row in constraint_data: (cons_name, cons_type, local_column, remote_table, remote_column, remote_owner) = \ row[0:2] + tuple([self.normalize_name(x) for x in row[2:6]]) if cons_type == 'P': if constraint_name is None: 
constraint_name = self.normalize_name(cons_name) pkeys.append(local_column) return {'constrained_columns': pkeys, 'name': constraint_name} @reflection.cache def get_foreign_keys(self, connection, table_name, schema=None, **kw): """ kw arguments can be: oracle_resolve_synonyms dblink """ requested_schema = schema # to check later on resolve_synonyms = kw.get('oracle_resolve_synonyms', False) dblink = kw.get('dblink', '') info_cache = kw.get('info_cache') (table_name, schema, dblink, synonym) = \ self._prepare_reflection_args(connection, table_name, schema, resolve_synonyms, dblink, info_cache=info_cache) constraint_data = self._get_constraint_data(connection, table_name, schema, dblink, info_cache=kw.get('info_cache')) def fkey_rec(): return { 'name': None, 'constrained_columns': [], 'referred_schema': None, 'referred_table': None, 'referred_columns': [] } fkeys = util.defaultdict(fkey_rec) for row in constraint_data: (cons_name, cons_type, local_column, remote_table, remote_column, remote_owner) = \ row[0:2] + tuple([self.normalize_name(x) for x in row[2:6]]) if cons_type == 'R': if remote_table is None: # ticket 363 util.warn( ("Got 'None' querying 'table_name' from " "all_cons_columns%(dblink)s - does the user have " "proper rights to the table?") % {'dblink': dblink}) continue rec = fkeys[cons_name] rec['name'] = cons_name local_cols, remote_cols = rec['constrained_columns'], rec['referred_columns'] if not rec['referred_table']: if resolve_synonyms: ref_remote_name, ref_remote_owner, ref_dblink, ref_synonym = \ self._resolve_synonym( connection, desired_owner=self.denormalize_name(remote_owner), desired_table=self.denormalize_name(remote_table) ) if ref_synonym: remote_table = self.normalize_name(ref_synonym) remote_owner = self.normalize_name(ref_remote_owner) rec['referred_table'] = remote_table if requested_schema is not None or self.denormalize_name(remote_owner) != schema: rec['referred_schema'] = remote_owner local_cols.append(local_column) remote_cols.append(remote_column) return fkeys.values() @reflection.cache def get_view_definition(self, connection, view_name, schema=None, resolve_synonyms=False, dblink='', **kw): info_cache = kw.get('info_cache') (view_name, schema, dblink, synonym) = \ self._prepare_reflection_args(connection, view_name, schema, resolve_synonyms, dblink, info_cache=info_cache) params = {'view_name': view_name} text = "SELECT text FROM all_views WHERE view_name=:view_name" if schema is not None: text += " AND owner = :schema" params['schema'] = schema rp = connection.execute(sql.text(text), **params).scalar() if rp: return rp.decode(self.encoding) else: return None class _OuterJoinColumn(sql.ClauseElement): __visit_name__ = 'outer_join_column' def __init__(self, column): self.column = column SQLAlchemy-0.8.4/lib/sqlalchemy/dialects/oracle/cx_oracle.py0000644000076500000240000010076012251150015024503 0ustar classicstaff00000000000000# oracle/cx_oracle.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """ .. dialect:: oracle+cx_oracle :name: cx-Oracle :dbapi: cx_oracle :connectstring: oracle+cx_oracle://user:pass@host:port/dbname[?key=value&key=value...] :url: http://cx-oracle.sourceforge.net/ Additional Connect Arguments ---------------------------- When connecting with ``dbname`` present, the host, port, and dbname tokens are converted to a TNS name using the cx_oracle :func:`makedsn()` function. 
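For example, a URL of the following form (hostname, port, and SID here are
placeholder values) has its host/port/dbname tokens converted to a TNS name
via ``makedsn()``::

    create_engine('oracle+cx_oracle://scott:tiger@dbhost:1521/orcl')
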
Otherwise, the host token is taken directly as a TNS name. Additional arguments which may be specified either as query string arguments on the URL, or as keyword arguments to :func:`~sqlalchemy.create_engine()` are: * allow_twophase - enable two-phase transactions. Defaults to ``True``. * arraysize - set the cx_oracle.arraysize value on cursors, in SQLAlchemy it defaults to 50. See the section on "LOB Objects" below. * auto_convert_lobs - defaults to True, see the section on LOB objects. * auto_setinputsizes - the cx_oracle.setinputsizes() call is issued for all bind parameters. This is required for LOB datatypes but can be disabled to reduce overhead. Defaults to ``True``. Specific types can be excluded from this process using the ``exclude_setinputsizes`` parameter. * exclude_setinputsizes - a tuple or list of string DBAPI type names to be excluded from the "auto setinputsizes" feature. The type names here must match DBAPI types that are found in the "cx_Oracle" module namespace, such as cx_Oracle.UNICODE, cx_Oracle.NCLOB, etc. Defaults to ``(STRING, UNICODE)``. .. versionadded:: 0.8 specific DBAPI types can be excluded from the auto_setinputsizes feature via the exclude_setinputsizes attribute. * mode - This is given the string value of SYSDBA or SYSOPER, or alternatively an integer value. This value is only available as a URL query string argument. * threaded - enable multithreaded access to cx_oracle connections. Defaults to ``True``. Note that this is the opposite default of the cx_Oracle DBAPI itself. Unicode ------- cx_oracle 5 fully supports Python unicode objects. SQLAlchemy will pass all unicode strings directly to cx_oracle, and additionally uses an output handler so that all string based result values are returned as unicode as well. Generally, the ``NLS_LANG`` environment variable determines the nature of the encoding to be used. Note that this behavior is disabled when Oracle 8 is detected, as it has been observed that issues remain when passing Python unicodes to cx_oracle with Oracle 8. LOB Objects ----------- cx_oracle returns oracle LOBs using the cx_oracle.LOB object. SQLAlchemy converts these to strings so that the interface of the Binary type is consistent with that of other backends, and so that the linkage to a live cursor is not needed in scenarios like result.fetchmany() and result.fetchall(). This means that by default, LOB objects are fully fetched unconditionally by SQLAlchemy, and the linkage to a live cursor is broken. To disable this processing, pass ``auto_convert_lobs=False`` to :func:`create_engine()`. Two Phase Transaction Support ----------------------------- Two Phase transactions are implemented using XA transactions, and are known to work in a rudimental fashion with recent versions of cx_Oracle as of SQLAlchemy 0.8.0b2, 0.7.10. However, the mechanism is not yet considered to be robust and should still be regarded as experimental. In particular, the cx_Oracle DBAPI as recently as 5.1.2 has a bug regarding two phase which prevents a particular DBAPI connection from being consistently usable in both prepared transactions as well as traditional DBAPI usage patterns; therefore once a particular connection is used via :meth:`.Connection.begin_prepared`, all subsequent usages of the underlying DBAPI connection must be within the context of prepared transactions. The default behavior of :class:`.Engine` is to maintain a pool of DBAPI connections. 
Therefore, due to the above glitch, a DBAPI connection that has been used in a two-phase operation, and is then returned to the pool, will not be usable in a non-two-phase context. To avoid this situation, the application can make one of several choices: * Disable connection pooling using :class:`.NullPool` * Ensure that the particular :class:`.Engine` in use is only used for two-phase operations. A :class:`.Engine` bound to an ORM :class:`.Session` which includes ``twophase=True`` will consistently use the two-phase transaction style. * For ad-hoc two-phase operations without disabling pooling, the DBAPI connection in use can be evicted from the connection pool using the :class:`.Connection.detach` method. .. versionchanged:: 0.8.0b2,0.7.10 Support for cx_oracle prepared transactions has been implemented and tested. Precision Numerics ------------------ The SQLAlchemy dialect goes through a lot of steps to ensure that decimal numbers are sent and received with full accuracy. An "outputtypehandler" callable is associated with each cx_oracle connection object which detects numeric types and receives them as string values, instead of receiving a Python ``float`` directly, which is then passed to the Python ``Decimal`` constructor. The :class:`.Numeric` and :class:`.Float` types under the cx_oracle dialect are aware of this behavior, and will coerce the ``Decimal`` to ``float`` if the ``asdecimal`` flag is ``False`` (default on :class:`.Float`, optional on :class:`.Numeric`). Because the handler coerces to ``Decimal`` in all cases first, the feature can detract significantly from performance. If precision numerics aren't required, the decimal handling can be disabled by passing the flag ``coerce_to_decimal=False`` to :func:`.create_engine`:: engine = create_engine("oracle+cx_oracle://dsn", coerce_to_decimal=False) .. versionadded:: 0.7.6 Add the ``coerce_to_decimal`` flag. Another alternative to performance is to use the `cdecimal `_ library; see :class:`.Numeric` for additional notes. The handler attempts to use the "precision" and "scale" attributes of the result set column to best determine if subsequent incoming values should be received as ``Decimal`` as opposed to int (in which case no processing is added). There are several scenarios where OCI_ does not provide unambiguous data as to the numeric type, including some situations where individual rows may return a combination of floating point and integer values. Certain values for "precision" and "scale" have been observed to determine this scenario. When it occurs, the outputtypehandler receives as string and then passes off to a processing function which detects, for each returned value, if a decimal point is present, and if so converts to ``Decimal``, otherwise to int. The intention is that simple int-based statements like "SELECT my_seq.nextval() FROM DUAL" continue to return ints and not ``Decimal`` objects, and that any kind of floating point value is received as a string so that there is no floating point loss of precision. The "decimal point is present" logic itself is also sensitive to locale. Under OCI_, this is controlled by the NLS_LANG environment variable. Upon first connection, the dialect runs a test to determine the current "decimal" character, which can be a comma "," for european locales. From that point forward the outputtypehandler uses that character to represent a decimal point. Note that cx_oracle 5.0.3 or greater is required when dealing with numerics with locale settings that don't use a period "." 
as the decimal character. .. versionchanged:: 0.6.6 The outputtypehandler uses a comma "," character to represent a decimal point. .. _OCI: http://www.oracle.com/technetwork/database/features/oci/index.html """ from __future__ import absolute_import from .base import OracleCompiler, OracleDialect, OracleExecutionContext from . import base as oracle from ...engine import result as _result from sqlalchemy import types as sqltypes, util, exc, processors import random import collections import decimal import re class _OracleNumeric(sqltypes.Numeric): def bind_processor(self, dialect): # cx_oracle accepts Decimal objects and floats return None def result_processor(self, dialect, coltype): # we apply a cx_oracle type handler to all connections # that converts floating point strings to Decimal(). # However, in some subquery situations, Oracle doesn't # give us enough information to determine int or Decimal. # It could even be int/Decimal differently on each row, # regardless of the scale given for the originating type. # So we still need an old school isinstance() handler # here for decimals. if dialect.supports_native_decimal: if self.asdecimal: if self.scale is None: fstring = "%.10f" else: fstring = "%%.%df" % self.scale def to_decimal(value): if value is None: return None elif isinstance(value, decimal.Decimal): return value else: return decimal.Decimal(fstring % value) return to_decimal else: if self.precision is None and self.scale is None: return processors.to_float elif not getattr(self, '_is_oracle_number', False) \ and self.scale is not None: return processors.to_float else: return None else: # cx_oracle 4 behavior, will assume # floats return super(_OracleNumeric, self).\ result_processor(dialect, coltype) class _OracleDate(sqltypes.Date): def bind_processor(self, dialect): return None def result_processor(self, dialect, coltype): def process(value): if value is not None: return value.date() else: return value return process class _LOBMixin(object): def result_processor(self, dialect, coltype): if not dialect.auto_convert_lobs: # return the cx_oracle.LOB directly. return None def process(value): if value is not None: return value.read() else: return value return process class _NativeUnicodeMixin(object): # Py3K #pass # Py2K def bind_processor(self, dialect): if dialect._cx_oracle_with_unicode: def process(value): if value is None: return value else: return unicode(value) return process else: return super(_NativeUnicodeMixin, self).bind_processor(dialect) # end Py2K # we apply a connection output handler that returns # unicode in all cases, so the "native_unicode" flag # will be set for the default String.result_processor. class _OracleChar(_NativeUnicodeMixin, sqltypes.CHAR): def get_dbapi_type(self, dbapi): return dbapi.FIXED_CHAR class _OracleNVarChar(_NativeUnicodeMixin, sqltypes.NVARCHAR): def get_dbapi_type(self, dbapi): return getattr(dbapi, 'UNICODE', dbapi.STRING) class _OracleText(_LOBMixin, sqltypes.Text): def get_dbapi_type(self, dbapi): return dbapi.CLOB class _OracleLong(oracle.LONG): # a raw LONG is a text type, but does *not* # get the LobMixin with cx_oracle. 
def get_dbapi_type(self, dbapi): return dbapi.LONG_STRING class _OracleString(_NativeUnicodeMixin, sqltypes.String): pass class _OracleUnicodeText(_LOBMixin, _NativeUnicodeMixin, sqltypes.UnicodeText): def get_dbapi_type(self, dbapi): return dbapi.NCLOB def result_processor(self, dialect, coltype): lob_processor = _LOBMixin.result_processor(self, dialect, coltype) if lob_processor is None: return None string_processor = sqltypes.UnicodeText.result_processor(self, dialect, coltype) if string_processor is None: return lob_processor else: def process(value): return string_processor(lob_processor(value)) return process class _OracleInteger(sqltypes.Integer): def result_processor(self, dialect, coltype): def to_int(val): if val is not None: val = int(val) return val return to_int class _OracleBinary(_LOBMixin, sqltypes.LargeBinary): def get_dbapi_type(self, dbapi): return dbapi.BLOB def bind_processor(self, dialect): return None class _OracleInterval(oracle.INTERVAL): def get_dbapi_type(self, dbapi): return dbapi.INTERVAL class _OracleRaw(oracle.RAW): pass class _OracleRowid(oracle.ROWID): def get_dbapi_type(self, dbapi): return dbapi.ROWID class OracleCompiler_cx_oracle(OracleCompiler): def bindparam_string(self, name, quote=None, **kw): if quote is True or quote is not False and \ self.preparer._bindparam_requires_quotes(name): quoted_name = '"%s"' % name self._quoted_bind_names[name] = quoted_name return OracleCompiler.bindparam_string(self, quoted_name, **kw) else: return OracleCompiler.bindparam_string(self, name, **kw) class OracleExecutionContext_cx_oracle(OracleExecutionContext): def pre_exec(self): quoted_bind_names = \ getattr(self.compiled, '_quoted_bind_names', None) if quoted_bind_names: if not self.dialect.supports_unicode_statements: # if DBAPI doesn't accept unicode statements, # keys in self.parameters would have been encoded # here. so convert names in quoted_bind_names # to encoded as well. 
quoted_bind_names = \ dict( (fromname.encode(self.dialect.encoding), toname.encode(self.dialect.encoding)) for fromname, toname in quoted_bind_names.items() ) for param in self.parameters: for fromname, toname in quoted_bind_names.items(): param[toname] = param[fromname] del param[fromname] if self.dialect.auto_setinputsizes: # cx_oracle really has issues when you setinputsizes # on String, including that outparams/RETURNING # breaks for varchars self.set_input_sizes(quoted_bind_names, exclude_types=self.dialect.exclude_setinputsizes ) # if a single execute, check for outparams if len(self.compiled_parameters) == 1: for bindparam in self.compiled.binds.values(): if bindparam.isoutparam: dbtype = bindparam.type.dialect_impl(self.dialect).\ get_dbapi_type(self.dialect.dbapi) if not hasattr(self, 'out_parameters'): self.out_parameters = {} if dbtype is None: raise exc.InvalidRequestError( "Cannot create out parameter for parameter " "%r - it's type %r is not supported by" " cx_oracle" % (bindparam.key, bindparam.type) ) name = self.compiled.bind_names[bindparam] self.out_parameters[name] = self.cursor.var(dbtype) self.parameters[0][quoted_bind_names.get(name, name)] = \ self.out_parameters[name] def create_cursor(self): c = self._dbapi_connection.cursor() if self.dialect.arraysize: c.arraysize = self.dialect.arraysize return c def get_result_proxy(self): if hasattr(self, 'out_parameters') and self.compiled.returning: returning_params = dict( (k, v.getvalue()) for k, v in self.out_parameters.items() ) return ReturningResultProxy(self, returning_params) result = None if self.cursor.description is not None: for column in self.cursor.description: type_code = column[1] if type_code in self.dialect._cx_oracle_binary_types: result = _result.BufferedColumnResultProxy(self) if result is None: result = _result.ResultProxy(self) if hasattr(self, 'out_parameters'): if self.compiled_parameters is not None and \ len(self.compiled_parameters) == 1: result.out_parameters = out_parameters = {} for bind, name in self.compiled.bind_names.items(): if name in self.out_parameters: type = bind.type impl_type = type.dialect_impl(self.dialect) dbapi_type = impl_type.get_dbapi_type(self.dialect.dbapi) result_processor = impl_type.\ result_processor(self.dialect, dbapi_type) if result_processor is not None: out_parameters[name] = \ result_processor(self.out_parameters[name].getvalue()) else: out_parameters[name] = self.out_parameters[name].getvalue() else: result.out_parameters = dict( (k, v.getvalue()) for k, v in self.out_parameters.items() ) return result class OracleExecutionContext_cx_oracle_with_unicode(OracleExecutionContext_cx_oracle): """Support WITH_UNICODE in Python 2.xx. WITH_UNICODE allows cx_Oracle's Python 3 unicode handling behavior under Python 2.x. This mode in some cases disallows and in other cases silently passes corrupted data when non-Python-unicode strings (a.k.a. plain old Python strings) are passed as arguments to connect(), the statement sent to execute(), or any of the bind parameter keys or values sent to execute(). This optional context therefore ensures that all statements are passed as Python unicode objects. 
""" def __init__(self, *arg, **kw): OracleExecutionContext_cx_oracle.__init__(self, *arg, **kw) self.statement = unicode(self.statement) def _execute_scalar(self, stmt): return super(OracleExecutionContext_cx_oracle_with_unicode, self).\ _execute_scalar(unicode(stmt)) class ReturningResultProxy(_result.FullyBufferedResultProxy): """Result proxy which stuffs the _returning clause + outparams into the fetch.""" def __init__(self, context, returning_params): self._returning_params = returning_params super(ReturningResultProxy, self).__init__(context) def _cursor_description(self): returning = self.context.compiled.returning return [ ("ret_%d" % i, None) for i, col in enumerate(returning) ] def _buffer_rows(self): return collections.deque([tuple(self._returning_params["ret_%d" % i] for i, c in enumerate(self._returning_params))]) class OracleDialect_cx_oracle(OracleDialect): execution_ctx_cls = OracleExecutionContext_cx_oracle statement_compiler = OracleCompiler_cx_oracle driver = "cx_oracle" colspecs = colspecs = { sqltypes.Numeric: _OracleNumeric, sqltypes.Date: _OracleDate, # generic type, assume datetime.date is desired oracle.DATE: oracle.DATE, # non generic type - passthru sqltypes.LargeBinary: _OracleBinary, sqltypes.Boolean: oracle._OracleBoolean, sqltypes.Interval: _OracleInterval, oracle.INTERVAL: _OracleInterval, sqltypes.Text: _OracleText, sqltypes.String: _OracleString, sqltypes.UnicodeText: _OracleUnicodeText, sqltypes.CHAR: _OracleChar, # a raw LONG is a text type, but does *not* # get the LobMixin with cx_oracle. oracle.LONG: _OracleLong, # this is only needed for OUT parameters. # it would be nice if we could not use it otherwise. sqltypes.Integer: _OracleInteger, oracle.RAW: _OracleRaw, sqltypes.Unicode: _OracleNVarChar, sqltypes.NVARCHAR: _OracleNVarChar, oracle.ROWID: _OracleRowid, } execute_sequence_format = list def __init__(self, auto_setinputsizes=True, exclude_setinputsizes=("STRING", "UNICODE"), auto_convert_lobs=True, threaded=True, allow_twophase=True, coerce_to_decimal=True, arraysize=50, **kwargs): OracleDialect.__init__(self, **kwargs) self.threaded = threaded self.arraysize = arraysize self.allow_twophase = allow_twophase self.supports_timestamp = self.dbapi is None or \ hasattr(self.dbapi, 'TIMESTAMP') self.auto_setinputsizes = auto_setinputsizes self.auto_convert_lobs = auto_convert_lobs if hasattr(self.dbapi, 'version'): self.cx_oracle_ver = tuple([int(x) for x in self.dbapi.version.split('.')]) else: self.cx_oracle_ver = (0, 0, 0) def types(*names): return set( getattr(self.dbapi, name, None) for name in names ).difference([None]) self.exclude_setinputsizes = types(*(exclude_setinputsizes or ())) self._cx_oracle_string_types = types("STRING", "UNICODE", "NCLOB", "CLOB") self._cx_oracle_unicode_types = types("UNICODE", "NCLOB") self._cx_oracle_binary_types = types("BFILE", "CLOB", "NCLOB", "BLOB") self.supports_unicode_binds = self.cx_oracle_ver >= (5, 0) self.supports_native_decimal = ( self.cx_oracle_ver >= (5, 0) and coerce_to_decimal ) self._cx_oracle_native_nvarchar = self.cx_oracle_ver >= (5, 0) if self.cx_oracle_ver is None: # this occurs in tests with mock DBAPIs self._cx_oracle_string_types = set() self._cx_oracle_with_unicode = False elif self.cx_oracle_ver >= (5,) and not hasattr(self.dbapi, 'UNICODE'): # cx_Oracle WITH_UNICODE mode. 
*only* python # unicode objects accepted for anything self.supports_unicode_statements = True self.supports_unicode_binds = True self._cx_oracle_with_unicode = True # Py2K # There's really no reason to run with WITH_UNICODE under Python 2.x. # Give the user a hint. util.warn("cx_Oracle is compiled under Python 2.xx using the " "WITH_UNICODE flag. Consider recompiling cx_Oracle without " "this flag, which is in no way necessary for full support of Unicode. " "Otherwise, all string-holding bind parameters must " "be explicitly typed using SQLAlchemy's String type or one of its subtypes," "or otherwise be passed as Python unicode. Plain Python strings " "passed as bind parameters will be silently corrupted by cx_Oracle." ) self.execution_ctx_cls = OracleExecutionContext_cx_oracle_with_unicode # end Py2K else: self._cx_oracle_with_unicode = False if self.cx_oracle_ver is None or \ not self.auto_convert_lobs or \ not hasattr(self.dbapi, 'CLOB'): self.dbapi_type_map = {} else: # only use this for LOB objects. using it for strings, dates # etc. leads to a little too much magic, reflection doesn't know if it should # expect encoded strings or unicodes, etc. self.dbapi_type_map = { self.dbapi.CLOB: oracle.CLOB(), self.dbapi.NCLOB: oracle.NCLOB(), self.dbapi.BLOB: oracle.BLOB(), self.dbapi.BINARY: oracle.RAW(), } @classmethod def dbapi(cls): import cx_Oracle return cx_Oracle def initialize(self, connection): super(OracleDialect_cx_oracle, self).initialize(connection) if self._is_oracle_8: self.supports_unicode_binds = False self._detect_decimal_char(connection) def _detect_decimal_char(self, connection): """detect if the decimal separator character is not '.', as is the case with european locale settings for NLS_LANG. cx_oracle itself uses similar logic when it formats Python Decimal objects to strings on the bind side (as of 5.0.3), as Oracle sends/receives string numerics only in the current locale. """ if self.cx_oracle_ver < (5,): # no output type handlers before version 5 return cx_Oracle = self.dbapi conn = connection.connection # override the output_type_handler that's # on the cx_oracle connection with a plain # one on the cursor def output_type_handler(cursor, name, defaultType, size, precision, scale): return cursor.var( cx_Oracle.STRING, 255, arraysize=cursor.arraysize) cursor = conn.cursor() cursor.outputtypehandler = output_type_handler cursor.execute("SELECT 0.1 FROM DUAL") val = cursor.fetchone()[0] cursor.close() char = re.match(r"([\.,])", val).group(1) if char != '.': _detect_decimal = self._detect_decimal self._detect_decimal = \ lambda value: _detect_decimal(value.replace(char, '.')) self._to_decimal = \ lambda value: decimal.Decimal(value.replace(char, '.')) def _detect_decimal(self, value): if "." in value: return decimal.Decimal(value) else: return int(value) _to_decimal = decimal.Decimal def on_connect(self): if self.cx_oracle_ver < (5,): # no output type handlers before version 5 return cx_Oracle = self.dbapi def output_type_handler(cursor, name, defaultType, size, precision, scale): # convert all NUMBER with precision + positive scale to Decimal # this almost allows "native decimal" mode. if self.supports_native_decimal and \ defaultType == cx_Oracle.NUMBER and \ precision and scale > 0: return cursor.var( cx_Oracle.STRING, 255, outconverter=self._to_decimal, arraysize=cursor.arraysize) # if NUMBER with zero precision and 0 or neg scale, this appears # to indicate "ambiguous". 
Use a slower converter that will # make a decision based on each value received - the type # may change from row to row (!). This kills # off "native decimal" mode, handlers still needed. elif self.supports_native_decimal and \ defaultType == cx_Oracle.NUMBER \ and not precision and scale <= 0: return cursor.var( cx_Oracle.STRING, 255, outconverter=self._detect_decimal, arraysize=cursor.arraysize) # allow all strings to come back natively as Unicode elif defaultType in (cx_Oracle.STRING, cx_Oracle.FIXED_CHAR): return cursor.var(unicode, size, cursor.arraysize) def on_connect(conn): conn.outputtypehandler = output_type_handler return on_connect def create_connect_args(self, url): dialect_opts = dict(url.query) for opt in ('use_ansi', 'auto_setinputsizes', 'auto_convert_lobs', 'threaded', 'allow_twophase'): if opt in dialect_opts: util.coerce_kw_type(dialect_opts, opt, bool) setattr(self, opt, dialect_opts[opt]) if url.database: # if we have a database, then we have a remote host port = url.port if port: port = int(port) else: port = 1521 dsn = self.dbapi.makedsn(url.host, port, url.database) else: # we have a local tnsname dsn = url.host opts = dict( user=url.username, password=url.password, dsn=dsn, threaded=self.threaded, twophase=self.allow_twophase, ) # Py2K if self._cx_oracle_with_unicode: for k, v in opts.items(): if isinstance(v, str): opts[k] = unicode(v) else: for k, v in opts.items(): if isinstance(v, unicode): opts[k] = str(v) # end Py2K if 'mode' in url.query: opts['mode'] = url.query['mode'] if isinstance(opts['mode'], basestring): mode = opts['mode'].upper() if mode == 'SYSDBA': opts['mode'] = self.dbapi.SYSDBA elif mode == 'SYSOPER': opts['mode'] = self.dbapi.SYSOPER else: util.coerce_kw_type(opts, 'mode', int) return ([], opts) def _get_server_version_info(self, connection): return tuple( int(x) for x in connection.connection.version.split('.') ) def is_disconnect(self, e, connection, cursor): error, = e.args if isinstance(e, self.dbapi.InterfaceError): return "not connected" in str(e) elif hasattr(error, 'code'): # ORA-00028: your session has been killed # ORA-03114: not connected to ORACLE # ORA-03113: end-of-file on communication channel # ORA-03135: connection lost contact # ORA-01033: ORACLE initialization or shutdown in progress # ORA-02396: exceeded maximum idle time, please connect again # TODO: Others ? return error.code in (28, 3114, 3113, 3135, 1033, 2396) else: return False def create_xid(self): """create a two-phase transaction ID. this id will be passed to do_begin_twophase(), do_rollback_twophase(), do_commit_twophase(). 
its format is unspecified.""" id = random.randint(0, 2 ** 128) return (0x1234, "%032x" % id, "%032x" % 9) def do_begin_twophase(self, connection, xid): connection.connection.begin(*xid) def do_prepare_twophase(self, connection, xid): result = connection.connection.prepare() connection.info['cx_oracle_prepared'] = result def do_rollback_twophase(self, connection, xid, is_prepared=True, recover=False): self.do_rollback(connection.connection) def do_commit_twophase(self, connection, xid, is_prepared=True, recover=False): if not is_prepared: self.do_commit(connection.connection) else: oci_prepared = connection.info['cx_oracle_prepared'] if oci_prepared: self.do_commit(connection.connection) def do_recover_twophase(self, connection): connection.info.pop('cx_oracle_prepared', None) dialect = OracleDialect_cx_oracle SQLAlchemy-0.8.4/lib/sqlalchemy/dialects/oracle/zxjdbc.py0000644000076500000240000001707712251150015024040 0ustar classicstaff00000000000000# oracle/zxjdbc.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """ .. dialect:: oracle+zxjdbc :name: zxJDBC for Jython :dbapi: zxjdbc :connectstring: oracle+zxjdbc://user:pass@host/dbname :driverurl: http://www.oracle.com/technology/software/tech/java/sqlj_jdbc/index.html. """ import decimal import re from sqlalchemy import sql, types as sqltypes, util from sqlalchemy.connectors.zxJDBC import ZxJDBCConnector from sqlalchemy.dialects.oracle.base import OracleCompiler, OracleDialect, OracleExecutionContext from sqlalchemy.engine import result as _result from sqlalchemy.sql import expression import collections SQLException = zxJDBC = None class _ZxJDBCDate(sqltypes.Date): def result_processor(self, dialect, coltype): def process(value): if value is None: return None else: return value.date() return process class _ZxJDBCNumeric(sqltypes.Numeric): def result_processor(self, dialect, coltype): #XXX: does the dialect return Decimal or not??? 
# if it does (in all cases), we could use a None processor as well as # the to_float generic processor if self.asdecimal: def process(value): if isinstance(value, decimal.Decimal): return value else: return decimal.Decimal(str(value)) else: def process(value): if isinstance(value, decimal.Decimal): return float(value) else: return value return process class OracleCompiler_zxjdbc(OracleCompiler): def returning_clause(self, stmt, returning_cols): self.returning_cols = list(expression._select_iterables(returning_cols)) # within_columns_clause=False so that labels (foo AS bar) don't render columns = [self.process(c, within_columns_clause=False, result_map=self.result_map) for c in self.returning_cols] if not hasattr(self, 'returning_parameters'): self.returning_parameters = [] binds = [] for i, col in enumerate(self.returning_cols): dbtype = col.type.dialect_impl(self.dialect).get_dbapi_type(self.dialect.dbapi) self.returning_parameters.append((i + 1, dbtype)) bindparam = sql.bindparam("ret_%d" % i, value=ReturningParam(dbtype)) self.binds[bindparam.key] = bindparam binds.append(self.bindparam_string(self._truncate_bindparam(bindparam))) return 'RETURNING ' + ', '.join(columns) + " INTO " + ", ".join(binds) class OracleExecutionContext_zxjdbc(OracleExecutionContext): def pre_exec(self): if hasattr(self.compiled, 'returning_parameters'): # prepare a zxJDBC statement so we can grab its underlying # OraclePreparedStatement's getReturnResultSet later self.statement = self.cursor.prepare(self.statement) def get_result_proxy(self): if hasattr(self.compiled, 'returning_parameters'): rrs = None try: try: rrs = self.statement.__statement__.getReturnResultSet() rrs.next() except SQLException, sqle: msg = '%s [SQLCode: %d]' % (sqle.getMessage(), sqle.getErrorCode()) if sqle.getSQLState() is not None: msg += ' [SQLState: %s]' % sqle.getSQLState() raise zxJDBC.Error(msg) else: row = tuple(self.cursor.datahandler.getPyObject(rrs, index, dbtype) for index, dbtype in self.compiled.returning_parameters) return ReturningResultProxy(self, row) finally: if rrs is not None: try: rrs.close() except SQLException: pass self.statement.close() return _result.ResultProxy(self) def create_cursor(self): cursor = self._dbapi_connection.cursor() cursor.datahandler = self.dialect.DataHandler(cursor.datahandler) return cursor class ReturningResultProxy(_result.FullyBufferedResultProxy): """ResultProxy backed by the RETURNING ResultSet results.""" def __init__(self, context, returning_row): self._returning_row = returning_row super(ReturningResultProxy, self).__init__(context) def _cursor_description(self): ret = [] for c in self.context.compiled.returning_cols: if hasattr(c, 'name'): ret.append((c.name, c.type)) else: ret.append((c.anon_label, c.type)) return ret def _buffer_rows(self): return collections.deque([self._returning_row]) class ReturningParam(object): """A bindparam value representing a RETURNING parameter. Specially handled by OracleReturningDataHandler. 
""" def __init__(self, type): self.type = type def __eq__(self, other): if isinstance(other, ReturningParam): return self.type == other.type return NotImplemented def __ne__(self, other): if isinstance(other, ReturningParam): return self.type != other.type return NotImplemented def __repr__(self): kls = self.__class__ return '<%s.%s object at 0x%x type=%s>' % (kls.__module__, kls.__name__, id(self), self.type) class OracleDialect_zxjdbc(ZxJDBCConnector, OracleDialect): jdbc_db_name = 'oracle' jdbc_driver_name = 'oracle.jdbc.OracleDriver' statement_compiler = OracleCompiler_zxjdbc execution_ctx_cls = OracleExecutionContext_zxjdbc colspecs = util.update_copy( OracleDialect.colspecs, { sqltypes.Date: _ZxJDBCDate, sqltypes.Numeric: _ZxJDBCNumeric } ) def __init__(self, *args, **kwargs): super(OracleDialect_zxjdbc, self).__init__(*args, **kwargs) global SQLException, zxJDBC from java.sql import SQLException from com.ziclix.python.sql import zxJDBC from com.ziclix.python.sql.handler import OracleDataHandler class OracleReturningDataHandler(OracleDataHandler): """zxJDBC DataHandler that specially handles ReturningParam.""" def setJDBCObject(self, statement, index, object, dbtype=None): if type(object) is ReturningParam: statement.registerReturnParameter(index, object.type) elif dbtype is None: OracleDataHandler.setJDBCObject( self, statement, index, object) else: OracleDataHandler.setJDBCObject( self, statement, index, object, dbtype) self.DataHandler = OracleReturningDataHandler def initialize(self, connection): super(OracleDialect_zxjdbc, self).initialize(connection) self.implicit_returning = connection.connection.driverversion >= '10.2' def _create_jdbc_url(self, url): return 'jdbc:oracle:thin:@%s:%s:%s' % (url.host, url.port or 1521, url.database) def _get_server_version_info(self, connection): version = re.search(r'Release ([\d\.]+)', connection.connection.dbversion).group(1) return tuple(int(x) for x in version.split('.')) dialect = OracleDialect_zxjdbc SQLAlchemy-0.8.4/lib/sqlalchemy/dialects/postgres.py0000644000076500000240000000114112251147171023147 0ustar classicstaff00000000000000# dialects/postgres.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php # backwards compat with the old name from sqlalchemy.util import warn_deprecated warn_deprecated( "The SQLAlchemy PostgreSQL dialect has been renamed from 'postgres' to 'postgresql'. " "The new URL format is postgresql[+driver]://:@/" ) from sqlalchemy.dialects.postgresql import * from sqlalchemy.dialects.postgresql import base SQLAlchemy-0.8.4/lib/sqlalchemy/dialects/postgresql/0000755000076500000240000000000012251151573023136 5ustar classicstaff00000000000000SQLAlchemy-0.8.4/lib/sqlalchemy/dialects/postgresql/__init__.py0000644000076500000240000000216212251147171025247 0ustar classicstaff00000000000000# postgresql/__init__.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php from . 
import base, psycopg2, pg8000, pypostgresql, zxjdbc base.dialect = psycopg2.dialect from .base import \ INTEGER, BIGINT, SMALLINT, VARCHAR, CHAR, TEXT, NUMERIC, FLOAT, REAL, \ INET, CIDR, UUID, BIT, MACADDR, DOUBLE_PRECISION, TIMESTAMP, TIME, \ DATE, BYTEA, BOOLEAN, INTERVAL, ARRAY, ENUM, dialect, array, Any, All from .constraints import ExcludeConstraint from .hstore import HSTORE, hstore from .ranges import INT4RANGE, INT8RANGE, NUMRANGE, DATERANGE, TSRANGE, \ TSTZRANGE __all__ = ( 'INTEGER', 'BIGINT', 'SMALLINT', 'VARCHAR', 'CHAR', 'TEXT', 'NUMERIC', 'FLOAT', 'REAL', 'INET', 'CIDR', 'UUID', 'BIT', 'MACADDR', 'DOUBLE_PRECISION', 'TIMESTAMP', 'TIME', 'DATE', 'BYTEA', 'BOOLEAN', 'INTERVAL', 'ARRAY', 'ENUM', 'dialect', 'Any', 'All', 'array', 'HSTORE', 'hstore', 'INT4RANGE', 'INT8RANGE', 'NUMRANGE', 'DATERANGE', 'TSRANGE', 'TSTZRANGE' ) SQLAlchemy-0.8.4/lib/sqlalchemy/dialects/postgresql/base.py0000644000076500000240000022244012251150015024414 0ustar classicstaff00000000000000# postgresql/base.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """ .. dialect:: postgresql :name: PostgreSQL Sequences/SERIAL ---------------- PostgreSQL supports sequences, and SQLAlchemy uses these as the default means of creating new primary key values for integer-based primary key columns. When creating tables, SQLAlchemy will issue the ``SERIAL`` datatype for integer-based primary key columns, which generates a sequence and server side default corresponding to the column. To specify a specific named sequence to be used for primary key generation, use the :func:`~sqlalchemy.schema.Sequence` construct:: Table('sometable', metadata, Column('id', Integer, Sequence('some_id_seq'), primary_key=True) ) When SQLAlchemy issues a single INSERT statement, to fulfill the contract of having the "last insert identifier" available, a RETURNING clause is added to the INSERT statement which specifies the primary key columns should be returned after the statement completes. The RETURNING functionality only takes place if Postgresql 8.2 or later is in use. As a fallback approach, the sequence, whether specified explicitly or implicitly via ``SERIAL``, is executed independently beforehand, the returned value to be used in the subsequent insert. Note that when an :func:`~sqlalchemy.sql.expression.insert()` construct is executed using "executemany" semantics, the "last inserted identifier" functionality does not apply; no RETURNING clause is emitted nor is the sequence pre-executed in this case. To force the usage of RETURNING by default off, specify the flag ``implicit_returning=False`` to :func:`.create_engine`. .. _postgresql_isolation_level: Transaction Isolation Level --------------------------- All Postgresql dialects support setting of transaction isolation level both via a dialect-specific parameter ``isolation_level`` accepted by :func:`.create_engine`, as well as the ``isolation_level`` argument as passed to :meth:`.Connection.execution_options`. When using a non-psycopg2 dialect, this feature works by issuing the command ``SET SESSION CHARACTERISTICS AS TRANSACTION ISOLATION LEVEL `` for each new connection. 
To set isolation level using :func:`.create_engine`:: engine = create_engine( "postgresql+pg8000://scott:tiger@localhost/test", isolation_level="READ UNCOMMITTED" ) To set using per-connection execution options:: connection = engine.connect() connection = connection.execution_options(isolation_level="READ COMMITTED") Valid values for ``isolation_level`` include: * ``READ COMMITTED`` * ``READ UNCOMMITTED`` * ``REPEATABLE READ`` * ``SERIALIZABLE`` The :mod:`~sqlalchemy.dialects.postgresql.psycopg2` dialect also offers the special level ``AUTOCOMMIT``. See :ref:`psycopg2_isolation_level` for details. Remote / Cross-Schema Table Introspection ----------------------------------------- Tables can be introspected from any accessible schema, including inter-schema foreign key relationships. However, care must be taken when specifying the "schema" argument for a given :class:`.Table`, when the given schema is also present in PostgreSQL's ``search_path`` variable for the current connection. If a FOREIGN KEY constraint reports that the remote table's schema is within the current ``search_path``, the "schema" attribute of the resulting :class:`.Table` will be set to ``None``, unless the actual schema of the remote table matches that of the referencing table, and the "schema" argument was explicitly stated on the referencing table. The best practice here is to not use the ``schema`` argument on :class:`.Table` for any schemas that are present in ``search_path``. ``search_path`` defaults to "public", but care should be taken to inspect the actual value using:: SHOW search_path; .. versionchanged:: 0.7.3 Prior to this version, cross-schema foreign keys when the schemas were also in the ``search_path`` could make an incorrect assumption if the schemas were explicitly stated on each :class:`.Table`. Background on PG's ``search_path`` is at: http://www.postgresql.org/docs/9.0/static/ddl-schemas.html#DDL-SCHEMAS-PATH INSERT/UPDATE...RETURNING ------------------------- The dialect supports PG 8.2's ``INSERT..RETURNING``, ``UPDATE..RETURNING`` and ``DELETE..RETURNING`` syntaxes. ``INSERT..RETURNING`` is used by default for single-row INSERT statements in order to fetch newly generated primary key identifiers. To specify an explicit ``RETURNING`` clause, use the :meth:`._UpdateBase.returning` method on a per-statement basis:: # INSERT..RETURNING result = table.insert().returning(table.c.col1, table.c.col2).\\ values(name='foo') print result.fetchall() # UPDATE..RETURNING result = table.update().returning(table.c.col1, table.c.col2).\\ where(table.c.name=='foo').values(name='bar') print result.fetchall() # DELETE..RETURNING result = table.delete().returning(table.c.col1, table.c.col2).\\ where(table.c.name=='foo') print result.fetchall() FROM ONLY ... ------------------------ The dialect supports PostgreSQL's ONLY keyword for targeting only a particular table in an inheritance hierarchy. This can be used to produce the ``SELECT ... FROM ONLY``, ``UPDATE ONLY ...``, and ``DELETE FROM ONLY ...`` syntaxes. It uses SQLAlchemy's hints mechanism:: # SELECT ... FROM ONLY ... result = table.select().with_hint(table, 'ONLY', 'postgresql') print result.fetchall() # UPDATE ONLY ... table.update(values=dict(foo='bar')).with_hint('ONLY', dialect_name='postgresql') # DELETE FROM ONLY ... table.delete().with_hint('ONLY', dialect_name='postgresql') .. 
_postgresql_indexes: Postgresql-Specific Index Options --------------------------------- Several extensions to the :class:`.Index` construct are available, specific to the PostgreSQL dialect. Partial Indexes ^^^^^^^^^^^^^^^^ Partial indexes add criterion to the index definition so that the index is applied to a subset of rows. These can be specified on :class:`.Index` using the ``postgresql_where`` keyword argument:: Index('my_index', my_table.c.id, postgresql_where=tbl.c.value > 10) Operator Classes ^^^^^^^^^^^^^^^^^ PostgreSQL allows the specification of an *operator class* for each column of an index (see http://www.postgresql.org/docs/8.3/interactive/indexes-opclass.html). The :class:`.Index` construct allows these to be specified via the ``postgresql_ops`` keyword argument:: Index('my_index', my_table.c.id, my_table.c.data, postgresql_ops={ 'data': 'text_pattern_ops', 'id': 'int4_ops' }) .. versionadded:: 0.7.2 ``postgresql_ops`` keyword argument to :class:`.Index` construct. Note that the keys in the ``postgresql_ops`` dictionary are the "key" name of the :class:`.Column`, i.e. the name used to access it from the ``.c`` collection of :class:`.Table`, which can be configured to be different than the actual name of the column as expressed in the database. Index Types ^^^^^^^^^^^^ PostgreSQL provides several index types: B-Tree, Hash, GiST, and GIN, as well as the ability for users to create their own (see http://www.postgresql.org/docs/8.3/static/indexes-types.html). These can be specified on :class:`.Index` using the ``postgresql_using`` keyword argument:: Index('my_index', my_table.c.data, postgresql_using='gin') The value passed to the keyword argument will be simply passed through to the underlying CREATE INDEX command, so it *must* be a valid index type for your version of PostgreSQL. """ from collections import defaultdict import re from ... import sql, schema, exc, util from ...engine import default, reflection from ...sql import compiler, expression, util as sql_util, operators from ... 
import types as sqltypes try: from uuid import UUID as _python_UUID except ImportError: _python_UUID = None from sqlalchemy.types import INTEGER, BIGINT, SMALLINT, VARCHAR, \ CHAR, TEXT, FLOAT, NUMERIC, \ DATE, BOOLEAN, REAL RESERVED_WORDS = set( ["all", "analyse", "analyze", "and", "any", "array", "as", "asc", "asymmetric", "both", "case", "cast", "check", "collate", "column", "constraint", "create", "current_catalog", "current_date", "current_role", "current_time", "current_timestamp", "current_user", "default", "deferrable", "desc", "distinct", "do", "else", "end", "except", "false", "fetch", "for", "foreign", "from", "grant", "group", "having", "in", "initially", "intersect", "into", "leading", "limit", "localtime", "localtimestamp", "new", "not", "null", "off", "offset", "old", "on", "only", "or", "order", "placing", "primary", "references", "returning", "select", "session_user", "some", "symmetric", "table", "then", "to", "trailing", "true", "union", "unique", "user", "using", "variadic", "when", "where", "window", "with", "authorization", "between", "binary", "cross", "current_schema", "freeze", "full", "ilike", "inner", "is", "isnull", "join", "left", "like", "natural", "notnull", "outer", "over", "overlaps", "right", "similar", "verbose" ]) _DECIMAL_TYPES = (1231, 1700) _FLOAT_TYPES = (700, 701, 1021, 1022) _INT_TYPES = (20, 21, 23, 26, 1005, 1007, 1016) class BYTEA(sqltypes.LargeBinary): __visit_name__ = 'BYTEA' class DOUBLE_PRECISION(sqltypes.Float): __visit_name__ = 'DOUBLE_PRECISION' class INET(sqltypes.TypeEngine): __visit_name__ = "INET" PGInet = INET class CIDR(sqltypes.TypeEngine): __visit_name__ = "CIDR" PGCidr = CIDR class MACADDR(sqltypes.TypeEngine): __visit_name__ = "MACADDR" PGMacAddr = MACADDR class TIMESTAMP(sqltypes.TIMESTAMP): def __init__(self, timezone=False, precision=None): super(TIMESTAMP, self).__init__(timezone=timezone) self.precision = precision class TIME(sqltypes.TIME): def __init__(self, timezone=False, precision=None): super(TIME, self).__init__(timezone=timezone) self.precision = precision class INTERVAL(sqltypes.TypeEngine): """Postgresql INTERVAL type. The INTERVAL type may not be supported on all DBAPIs. It is known to work on psycopg2 and not pg8000 or zxjdbc. """ __visit_name__ = 'INTERVAL' def __init__(self, precision=None): self.precision = precision @classmethod def _adapt_from_generic_interval(cls, interval): return INTERVAL(precision=interval.second_precision) @property def _type_affinity(self): return sqltypes.Interval PGInterval = INTERVAL class BIT(sqltypes.TypeEngine): __visit_name__ = 'BIT' def __init__(self, length=None, varying=False): if not varying: # BIT without VARYING defaults to length 1 self.length = length or 1 else: # but BIT VARYING can be unlimited-length, so no default self.length = length self.varying = varying PGBit = BIT class UUID(sqltypes.TypeEngine): """Postgresql UUID type. Represents the UUID column type, interpreting data either as natively returned by the DBAPI or as Python uuid objects. The UUID type may not be supported on all DBAPIs. It is known to work on psycopg2 and not pg8000. """ __visit_name__ = 'UUID' def __init__(self, as_uuid=False): """Construct a UUID type. :param as_uuid=False: if True, values will be interpreted as Python uuid objects, converting to/from string via the DBAPI. """ if as_uuid and _python_UUID is None: raise NotImplementedError( "This version of Python does not support the native UUID type." 
) self.as_uuid = as_uuid def bind_processor(self, dialect): if self.as_uuid: def process(value): if value is not None: value = str(value) return value return process else: return None def result_processor(self, dialect, coltype): if self.as_uuid: def process(value): if value is not None: value = _python_UUID(value) return value return process else: return None PGUuid = UUID class _Slice(expression.ColumnElement): __visit_name__ = 'slice' type = sqltypes.NULLTYPE def __init__(self, slice_, source_comparator): self.start = source_comparator._check_literal( source_comparator.expr, operators.getitem, slice_.start) self.stop = source_comparator._check_literal( source_comparator.expr, operators.getitem, slice_.stop) class Any(expression.ColumnElement): """Represent the clause ``left operator ANY (right)``. ``right`` must be an array expression. .. seealso:: :class:`.postgresql.ARRAY` :meth:`.postgresql.ARRAY.Comparator.any` - ARRAY-bound method """ __visit_name__ = 'any' def __init__(self, left, right, operator=operators.eq): self.type = sqltypes.Boolean() self.left = expression._literal_as_binds(left) self.right = right self.operator = operator class All(expression.ColumnElement): """Represent the clause ``left operator ALL (right)``. ``right`` must be an array expression. .. seealso:: :class:`.postgresql.ARRAY` :meth:`.postgresql.ARRAY.Comparator.all` - ARRAY-bound method """ __visit_name__ = 'all' def __init__(self, left, right, operator=operators.eq): self.type = sqltypes.Boolean() self.left = expression._literal_as_binds(left) self.right = right self.operator = operator class array(expression.Tuple): """A Postgresql ARRAY literal. This is used to produce ARRAY literals in SQL expressions, e.g.:: from sqlalchemy.dialects.postgresql import array from sqlalchemy.dialects import postgresql from sqlalchemy import select, func stmt = select([ array([1,2]) + array([3,4,5]) ]) print stmt.compile(dialect=postgresql.dialect()) Produces the SQL:: SELECT ARRAY[%(param_1)s, %(param_2)s] || ARRAY[%(param_3)s, %(param_4)s, %(param_5)s]) AS anon_1 An instance of :class:`.array` will always have the datatype :class:`.ARRAY`. The "inner" type of the array is inferred from the values present, unless the ``type_`` keyword argument is passed:: array(['foo', 'bar'], type_=CHAR) .. versionadded:: 0.8 Added the :class:`~.postgresql.array` literal type. See also: :class:`.postgresql.ARRAY` """ __visit_name__ = 'array' def __init__(self, clauses, **kw): super(array, self).__init__(*clauses, **kw) self.type = ARRAY(self.type) def _bind_param(self, operator, obj): return array(*[ expression.BindParameter(None, o, _compared_to_operator=operator, _compared_to_type=self.type, unique=True) for o in obj ]) def self_group(self, against=None): return self class ARRAY(sqltypes.Concatenable, sqltypes.TypeEngine): """Postgresql ARRAY type. Represents values as Python lists. An :class:`.ARRAY` type is constructed given the "type" of element:: mytable = Table("mytable", metadata, Column("data", ARRAY(Integer)) ) The above type represents an N-dimensional array, meaning Postgresql will interpret values with any number of dimensions automatically. 
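Because of this, a value nested to more than one level can be persisted
against the same column without changing the type declaration; a minimal
sketch, reusing the ``mytable`` construct above::

    connection.execute(
        mytable.insert(),
        data=[[1, 2, 3], [4, 5, 6]]
    )
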
To produce an INSERT construct that passes in a 1-dimensional array of integers:: connection.execute( mytable.insert(), data=[1,2,3] ) The :class:`.ARRAY` type can be constructed given a fixed number of dimensions:: mytable = Table("mytable", metadata, Column("data", ARRAY(Integer, dimensions=2)) ) This has the effect of the :class:`.ARRAY` type specifying that number of bracketed blocks when a :class:`.Table` is used in a CREATE TABLE statement, or when the type is used within a :func:`.expression.cast` construct; it also causes the bind parameter and result set processing of the type to optimize itself to expect exactly that number of dimensions. Note that Postgresql itself still allows N dimensions with such a type. SQL expressions of type :class:`.ARRAY` have support for "index" and "slice" behavior. The Python ``[]`` operator works normally here, given integer indexes or slices. Note that Postgresql arrays default to 1-based indexing. The operator produces binary expression constructs which will produce the appropriate SQL, both for SELECT statements:: select([mytable.c.data[5], mytable.c.data[2:7]]) as well as UPDATE statements when the :meth:`.Update.values` method is used:: mytable.update().values({ mytable.c.data[5]: 7, mytable.c.data[2:7]: [1, 2, 3] }) :class:`.ARRAY` provides special methods for containment operations, e.g.:: mytable.c.data.contains([1, 2]) For a full list of special methods see :class:`.ARRAY.Comparator`. .. versionadded:: 0.8 Added support for index and slice operations to the :class:`.ARRAY` type, including support for UPDATE statements, and special array containment operations. The :class:`.ARRAY` type may not be supported on all DBAPIs. It is known to work on psycopg2 and not pg8000. See also: :class:`.postgresql.array` - produce a literal array value. """ __visit_name__ = 'ARRAY' class Comparator(sqltypes.Concatenable.Comparator): """Define comparison operations for :class:`.ARRAY`.""" def __getitem__(self, index): if isinstance(index, slice): index = _Slice(index, self) return_type = self.type else: return_type = self.type.item_type return self._binary_operate(self.expr, operators.getitem, index, result_type=return_type) def any(self, other, operator=operators.eq): """Return ``other operator ANY (array)`` clause. Argument places are switched, because ANY requires array expression to be on the right hand-side. E.g.:: from sqlalchemy.sql import operators conn.execute( select([table.c.data]).where( table.c.data.any(7, operator=operators.lt) ) ) :param other: expression to be compared :param operator: an operator object from the :mod:`sqlalchemy.sql.operators` package, defaults to :func:`.operators.eq`. .. seealso:: :class:`.postgresql.Any` :meth:`.postgresql.ARRAY.Comparator.all` """ return Any(other, self.expr, operator=operator) def all(self, other, operator=operators.eq): """Return ``other operator ALL (array)`` clause. Argument places are switched, because ALL requires array expression to be on the right hand-side. E.g.:: from sqlalchemy.sql import operators conn.execute( select([table.c.data]).where( table.c.data.all(7, operator=operators.lt) ) ) :param other: expression to be compared :param operator: an operator object from the :mod:`sqlalchemy.sql.operators` package, defaults to :func:`.operators.eq`. .. seealso:: :class:`.postgresql.All` :meth:`.postgresql.ARRAY.Comparator.any` """ return All(other, self.expr, operator=operator) def contains(self, other, **kwargs): """Boolean expression. 
Test if elements are a superset of the elements of the argument array expression. """ return self.expr.op('@>')(other) def contained_by(self, other): """Boolean expression. Test if elements are a proper subset of the elements of the argument array expression. """ return self.expr.op('<@')(other) def overlap(self, other): """Boolean expression. Test if array has elements in common with an argument array expression. """ return self.expr.op('&&')(other) def _adapt_expression(self, op, other_comparator): if isinstance(op, operators.custom_op): if op.opstring in ['@>', '<@', '&&']: return op, sqltypes.Boolean return sqltypes.Concatenable.Comparator.\ _adapt_expression(self, op, other_comparator) comparator_factory = Comparator def __init__(self, item_type, as_tuple=False, dimensions=None): """Construct an ARRAY. E.g.:: Column('myarray', ARRAY(Integer)) Arguments are: :param item_type: The data type of items of this array. Note that dimensionality is irrelevant here, so multi-dimensional arrays like ``INTEGER[][]``, are constructed as ``ARRAY(Integer)``, not as ``ARRAY(ARRAY(Integer))`` or such. :param as_tuple=False: Specify whether return results should be converted to tuples from lists. DBAPIs such as psycopg2 return lists by default. When tuples are returned, the results are hashable. :param dimensions: if non-None, the ARRAY will assume a fixed number of dimensions. This will cause the DDL emitted for this ARRAY to include the exact number of bracket clauses ``[]``, and will also optimize the performance of the type overall. Note that PG arrays are always implicitly "non-dimensioned", meaning they can store any number of dimensions no matter how they were declared. """ if isinstance(item_type, ARRAY): raise ValueError("Do not nest ARRAY types; ARRAY(basetype) " "handles multi-dimensional arrays of basetype") if isinstance(item_type, type): item_type = item_type() self.item_type = item_type self.as_tuple = as_tuple self.dimensions = dimensions def compare_values(self, x, y): return x == y def _proc_array(self, arr, itemproc, dim, collection): if dim is None: arr = list(arr) if dim == 1 or dim is None and ( # this has to be (list, tuple), or at least # not hasattr('__iter__'), since Py3K strings # etc. have __iter__ not arr or not isinstance(arr[0], (list, tuple))): if itemproc: return collection(itemproc(x) for x in arr) else: return collection(arr) else: return collection( self._proc_array( x, itemproc, dim - 1 if dim is not None else None, collection) for x in arr ) def bind_processor(self, dialect): item_proc = self.item_type.\ dialect_impl(dialect).\ bind_processor(dialect) def process(value): if value is None: return value else: return self._proc_array( value, item_proc, self.dimensions, list) return process def result_processor(self, dialect, coltype): item_proc = self.item_type.\ dialect_impl(dialect).\ result_processor(dialect, coltype) def process(value): if value is None: return value else: return self._proc_array( value, item_proc, self.dimensions, tuple if self.as_tuple else list) return process PGArray = ARRAY class ENUM(sqltypes.Enum): """Postgresql ENUM type. This is a subclass of :class:`.types.Enum` which includes support for PG's ``CREATE TYPE``. :class:`~.postgresql.ENUM` is used automatically when using the :class:`.types.Enum` type on PG assuming the ``native_enum`` is left as ``True``. 
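As a minimal sketch (the column and type names here are only illustrative),
a column declared with the generic type::

    Column('status', Enum('draft', 'published', name='status_type'))

will make use of :class:`~.postgresql.ENUM` on the Postgresql backend,
emitting ``CREATE TYPE status_type`` as part of the table's DDL.
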
However, the :class:`~.postgresql.ENUM` class can also be instantiated directly in order to access some additional Postgresql-specific options, namely finer control over whether or not ``CREATE TYPE`` should be emitted. Note that both :class:`.types.Enum` as well as :class:`~.postgresql.ENUM` feature create/drop methods; the base :class:`.types.Enum` type ultimately delegates to the :meth:`~.postgresql.ENUM.create` and :meth:`~.postgresql.ENUM.drop` methods present here. """ def __init__(self, *enums, **kw): """Construct an :class:`~.postgresql.ENUM`. Arguments are the same as that of :class:`.types.Enum`, but also including the following parameters. :param create_type: Defaults to True. Indicates that ``CREATE TYPE`` should be emitted, after optionally checking for the presence of the type, when the parent table is being created; and additionally that ``DROP TYPE`` is called when the table is dropped. When ``False``, no check will be performed and no ``CREATE TYPE`` or ``DROP TYPE`` is emitted, unless :meth:`~.postgresql.ENUM.create` or :meth:`~.postgresql.ENUM.drop` are called directly. Setting to ``False`` is helpful when invoking a creation scheme to a SQL file without access to the actual database - the :meth:`~.postgresql.ENUM.create` and :meth:`~.postgresql.ENUM.drop` methods can be used to emit SQL to a target bind. .. versionadded:: 0.7.4 """ self.create_type = kw.pop("create_type", True) super(ENUM, self).__init__(*enums, **kw) def create(self, bind=None, checkfirst=True): """Emit ``CREATE TYPE`` for this :class:`~.postgresql.ENUM`. If the underlying dialect does not support Postgresql CREATE TYPE, no action is taken. :param bind: a connectable :class:`.Engine`, :class:`.Connection`, or similar object to emit SQL. :param checkfirst: if ``True``, a query against the PG catalog will be first performed to see if the type does not exist already before creating. """ if not bind.dialect.supports_native_enum: return if not checkfirst or \ not bind.dialect.has_type(bind, self.name, schema=self.schema): bind.execute(CreateEnumType(self)) def drop(self, bind=None, checkfirst=True): """Emit ``DROP TYPE`` for this :class:`~.postgresql.ENUM`. If the underlying dialect does not support Postgresql DROP TYPE, no action is taken. :param bind: a connectable :class:`.Engine`, :class:`.Connection`, or similar object to emit SQL. :param checkfirst: if ``True``, a query against the PG catalog will be first performed to see if the type actually exists before dropping. """ if not bind.dialect.supports_native_enum: return if not checkfirst or \ bind.dialect.has_type(bind, self.name, schema=self.schema): bind.execute(DropEnumType(self)) def _check_for_name_in_memos(self, checkfirst, kw): """Look in the 'ddl runner' for 'memos', then note our name in that collection. This to ensure a particular named enum is operated upon only once within any kind of create/drop sequence without relying upon "checkfirst". 
""" if not self.create_type: return True if '_ddl_runner' in kw: ddl_runner = kw['_ddl_runner'] if '_pg_enums' in ddl_runner.memo: pg_enums = ddl_runner.memo['_pg_enums'] else: pg_enums = ddl_runner.memo['_pg_enums'] = set() present = self.name in pg_enums pg_enums.add(self.name) return present else: return False def _on_table_create(self, target, bind, checkfirst, **kw): if not self._check_for_name_in_memos(checkfirst, kw): self.create(bind=bind, checkfirst=checkfirst) def _on_metadata_create(self, target, bind, checkfirst, **kw): if self.metadata is not None and \ not self._check_for_name_in_memos(checkfirst, kw): self.create(bind=bind, checkfirst=checkfirst) def _on_metadata_drop(self, target, bind, checkfirst, **kw): if not self._check_for_name_in_memos(checkfirst, kw): self.drop(bind=bind, checkfirst=checkfirst) colspecs = { sqltypes.Interval: INTERVAL, sqltypes.Enum: ENUM, } ischema_names = { 'integer': INTEGER, 'bigint': BIGINT, 'smallint': SMALLINT, 'character varying': VARCHAR, 'character': CHAR, '"char"': sqltypes.String, 'name': sqltypes.String, 'text': TEXT, 'numeric': NUMERIC, 'float': FLOAT, 'real': REAL, 'inet': INET, 'cidr': CIDR, 'uuid': UUID, 'bit': BIT, 'bit varying': BIT, 'macaddr': MACADDR, 'double precision': DOUBLE_PRECISION, 'timestamp': TIMESTAMP, 'timestamp with time zone': TIMESTAMP, 'timestamp without time zone': TIMESTAMP, 'time with time zone': TIME, 'time without time zone': TIME, 'date': DATE, 'time': TIME, 'bytea': BYTEA, 'boolean': BOOLEAN, 'interval': INTERVAL, 'interval year to month': INTERVAL, 'interval day to second': INTERVAL, } class PGCompiler(compiler.SQLCompiler): def visit_array(self, element, **kw): return "ARRAY[%s]" % self.visit_clauselist(element, **kw) def visit_slice(self, element, **kw): return "%s:%s" % ( self.process(element.start, **kw), self.process(element.stop, **kw), ) def visit_any(self, element, **kw): return "%s%sANY (%s)" % ( self.process(element.left, **kw), compiler.OPERATORS[element.operator], self.process(element.right, **kw) ) def visit_all(self, element, **kw): return "%s%sALL (%s)" % ( self.process(element.left, **kw), compiler.OPERATORS[element.operator], self.process(element.right, **kw) ) def visit_getitem_binary(self, binary, operator, **kw): return "%s[%s]" % ( self.process(binary.left, **kw), self.process(binary.right, **kw) ) def visit_match_op_binary(self, binary, operator, **kw): return "%s @@ to_tsquery(%s)" % ( self.process(binary.left, **kw), self.process(binary.right, **kw)) def visit_ilike_op_binary(self, binary, operator, **kw): escape = binary.modifiers.get("escape", None) return '%s ILIKE %s' % \ (self.process(binary.left, **kw), self.process(binary.right, **kw)) \ + (escape and (' ESCAPE ' + self.render_literal_value(escape, None)) or '') def visit_notilike_op_binary(self, binary, operator, **kw): escape = binary.modifiers.get("escape", None) return '%s NOT ILIKE %s' % \ (self.process(binary.left, **kw), self.process(binary.right, **kw)) \ + (escape and (' ESCAPE ' + self.render_literal_value(escape, None)) or '') def render_literal_value(self, value, type_): value = super(PGCompiler, self).render_literal_value(value, type_) # TODO: need to inspect "standard_conforming_strings" if self.dialect._backslash_escapes: value = value.replace('\\', '\\\\') return value def visit_sequence(self, seq): return "nextval('%s')" % self.preparer.format_sequence(seq) def limit_clause(self, select): text = "" if select._limit is not None: text += " \n LIMIT " + self.process(sql.literal(select._limit)) if select._offset 
is not None: if select._limit is None: text += " \n LIMIT ALL" text += " OFFSET " + self.process(sql.literal(select._offset)) return text def format_from_hint_text(self, sqltext, table, hint, iscrud): if hint.upper() != 'ONLY': raise exc.CompileError("Unrecognized hint: %r" % hint) return "ONLY " + sqltext def get_select_precolumns(self, select): if select._distinct is not False: if select._distinct is True: return "DISTINCT " elif isinstance(select._distinct, (list, tuple)): return "DISTINCT ON (" + ', '.join( [self.process(col) for col in select._distinct] ) + ") " else: return "DISTINCT ON (" + self.process(select._distinct) + ") " else: return "" def for_update_clause(self, select): if select.for_update == 'nowait': return " FOR UPDATE NOWAIT" elif select.for_update == 'read': return " FOR SHARE" elif select.for_update == 'read_nowait': return " FOR SHARE NOWAIT" else: return super(PGCompiler, self).for_update_clause(select) def returning_clause(self, stmt, returning_cols): columns = [ self._label_select_column(None, c, True, False, {}) for c in expression._select_iterables(returning_cols) ] return 'RETURNING ' + ', '.join(columns) def visit_substring_func(self, func, **kw): s = self.process(func.clauses.clauses[0], **kw) start = self.process(func.clauses.clauses[1], **kw) if len(func.clauses.clauses) > 2: length = self.process(func.clauses.clauses[2], **kw) return "SUBSTRING(%s FROM %s FOR %s)" % (s, start, length) else: return "SUBSTRING(%s FROM %s)" % (s, start) class PGDDLCompiler(compiler.DDLCompiler): def get_column_specification(self, column, **kwargs): colspec = self.preparer.format_column(column) impl_type = column.type.dialect_impl(self.dialect) if column.primary_key and \ column is column.table._autoincrement_column and \ not isinstance(impl_type, sqltypes.SmallInteger) and \ ( column.default is None or ( isinstance(column.default, schema.Sequence) and column.default.optional )): if isinstance(impl_type, sqltypes.BigInteger): colspec += " BIGSERIAL" else: colspec += " SERIAL" else: colspec += " " + self.dialect.type_compiler.process(column.type) default = self.get_column_default_string(column) if default is not None: colspec += " DEFAULT " + default if not column.nullable: colspec += " NOT NULL" return colspec def visit_create_enum_type(self, create): type_ = create.element return "CREATE TYPE %s AS ENUM (%s)" % ( self.preparer.format_type(type_), ",".join("'%s'" % e for e in type_.enums) ) def visit_drop_enum_type(self, drop): type_ = drop.element return "DROP TYPE %s" % ( self.preparer.format_type(type_) ) def visit_create_index(self, create): preparer = self.preparer index = create.element self._verify_index_table(index) text = "CREATE " if index.unique: text += "UNIQUE " text += "INDEX %s ON %s " % ( self._prepared_index_name(index, include_schema=False), preparer.format_table(index.table) ) if 'postgresql_using' in index.kwargs: using = index.kwargs['postgresql_using'] text += "USING %s " % preparer.quote(using, index.quote) ops = index.kwargs.get('postgresql_ops', {}) text += "(%s)" \ % ( ', '.join([ self.sql_compiler.process( expr.self_group() if not isinstance(expr, expression.ColumnClause) else expr, include_table=False, literal_binds=True) + (c.key in ops and (' ' + ops[c.key]) or '') for expr, c in zip(index.expressions, index.columns)]) ) if 'postgresql_where' in index.kwargs: whereclause = index.kwargs['postgresql_where'] else: whereclause = None if whereclause is not None: where_compiled = self.sql_compiler.process( whereclause, include_table=False, 
literal_binds=True) text += " WHERE " + where_compiled return text def visit_exclude_constraint(self, constraint): text = "" if constraint.name is not None: text += "CONSTRAINT %s " % \ self.preparer.format_constraint(constraint) elements = [] for c in constraint.columns: op = constraint.operators[c.name] elements.append(self.preparer.quote(c.name, c.quote)+' WITH '+op) text += "EXCLUDE USING %s (%s)" % (constraint.using, ', '.join(elements)) if constraint.where is not None: text += ' WHERE (%s)' % self.sql_compiler.process( constraint.where, literal_binds=True) text += self.define_constraint_deferrability(constraint) return text class PGTypeCompiler(compiler.GenericTypeCompiler): def visit_INET(self, type_): return "INET" def visit_CIDR(self, type_): return "CIDR" def visit_MACADDR(self, type_): return "MACADDR" def visit_FLOAT(self, type_): if not type_.precision: return "FLOAT" else: return "FLOAT(%(precision)s)" % {'precision': type_.precision} def visit_DOUBLE_PRECISION(self, type_): return "DOUBLE PRECISION" def visit_BIGINT(self, type_): return "BIGINT" def visit_HSTORE(self, type_): return "HSTORE" def visit_INT4RANGE(self, type_): return "INT4RANGE" def visit_INT8RANGE(self, type_): return "INT8RANGE" def visit_NUMRANGE(self, type_): return "NUMRANGE" def visit_DATERANGE(self, type_): return "DATERANGE" def visit_TSRANGE(self, type_): return "TSRANGE" def visit_TSTZRANGE(self, type_): return "TSTZRANGE" def visit_datetime(self, type_): return self.visit_TIMESTAMP(type_) def visit_enum(self, type_): if not type_.native_enum or not self.dialect.supports_native_enum: return super(PGTypeCompiler, self).visit_enum(type_) else: return self.visit_ENUM(type_) def visit_ENUM(self, type_): return self.dialect.identifier_preparer.format_type(type_) def visit_TIMESTAMP(self, type_): return "TIMESTAMP%s %s" % ( getattr(type_, 'precision', None) and "(%d)" % type_.precision or "", (type_.timezone and "WITH" or "WITHOUT") + " TIME ZONE" ) def visit_TIME(self, type_): return "TIME%s %s" % ( getattr(type_, 'precision', None) and "(%d)" % type_.precision or "", (type_.timezone and "WITH" or "WITHOUT") + " TIME ZONE" ) def visit_INTERVAL(self, type_): if type_.precision is not None: return "INTERVAL(%d)" % type_.precision else: return "INTERVAL" def visit_BIT(self, type_): if type_.varying: compiled = "BIT VARYING" if type_.length is not None: compiled += "(%d)" % type_.length else: compiled = "BIT(%d)" % type_.length return compiled def visit_UUID(self, type_): return "UUID" def visit_large_binary(self, type_): return self.visit_BYTEA(type_) def visit_BYTEA(self, type_): return "BYTEA" def visit_ARRAY(self, type_): return self.process(type_.item_type) + ('[]' * (type_.dimensions if type_.dimensions is not None else 1)) class PGIdentifierPreparer(compiler.IdentifierPreparer): reserved_words = RESERVED_WORDS def _unquote_identifier(self, value): if value[0] == self.initial_quote: value = value[1:-1].\ replace(self.escape_to_quote, self.escape_quote) return value def format_type(self, type_, use_schema=True): if not type_.name: raise exc.CompileError("Postgresql ENUM type requires a name.") name = self.quote(type_.name, type_.quote) if not self.omit_schema and use_schema and type_.schema is not None: name = self.quote_schema(type_.schema, type_.quote) + "." 
+ name return name class PGInspector(reflection.Inspector): def __init__(self, conn): reflection.Inspector.__init__(self, conn) def get_table_oid(self, table_name, schema=None): """Return the oid from `table_name` and `schema`.""" return self.dialect.get_table_oid(self.bind, table_name, schema, info_cache=self.info_cache) class CreateEnumType(schema._CreateDropBase): __visit_name__ = "create_enum_type" class DropEnumType(schema._CreateDropBase): __visit_name__ = "drop_enum_type" class PGExecutionContext(default.DefaultExecutionContext): def fire_sequence(self, seq, type_): return self._execute_scalar(("select nextval('%s')" % \ self.dialect.identifier_preparer.format_sequence(seq)), type_) def get_insert_default(self, column): if column.primary_key and column is column.table._autoincrement_column: if column.server_default and column.server_default.has_argument: # pre-execute passive defaults on primary key columns return self._execute_scalar("select %s" % column.server_default.arg, column.type) elif (column.default is None or (column.default.is_sequence and column.default.optional)): # execute the sequence associated with a SERIAL primary # key column. for non-primary-key SERIAL, the ID just # generates server side. try: seq_name = column._postgresql_seq_name except AttributeError: tab = column.table.name col = column.name tab = tab[0:29 + max(0, (29 - len(col)))] col = col[0:29 + max(0, (29 - len(tab)))] name = "%s_%s_seq" % (tab, col) column._postgresql_seq_name = seq_name = name sch = column.table.schema if sch is not None: exc = "select nextval('\"%s\".\"%s\"')" % \ (sch, seq_name) else: exc = "select nextval('\"%s\"')" % \ (seq_name, ) return self._execute_scalar(exc, column.type) return super(PGExecutionContext, self).get_insert_default(column) class PGDialect(default.DefaultDialect): name = 'postgresql' supports_alter = True max_identifier_length = 63 supports_sane_rowcount = True supports_native_enum = True supports_native_boolean = True supports_sequences = True sequences_optional = True preexecute_autoincrement_sequences = True postfetch_lastrowid = False supports_default_values = True supports_empty_insert = False supports_multivalues_insert = True default_paramstyle = 'pyformat' ischema_names = ischema_names colspecs = colspecs statement_compiler = PGCompiler ddl_compiler = PGDDLCompiler type_compiler = PGTypeCompiler preparer = PGIdentifierPreparer execution_ctx_cls = PGExecutionContext inspector = PGInspector isolation_level = None # TODO: need to inspect "standard_conforming_strings" _backslash_escapes = True def __init__(self, isolation_level=None, **kwargs): default.DefaultDialect.__init__(self, **kwargs) self.isolation_level = isolation_level def initialize(self, connection): super(PGDialect, self).initialize(connection) self.implicit_returning = self.server_version_info > (8, 2) and \ self.__dict__.get('implicit_returning', True) self.supports_native_enum = self.server_version_info >= (8, 3) if not self.supports_native_enum: self.colspecs = self.colspecs.copy() # pop base Enum type self.colspecs.pop(sqltypes.Enum, None) # psycopg2, others may have placed ENUM here as well self.colspecs.pop(ENUM, None) def on_connect(self): if self.isolation_level is not None: def connect(conn): self.set_isolation_level(conn, self.isolation_level) return connect else: return None _isolation_lookup = set(['SERIALIZABLE', 'READ UNCOMMITTED', 'READ COMMITTED', 'REPEATABLE READ']) def set_isolation_level(self, connection, level): level = level.replace('_', ' ') if level not in 
self._isolation_lookup: raise exc.ArgumentError( "Invalid value '%s' for isolation_level. " "Valid isolation levels for %s are %s" % (level, self.name, ", ".join(self._isolation_lookup)) ) cursor = connection.cursor() cursor.execute( "SET SESSION CHARACTERISTICS AS TRANSACTION " "ISOLATION LEVEL %s" % level) cursor.execute("COMMIT") cursor.close() def get_isolation_level(self, connection): cursor = connection.cursor() cursor.execute('show transaction isolation level') val = cursor.fetchone()[0] cursor.close() return val.upper() def do_begin_twophase(self, connection, xid): self.do_begin(connection.connection) def do_prepare_twophase(self, connection, xid): connection.execute("PREPARE TRANSACTION '%s'" % xid) def do_rollback_twophase(self, connection, xid, is_prepared=True, recover=False): if is_prepared: if recover: #FIXME: ugly hack to get out of transaction # context when committing recoverable transactions # Must find out a way how to make the dbapi not # open a transaction. connection.execute("ROLLBACK") connection.execute("ROLLBACK PREPARED '%s'" % xid) connection.execute("BEGIN") self.do_rollback(connection.connection) else: self.do_rollback(connection.connection) def do_commit_twophase(self, connection, xid, is_prepared=True, recover=False): if is_prepared: if recover: connection.execute("ROLLBACK") connection.execute("COMMIT PREPARED '%s'" % xid) connection.execute("BEGIN") self.do_rollback(connection.connection) else: self.do_commit(connection.connection) def do_recover_twophase(self, connection): resultset = connection.execute( sql.text("SELECT gid FROM pg_prepared_xacts")) return [row[0] for row in resultset] def _get_default_schema_name(self, connection): return connection.scalar("select current_schema()") def has_schema(self, connection, schema): query = "select nspname from pg_namespace where lower(nspname)=:schema" cursor = connection.execute( sql.text( query, bindparams=[ sql.bindparam( 'schema', unicode(schema.lower()), type_=sqltypes.Unicode)] ) ) return bool(cursor.first()) def has_table(self, connection, table_name, schema=None): # seems like case gets folded in pg_class... 
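# --- Hedged sketch (not part of the original source).  How the
# isolation_level hook above is normally driven: the value passed to
# create_engine() is stored on the dialect and applied to each new DBAPI
# connection via on_connect().  The URL below is hypothetical.
from sqlalchemy import create_engine

engine = create_engine(
    "postgresql+pg8000://scott:tiger@localhost/test",
    isolation_level="REPEATABLE READ")

# For the base PGDialect this emits, per new connection, roughly:
#   SET SESSION CHARACTERISTICS AS TRANSACTION ISOLATION LEVEL REPEATABLE READ
#   COMMIT
# (the psycopg2 dialect overrides set_isolation_level() to use psycopg2's
# own connection-level API instead; see psycopg2.py later in this package)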
if schema is None: cursor = connection.execute( sql.text( "select relname from pg_class c join pg_namespace n on " "n.oid=c.relnamespace where n.nspname=current_schema() and " "relname=:name", bindparams=[ sql.bindparam('name', unicode(table_name), type_=sqltypes.Unicode)] ) ) else: cursor = connection.execute( sql.text( "select relname from pg_class c join pg_namespace n on " "n.oid=c.relnamespace where n.nspname=:schema and " "relname=:name", bindparams=[ sql.bindparam('name', unicode(table_name), type_=sqltypes.Unicode), sql.bindparam('schema', unicode(schema), type_=sqltypes.Unicode)] ) ) return bool(cursor.first()) def has_sequence(self, connection, sequence_name, schema=None): if schema is None: cursor = connection.execute( sql.text( "SELECT relname FROM pg_class c join pg_namespace n on " "n.oid=c.relnamespace where relkind='S' and " "n.nspname=current_schema() " "and relname=:name", bindparams=[ sql.bindparam('name', unicode(sequence_name), type_=sqltypes.Unicode) ] ) ) else: cursor = connection.execute( sql.text( "SELECT relname FROM pg_class c join pg_namespace n on " "n.oid=c.relnamespace where relkind='S' and " "n.nspname=:schema and relname=:name", bindparams=[ sql.bindparam('name', unicode(sequence_name), type_=sqltypes.Unicode), sql.bindparam('schema', unicode(schema), type_=sqltypes.Unicode) ] ) ) return bool(cursor.first()) def has_type(self, connection, type_name, schema=None): bindparams = [ sql.bindparam('typname', unicode(type_name), type_=sqltypes.Unicode), sql.bindparam('nspname', unicode(schema), type_=sqltypes.Unicode), ] if schema is not None: query = """ SELECT EXISTS ( SELECT * FROM pg_catalog.pg_type t, pg_catalog.pg_namespace n WHERE t.typnamespace = n.oid AND t.typname = :typname AND n.nspname = :nspname ) """ else: query = """ SELECT EXISTS ( SELECT * FROM pg_catalog.pg_type t WHERE t.typname = :typname AND pg_type_is_visible(t.oid) ) """ cursor = connection.execute(sql.text(query, bindparams=bindparams)) return bool(cursor.scalar()) def _get_server_version_info(self, connection): v = connection.execute("select version()").scalar() m = re.match( '.*(?:PostgreSQL|EnterpriseDB) ' '(\d+)\.(\d+)(?:\.(\d+))?(?:\.\d+)?(?:devel)?', v) if not m: raise AssertionError( "Could not determine version from string '%s'" % v) return tuple([int(x) for x in m.group(1, 2, 3) if x is not None]) @reflection.cache def get_table_oid(self, connection, table_name, schema=None, **kw): """Fetch the oid for schema.table_name. Several reflection methods require the table oid. The idea for using this method is that it can be fetched one time and cached for subsequent calls. """ table_oid = None if schema is not None: schema_where_clause = "n.nspname = :schema" else: schema_where_clause = "pg_catalog.pg_table_is_visible(c.oid)" query = """ SELECT c.oid FROM pg_catalog.pg_class c LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace WHERE (%s) AND c.relname = :table_name AND c.relkind in ('r','v') """ % schema_where_clause # Since we're binding to unicode, table_name and schema_name must be # unicode. 
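# --- Hedged sketch (not part of the original source).  The has_table() /
# has_sequence() / has_type() checks above operate against a Connection; the
# engine URL and object names here are hypothetical.
from sqlalchemy import create_engine

engine = create_engine("postgresql+psycopg2://scott:tiger@localhost/test")
conn = engine.connect()

engine.dialect.has_table(conn, "accounts")              # pg_class lookup
engine.dialect.has_sequence(conn, "accounts_id_seq")    # relkind = 'S'
engine.dialect.has_type(conn, "mood", schema="public")  # pg_type lookup

conn.close()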
table_name = unicode(table_name) if schema is not None: schema = unicode(schema) s = sql.text(query, bindparams=[ sql.bindparam('table_name', type_=sqltypes.Unicode), sql.bindparam('schema', type_=sqltypes.Unicode) ], typemap={'oid': sqltypes.Integer} ) c = connection.execute(s, table_name=table_name, schema=schema) table_oid = c.scalar() if table_oid is None: raise exc.NoSuchTableError(table_name) return table_oid @reflection.cache def get_schema_names(self, connection, **kw): s = """ SELECT nspname FROM pg_namespace ORDER BY nspname """ rp = connection.execute(s) # what about system tables? # Py3K #schema_names = [row[0] for row in rp \ # if not row[0].startswith('pg_')] # Py2K schema_names = [row[0].decode(self.encoding) for row in rp \ if not row[0].startswith('pg_')] # end Py2K return schema_names @reflection.cache def get_table_names(self, connection, schema=None, **kw): if schema is not None: current_schema = schema else: current_schema = self.default_schema_name result = connection.execute( sql.text(u"SELECT relname FROM pg_class c " "WHERE relkind = 'r' " "AND '%s' = (select nspname from pg_namespace n " "where n.oid = c.relnamespace) " % current_schema, typemap={'relname': sqltypes.Unicode} ) ) return [row[0] for row in result] @reflection.cache def get_view_names(self, connection, schema=None, **kw): if schema is not None: current_schema = schema else: current_schema = self.default_schema_name s = """ SELECT relname FROM pg_class c WHERE relkind = 'v' AND '%(schema)s' = (select nspname from pg_namespace n where n.oid = c.relnamespace) """ % dict(schema=current_schema) # Py3K #view_names = [row[0] for row in connection.execute(s)] # Py2K view_names = [row[0].decode(self.encoding) for row in connection.execute(s)] # end Py2K return view_names @reflection.cache def get_view_definition(self, connection, view_name, schema=None, **kw): if schema is not None: current_schema = schema else: current_schema = self.default_schema_name s = """ SELECT definition FROM pg_views WHERE schemaname = :schema AND viewname = :view_name """ rp = connection.execute(sql.text(s), view_name=view_name, schema=current_schema) if rp: # Py3K #view_def = rp.scalar() # Py2K view_def = rp.scalar().decode(self.encoding) # end Py2K return view_def @reflection.cache def get_columns(self, connection, table_name, schema=None, **kw): table_oid = self.get_table_oid(connection, table_name, schema, info_cache=kw.get('info_cache')) SQL_COLS = """ SELECT a.attname, pg_catalog.format_type(a.atttypid, a.atttypmod), (SELECT pg_catalog.pg_get_expr(d.adbin, d.adrelid) FROM pg_catalog.pg_attrdef d WHERE d.adrelid = a.attrelid AND d.adnum = a.attnum AND a.atthasdef) AS DEFAULT, a.attnotnull, a.attnum, a.attrelid as table_oid FROM pg_catalog.pg_attribute a WHERE a.attrelid = :table_oid AND a.attnum > 0 AND NOT a.attisdropped ORDER BY a.attnum """ s = sql.text(SQL_COLS, bindparams=[sql.bindparam('table_oid', type_=sqltypes.Integer)], typemap={'attname': sqltypes.Unicode, 'default': sqltypes.Unicode} ) c = connection.execute(s, table_oid=table_oid) rows = c.fetchall() domains = self._load_domains(connection) enums = self._load_enums(connection) # format columns columns = [] for name, format_type, default, notnull, attnum, table_oid in rows: column_info = self._get_column_info( name, format_type, default, notnull, domains, enums, schema) columns.append(column_info) return columns def _get_column_info(self, name, format_type, default, notnull, domains, enums, schema): ## strip (*) from character varying(5), timestamp(5) # with time 
zone, geometry(POLYGON), etc. attype = re.sub(r'\(.*\)', '', format_type) # strip '[]' from integer[], etc. attype = re.sub(r'\[\]', '', attype) nullable = not notnull is_array = format_type.endswith('[]') charlen = re.search('\(([\d,]+)\)', format_type) if charlen: charlen = charlen.group(1) args = re.search('\((.*)\)', format_type) if args and args.group(1): args = tuple(re.split('\s*,\s*', args.group(1))) else: args = () kwargs = {} if attype == 'numeric': if charlen: prec, scale = charlen.split(',') args = (int(prec), int(scale)) else: args = () elif attype == 'double precision': args = (53, ) elif attype == 'integer': args = () elif attype in ('timestamp with time zone', 'time with time zone'): kwargs['timezone'] = True if charlen: kwargs['precision'] = int(charlen) args = () elif attype in ('timestamp without time zone', 'time without time zone', 'time'): kwargs['timezone'] = False if charlen: kwargs['precision'] = int(charlen) args = () elif attype == 'bit varying': kwargs['varying'] = True if charlen: args = (int(charlen),) else: args = () elif attype in ('interval', 'interval year to month', 'interval day to second'): if charlen: kwargs['precision'] = int(charlen) args = () elif charlen: args = (int(charlen),) while True: if attype in self.ischema_names: coltype = self.ischema_names[attype] break elif attype in enums: enum = enums[attype] coltype = ENUM if "." in attype: kwargs['schema'], kwargs['name'] = attype.split('.') else: kwargs['name'] = attype args = tuple(enum['labels']) break elif attype in domains: domain = domains[attype] attype = domain['attype'] # A table can't override whether the domain is nullable. nullable = domain['nullable'] if domain['default'] and not default: # It can, however, override the default # value, but can't set it to null. default = domain['default'] continue else: coltype = None break if coltype: coltype = coltype(*args, **kwargs) if is_array: coltype = ARRAY(coltype) else: util.warn("Did not recognize type '%s' of column '%s'" % (attype, name)) coltype = sqltypes.NULLTYPE # adjust the default value autoincrement = False if default is not None: match = re.search(r"""(nextval\(')([^']+)('.*$)""", default) if match is not None: autoincrement = True # the default is related to a Sequence sch = schema if '.' not in match.group(2) and sch is not None: # unconditionally quote the schema name. this could # later be enhanced to obey quoting rules / # "quote schema" default = match.group(1) + \ ('"%s"' % sch) + '.' 
+ \ match.group(2) + match.group(3) column_info = dict(name=name, type=coltype, nullable=nullable, default=default, autoincrement=autoincrement) return column_info @reflection.cache def get_pk_constraint(self, connection, table_name, schema=None, **kw): table_oid = self.get_table_oid(connection, table_name, schema, info_cache=kw.get('info_cache')) if self.server_version_info < (8, 4): # unnest() and generate_subscripts() both introduced in # version 8.4 PK_SQL = """ SELECT a.attname FROM pg_class t join pg_index ix on t.oid = ix.indrelid join pg_attribute a on t.oid=a.attrelid and a.attnum=ANY(ix.indkey) WHERE t.oid = :table_oid and ix.indisprimary = 't' ORDER BY a.attnum """ else: PK_SQL = """ SELECT a.attname FROM pg_attribute a JOIN ( SELECT unnest(ix.indkey) attnum, generate_subscripts(ix.indkey, 1) ord FROM pg_index ix WHERE ix.indrelid = :table_oid AND ix.indisprimary ) k ON a.attnum=k.attnum WHERE a.attrelid = :table_oid ORDER BY k.ord """ t = sql.text(PK_SQL, typemap={'attname': sqltypes.Unicode}) c = connection.execute(t, table_oid=table_oid) cols = [r[0] for r in c.fetchall()] PK_CONS_SQL = """ SELECT conname FROM pg_catalog.pg_constraint r WHERE r.conrelid = :table_oid AND r.contype = 'p' ORDER BY 1 """ t = sql.text(PK_CONS_SQL, typemap={'conname': sqltypes.Unicode}) c = connection.execute(t, table_oid=table_oid) name = c.scalar() return {'constrained_columns': cols, 'name': name} @reflection.cache def get_foreign_keys(self, connection, table_name, schema=None, **kw): preparer = self.identifier_preparer table_oid = self.get_table_oid(connection, table_name, schema, info_cache=kw.get('info_cache')) FK_SQL = """ SELECT r.conname, pg_catalog.pg_get_constraintdef(r.oid, true) as condef, n.nspname as conschema FROM pg_catalog.pg_constraint r, pg_namespace n, pg_class c WHERE r.conrelid = :table AND r.contype = 'f' AND c.oid = confrelid AND n.oid = c.relnamespace ORDER BY 1 """ t = sql.text(FK_SQL, typemap={ 'conname': sqltypes.Unicode, 'condef': sqltypes.Unicode}) c = connection.execute(t, table=table_oid) fkeys = [] for conname, condef, conschema in c.fetchall(): m = re.search('FOREIGN KEY \((.*?)\) REFERENCES ' '(?:(.*?)\.)?(.*?)\((.*?)\)', condef).groups() constrained_columns, referred_schema, \ referred_table, referred_columns = m constrained_columns = [preparer._unquote_identifier(x) for x in re.split(r'\s*,\s*', constrained_columns)] if referred_schema: referred_schema =\ preparer._unquote_identifier(referred_schema) elif schema is not None and schema == conschema: # no schema was returned by pg_get_constraintdef(). This # means the schema is in the search path. We will leave # it as None, unless the actual schema, which we pull out # from pg_namespace even though pg_get_constraintdef() doesn't # want to give it to us, matches that of the referencing table, # and an explicit schema was given for the referencing table. 
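# --- Hedged sketch (not part of the original source).  The primary key and
# foreign key reflection queries above are normally reached through the
# Inspector interface; table and constraint names here are hypothetical.
from sqlalchemy import create_engine, inspect

engine = create_engine("postgresql+psycopg2://scott:tiger@localhost/test")
insp = inspect(engine)     # returns a PGInspector for this dialect

insp.get_pk_constraint("orders")
# e.g. {'constrained_columns': ['id'], 'name': u'orders_pkey'}

insp.get_foreign_keys("orders")
# e.g. [{'name': u'orders_user_id_fkey',
#        'constrained_columns': ['user_id'],
#        'referred_schema': None,
#        'referred_table': 'users',
#        'referred_columns': ['id']}]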
referred_schema = schema referred_table = preparer._unquote_identifier(referred_table) referred_columns = [preparer._unquote_identifier(x) for x in re.split(r'\s*,\s', referred_columns)] fkey_d = { 'name': conname, 'constrained_columns': constrained_columns, 'referred_schema': referred_schema, 'referred_table': referred_table, 'referred_columns': referred_columns } fkeys.append(fkey_d) return fkeys @reflection.cache def get_indexes(self, connection, table_name, schema, **kw): table_oid = self.get_table_oid(connection, table_name, schema, info_cache=kw.get('info_cache')) # cast indkey as varchar since it's an int2vector, # returned as a list by some drivers such as pypostgresql IDX_SQL = """ SELECT i.relname as relname, ix.indisunique, ix.indexprs, ix.indpred, a.attname, a.attnum, ix.indkey::varchar FROM pg_class t join pg_index ix on t.oid = ix.indrelid join pg_class i on i.oid=ix.indexrelid left outer join pg_attribute a on t.oid=a.attrelid and a.attnum=ANY(ix.indkey) WHERE t.relkind = 'r' and t.oid = :table_oid and ix.indisprimary = 'f' ORDER BY t.relname, i.relname """ t = sql.text(IDX_SQL, typemap={'attname': sqltypes.Unicode}) c = connection.execute(t, table_oid=table_oid) indexes = defaultdict(lambda: defaultdict(dict)) sv_idx_name = None for row in c.fetchall(): idx_name, unique, expr, prd, col, col_num, idx_key = row if expr: if idx_name != sv_idx_name: util.warn( "Skipped unsupported reflection of " "expression-based index %s" % idx_name) sv_idx_name = idx_name continue if prd and not idx_name == sv_idx_name: util.warn( "Predicate of partial index %s ignored during reflection" % idx_name) sv_idx_name = idx_name index = indexes[idx_name] if col is not None: index['cols'][col_num] = col index['key'] = [int(k.strip()) for k in idx_key.split()] index['unique'] = unique return [ {'name': name, 'unique': idx['unique'], 'column_names': [idx['cols'][i] for i in idx['key']]} for name, idx in indexes.items() ] @reflection.cache def get_unique_constraints(self, connection, table_name, schema=None, **kw): table_oid = self.get_table_oid(connection, table_name, schema, info_cache=kw.get('info_cache')) UNIQUE_SQL = """ SELECT cons.conname as name, ARRAY_AGG(a.attname) as column_names FROM pg_catalog.pg_constraint cons left outer join pg_attribute a on cons.conrelid = a.attrelid and a.attnum = ANY(cons.conkey) WHERE cons.conrelid = :table_oid AND cons.contype = 'u' GROUP BY cons.conname """ t = sql.text(UNIQUE_SQL, typemap={'column_names': ARRAY(sqltypes.Unicode)}) c = connection.execute(t, table_oid=table_oid) return [ {'name': row.name, 'column_names': row.column_names} for row in c.fetchall() ] def _load_enums(self, connection): if not self.supports_native_enum: return {} ## Load data types for enums: SQL_ENUMS = """ SELECT t.typname as "name", -- no enum defaults in 8.4 at least -- t.typdefault as "default", pg_catalog.pg_type_is_visible(t.oid) as "visible", n.nspname as "schema", e.enumlabel as "label" FROM pg_catalog.pg_type t LEFT JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace LEFT JOIN pg_catalog.pg_enum e ON t.oid = e.enumtypid WHERE t.typtype = 'e' ORDER BY "name", e.oid -- e.oid gives us label order """ s = sql.text(SQL_ENUMS, typemap={ 'attname': sqltypes.Unicode, 'label': sqltypes.Unicode}) c = connection.execute(s) enums = {} for enum in c.fetchall(): if enum['visible']: # 'visible' just means whether or not the enum is in a # schema that's on the search path -- or not overridden by # a schema with higher precedence. 
If it's not visible, # it will be prefixed with the schema-name when it's used. name = enum['name'] else: name = "%s.%s" % (enum['schema'], enum['name']) if name in enums: enums[name]['labels'].append(enum['label']) else: enums[name] = { 'labels': [enum['label']], } return enums def _load_domains(self, connection): ## Load data types for domains: SQL_DOMAINS = """ SELECT t.typname as "name", pg_catalog.format_type(t.typbasetype, t.typtypmod) as "attype", not t.typnotnull as "nullable", t.typdefault as "default", pg_catalog.pg_type_is_visible(t.oid) as "visible", n.nspname as "schema" FROM pg_catalog.pg_type t LEFT JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace WHERE t.typtype = 'd' """ s = sql.text(SQL_DOMAINS, typemap={'attname': sqltypes.Unicode}) c = connection.execute(s) domains = {} for domain in c.fetchall(): ## strip (30) from character varying(30) attype = re.search('([^\(]+)', domain['attype']).group(1) if domain['visible']: # 'visible' just means whether or not the domain is in a # schema that's on the search path -- or not overridden by # a schema with higher precedence. If it's not visible, # it will be prefixed with the schema-name when it's used. name = domain['name'] else: name = "%s.%s" % (domain['schema'], domain['name']) domains[name] = { 'attype': attype, 'nullable': domain['nullable'], 'default': domain['default'] } return domains SQLAlchemy-0.8.4/lib/sqlalchemy/dialects/postgresql/constraints.py0000644000076500000240000000475612251150015026061 0ustar classicstaff00000000000000# Copyright (C) 2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php from sqlalchemy.schema import ColumnCollectionConstraint from sqlalchemy.sql import expression class ExcludeConstraint(ColumnCollectionConstraint): """A table-level EXCLUDE constraint. Defines an EXCLUDE constraint as described in the `postgres documentation`__. __ http://www.postgresql.org/docs/9.0/static/sql-createtable.html#SQL-CREATETABLE-EXCLUDE """ __visit_name__ = 'exclude_constraint' where = None def __init__(self, *elements, **kw): """ :param \*elements: A sequence of two tuples of the form ``(column, operator)`` where column must be a column name or Column object and operator must be a string containing the operator to use. :param name: Optional, the in-database name of this constraint. :param deferrable: Optional bool. If set, emit DEFERRABLE or NOT DEFERRABLE when issuing DDL for this constraint. :param initially: Optional string. If set, emit INITIALLY when issuing DDL for this constraint. :param using: Optional string. If set, emit USING when issuing DDL for this constraint. Defaults to 'gist'. :param where: Optional string. If set, emit WHERE when issuing DDL for this constraint. 
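# --- Hedged sketch (not part of the original source).  Declaring the EXCLUDE
# constraint documented above; table and column names are hypothetical, and
# "&&" is the Postgresql range-overlap operator.
from sqlalchemy import Table, Column, Integer, MetaData
from sqlalchemy.dialects.postgresql import TSRANGE, ExcludeConstraint

metadata = MetaData()
bookings = Table(
    'room_bookings', metadata,
    Column('room', Integer, primary_key=True),
    Column('during', TSRANGE),
    ExcludeConstraint(('room', '='), ('during', '&&'),
                      using='gist', name='no_overlapping_bookings'))

# PGDDLCompiler.visit_exclude_constraint() earlier in base.py renders this
# roughly as:
#   CONSTRAINT no_overlapping_bookings
#       EXCLUDE USING gist (room WITH =, during WITH &&)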
""" ColumnCollectionConstraint.__init__( self, name=kw.get('name'), deferrable=kw.get('deferrable'), initially=kw.get('initially'), *[col for col, op in elements] ) self.operators = {} for col_or_string, op in elements: name = getattr(col_or_string, 'name', col_or_string) self.operators[name] = op self.using = kw.get('using', 'gist') where = kw.get('where') if where: self.where = expression._literal_as_text(where) def copy(self, **kw): elements = [(col, self.operators[col]) for col in self.columns.keys()] c = self.__class__(name=self.name, deferrable=self.deferrable, initially=self.initially, *elements) c.dispatch._update(self.dispatch) return c SQLAlchemy-0.8.4/lib/sqlalchemy/dialects/postgresql/hstore.py0000644000076500000240000002611512251150015025007 0ustar classicstaff00000000000000# postgresql/hstore.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php import re from .base import ARRAY, ischema_names from ... import types as sqltypes from ...sql import functions as sqlfunc from ...sql.operators import custom_op from ... import util __all__ = ('HSTORE', 'hstore') # My best guess at the parsing rules of hstore literals, since no formal # grammar is given. This is mostly reverse engineered from PG's input parser # behavior. HSTORE_PAIR_RE = re.compile(r""" ( "(?P (\\ . | [^"])* )" # Quoted key ) [ ]* => [ ]* # Pair operator, optional adjoining whitespace ( (?P NULL ) # NULL value | "(?P (\\ . | [^"])* )" # Quoted value ) """, re.VERBOSE) HSTORE_DELIMITER_RE = re.compile(r""" [ ]* , [ ]* """, re.VERBOSE) def _parse_error(hstore_str, pos): """format an unmarshalling error.""" ctx = 20 hslen = len(hstore_str) parsed_tail = hstore_str[max(pos - ctx - 1, 0):min(pos, hslen)] residual = hstore_str[min(pos, hslen):min(pos + ctx + 1, hslen)] if len(parsed_tail) > ctx: parsed_tail = '[...]' + parsed_tail[1:] if len(residual) > ctx: residual = residual[:-1] + '[...]' return "After %r, could not parse residual at position %d: %r" % ( parsed_tail, pos, residual) def _parse_hstore(hstore_str): """Parse an hstore from it's literal string representation. Attempts to approximate PG's hstore input parsing rules as closely as possible. Although currently this is not strictly necessary, since the current implementation of hstore's output syntax is stricter than what it accepts as input, the documentation makes no guarantees that will always be the case. """ result = {} pos = 0 pair_match = HSTORE_PAIR_RE.match(hstore_str) while pair_match is not None: key = pair_match.group('key').replace(r'\"', '"').replace("\\\\", "\\") if pair_match.group('value_null'): value = None else: value = pair_match.group('value').replace(r'\"', '"').replace("\\\\", "\\") result[key] = value pos += pair_match.end() delim_match = HSTORE_DELIMITER_RE.match(hstore_str[pos:]) if delim_match is not None: pos += delim_match.end() pair_match = HSTORE_PAIR_RE.match(hstore_str[pos:]) if pos != len(hstore_str): raise ValueError(_parse_error(hstore_str, pos)) return result def _serialize_hstore(val): """Serialize a dictionary into an hstore literal. Keys and values must both be strings (except None for values). """ def esc(s, position): if position == 'value' and s is None: return 'NULL' elif isinstance(s, basestring): return '"%s"' % s.replace("\\", "\\\\").replace('"', r'\"') else: raise ValueError("%r in %s position is not a string." 
% (s, position)) return ', '.join('%s=>%s' % (esc(k, 'key'), esc(v, 'value')) for k, v in val.iteritems()) class HSTORE(sqltypes.Concatenable, sqltypes.TypeEngine): """Represent the Postgresql HSTORE type. The :class:`.HSTORE` type stores dictionaries containing strings, e.g.:: data_table = Table('data_table', metadata, Column('id', Integer, primary_key=True), Column('data', HSTORE) ) with engine.connect() as conn: conn.execute( data_table.insert(), data = {"key1": "value1", "key2": "value2"} ) :class:`.HSTORE` provides for a wide range of operations, including: * Index operations:: data_table.c.data['some key'] == 'some value' * Containment operations:: data_table.c.data.has_key('some key') data_table.c.data.has_all(['one', 'two', 'three']) * Concatenation:: data_table.c.data + {"k1": "v1"} For a full list of special methods see :class:`.HSTORE.comparator_factory`. For usage with the SQLAlchemy ORM, it may be desirable to combine the usage of :class:`.HSTORE` with :class:`.MutableDict` dictionary now part of the :mod:`sqlalchemy.ext.mutable` extension. This extension will allow "in-place" changes to the dictionary, e.g. addition of new keys or replacement/removal of existing keys to/from the current dictionary, to produce events which will be detected by the unit of work:: from sqlalchemy.ext.mutable import MutableDict class MyClass(Base): __tablename__ = 'data_table' id = Column(Integer, primary_key=True) data = Column(MutableDict.as_mutable(HSTORE)) my_object = session.query(MyClass).one() # in-place mutation, requires Mutable extension # in order for the ORM to detect my_object.data['some_key'] = 'some value' session.commit() When the :mod:`sqlalchemy.ext.mutable` extension is not used, the ORM will not be alerted to any changes to the contents of an existing dictionary, unless that dictionary value is re-assigned to the HSTORE-attribute itself, thus generating a change event. .. versionadded:: 0.8 .. seealso:: :class:`.hstore` - render the Postgresql ``hstore()`` function. """ __visit_name__ = 'HSTORE' class comparator_factory(sqltypes.Concatenable.Comparator): """Define comparison operations for :class:`.HSTORE`.""" def has_key(self, other): """Boolean expression. Test for presence of a key. Note that the key may be a SQLA expression. """ return self.expr.op('?')(other) def has_all(self, other): """Boolean expression. Test for presence of all keys in the PG array. """ return self.expr.op('?&')(other) def has_any(self, other): """Boolean expression. Test for presence of any key in the PG array. """ return self.expr.op('?|')(other) def defined(self, key): """Boolean expression. Test for presence of a non-NULL value for the key. Note that the key may be a SQLA expression. """ return _HStoreDefinedFunction(self.expr, key) def contains(self, other, **kwargs): """Boolean expression. Test if keys are a superset of the keys of the argument hstore expression. """ return self.expr.op('@>')(other) def contained_by(self, other): """Boolean expression. Test if keys are a proper subset of the keys of the argument hstore expression. """ return self.expr.op('<@')(other) def __getitem__(self, other): """Text expression. Get the value at a given key. Note that the key may be a SQLA expression. """ return self.expr.op('->', precedence=5)(other) def delete(self, key): """HStore expression. Returns the contents of this hstore with the given key deleted. Note that the key may be a SQLA expression. 
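# --- Hedged sketch (not part of the original source).  Round-tripping the
# private helpers above between Python dicts and the hstore literal syntax:
_serialize_hstore({"key1": "value1"})   # -> '"key1"=>"value1"'
_serialize_hstore({"key2": None})       # -> '"key2"=>NULL'
_parse_hstore('"key1"=>"value1", "key2"=>NULL')
# -> {'key1': 'value1', 'key2': None}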
""" if isinstance(key, dict): key = _serialize_hstore(key) return _HStoreDeleteFunction(self.expr, key) def slice(self, array): """HStore expression. Returns a subset of an hstore defined by array of keys. """ return _HStoreSliceFunction(self.expr, array) def keys(self): """Text array expression. Returns array of keys.""" return _HStoreKeysFunction(self.expr) def vals(self): """Text array expression. Returns array of values.""" return _HStoreValsFunction(self.expr) def array(self): """Text array expression. Returns array of alternating keys and values. """ return _HStoreArrayFunction(self.expr) def matrix(self): """Text array expression. Returns array of [key, value] pairs.""" return _HStoreMatrixFunction(self.expr) def _adapt_expression(self, op, other_comparator): if isinstance(op, custom_op): if op.opstring in ['?', '?&', '?|', '@>', '<@']: return op, sqltypes.Boolean elif op.opstring == '->': return op, sqltypes.Text return sqltypes.Concatenable.Comparator.\ _adapt_expression(self, op, other_comparator) def bind_processor(self, dialect): if util.py2k: encoding = dialect.encoding def process(value): if isinstance(value, dict): return _serialize_hstore(value).encode(encoding) else: return value else: def process(value): if isinstance(value, dict): return _serialize_hstore(value) else: return value return process def result_processor(self, dialect, coltype): if util.py2k: encoding = dialect.encoding def process(value): if value is not None: return _parse_hstore(value.decode(encoding)) else: return value else: def process(value): if value is not None: return _parse_hstore(value) else: return value return process ischema_names['hstore'] = HSTORE class hstore(sqlfunc.GenericFunction): """Construct an hstore value within a SQL expression using the Postgresql ``hstore()`` function. The :class:`.hstore` function accepts one or two arguments as described in the Postgresql documentation. E.g.:: from sqlalchemy.dialects.postgresql import array, hstore select([hstore('key1', 'value1')]) select([ hstore( array(['key1', 'key2', 'key3']), array(['value1', 'value2', 'value3']) ) ]) .. versionadded:: 0.8 .. seealso:: :class:`.HSTORE` - the Postgresql ``HSTORE`` datatype. """ type = HSTORE name = 'hstore' class _HStoreDefinedFunction(sqlfunc.GenericFunction): type = sqltypes.Boolean name = 'defined' class _HStoreDeleteFunction(sqlfunc.GenericFunction): type = HSTORE name = 'delete' class _HStoreSliceFunction(sqlfunc.GenericFunction): type = HSTORE name = 'slice' class _HStoreKeysFunction(sqlfunc.GenericFunction): type = ARRAY(sqltypes.Text) name = 'akeys' class _HStoreValsFunction(sqlfunc.GenericFunction): type = ARRAY(sqltypes.Text) name = 'avals' class _HStoreArrayFunction(sqlfunc.GenericFunction): type = ARRAY(sqltypes.Text) name = 'hstore_to_array' class _HStoreMatrixFunction(sqlfunc.GenericFunction): type = ARRAY(sqltypes.Text) name = 'hstore_to_matrix' SQLAlchemy-0.8.4/lib/sqlalchemy/dialects/postgresql/pg8000.py0000644000076500000240000000735312251150015024424 0ustar classicstaff00000000000000# postgresql/pg8000.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """ .. dialect:: postgresql+pg8000 :name: pg8000 :dbapi: pg8000 :connectstring: postgresql+pg8000://user:password@host:port/dbname[?key=value&key=value...] 
:url: http://pybrary.net/pg8000/ Unicode ------- pg8000 requires that the postgresql client encoding be configured in the postgresql.conf file in order to use encodings other than ascii. Set this value to the same value as the "encoding" parameter on create_engine(), usually "utf-8". Interval -------- Passing data from/to the Interval type is not supported as of yet. """ from ... import util, exc import decimal from ... import processors from ... import types as sqltypes from .base import PGDialect, \ PGCompiler, PGIdentifierPreparer, PGExecutionContext,\ _DECIMAL_TYPES, _FLOAT_TYPES, _INT_TYPES class _PGNumeric(sqltypes.Numeric): def result_processor(self, dialect, coltype): if self.asdecimal: if coltype in _FLOAT_TYPES: return processors.to_decimal_processor_factory(decimal.Decimal) elif coltype in _DECIMAL_TYPES or coltype in _INT_TYPES: # pg8000 returns Decimal natively for 1700 return None else: raise exc.InvalidRequestError( "Unknown PG numeric type: %d" % coltype) else: if coltype in _FLOAT_TYPES: # pg8000 returns float natively for 701 return None elif coltype in _DECIMAL_TYPES or coltype in _INT_TYPES: return processors.to_float else: raise exc.InvalidRequestError( "Unknown PG numeric type: %d" % coltype) class _PGNumericNoBind(_PGNumeric): def bind_processor(self, dialect): return None class PGExecutionContext_pg8000(PGExecutionContext): pass class PGCompiler_pg8000(PGCompiler): def visit_mod_binary(self, binary, operator, **kw): return self.process(binary.left, **kw) + " %% " + \ self.process(binary.right, **kw) def post_process_text(self, text): if '%%' in text: util.warn("The SQLAlchemy postgresql dialect " "now automatically escapes '%' in text() " "expressions to '%%'.") return text.replace('%', '%%') class PGIdentifierPreparer_pg8000(PGIdentifierPreparer): def _escape_identifier(self, value): value = value.replace(self.escape_quote, self.escape_to_quote) return value.replace('%', '%%') class PGDialect_pg8000(PGDialect): driver = 'pg8000' supports_unicode_statements = True supports_unicode_binds = True default_paramstyle = 'format' supports_sane_multi_rowcount = False execution_ctx_cls = PGExecutionContext_pg8000 statement_compiler = PGCompiler_pg8000 preparer = PGIdentifierPreparer_pg8000 description_encoding = 'use_encoding' colspecs = util.update_copy( PGDialect.colspecs, { sqltypes.Numeric: _PGNumericNoBind, sqltypes.Float: _PGNumeric } ) @classmethod def dbapi(cls): return __import__('pg8000').dbapi def create_connect_args(self, url): opts = url.translate_connect_args(username='user') if 'port' in opts: opts['port'] = int(opts['port']) opts.update(url.query) return ([], opts) def is_disconnect(self, e, connection, cursor): return "connection is closed" in str(e) dialect = PGDialect_pg8000 SQLAlchemy-0.8.4/lib/sqlalchemy/dialects/postgresql/psycopg2.py0000644000076500000240000004351312251150015025252 0ustar classicstaff00000000000000# postgresql/psycopg2.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """ .. dialect:: postgresql+psycopg2 :name: psycopg2 :dbapi: psycopg2 :connectstring: postgresql+psycopg2://user:password@host:port/dbname[?key=value&key=value...] 
:url: http://pypi.python.org/pypi/psycopg2/ psycopg2 Connect Arguments ----------------------------------- psycopg2-specific keyword arguments which are accepted by :func:`.create_engine()` are: * ``server_side_cursors``: Enable the usage of "server side cursors" for SQL statements which support this feature. What this essentially means from a psycopg2 point of view is that the cursor is created using a name, e.g. ``connection.cursor('some name')``, which has the effect that result rows are not immediately pre-fetched and buffered after statement execution, but are instead left on the server and only retrieved as needed. SQLAlchemy's :class:`~sqlalchemy.engine.ResultProxy` uses special row-buffering behavior when this feature is enabled, such that groups of 100 rows at a time are fetched over the wire to reduce conversational overhead. Note that the ``stream_results=True`` execution option is a more targeted way of enabling this mode on a per-execution basis. * ``use_native_unicode``: Enable the usage of Psycopg2 "native unicode" mode per connection. True by default. * ``isolation_level``: This option, available for all Posgtresql dialects, includes the ``AUTOCOMMIT`` isolation level when using the psycopg2 dialect. See :ref:`psycopg2_isolation_level`. Unix Domain Connections ------------------------ psycopg2 supports connecting via Unix domain connections. When the ``host`` portion of the URL is omitted, SQLAlchemy passes ``None`` to psycopg2, which specifies Unix-domain communication rather than TCP/IP communication:: create_engine("postgresql+psycopg2://user:password@/dbname") By default, the socket file used is to connect to a Unix-domain socket in ``/tmp``, or whatever socket directory was specified when PostgreSQL was built. This value can be overridden by passing a pathname to psycopg2, using ``host`` as an additional keyword argument:: create_engine("postgresql+psycopg2://user:password@/dbname?host=/var/lib/postgresql") See also: `PQconnectdbParams `_ Per-Statement/Connection Execution Options ------------------------------------------- The following DBAPI-specific options are respected when used with :meth:`.Connection.execution_options`, :meth:`.Executable.execution_options`, :meth:`.Query.execution_options`, in addition to those not specific to DBAPIs: * isolation_level - Set the transaction isolation level for the lifespan of a :class:`.Connection` (can only be set on a connection, not a statement or query). See :ref:`psycopg2_isolation_level`. * stream_results - Enable or disable usage of psycopg2 server side cursors - this feature makes use of "named" cursors in combination with special result handling methods so that result rows are not fully buffered. If ``None`` or not set, the ``server_side_cursors`` option of the :class:`.Engine` is used. Unicode ------- By default, the psycopg2 driver uses the ``psycopg2.extensions.UNICODE`` extension, such that the DBAPI receives and returns all strings as Python Unicode objects directly - SQLAlchemy passes these values through without change. Psycopg2 here will encode/decode string values based on the current "client encoding" setting; by default this is the value in the ``postgresql.conf`` file, which often defaults to ``SQL_ASCII``. Typically, this can be changed to ``utf-8``, as a more useful default:: #client_encoding = sql_ascii # actually, defaults to database # encoding client_encoding = utf8 A second way to affect the client encoding is to set it within Psycopg2 locally. 
SQLAlchemy will call psycopg2's ``set_client_encoding()`` method (see: http://initd.org/psycopg/docs/connection.html#connection.set_client_encoding) on all new connections based on the value passed to :func:`.create_engine` using the ``client_encoding`` parameter:: engine = create_engine("postgresql://user:pass@host/dbname", client_encoding='utf8') This overrides the encoding specified in the Postgresql client configuration. .. versionadded:: 0.7.3 The psycopg2-specific ``client_encoding`` parameter to :func:`.create_engine`. SQLAlchemy can also be instructed to skip the usage of the psycopg2 ``UNICODE`` extension and to instead utilize it's own unicode encode/decode services, which are normally reserved only for those DBAPIs that don't fully support unicode directly. Passing ``use_native_unicode=False`` to :func:`.create_engine` will disable usage of ``psycopg2.extensions.UNICODE``. SQLAlchemy will instead encode data itself into Python bytestrings on the way in and coerce from bytes on the way back, using the value of the :func:`.create_engine` ``encoding`` parameter, which defaults to ``utf-8``. SQLAlchemy's own unicode encode/decode functionality is steadily becoming obsolete as more DBAPIs support unicode fully along with the approach of Python 3; in modern usage psycopg2 should be relied upon to handle unicode. Transactions ------------ The psycopg2 dialect fully supports SAVEPOINT and two-phase commit operations. .. _psycopg2_isolation_level: Psycopg2 Transaction Isolation Level ------------------------------------- As discussed in :ref:`postgresql_isolation_level`, all Postgresql dialects support setting of transaction isolation level both via the ``isolation_level`` parameter passed to :func:`.create_engine`, as well as the ``isolation_level`` argument used by :meth:`.Connection.execution_options`. When using the psycopg2 dialect, these options make use of psycopg2's ``set_isolation_level()`` connection method, rather than emitting a Postgresql directive; this is because psycopg2's API-level setting is always emitted at the start of each transaction in any case. The psycopg2 dialect supports these constants for isolation level: * ``READ COMMITTED`` * ``READ UNCOMMITTED`` * ``REPEATABLE READ`` * ``SERIALIZABLE`` * ``AUTOCOMMIT`` .. versionadded:: 0.8.2 support for AUTOCOMMIT isolation level when using psycopg2. NOTICE logging --------------- The psycopg2 dialect will log Postgresql NOTICE messages via the ``sqlalchemy.dialects.postgresql`` logger:: import logging logging.getLogger('sqlalchemy.dialects.postgresql').setLevel(logging.INFO) HSTORE type ------------ The psycopg2 dialect will make use of the ``psycopg2.extensions.register_hstore()`` extension when using the HSTORE type. This replaces SQLAlchemy's pure-Python HSTORE coercion which takes effect for other DBAPIs. """ from __future__ import absolute_import import re import logging from ... import util, exc import decimal from ... import processors from ...engine import result as _result from ...sql import expression from ... 
import types as sqltypes from .base import PGDialect, PGCompiler, \ PGIdentifierPreparer, PGExecutionContext, \ ENUM, ARRAY, _DECIMAL_TYPES, _FLOAT_TYPES,\ _INT_TYPES from .hstore import HSTORE logger = logging.getLogger('sqlalchemy.dialects.postgresql') class _PGNumeric(sqltypes.Numeric): def bind_processor(self, dialect): return None def result_processor(self, dialect, coltype): if self.asdecimal: if coltype in _FLOAT_TYPES: return processors.to_decimal_processor_factory(decimal.Decimal) elif coltype in _DECIMAL_TYPES or coltype in _INT_TYPES: # pg8000 returns Decimal natively for 1700 return None else: raise exc.InvalidRequestError( "Unknown PG numeric type: %d" % coltype) else: if coltype in _FLOAT_TYPES: # pg8000 returns float natively for 701 return None elif coltype in _DECIMAL_TYPES or coltype in _INT_TYPES: return processors.to_float else: raise exc.InvalidRequestError( "Unknown PG numeric type: %d" % coltype) class _PGEnum(ENUM): def __init__(self, *arg, **kw): super(_PGEnum, self).__init__(*arg, **kw) # Py2K if self.convert_unicode: self.convert_unicode = "force" # end Py2K class _PGArray(ARRAY): def __init__(self, *arg, **kw): super(_PGArray, self).__init__(*arg, **kw) # Py2K # FIXME: this check won't work for setups that # have convert_unicode only on their create_engine(). if isinstance(self.item_type, sqltypes.String) and \ self.item_type.convert_unicode: self.item_type.convert_unicode = "force" # end Py2K class _PGHStore(HSTORE): def bind_processor(self, dialect): if dialect._has_native_hstore: return None else: return super(_PGHStore, self).bind_processor(dialect) def result_processor(self, dialect, coltype): if dialect._has_native_hstore: return None else: return super(_PGHStore, self).result_processor(dialect, coltype) # When we're handed literal SQL, ensure it's a SELECT-query. Since # 8.3, combining cursors and "FOR UPDATE" has been fine. 
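# --- Hedged sketch (not part of the original source).  Exercising the
# server-side cursor support described in the module docstring above: enable
# it engine-wide with server_side_cursors=True and/or per statement with the
# stream_results execution option.  URL and table name are hypothetical.
from sqlalchemy import create_engine, text

engine = create_engine(
    "postgresql+psycopg2://scott:tiger@localhost/test",
    server_side_cursors=True)

with engine.connect() as conn:
    # a "named" psycopg2 cursor is used; rows are fetched in batches
    # rather than fully buffered after execute()
    result = conn.execution_options(stream_results=True).execute(
        text("SELECT * FROM big_table"))
    for row in result:
        pass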
SERVER_SIDE_CURSOR_RE = re.compile( r'\s*SELECT', re.I | re.UNICODE) _server_side_id = util.counter() class PGExecutionContext_psycopg2(PGExecutionContext): def create_cursor(self): # TODO: coverage for server side cursors + select.for_update() if self.dialect.server_side_cursors: is_server_side = \ self.execution_options.get('stream_results', True) and ( (self.compiled and isinstance(self.compiled.statement, expression.Selectable) \ or \ ( (not self.compiled or isinstance(self.compiled.statement, expression.TextClause)) and self.statement and SERVER_SIDE_CURSOR_RE.match(self.statement)) ) ) else: is_server_side = \ self.execution_options.get('stream_results', False) self.__is_server_side = is_server_side if is_server_side: # use server-side cursors: # http://lists.initd.org/pipermail/psycopg/2007-January/005251.html ident = "c_%s_%s" % (hex(id(self))[2:], hex(_server_side_id())[2:]) return self._dbapi_connection.cursor(ident) else: return self._dbapi_connection.cursor() def get_result_proxy(self): # TODO: ouch if logger.isEnabledFor(logging.INFO): self._log_notices(self.cursor) if self.__is_server_side: return _result.BufferedRowResultProxy(self) else: return _result.ResultProxy(self) def _log_notices(self, cursor): for notice in cursor.connection.notices: # NOTICE messages have a # newline character at the end logger.info(notice.rstrip()) cursor.connection.notices[:] = [] class PGCompiler_psycopg2(PGCompiler): def visit_mod_binary(self, binary, operator, **kw): return self.process(binary.left, **kw) + " %% " + \ self.process(binary.right, **kw) def post_process_text(self, text): return text.replace('%', '%%') class PGIdentifierPreparer_psycopg2(PGIdentifierPreparer): def _escape_identifier(self, value): value = value.replace(self.escape_quote, self.escape_to_quote) return value.replace('%', '%%') class PGDialect_psycopg2(PGDialect): driver = 'psycopg2' # Py2K supports_unicode_statements = False # end Py2K default_paramstyle = 'pyformat' supports_sane_multi_rowcount = False execution_ctx_cls = PGExecutionContext_psycopg2 statement_compiler = PGCompiler_psycopg2 preparer = PGIdentifierPreparer_psycopg2 psycopg2_version = (0, 0) _has_native_hstore = False colspecs = util.update_copy( PGDialect.colspecs, { sqltypes.Numeric: _PGNumeric, ENUM: _PGEnum, # needs force_unicode sqltypes.Enum: _PGEnum, # needs force_unicode ARRAY: _PGArray, # needs force_unicode HSTORE: _PGHStore, } ) def __init__(self, server_side_cursors=False, use_native_unicode=True, client_encoding=None, use_native_hstore=True, **kwargs): PGDialect.__init__(self, **kwargs) self.server_side_cursors = server_side_cursors self.use_native_unicode = use_native_unicode self.use_native_hstore = use_native_hstore self.supports_unicode_binds = use_native_unicode self.client_encoding = client_encoding if self.dbapi and hasattr(self.dbapi, '__version__'): m = re.match(r'(\d+)\.(\d+)(?:\.(\d+))?', self.dbapi.__version__) if m: self.psycopg2_version = tuple( int(x) for x in m.group(1, 2, 3) if x is not None) def initialize(self, connection): super(PGDialect_psycopg2, self).initialize(connection) self._has_native_hstore = self.use_native_hstore and \ self._hstore_oids(connection.connection) \ is not None @classmethod def dbapi(cls): import psycopg2 return psycopg2 @util.memoized_property def _isolation_lookup(self): extensions = __import__('psycopg2.extensions').extensions return { 'AUTOCOMMIT': extensions.ISOLATION_LEVEL_AUTOCOMMIT, 'READ COMMITTED': extensions.ISOLATION_LEVEL_READ_COMMITTED, 'READ UNCOMMITTED': 
extensions.ISOLATION_LEVEL_READ_UNCOMMITTED, 'REPEATABLE READ': extensions.ISOLATION_LEVEL_REPEATABLE_READ, 'SERIALIZABLE': extensions.ISOLATION_LEVEL_SERIALIZABLE } def set_isolation_level(self, connection, level): try: level = self._isolation_lookup[level.replace('_', ' ')] except KeyError: raise exc.ArgumentError( "Invalid value '%s' for isolation_level. " "Valid isolation levels for %s are %s" % (level, self.name, ", ".join(self._isolation_lookup)) ) connection.set_isolation_level(level) def on_connect(self): from psycopg2 import extras, extensions fns = [] if self.client_encoding is not None: def on_connect(conn): conn.set_client_encoding(self.client_encoding) fns.append(on_connect) if self.isolation_level is not None: def on_connect(conn): self.set_isolation_level(conn, self.isolation_level) fns.append(on_connect) if self.dbapi and self.use_native_unicode: def on_connect(conn): extensions.register_type(extensions.UNICODE, conn) fns.append(on_connect) if self.dbapi and self.use_native_hstore: def on_connect(conn): hstore_oids = self._hstore_oids(conn) if hstore_oids is not None: oid, array_oid = hstore_oids if util.py2k: extras.register_hstore(conn, oid=oid, array_oid=array_oid, unicode=True) else: extras.register_hstore(conn, oid=oid, array_oid=array_oid) fns.append(on_connect) if fns: def on_connect(conn): for fn in fns: fn(conn) return on_connect else: return None @util.memoized_instancemethod def _hstore_oids(self, conn): if self.psycopg2_version >= (2, 4): from psycopg2 import extras oids = extras.HstoreAdapter.get_oids(conn) if oids is not None and oids[0]: return oids[0:2] return None def create_connect_args(self, url): opts = url.translate_connect_args(username='user') if 'port' in opts: opts['port'] = int(opts['port']) opts.update(url.query) return ([], opts) def is_disconnect(self, e, connection, cursor): if isinstance(e, self.dbapi.Error): str_e = str(e).partition("\n")[0] for msg in [ # these error messages from libpq: interfaces/libpq/fe-misc.c # and interfaces/libpq/fe-secure.c. # TODO: these are sent through gettext in libpq and we can't # check within other locales - consider using connection.closed 'terminating connection', 'closed the connection', 'connection not open', 'could not receive data from server', # psycopg2 client errors, psycopg2/conenction.h, psycopg2/cursor.h 'connection already closed', 'cursor already closed', # not sure where this path is originally from, it may # be obsolete. It really says "losed", not "closed". 'losed the connection unexpectedly' ]: idx = str_e.find(msg) if idx >= 0 and '"' not in str_e[:idx]: return True return False dialect = PGDialect_psycopg2 SQLAlchemy-0.8.4/lib/sqlalchemy/dialects/postgresql/pypostgresql.py0000644000076500000240000000417112251147171026266 0ustar classicstaff00000000000000# postgresql/pypostgresql.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """ .. dialect:: postgresql+pypostgresql :name: py-postgresql :dbapi: pypostgresql :connectstring: postgresql+pypostgresql://user:password@host:port/dbname[?key=value&key=value...] :url: http://python.projects.pgfoundry.org/ """ from ... import util from ... import types as sqltypes from .base import PGDialect, PGExecutionContext from ... 
import processors class PGNumeric(sqltypes.Numeric): def bind_processor(self, dialect): return processors.to_str def result_processor(self, dialect, coltype): if self.asdecimal: return None else: return processors.to_float class PGExecutionContext_pypostgresql(PGExecutionContext): pass class PGDialect_pypostgresql(PGDialect): driver = 'pypostgresql' supports_unicode_statements = True supports_unicode_binds = True description_encoding = None default_paramstyle = 'pyformat' # requires trunk version to support sane rowcounts # TODO: use dbapi version information to set this flag appropriately supports_sane_rowcount = True supports_sane_multi_rowcount = False execution_ctx_cls = PGExecutionContext_pypostgresql colspecs = util.update_copy( PGDialect.colspecs, { sqltypes.Numeric: PGNumeric, # prevents PGNumeric from being used sqltypes.Float: sqltypes.Float, } ) @classmethod def dbapi(cls): from postgresql.driver import dbapi20 return dbapi20 def create_connect_args(self, url): opts = url.translate_connect_args(username='user') if 'port' in opts: opts['port'] = int(opts['port']) else: opts['port'] = 5432 opts.update(url.query) return ([], opts) def is_disconnect(self, e, connection, cursor): return "connection is closed" in str(e) dialect = PGDialect_pypostgresql SQLAlchemy-0.8.4/lib/sqlalchemy/dialects/postgresql/ranges.py0000644000076500000240000001130012251147171024761 0ustar classicstaff00000000000000# Copyright (C) 2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php from .base import ischema_names from ... import types as sqltypes __all__ = ('INT4RANGE', 'INT8RANGE', 'NUMRANGE') class RangeOperators(object): """ This mixin provides functionality for the Range Operators listed in Table 9-44 of the `postgres documentation`__ for Range Functions and Operators. It is used by all the range types provided in the ``postgres`` dialect and can likely be used for any range types you create yourself. __ http://www.postgresql.org/docs/devel/static/functions-range.html No extra support is provided for the Range Functions listed in Table 9-45 of the postgres documentation. For these, the normal :func:`~sqlalchemy.sql.expression.func` object should be used. .. versionadded:: 0.8.2 Support for Postgresql RANGE operations. """ class comparator_factory(sqltypes.Concatenable.Comparator): """Define comparison operations for range types.""" def __ne__(self, other): "Boolean expression. Returns true if two ranges are not equal" return self.expr.op('<>')(other) def contains(self, other, **kw): """Boolean expression. Returns true if the right hand operand, which can be an element or a range, is contained within the column. """ return self.expr.op('@>')(other) def contained_by(self, other): """Boolean expression. Returns true if the column is contained within the right hand operand. """ return self.expr.op('<@')(other) def overlaps(self, other): """Boolean expression. Returns true if the column overlaps (has points in common with) the right hand operand. """ return self.expr.op('&&')(other) def strictly_left_of(self, other): """Boolean expression. Returns true if the column is strictly left of the right hand operand. """ return self.expr.op('<<')(other) __lshift__ = strictly_left_of def strictly_right_of(self, other): """Boolean expression. Returns true if the column is strictly right of the right hand operand. 
""" return self.expr.op('>>')(other) __rshift__ = strictly_right_of def not_extend_right_of(self, other): """Boolean expression. Returns true if the range in the column does not extend right of the range in the operand. """ return self.expr.op('&<')(other) def not_extend_left_of(self, other): """Boolean expression. Returns true if the range in the column does not extend left of the range in the operand. """ return self.expr.op('&>')(other) def adjacent_to(self, other): """Boolean expression. Returns true if the range in the column is adjacent to the range in the operand. """ return self.expr.op('-|-')(other) def __add__(self, other): """Range expression. Returns the union of the two ranges. Will raise an exception if the resulting range is not contigous. """ return self.expr.op('+')(other) class INT4RANGE(RangeOperators, sqltypes.TypeEngine): """Represent the Postgresql INT4RANGE type. .. versionadded:: 0.8.2 """ __visit_name__ = 'INT4RANGE' ischema_names['int4range'] = INT4RANGE class INT8RANGE(RangeOperators, sqltypes.TypeEngine): """Represent the Postgresql INT8RANGE type. .. versionadded:: 0.8.2 """ __visit_name__ = 'INT8RANGE' ischema_names['int8range'] = INT8RANGE class NUMRANGE(RangeOperators, sqltypes.TypeEngine): """Represent the Postgresql NUMRANGE type. .. versionadded:: 0.8.2 """ __visit_name__ = 'NUMRANGE' ischema_names['numrange'] = NUMRANGE class DATERANGE(RangeOperators, sqltypes.TypeEngine): """Represent the Postgresql DATERANGE type. .. versionadded:: 0.8.2 """ __visit_name__ = 'DATERANGE' ischema_names['daterange'] = DATERANGE class TSRANGE(RangeOperators, sqltypes.TypeEngine): """Represent the Postgresql TSRANGE type. .. versionadded:: 0.8.2 """ __visit_name__ = 'TSRANGE' ischema_names['tsrange'] = TSRANGE class TSTZRANGE(RangeOperators, sqltypes.TypeEngine): """Represent the Postgresql TSTZRANGE type. .. versionadded:: 0.8.2 """ __visit_name__ = 'TSTZRANGE' ischema_names['tstzrange'] = TSTZRANGE SQLAlchemy-0.8.4/lib/sqlalchemy/dialects/postgresql/zxjdbc.py0000644000076500000240000000256312251147171025001 0ustar classicstaff00000000000000# postgresql/zxjdbc.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """ .. 
dialect:: postgresql+zxjdbc :name: zxJDBC for Jython :dbapi: zxjdbc :connectstring: postgresql+zxjdbc://scott:tiger@localhost/db :driverurl: http://jdbc.postgresql.org/ """ from ...connectors.zxJDBC import ZxJDBCConnector from .base import PGDialect, PGExecutionContext class PGExecutionContext_zxjdbc(PGExecutionContext): def create_cursor(self): cursor = self._dbapi_connection.cursor() cursor.datahandler = self.dialect.DataHandler(cursor.datahandler) return cursor class PGDialect_zxjdbc(ZxJDBCConnector, PGDialect): jdbc_db_name = 'postgresql' jdbc_driver_name = 'org.postgresql.Driver' execution_ctx_cls = PGExecutionContext_zxjdbc supports_native_decimal = True def __init__(self, *args, **kwargs): super(PGDialect_zxjdbc, self).__init__(*args, **kwargs) from com.ziclix.python.sql.handler import PostgresqlDataHandler self.DataHandler = PostgresqlDataHandler def _get_server_version_info(self, connection): parts = connection.connection.dbversion.split('.') return tuple(int(x) for x in parts) dialect = PGDialect_zxjdbc SQLAlchemy-0.8.4/lib/sqlalchemy/dialects/sqlite/0000755000076500000240000000000012251151573022234 5ustar classicstaff00000000000000SQLAlchemy-0.8.4/lib/sqlalchemy/dialects/sqlite/__init__.py0000644000076500000240000000131012251147171024337 0ustar classicstaff00000000000000# sqlite/__init__.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php from sqlalchemy.dialects.sqlite import base, pysqlite # default dialect base.dialect = pysqlite.dialect from sqlalchemy.dialects.sqlite.base import \ BLOB, BOOLEAN, CHAR, DATE, DATETIME, DECIMAL, FLOAT, INTEGER, REAL,\ NUMERIC, SMALLINT, TEXT, TIME, TIMESTAMP, VARCHAR, dialect __all__ = ( 'BLOB', 'BOOLEAN', 'CHAR', 'DATE', 'DATETIME', 'DECIMAL', 'FLOAT', 'INTEGER', 'NUMERIC', 'SMALLINT', 'TEXT', 'TIME', 'TIMESTAMP', 'VARCHAR', 'REAL', 'dialect' ) SQLAlchemy-0.8.4/lib/sqlalchemy/dialects/sqlite/base.py0000644000076500000240000010474412251150015023520 0ustar classicstaff00000000000000# sqlite/base.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """ .. dialect:: sqlite :name: SQLite Date and Time Types ------------------- SQLite does not have built-in DATE, TIME, or DATETIME types, and pysqlite does not provide out of the box functionality for translating values between Python `datetime` objects and a SQLite-supported format. SQLAlchemy's own :class:`~sqlalchemy.types.DateTime` and related types provide date formatting and parsing functionality when SQlite is used. The implementation classes are :class:`~.sqlite.DATETIME`, :class:`~.sqlite.DATE` and :class:`~.sqlite.TIME`. These types represent dates and times as ISO formatted strings, which also nicely support ordering. There's no reliance on typical "libc" internals for these functions so historical dates are fully supported. Auto Incrementing Behavior -------------------------- Background on SQLite's autoincrement is at: http://sqlite.org/autoinc.html Two things to note: * The AUTOINCREMENT keyword is **not** required for SQLite tables to generate primary key values automatically. AUTOINCREMENT only means that the algorithm used to generate ROWID values should be slightly different. * SQLite does **not** generate primary key (i.e. ROWID) values, even for one column, if the table has a composite (i.e. 
multi-column) primary key. This is regardless of the AUTOINCREMENT keyword being present or not. To specifically render the AUTOINCREMENT keyword on the primary key column when rendering DDL, add the flag ``sqlite_autoincrement=True`` to the Table construct:: Table('sometable', metadata, Column('id', Integer, primary_key=True), sqlite_autoincrement=True) Transaction Isolation Level --------------------------- :func:`.create_engine` accepts an ``isolation_level`` parameter which results in the command ``PRAGMA read_uncommitted `` being invoked for every new connection. Valid values for this parameter are ``SERIALIZABLE`` and ``READ UNCOMMITTED`` corresponding to a value of 0 and 1, respectively. See the section :ref:`pysqlite_serializable` for an important workaround when using serializable isolation with Pysqlite. Database Locking Behavior / Concurrency --------------------------------------- Note that SQLite is not designed for a high level of concurrency. The database itself, being a file, is locked completely during write operations and within transactions, meaning exactly one connection has exclusive access to the database during this period - all other connections will be blocked during this time. The Python DBAPI specification also calls for a connection model that is always in a transaction; there is no BEGIN method, only commit and rollback. This implies that a SQLite DBAPI driver would technically allow only serialized access to a particular database file at all times. The pysqlite driver attempts to ameliorate this by deferring the actual BEGIN statement until the first DML (INSERT, UPDATE, or DELETE) is received within a transaction. While this breaks serializable isolation, it at least delays the exclusive locking inherent in SQLite's design. SQLAlchemy's default mode of usage with the ORM is known as "autocommit=False", which means the moment the :class:`.Session` begins to be used, a transaction is begun. As the :class:`.Session` is used, the autoflush feature, also on by default, will flush out pending changes to the database before each query. The effect of this is that a :class:`.Session` used in its default mode will often emit DML early on, long before the transaction is actually committed. This again will have the effect of serializing access to the SQLite database. If highly concurrent reads are desired against the SQLite database, it is advised that the autoflush feature be disabled, and potentially even that autocommit be re-enabled, which has the effect of each SQL statement and flush committing changes immediately. For more information on SQLite's lack of concurrency by design, please see `Situations Where Another RDBMS May Work Better - High Concurrency `_ near the bottom of the page. .. _sqlite_foreign_keys: Foreign Key Support ------------------- SQLite supports FOREIGN KEY syntax when emitting CREATE statements for tables, however by default these constraints have no effect on the operation of the table. Constraint checking on SQLite has three prerequisites: * At least version 3.6.19 of SQLite must be in use * The SQLite libary must be compiled *without* the SQLITE_OMIT_FOREIGN_KEY or SQLITE_OMIT_TRIGGER symbols enabled. * The ``PRAGMA foreign_keys = ON`` statement must be emitted on all connections before use. 
SQLAlchemy allows for the ``PRAGMA`` statement to be emitted automatically for new connections through the usage of events:: from sqlalchemy.engine import Engine from sqlalchemy import event @event.listens_for(Engine, "connect") def set_sqlite_pragma(dbapi_connection, connection_record): cursor = dbapi_connection.cursor() cursor.execute("PRAGMA foreign_keys=ON") cursor.close() .. seealso:: `SQLite Foreign Key Support `_ - on the SQLite web site. :ref:`event_toplevel` - SQLAlchemy event API. """ import datetime import re from sqlalchemy import sql, exc from sqlalchemy.engine import default, base, reflection from sqlalchemy import types as sqltypes from sqlalchemy import util from sqlalchemy.sql import compiler from sqlalchemy import processors from sqlalchemy.types import BIGINT, BLOB, BOOLEAN, CHAR,\ DECIMAL, FLOAT, REAL, INTEGER, NUMERIC, SMALLINT, TEXT,\ TIMESTAMP, VARCHAR class _DateTimeMixin(object): _reg = None _storage_format = None def __init__(self, storage_format=None, regexp=None, **kw): super(_DateTimeMixin, self).__init__(**kw) if regexp is not None: self._reg = re.compile(regexp) if storage_format is not None: self._storage_format = storage_format def adapt(self, cls, **kw): if self._storage_format: kw["storage_format"] = self._storage_format if self._reg: kw["regexp"] = self._reg return util.constructor_copy(self, cls, **kw) class DATETIME(_DateTimeMixin, sqltypes.DateTime): """Represent a Python datetime object in SQLite using a string. The default string storage format is:: "%(year)04d-%(month)02d-%(day)02d %(hour)02d:%(min)02d:%(second)02d.%(microsecond)06d" e.g.:: 2011-03-15 12:05:57.10558 The storage format can be customized to some degree using the ``storage_format`` and ``regexp`` parameters, such as:: import re from sqlalchemy.dialects.sqlite import DATETIME dt = DATETIME( storage_format="%(year)04d/%(month)02d/%(day)02d %(hour)02d:%(min)02d:%(second)02d", regexp=r"(\d+)/(\d+)/(\d+) (\d+)-(\d+)-(\d+)" ) :param storage_format: format string which will be applied to the dict with keys year, month, day, hour, minute, second, and microsecond. :param regexp: regular expression which will be applied to incoming result rows. If the regexp contains named groups, the resulting match dict is applied to the Python datetime() constructor as keyword arguments. Otherwise, if positional groups are used, the the datetime() constructor is called with positional arguments via ``*map(int, match_obj.groups(0))``. """ _storage_format = ( "%(year)04d-%(month)02d-%(day)02d " "%(hour)02d:%(minute)02d:%(second)02d.%(microsecond)06d" ) def __init__(self, *args, **kwargs): truncate_microseconds = kwargs.pop('truncate_microseconds', False) super(DATETIME, self).__init__(*args, **kwargs) if truncate_microseconds: assert 'storage_format' not in kwargs, "You can specify only "\ "one of truncate_microseconds or storage_format." assert 'regexp' not in kwargs, "You can specify only one of "\ "truncate_microseconds or regexp." 
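            # truncate_microseconds was requested: swap in a storage format
            # that omits the fractional seconds field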
self._storage_format = ( "%(year)04d-%(month)02d-%(day)02d " "%(hour)02d:%(minute)02d:%(second)02d" ) def bind_processor(self, dialect): datetime_datetime = datetime.datetime datetime_date = datetime.date format = self._storage_format def process(value): if value is None: return None elif isinstance(value, datetime_datetime): return format % { 'year': value.year, 'month': value.month, 'day': value.day, 'hour': value.hour, 'minute': value.minute, 'second': value.second, 'microsecond': value.microsecond, } elif isinstance(value, datetime_date): return format % { 'year': value.year, 'month': value.month, 'day': value.day, 'hour': 0, 'minute': 0, 'second': 0, 'microsecond': 0, } else: raise TypeError("SQLite DateTime type only accepts Python " "datetime and date objects as input.") return process def result_processor(self, dialect, coltype): if self._reg: return processors.str_to_datetime_processor_factory( self._reg, datetime.datetime) else: return processors.str_to_datetime class DATE(_DateTimeMixin, sqltypes.Date): """Represent a Python date object in SQLite using a string. The default string storage format is:: "%(year)04d-%(month)02d-%(day)02d" e.g.:: 2011-03-15 The storage format can be customized to some degree using the ``storage_format`` and ``regexp`` parameters, such as:: import re from sqlalchemy.dialects.sqlite import DATE d = DATE( storage_format="%(month)02d/%(day)02d/%(year)04d", regexp=re.compile("(?P\d+)/(?P\d+)/(?P\d+)") ) :param storage_format: format string which will be applied to the dict with keys year, month, and day. :param regexp: regular expression which will be applied to incoming result rows. If the regexp contains named groups, the resulting match dict is applied to the Python date() constructor as keyword arguments. Otherwise, if positional groups are used, the the date() constructor is called with positional arguments via ``*map(int, match_obj.groups(0))``. """ _storage_format = "%(year)04d-%(month)02d-%(day)02d" def bind_processor(self, dialect): datetime_date = datetime.date format = self._storage_format def process(value): if value is None: return None elif isinstance(value, datetime_date): return format % { 'year': value.year, 'month': value.month, 'day': value.day, } else: raise TypeError("SQLite Date type only accepts Python " "date objects as input.") return process def result_processor(self, dialect, coltype): if self._reg: return processors.str_to_datetime_processor_factory( self._reg, datetime.date) else: return processors.str_to_date class TIME(_DateTimeMixin, sqltypes.Time): """Represent a Python time object in SQLite using a string. The default string storage format is:: "%(hour)02d:%(minute)02d:%(second)02d.%(microsecond)06d" e.g.:: 12:05:57.10558 The storage format can be customized to some degree using the ``storage_format`` and ``regexp`` parameters, such as:: import re from sqlalchemy.dialects.sqlite import TIME t = TIME( storage_format="%(hour)02d-%(minute)02d-%(second)02d-%(microsecond)06d", regexp=re.compile("(\d+)-(\d+)-(\d+)-(?:-(\d+))?") ) :param storage_format: format string which will be applied to the dict with keys hour, minute, second, and microsecond. :param regexp: regular expression which will be applied to incoming result rows. If the regexp contains named groups, the resulting match dict is applied to the Python time() constructor as keyword arguments. Otherwise, if positional groups are used, the the time() constructor is called with positional arguments via ``*map(int, match_obj.groups(0))``. 
""" _storage_format = "%(hour)02d:%(minute)02d:%(second)02d.%(microsecond)06d" def __init__(self, *args, **kwargs): truncate_microseconds = kwargs.pop('truncate_microseconds', False) super(TIME, self).__init__(*args, **kwargs) if truncate_microseconds: assert 'storage_format' not in kwargs, "You can specify only "\ "one of truncate_microseconds or storage_format." assert 'regexp' not in kwargs, "You can specify only one of "\ "truncate_microseconds or regexp." self._storage_format = "%(hour)02d:%(minute)02d:%(second)02d" def bind_processor(self, dialect): datetime_time = datetime.time format = self._storage_format def process(value): if value is None: return None elif isinstance(value, datetime_time): return format % { 'hour': value.hour, 'minute': value.minute, 'second': value.second, 'microsecond': value.microsecond, } else: raise TypeError("SQLite Time type only accepts Python " "time objects as input.") return process def result_processor(self, dialect, coltype): if self._reg: return processors.str_to_datetime_processor_factory( self._reg, datetime.time) else: return processors.str_to_time colspecs = { sqltypes.Date: DATE, sqltypes.DateTime: DATETIME, sqltypes.Time: TIME, } ischema_names = { 'BIGINT': sqltypes.BIGINT, 'BLOB': sqltypes.BLOB, 'BOOL': sqltypes.BOOLEAN, 'BOOLEAN': sqltypes.BOOLEAN, 'CHAR': sqltypes.CHAR, 'DATE': sqltypes.DATE, 'DATETIME': sqltypes.DATETIME, 'DECIMAL': sqltypes.DECIMAL, 'FLOAT': sqltypes.FLOAT, 'INT': sqltypes.INTEGER, 'INTEGER': sqltypes.INTEGER, 'NUMERIC': sqltypes.NUMERIC, 'REAL': sqltypes.REAL, 'SMALLINT': sqltypes.SMALLINT, 'TEXT': sqltypes.TEXT, 'TIME': sqltypes.TIME, 'TIMESTAMP': sqltypes.TIMESTAMP, 'VARCHAR': sqltypes.VARCHAR, 'NVARCHAR': sqltypes.NVARCHAR, 'NCHAR': sqltypes.NCHAR, } class SQLiteCompiler(compiler.SQLCompiler): extract_map = util.update_copy( compiler.SQLCompiler.extract_map, { 'month': '%m', 'day': '%d', 'year': '%Y', 'second': '%S', 'hour': '%H', 'doy': '%j', 'minute': '%M', 'epoch': '%s', 'dow': '%w', 'week': '%W' }) def visit_now_func(self, fn, **kw): return "CURRENT_TIMESTAMP" def visit_localtimestamp_func(self, func, **kw): return 'DATETIME(CURRENT_TIMESTAMP, "localtime")' def visit_true(self, expr, **kw): return '1' def visit_false(self, expr, **kw): return '0' def visit_char_length_func(self, fn, **kw): return "length%s" % self.function_argspec(fn) def visit_cast(self, cast, **kwargs): if self.dialect.supports_cast: return super(SQLiteCompiler, self).visit_cast(cast) else: return self.process(cast.clause) def visit_extract(self, extract, **kw): try: return "CAST(STRFTIME('%s', %s) AS INTEGER)" % ( self.extract_map[extract.field], self.process(extract.expr, **kw) ) except KeyError: raise exc.CompileError( "%s is not a valid extract argument." 
% extract.field) def limit_clause(self, select): text = "" if select._limit is not None: text += "\n LIMIT " + self.process(sql.literal(select._limit)) if select._offset is not None: if select._limit is None: text += "\n LIMIT " + self.process(sql.literal(-1)) text += " OFFSET " + self.process(sql.literal(select._offset)) else: text += " OFFSET " + self.process(sql.literal(0)) return text def for_update_clause(self, select): # sqlite has no "FOR UPDATE" AFAICT return '' class SQLiteDDLCompiler(compiler.DDLCompiler): def get_column_specification(self, column, **kwargs): coltype = self.dialect.type_compiler.process(column.type) colspec = self.preparer.format_column(column) + " " + coltype default = self.get_column_default_string(column) if default is not None: colspec += " DEFAULT " + default if not column.nullable: colspec += " NOT NULL" if (column.primary_key and column.table.kwargs.get('sqlite_autoincrement', False) and len(column.table.primary_key.columns) == 1 and issubclass(column.type._type_affinity, sqltypes.Integer) and not column.foreign_keys): colspec += " PRIMARY KEY AUTOINCREMENT" return colspec def visit_primary_key_constraint(self, constraint): # for columns with sqlite_autoincrement=True, # the PRIMARY KEY constraint can only be inline # with the column itself. if len(constraint.columns) == 1: c = list(constraint)[0] if c.primary_key and \ c.table.kwargs.get('sqlite_autoincrement', False) and \ issubclass(c.type._type_affinity, sqltypes.Integer) and \ not c.foreign_keys: return None return super(SQLiteDDLCompiler, self).\ visit_primary_key_constraint(constraint) def visit_foreign_key_constraint(self, constraint): local_table = constraint._elements.values()[0].parent.table remote_table = list(constraint._elements.values())[0].column.table if local_table.schema != remote_table.schema: return None else: return super(SQLiteDDLCompiler, self).visit_foreign_key_constraint(constraint) def define_constraint_remote_table(self, constraint, table, preparer): """Format the remote table clause of a CREATE CONSTRAINT clause.""" return preparer.format_table(table, use_schema=False) def visit_create_index(self, create): return super(SQLiteDDLCompiler, self).\ visit_create_index(create, include_table_schema=False) class SQLiteTypeCompiler(compiler.GenericTypeCompiler): def visit_large_binary(self, type_): return self.visit_BLOB(type_) class SQLiteIdentifierPreparer(compiler.IdentifierPreparer): reserved_words = set([ 'add', 'after', 'all', 'alter', 'analyze', 'and', 'as', 'asc', 'attach', 'autoincrement', 'before', 'begin', 'between', 'by', 'cascade', 'case', 'cast', 'check', 'collate', 'column', 'commit', 'conflict', 'constraint', 'create', 'cross', 'current_date', 'current_time', 'current_timestamp', 'database', 'default', 'deferrable', 'deferred', 'delete', 'desc', 'detach', 'distinct', 'drop', 'each', 'else', 'end', 'escape', 'except', 'exclusive', 'explain', 'false', 'fail', 'for', 'foreign', 'from', 'full', 'glob', 'group', 'having', 'if', 'ignore', 'immediate', 'in', 'index', 'indexed', 'initially', 'inner', 'insert', 'instead', 'intersect', 'into', 'is', 'isnull', 'join', 'key', 'left', 'like', 'limit', 'match', 'natural', 'not', 'notnull', 'null', 'of', 'offset', 'on', 'or', 'order', 'outer', 'plan', 'pragma', 'primary', 'query', 'raise', 'references', 'reindex', 'rename', 'replace', 'restrict', 'right', 'rollback', 'row', 'select', 'set', 'table', 'temp', 'temporary', 'then', 'to', 'transaction', 'trigger', 'true', 'union', 'unique', 'update', 'using', 'vacuum', 'values', 'view', 
'virtual', 'when', 'where', ]) def format_index(self, index, use_schema=True, name=None): """Prepare a quoted index and schema name.""" if name is None: name = index.name result = self.quote(name, index.quote) if (not self.omit_schema and use_schema and getattr(index.table, "schema", None)): result = self.quote_schema( index.table.schema, index.table.quote_schema) + "." + result return result class SQLiteExecutionContext(default.DefaultExecutionContext): @util.memoized_property def _preserve_raw_colnames(self): return self.execution_options.get("sqlite_raw_colnames", False) def _translate_colname(self, colname): # adjust for dotted column names. SQLite # in the case of UNION may store col names as # "tablename.colname" # in cursor.description if not self._preserve_raw_colnames and "." in colname: return colname.split(".")[1], colname else: return colname, None class SQLiteDialect(default.DefaultDialect): name = 'sqlite' supports_alter = False supports_unicode_statements = True supports_unicode_binds = True supports_default_values = True supports_empty_insert = False supports_cast = True supports_multivalues_insert = True default_paramstyle = 'qmark' execution_ctx_cls = SQLiteExecutionContext statement_compiler = SQLiteCompiler ddl_compiler = SQLiteDDLCompiler type_compiler = SQLiteTypeCompiler preparer = SQLiteIdentifierPreparer ischema_names = ischema_names colspecs = colspecs isolation_level = None supports_cast = True supports_default_values = True _broken_fk_pragma_quotes = False def __init__(self, isolation_level=None, native_datetime=False, **kwargs): default.DefaultDialect.__init__(self, **kwargs) self.isolation_level = isolation_level # this flag used by pysqlite dialect, and perhaps others in the # future, to indicate the driver is handling date/timestamp # conversions (and perhaps datetime/time as well on some # hypothetical driver ?) self.native_datetime = native_datetime if self.dbapi is not None: self.supports_default_values = \ self.dbapi.sqlite_version_info >= (3, 3, 8) self.supports_cast = \ self.dbapi.sqlite_version_info >= (3, 2, 3) self.supports_multivalues_insert = \ self.dbapi.sqlite_version_info >= (3, 7, 11) # http://www.sqlite.org/releaselog/3_7_11.html # see http://www.sqlalchemy.org/trac/ticket/2568 # as well as http://www.sqlite.org/src/info/600482d161 self._broken_fk_pragma_quotes = \ self.dbapi.sqlite_version_info < (3, 6, 14) _isolation_lookup = { 'READ UNCOMMITTED': 1, 'SERIALIZABLE': 0 } def set_isolation_level(self, connection, level): try: isolation_level = self._isolation_lookup[level.replace('_', ' ')] except KeyError: raise exc.ArgumentError( "Invalid value '%s' for isolation_level. 
" "Valid isolation levels for %s are %s" % (level, self.name, ", ".join(self._isolation_lookup)) ) cursor = connection.cursor() cursor.execute("PRAGMA read_uncommitted = %d" % isolation_level) cursor.close() def get_isolation_level(self, connection): cursor = connection.cursor() cursor.execute('PRAGMA read_uncommitted') res = cursor.fetchone() if res: value = res[0] else: # http://www.sqlite.org/changes.html#version_3_3_3 # "Optional READ UNCOMMITTED isolation (instead of the # default isolation level of SERIALIZABLE) and # table level locking when database connections # share a common cache."" # pre-SQLite 3.3.0 default to 0 value = 0 cursor.close() if value == 0: return "SERIALIZABLE" elif value == 1: return "READ UNCOMMITTED" else: assert False, "Unknown isolation level %s" % value def on_connect(self): if self.isolation_level is not None: def connect(conn): self.set_isolation_level(conn, self.isolation_level) return connect else: return None @reflection.cache def get_table_names(self, connection, schema=None, **kw): if schema is not None: qschema = self.identifier_preparer.quote_identifier(schema) master = '%s.sqlite_master' % qschema s = ("SELECT name FROM %s " "WHERE type='table' ORDER BY name") % (master,) rs = connection.execute(s) else: try: s = ("SELECT name FROM " " (SELECT * FROM sqlite_master UNION ALL " " SELECT * FROM sqlite_temp_master) " "WHERE type='table' ORDER BY name") rs = connection.execute(s) except exc.DBAPIError: s = ("SELECT name FROM sqlite_master " "WHERE type='table' ORDER BY name") rs = connection.execute(s) return [row[0] for row in rs] def has_table(self, connection, table_name, schema=None): quote = self.identifier_preparer.quote_identifier if schema is not None: pragma = "PRAGMA %s." % quote(schema) else: pragma = "PRAGMA " qtable = quote(table_name) statement = "%stable_info(%s)" % (pragma, qtable) cursor = _pragma_cursor(connection.execute(statement)) row = cursor.fetchone() # consume remaining rows, to work around # http://www.sqlite.org/cvstrac/tktview?tn=1884 while not cursor.closed and cursor.fetchone() is not None: pass return (row is not None) @reflection.cache def get_view_names(self, connection, schema=None, **kw): if schema is not None: qschema = self.identifier_preparer.quote_identifier(schema) master = '%s.sqlite_master' % qschema s = ("SELECT name FROM %s " "WHERE type='view' ORDER BY name") % (master,) rs = connection.execute(s) else: try: s = ("SELECT name FROM " " (SELECT * FROM sqlite_master UNION ALL " " SELECT * FROM sqlite_temp_master) " "WHERE type='view' ORDER BY name") rs = connection.execute(s) except exc.DBAPIError: s = ("SELECT name FROM sqlite_master " "WHERE type='view' ORDER BY name") rs = connection.execute(s) return [row[0] for row in rs] @reflection.cache def get_view_definition(self, connection, view_name, schema=None, **kw): quote = self.identifier_preparer.quote_identifier if schema is not None: qschema = self.identifier_preparer.quote_identifier(schema) master = '%s.sqlite_master' % qschema s = ("SELECT sql FROM %s WHERE name = '%s'" "AND type='view'") % (master, view_name) rs = connection.execute(s) else: try: s = ("SELECT sql FROM " " (SELECT * FROM sqlite_master UNION ALL " " SELECT * FROM sqlite_temp_master) " "WHERE name = '%s' " "AND type='view'") % view_name rs = connection.execute(s) except exc.DBAPIError: s = ("SELECT sql FROM sqlite_master WHERE name = '%s' " "AND type='view'") % view_name rs = connection.execute(s) result = rs.fetchall() if result: return result[0].sql @reflection.cache def get_columns(self, 
connection, table_name, schema=None, **kw): quote = self.identifier_preparer.quote_identifier if schema is not None: pragma = "PRAGMA %s." % quote(schema) else: pragma = "PRAGMA " qtable = quote(table_name) statement = "%stable_info(%s)" % (pragma, qtable) c = _pragma_cursor(connection.execute(statement)) rows = c.fetchall() columns = [] for row in rows: (name, type_, nullable, default, primary_key) = \ (row[1], row[2].upper(), not row[3], row[4], row[5]) columns.append(self._get_column_info(name, type_, nullable, default, primary_key)) return columns def _get_column_info(self, name, type_, nullable, default, primary_key): match = re.match(r'(\w+)(\(.*?\))?', type_) if match: coltype = match.group(1) args = match.group(2) else: coltype = "VARCHAR" args = '' try: coltype = self.ischema_names[coltype] if args is not None: args = re.findall(r'(\d+)', args) coltype = coltype(*[int(a) for a in args]) except KeyError: util.warn("Did not recognize type '%s' of column '%s'" % (coltype, name)) coltype = sqltypes.NullType() if default is not None: default = unicode(default) return { 'name': name, 'type': coltype, 'nullable': nullable, 'default': default, 'autoincrement': default is None, 'primary_key': primary_key } @reflection.cache def get_pk_constraint(self, connection, table_name, schema=None, **kw): cols = self.get_columns(connection, table_name, schema, **kw) pkeys = [] for col in cols: if col['primary_key']: pkeys.append(col['name']) return {'constrained_columns': pkeys, 'name': None} @reflection.cache def get_foreign_keys(self, connection, table_name, schema=None, **kw): quote = self.identifier_preparer.quote_identifier if schema is not None: pragma = "PRAGMA %s." % quote(schema) else: pragma = "PRAGMA " qtable = quote(table_name) statement = "%sforeign_key_list(%s)" % (pragma, qtable) c = _pragma_cursor(connection.execute(statement)) fkeys = [] fks = {} while True: row = c.fetchone() if row is None: break (numerical_id, rtbl, lcol, rcol) = (row[0], row[2], row[3], row[4]) self._parse_fk(fks, fkeys, numerical_id, rtbl, lcol, rcol) return fkeys def _parse_fk(self, fks, fkeys, numerical_id, rtbl, lcol, rcol): # sqlite won't return rcol if the table # was created with REFERENCES , no col if rcol is None: rcol = lcol if self._broken_fk_pragma_quotes: rtbl = re.sub(r'^[\"\[`\']|[\"\]`\']$', '', rtbl) try: fk = fks[numerical_id] except KeyError: fk = { 'name': None, 'constrained_columns': [], 'referred_schema': None, 'referred_table': rtbl, 'referred_columns': [] } fkeys.append(fk) fks[numerical_id] = fk if lcol not in fk['constrained_columns']: fk['constrained_columns'].append(lcol) if rcol not in fk['referred_columns']: fk['referred_columns'].append(rcol) return fk @reflection.cache def get_indexes(self, connection, table_name, schema=None, **kw): quote = self.identifier_preparer.quote_identifier if schema is not None: pragma = "PRAGMA %s." % quote(schema) else: pragma = "PRAGMA " include_auto_indexes = kw.pop('include_auto_indexes', False) qtable = quote(table_name) statement = "%sindex_list(%s)" % (pragma, qtable) c = _pragma_cursor(connection.execute(statement)) indexes = [] while True: row = c.fetchone() if row is None: break # ignore implicit primary key index. # http://www.mail-archive.com/sqlite-users@sqlite.org/msg30517.html elif (not include_auto_indexes and row[1].startswith('sqlite_autoindex')): continue indexes.append(dict(name=row[1], column_names=[], unique=row[2])) # loop thru unique indexes to get the column names. 
for idx in indexes: statement = "%sindex_info(%s)" % (pragma, quote(idx['name'])) c = connection.execute(statement) cols = idx['column_names'] while True: row = c.fetchone() if row is None: break cols.append(row[2]) return indexes @reflection.cache def get_unique_constraints(self, connection, table_name, schema=None, **kw): UNIQUE_SQL = """ SELECT sql FROM sqlite_master WHERE type='table' AND name=:table_name """ c = connection.execute(UNIQUE_SQL, table_name=table_name) table_data = c.fetchone()[0] UNIQUE_PATTERN = 'CONSTRAINT (\w+) UNIQUE \(([^\)]+)\)' return [ {'name': name, 'column_names': [c.strip() for c in cols.split(',')]} for name, cols in re.findall(UNIQUE_PATTERN, table_data) ] def _pragma_cursor(cursor): """work around SQLite issue whereby cursor.description is blank when PRAGMA returns no rows.""" if cursor.closed: cursor.fetchone = lambda: None cursor.fetchall = lambda: [] return cursor SQLAlchemy-0.8.4/lib/sqlalchemy/dialects/sqlite/pysqlite.py0000644000076500000240000003157312251150015024457 0ustar classicstaff00000000000000# sqlite/pysqlite.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """ .. dialect:: sqlite+pysqlite :name: pysqlite :dbapi: sqlite3 :connectstring: sqlite+pysqlite:///file_path :url: http://docs.python.org/library/sqlite3.html Note that ``pysqlite`` is the same driver as the ``sqlite3`` module included with the Python distribution. Driver ------ When using Python 2.5 and above, the built in ``sqlite3`` driver is already installed and no additional installation is needed. Otherwise, the ``pysqlite2`` driver needs to be present. This is the same driver as ``sqlite3``, just with a different name. The ``pysqlite2`` driver will be loaded first, and if not found, ``sqlite3`` is loaded. This allows an explicitly installed pysqlite driver to take precedence over the built in one. As with all dialects, a specific DBAPI module may be provided to :func:`~sqlalchemy.create_engine()` to control this explicitly:: from sqlite3 import dbapi2 as sqlite e = create_engine('sqlite+pysqlite:///file.db', module=sqlite) Connect Strings --------------- The file specification for the SQLite database is taken as the "database" portion of the URL. Note that the format of a SQLAlchemy url is:: driver://user:pass@host/database This means that the actual filename to be used starts with the characters to the **right** of the third slash. So connecting to a relative filepath looks like:: # relative path e = create_engine('sqlite:///path/to/database.db') An absolute path, which is denoted by starting with a slash, means you need **four** slashes:: # absolute path e = create_engine('sqlite:////path/to/database.db') To use a Windows path, regular drive specifications and backslashes can be used. Double backslashes are probably needed:: # absolute path on Windows e = create_engine('sqlite:///C:\\\\path\\\\to\\\\database.db') The sqlite ``:memory:`` identifier is the default if no filepath is present. Specify ``sqlite://`` and nothing else:: # in-memory database e = create_engine('sqlite://') Compatibility with sqlite3 "native" date and datetime types ----------------------------------------------------------- The pysqlite driver includes the sqlite3.PARSE_DECLTYPES and sqlite3.PARSE_COLNAMES options, which have the effect of any column or expression explicitly cast as "date" or "timestamp" will be converted to a Python date or datetime object. 
The date and datetime types provided with the pysqlite dialect are not currently compatible with these options, since they render the ISO date/datetime including microseconds, which pysqlite's driver does not. Additionally, SQLAlchemy does not at this time automatically render the "cast" syntax required for the freestanding functions "current_timestamp" and "current_date" to return datetime/date types natively. Unfortunately, pysqlite does not provide the standard DBAPI types in ``cursor.description``, leaving SQLAlchemy with no way to detect these types on the fly without expensive per-row type checks. Keeping in mind that pysqlite's parsing option is not recommended, nor should be necessary, for use with SQLAlchemy, usage of PARSE_DECLTYPES can be forced if one configures "native_datetime=True" on create_engine():: engine = create_engine('sqlite://', connect_args={'detect_types': sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES}, native_datetime=True ) With this flag enabled, the DATE and TIMESTAMP types (but note - not the DATETIME or TIME types...confused yet ?) will not perform any bind parameter or result processing. Execution of "func.current_date()" will return a string. "func.current_timestamp()" is registered as returning a DATETIME type in SQLAlchemy, so this function still receives SQLAlchemy-level result processing. .. _pysqlite_threading_pooling: Threading/Pooling Behavior --------------------------- Pysqlite's default behavior is to prohibit the usage of a single connection in more than one thread. This is originally intended to work with older versions of SQLite that did not support multithreaded operation under various circumstances. In particular, older SQLite versions did not allow a ``:memory:`` database to be used in multiple threads under any circumstances. Pysqlite does include a now-undocumented flag known as ``check_same_thread`` which will disable this check, however note that pysqlite connections are still not safe to use in concurrently in multiple threads. In particular, any statement execution calls would need to be externally mutexed, as Pysqlite does not provide for thread-safe propagation of error messages among other things. So while even ``:memory:`` databases can be shared among threads in modern SQLite, Pysqlite doesn't provide enough thread-safety to make this usage worth it. SQLAlchemy sets up pooling to work with Pysqlite's default behavior: * When a ``:memory:`` SQLite database is specified, the dialect by default will use :class:`.SingletonThreadPool`. This pool maintains a single connection per thread, so that all access to the engine within the current thread use the same ``:memory:`` database - other threads would access a different ``:memory:`` database. * When a file-based database is specified, the dialect will use :class:`.NullPool` as the source of connections. This pool closes and discards connections which are returned to the pool immediately. SQLite file-based connections have extremely low overhead, so pooling is not necessary. The scheme also prevents a connection from being used again in a different thread and works best with SQLite's coarse-grained file locking. .. versionchanged:: 0.7 Default selection of :class:`.NullPool` for SQLite file-based databases. Previous versions select :class:`.SingletonThreadPool` by default for all SQLite databases. 
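In either case, the default pool may be overridden by passing the
``poolclass`` argument directly to :func:`.create_engine`; as a minimal
sketch, any of the pool implementations in :mod:`sqlalchemy.pool` may be
specified this way::

    from sqlalchemy import create_engine
    from sqlalchemy.pool import SingletonThreadPool

    # use a single connection per thread for a file-based database as well
    engine = create_engine('sqlite:///myfile.db',
                           poolclass=SingletonThreadPool)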
Using a Memory Database in Multiple Threads ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ To use a ``:memory:`` database in a multithreaded scenario, the same connection object must be shared among threads, since the database exists only within the scope of that connection. The :class:`.StaticPool` implementation will maintain a single connection globally, and the ``check_same_thread`` flag can be passed to Pysqlite as ``False``:: from sqlalchemy.pool import StaticPool engine = create_engine('sqlite://', connect_args={'check_same_thread':False}, poolclass=StaticPool) Note that using a ``:memory:`` database in multiple threads requires a recent version of SQLite. Using Temporary Tables with SQLite ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Due to the way SQLite deals with temporary tables, if you wish to use a temporary table in a file-based SQLite database across multiple checkouts from the connection pool, such as when using an ORM :class:`.Session` where the temporary table should continue to remain after :meth:`.commit` or :meth:`.rollback` is called, a pool which maintains a single connection must be used. Use :class:`.SingletonThreadPool` if the scope is only needed within the current thread, or :class:`.StaticPool` is scope is needed within multiple threads for this case:: # maintain the same connection per thread from sqlalchemy.pool import SingletonThreadPool engine = create_engine('sqlite:///mydb.db', poolclass=SingletonThreadPool) # maintain the same connection across all threads from sqlalchemy.pool import StaticPool engine = create_engine('sqlite:///mydb.db', poolclass=StaticPool) Note that :class:`.SingletonThreadPool` should be configured for the number of threads that are to be used; beyond that number, connections will be closed out in a non deterministic way. Unicode ------- The pysqlite driver only returns Python ``unicode`` objects in result sets, never plain strings, and accommodates ``unicode`` objects within bound parameter values in all cases. Regardless of the SQLAlchemy string type in use, string-based result values will by Python ``unicode`` in Python 2. The :class:`.Unicode` type should still be used to indicate those columns that require unicode, however, so that non-``unicode`` values passed inadvertently will emit a warning. Pysqlite will emit an error if a non-``unicode`` string is passed containing non-ASCII characters. .. _pysqlite_serializable: Serializable Transaction Isolation ---------------------------------- The pysqlite DBAPI driver has a long-standing bug in which transactional state is not begun until the first DML statement, that is INSERT, UPDATE or DELETE, is emitted. A SELECT statement will not cause transactional state to begin. While this mode of usage is fine for typical situations and has the advantage that the SQLite database file is not prematurely locked, it breaks serializable transaction isolation, which requires that the database file be locked upon any SQL being emitted. To work around this issue, the ``BEGIN`` keyword can be emitted at the start of each transaction. 
The following recipe establishes a :meth:`.ConnectionEvents.begin` handler to achieve this:: from sqlalchemy import create_engine, event engine = create_engine("sqlite:///myfile.db", isolation_level='SERIALIZABLE') @event.listens_for(engine, "begin") def do_begin(conn): conn.execute("BEGIN") """ from sqlalchemy.dialects.sqlite.base import SQLiteDialect, DATETIME, DATE from sqlalchemy import exc, pool from sqlalchemy import types as sqltypes from sqlalchemy import util import os class _SQLite_pysqliteTimeStamp(DATETIME): def bind_processor(self, dialect): if dialect.native_datetime: return None else: return DATETIME.bind_processor(self, dialect) def result_processor(self, dialect, coltype): if dialect.native_datetime: return None else: return DATETIME.result_processor(self, dialect, coltype) class _SQLite_pysqliteDate(DATE): def bind_processor(self, dialect): if dialect.native_datetime: return None else: return DATE.bind_processor(self, dialect) def result_processor(self, dialect, coltype): if dialect.native_datetime: return None else: return DATE.result_processor(self, dialect, coltype) class SQLiteDialect_pysqlite(SQLiteDialect): default_paramstyle = 'qmark' colspecs = util.update_copy( SQLiteDialect.colspecs, { sqltypes.Date: _SQLite_pysqliteDate, sqltypes.TIMESTAMP: _SQLite_pysqliteTimeStamp, } ) # Py3K #description_encoding = None driver = 'pysqlite' def __init__(self, **kwargs): SQLiteDialect.__init__(self, **kwargs) if self.dbapi is not None: sqlite_ver = self.dbapi.version_info if sqlite_ver < (2, 1, 3): util.warn( ("The installed version of pysqlite2 (%s) is out-dated " "and will cause errors in some cases. Version 2.1.3 " "or greater is recommended.") % '.'.join([str(subver) for subver in sqlite_ver])) @classmethod def dbapi(cls): try: from pysqlite2 import dbapi2 as sqlite except ImportError, e: try: from sqlite3 import dbapi2 as sqlite # try 2.5+ stdlib name. except ImportError: raise e return sqlite @classmethod def get_pool_class(cls, url): if url.database and url.database != ':memory:': return pool.NullPool else: return pool.SingletonThreadPool def _get_server_version_info(self, connection): return self.dbapi.sqlite_version_info def create_connect_args(self, url): if url.username or url.password or url.host or url.port: raise exc.ArgumentError( "Invalid SQLite URL: %s\n" "Valid SQLite URL forms are:\n" " sqlite:///:memory: (or, sqlite://)\n" " sqlite:///relative/path/to/file.db\n" " sqlite:////absolute/path/to/file.db" % (url,)) filename = url.database or ':memory:' if filename != ':memory:': filename = os.path.abspath(filename) opts = url.query.copy() util.coerce_kw_type(opts, 'timeout', float) util.coerce_kw_type(opts, 'isolation_level', str) util.coerce_kw_type(opts, 'detect_types', int) util.coerce_kw_type(opts, 'check_same_thread', bool) util.coerce_kw_type(opts, 'cached_statements', int) return ([filename], opts) def is_disconnect(self, e, connection, cursor): return isinstance(e, self.dbapi.ProgrammingError) and \ "Cannot operate on a closed database." 
in str(e) dialect = SQLiteDialect_pysqlite SQLAlchemy-0.8.4/lib/sqlalchemy/dialects/sybase/0000755000076500000240000000000012251151573022221 5ustar classicstaff00000000000000SQLAlchemy-0.8.4/lib/sqlalchemy/dialects/sybase/__init__.py0000644000076500000240000000167412251150015024330 0ustar classicstaff00000000000000# sybase/__init__.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php from sqlalchemy.dialects.sybase import base, pysybase, pyodbc # default dialect base.dialect = pyodbc.dialect from base import CHAR, VARCHAR, TIME, NCHAR, NVARCHAR,\ TEXT, DATE, DATETIME, FLOAT, NUMERIC,\ BIGINT, INT, INTEGER, SMALLINT, BINARY,\ VARBINARY, UNITEXT, UNICHAR, UNIVARCHAR,\ IMAGE, BIT, MONEY, SMALLMONEY, TINYINT,\ dialect __all__ = ( 'CHAR', 'VARCHAR', 'TIME', 'NCHAR', 'NVARCHAR', 'TEXT', 'DATE', 'DATETIME', 'FLOAT', 'NUMERIC', 'BIGINT', 'INT', 'INTEGER', 'SMALLINT', 'BINARY', 'VARBINARY', 'UNITEXT', 'UNICHAR', 'UNIVARCHAR', 'IMAGE', 'BIT', 'MONEY', 'SMALLMONEY', 'TINYINT', 'dialect' ) SQLAlchemy-0.8.4/lib/sqlalchemy/dialects/sybase/base.py0000644000076500000240000007015012251150015023476 0ustar classicstaff00000000000000# sybase/base.py # Copyright (C) 2010-2013 the SQLAlchemy authors and contributors # get_select_precolumns(), limit_clause() implementation # copyright (C) 2007 Fisch Asset Management # AG http://www.fam.ch, with coding by Alexander Houben # alexander.houben@thor-solutions.ch # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """ .. dialect:: sybase :name: Sybase .. note:: The Sybase dialect functions on current SQLAlchemy versions but is not regularly tested, and may have many issues and caveats not currently handled. 
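Connections are established through one of the DBAPI-specific sub-dialects
such as ``sybase+pyodbc`` or ``sybase+pysybase``; for example (an
illustration only, using a hypothetical ODBC data source name)::

    from sqlalchemy import create_engine

    engine = create_engine("sybase+pyodbc://scott:tiger@mydsn")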
""" import operator import re from sqlalchemy.sql import compiler, expression, text, bindparam from sqlalchemy.engine import default, base, reflection from sqlalchemy import types as sqltypes from sqlalchemy.sql import operators as sql_operators from sqlalchemy import schema as sa_schema from sqlalchemy import util, sql, exc from sqlalchemy.types import CHAR, VARCHAR, TIME, NCHAR, NVARCHAR,\ TEXT, DATE, DATETIME, FLOAT, NUMERIC,\ BIGINT, INT, INTEGER, SMALLINT, BINARY,\ VARBINARY, DECIMAL, TIMESTAMP, Unicode,\ UnicodeText, REAL RESERVED_WORDS = set([ "add", "all", "alter", "and", "any", "as", "asc", "backup", "begin", "between", "bigint", "binary", "bit", "bottom", "break", "by", "call", "capability", "cascade", "case", "cast", "char", "char_convert", "character", "check", "checkpoint", "close", "comment", "commit", "connect", "constraint", "contains", "continue", "convert", "create", "cross", "cube", "current", "current_timestamp", "current_user", "cursor", "date", "dbspace", "deallocate", "dec", "decimal", "declare", "default", "delete", "deleting", "desc", "distinct", "do", "double", "drop", "dynamic", "else", "elseif", "encrypted", "end", "endif", "escape", "except", "exception", "exec", "execute", "existing", "exists", "externlogin", "fetch", "first", "float", "for", "force", "foreign", "forward", "from", "full", "goto", "grant", "group", "having", "holdlock", "identified", "if", "in", "index", "index_lparen", "inner", "inout", "insensitive", "insert", "inserting", "install", "instead", "int", "integer", "integrated", "intersect", "into", "iq", "is", "isolation", "join", "key", "lateral", "left", "like", "lock", "login", "long", "match", "membership", "message", "mode", "modify", "natural", "new", "no", "noholdlock", "not", "notify", "null", "numeric", "of", "off", "on", "open", "option", "options", "or", "order", "others", "out", "outer", "over", "passthrough", "precision", "prepare", "primary", "print", "privileges", "proc", "procedure", "publication", "raiserror", "readtext", "real", "reference", "references", "release", "remote", "remove", "rename", "reorganize", "resource", "restore", "restrict", "return", "revoke", "right", "rollback", "rollup", "save", "savepoint", "scroll", "select", "sensitive", "session", "set", "setuser", "share", "smallint", "some", "sqlcode", "sqlstate", "start", "stop", "subtrans", "subtransaction", "synchronize", "syntax_error", "table", "temporary", "then", "time", "timestamp", "tinyint", "to", "top", "tran", "trigger", "truncate", "tsequal", "unbounded", "union", "unique", "unknown", "unsigned", "update", "updating", "user", "using", "validate", "values", "varbinary", "varchar", "variable", "varying", "view", "wait", "waitfor", "when", "where", "while", "window", "with", "with_cube", "with_lparen", "with_rollup", "within", "work", "writetext", ]) class _SybaseUnitypeMixin(object): """these types appear to return a buffer object.""" def result_processor(self, dialect, coltype): def process(value): if value is not None: return str(value) # decode("ucs-2") else: return None return process class UNICHAR(_SybaseUnitypeMixin, sqltypes.Unicode): __visit_name__ = 'UNICHAR' class UNIVARCHAR(_SybaseUnitypeMixin, sqltypes.Unicode): __visit_name__ = 'UNIVARCHAR' class UNITEXT(_SybaseUnitypeMixin, sqltypes.UnicodeText): __visit_name__ = 'UNITEXT' class TINYINT(sqltypes.Integer): __visit_name__ = 'TINYINT' class BIT(sqltypes.TypeEngine): __visit_name__ = 'BIT' class MONEY(sqltypes.TypeEngine): __visit_name__ = "MONEY" class SMALLMONEY(sqltypes.TypeEngine): 
__visit_name__ = "SMALLMONEY" class UNIQUEIDENTIFIER(sqltypes.TypeEngine): __visit_name__ = "UNIQUEIDENTIFIER" class IMAGE(sqltypes.LargeBinary): __visit_name__ = 'IMAGE' class SybaseTypeCompiler(compiler.GenericTypeCompiler): def visit_large_binary(self, type_): return self.visit_IMAGE(type_) def visit_boolean(self, type_): return self.visit_BIT(type_) def visit_unicode(self, type_): return self.visit_NVARCHAR(type_) def visit_UNICHAR(self, type_): return "UNICHAR(%d)" % type_.length def visit_UNIVARCHAR(self, type_): return "UNIVARCHAR(%d)" % type_.length def visit_UNITEXT(self, type_): return "UNITEXT" def visit_TINYINT(self, type_): return "TINYINT" def visit_IMAGE(self, type_): return "IMAGE" def visit_BIT(self, type_): return "BIT" def visit_MONEY(self, type_): return "MONEY" def visit_SMALLMONEY(self, type_): return "SMALLMONEY" def visit_UNIQUEIDENTIFIER(self, type_): return "UNIQUEIDENTIFIER" ischema_names = { 'bigint': BIGINT, 'int': INTEGER, 'integer': INTEGER, 'smallint': SMALLINT, 'tinyint': TINYINT, 'unsigned bigint': BIGINT, # TODO: unsigned flags 'unsigned int': INTEGER, # TODO: unsigned flags 'unsigned smallint': SMALLINT, # TODO: unsigned flags 'numeric': NUMERIC, 'decimal': DECIMAL, 'dec': DECIMAL, 'float': FLOAT, 'double': NUMERIC, # TODO 'double precision': NUMERIC, # TODO 'real': REAL, 'smallmoney': SMALLMONEY, 'money': MONEY, 'smalldatetime': DATETIME, 'datetime': DATETIME, 'date': DATE, 'time': TIME, 'char': CHAR, 'character': CHAR, 'varchar': VARCHAR, 'character varying': VARCHAR, 'char varying': VARCHAR, 'unichar': UNICHAR, 'unicode character': UNIVARCHAR, 'nchar': NCHAR, 'national char': NCHAR, 'national character': NCHAR, 'nvarchar': NVARCHAR, 'nchar varying': NVARCHAR, 'national char varying': NVARCHAR, 'national character varying': NVARCHAR, 'text': TEXT, 'unitext': UNITEXT, 'binary': BINARY, 'varbinary': VARBINARY, 'image': IMAGE, 'bit': BIT, # not in documentation for ASE 15.7 'long varchar': TEXT, # TODO 'timestamp': TIMESTAMP, 'uniqueidentifier': UNIQUEIDENTIFIER, } class SybaseInspector(reflection.Inspector): def __init__(self, conn): reflection.Inspector.__init__(self, conn) def get_table_id(self, table_name, schema=None): """Return the table id from `table_name` and `schema`.""" return self.dialect.get_table_id(self.bind, table_name, schema, info_cache=self.info_cache) class SybaseExecutionContext(default.DefaultExecutionContext): _enable_identity_insert = False def set_ddl_autocommit(self, connection, value): """Must be implemented by subclasses to accommodate DDL executions. "connection" is the raw unwrapped DBAPI connection. "value" is True or False. when True, the connection should be configured such that a DDL can take place subsequently. when False, a DDL has taken place and the connection should be resumed into non-autocommit mode. """ raise NotImplementedError() def pre_exec(self): if self.isinsert: tbl = self.compiled.statement.table seq_column = tbl._autoincrement_column insert_has_sequence = seq_column is not None if insert_has_sequence: self._enable_identity_insert = \ seq_column.key in self.compiled_parameters[0] else: self._enable_identity_insert = False if self._enable_identity_insert: self.cursor.execute("SET IDENTITY_INSERT %s ON" % self.dialect.identifier_preparer.format_table(tbl)) if self.isddl: # TODO: to enhance this, we can detect "ddl in tran" on the # database settings. this error message should be improved to # include a note about that. 
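            # Note (assumption, following the TODO above): Sybase ASE
            # normally rejects DDL inside an explicit transaction unless the
            # database-level "ddl in tran" option has been enabled; as that
            # option is not inspected here, DDL is only permitted when
            # running in autocommit mode.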
if not self.should_autocommit: raise exc.InvalidRequestError( "The Sybase dialect only supports " "DDL in 'autocommit' mode at this time.") self.root_connection.engine.logger.info( "AUTOCOMMIT (Assuming no Sybase 'ddl in tran')") self.set_ddl_autocommit( self.root_connection.connection.connection, True) def post_exec(self): if self.isddl: self.set_ddl_autocommit(self.root_connection, False) if self._enable_identity_insert: self.cursor.execute( "SET IDENTITY_INSERT %s OFF" % self.dialect.identifier_preparer. format_table(self.compiled.statement.table) ) def get_lastrowid(self): cursor = self.create_cursor() cursor.execute("SELECT @@identity AS lastrowid") lastrowid = cursor.fetchone()[0] cursor.close() return lastrowid class SybaseSQLCompiler(compiler.SQLCompiler): ansi_bind_rules = True extract_map = util.update_copy( compiler.SQLCompiler.extract_map, { 'doy': 'dayofyear', 'dow': 'weekday', 'milliseconds': 'millisecond' }) def get_select_precolumns(self, select): s = select._distinct and "DISTINCT " or "" # TODO: don't think Sybase supports # bind params for FIRST / TOP if select._limit: #if select._limit == 1: #s += "FIRST " #else: #s += "TOP %s " % (select._limit,) s += "TOP %s " % (select._limit,) if select._offset: if not select._limit: # FIXME: sybase doesn't allow an offset without a limit # so use a huge value for TOP here s += "TOP 1000000 " s += "START AT %s " % (select._offset + 1,) return s def get_from_hint_text(self, table, text): return text def limit_clause(self, select): # Limit in sybase is after the select keyword return "" def visit_extract(self, extract, **kw): field = self.extract_map.get(extract.field, extract.field) return 'DATEPART("%s", %s)' % ( field, self.process(extract.expr, **kw)) def visit_now_func(self, fn, **kw): return "GETDATE()" def for_update_clause(self, select): # "FOR UPDATE" is only allowed on "DECLARE CURSOR" # which SQLAlchemy doesn't use return '' def order_by_clause(self, select, **kw): kw['literal_binds'] = True order_by = self.process(select._order_by_clause, **kw) # SybaseSQL only allows ORDER BY in subqueries if there is a LIMIT if order_by and (not self.is_subquery() or select._limit): return " ORDER BY " + order_by else: return "" class SybaseDDLCompiler(compiler.DDLCompiler): def get_column_specification(self, column, **kwargs): colspec = self.preparer.format_column(column) + " " + \ self.dialect.type_compiler.process(column.type) if column.table is None: raise exc.CompileError( "The Sybase dialect requires Table-bound " "columns in order to generate DDL") seq_col = column.table._autoincrement_column # install a IDENTITY Sequence if we have an implicit IDENTITY column if seq_col is column: sequence = isinstance(column.default, sa_schema.Sequence) \ and column.default if sequence: start, increment = sequence.start or 1, \ sequence.increment or 1 else: start, increment = 1, 1 if (start, increment) == (1, 1): colspec += " IDENTITY" else: # TODO: need correct syntax for this colspec += " IDENTITY(%s,%s)" % (start, increment) else: default = self.get_column_default_string(column) if default is not None: colspec += " DEFAULT " + default if column.nullable is not None: if not column.nullable or column.primary_key: colspec += " NOT NULL" else: colspec += " NULL" return colspec def visit_drop_index(self, drop): index = drop.element return "\nDROP INDEX %s.%s" % ( self.preparer.quote_identifier(index.table.name), self._prepared_index_name(drop.element, include_schema=False) ) class SybaseIdentifierPreparer(compiler.IdentifierPreparer): 
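    """IdentifierPreparer which applies quoting to identifiers that
    collide with the Sybase reserved words listed above."""
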
reserved_words = RESERVED_WORDS class SybaseDialect(default.DefaultDialect): name = 'sybase' supports_unicode_statements = False supports_sane_rowcount = False supports_sane_multi_rowcount = False supports_native_boolean = False supports_unicode_binds = False postfetch_lastrowid = True colspecs = {} ischema_names = ischema_names type_compiler = SybaseTypeCompiler statement_compiler = SybaseSQLCompiler ddl_compiler = SybaseDDLCompiler preparer = SybaseIdentifierPreparer inspector = SybaseInspector def _get_default_schema_name(self, connection): return connection.scalar( text("SELECT user_name() as user_name", typemap={'user_name': Unicode}) ) def initialize(self, connection): super(SybaseDialect, self).initialize(connection) if self.server_version_info is not None and\ self.server_version_info < (15, ): self.max_identifier_length = 30 else: self.max_identifier_length = 255 def get_table_id(self, connection, table_name, schema=None, **kw): """Fetch the id for schema.table_name. Several reflection methods require the table id. The idea for using this method is that it can be fetched one time and cached for subsequent calls. """ table_id = None if schema is None: schema = self.default_schema_name TABLEID_SQL = text(""" SELECT o.id AS id FROM sysobjects o JOIN sysusers u ON o.uid=u.uid WHERE u.name = :schema_name AND o.name = :table_name AND o.type in ('U', 'V') """) # Py2K if isinstance(schema, unicode): schema = schema.encode("ascii") if isinstance(table_name, unicode): table_name = table_name.encode("ascii") # end Py2K result = connection.execute(TABLEID_SQL, schema_name=schema, table_name=table_name) table_id = result.scalar() if table_id is None: raise exc.NoSuchTableError(table_name) return table_id @reflection.cache def get_columns(self, connection, table_name, schema=None, **kw): table_id = self.get_table_id(connection, table_name, schema, info_cache=kw.get("info_cache")) COLUMN_SQL = text(""" SELECT col.name AS name, t.name AS type, (col.status & 8) AS nullable, (col.status & 128) AS autoincrement, com.text AS 'default', col.prec AS precision, col.scale AS scale, col.length AS length FROM systypes t, syscolumns col LEFT OUTER JOIN syscomments com ON col.cdefault = com.id WHERE col.usertype = t.usertype AND col.id = :table_id ORDER BY col.colid """) results = connection.execute(COLUMN_SQL, table_id=table_id) columns = [] for (name, type_, nullable, autoincrement, default, precision, scale, length) in results: col_info = self._get_column_info(name, type_, bool(nullable), bool(autoincrement), default, precision, scale, length) columns.append(col_info) return columns def _get_column_info(self, name, type_, nullable, autoincrement, default, precision, scale, length): coltype = self.ischema_names.get(type_, None) kwargs = {} if coltype in (NUMERIC, DECIMAL): args = (precision, scale) elif coltype == FLOAT: args = (precision,) elif coltype in (CHAR, VARCHAR, UNICHAR, UNIVARCHAR, NCHAR, NVARCHAR): args = (length,) else: args = () if coltype: coltype = coltype(*args, **kwargs) #is this necessary #if is_array: # coltype = ARRAY(coltype) else: util.warn("Did not recognize type '%s' of column '%s'" % (type_, name)) coltype = sqltypes.NULLTYPE if default: default = re.sub("DEFAULT", "", default).strip() default = re.sub("^'(.*)'$", lambda m: m.group(1), default) else: default = None column_info = dict(name=name, type=coltype, nullable=nullable, default=default, autoincrement=autoincrement) return column_info @reflection.cache def get_foreign_keys(self, connection, table_name, schema=None, **kw): 
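        """Return foreign key information for ``table_name``.

        Reflected from ``sysreferences`` joined to ``sysobjects``; each
        entry is a dictionary with ``constrained_columns``,
        ``referred_schema``, ``referred_table``, ``referred_columns``
        and ``name`` keys.
        """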
table_id = self.get_table_id(connection, table_name, schema, info_cache=kw.get("info_cache")) table_cache = {} column_cache = {} foreign_keys = [] table_cache[table_id] = {"name": table_name, "schema": schema} COLUMN_SQL = text(""" SELECT c.colid AS id, c.name AS name FROM syscolumns c WHERE c.id = :table_id """) results = connection.execute(COLUMN_SQL, table_id=table_id) columns = {} for col in results: columns[col["id"]] = col["name"] column_cache[table_id] = columns REFCONSTRAINT_SQL = text(""" SELECT o.name AS name, r.reftabid AS reftable_id, r.keycnt AS 'count', r.fokey1 AS fokey1, r.fokey2 AS fokey2, r.fokey3 AS fokey3, r.fokey4 AS fokey4, r.fokey5 AS fokey5, r.fokey6 AS fokey6, r.fokey7 AS fokey7, r.fokey1 AS fokey8, r.fokey9 AS fokey9, r.fokey10 AS fokey10, r.fokey11 AS fokey11, r.fokey12 AS fokey12, r.fokey13 AS fokey13, r.fokey14 AS fokey14, r.fokey15 AS fokey15, r.fokey16 AS fokey16, r.refkey1 AS refkey1, r.refkey2 AS refkey2, r.refkey3 AS refkey3, r.refkey4 AS refkey4, r.refkey5 AS refkey5, r.refkey6 AS refkey6, r.refkey7 AS refkey7, r.refkey1 AS refkey8, r.refkey9 AS refkey9, r.refkey10 AS refkey10, r.refkey11 AS refkey11, r.refkey12 AS refkey12, r.refkey13 AS refkey13, r.refkey14 AS refkey14, r.refkey15 AS refkey15, r.refkey16 AS refkey16 FROM sysreferences r JOIN sysobjects o on r.tableid = o.id WHERE r.tableid = :table_id """) referential_constraints = connection.execute(REFCONSTRAINT_SQL, table_id=table_id) REFTABLE_SQL = text(""" SELECT o.name AS name, u.name AS 'schema' FROM sysobjects o JOIN sysusers u ON o.uid = u.uid WHERE o.id = :table_id """) for r in referential_constraints: reftable_id = r["reftable_id"] if reftable_id not in table_cache: c = connection.execute(REFTABLE_SQL, table_id=reftable_id) reftable = c.fetchone() c.close() table_info = {"name": reftable["name"], "schema": None} if (schema is not None or reftable["schema"] != self.default_schema_name): table_info["schema"] = reftable["schema"] table_cache[reftable_id] = table_info results = connection.execute(COLUMN_SQL, table_id=reftable_id) reftable_columns = {} for col in results: reftable_columns[col["id"]] = col["name"] column_cache[reftable_id] = reftable_columns reftable = table_cache[reftable_id] reftable_columns = column_cache[reftable_id] constrained_columns = [] referred_columns = [] for i in range(1, r["count"] + 1): constrained_columns.append(columns[r["fokey%i" % i]]) referred_columns.append(reftable_columns[r["refkey%i" % i]]) fk_info = { "constrained_columns": constrained_columns, "referred_schema": reftable["schema"], "referred_table": reftable["name"], "referred_columns": referred_columns, "name": r["name"] } foreign_keys.append(fk_info) return foreign_keys @reflection.cache def get_indexes(self, connection, table_name, schema=None, **kw): table_id = self.get_table_id(connection, table_name, schema, info_cache=kw.get("info_cache")) INDEX_SQL = text(""" SELECT object_name(i.id) AS table_name, i.keycnt AS 'count', i.name AS name, (i.status & 0x2) AS 'unique', index_col(object_name(i.id), i.indid, 1) AS col_1, index_col(object_name(i.id), i.indid, 2) AS col_2, index_col(object_name(i.id), i.indid, 3) AS col_3, index_col(object_name(i.id), i.indid, 4) AS col_4, index_col(object_name(i.id), i.indid, 5) AS col_5, index_col(object_name(i.id), i.indid, 6) AS col_6, index_col(object_name(i.id), i.indid, 7) AS col_7, index_col(object_name(i.id), i.indid, 8) AS col_8, index_col(object_name(i.id), i.indid, 9) AS col_9, index_col(object_name(i.id), i.indid, 10) AS col_10, index_col(object_name(i.id), 
i.indid, 11) AS col_11, index_col(object_name(i.id), i.indid, 12) AS col_12, index_col(object_name(i.id), i.indid, 13) AS col_13, index_col(object_name(i.id), i.indid, 14) AS col_14, index_col(object_name(i.id), i.indid, 15) AS col_15, index_col(object_name(i.id), i.indid, 16) AS col_16 FROM sysindexes i, sysobjects o WHERE o.id = i.id AND o.id = :table_id AND (i.status & 2048) = 0 AND i.indid BETWEEN 1 AND 254 """) results = connection.execute(INDEX_SQL, table_id=table_id) indexes = [] for r in results: column_names = [] for i in range(1, r["count"]): column_names.append(r["col_%i" % (i,)]) index_info = {"name": r["name"], "unique": bool(r["unique"]), "column_names": column_names} indexes.append(index_info) return indexes @reflection.cache def get_pk_constraint(self, connection, table_name, schema=None, **kw): table_id = self.get_table_id(connection, table_name, schema, info_cache=kw.get("info_cache")) PK_SQL = text(""" SELECT object_name(i.id) AS table_name, i.keycnt AS 'count', i.name AS name, index_col(object_name(i.id), i.indid, 1) AS pk_1, index_col(object_name(i.id), i.indid, 2) AS pk_2, index_col(object_name(i.id), i.indid, 3) AS pk_3, index_col(object_name(i.id), i.indid, 4) AS pk_4, index_col(object_name(i.id), i.indid, 5) AS pk_5, index_col(object_name(i.id), i.indid, 6) AS pk_6, index_col(object_name(i.id), i.indid, 7) AS pk_7, index_col(object_name(i.id), i.indid, 8) AS pk_8, index_col(object_name(i.id), i.indid, 9) AS pk_9, index_col(object_name(i.id), i.indid, 10) AS pk_10, index_col(object_name(i.id), i.indid, 11) AS pk_11, index_col(object_name(i.id), i.indid, 12) AS pk_12, index_col(object_name(i.id), i.indid, 13) AS pk_13, index_col(object_name(i.id), i.indid, 14) AS pk_14, index_col(object_name(i.id), i.indid, 15) AS pk_15, index_col(object_name(i.id), i.indid, 16) AS pk_16 FROM sysindexes i, sysobjects o WHERE o.id = i.id AND o.id = :table_id AND (i.status & 2048) = 2048 AND i.indid BETWEEN 1 AND 254 """) results = connection.execute(PK_SQL, table_id=table_id) pks = results.fetchone() results.close() constrained_columns = [] for i in range(1, pks["count"] + 1): constrained_columns.append(pks["pk_%i" % (i,)]) return {"constrained_columns": constrained_columns, "name": pks["name"]} @reflection.cache def get_schema_names(self, connection, **kw): SCHEMA_SQL = text("SELECT u.name AS name FROM sysusers u") schemas = connection.execute(SCHEMA_SQL) return [s["name"] for s in schemas] @reflection.cache def get_table_names(self, connection, schema=None, **kw): if schema is None: schema = self.default_schema_name TABLE_SQL = text(""" SELECT o.name AS name FROM sysobjects o JOIN sysusers u ON o.uid = u.uid WHERE u.name = :schema_name AND o.type = 'U' """) # Py2K if isinstance(schema, unicode): schema = schema.encode("ascii") # end Py2K tables = connection.execute(TABLE_SQL, schema_name=schema) return [t["name"] for t in tables] @reflection.cache def get_view_definition(self, connection, view_name, schema=None, **kw): if schema is None: schema = self.default_schema_name VIEW_DEF_SQL = text(""" SELECT c.text FROM syscomments c JOIN sysobjects o ON c.id = o.id WHERE o.name = :view_name AND o.type = 'V' """) # Py2K if isinstance(view_name, unicode): view_name = view_name.encode("ascii") # end Py2K view = connection.execute(VIEW_DEF_SQL, view_name=view_name) return view.scalar() @reflection.cache def get_view_names(self, connection, schema=None, **kw): if schema is None: schema = self.default_schema_name VIEW_SQL = text(""" SELECT o.name AS name FROM sysobjects o JOIN sysusers u ON 
o.uid = u.uid WHERE u.name = :schema_name AND o.type = 'V' """) # Py2K if isinstance(schema, unicode): schema = schema.encode("ascii") # end Py2K views = connection.execute(VIEW_SQL, schema_name=schema) return [v["name"] for v in views] def has_table(self, connection, table_name, schema=None): try: self.get_table_id(connection, table_name, schema) except exc.NoSuchTableError: return False else: return True SQLAlchemy-0.8.4/lib/sqlalchemy/dialects/sybase/mxodbc.py0000644000076500000240000000160312251147171024046 0ustar classicstaff00000000000000# sybase/mxodbc.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """ .. dialect:: sybase+mxodbc :name: mxODBC :dbapi: mxodbc :connectstring: sybase+mxodbc://:@ :url: http://www.egenix.com/ .. note:: This dialect is a stub only and is likely non functional at this time. """ from sqlalchemy.dialects.sybase.base import SybaseDialect from sqlalchemy.dialects.sybase.base import SybaseExecutionContext from sqlalchemy.connectors.mxodbc import MxODBCConnector class SybaseExecutionContext_mxodbc(SybaseExecutionContext): pass class SybaseDialect_mxodbc(MxODBCConnector, SybaseDialect): execution_ctx_cls = SybaseExecutionContext_mxodbc dialect = SybaseDialect_mxodbc SQLAlchemy-0.8.4/lib/sqlalchemy/dialects/sybase/pyodbc.py0000644000076500000240000000416212251147171024055 0ustar classicstaff00000000000000# sybase/pyodbc.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """ .. dialect:: sybase+pyodbc :name: PyODBC :dbapi: pyodbc :connectstring: sybase+pyodbc://:@[/] :url: http://pypi.python.org/pypi/pyodbc/ Unicode Support --------------- The pyodbc driver currently supports usage of these Sybase types with Unicode or multibyte strings:: CHAR NCHAR NVARCHAR TEXT VARCHAR Currently *not* supported are:: UNICHAR UNITEXT UNIVARCHAR """ from sqlalchemy.dialects.sybase.base import SybaseDialect,\ SybaseExecutionContext from sqlalchemy.connectors.pyodbc import PyODBCConnector from sqlalchemy import types as sqltypes, processors import decimal class _SybNumeric_pyodbc(sqltypes.Numeric): """Turns Decimals with adjusted() < -6 into floats. It's not yet known how to get decimals with many significant digits or very large adjusted() into Sybase via pyodbc. 
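    For example, ``decimal.Decimal("1E-7").adjusted()`` is ``-7``, so such
    a value is converted with ``processors.to_float()`` before being bound,
    whereas ``decimal.Decimal("0.001")`` (``adjusted()`` of ``-3``) falls
    through to the normal ``Numeric`` bind processing.
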
""" def bind_processor(self, dialect): super_process = super(_SybNumeric_pyodbc, self).\ bind_processor(dialect) def process(value): if self.asdecimal and \ isinstance(value, decimal.Decimal): if value.adjusted() < -6: return processors.to_float(value) if super_process: return super_process(value) else: return value return process class SybaseExecutionContext_pyodbc(SybaseExecutionContext): def set_ddl_autocommit(self, connection, value): if value: connection.autocommit = True else: connection.autocommit = False class SybaseDialect_pyodbc(PyODBCConnector, SybaseDialect): execution_ctx_cls = SybaseExecutionContext_pyodbc colspecs = { sqltypes.Numeric: _SybNumeric_pyodbc, } dialect = SybaseDialect_pyodbc SQLAlchemy-0.8.4/lib/sqlalchemy/dialects/sybase/pysybase.py0000644000076500000240000000623412251147171024436 0ustar classicstaff00000000000000# sybase/pysybase.py # Copyright (C) 2010-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """ .. dialect:: sybase+pysybase :name: Python-Sybase :dbapi: Sybase :connectstring: sybase+pysybase://:@/[database name] :url: http://python-sybase.sourceforge.net/ Unicode Support --------------- The python-sybase driver does not appear to support non-ASCII strings of any kind at this time. """ from sqlalchemy import types as sqltypes, processors from sqlalchemy.dialects.sybase.base import SybaseDialect, \ SybaseExecutionContext, SybaseSQLCompiler class _SybNumeric(sqltypes.Numeric): def result_processor(self, dialect, type_): if not self.asdecimal: return processors.to_float else: return sqltypes.Numeric.result_processor(self, dialect, type_) class SybaseExecutionContext_pysybase(SybaseExecutionContext): def set_ddl_autocommit(self, dbapi_connection, value): if value: # call commit() on the Sybase connection directly, # to avoid any side effects of calling a Connection # transactional method inside of pre_exec() dbapi_connection.commit() def pre_exec(self): SybaseExecutionContext.pre_exec(self) for param in self.parameters: for key in list(param): param["@" + key] = param[key] del param[key] class SybaseSQLCompiler_pysybase(SybaseSQLCompiler): def bindparam_string(self, name, **kw): return "@" + name class SybaseDialect_pysybase(SybaseDialect): driver = 'pysybase' execution_ctx_cls = SybaseExecutionContext_pysybase statement_compiler = SybaseSQLCompiler_pysybase colspecs = { sqltypes.Numeric: _SybNumeric, sqltypes.Float: sqltypes.Float } @classmethod def dbapi(cls): import Sybase return Sybase def create_connect_args(self, url): opts = url.translate_connect_args(username='user', password='passwd') return ([opts.pop('host')], opts) def do_executemany(self, cursor, statement, parameters, context=None): # calling python-sybase executemany yields: # TypeError: string too long for buffer for param in parameters: cursor.execute(statement, param) def _get_server_version_info(self, connection): vers = connection.scalar("select @@version_number") # i.e. 
15500, 15000, 12500 == (15, 5, 0, 0), (15, 0, 0, 0), # (12, 5, 0, 0) return (vers / 1000, vers % 1000 / 100, vers % 100 / 10, vers % 10) def is_disconnect(self, e, connection, cursor): if isinstance(e, (self.dbapi.OperationalError, self.dbapi.ProgrammingError)): msg = str(e) return ('Unable to complete network request to host' in msg or 'Invalid connection state' in msg or 'Invalid cursor state' in msg) else: return False dialect = SybaseDialect_pysybase SQLAlchemy-0.8.4/lib/sqlalchemy/dialects/type_migration_guidelines.txt0000644000076500000240000002006012251147171026733 0ustar classicstaff00000000000000Rules for Migrating TypeEngine classes to 0.6 --------------------------------------------- 1. the TypeEngine classes are used for: a. Specifying behavior which needs to occur for bind parameters or result row columns. b. Specifying types that are entirely specific to the database in use and have no analogue in the sqlalchemy.types package. c. Specifying types where there is an analogue in sqlalchemy.types, but the database in use takes vendor-specific flags for those types. d. If a TypeEngine class doesn't provide any of this, it should be *removed* from the dialect. 2. the TypeEngine classes are *no longer* used for generating DDL. Dialects now have a TypeCompiler subclass which uses the same visit_XXX model as other compilers. 3. the "ischema_names" and "colspecs" dictionaries are now required members on the Dialect class. 4. The names of types within dialects are now important. If a dialect-specific type is a subclass of an existing generic type and is only provided for bind/result behavior, the current mixed case naming can remain, i.e. _PGNumeric for Numeric - in this case, end users would never need to use _PGNumeric directly. However, if a dialect-specific type is specifying a type *or* arguments that are not present generically, it should match the real name of the type on that backend, in uppercase. E.g. postgresql.INET, mysql.ENUM, postgresql.ARRAY. Or follow this handy flowchart: is the type meant to provide bind/result is the type the same name as an behavior to a generic type (i.e. MixedCase) ---- no ---> UPPERCASE type in types.py ? type in types.py ? | | | no yes yes | | | | does your type need special | +<--- yes --- behavior or arguments ? | | | | | no name the type using | | _MixedCase, i.e. v V _OracleBoolean. it name the type don't make a stays private to the dialect identically as that type, make sure the dialect's and is invoked *only* via within the DB, base.py imports the types.py the colspecs dict. using UPPERCASE UPPERCASE name into its namespace | (i.e. BIT, NCHAR, INTERVAL). | Users can import it. | | v v subclass the closest is the name of this type MixedCase type types.py, identical to an UPPERCASE i.e. <--- no ------- name in types.py ? class _DateTime(types.DateTime), class DATETIME2(types.DateTime), | class BIT(types.TypeEngine). yes | v the type should subclass the UPPERCASE type in types.py (i.e. class BLOB(types.BLOB)) Example 1. pysqlite needs bind/result processing for the DateTime type in types.py, which applies to all DateTimes and subclasses. It's named _SLDateTime and subclasses types.DateTime. Example 2. MS-SQL has a TIME type which takes a non-standard "precision" argument that is rendered within DDL. So it's named TIME in the MS-SQL dialect's base.py, and subclasses types.TIME. Users can then say mssql.TIME(precision=10). Example 3. 
MS-SQL dialects also need special bind/result processing for date But its DATE type doesn't render DDL differently than that of a plain DATE, i.e. it takes no special arguments. Therefore we are just adding behavior to types.Date, so it's named _MSDate in the MS-SQL dialect's base.py, and subclasses types.Date. Example 4. MySQL has a SET type, there's no analogue for this in types.py. So MySQL names it SET in the dialect's base.py, and it subclasses types.String, since it ultimately deals with strings. Example 5. Postgresql has a DATETIME type. The DBAPIs handle dates correctly, and no special arguments are used in PG's DDL beyond what types.py provides. Postgresql dialect therefore imports types.DATETIME into its base.py. Ideally one should be able to specify a schema using names imported completely from a dialect, all matching the real name on that backend: from sqlalchemy.dialects.postgresql import base as pg t = Table('mytable', metadata, Column('id', pg.INTEGER, primary_key=True), Column('name', pg.VARCHAR(300)), Column('inetaddr', pg.INET) ) where above, the INTEGER and VARCHAR types are ultimately from sqlalchemy.types, but the PG dialect makes them available in its own namespace. 5. "colspecs" now is a dictionary of generic or uppercased types from sqlalchemy.types linked to types specified in the dialect. Again, if a type in the dialect does not specify any special behavior for bind_processor() or result_processor() and does not indicate a special type only available in this database, it must be *removed* from the module and from this dictionary. 6. "ischema_names" indicates string descriptions of types as returned from the database linked to TypeEngine classes. a. The string name should be matched to the most specific type possible within sqlalchemy.types, unless there is no matching type within sqlalchemy.types in which case it points to a dialect type. *It doesn't matter* if the dialect has it's own subclass of that type with special bind/result behavior - reflect to the types.py UPPERCASE type as much as possible. With very few exceptions, all types should reflect to an UPPERCASE type. b. If the dialect contains a matching dialect-specific type that takes extra arguments which the generic one does not, then point to the dialect-specific type. E.g. mssql.VARCHAR takes a "collation" parameter which should be preserved. 5. DDL, or what was formerly issued by "get_col_spec()", is now handled exclusively by a subclass of compiler.GenericTypeCompiler. a. your TypeCompiler class will receive generic and uppercase types from sqlalchemy.types. Do not assume the presence of dialect-specific attributes on these types. b. the visit_UPPERCASE methods on GenericTypeCompiler should *not* be overridden with methods that produce a different DDL name. Uppercase types don't do any kind of "guessing" - if visit_TIMESTAMP is called, the DDL should render as TIMESTAMP in all cases, regardless of whether or not that type is legal on the backend database. c. the visit_UPPERCASE methods *should* be overridden with methods that add additional arguments and flags to those types. d. the visit_lowercase methods are overridden to provide an interpretation of a generic type. E.g. visit_large_binary() might be overridden to say "return self.visit_BIT(type_)". e. visit_lowercase methods should *never* render strings directly - it should always be via calling a visit_UPPERCASE() method. 
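As a purely illustrative sketch of rules 3-6 (all names below are
hypothetical and belong to no real dialect), a dialect module might wire
its types together like this:

    from sqlalchemy import types
    from sqlalchemy.sql import compiler

    # rule 4: a vendor-only type gets the real UPPERCASE name and is
    # importable by end users.
    class MONEY(types.TypeEngine):
        __visit_name__ = 'MONEY'

    # rule 4: a bind/result-only variant of a generic type stays private
    # (_MixedCase) and is reached only through colspecs.
    class _MyDialectNumeric(types.Numeric):
        def result_processor(self, dialect, coltype):
            def process(value):
                # vendor-specific conversion would go here
                return value
            return process

    # rules 2, 5: DDL strings come from the type compiler, not the type.
    class MyDialectTypeCompiler(compiler.GenericTypeCompiler):
        def visit_MONEY(self, type_):
            return "MONEY"

    # rules 3, 5: only types with special behavior appear in colspecs.
    colspecs = {types.Numeric: _MyDialectNumeric}

    # rule 6: reflection names point to UPPERCASE types, generic where
    # possible, dialect-specific otherwise.
    ischema_names = {'numeric': types.NUMERIC, 'money': MONEY}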
SQLAlchemy-0.8.4/lib/sqlalchemy/engine/0000755000076500000240000000000012251151573020410 5ustar classicstaff00000000000000SQLAlchemy-0.8.4/lib/sqlalchemy/engine/__init__.py0000644000076500000240000003624712251150015022523 0ustar classicstaff00000000000000# engine/__init__.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """SQL connections, SQL execution and high-level DB-API interface. The engine package defines the basic components used to interface DB-API modules with higher-level statement construction, connection-management, execution and result contexts. The primary "entry point" class into this package is the Engine and it's public constructor ``create_engine()``. This package includes: base.py Defines interface classes and some implementation classes which comprise the basic components used to interface between a DB-API, constructed and plain-text statements, connections, transactions, and results. default.py Contains default implementations of some of the components defined in base.py. All current database dialects use the classes in default.py as base classes for their own database-specific implementations. strategies.py The mechanics of constructing ``Engine`` objects are represented here. Defines the ``EngineStrategy`` class which represents how to go from arguments specified to the ``create_engine()`` function, to a fully constructed ``Engine``, including initialization of connection pooling, dialects, and specific subclasses of ``Engine``. threadlocal.py The ``TLEngine`` class is defined here, which is a subclass of the generic ``Engine`` and tracks ``Connection`` and ``Transaction`` objects against the identity of the current thread. This allows certain programming patterns based around the concept of a "thread-local connection" to be possible. The ``TLEngine`` is created by using the "threadlocal" engine strategy in conjunction with the ``create_engine()`` function. url.py Defines the ``URL`` class which represents the individual components of a string URL passed to ``create_engine()``. Also defines a basic module-loading strategy for the dialect specifier within a URL. """ # not sure what this was used for #import sqlalchemy.databases from .interfaces import ( Compiled, Connectable, Dialect, ExecutionContext, TypeCompiler ) from .base import ( Connection, Engine, NestedTransaction, RootTransaction, Transaction, TwoPhaseTransaction, ) from .result import ( BufferedColumnResultProxy, BufferedColumnRow, BufferedRowResultProxy, FullyBufferedResultProxy, ResultProxy, RowProxy, ) from .util import ( connection_memoize ) from . import util, strategies default_strategy = 'plain' def create_engine(*args, **kwargs): """Create a new :class:`.Engine` instance. The standard calling form is to send the URL as the first positional argument, usually a string that indicates database dialect and connection arguments. Additional keyword arguments may then follow it which establish various options on the resulting :class:`.Engine` and its underlying :class:`.Dialect` and :class:`.Pool` constructs. The string form of the URL is ``dialect+driver://user:password@host/dbname[?key=value..]``, where ``dialect`` is a database name such as ``mysql``, ``oracle``, ``postgresql``, etc., and ``driver`` the name of a DBAPI, such as ``psycopg2``, ``pyodbc``, ``cx_oracle``, etc. Alternatively, the URL can be an instance of :class:`~sqlalchemy.engine.url.URL`. 
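    As an illustration only (the database, driver, host and credentials
    below are arbitrary), a typical call might look like::

        engine = create_engine(
            "postgresql+psycopg2://scott:tiger@localhost/test",
            echo=True)
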
``**kwargs`` takes a wide variety of options which are routed towards their appropriate components. Arguments may be specific to the :class:`.Engine`, the underlying :class:`.Dialect`, as well as the :class:`.Pool`. Specific dialects also accept keyword arguments that are unique to that dialect. Here, we describe the parameters that are common to most :func:`.create_engine()` usage. Once established, the newly resulting :class:`.Engine` will request a connection from the underlying :class:`.Pool` once :meth:`.Engine.connect` is called, or a method which depends on it such as :meth:`.Engine.execute` is invoked. The :class:`.Pool` in turn will establish the first actual DBAPI connection when this request is received. The :func:`.create_engine` call itself does **not** establish any actual DBAPI connections directly. See also: :doc:`/core/engines` :ref:`connections_toplevel` :param case_sensitive=True: if False, result column names will match in a case-insensitive fashion, that is, ``row['SomeColumn']``. .. versionchanged:: 0.8 By default, result row names match case-sensitively. In version 0.7 and prior, all matches were case-insensitive. :param connect_args: a dictionary of options which will be passed directly to the DBAPI's ``connect()`` method as additional keyword arguments. See the example at :ref:`custom_dbapi_args`. :param convert_unicode=False: if set to True, sets the default behavior of ``convert_unicode`` on the :class:`.String` type to ``True``, regardless of a setting of ``False`` on an individual :class:`.String` type, thus causing all :class:`.String` -based columns to accommodate Python ``unicode`` objects. This flag is useful as an engine-wide setting when using a DBAPI that does not natively support Python ``unicode`` objects and raises an error when one is received (such as pyodbc with FreeTDS). See :class:`.String` for further details on what this flag indicates. :param creator: a callable which returns a DBAPI connection. This creation function will be passed to the underlying connection pool and will be used to create all new database connections. Usage of this function causes connection parameters specified in the URL argument to be bypassed. :param echo=False: if True, the Engine will log all statements as well as a repr() of their parameter lists to the engines logger, which defaults to sys.stdout. The ``echo`` attribute of ``Engine`` can be modified at any time to turn logging on and off. If set to the string ``"debug"``, result rows will be printed to the standard output as well. This flag ultimately controls a Python logger; see :ref:`dbengine_logging` for information on how to configure logging directly. :param echo_pool=False: if True, the connection pool will log all checkouts/checkins to the logging stream, which defaults to sys.stdout. This flag ultimately controls a Python logger; see :ref:`dbengine_logging` for information on how to configure logging directly. :param encoding: Defaults to ``utf-8``. This is the string encoding used by SQLAlchemy for string encode/decode operations which occur within SQLAlchemy, **outside of the DBAPI.** Most modern DBAPIs feature some degree of direct support for Python ``unicode`` objects, what you see in Python 2 as a string of the form ``u'some string'``. For those scenarios where the DBAPI is detected as not supporting a Python ``unicode`` object, this encoding is used to determine the source/destination encoding. It is **not used** for those cases where the DBAPI handles unicode directly. 
To properly configure a system to accommodate Python ``unicode`` objects, the DBAPI should be configured to handle unicode to the greatest degree as is appropriate - see the notes on unicode pertaining to the specific target database in use at :ref:`dialect_toplevel`. Areas where string encoding may need to be accommodated outside of the DBAPI include zero or more of: * the values passed to bound parameters, corresponding to the :class:`.Unicode` type or the :class:`.String` type when ``convert_unicode`` is ``True``; * the values returned in result set columns corresponding to the :class:`.Unicode` type or the :class:`.String` type when ``convert_unicode`` is ``True``; * the string SQL statement passed to the DBAPI's ``cursor.execute()`` method; * the string names of the keys in the bound parameter dictionary passed to the DBAPI's ``cursor.execute()`` as well as ``cursor.setinputsizes()`` methods; * the string column names retrieved from the DBAPI's ``cursor.description`` attribute. When using Python 3, the DBAPI is required to support *all* of the above values as Python ``unicode`` objects, which in Python 3 are just known as ``str``. In Python 2, the DBAPI does not specify unicode behavior at all, so SQLAlchemy must make decisions for each of the above values on a per-DBAPI basis - implementations are completely inconsistent in their behavior. :param execution_options: Dictionary execution options which will be applied to all connections. See :meth:`~sqlalchemy.engine.Connection.execution_options` :param implicit_returning=True: When ``True``, a RETURNING- compatible construct, if available, will be used to fetch newly generated primary key values when a single row INSERT statement is emitted with no existing returning() clause. This applies to those backends which support RETURNING or a compatible construct, including Postgresql, Firebird, Oracle, Microsoft SQL Server. Set this to ``False`` to disable the automatic usage of RETURNING. :param label_length=None: optional integer value which limits the size of dynamically generated column labels to that many characters. If less than 6, labels are generated as "_(counter)". If ``None``, the value of ``dialect.max_identifier_length`` is used instead. :param listeners: A list of one or more :class:`~sqlalchemy.interfaces.PoolListener` objects which will receive connection pool events. :param logging_name: String identifier which will be used within the "name" field of logging records generated within the "sqlalchemy.engine" logger. Defaults to a hexstring of the object's id. :param max_overflow=10: the number of connections to allow in connection pool "overflow", that is connections that can be opened above and beyond the pool_size setting, which defaults to five. this is only used with :class:`~sqlalchemy.pool.QueuePool`. :param module=None: reference to a Python module object (the module itself, not its string name). Specifies an alternate DBAPI module to be used by the engine's dialect. Each sub-dialect references a specific DBAPI which will be imported before first connect. This parameter causes the import to be bypassed, and the given module to be used instead. Can be used for testing of DBAPIs as well as to inject "mock" DBAPI implementations into the :class:`.Engine`. :param pool=None: an already-constructed instance of :class:`~sqlalchemy.pool.Pool`, such as a :class:`~sqlalchemy.pool.QueuePool` instance. 
If non-None, this pool will be used directly as the underlying connection pool for the engine, bypassing whatever connection parameters are present in the URL argument. For information on constructing connection pools manually, see :ref:`pooling_toplevel`. :param poolclass=None: a :class:`~sqlalchemy.pool.Pool` subclass, which will be used to create a connection pool instance using the connection parameters given in the URL. Note this differs from ``pool`` in that you don't actually instantiate the pool in this case, you just indicate what type of pool to be used. :param pool_logging_name: String identifier which will be used within the "name" field of logging records generated within the "sqlalchemy.pool" logger. Defaults to a hexstring of the object's id. :param pool_size=5: the number of connections to keep open inside the connection pool. This used with :class:`~sqlalchemy.pool.QueuePool` as well as :class:`~sqlalchemy.pool.SingletonThreadPool`. With :class:`~sqlalchemy.pool.QueuePool`, a ``pool_size`` setting of 0 indicates no limit; to disable pooling, set ``poolclass`` to :class:`~sqlalchemy.pool.NullPool` instead. :param pool_recycle=-1: this setting causes the pool to recycle connections after the given number of seconds has passed. It defaults to -1, or no timeout. For example, setting to 3600 means connections will be recycled after one hour. Note that MySQL in particular will disconnect automatically if no activity is detected on a connection for eight hours (although this is configurable with the MySQLDB connection itself and the server configuration as well). :param pool_reset_on_return='rollback': set the "reset on return" behavior of the pool, which is whether ``rollback()``, ``commit()``, or nothing is called upon connections being returned to the pool. See the docstring for ``reset_on_return`` at :class:`.Pool`. .. versionadded:: 0.7.6 :param pool_timeout=30: number of seconds to wait before giving up on getting a connection from the pool. This is only used with :class:`~sqlalchemy.pool.QueuePool`. :param strategy='plain': selects alternate engine implementations. Currently available are: * the ``threadlocal`` strategy, which is described in :ref:`threadlocal_strategy`; * the ``mock`` strategy, which dispatches all statement execution to a function passed as the argument ``executor``. See `example in the FAQ `_. :param executor=None: a function taking arguments ``(sql, *multiparams, **params)``, to which the ``mock`` strategy will dispatch all statement execution. Used only by ``strategy='mock'``. """ strategy = kwargs.pop('strategy', default_strategy) strategy = strategies.strategies[strategy] return strategy.create(*args, **kwargs) def engine_from_config(configuration, prefix='sqlalchemy.', **kwargs): """Create a new Engine instance using a configuration dictionary. The dictionary is typically produced from a config file where keys are prefixed, such as sqlalchemy.url, sqlalchemy.echo, etc. The 'prefix' argument indicates the prefix to be searched for. A select set of keyword arguments will be "coerced" to their expected type based on string values. In a future release, this functionality will be expanded and include dialect-specific arguments. 
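    A minimal illustration (the URL and option values are arbitrary)::

        config = {
            'sqlalchemy.url': 'postgresql+psycopg2://scott:tiger@localhost/test',
            'sqlalchemy.echo': 'true',
        }
        engine = engine_from_config(config, prefix='sqlalchemy.')
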
""" opts = util._coerce_config(configuration, prefix) opts.update(kwargs) url = opts.pop('url') return create_engine(url, **opts) __all__ = ( 'create_engine', 'engine_from_config', ) SQLAlchemy-0.8.4/lib/sqlalchemy/engine/base.py0000644000076500000240000017605612251150015021701 0ustar classicstaff00000000000000# engine/base.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Defines :class:`.Connection` and :class:`.Engine`. """ from __future__ import with_statement import sys from .. import exc, schema, util, log, interfaces from ..sql import expression, util as sql_util from .interfaces import Connectable, Compiled from .util import _distill_params import contextlib class Connection(Connectable): """Provides high-level functionality for a wrapped DB-API connection. Provides execution support for string-based SQL statements as well as :class:`.ClauseElement`, :class:`.Compiled` and :class:`.DefaultGenerator` objects. Provides a :meth:`begin` method to return :class:`.Transaction` objects. The Connection object is **not** thread-safe. While a Connection can be shared among threads using properly synchronized access, it is still possible that the underlying DBAPI connection may not support shared access between threads. Check the DBAPI documentation for details. The Connection object represents a single dbapi connection checked out from the connection pool. In this state, the connection pool has no affect upon the connection, including its expiration or timeout state. For the connection pool to properly manage connections, connections should be returned to the connection pool (i.e. ``connection.close()``) whenever the connection is not in use. .. index:: single: thread safety; Connection """ def __init__(self, engine, connection=None, close_with_result=False, _branch=False, _execution_options=None, _dispatch=None, _has_events=False): """Construct a new Connection. The constructor here is not public and is only called only by an :class:`.Engine`. See :meth:`.Engine.connect` and :meth:`.Engine.contextual_connect` methods. """ self.engine = engine self.dialect = engine.dialect self.__connection = connection or engine.raw_connection() self.__transaction = None self.should_close_with_result = close_with_result self.__savepoint_seq = 0 self.__branch = _branch self.__invalid = False self.__can_reconnect = True if _dispatch: self.dispatch = _dispatch elif engine._has_events: self.dispatch = self.dispatch._join(engine.dispatch) self._has_events = _has_events or engine._has_events self._echo = self.engine._should_log_info() if _execution_options: self._execution_options =\ engine._execution_options.union(_execution_options) else: self._execution_options = engine._execution_options def _branch(self): """Return a new Connection which references this Connection's engine and connection; but does not have close_with_result enabled, and also whose close() method does nothing. This is used to execute "sub" statements within a single execution, usually an INSERT statement. """ return self.engine._connection_cls( self.engine, self.__connection, _branch=True, _has_events=self._has_events, _dispatch=self.dispatch) def _clone(self): """Create a shallow copy of this Connection. 
""" c = self.__class__.__new__(self.__class__) c.__dict__ = self.__dict__.copy() return c def __enter__(self): return self def __exit__(self, type, value, traceback): self.close() def execution_options(self, **opt): """ Set non-SQL options for the connection which take effect during execution. The method returns a copy of this :class:`.Connection` which references the same underlying DBAPI connection, but also defines the given execution options which will take effect for a call to :meth:`execute`. As the new :class:`.Connection` references the same underlying resource, it's usually a good idea to ensure that the copies would be discarded immediately, which is implicit if used as in:: result = connection.execution_options(stream_results=True).\\ execute(stmt) Note that any key/value can be passed to :meth:`.Connection.execution_options`, and it will be stored in the ``_execution_options`` dictionary of the :class:`.Connection`. It is suitable for usage by end-user schemes to communicate with event listeners, for example. The keywords that are currently recognized by SQLAlchemy itself include all those listed under :meth:`.Executable.execution_options`, as well as others that are specific to :class:`.Connection`. :param autocommit: Available on: Connection, statement. When True, a COMMIT will be invoked after execution when executed in 'autocommit' mode, i.e. when an explicit transaction is not begun on the connection. Note that DBAPI connections by default are always in a transaction - SQLAlchemy uses rules applied to different kinds of statements to determine if COMMIT will be invoked in order to provide its "autocommit" feature. Typically, all INSERT/UPDATE/DELETE statements as well as CREATE/DROP statements have autocommit behavior enabled; SELECT constructs do not. Use this option when invoking a SELECT or other specific SQL construct where COMMIT is desired (typically when calling stored procedures and such), and an explicit transaction is not in progress. :param compiled_cache: Available on: Connection. A dictionary where :class:`.Compiled` objects will be cached when the :class:`.Connection` compiles a clause expression into a :class:`.Compiled` object. It is the user's responsibility to manage the size of this dictionary, which will have keys corresponding to the dialect, clause element, the column names within the VALUES or SET clause of an INSERT or UPDATE, as well as the "batch" mode for an INSERT or UPDATE statement. The format of this dictionary is not guaranteed to stay the same in future releases. Note that the ORM makes use of its own "compiled" caches for some operations, including flush operations. The caching used by the ORM internally supersedes a cache dictionary specified here. :param isolation_level: Available on: Connection. Set the transaction isolation level for the lifespan of this connection. Valid values include those string values accepted by the ``isolation_level`` parameter passed to :func:`.create_engine`, and are database specific, including those for :ref:`sqlite_toplevel`, :ref:`postgresql_toplevel` - see those dialect's documentation for further info. Note that this option necessarily affects the underlying DBAPI connection for the lifespan of the originating :class:`.Connection`, and is not per-execution. This setting is not removed until the underlying DBAPI connection is returned to the connection pool, i.e. the :meth:`.Connection.close` method is called. 
:param no_parameters: When ``True``, if the final parameter list or dictionary is totally empty, will invoke the statement on the cursor as ``cursor.execute(statement)``, not passing the parameter collection at all. Some DBAPIs such as psycopg2 and mysql-python consider percent signs as significant only when parameters are present; this option allows code to generate SQL containing percent signs (and possibly other characters) that is neutral regarding whether it's executed by the DBAPI or piped into a script that's later invoked by command line tools. .. versionadded:: 0.7.6 :param stream_results: Available on: Connection, statement. Indicate to the dialect that results should be "streamed" and not pre-buffered, if possible. This is a limitation of many DBAPIs. The flag is currently understood only by the psycopg2 dialect. """ c = self._clone() c._execution_options = c._execution_options.union(opt) if 'isolation_level' in opt: c._set_isolation_level() return c def _set_isolation_level(self): self.dialect.set_isolation_level(self.connection, self._execution_options['isolation_level']) self.connection._connection_record.finalize_callback = \ self.dialect.reset_isolation_level @property def closed(self): """Return True if this connection is closed.""" return '_Connection__connection' not in self.__dict__ \ and not self.__can_reconnect @property def invalidated(self): """Return True if this connection was invalidated.""" return self.__invalid @property def connection(self): "The underlying DB-API connection managed by this Connection." try: return self.__connection except AttributeError: return self._revalidate_connection() def _revalidate_connection(self): if self.__can_reconnect and self.__invalid: if self.__transaction is not None: raise exc.InvalidRequestError( "Can't reconnect until invalid " "transaction is rolled back") self.__connection = self.engine.raw_connection() self.__invalid = False return self.__connection raise exc.ResourceClosedError("This Connection is closed") @property def _connection_is_valid(self): # use getattr() for is_valid to support exceptions raised in # dialect initializer, where the connection is not wrapped in # _ConnectionFairy return getattr(self.__connection, 'is_valid', False) @property def _still_open_and_connection_is_valid(self): return \ not self.closed and \ not self.invalidated and \ getattr(self.__connection, 'is_valid', False) @property def info(self): """Info dictionary associated with the underlying DBAPI connection referred to by this :class:`.Connection`, allowing user-defined data to be associated with the connection. The data here will follow along with the DBAPI connection including after it is returned to the connection pool and used again in subsequent instances of :class:`.Connection`. """ return self.connection.info def connect(self): """Returns a branched version of this :class:`.Connection`. The :meth:`.Connection.close` method on the returned :class:`.Connection` can be called and this :class:`.Connection` will remain open. This method provides usage symmetry with :meth:`.Engine.connect`, including for usage with context managers. """ return self._branch() def contextual_connect(self, **kwargs): """Returns a branched version of this :class:`.Connection`. The :meth:`.Connection.close` method on the returned :class:`.Connection` can be called and this :class:`.Connection` will remain open. This method provides usage symmetry with :meth:`.Engine.contextual_connect`, including for usage with context managers. 
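        For example (``some_table`` is assumed to be an existing
        :class:`.Table`), the enclosing :class:`.Connection` remains
        usable after the block exits::

            with connection.contextual_connect() as branched:
                branched.execute(some_table.select())
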
""" return self._branch() def invalidate(self, exception=None): """Invalidate the underlying DBAPI connection associated with this Connection. The underlying DB-API connection is literally closed (if possible), and is discarded. Its source connection pool will typically lazily create a new connection to replace it. Upon the next usage, this Connection will attempt to reconnect to the pool with a new connection. Transactions in progress remain in an "opened" state (even though the actual transaction is gone); these must be explicitly rolled back before a reconnect on this Connection can proceed. This is to prevent applications from accidentally continuing their transactional operations in a non-transactional state. """ if self.invalidated: return if self.closed: raise exc.ResourceClosedError("This Connection is closed") if self._connection_is_valid: self.__connection.invalidate(exception) del self.__connection self.__invalid = True def detach(self): """Detach the underlying DB-API connection from its connection pool. This Connection instance will remain usable. When closed, the DB-API connection will be literally closed and not returned to its pool. The pool will typically lazily create a new connection to replace the detached connection. This method can be used to insulate the rest of an application from a modified state on a connection (such as a transaction isolation level or similar). Also see :class:`~sqlalchemy.interfaces.PoolListener` for a mechanism to modify connection state when connections leave and return to their connection pool. """ self.__connection.detach() def begin(self): """Begin a transaction and return a transaction handle. The returned object is an instance of :class:`.Transaction`. This object represents the "scope" of the transaction, which completes when either the :meth:`.Transaction.rollback` or :meth:`.Transaction.commit` method is called. Nested calls to :meth:`.begin` on the same :class:`.Connection` will return new :class:`.Transaction` objects that represent an emulated transaction within the scope of the enclosing transaction, that is:: trans = conn.begin() # outermost transaction trans2 = conn.begin() # "nested" trans2.commit() # does nothing trans.commit() # actually commits Calls to :meth:`.Transaction.commit` only have an effect when invoked via the outermost :class:`.Transaction` object, though the :meth:`.Transaction.rollback` method of any of the :class:`.Transaction` objects will roll back the transaction. See also: :meth:`.Connection.begin_nested` - use a SAVEPOINT :meth:`.Connection.begin_twophase` - use a two phase /XID transaction :meth:`.Engine.begin` - context manager available from :class:`.Engine`. """ if self.__transaction is None: self.__transaction = RootTransaction(self) return self.__transaction else: return Transaction(self, self.__transaction) def begin_nested(self): """Begin a nested transaction and return a transaction handle. The returned object is an instance of :class:`.NestedTransaction`. Nested transactions require SAVEPOINT support in the underlying database. Any transaction in the hierarchy may ``commit`` and ``rollback``, however the outermost transaction still controls the overall ``commit`` or ``rollback`` of the transaction of a whole. See also :meth:`.Connection.begin`, :meth:`.Connection.begin_twophase`. 
""" if self.__transaction is None: self.__transaction = RootTransaction(self) else: self.__transaction = NestedTransaction(self, self.__transaction) return self.__transaction def begin_twophase(self, xid=None): """Begin a two-phase or XA transaction and return a transaction handle. The returned object is an instance of :class:`.TwoPhaseTransaction`, which in addition to the methods provided by :class:`.Transaction`, also provides a :meth:`~.TwoPhaseTransaction.prepare` method. :param xid: the two phase transaction id. If not supplied, a random id will be generated. See also :meth:`.Connection.begin`, :meth:`.Connection.begin_twophase`. """ if self.__transaction is not None: raise exc.InvalidRequestError( "Cannot start a two phase transaction when a transaction " "is already in progress.") if xid is None: xid = self.engine.dialect.create_xid() self.__transaction = TwoPhaseTransaction(self, xid) return self.__transaction def recover_twophase(self): return self.engine.dialect.do_recover_twophase(self) def rollback_prepared(self, xid, recover=False): self.engine.dialect.do_rollback_twophase(self, xid, recover=recover) def commit_prepared(self, xid, recover=False): self.engine.dialect.do_commit_twophase(self, xid, recover=recover) def in_transaction(self): """Return True if a transaction is in progress.""" return self.__transaction is not None def _begin_impl(self): if self._echo: self.engine.logger.info("BEGIN (implicit)") if self._has_events: self.dispatch.begin(self) try: self.engine.dialect.do_begin(self.connection) except Exception, e: self._handle_dbapi_exception(e, None, None, None, None) def _rollback_impl(self): if self._has_events: self.dispatch.rollback(self) if self._still_open_and_connection_is_valid: if self._echo: self.engine.logger.info("ROLLBACK") try: self.engine.dialect.do_rollback(self.connection) self.__transaction = None except Exception, e: self._handle_dbapi_exception(e, None, None, None, None) else: self.__transaction = None def _commit_impl(self, autocommit=False): if self._has_events: self.dispatch.commit(self) if self._echo: self.engine.logger.info("COMMIT") try: self.engine.dialect.do_commit(self.connection) self.__transaction = None except Exception, e: self._handle_dbapi_exception(e, None, None, None, None) def _savepoint_impl(self, name=None): if self._has_events: self.dispatch.savepoint(self, name) if name is None: self.__savepoint_seq += 1 name = 'sa_savepoint_%s' % self.__savepoint_seq if self._still_open_and_connection_is_valid: self.engine.dialect.do_savepoint(self, name) return name def _rollback_to_savepoint_impl(self, name, context): if self._has_events: self.dispatch.rollback_savepoint(self, name, context) if self._still_open_and_connection_is_valid: self.engine.dialect.do_rollback_to_savepoint(self, name) self.__transaction = context def _release_savepoint_impl(self, name, context): if self._has_events: self.dispatch.release_savepoint(self, name, context) if self._still_open_and_connection_is_valid: self.engine.dialect.do_release_savepoint(self, name) self.__transaction = context def _begin_twophase_impl(self, xid): if self._echo: self.engine.logger.info("BEGIN TWOPHASE (implicit)") if self._has_events: self.dispatch.begin_twophase(self, xid) if self._still_open_and_connection_is_valid: self.engine.dialect.do_begin_twophase(self, xid) def _prepare_twophase_impl(self, xid): if self._has_events: self.dispatch.prepare_twophase(self, xid) if self._still_open_and_connection_is_valid: assert isinstance(self.__transaction, TwoPhaseTransaction) 
self.engine.dialect.do_prepare_twophase(self, xid) def _rollback_twophase_impl(self, xid, is_prepared): if self._has_events: self.dispatch.rollback_twophase(self, xid, is_prepared) if self._still_open_and_connection_is_valid: assert isinstance(self.__transaction, TwoPhaseTransaction) self.engine.dialect.do_rollback_twophase(self, xid, is_prepared) self.__transaction = None def _commit_twophase_impl(self, xid, is_prepared): if self._has_events: self.dispatch.commit_twophase(self, xid, is_prepared) if self._still_open_and_connection_is_valid: assert isinstance(self.__transaction, TwoPhaseTransaction) self.engine.dialect.do_commit_twophase(self, xid, is_prepared) self.__transaction = None def _autorollback(self): if not self.in_transaction(): self._rollback_impl() def close(self): """Close this :class:`.Connection`. This results in a release of the underlying database resources, that is, the DBAPI connection referenced internally. The DBAPI connection is typically restored back to the connection-holding :class:`.Pool` referenced by the :class:`.Engine` that produced this :class:`.Connection`. Any transactional state present on the DBAPI connection is also unconditionally released via the DBAPI connection's ``rollback()`` method, regardless of any :class:`.Transaction` object that may be outstanding with regards to this :class:`.Connection`. After :meth:`~.Connection.close` is called, the :class:`.Connection` is permanently in a closed state, and will allow no further operations. """ try: conn = self.__connection except AttributeError: pass else: if not self.__branch: conn.close() del self.__connection self.__can_reconnect = False self.__transaction = None def scalar(self, object, *multiparams, **params): """Executes and returns the first column of the first row. The underlying result/cursor is closed after execution. """ return self.execute(object, *multiparams, **params).scalar() def execute(self, object, *multiparams, **params): """Executes the a SQL statement construct and returns a :class:`.ResultProxy`. :param object: The statement to be executed. May be one of: * a plain string * any :class:`.ClauseElement` construct that is also a subclass of :class:`.Executable`, such as a :func:`~.expression.select` construct * a :class:`.FunctionElement`, such as that generated by :attr:`.func`, will be automatically wrapped in a SELECT statement, which is then executed. * a :class:`.DDLElement` object * a :class:`.DefaultGenerator` object * a :class:`.Compiled` object :param \*multiparams/\**params: represent bound parameter values to be used in the execution. Typically, the format is either a collection of one or more dictionaries passed to \*multiparams:: conn.execute( table.insert(), {"id":1, "value":"v1"}, {"id":2, "value":"v2"} ) ...or individual key/values interpreted by \**params:: conn.execute( table.insert(), id=1, value="v1" ) In the case that a plain SQL string is passed, and the underlying DBAPI accepts positional bind parameters, a collection of tuples or individual values in \*multiparams may be passed:: conn.execute( "INSERT INTO table (id, value) VALUES (?, ?)", (1, "v1"), (2, "v2") ) conn.execute( "INSERT INTO table (id, value) VALUES (?, ?)", 1, "v1" ) Note above, the usage of a question mark "?" or other symbol is contingent upon the "paramstyle" accepted by the DBAPI in use, which may be any of "qmark", "named", "pyformat", "format", "numeric". See `pep-249 `_ for details on paramstyle. 
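# --- Illustrative sketch (not part of the original module): DBAPI-agnostic
# textual execution as suggested at the end of the execute() docstring
# above, using an in-memory SQLite database; the table is a placeholder.
from sqlalchemy import create_engine
from sqlalchemy.sql import text

engine = create_engine("sqlite://")
conn = engine.connect()
conn.execute("create table t (id integer primary key, value varchar(20))")

# text() uses named ":param" binds regardless of the DBAPI's paramstyle;
# multiple dictionaries trigger an executemany().
conn.execute(
    text("insert into t (id, value) values (:id, :value)"),
    {"id": 1, "value": "v1"},
    {"id": 2, "value": "v2"})

# scalar() returns the first column of the first row and closes the result.
assert conn.scalar(text("select count(*) from t")) == 2
conn.close()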
To execute a textual SQL statement which uses bound parameters in a DBAPI-agnostic way, use the :func:`~.expression.text` construct. """ for c in type(object).__mro__: if c in Connection.executors: return Connection.executors[c]( self, object, multiparams, params) else: raise exc.InvalidRequestError( "Unexecutable object type: %s" % type(object)) def _execute_function(self, func, multiparams, params): """Execute a sql.FunctionElement object.""" return self._execute_clauseelement(func.select(), multiparams, params) def _execute_default(self, default, multiparams, params): """Execute a schema.ColumnDefault object.""" if self._has_events: for fn in self.dispatch.before_execute: default, multiparams, params = \ fn(self, default, multiparams, params) try: try: conn = self.__connection except AttributeError: conn = self._revalidate_connection() dialect = self.dialect ctx = dialect.execution_ctx_cls._init_default( dialect, self, conn) except Exception, e: self._handle_dbapi_exception(e, None, None, None, None) ret = ctx._exec_default(default, None) if self.should_close_with_result: self.close() if self._has_events: self.dispatch.after_execute(self, default, multiparams, params, ret) return ret def _execute_ddl(self, ddl, multiparams, params): """Execute a schema.DDL object.""" if self._has_events: for fn in self.dispatch.before_execute: ddl, multiparams, params = \ fn(self, ddl, multiparams, params) dialect = self.dialect compiled = ddl.compile(dialect=dialect) ret = self._execute_context( dialect, dialect.execution_ctx_cls._init_ddl, compiled, None, compiled ) if self._has_events: self.dispatch.after_execute(self, ddl, multiparams, params, ret) return ret def _execute_clauseelement(self, elem, multiparams, params): """Execute a sql.ClauseElement object.""" if self._has_events: for fn in self.dispatch.before_execute: elem, multiparams, params = \ fn(self, elem, multiparams, params) distilled_params = _distill_params(multiparams, params) if distilled_params: keys = distilled_params[0].keys() else: keys = [] dialect = self.dialect if 'compiled_cache' in self._execution_options: key = dialect, elem, tuple(keys), len(distilled_params) > 1 if key in self._execution_options['compiled_cache']: compiled_sql = self._execution_options['compiled_cache'][key] else: compiled_sql = elem.compile( dialect=dialect, column_keys=keys, inline=len(distilled_params) > 1) self._execution_options['compiled_cache'][key] = compiled_sql else: compiled_sql = elem.compile( dialect=dialect, column_keys=keys, inline=len(distilled_params) > 1) ret = self._execute_context( dialect, dialect.execution_ctx_cls._init_compiled, compiled_sql, distilled_params, compiled_sql, distilled_params ) if self._has_events: self.dispatch.after_execute(self, elem, multiparams, params, ret) return ret def _execute_compiled(self, compiled, multiparams, params): """Execute a sql.Compiled object.""" if self._has_events: for fn in self.dispatch.before_execute: compiled, multiparams, params = \ fn(self, compiled, multiparams, params) dialect = self.dialect parameters = _distill_params(multiparams, params) ret = self._execute_context( dialect, dialect.execution_ctx_cls._init_compiled, compiled, parameters, compiled, parameters ) if self._has_events: self.dispatch.after_execute(self, compiled, multiparams, params, ret) return ret def _execute_text(self, statement, multiparams, params): """Execute a string SQL statement.""" if self._has_events: for fn in self.dispatch.before_execute: statement, multiparams, params = \ fn(self, statement, multiparams, 
params) dialect = self.dialect parameters = _distill_params(multiparams, params) ret = self._execute_context( dialect, dialect.execution_ctx_cls._init_statement, statement, parameters, statement, parameters ) if self._has_events: self.dispatch.after_execute(self, statement, multiparams, params, ret) return ret def _execute_context(self, dialect, constructor, statement, parameters, *args): """Create an :class:`.ExecutionContext` and execute, returning a :class:`.ResultProxy`.""" try: try: conn = self.__connection except AttributeError: conn = self._revalidate_connection() context = constructor(dialect, self, conn, *args) except Exception, e: self._handle_dbapi_exception(e, util.text_type(statement), parameters, None, None) if context.compiled: context.pre_exec() cursor, statement, parameters = context.cursor, \ context.statement, \ context.parameters if not context.executemany: parameters = parameters[0] if self._has_events: for fn in self.dispatch.before_cursor_execute: statement, parameters = \ fn(self, cursor, statement, parameters, context, context.executemany) if self._echo: self.engine.logger.info(statement) self.engine.logger.info("%r", sql_util._repr_params(parameters, batches=10)) try: if context.executemany: self.dialect.do_executemany( cursor, statement, parameters, context) elif not parameters and context.no_parameters: self.dialect.do_execute_no_params( cursor, statement, context) else: self.dialect.do_execute( cursor, statement, parameters, context) except Exception, e: self._handle_dbapi_exception( e, statement, parameters, cursor, context) if self._has_events: self.dispatch.after_cursor_execute(self, cursor, statement, parameters, context, context.executemany) if context.compiled: context.post_exec() if context.isinsert and not context.executemany: context.post_insert() # create a resultproxy, get rowcount/implicit RETURNING # rows, close cursor if no further results pending result = context.get_result_proxy() if context.isinsert: if context._is_implicit_returning: context._fetch_implicit_returning(result) result.close(_autoclose_connection=False) result._metadata = None elif not context._is_explicit_returning: result.close(_autoclose_connection=False) result._metadata = None elif result._metadata is None: # no results, get rowcount # (which requires open cursor on some drivers # such as kintersbasdb, mxodbc), result.rowcount result.close(_autoclose_connection=False) if self.__transaction is None and context.should_autocommit: self._commit_impl(autocommit=True) if result.closed and self.should_close_with_result: self.close() return result def _cursor_execute(self, cursor, statement, parameters, context=None): """Execute a statement + params on the given cursor. Adds appropriate logging and exception handling. This method is used by DefaultDialect for special-case executions, such as for sequences and column defaults. The path of statement execution in the majority of cases terminates at _execute_context(). """ if self._has_events: for fn in self.dispatch.before_cursor_execute: statement, parameters = \ fn(self, cursor, statement, parameters, context, context.executemany if context is not None else False) if self._echo: self.engine.logger.info(statement) self.engine.logger.info("%r", parameters) try: self.dialect.do_execute( cursor, statement, parameters) except Exception, e: self._handle_dbapi_exception( e, statement, parameters, cursor, None) def _safe_close_cursor(self, cursor): """Close the given cursor, catching exceptions and turning into log warnings. 
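# --- Illustrative sketch (not part of the original module): the hooks
# consulted by _execute_context() and _cursor_execute() above are the
# public "before_cursor_execute" / "after_cursor_execute" events; a minimal
# listener, using an in-memory SQLite engine as a stand-in.
from sqlalchemy import create_engine, event

engine = create_engine("sqlite://")

@event.listens_for(engine, "before_cursor_execute")
def log_statement(conn, cursor, statement, parameters, context, executemany):
    # observes (or, when registered with retval=True, rewrites) the statement
    # and parameters immediately before cursor.execute()/executemany()
    print "executing:", statement

engine.execute("select 1")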
""" try: cursor.close() except Exception, e: try: ex_text = str(e) except TypeError: ex_text = repr(e) if not self.closed: self.connection._logger.warn( "Error closing cursor: %s", ex_text) if isinstance(e, (SystemExit, KeyboardInterrupt)): raise _reentrant_error = False _is_disconnect = False def _handle_dbapi_exception(self, e, statement, parameters, cursor, context): exc_info = sys.exc_info() if not self._is_disconnect: self._is_disconnect = isinstance(e, self.dialect.dbapi.Error) and \ not self.closed and \ self.dialect.is_disconnect(e, self.__connection, cursor) if self._reentrant_error: util.raise_from_cause( exc.DBAPIError.instance(statement, parameters, e, self.dialect.dbapi.Error), exc_info ) self._reentrant_error = True try: # non-DBAPI error - if we already got a context, # or theres no string statement, don't wrap it should_wrap = isinstance(e, self.dialect.dbapi.Error) or \ (statement is not None and context is None) if should_wrap and context: if self._has_events: self.dispatch.dbapi_error(self, cursor, statement, parameters, context, e) context.handle_dbapi_exception(e) if not self._is_disconnect: if cursor: self._safe_close_cursor(cursor) self._autorollback() if should_wrap: util.raise_from_cause( exc.DBAPIError.instance( statement, parameters, e, self.dialect.dbapi.Error, connection_invalidated=self._is_disconnect), exc_info ) util.reraise(*exc_info) finally: del self._reentrant_error if self._is_disconnect: del self._is_disconnect dbapi_conn_wrapper = self.connection self.invalidate(e) if not hasattr(dbapi_conn_wrapper, '_pool') or \ dbapi_conn_wrapper._pool is self.engine.pool: self.engine.dispose() if self.should_close_with_result: self.close() # poor man's multimethod/generic function thingy executors = { expression.FunctionElement: _execute_function, expression.ClauseElement: _execute_clauseelement, Compiled: _execute_compiled, schema.SchemaItem: _execute_default, schema.DDLElement: _execute_ddl, basestring: _execute_text } def default_schema_name(self): return self.engine.dialect.get_default_schema_name(self) def transaction(self, callable_, *args, **kwargs): """Execute the given function within a transaction boundary. The function is passed this :class:`.Connection` as the first argument, followed by the given \*args and \**kwargs, e.g.:: def do_something(conn, x, y): conn.execute("some statement", {'x':x, 'y':y}) conn.transaction(do_something, 5, 10) The operations inside the function are all invoked within the context of a single :class:`.Transaction`. Upon success, the transaction is committed. If an exception is raised, the transaction is rolled back before propagating the exception. .. note:: The :meth:`.transaction` method is superseded by the usage of the Python ``with:`` statement, which can be used with :meth:`.Connection.begin`:: with conn.begin(): conn.execute("some statement", {'x':5, 'y':10}) As well as with :meth:`.Engine.begin`:: with engine.begin() as conn: conn.execute("some statement", {'x':5, 'y':10}) See also: :meth:`.Engine.begin` - engine-level transactional context :meth:`.Engine.transaction` - engine-level version of :meth:`.Connection.transaction` """ trans = self.begin() try: ret = self.run_callable(callable_, *args, **kwargs) trans.commit() return ret except: with util.safe_reraise(): trans.rollback() def run_callable(self, callable_, *args, **kwargs): """Given a callable object or function, execute it, passing a :class:`.Connection` as the first argument. 
The given \*args and \**kwargs are passed subsequent to the :class:`.Connection` argument. This function, along with :meth:`.Engine.run_callable`, allows a function to be run with a :class:`.Connection` or :class:`.Engine` object without the need to know which one is being dealt with. """ return callable_(self, *args, **kwargs) def _run_visitor(self, visitorcallable, element, **kwargs): visitorcallable(self.dialect, self, **kwargs).traverse_single(element) class Transaction(object): """Represent a database transaction in progress. The :class:`.Transaction` object is procured by calling the :meth:`~.Connection.begin` method of :class:`.Connection`:: from sqlalchemy import create_engine engine = create_engine("postgresql://scott:tiger@localhost/test") connection = engine.connect() trans = connection.begin() connection.execute("insert into x (a, b) values (1, 2)") trans.commit() The object provides :meth:`.rollback` and :meth:`.commit` methods in order to control transaction boundaries. It also implements a context manager interface so that the Python ``with`` statement can be used with the :meth:`.Connection.begin` method:: with connection.begin(): connection.execute("insert into x (a, b) values (1, 2)") The Transaction object is **not** threadsafe. See also: :meth:`.Connection.begin`, :meth:`.Connection.begin_twophase`, :meth:`.Connection.begin_nested`. .. index:: single: thread safety; Transaction """ def __init__(self, connection, parent): self.connection = connection self._parent = parent or self self.is_active = True def close(self): """Close this :class:`.Transaction`. If this transaction is the base transaction in a begin/commit nesting, the transaction will rollback(). Otherwise, the method returns. This is used to cancel a Transaction without affecting the scope of an enclosing transaction. """ if not self._parent.is_active: return if self._parent is self: self.rollback() def rollback(self): """Roll back this :class:`.Transaction`. """ if not self._parent.is_active: return self._do_rollback() self.is_active = False def _do_rollback(self): self._parent.rollback() def commit(self): """Commit this :class:`.Transaction`.""" if not self._parent.is_active: raise exc.InvalidRequestError("This transaction is inactive") self._do_commit() self.is_active = False def _do_commit(self): pass def __enter__(self): return self def __exit__(self, type, value, traceback): if type is None and self.is_active: try: self.commit() except: with util.safe_reraise(): self.rollback() else: self.rollback() class RootTransaction(Transaction): def __init__(self, connection): super(RootTransaction, self).__init__(connection, None) self.connection._begin_impl() def _do_rollback(self): if self.is_active: self.connection._rollback_impl() def _do_commit(self): if self.is_active: self.connection._commit_impl() class NestedTransaction(Transaction): """Represent a 'nested', or SAVEPOINT transaction. A new :class:`.NestedTransaction` object may be procured using the :meth:`.Connection.begin_nested` method. The interface is the same as that of :class:`.Transaction`. """ def __init__(self, connection, parent): super(NestedTransaction, self).__init__(connection, parent) self._savepoint = self.connection._savepoint_impl() def _do_rollback(self): if self.is_active: self.connection._rollback_to_savepoint_impl( self._savepoint, self._parent) def _do_commit(self): if self.is_active: self.connection._release_savepoint_impl( self._savepoint, self._parent) class TwoPhaseTransaction(Transaction): """Represent a two-phase transaction. 
A new :class:`.TwoPhaseTransaction` object may be procured using the :meth:`.Connection.begin_twophase` method. The interface is the same as that of :class:`.Transaction` with the addition of the :meth:`prepare` method. """ def __init__(self, connection, xid): super(TwoPhaseTransaction, self).__init__(connection, None) self._is_prepared = False self.xid = xid self.connection._begin_twophase_impl(self.xid) def prepare(self): """Prepare this :class:`.TwoPhaseTransaction`. After a PREPARE, the transaction can be committed. """ if not self._parent.is_active: raise exc.InvalidRequestError("This transaction is inactive") self.connection._prepare_twophase_impl(self.xid) self._is_prepared = True def _do_rollback(self): self.connection._rollback_twophase_impl(self.xid, self._is_prepared) def _do_commit(self): self.connection._commit_twophase_impl(self.xid, self._is_prepared) class Engine(Connectable, log.Identified): """ Connects a :class:`~sqlalchemy.pool.Pool` and :class:`~sqlalchemy.engine.interfaces.Dialect` together to provide a source of database connectivity and behavior. An :class:`.Engine` object is instantiated publicly using the :func:`~sqlalchemy.create_engine` function. See also: :doc:`/core/engines` :ref:`connections_toplevel` """ _execution_options = util.immutabledict() _has_events = False _connection_cls = Connection def __init__(self, pool, dialect, url, logging_name=None, echo=None, proxy=None, execution_options=None ): self.pool = pool self.url = url self.dialect = dialect self.pool._dialect = dialect if logging_name: self.logging_name = logging_name self.echo = echo self.engine = self log.instance_logger(self, echoflag=echo) if proxy: interfaces.ConnectionProxy._adapt_listener(self, proxy) if execution_options: self.update_execution_options(**execution_options) def update_execution_options(self, **opt): """Update the default execution_options dictionary of this :class:`.Engine`. The given keys/values in \**opt are added to the default execution options that will be used for all connections. The initial contents of this dictionary can be sent via the ``execution_options`` parameter to :func:`.create_engine`. .. seealso:: :meth:`.Connection.execution_options` :meth:`.Engine.execution_options` """ if 'isolation_level' in opt: raise exc.ArgumentError( "'isolation_level' execution option may " "only be specified on Connection.execution_options(). " "To set engine-wide isolation level, " "use the isolation_level argument to create_engine()." ) self._execution_options = \ self._execution_options.union(opt) def execution_options(self, **opt): """Return a new :class:`.Engine` that will provide :class:`.Connection` objects with the given execution options. The returned :class:`.Engine` remains related to the original :class:`.Engine` in that it shares the same connection pool and other state: * The :class:`.Pool` used by the new :class:`.Engine` is the same instance. The :meth:`.Engine.dispose` method will replace the connection pool instance for the parent engine as well as this one. * Event listeners are "cascaded" - meaning, the new :class:`.Engine` inherits the events of the parent, and new events can be associated with the new :class:`.Engine` individually. * The logging configuration and logging_name is copied from the parent :class:`.Engine`. 
The intent of the :meth:`.Engine.execution_options` method is to implement "sharding" schemes where multiple :class:`.Engine` objects refer to the same connection pool, but are differentiated by options that would be consumed by a custom event:: primary_engine = create_engine("mysql://") shard1 = primary_engine.execution_options(shard_id="shard1") shard2 = primary_engine.execution_options(shard_id="shard2") Above, the ``shard1`` engine serves as a factory for :class:`.Connection` objects that will contain the execution option ``shard_id=shard1``, and ``shard2`` will produce :class:`.Connection` objects that contain the execution option ``shard_id=shard2``. An event handler can consume the above execution option to perform a schema switch or other operation, given a connection. Below we emit a MySQL ``use`` statement to switch databases, at the same time keeping track of which database we've established using the :attr:`.Connection.info` dictionary, which gives us a persistent storage space that follows the DBAPI connection:: from sqlalchemy import event from sqlalchemy.engine import Engine shards = {"default": "base", shard_1: "db1", "shard_2": "db2"} @event.listens_for(Engine, "before_cursor_execute") def _switch_shard(conn, cursor, stmt, params, context, executemany): shard_id = conn._execution_options.get('shard_id', "default") current_shard = conn.info.get("current_shard", None) if current_shard != shard_id: cursor.execute("use %s" % shards[shard_id]) conn.info["current_shard"] = shard_id .. versionadded:: 0.8 .. seealso:: :meth:`.Connection.execution_options` - update execution options on a :class:`.Connection` object. :meth:`.Engine.update_execution_options` - update the execution options for a given :class:`.Engine` in place. """ return OptionEngine(self, opt) @property def name(self): """String name of the :class:`~sqlalchemy.engine.interfaces.Dialect` in use by this :class:`Engine`.""" return self.dialect.name @property def driver(self): """Driver name of the :class:`~sqlalchemy.engine.interfaces.Dialect` in use by this :class:`Engine`.""" return self.dialect.driver echo = log.echo_property() def __repr__(self): return 'Engine(%r)' % self.url def dispose(self): """Dispose of the connection pool used by this :class:`.Engine`. A new connection pool is created immediately after the old one has been disposed. This new pool, like all SQLAlchemy connection pools, does not make any actual connections to the database until one is first requested. This method has two general use cases: * When a dropped connection is detected, it is assumed that all connections held by the pool are potentially dropped, and the entire pool is replaced. * An application may want to use :meth:`dispose` within a test suite that is creating multiple engines. It is critical to note that :meth:`dispose` does **not** guarantee that the application will release all open database connections - only those connections that are checked into the pool are closed. Connections which remain checked out or have been detached from the engine are not affected. 
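# --- Illustrative sketch (not part of the original module): the per-engine
# execution_options() "sharding" pattern described above, written out with
# quoted shard keys; the MySQL URL and database names are placeholders.
from sqlalchemy import create_engine, event
from sqlalchemy.engine import Engine

shards = {"default": "base", "shard_1": "db1", "shard_2": "db2"}

primary_engine = create_engine("mysql://scott:tiger@localhost/base")
shard1 = primary_engine.execution_options(shard_id="shard_1")
shard2 = primary_engine.execution_options(shard_id="shard_2")

@event.listens_for(Engine, "before_cursor_execute")
def _switch_shard(conn, cursor, stmt, params, context, executemany):
    shard_id = conn._execution_options.get("shard_id", "default")
    if conn.info.get("current_shard") != shard_id:
        cursor.execute("use %s" % shards[shard_id])
        conn.info["current_shard"] = shard_id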
""" self.pool = self.pool._replace() def _execute_default(self, default): with self.contextual_connect() as conn: return conn._execute_default(default, (), {}) @contextlib.contextmanager def _optional_conn_ctx_manager(self, connection=None): if connection is None: with self.contextual_connect() as conn: yield conn else: yield connection def _run_visitor(self, visitorcallable, element, connection=None, **kwargs): with self._optional_conn_ctx_manager(connection) as conn: conn._run_visitor(visitorcallable, element, **kwargs) class _trans_ctx(object): def __init__(self, conn, transaction, close_with_result): self.conn = conn self.transaction = transaction self.close_with_result = close_with_result def __enter__(self): return self.conn def __exit__(self, type, value, traceback): if type is not None: self.transaction.rollback() else: self.transaction.commit() if not self.close_with_result: self.conn.close() def begin(self, close_with_result=False): """Return a context manager delivering a :class:`.Connection` with a :class:`.Transaction` established. E.g.:: with engine.begin() as conn: conn.execute("insert into table (x, y, z) values (1, 2, 3)") conn.execute("my_special_procedure(5)") Upon successful operation, the :class:`.Transaction` is committed. If an error is raised, the :class:`.Transaction` is rolled back. The ``close_with_result`` flag is normally ``False``, and indicates that the :class:`.Connection` will be closed when the operation is complete. When set to ``True``, it indicates the :class:`.Connection` is in "single use" mode, where the :class:`.ResultProxy` returned by the first call to :meth:`.Connection.execute` will close the :class:`.Connection` when that :class:`.ResultProxy` has exhausted all result rows. .. versionadded:: 0.7.6 See also: :meth:`.Engine.connect` - procure a :class:`.Connection` from an :class:`.Engine`. :meth:`.Connection.begin` - start a :class:`.Transaction` for a particular :class:`.Connection`. """ conn = self.contextual_connect(close_with_result=close_with_result) try: trans = conn.begin() except: with util.safe_reraise(): conn.close() return Engine._trans_ctx(conn, trans, close_with_result) def transaction(self, callable_, *args, **kwargs): """Execute the given function within a transaction boundary. The function is passed a :class:`.Connection` newly procured from :meth:`.Engine.contextual_connect` as the first argument, followed by the given \*args and \**kwargs. e.g.:: def do_something(conn, x, y): conn.execute("some statement", {'x':x, 'y':y}) engine.transaction(do_something, 5, 10) The operations inside the function are all invoked within the context of a single :class:`.Transaction`. Upon success, the transaction is committed. If an exception is raised, the transaction is rolled back before propagating the exception. .. note:: The :meth:`.transaction` method is superseded by the usage of the Python ``with:`` statement, which can be used with :meth:`.Engine.begin`:: with engine.begin() as conn: conn.execute("some statement", {'x':5, 'y':10}) See also: :meth:`.Engine.begin` - engine-level transactional context :meth:`.Connection.transaction` - connection-level version of :meth:`.Engine.transaction` """ with self.contextual_connect() as conn: return conn.transaction(callable_, *args, **kwargs) def run_callable(self, callable_, *args, **kwargs): """Given a callable object or function, execute it, passing a :class:`.Connection` as the first argument. The given \*args and \**kwargs are passed subsequent to the :class:`.Connection` argument. 
This function, along with :meth:`.Connection.run_callable`, allows a function to be run with a :class:`.Connection` or :class:`.Engine` object without the need to know which one is being dealt with. """ with self.contextual_connect() as conn: return conn.run_callable(callable_, *args, **kwargs) def execute(self, statement, *multiparams, **params): """Executes the given construct and returns a :class:`.ResultProxy`. The arguments are the same as those used by :meth:`.Connection.execute`. Here, a :class:`.Connection` is acquired using the :meth:`~.Engine.contextual_connect` method, and the statement executed with that connection. The returned :class:`.ResultProxy` is flagged such that when the :class:`.ResultProxy` is exhausted and its underlying cursor is closed, the :class:`.Connection` created here will also be closed, which allows its associated DBAPI connection resource to be returned to the connection pool. """ connection = self.contextual_connect(close_with_result=True) return connection.execute(statement, *multiparams, **params) def scalar(self, statement, *multiparams, **params): return self.execute(statement, *multiparams, **params).scalar() def _execute_clauseelement(self, elem, multiparams=None, params=None): connection = self.contextual_connect(close_with_result=True) return connection._execute_clauseelement(elem, multiparams, params) def _execute_compiled(self, compiled, multiparams, params): connection = self.contextual_connect(close_with_result=True) return connection._execute_compiled(compiled, multiparams, params) def connect(self, **kwargs): """Return a new :class:`.Connection` object. The :class:`.Connection` object is a facade that uses a DBAPI connection internally in order to communicate with the database. This connection is procured from the connection-holding :class:`.Pool` referenced by this :class:`.Engine`. When the :meth:`~.Connection.close` method of the :class:`.Connection` object is called, the underlying DBAPI connection is then returned to the connection pool, where it may be used again in a subsequent call to :meth:`~.Engine.connect`. """ return self._connection_cls(self, **kwargs) def contextual_connect(self, close_with_result=False, **kwargs): """Return a :class:`.Connection` object which may be part of some ongoing context. By default, this method does the same thing as :meth:`.Engine.connect`. Subclasses of :class:`.Engine` may override this method to provide contextual behavior. :param close_with_result: When True, the first :class:`.ResultProxy` created by the :class:`.Connection` will call the :meth:`.Connection.close` method of that connection as soon as any pending result rows are exhausted. This is used to supply the "connectionless execution" behavior provided by the :meth:`.Engine.execute` method. """ return self._connection_cls(self, self.pool.connect(), close_with_result=close_with_result, **kwargs) def table_names(self, schema=None, connection=None): """Return a list of all table names available in the database. :param schema: Optional, retrieve names from a non-default schema. :param connection: Optional, use a specified connection. Default is the ``contextual_connect`` for this ``Engine``. 
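# --- Illustrative sketch (not part of the original module): the
# table_names() / has_table() helpers around this point; the in-memory
# SQLite engine and table are stand-ins.
from sqlalchemy import create_engine

engine = create_engine("sqlite://")
engine.execute("create table users (id integer primary key)")

print engine.table_names()          # ['users']
print engine.has_table("users")     # True
print engine.has_table("missing")   # False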
""" with self._optional_conn_ctx_manager(connection) as conn: if not schema: schema = self.dialect.default_schema_name return self.dialect.get_table_names(conn, schema) def has_table(self, table_name, schema=None): return self.run_callable(self.dialect.has_table, table_name, schema) def raw_connection(self): """Return a "raw" DBAPI connection from the connection pool. The returned object is a proxied version of the DBAPI connection object used by the underlying driver in use. The object will have all the same behavior as the real DBAPI connection, except that its ``close()`` method will result in the connection being returned to the pool, rather than being closed for real. This method provides direct DBAPI connection access for special situations. In most situations, the :class:`.Connection` object should be used, which is procured using the :meth:`.Engine.connect` method. """ return self.pool.unique_connection() class OptionEngine(Engine): def __init__(self, proxied, execution_options): self._proxied = proxied self.url = proxied.url self.dialect = proxied.dialect self.logging_name = proxied.logging_name self.echo = proxied.echo log.instance_logger(self, echoflag=self.echo) self.dispatch = self.dispatch._join(proxied.dispatch) self._execution_options = proxied._execution_options self.update_execution_options(**execution_options) def _get_pool(self): return self._proxied.pool def _set_pool(self, pool): self._proxied.pool = pool pool = property(_get_pool, _set_pool) def _get_has_events(self): return self._proxied._has_events or \ self.__dict__.get('_has_events', False) def _set_has_events(self, value): self.__dict__['_has_events'] = value _has_events = property(_get_has_events, _set_has_events) SQLAlchemy-0.8.4/lib/sqlalchemy/engine/ddl.py0000644000076500000240000001557412251150015021527 0ustar classicstaff00000000000000# engine/ddl.py # Copyright (C) 2009-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Routines to handle CREATE/DROP workflow.""" from .. 
import schema from ..sql import util as sql_util class DDLBase(schema.SchemaVisitor): def __init__(self, connection): self.connection = connection class SchemaGenerator(DDLBase): def __init__(self, dialect, connection, checkfirst=False, tables=None, **kwargs): super(SchemaGenerator, self).__init__(connection, **kwargs) self.checkfirst = checkfirst self.tables = tables self.preparer = dialect.identifier_preparer self.dialect = dialect self.memo = {} def _can_create_table(self, table): self.dialect.validate_identifier(table.name) if table.schema: self.dialect.validate_identifier(table.schema) return not self.checkfirst or \ not self.dialect.has_table(self.connection, table.name, schema=table.schema) def _can_create_sequence(self, sequence): return self.dialect.supports_sequences and \ ( (not self.dialect.sequences_optional or not sequence.optional) and ( not self.checkfirst or not self.dialect.has_sequence( self.connection, sequence.name, schema=sequence.schema) ) ) def visit_metadata(self, metadata): if self.tables is not None: tables = self.tables else: tables = metadata.tables.values() collection = [t for t in sql_util.sort_tables(tables) if self._can_create_table(t)] seq_coll = [s for s in metadata._sequences.values() if s.column is None and self._can_create_sequence(s)] metadata.dispatch.before_create(metadata, self.connection, tables=collection, checkfirst=self.checkfirst, _ddl_runner=self) for seq in seq_coll: self.traverse_single(seq, create_ok=True) for table in collection: self.traverse_single(table, create_ok=True) metadata.dispatch.after_create(metadata, self.connection, tables=collection, checkfirst=self.checkfirst, _ddl_runner=self) def visit_table(self, table, create_ok=False): if not create_ok and not self._can_create_table(table): return table.dispatch.before_create(table, self.connection, checkfirst=self.checkfirst, _ddl_runner=self) for column in table.columns: if column.default is not None: self.traverse_single(column.default) self.connection.execute(schema.CreateTable(table)) if hasattr(table, 'indexes'): for index in table.indexes: self.traverse_single(index) table.dispatch.after_create(table, self.connection, checkfirst=self.checkfirst, _ddl_runner=self) def visit_sequence(self, sequence, create_ok=False): if not create_ok and not self._can_create_sequence(sequence): return self.connection.execute(schema.CreateSequence(sequence)) def visit_index(self, index): self.connection.execute(schema.CreateIndex(index)) class SchemaDropper(DDLBase): def __init__(self, dialect, connection, checkfirst=False, tables=None, **kwargs): super(SchemaDropper, self).__init__(connection, **kwargs) self.checkfirst = checkfirst self.tables = tables self.preparer = dialect.identifier_preparer self.dialect = dialect self.memo = {} def visit_metadata(self, metadata): if self.tables is not None: tables = self.tables else: tables = metadata.tables.values() collection = [ t for t in reversed(sql_util.sort_tables(tables)) if self._can_drop_table(t) ] seq_coll = [ s for s in metadata._sequences.values() if s.column is None and self._can_drop_sequence(s) ] metadata.dispatch.before_drop( metadata, self.connection, tables=collection, checkfirst=self.checkfirst, _ddl_runner=self) for table in collection: self.traverse_single(table, drop_ok=True) for seq in seq_coll: self.traverse_single(seq, drop_ok=True) metadata.dispatch.after_drop( metadata, self.connection, tables=collection, checkfirst=self.checkfirst, _ddl_runner=self) def _can_drop_table(self, table): self.dialect.validate_identifier(table.name) 
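# --- Illustrative sketch (not part of the original module): SchemaGenerator
# above and SchemaDropper below are driven by MetaData.create_all() /
# drop_all(); the in-memory SQLite engine is a stand-in.
from sqlalchemy import MetaData, Table, Column, Integer, String, create_engine

engine = create_engine("sqlite://")
metadata = MetaData()
users = Table("users", metadata,
              Column("id", Integer, primary_key=True),
              Column("name", String(50)))

# checkfirst=True corresponds to the 'checkfirst' flag consulted by
# _can_create_table() / _can_drop_table(): existing objects are skipped.
metadata.create_all(engine, checkfirst=True)
metadata.drop_all(engine, checkfirst=True)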
if table.schema: self.dialect.validate_identifier(table.schema) return not self.checkfirst or self.dialect.has_table(self.connection, table.name, schema=table.schema) def _can_drop_sequence(self, sequence): return self.dialect.supports_sequences and \ ((not self.dialect.sequences_optional or not sequence.optional) and (not self.checkfirst or self.dialect.has_sequence( self.connection, sequence.name, schema=sequence.schema)) ) def visit_index(self, index): self.connection.execute(schema.DropIndex(index)) def visit_table(self, table, drop_ok=False): if not drop_ok and not self._can_drop_table(table): return table.dispatch.before_drop(table, self.connection, checkfirst=self.checkfirst, _ddl_runner=self) for column in table.columns: if column.default is not None: self.traverse_single(column.default) self.connection.execute(schema.DropTable(table)) table.dispatch.after_drop(table, self.connection, checkfirst=self.checkfirst, _ddl_runner=self) def visit_sequence(self, sequence, drop_ok=False): if not drop_ok and not self._can_drop_sequence(sequence): return self.connection.execute(schema.DropSequence(sequence)) SQLAlchemy-0.8.4/lib/sqlalchemy/engine/default.py0000644000076500000240000007313512251150015022405 0ustar classicstaff00000000000000# engine/default.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Default implementations of per-dialect sqlalchemy.engine classes. These are semi-private implementation classes which are only of importance to database dialect authors; dialects will usually use the classes here as the base class for their own corresponding classes. """ import re import random from . import reflection, interfaces, result from ..sql import compiler, expression from .. import exc, types as sqltypes, util, pool, processors import codecs import weakref AUTOCOMMIT_REGEXP = re.compile( r'\s*(?:UPDATE|INSERT|CREATE|DELETE|DROP|ALTER)', re.I | re.UNICODE) class DefaultDialect(interfaces.Dialect): """Default implementation of Dialect""" statement_compiler = compiler.SQLCompiler ddl_compiler = compiler.DDLCompiler type_compiler = compiler.GenericTypeCompiler preparer = compiler.IdentifierPreparer supports_alter = True # the first value we'd get for an autoincrement # column. default_sequence_base = 1 # most DBAPIs happy with this for execute(). # not cx_oracle. execute_sequence_format = tuple supports_views = True supports_sequences = False sequences_optional = False preexecute_autoincrement_sequences = False postfetch_lastrowid = True implicit_returning = False supports_native_enum = False supports_native_boolean = False # if the NUMERIC type # returns decimal.Decimal. # *not* the FLOAT type however. supports_native_decimal = False # Py3K #supports_unicode_statements = True #supports_unicode_binds = True #returns_unicode_strings = True #description_encoding = None # Py2K supports_unicode_statements = False supports_unicode_binds = False returns_unicode_strings = False description_encoding = 'use_encoding' # end Py2K name = 'default' # length at which to truncate # any identifier. max_identifier_length = 9999 # length at which to truncate # the name of an index. # Usually None to indicate # 'use max_identifier_length'. 
# thanks to MySQL, sigh max_index_name_length = None supports_sane_rowcount = True supports_sane_multi_rowcount = True dbapi_type_map = {} colspecs = {} default_paramstyle = 'named' supports_default_values = False supports_empty_insert = True supports_multivalues_insert = False server_version_info = None # indicates symbol names are # UPPERCASEd if they are case insensitive # within the database. # if this is True, the methods normalize_name() # and denormalize_name() must be provided. requires_name_normalize = False reflection_options = () def __init__(self, convert_unicode=False, encoding='utf-8', paramstyle=None, dbapi=None, implicit_returning=None, case_sensitive=True, label_length=None, **kwargs): if not getattr(self, 'ported_sqla_06', True): util.warn( "The %s dialect is not yet ported to the 0.6 format" % self.name) self.convert_unicode = convert_unicode self.encoding = encoding self.positional = False self._ischema = None self.dbapi = dbapi if paramstyle is not None: self.paramstyle = paramstyle elif self.dbapi is not None: self.paramstyle = self.dbapi.paramstyle else: self.paramstyle = self.default_paramstyle if implicit_returning is not None: self.implicit_returning = implicit_returning self.positional = self.paramstyle in ('qmark', 'format', 'numeric') self.identifier_preparer = self.preparer(self) self.type_compiler = self.type_compiler(self) self.case_sensitive = case_sensitive if label_length and label_length > self.max_identifier_length: raise exc.ArgumentError( "Label length of %d is greater than this dialect's" " maximum identifier length of %d" % (label_length, self.max_identifier_length)) self.label_length = label_length if self.description_encoding == 'use_encoding': self._description_decoder = \ processors.to_unicode_processor_factory( encoding ) elif self.description_encoding is not None: self._description_decoder = \ processors.to_unicode_processor_factory( self.description_encoding ) self._encoder = codecs.getencoder(self.encoding) self._decoder = processors.to_unicode_processor_factory(self.encoding) @util.memoized_property def _type_memos(self): return weakref.WeakKeyDictionary() @property def dialect_description(self): return self.name + "+" + self.driver @classmethod def get_pool_class(cls, url): return getattr(cls, 'poolclass', pool.QueuePool) def initialize(self, connection): try: self.server_version_info = \ self._get_server_version_info(connection) except NotImplementedError: self.server_version_info = None try: self.default_schema_name = \ self._get_default_schema_name(connection) except NotImplementedError: self.default_schema_name = None try: self.default_isolation_level = \ self.get_isolation_level(connection.connection) except NotImplementedError: self.default_isolation_level = None self.returns_unicode_strings = self._check_unicode_returns(connection) self.do_rollback(connection.connection) def on_connect(self): """return a callable which sets up a newly created DBAPI connection. This is used to set dialect-wide per-connection options such as isolation modes, unicode modes, etc. If a callable is returned, it will be assembled into a pool listener that receives the direct DBAPI connection, with all wrappers removed. If None is returned, no listener will be generated. 
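# --- Illustrative sketch (not part of the original module): a hypothetical
# dialect subclass overriding the on_connect() hook described above to
# configure each new DBAPI connection; the class is an example only and is
# not registered anywhere.
from sqlalchemy.dialects.sqlite.pysqlite import SQLiteDialect_pysqlite

class ForeignKeySQLiteDialect(SQLiteDialect_pysqlite):
    def on_connect(self):
        def connect(dbapi_connection):
            # invoked once per newly created DBAPI connection, with all
            # pool wrappers removed
            cursor = dbapi_connection.cursor()
            cursor.execute("PRAGMA foreign_keys=ON")
            cursor.close()
        return connect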
""" return None def _check_unicode_returns(self, connection): # Py2K if self.supports_unicode_statements: cast_to = unicode else: cast_to = str # end Py2K # Py3K #cast_to = str def check_unicode(formatstr, type_): cursor = connection.connection.cursor() try: try: cursor.execute( cast_to( expression.select( [expression.cast( expression.literal_column( "'test %s returns'" % formatstr), type_) ]).compile(dialect=self) ) ) row = cursor.fetchone() return isinstance(row[0], unicode) except self.dbapi.Error, de: util.warn("Exception attempting to " "detect unicode returns: %r" % de) return False finally: cursor.close() # detect plain VARCHAR unicode_for_varchar = check_unicode("plain", sqltypes.VARCHAR(60)) # detect if there's an NVARCHAR type with different behavior available unicode_for_unicode = check_unicode("unicode", sqltypes.Unicode(60)) if unicode_for_unicode and not unicode_for_varchar: return "conditional" else: return unicode_for_varchar def type_descriptor(self, typeobj): """Provide a database-specific :class:`.TypeEngine` object, given the generic object which comes from the types module. This method looks for a dictionary called ``colspecs`` as a class or instance-level variable, and passes on to :func:`.types.adapt_type`. """ return sqltypes.adapt_type(typeobj, self.colspecs) def reflecttable(self, connection, table, include_columns, exclude_columns=None): insp = reflection.Inspector.from_engine(connection) return insp.reflecttable(table, include_columns, exclude_columns) def get_pk_constraint(self, conn, table_name, schema=None, **kw): """Compatibility method, adapts the result of get_primary_keys() for those dialects which don't implement get_pk_constraint(). """ return { 'constrained_columns': self.get_primary_keys(conn, table_name, schema=schema, **kw) } def validate_identifier(self, ident): if len(ident) > self.max_identifier_length: raise exc.IdentifierError( "Identifier '%s' exceeds maximum length of %d characters" % (ident, self.max_identifier_length) ) def connect(self, *cargs, **cparams): return self.dbapi.connect(*cargs, **cparams) def create_connect_args(self, url): opts = url.translate_connect_args() opts.update(url.query) return [[], opts] def do_begin(self, dbapi_connection): pass def do_rollback(self, dbapi_connection): dbapi_connection.rollback() def do_commit(self, dbapi_connection): dbapi_connection.commit() def do_close(self, dbapi_connection): dbapi_connection.close() def create_xid(self): """Create a random two-phase transaction ID. This id will be passed to do_begin_twophase(), do_rollback_twophase(), do_commit_twophase(). Its format is unspecified. 
""" return "_sa_%032x" % random.randint(0, 2 ** 128) def do_savepoint(self, connection, name): connection.execute(expression.SavepointClause(name)) def do_rollback_to_savepoint(self, connection, name): connection.execute(expression.RollbackToSavepointClause(name)) def do_release_savepoint(self, connection, name): connection.execute(expression.ReleaseSavepointClause(name)) def do_executemany(self, cursor, statement, parameters, context=None): cursor.executemany(statement, parameters) def do_execute(self, cursor, statement, parameters, context=None): cursor.execute(statement, parameters) def do_execute_no_params(self, cursor, statement, context=None): cursor.execute(statement) def is_disconnect(self, e, connection, cursor): return False def reset_isolation_level(self, dbapi_conn): # default_isolation_level is read from the first connection # after the initial set of 'isolation_level', if any, so is # the configured default of this dialect. self.set_isolation_level(dbapi_conn, self.default_isolation_level) class DefaultExecutionContext(interfaces.ExecutionContext): isinsert = False isupdate = False isdelete = False isddl = False executemany = False result_map = None compiled = None statement = None postfetch_cols = None prefetch_cols = None _is_implicit_returning = False _is_explicit_returning = False # a hook for SQLite's translation of # result column names _translate_colname = None @classmethod def _init_ddl(cls, dialect, connection, dbapi_connection, compiled_ddl): """Initialize execution context for a DDLElement construct.""" self = cls.__new__(cls) self.dialect = dialect self.root_connection = connection self._dbapi_connection = dbapi_connection self.engine = connection.engine self.compiled = compiled = compiled_ddl self.isddl = True self.execution_options = compiled.statement._execution_options if connection._execution_options: self.execution_options = dict(self.execution_options) self.execution_options.update(connection._execution_options) if not dialect.supports_unicode_statements: self.unicode_statement = unicode(compiled) self.statement = dialect._encoder(self.unicode_statement)[0] else: self.statement = self.unicode_statement = unicode(compiled) self.cursor = self.create_cursor() self.compiled_parameters = [] if dialect.positional: self.parameters = [dialect.execute_sequence_format()] else: self.parameters = [{}] return self @classmethod def _init_compiled(cls, dialect, connection, dbapi_connection, compiled, parameters): """Initialize execution context for a Compiled construct.""" self = cls.__new__(cls) self.dialect = dialect self.root_connection = connection self._dbapi_connection = dbapi_connection self.engine = connection.engine self.compiled = compiled if not compiled.can_execute: raise exc.ArgumentError("Not an executable clause") self.execution_options = compiled.statement._execution_options if connection._execution_options: self.execution_options = dict(self.execution_options) self.execution_options.update(connection._execution_options) # compiled clauseelement. 
process bind params, process table defaults, # track collections used by ResultProxy to target and process results self.result_map = compiled.result_map self.unicode_statement = unicode(compiled) if not dialect.supports_unicode_statements: self.statement = self.unicode_statement.encode( self.dialect.encoding) else: self.statement = self.unicode_statement self.isinsert = compiled.isinsert self.isupdate = compiled.isupdate self.isdelete = compiled.isdelete if self.isinsert or self.isupdate or self.isdelete: self._is_explicit_returning = bool(compiled.statement._returning) self._is_implicit_returning = bool(compiled.returning and \ not compiled.statement._returning) if not parameters: self.compiled_parameters = [compiled.construct_params()] else: self.compiled_parameters = \ [compiled.construct_params(m, _group_number=grp) for grp, m in enumerate(parameters)] self.executemany = len(parameters) > 1 self.cursor = self.create_cursor() if self.isinsert or self.isupdate: self.postfetch_cols = self.compiled.postfetch self.prefetch_cols = self.compiled.prefetch self.__process_defaults() processors = compiled._bind_processors # Convert the dictionary of bind parameter values # into a dict or list to be sent to the DBAPI's # execute() or executemany() method. parameters = [] if dialect.positional: for compiled_params in self.compiled_parameters: param = [] for key in self.compiled.positiontup: if key in processors: param.append(processors[key](compiled_params[key])) else: param.append(compiled_params[key]) parameters.append(dialect.execute_sequence_format(param)) else: encode = not dialect.supports_unicode_statements for compiled_params in self.compiled_parameters: param = {} if encode: for key in compiled_params: if key in processors: param[dialect._encoder(key)[0]] = \ processors[key](compiled_params[key]) else: param[dialect._encoder(key)[0]] = \ compiled_params[key] else: for key in compiled_params: if key in processors: param[key] = processors[key](compiled_params[key]) else: param[key] = compiled_params[key] parameters.append(param) self.parameters = dialect.execute_sequence_format(parameters) return self @classmethod def _init_statement(cls, dialect, connection, dbapi_connection, statement, parameters): """Initialize execution context for a string SQL statement.""" self = cls.__new__(cls) self.dialect = dialect self.root_connection = connection self._dbapi_connection = dbapi_connection self.engine = connection.engine # plain text statement self.execution_options = connection._execution_options if not parameters: if self.dialect.positional: self.parameters = [dialect.execute_sequence_format()] else: self.parameters = [{}] elif isinstance(parameters[0], dialect.execute_sequence_format): self.parameters = parameters elif isinstance(parameters[0], dict): if dialect.supports_unicode_statements: self.parameters = parameters else: self.parameters = [ dict((dialect._encoder(k)[0], d[k]) for k in d) for d in parameters ] or [{}] else: self.parameters = [dialect.execute_sequence_format(p) for p in parameters] self.executemany = len(parameters) > 1 if not dialect.supports_unicode_statements and \ isinstance(statement, unicode): self.unicode_statement = statement self.statement = dialect._encoder(statement)[0] else: self.statement = self.unicode_statement = statement self.cursor = self.create_cursor() return self @classmethod def _init_default(cls, dialect, connection, dbapi_connection): """Initialize execution context for a ColumnDefault construct.""" self = cls.__new__(cls) self.dialect = dialect 
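# --- Illustrative sketch (not part of the original module): the
# 'executemany' flag computed above is True when more than one parameter
# set is passed, routing execution to the dialect's do_executemany();
# the in-memory SQLite engine and table are stand-ins.
from sqlalchemy import MetaData, Table, Column, Integer, String, create_engine

engine = create_engine("sqlite://")
metadata = MetaData()
t = Table("t", metadata,
          Column("id", Integer, primary_key=True),
          Column("value", String(20)))
metadata.create_all(engine)

# a list of dictionaries becomes a single cursor.executemany() call
engine.execute(t.insert(), [{"id": 1, "value": "v1"},
                            {"id": 2, "value": "v2"}])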
self.root_connection = connection self._dbapi_connection = dbapi_connection self.engine = connection.engine self.execution_options = connection._execution_options self.cursor = self.create_cursor() return self @util.memoized_property def no_parameters(self): return self.execution_options.get("no_parameters", False) @util.memoized_property def is_crud(self): return self.isinsert or self.isupdate or self.isdelete @util.memoized_property def should_autocommit(self): autocommit = self.execution_options.get('autocommit', not self.compiled and self.statement and expression.PARSE_AUTOCOMMIT or False) if autocommit is expression.PARSE_AUTOCOMMIT: return self.should_autocommit_text(self.unicode_statement) else: return autocommit def _execute_scalar(self, stmt, type_): """Execute a string statement on the current cursor, returning a scalar result. Used to fire off sequences, default phrases, and "select lastrowid" types of statements individually or in the context of a parent INSERT or UPDATE statement. """ conn = self.root_connection if isinstance(stmt, unicode) and \ not self.dialect.supports_unicode_statements: stmt = self.dialect._encoder(stmt)[0] if self.dialect.positional: default_params = self.dialect.execute_sequence_format() else: default_params = {} conn._cursor_execute(self.cursor, stmt, default_params, context=self) r = self.cursor.fetchone()[0] if type_ is not None: # apply type post processors to the result proc = type_._cached_result_processor( self.dialect, self.cursor.description[0][1] ) if proc: return proc(r) return r @property def connection(self): return self.root_connection._branch() def should_autocommit_text(self, statement): return AUTOCOMMIT_REGEXP.match(statement) def create_cursor(self): return self._dbapi_connection.cursor() def pre_exec(self): pass def post_exec(self): pass def get_result_processor(self, type_, colname, coltype): """Return a 'result processor' for a given type as present in cursor.description. This has a default implementation that dialects can override for context-sensitive result type handling. """ return type_._cached_result_processor(self.dialect, coltype) def get_lastrowid(self): """return self.cursor.lastrowid, or equivalent, after an INSERT. This may involve calling special cursor functions, issuing a new SELECT on the cursor (or a new one), or returning a stored value that was calculated within post_exec(). This function will only be called for dialects which support "implicit" primary key generation, keep preexecute_autoincrement_sequences set to False, and when no explicit id value was bound to the statement. The function is called once, directly after post_exec() and before the transaction is committed or ResultProxy is generated. If the post_exec() method assigns a value to `self._lastrowid`, the value is used in place of calling get_lastrowid(). Note that this method is *not* equivalent to the ``lastrowid`` method on ``ResultProxy``, which is a direct proxy to the DBAPI ``lastrowid`` accessor in all cases. 
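# --- Illustrative sketch (not part of the original module): the
# should_autocommit / should_autocommit_text() logic above in action; the
# in-memory SQLite engine is a stand-in and my_procedure() is a placeholder
# for a backend-side routine.
from sqlalchemy import create_engine
from sqlalchemy.sql import text

engine = create_engine("sqlite://")

# textual DML matches AUTOCOMMIT_REGEXP, so a COMMIT is emitted when
# executed outside an explicit Transaction
engine.execute("create table t (x integer)")
engine.execute("insert into t (x) values (1)")

# statements that don't match the regexp can opt in explicitly
stmt = text("select my_procedure()").execution_options(autocommit=True)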
""" return self.cursor.lastrowid def handle_dbapi_exception(self, e): pass def get_result_proxy(self): return result.ResultProxy(self) @property def rowcount(self): return self.cursor.rowcount def supports_sane_rowcount(self): return self.dialect.supports_sane_rowcount def supports_sane_multi_rowcount(self): return self.dialect.supports_sane_multi_rowcount def post_insert(self): if not self._is_implicit_returning and \ not self._is_explicit_returning and \ not self.compiled.inline and \ self.dialect.postfetch_lastrowid and \ (not self.inserted_primary_key or \ None in self.inserted_primary_key): table = self.compiled.statement.table lastrowid = self.get_lastrowid() autoinc_col = table._autoincrement_column if autoinc_col is not None: # apply type post processors to the lastrowid proc = autoinc_col.type._cached_result_processor( self.dialect, None) if proc is not None: lastrowid = proc(lastrowid) self.inserted_primary_key = [ lastrowid if c is autoinc_col else v for c, v in zip( table.primary_key, self.inserted_primary_key) ] def _fetch_implicit_returning(self, resultproxy): table = self.compiled.statement.table row = resultproxy.fetchone() ipk = [] for c, v in zip(table.primary_key, self.inserted_primary_key): if v is not None: ipk.append(v) else: ipk.append(row[c]) self.inserted_primary_key = ipk def lastrow_has_defaults(self): return (self.isinsert or self.isupdate) and \ bool(self.postfetch_cols) def set_input_sizes(self, translate=None, exclude_types=None): """Given a cursor and ClauseParameters, call the appropriate style of ``setinputsizes()`` on the cursor, using DB-API types from the bind parameter's ``TypeEngine`` objects. This method only called by those dialects which require it, currently cx_oracle. """ if not hasattr(self.compiled, 'bind_names'): return types = dict( (self.compiled.bind_names[bindparam], bindparam.type) for bindparam in self.compiled.bind_names) if self.dialect.positional: inputsizes = [] for key in self.compiled.positiontup: typeengine = types[key] dbtype = typeengine.dialect_impl(self.dialect).\ get_dbapi_type(self.dialect.dbapi) if dbtype is not None and \ (not exclude_types or dbtype not in exclude_types): inputsizes.append(dbtype) try: self.cursor.setinputsizes(*inputsizes) except Exception, e: self.root_connection._handle_dbapi_exception( e, None, None, None, self) else: inputsizes = {} for key in self.compiled.bind_names.values(): typeengine = types[key] dbtype = typeengine.dialect_impl(self.dialect).\ get_dbapi_type(self.dialect.dbapi) if dbtype is not None and \ (not exclude_types or dbtype not in exclude_types): if translate: key = translate.get(key, key) if not self.dialect.supports_unicode_binds: key = self.dialect._encoder(key)[0] inputsizes[key] = dbtype try: self.cursor.setinputsizes(**inputsizes) except Exception, e: self.root_connection._handle_dbapi_exception( e, None, None, None, self) def _exec_default(self, default, type_): if default.is_sequence: return self.fire_sequence(default, type_) elif default.is_callable: return default.arg(self) elif default.is_clause_element: # TODO: expensive branching here should be # pulled into _exec_scalar() conn = self.connection c = expression.select([default.arg]).compile(bind=conn) return conn._execute_compiled(c, (), {}).scalar() else: return default.arg def get_insert_default(self, column): if column.default is None: return None else: return self._exec_default(column.default, column.type) def get_update_default(self, column): if column.onupdate is None: return None else: return 
self._exec_default(column.onupdate, column.type) def __process_defaults(self): """Generate default values for compiled insert/update statements, and generate inserted_primary_key collection. """ if self.executemany: if len(self.compiled.prefetch): scalar_defaults = {} # pre-determine scalar Python-side defaults # to avoid many calls of get_insert_default()/ # get_update_default() for c in self.prefetch_cols: if self.isinsert and c.default and c.default.is_scalar: scalar_defaults[c] = c.default.arg elif self.isupdate and c.onupdate and c.onupdate.is_scalar: scalar_defaults[c] = c.onupdate.arg for param in self.compiled_parameters: self.current_parameters = param for c in self.prefetch_cols: if c in scalar_defaults: val = scalar_defaults[c] elif self.isinsert: val = self.get_insert_default(c) else: val = self.get_update_default(c) if val is not None: param[c.key] = val del self.current_parameters else: self.current_parameters = compiled_parameters = \ self.compiled_parameters[0] for c in self.compiled.prefetch: if self.isinsert: val = self.get_insert_default(c) else: val = self.get_update_default(c) if val is not None: compiled_parameters[c.key] = val del self.current_parameters if self.isinsert: self.inserted_primary_key = [ self.compiled_parameters[0].get(c.key, None) for c in self.compiled.\ statement.table.primary_key ] DefaultDialect.execution_ctx_cls = DefaultExecutionContext SQLAlchemy-0.8.4/lib/sqlalchemy/engine/interfaces.py0000644000076500000240000007173012251150015023103 0ustar classicstaff00000000000000# engine/interfaces.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Define core interfaces used by the engine system.""" from .. import util, event, events class Dialect(object): """Define the behavior of a specific database and DB-API combination. Any aspect of metadata definition, SQL query generation, execution, result-set handling, or anything else which varies between databases is defined under the general category of the Dialect. The Dialect acts as a factory for other database-specific object implementations including ExecutionContext, Compiled, DefaultGenerator, and TypeEngine. All Dialects implement the following attributes: name identifying name for the dialect from a DBAPI-neutral point of view (i.e. 'sqlite') driver identifying name for the dialect's DBAPI positional True if the paramstyle for this Dialect is positional. paramstyle the paramstyle to be used (some DB-APIs support multiple paramstyles). convert_unicode True if Unicode conversion should be applied to all ``str`` types. encoding type of encoding to use for unicode, usually defaults to 'utf-8'. statement_compiler a :class:`.Compiled` class used to compile SQL statements ddl_compiler a :class:`.Compiled` class used to compile DDL statements server_version_info a tuple containing a version number for the DB backend in use. This value is only available for supporting dialects, and is typically populated during the initial connection to the database. default_schema_name the name of the default schema. This value is only available for supporting dialects, and is typically populated during the initial connection to the database. execution_ctx_cls a :class:`.ExecutionContext` class used to handle statement execution execute_sequence_format either the 'tuple' or 'list' type, depending on what cursor.execute() accepts for the second argument (they vary). 
preparer a :class:`~sqlalchemy.sql.compiler.IdentifierPreparer` class used to quote identifiers. supports_alter ``True`` if the database supports ``ALTER TABLE``. max_identifier_length The maximum length of identifier names. supports_unicode_statements Indicate whether the DB-API can receive SQL statements as Python unicode strings supports_unicode_binds Indicate whether the DB-API can receive string bind parameters as Python unicode strings supports_sane_rowcount Indicate whether the dialect properly implements rowcount for ``UPDATE`` and ``DELETE`` statements. supports_sane_multi_rowcount Indicate whether the dialect properly implements rowcount for ``UPDATE`` and ``DELETE`` statements when executed via executemany. preexecute_autoincrement_sequences True if 'implicit' primary key functions must be executed separately in order to get their value. This is currently oriented towards Postgresql. implicit_returning use RETURNING or equivalent during INSERT execution in order to load newly generated primary keys and other column defaults in one execution, which are then available via inserted_primary_key. If an insert statement has returning() specified explicitly, the "implicit" functionality is not used and inserted_primary_key will not be available. dbapi_type_map A mapping of DB-API type objects present in this Dialect's DB-API implementation mapped to TypeEngine implementations used by the dialect. This is used to apply types to result sets based on the DB-API types present in cursor.description; it only takes effect for result sets against textual statements where no explicit typemap was present. colspecs A dictionary of TypeEngine classes from sqlalchemy.types mapped to subclasses that are specific to the dialect class. This dictionary is class-level only and is not accessed from the dialect instance itself. supports_default_values Indicates if the construct ``INSERT INTO tablename DEFAULT VALUES`` is supported supports_sequences Indicates if the dialect supports CREATE SEQUENCE or similar. sequences_optional If True, indicates if the "optional" flag on the Sequence() construct should signal to not generate a CREATE SEQUENCE. Applies only to dialects that support sequences. Currently used only to allow Postgresql SERIAL to be used on a column that specifies Sequence() for usage on other backends. supports_native_enum Indicates if the dialect supports a native ENUM construct. This will prevent types.Enum from generating a CHECK constraint when that type is used. supports_native_boolean Indicates if the dialect supports a native boolean construct. This will prevent types.Boolean from generating a CHECK constraint when that type is used. """ def create_connect_args(self, url): """Build DB-API compatible connection arguments. Given a :class:`~sqlalchemy.engine.url.URL` object, returns a tuple consisting of a `*args`/`**kwargs` suitable to send directly to the dbapi's connect function. """ raise NotImplementedError() @classmethod def type_descriptor(cls, typeobj): """Transform a generic type to a dialect-specific type. Dialect classes will usually use the :func:`.types.adapt_type` function in the types module to accomplish this. The returned result is cached *per dialect class* so can contain no dialect-instance state. """ raise NotImplementedError() def initialize(self, connection): """Called during strategized creation of the dialect with a connection. Allows dialects to configure options based on server version info or other properties. 
The connection passed here is a SQLAlchemy Connection object, with full capabilities. The initalize() method of the base dialect should be called via super(). """ pass def reflecttable(self, connection, table, include_columns=None): """Load table description from the database. Given a :class:`.Connection` and a :class:`~sqlalchemy.schema.Table` object, reflect its columns and properties from the database. If include_columns (a list or set) is specified, limit the autoload to the given column names. The default implementation uses the :class:`~sqlalchemy.engine.reflection.Inspector` interface to provide the output, building upon the granular table/column/ constraint etc. methods of :class:`.Dialect`. """ raise NotImplementedError() def get_columns(self, connection, table_name, schema=None, **kw): """Return information about columns in `table_name`. Given a :class:`.Connection`, a string `table_name`, and an optional string `schema`, return column information as a list of dictionaries with these keys: name the column's name type [sqlalchemy.types#TypeEngine] nullable boolean default the column's default value autoincrement boolean sequence a dictionary of the form {'name' : str, 'start' :int, 'increment': int} Additional column attributes may be present. """ raise NotImplementedError() def get_primary_keys(self, connection, table_name, schema=None, **kw): """Return information about primary keys in `table_name`. Deprecated. This method is only called by the default implementation of :meth:`.Dialect.get_pk_constraint`. Dialects should instead implement this method directly. """ raise NotImplementedError() def get_pk_constraint(self, connection, table_name, schema=None, **kw): """Return information about the primary key constraint on table_name`. Given a :class:`.Connection`, a string `table_name`, and an optional string `schema`, return primary key information as a dictionary with these keys: constrained_columns a list of column names that make up the primary key name optional name of the primary key constraint. """ raise NotImplementedError() def get_foreign_keys(self, connection, table_name, schema=None, **kw): """Return information about foreign_keys in `table_name`. Given a :class:`.Connection`, a string `table_name`, and an optional string `schema`, return foreign key information as a list of dicts with these keys: name the constraint's name constrained_columns a list of column names that make up the foreign key referred_schema the name of the referred schema referred_table the name of the referred table referred_columns a list of column names in the referred table that correspond to constrained_columns """ raise NotImplementedError() def get_table_names(self, connection, schema=None, **kw): """Return a list of table names for `schema`.""" raise NotImplementedError def get_view_names(self, connection, schema=None, **kw): """Return a list of all view names available in the database. schema: Optional, retrieve names from a non-default schema. """ raise NotImplementedError() def get_view_definition(self, connection, view_name, schema=None, **kw): """Return view definition. Given a :class:`.Connection`, a string `view_name`, and an optional string `schema`, return the view definition. """ raise NotImplementedError() def get_indexes(self, connection, table_name, schema=None, **kw): """Return information about indexes in `table_name`. 
Given a :class:`.Connection`, a string `table_name` and an optional string `schema`, return index information as a list of dictionaries with these keys: name the index's name column_names list of column names in order unique boolean """ raise NotImplementedError() def get_unique_constraints(self, table_name, schema=None, **kw): """Return information about unique constraints in `table_name`. Given a string `table_name` and an optional string `schema`, return unique constraint information as a list of dicts with these keys: name the unique constraint's name column_names list of column names in order """ raise NotImplementedError() def normalize_name(self, name): """convert the given name to lowercase if it is detected as case insensitive. this method is only used if the dialect defines requires_name_normalize=True. """ raise NotImplementedError() def denormalize_name(self, name): """convert the given name to a case insensitive identifier for the backend if it is an all-lowercase name. this method is only used if the dialect defines requires_name_normalize=True. """ raise NotImplementedError() def has_table(self, connection, table_name, schema=None): """Check the existence of a particular table in the database. Given a :class:`.Connection` object and a string `table_name`, return True if the given table (possibly within the specified `schema`) exists in the database, False otherwise. """ raise NotImplementedError() def has_sequence(self, connection, sequence_name, schema=None): """Check the existence of a particular sequence in the database. Given a :class:`.Connection` object and a string `sequence_name`, return True if the given sequence exists in the database, False otherwise. """ raise NotImplementedError() def _get_server_version_info(self, connection): """Retrieve the server version info from the given connection. This is used by the default implementation to populate the "server_version_info" attribute and is called exactly once upon first connect. """ raise NotImplementedError() def _get_default_schema_name(self, connection): """Return the string name of the currently selected schema from the given connection. This is used by the default implementation to populate the "default_schema_name" attribute and is called exactly once upon first connect. """ raise NotImplementedError() def do_begin(self, dbapi_connection): """Provide an implementation of ``connection.begin()``, given a DB-API connection. The DBAPI has no dedicated "begin" method and it is expected that transactions are implicit. This hook is provided for those DBAPIs that might need additional help in this area. Note that :meth:`.Dialect.do_begin` is not called unless a :class:`.Transaction` object is in use. The :meth:`.Dialect.do_autocommit` hook is provided for DBAPIs that need some extra commands emitted after a commit in order to enter the next transaction, when the SQLAlchemy :class:`.Connection` is used in it's default "autocommit" mode. :param dbapi_connection: a DBAPI connection, typically proxied within a :class:`.ConnectionFairy`. """ raise NotImplementedError() def do_rollback(self, dbapi_connection): """Provide an implementation of ``connection.rollback()``, given a DB-API connection. :param dbapi_connection: a DBAPI connection, typically proxied within a :class:`.ConnectionFairy`. """ raise NotImplementedError() def do_commit(self, dbapi_connection): """Provide an implementation of ``connection.commit()``, given a DB-API connection. 
:param dbapi_connection: a DBAPI connection, typically proxied within a :class:`.ConnectionFairy`. """ raise NotImplementedError() def do_close(self, dbapi_connection): """Provide an implementation of ``connection.close()``, given a DBAPI connection. This hook is called by the :class:`.Pool` when a connection has been detached from the pool, or is being returned beyond the normal capacity of the pool. .. versionadded:: 0.8 """ raise NotImplementedError() def create_xid(self): """Create a two-phase transaction ID. This id will be passed to do_begin_twophase(), do_rollback_twophase(), do_commit_twophase(). Its format is unspecified. """ raise NotImplementedError() def do_savepoint(self, connection, name): """Create a savepoint with the given name. :param connection: a :class:`.Connection`. :param name: savepoint name. """ raise NotImplementedError() def do_rollback_to_savepoint(self, connection, name): """Rollback a connection to the named savepoint. :param connection: a :class:`.Connection`. :param name: savepoint name. """ raise NotImplementedError() def do_release_savepoint(self, connection, name): """Release the named savepoint on a connection. :param connection: a :class:`.Connection`. :param name: savepoint name. """ raise NotImplementedError() def do_begin_twophase(self, connection, xid): """Begin a two phase transaction on the given connection. :param connection: a :class:`.Connection`. :param xid: xid """ raise NotImplementedError() def do_prepare_twophase(self, connection, xid): """Prepare a two phase transaction on the given connection. :param connection: a :class:`.Connection`. :param xid: xid """ raise NotImplementedError() def do_rollback_twophase(self, connection, xid, is_prepared=True, recover=False): """Rollback a two phase transaction on the given connection. :param connection: a :class:`.Connection`. :param xid: xid :param is_prepared: whether or not :meth:`.TwoPhaseTransaction.prepare` was called. :param recover: if the recover flag was passed. """ raise NotImplementedError() def do_commit_twophase(self, connection, xid, is_prepared=True, recover=False): """Commit a two phase transaction on the given connection. :param connection: a :class:`.Connection`. :param xid: xid :param is_prepared: whether or not :meth:`.TwoPhaseTransaction.prepare` was called. :param recover: if the recover flag was passed. """ raise NotImplementedError() def do_recover_twophase(self, connection): """Recover list of uncommited prepared two phase transaction identifiers on the given connection. :param connection: a :class:`.Connection`. """ raise NotImplementedError() def do_executemany(self, cursor, statement, parameters, context=None): """Provide an implementation of ``cursor.executemany(statement, parameters)``.""" raise NotImplementedError() def do_execute(self, cursor, statement, parameters, context=None): """Provide an implementation of ``cursor.execute(statement, parameters)``.""" raise NotImplementedError() def do_execute_no_params(self, cursor, statement, parameters, context=None): """Provide an implementation of ``cursor.execute(statement)``. The parameter collection should not be sent. """ raise NotImplementedError() def is_disconnect(self, e, connection, cursor): """Return True if the given DB-API error indicates an invalid connection""" raise NotImplementedError() def connect(self): """return a callable which sets up a newly created DBAPI connection. The callable accepts a single argument "conn" which is the DBAPI connection itself. It has no return value. 
This is used to set dialect-wide per-connection options such as isolation modes, unicode modes, etc. If a callable is returned, it will be assembled into a pool listener that receives the direct DBAPI connection, with all wrappers removed. If None is returned, no listener will be generated. """ return None def reset_isolation_level(self, dbapi_conn): """Given a DBAPI connection, revert its isolation to the default.""" raise NotImplementedError() def set_isolation_level(self, dbapi_conn, level): """Given a DBAPI connection, set its isolation level.""" raise NotImplementedError() def get_isolation_level(self, dbapi_conn): """Given a DBAPI connection, return its isolation level.""" raise NotImplementedError() class ExecutionContext(object): """A messenger object for a Dialect that corresponds to a single execution. ExecutionContext should have these data members: connection Connection object which can be freely used by default value generators to execute SQL. This Connection should reference the same underlying connection/transactional resources of root_connection. root_connection Connection object which is the source of this ExecutionContext. This Connection may have close_with_result=True set, in which case it can only be used once. dialect dialect which created this ExecutionContext. cursor DB-API cursor procured from the connection, compiled if passed to constructor, sqlalchemy.engine.base.Compiled object being executed, statement string version of the statement to be executed. Is either passed to the constructor, or must be created from the sql.Compiled object by the time pre_exec() has completed. parameters bind parameters passed to the execute() method. For compiled statements, this is a dictionary or list of dictionaries. For textual statements, it should be in a format suitable for the dialect's paramstyle (i.e. dict or list of dicts for non positional, list or list of lists/tuples for positional). isinsert True if the statement is an INSERT. isupdate True if the statement is an UPDATE. should_autocommit True if the statement is a "committable" statement. prefetch_cols a list of Column objects for which a client-side default was fired off. Applies to inserts and updates. postfetch_cols a list of Column objects for which a server-side default or inline SQL expression value was fired off. Applies to inserts and updates. """ def create_cursor(self): """Return a new cursor generated from this ExecutionContext's connection. Some dialects may wish to change the behavior of connection.cursor(), such as postgresql which may return a PG "server side" cursor. """ raise NotImplementedError() def pre_exec(self): """Called before an execution of a compiled statement. If a compiled statement was passed to this ExecutionContext, the `statement` and `parameters` datamembers must be initialized after this statement is complete. """ raise NotImplementedError() def post_exec(self): """Called after the execution of a compiled statement. If a compiled statement was passed to this ExecutionContext, the `last_insert_ids`, `last_inserted_params`, etc. datamembers should be available after this method completes. """ raise NotImplementedError() def result(self): """Return a result object corresponding to this ExecutionContext. Returns a ResultProxy. 
""" raise NotImplementedError() def handle_dbapi_exception(self, e): """Receive a DBAPI exception which occurred upon execute, result fetch, etc.""" raise NotImplementedError() def should_autocommit_text(self, statement): """Parse the given textual statement and return True if it refers to a "committable" statement""" raise NotImplementedError() def lastrow_has_defaults(self): """Return True if the last INSERT or UPDATE row contained inlined or database-side defaults. """ raise NotImplementedError() def get_rowcount(self): """Return the DBAPI ``cursor.rowcount`` value, or in some cases an interpreted value. See :attr:`.ResultProxy.rowcount` for details on this. """ raise NotImplementedError() class Compiled(object): """Represent a compiled SQL or DDL expression. The ``__str__`` method of the ``Compiled`` object should produce the actual text of the statement. ``Compiled`` objects are specific to their underlying database dialect, and also may or may not be specific to the columns referenced within a particular set of bind parameters. In no case should the ``Compiled`` object be dependent on the actual values of those bind parameters, even though it may reference those values as defaults. """ def __init__(self, dialect, statement, bind=None, compile_kwargs=util.immutabledict()): """Construct a new ``Compiled`` object. :param dialect: ``Dialect`` to compile against. :param statement: ``ClauseElement`` to be compiled. :param bind: Optional Engine or Connection to compile this statement against. :param compile_kwargs: additional kwargs that will be passed to the initial call to :meth:`.Compiled.process`. .. versionadded:: 0.8 """ self.dialect = dialect self.bind = bind if statement is not None: self.statement = statement self.can_execute = statement.supports_execution self.string = self.process(self.statement, **compile_kwargs) @util.deprecated("0.7", ":class:`.Compiled` objects now compile " "within the constructor.") def compile(self): """Produce the internal string representation of this element.""" pass @property def sql_compiler(self): """Return a Compiled that is capable of processing SQL expressions. If this compiler is one, it would likely just return 'self'. """ raise NotImplementedError() def process(self, obj, **kwargs): return obj._compiler_dispatch(self, **kwargs) def __str__(self): """Return the string text of the generated SQL or DDL.""" return self.string or '' def construct_params(self, params=None): """Return the bind params for this compiled object. :param params: a dict of string/object pairs whose values will override bind values compiled in to the statement. """ raise NotImplementedError() @property def params(self): """Return the bind params for this compiled object.""" return self.construct_params() def execute(self, *multiparams, **params): """Execute this compiled object.""" e = self.bind if e is None: raise exc.UnboundExecutionError( "This Compiled object is not bound to any Engine " "or Connection.") return e._execute_compiled(self, multiparams, params) def scalar(self, *multiparams, **params): """Execute this compiled object and return the result's scalar value.""" return self.execute(*multiparams, **params).scalar() class TypeCompiler(object): """Produces DDL specification for TypeEngine objects.""" def __init__(self, dialect): self.dialect = dialect def process(self, type_): return type_._compiler_dispatch(self) class Connectable(object): """Interface for an object which supports execution of SQL constructs. 
The two implementations of :class:`.Connectable` are :class:`.Connection` and :class:`.Engine`. Connectable must also implement the 'dialect' member which references a :class:`.Dialect` instance. """ dispatch = event.dispatcher(events.ConnectionEvents) def connect(self, **kwargs): """Return a :class:`.Connection` object. Depending on context, this may be ``self`` if this object is already an instance of :class:`.Connection`, or a newly procured :class:`.Connection` if this object is an instance of :class:`.Engine`. """ def contextual_connect(self): """Return a :class:`.Connection` object which may be part of an ongoing context. Depending on context, this may be ``self`` if this object is already an instance of :class:`.Connection`, or a newly procured :class:`.Connection` if this object is an instance of :class:`.Engine`. """ raise NotImplementedError() @util.deprecated("0.7", "Use the create() method on the given schema " "object directly, i.e. :meth:`.Table.create`, " ":meth:`.Index.create`, :meth:`.MetaData.create_all`") def create(self, entity, **kwargs): """Emit CREATE statements for the given schema entity.""" raise NotImplementedError() @util.deprecated("0.7", "Use the drop() method on the given schema " "object directly, i.e. :meth:`.Table.drop`, " ":meth:`.Index.drop`, :meth:`.MetaData.drop_all`") def drop(self, entity, **kwargs): """Emit DROP statements for the given schema entity.""" raise NotImplementedError() def execute(self, object, *multiparams, **params): """Executes the given construct and returns a :class:`.ResultProxy`.""" raise NotImplementedError() def scalar(self, object, *multiparams, **params): """Executes and returns the first column of the first row. The underlying cursor is closed after execution. """ raise NotImplementedError() def _run_visitor(self, visitorcallable, element, **kwargs): raise NotImplementedError() def _execute_clauseelement(self, elem, multiparams=None, params=None): raise NotImplementedError() SQLAlchemy-0.8.4/lib/sqlalchemy/engine/reflection.py0000644000076500000240000004613012251150015023106 0ustar classicstaff00000000000000# engine/reflection.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Provides an abstraction for obtaining database schema information. Usage Notes: Here are some general conventions when accessing the low level inspector methods such as get_table_names, get_columns, etc. 1. Inspector methods return lists of dicts in most cases for the following reasons: * They're both standard types that can be serialized. * Using a dict instead of a tuple allows easy expansion of attributes. * Using a list for the outer structure maintains order and is easy to work with (e.g. list comprehension [d['name'] for d in cols]). 2. Records that contain a name, such as the column name in a column record use the key 'name'. So for most return values, each record will have a 'name' attribute.. """ from .. import exc, sql from .. import schema as sa_schema from .. import util from ..types import TypeEngine from ..util import deprecated from ..util import topological from .. 
import inspection from .base import Connectable @util.decorator def cache(fn, self, con, *args, **kw): info_cache = kw.get('info_cache', None) if info_cache is None: return fn(self, con, *args, **kw) key = ( fn.__name__, tuple(a for a in args if isinstance(a, basestring)), tuple((k, v) for k, v in kw.iteritems() if isinstance(v, (basestring, int, float))) ) ret = info_cache.get(key) if ret is None: ret = fn(self, con, *args, **kw) info_cache[key] = ret return ret class Inspector(object): """Performs database schema inspection. The Inspector acts as a proxy to the reflection methods of the :class:`~sqlalchemy.engine.interfaces.Dialect`, providing a consistent interface as well as caching support for previously fetched metadata. A :class:`.Inspector` object is usually created via the :func:`.inspect` function:: from sqlalchemy import inspect, create_engine engine = create_engine('...') insp = inspect(engine) The inspection method above is equivalent to using the :meth:`.Inspector.from_engine` method, i.e.:: engine = create_engine('...') insp = Inspector.from_engine(engine) Where above, the :class:`~sqlalchemy.engine.interfaces.Dialect` may opt to return an :class:`.Inspector` subclass that provides additional methods specific to the dialect's target database. """ def __init__(self, bind): """Initialize a new :class:`.Inspector`. :param bind: a :class:`~sqlalchemy.engine.Connectable`, which is typically an instance of :class:`~sqlalchemy.engine.Engine` or :class:`~sqlalchemy.engine.Connection`. For a dialect-specific instance of :class:`.Inspector`, see :meth:`.Inspector.from_engine` """ # this might not be a connection, it could be an engine. self.bind = bind # set the engine if hasattr(bind, 'engine'): self.engine = bind.engine else: self.engine = bind if self.engine is bind: # if engine, ensure initialized bind.connect().close() self.dialect = self.engine.dialect self.info_cache = {} @classmethod def from_engine(cls, bind): """Construct a new dialect-specific Inspector object from the given engine or connection. :param bind: a :class:`~sqlalchemy.engine.Connectable`, which is typically an instance of :class:`~sqlalchemy.engine.Engine` or :class:`~sqlalchemy.engine.Connection`. This method differs from direct a direct constructor call of :class:`.Inspector` in that the :class:`~sqlalchemy.engine.interfaces.Dialect` is given a chance to provide a dialect-specific :class:`.Inspector` instance, which may provide additional methods. See the example at :class:`.Inspector`. """ if hasattr(bind.dialect, 'inspector'): return bind.dialect.inspector(bind) return Inspector(bind) @inspection._inspects(Connectable) def _insp(bind): return Inspector.from_engine(bind) @property def default_schema_name(self): """Return the default schema name presented by the dialect for the current engine's database user. E.g. this is typically ``public`` for Postgresql and ``dbo`` for SQL Server. """ return self.dialect.default_schema_name def get_schema_names(self): """Return all schema names. """ if hasattr(self.dialect, 'get_schema_names'): return self.dialect.get_schema_names(self.bind, info_cache=self.info_cache) return [] def get_table_names(self, schema=None, order_by=None): """Return all table names in referred to within a particular schema. The names are expected to be real tables only, not views. Views are instead returned using the :meth:`.Inspector.get_view_names` method. :param schema: Schema name. If ``schema`` is left at ``None``, the database's default schema is used, else the named schema is searched. 
If the database does not support named schemas, behavior is undefined if ``schema`` is not passed as ``None``. :param order_by: Optional, may be the string "foreign_key" to sort the result on foreign key dependencies. .. versionchanged:: 0.8 the "foreign_key" sorting sorts tables in order of dependee to dependent; that is, in creation order, rather than in drop order. This is to maintain consistency with similar features such as :attr:`.MetaData.sorted_tables` and :func:`.util.sort_tables`. .. seealso:: :attr:`.MetaData.sorted_tables` """ if hasattr(self.dialect, 'get_table_names'): tnames = self.dialect.get_table_names(self.bind, schema, info_cache=self.info_cache) else: tnames = self.engine.table_names(schema) if order_by == 'foreign_key': tuples = [] for tname in tnames: for fkey in self.get_foreign_keys(tname, schema): if tname != fkey['referred_table']: tuples.append((fkey['referred_table'], tname)) tnames = list(topological.sort(tuples, tnames)) return tnames def get_table_options(self, table_name, schema=None, **kw): """Return a dictionary of options specified when the table of the given name was created. This currently includes some options that apply to MySQL tables. """ if hasattr(self.dialect, 'get_table_options'): return self.dialect.get_table_options( self.bind, table_name, schema, info_cache=self.info_cache, **kw) return {} def get_view_names(self, schema=None): """Return all view names in `schema`. :param schema: Optional, retrieve names from a non-default schema. """ return self.dialect.get_view_names(self.bind, schema, info_cache=self.info_cache) def get_view_definition(self, view_name, schema=None): """Return definition for `view_name`. :param schema: Optional, retrieve names from a non-default schema. """ return self.dialect.get_view_definition( self.bind, view_name, schema, info_cache=self.info_cache) def get_columns(self, table_name, schema=None, **kw): """Return information about columns in `table_name`. Given a string `table_name` and an optional string `schema`, return column information as a list of dicts with these keys: name the column's name type :class:`~sqlalchemy.types.TypeEngine` nullable boolean default the column's default value attrs dict containing optional column attributes """ col_defs = self.dialect.get_columns(self.bind, table_name, schema, info_cache=self.info_cache, **kw) for col_def in col_defs: # make this easy and only return instances for coltype coltype = col_def['type'] if not isinstance(coltype, TypeEngine): col_def['type'] = coltype() return col_defs @deprecated('0.7', 'Call to deprecated method get_primary_keys.' ' Use get_pk_constraint instead.') def get_primary_keys(self, table_name, schema=None, **kw): """Return information about primary keys in `table_name`. Given a string `table_name`, and an optional string `schema`, return primary key information as a list of column names. """ return self.dialect.get_pk_constraint(self.bind, table_name, schema, info_cache=self.info_cache, **kw)['constrained_columns'] def get_pk_constraint(self, table_name, schema=None, **kw): """Return information about primary key constraint on `table_name`. Given a string `table_name`, and an optional string `schema`, return primary key information as a dictionary with these keys: constrained_columns a list of column names that make up the primary key name optional name of the primary key constraint. 
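        A minimal usage sketch, assuming an :class:`.Inspector` named
        ``insp`` and an existing table named ``'user'``::

            pk = insp.get_pk_constraint('user')
            pk['constrained_columns']   # e.g. ['id']
            pk['name']                  # constraint name, may be None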
""" return self.dialect.get_pk_constraint(self.bind, table_name, schema, info_cache=self.info_cache, **kw) def get_foreign_keys(self, table_name, schema=None, **kw): """Return information about foreign_keys in `table_name`. Given a string `table_name`, and an optional string `schema`, return foreign key information as a list of dicts with these keys: constrained_columns a list of column names that make up the foreign key referred_schema the name of the referred schema referred_table the name of the referred table referred_columns a list of column names in the referred table that correspond to constrained_columns name optional name of the foreign key constraint. """ return self.dialect.get_foreign_keys(self.bind, table_name, schema, info_cache=self.info_cache, **kw) def get_indexes(self, table_name, schema=None, **kw): """Return information about indexes in `table_name`. Given a string `table_name` and an optional string `schema`, return index information as a list of dicts with these keys: name the index's name column_names list of column names in order unique boolean """ return self.dialect.get_indexes(self.bind, table_name, schema, info_cache=self.info_cache, **kw) def get_unique_constraints(self, table_name, schema=None, **kw): """Return information about unique constraints in `table_name`. Given a string `table_name` and an optional string `schema`, return unique constraint information as a list of dicts with these keys: name the unique constraint's name column_names list of column names in order .. versionadded:: 0.8.4 """ return self.dialect.get_unique_constraints( self.bind, table_name, schema, info_cache=self.info_cache, **kw) def reflecttable(self, table, include_columns, exclude_columns=()): """Given a Table object, load its internal constructs based on introspection. This is the underlying method used by most dialects to produce table reflection. Direct usage is like:: from sqlalchemy import create_engine, MetaData, Table from sqlalchemy.engine import reflection engine = create_engine('...') meta = MetaData() user_table = Table('user', meta) insp = Inspector.from_engine(engine) insp.reflecttable(user_table, None) :param table: a :class:`~sqlalchemy.schema.Table` instance. :param include_columns: a list of string column names to include in the reflection process. If ``None``, all columns are reflected. """ dialect = self.bind.dialect # table attributes we might need. reflection_options = dict( (k, table.kwargs.get(k)) for k in dialect.reflection_options if k in table.kwargs) schema = table.schema table_name = table.name # apply table options tbl_opts = self.get_table_options(table_name, schema, **table.kwargs) if tbl_opts: table.kwargs.update(tbl_opts) # table.kwargs will need to be passed to each reflection method. Make # sure keywords are strings. 
tblkw = table.kwargs.copy() for (k, v) in tblkw.items(): del tblkw[k] tblkw[str(k)] = v # Py2K if isinstance(schema, str): schema = schema.decode(dialect.encoding) if isinstance(table_name, str): table_name = table_name.decode(dialect.encoding) # end Py2K # columns found_table = False cols_by_orig_name = {} for col_d in self.get_columns(table_name, schema, **tblkw): found_table = True orig_name = col_d['name'] table.dispatch.column_reflect(self, table, col_d) name = col_d['name'] if include_columns and name not in include_columns: continue if exclude_columns and name in exclude_columns: continue coltype = col_d['type'] col_kw = { 'nullable': col_d['nullable'], } for k in ('autoincrement', 'quote', 'info', 'key'): if k in col_d: col_kw[k] = col_d[k] colargs = [] if col_d.get('default') is not None: # the "default" value is assumed to be a literal SQL # expression, so is wrapped in text() so that no quoting # occurs on re-issuance. colargs.append( sa_schema.DefaultClause( sql.text(col_d['default']), _reflected=True ) ) if 'sequence' in col_d: # TODO: mssql, maxdb and sybase are using this. seq = col_d['sequence'] sequence = sa_schema.Sequence(seq['name'], 1, 1) if 'start' in seq: sequence.start = seq['start'] if 'increment' in seq: sequence.increment = seq['increment'] colargs.append(sequence) cols_by_orig_name[orig_name] = col = \ sa_schema.Column(name, coltype, *colargs, **col_kw) table.append_column(col) if not found_table: raise exc.NoSuchTableError(table.name) # Primary keys pk_cons = self.get_pk_constraint(table_name, schema, **tblkw) if pk_cons: pk_cols = [ cols_by_orig_name[pk] for pk in pk_cons['constrained_columns'] if pk in cols_by_orig_name and pk not in exclude_columns ] pk_cols += [ pk for pk in table.primary_key if pk.key in exclude_columns ] primary_key_constraint = sa_schema.PrimaryKeyConstraint( name=pk_cons.get('name'), *pk_cols ) table.append_constraint(primary_key_constraint) # Foreign keys fkeys = self.get_foreign_keys(table_name, schema, **tblkw) for fkey_d in fkeys: conname = fkey_d['name'] # look for columns by orig name in cols_by_orig_name, # but support columns that are in-Python only as fallback constrained_columns = [ cols_by_orig_name[c].key if c in cols_by_orig_name else c for c in fkey_d['constrained_columns'] ] if exclude_columns and set(constrained_columns).intersection( exclude_columns): continue referred_schema = fkey_d['referred_schema'] referred_table = fkey_d['referred_table'] referred_columns = fkey_d['referred_columns'] refspec = [] if referred_schema is not None: sa_schema.Table(referred_table, table.metadata, autoload=True, schema=referred_schema, autoload_with=self.bind, **reflection_options ) for column in referred_columns: refspec.append(".".join( [referred_schema, referred_table, column])) else: sa_schema.Table(referred_table, table.metadata, autoload=True, autoload_with=self.bind, **reflection_options ) for column in referred_columns: refspec.append(".".join([referred_table, column])) table.append_constraint( sa_schema.ForeignKeyConstraint(constrained_columns, refspec, conname, link_to_name=True)) # Indexes indexes = self.get_indexes(table_name, schema) for index_d in indexes: name = index_d['name'] columns = index_d['column_names'] unique = index_d['unique'] flavor = index_d.get('type', 'unknown type') if include_columns and \ not set(columns).issubset(include_columns): util.warn( "Omitting %s KEY for (%s), key covers omitted columns." 
% (flavor, ', '.join(columns))) continue # look for columns by orig name in cols_by_orig_name, # but support columns that are in-Python only as fallback sa_schema.Index(name, *[ cols_by_orig_name[c] if c in cols_by_orig_name else table.c[c] for c in columns ], **dict(unique=unique)) SQLAlchemy-0.8.4/lib/sqlalchemy/engine/result.py0000644000076500000240000010337512251150015022277 0ustar classicstaff00000000000000# engine/result.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Define result set constructs including :class:`.ResultProxy` and :class:`.RowProxy.""" from itertools import izip from .. import exc, types, util from ..sql import expression import collections # This reconstructor is necessary so that pickles with the C extension or # without use the same Binary format. try: # We need a different reconstructor on the C extension so that we can # add extra checks that fields have correctly been initialized by # __setstate__. from sqlalchemy.cresultproxy import safe_rowproxy_reconstructor # The extra function embedding is needed so that the # reconstructor function has the same signature whether or not # the extension is present. def rowproxy_reconstructor(cls, state): return safe_rowproxy_reconstructor(cls, state) except ImportError: def rowproxy_reconstructor(cls, state): obj = cls.__new__(cls) obj.__setstate__(state) return obj try: from sqlalchemy.cresultproxy import BaseRowProxy except ImportError: class BaseRowProxy(object): __slots__ = ('_parent', '_row', '_processors', '_keymap') def __init__(self, parent, row, processors, keymap): """RowProxy objects are constructed by ResultProxy objects.""" self._parent = parent self._row = row self._processors = processors self._keymap = keymap def __reduce__(self): return (rowproxy_reconstructor, (self.__class__, self.__getstate__())) def values(self): """Return the values represented by this RowProxy as a list.""" return list(self) def __iter__(self): for processor, value in izip(self._processors, self._row): if processor is None: yield value else: yield processor(value) def __len__(self): return len(self._row) def __getitem__(self, key): try: processor, obj, index = self._keymap[key] except KeyError: processor, obj, index = self._parent._key_fallback(key) except TypeError: if isinstance(key, slice): l = [] for processor, value in izip(self._processors[key], self._row[key]): if processor is None: l.append(value) else: l.append(processor(value)) return tuple(l) else: raise if index is None: raise exc.InvalidRequestError( "Ambiguous column name '%s' in result set! " "try 'use_labels' option on select statement." % key) if processor is not None: return processor(self._row[index]) else: return self._row[index] def __getattr__(self, name): try: return self[name] except KeyError, e: raise AttributeError(e.args[0]) class RowProxy(BaseRowProxy): """Proxy values from a single cursor row. Mostly follows "ordered dictionary" behavior, mapping result values to the string-based column name, the integer position of the result in the row, as well as Column instances which can be mapped to the original Columns that produced this result set (for results that correspond to constructed SQL expressions). 
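    A short usage sketch, assuming a result whose statement selected a
    ``user_name`` column::

        row = result.fetchone()
        'user_name' in row      # mapping-style membership test
        row.keys()              # column names, in statement order
        row['user_name']        # value, with any type processing applied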
""" __slots__ = () def __contains__(self, key): return self._parent._has_key(self._row, key) def __getstate__(self): return { '_parent': self._parent, '_row': tuple(self) } def __setstate__(self, state): self._parent = parent = state['_parent'] self._row = state['_row'] self._processors = parent._processors self._keymap = parent._keymap __hash__ = None def __eq__(self, other): return other is self or other == tuple(self) def __ne__(self, other): return not self.__eq__(other) def __repr__(self): return repr(tuple(self)) def has_key(self, key): """Return True if this RowProxy contains the given key.""" return self._parent._has_key(self._row, key) def items(self): """Return a list of tuples, each tuple containing a key/value pair.""" # TODO: no coverage here return [(key, self[key]) for key in self.iterkeys()] def keys(self): """Return the list of keys as strings represented by this RowProxy.""" return self._parent.keys def iterkeys(self): return iter(self._parent.keys) def itervalues(self): return iter(self) try: # Register RowProxy with Sequence, # so sequence protocol is implemented from collections import Sequence Sequence.register(RowProxy) except ImportError: pass class ResultMetaData(object): """Handle cursor.description, applying additional info from an execution context.""" def __init__(self, parent, metadata): self._processors = processors = [] # We do not strictly need to store the processor in the key mapping, # though it is faster in the Python version (probably because of the # saved attribute lookup self._processors) self._keymap = keymap = {} self.keys = [] context = parent.context dialect = context.dialect typemap = dialect.dbapi_type_map translate_colname = context._translate_colname self.case_sensitive = dialect.case_sensitive # high precedence key values. primary_keymap = {} for i, rec in enumerate(metadata): colname = rec[0] coltype = rec[1] if dialect.description_encoding: colname = dialect._description_decoder(colname) if translate_colname: colname, untranslated = translate_colname(colname) if dialect.requires_name_normalize: colname = dialect.normalize_name(colname) if context.result_map: try: name, obj, type_ = context.result_map[colname if self.case_sensitive else colname.lower()] except KeyError: name, obj, type_ = \ colname, None, typemap.get(coltype, types.NULLTYPE) else: name, obj, type_ = \ colname, None, typemap.get(coltype, types.NULLTYPE) processor = context.get_result_processor(type_, colname, coltype) processors.append(processor) rec = (processor, obj, i) # indexes as keys. This is only needed for the Python version of # RowProxy (the C version uses a faster path for integer indexes). primary_keymap[i] = rec # populate primary keymap, looking for conflicts. if primary_keymap.setdefault( name if self.case_sensitive else name.lower(), rec) is not rec: # place a record that doesn't have the "index" - this # is interpreted later as an AmbiguousColumnError, # but only when actually accessed. Columns # colliding by name is not a problem if those names # aren't used; integer access is always # unambiguous. primary_keymap[name if self.case_sensitive else name.lower()] = rec = (None, obj, None) self.keys.append(colname) if obj: for o in obj: keymap[o] = rec # technically we should be doing this but we # are saving on callcounts by not doing so. # if keymap.setdefault(o, rec) is not rec: # keymap[o] = (None, obj, None) if translate_colname and \ untranslated: keymap[untranslated] = rec # overwrite keymap values with those of the # high precedence keymap. 
keymap.update(primary_keymap) if parent._echo: context.engine.logger.debug( "Col %r", tuple(x[0] for x in metadata)) @util.pending_deprecation("0.8", "sqlite dialect uses " "_translate_colname() now") def _set_keymap_synonym(self, name, origname): """Set a synonym for the given name. Some dialects (SQLite at the moment) may use this to adjust the column names that are significant within a row. """ rec = (processor, obj, i) = self._keymap[origname if self.case_sensitive else origname.lower()] if self._keymap.setdefault(name, rec) is not rec: self._keymap[name] = (processor, obj, None) def _key_fallback(self, key, raiseerr=True): map = self._keymap result = None if isinstance(key, basestring): result = map.get(key if self.case_sensitive else key.lower()) # fallback for targeting a ColumnElement to a textual expression # this is a rare use case which only occurs when matching text() # or colummn('name') constructs to ColumnElements, or after a # pickle/unpickle roundtrip elif isinstance(key, expression.ColumnElement): if key._label and ( key._label if self.case_sensitive else key._label.lower()) in map: result = map[key._label if self.case_sensitive else key._label.lower()] elif hasattr(key, 'name') and ( key.name if self.case_sensitive else key.name.lower()) in map: # match is only on name. result = map[key.name if self.case_sensitive else key.name.lower()] # search extra hard to make sure this # isn't a column/label name overlap. # this check isn't currently available if the row # was unpickled. if result is not None and \ result[1] is not None: for obj in result[1]: if key._compare_name_for_result(obj): break else: result = None if result is None: if raiseerr: raise exc.NoSuchColumnError( "Could not locate column in row for column '%s'" % expression._string_or_unprintable(key)) else: return None else: map[key] = result return result def _has_key(self, row, key): if key in self._keymap: return True else: return self._key_fallback(key, False) is not None def __getstate__(self): return { '_pickled_keymap': dict( (key, index) for key, (processor, obj, index) in self._keymap.iteritems() if isinstance(key, (basestring, int)) ), 'keys': self.keys, "case_sensitive": self.case_sensitive, } def __setstate__(self, state): # the row has been processed at pickling time so we don't need any # processor anymore self._processors = [None for _ in xrange(len(state['keys']))] self._keymap = keymap = {} for key, index in state['_pickled_keymap'].iteritems(): # not preserving "obj" here, unfortunately our # proxy comparison fails with the unpickle keymap[key] = (None, None, index) self.keys = state['keys'] self.case_sensitive = state['case_sensitive'] self._echo = False class ResultProxy(object): """Wraps a DB-API cursor object to provide easier access to row columns. Individual columns may be accessed by their integer position, case-insensitive column name, or by ``schema.Column`` object. e.g.:: row = fetchone() col1 = row[0] # access via integer position col2 = row['col2'] # access via name col3 = row[mytable.c.mycol] # access via Column object. ``ResultProxy`` also handles post-processing of result column data using ``TypeEngine`` objects, which are referenced from the originating SQL statement that produced this result set. 
""" _process_row = RowProxy out_parameters = None _can_close_connection = False _metadata = None def __init__(self, context): self.context = context self.dialect = context.dialect self.closed = False self.cursor = self._saved_cursor = context.cursor self.connection = context.root_connection self._echo = self.connection._echo and \ context.engine._should_log_debug() self._init_metadata() def _init_metadata(self): metadata = self._cursor_description() if metadata is not None: self._metadata = ResultMetaData(self, metadata) def keys(self): """Return the current set of string keys for rows.""" if self._metadata: return self._metadata.keys else: return [] @util.memoized_property def rowcount(self): """Return the 'rowcount' for this result. The 'rowcount' reports the number of rows *matched* by the WHERE criterion of an UPDATE or DELETE statement. .. note:: Notes regarding :attr:`.ResultProxy.rowcount`: * This attribute returns the number of rows *matched*, which is not necessarily the same as the number of rows that were actually *modified* - an UPDATE statement, for example, may have no net change on a given row if the SET values given are the same as those present in the row already. Such a row would be matched but not modified. On backends that feature both styles, such as MySQL, rowcount is configured by default to return the match count in all cases. * :attr:`.ResultProxy.rowcount` is *only* useful in conjunction with an UPDATE or DELETE statement. Contrary to what the Python DBAPI says, it does *not* return the number of rows available from the results of a SELECT statement as DBAPIs cannot support this functionality when rows are unbuffered. * :attr:`.ResultProxy.rowcount` may not be fully implemented by all dialects. In particular, most DBAPIs do not support an aggregate rowcount result from an executemany call. The :meth:`.ResultProxy.supports_sane_rowcount` and :meth:`.ResultProxy.supports_sane_multi_rowcount` methods will report from the dialect if each usage is known to be supported. * Statements that use RETURNING may not return a correct rowcount. """ try: return self.context.rowcount except Exception, e: self.connection._handle_dbapi_exception( e, None, None, self.cursor, self.context) @property def lastrowid(self): """return the 'lastrowid' accessor on the DBAPI cursor. This is a DBAPI specific method and is only functional for those backends which support it, for statements where it is appropriate. It's behavior is not consistent across backends. Usage of this method is normally unnecessary when using insert() expression constructs; the :attr:`~ResultProxy.inserted_primary_key` attribute provides a tuple of primary key values for a newly inserted row, regardless of database backend. """ try: return self._saved_cursor.lastrowid except Exception, e: self.connection._handle_dbapi_exception( e, None, None, self._saved_cursor, self.context) @property def returns_rows(self): """True if this :class:`.ResultProxy` returns rows. I.e. if it is legal to call the methods :meth:`~.ResultProxy.fetchone`, :meth:`~.ResultProxy.fetchmany` :meth:`~.ResultProxy.fetchall`. """ return self._metadata is not None @property def is_insert(self): """True if this :class:`.ResultProxy` is the result of a executing an expression language compiled :func:`.expression.insert` construct. When True, this implies that the :attr:`inserted_primary_key` attribute is accessible, assuming the statement did not include a user defined "returning" construct. 
""" return self.context.isinsert def _cursor_description(self): """May be overridden by subclasses.""" return self._saved_cursor.description def close(self, _autoclose_connection=True): """Close this ResultProxy. Closes the underlying DBAPI cursor corresponding to the execution. Note that any data cached within this ResultProxy is still available. For some types of results, this may include buffered rows. If this ResultProxy was generated from an implicit execution, the underlying Connection will also be closed (returns the underlying DBAPI connection to the connection pool.) This method is called automatically when: * all result rows are exhausted using the fetchXXX() methods. * cursor.description is None. """ if not self.closed: self.closed = True self.connection._safe_close_cursor(self.cursor) if _autoclose_connection and \ self.connection.should_close_with_result: self.connection.close() # allow consistent errors self.cursor = None def __iter__(self): while True: row = self.fetchone() if row is None: raise StopIteration else: yield row @util.memoized_property def inserted_primary_key(self): """Return the primary key for the row just inserted. The return value is a list of scalar values corresponding to the list of primary key columns in the target table. This only applies to single row :func:`.insert` constructs which did not explicitly specify :meth:`.Insert.returning`. Note that primary key columns which specify a server_default clause, or otherwise do not qualify as "autoincrement" columns (see the notes at :class:`.Column`), and were generated using the database-side default, will appear in this list as ``None`` unless the backend supports "returning" and the insert statement executed with the "implicit returning" enabled. Raises :class:`~sqlalchemy.exc.InvalidRequestError` if the executed statement is not a compiled expression construct or is not an insert() construct. """ if not self.context.compiled: raise exc.InvalidRequestError( "Statement is not a compiled " "expression construct.") elif not self.context.isinsert: raise exc.InvalidRequestError( "Statement is not an insert() " "expression construct.") elif self.context._is_explicit_returning: raise exc.InvalidRequestError( "Can't call inserted_primary_key " "when returning() " "is used.") return self.context.inserted_primary_key def last_updated_params(self): """Return the collection of updated parameters from this execution. Raises :class:`~sqlalchemy.exc.InvalidRequestError` if the executed statement is not a compiled expression construct or is not an update() construct. """ if not self.context.compiled: raise exc.InvalidRequestError( "Statement is not a compiled " "expression construct.") elif not self.context.isupdate: raise exc.InvalidRequestError( "Statement is not an update() " "expression construct.") elif self.context.executemany: return self.context.compiled_parameters else: return self.context.compiled_parameters[0] def last_inserted_params(self): """Return the collection of inserted parameters from this execution. Raises :class:`~sqlalchemy.exc.InvalidRequestError` if the executed statement is not a compiled expression construct or is not an insert() construct. 
""" if not self.context.compiled: raise exc.InvalidRequestError( "Statement is not a compiled " "expression construct.") elif not self.context.isinsert: raise exc.InvalidRequestError( "Statement is not an insert() " "expression construct.") elif self.context.executemany: return self.context.compiled_parameters else: return self.context.compiled_parameters[0] def lastrow_has_defaults(self): """Return ``lastrow_has_defaults()`` from the underlying :class:`.ExecutionContext`. See :class:`.ExecutionContext` for details. """ return self.context.lastrow_has_defaults() def postfetch_cols(self): """Return ``postfetch_cols()`` from the underlying :class:`.ExecutionContext`. See :class:`.ExecutionContext` for details. Raises :class:`~sqlalchemy.exc.InvalidRequestError` if the executed statement is not a compiled expression construct or is not an insert() or update() construct. """ if not self.context.compiled: raise exc.InvalidRequestError( "Statement is not a compiled " "expression construct.") elif not self.context.isinsert and not self.context.isupdate: raise exc.InvalidRequestError( "Statement is not an insert() or update() " "expression construct.") return self.context.postfetch_cols def prefetch_cols(self): """Return ``prefetch_cols()`` from the underlying :class:`.ExecutionContext`. See :class:`.ExecutionContext` for details. Raises :class:`~sqlalchemy.exc.InvalidRequestError` if the executed statement is not a compiled expression construct or is not an insert() or update() construct. """ if not self.context.compiled: raise exc.InvalidRequestError( "Statement is not a compiled " "expression construct.") elif not self.context.isinsert and not self.context.isupdate: raise exc.InvalidRequestError( "Statement is not an insert() or update() " "expression construct.") return self.context.prefetch_cols def supports_sane_rowcount(self): """Return ``supports_sane_rowcount`` from the dialect. See :attr:`.ResultProxy.rowcount` for background. """ return self.dialect.supports_sane_rowcount def supports_sane_multi_rowcount(self): """Return ``supports_sane_multi_rowcount`` from the dialect. See :attr:`.ResultProxy.rowcount` for background. """ return self.dialect.supports_sane_multi_rowcount def _fetchone_impl(self): try: return self.cursor.fetchone() except AttributeError: self._non_result() def _fetchmany_impl(self, size=None): try: if size is None: return self.cursor.fetchmany() else: return self.cursor.fetchmany(size) except AttributeError: self._non_result() def _fetchall_impl(self): try: return self.cursor.fetchall() except AttributeError: self._non_result() def _non_result(self): if self._metadata is None: raise exc.ResourceClosedError( "This result object does not return rows. 
" "It has been closed automatically.", ) else: raise exc.ResourceClosedError("This result object is closed.") def process_rows(self, rows): process_row = self._process_row metadata = self._metadata keymap = metadata._keymap processors = metadata._processors if self._echo: log = self.context.engine.logger.debug l = [] for row in rows: log("Row %r", row) l.append(process_row(metadata, row, processors, keymap)) return l else: return [process_row(metadata, row, processors, keymap) for row in rows] def fetchall(self): """Fetch all rows, just like DB-API ``cursor.fetchall()``.""" try: l = self.process_rows(self._fetchall_impl()) self.close() return l except Exception, e: self.connection._handle_dbapi_exception( e, None, None, self.cursor, self.context) def fetchmany(self, size=None): """Fetch many rows, just like DB-API ``cursor.fetchmany(size=cursor.arraysize)``. If rows are present, the cursor remains open after this is called. Else the cursor is automatically closed and an empty list is returned. """ try: l = self.process_rows(self._fetchmany_impl(size)) if len(l) == 0: self.close() return l except Exception, e: self.connection._handle_dbapi_exception( e, None, None, self.cursor, self.context) def fetchone(self): """Fetch one row, just like DB-API ``cursor.fetchone()``. If a row is present, the cursor remains open after this is called. Else the cursor is automatically closed and None is returned. """ try: row = self._fetchone_impl() if row is not None: return self.process_rows([row])[0] else: self.close() return None except Exception, e: self.connection._handle_dbapi_exception( e, None, None, self.cursor, self.context) def first(self): """Fetch the first row and then close the result set unconditionally. Returns None if no row is present. """ if self._metadata is None: self._non_result() try: row = self._fetchone_impl() except Exception, e: self.connection._handle_dbapi_exception( e, None, None, self.cursor, self.context) try: if row is not None: return self.process_rows([row])[0] else: return None finally: self.close() def scalar(self): """Fetch the first column of the first row, and close the result set. Returns None if no row is present. """ row = self.first() if row is not None: return row[0] else: return None class BufferedRowResultProxy(ResultProxy): """A ResultProxy with row buffering behavior. ``ResultProxy`` that buffers the contents of a selection of rows before ``fetchone()`` is called. This is to allow the results of ``cursor.description`` to be available immediately, when interfacing with a DB-API that requires rows to be consumed before this information is available (currently psycopg2, when used with server-side cursors). The pre-fetching behavior fetches only one row initially, and then grows its buffer size by a fixed amount with each successive need for additional rows up to a size of 100. """ def _init_metadata(self): self.__buffer_rows() super(BufferedRowResultProxy, self)._init_metadata() # this is a "growth chart" for the buffering of rows. 
# each successive __buffer_rows call will use the next # value in the list for the buffer size until the max # is reached size_growth = { 1: 5, 5: 10, 10: 20, 20: 50, 50: 100, 100: 250, 250: 500, 500: 1000 } def __buffer_rows(self): size = getattr(self, '_bufsize', 1) self.__rowbuffer = collections.deque(self.cursor.fetchmany(size)) self._bufsize = self.size_growth.get(size, size) def _fetchone_impl(self): if self.closed: return None if not self.__rowbuffer: self.__buffer_rows() if not self.__rowbuffer: return None return self.__rowbuffer.popleft() def _fetchmany_impl(self, size=None): if size is None: return self._fetchall_impl() result = [] for x in range(0, size): row = self._fetchone_impl() if row is None: break result.append(row) return result def _fetchall_impl(self): self.__rowbuffer.extend(self.cursor.fetchall()) ret = self.__rowbuffer self.__rowbuffer = collections.deque() return ret class FullyBufferedResultProxy(ResultProxy): """A result proxy that buffers rows fully upon creation. Used for operations where a result is to be delivered after the database conversation can not be continued, such as MSSQL INSERT...OUTPUT after an autocommit. """ def _init_metadata(self): super(FullyBufferedResultProxy, self)._init_metadata() self.__rowbuffer = self._buffer_rows() def _buffer_rows(self): return collections.deque(self.cursor.fetchall()) def _fetchone_impl(self): if self.__rowbuffer: return self.__rowbuffer.popleft() else: return None def _fetchmany_impl(self, size=None): if size is None: return self._fetchall_impl() result = [] for x in range(0, size): row = self._fetchone_impl() if row is None: break result.append(row) return result def _fetchall_impl(self): ret = self.__rowbuffer self.__rowbuffer = collections.deque() return ret class BufferedColumnRow(RowProxy): def __init__(self, parent, row, processors, keymap): # preprocess row row = list(row) # this is a tad faster than using enumerate index = 0 for processor in parent._orig_processors: if processor is not None: row[index] = processor(row[index]) index += 1 row = tuple(row) super(BufferedColumnRow, self).__init__(parent, row, processors, keymap) class BufferedColumnResultProxy(ResultProxy): """A ResultProxy with column buffering behavior. ``ResultProxy`` that loads all columns into memory each time fetchone() is called. If fetchmany() or fetchall() are called, the full grid of results is fetched. This is to operate with databases where result rows contain "live" results that fall out of scope unless explicitly fetched. Currently this includes cx_Oracle LOB objects. """ _process_row = BufferedColumnRow def _init_metadata(self): super(BufferedColumnResultProxy, self)._init_metadata() metadata = self._metadata # orig_processors will be used to preprocess each row when they are # constructed. metadata._orig_processors = metadata._processors # replace the all type processors by None processors. metadata._processors = [None for _ in xrange(len(metadata.keys))] keymap = {} for k, (func, obj, index) in metadata._keymap.iteritems(): keymap[k] = (None, obj, index) self._metadata._keymap = keymap def fetchall(self): # can't call cursor.fetchall(), since rows must be # fully processed before requesting more from the DBAPI. l = [] while True: row = self.fetchone() if row is None: break l.append(row) return l def fetchmany(self, size=None): # can't call cursor.fetchmany(), since rows must be # fully processed before requesting more from the DBAPI. 
if size is None: return self.fetchall() l = [] for i in xrange(size): row = self.fetchone() if row is None: break l.append(row) return l SQLAlchemy-0.8.4/lib/sqlalchemy/engine/strategies.py0000644000076500000240000002134212251150015023124 0ustar classicstaff00000000000000# engine/strategies.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Strategies for creating new instances of Engine types. These are semi-private implementation classes which provide the underlying behavior for the "strategy" keyword argument available on :func:`~sqlalchemy.engine.create_engine`. Current available options are ``plain``, ``threadlocal``, and ``mock``. New strategies can be added via new ``EngineStrategy`` classes. """ from operator import attrgetter from sqlalchemy.engine import base, threadlocal, url from sqlalchemy import util, exc, event from sqlalchemy import pool as poollib strategies = {} class EngineStrategy(object): """An adaptor that processes input arguments and produces an Engine. Provides a ``create`` method that receives input arguments and produces an instance of base.Engine or a subclass. """ def __init__(self): strategies[self.name] = self def create(self, *args, **kwargs): """Given arguments, returns a new Engine instance.""" raise NotImplementedError() class DefaultEngineStrategy(EngineStrategy): """Base class for built-in strategies.""" def create(self, name_or_url, **kwargs): # create url.URL object u = url.make_url(name_or_url) dialect_cls = u.get_dialect() dialect_args = {} # consume dialect arguments from kwargs for k in util.get_cls_kwargs(dialect_cls): if k in kwargs: dialect_args[k] = kwargs.pop(k) dbapi = kwargs.pop('module', None) if dbapi is None: dbapi_args = {} for k in util.get_func_kwargs(dialect_cls.dbapi): if k in kwargs: dbapi_args[k] = kwargs.pop(k) dbapi = dialect_cls.dbapi(**dbapi_args) dialect_args['dbapi'] = dbapi # create dialect dialect = dialect_cls(**dialect_args) # assemble connection arguments (cargs, cparams) = dialect.create_connect_args(u) cparams.update(kwargs.pop('connect_args', {})) # look for existing pool or create pool = kwargs.pop('pool', None) if pool is None: def connect(): try: return dialect.connect(*cargs, **cparams) except dialect.dbapi.Error, e: invalidated = dialect.is_disconnect(e, None, None) # Py3K #raise exc.DBAPIError.instance(None, None, # e, dialect.dbapi.Error, # connection_invalidated=invalidated #) from e # Py2K import sys raise exc.DBAPIError.instance( None, None, e, dialect.dbapi.Error, connection_invalidated=invalidated ), None, sys.exc_info()[2] # end Py2K creator = kwargs.pop('creator', connect) poolclass = kwargs.pop('poolclass', None) if poolclass is None: poolclass = dialect_cls.get_pool_class(u) pool_args = {} # consume pool arguments from kwargs, translating a few of # the arguments translate = {'logging_name': 'pool_logging_name', 'echo': 'echo_pool', 'timeout': 'pool_timeout', 'recycle': 'pool_recycle', 'events': 'pool_events', 'use_threadlocal': 'pool_threadlocal', 'reset_on_return': 'pool_reset_on_return'} for k in util.get_cls_kwargs(poolclass): tk = translate.get(k, k) if tk in kwargs: pool_args[k] = kwargs.pop(tk) pool = poolclass(creator, **pool_args) else: if isinstance(pool, poollib._DBProxy): pool = pool.get_pool(*cargs, **cparams) else: pool = pool # create engine. 
engineclass = self.engine_cls engine_args = {} for k in util.get_cls_kwargs(engineclass): if k in kwargs: engine_args[k] = kwargs.pop(k) _initialize = kwargs.pop('_initialize', True) # all kwargs should be consumed if kwargs: raise TypeError( "Invalid argument(s) %s sent to create_engine(), " "using configuration %s/%s/%s. Please check that the " "keyword arguments are appropriate for this combination " "of components." % (','.join("'%s'" % k for k in kwargs), dialect.__class__.__name__, pool.__class__.__name__, engineclass.__name__)) engine = engineclass(pool, dialect, u, **engine_args) if _initialize: do_on_connect = dialect.on_connect() if do_on_connect: def on_connect(dbapi_connection, connection_record): conn = getattr( dbapi_connection, '_sqla_unwrap', dbapi_connection) if conn is None: return do_on_connect(conn) event.listen(pool, 'first_connect', on_connect) event.listen(pool, 'connect', on_connect) @util.only_once def first_connect(dbapi_connection, connection_record): c = base.Connection(engine, connection=dbapi_connection) # TODO: removing this allows the on connect activities # to generate events. tests currently assume these aren't # sent. do we want users to get all the initial connect # activities as events ? c._has_events = False dialect.initialize(c) event.listen(pool, 'first_connect', first_connect) return engine class PlainEngineStrategy(DefaultEngineStrategy): """Strategy for configuring a regular Engine.""" name = 'plain' engine_cls = base.Engine PlainEngineStrategy() class ThreadLocalEngineStrategy(DefaultEngineStrategy): """Strategy for configuring an Engine with threadlocal behavior.""" name = 'threadlocal' engine_cls = threadlocal.TLEngine ThreadLocalEngineStrategy() class MockEngineStrategy(EngineStrategy): """Strategy for configuring an Engine-like object with mocked execution. Produces a single mock Connectable object which dispatches statement execution to a passed-in function. 
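    For illustration only, a rough sketch of how the ``mock`` strategy is
    commonly wired up to capture DDL as strings; the ``dump`` callable and
    the ``metadata`` collection are assumed to be defined by the
    application::

        from sqlalchemy import create_engine

        def dump(sql, *multiparams, **params):
            print sql.compile(dialect=engine.dialect)

        engine = create_engine('postgresql://', strategy='mock', executor=dump)
        metadata.create_all(engine, checkfirst=False)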
""" name = 'mock' def create(self, name_or_url, executor, **kwargs): # create url.URL object u = url.make_url(name_or_url) dialect_cls = u.get_dialect() dialect_args = {} # consume dialect arguments from kwargs for k in util.get_cls_kwargs(dialect_cls): if k in kwargs: dialect_args[k] = kwargs.pop(k) # create dialect dialect = dialect_cls(**dialect_args) return MockEngineStrategy.MockConnection(dialect, executor) class MockConnection(base.Connectable): def __init__(self, dialect, execute): self._dialect = dialect self.execute = execute engine = property(lambda s: s) dialect = property(attrgetter('_dialect')) name = property(lambda s: s._dialect.name) def contextual_connect(self, **kwargs): return self def execution_options(self, **kw): return self def compiler(self, statement, parameters, **kwargs): return self._dialect.compiler( statement, parameters, engine=self, **kwargs) def create(self, entity, **kwargs): kwargs['checkfirst'] = False from sqlalchemy.engine import ddl ddl.SchemaGenerator( self.dialect, self, **kwargs).traverse_single(entity) def drop(self, entity, **kwargs): kwargs['checkfirst'] = False from sqlalchemy.engine import ddl ddl.SchemaDropper( self.dialect, self, **kwargs).traverse_single(entity) def _run_visitor(self, visitorcallable, element, connection=None, **kwargs): kwargs['checkfirst'] = False visitorcallable(self.dialect, self, **kwargs).traverse_single(element) def execute(self, object, *multiparams, **params): raise NotImplementedError() MockEngineStrategy() SQLAlchemy-0.8.4/lib/sqlalchemy/engine/threadlocal.py0000644000076500000240000001000112251147171023233 0ustar classicstaff00000000000000# engine/threadlocal.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Provides a thread-local transactional wrapper around the root Engine class. The ``threadlocal`` module is invoked when using the ``strategy="threadlocal"`` flag with :func:`~sqlalchemy.engine.create_engine`. This module is semi-private and is invoked automatically when the threadlocal engine strategy is used. """ from .. import util from . import base import weakref class TLConnection(base.Connection): def __init__(self, *arg, **kw): super(TLConnection, self).__init__(*arg, **kw) self.__opencount = 0 def _increment_connect(self): self.__opencount += 1 return self def close(self): if self.__opencount == 1: base.Connection.close(self) self.__opencount -= 1 def _force_close(self): self.__opencount = 0 base.Connection.close(self) class TLEngine(base.Engine): """An Engine that includes support for thread-local managed transactions. """ _tl_connection_cls = TLConnection def __init__(self, *args, **kwargs): super(TLEngine, self).__init__(*args, **kwargs) self._connections = util.threading.local() def contextual_connect(self, **kw): if not hasattr(self._connections, 'conn'): connection = None else: connection = self._connections.conn() if connection is None or connection.closed: # guards against pool-level reapers, if desired. 
# or not connection.connection.is_valid: connection = self._tl_connection_cls( self, self.pool.connect(), **kw) self._connections.conn = weakref.ref(connection) return connection._increment_connect() def begin_twophase(self, xid=None): if not hasattr(self._connections, 'trans'): self._connections.trans = [] self._connections.trans.append( self.contextual_connect().begin_twophase(xid=xid)) return self def begin_nested(self): if not hasattr(self._connections, 'trans'): self._connections.trans = [] self._connections.trans.append( self.contextual_connect().begin_nested()) return self def begin(self): if not hasattr(self._connections, 'trans'): self._connections.trans = [] self._connections.trans.append(self.contextual_connect().begin()) return self def __enter__(self): return self def __exit__(self, type, value, traceback): if type is None: self.commit() else: self.rollback() def prepare(self): if not hasattr(self._connections, 'trans') or \ not self._connections.trans: return self._connections.trans[-1].prepare() def commit(self): if not hasattr(self._connections, 'trans') or \ not self._connections.trans: return trans = self._connections.trans.pop(-1) trans.commit() def rollback(self): if not hasattr(self._connections, 'trans') or \ not self._connections.trans: return trans = self._connections.trans.pop(-1) trans.rollback() def dispose(self): self._connections = util.threading.local() super(TLEngine, self).dispose() @property def closed(self): return not hasattr(self._connections, 'conn') or \ self._connections.conn() is None or \ self._connections.conn().closed def close(self): if not self.closed: self.contextual_connect().close() connection = self._connections.conn() connection._force_close() del self._connections.conn self._connections.trans = [] def __repr__(self): return 'TLEngine(%s)' % str(self.url) SQLAlchemy-0.8.4/lib/sqlalchemy/engine/url.py0000644000076500000240000001614412251150015021560 0ustar classicstaff00000000000000# engine/url.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Provides the :class:`~sqlalchemy.engine.url.URL` class which encapsulates information about a database connection specification. The URL object is created automatically when :func:`~sqlalchemy.engine.create_engine` is called with a string argument; alternatively, the URL is a public-facing construct which can be used directly and is also accepted directly by ``create_engine()``. """ import re import urllib from .. import exc, util from . import Dialect class URL(object): """ Represent the components of a URL used to connect to a database. This object is suitable to be passed directly to a :func:`~sqlalchemy.create_engine` call. The fields of the URL are parsed from a string by the :func:`.make_url` function. the string format of the URL is an RFC-1738-style string. All initialization parameters are available as public attributes. :param drivername: the name of the database backend. This name will correspond to a module in sqlalchemy/databases or a third party plug-in. :param username: The user name. :param password: database password. :param host: The name of the host. :param port: The port number. :param database: The database name. :param query: A dictionary of options to be passed to the dialect and/or the DBAPI upon connect. 
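    e.g., an illustrative parse (attribute values shown as comments are
    approximate)::

        from sqlalchemy.engine.url import make_url

        url = make_url("postgresql://scott:tiger@localhost:5432/test")
        url.drivername, url.username, url.host, url.port, url.database
        # ('postgresql', 'scott', 'localhost', 5432, 'test')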
""" def __init__(self, drivername, username=None, password=None, host=None, port=None, database=None, query=None): self.drivername = drivername self.username = username self.password = password self.host = host if port is not None: self.port = int(port) else: self.port = None self.database = database self.query = query or {} def __to_string__(self, hide_password=True): s = self.drivername + "://" if self.username is not None: s += self.username if self.password is not None: s += ':' + ('***' if hide_password else urllib.quote_plus(self.password)) s += "@" if self.host is not None: if ':' in self.host: s += "[%s]" % self.host else: s += self.host if self.port is not None: s += ':' + str(self.port) if self.database is not None: s += '/' + self.database if self.query: keys = self.query.keys() keys.sort() s += '?' + "&".join("%s=%s" % (k, self.query[k]) for k in keys) return s def __str__(self): return self.__to_string__(hide_password=False) def __repr__(self): return self.__to_string__() def __hash__(self): return hash(str(self)) def __eq__(self, other): return \ isinstance(other, URL) and \ self.drivername == other.drivername and \ self.username == other.username and \ self.password == other.password and \ self.host == other.host and \ self.database == other.database and \ self.query == other.query def get_dialect(self): """Return the SQLAlchemy database dialect class corresponding to this URL's driver name. """ if '+' not in self.drivername: name = self.drivername else: name = self.drivername.replace('+', '.') from sqlalchemy.dialects import registry cls = registry.load(name) # check for legacy dialects that # would return a module with 'dialect' as the # actual class if hasattr(cls, 'dialect') and \ isinstance(cls.dialect, type) and \ issubclass(cls.dialect, Dialect): return cls.dialect else: return cls def translate_connect_args(self, names=[], **kw): """Translate url attributes into a dictionary of connection arguments. Returns attributes of this url (`host`, `database`, `username`, `password`, `port`) as a plain dictionary. The attribute names are used as the keys by default. Unset or false attributes are omitted from the final dictionary. :param \**kw: Optional, alternate key names for url attributes. :param names: Deprecated. Same purpose as the keyword-based alternate names, but correlates the name to the original positionally. """ translated = {} attribute_names = ['host', 'database', 'username', 'password', 'port'] for sname in attribute_names: if names: name = names.pop(0) elif sname in kw: name = kw[sname] else: name = sname if name is not None and getattr(self, sname, False): translated[name] = getattr(self, sname) return translated def make_url(name_or_url): """Given a string or unicode instance, produce a new URL instance. The given string is parsed according to the RFC 1738 spec. If an existing URL object is passed, just returns the object. """ if isinstance(name_or_url, basestring): return _parse_rfc1738_args(name_or_url) else: return name_or_url def _parse_rfc1738_args(name): pattern = re.compile(r''' (?P[\w\+]+):// (?: (?P[^:/]*) (?::(?P[^/]*))? @)? (?: (?: \[(?P[^/]+)\] | (?P[^/:]+) )? (?::(?P[^/]*))? )? (?:/(?P.*))? 
''', re.X) m = pattern.match(name) if m is not None: components = m.groupdict() if components['database'] is not None: tokens = components['database'].split('?', 2) components['database'] = tokens[0] query = (len(tokens) > 1 and dict(util.parse_qsl(tokens[1]))) or None # Py2K if query is not None: query = dict((k.encode('ascii'), query[k]) for k in query) # end Py2K else: query = None components['query'] = query if components['password'] is not None: components['password'] = \ urllib.unquote_plus(components['password']) ipv4host = components.pop('ipv4host') ipv6host = components.pop('ipv6host') components['host'] = ipv4host or ipv6host name = components.pop('name') return URL(name, **components) else: raise exc.ArgumentError( "Could not parse rfc1738 URL from string '%s'" % name) def _parse_keyvalue_args(name): m = re.match(r'(\w+)://(.*)', name) if m is not None: (name, args) = m.group(1, 2) opts = dict(util.parse_qsl(args)) return URL(name, *opts) else: return None SQLAlchemy-0.8.4/lib/sqlalchemy/engine/util.py0000644000076500000240000000573412251150015021736 0ustar classicstaff00000000000000# engine/util.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php from .. import util def _coerce_config(configuration, prefix): """Convert configuration values to expected types.""" options = dict((key[len(prefix):], configuration[key]) for key in configuration if key.startswith(prefix)) for option, type_ in ( ('convert_unicode', util.bool_or_str('force')), ('pool_timeout', int), ('echo', util.bool_or_str('debug')), ('echo_pool', util.bool_or_str('debug')), ('pool_recycle', int), ('pool_size', int), ('max_overflow', int), ('pool_threadlocal', bool), ('use_native_unicode', bool), ): util.coerce_kw_type(options, option, type_) return options def connection_memoize(key): """Decorator, memoize a function in a connection.info stash. Only applicable to functions which take no arguments other than a connection. The memo will be stored in ``connection.info[key]``. """ @util.decorator def decorated(fn, self, connection): connection = connection.connect() try: return connection.info[key] except KeyError: connection.info[key] = val = fn(self, connection) return val return decorated def py_fallback(): def _distill_params(multiparams, params): """Given arguments from the calling form *multiparams, **params, return a list of bind parameter structures, usually a list of dictionaries. In the case of 'raw' execution which accepts positional parameters, it may be a list of tuples or lists. 
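        Illustrative, informal examples of the distillation, based on the
        pure-Python fallback below::

            _distill_params(({"x": 5},), {})              # [{"x": 5}]
            _distill_params((), {"x": 5})                 # [{"x": 5}]
            _distill_params(([{"x": 5}, {"x": 6}],), {})  # [{"x": 5}, {"x": 6}]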
""" if not multiparams: if params: return [params] else: return [] elif len(multiparams) == 1: zero = multiparams[0] if isinstance(zero, (list, tuple)): if not zero or hasattr(zero[0], '__iter__') and \ not hasattr(zero[0], 'strip'): # execute(stmt, [{}, {}, {}, ...]) # execute(stmt, [(), (), (), ...]) return zero else: # execute(stmt, ("value", "value")) return [zero] elif hasattr(zero, 'keys'): # execute(stmt, {"key":"value"}) return [zero] else: # execute(stmt, "value") return [[zero]] else: if hasattr(multiparams[0], '__iter__') and \ not hasattr(multiparams[0], 'strip'): return multiparams else: return [multiparams] return locals() try: from sqlalchemy.cutils import _distill_params except ImportError: globals().update(py_fallback()) SQLAlchemy-0.8.4/lib/sqlalchemy/event.py0000644000076500000240000004234212251150015020631 0ustar classicstaff00000000000000# sqlalchemy/event.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Base event API.""" from . import util, exc from itertools import chain import weakref CANCEL = util.symbol('CANCEL') NO_RETVAL = util.symbol('NO_RETVAL') def listen(target, identifier, fn, *args, **kw): """Register a listener function for the given target. e.g.:: from sqlalchemy import event from sqlalchemy.schema import UniqueConstraint def unique_constraint_name(const, table): const.name = "uq_%s_%s" % ( table.name, list(const.columns)[0].name ) event.listen( UniqueConstraint, "after_parent_attach", unique_constraint_name) """ for evt_cls in _registrars[identifier]: tgt = evt_cls._accept_with(target) if tgt is not None: tgt.dispatch._listen(tgt, identifier, fn, *args, **kw) return raise exc.InvalidRequestError("No such event '%s' for target '%s'" % (identifier, target)) def listens_for(target, identifier, *args, **kw): """Decorate a function as a listener for the given target + identifier. e.g.:: from sqlalchemy import event from sqlalchemy.schema import UniqueConstraint @event.listens_for(UniqueConstraint, "after_parent_attach") def unique_constraint_name(const, table): const.name = "uq_%s_%s" % ( table.name, list(const.columns)[0].name ) """ def decorate(fn): listen(target, identifier, fn, *args, **kw) return fn return decorate def remove(target, identifier, fn): """Remove an event listener. Note that some event removals, particularly for those event dispatchers which create wrapper functions and secondary even listeners, may not yet be supported. """ for evt_cls in _registrars[identifier]: for tgt in evt_cls._accept_with(target): tgt.dispatch._remove(identifier, tgt, fn) return _registrars = util.defaultdict(list) def _is_event_name(name): return not name.startswith('_') and name != 'dispatch' class _UnpickleDispatch(object): """Serializable callable that re-generates an instance of :class:`_Dispatch` given a particular :class:`.Events` subclass. """ def __call__(self, _parent_cls): for cls in _parent_cls.__mro__: if 'dispatch' in cls.__dict__: return cls.__dict__['dispatch'].dispatch_cls(_parent_cls) else: raise AttributeError("No class with a 'dispatch' member present.") class _Dispatch(object): """Mirror the event listening definitions of an Events class with listener collections. Classes which define a "dispatch" member will return a non-instantiated :class:`._Dispatch` subclass when the member is accessed at the class level. 
When the "dispatch" member is accessed at the instance level of its owner, an instance of the :class:`._Dispatch` class is returned. A :class:`._Dispatch` class is generated for each :class:`.Events` class defined, by the :func:`._create_dispatcher_class` function. The original :class:`.Events` classes remain untouched. This decouples the construction of :class:`.Events` subclasses from the implementation used by the event internals, and allows inspecting tools like Sphinx to work in an unsurprising way against the public API. """ def __init__(self, _parent_cls): self._parent_cls = _parent_cls def _join(self, other): """Create a 'join' of this :class:`._Dispatch` and another. This new dispatcher will dispatch events to both :class:`._Dispatch` objects. Once constructed, the joined dispatch will respond to new events added to this dispatcher, but may not be aware of events added to the other dispatcher after creation of the join. This is currently for performance reasons so that both dispatchers need not be "evaluated" fully on each call. """ if '_joined_dispatch_cls' not in self.__class__.__dict__: cls = type( "Joined%s" % self.__class__.__name__, (_JoinedDispatcher, self.__class__), {} ) for ls in _event_descriptors(self): setattr(cls, ls.name, _JoinedDispatchDescriptor(ls.name)) self.__class__._joined_dispatch_cls = cls return self._joined_dispatch_cls(self, other) def __reduce__(self): return _UnpickleDispatch(), (self._parent_cls, ) def _update(self, other, only_propagate=True): """Populate from the listeners in another :class:`_Dispatch` object.""" for ls in _event_descriptors(other): getattr(self, ls.name).\ for_modify(self)._update(ls, only_propagate=only_propagate) @util.hybridmethod def _clear(self): for attr in dir(self): if _is_event_name(attr): getattr(self, attr).for_modify(self).clear() def _event_descriptors(target): return [getattr(target, k) for k in dir(target) if _is_event_name(k)] class _EventMeta(type): """Intercept new Event subclasses and create associated _Dispatch classes.""" def __init__(cls, classname, bases, dict_): _create_dispatcher_class(cls, classname, bases, dict_) return type.__init__(cls, classname, bases, dict_) def _create_dispatcher_class(cls, classname, bases, dict_): """Create a :class:`._Dispatch` class corresponding to an :class:`.Events` class.""" # there's all kinds of ways to do this, # i.e. make a Dispatch class that shares the '_listen' method # of the Event class, this is the straight monkeypatch. dispatch_base = getattr(cls, 'dispatch', _Dispatch) cls.dispatch = dispatch_cls = type("%sDispatch" % classname, (dispatch_base, ), {}) dispatch_cls._listen = cls._listen for k in dict_: if _is_event_name(k): setattr(dispatch_cls, k, _DispatchDescriptor(dict_[k])) _registrars[k].append(cls) def _remove_dispatcher(cls): for k in dir(cls): if _is_event_name(k): _registrars[k].remove(cls) if not _registrars[k]: del _registrars[k] class Events(object): """Define event listening functions for a particular target type.""" __metaclass__ = _EventMeta @classmethod def _accept_with(cls, target): # Mapper, ClassManager, Session override this to # also accept classes, scoped_sessions, sessionmakers, etc. 
if hasattr(target, 'dispatch') and ( isinstance(target.dispatch, cls.dispatch) or \ isinstance(target.dispatch, type) and \ issubclass(target.dispatch, cls.dispatch) ): return target else: return None @classmethod def _listen(cls, target, identifier, fn, propagate=False, insert=False): if insert: getattr(target.dispatch, identifier).\ for_modify(target.dispatch).insert(fn, target, propagate) else: getattr(target.dispatch, identifier).\ for_modify(target.dispatch).append(fn, target, propagate) @classmethod def _remove(cls, target, identifier, fn): getattr(target.dispatch, identifier).remove(fn, target) @classmethod def _clear(cls): cls.dispatch._clear() class _DispatchDescriptor(object): """Class-level attributes on :class:`._Dispatch` classes.""" def __init__(self, fn): self.__name__ = fn.__name__ self.__doc__ = fn.__doc__ self._clslevel = weakref.WeakKeyDictionary() self._empty_listeners = weakref.WeakKeyDictionary() def _contains(self, cls, evt): return cls in self._clslevel and \ evt in self._clslevel[cls] def insert(self, obj, target, propagate): assert isinstance(target, type), \ "Class-level Event targets must be classes." stack = [target] while stack: cls = stack.pop(0) stack.extend(cls.__subclasses__()) if cls is not target and cls not in self._clslevel: self.update_subclass(cls) else: if cls not in self._clslevel: self._clslevel[cls] = [] self._clslevel[cls].insert(0, obj) def append(self, obj, target, propagate): assert isinstance(target, type), \ "Class-level Event targets must be classes." stack = [target] while stack: cls = stack.pop(0) stack.extend(cls.__subclasses__()) if cls is not target and cls not in self._clslevel: self.update_subclass(cls) else: if cls not in self._clslevel: self._clslevel[cls] = [] self._clslevel[cls].append(obj) def update_subclass(self, target): if target not in self._clslevel: self._clslevel[target] = [] clslevel = self._clslevel[target] for cls in target.__mro__[1:]: if cls in self._clslevel: clslevel.extend([ fn for fn in self._clslevel[cls] if fn not in clslevel ]) def remove(self, obj, target): stack = [target] while stack: cls = stack.pop(0) stack.extend(cls.__subclasses__()) if cls in self._clslevel: self._clslevel[cls].remove(obj) def clear(self): """Clear all class level listeners""" for dispatcher in self._clslevel.values(): dispatcher[:] = [] def for_modify(self, obj): """Return an event collection which can be modified. For _DispatchDescriptor at the class level of a dispatcher, this returns self. """ return self def __get__(self, obj, cls): if obj is None: return self elif obj._parent_cls in self._empty_listeners: ret = self._empty_listeners[obj._parent_cls] else: self._empty_listeners[obj._parent_cls] = ret = \ _EmptyListener(self, obj._parent_cls) # assigning it to __dict__ means # memoized for fast re-access. but more memory. obj.__dict__[self.__name__] = ret return ret class _EmptyListener(object): """Serves as a class-level interface to the events served by a _DispatchDescriptor, when there are no instance-level events present. Is replaced by _ListenerCollection when instance-level events are added. """ def __init__(self, parent, target_cls): if target_cls not in parent._clslevel: parent.update_subclass(target_cls) self.parent = parent self.parent_listeners = parent._clslevel[target_cls] self.name = parent.__name__ self.propagate = frozenset() self.listeners = () def for_modify(self, obj): """Return an event collection which can be modified. 
For _EmptyListener at the instance level of a dispatcher, this generates a new _ListenerCollection, applies it to the instance, and returns it. """ result = _ListenerCollection(self.parent, obj._parent_cls) if obj.__dict__[self.name] is self: obj.__dict__[self.name] = result return result def _needs_modify(self, *args, **kw): raise NotImplementedError("need to call for_modify()") exec_once = insert = append = remove = clear = _needs_modify def __call__(self, *args, **kw): """Execute this event.""" for fn in self.parent_listeners: fn(*args, **kw) def __len__(self): return len(self.parent_listeners) def __iter__(self): return iter(self.parent_listeners) def __nonzero__(self): return bool(self.parent_listeners) class _CompoundListener(object): _exec_once = False def exec_once(self, *args, **kw): """Execute this event, but only if it has not been executed already for this collection.""" if not self._exec_once: self(*args, **kw) self._exec_once = True # I'm not entirely thrilled about the overhead here, # but this allows class-level listeners to be added # at any point. # # In the absense of instance-level listeners, # we stay with the _EmptyListener object when called # at the instance level. def __call__(self, *args, **kw): """Execute this event.""" for fn in self.parent_listeners: fn(*args, **kw) for fn in self.listeners: fn(*args, **kw) def __len__(self): return len(self.parent_listeners) + len(self.listeners) def __iter__(self): return chain(self.parent_listeners, self.listeners) def __nonzero__(self): return bool(self.listeners or self.parent_listeners) class _ListenerCollection(_CompoundListener): """Instance-level attributes on instances of :class:`._Dispatch`. Represents a collection of listeners. As of 0.7.9, _ListenerCollection is only first created via the _EmptyListener.for_modify() method. """ def __init__(self, parent, target_cls): if target_cls not in parent._clslevel: parent.update_subclass(target_cls) self.parent_listeners = parent._clslevel[target_cls] self.name = parent.__name__ self.listeners = [] self.propagate = set() def for_modify(self, obj): """Return an event collection which can be modified. For _ListenerCollection at the instance level of a dispatcher, this returns self. 
""" return self def _update(self, other, only_propagate=True): """Populate from the listeners in another :class:`_Dispatch` object.""" existing_listeners = self.listeners existing_listener_set = set(existing_listeners) self.propagate.update(other.propagate) existing_listeners.extend([l for l in other.listeners if l not in existing_listener_set and not only_propagate or l in self.propagate ]) def insert(self, obj, target, propagate): if obj not in self.listeners: self.listeners.insert(0, obj) if propagate: self.propagate.add(obj) def append(self, obj, target, propagate): if obj not in self.listeners: self.listeners.append(obj) if propagate: self.propagate.add(obj) def remove(self, obj, target): if obj in self.listeners: self.listeners.remove(obj) self.propagate.discard(obj) def clear(self): self.listeners[:] = [] self.propagate.clear() class _JoinedDispatcher(object): """Represent a connection between two _Dispatch objects.""" def __init__(self, local, parent): self.local = local self.parent = parent self._parent_cls = local._parent_cls class _JoinedDispatchDescriptor(object): def __init__(self, name): self.name = name def __get__(self, obj, cls): if obj is None: return self else: obj.__dict__[self.name] = ret = _JoinedListener( obj.parent, self.name, getattr(obj.local, self.name) ) return ret class _JoinedListener(_CompoundListener): _exec_once = False def __init__(self, parent, name, local): self.parent = parent self.name = name self.local = local self.parent_listeners = self.local # fix .listeners for the parent. This means # new events added to the parent won't be picked # up here. Alternatively, the listeners can # be via @property to just return getattr(self.parent, self.name) # each time. less performant. self.listeners = list(getattr(self.parent, self.name)) def for_modify(self, obj): self.local = self.parent_listeners = self.local.for_modify(obj) return self def insert(self, obj, target, propagate): self.local.insert(obj, target, propagate) def append(self, obj, target, propagate): self.local.append(obj, target, propagate) def remove(self, obj, target): self.local.remove(obj, target) def clear(self): raise NotImplementedError() class dispatcher(object): """Descriptor used by target classes to deliver the _Dispatch class at the class level and produce new _Dispatch instances for target instances. """ def __init__(self, events): self.dispatch_cls = events.dispatch self.events = events def __get__(self, obj, cls): if obj is None: return self.dispatch_cls obj.__dict__['dispatch'] = disp = self.dispatch_cls(cls) return disp SQLAlchemy-0.8.4/lib/sqlalchemy/events.py0000644000076500000240000006460012251150015021015 0ustar classicstaff00000000000000# sqlalchemy/events.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Core event interfaces.""" from . import event, exc, util engine = util.importlater('sqlalchemy', 'engine') pool = util.importlater('sqlalchemy', 'pool') class DDLEvents(event.Events): """ Define event listeners for schema objects, that is, :class:`.SchemaItem` and :class:`.SchemaEvent` subclasses, including :class:`.MetaData`, :class:`.Table`, :class:`.Column`. :class:`.MetaData` and :class:`.Table` support events specifically regarding when CREATE and DROP DDL is emitted to the database. 
Attachment events are also provided to customize behavior whenever a child schema element is associated with a parent, such as, when a :class:`.Column` is associated with its :class:`.Table`, when a :class:`.ForeignKeyConstraint` is associated with a :class:`.Table`, etc. Example using the ``after_create`` event:: from sqlalchemy import event from sqlalchemy import Table, Column, Metadata, Integer m = MetaData() some_table = Table('some_table', m, Column('data', Integer)) def after_create(target, connection, **kw): connection.execute("ALTER TABLE %s SET name=foo_%s" % (target.name, target.name)) event.listen(some_table, "after_create", after_create) DDL events integrate closely with the :class:`.DDL` class and the :class:`.DDLElement` hierarchy of DDL clause constructs, which are themselves appropriate as listener callables:: from sqlalchemy import DDL event.listen( some_table, "after_create", DDL("ALTER TABLE %(table)s SET name=foo_%(table)s") ) The methods here define the name of an event as well as the names of members that are passed to listener functions. See also: :ref:`event_toplevel` :class:`.DDLElement` :class:`.DDL` :ref:`schema_ddl_sequences` """ def before_create(self, target, connection, **kw): """Called before CREATE statments are emitted. :param target: the :class:`.MetaData` or :class:`.Table` object which is the target of the event. :param connection: the :class:`.Connection` where the CREATE statement or statements will be emitted. :param \**kw: additional keyword arguments relevant to the event. The contents of this dictionary may vary across releases, and include the list of tables being generated for a metadata-level event, the checkfirst flag, and other elements used by internal events. """ def after_create(self, target, connection, **kw): """Called after CREATE statments are emitted. :param target: the :class:`.MetaData` or :class:`.Table` object which is the target of the event. :param connection: the :class:`.Connection` where the CREATE statement or statements have been emitted. :param \**kw: additional keyword arguments relevant to the event. The contents of this dictionary may vary across releases, and include the list of tables being generated for a metadata-level event, the checkfirst flag, and other elements used by internal events. """ def before_drop(self, target, connection, **kw): """Called before DROP statments are emitted. :param target: the :class:`.MetaData` or :class:`.Table` object which is the target of the event. :param connection: the :class:`.Connection` where the DROP statement or statements will be emitted. :param \**kw: additional keyword arguments relevant to the event. The contents of this dictionary may vary across releases, and include the list of tables being generated for a metadata-level event, the checkfirst flag, and other elements used by internal events. """ def after_drop(self, target, connection, **kw): """Called after DROP statments are emitted. :param target: the :class:`.MetaData` or :class:`.Table` object which is the target of the event. :param connection: the :class:`.Connection` where the DROP statement or statements have been emitted. :param \**kw: additional keyword arguments relevant to the event. The contents of this dictionary may vary across releases, and include the list of tables being generated for a metadata-level event, the checkfirst flag, and other elements used by internal events. 
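        e.g., an illustrative sketch; the ``log`` object is assumed to be
        configured by the application::

            from sqlalchemy import event, MetaData

            m = MetaData()

            @event.listens_for(m, "after_drop")
            def receive_after_drop(target, connection, **kw):
                log.info("DROP statements emitted for %s", target)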
""" def before_parent_attach(self, target, parent): """Called before a :class:`.SchemaItem` is associated with a parent :class:`.SchemaItem`. :param target: the target object :param parent: the parent to which the target is being attached. :func:`.event.listen` also accepts a modifier for this event: :param propagate=False: When True, the listener function will be established for any copies made of the target object, i.e. those copies that are generated when :meth:`.Table.tometadata` is used. """ def after_parent_attach(self, target, parent): """Called after a :class:`.SchemaItem` is associated with a parent :class:`.SchemaItem`. :param target: the target object :param parent: the parent to which the target is being attached. :func:`.event.listen` also accepts a modifier for this event: :param propagate=False: When True, the listener function will be established for any copies made of the target object, i.e. those copies that are generated when :meth:`.Table.tometadata` is used. """ def column_reflect(self, inspector, table, column_info): """Called for each unit of 'column info' retrieved when a :class:`.Table` is being reflected. The dictionary of column information as returned by the dialect is passed, and can be modified. The dictionary is that returned in each element of the list returned by :meth:`.reflection.Inspector.get_columns`. The event is called before any action is taken against this dictionary, and the contents can be modified. The :class:`.Column` specific arguments ``info``, ``key``, and ``quote`` can also be added to the dictionary and will be passed to the constructor of :class:`.Column`. Note that this event is only meaningful if either associated with the :class:`.Table` class across the board, e.g.:: from sqlalchemy.schema import Table from sqlalchemy import event def listen_for_reflect(inspector, table, column_info): "receive a column_reflect event" # ... event.listen( Table, 'column_reflect', listen_for_reflect) ...or with a specific :class:`.Table` instance using the ``listeners`` argument:: def listen_for_reflect(inspector, table, column_info): "receive a column_reflect event" # ... t = Table( 'sometable', autoload=True, listeners=[ ('column_reflect', listen_for_reflect) ]) This because the reflection process initiated by ``autoload=True`` completes within the scope of the constructor for :class:`.Table`. """ class SchemaEventTarget(object): """Base class for elements that are the targets of :class:`.DDLEvents` events. This includes :class:`.SchemaItem` as well as :class:`.SchemaType`. """ dispatch = event.dispatcher(DDLEvents) def _set_parent(self, parent): """Associate with this SchemaEvent's parent object.""" raise NotImplementedError() def _set_parent_with_dispatch(self, parent): self.dispatch.before_parent_attach(self, parent) self._set_parent(parent) self.dispatch.after_parent_attach(self, parent) class PoolEvents(event.Events): """Available events for :class:`.Pool`. The methods here define the name of an event as well as the names of members that are passed to listener functions. 
e.g.:: from sqlalchemy import event def my_on_checkout(dbapi_conn, connection_rec, connection_proxy): "handle an on checkout event" event.listen(Pool, 'checkout', my_on_checkout) In addition to accepting the :class:`.Pool` class and :class:`.Pool` instances, :class:`.PoolEvents` also accepts :class:`.Engine` objects and the :class:`.Engine` class as targets, which will be resolved to the ``.pool`` attribute of the given engine or the :class:`.Pool` class:: engine = create_engine("postgresql://scott:tiger@localhost/test") # will associate with engine.pool event.listen(engine, 'checkout', my_on_checkout) """ @classmethod def _accept_with(cls, target): if isinstance(target, type): if issubclass(target, engine.Engine): return pool.Pool elif issubclass(target, pool.Pool): return target elif isinstance(target, engine.Engine): return target.pool else: return target def connect(self, dbapi_connection, connection_record): """Called once for each new DB-API connection or Pool's ``creator()``. :param dbapi_con: A newly connected raw DB-API connection (not a SQLAlchemy ``Connection`` wrapper). :param con_record: The ``_ConnectionRecord`` that persistently manages the connection """ def first_connect(self, dbapi_connection, connection_record): """Called exactly once for the first DB-API connection. :param dbapi_con: A newly connected raw DB-API connection (not a SQLAlchemy ``Connection`` wrapper). :param con_record: The ``_ConnectionRecord`` that persistently manages the connection """ def checkout(self, dbapi_connection, connection_record, connection_proxy): """Called when a connection is retrieved from the Pool. :param dbapi_con: A raw DB-API connection :param con_record: The ``_ConnectionRecord`` that persistently manages the connection :param con_proxy: The ``_ConnectionFairy`` which manages the connection for the span of the current checkout. If you raise a :class:`~sqlalchemy.exc.DisconnectionError`, the current connection will be disposed and a fresh connection retrieved. Processing of all checkout listeners will abort and restart using the new connection. """ def checkin(self, dbapi_connection, connection_record): """Called when a connection returns to the pool. Note that the connection may be closed, and may be None if the connection has been invalidated. ``checkin`` will not be called for detached connections. (They do not return to the pool.) :param dbapi_con: A raw DB-API connection :param con_record: The ``_ConnectionRecord`` that persistently manages the connection """ def reset(self, dbapi_con, con_record): """Called before the "reset" action occurs for a pooled connection. This event represents when the ``rollback()`` method is called on the DBAPI connection before it is returned to the pool. The behavior of "reset" can be controlled, including disabled, using the ``reset_on_return`` pool argument. The :meth:`.PoolEvents.reset` event is usually followed by the the :meth:`.PoolEvents.checkin` event is called, except in those cases where the connection is discarded immediately after reset. :param dbapi_con: A raw DB-API connection :param con_record: The ``_ConnectionRecord`` that persistently manages the connection .. versionadded:: 0.8 .. seealso:: :meth:`.ConnectionEvents.rollback` :meth:`.ConnectionEvents.commit` """ class ConnectionEvents(event.Events): """Available events for :class:`.Connectable`, which includes :class:`.Connection` and :class:`.Engine`. The methods here define the name of an event as well as the names of members that are passed to listener functions. 
An event listener can be associated with any :class:`.Connectable` class or instance, such as an :class:`.Engine`, e.g.:: from sqlalchemy import event, create_engine def before_cursor_execute(conn, cursor, statement, parameters, context, executemany): log.info("Received statement: %s" % statement) engine = create_engine('postgresql://scott:tiger@localhost/test') event.listen(engine, "before_cursor_execute", before_cursor_execute) or with a specific :class:`.Connection`:: with engine.begin() as conn: @event.listens_for(conn, 'before_cursor_execute') def before_cursor_execute(conn, cursor, statement, parameters, context, executemany): log.info("Received statement: %s" % statement) The :meth:`.before_execute` and :meth:`.before_cursor_execute` events can also be established with the ``retval=True`` flag, which allows modification of the statement and parameters to be sent to the database. The :meth:`.before_cursor_execute` event is particularly useful here to add ad-hoc string transformations, such as comments, to all executions:: from sqlalchemy.engine import Engine from sqlalchemy import event @event.listens_for(Engine, "before_cursor_execute", retval=True) def comment_sql_calls(conn, cursor, statement, parameters, context, executemany): statement = statement + " -- some comment" return statement, parameters .. note:: :class:`.ConnectionEvents` can be established on any combination of :class:`.Engine`, :class:`.Connection`, as well as instances of each of those classes. Events across all four scopes will fire off for a given instance of :class:`.Connection`. However, for performance reasons, the :class:`.Connection` object determines at instantiation time whether or not its parent :class:`.Engine` has event listeners established. Event listeners added to the :class:`.Engine` class or to an instance of :class:`.Engine` *after* the instantiation of a dependent :class:`.Connection` instance will usually *not* be available on that :class:`.Connection` instance. The newly added listeners will instead take effect for :class:`.Connection` instances created subsequent to those event listeners being established on the parent :class:`.Engine` class or instance. :param retval=False: Applies to the :meth:`.before_execute` and :meth:`.before_cursor_execute` events only. When True, the user-defined event function must have a return value, which is a tuple of parameters that replace the given statement and parameters. See those methods for a description of specific return arguments. .. versionchanged:: 0.8 :class:`.ConnectionEvents` can now be associated with any :class:`.Connectable` including :class:`.Connection`, in addition to the existing support for :class:`.Engine`. 
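    As a further illustration, an approximate sketch (not a recipe shipped
    with this module) combining both cursor-level events to log statement
    timings; the ``_query_start_time`` attribute is ad-hoc::

        import time
        import logging

        from sqlalchemy import event
        from sqlalchemy.engine import Engine

        log = logging.getLogger(__name__)

        @event.listens_for(Engine, "before_cursor_execute")
        def _before_cursor_execute(conn, cursor, statement, parameters,
                                   context, executemany):
            context._query_start_time = time.time()

        @event.listens_for(Engine, "after_cursor_execute")
        def _after_cursor_execute(conn, cursor, statement, parameters,
                                  context, executemany):
            total = time.time() - context._query_start_time
            log.debug("statement completed in %f seconds", total)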
""" @classmethod def _listen(cls, target, identifier, fn, retval=False): target._has_events = True if not retval: if identifier == 'before_execute': orig_fn = fn def wrap_before_execute(conn, clauseelement, multiparams, params): orig_fn(conn, clauseelement, multiparams, params) return clauseelement, multiparams, params fn = wrap_before_execute elif identifier == 'before_cursor_execute': orig_fn = fn def wrap_before_cursor_execute(conn, cursor, statement, parameters, context, executemany): orig_fn(conn, cursor, statement, parameters, context, executemany) return statement, parameters fn = wrap_before_cursor_execute elif retval and \ identifier not in ('before_execute', 'before_cursor_execute'): raise exc.ArgumentError( "Only the 'before_execute' and " "'before_cursor_execute' engine " "event listeners accept the 'retval=True' " "argument.") event.Events._listen(target, identifier, fn) def before_execute(self, conn, clauseelement, multiparams, params): """Intercept high level execute() events, receiving uncompiled SQL constructs and other objects prior to rendering into SQL. This event is good for debugging SQL compilation issues as well as early manipulation of the parameters being sent to the database, as the parameter lists will be in a consistent format here. This event can be optionally established with the ``retval=True`` flag. The ``clauseelement``, ``multiparams``, and ``params`` arguments should be returned as a three-tuple in this case:: @event.listens_for(Engine, "before_execute", retval=True) def before_execute(conn, conn, clauseelement, multiparams, params): # do something with clauseelement, multiparams, params return clauseelement, multiparams, params :param conn: :class:`.Connection` object :param clauseelement: SQL expression construct, :class:`.Compiled` instance, or string statement passed to :meth:`.Connection.execute`. :param multiparams: Multiple parameter sets, a list of dictionaries. :param params: Single parameter set, a single dictionary. See also: :meth:`.before_cursor_execute` """ def after_execute(self, conn, clauseelement, multiparams, params, result): """Intercept high level execute() events after execute. :param conn: :class:`.Connection` object :param clauseelement: SQL expression construct, :class:`.Compiled` instance, or string statement passed to :meth:`.Connection.execute`. :param multiparams: Multiple parameter sets, a list of dictionaries. :param params: Single parameter set, a single dictionary. :param result: :class:`.ResultProxy` generated by the execution. """ def before_cursor_execute(self, conn, cursor, statement, parameters, context, executemany): """Intercept low-level cursor execute() events before execution, receiving the string SQL statement and DBAPI-specific parameter list to be invoked against a cursor. This event is a good choice for logging as well as late modifications to the SQL string. It's less ideal for parameter modifications except for those which are specific to a target backend. This event can be optionally established with the ``retval=True`` flag. The ``statement`` and ``parameters`` arguments should be returned as a two-tuple in this case:: @event.listens_for(Engine, "before_cursor_execute", retval=True) def before_cursor_execute(conn, cursor, statement, parameters, context, executemany): # do something with statement, parameters return statement, parameters See the example at :class:`.ConnectionEvents`. 
:param conn: :class:`.Connection` object :param cursor: DBAPI cursor object :param statement: string SQL statement :param parameters: Dictionary, tuple, or list of parameters being passed to the ``execute()`` or ``executemany()`` method of the DBAPI ``cursor``. In some cases may be ``None``. :param context: :class:`.ExecutionContext` object in use. May be ``None``. :param executemany: boolean, if ``True``, this is an ``executemany()`` call, if ``False``, this is an ``execute()`` call. See also: :meth:`.before_execute` :meth:`.after_cursor_execute` """ def after_cursor_execute(self, conn, cursor, statement, parameters, context, executemany): """Intercept low-level cursor execute() events after execution. :param conn: :class:`.Connection` object :param cursor: DBAPI cursor object. Will have results pending if the statement was a SELECT, but these should not be consumed as they will be needed by the :class:`.ResultProxy`. :param statement: string SQL statement :param parameters: Dictionary, tuple, or list of parameters being passed to the ``execute()`` or ``executemany()`` method of the DBAPI ``cursor``. In some cases may be ``None``. :param context: :class:`.ExecutionContext` object in use. May be ``None``. :param executemany: boolean, if ``True``, this is an ``executemany()`` call, if ``False``, this is an ``execute()`` call. """ def dbapi_error(self, conn, cursor, statement, parameters, context, exception): """Intercept a raw DBAPI error. This event is called with the DBAPI exception instance received from the DBAPI itself, *before* SQLAlchemy wraps the exception with it's own exception wrappers, and before any other operations are performed on the DBAPI cursor; the existing transaction remains in effect as well as any state on the cursor. The use case here is to inject low-level exception handling into an :class:`.Engine`, typically for logging and debugging purposes. In general, user code should **not** modify any state or throw any exceptions here as this will interfere with SQLAlchemy's cleanup and error handling routines. Subsequent to this hook, SQLAlchemy may attempt any number of operations on the connection/cursor, including closing the cursor, rolling back of the transaction in the case of connectionless execution, and disposing of the entire connection pool if a "disconnect" was detected. The exception is then wrapped in a SQLAlchemy DBAPI exception wrapper and re-thrown. :param conn: :class:`.Connection` object :param cursor: DBAPI cursor object :param statement: string SQL statement :param parameters: Dictionary, tuple, or list of parameters being passed to the ``execute()`` or ``executemany()`` method of the DBAPI ``cursor``. In some cases may be ``None``. :param context: :class:`.ExecutionContext` object in use. May be ``None``. :param exception: The **unwrapped** exception emitted directly from the DBAPI. The class here is specific to the DBAPI module in use. .. versionadded:: 0.7.7 """ def begin(self, conn): """Intercept begin() events. :param conn: :class:`.Connection` object """ def rollback(self, conn): """Intercept rollback() events, as initiated by a :class:`.Transaction`. Note that the :class:`.Pool` also "auto-rolls back" a DBAPI connection upon checkin, if the ``reset_on_return`` flag is set to its default value of ``'rollback'``. To intercept this rollback, use the :meth:`.PoolEvents.reset` hook. :param conn: :class:`.Connection` object .. 
seealso:: :meth:`.PoolEvents.reset` """ def commit(self, conn): """Intercept commit() events, as initiated by a :class:`.Transaction`. Note that the :class:`.Pool` may also "auto-commit" a DBAPI connection upon checkin, if the ``reset_on_return`` flag is set to the value ``'commit'``. To intercept this commit, use the :meth:`.PoolEvents.reset` hook. :param conn: :class:`.Connection` object """ def savepoint(self, conn, name=None): """Intercept savepoint() events. :param conn: :class:`.Connection` object :param name: specified name used for the savepoint. """ def rollback_savepoint(self, conn, name, context): """Intercept rollback_savepoint() events. :param conn: :class:`.Connection` object :param name: specified name used for the savepoint. :param context: :class:`.ExecutionContext` in use. May be ``None``. """ def release_savepoint(self, conn, name, context): """Intercept release_savepoint() events. :param conn: :class:`.Connection` object :param name: specified name used for the savepoint. :param context: :class:`.ExecutionContext` in use. May be ``None``. """ def begin_twophase(self, conn, xid): """Intercept begin_twophase() events. :param conn: :class:`.Connection` object :param xid: two-phase XID identifier """ def prepare_twophase(self, conn, xid): """Intercept prepare_twophase() events. :param conn: :class:`.Connection` object :param xid: two-phase XID identifier """ def rollback_twophase(self, conn, xid, is_prepared): """Intercept rollback_twophase() events. :param conn: :class:`.Connection` object :param xid: two-phase XID identifier :param is_prepared: boolean, indicates if :meth:`.TwoPhaseTransaction.prepare` was called. """ def commit_twophase(self, conn, xid, is_prepared): """Intercept commit_twophase() events. :param conn: :class:`.Connection` object :param xid: two-phase XID identifier :param is_prepared: boolean, indicates if :meth:`.TwoPhaseTransaction.prepare` was called. """ SQLAlchemy-0.8.4/lib/sqlalchemy/exc.py0000644000076500000240000002567312251150015020277 0ustar classicstaff00000000000000# sqlalchemy/exc.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Exceptions used with SQLAlchemy. The base exception class is :class:`.SQLAlchemyError`. Exceptions which are raised as a result of DBAPI exceptions are all subclasses of :class:`.DBAPIError`. """ import traceback class SQLAlchemyError(Exception): """Generic error class.""" class ArgumentError(SQLAlchemyError): """Raised when an invalid or conflicting function argument is supplied. This error generally corresponds to construction time state errors. """ class NoForeignKeysError(ArgumentError): """Raised when no foreign keys can be located between two selectables during a join.""" class AmbiguousForeignKeysError(ArgumentError): """Raised when more than one foreign key matching can be located between two selectables during a join.""" class CircularDependencyError(SQLAlchemyError): """Raised by topological sorts when a circular dependency is detected. There are two scenarios where this error occurs: * In a Session flush operation, if two objects are mutually dependent on each other, they can not be inserted or deleted via INSERT or DELETE statements alone; an UPDATE will be needed to post-associate or pre-deassociate one of the foreign key constrained values. The ``post_update`` flag described at :ref:`post_update` can resolve this cycle. 
* In a :meth:`.MetaData.create_all`, :meth:`.MetaData.drop_all`, :attr:`.MetaData.sorted_tables` operation, two :class:`.ForeignKey` or :class:`.ForeignKeyConstraint` objects mutually refer to each other. Apply the ``use_alter=True`` flag to one or both, see :ref:`use_alter`. """ def __init__(self, message, cycles, edges, msg=None): if msg is None: message += " Cycles: %r all edges: %r" % (cycles, edges) else: message = msg SQLAlchemyError.__init__(self, message) self.cycles = cycles self.edges = edges def __reduce__(self): return self.__class__, (None, self.cycles, self.edges, self.args[0]) class CompileError(SQLAlchemyError): """Raised when an error occurs during SQL compilation""" class UnsupportedCompilationError(CompileError): """Raised when an operation is not supported by the given compiler. .. versionadded:: 0.8.3 """ def __init__(self, compiler, element_type): super(UnsupportedCompilationError, self).__init__( "Compiler %r can't render element of type %s" % (compiler, element_type)) class IdentifierError(SQLAlchemyError): """Raised when a schema name is beyond the max character limit""" class DisconnectionError(SQLAlchemyError): """A disconnect is detected on a raw DB-API connection. This error is raised and consumed internally by a connection pool. It can be raised by the :meth:`.PoolEvents.checkout` event so that the host pool forces a retry; the exception will be caught three times in a row before the pool gives up and raises :class:`~sqlalchemy.exc.InvalidRequestError` regarding the connection attempt. """ class TimeoutError(SQLAlchemyError): """Raised when a connection pool times out on getting a connection.""" class InvalidRequestError(SQLAlchemyError): """SQLAlchemy was asked to do something it can't do. This error generally corresponds to runtime state errors. """ class NoInspectionAvailable(InvalidRequestError): """A subject passed to :func:`sqlalchemy.inspection.inspect` produced no context for inspection.""" class ResourceClosedError(InvalidRequestError): """An operation was requested from a connection, cursor, or other object that's in a closed state.""" class NoSuchColumnError(KeyError, InvalidRequestError): """A nonexistent column is requested from a ``RowProxy``.""" class NoReferenceError(InvalidRequestError): """Raised by ``ForeignKey`` to indicate a reference cannot be resolved.""" class NoReferencedTableError(NoReferenceError): """Raised by ``ForeignKey`` when the referred ``Table`` cannot be located. """ def __init__(self, message, tname): NoReferenceError.__init__(self, message) self.table_name = tname def __reduce__(self): return self.__class__, (self.args[0], self.table_name) class NoReferencedColumnError(NoReferenceError): """Raised by ``ForeignKey`` when the referred ``Column`` cannot be located. """ def __init__(self, message, tname, cname): NoReferenceError.__init__(self, message) self.table_name = tname self.column_name = cname def __reduce__(self): return self.__class__, (self.args[0], self.table_name, self.column_name) class NoSuchTableError(InvalidRequestError): """Table does not exist or is not visible to a connection.""" class UnboundExecutionError(InvalidRequestError): """SQL was attempted without a database connection to execute it on.""" class DontWrapMixin(object): """A mixin class which, when applied to a user-defined Exception class, will not be wrapped inside of :class:`.StatementError` if the error is emitted within the process of executing a statement. 
E.g.:: from sqlalchemy.exc import DontWrapMixin class MyCustomException(Exception, DontWrapMixin): pass class MySpecialType(TypeDecorator): impl = String def process_bind_param(self, value, dialect): if value == 'invalid': raise MyCustomException("invalid!") """ import sys if sys.version_info < (2, 5): class DontWrapMixin: pass # Moved to orm.exc; compatibility definition installed by orm import until 0.6 UnmappedColumnError = None class StatementError(SQLAlchemyError): """An error occurred during execution of a SQL statement. :class:`StatementError` wraps the exception raised during execution, and features :attr:`.statement` and :attr:`.params` attributes which supply context regarding the specifics of the statement which had an issue. The wrapped exception object is available in the :attr:`.orig` attribute. """ statement = None """The string SQL statement being invoked when this exception occurred.""" params = None """The parameter list being used when this exception occurred.""" orig = None """The DBAPI exception object.""" def __init__(self, message, statement, params, orig): SQLAlchemyError.__init__(self, message) self.statement = statement self.params = params self.orig = orig def __reduce__(self): return self.__class__, (self.args[0], self.statement, self.params, self.orig) def __str__(self): from sqlalchemy.sql import util params_repr = util._repr_params(self.params, 10) return ' '.join((SQLAlchemyError.__str__(self), repr(self.statement), repr(params_repr))) def __unicode__(self): return self.__str__() class DBAPIError(StatementError): """Raised when the execution of a database operation fails. Wraps exceptions raised by the DB-API underlying the database operation. Driver-specific implementations of the standard DB-API exception types are wrapped by matching sub-types of SQLAlchemy's :class:`DBAPIError` when possible. DB-API's ``Error`` type maps to :class:`DBAPIError` in SQLAlchemy, otherwise the names are identical. Note that there is no guarantee that different DB-API implementations will raise the same exception type for any given error condition. :class:`DBAPIError` features :attr:`~.StatementError.statement` and :attr:`~.StatementError.params` attributes which supply context regarding the specifics of the statement which had an issue, for the typical case when the error was raised within the context of emitting a SQL statement. The wrapped exception object is available in the :attr:`~.StatementError.orig` attribute. Its type and properties are DB-API implementation specific. """ @classmethod def instance(cls, statement, params, orig, dbapi_base_err, connection_invalidated=False): # Don't ever wrap these, just return them directly as if # DBAPIError didn't exist. if isinstance(orig, (KeyboardInterrupt, SystemExit, DontWrapMixin)): return orig if orig is not None: # not a DBAPI error, statement is present. 
# raise a StatementError if not isinstance(orig, dbapi_base_err) and statement: msg = traceback.format_exception_only( orig.__class__, orig)[-1].strip() return StatementError( "%s (original cause: %s)" % (str(orig), msg), statement, params, orig ) name, glob = orig.__class__.__name__, globals() if name in glob and issubclass(glob[name], DBAPIError): cls = glob[name] return cls(statement, params, orig, connection_invalidated) def __reduce__(self): return self.__class__, (self.statement, self.params, self.orig, self.connection_invalidated) def __init__(self, statement, params, orig, connection_invalidated=False): try: text = str(orig) except (KeyboardInterrupt, SystemExit): raise except Exception, e: text = 'Error in str() of DB-API-generated exception: ' + str(e) StatementError.__init__( self, '(%s) %s' % (orig.__class__.__name__, text), statement, params, orig ) self.connection_invalidated = connection_invalidated class InterfaceError(DBAPIError): """Wraps a DB-API InterfaceError.""" class DatabaseError(DBAPIError): """Wraps a DB-API DatabaseError.""" class DataError(DatabaseError): """Wraps a DB-API DataError.""" class OperationalError(DatabaseError): """Wraps a DB-API OperationalError.""" class IntegrityError(DatabaseError): """Wraps a DB-API IntegrityError.""" class InternalError(DatabaseError): """Wraps a DB-API InternalError.""" class ProgrammingError(DatabaseError): """Wraps a DB-API ProgrammingError.""" class NotSupportedError(DatabaseError): """Wraps a DB-API NotSupportedError.""" # Warnings class SADeprecationWarning(DeprecationWarning): """Issued once per usage of a deprecated API.""" class SAPendingDeprecationWarning(PendingDeprecationWarning): """Issued once per usage of a deprecated API.""" class SAWarning(RuntimeWarning): """Issued at runtime.""" SQLAlchemy-0.8.4/lib/sqlalchemy/ext/0000755000076500000240000000000012251151573017743 5ustar classicstaff00000000000000SQLAlchemy-0.8.4/lib/sqlalchemy/ext/__init__.py0000644000076500000240000000035112251147171022052 0ustar classicstaff00000000000000# ext/__init__.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php SQLAlchemy-0.8.4/lib/sqlalchemy/ext/associationproxy.py0000644000076500000240000007573412251150015023741 0ustar classicstaff00000000000000# ext/associationproxy.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Contain the ``AssociationProxy`` class. The ``AssociationProxy`` is a Python property object which provides transparent proxied access to the endpoint of an association object. See the example ``examples/association/proxied_association.py``. """ import itertools import operator import weakref from .. import exc, orm, util from ..orm import collections, interfaces from ..sql import not_ def association_proxy(target_collection, attr, **kw): """Return a Python property implementing a view of a target attribute which references an attribute on members of the target. The returned value is an instance of :class:`.AssociationProxy`. Implements a Python property representing a relationship as a collection of simpler values, or a scalar value. The proxied property will mimic the collection type of the target (list, dict or set), or, in the case of a one to one relationship, a simple scalar value. 
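A minimal usage sketch follows; the ``User`` / ``Keyword`` names are illustrative only. A ``User`` exposes a collection of plain keyword strings which are actually stored on associated ``Keyword`` objects::

    from sqlalchemy import Column, ForeignKey, Integer, String
    from sqlalchemy.ext.associationproxy import association_proxy
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import relationship

    Base = declarative_base()

    class User(Base):
        __tablename__ = 'user'
        id = Column(Integer, primary_key=True)
        name = Column(String(64))

        # the underlying collection of Keyword objects
        kw = relationship("Keyword")

        # proxy the 'keyword' string attribute of each Keyword;
        # 'creator' builds a Keyword when a plain string is appended
        keywords = association_proxy('kw', 'keyword',
                        creator=lambda value: Keyword(keyword=value))

    class Keyword(Base):
        __tablename__ = 'keyword'
        id = Column(Integer, primary_key=True)
        user_id = Column(Integer, ForeignKey('user.id'))
        keyword = Column(String(64))

    user = User(name='ed')
    user.keywords.append('editable')   # creates Keyword(keyword='editable')
    print user.keywords                # ['editable']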
:param target_collection: Name of the attribute we'll proxy to. This attribute is typically mapped by :func:`~sqlalchemy.orm.relationship` to link to a target collection, but can also be a many-to-one or non-scalar relationship. :param attr: Attribute on the associated instance or instances we'll proxy for. For example, given a target collection of [obj1, obj2], a list created by this proxy property would look like [getattr(obj1, *attr*), getattr(obj2, *attr*)] If the relationship is one-to-one or otherwise uselist=False, then simply: getattr(obj, *attr*) :param creator: optional. When new items are added to this proxied collection, new instances of the class collected by the target collection will be created. For list and set collections, the target class constructor will be called with the 'value' for the new instance. For dict types, two arguments are passed: key and value. If you want to construct instances differently, supply a *creator* function that takes arguments as above and returns instances. For scalar relationships, creator() will be called if the target is None. If the target is present, set operations are proxied to setattr() on the associated object. If you have an associated object with multiple attributes, you may set up multiple association proxies mapping to different attributes. See the unit tests for examples, and for examples of how creator() functions can be used to construct the scalar relationship on-demand in this situation. :param \*\*kw: Passes along any other keyword arguments to :class:`.AssociationProxy`. """ return AssociationProxy(target_collection, attr, **kw) ASSOCIATION_PROXY = util.symbol('ASSOCIATION_PROXY') """Symbol indicating an :class:`_InspectionAttr` that's of type :class:`.AssociationProxy`. Is assigned to the :attr:`._InspectionAttr.extension_type` attibute. """ class AssociationProxy(interfaces._InspectionAttr): """A descriptor that presents a read/write view of an object attribute.""" is_attribute = False extension_type = ASSOCIATION_PROXY def __init__(self, target_collection, attr, creator=None, getset_factory=None, proxy_factory=None, proxy_bulk_set=None): """Construct a new :class:`.AssociationProxy`. The :func:`.association_proxy` function is provided as the usual entrypoint here, though :class:`.AssociationProxy` can be instantiated and/or subclassed directly. :param target_collection: Name of the collection we'll proxy to, usually created with :func:`.relationship`. :param attr: Attribute on the collected instances we'll proxy for. For example, given a target collection of [obj1, obj2], a list created by this proxy property would look like [getattr(obj1, attr), getattr(obj2, attr)] :param creator: Optional. When new items are added to this proxied collection, new instances of the class collected by the target collection will be created. For list and set collections, the target class constructor will be called with the 'value' for the new instance. For dict types, two arguments are passed: key and value. If you want to construct instances differently, supply a 'creator' function that takes arguments as above and returns instances. :param getset_factory: Optional. Proxied attribute access is automatically handled by routines that get and set values based on the `attr` argument for this proxy. If you would like to customize this behavior, you may supply a `getset_factory` callable that produces a tuple of `getter` and `setter` functions. 
The factory is called with two arguments, the abstract type of the underlying collection and this proxy instance. :param proxy_factory: Optional. The type of collection to emulate is determined by sniffing the target collection. If your collection type can't be determined by duck typing or you'd like to use a different collection implementation, you may supply a factory function to produce those collections. Only applicable to non-scalar relationships. :param proxy_bulk_set: Optional, use with proxy_factory. See the _set() method for details. """ self.target_collection = target_collection self.value_attr = attr self.creator = creator self.getset_factory = getset_factory self.proxy_factory = proxy_factory self.proxy_bulk_set = proxy_bulk_set self.owning_class = None self.key = '_%s_%s_%s' % ( type(self).__name__, target_collection, id(self)) self.collection_class = None @property def remote_attr(self): """The 'remote' :class:`.MapperProperty` referenced by this :class:`.AssociationProxy`. .. versionadded:: 0.7.3 See also: :attr:`.AssociationProxy.attr` :attr:`.AssociationProxy.local_attr` """ return getattr(self.target_class, self.value_attr) @property def local_attr(self): """The 'local' :class:`.MapperProperty` referenced by this :class:`.AssociationProxy`. .. versionadded:: 0.7.3 See also: :attr:`.AssociationProxy.attr` :attr:`.AssociationProxy.remote_attr` """ return getattr(self.owning_class, self.target_collection) @property def attr(self): """Return a tuple of ``(local_attr, remote_attr)``. This attribute is convenient when specifying a join using :meth:`.Query.join` across two relationships:: sess.query(Parent).join(*Parent.proxied.attr) .. versionadded:: 0.7.3 See also: :attr:`.AssociationProxy.local_attr` :attr:`.AssociationProxy.remote_attr` """ return (self.local_attr, self.remote_attr) def _get_property(self): return (orm.class_mapper(self.owning_class). get_property(self.target_collection)) @util.memoized_property def target_class(self): """The intermediary class handled by this :class:`.AssociationProxy`. Intercepted append/set/assignment events will result in the generation of new instances of this class. """ return self._get_property().mapper.class_ @util.memoized_property def scalar(self): """Return ``True`` if this :class:`.AssociationProxy` proxies a scalar relationship on the local side.""" scalar = not self._get_property().uselist if scalar: self._initialize_scalar_accessors() return scalar @util.memoized_property def _value_is_scalar(self): return not self._get_property().\ mapper.get_property(self.value_attr).uselist def __get__(self, obj, class_): if self.owning_class is None: self.owning_class = class_ and class_ or type(obj) if obj is None: return self if self.scalar: return self._scalar_get(getattr(obj, self.target_collection)) else: try: # If the owning instance is reborn (orm session resurrect, # etc.), refresh the proxy cache. 
creator_id, proxy = getattr(obj, self.key) if id(obj) == creator_id: return proxy except AttributeError: pass proxy = self._new(_lazy_collection(obj, self.target_collection)) setattr(obj, self.key, (id(obj), proxy)) return proxy def __set__(self, obj, values): if self.owning_class is None: self.owning_class = type(obj) if self.scalar: creator = self.creator and self.creator or self.target_class target = getattr(obj, self.target_collection) if target is None: setattr(obj, self.target_collection, creator(values)) else: self._scalar_set(target, values) else: proxy = self.__get__(obj, None) if proxy is not values: proxy.clear() self._set(proxy, values) def __delete__(self, obj): if self.owning_class is None: self.owning_class = type(obj) delattr(obj, self.key) def _initialize_scalar_accessors(self): if self.getset_factory: get, set = self.getset_factory(None, self) else: get, set = self._default_getset(None) self._scalar_get, self._scalar_set = get, set def _default_getset(self, collection_class): attr = self.value_attr getter = operator.attrgetter(attr) if collection_class is dict: setter = lambda o, k, v: setattr(o, attr, v) else: setter = lambda o, v: setattr(o, attr, v) return getter, setter def _new(self, lazy_collection): creator = self.creator and self.creator or self.target_class self.collection_class = util.duck_type_collection(lazy_collection()) if self.proxy_factory: return self.proxy_factory( lazy_collection, creator, self.value_attr, self) if self.getset_factory: getter, setter = self.getset_factory(self.collection_class, self) else: getter, setter = self._default_getset(self.collection_class) if self.collection_class is list: return _AssociationList( lazy_collection, creator, getter, setter, self) elif self.collection_class is dict: return _AssociationDict( lazy_collection, creator, getter, setter, self) elif self.collection_class is set: return _AssociationSet( lazy_collection, creator, getter, setter, self) else: raise exc.ArgumentError( 'could not guess which interface to use for ' 'collection_class "%s" backing "%s"; specify a ' 'proxy_factory and proxy_bulk_set manually' % (self.collection_class.__name__, self.target_collection)) def _inflate(self, proxy): creator = self.creator and self.creator or self.target_class if self.getset_factory: getter, setter = self.getset_factory(self.collection_class, self) else: getter, setter = self._default_getset(self.collection_class) proxy.creator = creator proxy.getter = getter proxy.setter = setter def _set(self, proxy, values): if self.proxy_bulk_set: self.proxy_bulk_set(proxy, values) elif self.collection_class is list: proxy.extend(values) elif self.collection_class is dict: proxy.update(values) elif self.collection_class is set: proxy.update(values) else: raise exc.ArgumentError( 'no proxy_bulk_set supplied for custom ' 'collection_class implementation') @property def _comparator(self): return self._get_property().comparator def any(self, criterion=None, **kwargs): """Produce a proxied 'any' expression using EXISTS. This expression will be a composed product using the :meth:`.RelationshipProperty.Comparator.any` and/or :meth:`.RelationshipProperty.Comparator.has` operators of the underlying proxied attributes. 
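For instance, given the illustrative ``User`` / ``Keyword`` mapping sketched earlier and an assumed ``session``, the proxied operators compose an EXISTS against the underlying relationship; the two queries below are roughly equivalent::

    # via the association proxy
    session.query(User).filter(User.keywords.contains('editable'))

    # roughly the same criterion, spelled against the underlying
    # relationship and column directly
    session.query(User).filter(User.kw.any(Keyword.keyword == 'editable'))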
""" if self._value_is_scalar: value_expr = getattr( self.target_class, self.value_attr).has(criterion, **kwargs) else: value_expr = getattr( self.target_class, self.value_attr).any(criterion, **kwargs) # check _value_is_scalar here, otherwise # we're scalar->scalar - call .any() so that # the "can't call any() on a scalar" msg is raised. if self.scalar and not self._value_is_scalar: return self._comparator.has( value_expr ) else: return self._comparator.any( value_expr ) def has(self, criterion=None, **kwargs): """Produce a proxied 'has' expression using EXISTS. This expression will be a composed product using the :meth:`.RelationshipProperty.Comparator.any` and/or :meth:`.RelationshipProperty.Comparator.has` operators of the underlying proxied attributes. """ return self._comparator.has( getattr(self.target_class, self.value_attr).\ has(criterion, **kwargs) ) def contains(self, obj): """Produce a proxied 'contains' expression using EXISTS. This expression will be a composed product using the :meth:`.RelationshipProperty.Comparator.any` , :meth:`.RelationshipProperty.Comparator.has`, and/or :meth:`.RelationshipProperty.Comparator.contains` operators of the underlying proxied attributes. """ if self.scalar and not self._value_is_scalar: return self._comparator.has( getattr(self.target_class, self.value_attr).contains(obj) ) else: return self._comparator.any(**{self.value_attr: obj}) def __eq__(self, obj): return self._comparator.has(**{self.value_attr: obj}) def __ne__(self, obj): return not_(self.__eq__(obj)) class _lazy_collection(object): def __init__(self, obj, target): self.ref = weakref.ref(obj) self.target = target def __call__(self): obj = self.ref() if obj is None: raise exc.InvalidRequestError( "stale association proxy, parent object has gone out of " "scope") return getattr(obj, self.target) def __getstate__(self): return {'obj': self.ref(), 'target': self.target} def __setstate__(self, state): self.ref = weakref.ref(state['obj']) self.target = state['target'] class _AssociationCollection(object): def __init__(self, lazy_collection, creator, getter, setter, parent): """Constructs an _AssociationCollection. This will always be a subclass of either _AssociationList, _AssociationSet, or _AssociationDict. lazy_collection A callable returning a list-based collection of entities (usually an object attribute managed by a SQLAlchemy relationship()) creator A function that creates new target entities. Given one parameter: value. This assertion is assumed:: obj = creator(somevalue) assert getter(obj) == somevalue getter A function. Given an associated object, return the 'value'. setter A function. Given an associated object and a value, store that value on the object. 
""" self.lazy_collection = lazy_collection self.creator = creator self.getter = getter self.setter = setter self.parent = parent col = property(lambda self: self.lazy_collection()) def __len__(self): return len(self.col) def __nonzero__(self): return bool(self.col) def __getstate__(self): return {'parent': self.parent, 'lazy_collection': self.lazy_collection} def __setstate__(self, state): self.parent = state['parent'] self.lazy_collection = state['lazy_collection'] self.parent._inflate(self) class _AssociationList(_AssociationCollection): """Generic, converting, list-to-list proxy.""" def _create(self, value): return self.creator(value) def _get(self, object): return self.getter(object) def _set(self, object, value): return self.setter(object, value) def __getitem__(self, index): return self._get(self.col[index]) def __setitem__(self, index, value): if not isinstance(index, slice): self._set(self.col[index], value) else: if index.stop is None: stop = len(self) elif index.stop < 0: stop = len(self) + index.stop else: stop = index.stop step = index.step or 1 rng = range(index.start or 0, stop, step) if step == 1: for i in rng: del self[index.start] i = index.start for item in value: self.insert(i, item) i += 1 else: if len(value) != len(rng): raise ValueError( "attempt to assign sequence of size %s to " "extended slice of size %s" % (len(value), len(rng))) for i, item in zip(rng, value): self._set(self.col[i], item) def __delitem__(self, index): del self.col[index] def __contains__(self, value): for member in self.col: # testlib.pragma exempt:__eq__ if self._get(member) == value: return True return False def __getslice__(self, start, end): return [self._get(member) for member in self.col[start:end]] def __setslice__(self, start, end, values): members = [self._create(v) for v in values] self.col[start:end] = members def __delslice__(self, start, end): del self.col[start:end] def __iter__(self): """Iterate over proxied values. For the actual domain objects, iterate over .col instead or just use the underlying collection directly from its property on the parent. 
""" for member in self.col: yield self._get(member) raise StopIteration def append(self, value): item = self._create(value) self.col.append(item) def count(self, value): return sum([1 for _ in itertools.ifilter(lambda v: v == value, iter(self))]) def extend(self, values): for v in values: self.append(v) def insert(self, index, value): self.col[index:index] = [self._create(value)] def pop(self, index=-1): return self.getter(self.col.pop(index)) def remove(self, value): for i, val in enumerate(self): if val == value: del self.col[i] return raise ValueError("value not in list") def reverse(self): """Not supported, use reversed(mylist)""" raise NotImplementedError def sort(self): """Not supported, use sorted(mylist)""" raise NotImplementedError def clear(self): del self.col[0:len(self.col)] def __eq__(self, other): return list(self) == other def __ne__(self, other): return list(self) != other def __lt__(self, other): return list(self) < other def __le__(self, other): return list(self) <= other def __gt__(self, other): return list(self) > other def __ge__(self, other): return list(self) >= other def __cmp__(self, other): return cmp(list(self), other) def __add__(self, iterable): try: other = list(iterable) except TypeError: return NotImplemented return list(self) + other def __radd__(self, iterable): try: other = list(iterable) except TypeError: return NotImplemented return other + list(self) def __mul__(self, n): if not isinstance(n, int): return NotImplemented return list(self) * n __rmul__ = __mul__ def __iadd__(self, iterable): self.extend(iterable) return self def __imul__(self, n): # unlike a regular list *=, proxied __imul__ will generate unique # backing objects for each copy. *= on proxied lists is a bit of # a stretch anyhow, and this interpretation of the __imul__ contract # is more plausibly useful than copying the backing objects. 
if not isinstance(n, int): return NotImplemented if n == 0: self.clear() elif n > 1: self.extend(list(self) * (n - 1)) return self def copy(self): return list(self) def __repr__(self): return repr(list(self)) def __hash__(self): raise TypeError("%s objects are unhashable" % type(self).__name__) for func_name, func in locals().items(): if (util.callable(func) and func.func_name == func_name and not func.__doc__ and hasattr(list, func_name)): func.__doc__ = getattr(list, func_name).__doc__ del func_name, func _NotProvided = util.symbol('_NotProvided') class _AssociationDict(_AssociationCollection): """Generic, converting, dict-to-dict proxy.""" def _create(self, key, value): return self.creator(key, value) def _get(self, object): return self.getter(object) def _set(self, object, key, value): return self.setter(object, key, value) def __getitem__(self, key): return self._get(self.col[key]) def __setitem__(self, key, value): if key in self.col: self._set(self.col[key], key, value) else: self.col[key] = self._create(key, value) def __delitem__(self, key): del self.col[key] def __contains__(self, key): # testlib.pragma exempt:__hash__ return key in self.col def has_key(self, key): # testlib.pragma exempt:__hash__ return key in self.col def __iter__(self): return self.col.iterkeys() def clear(self): self.col.clear() def __eq__(self, other): return dict(self) == other def __ne__(self, other): return dict(self) != other def __lt__(self, other): return dict(self) < other def __le__(self, other): return dict(self) <= other def __gt__(self, other): return dict(self) > other def __ge__(self, other): return dict(self) >= other def __cmp__(self, other): return cmp(dict(self), other) def __repr__(self): return repr(dict(self.items())) def get(self, key, default=None): try: return self[key] except KeyError: return default def setdefault(self, key, default=None): if key not in self.col: self.col[key] = self._create(key, default) return default else: return self[key] def keys(self): return self.col.keys() def iterkeys(self): return self.col.iterkeys() def values(self): return [self._get(member) for member in self.col.values()] def itervalues(self): for key in self.col: yield self._get(self.col[key]) raise StopIteration def items(self): return [(k, self._get(self.col[k])) for k in self] def iteritems(self): for key in self.col: yield (key, self._get(self.col[key])) raise StopIteration def pop(self, key, default=_NotProvided): if default is _NotProvided: member = self.col.pop(key) else: member = self.col.pop(key, default) return self._get(member) def popitem(self): item = self.col.popitem() return (item[0], self._get(item[1])) def update(self, *a, **kw): if len(a) > 1: raise TypeError('update expected at most 1 arguments, got %i' % len(a)) elif len(a) == 1: seq_or_map = a[0] # discern dict from sequence - took the advice from # http://www.voidspace.org.uk/python/articles/duck_typing.shtml # still not perfect :( if hasattr(seq_or_map, 'keys'): for item in seq_or_map: self[item] = seq_or_map[item] else: try: for k, v in seq_or_map: self[k] = v except ValueError: raise ValueError( "dictionary update sequence " "requires 2-element tuples") for key, value in kw: self[key] = value def copy(self): return dict(self.items()) def __hash__(self): raise TypeError("%s objects are unhashable" % type(self).__name__) for func_name, func in locals().items(): if (util.callable(func) and func.func_name == func_name and not func.__doc__ and hasattr(dict, func_name)): func.__doc__ = getattr(dict, func_name).__doc__ del func_name, 
func class _AssociationSet(_AssociationCollection): """Generic, converting, set-to-set proxy.""" def _create(self, value): return self.creator(value) def _get(self, object): return self.getter(object) def _set(self, object, value): return self.setter(object, value) def __len__(self): return len(self.col) def __nonzero__(self): if self.col: return True else: return False def __contains__(self, value): for member in self.col: # testlib.pragma exempt:__eq__ if self._get(member) == value: return True return False def __iter__(self): """Iterate over proxied values. For the actual domain objects, iterate over .col instead or just use the underlying collection directly from its property on the parent. """ for member in self.col: yield self._get(member) raise StopIteration def add(self, value): if value not in self: self.col.add(self._create(value)) # for discard and remove, choosing a more expensive check strategy rather # than call self.creator() def discard(self, value): for member in self.col: if self._get(member) == value: self.col.discard(member) break def remove(self, value): for member in self.col: if self._get(member) == value: self.col.discard(member) return raise KeyError(value) def pop(self): if not self.col: raise KeyError('pop from an empty set') member = self.col.pop() return self._get(member) def update(self, other): for value in other: self.add(value) def __ior__(self, other): if not collections._set_binops_check_strict(self, other): return NotImplemented for value in other: self.add(value) return self def _set(self): return set(iter(self)) def union(self, other): return set(self).union(other) __or__ = union def difference(self, other): return set(self).difference(other) __sub__ = difference def difference_update(self, other): for value in other: self.discard(value) def __isub__(self, other): if not collections._set_binops_check_strict(self, other): return NotImplemented for value in other: self.discard(value) return self def intersection(self, other): return set(self).intersection(other) __and__ = intersection def intersection_update(self, other): want, have = self.intersection(other), set(self) remove, add = have - want, want - have for value in remove: self.remove(value) for value in add: self.add(value) def __iand__(self, other): if not collections._set_binops_check_strict(self, other): return NotImplemented want, have = self.intersection(other), set(self) remove, add = have - want, want - have for value in remove: self.remove(value) for value in add: self.add(value) return self def symmetric_difference(self, other): return set(self).symmetric_difference(other) __xor__ = symmetric_difference def symmetric_difference_update(self, other): want, have = self.symmetric_difference(other), set(self) remove, add = have - want, want - have for value in remove: self.remove(value) for value in add: self.add(value) def __ixor__(self, other): if not collections._set_binops_check_strict(self, other): return NotImplemented want, have = self.symmetric_difference(other), set(self) remove, add = have - want, want - have for value in remove: self.remove(value) for value in add: self.add(value) return self def issubset(self, other): return set(self).issubset(other) def issuperset(self, other): return set(self).issuperset(other) def clear(self): self.col.clear() def copy(self): return set(self) def __eq__(self, other): return set(self) == other def __ne__(self, other): return set(self) != other def __lt__(self, other): return set(self) < other def __le__(self, other): return set(self) <= other def 
__gt__(self, other): return set(self) > other def __ge__(self, other): return set(self) >= other def __repr__(self): return repr(set(self)) def __hash__(self): raise TypeError("%s objects are unhashable" % type(self).__name__) for func_name, func in locals().items(): if (util.callable(func) and func.func_name == func_name and not func.__doc__ and hasattr(set, func_name)): func.__doc__ = getattr(set, func_name).__doc__ del func_name, func SQLAlchemy-0.8.4/lib/sqlalchemy/ext/compiler.py0000644000076500000240000003651512251150015022127 0ustar classicstaff00000000000000# ext/compiler.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Provides an API for creation of custom ClauseElements and compilers. Synopsis ======== Usage involves the creation of one or more :class:`~sqlalchemy.sql.expression.ClauseElement` subclasses and one or more callables defining its compilation:: from sqlalchemy.ext.compiler import compiles from sqlalchemy.sql.expression import ColumnClause class MyColumn(ColumnClause): pass @compiles(MyColumn) def compile_mycolumn(element, compiler, **kw): return "[%s]" % element.name Above, ``MyColumn`` extends :class:`~sqlalchemy.sql.expression.ColumnClause`, the base expression element for named column objects. The ``compiles`` decorator registers itself with the ``MyColumn`` class so that it is invoked when the object is compiled to a string:: from sqlalchemy import select s = select([MyColumn('x'), MyColumn('y')]) print str(s) Produces:: SELECT [x], [y] Dialect-specific compilation rules ================================== Compilers can also be made dialect-specific. The appropriate compiler will be invoked for the dialect in use:: from sqlalchemy.schema import DDLElement class AlterColumn(DDLElement): def __init__(self, column, cmd): self.column = column self.cmd = cmd @compiles(AlterColumn) def visit_alter_column(element, compiler, **kw): return "ALTER COLUMN %s ..." % element.column.name @compiles(AlterColumn, 'postgresql') def visit_alter_column(element, compiler, **kw): return "ALTER TABLE %s ALTER COLUMN %s ..." % (element.table.name, element.column.name) The second ``visit_alter_table`` will be invoked when any ``postgresql`` dialect is used. Compiling sub-elements of a custom expression construct ======================================================= The ``compiler`` argument is the :class:`~sqlalchemy.engine.interfaces.Compiled` object in use. This object can be inspected for any information about the in-progress compilation, including ``compiler.dialect``, ``compiler.statement`` etc. The :class:`~sqlalchemy.sql.compiler.SQLCompiler` and :class:`~sqlalchemy.sql.compiler.DDLCompiler` both include a ``process()`` method which can be used for compilation of embedded attributes:: from sqlalchemy.sql.expression import Executable, ClauseElement class InsertFromSelect(Executable, ClauseElement): def __init__(self, table, select): self.table = table self.select = select @compiles(InsertFromSelect) def visit_insert_from_select(element, compiler, **kw): return "INSERT INTO %s (%s)" % ( compiler.process(element.table, asfrom=True), compiler.process(element.select) ) insert = InsertFromSelect(t1, select([t1]).where(t1.c.x>5)) print insert Produces:: "INSERT INTO mytable (SELECT mytable.x, mytable.y, mytable.z FROM mytable WHERE mytable.x > :x_1)" .. 
note:: The above ``InsertFromSelect`` construct is only an example, this actual functionality is already available using the :meth:`.Insert.from_select` method. .. note:: The above ``InsertFromSelect`` construct probably wants to have "autocommit" enabled. See :ref:`enabling_compiled_autocommit` for this step. Cross Compiling between SQL and DDL compilers --------------------------------------------- SQL and DDL constructs are each compiled using different base compilers - ``SQLCompiler`` and ``DDLCompiler``. A common need is to access the compilation rules of SQL expressions from within a DDL expression. The ``DDLCompiler`` includes an accessor ``sql_compiler`` for this reason, such as below where we generate a CHECK constraint that embeds a SQL expression:: @compiles(MyConstraint) def compile_my_constraint(constraint, ddlcompiler, **kw): return "CONSTRAINT %s CHECK (%s)" % ( constraint.name, ddlcompiler.sql_compiler.process(constraint.expression) ) .. _enabling_compiled_autocommit: Enabling Autocommit on a Construct ================================== Recall from the section :ref:`autocommit` that the :class:`.Engine`, when asked to execute a construct in the absence of a user-defined transaction, detects if the given construct represents DML or DDL, that is, a data modification or data definition statement, which requires (or may require, in the case of DDL) that the transaction generated by the DBAPI be committed (recall that DBAPI always has a transaction going on regardless of what SQLAlchemy does). Checking for this is actually accomplished by checking for the "autocommit" execution option on the construct. When building a construct like an INSERT derivation, a new DDL type, or perhaps a stored procedure that alters data, the "autocommit" option needs to be set in order for the statement to function with "connectionless" execution (as described in :ref:`dbengine_implicit`). Currently a quick way to do this is to subclass :class:`.Executable`, then add the "autocommit" flag to the ``_execution_options`` dictionary (note this is a "frozen" dictionary which supplies a generative ``union()`` method):: from sqlalchemy.sql.expression import Executable, ClauseElement class MyInsertThing(Executable, ClauseElement): _execution_options = \\ Executable._execution_options.union({'autocommit': True}) More succinctly, if the construct is truly similar to an INSERT, UPDATE, or DELETE, :class:`.UpdateBase` can be used, which already is a subclass of :class:`.Executable`, :class:`.ClauseElement` and includes the ``autocommit`` flag:: from sqlalchemy.sql.expression import UpdateBase class MyInsertThing(UpdateBase): def __init__(self, ...): ... DDL elements that subclass :class:`.DDLElement` already have the "autocommit" flag turned on. Changing the default compilation of existing constructs ======================================================= The compiler extension applies just as well to the existing constructs. When overriding the compilation of a built in SQL construct, the @compiles decorator is invoked upon the appropriate class (be sure to use the class, i.e. ``Insert`` or ``Select``, instead of the creation function such as ``insert()`` or ``select()``). Within the new compilation function, to get at the "original" compilation routine, use the appropriate visit_XXX method - this because compiler.process() will call upon the overriding routine and cause an endless loop. 
Such as, to add "prefix" to all insert statements:: from sqlalchemy.sql.expression import Insert @compiles(Insert) def prefix_inserts(insert, compiler, **kw): return compiler.visit_insert(insert.prefix_with("some prefix"), **kw) The above compiler will prefix all INSERT statements with "some prefix" when compiled. .. _type_compilation_extension: Changing Compilation of Types ============================= ``compiler`` works for types, too, such as below where we implement the MS-SQL specific 'max' keyword for ``String``/``VARCHAR``:: @compiles(String, 'mssql') @compiles(VARCHAR, 'mssql') def compile_varchar(element, compiler, **kw): if element.length == 'max': return "VARCHAR('max')" else: return compiler.visit_VARCHAR(element, **kw) foo = Table('foo', metadata, Column('data', VARCHAR('max')) ) Subclassing Guidelines ====================== A big part of using the compiler extension is subclassing SQLAlchemy expression constructs. To make this easier, the expression and schema packages feature a set of "bases" intended for common tasks. A synopsis is as follows: * :class:`~sqlalchemy.sql.expression.ClauseElement` - This is the root expression class. Any SQL expression can be derived from this base, and is probably the best choice for longer constructs such as specialized INSERT statements. * :class:`~sqlalchemy.sql.expression.ColumnElement` - The root of all "column-like" elements. Anything that you'd place in the "columns" clause of a SELECT statement (as well as order by and group by) can derive from this - the object will automatically have Python "comparison" behavior. :class:`~sqlalchemy.sql.expression.ColumnElement` classes want to have a ``type`` member which is expression's return type. This can be established at the instance level in the constructor, or at the class level if its generally constant:: class timestamp(ColumnElement): type = TIMESTAMP() * :class:`~sqlalchemy.sql.expression.FunctionElement` - This is a hybrid of a ``ColumnElement`` and a "from clause" like object, and represents a SQL function or stored procedure type of call. Since most databases support statements along the line of "SELECT FROM " ``FunctionElement`` adds in the ability to be used in the FROM clause of a ``select()`` construct:: from sqlalchemy.sql.expression import FunctionElement class coalesce(FunctionElement): name = 'coalesce' @compiles(coalesce) def compile(element, compiler, **kw): return "coalesce(%s)" % compiler.process(element.clauses) @compiles(coalesce, 'oracle') def compile(element, compiler, **kw): if len(element.clauses) > 2: raise TypeError("coalesce only supports two arguments on Oracle") return "nvl(%s)" % compiler.process(element.clauses) * :class:`~sqlalchemy.schema.DDLElement` - The root of all DDL expressions, like CREATE TABLE, ALTER TABLE, etc. Compilation of ``DDLElement`` subclasses is issued by a ``DDLCompiler`` instead of a ``SQLCompiler``. ``DDLElement`` also features ``Table`` and ``MetaData`` event hooks via the ``execute_at()`` method, allowing the construct to be invoked during CREATE TABLE and DROP TABLE sequences. * :class:`~sqlalchemy.sql.expression.Executable` - This is a mixin which should be used with any expression class that represents a "standalone" SQL statement that can be passed directly to an ``execute()`` method. It is already implicit within ``DDLElement`` and ``FunctionElement``. 
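As a quick illustration of the guidelines above, a construct such as the ``coalesce`` :class:`~sqlalchemy.sql.expression.FunctionElement` shown in the bullets can be used like any other column expression once its compilation rules are registered; the table below is illustrative only::

    from sqlalchemy import Column, Integer, MetaData, Table, select

    metadata = MetaData()
    accounts = Table('accounts', metadata,
        Column('id', Integer, primary_key=True),
        Column('x', Integer),
        Column('y', Integer),
    )

    # 'coalesce' refers to the FunctionElement subclass defined above
    stmt = select([accounts.c.id, coalesce(accounts.c.x, accounts.c.y)])
    print stmt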
Further Examples ================ "UTC timestamp" function ------------------------- A function that works like "CURRENT_TIMESTAMP" except applies the appropriate conversions so that the time is in UTC time. Timestamps are best stored in relational databases as UTC, without time zones. UTC so that your database doesn't think time has gone backwards in the hour when daylight savings ends, without timezones because timezones are like character encodings - they're best applied only at the endpoints of an application (i.e. convert to UTC upon user input, re-apply desired timezone upon display). For Postgresql and Microsoft SQL Server:: from sqlalchemy.sql import expression from sqlalchemy.ext.compiler import compiles from sqlalchemy.types import DateTime class utcnow(expression.FunctionElement): type = DateTime() @compiles(utcnow, 'postgresql') def pg_utcnow(element, compiler, **kw): return "TIMEZONE('utc', CURRENT_TIMESTAMP)" @compiles(utcnow, 'mssql') def ms_utcnow(element, compiler, **kw): return "GETUTCDATE()" Example usage:: from sqlalchemy import ( Table, Column, Integer, String, DateTime, MetaData ) metadata = MetaData() event = Table("event", metadata, Column("id", Integer, primary_key=True), Column("description", String(50), nullable=False), Column("timestamp", DateTime, server_default=utcnow()) ) "GREATEST" function ------------------- The "GREATEST" function is given any number of arguments and returns the one that is of the highest value - it's equivalent to Python's ``max`` function. A SQL standard version versus a CASE based version which only accommodates two arguments:: from sqlalchemy.sql import expression from sqlalchemy.ext.compiler import compiles from sqlalchemy.types import Numeric class greatest(expression.FunctionElement): type = Numeric() name = 'greatest' @compiles(greatest) def default_greatest(element, compiler, **kw): return compiler.visit_function(element) @compiles(greatest, 'sqlite') @compiles(greatest, 'mssql') @compiles(greatest, 'oracle') def case_greatest(element, compiler, **kw): arg1, arg2 = list(element.clauses) return "CASE WHEN %s > %s THEN %s ELSE %s END" % ( compiler.process(arg1), compiler.process(arg2), compiler.process(arg1), compiler.process(arg2), ) Example usage:: Session.query(Account).\\ filter( greatest( Account.checking_balance, Account.savings_balance) > 10000 ) "false" expression ------------------ Render a "false" constant expression, rendering as "0" on platforms that don't have a "false" constant:: from sqlalchemy.sql import expression from sqlalchemy.ext.compiler import compiles class sql_false(expression.ColumnElement): pass @compiles(sql_false) def default_false(element, compiler, **kw): return "false" @compiles(sql_false, 'mssql') @compiles(sql_false, 'mysql') @compiles(sql_false, 'oracle') def int_false(element, compiler, **kw): return "0" Example usage:: from sqlalchemy import select, union_all exp = union_all( select([users.c.name, sql_false().label("enrolled")]), select([customers.c.name, customers.c.enrolled]) ) """ from .. import exc from ..sql import visitors def compiles(class_, *specs): """Register a function as a compiler for a given :class:`.ClauseElement` type.""" def decorate(fn): existing = class_.__dict__.get('_compiler_dispatcher', None) existing_dispatch = class_.__dict__.get('_compiler_dispatch') if not existing: existing = _dispatcher() if existing_dispatch: existing.specs['default'] = existing_dispatch # TODO: why is the lambda needed ? 
setattr(class_, '_compiler_dispatch', lambda *arg, **kw: existing(*arg, **kw)) setattr(class_, '_compiler_dispatcher', existing) if specs: for s in specs: existing.specs[s] = fn else: existing.specs['default'] = fn return fn return decorate def deregister(class_): """Remove all custom compilers associated with a given :class:`.ClauseElement` type.""" if hasattr(class_, '_compiler_dispatcher'): # regenerate default _compiler_dispatch visitors._generate_dispatch(class_) # remove custom directive del class_._compiler_dispatcher class _dispatcher(object): def __init__(self): self.specs = {} def __call__(self, element, compiler, **kw): # TODO: yes, this could also switch off of DBAPI in use. fn = self.specs.get(compiler.dialect.name, None) if not fn: try: fn = self.specs['default'] except KeyError: raise exc.CompileError( "%s construct has no default " "compilation handler." % type(element)) return fn(element, compiler, **kw) SQLAlchemy-0.8.4/lib/sqlalchemy/ext/declarative/0000755000076500000240000000000012251151573022226 5ustar classicstaff00000000000000SQLAlchemy-0.8.4/lib/sqlalchemy/ext/declarative/__init__.py0000644000076500000240000013173312251150015024335 0ustar classicstaff00000000000000# ext/declarative/__init__.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """ Synopsis ======== SQLAlchemy object-relational configuration involves the combination of :class:`.Table`, :func:`.mapper`, and class objects to define a mapped class. :mod:`~sqlalchemy.ext.declarative` allows all three to be expressed at once within the class declaration. As much as possible, regular SQLAlchemy schema and ORM constructs are used directly, so that configuration between "classical" ORM usage and declarative remain highly similar. As a simple example:: from sqlalchemy.ext.declarative import declarative_base Base = declarative_base() class SomeClass(Base): __tablename__ = 'some_table' id = Column(Integer, primary_key=True) name = Column(String(50)) Above, the :func:`declarative_base` callable returns a new base class from which all mapped classes should inherit. When the class definition is completed, a new :class:`.Table` and :func:`.mapper` will have been generated. The resulting table and mapper are accessible via ``__table__`` and ``__mapper__`` attributes on the ``SomeClass`` class:: # access the mapped Table SomeClass.__table__ # access the Mapper SomeClass.__mapper__ Defining Attributes =================== In the previous example, the :class:`.Column` objects are automatically named with the name of the attribute to which they are assigned. To name columns explicitly with a name distinct from their mapped attribute, just give the column a name. Below, column "some_table_id" is mapped to the "id" attribute of `SomeClass`, but in SQL will be represented as "some_table_id":: class SomeClass(Base): __tablename__ = 'some_table' id = Column("some_table_id", Integer, primary_key=True) Attributes may be added to the class after its construction, and they will be added to the underlying :class:`.Table` and :func:`.mapper` definitions as appropriate:: SomeClass.data = Column('data', Unicode) SomeClass.related = relationship(RelatedInfo) Classes which are constructed using declarative can interact freely with classes that are mapped explicitly with :func:`.mapper`. 
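For example, a class mapped in the "classical" style with :func:`.mapper`
can participate in relationships with declarative classes.  A minimal
sketch, assuming the ``SomeClass`` mapping from the synopsis above (the
``Address`` class and ``address`` table here are hypothetical); note that
the table is associated with ``Base.metadata``::

    from sqlalchemy import Table, Column, Integer, String, ForeignKey
    from sqlalchemy.orm import mapper, relationship

    address_table = Table('address', Base.metadata,
        Column('id', Integer, primary_key=True),
        Column('some_table_id', Integer, ForeignKey('some_table.id')),
        Column('email', String(50))
    )

    class Address(object):
        pass

    # a classical mapping, referring freely to the declarative SomeClass
    mapper(Address, address_table, properties={
        'parent': relationship(SomeClass)
    })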
It is recommended, though not required, that all tables share the same underlying :class:`~sqlalchemy.schema.MetaData` object, so that string-configured :class:`~sqlalchemy.schema.ForeignKey` references can be resolved without issue. Accessing the MetaData ======================= The :func:`declarative_base` base class contains a :class:`.MetaData` object where newly defined :class:`.Table` objects are collected. This object is intended to be accessed directly for :class:`.MetaData`-specific operations. Such as, to issue CREATE statements for all tables:: engine = create_engine('sqlite://') Base.metadata.create_all(engine) :func:`declarative_base` can also receive a pre-existing :class:`.MetaData` object, which allows a declarative setup to be associated with an already existing traditional collection of :class:`~sqlalchemy.schema.Table` objects:: mymetadata = MetaData() Base = declarative_base(metadata=mymetadata) Configuring Relationships ========================= Relationships to other classes are done in the usual way, with the added feature that the class specified to :func:`~sqlalchemy.orm.relationship` may be a string name. The "class registry" associated with ``Base`` is used at mapper compilation time to resolve the name into the actual class object, which is expected to have been defined once the mapper configuration is used:: class User(Base): __tablename__ = 'users' id = Column(Integer, primary_key=True) name = Column(String(50)) addresses = relationship("Address", backref="user") class Address(Base): __tablename__ = 'addresses' id = Column(Integer, primary_key=True) email = Column(String(50)) user_id = Column(Integer, ForeignKey('users.id')) Column constructs, since they are just that, are immediately usable, as below where we define a primary join condition on the ``Address`` class using them:: class Address(Base): __tablename__ = 'addresses' id = Column(Integer, primary_key=True) email = Column(String(50)) user_id = Column(Integer, ForeignKey('users.id')) user = relationship(User, primaryjoin=user_id == User.id) In addition to the main argument for :func:`~sqlalchemy.orm.relationship`, other arguments which depend upon the columns present on an as-yet undefined class may also be specified as strings. These strings are evaluated as Python expressions. The full namespace available within this evaluation includes all classes mapped for this declarative base, as well as the contents of the ``sqlalchemy`` package, including expression functions like :func:`~sqlalchemy.sql.expression.desc` and :attr:`~sqlalchemy.sql.expression.func`:: class User(Base): # .... addresses = relationship("Address", order_by="desc(Address.email)", primaryjoin="Address.user_id==User.id") For the case where more than one module contains a class of the same name, string class names can also be specified as module-qualified paths within any of these string expressions:: class User(Base): # .... addresses = relationship("myapp.model.address.Address", order_by="desc(myapp.model.address.Address.email)", primaryjoin="myapp.model.address.Address.user_id==" "myapp.model.user.User.id") The qualified path can be any partial path that removes ambiguity between the names. For example, to disambiguate between ``myapp.model.address.Address`` and ``myapp.model.lookup.Address``, we can specify ``address.Address`` or ``lookup.Address``:: class User(Base): # .... addresses = relationship("address.Address", order_by="desc(address.Address.email)", primaryjoin="address.Address.user_id==" "User.id") .. 
versionadded:: 0.8 module-qualified paths can be used when specifying string arguments with Declarative, in order to specify specific modules. Two alternatives also exist to using string-based attributes. A lambda can also be used, which will be evaluated after all mappers have been configured:: class User(Base): # ... addresses = relationship(lambda: Address, order_by=lambda: desc(Address.email), primaryjoin=lambda: Address.user_id==User.id) Or, the relationship can be added to the class explicitly after the classes are available:: User.addresses = relationship(Address, primaryjoin=Address.user_id==User.id) Configuring Many-to-Many Relationships ====================================== Many-to-many relationships are also declared in the same way with declarative as with traditional mappings. The ``secondary`` argument to :func:`.relationship` is as usual passed a :class:`.Table` object, which is typically declared in the traditional way. The :class:`.Table` usually shares the :class:`.MetaData` object used by the declarative base:: keywords = Table( 'keywords', Base.metadata, Column('author_id', Integer, ForeignKey('authors.id')), Column('keyword_id', Integer, ForeignKey('keywords.id')) ) class Author(Base): __tablename__ = 'authors' id = Column(Integer, primary_key=True) keywords = relationship("Keyword", secondary=keywords) Like other :func:`~sqlalchemy.orm.relationship` arguments, a string is accepted as well, passing the string name of the table as defined in the ``Base.metadata.tables`` collection:: class Author(Base): __tablename__ = 'authors' id = Column(Integer, primary_key=True) keywords = relationship("Keyword", secondary="keywords") As with traditional mapping, its generally not a good idea to use a :class:`.Table` as the "secondary" argument which is also mapped to a class, unless the :func:`.relationship` is declared with ``viewonly=True``. Otherwise, the unit-of-work system may attempt duplicate INSERT and DELETE statements against the underlying table. .. _declarative_sql_expressions: Defining SQL Expressions ======================== See :ref:`mapper_sql_expressions` for examples on declaratively mapping attributes to SQL expressions. .. _declarative_table_args: Table Configuration =================== Table arguments other than the name, metadata, and mapped Column arguments are specified using the ``__table_args__`` class attribute. This attribute accommodates both positional as well as keyword arguments that are normally sent to the :class:`~sqlalchemy.schema.Table` constructor. The attribute can be specified in one of two forms. One is as a dictionary:: class MyClass(Base): __tablename__ = 'sometable' __table_args__ = {'mysql_engine':'InnoDB'} The other, a tuple, where each argument is positional (usually constraints):: class MyClass(Base): __tablename__ = 'sometable' __table_args__ = ( ForeignKeyConstraint(['id'], ['remote_table.id']), UniqueConstraint('foo'), ) Keyword arguments can be specified with the above form by specifying the last argument as a dictionary:: class MyClass(Base): __tablename__ = 'sometable' __table_args__ = ( ForeignKeyConstraint(['id'], ['remote_table.id']), UniqueConstraint('foo'), {'autoload':True} ) Using a Hybrid Approach with __table__ ======================================= As an alternative to ``__tablename__``, a direct :class:`~sqlalchemy.schema.Table` construct may be used. 
The :class:`~sqlalchemy.schema.Column` objects, which in this case require their names, will be added to the mapping just like a regular mapping to a table:: class MyClass(Base): __table__ = Table('my_table', Base.metadata, Column('id', Integer, primary_key=True), Column('name', String(50)) ) ``__table__`` provides a more focused point of control for establishing table metadata, while still getting most of the benefits of using declarative. An application that uses reflection might want to load table metadata elsewhere and pass it to declarative classes:: from sqlalchemy.ext.declarative import declarative_base Base = declarative_base() Base.metadata.reflect(some_engine) class User(Base): __table__ = metadata.tables['user'] class Address(Base): __table__ = metadata.tables['address'] Some configuration schemes may find it more appropriate to use ``__table__``, such as those which already take advantage of the data-driven nature of :class:`.Table` to customize and/or automate schema definition. Note that when the ``__table__`` approach is used, the object is immediately usable as a plain :class:`.Table` within the class declaration body itself, as a Python class is only another syntactical block. Below this is illustrated by using the ``id`` column in the ``primaryjoin`` condition of a :func:`.relationship`:: class MyClass(Base): __table__ = Table('my_table', Base.metadata, Column('id', Integer, primary_key=True), Column('name', String(50)) ) widgets = relationship(Widget, primaryjoin=Widget.myclass_id==__table__.c.id) Similarly, mapped attributes which refer to ``__table__`` can be placed inline, as below where we assign the ``name`` column to the attribute ``_name``, generating a synonym for ``name``:: from sqlalchemy.ext.declarative import synonym_for class MyClass(Base): __table__ = Table('my_table', Base.metadata, Column('id', Integer, primary_key=True), Column('name', String(50)) ) _name = __table__.c.name @synonym_for("_name") def name(self): return "Name: %s" % _name Using Reflection with Declarative ================================= It's easy to set up a :class:`.Table` that uses ``autoload=True`` in conjunction with a mapped class:: class MyClass(Base): __table__ = Table('mytable', Base.metadata, autoload=True, autoload_with=some_engine) However, one improvement that can be made here is to not require the :class:`.Engine` to be available when classes are being first declared. To achieve this, use the :class:`.DeferredReflection` mixin, which sets up mappings only after a special ``prepare(engine)`` step is called:: from sqlalchemy.ext.declarative import declarative_base, DeferredReflection Base = declarative_base(cls=DeferredReflection) class Foo(Base): __tablename__ = 'foo' bars = relationship("Bar") class Bar(Base): __tablename__ = 'bar' # illustrate overriding of "bar.foo_id" to have # a foreign key constraint otherwise not # reflected, such as when using MySQL foo_id = Column(Integer, ForeignKey('foo.id')) Base.prepare(e) .. versionadded:: 0.8 Added :class:`.DeferredReflection`. Mapper Configuration ==================== Declarative makes use of the :func:`~.orm.mapper` function internally when it creates the mapping to the declared table. The options for :func:`~.orm.mapper` are passed directly through via the ``__mapper_args__`` class attribute. 
As always, arguments which reference locally mapped columns can reference them directly from within the class declaration:: from datetime import datetime class Widget(Base): __tablename__ = 'widgets' id = Column(Integer, primary_key=True) timestamp = Column(DateTime, nullable=False) __mapper_args__ = { 'version_id_col': timestamp, 'version_id_generator': lambda v:datetime.now() } .. _declarative_inheritance: Inheritance Configuration ========================= Declarative supports all three forms of inheritance as intuitively as possible. The ``inherits`` mapper keyword argument is not needed as declarative will determine this from the class itself. The various "polymorphic" keyword arguments are specified using ``__mapper_args__``. Joined Table Inheritance ~~~~~~~~~~~~~~~~~~~~~~~~ Joined table inheritance is defined as a subclass that defines its own table:: class Person(Base): __tablename__ = 'people' id = Column(Integer, primary_key=True) discriminator = Column('type', String(50)) __mapper_args__ = {'polymorphic_on': discriminator} class Engineer(Person): __tablename__ = 'engineers' __mapper_args__ = {'polymorphic_identity': 'engineer'} id = Column(Integer, ForeignKey('people.id'), primary_key=True) primary_language = Column(String(50)) Note that above, the ``Engineer.id`` attribute, since it shares the same attribute name as the ``Person.id`` attribute, will in fact represent the ``people.id`` and ``engineers.id`` columns together, with the "Engineer.id" column taking precedence if queried directly. To provide the ``Engineer`` class with an attribute that represents only the ``engineers.id`` column, give it a different attribute name:: class Engineer(Person): __tablename__ = 'engineers' __mapper_args__ = {'polymorphic_identity': 'engineer'} engineer_id = Column('id', Integer, ForeignKey('people.id'), primary_key=True) primary_language = Column(String(50)) .. versionchanged:: 0.7 joined table inheritance favors the subclass column over that of the superclass, such as querying above for ``Engineer.id``. Prior to 0.7 this was the reverse. .. _declarative_single_table: Single Table Inheritance ~~~~~~~~~~~~~~~~~~~~~~~~ Single table inheritance is defined as a subclass that does not have its own table; you just leave out the ``__table__`` and ``__tablename__`` attributes:: class Person(Base): __tablename__ = 'people' id = Column(Integer, primary_key=True) discriminator = Column('type', String(50)) __mapper_args__ = {'polymorphic_on': discriminator} class Engineer(Person): __mapper_args__ = {'polymorphic_identity': 'engineer'} primary_language = Column(String(50)) When the above mappers are configured, the ``Person`` class is mapped to the ``people`` table *before* the ``primary_language`` column is defined, and this column will not be included in its own mapping. When ``Engineer`` then defines the ``primary_language`` column, the column is added to the ``people`` table so that it is included in the mapping for ``Engineer`` and is also part of the table's full set of columns. Columns which are not mapped to ``Person`` are also excluded from any other single or joined inheriting classes using the ``exclude_properties`` mapper argument. 
Below, ``Manager`` will have all the attributes of ``Person`` and ``Manager`` but *not* the ``primary_language`` attribute of ``Engineer``:: class Manager(Person): __mapper_args__ = {'polymorphic_identity': 'manager'} golf_swing = Column(String(50)) The attribute exclusion logic is provided by the ``exclude_properties`` mapper argument, and declarative's default behavior can be disabled by passing an explicit ``exclude_properties`` collection (empty or otherwise) to the ``__mapper_args__``. Resolving Column Conflicts ^^^^^^^^^^^^^^^^^^^^^^^^^^ Note above that the ``primary_language`` and ``golf_swing`` columns are "moved up" to be applied to ``Person.__table__``, as a result of their declaration on a subclass that has no table of its own. A tricky case comes up when two subclasses want to specify *the same* column, as below:: class Person(Base): __tablename__ = 'people' id = Column(Integer, primary_key=True) discriminator = Column('type', String(50)) __mapper_args__ = {'polymorphic_on': discriminator} class Engineer(Person): __mapper_args__ = {'polymorphic_identity': 'engineer'} start_date = Column(DateTime) class Manager(Person): __mapper_args__ = {'polymorphic_identity': 'manager'} start_date = Column(DateTime) Above, the ``start_date`` column declared on both ``Engineer`` and ``Manager`` will result in an error:: sqlalchemy.exc.ArgumentError: Column 'start_date' on class conflicts with existing column 'people.start_date' In a situation like this, Declarative can't be sure of the intent, especially if the ``start_date`` columns had, for example, different types. A situation like this can be resolved by using :class:`.declared_attr` to define the :class:`.Column` conditionally, taking care to return the **existing column** via the parent ``__table__`` if it already exists:: from sqlalchemy.ext.declarative import declared_attr class Person(Base): __tablename__ = 'people' id = Column(Integer, primary_key=True) discriminator = Column('type', String(50)) __mapper_args__ = {'polymorphic_on': discriminator} class Engineer(Person): __mapper_args__ = {'polymorphic_identity': 'engineer'} @declared_attr def start_date(cls): "Start date column, if not present already." return Person.__table__.c.get('start_date', Column(DateTime)) class Manager(Person): __mapper_args__ = {'polymorphic_identity': 'manager'} @declared_attr def start_date(cls): "Start date column, if not present already." return Person.__table__.c.get('start_date', Column(DateTime)) Above, when ``Manager`` is mapped, the ``start_date`` column is already present on the ``Person`` class. Declarative lets us return that :class:`.Column` as a result in this case, where it knows to skip re-assigning the same column. If the mapping is mis-configured such that the ``start_date`` column is accidentally re-assigned to a different table (such as, if we changed ``Manager`` to be joined inheritance without fixing ``start_date``), an error is raised which indicates an existing :class:`.Column` is trying to be re-assigned to a different owning :class:`.Table`. .. versionadded:: 0.8 :class:`.declared_attr` can be used on a non-mixin class, and the returned :class:`.Column` or other mapped attribute will be applied to the mapping as any other attribute. Previously, the resulting attribute would be ignored, and also result in a warning being emitted when a subclass was created. .. 
versionadded:: 0.8 :class:`.declared_attr`, when used either with a mixin or non-mixin declarative class, can return an existing :class:`.Column` already assigned to the parent :class:`.Table`, to indicate that the re-assignment of the :class:`.Column` should be skipped, however should still be mapped on the target class, in order to resolve duplicate column conflicts. The same concept can be used with mixin classes (see :ref:`declarative_mixins`):: class Person(Base): __tablename__ = 'people' id = Column(Integer, primary_key=True) discriminator = Column('type', String(50)) __mapper_args__ = {'polymorphic_on': discriminator} class HasStartDate(object): @declared_attr def start_date(cls): return cls.__table__.c.get('start_date', Column(DateTime)) class Engineer(HasStartDate, Person): __mapper_args__ = {'polymorphic_identity': 'engineer'} class Manager(HasStartDate, Person): __mapper_args__ = {'polymorphic_identity': 'manager'} The above mixin checks the local ``__table__`` attribute for the column. Because we're using single table inheritance, we're sure that in this case, ``cls.__table__`` refers to ``People.__table__``. If we were mixing joined- and single-table inheritance, we might want our mixin to check more carefully if ``cls.__table__`` is really the :class:`.Table` we're looking for. Concrete Table Inheritance ~~~~~~~~~~~~~~~~~~~~~~~~~~ Concrete is defined as a subclass which has its own table and sets the ``concrete`` keyword argument to ``True``:: class Person(Base): __tablename__ = 'people' id = Column(Integer, primary_key=True) name = Column(String(50)) class Engineer(Person): __tablename__ = 'engineers' __mapper_args__ = {'concrete':True} id = Column(Integer, primary_key=True) primary_language = Column(String(50)) name = Column(String(50)) Usage of an abstract base class is a little less straightforward as it requires usage of :func:`~sqlalchemy.orm.util.polymorphic_union`, which needs to be created with the :class:`.Table` objects before the class is built:: engineers = Table('engineers', Base.metadata, Column('id', Integer, primary_key=True), Column('name', String(50)), Column('primary_language', String(50)) ) managers = Table('managers', Base.metadata, Column('id', Integer, primary_key=True), Column('name', String(50)), Column('golf_swing', String(50)) ) punion = polymorphic_union({ 'engineer':engineers, 'manager':managers }, 'type', 'punion') class Person(Base): __table__ = punion __mapper_args__ = {'polymorphic_on':punion.c.type} class Engineer(Person): __table__ = engineers __mapper_args__ = {'polymorphic_identity':'engineer', 'concrete':True} class Manager(Person): __table__ = managers __mapper_args__ = {'polymorphic_identity':'manager', 'concrete':True} .. _declarative_concrete_helpers: Using the Concrete Helpers ^^^^^^^^^^^^^^^^^^^^^^^^^^^ Helper classes provides a simpler pattern for concrete inheritance. With these objects, the ``__declare_last__`` helper is used to configure the "polymorphic" loader for the mapper after all subclasses have been declared. .. 
versionadded:: 0.7.3 An abstract base can be declared using the :class:`.AbstractConcreteBase` class:: from sqlalchemy.ext.declarative import AbstractConcreteBase class Employee(AbstractConcreteBase, Base): pass To have a concrete ``employee`` table, use :class:`.ConcreteBase` instead:: from sqlalchemy.ext.declarative import ConcreteBase class Employee(ConcreteBase, Base): __tablename__ = 'employee' employee_id = Column(Integer, primary_key=True) name = Column(String(50)) __mapper_args__ = { 'polymorphic_identity':'employee', 'concrete':True} Either ``Employee`` base can be used in the normal fashion:: class Manager(Employee): __tablename__ = 'manager' employee_id = Column(Integer, primary_key=True) name = Column(String(50)) manager_data = Column(String(40)) __mapper_args__ = { 'polymorphic_identity':'manager', 'concrete':True} class Engineer(Employee): __tablename__ = 'engineer' employee_id = Column(Integer, primary_key=True) name = Column(String(50)) engineer_info = Column(String(40)) __mapper_args__ = {'polymorphic_identity':'engineer', 'concrete':True} .. _declarative_mixins: Mixin and Custom Base Classes ============================== A common need when using :mod:`~sqlalchemy.ext.declarative` is to share some functionality, such as a set of common columns, some common table options, or other mapped properties, across many classes. The standard Python idioms for this is to have the classes inherit from a base which includes these common features. When using :mod:`~sqlalchemy.ext.declarative`, this idiom is allowed via the usage of a custom declarative base class, as well as a "mixin" class which is inherited from in addition to the primary base. Declarative includes several helper features to make this work in terms of how mappings are declared. An example of some commonly mixed-in idioms is below:: from sqlalchemy.ext.declarative import declared_attr class MyMixin(object): @declared_attr def __tablename__(cls): return cls.__name__.lower() __table_args__ = {'mysql_engine': 'InnoDB'} __mapper_args__= {'always_refresh': True} id = Column(Integer, primary_key=True) class MyModel(MyMixin, Base): name = Column(String(1000)) Where above, the class ``MyModel`` will contain an "id" column as the primary key, a ``__tablename__`` attribute that derives from the name of the class itself, as well as ``__table_args__`` and ``__mapper_args__`` defined by the ``MyMixin`` mixin class. There's no fixed convention over whether ``MyMixin`` precedes ``Base`` or not. Normal Python method resolution rules apply, and the above example would work just as well with:: class MyModel(Base, MyMixin): name = Column(String(1000)) This works because ``Base`` here doesn't define any of the variables that ``MyMixin`` defines, i.e. ``__tablename__``, ``__table_args__``, ``id``, etc. If the ``Base`` did define an attribute of the same name, the class placed first in the inherits list would determine which attribute is used on the newly defined class. Augmenting the Base ~~~~~~~~~~~~~~~~~~~ In addition to using a pure mixin, most of the techniques in this section can also be applied to the base class itself, for patterns that should apply to all classes derived from a particular base. 
This is achieved using the ``cls`` argument of the :func:`.declarative_base` function:: from sqlalchemy.ext.declarative import declared_attr class Base(object): @declared_attr def __tablename__(cls): return cls.__name__.lower() __table_args__ = {'mysql_engine': 'InnoDB'} id = Column(Integer, primary_key=True) from sqlalchemy.ext.declarative import declarative_base Base = declarative_base(cls=Base) class MyModel(Base): name = Column(String(1000)) Where above, ``MyModel`` and all other classes that derive from ``Base`` will have a table name derived from the class name, an ``id`` primary key column, as well as the "InnoDB" engine for MySQL. Mixing in Columns ~~~~~~~~~~~~~~~~~ The most basic way to specify a column on a mixin is by simple declaration:: class TimestampMixin(object): created_at = Column(DateTime, default=func.now()) class MyModel(TimestampMixin, Base): __tablename__ = 'test' id = Column(Integer, primary_key=True) name = Column(String(1000)) Where above, all declarative classes that include ``TimestampMixin`` will also have a column ``created_at`` that applies a timestamp to all row insertions. Those familiar with the SQLAlchemy expression language know that the object identity of clause elements defines their role in a schema. Two ``Table`` objects ``a`` and ``b`` may both have a column called ``id``, but the way these are differentiated is that ``a.c.id`` and ``b.c.id`` are two distinct Python objects, referencing their parent tables ``a`` and ``b`` respectively. In the case of the mixin column, it seems that only one :class:`.Column` object is explicitly created, yet the ultimate ``created_at`` column above must exist as a distinct Python object for each separate destination class. To accomplish this, the declarative extension creates a **copy** of each :class:`.Column` object encountered on a class that is detected as a mixin. This copy mechanism is limited to simple columns that have no foreign keys, as a :class:`.ForeignKey` itself contains references to columns which can't be properly recreated at this level. For columns that have foreign keys, as well as for the variety of mapper-level constructs that require destination-explicit context, the :class:`~.declared_attr` decorator is provided so that patterns common to many classes can be defined as callables:: from sqlalchemy.ext.declarative import declared_attr class ReferenceAddressMixin(object): @declared_attr def address_id(cls): return Column(Integer, ForeignKey('address.id')) class User(ReferenceAddressMixin, Base): __tablename__ = 'user' id = Column(Integer, primary_key=True) Where above, the ``address_id`` class-level callable is executed at the point at which the ``User`` class is constructed, and the declarative extension can use the resulting :class:`.Column` object as returned by the method without the need to copy it. .. versionchanged:: > 0.6.5 Rename 0.6.5 ``sqlalchemy.util.classproperty`` into :class:`~.declared_attr`. 
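To make the copy mechanism concrete, a short sketch (reusing the
``TimestampMixin`` and ``MyModel`` classes above; ``MyOtherModel`` is
hypothetical) shows that each mapped class ends up with its own distinct
:class:`.Column` object::

    class MyOtherModel(TimestampMixin, Base):
        __tablename__ = 'test_other'
        id = Column(Integer, primary_key=True)

    # the mixin's Column was copied separately onto each Table
    assert MyModel.__table__.c.created_at is not MyOtherModel.__table__.c.created_at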
Columns generated by :class:`~.declared_attr` can also be referenced by ``__mapper_args__`` to a limited degree, currently by ``polymorphic_on`` and ``version_id_col``, by specifying the classdecorator itself into the dictionary - the declarative extension will resolve them at class construction time:: class MyMixin: @declared_attr def type_(cls): return Column(String(50)) __mapper_args__= {'polymorphic_on':type_} class MyModel(MyMixin, Base): __tablename__='test' id = Column(Integer, primary_key=True) Mixing in Relationships ~~~~~~~~~~~~~~~~~~~~~~~ Relationships created by :func:`~sqlalchemy.orm.relationship` are provided with declarative mixin classes exclusively using the :class:`.declared_attr` approach, eliminating any ambiguity which could arise when copying a relationship and its possibly column-bound contents. Below is an example which combines a foreign key column and a relationship so that two classes ``Foo`` and ``Bar`` can both be configured to reference a common target class via many-to-one:: class RefTargetMixin(object): @declared_attr def target_id(cls): return Column('target_id', ForeignKey('target.id')) @declared_attr def target(cls): return relationship("Target") class Foo(RefTargetMixin, Base): __tablename__ = 'foo' id = Column(Integer, primary_key=True) class Bar(RefTargetMixin, Base): __tablename__ = 'bar' id = Column(Integer, primary_key=True) class Target(Base): __tablename__ = 'target' id = Column(Integer, primary_key=True) :func:`~sqlalchemy.orm.relationship` definitions which require explicit primaryjoin, order_by etc. expressions should use the string forms for these arguments, so that they are evaluated as late as possible. To reference the mixin class in these expressions, use the given ``cls`` to get its name:: class RefTargetMixin(object): @declared_attr def target_id(cls): return Column('target_id', ForeignKey('target.id')) @declared_attr def target(cls): return relationship("Target", primaryjoin="Target.id==%s.target_id" % cls.__name__ ) Mixing in deferred(), column_property(), and other MapperProperty classes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Like :func:`~sqlalchemy.orm.relationship`, all :class:`~sqlalchemy.orm.interfaces.MapperProperty` subclasses such as :func:`~sqlalchemy.orm.deferred`, :func:`~sqlalchemy.orm.column_property`, etc. ultimately involve references to columns, and therefore, when used with declarative mixins, have the :class:`.declared_attr` requirement so that no reliance on copying is needed:: class SomethingMixin(object): @declared_attr def dprop(cls): return deferred(Column(Integer)) class Something(SomethingMixin, Base): __tablename__ = "something" Mixing in Association Proxy and Other Attributes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Mixins can specify user-defined attributes as well as other extension units such as :func:`.association_proxy`. The usage of :class:`.declared_attr` is required in those cases where the attribute must be tailored specifically to the target subclass. An example is when constructing multiple :func:`.association_proxy` attributes which each target a different type of child object. 
Below is an :func:`.association_proxy` / mixin example which provides a
scalar list of string values to an implementing class::

    from sqlalchemy import Column, Integer, ForeignKey, String
    from sqlalchemy.orm import relationship
    from sqlalchemy.ext.associationproxy import association_proxy
    from sqlalchemy.ext.declarative import declarative_base, declared_attr

    Base = declarative_base()

    class HasStringCollection(object):
        @declared_attr
        def _strings(cls):
            class StringAttribute(Base):
                __tablename__ = cls.string_table_name
                id = Column(Integer, primary_key=True)
                value = Column(String(50), nullable=False)
                parent_id = Column(Integer,
                                ForeignKey('%s.id' % cls.__tablename__),
                                nullable=False)
                def __init__(self, value):
                    self.value = value

            return relationship(StringAttribute)

        @declared_attr
        def strings(cls):
            return association_proxy('_strings', 'value')

    class TypeA(HasStringCollection, Base):
        __tablename__ = 'type_a'
        string_table_name = 'type_a_strings'
        id = Column(Integer(), primary_key=True)

    class TypeB(HasStringCollection, Base):
        __tablename__ = 'type_b'
        string_table_name = 'type_b_strings'
        id = Column(Integer(), primary_key=True)

Above, the ``HasStringCollection`` mixin produces a :func:`.relationship`
which refers to a newly generated class called ``StringAttribute``.  The
``StringAttribute`` class is generated with its own :class:`.Table`
definition which is local to the parent class making usage of the
``HasStringCollection`` mixin.  It also produces an
:func:`.association_proxy` object which proxies references to the
``strings`` attribute onto the ``value`` attribute of each
``StringAttribute`` instance.

``TypeA`` or ``TypeB`` can be instantiated given the constructor
argument ``strings``, a list of strings::

    ta = TypeA(strings=['foo', 'bar'])
    tb = TypeB(strings=['bat', 'bar'])

This list will generate a collection of ``StringAttribute`` objects, which
are persisted into the table local to the owning class, either
``type_a_strings`` or ``type_b_strings``::

    >>> print ta._strings
    [<__main__.StringAttribute object at 0x10151cd90>,
        <__main__.StringAttribute object at 0x10151ce10>]

When constructing the :func:`.association_proxy`, the
:class:`.declared_attr` decorator must be used so that a distinct
:func:`.association_proxy` object is created for each of the ``TypeA``
and ``TypeB`` classes.

.. versionadded:: 0.8 :class:`.declared_attr` is usable with non-mapped
   attributes, including user-defined attributes as well as
   :func:`.association_proxy`.

Controlling table inheritance with mixins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The ``__tablename__`` attribute in conjunction with the hierarchy of classes
involved in a declarative mixin scenario controls what type of table
inheritance, if any, is configured by the declarative extension.

If the ``__tablename__`` is computed by a mixin, you may need to control
which classes get the computed attribute in order to get the type of table
inheritance you require.
For example, if you had a mixin that computes ``__tablename__`` but where
you wanted to use that mixin in a single table inheritance hierarchy, you
can explicitly specify ``__tablename__`` as ``None`` to indicate that the
class should not have a table mapped::

    from sqlalchemy.ext.declarative import declared_attr

    class Tablename:
        @declared_attr
        def __tablename__(cls):
            return cls.__name__.lower()

    class Person(Tablename, Base):
        id = Column(Integer, primary_key=True)
        discriminator = Column('type', String(50))
        __mapper_args__ = {'polymorphic_on': discriminator}

    class Engineer(Person):
        __tablename__ = None
        __mapper_args__ = {'polymorphic_identity': 'engineer'}
        primary_language = Column(String(50))

Alternatively, you can make the mixin intelligent enough to only return a
``__tablename__`` in the event that no table is already mapped in the
inheritance hierarchy. To help with this, a
:func:`~sqlalchemy.ext.declarative.has_inherited_table` helper function is
provided that returns ``True`` if a parent class already has a mapped table.

As an example, here's a mixin that will only allow single table
inheritance::

    from sqlalchemy.ext.declarative import declared_attr
    from sqlalchemy.ext.declarative import has_inherited_table

    class Tablename(object):
        @declared_attr
        def __tablename__(cls):
            if has_inherited_table(cls):
                return None
            return cls.__name__.lower()

    class Person(Tablename, Base):
        id = Column(Integer, primary_key=True)
        discriminator = Column('type', String(50))
        __mapper_args__ = {'polymorphic_on': discriminator}

    class Engineer(Person):
        primary_language = Column(String(50))
        __mapper_args__ = {'polymorphic_identity': 'engineer'}

If you want to use a similar pattern with a mix of single and joined table
inheritance, you would need a slightly different mixin and use it on any
joined table child classes in addition to their parent classes::

    from sqlalchemy.ext.declarative import declared_attr
    from sqlalchemy.ext.declarative import has_inherited_table

    class Tablename(object):
        @declared_attr
        def __tablename__(cls):
            if (has_inherited_table(cls) and
                Tablename not in cls.__bases__):
                return None
            return cls.__name__.lower()

    class Person(Tablename, Base):
        id = Column(Integer, primary_key=True)
        discriminator = Column('type', String(50))
        __mapper_args__ = {'polymorphic_on': discriminator}

    # This is single table inheritance
    class Engineer(Person):
        primary_language = Column(String(50))
        __mapper_args__ = {'polymorphic_identity': 'engineer'}

    # This is joined table inheritance
    class Manager(Tablename, Person):
        id = Column(Integer, ForeignKey('person.id'), primary_key=True)
        preferred_recreation = Column(String(50))
        __mapper_args__ = {'polymorphic_identity': 'manager'}

Combining Table/Mapper Arguments from Multiple Mixins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

In the case of ``__table_args__`` or ``__mapper_args__`` specified with
declarative mixins, you may want to combine some parameters from several
mixins with those you wish to define on the class itself.
The :class:`.declared_attr` decorator can be used here to create
user-defined collation routines that pull from multiple collections::

    from sqlalchemy.ext.declarative import declared_attr

    class MySQLSettings(object):
        __table_args__ = {'mysql_engine':'InnoDB'}

    class MyOtherMixin(object):
        __table_args__ = {'info':'foo'}

    class MyModel(MySQLSettings, MyOtherMixin, Base):
        __tablename__ = 'my_model'

        @declared_attr
        def __table_args__(cls):
            args = dict()
            args.update(MySQLSettings.__table_args__)
            args.update(MyOtherMixin.__table_args__)
            return args

        id = Column(Integer, primary_key=True)

Creating Indexes with Mixins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~

To define a named, potentially multicolumn :class:`.Index` that applies
to all tables derived from a mixin, use the "inline" form of
:class:`.Index` and establish it as part of ``__table_args__``::

    class MyMixin(object):
        a = Column(Integer)
        b = Column(Integer)

        @declared_attr
        def __table_args__(cls):
            return (Index('test_idx_%s' % cls.__tablename__, 'a', 'b'),)

    class MyModel(MyMixin, Base):
        __tablename__ = 'atable'
        c = Column(Integer, primary_key=True)

Special Directives
==================

``__declare_last__()``
~~~~~~~~~~~~~~~~~~~~~~

The ``__declare_last__()`` hook allows definition of a class level function
that is automatically called by the :meth:`.MapperEvents.after_configured`
event, which occurs after mappings are assumed to be completed and the
'configure' step has finished::

    class MyClass(Base):
        @classmethod
        def __declare_last__(cls):
            ""
            # do something with mappings

.. versionadded:: 0.7.3

.. _declarative_abstract:

``__abstract__``
~~~~~~~~~~~~~~~~

``__abstract__`` causes declarative to skip the production of a table or
mapper for the class entirely.  A class can be added within a hierarchy in
the same way as a mixin (see :ref:`declarative_mixins`), allowing subclasses
to extend just from the special class::

    class SomeAbstractBase(Base):
        __abstract__ = True

        def some_helpful_method(self):
            ""

        @declared_attr
        def __mapper_args__(cls):
            return {"helpful mapper arguments":True}

    class MyMappedClass(SomeAbstractBase):
        ""

One possible use of ``__abstract__`` is to use a distinct
:class:`.MetaData` for different bases::

    Base = declarative_base()

    class DefaultBase(Base):
        __abstract__ = True
        metadata = MetaData()

    class OtherBase(Base):
        __abstract__ = True
        metadata = MetaData()

Above, classes which inherit from ``DefaultBase`` will use one
:class:`.MetaData` as the registry of tables, and those which inherit from
``OtherBase`` will use a different one. The tables themselves can then be
created perhaps within distinct databases::

    DefaultBase.metadata.create_all(some_engine)
    OtherBase.metadata.create_all(some_other_engine)

.. versionadded:: 0.7.3

Class Constructor
=================

As a convenience feature, the :func:`declarative_base` sets a default
constructor on classes which takes keyword arguments, and assigns them
to the named attributes::

    e = Engineer(primary_language='python')

Sessions
========

Note that ``declarative`` does nothing special with sessions, and is
only intended as an easier way to configure mappers and
:class:`~sqlalchemy.schema.Table` objects.  A typical application
setup using :class:`~sqlalchemy.orm.scoped_session` might look like::

    engine = create_engine('postgresql://scott:tiger@localhost/test')
    Session = scoped_session(sessionmaker(autocommit=False,
                                          autoflush=False,
                                          bind=engine))
    Base = declarative_base()

Mapped instances then make usage of
:class:`~sqlalchemy.orm.session.Session` in the usual way.
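For completeness, a brief sketch of that usual flow, assuming a mapped
``Engineer`` class such as the one in the constructor example above, along
with the ``engine`` and ``Session`` configured here::

    Base.metadata.create_all(engine)

    session = Session()
    session.add(Engineer(primary_language='python'))
    session.commit()

    for engineer in session.query(Engineer):
        print engineer.primary_language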
""" from .api import declarative_base, synonym_for, comparable_using, \ instrument_declarative, ConcreteBase, AbstractConcreteBase, \ DeclarativeMeta, DeferredReflection, has_inherited_table,\ declared_attr, as_declarative __all__ = ['declarative_base', 'synonym_for', 'has_inherited_table', 'comparable_using', 'instrument_declarative', 'declared_attr', 'ConcreteBase', 'AbstractConcreteBase', 'DeclarativeMeta', 'DeferredReflection'] SQLAlchemy-0.8.4/lib/sqlalchemy/ext/declarative/api.py0000644000076500000240000004070712251150015023347 0ustar classicstaff00000000000000# ext/declarative/api.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Public API functions and helpers for declarative.""" from ...schema import Table, MetaData from ...orm import synonym as _orm_synonym, mapper,\ comparable_property,\ interfaces from ...orm.util import polymorphic_union, _mapper_or_none from ... import exc import weakref from .base import _as_declarative, \ _declarative_constructor,\ _MapperConfig, _add_attribute def instrument_declarative(cls, registry, metadata): """Given a class, configure the class declaratively, using the given registry, which can be any dictionary, and MetaData object. """ if '_decl_class_registry' in cls.__dict__: raise exc.InvalidRequestError( "Class %r already has been " "instrumented declaratively" % cls) cls._decl_class_registry = registry cls.metadata = metadata _as_declarative(cls, cls.__name__, cls.__dict__) def has_inherited_table(cls): """Given a class, return True if any of the classes it inherits from has a mapped table, otherwise return False. """ for class_ in cls.__mro__[1:]: if getattr(class_, '__table__', None) is not None: return True return False class DeclarativeMeta(type): def __init__(cls, classname, bases, dict_): if '_decl_class_registry' not in cls.__dict__: _as_declarative(cls, classname, cls.__dict__) type.__init__(cls, classname, bases, dict_) def __setattr__(cls, key, value): _add_attribute(cls, key, value) def synonym_for(name, map_column=False): """Decorator, make a Python @property a query synonym for a column. A decorator version of :func:`~sqlalchemy.orm.synonym`. The function being decorated is the 'descriptor', otherwise passes its arguments through to synonym():: @synonym_for('col') @property def prop(self): return 'special sauce' The regular ``synonym()`` is also usable directly in a declarative setting and may be convenient for read/write properties:: prop = synonym('col', descriptor=property(_read_prop, _write_prop)) """ def decorate(fn): return _orm_synonym(name, map_column=map_column, descriptor=fn) return decorate def comparable_using(comparator_factory): """Decorator, allow a Python @property to be used in query criteria. 
This is a decorator front end to :func:`~sqlalchemy.orm.comparable_property` that passes through the comparator_factory and the function being decorated:: @comparable_using(MyComparatorType) @property def prop(self): return 'special sauce' The regular ``comparable_property()`` is also usable directly in a declarative setting and may be convenient for read/write properties:: prop = comparable_property(MyComparatorType) """ def decorate(fn): return comparable_property(comparator_factory, fn) return decorate class declared_attr(interfaces._MappedAttribute, property): """Mark a class-level method as representing the definition of a mapped property or special declarative member name. @declared_attr turns the attribute into a scalar-like property that can be invoked from the uninstantiated class. Declarative treats attributes specifically marked with @declared_attr as returning a construct that is specific to mapping or declarative table configuration. The name of the attribute is that of what the non-dynamic version of the attribute would be. @declared_attr is more often than not applicable to mixins, to define relationships that are to be applied to different implementors of the class:: class ProvidesUser(object): "A mixin that adds a 'user' relationship to classes." @declared_attr def user(self): return relationship("User") It also can be applied to mapped classes, such as to provide a "polymorphic" scheme for inheritance:: class Employee(Base): id = Column(Integer, primary_key=True) type = Column(String(50), nullable=False) @declared_attr def __tablename__(cls): return cls.__name__.lower() @declared_attr def __mapper_args__(cls): if cls.__name__ == 'Employee': return { "polymorphic_on":cls.type, "polymorphic_identity":"Employee" } else: return {"polymorphic_identity":cls.__name__} .. versionchanged:: 0.8 :class:`.declared_attr` can be used with non-ORM or extension attributes, such as user-defined attributes or :func:`.association_proxy` objects, which will be assigned to the class at class construction time. """ def __init__(self, fget, *arg, **kw): super(declared_attr, self).__init__(fget, *arg, **kw) self.__doc__ = fget.__doc__ def __get__(desc, self, cls): return desc.fget(cls) def declarative_base(bind=None, metadata=None, mapper=None, cls=object, name='Base', constructor=_declarative_constructor, class_registry=None, metaclass=DeclarativeMeta): """Construct a base class for declarative class definitions. The new base class will be given a metaclass that produces appropriate :class:`~sqlalchemy.schema.Table` objects and makes the appropriate :func:`~sqlalchemy.orm.mapper` calls based on the information provided declaratively in the class and any subclasses of the class. :param bind: An optional :class:`~sqlalchemy.engine.base.Connectable`, will be assigned the ``bind`` attribute on the :class:`~sqlalchemy.MetaData` instance. :param metadata: An optional :class:`~sqlalchemy.MetaData` instance. All :class:`~sqlalchemy.schema.Table` objects implicitly declared by subclasses of the base will share this MetaData. A MetaData instance will be created if none is provided. The :class:`~sqlalchemy.MetaData` instance will be available via the `metadata` attribute of the generated declarative base class. :param mapper: An optional callable, defaults to :func:`~sqlalchemy.orm.mapper`. Will be used to map subclasses to their Tables. :param cls: Defaults to :class:`object`. A type to use as the base for the generated declarative base class. May be a class or tuple of classes. 
:param name: Defaults to ``Base``. The display name for the generated class. Customizing this is not required, but can improve clarity in tracebacks and debugging. :param constructor: Defaults to :func:`~sqlalchemy.ext.declarative._declarative_constructor`, an __init__ implementation that assigns \**kwargs for declared fields and relationships to an instance. If ``None`` is supplied, no __init__ will be provided and construction will fall back to cls.__init__ by way of the normal Python semantics. :param class_registry: optional dictionary that will serve as the registry of class names-> mapped classes when string names are used to identify classes inside of :func:`.relationship` and others. Allows two or more declarative base classes to share the same registry of class names for simplified inter-base relationships. :param metaclass: Defaults to :class:`.DeclarativeMeta`. A metaclass or __metaclass__ compatible callable to use as the meta type of the generated declarative base class. .. seealso:: :func:`.as_declarative` """ lcl_metadata = metadata or MetaData() if bind: lcl_metadata.bind = bind if class_registry is None: class_registry = weakref.WeakValueDictionary() bases = not isinstance(cls, tuple) and (cls,) or cls class_dict = dict(_decl_class_registry=class_registry, metadata=lcl_metadata) if constructor: class_dict['__init__'] = constructor if mapper: class_dict['__mapper_cls__'] = mapper return metaclass(name, bases, class_dict) def as_declarative(**kw): """ Class decorator for :func:`.declarative_base`. Provides a syntactical shortcut to the ``cls`` argument sent to :func:`.declarative_base`, allowing the base class to be converted in-place to a "declarative" base:: from sqlalchemy.ext.declarative import as_declarative @as_declarative() class Base(object): @declared_attr def __tablename__(cls): return cls.__name__.lower() id = Column(Integer, primary_key=True) class MyMappedClass(Base): # ... All keyword arguments passed to :func:`.as_declarative` are passed along to :func:`.declarative_base`. .. versionadded:: 0.8.3 .. seealso:: :func:`.declarative_base` """ def decorate(cls): kw['cls'] = cls kw['name'] = cls.__name__ return declarative_base(**kw) return decorate class ConcreteBase(object): """A helper class for 'concrete' declarative mappings. :class:`.ConcreteBase` will use the :func:`.polymorphic_union` function automatically, against all tables mapped as a subclass to this class. The function is called via the ``__declare_last__()`` function, which is essentially a hook for the :func:`.MapperEvents.after_configured` event. :class:`.ConcreteBase` produces a mapped table for the class itself. Compare to :class:`.AbstractConcreteBase`, which does not. 
Example:: from sqlalchemy.ext.declarative import ConcreteBase class Employee(ConcreteBase, Base): __tablename__ = 'employee' employee_id = Column(Integer, primary_key=True) name = Column(String(50)) __mapper_args__ = { 'polymorphic_identity':'employee', 'concrete':True} class Manager(Employee): __tablename__ = 'manager' employee_id = Column(Integer, primary_key=True) name = Column(String(50)) manager_data = Column(String(40)) __mapper_args__ = { 'polymorphic_identity':'manager', 'concrete':True} """ @classmethod def _create_polymorphic_union(cls, mappers): return polymorphic_union(dict( (mp.polymorphic_identity, mp.local_table) for mp in mappers ), 'type', 'pjoin') @classmethod def __declare_last__(cls): m = cls.__mapper__ if m.with_polymorphic: return mappers = list(m.self_and_descendants) pjoin = cls._create_polymorphic_union(mappers) m._set_with_polymorphic(("*", pjoin)) m._set_polymorphic_on(pjoin.c.type) class AbstractConcreteBase(ConcreteBase): """A helper class for 'concrete' declarative mappings. :class:`.AbstractConcreteBase` will use the :func:`.polymorphic_union` function automatically, against all tables mapped as a subclass to this class. The function is called via the ``__declare_last__()`` function, which is essentially a hook for the :func:`.MapperEvents.after_configured` event. :class:`.AbstractConcreteBase` does not produce a mapped table for the class itself. Compare to :class:`.ConcreteBase`, which does. Example:: from sqlalchemy.ext.declarative import AbstractConcreteBase class Employee(AbstractConcreteBase, Base): pass class Manager(Employee): __tablename__ = 'manager' employee_id = Column(Integer, primary_key=True) name = Column(String(50)) manager_data = Column(String(40)) __mapper_args__ = { 'polymorphic_identity':'manager', 'concrete':True} """ __abstract__ = True @classmethod def __declare_last__(cls): if hasattr(cls, '__mapper__'): return # can't rely on 'self_and_descendants' here # since technically an immediate subclass # might not be mapped, but a subclass # may be. mappers = [] stack = list(cls.__subclasses__()) while stack: klass = stack.pop() stack.extend(klass.__subclasses__()) mn = _mapper_or_none(klass) if mn is not None: mappers.append(mn) pjoin = cls._create_polymorphic_union(mappers) cls.__mapper__ = m = mapper(cls, pjoin, polymorphic_on=pjoin.c.type) for scls in cls.__subclasses__(): sm = _mapper_or_none(scls) if sm.concrete and cls in scls.__bases__: sm._set_concrete_base(m) class DeferredReflection(object): """A helper class for construction of mappings based on a deferred reflection step. Normally, declarative can be used with reflection by setting a :class:`.Table` object using autoload=True as the ``__table__`` attribute on a declarative class. The caveat is that the :class:`.Table` must be fully reflected, or at the very least have a primary key column, at the point at which a normal declarative mapping is constructed, meaning the :class:`.Engine` must be available at class declaration time. The :class:`.DeferredReflection` mixin moves the construction of mappers to be at a later point, after a specific method is called which first reflects all :class:`.Table` objects created so far. Classes can define it as such:: from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.ext.declarative import DeferredReflection Base = declarative_base() class MyClass(DeferredReflection, Base): __tablename__ = 'mytable' Above, ``MyClass`` is not yet mapped. 
After a series of classes have been defined in the above fashion, all tables can be reflected and mappings created using :meth:`.DeferredReflection.prepare`:: engine = create_engine("someengine://...") DeferredReflection.prepare(engine) The :class:`.DeferredReflection` mixin can be applied to individual classes, used as the base for the declarative base itself, or used in a custom abstract class. Using an abstract base allows that only a subset of classes to be prepared for a particular prepare step, which is necessary for applications that use more than one engine. For example, if an application has two engines, you might use two bases, and prepare each separately, e.g.:: class ReflectedOne(DeferredReflection, Base): __abstract__ = True class ReflectedTwo(DeferredReflection, Base): __abstract__ = True class MyClass(ReflectedOne): __tablename__ = 'mytable' class MyOtherClass(ReflectedOne): __tablename__ = 'myothertable' class YetAnotherClass(ReflectedTwo): __tablename__ = 'yetanothertable' # ... etc. Above, the class hierarchies for ``ReflectedOne`` and ``ReflectedTwo`` can be configured separately:: ReflectedOne.prepare(engine_one) ReflectedTwo.prepare(engine_two) .. versionadded:: 0.8 """ @classmethod def prepare(cls, engine): """Reflect all :class:`.Table` objects for all current :class:`.DeferredReflection` subclasses""" to_map = [m for m in _MapperConfig.configs.values() if issubclass(m.cls, cls)] for thingy in to_map: cls._sa_decl_prepare(thingy.local_table, engine) thingy.map() @classmethod def _sa_decl_prepare(cls, local_table, engine): # autoload Table, which is already # present in the metadata. This # will fill in db-loaded columns # into the existing Table object. if local_table is not None: Table(local_table.name, local_table.metadata, extend_existing=True, autoload_replace=False, autoload=True, autoload_with=engine, schema=local_table.schema) SQLAlchemy-0.8.4/lib/sqlalchemy/ext/declarative/base.py0000644000076500000240000004113112251150015023500 0ustar classicstaff00000000000000# ext/declarative/base.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Internal implementation for declarative.""" from ...schema import Table, Column from ...orm import mapper, class_mapper from ...orm.interfaces import MapperProperty from ...orm.properties import ColumnProperty, CompositeProperty from ...orm.util import _is_mapped_class from ... import util, exc from ...sql import expression from ... import event from . import clsregistry def _declared_mapping_info(cls): # deferred mapping if cls in _MapperConfig.configs: return _MapperConfig.configs[cls] # regular mapping elif _is_mapped_class(cls): return class_mapper(cls, configure=False) else: return None def _as_declarative(cls, classname, dict_): from .api import declared_attr # dict_ will be a dictproxy, which we can't write to, and we need to! 
dict_ = dict(dict_) column_copies = {} potential_columns = {} mapper_args_fn = None table_args = inherited_table_args = None tablename = None declarative_props = (declared_attr, util.classproperty) for base in cls.__mro__: _is_declarative_inherits = hasattr(base, '_decl_class_registry') if '__declare_last__' in base.__dict__: @event.listens_for(mapper, "after_configured") def go(): cls.__declare_last__() if '__abstract__' in base.__dict__: if (base is cls or (base in cls.__bases__ and not _is_declarative_inherits) ): return class_mapped = _declared_mapping_info(base) is not None for name, obj in vars(base).items(): if name == '__mapper_args__': if not mapper_args_fn and ( not class_mapped or isinstance(obj, declarative_props) ): # don't even invoke __mapper_args__ until # after we've determined everything about the # mapped table. mapper_args_fn = lambda: cls.__mapper_args__ elif name == '__tablename__': if not tablename and ( not class_mapped or isinstance(obj, declarative_props) ): tablename = cls.__tablename__ elif name == '__table_args__': if not table_args and ( not class_mapped or isinstance(obj, declarative_props) ): table_args = cls.__table_args__ if not isinstance(table_args, (tuple, dict, type(None))): raise exc.ArgumentError( "__table_args__ value must be a tuple, " "dict, or None") if base is not cls: inherited_table_args = True elif class_mapped: if isinstance(obj, declarative_props): util.warn("Regular (i.e. not __special__) " "attribute '%s.%s' uses @declared_attr, " "but owning class %s is mapped - " "not applying to subclass %s." % (base.__name__, name, base, cls)) continue elif base is not cls: # we're a mixin. if isinstance(obj, Column): if getattr(cls, name) is not obj: # if column has been overridden # (like by the InstrumentedAttribute of the # superclass), skip continue if obj.foreign_keys: raise exc.InvalidRequestError( "Columns with foreign keys to other columns " "must be declared as @declared_attr callables " "on declarative mixin classes. ") if name not in dict_ and not ( '__table__' in dict_ and (obj.name or name) in dict_['__table__'].c ) and name not in potential_columns: potential_columns[name] = \ column_copies[obj] = \ obj.copy() column_copies[obj]._creation_order = \ obj._creation_order elif isinstance(obj, MapperProperty): raise exc.InvalidRequestError( "Mapper properties (i.e. deferred," "column_property(), relationship(), etc.) must " "be declared as @declared_attr callables " "on declarative mixin classes.") elif isinstance(obj, declarative_props): dict_[name] = ret = \ column_copies[obj] = getattr(cls, name) if isinstance(ret, (Column, MapperProperty)) and \ ret.doc is None: ret.doc = obj.__doc__ # apply inherited columns as we should for k, v in potential_columns.items(): dict_[k] = v if inherited_table_args and not tablename: table_args = None clsregistry.add_class(classname, cls) our_stuff = util.OrderedDict() for k in list(dict_): # TODO: improve this ? all dunders ? if k in ('__table__', '__tablename__', '__mapper_args__'): continue value = dict_[k] if isinstance(value, declarative_props): value = getattr(cls, k) if (isinstance(value, tuple) and len(value) == 1 and isinstance(value[0], (Column, MapperProperty))): util.warn("Ignoring declarative-like tuple value of attribute " "%s: possibly a copy-and-paste error with a comma " "left at the end of the line?" 
% k) continue if not isinstance(value, (Column, MapperProperty)): if not k.startswith('__'): dict_.pop(k) setattr(cls, k, value) continue if k == 'metadata': raise exc.InvalidRequestError( "Attribute name 'metadata' is reserved " "for the MetaData instance when using a " "declarative base class." ) prop = clsregistry._deferred_relationship(cls, value) our_stuff[k] = prop # set up attributes in the order they were created our_stuff.sort(key=lambda key: our_stuff[key]._creation_order) # extract columns from the class dict declared_columns = set() for key, c in our_stuff.iteritems(): if isinstance(c, (ColumnProperty, CompositeProperty)): for col in c.columns: if isinstance(col, Column) and \ col.table is None: _undefer_column_name(key, col) declared_columns.add(col) elif isinstance(c, Column): _undefer_column_name(key, c) declared_columns.add(c) # if the column is the same name as the key, # remove it from the explicit properties dict. # the normal rules for assigning column-based properties # will take over, including precedence of columns # in multi-column ColumnProperties. if key == c.key: del our_stuff[key] declared_columns = sorted( declared_columns, key=lambda c: c._creation_order) table = None if hasattr(cls, '__table_cls__'): table_cls = util.unbound_method_to_callable(cls.__table_cls__) else: table_cls = Table if '__table__' not in dict_: if tablename is not None: args, table_kw = (), {} if table_args: if isinstance(table_args, dict): table_kw = table_args elif isinstance(table_args, tuple): if isinstance(table_args[-1], dict): args, table_kw = table_args[0:-1], table_args[-1] else: args = table_args autoload = dict_.get('__autoload__') if autoload: table_kw['autoload'] = True cls.__table__ = table = table_cls( tablename, cls.metadata, *(tuple(declared_columns) + tuple(args)), **table_kw) else: table = cls.__table__ if declared_columns: for c in declared_columns: if not table.c.contains_column(c): raise exc.ArgumentError( "Can't add additional column %r when " "specifying __table__" % c.key ) if hasattr(cls, '__mapper_cls__'): mapper_cls = util.unbound_method_to_callable(cls.__mapper_cls__) else: mapper_cls = mapper for c in cls.__bases__: if _declared_mapping_info(c) is not None: inherits = c break else: inherits = None if table is None and inherits is None: raise exc.InvalidRequestError( "Class %r does not have a __table__ or __tablename__ " "specified and does not inherit from an existing " "table-mapped class." % cls ) elif inherits: inherited_mapper = _declared_mapping_info(inherits) inherited_table = inherited_mapper.local_table inherited_mapped_table = inherited_mapper.mapped_table if table is None: # single table inheritance. # ensure no table args if table_args: raise exc.ArgumentError( "Can't place __table_args__ on an inherited class " "with no table." ) # add any columns declared here to the inherited table. for c in declared_columns: if c.primary_key: raise exc.ArgumentError( "Can't place primary key columns on an inherited " "class with no table." 
) if c.name in inherited_table.c: if inherited_table.c[c.name] is c: continue raise exc.ArgumentError( "Column '%s' on class %s conflicts with " "existing column '%s'" % (c, cls, inherited_table.c[c.name]) ) inherited_table.append_column(c) if inherited_mapped_table is not None and \ inherited_mapped_table is not inherited_table: inherited_mapped_table._refresh_for_new_column(c) mt = _MapperConfig(mapper_cls, cls, table, inherits, declared_columns, column_copies, our_stuff, mapper_args_fn) if not hasattr(cls, '_sa_decl_prepare'): mt.map() class _MapperConfig(object): configs = util.OrderedDict() mapped_table = None def __init__(self, mapper_cls, cls, table, inherits, declared_columns, column_copies, properties, mapper_args_fn): self.mapper_cls = mapper_cls self.cls = cls self.local_table = table self.inherits = inherits self.properties = properties self.mapper_args_fn = mapper_args_fn self.declared_columns = declared_columns self.column_copies = column_copies self.configs[cls] = self def _prepare_mapper_arguments(self): properties = self.properties if self.mapper_args_fn: mapper_args = self.mapper_args_fn() else: mapper_args = {} # make sure that column copies are used rather # than the original columns from any mixins for k in ('version_id_col', 'polymorphic_on',): if k in mapper_args: v = mapper_args[k] mapper_args[k] = self.column_copies.get(v, v) assert 'inherits' not in mapper_args, \ "Can't specify 'inherits' explicitly with declarative mappings" if self.inherits: mapper_args['inherits'] = self.inherits if self.inherits and not mapper_args.get('concrete', False): # single or joined inheritance # exclude any cols on the inherited table which are # not mapped on the parent class, to avoid # mapping columns specific to sibling/nephew classes inherited_mapper = _declared_mapping_info(self.inherits) inherited_table = inherited_mapper.local_table if 'exclude_properties' not in mapper_args: mapper_args['exclude_properties'] = exclude_properties = \ set([c.key for c in inherited_table.c if c not in inherited_mapper._columntoproperty]) exclude_properties.difference_update( [c.key for c in self.declared_columns]) # look through columns in the current mapper that # are keyed to a propname different than the colname # (if names were the same, we'd have popped it out above, # in which case the mapper makes this combination). # See if the superclass has a similar column property. # If so, join them together. for k, col in properties.items(): if not isinstance(col, expression.ColumnElement): continue if k in inherited_mapper._props: p = inherited_mapper._props[k] if isinstance(p, ColumnProperty): # note here we place the subclass column # first. See [ticket:1892] for background. properties[k] = [col] + p.columns result_mapper_args = mapper_args.copy() result_mapper_args['properties'] = properties return result_mapper_args def map(self): self.configs.pop(self.cls, None) mapper_args = self._prepare_mapper_arguments() self.cls.__mapper__ = self.mapper_cls( self.cls, self.local_table, **mapper_args ) def _add_attribute(cls, key, value): """add an attribute to an existing declarative class. This runs through the logic to determine MapperProperty, adds it to the Mapper, adds a column to the mapped Table, etc. 
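    For example, the user-facing behavior this supports is plain attribute
    assignment on an already-mapped declarative class (a sketch; ``MyClass``
    and ``Widget`` are hypothetical mapped classes)::

        MyClass.data = Column(String(50))         # column appended to the mapped Table
        MyClass.widgets = relationship("Widget")  # MapperProperty added to the Mapper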
""" if '__mapper__' in cls.__dict__: if isinstance(value, Column): _undefer_column_name(key, value) cls.__table__.append_column(value) cls.__mapper__.add_property(key, value) elif isinstance(value, ColumnProperty): for col in value.columns: if isinstance(col, Column) and col.table is None: _undefer_column_name(key, col) cls.__table__.append_column(col) cls.__mapper__.add_property(key, value) elif isinstance(value, MapperProperty): cls.__mapper__.add_property( key, clsregistry._deferred_relationship(cls, value) ) else: type.__setattr__(cls, key, value) else: type.__setattr__(cls, key, value) def _declarative_constructor(self, **kwargs): """A simple constructor that allows initialization from kwargs. Sets attributes on the constructed instance using the names and values in ``kwargs``. Only keys that are present as attributes of the instance's class are allowed. These could be, for example, any mapped columns or relationships. """ cls_ = type(self) for k in kwargs: if not hasattr(cls_, k): raise TypeError( "%r is an invalid keyword argument for %s" % (k, cls_.__name__)) setattr(self, k, kwargs[k]) _declarative_constructor.__name__ = '__init__' def _undefer_column_name(key, column): if column.key is None: column.key = key if column.name is None: column.name = key SQLAlchemy-0.8.4/lib/sqlalchemy/ext/declarative/clsregistry.py0000644000076500000240000002314412251150015025144 0ustar classicstaff00000000000000# ext/declarative/clsregistry.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Routines to handle the string class registry used by declarative. This system allows specification of classes and expressions used in :func:`.relationship` using strings. """ from ...orm.properties import ColumnProperty, RelationshipProperty, \ SynonymProperty from ...schema import _get_table_key from ...orm import class_mapper, interfaces from ... import util from ... import exc import weakref # strong references to registries which we place in # the _decl_class_registry, which is usually weak referencing. # the internal registries here link to classes with weakrefs and remove # themselves when all references to contained classes are removed. _registries = set() def add_class(classname, cls): """Add a class to the _decl_class_registry associated with the given declarative class. """ if classname in cls._decl_class_registry: # class already exists. existing = cls._decl_class_registry[classname] if not isinstance(existing, _MultipleClassMarker): existing = \ cls._decl_class_registry[classname] = \ _MultipleClassMarker([cls, existing]) else: cls._decl_class_registry[classname] = cls try: root_module = cls._decl_class_registry['_sa_module_registry'] except KeyError: cls._decl_class_registry['_sa_module_registry'] = \ root_module = _ModuleMarker('_sa_module_registry', None) tokens = cls.__module__.split(".") # build up a tree like this: # modulename: myapp.snacks.nuts # # myapp->snack->nuts->(classes) # snack->nuts->(classes) # nuts->(classes) # # this allows partial token paths to be used. while tokens: token = tokens.pop(0) module = root_module.get_module(token) for token in tokens: module = module.get_module(token) module.add_class(classname, cls) class _MultipleClassMarker(object): """refers to multiple classes of the same name within _decl_class_registry. 
""" def __init__(self, classes, on_remove=None): self.on_remove = on_remove self.contents = set([ weakref.ref(item, self._remove_item) for item in classes]) _registries.add(self) def __iter__(self): return (ref() for ref in self.contents) def attempt_get(self, path, key): if len(self.contents) > 1: raise exc.InvalidRequestError( "Multiple classes found for path \"%s\" " "in the registry of this declarative " "base. Please use a fully module-qualified path." % (".".join(path + [key])) ) else: ref = list(self.contents)[0] cls = ref() if cls is None: raise NameError(key) return cls def _remove_item(self, ref): self.contents.remove(ref) if not self.contents: _registries.discard(self) if self.on_remove: self.on_remove() def add_item(self, item): modules = set([cls().__module__ for cls in self.contents]) if item.__module__ in modules: util.warn( "This declarative base already contains a class with the " "same class name and module name as %s.%s, and will " "be replaced in the string-lookup table." % ( item.__module__, item.__name__ ) ) self.contents.add(weakref.ref(item, self._remove_item)) class _ModuleMarker(object): """"refers to a module name within _decl_class_registry. """ def __init__(self, name, parent): self.parent = parent self.name = name self.contents = {} self.mod_ns = _ModNS(self) if self.parent: self.path = self.parent.path + [self.name] else: self.path = [] _registries.add(self) def __contains__(self, name): return name in self.contents def __getitem__(self, name): return self.contents[name] def _remove_item(self, name): self.contents.pop(name, None) if not self.contents and self.parent is not None: self.parent._remove_item(self.name) _registries.discard(self) def resolve_attr(self, key): return getattr(self.mod_ns, key) def get_module(self, name): if name not in self.contents: marker = _ModuleMarker(name, self) self.contents[name] = marker else: marker = self.contents[name] return marker def add_class(self, name, cls): if name in self.contents: existing = self.contents[name] existing.add_item(cls) else: existing = self.contents[name] = \ _MultipleClassMarker([cls], on_remove=lambda: self._remove_item(name)) class _ModNS(object): def __init__(self, parent): self.__parent = parent def __getattr__(self, key): try: value = self.__parent.contents[key] except KeyError: pass else: if value is not None: if isinstance(value, _ModuleMarker): return value.mod_ns else: assert isinstance(value, _MultipleClassMarker) return value.attempt_get(self.__parent.path, key) raise AttributeError("Module %r has no mapped classes " "registered under the name %r" % (self.__parent.name, key)) class _GetColumns(object): def __init__(self, cls): self.cls = cls def __getattr__(self, key): mp = class_mapper(self.cls, configure=False) if mp: if key not in mp.all_orm_descriptors: raise exc.InvalidRequestError( "Class %r does not have a mapped column named %r" % (self.cls, key)) desc = mp.all_orm_descriptors[key] if desc.extension_type is interfaces.NOT_EXTENSION: prop = desc.property if isinstance(prop, SynonymProperty): key = prop.name elif not isinstance(prop, ColumnProperty): raise exc.InvalidRequestError( "Property %r is not an instance of" " ColumnProperty (i.e. does not correspond" " directly to a Column)." 
% key) return getattr(self.cls, key) class _GetTable(object): def __init__(self, key, metadata): self.key = key self.metadata = metadata def __getattr__(self, key): return self.metadata.tables[ _get_table_key(key, self.key) ] def _determine_container(key, value): if isinstance(value, _MultipleClassMarker): value = value.attempt_get([], key) return _GetColumns(value) def _resolver(cls, prop): def resolve_arg(arg): import sqlalchemy from sqlalchemy.orm import foreign, remote fallback = sqlalchemy.__dict__.copy() fallback.update({'foreign': foreign, 'remote': remote}) def access_cls(key): if key in cls._decl_class_registry: return _determine_container(key, cls._decl_class_registry[key]) elif key in cls.metadata.tables: return cls.metadata.tables[key] elif key in cls.metadata._schemas: return _GetTable(key, cls.metadata) elif '_sa_module_registry' in cls._decl_class_registry and \ key in cls._decl_class_registry['_sa_module_registry']: registry = cls._decl_class_registry['_sa_module_registry'] return registry.resolve_attr(key) else: return fallback[key] d = util.PopulateDict(access_cls) def return_cls(): try: x = eval(arg, globals(), d) if isinstance(x, _GetColumns): return x.cls else: return x except NameError, n: raise exc.InvalidRequestError( "When initializing mapper %s, expression %r failed to " "locate a name (%r). If this is a class name, consider " "adding this relationship() to the %r class after " "both dependent classes have been defined." % (prop.parent, arg, n.args[0], cls) ) return return_cls return resolve_arg def _deferred_relationship(cls, prop): if isinstance(prop, RelationshipProperty): resolve_arg = _resolver(cls, prop) for attr in ('argument', 'order_by', 'primaryjoin', 'secondaryjoin', 'secondary', '_user_defined_foreign_keys', 'remote_side'): v = getattr(prop, attr) if isinstance(v, basestring): setattr(prop, attr, resolve_arg(v)) if prop.backref and isinstance(prop.backref, tuple): key, kwargs = prop.backref for attr in ('primaryjoin', 'secondaryjoin', 'secondary', 'foreign_keys', 'remote_side', 'order_by'): if attr in kwargs and isinstance(kwargs[attr], basestring): kwargs[attr] = resolve_arg(kwargs[attr]) return prop SQLAlchemy-0.8.4/lib/sqlalchemy/ext/horizontal_shard.py0000644000076500000240000001144612251147171023674 0ustar classicstaff00000000000000# ext/horizontal_shard.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Horizontal sharding support. Defines a rudimental 'horizontal sharding' system which allows a Session to distribute queries and persistence operations across multiple databases. For a usage example, see the :ref:`examples_sharding` example included in the source distribution. """ from .. import util from ..orm.session import Session from ..orm.query import Query __all__ = ['ShardedSession', 'ShardedQuery'] class ShardedQuery(Query): def __init__(self, *args, **kwargs): super(ShardedQuery, self).__init__(*args, **kwargs) self.id_chooser = self.session.id_chooser self.query_chooser = self.session.query_chooser self._shard_id = None def set_shard(self, shard_id): """return a new query, limited to a single shard ID. all subsequent operations with the returned query will be against the single shard regardless of other state. 
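        E.g., a sketch assuming a :class:`.ShardedSession` named ``sess``, a
        hypothetical mapped class ``WeatherLocation``, and a shard id
        ``"north_america"``::

            q = sess.query(WeatherLocation).set_shard("north_america")
            results = q.filter(WeatherLocation.continent == 'North America').all()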
""" q = self._clone() q._shard_id = shard_id return q def _execute_and_instances(self, context): def iter_for_shard(shard_id): context.attributes['shard_id'] = shard_id result = self._connection_from_session( mapper=self._mapper_zero(), shard_id=shard_id).execute( context.statement, self._params) return self.instances(result, context) if self._shard_id is not None: return iter_for_shard(self._shard_id) else: partial = [] for shard_id in self.query_chooser(self): partial.extend(iter_for_shard(shard_id)) # if some kind of in memory 'sorting' # were done, this is where it would happen return iter(partial) def get(self, ident, **kwargs): if self._shard_id is not None: return super(ShardedQuery, self).get(ident) else: ident = util.to_list(ident) for shard_id in self.id_chooser(self, ident): o = self.set_shard(shard_id).get(ident, **kwargs) if o is not None: return o else: return None class ShardedSession(Session): def __init__(self, shard_chooser, id_chooser, query_chooser, shards=None, query_cls=ShardedQuery, **kwargs): """Construct a ShardedSession. :param shard_chooser: A callable which, passed a Mapper, a mapped instance, and possibly a SQL clause, returns a shard ID. This id may be based off of the attributes present within the object, or on some round-robin scheme. If the scheme is based on a selection, it should set whatever state on the instance to mark it in the future as participating in that shard. :param id_chooser: A callable, passed a query and a tuple of identity values, which should return a list of shard ids where the ID might reside. The databases will be queried in the order of this listing. :param query_chooser: For a given Query, returns the list of shard_ids where the query should be issued. Results from all shards returned will be combined together into a single listing. :param shards: A dictionary of string shard names to :class:`~sqlalchemy.engine.Engine` objects. """ super(ShardedSession, self).__init__(query_cls=query_cls, **kwargs) self.shard_chooser = shard_chooser self.id_chooser = id_chooser self.query_chooser = query_chooser self.__binds = {} self.connection_callable = self.connection if shards is not None: for k in shards: self.bind_shard(k, shards[k]) def connection(self, mapper=None, instance=None, shard_id=None, **kwargs): if shard_id is None: shard_id = self.shard_chooser(mapper, instance) if self.transaction is not None: return self.transaction.connection(mapper, shard_id=shard_id) else: return self.get_bind(mapper, shard_id=shard_id, instance=instance).contextual_connect(**kwargs) def get_bind(self, mapper, shard_id=None, instance=None, clause=None, **kw): if shard_id is None: shard_id = self.shard_chooser(mapper, instance, clause=clause) return self.__binds[shard_id] def bind_shard(self, shard_id, bind): self.__binds[shard_id] = bind SQLAlchemy-0.8.4/lib/sqlalchemy/ext/hybrid.py0000644000076500000240000006651212251150015021576 0ustar classicstaff00000000000000# ext/hybrid.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Define attributes on ORM-mapped classes that have "hybrid" behavior. "hybrid" means the attribute has distinct behaviors defined at the class level and at the instance level. The :mod:`~sqlalchemy.ext.hybrid` extension provides a special form of method decorator, is around 50 lines of code and has almost no dependencies on the rest of SQLAlchemy. 
It can, in theory, work with any descriptor-based expression system. Consider a mapping ``Interval``, representing integer ``start`` and ``end`` values. We can define higher level functions on mapped classes that produce SQL expressions at the class level, and Python expression evaluation at the instance level. Below, each function decorated with :class:`.hybrid_method` or :class:`.hybrid_property` may receive ``self`` as an instance of the class, or as the class itself:: from sqlalchemy import Column, Integer from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import Session, aliased from sqlalchemy.ext.hybrid import hybrid_property, hybrid_method Base = declarative_base() class Interval(Base): __tablename__ = 'interval' id = Column(Integer, primary_key=True) start = Column(Integer, nullable=False) end = Column(Integer, nullable=False) def __init__(self, start, end): self.start = start self.end = end @hybrid_property def length(self): return self.end - self.start @hybrid_method def contains(self,point): return (self.start <= point) & (point < self.end) @hybrid_method def intersects(self, other): return self.contains(other.start) | self.contains(other.end) Above, the ``length`` property returns the difference between the ``end`` and ``start`` attributes. With an instance of ``Interval``, this subtraction occurs in Python, using normal Python descriptor mechanics:: >>> i1 = Interval(5, 10) >>> i1.length 5 When dealing with the ``Interval`` class itself, the :class:`.hybrid_property` descriptor evaluates the function body given the ``Interval`` class as the argument, which when evaluated with SQLAlchemy expression mechanics returns a new SQL expression:: >>> print Interval.length interval."end" - interval.start >>> print Session().query(Interval).filter(Interval.length > 10) SELECT interval.id AS interval_id, interval.start AS interval_start, interval."end" AS interval_end FROM interval WHERE interval."end" - interval.start > :param_1 ORM methods such as :meth:`~.Query.filter_by` generally use ``getattr()`` to locate attributes, so can also be used with hybrid attributes:: >>> print Session().query(Interval).filter_by(length=5) SELECT interval.id AS interval_id, interval.start AS interval_start, interval."end" AS interval_end FROM interval WHERE interval."end" - interval.start = :param_1 The ``Interval`` class example also illustrates two methods, ``contains()`` and ``intersects()``, decorated with :class:`.hybrid_method`. This decorator applies the same idea to methods that :class:`.hybrid_property` applies to attributes. 
The methods return boolean values, and take advantage of the Python ``|`` and ``&`` bitwise operators to produce equivalent instance-level and SQL expression-level boolean behavior:: >>> i1.contains(6) True >>> i1.contains(15) False >>> i1.intersects(Interval(7, 18)) True >>> i1.intersects(Interval(25, 29)) False >>> print Session().query(Interval).filter(Interval.contains(15)) SELECT interval.id AS interval_id, interval.start AS interval_start, interval."end" AS interval_end FROM interval WHERE interval.start <= :start_1 AND interval."end" > :end_1 >>> ia = aliased(Interval) >>> print Session().query(Interval, ia).filter(Interval.intersects(ia)) SELECT interval.id AS interval_id, interval.start AS interval_start, interval."end" AS interval_end, interval_1.id AS interval_1_id, interval_1.start AS interval_1_start, interval_1."end" AS interval_1_end FROM interval, interval AS interval_1 WHERE interval.start <= interval_1.start AND interval."end" > interval_1.start OR interval.start <= interval_1."end" AND interval."end" > interval_1."end" Defining Expression Behavior Distinct from Attribute Behavior -------------------------------------------------------------- Our usage of the ``&`` and ``|`` bitwise operators above was fortunate, considering our functions operated on two boolean values to return a new one. In many cases, the construction of an in-Python function and a SQLAlchemy SQL expression have enough differences that two separate Python expressions should be defined. The :mod:`~sqlalchemy.ext.hybrid` decorators define the :meth:`.hybrid_property.expression` modifier for this purpose. As an example we'll define the radius of the interval, which requires the usage of the absolute value function:: from sqlalchemy import func class Interval(object): # ... @hybrid_property def radius(self): return abs(self.length) / 2 @radius.expression def radius(cls): return func.abs(cls.length) / 2 Above the Python function ``abs()`` is used for instance-level operations, the SQL function ``ABS()`` is used via the :attr:`.func` object for class-level expressions:: >>> i1.radius 2 >>> print Session().query(Interval).filter(Interval.radius > 5) SELECT interval.id AS interval_id, interval.start AS interval_start, interval."end" AS interval_end FROM interval WHERE abs(interval."end" - interval.start) / :abs_1 > :param_1 Defining Setters ---------------- Hybrid properties can also define setter methods. If we wanted ``length`` above, when set, to modify the endpoint value:: class Interval(object): # ... @hybrid_property def length(self): return self.end - self.start @length.setter def length(self, value): self.end = self.start + value The ``length(self, value)`` method is now called upon set:: >>> i1 = Interval(5, 10) >>> i1.length 5 >>> i1.length = 12 >>> i1.end 17 Working with Relationships -------------------------- There's no essential difference when creating hybrids that work with related objects as opposed to column-based data. The need for distinct expressions tends to be greater. Two variants of we'll illustrate are the "join-dependent" hybrid, and the "correlated subquery" hybrid. 
Join-Dependent Relationship Hybrid ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Consider the following declarative mapping which relates a ``User`` to a ``SavingsAccount``:: from sqlalchemy import Column, Integer, ForeignKey, Numeric, String from sqlalchemy.orm import relationship from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.ext.hybrid import hybrid_property Base = declarative_base() class SavingsAccount(Base): __tablename__ = 'account' id = Column(Integer, primary_key=True) user_id = Column(Integer, ForeignKey('user.id'), nullable=False) balance = Column(Numeric(15, 5)) class User(Base): __tablename__ = 'user' id = Column(Integer, primary_key=True) name = Column(String(100), nullable=False) accounts = relationship("SavingsAccount", backref="owner") @hybrid_property def balance(self): if self.accounts: return self.accounts[0].balance else: return None @balance.setter def balance(self, value): if not self.accounts: account = Account(owner=self) else: account = self.accounts[0] account.balance = value @balance.expression def balance(cls): return SavingsAccount.balance The above hybrid property ``balance`` works with the first ``SavingsAccount`` entry in the list of accounts for this user. The in-Python getter/setter methods can treat ``accounts`` as a Python list available on ``self``. However, at the expression level, it's expected that the ``User`` class will be used in an appropriate context such that an appropriate join to ``SavingsAccount`` will be present:: >>> print Session().query(User, User.balance).\\ ... join(User.accounts).filter(User.balance > 5000) SELECT "user".id AS user_id, "user".name AS user_name, account.balance AS account_balance FROM "user" JOIN account ON "user".id = account.user_id WHERE account.balance > :balance_1 Note however, that while the instance level accessors need to worry about whether ``self.accounts`` is even present, this issue expresses itself differently at the SQL expression level, where we basically would use an outer join:: >>> from sqlalchemy import or_ >>> print (Session().query(User, User.balance).outerjoin(User.accounts). ... filter(or_(User.balance < 5000, User.balance == None))) SELECT "user".id AS user_id, "user".name AS user_name, account.balance AS account_balance FROM "user" LEFT OUTER JOIN account ON "user".id = account.user_id WHERE account.balance < :balance_1 OR account.balance IS NULL Correlated Subquery Relationship Hybrid ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ We can, of course, forego being dependent on the enclosing query's usage of joins in favor of the correlated subquery, which can portably be packed into a single column expression. A correlated subquery is more portable, but often performs more poorly at the SQL level. 
Using the same technique illustrated at :ref:`mapper_column_property_sql_expressions`, we can adjust our ``SavingsAccount`` example to aggregate the balances for *all* accounts, and use a correlated subquery for the column expression:: from sqlalchemy import Column, Integer, ForeignKey, Numeric, String from sqlalchemy.orm import relationship from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.ext.hybrid import hybrid_property from sqlalchemy import select, func Base = declarative_base() class SavingsAccount(Base): __tablename__ = 'account' id = Column(Integer, primary_key=True) user_id = Column(Integer, ForeignKey('user.id'), nullable=False) balance = Column(Numeric(15, 5)) class User(Base): __tablename__ = 'user' id = Column(Integer, primary_key=True) name = Column(String(100), nullable=False) accounts = relationship("SavingsAccount", backref="owner") @hybrid_property def balance(self): return sum(acc.balance for acc in self.accounts) @balance.expression def balance(cls): return select([func.sum(SavingsAccount.balance)]).\\ where(SavingsAccount.user_id==cls.id).\\ label('total_balance') The above recipe will give us the ``balance`` column which renders a correlated SELECT:: >>> print s.query(User).filter(User.balance > 400) SELECT "user".id AS user_id, "user".name AS user_name FROM "user" WHERE (SELECT sum(account.balance) AS sum_1 FROM account WHERE account.user_id = "user".id) > :param_1 .. _hybrid_custom_comparators: Building Custom Comparators --------------------------- The hybrid property also includes a helper that allows construction of custom comparators. A comparator object allows one to customize the behavior of each SQLAlchemy expression operator individually. They are useful when creating custom types that have some highly idiosyncratic behavior on the SQL side. The example class below allows case-insensitive comparisons on the attribute named ``word_insensitive``:: from sqlalchemy.ext.hybrid import Comparator, hybrid_property from sqlalchemy import func, Column, Integer, String from sqlalchemy.orm import Session from sqlalchemy.ext.declarative import declarative_base Base = declarative_base() class CaseInsensitiveComparator(Comparator): def __eq__(self, other): return func.lower(self.__clause_element__()) == func.lower(other) class SearchWord(Base): __tablename__ = 'searchword' id = Column(Integer, primary_key=True) word = Column(String(255), nullable=False) @hybrid_property def word_insensitive(self): return self.word.lower() @word_insensitive.comparator def word_insensitive(cls): return CaseInsensitiveComparator(cls.word) Above, SQL expressions against ``word_insensitive`` will apply the ``LOWER()`` SQL function to both sides:: >>> print Session().query(SearchWord).filter_by(word_insensitive="Trucks") SELECT searchword.id AS searchword_id, searchword.word AS searchword_word FROM searchword WHERE lower(searchword.word) = lower(:lower_1) The ``CaseInsensitiveComparator`` above implements part of the :class:`.ColumnOperators` interface. A "coercion" operation like lowercasing can be applied to all comparison operations (i.e. ``eq``, ``lt``, ``gt``, etc.) 
using :meth:`.Operators.operate`:: class CaseInsensitiveComparator(Comparator): def operate(self, op, other): return op(func.lower(self.__clause_element__()), func.lower(other)) Hybrid Value Objects -------------------- Note in our previous example, if we were to compare the ``word_insensitive`` attribute of a ``SearchWord`` instance to a plain Python string, the plain Python string would not be coerced to lower case - the ``CaseInsensitiveComparator`` we built, being returned by ``@word_insensitive.comparator``, only applies to the SQL side. A more comprehensive form of the custom comparator is to construct a *Hybrid Value Object*. This technique applies the target value or expression to a value object which is then returned by the accessor in all cases. The value object allows control of all operations upon the value as well as how compared values are treated, both on the SQL expression side as well as the Python value side. Replacing the previous ``CaseInsensitiveComparator`` class with a new ``CaseInsensitiveWord`` class:: class CaseInsensitiveWord(Comparator): "Hybrid value representing a lower case representation of a word." def __init__(self, word): if isinstance(word, basestring): self.word = word.lower() elif isinstance(word, CaseInsensitiveWord): self.word = word.word else: self.word = func.lower(word) def operate(self, op, other): if not isinstance(other, CaseInsensitiveWord): other = CaseInsensitiveWord(other) return op(self.word, other.word) def __clause_element__(self): return self.word def __str__(self): return self.word key = 'word' "Label to apply to Query tuple results" Above, the ``CaseInsensitiveWord`` object represents ``self.word``, which may be a SQL function, or may be a Python native. By overriding ``operate()`` and ``__clause_element__()`` to work in terms of ``self.word``, all comparison operations will work against the "converted" form of ``word``, whether it be SQL side or Python side. Our ``SearchWord`` class can now deliver the ``CaseInsensitiveWord`` object unconditionally from a single hybrid call:: class SearchWord(Base): __tablename__ = 'searchword' id = Column(Integer, primary_key=True) word = Column(String(255), nullable=False) @hybrid_property def word_insensitive(self): return CaseInsensitiveWord(self.word) The ``word_insensitive`` attribute now has case-insensitive comparison behavior universally, including SQL expression vs. Python expression (note the Python value is converted to lower case on the Python side here):: >>> print Session().query(SearchWord).filter_by(word_insensitive="Trucks") SELECT searchword.id AS searchword_id, searchword.word AS searchword_word FROM searchword WHERE lower(searchword.word) = :lower_1 SQL expression versus SQL expression:: >>> sw1 = aliased(SearchWord) >>> sw2 = aliased(SearchWord) >>> print Session().query( ... sw1.word_insensitive, ... sw2.word_insensitive).\\ ... filter( ... sw1.word_insensitive > sw2.word_insensitive ... ) SELECT lower(searchword_1.word) AS lower_1, lower(searchword_2.word) AS lower_2 FROM searchword AS searchword_1, searchword AS searchword_2 WHERE lower(searchword_1.word) > lower(searchword_2.word) Python only expression:: >>> ws1 = SearchWord(word="SomeWord") >>> ws1.word_insensitive == "sOmEwOrD" True >>> ws1.word_insensitive == "XOmEwOrX" False >>> print ws1.word_insensitive someword The Hybrid Value pattern is very useful for any kind of value that may have multiple representations, such as timestamps, time deltas, units of measurement, currencies and encrypted passwords. .. 
seealso:: `Hybrids and Value Agnostic Types `_ - on the techspot.zzzeek.org blog `Value Agnostic Types, Part II `_ - on the techspot.zzzeek.org blog .. _hybrid_transformers: Building Transformers ---------------------- A *transformer* is an object which can receive a :class:`.Query` object and return a new one. The :class:`.Query` object includes a method :meth:`.with_transformation` that returns a new :class:`.Query` transformed by the given function. We can combine this with the :class:`.Comparator` class to produce one type of recipe which can both set up the FROM clause of a query as well as assign filtering criterion. Consider a mapped class ``Node``, which assembles using adjacency list into a hierarchical tree pattern:: from sqlalchemy import Column, Integer, ForeignKey from sqlalchemy.orm import relationship from sqlalchemy.ext.declarative import declarative_base Base = declarative_base() class Node(Base): __tablename__ = 'node' id =Column(Integer, primary_key=True) parent_id = Column(Integer, ForeignKey('node.id')) parent = relationship("Node", remote_side=id) Suppose we wanted to add an accessor ``grandparent``. This would return the ``parent`` of ``Node.parent``. When we have an instance of ``Node``, this is simple:: from sqlalchemy.ext.hybrid import hybrid_property class Node(Base): # ... @hybrid_property def grandparent(self): return self.parent.parent For the expression, things are not so clear. We'd need to construct a :class:`.Query` where we :meth:`~.Query.join` twice along ``Node.parent`` to get to the ``grandparent``. We can instead return a transforming callable that we'll combine with the :class:`.Comparator` class to receive any :class:`.Query` object, and return a new one that's joined to the ``Node.parent`` attribute and filtered based on the given criterion:: from sqlalchemy.ext.hybrid import Comparator class GrandparentTransformer(Comparator): def operate(self, op, other): def transform(q): cls = self.__clause_element__() parent_alias = aliased(cls) return q.join(parent_alias, cls.parent).\\ filter(op(parent_alias.parent, other)) return transform Base = declarative_base() class Node(Base): __tablename__ = 'node' id =Column(Integer, primary_key=True) parent_id = Column(Integer, ForeignKey('node.id')) parent = relationship("Node", remote_side=id) @hybrid_property def grandparent(self): return self.parent.parent @grandparent.comparator def grandparent(cls): return GrandparentTransformer(cls) The ``GrandparentTransformer`` overrides the core :meth:`.Operators.operate` method at the base of the :class:`.Comparator` hierarchy to return a query-transforming callable, which then runs the given comparison operation in a particular context. Such as, in the example above, the ``operate`` method is called, given the :attr:`.Operators.eq` callable as well as the right side of the comparison ``Node(id=5)``. A function ``transform`` is then returned which will transform a :class:`.Query` first to join to ``Node.parent``, then to compare ``parent_alias`` using :attr:`.Operators.eq` against the left and right sides, passing into :class:`.Query.filter`: .. sourcecode:: pycon+sql >>> from sqlalchemy.orm import Session >>> session = Session() {sql}>>> session.query(Node).\\ ... with_transformation(Node.grandparent==Node(id=5)).\\ ... 
all() SELECT node.id AS node_id, node.parent_id AS node_parent_id FROM node JOIN node AS node_1 ON node_1.id = node.parent_id WHERE :param_1 = node_1.parent_id {stop} We can modify the pattern to be more verbose but flexible by separating the "join" step from the "filter" step. The tricky part here is ensuring that successive instances of ``GrandparentTransformer`` use the same :class:`.AliasedClass` object against ``Node``. Below we use a simple memoizing approach that associates a ``GrandparentTransformer`` with each class:: class Node(Base): # ... @grandparent.comparator def grandparent(cls): # memoize a GrandparentTransformer # per class if '_gp' not in cls.__dict__: cls._gp = GrandparentTransformer(cls) return cls._gp class GrandparentTransformer(Comparator): def __init__(self, cls): self.parent_alias = aliased(cls) @property def join(self): def go(q): return q.join(self.parent_alias, Node.parent) return go def operate(self, op, other): return op(self.parent_alias.parent, other) .. sourcecode:: pycon+sql {sql}>>> session.query(Node).\\ ... with_transformation(Node.grandparent.join).\\ ... filter(Node.grandparent==Node(id=5)) SELECT node.id AS node_id, node.parent_id AS node_parent_id FROM node JOIN node AS node_1 ON node_1.id = node.parent_id WHERE :param_1 = node_1.parent_id {stop} The "transformer" pattern is an experimental pattern that starts to make usage of some functional programming paradigms. While it's only recommended for advanced and/or patient developers, there's probably a whole lot of amazing things it can be used for. """ from .. import util from ..orm import attributes, interfaces HYBRID_METHOD = util.symbol('HYBRID_METHOD') """Symbol indicating an :class:`_InspectionAttr` that's of type :class:`.hybrid_method`. Is assigned to the :attr:`._InspectionAttr.extension_type` attibute. .. seealso:: :attr:`.Mapper.all_orm_attributes` """ HYBRID_PROPERTY = util.symbol('HYBRID_PROPERTY') """Symbol indicating an :class:`_InspectionAttr` that's of type :class:`.hybrid_method`. Is assigned to the :attr:`._InspectionAttr.extension_type` attibute. .. seealso:: :attr:`.Mapper.all_orm_attributes` """ class hybrid_method(interfaces._InspectionAttr): """A decorator which allows definition of a Python object method with both instance-level and class-level behavior. """ is_attribute = True extension_type = HYBRID_METHOD def __init__(self, func, expr=None): """Create a new :class:`.hybrid_method`. Usage is typically via decorator:: from sqlalchemy.ext.hybrid import hybrid_method class SomeClass(object): @hybrid_method def value(self, x, y): return self._value + x + y @value.expression def value(self, x, y): return func.some_function(self._value, x, y) """ self.func = func self.expr = expr or func def __get__(self, instance, owner): if instance is None: return self.expr.__get__(owner, owner.__class__) else: return self.func.__get__(instance, owner) def expression(self, expr): """Provide a modifying decorator that defines a SQL-expression producing method.""" self.expr = expr return self class hybrid_property(interfaces._InspectionAttr): """A decorator which allows definition of a Python descriptor with both instance-level and class-level behavior. """ is_attribute = True extension_type = HYBRID_PROPERTY def __init__(self, fget, fset=None, fdel=None, expr=None): """Create a new :class:`.hybrid_property`. 
Usage is typically via decorator:: from sqlalchemy.ext.hybrid import hybrid_property class SomeClass(object): @hybrid_property def value(self): return self._value @value.setter def value(self, value): self._value = value """ self.fget = fget self.fset = fset self.fdel = fdel self.expr = expr or fget util.update_wrapper(self, fget) def __get__(self, instance, owner): if instance is None: return self.expr(owner) else: return self.fget(instance) def __set__(self, instance, value): if self.fset is None: raise AttributeError("can't set attribute") self.fset(instance, value) def __delete__(self, instance): if self.fdel is None: raise AttributeError("can't delete attribute") self.fdel(instance) def setter(self, fset): """Provide a modifying decorator that defines a value-setter method.""" self.fset = fset return self def deleter(self, fdel): """Provide a modifying decorator that defines a value-deletion method.""" self.fdel = fdel return self def expression(self, expr): """Provide a modifying decorator that defines a SQL-expression producing method.""" self.expr = expr return self def comparator(self, comparator): """Provide a modifying decorator that defines a custom comparator producing method. The return value of the decorated method should be an instance of :class:`~.hybrid.Comparator`. """ proxy_attr = attributes.\ create_proxied_attribute(self) def expr(owner): return proxy_attr(owner, self.__name__, self, comparator(owner)) self.expr = expr return self class Comparator(interfaces.PropComparator): """A helper class that allows easy construction of custom :class:`~.orm.interfaces.PropComparator` classes for usage with hybrids.""" property = None def __init__(self, expression): self.expression = expression def __clause_element__(self): expr = self.expression while hasattr(expr, '__clause_element__'): expr = expr.__clause_element__() return expr def adapted(self, adapter): # interesting.... return self SQLAlchemy-0.8.4/lib/sqlalchemy/ext/instrumentation.py0000644000076500000240000003435712251150015023562 0ustar classicstaff00000000000000"""Extensible class instrumentation. The :mod:`sqlalchemy.ext.instrumentation` package provides for alternate systems of class instrumentation within the ORM. Class instrumentation refers to how the ORM places attributes on the class which maintain data and track changes to that data, as well as event hooks installed on the class. .. note:: The extension package is provided for the benefit of integration with other object management packages, which already perform their own instrumentation. It is not intended for general use. For examples of how the instrumentation extension is used, see the example :ref:`examples_instrumentation`. .. versionchanged:: 0.8 The :mod:`sqlalchemy.orm.instrumentation` was split out so that all functionality having to do with non-standard instrumentation was moved out to :mod:`sqlalchemy.ext.instrumentation`. When imported, the module installs itself within :mod:`sqlalchemy.orm.instrumentation` so that it takes effect, including recognition of ``__sa_instrumentation_manager__`` on mapped classes, as well :attr:`.instrumentation_finders` being used to determine class instrumentation resolution. """ from ..orm import instrumentation as orm_instrumentation from ..orm.instrumentation import ( ClassManager, InstrumentationFactory, _default_state_getter, _default_dict_getter, _default_manager_getter ) from ..orm import attributes, collections from .. 
import util from ..orm import exc as orm_exc import weakref INSTRUMENTATION_MANAGER = '__sa_instrumentation_manager__' """Attribute, elects custom instrumentation when present on a mapped class. Allows a class to specify a slightly or wildly different technique for tracking changes made to mapped attributes and collections. Only one instrumentation implementation is allowed in a given object inheritance hierarchy. The value of this attribute must be a callable and will be passed a class object. The callable must return one of: - An instance of an InstrumentationManager or subclass - An object implementing all or some of InstrumentationManager (TODO) - A dictionary of callables, implementing all or some of the above (TODO) - An instance of a ClassManager or subclass This attribute is consulted by SQLAlchemy instrumentation resolution, once the :mod:`sqlalchemy.ext.instrumentation` module has been imported. If custom finders are installed in the global instrumentation_finders list, they may or may not choose to honor this attribute. """ def find_native_user_instrumentation_hook(cls): """Find user-specified instrumentation management for a class.""" return getattr(cls, INSTRUMENTATION_MANAGER, None) instrumentation_finders = [find_native_user_instrumentation_hook] """An extensible sequence of callables which return instrumentation implementations When a class is registered, each callable will be passed a class object. If None is returned, the next finder in the sequence is consulted. Otherwise the return must be an instrumentation factory that follows the same guidelines as sqlalchemy.ext.instrumentation.INSTRUMENTATION_MANAGER. By default, the only finder is find_native_user_instrumentation_hook, which searches for INSTRUMENTATION_MANAGER. If all finders return None, standard ClassManager instrumentation is used. """ class ExtendedInstrumentationRegistry(InstrumentationFactory): """Extends :class:`.InstrumentationFactory` with additional bookkeeping, to accommodate multiple types of class managers. """ _manager_finders = weakref.WeakKeyDictionary() _state_finders = weakref.WeakKeyDictionary() _dict_finders = weakref.WeakKeyDictionary() _extended = False def _locate_extended_factory(self, class_): for finder in instrumentation_finders: factory = finder(class_) if factory is not None: manager = self._extended_class_manager(class_, factory) return manager, factory else: return None, None def _check_conflicts(self, class_, factory): existing_factories = self._collect_management_factories_for(class_).\ difference([factory]) if existing_factories: raise TypeError( "multiple instrumentation implementations specified " "in %s inheritance hierarchy: %r" % ( class_.__name__, list(existing_factories))) def _extended_class_manager(self, class_, factory): manager = factory(class_) if not isinstance(manager, ClassManager): manager = _ClassInstrumentationAdapter(class_, manager) if factory != ClassManager and not self._extended: # somebody invoked a custom ClassManager. # reinstall global "getter" functions with the more # expensive ones. self._extended = True _install_instrumented_lookups() self._manager_finders[class_] = manager.manager_getter() self._state_finders[class_] = manager.state_getter() self._dict_finders[class_] = manager.dict_getter() return manager def _collect_management_factories_for(self, cls): """Return a collection of factories in play or specified for a hierarchy. Traverses the entire inheritance graph of a cls and returns a collection of instrumentation factories for those classes. 
Factories are extracted from active ClassManagers, if available, otherwise instrumentation_finders is consulted. """ hierarchy = util.class_hierarchy(cls) factories = set() for member in hierarchy: manager = self.manager_of_class(member) if manager is not None: factories.add(manager.factory) else: for finder in instrumentation_finders: factory = finder(member) if factory is not None: break else: factory = None factories.add(factory) factories.discard(None) return factories def unregister(self, class_): if class_ in self._manager_finders: del self._manager_finders[class_] del self._state_finders[class_] del self._dict_finders[class_] super(ExtendedInstrumentationRegistry, self).unregister(class_) def manager_of_class(self, cls): if cls is None: return None return self._manager_finders.get(cls, _default_manager_getter)(cls) def state_of(self, instance): if instance is None: raise AttributeError("None has no persistent state.") return self._state_finders.get( instance.__class__, _default_state_getter)(instance) def dict_of(self, instance): if instance is None: raise AttributeError("None has no persistent state.") return self._dict_finders.get( instance.__class__, _default_dict_getter)(instance) orm_instrumentation._instrumentation_factory = \ _instrumentation_factory = ExtendedInstrumentationRegistry() orm_instrumentation.instrumentation_finders = instrumentation_finders class InstrumentationManager(object): """User-defined class instrumentation extension. :class:`.InstrumentationManager` can be subclassed in order to change how class instrumentation proceeds. This class exists for the purposes of integration with other object management frameworks which would like to entirely modify the instrumentation methodology of the ORM, and is not intended for regular usage. For interception of class instrumentation events, see :class:`.InstrumentationEvents`. The API for this class should be considered as semi-stable, and may change slightly with new releases. .. versionchanged:: 0.8 :class:`.InstrumentationManager` was moved from :mod:`sqlalchemy.orm.instrumentation` to :mod:`sqlalchemy.ext.instrumentation`. """ # r4361 added a mandatory (cls) constructor to this interface. # given that, perhaps class_ should be dropped from all of these # signatures. 
def __init__(self, class_): pass def manage(self, class_, manager): setattr(class_, '_default_class_manager', manager) def dispose(self, class_, manager): delattr(class_, '_default_class_manager') def manager_getter(self, class_): def get(cls): return cls._default_class_manager return get def instrument_attribute(self, class_, key, inst): pass def post_configure_attribute(self, class_, key, inst): pass def install_descriptor(self, class_, key, inst): setattr(class_, key, inst) def uninstall_descriptor(self, class_, key): delattr(class_, key) def install_member(self, class_, key, implementation): setattr(class_, key, implementation) def uninstall_member(self, class_, key): delattr(class_, key) def instrument_collection_class(self, class_, key, collection_class): return collections.prepare_instrumentation(collection_class) def get_instance_dict(self, class_, instance): return instance.__dict__ def initialize_instance_dict(self, class_, instance): pass def install_state(self, class_, instance, state): setattr(instance, '_default_state', state) def remove_state(self, class_, instance): delattr(instance, '_default_state') def state_getter(self, class_): return lambda instance: getattr(instance, '_default_state') def dict_getter(self, class_): return lambda inst: self.get_instance_dict(class_, inst) class _ClassInstrumentationAdapter(ClassManager): """Adapts a user-defined InstrumentationManager to a ClassManager.""" def __init__(self, class_, override): self._adapted = override self._get_state = self._adapted.state_getter(class_) self._get_dict = self._adapted.dict_getter(class_) ClassManager.__init__(self, class_) def manage(self): self._adapted.manage(self.class_, self) def dispose(self): self._adapted.dispose(self.class_) def manager_getter(self): return self._adapted.manager_getter(self.class_) def instrument_attribute(self, key, inst, propagated=False): ClassManager.instrument_attribute(self, key, inst, propagated) if not propagated: self._adapted.instrument_attribute(self.class_, key, inst) def post_configure_attribute(self, key): super(_ClassInstrumentationAdapter, self).post_configure_attribute(key) self._adapted.post_configure_attribute(self.class_, key, self[key]) def install_descriptor(self, key, inst): self._adapted.install_descriptor(self.class_, key, inst) def uninstall_descriptor(self, key): self._adapted.uninstall_descriptor(self.class_, key) def install_member(self, key, implementation): self._adapted.install_member(self.class_, key, implementation) def uninstall_member(self, key): self._adapted.uninstall_member(self.class_, key) def instrument_collection_class(self, key, collection_class): return self._adapted.instrument_collection_class( self.class_, key, collection_class) def initialize_collection(self, key, state, factory): delegate = getattr(self._adapted, 'initialize_collection', None) if delegate: return delegate(key, state, factory) else: return ClassManager.initialize_collection(self, key, state, factory) def new_instance(self, state=None): instance = self.class_.__new__(self.class_) self.setup_instance(instance, state) return instance def _new_state_if_none(self, instance): """Install a default InstanceState if none is present. A private convenience method used by the __init__ decorator. 
""" if self.has_state(instance): return False else: return self.setup_instance(instance) def setup_instance(self, instance, state=None): self._adapted.initialize_instance_dict(self.class_, instance) if state is None: state = self._state_constructor(instance, self) # the given instance is assumed to have no state self._adapted.install_state(self.class_, instance, state) return state def teardown_instance(self, instance): self._adapted.remove_state(self.class_, instance) def has_state(self, instance): try: self._get_state(instance) except orm_exc.NO_STATE: return False else: return True def state_getter(self): return self._get_state def dict_getter(self): return self._get_dict def _install_instrumented_lookups(): """Replace global class/object management functions with ExtendedInstrumentationRegistry implementations, which allow multiple types of class managers to be present, at the cost of performance. This function is called only by ExtendedInstrumentationRegistry and unit tests specific to this behavior. The _reinstall_default_lookups() function can be called after this one to re-establish the default functions. """ _install_lookups( dict( instance_state=_instrumentation_factory.state_of, instance_dict=_instrumentation_factory.dict_of, manager_of_class=_instrumentation_factory.manager_of_class ) ) def _reinstall_default_lookups(): """Restore simplified lookups.""" _install_lookups( dict( instance_state=_default_state_getter, instance_dict=_default_dict_getter, manager_of_class=_default_manager_getter ) ) def _install_lookups(lookups): global instance_state, instance_dict, manager_of_class instance_state = lookups['instance_state'] instance_dict = lookups['instance_dict'] manager_of_class = lookups['manager_of_class'] attributes.instance_state = \ orm_instrumentation.instance_state = instance_state attributes.instance_dict = \ orm_instrumentation.instance_dict = instance_dict attributes.manager_of_class = \ orm_instrumentation.manager_of_class = manager_of_class SQLAlchemy-0.8.4/lib/sqlalchemy/ext/mutable.py0000644000076500000240000005457412251150015021753 0ustar classicstaff00000000000000# ext/mutable.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Provide support for tracking of in-place changes to scalar values, which are propagated into ORM change events on owning parent objects. .. versionadded:: 0.7 :mod:`sqlalchemy.ext.mutable` replaces SQLAlchemy's legacy approach to in-place mutations of scalar values; see :ref:`07_migration_mutation_extension`. .. _mutable_scalars: Establishing Mutability on Scalar Column Values =============================================== A typical example of a "mutable" structure is a Python dictionary. Following the example introduced in :ref:`types_toplevel`, we begin with a custom type that marshals Python dictionaries into JSON strings before being persisted:: from sqlalchemy.types import TypeDecorator, VARCHAR import json class JSONEncodedDict(TypeDecorator): "Represents an immutable structure as a json-encoded string." impl = VARCHAR def process_bind_param(self, value, dialect): if value is not None: value = json.dumps(value) return value def process_result_value(self, value, dialect): if value is not None: value = json.loads(value) return value The usage of ``json`` is only for the purposes of example. 
The :mod:`sqlalchemy.ext.mutable` extension can be used
with any type whose target Python type may be mutable, including
:class:`.PickleType`, :class:`.postgresql.ARRAY`, etc.

When using the :mod:`sqlalchemy.ext.mutable` extension, the value itself
tracks all parents which reference it.  Below, we illustrate a simple
version of the :class:`.MutableDict` dictionary object, which applies
the :class:`.Mutable` mixin to a plain Python dictionary::

    from sqlalchemy.ext.mutable import Mutable

    class MutableDict(Mutable, dict):
        @classmethod
        def coerce(cls, key, value):
            "Convert plain dictionaries to MutableDict."

            if not isinstance(value, MutableDict):
                if isinstance(value, dict):
                    return MutableDict(value)

                # this call will raise ValueError
                return Mutable.coerce(key, value)
            else:
                return value

        def __setitem__(self, key, value):
            "Detect dictionary set events and emit change events."

            dict.__setitem__(self, key, value)
            self.changed()

        def __delitem__(self, key):
            "Detect dictionary del events and emit change events."

            dict.__delitem__(self, key)
            self.changed()

The above dictionary class takes the approach of subclassing the Python
built-in ``dict`` to produce a dict subclass which routes all mutation events
through ``__setitem__``.  There are variants on this approach, such as
subclassing ``UserDict.UserDict`` or ``collections.MutableMapping``; the part
that's important to this example is that the :meth:`.Mutable.changed` method
is called whenever an in-place change to the datastructure takes place.

We also redefine the :meth:`.Mutable.coerce` method, which will be used to
convert any values that are not instances of ``MutableDict``, such as the
plain dictionaries returned by the ``json`` module, into the appropriate
type.  Defining this method is optional; we could just as well have created
our ``JSONEncodedDict`` such that it always returns an instance of
``MutableDict``, and additionally ensured that all calling code uses
``MutableDict`` explicitly.  When :meth:`.Mutable.coerce` is not overridden,
any values applied to a parent object which are not instances of the mutable
type will raise a ``ValueError``.

Our new ``MutableDict`` type offers a class method
:meth:`~.Mutable.as_mutable` which we can use within column metadata
to associate with types.  This method grabs the given type object or
class and associates a listener that will detect all future mappings
of this type, applying event listening instrumentation to the mapped
attribute.  For example, with classical table metadata::

    from sqlalchemy import Table, Column, Integer

    my_data = Table('my_data', metadata,
        Column('id', Integer, primary_key=True),
        Column('data', MutableDict.as_mutable(JSONEncodedDict))
    )

Above, :meth:`~.Mutable.as_mutable` returns an instance of ``JSONEncodedDict``
(if the type object was not an instance already), which will intercept any
attributes which are mapped against this type.  Below we establish a simple
mapping against the ``my_data`` table::

    from sqlalchemy.orm import mapper

    class MyDataClass(object):
        pass

    # associates mutation listeners with MyDataClass.data
    mapper(MyDataClass, my_data)

The ``MyDataClass.data`` member will now be notified of in-place changes
to its value.
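As a brief sketch of the coercion behavior established by this mapping,
assigning a plain dictionary to the attribute passes it through
:meth:`.Mutable.coerce`, so the stored value becomes a ``MutableDict``::

    m1 = MyDataClass()
    m1.data = {'value1': 'foo'}              # plain dict is coerced...
    assert isinstance(m1.data, MutableDict)  # ...into a MutableDict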
There's no difference in usage when using declarative:: from sqlalchemy.ext.declarative import declarative_base Base = declarative_base() class MyDataClass(Base): __tablename__ = 'my_data' id = Column(Integer, primary_key=True) data = Column(MutableDict.as_mutable(JSONEncodedDict)) Any in-place changes to the ``MyDataClass.data`` member will flag the attribute as "dirty" on the parent object:: >>> from sqlalchemy.orm import Session >>> sess = Session() >>> m1 = MyDataClass(data={'value1':'foo'}) >>> sess.add(m1) >>> sess.commit() >>> m1.data['value1'] = 'bar' >>> assert m1 in sess.dirty True The ``MutableDict`` can be associated with all future instances of ``JSONEncodedDict`` in one step, using :meth:`~.Mutable.associate_with`. This is similar to :meth:`~.Mutable.as_mutable` except it will intercept all occurrences of ``MutableDict`` in all mappings unconditionally, without the need to declare it individually:: MutableDict.associate_with(JSONEncodedDict) class MyDataClass(Base): __tablename__ = 'my_data' id = Column(Integer, primary_key=True) data = Column(JSONEncodedDict) Supporting Pickling -------------------- The key to the :mod:`sqlalchemy.ext.mutable` extension relies upon the placement of a ``weakref.WeakKeyDictionary`` upon the value object, which stores a mapping of parent mapped objects keyed to the attribute name under which they are associated with this value. ``WeakKeyDictionary`` objects are not picklable, due to the fact that they contain weakrefs and function callbacks. In our case, this is a good thing, since if this dictionary were picklable, it could lead to an excessively large pickle size for our value objects that are pickled by themselves outside of the context of the parent. The developer responsibility here is only to provide a ``__getstate__`` method that excludes the :meth:`~.MutableBase._parents` collection from the pickle stream:: class MyMutableType(Mutable): def __getstate__(self): d = self.__dict__.copy() d.pop('_parents', None) return d With our dictionary example, we need to return the contents of the dict itself (and also restore them on __setstate__):: class MutableDict(Mutable, dict): # .... def __getstate__(self): return dict(self) def __setstate__(self, state): self.update(state) In the case that our mutable value object is pickled as it is attached to one or more parent objects that are also part of the pickle, the :class:`.Mutable` mixin will re-establish the :attr:`.Mutable._parents` collection on each value object as the owning parents themselves are unpickled. .. _mutable_composites: Establishing Mutability on Composites ===================================== Composites are a special ORM feature which allow a single scalar attribute to be assigned an object value which represents information "composed" from one or more columns from the underlying mapped table. The usual example is that of a geometric "point", and is introduced in :ref:`mapper_composite`. .. versionchanged:: 0.7 The internals of :func:`.orm.composite` have been greatly simplified and in-place mutation detection is no longer enabled by default; instead, the user-defined value must detect changes on its own and propagate them to all owning parents. The :mod:`sqlalchemy.ext.mutable` extension provides the helper class :class:`.MutableComposite`, which is a slight variant on the :class:`.Mutable` class. 
As is the case with :class:`.Mutable`, the user-defined composite class subclasses :class:`.MutableComposite` as a mixin, and detects and delivers change events to its parents via the :meth:`.MutableComposite.changed` method. In the case of a composite class, the detection is usually via the usage of Python descriptors (i.e. ``@property``), or alternatively via the special Python method ``__setattr__()``. Below we expand upon the ``Point`` class introduced in :ref:`mapper_composite` to subclass :class:`.MutableComposite` and to also route attribute set events via ``__setattr__`` to the :meth:`.MutableComposite.changed` method:: from sqlalchemy.ext.mutable import MutableComposite class Point(MutableComposite): def __init__(self, x, y): self.x = x self.y = y def __setattr__(self, key, value): "Intercept set events" # set the attribute object.__setattr__(self, key, value) # alert all parents to the change self.changed() def __composite_values__(self): return self.x, self.y def __eq__(self, other): return isinstance(other, Point) and \\ other.x == self.x and \\ other.y == self.y def __ne__(self, other): return not self.__eq__(other) The :class:`.MutableComposite` class uses a Python metaclass to automatically establish listeners for any usage of :func:`.orm.composite` that specifies our ``Point`` type. Below, when ``Point`` is mapped to the ``Vertex`` class, listeners are established which will route change events from ``Point`` objects to each of the ``Vertex.start`` and ``Vertex.end`` attributes:: from sqlalchemy.orm import composite, mapper from sqlalchemy import Table, Column vertices = Table('vertices', metadata, Column('id', Integer, primary_key=True), Column('x1', Integer), Column('y1', Integer), Column('x2', Integer), Column('y2', Integer), ) class Vertex(object): pass mapper(Vertex, vertices, properties={ 'start': composite(Point, vertices.c.x1, vertices.c.y1), 'end': composite(Point, vertices.c.x2, vertices.c.y2) }) Any in-place changes to the ``Vertex.start`` or ``Vertex.end`` members will flag the attribute as "dirty" on the parent object:: >>> from sqlalchemy.orm import Session >>> sess = Session() >>> v1 = Vertex(start=Point(3, 4), end=Point(12, 15)) >>> sess.add(v1) >>> sess.commit() >>> v1.end.x = 8 >>> assert v1 in sess.dirty True Coercing Mutable Composites --------------------------- The :meth:`.MutableBase.coerce` method is also supported on composite types. In the case of :class:`.MutableComposite`, the :meth:`.MutableBase.coerce` method is only called for attribute set operations, not load operations. Overriding the :meth:`.MutableBase.coerce` method is essentially equivalent to using a :func:`.validates` validation routine for all attributes which make use of the custom composite type:: class Point(MutableComposite): # other Point methods # ... def coerce(cls, key, value): if isinstance(value, tuple): value = Point(*value) elif not isinstance(value, Point): raise ValueError("tuple or Point expected") return value .. versionadded:: 0.7.10,0.8.0b2 Support for the :meth:`.MutableBase.coerce` method in conjunction with objects of type :class:`.MutableComposite`. Supporting Pickling -------------------- As is the case with :class:`.Mutable`, the :class:`.MutableComposite` helper class uses a ``weakref.WeakKeyDictionary`` available via the :meth:`.MutableBase._parents` attribute which isn't picklable. If we need to pickle instances of ``Point`` or its owning class ``Vertex``, we at least need to define a ``__getstate__`` that doesn't include the ``_parents`` dictionary. 
Below we define both a ``__getstate__`` and a ``__setstate__`` that package up the minimal form of our ``Point`` class:: class Point(MutableComposite): # ... def __getstate__(self): return self.x, self.y def __setstate__(self, state): self.x, self.y = state As with :class:`.Mutable`, the :class:`.MutableComposite` augments the pickling process of the parent's object-relational state so that the :meth:`.MutableBase._parents` collection is restored to all ``Point`` objects. """ from ..orm.attributes import flag_modified from .. import event, types from ..orm import mapper, object_mapper, Mapper from ..util import memoized_property import weakref class MutableBase(object): """Common base class to :class:`.Mutable` and :class:`.MutableComposite`. """ @memoized_property def _parents(self): """Dictionary of parent object->attribute name on the parent. This attribute is a so-called "memoized" property. It initializes itself with a new ``weakref.WeakKeyDictionary`` the first time it is accessed, returning the same object upon subsequent access. """ return weakref.WeakKeyDictionary() @classmethod def coerce(cls, key, value): """Given a value, coerce it into the target type. Can be overridden by custom subclasses to coerce incoming data into a particular type. By default, raises ``ValueError``. This method is called in different scenarios depending on if the parent class is of type :class:`.Mutable` or of type :class:`.MutableComposite`. In the case of the former, it is called for both attribute-set operations as well as during ORM loading operations. For the latter, it is only called during attribute-set operations; the mechanics of the :func:`.composite` construct handle coercion during load operations. :param key: string name of the ORM-mapped attribute being set. :param value: the incoming value. :return: the method should return the coerced value, or raise ``ValueError`` if the coercion cannot be completed. """ if value is None: return None msg = "Attribute '%s' does not accept objects of type %s" raise ValueError(msg % (key, type(value))) @classmethod def _listen_on_attribute(cls, attribute, coerce, parent_cls): """Establish this type as a mutation listener for the given mapped descriptor. """ key = attribute.key if parent_cls is not attribute.class_: return # rely on "propagate" here parent_cls = attribute.class_ def load(state, *args): """Listen for objects loaded or refreshed. Wrap the target data member's value with ``Mutable``. """ val = state.dict.get(key, None) if val is not None: if coerce: val = cls.coerce(key, val) state.dict[key] = val val._parents[state.obj()] = key def set(target, value, oldvalue, initiator): """Listen for set/replace events on the target data member. Establish a weak reference to the parent object on the incoming value, remove it for the one outgoing. 
""" if not isinstance(value, cls): value = cls.coerce(key, value) if value is not None: value._parents[target.obj()] = key if isinstance(oldvalue, cls): oldvalue._parents.pop(target.obj(), None) return value def pickle(state, state_dict): val = state.dict.get(key, None) if val is not None: if 'ext.mutable.values' not in state_dict: state_dict['ext.mutable.values'] = [] state_dict['ext.mutable.values'].append(val) def unpickle(state, state_dict): if 'ext.mutable.values' in state_dict: for val in state_dict['ext.mutable.values']: val._parents[state.obj()] = key event.listen(parent_cls, 'load', load, raw=True, propagate=True) event.listen(parent_cls, 'refresh', load, raw=True, propagate=True) event.listen(attribute, 'set', set, raw=True, retval=True, propagate=True) event.listen(parent_cls, 'pickle', pickle, raw=True, propagate=True) event.listen(parent_cls, 'unpickle', unpickle, raw=True, propagate=True) class Mutable(MutableBase): """Mixin that defines transparent propagation of change events to a parent object. See the example in :ref:`mutable_scalars` for usage information. """ def changed(self): """Subclasses should call this method whenever change events occur.""" for parent, key in self._parents.items(): flag_modified(parent, key) @classmethod def associate_with_attribute(cls, attribute): """Establish this type as a mutation listener for the given mapped descriptor. """ cls._listen_on_attribute(attribute, True, attribute.class_) @classmethod def associate_with(cls, sqltype): """Associate this wrapper with all future mapped columns of the given type. This is a convenience method that calls ``associate_with_attribute`` automatically. .. warning:: The listeners established by this method are *global* to all mappers, and are *not* garbage collected. Only use :meth:`.associate_with` for types that are permanent to an application, not with ad-hoc types else this will cause unbounded growth in memory usage. """ def listen_for_type(mapper, class_): for prop in mapper.column_attrs: if isinstance(prop.columns[0].type, sqltype): cls.associate_with_attribute(getattr(class_, prop.key)) event.listen(mapper, 'mapper_configured', listen_for_type) @classmethod def as_mutable(cls, sqltype): """Associate a SQL type with this mutable Python type. This establishes listeners that will detect ORM mappings against the given type, adding mutation event trackers to those mappings. The type is returned, unconditionally as an instance, so that :meth:`.as_mutable` can be used inline:: Table('mytable', metadata, Column('id', Integer, primary_key=True), Column('data', MyMutableType.as_mutable(PickleType)) ) Note that the returned type is always an instance, even if a class is given, and that only columns which are declared specifically with that type instance receive additional instrumentation. To associate a particular mutable type with all occurrences of a particular type, use the :meth:`.Mutable.associate_with` classmethod of the particular :meth:`.Mutable` subclass to establish a global association. .. warning:: The listeners established by this method are *global* to all mappers, and are *not* garbage collected. Only use :meth:`.as_mutable` for types that are permanent to an application, not with ad-hoc types else this will cause unbounded growth in memory usage. 
""" sqltype = types.to_instance(sqltype) def listen_for_type(mapper, class_): for prop in mapper.column_attrs: if prop.columns[0].type is sqltype: cls.associate_with_attribute(getattr(class_, prop.key)) event.listen(mapper, 'mapper_configured', listen_for_type) return sqltype class MutableComposite(MutableBase): """Mixin that defines transparent propagation of change events on a SQLAlchemy "composite" object to its owning parent or parents. See the example in :ref:`mutable_composites` for usage information. """ def changed(self): """Subclasses should call this method whenever change events occur.""" for parent, key in self._parents.items(): prop = object_mapper(parent).get_property(key) for value, attr_name in zip( self.__composite_values__(), prop._attribute_keys): setattr(parent, attr_name, value) def _setup_composite_listener(): import types def _listen_for_type(mapper, class_): for prop in mapper.iterate_properties: if (hasattr(prop, 'composite_class') and (type(prop.composite_class) in (types.ClassType, types.TypeType)) and issubclass(prop.composite_class, MutableComposite)): prop.composite_class._listen_on_attribute( getattr(class_, prop.key), False, class_) if not Mapper.dispatch.mapper_configured._contains(Mapper, _listen_for_type): event.listen(Mapper, 'mapper_configured', _listen_for_type) _setup_composite_listener() class MutableDict(Mutable, dict): """A dictionary type that implements :class:`.Mutable`. .. versionadded:: 0.8 """ def __setitem__(self, key, value): """Detect dictionary set events and emit change events.""" dict.__setitem__(self, key, value) self.changed() def __delitem__(self, key): """Detect dictionary del events and emit change events.""" dict.__delitem__(self, key) self.changed() def clear(self): dict.clear(self) self.changed() @classmethod def coerce(cls, key, value): """Convert plain dictionary to MutableDict.""" if not isinstance(value, MutableDict): if isinstance(value, dict): return MutableDict(value) return Mutable.coerce(key, value) else: return value def __getstate__(self): return dict(self) def __setstate__(self, state): self.update(state) SQLAlchemy-0.8.4/lib/sqlalchemy/ext/orderinglist.py0000644000076500000240000003145612251150015023021 0ustar classicstaff00000000000000# ext/orderinglist.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """A custom list that manages index/position information for contained elements. :author: Jason Kirtland ``orderinglist`` is a helper for mutable ordered relationships. It will intercept list operations performed on a :func:`.relationship`-managed collection and automatically synchronize changes in list position onto a target scalar attribute. Example: A ``slide`` table, where each row refers to zero or more entries in a related ``bullet`` table. The bullets within a slide are displayed in order based on the value of the ``position`` column in the ``bullet`` table. 
As entries are reordered in memory, the value of the ``position`` attribute should be updated to reflect the new sort order:: Base = declarative_base() class Slide(Base): __tablename__ = 'slide' id = Column(Integer, primary_key=True) name = Column(String) bullets = relationship("Bullet", order_by="Bullet.position") class Bullet(Base): __tablename__ = 'bullet' id = Column(Integer, primary_key=True) slide_id = Column(Integer, ForeignKey('slide.id')) position = Column(Integer) text = Column(String) The standard relationship mapping will produce a list-like attribute on each ``Slide`` containing all related ``Bullet`` objects, but coping with changes in ordering is not handled automatically. When appending a ``Bullet`` into ``Slide.bullets``, the ``Bullet.position`` attribute will remain unset until manually assigned. When the ``Bullet`` is inserted into the middle of the list, the following ``Bullet`` objects will also need to be renumbered. The :class:`.OrderingList` object automates this task, managing the ``position`` attribute on all ``Bullet`` objects in the collection. It is constructed using the :func:`.ordering_list` factory:: from sqlalchemy.ext.orderinglist import ordering_list Base = declarative_base() class Slide(Base): __tablename__ = 'slide' id = Column(Integer, primary_key=True) name = Column(String) bullets = relationship("Bullet", order_by="Bullet.position", collection_class=ordering_list('position')) class Bullet(Base): __tablename__ = 'bullet' id = Column(Integer, primary_key=True) slide_id = Column(Integer, ForeignKey('slide.id')) position = Column(Integer) text = Column(String) With the above mapping the ``Bullet.position`` attribute is managed:: s = Slide() s.bullets.append(Bullet()) s.bullets.append(Bullet()) s.bullets[1].position >>> 1 s.bullets.insert(1, Bullet()) s.bullets[2].position >>> 2 The :class:`.OrderingList` construct only works with **changes** to a collection, and not the initial load from the database, and requires that the list be sorted when loaded. Therefore, be sure to specify ``order_by`` on the :func:`.relationship` against the target ordering attribute, so that the ordering is correct when first loaded. .. warning:: :class:`.OrderingList` only provides limited functionality when a primary key column or unique column is the target of the sort. Since changing the order of entries often means that two rows must trade values, this is not possible when the value is constrained by a primary key or unique constraint, since one of the rows would temporarily have to point to a third available value so that the other row could take its old value. :class:`.OrderingList` doesn't do any of this for you, nor does SQLAlchemy itself. :func:`.ordering_list` takes the name of the related object's ordering attribute as an argument. By default, the zero-based integer index of the object's position in the :func:`.ordering_list` is synchronized with the ordering attribute: index 0 will get position 0, index 1 position 1, etc. To start numbering at 1 or some other integer, provide ``count_from=1``. """ from ..orm.collections import collection from .. import util __all__ = ['ordering_list'] def ordering_list(attr, count_from=None, **kw): """Prepares an :class:`OrderingList` factory for use in mapper definitions. Returns an object suitable for use as an argument to a Mapper relationship's ``collection_class`` option. 
e.g.:: from sqlalchemy.ext.orderinglist import ordering_list class Slide(Base): __tablename__ = 'slide' id = Column(Integer, primary_key=True) name = Column(String) bullets = relationship("Bullet", order_by="Bullet.position", collection_class=ordering_list('position')) :param attr: Name of the mapped attribute to use for storage and retrieval of ordering information :param count_from: Set up an integer-based ordering, starting at ``count_from``. For example, ``ordering_list('pos', count_from=1)`` would create a 1-based list in SQL, storing the value in the 'pos' column. Ignored if ``ordering_func`` is supplied. Additional arguments are passed to the :class:`.OrderingList` constructor. """ kw = _unsugar_count_from(count_from=count_from, **kw) return lambda: OrderingList(attr, **kw) # Ordering utility functions def count_from_0(index, collection): """Numbering function: consecutive integers starting at 0.""" return index def count_from_1(index, collection): """Numbering function: consecutive integers starting at 1.""" return index + 1 def count_from_n_factory(start): """Numbering function: consecutive integers starting at arbitrary start.""" def f(index, collection): return index + start try: f.__name__ = 'count_from_%i' % start except TypeError: pass return f def _unsugar_count_from(**kw): """Builds counting functions from keyword arguments. Keyword argument filter, prepares a simple ``ordering_func`` from a ``count_from`` argument, otherwise passes ``ordering_func`` on unchanged. """ count_from = kw.pop('count_from', None) if kw.get('ordering_func', None) is None and count_from is not None: if count_from == 0: kw['ordering_func'] = count_from_0 elif count_from == 1: kw['ordering_func'] = count_from_1 else: kw['ordering_func'] = count_from_n_factory(count_from) return kw class OrderingList(list): """A custom list that manages position information for its children. The :class:`.OrderingList` object is normally set up using the :func:`.ordering_list` factory function, used in conjunction with the :func:`.relationship` function. """ def __init__(self, ordering_attr=None, ordering_func=None, reorder_on_append=False): """A custom list that manages position information for its children. ``OrderingList`` is a ``collection_class`` list implementation that syncs position in a Python list with a position attribute on the mapped objects. This implementation relies on the list starting in the proper order, so be **sure** to put an ``order_by`` on your relationship. :param ordering_attr: Name of the attribute that stores the object's order in the relationship. :param ordering_func: Optional. A function that maps the position in the Python list to a value to store in the ``ordering_attr``. Values returned are usually (but need not be!) integers. An ``ordering_func`` is called with two positional parameters: the index of the element in the list, and the list itself. If omitted, Python list indexes are used for the attribute values. Two basic pre-built numbering functions are provided in this module: ``count_from_0`` and ``count_from_1``. For more exotic examples like stepped numbering, alphabetical and Fibonacci numbering, see the unit tests. :param reorder_on_append: Default False. When appending an object with an existing (non-None) ordering value, that value will be left untouched unless ``reorder_on_append`` is true. This is an optimization to avoid a variety of dangerous unexpected database writes. SQLAlchemy will add instances to the list via append() when your object loads. 
If for some reason the result set from the database skips a step in the ordering (say, row '1' is missing but you get '2', '3', and '4'), reorder_on_append=True would immediately renumber the items to '1', '2', '3'. If you have multiple sessions making changes, any of whom happen to load this collection even in passing, all of the sessions would try to "clean up" the numbering in their commits, possibly causing all but one to fail with a concurrent modification error. Recommend leaving this with the default of False, and just call ``reorder()`` if you're doing ``append()`` operations with previously ordered instances or when doing some housekeeping after manual sql operations. """ self.ordering_attr = ordering_attr if ordering_func is None: ordering_func = count_from_0 self.ordering_func = ordering_func self.reorder_on_append = reorder_on_append # More complex serialization schemes (multi column, e.g.) are possible by # subclassing and reimplementing these two methods. def _get_order_value(self, entity): return getattr(entity, self.ordering_attr) def _set_order_value(self, entity, value): setattr(entity, self.ordering_attr, value) def reorder(self): """Synchronize ordering for the entire collection. Sweeps through the list and ensures that each object has accurate ordering information set. """ for index, entity in enumerate(self): self._order_entity(index, entity, True) # As of 0.5, _reorder is no longer semi-private _reorder = reorder def _order_entity(self, index, entity, reorder=True): have = self._get_order_value(entity) # Don't disturb existing ordering if reorder is False if have is not None and not reorder: return should_be = self.ordering_func(index, self) if have != should_be: self._set_order_value(entity, should_be) def append(self, entity): super(OrderingList, self).append(entity) self._order_entity(len(self) - 1, entity, self.reorder_on_append) def _raw_append(self, entity): """Append without any ordering behavior.""" super(OrderingList, self).append(entity) _raw_append = collection.adds(1)(_raw_append) def insert(self, index, entity): super(OrderingList, self).insert(index, entity) self._reorder() def remove(self, entity): super(OrderingList, self).remove(entity) self._reorder() def pop(self, index=-1): entity = super(OrderingList, self).pop(index) self._reorder() return entity def __setitem__(self, index, entity): if isinstance(index, slice): step = index.step or 1 start = index.start or 0 if start < 0: start += len(self) stop = index.stop or len(self) if stop < 0: stop += len(self) for i in xrange(start, stop, step): self.__setitem__(i, entity[i]) else: self._order_entity(index, entity, True) super(OrderingList, self).__setitem__(index, entity) def __delitem__(self, index): super(OrderingList, self).__delitem__(index) self._reorder() # Py2K def __setslice__(self, start, end, values): super(OrderingList, self).__setslice__(start, end, values) self._reorder() def __delslice__(self, start, end): super(OrderingList, self).__delslice__(start, end) self._reorder() # end Py2K def __reduce__(self): return _reconstitute, (self.__class__, self.__dict__, list(self)) for func_name, func in locals().items(): if (util.callable(func) and func.func_name == func_name and not func.__doc__ and hasattr(list, func_name)): func.__doc__ = getattr(list, func_name).__doc__ del func_name, func def _reconstitute(cls, dict_, items): """ Reconstitute an :class:`.OrderingList`. This is the adjoint to :meth:`.OrderingList.__reduce__`. It is used for unpickling :class:`.OrderingList` objects. 
""" obj = cls.__new__(cls) obj.__dict__.update(dict_) list.extend(obj, items) return obj SQLAlchemy-0.8.4/lib/sqlalchemy/ext/serializer.py0000644000076500000240000001340212251150015022454 0ustar classicstaff00000000000000# ext/serializer.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Serializer/Deserializer objects for usage with SQLAlchemy query structures, allowing "contextual" deserialization. Any SQLAlchemy query structure, either based on sqlalchemy.sql.* or sqlalchemy.orm.* can be used. The mappers, Tables, Columns, Session etc. which are referenced by the structure are not persisted in serialized form, but are instead re-associated with the query structure when it is deserialized. Usage is nearly the same as that of the standard Python pickle module:: from sqlalchemy.ext.serializer import loads, dumps metadata = MetaData(bind=some_engine) Session = scoped_session(sessionmaker()) # ... define mappers query = Session.query(MyClass).filter(MyClass.somedata=='foo').order_by(MyClass.sortkey) # pickle the query serialized = dumps(query) # unpickle. Pass in metadata + scoped_session query2 = loads(serialized, metadata, Session) print query2.all() Similar restrictions as when using raw pickle apply; mapped classes must be themselves be pickleable, meaning they are importable from a module-level namespace. The serializer module is only appropriate for query structures. It is not needed for: * instances of user-defined classes. These contain no references to engines, sessions or expression constructs in the typical case and can be serialized directly. * Table metadata that is to be loaded entirely from the serialized structure (i.e. is not already declared in the application). Regular pickle.loads()/dumps() can be used to fully dump any ``MetaData`` object, typically one which was reflected from an existing database at some previous point in time. The serializer module is specifically for the opposite case, where the Table metadata is already present in memory. """ from ..orm import class_mapper from ..orm.session import Session from ..orm.mapper import Mapper from ..orm.interfaces import MapperProperty from ..orm.attributes import QueryableAttribute from .. 
import Table, Column from ..engine import Engine from ..util import pickle, text_type import re import base64 # Py3K #from io import BytesIO as byte_buffer # Py2K from cStringIO import StringIO as byte_buffer # end Py2K # Py3K #def b64encode(x): # return base64.b64encode(x).decode('ascii') #def b64decode(x): # return base64.b64decode(x.encode('ascii')) # Py2K b64encode = base64.b64encode b64decode = base64.b64decode # end Py2K __all__ = ['Serializer', 'Deserializer', 'dumps', 'loads'] def Serializer(*args, **kw): pickler = pickle.Pickler(*args, **kw) def persistent_id(obj): #print "serializing:", repr(obj) if isinstance(obj, QueryableAttribute): cls = obj.impl.class_ key = obj.impl.key id = "attribute:" + key + ":" + b64encode(pickle.dumps(cls)) elif isinstance(obj, Mapper) and not obj.non_primary: id = "mapper:" + b64encode(pickle.dumps(obj.class_)) elif isinstance(obj, MapperProperty) and not obj.parent.non_primary: id = "mapperprop:" + b64encode(pickle.dumps(obj.parent.class_)) + \ ":" + obj.key elif isinstance(obj, Table): id = "table:" + text_type(obj.key) elif isinstance(obj, Column) and isinstance(obj.table, Table): id = "column:" + text_type(obj.table.key) + ":" + text_type(obj.key) elif isinstance(obj, Session): id = "session:" elif isinstance(obj, Engine): id = "engine:" else: return None return id pickler.persistent_id = persistent_id return pickler our_ids = re.compile( r'(mapperprop|mapper|table|column|session|attribute|engine):(.*)') def Deserializer(file, metadata=None, scoped_session=None, engine=None): unpickler = pickle.Unpickler(file) def get_engine(): if engine: return engine elif scoped_session and scoped_session().bind: return scoped_session().bind elif metadata and metadata.bind: return metadata.bind else: return None def persistent_load(id): m = our_ids.match(text_type(id)) if not m: return None else: type_, args = m.group(1, 2) if type_ == 'attribute': key, clsarg = args.split(":") cls = pickle.loads(b64decode(clsarg)) return getattr(cls, key) elif type_ == "mapper": cls = pickle.loads(b64decode(args)) return class_mapper(cls) elif type_ == "mapperprop": mapper, keyname = args.split(':') cls = pickle.loads(b64decode(mapper)) return class_mapper(cls).attrs[keyname] elif type_ == "table": return metadata.tables[args] elif type_ == "column": table, colname = args.split(':') return metadata.tables[table].c[colname] elif type_ == "session": return scoped_session() elif type_ == "engine": return get_engine() else: raise Exception("Unknown token: %s" % type_) unpickler.persistent_load = persistent_load return unpickler def dumps(obj, protocol=0): buf = byte_buffer() pickler = Serializer(buf, protocol) pickler.dump(obj) return buf.getvalue() def loads(data, metadata=None, scoped_session=None, engine=None): buf = byte_buffer(data) unpickler = Deserializer(buf, metadata, scoped_session, engine) return unpickler.load() SQLAlchemy-0.8.4/lib/sqlalchemy/inspection.py0000644000076500000240000000604012251150015021656 0ustar classicstaff00000000000000# sqlalchemy/inspect.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """The inspection module provides the :func:`.inspect` function, which delivers runtime information about a wide variety of SQLAlchemy objects, both within the Core as well as the ORM. 
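A short sketch of typical calls (assuming a mapped class ``User``, an
instance ``some_user``, and an :class:`.Engine` named ``engine`` defined
elsewhere)::

    from sqlalchemy import inspect

    mapper = inspect(User)       # Mapper for a mapped class
    insp = inspect(engine)       # engine.Inspector for schema inspection
    state = inspect(some_user)   # InstanceState for a mapped instance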
The :func:`.inspect` function is the entry point to SQLAlchemy's public API for viewing the configuration and construction of in-memory objects. Depending on the type of object passed to :func:`.inspect`, the return value will either be a related object which provides a known interface, or in many cases it will return the object itself. The rationale for :func:`.inspect` is twofold. One is that it replaces the need to be aware of a large variety of "information getting" functions in SQLAlchemy, such as :meth:`.Inspector.from_engine`, :func:`.orm.attributes.instance_state`, :func:`.orm.class_mapper`, and others. The other is that the return value of :func:`.inspect` is guaranteed to obey a documented API, thus allowing third party tools which build on top of SQLAlchemy configurations to be constructed in a forwards-compatible way. .. versionadded:: 0.8 The :func:`.inspect` system is introduced as of version 0.8. """ from . import util, exc _registrars = util.defaultdict(list) def inspect(subject, raiseerr=True): """Produce an inspection object for the given target. The returned value in some cases may be the same object as the one given, such as if a :class:`.orm.Mapper` object is passed. In other cases, it will be an instance of the registered inspection type for the given object, such as if a :class:`.engine.Engine` is passed, an :class:`.engine.Inspector` object is returned. :param subject: the subject to be inspected. :param raiseerr: When ``True``, if the given subject does not correspond to a known SQLAlchemy inspected type, :class:`sqlalchemy.exc.NoInspectionAvailable` is raised. If ``False``, ``None`` is returned. """ type_ = type(subject) for cls in type_.__mro__: if cls in _registrars: reg = _registrars[cls] if reg is True: return subject ret = reg(subject) if ret is not None: break else: reg = ret = None if raiseerr and ( reg is None or ret is None ): raise exc.NoInspectionAvailable( "No inspection system is " "available for object of type %s" % type_) return ret def _inspects(*types): def decorate(fn_or_cls): for type_ in types: if type_ in _registrars: raise AssertionError( "Type %s is already " "registered" % type_) _registrars[type_] = fn_or_cls return fn_or_cls return decorate def _self_inspects(*types): _inspects(*types)(True) SQLAlchemy-0.8.4/lib/sqlalchemy/interfaces.py0000644000076500000240000002524612251147171021650 0ustar classicstaff00000000000000# sqlalchemy/interfaces.py # Copyright (C) 2007-2013 the SQLAlchemy authors and contributors # Copyright (C) 2007 Jason Kirtland jek@discorporate.us # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Deprecated core event interfaces. This module is **deprecated** and is superseded by the event system. """ from . import event, util class PoolListener(object): """Hooks into the lifecycle of connections in a :class:`.Pool`. .. note:: :class:`.PoolListener` is deprecated. Please refer to :class:`.PoolEvents`. Usage:: class MyListener(PoolListener): def connect(self, dbapi_con, con_record): '''perform connect operations''' # etc. # create a new pool with a listener p = QueuePool(..., listeners=[MyListener()]) # add a listener after the fact p.add_listener(MyListener()) # usage with create_engine() e = create_engine("url://", listeners=[MyListener()]) All of the standard connection :class:`~sqlalchemy.pool.Pool` types can accept event listeners for key connection lifecycle events: creation, pool check-out and check-in. 
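Since this interface is deprecated, new code would typically hook the same
lifecycle points through the event system directly; a minimal sketch
(assuming an :class:`.Engine` named ``e``)::

    from sqlalchemy import event

    def on_connect(dbapi_con, con_record):
        '''perform connect operations'''

    event.listen(e, 'connect', on_connect)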
There are no events fired when a connection closes. For any given DB-API connection, there will be one ``connect`` event, `n` number of ``checkout`` events, and either `n` or `n - 1` ``checkin`` events. (If a ``Connection`` is detached from its pool via the ``detach()`` method, it won't be checked back in.) These are low-level events for low-level objects: raw Python DB-API connections, without the conveniences of the SQLAlchemy ``Connection`` wrapper, ``Dialect`` services or ``ClauseElement`` execution. If you execute SQL through the connection, explicitly closing all cursors and other resources is recommended. Events also receive a ``_ConnectionRecord``, a long-lived internal ``Pool`` object that basically represents a "slot" in the connection pool. ``_ConnectionRecord`` objects have one public attribute of note: ``info``, a dictionary whose contents are scoped to the lifetime of the DB-API connection managed by the record. You can use this shared storage area however you like. There is no need to subclass ``PoolListener`` to handle events. Any class that implements one or more of these methods can be used as a pool listener. The ``Pool`` will inspect the methods provided by a listener object and add the listener to one or more internal event queues based on its capabilities. In terms of efficiency and function call overhead, you're much better off only providing implementations for the hooks you'll be using. """ @classmethod def _adapt_listener(cls, self, listener): """Adapt a :class:`.PoolListener` to individual :class:`event.Dispatch` events. """ listener = util.as_interface(listener, methods=('connect', 'first_connect', 'checkout', 'checkin')) if hasattr(listener, 'connect'): event.listen(self, 'connect', listener.connect) if hasattr(listener, 'first_connect'): event.listen(self, 'first_connect', listener.first_connect) if hasattr(listener, 'checkout'): event.listen(self, 'checkout', listener.checkout) if hasattr(listener, 'checkin'): event.listen(self, 'checkin', listener.checkin) def connect(self, dbapi_con, con_record): """Called once for each new DB-API connection or Pool's ``creator()``. dbapi_con A newly connected raw DB-API connection (not a SQLAlchemy ``Connection`` wrapper). con_record The ``_ConnectionRecord`` that persistently manages the connection """ def first_connect(self, dbapi_con, con_record): """Called exactly once for the first DB-API connection. dbapi_con A newly connected raw DB-API connection (not a SQLAlchemy ``Connection`` wrapper). con_record The ``_ConnectionRecord`` that persistently manages the connection """ def checkout(self, dbapi_con, con_record, con_proxy): """Called when a connection is retrieved from the Pool. dbapi_con A raw DB-API connection con_record The ``_ConnectionRecord`` that persistently manages the connection con_proxy The ``_ConnectionFairy`` which manages the connection for the span of the current checkout. If you raise an ``exc.DisconnectionError``, the current connection will be disposed and a fresh connection retrieved. Processing of all checkout listeners will abort and restart using the new connection. """ def checkin(self, dbapi_con, con_record): """Called when a connection returns to the pool. Note that the connection may be closed, and may be None if the connection has been invalidated. ``checkin`` will not be called for detached connections. (They do not return to the pool.) 
dbapi_con A raw DB-API connection con_record The ``_ConnectionRecord`` that persistently manages the connection """ class ConnectionProxy(object): """Allows interception of statement execution by Connections. .. note:: :class:`.ConnectionProxy` is deprecated. Please refer to :class:`.ConnectionEvents`. Either or both of the ``execute()`` and ``cursor_execute()`` may be implemented to intercept compiled statement and cursor level executions, e.g.:: class MyProxy(ConnectionProxy): def execute(self, conn, execute, clauseelement, *multiparams, **params): print "compiled statement:", clauseelement return execute(clauseelement, *multiparams, **params) def cursor_execute(self, execute, cursor, statement, parameters, context, executemany): print "raw statement:", statement return execute(cursor, statement, parameters, context) The ``execute`` argument is a function that will fulfill the default execution behavior for the operation. The signature illustrated in the example should be used. The proxy is installed into an :class:`~sqlalchemy.engine.Engine` via the ``proxy`` argument:: e = create_engine('someurl://', proxy=MyProxy()) """ @classmethod def _adapt_listener(cls, self, listener): def adapt_execute(conn, clauseelement, multiparams, params): def execute_wrapper(clauseelement, *multiparams, **params): return clauseelement, multiparams, params return listener.execute(conn, execute_wrapper, clauseelement, *multiparams, **params) event.listen(self, 'before_execute', adapt_execute) def adapt_cursor_execute(conn, cursor, statement, parameters, context, executemany): def execute_wrapper( cursor, statement, parameters, context, ): return statement, parameters return listener.cursor_execute( execute_wrapper, cursor, statement, parameters, context, executemany, ) event.listen(self, 'before_cursor_execute', adapt_cursor_execute) def do_nothing_callback(*arg, **kw): pass def adapt_listener(fn): def go(conn, *arg, **kw): fn(conn, do_nothing_callback, *arg, **kw) return util.update_wrapper(go, fn) event.listen(self, 'begin', adapt_listener(listener.begin)) event.listen(self, 'rollback', adapt_listener(listener.rollback)) event.listen(self, 'commit', adapt_listener(listener.commit)) event.listen(self, 'savepoint', adapt_listener(listener.savepoint)) event.listen(self, 'rollback_savepoint', adapt_listener(listener.rollback_savepoint)) event.listen(self, 'release_savepoint', adapt_listener(listener.release_savepoint)) event.listen(self, 'begin_twophase', adapt_listener(listener.begin_twophase)) event.listen(self, 'prepare_twophase', adapt_listener(listener.prepare_twophase)) event.listen(self, 'rollback_twophase', adapt_listener(listener.rollback_twophase)) event.listen(self, 'commit_twophase', adapt_listener(listener.commit_twophase)) def execute(self, conn, execute, clauseelement, *multiparams, **params): """Intercept high level execute() events.""" return execute(clauseelement, *multiparams, **params) def cursor_execute(self, execute, cursor, statement, parameters, context, executemany): """Intercept low-level cursor execute() events.""" return execute(cursor, statement, parameters, context) def begin(self, conn, begin): """Intercept begin() events.""" return begin() def rollback(self, conn, rollback): """Intercept rollback() events.""" return rollback() def commit(self, conn, commit): """Intercept commit() events.""" return commit() def savepoint(self, conn, savepoint, name=None): """Intercept savepoint() events.""" return savepoint(name=name) def rollback_savepoint(self, conn, rollback_savepoint, name, 
context): """Intercept rollback_savepoint() events.""" return rollback_savepoint(name, context) def release_savepoint(self, conn, release_savepoint, name, context): """Intercept release_savepoint() events.""" return release_savepoint(name, context) def begin_twophase(self, conn, begin_twophase, xid): """Intercept begin_twophase() events.""" return begin_twophase(xid) def prepare_twophase(self, conn, prepare_twophase, xid): """Intercept prepare_twophase() events.""" return prepare_twophase(xid) def rollback_twophase(self, conn, rollback_twophase, xid, is_prepared): """Intercept rollback_twophase() events.""" return rollback_twophase(xid, is_prepared) def commit_twophase(self, conn, commit_twophase, xid, is_prepared): """Intercept commit_twophase() events.""" return commit_twophase(xid, is_prepared) SQLAlchemy-0.8.4/lib/sqlalchemy/log.py0000644000076500000240000001521412251150015020267 0ustar classicstaff00000000000000# sqlalchemy/log.py # Copyright (C) 2006-2013 the SQLAlchemy authors and contributors # Includes alterations by Vinay Sajip vinay_sajip@yahoo.co.uk # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Logging control and utilities. Control of logging for SA can be performed from the regular python logging module. The regular dotted module namespace is used, starting at 'sqlalchemy'. For class-level logging, the class name is appended. The "echo" keyword parameter, available on SQLA :class:`.Engine` and :class:`.Pool` objects, corresponds to a logger specific to that instance only. """ import logging import sys # set initial level to WARN. This so that # log statements don't occur in the absense of explicit # logging being enabled for 'sqlalchemy'. rootlogger = logging.getLogger('sqlalchemy') if rootlogger.level == logging.NOTSET: rootlogger.setLevel(logging.WARN) def _add_default_handler(logger): handler = logging.StreamHandler(sys.stdout) handler.setFormatter(logging.Formatter( '%(asctime)s %(levelname)s %(name)s %(message)s')) logger.addHandler(handler) _logged_classes = set() def class_logger(cls, enable=False): logger = logging.getLogger(cls.__module__ + "." + cls.__name__) if enable == 'debug': logger.setLevel(logging.DEBUG) elif enable == 'info': logger.setLevel(logging.INFO) cls._should_log_debug = lambda self: logger.isEnabledFor(logging.DEBUG) cls._should_log_info = lambda self: logger.isEnabledFor(logging.INFO) cls.logger = logger _logged_classes.add(cls) class Identified(object): logging_name = None def _should_log_debug(self): return self.logger.isEnabledFor(logging.DEBUG) def _should_log_info(self): return self.logger.isEnabledFor(logging.INFO) class InstanceLogger(object): """A logger adapter (wrapper) for :class:`.Identified` subclasses. This allows multiple instances (e.g. Engine or Pool instances) to share a logger, but have its verbosity controlled on a per-instance basis. The basic functionality is to return a logging level which is based on an instance's echo setting. 
Default implementation is: 'debug' -> logging.DEBUG True -> logging.INFO False -> Effective level of underlying logger (logging.WARNING by default) None -> same as False """ # Map echo settings to logger levels _echo_map = { None: logging.NOTSET, False: logging.NOTSET, True: logging.INFO, 'debug': logging.DEBUG, } def __init__(self, echo, name): self.echo = echo self.logger = logging.getLogger(name) # if echo flag is enabled and no handlers, # add a handler to the list if self._echo_map[echo] <= logging.INFO \ and not self.logger.handlers: _add_default_handler(self.logger) # # Boilerplate convenience methods # def debug(self, msg, *args, **kwargs): """Delegate a debug call to the underlying logger.""" self.log(logging.DEBUG, msg, *args, **kwargs) def info(self, msg, *args, **kwargs): """Delegate an info call to the underlying logger.""" self.log(logging.INFO, msg, *args, **kwargs) def warning(self, msg, *args, **kwargs): """Delegate a warning call to the underlying logger.""" self.log(logging.WARNING, msg, *args, **kwargs) warn = warning def error(self, msg, *args, **kwargs): """ Delegate an error call to the underlying logger. """ self.log(logging.ERROR, msg, *args, **kwargs) def exception(self, msg, *args, **kwargs): """Delegate an exception call to the underlying logger.""" kwargs["exc_info"] = 1 self.log(logging.ERROR, msg, *args, **kwargs) def critical(self, msg, *args, **kwargs): """Delegate a critical call to the underlying logger.""" self.log(logging.CRITICAL, msg, *args, **kwargs) def log(self, level, msg, *args, **kwargs): """Delegate a log call to the underlying logger. The level here is determined by the echo flag as well as that of the underlying logger, and logger._log() is called directly. """ # inline the logic from isEnabledFor(), # getEffectiveLevel(), to avoid overhead. if self.logger.manager.disable >= level: return selected_level = self._echo_map[self.echo] if selected_level == logging.NOTSET: selected_level = self.logger.getEffectiveLevel() if level >= selected_level: self.logger._log(level, msg, args, **kwargs) def isEnabledFor(self, level): """Is this logger enabled for level 'level'?""" if self.logger.manager.disable >= level: return False return level >= self.getEffectiveLevel() def getEffectiveLevel(self): """What's the effective level for this logger?""" level = self._echo_map[self.echo] if level == logging.NOTSET: level = self.logger.getEffectiveLevel() return level def instance_logger(instance, echoflag=None): """create a logger for an instance that implements :class:`.Identified`.""" if instance.logging_name: name = "%s.%s.%s" % (instance.__class__.__module__, instance.__class__.__name__, instance.logging_name) else: name = "%s.%s" % (instance.__class__.__module__, instance.__class__.__name__) instance._echo = echoflag if echoflag in (False, None): # if no echo setting or False, return a Logger directly, # avoiding overhead of filtering logger = logging.getLogger(name) else: # if a specified echo flag, return an EchoLogger, # which checks the flag, overrides normal log # levels by calling logger._log() logger = InstanceLogger(echoflag, name) instance.logger = logger class echo_property(object): __doc__ = """\ When ``True``, enable log output for this element. This has the effect of setting the Python logging level for the namespace of this element's class and object reference. A value of boolean ``True`` indicates that the loglevel ``logging.INFO`` will be set for the logger, whereas the string value ``debug`` will set the loglevel to ``logging.DEBUG``. 
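For example, a brief sketch using :func:`.create_engine`::

    engine = create_engine('sqlite://', echo=True)   # INFO-level SQL logging
    engine.echo = 'debug'     # switch to DEBUG, which also logs result rows
    engine.echo = False       # disable instance-level logging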
""" def __get__(self, instance, owner): if instance is None: return self else: return instance._echo def __set__(self, instance, value): instance_logger(instance, echoflag=value) SQLAlchemy-0.8.4/lib/sqlalchemy/orm/0000755000076500000240000000000012251151573017740 5ustar classicstaff00000000000000SQLAlchemy-0.8.4/lib/sqlalchemy/orm/__init__.py0000644000076500000240000021236012251150015022043 0ustar classicstaff00000000000000# orm/__init__.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """ Functional constructs for ORM configuration. See the SQLAlchemy object relational tutorial and mapper configuration documentation for an overview of how this module is used. """ from . import exc from .mapper import ( Mapper, _mapper_registry, class_mapper, configure_mappers, reconstructor, validates ) from .interfaces import ( EXT_CONTINUE, EXT_STOP, MapperExtension, PropComparator, SessionExtension, AttributeExtension, ) from .util import ( aliased, join, object_mapper, outerjoin, polymorphic_union, was_deleted, with_parent, with_polymorphic, ) from .properties import ( ColumnProperty, ComparableProperty, CompositeProperty, RelationshipProperty, PropertyLoader, SynonymProperty, ) from .relationships import ( foreign, remote, ) from .session import ( Session, object_session, sessionmaker, make_transient ) from .scoping import ( scoped_session ) from . import mapper as mapperlib from . import strategies from .query import AliasOption, Query from ..sql import util as sql_util from .. import util as sa_util from . import interfaces # here, we can establish InstrumentationManager back # in sqlalchemy.orm and sqlalchemy.orm.interfaces, which # also re-establishes the extended instrumentation system. #from ..ext import instrumentation as _ext_instrumentation #InstrumentationManager = \ # interfaces.InstrumentationManager = \ # _ext_instrumentation.InstrumentationManager __all__ = ( 'EXT_CONTINUE', 'EXT_STOP', 'MapperExtension', 'AttributeExtension', 'PropComparator', 'Query', 'Session', 'aliased', 'backref', 'class_mapper', 'clear_mappers', 'column_property', 'comparable_property', 'compile_mappers', 'configure_mappers', 'composite', 'contains_alias', 'contains_eager', 'create_session', 'defer', 'deferred', 'dynamic_loader', 'eagerload', 'eagerload_all', 'foreign', 'immediateload', 'join', 'joinedload', 'joinedload_all', 'lazyload', 'mapper', 'make_transient', 'noload', 'object_mapper', 'object_session', 'outerjoin', 'polymorphic_union', 'reconstructor', 'relationship', 'relation', 'remote', 'scoped_session', 'sessionmaker', 'subqueryload', 'subqueryload_all', 'synonym', 'undefer', 'undefer_group', 'validates', 'was_deleted', 'with_polymorphic' ) def create_session(bind=None, **kwargs): """Create a new :class:`.Session` with no automation enabled by default. This function is used primarily for testing. The usual route to :class:`.Session` creation is via its constructor or the :func:`.sessionmaker` function. :param bind: optional, a single Connectable to use for all database access in the created :class:`~sqlalchemy.orm.session.Session`. :param \*\*kwargs: optional, passed through to the :class:`.Session` constructor. :returns: an :class:`~sqlalchemy.orm.session.Session` instance The defaults of create_session() are the opposite of that of :func:`sessionmaker`; ``autoflush`` and ``expire_on_commit`` are False, ``autocommit`` is True. 
In this sense the session acts more like the "classic" SQLAlchemy 0.3 session with these. Usage:: >>> from sqlalchemy.orm import create_session >>> session = create_session() It is recommended to use :func:`sessionmaker` instead of create_session(). """ kwargs.setdefault('autoflush', False) kwargs.setdefault('autocommit', True) kwargs.setdefault('expire_on_commit', False) return Session(bind=bind, **kwargs) def relationship(argument, secondary=None, **kwargs): """Provide a relationship of a primary Mapper to a secondary Mapper. This corresponds to a parent-child or associative table relationship. The constructed class is an instance of :class:`.RelationshipProperty`. A typical :func:`.relationship`, used in a classical mapping:: mapper(Parent, properties={ 'children': relationship(Child) }) Some arguments accepted by :func:`.relationship` optionally accept a callable function, which when called produces the desired value. The callable is invoked by the parent :class:`.Mapper` at "mapper initialization" time, which happens only when mappers are first used, and is assumed to be after all mappings have been constructed. This can be used to resolve order-of-declaration and other dependency issues, such as if ``Child`` is declared below ``Parent`` in the same file:: mapper(Parent, properties={ "children":relationship(lambda: Child, order_by=lambda: Child.id) }) When using the :ref:`declarative_toplevel` extension, the Declarative initializer allows string arguments to be passed to :func:`.relationship`. These string arguments are converted into callables that evaluate the string as Python code, using the Declarative class-registry as a namespace. This allows the lookup of related classes to be automatic via their string name, and removes the need to import related classes at all into the local module space:: from sqlalchemy.ext.declarative import declarative_base Base = declarative_base() class Parent(Base): __tablename__ = 'parent' id = Column(Integer, primary_key=True) children = relationship("Child", order_by="Child.id") A full array of examples and reference documentation regarding :func:`.relationship` is at :ref:`relationship_config_toplevel`. :param argument: a mapped class, or actual :class:`.Mapper` instance, representing the target of the relationship. ``argument`` may also be passed as a callable function which is evaluated at mapper initialization time, and may be passed as a Python-evaluable string when using Declarative. :param secondary: for a many-to-many relationship, specifies the intermediary table, and is an instance of :class:`.Table`. The ``secondary`` keyword argument should generally only be used for a table that is not otherwise expressed in any class mapping, unless this relationship is declared as view only, otherwise conflicting persistence operations can occur. ``secondary`` may also be passed as a callable function which is evaluated at mapper initialization time. :param active_history=False: When ``True``, indicates that the "previous" value for a many-to-one reference should be loaded when replaced, if not already loaded. Normally, history tracking logic for simple many-to-ones only needs to be aware of the "new" value in order to perform a flush. This flag is available for applications that make use of :func:`.attributes.get_history` which also need to know the "previous" value of the attribute. :param backref: indicates the string name of a property to be placed on the related mapper's class that will handle this relationship in the other direction. 
The other property will be created automatically when the mappers are configured. Can also be passed as a :func:`backref` object to control the configuration of the new relationship. :param back_populates: Takes a string name and has the same meaning as ``backref``, except the complementing property is **not** created automatically, and instead must be configured explicitly on the other mapper. The complementing property should also indicate ``back_populates`` to this relationship to ensure proper functioning. :param cascade: a comma-separated list of cascade rules which determines how Session operations should be "cascaded" from parent to child. This defaults to ``False``, which means the default cascade should be used. The default value is ``"save-update, merge"``. Available cascades are: * ``save-update`` - cascade the :meth:`.Session.add` operation. This cascade applies both to future and past calls to :meth:`~sqlalchemy.orm.session.Session.add`, meaning new items added to a collection or scalar relationship get placed into the same session as that of the parent, and also applies to items which have been removed from this relationship but are still part of unflushed history. * ``merge`` - cascade the :meth:`~sqlalchemy.orm.session.Session.merge` operation * ``expunge`` - cascade the :meth:`.Session.expunge` operation * ``delete`` - cascade the :meth:`.Session.delete` operation * ``delete-orphan`` - if an item of the child's type is detached from its parent, mark it for deletion. .. versionchanged:: 0.7 This option does not prevent a new instance of the child object from being persisted without a parent to start with; to constrain against that case, ensure the child's foreign key column(s) is configured as NOT NULL * ``refresh-expire`` - cascade the :meth:`.Session.expire` and :meth:`~sqlalchemy.orm.session.Session.refresh` operations * ``all`` - shorthand for "save-update,merge, refresh-expire, expunge, delete" See the section :ref:`unitofwork_cascades` for more background on configuring cascades. :param cascade_backrefs=True: a boolean value indicating if the ``save-update`` cascade should operate along an assignment event intercepted by a backref. When set to ``False``, the attribute managed by this relationship will not cascade an incoming transient object into the session of a persistent parent, if the event is received via backref. That is:: mapper(A, a_table, properties={ 'bs':relationship(B, backref="a", cascade_backrefs=False) }) If an ``A()`` is present in the session, assigning it to the "a" attribute on a transient ``B()`` will not place the ``B()`` into the session. To set the flag in the other direction, i.e. so that ``A().bs.append(B())`` won't add a transient ``A()`` into the session for a persistent ``B()``:: mapper(A, a_table, properties={ 'bs':relationship(B, backref=backref("a", cascade_backrefs=False) ) }) See the section :ref:`unitofwork_cascades` for more background on configuring cascades. :param collection_class: a class or callable that returns a new list-holding object. will be used in place of a plain list for storing elements. Behavior of this attribute is described in detail at :ref:`custom_collections`. :param comparator_factory: a class which extends :class:`.RelationshipProperty.Comparator` which provides custom SQL clause generation for comparison operations. :param distinct_target_key=False: Indicate if a "subquery" eager load should apply the DISTINCT keyword to the innermost SELECT statement. 
When set to ``None``, the DISTINCT keyword will be applied in those cases when the target columns do not comprise the full primary key of the target table. When set to ``True``, the DISTINCT keyword is applied to the innermost SELECT unconditionally. This flag defaults to False in 0.8 but will default to None in 0.9. It may be desirable to set this flag to False when the DISTINCT reduces performance of the innermost subquery more than the duplicate innermost rows themselves do. .. versionadded:: 0.8.3 - distinct_target_key allows the subquery eager loader to apply a DISTINCT modifier to the innermost SELECT.
:param doc: docstring which will be applied to the resulting descriptor.
:param extension: an :class:`.AttributeExtension` instance, or list of extensions, which will be prepended to the list of attribute listeners for the resulting descriptor placed on the class. **Deprecated.** Please see :class:`.AttributeEvents`.
:param foreign_keys: a list of columns which are to be used as "foreign key" columns, or columns which refer to the value in a remote column, within the context of this :func:`.relationship` object's ``primaryjoin`` condition. That is, if the ``primaryjoin`` condition of this :func:`.relationship` is ``a.id == b.a_id``, and the values in ``b.a_id`` are required to be present in ``a.id``, then the "foreign key" column of this :func:`.relationship` is ``b.a_id``. In normal cases, the ``foreign_keys`` parameter is **not required.** :func:`.relationship` will **automatically** determine which columns in the ``primaryjoin`` condition are to be considered "foreign key" columns based on those :class:`.Column` objects that specify :class:`.ForeignKey`, or are otherwise listed as referencing columns in a :class:`.ForeignKeyConstraint` construct. ``foreign_keys`` is only needed when: 1. There is more than one way to construct a join from the local table to the remote table, as there are multiple foreign key references present. Setting ``foreign_keys`` will limit the :func:`.relationship` to consider just those columns specified here as "foreign". .. versionchanged:: 0.8 A multiple-foreign key join ambiguity can be resolved by setting the ``foreign_keys`` parameter alone, without the need to explicitly set ``primaryjoin`` as well. 2. The :class:`.Table` being mapped does not actually have :class:`.ForeignKey` or :class:`.ForeignKeyConstraint` constructs present, often because the table was reflected from a database that does not support foreign key reflection (MySQL MyISAM). 3. The ``primaryjoin`` argument is used to construct a non-standard join condition, which makes use of columns or expressions that do not normally refer to their "parent" column, such as a join condition expressed by a complex comparison using a SQL function. The :func:`.relationship` construct will raise informative error messages that suggest the use of the ``foreign_keys`` parameter when presented with an ambiguous condition. In typical cases, if :func:`.relationship` doesn't raise any exceptions, the ``foreign_keys`` parameter is usually not needed. ``foreign_keys`` may also be passed as a callable function which is evaluated at mapper initialization time, and may be passed as a Python-evaluable string when using Declarative. .. seealso:: :ref:`relationship_foreign_keys` :ref:`relationship_custom_foreign` :func:`.foreign` - allows direct annotation of the "foreign" columns within a ``primaryjoin`` condition. .. versionadded:: 0.8 The :func:`.foreign` annotation can also be applied directly to the ``primaryjoin`` expression, which is an alternate, more specific system of describing which columns in a particular ``primaryjoin`` should be considered "foreign".
:param info: Optional data dictionary which will be populated into the :attr:`.MapperProperty.info` attribute of this object. .. versionadded:: 0.8
:param innerjoin=False: when ``True``, joined eager loads will use an inner join to join against related tables instead of an outer join. The purpose of this option is generally one of performance, as inner joins generally perform better than outer joins. Another reason can be the use of ``with_lockmode``, which does not support outer joins. This flag can be set to ``True`` when the relationship references an object via many-to-one using local foreign keys that are not nullable, or when the reference is one-to-one, or is a collection that is guaranteed to have at least one entry.
:param join_depth: when non-``None``, an integer value indicating how many levels deep "eager" loaders should join on a self-referring or cyclical relationship. The number counts how many times the same Mapper shall be present in the loading condition along a particular join branch. When left at its default of ``None``, eager loaders will stop chaining when they encounter the same target mapper which is already higher up in the chain. This option applies both to joined- and subquery- eager loaders.
:param lazy='select': specifies how the related items should be loaded. Default value is ``select``. Values include: * ``select`` - items should be loaded lazily when the property is first accessed, using a separate SELECT statement, or identity map fetch for simple many-to-one references. * ``immediate`` - items should be loaded as the parents are loaded, using a separate SELECT statement, or identity map fetch for simple many-to-one references. .. versionadded:: 0.6.5 * ``joined`` - items should be loaded "eagerly" in the same query as that of the parent, using a JOIN or LEFT OUTER JOIN. Whether the join is "outer" or not is determined by the ``innerjoin`` parameter. * ``subquery`` - items should be loaded "eagerly" as the parents are loaded, using one additional SQL statement, which issues a JOIN to a subquery of the original statement, for each collection requested. * ``noload`` - no loading should occur at any time. This is to support "write-only" attributes, or attributes which are populated in some manner specific to the application. * ``dynamic`` - the attribute will return a pre-configured :class:`~sqlalchemy.orm.query.Query` object for all read operations, onto which further filtering operations can be applied before iterating the results. See the section :ref:`dynamic_relationship` for more details. * True - a synonym for 'select' * False - a synonym for 'joined' * None - a synonym for 'noload' Detailed discussion of loader strategies is at :doc:`/orm/loading`.
:param load_on_pending=False: Indicates loading behavior for transient or pending parent objects. .. versionchanged:: 0.8 load_on_pending is superseded by :meth:`.Session.enable_relationship_loading`. When set to ``True``, causes the lazy-loader to issue a query for a parent object that is not persistent, meaning it has never been flushed. This may take effect for a pending object when autoflush is disabled, or for a transient object that has been "attached" to a :class:`.Session` but is not part of its pending collection. The load_on_pending flag does not improve behavior when the ORM is used normally - object references should be constructed at the object level, not at the foreign key level, so that they are present in an ordinary way before flush() proceeds. This flag is not intended for general use. .. versionadded:: 0.6.5
:param order_by: indicates the ordering that should be applied when loading these items. ``order_by`` is expected to refer to one of the :class:`.Column` objects to which the target class is mapped, or the attribute itself bound to the target class which refers to the column. ``order_by`` may also be passed as a callable function which is evaluated at mapper initialization time, and may be passed as a Python-evaluable string when using Declarative.
:param passive_deletes=False: Indicates loading behavior during delete operations. A value of True indicates that unloaded child items should not be loaded during a delete operation on the parent. Normally, when a parent item is deleted, all child items are loaded so that they can either be marked as deleted, or have their foreign key to the parent set to NULL. Marking this flag as True usually implies an ON DELETE rule is in place which will handle updating/deleting child rows on the database side. Additionally, setting the flag to the string value 'all' will disable the "nulling out" of the child foreign keys, when there is no delete or delete-orphan cascade enabled. This is typically used when a triggering or error raise scenario is in place on the database side. Note that the foreign key attributes on in-session child objects will not be changed after a flush occurs, so this is a very special use-case setting.
:param passive_updates=True: Indicates loading and INSERT/UPDATE/DELETE behavior when the source of a foreign key value changes (i.e. an "on update" cascade), which are typically the primary key columns of the source row. When True, it is assumed that ON UPDATE CASCADE is configured on the foreign key in the database, and that the database will handle propagation of an UPDATE from a source column to dependent rows. Note that with databases which enforce referential integrity (i.e. PostgreSQL, MySQL with InnoDB tables), ON UPDATE CASCADE is required for this operation. The relationship() will update the value of the attribute on related items which are locally present in the session during a flush. When False, it is assumed that the database does not enforce referential integrity and will not be issuing its own CASCADE operation for an update. The relationship() will issue the appropriate UPDATE statements to the database in response to the change of a referenced key, and items locally present in the session during a flush will also be refreshed. This flag should probably be set to False if primary key changes are expected and the database in use doesn't support CASCADE (i.e. SQLite, MySQL MyISAM tables). Also see the passive_updates flag on ``mapper()``. A future SQLAlchemy release will provide a "detect" feature for this flag.
:param post_update: this indicates that the relationship should be handled by a second UPDATE statement after an INSERT or before a DELETE. Currently, it will also issue an UPDATE after the instance was UPDATEd, although this technically should be improved. This flag is used to handle saving bi-directional dependencies between two individual rows (i.e. each row references the other), where it would otherwise be impossible to INSERT or DELETE both rows fully since one row exists before the other. Use this flag when a particular mapping arrangement will incur two rows that are dependent on each other, such as a table that has a one-to-many relationship to a set of child rows, and also has a column that references a single child row within that list (i.e. both tables contain a foreign key to each other). If a ``flush()`` operation returns an error that a "cyclical dependency" was detected, this is a cue that you might want to use ``post_update`` to "break" the cycle.
:param primaryjoin: a SQL expression that will be used as the primary join of this child object against the parent object, or in a many-to-many relationship the join of the primary object to the association table. By default, this value is computed based on the foreign key relationships of the parent and child tables (or association table). ``primaryjoin`` may also be passed as a callable function which is evaluated at mapper initialization time, and may be passed as a Python-evaluable string when using Declarative.
:param remote_side: used for self-referential relationships, indicates the column or list of columns that form the "remote side" of the relationship. ``remote_side`` may also be passed as a callable function which is evaluated at mapper initialization time, and may be passed as a Python-evaluable string when using Declarative. .. versionchanged:: 0.8 The :func:`.remote` annotation can also be applied directly to the ``primaryjoin`` expression, which is an alternate, more specific system of describing which columns in a particular ``primaryjoin`` should be considered "remote".
:param query_class: a :class:`.Query` subclass that will be used as the base of the "appender query" returned by a "dynamic" relationship, that is, a relationship that specifies ``lazy="dynamic"`` or was otherwise constructed using the :func:`.orm.dynamic_loader` function.
:param secondaryjoin: a SQL expression that will be used as the join of an association table to the child object. By default, this value is computed based on the foreign key relationships of the association and child tables. ``secondaryjoin`` may also be passed as a callable function which is evaluated at mapper initialization time, and may be passed as a Python-evaluable string when using Declarative.
:param single_parent=(True|False): when True, installs a validator which will prevent objects from being associated with more than one parent at a time. This is used for many-to-one or many-to-many relationships that should be treated either as one-to-one or one-to-many. Its usage is optional unless delete-orphan cascade is also set on this relationship(), in which case it is required.
:param uselist=(True|False): a boolean that indicates if this property should be loaded as a list or a scalar. In most cases, this value is determined automatically by ``relationship()``, based on the type and direction of the relationship - one to many forms a list, many to one forms a scalar, many to many is a list. If a scalar is desired where normally a list would be present, such as a bi-directional one-to-one relationship, set uselist to False.
:param viewonly=False: when set to True, the relationship is used only for loading objects within the relationship, and has no effect on the unit-of-work flush process. Relationships with viewonly can specify any kind of join conditions to provide additional views of related objects onto a parent object.
Note that the functionality of a viewonly relationship has its limits - complicated join conditions may not compile into eager or lazy loaders properly. If this is the case, use an alternative method. .. versionchanged:: 0.6 :func:`relationship` was renamed from its previous name :func:`relation`. """ return RelationshipProperty(argument, secondary=secondary, **kwargs) def relation(*arg, **kw): """A synonym for :func:`relationship`.""" return relationship(*arg, **kw) def dynamic_loader(argument, **kw): """Construct a dynamically-loading mapper property. This is essentially the same as using the ``lazy='dynamic'`` argument with :func:`relationship`:: dynamic_loader(SomeClass) # is the same as relationship(SomeClass, lazy="dynamic") See the section :ref:`dynamic_relationship` for more details on dynamic loading. """ kw['lazy'] = 'dynamic' return relationship(argument, **kw) def column_property(*cols, **kw): """Provide a column-level property for use with a Mapper. Column-based properties can normally be applied to the mapper's ``properties`` dictionary using the :class:`.Column` element directly. Use this function when the given column is not directly present within the mapper's selectable; examples include SQL expressions, functions, and scalar SELECT queries. Columns that aren't present in the mapper's selectable won't be persisted by the mapper and are effectively "read-only" attributes. :param \*cols: list of Column objects to be mapped. :param active_history=False: When ``True``, indicates that the "previous" value for a scalar attribute should be loaded when replaced, if not already loaded. Normally, history tracking logic for simple non-primary-key scalar values only needs to be aware of the "new" value in order to perform a flush. This flag is available for applications that make use of :func:`.attributes.get_history` or :meth:`.Session.is_modified` which also need to know the "previous" value of the attribute. .. versionadded:: 0.6.6 :param comparator_factory: a class which extends :class:`.ColumnProperty.Comparator` which provides custom SQL clause generation for comparison operations. :param group: a group name for this property when marked as deferred. :param deferred: when True, the column property is "deferred", meaning that it does not load immediately, and is instead loaded when the attribute is first accessed on an instance. See also :func:`~sqlalchemy.orm.deferred`. :param doc: optional string that will be applied as the doc on the class-bound descriptor. :param expire_on_flush=True: Disable expiry on flush. A column_property() which refers to a SQL expression (and not a single table-bound column) is considered to be a "read only" property; populating it has no effect on the state of data, and it can only return database state. For this reason a column_property()'s value is expired whenever the parent object is involved in a flush, that is, has any kind of "dirty" state within a flush. Setting this parameter to ``False`` will have the effect of leaving any existing value present after the flush proceeds. Note however that the :class:`.Session` with default expiration settings still expires all attributes after a :meth:`.Session.commit` call, however. .. versionadded:: 0.7.3 :param info: Optional data dictionary which will be populated into the :attr:`.MapperProperty.info` attribute of this object. .. 
versionadded:: 0.8 :param extension: an :class:`.AttributeExtension` instance, or list of extensions, which will be prepended to the list of attribute listeners for the resulting descriptor placed on the class. **Deprecated.** Please see :class:`.AttributeEvents`. """ return ColumnProperty(*cols, **kw) def composite(class_, *cols, **kwargs): """Return a composite column-based property for use with a Mapper. See the mapping documentation section :ref:`mapper_composite` for a full usage example. The :class:`.MapperProperty` returned by :func:`.composite` is the :class:`.CompositeProperty`. :param class\_: The "composite type" class. :param \*cols: List of Column objects to be mapped. :param active_history=False: When ``True``, indicates that the "previous" value for a scalar attribute should be loaded when replaced, if not already loaded. See the same flag on :func:`.column_property`. .. versionchanged:: 0.7 This flag specifically becomes meaningful - previously it was a placeholder. :param group: A group name for this property when marked as deferred. :param deferred: When True, the column property is "deferred", meaning that it does not load immediately, and is instead loaded when the attribute is first accessed on an instance. See also :func:`~sqlalchemy.orm.deferred`. :param comparator_factory: a class which extends :class:`.CompositeProperty.Comparator` which provides custom SQL clause generation for comparison operations. :param doc: optional string that will be applied as the doc on the class-bound descriptor. :param info: Optional data dictionary which will be populated into the :attr:`.MapperProperty.info` attribute of this object. .. versionadded:: 0.8 :param extension: an :class:`.AttributeExtension` instance, or list of extensions, which will be prepended to the list of attribute listeners for the resulting descriptor placed on the class. **Deprecated.** Please see :class:`.AttributeEvents`. """ return CompositeProperty(class_, *cols, **kwargs) def backref(name, **kwargs): """Create a back reference with explicit keyword arguments, which are the same arguments one can send to :func:`relationship`. Used with the ``backref`` keyword argument to :func:`relationship` in place of a string argument, e.g.:: 'items':relationship(SomeItem, backref=backref('parent', lazy='subquery')) """ return (name, kwargs) def deferred(*columns, **kwargs): """Return a :class:`.DeferredColumnProperty`, which indicates this object attributes should only be loaded from its corresponding table column when first accessed. Used with the "properties" dictionary sent to :func:`mapper`. See also: :ref:`deferred` """ return ColumnProperty(deferred=True, *columns, **kwargs) def mapper(class_, local_table=None, *args, **params): """Return a new :class:`~.Mapper` object. This function is typically used behind the scenes via the Declarative extension. When using Declarative, many of the usual :func:`.mapper` arguments are handled by the Declarative extension itself, including ``class_``, ``local_table``, ``properties``, and ``inherits``. Other options are passed to :func:`.mapper` using the ``__mapper_args__`` class variable:: class MyClass(Base): __tablename__ = 'my_table' id = Column(Integer, primary_key=True) type = Column(String(50)) alt = Column("some_alt", Integer) __mapper_args__ = { 'polymorphic_on' : type } Explicit use of :func:`.mapper` is often referred to as *classical mapping*. 
The above declarative example is equivalent in classical form to:: my_table = Table("my_table", metadata, Column('id', Integer, primary_key=True), Column('type', String(50)), Column("some_alt", Integer) ) class MyClass(object): pass mapper(MyClass, my_table, polymorphic_on=my_table.c.type, properties={ 'alt':my_table.c.some_alt }) See also: :ref:`classical_mapping` - discussion of direct usage of :func:`.mapper` :param class\_: The class to be mapped. When using Declarative, this argument is automatically passed as the declared class itself. :param local_table: The :class:`.Table` or other selectable to which the class is mapped. May be ``None`` if this mapper inherits from another mapper using single-table inheritance. When using Declarative, this argument is automatically passed by the extension, based on what is configured via the ``__table__`` argument or via the :class:`.Table` produced as a result of the ``__tablename__`` and :class:`.Column` arguments present. :param always_refresh: If True, all query operations for this mapped class will overwrite all data within object instances that already exist within the session, erasing any in-memory changes with whatever information was loaded from the database. Usage of this flag is highly discouraged; as an alternative, see the method :meth:`.Query.populate_existing`. :param allow_partial_pks: Defaults to True. Indicates that a composite primary key with some NULL values should be considered as possibly existing within the database. This affects whether a mapper will assign an incoming row to an existing identity, as well as if :meth:`.Session.merge` will check the database first for a particular primary key value. A "partial primary key" can occur if one has mapped to an OUTER JOIN, for example. :param batch: Defaults to ``True``, indicating that save operations of multiple entities can be batched together for efficiency. Setting to False indicates that an instance will be fully saved before saving the next instance. This is used in the extremely rare case that a :class:`.MapperEvents` listener requires being called in between individual row persistence operations. :param column_prefix: A string which will be prepended to the mapped attribute name when :class:`.Column` objects are automatically assigned as attributes to the mapped class. Does not affect explicitly specified column-based properties. See the section :ref:`column_prefix` for an example. :param concrete: If True, indicates this mapper should use concrete table inheritance with its parent mapper. See the section :ref:`concrete_inheritance` for an example. :param eager_defaults: if True, the ORM will immediately fetch the value of server-generated default values after an INSERT or UPDATE, rather than leaving them as expired to be fetched on next access. This can be used for event schemes where the server-generated values are needed immediately before the flush completes. This scheme will emit an individual ``SELECT`` statement per row inserted or updated, which note can add significant performance overhead. :param exclude_properties: A list or set of string column names to be excluded from mapping. See :ref:`include_exclude_cols` for an example. :param extension: A :class:`.MapperExtension` instance or list of :class:`.MapperExtension` instances which will be applied to all operations by this :class:`.Mapper`. **Deprecated.** Please see :class:`.MapperEvents`. :param include_properties: An inclusive list or set of string column names to map. 
See :ref:`include_exclude_cols` for an example. :param inherits: A mapped class or the corresponding :class:`.Mapper` of one indicating a superclass to which this :class:`.Mapper` should *inherit* from. The mapped class here must be a subclass of the other mapper's class. When using Declarative, this argument is passed automatically as a result of the natural class hierarchy of the declared classes. .. seealso:: :ref:`inheritance_toplevel` :param inherit_condition: For joined table inheritance, a SQL expression which will define how the two tables are joined; defaults to a natural join between the two tables. :param inherit_foreign_keys: When ``inherit_condition`` is used and the columns present are missing a :class:`.ForeignKey` configuration, this parameter can be used to specify which columns are "foreign". In most cases can be left as ``None``. :param legacy_is_orphan: Boolean, defaults to ``False``. When ``True``, specifies that "legacy" orphan consideration is to be applied to objects mapped by this mapper, which means that a pending (that is, not persistent) object is auto-expunged from an owning :class:`.Session` only when it is de-associated from *all* parents that specify a ``delete-orphan`` cascade towards this mapper. The new default behavior is that the object is auto-expunged when it is de-associated with *any* of its parents that specify ``delete-orphan`` cascade. This behavior is more consistent with that of a persistent object, and allows behavior to be consistent in more scenarios independently of whether or not an orphanable object has been flushed yet or not. See the change note and example at :ref:`legacy_is_orphan_addition` for more detail on this change. .. versionadded:: 0.8 - the consideration of a pending object as an "orphan" has been modified to more closely match the behavior as that of persistent objects, which is that the object is expunged from the :class:`.Session` as soon as it is de-associated from any of its orphan-enabled parents. Previously, the pending object would be expunged only if de-associated from all of its orphan-enabled parents. The new flag ``legacy_is_orphan`` is added to :func:`.orm.mapper` which re-establishes the legacy behavior. :param non_primary: Specify that this :class:`.Mapper` is in addition to the "primary" mapper, that is, the one used for persistence. The :class:`.Mapper` created here may be used for ad-hoc mapping of the class to an alternate selectable, for loading only. The ``non_primary`` feature is rarely needed with modern usage. :param order_by: A single :class:`.Column` or list of :class:`.Column` objects for which selection operations should use as the default ordering for entities. By default mappers have no pre-defined ordering. :param passive_updates: Indicates UPDATE behavior of foreign key columns when a primary key column changes on a joined-table inheritance mapping. Defaults to ``True``. When True, it is assumed that ON UPDATE CASCADE is configured on the foreign key in the database, and that the database will handle propagation of an UPDATE from a source column to dependent columns on joined-table rows. When False, it is assumed that the database does not enforce referential integrity and will not be issuing its own CASCADE operation for an update. The :class:`.Mapper` here will emit an UPDATE statement for the dependent columns during a primary key change. .. 
seealso:: :ref:`passive_updates` - description of a similar feature as used with :func:`.relationship` :param polymorphic_on: Specifies the column, attribute, or SQL expression used to determine the target class for an incoming row, when inheriting classes are present. This value is commonly a :class:`.Column` object that's present in the mapped :class:`.Table`:: class Employee(Base): __tablename__ = 'employee' id = Column(Integer, primary_key=True) discriminator = Column(String(50)) __mapper_args__ = { "polymorphic_on":discriminator, "polymorphic_identity":"employee" } It may also be specified as a SQL expression, as in this example where we use the :func:`.case` construct to provide a conditional approach:: class Employee(Base): __tablename__ = 'employee' id = Column(Integer, primary_key=True) discriminator = Column(String(50)) __mapper_args__ = { "polymorphic_on":case([ (discriminator == "EN", "engineer"), (discriminator == "MA", "manager"), ], else_="employee"), "polymorphic_identity":"employee" } It may also refer to any attribute configured with :func:`.column_property`, or to the string name of one:: class Employee(Base): __tablename__ = 'employee' id = Column(Integer, primary_key=True) discriminator = Column(String(50)) employee_type = column_property( case([ (discriminator == "EN", "engineer"), (discriminator == "MA", "manager"), ], else_="employee") ) __mapper_args__ = { "polymorphic_on":employee_type, "polymorphic_identity":"employee" } .. versionchanged:: 0.7.4 ``polymorphic_on`` may be specified as a SQL expression, or refer to any attribute configured with :func:`.column_property`, or to the string name of one. When setting ``polymorphic_on`` to reference an attribute or expression that's not present in the locally mapped :class:`.Table`, yet the value of the discriminator should be persisted to the database, the value of the discriminator is not automatically set on new instances; this must be handled by the user, either through manual means or via event listeners. A typical approach to establishing such a listener looks like:: from sqlalchemy import event from sqlalchemy.orm import object_mapper @event.listens_for(Employee, "init", propagate=True) def set_identity(instance, *arg, **kw): mapper = object_mapper(instance) instance.discriminator = mapper.polymorphic_identity Where above, we assign the value of ``polymorphic_identity`` for the mapped class to the ``discriminator`` attribute, thus persisting the value to the ``discriminator`` column in the database. .. seealso:: :ref:`inheritance_toplevel` :param polymorphic_identity: Specifies the value which identifies this particular class as returned by the column expression referred to by the ``polymorphic_on`` setting. As rows are received, the value corresponding to the ``polymorphic_on`` column expression is compared to this value, indicating which subclass should be used for the newly reconstructed object. :param properties: A dictionary mapping the string names of object attributes to :class:`.MapperProperty` instances, which define the persistence behavior of that attribute. Note that :class:`.Column` objects present in the mapped :class:`.Table` are automatically placed into ``ColumnProperty`` instances upon mapping, unless overridden. When using Declarative, this argument is passed automatically, based on all those :class:`.MapperProperty` instances declared in the declared class body. :param primary_key: A list of :class:`.Column` objects which define the primary key to be used against this mapper's selectable unit. 
This is normally simply the primary key of the ``local_table``, but can be overridden here.
:param version_id_col: A :class:`.Column` that will be used to keep a running version id of rows in the table. This is used to detect concurrent updates or the presence of stale data in a flush. The methodology is to detect when an UPDATE statement does not match the last known version id; in that case, a :class:`~sqlalchemy.orm.exc.StaleDataError` exception is thrown. By default, the column must be of :class:`.Integer` type, unless ``version_id_generator`` specifies an alternative version generator. .. seealso:: :ref:`mapper_version_counter` - discussion of version counting and rationale.
:param version_id_generator: Define how new version ids should be generated. Defaults to ``None``, which indicates that a simple integer counting scheme be employed. To provide a custom versioning scheme, provide a callable function of the form:: def generate_version(version): return next_version .. seealso:: :ref:`custom_version_counter`
:param with_polymorphic: A tuple in the form ``(<classes>, <selectable>)`` indicating the default style of "polymorphic" loading, that is, which tables are queried at once. <classes> is any single or list of mappers and/or classes indicating the inherited classes that should be loaded at once. The special value ``'*'`` may be used to indicate all descending classes should be loaded immediately. The second tuple argument <selectable> indicates a selectable that will be used to query for multiple classes. .. seealso:: :ref:`with_polymorphic` """ return Mapper(class_, local_table, *args, **params)
def synonym(name, map_column=False, descriptor=None, comparator_factory=None, doc=None): """Denote an attribute name as a synonym to a mapped property. .. versionchanged:: 0.7 :func:`.synonym` is superseded by the :mod:`~sqlalchemy.ext.hybrid` extension. See the documentation for hybrids at :ref:`hybrids_toplevel`. Used with the ``properties`` dictionary sent to :func:`~sqlalchemy.orm.mapper`:: class MyClass(object): def _get_status(self): return self._status def _set_status(self, value): self._status = value status = property(_get_status, _set_status) mapper(MyClass, sometable, properties={ "status":synonym("_status", map_column=True) }) Above, the ``status`` attribute of MyClass will produce expression behavior against the table column named ``status``, using the Python attribute ``_status`` on the mapped class to represent the underlying value. :param name: the name of the existing mapped property, which can be any other ``MapperProperty`` including column-based properties and relationships. :param map_column: if ``True``, an additional ``ColumnProperty`` is created on the mapper automatically, using the synonym's name as the keyname of the property, and the keyname of this ``synonym()`` as the name of the column to map. """ return SynonymProperty(name, map_column=map_column, descriptor=descriptor, comparator_factory=comparator_factory, doc=doc)
def comparable_property(comparator_factory, descriptor=None): """Provides a method of applying a :class:`.PropComparator` to any Python descriptor attribute. .. versionchanged:: 0.7 :func:`.comparable_property` is superseded by the :mod:`~sqlalchemy.ext.hybrid` extension. See the example at :ref:`hybrid_custom_comparators`. Allows any Python descriptor to behave like a SQL-enabled attribute when used at the class level in queries, allowing redefinition of expression operator behavior.
In the example below we redefine :meth:`.PropComparator.operate` to wrap both sides of an expression in ``func.lower()`` to produce case-insensitive comparison:: from sqlalchemy.orm import comparable_property from sqlalchemy.orm.interfaces import PropComparator from sqlalchemy.sql import func from sqlalchemy import Integer, String, Column from sqlalchemy.ext.declarative import declarative_base class CaseInsensitiveComparator(PropComparator): def __clause_element__(self): return self.prop def operate(self, op, other): return op( func.lower(self.__clause_element__()), func.lower(other) ) Base = declarative_base() class SearchWord(Base): __tablename__ = 'search_word' id = Column(Integer, primary_key=True) word = Column(String) word_insensitive = comparable_property(lambda prop, mapper: CaseInsensitiveComparator(mapper.c.word, mapper) ) A mapping like the above allows the ``word_insensitive`` attribute to render an expression like:: >>> print SearchWord.word_insensitive == "Trucks" lower(search_word.word) = lower(:lower_1) :param comparator_factory: A PropComparator subclass or factory that defines operator behavior for this property. :param descriptor: Optional when used in a ``properties={}`` declaration. The Python descriptor or property to layer comparison behavior on top of. The like-named descriptor will be automatically retrieved from the mapped class if left blank in a ``properties`` declaration. """ return ComparableProperty(comparator_factory, descriptor) @sa_util.deprecated("0.7", message=":func:`.compile_mappers` " "is renamed to :func:`.configure_mappers`") def compile_mappers(): """Initialize the inter-mapper relationships of all mappers that have been defined. """ configure_mappers() def clear_mappers(): """Remove all mappers from all classes. This function removes all instrumentation from classes and disposes of their associated mappers. Once called, the classes are unmapped and can be later re-mapped with new mappers. :func:`.clear_mappers` is *not* for normal use, as there is literally no valid usage for it outside of very specific testing scenarios. Normally, mappers are permanent structural components of user-defined classes, and are never discarded independently of their class. If a mapped class itself is garbage collected, its mapper is automatically disposed of as well. As such, :func:`.clear_mappers` is only for usage in test suites that re-use the same classes with different mappings, which is itself an extremely rare use case - the only such use case is in fact SQLAlchemy's own test suite, and possibly the test suites of other ORM extension libraries which intend to test various combinations of mapper construction upon a fixed set of classes. """ mapperlib._CONFIGURE_MUTEX.acquire() try: while _mapper_registry: try: # can't even reliably call list(weakdict) in jython mapper, b = _mapper_registry.popitem() mapper.dispose() except KeyError: pass finally: mapperlib._CONFIGURE_MUTEX.release() def joinedload(*keys, **kw): """Return a ``MapperOption`` that will convert the property of the given name or series of mapped attributes into an joined eager load. .. versionchanged:: 0.6beta3 This function is known as :func:`eagerload` in all versions of SQLAlchemy prior to version 0.6beta3, including the 0.5 and 0.4 series. :func:`eagerload` will remain available for the foreseeable future in order to enable cross-compatibility. Used with :meth:`~sqlalchemy.orm.query.Query.options`. 
examples:: # joined-load the "orders" collection on "User" query(User).options(joinedload(User.orders)) # joined-load the "keywords" collection on each "Item", # but not the "items" collection on "Order" - those # remain lazily loaded. query(Order).options(joinedload(Order.items, Item.keywords)) # to joined-load across both, use joinedload_all() query(Order).options(joinedload_all(Order.items, Item.keywords)) # set the default strategy to be 'joined' query(Order).options(joinedload('*')) :func:`joinedload` also accepts a keyword argument `innerjoin=True` which indicates using an inner join instead of an outer:: query(Order).options(joinedload(Order.user, innerjoin=True)) .. note:: The join created by :func:`joinedload` is anonymously aliased such that it **does not affect the query results**. An :meth:`.Query.order_by` or :meth:`.Query.filter` call **cannot** reference these aliased tables - so-called "user space" joins are constructed using :meth:`.Query.join`. The rationale for this is that :func:`joinedload` is only applied in order to affect how related objects or collections are loaded as an optimizing detail - it can be added or removed with no impact on actual results. See the section :ref:`zen_of_eager_loading` for a detailed description of how this is used, including how to use a single explicit JOIN for filtering/ordering and eager loading simultaneously. See also: :func:`subqueryload`, :func:`lazyload` """ innerjoin = kw.pop('innerjoin', None) if innerjoin is not None: return ( strategies.EagerLazyOption(keys, lazy='joined'), strategies.EagerJoinOption(keys, innerjoin) ) else: return strategies.EagerLazyOption(keys, lazy='joined') def joinedload_all(*keys, **kw): """Return a ``MapperOption`` that will convert all properties along the given dot-separated path or series of mapped attributes into an joined eager load. .. versionchanged:: 0.6beta3 This function is known as :func:`eagerload_all` in all versions of SQLAlchemy prior to version 0.6beta3, including the 0.5 and 0.4 series. :func:`eagerload_all` will remain available for the foreseeable future in order to enable cross-compatibility. Used with :meth:`~sqlalchemy.orm.query.Query.options`. For example:: query.options(joinedload_all('orders.items.keywords'))... will set all of ``orders``, ``orders.items``, and ``orders.items.keywords`` to load in one joined eager load. Individual descriptors are accepted as arguments as well:: query.options(joinedload_all(User.orders, Order.items, Item.keywords)) The keyword arguments accept a flag `innerjoin=True|False` which will override the value of the `innerjoin` flag specified on the relationship(). See also: :func:`subqueryload_all`, :func:`lazyload` """ innerjoin = kw.pop('innerjoin', None) if innerjoin is not None: return ( strategies.EagerLazyOption(keys, lazy='joined', chained=True), strategies.EagerJoinOption(keys, innerjoin, chained=True) ) else: return strategies.EagerLazyOption(keys, lazy='joined', chained=True) def eagerload(*args, **kwargs): """A synonym for :func:`joinedload()`.""" return joinedload(*args, **kwargs) def eagerload_all(*args, **kwargs): """A synonym for :func:`joinedload_all()`""" return joinedload_all(*args, **kwargs) def subqueryload(*keys): """Return a ``MapperOption`` that will convert the property of the given name or series of mapped attributes into an subquery eager load. Used with :meth:`~sqlalchemy.orm.query.Query.options`. 
examples:: # subquery-load the "orders" collection on "User" query(User).options(subqueryload(User.orders)) # subquery-load the "keywords" collection on each "Item", # but not the "items" collection on "Order" - those # remain lazily loaded. query(Order).options(subqueryload(Order.items, Item.keywords)) # to subquery-load across both, use subqueryload_all() query(Order).options(subqueryload_all(Order.items, Item.keywords)) # set the default strategy to be 'subquery' query(Order).options(subqueryload('*')) See also: :func:`joinedload`, :func:`lazyload` """ return strategies.EagerLazyOption(keys, lazy="subquery") def subqueryload_all(*keys): """Return a ``MapperOption`` that will convert all properties along the given dot-separated path or series of mapped attributes into a subquery eager load. Used with :meth:`~sqlalchemy.orm.query.Query.options`. For example:: query.options(subqueryload_all('orders.items.keywords'))... will set all of ``orders``, ``orders.items``, and ``orders.items.keywords`` to load in one subquery eager load. Individual descriptors are accepted as arguments as well:: query.options(subqueryload_all(User.orders, Order.items, Item.keywords)) See also: :func:`joinedload_all`, :func:`lazyload`, :func:`immediateload` """ return strategies.EagerLazyOption(keys, lazy="subquery", chained=True) def lazyload(*keys): """Return a ``MapperOption`` that will convert the property of the given name or series of mapped attributes into a lazy load. Used with :meth:`~sqlalchemy.orm.query.Query.options`. See also: :func:`eagerload`, :func:`subqueryload`, :func:`immediateload` """ return strategies.EagerLazyOption(keys, lazy=True) def lazyload_all(*keys): """Return a ``MapperOption`` that will convert all the properties along the given dot-separated path or series of mapped attributes into a lazy load. Used with :meth:`~sqlalchemy.orm.query.Query.options`. See also: :func:`eagerload`, :func:`subqueryload`, :func:`immediateload` """ return strategies.EagerLazyOption(keys, lazy=True, chained=True) def noload(*keys): """Return a ``MapperOption`` that will convert the property of the given name or series of mapped attributes into a non-load. Used with :meth:`~sqlalchemy.orm.query.Query.options`. See also: :func:`lazyload`, :func:`eagerload`, :func:`subqueryload`, :func:`immediateload` """ return strategies.EagerLazyOption(keys, lazy=None) def immediateload(*keys): """Return a ``MapperOption`` that will convert the property of the given name or series of mapped attributes into an immediate load. The "immediate" load means the attribute will be fetched with a separate SELECT statement per parent in the same way as lazy loading - except the loader is guaranteed to be called at load time before the parent object is returned in the result. The normal behavior of lazy loading applies - if the relationship is a simple many-to-one, and the child object is already present in the :class:`.Session`, no SELECT statement will be emitted. Used with :meth:`~sqlalchemy.orm.query.Query.options`. See also: :func:`lazyload`, :func:`eagerload`, :func:`subqueryload` .. versionadded:: 0.6.5 """ return strategies.EagerLazyOption(keys, lazy='immediate') def contains_alias(alias): """Return a :class:`.MapperOption` that will indicate to the query that the main table has been aliased. This is used in the very rare case that :func:`.contains_eager` is being used in conjunction with a user-defined SELECT statement that aliases the parent table. 
E.g.:: # define an aliased UNION called 'ulist' statement = users.select(users.c.user_id==7).\\ union(users.select(users.c.user_id>7)).\\ alias('ulist') # add on an eager load of "addresses" statement = statement.outerjoin(addresses).\\ select().apply_labels() # create query, indicating "ulist" will be an # alias for the main table, "addresses" # property should be eager loaded query = session.query(User).options( contains_alias('ulist'), contains_eager('addresses')) # then get results via the statement results = query.from_statement(statement).all() :param alias: is the string name of an alias, or a :class:`~.sql.expression.Alias` object representing the alias. """ return AliasOption(alias) def contains_eager(*keys, **kwargs): """Return a ``MapperOption`` that will indicate to the query that the given attribute should be eagerly loaded from columns currently in the query. Used with :meth:`~sqlalchemy.orm.query.Query.options`. The option is used in conjunction with an explicit join that loads the desired rows, i.e.:: sess.query(Order).\\ join(Order.user).\\ options(contains_eager(Order.user)) The above query would join from the ``Order`` entity to its related ``User`` entity, and the returned ``Order`` objects would have the ``Order.user`` attribute pre-populated. :func:`contains_eager` also accepts an `alias` argument, which is the string name of an alias, an :func:`~sqlalchemy.sql.expression.alias` construct, or an :func:`~sqlalchemy.orm.aliased` construct. Use this when the eagerly-loaded rows are to come from an aliased table:: user_alias = aliased(User) sess.query(Order).\\ join((user_alias, Order.user)).\\ options(contains_eager(Order.user, alias=user_alias)) See also :func:`eagerload` for the "automatic" version of this functionality. For additional examples of :func:`contains_eager` see :ref:`contains_eager`. """ alias = kwargs.pop('alias', None) if kwargs: raise exc.ArgumentError( 'Invalid kwargs for contains_eager: %r' % kwargs.keys()) return strategies.EagerLazyOption(keys, lazy='joined', propagate_to_loaders=False, chained=True), \ strategies.LoadEagerFromAliasOption(keys, alias=alias, chained=True) def defer(*key): """Return a :class:`.MapperOption` that will convert the column property of the given name into a deferred load. Used with :meth:`.Query.options`. e.g.:: from sqlalchemy.orm import defer query(MyClass).options(defer("attribute_one"), defer("attribute_two")) A class bound descriptor is also accepted:: query(MyClass).options( defer(MyClass.attribute_one), defer(MyClass.attribute_two)) A "path" can be specified onto a related or collection object using a dotted name. The :func:`.orm.defer` option will be applied to that object when loaded:: query(MyClass).options( defer("related.attribute_one"), defer("related.attribute_two")) To specify a path via class, send multiple arguments:: query(MyClass).options( defer(MyClass.related, MyOtherClass.attribute_one), defer(MyClass.related, MyOtherClass.attribute_two)) See also: :ref:`deferred` :param \*key: A key representing an individual path. Multiple entries are accepted to allow a multiple-token path for a single target, not multiple targets. """ return strategies.DeferredOption(key, defer=True) def undefer(*key): """Return a :class:`.MapperOption` that will convert the column property of the given name into a non-deferred (regular column) load. Used with :meth:`.Query.options`. 
e.g.:: from sqlalchemy.orm import undefer query(MyClass).options( undefer("attribute_one"), undefer("attribute_two")) A class bound descriptor is also accepted:: query(MyClass).options( undefer(MyClass.attribute_one), undefer(MyClass.attribute_two)) A "path" can be specified onto a related or collection object using a dotted name. The :func:`.orm.undefer` option will be applied to that object when loaded:: query(MyClass).options( undefer("related.attribute_one"), undefer("related.attribute_two")) To specify a path via class, send multiple arguments:: query(MyClass).options( undefer(MyClass.related, MyOtherClass.attribute_one), undefer(MyClass.related, MyOtherClass.attribute_two)) See also: :func:`.orm.undefer_group` as a means to "undefer" a group of attributes at once. :ref:`deferred` :param \*key: A key representing an individual path. Multiple entries are accepted to allow a multiple-token path for a single target, not multiple targets. """ return strategies.DeferredOption(key, defer=False)
def undefer_group(name): """Return a :class:`.MapperOption` that will convert the given group of deferred column properties into a non-deferred (regular column) load. Used with :meth:`.Query.options`. e.g.:: query(MyClass).options(undefer_group("group_one")) See also: :ref:`deferred` :param name: String name of the deferred group. This name is established using the "group" name to the :func:`.orm.deferred` configurational function. """ return strategies.UndeferGroupOption(name)
from sqlalchemy import util as _sa_util _sa_util.importlater.resolve_all()
SQLAlchemy-0.8.4/lib/sqlalchemy/orm/attributes.py0000644000076500000240000015372512251150015022503 0ustar classicstaff00000000000000# orm/attributes.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Defines instrumentation for class attributes and their interaction with instances. This module is usually not directly visible to user applications, but defines a large part of the ORM's interactivity. """ import operator from operator import itemgetter from .. import util, event, inspection from . import interfaces, collections, events, exc as orm_exc from .instrumentation import instance_state, instance_dict, manager_of_class orm_util = util.importlater("sqlalchemy.orm", "util") PASSIVE_NO_RESULT = util.symbol('PASSIVE_NO_RESULT', """Symbol returned by a loader callable or other attribute/history retrieval operation when a value could not be determined, based on loader callable flags. """ ) ATTR_WAS_SET = util.symbol('ATTR_WAS_SET', """Symbol returned by a loader callable to indicate the retrieved value, or values, were assigned to their attributes on the target object. """) ATTR_EMPTY = util.symbol('ATTR_EMPTY', """Symbol used internally to indicate an attribute had no callable. """) NO_VALUE = util.symbol('NO_VALUE', """Symbol which may be placed as the 'previous' value of an attribute, indicating no value was loaded for an attribute when it was modified, and flags indicated we were not to load it. """ ) NEVER_SET = util.symbol('NEVER_SET', """Symbol which may be placed as the 'previous' value of an attribute indicating that the attribute had not been assigned to previously.
""" ) NO_CHANGE = util.symbol("NO_CHANGE", """No callables or SQL should be emitted on attribute access and no state should change""", canonical=0 ) CALLABLES_OK = util.symbol("CALLABLES_OK", """Loader callables can be fired off if a value is not present.""", canonical=1 ) SQL_OK = util.symbol("SQL_OK", """Loader callables can emit SQL at least on scalar value attributes.""", canonical=2) RELATED_OBJECT_OK = util.symbol("RELATED_OBJECT_OK", """callables can use SQL to load related objects as well as scalar value attributes. """, canonical=4 ) INIT_OK = util.symbol("INIT_OK", """Attributes should be initialized with a blank value (None or an empty collection) upon get, if no other value can be obtained. """, canonical=8 ) NON_PERSISTENT_OK = util.symbol("NON_PERSISTENT_OK", """callables can be emitted if the parent is not persistent.""", canonical=16 ) LOAD_AGAINST_COMMITTED = util.symbol("LOAD_AGAINST_COMMITTED", """callables should use committed values as primary/foreign keys during a load """, canonical=32 ) # pre-packaged sets of flags used as inputs PASSIVE_OFF = util.symbol("PASSIVE_OFF", "Callables can be emitted in all cases.", canonical=(RELATED_OBJECT_OK | NON_PERSISTENT_OK | INIT_OK | CALLABLES_OK | SQL_OK) ) PASSIVE_RETURN_NEVER_SET = util.symbol("PASSIVE_RETURN_NEVER_SET", """PASSIVE_OFF ^ INIT_OK""", canonical=PASSIVE_OFF ^ INIT_OK ) PASSIVE_NO_INITIALIZE = util.symbol("PASSIVE_NO_INITIALIZE", "PASSIVE_RETURN_NEVER_SET ^ CALLABLES_OK", canonical=PASSIVE_RETURN_NEVER_SET ^ CALLABLES_OK ) PASSIVE_NO_FETCH = util.symbol("PASSIVE_NO_FETCH", "PASSIVE_OFF ^ SQL_OK", canonical=PASSIVE_OFF ^ SQL_OK ) PASSIVE_NO_FETCH_RELATED = util.symbol("PASSIVE_NO_FETCH_RELATED", "PASSIVE_OFF ^ RELATED_OBJECT_OK", canonical=PASSIVE_OFF ^ RELATED_OBJECT_OK ) PASSIVE_ONLY_PERSISTENT = util.symbol("PASSIVE_ONLY_PERSISTENT", "PASSIVE_OFF ^ NON_PERSISTENT_OK", canonical=PASSIVE_OFF ^ NON_PERSISTENT_OK ) class QueryableAttribute(interfaces._MappedAttribute, interfaces._InspectionAttr, interfaces.PropComparator): """Base class for :term:`descriptor` objects that intercept attribute events on behalf of a :class:`.MapperProperty` object. The actual :class:`.MapperProperty` is accessible via the :attr:`.QueryableAttribute.property` attribute. .. seealso:: :class:`.InstrumentedAttribute` :class:`.MapperProperty` :attr:`.Mapper.all_orm_descriptors` :attr:`.Mapper.attrs` """ is_attribute = True def __init__(self, class_, key, impl=None, comparator=None, parententity=None, of_type=None): self.class_ = class_ self.key = key self.impl = impl self.comparator = comparator self._parententity = parententity self._of_type = of_type manager = manager_of_class(class_) # manager is None in the case of AliasedClass if manager: # propagate existing event listeners from # immediate superclass for base in manager._bases: if key in base: self.dispatch._update(base[key].dispatch) dispatch = event.dispatcher(events.AttributeEvents) dispatch.dispatch_cls._active_history = False @util.memoized_property def _supports_population(self): return self.impl.supports_population def get_history(self, instance, passive=PASSIVE_OFF): return self.impl.get_history(instance_state(instance), instance_dict(instance), passive) def __selectable__(self): # TODO: conditionally attach this method based on clause_element ? return self @util.memoized_property def info(self): """Return the 'info' dictionary for the underlying SQL element. The behavior here is as follows: * If the attribute is a column-mapped property, i.e. 
:class:`.ColumnProperty`, which is mapped directly to a schema-level :class:`.Column` object, this attribute will return the :attr:`.SchemaItem.info` dictionary associated with the core-level :class:`.Column` object. * If the attribute is a :class:`.ColumnProperty` but is mapped to any other kind of SQL expression other than a :class:`.Column`, the attribute will refer to the :attr:`.MapperProperty.info` dictionary associated directly with the :class:`.ColumnProperty`, assuming the SQL expression itself does not have it's own ``.info`` attribute (which should be the case, unless a user-defined SQL construct has defined one). * If the attribute refers to any other kind of :class:`.MapperProperty`, including :class:`.RelationshipProperty`, the attribute will refer to the :attr:`.MapperProperty.info` dictionary associated with that :class:`.MapperProperty`. * To access the :attr:`.MapperProperty.info` dictionary of the :class:`.MapperProperty` unconditionally, including for a :class:`.ColumnProperty` that's associated directly with a :class:`.schema.Column`, the attribute can be referred to using :attr:`.QueryableAttribute.property` attribute, as ``MyClass.someattribute.property.info``. .. versionadded:: 0.8.0 .. seealso:: :attr:`.SchemaItem.info` :attr:`.MapperProperty.info` """ return self.comparator.info @util.memoized_property def parent(self): """Return an inspection instance representing the parent. This will be either an instance of :class:`.Mapper` or :class:`.AliasedInsp`, depending upon the nature of the parent entity which this attribute is associated with. """ return inspection.inspect(self._parententity) @property def expression(self): return self.comparator.__clause_element__() def __clause_element__(self): return self.comparator.__clause_element__() def of_type(self, cls): return QueryableAttribute( self.class_, self.key, self.impl, self.comparator.of_type(cls), self._parententity, of_type=cls) def label(self, name): return self.__clause_element__().label(name) def operate(self, op, *other, **kwargs): return op(self.comparator, *other, **kwargs) def reverse_operate(self, op, other, **kwargs): return op(other, self.comparator, **kwargs) def hasparent(self, state, optimistic=False): return self.impl.hasparent(state, optimistic=optimistic) is not False def __getattr__(self, key): try: return getattr(self.comparator, key) except AttributeError: raise AttributeError( 'Neither %r object nor %r object associated with %s ' 'has an attribute %r' % ( type(self).__name__, type(self.comparator).__name__, self, key) ) def __str__(self): return "%s.%s" % (self.class_.__name__, self.key) @util.memoized_property def property(self): """Return the :class:`.MapperProperty` associated with this :class:`.QueryableAttribute`. Return values here will commonly be instances of :class:`.ColumnProperty` or :class:`.RelationshipProperty`. """ return self.comparator.property inspection._self_inspects(QueryableAttribute) class InstrumentedAttribute(QueryableAttribute): """Class bound instrumented attribute which adds basic :term:`descriptor` methods. See :class:`.QueryableAttribute` for a description of most features. 
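    For example, with a hypothetical mapped class ``User`` that has a
    mapped ``name`` attribute, the same descriptor serves double duty,
    producing SQL expressions when accessed at the class level and
    returning the loaded value when accessed on an instance::

        User.name == 'ed'   # class-level access - a SQL expression
        some_user.name      # instance-level access - the loaded value
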
""" def __set__(self, instance, value): self.impl.set(instance_state(instance), instance_dict(instance), value, None) def __delete__(self, instance): self.impl.delete(instance_state(instance), instance_dict(instance)) def __get__(self, instance, owner): if instance is None: return self dict_ = instance_dict(instance) if self._supports_population and self.key in dict_: return dict_[self.key] else: return self.impl.get(instance_state(instance), dict_) def create_proxied_attribute(descriptor): """Create an QueryableAttribute / user descriptor hybrid. Returns a new QueryableAttribute type that delegates descriptor behavior and getattr() to the given descriptor. """ # TODO: can move this to descriptor_props if the need for this # function is removed from ext/hybrid.py class Proxy(QueryableAttribute): """Presents the :class:`.QueryableAttribute` interface as a proxy on top of a Python descriptor / :class:`.PropComparator` combination. """ def __init__(self, class_, key, descriptor, comparator, adapter=None, doc=None, original_property=None): self.class_ = class_ self.key = key self.descriptor = descriptor self.original_property = original_property self._comparator = comparator self.adapter = adapter self.__doc__ = doc @property def property(self): return self.comparator.property @util.memoized_property def comparator(self): if util.callable(self._comparator): self._comparator = self._comparator() if self.adapter: self._comparator = self._comparator.adapted(self.adapter) return self._comparator def adapted(self, adapter): """Proxy adapted() for the use case of AliasedClass calling adapted. """ return self.__class__(self.class_, self.key, self.descriptor, self._comparator, adapter) def __get__(self, instance, owner): if instance is None: return self else: return self.descriptor.__get__(instance, owner) def __str__(self): return "%s.%s" % (self.class_.__name__, self.key) def __getattr__(self, attribute): """Delegate __getattr__ to the original descriptor and/or comparator.""" try: return getattr(descriptor, attribute) except AttributeError: try: return getattr(self.comparator, attribute) except AttributeError: raise AttributeError( 'Neither %r object nor %r object associated with %s ' 'has an attribute %r' % ( type(descriptor).__name__, type(self.comparator).__name__, self, attribute) ) Proxy.__name__ = type(descriptor).__name__ + 'Proxy' util.monkeypatch_proxied_specials(Proxy, type(descriptor), name='descriptor', from_instance=descriptor) return Proxy class AttributeImpl(object): """internal implementation for instrumented attributes.""" def __init__(self, class_, key, callable_, dispatch, trackparent=False, extension=None, compare_function=None, active_history=False, parent_token=None, expire_missing=True, **kwargs): """Construct an AttributeImpl. \class_ associated class key string name of the attribute \callable_ optional function which generates a callable based on a parent instance, which produces the "default" values for a scalar or collection attribute when it's first accessed, if not present already. trackparent if True, attempt to track if an instance has a parent attached to it via this attribute. extension a single or list of AttributeExtension object(s) which will receive set/delete/append/remove/etc. events. Deprecated. The event package is now used. compare_function a function that compares two values which are normally assignable to this attribute. 
active_history indicates that get_history() should always return the "old" value, even if it means executing a lazy callable upon attribute change. parent_token Usually references the MapperProperty, used as a key for the hasparent() function to identify an "owning" attribute. Allows multiple AttributeImpls to all match a single owner attribute. expire_missing if False, don't add an "expiry" callable to this attribute during state.expire_attributes(None), if no value is present for this key. """ self.class_ = class_ self.key = key self.callable_ = callable_ self.dispatch = dispatch self.trackparent = trackparent self.parent_token = parent_token or self if compare_function is None: self.is_equal = operator.eq else: self.is_equal = compare_function # TODO: pass in the manager here # instead of doing a lookup attr = manager_of_class(class_)[key] for ext in util.to_list(extension or []): ext._adapt_listener(attr, ext) if active_history: self.dispatch._active_history = True self.expire_missing = expire_missing def __str__(self): return "%s.%s" % (self.class_.__name__, self.key) def _get_active_history(self): """Backwards compat for impl.active_history""" return self.dispatch._active_history def _set_active_history(self, value): self.dispatch._active_history = value active_history = property(_get_active_history, _set_active_history) def hasparent(self, state, optimistic=False): """Return the boolean value of a `hasparent` flag attached to the given state. The `optimistic` flag determines what the default return value should be if no `hasparent` flag can be located. As this function is used to determine if an instance is an *orphan*, instances that were loaded from storage should be assumed to not be orphans, until a True/False value for this flag is set. An instance attribute that is loaded by a callable function will also not have a `hasparent` flag. """ msg = "This AttributeImpl is not configured to track parents." assert self.trackparent, msg return state.parents.get(id(self.parent_token), optimistic) \ is not False def sethasparent(self, state, parent_state, value): """Set a boolean flag on the given item corresponding to whether or not it is attached to a parent object via the attribute represented by this ``InstrumentedAttribute``. """ msg = "This AttributeImpl is not configured to track parents." assert self.trackparent, msg id_ = id(self.parent_token) if value: state.parents[id_] = parent_state else: if id_ in state.parents: last_parent = state.parents[id_] if last_parent is not False and \ last_parent.key != parent_state.key: if last_parent.obj() is None: raise orm_exc.StaleDataError( "Removing state %s from parent " "state %s along attribute '%s', " "but the parent record " "has gone stale, can't be sure this " "is the most recent parent." % (orm_util.state_str(state), orm_util.state_str(parent_state), self.key)) return state.parents[id_] = False def set_callable(self, state, callable_): """Set a callable function for this attribute on the given object. This callable will be executed when the attribute is next accessed, and is assumed to construct part of the instances previously stored state. When its value or values are loaded, they will be established as part of the instance's *committed state*. While *trackparent* information will be assembled for these instances, attribute-level event handlers will not be fired. The callable overrides the class level callable set in the ``InstrumentedAttribute`` constructor. 
""" state.callables[self.key] = callable_ def get_history(self, state, dict_, passive=PASSIVE_OFF): raise NotImplementedError() def get_all_pending(self, state, dict_): """Return a list of tuples of (state, obj) for all objects in this attribute's current state + history. Only applies to object-based attributes. This is an inlining of existing functionality which roughly corresponds to: get_state_history( state, key, passive=PASSIVE_NO_INITIALIZE).sum() """ raise NotImplementedError() def initialize(self, state, dict_): """Initialize the given state's attribute with an empty value.""" dict_[self.key] = None return None def get(self, state, dict_, passive=PASSIVE_OFF): """Retrieve a value from the given object. If a callable is assembled on this object's attribute, and passive is False, the callable will be executed and the resulting value will be set as the new value for this attribute. """ if self.key in dict_: return dict_[self.key] else: # if history present, don't load key = self.key if key not in state.committed_state or \ state.committed_state[key] is NEVER_SET: if not passive & CALLABLES_OK: return PASSIVE_NO_RESULT if key in state.callables: callable_ = state.callables[key] value = callable_(state, passive) elif self.callable_: value = self.callable_(state, passive) else: value = ATTR_EMPTY if value is PASSIVE_NO_RESULT or value is NEVER_SET: return value elif value is ATTR_WAS_SET: try: return dict_[key] except KeyError: # TODO: no test coverage here. raise KeyError( "Deferred loader for attribute " "%r failed to populate " "correctly" % key) elif value is not ATTR_EMPTY: return self.set_committed_value(state, dict_, value) if not passive & INIT_OK: return NEVER_SET else: # Return a new, empty value return self.initialize(state, dict_) def append(self, state, dict_, value, initiator, passive=PASSIVE_OFF): self.set(state, dict_, value, initiator, passive=passive) def remove(self, state, dict_, value, initiator, passive=PASSIVE_OFF): self.set(state, dict_, None, initiator, passive=passive, check_old=value) def pop(self, state, dict_, value, initiator, passive=PASSIVE_OFF): self.set(state, dict_, None, initiator, passive=passive, check_old=value, pop=True) def set(self, state, dict_, value, initiator, passive=PASSIVE_OFF, check_old=None, pop=False): raise NotImplementedError() def get_committed_value(self, state, dict_, passive=PASSIVE_OFF): """return the unchanged value of this attribute""" if self.key in state.committed_state: value = state.committed_state[self.key] if value is NO_VALUE: return None else: return value else: return self.get(state, dict_, passive=passive) def set_committed_value(self, state, dict_, value): """set an attribute value on the given instance and 'commit' it.""" dict_[self.key] = value state._commit(dict_, [self.key]) return value class ScalarAttributeImpl(AttributeImpl): """represents a scalar value-holding InstrumentedAttribute.""" accepts_scalar_loader = True uses_objects = False supports_population = True collection = False def delete(self, state, dict_): # TODO: catch key errors, convert to attributeerror? 
if self.dispatch._active_history: old = self.get(state, dict_, PASSIVE_RETURN_NEVER_SET) else: old = dict_.get(self.key, NO_VALUE) if self.dispatch.remove: self.fire_remove_event(state, dict_, old, None) state._modified_event(dict_, self, old) del dict_[self.key] def get_history(self, state, dict_, passive=PASSIVE_OFF): return History.from_scalar_attribute( self, state, dict_.get(self.key, NO_VALUE)) def set(self, state, dict_, value, initiator, passive=PASSIVE_OFF, check_old=None, pop=False): if initiator and initiator.parent_token is self.parent_token: return if self.dispatch._active_history: old = self.get(state, dict_, PASSIVE_RETURN_NEVER_SET) else: old = dict_.get(self.key, NO_VALUE) if self.dispatch.set: value = self.fire_replace_event(state, dict_, value, old, initiator) state._modified_event(dict_, self, old) dict_[self.key] = value def fire_replace_event(self, state, dict_, value, previous, initiator): for fn in self.dispatch.set: value = fn(state, value, previous, initiator or self) return value def fire_remove_event(self, state, dict_, value, initiator): for fn in self.dispatch.remove: fn(state, value, initiator or self) @property def type(self): self.property.columns[0].type class ScalarObjectAttributeImpl(ScalarAttributeImpl): """represents a scalar-holding InstrumentedAttribute, where the target object is also instrumented. Adds events to delete/set operations. """ accepts_scalar_loader = False uses_objects = True supports_population = True collection = False def delete(self, state, dict_): old = self.get(state, dict_) self.fire_remove_event(state, dict_, old, self) del dict_[self.key] def get_history(self, state, dict_, passive=PASSIVE_OFF): if self.key in dict_: return History.from_object_attribute(self, state, dict_[self.key]) else: if passive & INIT_OK: passive ^= INIT_OK current = self.get(state, dict_, passive=passive) if current is PASSIVE_NO_RESULT: return HISTORY_BLANK else: return History.from_object_attribute(self, state, current) def get_all_pending(self, state, dict_): if self.key in dict_: current = dict_[self.key] if current is not None: ret = [(instance_state(current), current)] else: ret = [(None, None)] if self.key in state.committed_state: original = state.committed_state[self.key] if original not in (NEVER_SET, PASSIVE_NO_RESULT, None) and \ original is not current: ret.append((instance_state(original), original)) return ret else: return [] def set(self, state, dict_, value, initiator, passive=PASSIVE_OFF, check_old=None, pop=False): """Set a value on the given InstanceState. `initiator` is the ``InstrumentedAttribute`` that initiated the ``set()`` operation and is used to control the depth of a circular setter operation. 
""" if initiator and initiator.parent_token is self.parent_token: return if self.dispatch._active_history: old = self.get(state, dict_, passive=PASSIVE_ONLY_PERSISTENT) else: old = self.get(state, dict_, passive=PASSIVE_NO_FETCH) if check_old is not None and \ old is not PASSIVE_NO_RESULT and \ check_old is not old: if pop: return else: raise ValueError( "Object %s not associated with %s on attribute '%s'" % ( orm_util.instance_str(check_old), orm_util.state_str(state), self.key )) value = self.fire_replace_event(state, dict_, value, old, initiator) dict_[self.key] = value def fire_remove_event(self, state, dict_, value, initiator): if self.trackparent and value is not None: self.sethasparent(instance_state(value), state, False) for fn in self.dispatch.remove: fn(state, value, initiator or self) state._modified_event(dict_, self, value) def fire_replace_event(self, state, dict_, value, previous, initiator): if self.trackparent: if (previous is not value and previous is not None and previous is not PASSIVE_NO_RESULT): self.sethasparent(instance_state(previous), state, False) for fn in self.dispatch.set: value = fn(state, value, previous, initiator or self) state._modified_event(dict_, self, previous) if self.trackparent: if value is not None: self.sethasparent(instance_state(value), state, True) return value class CollectionAttributeImpl(AttributeImpl): """A collection-holding attribute that instruments changes in membership. Only handles collections of instrumented objects. InstrumentedCollectionAttribute holds an arbitrary, user-specified container object (defaulting to a list) and brokers access to the CollectionAdapter, a "view" onto that object that presents consistent bag semantics to the orm layer independent of the user data implementation. """ accepts_scalar_loader = False uses_objects = True supports_population = True collection = True def __init__(self, class_, key, callable_, dispatch, typecallable=None, trackparent=False, extension=None, copy_function=None, compare_function=None, **kwargs): super(CollectionAttributeImpl, self).__init__( class_, key, callable_, dispatch, trackparent=trackparent, extension=extension, compare_function=compare_function, **kwargs) if copy_function is None: copy_function = self.__copy self.copy = copy_function self.collection_factory = typecallable def __copy(self, item): return [y for y in list(collections.collection_adapter(item))] def get_history(self, state, dict_, passive=PASSIVE_OFF): current = self.get(state, dict_, passive=passive) if current is PASSIVE_NO_RESULT: return HISTORY_BLANK else: return History.from_collection(self, state, current) def get_all_pending(self, state, dict_): if self.key not in dict_: return [] current = dict_[self.key] current = getattr(current, '_sa_adapter') if self.key in state.committed_state: original = state.committed_state[self.key] if original not in (NO_VALUE, NEVER_SET): current_states = [((c is not None) and instance_state(c) or None, c) for c in current] original_states = [((c is not None) and instance_state(c) or None, c) for c in original] current_set = dict(current_states) original_set = dict(original_states) return \ [(s, o) for s, o in current_states if s not in original_set] + \ [(s, o) for s, o in current_states if s in original_set] + \ [(s, o) for s, o in original_states if s not in current_set] return [(instance_state(o), o) for o in current] def fire_append_event(self, state, dict_, value, initiator): for fn in self.dispatch.append: value = fn(state, value, initiator or self) 
state._modified_event(dict_, self, NEVER_SET, True) if self.trackparent and value is not None: self.sethasparent(instance_state(value), state, True) return value def fire_pre_remove_event(self, state, dict_, initiator): state._modified_event(dict_, self, NEVER_SET, True) def fire_remove_event(self, state, dict_, value, initiator): if self.trackparent and value is not None: self.sethasparent(instance_state(value), state, False) for fn in self.dispatch.remove: fn(state, value, initiator or self) state._modified_event(dict_, self, NEVER_SET, True) def delete(self, state, dict_): if self.key not in dict_: return state._modified_event(dict_, self, NEVER_SET, True) collection = self.get_collection(state, state.dict) collection.clear_with_event() # TODO: catch key errors, convert to attributeerror? del dict_[self.key] def initialize(self, state, dict_): """Initialize this attribute with an empty collection.""" _, user_data = self._initialize_collection(state) dict_[self.key] = user_data return user_data def _initialize_collection(self, state): return state.manager.initialize_collection( self.key, state, self.collection_factory) def append(self, state, dict_, value, initiator, passive=PASSIVE_OFF): if initiator and initiator.parent_token is self.parent_token: return collection = self.get_collection(state, dict_, passive=passive) if collection is PASSIVE_NO_RESULT: value = self.fire_append_event(state, dict_, value, initiator) assert self.key not in dict_, \ "Collection was loaded during event handling." state._get_pending_mutation(self.key).append(value) else: collection.append_with_event(value, initiator) def remove(self, state, dict_, value, initiator, passive=PASSIVE_OFF): if initiator and initiator.parent_token is self.parent_token: return collection = self.get_collection(state, state.dict, passive=passive) if collection is PASSIVE_NO_RESULT: self.fire_remove_event(state, dict_, value, initiator) assert self.key not in dict_, \ "Collection was loaded during event handling." state._get_pending_mutation(self.key).remove(value) else: collection.remove_with_event(value, initiator) def pop(self, state, dict_, value, initiator, passive=PASSIVE_OFF): try: # TODO: better solution here would be to add # a "popper" role to collections.py to complement # "remover". self.remove(state, dict_, value, initiator, passive=passive) except (ValueError, KeyError, IndexError): pass def set(self, state, dict_, value, initiator, passive=PASSIVE_OFF, pop=False): """Set a value on the given object. `initiator` is the ``InstrumentedAttribute`` that initiated the ``set()`` operation and is used to control the depth of a circular setter operation. """ if initiator and initiator.parent_token is self.parent_token: return self._set_iterable( state, dict_, value, lambda adapter, i: adapter.adapt_like_to_iterable(i)) def _set_iterable(self, state, dict_, iterable, adapter=None): """Set a collection value from an iterable of state-bearers. ``adapter`` is an optional callable invoked with a CollectionAdapter and the iterable. Should return an iterable of state-bearing instances suitable for appending via a CollectionAdapter. Can be used for, e.g., adapting an incoming dictionary into an iterator of values rather than keys. """ # pulling a new collection first so that an adaptation exception does # not trigger a lazy load of the old collection. 
new_collection, user_data = self._initialize_collection(state) if adapter: new_values = list(adapter(new_collection, iterable)) else: new_values = list(iterable) old = self.get(state, dict_, passive=PASSIVE_ONLY_PERSISTENT) if old is PASSIVE_NO_RESULT: old = self.initialize(state, dict_) elif old is iterable: # ignore re-assignment of the current collection, as happens # implicitly with in-place operators (foo.collection |= other) return # place a copy of "old" in state.committed_state state._modified_event(dict_, self, old, True) old_collection = getattr(old, '_sa_adapter') dict_[self.key] = user_data collections.bulk_replace(new_values, old_collection, new_collection) old_collection.unlink(old) def _invalidate_collection(self, collection): adapter = getattr(collection, '_sa_adapter') adapter.invalidated = True def set_committed_value(self, state, dict_, value): """Set an attribute value on the given instance and 'commit' it.""" collection, user_data = self._initialize_collection(state) if value: collection.append_multiple_without_event(value) state.dict[self.key] = user_data state._commit(dict_, [self.key]) if self.key in state._pending_mutations: # pending items exist. issue a modified event, # add/remove new items. state._modified_event(dict_, self, user_data, True) pending = state._pending_mutations.pop(self.key) added = pending.added_items removed = pending.deleted_items for item in added: collection.append_without_event(item) for item in removed: collection.remove_without_event(item) return user_data def get_collection(self, state, dict_, user_data=None, passive=PASSIVE_OFF): """Retrieve the CollectionAdapter associated with the given state. Creates a new CollectionAdapter if one does not exist. """ if user_data is None: user_data = self.get(state, dict_, passive=passive) if user_data is PASSIVE_NO_RESULT: return user_data return getattr(user_data, '_sa_adapter') def backref_listeners(attribute, key, uselist): """Apply listeners to synchronize a two-way relationship.""" # use easily recognizable names for stack traces parent_token = attribute.impl.parent_token def _acceptable_key_err(child_state, initiator, child_impl): raise ValueError( "Bidirectional attribute conflict detected: " 'Passing object %s to attribute "%s" ' 'triggers a modify event on attribute "%s" ' 'via the backref "%s".' % ( orm_util.state_str(child_state), initiator.parent_token, child_impl.parent_token, attribute.impl.parent_token ) ) def emit_backref_from_scalar_set_event(state, child, oldchild, initiator): if oldchild is child: return child if oldchild is not None and oldchild is not PASSIVE_NO_RESULT: # With lazy=None, there's no guarantee that the full collection is # present when updating via a backref. 
old_state, old_dict = instance_state(oldchild),\ instance_dict(oldchild) impl = old_state.manager[key].impl impl.pop(old_state, old_dict, state.obj(), initiator, passive=PASSIVE_NO_FETCH) if child is not None: child_state, child_dict = instance_state(child),\ instance_dict(child) child_impl = child_state.manager[key].impl if initiator.parent_token is not parent_token and \ initiator.parent_token is not child_impl.parent_token: _acceptable_key_err(state, initiator, child_impl) child_impl.append( child_state, child_dict, state.obj(), initiator, passive=PASSIVE_NO_FETCH) return child def emit_backref_from_collection_append_event(state, child, initiator): if child is None: return child_state, child_dict = instance_state(child), \ instance_dict(child) child_impl = child_state.manager[key].impl if initiator.parent_token is not parent_token and \ initiator.parent_token is not child_impl.parent_token: _acceptable_key_err(state, initiator, child_impl) child_impl.append( child_state, child_dict, state.obj(), initiator, passive=PASSIVE_NO_FETCH) return child def emit_backref_from_collection_remove_event(state, child, initiator): if child is not None: child_state, child_dict = instance_state(child),\ instance_dict(child) child_impl = child_state.manager[key].impl # can't think of a path that would produce an initiator # mismatch here, as it would require an existing collection # mismatch. child_impl.pop( child_state, child_dict, state.obj(), initiator, passive=PASSIVE_NO_FETCH) if uselist: event.listen(attribute, "append", emit_backref_from_collection_append_event, retval=True, raw=True) else: event.listen(attribute, "set", emit_backref_from_scalar_set_event, retval=True, raw=True) # TODO: need coverage in test/orm/ of remove event event.listen(attribute, "remove", emit_backref_from_collection_remove_event, retval=True, raw=True) _NO_HISTORY = util.symbol('NO_HISTORY') _NO_STATE_SYMBOLS = frozenset([ id(PASSIVE_NO_RESULT), id(NO_VALUE), id(NEVER_SET)]) History = util.namedtuple("History", [ "added", "unchanged", "deleted" ]) class History(History): """A 3-tuple of added, unchanged and deleted values, representing the changes which have occurred on an instrumented attribute. The easiest way to get a :class:`.History` object for a particular attribute on an object is to use the :func:`.inspect` function:: from sqlalchemy import inspect hist = inspect(myobject).attrs.myattribute.history Each tuple member is an iterable sequence: * ``added`` - the collection of items added to the attribute (the first tuple element). * ``unchanged`` - the collection of items that have not changed on the attribute (the second tuple element). * ``deleted`` - the collection of items that have been removed from the attribute (the third tuple element). """ def __nonzero__(self): return self != HISTORY_BLANK def empty(self): """Return True if this :class:`.History` has no changes and no existing, unchanged state. 
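        e.g., a brief illustration, where ``some_object`` and
        ``some_attribute`` are placeholders for a mapped instance and one
        of its mapped attributes::

            from sqlalchemy import inspect

            hist = inspect(some_object).attrs.some_attribute.history
            if hist.empty():
                # nothing added or deleted, and no unchanged value loaded
                ...
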
""" return not bool( (self.added or self.deleted) or self.unchanged and self.unchanged != [None] ) def sum(self): """Return a collection of added + unchanged + deleted.""" return (self.added or []) +\ (self.unchanged or []) +\ (self.deleted or []) def non_deleted(self): """Return a collection of added + unchanged.""" return (self.added or []) +\ (self.unchanged or []) def non_added(self): """Return a collection of unchanged + deleted.""" return (self.unchanged or []) +\ (self.deleted or []) def has_changes(self): """Return True if this :class:`.History` has changes.""" return bool(self.added or self.deleted) def as_state(self): return History( [(c is not None) and instance_state(c) or None for c in self.added], [(c is not None) and instance_state(c) or None for c in self.unchanged], [(c is not None) and instance_state(c) or None for c in self.deleted], ) @classmethod def from_scalar_attribute(cls, attribute, state, current): original = state.committed_state.get(attribute.key, _NO_HISTORY) if original is _NO_HISTORY: if current is NO_VALUE: return cls((), (), ()) else: return cls((), [current], ()) # don't let ClauseElement expressions here trip things up elif attribute.is_equal(current, original) is True: return cls((), [current], ()) else: # current convention on native scalars is to not # include information # about missing previous value in "deleted", but # we do include None, which helps in some primary # key situations if id(original) in _NO_STATE_SYMBOLS: deleted = () else: deleted = [original] if current is NO_VALUE: return cls((), (), deleted) else: return cls([current], (), deleted) @classmethod def from_object_attribute(cls, attribute, state, current): original = state.committed_state.get(attribute.key, _NO_HISTORY) if original is _NO_HISTORY: if current is NO_VALUE or current is NEVER_SET: return cls((), (), ()) else: return cls((), [current], ()) elif current is original: return cls((), [current], ()) else: # current convention on related objects is to not # include information # about missing previous value in "deleted", and # to also not include None - the dependency.py rules # ignore the None in any case. if id(original) in _NO_STATE_SYMBOLS or original is None: deleted = () else: deleted = [original] if current is NO_VALUE or current is NEVER_SET: return cls((), (), deleted) else: return cls([current], (), deleted) @classmethod def from_collection(cls, attribute, state, current): original = state.committed_state.get(attribute.key, _NO_HISTORY) if current is NO_VALUE or current is NEVER_SET: return cls((), (), ()) current = getattr(current, '_sa_adapter') if original in (NO_VALUE, NEVER_SET): return cls(list(current), (), ()) elif original is _NO_HISTORY: return cls((), list(current), ()) else: current_states = [((c is not None) and instance_state(c) or None, c) for c in current ] original_states = [((c is not None) and instance_state(c) or None, c) for c in original ] current_set = dict(current_states) original_set = dict(original_states) return cls( [o for s, o in current_states if s not in original_set], [o for s, o in current_states if s in original_set], [o for s, o in original_states if s not in current_set] ) HISTORY_BLANK = History(None, None, None) def get_history(obj, key, passive=PASSIVE_OFF): """Return a :class:`.History` record for the given object and attribute key. :param obj: an object whose class is instrumented by the attributes package. :param key: string attribute name. 
:param passive: indicates loading behavior for the attribute if the value is not already present. This is a bitflag attribute, which defaults to the symbol :attr:`.PASSIVE_OFF` indicating all necessary SQL should be emitted. """ if passive is True: util.warn_deprecated("Passing True for 'passive' is deprecated. " "Use attributes.PASSIVE_NO_INITIALIZE") passive = PASSIVE_NO_INITIALIZE elif passive is False: util.warn_deprecated("Passing False for 'passive' is " "deprecated. Use attributes.PASSIVE_OFF") passive = PASSIVE_OFF return get_state_history(instance_state(obj), key, passive) def get_state_history(state, key, passive=PASSIVE_OFF): return state.get_history(key, passive) def has_parent(cls, obj, key, optimistic=False): """TODO""" manager = manager_of_class(cls) state = instance_state(obj) return manager.has_parent(state, key, optimistic) def register_attribute(class_, key, **kw): comparator = kw.pop('comparator', None) parententity = kw.pop('parententity', None) doc = kw.pop('doc', None) desc = register_descriptor(class_, key, comparator, parententity, doc=doc) register_attribute_impl(class_, key, **kw) return desc def register_attribute_impl(class_, key, uselist=False, callable_=None, useobject=False, impl_class=None, backref=None, **kw): manager = manager_of_class(class_) if uselist: factory = kw.pop('typecallable', None) typecallable = manager.instrument_collection_class( key, factory or list) else: typecallable = kw.pop('typecallable', None) dispatch = manager[key].dispatch if impl_class: impl = impl_class(class_, key, typecallable, dispatch, **kw) elif uselist: impl = CollectionAttributeImpl(class_, key, callable_, dispatch, typecallable=typecallable, **kw) elif useobject: impl = ScalarObjectAttributeImpl(class_, key, callable_, dispatch, **kw) else: impl = ScalarAttributeImpl(class_, key, callable_, dispatch, **kw) manager[key].impl = impl if backref: backref_listeners(manager[key], backref, uselist) manager.post_configure_attribute(key) return manager[key] def register_descriptor(class_, key, comparator=None, parententity=None, doc=None): manager = manager_of_class(class_) descriptor = InstrumentedAttribute(class_, key, comparator=comparator, parententity=parententity) descriptor.__doc__ = doc manager.instrument_attribute(key, descriptor) return descriptor def unregister_attribute(class_, key): manager_of_class(class_).uninstrument_attribute(key) def init_collection(obj, key): """Initialize a collection attribute and return the collection adapter. This function is used to provide direct access to collection internals for a previously unloaded attribute. e.g.:: collection_adapter = init_collection(someobject, 'elements') for elem in values: collection_adapter.append_without_event(elem) For an easier way to do the above, see :func:`~sqlalchemy.orm.attributes.set_committed_value`. obj is an instrumented object instance. An InstanceState is accepted directly for backwards compatibility but this usage is deprecated. """ state = instance_state(obj) dict_ = state.dict return init_state_collection(state, dict_, key) def init_state_collection(state, dict_, key): """Initialize a collection attribute and return the collection adapter.""" attr = state.manager[key].impl user_data = attr.initialize(state, dict_) return attr.get_collection(state, dict_, user_data) def set_committed_value(instance, key, value): """Set the value of an attribute with no history events. Cancels any previous history present. 
The value should be a scalar value for scalar-holding attributes, or an iterable for any collection-holding attribute. This is the same underlying method used when a lazy loader fires off and loads additional data from the database. In particular, this method can be used by application code which has loaded additional attributes or collections through separate queries, which can then be attached to an instance as though it were part of its original loaded state. """ state, dict_ = instance_state(instance), instance_dict(instance) state.manager[key].impl.set_committed_value(state, dict_, value) def set_attribute(instance, key, value): """Set the value of an attribute, firing history events. This function may be used regardless of instrumentation applied directly to the class, i.e. no descriptors are required. Custom attribute management schemes will need to make usage of this method to establish attribute state as understood by SQLAlchemy. """ state, dict_ = instance_state(instance), instance_dict(instance) state.manager[key].impl.set(state, dict_, value, None) def get_attribute(instance, key): """Get the value of an attribute, firing any callables required. This function may be used regardless of instrumentation applied directly to the class, i.e. no descriptors are required. Custom attribute management schemes will need to make usage of this method to make usage of attribute state as understood by SQLAlchemy. """ state, dict_ = instance_state(instance), instance_dict(instance) return state.manager[key].impl.get(state, dict_) def del_attribute(instance, key): """Delete the value of an attribute, firing history events. This function may be used regardless of instrumentation applied directly to the class, i.e. no descriptors are required. Custom attribute management schemes will need to make usage of this method to establish attribute state as understood by SQLAlchemy. """ state, dict_ = instance_state(instance), instance_dict(instance) state.manager[key].impl.delete(state, dict_) def flag_modified(instance, key): """Mark an attribute on an instance as 'modified'. This sets the 'modified' flag on the instance and establishes an unconditional change event for the given attribute. """ state, dict_ = instance_state(instance), instance_dict(instance) impl = state.manager[key].impl state._modified_event(dict_, impl, NO_VALUE) SQLAlchemy-0.8.4/lib/sqlalchemy/orm/collections.py0000644000076500000240000015177512251150015022636 0ustar classicstaff00000000000000# orm/collections.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Support for collections of mapped entities. The collections package supplies the machinery used to inform the ORM of collection membership changes. An instrumentation via decoration approach is used, allowing arbitrary types (including built-ins) to be used as entity collections without requiring inheritance from a base class. Instrumentation decoration relays membership change events to the :class:`.CollectionAttributeImpl` that is currently managing the collection. The decorators observe function call arguments and return values, tracking entities entering or leaving the collection. Two decorator approaches are provided. One is a bundle of generic decorators that map function arguments and return values to events:: from sqlalchemy.orm.collections import collection class MyClass(object): # ... 
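        # (this example assumes self.data is a plain list created
        #  elsewhere, e.g. in __init__)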
@collection.adds(1) def store(self, item): self.data.append(item) @collection.removes_return() def pop(self): return self.data.pop() The second approach is a bundle of targeted decorators that wrap appropriate append and remove notifiers around the mutation methods present in the standard Python ``list``, ``set`` and ``dict`` interfaces. These could be specified in terms of generic decorator recipes, but are instead hand-tooled for increased efficiency. The targeted decorators occasionally implement adapter-like behavior, such as mapping bulk-set methods (``extend``, ``update``, ``__setslice__``, etc.) into the series of atomic mutation events that the ORM requires. The targeted decorators are used internally for automatic instrumentation of entity collection classes. Every collection class goes through a transformation process roughly like so: 1. If the class is a built-in, substitute a trivial sub-class 2. Is this class already instrumented? 3. Add in generic decorators 4. Sniff out the collection interface through duck-typing 5. Add targeted decoration to any undecorated interface method This process modifies the class at runtime, decorating methods and adding some bookkeeping properties. This isn't possible (or desirable) for built-in classes like ``list``, so trivial sub-classes are substituted to hold decoration:: class InstrumentedList(list): pass Collection classes can be specified in ``relationship(collection_class=)`` as types or a function that returns an instance. Collection classes are inspected and instrumented during the mapper compilation phase. The collection_class callable will be executed once to produce a specimen instance, and the type of that specimen will be instrumented. Functions that return built-in types like ``lists`` will be adapted to produce instrumented instances. When extending a known type like ``list``, additional decorations are not generally not needed. Odds are, the extension method will delegate to a method that's already instrumented. For example:: class QueueIsh(list): def push(self, item): self.append(item) def shift(self): return self.pop(0) There's no need to decorate these methods. ``append`` and ``pop`` are already instrumented as part of the ``list`` interface. Decorating them would fire duplicate events, which should be avoided. The targeted decoration tries not to rely on other methods in the underlying collection class, but some are unavoidable. Many depend on 'read' methods being present to properly instrument a 'write', for example, ``__setitem__`` needs ``__getitem__``. "Bulk" methods like ``update`` and ``extend`` may also reimplemented in terms of atomic appends and removes, so the ``extend`` decoration will actually perform many ``append`` operations and not call the underlying method at all. Tight control over bulk operation and the firing of events is also possible by implementing the instrumentation internally in your methods. The basic instrumentation package works under the general assumption that collection mutation will not raise unusual exceptions. If you want to closely orchestrate append and remove events with exception management, internal instrumentation may be the answer. Within your method, ``collection_adapter(self)`` will retrieve an object that you can use for explicit control over triggering append and remove events. The owning object and :class:`.CollectionAttributeImpl` are also reachable through the adapter, allowing for some very sophisticated behavior. 
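A rough sketch of that internally-instrumented approach might look like the
following; the ``MyList`` class and its ``extend()`` override are
illustrative only and not part of this module::

    from sqlalchemy.orm.collections import collection, collection_adapter

    class MyList(list):
        @collection.internally_instrumented
        def extend(self, items):
            # fire an append event per item, then perform the raw append;
            # if no adapter is linked yet, append silently
            adapter = collection_adapter(self)
            for item in items:
                if adapter is not None:
                    item = adapter.fire_append_event(item)
                list.append(self, item)
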
""" import inspect import operator import weakref from ..sql import expression from .. import util, exc as sa_exc orm_util = util.importlater("sqlalchemy.orm", "util") attributes = util.importlater("sqlalchemy.orm", "attributes") __all__ = ['collection', 'collection_adapter', 'mapped_collection', 'column_mapped_collection', 'attribute_mapped_collection'] __instrumentation_mutex = util.threading.Lock() class _PlainColumnGetter(object): """Plain column getter, stores collection of Column objects directly. Serializes to a :class:`._SerializableColumnGetterV2` which has more expensive __call__() performance and some rare caveats. """ def __init__(self, cols): self.cols = cols self.composite = len(cols) > 1 def __reduce__(self): return _SerializableColumnGetterV2._reduce_from_cols(self.cols) def _cols(self, mapper): return self.cols def __call__(self, value): state = attributes.instance_state(value) m = orm_util._state_mapper(state) key = [ m._get_state_attr_by_column(state, state.dict, col) for col in self._cols(m) ] if self.composite: return tuple(key) else: return key[0] class _SerializableColumnGetter(object): """Column-based getter used in version 0.7.6 only. Remains here for pickle compatibility with 0.7.6. """ def __init__(self, colkeys): self.colkeys = colkeys self.composite = len(colkeys) > 1 def __reduce__(self): return _SerializableColumnGetter, (self.colkeys,) def __call__(self, value): state = attributes.instance_state(value) m = orm_util._state_mapper(state) key = [m._get_state_attr_by_column( state, state.dict, m.mapped_table.columns[k]) for k in self.colkeys] if self.composite: return tuple(key) else: return key[0] class _SerializableColumnGetterV2(_PlainColumnGetter): """Updated serializable getter which deals with multi-table mapped classes. Two extremely unusual cases are not supported. Mappings which have tables across multiple metadata objects, or which are mapped to non-Table selectables linked across inheriting mappers may fail to function here. """ def __init__(self, colkeys): self.colkeys = colkeys self.composite = len(colkeys) > 1 def __reduce__(self): return self.__class__, (self.colkeys,) @classmethod def _reduce_from_cols(cls, cols): def _table_key(c): if not isinstance(c.table, expression.TableClause): return None else: return c.table.key colkeys = [(c.key, _table_key(c)) for c in cols] return _SerializableColumnGetterV2, (colkeys,) def _cols(self, mapper): cols = [] metadata = getattr(mapper.local_table, 'metadata', None) for (ckey, tkey) in self.colkeys: if tkey is None or \ metadata is None or \ tkey not in metadata: cols.append(mapper.local_table.c[ckey]) else: cols.append(metadata.tables[tkey].c[ckey]) return cols def column_mapped_collection(mapping_spec): """A dictionary-based collection type with column-based keying. Returns a :class:`.MappedCollection` factory with a keying function generated from mapping_spec, which may be a Column or a sequence of Columns. The key value must be immutable for the lifetime of the object. You can not, for example, map on foreign key values if those key values will change during the session, i.e. from None to a database-assigned integer after a session flush. 
""" cols = [expression._only_column_elements(q, "mapping_spec") for q in util.to_list(mapping_spec) ] keyfunc = _PlainColumnGetter(cols) return lambda: MappedCollection(keyfunc) class _SerializableAttrGetter(object): def __init__(self, name): self.name = name self.getter = operator.attrgetter(name) def __call__(self, target): return self.getter(target) def __reduce__(self): return _SerializableAttrGetter, (self.name, ) def attribute_mapped_collection(attr_name): """A dictionary-based collection type with attribute-based keying. Returns a :class:`.MappedCollection` factory with a keying based on the 'attr_name' attribute of entities in the collection, where ``attr_name`` is the string name of the attribute. The key value must be immutable for the lifetime of the object. You can not, for example, map on foreign key values if those key values will change during the session, i.e. from None to a database-assigned integer after a session flush. """ getter = _SerializableAttrGetter(attr_name) return lambda: MappedCollection(getter) def mapped_collection(keyfunc): """A dictionary-based collection type with arbitrary keying. Returns a :class:`.MappedCollection` factory with a keying function generated from keyfunc, a callable that takes an entity and returns a key value. The key value must be immutable for the lifetime of the object. You can not, for example, map on foreign key values if those key values will change during the session, i.e. from None to a database-assigned integer after a session flush. """ return lambda: MappedCollection(keyfunc) class collection(object): """Decorators for entity collection classes. The decorators fall into two groups: annotations and interception recipes. The annotating decorators (appender, remover, iterator, linker, converter, internally_instrumented) indicate the method's purpose and take no arguments. They are not written with parens:: @collection.appender def append(self, append): ... The recipe decorators all require parens, even those that take no arguments:: @collection.adds('entity') def insert(self, position, entity): ... @collection.removes_return() def popitem(self): ... """ # Bundled as a class solely for ease of use: packaging, doc strings, # importability. @staticmethod def appender(fn): """Tag the method as the collection appender. The appender method is called with one positional argument: the value to append. The method will be automatically decorated with 'adds(1)' if not already decorated:: @collection.appender def add(self, append): ... # or, equivalently @collection.appender @collection.adds(1) def add(self, append): ... # for mapping type, an 'append' may kick out a previous value # that occupies that slot. consider d['a'] = 'foo'- any previous # value in d['a'] is discarded. @collection.appender @collection.replaces(1) def add(self, entity): key = some_key_func(entity) previous = None if key in self: previous = self[key] self[key] = entity return previous If the value to append is not allowed in the collection, you may raise an exception. Something to remember is that the appender will be called for each object mapped by a database query. If the database contains rows that violate your collection semantics, you will need to get creative to fix the problem, as access via the collection will not work. If the appender method is internally instrumented, you must also receive the keyword argument '_sa_initiator' and ensure its promulgation to collection events. 
""" setattr(fn, '_sa_instrument_role', 'appender') return fn @staticmethod def remover(fn): """Tag the method as the collection remover. The remover method is called with one positional argument: the value to remove. The method will be automatically decorated with :meth:`removes_return` if not already decorated:: @collection.remover def zap(self, entity): ... # or, equivalently @collection.remover @collection.removes_return() def zap(self, ): ... If the value to remove is not present in the collection, you may raise an exception or return None to ignore the error. If the remove method is internally instrumented, you must also receive the keyword argument '_sa_initiator' and ensure its promulgation to collection events. """ setattr(fn, '_sa_instrument_role', 'remover') return fn @staticmethod def iterator(fn): """Tag the method as the collection remover. The iterator method is called with no arguments. It is expected to return an iterator over all collection members:: @collection.iterator def __iter__(self): ... """ setattr(fn, '_sa_instrument_role', 'iterator') return fn @staticmethod def internally_instrumented(fn): """Tag the method as instrumented. This tag will prevent any decoration from being applied to the method. Use this if you are orchestrating your own calls to :func:`.collection_adapter` in one of the basic SQLAlchemy interface methods, or to prevent an automatic ABC method decoration from wrapping your implementation:: # normally an 'extend' method on a list-like class would be # automatically intercepted and re-implemented in terms of # SQLAlchemy events and append(). your implementation will # never be called, unless: @collection.internally_instrumented def extend(self, items): ... """ setattr(fn, '_sa_instrumented', True) return fn @staticmethod def linker(fn): """Tag the method as a "linked to attribute" event handler. This optional event handler will be called when the collection class is linked to or unlinked from the InstrumentedAttribute. It is invoked immediately after the '_sa_adapter' property is set on the instance. A single argument is passed: the collection adapter that has been linked, or None if unlinking. """ setattr(fn, '_sa_instrument_role', 'linker') return fn link = linker """deprecated; synonym for :meth:`.collection.linker`.""" @staticmethod def converter(fn): """Tag the method as the collection converter. This optional method will be called when a collection is being replaced entirely, as in:: myobj.acollection = [newvalue1, newvalue2] The converter method will receive the object being assigned and should return an iterable of values suitable for use by the ``appender`` method. A converter must not assign values or mutate the collection, it's sole job is to adapt the value the user provides into an iterable of values for the ORM's use. The default converter implementation will use duck-typing to do the conversion. A dict-like collection will be convert into an iterable of dictionary values, and other types will simply be iterated:: @collection.converter def convert(self, other): ... If the duck-typing of the object does not match the type of this collection, a TypeError is raised. Supply an implementation of this method if you want to expand the range of possible types that can be assigned in bulk or perform validation on the values about to be assigned. """ setattr(fn, '_sa_instrument_role', 'converter') return fn @staticmethod def adds(arg): """Mark the method as adding an entity to the collection. Adds "add to collection" handling to the method. 
The decorator argument indicates which method argument holds the SQLAlchemy-relevant value. Arguments can be specified positionally (i.e. integer) or by name:: @collection.adds(1) def push(self, item): ... @collection.adds('entity') def do_stuff(self, thing, entity=None): ... """ def decorator(fn): setattr(fn, '_sa_instrument_before', ('fire_append_event', arg)) return fn return decorator @staticmethod def replaces(arg): """Mark the method as replacing an entity in the collection. Adds "add to collection" and "remove from collection" handling to the method. The decorator argument indicates which method argument holds the SQLAlchemy-relevant value to be added, and return value, if any will be considered the value to remove. Arguments can be specified positionally (i.e. integer) or by name:: @collection.replaces(2) def __setitem__(self, index, item): ... """ def decorator(fn): setattr(fn, '_sa_instrument_before', ('fire_append_event', arg)) setattr(fn, '_sa_instrument_after', 'fire_remove_event') return fn return decorator @staticmethod def removes(arg): """Mark the method as removing an entity in the collection. Adds "remove from collection" handling to the method. The decorator argument indicates which method argument holds the SQLAlchemy-relevant value to be removed. Arguments can be specified positionally (i.e. integer) or by name:: @collection.removes(1) def zap(self, item): ... For methods where the value to remove is not known at call-time, use collection.removes_return. """ def decorator(fn): setattr(fn, '_sa_instrument_before', ('fire_remove_event', arg)) return fn return decorator @staticmethod def removes_return(): """Mark the method as removing an entity in the collection. Adds "remove from collection" handling to the method. The return value of the method, if any, is considered the value to remove. The method arguments are not inspected:: @collection.removes_return() def pop(self): ... For methods where the value to remove is known at call-time, use collection.remove. """ def decorator(fn): setattr(fn, '_sa_instrument_after', 'fire_remove_event') return fn return decorator # public instrumentation interface for 'internally instrumented' # implementations def collection_adapter(collection): """Fetch the :class:`.CollectionAdapter` for a collection.""" return getattr(collection, '_sa_adapter', None) def collection_iter(collection): """Iterate over an object supporting the @iterator or __iter__ protocols. If the collection is an ORM collection, it need not be attached to an object to be iterable. """ try: return getattr(collection, '_sa_iterator', getattr(collection, '__iter__'))() except AttributeError: raise TypeError("'%s' object is not iterable" % type(collection).__name__) class CollectionAdapter(object): """Bridges between the ORM and arbitrary Python collections. Proxies base-level collection operations (append, remove, iterate) to the underlying Python collection, and emits add/remove events for entities entering or leaving the collection. The ORM uses :class:`.CollectionAdapter` exclusively for interaction with entity collections. The usage of getattr()/setattr() is currently to allow injection of custom methods, such as to unwrap Zope security proxies. """ invalidated = False def __init__(self, attr, owner_state, data): self._key = attr.key self._data = weakref.ref(data) self.owner_state = owner_state self.link_to_self(data) def _warn_invalidated(self): util.warn("This collection has been invalidated.") @property def data(self): "The entity collection being adapted." 
return self._data() @util.memoized_property def attr(self): return self.owner_state.manager[self._key].impl def link_to_self(self, data): """Link a collection to this adapter, and fire a link event.""" setattr(data, '_sa_adapter', self) if hasattr(data, '_sa_linker'): getattr(data, '_sa_linker')(self) def unlink(self, data): """Unlink a collection from any adapter, and fire a link event.""" setattr(data, '_sa_adapter', None) if hasattr(data, '_sa_linker'): getattr(data, '_sa_linker')(None) def adapt_like_to_iterable(self, obj): """Converts collection-compatible objects to an iterable of values. Can be passed any type of object, and if the underlying collection determines that it can be adapted into a stream of values it can use, returns an iterable of values suitable for append()ing. This method may raise TypeError or any other suitable exception if adaptation fails. If a converter implementation is not supplied on the collection, a default duck-typing-based implementation is used. """ converter = getattr(self._data(), '_sa_converter', None) if converter is not None: return converter(obj) setting_type = util.duck_type_collection(obj) receiving_type = util.duck_type_collection(self._data()) if obj is None or setting_type != receiving_type: given = obj is None and 'None' or obj.__class__.__name__ if receiving_type is None: wanted = self._data().__class__.__name__ else: wanted = receiving_type.__name__ raise TypeError( "Incompatible collection type: %s is not %s-like" % ( given, wanted)) # If the object is an adapted collection, return the (iterable) # adapter. if getattr(obj, '_sa_adapter', None) is not None: return getattr(obj, '_sa_adapter') elif setting_type == dict: # Py3K #return obj.values() # Py2K return getattr(obj, 'itervalues', getattr(obj, 'values'))() # end Py2K else: return iter(obj) def append_with_event(self, item, initiator=None): """Add an entity to the collection, firing mutation events.""" getattr(self._data(), '_sa_appender')(item, _sa_initiator=initiator) def append_without_event(self, item): """Add or restore an entity to the collection, firing no events.""" getattr(self._data(), '_sa_appender')(item, _sa_initiator=False) def append_multiple_without_event(self, items): """Add or restore an entity to the collection, firing no events.""" appender = getattr(self._data(), '_sa_appender') for item in items: appender(item, _sa_initiator=False) def remove_with_event(self, item, initiator=None): """Remove an entity from the collection, firing mutation events.""" getattr(self._data(), '_sa_remover')(item, _sa_initiator=initiator) def remove_without_event(self, item): """Remove an entity from the collection, firing no events.""" getattr(self._data(), '_sa_remover')(item, _sa_initiator=False) def clear_with_event(self, initiator=None): """Empty the collection, firing a mutation event for each entity.""" remover = getattr(self._data(), '_sa_remover') for item in list(self): remover(item, _sa_initiator=initiator) def clear_without_event(self): """Empty the collection, firing no events.""" remover = getattr(self._data(), '_sa_remover') for item in list(self): remover(item, _sa_initiator=False) def __iter__(self): """Iterate over entities in the collection.""" # Py3K requires iter() here return iter(getattr(self._data(), '_sa_iterator')()) def __len__(self): """Count entities in the collection.""" return len(list(getattr(self._data(), '_sa_iterator')())) def __nonzero__(self): return True def fire_append_event(self, item, initiator=None): """Notify that a entity has entered the 
collection. Initiator is a token owned by the InstrumentedAttribute that initiated the membership mutation, and should be left as None unless you are passing along an initiator value from a chained operation. """ if initiator is not False: if self.invalidated: self._warn_invalidated() return self.attr.fire_append_event( self.owner_state, self.owner_state.dict, item, initiator) else: return item def fire_remove_event(self, item, initiator=None): """Notify that a entity has been removed from the collection. Initiator is the InstrumentedAttribute that initiated the membership mutation, and should be left as None unless you are passing along an initiator value from a chained operation. """ if initiator is not False: if self.invalidated: self._warn_invalidated() self.attr.fire_remove_event( self.owner_state, self.owner_state.dict, item, initiator) def fire_pre_remove_event(self, initiator=None): """Notify that an entity is about to be removed from the collection. Only called if the entity cannot be removed after calling fire_remove_event(). """ if self.invalidated: self._warn_invalidated() self.attr.fire_pre_remove_event( self.owner_state, self.owner_state.dict, initiator=initiator) def __getstate__(self): return {'key': self._key, 'owner_state': self.owner_state, 'data': self.data} def __setstate__(self, d): self._key = d['key'] self.owner_state = d['owner_state'] self._data = weakref.ref(d['data']) def bulk_replace(values, existing_adapter, new_adapter): """Load a new collection, firing events based on prior like membership. Appends instances in ``values`` onto the ``new_adapter``. Events will be fired for any instance not present in the ``existing_adapter``. Any instances in ``existing_adapter`` not present in ``values`` will have remove events fired upon them. :param values: An iterable of collection member instances :param existing_adapter: A :class:`.CollectionAdapter` of instances to be replaced :param new_adapter: An empty :class:`.CollectionAdapter` to load with ``values`` """ if not isinstance(values, list): values = list(values) idset = util.IdentitySet existing_idset = idset(existing_adapter or ()) constants = existing_idset.intersection(values or ()) additions = idset(values or ()).difference(constants) removals = existing_idset.difference(constants) for member in values or (): if member in additions: new_adapter.append_with_event(member) elif member in constants: new_adapter.append_without_event(member) if existing_adapter: for member in removals: existing_adapter.remove_with_event(member) def prepare_instrumentation(factory): """Prepare a callable for future use as a collection class factory. Given a collection class factory (either a type or no-arg callable), return another factory that will produce compatible instances when called. This function is responsible for converting collection_class=list into the run-time behavior of collection_class=InstrumentedList. """ # Convert a builtin to 'Instrumented*' if factory in __canned_instrumentation: factory = __canned_instrumentation[factory] # Create a specimen cls = type(factory()) # Did factory callable return a builtin? if cls in __canned_instrumentation: # Wrap it so that it returns our 'Instrumented*' factory = __converting_factory(cls, factory) cls = factory() # Instrument the class if needed. 
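    # Example (a hedged sketch; ``Child`` is a hypothetical mapped class): for a
    # mapping declared with ``relationship(Child, collection_class=set)``, the
    # factory received here is the built-in ``set``, and the lookup above swaps
    # in ``InstrumentedSet`` so that membership mutations emit ORM events.  A
    # user-defined collection class passes through unchanged and is instrumented
    # in place by the mutex-guarded block below, provided it declares (or
    # inherits) appender/remover/iterator roles.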
if __instrumentation_mutex.acquire(): try: if getattr(cls, '_sa_instrumented', None) != id(cls): _instrument_class(cls) finally: __instrumentation_mutex.release() return factory def __converting_factory(specimen_cls, original_factory): """Return a wrapper that converts a "canned" collection like set, dict, list into the Instrumented* version. """ instrumented_cls = __canned_instrumentation[specimen_cls] def wrapper(): collection = original_factory() return instrumented_cls(collection) # often flawed but better than nothing wrapper.__name__ = "%sWrapper" % original_factory.__name__ wrapper.__doc__ = original_factory.__doc__ return wrapper def _instrument_class(cls): """Modify methods in a class and install instrumentation.""" # In the normal call flow, a request for any of the 3 basic collection # types is transformed into one of our trivial subclasses # (e.g. InstrumentedList). Catch anything else that sneaks in here... if cls.__module__ == '__builtin__': raise sa_exc.ArgumentError( "Can not instrument a built-in type. Use a " "subclass, even a trivial one.") roles = {} methods = {} # search for _sa_instrument_role-decorated methods in # method resolution order, assign to roles for supercls in cls.__mro__: for name, method in vars(supercls).items(): if not util.callable(method): continue # note role declarations if hasattr(method, '_sa_instrument_role'): role = method._sa_instrument_role assert role in ('appender', 'remover', 'iterator', 'linker', 'converter') roles.setdefault(role, name) # transfer instrumentation requests from decorated function # to the combined queue before, after = None, None if hasattr(method, '_sa_instrument_before'): op, argument = method._sa_instrument_before assert op in ('fire_append_event', 'fire_remove_event') before = op, argument if hasattr(method, '_sa_instrument_after'): op = method._sa_instrument_after assert op in ('fire_append_event', 'fire_remove_event') after = op if before: methods[name] = before[0], before[1], after elif after: methods[name] = None, None, after # see if this class has "canned" roles based on a known # collection type (dict, set, list). 
Apply those roles # as needed to the "roles" dictionary, and also # prepare "decorator" methods collection_type = util.duck_type_collection(cls) if collection_type in __interfaces: canned_roles, decorators = __interfaces[collection_type] for role, name in canned_roles.items(): roles.setdefault(role, name) # apply ABC auto-decoration to methods that need it for method, decorator in decorators.items(): fn = getattr(cls, method, None) if (fn and method not in methods and not hasattr(fn, '_sa_instrumented')): setattr(cls, method, decorator(fn)) # ensure all roles are present, and apply implicit instrumentation if # needed if 'appender' not in roles or not hasattr(cls, roles['appender']): raise sa_exc.ArgumentError( "Type %s must elect an appender method to be " "a collection class" % cls.__name__) elif (roles['appender'] not in methods and not hasattr(getattr(cls, roles['appender']), '_sa_instrumented')): methods[roles['appender']] = ('fire_append_event', 1, None) if 'remover' not in roles or not hasattr(cls, roles['remover']): raise sa_exc.ArgumentError( "Type %s must elect a remover method to be " "a collection class" % cls.__name__) elif (roles['remover'] not in methods and not hasattr(getattr(cls, roles['remover']), '_sa_instrumented')): methods[roles['remover']] = ('fire_remove_event', 1, None) if 'iterator' not in roles or not hasattr(cls, roles['iterator']): raise sa_exc.ArgumentError( "Type %s must elect an iterator method to be " "a collection class" % cls.__name__) # apply ad-hoc instrumentation from decorators, class-level defaults # and implicit role declarations for method_name, (before, argument, after) in methods.items(): setattr(cls, method_name, _instrument_membership_mutator(getattr(cls, method_name), before, argument, after)) # intern the role map for role, method_name in roles.items(): setattr(cls, '_sa_%s' % role, getattr(cls, method_name)) setattr(cls, '_sa_instrumented', id(cls)) def _instrument_membership_mutator(method, before, argument, after): """Route method args and/or return value through the collection adapter.""" # This isn't smart enough to handle @adds(1) for 'def fn(self, (a, b))' if before: fn_args = list(util.flatten_iterator(inspect.getargspec(method)[0])) if type(argument) is int: pos_arg = argument named_arg = len(fn_args) > argument and fn_args[argument] or None else: if argument in fn_args: pos_arg = fn_args.index(argument) else: pos_arg = None named_arg = argument del fn_args def wrapper(*args, **kw): if before: if pos_arg is None: if named_arg not in kw: raise sa_exc.ArgumentError( "Missing argument %s" % argument) value = kw[named_arg] else: if len(args) > pos_arg: value = args[pos_arg] elif named_arg in kw: value = kw[named_arg] else: raise sa_exc.ArgumentError( "Missing argument %s" % argument) initiator = kw.pop('_sa_initiator', None) if initiator is False: executor = None else: executor = getattr(args[0], '_sa_adapter', None) if before and executor: getattr(executor, before)(value, initiator) if not after or not executor: return method(*args, **kw) else: res = method(*args, **kw) if res is not None: getattr(executor, after)(res, initiator) return res wrapper._sa_instrumented = True if hasattr(method, "_sa_instrument_role"): wrapper._sa_instrument_role = method._sa_instrument_role wrapper.__name__ = method.__name__ wrapper.__doc__ = method.__doc__ return wrapper def __set(collection, item, _sa_initiator=None): """Run set events, may eventually be inlined into decorators.""" if _sa_initiator is not False: executor = getattr(collection, 
'_sa_adapter', None) if executor: item = getattr(executor, 'fire_append_event')(item, _sa_initiator) return item def __del(collection, item, _sa_initiator=None): """Run del events, may eventually be inlined into decorators.""" if _sa_initiator is not False: executor = getattr(collection, '_sa_adapter', None) if executor: getattr(executor, 'fire_remove_event')(item, _sa_initiator) def __before_delete(collection, _sa_initiator=None): """Special method to run 'commit existing value' methods""" executor = getattr(collection, '_sa_adapter', None) if executor: getattr(executor, 'fire_pre_remove_event')(_sa_initiator) def _list_decorators(): """Tailored instrumentation wrappers for any list-like class.""" def _tidy(fn): setattr(fn, '_sa_instrumented', True) fn.__doc__ = getattr(getattr(list, fn.__name__), '__doc__') def append(fn): def append(self, item, _sa_initiator=None): item = __set(self, item, _sa_initiator) fn(self, item) _tidy(append) return append def remove(fn): def remove(self, value, _sa_initiator=None): __before_delete(self, _sa_initiator) # testlib.pragma exempt:__eq__ fn(self, value) __del(self, value, _sa_initiator) _tidy(remove) return remove def insert(fn): def insert(self, index, value): value = __set(self, value) fn(self, index, value) _tidy(insert) return insert def __setitem__(fn): def __setitem__(self, index, value): if not isinstance(index, slice): existing = self[index] if existing is not None: __del(self, existing) value = __set(self, value) fn(self, index, value) else: # slice assignment requires __delitem__, insert, __len__ step = index.step or 1 start = index.start or 0 if start < 0: start += len(self) if index.stop is not None: stop = index.stop else: stop = len(self) if stop < 0: stop += len(self) if step == 1: for i in xrange(start, stop, step): if len(self) > start: del self[start] for i, item in enumerate(value): self.insert(i + start, item) else: rng = range(start, stop, step) if len(value) != len(rng): raise ValueError( "attempt to assign sequence of size %s to " "extended slice of size %s" % (len(value), len(rng))) for i, item in zip(rng, value): self.__setitem__(i, item) _tidy(__setitem__) return __setitem__ def __delitem__(fn): def __delitem__(self, index): if not isinstance(index, slice): item = self[index] __del(self, item) fn(self, index) else: # slice deletion requires __getslice__ and a slice-groking # __getitem__ for stepped deletion # note: not breaking this into atomic dels for item in self[index]: __del(self, item) fn(self, index) _tidy(__delitem__) return __delitem__ # Py2K def __setslice__(fn): def __setslice__(self, start, end, values): for value in self[start:end]: __del(self, value) values = [__set(self, value) for value in values] fn(self, start, end, values) _tidy(__setslice__) return __setslice__ def __delslice__(fn): def __delslice__(self, start, end): for value in self[start:end]: __del(self, value) fn(self, start, end) _tidy(__delslice__) return __delslice__ # end Py2K def extend(fn): def extend(self, iterable): for value in iterable: self.append(value) _tidy(extend) return extend def __iadd__(fn): def __iadd__(self, iterable): # list.__iadd__ takes any iterable and seems to let TypeError raise # as-is instead of returning NotImplemented for value in iterable: self.append(value) return self _tidy(__iadd__) return __iadd__ def pop(fn): def pop(self, index=-1): __before_delete(self) item = fn(self, index) __del(self, item) return item _tidy(pop) return pop # __imul__ : not wrapping this. 
all members of the collection are already # present, so no need to fire appends... wrapping it with an explicit # decorator is still possible, so events on *= can be had if they're # desired. hard to imagine a use case for __imul__, though. l = locals().copy() l.pop('_tidy') return l def _dict_decorators(): """Tailored instrumentation wrappers for any dict-like mapping class.""" def _tidy(fn): setattr(fn, '_sa_instrumented', True) fn.__doc__ = getattr(getattr(dict, fn.__name__), '__doc__') Unspecified = util.symbol('Unspecified') def __setitem__(fn): def __setitem__(self, key, value, _sa_initiator=None): if key in self: __del(self, self[key], _sa_initiator) value = __set(self, value, _sa_initiator) fn(self, key, value) _tidy(__setitem__) return __setitem__ def __delitem__(fn): def __delitem__(self, key, _sa_initiator=None): if key in self: __del(self, self[key], _sa_initiator) fn(self, key) _tidy(__delitem__) return __delitem__ def clear(fn): def clear(self): for key in self: __del(self, self[key]) fn(self) _tidy(clear) return clear def pop(fn): def pop(self, key, default=Unspecified): if key in self: __del(self, self[key]) if default is Unspecified: return fn(self, key) else: return fn(self, key, default) _tidy(pop) return pop def popitem(fn): def popitem(self): __before_delete(self) item = fn(self) __del(self, item[1]) return item _tidy(popitem) return popitem def setdefault(fn): def setdefault(self, key, default=None): if key not in self: self.__setitem__(key, default) return default else: return self.__getitem__(key) _tidy(setdefault) return setdefault def update(fn): def update(self, __other=Unspecified, **kw): if __other is not Unspecified: if hasattr(__other, 'keys'): for key in __other.keys(): if (key not in self or self[key] is not __other[key]): self[key] = __other[key] else: for key, value in __other: if key not in self or self[key] is not value: self[key] = value for key in kw: if key not in self or self[key] is not kw[key]: self[key] = kw[key] _tidy(update) return update l = locals().copy() l.pop('_tidy') l.pop('Unspecified') return l if util.py3k_warning: _set_binop_bases = (set, frozenset) else: import sets _set_binop_bases = (set, frozenset, sets.BaseSet) def _set_binops_check_strict(self, obj): """Allow only set, frozenset and self.__class__-derived objects in binops.""" return isinstance(obj, _set_binop_bases + (self.__class__,)) def _set_binops_check_loose(self, obj): """Allow anything set-like to participate in set binops.""" return (isinstance(obj, _set_binop_bases + (self.__class__,)) or util.duck_type_collection(obj) == set) def _set_decorators(): """Tailored instrumentation wrappers for any set-like class.""" def _tidy(fn): setattr(fn, '_sa_instrumented', True) fn.__doc__ = getattr(getattr(set, fn.__name__), '__doc__') Unspecified = util.symbol('Unspecified') def add(fn): def add(self, value, _sa_initiator=None): if value not in self: value = __set(self, value, _sa_initiator) # testlib.pragma exempt:__hash__ fn(self, value) _tidy(add) return add def discard(fn): def discard(self, value, _sa_initiator=None): # testlib.pragma exempt:__hash__ if value in self: __del(self, value, _sa_initiator) # testlib.pragma exempt:__hash__ fn(self, value) _tidy(discard) return discard def remove(fn): def remove(self, value, _sa_initiator=None): # testlib.pragma exempt:__hash__ if value in self: __del(self, value, _sa_initiator) # testlib.pragma exempt:__hash__ fn(self, value) _tidy(remove) return remove def pop(fn): def pop(self): __before_delete(self) item = fn(self) __del(self, 
item) return item _tidy(pop) return pop def clear(fn): def clear(self): for item in list(self): self.remove(item) _tidy(clear) return clear def update(fn): def update(self, value): for item in value: self.add(item) _tidy(update) return update def __ior__(fn): def __ior__(self, value): if not _set_binops_check_strict(self, value): return NotImplemented for item in value: self.add(item) return self _tidy(__ior__) return __ior__ def difference_update(fn): def difference_update(self, value): for item in value: self.discard(item) _tidy(difference_update) return difference_update def __isub__(fn): def __isub__(self, value): if not _set_binops_check_strict(self, value): return NotImplemented for item in value: self.discard(item) return self _tidy(__isub__) return __isub__ def intersection_update(fn): def intersection_update(self, other): want, have = self.intersection(other), set(self) remove, add = have - want, want - have for item in remove: self.remove(item) for item in add: self.add(item) _tidy(intersection_update) return intersection_update def __iand__(fn): def __iand__(self, other): if not _set_binops_check_strict(self, other): return NotImplemented want, have = self.intersection(other), set(self) remove, add = have - want, want - have for item in remove: self.remove(item) for item in add: self.add(item) return self _tidy(__iand__) return __iand__ def symmetric_difference_update(fn): def symmetric_difference_update(self, other): want, have = self.symmetric_difference(other), set(self) remove, add = have - want, want - have for item in remove: self.remove(item) for item in add: self.add(item) _tidy(symmetric_difference_update) return symmetric_difference_update def __ixor__(fn): def __ixor__(self, other): if not _set_binops_check_strict(self, other): return NotImplemented want, have = self.symmetric_difference(other), set(self) remove, add = have - want, want - have for item in remove: self.remove(item) for item in add: self.add(item) return self _tidy(__ixor__) return __ixor__ l = locals().copy() l.pop('_tidy') l.pop('Unspecified') return l class InstrumentedList(list): """An instrumented version of the built-in list.""" class InstrumentedSet(set): """An instrumented version of the built-in set.""" class InstrumentedDict(dict): """An instrumented version of the built-in dict.""" __canned_instrumentation = { list: InstrumentedList, set: InstrumentedSet, dict: InstrumentedDict, } __interfaces = { list: ( {'appender': 'append', 'remover': 'remove', 'iterator': '__iter__'}, _list_decorators() ), set: ({'appender': 'add', 'remover': 'remove', 'iterator': '__iter__'}, _set_decorators() ), # decorators are required for dicts and object collections. # Py3K #dict: ({'iterator': 'values'}, _dict_decorators()), # Py2K dict: ({'iterator': 'itervalues'}, _dict_decorators()), # end Py2K } class MappedCollection(dict): """A basic dictionary-based collection class. Extends dict with the minimal bag semantics that collection classes require. ``set`` and ``remove`` are implemented in terms of a keying function: any callable that takes an object and returns an object for use as a dictionary key. """ def __init__(self, keyfunc): """Create a new collection with keying provided by keyfunc. keyfunc may be any callable any callable that takes an object and returns an object for use as a dictionary key. The keyfunc will be called every time the ORM needs to add a member by value-only (such as when loading instances from the database) or remove a member. 
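        For example (a sketch; ``Note`` is a hypothetical mapped class with a
        ``keyword`` attribute), a keyed collection may be configured as::

            notes = relationship(Note,
                        collection_class=lambda: MappedCollection(
                            keyfunc=lambda note: note.keyword))

        The ``attribute_mapped_collection`` and ``column_mapped_collection``
        helpers in this module build such factories for the common cases.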
The usual cautions about dictionary keying apply- ``keyfunc(object)`` should return the same output for the life of the collection. Keying based on mutable properties can result in unreachable instances "lost" in the collection. """ self.keyfunc = keyfunc @collection.appender @collection.internally_instrumented def set(self, value, _sa_initiator=None): """Add an item by value, consulting the keyfunc for the key.""" key = self.keyfunc(value) self.__setitem__(key, value, _sa_initiator) @collection.remover @collection.internally_instrumented def remove(self, value, _sa_initiator=None): """Remove an item by value, consulting the keyfunc for the key.""" key = self.keyfunc(value) # Let self[key] raise if key is not in this collection # testlib.pragma exempt:__ne__ if self[key] != value: raise sa_exc.InvalidRequestError( "Can not remove '%s': collection holds '%s' for key '%s'. " "Possible cause: is the MappedCollection key function " "based on mutable properties or properties that only obtain " "values after flush?" % (value, self[key], key)) self.__delitem__(key, _sa_initiator) @collection.converter def _convert(self, dictlike): """Validate and convert a dict-like object into values for set()ing. This is called behind the scenes when a MappedCollection is replaced entirely by another collection, as in:: myobj.mappedcollection = {'a':obj1, 'b': obj2} # ... Raises a TypeError if the key in any (key, value) pair in the dictlike object does not match the key that this collection's keyfunc would have assigned for that value. """ for incoming_key, value in util.dictlike_iteritems(dictlike): new_key = self.keyfunc(value) if incoming_key != new_key: raise TypeError( "Found incompatible key %r for value %r; this " "collection's " "keying function requires a key of %r for this value." % ( incoming_key, value, new_key)) yield value # ensure instrumentation is associated with # these built-in classes; if a user-defined class # subclasses these and uses @internally_instrumented, # the superclass is otherwise not instrumented. # see [ticket:2406]. _instrument_class(MappedCollection) _instrument_class(InstrumentedList) _instrument_class(InstrumentedSet) SQLAlchemy-0.8.4/lib/sqlalchemy/orm/dependency.py0000644000076500000240000014023712251147171022436 0ustar classicstaff00000000000000# orm/dependency.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Relationship dependencies. """ from .. import sql, util, exc as sa_exc from . import attributes, exc, sync, unitofwork, \ util as mapperutil from .interfaces import ONETOMANY, MANYTOONE, MANYTOMANY class DependencyProcessor(object): def __init__(self, prop): self.prop = prop self.cascade = prop.cascade self.mapper = prop.mapper self.parent = prop.parent self.secondary = prop.secondary self.direction = prop.direction self.post_update = prop.post_update self.passive_deletes = prop.passive_deletes self.passive_updates = prop.passive_updates self.enable_typechecks = prop.enable_typechecks if self.passive_deletes: self._passive_delete_flag = attributes.PASSIVE_NO_INITIALIZE else: self._passive_delete_flag = attributes.PASSIVE_OFF if self.passive_updates: self._passive_update_flag = attributes.PASSIVE_NO_INITIALIZE else: self._passive_update_flag = attributes.PASSIVE_OFF self.key = prop.key if not self.prop.synchronize_pairs: raise sa_exc.ArgumentError( "Can't build a DependencyProcessor for relationship %s. 
" "No target attributes to populate between parent and " "child are present" % self.prop) @classmethod def from_relationship(cls, prop): return _direction_to_processor[prop.direction](prop) def hasparent(self, state): """return True if the given object instance has a parent, according to the ``InstrumentedAttribute`` handled by this ``DependencyProcessor``. """ return self.parent.class_manager.get_impl(self.key).hasparent(state) def per_property_preprocessors(self, uow): """establish actions and dependencies related to a flush. These actions will operate on all relevant states in the aggregate. """ uow.register_preprocessor(self, True) def per_property_flush_actions(self, uow): after_save = unitofwork.ProcessAll(uow, self, False, True) before_delete = unitofwork.ProcessAll(uow, self, True, True) parent_saves = unitofwork.SaveUpdateAll( uow, self.parent.primary_base_mapper ) child_saves = unitofwork.SaveUpdateAll( uow, self.mapper.primary_base_mapper ) parent_deletes = unitofwork.DeleteAll( uow, self.parent.primary_base_mapper ) child_deletes = unitofwork.DeleteAll( uow, self.mapper.primary_base_mapper ) self.per_property_dependencies(uow, parent_saves, child_saves, parent_deletes, child_deletes, after_save, before_delete ) def per_state_flush_actions(self, uow, states, isdelete): """establish actions and dependencies related to a flush. These actions will operate on all relevant states individually. This occurs only if there are cycles in the 'aggregated' version of events. """ parent_base_mapper = self.parent.primary_base_mapper child_base_mapper = self.mapper.primary_base_mapper child_saves = unitofwork.SaveUpdateAll(uow, child_base_mapper) child_deletes = unitofwork.DeleteAll(uow, child_base_mapper) # locate and disable the aggregate processors # for this dependency if isdelete: before_delete = unitofwork.ProcessAll(uow, self, True, True) before_delete.disabled = True else: after_save = unitofwork.ProcessAll(uow, self, False, True) after_save.disabled = True # check if the "child" side is part of the cycle if child_saves not in uow.cycles: # based on the current dependencies we use, the saves/ # deletes should always be in the 'cycles' collection # together. if this changes, we will have to break up # this method a bit more. assert child_deletes not in uow.cycles # child side is not part of the cycle, so we will link per-state # actions to the aggregate "saves", "deletes" actions child_actions = [ (child_saves, False), (child_deletes, True) ] child_in_cycles = False else: child_in_cycles = True # check if the "parent" side is part of the cycle if not isdelete: parent_saves = unitofwork.SaveUpdateAll( uow, self.parent.base_mapper) parent_deletes = before_delete = None if parent_saves in uow.cycles: parent_in_cycles = True else: parent_deletes = unitofwork.DeleteAll( uow, self.parent.base_mapper) parent_saves = after_save = None if parent_deletes in uow.cycles: parent_in_cycles = True # now create actions /dependencies for each state. for state in states: # detect if there's anything changed or loaded # by a preprocessor on this state/attribute. if not, # we should be able to skip it entirely. 
sum_ = state.manager[self.key].impl.get_all_pending( state, state.dict) if not sum_: continue if isdelete: before_delete = unitofwork.ProcessState(uow, self, True, state) if parent_in_cycles: parent_deletes = unitofwork.DeleteState( uow, state, parent_base_mapper) else: after_save = unitofwork.ProcessState(uow, self, False, state) if parent_in_cycles: parent_saves = unitofwork.SaveUpdateState( uow, state, parent_base_mapper) if child_in_cycles: child_actions = [] for child_state, child in sum_: if child_state not in uow.states: child_action = (None, None) else: (deleted, listonly) = uow.states[child_state] if deleted: child_action = ( unitofwork.DeleteState( uow, child_state, child_base_mapper), True) else: child_action = ( unitofwork.SaveUpdateState( uow, child_state, child_base_mapper), False) child_actions.append(child_action) # establish dependencies between our possibly per-state # parent action and our possibly per-state child action. for child_action, childisdelete in child_actions: self.per_state_dependencies(uow, parent_saves, parent_deletes, child_action, after_save, before_delete, isdelete, childisdelete) def presort_deletes(self, uowcommit, states): return False def presort_saves(self, uowcommit, states): return False def process_deletes(self, uowcommit, states): pass def process_saves(self, uowcommit, states): pass def prop_has_changes(self, uowcommit, states, isdelete): if not isdelete or self.passive_deletes: passive = attributes.PASSIVE_NO_INITIALIZE elif self.direction is MANYTOONE: passive = attributes.PASSIVE_NO_FETCH_RELATED else: passive = attributes.PASSIVE_OFF for s in states: # TODO: add a high speed method # to InstanceState which returns: attribute # has a non-None value, or had one history = uowcommit.get_attribute_history( s, self.key, passive) if history and not history.empty(): return True else: return states and \ not self.prop._is_self_referential and \ self.mapper in uowcommit.mappers def _verify_canload(self, state): if self.prop.uselist and state is None: raise exc.FlushError( "Can't flush None value found in " "collection %s" % (self.prop, )) elif state is not None and \ not self.mapper._canload(state, allow_subtypes=not self.enable_typechecks): if self.mapper._canload(state, allow_subtypes=True): raise exc.FlushError('Attempting to flush an item of type ' '%(x)s as a member of collection ' '"%(y)s". Expected an object of type ' '%(z)s or a polymorphic subclass of ' 'this type. If %(x)s is a subclass of ' '%(z)s, configure mapper "%(zm)s" to ' 'load this subtype polymorphically, or ' 'set enable_typechecks=False to allow ' 'any subtype to be accepted for flush. ' % { 'x': state.class_, 'y': self.prop, 'z': self.mapper.class_, 'zm': self.mapper, }) else: raise exc.FlushError( 'Attempting to flush an item of type ' '%(x)s as a member of collection ' '"%(y)s". Expected an object of type ' '%(z)s or a polymorphic subclass of ' 'this type.' 
% { 'x': state.class_, 'y': self.prop, 'z': self.mapper.class_, }) def _synchronize(self, state, child, associationrow, clearkeys, uowcommit): raise NotImplementedError() def _get_reversed_processed_set(self, uow): if not self.prop._reverse_property: return None process_key = tuple(sorted( [self.key] + [p.key for p in self.prop._reverse_property] )) return uow.memo( ('reverse_key', process_key), set ) def _post_update(self, state, uowcommit, related): for x in related: if x is not None: uowcommit.issue_post_update( state, [r for l, r in self.prop.synchronize_pairs] ) break def _pks_changed(self, uowcommit, state): raise NotImplementedError() def __repr__(self): return "%s(%s)" % (self.__class__.__name__, self.prop) class OneToManyDP(DependencyProcessor): def per_property_dependencies(self, uow, parent_saves, child_saves, parent_deletes, child_deletes, after_save, before_delete, ): if self.post_update: child_post_updates = unitofwork.IssuePostUpdate( uow, self.mapper.primary_base_mapper, False) child_pre_updates = unitofwork.IssuePostUpdate( uow, self.mapper.primary_base_mapper, True) uow.dependencies.update([ (child_saves, after_save), (parent_saves, after_save), (after_save, child_post_updates), (before_delete, child_pre_updates), (child_pre_updates, parent_deletes), (child_pre_updates, child_deletes), ]) else: uow.dependencies.update([ (parent_saves, after_save), (after_save, child_saves), (after_save, child_deletes), (child_saves, parent_deletes), (child_deletes, parent_deletes), (before_delete, child_saves), (before_delete, child_deletes), ]) def per_state_dependencies(self, uow, save_parent, delete_parent, child_action, after_save, before_delete, isdelete, childisdelete): if self.post_update: child_post_updates = unitofwork.IssuePostUpdate( uow, self.mapper.primary_base_mapper, False) child_pre_updates = unitofwork.IssuePostUpdate( uow, self.mapper.primary_base_mapper, True) # TODO: this whole block is not covered # by any tests if not isdelete: if childisdelete: uow.dependencies.update([ (child_action, after_save), (after_save, child_post_updates), ]) else: uow.dependencies.update([ (save_parent, after_save), (child_action, after_save), (after_save, child_post_updates), ]) else: if childisdelete: uow.dependencies.update([ (before_delete, child_pre_updates), (child_pre_updates, delete_parent), ]) else: uow.dependencies.update([ (before_delete, child_pre_updates), (child_pre_updates, delete_parent), ]) elif not isdelete: uow.dependencies.update([ (save_parent, after_save), (after_save, child_action), (save_parent, child_action) ]) else: uow.dependencies.update([ (before_delete, child_action), (child_action, delete_parent) ]) def presort_deletes(self, uowcommit, states): # head object is being deleted, and we manage its list of # child objects the child objects have to have their # foreign key to the parent set to NULL should_null_fks = not self.cascade.delete and \ not self.passive_deletes == 'all' for state in states: history = uowcommit.get_attribute_history( state, self.key, self._passive_delete_flag) if history: for child in history.deleted: if child is not None and self.hasparent(child) is False: if self.cascade.delete_orphan: uowcommit.register_object(child, isdelete=True) else: uowcommit.register_object(child) if should_null_fks: for child in history.unchanged: if child is not None: uowcommit.register_object(child, operation="delete", prop=self.prop) def presort_saves(self, uowcommit, states): children_added = uowcommit.memo(('children_added', self), set) for state in states: 
pks_changed = self._pks_changed(uowcommit, state) if not pks_changed or self.passive_updates: passive = attributes.PASSIVE_NO_INITIALIZE else: passive = attributes.PASSIVE_OFF history = uowcommit.get_attribute_history( state, self.key, passive) if history: for child in history.added: if child is not None: uowcommit.register_object(child, cancel_delete=True, operation="add", prop=self.prop) children_added.update(history.added) for child in history.deleted: if not self.cascade.delete_orphan: uowcommit.register_object(child, isdelete=False, operation='delete', prop=self.prop) elif self.hasparent(child) is False: uowcommit.register_object(child, isdelete=True, operation="delete", prop=self.prop) for c, m, st_, dct_ in self.mapper.cascade_iterator( 'delete', child): uowcommit.register_object( st_, isdelete=True) if pks_changed: if history: for child in history.unchanged: if child is not None: uowcommit.register_object( child, False, self.passive_updates, operation="pk change", prop=self.prop) def process_deletes(self, uowcommit, states): # head object is being deleted, and we manage its list of # child objects the child objects have to have their foreign # key to the parent set to NULL this phase can be called # safely for any cascade but is unnecessary if delete cascade # is on. if self.post_update or not self.passive_deletes == 'all': children_added = uowcommit.memo(('children_added', self), set) for state in states: history = uowcommit.get_attribute_history( state, self.key, self._passive_delete_flag) if history: for child in history.deleted: if child is not None and \ self.hasparent(child) is False: self._synchronize( state, child, None, True, uowcommit, False) if self.post_update and child: self._post_update(child, uowcommit, [state]) if self.post_update or not self.cascade.delete: for child in set(history.unchanged).\ difference(children_added): if child is not None: self._synchronize( state, child, None, True, uowcommit, False) if self.post_update and child: self._post_update(child, uowcommit, [state]) # technically, we can even remove each child from the # collection here too. but this would be a somewhat # inconsistent behavior since it wouldn't happen #if the old parent wasn't deleted but child was moved. 
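    # Illustrative summary (hedged; ``Parent``/``Child`` are hypothetical
    # mapped classes): for a plain one-to-many ``Parent.children`` with no
    # delete cascade, deleting a Parent reaches process_deletes() above and
    # each loaded Child has its parent-referencing foreign key cleared (an
    # UPDATE setting it to NULL) before the Parent row is deleted; with
    # ``cascade="all, delete-orphan"`` the children are registered as deletes
    # instead and the NULL-ing UPDATE is skipped.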
def process_saves(self, uowcommit, states): for state in states: history = uowcommit.get_attribute_history( state, self.key, attributes.PASSIVE_NO_INITIALIZE) if history: for child in history.added: self._synchronize(state, child, None, False, uowcommit, False) if child is not None and self.post_update: self._post_update(child, uowcommit, [state]) for child in history.deleted: if not self.cascade.delete_orphan and \ not self.hasparent(child): self._synchronize(state, child, None, True, uowcommit, False) if self._pks_changed(uowcommit, state): for child in history.unchanged: self._synchronize(state, child, None, False, uowcommit, True) def _synchronize(self, state, child, associationrow, clearkeys, uowcommit, pks_changed): source = state dest = child self._verify_canload(child) if dest is None or \ (not self.post_update and uowcommit.is_deleted(dest)): return if clearkeys: sync.clear(dest, self.mapper, self.prop.synchronize_pairs) else: sync.populate(source, self.parent, dest, self.mapper, self.prop.synchronize_pairs, uowcommit, self.passive_updates and pks_changed) def _pks_changed(self, uowcommit, state): return sync.source_modified( uowcommit, state, self.parent, self.prop.synchronize_pairs) class ManyToOneDP(DependencyProcessor): def __init__(self, prop): DependencyProcessor.__init__(self, prop) self.mapper._dependency_processors.append(DetectKeySwitch(prop)) def per_property_dependencies(self, uow, parent_saves, child_saves, parent_deletes, child_deletes, after_save, before_delete): if self.post_update: parent_post_updates = unitofwork.IssuePostUpdate( uow, self.parent.primary_base_mapper, False) parent_pre_updates = unitofwork.IssuePostUpdate( uow, self.parent.primary_base_mapper, True) uow.dependencies.update([ (child_saves, after_save), (parent_saves, after_save), (after_save, parent_post_updates), (after_save, parent_pre_updates), (before_delete, parent_pre_updates), (parent_pre_updates, child_deletes), ]) else: uow.dependencies.update([ (child_saves, after_save), (after_save, parent_saves), (parent_saves, child_deletes), (parent_deletes, child_deletes) ]) def per_state_dependencies(self, uow, save_parent, delete_parent, child_action, after_save, before_delete, isdelete, childisdelete): if self.post_update: if not isdelete: parent_post_updates = unitofwork.IssuePostUpdate( uow, self.parent.primary_base_mapper, False) if childisdelete: uow.dependencies.update([ (after_save, parent_post_updates), (parent_post_updates, child_action) ]) else: uow.dependencies.update([ (save_parent, after_save), (child_action, after_save), (after_save, parent_post_updates) ]) else: parent_pre_updates = unitofwork.IssuePostUpdate( uow, self.parent.primary_base_mapper, True) uow.dependencies.update([ (before_delete, parent_pre_updates), (parent_pre_updates, delete_parent), (parent_pre_updates, child_action) ]) elif not isdelete: if not childisdelete: uow.dependencies.update([ (child_action, after_save), (after_save, save_parent), ]) else: uow.dependencies.update([ (after_save, save_parent), ]) else: if childisdelete: uow.dependencies.update([ (delete_parent, child_action) ]) def presort_deletes(self, uowcommit, states): if self.cascade.delete or self.cascade.delete_orphan: for state in states: history = uowcommit.get_attribute_history( state, self.key, self._passive_delete_flag) if history: if self.cascade.delete_orphan: todelete = history.sum() else: todelete = history.non_deleted() for child in todelete: if child is None: continue uowcommit.register_object(child, isdelete=True, operation="delete", 
prop=self.prop) t = self.mapper.cascade_iterator('delete', child) for c, m, st_, dct_ in t: uowcommit.register_object( st_, isdelete=True) def presort_saves(self, uowcommit, states): for state in states: uowcommit.register_object(state, operation="add", prop=self.prop) if self.cascade.delete_orphan: history = uowcommit.get_attribute_history( state, self.key, self._passive_delete_flag) if history: for child in history.deleted: if self.hasparent(child) is False: uowcommit.register_object(child, isdelete=True, operation="delete", prop=self.prop) t = self.mapper.cascade_iterator('delete', child) for c, m, st_, dct_ in t: uowcommit.register_object(st_, isdelete=True) def process_deletes(self, uowcommit, states): if self.post_update and \ not self.cascade.delete_orphan and \ not self.passive_deletes == 'all': # post_update means we have to update our # row to not reference the child object # before we can DELETE the row for state in states: self._synchronize(state, None, None, True, uowcommit) if state and self.post_update: history = uowcommit.get_attribute_history( state, self.key, self._passive_delete_flag) if history: self._post_update(state, uowcommit, history.sum()) def process_saves(self, uowcommit, states): for state in states: history = uowcommit.get_attribute_history( state, self.key, attributes.PASSIVE_NO_INITIALIZE) if history: for child in history.added: self._synchronize(state, child, None, False, uowcommit, "add") if self.post_update: self._post_update(state, uowcommit, history.sum()) def _synchronize(self, state, child, associationrow, clearkeys, uowcommit, operation=None): if state is None or \ (not self.post_update and uowcommit.is_deleted(state)): return if operation is not None and \ child is not None and \ not uowcommit.session._contains_state(child): util.warn( "Object of type %s not in session, %s " "operation along '%s' won't proceed" % (mapperutil.state_class_str(child), operation, self.prop)) return if clearkeys or child is None: sync.clear(state, self.parent, self.prop.synchronize_pairs) else: self._verify_canload(child) sync.populate(child, self.mapper, state, self.parent, self.prop.synchronize_pairs, uowcommit, False) class DetectKeySwitch(DependencyProcessor): """For many-to-one relationships with no one-to-many backref, searches for parents through the unit of work when a primary key has changed and updates them. Theoretically, this approach could be expanded to support transparent deletion of objects referenced via many-to-one as well, although the current attribute system doesn't do enough bookkeeping for this to be efficient. 
""" def per_property_preprocessors(self, uow): if self.prop._reverse_property: if self.passive_updates: return else: if False in (prop.passive_updates for \ prop in self.prop._reverse_property): return uow.register_preprocessor(self, False) def per_property_flush_actions(self, uow): parent_saves = unitofwork.SaveUpdateAll( uow, self.parent.base_mapper) after_save = unitofwork.ProcessAll(uow, self, False, False) uow.dependencies.update([ (parent_saves, after_save) ]) def per_state_flush_actions(self, uow, states, isdelete): pass def presort_deletes(self, uowcommit, states): pass def presort_saves(self, uow, states): if not self.passive_updates: # for non-passive updates, register in the preprocess stage # so that mapper save_obj() gets a hold of changes self._process_key_switches(states, uow) def prop_has_changes(self, uow, states, isdelete): if not isdelete and self.passive_updates: d = self._key_switchers(uow, states) return bool(d) return False def process_deletes(self, uowcommit, states): assert False def process_saves(self, uowcommit, states): # for passive updates, register objects in the process stage # so that we avoid ManyToOneDP's registering the object without # the listonly flag in its own preprocess stage (results in UPDATE) # statements being emitted assert self.passive_updates self._process_key_switches(states, uowcommit) def _key_switchers(self, uow, states): switched, notswitched = uow.memo( ('pk_switchers', self), lambda: (set(), set()) ) allstates = switched.union(notswitched) for s in states: if s not in allstates: if self._pks_changed(uow, s): switched.add(s) else: notswitched.add(s) return switched def _process_key_switches(self, deplist, uowcommit): switchers = self._key_switchers(uowcommit, deplist) if switchers: # if primary key values have actually changed somewhere, perform # a linear search through the UOW in search of a parent. 
for state in uowcommit.session.identity_map.all_states(): if not issubclass(state.class_, self.parent.class_): continue dict_ = state.dict related = state.get_impl(self.key).get(state, dict_, passive=self._passive_update_flag) if related is not attributes.PASSIVE_NO_RESULT and \ related is not None: related_state = attributes.instance_state(dict_[self.key]) if related_state in switchers: uowcommit.register_object(state, False, self.passive_updates) sync.populate( related_state, self.mapper, state, self.parent, self.prop.synchronize_pairs, uowcommit, self.passive_updates) def _pks_changed(self, uowcommit, state): return bool(state.key) and sync.source_modified(uowcommit, state, self.mapper, self.prop.synchronize_pairs) class ManyToManyDP(DependencyProcessor): def per_property_dependencies(self, uow, parent_saves, child_saves, parent_deletes, child_deletes, after_save, before_delete ): uow.dependencies.update([ (parent_saves, after_save), (child_saves, after_save), (after_save, child_deletes), # a rowswitch on the parent from deleted to saved # can make this one occur, as the "save" may remove # an element from the # "deleted" list before we have a chance to # process its child rows (before_delete, parent_saves), (before_delete, parent_deletes), (before_delete, child_deletes), (before_delete, child_saves), ]) def per_state_dependencies(self, uow, save_parent, delete_parent, child_action, after_save, before_delete, isdelete, childisdelete): if not isdelete: if childisdelete: uow.dependencies.update([ (save_parent, after_save), (after_save, child_action), ]) else: uow.dependencies.update([ (save_parent, after_save), (child_action, after_save), ]) else: uow.dependencies.update([ (before_delete, child_action), (before_delete, delete_parent) ]) def presort_deletes(self, uowcommit, states): # TODO: no tests fail if this whole # thing is removed !!!! if not self.passive_deletes: # if no passive deletes, load history on # the collection, so that prop_has_changes() # returns True for state in states: uowcommit.get_attribute_history( state, self.key, self._passive_delete_flag) def presort_saves(self, uowcommit, states): if not self.passive_updates: # if no passive updates, load history on # each collection where parent has changed PK, # so that prop_has_changes() returns True for state in states: if self._pks_changed(uowcommit, state): history = uowcommit.get_attribute_history( state, self.key, attributes.PASSIVE_OFF) if not self.cascade.delete_orphan: return # check for child items removed from the collection # if delete_orphan check is turned on. 
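        # (descriptive note) delete-orphan cascade on a many-to-many
        # relationship is only accepted by the ORM together with
        # ``single_parent=True``; in that configuration, members removed from
        # their sole parent's collection are flagged for deletion here.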
for state in states: history = uowcommit.get_attribute_history( state, self.key, attributes.PASSIVE_NO_INITIALIZE) if history: for child in history.deleted: if self.hasparent(child) is False: uowcommit.register_object(child, isdelete=True, operation="delete", prop=self.prop) for c, m, st_, dct_ in self.mapper.cascade_iterator( 'delete', child): uowcommit.register_object( st_, isdelete=True) def process_deletes(self, uowcommit, states): secondary_delete = [] secondary_insert = [] secondary_update = [] processed = self._get_reversed_processed_set(uowcommit) tmp = set() for state in states: # this history should be cached already, as # we loaded it in preprocess_deletes history = uowcommit.get_attribute_history( state, self.key, self._passive_delete_flag) if history: for child in history.non_added(): if child is None or \ (processed is not None and (state, child) in processed): continue associationrow = {} if not self._synchronize( state, child, associationrow, False, uowcommit, "delete"): continue secondary_delete.append(associationrow) tmp.update((c, state) for c in history.non_added()) if processed is not None: processed.update(tmp) self._run_crud(uowcommit, secondary_insert, secondary_update, secondary_delete) def process_saves(self, uowcommit, states): secondary_delete = [] secondary_insert = [] secondary_update = [] processed = self._get_reversed_processed_set(uowcommit) tmp = set() for state in states: need_cascade_pks = not self.passive_updates and \ self._pks_changed(uowcommit, state) if need_cascade_pks: passive = attributes.PASSIVE_OFF else: passive = attributes.PASSIVE_NO_INITIALIZE history = uowcommit.get_attribute_history(state, self.key, passive) if history: for child in history.added: if (processed is not None and (state, child) in processed): continue associationrow = {} if not self._synchronize(state, child, associationrow, False, uowcommit, "add"): continue secondary_insert.append(associationrow) for child in history.deleted: if (processed is not None and (state, child) in processed): continue associationrow = {} if not self._synchronize(state, child, associationrow, False, uowcommit, "delete"): continue secondary_delete.append(associationrow) tmp.update((c, state) for c in history.added + history.deleted) if need_cascade_pks: for child in history.unchanged: associationrow = {} sync.update(state, self.parent, associationrow, "old_", self.prop.synchronize_pairs) sync.update(child, self.mapper, associationrow, "old_", self.prop.secondary_synchronize_pairs) secondary_update.append(associationrow) if processed is not None: processed.update(tmp) self._run_crud(uowcommit, secondary_insert, secondary_update, secondary_delete) def _run_crud(self, uowcommit, secondary_insert, secondary_update, secondary_delete): connection = uowcommit.transaction.connection(self.mapper) if secondary_delete: associationrow = secondary_delete[0] statement = self.secondary.delete(sql.and_(*[ c == sql.bindparam(c.key, type_=c.type) for c in self.secondary.c if c.key in associationrow ])) result = connection.execute(statement, secondary_delete) if result.supports_sane_multi_rowcount() and \ result.rowcount != len(secondary_delete): raise exc.StaleDataError( "DELETE statement on table '%s' expected to delete " "%d row(s); Only %d were matched." 
% (self.secondary.description, len(secondary_delete), result.rowcount) ) if secondary_update: associationrow = secondary_update[0] statement = self.secondary.update(sql.and_(*[ c == sql.bindparam("old_" + c.key, type_=c.type) for c in self.secondary.c if c.key in associationrow ])) result = connection.execute(statement, secondary_update) if result.supports_sane_multi_rowcount() and \ result.rowcount != len(secondary_update): raise exc.StaleDataError( "UPDATE statement on table '%s' expected to update " "%d row(s); Only %d were matched." % (self.secondary.description, len(secondary_update), result.rowcount) ) if secondary_insert: statement = self.secondary.insert() connection.execute(statement, secondary_insert) def _synchronize(self, state, child, associationrow, clearkeys, uowcommit, operation): # this checks for None if uselist=True self._verify_canload(child) # but if uselist=False we get here. If child is None, # no association row can be generated, so return. if child is None: return False if child is not None and not uowcommit.session._contains_state(child): if not child.deleted: util.warn( "Object of type %s not in session, %s " "operation along '%s' won't proceed" % (mapperutil.state_class_str(child), operation, self.prop)) return False sync.populate_dict(state, self.parent, associationrow, self.prop.synchronize_pairs) sync.populate_dict(child, self.mapper, associationrow, self.prop.secondary_synchronize_pairs) return True def _pks_changed(self, uowcommit, state): return sync.source_modified( uowcommit, state, self.parent, self.prop.synchronize_pairs) _direction_to_processor = { ONETOMANY: OneToManyDP, MANYTOONE: ManyToOneDP, MANYTOMANY: ManyToManyDP, } SQLAlchemy-0.8.4/lib/sqlalchemy/orm/deprecated_interfaces.py0000644000076500000240000005241412251150015024611 0ustar classicstaff00000000000000# orm/deprecated_interfaces.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php from .. import event, util from .interfaces import EXT_CONTINUE class MapperExtension(object): """Base implementation for :class:`.Mapper` event hooks. .. note:: :class:`.MapperExtension` is deprecated. Please refer to :func:`.event.listen` as well as :class:`.MapperEvents`. New extension classes subclass :class:`.MapperExtension` and are specified using the ``extension`` mapper() argument, which is a single :class:`.MapperExtension` or a list of such:: from sqlalchemy.orm.interfaces import MapperExtension class MyExtension(MapperExtension): def before_insert(self, mapper, connection, instance): print "instance %s before insert !" % instance m = mapper(User, users_table, extension=MyExtension()) A single mapper can maintain a chain of ``MapperExtension`` objects. When a particular mapping event occurs, the corresponding method on each ``MapperExtension`` is invoked serially, and each method has the ability to halt the chain from proceeding further:: m = mapper(User, users_table, extension=[ext1, ext2, ext3]) Each ``MapperExtension`` method returns the symbol EXT_CONTINUE by default. This symbol generally means "move to the next ``MapperExtension`` for processing". For methods that return objects like translated rows or new object instances, EXT_CONTINUE means the result of the method should be ignored. In some cases it's required for a default mapper activity to be performed, such as adding a new instance to a result list. 
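    For example, a minimal extension (a sketch; the class name is arbitrary)
    that only observes incoming rows and lets the mapper construct objects
    normally would return EXT_CONTINUE from ``create_instance()``::

        class WatchingExtension(MapperExtension):
            def create_instance(self, mapper, selectcontext, row, class_):
                print "got a row for %s" % class_.__name__
                return EXT_CONTINUE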
The symbol EXT_STOP has significance within a chain of ``MapperExtension`` objects that the chain will be stopped when this symbol is returned. Like EXT_CONTINUE, it also has additional significance in some cases that a default mapper activity will not be performed. """ @classmethod def _adapt_instrument_class(cls, self, listener): cls._adapt_listener_methods(self, listener, ('instrument_class',)) @classmethod def _adapt_listener(cls, self, listener): cls._adapt_listener_methods( self, listener, ( 'init_instance', 'init_failed', 'translate_row', 'create_instance', 'append_result', 'populate_instance', 'reconstruct_instance', 'before_insert', 'after_insert', 'before_update', 'after_update', 'before_delete', 'after_delete' )) @classmethod def _adapt_listener_methods(cls, self, listener, methods): for meth in methods: me_meth = getattr(MapperExtension, meth) ls_meth = getattr(listener, meth) if not util.methods_equivalent(me_meth, ls_meth): if meth == 'reconstruct_instance': def go(ls_meth): def reconstruct(instance, ctx): ls_meth(self, instance) return reconstruct event.listen(self.class_manager, 'load', go(ls_meth), raw=False, propagate=True) elif meth == 'init_instance': def go(ls_meth): def init_instance(instance, args, kwargs): ls_meth(self, self.class_, self.class_manager.original_init, instance, args, kwargs) return init_instance event.listen(self.class_manager, 'init', go(ls_meth), raw=False, propagate=True) elif meth == 'init_failed': def go(ls_meth): def init_failed(instance, args, kwargs): util.warn_exception(ls_meth, self, self.class_, self.class_manager.original_init, instance, args, kwargs) return init_failed event.listen(self.class_manager, 'init_failure', go(ls_meth), raw=False, propagate=True) else: event.listen(self, "%s" % meth, ls_meth, raw=False, retval=True, propagate=True) def instrument_class(self, mapper, class_): """Receive a class when the mapper is first constructed, and has applied instrumentation to the mapped class. The return value is only significant within the ``MapperExtension`` chain; the parent mapper's behavior isn't modified by this method. """ return EXT_CONTINUE def init_instance(self, mapper, class_, oldinit, instance, args, kwargs): """Receive an instance when it's constructor is called. This method is only called during a userland construction of an object. It is not called when an object is loaded from the database. The return value is only significant within the ``MapperExtension`` chain; the parent mapper's behavior isn't modified by this method. """ return EXT_CONTINUE def init_failed(self, mapper, class_, oldinit, instance, args, kwargs): """Receive an instance when it's constructor has been called, and raised an exception. This method is only called during a userland construction of an object. It is not called when an object is loaded from the database. The return value is only significant within the ``MapperExtension`` chain; the parent mapper's behavior isn't modified by this method. """ return EXT_CONTINUE def translate_row(self, mapper, context, row): """Perform pre-processing on the given result row and return a new row instance. This is called when the mapper first receives a row, before the object identity or the instance itself has been derived from that row. The given row may or may not be a ``RowProxy`` object - it will always be a dictionary-like object which contains mapped columns as keys. The returned object should also be a dictionary-like object which recognizes mapped columns as keys. 
If the ultimate return value is EXT_CONTINUE, the row is not translated. """ return EXT_CONTINUE def create_instance(self, mapper, selectcontext, row, class_): """Receive a row when a new object instance is about to be created from that row. The method can choose to create the instance itself, or it can return EXT_CONTINUE to indicate normal object creation should take place. mapper The mapper doing the operation selectcontext The QueryContext generated from the Query. row The result row from the database class\_ The class we are mapping. return value A new object instance, or EXT_CONTINUE """ return EXT_CONTINUE def append_result(self, mapper, selectcontext, row, instance, result, **flags): """Receive an object instance before that instance is appended to a result list. If this method returns EXT_CONTINUE, result appending will proceed normally. if this method returns any other value or None, result appending will not proceed for this instance, giving this extension an opportunity to do the appending itself, if desired. mapper The mapper doing the operation. selectcontext The QueryContext generated from the Query. row The result row from the database. instance The object instance to be appended to the result. result List to which results are being appended. \**flags extra information about the row, same as criterion in ``create_row_processor()`` method of :class:`~sqlalchemy.orm.interfaces.MapperProperty` """ return EXT_CONTINUE def populate_instance(self, mapper, selectcontext, row, instance, **flags): """Receive an instance before that instance has its attributes populated. This usually corresponds to a newly loaded instance but may also correspond to an already-loaded instance which has unloaded attributes to be populated. The method may be called many times for a single instance, as multiple result rows are used to populate eagerly loaded collections. If this method returns EXT_CONTINUE, instance population will proceed normally. If any other value or None is returned, instance population will not proceed, giving this extension an opportunity to populate the instance itself, if desired. .. deprecated:: 0.5 Most usages of this hook are obsolete. For a generic "object has been newly created from a row" hook, use ``reconstruct_instance()``, or the ``@orm.reconstructor`` decorator. """ return EXT_CONTINUE def reconstruct_instance(self, mapper, instance): """Receive an object instance after it has been created via ``__new__``, and after initial attribute population has occurred. This typically occurs when the instance is created based on incoming result rows, and is only called once for that instance's lifetime. Note that during a result-row load, this method is called upon the first row received for this instance. Note that some attributes and collections may or may not be loaded or even initialized, depending on what's present in the result rows. The return value is only significant within the ``MapperExtension`` chain; the parent mapper's behavior isn't modified by this method. """ return EXT_CONTINUE def before_insert(self, mapper, connection, instance): """Receive an object instance before that instance is inserted into its table. This is a good place to set up primary key values and such that aren't handled otherwise. Column-based attributes can be modified within this method which will result in the new value being inserted. However *no* changes to the overall flush plan can be made, and manipulation of the ``Session`` will not have the desired effect. 
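        A minimal sketch of such a column-level modification, assuming the
        mapped class has a ``created_at`` column-based attribute (an
        illustrative name)::

            import datetime

            class TimestampExtension(MapperExtension):
                def before_insert(self, mapper, connection, instance):
                    # touch only the instance being handled; the Session
                    # itself is left alone
                    instance.created_at = datetime.datetime.utcnow()
                    return EXT_CONTINUE
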
To manipulate the ``Session`` within an extension, use ``SessionExtension``. The return value is only significant within the ``MapperExtension`` chain; the parent mapper's behavior isn't modified by this method. """ return EXT_CONTINUE def after_insert(self, mapper, connection, instance): """Receive an object instance after that instance is inserted. The return value is only significant within the ``MapperExtension`` chain; the parent mapper's behavior isn't modified by this method. """ return EXT_CONTINUE def before_update(self, mapper, connection, instance): """Receive an object instance before that instance is updated. Note that this method is called for all instances that are marked as "dirty", even those which have no net changes to their column-based attributes. An object is marked as dirty when any of its column-based attributes have a "set attribute" operation called or when any of its collections are modified. If, at update time, no column-based attributes have any net changes, no UPDATE statement will be issued. This means that an instance being sent to before_update is *not* a guarantee that an UPDATE statement will be issued (although you can affect the outcome here). To detect if the column-based attributes on the object have net changes, and will therefore generate an UPDATE statement, use ``object_session(instance).is_modified(instance, include_collections=False)``. Column-based attributes can be modified within this method which will result in the new value being updated. However *no* changes to the overall flush plan can be made, and manipulation of the ``Session`` will not have the desired effect. To manipulate the ``Session`` within an extension, use ``SessionExtension``. The return value is only significant within the ``MapperExtension`` chain; the parent mapper's behavior isn't modified by this method. """ return EXT_CONTINUE def after_update(self, mapper, connection, instance): """Receive an object instance after that instance is updated. The return value is only significant within the ``MapperExtension`` chain; the parent mapper's behavior isn't modified by this method. """ return EXT_CONTINUE def before_delete(self, mapper, connection, instance): """Receive an object instance before that instance is deleted. Note that *no* changes to the overall flush plan can be made here; and manipulation of the ``Session`` will not have the desired effect. To manipulate the ``Session`` within an extension, use ``SessionExtension``. The return value is only significant within the ``MapperExtension`` chain; the parent mapper's behavior isn't modified by this method. """ return EXT_CONTINUE def after_delete(self, mapper, connection, instance): """Receive an object instance after that instance is deleted. The return value is only significant within the ``MapperExtension`` chain; the parent mapper's behavior isn't modified by this method. """ return EXT_CONTINUE class SessionExtension(object): """Base implementation for :class:`.Session` event hooks. .. note:: :class:`.SessionExtension` is deprecated. Please refer to :func:`.event.listen` as well as :class:`.SessionEvents`. Subclasses may be installed into a :class:`.Session` (or :class:`.sessionmaker`) using the ``extension`` keyword argument:: from sqlalchemy.orm.interfaces import SessionExtension class MySessionExtension(SessionExtension): def before_commit(self, session): print "before commit!" 
Session = sessionmaker(extension=MySessionExtension()) The same :class:`.SessionExtension` instance can be used with any number of sessions. """ @classmethod def _adapt_listener(cls, self, listener): for meth in [ 'before_commit', 'after_commit', 'after_rollback', 'before_flush', 'after_flush', 'after_flush_postexec', 'after_begin', 'after_attach', 'after_bulk_update', 'after_bulk_delete', ]: me_meth = getattr(SessionExtension, meth) ls_meth = getattr(listener, meth) if not util.methods_equivalent(me_meth, ls_meth): event.listen(self, meth, getattr(listener, meth)) def before_commit(self, session): """Execute right before commit is called. Note that this may not be per-flush if a longer running transaction is ongoing.""" def after_commit(self, session): """Execute after a commit has occurred. Note that this may not be per-flush if a longer running transaction is ongoing.""" def after_rollback(self, session): """Execute after a rollback has occurred. Note that this may not be per-flush if a longer running transaction is ongoing.""" def before_flush(self, session, flush_context, instances): """Execute before flush process has started. `instances` is an optional list of objects which were passed to the ``flush()`` method. """ def after_flush(self, session, flush_context): """Execute after flush has completed, but before commit has been called. Note that the session's state is still in pre-flush, i.e. 'new', 'dirty', and 'deleted' lists still show pre-flush state as well as the history settings on instance attributes.""" def after_flush_postexec(self, session, flush_context): """Execute after flush has completed, and after the post-exec state occurs. This will be when the 'new', 'dirty', and 'deleted' lists are in their final state. An actual commit() may or may not have occurred, depending on whether or not the flush started its own transaction or participated in a larger transaction. """ def after_begin(self, session, transaction, connection): """Execute after a transaction is begun on a connection `transaction` is the SessionTransaction. This method is called after an engine level transaction is begun on a connection. """ def after_attach(self, session, instance): """Execute after an instance is attached to a session. This is called after an add, delete or merge. """ def after_bulk_update(self, session, query, query_context, result): """Execute after a bulk update operation to the session. This is called after a session.query(...).update() `query` is the query object that this update operation was called on. `query_context` was the query context object. `result` is the result object returned from the bulk operation. """ def after_bulk_delete(self, session, query, query_context, result): """Execute after a bulk delete operation to the session. This is called after a session.query(...).delete() `query` is the query object that this delete operation was called on. `query_context` was the query context object. `result` is the result object returned from the bulk operation. """ class AttributeExtension(object): """Base implementation for :class:`.AttributeImpl` event hooks, events that fire upon attribute mutations in user code. .. note:: :class:`.AttributeExtension` is deprecated. Please refer to :func:`.event.listen` as well as :class:`.AttributeEvents`. :class:`.AttributeExtension` is used to listen for set, remove, and append events on individual mapped attributes. 
It is established on an individual mapped attribute using the `extension` argument, available on :func:`.column_property`, :func:`.relationship`, and others:: from sqlalchemy.orm.interfaces import AttributeExtension from sqlalchemy.orm import mapper, relationship, column_property class MyAttrExt(AttributeExtension): def append(self, state, value, initiator): print "append event !" return value def set(self, state, value, oldvalue, initiator): print "set event !" return value mapper(SomeClass, sometable, properties={ 'foo':column_property(sometable.c.foo, extension=MyAttrExt()), 'bar':relationship(Bar, extension=MyAttrExt()) }) Note that the :class:`.AttributeExtension` methods :meth:`~.AttributeExtension.append` and :meth:`~.AttributeExtension.set` need to return the ``value`` parameter. The returned value is used as the effective value, and allows the extension to change what is ultimately persisted. AttributeExtension is assembled within the descriptors associated with a mapped class. """ active_history = True """indicates that the set() method would like to receive the 'old' value, even if it means firing lazy callables. Note that ``active_history`` can also be set directly via :func:`.column_property` and :func:`.relationship`. """ @classmethod def _adapt_listener(cls, self, listener): event.listen(self, 'append', listener.append, active_history=listener.active_history, raw=True, retval=True) event.listen(self, 'remove', listener.remove, active_history=listener.active_history, raw=True, retval=True) event.listen(self, 'set', listener.set, active_history=listener.active_history, raw=True, retval=True) def append(self, state, value, initiator): """Receive a collection append event. The returned value will be used as the actual value to be appended. """ return value def remove(self, state, value, initiator): """Receive a remove event. No return value is defined. """ pass def set(self, state, value, oldvalue, initiator): """Receive a set event. The returned value will be used as the actual value to be set. """ return value SQLAlchemy-0.8.4/lib/sqlalchemy/orm/descriptor_props.py0000644000076500000240000004071412251150015023707 0ustar classicstaff00000000000000# orm/descriptor_props.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Descriptor properties are more "auxiliary" properties that exist as configurational elements, but don't participate as actively in the load/persist ORM loop. """ from .interfaces import MapperProperty, PropComparator from .util import _none_set from . import attributes, strategies from .. 
import util, sql, exc as sa_exc, event, schema from ..sql import expression properties = util.importlater('sqlalchemy.orm', 'properties') class DescriptorProperty(MapperProperty): """:class:`.MapperProperty` which proxies access to a user-defined descriptor.""" doc = None def instrument_class(self, mapper): prop = self class _ProxyImpl(object): accepts_scalar_loader = False expire_missing = True collection = False def __init__(self, key): self.key = key if hasattr(prop, 'get_history'): def get_history(self, state, dict_, passive=attributes.PASSIVE_OFF): return prop.get_history(state, dict_, passive) if self.descriptor is None: desc = getattr(mapper.class_, self.key, None) if mapper._is_userland_descriptor(desc): self.descriptor = desc if self.descriptor is None: def fset(obj, value): setattr(obj, self.name, value) def fdel(obj): delattr(obj, self.name) def fget(obj): return getattr(obj, self.name) self.descriptor = property( fget=fget, fset=fset, fdel=fdel, ) proxy_attr = attributes.\ create_proxied_attribute(self.descriptor)\ ( self.parent.class_, self.key, self.descriptor, lambda: self._comparator_factory(mapper), doc=self.doc, original_property=self ) proxy_attr.impl = _ProxyImpl(self.key) mapper.class_manager.instrument_attribute(self.key, proxy_attr) class CompositeProperty(DescriptorProperty): """Defines a "composite" mapped attribute, representing a collection of columns as one attribute. :class:`.CompositeProperty` is constructed using the :func:`.composite` function. See also: :ref:`mapper_composite` """ def __init__(self, class_, *attrs, **kwargs): self.attrs = attrs self.composite_class = class_ self.active_history = kwargs.get('active_history', False) self.deferred = kwargs.get('deferred', False) self.group = kwargs.get('group', None) self.comparator_factory = kwargs.pop('comparator_factory', self.__class__.Comparator) if 'info' in kwargs: self.info = kwargs.pop('info') util.set_creation_order(self) self._create_descriptor() def instrument_class(self, mapper): super(CompositeProperty, self).instrument_class(mapper) self._setup_event_handlers() def do_init(self): """Initialization which occurs after the :class:`.CompositeProperty` has been associated with its parent mapper. """ self._init_props() self._setup_arguments_on_columns() def _create_descriptor(self): """Create the Python descriptor that will serve as the access point on instances of the mapped class. """ def fget(instance): dict_ = attributes.instance_dict(instance) state = attributes.instance_state(instance) if self.key not in dict_: # key not present. Iterate through related # attributes, retrieve their values. This # ensures they all load. values = [ getattr(instance, key) for key in self._attribute_keys ] # current expected behavior here is that the composite is # created on access if the object is persistent or if # col attributes have non-None. This would be better # if the composite were created unconditionally, # but that would be a behavioral change. 
if self.key not in dict_ and ( state.key is not None or not _none_set.issuperset(values) ): dict_[self.key] = self.composite_class(*values) state.manager.dispatch.refresh(state, None, [self.key]) return dict_.get(self.key, None) def fset(instance, value): dict_ = attributes.instance_dict(instance) state = attributes.instance_state(instance) attr = state.manager[self.key] previous = dict_.get(self.key, attributes.NO_VALUE) for fn in attr.dispatch.set: value = fn(state, value, previous, attr.impl) dict_[self.key] = value if value is None: for key in self._attribute_keys: setattr(instance, key, None) else: for key, value in zip( self._attribute_keys, value.__composite_values__()): setattr(instance, key, value) def fdel(instance): state = attributes.instance_state(instance) dict_ = attributes.instance_dict(instance) previous = dict_.pop(self.key, attributes.NO_VALUE) attr = state.manager[self.key] attr.dispatch.remove(state, previous, attr.impl) for key in self._attribute_keys: setattr(instance, key, None) self.descriptor = property(fget, fset, fdel) @util.memoized_property def _comparable_elements(self): return [ getattr(self.parent.class_, prop.key) for prop in self.props ] def _init_props(self): self.props = props = [] for attr in self.attrs: if isinstance(attr, basestring): prop = self.parent.get_property(attr) elif isinstance(attr, schema.Column): prop = self.parent._columntoproperty[attr] elif isinstance(attr, attributes.InstrumentedAttribute): prop = attr.property props.append(prop) @property def columns(self): return [a for a in self.attrs if isinstance(a, schema.Column)] def _setup_arguments_on_columns(self): """Propagate configuration arguments made on this composite to the target columns, for those that apply. """ for prop in self.props: prop.active_history = self.active_history if self.deferred: prop.deferred = self.deferred prop.strategy_class = strategies.DeferredColumnLoader prop.group = self.group def _setup_event_handlers(self): """Establish events that populate/expire the composite attribute.""" def load_handler(state, *args): dict_ = state.dict if self.key in dict_: return # if column elements aren't loaded, skip. # __get__() will initiate a load for those # columns for k in self._attribute_keys: if k not in dict_: return #assert self.key not in dict_ dict_[self.key] = self.composite_class( *[state.dict[key] for key in self._attribute_keys] ) def expire_handler(state, keys): if keys is None or set(self._attribute_keys).intersection(keys): state.dict.pop(self.key, None) def insert_update_handler(mapper, connection, state): """After an insert or update, some columns may be expired due to server side defaults, or re-populated due to client side defaults. Pop out the composite value here so that it recreates. 
""" state.dict.pop(self.key, None) event.listen(self.parent, 'after_insert', insert_update_handler, raw=True) event.listen(self.parent, 'after_update', insert_update_handler, raw=True) event.listen(self.parent, 'load', load_handler, raw=True, propagate=True) event.listen(self.parent, 'refresh', load_handler, raw=True, propagate=True) event.listen(self.parent, 'expire', expire_handler, raw=True, propagate=True) # TODO: need a deserialize hook here @util.memoized_property def _attribute_keys(self): return [ prop.key for prop in self.props ] def get_history(self, state, dict_, passive=attributes.PASSIVE_OFF): """Provided for userland code that uses attributes.get_history().""" added = [] deleted = [] has_history = False for prop in self.props: key = prop.key hist = state.manager[key].impl.get_history(state, dict_) if hist.has_changes(): has_history = True non_deleted = hist.non_deleted() if non_deleted: added.extend(non_deleted) else: added.append(None) if hist.deleted: deleted.extend(hist.deleted) else: deleted.append(None) if has_history: return attributes.History( [self.composite_class(*added)], (), [self.composite_class(*deleted)] ) else: return attributes.History( (), [self.composite_class(*added)], () ) def _comparator_factory(self, mapper): return self.comparator_factory(self, mapper) class Comparator(PropComparator): """Produce boolean, comparison, and other operators for :class:`.CompositeProperty` attributes. See the example in :ref:`composite_operations` for an overview of usage , as well as the documentation for :class:`.PropComparator`. See also: :class:`.PropComparator` :class:`.ColumnOperators` :ref:`types_operators` :attr:`.TypeEngine.comparator_factory` """ def __clause_element__(self): return expression.ClauseList(group=False, *self._comparable_elements) __hash__ = None @util.memoized_property def _comparable_elements(self): if self.adapter: # we need to do a little fudging here because # the adapter function we're given only accepts # ColumnElements, but our prop._comparable_elements is returning # InstrumentedAttribute, because we support the use case # of composites that refer to relationships. The better # solution here is to open up how AliasedClass interacts # with PropComparators so more context is available. return [self.adapter(x.__clause_element__()) for x in self.prop._comparable_elements] else: return self.prop._comparable_elements def __eq__(self, other): if other is None: values = [None] * len(self.prop._comparable_elements) else: values = other.__composite_values__() comparisons = [ a == b for a, b in zip(self.prop._comparable_elements, values) ] if self.adapter: comparisons = [self.adapter(x) for x in comparisons] return sql.and_(*comparisons) def __ne__(self, other): return sql.not_(self.__eq__(other)) def __str__(self): return str(self.parent.class_.__name__) + "." + self.key class ConcreteInheritedProperty(DescriptorProperty): """A 'do nothing' :class:`.MapperProperty` that disables an attribute on a concrete subclass that is only present on the inherited mapper, not the concrete classes' mapper. Cases where this occurs include: * When the superclass mapper is mapped against a "polymorphic union", which includes all attributes from all subclasses. * When a relationship() is configured on an inherited mapper, but not on the subclass mapper. Concrete mappers require that relationship() is configured explicitly on each subclass. 
""" def _comparator_factory(self, mapper): comparator_callable = None for m in self.parent.iterate_to_root(): p = m._props[self.key] if not isinstance(p, ConcreteInheritedProperty): comparator_callable = p.comparator_factory break return comparator_callable def __init__(self): def warn(): raise AttributeError("Concrete %s does not implement " "attribute %r at the instance level. Add this " "property explicitly to %s." % (self.parent, self.key, self.parent)) class NoninheritedConcreteProp(object): def __set__(s, obj, value): warn() def __delete__(s, obj): warn() def __get__(s, obj, owner): if obj is None: return self.descriptor warn() self.descriptor = NoninheritedConcreteProp() class SynonymProperty(DescriptorProperty): def __init__(self, name, map_column=None, descriptor=None, comparator_factory=None, doc=None): self.name = name self.map_column = map_column self.descriptor = descriptor self.comparator_factory = comparator_factory self.doc = doc or (descriptor and descriptor.__doc__) or None util.set_creation_order(self) # TODO: when initialized, check _proxied_property, # emit a warning if its not a column-based property @util.memoized_property def _proxied_property(self): return getattr(self.parent.class_, self.name).property def _comparator_factory(self, mapper): prop = self._proxied_property if self.comparator_factory: comp = self.comparator_factory(prop, mapper) else: comp = prop.comparator_factory(prop, mapper) return comp def set_parent(self, parent, init): if self.map_column: # implement the 'map_column' option. if self.key not in parent.mapped_table.c: raise sa_exc.ArgumentError( "Can't compile synonym '%s': no column on table " "'%s' named '%s'" % (self.name, parent.mapped_table.description, self.key)) elif parent.mapped_table.c[self.key] in \ parent._columntoproperty and \ parent._columntoproperty[ parent.mapped_table.c[self.key] ].key == self.name: raise sa_exc.ArgumentError( "Can't call map_column=True for synonym %r=%r, " "a ColumnProperty already exists keyed to the name " "%r for column %r" % (self.key, self.name, self.name, self.key) ) p = properties.ColumnProperty(parent.mapped_table.c[self.key]) parent._configure_property( self.name, p, init=init, setparent=True) p._mapped_by_synonym = self.key self.parent = parent class ComparableProperty(DescriptorProperty): """Instruments a Python property for use in query expressions.""" def __init__(self, comparator_factory, descriptor=None, doc=None): self.descriptor = descriptor self.comparator_factory = comparator_factory self.doc = doc or (descriptor and descriptor.__doc__) or None util.set_creation_order(self) def _comparator_factory(self, mapper): return self.comparator_factory(self, mapper) SQLAlchemy-0.8.4/lib/sqlalchemy/orm/dynamic.py0000644000076500000240000003176112251150015021734 0ustar classicstaff00000000000000# orm/dynamic.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Dynamic collection API. Dynamic collections act like Query() objects for read operations and support basic add/delete mutation. """ from .. import log, util, exc from ..sql import operators from . 
import ( attributes, object_session, util as orm_util, strategies, object_mapper, exc as orm_exc ) from .query import Query class DynaLoader(strategies.AbstractRelationshipLoader): def init_class_attribute(self, mapper): self.is_class_level = True if not self.uselist: raise exc.InvalidRequestError( "On relationship %s, 'dynamic' loaders cannot be used with " "many-to-one/one-to-one relationships and/or " "uselist=False." % self.parent_property) strategies._register_attribute(self, mapper, useobject=True, uselist=True, impl_class=DynamicAttributeImpl, target_mapper=self.parent_property.mapper, order_by=self.parent_property.order_by, query_class=self.parent_property.query_class, backref=self.parent_property.back_populates, ) log.class_logger(DynaLoader) class DynamicAttributeImpl(attributes.AttributeImpl): uses_objects = True accepts_scalar_loader = False supports_population = False collection = False def __init__(self, class_, key, typecallable, dispatch, target_mapper, order_by, query_class=None, **kw): super(DynamicAttributeImpl, self).\ __init__(class_, key, typecallable, dispatch, **kw) self.target_mapper = target_mapper self.order_by = order_by if not query_class: self.query_class = AppenderQuery elif AppenderMixin in query_class.mro(): self.query_class = query_class else: self.query_class = mixin_user_query(query_class) def get(self, state, dict_, passive=attributes.PASSIVE_OFF): if not passive & attributes.SQL_OK: return self._get_collection_history(state, attributes.PASSIVE_NO_INITIALIZE).added_items else: return self.query_class(self, state) def get_collection(self, state, dict_, user_data=None, passive=attributes.PASSIVE_NO_INITIALIZE): if not passive & attributes.SQL_OK: return self._get_collection_history(state, passive).added_items else: history = self._get_collection_history(state, passive) return history.added_plus_unchanged def fire_append_event(self, state, dict_, value, initiator, collection_history=None): if collection_history is None: collection_history = self._modified_event(state, dict_) collection_history.add_added(value) for fn in self.dispatch.append: value = fn(state, value, initiator or self) if self.trackparent and value is not None: self.sethasparent(attributes.instance_state(value), state, True) def fire_remove_event(self, state, dict_, value, initiator, collection_history=None): if collection_history is None: collection_history = self._modified_event(state, dict_) collection_history.add_removed(value) if self.trackparent and value is not None: self.sethasparent(attributes.instance_state(value), state, False) for fn in self.dispatch.remove: fn(state, value, initiator or self) def _modified_event(self, state, dict_): if self.key not in state.committed_state: state.committed_state[self.key] = CollectionHistory(self, state) state._modified_event(dict_, self, attributes.NEVER_SET) # this is a hack to allow the fixtures.ComparableEntity fixture # to work dict_[self.key] = True return state.committed_state[self.key] def set(self, state, dict_, value, initiator, passive=attributes.PASSIVE_OFF, check_old=None, pop=False): if initiator and initiator.parent_token is self.parent_token: return if pop and value is None: return self._set_iterable(state, dict_, value) def _set_iterable(self, state, dict_, iterable, adapter=None): new_values = list(iterable) if state.has_identity: old_collection = util.IdentitySet(self.get(state, dict_)) collection_history = self._modified_event(state, dict_) if not state.has_identity: old_collection = collection_history.added_items else: 
old_collection = old_collection.union( collection_history.added_items) idset = util.IdentitySet constants = old_collection.intersection(new_values) additions = idset(new_values).difference(constants) removals = old_collection.difference(constants) for member in new_values: if member in additions: self.fire_append_event(state, dict_, member, None, collection_history=collection_history) for member in removals: self.fire_remove_event(state, dict_, member, None, collection_history=collection_history) def delete(self, *args, **kwargs): raise NotImplementedError() def set_committed_value(self, state, dict_, value): raise NotImplementedError("Dynamic attributes don't support " "collection population.") def get_history(self, state, dict_, passive=attributes.PASSIVE_OFF): c = self._get_collection_history(state, passive) return c.as_history() def get_all_pending(self, state, dict_): c = self._get_collection_history( state, attributes.PASSIVE_NO_INITIALIZE) return [ (attributes.instance_state(x), x) for x in c.all_items ] def _get_collection_history(self, state, passive=attributes.PASSIVE_OFF): if self.key in state.committed_state: c = state.committed_state[self.key] else: c = CollectionHistory(self, state) if state.has_identity and (passive & attributes.INIT_OK): return CollectionHistory(self, state, apply_to=c) else: return c def append(self, state, dict_, value, initiator, passive=attributes.PASSIVE_OFF): if initiator is not self: self.fire_append_event(state, dict_, value, initiator) def remove(self, state, dict_, value, initiator, passive=attributes.PASSIVE_OFF): if initiator is not self: self.fire_remove_event(state, dict_, value, initiator) def pop(self, state, dict_, value, initiator, passive=attributes.PASSIVE_OFF): self.remove(state, dict_, value, initiator, passive=passive) class AppenderMixin(object): query_class = None def __init__(self, attr, state): super(AppenderMixin, self).__init__(attr.target_mapper, None) self.instance = instance = state.obj() self.attr = attr mapper = object_mapper(instance) prop = mapper._props[self.attr.key] self._criterion = prop.compare( operators.eq, instance, value_is_parent=True, alias_secondary=False) if self.attr.order_by: self._order_by = self.attr.order_by def session(self): sess = object_session(self.instance) if sess is not None and self.autoflush and sess.autoflush \ and self.instance in sess: sess.flush() if not orm_util.has_identity(self.instance): return None else: return sess session = property(session, lambda s, x: None) def __iter__(self): sess = self.session if sess is None: return iter(self.attr._get_collection_history( attributes.instance_state(self.instance), attributes.PASSIVE_NO_INITIALIZE).added_items) else: return iter(self._clone(sess)) def __getitem__(self, index): sess = self.session if sess is None: return self.attr._get_collection_history( attributes.instance_state(self.instance), attributes.PASSIVE_NO_INITIALIZE).indexed(index) else: return self._clone(sess).__getitem__(index) def count(self): sess = self.session if sess is None: return len(self.attr._get_collection_history( attributes.instance_state(self.instance), attributes.PASSIVE_NO_INITIALIZE).added_items) else: return self._clone(sess).count() def _clone(self, sess=None): # note we're returning an entirely new Query class instance # here without any assignment capabilities; the class of this # query is determined by the session. 
instance = self.instance if sess is None: sess = object_session(instance) if sess is None: raise orm_exc.DetachedInstanceError( "Parent instance %s is not bound to a Session, and no " "contextual session is established; lazy load operation " "of attribute '%s' cannot proceed" % ( orm_util.instance_str(instance), self.attr.key)) if self.query_class: query = self.query_class(self.attr.target_mapper, session=sess) else: query = sess.query(self.attr.target_mapper) query._criterion = self._criterion query._order_by = self._order_by return query def extend(self, iterator): for item in iterator: self.attr.append( attributes.instance_state(self.instance), attributes.instance_dict(self.instance), item, None) def append(self, item): self.attr.append( attributes.instance_state(self.instance), attributes.instance_dict(self.instance), item, None) def remove(self, item): self.attr.remove( attributes.instance_state(self.instance), attributes.instance_dict(self.instance), item, None) class AppenderQuery(AppenderMixin, Query): """A dynamic query that supports basic collection storage operations.""" def mixin_user_query(cls): """Return a new class with AppenderQuery functionality layered over.""" name = 'Appender' + cls.__name__ return type(name, (AppenderMixin, cls), {'query_class': cls}) class CollectionHistory(object): """Overrides AttributeHistory to receive append/remove events directly.""" def __init__(self, attr, state, apply_to=None): if apply_to: coll = AppenderQuery(attr, state).autoflush(False) self.unchanged_items = util.OrderedIdentitySet(coll) self.added_items = apply_to.added_items self.deleted_items = apply_to.deleted_items self._reconcile_collection = True else: self.deleted_items = util.OrderedIdentitySet() self.added_items = util.OrderedIdentitySet() self.unchanged_items = util.OrderedIdentitySet() self._reconcile_collection = False @property def added_plus_unchanged(self): return list(self.added_items.union(self.unchanged_items)) @property def all_items(self): return list(self.added_items.union( self.unchanged_items).union(self.deleted_items)) def as_history(self): if self._reconcile_collection: added = self.added_items.difference(self.unchanged_items) deleted = self.deleted_items.intersection(self.unchanged_items) unchanged = self.unchanged_items.difference(deleted) else: added, unchanged, deleted = self.added_items,\ self.unchanged_items,\ self.deleted_items return attributes.History( list(added), list(unchanged), list(deleted), ) def indexed(self, index): return list(self.added_items)[index] def add_added(self, value): self.added_items.add(value) def add_removed(self, value): if value in self.added_items: self.added_items.remove(value) else: self.deleted_items.add(value) SQLAlchemy-0.8.4/lib/sqlalchemy/orm/evaluator.py0000644000076500000240000001047712251150015022313 0ustar classicstaff00000000000000# orm/evaluator.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php import operator from ..sql import operators class UnevaluatableError(Exception): pass _straight_ops = set(getattr(operators, op) for op in ('add', 'mul', 'sub', # Py2K 'div', # end Py2K 'mod', 'truediv', 'lt', 'le', 'ne', 'gt', 'ge', 'eq')) _notimplemented_ops = set(getattr(operators, op) for op in ('like_op', 'notlike_op', 'ilike_op', 'notilike_op', 'between_op', 'in_op', 'notin_op', 'endswith_op', 'concat_op')) class EvaluatorCompiler(object): def process(self, clause): meth = 
getattr(self, "visit_%s" % clause.__visit_name__, None) if not meth: raise UnevaluatableError( "Cannot evaluate %s" % type(clause).__name__) return meth(clause) def visit_grouping(self, clause): return self.process(clause.element) def visit_null(self, clause): return lambda obj: None def visit_false(self, clause): return lambda obj: False def visit_true(self, clause): return lambda obj: True def visit_column(self, clause): if 'parentmapper' in clause._annotations: key = clause._annotations['parentmapper'].\ _columntoproperty[clause].key else: key = clause.key get_corresponding_attr = operator.attrgetter(key) return lambda obj: get_corresponding_attr(obj) def visit_clauselist(self, clause): evaluators = map(self.process, clause.clauses) if clause.operator is operators.or_: def evaluate(obj): has_null = False for sub_evaluate in evaluators: value = sub_evaluate(obj) if value: return True has_null = has_null or value is None if has_null: return None return False elif clause.operator is operators.and_: def evaluate(obj): for sub_evaluate in evaluators: value = sub_evaluate(obj) if not value: if value is None: return None return False return True else: raise UnevaluatableError( "Cannot evaluate clauselist with operator %s" % clause.operator) return evaluate def visit_binary(self, clause): eval_left, eval_right = map(self.process, [clause.left, clause.right]) operator = clause.operator if operator is operators.is_: def evaluate(obj): return eval_left(obj) == eval_right(obj) elif operator is operators.isnot: def evaluate(obj): return eval_left(obj) != eval_right(obj) elif operator in _straight_ops: def evaluate(obj): left_val = eval_left(obj) right_val = eval_right(obj) if left_val is None or right_val is None: return None return operator(eval_left(obj), eval_right(obj)) else: raise UnevaluatableError( "Cannot evaluate %s with operator %s" % (type(clause).__name__, clause.operator)) return evaluate def visit_unary(self, clause): eval_inner = self.process(clause.element) if clause.operator is operators.inv: def evaluate(obj): value = eval_inner(obj) if value is None: return None return not value return evaluate raise UnevaluatableError( "Cannot evaluate %s with operator %s" % (type(clause).__name__, clause.operator)) def visit_bindparam(self, clause): val = clause.value return lambda obj: val SQLAlchemy-0.8.4/lib/sqlalchemy/orm/events.py0000644000076500000240000017361612251150015021622 0ustar classicstaff00000000000000# orm/events.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """ORM event interfaces. """ from .. import event, exc, util orm = util.importlater("sqlalchemy", "orm") import inspect import weakref class InstrumentationEvents(event.Events): """Events related to class instrumentation events. The listeners here support being established against any new style class, that is any object that is a subclass of 'type'. Events will then be fired off for events against that class. If the "propagate=True" flag is passed to event.listen(), the event will fire off for subclasses of that class as well. The Python ``type`` builtin is also accepted as a target, which when used has the effect of events being emitted for all classes. Note the "propagate" flag here is defaulted to ``True``, unlike the other class level events where it defaults to ``False``. 
This means that new subclasses will also be the subject of these events, when a listener is established on a superclass. .. versionchanged:: 0.8 - events here will emit based on comparing the incoming class to the type of class passed to :func:`.event.listen`. Previously, the event would fire for any class unconditionally regardless of what class was sent for listening, despite documentation which stated the contrary. """ @classmethod def _accept_with(cls, target): # TODO: there's no coverage for this if isinstance(target, type): return _InstrumentationEventsHold(target) else: return None @classmethod def _listen(cls, target, identifier, fn, propagate=True): def listen(target_cls, *arg): listen_cls = target() if propagate and issubclass(target_cls, listen_cls): return fn(target_cls, *arg) elif not propagate and target_cls is listen_cls: return fn(target_cls, *arg) def remove(ref): event.Events._remove(orm.instrumentation._instrumentation_factory, identifier, listen) target = weakref.ref(target.class_, remove) event.Events._listen(orm.instrumentation._instrumentation_factory, identifier, listen) @classmethod def _remove(cls, identifier, target, fn): raise NotImplementedError("Removal of instrumentation events " "not yet implemented") @classmethod def _clear(cls): super(InstrumentationEvents, cls)._clear() orm.instrumentation._instrumentation_factory.dispatch._clear() def class_instrument(self, cls): """Called after the given class is instrumented. To get at the :class:`.ClassManager`, use :func:`.manager_of_class`. """ def class_uninstrument(self, cls): """Called before the given class is uninstrumented. To get at the :class:`.ClassManager`, use :func:`.manager_of_class`. """ def attribute_instrument(self, cls, key, inst): """Called when an attribute is instrumented.""" class _InstrumentationEventsHold(object): """temporary marker object used to transfer from _accept_with() to _listen() on the InstrumentationEvents class. """ def __init__(self, class_): self.class_ = class_ dispatch = event.dispatcher(InstrumentationEvents) class InstanceEvents(event.Events): """Define events specific to object lifecycle. e.g.:: from sqlalchemy import event def my_load_listener(target, context): print "on load!" event.listen(SomeClass, 'load', my_load_listener) Available targets include: * mapped classes * unmapped superclasses of mapped or to-be-mapped classes (using the ``propagate=True`` flag) * :class:`.Mapper` objects * the :class:`.Mapper` class itself and the :func:`.mapper` function indicate listening for all mappers. .. versionchanged:: 0.8.0 instance events can be associated with unmapped superclasses of mapped classes. Instance events are closely related to mapper events, but are more specific to the instance and its instrumentation, rather than its system of persistence. When using :class:`.InstanceEvents`, several modifiers are available to the :func:`.event.listen` function. :param propagate=False: When True, the event listener should be applied to all inheriting classes as well as the class which is the target of this listener. :param raw=False: When True, the "target" argument passed to applicable event listener functions will be the instance's :class:`.InstanceState` management object, rather than the mapped instance itself. 
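    For example, a listener registered with ``raw=True`` receives the
    :class:`.InstanceState` rather than the instance itself (a sketch;
    ``SomeClass`` is an assumed mapped class)::

        from sqlalchemy import event

        def load_listener(state, context):
            # 'state' is the InstanceState; the mapped instance
            # is available via state.obj()
            print "loaded: %r" % state.obj()

        event.listen(SomeClass, 'load', load_listener, raw=True)
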
""" @classmethod def _accept_with(cls, target): if isinstance(target, orm.instrumentation.ClassManager): return target elif isinstance(target, orm.Mapper): return target.class_manager elif target is orm.mapper: return orm.instrumentation.ClassManager elif isinstance(target, type): if issubclass(target, orm.Mapper): return orm.instrumentation.ClassManager else: manager = orm.instrumentation.manager_of_class(target) if manager: return manager else: return _InstanceEventsHold(target) return None @classmethod def _listen(cls, target, identifier, fn, raw=False, propagate=False): if not raw: orig_fn = fn def wrap(state, *arg, **kw): return orig_fn(state.obj(), *arg, **kw) fn = wrap event.Events._listen(target, identifier, fn, propagate=propagate) if propagate: for mgr in target.subclass_managers(True): event.Events._listen(mgr, identifier, fn, True) @classmethod def _remove(cls, identifier, target, fn): msg = "Removal of instance events not yet implemented" raise NotImplementedError(msg) @classmethod def _clear(cls): super(InstanceEvents, cls)._clear() _InstanceEventsHold._clear() def first_init(self, manager, cls): """Called when the first instance of a particular mapping is called. """ def init(self, target, args, kwargs): """Receive an instance when it's constructor is called. This method is only called during a userland construction of an object. It is not called when an object is loaded from the database. """ def init_failure(self, target, args, kwargs): """Receive an instance when it's constructor has been called, and raised an exception. This method is only called during a userland construction of an object. It is not called when an object is loaded from the database. """ def load(self, target, context): """Receive an object instance after it has been created via ``__new__``, and after initial attribute population has occurred. This typically occurs when the instance is created based on incoming result rows, and is only called once for that instance's lifetime. Note that during a result-row load, this method is called upon the first row received for this instance. Note that some attributes and collections may or may not be loaded or even initialized, depending on what's present in the result rows. :param target: the mapped instance. If the event is configured with ``raw=True``, this will instead be the :class:`.InstanceState` state-management object associated with the instance. :param context: the :class:`.QueryContext` corresponding to the current :class:`.Query` in progress. This argument may be ``None`` if the load does not correspond to a :class:`.Query`, such as during :meth:`.Session.merge`. """ def refresh(self, target, context, attrs): """Receive an object instance after one or more attributes have been refreshed from a query. :param target: the mapped instance. If the event is configured with ``raw=True``, this will instead be the :class:`.InstanceState` state-management object associated with the instance. :param context: the :class:`.QueryContext` corresponding to the current :class:`.Query` in progress. :param attrs: iterable collection of attribute names which were populated, or None if all column-mapped, non-deferred attributes were populated. """ def expire(self, target, attrs): """Receive an object instance after its attributes or some subset have been expired. 'keys' is a list of attribute names. If None, the entire state was expired. :param target: the mapped instance. 
If the event is configured with ``raw=True``, this will instead be the :class:`.InstanceState` state-management object associated with the instance. :param attrs: iterable collection of attribute names which were expired, or None if all attributes were expired. """ def resurrect(self, target): """Receive an object instance as it is 'resurrected' from garbage collection, which occurs when a "dirty" state falls out of scope. :param target: the mapped instance. If the event is configured with ``raw=True``, this will instead be the :class:`.InstanceState` state-management object associated with the instance. """ def pickle(self, target, state_dict): """Receive an object instance when its associated state is being pickled. :param target: the mapped instance. If the event is configured with ``raw=True``, this will instead be the :class:`.InstanceState` state-management object associated with the instance. :param state_dict: the dictionary returned by :class:`.InstanceState.__getstate__`, containing the state to be pickled. """ def unpickle(self, target, state_dict): """Receive an object instance after it's associated state has been unpickled. :param target: the mapped instance. If the event is configured with ``raw=True``, this will instead be the :class:`.InstanceState` state-management object associated with the instance. :param state_dict: the dictionary sent to :class:`.InstanceState.__setstate__`, containing the state dictionary which was pickled. """ class _EventsHold(object): """Hold onto listeners against unmapped, uninstrumented classes. Establish _listen() for that class' mapper/instrumentation when those objects are created for that class. """ def __init__(self, class_): self.class_ = class_ @classmethod def _clear(cls): cls.all_holds.clear() class HoldEvents(object): @classmethod def _listen(cls, target, identifier, fn, raw=False, propagate=False): if target.class_ in target.all_holds: collection = target.all_holds[target.class_] else: collection = target.all_holds[target.class_] = [] collection.append((identifier, fn, raw, propagate)) if propagate: stack = list(target.class_.__subclasses__()) while stack: subclass = stack.pop(0) stack.extend(subclass.__subclasses__()) subject = target.resolve(subclass) if subject is not None: subject.dispatch._listen(subject, identifier, fn, raw=raw, propagate=propagate) @classmethod def populate(cls, class_, subject): for subclass in class_.__mro__: if subclass in cls.all_holds: collection = cls.all_holds[subclass] for ident, fn, raw, propagate in collection: if propagate or subclass is class_: # since we can't be sure in what order different classes # in a hierarchy are triggered with populate(), # we rely upon _EventsHold for all event # assignment, instead of using the generic propagate # flag. subject.dispatch._listen(subject, ident, fn, raw=raw, propagate=False) class _InstanceEventsHold(_EventsHold): all_holds = weakref.WeakKeyDictionary() def resolve(self, class_): return orm.instrumentation.manager_of_class(class_) class HoldInstanceEvents(_EventsHold.HoldEvents, InstanceEvents): pass dispatch = event.dispatcher(HoldInstanceEvents) class MapperEvents(event.Events): """Define events specific to mappings. 
e.g.:: from sqlalchemy import event def my_before_insert_listener(mapper, connection, target): # execute a stored procedure upon INSERT, # apply the value to the row to be inserted target.calculated_value = connection.scalar( "select my_special_function(%d)" % target.special_number) # associate the listener function with SomeClass, # to execute during the "before_insert" hook event.listen( SomeClass, 'before_insert', my_before_insert_listener) Available targets include: * mapped classes * unmapped superclasses of mapped or to-be-mapped classes (using the ``propagate=True`` flag) * :class:`.Mapper` objects * the :class:`.Mapper` class itself and the :func:`.mapper` function indicate listening for all mappers. .. versionchanged:: 0.8.0 mapper events can be associated with unmapped superclasses of mapped classes. Mapper events provide hooks into critical sections of the mapper, including those related to object instrumentation, object loading, and object persistence. In particular, the persistence methods :meth:`~.MapperEvents.before_insert`, and :meth:`~.MapperEvents.before_update` are popular places to augment the state being persisted - however, these methods operate with several significant restrictions. The user is encouraged to evaluate the :meth:`.SessionEvents.before_flush` and :meth:`.SessionEvents.after_flush` methods as more flexible and user-friendly hooks in which to apply additional database state during a flush. When using :class:`.MapperEvents`, several modifiers are available to the :func:`.event.listen` function. :param propagate=False: When True, the event listener should be applied to all inheriting mappers and/or the mappers of inheriting classes, as well as any mapper which is the target of this listener. :param raw=False: When True, the "target" argument passed to applicable event listener functions will be the instance's :class:`.InstanceState` management object, rather than the mapped instance itself. :param retval=False: when True, the user-defined event function must have a return value, the purpose of which is either to control subsequent event propagation, or to otherwise alter the operation in progress by the mapper. Possible return values are: * ``sqlalchemy.orm.interfaces.EXT_CONTINUE`` - continue event processing normally. * ``sqlalchemy.orm.interfaces.EXT_STOP`` - cancel all subsequent event handlers in the chain. * other values - the return value specified by specific listeners, such as :meth:`~.MapperEvents.translate_row` or :meth:`~.MapperEvents.create_instance`. 
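    For example, a row-translation listener established with ``retval=True``
    returns either a replacement row or ``EXT_CONTINUE`` (a sketch;
    ``SomeClass`` is an assumed mapped class, and ``needs_rewrite()`` /
    ``rewrite_row()`` are hypothetical helpers)::

        from sqlalchemy import event
        from sqlalchemy.orm.interfaces import EXT_CONTINUE

        def translate(mapper, context, row):
            if needs_rewrite(row):
                return rewrite_row(row)
            # indicate the original row should be used as-is
            return EXT_CONTINUE

        event.listen(SomeClass, 'translate_row', translate, retval=True)
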
""" @classmethod def _accept_with(cls, target): if target is orm.mapper: return orm.Mapper elif isinstance(target, type): if issubclass(target, orm.Mapper): return target else: mapper = orm.util._mapper_or_none(target) if mapper is not None: return mapper else: return _MapperEventsHold(target) else: return target @classmethod def _listen(cls, target, identifier, fn, raw=False, retval=False, propagate=False): if not raw or not retval: if not raw: meth = getattr(cls, identifier) try: target_index = \ inspect.getargspec(meth)[0].index('target') - 1 except ValueError: target_index = None wrapped_fn = fn def wrap(*arg, **kw): if not raw and target_index is not None: arg = list(arg) arg[target_index] = arg[target_index].obj() if not retval: wrapped_fn(*arg, **kw) return orm.interfaces.EXT_CONTINUE else: return wrapped_fn(*arg, **kw) fn = wrap if propagate: for mapper in target.self_and_descendants: event.Events._listen(mapper, identifier, fn, propagate=True) else: event.Events._listen(target, identifier, fn) @classmethod def _clear(cls): super(MapperEvents, cls)._clear() _MapperEventsHold._clear() def instrument_class(self, mapper, class_): """Receive a class when the mapper is first constructed, before instrumentation is applied to the mapped class. This event is the earliest phase of mapper construction. Most attributes of the mapper are not yet initialized. This listener can either be applied to the :class:`.Mapper` class overall, or to any un-mapped class which serves as a base for classes that will be mapped (using the ``propagate=True`` flag):: Base = declarative_base() @event.listens_for(Base, "instrument_class", propagate=True) def on_new_class(mapper, cls_): " ... " :param mapper: the :class:`.Mapper` which is the target of this event. :param class\_: the mapped class. """ def mapper_configured(self, mapper, class_): """Called when the mapper for the class is fully configured. This event is the latest phase of mapper construction, and is invoked when the mapped classes are first used, so that relationships between mappers can be resolved. When the event is called, the mapper should be in its final state. While the configuration event normally occurs automatically, it can be forced to occur ahead of time, in the case where the event is needed before any actual mapper usage, by using the :func:`.configure_mappers` function. :param mapper: the :class:`.Mapper` which is the target of this event. :param class\_: the mapped class. """ # TODO: need coverage for this event def after_configured(self): """Called after a series of mappers have been configured. This corresponds to the :func:`.orm.configure_mappers` call, which note is usually called automatically as mappings are first used. Theoretically this event is called once per application, but is actually called any time new mappers have been affected by a :func:`.orm.configure_mappers` call. If new mappings are constructed after existing ones have already been used, this event can be called again. """ def translate_row(self, mapper, context, row): """Perform pre-processing on the given result row and return a new row instance. This listener is typically registered with ``retval=True``. It is called when the mapper first receives a row, before the object identity or the instance itself has been derived from that row. The given row may or may not be a :class:`.RowProxy` object - it will always be a dictionary-like object which contains mapped columns as keys. 
The returned object should also be a dictionary-like object which recognizes mapped columns as keys. :param mapper: the :class:`.Mapper` which is the target of this event. :param context: the :class:`.QueryContext`, which includes a handle to the current :class:`.Query` in progress as well as additional state information. :param row: the result row being handled. This may be an actual :class:`.RowProxy` or may be a dictionary containing :class:`.Column` objects as keys. :return: When configured with ``retval=True``, the function should return a dictionary-like row object, or ``EXT_CONTINUE``, indicating the original row should be used. """ def create_instance(self, mapper, context, row, class_): """Receive a row when a new object instance is about to be created from that row. The method can choose to create the instance itself, or it can return EXT_CONTINUE to indicate normal object creation should take place. This listener is typically registered with ``retval=True``. :param mapper: the :class:`.Mapper` which is the target of this event. :param context: the :class:`.QueryContext`, which includes a handle to the current :class:`.Query` in progress as well as additional state information. :param row: the result row being handled. This may be an actual :class:`.RowProxy` or may be a dictionary containing :class:`.Column` objects as keys. :param class\_: the mapped class. :return: When configured with ``retval=True``, the return value should be a newly created instance of the mapped class, or ``EXT_CONTINUE`` indicating that default object construction should take place. """ def append_result(self, mapper, context, row, target, result, **flags): """Receive an object instance before that instance is appended to a result list. This is a rarely used hook which can be used to alter the construction of a result list returned by :class:`.Query`. :param mapper: the :class:`.Mapper` which is the target of this event. :param context: the :class:`.QueryContext`, which includes a handle to the current :class:`.Query` in progress as well as additional state information. :param row: the result row being handled. This may be an actual :class:`.RowProxy` or may be a dictionary containing :class:`.Column` objects as keys. :param target: the mapped instance being populated. If the event is configured with ``raw=True``, this will instead be the :class:`.InstanceState` state-management object associated with the instance. :param result: a list-like object where results are being appended. :param \**flags: Additional state information about the current handling of the row. :return: If this method is registered with ``retval=True``, a return value of ``EXT_STOP`` will prevent the instance from being appended to the given result list, whereas a return value of ``EXT_CONTINUE`` will result in the default behavior of appending the value to the result list. """ def populate_instance(self, mapper, context, row, target, **flags): """Receive an instance before that instance has its attributes populated. This usually corresponds to a newly loaded instance but may also correspond to an already-loaded instance which has unloaded attributes to be populated. The method may be called many times for a single instance, as multiple result rows are used to populate eagerly loaded collections. Most usages of this hook are obsolete. For a generic "object has been newly created from a row" hook, use :meth:`.InstanceEvents.load`. :param mapper: the :class:`.Mapper` which is the target of this event. 
:param context: the :class:`.QueryContext`, which includes a handle to the current :class:`.Query` in progress as well as additional state information. :param row: the result row being handled. This may be an actual :class:`.RowProxy` or may be a dictionary containing :class:`.Column` objects as keys. :param target: the mapped instance. If the event is configured with ``raw=True``, this will instead be the :class:`.InstanceState` state-management object associated with the instance. :return: When configured with ``retval=True``, a return value of ``EXT_STOP`` will bypass instance population by the mapper. A value of ``EXT_CONTINUE`` indicates that default instance population should take place. """ def before_insert(self, mapper, connection, target): """Receive an object instance before an INSERT statement is emitted corresponding to that instance. This event is used to modify local, non-object related attributes on the instance before an INSERT occurs, as well as to emit additional SQL statements on the given connection. The event is often called for a batch of objects of the same class before their INSERT statements are emitted at once in a later step. In the extremely rare case that this is not desirable, the :func:`.mapper` can be configured with ``batch=False``, which will cause batches of instances to be broken up into individual (and more poorly performing) event->persist->event steps. .. warning:: Mapper-level flush events are designed to operate **on attributes local to the immediate object being handled and via SQL operations with the given** :class:`.Connection` **only.** Handlers here should **not** make alterations to the state of the :class:`.Session` overall, and in general should not affect any :func:`.relationship` -mapped attributes, as session cascade rules will not function properly, nor is it always known if the related class has already been handled. Operations that **are not supported in mapper events** include: * :meth:`.Session.add` * :meth:`.Session.delete` * Mapped collection append, add, remove, delete, discard, etc. * Mapped relationship attribute set/del events, i.e. ``someobject.related = someotherobject`` Operations which manipulate the state of the object relative to other objects are better handled: * In the ``__init__()`` method of the mapped object itself, or another method designed to establish some particular state. * In a ``@validates`` handler, see :ref:`simple_validators` * Within the :meth:`.SessionEvents.before_flush` event. :param mapper: the :class:`.Mapper` which is the target of this event. :param connection: the :class:`.Connection` being used to emit INSERT statements for this instance. This provides a handle into the current transaction on the target database specific to this instance. :param target: the mapped instance being persisted. If the event is configured with ``raw=True``, this will instead be the :class:`.InstanceState` state-management object associated with the instance. :return: No return value is supported by this event. """ def after_insert(self, mapper, connection, target): """Receive an object instance after an INSERT statement is emitted corresponding to that instance. This event is used to modify in-Python-only state on the instance after an INSERT occurs, as well as to emit additional SQL statements on the given connection. The event is often called for a batch of objects of the same class after their INSERT statements have been emitted at once in a previous step. 
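For example, a listener might record the insert in an auditing table using the same connection and transaction (``SomeClass`` and ``audit_table`` are hypothetical names used only for illustration)::

    from sqlalchemy import event

    @event.listens_for(SomeClass, "after_insert")
    def receive_after_insert(mapper, connection, target):
        # emit additional SQL within the same transaction
        connection.execute(
            audit_table.insert().values(
                object_id=target.id, action="insert"))

Because INSERT statements for a given class are normally batched, a listener like the sketch above is typically invoked once per instance, in succession, after the whole batch has been emitted.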
In the extremely rare case that this is not desirable, the :func:`.mapper` can be configured with ``batch=False``, which will cause batches of instances to be broken up into individual (and more poorly performing) event->persist->event steps. .. warning:: Mapper-level flush events are designed to operate **on attributes local to the immediate object being handled and via SQL operations with the given** :class:`.Connection` **only.** Handlers here should **not** make alterations to the state of the :class:`.Session` overall, and in general should not affect any :func:`.relationship` -mapped attributes, as session cascade rules will not function properly, nor is it always known if the related class has already been handled. Operations that **are not supported in mapper events** include: * :meth:`.Session.add` * :meth:`.Session.delete` * Mapped collection append, add, remove, delete, discard, etc. * Mapped relationship attribute set/del events, i.e. ``someobject.related = someotherobject`` Operations which manipulate the state of the object relative to other objects are better handled: * In the ``__init__()`` method of the mapped object itself, or another method designed to establish some particular state. * In a ``@validates`` handler, see :ref:`simple_validators` * Within the :meth:`.SessionEvents.before_flush` event. :param mapper: the :class:`.Mapper` which is the target of this event. :param connection: the :class:`.Connection` being used to emit INSERT statements for this instance. This provides a handle into the current transaction on the target database specific to this instance. :param target: the mapped instance being persisted. If the event is configured with ``raw=True``, this will instead be the :class:`.InstanceState` state-management object associated with the instance. :return: No return value is supported by this event. """ def before_update(self, mapper, connection, target): """Receive an object instance before an UPDATE statement is emitted corresponding to that instance. This event is used to modify local, non-object related attributes on the instance before an UPDATE occurs, as well as to emit additional SQL statements on the given connection. This method is called for all instances that are marked as "dirty", *even those which have no net changes to their column-based attributes*. An object is marked as dirty when any of its column-based attributes have a "set attribute" operation called or when any of its collections are modified. If, at update time, no column-based attributes have any net changes, no UPDATE statement will be issued. This means that an instance being sent to :meth:`~.MapperEvents.before_update` is *not* a guarantee that an UPDATE statement will be issued, although you can affect the outcome here by modifying attributes so that a net change in value does exist. To detect if the column-based attributes on the object have net changes, and will therefore generate an UPDATE statement, use ``object_session(instance).is_modified(instance, include_collections=False)``. The event is often called for a batch of objects of the same class before their UPDATE statements are emitted at once in a later step. In the extremely rare case that this is not desirable, the :func:`.mapper` can be configured with ``batch=False``, which will cause batches of instances to be broken up into individual (and more poorly performing) event->persist->event steps. .. 
warning:: Mapper-level flush events are designed to operate **on attributes local to the immediate object being handled and via SQL operations with the given** :class:`.Connection` **only.** Handlers here should **not** make alterations to the state of the :class:`.Session` overall, and in general should not affect any :func:`.relationship` -mapped attributes, as session cascade rules will not function properly, nor is it always known if the related class has already been handled. Operations that **are not supported in mapper events** include: * :meth:`.Session.add` * :meth:`.Session.delete` * Mapped collection append, add, remove, delete, discard, etc. * Mapped relationship attribute set/del events, i.e. ``someobject.related = someotherobject`` Operations which manipulate the state of the object relative to other objects are better handled: * In the ``__init__()`` method of the mapped object itself, or another method designed to establish some particular state. * In a ``@validates`` handler, see :ref:`simple_validators` * Within the :meth:`.SessionEvents.before_flush` event. :param mapper: the :class:`.Mapper` which is the target of this event. :param connection: the :class:`.Connection` being used to emit UPDATE statements for this instance. This provides a handle into the current transaction on the target database specific to this instance. :param target: the mapped instance being persisted. If the event is configured with ``raw=True``, this will instead be the :class:`.InstanceState` state-management object associated with the instance. :return: No return value is supported by this event. """ def after_update(self, mapper, connection, target): """Receive an object instance after an UPDATE statement is emitted corresponding to that instance. This event is used to modify in-Python-only state on the instance after an UPDATE occurs, as well as to emit additional SQL statements on the given connection. This method is called for all instances that are marked as "dirty", *even those which have no net changes to their column-based attributes*, and for which no UPDATE statement has proceeded. An object is marked as dirty when any of its column-based attributes have a "set attribute" operation called or when any of its collections are modified. If, at update time, no column-based attributes have any net changes, no UPDATE statement will be issued. This means that an instance being sent to :meth:`~.MapperEvents.after_update` is *not* a guarantee that an UPDATE statement has been issued. To detect if the column-based attributes on the object have net changes, and therefore resulted in an UPDATE statement, use ``object_session(instance).is_modified(instance, include_collections=False)``. The event is often called for a batch of objects of the same class after their UPDATE statements have been emitted at once in a previous step. In the extremely rare case that this is not desirable, the :func:`.mapper` can be configured with ``batch=False``, which will cause batches of instances to be broken up into individual (and more poorly performing) event->persist->event steps. .. 
warning:: Mapper-level flush events are designed to operate **on attributes local to the immediate object being handled and via SQL operations with the given** :class:`.Connection` **only.** Handlers here should **not** make alterations to the state of the :class:`.Session` overall, and in general should not affect any :func:`.relationship` -mapped attributes, as session cascade rules will not function properly, nor is it always known if the related class has already been handled. Operations that **are not supported in mapper events** include: * :meth:`.Session.add` * :meth:`.Session.delete` * Mapped collection append, add, remove, delete, discard, etc. * Mapped relationship attribute set/del events, i.e. ``someobject.related = someotherobject`` Operations which manipulate the state of the object relative to other objects are better handled: * In the ``__init__()`` method of the mapped object itself, or another method designed to establish some particular state. * In a ``@validates`` handler, see :ref:`simple_validators` * Within the :meth:`.SessionEvents.before_flush` event. :param mapper: the :class:`.Mapper` which is the target of this event. :param connection: the :class:`.Connection` being used to emit UPDATE statements for this instance. This provides a handle into the current transaction on the target database specific to this instance. :param target: the mapped instance being persisted. If the event is configured with ``raw=True``, this will instead be the :class:`.InstanceState` state-management object associated with the instance. :return: No return value is supported by this event. """ def before_delete(self, mapper, connection, target): """Receive an object instance before a DELETE statement is emitted corresponding to that instance. This event is used to emit additional SQL statements on the given connection as well as to perform application specific bookkeeping related to a deletion event. The event is often called for a batch of objects of the same class before their DELETE statements are emitted at once in a later step. .. warning:: Mapper-level flush events are designed to operate **on attributes local to the immediate object being handled and via SQL operations with the given** :class:`.Connection` **only.** Handlers here should **not** make alterations to the state of the :class:`.Session` overall, and in general should not affect any :func:`.relationship` -mapped attributes, as session cascade rules will not function properly, nor is it always known if the related class has already been handled. Operations that **are not supported in mapper events** include: * :meth:`.Session.add` * :meth:`.Session.delete` * Mapped collection append, add, remove, delete, discard, etc. * Mapped relationship attribute set/del events, i.e. ``someobject.related = someotherobject`` Operations which manipulate the state of the object relative to other objects are better handled: * In the ``__init__()`` method of the mapped object itself, or another method designed to establish some particular state. * In a ``@validates`` handler, see :ref:`simple_validators` * Within the :meth:`.SessionEvents.before_flush` event. :param mapper: the :class:`.Mapper` which is the target of this event. :param connection: the :class:`.Connection` being used to emit DELETE statements for this instance. This provides a handle into the current transaction on the target database specific to this instance. :param target: the mapped instance being deleted. 
If the event is configured with ``raw=True``, this will instead be the :class:`.InstanceState` state-management object associated with the instance. :return: No return value is supported by this event. """ def after_delete(self, mapper, connection, target): """Receive an object instance after a DELETE statement has been emitted corresponding to that instance. This event is used to emit additional SQL statements on the given connection as well as to perform application specific bookkeeping related to a deletion event. The event is often called for a batch of objects of the same class after their DELETE statements have been emitted at once in a previous step. .. warning:: Mapper-level flush events are designed to operate **on attributes local to the immediate object being handled and via SQL operations with the given** :class:`.Connection` **only.** Handlers here should **not** make alterations to the state of the :class:`.Session` overall, and in general should not affect any :func:`.relationship` -mapped attributes, as session cascade rules will not function properly, nor is it always known if the related class has already been handled. Operations that **are not supported in mapper events** include: * :meth:`.Session.add` * :meth:`.Session.delete` * Mapped collection append, add, remove, delete, discard, etc. * Mapped relationship attribute set/del events, i.e. ``someobject.related = someotherobject`` Operations which manipulate the state of the object relative to other objects are better handled: * In the ``__init__()`` method of the mapped object itself, or another method designed to establish some particular state. * In a ``@validates`` handler, see :ref:`simple_validators` * Within the :meth:`.SessionEvents.before_flush` event. :param mapper: the :class:`.Mapper` which is the target of this event. :param connection: the :class:`.Connection` being used to emit DELETE statements for this instance. This provides a handle into the current transaction on the target database specific to this instance. :param target: the mapped instance being deleted. If the event is configured with ``raw=True``, this will instead be the :class:`.InstanceState` state-management object associated with the instance. :return: No return value is supported by this event. """ @classmethod def _remove(cls, identifier, target, fn): "Removal of mapper events not yet implemented" raise NotImplementedError(msg) class _MapperEventsHold(_EventsHold): all_holds = weakref.WeakKeyDictionary() def resolve(self, class_): return orm.util._mapper_or_none(class_) class HoldMapperEvents(_EventsHold.HoldEvents, MapperEvents): pass dispatch = event.dispatcher(HoldMapperEvents) class SessionEvents(event.Events): """Define events specific to :class:`.Session` lifecycle. e.g.:: from sqlalchemy import event from sqlalchemy.orm import sessionmaker def my_before_commit(session): print "before commit!" Session = sessionmaker() event.listen(Session, "before_commit", my_before_commit) The :func:`~.event.listen` function will accept :class:`.Session` objects as well as the return result of :func:`.sessionmaker` and :func:`.scoped_session`. Additionally, it accepts the :class:`.Session` class which will apply listeners to all :class:`.Session` instances globally. 
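For example, to listen on every :class:`.Session` used anywhere in the application, the listener can be associated with the :class:`.Session` class itself (a minimal sketch)::

    from sqlalchemy import event
    from sqlalchemy.orm import Session

    @event.listens_for(Session, "after_commit")
    def receive_after_commit(session):
        print "commit complete for %r" % session
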
""" @classmethod def _accept_with(cls, target): if isinstance(target, orm.scoped_session): target = target.session_factory if not isinstance(target, orm.sessionmaker) and \ ( not isinstance(target, type) or not issubclass(target, orm.Session) ): raise exc.ArgumentError( "Session event listen on a scoped_session " "requires that its creation callable " "is associated with the Session class.") if isinstance(target, orm.sessionmaker): return target.class_ elif isinstance(target, type): if issubclass(target, orm.scoped_session): return orm.Session elif issubclass(target, orm.Session): return target elif isinstance(target, orm.Session): return target else: return None @classmethod def _remove(cls, identifier, target, fn): msg = "Removal of session events not yet implemented" raise NotImplementedError(msg) def after_transaction_create(self, session, transaction): """Execute when a new :class:`.SessionTransaction` is created. This event differs from :meth:`~.SessionEvents.after_begin` in that it occurs for each :class:`.SessionTransaction` overall, as opposed to when transactions are begun on individual database connections. It is also invoked for nested transactions and subtransactions, and is always matched by a corresponding :meth:`~.SessionEvents.after_transaction_end` event (assuming normal operation of the :class:`.Session`). :param session: the target :class:`.Session`. :param transaction: the target :class:`.SessionTransaction`. .. versionadded:: 0.8 .. seealso:: :meth:`~.SessionEvents.after_transaction_end` """ def after_transaction_end(self, session, transaction): """Execute when the span of a :class:`.SessionTransaction` ends. This event differs from :meth:`~.SessionEvents.after_commit` in that it corresponds to all :class:`.SessionTransaction` objects in use, including those for nested transactions and subtransactions, and is always matched by a corresponding :meth:`~.SessionEvents.after_transaction_create` event. :param session: the target :class:`.Session`. :param transaction: the target :class:`.SessionTransaction`. .. versionadded:: 0.8 .. seealso:: :meth:`~.SessionEvents.after_transaction_create` """ def before_commit(self, session): """Execute before commit is called. .. note:: The :meth:`.before_commit` hook is *not* per-flush, that is, the :class:`.Session` can emit SQL to the database many times within the scope of a transaction. For interception of these events, use the :meth:`~.SessionEvents.before_flush`, :meth:`~.SessionEvents.after_flush`, or :meth:`~.SessionEvents.after_flush_postexec` events. :param session: The target :class:`.Session`. .. seealso:: :meth:`~.SessionEvents.after_commit` :meth:`~.SessionEvents.after_begin` :meth:`~.SessionEvents.after_transaction_create` :meth:`~.SessionEvents.after_transaction_end` """ def after_commit(self, session): """Execute after a commit has occurred. .. note:: The :meth:`~.SessionEvents.after_commit` hook is *not* per-flush, that is, the :class:`.Session` can emit SQL to the database many times within the scope of a transaction. For interception of these events, use the :meth:`~.SessionEvents.before_flush`, :meth:`~.SessionEvents.after_flush`, or :meth:`~.SessionEvents.after_flush_postexec` events. .. note:: The :class:`.Session` is not in an active tranasction when the :meth:`~.SessionEvents.after_commit` event is invoked, and therefore can not emit SQL. To emit SQL corresponding to every transaction, use the :meth:`~.SessionEvents.before_commit` event. :param session: The target :class:`.Session`. .. 
seealso:: :meth:`~.SessionEvents.before_commit` :meth:`~.SessionEvents.after_begin` :meth:`~.SessionEvents.after_transaction_create` :meth:`~.SessionEvents.after_transaction_end` """ def after_rollback(self, session): """Execute after a real DBAPI rollback has occurred. Note that this event only fires when the *actual* rollback against the database occurs - it does *not* fire each time the :meth:`.Session.rollback` method is called, if the underlying DBAPI transaction has already been rolled back. In many cases, the :class:`.Session` will not be in an "active" state during this event, as the current transaction is not valid. To acquire a :class:`.Session` which is active after the outermost rollback has proceeded, use the :meth:`.SessionEvents.after_soft_rollback` event, checking the :attr:`.Session.is_active` flag. :param session: The target :class:`.Session`. """ def after_soft_rollback(self, session, previous_transaction): """Execute after any rollback has occurred, including "soft" rollbacks that don't actually emit at the DBAPI level. This corresponds to both nested and outer rollbacks, i.e. the innermost rollback that calls the DBAPI's rollback() method, as well as the enclosing rollback calls that only pop themselves from the transaction stack. The given :class:`.Session` can be used to invoke SQL and :meth:`.Session.query` operations after an outermost rollback by first checking the :attr:`.Session.is_active` flag:: @event.listens_for(Session, "after_soft_rollback") def do_something(session, previous_transaction): if session.is_active: session.execute("select * from some_table") :param session: The target :class:`.Session`. :param previous_transaction: The :class:`.SessionTransaction` transactional marker object which was just closed. The current :class:`.SessionTransaction` for the given :class:`.Session` is available via the :attr:`.Session.transaction` attribute. .. versionadded:: 0.7.3 """ def before_flush(self, session, flush_context, instances): """Execute before flush process has started. :param session: The target :class:`.Session`. :param flush_context: Internal :class:`.UOWTransaction` object which handles the details of the flush. :param instances: Usually ``None``, this is the collection of objects which can be passed to the :meth:`.Session.flush` method (note this usage is deprecated). .. seealso:: :meth:`~.SessionEvents.after_flush` :meth:`~.SessionEvents.after_flush_postexec` """ def after_flush(self, session, flush_context): """Execute after flush has completed, but before commit has been called. Note that the session's state is still in pre-flush, i.e. 'new', 'dirty', and 'deleted' lists still show pre-flush state as well as the history settings on instance attributes. :param session: The target :class:`.Session`. :param flush_context: Internal :class:`.UOWTransaction` object which handles the details of the flush. .. seealso:: :meth:`~.SessionEvents.before_flush` :meth:`~.SessionEvents.after_flush_postexec` """ def after_flush_postexec(self, session, flush_context): """Execute after flush has completed, and after the post-exec state occurs. This will be when the 'new', 'dirty', and 'deleted' lists are in their final state. An actual commit() may or may not have occurred, depending on whether or not the flush started its own transaction or participated in a larger transaction. :param session: The target :class:`.Session`. :param flush_context: Internal :class:`.UOWTransaction` object which handles the details of the flush. .. 
seealso:: :meth:`~.SessionEvents.before_flush` :meth:`~.SessionEvents.after_flush` """ def after_begin(self, session, transaction, connection): """Execute after a transaction is begun on a connection :param session: The target :class:`.Session`. :param transaction: The :class:`.SessionTransaction`. :param connection: The :class:`~.engine.Connection` object which will be used for SQL statements. .. seealso:: :meth:`~.SessionEvents.before_commit` :meth:`~.SessionEvents.after_commit` :meth:`~.SessionEvents.after_transaction_create` :meth:`~.SessionEvents.after_transaction_end` """ def before_attach(self, session, instance): """Execute before an instance is attached to a session. This is called before an add, delete or merge causes the object to be part of the session. .. versionadded:: 0.8. Note that :meth:`.after_attach` now fires off after the item is part of the session. :meth:`.before_attach` is provided for those cases where the item should not yet be part of the session state. .. seealso:: :meth:`~.SessionEvents.after_attach` """ def after_attach(self, session, instance): """Execute after an instance is attached to a session. This is called after an add, delete or merge. .. note:: As of 0.8, this event fires off *after* the item has been fully associated with the session, which is different than previous releases. For event handlers that require the object not yet be part of session state (such as handlers which may autoflush while the target object is not yet complete) consider the new :meth:`.before_attach` event. .. seealso:: :meth:`~.SessionEvents.before_attach` """ def after_bulk_update(self, session, query, query_context, result): """Execute after a bulk update operation to the session. This is called as a result of the :meth:`.Query.update` method. :param query: the :class:`.Query` object that this update operation was called upon. :param query_context: The :class:`.QueryContext` object, corresponding to the invocation of an ORM query. :param result: the :class:`.ResultProxy` returned as a result of the bulk UPDATE operation. """ def after_bulk_delete(self, session, query, query_context, result): """Execute after a bulk delete operation to the session. This is called as a result of the :meth:`.Query.delete` method. :param query: the :class:`.Query` object that this update operation was called upon. :param query_context: The :class:`.QueryContext` object, corresponding to the invocation of an ORM query. :param result: the :class:`.ResultProxy` returned as a result of the bulk DELETE operation. """ class AttributeEvents(event.Events): """Define events for object attributes. These are typically defined on the class-bound descriptor for the target class. e.g.:: from sqlalchemy import event def my_append_listener(target, value, initiator): print "received append event for target: %s" % target event.listen(MyClass.collection, 'append', my_append_listener) Listeners have the option to return a possibly modified version of the value, when the ``retval=True`` flag is passed to :func:`~.event.listen`:: def validate_phone(target, value, oldvalue, initiator): "Strip non-numeric characters from a phone number" return re.sub(r'(?![0-9])', '', value) # setup listener on UserContact.phone attribute, instructing # it to use the return value listen(UserContact.phone, 'set', validate_phone, retval=True) A validation function like the above can also raise an exception such as :class:`.ValueError` to halt the operation. Several modifiers are available to the :func:`~.event.listen` function. 
:param active_history=False: When True, indicates that the "set" event would like to receive the "old" value being replaced unconditionally, even if this requires firing off database loads. Note that ``active_history`` can also be set directly via :func:`.column_property` and :func:`.relationship`. :param propagate=False: When True, the listener function will be established not just for the class attribute given, but for attributes of the same name on all current subclasses of that class, as well as all future subclasses of that class, using an additional listener that listens for instrumentation events. :param raw=False: When True, the "target" argument to the event will be the :class:`.InstanceState` management object, rather than the mapped instance itself. :param retval=False: when True, the user-defined event listening must return the "value" argument from the function. This gives the listening function the opportunity to change the value that is ultimately used for a "set" or "append" event. """ @classmethod def _accept_with(cls, target): # TODO: coverage if isinstance(target, orm.interfaces.MapperProperty): return getattr(target.parent.class_, target.key) else: return target @classmethod def _listen(cls, target, identifier, fn, active_history=False, raw=False, retval=False, propagate=False): if active_history: target.dispatch._active_history = True # TODO: for removal, need to package the identity # of the wrapper with the original function. if not raw or not retval: orig_fn = fn def wrap(target, value, *arg): if not raw: target = target.obj() if not retval: orig_fn(target, value, *arg) return value else: return orig_fn(target, value, *arg) fn = wrap event.Events._listen(target, identifier, fn, propagate) if propagate: manager = orm.instrumentation.manager_of_class(target.class_) for mgr in manager.subclass_managers(True): event.Events._listen(mgr[target.key], identifier, fn, True) @classmethod def _remove(cls, identifier, target, fn): msg = "Removal of attribute events not yet implemented" raise NotImplementedError(msg) def append(self, target, value, initiator): """Receive a collection append event. :param target: the object instance receiving the event. If the listener is registered with ``raw=True``, this will be the :class:`.InstanceState` object. :param value: the value being appended. If this listener is registered with ``retval=True``, the listener function must return this value, or a new value which replaces it. :param initiator: the attribute implementation object which initiated this event. :return: if the event was registered with ``retval=True``, the given value, or a new effective value, should be returned. """ def remove(self, target, value, initiator): """Receive a collection remove event. :param target: the object instance receiving the event. If the listener is registered with ``raw=True``, this will be the :class:`.InstanceState` object. :param value: the value being removed. :param initiator: the attribute implementation object which initiated this event. :return: No return value is defined for this event. """ def set(self, target, value, oldvalue, initiator): """Receive a scalar set event. :param target: the object instance receiving the event. If the listener is registered with ``raw=True``, this will be the :class:`.InstanceState` object. :param value: the value being set. If this listener is registered with ``retval=True``, the listener function must return this value, or a new value which replaces it. :param oldvalue: the previous value being replaced. 
This may also be the symbol ``NEVER_SET`` or ``NO_VALUE``. If the listener is registered with ``active_history=True``, the previous value of the attribute will be loaded from the database if the existing value is currently unloaded or expired. :param initiator: the attribute implementation object which initiated this event. :return: if the event was registered with ``retval=True``, the given value, or a new effective value, should be returned. """ SQLAlchemy-0.8.4/lib/sqlalchemy/orm/exc.py0000644000076500000240000001245512251150015021066 0ustar classicstaff00000000000000# orm/exc.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """SQLAlchemy ORM exceptions.""" from .. import exc as sa_exc, util orm_util = util.importlater('sqlalchemy.orm', 'util') attributes = util.importlater('sqlalchemy.orm', 'attributes') NO_STATE = (AttributeError, KeyError) """Exception types that may be raised by instrumentation implementations.""" class StaleDataError(sa_exc.SQLAlchemyError): """An operation encountered database state that is unaccounted for. Conditions which cause this to happen include: * A flush may have attempted to update or delete rows and an unexpected number of rows were matched during the UPDATE or DELETE statement. Note that when version_id_col is used, rows in UPDATE or DELETE statements are also matched against the current known version identifier. * A mapped object with version_id_col was refreshed, and the version number coming back from the database does not match that of the object itself. * A object is detached from its parent object, however the object was previously attached to a different parent identity which was garbage collected, and a decision cannot be made if the new parent was really the most recent "parent". .. versionadded:: 0.7.4 """ ConcurrentModificationError = StaleDataError class FlushError(sa_exc.SQLAlchemyError): """A invalid condition was detected during flush().""" class UnmappedError(sa_exc.InvalidRequestError): """Base for exceptions that involve expected mappings not present.""" class ObjectDereferencedError(sa_exc.SQLAlchemyError): """An operation cannot complete due to an object being garbage collected. """ class DetachedInstanceError(sa_exc.SQLAlchemyError): """An attempt to access unloaded attributes on a mapped instance that is detached.""" class UnmappedInstanceError(UnmappedError): """An mapping operation was requested for an unknown instance.""" def __init__(self, obj, msg=None): if not msg: try: mapper = orm_util.class_mapper(type(obj)) name = _safe_cls_name(type(obj)) msg = ("Class %r is mapped, but this instance lacks " "instrumentation. This occurs when the instance" "is created before sqlalchemy.orm.mapper(%s) " "was called." % (name, name)) except UnmappedClassError: msg = _default_unmapped(type(obj)) if isinstance(obj, type): msg += ( '; was a class (%s) supplied where an instance was ' 'required?' 
% _safe_cls_name(obj)) UnmappedError.__init__(self, msg) def __reduce__(self): return self.__class__, (None, self.args[0]) class UnmappedClassError(UnmappedError): """An mapping operation was requested for an unknown class.""" def __init__(self, cls, msg=None): if not msg: msg = _default_unmapped(cls) UnmappedError.__init__(self, msg) def __reduce__(self): return self.__class__, (None, self.args[0]) class ObjectDeletedError(sa_exc.InvalidRequestError): """A refresh operation failed to retrieve the database row corresponding to an object's known primary key identity. A refresh operation proceeds when an expired attribute is accessed on an object, or when :meth:`.Query.get` is used to retrieve an object which is, upon retrieval, detected as expired. A SELECT is emitted for the target row based on primary key; if no row is returned, this exception is raised. The true meaning of this exception is simply that no row exists for the primary key identifier associated with a persistent object. The row may have been deleted, or in some cases the primary key updated to a new value, outside of the ORM's management of the target object. """ def __init__(self, state, msg=None): if not msg: msg = "Instance '%s' has been deleted, or its "\ "row is otherwise not present." % orm_util.state_str(state) sa_exc.InvalidRequestError.__init__(self, msg) def __reduce__(self): return self.__class__, (None, self.args[0]) class UnmappedColumnError(sa_exc.InvalidRequestError): """Mapping operation was requested on an unknown column.""" class NoResultFound(sa_exc.InvalidRequestError): """A database result was required but none was found.""" class MultipleResultsFound(sa_exc.InvalidRequestError): """A single database result was required but more than one were found.""" def _safe_cls_name(cls): try: cls_name = '.'.join((cls.__module__, cls.__name__)) except AttributeError: cls_name = getattr(cls, '__name__', None) if cls_name is None: cls_name = repr(cls) return cls_name def _default_unmapped(cls): try: mappers = attributes.manager_of_class(cls).mappers except NO_STATE: mappers = {} except TypeError: mappers = {} name = _safe_cls_name(cls) if not mappers: return "Class '%s' is not mapped" % name SQLAlchemy-0.8.4/lib/sqlalchemy/orm/identity.py0000644000076500000240000001552012251150015022134 0ustar classicstaff00000000000000# orm/identity.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php import weakref from . import attributes class IdentityMap(dict): def __init__(self): self._modified = set() self._wr = weakref.ref(self) def replace(self, state): raise NotImplementedError() def add(self, state): raise NotImplementedError() def update(self, dict): raise NotImplementedError("IdentityMap uses add() to insert data") def clear(self): raise NotImplementedError("IdentityMap uses remove() to remove data") def _manage_incoming_state(self, state): state._instance_dict = self._wr if state.modified: self._modified.add(state) def _manage_removed_state(self, state): del state._instance_dict self._modified.discard(state) def _dirty_states(self): return self._modified def check_modified(self): """return True if any InstanceStates present have been marked as 'modified'. 
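E.g., a hypothetical direct check against a session's identity map (ordinarily this flag is consulted internally by the :class:`.Session`)::

    if session.identity_map.check_modified():
        # at least one loaded instance has pending attribute changes
        pass
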
""" return bool(self._modified) def has_key(self, key): return key in self def popitem(self): raise NotImplementedError("IdentityMap uses remove() to remove data") def pop(self, key, *args): raise NotImplementedError("IdentityMap uses remove() to remove data") def setdefault(self, key, default=None): raise NotImplementedError("IdentityMap uses add() to insert data") def copy(self): raise NotImplementedError() def __setitem__(self, key, value): raise NotImplementedError("IdentityMap uses add() to insert data") def __delitem__(self, key): raise NotImplementedError("IdentityMap uses remove() to remove data") class WeakInstanceDict(IdentityMap): def __init__(self): IdentityMap.__init__(self) def __getitem__(self, key): state = dict.__getitem__(self, key) o = state.obj() if o is None: raise KeyError, key return o def __contains__(self, key): try: if dict.__contains__(self, key): state = dict.__getitem__(self, key) o = state.obj() else: return False except KeyError: return False else: return o is not None def contains_state(self, state): return dict.get(self, state.key) is state def replace(self, state): if dict.__contains__(self, state.key): existing = dict.__getitem__(self, state.key) if existing is not state: self._manage_removed_state(existing) else: return dict.__setitem__(self, state.key, state) self._manage_incoming_state(state) def add(self, state): key = state.key # inline of self.__contains__ if dict.__contains__(self, key): try: existing_state = dict.__getitem__(self, key) if existing_state is not state: o = existing_state.obj() if o is not None: raise AssertionError( "A conflicting state is already " "present in the identity map for key %r" % (key, )) else: return except KeyError: pass dict.__setitem__(self, key, state) self._manage_incoming_state(state) def get(self, key, default=None): state = dict.get(self, key, default) if state is default: return default o = state.obj() if o is None: return default return o def _items(self): values = self.all_states() result = [] for state in values: value = state.obj() if value is not None: result.append((state.key, value)) return result def _values(self): values = self.all_states() result = [] for state in values: value = state.obj() if value is not None: result.append(value) return result # Py3K #def items(self): # return iter(self._items()) # #def values(self): # return iter(self._values()) # Py2K items = _items def iteritems(self): return iter(self.items()) values = _values def itervalues(self): return iter(self.values()) # end Py2K def all_states(self): # Py3K # return list(dict.values(self)) # Py2K return dict.values(self) # end Py2K def discard(self, state): st = dict.get(self, state.key, None) if st is state: dict.pop(self, state.key, None) self._manage_removed_state(state) def prune(self): return 0 class StrongInstanceDict(IdentityMap): def all_states(self): return [attributes.instance_state(o) for o in self.itervalues()] def contains_state(self, state): return ( state.key in self and attributes.instance_state(self[state.key]) is state) def replace(self, state): if dict.__contains__(self, state.key): existing = dict.__getitem__(self, state.key) existing = attributes.instance_state(existing) if existing is not state: self._manage_removed_state(existing) else: return dict.__setitem__(self, state.key, state.obj()) self._manage_incoming_state(state) def add(self, state): if state.key in self: if attributes.instance_state(dict.__getitem__(self, state.key)) is not state: raise AssertionError('A conflicting state is already ' 'present in the 
identity map for key %r' % (state.key, )) else: dict.__setitem__(self, state.key, state.obj()) self._manage_incoming_state(state) def discard(self, state): obj = dict.get(self, state.key, None) if obj is not None: st = attributes.instance_state(obj) if st is state: dict.pop(self, state.key, None) self._manage_removed_state(state) def prune(self): """prune unreferenced, non-dirty states.""" ref_count = len(self) dirty = [s.obj() for s in self.all_states() if s.modified] # work around http://bugs.python.org/issue6149 keepers = weakref.WeakValueDictionary() keepers.update(self) dict.clear(self) dict.update(self, keepers) self.modified = bool(dirty) return ref_count - len(self) SQLAlchemy-0.8.4/lib/sqlalchemy/orm/instrumentation.py0000644000076500000240000003651212251150015023552 0ustar classicstaff00000000000000# orm/instrumentation.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Defines SQLAlchemy's system of class instrumentation. This module is usually not directly visible to user applications, but defines a large part of the ORM's interactivity. instrumentation.py deals with registration of end-user classes for state tracking. It interacts closely with state.py and attributes.py which establish per-instance and per-class-attribute instrumentation, respectively. The class instrumentation system can be customized on a per-class or global basis using the :mod:`sqlalchemy.ext.instrumentation` module, which provides the means to build and specify alternate instrumentation forms. .. versionchanged: 0.8 The instrumentation extension system was moved out of the ORM and into the external :mod:`sqlalchemy.ext.instrumentation` package. When that package is imported, it installs itself within sqlalchemy.orm so that its more comprehensive resolution mechanics take effect. """ from . import exc, collections, events, interfaces from operator import attrgetter from .. import event, util state = util.importlater("sqlalchemy.orm", "state") class ClassManager(dict): """tracks state information at the class level.""" MANAGER_ATTR = '_sa_class_manager' STATE_ATTR = '_sa_instance_state' deferred_scalar_loader = None original_init = object.__init__ factory = None def __init__(self, class_): self.class_ = class_ self.info = {} self.new_init = None self.local_attrs = {} self.originals = {} self._bases = [mgr for mgr in [ manager_of_class(base) for base in self.class_.__bases__ if isinstance(base, type) ] if mgr is not None] for base in self._bases: self.update(base) events._InstanceEventsHold.populate(class_, self) for basecls in class_.__mro__: mgr = manager_of_class(basecls) if mgr is not None: self.dispatch._update(mgr.dispatch) self.manage() self._instrument_init() if '__del__' in class_.__dict__: util.warn("__del__() method on class %s will " "cause unreachable cycles and memory leaks, " "as SQLAlchemy instrumentation often creates " "reference cycles. Please remove this method." % class_) dispatch = event.dispatcher(events.InstanceEvents) @property def is_mapped(self): return 'mapper' in self.__dict__ @util.memoized_property def mapper(self): # raises unless self.mapper has been assigned raise exc.UnmappedClassError(self.class_) def _all_sqla_attributes(self, exclude=None): """return an iterator of all classbound attributes that are implement :class:`._InspectionAttr`. 
This includes :class:`.QueryableAttribute` as well as extension types such as :class:`.hybrid_property` and :class:`.AssociationProxy`. """ if exclude is None: exclude = set() for supercls in self.class_.__mro__: for key in set(supercls.__dict__).difference(exclude): exclude.add(key) val = supercls.__dict__[key] if isinstance(val, interfaces._InspectionAttr): yield key, val def _attr_has_impl(self, key): """Return True if the given attribute is fully initialized. i.e. has an impl. """ return key in self and self[key].impl is not None def _subclass_manager(self, cls): """Create a new ClassManager for a subclass of this ClassManager's class. This is called automatically when attributes are instrumented so that the attributes can be propagated to subclasses against their own class-local manager, without the need for mappers etc. to have already pre-configured managers for the full class hierarchy. Mappers can post-configure the auto-generated ClassManager when needed. """ manager = manager_of_class(cls) if manager is None: manager = _instrumentation_factory.create_manager_for_cls(cls) return manager def _instrument_init(self): # TODO: self.class_.__init__ is often the already-instrumented # __init__ from an instrumented superclass. We still need to make # our own wrapper, but it would # be nice to wrap the original __init__ and not our existing wrapper # of such, since this adds method overhead. self.original_init = self.class_.__init__ self.new_init = _generate_init(self.class_, self) self.install_member('__init__', self.new_init) def _uninstrument_init(self): if self.new_init: self.uninstall_member('__init__') self.new_init = None @util.memoized_property def _state_constructor(self): self.dispatch.first_init(self, self.class_) return state.InstanceState def manage(self): """Mark this instance as the manager for its class.""" setattr(self.class_, self.MANAGER_ATTR, self) def dispose(self): """Dissasociate this manager from its class.""" delattr(self.class_, self.MANAGER_ATTR) @util.hybridmethod def manager_getter(self): def manager_of_class(cls): return cls.__dict__.get(ClassManager.MANAGER_ATTR, None) return manager_of_class @util.hybridmethod def state_getter(self): """Return a (instance) -> InstanceState callable. "state getter" callables should raise either KeyError or AttributeError if no InstanceState could be found for the instance. 
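A rough usage sketch (``SomeClass`` and ``some_instance`` are hypothetical; ``manager_of_class()`` is defined in this module)::

    manager = manager_of_class(SomeClass)
    getter = manager.state_getter()
    state = getter(some_instance)   # -> InstanceState, or raises
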
""" return attrgetter(self.STATE_ATTR) @util.hybridmethod def dict_getter(self): return attrgetter('__dict__') def instrument_attribute(self, key, inst, propagated=False): if propagated: if key in self.local_attrs: return # don't override local attr with inherited attr else: self.local_attrs[key] = inst self.install_descriptor(key, inst) self[key] = inst for cls in self.class_.__subclasses__(): manager = self._subclass_manager(cls) manager.instrument_attribute(key, inst, True) def subclass_managers(self, recursive): for cls in self.class_.__subclasses__(): mgr = manager_of_class(cls) if mgr is not None and mgr is not self: yield mgr if recursive: for m in mgr.subclass_managers(True): yield m def post_configure_attribute(self, key): _instrumentation_factory.dispatch.\ attribute_instrument(self.class_, key, self[key]) def uninstrument_attribute(self, key, propagated=False): if key not in self: return if propagated: if key in self.local_attrs: return # don't get rid of local attr else: del self.local_attrs[key] self.uninstall_descriptor(key) del self[key] for cls in self.class_.__subclasses__(): manager = manager_of_class(cls) if manager: manager.uninstrument_attribute(key, True) def unregister(self): """remove all instrumentation established by this ClassManager.""" self._uninstrument_init() self.mapper = self.dispatch = None self.info.clear() for key in list(self): if key in self.local_attrs: self.uninstrument_attribute(key) def install_descriptor(self, key, inst): if key in (self.STATE_ATTR, self.MANAGER_ATTR): raise KeyError("%r: requested attribute name conflicts with " "instrumentation attribute of the same name." % key) setattr(self.class_, key, inst) def uninstall_descriptor(self, key): delattr(self.class_, key) def install_member(self, key, implementation): if key in (self.STATE_ATTR, self.MANAGER_ATTR): raise KeyError("%r: requested attribute name conflicts with " "instrumentation attribute of the same name." % key) self.originals.setdefault(key, getattr(self.class_, key, None)) setattr(self.class_, key, implementation) def uninstall_member(self, key): original = self.originals.pop(key, None) if original is not None: setattr(self.class_, key, original) def instrument_collection_class(self, key, collection_class): return collections.prepare_instrumentation(collection_class) def initialize_collection(self, key, state, factory): user_data = factory() adapter = collections.CollectionAdapter( self.get_impl(key), state, user_data) return adapter, user_data def is_instrumented(self, key, search=False): if search: return key in self else: return key in self.local_attrs def get_impl(self, key): return self[key].impl @property def attributes(self): return self.itervalues() ## InstanceState management def new_instance(self, state=None): instance = self.class_.__new__(self.class_) setattr(instance, self.STATE_ATTR, state or self._state_constructor(instance, self)) return instance def setup_instance(self, instance, state=None): setattr(instance, self.STATE_ATTR, state or self._state_constructor(instance, self)) def teardown_instance(self, instance): delattr(instance, self.STATE_ATTR) def _new_state_if_none(self, instance): """Install a default InstanceState if none is present. A private convenience method used by the __init__ decorator. """ if hasattr(instance, self.STATE_ATTR): return False elif self.class_ is not instance.__class__ and \ self.is_mapped: # this will create a new ClassManager for the # subclass, without a mapper. 
This is likely a # user error situation but allow the object # to be constructed, so that it is usable # in a non-ORM context at least. return self._subclass_manager(instance.__class__).\ _new_state_if_none(instance) else: state = self._state_constructor(instance, self) setattr(instance, self.STATE_ATTR, state) return state def has_state(self, instance): return hasattr(instance, self.STATE_ATTR) def has_parent(self, state, key, optimistic=False): """TODO""" return self.get_impl(key).hasparent(state, optimistic=optimistic) def __nonzero__(self): """All ClassManagers are non-zero regardless of attribute state.""" return True def __repr__(self): return '<%s of %r at %x>' % ( self.__class__.__name__, self.class_, id(self)) class InstrumentationFactory(object): """Factory for new ClassManager instances.""" dispatch = event.dispatcher(events.InstrumentationEvents) def create_manager_for_cls(self, class_): assert class_ is not None assert manager_of_class(class_) is None # give a more complicated subclass # a chance to do what it wants here manager, factory = self._locate_extended_factory(class_) if factory is None: factory = ClassManager manager = factory(class_) self._check_conflicts(class_, factory) manager.factory = factory self.dispatch.class_instrument(class_) return manager def _locate_extended_factory(self, class_): """Overridden by a subclass to do an extended lookup.""" return None, None def _check_conflicts(self, class_, factory): """Overridden by a subclass to test for conflicting factories.""" return def unregister(self, class_): manager = manager_of_class(class_) manager.unregister() manager.dispose() self.dispatch.class_uninstrument(class_) if ClassManager.MANAGER_ATTR in class_.__dict__: delattr(class_, ClassManager.MANAGER_ATTR) # this attribute is replaced by sqlalchemy.ext.instrumentation # when importred. _instrumentation_factory = InstrumentationFactory() def register_class(class_): """Register class instrumentation. Returns the existing or newly created class manager. """ manager = manager_of_class(class_) if manager is None: manager = _instrumentation_factory.create_manager_for_cls(class_) return manager def unregister_class(class_): """Unregister class instrumentation.""" _instrumentation_factory.unregister(class_) def is_instrumented(instance, key): """Return True if the given attribute on the given instance is instrumented by the attributes package. This function may be used regardless of instrumentation applied directly to the class, i.e. no descriptors are required. """ return manager_of_class(instance.__class__).\ is_instrumented(key, search=True) # these attributes are replaced by sqlalchemy.ext.instrumentation # when a non-standard InstrumentationManager class is first # used to instrument a class. instance_state = _default_state_getter = ClassManager.state_getter() instance_dict = _default_dict_getter = ClassManager.dict_getter() manager_of_class = _default_manager_getter = ClassManager.manager_getter() def _generate_init(class_, class_manager): """Build an __init__ decorator that triggers ClassManager events.""" # TODO: we should use the ClassManager's notion of the # original '__init__' method, once ClassManager is fixed # to always reference that. original__init__ = class_.__init__ assert original__init__ # Go through some effort here and don't change the user's __init__ # calling signature, including the unlikely case that it has # a return value. # FIXME: need to juggle local names to avoid constructor argument # clashes. 
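    # Illustrative note (an assumption, not part of the original source): for a
    # user-defined ``def __init__(self, name, email)``, the template below
    # expands to a wrapper roughly equivalent to:
    #
    #     def __init__(self, name, email):
    #         new_state = class_manager._new_state_if_none(self)
    #         if new_state:
    #             return new_state._initialize_instance(self, name, email)
    #         else:
    #             return original__init__(self, name, email)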
func_body = """\ def __init__(%(apply_pos)s): new_state = class_manager._new_state_if_none(%(self_arg)s) if new_state: return new_state._initialize_instance(%(apply_kw)s) else: return original__init__(%(apply_kw)s) """ func_vars = util.format_argspec_init(original__init__, grouped=False) func_text = func_body % func_vars # Py3K #func_defaults = getattr(original__init__, '__defaults__', None) #func_kw_defaults = getattr(original__init__, '__kwdefaults__', None) # Py2K func = getattr(original__init__, 'im_func', original__init__) func_defaults = getattr(func, 'func_defaults', None) # end Py2K env = locals().copy() exec func_text in env __init__ = env['__init__'] __init__.__doc__ = original__init__.__doc__ if func_defaults: __init__.func_defaults = func_defaults # Py3K #if func_kw_defaults: # __init__.__kwdefaults__ = func_kw_defaults return __init__ SQLAlchemy-0.8.4/lib/sqlalchemy/orm/interfaces.py0000644000076500000240000006761712251150015022444 0ustar classicstaff00000000000000# orm/interfaces.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """ Contains various base classes used throughout the ORM. Defines the now deprecated ORM extension classes as well as ORM internals. Other than the deprecated extensions, this module and the classes within should be considered mostly private. """ from __future__ import absolute_import from .. import exc as sa_exc, util, inspect from ..sql import operators from collections import deque orm_util = util.importlater('sqlalchemy.orm', 'util') collections = util.importlater('sqlalchemy.orm', 'collections') __all__ = ( 'AttributeExtension', 'EXT_CONTINUE', 'EXT_STOP', 'ExtensionOption', 'InstrumentationManager', 'LoaderStrategy', 'MapperExtension', 'MapperOption', 'MapperProperty', 'PropComparator', 'PropertyOption', 'SessionExtension', 'StrategizedOption', 'StrategizedProperty', ) EXT_CONTINUE = util.symbol('EXT_CONTINUE') EXT_STOP = util.symbol('EXT_STOP') ONETOMANY = util.symbol('ONETOMANY') MANYTOONE = util.symbol('MANYTOONE') MANYTOMANY = util.symbol('MANYTOMANY') from .deprecated_interfaces import AttributeExtension, \ SessionExtension, \ MapperExtension NOT_EXTENSION = util.symbol('NOT_EXTENSION') """Symbol indicating an :class:`_InspectionAttr` that's not part of sqlalchemy.ext. Is assigned to the :attr:`._InspectionAttr.extension_type` attibute. """ class _InspectionAttr(object): """A base class applied to all ORM objects that can be returned by the :func:`.inspect` function. The attributes defined here allow the usage of simple boolean checks to test basic facts about the object returned. While the boolean checks here are basically the same as using the Python isinstance() function, the flags here can be used without the need to import all of these classes, and also such that the SQLAlchemy class system can change while leaving the flags here intact for forwards-compatibility. """ is_selectable = False """Return True if this object is an instance of :class:`.Selectable`.""" is_aliased_class = False """True if this object is an instance of :class:`.AliasedClass`.""" is_instance = False """True if this object is an instance of :class:`.InstanceState`.""" is_mapper = False """True if this object is an instance of :class:`.Mapper`.""" is_property = False """True if this object is an instance of :class:`.MapperProperty`.""" is_attribute = False """True if this object is a Python :term:`descriptor`. 
This can refer to one of many types. Usually a :class:`.QueryableAttribute` which handles attributes events on behalf of a :class:`.MapperProperty`. But can also be an extension type such as :class:`.AssociationProxy` or :class:`.hybrid_property`. The :attr:`._InspectionAttr.extension_type` will refer to a constant identifying the specific subtype. .. seealso:: :attr:`.Mapper.all_orm_descriptors` """ is_clause_element = False """True if this object is an instance of :class:`.ClauseElement`.""" extension_type = NOT_EXTENSION """The extension type, if any. Defaults to :data:`.interfaces.NOT_EXTENSION` .. versionadded:: 0.8.0 .. seealso:: :data:`.HYBRID_METHOD` :data:`.HYBRID_PROPERTY` :data:`.ASSOCIATION_PROXY` """ class _MappedAttribute(object): """Mixin for attributes which should be replaced by mapper-assigned attributes. """ class MapperProperty(_MappedAttribute, _InspectionAttr): """Manage the relationship of a ``Mapper`` to a single class attribute, as well as that attribute as it appears on individual instances of the class, including attribute instrumentation, attribute access, loading behavior, and dependency calculations. The most common occurrences of :class:`.MapperProperty` are the mapped :class:`.Column`, which is represented in a mapping as an instance of :class:`.ColumnProperty`, and a reference to another class produced by :func:`.relationship`, represented in the mapping as an instance of :class:`.RelationshipProperty`. """ cascade = frozenset() """The set of 'cascade' attribute names. This collection is checked before the 'cascade_iterator' method is called. """ is_property = True def setup(self, context, entity, path, adapter, **kwargs): """Called by Query for the purposes of constructing a SQL statement. Each MapperProperty associated with the target mapper processes the statement referenced by the query context, adding columns and/or criterion as appropriate. """ pass def create_row_processor(self, context, path, mapper, row, adapter): """Return a 3-tuple consisting of three row processing functions. """ return None, None, None def cascade_iterator(self, type_, state, visited_instances=None, halt_on=None): """Iterate through instances related to the given instance for a particular 'cascade', starting with this MapperProperty. Return an iterator3-tuples (instance, mapper, state). Note that the 'cascade' collection on this MapperProperty is checked first for the given type before cascade_iterator is called. See PropertyLoader for the related instance implementation. """ return iter(()) def set_parent(self, parent, init): self.parent = parent def instrument_class(self, mapper): # pragma: no-coverage raise NotImplementedError() @util.memoized_property def info(self): """Info dictionary associated with the object, allowing user-defined data to be associated with this :class:`.MapperProperty`. The dictionary is generated when first accessed. Alternatively, it can be specified as a constructor argument to the :func:`.column_property`, :func:`.relationship`, or :func:`.composite` functions. .. versionadded:: 0.8 Added support for .info to all :class:`.MapperProperty` subclasses. .. seealso:: :attr:`.QueryableAttribute.info` :attr:`.SchemaItem.info` """ return {} _configure_started = False _configure_finished = False def init(self): """Called after all mappers are created to assemble relationships between mappers and perform other post-mapper-creation initialization steps. 
""" self._configure_started = True self.do_init() self._configure_finished = True @property def class_attribute(self): """Return the class-bound descriptor corresponding to this :class:`.MapperProperty`. This is basically a ``getattr()`` call:: return getattr(self.parent.class_, self.key) I.e. if this :class:`.MapperProperty` were named ``addresses``, and the class to which it is mapped is ``User``, this sequence is possible:: >>> from sqlalchemy import inspect >>> mapper = inspect(User) >>> addresses_property = mapper.attrs.addresses >>> addresses_property.class_attribute is User.addresses True >>> User.addresses.property is addresses_property True """ return getattr(self.parent.class_, self.key) def do_init(self): """Perform subclass-specific initialization post-mapper-creation steps. This is a template method called by the ``MapperProperty`` object's init() method. """ pass def post_instrument_class(self, mapper): """Perform instrumentation adjustments that need to occur after init() has completed. """ pass def is_primary(self): """Return True if this ``MapperProperty``'s mapper is the primary mapper for its class. This flag is used to indicate that the ``MapperProperty`` can define attribute instrumentation for the class at the class level (as opposed to the individual instance level). """ return not self.parent.non_primary def merge(self, session, source_state, source_dict, dest_state, dest_dict, load, _recursive): """Merge the attribute represented by this ``MapperProperty`` from source to destination object""" pass def compare(self, operator, value, **kw): """Return a compare operation for the columns represented by this ``MapperProperty`` to the given value, which may be a column value or an instance. 'operator' is an operator from the operators module, or from sql.Comparator. By default uses the PropComparator attached to this MapperProperty under the attribute name "comparator". """ return operator(self.comparator, value) def __repr__(self): return '<%s at 0x%x; %s>' % ( self.__class__.__name__, id(self), getattr(self, 'key', 'no key')) class PropComparator(operators.ColumnOperators): """Defines boolean, comparison, and other operators for :class:`.MapperProperty` objects. SQLAlchemy allows for operators to be redefined at both the Core and ORM level. :class:`.PropComparator` is the base class of operator redefinition for ORM-level operations, including those of :class:`.ColumnProperty`, :class:`.RelationshipProperty`, and :class:`.CompositeProperty`. .. note:: With the advent of Hybrid properties introduced in SQLAlchemy 0.7, as well as Core-level operator redefinition in SQLAlchemy 0.8, the use case for user-defined :class:`.PropComparator` instances is extremely rare. See :ref:`hybrids_toplevel` as well as :ref:`types_operators`. User-defined subclasses of :class:`.PropComparator` may be created. The built-in Python comparison and math operator methods, such as :meth:`.operators.ColumnOperators.__eq__`, :meth:`.operators.ColumnOperators.__lt__`, and :meth:`.operators.ColumnOperators.__add__`, can be overridden to provide new operator behavior. The custom :class:`.PropComparator` is passed to the :class:`.MapperProperty` instance via the ``comparator_factory`` argument. 
In each case, the appropriate subclass of :class:`.PropComparator` should be used:: # definition of custom PropComparator subclasses from sqlalchemy.orm.properties import \\ ColumnProperty,\\ CompositeProperty,\\ RelationshipProperty class MyColumnComparator(ColumnProperty.Comparator): def __eq__(self, other): return self.__clause_element__() == other class MyRelationshipComparator(RelationshipProperty.Comparator): def any(self, expression): "define the 'any' operation" # ... class MyCompositeComparator(CompositeProperty.Comparator): def __gt__(self, other): "redefine the 'greater than' operation" return sql.and_(*[a>b for a, b in zip(self.__clause_element__().clauses, other.__composite_values__())]) # application of custom PropComparator subclasses from sqlalchemy.orm import column_property, relationship, composite from sqlalchemy import Column, String class SomeMappedClass(Base): some_column = column_property(Column("some_column", String), comparator_factory=MyColumnComparator) some_relationship = relationship(SomeOtherClass, comparator_factory=MyRelationshipComparator) some_composite = composite( Column("a", String), Column("b", String), comparator_factory=MyCompositeComparator ) Note that for column-level operator redefinition, it's usually simpler to define the operators at the Core level, using the :attr:`.TypeEngine.comparator_factory` attribute. See :ref:`types_operators` for more detail. See also: :class:`.ColumnProperty.Comparator` :class:`.RelationshipProperty.Comparator` :class:`.CompositeProperty.Comparator` :class:`.ColumnOperators` :ref:`types_operators` :attr:`.TypeEngine.comparator_factory` """ def __init__(self, prop, parentmapper, adapter=None): self.prop = self.property = prop self._parentmapper = parentmapper self.adapter = adapter def __clause_element__(self): raise NotImplementedError("%r" % self) def adapted(self, adapter): """Return a copy of this PropComparator which will use the given adaption function on the local side of generated expressions. """ return self.__class__(self.prop, self._parentmapper, adapter) @util.memoized_property def info(self): return self.property.info @staticmethod def any_op(a, b, **kwargs): return a.any(b, **kwargs) @staticmethod def has_op(a, b, **kwargs): return a.has(b, **kwargs) @staticmethod def of_type_op(a, class_): return a.of_type(class_) def of_type(self, class_): """Redefine this object in terms of a polymorphic subclass. Returns a new PropComparator from which further criterion can be evaluated. e.g.:: query.join(Company.employees.of_type(Engineer)).\\ filter(Engineer.name=='foo') :param \class_: a class or mapper indicating that criterion will be against this specific subclass. """ return self.operate(PropComparator.of_type_op, class_) def any(self, criterion=None, **kwargs): """Return true if this collection contains any member that meets the given criterion. The usual implementation of ``any()`` is :meth:`.RelationshipProperty.Comparator.any`. :param criterion: an optional ClauseElement formulated against the member class' table or attributes. :param \**kwargs: key/value pairs corresponding to member class attribute names which will be compared via equality to the corresponding values. """ return self.operate(PropComparator.any_op, criterion, **kwargs) def has(self, criterion=None, **kwargs): """Return true if this element references a member which meets the given criterion. The usual implementation of ``has()`` is :meth:`.RelationshipProperty.Comparator.has`. 
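        e.g., a minimal sketch assuming the usual ``User``/``Address`` mapping
        where ``Address.user`` is a many-to-one relationship::

            session.query(Address).filter(Address.user.has(User.name == 'ed'))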
:param criterion: an optional ClauseElement formulated against the member class' table or attributes. :param \**kwargs: key/value pairs corresponding to member class attribute names which will be compared via equality to the corresponding values. """ return self.operate(PropComparator.has_op, criterion, **kwargs) class StrategizedProperty(MapperProperty): """A MapperProperty which uses selectable strategies to affect loading behavior. There is a single strategy selected by default. Alternate strategies can be selected at Query time through the usage of ``StrategizedOption`` objects via the Query.options() method. """ strategy_wildcard_key = None @util.memoized_property def _wildcard_path(self): if self.strategy_wildcard_key: return ('loaderstrategy', (self.strategy_wildcard_key,)) else: return None def _get_context_strategy(self, context, path): strategy_cls = path._inlined_get_for(self, context, 'loaderstrategy') if not strategy_cls: wc_key = self._wildcard_path if wc_key and wc_key in context.attributes: strategy_cls = context.attributes[wc_key] if strategy_cls: try: return self._strategies[strategy_cls] except KeyError: return self.__init_strategy(strategy_cls) return self.strategy def _get_strategy(self, cls): try: return self._strategies[cls] except KeyError: return self.__init_strategy(cls) def __init_strategy(self, cls): self._strategies[cls] = strategy = cls(self) return strategy def setup(self, context, entity, path, adapter, **kwargs): self._get_context_strategy(context, path).\ setup_query(context, entity, path, adapter, **kwargs) def create_row_processor(self, context, path, mapper, row, adapter): return self._get_context_strategy(context, path).\ create_row_processor(context, path, mapper, row, adapter) def do_init(self): self._strategies = {} self.strategy = self.__init_strategy(self.strategy_class) def post_instrument_class(self, mapper): if self.is_primary() and \ not mapper.class_manager._attr_has_impl(self.key): self.strategy.init_class_attribute(mapper) class MapperOption(object): """Describe a modification to a Query.""" propagate_to_loaders = False """if True, indicate this option should be carried along Query object generated by scalar or object lazy loaders. """ def process_query(self, query): pass def process_query_conditionally(self, query): """same as process_query(), except that this option may not apply to the given query. Used when secondary loaders resend existing options to a new Query.""" self.process_query(query) class PropertyOption(MapperOption): """A MapperOption that is applied to a property off the mapper or one of its child mappers, identified by a dot-separated key or list of class-bound attributes. 
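    For example, :func:`.joinedload` produces such an option; in the sketch
    below (which assumes a typical ``User.addresses`` relationship) the option
    targets the ``addresses`` property of the query's root entity::

        from sqlalchemy.orm import joinedload

        session.query(User).options(joinedload(User.addresses))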
""" def __init__(self, key, mapper=None): self.key = key self.mapper = mapper def process_query(self, query): self._process(query, True) def process_query_conditionally(self, query): self._process(query, False) def _process(self, query, raiseerr): paths = self._process_paths(query, raiseerr) if paths: self.process_query_property(query, paths) def process_query_property(self, query, paths): pass def __getstate__(self): d = self.__dict__.copy() d['key'] = ret = [] for token in util.to_list(self.key): if isinstance(token, PropComparator): ret.append((token._parentmapper.class_, token.key)) else: ret.append(token) return d def __setstate__(self, state): ret = [] for key in state['key']: if isinstance(key, tuple): cls, propkey = key ret.append(getattr(cls, propkey)) else: ret.append(key) state['key'] = tuple(ret) self.__dict__ = state def _find_entity_prop_comparator(self, query, token, mapper, raiseerr): if orm_util._is_aliased_class(mapper): searchfor = mapper else: searchfor = orm_util._class_to_mapper(mapper) for ent in query._mapper_entities: if ent.corresponds_to(searchfor): return ent else: if raiseerr: if not list(query._mapper_entities): raise sa_exc.ArgumentError( "Query has only expression-based entities - " "can't find property named '%s'." % (token, ) ) else: raise sa_exc.ArgumentError( "Can't find property '%s' on any entity " "specified in this Query. Note the full path " "from root (%s) to target entity must be specified." % (token, ",".join(str(x) for x in query._mapper_entities)) ) else: return None def _find_entity_basestring(self, query, token, raiseerr): for ent in query._mapper_entities: # return only the first _MapperEntity when searching # based on string prop name. Ideally object # attributes are used to specify more exactly. return ent else: if raiseerr: raise sa_exc.ArgumentError( "Query has only expression-based entities - " "can't find property named '%s'." % (token, ) ) else: return None def _process_paths(self, query, raiseerr): """reconcile the 'key' for this PropertyOption with the current path and entities of the query. Return a list of affected paths. """ path = orm_util.PathRegistry.root entity = None paths = [] no_result = [] # _current_path implies we're in a # secondary load with an existing path current_path = list(query._current_path.path) tokens = deque(self.key) while tokens: token = tokens.popleft() if isinstance(token, basestring): # wildcard token if token.endswith(':*'): return [path.token(token)] sub_tokens = token.split(".", 1) token = sub_tokens[0] tokens.extendleft(sub_tokens[1:]) # exhaust current_path before # matching tokens to entities if current_path: if current_path[1].key == token: current_path = current_path[2:] continue else: return no_result if not entity: entity = self._find_entity_basestring( query, token, raiseerr) if entity is None: return no_result path_element = entity.entity_zero mapper = entity.mapper if hasattr(mapper.class_, token): prop = getattr(mapper.class_, token).property else: if raiseerr: raise sa_exc.ArgumentError( "Can't find property named '%s' on the " "mapped entity %s in this Query. 
" % ( token, mapper) ) else: return no_result elif isinstance(token, PropComparator): prop = token.property # exhaust current_path before # matching tokens to entities if current_path: if current_path[0:2] == \ [token._parententity, prop]: current_path = current_path[2:] continue else: return no_result if not entity: entity = self._find_entity_prop_comparator( query, prop.key, token._parententity, raiseerr) if not entity: return no_result path_element = entity.entity_zero mapper = entity.mapper else: raise sa_exc.ArgumentError( "mapper option expects " "string key or list of attributes") assert prop is not None if raiseerr and not prop.parent.common_parent(mapper): raise sa_exc.ArgumentError("Attribute '%s' does not " "link from element '%s'" % (token, path_element)) path = path[path_element][prop] paths.append(path) if getattr(token, '_of_type', None): ac = token._of_type ext_info = inspect(ac) path_element = mapper = ext_info.mapper if not ext_info.is_aliased_class: ac = orm_util.with_polymorphic( ext_info.mapper.base_mapper, ext_info.mapper, aliased=True, _use_mapper_path=True) ext_info = inspect(ac) path.set(query, "path_with_polymorphic", ext_info) else: path_element = mapper = getattr(prop, 'mapper', None) if mapper is None and tokens: raise sa_exc.ArgumentError( "Attribute '%s' of entity '%s' does not " "refer to a mapped entity" % (token, entity) ) if current_path: # ran out of tokens before # current_path was exhausted. assert not tokens return no_result return paths class StrategizedOption(PropertyOption): """A MapperOption that affects which LoaderStrategy will be used for an operation by a StrategizedProperty. """ chained = False def process_query_property(self, query, paths): strategy = self.get_strategy_class() if self.chained: for path in paths: path.set( query, "loaderstrategy", strategy ) else: paths[-1].set( query, "loaderstrategy", strategy ) def get_strategy_class(self): raise NotImplementedError() class LoaderStrategy(object): """Describe the loading behavior of a StrategizedProperty object. The ``LoaderStrategy`` interacts with the querying process in three ways: * it controls the configuration of the ``InstrumentedAttribute`` placed on a class to handle the behavior of the attribute. this may involve setting up class-level callable functions to fire off a select operation when the attribute is first accessed (i.e. a lazy load) * it processes the ``QueryContext`` at statement construction time, where it can modify the SQL statement that is being produced. Simple column attributes may add their represented column to the list of selected columns, *eager loading* properties may add ``LEFT OUTER JOIN`` clauses to the statement. * It produces "row processor" functions at result fetching time. These "row processor" functions populate a particular attribute on a particular mapped instance. """ def __init__(self, parent): self.parent_property = parent self.is_class_level = False self.parent = self.parent_property.parent self.key = self.parent_property.key def init_class_attribute(self, mapper): pass def setup_query(self, context, entity, path, adapter, **kwargs): pass def create_row_processor(self, context, path, mapper, row, adapter): """Return row processing functions which fulfill the contract specified by MapperProperty.create_row_processor. StrategizedProperty delegates its create_row_processor method directly to this method. 
""" return None, None, None def __str__(self): return str(self.parent_property) SQLAlchemy-0.8.4/lib/sqlalchemy/orm/loading.py0000644000076500000240000005250612251150015021725 0ustar classicstaff00000000000000# orm/loading.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """private module containing functions used to convert database rows into object instances and associated state. the functions here are called primarily by Query, Mapper, as well as some of the attribute loading strategies. """ from __future__ import absolute_import from .. import util from . import attributes, exc as orm_exc, state as statelib from .interfaces import EXT_CONTINUE from ..sql import util as sql_util from .util import _none_set, state_str from .. import exc as sa_exc sessionlib = util.importlater("sqlalchemy.orm", "session") _new_runid = util.counter() def instances(query, cursor, context): """Return an ORM result as an iterator.""" session = query.session context.runid = _new_runid() filter_fns = [ent.filter_fn for ent in query._entities] filtered = id in filter_fns single_entity = filtered and len(query._entities) == 1 if filtered: if single_entity: filter_fn = id else: def filter_fn(row): return tuple(fn(x) for x, fn in zip(row, filter_fns)) custom_rows = single_entity and \ query._entities[0].mapper.dispatch.append_result (process, labels) = \ zip(*[ query_entity.row_processor(query, context, custom_rows) for query_entity in query._entities ]) while True: context.progress = {} context.partials = {} if query._yield_per: fetch = cursor.fetchmany(query._yield_per) if not fetch: break else: fetch = cursor.fetchall() if custom_rows: rows = [] for row in fetch: process[0](row, rows) elif single_entity: rows = [process[0](row, None) for row in fetch] else: rows = [util.KeyedTuple([proc(row, None) for proc in process], labels) for row in fetch] if filtered: rows = util.unique_list(rows, filter_fn) if context.refresh_state and query._only_load_props \ and context.refresh_state in context.progress: context.refresh_state._commit( context.refresh_state.dict, query._only_load_props) context.progress.pop(context.refresh_state) statelib.InstanceState._commit_all_states( context.progress.items(), session.identity_map ) for state, (dict_, attrs) in context.partials.iteritems(): state._commit(dict_, attrs) for row in rows: yield row if not query._yield_per: break def merge_result(query, iterator, load=True): """Merge a result into this :class:`.Query` object's Session.""" from . 
import query as querylib session = query.session if load: # flush current contents if we expect to load data session._autoflush() autoflush = session.autoflush try: session.autoflush = False single_entity = len(query._entities) == 1 if single_entity: if isinstance(query._entities[0], querylib._MapperEntity): result = [session._merge( attributes.instance_state(instance), attributes.instance_dict(instance), load=load, _recursive={}) for instance in iterator] else: result = list(iterator) else: mapped_entities = [i for i, e in enumerate(query._entities) if isinstance(e, querylib._MapperEntity)] result = [] keys = [ent._label_name for ent in query._entities] for row in iterator: newrow = list(row) for i in mapped_entities: if newrow[i] is not None: newrow[i] = session._merge( attributes.instance_state(newrow[i]), attributes.instance_dict(newrow[i]), load=load, _recursive={}) result.append(util.KeyedTuple(newrow, keys)) return iter(result) finally: session.autoflush = autoflush def get_from_identity(session, key, passive): """Look up the given key in the given session's identity map, check the object for expired state if found. """ instance = session.identity_map.get(key) if instance is not None: state = attributes.instance_state(instance) # expired - ensure it still exists if state.expired: if not passive & attributes.SQL_OK: # TODO: no coverage here return attributes.PASSIVE_NO_RESULT elif not passive & attributes.RELATED_OBJECT_OK: # this mode is used within a flush and the instance's # expired state will be checked soon enough, if necessary return instance try: state(state, passive) except orm_exc.ObjectDeletedError: session._remove_newly_deleted([state]) return None return instance else: return None def load_on_ident(query, key, refresh_state=None, lockmode=None, only_load_props=None): """Load the given identity key from the database.""" lockmode = lockmode or query._lockmode if key is not None: ident = key[1] else: ident = None if refresh_state is None: q = query._clone() q._get_condition() else: q = query._clone() if ident is not None: mapper = query._mapper_zero() (_get_clause, _get_params) = mapper._get_clause # None present in ident - turn those comparisons # into "IS NULL" if None in ident: nones = set([ _get_params[col].key for col, value in zip(mapper.primary_key, ident) if value is None ]) _get_clause = sql_util.adapt_criterion_to_null( _get_clause, nones) _get_clause = q._adapt_clause(_get_clause, True, False) q._criterion = _get_clause params = dict([ (_get_params[primary_key].key, id_val) for id_val, primary_key in zip(ident, mapper.primary_key) ]) q._params = params if lockmode is not None: q._lockmode = lockmode q._get_options( populate_existing=bool(refresh_state), version_check=(lockmode is not None), only_load_props=only_load_props, refresh_state=refresh_state) q._order_by = None try: return q.one() except orm_exc.NoResultFound: return None def instance_processor(mapper, context, path, adapter, polymorphic_from=None, only_load_props=None, refresh_state=None, polymorphic_discriminator=None): """Produce a mapper level row processor callable which processes rows into mapped instances.""" # note that this method, most of which exists in a closure # called _instance(), resists being broken out, as # attempts to do so tend to add significant function # call overhead. _instance() is the most # performance-critical section in the whole ORM. 
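    # Illustrative note (comments only, not part of the original module): the
    # get_from_identity() and load_on_ident() helpers above back the public
    # Query.get() lookup, roughly:
    #
    #     user = session.query(User).get(5)     # ``User`` is hypothetical
    #     # 1. get_from_identity() checks session.identity_map for the key
    #     # 2. on a miss, load_on_ident() emits a SELECT limited to the
    #     #    primary key criterion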
pk_cols = mapper.primary_key if polymorphic_from or refresh_state: polymorphic_on = None else: if polymorphic_discriminator is not None: polymorphic_on = polymorphic_discriminator else: polymorphic_on = mapper.polymorphic_on polymorphic_instances = util.PopulateDict( _configure_subclass_mapper( mapper, context, path, adapter) ) version_id_col = mapper.version_id_col if adapter: pk_cols = [adapter.columns[c] for c in pk_cols] if polymorphic_on is not None: polymorphic_on = adapter.columns[polymorphic_on] if version_id_col is not None: version_id_col = adapter.columns[version_id_col] identity_class = mapper._identity_class new_populators = [] existing_populators = [] eager_populators = [] load_path = context.query._current_path + path \ if context.query._current_path.path \ else path def populate_state(state, dict_, row, isnew, only_load_props): if isnew: if context.propagate_options: state.load_options = context.propagate_options if state.load_options: state.load_path = load_path if not new_populators: _populators(mapper, context, path, row, adapter, new_populators, existing_populators, eager_populators ) if isnew: populators = new_populators else: populators = existing_populators if only_load_props is None: for key, populator in populators: populator(state, dict_, row) elif only_load_props: for key, populator in populators: if key in only_load_props: populator(state, dict_, row) session_identity_map = context.session.identity_map listeners = mapper.dispatch translate_row = listeners.translate_row or None create_instance = listeners.create_instance or None populate_instance = listeners.populate_instance or None append_result = listeners.append_result or None populate_existing = context.populate_existing or mapper.always_refresh invoke_all_eagers = context.invoke_all_eagers if mapper.allow_partial_pks: is_not_primary_key = _none_set.issuperset else: is_not_primary_key = _none_set.issubset def _instance(row, result): if not new_populators and invoke_all_eagers: _populators(mapper, context, path, row, adapter, new_populators, existing_populators, eager_populators ) if translate_row: for fn in translate_row: ret = fn(mapper, context, row) if ret is not EXT_CONTINUE: row = ret break if polymorphic_on is not None: discriminator = row[polymorphic_on] if discriminator is not None: _instance = polymorphic_instances[discriminator] if _instance: return _instance(row, result) # determine identity key if refresh_state: identitykey = refresh_state.key if identitykey is None: # super-rare condition; a refresh is being called # on a non-instance-key instance; this is meant to only # occur within a flush() identitykey = mapper._identity_key_from_state(refresh_state) else: identitykey = ( identity_class, tuple([row[column] for column in pk_cols]) ) instance = session_identity_map.get(identitykey) if instance is not None: state = attributes.instance_state(instance) dict_ = attributes.instance_dict(instance) isnew = state.runid != context.runid currentload = not isnew loaded_instance = False if not currentload and \ version_id_col is not None and \ context.version_check and \ mapper._get_state_attr_by_column( state, dict_, mapper.version_id_col) != \ row[version_id_col]: raise orm_exc.StaleDataError( "Instance '%s' has version id '%s' which " "does not match database-loaded version id '%s'." % (state_str(state), mapper._get_state_attr_by_column( state, dict_, mapper.version_id_col), row[version_id_col])) elif refresh_state: # out of band refresh_state detected (i.e. 
its not in the # session.identity_map) honor it anyway. this can happen # if a _get() occurs within save_obj(), such as # when eager_defaults is True. state = refresh_state instance = state.obj() dict_ = attributes.instance_dict(instance) isnew = state.runid != context.runid currentload = True loaded_instance = False else: # check for non-NULL values in the primary key columns, # else no entity is returned for the row if is_not_primary_key(identitykey[1]): return None isnew = True currentload = True loaded_instance = True if create_instance: for fn in create_instance: instance = fn(mapper, context, row, mapper.class_) if instance is not EXT_CONTINUE: manager = attributes.manager_of_class( instance.__class__) # TODO: if manager is None, raise a friendly error # about returning instances of unmapped types manager.setup_instance(instance) break else: instance = mapper.class_manager.new_instance() else: instance = mapper.class_manager.new_instance() dict_ = attributes.instance_dict(instance) state = attributes.instance_state(instance) state.key = identitykey # attach instance to session. state.session_id = context.session.hash_key session_identity_map.add(state) if currentload or populate_existing: # state is being fully loaded, so populate. # add to the "context.progress" collection. if isnew: state.runid = context.runid context.progress[state] = dict_ if populate_instance: for fn in populate_instance: ret = fn(mapper, context, row, state, only_load_props=only_load_props, instancekey=identitykey, isnew=isnew) if ret is not EXT_CONTINUE: break else: populate_state(state, dict_, row, isnew, only_load_props) else: populate_state(state, dict_, row, isnew, only_load_props) if loaded_instance: state.manager.dispatch.load(state, context) elif isnew: state.manager.dispatch.refresh(state, context, only_load_props) elif state in context.partials or state.unloaded or eager_populators: # state is having a partial set of its attributes # refreshed. Populate those attributes, # and add to the "context.partials" collection. 
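            # Illustrative note (comments only; the behavior described is a
            # rough sketch): this branch is hit when an instance already
            # present in the Session comes back in a result row while some of
            # its attributes are expired or deferred, e.g.:
            #
            #     user = session.query(User).first()   # ``User`` hypothetical
            #     session.expire(user, ['name'])       # 'name' is now unloaded
            #     session.query(User).all()            # same row; only 'name'
            #                                          # is repopulated here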
if state in context.partials: isnew = False (d_, attrs) = context.partials[state] else: isnew = True attrs = state.unloaded context.partials[state] = (dict_, attrs) if populate_instance: for fn in populate_instance: ret = fn(mapper, context, row, state, only_load_props=attrs, instancekey=identitykey, isnew=isnew) if ret is not EXT_CONTINUE: break else: populate_state(state, dict_, row, isnew, attrs) else: populate_state(state, dict_, row, isnew, attrs) for key, pop in eager_populators: if key not in state.unloaded: pop(state, dict_, row) if isnew: state.manager.dispatch.refresh(state, context, attrs) if result is not None: if append_result: for fn in append_result: if fn(mapper, context, row, state, result, instancekey=identitykey, isnew=isnew) is not EXT_CONTINUE: break else: result.append(instance) else: result.append(instance) return instance return _instance def _populators(mapper, context, path, row, adapter, new_populators, existing_populators, eager_populators): """Produce a collection of attribute level row processor callables.""" delayed_populators = [] pops = (new_populators, existing_populators, delayed_populators, eager_populators) for prop in mapper._props.itervalues(): for i, pop in enumerate(prop.create_row_processor( context, path, mapper, row, adapter)): if pop is not None: pops[i].append((prop.key, pop)) if delayed_populators: new_populators.extend(delayed_populators) def _configure_subclass_mapper(mapper, context, path, adapter): """Produce a mapper level row processor callable factory for mappers inheriting this one.""" def configure_subclass_mapper(discriminator): try: sub_mapper = mapper.polymorphic_map[discriminator] except KeyError: raise AssertionError( "No such polymorphic_identity %r is defined" % discriminator) if sub_mapper is mapper: return None return instance_processor( sub_mapper, context, path, adapter, polymorphic_from=mapper) return configure_subclass_mapper def load_scalar_attributes(mapper, state, attribute_names): """initiate a column-based attribute refresh operation.""" #assert mapper is _state_mapper(state) session = sessionlib._state_session(state) if not session: raise orm_exc.DetachedInstanceError( "Instance %s is not bound to a Session; " "attribute refresh operation cannot proceed" % (state_str(state))) has_key = bool(state.key) result = False if mapper.inherits and not mapper.concrete: statement = mapper._optimized_get_statement(state, attribute_names) if statement is not None: result = load_on_ident( session.query(mapper).from_statement(statement), None, only_load_props=attribute_names, refresh_state=state ) if result is False: if has_key: identity_key = state.key else: # this codepath is rare - only valid when inside a flush, and the # object is becoming persistent but hasn't yet been assigned # an identity_key. # check here to ensure we have the attrs we need. pk_attrs = [mapper._columntoproperty[col].key for col in mapper.primary_key] if state.expired_attributes.intersection(pk_attrs): raise sa_exc.InvalidRequestError( "Instance %s cannot be refreshed - it's not " " persistent and does not " "contain a full primary key." % state_str(state)) identity_key = mapper._identity_key_from_state(state) if (_none_set.issubset(identity_key) and \ not mapper.allow_partial_pks) or \ _none_set.issuperset(identity_key): util.warn("Instance %s to be refreshed doesn't " "contain a full primary key - can't be refreshed " "(and shouldn't be expired, either)." 
% state_str(state)) return result = load_on_ident( session.query(mapper), identity_key, refresh_state=state, only_load_props=attribute_names) # if instance is pending, a refresh operation # may not complete (even if PK attributes are assigned) if has_key and result is None: raise orm_exc.ObjectDeletedError(state) SQLAlchemy-0.8.4/lib/sqlalchemy/orm/mapper.py0000644000076500000240000026017212251150015021574 0ustar classicstaff00000000000000# orm/mapper.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Logic to map Python classes to and from selectables. Defines the :class:`~sqlalchemy.orm.mapper.Mapper` class, the central configurational unit which associates a class with a database table. This is a semi-private module; the main configurational API of the ORM is available in :class:`~sqlalchemy.orm.`. """ from __future__ import absolute_import import types import weakref from itertools import chain from collections import deque from .. import sql, util, log, exc as sa_exc, event, schema, inspection from ..sql import expression, visitors, operators, util as sql_util from . import instrumentation, attributes, \ exc as orm_exc, events, loading from .interfaces import MapperProperty, _InspectionAttr, _MappedAttribute from .util import _INSTRUMENTOR, _class_to_mapper, \ _state_mapper, class_mapper, \ PathRegistry, state_str import sys properties = util.importlater("sqlalchemy.orm", "properties") descriptor_props = util.importlater("sqlalchemy.orm", "descriptor_props") __all__ = ( 'Mapper', '_mapper_registry', 'class_mapper', 'object_mapper', ) _mapper_registry = weakref.WeakKeyDictionary() _new_mappers = False _already_compiling = False _memoized_configured_property = util.group_expirable_memoized_property() # a constant returned by _get_attr_by_column to indicate # this mapper is not handling an attribute for a particular # column NO_ATTRIBUTE = util.symbol('NO_ATTRIBUTE') # lock used to synchronize the "mapper configure" step _CONFIGURE_MUTEX = util.threading.RLock() class Mapper(_InspectionAttr): """Define the correlation of class attributes to database table columns. The :class:`.Mapper` object is instantiated using the :func:`~sqlalchemy.orm.mapper` function. For information about instantiating new :class:`.Mapper` objects, see that function's documentation. When :func:`.mapper` is used explicitly to link a user defined class with table metadata, this is referred to as *classical mapping*. Modern SQLAlchemy usage tends to favor the :mod:`sqlalchemy.ext.declarative` extension for class configuration, which makes usage of :func:`.mapper` behind the scenes. Given a particular class known to be mapped by the ORM, the :class:`.Mapper` which maintains it can be acquired using the :func:`.inspect` function:: from sqlalchemy import inspect mapper = inspect(MyClass) A class which was mapped by the :mod:`sqlalchemy.ext.declarative` extension will also have its mapper available via the ``__mapper__`` attribute. 
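    A minimal *classical mapping*, as a sketch only (the table and class shown
    here are illustrative, not part of the library)::

        from sqlalchemy import Table, Column, Integer, String, MetaData
        from sqlalchemy.orm import mapper

        metadata = MetaData()
        user_table = Table('user', metadata,
            Column('id', Integer, primary_key=True),
            Column('name', String(50)))

        class User(object):
            pass

        mapper(User, user_table)

        # inspect(User) and User.__mapper__ now both return this Mapper
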
""" def __init__(self, class_, local_table, properties=None, primary_key=None, non_primary=False, inherits=None, inherit_condition=None, inherit_foreign_keys=None, extension=None, order_by=False, always_refresh=False, version_id_col=None, version_id_generator=None, polymorphic_on=None, _polymorphic_map=None, polymorphic_identity=None, concrete=False, with_polymorphic=None, allow_partial_pks=True, batch=True, column_prefix=None, include_properties=None, exclude_properties=None, passive_updates=True, eager_defaults=False, legacy_is_orphan=False, _compiled_cache_size=100, ): """Construct a new mapper. Mappers are normally constructed via the :func:`~sqlalchemy.orm.mapper` function. See for details. """ self.class_ = util.assert_arg_type(class_, type, 'class_') self.class_manager = None self._primary_key_argument = util.to_list(primary_key) self.non_primary = non_primary if order_by is not False: self.order_by = util.to_list(order_by) else: self.order_by = order_by self.always_refresh = always_refresh self.version_id_col = version_id_col self.version_id_generator = version_id_generator or \ (lambda x: (x or 0) + 1) self.concrete = concrete self.single = False self.inherits = inherits self.local_table = local_table self.inherit_condition = inherit_condition self.inherit_foreign_keys = inherit_foreign_keys self._init_properties = properties or {} self._delete_orphans = [] self.batch = batch self.eager_defaults = eager_defaults self.column_prefix = column_prefix self.polymorphic_on = expression._clause_element_as_expr( polymorphic_on) self._dependency_processors = [] self.validators = util.immutabledict() self.passive_updates = passive_updates self.legacy_is_orphan = legacy_is_orphan self._clause_adapter = None self._requires_row_aliasing = False self._inherits_equated_pairs = None self._memoized_values = {} self._compiled_cache_size = _compiled_cache_size self._reconstructor = None self._deprecated_extensions = util.to_list(extension or []) self.allow_partial_pks = allow_partial_pks self._set_with_polymorphic(with_polymorphic) if isinstance(self.local_table, expression.SelectBase): raise sa_exc.InvalidRequestError( "When mapping against a select() construct, map against " "an alias() of the construct instead." "This because several databases don't allow a " "SELECT from a subquery that does not have an alias." ) if self.with_polymorphic and \ isinstance(self.with_polymorphic[1], expression.SelectBase): self.with_polymorphic = (self.with_polymorphic[0], self.with_polymorphic[1].alias()) # our 'polymorphic identity', a string name that when located in a # result set row indicates this Mapper should be used to construct # the object instance for that row. self.polymorphic_identity = polymorphic_identity # a dictionary of 'polymorphic identity' names, associating those # names with Mappers that will be used to construct object instances # upon a select operation. 
if _polymorphic_map is None: self.polymorphic_map = {} else: self.polymorphic_map = _polymorphic_map if include_properties is not None: self.include_properties = util.to_set(include_properties) else: self.include_properties = None if exclude_properties: self.exclude_properties = util.to_set(exclude_properties) else: self.exclude_properties = None self.configured = False # prevent this mapper from being constructed # while a configure_mappers() is occurring (and defer a # configure_mappers() until construction succeeds) _CONFIGURE_MUTEX.acquire() try: events._MapperEventsHold.populate(class_, self) self._configure_inheritance() self._configure_legacy_instrument_class() self._configure_class_instrumentation() self._configure_listeners() self._configure_properties() self._configure_polymorphic_setter() self._configure_pks() global _new_mappers _new_mappers = True self._log("constructed") self._expire_memoizations() finally: _CONFIGURE_MUTEX.release() # major attributes initialized at the classlevel so that # they can be Sphinx-documented. is_mapper = True """Part of the inspection API.""" @property def mapper(self): """Part of the inspection API. Returns self. """ return self @property def entity(self): """Part of the inspection API. Returns self.class\_. """ return self.class_ local_table = None """The :class:`.Selectable` which this :class:`.Mapper` manages. Typically is an instance of :class:`.Table` or :class:`.Alias`. May also be ``None``. The "local" table is the selectable that the :class:`.Mapper` is directly responsible for managing from an attribute access and flush perspective. For non-inheriting mappers, the local table is the same as the "mapped" table. For joined-table inheritance mappers, local_table will be the particular sub-table of the overall "join" which this :class:`.Mapper` represents. If this mapper is a single-table inheriting mapper, local_table will be ``None``. .. seealso:: :attr:`~.Mapper.mapped_table`. """ mapped_table = None """The :class:`.Selectable` to which this :class:`.Mapper` is mapped. Typically an instance of :class:`.Table`, :class:`.Join`, or :class:`.Alias`. The "mapped" table is the selectable that the mapper selects from during queries. For non-inheriting mappers, the mapped table is the same as the "local" table. For joined-table inheritance mappers, mapped_table references the full :class:`.Join` representing full rows for this particular subclass. For single-table inheritance mappers, mapped_table references the base table. .. seealso:: :attr:`~.Mapper.local_table`. """ inherits = None """References the :class:`.Mapper` which this :class:`.Mapper` inherits from, if any. This is a *read only* attribute determined during mapper construction. Behavior is undefined if directly modified. """ configured = None """Represent ``True`` if this :class:`.Mapper` has been configured. This is a *read only* attribute determined during mapper construction. Behavior is undefined if directly modified. .. seealso:: :func:`.configure_mappers`. """ concrete = None """Represent ``True`` if this :class:`.Mapper` is a concrete inheritance mapper. This is a *read only* attribute determined during mapper construction. Behavior is undefined if directly modified. """ tables = None """An iterable containing the collection of :class:`.Table` objects which this :class:`.Mapper` is aware of. 
If the mapper is mapped to a :class:`.Join`, or an :class:`.Alias` representing a :class:`.Select`, the individual :class:`.Table` objects that comprise the full construct will be represented here. This is a *read only* attribute determined during mapper construction. Behavior is undefined if directly modified. """ primary_key = None """An iterable containing the collection of :class:`.Column` objects which comprise the 'primary key' of the mapped table, from the perspective of this :class:`.Mapper`. This list is against the selectable in :attr:`~.Mapper.mapped_table`. In the case of inheriting mappers, some columns may be managed by a superclass mapper. For example, in the case of a :class:`.Join`, the primary key is determined by all of the primary key columns across all tables referenced by the :class:`.Join`. The list is also not necessarily the same as the primary key column collection associated with the underlying tables; the :class:`.Mapper` features a ``primary_key`` argument that can override what the :class:`.Mapper` considers as primary key columns. This is a *read only* attribute determined during mapper construction. Behavior is undefined if directly modified. """ class_ = None """The Python class which this :class:`.Mapper` maps. This is a *read only* attribute determined during mapper construction. Behavior is undefined if directly modified. """ class_manager = None """The :class:`.ClassManager` which maintains event listeners and class-bound descriptors for this :class:`.Mapper`. This is a *read only* attribute determined during mapper construction. Behavior is undefined if directly modified. """ single = None """Represent ``True`` if this :class:`.Mapper` is a single table inheritance mapper. :attr:`~.Mapper.local_table` will be ``None`` if this flag is set. This is a *read only* attribute determined during mapper construction. Behavior is undefined if directly modified. """ non_primary = None """Represent ``True`` if this :class:`.Mapper` is a "non-primary" mapper, e.g. a mapper that is used only to selet rows but not for persistence management. This is a *read only* attribute determined during mapper construction. Behavior is undefined if directly modified. """ polymorphic_on = None """The :class:`.Column` or SQL expression specified as the ``polymorphic_on`` argument for this :class:`.Mapper`, within an inheritance scenario. This attribute is normally a :class:`.Column` instance but may also be an expression, such as one derived from :func:`.cast`. This is a *read only* attribute determined during mapper construction. Behavior is undefined if directly modified. """ polymorphic_map = None """A mapping of "polymorphic identity" identifiers mapped to :class:`.Mapper` instances, within an inheritance scenario. The identifiers can be of any type which is comparable to the type of column represented by :attr:`~.Mapper.polymorphic_on`. An inheritance chain of mappers will all reference the same polymorphic map object. The object is used to correlate incoming result rows to target mappers. This is a *read only* attribute determined during mapper construction. Behavior is undefined if directly modified. """ polymorphic_identity = None """Represent an identifier which is matched against the :attr:`~.Mapper.polymorphic_on` column during result row loading. Used only with inheritance, this object can be of any type which is comparable to the type of column represented by :attr:`~.Mapper.polymorphic_on`. This is a *read only* attribute determined during mapper construction. 
Behavior is undefined if directly modified. """ base_mapper = None """The base-most :class:`.Mapper` in an inheritance chain. In a non-inheriting scenario, this attribute will always be this :class:`.Mapper`. In an inheritance scenario, it references the :class:`.Mapper` which is parent to all other :class:`.Mapper` objects in the inheritance chain. This is a *read only* attribute determined during mapper construction. Behavior is undefined if directly modified. """ columns = None """A collection of :class:`.Column` or other scalar expression objects maintained by this :class:`.Mapper`. The collection behaves the same as that of the ``c`` attribute on any :class:`.Table` object, except that only those columns included in this mapping are present, and are keyed based on the attribute name defined in the mapping, not necessarily the ``key`` attribute of the :class:`.Column` itself. Additionally, scalar expressions mapped by :func:`.column_property` are also present here. This is a *read only* attribute determined during mapper construction. Behavior is undefined if directly modified. """ validators = None """An immutable dictionary of attributes which have been decorated using the :func:`~.orm.validates` decorator. The dictionary contains string attribute names as keys mapped to the actual validation method. """ c = None """A synonym for :attr:`~.Mapper.columns`.""" dispatch = event.dispatcher(events.MapperEvents) @util.memoized_property def _path_registry(self): return PathRegistry.per_mapper(self) def _configure_inheritance(self): """Configure settings related to inherting and/or inherited mappers being present.""" # a set of all mappers which inherit from this one. self._inheriting_mappers = util.WeakSequence() if self.inherits: if isinstance(self.inherits, type): self.inherits = class_mapper(self.inherits, configure=False) if not issubclass(self.class_, self.inherits.class_): raise sa_exc.ArgumentError( "Class '%s' does not inherit from '%s'" % (self.class_.__name__, self.inherits.class_.__name__)) if self.non_primary != self.inherits.non_primary: np = not self.non_primary and "primary" or "non-primary" raise sa_exc.ArgumentError( "Inheritance of %s mapper for class '%s' is " "only allowed from a %s mapper" % (np, self.class_.__name__, np)) # inherit_condition is optional. 
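            # Illustrative sketch (comments only; names are hypothetical): when
            # inherit_condition is omitted it is derived from the foreign key
            # between the two local tables, equivalent to spelling it out in a
            # classical mapping:
            #
            #     mapper(Manager, manager_table, inherits=Employee,
            #            inherit_condition=manager_table.c.id ==
            #                              employee_table.c.id)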
if self.local_table is None: self.local_table = self.inherits.local_table self.mapped_table = self.inherits.mapped_table self.single = True elif not self.local_table is self.inherits.local_table: if self.concrete: self.mapped_table = self.local_table for mapper in self.iterate_to_root(): if mapper.polymorphic_on is not None: mapper._requires_row_aliasing = True else: if self.inherit_condition is None: # figure out inherit condition from our table to the # immediate table of the inherited mapper, not its # full table which could pull in other stuff we dont # want (allows test/inheritance.InheritTest4 to pass) self.inherit_condition = sql_util.join_condition( self.inherits.local_table, self.local_table) self.mapped_table = sql.join( self.inherits.mapped_table, self.local_table, self.inherit_condition) fks = util.to_set(self.inherit_foreign_keys) self._inherits_equated_pairs = sql_util.criterion_as_pairs( self.mapped_table.onclause, consider_as_foreign_keys=fks) else: self.mapped_table = self.local_table if self.polymorphic_identity is not None and not self.concrete: self._identity_class = self.inherits._identity_class else: self._identity_class = self.class_ if self.version_id_col is None: self.version_id_col = self.inherits.version_id_col self.version_id_generator = self.inherits.version_id_generator elif self.inherits.version_id_col is not None and \ self.version_id_col is not self.inherits.version_id_col: util.warn( "Inheriting version_id_col '%s' does not match inherited " "version_id_col '%s' and will not automatically populate " "the inherited versioning column. " "version_id_col should only be specified on " "the base-most mapper that includes versioning." % (self.version_id_col.description, self.inherits.version_id_col.description) ) if self.order_by is False and \ not self.concrete and \ self.inherits.order_by is not False: self.order_by = self.inherits.order_by self.polymorphic_map = self.inherits.polymorphic_map self.batch = self.inherits.batch self.inherits._inheriting_mappers.append(self) self.base_mapper = self.inherits.base_mapper self.passive_updates = self.inherits.passive_updates self._all_tables = self.inherits._all_tables if self.polymorphic_identity is not None: self.polymorphic_map[self.polymorphic_identity] = self else: self._all_tables = set() self.base_mapper = self self.mapped_table = self.local_table if self.polymorphic_identity is not None: self.polymorphic_map[self.polymorphic_identity] = self self._identity_class = self.class_ if self.mapped_table is None: raise sa_exc.ArgumentError( "Mapper '%s' does not have a mapped_table specified." % self) def _set_with_polymorphic(self, with_polymorphic): if with_polymorphic == '*': self.with_polymorphic = ('*', None) elif isinstance(with_polymorphic, (tuple, list)): if isinstance(with_polymorphic[0], (basestring, tuple, list)): self.with_polymorphic = with_polymorphic else: self.with_polymorphic = (with_polymorphic, None) elif with_polymorphic is not None: raise sa_exc.ArgumentError("Invalid setting for with_polymorphic") else: self.with_polymorphic = None if isinstance(self.local_table, expression.SelectBase): raise sa_exc.InvalidRequestError( "When mapping against a select() construct, map against " "an alias() of the construct instead." "This because several databases don't allow a " "SELECT from a subquery that does not have an alias." 
) if self.with_polymorphic and \ isinstance(self.with_polymorphic[1], expression.SelectBase): self.with_polymorphic = (self.with_polymorphic[0], self.with_polymorphic[1].alias()) if self.configured: self._expire_memoizations() def _set_concrete_base(self, mapper): """Set the given :class:`.Mapper` as the 'inherits' for this :class:`.Mapper`, assuming this :class:`.Mapper` is concrete and does not already have an inherits.""" assert self.concrete assert not self.inherits assert isinstance(mapper, Mapper) self.inherits = mapper self.inherits.polymorphic_map.update(self.polymorphic_map) self.polymorphic_map = self.inherits.polymorphic_map for mapper in self.iterate_to_root(): if mapper.polymorphic_on is not None: mapper._requires_row_aliasing = True self.batch = self.inherits.batch for mp in self.self_and_descendants: mp.base_mapper = self.inherits.base_mapper self.inherits._inheriting_mappers.append(self) self.passive_updates = self.inherits.passive_updates self._all_tables = self.inherits._all_tables for key, prop in mapper._props.iteritems(): if key not in self._props and \ not self._should_exclude(key, key, local=False, column=None): self._adapt_inherited_property(key, prop, False) def _set_polymorphic_on(self, polymorphic_on): self.polymorphic_on = polymorphic_on self._configure_polymorphic_setter(True) def _configure_legacy_instrument_class(self): if self.inherits: self.dispatch._update(self.inherits.dispatch) super_extensions = set( chain(*[m._deprecated_extensions for m in self.inherits.iterate_to_root()])) else: super_extensions = set() for ext in self._deprecated_extensions: if ext not in super_extensions: ext._adapt_instrument_class(self, ext) def _configure_listeners(self): if self.inherits: super_extensions = set( chain(*[m._deprecated_extensions for m in self.inherits.iterate_to_root()])) else: super_extensions = set() for ext in self._deprecated_extensions: if ext not in super_extensions: ext._adapt_listener(self, ext) def _configure_class_instrumentation(self): """If this mapper is to be a primary mapper (i.e. the non_primary flag is not set), associate this Mapper with the given class_ and entity name. Subsequent calls to ``class_mapper()`` for the class_/entity name combination will return this mapper. Also decorate the `__init__` method on the mapped class to include optional auto-session attachment logic. """ manager = attributes.manager_of_class(self.class_) if self.non_primary: if not manager or not manager.is_mapped: raise sa_exc.InvalidRequestError( "Class %s has no primary mapper configured. Configure " "a primary mapper first before setting up a non primary " "Mapper." % self.class_) self.class_manager = manager self._identity_class = manager.mapper._identity_class _mapper_registry[self] = True return if manager is not None: assert manager.class_ is self.class_ if manager.is_mapped: raise sa_exc.ArgumentError( "Class '%s' already has a primary mapper defined. " "Use non_primary=True to " "create a non primary Mapper. clear_mappers() will " "remove *all* current mappers from all classes." % self.class_) #else: # a ClassManager may already exist as # ClassManager.instrument_attribute() creates # new managers for each subclass if they don't yet exist. 
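        # Illustrative sketch (comments only; the selectable is hypothetical):
        # a class receives exactly one primary mapper; an additional mapping
        # against an alternate selectable must pass non_primary=True and does
        # not re-instrument the class:
        #
        #     user_alias = users_table.select().alias()
        #     mapper(User, user_alias, non_primary=True)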
_mapper_registry[self] = True self.dispatch.instrument_class(self, self.class_) if manager is None: manager = instrumentation.register_class(self.class_) self.class_manager = manager manager.mapper = self manager.deferred_scalar_loader = util.partial( loading.load_scalar_attributes, self) # The remaining members can be added by any mapper, # e_name None or not. if manager.info.get(_INSTRUMENTOR, False): return event.listen(manager, 'first_init', _event_on_first_init, raw=True) event.listen(manager, 'init', _event_on_init, raw=True) event.listen(manager, 'resurrect', _event_on_resurrect, raw=True) for key, method in util.iterate_attributes(self.class_): if isinstance(method, types.FunctionType): if hasattr(method, '__sa_reconstructor__'): self._reconstructor = method event.listen(manager, 'load', _event_on_load, raw=True) elif hasattr(method, '__sa_validators__'): include_removes = getattr(method, "__sa_include_removes__", False) for name in method.__sa_validators__: self.validators = self.validators.union( {name: (method, include_removes)} ) manager.info[_INSTRUMENTOR] = self @util.deprecated("0.7", message=":meth:`.Mapper.compile` " "is replaced by :func:`.configure_mappers`") def compile(self): """Initialize the inter-mapper relationships of all mappers that have been constructed thus far. """ configure_mappers() return self @property @util.deprecated("0.7", message=":attr:`.Mapper.compiled` " "is replaced by :attr:`.Mapper.configured`") def compiled(self): return self.configured def dispose(self): # Disable any attribute-based compilation. self.configured = True if hasattr(self, '_configure_failed'): del self._configure_failed if not self.non_primary and \ self.class_manager is not None and \ self.class_manager.is_mapped and \ self.class_manager.mapper is self: instrumentation.unregister_class(self.class_) def _configure_pks(self): self.tables = sql_util.find_tables(self.mapped_table) self._pks_by_table = {} self._cols_by_table = {} all_cols = util.column_set(chain(*[ col.proxy_set for col in self._columntoproperty])) pk_cols = util.column_set(c for c in all_cols if c.primary_key) # identify primary key columns which are also mapped by this mapper. 
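        # Illustrative sketch (comments only; ``Base``/``User`` are
        # hypothetical): the __sa_validators__ / __sa_reconstructor__ markers
        # scanned during _configure_class_instrumentation() above are set by
        # the public decorators:
        #
        #     from sqlalchemy.orm import validates, reconstructor
        #
        #     class User(Base):
        #         # ... column definitions ...
        #
        #         @validates('email')
        #         def validate_email(self, key, value):
        #             assert '@' in value
        #             return value
        #
        #         @reconstructor
        #         def init_on_load(self):
        #             self.cache = {}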
tables = set(self.tables + [self.mapped_table]) self._all_tables.update(tables) for t in tables: if t.primary_key and pk_cols.issuperset(t.primary_key): # ordering is important since it determines the ordering of # mapper.primary_key (and therefore query.get()) self._pks_by_table[t] = \ util.ordered_column_set(t.primary_key).\ intersection(pk_cols) self._cols_by_table[t] = \ util.ordered_column_set(t.c).\ intersection(all_cols) # determine cols that aren't expressed within our tables; mark these # as "read only" properties which are refreshed upon INSERT/UPDATE self._readonly_props = set( self._columntoproperty[col] for col in self._columntoproperty if not hasattr(col, 'table') or col.table not in self._cols_by_table) # if explicit PK argument sent, add those columns to the # primary key mappings if self._primary_key_argument: for k in self._primary_key_argument: if k.table not in self._pks_by_table: self._pks_by_table[k.table] = util.OrderedSet() self._pks_by_table[k.table].add(k) # otherwise, see that we got a full PK for the mapped table elif self.mapped_table not in self._pks_by_table or \ len(self._pks_by_table[self.mapped_table]) == 0: raise sa_exc.ArgumentError( "Mapper %s could not assemble any primary " "key columns for mapped table '%s'" % (self, self.mapped_table.description)) elif self.local_table not in self._pks_by_table and \ isinstance(self.local_table, schema.Table): util.warn("Could not assemble any primary " "keys for locally mapped table '%s' - " "no rows will be persisted in this Table." % self.local_table.description) if self.inherits and \ not self.concrete and \ not self._primary_key_argument: # if inheriting, the "primary key" for this mapper is # that of the inheriting (unless concrete or explicit) self.primary_key = self.inherits.primary_key else: # determine primary key from argument or mapped_table pks - # reduce to the minimal set of columns if self._primary_key_argument: primary_key = sql_util.reduce_columns( [self.mapped_table.corresponding_column(c) for c in self._primary_key_argument], ignore_nonexistent_tables=True) else: primary_key = sql_util.reduce_columns( self._pks_by_table[self.mapped_table], ignore_nonexistent_tables=True) if len(primary_key) == 0: raise sa_exc.ArgumentError( "Mapper %s could not assemble any primary " "key columns for mapped table '%s'" % (self, self.mapped_table.description)) self.primary_key = tuple(primary_key) self._log("Identified primary key columns: %s", primary_key) def _configure_properties(self): # Column and other ClauseElement objects which are mapped self.columns = self.c = util.OrderedProperties() # object attribute names mapped to MapperProperty objects self._props = util.OrderedDict() # table columns mapped to lists of MapperProperty objects # using a list allows a single column to be defined as # populating multiple object attributes self._columntoproperty = _ColumnMapping(self) # load custom properties if self._init_properties: for key, prop in self._init_properties.iteritems(): self._configure_property(key, prop, False) # pull properties from the inherited mapper if any. 
if self.inherits: for key, prop in self.inherits._props.iteritems(): if key not in self._props and \ not self._should_exclude(key, key, local=False, column=None): self._adapt_inherited_property(key, prop, False) # create properties for each column in the mapped table, # for those columns which don't already map to a property for column in self.mapped_table.columns: if column in self._columntoproperty: continue column_key = (self.column_prefix or '') + column.key if self._should_exclude( column.key, column_key, local=self.local_table.c.contains_column(column), column=column ): continue # adjust the "key" used for this column to that # of the inheriting mapper for mapper in self.iterate_to_root(): if column in mapper._columntoproperty: column_key = mapper._columntoproperty[column].key self._configure_property(column_key, column, init=False, setparent=True) def _configure_polymorphic_setter(self, init=False): """Configure an attribute on the mapper representing the 'polymorphic_on' column, if applicable, and not already generated by _configure_properties (which is typical). Also create a setter function which will assign this attribute to the value of the 'polymorphic_identity' upon instance construction, also if applicable. This routine will run when an instance is created. """ setter = False if self.polymorphic_on is not None: setter = True if isinstance(self.polymorphic_on, basestring): # polymorphic_on specified as as string - link # it to mapped ColumnProperty try: self.polymorphic_on = self._props[self.polymorphic_on] except KeyError: raise sa_exc.ArgumentError( "Can't determine polymorphic_on " "value '%s' - no attribute is " "mapped to this name." % self.polymorphic_on) if self.polymorphic_on in self._columntoproperty: # polymorphic_on is a column that is already mapped # to a ColumnProperty prop = self._columntoproperty[self.polymorphic_on] polymorphic_key = prop.key self.polymorphic_on = prop.columns[0] polymorphic_key = prop.key elif isinstance(self.polymorphic_on, MapperProperty): # polymorphic_on is directly a MapperProperty, # ensure it's a ColumnProperty if not isinstance(self.polymorphic_on, properties.ColumnProperty): raise sa_exc.ArgumentError( "Only direct column-mapped " "property or SQL expression " "can be passed for polymorphic_on") prop = self.polymorphic_on self.polymorphic_on = prop.columns[0] polymorphic_key = prop.key elif not expression.is_column(self.polymorphic_on): # polymorphic_on is not a Column and not a ColumnProperty; # not supported right now. raise sa_exc.ArgumentError( "Only direct column-mapped " "property or SQL expression " "can be passed for polymorphic_on" ) else: # polymorphic_on is a Column or SQL expression and # doesn't appear to be mapped. this means it can be 1. # only present in the with_polymorphic selectable or # 2. a totally standalone SQL expression which we'd # hope is compatible with this mapper's mapped_table col = self.mapped_table.corresponding_column( self.polymorphic_on) if col is None: # polymorphic_on doesn't derive from any # column/expression isn't present in the mapped # table. we will make a "hidden" ColumnProperty # for it. Just check that if it's directly a # schema.Column and we have with_polymorphic, it's # likely a user error if the schema.Column isn't # represented somehow in either mapped_table or # with_polymorphic. Otherwise as of 0.7.4 we # just go with it and assume the user wants it # that way (i.e. 
a CASE statement) setter = False instrument = False col = self.polymorphic_on if isinstance(col, schema.Column) and ( self.with_polymorphic is None or \ self.with_polymorphic[1].\ corresponding_column(col) is None ): raise sa_exc.InvalidRequestError( "Could not map polymorphic_on column " "'%s' to the mapped table - polymorphic " "loads will not function properly" % col.description) else: # column/expression that polymorphic_on derives from # is present in our mapped table # and is probably mapped, but polymorphic_on itself # is not. This happens when # the polymorphic_on is only directly present in the # with_polymorphic selectable, as when use # polymorphic_union. # we'll make a separate ColumnProperty for it. instrument = True key = getattr(col, 'key', None) if key: if self._should_exclude(col.key, col.key, False, col): raise sa_exc.InvalidRequestError( "Cannot exclude or override the " "discriminator column %r" % col.key) else: self.polymorphic_on = col = \ col.label("_sa_polymorphic_on") key = col.key self._configure_property( key, properties.ColumnProperty(col, _instrument=instrument), init=init, setparent=True) polymorphic_key = key else: # no polymorphic_on was set. # check inheriting mappers for one. for mapper in self.iterate_to_root(): # determine if polymorphic_on of the parent # should be propagated here. If the col # is present in our mapped table, or if our mapped # table is the same as the parent (i.e. single table # inheritance), we can use it if mapper.polymorphic_on is not None: if self.mapped_table is mapper.mapped_table: self.polymorphic_on = mapper.polymorphic_on else: self.polymorphic_on = \ self.mapped_table.corresponding_column( mapper.polymorphic_on) # we can use the parent mapper's _set_polymorphic_identity # directly; it ensures the polymorphic_identity of the # instance's mapper is used so is portable to subclasses. 
if self.polymorphic_on is not None: self._set_polymorphic_identity = \ mapper._set_polymorphic_identity self._validate_polymorphic_identity = \ mapper._validate_polymorphic_identity else: self._set_polymorphic_identity = None return if setter: def _set_polymorphic_identity(state): dict_ = state.dict state.get_impl(polymorphic_key).set(state, dict_, state.manager.mapper.polymorphic_identity, None) def _validate_polymorphic_identity(mapper, state, dict_): if polymorphic_key in dict_ and \ dict_[polymorphic_key] not in \ mapper._acceptable_polymorphic_identities: util.warn( "Flushing object %s with " "incompatible polymorphic identity %r; the " "object may not refresh and/or load correctly" % ( state_str(state), dict_[polymorphic_key] ) ) self._set_polymorphic_identity = _set_polymorphic_identity self._validate_polymorphic_identity = _validate_polymorphic_identity else: self._set_polymorphic_identity = None _validate_polymorphic_identity = None @_memoized_configured_property def _acceptable_polymorphic_identities(self): identities = set() stack = deque([self]) while stack: item = stack.popleft() if item.mapped_table is self.mapped_table: identities.add(item.polymorphic_identity) stack.extend(item._inheriting_mappers) return identities def _adapt_inherited_property(self, key, prop, init): if not self.concrete: self._configure_property(key, prop, init=False, setparent=False) elif key not in self._props: self._configure_property( key, properties.ConcreteInheritedProperty(), init=init, setparent=True) def _configure_property(self, key, prop, init=True, setparent=True): self._log("_configure_property(%s, %s)", key, prop.__class__.__name__) if not isinstance(prop, MapperProperty): prop = self._property_from_column(key, prop) if isinstance(prop, properties.ColumnProperty): col = self.mapped_table.corresponding_column(prop.columns[0]) # if the column is not present in the mapped table, # test if a column has been added after the fact to the # parent table (or their parent, etc.) [ticket:1570] if col is None and self.inherits: path = [self] for m in self.inherits.iterate_to_root(): col = m.local_table.corresponding_column(prop.columns[0]) if col is not None: for m2 in path: m2.mapped_table._reset_exported() col = self.mapped_table.corresponding_column( prop.columns[0]) break path.append(m) # subquery expression, column not present in the mapped # selectable. if col is None: col = prop.columns[0] # column is coming in after _readonly_props was # initialized; check for 'readonly' if hasattr(self, '_readonly_props') and \ (not hasattr(col, 'table') or col.table not in self._cols_by_table): self._readonly_props.add(prop) else: # if column is coming in after _cols_by_table was # initialized, ensure the col is in the right set if hasattr(self, '_cols_by_table') and \ col.table in self._cols_by_table and \ col not in self._cols_by_table[col.table]: self._cols_by_table[col.table].add(col) # if this properties.ColumnProperty represents the "polymorphic # discriminator" column, mark it. We'll need this when rendering # columns in SELECT statements. 
if not hasattr(prop, '_is_polymorphic_discriminator'): prop._is_polymorphic_discriminator = \ (col is self.polymorphic_on or prop.columns[0] is self.polymorphic_on) self.columns[key] = col for col in prop.columns + prop._orig_columns: for col in col.proxy_set: self._columntoproperty[col] = prop prop.key = key if setparent: prop.set_parent(self, init) if key in self._props and \ getattr(self._props[key], '_mapped_by_synonym', False): syn = self._props[key]._mapped_by_synonym raise sa_exc.ArgumentError( "Can't call map_column=True for synonym %r=%r, " "a ColumnProperty already exists keyed to the name " "%r for column %r" % (syn, key, key, syn) ) if key in self._props and \ not isinstance(prop, properties.ColumnProperty) and \ not isinstance(self._props[key], properties.ColumnProperty): util.warn("Property %s on %s being replaced with new " "property %s; the old property will be discarded" % ( self._props[key], self, prop, )) self._props[key] = prop if not self.non_primary: prop.instrument_class(self) for mapper in self._inheriting_mappers: mapper._adapt_inherited_property(key, prop, init) if init: prop.init() prop.post_instrument_class(self) if self.configured: self._expire_memoizations() def _property_from_column(self, key, prop): """generate/update a :class:`.ColumnProprerty` given a :class:`.Column` object. """ # we were passed a Column or a list of Columns; # generate a properties.ColumnProperty columns = util.to_list(prop) column = columns[0] if not expression.is_column(column): raise sa_exc.ArgumentError( "%s=%r is not an instance of MapperProperty or Column" % (key, prop)) prop = self._props.get(key, None) if isinstance(prop, properties.ColumnProperty): if prop.parent is self: raise sa_exc.InvalidRequestError( "Implicitly combining column %s with column " "%s under attribute '%s'. Please configure one " "or more attributes for these same-named columns " "explicitly." % (prop.columns[-1], column, key)) # existing properties.ColumnProperty from an inheriting # mapper. make a copy and append our column to it prop = prop.copy() prop.columns.insert(0, column) self._log("inserting column to existing list " "in properties.ColumnProperty %s" % (key)) return prop elif prop is None or isinstance(prop, properties.ConcreteInheritedProperty): mapped_column = [] for c in columns: mc = self.mapped_table.corresponding_column(c) if mc is None: mc = self.local_table.corresponding_column(c) if mc is not None: # if the column is in the local table but not the # mapped table, this corresponds to adding a # column after the fact to the local table. # [ticket:1523] self.mapped_table._reset_exported() mc = self.mapped_table.corresponding_column(c) if mc is None: raise sa_exc.ArgumentError( "When configuring property '%s' on %s, " "column '%s' is not represented in the mapper's " "table. Use the `column_property()` function to " "force this column to be mapped as a read-only " "attribute." % (key, self, c)) mapped_column.append(mc) return properties.ColumnProperty(*mapped_column) else: raise sa_exc.ArgumentError( "WARNING: when configuring property '%s' on %s, " "column '%s' conflicts with property '%r'. " "To resolve this, map the column to the class under a " "different name in the 'properties' dictionary. Or, " "to remove all awareness of the column entirely " "(including its availability as a foreign key), " "use the 'include_properties' or 'exclude_properties' " "mapper arguments to control specifically which table " "columns get mapped." 
                % (key, self, column.key, prop))

    def _post_configure_properties(self):
        """Call the ``init()`` method on all ``MapperProperties``
        attached to this mapper.

        This is a deferred configuration step which is intended
        to execute once all mappers have been constructed.

        """
        self._log("_post_configure_properties() started")
        l = [(key, prop) for key, prop in self._props.iteritems()]

        for key, prop in l:
            self._log("initialize prop %s", key)

            if prop.parent is self and not prop._configure_started:
                prop.init()

            if prop._configure_finished:
                prop.post_instrument_class(self)

        self._log("_post_configure_properties() complete")
        self.configured = True

    def add_properties(self, dict_of_properties):
        """Add the given dictionary of properties to this mapper,
        using `add_property`.

        """
        for key, value in dict_of_properties.iteritems():
            self.add_property(key, value)

    def add_property(self, key, prop):
        """Add an individual MapperProperty to this mapper.

        If the mapper has not been configured yet, just adds the
        property to the initial properties dictionary sent to the
        constructor.  If this Mapper has already been configured, then
        the given MapperProperty is configured immediately.

        """
        self._init_properties[key] = prop
        self._configure_property(key, prop, init=self.configured)

    def _expire_memoizations(self):
        for mapper in self.iterate_to_root():
            _memoized_configured_property.expire_instance(mapper)

    @property
    def _log_desc(self):
        return "(" + self.class_.__name__ + \
            "|" + \
            (self.local_table is not None and
                self.local_table.description or
                str(self.local_table)) + \
            (self.non_primary and "|non-primary" or "") + ")"

    def _log(self, msg, *args):
        self.logger.info(
            "%s " + msg, *((self._log_desc,) + args)
        )

    def _log_debug(self, msg, *args):
        self.logger.debug(
            "%s " + msg, *((self._log_desc,) + args)
        )

    def __repr__(self):
        return '<Mapper at 0x%x; %s>' % (
            id(self), self.class_.__name__)

    def __str__(self):
        return "Mapper|%s|%s%s" % (
            self.class_.__name__,
            self.local_table is not None and
            self.local_table.description or None,
            self.non_primary and "|non-primary" or ""
        )

    def _is_orphan(self, state):
        orphan_possible = False
        for mapper in self.iterate_to_root():
            for (key, cls) in mapper._delete_orphans:
                orphan_possible = True

                has_parent = attributes.manager_of_class(cls).has_parent(
                    state, key, optimistic=state.has_identity)

                if self.legacy_is_orphan and has_parent:
                    return False
                elif not self.legacy_is_orphan and not has_parent:
                    return True

        if self.legacy_is_orphan:
            return orphan_possible
        else:
            return False

    def has_property(self, key):
        return key in self._props

    def get_property(self, key, _configure_mappers=True):
        """return a MapperProperty associated with the given key.

        """
        if _configure_mappers and _new_mappers:
            configure_mappers()

        try:
            return self._props[key]
        except KeyError:
            raise sa_exc.InvalidRequestError(
                "Mapper '%s' has no property '%s'" % (self, key))

    def get_property_by_column(self, column):
        """Given a :class:`.Column` object, return the
        :class:`.MapperProperty` which maps this column."""

        return self._columntoproperty[column]

    @property
    def iterate_properties(self):
        """return an iterator of all MapperProperty objects."""
        if _new_mappers:
            configure_mappers()
        return self._props.itervalues()

    def _mappers_from_spec(self, spec, selectable):
        """given a with_polymorphic() argument, return the set of mappers it
        represents.

        Trims the list of mappers to just those represented within the given
        selectable, if present.  This helps some more legacy-ish mappings.
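
        For orientation only: the ``spec`` here is the first element of a
        ``with_polymorphic`` specification as accepted by :func:`.mapper`.
        A minimal, hypothetical sketch of the accepted forms (``Manager``
        and ``Engineer`` are placeholder mapped subclasses, not defined in
        this module)::

            # select all descendant mappers of this mapper
            spec = '*'

            # or an explicit list of mapped classes (or Mapper objects)
            spec = [Manager, Engineer]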
""" if spec == '*': mappers = list(self.self_and_descendants) elif spec: mappers = set() for m in util.to_list(spec): m = _class_to_mapper(m) if not m.isa(self): raise sa_exc.InvalidRequestError( "%r does not inherit from %r" % (m, self)) if selectable is None: mappers.update(m.iterate_to_root()) else: mappers.add(m) mappers = [m for m in self.self_and_descendants if m in mappers] else: mappers = [] if selectable is not None: tables = set(sql_util.find_tables(selectable, include_aliases=True)) mappers = [m for m in mappers if m.local_table in tables] return mappers def _selectable_from_mappers(self, mappers, innerjoin): """given a list of mappers (assumed to be within this mapper's inheritance hierarchy), construct an outerjoin amongst those mapper's mapped tables. """ from_obj = self.mapped_table for m in mappers: if m is self: continue if m.concrete: raise sa_exc.InvalidRequestError( "'with_polymorphic()' requires 'selectable' argument " "when concrete-inheriting mappers are used.") elif not m.single: if innerjoin: from_obj = from_obj.join(m.local_table, m.inherit_condition) else: from_obj = from_obj.outerjoin(m.local_table, m.inherit_condition) return from_obj @_memoized_configured_property def _single_table_criterion(self): if self.single and \ self.inherits and \ self.polymorphic_on is not None: return self.polymorphic_on.in_( m.polymorphic_identity for m in self.self_and_descendants) else: return None @_memoized_configured_property def _with_polymorphic_mappers(self): if _new_mappers: configure_mappers() if not self.with_polymorphic: return [] return self._mappers_from_spec(*self.with_polymorphic) @_memoized_configured_property def _with_polymorphic_selectable(self): if not self.with_polymorphic: return self.mapped_table spec, selectable = self.with_polymorphic if selectable is not None: return selectable else: return self._selectable_from_mappers( self._mappers_from_spec(spec, selectable), False) with_polymorphic_mappers = _with_polymorphic_mappers """The list of :class:`.Mapper` objects included in the default "polymorphic" query. """ @property def selectable(self): """The :func:`.select` construct this :class:`.Mapper` selects from by default. Normally, this is equivalent to :attr:`.mapped_table`, unless the ``with_polymorphic`` feature is in use, in which case the full "polymorphic" selectable is returned. 
""" return self._with_polymorphic_selectable def _with_polymorphic_args(self, spec=None, selectable=False, innerjoin=False): if self.with_polymorphic: if not spec: spec = self.with_polymorphic[0] if selectable is False: selectable = self.with_polymorphic[1] elif selectable is False: selectable = None mappers = self._mappers_from_spec(spec, selectable) if selectable is not None: return mappers, selectable else: return mappers, self._selectable_from_mappers(mappers, innerjoin) @_memoized_configured_property def _polymorphic_properties(self): return list(self._iterate_polymorphic_properties( self._with_polymorphic_mappers)) def _iterate_polymorphic_properties(self, mappers=None): """Return an iterator of MapperProperty objects which will render into a SELECT.""" if mappers is None: mappers = self._with_polymorphic_mappers if not mappers: for c in self.iterate_properties: yield c else: # in the polymorphic case, filter out discriminator columns # from other mappers, as these are sometimes dependent on that # mapper's polymorphic selectable (which we don't want rendered) for c in util.unique_list( chain(*[ list(mapper.iterate_properties) for mapper in [self] + mappers ]) ): if getattr(c, '_is_polymorphic_discriminator', False) and \ (self.polymorphic_on is None or c.columns[0] is not self.polymorphic_on): continue yield c @util.memoized_property def attrs(self): """A namespace of all :class:`.MapperProperty` objects associated this mapper. This is an object that provides each property based on its key name. For instance, the mapper for a ``User`` class which has ``User.name`` attribute would provide ``mapper.attrs.name``, which would be the :class:`.ColumnProperty` representing the ``name`` column. The namespace object can also be iterated, which would yield each :class:`.MapperProperty`. :class:`.Mapper` has several pre-filtered views of this attribute which limit the types of properties returned, inclding :attr:`.synonyms`, :attr:`.column_attrs`, :attr:`.relationships`, and :attr:`.composites`. .. seealso:: :attr:`.Mapper.all_orm_descriptors` """ if _new_mappers: configure_mappers() return util.ImmutableProperties(self._props) @util.memoized_property def all_orm_descriptors(self): """A namespace of all :class:`._InspectionAttr` attributes associated with the mapped class. These attributes are in all cases Python :term:`descriptors` associated with the mapped class or its superclasses. This namespace includes attributes that are mapped to the class as well as attributes declared by extension modules. It includes any Python descriptor type that inherits from :class:`._InspectionAttr`. This includes :class:`.QueryableAttribute`, as well as extension types such as :class:`.hybrid_property`, :class:`.hybrid_method` and :class:`.AssociationProxy`. To distinguish between mapped attributes and extension attributes, the attribute :attr:`._InspectionAttr.extension_type` will refer to a constant that distinguishes between different extension types. When dealing with a :class:`.QueryableAttribute`, the :attr:`.QueryableAttribute.property` attribute refers to the :class:`.MapperProperty` property, which is what you get when referring to the collection of mapped properties via :attr:`.Mapper.attrs`. .. versionadded:: 0.8.0 .. seealso:: :attr:`.Mapper.attrs` """ return util.ImmutableProperties( dict(self.class_manager._all_sqla_attributes())) @_memoized_configured_property def synonyms(self): """Return a namespace of all :class:`.SynonymProperty` properties maintained by this :class:`.Mapper`. .. 
seealso:: :attr:`.Mapper.attrs` - namespace of all :class:`.MapperProperty` objects. """ return self._filter_properties(descriptor_props.SynonymProperty) @_memoized_configured_property def column_attrs(self): """Return a namespace of all :class:`.ColumnProperty` properties maintained by this :class:`.Mapper`. .. seealso:: :attr:`.Mapper.attrs` - namespace of all :class:`.MapperProperty` objects. """ return self._filter_properties(properties.ColumnProperty) @_memoized_configured_property def relationships(self): """Return a namespace of all :class:`.RelationshipProperty` properties maintained by this :class:`.Mapper`. .. seealso:: :attr:`.Mapper.attrs` - namespace of all :class:`.MapperProperty` objects. """ return self._filter_properties(properties.RelationshipProperty) @_memoized_configured_property def composites(self): """Return a namespace of all :class:`.CompositeProperty` properties maintained by this :class:`.Mapper`. .. seealso:: :attr:`.Mapper.attrs` - namespace of all :class:`.MapperProperty` objects. """ return self._filter_properties(descriptor_props.CompositeProperty) def _filter_properties(self, type_): if _new_mappers: configure_mappers() return util.ImmutableProperties(util.OrderedDict( (k, v) for k, v in self._props.iteritems() if isinstance(v, type_) )) @_memoized_configured_property def _get_clause(self): """create a "get clause" based on the primary key. this is used by query.get() and many-to-one lazyloads to load this item by primary key. """ params = [(primary_key, sql.bindparam(None, type_=primary_key.type)) for primary_key in self.primary_key] return sql.and_(*[k == v for (k, v) in params]), \ util.column_dict(params) @_memoized_configured_property def _equivalent_columns(self): """Create a map of all *equivalent* columns, based on the determination of column pairs that are equated to one another based on inherit condition. This is designed to work with the queries that util.polymorphic_union comes up with, which often don't include the columns from the base table directly (including the subclass table columns only). The resulting structure is a dictionary of columns mapped to lists of equivalent columns, i.e. { tablea.col1: set([tableb.col1, tablec.col1]), tablea.col2: set([tabled.col2]) } """ result = util.column_dict() def visit_binary(binary): if binary.operator == operators.eq: if binary.left in result: result[binary.left].add(binary.right) else: result[binary.left] = util.column_set((binary.right,)) if binary.right in result: result[binary.right].add(binary.left) else: result[binary.right] = util.column_set((binary.left,)) for mapper in self.base_mapper.self_and_descendants: if mapper.inherit_condition is not None: visitors.traverse( mapper.inherit_condition, {}, {'binary': visit_binary}) return result def _is_userland_descriptor(self, obj): if isinstance(obj, (_MappedAttribute, instrumentation.ClassManager, expression.ColumnElement)): return False else: return True def _should_exclude(self, name, assigned_name, local, column): """determine whether a particular property should be implicitly present on the class. This occurs when properties are propagated from an inherited class, or are applied from the columns present in the mapped table. 
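
        The user-facing counterparts of this check are the
        ``include_properties`` and ``exclude_properties`` mapper arguments;
        a minimal, hypothetical sketch (``Address`` and ``addresses_table``
        are placeholder names, not defined here)::

            from sqlalchemy.orm import mapper

            # only the listed columns are mapped; all other columns of
            # addresses_table are excluded via this routine
            mapper(Address, addresses_table,
                   include_properties=['id', 'email_address'])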
""" # check for class-bound attributes and/or descriptors, # either local or from an inherited class if local: if self.class_.__dict__.get(assigned_name, None) is not None \ and self._is_userland_descriptor( self.class_.__dict__[assigned_name]): return True else: if getattr(self.class_, assigned_name, None) is not None \ and self._is_userland_descriptor( getattr(self.class_, assigned_name)): return True if self.include_properties is not None and \ name not in self.include_properties and \ (column is None or column not in self.include_properties): self._log("not including property %s" % (name)) return True if self.exclude_properties is not None and \ ( name in self.exclude_properties or \ (column is not None and column in self.exclude_properties) ): self._log("excluding property %s" % (name)) return True return False def common_parent(self, other): """Return true if the given mapper shares a common inherited parent as this mapper.""" return self.base_mapper is other.base_mapper def _canload(self, state, allow_subtypes): s = self.primary_mapper() if self.polymorphic_on is not None or allow_subtypes: return _state_mapper(state).isa(s) else: return _state_mapper(state) is s def isa(self, other): """Return True if the this mapper inherits from the given mapper.""" m = self while m and m is not other: m = m.inherits return bool(m) def iterate_to_root(self): m = self while m: yield m m = m.inherits @_memoized_configured_property def self_and_descendants(self): """The collection including this mapper and all descendant mappers. This includes not just the immediately inheriting mappers but all their inheriting mappers as well. """ descendants = [] stack = deque([self]) while stack: item = stack.popleft() descendants.append(item) stack.extend(item._inheriting_mappers) return util.WeakSequence(descendants) def polymorphic_iterator(self): """Iterate through the collection including this mapper and all descendant mappers. This includes not just the immediately inheriting mappers but all their inheriting mappers as well. To iterate through an entire hierarchy, use ``mapper.base_mapper.polymorphic_iterator()``. """ return iter(self.self_and_descendants) def primary_mapper(self): """Return the primary mapper corresponding to this mapper's class key (class).""" return self.class_manager.mapper @property def primary_base_mapper(self): return self.class_manager.mapper.base_mapper def identity_key_from_row(self, row, adapter=None): """Return an identity-map key for use in storing/retrieving an item from the identity map. :param row: A :class:`.RowProxy` instance. The columns which are mapped by this :class:`.Mapper` should be locatable in the row, preferably via the :class:`.Column` object directly (as is the case when a :func:`.select` construct is executed), or via string names of the form ``_``. """ pk_cols = self.primary_key if adapter: pk_cols = [adapter.columns[c] for c in pk_cols] return self._identity_class, \ tuple(row[column] for column in pk_cols) def identity_key_from_primary_key(self, primary_key): """Return an identity-map key for use in storing/retrieving an item from an identity map. :param primary_key: A list of values indicating the identifier. """ return self._identity_class, tuple(primary_key) def identity_key_from_instance(self, instance): """Return the identity key for the given instance, based on its primary key attributes. If the instance's state is expired, calling this method will result in a database check to see if the object has been deleted. 
If the row no longer exists, :class:`~sqlalchemy.orm.exc.ObjectDeletedError` is raised. This value is typically also found on the instance state under the attribute name `key`. """ return self.identity_key_from_primary_key( self.primary_key_from_instance(instance)) def _identity_key_from_state(self, state): dict_ = state.dict manager = state.manager return self._identity_class, tuple([ manager[self._columntoproperty[col].key].\ impl.get(state, dict_, attributes.PASSIVE_OFF) for col in self.primary_key ]) def primary_key_from_instance(self, instance): """Return the list of primary key values for the given instance. If the instance's state is expired, calling this method will result in a database check to see if the object has been deleted. If the row no longer exists, :class:`~sqlalchemy.orm.exc.ObjectDeletedError` is raised. """ state = attributes.instance_state(instance) return self._primary_key_from_state(state) def _primary_key_from_state(self, state): dict_ = state.dict manager = state.manager return [ manager[self._columntoproperty[col].key].\ impl.get(state, dict_, attributes.PASSIVE_OFF) for col in self.primary_key ] def _get_state_attr_by_column(self, state, dict_, column, passive=attributes.PASSIVE_OFF): prop = self._columntoproperty[column] return state.manager[prop.key].impl.get(state, dict_, passive=passive) def _set_state_attr_by_column(self, state, dict_, column, value): prop = self._columntoproperty[column] state.manager[prop.key].impl.set(state, dict_, value, None) def _get_committed_attr_by_column(self, obj, column): state = attributes.instance_state(obj) dict_ = attributes.instance_dict(obj) return self._get_committed_state_attr_by_column(state, dict_, column) def _get_committed_state_attr_by_column(self, state, dict_, column, passive=attributes.PASSIVE_OFF): prop = self._columntoproperty[column] return state.manager[prop.key].impl.\ get_committed_value(state, dict_, passive=passive) def _optimized_get_statement(self, state, attribute_names): """assemble a WHERE clause which retrieves a given state by primary key, using a minimized set of tables. Applies to a joined-table inheritance mapper where the requested attribute names are only present on joined tables, not the base table. The WHERE clause attempts to include only those tables to minimize joins. 
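
        This path is typically exercised when an expired or deferred
        attribute that lives only on a subclass table is loaded; a
        hypothetical sketch (``Manager`` is a joined-inheritance subclass
        and ``session`` an active Session, neither defined here)::

            mgr = session.query(Manager).first()
            session.expire(mgr, ['manager_data'])

            # triggers a load that attempts to use only the tables
            # containing 'manager_data', per the logic below
            mgr.manager_data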
""" props = self._props tables = set(chain( *[sql_util.find_tables(c, check_columns=True) for key in attribute_names for c in props[key].columns] )) if self.base_mapper.local_table in tables: return None class ColumnsNotAvailable(Exception): pass def visit_binary(binary): leftcol = binary.left rightcol = binary.right if leftcol is None or rightcol is None: return if leftcol.table not in tables: leftval = self._get_committed_state_attr_by_column( state, state.dict, leftcol, passive=attributes.PASSIVE_NO_INITIALIZE) if leftval is attributes.PASSIVE_NO_RESULT or leftval is None: raise ColumnsNotAvailable() binary.left = sql.bindparam(None, leftval, type_=binary.right.type) elif rightcol.table not in tables: rightval = self._get_committed_state_attr_by_column( state, state.dict, rightcol, passive=attributes.PASSIVE_NO_INITIALIZE) if rightval is attributes.PASSIVE_NO_RESULT or \ rightval is None: raise ColumnsNotAvailable() binary.right = sql.bindparam(None, rightval, type_=binary.right.type) allconds = [] try: start = False for mapper in reversed(list(self.iterate_to_root())): if mapper.local_table in tables: start = True elif not isinstance(mapper.local_table, expression.TableClause): return None if start and not mapper.single: allconds.append(visitors.cloned_traverse( mapper.inherit_condition, {}, {'binary': visit_binary} ) ) except ColumnsNotAvailable: return None cond = sql.and_(*allconds) cols = [] for key in attribute_names: cols.extend(props[key].columns) return sql.select(cols, cond, use_labels=True) def cascade_iterator(self, type_, state, halt_on=None): """Iterate each element and its mapper in an object graph, for all relationships that meet the given cascade rule. :param type_: The name of the cascade rule (i.e. save-update, delete, etc.) :param state: The lead InstanceState. child items will be processed per the relationships defined for this object's mapper. the return value are object instances; this provides a strong reference so that they don't fall out of scope immediately. """ visited_states = set() prp, mpp = object(), object() visitables = deque([(deque(self._props.values()), prp, state, state.dict)]) while visitables: iterator, item_type, parent_state, parent_dict = visitables[-1] if not iterator: visitables.pop() continue if item_type is prp: prop = iterator.popleft() if type_ not in prop.cascade: continue queue = deque(prop.cascade_iterator(type_, parent_state, parent_dict, visited_states, halt_on)) if queue: visitables.append((queue, mpp, None, None)) elif item_type is mpp: instance, instance_mapper, corresponding_state, \ corresponding_dict = iterator.popleft() yield instance, instance_mapper, \ corresponding_state, corresponding_dict visitables.append((deque(instance_mapper._props.values()), prp, corresponding_state, corresponding_dict)) @_memoized_configured_property def _compiled_cache(self): return util.LRUCache(self._compiled_cache_size) @_memoized_configured_property def _sorted_tables(self): table_to_mapper = {} for mapper in self.base_mapper.self_and_descendants: for t in mapper.tables: table_to_mapper.setdefault(t, mapper) extra_dependencies = [] for table, mapper in table_to_mapper.items(): super_ = mapper.inherits if super_: extra_dependencies.extend([ (super_table, table) for super_table in super_.tables ]) def skip(fk): # attempt to skip dependencies that are not # significant to the inheritance chain # for two tables that are related by inheritance. # while that dependency may be important, it's techinically # not what we mean to sort on here. 
parent = table_to_mapper.get(fk.parent.table) dep = table_to_mapper.get(fk.column.table) if parent is not None and \ dep is not None and \ dep is not parent and \ dep.inherit_condition is not None: cols = set(sql_util.find_columns(dep.inherit_condition)) if parent.inherit_condition is not None: cols = cols.union(sql_util.find_columns( parent.inherit_condition)) return fk.parent not in cols and fk.column not in cols else: return fk.parent not in cols return False sorted_ = sql_util.sort_tables(table_to_mapper.iterkeys(), skip_fn=skip, extra_dependencies=extra_dependencies) ret = util.OrderedDict() for t in sorted_: ret[t] = table_to_mapper[t] return ret def _memo(self, key, callable_): if key in self._memoized_values: return self._memoized_values[key] else: self._memoized_values[key] = value = callable_() return value @util.memoized_property def _table_to_equated(self): """memoized map of tables to collections of columns to be synchronized upwards to the base mapper.""" result = util.defaultdict(list) for table in self._sorted_tables: cols = set(table.c) for m in self.iterate_to_root(): if m._inherits_equated_pairs and \ cols.intersection( [l for l, r in m._inherits_equated_pairs]): result[table].append((m, m._inherits_equated_pairs)) return result inspection._self_inspects(Mapper) log.class_logger(Mapper) def configure_mappers(): """Initialize the inter-mapper relationships of all mappers that have been constructed thus far. This function can be called any number of times, but in most cases is handled internally. """ global _new_mappers if not _new_mappers: return _call_configured = None _CONFIGURE_MUTEX.acquire() try: global _already_compiling if _already_compiling: return _already_compiling = True try: # double-check inside mutex if not _new_mappers: return # initialize properties on all mappers # note that _mapper_registry is unordered, which # may randomly conceal/reveal issues related to # the order of mapper compilation for mapper in list(_mapper_registry): if getattr(mapper, '_configure_failed', False): e = sa_exc.InvalidRequestError( "One or more mappers failed to initialize - " "can't proceed with initialization of other " "mappers. Original exception was: %s" % mapper._configure_failed) e._configure_failed = mapper._configure_failed raise e if not mapper.configured: try: mapper._post_configure_properties() mapper._expire_memoizations() mapper.dispatch.mapper_configured( mapper, mapper.class_) _call_configured = mapper except: exc = sys.exc_info()[1] if not hasattr(exc, '_configure_failed'): mapper._configure_failed = exc raise _new_mappers = False finally: _already_compiling = False finally: _CONFIGURE_MUTEX.release() if _call_configured is not None: _call_configured.dispatch.after_configured() def reconstructor(fn): """Decorate a method as the 'reconstructor' hook. Designates a method as the "reconstructor", an ``__init__``-like method that will be called by the ORM after the instance has been loaded from the database or otherwise reconstituted. The reconstructor will be invoked with no arguments. Scalar (non-collection) database-mapped attributes of the instance will be available for use within the function. Eagerly-loaded collections are generally not yet available and will usually only contain the first element. ORM state changes made to objects at this stage will not be recorded for the next flush() operation, so the activity within a reconstructor should be conservative. 
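
    A minimal usage sketch, assuming the usual declarative setup (the
    ``MyEntity`` class and its columns below are hypothetical)::

        from sqlalchemy import Column, Integer, String
        from sqlalchemy.ext.declarative import declarative_base
        from sqlalchemy.orm import reconstructor

        Base = declarative_base()

        class MyEntity(Base):
            __tablename__ = 'my_entity'

            id = Column(Integer, primary_key=True)
            data = Column(String)

            def __init__(self, data):
                # runs only for objects constructed directly in Python
                self.data = data
                self.derived = data.upper()

            @reconstructor
            def init_on_load(self):
                # runs after an instance is loaded from the database;
                # __init__ is bypassed in that case
                self.derived = (self.data or '').upper()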
""" fn.__sa_reconstructor__ = True return fn def validates(*names, **kw): """Decorate a method as a 'validator' for one or more named properties. Designates a method as a validator, a method which receives the name of the attribute as well as a value to be assigned, or in the case of a collection, the value to be added to the collection. The function can then raise validation exceptions to halt the process from continuing (where Python's built-in ``ValueError`` and ``AssertionError`` exceptions are reasonable choices), or can modify or replace the value before proceeding. The function should otherwise return the given value. Note that a validator for a collection **cannot** issue a load of that collection within the validation routine - this usage raises an assertion to avoid recursion overflows. This is a reentrant condition which is not supported. :param \*names: list of attribute names to be validated. :param include_removes: if True, "remove" events will be sent as well - the validation function must accept an additional argument "is_remove" which will be a boolean. .. versionadded:: 0.7.7 """ include_removes = kw.pop('include_removes', False) def wrap(fn): fn.__sa_validators__ = names fn.__sa_include_removes__ = include_removes return fn return wrap def _event_on_load(state, ctx): instrumenting_mapper = state.manager.info[_INSTRUMENTOR] if instrumenting_mapper._reconstructor: instrumenting_mapper._reconstructor(state.obj()) def _event_on_first_init(manager, cls): """Initial mapper compilation trigger. instrumentation calls this one when InstanceState is first generated, and is needed for legacy mutable attributes to work. """ instrumenting_mapper = manager.info.get(_INSTRUMENTOR) if instrumenting_mapper: if _new_mappers: configure_mappers() def _event_on_init(state, args, kwargs): """Run init_instance hooks. This also includes mapper compilation, normally not needed here but helps with some piecemeal configuration scenarios (such as in the ORM tutorial). """ instrumenting_mapper = state.manager.info.get(_INSTRUMENTOR) if instrumenting_mapper: if _new_mappers: configure_mappers() if instrumenting_mapper._set_polymorphic_identity: instrumenting_mapper._set_polymorphic_identity(state) def _event_on_resurrect(state): # re-populate the primary key elements # of the dict based on the mapping. instrumenting_mapper = state.manager.info.get(_INSTRUMENTOR) if instrumenting_mapper: for col, val in zip(instrumenting_mapper.primary_key, state.key[1]): instrumenting_mapper._set_state_attr_by_column( state, state.dict, col, val) class _ColumnMapping(dict): """Error reporting helper for mapper._columntoproperty.""" def __init__(self, mapper): self.mapper = mapper def __missing__(self, column): prop = self.mapper._props.get(column) if prop: raise orm_exc.UnmappedColumnError( "Column '%s.%s' is not available, due to " "conflicting property '%s':%r" % ( column.table.name, column.name, column.key, prop)) raise orm_exc.UnmappedColumnError( "No column %s is configured on mapper %s..." % (column, self.mapper)) SQLAlchemy-0.8.4/lib/sqlalchemy/orm/persistence.py0000644000076500000240000011462112251150015022631 0ustar classicstaff00000000000000# orm/persistence.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """private module containing functions used to emit INSERT, UPDATE and DELETE statements on behalf of a :class:`.Mapper` and its descending mappers. 
The functions here are called only by the unit of work functions in unitofwork.py. """ import operator from itertools import groupby from .. import sql, util, exc as sa_exc, schema from . import attributes, sync, exc as orm_exc, evaluator from .util import _state_mapper, state_str, _attr_as_key from ..sql import expression from . import loading def save_obj(base_mapper, states, uowtransaction, single=False): """Issue ``INSERT`` and/or ``UPDATE`` statements for a list of objects. This is called within the context of a UOWTransaction during a flush operation, given a list of states to be flushed. The base mapper in an inheritance hierarchy handles the inserts/ updates for all descendant mappers. """ # if batch=false, call _save_obj separately for each object if not single and not base_mapper.batch: for state in _sort_states(states): save_obj(base_mapper, [state], uowtransaction, single=True) return states_to_insert, states_to_update = _organize_states_for_save( base_mapper, states, uowtransaction) cached_connections = _cached_connection_dict(base_mapper) for table, mapper in base_mapper._sorted_tables.iteritems(): insert = _collect_insert_commands(base_mapper, uowtransaction, table, states_to_insert) update = _collect_update_commands(base_mapper, uowtransaction, table, states_to_update) if update: _emit_update_statements(base_mapper, uowtransaction, cached_connections, mapper, table, update) if insert: _emit_insert_statements(base_mapper, uowtransaction, cached_connections, table, insert) _finalize_insert_update_commands(base_mapper, uowtransaction, states_to_insert, states_to_update) def post_update(base_mapper, states, uowtransaction, post_update_cols): """Issue UPDATE statements on behalf of a relationship() which specifies post_update. """ cached_connections = _cached_connection_dict(base_mapper) states_to_update = _organize_states_for_post_update( base_mapper, states, uowtransaction) for table, mapper in base_mapper._sorted_tables.iteritems(): update = _collect_post_update_commands(base_mapper, uowtransaction, table, states_to_update, post_update_cols) if update: _emit_post_update_statements(base_mapper, uowtransaction, cached_connections, mapper, table, update) def delete_obj(base_mapper, states, uowtransaction): """Issue ``DELETE`` statements for a list of objects. This is called within the context of a UOWTransaction during a flush operation. """ cached_connections = _cached_connection_dict(base_mapper) states_to_delete = _organize_states_for_delete( base_mapper, states, uowtransaction) table_to_mapper = base_mapper._sorted_tables for table in reversed(table_to_mapper.keys()): delete = _collect_delete_commands(base_mapper, uowtransaction, table, states_to_delete) mapper = table_to_mapper[table] _emit_delete_statements(base_mapper, uowtransaction, cached_connections, mapper, table, delete) for state, state_dict, mapper, has_identity, connection \ in states_to_delete: mapper.dispatch.after_delete(mapper, connection, state) def _organize_states_for_save(base_mapper, states, uowtransaction): """Make an initial pass across a set of states for INSERT or UPDATE. This includes splitting out into distinct lists for each, calling before_insert/before_update, obtaining key information for each state including its dictionary, mapper, the connection to use for the execution per state, and the identity flag. 
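
    For reference, each entry in the two lists returned is a tuple of the
    form assembled below::

        (state, dict_, mapper, connection,
         has_identity, instance_key, row_switch)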
""" states_to_insert = [] states_to_update = [] for state, dict_, mapper, connection in _connections_for_states( base_mapper, uowtransaction, states): has_identity = bool(state.key) instance_key = state.key or mapper._identity_key_from_state(state) row_switch = None # call before_XXX extensions if not has_identity: mapper.dispatch.before_insert(mapper, connection, state) else: mapper.dispatch.before_update(mapper, connection, state) if mapper._validate_polymorphic_identity: mapper._validate_polymorphic_identity(mapper, state, dict_) # detect if we have a "pending" instance (i.e. has # no instance_key attached to it), and another instance # with the same identity key already exists as persistent. # convert to an UPDATE if so. if not has_identity and \ instance_key in uowtransaction.session.identity_map: instance = \ uowtransaction.session.identity_map[instance_key] existing = attributes.instance_state(instance) if not uowtransaction.is_deleted(existing): raise orm_exc.FlushError( "New instance %s with identity key %s conflicts " "with persistent instance %s" % (state_str(state), instance_key, state_str(existing))) base_mapper._log_debug( "detected row switch for identity %s. " "will update %s, remove %s from " "transaction", instance_key, state_str(state), state_str(existing)) # remove the "delete" flag from the existing element uowtransaction.remove_state_actions(existing) row_switch = existing if not has_identity and not row_switch: states_to_insert.append( (state, dict_, mapper, connection, has_identity, instance_key, row_switch) ) else: states_to_update.append( (state, dict_, mapper, connection, has_identity, instance_key, row_switch) ) return states_to_insert, states_to_update def _organize_states_for_post_update(base_mapper, states, uowtransaction): """Make an initial pass across a set of states for UPDATE corresponding to post_update. This includes obtaining key information for each state including its dictionary, mapper, the connection to use for the execution per state. """ return list(_connections_for_states(base_mapper, uowtransaction, states)) def _organize_states_for_delete(base_mapper, states, uowtransaction): """Make an initial pass across a set of states for DELETE. This includes calling out before_delete and obtaining key information for each state including its dictionary, mapper, the connection to use for the execution per state. """ states_to_delete = [] for state, dict_, mapper, connection in _connections_for_states( base_mapper, uowtransaction, states): mapper.dispatch.before_delete(mapper, connection, state) states_to_delete.append((state, dict_, mapper, bool(state.key), connection)) return states_to_delete def _collect_insert_commands(base_mapper, uowtransaction, table, states_to_insert): """Identify sets of values to use in INSERT statements for a list of states. 
""" insert = [] for state, state_dict, mapper, connection, has_identity, \ instance_key, row_switch in states_to_insert: if table not in mapper._pks_by_table: continue pks = mapper._pks_by_table[table] params = {} value_params = {} has_all_pks = True for col in mapper._cols_by_table[table]: if col is mapper.version_id_col: params[col.key] = mapper.version_id_generator(None) else: # pull straight from the dict for # pending objects prop = mapper._columntoproperty[col] value = state_dict.get(prop.key, None) if value is None: if col in pks: has_all_pks = False elif col.default is None and \ col.server_default is None: params[col.key] = value elif isinstance(value, sql.ClauseElement): value_params[col] = value else: params[col.key] = value insert.append((state, state_dict, params, mapper, connection, value_params, has_all_pks)) return insert def _collect_update_commands(base_mapper, uowtransaction, table, states_to_update): """Identify sets of values to use in UPDATE statements for a list of states. This function works intricately with the history system to determine exactly what values should be updated as well as how the row should be matched within an UPDATE statement. Includes some tricky scenarios where the primary key of an object might have been changed. """ update = [] for state, state_dict, mapper, connection, has_identity, \ instance_key, row_switch in states_to_update: if table not in mapper._pks_by_table: continue pks = mapper._pks_by_table[table] params = {} value_params = {} hasdata = hasnull = False for col in mapper._cols_by_table[table]: if col is mapper.version_id_col: params[col._label] = \ mapper._get_committed_state_attr_by_column( row_switch or state, row_switch and row_switch.dict or state_dict, col) prop = mapper._columntoproperty[col] history = attributes.get_state_history( state, prop.key, attributes.PASSIVE_NO_INITIALIZE ) if history.added: params[col.key] = history.added[0] hasdata = True else: params[col.key] = mapper.version_id_generator( params[col._label]) # HACK: check for history, in case the # history is only # in a different table than the one # where the version_id_col is. 
for prop in mapper._columntoproperty.itervalues(): history = attributes.get_state_history( state, prop.key, attributes.PASSIVE_NO_INITIALIZE) if history.added: hasdata = True else: prop = mapper._columntoproperty[col] history = attributes.get_state_history( state, prop.key, attributes.PASSIVE_NO_INITIALIZE) if history.added: if isinstance(history.added[0], sql.ClauseElement): value_params[col] = history.added[0] else: value = history.added[0] params[col.key] = value if col in pks: if history.deleted and \ not row_switch: # if passive_updates and sync detected # this was a pk->pk sync, use the new # value to locate the row, since the # DB would already have set this if ("pk_cascaded", state, col) in \ uowtransaction.attributes: value = history.added[0] params[col._label] = value else: # use the old value to # locate the row value = history.deleted[0] params[col._label] = value hasdata = True else: # row switch logic can reach us here # remove the pk from the update params # so the update doesn't # attempt to include the pk in the # update statement del params[col.key] value = history.added[0] params[col._label] = value if value is None: hasnull = True else: hasdata = True elif col in pks: value = state.manager[prop.key].impl.get( state, state_dict) if value is None: hasnull = True params[col._label] = value if hasdata: if hasnull: raise orm_exc.FlushError( "Can't update table " "using NULL for primary " "key value") update.append((state, state_dict, params, mapper, connection, value_params)) return update def _collect_post_update_commands(base_mapper, uowtransaction, table, states_to_update, post_update_cols): """Identify sets of values to use in UPDATE statements for a list of states within a post_update operation. """ update = [] for state, state_dict, mapper, connection in states_to_update: if table not in mapper._pks_by_table: continue pks = mapper._pks_by_table[table] params = {} hasdata = False for col in mapper._cols_by_table[table]: if col in pks: params[col._label] = \ mapper._get_state_attr_by_column( state, state_dict, col) elif col in post_update_cols: prop = mapper._columntoproperty[col] history = attributes.get_state_history( state, prop.key, attributes.PASSIVE_NO_INITIALIZE) if history.added: value = history.added[0] params[col.key] = value hasdata = True if hasdata: update.append((state, state_dict, params, mapper, connection)) return update def _collect_delete_commands(base_mapper, uowtransaction, table, states_to_delete): """Identify values to use in DELETE statements for a list of states to be deleted.""" delete = util.defaultdict(list) for state, state_dict, mapper, has_identity, connection \ in states_to_delete: if not has_identity or table not in mapper._pks_by_table: continue params = {} delete[connection].append(params) for col in mapper._pks_by_table[table]: params[col.key] = \ value = \ mapper._get_state_attr_by_column( state, state_dict, col) if value is None: raise orm_exc.FlushError( "Can't delete from table " "using NULL for primary " "key value") if mapper.version_id_col is not None and \ table.c.contains_column(mapper.version_id_col): params[mapper.version_id_col.key] = \ mapper._get_committed_state_attr_by_column( state, state_dict, mapper.version_id_col) return delete def _emit_update_statements(base_mapper, uowtransaction, cached_connections, mapper, table, update): """Emit UPDATE statements corresponding to value lists collected by _collect_update_commands().""" needs_version_id = mapper.version_id_col is not None and \ 
table.c.contains_column(mapper.version_id_col) def update_stmt(): clause = sql.and_() for col in mapper._pks_by_table[table]: clause.clauses.append(col == sql.bindparam(col._label, type_=col.type)) if needs_version_id: clause.clauses.append(mapper.version_id_col ==\ sql.bindparam(mapper.version_id_col._label, type_=mapper.version_id_col.type)) return table.update(clause) statement = base_mapper._memo(('update', table), update_stmt) rows = 0 for state, state_dict, params, mapper, \ connection, value_params in update: if value_params: c = connection.execute( statement.values(value_params), params) else: c = cached_connections[connection].\ execute(statement, params) _postfetch( mapper, uowtransaction, table, state, state_dict, c.context.prefetch_cols, c.context.postfetch_cols, c.context.compiled_parameters[0], value_params) rows += c.rowcount if connection.dialect.supports_sane_rowcount: if rows != len(update): raise orm_exc.StaleDataError( "UPDATE statement on table '%s' expected to " "update %d row(s); %d were matched." % (table.description, len(update), rows)) elif needs_version_id: util.warn("Dialect %s does not support updated rowcount " "- versioning cannot be verified." % c.dialect.dialect_description, stacklevel=12) def _emit_insert_statements(base_mapper, uowtransaction, cached_connections, table, insert): """Emit INSERT statements corresponding to value lists collected by _collect_insert_commands().""" statement = base_mapper._memo(('insert', table), table.insert) for (connection, pkeys, hasvalue, has_all_pks), \ records in groupby(insert, lambda rec: (rec[4], rec[2].keys(), bool(rec[5]), rec[6]) ): if has_all_pks and not hasvalue: records = list(records) multiparams = [rec[2] for rec in records] c = cached_connections[connection].\ execute(statement, multiparams) for (state, state_dict, params, mapper, conn, value_params, has_all_pks), \ last_inserted_params in \ zip(records, c.context.compiled_parameters): _postfetch( mapper, uowtransaction, table, state, state_dict, c.context.prefetch_cols, c.context.postfetch_cols, last_inserted_params, value_params) else: for state, state_dict, params, mapper, \ connection, value_params, \ has_all_pks in records: if value_params: result = connection.execute( statement.values(value_params), params) else: result = cached_connections[connection].\ execute(statement, params) primary_key = result.context.inserted_primary_key if primary_key is not None: # set primary key attributes for pk, col in zip(primary_key, mapper._pks_by_table[table]): prop = mapper._columntoproperty[col] if state_dict.get(prop.key) is None: # TODO: would rather say: #state_dict[prop.key] = pk mapper._set_state_attr_by_column( state, state_dict, col, pk) _postfetch( mapper, uowtransaction, table, state, state_dict, result.context.prefetch_cols, result.context.postfetch_cols, result.context.compiled_parameters[0], value_params) def _emit_post_update_statements(base_mapper, uowtransaction, cached_connections, mapper, table, update): """Emit UPDATE statements corresponding to value lists collected by _collect_post_update_commands().""" def update_stmt(): clause = sql.and_() for col in mapper._pks_by_table[table]: clause.clauses.append(col == sql.bindparam(col._label, type_=col.type)) return table.update(clause) statement = base_mapper._memo(('post_update', table), update_stmt) # execute each UPDATE in the order according to the original # list of states to guarantee row access order, but # also group them into common (connection, cols) sets # to support executemany(). 
for key, grouper in groupby( update, lambda rec: (rec[4], rec[2].keys()) ): connection = key[0] multiparams = [params for state, state_dict, params, mapper, conn in grouper] cached_connections[connection].\ execute(statement, multiparams) def _emit_delete_statements(base_mapper, uowtransaction, cached_connections, mapper, table, delete): """Emit DELETE statements corresponding to value lists collected by _collect_delete_commands().""" need_version_id = mapper.version_id_col is not None and \ table.c.contains_column(mapper.version_id_col) def delete_stmt(): clause = sql.and_() for col in mapper._pks_by_table[table]: clause.clauses.append( col == sql.bindparam(col.key, type_=col.type)) if need_version_id: clause.clauses.append( mapper.version_id_col == sql.bindparam( mapper.version_id_col.key, type_=mapper.version_id_col.type ) ) return table.delete(clause) for connection, del_objects in delete.iteritems(): statement = base_mapper._memo(('delete', table), delete_stmt) connection = cached_connections[connection] if need_version_id: # TODO: need test coverage for this [ticket:1761] if connection.dialect.supports_sane_rowcount: rows = 0 # execute deletes individually so that versioned # rows can be verified for params in del_objects: c = connection.execute(statement, params) rows += c.rowcount if rows != len(del_objects): raise orm_exc.StaleDataError( "DELETE statement on table '%s' expected to " "delete %d row(s); %d were matched." % (table.description, len(del_objects), c.rowcount) ) else: util.warn( "Dialect %s does not support deleted rowcount " "- versioning cannot be verified." % connection.dialect.dialect_description, stacklevel=12) connection.execute(statement, del_objects) else: connection.execute(statement, del_objects) def _finalize_insert_update_commands(base_mapper, uowtransaction, states_to_insert, states_to_update): """finalize state on states that have been inserted or updated, including calling after_insert/after_update events. """ for state, state_dict, mapper, connection, has_identity, \ instance_key, row_switch in states_to_insert + \ states_to_update: if mapper._readonly_props: readonly = state.unmodified_intersection( [p.key for p in mapper._readonly_props if p.expire_on_flush or p.key not in state.dict] ) if readonly: state._expire_attributes(state.dict, readonly) # if eager_defaults option is enabled, # refresh whatever has been expired. if base_mapper.eager_defaults and state.unloaded: state.key = base_mapper._identity_key_from_state(state) loading.load_on_ident( uowtransaction.session.query(base_mapper), state.key, refresh_state=state, only_load_props=state.unloaded) # call after_XXX extensions if not has_identity: mapper.dispatch.after_insert(mapper, connection, state) else: mapper.dispatch.after_update(mapper, connection, state) def _postfetch(mapper, uowtransaction, table, state, dict_, prefetch_cols, postfetch_cols, params, value_params): """Expire attributes in need of newly persisted database state, after an INSERT or UPDATE statement has proceeded for that state.""" if mapper.version_id_col is not None: prefetch_cols = list(prefetch_cols) + [mapper.version_id_col] for c in prefetch_cols: if c.key in params and c in mapper._columntoproperty: mapper._set_state_attr_by_column(state, dict_, c, params[c.key]) if postfetch_cols: state._expire_attributes(state.dict, [mapper._columntoproperty[c].key for c in postfetch_cols if c in mapper._columntoproperty] ) # synchronize newly inserted ids from one table to the next # TODO: this still goes a little too often. 
would be nice to # have definitive list of "columns that changed" here for m, equated_pairs in mapper._table_to_equated[table]: sync.populate(state, m, state, m, equated_pairs, uowtransaction, mapper.passive_updates) def _connections_for_states(base_mapper, uowtransaction, states): """Return an iterator of (state, state.dict, mapper, connection). The states are sorted according to _sort_states, then paired with the connection they should be using for the given unit of work transaction. """ # if session has a connection callable, # organize individual states with the connection # to use for update if uowtransaction.session.connection_callable: connection_callable = \ uowtransaction.session.connection_callable else: connection = None connection_callable = None for state in _sort_states(states): if connection_callable: connection = connection_callable(base_mapper, state.obj()) elif not connection: connection = uowtransaction.transaction.connection( base_mapper) mapper = _state_mapper(state) yield state, state.dict, mapper, connection def _cached_connection_dict(base_mapper): # dictionary of connection->connection_with_cache_options. return util.PopulateDict( lambda conn: conn.execution_options( compiled_cache=base_mapper._compiled_cache )) def _sort_states(states): pending = set(states) persistent = set(s for s in pending if s.key is not None) pending.difference_update(persistent) return sorted(pending, key=operator.attrgetter("insert_order")) + \ sorted(persistent, key=lambda q: q.key[1]) class BulkUD(object): """Handle bulk update and deletes via a :class:`.Query`.""" def __init__(self, query): self.query = query.enable_eagerloads(False) @classmethod def _factory(cls, lookup, synchronize_session, *arg): try: klass = lookup[synchronize_session] except KeyError: raise sa_exc.ArgumentError( "Valid strategies for session synchronization " "are %s" % (", ".join(sorted(repr(x) for x in lookup.keys())))) else: return klass(*arg) def exec_(self): self._do_pre() self._do_pre_synchronize() self._do_exec() self._do_post_synchronize() self._do_post() def _do_pre(self): query = self.query self.context = context = query._compile_context() if len(context.statement.froms) != 1 or \ not isinstance(context.statement.froms[0], schema.Table): self.primary_table = query._only_entity_zero( "This operation requires only one Table or " "entity be specified as the target." ).mapper.local_table else: self.primary_table = context.statement.froms[0] session = query.session if query._autoflush: session._autoflush() def _do_pre_synchronize(self): pass def _do_post_synchronize(self): pass class BulkEvaluate(BulkUD): """BulkUD which does the 'evaluate' method of session state resolution.""" def _additional_evaluators(self, evaluator_compiler): pass def _do_pre_synchronize(self): query = self.query try: evaluator_compiler = evaluator.EvaluatorCompiler() if query.whereclause is not None: eval_condition = evaluator_compiler.process( query.whereclause) else: def eval_condition(obj): return True self._additional_evaluators(evaluator_compiler) except evaluator.UnevaluatableError: raise sa_exc.InvalidRequestError( "Could not evaluate current criteria in Python. 
" "Specify 'fetch' or False for the " "synchronize_session parameter.") target_cls = query._mapper_zero().class_ #TODO: detect when the where clause is a trivial primary key match self.matched_objects = [ obj for (cls, pk), obj in query.session.identity_map.iteritems() if issubclass(cls, target_cls) and eval_condition(obj)] class BulkFetch(BulkUD): """BulkUD which does the 'fetch' method of session state resolution.""" def _do_pre_synchronize(self): query = self.query session = query.session select_stmt = self.context.statement.with_only_columns( self.primary_table.primary_key) self.matched_rows = session.execute( select_stmt, params=query._params).fetchall() class BulkUpdate(BulkUD): """BulkUD which handles UPDATEs.""" def __init__(self, query, values): super(BulkUpdate, self).__init__(query) self.query._no_select_modifiers("update") self.values = values @classmethod def factory(cls, query, synchronize_session, values): return BulkUD._factory({ "evaluate": BulkUpdateEvaluate, "fetch": BulkUpdateFetch, False: BulkUpdate }, synchronize_session, query, values) def _do_exec(self): update_stmt = sql.update(self.primary_table, self.context.whereclause, self.values) self.result = self.query.session.execute( update_stmt, params=self.query._params) self.rowcount = self.result.rowcount def _do_post(self): session = self.query.session session.dispatch.after_bulk_update(session, self.query, self.context, self.result) class BulkDelete(BulkUD): """BulkUD which handles DELETEs.""" def __init__(self, query): super(BulkDelete, self).__init__(query) self.query._no_select_modifiers("delete") @classmethod def factory(cls, query, synchronize_session): return BulkUD._factory({ "evaluate": BulkDeleteEvaluate, "fetch": BulkDeleteFetch, False: BulkDelete }, synchronize_session, query) def _do_exec(self): delete_stmt = sql.delete(self.primary_table, self.context.whereclause) self.result = self.query.session.execute(delete_stmt, params=self.query._params) self.rowcount = self.result.rowcount def _do_post(self): session = self.query.session session.dispatch.after_bulk_delete(session, self.query, self.context, self.result) class BulkUpdateEvaluate(BulkEvaluate, BulkUpdate): """BulkUD which handles UPDATEs using the "evaluate" method of session resolution.""" def _additional_evaluators(self, evaluator_compiler): self.value_evaluators = {} for key, value in self.values.iteritems(): key = _attr_as_key(key) self.value_evaluators[key] = evaluator_compiler.process( expression._literal_as_binds(value)) def _do_post_synchronize(self): session = self.query.session states = set() evaluated_keys = self.value_evaluators.keys() for obj in self.matched_objects: state, dict_ = attributes.instance_state(obj),\ attributes.instance_dict(obj) # only evaluate unmodified attributes to_evaluate = state.unmodified.intersection( evaluated_keys) for key in to_evaluate: dict_[key] = self.value_evaluators[key](obj) state._commit(dict_, list(to_evaluate)) # expire attributes with pending changes # (there was no autoflush, so they are overwritten) state._expire_attributes(dict_, set(evaluated_keys). 
difference(to_evaluate)) states.add(state) session._register_altered(states) class BulkDeleteEvaluate(BulkEvaluate, BulkDelete): """BulkUD which handles DELETEs using the "evaluate" method of session resolution.""" def _do_post_synchronize(self): self.query.session._remove_newly_deleted( [attributes.instance_state(obj) for obj in self.matched_objects]) class BulkUpdateFetch(BulkFetch, BulkUpdate): """BulkUD which handles UPDATEs using the "fetch" method of session resolution.""" def _do_post_synchronize(self): session = self.query.session target_mapper = self.query._mapper_zero() states = set([ attributes.instance_state(session.identity_map[identity_key]) for identity_key in [ target_mapper.identity_key_from_primary_key( list(primary_key)) for primary_key in self.matched_rows ] if identity_key in session.identity_map ]) attrib = [_attr_as_key(k) for k in self.values] for state in states: session._expire_state(state, attrib) session._register_altered(states) class BulkDeleteFetch(BulkFetch, BulkDelete): """BulkUD which handles DELETEs using the "fetch" method of session resolution.""" def _do_post_synchronize(self): session = self.query.session target_mapper = self.query._mapper_zero() for primary_key in self.matched_rows: # TODO: inline this and call remove_newly_deleted # once identity_key = target_mapper.identity_key_from_primary_key( list(primary_key)) if identity_key in session.identity_map: session._remove_newly_deleted( [attributes.instance_state( session.identity_map[identity_key] )] ) SQLAlchemy-0.8.4/lib/sqlalchemy/orm/properties.py0000644000076500000240000014357712251150015022515 0ustar classicstaff00000000000000# orm/properties.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """MapperProperty implementations. This is a private module which defines the behavior of invidual ORM- mapped attributes. """ from .. import sql, util, log, exc as sa_exc, inspect from ..sql import operators, expression from . import ( attributes, mapper, strategies, configure_mappers, relationships, dependency ) from .util import CascadeOptions, \ _orm_annotate, _orm_deannotate, _orm_full_deannotate from .interfaces import MANYTOMANY, MANYTOONE, ONETOMANY,\ PropComparator, StrategizedProperty mapperlib = util.importlater("sqlalchemy.orm", "mapperlib") NoneType = type(None) from descriptor_props import CompositeProperty, SynonymProperty, \ ComparableProperty, ConcreteInheritedProperty __all__ = ['ColumnProperty', 'CompositeProperty', 'SynonymProperty', 'ComparableProperty', 'RelationshipProperty', 'RelationProperty'] class ColumnProperty(StrategizedProperty): """Describes an object attribute that corresponds to a table column. Public constructor is the :func:`.orm.column_property` function. """ def __init__(self, *columns, **kwargs): """Construct a ColumnProperty. Note the public constructor is the :func:`.orm.column_property` function. :param \*columns: The list of `columns` describes a single object property. If there are multiple tables joined together for the mapper, this list represents the equivalent column as it appears across each table. :param group: :param deferred: :param comparator_factory: :param descriptor: :param expire_on_flush: :param extension: :param info: Optional data dictionary which will be populated into the :attr:`.info` attribute of this object. 
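        A brief usage sketch (added for illustration; ``User`` and
        ``user_table`` are assumed to be an existing class and
        :class:`.Table`, and are not defined here)::

            from sqlalchemy.orm import mapper, column_property

            mapper(User, user_table, properties={
                'name': column_property(
                            user_table.c.user_name, active_history=True)
            })

        Only the keyword arguments listed above are accepted; any other
        keyword raises ``TypeError``, as the constructor below shows.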
""" self._orig_columns = [expression._labeled(c) for c in columns] self.columns = [expression._labeled(_orm_full_deannotate(c)) for c in columns] self.group = kwargs.pop('group', None) self.deferred = kwargs.pop('deferred', False) self.instrument = kwargs.pop('_instrument', True) self.comparator_factory = kwargs.pop('comparator_factory', self.__class__.Comparator) self.descriptor = kwargs.pop('descriptor', None) self.extension = kwargs.pop('extension', None) self.active_history = kwargs.pop('active_history', False) self.expire_on_flush = kwargs.pop('expire_on_flush', True) if 'info' in kwargs: self.info = kwargs.pop('info') if 'doc' in kwargs: self.doc = kwargs.pop('doc') else: for col in reversed(self.columns): doc = getattr(col, 'doc', None) if doc is not None: self.doc = doc break else: self.doc = None if kwargs: raise TypeError( "%s received unexpected keyword argument(s): %s" % ( self.__class__.__name__, ', '.join(sorted(kwargs.keys())))) util.set_creation_order(self) if not self.instrument: self.strategy_class = strategies.UninstrumentedColumnLoader elif self.deferred: self.strategy_class = strategies.DeferredColumnLoader else: self.strategy_class = strategies.ColumnLoader @property def expression(self): """Return the primary column or expression for this ColumnProperty. """ return self.columns[0] def instrument_class(self, mapper): if not self.instrument: return attributes.register_descriptor( mapper.class_, self.key, comparator=self.comparator_factory(self, mapper), parententity=mapper, doc=self.doc ) def do_init(self): super(ColumnProperty, self).do_init() if len(self.columns) > 1 and \ set(self.parent.primary_key).issuperset(self.columns): util.warn( ("On mapper %s, primary key column '%s' is being combined " "with distinct primary key column '%s' in attribute '%s'. " "Use explicit properties to give each column its own mapped " "attribute name.") % (self.parent, self.columns[1], self.columns[0], self.key)) def copy(self): return ColumnProperty( deferred=self.deferred, group=self.group, active_history=self.active_history, *self.columns) def _getcommitted(self, state, dict_, column, passive=attributes.PASSIVE_OFF): return state.get_impl(self.key).\ get_committed_value(state, dict_, passive=passive) def merge(self, session, source_state, source_dict, dest_state, dest_dict, load, _recursive): if not self.instrument: return elif self.key in source_dict: value = source_dict[self.key] if not load: dest_dict[self.key] = value else: impl = dest_state.get_impl(self.key) impl.set(dest_state, dest_dict, value, None) elif dest_state.has_identity and self.key not in dest_dict: dest_state._expire_attributes(dest_dict, [self.key]) class Comparator(PropComparator): """Produce boolean, comparison, and other operators for :class:`.ColumnProperty` attributes. See the documentation for :class:`.PropComparator` for a brief overview. See also: :class:`.PropComparator` :class:`.ColumnOperators` :ref:`types_operators` :attr:`.TypeEngine.comparator_factory` """ @util.memoized_instancemethod def __clause_element__(self): if self.adapter: return self.adapter(self.prop.columns[0]) else: return self.prop.columns[0]._annotate({ "parententity": self._parentmapper, "parentmapper": self._parentmapper}) @util.memoized_property def info(self): ce = self.__clause_element__() try: return ce.info except AttributeError: return self.prop.info def __getattr__(self, key): """proxy attribute access down to the mapped column. this allows user-defined comparison methods to be accessed. 
""" return getattr(self.__clause_element__(), key) def operate(self, op, *other, **kwargs): return op(self.__clause_element__(), *other, **kwargs) def reverse_operate(self, op, other, **kwargs): col = self.__clause_element__() return op(col._bind_param(op, other), col, **kwargs) # TODO: legacy..do we need this ? (0.5) ColumnComparator = Comparator def __str__(self): return str(self.parent.class_.__name__) + "." + self.key log.class_logger(ColumnProperty) class RelationshipProperty(StrategizedProperty): """Describes an object property that holds a single item or list of items that correspond to a related database table. Public constructor is the :func:`.orm.relationship` function. See also: :ref:`relationship_config_toplevel` """ strategy_wildcard_key = 'relationship:*' _dependency_processor = None def __init__(self, argument, secondary=None, primaryjoin=None, secondaryjoin=None, foreign_keys=None, uselist=None, order_by=False, backref=None, back_populates=None, post_update=False, cascade=False, extension=None, viewonly=False, lazy=True, collection_class=None, passive_deletes=False, passive_updates=True, remote_side=None, enable_typechecks=True, join_depth=None, comparator_factory=None, single_parent=False, innerjoin=False, distinct_target_key=False, doc=None, active_history=False, cascade_backrefs=True, load_on_pending=False, strategy_class=None, _local_remote_pairs=None, query_class=None, info=None): self.uselist = uselist self.argument = argument self.secondary = secondary self.primaryjoin = primaryjoin self.secondaryjoin = secondaryjoin self.post_update = post_update self.direction = None self.viewonly = viewonly self.lazy = lazy self.single_parent = single_parent self._user_defined_foreign_keys = foreign_keys self.collection_class = collection_class self.passive_deletes = passive_deletes self.cascade_backrefs = cascade_backrefs self.passive_updates = passive_updates self.remote_side = remote_side self.enable_typechecks = enable_typechecks self.query_class = query_class self.innerjoin = innerjoin self.distinct_target_key = distinct_target_key self.doc = doc self.active_history = active_history self.join_depth = join_depth self.local_remote_pairs = _local_remote_pairs self.extension = extension self.load_on_pending = load_on_pending self.comparator_factory = comparator_factory or \ RelationshipProperty.Comparator self.comparator = self.comparator_factory(self, None) util.set_creation_order(self) if info is not None: self.info = info if strategy_class: self.strategy_class = strategy_class elif self.lazy == 'dynamic': from sqlalchemy.orm import dynamic self.strategy_class = dynamic.DynaLoader else: self.strategy_class = strategies.factory(self.lazy) self._reverse_property = set() self.cascade = cascade if cascade is not False \ else "save-update, merge" self.order_by = order_by self.back_populates = back_populates if self.back_populates: if backref: raise sa_exc.ArgumentError( "backref and back_populates keyword arguments " "are mutually exclusive") self.backref = None else: self.backref = backref def instrument_class(self, mapper): attributes.register_descriptor( mapper.class_, self.key, comparator=self.comparator_factory(self, mapper), parententity=mapper, doc=self.doc, ) class Comparator(PropComparator): """Produce boolean, comparison, and other operators for :class:`.RelationshipProperty` attributes. See the documentation for :class:`.PropComparator` for a brief overview of ORM level operator definition. 
See also: :class:`.PropComparator` :class:`.ColumnProperty.Comparator` :class:`.ColumnOperators` :ref:`types_operators` :attr:`.TypeEngine.comparator_factory` """ _of_type = None def __init__(self, prop, parentmapper, of_type=None, adapter=None): """Construction of :class:`.RelationshipProperty.Comparator` is internal to the ORM's attribute mechanics. """ self.prop = prop self._parentmapper = parentmapper self.adapter = adapter if of_type: self._of_type = of_type def adapted(self, adapter): """Return a copy of this PropComparator which will use the given adaption function on the local side of generated expressions. """ return self.__class__(self.property, self._parentmapper, getattr(self, '_of_type', None), adapter) @util.memoized_property def mapper(self): """The target :class:`.Mapper` referred to by this :class:`.RelationshipProperty.Comparator. This is the "target" or "remote" side of the :func:`.relationship`. """ return self.property.mapper @util.memoized_property def _parententity(self): return self.property.parent def _source_selectable(self): elem = self.property.parent._with_polymorphic_selectable if self.adapter: return self.adapter(elem) else: return elem def __clause_element__(self): adapt_from = self._source_selectable() if self._of_type: of_type = inspect(self._of_type).mapper else: of_type = None pj, sj, source, dest, \ secondary, target_adapter = self.property._create_joins( source_selectable=adapt_from, source_polymorphic=True, of_type=of_type) if sj is not None: return pj & sj else: return pj def of_type(self, cls): """Produce a construct that represents a particular 'subtype' of attribute for the parent class. Currently this is usable in conjunction with :meth:`.Query.join` and :meth:`.Query.outerjoin`. """ return RelationshipProperty.Comparator( self.property, self._parentmapper, cls, adapter=self.adapter) def in_(self, other): """Produce an IN clause - this is not implemented for :func:`~.orm.relationship`-based attributes at this time. """ raise NotImplementedError('in_() not yet supported for ' 'relationships. For a simple many-to-one, use ' 'in_() against the set of foreign key values.') __hash__ = None def __eq__(self, other): """Implement the ``==`` operator. In a many-to-one context, such as:: MyClass.some_prop == this will typically produce a clause such as:: mytable.related_id == Where ```` is the primary key of the given object. The ``==`` operator provides partial functionality for non- many-to-one comparisons: * Comparisons against collections are not supported. Use :meth:`~.RelationshipProperty.Comparator.contains`. * Compared to a scalar one-to-many, will produce a clause that compares the target columns in the parent to the given target. * Compared to a scalar many-to-many, an alias of the association table will be rendered as well, forming a natural join that is part of the main body of the query. This will not work for queries that go beyond simple AND conjunctions of comparisons, such as those which use OR. Use explicit joins, outerjoins, or :meth:`~.RelationshipProperty.Comparator.has` for more comprehensive non-many-to-one scalar membership tests. * Comparisons against ``None`` given in a one-to-many or many-to-many context produce a NOT EXISTS clause. 
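            For example (an added illustration; ``User``, ``Address`` and
            ``someuser`` are hypothetical mapped classes/objects), a
            one-to-many collection compared to ``None``::

                session.query(User).filter(User.addresses == None)

            produces a NOT EXISTS subquery against the related table,
            whereas the many-to-one form ``Address.user == someuser``
            compares the foreign key column directly, as described above.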
""" if isinstance(other, (NoneType, expression.Null)): if self.property.direction in [ONETOMANY, MANYTOMANY]: return ~self._criterion_exists() else: return _orm_annotate(self.property._optimized_compare( None, adapt_source=self.adapter)) elif self.property.uselist: raise sa_exc.InvalidRequestError("Can't compare a colle" "ction to an object or collection; use " "contains() to test for membership.") else: return _orm_annotate(self.property._optimized_compare(other, adapt_source=self.adapter)) def _criterion_exists(self, criterion=None, **kwargs): if getattr(self, '_of_type', None): info = inspect(self._of_type) target_mapper, to_selectable, is_aliased_class = \ info.mapper, info.selectable, info.is_aliased_class if self.property._is_self_referential and not is_aliased_class: to_selectable = to_selectable.alias() single_crit = target_mapper._single_table_criterion if single_crit is not None: if criterion is not None: criterion = single_crit & criterion else: criterion = single_crit else: is_aliased_class = False to_selectable = None if self.adapter: source_selectable = self._source_selectable() else: source_selectable = None pj, sj, source, dest, secondary, target_adapter = \ self.property._create_joins(dest_polymorphic=True, dest_selectable=to_selectable, source_selectable=source_selectable) for k in kwargs: crit = getattr(self.property.mapper.class_, k) == kwargs[k] if criterion is None: criterion = crit else: criterion = criterion & crit # annotate the *local* side of the join condition, in the case # of pj + sj this is the full primaryjoin, in the case of just # pj its the local side of the primaryjoin. if sj is not None: j = _orm_annotate(pj) & sj else: j = _orm_annotate(pj, exclude=self.property.remote_side) if criterion is not None and target_adapter and not is_aliased_class: # limit this adapter to annotated only? criterion = target_adapter.traverse(criterion) # only have the "joined left side" of what we # return be subject to Query adaption. The right # side of it is used for an exists() subquery and # should not correlate or otherwise reach out # to anything in the enclosing query. if criterion is not None: criterion = criterion._annotate( {'no_replacement_traverse': True}) crit = j & criterion ex = sql.exists([1], crit, from_obj=dest).correlate_except(dest) if secondary is not None: ex = ex.correlate_except(secondary) return ex def any(self, criterion=None, **kwargs): """Produce an expression that tests a collection against particular criterion, using EXISTS. An expression like:: session.query(MyClass).filter( MyClass.somereference.any(SomeRelated.x==2) ) Will produce a query like:: SELECT * FROM my_table WHERE EXISTS (SELECT 1 FROM related WHERE related.my_id=my_table.id AND related.x=2) Because :meth:`~.RelationshipProperty.Comparator.any` uses a correlated subquery, its performance is not nearly as good when compared against large target tables as that of using a join. :meth:`~.RelationshipProperty.Comparator.any` is particularly useful for testing for empty collections:: session.query(MyClass).filter( ~MyClass.somereference.any() ) will produce:: SELECT * FROM my_table WHERE NOT EXISTS (SELECT 1 FROM related WHERE related.my_id=my_table.id) :meth:`~.RelationshipProperty.Comparator.any` is only valid for collections, i.e. a :func:`.relationship` that has ``uselist=True``. For scalar references, use :meth:`~.RelationshipProperty.Comparator.has`. """ if not self.property.uselist: raise sa_exc.InvalidRequestError( "'any()' not implemented for scalar " "attributes. Use has()." 
) return self._criterion_exists(criterion, **kwargs) def has(self, criterion=None, **kwargs): """Produce an expression that tests a scalar reference against particular criterion, using EXISTS. An expression like:: session.query(MyClass).filter( MyClass.somereference.has(SomeRelated.x==2) ) Will produce a query like:: SELECT * FROM my_table WHERE EXISTS (SELECT 1 FROM related WHERE related.id==my_table.related_id AND related.x=2) Because :meth:`~.RelationshipProperty.Comparator.has` uses a correlated subquery, its performance is not nearly as good when compared against large target tables as that of using a join. :meth:`~.RelationshipProperty.Comparator.has` is only valid for scalar references, i.e. a :func:`.relationship` that has ``uselist=False``. For collection references, use :meth:`~.RelationshipProperty.Comparator.any`. """ if self.property.uselist: raise sa_exc.InvalidRequestError( "'has()' not implemented for collections. " "Use any().") return self._criterion_exists(criterion, **kwargs) def contains(self, other, **kwargs): """Return a simple expression that tests a collection for containment of a particular item. :meth:`~.RelationshipProperty.Comparator.contains` is only valid for a collection, i.e. a :func:`~.orm.relationship` that implements one-to-many or many-to-many with ``uselist=True``. When used in a simple one-to-many context, an expression like:: MyClass.contains(other) Produces a clause like:: mytable.id == Where ```` is the value of the foreign key attribute on ``other`` which refers to the primary key of its parent object. From this it follows that :meth:`~.RelationshipProperty.Comparator.contains` is very useful when used with simple one-to-many operations. For many-to-many operations, the behavior of :meth:`~.RelationshipProperty.Comparator.contains` has more caveats. The association table will be rendered in the statement, producing an "implicit" join, that is, includes multiple tables in the FROM clause which are equated in the WHERE clause:: query(MyClass).filter(MyClass.contains(other)) Produces a query like:: SELECT * FROM my_table, my_association_table AS my_association_table_1 WHERE my_table.id = my_association_table_1.parent_id AND my_association_table_1.child_id = Where ```` would be the primary key of ``other``. From the above, it is clear that :meth:`~.RelationshipProperty.Comparator.contains` will **not** work with many-to-many collections when used in queries that move beyond simple AND conjunctions, such as multiple :meth:`~.RelationshipProperty.Comparator.contains` expressions joined by OR. In such cases subqueries or explicit "outer joins" will need to be used instead. See :meth:`~.RelationshipProperty.Comparator.any` for a less-performant alternative using EXISTS, or refer to :meth:`.Query.outerjoin` as well as :ref:`ormtutorial_joins` for more details on constructing outer joins. """ if not self.property.uselist: raise sa_exc.InvalidRequestError( "'contains' not implemented for scalar " "attributes. 
Use ==") clause = self.property._optimized_compare(other, adapt_source=self.adapter) if self.property.secondaryjoin is not None: clause.negation_clause = \ self.__negated_contains_or_equals(other) return clause def __negated_contains_or_equals(self, other): if self.property.direction == MANYTOONE: state = attributes.instance_state(other) def state_bindparam(x, state, col): o = state.obj() # strong ref return sql.bindparam(x, unique=True, callable_=lambda: \ self.property.mapper._get_committed_attr_by_column(o, col)) def adapt(col): if self.adapter: return self.adapter(col) else: return col if self.property._use_get: return sql.and_(*[ sql.or_( adapt(x) != state_bindparam(adapt(x), state, y), adapt(x) == None) for (x, y) in self.property.local_remote_pairs]) criterion = sql.and_(*[x == y for (x, y) in zip( self.property.mapper.primary_key, self.property.\ mapper.\ primary_key_from_instance(other)) ]) return ~self._criterion_exists(criterion) def __ne__(self, other): """Implement the ``!=`` operator. In a many-to-one context, such as:: MyClass.some_prop != This will typically produce a clause such as:: mytable.related_id != Where ```` is the primary key of the given object. The ``!=`` operator provides partial functionality for non- many-to-one comparisons: * Comparisons against collections are not supported. Use :meth:`~.RelationshipProperty.Comparator.contains` in conjunction with :func:`~.expression.not_`. * Compared to a scalar one-to-many, will produce a clause that compares the target columns in the parent to the given target. * Compared to a scalar many-to-many, an alias of the association table will be rendered as well, forming a natural join that is part of the main body of the query. This will not work for queries that go beyond simple AND conjunctions of comparisons, such as those which use OR. Use explicit joins, outerjoins, or :meth:`~.RelationshipProperty.Comparator.has` in conjunction with :func:`~.expression.not_` for more comprehensive non-many-to-one scalar membership tests. * Comparisons against ``None`` given in a one-to-many or many-to-many context produce an EXISTS clause. """ if isinstance(other, (NoneType, expression.Null)): if self.property.direction == MANYTOONE: return sql.or_(*[x != None for x in self.property._calculated_foreign_keys]) else: return self._criterion_exists() elif self.property.uselist: raise sa_exc.InvalidRequestError("Can't compare a collection" " to an object or collection; use " "contains() to test for membership.") else: return self.__negated_contains_or_equals(other) @util.memoized_property def property(self): if mapperlib.module._new_mappers: configure_mappers() return self.prop def compare(self, op, value, value_is_parent=False, alias_secondary=True): if op == operators.eq: if value is None: if self.uselist: return ~sql.exists([1], self.primaryjoin) else: return self._optimized_compare(None, value_is_parent=value_is_parent, alias_secondary=alias_secondary) else: return self._optimized_compare(value, value_is_parent=value_is_parent, alias_secondary=alias_secondary) else: return op(self.comparator, value) def _optimized_compare(self, value, value_is_parent=False, adapt_source=None, alias_secondary=True): if value is not None: value = attributes.instance_state(value) return self._get_strategy(strategies.LazyLoader).lazy_clause(value, reverse_direction=not value_is_parent, alias_secondary=alias_secondary, adapt_source=adapt_source) def __str__(self): return str(self.parent.class_.__name__) + "." 
+ self.key def merge(self, session, source_state, source_dict, dest_state, dest_dict, load, _recursive): if load: for r in self._reverse_property: if (source_state, r) in _recursive: return if not "merge" in self._cascade: return if self.key not in source_dict: return if self.uselist: instances = source_state.get_impl(self.key).\ get(source_state, source_dict) if hasattr(instances, '_sa_adapter'): # convert collections to adapters to get a true iterator instances = instances._sa_adapter if load: # for a full merge, pre-load the destination collection, # so that individual _merge of each item pulls from identity # map for those already present. # also assumes CollectionAttrbiuteImpl behavior of loading # "old" list in any case dest_state.get_impl(self.key).get(dest_state, dest_dict) dest_list = [] for current in instances: current_state = attributes.instance_state(current) current_dict = attributes.instance_dict(current) _recursive[(current_state, self)] = True obj = session._merge(current_state, current_dict, load=load, _recursive=_recursive) if obj is not None: dest_list.append(obj) if not load: coll = attributes.init_state_collection(dest_state, dest_dict, self.key) for c in dest_list: coll.append_without_event(c) else: dest_state.get_impl(self.key)._set_iterable(dest_state, dest_dict, dest_list) else: current = source_dict[self.key] if current is not None: current_state = attributes.instance_state(current) current_dict = attributes.instance_dict(current) _recursive[(current_state, self)] = True obj = session._merge(current_state, current_dict, load=load, _recursive=_recursive) else: obj = None if not load: dest_dict[self.key] = obj else: dest_state.get_impl(self.key).set(dest_state, dest_dict, obj, None) def _value_as_iterable(self, state, dict_, key, passive=attributes.PASSIVE_OFF): """Return a list of tuples (state, obj) for the given key. returns an empty list if the value is None/empty/PASSIVE_NO_RESULT """ impl = state.manager[key].impl x = impl.get(state, dict_, passive=passive) if x is attributes.PASSIVE_NO_RESULT or x is None: return [] elif hasattr(impl, 'get_collection'): return [ (attributes.instance_state(o), o) for o in impl.get_collection(state, dict_, x, passive=passive) ] else: return [(attributes.instance_state(x), x)] def cascade_iterator(self, type_, state, dict_, visited_states, halt_on=None): #assert type_ in self._cascade # only actively lazy load on the 'delete' cascade if type_ != 'delete' or self.passive_deletes: passive = attributes.PASSIVE_NO_INITIALIZE else: passive = attributes.PASSIVE_OFF if type_ == 'save-update': tuples = state.manager[self.key].impl.\ get_all_pending(state, dict_) else: tuples = self._value_as_iterable(state, dict_, self.key, passive=passive) skip_pending = type_ == 'refresh-expire' and 'delete-orphan' \ not in self._cascade for instance_state, c in tuples: if instance_state in visited_states: continue if c is None: # would like to emit a warning here, but # would not be consistent with collection.append(None) # current behavior of silently skipping. 
# see [ticket:2229] continue instance_dict = attributes.instance_dict(c) if halt_on and halt_on(instance_state): continue if skip_pending and not instance_state.key: continue instance_mapper = instance_state.manager.mapper if not instance_mapper.isa(self.mapper.class_manager.mapper): raise AssertionError("Attribute '%s' on class '%s' " "doesn't handle objects " "of type '%s'" % ( self.key, self.parent.class_, c.__class__ )) visited_states.add(instance_state) yield c, instance_mapper, instance_state, instance_dict def _add_reverse_property(self, key): other = self.mapper.get_property(key, _configure_mappers=False) self._reverse_property.add(other) other._reverse_property.add(self) if not other.mapper.common_parent(self.parent): raise sa_exc.ArgumentError('reverse_property %r on ' 'relationship %s references relationship %s, which ' 'does not reference mapper %s' % (key, self, other, self.parent)) if self.direction in (ONETOMANY, MANYTOONE) and self.direction \ == other.direction: raise sa_exc.ArgumentError('%s and back-reference %s are ' 'both of the same direction %r. Did you mean to ' 'set remote_side on the many-to-one side ?' % (other, self, self.direction)) @util.memoized_property def mapper(self): """Return the targeted :class:`.Mapper` for this :class:`.RelationshipProperty`. This is a lazy-initializing static attribute. """ if isinstance(self.argument, type): mapper_ = mapper.class_mapper(self.argument, configure=False) elif isinstance(self.argument, mapper.Mapper): mapper_ = self.argument elif util.callable(self.argument): # accept a callable to suit various deferred- # configurational schemes mapper_ = mapper.class_mapper(self.argument(), configure=False) else: raise sa_exc.ArgumentError("relationship '%s' expects " "a class or a mapper argument (received: %s)" % (self.key, type(self.argument))) assert isinstance(mapper_, mapper.Mapper), mapper_ return mapper_ @util.memoized_property @util.deprecated("0.7", "Use .target") def table(self): """Return the selectable linked to this :class:`.RelationshipProperty` object's target :class:`.Mapper`.""" return self.target def do_init(self): self._check_conflicts() self._process_dependent_arguments() self._setup_join_conditions() self._check_cascade_settings(self._cascade) self._post_init() self._generate_backref() super(RelationshipProperty, self).do_init() def _process_dependent_arguments(self): """Convert incoming configuration arguments to their proper form. Callables are resolved, ORM annotations removed. """ # accept callables for other attributes which may require # deferred initialization. This technique is used # by declarative "string configs" and some recipes. for attr in ( 'order_by', 'primaryjoin', 'secondaryjoin', 'secondary', '_user_defined_foreign_keys', 'remote_side', ): attr_value = getattr(self, attr) if util.callable(attr_value): setattr(self, attr, attr_value()) # remove "annotations" which are present if mapped class # descriptors are used to create the join expression. for attr in 'primaryjoin', 'secondaryjoin': val = getattr(self, attr) if val is not None: setattr(self, attr, _orm_deannotate( expression._only_column_elements(val, attr)) ) # ensure expressions in self.order_by, foreign_keys, # remote_side are all columns, not strings. 
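        # (editor's illustrative note, not part of the original source) with
        # declarative "string configs", a relationship such as the
        # hypothetical:
        #
        #     addresses = relationship("Address", order_by="Address.id")
        #
        # reaches this method with order_by as a callable produced by
        # declarative; the loop above has already invoked it, and the
        # coercion below turns the result into ColumnElement objects.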
if self.order_by is not False and self.order_by is not None: self.order_by = [ expression._only_column_elements(x, "order_by") for x in util.to_list(self.order_by)] self._user_defined_foreign_keys = \ util.column_set( expression._only_column_elements(x, "foreign_keys") for x in util.to_column_set( self._user_defined_foreign_keys )) self.remote_side = \ util.column_set( expression._only_column_elements(x, "remote_side") for x in util.to_column_set(self.remote_side)) self.target = self.mapper.mapped_table def _setup_join_conditions(self): self._join_condition = jc = relationships.JoinCondition( parent_selectable=self.parent.mapped_table, child_selectable=self.mapper.mapped_table, parent_local_selectable=self.parent.local_table, child_local_selectable=self.mapper.local_table, primaryjoin=self.primaryjoin, secondary=self.secondary, secondaryjoin=self.secondaryjoin, parent_equivalents=self.parent._equivalent_columns, child_equivalents=self.mapper._equivalent_columns, consider_as_foreign_keys=self._user_defined_foreign_keys, local_remote_pairs=self.local_remote_pairs, remote_side=self.remote_side, self_referential=self._is_self_referential, prop=self, support_sync=not self.viewonly, can_be_synced_fn=self._columns_are_mapped ) self.primaryjoin = jc.deannotated_primaryjoin self.secondaryjoin = jc.deannotated_secondaryjoin self.direction = jc.direction self.local_remote_pairs = jc.local_remote_pairs self.remote_side = jc.remote_columns self.local_columns = jc.local_columns self.synchronize_pairs = jc.synchronize_pairs self._calculated_foreign_keys = jc.foreign_key_columns self.secondary_synchronize_pairs = jc.secondary_synchronize_pairs def _check_conflicts(self): """Test that this relationship is legal, warn about inheritance conflicts.""" if not self.is_primary() \ and not mapper.class_mapper( self.parent.class_, configure=False).has_property(self.key): raise sa_exc.ArgumentError("Attempting to assign a new " "relationship '%s' to a non-primary mapper on " "class '%s'. New relationships can only be added " "to the primary mapper, i.e. the very first mapper " "created for class '%s' " % (self.key, self.parent.class_.__name__, self.parent.class_.__name__)) # check for conflicting relationship() on superclass if not self.parent.concrete: for inheriting in self.parent.iterate_to_root(): if inheriting is not self.parent \ and inheriting.has_property(self.key): util.warn("Warning: relationship '%s' on mapper " "'%s' supersedes the same relationship " "on inherited mapper '%s'; this can " "cause dependency issues during flush" % (self.key, self.parent, inheriting)) def _get_cascade(self): """Return the current cascade setting for this :class:`.RelationshipProperty`. """ return self._cascade def _set_cascade(self, cascade): cascade = CascadeOptions(cascade) if 'mapper' in self.__dict__: self._check_cascade_settings(cascade) self._cascade = cascade if self._dependency_processor: self._dependency_processor.cascade = cascade cascade = property(_get_cascade, _set_cascade) def _check_cascade_settings(self, cascade): if cascade.delete_orphan and not self.single_parent \ and (self.direction is MANYTOMANY or self.direction is MANYTOONE): raise sa_exc.ArgumentError( 'On %s, delete-orphan cascade is not supported ' 'on a many-to-many or many-to-one relationship ' 'when single_parent is not set. Set ' 'single_parent=True on the relationship().' 
% self) if self.direction is MANYTOONE and self.passive_deletes: util.warn("On %s, 'passive_deletes' is normally configured " "on one-to-many, one-to-one, many-to-many " "relationships only." % self) if self.passive_deletes == 'all' and \ ("delete" in cascade or "delete-orphan" in cascade): raise sa_exc.ArgumentError( "On %s, can't set passive_deletes='all' in conjunction " "with 'delete' or 'delete-orphan' cascade" % self) if cascade.delete_orphan: self.mapper.primary_mapper()._delete_orphans.append( (self.key, self.parent.class_) ) def _columns_are_mapped(self, *cols): """Return True if all columns in the given collection are mapped by the tables referenced by this :class:`.Relationship`. """ for c in cols: if self.secondary is not None \ and self.secondary.c.contains_column(c): continue if not self.parent.mapped_table.c.contains_column(c) and \ not self.target.c.contains_column(c): return False return True def _generate_backref(self): """Interpret the 'backref' instruction to create a :func:`.relationship` complementary to this one.""" if not self.is_primary(): return if self.backref is not None and not self.back_populates: if isinstance(self.backref, basestring): backref_key, kwargs = self.backref, {} else: backref_key, kwargs = self.backref mapper = self.mapper.primary_mapper() check = set(mapper.iterate_to_root()).\ union(mapper.self_and_descendants) for m in check: if m.has_property(backref_key): raise sa_exc.ArgumentError("Error creating backref " "'%s' on relationship '%s': property of that " "name exists on mapper '%s'" % (backref_key, self, m)) # determine primaryjoin/secondaryjoin for the # backref. Use the one we had, so that # a custom join doesn't have to be specified in # both directions. if self.secondary is not None: # for many to many, just switch primaryjoin/ # secondaryjoin. use the annotated # pj/sj on the _join_condition. pj = kwargs.pop('primaryjoin', self._join_condition.secondaryjoin_minus_local) sj = kwargs.pop('secondaryjoin', self._join_condition.primaryjoin_minus_local) else: pj = kwargs.pop('primaryjoin', self._join_condition.primaryjoin_reverse_remote) sj = kwargs.pop('secondaryjoin', None) if sj: raise sa_exc.InvalidRequestError( "Can't assign 'secondaryjoin' on a backref " "against a non-secondary relationship." 
) foreign_keys = kwargs.pop('foreign_keys', self._user_defined_foreign_keys) parent = self.parent.primary_mapper() kwargs.setdefault('viewonly', self.viewonly) kwargs.setdefault('post_update', self.post_update) kwargs.setdefault('passive_updates', self.passive_updates) self.back_populates = backref_key relationship = RelationshipProperty( parent, self.secondary, pj, sj, foreign_keys=foreign_keys, back_populates=self.key, **kwargs) mapper._configure_property(backref_key, relationship) if self.back_populates: self._add_reverse_property(self.back_populates) def _post_init(self): if self.uselist is None: self.uselist = self.direction is not MANYTOONE if not self.viewonly: self._dependency_processor = \ dependency.DependencyProcessor.from_relationship(self) @util.memoized_property def _use_get(self): """memoize the 'use_get' attribute of this RelationshipLoader's lazyloader.""" strategy = self._get_strategy(strategies.LazyLoader) return strategy.use_get @util.memoized_property def _is_self_referential(self): return self.mapper.common_parent(self.parent) def _create_joins(self, source_polymorphic=False, source_selectable=None, dest_polymorphic=False, dest_selectable=None, of_type=None): if source_selectable is None: if source_polymorphic and self.parent.with_polymorphic: source_selectable = self.parent._with_polymorphic_selectable aliased = False if dest_selectable is None: if dest_polymorphic and self.mapper.with_polymorphic: dest_selectable = self.mapper._with_polymorphic_selectable aliased = True else: dest_selectable = self.mapper.mapped_table if self._is_self_referential and source_selectable is None: dest_selectable = dest_selectable.alias() aliased = True else: aliased = True dest_mapper = of_type or self.mapper single_crit = dest_mapper._single_table_criterion aliased = aliased or (source_selectable is not None) primaryjoin, secondaryjoin, secondary, target_adapter, dest_selectable = \ self._join_condition.join_targets( source_selectable, dest_selectable, aliased, single_crit ) if source_selectable is None: source_selectable = self.parent.local_table if dest_selectable is None: dest_selectable = self.mapper.local_table return (primaryjoin, secondaryjoin, source_selectable, dest_selectable, secondary, target_adapter) PropertyLoader = RelationProperty = RelationshipProperty log.class_logger(RelationshipProperty) SQLAlchemy-0.8.4/lib/sqlalchemy/orm/query.py0000644000076500000240000035537012251150015021462 0ustar classicstaff00000000000000# orm/query.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """The Query class and support. Defines the :class:`.Query` class, the central construct used by the ORM to construct database queries. The :class:`.Query` class should not be confused with the :class:`.Select` class, which defines database SELECT operations at the SQL (non-ORM) level. ``Query`` differs from ``Select`` in that it returns ORM-mapped objects and interacts with an ORM session, whereas the ``Select`` construct interacts directly with the database to return iterable result sets. """ from itertools import chain from . import ( attributes, interfaces, object_mapper, persistence, exc as orm_exc, loading ) from .util import ( AliasedClass, ORMAdapter, _entity_descriptor, PathRegistry, _is_aliased_class, _is_mapped_class, _orm_columns, join as orm_join, with_parent, aliased ) from .. 
import sql, util, log, exc as sa_exc, inspect, inspection, \ types as sqltypes from ..sql.expression import _interpret_as_from from ..sql import ( util as sql_util, expression, visitors ) __all__ = ['Query', 'QueryContext', 'aliased'] def _generative(*assertions): """Mark a method as generative.""" @util.decorator def generate(fn, *args, **kw): self = args[0]._clone() for assertion in assertions: assertion(self, fn.func_name) fn(self, *args[1:], **kw) return self return generate _path_registry = PathRegistry.root class Query(object): """ORM-level SQL construction object. :class:`.Query` is the source of all SELECT statements generated by the ORM, both those formulated by end-user query operations as well as by high level internal operations such as related collection loading. It features a generative interface whereby successive calls return a new :class:`.Query` object, a copy of the former with additional criteria and options associated with it. :class:`.Query` objects are normally initially generated using the :meth:`~.Session.query` method of :class:`.Session`. For a full walkthrough of :class:`.Query` usage, see the :ref:`ormtutorial_toplevel`. """ _enable_eagerloads = True _enable_assertions = True _with_labels = False _criterion = None _yield_per = None _lockmode = None _order_by = False _group_by = False _having = None _distinct = False _prefixes = None _offset = None _limit = None _statement = None _correlate = frozenset() _populate_existing = False _invoke_all_eagers = True _version_check = False _autoflush = True _only_load_props = None _refresh_state = None _from_obj = () _join_entities = () _select_from_entity = None _mapper_adapter_map = {} _filter_aliases = None _from_obj_alias = None _joinpath = _joinpoint = util.immutabledict() _execution_options = util.immutabledict() _params = util.immutabledict() _attributes = util.immutabledict() _with_options = () _with_hints = () _enable_single_crit = True _current_path = _path_registry def __init__(self, entities, session=None): self.session = session self._polymorphic_adapters = {} self._set_entities(entities) def _set_entities(self, entities, entity_wrapper=None): if entity_wrapper is None: entity_wrapper = _QueryEntity self._entities = [] for ent in util.to_list(entities): entity_wrapper(self, ent) self._set_entity_selectables(self._entities) def _set_entity_selectables(self, entities): self._mapper_adapter_map = d = self._mapper_adapter_map.copy() for ent in entities: for entity in ent.entities: if entity not in d: ext_info = inspect(entity) if not ext_info.is_aliased_class and \ ext_info.mapper.with_polymorphic: if ext_info.mapper.mapped_table not in \ self._polymorphic_adapters: self._mapper_loads_polymorphically_with( ext_info.mapper, sql_util.ColumnAdapter( ext_info.selectable, ext_info.mapper._equivalent_columns ) ) aliased_adapter = None elif ext_info.is_aliased_class: aliased_adapter = sql_util.ColumnAdapter( ext_info.selectable, ext_info.mapper._equivalent_columns ) else: aliased_adapter = None d[entity] = ( ext_info, aliased_adapter ) ent.setup_entity(*d[entity]) def _mapper_loads_polymorphically_with(self, mapper, adapter): for m2 in mapper._with_polymorphic_mappers or [mapper]: self._polymorphic_adapters[m2] = adapter for m in m2.iterate_to_root(): self._polymorphic_adapters[m.local_table] = adapter def _set_select_from(self, obj, set_base_alias): fa = [] select_from_alias = None for from_obj in obj: info = inspect(from_obj) if hasattr(info, 'mapper') and \ (info.is_mapper or info.is_aliased_class): if set_base_alias: 
raise sa_exc.ArgumentError( "A selectable (FromClause) instance is " "expected when the base alias is being set.") self._select_from_entity = from_obj fa.append(info.selectable) elif not info.is_selectable: raise sa_exc.ArgumentError( "argument is not a mapped class, mapper, " "aliased(), or FromClause instance.") else: if isinstance(from_obj, expression.SelectBase): from_obj = from_obj.alias() select_from_alias = from_obj fa.append(from_obj) self._from_obj = tuple(fa) if len(self._from_obj) == 1 and \ isinstance(select_from_alias, expression.Alias): equivs = self.__all_equivs() self._from_obj_alias = sql_util.ColumnAdapter( self._from_obj[0], equivs) def _reset_polymorphic_adapter(self, mapper): for m2 in mapper._with_polymorphic_mappers: self._polymorphic_adapters.pop(m2, None) for m in m2.iterate_to_root(): self._polymorphic_adapters.pop(m.local_table, None) def _adapt_polymorphic_element(self, element): if "parententity" in element._annotations: search = element._annotations['parententity'] alias = self._polymorphic_adapters.get(search, None) if alias: return alias.adapt_clause(element) if isinstance(element, expression.FromClause): search = element elif hasattr(element, 'table'): search = element.table else: return None alias = self._polymorphic_adapters.get(search, None) if alias: return alias.adapt_clause(element) def _adapt_col_list(self, cols): return [ self._adapt_clause( expression._literal_as_text(o), True, True) for o in cols ] @_generative() def _adapt_all_clauses(self): self._orm_only_adapt = False def _adapt_clause(self, clause, as_filter, orm_only): """Adapt incoming clauses to transformations which have been applied within this query.""" adapters = [] # do we adapt all expression elements or only those # tagged as 'ORM' constructs ? orm_only = getattr(self, '_orm_only_adapt', orm_only) if as_filter and self._filter_aliases: for fa in self._filter_aliases._visitor_iterator: adapters.append( ( orm_only, fa.replace ) ) if self._from_obj_alias: # for the "from obj" alias, apply extra rule to the # 'ORM only' check, if this query were generated from a # subquery of itself, i.e. _from_selectable(), apply adaption # to all SQL constructs. adapters.append( ( getattr(self, '_orm_only_from_obj_alias', orm_only), self._from_obj_alias.replace ) ) if self._polymorphic_adapters: adapters.append( ( orm_only, self._adapt_polymorphic_element ) ) if not adapters: return clause def replace(elem): for _orm_only, adapter in adapters: # if 'orm only', look for ORM annotations # in the element before adapting. if not _orm_only or \ '_orm_adapt' in elem._annotations or \ "parententity" in elem._annotations: e = adapter(elem) if e is not None: return e return visitors.replacement_traverse( clause, {}, replace ) def _entity_zero(self): return self._entities[0] def _mapper_zero(self): return self._select_from_entity or \ self._entity_zero().entity_zero @property def _mapper_entities(self): for ent in self._entities: if isinstance(ent, _MapperEntity): yield ent def _joinpoint_zero(self): return self._joinpoint.get( '_joinpoint_entity', self._mapper_zero() ) def _mapper_zero_or_none(self): if not getattr(self._entities[0], 'primary_entity', False): return None return self._entities[0].mapper def _only_mapper_zero(self, rationale=None): if len(self._entities) > 1: raise sa_exc.InvalidRequestError( rationale or "This operation requires a Query " "against a single mapper." 
) return self._mapper_zero() def _only_full_mapper_zero(self, methname): if len(self._entities) != 1: raise sa_exc.InvalidRequestError( "%s() can only be used against " "a single mapped class." % methname) entity = self._entity_zero() if not hasattr(entity, 'primary_entity'): raise sa_exc.InvalidRequestError( "%s() can only be used against " "a single mapped class." % methname) return entity.entity_zero def _only_entity_zero(self, rationale=None): if len(self._entities) > 1: raise sa_exc.InvalidRequestError( rationale or "This operation requires a Query " "against a single mapper." ) return self._entity_zero() def __all_equivs(self): equivs = {} for ent in self._mapper_entities: equivs.update(ent.mapper._equivalent_columns) return equivs def _get_condition(self): self._order_by = self._distinct = False return self._no_criterion_condition("get") def _no_criterion_condition(self, meth): if not self._enable_assertions: return if self._criterion is not None or \ self._statement is not None or self._from_obj or \ self._limit is not None or self._offset is not None or \ self._group_by or self._order_by or self._distinct: raise sa_exc.InvalidRequestError( "Query.%s() being called on a " "Query with existing criterion. " % meth) self._from_obj = () self._statement = self._criterion = None self._order_by = self._group_by = self._distinct = False def _no_clauseelement_condition(self, meth): if not self._enable_assertions: return if self._order_by: raise sa_exc.InvalidRequestError( "Query.%s() being called on a " "Query with existing criterion. " % meth) self._no_criterion_condition(meth) def _no_statement_condition(self, meth): if not self._enable_assertions: return if self._statement is not None: raise sa_exc.InvalidRequestError( ("Query.%s() being called on a Query with an existing full " "statement - can't apply criterion.") % meth) def _no_limit_offset(self, meth): if not self._enable_assertions: return if self._limit is not None or self._offset is not None: raise sa_exc.InvalidRequestError( "Query.%s() being called on a Query which already has LIMIT " "or OFFSET applied. To modify the row-limited results of a " " Query, call from_self() first. " "Otherwise, call %s() before limit() or offset() " "are applied." % (meth, meth) ) def _no_select_modifiers(self, meth): if not self._enable_assertions: return for attr, methname, notset in ( ('_limit', 'limit()', None), ('_offset', 'offset()', None), ('_order_by', 'order_by()', False), ('_group_by', 'group_by()', False), ('_distinct', 'distinct()', False), ): if getattr(self, attr) is not notset: raise sa_exc.InvalidRequestError( "Can't call Query.%s() when %s has been called" % (meth, methname) ) def _get_options(self, populate_existing=None, version_check=None, only_load_props=None, refresh_state=None): if populate_existing: self._populate_existing = populate_existing if version_check: self._version_check = version_check if refresh_state: self._refresh_state = refresh_state if only_load_props: self._only_load_props = set(only_load_props) return self def _clone(self): cls = self.__class__ q = cls.__new__(cls) q.__dict__ = self.__dict__.copy() return q @property def statement(self): """The full SELECT statement represented by this Query. The statement by default will not have disambiguating labels applied to the construct unless with_labels(True) is called first. 
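        A short usage sketch (added for illustration; ``User`` is a
        hypothetical mapped class)::

            stmt = session.query(User).filter(User.id == 5).statement
            print(stmt)                    # renders the underlying SELECT
            result = session.execute(stmt)

        The returned construct is a Core SELECT which can be executed
        directly or embedded in a larger statement.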
""" stmt = self._compile_context(labels=self._with_labels).\ statement if self._params: stmt = stmt.params(self._params) # TODO: there's no tests covering effects of # the annotation not being there return stmt._annotate({'no_replacement_traverse': True}) def subquery(self, name=None, with_labels=False, reduce_columns=False): """return the full SELECT statement represented by this :class:`.Query`, embedded within an :class:`.Alias`. Eager JOIN generation within the query is disabled. :param name: string name to be assigned as the alias; this is passed through to :meth:`.FromClause.alias`. If ``None``, a name will be deterministically generated at compile time. :param with_labels: if True, :meth:`.with_labels` will be called on the :class:`.Query` first to apply table-qualified labels to all columns. :param reduce_columns: if True, :meth:`.Select.reduce_columns` will be called on the resulting :func:`.select` construct, to remove same-named columns where one also refers to the other via foreign key or WHERE clause equivalence. .. versionchanged:: 0.8 the ``with_labels`` and ``reduce_columns`` keyword arguments were added. """ q = self.enable_eagerloads(False) if with_labels: q = q.with_labels() q = q.statement if reduce_columns: q = q.reduce_columns() return q.alias(name=name) def cte(self, name=None, recursive=False): """Return the full SELECT statement represented by this :class:`.Query` represented as a common table expression (CTE). .. versionadded:: 0.7.6 Parameters and usage are the same as those of the :meth:`.SelectBase.cte` method; see that method for further details. Here is the `Postgresql WITH RECURSIVE example `_. Note that, in this example, the ``included_parts`` cte and the ``incl_alias`` alias of it are Core selectables, which means the columns are accessed via the ``.c.`` attribute. The ``parts_alias`` object is an :func:`.orm.aliased` instance of the ``Part`` entity, so column-mapped attributes are available directly:: from sqlalchemy.orm import aliased class Part(Base): __tablename__ = 'part' part = Column(String, primary_key=True) sub_part = Column(String, primary_key=True) quantity = Column(Integer) included_parts = session.query( Part.sub_part, Part.part, Part.quantity).\\ filter(Part.part=="our part").\\ cte(name="included_parts", recursive=True) incl_alias = aliased(included_parts, name="pr") parts_alias = aliased(Part, name="p") included_parts = included_parts.union_all( session.query( parts_alias.part, parts_alias.sub_part, parts_alias.quantity).\\ filter(parts_alias.part==incl_alias.c.sub_part) ) q = session.query( included_parts.c.sub_part, func.sum(included_parts.c.quantity). label('total_quantity') ).\\ group_by(included_parts.c.sub_part) See also: :meth:`.SelectBase.cte` """ return self.enable_eagerloads(False).\ statement.cte(name=name, recursive=recursive) def label(self, name): """Return the full SELECT statement represented by this :class:`.Query`, converted to a scalar subquery with a label of the given name. Analogous to :meth:`sqlalchemy.sql.expression.SelectBase.label`. .. versionadded:: 0.6.5 """ return self.enable_eagerloads(False).statement.label(name) def as_scalar(self): """Return the full SELECT statement represented by this :class:`.Query`, converted to a scalar subquery. Analogous to :meth:`sqlalchemy.sql.expression.SelectBase.as_scalar`. .. versionadded:: 0.6.5 """ return self.enable_eagerloads(False).statement.as_scalar() @property def selectable(self): """Return the :class:`.Select` object emitted by this :class:`.Query`. 
Used for :func:`.inspect` compatibility, this is equivalent to:: query.enable_eagerloads(False).with_labels().statement """ return self.__clause_element__() def __clause_element__(self): return self.enable_eagerloads(False).with_labels().statement @_generative() def enable_eagerloads(self, value): """Control whether or not eager joins and subqueries are rendered. When set to False, the returned Query will not render eager joins regardless of :func:`~sqlalchemy.orm.joinedload`, :func:`~sqlalchemy.orm.subqueryload` options or mapper-level ``lazy='joined'``/``lazy='subquery'`` configurations. This is used primarily when nesting the Query's statement into a subquery or other selectable. """ self._enable_eagerloads = value @_generative() def with_labels(self): """Apply column labels to the return value of Query.statement. Indicates that this Query's `statement` accessor should return a SELECT statement that applies labels to all columns in the form _; this is commonly used to disambiguate columns from multiple tables which have the same name. When the `Query` actually issues SQL to load rows, it always uses column labeling. """ self._with_labels = True @_generative() def enable_assertions(self, value): """Control whether assertions are generated. When set to False, the returned Query will not assert its state before certain operations, including that LIMIT/OFFSET has not been applied when filter() is called, no criterion exists when get() is called, and no "from_statement()" exists when filter()/order_by()/group_by() etc. is called. This more permissive mode is used by custom Query subclasses to specify criterion or other modifiers outside of the usual usage patterns. Care should be taken to ensure that the usage pattern is even possible. A statement applied by from_statement() will override any criterion set by filter() or order_by(), for example. """ self._enable_assertions = value @property def whereclause(self): """A readonly attribute which returns the current WHERE criterion for this Query. This returned value is a SQL expression construct, or ``None`` if no criterion has been established. """ return self._criterion @_generative() def _with_current_path(self, path): """indicate that this query applies to objects loaded within a certain path. Used by deferred loaders (see strategies.py) which transfer query options from an originating query to a newly generated query intended for the deferred load. """ self._current_path = path @_generative(_no_clauseelement_condition) def with_polymorphic(self, cls_or_mappers, selectable=None, polymorphic_on=None): """Load columns for inheriting classes. :meth:`.Query.with_polymorphic` applies transformations to the "main" mapped class represented by this :class:`.Query`. The "main" mapped class here means the :class:`.Query` object's first argument is a full class, i.e. ``session.query(SomeClass)``. These transformations allow additional tables to be present in the FROM clause so that columns for a joined-inheritance subclass are available in the query, both for the purposes of load-time efficiency as well as the ability to use these columns at query time. See the documentation section :ref:`with_polymorphic` for details on how this method is used. .. versionchanged:: 0.8 A new and more flexible function :func:`.orm.with_polymorphic` supersedes :meth:`.Query.with_polymorphic`, as it can apply the equivalent functionality to any set of columns or classes in the :class:`.Query`, not just the "zero mapper". See that function for a description of arguments. 
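        For example, given a joined-table inheritance hierarchy with a base
        ``Employee`` class and ``Engineer`` and ``Manager`` subclasses
        (illustrative names only), the subclass tables can be included in the
        initial SELECT so that subclass columns are usable as criterion::

            q = session.query(Employee).\\
                    with_polymorphic([Engineer, Manager]).\\
                    filter(Engineer.engineer_info == 'some info')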
""" if not getattr(self._entities[0], 'primary_entity', False): raise sa_exc.InvalidRequestError( "No primary mapper set up for this Query.") entity = self._entities[0]._clone() self._entities = [entity] + self._entities[1:] entity.set_with_polymorphic(self, cls_or_mappers, selectable=selectable, polymorphic_on=polymorphic_on) @_generative() def yield_per(self, count): """Yield only ``count`` rows at a time. WARNING: use this method with caution; if the same instance is present in more than one batch of rows, end-user changes to attributes will be overwritten. In particular, it's usually impossible to use this setting with eagerly loaded collections (i.e. any lazy='joined' or 'subquery') since those collections will be cleared for a new load when encountered in a subsequent result batch. In the case of 'subquery' loading, the full result for all rows is fetched which generally defeats the purpose of :meth:`~sqlalchemy.orm.query.Query.yield_per`. Also note that while :meth:`~sqlalchemy.orm.query.Query.yield_per` will set the ``stream_results`` execution option to True, currently this is only understood by :mod:`~sqlalchemy.dialects.postgresql.psycopg2` dialect which will stream results using server side cursors instead of pre-buffer all rows for this query. Other DBAPIs pre-buffer all rows before making them available. """ self._yield_per = count self._execution_options = self._execution_options.union( {"stream_results": True}) def get(self, ident): """Return an instance based on the given primary key identifier, or ``None`` if not found. E.g.:: my_user = session.query(User).get(5) some_object = session.query(VersionedFoo).get((5, 10)) :meth:`~.Query.get` is special in that it provides direct access to the identity map of the owning :class:`.Session`. If the given primary key identifier is present in the local identity map, the object is returned directly from this collection and no SQL is emitted, unless the object has been marked fully expired. If not present, a SELECT is performed in order to locate the object. :meth:`~.Query.get` also will perform a check if the object is present in the identity map and marked as expired - a SELECT is emitted to refresh the object as well as to ensure that the row is still present. If not, :class:`~sqlalchemy.orm.exc.ObjectDeletedError` is raised. :meth:`~.Query.get` is only used to return a single mapped instance, not multiple instances or individual column constructs, and strictly on a single primary key value. The originating :class:`.Query` must be constructed in this way, i.e. against a single mapped entity, with no additional filtering criterion. Loading options via :meth:`~.Query.options` may be applied however, and will be used if the object is not yet locally present. A lazy-loading, many-to-one attribute configured by :func:`.relationship`, using a simple foreign-key-to-primary-key criterion, will also use an operation equivalent to :meth:`~.Query.get` in order to retrieve the target value from the local identity map before querying the database. See :doc:`/orm/loading` for further details on relationship loading. :param ident: A scalar or tuple value representing the primary key. For a composite primary key, the order of identifiers corresponds in most cases to that of the mapped :class:`.Table` object's primary key columns. For a :func:`.mapper` that was given the ``primary key`` argument during construction, the order of identifiers corresponds to the elements present in this collection. :return: The object instance, or ``None``. 
""" # convert composite types to individual args if hasattr(ident, '__composite_values__'): ident = ident.__composite_values__() ident = util.to_list(ident) mapper = self._only_full_mapper_zero("get") if len(ident) != len(mapper.primary_key): raise sa_exc.InvalidRequestError( "Incorrect number of values in identifier to formulate " "primary key for query.get(); primary key columns are %s" % ','.join("'%s'" % c for c in mapper.primary_key)) key = mapper.identity_key_from_primary_key(ident) if not self._populate_existing and \ not mapper.always_refresh and \ self._lockmode is None: instance = loading.get_from_identity( self.session, key, attributes.PASSIVE_OFF) if instance is not None: # reject calls for id in identity map but class # mismatch. if not issubclass(instance.__class__, mapper.class_): return None return instance return loading.load_on_ident(self, key) @_generative() def correlate(self, *args): """Return a :class:`.Query` construct which will correlate the given FROM clauses to that of an enclosing :class:`.Query` or :func:`~.expression.select`. The method here accepts mapped classes, :func:`.aliased` constructs, and :func:`.mapper` constructs as arguments, which are resolved into expression constructs, in addition to appropriate expression constructs. The correlation arguments are ultimately passed to :meth:`.Select.correlate` after coercion to expression constructs. The correlation arguments take effect in such cases as when :meth:`.Query.from_self` is used, or when a subquery as returned by :meth:`.Query.subquery` is embedded in another :func:`~.expression.select` construct. """ self._correlate = self._correlate.union( _interpret_as_from(s) if s is not None else None for s in args) @_generative() def autoflush(self, setting): """Return a Query with a specific 'autoflush' setting. Note that a Session with autoflush=False will not autoflush, even if this flag is set to True at the Query level. Therefore this flag is usually used only to disable autoflush for a specific Query. """ self._autoflush = setting @_generative() def populate_existing(self): """Return a :class:`.Query` that will expire and refresh all instances as they are loaded, or reused from the current :class:`.Session`. :meth:`.populate_existing` does not improve behavior when the ORM is used normally - the :class:`.Session` object's usual behavior of maintaining a transaction and expiring all attributes after rollback or commit handles object state automatically. This method is not intended for general use. """ self._populate_existing = True @_generative() def _with_invoke_all_eagers(self, value): """Set the 'invoke all eagers' flag which causes joined- and subquery loaders to traverse into already-loaded related objects and collections. Default is that of :attr:`.Query._invoke_all_eagers`. """ self._invoke_all_eagers = value def with_parent(self, instance, property=None): """Add filtering criterion that relates the given instance to a child object or collection, using its attribute state as well as an established :func:`.relationship()` configuration. The method uses the :func:`.with_parent` function to generate the clause, the result of which is passed to :meth:`.Query.filter`. Parameters are the same as :func:`.with_parent`, with the exception that the given property can be None, in which case a search is performed against this :class:`.Query` object's target mapper. 
""" if property is None: from sqlalchemy.orm import properties mapper = object_mapper(instance) for prop in mapper.iterate_properties: if isinstance(prop, properties.PropertyLoader) and \ prop.mapper is self._mapper_zero(): property = prop break else: raise sa_exc.InvalidRequestError( "Could not locate a property which relates instances " "of class '%s' to instances of class '%s'" % ( self._mapper_zero().class_.__name__, instance.__class__.__name__) ) return self.filter(with_parent(instance, property)) @_generative() def add_entity(self, entity, alias=None): """add a mapped entity to the list of result columns to be returned.""" if alias is not None: entity = aliased(entity, alias) self._entities = list(self._entities) m = _MapperEntity(self, entity) self._set_entity_selectables([m]) @_generative() def with_session(self, session): """Return a :class:`Query` that will use the given :class:`.Session`. """ self.session = session def from_self(self, *entities): """return a Query that selects from this Query's SELECT statement. \*entities - optional list of entities which will replace those being selected. """ fromclause = self.with_labels().enable_eagerloads(False).\ _enable_single_crit(False).\ statement.correlate(None) q = self._from_selectable(fromclause) if entities: q._set_entities(entities) return q @_generative() def _enable_single_crit(self, val): self._enable_single_crit = val @_generative() def _from_selectable(self, fromclause): for attr in ( '_statement', '_criterion', '_order_by', '_group_by', '_limit', '_offset', '_joinpath', '_joinpoint', '_distinct', '_having', '_prefixes', ): self.__dict__.pop(attr, None) self._set_select_from([fromclause], True) # this enables clause adaptation for non-ORM # expressions. self._orm_only_from_obj_alias = False old_entities = self._entities self._entities = [] for e in old_entities: e.adapt_to_selectable(self, self._from_obj[0]) def values(self, *columns): """Return an iterator yielding result tuples corresponding to the given list of columns""" if not columns: return iter(()) q = self._clone() q._set_entities(columns, entity_wrapper=_ColumnEntity) if not q._yield_per: q._yield_per = 10 return iter(q) _values = values def value(self, column): """Return a scalar result corresponding to the given column expression.""" try: # Py3K #return self.values(column).__next__()[0] # Py2K return self.values(column).next()[0] # end Py2K except StopIteration: return None @_generative() def with_entities(self, *entities): """Return a new :class:`.Query` replacing the SELECT list with the given entities. e.g.:: # Users, filtered on some arbitrary criterion # and then ordered by related email address q = session.query(User).\\ join(User.address).\\ filter(User.name.like('%ed%')).\\ order_by(Address.email) # given *only* User.id==5, Address.email, and 'q', what # would the *next* User in the result be ? subq = q.with_entities(Address.email).\\ order_by(None).\\ filter(User.id==5).\\ subquery() q = q.join((subq, subq.c.email < Address.email)).\\ limit(1) .. 
versionadded:: 0.6.5 """ self._set_entities(entities) @_generative() def add_columns(self, *column): """Add one or more column expressions to the list of result columns to be returned.""" self._entities = list(self._entities) l = len(self._entities) for c in column: _ColumnEntity(self, c) # _ColumnEntity may add many entities if the # given arg is a FROM clause self._set_entity_selectables(self._entities[l:]) @util.pending_deprecation("0.7", ":meth:`.add_column` is superseded by :meth:`.add_columns`", False) def add_column(self, column): """Add a column expression to the list of result columns to be returned. Pending deprecation: :meth:`.add_column` will be superseded by :meth:`.add_columns`. """ return self.add_columns(column) def options(self, *args): """Return a new Query object, applying the given list of mapper options. Most supplied options regard changing how column- and relationship-mapped attributes are loaded. See the sections :ref:`deferred` and :doc:`/orm/loading` for reference documentation. """ return self._options(False, *args) def _conditional_options(self, *args): return self._options(True, *args) @_generative() def _options(self, conditional, *args): # most MapperOptions write to the '_attributes' dictionary, # so copy that as well self._attributes = self._attributes.copy() opts = tuple(util.flatten_iterator(args)) self._with_options = self._with_options + opts if conditional: for opt in opts: opt.process_query_conditionally(self) else: for opt in opts: opt.process_query(self) def with_transformation(self, fn): """Return a new :class:`.Query` object transformed by the given function. E.g.:: def filter_something(criterion): def transform(q): return q.filter(criterion) return transform q = q.with_transformation(filter_something(x==5)) This allows ad-hoc recipes to be created for :class:`.Query` objects. See the example at :ref:`hybrid_transformers`. .. versionadded:: 0.7.4 """ return fn(self) @_generative() def with_hint(self, selectable, text, dialect_name='*'): """Add an indexing hint for the given entity or selectable to this :class:`.Query`. Functionality is passed straight through to :meth:`~sqlalchemy.sql.expression.Select.with_hint`, with the addition that ``selectable`` can be a :class:`.Table`, :class:`.Alias`, or ORM entity / mapped class /etc. """ selectable = inspect(selectable).selectable self._with_hints += ((selectable, text, dialect_name),) @_generative() def execution_options(self, **kwargs): """ Set non-SQL options which take effect during execution. The options are the same as those accepted by :meth:`.Connection.execution_options`. Note that the ``stream_results`` execution option is enabled automatically if the :meth:`~sqlalchemy.orm.query.Query.yield_per()` method is used. """ self._execution_options = self._execution_options.union(kwargs) @_generative() def with_lockmode(self, mode): """Return a new Query object with the specified locking mode. :param mode: a string representing the desired locking mode. A corresponding value is passed to the ``for_update`` parameter of :meth:`~sqlalchemy.sql.expression.select` when the query is executed. 
Valid values are: ``'update'`` - passes ``for_update=True``, which translates to ``FOR UPDATE`` (standard SQL, supported by most dialects) ``'update_nowait'`` - passes ``for_update='nowait'``, which translates to ``FOR UPDATE NOWAIT`` (supported by Oracle, PostgreSQL 8.1 upwards) ``'read'`` - passes ``for_update='read'``, which translates to ``LOCK IN SHARE MODE`` (for MySQL), and ``FOR SHARE`` (for PostgreSQL) ``'read_nowait'`` - passes ``for_update='read_nowait'``, which translates to ``FOR SHARE NOWAIT`` (supported by PostgreSQL). .. versionadded:: 0.7.7 ``FOR SHARE`` and ``FOR SHARE NOWAIT`` (PostgreSQL). """ self._lockmode = mode @_generative() def params(self, *args, **kwargs): """add values for bind parameters which may have been specified in filter(). parameters may be specified using \**kwargs, or optionally a single dictionary as the first positional argument. The reason for both is that \**kwargs is convenient, however some parameter dictionaries contain unicode keys in which case \**kwargs cannot be used. """ if len(args) == 1: kwargs.update(args[0]) elif len(args) > 0: raise sa_exc.ArgumentError( "params() takes zero or one positional argument, " "which is a dictionary.") self._params = self._params.copy() self._params.update(kwargs) @_generative(_no_statement_condition, _no_limit_offset) def filter(self, *criterion): """apply the given filtering criterion to a copy of this :class:`.Query`, using SQL expressions. e.g.:: session.query(MyClass).filter(MyClass.name == 'some name') Multiple criteria are joined together by AND:: session.query(MyClass).\\ filter(MyClass.name == 'some name', MyClass.id > 5) The criterion is any SQL expression object applicable to the WHERE clause of a select. String expressions are coerced into SQL expression constructs via the :func:`.text` construct. .. versionchanged:: 0.7.5 Multiple criteria joined by AND. See also: :meth:`.Query.filter_by` - filter on keyword expressions. """ for criterion in list(criterion): criterion = expression._literal_as_text(criterion) criterion = self._adapt_clause(criterion, True, True) if self._criterion is not None: self._criterion = self._criterion & criterion else: self._criterion = criterion def filter_by(self, **kwargs): """apply the given filtering criterion to a copy of this :class:`.Query`, using keyword expressions. e.g.:: session.query(MyClass).filter_by(name = 'some name') Multiple criteria are joined together by AND:: session.query(MyClass).\\ filter_by(name = 'some name', id = 5) The keyword expressions are extracted from the primary entity of the query, or the last entity that was the target of a call to :meth:`.Query.join`. See also: :meth:`.Query.filter` - filter on SQL expressions. """ clauses = [_entity_descriptor(self._joinpoint_zero(), key) == value for key, value in kwargs.iteritems()] return self.filter(sql.and_(*clauses)) @_generative(_no_statement_condition, _no_limit_offset) def order_by(self, *criterion): """apply one or more ORDER BY criterion to the query and return the newly resulting ``Query`` All existing ORDER BY settings can be suppressed by passing ``None`` - this will suppress any ORDER BY configured on mappers as well. Alternatively, an existing ORDER BY setting on the Query object can be entirely cancelled by passing ``False`` as the value - use this before calling methods where an ORDER BY is invalid. 
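        For example, assuming a mapped ``User`` class (illustrative), any
        ordering, including ordering configured at the mapper level, can be
        suppressed before the query is wrapped in a UNION or subquery::

            q = session.query(User).order_by(User.name)

            # remove all ORDER BY criterion, including any
            # configured on the mapper
            q = q.order_by(None)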
""" if len(criterion) == 1: if criterion[0] is False: if '_order_by' in self.__dict__: del self._order_by return if criterion[0] is None: self._order_by = None return criterion = self._adapt_col_list(criterion) if self._order_by is False or self._order_by is None: self._order_by = criterion else: self._order_by = self._order_by + criterion @_generative(_no_statement_condition, _no_limit_offset) def group_by(self, *criterion): """apply one or more GROUP BY criterion to the query and return the newly resulting :class:`.Query`""" criterion = list(chain(*[_orm_columns(c) for c in criterion])) criterion = self._adapt_col_list(criterion) if self._group_by is False: self._group_by = criterion else: self._group_by = self._group_by + criterion @_generative(_no_statement_condition, _no_limit_offset) def having(self, criterion): """apply a HAVING criterion to the query and return the newly resulting :class:`.Query`. :meth:`having` is used in conjunction with :meth:`group_by`. HAVING criterion makes it possible to use filters on aggregate functions like COUNT, SUM, AVG, MAX, and MIN, eg.:: q = session.query(User.id).\\ join(User.addresses).\\ group_by(User.id).\\ having(func.count(Address.id) > 2) """ if isinstance(criterion, basestring): criterion = sql.text(criterion) if criterion is not None and \ not isinstance(criterion, sql.ClauseElement): raise sa_exc.ArgumentError( "having() argument must be of type " "sqlalchemy.sql.ClauseElement or string") criterion = self._adapt_clause(criterion, True, True) if self._having is not None: self._having = self._having & criterion else: self._having = criterion def union(self, *q): """Produce a UNION of this Query against one or more queries. e.g.:: q1 = sess.query(SomeClass).filter(SomeClass.foo=='bar') q2 = sess.query(SomeClass).filter(SomeClass.bar=='foo') q3 = q1.union(q2) The method accepts multiple Query objects so as to control the level of nesting. A series of ``union()`` calls such as:: x.union(y).union(z).all() will nest on each ``union()``, and produces:: SELECT * FROM (SELECT * FROM (SELECT * FROM X UNION SELECT * FROM y) UNION SELECT * FROM Z) Whereas:: x.union(y, z).all() produces:: SELECT * FROM (SELECT * FROM X UNION SELECT * FROM y UNION SELECT * FROM Z) Note that many database backends do not allow ORDER BY to be rendered on a query called within UNION, EXCEPT, etc. To disable all ORDER BY clauses including those configured on mappers, issue ``query.order_by(None)`` - the resulting :class:`.Query` object will not render ORDER BY within its SELECT statement. """ return self._from_selectable( expression.union(*([self] + list(q)))) def union_all(self, *q): """Produce a UNION ALL of this Query against one or more queries. Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See that method for usage examples. """ return self._from_selectable( expression.union_all(*([self] + list(q))) ) def intersect(self, *q): """Produce an INTERSECT of this Query against one or more queries. Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See that method for usage examples. """ return self._from_selectable( expression.intersect(*([self] + list(q))) ) def intersect_all(self, *q): """Produce an INTERSECT ALL of this Query against one or more queries. Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See that method for usage examples. """ return self._from_selectable( expression.intersect_all(*([self] + list(q))) ) def except_(self, *q): """Produce an EXCEPT of this Query against one or more queries. 
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See that method for usage examples. """ return self._from_selectable( expression.except_(*([self] + list(q))) ) def except_all(self, *q): """Produce an EXCEPT ALL of this Query against one or more queries. Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See that method for usage examples. """ return self._from_selectable( expression.except_all(*([self] + list(q))) ) def join(self, *props, **kwargs): """Create a SQL JOIN against this :class:`.Query` object's criterion and apply generatively, returning the newly resulting :class:`.Query`. **Simple Relationship Joins** Consider a mapping between two classes ``User`` and ``Address``, with a relationship ``User.addresses`` representing a collection of ``Address`` objects associated with each ``User``. The most common usage of :meth:`~.Query.join` is to create a JOIN along this relationship, using the ``User.addresses`` attribute as an indicator for how this should occur:: q = session.query(User).join(User.addresses) Where above, the call to :meth:`~.Query.join` along ``User.addresses`` will result in SQL equivalent to:: SELECT user.* FROM user JOIN address ON user.id = address.user_id In the above example we refer to ``User.addresses`` as passed to :meth:`~.Query.join` as the *on clause*, that is, it indicates how the "ON" portion of the JOIN should be constructed. For a single-entity query such as the one above (i.e. we start by selecting only from ``User`` and nothing else), the relationship can also be specified by its string name:: q = session.query(User).join("addresses") :meth:`~.Query.join` can also accommodate multiple "on clause" arguments to produce a chain of joins, such as below where a join across four related entities is constructed:: q = session.query(User).join("orders", "items", "keywords") The above would be shorthand for three separate calls to :meth:`~.Query.join`, each using an explicit attribute to indicate the source entity:: q = session.query(User).\\ join(User.orders).\\ join(Order.items).\\ join(Item.keywords) **Joins to a Target Entity or Selectable** A second form of :meth:`~.Query.join` allows any mapped entity or core selectable construct as a target. In this usage, :meth:`~.Query.join` will attempt to create a JOIN along the natural foreign key relationship between two entities:: q = session.query(User).join(Address) The above calling form of :meth:`.join` will raise an error if either there are no foreign keys between the two entities, or if there are multiple foreign key linkages between them. In the above calling form, :meth:`~.Query.join` is called upon to create the "on clause" automatically for us. The target can be any mapped entity or selectable, such as a :class:`.Table`:: q = session.query(User).join(addresses_table) **Joins to a Target with an ON Clause** The third calling form allows both the target entity as well as the ON clause to be passed explicitly. Suppose for example we wanted to join to ``Address`` twice, using an alias the second time. 
We use :func:`~sqlalchemy.orm.aliased` to create a distinct alias of ``Address``, and join to it using the ``target, onclause`` form, so that the alias can be specified explicitly as the target along with the relationship to instruct how the ON clause should proceed:: a_alias = aliased(Address) q = session.query(User).\\ join(User.addresses).\\ join(a_alias, User.addresses).\\ filter(Address.email_address=='ed@foo.com').\\ filter(a_alias.email_address=='ed@bar.com') Where above, the generated SQL would be similar to:: SELECT user.* FROM user JOIN address ON user.id = address.user_id JOIN address AS address_1 ON user.id=address_1.user_id WHERE address.email_address = :email_address_1 AND address_1.email_address = :email_address_2 The two-argument calling form of :meth:`~.Query.join` also allows us to construct arbitrary joins with SQL-oriented "on clause" expressions, not relying upon configured relationships at all. Any SQL expression can be passed as the ON clause when using the two-argument form, which should refer to the target entity in some way as well as an applicable source entity:: q = session.query(User).join(Address, User.id==Address.user_id) .. versionchanged:: 0.7 In SQLAlchemy 0.6 and earlier, the two argument form of :meth:`~.Query.join` requires the usage of a tuple: ``query(User).join((Address, User.id==Address.user_id))``\ . This calling form is accepted in 0.7 and further, though is not necessary unless multiple join conditions are passed to a single :meth:`~.Query.join` call, which itself is also not generally necessary as it is now equivalent to multiple calls (this wasn't always the case). **Advanced Join Targeting and Adaption** There is a lot of flexibility in what the "target" can be when using :meth:`~.Query.join`. As noted previously, it also accepts :class:`.Table` constructs and other selectables such as :func:`.alias` and :func:`.select` constructs, with either the one or two-argument forms:: addresses_q = select([Address.user_id]).\\ where(Address.email_address.endswith("@bar.com")).\\ alias() q = session.query(User).\\ join(addresses_q, addresses_q.c.user_id==User.id) :meth:`~.Query.join` also features the ability to *adapt* a :meth:`~sqlalchemy.orm.relationship` -driven ON clause to the target selectable. Below we construct a JOIN from ``User`` to a subquery against ``Address``, allowing the relationship denoted by ``User.addresses`` to *adapt* itself to the altered target:: address_subq = session.query(Address).\\ filter(Address.email_address == 'ed@foo.com').\\ subquery() q = session.query(User).join(address_subq, User.addresses) Producing SQL similar to:: SELECT user.* FROM user JOIN ( SELECT address.id AS id, address.user_id AS user_id, address.email_address AS email_address FROM address WHERE address.email_address = :email_address_1 ) AS anon_1 ON user.id = anon_1.user_id The above form allows one to fall back onto an explicit ON clause at any time:: q = session.query(User).\\ join(address_subq, User.id==address_subq.c.user_id) **Controlling what to Join From** While :meth:`~.Query.join` exclusively deals with the "right" side of the JOIN, we can also control the "left" side, in those cases where it's needed, using :meth:`~.Query.select_from`. 
Below we construct a query against ``Address`` but can still make usage of ``User.addresses`` as our ON clause by instructing the :class:`.Query` to select first from the ``User`` entity:: q = session.query(Address).select_from(User).\\ join(User.addresses).\\ filter(User.name == 'ed') Which will produce SQL similar to:: SELECT address.* FROM user JOIN address ON user.id=address.user_id WHERE user.name = :name_1 **Constructing Aliases Anonymously** :meth:`~.Query.join` can construct anonymous aliases using the ``aliased=True`` flag. This feature is useful when a query is being joined algorithmically, such as when querying self-referentially to an arbitrary depth:: q = session.query(Node).\\ join("children", "children", aliased=True) When ``aliased=True`` is used, the actual "alias" construct is not explicitly available. To work with it, methods such as :meth:`.Query.filter` will adapt the incoming entity to the last join point:: q = session.query(Node).\\ join("children", "children", aliased=True).\\ filter(Node.name == 'grandchild 1') When using automatic aliasing, the ``from_joinpoint=True`` argument can allow a multi-node join to be broken into multiple calls to :meth:`~.Query.join`, so that each path along the way can be further filtered:: q = session.query(Node).\\ join("children", aliased=True).\\ filter(Node.name='child 1').\\ join("children", aliased=True, from_joinpoint=True).\\ filter(Node.name == 'grandchild 1') The filtering aliases above can then be reset back to the original ``Node`` entity using :meth:`~.Query.reset_joinpoint`:: q = session.query(Node).\\ join("children", "children", aliased=True).\\ filter(Node.name == 'grandchild 1').\\ reset_joinpoint().\\ filter(Node.name == 'parent 1) For an example of ``aliased=True``, see the distribution example :ref:`examples_xmlpersistence` which illustrates an XPath-like query system using algorithmic joins. :param \*props: A collection of one or more join conditions, each consisting of a relationship-bound attribute or string relationship name representing an "on clause", or a single target entity, or a tuple in the form of ``(target, onclause)``. A special two-argument calling form of the form ``target, onclause`` is also accepted. :param aliased=False: If True, indicate that the JOIN target should be anonymously aliased. Subsequent calls to :class:`~.Query.filter` and similar will adapt the incoming criterion to the target alias, until :meth:`~.Query.reset_joinpoint` is called. :param from_joinpoint=False: When using ``aliased=True``, a setting of True here will cause the join to be from the most recent joined target, rather than starting back from the original FROM clauses of the query. See also: :ref:`ormtutorial_joins` in the ORM tutorial. :ref:`inheritance_toplevel` for details on how :meth:`~.Query.join` is used for inheritance relationships. :func:`.orm.join` - a standalone ORM-level join function, used internally by :meth:`.Query.join`, which in previous SQLAlchemy versions was the primary ORM-level joining interface. """ aliased, from_joinpoint = kwargs.pop('aliased', False),\ kwargs.pop('from_joinpoint', False) if kwargs: raise TypeError("unknown arguments: %s" % ','.join(kwargs.iterkeys())) return self._join(props, outerjoin=False, create_aliases=aliased, from_joinpoint=from_joinpoint) def outerjoin(self, *props, **kwargs): """Create a left outer join against this ``Query`` object's criterion and apply generatively, returning the newly resulting ``Query``. Usage is the same as the ``join()`` method. 
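        For example, assuming ``User`` and ``Address`` classes with a
        ``User.addresses`` relationship (illustrative names), rows for users
        that have no addresses can be located using a LEFT OUTER JOIN::

            q = session.query(User).\\
                    outerjoin(User.addresses).\\
                    filter(Address.id == None)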
""" aliased, from_joinpoint = kwargs.pop('aliased', False), \ kwargs.pop('from_joinpoint', False) if kwargs: raise TypeError("unknown arguments: %s" % ','.join(kwargs.iterkeys())) return self._join(props, outerjoin=True, create_aliases=aliased, from_joinpoint=from_joinpoint) def _update_joinpoint(self, jp): self._joinpoint = jp # copy backwards to the root of the _joinpath # dict, so that no existing dict in the path is mutated while 'prev' in jp: f, prev = jp['prev'] prev = prev.copy() prev[f] = jp jp['prev'] = (f, prev) jp = prev self._joinpath = jp @_generative(_no_statement_condition, _no_limit_offset) def _join(self, keys, outerjoin, create_aliases, from_joinpoint): """consumes arguments from join() or outerjoin(), places them into a consistent format with which to form the actual JOIN constructs. """ if not from_joinpoint: self._reset_joinpoint() if len(keys) == 2 and \ isinstance(keys[0], (expression.FromClause, type, AliasedClass)) and \ isinstance(keys[1], (basestring, expression.ClauseElement, interfaces.PropComparator)): # detect 2-arg form of join and # convert to a tuple. keys = (keys,) for arg1 in util.to_list(keys): if isinstance(arg1, tuple): # "tuple" form of join, multiple # tuples are accepted as well. The simpler # "2-arg" form is preferred. May deprecate # the "tuple" usage. arg1, arg2 = arg1 else: arg2 = None # determine onclause/right_entity. there # is a little bit of legacy behavior still at work here # which means they might be in either order. may possibly # lock this down to (right_entity, onclause) in 0.6. if isinstance(arg1, (interfaces.PropComparator, basestring)): right_entity, onclause = arg2, arg1 else: right_entity, onclause = arg1, arg2 left_entity = prop = None if isinstance(onclause, basestring): left_entity = self._joinpoint_zero() descriptor = _entity_descriptor(left_entity, onclause) onclause = descriptor # check for q.join(Class.propname, from_joinpoint=True) # and Class is that of the current joinpoint elif from_joinpoint and \ isinstance(onclause, interfaces.PropComparator): left_entity = onclause._parententity info = inspect(self._joinpoint_zero()) left_mapper, left_selectable, left_is_aliased = \ getattr(info, 'mapper', None), \ info.selectable, \ getattr(info, 'is_aliased_class', None) if left_mapper is left_entity: left_entity = self._joinpoint_zero() descriptor = _entity_descriptor(left_entity, onclause.key) onclause = descriptor if isinstance(onclause, interfaces.PropComparator): if right_entity is None: right_entity = onclause.property.mapper of_type = getattr(onclause, '_of_type', None) if of_type: right_entity = of_type else: right_entity = onclause.property.mapper left_entity = onclause._parententity prop = onclause.property if not isinstance(onclause, attributes.QueryableAttribute): onclause = prop if not create_aliases: # check for this path already present. # don't render in that case. edge = (left_entity, right_entity, prop.key) if edge in self._joinpoint: # The child's prev reference might be stale -- # it could point to a parent older than the # current joinpoint. If this is the case, # then we need to update it and then fix the # tree's spine with _update_joinpoint. Copy # and then mutate the child, which might be # shared by a different query object. 
jp = self._joinpoint[edge].copy() jp['prev'] = (edge, self._joinpoint) self._update_joinpoint(jp) continue elif onclause is not None and right_entity is None: # TODO: no coverage here raise NotImplementedError("query.join(a==b) not supported.") self._join_left_to_right( left_entity, right_entity, onclause, outerjoin, create_aliases, prop) def _join_left_to_right(self, left, right, onclause, outerjoin, create_aliases, prop): """append a JOIN to the query's from clause.""" self._polymorphic_adapters = self._polymorphic_adapters.copy() if left is None: if self._from_obj: left = self._from_obj[0] elif self._entities: left = self._entities[0].entity_zero_or_selectable if left is right and \ not create_aliases: raise sa_exc.InvalidRequestError( "Can't construct a join from %s to %s, they " "are the same entity" % (left, right)) right, onclause = self._prepare_right_side( right, onclause, create_aliases, prop) # if joining on a MapperProperty path, # track the path to prevent redundant joins if not create_aliases and prop: self._update_joinpoint({ '_joinpoint_entity': right, 'prev': ((left, right, prop.key), self._joinpoint) }) else: self._joinpoint = {'_joinpoint_entity': right} self._join_to_left(left, right, onclause, outerjoin) def _prepare_right_side(self, right, onclause, create_aliases, prop): info = inspect(right) right_mapper, right_selectable, right_is_aliased = \ getattr(info, 'mapper', None), \ info.selectable, \ getattr(info, 'is_aliased_class', False) if right_mapper: self._join_entities += (info, ) if right_mapper and prop and \ not right_mapper.common_parent(prop.mapper): raise sa_exc.InvalidRequestError( "Join target %s does not correspond to " "the right side of join condition %s" % (right, onclause) ) if not right_mapper and prop: right_mapper = prop.mapper need_adapter = False if right_mapper and right is right_selectable: if not right_selectable.is_derived_from( right_mapper.mapped_table): raise sa_exc.InvalidRequestError( "Selectable '%s' is not derived from '%s'" % (right_selectable.description, right_mapper.mapped_table.description)) if not isinstance(right_selectable, expression.Alias): right_selectable = right_selectable.alias() right = aliased(right_mapper, right_selectable) need_adapter = True aliased_entity = right_mapper and \ not right_is_aliased and \ ( right_mapper.with_polymorphic or isinstance( right_mapper.mapped_table, expression.Join) ) if not need_adapter and (create_aliases or aliased_entity): right = aliased(right) need_adapter = True # if an alias() of the right side was generated here, # apply an adapter to all subsequent filter() calls # until reset_joinpoint() is called. if need_adapter: self._filter_aliases = ORMAdapter(right, equivalents=right_mapper and right_mapper._equivalent_columns or {}, chain_to=self._filter_aliases) # if the onclause is a ClauseElement, adapt it with any # adapters that are in place right now if isinstance(onclause, expression.ClauseElement): onclause = self._adapt_clause(onclause, True, True) # if an alias() on the right side was generated, # which is intended to wrap a the right side in a subquery, # ensure that columns retrieved from this target in the result # set are also adapted. 
if aliased_entity and not create_aliases: self._mapper_loads_polymorphically_with( right_mapper, ORMAdapter( right, equivalents=right_mapper._equivalent_columns ) ) return right, onclause def _join_to_left(self, left, right, onclause, outerjoin): info = inspect(left) left_mapper = getattr(info, 'mapper', None) left_selectable = info.selectable if self._from_obj: replace_clause_index, clause = sql_util.find_join_source( self._from_obj, left_selectable) if clause is not None: try: clause = orm_join(clause, right, onclause, isouter=outerjoin) except sa_exc.ArgumentError, ae: raise sa_exc.InvalidRequestError( "Could not find a FROM clause to join from. " "Tried joining to %s, but got: %s" % (right, ae)) self._from_obj = \ self._from_obj[:replace_clause_index] + \ (clause, ) + \ self._from_obj[replace_clause_index + 1:] return if left_mapper: for ent in self._entities: if ent.corresponds_to(left): clause = ent.selectable break else: clause = left else: clause = left_selectable assert clause is not None try: clause = orm_join(clause, right, onclause, isouter=outerjoin) except sa_exc.ArgumentError, ae: raise sa_exc.InvalidRequestError( "Could not find a FROM clause to join from. " "Tried joining to %s, but got: %s" % (right, ae)) self._from_obj = self._from_obj + (clause,) def _reset_joinpoint(self): self._joinpoint = self._joinpath self._filter_aliases = None @_generative(_no_statement_condition) def reset_joinpoint(self): """Return a new :class:`.Query`, where the "join point" has been reset back to the base FROM entities of the query. This method is usually used in conjunction with the ``aliased=True`` feature of the :meth:`~.Query.join` method. See the example in :meth:`~.Query.join` for how this is used. """ self._reset_joinpoint() @_generative(_no_clauseelement_condition) def select_from(self, *from_obj): """Set the FROM clause of this :class:`.Query` explicitly. :meth:`.Query.select_from` is often used in conjunction with :meth:`.Query.join` in order to control which entity is selected from on the "left" side of the join. The entity or selectable object here effectively replaces the "left edge" of any calls to :meth:`~.Query.join`, when no joinpoint is otherwise established - usually, the default "join point" is the leftmost entity in the :class:`~.Query` object's list of entities to be selected. A typical example:: q = session.query(Address).select_from(User).\\ join(User.addresses).\\ filter(User.name == 'ed') Which produces SQL equivalent to:: SELECT address.* FROM user JOIN address ON user.id=address.user_id WHERE user.name = :name_1 :param \*from_obj: collection of one or more entities to apply to the FROM clause. Entities can be mapped classes, :class:`.AliasedClass` objects, :class:`.Mapper` objects as well as core :class:`.FromClause` elements like subqueries. .. note:: :meth:`.Query.select_from` features a deprecated behavior whereby when passed a :class:`.FromClause` element, such as a select construct, it will apply that select construct to *replace* the FROM clause that an existing entity is joined from. This behavior is being removed in SQLAlchemy 0.9, to be replaced with the :meth:`.Query.select_entity_from` method. Applications which rely on this behavior to re-base query entities to an arbitrary selectable should transition to this method before upgrading to 0.9. .. 
seealso:: :meth:`~.Query.join` :meth:`.Query.select_entity_from` """ self._set_select_from(from_obj, False) @_generative(_no_clauseelement_condition) def select_entity_from(self, from_obj): """Set the FROM clause of this :class:`.Query` to a core selectable, applying it as a replacement FROM clause for corresponding mapped entities. This method is currently equivalent to the :meth:`.Query.select_from` method, but in 0.9 these two methods will diverge in functionality. In addition to changing the FROM list, the method will also apply the given selectable to replace the FROM which the selected entities would normally select from. The given ``from_obj`` must be an instance of a :class:`.FromClause`, e.g. a :func:`.select` or :class:`.Alias` construct. An example would be a :class:`.Query` that selects ``User`` entities, but uses :meth:`.Query.select_entity_from` to have the entities selected from a :func:`.select` construct instead of the base ``user`` table:: select_stmt = select([User]).where(User.id == 7) q = session.query(User).\\ select_entity_from(select_stmt).\\ filter(User.name == 'ed') The query generated will select ``User`` entities directly from the given :func:`.select` construct, and will be:: SELECT anon_1.id AS anon_1_id, anon_1.name AS anon_1_name FROM (SELECT "user".id AS id, "user".name AS name FROM "user" WHERE "user".id = :id_1) AS anon_1 WHERE anon_1.name = :name_1 Notice above that even the WHERE criterion was "adapted" such that the ``anon_1`` subquery effectively replaces all references to the ``user`` table, except for the one that it refers to internally. Compare this to :meth:`.Query.select_from`, which as of version 0.9, does not affect existing entities. The statement below:: q = session.query(User).\\ select_from(select_stmt).\\ filter(User.name == 'ed') Produces SQL where both the ``user`` table as well as the ``select_stmt`` construct are present as separate elements in the FROM clause. No "adaptation" of the ``user`` table is applied:: SELECT "user".id AS user_id, "user".name AS user_name FROM "user", (SELECT "user".id AS id, "user".name AS name FROM "user" WHERE "user".id = :id_1) AS anon_1 WHERE "user".name = :name_1 :meth:`.Query.select_entity_from` maintains an older behavior of :meth:`.Query.select_from`. In modern usage, similar results can also be achieved using :func:`.aliased`:: select_stmt = select([User]).where(User.id == 7) user_from_select = aliased(User, select_stmt.alias()) q = session.query(user_from_select) :param from_obj: a :class:`.FromClause` object that will replace the FROM clause of this :class:`.Query`. .. seealso:: :meth:`.Query.select_from` .. versionadded:: 0.8.2 :meth:`.Query.select_entity_from` was added to specify the specific behavior of entity replacement, however the :meth:`.Query.select_from` maintains this behavior as well until 0.9. """ self._set_select_from([from_obj], True) def __getitem__(self, item): if isinstance(item, slice): start, stop, step = util.decode_slice(item) if isinstance(stop, int) and \ isinstance(start, int) and \ stop - start <= 0: return [] # perhaps we should execute a count() here so that we # can still use LIMIT/OFFSET ? 
elif (isinstance(start, int) and start < 0) \ or (isinstance(stop, int) and stop < 0): return list(self)[item] res = self.slice(start, stop) if step is not None: return list(res)[None:None:item.step] else: return list(res) else: if item == -1: return list(self)[-1] else: return list(self[item:item + 1])[0] @_generative(_no_statement_condition) def slice(self, start, stop): """apply LIMIT/OFFSET to the ``Query`` based on a " "range and return the newly resulting ``Query``.""" if start is not None and stop is not None: self._offset = (self._offset or 0) + start self._limit = stop - start elif start is None and stop is not None: self._limit = stop elif start is not None and stop is None: self._offset = (self._offset or 0) + start if self._offset == 0: self._offset = None @_generative(_no_statement_condition) def limit(self, limit): """Apply a ``LIMIT`` to the query and return the newly resulting ``Query``. """ self._limit = limit @_generative(_no_statement_condition) def offset(self, offset): """Apply an ``OFFSET`` to the query and return the newly resulting ``Query``. """ self._offset = offset @_generative(_no_statement_condition) def distinct(self, *criterion): """Apply a ``DISTINCT`` to the query and return the newly resulting ``Query``. :param \*expr: optional column expressions. When present, the Postgresql dialect will render a ``DISTINCT ON (>)`` construct. """ if not criterion: self._distinct = True else: criterion = self._adapt_col_list(criterion) if isinstance(self._distinct, list): self._distinct += criterion else: self._distinct = criterion @_generative() def prefix_with(self, *prefixes): """Apply the prefixes to the query and return the newly resulting ``Query``. :param \*prefixes: optional prefixes, typically strings, not using any commas. In particular is useful for MySQL keywords. e.g.:: query = sess.query(User.name).\\ prefix_with('HIGH_PRIORITY').\\ prefix_with('SQL_SMALL_RESULT', 'ALL') Would render:: SELECT HIGH_PRIORITY SQL_SMALL_RESULT ALL users.name AS users_name FROM users .. versionadded:: 0.7.7 """ if self._prefixes: self._prefixes += prefixes else: self._prefixes = prefixes def all(self): """Return the results represented by this ``Query`` as a list. This results in an execution of the underlying query. """ return list(self) @_generative(_no_clauseelement_condition) def from_statement(self, statement): """Execute the given SELECT statement and return results. This method bypasses all internal statement compilation, and the statement is executed without modification. The statement argument is either a string, a ``select()`` construct, or a ``text()`` construct, and should return the set of columns appropriate to the entity class represented by this ``Query``. """ if isinstance(statement, basestring): statement = sql.text(statement) if not isinstance(statement, (expression.TextClause, expression.SelectBase)): raise sa_exc.ArgumentError( "from_statement accepts text(), select(), " "and union() objects only.") self._statement = statement def first(self): """Return the first result of this ``Query`` or None if the result doesn't contain any row. first() applies a limit of one within the generated SQL, so that only one primary entity row is generated on the server side (note this may consist of multiple result rows if join-loaded collections are present). Calling ``first()`` results in an execution of the underlying query. 
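        For example, assuming a mapped ``User`` class (illustrative)::

            user = session.query(User).\\
                        filter(User.name == 'ed').\\
                        order_by(User.id).\\
                        first()

        ``user`` above is a ``User`` instance, or ``None`` if no row matched.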
""" if self._statement is not None: ret = list(self)[0:1] else: ret = list(self[0:1]) if len(ret) > 0: return ret[0] else: return None def one(self): """Return exactly one result or raise an exception. Raises ``sqlalchemy.orm.exc.NoResultFound`` if the query selects no rows. Raises ``sqlalchemy.orm.exc.MultipleResultsFound`` if multiple object identities are returned, or if multiple rows are returned for a query that does not return object identities. Note that an entity query, that is, one which selects one or more mapped classes as opposed to individual column attributes, may ultimately represent many rows but only one row of unique entity or entities - this is a successful result for one(). Calling ``one()`` results in an execution of the underlying query. .. versionchanged:: 0.6 ``one()`` fully fetches all results instead of applying any kind of limit, so that the "unique"-ing of entities does not conceal multiple object identities. """ ret = list(self) l = len(ret) if l == 1: return ret[0] elif l == 0: raise orm_exc.NoResultFound("No row was found for one()") else: raise orm_exc.MultipleResultsFound( "Multiple rows were found for one()") def scalar(self): """Return the first element of the first result or None if no rows present. If multiple rows are returned, raises MultipleResultsFound. >>> session.query(Item).scalar() >>> session.query(Item.id).scalar() 1 >>> session.query(Item.id).filter(Item.id < 0).scalar() None >>> session.query(Item.id, Item.name).scalar() 1 >>> session.query(func.count(Parent.id)).scalar() 20 This results in an execution of the underlying query. """ try: ret = self.one() if not isinstance(ret, tuple): return ret return ret[0] except orm_exc.NoResultFound: return None def __iter__(self): context = self._compile_context() context.statement.use_labels = True if self._autoflush and not self._populate_existing: self.session._autoflush() return self._execute_and_instances(context) def _connection_from_session(self, **kw): conn = self.session.connection( **kw) if self._execution_options: conn = conn.execution_options(**self._execution_options) return conn def _execute_and_instances(self, querycontext): conn = self._connection_from_session( mapper=self._mapper_zero_or_none(), clause=querycontext.statement, close_with_result=True) result = conn.execute(querycontext.statement, self._params) return loading.instances(self, result, querycontext) @property def column_descriptions(self): """Return metadata about the columns which would be returned by this :class:`.Query`. Format is a list of dictionaries:: user_alias = aliased(User, name='user2') q = sess.query(User, User.id, user_alias) # this expression: q.column_descriptions # would return: [ { 'name':'User', 'type':User, 'aliased':False, 'expr':User, }, { 'name':'id', 'type':Integer(), 'aliased':False, 'expr':User.id, }, { 'name':'user2', 'type':User, 'aliased':True, 'expr':user_alias } ] """ return [ { 'name':ent._label_name, 'type':ent.type, 'aliased':getattr(ent, 'is_aliased_class', False), 'expr':ent.expr } for ent in self._entities ] def instances(self, cursor, __context=None): """Given a ResultProxy cursor as returned by connection.execute(), return an ORM result as an iterator. e.g.:: result = engine.execute("select * from users") for u in session.query(User).instances(result): print u """ context = __context if context is None: context = QueryContext(self) return loading.instances(self, cursor, context) def merge_result(self, iterator, load=True): """Merge a result into this :class:`.Query` object's Session. 
Given an iterator returned by a :class:`.Query` of the same structure as this one, return an identical iterator of results, with all mapped instances merged into the session using :meth:`.Session.merge`. This is an optimized method which will merge all mapped instances, preserving the structure of the result rows and unmapped columns with less method overhead than that of calling :meth:`.Session.merge` explicitly for each value. The structure of the results is determined based on the column list of this :class:`.Query` - if these do not correspond, unchecked errors will occur. The 'load' argument is the same as that of :meth:`.Session.merge`. For an example of how :meth:`~.Query.merge_result` is used, see the source code for the example :ref:`examples_caching`, where :meth:`~.Query.merge_result` is used to efficiently restore state from a cache back into a target :class:`.Session`. """ return loading.merge_result(self, iterator, load) @property def _select_args(self): return { 'limit': self._limit, 'offset': self._offset, 'distinct': self._distinct, 'prefixes': self._prefixes, 'group_by': self._group_by or None, 'having': self._having } @property def _should_nest_selectable(self): kwargs = self._select_args return (kwargs.get('limit') is not None or kwargs.get('offset') is not None or kwargs.get('distinct', False)) def exists(self): """A convenience method that turns a query into an EXISTS subquery of the form EXISTS (SELECT 1 FROM ... WHERE ...). e.g.:: q = session.query(User).filter(User.name == 'fred') session.query(q.exists()) Producing SQL similar to:: SELECT EXISTS ( SELECT 1 FROM users WHERE users.name = :name_1 ) AS anon_1 .. versionadded:: 0.8.1 """ return sql.exists(self.with_labels().statement.with_only_columns(['1'])) def count(self): """Return a count of rows this Query would return. This generates the SQL for this Query as follows:: SELECT count(1) AS count_1 FROM ( SELECT ) AS anon_1 .. versionchanged:: 0.7 The above scheme is newly refined as of 0.7b3. For fine grained control over specific columns to count, to skip the usage of a subquery or otherwise control of the FROM clause, or to use other aggregate functions, use :attr:`~sqlalchemy.sql.expression.func` expressions in conjunction with :meth:`~.Session.query`, i.e.:: from sqlalchemy import func # count User records, without # using a subquery. session.query(func.count(User.id)) # return count of user "id" grouped # by "name" session.query(func.count(User.id)).\\ group_by(User.name) from sqlalchemy import distinct # count distinct "name" values session.query(func.count(distinct(User.name))) """ col = sql.func.count(sql.literal_column('*')) return self.from_self(col).scalar() def delete(self, synchronize_session='evaluate'): """Perform a bulk delete query. Deletes rows matched by this query from the database. :param synchronize_session: chooses the strategy for the removal of matched objects from the session. Valid values are: ``False`` - don't synchronize the session. This option is the most efficient and is reliable once the session is expired, which typically occurs after a commit(), or explicitly using expire_all(). Before the expiration, objects may still remain in the session which were in fact deleted which can lead to confusing results if they are accessed via get() or already loaded collections. ``'fetch'`` - performs a select query before the delete to find objects that are matched by the delete query and need to be removed from the session. Matched objects are removed from the session. 
``'evaluate'`` - Evaluate the query's criteria in Python straight on the objects in the session. If evaluation of the criteria isn't implemented, an error is raised. In that case you probably want to use the 'fetch' strategy as a fallback. The expression evaluator currently doesn't account for differing string collations between the database and Python. :return: the count of rows matched as returned by the database's "row count" feature. This method has several key caveats: * The method does **not** offer in-Python cascading of relationships - it is assumed that ON DELETE CASCADE/SET NULL/etc. is configured for any foreign key references which require it, otherwise the database may emit an integrity violation if foreign key references are being enforced. After the DELETE, dependent objects in the :class:`.Session` which were impacted by an ON DELETE may not contain the current state, or may have been deleted. This issue is resolved once the :class:`.Session` is expired, which normally occurs upon :meth:`.Session.commit` or can be forced by using :meth:`.Session.expire_all`. Accessing an expired object whose row has been deleted will invoke a SELECT to locate the row; when the row is not found, an :class:`.ObjectDeletedError` is raised. * The :meth:`.MapperEvents.before_delete` and :meth:`.MapperEvents.after_delete` events are **not** invoked from this method. Instead, the :meth:`.SessionEvents.after_bulk_delete` method is provided to act upon a mass DELETE of entity rows. .. seealso:: :meth:`.Query.update` :ref:`inserts_and_updates` - Core SQL tutorial """ #TODO: cascades need handling. delete_op = persistence.BulkDelete.factory( self, synchronize_session) delete_op.exec_() return delete_op.rowcount def update(self, values, synchronize_session='evaluate'): """Perform a bulk update query. Updates rows matched by this query in the database. :param values: a dictionary with attributes names as keys and literal values or sql expressions as values. :param synchronize_session: chooses the strategy to update the attributes on objects in the session. Valid values are: ``False`` - don't synchronize the session. This option is the most efficient and is reliable once the session is expired, which typically occurs after a commit(), or explicitly using expire_all(). Before the expiration, updated objects may still remain in the session with stale values on their attributes, which can lead to confusing results. ``'fetch'`` - performs a select query before the update to find objects that are matched by the update query. The updated attributes are expired on matched objects. ``'evaluate'`` - Evaluate the Query's criteria in Python straight on the objects in the session. If evaluation of the criteria isn't implemented, an exception is raised. The expression evaluator currently doesn't account for differing string collations between the database and Python. :return: the count of rows matched as returned by the database's "row count" feature. This method has several key caveats: * The method does **not** offer in-Python cascading of relationships - it is assumed that ON UPDATE CASCADE is configured for any foreign key references which require it, otherwise the database may emit an integrity violation if foreign key references are being enforced. 
After the UPDATE, dependent objects in the :class:`.Session` which were impacted by an ON UPDATE CASCADE may not contain the current state; this issue is resolved once the :class:`.Session` is expired, which normally occurs upon :meth:`.Session.commit` or can be forced by using :meth:`.Session.expire_all`. * As of 0.8, this method will support multiple table updates, as detailed in :ref:`multi_table_updates`, and this behavior does extend to support updates of joined-inheritance and other multiple table mappings. However, the **join condition of an inheritance mapper is currently not automatically rendered**. Care must be taken in any multiple-table update to explicitly include the joining condition between those tables, even in mappings where this is normally automatic. E.g. if a class ``Engineer`` subclasses ``Employee``, an UPDATE of the ``Engineer`` local table using criteria against the ``Employee`` local table might look like:: session.query(Engineer).\\ filter(Engineer.id == Employee.id).\\ filter(Employee.name == 'dilbert').\\ update({"engineer_type": "programmer"}) * The :meth:`.MapperEvents.before_update` and :meth:`.MapperEvents.after_update` events are **not** invoked from this method. Instead, the :meth:`.SessionEvents.after_bulk_update` method is provided to act upon a mass UPDATE of entity rows. .. seealso:: :meth:`.Query.delete` :ref:`inserts_and_updates` - Core SQL tutorial """ #TODO: value keys need to be mapped to corresponding sql cols and # instr.attr.s to string keys #TODO: updates of manytoone relationships need to be converted to # fk assignments #TODO: cascades need handling. update_op = persistence.BulkUpdate.factory( self, synchronize_session, values) update_op.exec_() return update_op.rowcount _lockmode_lookup = { 'read': 'read', 'read_nowait': 'read_nowait', 'update': True, 'update_nowait': 'nowait', None: False } def _compile_context(self, labels=True): context = QueryContext(self) if context.statement is not None: return context context.labels = labels if self._lockmode: try: context.for_update = self._lockmode_lookup[self._lockmode] except KeyError: raise sa_exc.ArgumentError( "Unknown lockmode %r" % self._lockmode) for entity in self._entities: entity.setup_context(self, context) for rec in context.create_eager_joins: strategy = rec[0] strategy(*rec[1:]) if context.from_clause: # "load from explicit FROMs" mode, # i.e. when select_from() or join() is used context.froms = list(context.from_clause) else: # "load from discrete FROMs" mode, # i.e. when each _MappedEntity has its own FROM context.froms = context.froms if self._enable_single_crit: self._adjust_for_single_inheritance(context) if not context.primary_columns: if self._only_load_props: raise sa_exc.InvalidRequestError( "No column-based properties specified for " "refresh operation. 
Use session.expire() " "to reload collections and related items.") else: raise sa_exc.InvalidRequestError( "Query contains no columns with which to " "SELECT from.") if context.multi_row_eager_loaders and self._should_nest_selectable: context.statement = self._compound_eager_statement(context) else: context.statement = self._simple_statement(context) return context def _compound_eager_statement(self, context): # for eager joins present and LIMIT/OFFSET/DISTINCT, # wrap the query inside a select, # then append eager joins onto that if context.order_by: order_by_col_expr = list( chain(*[ sql_util.unwrap_order_by(o) for o in context.order_by ]) ) else: context.order_by = None order_by_col_expr = [] inner = sql.select( context.primary_columns + order_by_col_expr, context.whereclause, from_obj=context.froms, use_labels=context.labels, # TODO: this order_by is only needed if # LIMIT/OFFSET is present in self._select_args, # else the application on the outside is enough order_by=context.order_by, **self._select_args ) for hint in self._with_hints: inner = inner.with_hint(*hint) if self._correlate: inner = inner.correlate(*self._correlate) inner = inner.alias() equivs = self.__all_equivs() context.adapter = sql_util.ColumnAdapter(inner, equivs) statement = sql.select( [inner] + context.secondary_columns, for_update=context.for_update, use_labels=context.labels) from_clause = inner for eager_join in context.eager_joins.values(): # EagerLoader places a 'stop_on' attribute on the join, # giving us a marker as to where the "splice point" of # the join should be from_clause = sql_util.splice_joins( from_clause, eager_join, eager_join.stop_on) statement.append_from(from_clause) if context.order_by: statement.append_order_by( *context.adapter.copy_and_process( context.order_by ) ) statement.append_order_by(*context.eager_order_by) return statement def _simple_statement(self, context): if not context.order_by: context.order_by = None if self._distinct and context.order_by: order_by_col_expr = list( chain(*[ sql_util.unwrap_order_by(o) for o in context.order_by ]) ) context.primary_columns += order_by_col_expr context.froms += tuple(context.eager_joins.values()) statement = sql.select( context.primary_columns + context.secondary_columns, context.whereclause, from_obj=context.froms, use_labels=context.labels, for_update=context.for_update, order_by=context.order_by, **self._select_args ) for hint in self._with_hints: statement = statement.with_hint(*hint) if self._correlate: statement = statement.correlate(*self._correlate) if context.eager_order_by: statement.append_order_by(*context.eager_order_by) return statement def _adjust_for_single_inheritance(self, context): """Apply single-table-inheritance filtering. For all distinct single-table-inheritance mappers represented in the columns clause of this query, add criterion to the WHERE clause of the given QueryContext such that only the appropriate subtypes are selected from the total results. 
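        For illustration, assuming a hypothetical single-table inheritance
        mapping where ``Manager`` subclasses ``Employee`` and shares the
        ``employee`` table, a query such as ``session.query(Manager)`` has
        criteria along the lines of::

            WHERE employee.type IN ('manager')

        appended to its WHERE clause by this step.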
""" for (ext_info, adapter) in self._mapper_adapter_map.values(): if ext_info in self._join_entities: continue single_crit = ext_info.mapper._single_table_criterion if single_crit is not None: if adapter: single_crit = adapter.traverse(single_crit) single_crit = self._adapt_clause(single_crit, False, False) context.whereclause = sql.and_(context.whereclause, single_crit) def __str__(self): return str(self._compile_context().statement) inspection._self_inspects(Query) class _QueryEntity(object): """represent an entity column returned within a Query result.""" def __new__(cls, *args, **kwargs): if cls is _QueryEntity: entity = args[1] if not isinstance(entity, basestring) and \ _is_mapped_class(entity): cls = _MapperEntity else: cls = _ColumnEntity return object.__new__(cls) def _clone(self): q = self.__class__.__new__(self.__class__) q.__dict__ = self.__dict__.copy() return q class _MapperEntity(_QueryEntity): """mapper/class/AliasedClass entity""" def __init__(self, query, entity): self.primary_entity = not query._entities query._entities.append(self) self.entities = [entity] self.expr = entity def setup_entity(self, ext_info, aliased_adapter): self.mapper = ext_info.mapper self.aliased_adapter = aliased_adapter self.selectable = ext_info.selectable self.is_aliased_class = ext_info.is_aliased_class self._with_polymorphic = ext_info.with_polymorphic_mappers self._polymorphic_discriminator = \ ext_info.polymorphic_on self.entity_zero = ext_info if ext_info.is_aliased_class: self._label_name = self.entity_zero.name else: self._label_name = self.mapper.class_.__name__ self.path = self.entity_zero._path_registry def set_with_polymorphic(self, query, cls_or_mappers, selectable, polymorphic_on): """Receive an update from a call to query.with_polymorphic(). Note the newer style of using a free standing with_polymporphic() construct doesn't make use of this method. """ if self.is_aliased_class: # TODO: invalidrequest ? 
raise NotImplementedError( "Can't use with_polymorphic() against " "an Aliased object" ) if cls_or_mappers is None: query._reset_polymorphic_adapter(self.mapper) return mappers, from_obj = self.mapper._with_polymorphic_args( cls_or_mappers, selectable) self._with_polymorphic = mappers self._polymorphic_discriminator = polymorphic_on self.selectable = from_obj query._mapper_loads_polymorphically_with(self.mapper, sql_util.ColumnAdapter(from_obj, self.mapper._equivalent_columns)) filter_fn = id @property def type(self): return self.mapper.class_ @property def entity_zero_or_selectable(self): return self.entity_zero def corresponds_to(self, entity): if entity.is_aliased_class: if self.is_aliased_class: if entity._base_alias is self.entity_zero._base_alias: return True return False elif self.is_aliased_class: if self.entity_zero._use_mapper_path: return entity in self._with_polymorphic else: return entity is self.entity_zero return entity.common_parent(self.entity_zero) def adapt_to_selectable(self, query, sel): query._entities.append(self) def _get_entity_clauses(self, query, context): adapter = None if not self.is_aliased_class: if query._polymorphic_adapters: adapter = query._polymorphic_adapters.get(self.mapper, None) else: adapter = self.aliased_adapter if adapter: if query._from_obj_alias: ret = adapter.wrap(query._from_obj_alias) else: ret = adapter else: ret = query._from_obj_alias return ret def row_processor(self, query, context, custom_rows): adapter = self._get_entity_clauses(query, context) if context.adapter and adapter: adapter = adapter.wrap(context.adapter) elif not adapter: adapter = context.adapter # polymorphic mappers which have concrete tables in # their hierarchy usually # require row aliasing unconditionally. if not adapter and self.mapper._requires_row_aliasing: adapter = sql_util.ColumnAdapter( self.selectable, self.mapper._equivalent_columns) if self.primary_entity: _instance = loading.instance_processor( self.mapper, context, self.path, adapter, only_load_props=query._only_load_props, refresh_state=context.refresh_state, polymorphic_discriminator=self._polymorphic_discriminator ) else: _instance = loading.instance_processor( self.mapper, context, self.path, adapter, polymorphic_discriminator=self._polymorphic_discriminator ) return _instance, self._label_name def setup_context(self, query, context): adapter = self._get_entity_clauses(query, context) context.froms += (self.selectable,) if context.order_by is False and self.mapper.order_by: context.order_by = self.mapper.order_by # apply adaptation to the mapper's order_by if needed. 
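        # e.g. when this entity is against an aliased or polymorphic
        # selectable, rewrite the mapper-level order_by columns in terms
        # of that selectable before applying them to the context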
if adapter: context.order_by = adapter.adapt_list( util.to_list( context.order_by ) ) if self._with_polymorphic: poly_properties = self.mapper._iterate_polymorphic_properties( self._with_polymorphic) else: poly_properties = self.mapper._polymorphic_properties for value in poly_properties: if query._only_load_props and \ value.key not in query._only_load_props: continue value.setup( context, self, self.path, adapter, only_load_props=query._only_load_props, column_collection=context.primary_columns ) if self._polymorphic_discriminator is not None and \ self._polymorphic_discriminator \ is not self.mapper.polymorphic_on: if adapter: pd = adapter.columns[self._polymorphic_discriminator] else: pd = self._polymorphic_discriminator context.primary_columns.append(pd) def __str__(self): return str(self.mapper) class _ColumnEntity(_QueryEntity): """Column/expression based entity.""" def __init__(self, query, column, namespace=None): self.expr = column self.namespace = namespace if isinstance(column, basestring): column = sql.literal_column(column) self._label_name = column.name elif isinstance(column, ( attributes.QueryableAttribute, interfaces.PropComparator )): self._label_name = column.key column = column.__clause_element__() else: self._label_name = getattr(column, 'key', None) if not isinstance(column, expression.ColumnElement) and \ hasattr(column, '_select_iterable'): for c in column._select_iterable: if c is column: break _ColumnEntity(query, c, namespace=column) if c is not column: return if not isinstance(column, sql.ColumnElement): raise sa_exc.InvalidRequestError( "SQL expression, column, or mapped entity " "expected - got '%r'" % (column, ) ) type_ = column.type if type_.hashable: self.filter_fn = lambda item: item else: counter = util.counter() self.filter_fn = lambda item: counter() # If the Column is unnamed, give it a # label() so that mutable column expressions # can be located in the result even # if the expression's identity has been changed # due to adaption. if not column._label and not getattr(column, 'is_literal', False): column = column.label(self._label_name) query._entities.append(self) self.column = column self.froms = set() # look for ORM entities represented within the # given expression. 
Try to count only entities # for columns whose FROM object is in the actual list # of FROMs for the overall expression - this helps # subqueries which were built from ORM constructs from # leaking out their entities into the main select construct self.actual_froms = actual_froms = set(column._from_objects) self.entities = util.OrderedSet( elem._annotations['parententity'] for elem in visitors.iterate(column, {}) if 'parententity' in elem._annotations and actual_froms.intersection(elem._from_objects) ) if self.entities: self.entity_zero = list(self.entities)[0] elif self.namespace is not None: self.entity_zero = self.namespace else: self.entity_zero = None @property def entity_zero_or_selectable(self): if self.entity_zero is not None: return self.entity_zero elif self.actual_froms: return list(self.actual_froms)[0] else: return None @property def type(self): return self.column.type def adapt_to_selectable(self, query, sel): c = _ColumnEntity(query, sel.corresponding_column(self.column)) c._label_name = self._label_name c.entity_zero = self.entity_zero c.entities = self.entities def setup_entity(self, ext_info, aliased_adapter): if 'selectable' not in self.__dict__: self.selectable = ext_info.selectable self.froms.add(ext_info.selectable) def corresponds_to(self, entity): if self.entity_zero is None: return False elif _is_aliased_class(entity): # TODO: polymorphic subclasses ? return entity is self.entity_zero else: return not _is_aliased_class(self.entity_zero) and \ entity.common_parent(self.entity_zero) def _resolve_expr_against_query_aliases(self, query, expr, context): return query._adapt_clause(expr, False, True) def row_processor(self, query, context, custom_rows): column = self._resolve_expr_against_query_aliases( query, self.column, context) if context.adapter: column = context.adapter.columns[column] def proc(row, result): return row[column] return proc, self._label_name def setup_context(self, query, context): column = self._resolve_expr_against_query_aliases( query, self.column, context) context.froms += tuple(self.froms) context.primary_columns.append(column) def __str__(self): return str(self.column) log.class_logger(Query) class QueryContext(object): multi_row_eager_loaders = False adapter = None froms = () for_update = False def __init__(self, query): if query._statement is not None: if isinstance(query._statement, expression.SelectBase) and \ not query._statement.use_labels: self.statement = query._statement.apply_labels() else: self.statement = query._statement else: self.statement = None self.from_clause = query._from_obj self.whereclause = query._criterion self.order_by = query._order_by self.query = query self.session = query.session self.populate_existing = query._populate_existing self.invoke_all_eagers = query._invoke_all_eagers self.version_check = query._version_check self.refresh_state = query._refresh_state self.primary_columns = [] self.secondary_columns = [] self.eager_order_by = [] self.eager_joins = {} self.create_eager_joins = [] self.propagate_options = set(o for o in query._with_options if o.propagate_to_loaders) self.attributes = self._attributes = query._attributes.copy() class AliasOption(interfaces.MapperOption): def __init__(self, alias): self.alias = alias def process_query(self, query): if isinstance(self.alias, basestring): alias = query._mapper_zero().mapped_table.alias(self.alias) else: alias = self.alias query._from_obj_alias = sql_util.ColumnAdapter(alias) 
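# --------------------------------------------------------------------------
# Illustrative usage of several Query methods documented above (slicing via
# slice()/LIMIT/OFFSET, first(), scalar(), exists(), count(), and the bulk
# update()/delete() operations).  This is a minimal, hypothetical sketch for
# demonstration only: the ``User`` model, the in-memory SQLite engine and the
# sample data below are assumptions and are not part of SQLAlchemy itself.
from sqlalchemy import Column, Integer, String, create_engine, func
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()


class User(Base):
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True)
    name = Column(String(50))

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()
session.add_all([User(name=n) for n in ('ed', 'wendy', 'mary', 'ed')])
session.commit()

# slicing a Query applies LIMIT/OFFSET via Query.slice()
second_and_third = session.query(User).order_by(User.id)[1:3]

# first() returns one row or None; scalar() returns the first column of
# the first row
ed = session.query(User).filter_by(name='ed').first()
total = session.query(func.count(User.id)).scalar()

# exists() turns the query into an EXISTS subquery (0.8.1 and later)
has_ed = session.query(
    session.query(User).filter_by(name='ed').exists()).scalar()

# count() wraps the query in a SELECT count(*) FROM (...) subquery
n_eds = session.query(User).filter_by(name='ed').count()

# bulk UPDATE / DELETE; 'fetch' selects the matched rows first so that the
# corresponding objects in the session can be expired or removed
session.query(User).filter_by(name='wendy').update(
    {"name": "wendy2"}, synchronize_session='fetch')
session.query(User).filter_by(name='mary').delete(synchronize_session='fetch')
session.commit()
# --------------------------------------------------------------------------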
SQLAlchemy-0.8.4/lib/sqlalchemy/orm/relationships.py0000644000076500000240000010646412251150015023177 0ustar classicstaff00000000000000# orm/relationships.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Heuristics related to join conditions as used in :func:`.relationship`. Provides the :class:`.JoinCondition` object, which encapsulates SQL annotation and aliasing behavior focused on the `primaryjoin` and `secondaryjoin` aspects of :func:`.relationship`. """ from .. import sql, util, exc as sa_exc, schema from ..sql.util import ( ClauseAdapter, join_condition, _shallow_annotate, visit_binary_product, _deep_deannotate, find_tables ) from ..sql import operators, expression, visitors from .interfaces import MANYTOMANY, MANYTOONE, ONETOMANY def remote(expr): """Annotate a portion of a primaryjoin expression with a 'remote' annotation. See the section :ref:`relationship_custom_foreign` for a description of use. .. versionadded:: 0.8 .. seealso:: :ref:`relationship_custom_foreign` :func:`.foreign` """ return _annotate_columns(expression._clause_element_as_expr(expr), {"remote": True}) def foreign(expr): """Annotate a portion of a primaryjoin expression with a 'foreign' annotation. See the section :ref:`relationship_custom_foreign` for a description of use. .. versionadded:: 0.8 .. seealso:: :ref:`relationship_custom_foreign` :func:`.remote` """ return _annotate_columns(expression._clause_element_as_expr(expr), {"foreign": True}) def _annotate_columns(element, annotations): def clone(elem): if isinstance(elem, expression.ColumnClause): elem = elem._annotate(annotations.copy()) elem._copy_internals(clone=clone) return elem if element is not None: element = clone(element) return element class JoinCondition(object): def __init__(self, parent_selectable, child_selectable, parent_local_selectable, child_local_selectable, primaryjoin=None, secondary=None, secondaryjoin=None, parent_equivalents=None, child_equivalents=None, consider_as_foreign_keys=None, local_remote_pairs=None, remote_side=None, self_referential=False, prop=None, support_sync=True, can_be_synced_fn=lambda *c: True ): self.parent_selectable = parent_selectable self.parent_local_selectable = parent_local_selectable self.child_selectable = child_selectable self.child_local_selectable = child_local_selectable self.parent_equivalents = parent_equivalents self.child_equivalents = child_equivalents self.primaryjoin = primaryjoin self.secondaryjoin = secondaryjoin self.secondary = secondary self.consider_as_foreign_keys = consider_as_foreign_keys self._local_remote_pairs = local_remote_pairs self._remote_side = remote_side self.prop = prop self.self_referential = self_referential self.support_sync = support_sync self.can_be_synced_fn = can_be_synced_fn self._determine_joins() self._annotate_fks() self._annotate_remote() self._annotate_local() self._setup_pairs() self._check_foreign_cols(self.primaryjoin, True) if self.secondaryjoin is not None: self._check_foreign_cols(self.secondaryjoin, False) self._determine_direction() self._check_remote_side() self._log_joins() def _log_joins(self): if self.prop is None: return log = self.prop.logger log.info('%s setup primary join %s', self.prop, self.primaryjoin) log.info('%s setup secondary join %s', self.prop, self.secondaryjoin) log.info('%s synchronize pairs [%s]', self.prop, ','.join('(%s => %s)' % (l, r) for (l, r) in self.synchronize_pairs)) 
log.info('%s secondary synchronize pairs [%s]', self.prop, ','.join('(%s => %s)' % (l, r) for (l, r) in self.secondary_synchronize_pairs or [])) log.info('%s local/remote pairs [%s]', self.prop, ','.join('(%s / %s)' % (l, r) for (l, r) in self.local_remote_pairs)) log.info('%s remote columns [%s]', self.prop, ','.join('%s' % col for col in self.remote_columns) ) log.info('%s local columns [%s]', self.prop, ','.join('%s' % col for col in self.local_columns) ) log.info('%s relationship direction %s', self.prop, self.direction) def _determine_joins(self): """Determine the 'primaryjoin' and 'secondaryjoin' attributes, if not passed to the constructor already. This is based on analysis of the foreign key relationships between the parent and target mapped selectables. """ if self.secondaryjoin is not None and self.secondary is None: raise sa_exc.ArgumentError( "Property %s specified with secondary " "join condition but " "no secondary argument" % self.prop) # find a join between the given mapper's mapped table and # the given table. will try the mapper's local table first # for more specificity, then if not found will try the more # general mapped table, which in the case of inheritance is # a join. try: consider_as_foreign_keys = self.consider_as_foreign_keys or None if self.secondary is not None: if self.secondaryjoin is None: self.secondaryjoin = \ join_condition( self.child_selectable, self.secondary, a_subset=self.child_local_selectable, consider_as_foreign_keys=consider_as_foreign_keys ) if self.primaryjoin is None: self.primaryjoin = \ join_condition( self.parent_selectable, self.secondary, a_subset=self.parent_local_selectable, consider_as_foreign_keys=consider_as_foreign_keys ) else: if self.primaryjoin is None: self.primaryjoin = \ join_condition( self.parent_selectable, self.child_selectable, a_subset=self.parent_local_selectable, consider_as_foreign_keys=consider_as_foreign_keys ) except sa_exc.NoForeignKeysError: if self.secondary is not None: raise sa_exc.NoForeignKeysError("Could not determine join " "condition between parent/child tables on " "relationship %s - there are no foreign keys " "linking these tables via secondary table '%s'. " "Ensure that referencing columns are associated " "with a ForeignKey or ForeignKeyConstraint, or " "specify 'primaryjoin' and 'secondaryjoin' " "expressions." % (self.prop, self.secondary)) else: raise sa_exc.NoForeignKeysError("Could not determine join " "condition between parent/child tables on " "relationship %s - there are no foreign keys " "linking these tables. " "Ensure that referencing columns are associated " "with a ForeignKey or ForeignKeyConstraint, or " "specify a 'primaryjoin' expression." % self.prop) except sa_exc.AmbiguousForeignKeysError: if self.secondary is not None: raise sa_exc.AmbiguousForeignKeysError( "Could not determine join " "condition between parent/child tables on " "relationship %s - there are multiple foreign key " "paths linking the tables via secondary table '%s'. " "Specify the 'foreign_keys' " "argument, providing a list of those columns which " "should be counted as containing a foreign key " "reference from the secondary table to each of the " "parent and child tables." % (self.prop, self.secondary)) else: raise sa_exc.AmbiguousForeignKeysError( "Could not determine join " "condition between parent/child tables on " "relationship %s - there are multiple foreign key " "paths linking the tables. 
Specify the " "'foreign_keys' argument, providing a list of those " "columns which should be counted as containing a " "foreign key reference to the parent table." % self.prop) @property def primaryjoin_minus_local(self): return _deep_deannotate(self.primaryjoin, values=("local", "remote")) @property def secondaryjoin_minus_local(self): return _deep_deannotate(self.secondaryjoin, values=("local", "remote")) @util.memoized_property def primaryjoin_reverse_remote(self): """Return the primaryjoin condition suitable for the "reverse" direction. If the primaryjoin was delivered here with pre-existing "remote" annotations, the local/remote annotations are reversed. Otherwise, the local/remote annotations are removed. """ if self._has_remote_annotations: def replace(element): if "remote" in element._annotations: v = element._annotations.copy() del v['remote'] v['local'] = True return element._with_annotations(v) elif "local" in element._annotations: v = element._annotations.copy() del v['local'] v['remote'] = True return element._with_annotations(v) return visitors.replacement_traverse( self.primaryjoin, {}, replace) else: if self._has_foreign_annotations: # TODO: coverage return _deep_deannotate(self.primaryjoin, values=("local", "remote")) else: return _deep_deannotate(self.primaryjoin) def _has_annotation(self, clause, annotation): for col in visitors.iterate(clause, {}): if annotation in col._annotations: return True else: return False @util.memoized_property def _has_foreign_annotations(self): return self._has_annotation(self.primaryjoin, "foreign") @util.memoized_property def _has_remote_annotations(self): return self._has_annotation(self.primaryjoin, "remote") def _annotate_fks(self): """Annotate the primaryjoin and secondaryjoin structures with 'foreign' annotations marking columns considered as foreign. 
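        For illustration, the user-level equivalent of these annotations is
        the explicit use of :func:`.foreign` and :func:`.remote` (the mapping
        below is hypothetical and not part of this module)::

            from sqlalchemy import Column, Integer
            from sqlalchemy.orm import relationship, foreign, remote
            from sqlalchemy.ext.declarative import declarative_base

            Base = declarative_base()

            class User(Base):
                __tablename__ = 'user'
                id = Column(Integer, primary_key=True)

            class Address(Base):
                __tablename__ = 'address'
                id = Column(Integer, primary_key=True)
                user_id = Column(Integer)   # note: no ForeignKey present

            # the annotations supply what ForeignKey metadata normally would
            Address.user = relationship(
                User,
                primaryjoin=foreign(Address.user_id) == remote(User.id))

        When no explicit annotations are supplied, this step derives them
        from ForeignKey / ForeignKeyConstraint metadata present in the join
        condition.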
""" if self._has_foreign_annotations: return if self.consider_as_foreign_keys: self._annotate_from_fk_list() else: self._annotate_present_fks() def _annotate_from_fk_list(self): def check_fk(col): if col in self.consider_as_foreign_keys: return col._annotate({"foreign": True}) self.primaryjoin = visitors.replacement_traverse( self.primaryjoin, {}, check_fk ) if self.secondaryjoin is not None: self.secondaryjoin = visitors.replacement_traverse( self.secondaryjoin, {}, check_fk ) def _annotate_present_fks(self): if self.secondary is not None: secondarycols = util.column_set(self.secondary.c) else: secondarycols = set() def is_foreign(a, b): if isinstance(a, schema.Column) and \ isinstance(b, schema.Column): if a.references(b): return a elif b.references(a): return b if secondarycols: if a in secondarycols and b not in secondarycols: return a elif b in secondarycols and a not in secondarycols: return b def visit_binary(binary): if not isinstance(binary.left, sql.ColumnElement) or \ not isinstance(binary.right, sql.ColumnElement): return if "foreign" not in binary.left._annotations and \ "foreign" not in binary.right._annotations: col = is_foreign(binary.left, binary.right) if col is not None: if col.compare(binary.left): binary.left = binary.left._annotate( {"foreign": True}) elif col.compare(binary.right): binary.right = binary.right._annotate( {"foreign": True}) self.primaryjoin = visitors.cloned_traverse( self.primaryjoin, {}, {"binary": visit_binary} ) if self.secondaryjoin is not None: self.secondaryjoin = visitors.cloned_traverse( self.secondaryjoin, {}, {"binary": visit_binary} ) def _refers_to_parent_table(self): """Return True if the join condition contains column comparisons where both columns are in both tables. """ pt = self.parent_selectable mt = self.child_selectable result = [False] def visit_binary(binary): c, f = binary.left, binary.right if ( isinstance(c, expression.ColumnClause) and \ isinstance(f, expression.ColumnClause) and \ pt.is_derived_from(c.table) and \ pt.is_derived_from(f.table) and \ mt.is_derived_from(c.table) and \ mt.is_derived_from(f.table) ): result[0] = True visitors.traverse( self.primaryjoin, {}, {"binary": visit_binary} ) return result[0] def _tables_overlap(self): """Return True if parent/child tables have some overlap.""" return bool( set(find_tables(self.parent_selectable)).intersection( find_tables(self.child_selectable) ) ) def _annotate_remote(self): """Annotate the primaryjoin and secondaryjoin structures with 'remote' annotations marking columns considered as part of the 'remote' side. """ if self._has_remote_annotations: return if self.secondary is not None: self._annotate_remote_secondary() elif self._local_remote_pairs or self._remote_side: self._annotate_remote_from_args() elif self._refers_to_parent_table(): self._annotate_selfref(lambda col: "foreign" in col._annotations) elif self._tables_overlap(): self._annotate_remote_with_overlap() else: self._annotate_remote_distinct_selectables() def _annotate_remote_secondary(self): """annotate 'remote' in primaryjoin, secondaryjoin when 'secondary' is present. """ def repl(element): if self.secondary.c.contains_column(element): return element._annotate({"remote": True}) self.primaryjoin = visitors.replacement_traverse( self.primaryjoin, {}, repl) self.secondaryjoin = visitors.replacement_traverse( self.secondaryjoin, {}, repl) def _annotate_selfref(self, fn): """annotate 'remote' in primaryjoin, secondaryjoin when the relationship is detected as self-referential. 
""" def visit_binary(binary): equated = binary.left.compare(binary.right) if isinstance(binary.left, expression.ColumnClause) and \ isinstance(binary.right, expression.ColumnClause): # assume one to many - FKs are "remote" if fn(binary.left): binary.left = binary.left._annotate({"remote": True}) if fn(binary.right) and not equated: binary.right = binary.right._annotate( {"remote": True}) else: self._warn_non_column_elements() self.primaryjoin = visitors.cloned_traverse( self.primaryjoin, {}, {"binary": visit_binary}) def _annotate_remote_from_args(self): """annotate 'remote' in primaryjoin, secondaryjoin when the 'remote_side' or '_local_remote_pairs' arguments are used. """ if self._local_remote_pairs: if self._remote_side: raise sa_exc.ArgumentError( "remote_side argument is redundant " "against more detailed _local_remote_side " "argument.") remote_side = [r for (l, r) in self._local_remote_pairs] else: remote_side = self._remote_side if self._refers_to_parent_table(): self._annotate_selfref(lambda col: col in remote_side) else: def repl(element): if element in remote_side: return element._annotate({"remote": True}) self.primaryjoin = visitors.replacement_traverse( self.primaryjoin, {}, repl) def _annotate_remote_with_overlap(self): """annotate 'remote' in primaryjoin, secondaryjoin when the parent/child tables have some set of tables in common, though is not a fully self-referential relationship. """ def visit_binary(binary): binary.left, binary.right = proc_left_right(binary.left, binary.right) binary.right, binary.left = proc_left_right(binary.right, binary.left) def proc_left_right(left, right): if isinstance(left, expression.ColumnClause) and \ isinstance(right, expression.ColumnClause): if self.child_selectable.c.contains_column(right) and \ self.parent_selectable.c.contains_column(left): right = right._annotate({"remote": True}) else: self._warn_non_column_elements() return left, right self.primaryjoin = visitors.cloned_traverse( self.primaryjoin, {}, {"binary": visit_binary}) def _annotate_remote_distinct_selectables(self): """annotate 'remote' in primaryjoin, secondaryjoin when the parent/child tables are entirely separate. """ def repl(element): if self.child_selectable.c.contains_column(element) and \ ( not self.parent_local_selectable.c.\ contains_column(element) or self.child_local_selectable.c.\ contains_column(element)): return element._annotate({"remote": True}) self.primaryjoin = visitors.replacement_traverse( self.primaryjoin, {}, repl) def _warn_non_column_elements(self): util.warn( "Non-simple column elements in primary " "join condition for property %s - consider using " "remote() annotations to mark the remote side." % self.prop ) def _annotate_local(self): """Annotate the primaryjoin and secondaryjoin structures with 'local' annotations. This annotates all column elements found simultaneously in the parent table and the join condition that don't have a 'remote' annotation set up from _annotate_remote() or user-defined. 
""" if self._has_annotation(self.primaryjoin, "local"): return if self._local_remote_pairs: local_side = util.column_set([l for (l, r) in self._local_remote_pairs]) else: local_side = util.column_set(self.parent_selectable.c) def locals_(elem): if "remote" not in elem._annotations and \ elem in local_side: return elem._annotate({"local": True}) self.primaryjoin = visitors.replacement_traverse( self.primaryjoin, {}, locals_ ) def _check_remote_side(self): if not self.local_remote_pairs: raise sa_exc.ArgumentError('Relationship %s could ' 'not determine any unambiguous local/remote column ' 'pairs based on join condition and remote_side ' 'arguments. ' 'Consider using the remote() annotation to ' 'accurately mark those elements of the join ' 'condition that are on the remote side of ' 'the relationship.' % (self.prop, )) def _check_foreign_cols(self, join_condition, primary): """Check the foreign key columns collected and emit error messages.""" can_sync = False foreign_cols = self._gather_columns_with_annotation( join_condition, "foreign") has_foreign = bool(foreign_cols) if primary: can_sync = bool(self.synchronize_pairs) else: can_sync = bool(self.secondary_synchronize_pairs) if self.support_sync and can_sync or \ (not self.support_sync and has_foreign): return # from here below is just determining the best error message # to report. Check for a join condition using any operator # (not just ==), perhaps they need to turn on "viewonly=True". if self.support_sync and has_foreign and not can_sync: err = "Could not locate any simple equality expressions "\ "involving locally mapped foreign key columns for "\ "%s join condition "\ "'%s' on relationship %s." % ( primary and 'primary' or 'secondary', join_condition, self.prop ) err += \ " Ensure that referencing columns are associated "\ "with a ForeignKey or ForeignKeyConstraint, or are "\ "annotated in the join condition with the foreign() "\ "annotation. To allow comparison operators other than "\ "'==', the relationship can be marked as viewonly=True." raise sa_exc.ArgumentError(err) else: err = "Could not locate any relevant foreign key columns "\ "for %s join condition '%s' on relationship %s." % ( primary and 'primary' or 'secondary', join_condition, self.prop ) err += \ ' Ensure that referencing columns are associated '\ 'with a ForeignKey or ForeignKeyConstraint, or are '\ 'annotated in the join condition with the foreign() '\ 'annotation.' raise sa_exc.ArgumentError(err) def _determine_direction(self): """Determine if this relationship is one to many, many to one, many to many. """ if self.secondaryjoin is not None: self.direction = MANYTOMANY else: parentcols = util.column_set(self.parent_selectable.c) targetcols = util.column_set(self.child_selectable.c) # fk collection which suggests ONETOMANY. onetomany_fk = targetcols.intersection( self.foreign_key_columns) # fk collection which suggests MANYTOONE. manytoone_fk = parentcols.intersection( self.foreign_key_columns) if onetomany_fk and manytoone_fk: # fks on both sides. 
test for overlap of local/remote # with foreign key self_equated = self.remote_columns.intersection( self.local_columns ) onetomany_local = self.remote_columns.\ intersection(self.foreign_key_columns).\ difference(self_equated) manytoone_local = self.local_columns.\ intersection(self.foreign_key_columns).\ difference(self_equated) if onetomany_local and not manytoone_local: self.direction = ONETOMANY elif manytoone_local and not onetomany_local: self.direction = MANYTOONE else: raise sa_exc.ArgumentError( "Can't determine relationship" " direction for relationship '%s' - foreign " "key columns within the join condition are present " "in both the parent and the child's mapped tables. " "Ensure that only those columns referring " "to a parent column are marked as foreign, " "either via the foreign() annotation or " "via the foreign_keys argument." % self.prop) elif onetomany_fk: self.direction = ONETOMANY elif manytoone_fk: self.direction = MANYTOONE else: raise sa_exc.ArgumentError("Can't determine relationship " "direction for relationship '%s' - foreign " "key columns are present in neither the parent " "nor the child's mapped tables" % self.prop) def _deannotate_pairs(self, collection): """provide deannotation for the various lists of pairs, so that using them in hashes doesn't incur high-overhead __eq__() comparisons against original columns mapped. """ return [(x._deannotate(), y._deannotate()) for x, y in collection] def _setup_pairs(self): sync_pairs = [] lrp = util.OrderedSet([]) secondary_sync_pairs = [] def go(joincond, collection): def visit_binary(binary, left, right): if "remote" in right._annotations and \ "remote" not in left._annotations and \ self.can_be_synced_fn(left): lrp.add((left, right)) elif "remote" in left._annotations and \ "remote" not in right._annotations and \ self.can_be_synced_fn(right): lrp.add((right, left)) if binary.operator is operators.eq and \ self.can_be_synced_fn(left, right): if "foreign" in right._annotations: collection.append((left, right)) elif "foreign" in left._annotations: collection.append((right, left)) visit_binary_product(visit_binary, joincond) for joincond, collection in [ (self.primaryjoin, sync_pairs), (self.secondaryjoin, secondary_sync_pairs) ]: if joincond is None: continue go(joincond, collection) self.local_remote_pairs = self._deannotate_pairs(lrp) self.synchronize_pairs = self._deannotate_pairs(sync_pairs) self.secondary_synchronize_pairs = \ self._deannotate_pairs(secondary_sync_pairs) @util.memoized_property def remote_columns(self): return self._gather_join_annotations("remote") @util.memoized_property def local_columns(self): return self._gather_join_annotations("local") @util.memoized_property def foreign_key_columns(self): return self._gather_join_annotations("foreign") @util.memoized_property def deannotated_primaryjoin(self): return _deep_deannotate(self.primaryjoin) @util.memoized_property def deannotated_secondaryjoin(self): if self.secondaryjoin is not None: return _deep_deannotate(self.secondaryjoin) else: return None def _gather_join_annotations(self, annotation): s = set( self._gather_columns_with_annotation( self.primaryjoin, annotation) ) if self.secondaryjoin is not None: s.update( self._gather_columns_with_annotation( self.secondaryjoin, annotation) ) return set([x._deannotate() for x in s]) def _gather_columns_with_annotation(self, clause, *annotation): annotation = set(annotation) return set([ col for col in visitors.iterate(clause, {}) if annotation.issubset(col._annotations) ]) def join_targets(self, 
source_selectable, dest_selectable, aliased, single_crit=None): """Given a source and destination selectable, create a join between them. This takes into account aliasing the join clause to reference the appropriate corresponding columns in the target objects, as well as the extra child criterion, equivalent column sets, etc. """ # place a barrier on the destination such that # replacement traversals won't ever dig into it. # its internal structure remains fixed # regardless of context. dest_selectable = _shallow_annotate( dest_selectable, {'no_replacement_traverse': True}) primaryjoin, secondaryjoin, secondary = self.primaryjoin, \ self.secondaryjoin, self.secondary # adjust the join condition for single table inheritance, # in the case that the join is to a subclass # this is analogous to the # "_adjust_for_single_table_inheritance()" method in Query. if single_crit is not None: if secondaryjoin is not None: secondaryjoin = secondaryjoin & single_crit else: primaryjoin = primaryjoin & single_crit if aliased: if secondary is not None: secondary = secondary.alias() primary_aliasizer = ClauseAdapter(secondary) secondary_aliasizer = \ ClauseAdapter(dest_selectable, equivalents=self.child_equivalents).\ chain(primary_aliasizer) if source_selectable is not None: primary_aliasizer = \ ClauseAdapter(secondary).\ chain(ClauseAdapter(source_selectable, equivalents=self.parent_equivalents)) secondaryjoin = \ secondary_aliasizer.traverse(secondaryjoin) else: primary_aliasizer = ClauseAdapter(dest_selectable, exclude_fn=_ColInAnnotations("local"), equivalents=self.child_equivalents) if source_selectable is not None: primary_aliasizer.chain( ClauseAdapter(source_selectable, exclude_fn=_ColInAnnotations("remote"), equivalents=self.parent_equivalents)) secondary_aliasizer = None primaryjoin = primary_aliasizer.traverse(primaryjoin) target_adapter = secondary_aliasizer or primary_aliasizer target_adapter.exclude_fn = None else: target_adapter = None return primaryjoin, secondaryjoin, secondary, \ target_adapter, dest_selectable def create_lazy_clause(self, reverse_direction=False): binds = util.column_dict() lookup = util.column_dict() equated_columns = util.column_dict() if reverse_direction and self.secondaryjoin is None: for l, r in self.local_remote_pairs: _list = lookup.setdefault(r, []) _list.append((r, l)) equated_columns[l] = r else: for l, r in self.local_remote_pairs: _list = lookup.setdefault(l, []) _list.append((l, r)) equated_columns[r] = l def col_to_bind(col): if col in lookup: for tobind, equated in lookup[col]: if equated in binds: return None if col not in binds: binds[col] = sql.bindparam( None, None, type_=col.type, unique=True) return binds[col] return None lazywhere = self.deannotated_primaryjoin if self.deannotated_secondaryjoin is None or not reverse_direction: lazywhere = visitors.replacement_traverse( lazywhere, {}, col_to_bind) if self.deannotated_secondaryjoin is not None: secondaryjoin = self.deannotated_secondaryjoin if reverse_direction: secondaryjoin = visitors.replacement_traverse( secondaryjoin, {}, col_to_bind) lazywhere = sql.and_(lazywhere, secondaryjoin) bind_to_col = dict((binds[col].key, col) for col in binds) return lazywhere, bind_to_col, equated_columns class _ColInAnnotations(object): """Seralizable equivalent to: lambda c: "name" in c._annotations """ def __init__(self, name): self.name = name def __call__(self, c): return self.name in c._annotationsSQLAlchemy-0.8.4/lib/sqlalchemy/orm/scoping.py0000644000076500000240000001373512251150015021753 0ustar 
classicstaff00000000000000# orm/scoping.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php from .. import exc as sa_exc from ..util import ScopedRegistry, ThreadLocalRegistry, warn from . import class_mapper, exc as orm_exc from .session import Session __all__ = ['scoped_session'] class scoped_session(object): """Provides scoped management of :class:`.Session` objects. See :ref:`unitofwork_contextual` for a tutorial. """ def __init__(self, session_factory, scopefunc=None): """Construct a new :class:`.scoped_session`. :param session_factory: a factory to create new :class:`.Session` instances. This is usually, but not necessarily, an instance of :class:`.sessionmaker`. :param scopefunc: optional function which defines the current scope. If not passed, the :class:`.scoped_session` object assumes "thread-local" scope, and will use a Python ``threading.local()`` in order to maintain the current :class:`.Session`. If passed, the function should return a hashable token; this token will be used as the key in a dictionary in order to store and retrieve the current :class:`.Session`. """ self.session_factory = session_factory if scopefunc: self.registry = ScopedRegistry(session_factory, scopefunc) else: self.registry = ThreadLocalRegistry(session_factory) def __call__(self, **kw): """Return the current :class:`.Session`, creating it using the session factory if not present. :param \**kw: Keyword arguments will be passed to the session factory callable, if an existing :class:`.Session` is not present. If the :class:`.Session` is present and keyword arguments have been passed, :exc:`~sqlalchemy.exc.InvalidRequestError` is raised. """ if kw: scope = kw.pop('scope', False) if scope is not None: if self.registry.has(): raise sa_exc.InvalidRequestError( "Scoped session is already present; " "no new arguments may be specified.") else: sess = self.session_factory(**kw) self.registry.set(sess) return sess else: return self.session_factory(**kw) else: return self.registry() def remove(self): """Dispose of the current :class:`.Session`, if present. This will first call :meth:`.Session.close` method on the current :class:`.Session`, which releases any existing transactional/connection resources still being held; transactions specifically are rolled back. The :class:`.Session` is then discarded. Upon next usage within the same scope, the :class:`.scoped_session` will produce a new :class:`.Session` object. """ if self.registry.has(): self.registry().close() self.registry.clear() def configure(self, **kwargs): """reconfigure the :class:`.sessionmaker` used by this :class:`.scoped_session`. See :meth:`.sessionmaker.configure`. """ if self.registry.has(): warn('At least one scoped session is already present. ' ' configure() can not affect sessions that have ' 'already been created.') self.session_factory.configure(**kwargs) def query_property(self, query_cls=None): """return a class property which produces a :class:`.Query` object against the class and the current :class:`.Session` when called. e.g.:: Session = scoped_session(sessionmaker()) class MyClass(object): query = Session.query_property() # after mappers are defined result = MyClass.query.filter(MyClass.name=='foo').all() Produces instances of the session's configured query class by default. To override and use a custom implementation, provide a ``query_cls`` callable. 
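        For illustration, a hypothetical :class:`.Query` subclass passed as
        ``query_cls``::

            from sqlalchemy.orm import Query

            class CachingQuery(Query):
                pass   # custom behavior would go here

            class MyClass(object):
                query = Session.query_property(query_cls=CachingQuery)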
The callable will be invoked with the class's mapper as a positional argument and a session keyword argument. There is no limit to the number of query properties placed on a class. """ class query(object): def __get__(s, instance, owner): try: mapper = class_mapper(owner) if mapper: if query_cls: # custom query class return query_cls(mapper, session=self.registry()) else: # session's configured query class return self.registry().query(mapper) except orm_exc.UnmappedClassError: return None return query() ScopedSession = scoped_session """Old name for backwards compatibility.""" def instrument(name): def do(self, *args, **kwargs): return getattr(self.registry(), name)(*args, **kwargs) return do for meth in Session.public_methods: setattr(scoped_session, meth, instrument(meth)) def makeprop(name): def set(self, attr): setattr(self.registry(), name, attr) def get(self): return getattr(self.registry(), name) return property(get, set) for prop in ('bind', 'dirty', 'deleted', 'new', 'identity_map', 'is_active', 'autoflush', 'no_autoflush'): setattr(scoped_session, prop, makeprop(prop)) def clslevel(name): def do(cls, *args, **kwargs): return getattr(Session, name)(*args, **kwargs) return classmethod(do) for prop in ('close_all', 'object_session', 'identity_key'): setattr(scoped_session, prop, clslevel(prop)) SQLAlchemy-0.8.4/lib/sqlalchemy/orm/session.py0000644000076500000240000026306412251150015021776 0ustar classicstaff00000000000000# orm/session.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Provides the Session class and related utilities.""" from __future__ import with_statement import weakref from .. import util, sql, engine, exc as sa_exc, event from ..sql import util as sql_util, expression from . import ( SessionExtension, attributes, exc, query, util as orm_util, loading, identity ) from .util import ( object_mapper, class_mapper, _class_to_mapper, _state_mapper, object_state, _none_set ) from .unitofwork import UOWTransaction from .mapper import Mapper from .events import SessionEvents statelib = util.importlater("sqlalchemy.orm", "state") import sys __all__ = ['Session', 'SessionTransaction', 'SessionExtension', 'sessionmaker'] class _SessionClassMethods(object): """Class-level methods for :class:`.Session`, :class:`.sessionmaker`.""" @classmethod def close_all(cls): """Close *all* sessions in memory.""" for sess in _sessions.values(): sess.close() @classmethod def identity_key(cls, *args, **kwargs): """Return an identity key. This is an alias of :func:`.util.identity_key`. """ return orm_util.identity_key(*args, **kwargs) @classmethod def object_session(cls, instance): """Return the :class:`.Session` to which an object belongs. This is an alias of :func:`.object_session`. """ return object_session(instance) ACTIVE = util.symbol('ACTIVE') PREPARED = util.symbol('PREPARED') COMMITTED = util.symbol('COMMITTED') DEACTIVE = util.symbol('DEACTIVE') CLOSED = util.symbol('CLOSED') class SessionTransaction(object): """A :class:`.Session`-level transaction. :class:`.SessionTransaction` is a mostly behind-the-scenes object not normally referenced directly by application code. It coordinates among multiple :class:`.Connection` objects, maintaining a database transaction for each one individually, committing or rolling them back all at once. It also provides optional two-phase commit behavior which can augment this coordination operation. 
The :attr:`.Session.transaction` attribute of :class:`.Session` refers to the current :class:`.SessionTransaction` object in use, if any. A :class:`.SessionTransaction` is associated with a :class:`.Session` in its default mode of ``autocommit=False`` immediately, associated with no database connections. As the :class:`.Session` is called upon to emit SQL on behalf of various :class:`.Engine` or :class:`.Connection` objects, a corresponding :class:`.Connection` and associated :class:`.Transaction` is added to a collection within the :class:`.SessionTransaction` object, becoming one of the connection/transaction pairs maintained by the :class:`.SessionTransaction`. The lifespan of the :class:`.SessionTransaction` ends when the :meth:`.Session.commit`, :meth:`.Session.rollback` or :meth:`.Session.close` methods are called. At this point, the :class:`.SessionTransaction` removes its association with its parent :class:`.Session`. A :class:`.Session` that is in ``autocommit=False`` mode will create a new :class:`.SessionTransaction` to replace it immediately, whereas a :class:`.Session` that's in ``autocommit=True`` mode will remain without a :class:`.SessionTransaction` until the :meth:`.Session.begin` method is called. Another detail of :class:`.SessionTransaction` behavior is that it is capable of "nesting". This means that the :meth:`.Session.begin` method can be called while an existing :class:`.SessionTransaction` is already present, producing a new :class:`.SessionTransaction` that temporarily replaces the parent :class:`.SessionTransaction`. When a :class:`.SessionTransaction` is produced as nested, it assigns itself to the :attr:`.Session.transaction` attribute. When it is ended via :meth:`.Session.commit` or :meth:`.Session.rollback`, it restores its parent :class:`.SessionTransaction` back onto the :attr:`.Session.transaction` attribute. The behavior is effectively a stack, where :attr:`.Session.transaction` refers to the current head of the stack. The purpose of this stack is to allow nesting of :meth:`.Session.rollback` or :meth:`.Session.commit` calls in context with various flavors of :meth:`.Session.begin`. This nesting behavior applies to when :meth:`.Session.begin_nested` is used to emit a SAVEPOINT transaction, and is also used to produce a so-called "subtransaction" which allows a block of code to use a begin/rollback/commit sequence regardless of whether or not its enclosing code block has begun a transaction. The :meth:`.flush` method, whether called explicitly or via autoflush, is the primary consumer of the "subtransaction" feature, in that it wishes to guarantee that it works within in a transaction block regardless of whether or not the :class:`.Session` is in transactional mode when the method is called. 
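        For illustration, the SAVEPOINT form of this nesting as seen from the
        public API (``u1`` and ``u2`` here are hypothetical mapped objects)::

            session.add(u1)
            session.begin_nested()     # establishes a SAVEPOINT
            session.add(u2)
            session.rollback()         # rolls back to the SAVEPOINT; u2 is discarded
            session.commit()           # commits u1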
See also: :meth:`.Session.rollback` :meth:`.Session.commit` :meth:`.Session.begin` :meth:`.Session.begin_nested` :attr:`.Session.is_active` :meth:`.SessionEvents.after_commit` :meth:`.SessionEvents.after_rollback` :meth:`.SessionEvents.after_soft_rollback` """ _rollback_exception = None def __init__(self, session, parent=None, nested=False): self.session = session self._connections = {} self._parent = parent self.nested = nested self._state = ACTIVE if not parent and nested: raise sa_exc.InvalidRequestError( "Can't start a SAVEPOINT transaction when no existing " "transaction is in progress") if self.session._enable_transaction_accounting: self._take_snapshot() if self.session.dispatch.after_transaction_create: self.session.dispatch.after_transaction_create(self.session, self) @property def is_active(self): return self.session is not None and self._state is ACTIVE def _assert_active(self, prepared_ok=False, rollback_ok=False, deactive_ok=False, closed_msg="This transaction is closed"): if self._state is COMMITTED: raise sa_exc.InvalidRequestError( "This session is in 'committed' state; no further " "SQL can be emitted within this transaction." ) elif self._state is PREPARED: if not prepared_ok: raise sa_exc.InvalidRequestError( "This session is in 'prepared' state; no further " "SQL can be emitted within this transaction." ) elif self._state is DEACTIVE: if not deactive_ok and not rollback_ok: if self._rollback_exception: raise sa_exc.InvalidRequestError( "This Session's transaction has been rolled back " "due to a previous exception during flush." " To begin a new transaction with this Session, " "first issue Session.rollback()." " Original exception was: %s" % self._rollback_exception ) elif not deactive_ok: raise sa_exc.InvalidRequestError( "This Session's transaction has been rolled back " "by a nested rollback() call. To begin a new " "transaction, issue Session.rollback() first." 
) elif self._state is CLOSED: raise sa_exc.ResourceClosedError(closed_msg) @property def _is_transaction_boundary(self): return self.nested or not self._parent def connection(self, bindkey, **kwargs): self._assert_active() bind = self.session.get_bind(bindkey, **kwargs) return self._connection_for_bind(bind) def _begin(self, nested=False): self._assert_active() return SessionTransaction( self.session, self, nested=nested) def _iterate_parents(self, upto=None): if self._parent is upto: return (self,) else: if self._parent is None: raise sa_exc.InvalidRequestError( "Transaction %s is not on the active transaction list" % ( upto)) return (self,) + self._parent._iterate_parents(upto) def _take_snapshot(self): if not self._is_transaction_boundary: self._new = self._parent._new self._deleted = self._parent._deleted self._dirty = self._parent._dirty self._key_switches = self._parent._key_switches return if not self.session._flushing: self.session.flush() self._new = weakref.WeakKeyDictionary() self._deleted = weakref.WeakKeyDictionary() self._dirty = weakref.WeakKeyDictionary() self._key_switches = weakref.WeakKeyDictionary() def _restore_snapshot(self, dirty_only=False): assert self._is_transaction_boundary for s in set(self._new).union(self.session._new): self.session._expunge_state(s) if s.key: del s.key for s, (oldkey, newkey) in self._key_switches.items(): self.session.identity_map.discard(s) s.key = oldkey self.session.identity_map.replace(s) for s in set(self._deleted).union(self.session._deleted): if s.deleted: #assert s in self._deleted del s.deleted self.session._update_impl(s, discard_existing=True) assert not self.session._deleted for s in self.session.identity_map.all_states(): if not dirty_only or s.modified or s in self._dirty: s._expire(s.dict, self.session.identity_map._modified) def _remove_snapshot(self): assert self._is_transaction_boundary if not self.nested and self.session.expire_on_commit: for s in self.session.identity_map.all_states(): s._expire(s.dict, self.session.identity_map._modified) for s in self._deleted: s.session_id = None self._deleted.clear() def _connection_for_bind(self, bind): self._assert_active() if bind in self._connections: return self._connections[bind][0] if self._parent: conn = self._parent._connection_for_bind(bind) if not self.nested: return conn else: if isinstance(bind, engine.Connection): conn = bind if conn.engine in self._connections: raise sa_exc.InvalidRequestError( "Session already has a Connection associated for the " "given Connection's Engine") else: conn = bind.contextual_connect() if self.session.twophase and self._parent is None: transaction = conn.begin_twophase() elif self.nested: transaction = conn.begin_nested() else: transaction = conn.begin() self._connections[conn] = self._connections[conn.engine] = \ (conn, transaction, conn is not bind) self.session.dispatch.after_begin(self.session, self, conn) return conn def prepare(self): if self._parent is not None or not self.session.twophase: raise sa_exc.InvalidRequestError( "'twophase' mode not enabled, or not root transaction; " "can't prepare.") self._prepare_impl() def _prepare_impl(self): self._assert_active() if self._parent is None or self.nested: self.session.dispatch.before_commit(self.session) stx = self.session.transaction if stx is not self: for subtransaction in stx._iterate_parents(upto=self): subtransaction.commit() if not self.session._flushing: for _flush_guard in xrange(100): if self.session._is_clean(): break self.session.flush() else: raise exc.FlushError( "Over 
100 subsequent flushes have occurred within " "session.commit() - is an after_flush() hook " "creating new objects?") if self._parent is None and self.session.twophase: try: for t in set(self._connections.values()): t[1].prepare() except: with util.safe_reraise(): self.rollback() self._state = PREPARED def commit(self): self._assert_active(prepared_ok=True) if self._state is not PREPARED: self._prepare_impl() if self._parent is None or self.nested: for t in set(self._connections.values()): t[1].commit() self._state = COMMITTED self.session.dispatch.after_commit(self.session) if self.session._enable_transaction_accounting: self._remove_snapshot() self.close() return self._parent def rollback(self, _capture_exception=False): self._assert_active(prepared_ok=True, rollback_ok=True) stx = self.session.transaction if stx is not self: for subtransaction in stx._iterate_parents(upto=self): subtransaction.close() if self._state in (ACTIVE, PREPARED): for transaction in self._iterate_parents(): if transaction._parent is None or transaction.nested: transaction._rollback_impl() transaction._state = DEACTIVE break else: transaction._state = DEACTIVE sess = self.session if self.session._enable_transaction_accounting and \ not sess._is_clean(): # if items were added, deleted, or mutated # here, we need to re-restore the snapshot util.warn( "Session's state has been changed on " "a non-active transaction - this state " "will be discarded.") self._restore_snapshot(dirty_only=self.nested) self.close() if self._parent and _capture_exception: self._parent._rollback_exception = sys.exc_info()[1] sess.dispatch.after_soft_rollback(sess, self) return self._parent def _rollback_impl(self): for t in set(self._connections.values()): t[1].rollback() if self.session._enable_transaction_accounting: self._restore_snapshot(dirty_only=self.nested) self.session.dispatch.after_rollback(self.session) def close(self): self.session.transaction = self._parent if self._parent is None: for connection, transaction, autoclose in \ set(self._connections.values()): if autoclose: connection.close() else: transaction.close() self._state = CLOSED if self.session.dispatch.after_transaction_end: self.session.dispatch.after_transaction_end(self.session, self) if self._parent is None: if not self.session.autocommit: self.session.begin() self.session = None self._connections = None def __enter__(self): return self def __exit__(self, type, value, traceback): self._assert_active(deactive_ok=True, prepared_ok=True) if self.session.transaction is None: return if type is None: try: self.commit() except: with util.safe_reraise(): self.rollback() else: self.rollback() class Session(_SessionClassMethods): """Manages persistence operations for ORM-mapped objects. The Session's usage paradigm is described at :doc:`/orm/session`. """ public_methods = ( '__contains__', '__iter__', 'add', 'add_all', 'begin', 'begin_nested', 'close', 'commit', 'connection', 'delete', 'execute', 'expire', 'expire_all', 'expunge', 'expunge_all', 'flush', 'get_bind', 'is_modified', 'merge', 'query', 'refresh', 'rollback', 'scalar') def __init__(self, bind=None, autoflush=True, expire_on_commit=True, _enable_transaction_accounting=True, autocommit=False, twophase=False, weak_identity_map=True, binds=None, extension=None, query_cls=query.Query): """Construct a new Session. See also the :class:`.sessionmaker` function which is used to generate a :class:`.Session`-producing callable with a given set of arguments. :param autocommit: .. 
warning:: The autocommit flag is **not for general use**, and if it is used, queries should only be invoked within the span of a :meth:`.Session.begin` / :meth:`.Session.commit` pair. Executing queries outside of a demarcated transaction is a legacy mode of usage, and can in some cases lead to concurrent connection checkouts. Defaults to ``False``. When ``True``, the :class:`.Session` does not keep a persistent transaction running, and will acquire connections from the engine on an as-needed basis, returning them immediately after their use. Flushes will begin and commit (or possibly rollback) their own transaction if no transaction is present. When using this mode, the :meth:`.Session.begin` method is used to explicitly start transactions. .. seealso:: :ref:`session_autocommit` :param autoflush: When ``True``, all query operations will issue a ``flush()`` call to this ``Session`` before proceeding. This is a convenience feature so that ``flush()`` need not be called repeatedly in order for database queries to retrieve results. It's typical that ``autoflush`` is used in conjunction with ``autocommit=False``. In this scenario, explicit calls to ``flush()`` are rarely needed; you usually only need to call ``commit()`` (which flushes) to finalize changes. :param bind: An optional ``Engine`` or ``Connection`` to which this ``Session`` should be bound. When specified, all SQL operations performed by this session will execute via this connectable. :param binds: An optional dictionary which contains more granular "bind" information than the ``bind`` parameter provides. This dictionary can map individual ``Table`` instances as well as ``Mapper`` instances to individual ``Engine`` or ``Connection`` objects. Operations which proceed relative to a particular ``Mapper`` will consult this dictionary for the direct ``Mapper`` instance as well as the mapper's ``mapped_table`` attribute in order to locate an connectable to use. The full resolution is described in the ``get_bind()`` method of ``Session``. Usage looks like:: Session = sessionmaker(binds={ SomeMappedClass: create_engine('postgresql://engine1'), somemapper: create_engine('postgresql://engine2'), some_table: create_engine('postgresql://engine3'), }) Also see the :meth:`.Session.bind_mapper` and :meth:`.Session.bind_table` methods. :param \class_: Specify an alternate class other than ``sqlalchemy.orm.session.Session`` which should be used by the returned class. This is the only argument that is local to the ``sessionmaker()`` function, and is not sent directly to the constructor for ``Session``. :param _enable_transaction_accounting: Defaults to ``True``. A legacy-only flag which when ``False`` disables *all* 0.5-style object accounting on transaction boundaries, including auto-expiry of instances on rollback and commit, maintenance of the "new" and "deleted" lists upon rollback, and autoflush of pending changes upon begin(), all of which are interdependent. :param expire_on_commit: Defaults to ``True``. When ``True``, all instances will be fully expired after each ``commit()``, so that all attribute/object access subsequent to a completed transaction will load from the most recent database state. :param extension: An optional :class:`~.SessionExtension` instance, or a list of such instances, which will receive pre- and post- commit and flush events, as well as a post-rollback event. **Deprecated.** Please see :class:`.SessionEvents`. 
:param query_cls: Class which should be used to create new Query objects, as returned by the ``query()`` method. Defaults to :class:`~sqlalchemy.orm.query.Query`. :param twophase: When ``True``, all transactions will be started as a "two phase" transaction, i.e. using the "two phase" semantics of the database in use along with an XID. During a ``commit()``, after ``flush()`` has been issued for all attached databases, the ``prepare()`` method on each database's ``TwoPhaseTransaction`` will be called. This allows each database to roll back the entire transaction, before each transaction is committed. :param weak_identity_map: Defaults to ``True`` - when set to ``False``, objects placed in the :class:`.Session` will be strongly referenced until explicitly removed or the :class:`.Session` is closed. **Deprecated** - this option is obsolete. """ if weak_identity_map: self._identity_cls = identity.WeakInstanceDict else: util.warn_deprecated("weak_identity_map=False is deprecated. " "This feature is not needed.") self._identity_cls = identity.StrongInstanceDict self.identity_map = self._identity_cls() self._new = {} # InstanceState->object, strong refs object self._deleted = {} # same self.bind = bind self.__binds = {} self._flushing = False self._warn_on_events = False self.transaction = None self.hash_key = _new_sessionid() self.autoflush = autoflush self.autocommit = autocommit self.expire_on_commit = expire_on_commit self._enable_transaction_accounting = _enable_transaction_accounting self.twophase = twophase self._query_cls = query_cls if extension: for ext in util.to_list(extension): SessionExtension._adapt_listener(self, ext) if binds is not None: for mapperortable, bind in binds.iteritems(): if isinstance(mapperortable, (type, Mapper)): self.bind_mapper(mapperortable, bind) else: self.bind_table(mapperortable, bind) if not self.autocommit: self.begin() _sessions[self.hash_key] = self dispatch = event.dispatcher(SessionEvents) connection_callable = None transaction = None """The current active or inactive :class:`.SessionTransaction`.""" def begin(self, subtransactions=False, nested=False): """Begin a transaction on this Session. If this Session is already within a transaction, either a plain transaction or nested transaction, an error is raised, unless ``subtransactions=True`` or ``nested=True`` is specified. The ``subtransactions=True`` flag indicates that this :meth:`~.Session.begin` can create a subtransaction if a transaction is already in progress. For documentation on subtransactions, please see :ref:`session_subtransactions`. The ``nested`` flag begins a SAVEPOINT transaction and is equivalent to calling :meth:`~.Session.begin_nested`. For documentation on SAVEPOINT transactions, please see :ref:`session_begin_nested`. """ if self.transaction is not None: if subtransactions or nested: self.transaction = self.transaction._begin( nested=nested) else: raise sa_exc.InvalidRequestError( "A transaction is already begun. Use " "subtransactions=True to allow subtransactions.") else: self.transaction = SessionTransaction( self, nested=nested) return self.transaction # needed for __enter__/__exit__ hook def begin_nested(self): """Begin a `nested` transaction on this Session. The target database(s) must support SQL SAVEPOINTs or a SQLAlchemy-supported vendor implementation of the idea. For documentation on SAVEPOINT transactions, please see :ref:`session_begin_nested`. """ return self.begin(nested=True) def rollback(self): """Rollback the current transaction in progress. 
If no transaction is in progress, this method is a pass-through. This method rolls back the current transaction or nested transaction regardless of subtransactions being in effect. All subtransactions up to the first real transaction are closed. Subtransactions occur when begin() is called multiple times. .. seealso:: :ref:`session_rollback` """ if self.transaction is None: pass else: self.transaction.rollback() def commit(self): """Flush pending changes and commit the current transaction. If no transaction is in progress, this method raises an :exc:`~sqlalchemy.exc.InvalidRequestError`. By default, the :class:`.Session` also expires all database loaded state on all ORM-managed attributes after transaction commit. This so that subsequent operations load the most recent data from the database. This behavior can be disabled using the ``expire_on_commit=False`` option to :class:`.sessionmaker` or the :class:`.Session` constructor. If a subtransaction is in effect (which occurs when begin() is called multiple times), the subtransaction will be closed, and the next call to ``commit()`` will operate on the enclosing transaction. When using the :class:`.Session` in its default mode of ``autocommit=False``, a new transaction will be begun immediately after the commit, but note that the newly begun transaction does *not* use any connection resources until the first SQL is actually emitted. .. seealso:: :ref:`session_committing` """ if self.transaction is None: if not self.autocommit: self.begin() else: raise sa_exc.InvalidRequestError("No transaction is begun.") self.transaction.commit() def prepare(self): """Prepare the current transaction in progress for two phase commit. If no transaction is in progress, this method raises an :exc:`~sqlalchemy.exc.InvalidRequestError`. Only root transactions of two phase sessions can be prepared. If the current transaction is not such, an :exc:`~sqlalchemy.exc.InvalidRequestError` is raised. """ if self.transaction is None: if not self.autocommit: self.begin() else: raise sa_exc.InvalidRequestError("No transaction is begun.") self.transaction.prepare() def connection(self, mapper=None, clause=None, bind=None, close_with_result=False, **kw): """Return a :class:`.Connection` object corresponding to this :class:`.Session` object's transactional state. If this :class:`.Session` is configured with ``autocommit=False``, either the :class:`.Connection` corresponding to the current transaction is returned, or if no transaction is in progress, a new one is begun and the :class:`.Connection` returned (note that no transactional state is established with the DBAPI until the first SQL statement is emitted). Alternatively, if this :class:`.Session` is configured with ``autocommit=True``, an ad-hoc :class:`.Connection` is returned using :meth:`.Engine.contextual_connect` on the underlying :class:`.Engine`. Ambiguity in multi-bind or unbound :class:`.Session` objects can be resolved through any of the optional keyword arguments. This ultimately makes usage of the :meth:`.get_bind` method for resolution. :param bind: Optional :class:`.Engine` to be used as the bind. If this engine is already involved in an ongoing transaction, that connection will be used. This argument takes precedence over ``mapper``, ``clause``. :param mapper: Optional :func:`.mapper` mapped class, used to identify the appropriate bind. This argument takes precedence over ``clause``. :param clause: A :class:`.ClauseElement` (i.e. :func:`~.sql.expression.select`, :func:`~.sql.expression.text`, etc.) 
which will be used to locate a bind, if a bind cannot otherwise be identified. :param close_with_result: Passed to :meth:`Engine.connect`, indicating the :class:`.Connection` should be considered "single use", automatically closing when the first result set is closed. This flag only has an effect if this :class:`.Session` is configured with ``autocommit=True`` and does not already have a transaction in progress. :param \**kw: Additional keyword arguments are sent to :meth:`get_bind()`, allowing additional arguments to be passed to custom implementations of :meth:`get_bind`. """ if bind is None: bind = self.get_bind(mapper, clause=clause, **kw) return self._connection_for_bind(bind, close_with_result=close_with_result) def _connection_for_bind(self, engine, **kwargs): if self.transaction is not None: return self.transaction._connection_for_bind(engine) else: return engine.contextual_connect(**kwargs) def execute(self, clause, params=None, mapper=None, bind=None, **kw): """Execute a SQL expression construct or string statement within the current transaction. Returns a :class:`.ResultProxy` representing results of the statement execution, in the same manner as that of an :class:`.Engine` or :class:`.Connection`. E.g.:: result = session.execute( user_table.select().where(user_table.c.id == 5) ) :meth:`~.Session.execute` accepts any executable clause construct, such as :func:`~.sql.expression.select`, :func:`~.sql.expression.insert`, :func:`~.sql.expression.update`, :func:`~.sql.expression.delete`, and :func:`~.sql.expression.text`. Plain SQL strings can be passed as well, which in the case of :meth:`.Session.execute` only will be interpreted the same as if it were passed via a :func:`~.expression.text` construct. That is, the following usage:: result = session.execute( "SELECT * FROM user WHERE id=:param", {"param":5} ) is equivalent to:: from sqlalchemy import text result = session.execute( text("SELECT * FROM user WHERE id=:param"), {"param":5} ) The second positional argument to :meth:`.Session.execute` is an optional parameter set. Similar to that of :meth:`.Connection.execute`, whether this is passed as a single dictionary, or a list of dictionaries, determines whether the DBAPI cursor's ``execute()`` or ``executemany()`` is used to execute the statement. An INSERT construct may be invoked for a single row:: result = session.execute(users.insert(), {"id": 7, "name": "somename"}) or for multiple rows:: result = session.execute(users.insert(), [ {"id": 7, "name": "somename7"}, {"id": 8, "name": "somename8"}, {"id": 9, "name": "somename9"} ]) The statement is executed within the current transactional context of this :class:`.Session`. The :class:`.Connection` which is used to execute the statement can also be acquired directly by calling the :meth:`.Session.connection` method. Both methods use a rule-based resolution scheme in order to determine the :class:`.Connection`, which in the average case is derived directly from the "bind" of the :class:`.Session` itself, and in other cases can be based on the :func:`.mapper` and :class:`.Table` objects passed to the method; see the documentation for :meth:`.Session.get_bind` for a full description of this scheme. The :meth:`.Session.execute` method does *not* invoke autoflush. 
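For example, because autoflush is not invoked, pending ORM changes must be flushed explicitly if a textual statement should see them (a sketch; ``User`` is a hypothetical mapped class)::

    session.add(User(name='someuser'))
    session.flush()     # emit the pending INSERT so the SELECT can see it
    result = session.execute("SELECT count(*) FROM user")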
The :class:`.ResultProxy` returned by the :meth:`.Session.execute` method is returned with the "close_with_result" flag set to true; the significance of this flag is that if this :class:`.Session` is autocommitting and does not have a transaction-dedicated :class:`.Connection` available, a temporary :class:`.Connection` is established for the statement execution, which is closed (meaning, returned to the connection pool) when the :class:`.ResultProxy` has consumed all available data. This applies *only* when the :class:`.Session` is configured with autocommit=True and no transaction has been started. :param clause: An executable statement (i.e. an :class:`.Executable` expression such as :func:`.expression.select`) or string SQL statement to be executed. :param params: Optional dictionary, or list of dictionaries, containing bound parameter values. If a single dictionary, single-row execution occurs; if a list of dictionaries, an "executemany" will be invoked. The keys in each dictionary must correspond to parameter names present in the statement. :param mapper: Optional :func:`.mapper` or mapped class, used to identify the appropriate bind. This argument takes precedence over ``clause`` when locating a bind. See :meth:`.Session.get_bind` for more details. :param bind: Optional :class:`.Engine` to be used as the bind. If this engine is already involved in an ongoing transaction, that connection will be used. This argument takes precedence over ``mapper`` and ``clause`` when locating a bind. :param \**kw: Additional keyword arguments are sent to :meth:`.Session.get_bind()` to allow extensibility of "bind" schemes. .. seealso:: :ref:`sqlexpression_toplevel` - Tutorial on using Core SQL constructs. :ref:`connections_toplevel` - Further information on direct statement execution. :meth:`.Connection.execute` - core level statement execution method, which is :meth:`.Session.execute` ultimately uses in order to execute the statement. """ clause = expression._literal_as_text(clause) if bind is None: bind = self.get_bind(mapper, clause=clause, **kw) return self._connection_for_bind(bind, close_with_result=True).execute( clause, params or {}) def scalar(self, clause, params=None, mapper=None, bind=None, **kw): """Like :meth:`~.Session.execute` but return a scalar result.""" return self.execute( clause, params=params, mapper=mapper, bind=bind, **kw).scalar() def close(self): """Close this Session. This clears all items and ends any transaction in progress. If this session were created with ``autocommit=False``, a new transaction is immediately begun. Note that this new transaction does not use any connection resources until they are first needed. """ self.expunge_all() if self.transaction is not None: for transaction in self.transaction._iterate_parents(): transaction.close() def expunge_all(self): """Remove all object instances from this ``Session``. This is equivalent to calling ``expunge(obj)`` on all objects in this ``Session``. """ for state in self.identity_map.all_states() + list(self._new): state._detach() self.identity_map = self._identity_cls() self._new = {} self._deleted = {} # TODO: need much more test coverage for bind_mapper() and similar ! # TODO: + crystalize + document resolution order # vis. bind_mapper/bind_table def bind_mapper(self, mapper, bind): """Bind operations for a mapper to a Connectable. mapper A mapper instance or mapped class bind Any Connectable: a ``Engine`` or ``Connection``. All subsequent operations involving this mapper will use the given `bind`. 
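E.g. (a sketch; ``User`` is assumed to be an existing mapped class, ``audit_table`` a :class:`.Table`, and the engines pre-existing :class:`.Engine` objects)::

    session = Session()
    session.bind_mapper(User, user_engine)
    session.bind_table(audit_table, audit_engine)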
""" if isinstance(mapper, type): mapper = class_mapper(mapper) self.__binds[mapper.base_mapper] = bind for t in mapper._all_tables: self.__binds[t] = bind def bind_table(self, table, bind): """Bind operations on a Table to a Connectable. table A ``Table`` instance bind Any Connectable: a ``Engine`` or ``Connection``. All subsequent operations involving this ``Table`` will use the given `bind`. """ self.__binds[table] = bind def get_bind(self, mapper=None, clause=None): """Return a "bind" to which this :class:`.Session` is bound. The "bind" is usually an instance of :class:`.Engine`, except in the case where the :class:`.Session` has been explicitly bound directly to a :class:`.Connection`. For a multiply-bound or unbound :class:`.Session`, the ``mapper`` or ``clause`` arguments are used to determine the appropriate bind to return. Note that the "mapper" argument is usually present when :meth:`.Session.get_bind` is called via an ORM operation such as a :meth:`.Session.query`, each individual INSERT/UPDATE/DELETE operation within a :meth:`.Session.flush`, call, etc. The order of resolution is: 1. if mapper given and session.binds is present, locate a bind based on mapper. 2. if clause given and session.binds is present, locate a bind based on :class:`.Table` objects found in the given clause present in session.binds. 3. if session.bind is present, return that. 4. if clause given, attempt to return a bind linked to the :class:`.MetaData` ultimately associated with the clause. 5. if mapper given, attempt to return a bind linked to the :class:`.MetaData` ultimately associated with the :class:`.Table` or other selectable to which the mapper is mapped. 6. No bind can be found, :exc:`~sqlalchemy.exc.UnboundExecutionError` is raised. :param mapper: Optional :func:`.mapper` mapped class or instance of :class:`.Mapper`. The bind can be derived from a :class:`.Mapper` first by consulting the "binds" map associated with this :class:`.Session`, and secondly by consulting the :class:`.MetaData` associated with the :class:`.Table` to which the :class:`.Mapper` is mapped for a bind. :param clause: A :class:`.ClauseElement` (i.e. :func:`~.sql.expression.select`, :func:`~.sql.expression.text`, etc.). If the ``mapper`` argument is not present or could not produce a bind, the given expression construct will be searched for a bound element, typically a :class:`.Table` associated with bound :class:`.MetaData`. """ if mapper is clause is None: if self.bind: return self.bind else: raise sa_exc.UnboundExecutionError( "This session is not bound to a single Engine or " "Connection, and no context was provided to locate " "a binding.") c_mapper = mapper is not None and _class_to_mapper(mapper) or None # manually bound? 
if self.__binds: if c_mapper: if c_mapper.base_mapper in self.__binds: return self.__binds[c_mapper.base_mapper] elif c_mapper.mapped_table in self.__binds: return self.__binds[c_mapper.mapped_table] if clause is not None: for t in sql_util.find_tables(clause, include_crud=True): if t in self.__binds: return self.__binds[t] if self.bind: return self.bind if isinstance(clause, sql.expression.ClauseElement) and clause.bind: return clause.bind if c_mapper and c_mapper.mapped_table.bind: return c_mapper.mapped_table.bind context = [] if mapper is not None: context.append('mapper %s' % c_mapper) if clause is not None: context.append('SQL expression') raise sa_exc.UnboundExecutionError( "Could not locate a bind configured on %s or this Session" % ( ', '.join(context))) def query(self, *entities, **kwargs): """Return a new ``Query`` object corresponding to this ``Session``.""" return self._query_cls(entities, self, **kwargs) @property @util.contextmanager def no_autoflush(self): """Return a context manager that disables autoflush. e.g.:: with session.no_autoflush: some_object = SomeClass() session.add(some_object) # won't autoflush some_object.related_thing = session.query(SomeRelated).first() Operations that proceed within the ``with:`` block will not be subject to flushes occurring upon query access. This is useful when initializing a series of objects which involve existing database queries, where the uncompleted object should not yet be flushed. .. versionadded:: 0.7.6 """ autoflush = self.autoflush self.autoflush = False yield self self.autoflush = autoflush def _autoflush(self): if self.autoflush and not self._flushing: self.flush() def refresh(self, instance, attribute_names=None, lockmode=None): """Expire and refresh the attributes on the given instance. A query will be issued to the database and all attributes will be refreshed with their current database value. Lazy-loaded relational attributes will remain lazily loaded, so that the instance-wide refresh operation will be followed immediately by the lazy load of that attribute. Eagerly-loaded relational attributes will eagerly load within the single refresh operation. Note that a highly isolated transaction will return the same values as were previously read in that same transaction, regardless of changes in database state outside of that transaction - usage of :meth:`~Session.refresh` usually only makes sense if non-ORM SQL statement were emitted in the ongoing transaction, or if autocommit mode is turned on. :param attribute_names: optional. An iterable collection of string attribute names indicating a subset of attributes to be refreshed. :param lockmode: Passed to the :class:`~sqlalchemy.orm.query.Query` as used by :meth:`~sqlalchemy.orm.query.Query.with_lockmode`. """ try: state = attributes.instance_state(instance) except exc.NO_STATE: raise exc.UnmappedInstanceError(instance) self._expire_state(state, attribute_names) if loading.load_on_ident( self.query(object_mapper(instance)), state.key, refresh_state=state, lockmode=lockmode, only_load_props=attribute_names) is None: raise sa_exc.InvalidRequestError( "Could not refresh instance '%s'" % orm_util.instance_str(instance)) def expire_all(self): """Expires all persistent instances within this Session. When any attributes on a persistent instance is next accessed, a query will be issued using the :class:`.Session` object's current transactional context in order to load all expired attributes for the given instance. 
Note that a highly isolated transaction will return the same values as were previously read in that same transaction, regardless of changes in database state outside of that transaction. To expire individual objects and individual attributes on those objects, use :meth:`Session.expire`. The :class:`.Session` object's default behavior is to expire all state whenever the :meth:`Session.rollback` or :meth:`Session.commit` methods are called, so that new state can be loaded for the new transaction. For this reason, calling :meth:`Session.expire_all` should not be needed when autocommit is ``False``, assuming the transaction is isolated. """ for state in self.identity_map.all_states(): state._expire(state.dict, self.identity_map._modified) def expire(self, instance, attribute_names=None): """Expire the attributes on an instance. Marks the attributes of an instance as out of date. When an expired attribute is next accessed, a query will be issued to the :class:`.Session` object's current transactional context in order to load all expired attributes for the given instance. Note that a highly isolated transaction will return the same values as were previously read in that same transaction, regardless of changes in database state outside of that transaction. To expire all objects in the :class:`.Session` simultaneously, use :meth:`Session.expire_all`. The :class:`.Session` object's default behavior is to expire all state whenever the :meth:`Session.rollback` or :meth:`Session.commit` methods are called, so that new state can be loaded for the new transaction. For this reason, calling :meth:`Session.expire` only makes sense for the specific case that a non-ORM SQL statement was emitted in the current transaction. :param instance: The instance to be refreshed. :param attribute_names: optional list of string attribute names indicating a subset of attributes to be expired. """ try: state = attributes.instance_state(instance) except exc.NO_STATE: raise exc.UnmappedInstanceError(instance) self._expire_state(state, attribute_names) def _expire_state(self, state, attribute_names): self._validate_persistent(state) if attribute_names: state._expire_attributes(state.dict, attribute_names) else: # pre-fetch the full cascade since the expire is going to # remove associations cascaded = list(state.manager.mapper.cascade_iterator( 'refresh-expire', state)) self._conditional_expire(state) for o, m, st_, dct_ in cascaded: self._conditional_expire(st_) def _conditional_expire(self, state): """Expire a state if persistent, else expunge if pending""" if state.key: state._expire(state.dict, self.identity_map._modified) elif state in self._new: self._new.pop(state) state._detach() @util.deprecated("0.7", "The non-weak-referencing identity map " "feature is no longer needed.") def prune(self): """Remove unreferenced instances cached in the identity map. Note that this method is only meaningful if "weak_identity_map" is set to False. The default weak identity map is self-pruning. Removes any object in this Session's identity map that is not referenced in user code, modified, new or scheduled for deletion. Returns the number of objects pruned. """ return self.identity_map.prune() def expunge(self, instance): """Remove the `instance` from this ``Session``. This will free all internal references to the instance. Cascading will be applied according to the *expunge* cascade rule. 
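E.g. (``SomeClass`` is a hypothetical mapped class)::

    obj = session.query(SomeClass).first()
    session.expunge(obj)    # obj is now detached from this Session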
""" try: state = attributes.instance_state(instance) except exc.NO_STATE: raise exc.UnmappedInstanceError(instance) if state.session_id is not self.hash_key: raise sa_exc.InvalidRequestError( "Instance %s is not present in this Session" % orm_util.state_str(state)) cascaded = list(state.manager.mapper.cascade_iterator( 'expunge', state)) self._expunge_state(state) for o, m, st_, dct_ in cascaded: self._expunge_state(st_) def _expunge_state(self, state): if state in self._new: self._new.pop(state) state._detach() elif self.identity_map.contains_state(state): self.identity_map.discard(state) self._deleted.pop(state, None) state._detach() elif self.transaction: self.transaction._deleted.pop(state, None) def _register_newly_persistent(self, states): for state in states: mapper = _state_mapper(state) # prevent against last minute dereferences of the object obj = state.obj() if obj is not None: instance_key = mapper._identity_key_from_state(state) if _none_set.issubset(instance_key[1]) and \ not mapper.allow_partial_pks or \ _none_set.issuperset(instance_key[1]): raise exc.FlushError( "Instance %s has a NULL identity key. If this is an " "auto-generated value, check that the database table " "allows generation of new primary key values, and " "that the mapped Column object is configured to " "expect these generated values. Ensure also that " "this flush() is not occurring at an inappropriate " "time, such aswithin a load() event." % orm_util.state_str(state) ) if state.key is None: state.key = instance_key elif state.key != instance_key: # primary key switch. use discard() in case another # state has already replaced this one in the identity # map (see test/orm/test_naturalpks.py ReversePKsTest) self.identity_map.discard(state) if state in self.transaction._key_switches: orig_key = self.transaction._key_switches[state][0] else: orig_key = state.key self.transaction._key_switches[state] = ( orig_key, instance_key) state.key = instance_key self.identity_map.replace(state) statelib.InstanceState._commit_all_states( ((state, state.dict) for state in states), self.identity_map ) self._register_altered(states) # remove from new last, might be the last strong ref for state in set(states).intersection(self._new): self._new.pop(state) def _register_altered(self, states): if self._enable_transaction_accounting and self.transaction: for state in states: if state in self._new: self.transaction._new[state] = True else: self.transaction._dirty[state] = True def _remove_newly_deleted(self, states): for state in states: if self._enable_transaction_accounting and self.transaction: self.transaction._deleted[state] = True self.identity_map.discard(state) self._deleted.pop(state, None) state.deleted = True def add(self, instance, _warn=True): """Place an object in the ``Session``. Its state will be persisted to the database on the next flush operation. Repeated calls to ``add()`` will be ignored. The opposite of ``add()`` is ``expunge()``. 
""" if _warn and self._warn_on_events: self._flush_warning("Session.add()") try: state = attributes.instance_state(instance) except exc.NO_STATE: raise exc.UnmappedInstanceError(instance) self._save_or_update_state(state) def add_all(self, instances): """Add the given collection of instances to this ``Session``.""" if self._warn_on_events: self._flush_warning("Session.add_all()") for instance in instances: self.add(instance, _warn=False) def _save_or_update_state(self, state): self._save_or_update_impl(state) mapper = _state_mapper(state) for o, m, st_, dct_ in mapper.cascade_iterator( 'save-update', state, halt_on=self._contains_state): self._save_or_update_impl(st_) def delete(self, instance): """Mark an instance as deleted. The database delete operation occurs upon ``flush()``. """ if self._warn_on_events: self._flush_warning("Session.delete()") try: state = attributes.instance_state(instance) except exc.NO_STATE: raise exc.UnmappedInstanceError(instance) if state.key is None: raise sa_exc.InvalidRequestError( "Instance '%s' is not persisted" % orm_util.state_str(state)) if state in self._deleted: return # ensure object is attached to allow the # cascade operation to load deferred attributes # and collections self._attach(state, include_before=True) # grab the cascades before adding the item to the deleted list # so that autoflush does not delete the item # the strong reference to the instance itself is significant here cascade_states = list(state.manager.mapper.cascade_iterator( 'delete', state)) self._deleted[state] = state.obj() self.identity_map.add(state) for o, m, st_, dct_ in cascade_states: self._delete_impl(st_) def merge(self, instance, load=True): """Copy the state of a given instance into a corresponding instance within this :class:`.Session`. :meth:`.Session.merge` examines the primary key attributes of the source instance, and attempts to reconcile it with an instance of the same primary key in the session. If not found locally, it attempts to load the object from the database based on primary key, and if none can be located, creates a new instance. The state of each attribute on the source instance is then copied to the target instance. The resulting target instance is then returned by the method; the original source instance is left unmodified, and un-associated with the :class:`.Session` if not already. This operation cascades to associated instances if the association is mapped with ``cascade="merge"``. See :ref:`unitofwork_merging` for a detailed discussion of merging. :param instance: Instance to be merged. :param load: Boolean, when False, :meth:`.merge` switches into a "high performance" mode which causes it to forego emitting history events as well as all database access. This flag is used for cases such as transferring graphs of objects into a :class:`.Session` from a second level cache, or to transfer just-loaded objects into the :class:`.Session` owned by a worker thread or process without re-querying the database. The ``load=False`` use case adds the caveat that the given object has to be in a "clean" state, that is, has no pending changes to be flushed - even if the incoming object is detached from any :class:`.Session`. 
This is so that when the merge operation populates local attributes and cascades to related objects and collections, the values can be "stamped" onto the target object as is, without generating any history or attribute events, and without the need to reconcile the incoming data with any existing related objects or collections that might not be loaded. The resulting objects from ``load=False`` are always produced as "clean", so it is only appropriate that the given objects should be "clean" as well, else this suggests a mis-use of the method. """ if self._warn_on_events: self._flush_warning("Session.merge()") _recursive = {} if load: # flush current contents if we expect to load data self._autoflush() object_mapper(instance) # verify mapped autoflush = self.autoflush try: self.autoflush = False return self._merge( attributes.instance_state(instance), attributes.instance_dict(instance), load=load, _recursive=_recursive) finally: self.autoflush = autoflush def _merge(self, state, state_dict, load=True, _recursive=None): mapper = _state_mapper(state) if state in _recursive: return _recursive[state] new_instance = False key = state.key if key is None: if not load: raise sa_exc.InvalidRequestError( "merge() with load=False option does not support " "objects transient (i.e. unpersisted) objects. flush() " "all changes on mapped instances before merging with " "load=False.") key = mapper._identity_key_from_state(state) if key in self.identity_map: merged = self.identity_map[key] elif not load: if state.modified: raise sa_exc.InvalidRequestError( "merge() with load=False option does not support " "objects marked as 'dirty'. flush() all changes on " "mapped instances before merging with load=False.") merged = mapper.class_manager.new_instance() merged_state = attributes.instance_state(merged) merged_state.key = key self._update_impl(merged_state) new_instance = True elif not _none_set.issubset(key[1]) or \ (mapper.allow_partial_pks and not _none_set.issuperset(key[1])): merged = self.query(mapper.class_).get(key[1]) else: merged = None if merged is None: merged = mapper.class_manager.new_instance() merged_state = attributes.instance_state(merged) merged_dict = attributes.instance_dict(merged) new_instance = True self._save_or_update_state(merged_state) else: merged_state = attributes.instance_state(merged) merged_dict = attributes.instance_dict(merged) _recursive[state] = merged # check that we didn't just pull the exact same # state out. if state is not merged_state: # version check if applicable if mapper.version_id_col is not None: existing_version = mapper._get_state_attr_by_column( state, state_dict, mapper.version_id_col, passive=attributes.PASSIVE_NO_INITIALIZE) merged_version = mapper._get_state_attr_by_column( merged_state, merged_dict, mapper.version_id_col, passive=attributes.PASSIVE_NO_INITIALIZE) if existing_version is not attributes.PASSIVE_NO_RESULT and \ merged_version is not attributes.PASSIVE_NO_RESULT and \ existing_version != merged_version: raise exc.StaleDataError( "Version id '%s' on merged state %s " "does not match existing version '%s'. " "Leave the version attribute unset when " "merging to update the most recent version." 
% ( existing_version, orm_util.state_str(merged_state), merged_version )) merged_state.load_path = state.load_path merged_state.load_options = state.load_options for prop in mapper.iterate_properties: prop.merge(self, state, state_dict, merged_state, merged_dict, load, _recursive) if not load: # remove any history merged_state._commit_all(merged_dict, self.identity_map) if new_instance: merged_state.manager.dispatch.load(merged_state, None) return merged def _validate_persistent(self, state): if not self.identity_map.contains_state(state): raise sa_exc.InvalidRequestError( "Instance '%s' is not persistent within this Session" % orm_util.state_str(state)) def _save_impl(self, state): if state.key is not None: raise sa_exc.InvalidRequestError( "Object '%s' already has an identity - it can't be registered " "as pending" % orm_util.state_str(state)) self._before_attach(state) if state not in self._new: self._new[state] = state.obj() state.insert_order = len(self._new) self._attach(state) def _update_impl(self, state, discard_existing=False): if (self.identity_map.contains_state(state) and state not in self._deleted): return if state.key is None: raise sa_exc.InvalidRequestError( "Instance '%s' is not persisted" % orm_util.state_str(state)) if state.deleted: raise sa_exc.InvalidRequestError( "Instance '%s' has been deleted. Use the make_transient() " "function to send this object back to the transient state." % orm_util.state_str(state) ) self._before_attach(state) self._deleted.pop(state, None) if discard_existing: self.identity_map.replace(state) else: self.identity_map.add(state) self._attach(state) def _save_or_update_impl(self, state): if state.key is None: self._save_impl(state) else: self._update_impl(state) def _delete_impl(self, state): if state in self._deleted: return if state.key is None: return self._attach(state, include_before=True) self._deleted[state] = state.obj() self.identity_map.add(state) def enable_relationship_loading(self, obj): """Associate an object with this :class:`.Session` for related object loading. .. warning:: :meth:`.enable_relationship_loading` exists to serve special use cases and is not recommended for general use. Accesses of attributes mapped with :func:`.relationship` will attempt to load a value from the database using this :class:`.Session` as the source of connectivity. The values will be loaded based on foreign key values present on this object - it follows that this functionality generally only works for many-to-one-relationships. The object will be attached to this session, but will **not** participate in any persistence operations; its state for almost all purposes will remain either "transient" or "detached", except for the case of relationship loading. Also note that backrefs will often not work as expected. Altering a relationship-bound attribute on the target object may not fire off a backref event, if the effective value is what was already loaded from a foreign-key-holding value. The :meth:`.Session.enable_relationship_loading` method supersedes the ``load_on_pending`` flag on :func:`.relationship`. Unlike that flag, :meth:`.Session.enable_relationship_loading` allows an object to remain transient while still being able to load related items. To make a transient object associated with a :class:`.Session` via :meth:`.Session.enable_relationship_loading` pending, add it to the :class:`.Session` using :meth:`.Session.add` normally. 
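A sketch of the intended usage, assuming an ``Address`` class with a many-to-one :func:`.relationship` to ``User`` based on an ``address.user_id`` foreign key column::

    a1 = Address(user_id=5)                  # transient; FK value set directly
    session.enable_relationship_loading(a1)
    a1.user                                  # loads the User with id 5, if any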
:meth:`.Session.enable_relationship_loading` does not improve behavior when the ORM is used normally - object references should be constructed at the object level, not at the foreign key level, so that they are present in an ordinary way before flush() proceeds. This method is not intended for general use. .. versionadded:: 0.8 """ state = attributes.instance_state(obj) self._attach(state, include_before=True) state._load_pending = True def _before_attach(self, state): if state.session_id != self.hash_key and \ self.dispatch.before_attach: self.dispatch.before_attach(self, state.obj()) def _attach(self, state, include_before=False): if state.key and \ state.key in self.identity_map and \ not self.identity_map.contains_state(state): raise sa_exc.InvalidRequestError("Can't attach instance " "%s; another instance with key %s is already " "present in this session." % (orm_util.state_str(state), state.key)) if state.session_id and \ state.session_id is not self.hash_key and \ state.session_id in _sessions: raise sa_exc.InvalidRequestError( "Object '%s' is already attached to session '%s' " "(this is '%s')" % (orm_util.state_str(state), state.session_id, self.hash_key)) if state.session_id != self.hash_key: if include_before and \ self.dispatch.before_attach: self.dispatch.before_attach(self, state.obj()) state.session_id = self.hash_key if state.modified and state._strong_obj is None: state._strong_obj = state.obj() if self.dispatch.after_attach: self.dispatch.after_attach(self, state.obj()) def __contains__(self, instance): """Return True if the instance is associated with this session. The instance may be pending or persistent within the Session for a result of True. """ try: state = attributes.instance_state(instance) except exc.NO_STATE: raise exc.UnmappedInstanceError(instance) return self._contains_state(state) def __iter__(self): """Iterate over all pending or persistent instances within this Session. """ return iter(list(self._new.values()) + self.identity_map.values()) def _contains_state(self, state): return state in self._new or self.identity_map.contains_state(state) def flush(self, objects=None): """Flush all the object changes to the database. Writes out all pending object creations, deletions and modifications to the database as INSERTs, DELETEs, UPDATEs, etc. Operations are automatically ordered by the Session's unit of work dependency solver. Database operations will be issued in the current transactional context and do not affect the state of the transaction, unless an error occurs, in which case the entire transaction is rolled back. You may flush() as often as you like within a transaction to move changes from Python to the database's transaction buffer. For ``autocommit`` Sessions with no active manual transaction, flush() will create a transaction on the fly that surrounds the entire set of operations int the flush. :param objects: Optional; restricts the flush operation to operate only on elements that are in the given collection. This feature is for an extremely narrow set of use cases where particular objects may need to be operated upon before the full flush() occurs. It is not intended for general use. """ if self._flushing: raise sa_exc.InvalidRequestError("Session is already flushing") if self._is_clean(): return try: self._flushing = True self._flush(objects) finally: self._flushing = False def _flush_warning(self, method): util.warn( "Usage of the '%s' operation is not currently supported " "within the execution stage of the flush process. 
" "Results may not be consistent. Consider using alternative " "event listeners or connection-level operations instead." % method) def _is_clean(self): return not self.identity_map.check_modified() and \ not self._deleted and \ not self._new def _flush(self, objects=None): dirty = self._dirty_states if not dirty and not self._deleted and not self._new: self.identity_map._modified.clear() return flush_context = UOWTransaction(self) if self.dispatch.before_flush: self.dispatch.before_flush(self, flush_context, objects) # re-establish "dirty states" in case the listeners # added dirty = self._dirty_states deleted = set(self._deleted) new = set(self._new) dirty = set(dirty).difference(deleted) # create the set of all objects we want to operate upon if objects: # specific list passed in objset = set() for o in objects: try: state = attributes.instance_state(o) except exc.NO_STATE: raise exc.UnmappedInstanceError(o) objset.add(state) else: objset = None # store objects whose fate has been decided processed = set() # put all saves/updates into the flush context. detect top-level # orphans and throw them into deleted. if objset: proc = new.union(dirty).intersection(objset).difference(deleted) else: proc = new.union(dirty).difference(deleted) for state in proc: is_orphan = ( _state_mapper(state)._is_orphan(state) and state.has_identity) flush_context.register_object(state, isdelete=is_orphan) processed.add(state) # put all remaining deletes into the flush context. if objset: proc = deleted.intersection(objset).difference(processed) else: proc = deleted.difference(processed) for state in proc: flush_context.register_object(state, isdelete=True) if not flush_context.has_work: return flush_context.transaction = transaction = self.begin( subtransactions=True) try: self._warn_on_events = True try: flush_context.execute() finally: self._warn_on_events = False self.dispatch.after_flush(self, flush_context) flush_context.finalize_flush_changes() if not objects and self.identity_map._modified: len_ = len(self.identity_map._modified) statelib.InstanceState._commit_all_states( [(state, state.dict) for state in self.identity_map._modified], instance_dict=self.identity_map) util.warn("Attribute history events accumulated on %d " "previously clean instances " "within inner-flush event handlers have been reset, " "and will not result in database updates. " "Consider using set_committed_value() within " "inner-flush event handlers to avoid this warning." % len_) # useful assertions: #if not objects: # assert not self.identity_map._modified #else: # assert self.identity_map._modified == \ # self.identity_map._modified.difference(objects) self.dispatch.after_flush_postexec(self, flush_context) transaction.commit() except: with util.safe_reraise(): transaction.rollback(_capture_exception=True) def is_modified(self, instance, include_collections=True, passive=True): """Return ``True`` if the given instance has locally modified attributes. This method retrieves the history for each instrumented attribute on the instance and performs a comparison of the current value to its previously committed value, if any. It is in effect a more expensive and accurate version of checking for the given instance in the :attr:`.Session.dirty` collection; a full test for each attribute's net "dirty" status is performed. E.g.:: return session.is_modified(someobject) .. 
versionchanged:: 0.8 When using SQLAlchemy 0.7 and earlier, the ``passive`` flag should **always** be explicitly set to ``True``, else SQL loads/autoflushes may proceed which can affect the modified state itself: ``session.is_modified(someobject, passive=True)``\ . In 0.8 and above, the behavior is corrected and this flag is ignored. A few caveats to this method apply: * Instances present in the :attr:`.Session.dirty` collection may report ``False`` when tested with this method. This is because the object may have received change events via attribute mutation, thus placing it in :attr:`.Session.dirty`, but ultimately the state is the same as that loaded from the database, resulting in no net change here. * Scalar attributes may not have recorded the previously set value when a new value was applied, if the attribute was not loaded, or was expired, at the time the new value was received - in these cases, the attribute is assumed to have a change, even if there is ultimately no net change against its database value. SQLAlchemy in most cases does not need the "old" value when a set event occurs, so it skips the expense of a SQL call if the old value isn't present, based on the assumption that an UPDATE of the scalar value is usually needed, and in those few cases where it isn't, is less expensive on average than issuing a defensive SELECT. The "old" value is fetched unconditionally upon set only if the attribute container has the ``active_history`` flag set to ``True``. This flag is set typically for primary key attributes and scalar object references that are not a simple many-to-one. To set this flag for any arbitrary mapped column, use the ``active_history`` argument with :func:`.column_property`. :param instance: mapped instance to be tested for pending changes. :param include_collections: Indicates if multivalued collections should be included in the operation. Setting this to ``False`` is a way to detect only local-column based properties (i.e. scalar columns or many-to-one foreign keys) that would result in an UPDATE for this instance upon flush. :param passive: .. versionchanged:: 0.8 Ignored for backwards compatibility. When using SQLAlchemy 0.7 and earlier, this flag should always be set to ``True``. """ state = object_state(instance) if not state.modified: return False dict_ = state.dict for attr in state.manager.attributes: if \ ( not include_collections and hasattr(attr.impl, 'get_collection') ) or not hasattr(attr.impl, 'get_history'): continue (added, unchanged, deleted) = \ attr.impl.get_history(state, dict_, passive=attributes.NO_CHANGE) if added or deleted: return True else: return False @property def is_active(self): """True if this :class:`.Session` is in "transaction mode" and is not in "partial rollback" state. The :class:`.Session` in its default mode of ``autocommit=False`` is essentially always in "transaction mode", in that a :class:`.SessionTransaction` is associated with it as soon as it is instantiated. This :class:`.SessionTransaction` is immediately replaced with a new one as soon as it is ended, due to a rollback, commit, or close operation. "Transaction mode" does *not* indicate whether or not actual database connection resources are in use; the :class:`.SessionTransaction` object coordinates among zero or more actual database transactions, and starts out with none, accumulating individual DBAPI connections as different data sources are used within its scope. 
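For example, with a newly constructed ``autocommit=False`` session (a sketch; ``some_engine`` is an assumed :class:`.Engine`)::

    session = Session(bind=some_engine)
    session.is_active       # True - "transaction mode", though no DBAPI
                            # connection has been checked out yet
    session.connection()    # a real connection and transaction now begin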
The best way to track when a particular :class:`.Session` has actually begun to use DBAPI resources is to implement a listener using the :meth:`.SessionEvents.after_begin` method, which will deliver both the :class:`.Session` as well as the target :class:`.Connection` to a user-defined event listener. The "partial rollback" state refers to when an "inner" transaction, typically used during a flush, encounters an error and emits a rollback of the DBAPI connection. At this point, the :class:`.Session` is in "partial rollback" and awaits for the user to call :meth:`.Session.rollback`, in order to close out the transaction stack. It is in this "partial rollback" period that the :attr:`.is_active` flag returns False. After the call to :meth:`.Session.rollback`, the :class:`.SessionTransaction` is replaced with a new one and :attr:`.is_active` returns ``True`` again. When a :class:`.Session` is used in ``autocommit=True`` mode, the :class:`.SessionTransaction` is only instantiated within the scope of a flush call, or when :meth:`.Session.begin` is called. So :attr:`.is_active` will always be ``False`` outside of a flush or :meth:`.Session.begin` block in this mode, and will be ``True`` within the :meth:`.Session.begin` block as long as it doesn't enter "partial rollback" state. From all the above, it follows that the only purpose to this flag is for application frameworks that wish to detect is a "rollback" is necessary within a generic error handling routine, for :class:`.Session` objects that would otherwise be in "partial rollback" mode. In a typical integration case, this is also not necessary as it is standard practice to emit :meth:`.Session.rollback` unconditionally within the outermost exception catch. To track the transactional state of a :class:`.Session` fully, use event listeners, primarily the :meth:`.SessionEvents.after_begin`, :meth:`.SessionEvents.after_commit`, :meth:`.SessionEvents.after_rollback` and related events. """ return self.transaction and self.transaction.is_active identity_map = None """A mapping of object identities to objects themselves. Iterating through ``Session.identity_map.values()`` provides access to the full set of persistent objects (i.e., those that have row identity) currently in the session. .. seealso:: :func:`.identity_key` - helper function to produce the keys used in this dictionary. """ @property def _dirty_states(self): """The set of all persistent states considered dirty. This method returns all states that were modified including those that were possibly deleted. """ return self.identity_map._dirty_states() @property def dirty(self): """The set of all persistent instances considered dirty. E.g.:: some_mapped_object in session.dirty Instances are considered dirty when they were modified but not deleted. Note that this 'dirty' calculation is 'optimistic'; most attribute-setting or collection modification operations will mark an instance as 'dirty' and place it in this set, even if there is no net change to the attribute's value. At flush time, the value of each attribute is compared to its previously saved value, and if there's no net change, no SQL operation will occur (this is a more expensive operation so it's only done at flush time). To check if an instance has actionable net changes to its attributes, use the :meth:`.Session.is_modified` method. 
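For example, assigning an attribute its existing value still places
the object in this collection, while :meth:`.Session.is_modified`
may report no net change (a minimal sketch; ``user`` is assumed to be
an already-persistent instance whose ``name`` attribute is loaded)::

    user.name = user.name           # attribute event fires
    user in session.dirty           # True - 'optimistic' calculation
    session.is_modified(user)       # typically False - no net change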
""" return util.IdentitySet( [state.obj() for state in self._dirty_states if state not in self._deleted]) @property def deleted(self): "The set of all instances marked as 'deleted' within this ``Session``" return util.IdentitySet(self._deleted.values()) @property def new(self): "The set of all instances marked as 'new' within this ``Session``." return util.IdentitySet(self._new.values()) class sessionmaker(_SessionClassMethods): """A configurable :class:`.Session` factory. The :class:`.sessionmaker` factory generates new :class:`.Session` objects when called, creating them given the configurational arguments established here. e.g.:: # global scope Session = sessionmaker(autoflush=False) # later, in a local scope, create and use a session: sess = Session() Any keyword arguments sent to the constructor itself will override the "configured" keywords:: Session = sessionmaker() # bind an individual session to a connection sess = Session(bind=connection) The class also includes a method :meth:`.configure`, which can be used to specify additional keyword arguments to the factory, which will take effect for subsequent :class:`.Session` objects generated. This is usually used to associate one or more :class:`.Engine` objects with an existing :class:`.sessionmaker` factory before it is first used:: # application starts Session = sessionmaker() # ... later engine = create_engine('sqlite:///foo.db') Session.configure(bind=engine) sess = Session() .. seealso: :ref:`session_getting` - introductory text on creating sessions using :class:`.sessionmaker`. """ def __init__(self, bind=None, class_=Session, autoflush=True, autocommit=False, expire_on_commit=True, **kw): """Construct a new :class:`.sessionmaker`. All arguments here except for ``class_`` correspond to arguments accepted by :class:`.Session` directly. See the :meth:`.Session.__init__` docstring for more details on parameters. :param bind: a :class:`.Engine` or other :class:`.Connectable` with which newly created :class:`.Session` objects will be associated. :param class_: class to use in order to create new :class:`.Session` objects. Defaults to :class:`.Session`. :param autoflush: The autoflush setting to use with newly created :class:`.Session` objects. :param autocommit: The autocommit setting to use with newly created :class:`.Session` objects. :param expire_on_commit=True: the expire_on_commit setting to use with newly created :class:`.Session` objects. :param \**kw: all other keyword arguments are passed to the constructor of newly created :class:`.Session` objects. """ kw['bind'] = bind kw['autoflush'] = autoflush kw['autocommit'] = autocommit kw['expire_on_commit'] = expire_on_commit self.kw = kw # make our own subclass of the given class, so that # events can be associated with it specifically. self.class_ = type(class_.__name__, (class_,), {}) def __call__(self, **local_kw): """Produce a new :class:`.Session` object using the configuration established in this :class:`.sessionmaker`. In Python, the ``__call__`` method is invoked on an object when it is "called" in the same way as a function:: Session = sessionmaker() session = Session() # invokes sessionmaker.__call__() """ for k, v in self.kw.items(): local_kw.setdefault(k, v) return self.class_(**local_kw) def configure(self, **new_kw): """(Re)configure the arguments for this sessionmaker. 
e.g.:: Session = sessionmaker() Session.configure(bind=create_engine('sqlite://')) """ self.kw.update(new_kw) def __repr__(self): return "%s(class_=%r%s)" % ( self.__class__.__name__, self.class_.__name__, ", ".join("%s=%r" % (k, v) for k, v in self.kw.items()) ) _sessions = weakref.WeakValueDictionary() def make_transient(instance): """Make the given instance 'transient'. This will remove its association with any session and additionally will remove its "identity key", such that it's as though the object were newly constructed, except retaining its values. It also resets the "deleted" flag on the state if this object had been explicitly deleted by its session. Attributes which were "expired" or deferred at the instance level are reverted to undefined, and will not trigger any loads. """ state = attributes.instance_state(instance) s = _state_session(state) if s: s._expunge_state(state) # remove expired state and # deferred callables state.callables.clear() if state.key: del state.key if state.deleted: del state.deleted def object_session(instance): """Return the ``Session`` to which instance belongs. If the instance is not a mapped instance, an error is raised. """ try: return _state_session(attributes.instance_state(instance)) except exc.NO_STATE: raise exc.UnmappedInstanceError(instance) def _state_session(state): if state.session_id: try: return _sessions[state.session_id] except KeyError: pass return None _new_sessionid = util.counter() SQLAlchemy-0.8.4/lib/sqlalchemy/orm/state.py0000644000076500000240000004472112251150015021430 0ustar classicstaff00000000000000# orm/state.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Defines instrumentation of instances. This module is usually not directly visible to user applications, but defines a large part of the ORM's interactivity. """ import weakref from .. import util from . import exc as orm_exc, attributes, util as orm_util, interfaces from .attributes import ( PASSIVE_NO_RESULT, SQL_OK, NEVER_SET, ATTR_WAS_SET, NO_VALUE,\ PASSIVE_NO_INITIALIZE ) sessionlib = util.importlater("sqlalchemy.orm", "session") instrumentation = util.importlater("sqlalchemy.orm", "instrumentation") mapperlib = util.importlater("sqlalchemy.orm", "mapperlib") class InstanceState(interfaces._InspectionAttr): """tracks state information at the instance level.""" session_id = None key = None runid = None load_options = util.EMPTY_SET load_path = () insert_order = None _strong_obj = None modified = False expired = False deleted = False _load_pending = False is_instance = True def __init__(self, obj, manager): self.class_ = obj.__class__ self.manager = manager self.obj = weakref.ref(obj, self._cleanup) self.callables = {} self.committed_state = {} @util.memoized_property def attrs(self): """Return a namespace representing each attribute on the mapped object, including its current value and history. The returned object is an instance of :class:`.AttributeState`. 
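E.g., to examine an individual attribute's current value and its
pending change history (a minimal sketch; ``some_mapped_object`` is
assumed to be an instance of a mapped class with a ``name``
attribute)::

    from sqlalchemy import inspect

    insp = inspect(some_mapped_object)
    insp.attrs.name.value      # current value, loading it if needed
    insp.attrs.name.history    # History of pending changes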
""" return util.ImmutableProperties( dict( (key, AttributeState(self, key)) for key in self.manager ) ) @property def transient(self): """Return true if the object is transient.""" return self.key is None and \ not self._attached @property def pending(self): """Return true if the object is pending.""" return self.key is None and \ self._attached @property def persistent(self): """Return true if the object is persistent.""" return self.key is not None and \ self._attached @property def detached(self): """Return true if the object is detached.""" return self.key is not None and \ not self._attached @property def _attached(self): return self.session_id is not None and \ self.session_id in sessionlib._sessions @property def session(self): """Return the owning :class:`.Session` for this instance, or ``None`` if none available.""" return sessionlib._state_session(self) @property def object(self): """Return the mapped object represented by this :class:`.InstanceState`.""" return self.obj() @property def identity(self): """Return the mapped identity of the mapped object. This is the primary key identity as persisted by the ORM which can always be passed directly to :meth:`.Query.get`. Returns ``None`` if the object has no primary key identity. .. note:: An object which is transient or pending does **not** have a mapped identity until it is flushed, even if its attributes include primary key values. """ if self.key is None: return None else: return self.key[1] @property def identity_key(self): """Return the identity key for the mapped object. This is the key used to locate the object within the :attr:`.Session.identity_map` mapping. It contains the identity as returned by :attr:`.identity` within it. """ # TODO: just change .key to .identity_key across # the board ? probably return self.key @util.memoized_property def parents(self): return {} @util.memoized_property def _pending_mutations(self): return {} @util.memoized_property def mapper(self): """Return the :class:`.Mapper` used for this mapepd object.""" return self.manager.mapper @property def has_identity(self): """Return ``True`` if this object has an identity key. This should always have the same value as the expression ``state.persistent or state.detached``. 
""" return bool(self.key) def _detach(self): self.session_id = self._strong_obj = None def _dispose(self): self._detach() del self.obj def _cleanup(self, ref): instance_dict = self._instance_dict() if instance_dict: instance_dict.discard(self) self.callables = {} self.session_id = self._strong_obj = None del self.obj def obj(self): return None @property def dict(self): o = self.obj() if o is not None: return attributes.instance_dict(o) else: return {} def _initialize_instance(*mixed, **kwargs): self, instance, args = mixed[0], mixed[1], mixed[2:] manager = self.manager manager.dispatch.init(self, args, kwargs) try: return manager.original_init(*mixed[1:], **kwargs) except: manager.dispatch.init_failure(self, args, kwargs) raise def get_history(self, key, passive): return self.manager[key].impl.get_history(self, self.dict, passive) def get_impl(self, key): return self.manager[key].impl def _get_pending_mutation(self, key): if key not in self._pending_mutations: self._pending_mutations[key] = PendingCollection() return self._pending_mutations[key] def __getstate__(self): d = {'instance': self.obj()} d.update( (k, self.__dict__[k]) for k in ( 'committed_state', '_pending_mutations', 'modified', 'expired', 'callables', 'key', 'parents', 'load_options', 'class_', ) if k in self.__dict__ ) if self.load_path: d['load_path'] = self.load_path.serialize() self.manager.dispatch.pickle(self, d) return d def __setstate__(self, state): inst = state['instance'] if inst is not None: self.obj = weakref.ref(inst, self._cleanup) self.class_ = inst.__class__ else: # None being possible here generally new as of 0.7.4 # due to storage of state in "parents". "class_" # also new. self.obj = None self.class_ = state['class_'] self.manager = manager = instrumentation.manager_of_class(self.class_) if manager is None: raise orm_exc.UnmappedInstanceError( inst, "Cannot deserialize object of type %r - " "no mapper() has " "been configured for this class within the current " "Python process!" % self.class_) elif manager.is_mapped and not manager.mapper.configured: mapperlib.configure_mappers() self.committed_state = state.get('committed_state', {}) self._pending_mutations = state.get('_pending_mutations', {}) self.parents = state.get('parents', {}) self.modified = state.get('modified', False) self.expired = state.get('expired', False) self.callables = state.get('callables', {}) self.__dict__.update([ (k, state[k]) for k in ( 'key', 'load_options', ) if k in state ]) if 'load_path' in state: self.load_path = orm_util.PathRegistry.\ deserialize(state['load_path']) # setup _sa_instance_state ahead of time so that # unpickle events can access the object normally. # see [ticket:2362] if inst is not None: manager.setup_instance(inst, self) manager.dispatch.unpickle(self, state) def _initialize(self, key): """Set this attribute to an empty value or collection, based on the AttributeImpl in use.""" self.manager.get_impl(key).initialize(self, self.dict) def _reset(self, dict_, key): """Remove the given attribute and any callables associated with it.""" old = dict_.pop(key, None) if old is not None and self.manager[key].impl.collection: self.manager[key].impl._invalidate_collection(old) self.callables.pop(key, None) def _expire_attribute_pre_commit(self, dict_, key): """a fast expire that can be called by column loaders during a load. The additional bookkeeping is finished up in commit_all(). Should only be called for scalar attributes. 
This method is actually called a lot with joined-table loading, when the second table isn't present in the result. """ dict_.pop(key, None) self.callables[key] = self @classmethod def _row_processor(cls, manager, fn, key): impl = manager[key].impl if impl.collection: def _set_callable(state, dict_, row): old = dict_.pop(key, None) if old is not None: impl._invalidate_collection(old) state.callables[key] = fn else: def _set_callable(state, dict_, row): state.callables[key] = fn return _set_callable def _expire(self, dict_, modified_set): self.expired = True if self.modified: modified_set.discard(self) self.modified = False self._strong_obj = None self.committed_state.clear() InstanceState._pending_mutations._reset(self) # clear out 'parents' collection. not # entirely clear how we can best determine # which to remove, or not. InstanceState.parents._reset(self) for key in self.manager: impl = self.manager[key].impl if impl.accepts_scalar_loader and \ (impl.expire_missing or key in dict_): self.callables[key] = self old = dict_.pop(key, None) if impl.collection and old is not None: impl._invalidate_collection(old) self.manager.dispatch.expire(self, None) def _expire_attributes(self, dict_, attribute_names): pending = self.__dict__.get('_pending_mutations', None) for key in attribute_names: impl = self.manager[key].impl if impl.accepts_scalar_loader: self.callables[key] = self old = dict_.pop(key, None) if impl.collection and old is not None: impl._invalidate_collection(old) self.committed_state.pop(key, None) if pending: pending.pop(key, None) self.manager.dispatch.expire(self, attribute_names) def __call__(self, state, passive): """__call__ allows the InstanceState to act as a deferred callable for loading expired attributes, which is also serializable (picklable). """ if not passive & SQL_OK: return PASSIVE_NO_RESULT toload = self.expired_attributes.\ intersection(self.unmodified) self.manager.deferred_scalar_loader(self, toload) # if the loader failed, or this # instance state didn't have an identity, # the attributes still might be in the callables # dict. ensure they are removed. for k in toload.intersection(self.callables): del self.callables[k] return ATTR_WAS_SET @property def unmodified(self): """Return the set of keys which have no uncommitted changes""" return set(self.manager).difference(self.committed_state) def unmodified_intersection(self, keys): """Return self.unmodified.intersection(keys).""" return set(keys).intersection(self.manager).\ difference(self.committed_state) @property def unloaded(self): """Return the set of keys which do not have a loaded value. This includes expired attributes and any other attribute that was never populated or modified. """ return set(self.manager).\ difference(self.committed_state).\ difference(self.dict) @property def expired_attributes(self): """Return the set of keys which are 'expired' to be loaded by the manager's deferred scalar loader, assuming no pending changes. see also the ``unmodified`` collection which is intersected against this set when a refresh operation occurs. 
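E.g., expiring a column attribute typically makes it appear both here
and in the ``unloaded`` collection (a minimal sketch; ``obj`` is
assumed to be a persistent instance owned by ``session``)::

    from sqlalchemy import inspect

    state = inspect(obj)
    session.expire(obj, ['name'])
    'name' in state.expired_attributes   # True
    'name' in state.unloaded             # True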
""" return set([k for k, v in self.callables.items() if v is self]) def _instance_dict(self): return None def _modified_event(self, dict_, attr, previous, collection=False): if attr.key not in self.committed_state: if collection: if previous is NEVER_SET: if attr.key in dict_: previous = dict_[attr.key] if previous not in (None, NO_VALUE, NEVER_SET): previous = attr.copy(previous) self.committed_state[attr.key] = previous # assert self._strong_obj is None or self.modified if (self.session_id and self._strong_obj is None) \ or not self.modified: instance_dict = self._instance_dict() if instance_dict: instance_dict._modified.add(self) # only create _strong_obj link if attached # to a session inst = self.obj() if self.session_id: self._strong_obj = inst if inst is None: raise orm_exc.ObjectDereferencedError( "Can't emit change event for attribute '%s' - " "parent object of type %s has been garbage " "collected." % ( self.manager[attr.key], orm_util.state_class_str(self) )) self.modified = True def _commit(self, dict_, keys): """Commit attributes. This is used by a partial-attribute load operation to mark committed those attributes which were refreshed from the database. Attributes marked as "expired" can potentially remain "expired" after this step if a value was not populated in state.dict. """ for key in keys: self.committed_state.pop(key, None) self.expired = False for key in set(self.callables).\ intersection(keys).\ intersection(dict_): del self.callables[key] def _commit_all(self, dict_, instance_dict=None): """commit all attributes unconditionally. This is used after a flush() or a full load/refresh to remove all pending state from the instance. - all attributes are marked as "committed" - the "strong dirty reference" is removed - the "modified" flag is set to False - any "expired" markers/callables for attributes loaded are removed. Attributes marked as "expired" can potentially remain "expired" after this step if a value was not populated in state.dict. """ self._commit_all_states([(self, dict_)], instance_dict) @classmethod def _commit_all_states(self, iter, instance_dict=None): """Mass version of commit_all().""" for state, dict_ in iter: state.committed_state.clear() InstanceState._pending_mutations._reset(state) callables = state.callables for key in list(callables): if key in dict_ and callables[key] is state: del callables[key] if instance_dict and state.modified: instance_dict._modified.discard(state) state.modified = state.expired = False state._strong_obj = None class AttributeState(object): """Provide an inspection interface corresponding to a particular attribute on a particular mapped object. The :class:`.AttributeState` object is accessed via the :attr:`.InstanceState.attrs` collection of a particular :class:`.InstanceState`:: from sqlalchemy import inspect insp = inspect(some_mapped_object) attr_state = insp.attrs.some_attribute """ def __init__(self, state, key): self.state = state self.key = key @property def loaded_value(self): """The current value of this attribute as loaded from the database. If the value has not been loaded, or is otherwise not present in the object's dictionary, returns NO_VALUE. """ return self.state.dict.get(self.key, NO_VALUE) @property def value(self): """Return the value of this attribute. This operation is equivalent to accessing the object's attribute directly or via ``getattr()``, and will fire off any pending loader callables if needed. 
""" return self.state.manager[self.key].__get__( self.state.obj(), self.state.class_) @property def history(self): """Return the current pre-flush change history for this attribute, via the :class:`.History` interface. """ return self.state.get_history(self.key, PASSIVE_NO_INITIALIZE) class PendingCollection(object): """A writable placeholder for an unloaded collection. Stores items appended to and removed from a collection that has not yet been loaded. When the collection is loaded, the changes stored in PendingCollection are applied to it to produce the final result. """ def __init__(self): self.deleted_items = util.IdentitySet() self.added_items = util.OrderedIdentitySet() def append(self, value): if value in self.deleted_items: self.deleted_items.remove(value) else: self.added_items.add(value) def remove(self, value): if value in self.added_items: self.added_items.remove(value) else: self.deleted_items.add(value) SQLAlchemy-0.8.4/lib/sqlalchemy/orm/strategies.py0000644000076500000240000015315312251150015022462 0ustar classicstaff00000000000000# orm/strategies.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """sqlalchemy.orm.interfaces.LoaderStrategy implementations, and related MapperOptions.""" from .. import exc as sa_exc, inspect from .. import util, log, event from ..sql import util as sql_util, visitors from . import ( attributes, interfaces, exc as orm_exc, loading, unitofwork, util as orm_util ) from .state import InstanceState from .util import _none_set from .interfaces import ( LoaderStrategy, StrategizedOption, MapperOption, PropertyOption, StrategizedProperty ) from .session import _state_session import itertools def _register_attribute(strategy, mapper, useobject, compare_function=None, typecallable=None, uselist=False, callable_=None, proxy_property=None, active_history=False, impl_class=None, **kw ): prop = strategy.parent_property attribute_ext = list(util.to_list(prop.extension, default=[])) listen_hooks = [] if useobject and prop.single_parent: listen_hooks.append(single_parent_validator) if prop.key in prop.parent.validators: fn, include_removes = prop.parent.validators[prop.key] listen_hooks.append( lambda desc, prop: orm_util._validator_events(desc, prop.key, fn, include_removes) ) if useobject: listen_hooks.append(unitofwork.track_cascade_events) # need to assemble backref listeners # after the singleparentvalidator, mapper validator backref = kw.pop('backref', None) if backref: listen_hooks.append( lambda desc, prop: attributes.backref_listeners(desc, backref, uselist) ) for m in mapper.self_and_descendants: if prop is m._props.get(prop.key): desc = attributes.register_attribute_impl( m.class_, prop.key, parent_token=prop, uselist=uselist, compare_function=compare_function, useobject=useobject, extension=attribute_ext, trackparent=useobject and (prop.single_parent or prop.direction is interfaces.ONETOMANY), typecallable=typecallable, callable_=callable_, active_history=active_history, impl_class=impl_class, doc=prop.doc, **kw ) for hook in listen_hooks: hook(desc, prop) class UninstrumentedColumnLoader(LoaderStrategy): """Represent the a non-instrumented MapperProperty. The polymorphic_on argument of mapper() often results in this, if the argument is against the with_polymorphic selectable. 
""" def __init__(self, parent): super(UninstrumentedColumnLoader, self).__init__(parent) self.columns = self.parent_property.columns def setup_query(self, context, entity, path, adapter, column_collection=None, **kwargs): for c in self.columns: if adapter: c = adapter.columns[c] column_collection.append(c) def create_row_processor(self, context, path, mapper, row, adapter): return None, None, None class ColumnLoader(LoaderStrategy): """Provide loading behavior for a :class:`.ColumnProperty`.""" def __init__(self, parent): super(ColumnLoader, self).__init__(parent) self.columns = self.parent_property.columns self.is_composite = hasattr(self.parent_property, 'composite_class') def setup_query(self, context, entity, path, adapter, column_collection, **kwargs): for c in self.columns: if adapter: c = adapter.columns[c] column_collection.append(c) def init_class_attribute(self, mapper): self.is_class_level = True coltype = self.columns[0].type # TODO: check all columns ? check for foreign key as well? active_history = self.parent_property.active_history or \ self.columns[0].primary_key _register_attribute(self, mapper, useobject=False, compare_function=coltype.compare_values, active_history=active_history ) def create_row_processor(self, context, path, mapper, row, adapter): key = self.key # look through list of columns represented here # to see which, if any, is present in the row. for col in self.columns: if adapter: col = adapter.columns[col] if col is not None and col in row: def fetch_col(state, dict_, row): dict_[key] = row[col] return fetch_col, None, None else: def expire_for_non_present_col(state, dict_, row): state._expire_attribute_pre_commit(dict_, key) return expire_for_non_present_col, None, None log.class_logger(ColumnLoader) class DeferredColumnLoader(LoaderStrategy): """Provide loading behavior for a deferred :class:`.ColumnProperty`.""" def __init__(self, parent): super(DeferredColumnLoader, self).__init__(parent) if hasattr(self.parent_property, 'composite_class'): raise NotImplementedError("Deferred loading for composite " "types not implemented yet") self.columns = self.parent_property.columns self.group = self.parent_property.group def create_row_processor(self, context, path, mapper, row, adapter): col = self.columns[0] if adapter: col = adapter.columns[col] key = self.key if col in row: return self.parent_property._get_strategy(ColumnLoader).\ create_row_processor( context, path, mapper, row, adapter) elif not self.is_class_level: set_deferred_for_local_state = InstanceState._row_processor( mapper.class_manager, LoadDeferredColumns(key), key) return set_deferred_for_local_state, None, None else: def reset_col_for_deferred(state, dict_, row): # reset state on the key so that deferred callables # fire off on next access. 
state._reset(dict_, key) return reset_col_for_deferred, None, None def init_class_attribute(self, mapper): self.is_class_level = True _register_attribute(self, mapper, useobject=False, compare_function=self.columns[0].type.compare_values, callable_=self._load_for_state, expire_missing=False ) def setup_query(self, context, entity, path, adapter, only_load_props=None, **kwargs): if ( self.group is not None and context.attributes.get(('undefer', self.group), False) ) or (only_load_props and self.key in only_load_props): self.parent_property._get_strategy(ColumnLoader).\ setup_query(context, entity, path, adapter, **kwargs) def _load_for_state(self, state, passive): if not state.key: return attributes.ATTR_EMPTY if not passive & attributes.SQL_OK: return attributes.PASSIVE_NO_RESULT localparent = state.manager.mapper if self.group: toload = [ p.key for p in localparent.iterate_properties if isinstance(p, StrategizedProperty) and isinstance(p.strategy, DeferredColumnLoader) and p.group == self.group ] else: toload = [self.key] # narrow the keys down to just those which have no history group = [k for k in toload if k in state.unmodified] session = _state_session(state) if session is None: raise orm_exc.DetachedInstanceError( "Parent instance %s is not bound to a Session; " "deferred load operation of attribute '%s' cannot proceed" % (orm_util.state_str(state), self.key) ) query = session.query(localparent) if loading.load_on_ident(query, state.key, only_load_props=group, refresh_state=state) is None: raise orm_exc.ObjectDeletedError(state) return attributes.ATTR_WAS_SET log.class_logger(DeferredColumnLoader) class LoadDeferredColumns(object): """serializable loader object used by DeferredColumnLoader""" def __init__(self, key): self.key = key def __call__(self, state, passive=attributes.PASSIVE_OFF): key = self.key localparent = state.manager.mapper prop = localparent._props[key] strategy = prop._strategies[DeferredColumnLoader] return strategy._load_for_state(state, passive) class DeferredOption(StrategizedOption): propagate_to_loaders = True def __init__(self, key, defer=False): super(DeferredOption, self).__init__(key) self.defer = defer def get_strategy_class(self): if self.defer: return DeferredColumnLoader else: return ColumnLoader class UndeferGroupOption(MapperOption): propagate_to_loaders = True def __init__(self, group): self.group = group def process_query(self, query): query._attributes[("undefer", self.group)] = True class AbstractRelationshipLoader(LoaderStrategy): """LoaderStratgies which deal with related objects.""" def __init__(self, parent): super(AbstractRelationshipLoader, self).__init__(parent) self.mapper = self.parent_property.mapper self.target = self.parent_property.target self.uselist = self.parent_property.uselist class NoLoader(AbstractRelationshipLoader): """Provide loading behavior for a :class:`.RelationshipProperty` with "lazy=None". """ def init_class_attribute(self, mapper): self.is_class_level = True _register_attribute(self, mapper, useobject=True, uselist=self.parent_property.uselist, typecallable=self.parent_property.collection_class, ) def create_row_processor(self, context, path, mapper, row, adapter): def invoke_no_load(state, dict_, row): state._initialize(self.key) return invoke_no_load, None, None log.class_logger(NoLoader) class LazyLoader(AbstractRelationshipLoader): """Provide loading behavior for a :class:`.RelationshipProperty` with "lazy=True", that is loads when first accessed. 
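E.g., with the default ``lazy='select'`` setting, the related
collection is loaded by a new SELECT upon first access (a minimal
sketch; ``User.addresses`` is assumed to be a mapped relationship())::

    user = session.query(User).first()
    user.addresses   # SELECT against the related table emitted here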
""" def __init__(self, parent): super(LazyLoader, self).__init__(parent) join_condition = self.parent_property._join_condition self._lazywhere, \ self._bind_to_col, \ self._equated_columns = join_condition.create_lazy_clause() self._rev_lazywhere, \ self._rev_bind_to_col, \ self._rev_equated_columns = join_condition.create_lazy_clause( reverse_direction=True) self.logger.info("%s lazy loading clause %s", self, self._lazywhere) # determine if our "lazywhere" clause is the same as the mapper's # get() clause. then we can just use mapper.get() #from sqlalchemy.orm import query self.use_get = not self.uselist and \ self.mapper._get_clause[0].compare( self._lazywhere, use_proxies=True, equivalents=self.mapper._equivalent_columns ) if self.use_get: for col in self._equated_columns.keys(): if col in self.mapper._equivalent_columns: for c in self.mapper._equivalent_columns[col]: self._equated_columns[c] = self._equated_columns[col] self.logger.info("%s will use query.get() to " "optimize instance loads" % self) def init_class_attribute(self, mapper): self.is_class_level = True active_history = ( self.parent_property.active_history or self.parent_property.direction is not interfaces.MANYTOONE or not self.use_get ) # MANYTOONE currently only needs the # "old" value for delete-orphan # cascades. the required _SingleParentValidator # will enable active_history # in that case. otherwise we don't need the # "old" value during backref operations. _register_attribute(self, mapper, useobject=True, callable_=self._load_for_state, uselist=self.parent_property.uselist, backref=self.parent_property.back_populates, typecallable=self.parent_property.collection_class, active_history=active_history ) def lazy_clause(self, state, reverse_direction=False, alias_secondary=False, adapt_source=None, passive=None): if state is None: return self._lazy_none_clause( reverse_direction, adapt_source=adapt_source) if not reverse_direction: criterion, bind_to_col, rev = \ self._lazywhere, \ self._bind_to_col, \ self._equated_columns else: criterion, bind_to_col, rev = \ self._rev_lazywhere, \ self._rev_bind_to_col, \ self._rev_equated_columns if reverse_direction: mapper = self.parent_property.mapper else: mapper = self.parent_property.parent o = state.obj() # strong ref dict_ = attributes.instance_dict(o) # use the "committed state" only if we're in a flush # for this state. 
if passive and passive & attributes.LOAD_AGAINST_COMMITTED: def visit_bindparam(bindparam): if bindparam._identifying_key in bind_to_col: bindparam.callable = \ lambda: mapper._get_committed_state_attr_by_column( state, dict_, bind_to_col[bindparam._identifying_key]) else: def visit_bindparam(bindparam): if bindparam._identifying_key in bind_to_col: bindparam.callable = \ lambda: mapper._get_state_attr_by_column( state, dict_, bind_to_col[bindparam._identifying_key]) if self.parent_property.secondary is not None and alias_secondary: criterion = sql_util.ClauseAdapter( self.parent_property.secondary.alias()).\ traverse(criterion) criterion = visitors.cloned_traverse( criterion, {}, {'bindparam': visit_bindparam}) if adapt_source: criterion = adapt_source(criterion) return criterion def _lazy_none_clause(self, reverse_direction=False, adapt_source=None): if not reverse_direction: criterion, bind_to_col, rev = \ self._lazywhere, \ self._bind_to_col,\ self._equated_columns else: criterion, bind_to_col, rev = \ self._rev_lazywhere, \ self._rev_bind_to_col, \ self._rev_equated_columns criterion = sql_util.adapt_criterion_to_null(criterion, bind_to_col) if adapt_source: criterion = adapt_source(criterion) return criterion def _load_for_state(self, state, passive): if not state.key and \ ( ( not self.parent_property.load_on_pending and not state._load_pending ) or not state.session_id ): return attributes.ATTR_EMPTY pending = not state.key ident_key = None if ( (not passive & attributes.SQL_OK and not self.use_get) or (not passive & attributes.NON_PERSISTENT_OK and pending) ): return attributes.PASSIVE_NO_RESULT session = _state_session(state) if not session: raise orm_exc.DetachedInstanceError( "Parent instance %s is not bound to a Session; " "lazy load operation of attribute '%s' cannot proceed" % (orm_util.state_str(state), self.key) ) # if we have a simple primary key load, check the # identity map without generating a Query at all if self.use_get: ident = self._get_ident_for_use_get( session, state, passive ) if attributes.PASSIVE_NO_RESULT in ident: return attributes.PASSIVE_NO_RESULT elif attributes.NEVER_SET in ident: return attributes.NEVER_SET if _none_set.issuperset(ident): return None ident_key = self.mapper.identity_key_from_primary_key(ident) instance = loading.get_from_identity(session, ident_key, passive) if instance is not None: return instance elif not passive & attributes.SQL_OK or \ not passive & attributes.RELATED_OBJECT_OK: return attributes.PASSIVE_NO_RESULT return self._emit_lazyload(session, state, ident_key, passive) def _get_ident_for_use_get(self, session, state, passive): instance_mapper = state.manager.mapper if passive & attributes.LOAD_AGAINST_COMMITTED: get_attr = instance_mapper._get_committed_state_attr_by_column else: get_attr = instance_mapper._get_state_attr_by_column dict_ = state.dict return [ get_attr( state, dict_, self._equated_columns[pk], passive=passive) for pk in self.mapper.primary_key ] def _emit_lazyload(self, session, state, ident_key, passive): q = session.query(self.mapper)._adapt_all_clauses() q = q._with_invoke_all_eagers(False) pending = not state.key # don't autoflush on pending if pending: q = q.autoflush(False) if state.load_path: q = q._with_current_path(state.load_path[self.parent_property]) if state.load_options: q = q._conditional_options(*state.load_options) if self.use_get: return loading.load_on_ident(q, ident_key) if self.parent_property.order_by: q = q.order_by(*util.to_list(self.parent_property.order_by)) for rev in 
self.parent_property._reverse_property: # reverse props that are MANYTOONE are loading *this* # object from get(), so don't need to eager out to those. if rev.direction is interfaces.MANYTOONE and \ rev._use_get and \ not isinstance(rev.strategy, LazyLoader): q = q.options(EagerLazyOption((rev.key,), lazy='select')) lazy_clause = self.lazy_clause(state, passive=passive) if pending: bind_values = sql_util.bind_values(lazy_clause) if None in bind_values: return None q = q.filter(lazy_clause) result = q.all() if self.uselist: return result else: l = len(result) if l: if l > 1: util.warn( "Multiple rows returned with " "uselist=False for lazily-loaded attribute '%s' " % self.parent_property) return result[0] else: return None def create_row_processor(self, context, path, mapper, row, adapter): key = self.key if not self.is_class_level: # we are not the primary manager for this attribute # on this class - set up a # per-instance lazyloader, which will override the # class-level behavior. # this currently only happens when using a # "lazyload" option on a "no load" # attribute - "eager" attributes always have a # class-level lazyloader installed. set_lazy_callable = InstanceState._row_processor( mapper.class_manager, LoadLazyAttribute(key), key) return set_lazy_callable, None, None else: def reset_for_lazy_callable(state, dict_, row): # we are the primary manager for this attribute on # this class - reset its # per-instance attribute state, so that the class-level # lazy loader is # executed when next referenced on this instance. # this is needed in # populate_existing() types of scenarios to reset # any existing state. state._reset(dict_, key) return reset_for_lazy_callable, None, None log.class_logger(LazyLoader) class LoadLazyAttribute(object): """serializable loader object used by LazyLoader""" def __init__(self, key): self.key = key def __call__(self, state, passive=attributes.PASSIVE_OFF): key = self.key instance_mapper = state.manager.mapper prop = instance_mapper._props[key] strategy = prop._strategies[LazyLoader] return strategy._load_for_state(state, passive) class ImmediateLoader(AbstractRelationshipLoader): def init_class_attribute(self, mapper): self.parent_property.\ _get_strategy(LazyLoader).\ init_class_attribute(mapper) def setup_query(self, context, entity, path, adapter, column_collection=None, parentmapper=None, **kwargs): pass def create_row_processor(self, context, path, mapper, row, adapter): def load_immediate(state, dict_, row): state.get_impl(self.key).get(state, dict_) return None, None, load_immediate class SubqueryLoader(AbstractRelationshipLoader): def __init__(self, parent): super(SubqueryLoader, self).__init__(parent) self.join_depth = self.parent_property.join_depth def init_class_attribute(self, mapper): self.parent_property.\ _get_strategy(LazyLoader).\ init_class_attribute(mapper) def setup_query(self, context, entity, path, adapter, column_collection=None, parentmapper=None, **kwargs): if not context.query._enable_eagerloads: return path = path[self.parent_property] # build up a path indicating the path from the leftmost # entity to the thing we're subquery loading. 
with_poly_info = path.get(context, "path_with_polymorphic", None) if with_poly_info is not None: effective_entity = with_poly_info.entity else: effective_entity = self.mapper subq_path = context.attributes.get(('subquery_path', None), orm_util.PathRegistry.root) subq_path = subq_path + path # if not via query option, check for # a cycle if not path.contains(context, "loaderstrategy"): if self.join_depth: if path.length / 2 > self.join_depth: return elif subq_path.contains_mapper(self.mapper): return subq_mapper, leftmost_mapper, leftmost_attr, leftmost_relationship = \ self._get_leftmost(subq_path) orig_query = context.attributes.get( ("orig_query", SubqueryLoader), context.query) # generate a new Query from the original, then # produce a subquery from it. left_alias = self._generate_from_original_query( orig_query, leftmost_mapper, leftmost_attr, leftmost_relationship, entity.mapper ) # generate another Query that will join the # left alias to the target relationships. # basically doing a longhand # "from_self()". (from_self() itself not quite industrial # strength enough for all contingencies...but very close) q = orig_query.session.query(effective_entity) q._attributes = { ("orig_query", SubqueryLoader): orig_query, ('subquery_path', None): subq_path } q = q._enable_single_crit(False) to_join, local_attr, parent_alias = \ self._prep_for_joins(left_alias, subq_path) q = q.order_by(*local_attr) q = q.add_columns(*local_attr) q = self._apply_joins(q, to_join, left_alias, parent_alias, effective_entity) q = self._setup_options(q, subq_path, orig_query, effective_entity) q = self._setup_outermost_orderby(q) # add new query to attributes to be picked up # by create_row_processor path.set(context, "subquery", q) def _get_leftmost(self, subq_path): subq_path = subq_path.path subq_mapper = orm_util._class_to_mapper(subq_path[0]) # determine attributes of the leftmost mapper if self.parent.isa(subq_mapper) and self.parent_property is subq_path[1]: leftmost_mapper, leftmost_prop = \ self.parent, self.parent_property else: leftmost_mapper, leftmost_prop = \ subq_mapper, \ subq_path[1] leftmost_cols = leftmost_prop.local_columns leftmost_attr = [ leftmost_mapper._columntoproperty[c].class_attribute for c in leftmost_cols ] return subq_mapper, leftmost_mapper, leftmost_attr, leftmost_prop def _generate_from_original_query(self, orig_query, leftmost_mapper, leftmost_attr, leftmost_relationship, entity_mapper ): # reformat the original query # to look only for significant columns q = orig_query._clone().correlate(None) # set a real "from" if not present, as this is more # accurate than just going off of the column expression if not q._from_obj and entity_mapper.isa(leftmost_mapper): q._set_select_from([entity_mapper], False) target_cols = q._adapt_col_list(leftmost_attr) # select from the identity columns of the outer q._set_entities(target_cols) distinct_target_key = leftmost_relationship.distinct_target_key if distinct_target_key is True: q._distinct = True elif distinct_target_key is None: # if target_cols refer to a non-primary key or only # part of a composite primary key, set the q as distinct for t in set(c.table for c in target_cols): if not set(target_cols).issuperset(t.primary_key): q._distinct = True break if q._order_by is False: q._order_by = leftmost_mapper.order_by # don't need ORDER BY if no limit/offset if q._limit is None and q._offset is None: q._order_by = None # the original query now becomes a subquery # which we'll join onto. 
embed_q = q.with_labels().subquery() left_alias = orm_util.AliasedClass(leftmost_mapper, embed_q, use_mapper_path=True) return left_alias def _prep_for_joins(self, left_alias, subq_path): # figure out what's being joined. a.k.a. the fun part to_join = [] pairs = list(subq_path.pairs()) for i, (mapper, prop) in enumerate(pairs): if i > 0: # look at the previous mapper in the chain - # if it is as or more specific than this prop's # mapper, use that instead. # note we have an assumption here that # the non-first element is always going to be a mapper, # not an AliasedClass prev_mapper = pairs[i - 1][1].mapper to_append = prev_mapper if prev_mapper.isa(mapper) else mapper else: to_append = mapper to_join.append((to_append, prop.key)) # determine the immediate parent class we are joining from, # which needs to be aliased. if len(to_join) > 1: info = inspect(to_join[-1][0]) if len(to_join) < 2: # in the case of a one level eager load, this is the # leftmost "left_alias". parent_alias = left_alias elif info.mapper.isa(self.parent): # In the case of multiple levels, retrieve # it from subq_path[-2]. This is the same as self.parent # in the vast majority of cases, and [ticket:2014] # illustrates a case where sub_path[-2] is a subclass # of self.parent parent_alias = orm_util.AliasedClass(to_join[-1][0], use_mapper_path=True) else: # if of_type() were used leading to this relationship, # self.parent is more specific than subq_path[-2] parent_alias = orm_util.AliasedClass(self.parent, use_mapper_path=True) local_cols = self.parent_property.local_columns local_attr = [ getattr(parent_alias, self.parent._columntoproperty[c].key) for c in local_cols ] return to_join, local_attr, parent_alias def _apply_joins(self, q, to_join, left_alias, parent_alias, effective_entity): for i, (mapper, key) in enumerate(to_join): # we need to use query.join() as opposed to # orm.join() here because of the # rich behavior it brings when dealing with # "with_polymorphic" mappers. "aliased" # and "from_joinpoint" take care of most of # the chaining and aliasing for us. first = i == 0 middle = i < len(to_join) - 1 second_to_last = i == len(to_join) - 2 last = i == len(to_join) - 1 if first: attr = getattr(left_alias, key) if last and effective_entity is not self.mapper: attr = attr.of_type(effective_entity) else: if last and effective_entity is not self.mapper: attr = getattr(parent_alias, key).\ of_type(effective_entity) else: attr = key if second_to_last: q = q.join(parent_alias, attr, from_joinpoint=True) else: q = q.join(attr, aliased=middle, from_joinpoint=True) return q def _setup_options(self, q, subq_path, orig_query, effective_entity): # propagate loader options etc. to the new query. # these will fire relative to subq_path. q = q._with_current_path(subq_path) q = q._conditional_options(*orig_query._with_options) if orig_query._populate_existing: q._populate_existing = orig_query._populate_existing return q def _setup_outermost_orderby(self, q): if self.parent_property.order_by: # if there's an ORDER BY, alias it the same # way joinedloader does, but we have to pull out # the "eagerjoin" from the query. # this really only picks up the "secondary" table # right now. 
eagerjoin = q._from_obj[0] eager_order_by = \ eagerjoin._target_adapter.\ copy_and_process( util.to_list( self.parent_property.order_by ) ) q = q.order_by(*eager_order_by) return q def create_row_processor(self, context, path, mapper, row, adapter): if not self.parent.class_manager[self.key].impl.supports_population: raise sa_exc.InvalidRequestError( "'%s' does not support object " "population - eager loading cannot be applied." % self) path = path[self.parent_property] subq = path.get(context, 'subquery') if subq is None: return None, None, None local_cols = self.parent_property.local_columns # cache the loaded collections in the context # so that inheriting mappers don't re-load when they # call upon create_row_processor again collections = path.get(context, "collections") if collections is None: collections = dict( (k, [v[0] for v in v]) for k, v in itertools.groupby( subq, lambda x: x[1:] )) path.set(context, 'collections', collections) if adapter: local_cols = [adapter.columns[c] for c in local_cols] if self.uselist: return self._create_collection_loader(collections, local_cols) else: return self._create_scalar_loader(collections, local_cols) def _create_collection_loader(self, collections, local_cols): def load_collection_from_subq(state, dict_, row): collection = collections.get( tuple([row[col] for col in local_cols]), () ) state.get_impl(self.key).\ set_committed_value(state, dict_, collection) return load_collection_from_subq, None, None def _create_scalar_loader(self, collections, local_cols): def load_scalar_from_subq(state, dict_, row): collection = collections.get( tuple([row[col] for col in local_cols]), (None,) ) if len(collection) > 1: util.warn( "Multiple rows returned with " "uselist=False for eagerly-loaded attribute '%s' " % self) scalar = collection[0] state.get_impl(self.key).\ set_committed_value(state, dict_, scalar) return load_scalar_from_subq, None, None log.class_logger(SubqueryLoader) class JoinedLoader(AbstractRelationshipLoader): """Provide loading behavior for a :class:`.RelationshipProperty` using joined eager loading. 
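E.g., this strategy is typically invoked via the :func:`.joinedload`
query option, embedding a LEFT OUTER JOIN into the parent statement
rather than emitting a second SELECT (a minimal sketch;
``User.addresses`` is assumed to be a mapped relationship())::

    from sqlalchemy.orm import joinedload

    users = session.query(User).\
        options(joinedload(User.addresses)).all()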
""" def __init__(self, parent): super(JoinedLoader, self).__init__(parent) self.join_depth = self.parent_property.join_depth def init_class_attribute(self, mapper): self.parent_property.\ _get_strategy(LazyLoader).init_class_attribute(mapper) def setup_query(self, context, entity, path, adapter, \ column_collection=None, parentmapper=None, allow_innerjoin=True, **kwargs): """Add a left outer join to the statement thats being constructed.""" if not context.query._enable_eagerloads: return path = path[self.parent_property] with_polymorphic = None user_defined_adapter = path.get(context, "user_defined_eager_row_processor", False) if user_defined_adapter is not False: clauses, adapter, add_to_collection = \ self._get_user_defined_adapter( context, entity, path, adapter, user_defined_adapter ) else: # if not via query option, check for # a cycle if not path.contains(context, "loaderstrategy"): if self.join_depth: if path.length / 2 > self.join_depth: return elif path.contains_mapper(self.mapper): return clauses, adapter, add_to_collection, \ allow_innerjoin = self._generate_row_adapter( context, entity, path, adapter, column_collection, parentmapper, allow_innerjoin ) with_poly_info = path.get( context, "path_with_polymorphic", None ) if with_poly_info is not None: with_polymorphic = with_poly_info.with_polymorphic_mappers else: with_polymorphic = None path = path[self.mapper] for value in self.mapper._iterate_polymorphic_properties( mappers=with_polymorphic): value.setup( context, entity, path, clauses, parentmapper=self.mapper, column_collection=add_to_collection, allow_innerjoin=allow_innerjoin) def _get_user_defined_adapter(self, context, entity, path, adapter, user_defined_adapter): adapter = entity._get_entity_clauses(context.query, context) if adapter and user_defined_adapter: user_defined_adapter = user_defined_adapter.wrap(adapter) path.set(context, "user_defined_eager_row_processor", user_defined_adapter) elif adapter: user_defined_adapter = adapter path.set(context, "user_defined_eager_row_processor", user_defined_adapter) add_to_collection = context.primary_columns return user_defined_adapter, adapter, add_to_collection def _generate_row_adapter(self, context, entity, path, adapter, column_collection, parentmapper, allow_innerjoin ): with_poly_info = path.get( context, "path_with_polymorphic", None ) if with_poly_info: to_adapt = with_poly_info.entity else: to_adapt = orm_util.AliasedClass(self.mapper, use_mapper_path=True) clauses = orm_util.ORMAdapter( to_adapt, equivalents=self.mapper._equivalent_columns, adapt_required=True) assert clauses.aliased_class is not None if self.parent_property.direction != interfaces.MANYTOONE: context.multi_row_eager_loaders = True innerjoin = allow_innerjoin and path.get(context, "eager_join_type", self.parent_property.innerjoin) if not innerjoin: # if this is an outer join, all eager joins from # here must also be outer joins allow_innerjoin = False context.create_eager_joins.append( (self._create_eager_join, context, entity, path, adapter, parentmapper, clauses, innerjoin) ) add_to_collection = context.secondary_columns path.set(context, "eager_row_processor", clauses) return clauses, adapter, add_to_collection, allow_innerjoin def _create_eager_join(self, context, entity, path, adapter, parentmapper, clauses, innerjoin): if parentmapper is None: localparent = entity.mapper else: localparent = parentmapper # whether or not the Query will wrap the selectable in a subquery, # and then attach eager load joins to that (i.e., in the case of # 
LIMIT/OFFSET etc.) should_nest_selectable = context.multi_row_eager_loaders and \ context.query._should_nest_selectable entity_key = None if entity not in context.eager_joins and \ not should_nest_selectable and \ context.from_clause: index, clause = \ sql_util.find_join_source( context.from_clause, entity.selectable) if clause is not None: # join to an existing FROM clause on the query. # key it to its list index in the eager_joins dict. # Query._compile_context will adapt as needed and # append to the FROM clause of the select(). entity_key, default_towrap = index, clause if entity_key is None: entity_key, default_towrap = entity, entity.selectable towrap = context.eager_joins.setdefault(entity_key, default_towrap) if adapter: if getattr(adapter, 'aliased_class', None): onclause = getattr( adapter.aliased_class, self.key, self.parent_property) else: onclause = getattr( orm_util.AliasedClass( self.parent, adapter.selectable, use_mapper_path=True ), self.key, self.parent_property ) else: onclause = self.parent_property assert clauses.aliased_class is not None context.eager_joins[entity_key] = eagerjoin = \ orm_util.join( towrap, clauses.aliased_class, onclause, isouter=not innerjoin ) # send a hint to the Query as to where it may "splice" this join eagerjoin.stop_on = entity.selectable if self.parent_property.secondary is None and \ not parentmapper: # for parentclause that is the non-eager end of the join, # ensure all the parent cols in the primaryjoin are actually # in the # columns clause (i.e. are not deferred), so that aliasing applied # by the Query propagates those columns outward. # This has the effect # of "undefering" those columns. for col in sql_util.find_columns( self.parent_property.primaryjoin): if localparent.mapped_table.c.contains_column(col): if adapter: col = adapter.columns[col] context.primary_columns.append(col) if self.parent_property.order_by: context.eager_order_by += \ eagerjoin._target_adapter.\ copy_and_process( util.to_list( self.parent_property.order_by ) ) def _create_eager_adapter(self, context, row, adapter, path): user_defined_adapter = path.get(context, "user_defined_eager_row_processor", False) if user_defined_adapter is not False: decorator = user_defined_adapter # user defined eagerloads are part of the "primary" # portion of the load. # the adapters applied to the Query should be honored. if context.adapter and decorator: decorator = decorator.wrap(context.adapter) elif context.adapter: decorator = context.adapter else: decorator = path.get(context, "eager_row_processor") if decorator is None: return False try: self.mapper.identity_key_from_row(row, decorator) return decorator except KeyError: # no identity key - dont return a row # processor, will cause a degrade to lazy return False def create_row_processor(self, context, path, mapper, row, adapter): if not self.parent.class_manager[self.key].impl.supports_population: raise sa_exc.InvalidRequestError( "'%s' does not support object " "population - eager loading cannot be applied." 
% self) our_path = path[self.parent_property] eager_adapter = self._create_eager_adapter( context, row, adapter, our_path) if eager_adapter is not False: key = self.key _instance = loading.instance_processor( self.mapper, context, our_path[self.mapper], eager_adapter) if not self.uselist: return self._create_scalar_loader(context, key, _instance) else: return self._create_collection_loader(context, key, _instance) else: return self.parent_property.\ _get_strategy(LazyLoader).\ create_row_processor( context, path, mapper, row, adapter) def _create_collection_loader(self, context, key, _instance): def load_collection_from_joined_new_row(state, dict_, row): collection = attributes.init_state_collection( state, dict_, key) result_list = util.UniqueAppender(collection, 'append_without_event') context.attributes[(state, key)] = result_list _instance(row, result_list) def load_collection_from_joined_existing_row(state, dict_, row): if (state, key) in context.attributes: result_list = context.attributes[(state, key)] else: # appender_key can be absent from context.attributes # with isnew=False when self-referential eager loading # is used; the same instance may be present in two # distinct sets of result columns collection = attributes.init_state_collection(state, dict_, key) result_list = util.UniqueAppender( collection, 'append_without_event') context.attributes[(state, key)] = result_list _instance(row, result_list) def load_collection_from_joined_exec(state, dict_, row): _instance(row, None) return load_collection_from_joined_new_row, \ load_collection_from_joined_existing_row, \ None, load_collection_from_joined_exec def _create_scalar_loader(self, context, key, _instance): def load_scalar_from_joined_new_row(state, dict_, row): # set a scalar object instance directly on the parent # object, bypassing InstrumentedAttribute event handlers. 
dict_[key] = _instance(row, None) def load_scalar_from_joined_existing_row(state, dict_, row): # call _instance on the row, even though the object has # been created, so that we further descend into properties existing = _instance(row, None) if existing is not None \ and key in dict_ \ and existing is not dict_[key]: util.warn( "Multiple rows returned with " "uselist=False for eagerly-loaded attribute '%s' " % self) def load_scalar_from_joined_exec(state, dict_, row): _instance(row, None) return load_scalar_from_joined_new_row, \ load_scalar_from_joined_existing_row, \ None, load_scalar_from_joined_exec log.class_logger(JoinedLoader) class EagerLazyOption(StrategizedOption): def __init__(self, key, lazy=True, chained=False, propagate_to_loaders=True ): if isinstance(key[0], basestring) and key[0] == '*': if len(key) != 1: raise sa_exc.ArgumentError( "Wildcard identifier '*' must " "be specified alone.") key = ("relationship:*",) propagate_to_loaders = False super(EagerLazyOption, self).__init__(key) self.lazy = lazy self.chained = chained self.propagate_to_loaders = propagate_to_loaders self.strategy_cls = factory(lazy) def get_strategy_class(self): return self.strategy_cls _factory = { False: JoinedLoader, "joined": JoinedLoader, None: NoLoader, "noload": NoLoader, "select": LazyLoader, True: LazyLoader, "subquery": SubqueryLoader, "immediate": ImmediateLoader } def factory(identifier): return _factory.get(identifier, LazyLoader) class EagerJoinOption(PropertyOption): def __init__(self, key, innerjoin, chained=False): super(EagerJoinOption, self).__init__(key) self.innerjoin = innerjoin self.chained = chained def process_query_property(self, query, paths): if self.chained: for path in paths: path.set(query, "eager_join_type", self.innerjoin) else: paths[-1].set(query, "eager_join_type", self.innerjoin) class LoadEagerFromAliasOption(PropertyOption): def __init__(self, key, alias=None, chained=False): super(LoadEagerFromAliasOption, self).__init__(key) if alias is not None: if not isinstance(alias, basestring): info = inspect(alias) alias = info.selectable self.alias = alias self.chained = chained def process_query_property(self, query, paths): if self.chained: for path in paths[0:-1]: (root_mapper, prop) = path.path[-2:] adapter = query._polymorphic_adapters.get(prop.mapper, None) path.setdefault(query, "user_defined_eager_row_processor", adapter) root_mapper, prop = paths[-1].path[-2:] if self.alias is not None: if isinstance(self.alias, basestring): self.alias = prop.target.alias(self.alias) paths[-1].set(query, "user_defined_eager_row_processor", sql_util.ColumnAdapter(self.alias, equivalents=prop.mapper._equivalent_columns) ) else: if paths[-1].contains(query, "path_with_polymorphic"): with_poly_info = paths[-1].get(query, "path_with_polymorphic") adapter = orm_util.ORMAdapter( with_poly_info.entity, equivalents=prop.mapper._equivalent_columns, adapt_required=True) else: adapter = query._polymorphic_adapters.get(prop.mapper, None) paths[-1].set(query, "user_defined_eager_row_processor", adapter) def single_parent_validator(desc, prop): def _do_check(state, value, oldvalue, initiator): if value is not None and initiator.key == prop.key: hasparent = initiator.hasparent(attributes.instance_state(value)) if hasparent and oldvalue is not value: raise sa_exc.InvalidRequestError( "Instance %s is already associated with an instance " "of %s via its %s attribute, and is only allowed a " "single parent." 
% (orm_util.instance_str(value), state.class_, prop) ) return value def append(state, value, initiator): return _do_check(state, value, None, initiator) def set_(state, value, oldvalue, initiator): return _do_check(state, value, oldvalue, initiator) event.listen(desc, 'append', append, raw=True, retval=True, active_history=True) event.listen(desc, 'set', set_, raw=True, retval=True, active_history=True) SQLAlchemy-0.8.4/lib/sqlalchemy/orm/sync.py0000644000076500000240000001113212251147171021263 0ustar classicstaff00000000000000# orm/sync.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """private module containing functions used for copying data between instances based on join conditions. """ from . import exc, util as orm_util, attributes def populate(source, source_mapper, dest, dest_mapper, synchronize_pairs, uowcommit, flag_cascaded_pks): source_dict = source.dict dest_dict = dest.dict for l, r in synchronize_pairs: try: # inline of source_mapper._get_state_attr_by_column prop = source_mapper._columntoproperty[l] value = source.manager[prop.key].impl.get(source, source_dict, attributes.PASSIVE_OFF) except exc.UnmappedColumnError: _raise_col_to_prop(False, source_mapper, l, dest_mapper, r) try: # inline of dest_mapper._set_state_attr_by_column prop = dest_mapper._columntoproperty[r] dest.manager[prop.key].impl.set(dest, dest_dict, value, None) except exc.UnmappedColumnError: _raise_col_to_prop(True, source_mapper, l, dest_mapper, r) # technically the "r.primary_key" check isn't # needed here, but we check for this condition to limit # how often this logic is invoked for memory/performance # reasons, since we only need this info for a primary key # destination. 
if flag_cascaded_pks and l.primary_key and \ r.primary_key and \ r.references(l): uowcommit.attributes[("pk_cascaded", dest, r)] = True def clear(dest, dest_mapper, synchronize_pairs): for l, r in synchronize_pairs: if r.primary_key: raise AssertionError( "Dependency rule tried to blank-out primary key " "column '%s' on instance '%s'" % (r, orm_util.state_str(dest)) ) try: dest_mapper._set_state_attr_by_column(dest, dest.dict, r, None) except exc.UnmappedColumnError: _raise_col_to_prop(True, None, l, dest_mapper, r) def update(source, source_mapper, dest, old_prefix, synchronize_pairs): for l, r in synchronize_pairs: try: oldvalue = source_mapper._get_committed_attr_by_column( source.obj(), l) value = source_mapper._get_state_attr_by_column( source, source.dict, l) except exc.UnmappedColumnError: _raise_col_to_prop(False, source_mapper, l, None, r) dest[r.key] = value dest[old_prefix + r.key] = oldvalue def populate_dict(source, source_mapper, dict_, synchronize_pairs): for l, r in synchronize_pairs: try: value = source_mapper._get_state_attr_by_column( source, source.dict, l) except exc.UnmappedColumnError: _raise_col_to_prop(False, source_mapper, l, None, r) dict_[r.key] = value def source_modified(uowcommit, source, source_mapper, synchronize_pairs): """return true if the source object has changes from an old to a new value on the given synchronize pairs """ for l, r in synchronize_pairs: try: prop = source_mapper._columntoproperty[l] except exc.UnmappedColumnError: _raise_col_to_prop(False, source_mapper, l, None, r) history = uowcommit.get_attribute_history(source, prop.key, attributes.PASSIVE_NO_INITIALIZE) if bool(history.deleted): return True else: return False def _raise_col_to_prop(isdest, source_mapper, source_column, dest_mapper, dest_column): if isdest: raise exc.UnmappedColumnError("Can't execute sync rule for " "destination column '%s'; mapper '%s' does not map " "this column. Try using an explicit `foreign_keys` " "collection which does not include this column (or use " "a viewonly=True relation)." % (dest_column, dest_mapper)) else: raise exc.UnmappedColumnError("Can't execute sync rule for " "source column '%s'; mapper '%s' does not map this " "column. Try using an explicit `foreign_keys` " "collection which does not include destination column " "'%s' (or use a viewonly=True relation)." % (source_column, source_mapper, dest_column)) SQLAlchemy-0.8.4/lib/sqlalchemy/orm/unitofwork.py0000644000076500000240000005542712251150015022524 0ustar classicstaff00000000000000# orm/unitofwork.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """The internals for the unit of work system. The session's flush() process passes objects to a contextual object here, which assembles flush tasks based on mappers and their properties, organizes them in order of dependency, and executes. """ from .. import util, event from ..util import topological from . import attributes, persistence, util as orm_util sessionlib = util.importlater("sqlalchemy.orm", "session") def track_cascade_events(descriptor, prop): """Establish event listeners on object attributes which handle cascade-on-set/append. 
""" key = prop.key def append(state, item, initiator): # process "save_update" cascade rules for when # an instance is appended to the list of another instance if item is None: return sess = sessionlib._state_session(state) if sess: if sess._warn_on_events: sess._flush_warning("collection append") prop = state.manager.mapper._props[key] item_state = attributes.instance_state(item) if prop._cascade.save_update and \ (prop.cascade_backrefs or key == initiator.key) and \ not sess._contains_state(item_state): sess._save_or_update_state(item_state) return item def remove(state, item, initiator): if item is None: return sess = sessionlib._state_session(state) if sess: prop = state.manager.mapper._props[key] if sess._warn_on_events: sess._flush_warning( "collection remove" if prop.uselist else "related attribute delete") # expunge pending orphans item_state = attributes.instance_state(item) if prop._cascade.delete_orphan and \ item_state in sess._new and \ prop.mapper._is_orphan(item_state): sess.expunge(item) def set_(state, newvalue, oldvalue, initiator): # process "save_update" cascade rules for when an instance # is attached to another instance if oldvalue is newvalue: return newvalue sess = sessionlib._state_session(state) if sess: if sess._warn_on_events: sess._flush_warning("related attribute set") prop = state.manager.mapper._props[key] if newvalue is not None: newvalue_state = attributes.instance_state(newvalue) if prop._cascade.save_update and \ (prop.cascade_backrefs or key == initiator.key) and \ not sess._contains_state(newvalue_state): sess._save_or_update_state(newvalue_state) if oldvalue is not None and \ oldvalue is not attributes.PASSIVE_NO_RESULT and \ prop._cascade.delete_orphan: # possible to reach here with attributes.NEVER_SET ? oldvalue_state = attributes.instance_state(oldvalue) if oldvalue_state in sess._new and \ prop.mapper._is_orphan(oldvalue_state): sess.expunge(oldvalue) return newvalue event.listen(descriptor, 'append', append, raw=True, retval=True) event.listen(descriptor, 'remove', remove, raw=True, retval=True) event.listen(descriptor, 'set', set_, raw=True, retval=True) class UOWTransaction(object): def __init__(self, session): self.session = session # dictionary used by external actors to # store arbitrary state information. self.attributes = {} # dictionary of mappers to sets of # DependencyProcessors, which are also # set to be part of the sorted flush actions, # which have that mapper as a parent. self.deps = util.defaultdict(set) # dictionary of mappers to sets of InstanceState # items pending for flush which have that mapper # as a parent. self.mappers = util.defaultdict(set) # a dictionary of Preprocess objects, which gather # additional states impacted by the flush # and determine if a flush action is needed self.presort_actions = {} # dictionary of PostSortRec objects, each # one issues work during the flush within # a certain ordering. self.postsort_actions = {} # a set of 2-tuples, each containing two # PostSortRec objects where the second # is dependent on the first being executed # first self.dependencies = set() # dictionary of InstanceState-> (isdelete, listonly) # tuples, indicating if this state is to be deleted # or insert/updated, or just refreshed self.states = {} # tracks InstanceStates which will be receiving # a "post update" call. Keys are mappers, # values are a set of states and a set of the # columns which should be included in the update. 
self.post_update_states = util.defaultdict(lambda: (set(), set())) @property def has_work(self): return bool(self.states) def is_deleted(self, state): """return true if the given state is marked as deleted within this uowtransaction.""" return state in self.states and self.states[state][0] def memo(self, key, callable_): if key in self.attributes: return self.attributes[key] else: self.attributes[key] = ret = callable_() return ret def remove_state_actions(self, state): """remove pending actions for a state from the uowtransaction.""" isdelete = self.states[state][0] self.states[state] = (isdelete, True) def get_attribute_history(self, state, key, passive=attributes.PASSIVE_NO_INITIALIZE): """facade to attributes.get_state_history(), including caching of results.""" hashkey = ("history", state, key) # cache the objects, not the states; the strong reference here # prevents newly loaded objects from being dereferenced during the # flush process if hashkey in self.attributes: history, state_history, cached_passive = self.attributes[hashkey] # if the cached lookup was "passive" and now # we want non-passive, do a non-passive lookup and re-cache if not cached_passive & attributes.SQL_OK \ and passive & attributes.SQL_OK: impl = state.manager[key].impl history = impl.get_history(state, state.dict, attributes.PASSIVE_OFF | attributes.LOAD_AGAINST_COMMITTED) if history and impl.uses_objects: state_history = history.as_state() else: state_history = history self.attributes[hashkey] = (history, state_history, passive) else: impl = state.manager[key].impl # TODO: store the history as (state, object) tuples # so we don't have to keep converting here history = impl.get_history(state, state.dict, passive | attributes.LOAD_AGAINST_COMMITTED) if history and impl.uses_objects: state_history = history.as_state() else: state_history = history self.attributes[hashkey] = (history, state_history, passive) return state_history def has_dep(self, processor): return (processor, True) in self.presort_actions def register_preprocessor(self, processor, fromparent): key = (processor, fromparent) if key not in self.presort_actions: self.presort_actions[key] = Preprocess(processor, fromparent) def register_object(self, state, isdelete=False, listonly=False, cancel_delete=False, operation=None, prop=None): if not self.session._contains_state(state): if not state.deleted and operation is not None: util.warn("Object of type %s not in session, %s operation " "along '%s' will not proceed" % (orm_util.state_class_str(state), operation, prop)) return False if state not in self.states: mapper = state.manager.mapper if mapper not in self.mappers: self._per_mapper_flush_actions(mapper) self.mappers[mapper].add(state) self.states[state] = (isdelete, listonly) else: if not listonly and (isdelete or cancel_delete): self.states[state] = (isdelete, False) return True def issue_post_update(self, state, post_update_cols): mapper = state.manager.mapper.base_mapper states, cols = self.post_update_states[mapper] states.add(state) cols.update(post_update_cols) def _per_mapper_flush_actions(self, mapper): saves = SaveUpdateAll(self, mapper.base_mapper) deletes = DeleteAll(self, mapper.base_mapper) self.dependencies.add((saves, deletes)) for dep in mapper._dependency_processors: dep.per_property_preprocessors(self) for prop in mapper.relationships: if prop.viewonly: continue dep = prop._dependency_processor dep.per_property_preprocessors(self) @util.memoized_property def _mapper_for_dep(self): """return a dynamic mapping of (Mapper, 
DependencyProcessor) to True or False, indicating if the DependencyProcessor operates on objects of that Mapper. The result is stored in the dictionary persistently once calculated. """ return util.PopulateDict( lambda tup: tup[0]._props.get(tup[1].key) is tup[1].prop ) def filter_states_for_dep(self, dep, states): """Filter the given list of InstanceStates to those relevant to the given DependencyProcessor. """ mapper_for_dep = self._mapper_for_dep return [s for s in states if mapper_for_dep[(s.manager.mapper, dep)]] def states_for_mapper_hierarchy(self, mapper, isdelete, listonly): checktup = (isdelete, listonly) for mapper in mapper.base_mapper.self_and_descendants: for state in self.mappers[mapper]: if self.states[state] == checktup: yield state def _generate_actions(self): """Generate the full, unsorted collection of PostSortRecs as well as dependency pairs for this UOWTransaction. """ # execute presort_actions, until all states # have been processed. a presort_action might # add new states to the uow. while True: ret = False for action in list(self.presort_actions.values()): if action.execute(self): ret = True if not ret: break # see if the graph of mapper dependencies has cycles. self.cycles = cycles = topological.find_cycles( self.dependencies, self.postsort_actions.values()) if cycles: # if yes, break the per-mapper actions into # per-state actions convert = dict( (rec, set(rec.per_state_flush_actions(self))) for rec in cycles ) # rewrite the existing dependencies to point to # the per-state actions for those per-mapper actions # that were broken up. for edge in list(self.dependencies): if None in edge or \ edge[0].disabled or edge[1].disabled or \ cycles.issuperset(edge): self.dependencies.remove(edge) elif edge[0] in cycles: self.dependencies.remove(edge) for dep in convert[edge[0]]: self.dependencies.add((dep, edge[1])) elif edge[1] in cycles: self.dependencies.remove(edge) for dep in convert[edge[1]]: self.dependencies.add((edge[0], dep)) return set([a for a in self.postsort_actions.values() if not a.disabled ] ).difference(cycles) def execute(self): postsort_actions = self._generate_actions() #sort = topological.sort(self.dependencies, postsort_actions) #print "--------------" #print "\ndependencies:", self.dependencies #print "\ncycles:", self.cycles #print "\nsort:", list(sort) #print "\nCOUNT OF POSTSORT ACTIONS", len(postsort_actions) # execute if self.cycles: for set_ in topological.sort_as_subsets( self.dependencies, postsort_actions): while set_: n = set_.pop() n.execute_aggregate(self, set_) else: for rec in topological.sort( self.dependencies, postsort_actions): rec.execute(self) def finalize_flush_changes(self): """mark processed objects as clean / deleted after a successful flush(). this method is called within the flush() method after the execute() method has succeeded and the transaction has been committed. 
""" states = set(self.states) isdel = set( s for (s, (isdelete, listonly)) in self.states.iteritems() if isdelete ) other = states.difference(isdel) self.session._remove_newly_deleted(isdel) self.session._register_newly_persistent(other) class IterateMappersMixin(object): def _mappers(self, uow): if self.fromparent: return iter( m for m in self.dependency_processor.parent.self_and_descendants if uow._mapper_for_dep[(m, self.dependency_processor)] ) else: return self.dependency_processor.mapper.self_and_descendants class Preprocess(IterateMappersMixin): def __init__(self, dependency_processor, fromparent): self.dependency_processor = dependency_processor self.fromparent = fromparent self.processed = set() self.setup_flush_actions = False def execute(self, uow): delete_states = set() save_states = set() for mapper in self._mappers(uow): for state in uow.mappers[mapper].difference(self.processed): (isdelete, listonly) = uow.states[state] if not listonly: if isdelete: delete_states.add(state) else: save_states.add(state) if delete_states: self.dependency_processor.presort_deletes(uow, delete_states) self.processed.update(delete_states) if save_states: self.dependency_processor.presort_saves(uow, save_states) self.processed.update(save_states) if (delete_states or save_states): if not self.setup_flush_actions and ( self.dependency_processor.\ prop_has_changes(uow, delete_states, True) or self.dependency_processor.\ prop_has_changes(uow, save_states, False) ): self.dependency_processor.per_property_flush_actions(uow) self.setup_flush_actions = True return True else: return False class PostSortRec(object): disabled = False def __new__(cls, uow, *args): key = (cls, ) + args if key in uow.postsort_actions: return uow.postsort_actions[key] else: uow.postsort_actions[key] = \ ret = \ object.__new__(cls) return ret def execute_aggregate(self, uow, recs): self.execute(uow) def __repr__(self): return "%s(%s)" % ( self.__class__.__name__, ",".join(str(x) for x in self.__dict__.values()) ) class ProcessAll(IterateMappersMixin, PostSortRec): def __init__(self, uow, dependency_processor, delete, fromparent): self.dependency_processor = dependency_processor self.delete = delete self.fromparent = fromparent uow.deps[dependency_processor.parent.base_mapper].\ add(dependency_processor) def execute(self, uow): states = self._elements(uow) if self.delete: self.dependency_processor.process_deletes(uow, states) else: self.dependency_processor.process_saves(uow, states) def per_state_flush_actions(self, uow): # this is handled by SaveUpdateAll and DeleteAll, # since a ProcessAll should unconditionally be pulled # into per-state if either the parent/child mappers # are part of a cycle return iter([]) def __repr__(self): return "%s(%s, delete=%s)" % ( self.__class__.__name__, self.dependency_processor, self.delete ) def _elements(self, uow): for mapper in self._mappers(uow): for state in uow.mappers[mapper]: (isdelete, listonly) = uow.states[state] if isdelete == self.delete and not listonly: yield state class IssuePostUpdate(PostSortRec): def __init__(self, uow, mapper, isdelete): self.mapper = mapper self.isdelete = isdelete def execute(self, uow): states, cols = uow.post_update_states[self.mapper] states = [s for s in states if uow.states[s][0] == self.isdelete] persistence.post_update(self.mapper, states, uow, cols) class SaveUpdateAll(PostSortRec): def __init__(self, uow, mapper): self.mapper = mapper assert mapper is mapper.base_mapper def execute(self, uow): persistence.save_obj(self.mapper, 
uow.states_for_mapper_hierarchy(self.mapper, False, False), uow ) def per_state_flush_actions(self, uow): states = list(uow.states_for_mapper_hierarchy( self.mapper, False, False)) base_mapper = self.mapper.base_mapper delete_all = DeleteAll(uow, base_mapper) for state in states: # keep saves before deletes - # this ensures 'row switch' operations work action = SaveUpdateState(uow, state, base_mapper) uow.dependencies.add((action, delete_all)) yield action for dep in uow.deps[self.mapper]: states_for_prop = uow.filter_states_for_dep(dep, states) dep.per_state_flush_actions(uow, states_for_prop, False) class DeleteAll(PostSortRec): def __init__(self, uow, mapper): self.mapper = mapper assert mapper is mapper.base_mapper def execute(self, uow): persistence.delete_obj(self.mapper, uow.states_for_mapper_hierarchy(self.mapper, True, False), uow ) def per_state_flush_actions(self, uow): states = list(uow.states_for_mapper_hierarchy( self.mapper, True, False)) base_mapper = self.mapper.base_mapper save_all = SaveUpdateAll(uow, base_mapper) for state in states: # keep saves before deletes - # this ensures 'row switch' operations work action = DeleteState(uow, state, base_mapper) uow.dependencies.add((save_all, action)) yield action for dep in uow.deps[self.mapper]: states_for_prop = uow.filter_states_for_dep(dep, states) dep.per_state_flush_actions(uow, states_for_prop, True) class ProcessState(PostSortRec): def __init__(self, uow, dependency_processor, delete, state): self.dependency_processor = dependency_processor self.delete = delete self.state = state def execute_aggregate(self, uow, recs): cls_ = self.__class__ dependency_processor = self.dependency_processor delete = self.delete our_recs = [r for r in recs if r.__class__ is cls_ and r.dependency_processor is dependency_processor and r.delete is delete] recs.difference_update(our_recs) states = [self.state] + [r.state for r in our_recs] if delete: dependency_processor.process_deletes(uow, states) else: dependency_processor.process_saves(uow, states) def __repr__(self): return "%s(%s, %s, delete=%s)" % ( self.__class__.__name__, self.dependency_processor, orm_util.state_str(self.state), self.delete ) class SaveUpdateState(PostSortRec): def __init__(self, uow, state, mapper): self.state = state self.mapper = mapper def execute_aggregate(self, uow, recs): cls_ = self.__class__ mapper = self.mapper our_recs = [r for r in recs if r.__class__ is cls_ and r.mapper is mapper] recs.difference_update(our_recs) persistence.save_obj(mapper, [self.state] + [r.state for r in our_recs], uow) def __repr__(self): return "%s(%s)" % ( self.__class__.__name__, orm_util.state_str(self.state) ) class DeleteState(PostSortRec): def __init__(self, uow, state, mapper): self.state = state self.mapper = mapper def execute_aggregate(self, uow, recs): cls_ = self.__class__ mapper = self.mapper our_recs = [r for r in recs if r.__class__ is cls_ and r.mapper is mapper] recs.difference_update(our_recs) states = [self.state] + [r.state for r in our_recs] persistence.delete_obj(mapper, [s for s in states if uow.states[s][0]], uow) def __repr__(self): return "%s(%s)" % ( self.__class__.__name__, orm_util.state_str(self.state) ) SQLAlchemy-0.8.4/lib/sqlalchemy/orm/util.py0000644000076500000240000013130412251150015021257 0ustar classicstaff00000000000000# orm/util.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php from .. 
import sql, util, event, exc as sa_exc, inspection from ..sql import expression, util as sql_util, operators from .interfaces import PropComparator, MapperProperty, _InspectionAttr from itertools import chain from . import attributes, exc import re mapperlib = util.importlater("sqlalchemy.orm", "mapperlib") all_cascades = frozenset(("delete", "delete-orphan", "all", "merge", "expunge", "save-update", "refresh-expire", "none")) _INSTRUMENTOR = ('mapper', 'instrumentor') _none_set = frozenset([None]) class CascadeOptions(frozenset): """Keeps track of the options sent to relationship().cascade""" _add_w_all_cascades = all_cascades.difference([ 'all', 'none', 'delete-orphan']) _allowed_cascades = all_cascades def __new__(cls, arg): values = set([ c for c in re.split('\s*,\s*', arg or "") if c ]) if values.difference(cls._allowed_cascades): raise sa_exc.ArgumentError( "Invalid cascade option(s): %s" % ", ".join([repr(x) for x in sorted( values.difference(cls._allowed_cascades) )]) ) if "all" in values: values.update(cls._add_w_all_cascades) if "none" in values: values.clear() values.discard('all') self = frozenset.__new__(CascadeOptions, values) self.save_update = 'save-update' in values self.delete = 'delete' in values self.refresh_expire = 'refresh-expire' in values self.merge = 'merge' in values self.expunge = 'expunge' in values self.delete_orphan = "delete-orphan" in values if self.delete_orphan and not self.delete: util.warn("The 'delete-orphan' cascade " "option requires 'delete'.") return self def __repr__(self): return "CascadeOptions(%r)" % ( ",".join([x for x in sorted(self)]) ) def _validator_events(desc, key, validator, include_removes): """Runs a validation method on an attribute value to be set or appended.""" if include_removes: def append(state, value, initiator): return validator(state.obj(), key, value, False) def set_(state, value, oldvalue, initiator): return validator(state.obj(), key, value, False) def remove(state, value, initiator): validator(state.obj(), key, value, True) else: def append(state, value, initiator): return validator(state.obj(), key, value) def set_(state, value, oldvalue, initiator): return validator(state.obj(), key, value) event.listen(desc, 'append', append, raw=True, retval=True) event.listen(desc, 'set', set_, raw=True, retval=True) if include_removes: event.listen(desc, "remove", remove, raw=True, retval=True) def polymorphic_union(table_map, typecolname, aliasname='p_union', cast_nulls=True): """Create a ``UNION`` statement used by a polymorphic mapper. See :ref:`concrete_inheritance` for an example of how this is used. :param table_map: mapping of polymorphic identities to :class:`.Table` objects. :param typecolname: string name of a "discriminator" column, which will be derived from the query, producing the polymorphic identity for each row. If ``None``, no polymorphic discriminator is generated. :param aliasname: name of the :func:`~sqlalchemy.sql.expression.alias()` construct generated. :param cast_nulls: if True, non-existent columns, which are represented as labeled NULLs, will be passed into CAST. This is a legacy behavior that is problematic on some backends such as Oracle - in which case it can be set to False. 
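    A minimal usage sketch; the ``engineers_table`` and ``managers_table``
    names below are assumed for illustration and are not defined by this
    module::

        pjoin = polymorphic_union({
            'engineer': engineers_table,
            'manager': managers_table
        }, 'type', 'pjoin')

    The resulting selectable is then typically used as the
    ``with_polymorphic`` selectable of the base mapper in a concrete
    inheritance mapping.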
""" colnames = util.OrderedSet() colnamemaps = {} types = {} for key in table_map.keys(): table = table_map[key] # mysql doesnt like selecting from a select; # make it an alias of the select if isinstance(table, sql.Select): table = table.alias() table_map[key] = table m = {} for c in table.c: colnames.add(c.key) m[c.key] = c types[c.key] = c.type colnamemaps[table] = m def col(name, table): try: return colnamemaps[table][name] except KeyError: if cast_nulls: return sql.cast(sql.null(), types[name]).label(name) else: return sql.type_coerce(sql.null(), types[name]).label(name) result = [] for type, table in table_map.iteritems(): if typecolname is not None: result.append( sql.select([col(name, table) for name in colnames] + [sql.literal_column(sql_util._quote_ddl_expr(type)). label(typecolname)], from_obj=[table])) else: result.append(sql.select([col(name, table) for name in colnames], from_obj=[table])) return sql.union_all(*result).alias(aliasname) def identity_key(*args, **kwargs): """Generate "identity key" tuples, as are used as keys in the :attr:`.Session.identity_map` dictionary. This function has several call styles: * ``identity_key(class, ident)`` This form receives a mapped class and a primary key scalar or tuple as an argument. E.g.:: >>> identity_key(MyClass, (1, 2)) (, (1, 2)) :param class: mapped class (must be a positional argument) :param ident: primary key, may be a scalar or tuple argument. * ``identity_key(instance=instance)`` This form will produce the identity key for a given instance. The instance need not be persistent, only that its primary key attributes are populated (else the key will contain ``None`` for those missing values). E.g.:: >>> instance = MyClass(1, 2) >>> identity_key(instance=instance) (, (1, 2)) In this form, the given instance is ultimately run though :meth:`.Mapper.identity_key_from_instance`, which will have the effect of performing a database check for the corresponding row if the object is expired. :param instance: object instance (must be given as a keyword arg) * ``identity_key(class, row=row)`` This form is similar to the class/tuple form, except is passed a database result row as a :class:`.RowProxy` object. E.g.:: >>> row = engine.execute("select * from table where a=1 and b=2").first() >>> identity_key(MyClass, row=row) (, (1, 2)) :param class: mapped class (must be a positional argument) :param row: :class:`.RowProxy` row returned by a :class:`.ResultProxy` (must be given as a keyword arg) """ if args: if len(args) == 1: class_ = args[0] try: row = kwargs.pop("row") except KeyError: ident = kwargs.pop("ident") elif len(args) == 2: class_, ident = args elif len(args) == 3: class_, ident = args else: raise sa_exc.ArgumentError("expected up to three " "positional arguments, got %s" % len(args)) if kwargs: raise sa_exc.ArgumentError("unknown keyword arguments: %s" % ", ".join(kwargs.keys())) mapper = class_mapper(class_) if "ident" in locals(): return mapper.identity_key_from_primary_key(util.to_list(ident)) return mapper.identity_key_from_row(row) instance = kwargs.pop("instance") if kwargs: raise sa_exc.ArgumentError("unknown keyword arguments: %s" % ", ".join(kwargs.keys())) mapper = object_mapper(instance) return mapper.identity_key_from_instance(instance) class ORMAdapter(sql_util.ColumnAdapter): """Extends ColumnAdapter to accept ORM entities. The selectable is extracted from the given entity, and the AliasedClass if any is referenced. 
""" def __init__(self, entity, equivalents=None, chain_to=None, adapt_required=False): info = inspection.inspect(entity) self.mapper = info.mapper selectable = info.selectable is_aliased_class = info.is_aliased_class if is_aliased_class: self.aliased_class = entity else: self.aliased_class = None sql_util.ColumnAdapter.__init__(self, selectable, equivalents, chain_to, adapt_required=adapt_required) def replace(self, elem): entity = elem._annotations.get('parentmapper', None) if not entity or entity.isa(self.mapper): return sql_util.ColumnAdapter.replace(self, elem) else: return None def _unreduce_path(path): return PathRegistry.deserialize(path) class PathRegistry(object): """Represent query load paths and registry functions. Basically represents structures like: (, "orders", , "items", ) These structures are generated by things like query options (joinedload(), subqueryload(), etc.) and are used to compose keys stored in the query._attributes dictionary for various options. They are then re-composed at query compile/result row time as the query is formed and as rows are fetched, where they again serve to compose keys to look up options in the context.attributes dictionary, which is copied from query._attributes. The path structure has a limited amount of caching, where each "root" ultimately pulls from a fixed registry associated with the first mapper, that also contains elements for each of its property keys. However paths longer than two elements, which are the exception rather than the rule, are generated on an as-needed basis. """ def __eq__(self, other): return other is not None and \ self.path == other.path def set(self, reg, key, value): reg._attributes[(key, self.path)] = value def setdefault(self, reg, key, value): reg._attributes.setdefault((key, self.path), value) def get(self, reg, key, value=None): key = (key, self.path) if key in reg._attributes: return reg._attributes[key] else: return value def __len__(self): return len(self.path) @property def length(self): return len(self.path) def pairs(self): path = self.path for i in xrange(0, len(path), 2): yield path[i], path[i + 1] def contains_mapper(self, mapper): for path_mapper in [ self.path[i] for i in range(0, len(self.path), 2) ]: if isinstance(path_mapper, mapperlib.Mapper) and \ path_mapper.isa(mapper): return True else: return False def contains(self, reg, key): return (key, self.path) in reg._attributes def __reduce__(self): return _unreduce_path, (self.serialize(), ) def serialize(self): path = self.path return zip( [m.class_ for m in [path[i] for i in range(0, len(path), 2)]], [path[i].key for i in range(1, len(path), 2)] + [None] ) @classmethod def deserialize(cls, path): if path is None: return None p = tuple(chain(*[(class_mapper(mcls), class_mapper(mcls).attrs[key] if key is not None else None) for mcls, key in path])) if p and p[-1] is None: p = p[0:-1] return cls.coerce(p) @classmethod def per_mapper(cls, mapper): return EntityRegistry( cls.root, mapper ) @classmethod def coerce(cls, raw): return util.reduce(lambda prev, next: prev[next], raw, cls.root) @classmethod def token(cls, token): return TokenRegistry(cls.root, token) def __add__(self, other): return util.reduce( lambda prev, next: prev[next], other.path, self) def __repr__(self): return "%s(%r)" % (self.__class__.__name__, self.path, ) class RootRegistry(PathRegistry): """Root registry, defers to mappers so that paths are maintained per-root-mapper. 
""" path = () def __getitem__(self, entity): return entity._path_registry PathRegistry.root = RootRegistry() class TokenRegistry(PathRegistry): def __init__(self, parent, token): self.token = token self.parent = parent self.path = parent.path + (token,) def __getitem__(self, entity): raise NotImplementedError() class PropRegistry(PathRegistry): def __init__(self, parent, prop): # restate this path in terms of the # given MapperProperty's parent. insp = inspection.inspect(parent[-1]) if not insp.is_aliased_class or insp._use_mapper_path: parent = parent.parent[prop.parent] elif insp.is_aliased_class and insp.with_polymorphic_mappers: if prop.parent is not insp.mapper and \ prop.parent in insp.with_polymorphic_mappers: subclass_entity = parent[-1]._entity_for_mapper(prop.parent) parent = parent.parent[subclass_entity] self.prop = prop self.parent = parent self.path = parent.path + (prop,) def __getitem__(self, entity): if isinstance(entity, (int, slice)): return self.path[entity] else: return EntityRegistry( self, entity ) class EntityRegistry(PathRegistry, dict): is_aliased_class = False def __init__(self, parent, entity): self.key = entity self.parent = parent self.is_aliased_class = entity.is_aliased_class self.path = parent.path + (entity,) def __nonzero__(self): return True def __getitem__(self, entity): if isinstance(entity, (int, slice)): return self.path[entity] else: return dict.__getitem__(self, entity) def _inlined_get_for(self, prop, context, key): """an inlined version of: cls = path[mapperproperty].get(context, key) Skips the isinstance() check in __getitem__ and the extra method call for get(). Used by StrategizedProperty for its very frequent lookup. """ path = dict.__getitem__(self, prop) path_key = (key, path.path) if path_key in context._attributes: return context._attributes[path_key] else: return None def __missing__(self, key): self[key] = item = PropRegistry(self, key) return item class AliasedClass(object): """Represents an "aliased" form of a mapped class for usage with Query. The ORM equivalent of a :func:`sqlalchemy.sql.expression.alias` construct, this object mimics the mapped class using a __getattr__ scheme and maintains a reference to a real :class:`~sqlalchemy.sql.expression.Alias` object. Usage is via the :func:`.orm.aliased` function, or alternatively via the :func:`.orm.with_polymorphic` function. Usage example:: # find all pairs of users with the same name user_alias = aliased(User) session.query(User, user_alias).\\ join((user_alias, User.id > user_alias.id)).\\ filter(User.name==user_alias.name) The resulting object is an instance of :class:`.AliasedClass`. This object implements an attribute scheme which produces the same attribute and method interface as the original mapped class, allowing :class:`.AliasedClass` to be compatible with any attribute technique which works on the original class, including hybrid attributes (see :ref:`hybrids_toplevel`). The :class:`.AliasedClass` can be inspected for its underlying :class:`.Mapper`, aliased selectable, and other information using :func:`.inspect`:: from sqlalchemy import inspect my_alias = aliased(MyClass) insp = inspect(my_alias) The resulting inspection object is an instance of :class:`.AliasedInsp`. See :func:`.aliased` and :func:`.with_polymorphic` for construction argument descriptions. """ def __init__(self, cls, alias=None, name=None, adapt_on_names=False, # TODO: None for default here? 
with_polymorphic_mappers=(), with_polymorphic_discriminator=None, base_alias=None, use_mapper_path=False): mapper = _class_to_mapper(cls) if alias is None: alias = mapper._with_polymorphic_selectable.alias(name=name) self._aliased_insp = AliasedInsp( self, mapper, alias, name, with_polymorphic_mappers if with_polymorphic_mappers else mapper.with_polymorphic_mappers, with_polymorphic_discriminator if with_polymorphic_discriminator is not None else mapper.polymorphic_on, base_alias, use_mapper_path ) self._setup(self._aliased_insp, adapt_on_names) def _setup(self, aliased_insp, adapt_on_names): self.__adapt_on_names = adapt_on_names mapper = aliased_insp.mapper alias = aliased_insp.selectable self.__target = mapper.class_ self.__adapt_on_names = adapt_on_names self.__adapter = sql_util.ClauseAdapter(alias, equivalents=mapper._equivalent_columns, adapt_on_names=self.__adapt_on_names) for poly in aliased_insp.with_polymorphic_mappers: if poly is not mapper: setattr(self, poly.class_.__name__, AliasedClass(poly.class_, alias, base_alias=self, use_mapper_path=self._aliased_insp._use_mapper_path)) self.__name__ = 'AliasedClass_%s' % self.__target.__name__ def __getstate__(self): return { 'mapper': self._aliased_insp.mapper, 'alias': self._aliased_insp.selectable, 'name': self._aliased_insp.name, 'adapt_on_names': self.__adapt_on_names, 'with_polymorphic_mappers': self._aliased_insp.with_polymorphic_mappers, 'with_polymorphic_discriminator': self._aliased_insp.polymorphic_on, 'base_alias': self._aliased_insp._base_alias.entity, 'use_mapper_path': self._aliased_insp._use_mapper_path } def __setstate__(self, state): self._aliased_insp = AliasedInsp( self, state['mapper'], state['alias'], state['name'], state['with_polymorphic_mappers'], state['with_polymorphic_discriminator'], state['base_alias'], state['use_mapper_path'] ) self._setup(self._aliased_insp, state['adapt_on_names']) def __adapt_element(self, elem): return self.__adapter.traverse(elem).\ _annotate({ 'parententity': self, 'parentmapper': self._aliased_insp.mapper} ) def __adapt_prop(self, existing, key): comparator = existing.comparator.adapted(self.__adapt_element) queryattr = attributes.QueryableAttribute( self, key, impl=existing.impl, parententity=self._aliased_insp, comparator=comparator) setattr(self, key, queryattr) return queryattr def __getattr__(self, key): for base in self.__target.__mro__: try: attr = object.__getattribute__(base, key) except AttributeError: continue else: break else: raise AttributeError(key) if isinstance(attr, attributes.QueryableAttribute): return self.__adapt_prop(attr, key) elif hasattr(attr, 'func_code'): is_method = getattr(self.__target, key, None) if is_method and is_method.im_self is not None: return util.types.MethodType(attr.im_func, self, self) else: return None elif hasattr(attr, '__get__'): ret = attr.__get__(None, self) if isinstance(ret, PropComparator): return ret.adapted(self.__adapt_element) return ret else: return attr def __repr__(self): return '' % ( id(self), self.__target.__name__) class AliasedInsp(_InspectionAttr): """Provide an inspection interface for an :class:`.AliasedClass` object. The :class:`.AliasedInsp` object is returned given an :class:`.AliasedClass` using the :func:`.inspect` function:: from sqlalchemy import inspect from sqlalchemy.orm import aliased my_alias = aliased(MyMappedClass) insp = inspect(my_alias) Attributes on :class:`.AliasedInsp` include: * ``entity`` - the :class:`.AliasedClass` represented. 
* ``mapper`` - the :class:`.Mapper` mapping the underlying class. * ``selectable`` - the :class:`.Alias` construct which ultimately represents an aliased :class:`.Table` or :class:`.Select` construct. * ``name`` - the name of the alias. Also is used as the attribute name when returned in a result tuple from :class:`.Query`. * ``with_polymorphic_mappers`` - collection of :class:`.Mapper` objects indicating all those mappers expressed in the select construct for the :class:`.AliasedClass`. * ``polymorphic_on`` - an alternate column or SQL expression which will be used as the "discriminator" for a polymorphic load. .. seealso:: :ref:`inspection_toplevel` """ def __init__(self, entity, mapper, selectable, name, with_polymorphic_mappers, polymorphic_on, _base_alias, _use_mapper_path): self.entity = entity self.mapper = mapper self.selectable = selectable self.name = name self.with_polymorphic_mappers = with_polymorphic_mappers self.polymorphic_on = polymorphic_on # a little dance to get serialization to work self._base_alias = _base_alias._aliased_insp if _base_alias \ and _base_alias is not entity else self self._use_mapper_path = _use_mapper_path is_aliased_class = True "always returns True" @property def class_(self): """Return the mapped class ultimately represented by this :class:`.AliasedInsp`.""" return self.mapper.class_ @util.memoized_property def _path_registry(self): if self._use_mapper_path: return self.mapper._path_registry else: return PathRegistry.per_mapper(self) def _entity_for_mapper(self, mapper): self_poly = self.with_polymorphic_mappers if mapper in self_poly: return getattr(self.entity, mapper.class_.__name__)._aliased_insp elif mapper.isa(self.mapper): return self else: assert False, "mapper %s doesn't correspond to %s" % (mapper, self) def __repr__(self): return '' % ( id(self), self.class_.__name__) inspection._inspects(AliasedClass)(lambda target: target._aliased_insp) inspection._inspects(AliasedInsp)(lambda target: target) def aliased(element, alias=None, name=None, adapt_on_names=False): """Produce an alias of the given element, usually an :class:`.AliasedClass` instance. E.g.:: my_alias = aliased(MyClass) session.query(MyClass, my_alias).filter(MyClass.id > my_alias.id) The :func:`.aliased` function is used to create an ad-hoc mapping of a mapped class to a new selectable. By default, a selectable is generated from the normally mapped selectable (typically a :class:`.Table`) using the :meth:`.FromClause.alias` method. However, :func:`.aliased` can also be used to link the class to a new :func:`.select` statement. Also, the :func:`.with_polymorphic` function is a variant of :func:`.aliased` that is intended to specify a so-called "polymorphic selectable", that corresponds to the union of several joined-inheritance subclasses at once. For convenience, the :func:`.aliased` function also accepts plain :class:`.FromClause` constructs, such as a :class:`.Table` or :func:`.select` construct. In those cases, the :meth:`.FromClause.alias` method is called on the object and the new :class:`.Alias` object returned. The returned :class:`.Alias` is not ORM-mapped in this case. :param element: element to be aliased. Is normally a mapped class, but for convenience can also be a :class:`.FromClause` element. :param alias: Optional selectable unit to map the element to. This should normally be a :class:`.Alias` object corresponding to the :class:`.Table` to which the class is mapped, or to a :func:`.select` construct that is compatible with the mapping. 
By default, a simple anonymous alias of the mapped table is generated. :param name: optional string name to use for the alias, if not specified by the ``alias`` parameter. The name, among other things, forms the attribute name that will be accessible via tuples returned by a :class:`.Query` object. :param adapt_on_names: if True, more liberal "matching" will be used when mapping the mapped columns of the ORM entity to those of the given selectable - a name-based match will be performed if the given selectable doesn't otherwise have a column that corresponds to one on the entity. The use case for this is when associating an entity with some derived selectable such as one that uses aggregate functions:: class UnitPrice(Base): __tablename__ = 'unit_price' ... unit_id = Column(Integer) price = Column(Numeric) aggregated_unit_price = Session.query( func.sum(UnitPrice.price).label('price') ).group_by(UnitPrice.unit_id).subquery() aggregated_unit_price = aliased(UnitPrice, alias=aggregated_unit_price, adapt_on_names=True) Above, functions on ``aggregated_unit_price`` which refer to ``.price`` will return the ``fund.sum(UnitPrice.price).label('price')`` column, as it is matched on the name "price". Ordinarily, the "price" function wouldn't have any "column correspondence" to the actual ``UnitPrice.price`` column as it is not a proxy of the original. .. versionadded:: 0.7.3 """ if isinstance(element, expression.FromClause): if adapt_on_names: raise sa_exc.ArgumentError( "adapt_on_names only applies to ORM elements" ) return element.alias(name) else: return AliasedClass(element, alias=alias, name=name, adapt_on_names=adapt_on_names) def with_polymorphic(base, classes, selectable=False, polymorphic_on=None, aliased=False, innerjoin=False, _use_mapper_path=False): """Produce an :class:`.AliasedClass` construct which specifies columns for descendant mappers of the given base. .. versionadded:: 0.8 :func:`.orm.with_polymorphic` is in addition to the existing :class:`.Query` method :meth:`.Query.with_polymorphic`, which has the same purpose but is not as flexible in its usage. Using this method will ensure that each descendant mapper's tables are included in the FROM clause, and will allow filter() criterion to be used against those tables. The resulting instances will also have those columns already loaded so that no "post fetch" of those columns will be required. See the examples at :ref:`with_polymorphic`. :param base: Base class to be aliased. :param classes: a single class or mapper, or list of class/mappers, which inherit from the base class. Alternatively, it may also be the string ``'*'``, in which case all descending mapped classes will be added to the FROM clause. :param aliased: when True, the selectable will be wrapped in an alias, that is ``(SELECT * FROM ) AS anon_1``. This can be important when using the with_polymorphic() to create the target of a JOIN on a backend that does not support parenthesized joins, such as SQLite and older versions of MySQL. :param selectable: a table or select() statement that will be used in place of the generated FROM clause. This argument is required if any of the desired classes use concrete table inheritance, since SQLAlchemy currently cannot generate UNIONs among tables automatically. If used, the ``selectable`` argument must represent the full set of tables and columns mapped by every mapped class. Otherwise, the unaccounted mapped columns will result in their table being appended directly to the FROM clause which will usually lead to incorrect results. 
:param polymorphic_on: a column to be used as the "discriminator" column for the given selectable. If not given, the polymorphic_on attribute of the base classes' mapper will be used, if any. This is useful for mappings that don't have polymorphic loading behavior by default. :param innerjoin: if True, an INNER JOIN will be used. This should only be specified if querying for one specific subtype only """ primary_mapper = _class_to_mapper(base) mappers, selectable = primary_mapper.\ _with_polymorphic_args(classes, selectable, innerjoin=innerjoin) if aliased: selectable = selectable.alias() return AliasedClass(base, selectable, with_polymorphic_mappers=mappers, with_polymorphic_discriminator=polymorphic_on, use_mapper_path=_use_mapper_path) def _orm_annotate(element, exclude=None): """Deep copy the given ClauseElement, annotating each element with the "_orm_adapt" flag. Elements within the exclude collection will be cloned but not annotated. """ return sql_util._deep_annotate(element, {'_orm_adapt': True}, exclude) def _orm_deannotate(element): """Remove annotations that link a column to a particular mapping. Note this doesn't affect "remote" and "foreign" annotations passed by the :func:`.orm.foreign` and :func:`.orm.remote` annotators. """ return sql_util._deep_deannotate(element, values=("_orm_adapt", "parententity") ) def _orm_full_deannotate(element): return sql_util._deep_deannotate(element) class _ORMJoin(expression.Join): """Extend Join to support ORM constructs as input.""" __visit_name__ = expression.Join.__visit_name__ def __init__(self, left, right, onclause=None, isouter=False): left_info = inspection.inspect(left) left_orm_info = getattr(left, '_joined_from_info', left_info) right_info = inspection.inspect(right) adapt_to = right_info.selectable self._joined_from_info = right_info if isinstance(onclause, basestring): onclause = getattr(left_orm_info.entity, onclause) if isinstance(onclause, attributes.QueryableAttribute): on_selectable = onclause.comparator._source_selectable() prop = onclause.property elif isinstance(onclause, MapperProperty): prop = onclause on_selectable = prop.parent.selectable else: prop = None if prop: if sql_util.clause_is_present(on_selectable, left_info.selectable): adapt_from = on_selectable else: adapt_from = left_info.selectable pj, sj, source, dest, \ secondary, target_adapter = prop._create_joins( source_selectable=adapt_from, dest_selectable=adapt_to, source_polymorphic=True, dest_polymorphic=True, of_type=right_info.mapper) if sj is not None: left = sql.join(left, secondary, pj, isouter) onclause = sj else: onclause = pj self._target_adapter = target_adapter expression.Join.__init__(self, left, right, onclause, isouter) def join(self, right, onclause=None, isouter=False, join_to_left=None): return _ORMJoin(self, right, onclause, isouter) def outerjoin(self, right, onclause=None, join_to_left=None): return _ORMJoin(self, right, onclause, True) def join(left, right, onclause=None, isouter=False, join_to_left=None): """Produce an inner join between left and right clauses. :func:`.orm.join` is an extension to the core join interface provided by :func:`.sql.expression.join()`, where the left and right selectables may be not only core selectable objects such as :class:`.Table`, but also mapped classes or :class:`.AliasedClass` instances. The "on" clause can be a SQL expression, or an attribute or string name referencing a configured :func:`.relationship`. 
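    For example, assuming ``User`` and ``Address`` are mapped classes and
    ``User.addresses`` is a configured relationship (names used purely for
    illustration), the ON clause may be passed in any of these forms::

        join(User, Address, User.addresses)              # relationship attribute
        join(User, Address, "addresses")                 # string name of the relationship
        join(User, Address, User.id == Address.user_id)  # explicit SQL expression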
:func:`.orm.join` is not commonly needed in modern usage, as its functionality is encapsulated within that of the :meth:`.Query.join` method, which features a significant amount of automation beyond :func:`.orm.join` by itself. Explicit usage of :func:`.orm.join` with :class:`.Query` involves usage of the :meth:`.Query.select_from` method, as in:: from sqlalchemy.orm import join session.query(User).\\ select_from(join(User, Address, User.addresses)).\\ filter(Address.email_address=='foo@bar.com') In modern SQLAlchemy the above join can be written more succinctly as:: session.query(User).\\ join(User.addresses).\\ filter(Address.email_address=='foo@bar.com') See :meth:`.Query.join` for information on modern usage of ORM level joins. .. versionchanged:: 0.8.1 - the ``join_to_left`` parameter is no longer used, and is deprecated. """ return _ORMJoin(left, right, onclause, isouter) def outerjoin(left, right, onclause=None, join_to_left=None): """Produce a left outer join between left and right clauses. This is the "outer join" version of the :func:`.orm.join` function, featuring the same behavior except that an OUTER JOIN is generated. See that function's documentation for other usage details. """ return _ORMJoin(left, right, onclause, True) def with_parent(instance, prop): """Create filtering criterion that relates this query's primary entity to the given related instance, using established :func:`.relationship()` configuration. The SQL rendered is the same as that rendered when a lazy loader would fire off from the given parent on that attribute, meaning that the appropriate state is taken from the parent object in Python without the need to render joins to the parent table in the rendered statement. .. versionchanged:: 0.6.4 This method accepts parent instances in all persistence states, including transient, persistent, and detached. Only the requisite primary key/foreign key attributes need to be populated. Previous versions didn't work with transient instances. :param instance: An instance which has some :func:`.relationship`. :param property: String property name, or class-bound attribute, which indicates what relationship from the instance should be used to reconcile the parent/child relationship. """ if isinstance(prop, basestring): mapper = object_mapper(instance) prop = getattr(mapper.class_, prop).property elif isinstance(prop, attributes.QueryableAttribute): prop = prop.property return prop.compare(operators.eq, instance, value_is_parent=True) def _attr_as_key(attr): if hasattr(attr, 'key'): return attr.key else: return expression._column_as_key(attr) _state_mapper = util.dottedgetter('manager.mapper') @inspection._inspects(object) def _inspect_mapped_object(instance): try: return attributes.instance_state(instance) # TODO: whats the py-2/3 syntax to catch two # different kinds of exceptions at once ? except exc.UnmappedClassError: return None except exc.NO_STATE: return None @inspection._inspects(type) def _inspect_mapped_class(class_, configure=False): try: class_manager = attributes.manager_of_class(class_) if not class_manager.is_mapped: return None mapper = class_manager.mapper if configure and mapperlib.module._new_mappers: mapperlib.configure_mappers() return mapper except exc.NO_STATE: return None def object_mapper(instance): """Given an object, return the primary Mapper associated with the object instance. Raises :class:`sqlalchemy.orm.exc.UnmappedInstanceError` if no mapping is configured. 
This function is available via the inspection system as:: inspect(instance).mapper Using the inspection system will raise :class:`sqlalchemy.exc.NoInspectionAvailable` if the instance is not part of a mapping. """ return object_state(instance).mapper def object_state(instance): """Given an object, return the :class:`.InstanceState` associated with the object. Raises :class:`sqlalchemy.orm.exc.UnmappedInstanceError` if no mapping is configured. Equivalent functionality is available via the :func:`.inspect` function as:: inspect(instance) Using the inspection system will raise :class:`sqlalchemy.exc.NoInspectionAvailable` if the instance is not part of a mapping. """ state = _inspect_mapped_object(instance) if state is None: raise exc.UnmappedInstanceError(instance) else: return state def class_mapper(class_, configure=True): """Given a class, return the primary :class:`.Mapper` associated with the key. Raises :class:`.UnmappedClassError` if no mapping is configured on the given class, or :class:`.ArgumentError` if a non-class object is passed. Equivalent functionality is available via the :func:`.inspect` function as:: inspect(some_mapped_class) Using the inspection system will raise :class:`sqlalchemy.exc.NoInspectionAvailable` if the class is not mapped. """ mapper = _inspect_mapped_class(class_, configure=configure) if mapper is None: if not isinstance(class_, type): raise sa_exc.ArgumentError( "Class object expected, got '%r'." % class_) raise exc.UnmappedClassError(class_) else: return mapper def _class_to_mapper(class_or_mapper): insp = inspection.inspect(class_or_mapper, False) if insp is not None: return insp.mapper else: raise exc.UnmappedClassError(class_or_mapper) def _mapper_or_none(entity): """Return the :class:`.Mapper` for the given class or None if the class is not mapped.""" insp = inspection.inspect(entity, False) if insp is not None: return insp.mapper else: return None def _is_mapped_class(entity): """Return True if the given object is a mapped class, :class:`.Mapper`, or :class:`.AliasedClass`.""" insp = inspection.inspect(entity, False) return insp is not None and \ hasattr(insp, "mapper") and \ ( insp.is_mapper or insp.is_aliased_class ) def _is_aliased_class(entity): insp = inspection.inspect(entity, False) return insp is not None and \ getattr(insp, "is_aliased_class", False) def _entity_descriptor(entity, key): """Return a class attribute given an entity and string name. May return :class:`.InstrumentedAttribute` or user-defined attribute. """ insp = inspection.inspect(entity) if insp.is_selectable: description = entity entity = insp.c elif insp.is_aliased_class: entity = insp.entity description = entity elif hasattr(insp, "mapper"): description = entity = insp.mapper.class_ else: description = entity try: return getattr(entity, key) except AttributeError: raise sa_exc.InvalidRequestError( "Entity '%s' has no property '%s'" % (description, key) ) def _orm_columns(entity): insp = inspection.inspect(entity, False) if hasattr(insp, 'selectable'): return [c for c in insp.selectable.c] else: return [entity] def has_identity(object): """Return True if the given object has a database identity. This typically corresponds to the object being in either the persistent or detached state. .. seealso:: :func:`.was_deleted` """ state = attributes.instance_state(object) return state.has_identity def was_deleted(object): """Return True if the given object was deleted within a session flush. .. 
versionadded:: 0.8.0 """ state = attributes.instance_state(object) return state.deleted def instance_str(instance): """Return a string describing an instance.""" return state_str(attributes.instance_state(instance)) def state_str(state): """Return a string describing an instance via its InstanceState.""" if state is None: return "None" else: return '<%s at 0x%x>' % (state.class_.__name__, id(state.obj())) def state_class_str(state): """Return a string describing an instance's class via its InstanceState.""" if state is None: return "None" else: return '<%s>' % (state.class_.__name__, ) def attribute_str(instance, attribute): return instance_str(instance) + "." + attribute def state_attribute_str(state, attribute): return state_str(state) + "." + attribute def randomize_unitofwork(): """Use random-ordering sets within the unit of work in order to detect unit of work sorting issues. This is a utility function that can be used to help reproduce inconsistent unit of work sorting issues. For example, if two kinds of objects A and B are being inserted, and B has a foreign key reference to A - the A must be inserted first. However, if there is no relationship between A and B, the unit of work won't know to perform this sorting, and an operation may or may not fail, depending on how the ordering works out. Since Python sets and dictionaries have non-deterministic ordering, such an issue may occur on some runs and not on others, and in practice it tends to have a great dependence on the state of the interpreter. This leads to so-called "heisenbugs" where changing entirely irrelevant aspects of the test program still cause the failure behavior to change. By calling ``randomize_unitofwork()`` when a script first runs, the ordering of a key series of sets within the unit of work implementation are randomized, so that the script can be minimized down to the fundamental mapping and operation that's failing, while still reproducing the issue on at least some runs. This utility is also available when running the test suite via the ``--reversetop`` flag. .. versionadded:: 0.8.1 created a standalone version of the ``--reversetop`` feature. """ from sqlalchemy.orm import unitofwork, session, mapper, dependency from sqlalchemy.util import topological from sqlalchemy.testing.util import RandomSet topological.set = unitofwork.set = session.set = mapper.set = \ dependency.set = RandomSet SQLAlchemy-0.8.4/lib/sqlalchemy/pool.py0000644000076500000240000011027612251150015020463 0ustar classicstaff00000000000000# sqlalchemy/pool.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Connection pooling for DB-API connections. Provides a number of connection pool implementations for a variety of usage scenarios and thread behavior requirements imposed by the application, DB-API or database itself. Also provides a DB-API 2.0 connection proxying mechanism allowing regular DB-API connect() methods to be transparently managed by a SQLAlchemy connection pool. """ from __future__ import with_statement import time import traceback import weakref from . import exc, log, event, events, interfaces, util from .util import queue as sqla_queue from .util import threading, memoized_property, \ chop_traceback proxies = {} def manage(module, **params): """Return a proxy for a DB-API module that automatically pools connections. 
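A brief usage sketch, assuming the stdlib ``sqlite3`` module as the DB-API and an illustrative database filename::

    import sqlite3

    from sqlalchemy import pool

    sqlite = pool.manage(sqlite3)

    # connect() arguments are passed through to sqlite3.connect();
    # calls using the same arguments share one connection pool
    conn = sqlite.connect('somefile.db')
    conn.close()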
Given a DB-API 2.0 module and pool management parameters, returns a proxy for the module that will automatically pool connections, creating new connection pools for each distinct set of connection arguments sent to the decorated module's connect() function. :param module: a DB-API 2.0 database module :param poolclass: the class used by the pool module to provide pooling. Defaults to :class:`.QueuePool`. :param \*\*params: will be passed through to *poolclass* """ try: return proxies[module] except KeyError: return proxies.setdefault(module, _DBProxy(module, **params)) def clear_managers(): """Remove all current DB-API 2.0 managers. All pools and connections are disposed. """ for manager in proxies.itervalues(): manager.close() proxies.clear() reset_rollback = util.symbol('reset_rollback') reset_commit = util.symbol('reset_commit') reset_none = util.symbol('reset_none') class _ConnDialect(object): """partial implementation of :class:`.Dialect` which provides DBAPI connection methods. When a :class:`.Pool` is combined with an :class:`.Engine`, the :class:`.Engine` replaces this with its own :class:`.Dialect`. """ def do_rollback(self, dbapi_connection): dbapi_connection.rollback() def do_commit(self, dbapi_connection): dbapi_connection.commit() def do_close(self, dbapi_connection): dbapi_connection.close() class Pool(log.Identified): """Abstract base class for connection pools.""" _dialect = _ConnDialect() def __init__(self, creator, recycle=-1, echo=None, use_threadlocal=False, logging_name=None, reset_on_return=True, listeners=None, events=None, _dispatch=None, _dialect=None): """ Construct a Pool. :param creator: a callable function that returns a DB-API connection object. The function will be called with parameters. :param recycle: If set to non -1, number of seconds between connection recycling, which means upon checkout, if this timeout is surpassed the connection will be closed and replaced with a newly opened connection. Defaults to -1. :param logging_name: String identifier which will be used within the "name" field of logging records generated within the "sqlalchemy.pool" logger. Defaults to a hexstring of the object's id. :param echo: If True, connections being pulled and retrieved from the pool will be logged to the standard output, as well as pool sizing information. Echoing can also be achieved by enabling logging for the "sqlalchemy.pool" namespace. Defaults to False. :param use_threadlocal: If set to True, repeated calls to :meth:`connect` within the same application thread will be guaranteed to return the same connection object, if one has already been retrieved from the pool and has not been returned yet. Offers a slight performance advantage at the cost of individual transactions by default. The :meth:`unique_connection` method is provided to bypass the threadlocal behavior installed into :meth:`connect`. :param reset_on_return: Configures the action to take on connections as they are returned to the pool. See the argument description in :class:`.QueuePool` for more detail. :param events: a list of 2-tuples, each of the form ``(callable, target)`` which will be passed to event.listen() upon construction. Provided here so that event listeners can be assigned via ``create_engine`` before dialect-level listeners are applied. :param listeners: Deprecated. A list of :class:`~sqlalchemy.interfaces.PoolListener`-like objects or dictionaries of callables that receive events when DB-API connections are created, checked out and checked in to the pool. 
This has been superseded by :func:`~sqlalchemy.event.listen`. """ if logging_name: self.logging_name = self._orig_logging_name = logging_name else: self._orig_logging_name = None log.instance_logger(self, echoflag=echo) self._threadconns = threading.local() self._creator = creator self._recycle = recycle self._use_threadlocal = use_threadlocal if reset_on_return in ('rollback', True, reset_rollback): self._reset_on_return = reset_rollback elif reset_on_return in (None, False, reset_none): self._reset_on_return = reset_none elif reset_on_return in ('commit', reset_commit): self._reset_on_return = reset_commit else: raise exc.ArgumentError( "Invalid value for 'reset_on_return': %r" % reset_on_return) self.echo = echo if _dispatch: self.dispatch._update(_dispatch, only_propagate=False) if _dialect: self._dialect = _dialect if events: for fn, target in events: event.listen(self, target, fn) if listeners: util.warn_deprecated( "The 'listeners' argument to Pool (and " "create_engine()) is deprecated. Use event.listen().") for l in listeners: self.add_listener(l) dispatch = event.dispatcher(events.PoolEvents) def _close_connection(self, connection): self.logger.debug("Closing connection %r", connection) try: self._dialect.do_close(connection) except (SystemExit, KeyboardInterrupt): raise except: self.logger.debug("Exception closing connection %r", connection) @util.deprecated( 2.7, "Pool.add_listener is deprecated. Use event.listen()") def add_listener(self, listener): """Add a :class:`.PoolListener`-like object to this pool. ``listener`` may be an object that implements some or all of PoolListener, or a dictionary of callables containing implementations of some or all of the named methods in PoolListener. """ interfaces.PoolListener._adapt_listener(self, listener) def unique_connection(self): """Produce a DBAPI connection that is not referenced by any thread-local context. This method is different from :meth:`.Pool.connect` only if the ``use_threadlocal`` flag has been set to ``True``. """ return _ConnectionFairy(self).checkout() def _create_connection(self): """Called by subclasses to create a new ConnectionRecord.""" return _ConnectionRecord(self) def recreate(self): """Return a new :class:`.Pool`, of the same class as this one and configured with identical creation arguments. This method is used in conjunection with :meth:`dispose` to close out an entire :class:`.Pool` and create a new one in its place. """ raise NotImplementedError() def dispose(self): """Dispose of this pool. This method leaves the possibility of checked-out connections remaining open, as it only affects connections that are idle in the pool. See also the :meth:`Pool.recreate` method. """ raise NotImplementedError() def _replace(self): """Dispose + recreate this pool. Subclasses may employ special logic to move threads waiting on this pool to the new one. """ self.dispose() return self.recreate() def connect(self): """Return a DBAPI connection from the pool. The connection is instrumented such that when its ``close()`` method is called, the connection will be returned to the pool. """ if not self._use_threadlocal: return _ConnectionFairy(self).checkout() try: rec = self._threadconns.current() if rec: return rec.checkout() except AttributeError: pass agent = _ConnectionFairy(self) self._threadconns.current = weakref.ref(agent) return agent.checkout() def _return_conn(self, record): """Given a _ConnectionRecord, return it to the :class:`.Pool`. 
This method is called when an instrumented DBAPI connection has its ``close()`` method called. """ if self._use_threadlocal: try: del self._threadconns.current except AttributeError: pass self._do_return_conn(record) def _do_get(self): """Implementation for :meth:`get`, supplied by subclasses.""" raise NotImplementedError() def _do_return_conn(self, conn): """Implementation for :meth:`return_conn`, supplied by subclasses.""" raise NotImplementedError() def status(self): raise NotImplementedError() class _ConnectionRecord(object): finalize_callback = None def __init__(self, pool): self.__pool = pool self.connection = self.__connect() pool.dispatch.first_connect.\ for_modify(pool.dispatch).\ exec_once(self.connection, self) pool.dispatch.connect(self.connection, self) @util.memoized_property def info(self): return {} def close(self): if self.connection is not None: self.__pool._close_connection(self.connection) def invalidate(self, e=None): if e is not None: self.__pool.logger.info( "Invalidate connection %r (reason: %s:%s)", self.connection, e.__class__.__name__, e) else: self.__pool.logger.info( "Invalidate connection %r", self.connection) self.__close() self.connection = None def get_connection(self): if self.connection is None: self.connection = self.__connect() self.info.clear() if self.__pool.dispatch.connect: self.__pool.dispatch.connect(self.connection, self) elif self.__pool._recycle > -1 and \ time.time() - self.starttime > self.__pool._recycle: self.__pool.logger.info( "Connection %r exceeded timeout; recycling", self.connection) self.__close() self.connection = self.__connect() self.info.clear() if self.__pool.dispatch.connect: self.__pool.dispatch.connect(self.connection, self) return self.connection def checkin(self): self.fairy = None connection = self.connection pool = self.__pool if self.finalize_callback: self.finalize_callback(connection) del self.finalize_callback if pool.dispatch.checkin: pool.dispatch.checkin(connection, self) pool._return_conn(self) def __close(self): self.__pool._close_connection(self.connection) def __connect(self): try: self.starttime = time.time() connection = self.__pool._creator() self.__pool.logger.debug("Created new connection %r", connection) return connection except Exception, e: self.__pool.logger.debug("Error on connect(): %s", e) raise def _finalize_fairy(connection, connection_record, pool, ref, echo): _refs.discard(connection_record) if ref is not None and \ connection_record.fairy is not ref: return if connection_record and echo: pool.logger.debug("Connection %r being returned to pool", connection) if connection is not None: try: if pool.dispatch.reset: pool.dispatch.reset(connection, connection_record) if pool._reset_on_return is reset_rollback: pool._dialect.do_rollback(connection) elif pool._reset_on_return is reset_commit: pool._dialect.do_commit(connection) # Immediately close detached instances if connection_record is None: pool._close_connection(connection) except Exception, e: if connection_record is not None: connection_record.invalidate(e=e) if isinstance(e, (SystemExit, KeyboardInterrupt)): raise if connection_record: connection_record.checkin() _refs = set() class _ConnectionFairy(object): """Proxies a DB-API connection and provides return-on-dereference support.""" def __init__(self, pool): self._pool = pool self.__counter = 0 self._echo = _echo = pool._should_log_debug() try: rec = self._connection_record = pool._do_get() try: conn = self.connection = self._connection_record.get_connection() except: 
self._connection_record.checkin() raise rec.fairy = weakref.ref( self, lambda ref: _finalize_fairy and \ _finalize_fairy(conn, rec, pool, ref, _echo) ) _refs.add(rec) except: # helps with endless __getattr__ loops later on self.connection = None self._connection_record = None raise if self._echo: self._pool.logger.debug("Connection %r checked out from pool" % self.connection) @property def _logger(self): return self._pool.logger @property def is_valid(self): return self.connection is not None @util.memoized_property def info(self): """Info dictionary associated with the underlying DBAPI connection referred to by this :class:`.ConnectionFairy`, allowing user-defined data to be associated with the connection. The data here will follow along with the DBAPI connection including after it is returned to the connection pool and used again in subsequent instances of :class:`.ConnectionFairy`. """ try: return self._connection_record.info except AttributeError: raise exc.InvalidRequestError("This connection is closed") def invalidate(self, e=None): """Mark this connection as invalidated. The connection will be immediately closed. The containing ConnectionRecord will create a new connection when next used. """ if self.connection is None: raise exc.InvalidRequestError("This connection is closed") if self._connection_record is not None: self._connection_record.invalidate(e=e) self.connection = None self._close() def cursor(self, *args, **kwargs): return self.connection.cursor(*args, **kwargs) def __getattr__(self, key): return getattr(self.connection, key) def checkout(self): if self.connection is None: raise exc.InvalidRequestError("This connection is closed") self.__counter += 1 if not self._pool.dispatch.checkout or self.__counter != 1: return self # Pool listeners can trigger a reconnection on checkout attempts = 2 while attempts > 0: try: self._pool.dispatch.checkout(self.connection, self._connection_record, self) return self except exc.DisconnectionError, e: self._pool.logger.info( "Disconnection detected on checkout: %s", e) self._connection_record.invalidate(e) self.connection = self._connection_record.get_connection() attempts -= 1 self._pool.logger.info("Reconnection attempts exhausted on checkout") self.invalidate() raise exc.InvalidRequestError("This connection is closed") def detach(self): """Separate this connection from its Pool. This means that the connection will no longer be returned to the pool when closed, and will instead be literally closed. The containing ConnectionRecord is separated from the DB-API connection, and will create a new connection when next used. Note that any overall connection limiting constraints imposed by a Pool implementation may be violated after a detach, as the detached connection is removed from the pool's knowledge and control. """ if self._connection_record is not None: _refs.remove(self._connection_record) self._connection_record.fairy = None self._connection_record.connection = None self._pool._do_return_conn(self._connection_record) self.info = self.info.copy() self._connection_record = None def close(self): self.__counter -= 1 if self.__counter == 0: self._close() def _close(self): _finalize_fairy(self.connection, self._connection_record, self._pool, None, self._echo) self.connection = None self._connection_record = None class SingletonThreadPool(Pool): """A Pool that maintains one connection per thread. Maintains one connection per each thread, never moving a connection to a thread other than the one which it was created in. 
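For example, a construction sketch; normally this pool is selected automatically when a SQLite memory database is used, but it may also be requested explicitly via ``poolclass``::

    from sqlalchemy import create_engine
    from sqlalchemy.pool import SingletonThreadPool

    # each thread that uses this engine gets its own connection
    engine = create_engine('sqlite://', poolclass=SingletonThreadPool)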
Options are the same as those of :class:`.Pool`, as well as: :param pool_size: The number of threads in which to maintain connections at once. Defaults to five. :class:`.SingletonThreadPool` is used by the SQLite dialect automatically when a memory-based database is used. See :ref:`sqlite_toplevel`. """ def __init__(self, creator, pool_size=5, **kw): kw['use_threadlocal'] = True Pool.__init__(self, creator, **kw) self._conn = threading.local() self._all_conns = set() self.size = pool_size def recreate(self): self.logger.info("Pool recreating") return self.__class__(self._creator, pool_size=self.size, recycle=self._recycle, echo=self.echo, logging_name=self._orig_logging_name, use_threadlocal=self._use_threadlocal, reset_on_return=self._reset_on_return, _dispatch=self.dispatch, _dialect=self._dialect) def dispose(self): """Dispose of this pool.""" for conn in self._all_conns: try: conn.close() except (SystemExit, KeyboardInterrupt): raise except: # pysqlite won't even let you close a conn from a thread # that didn't create it pass self._all_conns.clear() def _cleanup(self): while len(self._all_conns) > self.size: c = self._all_conns.pop() c.close() def status(self): return "SingletonThreadPool id:%d size: %d" % \ (id(self), len(self._all_conns)) def _do_return_conn(self, conn): pass def _do_get(self): try: c = self._conn.current() if c: return c except AttributeError: pass c = self._create_connection() self._conn.current = weakref.ref(c) self._all_conns.add(c) if len(self._all_conns) > self.size: self._cleanup() return c class QueuePool(Pool): """A :class:`.Pool` that imposes a limit on the number of open connections. :class:`.QueuePool` is the default pooling implementation used for all :class:`.Engine` objects, unless the SQLite dialect is in use. """ def __init__(self, creator, pool_size=5, max_overflow=10, timeout=30, **kw): """ Construct a QueuePool. :param creator: a callable function that returns a DB-API connection object. The function will be called with parameters. :param pool_size: The size of the pool to be maintained, defaults to 5. This is the largest number of connections that will be kept persistently in the pool. Note that the pool begins with no connections; once this number of connections is requested, that number of connections will remain. ``pool_size`` can be set to 0 to indicate no size limit; to disable pooling, use a :class:`~sqlalchemy.pool.NullPool` instead. :param max_overflow: The maximum overflow size of the pool. When the number of checked-out connections reaches the size set in pool_size, additional connections will be returned up to this limit. When those additional connections are returned to the pool, they are disconnected and discarded. It follows then that the total number of simultaneous connections the pool will allow is pool_size + `max_overflow`, and the total number of "sleeping" connections the pool will allow is pool_size. `max_overflow` can be set to -1 to indicate no overflow limit; no limit will be placed on the total number of concurrent connections. Defaults to 10. :param timeout: The number of seconds to wait before giving up on returning a connection. Defaults to 30. :param recycle: If set to non -1, number of seconds between connection recycling, which means upon checkout, if this timeout is surpassed the connection will be closed and replaced with a newly opened connection. Defaults to -1. :param echo: If True, connections being pulled and retrieved from the pool will be logged to the standard output, as well as pool sizing information. 
Echoing can also be achieved by enabling logging for the "sqlalchemy.pool" namespace. Defaults to False. :param use_threadlocal: If set to True, repeated calls to :meth:`connect` within the same application thread will be guaranteed to return the same connection object, if one has already been retrieved from the pool and has not been returned yet. Offers a slight performance advantage at the cost of individual transactions by default. The :meth:`unique_connection` method is provided to bypass the threadlocal behavior installed into :meth:`connect`. :param reset_on_return: Determine steps to take on connections as they are returned to the pool. reset_on_return can have any of these values: * 'rollback' - call rollback() on the connection, to release locks and transaction resources. This is the default value. The vast majority of use cases should leave this value set. * True - same as 'rollback', this is here for backwards compatibility. * 'commit' - call commit() on the connection, to release locks and transaction resources. A commit here may be desirable for databases that cache query plans if a commit is emitted, such as Microsoft SQL Server. However, this value is more dangerous than 'rollback' because any data changes present on the transaction are committed unconditionally. * None - don't do anything on the connection. This setting should only be made on a database that has no transaction support at all, namely MySQL MyISAM. By not doing anything, performance can be improved. This setting should **never be selected** for a database that supports transactions, as it will lead to deadlocks and stale state. * False - same as None, this is here for backwards compatibility. .. versionchanged:: 0.7.6 ``reset_on_return`` accepts values. :param listeners: A list of :class:`~sqlalchemy.interfaces.PoolListener`-like objects or dictionaries of callables that receive events when DB-API connections are created, checked out and checked in to the pool. 
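A minimal construction sketch, assuming a ``get_conn`` callable that opens a new DBAPI connection (most applications configure these parameters through :func:`.create_engine` rather than building the pool directly)::

    from sqlalchemy.pool import QueuePool

    # get_conn is assumed to return a new DBAPI connection
    mypool = QueuePool(get_conn, pool_size=5, max_overflow=10,
                       timeout=30)

    conn = mypool.connect()   # check a connection out
    conn.close()              # return it to the pool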
""" Pool.__init__(self, creator, **kw) self._pool = sqla_queue.Queue(pool_size) self._overflow = 0 - pool_size self._max_overflow = max_overflow self._timeout = timeout self._overflow_lock = threading.Lock() def _do_return_conn(self, conn): try: self._pool.put(conn, False) except sqla_queue.Full: try: conn.close() finally: self._dec_overflow() def _do_get(self): use_overflow = self._max_overflow > -1 try: wait = use_overflow and self._overflow >= self._max_overflow return self._pool.get(wait, self._timeout) except sqla_queue.SAAbort, aborted: return aborted.context._do_get() except sqla_queue.Empty: if use_overflow and self._overflow >= self._max_overflow: if not wait: return self._do_get() else: raise exc.TimeoutError( "QueuePool limit of size %d overflow %d reached, " "connection timed out, timeout %d" % (self.size(), self.overflow(), self._timeout)) if self._inc_overflow(): try: return self._create_connection() except: self._dec_overflow() raise else: return self._do_get() def _inc_overflow(self): if self._max_overflow == -1: self._overflow += 1 return True with self._overflow_lock: if self._overflow < self._max_overflow: self._overflow += 1 return True else: return False def _dec_overflow(self): if self._max_overflow == -1: self._overflow -= 1 return True with self._overflow_lock: self._overflow -= 1 return True def recreate(self): self.logger.info("Pool recreating") return self.__class__(self._creator, pool_size=self._pool.maxsize, max_overflow=self._max_overflow, timeout=self._timeout, recycle=self._recycle, echo=self.echo, logging_name=self._orig_logging_name, use_threadlocal=self._use_threadlocal, reset_on_return=self._reset_on_return, _dispatch=self.dispatch, _dialect=self._dialect) def dispose(self): while True: try: conn = self._pool.get(False) conn.close() except sqla_queue.Empty: break self._overflow = 0 - self.size() self.logger.info("Pool disposed. %s", self.status()) def _replace(self): self.dispose() np = self.recreate() self._pool.abort(np) return np def status(self): return "Pool size: %d Connections in pool: %d "\ "Current Overflow: %d Current Checked out "\ "connections: %d" % (self.size(), self.checkedin(), self.overflow(), self.checkedout()) def size(self): return self._pool.maxsize def checkedin(self): return self._pool.qsize() def overflow(self): return self._overflow def checkedout(self): return self._pool.maxsize - self._pool.qsize() + self._overflow class NullPool(Pool): """A Pool which does not pool connections. Instead it literally opens and closes the underlying DB-API connection per each connection open/close. Reconnect-related functions such as ``recycle`` and connection invalidation are not supported by this Pool implementation, since no connections are held persistently. .. versionchanged:: 0.7 :class:`.NullPool` is used by the SQlite dialect automatically when a file-based database is used. See :ref:`sqlite_toplevel`. """ def status(self): return "NullPool" def _do_return_conn(self, conn): conn.close() def _do_get(self): return self._create_connection() def recreate(self): self.logger.info("Pool recreating") return self.__class__(self._creator, recycle=self._recycle, echo=self.echo, logging_name=self._orig_logging_name, use_threadlocal=self._use_threadlocal, reset_on_return=self._reset_on_return, _dispatch=self.dispatch, _dialect=self._dialect) def dispose(self): pass class StaticPool(Pool): """A Pool of exactly one connection, used for all requests. 
Reconnect-related functions such as ``recycle`` and connection invalidation (which is also used to support auto-reconnect) are not currently supported by this Pool implementation but may be implemented in a future release. """ @memoized_property def _conn(self): return self._creator() @memoized_property def connection(self): return _ConnectionRecord(self) def status(self): return "StaticPool" def dispose(self): if '_conn' in self.__dict__: self._conn.close() self._conn = None def recreate(self): self.logger.info("Pool recreating") return self.__class__(creator=self._creator, recycle=self._recycle, use_threadlocal=self._use_threadlocal, reset_on_return=self._reset_on_return, echo=self.echo, logging_name=self._orig_logging_name, _dispatch=self.dispatch, _dialect=self._dialect) def _create_connection(self): return self._conn def _do_return_conn(self, conn): pass def _do_get(self): return self.connection class AssertionPool(Pool): """A :class:`.Pool` that allows at most one checked out connection at any given time. This will raise an exception if more than one connection is checked out at a time. Useful for debugging code that is using more connections than desired. .. versionchanged:: 0.7 :class:`.AssertionPool` also logs a traceback of where the original connection was checked out, and reports this in the assertion error raised. """ def __init__(self, *args, **kw): self._conn = None self._checked_out = False self._store_traceback = kw.pop('store_traceback', True) self._checkout_traceback = None Pool.__init__(self, *args, **kw) def status(self): return "AssertionPool" def _do_return_conn(self, conn): if not self._checked_out: raise AssertionError("connection is not checked out") self._checked_out = False assert conn is self._conn def dispose(self): self._checked_out = False if self._conn: self._conn.close() def recreate(self): self.logger.info("Pool recreating") return self.__class__(self._creator, echo=self.echo, logging_name=self._orig_logging_name, _dispatch=self.dispatch, _dialect=self._dialect) def _do_get(self): if self._checked_out: if self._checkout_traceback: suffix = ' at:\n%s' % ''.join( chop_traceback(self._checkout_traceback)) else: suffix = '' raise AssertionError("connection is already checked out" + suffix) if not self._conn: self._conn = self._create_connection() self._checked_out = True if self._store_traceback: self._checkout_traceback = traceback.format_stack() return self._conn class _DBProxy(object): """Layers connection pooling behavior on top of a standard DB-API module. Proxies a DB-API 2.0 connect() call to a connection pool keyed to the specific connect parameters. Other functions and attributes are delegated to the underlying DB-API module. """ def __init__(self, module, poolclass=QueuePool, **kw): """Initializes a new proxy. module a DB-API 2.0 module poolclass a Pool class, defaulting to QueuePool Other parameters are sent to the Pool object's constructor. 
""" self.module = module self.kw = kw self.poolclass = poolclass self.pools = {} self._create_pool_mutex = threading.Lock() def close(self): for key in self.pools.keys(): del self.pools[key] def __del__(self): self.close() def __getattr__(self, key): return getattr(self.module, key) def get_pool(self, *args, **kw): key = self._serialize(*args, **kw) try: return self.pools[key] except KeyError: self._create_pool_mutex.acquire() try: if key not in self.pools: kw.pop('sa_pool_key', None) pool = self.poolclass(lambda: self.module.connect(*args, **kw), **self.kw) self.pools[key] = pool return pool else: return self.pools[key] finally: self._create_pool_mutex.release() def connect(self, *args, **kw): """Activate a connection to the database. Connect to the database using this DBProxy's module and the given connect arguments. If the arguments match an existing pool, the connection will be returned from the pool's current thread-local connection instance, or if there is no thread-local connection instance it will be checked out from the set of pooled connections. If the pool has no available connections and allows new connections to be created, a new database connection will be made. """ return self.get_pool(*args, **kw).connect() def dispose(self, *args, **kw): """Dispose the pool referenced by the given connect arguments.""" key = self._serialize(*args, **kw) try: del self.pools[key] except KeyError: pass def _serialize(self, *args, **kw): if "sa_pool_key" in kw: return kw['sa_pool_key'] return tuple( list(args) + [(k, kw[k]) for k in sorted(kw)] ) SQLAlchemy-0.8.4/lib/sqlalchemy/processors.py0000644000076500000240000001065712251150015021716 0ustar classicstaff00000000000000# sqlalchemy/processors.py # Copyright (C) 2010-2013 the SQLAlchemy authors and contributors # Copyright (C) 2010 Gaetan de Menten gdementen@gmail.com # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """defines generic type conversion functions, as used in bind and result processors. They all share one common characteristic: None is passed through unchanged. """ import codecs import re import datetime def str_to_datetime_processor_factory(regexp, type_): rmatch = regexp.match # Even on python2.6 datetime.strptime is both slower than this code # and it does not support microseconds. has_named_groups = bool(regexp.groupindex) def process(value): if value is None: return None else: try: m = rmatch(value) except TypeError: raise ValueError("Couldn't parse %s string '%r' " "- value is not a string." % (type_.__name__, value)) if m is None: raise ValueError("Couldn't parse %s string: " "'%s'" % (type_.__name__, value)) if has_named_groups: groups = m.groupdict(0) return type_(**dict(zip(groups.iterkeys(), map(int, groups.itervalues())))) else: return type_(*map(int, m.groups(0))) return process def boolean_to_int(value): if value is None: return None else: return int(value) def py_fallback(): def to_unicode_processor_factory(encoding, errors=None): decoder = codecs.getdecoder(encoding) def process(value): if value is None: return None else: # decoder returns a tuple: (value, len). Simply dropping the # len part is safe: it is done that way in the normal # 'xx'.decode(encoding) code path. 
return decoder(value, errors)[0] return process def to_decimal_processor_factory(target_class, scale=10): fstring = "%%.%df" % scale def process(value): if value is None: return None else: return target_class(fstring % value) return process def to_float(value): if value is None: return None else: return float(value) def to_str(value): if value is None: return None else: return str(value) def int_to_boolean(value): if value is None: return None else: return value and True or False DATETIME_RE = re.compile( "(\d+)-(\d+)-(\d+) (\d+):(\d+):(\d+)(?:\.(\d+))?") TIME_RE = re.compile("(\d+):(\d+):(\d+)(?:\.(\d+))?") DATE_RE = re.compile("(\d+)-(\d+)-(\d+)") str_to_datetime = str_to_datetime_processor_factory(DATETIME_RE, datetime.datetime) str_to_time = str_to_datetime_processor_factory(TIME_RE, datetime.time) str_to_date = str_to_datetime_processor_factory(DATE_RE, datetime.date) return locals() try: from sqlalchemy.cprocessors import UnicodeResultProcessor, \ DecimalResultProcessor, \ to_float, to_str, int_to_boolean, \ str_to_datetime, str_to_time, \ str_to_date def to_unicode_processor_factory(encoding, errors=None): # this is cumbersome but it would be even more so on the C side if errors is not None: return UnicodeResultProcessor(encoding, errors).process else: return UnicodeResultProcessor(encoding).process def to_decimal_processor_factory(target_class, scale=10): # Note that the scale argument is not taken into account for integer # values in the C implementation while it is in the Python one. # For example, the Python implementation might return # Decimal('5.00000') whereas the C implementation will # return Decimal('5'). These are equivalent of course. return DecimalResultProcessor(target_class, "%%.%df" % scale).process except ImportError: globals().update(py_fallback()) SQLAlchemy-0.8.4/lib/sqlalchemy/schema.py0000644000076500000240000040706212251150015020754 0ustar classicstaff00000000000000# sqlalchemy/schema.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """The schema module provides the building blocks for database metadata. Each element within this module describes a database entity which can be created and dropped, or is otherwise part of such an entity. Examples include tables, columns, sequences, and indexes. All entities are subclasses of :class:`~sqlalchemy.schema.SchemaItem`, and as defined in this module they are intended to be agnostic of any vendor-specific constructs. A collection of entities are grouped into a unit called :class:`~sqlalchemy.schema.MetaData`. MetaData serves as a logical grouping of schema elements, and can also be associated with an actual database connection such that operations involving the contained elements can contact the database as needed. Two of the elements here also build upon their "syntactic" counterparts, which are defined in :class:`~sqlalchemy.sql.expression.`, specifically :class:`~sqlalchemy.schema.Table` and :class:`~sqlalchemy.schema.Column`. Since these objects are part of the SQL expression language, they are usable as components in SQL expressions. """ from __future__ import with_statement import re import inspect from . 
import exc, util, dialects, event, events, inspection from .sql import expression, visitors ddl = util.importlater("sqlalchemy.engine", "ddl") sqlutil = util.importlater("sqlalchemy.sql", "util") url = util.importlater("sqlalchemy.engine", "url") sqltypes = util.importlater("sqlalchemy", "types") __all__ = ['SchemaItem', 'Table', 'Column', 'ForeignKey', 'Sequence', 'Index', 'ForeignKeyConstraint', 'PrimaryKeyConstraint', 'CheckConstraint', 'UniqueConstraint', 'DefaultGenerator', 'Constraint', 'MetaData', 'ThreadLocalMetaData', 'SchemaVisitor', 'PassiveDefault', 'DefaultClause', 'FetchedValue', 'ColumnDefault', 'DDL', 'CreateTable', 'DropTable', 'CreateSequence', 'DropSequence', 'AddConstraint', 'DropConstraint', ] __all__.sort() RETAIN_SCHEMA = util.symbol('retain_schema') class SchemaItem(events.SchemaEventTarget, visitors.Visitable): """Base class for items that define a database schema.""" __visit_name__ = 'schema_item' quote = None def _init_items(self, *args): """Initialize the list of child items for this SchemaItem.""" for item in args: if item is not None: item._set_parent_with_dispatch(self) def get_children(self, **kwargs): """used to allow SchemaVisitor access""" return [] def __repr__(self): return util.generic_repr(self) @util.memoized_property def info(self): """Info dictionary associated with the object, allowing user-defined data to be associated with this :class:`.SchemaItem`. The dictionary is automatically generated when first accessed. It can also be specified in the constructor of some objects, such as :class:`.Table` and :class:`.Column`. """ return {} def _get_table_key(name, schema): if schema is None: return name else: return schema + "." + name def _validate_dialect_kwargs(kwargs, name): # validate remaining kwargs that they all specify DB prefixes for k in kwargs: m = re.match('^(.+?)_.*', k) if m is None: raise TypeError("Additional arguments should be " "named <dialectname>_<argument>, got '%s'" % k) inspection._self_inspects(SchemaItem) class Table(SchemaItem, expression.TableClause): """Represent a table in a database. e.g.:: mytable = Table("mytable", metadata, Column('mytable_id', Integer, primary_key=True), Column('value', String(50)) ) The :class:`.Table` object constructs a unique instance of itself based on its name and optional schema name within the given :class:`.MetaData` object. Calling the :class:`.Table` constructor with the same name and same :class:`.MetaData` argument a second time will return the *same* :class:`.Table` object - in this way the :class:`.Table` constructor acts as a registry function. .. seealso:: :ref:`metadata_describing` - Introduction to database metadata Constructor arguments are as follows: :param name: The name of this table as represented in the database. This property, along with the *schema*, indicates the *singleton identity* of this table in relation to its parent :class:`.MetaData`. Additional calls to :class:`.Table` with the same name, metadata, and schema name will return the same :class:`.Table` object. Names which contain no upper case characters will be treated as case insensitive names, and will not be quoted unless they are a reserved word. Names with any number of upper case characters will be quoted and sent exactly. Note that this behavior applies even for databases which standardize upper case names as case insensitive such as Oracle. :param metadata: a :class:`.MetaData` object which will contain this table. The metadata is used as a point of association of this table with other tables which are referenced via foreign key.
It also may be used to associate this table with a particular :class:`.Connectable`. :param \*args: Additional positional arguments are used primarily to add the list of :class:`.Column` objects contained within this table. Similar to the style of a CREATE TABLE statement, other :class:`.SchemaItem` constructs may be added here, including :class:`.PrimaryKeyConstraint`, and :class:`.ForeignKeyConstraint`. :param autoload: Defaults to False: the Columns for this table should be reflected from the database. Usually there will be no Column objects in the constructor if this property is set. :param autoload_replace: If ``True``, when using ``autoload=True`` and ``extend_existing=True``, replace ``Column`` objects already present in the ``Table`` that's in the ``MetaData`` registry with what's reflected. Otherwise, all existing columns will be excluded from the reflection process. Note that this does not impact ``Column`` objects specified in the same call to ``Table`` which includes ``autoload``, those always take precedence. Defaults to ``True``. .. versionadded:: 0.7.5 :param autoload_with: If autoload==True, this is an optional Engine or Connection instance to be used for the table reflection. If ``None``, the underlying MetaData's bound connectable will be used. :param extend_existing: When ``True``, indicates that if this :class:`.Table` is already present in the given :class:`.MetaData`, apply further arguments within the constructor to the existing :class:`.Table`. If ``extend_existing`` or ``keep_existing`` are not set, an error is raised if additional table modifiers are specified when the given :class:`.Table` is already present in the :class:`.MetaData`. .. versionchanged:: 0.7.4 ``extend_existing`` will work in conjunction with ``autoload=True`` to run a new reflection operation against the database; new :class:`.Column` objects will be produced from database metadata to replace those existing with the same name, and additional :class:`.Column` objects not present in the :class:`.Table` will be added. As is always the case with ``autoload=True``, :class:`.Column` objects can be specified in the same :class:`.Table` constructor, which will take precedence. I.e.:: Table("mytable", metadata, Column('y', Integer), extend_existing=True, autoload=True, autoload_with=engine ) The above will overwrite all columns within ``mytable`` which are present in the database, except for ``y`` which will be used as is from the above definition. If the ``autoload_replace`` flag is set to False, no existing columns will be replaced. :param implicit_returning: True by default - indicates that RETURNING can be used by default to fetch newly inserted primary key values, for backends which support this. Note that create_engine() also provides an implicit_returning flag. :param include_columns: A list of strings indicating a subset of columns to be loaded via the ``autoload`` operation; table columns who aren't present in this list will not be represented on the resulting ``Table`` object. Defaults to ``None`` which indicates all columns should be reflected. :param info: Optional data dictionary which will be populated into the :attr:`.SchemaItem.info` attribute of this object. :param keep_existing: When ``True``, indicates that if this Table is already present in the given :class:`.MetaData`, ignore further arguments within the constructor to the existing :class:`.Table`, and return the :class:`.Table` object as originally created. 
This is to allow a function that wishes to define a new :class:`.Table` on first call, but on subsequent calls will return the same :class:`.Table`, without any of the declarations (particularly constraints) being applied a second time. Also see extend_existing. If extend_existing or keep_existing are not set, an error is raised if additional table modifiers are specified when the given :class:`.Table` is already present in the :class:`.MetaData`. :param listeners: A list of tuples of the form ``(<eventname>, <fn>)`` which will be passed to :func:`.event.listen` upon construction. This alternate hook to :func:`.event.listen` allows the establishment of a listener function specific to this :class:`.Table` before the "autoload" process begins. Particularly useful for the :meth:`.DDLEvents.column_reflect` event:: def listen_for_reflect(table, column_info): "handle the column reflection event" # ... t = Table( 'sometable', autoload=True, listeners=[ ('column_reflect', listen_for_reflect) ]) :param mustexist: When ``True``, indicates that this Table must already be present in the given :class:`.MetaData` collection, else an exception is raised. :param prefixes: A list of strings to insert after CREATE in the CREATE TABLE statement. They will be separated by spaces. :param quote: Force quoting of this table's name on or off, corresponding to ``True`` or ``False``. When left at its default of ``None``, the table identifier will be quoted according to whether the name is case sensitive (identifiers with at least one upper case character are treated as case sensitive), or if it's a reserved word. This flag is only needed to force quoting of a reserved word which is not known by the SQLAlchemy dialect. :param quote_schema: same as 'quote' but applies to the schema identifier. :param schema: The *schema name* for this table, which is required if the table resides in a schema other than the default selected schema for the engine's database connection. Defaults to ``None``. :param useexisting: Deprecated. Use extend_existing. """ __visit_name__ = 'table' def __new__(cls, *args, **kw): if not args: # python3k pickle seems to call this return object.__new__(cls) try: name, metadata, args = args[0], args[1], args[2:] except IndexError: raise TypeError("Table() takes at least two arguments") schema = kw.get('schema', None) if schema is None: schema = metadata.schema keep_existing = kw.pop('keep_existing', False) extend_existing = kw.pop('extend_existing', False) if 'useexisting' in kw: msg = "useexisting is deprecated. Use extend_existing." util.warn_deprecated(msg) if extend_existing: msg = "useexisting is synonymous with extend_existing." raise exc.ArgumentError(msg) extend_existing = kw.pop('useexisting', False) if keep_existing and extend_existing: msg = "keep_existing and extend_existing are mutually exclusive." raise exc.ArgumentError(msg) mustexist = kw.pop('mustexist', False) key = _get_table_key(name, schema) if key in metadata.tables: if not keep_existing and not extend_existing and bool(args): raise exc.InvalidRequestError( "Table '%s' is already defined for this MetaData " "instance. Specify 'extend_existing=True' " "to redefine " "options and columns on an " "existing Table object."
% key) table = metadata.tables[key] if extend_existing: table._init_existing(*args, **kw) return table else: if mustexist: raise exc.InvalidRequestError( "Table '%s' not defined" % (key)) table = object.__new__(cls) table.dispatch.before_parent_attach(table, metadata) metadata._add_table(name, schema, table) try: table._init(name, metadata, *args, **kw) table.dispatch.after_parent_attach(table, metadata) return table except: metadata._remove_table(name, schema) raise def __init__(self, *args, **kw): """Constructor for :class:`~.schema.Table`. This method is a no-op. See the top-level documentation for :class:`~.schema.Table` for constructor arguments. """ # __init__ is overridden to prevent __new__ from # calling the superclass constructor. def _init(self, name, metadata, *args, **kwargs): super(Table, self).__init__(name) self.metadata = metadata self.schema = kwargs.pop('schema', None) if self.schema is None: self.schema = metadata.schema self.quote_schema = kwargs.pop( 'quote_schema', metadata.quote_schema) else: self.quote_schema = kwargs.pop('quote_schema', None) self.indexes = set() self.constraints = set() self._columns = expression.ColumnCollection() PrimaryKeyConstraint()._set_parent_with_dispatch(self) self.foreign_keys = set() self._extra_dependencies = set() self.kwargs = {} if self.schema is not None: self.fullname = "%s.%s" % (self.schema, self.name) else: self.fullname = self.name autoload = kwargs.pop('autoload', False) autoload_with = kwargs.pop('autoload_with', None) # this argument is only used with _init_existing() kwargs.pop('autoload_replace', True) include_columns = kwargs.pop('include_columns', None) self.implicit_returning = kwargs.pop('implicit_returning', True) self.quote = kwargs.pop('quote', None) if 'info' in kwargs: self.info = kwargs.pop('info') if 'listeners' in kwargs: listeners = kwargs.pop('listeners') for evt, fn in listeners: event.listen(self, evt, fn) self._prefixes = kwargs.pop('prefixes', []) self._extra_kwargs(**kwargs) # load column definitions from the database if 'autoload' is defined # we do it after the table is in the singleton dictionary to support # circular foreign keys if autoload: self._autoload(metadata, autoload_with, include_columns) # initialize all the column, etc. objects. done after reflection to # allow user-overrides self._init_items(*args) def _autoload(self, metadata, autoload_with, include_columns, exclude_columns=()): if self.primary_key.columns: PrimaryKeyConstraint(*[ c for c in self.primary_key.columns if c.key in exclude_columns ])._set_parent_with_dispatch(self) if autoload_with: autoload_with.run_callable( autoload_with.dialect.reflecttable, self, include_columns, exclude_columns ) else: bind = _bind_or_error(metadata, msg="No engine is bound to this Table's MetaData. " "Pass an engine to the Table via " "autoload_with=, " "or associate the MetaData with an engine via " "metadata.bind=") bind.run_callable( bind.dialect.reflecttable, self, include_columns, exclude_columns ) @property def _sorted_constraints(self): """Return the set of constraints as a list, sorted by creation order. 
""" return sorted(self.constraints, key=lambda c: c._creation_order) def _init_existing(self, *args, **kwargs): autoload = kwargs.pop('autoload', False) autoload_with = kwargs.pop('autoload_with', None) autoload_replace = kwargs.pop('autoload_replace', True) schema = kwargs.pop('schema', None) if schema and schema != self.schema: raise exc.ArgumentError( "Can't change schema of existing table from '%s' to '%s'", (self.schema, schema)) include_columns = kwargs.pop('include_columns', None) if include_columns is not None: for c in self.c: if c.name not in include_columns: self._columns.remove(c) for key in ('quote', 'quote_schema'): if key in kwargs: setattr(self, key, kwargs.pop(key)) if 'info' in kwargs: self.info = kwargs.pop('info') if autoload: if not autoload_replace: exclude_columns = [c.name for c in self.c] else: exclude_columns = () self._autoload( self.metadata, autoload_with, include_columns, exclude_columns) self._extra_kwargs(**kwargs) self._init_items(*args) def _extra_kwargs(self, **kwargs): # validate remaining kwargs that they all specify DB prefixes _validate_dialect_kwargs(kwargs, "Table") self.kwargs.update(kwargs) def _init_collections(self): pass @util.memoized_property def _autoincrement_column(self): for col in self.primary_key: if col.autoincrement and \ col.type._type_affinity is not None and \ issubclass(col.type._type_affinity, sqltypes.Integer) and \ (not col.foreign_keys or col.autoincrement == 'ignore_fk') and \ isinstance(col.default, (type(None), Sequence)) and \ (col.server_default is None or col.server_default.reflected): return col @property def key(self): return _get_table_key(self.name, self.schema) def __repr__(self): return "Table(%s)" % ', '.join( [repr(self.name)] + [repr(self.metadata)] + [repr(x) for x in self.columns] + ["%s=%s" % (k, repr(getattr(self, k))) for k in ['schema']]) def __str__(self): return _get_table_key(self.description, self.schema) @property def bind(self): """Return the connectable associated with this Table.""" return self.metadata and self.metadata.bind or None def add_is_dependent_on(self, table): """Add a 'dependency' for this Table. This is another Table object which must be created first before this one can, or dropped after this one. Usually, dependencies between tables are determined via ForeignKey objects. However, for other situations that create dependencies outside of foreign keys (rules, inheriting), this method can manually establish such a link. """ self._extra_dependencies.add(table) def append_column(self, column): """Append a :class:`~.schema.Column` to this :class:`~.schema.Table`. The "key" of the newly added :class:`~.schema.Column`, i.e. the value of its ``.key`` attribute, will then be available in the ``.c`` collection of this :class:`~.schema.Table`, and the column definition will be included in any CREATE TABLE, SELECT, UPDATE, etc. statements generated from this :class:`~.schema.Table` construct. Note that this does **not** change the definition of the table as it exists within any underlying database, assuming that table has already been created in the database. Relational databases support the addition of columns to existing tables using the SQL ALTER command, which would need to be emitted for an already-existing table that doesn't contain the newly added column. """ column._set_parent_with_dispatch(self) def append_constraint(self, constraint): """Append a :class:`~.schema.Constraint` to this :class:`~.schema.Table`. 
This has the effect of the constraint being included in any future CREATE TABLE statement, assuming specific DDL creation events have not been associated with the given :class:`~.schema.Constraint` object. Note that this does **not** produce the constraint within the relational database automatically, for a table that already exists in the database. To add a constraint to an existing relational database table, the SQL ALTER command must be used. SQLAlchemy also provides the :class:`.AddConstraint` construct which can produce this SQL when invoked as an executable clause. """ constraint._set_parent_with_dispatch(self) def append_ddl_listener(self, event_name, listener): """Append a DDL event listener to this ``Table``. Deprecated. See :class:`.DDLEvents`. """ def adapt_listener(target, connection, **kw): listener(event_name, target, connection) event.listen(self, "" + event_name.replace('-', '_'), adapt_listener) def _set_parent(self, metadata): metadata._add_table(self.name, self.schema, self) self.metadata = metadata def get_children(self, column_collections=True, schema_visitor=False, **kw): if not schema_visitor: return expression.TableClause.get_children( self, column_collections=column_collections, **kw) else: if column_collections: return list(self.columns) else: return [] def exists(self, bind=None): """Return True if this table exists.""" if bind is None: bind = _bind_or_error(self) return bind.run_callable(bind.dialect.has_table, self.name, schema=self.schema) def create(self, bind=None, checkfirst=False): """Issue a ``CREATE`` statement for this :class:`.Table`, using the given :class:`.Connectable` for connectivity. .. seealso:: :meth:`.MetaData.create_all`. """ if bind is None: bind = _bind_or_error(self) bind._run_visitor(ddl.SchemaGenerator, self, checkfirst=checkfirst) def drop(self, bind=None, checkfirst=False): """Issue a ``DROP`` statement for this :class:`.Table`, using the given :class:`.Connectable` for connectivity. .. seealso:: :meth:`.MetaData.drop_all`. """ if bind is None: bind = _bind_or_error(self) bind._run_visitor(ddl.SchemaDropper, self, checkfirst=checkfirst) def tometadata(self, metadata, schema=RETAIN_SCHEMA): """Return a copy of this :class:`.Table` associated with a different :class:`.MetaData`. E.g.:: some_engine = create_engine("sqlite:///some.db") # create two metadata meta1 = MetaData() meta2 = MetaData() # load 'users' from the sqlite engine users_table = Table('users', meta1, autoload=True, autoload_with=some_engine) # create the same Table object for the plain metadata users_table_2 = users_table.tometadata(meta2) :param metadata: Target :class:`.MetaData` object. :param schema: Optional string name of a target schema, or ``None`` for no schema. The :class:`.Table` object will be given this schema name upon copy. Defaults to the special symbol :attr:`.RETAIN_SCHEMA` which indicates no change should be made to the schema name of the resulting :class:`.Table`. """ if schema is RETAIN_SCHEMA: schema = self.schema elif schema is None: schema = metadata.schema key = _get_table_key(self.name, schema) if key in metadata.tables: util.warn("Table '%s' already exists within the given " "MetaData - not copying." 
% self.description) return metadata.tables[key] args = [] for c in self.columns: args.append(c.copy(schema=schema)) table = Table( self.name, metadata, schema=schema, *args, **self.kwargs ) for c in self.constraints: table.append_constraint(c.copy(schema=schema, target_table=table)) for index in self.indexes: # skip indexes that would be generated # by the 'index' flag on Column if len(index.columns) == 1 and \ list(index.columns)[0].index: continue Index(index.name, unique=index.unique, *[table.c[col] for col in index.columns.keys()], **index.kwargs) table.dispatch._update(self.dispatch) return table class Column(SchemaItem, expression.ColumnClause): """Represents a column in a database table.""" __visit_name__ = 'column' def __init__(self, *args, **kwargs): """ Construct a new ``Column`` object. :param name: The name of this column as represented in the database. This argument may be the first positional argument, or specified via keyword. Names which contain no upper case characters will be treated as case insensitive names, and will not be quoted unless they are a reserved word. Names with any number of upper case characters will be quoted and sent exactly. Note that this behavior applies even for databases which standardize upper case names as case insensitive such as Oracle. The name field may be omitted at construction time and applied later, at any time before the Column is associated with a :class:`.Table`. This is to support convenient usage within the :mod:`~sqlalchemy.ext.declarative` extension. :param type\_: The column's type, indicated using an instance which subclasses :class:`~sqlalchemy.types.TypeEngine`. If no arguments are required for the type, the class of the type can be sent as well, e.g.:: # use a type with arguments Column('data', String(50)) # use no arguments Column('level', Integer) The ``type`` argument may be the second positional argument or specified by keyword. There is partial support for automatic detection of the type based on that of a :class:`.ForeignKey` associated with this column, if the type is specified as ``None``. However, this feature is not fully implemented and may not function in all cases. :param \*args: Additional positional arguments include various :class:`.SchemaItem` derived constructs which will be applied as options to the column. These include instances of :class:`.Constraint`, :class:`.ForeignKey`, :class:`.ColumnDefault`, and :class:`.Sequence`. In some cases an equivalent keyword argument is available such as ``server_default``, ``default`` and ``unique``. :param autoincrement: This flag may be set to ``False`` to indicate an integer primary key column that should not be considered to be the "autoincrement" column, that is the integer primary key column which generates values implicitly upon INSERT and whose value is usually returned via the DBAPI cursor.lastrowid attribute. It defaults to ``True`` to satisfy the common use case of a table with a single integer primary key column. If the table has a composite primary key consisting of more than one integer column, set this flag to True only on the column that should be considered "autoincrement". The setting *only* has an effect for columns which are: * Integer derived (i.e. INT, SMALLINT, BIGINT). * Part of the primary key * Are not referenced by any foreign keys, unless the value is specified as ``'ignore_fk'`` .. versionadded:: 0.7.4 * have no server side or client side defaults (with the exception of Postgresql SERIAL). 
The setting has these two effects on columns that meet the above criteria: * DDL issued for the column will include database-specific keywords intended to signify this column as an "autoincrement" column, such as AUTO_INCREMENT on MySQL, SERIAL on Postgresql, and IDENTITY on MS-SQL. It does *not* issue AUTOINCREMENT for SQLite since this is a special SQLite flag that is not required for autoincrementing behavior. See the SQLite dialect documentation for information on SQLite's AUTOINCREMENT. * The column will be considered to be available as cursor.lastrowid or equivalent, for those dialects which "post fetch" newly inserted identifiers after a row has been inserted (SQLite, MySQL, MS-SQL). It does not have any effect in this regard for databases that use sequences to generate primary key identifiers (i.e. Firebird, Postgresql, Oracle). .. versionchanged:: 0.7.4 ``autoincrement`` accepts a special value ``'ignore_fk'`` to indicate autoincrementing status regardless of foreign key references. This applies to certain composite foreign key setups, such as the one demonstrated in the ORM documentation at :ref:`post_update`. :param default: A scalar, Python callable, or :class:`.ColumnElement` expression representing the *default value* for this column, which will be invoked upon insert if this column is otherwise not specified in the VALUES clause of the insert. This is a shortcut to using :class:`.ColumnDefault` as a positional argument; see that class for full detail on the structure of the argument. Contrast this argument to ``server_default`` which creates a default generator on the database side. :param doc: optional String that can be used by the ORM or similar to document attributes. This attribute does not render SQL comments (a future attribute 'comment' will achieve that). :param key: An optional string identifier which will identify this ``Column`` object on the :class:`.Table`. When a key is provided, this is the only identifier referencing the ``Column`` within the application, including ORM attribute mapping; the ``name`` field is used only when rendering SQL. :param index: When ``True``, indicates that the column is indexed. This is a shortcut for using an :class:`.Index` construct on the table. To specify indexes with explicit names or indexes that contain multiple columns, use the :class:`.Index` construct instead. :param info: Optional data dictionary which will be populated into the :attr:`.SchemaItem.info` attribute of this object. :param nullable: If set to the default of ``True``, indicates the column will be rendered as allowing NULL, else it's rendered as NOT NULL. This parameter is only used when issuing CREATE TABLE statements. :param onupdate: A scalar, Python callable, or :class:`~sqlalchemy.sql.expression.ClauseElement` representing a default value to be applied to the column within UPDATE statements, which will be invoked upon update if this column is not present in the SET clause of the update. This is a shortcut to using :class:`.ColumnDefault` as a positional argument with ``for_update=True``. :param primary_key: If ``True``, marks this column as a primary key column. Multiple columns can have this flag set to specify composite primary keys. As an alternative, the primary key of a :class:`.Table` can be specified via an explicit :class:`.PrimaryKeyConstraint` object. :param server_default: A :class:`.FetchedValue` instance, str, Unicode or :func:`~sqlalchemy.sql.expression.text` construct representing the DDL DEFAULT value for the column.
String types will be emitted as-is, surrounded by single quotes:: Column('x', Text, server_default="val") x TEXT DEFAULT 'val' A :func:`~sqlalchemy.sql.expression.text` expression will be rendered as-is, without quotes:: Column('y', DateTime, server_default=text('NOW()')) y DATETIME DEFAULT NOW() Strings and text() will be converted into a :class:`.DefaultClause` object upon initialization. Use :class:`.FetchedValue` to indicate that an already-existing column will generate a default value on the database side which will be available to SQLAlchemy for post-fetch after inserts. This construct does not specify any DDL and the implementation is left to the database, such as via a trigger. :param server_onupdate: A :class:`.FetchedValue` instance representing a database-side default generation function. This indicates to SQLAlchemy that a newly generated value will be available after updates. This construct does not specify any DDL and the implementation is left to the database, such as via a trigger. :param quote: Force quoting of this column's name on or off, corresponding to ``True`` or ``False``. When left at its default of ``None``, the column identifier will be quoted according to whether the name is case sensitive (identifiers with at least one upper case character are treated as case sensitive), or if it's a reserved word. This flag is only needed to force quoting of a reserved word which is not known by the SQLAlchemy dialect. :param unique: When ``True``, indicates that this column contains a unique constraint, or if ``index`` is ``True`` as well, indicates that the :class:`.Index` should be created with the unique flag. To specify multiple columns in the constraint/index or to specify an explicit name, use the :class:`.UniqueConstraint` or :class:`.Index` constructs explicitly. :param system: When ``True``, indicates this is a "system" column, that is a column which is automatically made available by the database, and should not be included in the columns list for a ``CREATE TABLE`` statement. For more elaborate scenarios where columns should be conditionally rendered differently on different backends, consider custom compilation rules for :class:`.CreateColumn`. .. versionadded:: 0.8.3 Added the ``system=True`` parameter to :class:`.Column`. """ name = kwargs.pop('name', None) type_ = kwargs.pop('type_', None) args = list(args) if args: if isinstance(args[0], basestring): if name is not None: raise exc.ArgumentError( "May not pass name positionally and as a keyword.") name = args.pop(0) if args: coltype = args[0] if (isinstance(coltype, sqltypes.TypeEngine) or (isinstance(coltype, type) and issubclass(coltype, sqltypes.TypeEngine))): if type_ is not None: raise exc.ArgumentError( "May not pass type_ positionally and as a keyword.") type_ = args.pop(0) no_type = type_ is None super(Column, self).__init__(name, None, type_) self.key = kwargs.pop('key', name) self.primary_key = kwargs.pop('primary_key', False) self.nullable = kwargs.pop('nullable', not self.primary_key) self.default = kwargs.pop('default', None) self.server_default = kwargs.pop('server_default', None) self.server_onupdate = kwargs.pop('server_onupdate', None) # these default to None because .index and .unique are *not* # informational flags about Column - there can still be an # Index or UniqueConstraint referring to this Column.
self.index = kwargs.pop('index', None) self.unique = kwargs.pop('unique', None) self.system = kwargs.pop('system', False) self.quote = kwargs.pop('quote', None) self.doc = kwargs.pop('doc', None) self.onupdate = kwargs.pop('onupdate', None) self.autoincrement = kwargs.pop('autoincrement', True) self.constraints = set() self.foreign_keys = set() # check if this Column is proxying another column if '_proxies' in kwargs: self._proxies = kwargs.pop('_proxies') # otherwise, add DDL-related events elif isinstance(self.type, sqltypes.SchemaType): self.type._set_parent_with_dispatch(self) if self.default is not None: if isinstance(self.default, (ColumnDefault, Sequence)): args.append(self.default) else: if getattr(self.type, '_warn_on_bytestring', False): # Py3K #if isinstance(self.default, bytes): # Py2K if isinstance(self.default, str): # end Py2K util.warn("Unicode column received non-unicode " "default value.") args.append(ColumnDefault(self.default)) if self.server_default is not None: if isinstance(self.server_default, FetchedValue): args.append(self.server_default._as_for_update(False)) else: args.append(DefaultClause(self.server_default)) if self.onupdate is not None: if isinstance(self.onupdate, (ColumnDefault, Sequence)): args.append(self.onupdate) else: args.append(ColumnDefault(self.onupdate, for_update=True)) if self.server_onupdate is not None: if isinstance(self.server_onupdate, FetchedValue): args.append(self.server_onupdate._as_for_update(True)) else: args.append(DefaultClause(self.server_onupdate, for_update=True)) self._init_items(*args) if not self.foreign_keys and no_type: raise exc.ArgumentError("'type' is required on Column objects " "which have no foreign keys.") util.set_creation_order(self) if 'info' in kwargs: self.info = kwargs.pop('info') if kwargs: raise exc.ArgumentError( "Unknown arguments passed to Column: " + repr(kwargs.keys())) def __str__(self): if self.name is None: return "(no name)" elif self.table is not None: if self.table.named_with_column: return (self.table.description + "." 
+ self.description) else: return self.description else: return self.description def references(self, column): """Return True if this Column references the given column via foreign key.""" for fk in self.foreign_keys: if fk.column.proxy_set.intersection(column.proxy_set): return True else: return False def append_foreign_key(self, fk): fk._set_parent_with_dispatch(self) def __repr__(self): kwarg = [] if self.key != self.name: kwarg.append('key') if self.primary_key: kwarg.append('primary_key') if not self.nullable: kwarg.append('nullable') if self.onupdate: kwarg.append('onupdate') if self.default: kwarg.append('default') if self.server_default: kwarg.append('server_default') return "Column(%s)" % ', '.join( [repr(self.name)] + [repr(self.type)] + [repr(x) for x in self.foreign_keys if x is not None] + [repr(x) for x in self.constraints] + [(self.table is not None and "table=<%s>" % self.table.description or "table=None")] + ["%s=%s" % (k, repr(getattr(self, k))) for k in kwarg]) def _set_parent(self, table): if not self.name: raise exc.ArgumentError( "Column must be constructed with a non-blank name or " "assign a non-blank .name before adding to a Table.") if self.key is None: self.key = self.name existing = getattr(self, 'table', None) if existing is not None and existing is not table: raise exc.ArgumentError( "Column object already assigned to Table '%s'" % existing.description) if self.key in table._columns: col = table._columns.get(self.key) if col is not self: for fk in list(col.foreign_keys): table.foreign_keys.remove(fk) if fk.constraint in table.constraints: # this might have been removed # already, if it's a composite constraint # and more than one col being replaced table.constraints.remove(fk.constraint) table._columns.replace(self) if self.primary_key: table.primary_key._replace(self) Table._autoincrement_column._reset(table) elif self.key in table.primary_key: raise exc.ArgumentError( "Trying to redefine primary-key column '%s' as a " "non-primary-key column on table '%s'" % ( self.key, table.fullname)) self.table = table if self.index: if isinstance(self.index, basestring): raise exc.ArgumentError( "The 'index' keyword argument on Column is boolean only. " "To create indexes with a specific name, create an " "explicit Index object external to the Table.") Index(expression._truncated_label('ix_%s' % self._label), self, unique=bool(self.unique)) elif self.unique: if isinstance(self.unique, basestring): raise exc.ArgumentError( "The 'unique' keyword argument on Column is boolean " "only. To create unique constraints or indexes with a " "specific name, append an explicit UniqueConstraint to " "the Table's list of elements, or create an explicit " "Index object external to the Table.") table.append_constraint(UniqueConstraint(self.key)) def _on_table_attach(self, fn): if self.table is not None: fn(self, self.table) event.listen(self, 'after_parent_attach', fn) def copy(self, **kw): """Create a copy of this ``Column``, uninitialized. This is used in ``Table.tometadata``.
""" # Constraint objects plus non-constraint-bound ForeignKey objects args = \ [c.copy(**kw) for c in self.constraints] + \ [c.copy(**kw) for c in self.foreign_keys if not c.constraint] type_ = self.type if isinstance(type_, sqltypes.SchemaType): type_ = type_.copy(**kw) c = self._constructor( name=self.name, type_=type_, key=self.key, primary_key=self.primary_key, nullable=self.nullable, unique=self.unique, system=self.system, quote=self.quote, index=self.index, autoincrement=self.autoincrement, default=self.default, server_default=self.server_default, onupdate=self.onupdate, server_onupdate=self.server_onupdate, info=self.info, doc=self.doc, *args ) c.dispatch._update(self.dispatch) return c def _make_proxy(self, selectable, name=None, key=None, name_is_truncatable=False, **kw): """Create a *proxy* for this column. This is a copy of this ``Column`` referenced by a different parent (such as an alias or select statement). The column should be used only in select scenarios, as its full DDL/default information is not transferred. """ fk = [ForeignKey(f.column, _constraint=f.constraint) for f in self.foreign_keys] if name is None and self.name is None: raise exc.InvalidRequestError("Cannot initialize a sub-selectable" " with this Column object until it's 'name' has " "been assigned.") try: c = self._constructor( expression._as_truncated(name or self.name) if \ name_is_truncatable else (name or self.name), self.type, key=key if key else name if name else self.key, primary_key=self.primary_key, nullable=self.nullable, quote=self.quote, _proxies=[self], *fk) except TypeError, e: # Py3K #raise TypeError( # "Could not create a copy of this %r object. " # "Ensure the class includes a _constructor() " # "attribute or method which accepts the " # "standard Column constructor arguments, or " # "references the Column class itself." % self.__class__) from e # Py2K raise TypeError( "Could not create a copy of this %r object. " "Ensure the class includes a _constructor() " "attribute or method which accepts the " "standard Column constructor arguments, or " "references the Column class itself. " "Original error: %s" % (self.__class__, e)) # end Py2K c.table = selectable selectable._columns.add(c) if selectable._is_clone_of is not None: c._is_clone_of = selectable._is_clone_of.columns[c.key] if self.primary_key: selectable.primary_key.add(c) c.dispatch.after_parent_attach(c, selectable) return c def get_children(self, schema_visitor=False, **kwargs): if schema_visitor: return [x for x in (self.default, self.onupdate) if x is not None] + \ list(self.foreign_keys) + list(self.constraints) else: return expression.ColumnClause.get_children(self, **kwargs) class ForeignKey(SchemaItem): """Defines a dependency between two columns. ``ForeignKey`` is specified as an argument to a :class:`.Column` object, e.g.:: t = Table("remote_table", metadata, Column("remote_id", ForeignKey("main_table.id")) ) Note that ``ForeignKey`` is only a marker object that defines a dependency between two columns. The actual constraint is in all cases represented by the :class:`.ForeignKeyConstraint` object. This object will be generated automatically when a ``ForeignKey`` is associated with a :class:`.Column` which in turn is associated with a :class:`.Table`. Conversely, when :class:`.ForeignKeyConstraint` is applied to a :class:`.Table`, ``ForeignKey`` markers are automatically generated to be present on each associated :class:`.Column`, which are also associated with the constraint object. 
Note that you cannot define a "composite" foreign key constraint, that is a constraint between a grouping of multiple parent/child columns, using ``ForeignKey`` objects. To define this grouping, the :class:`.ForeignKeyConstraint` object must be used, and applied to the :class:`.Table`. The associated ``ForeignKey`` objects are created automatically. The ``ForeignKey`` objects associated with an individual :class:`.Column` object are available in the `foreign_keys` collection of that column. Further examples of foreign key configuration are in :ref:`metadata_foreignkeys`. """ __visit_name__ = 'foreign_key' def __init__(self, column, _constraint=None, use_alter=False, name=None, onupdate=None, ondelete=None, deferrable=None, schema=None, initially=None, link_to_name=False, match=None): """ Construct a column-level FOREIGN KEY. The :class:`.ForeignKey` object when constructed generates a :class:`.ForeignKeyConstraint` which is associated with the parent :class:`.Table` object's collection of constraints. :param column: A single target column for the key relationship. A :class:`.Column` object or a column name as a string: ``tablename.columnkey`` or ``schema.tablename.columnkey``. ``columnkey`` is the ``key`` which has been assigned to the column (defaults to the column name itself), unless ``link_to_name`` is ``True`` in which case the rendered name of the column is used. .. versionadded:: 0.7.4 Note that if the schema name is not included, and the underlying :class:`.MetaData` has a "schema", that value will be used. :param name: Optional string. An in-database name for the key if `constraint` is not provided. :param onupdate: Optional string. If set, emit ON UPDATE when issuing DDL for this constraint. Typical values include CASCADE, DELETE and RESTRICT. :param ondelete: Optional string. If set, emit ON DELETE when issuing DDL for this constraint. Typical values include CASCADE, DELETE and RESTRICT. :param deferrable: Optional bool. If set, emit DEFERRABLE or NOT DEFERRABLE when issuing DDL for this constraint. :param initially: Optional string. If set, emit INITIALLY when issuing DDL for this constraint. :param link_to_name: if True, the string name given in ``column`` is the rendered name of the referenced column, not its locally assigned ``key``. :param use_alter: passed to the underlying :class:`.ForeignKeyConstraint` to indicate the constraint should be generated/dropped externally from the CREATE TABLE/ DROP TABLE statement. See that classes' constructor for details. :param match: Optional string. If set, emit MATCH when issuing DDL for this constraint. Typical values include SIMPLE, PARTIAL and FULL. :param schema: Deprecated; this flag does nothing and will be removed in 0.9. """ self._colspec = column # the linked ForeignKeyConstraint. # ForeignKey will create this when parent Column # is attached to a Table, *or* ForeignKeyConstraint # object passes itself in when creating ForeignKey # markers. self.constraint = _constraint self.use_alter = use_alter self.name = name self.onupdate = onupdate self.ondelete = ondelete self.deferrable = deferrable self.initially = initially self.link_to_name = link_to_name self.match = match if schema: util.warn_deprecated( "'schema' argument on ForeignKey has no effect - " "please specify the target as " "...") def __repr__(self): return "ForeignKey(%r)" % self._get_colspec() def copy(self, schema=None): """Produce a copy of this :class:`.ForeignKey` object. The new :class:`.ForeignKey` will not be bound to any :class:`.Column`. 
This method is usually used by the internal copy procedures of :class:`.Column`, :class:`.Table`, and :class:`.MetaData`. :param schema: The returned :class:`.ForeignKey` will reference the original table and column name, qualified by the given string schema name. """ fk = ForeignKey( self._get_colspec(schema=schema), use_alter=self.use_alter, name=self.name, onupdate=self.onupdate, ondelete=self.ondelete, deferrable=self.deferrable, initially=self.initially, link_to_name=self.link_to_name, match=self.match ) fk.dispatch._update(self.dispatch) return fk def _get_colspec(self, schema=None): """Return a string based 'column specification' for this :class:`.ForeignKey`. This is usually the equivalent of the string-based "tablename.colname" argument first passed to the object's constructor. """ if schema: return schema + "." + self.column.table.name + \ "." + self.column.key elif isinstance(self._colspec, basestring): return self._colspec elif hasattr(self._colspec, '__clause_element__'): _column = self._colspec.__clause_element__() else: _column = self._colspec return "%s.%s" % (_column.table.fullname, _column.key) target_fullname = property(_get_colspec) def references(self, table): """Return True if the given :class:`.Table` is referenced by this :class:`.ForeignKey`.""" return table.corresponding_column(self.column) is not None def get_referent(self, table): """Return the :class:`.Column` in the given :class:`.Table` referenced by this :class:`.ForeignKey`. Returns None if this :class:`.ForeignKey` does not reference the given :class:`.Table`. """ return table.corresponding_column(self.column) @util.memoized_property def column(self): """Return the target :class:`.Column` referenced by this :class:`.ForeignKey`. If this :class:`.ForeignKey` was created using a string-based target column specification, this attribute will on first access initiate a resolution process to locate the referenced remote :class:`.Column`. The resolution process traverses to the parent :class:`.Column`, :class:`.Table`, and :class:`.MetaData` to proceed - if any of these aren't yet present, an error is raised. """ # ForeignKey inits its remote column as late as possible, so tables # can be defined without dependencies if isinstance(self._colspec, basestring): # locate the parent table this foreign key is attached to. we # use the "original" column which our parent column represents # (its a list of columns/other ColumnElements if the parent # table is a UNION) for c in self.parent.base_columns: if isinstance(c, Column): parenttable = c.table break else: raise exc.ArgumentError( "Parent column '%s' does not descend from a " "table-attached Column" % str(self.parent)) m = self._colspec.split('.') if m is None: raise exc.ArgumentError( "Invalid foreign key column specification: %s" % self._colspec) # A FK between column 'bar' and table 'foo' can be # specified as 'foo', 'foo.bar', 'dbo.foo.bar', # 'otherdb.dbo.foo.bar'. Once we have the column name and # the table name, treat everything else as the schema # name. Some databases (e.g. Sybase) support # inter-database foreign keys. See tickets#1341 and -- # indirectly related -- Ticket #594. This assumes that '.' # will never appear *within* any component of the FK. 
(schema, tname, colname) = (None, None, None) if schema is None and parenttable.metadata.schema is not None: schema = parenttable.metadata.schema if (len(m) == 1): tname = m.pop() else: colname = m.pop() tname = m.pop() if (len(m) > 0): schema = '.'.join(m) if _get_table_key(tname, schema) not in parenttable.metadata: raise exc.NoReferencedTableError( "Foreign key associated with column '%s' could not find " "table '%s' with which to generate a " "foreign key to target column '%s'" % (self.parent, tname, colname), tname) table = Table(tname, parenttable.metadata, mustexist=True, schema=schema) if not hasattr(self.constraint, '_referred_table'): self.constraint._referred_table = table elif self.constraint._referred_table is not table: raise exc.ArgumentError( 'ForeignKeyConstraint on %s(%s) refers to ' 'multiple remote tables: %s and %s' % ( parenttable, self.constraint._col_description, self.constraint._referred_table, table )) _column = None if colname is None: # colname is None in the case that ForeignKey argument # was specified as table name only, in which case we # match the column name to the same column on the # parent. key = self.parent _column = table.c.get(self.parent.key, None) elif self.link_to_name: key = colname for c in table.c: if c.name == colname: _column = c else: key = colname _column = table.c.get(colname, None) if _column is None: raise exc.NoReferencedColumnError( "Could not create ForeignKey '%s' on table '%s': " "table '%s' has no column named '%s'" % ( self._colspec, parenttable.name, table.name, key), table.name, key) elif hasattr(self._colspec, '__clause_element__'): _column = self._colspec.__clause_element__() else: _column = self._colspec # propagate TypeEngine to parent if it didn't have one if isinstance(self.parent.type, sqltypes.NullType): self.parent.type = _column.type return _column def _set_parent(self, column): if hasattr(self, 'parent'): if self.parent is column: return raise exc.InvalidRequestError( "This ForeignKey already has a parent !") self.parent = column self.parent.foreign_keys.add(self) self.parent._on_table_attach(self._set_table) def _set_table(self, column, table): # standalone ForeignKey - create ForeignKeyConstraint # on the hosting Table when attached to the Table. if self.constraint is None and isinstance(table, Table): self.constraint = ForeignKeyConstraint( [], [], use_alter=self.use_alter, name=self.name, onupdate=self.onupdate, ondelete=self.ondelete, deferrable=self.deferrable, initially=self.initially, match=self.match, ) self.constraint._elements[self.parent] = self self.constraint._set_parent_with_dispatch(table) table.foreign_keys.add(self) class _NotAColumnExpr(object): def _not_a_column_expr(self): raise exc.InvalidRequestError( "This %s cannot be used directly " "as a column expression." 
% self.__class__.__name__) __clause_element__ = self_group = lambda self: self._not_a_column_expr() _from_objects = property(lambda self: self._not_a_column_expr()) class DefaultGenerator(_NotAColumnExpr, SchemaItem): """Base class for column *default* values.""" __visit_name__ = 'default_generator' is_sequence = False is_server_default = False column = None def __init__(self, for_update=False): self.for_update = for_update def _set_parent(self, column): self.column = column if self.for_update: self.column.onupdate = self else: self.column.default = self def execute(self, bind=None, **kwargs): if bind is None: bind = _bind_or_error(self) return bind._execute_default(self, **kwargs) @property def bind(self): """Return the connectable associated with this default.""" if getattr(self, 'column', None) is not None: return self.column.table.bind else: return None class ColumnDefault(DefaultGenerator): """A plain default value on a column. This could correspond to a constant, a callable function, or a SQL clause. :class:`.ColumnDefault` is generated automatically whenever the ``default``, ``onupdate`` arguments of :class:`.Column` are used. A :class:`.ColumnDefault` can be passed positionally as well. For example, the following:: Column('foo', Integer, default=50) Is equivalent to:: Column('foo', Integer, ColumnDefault(50)) """ def __init__(self, arg, **kwargs): """"Construct a new :class:`.ColumnDefault`. :param arg: argument representing the default value. May be one of the following: * a plain non-callable Python value, such as a string, integer, boolean, or other simple type. The default value will be used as is each time. * a SQL expression, that is one which derives from :class:`.ColumnElement`. The SQL expression will be rendered into the INSERT or UPDATE statement, or in the case of a primary key column when RETURNING is not used may be pre-executed before an INSERT within a SELECT. * A Python callable. The function will be invoked for each new row subject to an INSERT or UPDATE. The callable must accept exactly zero or one positional arguments. The one-argument form will receive an instance of the :class:`.ExecutionContext`, which provides contextual information as to the current :class:`.Connection` in use as well as the current statement and parameters. """ super(ColumnDefault, self).__init__(**kwargs) if isinstance(arg, FetchedValue): raise exc.ArgumentError( "ColumnDefault may not be a server-side default type.") if util.callable(arg): arg = self._maybe_wrap_callable(arg) self.arg = arg @util.memoized_property def is_callable(self): return util.callable(self.arg) @util.memoized_property def is_clause_element(self): return isinstance(self.arg, expression.ClauseElement) @util.memoized_property def is_scalar(self): return not self.is_callable and \ not self.is_clause_element and \ not self.is_sequence def _maybe_wrap_callable(self, fn): """Wrap callables that don't accept a context. The alternative here is to require that a simple callable passed to "default" would need to be of the form "default=lambda ctx: datetime.now". That is the more "correct" way to go, but the case of using a zero-arg callable for "default" is so much more prominent than the context-specific one I'm having trouble justifying putting that inconvenience on everyone. """ if inspect.isfunction(fn): inspectable = fn elif inspect.isclass(fn): inspectable = fn.__init__ elif hasattr(fn, '__call__'): inspectable = fn.__call__ else: # probably not inspectable, try anyways. 
inspectable = fn try: argspec = inspect.getargspec(inspectable) except TypeError: return lambda ctx: fn() defaulted = argspec[3] is not None and len(argspec[3]) or 0 positionals = len(argspec[0]) - defaulted # Py3K compat - no unbound methods if inspect.ismethod(inspectable) or inspect.isclass(fn): positionals -= 1 if positionals == 0: return lambda ctx: fn() elif positionals == 1: return fn else: raise exc.ArgumentError( "ColumnDefault Python function takes zero or one " "positional arguments") def _visit_name(self): if self.for_update: return "column_onupdate" else: return "column_default" __visit_name__ = property(_visit_name) def __repr__(self): return "ColumnDefault(%r)" % self.arg class Sequence(DefaultGenerator): """Represents a named database sequence. The :class:`.Sequence` object represents the name and configurational parameters of a database sequence. It also represents a construct that can be "executed" by a SQLAlchemy :class:`.Engine` or :class:`.Connection`, rendering the appropriate "next value" function for the target database and returning a result. The :class:`.Sequence` is typically associated with a primary key column:: some_table = Table('some_table', metadata, Column('id', Integer, Sequence('some_table_seq'), primary_key=True) ) When CREATE TABLE is emitted for the above :class:`.Table`, if the target platform supports sequences, a CREATE SEQUENCE statement will be emitted as well. For platforms that don't support sequences, the :class:`.Sequence` construct is ignored. .. seealso:: :class:`.CreateSequence` :class:`.DropSequence` """ __visit_name__ = 'sequence' is_sequence = True def __init__(self, name, start=None, increment=None, schema=None, optional=False, quote=None, metadata=None, quote_schema=None, for_update=False): """Construct a :class:`.Sequence` object. :param name: The name of the sequence. :param start: the starting index of the sequence. This value is used when the CREATE SEQUENCE command is emitted to the database as the value of the "START WITH" clause. If ``None``, the clause is omitted, which on most platforms indicates a starting value of 1. :param increment: the increment value of the sequence. This value is used when the CREATE SEQUENCE command is emitted to the database as the value of the "INCREMENT BY" clause. If ``None``, the clause is omitted, which on most platforms indicates an increment of 1. :param schema: Optional schema name for the sequence, if located in a schema other than the default. :param optional: boolean value, when ``True``, indicates that this :class:`.Sequence` object only needs to be explicitly generated on backends that don't provide another way to generate primary key identifiers. Currently, it essentially means, "don't create this sequence on the Postgresql backend, where the SERIAL keyword creates a sequence for us automatically". :param quote: boolean value, when ``True`` or ``False``, explicitly forces quoting of the schema name on or off. When left at its default of ``None``, normal quoting rules based on casing and reserved words take place. :param metadata: optional :class:`.MetaData` object which will be associated with this :class:`.Sequence`. A :class:`.Sequence` that is associated with a :class:`.MetaData` gains access to the ``bind`` of that :class:`.MetaData`, meaning the :meth:`.Sequence.create` and :meth:`.Sequence.drop` methods will make usage of that engine automatically. .. 
versionchanged:: 0.7 Additionally, the appropriate CREATE SEQUENCE/ DROP SEQUENCE DDL commands will be emitted corresponding to this :class:`.Sequence` when :meth:`.MetaData.create_all` and :meth:`.MetaData.drop_all` are invoked. Note that when a :class:`.Sequence` is applied to a :class:`.Column`, the :class:`.Sequence` is automatically associated with the :class:`.MetaData` object of that column's parent :class:`.Table`, when that association is made. The :class:`.Sequence` will then be subject to automatic CREATE SEQUENCE/DROP SEQUENCE corresponding to when the :class:`.Table` object itself is created or dropped, rather than that of the :class:`.MetaData` object overall. :param for_update: Indicates this :class:`.Sequence`, when associated with a :class:`.Column`, should be invoked for UPDATE statements on that column's table, rather than for INSERT statements, when no value is otherwise present for that column in the statement. """ super(Sequence, self).__init__(for_update=for_update) self.name = name self.start = start self.increment = increment self.optional = optional self.quote = quote if metadata is not None and schema is None and metadata.schema: self.schema = schema = metadata.schema self.quote_schema = metadata.quote_schema else: self.schema = schema self.quote_schema = quote_schema self.metadata = metadata self._key = _get_table_key(name, schema) if metadata: self._set_metadata(metadata) @util.memoized_property def is_callable(self): return False @util.memoized_property def is_clause_element(self): return False def next_value(self): """Return a :class:`.next_value` function element which will render the appropriate increment function for this :class:`.Sequence` within any SQL expression. """ return expression.func.next_value(self, bind=self.bind) def _set_parent(self, column): super(Sequence, self)._set_parent(column) column._on_table_attach(self._set_table) def _set_table(self, column, table): self._set_metadata(table.metadata) def _set_metadata(self, metadata): self.metadata = metadata self.metadata._sequences[self._key] = self @property def bind(self): if self.metadata: return self.metadata.bind else: return None def create(self, bind=None, checkfirst=True): """Creates this sequence in the database.""" if bind is None: bind = _bind_or_error(self) bind._run_visitor(ddl.SchemaGenerator, self, checkfirst=checkfirst) def drop(self, bind=None, checkfirst=True): """Drops this sequence from the database.""" if bind is None: bind = _bind_or_error(self) bind._run_visitor(ddl.SchemaDropper, self, checkfirst=checkfirst) def _not_a_column_expr(self): raise exc.InvalidRequestError( "This %s cannot be used directly " "as a column expression. Use func.next_value(sequence) " "to produce a 'next value' function that's usable " "as a column element." % self.__class__.__name__) class FetchedValue(_NotAColumnExpr, events.SchemaEventTarget): """A marker for a transparent database-side default. Use :class:`.FetchedValue` when the database is configured to provide some automatic default for a column. E.g.:: Column('foo', Integer, FetchedValue()) Would indicate that some trigger or default generator will create a new value for the ``foo`` column during an INSERT. .. 
seealso:: :ref:`triggered_columns` """ is_server_default = True reflected = False has_argument = False def __init__(self, for_update=False): self.for_update = for_update def _as_for_update(self, for_update): if for_update == self.for_update: return self else: return self._clone(for_update) def _clone(self, for_update): n = self.__class__.__new__(self.__class__) n.__dict__.update(self.__dict__) n.__dict__.pop('column', None) n.for_update = for_update return n def _set_parent(self, column): self.column = column if self.for_update: self.column.server_onupdate = self else: self.column.server_default = self def __repr__(self): return util.generic_repr(self) inspection._self_inspects(FetchedValue) class DefaultClause(FetchedValue): """A DDL-specified DEFAULT column value. :class:`.DefaultClause` is a :class:`.FetchedValue` that also generates a "DEFAULT" clause when "CREATE TABLE" is emitted. :class:`.DefaultClause` is generated automatically whenever the ``server_default``, ``server_onupdate`` arguments of :class:`.Column` are used. A :class:`.DefaultClause` can be passed positionally as well. For example, the following:: Column('foo', Integer, server_default="50") Is equivalent to:: Column('foo', Integer, DefaultClause("50")) """ has_argument = True def __init__(self, arg, for_update=False, _reflected=False): util.assert_arg_type(arg, (basestring, expression.ClauseElement, expression.TextClause), 'arg') super(DefaultClause, self).__init__(for_update) self.arg = arg self.reflected = _reflected def __repr__(self): return "DefaultClause(%r, for_update=%r)" % \ (self.arg, self.for_update) class PassiveDefault(DefaultClause): """A DDL-specified DEFAULT column value. .. deprecated:: 0.6 :class:`.PassiveDefault` is deprecated. Use :class:`.DefaultClause`. """ @util.deprecated("0.6", ":class:`.PassiveDefault` is deprecated. " "Use :class:`.DefaultClause`.", False) def __init__(self, *arg, **kw): DefaultClause.__init__(self, *arg, **kw) class Constraint(SchemaItem): """A table-level SQL constraint.""" __visit_name__ = 'constraint' def __init__(self, name=None, deferrable=None, initially=None, _create_rule=None, **kw): """Create a SQL constraint. :param name: Optional, the in-database name of this ``Constraint``. :param deferrable: Optional bool. If set, emit DEFERRABLE or NOT DEFERRABLE when issuing DDL for this constraint. :param initially: Optional string. If set, emit INITIALLY when issuing DDL for this constraint. :param _create_rule: a callable which is passed the DDLCompiler object during compilation. Returns True or False to signal inline generation of this Constraint. The AddConstraint and DropConstraint DDL constructs provide DDLElement's more comprehensive "conditional DDL" approach that is passed a database connection when DDL is being issued. _create_rule is instead called during any CREATE TABLE compilation, where there may not be any transaction/connection in progress. However, it allows conditional compilation of the constraint even for backends which do not support addition of constraints through ALTER TABLE, which currently includes SQLite. _create_rule is used by some types to create constraints. Currently, its call signature is subject to change at any time. :param \**kwargs: Dialect-specific keyword parameters, see the documentation for various dialects and constraints regarding options here. 
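Since :class:`.Constraint` itself is seldom constructed directly, the following is a rough sketch of how these parameters are typically supplied through a concrete subclass; the constraint and column names are illustrative only::

    UniqueConstraint('user_id', 'email',
                     name='uq_user_email',
                     deferrable=True, initially='DEFERRED')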
""" self.name = name self.deferrable = deferrable self.initially = initially self._create_rule = _create_rule util.set_creation_order(self) _validate_dialect_kwargs(kw, self.__class__.__name__) self.kwargs = kw @property def table(self): try: if isinstance(self.parent, Table): return self.parent except AttributeError: pass raise exc.InvalidRequestError( "This constraint is not bound to a table. Did you " "mean to call table.append_constraint(constraint) ?") def _set_parent(self, parent): self.parent = parent parent.constraints.add(self) def copy(self, **kw): raise NotImplementedError() class ColumnCollectionMixin(object): def __init__(self, *columns): self.columns = expression.ColumnCollection() self._pending_colargs = [_to_schema_column_or_string(c) for c in columns] if self._pending_colargs and \ isinstance(self._pending_colargs[0], Column) and \ isinstance(self._pending_colargs[0].table, Table): self._set_parent_with_dispatch(self._pending_colargs[0].table) def _set_parent(self, table): for col in self._pending_colargs: if isinstance(col, basestring): col = table.c[col] self.columns.add(col) class ColumnCollectionConstraint(ColumnCollectionMixin, Constraint): """A constraint that proxies a ColumnCollection.""" def __init__(self, *columns, **kw): """ :param \*columns: A sequence of column names or Column objects. :param name: Optional, the in-database name of this constraint. :param deferrable: Optional bool. If set, emit DEFERRABLE or NOT DEFERRABLE when issuing DDL for this constraint. :param initially: Optional string. If set, emit INITIALLY when issuing DDL for this constraint. """ ColumnCollectionMixin.__init__(self, *columns) Constraint.__init__(self, **kw) def _set_parent(self, table): ColumnCollectionMixin._set_parent(self, table) Constraint._set_parent(self, table) def __contains__(self, x): return x in self.columns def copy(self, **kw): c = self.__class__(name=self.name, deferrable=self.deferrable, initially=self.initially, *self.columns.keys()) c.dispatch._update(self.dispatch) return c def contains_column(self, col): return self.columns.contains_column(col) def __iter__(self): # inlining of # return iter(self.columns) # ColumnCollection->OrderedProperties->OrderedDict ordered_dict = self.columns._data return (ordered_dict[key] for key in ordered_dict._list) def __len__(self): return len(self.columns._data) class CheckConstraint(Constraint): """A table- or column-level CHECK constraint. Can be included in the definition of a Table or Column. """ def __init__(self, sqltext, name=None, deferrable=None, initially=None, table=None, _create_rule=None, _autoattach=True): """Construct a CHECK constraint. :param sqltext: A string containing the constraint definition, which will be used verbatim, or a SQL expression construct. :param name: Optional, the in-database name of the constraint. :param deferrable: Optional bool. If set, emit DEFERRABLE or NOT DEFERRABLE when issuing DDL for this constraint. :param initially: Optional string. If set, emit INITIALLY when issuing DDL for this constraint. 
""" super(CheckConstraint, self).\ __init__(name, deferrable, initially, _create_rule) self.sqltext = expression._literal_as_text(sqltext) if table is not None: self._set_parent_with_dispatch(table) elif _autoattach: cols = sqlutil.find_columns(self.sqltext) tables = set([c.table for c in cols if isinstance(c.table, Table)]) if len(tables) == 1: self._set_parent_with_dispatch( tables.pop()) def __visit_name__(self): if isinstance(self.parent, Table): return "check_constraint" else: return "column_check_constraint" __visit_name__ = property(__visit_name__) def copy(self, target_table=None, **kw): if target_table is not None: def replace(col): if self.table.c.contains_column(col): return target_table.c[col.key] else: return None sqltext = visitors.replacement_traverse(self.sqltext, {}, replace) else: sqltext = self.sqltext c = CheckConstraint(sqltext, name=self.name, initially=self.initially, deferrable=self.deferrable, _create_rule=self._create_rule, table=target_table, _autoattach=False) c.dispatch._update(self.dispatch) return c class ForeignKeyConstraint(Constraint): """A table-level FOREIGN KEY constraint. Defines a single column or composite FOREIGN KEY ... REFERENCES constraint. For a no-frills, single column foreign key, adding a :class:`.ForeignKey` to the definition of a :class:`.Column` is a shorthand equivalent for an unnamed, single column :class:`.ForeignKeyConstraint`. Examples of foreign key configuration are in :ref:`metadata_foreignkeys`. """ __visit_name__ = 'foreign_key_constraint' def __init__(self, columns, refcolumns, name=None, onupdate=None, ondelete=None, deferrable=None, initially=None, use_alter=False, link_to_name=False, match=None, table=None): """Construct a composite-capable FOREIGN KEY. :param columns: A sequence of local column names. The named columns must be defined and present in the parent Table. The names should match the ``key`` given to each column (defaults to the name) unless ``link_to_name`` is True. :param refcolumns: A sequence of foreign column names or Column objects. The columns must all be located within the same Table. :param name: Optional, the in-database name of the key. :param onupdate: Optional string. If set, emit ON UPDATE when issuing DDL for this constraint. Typical values include CASCADE, DELETE and RESTRICT. :param ondelete: Optional string. If set, emit ON DELETE when issuing DDL for this constraint. Typical values include CASCADE, DELETE and RESTRICT. :param deferrable: Optional bool. If set, emit DEFERRABLE or NOT DEFERRABLE when issuing DDL for this constraint. :param initially: Optional string. If set, emit INITIALLY when issuing DDL for this constraint. :param link_to_name: if True, the string name given in ``column`` is the rendered name of the referenced column, not its locally assigned ``key``. :param use_alter: If True, do not emit the DDL for this constraint as part of the CREATE TABLE definition. Instead, generate it via an ALTER TABLE statement issued after the full collection of tables have been created, and drop it via an ALTER TABLE statement before the full collection of tables are dropped. This is shorthand for the usage of :class:`.AddConstraint` and :class:`.DropConstraint` applied as "after-create" and "before-drop" events on the MetaData object. This is normally used to generate/drop constraints on objects that are mutually dependent on each other. :param match: Optional string. If set, emit MATCH when issuing DDL for this constraint. Typical values include SIMPLE, PARTIAL and FULL. 
""" super(ForeignKeyConstraint, self).\ __init__(name, deferrable, initially) self.onupdate = onupdate self.ondelete = ondelete self.link_to_name = link_to_name if self.name is None and use_alter: raise exc.ArgumentError("Alterable Constraint requires a name") self.use_alter = use_alter self.match = match self._elements = util.OrderedDict() # standalone ForeignKeyConstraint - create # associated ForeignKey objects which will be applied to hosted # Column objects (in col.foreign_keys), either now or when attached # to the Table for string-specified names for col, refcol in zip(columns, refcolumns): self._elements[col] = ForeignKey( refcol, _constraint=self, name=self.name, onupdate=self.onupdate, ondelete=self.ondelete, use_alter=self.use_alter, link_to_name=self.link_to_name, match=self.match ) if table is not None: self._set_parent_with_dispatch(table) elif columns and \ isinstance(columns[0], Column) and \ columns[0].table is not None: self._set_parent_with_dispatch(columns[0].table) @property def _col_description(self): return ", ".join(self._elements) @property def columns(self): return self._elements.keys() @property def elements(self): return self._elements.values() def _set_parent(self, table): super(ForeignKeyConstraint, self)._set_parent(table) for col, fk in self._elements.iteritems(): # string-specified column names now get # resolved to Column objects if isinstance(col, basestring): try: col = table.c[col] except KeyError: raise exc.ArgumentError( "Can't create ForeignKeyConstraint " "on table '%s': no column " "named '%s' is present." % (table.description, col)) if not hasattr(fk, 'parent') or \ fk.parent is not col: fk._set_parent_with_dispatch(col) if self.use_alter: def supports_alter(ddl, event, schema_item, bind, **kw): return table in set(kw['tables']) and \ bind.dialect.supports_alter event.listen(table.metadata, "after_create", AddConstraint(self, on=supports_alter)) event.listen(table.metadata, "before_drop", DropConstraint(self, on=supports_alter)) def copy(self, schema=None, **kw): fkc = ForeignKeyConstraint( [x.parent.key for x in self._elements.values()], [x._get_colspec(schema=schema) for x in self._elements.values()], name=self.name, onupdate=self.onupdate, ondelete=self.ondelete, use_alter=self.use_alter, deferrable=self.deferrable, initially=self.initially, link_to_name=self.link_to_name, match=self.match ) fkc.dispatch._update(self.dispatch) return fkc class PrimaryKeyConstraint(ColumnCollectionConstraint): """A table-level PRIMARY KEY constraint. Defines a single column or composite PRIMARY KEY constraint. For a no-frills primary key, adding ``primary_key=True`` to one or more ``Column`` definitions is a shorthand equivalent for an unnamed single- or multiple-column PrimaryKeyConstraint. """ __visit_name__ = 'primary_key_constraint' def _set_parent(self, table): super(PrimaryKeyConstraint, self)._set_parent(table) if table.primary_key in table.constraints: table.constraints.remove(table.primary_key) table.primary_key = self table.constraints.add(self) for c in self.columns: c.primary_key = True def _replace(self, col): self.columns.replace(col) class UniqueConstraint(ColumnCollectionConstraint): """A table-level UNIQUE constraint. Defines a single column or composite UNIQUE constraint. For a no-frills, single column constraint, adding ``unique=True`` to the ``Column`` definition is a shorthand equivalent for an unnamed, single column UniqueConstraint. 
""" __visit_name__ = 'unique_constraint' class Index(ColumnCollectionMixin, SchemaItem): """A table-level INDEX. Defines a composite (one or more column) INDEX. E.g.:: sometable = Table("sometable", metadata, Column("name", String(50)), Column("address", String(100)) ) Index("some_index", sometable.c.name) For a no-frills, single column index, adding :class:`.Column` also supports ``index=True``:: sometable = Table("sometable", metadata, Column("name", String(50), index=True) ) For a composite index, multiple columns can be specified:: Index("some_index", sometable.c.name, sometable.c.address) Functional indexes are supported as well, keeping in mind that at least one :class:`.Column` must be present:: Index("some_index", func.lower(sometable.c.name)) .. versionadded:: 0.8 support for functional and expression-based indexes. .. seealso:: :ref:`schema_indexes` - General information on :class:`.Index`. :ref:`postgresql_indexes` - PostgreSQL-specific options available for the :class:`.Index` construct. :ref:`mysql_indexes` - MySQL-specific options available for the :class:`.Index` construct. :ref:`mssql_indexes` - MSSQL-specific options available for the :class:`.Index` construct. """ __visit_name__ = 'index' def __init__(self, name, *expressions, **kw): """Construct an index object. :param name: The name of the index :param \*expressions: Column or SQL expressions. :param unique: Defaults to False: create a unique index. :param \**kw: Other keyword arguments may be interpreted by specific dialects. """ self.table = None columns = [] for expr in expressions: if not isinstance(expr, expression.ClauseElement): columns.append(expr) else: cols = [] visitors.traverse(expr, {}, {'column': cols.append}) if cols: columns.append(cols[0]) else: columns.append(expr) self.expressions = expressions # will call _set_parent() if table-bound column # objects are present ColumnCollectionMixin.__init__(self, *columns) self.name = name self.unique = kw.pop('unique', False) self.kwargs = kw def _set_parent(self, table): ColumnCollectionMixin._set_parent(self, table) if self.table is not None and table is not self.table: raise exc.ArgumentError( "Index '%s' is against table '%s', and " "cannot be associated with table '%s'." % ( self.name, self.table.description, table.description ) ) self.table = table for c in self.columns: if c.table != self.table: raise exc.ArgumentError( "Column '%s' is not part of table '%s'." % (c, self.table.description) ) table.indexes.add(self) self.expressions = [ expr if isinstance(expr, expression.ClauseElement) else colexpr for expr, colexpr in zip(self.expressions, self.columns) ] @property def bind(self): """Return the connectable associated with this Index.""" return self.table.bind def create(self, bind=None): """Issue a ``CREATE`` statement for this :class:`.Index`, using the given :class:`.Connectable` for connectivity. .. seealso:: :meth:`.MetaData.create_all`. """ if bind is None: bind = _bind_or_error(self) bind._run_visitor(ddl.SchemaGenerator, self) return self def drop(self, bind=None): """Issue a ``DROP`` statement for this :class:`.Index`, using the given :class:`.Connectable` for connectivity. .. seealso:: :meth:`.MetaData.drop_all`. 
""" if bind is None: bind = _bind_or_error(self) bind._run_visitor(ddl.SchemaDropper, self) def __repr__(self): return 'Index(%s)' % ( ", ".join( [repr(self.name)] + [repr(c) for c in self.columns] + (self.unique and ["unique=True"] or []) )) class MetaData(SchemaItem): """A collection of :class:`.Table` objects and their associated schema constructs. Holds a collection of :class:`.Table` objects as well as an optional binding to an :class:`.Engine` or :class:`.Connection`. If bound, the :class:`.Table` objects in the collection and their columns may participate in implicit SQL execution. The :class:`.Table` objects themselves are stored in the ``metadata.tables`` dictionary. The ``bind`` property may be assigned to dynamically. A common pattern is to start unbound and then bind later when an engine is available:: metadata = MetaData() # define tables Table('mytable', metadata, ...) # connect to an engine later, perhaps after loading a URL from a # configuration file metadata.bind = an_engine MetaData is a thread-safe object after tables have been explicitly defined or loaded via reflection. .. seealso:: :ref:`metadata_describing` - Introduction to database metadata """ __visit_name__ = 'metadata' def __init__(self, bind=None, reflect=False, schema=None, quote_schema=None): """Create a new MetaData object. :param bind: An Engine or Connection to bind to. May also be a string or URL instance, these are passed to create_engine() and this MetaData will be bound to the resulting engine. :param reflect: Optional, automatically load all tables from the bound database. Defaults to False. ``bind`` is required when this option is set. .. deprecated:: 0.8 Please use the :meth:`.MetaData.reflect` method. :param schema: The default schema to use for the :class:`.Table`, :class:`.Sequence`, and other objects associated with this :class:`.MetaData`. Defaults to ``None``. :param quote_schema: Sets the ``quote_schema`` flag for those :class:`.Table`, :class:`.Sequence`, and other objects which make usage of the local ``schema`` name. .. versionadded:: 0.7.4 ``schema`` and ``quote_schema`` parameters. 
""" self.tables = util.immutabledict() self.schema = schema self.quote_schema = quote_schema self._schemas = set() self._sequences = {} self.bind = bind if reflect: util.warn("reflect=True is deprecate; please " "use the reflect() method.") if not bind: raise exc.ArgumentError( "A bind must be supplied in conjunction " "with reflect=True") self.reflect() def __repr__(self): return 'MetaData(bind=%r)' % self.bind def __contains__(self, table_or_key): if not isinstance(table_or_key, basestring): table_or_key = table_or_key.key return table_or_key in self.tables def _add_table(self, name, schema, table): key = _get_table_key(name, schema) dict.__setitem__(self.tables, key, table) if schema: self._schemas.add(schema) def _remove_table(self, name, schema): key = _get_table_key(name, schema) dict.pop(self.tables, key, None) if self._schemas: self._schemas = set([t.schema for t in self.tables.values() if t.schema is not None]) def __getstate__(self): return {'tables': self.tables, 'schema': self.schema, 'quote_schema': self.quote_schema, 'schemas': self._schemas, 'sequences': self._sequences} def __setstate__(self, state): self.tables = state['tables'] self.schema = state['schema'] self.quote_schema = state['quote_schema'] self._bind = None self._sequences = state['sequences'] self._schemas = state['schemas'] def is_bound(self): """True if this MetaData is bound to an Engine or Connection.""" return self._bind is not None def bind(self): """An :class:`.Engine` or :class:`.Connection` to which this :class:`.MetaData` is bound. Typically, a :class:`.Engine` is assigned to this attribute so that "implicit execution" may be used, or alternatively as a means of providing engine binding information to an ORM :class:`.Session` object:: engine = create_engine("someurl://") metadata.bind = engine .. seealso:: :ref:`dbengine_implicit` - background on "bound metadata" """ return self._bind def _bind_to(self, bind): """Bind this MetaData to an Engine, Connection, string or URL.""" if isinstance(bind, (basestring, url.URL)): from sqlalchemy import create_engine self._bind = create_engine(bind) else: self._bind = bind bind = property(bind, _bind_to) def clear(self): """Clear all Table objects from this MetaData.""" dict.clear(self.tables) self._schemas.clear() def remove(self, table): """Remove the given Table object from this MetaData.""" self._remove_table(table.name, table.schema) @property def sorted_tables(self): """Returns a list of :class:`.Table` objects sorted in order of foreign key dependency. The sorting will place :class:`.Table` objects that have dependencies first, before the dependencies themselves, representing the order in which they can be created. To get the order in which the tables would be dropped, use the ``reversed()`` Python built-in. .. seealso:: :meth:`.Inspector.sorted_tables` """ return sqlutil.sort_tables(self.tables.itervalues()) def reflect(self, bind=None, schema=None, views=False, only=None): """Load all available table definitions from the database. Automatically creates ``Table`` entries in this ``MetaData`` for any table available in the database but not yet present in the ``MetaData``. May be called multiple times to pick up tables recently added to the database, however no special action is taken if a table in this ``MetaData`` no longer exists in the database. :param bind: A :class:`.Connectable` used to access the database; if None, uses the existing bind on this ``MetaData``, if any. :param schema: Optional, query and reflect tables from an alterate schema. 
If None, the schema associated with this :class:`.MetaData` is used, if any. :param views: If True, also reflect views. :param only: Optional. Load only a sub-set of available named tables. May be specified as a sequence of names or a callable. If a sequence of names is provided, only those tables will be reflected. An error is raised if a table is requested but not available. Named tables already present in this ``MetaData`` are ignored. If a callable is provided, it will be used as a boolean predicate to filter the list of potential table names. The callable is called with a table name and this ``MetaData`` instance as positional arguments and should return a true value for any table to reflect. """ if bind is None: bind = _bind_or_error(self) with bind.connect() as conn: reflect_opts = { 'autoload': True, 'autoload_with': conn } if schema is None: schema = self.schema if schema is not None: reflect_opts['schema'] = schema available = util.OrderedSet(bind.engine.table_names(schema, connection=conn)) if views: available.update( bind.dialect.get_view_names(conn, schema) ) if schema is not None: available_w_schema = util.OrderedSet(["%s.%s" % (schema, name) for name in available]) else: available_w_schema = available current = set(self.tables) if only is None: load = [name for name, schname in zip(available, available_w_schema) if schname not in current] elif util.callable(only): load = [name for name, schname in zip(available, available_w_schema) if schname not in current and only(name, self)] else: missing = [name for name in only if name not in available] if missing: s = schema and (" schema '%s'" % schema) or '' raise exc.InvalidRequestError( 'Could not reflect: requested table(s) not available ' 'in %s%s: (%s)' % (bind.engine.url, s, ', '.join(missing))) load = [name for name in only if name not in current] for name in load: Table(name, self, **reflect_opts) def append_ddl_listener(self, event_name, listener): """Append a DDL event listener to this ``MetaData``. Deprecated. See :class:`.DDLEvents`. """ def adapt_listener(target, connection, **kw): tables = kw['tables'] listener(event, target, connection, tables=tables) event.listen(self, "" + event_name.replace('-', '_'), adapt_listener) def create_all(self, bind=None, tables=None, checkfirst=True): """Create all tables stored in this metadata. Conditional by default, will not attempt to recreate tables already present in the target database. :param bind: A :class:`.Connectable` used to access the database; if None, uses the existing bind on this ``MetaData``, if any. :param tables: Optional list of ``Table`` objects, which is a subset of the total tables in the ``MetaData`` (others are ignored). :param checkfirst: Defaults to True, don't issue CREATEs for tables already present in the target database. """ if bind is None: bind = _bind_or_error(self) bind._run_visitor(ddl.SchemaGenerator, self, checkfirst=checkfirst, tables=tables) def drop_all(self, bind=None, tables=None, checkfirst=True): """Drop all tables stored in this metadata. Conditional by default, will not attempt to drop tables not present in the target database. :param bind: A :class:`.Connectable` used to access the database; if None, uses the existing bind on this ``MetaData``, if any. :param tables: Optional list of ``Table`` objects, which is a subset of the total tables in the ``MetaData`` (others are ignored). :param checkfirst: Defaults to True, only issue DROPs for tables confirmed to be present in the target database. 
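        A minimal usage sketch, assuming ``some_engine`` refers to an
        existing :class:`.Engine`::

            metadata.create_all(bind=some_engine)

            # later; with checkfirst=True (the default), DROP is only
            # emitted for tables confirmed to be present
            metadata.drop_all(bind=some_engine)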
""" if bind is None: bind = _bind_or_error(self) bind._run_visitor(ddl.SchemaDropper, self, checkfirst=checkfirst, tables=tables) class ThreadLocalMetaData(MetaData): """A MetaData variant that presents a different ``bind`` in every thread. Makes the ``bind`` property of the MetaData a thread-local value, allowing this collection of tables to be bound to different ``Engine`` implementations or connections in each thread. The ThreadLocalMetaData starts off bound to None in each thread. Binds must be made explicitly by assigning to the ``bind`` property or using ``connect()``. You can also re-bind dynamically multiple times per thread, just like a regular ``MetaData``. """ __visit_name__ = 'metadata' def __init__(self): """Construct a ThreadLocalMetaData.""" self.context = util.threading.local() self.__engines = {} super(ThreadLocalMetaData, self).__init__() def bind(self): """The bound Engine or Connection for this thread. This property may be assigned an Engine or Connection, or assigned a string or URL to automatically create a basic Engine for this bind with ``create_engine()``.""" return getattr(self.context, '_engine', None) def _bind_to(self, bind): """Bind to a Connectable in the caller's thread.""" if isinstance(bind, (basestring, url.URL)): try: self.context._engine = self.__engines[bind] except KeyError: from sqlalchemy import create_engine e = create_engine(bind) self.__engines[bind] = e self.context._engine = e else: # TODO: this is squirrely. we shouldnt have to hold onto engines # in a case like this if bind not in self.__engines: self.__engines[bind] = bind self.context._engine = bind bind = property(bind, _bind_to) def is_bound(self): """True if there is a bind for this thread.""" return (hasattr(self.context, '_engine') and self.context._engine is not None) def dispose(self): """Dispose all bound engines, in all thread contexts.""" for e in self.__engines.itervalues(): if hasattr(e, 'dispose'): e.dispose() class SchemaVisitor(visitors.ClauseVisitor): """Define the visiting for ``SchemaItem`` objects.""" __traverse_options__ = {'schema_visitor': True} class _DDLCompiles(expression.ClauseElement): def _compiler(self, dialect, **kw): """Return a compiler appropriate for this ClauseElement, given a Dialect.""" return dialect.ddl_compiler(dialect, self, **kw) class DDLElement(expression.Executable, _DDLCompiles): """Base class for DDL expression constructs. This class is the base for the general purpose :class:`.DDL` class, as well as the various create/drop clause constructs such as :class:`.CreateTable`, :class:`.DropTable`, :class:`.AddConstraint`, etc. :class:`.DDLElement` integrates closely with SQLAlchemy events, introduced in :ref:`event_toplevel`. An instance of one is itself an event receiving callable:: event.listen( users, 'after_create', AddConstraint(constraint).execute_if(dialect='postgresql') ) .. seealso:: :class:`.DDL` :class:`.DDLEvents` :ref:`event_toplevel` :ref:`schema_ddl_sequences` """ _execution_options = expression.Executable.\ _execution_options.union({'autocommit': True}) target = None on = None dialect = None callable_ = None def execute(self, bind=None, target=None): """Execute this DDL immediately. Executes the DDL statement in isolation using the supplied :class:`.Connectable` or :class:`.Connectable` assigned to the ``.bind`` property, if not supplied. If the DDL has a conditional ``on`` criteria, it will be invoked with None as the event. :param bind: Optional, an ``Engine`` or ``Connection``. 
If not supplied, a valid :class:`.Connectable` must be present in the ``.bind`` property. :param target: Optional, defaults to None. The target SchemaItem for the execute call. Will be passed to the ``on`` callable if any, and may also provide string expansion data for the statement. See ``execute_at`` for more information. """ if bind is None: bind = _bind_or_error(self) if self._should_execute(target, bind): return bind.execute(self.against(target)) else: bind.engine.logger.info( "DDL execution skipped, criteria not met.") @util.deprecated("0.7", "See :class:`.DDLEvents`, as well as " ":meth:`.DDLElement.execute_if`.") def execute_at(self, event_name, target): """Link execution of this DDL to the DDL lifecycle of a SchemaItem. Links this ``DDLElement`` to a ``Table`` or ``MetaData`` instance, executing it when that schema item is created or dropped. The DDL statement will be executed using the same Connection and transactional context as the Table create/drop itself. The ``.bind`` property of this statement is ignored. :param event: One of the events defined in the schema item's ``.ddl_events``; e.g. 'before-create', 'after-create', 'before-drop' or 'after-drop' :param target: The Table or MetaData instance for which this DDLElement will be associated with. A DDLElement instance can be linked to any number of schema items. ``execute_at`` builds on the ``append_ddl_listener`` interface of :class:`.MetaData` and :class:`.Table` objects. Caveat: Creating or dropping a Table in isolation will also trigger any DDL set to ``execute_at`` that Table's MetaData. This may change in a future release. """ def call_event(target, connection, **kw): if self._should_execute_deprecated(event_name, target, connection, **kw): return connection.execute(self.against(target)) event.listen(target, "" + event_name.replace('-', '_'), call_event) @expression._generative def against(self, target): """Return a copy of this DDL against a specific schema item.""" self.target = target @expression._generative def execute_if(self, dialect=None, callable_=None, state=None): """Return a callable that will execute this DDLElement conditionally. Used to provide a wrapper for event listening:: event.listen( metadata, 'before_create', DDL("my_ddl").execute_if(dialect='postgresql') ) :param dialect: May be a string, tuple or a callable predicate. If a string, it will be compared to the name of the executing database dialect:: DDL('something').execute_if(dialect='postgresql') If a tuple, specifies multiple dialect names:: DDL('something').execute_if(dialect=('postgresql', 'mysql')) :param callable_: A callable, which will be invoked with four positional arguments as well as optional keyword arguments: :ddl: This DDL element. :target: The :class:`.Table` or :class:`.MetaData` object which is the target of this event. May be None if the DDL is executed explicitly. :bind: The :class:`.Connection` being used for DDL execution :tables: Optional keyword argument - a list of Table objects which are to be created/ dropped within a MetaData.create_all() or drop_all() method call. :state: Optional keyword argument - will be the ``state`` argument passed to this function. :checkfirst: Keyword argument, will be True if the 'checkfirst' flag was set during the call to ``create()``, ``create_all()``, ``drop()``, ``drop_all()``. If the callable returns a true value, the DDL statement will be executed. :param state: any value which will be passed to the callable_ as the ``state`` keyword argument. .. 
seealso:: :class:`.DDLEvents` :ref:`event_toplevel` """ self.dialect = dialect self.callable_ = callable_ self.state = state def _should_execute(self, target, bind, **kw): if self.on is not None and \ not self._should_execute_deprecated(None, target, bind, **kw): return False if isinstance(self.dialect, basestring): if self.dialect != bind.engine.name: return False elif isinstance(self.dialect, (tuple, list, set)): if bind.engine.name not in self.dialect: return False if self.callable_ is not None and \ not self.callable_(self, target, bind, state=self.state, **kw): return False return True def _should_execute_deprecated(self, event, target, bind, **kw): if self.on is None: return True elif isinstance(self.on, basestring): return self.on == bind.engine.name elif isinstance(self.on, (tuple, list, set)): return bind.engine.name in self.on else: return self.on(self, event, target, bind, **kw) def __call__(self, target, bind, **kw): """Execute the DDL as a ddl_listener.""" if self._should_execute(target, bind, **kw): return bind.execute(self.against(target)) def _check_ddl_on(self, on): if (on is not None and (not isinstance(on, (basestring, tuple, list, set)) and not util.callable(on))): raise exc.ArgumentError( "Expected the name of a database dialect, a tuple " "of names, or a callable for " "'on' criteria, got type '%s'." % type(on).__name__) def bind(self): if self._bind: return self._bind def _set_bind(self, bind): self._bind = bind bind = property(bind, _set_bind) def _generate(self): s = self.__class__.__new__(self.__class__) s.__dict__ = self.__dict__.copy() return s class DDL(DDLElement): """A literal DDL statement. Specifies literal SQL DDL to be executed by the database. DDL objects function as DDL event listeners, and can be subscribed to those events listed in :class:`.DDLEvents`, using either :class:`.Table` or :class:`.MetaData` objects as targets. Basic templating support allows a single DDL instance to handle repetitive tasks for multiple tables. Examples:: from sqlalchemy import event, DDL tbl = Table('users', metadata, Column('uid', Integer)) event.listen(tbl, 'before_create', DDL('DROP TRIGGER users_trigger')) spow = DDL('ALTER TABLE %(table)s SET secretpowers TRUE') event.listen(tbl, 'after_create', spow.execute_if(dialect='somedb')) drop_spow = DDL('ALTER TABLE users SET secretpowers FALSE') connection.execute(drop_spow) When operating on Table events, the following ``statement`` string substitions are available:: %(table)s - the Table name, with any required quoting applied %(schema)s - the schema name, with any required quoting applied %(fullname)s - the Table name including schema, quoted if needed The DDL's "context", if any, will be combined with the standard substutions noted above. Keys present in the context will override the standard substitutions. """ __visit_name__ = "ddl" def __init__(self, statement, on=None, context=None, bind=None): """Create a DDL statement. :param statement: A string or unicode string to be executed. Statements will be processed with Python's string formatting operator. See the ``context`` argument and the ``execute_at`` method. A literal '%' in a statement must be escaped as '%%'. SQL bind parameters are not available in DDL statements. :param on: Deprecated. See :meth:`.DDLElement.execute_if`. Optional filtering criteria. May be a string, tuple or a callable predicate. 
If a string, it will be compared to the name of the executing database dialect:: DDL('something', on='postgresql') If a tuple, specifies multiple dialect names:: DDL('something', on=('postgresql', 'mysql')) If a callable, it will be invoked with four positional arguments as well as optional keyword arguments: :ddl: This DDL element. :event: The name of the event that has triggered this DDL, such as 'after-create' Will be None if the DDL is executed explicitly. :target: The ``Table`` or ``MetaData`` object which is the target of this event. May be None if the DDL is executed explicitly. :connection: The ``Connection`` being used for DDL execution :tables: Optional keyword argument - a list of Table objects which are to be created/ dropped within a MetaData.create_all() or drop_all() method call. If the callable returns a true value, the DDL statement will be executed. :param context: Optional dictionary, defaults to None. These values will be available for use in string substitutions on the DDL statement. :param bind: Optional. A :class:`.Connectable`, used by default when ``execute()`` is invoked without a bind argument. .. seealso:: :class:`.DDLEvents` :mod:`sqlalchemy.event` """ if not isinstance(statement, basestring): raise exc.ArgumentError( "Expected a string or unicode SQL statement, got '%r'" % statement) self.statement = statement self.context = context or {} self._check_ddl_on(on) self.on = on self._bind = bind def __repr__(self): return '<%s@%s; %s>' % ( type(self).__name__, id(self), ', '.join([repr(self.statement)] + ['%s=%r' % (key, getattr(self, key)) for key in ('on', 'context') if getattr(self, key)])) def _to_schema_column(element): if hasattr(element, '__clause_element__'): element = element.__clause_element__() if not isinstance(element, Column): raise exc.ArgumentError("schema.Column object expected") return element def _to_schema_column_or_string(element): if hasattr(element, '__clause_element__'): element = element.__clause_element__() if not isinstance(element, (basestring, expression.ColumnElement)): msg = "Element %r is not a string name or column element" raise exc.ArgumentError(msg % element) return element class _CreateDropBase(DDLElement): """Base class for DDL constucts that represent CREATE and DROP or equivalents. The common theme of _CreateDropBase is a single ``element`` attribute which refers to the element to be created or dropped. """ def __init__(self, element, on=None, bind=None): self.element = element self._check_ddl_on(on) self.on = on self.bind = bind def _create_rule_disable(self, compiler): """Allow disable of _create_rule using a callable. Pass to _create_rule using util.portable_instancemethod(self._create_rule_disable) to retain serializability. """ return False class CreateSchema(_CreateDropBase): """Represent a CREATE SCHEMA statement. .. versionadded:: 0.7.4 The argument here is the string name of the schema. """ __visit_name__ = "create_schema" def __init__(self, name, quote=None, **kw): """Create a new :class:`.CreateSchema` construct.""" self.quote = quote super(CreateSchema, self).__init__(name, **kw) class DropSchema(_CreateDropBase): """Represent a DROP SCHEMA statement. The argument here is the string name of the schema. .. 
versionadded:: 0.7.4 """ __visit_name__ = "drop_schema" def __init__(self, name, quote=None, cascade=False, **kw): """Create a new :class:`.DropSchema` construct.""" self.quote = quote self.cascade = cascade super(DropSchema, self).__init__(name, **kw) class CreateTable(_CreateDropBase): """Represent a CREATE TABLE statement.""" __visit_name__ = "create_table" def __init__(self, element, on=None, bind=None): """Create a :class:`.CreateTable` construct. :param element: a :class:`.Table` that's the subject of the CREATE :param on: See the description for 'on' in :class:`.DDL`. :param bind: See the description for 'bind' in :class:`.DDL`. """ super(CreateTable, self).__init__(element, on=on, bind=bind) self.columns = [CreateColumn(column) for column in element.columns ] class _DropView(_CreateDropBase): """Semi-public 'DROP VIEW' construct. Used by the test suite for dialect-agnostic drops of views. This object will eventually be part of a public "view" API. """ __visit_name__ = "drop_view" class CreateColumn(_DDLCompiles): """Represent a :class:`.Column` as rendered in a CREATE TABLE statement, via the :class:`.CreateTable` construct. This is provided to support custom column DDL within the generation of CREATE TABLE statements, by using the compiler extension documented in :ref:`sqlalchemy.ext.compiler_toplevel` to extend :class:`.CreateColumn`. Typical integration is to examine the incoming :class:`.Column` object, and to redirect compilation if a particular flag or condition is found:: from sqlalchemy import schema from sqlalchemy.ext.compiler import compiles @compiles(schema.CreateColumn) def compile(element, compiler, **kw): column = element.element if "special" not in column.info: return compiler.visit_create_column(element, **kw) text = "%s SPECIAL DIRECTIVE %s" % ( column.name, compiler.type_compiler.process(column.type) ) default = compiler.get_column_default_string(column) if default is not None: text += " DEFAULT " + default if not column.nullable: text += " NOT NULL" if column.constraints: text += " ".join( compiler.process(const) for const in column.constraints) return text The above construct can be applied to a :class:`.Table` as follows:: from sqlalchemy import Table, Metadata, Column, Integer, String from sqlalchemy import schema metadata = MetaData() table = Table('mytable', MetaData(), Column('x', Integer, info={"special":True}, primary_key=True), Column('y', String(50)), Column('z', String(20), info={"special":True}) ) metadata.create_all(conn) Above, the directives we've added to the :attr:`.Column.info` collection will be detected by our custom compilation scheme:: CREATE TABLE mytable ( x SPECIAL DIRECTIVE INTEGER NOT NULL, y VARCHAR(50), z SPECIAL DIRECTIVE VARCHAR(20), PRIMARY KEY (x) ) The :class:`.CreateColumn` construct can also be used to skip certain columns when producing a ``CREATE TABLE``. This is accomplished by creating a compilation rule that conditionally returns ``None``. This is essentially how to produce the same effect as using the ``system=True`` argument on :class:`.Column`, which marks a column as an implicitly-present "system" column. For example, suppose we wish to produce a :class:`.Table` which skips rendering of the Postgresql ``xmin`` column against the Postgresql backend, but on other backends does render it, in anticipation of a triggered rule. 
A conditional compilation rule could skip this name only on Postgresql:: from sqlalchemy.schema import CreateColumn @compiles(CreateColumn, "postgresql") def skip_xmin(element, compiler, **kw): if element.element.name == 'xmin': return None else: return compiler.visit_create_column(element, **kw) my_table = Table('mytable', metadata, Column('id', Integer, primary_key=True), Column('xmin', Integer) ) Above, a :class:`.CreateTable` construct will generate a ``CREATE TABLE`` which only includes the ``id`` column in the string; the ``xmin`` column will be omitted, but only against the Postgresql backend. .. versionadded:: 0.8.3 The :class:`.CreateColumn` construct supports skipping of columns by returning ``None`` from a custom compilation rule. .. versionadded:: 0.8 The :class:`.CreateColumn` construct was added to support custom column creation styles. """ __visit_name__ = 'create_column' def __init__(self, element): self.element = element class DropTable(_CreateDropBase): """Represent a DROP TABLE statement.""" __visit_name__ = "drop_table" class CreateSequence(_CreateDropBase): """Represent a CREATE SEQUENCE statement.""" __visit_name__ = "create_sequence" class DropSequence(_CreateDropBase): """Represent a DROP SEQUENCE statement.""" __visit_name__ = "drop_sequence" class CreateIndex(_CreateDropBase): """Represent a CREATE INDEX statement.""" __visit_name__ = "create_index" class DropIndex(_CreateDropBase): """Represent a DROP INDEX statement.""" __visit_name__ = "drop_index" class AddConstraint(_CreateDropBase): """Represent an ALTER TABLE ADD CONSTRAINT statement.""" __visit_name__ = "add_constraint" def __init__(self, element, *args, **kw): super(AddConstraint, self).__init__(element, *args, **kw) element._create_rule = util.portable_instancemethod( self._create_rule_disable) class DropConstraint(_CreateDropBase): """Represent an ALTER TABLE DROP CONSTRAINT statement.""" __visit_name__ = "drop_constraint" def __init__(self, element, cascade=False, **kw): self.cascade = cascade super(DropConstraint, self).__init__(element, **kw) element._create_rule = util.portable_instancemethod( self._create_rule_disable) def _bind_or_error(schemaitem, msg=None): bind = schemaitem.bind if not bind: name = schemaitem.__class__.__name__ label = getattr(schemaitem, 'fullname', getattr(schemaitem, 'name', None)) if label: item = '%s %r' % (name, label) else: item = name if isinstance(schemaitem, (MetaData, DDL)): bindable = "the %s's .bind" % name else: bindable = "this %s's .metadata.bind" % name if msg is None: msg = "The %s is not bound to an Engine or Connection. "\ "Execution can not proceed without a database to execute "\ "against. Either execute with an explicit connection or "\ "assign %s to enable implicit execution." 
% \ (item, bindable) raise exc.UnboundExecutionError(msg) return bind SQLAlchemy-0.8.4/lib/sqlalchemy/sql/0000755000076500000240000000000012251151573017742 5ustar classicstaff00000000000000SQLAlchemy-0.8.4/lib/sqlalchemy/sql/__init__.py0000644000076500000240000000212512251150015022041 0ustar classicstaff00000000000000# sql/__init__.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php from .expression import ( Alias, ClauseElement, ColumnCollection, ColumnElement, CompoundSelect, Delete, FromClause, Insert, Join, Select, Selectable, TableClause, Update, alias, and_, asc, between, bindparam, case, cast, collate, column, delete, desc, distinct, except_, except_all, exists, extract, false, func, insert, intersect, intersect_all, join, label, literal, literal_column, modifier, not_, null, or_, outerjoin, outparam, over, select, subquery, table, text, true, tuple_, type_coerce, union, union_all, update, ) from .visitors import ClauseVisitor __tmp = locals().keys() __all__ = sorted([i for i in __tmp if not i.startswith('__')]) SQLAlchemy-0.8.4/lib/sqlalchemy/sql/compiler.py0000644000076500000240000027267612251150015022140 0ustar classicstaff00000000000000# sql/compiler.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Base SQL and DDL compiler implementations. Classes provided include: :class:`.compiler.SQLCompiler` - renders SQL strings :class:`.compiler.DDLCompiler` - renders DDL (data definition language) strings :class:`.compiler.GenericTypeCompiler` - renders type specification strings. To generate user-defined SQL strings, see :doc:`/ext/compiler`. """ import re import sys from .. import schema, engine, util, exc, types from . import ( operators, functions, util as sql_util, visitors, expression as sql ) import decimal import itertools RESERVED_WORDS = set([ 'all', 'analyse', 'analyze', 'and', 'any', 'array', 'as', 'asc', 'asymmetric', 'authorization', 'between', 'binary', 'both', 'case', 'cast', 'check', 'collate', 'column', 'constraint', 'create', 'cross', 'current_date', 'current_role', 'current_time', 'current_timestamp', 'current_user', 'default', 'deferrable', 'desc', 'distinct', 'do', 'else', 'end', 'except', 'false', 'for', 'foreign', 'freeze', 'from', 'full', 'grant', 'group', 'having', 'ilike', 'in', 'initially', 'inner', 'intersect', 'into', 'is', 'isnull', 'join', 'leading', 'left', 'like', 'limit', 'localtime', 'localtimestamp', 'natural', 'new', 'not', 'notnull', 'null', 'off', 'offset', 'old', 'on', 'only', 'or', 'order', 'outer', 'overlaps', 'placing', 'primary', 'references', 'right', 'select', 'session_user', 'set', 'similar', 'some', 'symmetric', 'table', 'then', 'to', 'trailing', 'true', 'union', 'unique', 'user', 'using', 'verbose', 'when', 'where']) LEGAL_CHARACTERS = re.compile(r'^[A-Z0-9_$]+$', re.I) ILLEGAL_INITIAL_CHARACTERS = set([str(x) for x in xrange(0, 10)]).union(['$']) BIND_PARAMS = re.compile(r'(? 
', operators.ge: ' >= ', operators.eq: ' = ', operators.concat_op: ' || ', operators.between_op: ' BETWEEN ', operators.match_op: ' MATCH ', operators.in_op: ' IN ', operators.notin_op: ' NOT IN ', operators.comma_op: ', ', operators.from_: ' FROM ', operators.as_: ' AS ', operators.is_: ' IS ', operators.isnot: ' IS NOT ', operators.collate: ' COLLATE ', # unary operators.exists: 'EXISTS ', operators.distinct_op: 'DISTINCT ', operators.inv: 'NOT ', # modifiers operators.desc_op: ' DESC', operators.asc_op: ' ASC', operators.nullsfirst_op: ' NULLS FIRST', operators.nullslast_op: ' NULLS LAST', } FUNCTIONS = { functions.coalesce: 'coalesce%(expr)s', functions.current_date: 'CURRENT_DATE', functions.current_time: 'CURRENT_TIME', functions.current_timestamp: 'CURRENT_TIMESTAMP', functions.current_user: 'CURRENT_USER', functions.localtime: 'LOCALTIME', functions.localtimestamp: 'LOCALTIMESTAMP', functions.random: 'random%(expr)s', functions.sysdate: 'sysdate', functions.session_user: 'SESSION_USER', functions.user: 'USER' } EXTRACT_MAP = { 'month': 'month', 'day': 'day', 'year': 'year', 'second': 'second', 'hour': 'hour', 'doy': 'doy', 'minute': 'minute', 'quarter': 'quarter', 'dow': 'dow', 'week': 'week', 'epoch': 'epoch', 'milliseconds': 'milliseconds', 'microseconds': 'microseconds', 'timezone_hour': 'timezone_hour', 'timezone_minute': 'timezone_minute' } COMPOUND_KEYWORDS = { sql.CompoundSelect.UNION: 'UNION', sql.CompoundSelect.UNION_ALL: 'UNION ALL', sql.CompoundSelect.EXCEPT: 'EXCEPT', sql.CompoundSelect.EXCEPT_ALL: 'EXCEPT ALL', sql.CompoundSelect.INTERSECT: 'INTERSECT', sql.CompoundSelect.INTERSECT_ALL: 'INTERSECT ALL' } class _CompileLabel(visitors.Visitable): """lightweight label object which acts as an expression.Label.""" __visit_name__ = 'label' __slots__ = 'element', 'name' def __init__(self, col, name, alt_names=()): self.element = col self.name = name self._alt_names = (col,) + alt_names @property def proxy_set(self): return self.element.proxy_set @property def type(self): return self.element.type @property def quote(self): return self.element.quote class SQLCompiler(engine.Compiled): """Default implementation of Compiled. Compiles ClauseElements into SQL strings. Uses a similar visit paradigm as visitors.ClauseVisitor but implements its own traversal. """ extract_map = EXTRACT_MAP compound_keywords = COMPOUND_KEYWORDS isdelete = isinsert = isupdate = False """class-level defaults which can be set at the instance level to define if this Compiled instance represents INSERT/UPDATE/DELETE """ returning = None """holds the "returning" collection of columns if the statement is CRUD and defines returning columns either implicitly or explicitly """ returning_precedes_values = False """set to True classwide to generate RETURNING clauses before the VALUES or WHERE clause (i.e. MSSQL) """ render_table_with_column_in_update_from = False """set to True classwide to indicate the SET clause in a multi-table UPDATE statement should qualify columns with the table name (i.e. MySQL only) """ ansi_bind_rules = False """SQL 92 doesn't allow bind parameters to be used in the columns clause of a SELECT, nor does it allow ambiguous expressions like "? = ?". A compiler subclass can set this flag to False if the target driver/DB enforces this """ def __init__(self, dialect, statement, column_keys=None, inline=False, **kwargs): """Construct a new ``DefaultCompiler`` object. 
dialect Dialect to be used statement ClauseElement to be compiled column_keys a list of column names to be compiled into an INSERT or UPDATE statement. """ self.column_keys = column_keys # compile INSERT/UPDATE defaults/sequences inlined (no pre- # execute) self.inline = inline or getattr(statement, 'inline', False) # a dictionary of bind parameter keys to BindParameter # instances. self.binds = {} # a dictionary of BindParameter instances to "compiled" names # that are actually present in the generated SQL self.bind_names = util.column_dict() # stack which keeps track of nested SELECT statements self.stack = [] # relates label names in the final SQL to a tuple of local # column/label name, ColumnElement object (if any) and # TypeEngine. ResultProxy uses this for type processing and # column targeting self.result_map = {} # true if the paramstyle is positional self.positional = dialect.positional if self.positional: self.positiontup = [] self.bindtemplate = BIND_TEMPLATES[dialect.paramstyle] self.ctes = None # an IdentifierPreparer that formats the quoting of identifiers self.preparer = dialect.identifier_preparer self.label_length = dialect.label_length \ or dialect.max_identifier_length # a map which tracks "anonymous" identifiers that are created on # the fly here self.anon_map = util.PopulateDict(self._process_anon) # a map which tracks "truncated" names based on # dialect.label_length or dialect.max_identifier_length self.truncated_names = {} engine.Compiled.__init__(self, dialect, statement, **kwargs) if self.positional and dialect.paramstyle == 'numeric': self._apply_numbered_params() @util.memoized_instancemethod def _init_cte_state(self): """Initialize collections related to CTEs only if a CTE is located, to save on the overhead of these collections otherwise. 
""" # collect CTEs to tack on top of a SELECT self.ctes = util.OrderedDict() self.ctes_by_name = {} self.ctes_recursive = False if self.positional: self.cte_positional = [] def _apply_numbered_params(self): poscount = itertools.count(1) self.string = re.sub( r'\[_POSITION\]', lambda m: str(util.next(poscount)), self.string) @util.memoized_property def _bind_processors(self): return dict( (key, value) for key, value in ((self.bind_names[bindparam], bindparam.type._cached_bind_processor(self.dialect)) for bindparam in self.bind_names) if value is not None ) def is_subquery(self): return len(self.stack) > 1 @property def sql_compiler(self): return self def construct_params(self, params=None, _group_number=None, _check=True): """return a dictionary of bind parameter keys and values""" if params: pd = {} for bindparam, name in self.bind_names.iteritems(): if bindparam.key in params: pd[name] = params[bindparam.key] elif name in params: pd[name] = params[name] elif _check and bindparam.required: if _group_number: raise exc.InvalidRequestError( "A value is required for bind parameter %r, " "in parameter group %d" % (bindparam.key, _group_number)) else: raise exc.InvalidRequestError( "A value is required for bind parameter %r" % bindparam.key) else: pd[name] = bindparam.effective_value return pd else: pd = {} for bindparam in self.bind_names: if _check and bindparam.required: if _group_number: raise exc.InvalidRequestError( "A value is required for bind parameter %r, " "in parameter group %d" % (bindparam.key, _group_number)) else: raise exc.InvalidRequestError( "A value is required for bind parameter %r" % bindparam.key) pd[self.bind_names[bindparam]] = bindparam.effective_value return pd @property def params(self): """Return the bind param dictionary embedded into this compiled object, for those values that are present.""" return self.construct_params(_check=False) def default_from(self): """Called when a SELECT statement has no froms, and no FROM clause is to be appended. Gives Oracle a chance to tack on a ``FROM DUAL`` to the string output. """ return "" def visit_grouping(self, grouping, asfrom=False, **kwargs): return "(" + grouping.element._compiler_dispatch(self, **kwargs) + ")" def visit_label(self, label, add_to_result_map=None, within_label_clause=False, within_columns_clause=False, **kw): # only render labels within the columns clause # or ORDER BY clause of a select. dialect-specific compilers # can modify this behavior. 
if within_columns_clause and not within_label_clause: if isinstance(label.name, sql._truncated_label): labelname = self._truncated_identifier("colident", label.name) else: labelname = label.name if add_to_result_map is not None: add_to_result_map( labelname, label.name, (label, labelname, ) + label._alt_names, label.type ) return label.element._compiler_dispatch(self, within_columns_clause=True, within_label_clause=True, **kw) + \ OPERATORS[operators.as_] + \ self.preparer.format_label(label, labelname) else: return label.element._compiler_dispatch(self, within_columns_clause=False, **kw) def visit_column(self, column, add_to_result_map=None, include_table=True, **kwargs): name = orig_name = column.name if name is None: raise exc.CompileError("Cannot compile Column object until " "its 'name' is assigned.") is_literal = column.is_literal if not is_literal and isinstance(name, sql._truncated_label): name = self._truncated_identifier("colident", name) if add_to_result_map is not None: add_to_result_map( name, orig_name, (column, name, column.key), column.type ) if is_literal: name = self.escape_literal_column(name) else: name = self.preparer.quote(name, column.quote) table = column.table if table is None or not include_table or not table.named_with_column: return name else: if table.schema: schema_prefix = self.preparer.quote_schema( table.schema, table.quote_schema) + '.' else: schema_prefix = '' tablename = table.name if isinstance(tablename, sql._truncated_label): tablename = self._truncated_identifier("alias", tablename) return schema_prefix + \ self.preparer.quote(tablename, table.quote) + \ "." + name def escape_literal_column(self, text): """provide escaping for the literal_column() construct.""" # TODO: some dialects might need different behavior here return text.replace('%', '%%') def visit_fromclause(self, fromclause, **kwargs): return fromclause.name def visit_index(self, index, **kwargs): return index.name def visit_typeclause(self, typeclause, **kwargs): return self.dialect.type_compiler.process(typeclause.type) def post_process_text(self, text): return text def visit_textclause(self, textclause, **kwargs): if textclause.typemap is not None: for colname, type_ in textclause.typemap.iteritems(): self.result_map[colname if self.dialect.case_sensitive else colname.lower()] = \ (colname, None, type_) def do_bindparam(m): name = m.group(1) if name in textclause.bindparams: return self.process(textclause.bindparams[name]) else: return self.bindparam_string(name, **kwargs) # un-escape any \:params return BIND_PARAMS_ESC.sub(lambda m: m.group(1), BIND_PARAMS.sub(do_bindparam, self.post_process_text(textclause.text)) ) def visit_null(self, expr, **kw): return 'NULL' def visit_true(self, expr, **kw): return 'true' def visit_false(self, expr, **kw): return 'false' def visit_clauselist(self, clauselist, **kwargs): sep = clauselist.operator if sep is None: sep = " " else: sep = OPERATORS[clauselist.operator] return sep.join( s for s in (c._compiler_dispatch(self, **kwargs) for c in clauselist.clauses) if s) def visit_case(self, clause, **kwargs): x = "CASE " if clause.value is not None: x += clause.value._compiler_dispatch(self, **kwargs) + " " for cond, result in clause.whens: x += "WHEN " + cond._compiler_dispatch( self, **kwargs ) + " THEN " + result._compiler_dispatch( self, **kwargs) + " " if clause.else_ is not None: x += "ELSE " + clause.else_._compiler_dispatch( self, **kwargs ) + " " x += "END" return x def visit_cast(self, cast, **kwargs): return "CAST(%s AS %s)" % \ 
(cast.clause._compiler_dispatch(self, **kwargs), cast.typeclause._compiler_dispatch(self, **kwargs)) def visit_over(self, over, **kwargs): return "%s OVER (%s)" % ( over.func._compiler_dispatch(self, **kwargs), ' '.join( '%s BY %s' % (word, clause._compiler_dispatch(self, **kwargs)) for word, clause in ( ('PARTITION', over.partition_by), ('ORDER', over.order_by) ) if clause is not None and len(clause) ) ) def visit_extract(self, extract, **kwargs): field = self.extract_map.get(extract.field, extract.field) return "EXTRACT(%s FROM %s)" % (field, extract.expr._compiler_dispatch(self, **kwargs)) def visit_function(self, func, add_to_result_map=None, **kwargs): if add_to_result_map is not None: add_to_result_map( func.name, func.name, (), func.type ) disp = getattr(self, "visit_%s_func" % func.name.lower(), None) if disp: return disp(func, **kwargs) else: name = FUNCTIONS.get(func.__class__, func.name + "%(expr)s") return ".".join(list(func.packagenames) + [name]) % \ {'expr': self.function_argspec(func, **kwargs)} def visit_next_value_func(self, next_value, **kw): return self.visit_sequence(next_value.sequence) def visit_sequence(self, sequence): raise NotImplementedError( "Dialect '%s' does not support sequence increments." % self.dialect.name ) def function_argspec(self, func, **kwargs): return func.clause_expr._compiler_dispatch(self, **kwargs) def visit_compound_select(self, cs, asfrom=False, parens=True, compound_index=0, **kwargs): toplevel = not self.stack entry = self._default_stack_entry if toplevel else self.stack[-1] self.stack.append( { 'correlate_froms': entry['correlate_froms'], 'iswrapper': toplevel, 'asfrom_froms': entry['asfrom_froms'] }) keyword = self.compound_keywords.get(cs.keyword) text = (" " + keyword + " ").join( (c._compiler_dispatch(self, asfrom=asfrom, parens=False, compound_index=i, **kwargs) for i, c in enumerate(cs.selects)) ) group_by = cs._group_by_clause._compiler_dispatch( self, asfrom=asfrom, **kwargs) if group_by: text += " GROUP BY " + group_by text += self.order_by_clause(cs, **kwargs) text += (cs._limit is not None or cs._offset is not None) and \ self.limit_clause(cs) or "" if self.ctes and \ compound_index == 0 and toplevel: text = self._render_cte_clause() + text self.stack.pop(-1) if asfrom and parens: return "(" + text + ")" else: return text def visit_unary(self, unary, **kw): if unary.operator: if unary.modifier: raise exc.CompileError( "Unary expression does not support operator " "and modifier simultaneously") disp = getattr(self, "visit_%s_unary_operator" % unary.operator.__name__, None) if disp: return disp(unary, unary.operator, **kw) else: return self._generate_generic_unary_operator(unary, OPERATORS[unary.operator], **kw) elif unary.modifier: disp = getattr(self, "visit_%s_unary_modifier" % unary.modifier.__name__, None) if disp: return disp(unary, unary.modifier, **kw) else: return self._generate_generic_unary_modifier(unary, OPERATORS[unary.modifier], **kw) else: raise exc.CompileError( "Unary expression has no operator or modifier") def visit_binary(self, binary, **kw): # don't allow "? = ?" 
to render if self.ansi_bind_rules and \ isinstance(binary.left, sql.BindParameter) and \ isinstance(binary.right, sql.BindParameter): kw['literal_binds'] = True operator = binary.operator disp = getattr(self, "visit_%s_binary" % operator.__name__, None) if disp: return disp(binary, operator, **kw) else: try: opstring = OPERATORS[operator] except KeyError: raise exc.UnsupportedCompilationError(self, operator) else: return self._generate_generic_binary(binary, opstring, **kw) def visit_custom_op_binary(self, element, operator, **kw): return self._generate_generic_binary(element, " " + operator.opstring + " ", **kw) def visit_custom_op_unary_operator(self, element, operator, **kw): return self._generate_generic_unary_operator(element, operator.opstring + " ", **kw) def visit_custom_op_unary_modifier(self, element, operator, **kw): return self._generate_generic_unary_modifier(element, " " + operator.opstring, **kw) def _generate_generic_binary(self, binary, opstring, **kw): return binary.left._compiler_dispatch(self, **kw) + \ opstring + \ binary.right._compiler_dispatch(self, **kw) def _generate_generic_unary_operator(self, unary, opstring, **kw): return opstring + unary.element._compiler_dispatch(self, **kw) def _generate_generic_unary_modifier(self, unary, opstring, **kw): return unary.element._compiler_dispatch(self, **kw) + opstring @util.memoized_property def _like_percent_literal(self): return sql.literal_column("'%'", type_=types.String()) def visit_contains_op_binary(self, binary, operator, **kw): binary = binary._clone() percent = self._like_percent_literal binary.right = percent.__add__(binary.right).__add__(percent) return self.visit_like_op_binary(binary, operator, **kw) def visit_notcontains_op_binary(self, binary, operator, **kw): binary = binary._clone() percent = self._like_percent_literal binary.right = percent.__add__(binary.right).__add__(percent) return self.visit_notlike_op_binary(binary, operator, **kw) def visit_startswith_op_binary(self, binary, operator, **kw): binary = binary._clone() percent = self._like_percent_literal binary.right = percent.__radd__( binary.right ) return self.visit_like_op_binary(binary, operator, **kw) def visit_notstartswith_op_binary(self, binary, operator, **kw): binary = binary._clone() percent = self._like_percent_literal binary.right = percent.__radd__( binary.right ) return self.visit_notlike_op_binary(binary, operator, **kw) def visit_endswith_op_binary(self, binary, operator, **kw): binary = binary._clone() percent = self._like_percent_literal binary.right = percent.__add__(binary.right) return self.visit_like_op_binary(binary, operator, **kw) def visit_notendswith_op_binary(self, binary, operator, **kw): binary = binary._clone() percent = self._like_percent_literal binary.right = percent.__add__(binary.right) return self.visit_notlike_op_binary(binary, operator, **kw) def visit_like_op_binary(self, binary, operator, **kw): escape = binary.modifiers.get("escape", None) return '%s LIKE %s' % ( binary.left._compiler_dispatch(self, **kw), binary.right._compiler_dispatch(self, **kw)) \ + (escape and (' ESCAPE ' + self.render_literal_value(escape, None)) or '') def visit_notlike_op_binary(self, binary, operator, **kw): escape = binary.modifiers.get("escape", None) return '%s NOT LIKE %s' % ( binary.left._compiler_dispatch(self, **kw), binary.right._compiler_dispatch(self, **kw)) \ + (escape and (' ESCAPE ' + self.render_literal_value(escape, None)) or '') def visit_ilike_op_binary(self, binary, operator, **kw): escape = 
binary.modifiers.get("escape", None) return 'lower(%s) LIKE lower(%s)' % ( binary.left._compiler_dispatch(self, **kw), binary.right._compiler_dispatch(self, **kw)) \ + (escape and (' ESCAPE ' + self.render_literal_value(escape, None)) or '') def visit_notilike_op_binary(self, binary, operator, **kw): escape = binary.modifiers.get("escape", None) return 'lower(%s) NOT LIKE lower(%s)' % ( binary.left._compiler_dispatch(self, **kw), binary.right._compiler_dispatch(self, **kw)) \ + (escape and (' ESCAPE ' + self.render_literal_value(escape, None)) or '') def visit_bindparam(self, bindparam, within_columns_clause=False, literal_binds=False, skip_bind_expression=False, **kwargs): if not skip_bind_expression and bindparam.type._has_bind_expression: bind_expression = bindparam.type.bind_expression(bindparam) return self.process(bind_expression, skip_bind_expression=True) if literal_binds or \ (within_columns_clause and \ self.ansi_bind_rules): if bindparam.value is None: raise exc.CompileError("Bind parameter without a " "renderable value not allowed here.") return self.render_literal_bindparam(bindparam, within_columns_clause=True, **kwargs) name = self._truncate_bindparam(bindparam) if name in self.binds: existing = self.binds[name] if existing is not bindparam: if (existing.unique or bindparam.unique) and \ not existing.proxy_set.intersection( bindparam.proxy_set): raise exc.CompileError( "Bind parameter '%s' conflicts with " "unique bind parameter of the same name" % bindparam.key ) elif existing._is_crud or bindparam._is_crud: raise exc.CompileError( "bindparam() name '%s' is reserved " "for automatic usage in the VALUES or SET " "clause of this " "insert/update statement. Please use a " "name other than column name when using bindparam() " "with insert() or update() (for example, 'b_%s')." % (bindparam.key, bindparam.key) ) self.binds[bindparam.key] = self.binds[name] = bindparam return self.bindparam_string(name, quote=bindparam.quote, **kwargs) def render_literal_bindparam(self, bindparam, **kw): value = bindparam.value processor = bindparam.type._cached_bind_processor(self.dialect) if processor: value = processor(value) return self.render_literal_value(value, bindparam.type) def render_literal_value(self, value, type_): """Render the value of a bind parameter as a quoted literal. This is used for statement sections that do not accept bind parameters on the target driver/database. This should be implemented by subclasses using the quoting services of the DBAPI. """ if isinstance(value, basestring): value = value.replace("'", "''") return "'%s'" % value elif value is None: return "NULL" elif isinstance(value, (float, int, long)): return repr(value) elif isinstance(value, decimal.Decimal): return str(value) elif isinstance(value, util.binary_type): # only would occur on py3k b.c. on 2k the string_types # directive above catches this. 
# see #2838 value = value.decode(self.dialect.encoding).replace("'", "''") return "'%s'" % value else: raise NotImplementedError( "Don't know how to literal-quote value %r" % value) def _truncate_bindparam(self, bindparam): if bindparam in self.bind_names: return self.bind_names[bindparam] bind_name = bindparam.key if isinstance(bind_name, sql._truncated_label): bind_name = self._truncated_identifier("bindparam", bind_name) # add to bind_names for translation self.bind_names[bindparam] = bind_name return bind_name def _truncated_identifier(self, ident_class, name): if (ident_class, name) in self.truncated_names: return self.truncated_names[(ident_class, name)] anonname = name.apply_map(self.anon_map) if len(anonname) > self.label_length: counter = self.truncated_names.get(ident_class, 1) truncname = anonname[0:max(self.label_length - 6, 0)] + \ "_" + hex(counter)[2:] self.truncated_names[ident_class] = counter + 1 else: truncname = anonname self.truncated_names[(ident_class, name)] = truncname return truncname def _anonymize(self, name): return name % self.anon_map def _process_anon(self, key): (ident, derived) = key.split(' ', 1) anonymous_counter = self.anon_map.get(derived, 1) self.anon_map[derived] = anonymous_counter + 1 return derived + "_" + str(anonymous_counter) def bindparam_string(self, name, quote=None, positional_names=None, **kw): if self.positional: if positional_names is not None: positional_names.append(name) else: self.positiontup.append(name) return self.bindtemplate % {'name': name} def visit_cte(self, cte, asfrom=False, ashint=False, fromhints=None, **kwargs): self._init_cte_state() if self.positional: kwargs['positional_names'] = self.cte_positional if isinstance(cte.name, sql._truncated_label): cte_name = self._truncated_identifier("alias", cte.name) else: cte_name = cte.name if cte_name in self.ctes_by_name: existing_cte = self.ctes_by_name[cte_name] # we've generated a same-named CTE that we are enclosed in, # or this is the same CTE. just return the name. if cte in existing_cte._restates or cte is existing_cte: return self.preparer.format_alias(cte, cte_name) elif existing_cte in cte._restates: # we've generated a same-named CTE that is # enclosed in us - we take precedence, so # discard the text for the "inner". 
del self.ctes[existing_cte] else: raise exc.CompileError( "Multiple, unrelated CTEs found with " "the same name: %r" % cte_name) self.ctes_by_name[cte_name] = cte if cte._cte_alias is not None: orig_cte = cte._cte_alias if orig_cte not in self.ctes: self.visit_cte(orig_cte) cte_alias_name = cte._cte_alias.name if isinstance(cte_alias_name, sql._truncated_label): cte_alias_name = self._truncated_identifier("alias", cte_alias_name) else: orig_cte = cte cte_alias_name = None if not cte_alias_name and cte not in self.ctes: if cte.recursive: self.ctes_recursive = True text = self.preparer.format_alias(cte, cte_name) if cte.recursive: if isinstance(cte.original, sql.Select): col_source = cte.original elif isinstance(cte.original, sql.CompoundSelect): col_source = cte.original.selects[0] else: assert False recur_cols = [c for c in util.unique_list(col_source.inner_columns) if c is not None] text += "(%s)" % (", ".join( self.preparer.format_column(ident) for ident in recur_cols)) text += " AS \n" + \ cte.original._compiler_dispatch( self, asfrom=True, **kwargs ) self.ctes[cte] = text if asfrom: if cte_alias_name: text = self.preparer.format_alias(cte, cte_alias_name) text += " AS " + cte_name else: return self.preparer.format_alias(cte, cte_name) return text def visit_alias(self, alias, asfrom=False, ashint=False, iscrud=False, fromhints=None, **kwargs): if asfrom or ashint: if isinstance(alias.name, sql._truncated_label): alias_name = self._truncated_identifier("alias", alias.name) else: alias_name = alias.name if ashint: return self.preparer.format_alias(alias, alias_name) elif asfrom: ret = alias.original._compiler_dispatch(self, asfrom=True, **kwargs) + \ " AS " + \ self.preparer.format_alias(alias, alias_name) if fromhints and alias in fromhints: ret = self.format_from_hint_text(ret, alias, fromhints[alias], iscrud) return ret else: return alias.original._compiler_dispatch(self, **kwargs) def _add_to_result_map(self, keyname, name, objects, type_): if not self.dialect.case_sensitive: keyname = keyname.lower() if keyname in self.result_map: # conflicting keyname, just double up the list # of objects. this will cause an "ambiguous name" # error if an attempt is made by the result set to # access. 
e_name, e_obj, e_type = self.result_map[keyname] self.result_map[keyname] = e_name, e_obj + objects, e_type else: self.result_map[keyname] = name, objects, type_ def _label_select_column(self, select, column, populate_result_map, asfrom, column_clause_args, name=None, within_columns_clause=True): """produce labeled columns present in a select().""" if column.type._has_column_expression and \ populate_result_map: col_expr = column.type.column_expression(column) add_to_result_map = lambda keyname, name, objects, type_: \ self._add_to_result_map( keyname, name, objects + (column,), type_) else: col_expr = column if populate_result_map: add_to_result_map = self._add_to_result_map else: add_to_result_map = None if not within_columns_clause: result_expr = col_expr elif isinstance(column, sql.Label): if col_expr is not column: result_expr = _CompileLabel( col_expr, column.name, alt_names=(column.element,) ) else: result_expr = col_expr elif select is not None and name: result_expr = _CompileLabel( col_expr, name, alt_names=(column._key_label,) ) elif \ asfrom and \ isinstance(column, sql.ColumnClause) and \ not column.is_literal and \ column.table is not None and \ not isinstance(column.table, sql.Select): result_expr = _CompileLabel(col_expr, sql._as_truncated(column.name), alt_names=(column.key,)) elif not isinstance(column, (sql.UnaryExpression, sql.TextClause)) \ and (not hasattr(column, 'name') or \ isinstance(column, sql.Function)): result_expr = _CompileLabel(col_expr, column.anon_label) elif col_expr is not column: # TODO: are we sure "column" has a .name and .key here ? # assert isinstance(column, sql.ColumnClause) result_expr = _CompileLabel(col_expr, sql._as_truncated(column.name), alt_names=(column.key,)) else: result_expr = col_expr column_clause_args.update( within_columns_clause=within_columns_clause, add_to_result_map=add_to_result_map ) return result_expr._compiler_dispatch( self, **column_clause_args ) def format_from_hint_text(self, sqltext, table, hint, iscrud): hinttext = self.get_from_hint_text(table, hint) if hinttext: sqltext += " " + hinttext return sqltext def get_select_hint_text(self, byfroms): return None def get_from_hint_text(self, table, text): return None def get_crud_hint_text(self, table, text): return None _default_stack_entry = util.immutabledict([ ('iswrapper', False), ('correlate_froms', frozenset()), ('asfrom_froms', frozenset()) ]) def _display_froms_for_select(self, select, asfrom): # utility method to help external dialects # get the correct from list for a select. # specifically the oracle dialect needs this feature # right now. 
toplevel = not self.stack entry = self._default_stack_entry if toplevel else self.stack[-1] correlate_froms = entry['correlate_froms'] asfrom_froms = entry['asfrom_froms'] if asfrom: froms = select._get_display_froms( explicit_correlate_froms=\ correlate_froms.difference(asfrom_froms), implicit_correlate_froms=()) else: froms = select._get_display_froms( explicit_correlate_froms=correlate_froms, implicit_correlate_froms=asfrom_froms) return froms def visit_select(self, select, asfrom=False, parens=True, iswrapper=False, fromhints=None, compound_index=0, force_result_map=False, positional_names=None, **kwargs): toplevel = not self.stack entry = self._default_stack_entry if toplevel else self.stack[-1] populate_result_map = force_result_map or ( compound_index == 0 and ( toplevel or \ entry['iswrapper'] ) ) correlate_froms = entry['correlate_froms'] asfrom_froms = entry['asfrom_froms'] if asfrom: froms = select._get_display_froms( explicit_correlate_froms= correlate_froms.difference(asfrom_froms), implicit_correlate_froms=()) else: froms = select._get_display_froms( explicit_correlate_froms=correlate_froms, implicit_correlate_froms=asfrom_froms) new_correlate_froms = set(sql._from_objects(*froms)) all_correlate_froms = new_correlate_froms.union(correlate_froms) new_entry = { 'asfrom_froms': new_correlate_froms, 'iswrapper': iswrapper, 'correlate_froms': all_correlate_froms } self.stack.append(new_entry) column_clause_args = kwargs.copy() column_clause_args.update({ 'positional_names': positional_names, 'within_label_clause': False, 'within_columns_clause': False }) # the actual list of columns to print in the SELECT column list. inner_columns = [ c for c in [ self._label_select_column(select, column, populate_result_map, asfrom, column_clause_args, name=name) for name, column in select._columns_plus_names ] if c is not None ] text = "SELECT " # we're off to a good start ! 
if select._hints: byfrom = dict([ (from_, hinttext % { 'name':from_._compiler_dispatch( self, ashint=True) }) for (from_, dialect), hinttext in select._hints.iteritems() if dialect in ('*', self.dialect.name) ]) hint_text = self.get_select_hint_text(byfrom) if hint_text: text += hint_text + " " if select._prefixes: text += self._generate_prefixes(select, select._prefixes, **kwargs) text += self.get_select_precolumns(select) text += ', '.join(inner_columns) if froms: text += " \nFROM " if select._hints: text += ', '.join([f._compiler_dispatch(self, asfrom=True, fromhints=byfrom, **kwargs) for f in froms]) else: text += ', '.join([f._compiler_dispatch(self, asfrom=True, **kwargs) for f in froms]) else: text += self.default_from() if select._whereclause is not None: t = select._whereclause._compiler_dispatch(self, **kwargs) if t: text += " \nWHERE " + t if select._group_by_clause.clauses: group_by = select._group_by_clause._compiler_dispatch( self, **kwargs) if group_by: text += " GROUP BY " + group_by if select._having is not None: t = select._having._compiler_dispatch(self, **kwargs) if t: text += " \nHAVING " + t if select._order_by_clause.clauses: text += self.order_by_clause(select, **kwargs) if select._limit is not None or select._offset is not None: text += self.limit_clause(select) if select.for_update: text += self.for_update_clause(select) if self.ctes and \ compound_index == 0 and toplevel: text = self._render_cte_clause() + text self.stack.pop(-1) if asfrom and parens: return "(" + text + ")" else: return text def _generate_prefixes(self, stmt, prefixes, **kw): clause = " ".join( prefix._compiler_dispatch(self, **kw) for prefix, dialect_name in prefixes if dialect_name is None or dialect_name == self.dialect.name ) if clause: clause += " " return clause def _render_cte_clause(self): if self.positional: self.positiontup = self.cte_positional + self.positiontup cte_text = self.get_cte_preamble(self.ctes_recursive) + " " cte_text += ", \n".join( [txt for txt in self.ctes.values()] ) cte_text += "\n " return cte_text def get_cte_preamble(self, recursive): if recursive: return "WITH RECURSIVE" else: return "WITH" def get_select_precolumns(self, select): """Called when building a ``SELECT`` statement, position is just before column list. """ return select._distinct and "DISTINCT " or "" def order_by_clause(self, select, **kw): order_by = select._order_by_clause._compiler_dispatch(self, **kw) if order_by: return " ORDER BY " + order_by else: return "" def for_update_clause(self, select): if select.for_update: return " FOR UPDATE" else: return "" def returning_clause(self, stmt, returning_cols): raise exc.CompileError( "RETURNING is not supported by this " "dialect's statement compiler.") def limit_clause(self, select): text = "" if select._limit is not None: text += "\n LIMIT " + self.process(sql.literal(select._limit)) if select._offset is not None: if select._limit is None: text += "\n LIMIT -1" text += " OFFSET " + self.process(sql.literal(select._offset)) return text def visit_table(self, table, asfrom=False, iscrud=False, ashint=False, fromhints=None, **kwargs): if asfrom or ashint: if getattr(table, "schema", None): ret = self.preparer.quote_schema(table.schema, table.quote_schema) + \ "." 
+ self.preparer.quote(table.name, table.quote) else: ret = self.preparer.quote(table.name, table.quote) if fromhints and table in fromhints: ret = self.format_from_hint_text(ret, table, fromhints[table], iscrud) return ret else: return "" def visit_join(self, join, asfrom=False, **kwargs): return ( join.left._compiler_dispatch(self, asfrom=True, **kwargs) + (join.isouter and " LEFT OUTER JOIN " or " JOIN ") + join.right._compiler_dispatch(self, asfrom=True, **kwargs) + " ON " + join.onclause._compiler_dispatch(self, **kwargs) ) def visit_insert(self, insert_stmt, **kw): self.isinsert = True colparams = self._get_colparams(insert_stmt) if not colparams and \ not self.dialect.supports_default_values and \ not self.dialect.supports_empty_insert: raise exc.CompileError("The '%s' dialect with current database " "version settings does not support empty " "inserts." % self.dialect.name) if insert_stmt._has_multi_parameters: if not self.dialect.supports_multivalues_insert: raise exc.CompileError("The '%s' dialect with current database " "version settings does not support " "in-place multirow inserts." % self.dialect.name) colparams_single = colparams[0] else: colparams_single = colparams preparer = self.preparer supports_default_values = self.dialect.supports_default_values text = "INSERT " if insert_stmt._prefixes: text += self._generate_prefixes(insert_stmt, insert_stmt._prefixes, **kw) text += "INTO " table_text = preparer.format_table(insert_stmt.table) if insert_stmt._hints: dialect_hints = dict([ (table, hint_text) for (table, dialect), hint_text in insert_stmt._hints.items() if dialect in ('*', self.dialect.name) ]) if insert_stmt.table in dialect_hints: table_text = self.format_from_hint_text( table_text, insert_stmt.table, dialect_hints[insert_stmt.table], True ) text += table_text if colparams_single or not supports_default_values: text += " (%s)" % ', '.join([preparer.format_column(c[0]) for c in colparams_single]) if self.returning or insert_stmt._returning: self.returning = self.returning or insert_stmt._returning returning_clause = self.returning_clause( insert_stmt, self.returning) if self.returning_precedes_values: text += " " + returning_clause if insert_stmt.select is not None: text += " %s" % self.process(insert_stmt.select, **kw) elif not colparams and supports_default_values: text += " DEFAULT VALUES" elif insert_stmt._has_multi_parameters: text += " VALUES %s" % ( ", ".join( "(%s)" % ( ', '.join(c[1] for c in colparam_set) ) for colparam_set in colparams ) ) else: text += " VALUES (%s)" % \ ', '.join([c[1] for c in colparams]) if self.returning and not self.returning_precedes_values: text += " " + returning_clause return text def update_limit_clause(self, update_stmt): """Provide a hook for MySQL to add LIMIT to the UPDATE""" return None def update_tables_clause(self, update_stmt, from_table, extra_froms, **kw): """Provide a hook to override the initial table clause in an UPDATE statement. MySQL overrides this. """ return from_table._compiler_dispatch(self, asfrom=True, iscrud=True, **kw) def update_from_clause(self, update_stmt, from_table, extra_froms, from_hints, **kw): """Provide a hook to override the generation of an UPDATE..FROM clause. MySQL and MSSQL override this. 
""" return "FROM " + ', '.join( t._compiler_dispatch(self, asfrom=True, fromhints=from_hints, **kw) for t in extra_froms) def visit_update(self, update_stmt, **kw): self.stack.append( {'correlate_froms': set([update_stmt.table]), "iswrapper": False, "asfrom_froms": set([update_stmt.table])}) self.isupdate = True extra_froms = update_stmt._extra_froms text = "UPDATE " if update_stmt._prefixes: text += self._generate_prefixes(update_stmt, update_stmt._prefixes, **kw) table_text = self.update_tables_clause(update_stmt, update_stmt.table, extra_froms, **kw) colparams = self._get_colparams(update_stmt, extra_froms) if update_stmt._hints: dialect_hints = dict([ (table, hint_text) for (table, dialect), hint_text in update_stmt._hints.items() if dialect in ('*', self.dialect.name) ]) if update_stmt.table in dialect_hints: table_text = self.format_from_hint_text( table_text, update_stmt.table, dialect_hints[update_stmt.table], True ) else: dialect_hints = None text += table_text text += ' SET ' include_table = extra_froms and \ self.render_table_with_column_in_update_from text += ', '.join( c[0]._compiler_dispatch(self, include_table=include_table) + '=' + c[1] for c in colparams ) if update_stmt._returning: self.returning = update_stmt._returning if self.returning_precedes_values: text += " " + self.returning_clause( update_stmt, update_stmt._returning) if extra_froms: extra_from_text = self.update_from_clause( update_stmt, update_stmt.table, extra_froms, dialect_hints, **kw) if extra_from_text: text += " " + extra_from_text if update_stmt._whereclause is not None: text += " WHERE " + self.process(update_stmt._whereclause) limit_clause = self.update_limit_clause(update_stmt) if limit_clause: text += " " + limit_clause if self.returning and not self.returning_precedes_values: text += " " + self.returning_clause( update_stmt, update_stmt._returning) self.stack.pop(-1) return text def _create_crud_bind_param(self, col, value, required=False, name=None): if name is None: name = col.key bindparam = sql.bindparam(name, value, type_=col.type, required=required, quote=col.quote) bindparam._is_crud = True return bindparam._compiler_dispatch(self) def _get_colparams(self, stmt, extra_tables=None): """create a set of tuples representing column/string pairs for use in an INSERT or UPDATE statement. Also generates the Compiled object's postfetch, prefetch, and returning column collections, used for default handling and ultimately populating the ResultProxy's prefetch_cols() and postfetch_cols() collections. 
""" self.postfetch = [] self.prefetch = [] self.returning = [] # no parameters in the statement, no parameters in the # compiled params - return binds for all columns if self.column_keys is None and stmt.parameters is None: return [ (c, self._create_crud_bind_param(c, None, required=True)) for c in stmt.table.columns ] if stmt._has_multi_parameters: stmt_parameters = stmt.parameters[0] else: stmt_parameters = stmt.parameters # if we have statement parameters - set defaults in the # compiled params if self.column_keys is None: parameters = {} else: parameters = dict((sql._column_as_key(key), REQUIRED) for key in self.column_keys if not stmt_parameters or key not in stmt_parameters) # create a list of column assignment clauses as tuples values = [] if stmt_parameters is not None: for k, v in stmt_parameters.iteritems(): colkey = sql._column_as_key(k) if colkey is not None: parameters.setdefault(colkey, v) else: # a non-Column expression on the left side; # add it to values() in an "as-is" state, # coercing right side to bound param if sql._is_literal(v): v = self.process(sql.bindparam(None, v, type_=k.type)) else: v = self.process(v.self_group()) values.append((k, v)) need_pks = self.isinsert and \ not self.inline and \ not stmt._returning implicit_returning = need_pks and \ self.dialect.implicit_returning and \ stmt.table.implicit_returning postfetch_lastrowid = need_pks and self.dialect.postfetch_lastrowid check_columns = {} # special logic that only occurs for multi-table UPDATE # statements if extra_tables and stmt_parameters: normalized_params = dict( (sql._clause_element_as_expr(c), param) for c, param in stmt_parameters.items() ) assert self.isupdate affected_tables = set() for t in extra_tables: for c in t.c: if c in normalized_params: affected_tables.add(t) check_columns[c.key] = c value = normalized_params[c] if sql._is_literal(value): value = self._create_crud_bind_param( c, value, required=value is REQUIRED) else: self.postfetch.append(c) value = self.process(value.self_group()) values.append((c, value)) # determine tables which are actually # to be updated - process onupdate and # server_onupdate for these for t in affected_tables: for c in t.c: if c in normalized_params: continue elif c.onupdate is not None and not c.onupdate.is_sequence: if c.onupdate.is_clause_element: values.append( (c, self.process(c.onupdate.arg.self_group())) ) self.postfetch.append(c) else: values.append( (c, self._create_crud_bind_param(c, None)) ) self.prefetch.append(c) elif c.server_onupdate is not None: self.postfetch.append(c) # iterating through columns at the top to maintain ordering. # otherwise we might iterate through individual sets of # "defaults", "primary key cols", etc. 
for c in stmt.table.columns: if c.key in parameters and c.key not in check_columns: value = parameters.pop(c.key) if sql._is_literal(value): value = self._create_crud_bind_param( c, value, required=value is REQUIRED, name=c.key if not stmt._has_multi_parameters else "%s_0" % c.key ) elif c.primary_key and implicit_returning: self.returning.append(c) value = self.process(value.self_group()) else: self.postfetch.append(c) value = self.process(value.self_group()) values.append((c, value)) elif self.isinsert: if c.primary_key and \ need_pks and \ ( implicit_returning or not postfetch_lastrowid or c is not stmt.table._autoincrement_column ): if implicit_returning: if c.default is not None: if c.default.is_sequence: if self.dialect.supports_sequences and \ (not c.default.optional or \ not self.dialect.sequences_optional): proc = self.process(c.default) values.append((c, proc)) self.returning.append(c) elif c.default.is_clause_element: values.append( (c, self.process(c.default.arg.self_group())) ) self.returning.append(c) else: values.append( (c, self._create_crud_bind_param(c, None)) ) self.prefetch.append(c) else: self.returning.append(c) else: if c.default is not None or \ c is stmt.table._autoincrement_column and ( self.dialect.supports_sequences or self.dialect.preexecute_autoincrement_sequences ): values.append( (c, self._create_crud_bind_param(c, None)) ) self.prefetch.append(c) elif c.default is not None: if c.default.is_sequence: if self.dialect.supports_sequences and \ (not c.default.optional or \ not self.dialect.sequences_optional): proc = self.process(c.default) values.append((c, proc)) if not c.primary_key: self.postfetch.append(c) elif c.default.is_clause_element: values.append( (c, self.process(c.default.arg.self_group())) ) if not c.primary_key: # dont add primary key column to postfetch self.postfetch.append(c) else: values.append( (c, self._create_crud_bind_param(c, None)) ) self.prefetch.append(c) elif c.server_default is not None: if not c.primary_key: self.postfetch.append(c) elif self.isupdate: if c.onupdate is not None and not c.onupdate.is_sequence: if c.onupdate.is_clause_element: values.append( (c, self.process(c.onupdate.arg.self_group())) ) self.postfetch.append(c) else: values.append( (c, self._create_crud_bind_param(c, None)) ) self.prefetch.append(c) elif c.server_onupdate is not None: self.postfetch.append(c) if parameters and stmt_parameters: check = set(parameters).intersection( sql._column_as_key(k) for k in stmt.parameters ).difference(check_columns) if check: raise exc.CompileError( "Unconsumed column names: %s" % (", ".join(check)) ) if stmt._has_multi_parameters: values_0 = values values = [values] values.extend( [ ( c, self._create_crud_bind_param( c, row[c.key], name="%s_%d" % (c.key, i + 1) ) if c.key in row else param ) for (c, param) in values_0 ] for i, row in enumerate(stmt.parameters[1:]) ) return values def visit_delete(self, delete_stmt, **kw): self.stack.append({'correlate_froms': set([delete_stmt.table]), "iswrapper": False, "asfrom_froms": set([delete_stmt.table])}) self.isdelete = True text = "DELETE " if delete_stmt._prefixes: text += self._generate_prefixes(delete_stmt, delete_stmt._prefixes, **kw) text += "FROM " table_text = delete_stmt.table._compiler_dispatch(self, asfrom=True, iscrud=True) if delete_stmt._hints: dialect_hints = dict([ (table, hint_text) for (table, dialect), hint_text in delete_stmt._hints.items() if dialect in ('*', self.dialect.name) ]) if delete_stmt.table in dialect_hints: table_text = self.format_from_hint_text( 
table_text, delete_stmt.table, dialect_hints[delete_stmt.table], True ) else: dialect_hints = None text += table_text if delete_stmt._returning: self.returning = delete_stmt._returning if self.returning_precedes_values: text += " " + self.returning_clause( delete_stmt, delete_stmt._returning) if delete_stmt._whereclause is not None: text += " WHERE " text += delete_stmt._whereclause._compiler_dispatch(self) if self.returning and not self.returning_precedes_values: text += " " + self.returning_clause( delete_stmt, delete_stmt._returning) self.stack.pop(-1) return text def visit_savepoint(self, savepoint_stmt): return "SAVEPOINT %s" % self.preparer.format_savepoint(savepoint_stmt) def visit_rollback_to_savepoint(self, savepoint_stmt): return "ROLLBACK TO SAVEPOINT %s" % \ self.preparer.format_savepoint(savepoint_stmt) def visit_release_savepoint(self, savepoint_stmt): return "RELEASE SAVEPOINT %s" % \ self.preparer.format_savepoint(savepoint_stmt) class DDLCompiler(engine.Compiled): @util.memoized_property def sql_compiler(self): return self.dialect.statement_compiler(self.dialect, None) @util.memoized_property def type_compiler(self): return self.dialect.type_compiler @property def preparer(self): return self.dialect.identifier_preparer def construct_params(self, params=None): return None def visit_ddl(self, ddl, **kwargs): # table events can substitute table and schema name context = ddl.context if isinstance(ddl.target, schema.Table): context = context.copy() preparer = self.dialect.identifier_preparer path = preparer.format_table_seq(ddl.target) if len(path) == 1: table, sch = path[0], '' else: table, sch = path[-1], path[0] context.setdefault('table', table) context.setdefault('schema', sch) context.setdefault('fullname', preparer.format_table(ddl.target)) return self.sql_compiler.post_process_text(ddl.statement % context) def visit_create_schema(self, create): schema = self.preparer.format_schema(create.element, create.quote) return "CREATE SCHEMA " + schema def visit_drop_schema(self, drop): schema = self.preparer.format_schema(drop.element, drop.quote) text = "DROP SCHEMA " + schema if drop.cascade: text += " CASCADE" return text def visit_create_table(self, create): table = create.element preparer = self.dialect.identifier_preparer text = "\n" + " ".join(['CREATE'] + \ table._prefixes + \ ['TABLE', preparer.format_table(table), "("]) separator = "\n" # if only one primary key, specify it along with the column first_pk = False for create_column in create.columns: column = create_column.element try: processed = self.process(create_column, first_pk=column.primary_key and not first_pk) if processed is not None: text += separator separator = ", \n" text += "\t" + processed if column.primary_key: first_pk = True except exc.CompileError, ce: # Py3K #raise exc.CompileError("(in table '%s', column '%s'): %s" # % ( # table.description, # column.name, # ce.args[0] # )) from ce # Py2K raise exc.CompileError("(in table '%s', column '%s'): %s" % ( table.description, column.name, ce.args[0] )), None, sys.exc_info()[2] # end Py2K const = self.create_table_constraints(table) if const: text += ", \n\t" + const text += "\n)%s\n\n" % self.post_create_table(table) return text def visit_create_column(self, create, first_pk=False): column = create.element if column.system: return None text = self.get_column_specification( column, first_pk=first_pk ) const = " ".join(self.process(constraint) \ for constraint in column.constraints) if const: text += " " + const return text def create_table_constraints(self, 
table): # On some DB order is significant: visit PK first, then the # other constraints (engine.ReflectionTest.testbasic failed on FB2) constraints = [] if table.primary_key: constraints.append(table.primary_key) constraints.extend([c for c in table._sorted_constraints if c is not table.primary_key]) return ", \n\t".join(p for p in (self.process(constraint) for constraint in constraints if ( constraint._create_rule is None or constraint._create_rule(self)) and ( not self.dialect.supports_alter or not getattr(constraint, 'use_alter', False) )) if p is not None ) def visit_drop_table(self, drop): return "\nDROP TABLE " + self.preparer.format_table(drop.element) def visit_drop_view(self, drop): return "\nDROP VIEW " + self.preparer.format_table(drop.element) def _verify_index_table(self, index): if index.table is None: raise exc.CompileError("Index '%s' is not associated " "with any table." % index.name) def visit_create_index(self, create, include_schema=False, include_table_schema=True): index = create.element self._verify_index_table(index) preparer = self.preparer text = "CREATE " if index.unique: text += "UNIQUE " text += "INDEX %s ON %s (%s)" \ % ( self._prepared_index_name(index, include_schema=include_schema), preparer.format_table(index.table, use_schema=include_table_schema), ', '.join( self.sql_compiler.process(expr, include_table=False, literal_binds=True) for expr in index.expressions) ) return text def visit_drop_index(self, drop): index = drop.element return "\nDROP INDEX " + self._prepared_index_name(index, include_schema=True) def _prepared_index_name(self, index, include_schema=False): if include_schema and index.table is not None and index.table.schema: schema = index.table.schema schema_name = self.preparer.quote_schema(schema, index.table.quote_schema) else: schema_name = None ident = index.name if isinstance(ident, sql._truncated_label): max_ = self.dialect.max_index_name_length or \ self.dialect.max_identifier_length if len(ident) > max_: ident = ident[0:max_ - 8] + \ "_" + util.md5_hex(ident)[-4:] else: self.dialect.validate_identifier(ident) index_name = self.preparer.quote( ident, index.quote) if schema_name: index_name = schema_name + "." 
+ index_name return index_name def visit_add_constraint(self, create): return "ALTER TABLE %s ADD %s" % ( self.preparer.format_table(create.element.table), self.process(create.element) ) def visit_create_sequence(self, create): text = "CREATE SEQUENCE %s" % \ self.preparer.format_sequence(create.element) if create.element.increment is not None: text += " INCREMENT BY %d" % create.element.increment if create.element.start is not None: text += " START WITH %d" % create.element.start return text def visit_drop_sequence(self, drop): return "DROP SEQUENCE %s" % \ self.preparer.format_sequence(drop.element) def visit_drop_constraint(self, drop): return "ALTER TABLE %s DROP CONSTRAINT %s%s" % ( self.preparer.format_table(drop.element.table), self.preparer.format_constraint(drop.element), drop.cascade and " CASCADE" or "" ) def get_column_specification(self, column, **kwargs): colspec = self.preparer.format_column(column) + " " + \ self.dialect.type_compiler.process(column.type) default = self.get_column_default_string(column) if default is not None: colspec += " DEFAULT " + default if not column.nullable: colspec += " NOT NULL" return colspec def post_create_table(self, table): return '' def get_column_default_string(self, column): if isinstance(column.server_default, schema.DefaultClause): if isinstance(column.server_default.arg, basestring): return "'%s'" % column.server_default.arg else: return self.sql_compiler.process(column.server_default.arg) else: return None def visit_check_constraint(self, constraint): text = "" if constraint.name is not None: text += "CONSTRAINT %s " % \ self.preparer.format_constraint(constraint) text += "CHECK (%s)" % self.sql_compiler.process(constraint.sqltext, include_table=False, literal_binds=True) text += self.define_constraint_deferrability(constraint) return text def visit_column_check_constraint(self, constraint): text = "" if constraint.name is not None: text += "CONSTRAINT %s " % \ self.preparer.format_constraint(constraint) text += "CHECK (%s)" % constraint.sqltext text += self.define_constraint_deferrability(constraint) return text def visit_primary_key_constraint(self, constraint): if len(constraint) == 0: return '' text = "" if constraint.name is not None: text += "CONSTRAINT %s " % \ self.preparer.format_constraint(constraint) text += "PRIMARY KEY " text += "(%s)" % ', '.join(self.preparer.quote(c.name, c.quote) for c in constraint) text += self.define_constraint_deferrability(constraint) return text def visit_foreign_key_constraint(self, constraint): preparer = self.dialect.identifier_preparer text = "" if constraint.name is not None: text += "CONSTRAINT %s " % \ preparer.format_constraint(constraint) remote_table = list(constraint._elements.values())[0].column.table text += "FOREIGN KEY(%s) REFERENCES %s (%s)" % ( ', '.join(preparer.quote(f.parent.name, f.parent.quote) for f in constraint._elements.values()), self.define_constraint_remote_table( constraint, remote_table, preparer), ', '.join(preparer.quote(f.column.name, f.column.quote) for f in constraint._elements.values()) ) text += self.define_constraint_match(constraint) text += self.define_constraint_cascades(constraint) text += self.define_constraint_deferrability(constraint) return text def define_constraint_remote_table(self, constraint, table, preparer): """Format the remote table clause of a CREATE CONSTRAINT clause.""" return preparer.format_table(table) def visit_unique_constraint(self, constraint): text = "" if constraint.name is not None: text += "CONSTRAINT %s " % \ 
self.preparer.format_constraint(constraint) text += "UNIQUE (%s)" % ( ', '.join(self.preparer.quote(c.name, c.quote) for c in constraint)) text += self.define_constraint_deferrability(constraint) return text def define_constraint_cascades(self, constraint): text = "" if constraint.ondelete is not None: text += " ON DELETE %s" % constraint.ondelete if constraint.onupdate is not None: text += " ON UPDATE %s" % constraint.onupdate return text def define_constraint_deferrability(self, constraint): text = "" if constraint.deferrable is not None: if constraint.deferrable: text += " DEFERRABLE" else: text += " NOT DEFERRABLE" if constraint.initially is not None: text += " INITIALLY %s" % constraint.initially return text def define_constraint_match(self, constraint): text = "" if constraint.match is not None: text += " MATCH %s" % constraint.match return text class GenericTypeCompiler(engine.TypeCompiler): def visit_FLOAT(self, type_): return "FLOAT" def visit_REAL(self, type_): return "REAL" def visit_NUMERIC(self, type_): if type_.precision is None: return "NUMERIC" elif type_.scale is None: return "NUMERIC(%(precision)s)" % \ {'precision': type_.precision} else: return "NUMERIC(%(precision)s, %(scale)s)" % \ {'precision': type_.precision, 'scale': type_.scale} def visit_DECIMAL(self, type_): if type_.precision is None: return "DECIMAL" elif type_.scale is None: return "DECIMAL(%(precision)s)" % \ {'precision': type_.precision} else: return "DECIMAL(%(precision)s, %(scale)s)" % \ {'precision': type_.precision, 'scale': type_.scale} def visit_INTEGER(self, type_): return "INTEGER" def visit_SMALLINT(self, type_): return "SMALLINT" def visit_BIGINT(self, type_): return "BIGINT" def visit_TIMESTAMP(self, type_): return 'TIMESTAMP' def visit_DATETIME(self, type_): return "DATETIME" def visit_DATE(self, type_): return "DATE" def visit_TIME(self, type_): return "TIME" def visit_CLOB(self, type_): return "CLOB" def visit_NCLOB(self, type_): return "NCLOB" def _render_string_type(self, type_, name): text = name if type_.length: text += "(%d)" % type_.length if type_.collation: text += ' COLLATE "%s"' % type_.collation return text def visit_CHAR(self, type_): return self._render_string_type(type_, "CHAR") def visit_NCHAR(self, type_): return self._render_string_type(type_, "NCHAR") def visit_VARCHAR(self, type_): return self._render_string_type(type_, "VARCHAR") def visit_NVARCHAR(self, type_): return self._render_string_type(type_, "NVARCHAR") def visit_TEXT(self, type_): return self._render_string_type(type_, "TEXT") def visit_BLOB(self, type_): return "BLOB" def visit_BINARY(self, type_): return "BINARY" + (type_.length and "(%d)" % type_.length or "") def visit_VARBINARY(self, type_): return "VARBINARY" + (type_.length and "(%d)" % type_.length or "") def visit_BOOLEAN(self, type_): return "BOOLEAN" def visit_large_binary(self, type_): return self.visit_BLOB(type_) def visit_boolean(self, type_): return self.visit_BOOLEAN(type_) def visit_time(self, type_): return self.visit_TIME(type_) def visit_datetime(self, type_): return self.visit_DATETIME(type_) def visit_date(self, type_): return self.visit_DATE(type_) def visit_big_integer(self, type_): return self.visit_BIGINT(type_) def visit_small_integer(self, type_): return self.visit_SMALLINT(type_) def visit_integer(self, type_): return self.visit_INTEGER(type_) def visit_real(self, type_): return self.visit_REAL(type_) def visit_float(self, type_): return self.visit_FLOAT(type_) def visit_numeric(self, type_): return self.visit_NUMERIC(type_) def 
visit_string(self, type_): return self.visit_VARCHAR(type_) def visit_unicode(self, type_): return self.visit_VARCHAR(type_) def visit_text(self, type_): return self.visit_TEXT(type_) def visit_unicode_text(self, type_): return self.visit_TEXT(type_) def visit_enum(self, type_): return self.visit_VARCHAR(type_) def visit_null(self, type_): raise NotImplementedError("Can't generate DDL for the null type") def visit_type_decorator(self, type_): return self.process(type_.type_engine(self.dialect)) def visit_user_defined(self, type_): return type_.get_col_spec() class IdentifierPreparer(object): """Handle quoting and case-folding of identifiers based on options.""" reserved_words = RESERVED_WORDS legal_characters = LEGAL_CHARACTERS illegal_initial_characters = ILLEGAL_INITIAL_CHARACTERS def __init__(self, dialect, initial_quote='"', final_quote=None, escape_quote='"', omit_schema=False): """Construct a new ``IdentifierPreparer`` object. initial_quote Character that begins a delimited identifier. final_quote Character that ends a delimited identifier. Defaults to `initial_quote`. omit_schema Prevent prepending schema name. Useful for databases that do not support schemae. """ self.dialect = dialect self.initial_quote = initial_quote self.final_quote = final_quote or self.initial_quote self.escape_quote = escape_quote self.escape_to_quote = self.escape_quote * 2 self.omit_schema = omit_schema self._strings = {} def _escape_identifier(self, value): """Escape an identifier. Subclasses should override this to provide database-dependent escaping behavior. """ return value.replace(self.escape_quote, self.escape_to_quote) def _unescape_identifier(self, value): """Canonicalize an escaped identifier. Subclasses should override this to provide database-dependent unescaping behavior that reverses _escape_identifier. """ return value.replace(self.escape_to_quote, self.escape_quote) def quote_identifier(self, value): """Quote an identifier. Subclasses should override this to provide database-dependent quoting behavior. """ return self.initial_quote + \ self._escape_identifier(value) + \ self.final_quote def _requires_quotes(self, value): """Return True if the given identifier requires quoting.""" lc_value = value.lower() return (lc_value in self.reserved_words or value[0] in self.illegal_initial_characters or not self.legal_characters.match(unicode(value)) or (lc_value != value)) def quote_schema(self, schema, force): """Quote a schema. Subclasses should override this to provide database-dependent quoting behavior. """ return self.quote(schema, force) def quote(self, ident, force): if force is None: if ident in self._strings: return self._strings[ident] else: if self._requires_quotes(ident): self._strings[ident] = self.quote_identifier(ident) else: self._strings[ident] = ident return self._strings[ident] elif force: return self.quote_identifier(ident) else: return ident def format_sequence(self, sequence, use_schema=True): name = self.quote(sequence.name, sequence.quote) if not self.omit_schema and use_schema and \ sequence.schema is not None: name = self.quote_schema(sequence.schema, sequence.quote) + \ "." 
+ name return name def format_label(self, label, name=None): return self.quote(name or label.name, label.quote) def format_alias(self, alias, name=None): return self.quote(name or alias.name, alias.quote) def format_savepoint(self, savepoint, name=None): return self.quote(name or savepoint.ident, savepoint.quote) def format_constraint(self, constraint): return self.quote(constraint.name, constraint.quote) def format_table(self, table, use_schema=True, name=None): """Prepare a quoted table and schema name.""" if name is None: name = table.name result = self.quote(name, table.quote) if not self.omit_schema and use_schema \ and getattr(table, "schema", None): result = self.quote_schema(table.schema, table.quote_schema) + \ "." + result return result def format_schema(self, name, quote): """Prepare a quoted schema name.""" return self.quote(name, quote) def format_column(self, column, use_table=False, name=None, table_name=None): """Prepare a quoted column name.""" if name is None: name = column.name if not getattr(column, 'is_literal', False): if use_table: return self.format_table( column.table, use_schema=False, name=table_name) + "." + \ self.quote(name, column.quote) else: return self.quote(name, column.quote) else: # literal textual elements get stuck into ColumnClause a lot, # which shouldn't get quoted if use_table: return self.format_table(column.table, use_schema=False, name=table_name) + '.' + name else: return name def format_table_seq(self, table, use_schema=True): """Format table name and schema as a tuple.""" # Dialects with more levels in their fully qualified references # ('database', 'owner', etc.) could override this and return # a longer sequence. if not self.omit_schema and use_schema and \ getattr(table, 'schema', None): return (self.quote_schema(table.schema, table.quote_schema), self.format_table(table, use_schema=False)) else: return (self.format_table(table, use_schema=False), ) @util.memoized_property def _r_identifiers(self): initial, final, escaped_final = \ [re.escape(s) for s in (self.initial_quote, self.final_quote, self._escape_identifier(self.final_quote))] r = re.compile( r'(?:' r'(?:%(initial)s((?:%(escaped)s|[^%(final)s])+)%(final)s' r'|([^\.]+))(?=\.|$))+' % {'initial': initial, 'final': final, 'escaped': escaped_final}) return r def unformat_identifiers(self, identifiers): """Unpack 'schema.table.column'-like strings into components.""" r = self._r_identifiers return [self._unescape_identifier(i) for i in [a or b for a, b in r.findall(identifiers)]] SQLAlchemy-0.8.4/lib/sqlalchemy/sql/expression.py0000644000076500000240000066242612251150015022521 0ustar classicstaff00000000000000# sql/expression.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Defines the base components of SQL expression trees. All components are derived from a common base class :class:`.ClauseElement`. Common behaviors are organized based on class hierarchies, in some cases via mixins. All object construction from this package occurs via functions which in some cases will construct composite :class:`.ClauseElement` structures together, and in other cases simply return a single :class:`.ClauseElement` constructed directly. The function interface affords a more "DSL-ish" feel to constructing SQL expressions and also allows future class reorganizations. 
Even though classes are not constructed directly from the outside, most classes which have additional public methods are considered to be public (i.e. have no leading underscore). Other classes which are "semi-public" are marked with a single leading underscore; these classes usually have few or no public methods and are less guaranteed to stay the same in future releases. """ import itertools import re from operator import attrgetter from .. import util, exc, inspection from . import operators from .operators import ColumnOperators from .visitors import Visitable, cloned_traverse import operator functions = util.importlater("sqlalchemy.sql", "functions") sqlutil = util.importlater("sqlalchemy.sql", "util") sqltypes = util.importlater("sqlalchemy", "types") default = util.importlater("sqlalchemy.engine", "default") __all__ = [ 'Alias', 'ClauseElement', 'ColumnCollection', 'ColumnElement', 'CompoundSelect', 'Delete', 'FromClause', 'Insert', 'Join', 'Select', 'Selectable', 'TableClause', 'Update', 'alias', 'and_', 'asc', 'between', 'bindparam', 'case', 'cast', 'column', 'delete', 'desc', 'distinct', 'except_', 'except_all', 'exists', 'extract', 'func', 'modifier', 'collate', 'insert', 'intersect', 'intersect_all', 'join', 'label', 'literal', 'literal_column', 'not_', 'null', 'nullsfirst', 'nullslast', 'or_', 'outparam', 'outerjoin', 'over', 'select', 'subquery', 'table', 'text', 'tuple_', 'type_coerce', 'union', 'union_all', 'update', ] PARSE_AUTOCOMMIT = util.symbol('PARSE_AUTOCOMMIT') NO_ARG = util.symbol('NO_ARG') def nullsfirst(column): """Return a NULLS FIRST ``ORDER BY`` clause element. e.g.:: someselect.order_by(desc(table1.mycol).nullsfirst()) produces:: ORDER BY mycol DESC NULLS FIRST """ return UnaryExpression(column, modifier=operators.nullsfirst_op) def nullslast(column): """Return a NULLS LAST ``ORDER BY`` clause element. e.g.:: someselect.order_by(desc(table1.mycol).nullslast()) produces:: ORDER BY mycol DESC NULLS LAST """ return UnaryExpression(column, modifier=operators.nullslast_op) def desc(column): """Return a descending ``ORDER BY`` clause element. e.g.:: someselect.order_by(desc(table1.mycol)) produces:: ORDER BY mycol DESC """ return UnaryExpression(column, modifier=operators.desc_op) def asc(column): """Return an ascending ``ORDER BY`` clause element. e.g.:: someselect.order_by(asc(table1.mycol)) produces:: ORDER BY mycol ASC """ return UnaryExpression(column, modifier=operators.asc_op) def outerjoin(left, right, onclause=None): """Return an ``OUTER JOIN`` clause element. The returned object is an instance of :class:`.Join`. Similar functionality is also available via the :meth:`~.FromClause.outerjoin()` method on any :class:`.FromClause`. :param left: The left side of the join. :param right: The right side of the join. :param onclause: Optional criterion for the ``ON`` clause, is derived from foreign key relationships established between left and right otherwise. To chain joins together, use the :meth:`.FromClause.join` or :meth:`.FromClause.outerjoin` methods on the resulting :class:`.Join` object. """ return Join(left, right, onclause, isouter=True) def join(left, right, onclause=None, isouter=False): """Return a ``JOIN`` clause element (regular inner join). The returned object is an instance of :class:`.Join`. Similar functionality is also available via the :meth:`~.FromClause.join()` method on any :class:`.FromClause`. :param left: The left side of the join. :param right: The right side of the join. 
:param onclause: Optional criterion for the ``ON`` clause, is derived from foreign key relationships established between left and right otherwise. To chain joins together, use the :meth:`.FromClause.join` or :meth:`.FromClause.outerjoin` methods on the resulting :class:`.Join` object. """ return Join(left, right, onclause, isouter) def select(columns=None, whereclause=None, from_obj=[], **kwargs): """Returns a ``SELECT`` clause element. Similar functionality is also available via the :func:`select()` method on any :class:`.FromClause`. The returned object is an instance of :class:`.Select`. All arguments which accept :class:`.ClauseElement` arguments also accept string arguments, which will be converted as appropriate into either :func:`text()` or :func:`literal_column()` constructs. .. seealso:: :ref:`coretutorial_selecting` - Core Tutorial description of :func:`.select`. :param columns: A list of :class:`.ClauseElement` objects, typically :class:`.ColumnElement` objects or subclasses, which will form the columns clause of the resulting statement. For all members which are instances of :class:`.Selectable`, the individual :class:`.ColumnElement` members of the :class:`.Selectable` will be added individually to the columns clause. For example, specifying a :class:`~sqlalchemy.schema.Table` instance will result in all the contained :class:`~sqlalchemy.schema.Column` objects within to be added to the columns clause. This argument is not present on the form of :func:`select()` available on :class:`~sqlalchemy.schema.Table`. :param whereclause: A :class:`.ClauseElement` expression which will be used to form the ``WHERE`` clause. :param from_obj: A list of :class:`.ClauseElement` objects which will be added to the ``FROM`` clause of the resulting statement. Note that "from" objects are automatically located within the columns and whereclause ClauseElements. Use this parameter to explicitly specify "from" objects which are not automatically locatable. This could include :class:`~sqlalchemy.schema.Table` objects that aren't otherwise present, or :class:`.Join` objects whose presence will supercede that of the :class:`~sqlalchemy.schema.Table` objects already located in the other clauses. :param autocommit: Deprecated. Use .execution_options(autocommit=) to set the autocommit option. :param bind=None: an :class:`~.base.Engine` or :class:`~.base.Connection` instance to which the resulting :class:`.Select` object will be bound. The :class:`.Select` object will otherwise automatically bind to whatever :class:`~.base.Connectable` instances can be located within its contained :class:`.ClauseElement` members. :param correlate=True: indicates that this :class:`.Select` object should have its contained :class:`.FromClause` elements "correlated" to an enclosing :class:`.Select` object. This means that any :class:`.ClauseElement` instance within the "froms" collection of this :class:`.Select` which is also present in the "froms" collection of an enclosing select will not be rendered in the ``FROM`` clause of this select statement. :param distinct=False: when ``True``, applies a ``DISTINCT`` qualifier to the columns clause of the resulting statement. The boolean argument may also be a column expression or list of column expressions - this is a special calling form which is understood by the Postgresql dialect to render the ``DISTINCT ON ()`` syntax. ``distinct`` is also available via the :meth:`~.Select.distinct` generative method. 
:param for_update=False: when ``True``, applies ``FOR UPDATE`` to the end of the resulting statement. Certain database dialects also support alternate values for this parameter: * With the MySQL dialect, the value ``"read"`` translates to ``LOCK IN SHARE MODE``. * With the Oracle and Postgresql dialects, the value ``"nowait"`` translates to ``FOR UPDATE NOWAIT``. * With the Postgresql dialect, the values "read" and ``"read_nowait"`` translate to ``FOR SHARE`` and ``FOR SHARE NOWAIT``, respectively. .. versionadded:: 0.7.7 :param group_by: a list of :class:`.ClauseElement` objects which will comprise the ``GROUP BY`` clause of the resulting select. :param having: a :class:`.ClauseElement` that will comprise the ``HAVING`` clause of the resulting select when ``GROUP BY`` is used. :param limit=None: a numerical value which usually compiles to a ``LIMIT`` expression in the resulting select. Databases that don't support ``LIMIT`` will attempt to provide similar functionality. :param offset=None: a numeric value which usually compiles to an ``OFFSET`` expression in the resulting select. Databases that don't support ``OFFSET`` will attempt to provide similar functionality. :param order_by: a scalar or list of :class:`.ClauseElement` objects which will comprise the ``ORDER BY`` clause of the resulting select. :param use_labels=False: when ``True``, the statement will be generated using labels for each column in the columns clause, which qualify each column with its parent table's (or aliases) name so that name conflicts between columns in different tables don't occur. The format of the label is _. The "c" collection of the resulting :class:`.Select` object will use these names as well for targeting column members. use_labels is also available via the :meth:`~.SelectBase.apply_labels` generative method. """ return Select(columns, whereclause=whereclause, from_obj=from_obj, **kwargs) def subquery(alias, *args, **kwargs): """Return an :class:`.Alias` object derived from a :class:`.Select`. name alias name \*args, \**kwargs all other arguments are delivered to the :func:`select` function. """ return Select(*args, **kwargs).alias(alias) def insert(table, values=None, inline=False, **kwargs): """Represent an ``INSERT`` statement via the :class:`.Insert` SQL construct. Similar functionality is available via the :meth:`~.TableClause.insert` method on :class:`~.schema.Table`. :param table: :class:`.TableClause` which is the subject of the insert. :param values: collection of values to be inserted; see :meth:`.Insert.values` for a description of allowed formats here. Can be omitted entirely; a :class:`.Insert` construct will also dynamically render the VALUES clause at execution time based on the parameters passed to :meth:`.Connection.execute`. :param inline: if True, SQL defaults will be compiled 'inline' into the statement and not pre-executed. If both `values` and compile-time bind parameters are present, the compile-time bind parameters override the information specified within `values` on a per-key basis. The keys within `values` can be either :class:`~sqlalchemy.schema.Column` objects or their string identifiers. Each key may reference one of: * a literal data value (i.e. string, number, etc.); * a Column object; * a SELECT statement. If a ``SELECT`` statement is specified which references this ``INSERT`` statement's table, the statement will be correlated against the ``INSERT`` statement. .. 
seealso:: :ref:`coretutorial_insert_expressions` - SQL Expression Tutorial :ref:`inserts_and_updates` - SQL Expression Tutorial """ return Insert(table, values, inline=inline, **kwargs) def update(table, whereclause=None, values=None, inline=False, **kwargs): """Represent an ``UPDATE`` statement via the :class:`.Update` SQL construct. E.g.:: from sqlalchemy import update stmt = update(users).where(users.c.id==5).\\ values(name='user #5') Similar functionality is available via the :meth:`~.TableClause.update` method on :class:`.Table`:: stmt = users.update().\\ where(users.c.id==5).\\ values(name='user #5') :param table: A :class:`.Table` object representing the database table to be updated. :param whereclause: Optional SQL expression describing the ``WHERE`` condition of the ``UPDATE`` statement. Modern applications may prefer to use the generative :meth:`~Update.where()` method to specify the ``WHERE`` clause. The WHERE clause can refer to multiple tables. For databases which support this, an ``UPDATE FROM`` clause will be generated, or on MySQL, a multi-table update. The statement will fail on databases that don't have support for multi-table update statements. A SQL-standard method of referring to additional tables in the WHERE clause is to use a correlated subquery:: users.update().values(name='ed').where( users.c.name==select([addresses.c.email_address]).\\ where(addresses.c.user_id==users.c.id).\\ as_scalar() ) .. versionchanged:: 0.7.4 The WHERE clause can refer to multiple tables. :param values: Optional dictionary which specifies the ``SET`` conditions of the ``UPDATE``. If left as ``None``, the ``SET`` conditions are determined from those parameters passed to the statement during the execution and/or compilation of the statement. When compiled standalone without any parameters, the ``SET`` clause generates for all columns. Modern applications may prefer to use the generative :meth:`.Update.values` method to set the values of the UPDATE statement. :param inline: if True, SQL defaults present on :class:`.Column` objects via the ``default`` keyword will be compiled 'inline' into the statement and not pre-executed. This means that their values will not be available in the dictionary returned from :meth:`.ResultProxy.last_updated_params`. If both ``values`` and compile-time bind parameters are present, the compile-time bind parameters override the information specified within ``values`` on a per-key basis. The keys within ``values`` can be either :class:`.Column` objects or their string identifiers (specifically the "key" of the :class:`.Column`, normally but not necessarily equivalent to its "name"). Normally, the :class:`.Column` objects used here are expected to be part of the target :class:`.Table` that is the table to be updated. However when using MySQL, a multiple-table UPDATE statement can refer to columns from any of the tables referred to in the WHERE clause. The values referred to in ``values`` are typically: * a literal data value (i.e. string, number, etc.) * a SQL expression, such as a related :class:`.Column`, a scalar-returning :func:`.select` construct, etc. 
When combining :func:`.select` constructs within the values clause of an :func:`.update` construct, the subquery represented by the :func:`.select` should be *correlated* to the parent table, that is, providing criterion which links the table inside the subquery to the outer table being updated:: users.update().values( name=select([addresses.c.email_address]).\\ where(addresses.c.user_id==users.c.id).\\ as_scalar() ) .. seealso:: :ref:`inserts_and_updates` - SQL Expression Language Tutorial """ return Update( table, whereclause=whereclause, values=values, inline=inline, **kwargs) def delete(table, whereclause=None, **kwargs): """Represent a ``DELETE`` statement via the :class:`.Delete` SQL construct. Similar functionality is available via the :meth:`~.TableClause.delete` method on :class:`~.schema.Table`. :param table: The table to be updated. :param whereclause: A :class:`.ClauseElement` describing the ``WHERE`` condition of the ``UPDATE`` statement. Note that the :meth:`~Delete.where()` generative method may be used instead. .. seealso:: :ref:`deletes` - SQL Expression Tutorial """ return Delete(table, whereclause, **kwargs) def and_(*clauses): """Join a list of clauses together using the ``AND`` operator. The ``&`` operator is also overloaded on all :class:`.ColumnElement` subclasses to produce the same result. """ if len(clauses) == 1: return clauses[0] return BooleanClauseList(operator=operators.and_, *clauses) def or_(*clauses): """Join a list of clauses together using the ``OR`` operator. The ``|`` operator is also overloaded on all :class:`.ColumnElement` subclasses to produce the same result. """ if len(clauses) == 1: return clauses[0] return BooleanClauseList(operator=operators.or_, *clauses) def not_(clause): """Return a negation of the given clause, i.e. ``NOT(clause)``. The ``~`` operator is also overloaded on all :class:`.ColumnElement` subclasses to produce the same result. """ return operators.inv(_literal_as_binds(clause)) def distinct(expr): """Return a ``DISTINCT`` clause. e.g.:: distinct(a) renders:: DISTINCT a """ expr = _literal_as_binds(expr) return UnaryExpression(expr, operator=operators.distinct_op, type_=expr.type) def between(ctest, cleft, cright): """Return a ``BETWEEN`` predicate clause. Equivalent of SQL ``clausetest BETWEEN clauseleft AND clauseright``. The :func:`between()` method on all :class:`.ColumnElement` subclasses provides similar functionality. """ ctest = _literal_as_binds(ctest) return ctest.between(cleft, cright) def case(whens, value=None, else_=None): """Produce a ``CASE`` statement. whens A sequence of pairs, or alternatively a dict, to be translated into "WHEN / THEN" clauses. value Optional for simple case statements, produces a column expression as in "CASE WHEN ..." else\_ Optional as well, for case defaults produces the "ELSE" portion of the "CASE" statement. The expressions used for THEN and ELSE, when specified as strings, will be interpreted as bound values. To specify textual SQL expressions for these, use the :func:`literal_column` construct. The expressions used for the WHEN criterion may only be literal strings when "value" is present, i.e. CASE table.somecol WHEN "x" THEN "y". Otherwise, literal strings are not accepted in this position, and either the text() or literal() constructs must be used to interpret raw string values. 
Usage examples:: case([(orderline.c.qty > 100, item.c.specialprice), (orderline.c.qty > 10, item.c.bulkprice) ], else_=item.c.regularprice) case(value=emp.c.type, whens={ 'engineer': emp.c.salary * 1.1, 'manager': emp.c.salary * 3, }) Using :func:`literal_column()`, to allow for databases that do not support bind parameters in the ``then`` clause. The type can be specified which determines the type of the :func:`case()` construct overall:: case([(orderline.c.qty > 100, literal_column("'greaterthan100'", String)), (orderline.c.qty > 10, literal_column("'greaterthan10'", String)) ], else_=literal_column("'lethan10'", String)) """ return Case(whens, value=value, else_=else_) def cast(clause, totype, **kwargs): """Return a ``CAST`` function. Equivalent of SQL ``CAST(clause AS totype)``. Use with a :class:`~sqlalchemy.types.TypeEngine` subclass, i.e:: cast(table.c.unit_price * table.c.qty, Numeric(10,4)) or:: cast(table.c.timestamp, DATE) """ return Cast(clause, totype, **kwargs) def extract(field, expr): """Return the clause ``extract(field FROM expr)``.""" return Extract(field, expr) def collate(expression, collation): """Return the clause ``expression COLLATE collation``. e.g.:: collate(mycolumn, 'utf8_bin') produces:: mycolumn COLLATE utf8_bin """ expr = _literal_as_binds(expression) return BinaryExpression( expr, _literal_as_text(collation), operators.collate, type_=expr.type) def exists(*args, **kwargs): """Return an ``EXISTS`` clause as applied to a :class:`.Select` object. Calling styles are of the following forms:: # use on an existing select() s = select([table.c.col1]).where(table.c.col2==5) s = exists(s) # construct a select() at once exists(['*'], **select_arguments).where(criterion) # columns argument is optional, generates "EXISTS (SELECT *)" # by default. exists().where(table.c.col2==5) """ return Exists(*args, **kwargs) def union(*selects, **kwargs): """Return a ``UNION`` of multiple selectables. The returned object is an instance of :class:`.CompoundSelect`. A similar :func:`union()` method is available on all :class:`.FromClause` subclasses. \*selects a list of :class:`.Select` instances. \**kwargs available keyword arguments are the same as those of :func:`select`. """ return CompoundSelect(CompoundSelect.UNION, *selects, **kwargs) def union_all(*selects, **kwargs): """Return a ``UNION ALL`` of multiple selectables. The returned object is an instance of :class:`.CompoundSelect`. A similar :func:`union_all()` method is available on all :class:`.FromClause` subclasses. \*selects a list of :class:`.Select` instances. \**kwargs available keyword arguments are the same as those of :func:`select`. """ return CompoundSelect(CompoundSelect.UNION_ALL, *selects, **kwargs) def except_(*selects, **kwargs): """Return an ``EXCEPT`` of multiple selectables. The returned object is an instance of :class:`.CompoundSelect`. \*selects a list of :class:`.Select` instances. \**kwargs available keyword arguments are the same as those of :func:`select`. """ return CompoundSelect(CompoundSelect.EXCEPT, *selects, **kwargs) def except_all(*selects, **kwargs): """Return an ``EXCEPT ALL`` of multiple selectables. The returned object is an instance of :class:`.CompoundSelect`. \*selects a list of :class:`.Select` instances. \**kwargs available keyword arguments are the same as those of :func:`select`. """ return CompoundSelect(CompoundSelect.EXCEPT_ALL, *selects, **kwargs) def intersect(*selects, **kwargs): """Return an ``INTERSECT`` of multiple selectables. 
The returned object is an instance of :class:`.CompoundSelect`. \*selects a list of :class:`.Select` instances. \**kwargs available keyword arguments are the same as those of :func:`select`. """ return CompoundSelect(CompoundSelect.INTERSECT, *selects, **kwargs) def intersect_all(*selects, **kwargs): """Return an ``INTERSECT ALL`` of multiple selectables. The returned object is an instance of :class:`.CompoundSelect`. \*selects a list of :class:`.Select` instances. \**kwargs available keyword arguments are the same as those of :func:`select`. """ return CompoundSelect(CompoundSelect.INTERSECT_ALL, *selects, **kwargs) def alias(selectable, name=None): """Return an :class:`.Alias` object. An :class:`.Alias` represents any :class:`.FromClause` with an alternate name assigned within SQL, typically using the ``AS`` clause when generated, e.g. ``SELECT * FROM table AS aliasname``. Similar functionality is available via the :meth:`~.FromClause.alias` method available on all :class:`.FromClause` subclasses. When an :class:`.Alias` is created from a :class:`.Table` object, this has the effect of the table being rendered as ``tablename AS aliasname`` in a SELECT statement. For :func:`.select` objects, the effect is that of creating a named subquery, i.e. ``(select ...) AS aliasname``. The ``name`` parameter is optional, and provides the name to use in the rendered SQL. If blank, an "anonymous" name will be deterministically generated at compile time. Deterministic means the name is guaranteed to be unique against other constructs used in the same statement, and will also be the same name for each successive compilation of the same statement object. :param selectable: any :class:`.FromClause` subclass, such as a table, select statement, etc. :param name: string name to be assigned as the alias. If ``None``, a name will be deterministically generated at compile time. """ return Alias(selectable, name=name) def literal(value, type_=None): """Return a literal clause, bound to a bind parameter. Literal clauses are created automatically when non- :class:`.ClauseElement` objects (such as strings, ints, dates, etc.) are used in a comparison operation with a :class:`.ColumnElement` subclass, such as a :class:`~sqlalchemy.schema.Column` object. Use this function to force the generation of a literal clause, which will be created as a :class:`BindParameter` with a bound value. :param value: the value to be bound. Can be any Python object supported by the underlying DB-API, or is translatable via the given type argument. :param type\_: an optional :class:`~sqlalchemy.types.TypeEngine` which will provide bind-parameter translation for this literal. """ return BindParameter(None, value, type_=type_, unique=True) def tuple_(*expr): """Return a SQL tuple. Main usage is to produce a composite IN construct:: tuple_(table.c.col1, table.c.col2).in_( [(1, 2), (5, 12), (10, 19)] ) .. warning:: The composite IN construct is not supported by all backends, and is currently known to work on Postgresql and MySQL, but not SQLite. Unsupported backends will raise a subclass of :class:`~sqlalchemy.exc.DBAPIError` when such an expression is invoked. """ return Tuple(*expr) def type_coerce(expr, type_): """Coerce the given expression into the given type, on the Python side only. :func:`.type_coerce` is roughly similar to :func:`.cast`, except no "CAST" expression is rendered - the given type is only applied towards expression typing and against received result values. 
e.g.:: from sqlalchemy.types import TypeDecorator import uuid class AsGuid(TypeDecorator): impl = String def process_bind_param(self, value, dialect): if value is not None: return str(value) else: return None def process_result_value(self, value, dialect): if value is not None: return uuid.UUID(value) else: return None conn.execute( select([type_coerce(mytable.c.ident, AsGuid)]).\\ where( type_coerce(mytable.c.ident, AsGuid) == uuid.uuid3(uuid.NAMESPACE_URL, 'bar') ) ) """ type_ = sqltypes.to_instance(type_) if hasattr(expr, '__clause_element__'): return type_coerce(expr.__clause_element__(), type_) elif isinstance(expr, BindParameter): bp = expr._clone() bp.type = type_ return bp elif not isinstance(expr, Visitable): if expr is None: return null() else: return literal(expr, type_=type_) else: return Label(None, expr, type_=type_) def label(name, obj): """Return a :class:`Label` object for the given :class:`.ColumnElement`. A label changes the name of an element in the columns clause of a ``SELECT`` statement, typically via the ``AS`` SQL keyword. This functionality is more conveniently available via the :func:`label()` method on :class:`.ColumnElement`. name label name obj a :class:`.ColumnElement`. """ return Label(name, obj) def column(text, type_=None): """Return a textual column clause, as would be in the columns clause of a ``SELECT`` statement. The object returned is an instance of :class:`.ColumnClause`, which represents the "syntactical" portion of the schema-level :class:`~sqlalchemy.schema.Column` object. It is often used directly within :func:`~.expression.select` constructs or with lightweight :func:`~.expression.table` constructs. Note that the :func:`~.expression.column` function is not part of the ``sqlalchemy`` namespace. It must be imported from the ``sql`` package:: from sqlalchemy.sql import table, column :param text: the name of the column. Quoting rules will be applied to the clause like any other column name. For textual column constructs that are not to be quoted, use the :func:`literal_column` function. :param type\_: an optional :class:`~sqlalchemy.types.TypeEngine` object which will provide result-set translation for this column. See :class:`.ColumnClause` for further examples. """ return ColumnClause(text, type_=type_) def literal_column(text, type_=None): """Return a textual column expression, as would be in the columns clause of a ``SELECT`` statement. The object returned supports further expressions in the same way as any other column object, including comparison, math and string operations. The type\_ parameter is important to determine proper expression behavior (such as, '+' means string concatenation or numerical addition based on the type). :param text: the text of the expression; can be any SQL expression. Quoting rules will not be applied. To specify a column-name expression which should be subject to quoting rules, use the :func:`column` function. :param type\_: an optional :class:`~sqlalchemy.types.TypeEngine` object which will provide result-set translation and additional expression semantics for this column. If left as None the type will be NullType. """ return ColumnClause(text, type_=type_, is_literal=True) def table(name, *columns): """Represent a textual table clause. The object returned is an instance of :class:`.TableClause`, which represents the "syntactical" portion of the schema-level :class:`~.schema.Table` object. It may be used to construct lightweight table constructs. 
Note that the :func:`~.expression.table` function is not part of the ``sqlalchemy`` namespace. It must be imported from the ``sql`` package:: from sqlalchemy.sql import table, column :param name: Name of the table. :param columns: A collection of :func:`~.expression.column` constructs. See :class:`.TableClause` for further examples. """ return TableClause(name, *columns) def bindparam(key, value=NO_ARG, type_=None, unique=False, required=NO_ARG, quote=None, callable_=None): """Create a bind parameter clause with the given key. :param key: the key for this bind param. Will be used in the generated SQL statement for dialects that use named parameters. This value may be modified when part of a compilation operation, if other :class:`BindParameter` objects exist with the same key, or if its length is too long and truncation is required. :param value: Initial value for this bind param. This value may be overridden by the dictionary of parameters sent to statement compilation/execution. Defaults to ``None``, however if neither ``value`` nor ``callable`` are passed explicitly, the ``required`` flag will be set to ``True`` which has the effect of requiring a value be present when the statement is actually executed. .. versionchanged:: 0.8 The ``required`` flag is set to ``True`` automatically if ``value`` or ``callable`` is not passed. :param callable\_: A callable function that takes the place of "value". The function will be called at statement execution time to determine the ultimate value. Used for scenarios where the actual bind value cannot be determined at the point at which the clause construct is created, but embedded bind values are still desirable. :param type\_: A ``TypeEngine`` object that will be used to pre-process the value corresponding to this :class:`BindParameter` at execution time. :param unique: if True, the key name of this BindParamClause will be modified if another :class:`BindParameter` of the same name already has been located within the containing :class:`.ClauseElement`. :param required: If ``True``, a value is required at execution time. If not passed, is set to ``True`` or ``False`` based on whether or not one of ``value`` or ``callable`` were passed.. .. versionchanged:: 0.8 If the ``required`` flag is not specified, it will be set automatically to ``True`` or ``False`` depending on whether or not the ``value`` or ``callable`` parameters were specified. :param quote: True if this parameter name requires quoting and is not currently known as a SQLAlchemy reserved word; this currently only applies to the Oracle backend. """ if isinstance(key, ColumnClause): type_ = key.type key = key.name if required is NO_ARG: required = (value is NO_ARG and callable_ is None) if value is NO_ARG: value = None return BindParameter(key, value, type_=type_, callable_=callable_, unique=unique, required=required, quote=quote) def outparam(key, type_=None): """Create an 'OUT' parameter for usage in functions (stored procedures), for databases which support them. The ``outparam`` can be used like a regular function parameter. The "output" value will be available from the :class:`~sqlalchemy.engine.ResultProxy` object via its ``out_parameters`` attribute, which returns a dictionary containing the values. """ return BindParameter( key, None, type_=type_, unique=False, isoutparam=True) def text(text, bind=None, *args, **kwargs): """Create a SQL construct that is represented by a literal string. 
E.g.:: t = text("SELECT * FROM users") result = connection.execute(t) The advantages :func:`text` provides over a plain string are backend-neutral support for bind parameters, per-statement execution options, as well as bind parameter and result-column typing behavior, allowing SQLAlchemy type constructs to play a role when executing a statement that is specified literally. Bind parameters are specified by name, using the format ``:name``. E.g.:: t = text("SELECT * FROM users WHERE id=:user_id") result = connection.execute(t, user_id=12) To invoke SQLAlchemy typing logic for bind parameters, the ``bindparams`` list allows specification of :func:`bindparam` constructs which specify the type for a given name:: t = text("SELECT id FROM users WHERE updated_at>:updated", bindparams=[bindparam('updated', DateTime())] ) Typing during result row processing is also an important concern. Result column types are specified using the ``typemap`` dictionary, where the keys match the names of columns. These names are taken from what the DBAPI returns as ``cursor.description``:: t = text("SELECT id, name FROM users", typemap={ 'id':Integer, 'name':Unicode } ) The :func:`text` construct is used internally for most cases when a literal string is specified for part of a larger query, such as within :func:`select()`, :func:`update()`, :func:`insert()` or :func:`delete()`. In those cases, the same bind parameter syntax is applied:: s = select([users.c.id, users.c.name]).where("id=:user_id") result = connection.execute(s, user_id=12) Using :func:`text` explicitly usually implies the construction of a full, standalone statement. As such, SQLAlchemy refers to it as an :class:`.Executable` object, and it supports the :meth:`Executable.execution_options` method. For example, a :func:`text` construct that should be subject to "autocommit" can be set explicitly so using the ``autocommit`` option:: t = text("EXEC my_procedural_thing()").\\ execution_options(autocommit=True) Note that SQLAlchemy's usual "autocommit" behavior applies to :func:`text` constructs - that is, statements which begin with a phrase such as ``INSERT``, ``UPDATE``, ``DELETE``, or a variety of other phrases specific to certain backends, will be eligible for autocommit if no transaction is in progress. :param text: the text of the SQL statement to be created. use ``:`` to specify bind parameters; they will be compiled to their engine-specific format. :param autocommit: Deprecated. Use .execution_options(autocommit=) to set the autocommit option. :param bind: an optional connection or engine to be used for this text query. :param bindparams: a list of :func:`bindparam()` instances which can be used to define the types and/or initial values for the bind parameters within the textual statement; the keynames of the bindparams must match those within the text of the statement. The types will be used for pre-processing on bind values. :param typemap: a dictionary mapping the names of columns represented in the columns clause of a ``SELECT`` statement to type objects, which will be used to perform post-processing on columns within the result set. This argument applies to any expression that returns result sets. """ return TextClause(text, bind=bind, *args, **kwargs) def over(func, partition_by=None, order_by=None): """Produce an OVER clause against a function. Used against aggregate or so-called "window" functions, for database backends that support window functions. 
E.g.:: from sqlalchemy import over over(func.row_number(), order_by='x') Would produce "ROW_NUMBER() OVER(ORDER BY x)". :param func: a :class:`.FunctionElement` construct, typically generated by :data:`~.expression.func`. :param partition_by: a column element or string, or a list of such, that will be used as the PARTITION BY clause of the OVER construct. :param order_by: a column element or string, or a list of such, that will be used as the ORDER BY clause of the OVER construct. This function is also available from the :data:`~.expression.func` construct itself via the :meth:`.FunctionElement.over` method. .. versionadded:: 0.7 """ return Over(func, partition_by=partition_by, order_by=order_by) def null(): """Return a :class:`Null` object, which compiles to ``NULL``. """ return Null() def true(): """Return a :class:`True_` object, which compiles to ``true``, or the boolean equivalent for the target dialect. """ return True_() def false(): """Return a :class:`False_` object, which compiles to ``false``, or the boolean equivalent for the target dialect. """ return False_() class _FunctionGenerator(object): """Generate :class:`.Function` objects based on getattr calls.""" def __init__(self, **opts): self.__names = [] self.opts = opts def __getattr__(self, name): # passthru __ attributes; fixes pydoc if name.startswith('__'): try: return self.__dict__[name] except KeyError: raise AttributeError(name) elif name.endswith('_'): name = name[0:-1] f = _FunctionGenerator(**self.opts) f.__names = list(self.__names) + [name] return f def __call__(self, *c, **kwargs): o = self.opts.copy() o.update(kwargs) tokens = len(self.__names) if tokens == 2: package, fname = self.__names elif tokens == 1: package, fname = "_default", self.__names[0] else: package = None if package is not None and \ package in functions._registry and \ fname in functions._registry[package]: func = functions._registry[package][fname] return func(*c, **o) return Function(self.__names[-1], packagenames=self.__names[0:-1], *c, **o) # "func" global - i.e. func.count() func = _FunctionGenerator() """Generate SQL function expressions. :data:`.func` is a special object instance which generates SQL functions based on name-based attributes, e.g.:: >>> print func.count(1) count(:param_1) The element is a column-oriented SQL element like any other, and is used in that way:: >>> print select([func.count(table.c.id)]) SELECT count(sometable.id) FROM sometable Any name can be given to :data:`.func`. If the function name is unknown to SQLAlchemy, it will be rendered exactly as is. For common SQL functions which SQLAlchemy is aware of, the name may be interpreted as a *generic function* which will be compiled appropriately to the target database:: >>> print func.current_timestamp() CURRENT_TIMESTAMP To call functions which are present in dot-separated packages, specify them in the same manner:: >>> print func.stats.yield_curve(5, 10) stats.yield_curve(:yield_curve_1, :yield_curve_2) SQLAlchemy can be made aware of the return type of functions to enable type-specific lexical and result-based behavior. For example, to ensure that a string-based function returns a Unicode value and is similarly treated as a string in expressions, specify :class:`~sqlalchemy.types.Unicode` as the type: >>> print func.my_string(u'hi', type_=Unicode) + ' ' + \ ... func.my_string(u'there', type_=Unicode) my_string(:my_string_1) || :my_string_2 || my_string(:my_string_3) The object returned by a :data:`.func` call is usually an instance of :class:`.Function`. 
This object meets the "column" interface, including comparison and labeling functions. The object can also be passed the :meth:`~.Connectable.execute` method of a :class:`.Connection` or :class:`.Engine`, where it will be wrapped inside of a SELECT statement first:: print connection.execute(func.current_timestamp()).scalar() In a few exception cases, the :data:`.func` accessor will redirect a name to a built-in expression such as :func:`.cast` or :func:`.extract`, as these names have well-known meaning but are not exactly the same as "functions" from a SQLAlchemy perspective. .. versionadded:: 0.8 :data:`.func` can return non-function expression constructs for common quasi-functional names like :func:`.cast` and :func:`.extract`. Functions which are interpreted as "generic" functions know how to calculate their return type automatically. For a listing of known generic functions, see :ref:`generic_functions`. """ # "modifier" global - i.e. modifier.distinct # TODO: use UnaryExpression for this instead ? modifier = _FunctionGenerator(group=False) class _truncated_label(unicode): """A unicode subclass used to identify symbolic " "names that may require truncation.""" def apply_map(self, map_): return self # for backwards compatibility in case # someone is re-implementing the # _truncated_identifier() sequence in a custom # compiler _generated_label = _truncated_label class _anonymous_label(_truncated_label): """A unicode subclass used to identify anonymously generated names.""" def __add__(self, other): return _anonymous_label( unicode(self) + unicode(other)) def __radd__(self, other): return _anonymous_label( unicode(other) + unicode(self)) def apply_map(self, map_): return self % map_ def _as_truncated(value): """coerce the given value to :class:`._truncated_label`. Existing :class:`._truncated_label` and :class:`._anonymous_label` objects are passed unchanged. """ if isinstance(value, _truncated_label): return value else: return _truncated_label(value) def _string_or_unprintable(element): if isinstance(element, basestring): return element else: try: return str(element) except: return "unprintable element %r" % element def _clone(element, **kw): return element._clone() def _expand_cloned(elements): """expand the given set of ClauseElements to be the set of all 'cloned' predecessors. """ return itertools.chain(*[x._cloned_set for x in elements]) def _select_iterables(elements): """expand tables into individual columns in the given list of column expressions. """ return itertools.chain(*[c._select_iterable for c in elements]) def _cloned_intersection(a, b): """return the intersection of sets a and b, counting any overlap between 'cloned' predecessors. The returned set is in terms of the entities present within 'a'. """ all_overlap = set(_expand_cloned(a)).intersection(_expand_cloned(b)) return set(elem for elem in a if all_overlap.intersection(elem._cloned_set)) def _cloned_difference(a, b): all_overlap = set(_expand_cloned(a)).intersection(_expand_cloned(b)) return set(elem for elem in a if not all_overlap.intersection(elem._cloned_set)) def _from_objects(*elements): return itertools.chain(*[element._from_objects for element in elements]) def _labeled(element): if not hasattr(element, 'name'): return element.label(None) else: return element # there is some inconsistency here between the usage of # inspect() vs. checking for Visitable and __clause_element__. 
# Ideally all functions here would derive from inspect(), # however the inspect() versions add significant callcount # overhead for critical functions like _interpret_as_column_or_from(). # Generally, the column-based functions are more performance critical # and are fine just checking for __clause_element__(). it's only # _interpret_as_from() where we'd like to be able to receive ORM entities # that have no defined namespace, hence inspect() is needed there. def _column_as_key(element): if isinstance(element, basestring): return element if hasattr(element, '__clause_element__'): element = element.__clause_element__() try: return element.key except AttributeError: return None def _clause_element_as_expr(element): if hasattr(element, '__clause_element__'): return element.__clause_element__() else: return element def _literal_as_text(element): if isinstance(element, Visitable): return element elif hasattr(element, '__clause_element__'): return element.__clause_element__() elif isinstance(element, basestring): return TextClause(unicode(element)) elif isinstance(element, (util.NoneType, bool)): return _const_expr(element) else: raise exc.ArgumentError( "SQL expression object or string expected." ) def _no_literals(element): if hasattr(element, '__clause_element__'): return element.__clause_element__() elif not isinstance(element, Visitable): raise exc.ArgumentError("Ambiguous literal: %r. Use the 'text()' " "function to indicate a SQL expression " "literal, or 'literal()' to indicate a " "bound value." % element) else: return element def _is_literal(element): return not isinstance(element, Visitable) and \ not hasattr(element, '__clause_element__') def _only_column_elements_or_none(element, name): if element is None: return None else: return _only_column_elements(element, name) def _only_column_elements(element, name): if hasattr(element, '__clause_element__'): element = element.__clause_element__() if not isinstance(element, ColumnElement): raise exc.ArgumentError( "Column-based expression object expected for argument " "'%s'; got: '%s', type %s" % (name, element, type(element))) return element def _literal_as_binds(element, name=None, type_=None): if hasattr(element, '__clause_element__'): return element.__clause_element__() elif not isinstance(element, Visitable): if element is None: return null() else: return _BindParamClause(name, element, type_=type_, unique=True) else: return element def _interpret_as_column_or_from(element): if isinstance(element, Visitable): return element elif hasattr(element, '__clause_element__'): return element.__clause_element__() insp = inspection.inspect(element, raiseerr=False) if insp is None: if isinstance(element, (util.NoneType, bool)): return _const_expr(element) elif hasattr(insp, "selectable"): return insp.selectable return literal_column(str(element)) def _interpret_as_from(element): insp = inspection.inspect(element, raiseerr=False) if insp is None: if isinstance(element, basestring): return TextClause(unicode(element)) elif hasattr(insp, "selectable"): return insp.selectable raise exc.ArgumentError("FROM expression expected") def _interpret_as_select(element): element = _interpret_as_from(element) if isinstance(element, Alias): element = element.original if not isinstance(element, Select): element = element.select() return element def _const_expr(element): if isinstance(element, (Null, False_, True_)): return element elif element is None: return null() elif element is False: return false() elif element is True: return true() else: raise 
exc.ArgumentError( "Expected None, False, or True" ) def _type_from_args(args): for a in args: if not isinstance(a.type, sqltypes.NullType): return a.type else: return sqltypes.NullType def _corresponding_column_or_error(fromclause, column, require_embedded=False): c = fromclause.corresponding_column(column, require_embedded=require_embedded) if c is None: raise exc.InvalidRequestError( "Given column '%s', attached to table '%s', " "failed to locate a corresponding column from table '%s'" % (column, getattr(column, 'table', None), fromclause.description) ) return c @util.decorator def _generative(fn, *args, **kw): """Mark a method as generative.""" self = args[0]._generate() fn(self, *args[1:], **kw) return self def is_column(col): """True if ``col`` is an instance of :class:`.ColumnElement`.""" return isinstance(col, ColumnElement) class ClauseElement(Visitable): """Base class for elements of a programmatically constructed SQL expression. """ __visit_name__ = 'clause' _annotations = {} supports_execution = False _from_objects = [] bind = None _is_clone_of = None is_selectable = False is_clause_element = True def _clone(self): """Create a shallow copy of this ClauseElement. This method may be used by a generative API. Its also used as part of the "deep" copy afforded by a traversal that combines the _copy_internals() method. """ c = self.__class__.__new__(self.__class__) c.__dict__ = self.__dict__.copy() ClauseElement._cloned_set._reset(c) ColumnElement.comparator._reset(c) # this is a marker that helps to "equate" clauses to each other # when a Select returns its list of FROM clauses. the cloning # process leaves around a lot of remnants of the previous clause # typically in the form of column expressions still attached to the # old table. c._is_clone_of = self return c @property def _constructor(self): """return the 'constructor' for this ClauseElement. This is for the purposes for creating a new object of this type. Usually, its just the element's __class__. However, the "Annotated" version of the object overrides to return the class of its proxied element. """ return self.__class__ @util.memoized_property def _cloned_set(self): """Return the set consisting all cloned ancestors of this ClauseElement. Includes this ClauseElement. This accessor tends to be used for FromClause objects to identify 'equivalent' FROM clauses, regardless of transformative operations. """ s = util.column_set() f = self while f is not None: s.add(f) f = f._is_clone_of return s def __getstate__(self): d = self.__dict__.copy() d.pop('_is_clone_of', None) return d if util.jython: def __hash__(self): """Return a distinct hash code. ClauseElements may have special equality comparisons which makes us rely on them having unique hash codes for use in hash-based collections. Stock __hash__ doesn't guarantee unique values on platforms with moving GCs. """ return id(self) def _annotate(self, values): """return a copy of this ClauseElement with annotations updated by the given dictionary. """ return sqlutil.Annotated(self, values) def _with_annotations(self, values): """return a copy of this ClauseElement with annotations replaced by the given dictionary. """ return sqlutil.Annotated(self, values) def _deannotate(self, values=None, clone=False): """return a copy of this :class:`.ClauseElement` with annotations removed. :param values: optional tuple of individual values to remove. 
""" if clone: # clone is used when we are also copying # the expression for a deep deannotation return self._clone() else: # if no clone, since we have no annotations we return # self return self def unique_params(self, *optionaldict, **kwargs): """Return a copy with :func:`bindparam()` elements replaced. Same functionality as ``params()``, except adds `unique=True` to affected bind parameters so that multiple statements can be used. """ return self._params(True, optionaldict, kwargs) def params(self, *optionaldict, **kwargs): """Return a copy with :func:`bindparam()` elements replaced. Returns a copy of this ClauseElement with :func:`bindparam()` elements replaced with values taken from the given dictionary:: >>> clause = column('x') + bindparam('foo') >>> print clause.compile().params {'foo':None} >>> print clause.params({'foo':7}).compile().params {'foo':7} """ return self._params(False, optionaldict, kwargs) def _params(self, unique, optionaldict, kwargs): if len(optionaldict) == 1: kwargs.update(optionaldict[0]) elif len(optionaldict) > 1: raise exc.ArgumentError( "params() takes zero or one positional dictionary argument") def visit_bindparam(bind): if bind.key in kwargs: bind.value = kwargs[bind.key] bind.required = False if unique: bind._convert_to_unique() return cloned_traverse(self, {}, {'bindparam': visit_bindparam}) def compare(self, other, **kw): """Compare this ClauseElement to the given ClauseElement. Subclasses should override the default behavior, which is a straight identity comparison. \**kw are arguments consumed by subclass compare() methods and may be used to modify the criteria for comparison. (see :class:`.ColumnElement`) """ return self is other def _copy_internals(self, clone=_clone, **kw): """Reassign internal elements to be clones of themselves. Called during a copy-and-traverse operation on newly shallow-copied elements to create a deep copy. The given clone function should be used, which may be applying additional transformations to the element (i.e. replacement traversal, cloned traversal, annotations). """ pass def get_children(self, **kwargs): """Return immediate child elements of this :class:`.ClauseElement`. This is used for visit traversal. \**kwargs may contain flags that change the collection that is returned, for example to return a subset of items in order to cut down on larger traversals, or to return child items from a different context (such as schema-level collections instead of clause-level). """ return [] def self_group(self, against=None): """Apply a 'grouping' to this :class:`.ClauseElement`. This method is overridden by subclasses to return a "grouping" construct, i.e. parenthesis. In particular it's used by "binary" expressions to provide a grouping around themselves when placed into a larger expression, as well as by :func:`.select` constructs when placed into the FROM clause of another :func:`.select`. (Note that subqueries should be normally created using the :func:`.Select.alias` method, as many platforms require nested SELECT statements to be named). As expressions are composed together, the application of :meth:`self_group` is automatic - end-user code should never need to use this method directly. Note that SQLAlchemy's clause constructs take operator precedence into account - so parenthesis might not be needed, for example, in an expression like ``x OR (y AND z)`` - AND takes precedence over OR. The base :meth:`self_group` method of :class:`.ClauseElement` just returns self. 
""" return self def compile(self, bind=None, dialect=None, **kw): """Compile this SQL expression. The return value is a :class:`~.Compiled` object. Calling ``str()`` or ``unicode()`` on the returned value will yield a string representation of the result. The :class:`~.Compiled` object also can return a dictionary of bind parameter names and values using the ``params`` accessor. :param bind: An ``Engine`` or ``Connection`` from which a ``Compiled`` will be acquired. This argument takes precedence over this :class:`.ClauseElement`'s bound engine, if any. :param column_keys: Used for INSERT and UPDATE statements, a list of column names which should be present in the VALUES clause of the compiled statement. If ``None``, all columns from the target table object are rendered. :param dialect: A ``Dialect`` instance from which a ``Compiled`` will be acquired. This argument takes precedence over the `bind` argument as well as this :class:`.ClauseElement`'s bound engine, if any. :param inline: Used for INSERT statements, for a dialect which does not support inline retrieval of newly generated primary key columns, will force the expression used to create the new primary key value to be rendered inline within the INSERT statement's VALUES clause. This typically refers to Sequence execution but may also refer to any server-side default generation function associated with a primary key `Column`. """ if not dialect: if bind: dialect = bind.dialect elif self.bind: dialect = self.bind.dialect bind = self.bind else: dialect = default.DefaultDialect() return self._compiler(dialect, bind=bind, **kw) def _compiler(self, dialect, **kw): """Return a compiler appropriate for this ClauseElement, given a Dialect.""" return dialect.statement_compiler(dialect, self, **kw) def __str__(self): # Py3K #return unicode(self.compile()) # Py2K return unicode(self.compile()).encode('ascii', 'backslashreplace') # end Py2K def __and__(self, other): return and_(self, other) def __or__(self, other): return or_(self, other) def __invert__(self): return self._negate() def __nonzero__(self): raise TypeError("Boolean value of this clause is not defined") def _negate(self): if hasattr(self, 'negation_clause'): return self.negation_clause else: return UnaryExpression( self.self_group(against=operators.inv), operator=operators.inv, negate=None) def __repr__(self): friendly = getattr(self, 'description', None) if friendly is None: return object.__repr__(self) else: return '<%s.%s at 0x%x; %s>' % ( self.__module__, self.__class__.__name__, id(self), friendly) inspection._self_inspects(ClauseElement) class Immutable(object): """mark a ClauseElement as 'immutable' when expressions are cloned.""" def unique_params(self, *optionaldict, **kwargs): raise NotImplementedError("Immutable objects do not support copying") def params(self, *optionaldict, **kwargs): raise NotImplementedError("Immutable objects do not support copying") def _clone(self): return self class _DefaultColumnComparator(operators.ColumnOperators): """Defines comparison and math operations. See :class:`.ColumnOperators` and :class:`.Operators` for descriptions of all operations. 
""" @util.memoized_property def type(self): return self.expr.type def operate(self, op, *other, **kwargs): o = self.operators[op.__name__] return o[0](self, self.expr, op, *(other + o[1:]), **kwargs) def reverse_operate(self, op, other, **kwargs): o = self.operators[op.__name__] return o[0](self, self.expr, op, other, reverse=True, *o[1:], **kwargs) def _adapt_expression(self, op, other_comparator): """evaluate the return type of , and apply any adaptations to the given operator. This method determines the type of a resulting binary expression given two source types and an operator. For example, two :class:`.Column` objects, both of the type :class:`.Integer`, will produce a :class:`.BinaryExpression` that also has the type :class:`.Integer` when compared via the addition (``+``) operator. However, using the addition operator with an :class:`.Integer` and a :class:`.Date` object will produce a :class:`.Date`, assuming "days delta" behavior by the database (in reality, most databases other than Postgresql don't accept this particular operation). The method returns a tuple of the form , . The resulting operator and type will be those applied to the resulting :class:`.BinaryExpression` as the final operator and the right-hand side of the expression. Note that only a subset of operators make usage of :meth:`._adapt_expression`, including math operators and user-defined operators, but not boolean comparison or special SQL keywords like MATCH or BETWEEN. """ return op, other_comparator.type def _boolean_compare(self, expr, op, obj, negate=None, reverse=False, _python_is_types=(util.NoneType, bool), **kwargs): if isinstance(obj, _python_is_types + (Null, True_, False_)): # allow x ==/!= True/False to be treated as a literal. # this comes out to "== / != true/false" or "1/0" if those # constants aren't supported and works on all platforms if op in (operators.eq, operators.ne) and \ isinstance(obj, (bool, True_, False_)): return BinaryExpression(expr, obj, op, type_=sqltypes.BOOLEANTYPE, negate=negate, modifiers=kwargs) else: # all other None/True/False uses IS, IS NOT if op in (operators.eq, operators.is_): return BinaryExpression(expr, _const_expr(obj), operators.is_, negate=operators.isnot) elif op in (operators.ne, operators.isnot): return BinaryExpression(expr, _const_expr(obj), operators.isnot, negate=operators.is_) else: raise exc.ArgumentError( "Only '=', '!=', 'is_()', 'isnot()' operators can " "be used with None/True/False") else: obj = self._check_literal(expr, op, obj) if reverse: return BinaryExpression(obj, expr, op, type_=sqltypes.BOOLEANTYPE, negate=negate, modifiers=kwargs) else: return BinaryExpression(expr, obj, op, type_=sqltypes.BOOLEANTYPE, negate=negate, modifiers=kwargs) def _binary_operate(self, expr, op, obj, reverse=False, result_type=None, **kw): obj = self._check_literal(expr, op, obj) if reverse: left, right = obj, expr else: left, right = expr, obj if result_type is None: op, result_type = left.comparator._adapt_expression( op, right.comparator) return BinaryExpression(left, right, op, type_=result_type) def _scalar(self, expr, op, fn, **kw): return fn(expr) def _in_impl(self, expr, op, seq_or_selectable, negate_op, **kw): seq_or_selectable = _clause_element_as_expr(seq_or_selectable) if isinstance(seq_or_selectable, ScalarSelect): return self._boolean_compare(expr, op, seq_or_selectable, negate=negate_op) elif isinstance(seq_or_selectable, SelectBase): # TODO: if we ever want to support (x, y, z) IN (select x, # y, z from table), we would need a multi-column version 
of # as_scalar() to produce a multi- column selectable that # does not export itself as a FROM clause return self._boolean_compare( expr, op, seq_or_selectable.as_scalar(), negate=negate_op, **kw) elif isinstance(seq_or_selectable, (Selectable, TextClause)): return self._boolean_compare(expr, op, seq_or_selectable, negate=negate_op, **kw) # Handle non selectable arguments as sequences args = [] for o in seq_or_selectable: if not _is_literal(o): if not isinstance(o, ColumnOperators): raise exc.InvalidRequestError('in() function accept' 's either a list of non-selectable values, ' 'or a selectable: %r' % o) elif o is None: o = null() else: o = expr._bind_param(op, o) args.append(o) if len(args) == 0: # Special case handling for empty IN's, behave like # comparison against zero row selectable. We use != to # build the contradiction as it handles NULL values # appropriately, i.e. "not (x IN ())" should not return NULL # values for x. util.warn('The IN-predicate on "%s" was invoked with an ' 'empty sequence. This results in a ' 'contradiction, which nonetheless can be ' 'expensive to evaluate. Consider alternative ' 'strategies for improved performance.' % expr) if op is operators.in_op: return expr != expr else: return expr == expr return self._boolean_compare(expr, op, ClauseList(*args).self_group(against=op), negate=negate_op) def _unsupported_impl(self, expr, op, *arg, **kw): raise NotImplementedError("Operator '%s' is not supported on " "this expression" % op.__name__) def _neg_impl(self, expr, op, **kw): """See :meth:`.ColumnOperators.__neg__`.""" return UnaryExpression(expr, operator=operators.neg) def _match_impl(self, expr, op, other, **kw): """See :meth:`.ColumnOperators.match`.""" return self._boolean_compare(expr, operators.match_op, self._check_literal(expr, operators.match_op, other)) def _distinct_impl(self, expr, op, **kw): """See :meth:`.ColumnOperators.distinct`.""" return UnaryExpression(expr, operator=operators.distinct_op, type_=expr.type) def _between_impl(self, expr, op, cleft, cright, **kw): """See :meth:`.ColumnOperators.between`.""" return BinaryExpression( expr, ClauseList( self._check_literal(expr, operators.and_, cleft), self._check_literal(expr, operators.and_, cright), operator=operators.and_, group=False), operators.between_op) def _collate_impl(self, expr, op, other, **kw): return collate(expr, other) # a mapping of operators with the method they use, along with # their negated operator for comparison operators operators = { "add": (_binary_operate,), "mul": (_binary_operate,), "sub": (_binary_operate,), "div": (_binary_operate,), "mod": (_binary_operate,), "truediv": (_binary_operate,), "custom_op": (_binary_operate,), "concat_op": (_binary_operate,), "lt": (_boolean_compare, operators.ge), "le": (_boolean_compare, operators.gt), "ne": (_boolean_compare, operators.eq), "gt": (_boolean_compare, operators.le), "ge": (_boolean_compare, operators.lt), "eq": (_boolean_compare, operators.ne), "like_op": (_boolean_compare, operators.notlike_op), "ilike_op": (_boolean_compare, operators.notilike_op), "notlike_op": (_boolean_compare, operators.like_op), "notilike_op": (_boolean_compare, operators.ilike_op), "contains_op": (_boolean_compare, operators.notcontains_op), "startswith_op": (_boolean_compare, operators.notstartswith_op), "endswith_op": (_boolean_compare, operators.notendswith_op), "desc_op": (_scalar, desc), "asc_op": (_scalar, asc), "nullsfirst_op": (_scalar, nullsfirst), "nullslast_op": (_scalar, nullslast), "in_op": (_in_impl, operators.notin_op), 
"notin_op": (_in_impl, operators.in_op), "is_": (_boolean_compare, operators.is_), "isnot": (_boolean_compare, operators.isnot), "collate": (_collate_impl,), "match_op": (_match_impl,), "distinct_op": (_distinct_impl,), "between_op": (_between_impl, ), "neg": (_neg_impl,), "getitem": (_unsupported_impl,), "lshift": (_unsupported_impl,), "rshift": (_unsupported_impl,), } def _check_literal(self, expr, operator, other): if isinstance(other, (ColumnElement, TextClause)): if isinstance(other, BindParameter) and \ isinstance(other.type, sqltypes.NullType): # TODO: perhaps we should not mutate the incoming # bindparam() here and instead make a copy of it. # this might be the only place that we're mutating # an incoming construct. other.type = expr.type return other elif hasattr(other, '__clause_element__'): other = other.__clause_element__() elif isinstance(other, sqltypes.TypeEngine.Comparator): other = other.expr if isinstance(other, (SelectBase, Alias)): return other.as_scalar() elif not isinstance(other, (ColumnElement, TextClause)): return expr._bind_param(operator, other) else: return other class ColumnElement(ClauseElement, ColumnOperators): """Represent a column-oriented SQL expression suitable for usage in the "columns" clause, WHERE clause etc. of a statement. While the most familiar kind of :class:`.ColumnElement` is the :class:`.Column` object, :class:`.ColumnElement` serves as the basis for any unit that may be present in a SQL expression, including the expressions themselves, SQL functions, bound parameters, literal expressions, keywords such as ``NULL``, etc. :class:`.ColumnElement` is the ultimate base class for all such elements. A :class:`.ColumnElement` provides the ability to generate new :class:`.ColumnElement` objects using Python expressions. This means that Python operators such as ``==``, ``!=`` and ``<`` are overloaded to mimic SQL operations, and allow the instantiation of further :class:`.ColumnElement` instances which are composed from other, more fundamental :class:`.ColumnElement` objects. For example, two :class:`.ColumnClause` objects can be added together with the addition operator ``+`` to produce a :class:`.BinaryExpression`. Both :class:`.ColumnClause` and :class:`.BinaryExpression` are subclasses of :class:`.ColumnElement`:: >>> from sqlalchemy.sql import column >>> column('a') + column('b') >>> print column('a') + column('b') a + b :class:`.ColumnElement` supports the ability to be a *proxy* element, which indicates that the :class:`.ColumnElement` may be associated with a :class:`.Selectable` which was derived from another :class:`.Selectable`. An example of a "derived" :class:`.Selectable` is an :class:`.Alias` of a :class:`~sqlalchemy.schema.Table`. For the ambitious, an in-depth discussion of this concept can be found at `Expression Transformations `_. 
""" __visit_name__ = 'column' primary_key = False foreign_keys = [] quote = None _label = None _key_label = None _alt_names = () @util.memoized_property def type(self): return sqltypes.NULLTYPE @util.memoized_property def comparator(self): return self.type.comparator_factory(self) def __getattr__(self, key): try: return getattr(self.comparator, key) except AttributeError: raise AttributeError( 'Neither %r object nor %r object has an attribute %r' % ( type(self).__name__, type(self.comparator).__name__, key) ) def operate(self, op, *other, **kwargs): return op(self.comparator, *other, **kwargs) def reverse_operate(self, op, other, **kwargs): return op(other, self.comparator, **kwargs) def _bind_param(self, operator, obj): return BindParameter(None, obj, _compared_to_operator=operator, _compared_to_type=self.type, unique=True) @property def expression(self): """Return a column expression. Part of the inspection interface; returns self. """ return self @property def _select_iterable(self): return (self, ) @util.memoized_property def base_columns(self): return util.column_set(c for c in self.proxy_set if not hasattr(c, '_proxies')) @util.memoized_property def proxy_set(self): s = util.column_set([self]) if hasattr(self, '_proxies'): for c in self._proxies: s.update(c.proxy_set) return s def shares_lineage(self, othercolumn): """Return True if the given :class:`.ColumnElement` has a common ancestor to this :class:`.ColumnElement`.""" return bool(self.proxy_set.intersection(othercolumn.proxy_set)) def _compare_name_for_result(self, other): """Return True if the given column element compares to this one when targeting within a result row.""" return hasattr(other, 'name') and hasattr(self, 'name') and \ other.name == self.name def _make_proxy(self, selectable, name=None, name_is_truncatable=False, **kw): """Create a new :class:`.ColumnElement` representing this :class:`.ColumnElement` as it appears in the select list of a descending selectable. """ if name is None: name = self.anon_label try: key = str(self) except exc.UnsupportedCompilationError: key = self.anon_label else: key = name co = ColumnClause(_as_truncated(name) if name_is_truncatable else name, selectable, type_=getattr(self, 'type', None)) co._proxies = [self] if selectable._is_clone_of is not None: co._is_clone_of = \ selectable._is_clone_of.columns.get(key) selectable._columns[key] = co return co def compare(self, other, use_proxies=False, equivalents=None, **kw): """Compare this ColumnElement to another. Special arguments understood: :param use_proxies: when True, consider two columns that share a common base column as equivalent (i.e. shares_lineage()) :param equivalents: a dictionary of columns as keys mapped to sets of columns. If the given "other" column is present in this dictionary, if any of the columns in the corresponding set() pass the comparison test, the result is True. This is used to expand the comparison to other columns that may be known to be equivalent to this one via foreign key or other criterion. """ to_compare = (other, ) if equivalents and other in equivalents: to_compare = equivalents[other].union(to_compare) for oth in to_compare: if use_proxies and self.shares_lineage(oth): return True elif hash(oth) == hash(self): return True else: return False def label(self, name): """Produce a column label, i.e. `` AS ``. This is a shortcut to the :func:`~.expression.label` function. if 'name' is None, an anonymous label name will be generated. 
""" return Label(name, self, self.type) @util.memoized_property def anon_label(self): """provides a constant 'anonymous label' for this ColumnElement. This is a label() expression which will be named at compile time. The same label() is returned each time anon_label is called so that expressions can reference anon_label multiple times, producing the same label name at compile time. the compiler uses this function automatically at compile time for expressions that are known to be 'unnamed' like binary expressions and function calls. """ return _anonymous_label('%%(%d %s)s' % (id(self), getattr(self, 'name', 'anon'))) class ColumnCollection(util.OrderedProperties): """An ordered dictionary that stores a list of ColumnElement instances. Overrides the ``__eq__()`` method to produce SQL clauses between sets of correlated columns. """ def __init__(self, *cols): super(ColumnCollection, self).__init__() self._data.update((c.key, c) for c in cols) self.__dict__['_all_cols'] = util.column_set(self) def __str__(self): return repr([str(c) for c in self]) def replace(self, column): """add the given column to this collection, removing unaliased versions of this column as well as existing columns with the same key. e.g.:: t = Table('sometable', metadata, Column('col1', Integer)) t.columns.replace(Column('col1', Integer, key='columnone')) will remove the original 'col1' from the collection, and add the new column under the name 'columnname'. Used by schema.Column to override columns during table reflection. """ if column.name in self and column.key != column.name: other = self[column.name] if other.name == other.key: del self._data[other.name] self._all_cols.remove(other) if column.key in self._data: self._all_cols.remove(self._data[column.key]) self._all_cols.add(column) self._data[column.key] = column def add(self, column): """Add a column to this collection. The key attribute of the column will be used as the hash key for this dictionary. """ self[column.key] = column def __delitem__(self, key): raise NotImplementedError() def __setattr__(self, key, object): raise NotImplementedError() def __setitem__(self, key, value): if key in self: # this warning is primarily to catch select() statements # which have conflicting column names in their exported # columns collection existing = self[key] if not existing.shares_lineage(value): util.warn('Column %r on table %r being replaced by ' '%r, which has the same key. Consider ' 'use_labels for select() statements.' 
% (key, getattr(existing, 'table', None), value)) self._all_cols.remove(existing) # pop out memoized proxy_set as this # operation may very well be occurring # in a _make_proxy operation ColumnElement.proxy_set._reset(value) self._all_cols.add(value) self._data[key] = value def clear(self): self._data.clear() self._all_cols.clear() def remove(self, column): del self._data[column.key] self._all_cols.remove(column) def update(self, value): self._data.update(value) self._all_cols.clear() self._all_cols.update(self._data.values()) def extend(self, iter): self.update((c.key, c) for c in iter) __hash__ = None def __eq__(self, other): l = [] for c in other: for local in self: if c.shares_lineage(local): l.append(c == local) return and_(*l) def __contains__(self, other): if not isinstance(other, basestring): raise exc.ArgumentError("__contains__ requires a string argument") return util.OrderedProperties.__contains__(self, other) def __setstate__(self, state): self.__dict__['_data'] = state['_data'] self.__dict__['_all_cols'] = util.column_set(self._data.values()) def contains_column(self, col): # this has to be done via set() membership return col in self._all_cols def as_immutable(self): return ImmutableColumnCollection(self._data, self._all_cols) class ImmutableColumnCollection(util.ImmutableProperties, ColumnCollection): def __init__(self, data, colset): util.ImmutableProperties.__init__(self, data) self.__dict__['_all_cols'] = colset extend = remove = util.ImmutableProperties._immutable class ColumnSet(util.ordered_column_set): def contains_column(self, col): return col in self def extend(self, cols): for col in cols: self.add(col) def __add__(self, other): return list(self) + list(other) def __eq__(self, other): l = [] for c in other: for local in self: if c.shares_lineage(local): l.append(c == local) return and_(*l) def __hash__(self): return hash(tuple(x for x in self)) class Selectable(ClauseElement): """mark a class as being selectable""" __visit_name__ = 'selectable' is_selectable = True @property def selectable(self): return self class FromClause(Selectable): """Represent an element that can be used within the ``FROM`` clause of a ``SELECT`` statement. The most common forms of :class:`.FromClause` are the :class:`.Table` and the :func:`.select` constructs. Key features common to all :class:`.FromClause` objects include: * a :attr:`.c` collection, which provides per-name access to a collection of :class:`.ColumnElement` objects. * a :attr:`.primary_key` attribute, which is a collection of all those :class:`.ColumnElement` objects that indicate the ``primary_key`` flag. * Methods to generate various derivations of a "from" clause, including :meth:`.FromClause.alias`, :meth:`.FromClause.join`, :meth:`.FromClause.select`. """ __visit_name__ = 'fromclause' named_with_column = False _hide_froms = [] quote = None schema = None _memoized_property = util.group_expirable_memoized_property(["_columns"]) def count(self, whereclause=None, **params): """return a SELECT COUNT generated against this :class:`.FromClause`.""" if self.primary_key: col = list(self.primary_key)[0] else: col = list(self.columns)[0] return select( [func.count(col).label('tbl_row_count')], whereclause, from_obj=[self], **params) def select(self, whereclause=None, **params): """return a SELECT of this :class:`.FromClause`. .. seealso:: :func:`~.sql.expression.select` - general purpose method which allows for arbitrary column lists. 
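A brief sketch of typical usage, assuming a placeholder ``mytable`` construct::

    stmt = mytable.select(mytable.c.id > 5)

which is roughly equivalent to ``select([mytable]).where(mytable.c.id > 5)``.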
""" return select([self], whereclause, **params) def join(self, right, onclause=None, isouter=False): """return a join of this :class:`.FromClause` against another :class:`.FromClause`.""" return Join(self, right, onclause, isouter) def outerjoin(self, right, onclause=None): """return an outer join of this :class:`.FromClause` against another :class:`.FromClause`.""" return Join(self, right, onclause, True) def alias(self, name=None): """return an alias of this :class:`.FromClause`. This is shorthand for calling:: from sqlalchemy import alias a = alias(self, name=name) See :func:`~.expression.alias` for details. """ return Alias(self, name) def is_derived_from(self, fromclause): """Return True if this FromClause is 'derived' from the given FromClause. An example would be an Alias of a Table is derived from that Table. """ # this is essentially an "identity" check in the base class. # Other constructs override this to traverse through # contained elements. return fromclause in self._cloned_set def _is_lexical_equivalent(self, other): """Return True if this FromClause and the other represent the same lexical identity. This tests if either one is a copy of the other, or if they are the same via annotation identity. """ return self._cloned_set.intersection(other._cloned_set) def replace_selectable(self, old, alias): """replace all occurrences of FromClause 'old' with the given Alias object, returning a copy of this :class:`.FromClause`. """ return sqlutil.ClauseAdapter(alias).traverse(self) def correspond_on_equivalents(self, column, equivalents): """Return corresponding_column for the given column, or if None search for a match in the given dictionary. """ col = self.corresponding_column(column, require_embedded=True) if col is None and col in equivalents: for equiv in equivalents[col]: nc = self.corresponding_column(equiv, require_embedded=True) if nc: return nc return col def corresponding_column(self, column, require_embedded=False): """Given a :class:`.ColumnElement`, return the exported :class:`.ColumnElement` object from this :class:`.Selectable` which corresponds to that original :class:`~sqlalchemy.schema.Column` via a common ancestor column. :param column: the target :class:`.ColumnElement` to be matched :param require_embedded: only return corresponding columns for the given :class:`.ColumnElement`, if the given :class:`.ColumnElement` is actually present within a sub-element of this :class:`.FromClause`. Normally the column will match if it merely shares a common ancestor with one of the exported columns of this :class:`.FromClause`. """ def embedded(expanded_proxy_set, target_set): for t in target_set.difference(expanded_proxy_set): if not set(_expand_cloned([t]) ).intersection(expanded_proxy_set): return False return True # don't dig around if the column is locally present if self.c.contains_column(column): return column col, intersect = None, None target_set = column.proxy_set cols = self.c for c in cols: expanded_proxy_set = set(_expand_cloned(c.proxy_set)) i = target_set.intersection(expanded_proxy_set) if i and (not require_embedded or embedded(expanded_proxy_set, target_set)): if col is None: # no corresponding column yet, pick this one. col, intersect = c, i elif len(i) > len(intersect): # 'c' has a larger field of correspondence than # 'col'. i.e. selectable.c.a1_x->a1.c.x->table.c.x # matches a1.c.x->table.c.x better than # selectable.c.x->table.c.x does. col, intersect = c, i elif i == intersect: # they have the same field of correspondence. 
see # which proxy_set has fewer columns in it, which # indicates a closer relationship with the root # column. Also take into account the "weight" # attribute which CompoundSelect() uses to give # higher precedence to columns based on vertical # position in the compound statement, and discard # columns that have no reference to the target # column (also occurs with CompoundSelect) col_distance = util.reduce(operator.add, [sc._annotations.get('weight', 1) for sc in col.proxy_set if sc.shares_lineage(column)]) c_distance = util.reduce(operator.add, [sc._annotations.get('weight', 1) for sc in c.proxy_set if sc.shares_lineage(column)]) if c_distance < col_distance: col, intersect = c, i return col @property def description(self): """a brief description of this FromClause. Used primarily for error message formatting. """ return getattr(self, 'name', self.__class__.__name__ + " object") def _reset_exported(self): """delete memoized collections when a FromClause is cloned.""" self._memoized_property.expire_instance(self) @_memoized_property def columns(self): """A named-based collection of :class:`.ColumnElement` objects maintained by this :class:`.FromClause`. The :attr:`.columns`, or :attr:`.c` collection, is the gateway to the construction of SQL expressions using table-bound or other selectable-bound columns:: select([mytable]).where(mytable.c.somecolumn == 5) """ if '_columns' not in self.__dict__: self._init_collections() self._populate_column_collection() return self._columns.as_immutable() @_memoized_property def primary_key(self): """Return the collection of Column objects which comprise the primary key of this FromClause.""" self._init_collections() self._populate_column_collection() return self.primary_key @_memoized_property def foreign_keys(self): """Return the collection of ForeignKey objects which this FromClause references.""" self._init_collections() self._populate_column_collection() return self.foreign_keys c = property(attrgetter('columns'), doc="An alias for the :attr:`.columns` attribute.") _select_iterable = property(attrgetter('columns')) def _init_collections(self): assert '_columns' not in self.__dict__ assert 'primary_key' not in self.__dict__ assert 'foreign_keys' not in self.__dict__ self._columns = ColumnCollection() self.primary_key = ColumnSet() self.foreign_keys = set() @property def _cols_populated(self): return '_columns' in self.__dict__ def _populate_column_collection(self): """Called on subclasses to establish the .c collection. Each implementation has a different way of establishing this collection. """ def _refresh_for_new_column(self, column): """Given a column added to the .c collection of an underlying selectable, produce the local version of that column, assuming this selectable ultimately should proxy this column. this is used to "ping" a derived selectable to add a new column to its .c. collection when a Column has been added to one of the Table objects it ultimtely derives from. If the given selectable hasn't populated it's .c. collection yet, it should at least pass on the message to the contained selectables, but it will return None. This method is currently used by Declarative to allow Table columns to be added to a partially constructed inheritance mapping that may have already produced joins. The method isn't public right now, as the full span of implications and/or caveats aren't yet clear. 
It's also possible that this functionality could be invoked by default via an event, which would require that selectables maintain a weak referencing collection of all derivations. """ if not self._cols_populated: return None elif column.key in self.columns and self.columns[column.key] is column: return column else: return None class BindParameter(ColumnElement): """Represent a bind parameter. Public constructor is the :func:`bindparam()` function. """ __visit_name__ = 'bindparam' quote = None _is_crud = False def __init__(self, key, value, type_=None, unique=False, callable_=None, isoutparam=False, required=False, quote=None, _compared_to_operator=None, _compared_to_type=None): """Construct a BindParameter. :param key: the key for this bind param. Will be used in the generated SQL statement for dialects that use named parameters. This value may be modified when part of a compilation operation, if other :class:`BindParameter` objects exist with the same key, or if its length is too long and truncation is required. :param value: Initial value for this bind param. This value may be overridden by the dictionary of parameters sent to statement compilation/execution. :param callable\_: A callable function that takes the place of "value". The function will be called at statement execution time to determine the ultimate value. Used for scenarios where the actual bind value cannot be determined at the point at which the clause construct is created, but embedded bind values are still desirable. :param type\_: A ``TypeEngine`` object that will be used to pre-process the value corresponding to this :class:`BindParameter` at execution time. :param unique: if True, the key name of this BindParamClause will be modified if another :class:`BindParameter` of the same name already has been located within the containing :class:`.ClauseElement`. :param quote: True if this parameter name requires quoting and is not currently known as a SQLAlchemy reserved word; this currently only applies to the Oracle backend. :param required: a value is required at execution time. :param isoutparam: if True, the parameter should be treated like a stored procedure "OUT" parameter. """ if unique: self.key = _anonymous_label('%%(%d %s)s' % (id(self), key or 'param')) else: self.key = key or _anonymous_label('%%(%d param)s' % id(self)) # identifying key that won't change across # clones, used to identify the bind's logical # identity self._identifying_key = self.key # key that was passed in the first place, used to # generate new keys self._orig_key = key or 'param' self.unique = unique self.value = value self.callable = callable_ self.isoutparam = isoutparam self.required = required self.quote = quote if type_ is None: if _compared_to_type is not None: self.type = \ _compared_to_type.coerce_compared_value( _compared_to_operator, value) else: self.type = sqltypes._type_map.get(type(value), sqltypes.NULLTYPE) elif isinstance(type_, type): self.type = type_() else: self.type = type_ @property def effective_value(self): """Return the value of this bound parameter, taking into account if the ``callable`` parameter was set. The ``callable`` value will be evaluated and returned if present, else ``value``. 
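A minimal sketch (the parameter names here are arbitrary)::

    p = bindparam('ident', callable_=lambda: 42)
    p.effective_value    # 42, evaluated from the callable

    p = bindparam('ident', value=42)
    p.effective_value    # 42, taken directly from the stored value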
""" if self.callable: return self.callable() else: return self.value def _clone(self): c = ClauseElement._clone(self) if self.unique: c.key = _anonymous_label('%%(%d %s)s' % (id(c), c._orig_key or 'param')) return c def _convert_to_unique(self): if not self.unique: self.unique = True self.key = _anonymous_label('%%(%d %s)s' % (id(self), self._orig_key or 'param')) def compare(self, other, **kw): """Compare this :class:`BindParameter` to the given clause.""" return isinstance(other, BindParameter) \ and self.type._compare_type_affinity(other.type) \ and self.value == other.value def __getstate__(self): """execute a deferred value for serialization purposes.""" d = self.__dict__.copy() v = self.value if self.callable: v = self.callable() d['callable'] = None d['value'] = v return d def __repr__(self): return 'BindParameter(%r, %r, type_=%r)' % (self.key, self.value, self.type) class TypeClause(ClauseElement): """Handle a type keyword in a SQL statement. Used by the ``Case`` statement. """ __visit_name__ = 'typeclause' def __init__(self, type): self.type = type class Generative(object): """Allow a ClauseElement to generate itself via the @_generative decorator. """ def _generate(self): s = self.__class__.__new__(self.__class__) s.__dict__ = self.__dict__.copy() return s class Executable(Generative): """Mark a ClauseElement as supporting execution. :class:`.Executable` is a superclass for all "statement" types of objects, including :func:`select`, :func:`delete`, :func:`update`, :func:`insert`, :func:`text`. """ supports_execution = True _execution_options = util.immutabledict() _bind = None @_generative def execution_options(self, **kw): """ Set non-SQL options for the statement which take effect during execution. Execution options can be set on a per-statement or per :class:`.Connection` basis. Additionally, the :class:`.Engine` and ORM :class:`~.orm.query.Query` objects provide access to execution options which they in turn configure upon connections. The :meth:`execution_options` method is generative. A new instance of this statement is returned that contains the options:: statement = select([table.c.x, table.c.y]) statement = statement.execution_options(autocommit=True) Note that only a subset of possible execution options can be applied to a statement - these include "autocommit" and "stream_results", but not "isolation_level" or "compiled_cache". See :meth:`.Connection.execution_options` for a full list of possible options. .. seealso:: :meth:`.Connection.execution_options()` :meth:`.Query.execution_options()` """ if 'isolation_level' in kw: raise exc.ArgumentError( "'isolation_level' execution option may only be specified " "on Connection.execution_options(), or " "per-engine using the isolation_level " "argument to create_engine()." ) if 'compiled_cache' in kw: raise exc.ArgumentError( "'compiled_cache' execution option may only be specified " "on Connection.execution_options(), not per statement." ) self._execution_options = self._execution_options.union(kw) def execute(self, *multiparams, **params): """Compile and execute this :class:`.Executable`.""" e = self.bind if e is None: label = getattr(self, 'description', self.__class__.__name__) msg = ('This %s is not directly bound to a Connection or Engine.' 'Use the .execute() method of a Connection or Engine ' 'to execute this construct.' 
% label) raise exc.UnboundExecutionError(msg) return e._execute_clauseelement(self, multiparams, params) def scalar(self, *multiparams, **params): """Compile and execute this :class:`.Executable`, returning the result's scalar representation. """ return self.execute(*multiparams, **params).scalar() @property def bind(self): """Returns the :class:`.Engine` or :class:`.Connection` to which this :class:`.Executable` is bound, or None if none found. This is a traversal which checks locally, then checks among the "from" clauses of associated objects until a bound engine or connection is found. """ if self._bind is not None: return self._bind for f in _from_objects(self): if f is self: continue engine = f.bind if engine is not None: return engine else: return None # legacy, some outside users may be calling this _Executable = Executable class TextClause(Executable, ClauseElement): """Represent a literal SQL text fragment. Public constructor is the :func:`text()` function. """ __visit_name__ = 'textclause' _bind_params_regex = re.compile(r'(? RIGHT``. A :class:`.BinaryExpression` is generated automatically whenever two column expressions are used in a Python binary expresion:: >>> from sqlalchemy.sql import column >>> column('a') + column('b') >>> print column('a') + column('b') a + b """ __visit_name__ = 'binary' def __init__(self, left, right, operator, type_=None, negate=None, modifiers=None): # allow compatibility with libraries that # refer to BinaryExpression directly and pass strings if isinstance(operator, basestring): operator = operators.custom_op(operator) self._orig = (left, right) self.left = _literal_as_text(left).self_group(against=operator) self.right = _literal_as_text(right).self_group(against=operator) self.operator = operator self.type = sqltypes.to_instance(type_) self.negate = negate if modifiers is None: self.modifiers = {} else: self.modifiers = modifiers def __nonzero__(self): if self.operator in (operator.eq, operator.ne): return self.operator(hash(self._orig[0]), hash(self._orig[1])) else: raise TypeError("Boolean value of this clause is not defined") @property def is_comparison(self): return operators.is_comparison(self.operator) @property def _from_objects(self): return self.left._from_objects + self.right._from_objects def _copy_internals(self, clone=_clone, **kw): self.left = clone(self.left, **kw) self.right = clone(self.right, **kw) def get_children(self, **kwargs): return self.left, self.right def compare(self, other, **kw): """Compare this :class:`BinaryExpression` against the given :class:`BinaryExpression`.""" return ( isinstance(other, BinaryExpression) and self.operator == other.operator and ( self.left.compare(other.left, **kw) and self.right.compare(other.right, **kw) or ( operators.is_commutative(self.operator) and self.left.compare(other.right, **kw) and self.right.compare(other.left, **kw) ) ) ) def self_group(self, against=None): if operators.is_precedent(self.operator, against): return Grouping(self) else: return self def _negate(self): if self.negate is not None: return BinaryExpression( self.left, self.right, self.negate, negate=self.operator, type_=sqltypes.BOOLEANTYPE, modifiers=self.modifiers) else: return super(BinaryExpression, self)._negate() class Exists(UnaryExpression): __visit_name__ = UnaryExpression.__visit_name__ _from_objects = [] def __init__(self, *args, **kwargs): if args and isinstance(args[0], (SelectBase, ScalarSelect)): s = args[0] else: if not args: args = ([literal_column('*')],) s = select(*args, 
**kwargs).as_scalar().self_group() UnaryExpression.__init__(self, s, operator=operators.exists, type_=sqltypes.Boolean) def select(self, whereclause=None, **params): return select([self], whereclause, **params) def correlate(self, *fromclause): e = self._clone() e.element = self.element.correlate(*fromclause).self_group() return e def correlate_except(self, *fromclause): e = self._clone() e.element = self.element.correlate_except(*fromclause).self_group() return e def select_from(self, clause): """return a new :class:`.Exists` construct, applying the given expression to the :meth:`.Select.select_from` method of the select statement contained. """ e = self._clone() e.element = self.element.select_from(clause).self_group() return e def where(self, clause): """return a new exists() construct with the given expression added to its WHERE clause, joined to the existing clause via AND, if any. """ e = self._clone() e.element = self.element.where(clause).self_group() return e class Join(FromClause): """represent a ``JOIN`` construct between two :class:`.FromClause` elements. The public constructor function for :class:`.Join` is the module-level :func:`join()` function, as well as the :func:`join()` method available off all :class:`.FromClause` subclasses. """ __visit_name__ = 'join' def __init__(self, left, right, onclause=None, isouter=False): """Construct a new :class:`.Join`. The usual entrypoint here is the :func:`~.expression.join` function or the :meth:`.FromClause.join` method of any :class:`.FromClause` object. """ self.left = _interpret_as_from(left) self.right = _interpret_as_from(right).self_group() if onclause is None: self.onclause = self._match_primaries(self.left, self.right) else: self.onclause = onclause self.isouter = isouter @property def description(self): return "Join object on %s(%d) and %s(%d)" % ( self.left.description, id(self.left), self.right.description, id(self.right)) def is_derived_from(self, fromclause): return fromclause is self or \ self.left.is_derived_from(fromclause) or \ self.right.is_derived_from(fromclause) def self_group(self, against=None): return FromGrouping(self) def _populate_column_collection(self): columns = [c for c in self.left.columns] + \ [c for c in self.right.columns] self.primary_key.extend(sqlutil.reduce_columns( (c for c in columns if c.primary_key), self.onclause)) self._columns.update((col._label, col) for col in columns) self.foreign_keys.update(itertools.chain( *[col.foreign_keys for col in columns])) def _refresh_for_new_column(self, column): col = self.left._refresh_for_new_column(column) if col is None: col = self.right._refresh_for_new_column(column) if col is not None: if self._cols_populated: self._columns[col._label] = col self.foreign_keys.add(col) if col.primary_key: self.primary_key.add(col) return col return None def _copy_internals(self, clone=_clone, **kw): self._reset_exported() self.left = clone(self.left, **kw) self.right = clone(self.right, **kw) self.onclause = clone(self.onclause, **kw) def get_children(self, **kwargs): return self.left, self.right, self.onclause def _match_primaries(self, left, right): if isinstance(left, Join): left_right = left.right else: left_right = None return sqlutil.join_condition(left, right, a_subset=left_right) def select(self, whereclause=None, **kwargs): """Create a :class:`.Select` from this :class:`.Join`. 
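        E.g., a rough sketch, assuming hypothetical ``users`` and
        ``addresses`` table constructs::

            j = users.join(addresses, users.c.id == addresses.c.user_id)
            stmt = j.select(users.c.name == 'ed')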
The equivalent long-hand form, given a :class:`.Join` object ``j``, is:: from sqlalchemy import select j = select([j.left, j.right], **kw).\\ where(whereclause).\\ select_from(j) :param whereclause: the WHERE criterion that will be sent to the :func:`select()` function :param \**kwargs: all other kwargs are sent to the underlying :func:`select()` function. """ collist = [self.left, self.right] return select(collist, whereclause, from_obj=[self], **kwargs) @property def bind(self): return self.left.bind or self.right.bind def alias(self, name=None): """return an alias of this :class:`.Join`. Used against a :class:`.Join` object, :meth:`~.Join.alias` calls the :meth:`~.Join.select` method first so that a subquery against a :func:`.select` construct is generated. the :func:`~expression.select` construct also has the ``correlate`` flag set to ``False`` and will not auto-correlate inside an enclosing :func:`~expression.select` construct. The equivalent long-hand form, given a :class:`.Join` object ``j``, is:: from sqlalchemy import select, alias j = alias( select([j.left, j.right]).\\ select_from(j).\\ with_labels(True).\\ correlate(False), name=name ) See :func:`~.expression.alias` for further details on aliases. """ return self.select(use_labels=True, correlate=False).alias(name) @property def _hide_froms(self): return itertools.chain(*[_from_objects(x.left, x.right) for x in self._cloned_set]) @property def _from_objects(self): return [self] + \ self.onclause._from_objects + \ self.left._from_objects + \ self.right._from_objects class Alias(FromClause): """Represents an table or selectable alias (AS). Represents an alias, as typically applied to any table or sub-select within a SQL statement using the ``AS`` keyword (or without the keyword on certain databases such as Oracle). This object is constructed from the :func:`~.expression.alias` module level function as well as the :meth:`.FromClause.alias` method available on all :class:`.FromClause` subclasses. """ __visit_name__ = 'alias' named_with_column = True def __init__(self, selectable, name=None): baseselectable = selectable while isinstance(baseselectable, Alias): baseselectable = baseselectable.element self.original = baseselectable self.supports_execution = baseselectable.supports_execution if self.supports_execution: self._execution_options = baseselectable._execution_options self.element = selectable if name is None: if self.original.named_with_column: name = getattr(self.original, 'name', None) name = _anonymous_label('%%(%d %s)s' % (id(self), name or 'anon')) self.name = name @property def description(self): # Py3K #return self.name # Py2K return self.name.encode('ascii', 'backslashreplace') # end Py2K def as_scalar(self): try: return self.element.as_scalar() except AttributeError: raise AttributeError("Element %s does not support " "'as_scalar()'" % self.element) def is_derived_from(self, fromclause): if fromclause in self._cloned_set: return True return self.element.is_derived_from(fromclause) def _populate_column_collection(self): for col in self.element.columns: col._make_proxy(self) def _refresh_for_new_column(self, column): col = self.element._refresh_for_new_column(column) if col is not None: if not self._cols_populated: return None else: return col._make_proxy(self) else: return None def _copy_internals(self, clone=_clone, **kw): # don't apply anything to an aliased Table # for now. May want to drive this from # the given **kw. 
if isinstance(self.element, TableClause): return self._reset_exported() self.element = clone(self.element, **kw) baseselectable = self.element while isinstance(baseselectable, Alias): baseselectable = baseselectable.element self.original = baseselectable def get_children(self, column_collections=True, **kw): if column_collections: for c in self.c: yield c yield self.element @property def _from_objects(self): return [self] @property def bind(self): return self.element.bind class CTE(Alias): """Represent a Common Table Expression. The :class:`.CTE` object is obtained using the :meth:`.SelectBase.cte` method from any selectable. See that method for complete examples. .. versionadded:: 0.7.6 """ __visit_name__ = 'cte' def __init__(self, selectable, name=None, recursive=False, _cte_alias=None, _restates=frozenset()): self.recursive = recursive self._cte_alias = _cte_alias self._restates = _restates super(CTE, self).__init__(selectable, name=name) def alias(self, name=None): return CTE( self.original, name=name, recursive=self.recursive, _cte_alias=self, ) def union(self, other): return CTE( self.original.union(other), name=self.name, recursive=self.recursive, _restates=self._restates.union([self]) ) def union_all(self, other): return CTE( self.original.union_all(other), name=self.name, recursive=self.recursive, _restates=self._restates.union([self]) ) class Grouping(ColumnElement): """Represent a grouping within a column expression""" __visit_name__ = 'grouping' def __init__(self, element): self.element = element self.type = getattr(element, 'type', sqltypes.NULLTYPE) @property def _label(self): return getattr(self.element, '_label', None) or self.anon_label def _copy_internals(self, clone=_clone, **kw): self.element = clone(self.element, **kw) def get_children(self, **kwargs): return self.element, @property def _from_objects(self): return self.element._from_objects def __getattr__(self, attr): return getattr(self.element, attr) def __getstate__(self): return {'element': self.element, 'type': self.type} def __setstate__(self, state): self.element = state['element'] self.type = state['type'] def compare(self, other, **kw): return isinstance(other, Grouping) and \ self.element.compare(other.element) class FromGrouping(FromClause): """Represent a grouping of a FROM clause""" __visit_name__ = 'grouping' def __init__(self, element): self.element = element def _init_collections(self): pass @property def columns(self): return self.element.columns @property def primary_key(self): return self.element.primary_key @property def foreign_keys(self): # this could be # self.element.foreign_keys # see SelectableTest.test_join_condition return set() @property def _hide_froms(self): return self.element._hide_froms def get_children(self, **kwargs): return self.element, def _copy_internals(self, clone=_clone, **kw): self.element = clone(self.element, **kw) @property def _from_objects(self): return self.element._from_objects def __getattr__(self, attr): return getattr(self.element, attr) def __getstate__(self): return {'element': self.element} def __setstate__(self, state): self.element = state['element'] class Over(ColumnElement): """Represent an OVER clause. This is a special operator against a so-called "window" function, as well as any aggregate function, which produces results relative to the result set itself. It's supported only by certain database backends. 
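    E.g., a rough sketch, assuming a hypothetical ``emp`` table::

        from sqlalchemy import func, select

        stmt = select([
            emp.c.name,
            func.row_number().over(order_by=emp.c.salary).label('rank')
        ])

    which would render approximately
    ``row_number() OVER (ORDER BY emp.salary) AS rank`` in the columns
    clause.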
""" __visit_name__ = 'over' order_by = None partition_by = None def __init__(self, func, partition_by=None, order_by=None): self.func = func if order_by is not None: self.order_by = ClauseList(*util.to_list(order_by)) if partition_by is not None: self.partition_by = ClauseList(*util.to_list(partition_by)) @util.memoized_property def type(self): return self.func.type def get_children(self, **kwargs): return [c for c in (self.func, self.partition_by, self.order_by) if c is not None] def _copy_internals(self, clone=_clone, **kw): self.func = clone(self.func, **kw) if self.partition_by is not None: self.partition_by = clone(self.partition_by, **kw) if self.order_by is not None: self.order_by = clone(self.order_by, **kw) @property def _from_objects(self): return list(itertools.chain( *[c._from_objects for c in (self.func, self.partition_by, self.order_by) if c is not None] )) class Label(ColumnElement): """Represents a column label (AS). Represent a label, as typically applied to any column-level element using the ``AS`` sql keyword. This object is constructed from the :func:`label()` module level function as well as the :func:`label()` method available on all :class:`.ColumnElement` subclasses. """ __visit_name__ = 'label' def __init__(self, name, element, type_=None): while isinstance(element, Label): element = element.element if name: self.name = name else: self.name = _anonymous_label('%%(%d %s)s' % (id(self), getattr(element, 'name', 'anon'))) self.key = self._label = self._key_label = self.name self._element = element self._type = type_ self.quote = element.quote self._proxies = [element] def __reduce__(self): return self.__class__, (self.name, self._element, self._type) @util.memoized_property def type(self): return sqltypes.to_instance( self._type or getattr(self._element, 'type', None) ) @util.memoized_property def element(self): return self._element.self_group(against=operators.as_) def self_group(self, against=None): sub_element = self._element.self_group(against=against) if sub_element is not self._element: return Label(self.name, sub_element, type_=self._type) else: return self @property def primary_key(self): return self.element.primary_key @property def foreign_keys(self): return self.element.foreign_keys def get_children(self, **kwargs): return self.element, def _copy_internals(self, clone=_clone, **kw): self.element = clone(self.element, **kw) @property def _from_objects(self): return self.element._from_objects def _make_proxy(self, selectable, name=None, **kw): e = self.element._make_proxy(selectable, name=name if name else self.name) e._proxies.append(self) if self._type is not None: e.type = self._type return e class ColumnClause(Immutable, ColumnElement): """Represents a generic column expression from any textual string. This includes columns associated with tables, aliases and select statements, but also any arbitrary text. May or may not be bound to an underlying :class:`.Selectable`. :class:`.ColumnClause` is constructed by itself typically via the :func:`~.expression.column` function. 
It may be placed directly into constructs such as :func:`.select` constructs:: from sqlalchemy.sql import column, select c1, c2 = column("c1"), column("c2") s = select([c1, c2]).where(c1==5) There is also a variant on :func:`~.expression.column` known as :func:`~.expression.literal_column` - the difference is that in the latter case, the string value is assumed to be an exact expression, rather than a column name, so that no quoting rules or similar are applied:: from sqlalchemy.sql import literal_column, select s = select([literal_column("5 + 7")]) :class:`.ColumnClause` can also be used in a table-like fashion by combining the :func:`~.expression.column` function with the :func:`~.expression.table` function, to produce a "lightweight" form of table metadata:: from sqlalchemy.sql import table, column user = table("user", column("id"), column("name"), column("description"), ) The above construct can be created in an ad-hoc fashion and is not associated with any :class:`.schema.MetaData`, unlike it's more full fledged :class:`.schema.Table` counterpart. :param text: the text of the element. :param selectable: parent selectable. :param type: :class:`.types.TypeEngine` object which can associate this :class:`.ColumnClause` with a type. :param is_literal: if True, the :class:`.ColumnClause` is assumed to be an exact expression that will be delivered to the output with no quoting rules applied regardless of case sensitive settings. the :func:`literal_column()` function is usually used to create such a :class:`.ColumnClause`. """ __visit_name__ = 'column' onupdate = default = server_default = server_onupdate = None _memoized_property = util.group_expirable_memoized_property() def __init__(self, text, selectable=None, type_=None, is_literal=False): self.key = self.name = text self.table = selectable self.type = sqltypes.to_instance(type_) self.is_literal = is_literal def _compare_name_for_result(self, other): if self.is_literal or \ self.table is None or \ not hasattr(other, 'proxy_set') or ( isinstance(other, ColumnClause) and other.is_literal ): return super(ColumnClause, self).\ _compare_name_for_result(other) else: return other.proxy_set.intersection(self.proxy_set) def _get_table(self): return self.__dict__['table'] def _set_table(self, table): self._memoized_property.expire_instance(self) self.__dict__['table'] = table table = property(_get_table, _set_table) @_memoized_property def _from_objects(self): t = self.table if t is not None: return [t] else: return [] @util.memoized_property def description(self): # Py3K #return self.name # Py2K return self.name.encode('ascii', 'backslashreplace') # end Py2K @_memoized_property def _key_label(self): if self.key != self.name: return self._gen_label(self.key) else: return self._label @_memoized_property def _label(self): return self._gen_label(self.name) def _gen_label(self, name): t = self.table if self.is_literal: return None elif t is not None and t.named_with_column: if getattr(t, 'schema', None): label = t.schema.replace('.', '_') + "_" + \ t.name + "_" + name else: label = t.name + "_" + name # ensure the label name doesn't conflict with that # of an existing column if label in t.c: _label = label counter = 1 while _label in t.c: _label = label + "_" + str(counter) counter += 1 label = _label return _as_truncated(label) else: return name def _bind_param(self, operator, obj): return BindParameter(self.name, obj, _compared_to_operator=operator, _compared_to_type=self.type, unique=True) def _make_proxy(self, selectable, name=None, attach=True, 
name_is_truncatable=False, **kw): # propagate the "is_literal" flag only if we are keeping our name, # otherwise its considered to be a label is_literal = self.is_literal and (name is None or name == self.name) c = self._constructor( _as_truncated(name or self.name) if \ name_is_truncatable else \ (name or self.name), selectable=selectable, type_=self.type, is_literal=is_literal ) if name is None: c.key = self.key c._proxies = [self] if selectable._is_clone_of is not None: c._is_clone_of = \ selectable._is_clone_of.columns.get(c.key) if attach: selectable._columns[c.key] = c return c class TableClause(Immutable, FromClause): """Represents a minimal "table" construct. The constructor for :class:`.TableClause` is the :func:`~.expression.table` function. This produces a lightweight table object that has only a name and a collection of columns, which are typically produced by the :func:`~.expression.column` function:: from sqlalchemy.sql import table, column user = table("user", column("id"), column("name"), column("description"), ) The :class:`.TableClause` construct serves as the base for the more commonly used :class:`~.schema.Table` object, providing the usual set of :class:`~.expression.FromClause` services including the ``.c.`` collection and statement generation methods. It does **not** provide all the additional schema-level services of :class:`~.schema.Table`, including constraints, references to other tables, or support for :class:`.MetaData`-level services. It's useful on its own as an ad-hoc construct used to generate quick SQL statements when a more fully fledged :class:`~.schema.Table` is not on hand. """ __visit_name__ = 'table' named_with_column = True implicit_returning = False """:class:`.TableClause` doesn't support having a primary key or column -level defaults, so implicit returning doesn't apply.""" _autoincrement_column = None """No PK or default support so no autoincrement column.""" def __init__(self, name, *columns): super(TableClause, self).__init__() self.name = self.fullname = name self._columns = ColumnCollection() self.primary_key = ColumnSet() self.foreign_keys = set() for c in columns: self.append_column(c) def _init_collections(self): pass @util.memoized_property def description(self): # Py3K #return self.name # Py2K return self.name.encode('ascii', 'backslashreplace') # end Py2K def append_column(self, c): self._columns[c.key] = c c.table = self def get_children(self, column_collections=True, **kwargs): if column_collections: return [c for c in self.c] else: return [] def count(self, whereclause=None, **params): """return a SELECT COUNT generated against this :class:`.TableClause`.""" if self.primary_key: col = list(self.primary_key)[0] else: col = list(self.columns)[0] return select( [func.count(col).label('tbl_row_count')], whereclause, from_obj=[self], **params) def insert(self, values=None, inline=False, **kwargs): """Generate an :func:`.insert` construct against this :class:`.TableClause`. E.g.:: table.insert().values(name='foo') See :func:`.insert` for argument and usage information. """ return insert(self, values=values, inline=inline, **kwargs) def update(self, whereclause=None, values=None, inline=False, **kwargs): """Generate an :func:`.update` construct against this :class:`.TableClause`. E.g.:: table.update().where(table.c.id==7).values(name='foo') See :func:`.update` for argument and usage information. 
""" return update(self, whereclause=whereclause, values=values, inline=inline, **kwargs) def delete(self, whereclause=None, **kwargs): """Generate a :func:`.delete` construct against this :class:`.TableClause`. E.g.:: table.delete().where(table.c.id==7) See :func:`.delete` for argument and usage information. """ return delete(self, whereclause, **kwargs) @property def _from_objects(self): return [self] class SelectBase(Executable, FromClause): """Base class for :class:`.Select` and :class:`.CompoundSelect`.""" _order_by_clause = ClauseList() _group_by_clause = ClauseList() _limit = None _offset = None def __init__(self, use_labels=False, for_update=False, limit=None, offset=None, order_by=None, group_by=None, bind=None, autocommit=None): self.use_labels = use_labels self.for_update = for_update if autocommit is not None: util.warn_deprecated('autocommit on select() is ' 'deprecated. Use .execution_options(a' 'utocommit=True)') self._execution_options = \ self._execution_options.union( {'autocommit': autocommit}) if limit is not None: self._limit = util.asint(limit) if offset is not None: self._offset = util.asint(offset) self._bind = bind if order_by is not None: self._order_by_clause = ClauseList(*util.to_list(order_by)) if group_by is not None: self._group_by_clause = ClauseList(*util.to_list(group_by)) def as_scalar(self): """return a 'scalar' representation of this selectable, which can be used as a column expression. Typically, a select statement which has only one column in its columns clause is eligible to be used as a scalar expression. The returned object is an instance of :class:`ScalarSelect`. """ return ScalarSelect(self) @_generative def apply_labels(self): """return a new selectable with the 'use_labels' flag set to True. This will result in column expressions being generated using labels against their table name, such as "SELECT somecolumn AS tablename_somecolumn". This allows selectables which contain multiple FROM clauses to produce a unique set of column names regardless of name conflicts among the individual FROM clauses. """ self.use_labels = True def label(self, name): """return a 'scalar' representation of this selectable, embedded as a subquery with a label. .. seealso:: :meth:`~.SelectBase.as_scalar`. """ return self.as_scalar().label(name) def cte(self, name=None, recursive=False): """Return a new :class:`.CTE`, or Common Table Expression instance. Common table expressions are a SQL standard whereby SELECT statements can draw upon secondary statements specified along with the primary statement, using a clause called "WITH". Special semantics regarding UNION can also be employed to allow "recursive" queries, where a SELECT statement can draw upon the set of rows that have previously been selected. SQLAlchemy detects :class:`.CTE` objects, which are treated similarly to :class:`.Alias` objects, as special elements to be delivered to the FROM clause of the statement as well as to a WITH clause at the top of the statement. .. versionadded:: 0.7.6 :param name: name given to the common table expression. Like :meth:`._FromClause.alias`, the name can be left as ``None`` in which case an anonymous symbol will be used at query compile time. :param recursive: if ``True``, will render ``WITH RECURSIVE``. A recursive common table expression is intended to be used in conjunction with UNION ALL in order to derive rows from those already selected. 
The following examples illustrate two examples from Postgresql's documentation at http://www.postgresql.org/docs/8.4/static/queries-with.html. Example 1, non recursive:: from sqlalchemy import Table, Column, String, Integer, MetaData, \\ select, func metadata = MetaData() orders = Table('orders', metadata, Column('region', String), Column('amount', Integer), Column('product', String), Column('quantity', Integer) ) regional_sales = select([ orders.c.region, func.sum(orders.c.amount).label('total_sales') ]).group_by(orders.c.region).cte("regional_sales") top_regions = select([regional_sales.c.region]).\\ where( regional_sales.c.total_sales > select([ func.sum(regional_sales.c.total_sales)/10 ]) ).cte("top_regions") statement = select([ orders.c.region, orders.c.product, func.sum(orders.c.quantity).label("product_units"), func.sum(orders.c.amount).label("product_sales") ]).where(orders.c.region.in_( select([top_regions.c.region]) )).group_by(orders.c.region, orders.c.product) result = conn.execute(statement).fetchall() Example 2, WITH RECURSIVE:: from sqlalchemy import Table, Column, String, Integer, MetaData, \\ select, func metadata = MetaData() parts = Table('parts', metadata, Column('part', String), Column('sub_part', String), Column('quantity', Integer), ) included_parts = select([ parts.c.sub_part, parts.c.part, parts.c.quantity]).\\ where(parts.c.part=='our part').\\ cte(recursive=True) incl_alias = included_parts.alias() parts_alias = parts.alias() included_parts = included_parts.union_all( select([ parts_alias.c.part, parts_alias.c.sub_part, parts_alias.c.quantity ]). where(parts_alias.c.part==incl_alias.c.sub_part) ) statement = select([ included_parts.c.sub_part, func.sum(included_parts.c.quantity). label('total_quantity') ]).\ select_from(included_parts.join(parts, included_parts.c.part==parts.c.part)).\\ group_by(included_parts.c.sub_part) result = conn.execute(statement).fetchall() .. seealso:: :meth:`.orm.query.Query.cte` - ORM version of :meth:`.SelectBase.cte`. """ return CTE(self, name=name, recursive=recursive) @_generative @util.deprecated('0.6', message=":func:`.autocommit` is deprecated. Use " ":func:`.Executable.execution_options` with the " "'autocommit' flag.") def autocommit(self): """return a new selectable with the 'autocommit' flag set to True.""" self._execution_options = \ self._execution_options.union({'autocommit': True}) def _generate(self): """Override the default _generate() method to also clear out exported collections.""" s = self.__class__.__new__(self.__class__) s.__dict__ = self.__dict__.copy() s._reset_exported() return s @_generative def limit(self, limit): """return a new selectable with the given LIMIT criterion applied.""" self._limit = util.asint(limit) @_generative def offset(self, offset): """return a new selectable with the given OFFSET criterion applied.""" self._offset = util.asint(offset) @_generative def order_by(self, *clauses): """return a new selectable with the given list of ORDER BY criterion applied. The criterion will be appended to any pre-existing ORDER BY criterion. """ self.append_order_by(*clauses) @_generative def group_by(self, *clauses): """return a new selectable with the given list of GROUP BY criterion applied. The criterion will be appended to any pre-existing GROUP BY criterion. """ self.append_group_by(*clauses) def append_order_by(self, *clauses): """Append the given ORDER BY criterion applied to this selectable. The criterion will be appended to any pre-existing ORDER BY criterion. 
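        E.g., a rough sketch, assuming a hypothetical ``users`` table::

            stmt = select([users])
            stmt.append_order_by(users.c.name)   # mutates stmt in place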
This is an **in-place** mutation method; the :meth:`~.SelectBase.order_by` method is preferred, as it provides standard :term:`method chaining`. """ if len(clauses) == 1 and clauses[0] is None: self._order_by_clause = ClauseList() else: if getattr(self, '_order_by_clause', None) is not None: clauses = list(self._order_by_clause) + list(clauses) self._order_by_clause = ClauseList(*clauses) def append_group_by(self, *clauses): """Append the given GROUP BY criterion applied to this selectable. The criterion will be appended to any pre-existing GROUP BY criterion. This is an **in-place** mutation method; the :meth:`~.SelectBase.group_by` method is preferred, as it provides standard :term:`method chaining`. """ if len(clauses) == 1 and clauses[0] is None: self._group_by_clause = ClauseList() else: if getattr(self, '_group_by_clause', None) is not None: clauses = list(self._group_by_clause) + list(clauses) self._group_by_clause = ClauseList(*clauses) @property def _from_objects(self): return [self] class ScalarSelect(Generative, Grouping): _from_objects = [] def __init__(self, element): self.element = element self.type = element._scalar_type() @property def columns(self): raise exc.InvalidRequestError('Scalar Select expression has no ' 'columns; use this object directly within a ' 'column-level expression.') c = columns @_generative def where(self, crit): """Apply a WHERE clause to the SELECT statement referred to by this :class:`.ScalarSelect`. """ self.element = self.element.where(crit) def self_group(self, **kwargs): return self class CompoundSelect(SelectBase): """Forms the basis of ``UNION``, ``UNION ALL``, and other SELECT-based set operations. .. seealso:: :func:`.union` :func:`.union_all` :func:`.intersect` :func:`.intersect_all` :func:`.except` :func:`.except_all` """ __visit_name__ = 'compound_select' UNION = util.symbol('UNION') UNION_ALL = util.symbol('UNION ALL') EXCEPT = util.symbol('EXCEPT') EXCEPT_ALL = util.symbol('EXCEPT ALL') INTERSECT = util.symbol('INTERSECT') INTERSECT_ALL = util.symbol('INTERSECT ALL') def __init__(self, keyword, *selects, **kwargs): self._auto_correlate = kwargs.pop('correlate', False) self.keyword = keyword self.selects = [] numcols = None # some DBs do not like ORDER BY in the inner queries of a UNION, etc. for n, s in enumerate(selects): s = _clause_element_as_expr(s) if not numcols: numcols = len(s.c) elif len(s.c) != numcols: raise exc.ArgumentError('All selectables passed to ' 'CompoundSelect must have identical numbers of ' 'columns; select #%d has %d columns, select ' '#%d has %d' % (1, len(self.selects[0].c), n + 1, len(s.c))) self.selects.append(s.self_group(self)) SelectBase.__init__(self, **kwargs) def _scalar_type(self): return self.selects[0]._scalar_type() def self_group(self, against=None): return FromGrouping(self) def is_derived_from(self, fromclause): for s in self.selects: if s.is_derived_from(fromclause): return True return False def _populate_column_collection(self): for cols in zip(*[s.c for s in self.selects]): # this is a slightly hacky thing - the union exports a # column that resembles just that of the *first* selectable. # to get at a "composite" column, particularly foreign keys, # you have to dig through the proxies collection which we # generate below. We may want to improve upon this, such as # perhaps _make_proxy can accept a list of other columns # that are "shared" - schema.column can then copy all the # ForeignKeys in. this would allow the union() to have all # those fks too. 
proxy = cols[0]._make_proxy(self, name=cols[0]._label if self.use_labels else None, key=cols[0]._key_label if self.use_labels else None) # hand-construct the "_proxies" collection to include all # derived columns place a 'weight' annotation corresponding # to how low in the list of select()s the column occurs, so # that the corresponding_column() operation can resolve # conflicts proxy._proxies = [c._annotate({'weight': i + 1}) for (i, c) in enumerate(cols)] def _refresh_for_new_column(self, column): for s in self.selects: s._refresh_for_new_column(column) if not self._cols_populated: return None raise NotImplementedError("CompoundSelect constructs don't support " "addition of columns to underlying selectables") def _copy_internals(self, clone=_clone, **kw): self._reset_exported() self.selects = [clone(s, **kw) for s in self.selects] if hasattr(self, '_col_map'): del self._col_map for attr in ('_order_by_clause', '_group_by_clause'): if getattr(self, attr) is not None: setattr(self, attr, clone(getattr(self, attr), **kw)) def get_children(self, column_collections=True, **kwargs): return (column_collections and list(self.c) or []) \ + [self._order_by_clause, self._group_by_clause] \ + list(self.selects) def bind(self): if self._bind: return self._bind for s in self.selects: e = s.bind if e: return e else: return None def _set_bind(self, bind): self._bind = bind bind = property(bind, _set_bind) class HasPrefixes(object): _prefixes = () @_generative def prefix_with(self, *expr, **kw): """Add one or more expressions following the statement keyword, i.e. SELECT, INSERT, UPDATE, or DELETE. Generative. This is used to support backend-specific prefix keywords such as those provided by MySQL. E.g.:: stmt = table.insert().prefix_with("LOW_PRIORITY", dialect="mysql") Multiple prefixes can be specified by multiple calls to :meth:`.prefix_with`. :param \*expr: textual or :class:`.ClauseElement` construct which will be rendered following the INSERT, UPDATE, or DELETE keyword. :param \**kw: A single keyword 'dialect' is accepted. This is an optional string dialect name which will limit rendering of this prefix to only that dialect. """ dialect = kw.pop('dialect', None) if kw: raise exc.ArgumentError("Unsupported argument(s): %s" % ",".join(kw)) self._setup_prefixes(expr, dialect) def _setup_prefixes(self, prefixes, dialect=None): self._prefixes = self._prefixes + tuple( [(_literal_as_text(p), dialect) for p in prefixes]) class Select(HasPrefixes, SelectBase): """Represents a ``SELECT`` statement. .. seealso:: :func:`~.expression.select` - the function which creates a :class:`.Select` object. :ref:`coretutorial_selecting` - Core Tutorial description of :func:`.select`. """ __visit_name__ = 'select' _prefixes = () _hints = util.immutabledict() _distinct = False _from_cloned = None _correlate = () _correlate_except = None _memoized_property = SelectBase._memoized_property def __init__(self, columns, whereclause=None, from_obj=None, distinct=False, having=None, correlate=True, prefixes=None, **kwargs): """Construct a Select object. The public constructor for Select is the :func:`select` function; see that function for argument descriptions. Additional generative and mutator methods are available on the :class:`SelectBase` superclass. 
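        For example, a rough sketch, assuming a hypothetical ``users``
        table construct::

            from sqlalchemy.sql import select

            stmt = select([users.c.id, users.c.name]).where(users.c.name == 'ed')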
""" self._auto_correlate = correlate if distinct is not False: if distinct is True: self._distinct = True else: self._distinct = [ _literal_as_text(e) for e in util.to_list(distinct) ] if from_obj is not None: self._from_obj = util.OrderedSet( _interpret_as_from(f) for f in util.to_list(from_obj)) else: self._from_obj = util.OrderedSet() try: cols_present = bool(columns) except TypeError: raise exc.ArgumentError("columns argument to select() must " "be a Python list or other iterable") if cols_present: self._raw_columns = [] for c in columns: c = _interpret_as_column_or_from(c) if isinstance(c, ScalarSelect): c = c.self_group(against=operators.comma_op) self._raw_columns.append(c) else: self._raw_columns = [] if whereclause is not None: self._whereclause = _literal_as_text(whereclause) else: self._whereclause = None if having is not None: self._having = _literal_as_text(having) else: self._having = None if prefixes: self._setup_prefixes(prefixes) SelectBase.__init__(self, **kwargs) @property def _froms(self): # would love to cache this, # but there's just enough edge cases, particularly now that # declarative encourages construction of SQL expressions # without tables present, to just regen this each time. froms = [] seen = set() translate = self._from_cloned def add(items): for item in items: if item is self: raise exc.InvalidRequestError( "select() construct refers to itself as a FROM") if translate and item in translate: item = translate[item] if not seen.intersection(item._cloned_set): froms.append(item) seen.update(item._cloned_set) add(_from_objects(*self._raw_columns)) if self._whereclause is not None: add(_from_objects(self._whereclause)) add(self._from_obj) return froms def _get_display_froms(self, explicit_correlate_froms=None, implicit_correlate_froms=None): """Return the full list of 'from' clauses to be displayed. Takes into account a set of existing froms which may be rendered in the FROM clause of enclosing selects; this Select may want to leave those absent if it is automatically correlating. """ froms = self._froms toremove = set(itertools.chain(*[ _expand_cloned(f._hide_froms) for f in froms])) if toremove: # if we're maintaining clones of froms, # add the copies out to the toremove list. only include # clones that are lexical equivalents. if self._from_cloned: toremove.update( self._from_cloned[f] for f in toremove.intersection(self._from_cloned) if self._from_cloned[f]._is_lexical_equivalent(f) ) # filter out to FROM clauses not in the list, # using a list to maintain ordering froms = [f for f in froms if f not in toremove] if self._correlate: to_correlate = self._correlate if to_correlate: froms = [ f for f in froms if f not in _cloned_intersection( _cloned_intersection(froms, explicit_correlate_froms or ()), to_correlate ) ] if self._correlate_except is not None: froms = [ f for f in froms if f not in _cloned_difference( _cloned_intersection(froms, explicit_correlate_froms or ()), self._correlate_except ) ] if self._auto_correlate and \ implicit_correlate_froms and \ len(froms) > 1: froms = [ f for f in froms if f not in _cloned_intersection(froms, implicit_correlate_froms) ] if not len(froms): raise exc.InvalidRequestError("Select statement '%s" "' returned no FROM clauses due to " "auto-correlation; specify " "correlate() to control " "correlation manually." 
% self) return froms def _scalar_type(self): elem = self._raw_columns[0] cols = list(elem._select_iterable) return cols[0].type @property def froms(self): """Return the displayed list of FromClause elements.""" return self._get_display_froms() @_generative def with_hint(self, selectable, text, dialect_name='*'): """Add an indexing hint for the given selectable to this :class:`.Select`. The text of the hint is rendered in the appropriate location for the database backend in use, relative to the given :class:`.Table` or :class:`.Alias` passed as the ``selectable`` argument. The dialect implementation typically uses Python string substitution syntax with the token ``%(name)s`` to render the name of the table or alias. E.g. when using Oracle, the following:: select([mytable]).\\ with_hint(mytable, "+ index(%(name)s ix_mytable)") Would render SQL as:: select /*+ index(mytable ix_mytable) */ ... from mytable The ``dialect_name`` option will limit the rendering of a particular hint to a particular backend. Such as, to add hints for both Oracle and Sybase simultaneously:: select([mytable]).\\ with_hint(mytable, "+ index(%(name)s ix_mytable)", 'oracle').\\ with_hint(mytable, "WITH INDEX ix_mytable", 'sybase') """ self._hints = self._hints.union( {(selectable, dialect_name): text}) @property def type(self): raise exc.InvalidRequestError("Select objects don't have a type. " "Call as_scalar() on this Select object " "to return a 'scalar' version of this Select.") @_memoized_property.method def locate_all_froms(self): """return a Set of all FromClause elements referenced by this Select. This set is a superset of that returned by the ``froms`` property, which is specifically for those FromClause elements that would actually be rendered. """ froms = self._froms return froms + list(_from_objects(*froms)) @property def inner_columns(self): """an iterator of all ColumnElement expressions which would be rendered into the columns clause of the resulting SELECT statement. """ return _select_iterables(self._raw_columns) def is_derived_from(self, fromclause): if self in fromclause._cloned_set: return True for f in self.locate_all_froms(): if f.is_derived_from(fromclause): return True return False def _copy_internals(self, clone=_clone, **kw): # Select() object has been cloned and probably adapted by the # given clone function. Apply the cloning function to internal # objects # 1. keep a dictionary of the froms we've cloned, and what # they've become. This is consulted later when we derive # additional froms from "whereclause" and the columns clause, # which may still reference the uncloned parent table. # as of 0.7.4 we also put the current version of _froms, which # gets cleared on each generation. previously we were "baking" # _froms into self._from_obj. self._from_cloned = from_cloned = dict((f, clone(f, **kw)) for f in self._from_obj.union(self._froms)) # 3. update persistent _from_obj with the cloned versions. self._from_obj = util.OrderedSet(from_cloned[f] for f in self._from_obj) # the _correlate collection is done separately, what can happen # here is the same item is _correlate as in _from_obj but the # _correlate version has an annotation on it - (specifically # RelationshipProperty.Comparator._criterion_exists() does # this). Also keep _correlate liberally open with it's previous # contents, as this set is used for matching, not rendering. self._correlate = set(clone(f) for f in self._correlate).union(self._correlate) # 4. clone other things. 
The difficulty here is that Column # objects are not actually cloned, and refer to their original # .table, resulting in the wrong "from" parent after a clone # operation. Hence _from_cloned and _from_obj supercede what is # present here. self._raw_columns = [clone(c, **kw) for c in self._raw_columns] for attr in '_whereclause', '_having', '_order_by_clause', \ '_group_by_clause': if getattr(self, attr) is not None: setattr(self, attr, clone(getattr(self, attr), **kw)) # erase exported column list, _froms collection, # etc. self._reset_exported() def get_children(self, column_collections=True, **kwargs): """return child elements as per the ClauseElement specification.""" return (column_collections and list(self.columns) or []) + \ self._raw_columns + list(self._froms) + \ [x for x in (self._whereclause, self._having, self._order_by_clause, self._group_by_clause) if x is not None] @_generative def column(self, column): """return a new select() construct with the given column expression added to its columns clause. """ self.append_column(column) def reduce_columns(self, only_synonyms=True): """Return a new :func`.select` construct with redundantly named, equivalently-valued columns removed from the columns clause. "Redundant" here means two columns where one refers to the other either based on foreign key, or via a simple equality comparison in the WHERE clause of the statement. The primary purpose of this method is to automatically construct a select statement with all uniquely-named columns, without the need to use table-qualified labels as :meth:`.apply_labels` does. When columns are omitted based on foreign key, the referred-to column is the one that's kept. When columns are omitted based on WHERE eqivalence, the first column in the columns clause is the one that's kept. :param only_synonyms: when True, limit the removal of columns to those which have the same name as the equivalent. Otherwise, all columns that are equivalent to another are removed. .. versionadded:: 0.8 """ return self.with_only_columns( sqlutil.reduce_columns( self.inner_columns, only_synonyms=only_synonyms, *(self._whereclause, ) + tuple(self._from_obj) ) ) @_generative def with_only_columns(self, columns): """Return a new :func:`.select` construct with its columns clause replaced with the given columns. .. versionchanged:: 0.7.3 Due to a bug fix, this method has a slight behavioral change as of version 0.7.3. Prior to version 0.7.3, the FROM clause of a :func:`.select` was calculated upfront and as new columns were added; in 0.7.3 and later it's calculated at compile time, fixing an issue regarding late binding of columns to parent tables. This changes the behavior of :meth:`.Select.with_only_columns` in that FROM clauses no longer represented in the new list are dropped, but this behavior is more consistent in that the FROM clauses are consistently derived from the current columns clause. The original intent of this method is to allow trimming of the existing columns list to be fewer columns than originally present; the use case of replacing the columns list with an entirely different one hadn't been anticipated until 0.7.3 was released; the usage guidelines below illustrate how this should be done. This method is exactly equivalent to as if the original :func:`.select` had been called with the given columns clause. I.e. 
a statement:: s = select([table1.c.a, table1.c.b]) s = s.with_only_columns([table1.c.b]) should be exactly equivalent to:: s = select([table1.c.b]) This means that FROM clauses which are only derived from the column list will be discarded if the new column list no longer contains that FROM:: >>> table1 = table('t1', column('a'), column('b')) >>> table2 = table('t2', column('a'), column('b')) >>> s1 = select([table1.c.a, table2.c.b]) >>> print s1 SELECT t1.a, t2.b FROM t1, t2 >>> s2 = s1.with_only_columns([table2.c.b]) >>> print s2 SELECT t2.b FROM t1 The preferred way to maintain a specific FROM clause in the construct, assuming it won't be represented anywhere else (i.e. not in the WHERE clause, etc.) is to set it using :meth:`.Select.select_from`:: >>> s1 = select([table1.c.a, table2.c.b]).\\ ... select_from(table1.join(table2, ... table1.c.a==table2.c.a)) >>> s2 = s1.with_only_columns([table2.c.b]) >>> print s2 SELECT t2.b FROM t1 JOIN t2 ON t1.a=t2.a Care should also be taken to use the correct set of column objects passed to :meth:`.Select.with_only_columns`. Since the method is essentially equivalent to calling the :func:`.select` construct in the first place with the given columns, the columns passed to :meth:`.Select.with_only_columns` should usually be a subset of those which were passed to the :func:`.select` construct, not those which are available from the ``.c`` collection of that :func:`.select`. That is:: s = select([table1.c.a, table1.c.b]).select_from(table1) s = s.with_only_columns([table1.c.b]) and **not**:: # usually incorrect s = s.with_only_columns([s.c.b]) The latter would produce the SQL:: SELECT b FROM (SELECT t1.a AS a, t1.b AS b FROM t1), t1 Since the :func:`.select` construct is essentially being asked to select both from ``table1`` as well as itself. """ self._reset_exported() rc = [] for c in columns: c = _interpret_as_column_or_from(c) if isinstance(c, ScalarSelect): c = c.self_group(against=operators.comma_op) rc.append(c) self._raw_columns = rc @_generative def where(self, whereclause): """return a new select() construct with the given expression added to its WHERE clause, joined to the existing clause via AND, if any. """ self.append_whereclause(whereclause) @_generative def having(self, having): """return a new select() construct with the given expression added to its HAVING clause, joined to the existing clause via AND, if any. """ self.append_having(having) @_generative def distinct(self, *expr): """Return a new select() construct which will apply DISTINCT to its columns clause. :param \*expr: optional column expressions. When present, the Postgresql dialect will render a ``DISTINCT ON (>)`` construct. """ if expr: expr = [_literal_as_text(e) for e in expr] if isinstance(self._distinct, list): self._distinct = self._distinct + expr else: self._distinct = expr else: self._distinct = True @_generative def select_from(self, fromclause): """return a new :func:`.select` construct with the given FROM expression merged into its list of FROM objects. E.g.:: table1 = table('t1', column('a')) table2 = table('t2', column('b')) s = select([table1.c.a]).\\ select_from( table1.join(table2, table1.c.a==table2.c.b) ) The "from" list is a unique set on the identity of each element, so adding an already present :class:`.Table` or other selectable will have no effect. 
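        For example, a rough sketch (``table1`` as in the example above)::

            stmt = select([table1.c.a]).select_from(table1).select_from(table1)
            # the rendered FROM clause still contains "t1" only once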
Passing a :class:`.Join` that refers to an already present :class:`.Table` or other selectable will have the effect of concealing the presence of that selectable as an individual element in the rendered FROM list, instead rendering it into a JOIN clause. While the typical purpose of :meth:`.Select.select_from` is to replace the default, derived FROM clause with a join, it can also be called with individual table elements, multiple times if desired, in the case that the FROM clause cannot be fully derived from the columns clause:: select([func.count('*')]).select_from(table1) """ self.append_from(fromclause) @_generative def correlate(self, *fromclauses): """return a new :class:`.Select` which will correlate the given FROM clauses to that of an enclosing :class:`.Select`. Calling this method turns off the :class:`.Select` object's default behavior of "auto-correlation". Normally, FROM elements which appear in a :class:`.Select` that encloses this one via its :term:`WHERE clause`, ORDER BY, HAVING or :term:`columns clause` will be omitted from this :class:`.Select` object's :term:`FROM clause`. Setting an explicit correlation collection using the :meth:`.Select.correlate` method provides a fixed list of FROM objects that can potentially take place in this process. When :meth:`.Select.correlate` is used to apply specific FROM clauses for correlation, the FROM elements become candidates for correlation regardless of how deeply nested this :class:`.Select` object is, relative to an enclosing :class:`.Select` which refers to the same FROM object. This is in contrast to the behavior of "auto-correlation" which only correlates to an immediate enclosing :class:`.Select`. Multi-level correlation ensures that the link between enclosed and enclosing :class:`.Select` is always via at least one WHERE/ORDER BY/HAVING/columns clause in order for correlation to take place. If ``None`` is passed, the :class:`.Select` object will correlate none of its FROM entries, and all will render unconditionally in the local FROM clause. :param \*fromclauses: a list of one or more :class:`.FromClause` constructs, or other compatible constructs (i.e. ORM-mapped classes) to become part of the correlate collection. .. versionchanged:: 0.8.0 ORM-mapped classes are accepted by :meth:`.Select.correlate`. .. versionchanged:: 0.8.0 The :meth:`.Select.correlate` method no longer unconditionally removes entries from the FROM clause; instead, the candidate FROM entries must also be matched by a FROM entry located in an enclosing :class:`.Select`, which ultimately encloses this one as present in the WHERE clause, ORDER BY clause, HAVING clause, or columns clause of an enclosing :meth:`.Select`. .. versionchanged:: 0.8.2 explicit correlation takes place via any level of nesting of :class:`.Select` objects; in previous 0.8 versions, correlation would only occur relative to the immediate enclosing :class:`.Select` construct. .. seealso:: :meth:`.Select.correlate_except` :ref:`correlated_subqueries` """ self._auto_correlate = False if fromclauses and fromclauses[0] is None: self._correlate = () else: self._correlate = set(self._correlate).union( _interpret_as_from(f) for f in fromclauses) @_generative def correlate_except(self, *fromclauses): """return a new :class:`.Select` which will omit the given FROM clauses from the auto-correlation process. Calling :meth:`.Select.correlate_except` turns off the :class:`.Select` object's default behavior of "auto-correlation" for the given FROM elements. 
An element specified here will unconditionally appear in the FROM list, while all other FROM elements remain subject to normal auto-correlation behaviors. .. versionchanged:: 0.8.2 The :meth:`.Select.correlate_except` method was improved to fully prevent FROM clauses specified here from being omitted from the immediate FROM clause of this :class:`.Select`. If ``None`` is passed, the :class:`.Select` object will correlate all of its FROM entries. .. versionchanged:: 0.8.2 calling ``correlate_except(None)`` will correctly auto-correlate all FROM clauses. :param \*fromclauses: a list of one or more :class:`.FromClause` constructs, or other compatible constructs (i.e. ORM-mapped classes) to become part of the correlate-exception collection. .. seealso:: :meth:`.Select.correlate` :ref:`correlated_subqueries` """ self._auto_correlate = False if fromclauses and fromclauses[0] is None: self._correlate_except = () else: self._correlate_except = set(self._correlate_except or ()).union( _interpret_as_from(f) for f in fromclauses) def append_correlation(self, fromclause): """append the given correlation expression to this select() construct. This is an **in-place** mutation method; the :meth:`~.Select.correlate` method is preferred, as it provides standard :term:`method chaining`. """ self._auto_correlate = False self._correlate = set(self._correlate).union( _interpret_as_from(f) for f in fromclause) def append_column(self, column): """append the given column expression to the columns clause of this select() construct. This is an **in-place** mutation method; the :meth:`~.Select.column` method is preferred, as it provides standard :term:`method chaining`. """ self._reset_exported() column = _interpret_as_column_or_from(column) if isinstance(column, ScalarSelect): column = column.self_group(against=operators.comma_op) self._raw_columns = self._raw_columns + [column] def append_prefix(self, clause): """append the given columns clause prefix expression to this select() construct. This is an **in-place** mutation method; the :meth:`~.Select.prefix_with` method is preferred, as it provides standard :term:`method chaining`. """ clause = _literal_as_text(clause) self._prefixes = self._prefixes + (clause,) def append_whereclause(self, whereclause): """append the given expression to this select() construct's WHERE criterion. The expression will be joined to existing WHERE criterion via AND. This is an **in-place** mutation method; the :meth:`~.Select.where` method is preferred, as it provides standard :term:`method chaining`. """ self._reset_exported() whereclause = _literal_as_text(whereclause) if self._whereclause is not None: self._whereclause = and_(self._whereclause, whereclause) else: self._whereclause = whereclause def append_having(self, having): """append the given expression to this select() construct's HAVING criterion. The expression will be joined to existing HAVING criterion via AND. This is an **in-place** mutation method; the :meth:`~.Select.having` method is preferred, as it provides standard :term:`method chaining`. """ if self._having is not None: self._having = and_(self._having, _literal_as_text(having)) else: self._having = _literal_as_text(having) def append_from(self, fromclause): """append the given FromClause expression to this select() construct's FROM clause. This is an **in-place** mutation method; the :meth:`~.Select.select_from` method is preferred, as it provides standard :term:`method chaining`. 
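For example, the two forms below are intended to be equivalent, the first mutating ``s`` in place and the second returning a new construct (a brief sketch assuming a ``table1`` construct)::

    s.append_from(table1)        # in-place
    s = s.select_from(table1)    # generative, preferred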
""" self._reset_exported() fromclause = _interpret_as_from(fromclause) self._from_obj = self._from_obj.union([fromclause]) @_memoized_property def _columns_plus_names(self): if self.use_labels: names = set() def name_for_col(c): if c._label is None: return (None, c) name = c._label if name in names: name = c.anon_label else: names.add(name) return name, c return [ name_for_col(c) for c in util.unique_list(_select_iterables(self._raw_columns)) ] else: return [ (None, c) for c in util.unique_list(_select_iterables(self._raw_columns)) ] def _populate_column_collection(self): for name, c in self._columns_plus_names: if not hasattr(c, '_make_proxy'): continue if name is None: key = None elif self.use_labels: key = c._key_label if key is not None and key in self.c: key = c.anon_label else: key = None c._make_proxy(self, key=key, name=name, name_is_truncatable=True) def _refresh_for_new_column(self, column): for fromclause in self._froms: col = fromclause._refresh_for_new_column(column) if col is not None: if col in self.inner_columns and self._cols_populated: our_label = col._key_label if self.use_labels else col.key if our_label not in self.c: return col._make_proxy(self, name=col._label if self.use_labels else None, key=col._key_label if self.use_labels else None, name_is_truncatable=True) return None return None def self_group(self, against=None): """return a 'grouping' construct as per the ClauseElement specification. This produces an element that can be embedded in an expression. Note that this method is called automatically as needed when constructing expressions and should not require explicit use. """ if isinstance(against, CompoundSelect): return self return FromGrouping(self) def union(self, other, **kwargs): """return a SQL UNION of this select() construct against the given selectable.""" return union(self, other, **kwargs) def union_all(self, other, **kwargs): """return a SQL UNION ALL of this select() construct against the given selectable. """ return union_all(self, other, **kwargs) def except_(self, other, **kwargs): """return a SQL EXCEPT of this select() construct against the given selectable.""" return except_(self, other, **kwargs) def except_all(self, other, **kwargs): """return a SQL EXCEPT ALL of this select() construct against the given selectable. """ return except_all(self, other, **kwargs) def intersect(self, other, **kwargs): """return a SQL INTERSECT of this select() construct against the given selectable. """ return intersect(self, other, **kwargs) def intersect_all(self, other, **kwargs): """return a SQL INTERSECT ALL of this select() construct against the given selectable. """ return intersect_all(self, other, **kwargs) def bind(self): if self._bind: return self._bind froms = self._froms if not froms: for c in self._raw_columns: e = c.bind if e: self._bind = e return e else: e = list(froms)[0].bind if e: self._bind = e return e return None def _set_bind(self, bind): self._bind = bind bind = property(bind, _set_bind) class UpdateBase(HasPrefixes, Executable, ClauseElement): """Form the base for ``INSERT``, ``UPDATE``, and ``DELETE`` statements. 
""" __visit_name__ = 'update_base' _execution_options = \ Executable._execution_options.union({'autocommit': True}) kwargs = util.immutabledict() _hints = util.immutabledict() _prefixes = () def _process_colparams(self, parameters): def process_single(p): if isinstance(p, (list, tuple)): return dict( (c.key, pval) for c, pval in zip(self.table.c, p) ) else: return p if isinstance(parameters, (list, tuple)) and \ isinstance(parameters[0], (list, tuple, dict)): if not self._supports_multi_parameters: raise exc.InvalidRequestError( "This construct does not support " "multiple parameter sets.") return [process_single(p) for p in parameters], True else: return process_single(parameters), False def params(self, *arg, **kw): """Set the parameters for the statement. This method raises ``NotImplementedError`` on the base class, and is overridden by :class:`.ValuesBase` to provide the SET/VALUES clause of UPDATE and INSERT. """ raise NotImplementedError( "params() is not supported for INSERT/UPDATE/DELETE statements." " To set the values for an INSERT or UPDATE statement, use" " stmt.values(**parameters).") def bind(self): """Return a 'bind' linked to this :class:`.UpdateBase` or a :class:`.Table` associated with it. """ return self._bind or self.table.bind def _set_bind(self, bind): self._bind = bind bind = property(bind, _set_bind) @_generative def returning(self, *cols): """Add a RETURNING or equivalent clause to this statement. The given list of columns represent columns within the table that is the target of the INSERT, UPDATE, or DELETE. Each element can be any column expression. :class:`~sqlalchemy.schema.Table` objects will be expanded into their individual columns. Upon compilation, a RETURNING clause, or database equivalent, will be rendered within the statement. For INSERT and UPDATE, the values are the newly inserted/updated values. For DELETE, the values are those of the rows which were deleted. Upon execution, the values of the columns to be returned are made available via the result set and can be iterated using ``fetchone()`` and similar. For DBAPIs which do not natively support returning values (i.e. cx_oracle), SQLAlchemy will approximate this behavior at the result level so that a reasonable amount of behavioral neutrality is provided. Note that not all databases/DBAPIs support RETURNING. For those backends with no support, an exception is raised upon compilation and/or execution. For those who do support it, the functionality across backends varies greatly, including restrictions on executemany() and other statements which return multiple rows. Please read the documentation notes for the database in use in order to determine the availability of RETURNING. """ self._returning = cols @_generative def with_hint(self, text, selectable=None, dialect_name="*"): """Add a table hint for a single table to this INSERT/UPDATE/DELETE statement. .. note:: :meth:`.UpdateBase.with_hint` currently applies only to Microsoft SQL Server. For MySQL INSERT/UPDATE/DELETE hints, use :meth:`.UpdateBase.prefix_with`. The text of the hint is rendered in the appropriate location for the database backend in use, relative to the :class:`.Table` that is the subject of this statement, or optionally to that of the given :class:`.Table` passed as the ``selectable`` argument. The ``dialect_name`` option will limit the rendering of a particular hint to a particular backend. Such as, to add a hint that only takes effect for SQL Server:: mytable.insert().with_hint("WITH (PAGLOCK)", dialect_name="mssql") .. 
versionadded:: 0.7.6 :param text: Text of the hint. :param selectable: optional :class:`.Table` that specifies an element of the FROM clause within an UPDATE or DELETE to be the subject of the hint - applies only to certain backends. :param dialect_name: defaults to ``*``, if specified as the name of a particular dialect, will apply these hints only when that dialect is in use. """ if selectable is None: selectable = self.table self._hints = self._hints.union( {(selectable, dialect_name): text}) class ValuesBase(UpdateBase): """Supplies support for :meth:`.ValuesBase.values` to INSERT and UPDATE constructs.""" __visit_name__ = 'values_base' _supports_multi_parameters = False _has_multi_parameters = False select = None def __init__(self, table, values, prefixes): self.table = _interpret_as_from(table) self.parameters, self._has_multi_parameters = \ self._process_colparams(values) if prefixes: self._setup_prefixes(prefixes) @_generative def values(self, *args, **kwargs): """specify a fixed VALUES clause for an INSERT statement, or the SET clause for an UPDATE. Note that the :class:`.Insert` and :class:`.Update` constructs support per-execution time formatting of the VALUES and/or SET clauses, based on the arguments passed to :meth:`.Connection.execute`. However, the :meth:`.ValuesBase.values` method can be used to "fix" a particular set of parameters into the statement. Multiple calls to :meth:`.ValuesBase.values` will produce a new construct, each one with the parameter list modified to include the new parameters sent. In the typical case of a single dictionary of parameters, the newly passed keys will replace the same keys in the previous construct. In the case of a list-based "multiple values" construct, each new list of values is extended onto the existing list of values. :param \**kwargs: key value pairs representing the string key of a :class:`.Column` mapped to the value to be rendered into the VALUES or SET clause:: users.insert().values(name="some name") users.update().where(users.c.id==5).values(name="some name") :param \*args: Alternatively, a dictionary, tuple or list of dictionaries or tuples can be passed as a single positional argument in order to form the VALUES or SET clause of the statement. The single dictionary form works the same as the kwargs form:: users.insert().values({"name": "some name"}) If a tuple is passed, the tuple should contain the same number of columns as the target :class:`.Table`:: users.insert().values((5, "some name")) The :class:`.Insert` construct also supports multiply-rendered VALUES construct, for those backends which support this SQL syntax (SQLite, Postgresql, MySQL). This mode is indicated by passing a list of one or more dictionaries/tuples:: users.insert().values([ {"name": "some name"}, {"name": "some other name"}, {"name": "yet another name"}, ]) In the case of an :class:`.Update` construct, only the single dictionary/tuple form is accepted, else an exception is raised. It is also an exception case to attempt to mix the single-/multiple- value styles together, either through multiple :meth:`.ValuesBase.values` calls or by sending a list + kwargs at the same time. .. note:: Passing a multiple values list is *not* the same as passing a multiple values list to the :meth:`.Connection.execute` method. Passing a list of parameter sets to :meth:`.ValuesBase.values` produces a construct of this form:: INSERT INTO table (col1, col2, col3) VALUES (col1_0, col2_0, col3_0), (col1_1, col2_1, col3_1), ... 
whereas a multiple list passed to :meth:`.Connection.execute` has the effect of using the DBAPI `executemany() `_ method, which provides a high-performance system of invoking a single-row INSERT statement many times against a series of parameter sets. The "executemany" style is supported by all database backends, as it does not depend on a special SQL syntax. .. versionadded:: 0.8 Support for multiple-VALUES INSERT statements. .. seealso:: :ref:`inserts_and_updates` - SQL Expression Language Tutorial :func:`~.expression.insert` - produce an ``INSERT`` statement :func:`~.expression.update` - produce an ``UPDATE`` statement """ if self.select is not None: raise exc.InvalidRequestError( "This construct already inserts from a SELECT") if self._has_multi_parameters and kwargs: raise exc.InvalidRequestError( "This construct already has multiple parameter sets.") if args: if len(args) > 1: raise exc.ArgumentError( "Only a single dictionary/tuple or list of " "dictionaries/tuples is accepted positionally.") v = args[0] else: v = {} if self.parameters is None: self.parameters, self._has_multi_parameters = \ self._process_colparams(v) else: if self._has_multi_parameters: self.parameters = list(self.parameters) p, self._has_multi_parameters = self._process_colparams(v) if not self._has_multi_parameters: raise exc.ArgumentError( "Can't mix single-values and multiple values " "formats in one statement") self.parameters.extend(p) else: self.parameters = self.parameters.copy() p, self._has_multi_parameters = self._process_colparams(v) if self._has_multi_parameters: raise exc.ArgumentError( "Can't mix single-values and multiple values " "formats in one statement") self.parameters.update(p) if kwargs: if self._has_multi_parameters: raise exc.ArgumentError( "Can't pass kwargs and multiple parameter sets " "simultaenously") else: self.parameters.update(kwargs) class Insert(ValuesBase): """Represent an INSERT construct. The :class:`.Insert` object is created using the :func:`~.expression.insert()` function. .. seealso:: :ref:`coretutorial_insert_expressions` """ __visit_name__ = 'insert' _supports_multi_parameters = True def __init__(self, table, values=None, inline=False, bind=None, prefixes=None, returning=None, **kwargs): ValuesBase.__init__(self, table, values, prefixes) self._bind = bind self.select = None self.inline = inline self._returning = returning self.kwargs = kwargs def get_children(self, **kwargs): if self.select is not None: return self.select, else: return () @_generative def from_select(self, names, select): """Return a new :class:`.Insert` construct which represents an ``INSERT...FROM SELECT`` statement. e.g.:: sel = select([table1.c.a, table1.c.b]).where(table1.c.c > 5) ins = table2.insert().from_select(['a', 'b'], sel) :param names: a sequence of string column names or :class:`.Column` objects representing the target columns. :param select: a :func:`.select` construct, :class:`.FromClause` or other construct which resolves into a :class:`.FromClause`, such as an ORM :class:`.Query` object, etc. The order of columns returned from this FROM clause should correspond to the order of columns sent as the ``names`` parameter; while this is not checked before passing along to the database, the database would normally raise an exception if these column lists don't correspond. .. 
note:: Depending on backend, it may be necessary for the :class:`.Insert` statement to be constructed using the ``inline=True`` flag; this flag will prevent the implicit usage of ``RETURNING`` when the ``INSERT`` statement is rendered, which isn't supported on a backend such as Oracle in conjunction with an ``INSERT..SELECT`` combination:: sel = select([table1.c.a, table1.c.b]).where(table1.c.c > 5) ins = table2.insert(inline=True).from_select(['a', 'b'], sel) .. versionadded:: 0.8.3 """ if self.parameters: raise exc.InvalidRequestError( "This construct already inserts value expressions") self.parameters, self._has_multi_parameters = \ self._process_colparams(dict((n, null()) for n in names)) self.select = _interpret_as_select(select) def _copy_internals(self, clone=_clone, **kw): # TODO: coverage self.parameters = self.parameters.copy() if self.select is not None: self.select = _clone(self.select) class Update(ValuesBase): """Represent an Update construct. The :class:`.Update` object is created using the :func:`update()` function. """ __visit_name__ = 'update' def __init__(self, table, whereclause, values=None, inline=False, bind=None, prefixes=None, returning=None, **kwargs): ValuesBase.__init__(self, table, values, prefixes) self._bind = bind self._returning = returning if whereclause is not None: self._whereclause = _literal_as_text(whereclause) else: self._whereclause = None self.inline = inline self.kwargs = kwargs def get_children(self, **kwargs): if self._whereclause is not None: return self._whereclause, else: return () def _copy_internals(self, clone=_clone, **kw): # TODO: coverage self._whereclause = clone(self._whereclause, **kw) self.parameters = self.parameters.copy() @_generative def where(self, whereclause): """return a new update() construct with the given expression added to its WHERE clause, joined to the existing clause via AND, if any. """ if self._whereclause is not None: self._whereclause = and_(self._whereclause, _literal_as_text(whereclause)) else: self._whereclause = _literal_as_text(whereclause) @property def _extra_froms(self): # TODO: this could be made memoized # if the memoization is reset on each generative call. froms = [] seen = set([self.table]) if self._whereclause is not None: for item in _from_objects(self._whereclause): if not seen.intersection(item._cloned_set): froms.append(item) seen.update(item._cloned_set) return froms class Delete(UpdateBase): """Represent a DELETE construct. The :class:`.Delete` object is created using the :func:`delete()` function. 
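E.g., a minimal sketch assuming a ``users`` table and a ``conn`` connection::

    stmt = users.delete().where(users.c.id == 5)
    conn.execute(stmt)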
""" __visit_name__ = 'delete' def __init__(self, table, whereclause, bind=None, returning=None, prefixes=None, **kwargs): self._bind = bind self.table = _interpret_as_from(table) self._returning = returning if prefixes: self._setup_prefixes(prefixes) if whereclause is not None: self._whereclause = _literal_as_text(whereclause) else: self._whereclause = None self.kwargs = kwargs def get_children(self, **kwargs): if self._whereclause is not None: return self._whereclause, else: return () @_generative def where(self, whereclause): """Add the given WHERE clause to a newly returned delete construct.""" if self._whereclause is not None: self._whereclause = and_(self._whereclause, _literal_as_text(whereclause)) else: self._whereclause = _literal_as_text(whereclause) def _copy_internals(self, clone=_clone, **kw): # TODO: coverage self._whereclause = clone(self._whereclause, **kw) class _IdentifiedClause(Executable, ClauseElement): __visit_name__ = 'identified' _execution_options = \ Executable._execution_options.union({'autocommit': False}) quote = None def __init__(self, ident): self.ident = ident class SavepointClause(_IdentifiedClause): __visit_name__ = 'savepoint' class RollbackToSavepointClause(_IdentifiedClause): __visit_name__ = 'rollback_to_savepoint' class ReleaseSavepointClause(_IdentifiedClause): __visit_name__ = 'release_savepoint' # old names for compatibility _BindParamClause = BindParameter _Label = Label _SelectBase = SelectBase _BinaryExpression = BinaryExpression _Cast = Cast _Null = Null _False = False_ _True = True_ _TextClause = TextClause _UnaryExpression = UnaryExpression _Case = Case _Tuple = Tuple _Over = Over _Generative = Generative _TypeClause = TypeClause _Extract = Extract _Exists = Exists _Grouping = Grouping _FromGrouping = FromGrouping _ScalarSelect = ScalarSelect SQLAlchemy-0.8.4/lib/sqlalchemy/sql/functions.py0000644000076500000240000001613412251150015022317 0ustar classicstaff00000000000000# sql/functions.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php from .. import types as sqltypes, schema from .expression import ( ClauseList, Function, _literal_as_binds, literal_column, _type_from_args, cast, extract ) from . import operators from .visitors import VisitableType from .. import util _registry = util.defaultdict(dict) def register_function(identifier, fn, package="_default"): """Associate a callable with a particular func. name. This is normally called by _GenericMeta, but is also available by itself so that a non-Function construct can be associated with the :data:`.func` accessor (i.e. CAST, EXTRACT). """ reg = _registry[package] reg[identifier] = fn class _GenericMeta(VisitableType): def __init__(cls, clsname, bases, clsdict): cls.name = name = clsdict.get('name', clsname) cls.identifier = identifier = clsdict.get('identifier', name) package = clsdict.pop('package', '_default') # legacy if '__return_type__' in clsdict: cls.type = clsdict['__return_type__'] register_function(identifier, cls, package) super(_GenericMeta, cls).__init__(clsname, bases, clsdict) class GenericFunction(Function): """Define a 'generic' function. A generic function is a pre-established :class:`.Function` class that is instantiated automatically when called by name from the :data:`.func` attribute. 
Note that calling any name from :data:`.func` has the effect that a new :class:`.Function` instance is created automatically, given that name. The primary use case for defining a :class:`.GenericFunction` class is so that a function of a particular name may be given a fixed return type. It can also include custom argument parsing schemes as well as additional methods. Subclasses of :class:`.GenericFunction` are automatically registered under the name of the class. For example, a user-defined function ``as_utc()`` would be available immediately:: from sqlalchemy.sql.functions import GenericFunction from sqlalchemy.types import DateTime class as_utc(GenericFunction): type = DateTime print select([func.as_utc()]) User-defined generic functions can be organized into packages by specifying the "package" attribute when defining :class:`.GenericFunction`. Third party libraries containing many functions may want to use this in order to avoid name conflicts with other systems. For example, if our ``as_utc()`` function were part of a package "time":: class as_utc(GenericFunction): type = DateTime package = "time" The above function would be available from :data:`.func` using the package name ``time``:: print select([func.time.as_utc()]) A final option is to allow the function to be accessed from one name in :data:`.func` but to render as a different name. The ``identifier`` attribute will override the name used to access the function as loaded from :data:`.func`, but will retain the usage of ``name`` as the rendered name:: class GeoBuffer(GenericFunction): type = Geometry package = "geo" name = "ST_Buffer" identifier = "buffer" The above function will render as follows:: >>> print func.geo.buffer() ST_Buffer() .. versionadded:: 0.8 :class:`.GenericFunction` now supports automatic registration of new functions as well as package and custom naming support. .. versionchanged:: 0.8 The attribute name ``type`` is used to specify the function's return type at the class level. Previously, the name ``__return_type__`` was used. This name is still recognized for backwards-compatibility. """ __metaclass__ = _GenericMeta coerce_arguments = True def __init__(self, *args, **kwargs): parsed_args = kwargs.pop('_parsed_args', None) if parsed_args is None: parsed_args = [_literal_as_binds(c) for c in args] self.packagenames = [] self._bind = kwargs.get('bind', None) self.clause_expr = ClauseList( operator=operators.comma_op, group_contents=True, *parsed_args).self_group() self.type = sqltypes.to_instance( kwargs.pop("type_", None) or getattr(self, 'type', None)) register_function("cast", cast) register_function("extract", extract) class next_value(GenericFunction): """Represent the 'next value', given a :class:`.Sequence` as it's single argument. Compiles into the appropriate function on each backend, or will raise NotImplementedError if used on a backend that does not provide support for sequences. """ type = sqltypes.Integer() name = "next_value" def __init__(self, seq, **kw): assert isinstance(seq, schema.Sequence), \ "next_value() accepts a Sequence object as input." 
self._bind = kw.get('bind', None) self.sequence = seq @property def _from_objects(self): return [] class AnsiFunction(GenericFunction): def __init__(self, **kwargs): GenericFunction.__init__(self, **kwargs) class ReturnTypeFromArgs(GenericFunction): """Define a function whose return type is the same as its arguments.""" def __init__(self, *args, **kwargs): args = [_literal_as_binds(c) for c in args] kwargs.setdefault('type_', _type_from_args(args)) kwargs['_parsed_args'] = args GenericFunction.__init__(self, *args, **kwargs) class coalesce(ReturnTypeFromArgs): pass class max(ReturnTypeFromArgs): pass class min(ReturnTypeFromArgs): pass class sum(ReturnTypeFromArgs): pass class now(GenericFunction): type = sqltypes.DateTime class concat(GenericFunction): type = sqltypes.String class char_length(GenericFunction): type = sqltypes.Integer def __init__(self, arg, **kwargs): GenericFunction.__init__(self, arg, **kwargs) class random(GenericFunction): pass class count(GenericFunction): """The ANSI COUNT aggregate function. With no arguments, emits COUNT \*. """ type = sqltypes.Integer def __init__(self, expression=None, **kwargs): if expression is None: expression = literal_column('*') GenericFunction.__init__(self, expression, **kwargs) class current_date(AnsiFunction): type = sqltypes.Date class current_time(AnsiFunction): type = sqltypes.Time class current_timestamp(AnsiFunction): type = sqltypes.DateTime class current_user(AnsiFunction): type = sqltypes.String class localtime(AnsiFunction): type = sqltypes.DateTime class localtimestamp(AnsiFunction): type = sqltypes.DateTime class session_user(AnsiFunction): type = sqltypes.String class sysdate(AnsiFunction): type = sqltypes.DateTime class user(AnsiFunction): type = sqltypes.String SQLAlchemy-0.8.4/lib/sqlalchemy/sql/operators.py0000644000076500000240000005107212251150015022325 0ustar classicstaff00000000000000# sql/operators.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Defines operators used in SQL expressions.""" from operator import ( and_, or_, inv, add, mul, sub, mod, truediv, lt, le, ne, gt, ge, eq, neg, getitem, lshift, rshift ) # Py2K from operator import (div,) # end Py2K from ..util import symbol class Operators(object): """Base of comparison and logical operators. Implements base methods :meth:`~sqlalchemy.sql.operators.Operators.operate` and :meth:`~sqlalchemy.sql.operators.Operators.reverse_operate`, as well as :meth:`~sqlalchemy.sql.operators.Operators.__and__`, :meth:`~sqlalchemy.sql.operators.Operators.__or__`, :meth:`~sqlalchemy.sql.operators.Operators.__invert__`. Usually is used via its most common subclass :class:`.ColumnOperators`. """ def __and__(self, other): """Implement the ``&`` operator. When used with SQL expressions, results in an AND operation, equivalent to :func:`~.expression.and_`, that is:: a & b is equivalent to:: from sqlalchemy import and_ and_(a, b) Care should be taken when using ``&`` regarding operator precedence; the ``&`` operator has the highest precedence. The operands should be enclosed in parenthesis if they contain further sub expressions:: (a == 2) & (b == 4) """ return self.operate(and_, other) def __or__(self, other): """Implement the ``|`` operator. 
When used with SQL expressions, results in an OR operation, equivalent to :func:`~.expression.or_`, that is:: a | b is equivalent to:: from sqlalchemy import or_ or_(a, b) Care should be taken when using ``|`` regarding operator precedence; the ``|`` operator has the highest precedence. The operands should be enclosed in parenthesis if they contain further sub expressions:: (a == 2) | (b == 4) """ return self.operate(or_, other) def __invert__(self): """Implement the ``~`` operator. When used with SQL expressions, results in a NOT operation, equivalent to :func:`~.expression.not_`, that is:: ~a is equivalent to:: from sqlalchemy import not_ not_(a) """ return self.operate(inv) def op(self, opstring, precedence=0): """produce a generic operator function. e.g.:: somecolumn.op("*")(5) produces:: somecolumn * 5 This function can also be used to make bitwise operators explicit. For example:: somecolumn.op('&')(0xff) is a bitwise AND of the value in ``somecolumn``. :param operator: a string which will be output as the infix operator between this element and the expression passed to the generated function. :param precedence: precedence to apply to the operator, when parenthesizing expressions. A lower number will cause the expression to be parenthesized when applied against another operator with higher precedence. The default value of ``0`` is lower than all operators except for the comma (``,``) and ``AS`` operators. A value of 100 will be higher or equal to all operators, and -100 will be lower than or equal to all operators. .. versionadded:: 0.8 - added the 'precedence' argument. .. seealso:: :ref:`types_operators` """ operator = custom_op(opstring, precedence) def against(other): return operator(self, other) return against def operate(self, op, *other, **kwargs): """Operate on an argument. This is the lowest level of operation, raises :class:`NotImplementedError` by default. Overriding this on a subclass can allow common behavior to be applied to all operations. For example, overriding :class:`.ColumnOperators` to apply ``func.lower()`` to the left and right side:: class MyComparator(ColumnOperators): def operate(self, op, other): return op(func.lower(self), func.lower(other)) :param op: Operator callable. :param \*other: the 'other' side of the operation. Will be a single scalar for most operations. :param \**kwargs: modifiers. These may be passed by special operators such as :meth:`ColumnOperators.contains`. """ raise NotImplementedError(str(op)) def reverse_operate(self, op, other, **kwargs): """Reverse operate on an argument. Usage is the same as :meth:`operate`. """ raise NotImplementedError(str(op)) class custom_op(object): """Represent a 'custom' operator. :class:`.custom_op` is normally instantitated when the :meth:`.ColumnOperators.op` method is used to create a custom operator callable. The class can also be used directly when programmatically constructing expressions. E.g. 
to represent the "factorial" operation:: from sqlalchemy.sql import UnaryExpression from sqlalchemy.sql import operators from sqlalchemy import Numeric unary = UnaryExpression(table.c.somecolumn, modifier=operators.custom_op("!"), type_=Numeric) """ __name__ = 'custom_op' def __init__(self, opstring, precedence=0): self.opstring = opstring self.precedence = precedence def __eq__(self, other): return isinstance(other, custom_op) and \ other.opstring == self.opstring def __hash__(self): return id(self) def __call__(self, left, right, **kw): return left.operate(self, right, **kw) class ColumnOperators(Operators): """Defines boolean, comparison, and other operators for :class:`.ColumnElement` expressions. By default, all methods call down to :meth:`.operate` or :meth:`.reverse_operate`, passing in the appropriate operator function from the Python builtin ``operator`` module or a SQLAlchemy-specific operator function from :mod:`sqlalchemy.expression.operators`. For example the ``__eq__`` function:: def __eq__(self, other): return self.operate(operators.eq, other) Where ``operators.eq`` is essentially:: def eq(a, b): return a == b The core column expression unit :class:`.ColumnElement` overrides :meth:`.Operators.operate` and others to return further :class:`.ColumnElement` constructs, so that the ``==`` operation above is replaced by a clause construct. See also: :ref:`types_operators` :attr:`.TypeEngine.comparator_factory` :class:`.ColumnOperators` :class:`.PropComparator` """ timetuple = None """Hack, allows datetime objects to be compared on the LHS.""" def __lt__(self, other): """Implement the ``<`` operator. In a column context, produces the clause ``a < b``. """ return self.operate(lt, other) def __le__(self, other): """Implement the ``<=`` operator. In a column context, produces the clause ``a <= b``. """ return self.operate(le, other) __hash__ = Operators.__hash__ def __eq__(self, other): """Implement the ``==`` operator. In a column context, produces the clause ``a = b``. If the target is ``None``, produces ``a IS NULL``. """ return self.operate(eq, other) def __ne__(self, other): """Implement the ``!=`` operator. In a column context, produces the clause ``a != b``. If the target is ``None``, produces ``a IS NOT NULL``. """ return self.operate(ne, other) def __gt__(self, other): """Implement the ``>`` operator. In a column context, produces the clause ``a > b``. """ return self.operate(gt, other) def __ge__(self, other): """Implement the ``>=`` operator. In a column context, produces the clause ``a >= b``. """ return self.operate(ge, other) def __neg__(self): """Implement the ``-`` operator. In a column context, produces the clause ``-a``. """ return self.operate(neg) def __getitem__(self, index): """Implement the [] operator. This can be used by some database-specific types such as Postgresql ARRAY and HSTORE. """ return self.operate(getitem, index) def __lshift__(self, other): """implement the << operator. Not used by SQLAlchemy core, this is provided for custom operator systems which want to use << as an extension point. """ return self.operate(lshift, other) def __rshift__(self, other): """implement the >> operator. Not used by SQLAlchemy core, this is provided for custom operator systems which want to use >> as an extension point. """ return self.operate(rshift, other) def concat(self, other): """Implement the 'concat' operator. In a column context, produces the clause ``a || b``, or uses the ``concat()`` operator on MySQL. 
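E.g., a brief sketch assuming a ``users`` table with a string ``name`` column; the default rendering uses ``||``::

    select([users.c.name.concat('@example.com')])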
""" return self.operate(concat_op, other) def like(self, other, escape=None): """Implement the ``like`` operator. In a column context, produces the clause ``a LIKE other``. E.g.:: select([sometable]).where(sometable.c.column.like("%foobar%")) :param other: expression to be compared :param escape: optional escape character, renders the ``ESCAPE`` keyword, e.g.:: somecolumn.like("foo/%bar", escape="/") .. seealso:: :meth:`.ColumnOperators.ilike` """ return self.operate(like_op, other, escape=escape) def ilike(self, other, escape=None): """Implement the ``ilike`` operator. In a column context, produces the clause ``a ILIKE other``. E.g.:: select([sometable]).where(sometable.c.column.ilike("%foobar%")) :param other: expression to be compared :param escape: optional escape character, renders the ``ESCAPE`` keyword, e.g.:: somecolumn.ilike("foo/%bar", escape="/") .. seealso:: :meth:`.ColumnOperators.like` """ return self.operate(ilike_op, other, escape=escape) def in_(self, other): """Implement the ``in`` operator. In a column context, produces the clause ``a IN other``. "other" may be a tuple/list of column expressions, or a :func:`~.expression.select` construct. """ return self.operate(in_op, other) def notin_(self, other): """implement the ``NOT IN`` operator. This is equivalent to using negation with :meth:`.ColumnOperators.in_`, i.e. ``~x.in_(y)``. .. versionadded:: 0.8 .. seealso:: :meth:`.ColumnOperators.in_` """ return self.operate(notin_op, other) def notlike(self, other, escape=None): """implement the ``NOT LIKE`` operator. This is equivalent to using negation with :meth:`.ColumnOperators.like`, i.e. ``~x.like(y)``. .. versionadded:: 0.8 .. seealso:: :meth:`.ColumnOperators.like` """ return self.operate(notlike_op, other, escape=escape) def notilike(self, other, escape=None): """implement the ``NOT ILIKE`` operator. This is equivalent to using negation with :meth:`.ColumnOperators.ilike`, i.e. ``~x.ilike(y)``. .. versionadded:: 0.8 .. seealso:: :meth:`.ColumnOperators.ilike` """ return self.operate(notilike_op, other, escape=escape) def is_(self, other): """Implement the ``IS`` operator. Normally, ``IS`` is generated automatically when comparing to a value of ``None``, which resolves to ``NULL``. However, explicit usage of ``IS`` may be desirable if comparing to boolean values on certain platforms. .. versionadded:: 0.7.9 .. seealso:: :meth:`.ColumnOperators.isnot` """ return self.operate(is_, other) def isnot(self, other): """Implement the ``IS NOT`` operator. Normally, ``IS NOT`` is generated automatically when comparing to a value of ``None``, which resolves to ``NULL``. However, explicit usage of ``IS NOT`` may be desirable if comparing to boolean values on certain platforms. .. versionadded:: 0.7.9 .. seealso:: :meth:`.ColumnOperators.is_` """ return self.operate(isnot, other) def startswith(self, other, **kwargs): """Implement the ``startwith`` operator. In a column context, produces the clause ``LIKE '%'`` """ return self.operate(startswith_op, other, **kwargs) def endswith(self, other, **kwargs): """Implement the 'endswith' operator. In a column context, produces the clause ``LIKE '%'`` """ return self.operate(endswith_op, other, **kwargs) def contains(self, other, **kwargs): """Implement the 'contains' operator. In a column context, produces the clause ``LIKE '%%'`` """ return self.operate(contains_op, other, **kwargs) def match(self, other, **kwargs): """Implements the 'match' operator. In a column context, this produces a MATCH clause, i.e. ``MATCH ''``. 
The allowed contents of ``other`` are database backend specific. """ return self.operate(match_op, other, **kwargs) def desc(self): """Produce a :func:`~.expression.desc` clause against the parent object.""" return self.operate(desc_op) def asc(self): """Produce a :func:`~.expression.asc` clause against the parent object.""" return self.operate(asc_op) def nullsfirst(self): """Produce a :func:`~.expression.nullsfirst` clause against the parent object.""" return self.operate(nullsfirst_op) def nullslast(self): """Produce a :func:`~.expression.nullslast` clause against the parent object.""" return self.operate(nullslast_op) def collate(self, collation): """Produce a :func:`~.expression.collate` clause against the parent object, given the collation string.""" return self.operate(collate, collation) def __radd__(self, other): """Implement the ``+`` operator in reverse. See :meth:`.ColumnOperators.__add__`. """ return self.reverse_operate(add, other) def __rsub__(self, other): """Implement the ``-`` operator in reverse. See :meth:`.ColumnOperators.__sub__`. """ return self.reverse_operate(sub, other) def __rmul__(self, other): """Implement the ``*`` operator in reverse. See :meth:`.ColumnOperators.__mul__`. """ return self.reverse_operate(mul, other) def __rdiv__(self, other): """Implement the ``/`` operator in reverse. See :meth:`.ColumnOperators.__div__`. """ return self.reverse_operate(div, other) def between(self, cleft, cright): """Produce a :func:`~.expression.between` clause against the parent object, given the lower and upper range.""" return self.operate(between_op, cleft, cright) def distinct(self): """Produce a :func:`~.expression.distinct` clause against the parent object. """ return self.operate(distinct_op) def __add__(self, other): """Implement the ``+`` operator. In a column context, produces the clause ``a + b`` if the parent object has non-string affinity. If the parent object has a string affinity, produces the concatenation operator, ``a || b`` - see :meth:`.ColumnOperators.concat`. """ return self.operate(add, other) def __sub__(self, other): """Implement the ``-`` operator. In a column context, produces the clause ``a - b``. """ return self.operate(sub, other) def __mul__(self, other): """Implement the ``*`` operator. In a column context, produces the clause ``a * b``. """ return self.operate(mul, other) def __div__(self, other): """Implement the ``/`` operator. In a column context, produces the clause ``a / b``. """ return self.operate(div, other) def __mod__(self, other): """Implement the ``%`` operator. In a column context, produces the clause ``a % b``. """ return self.operate(mod, other) def __truediv__(self, other): """Implement the ``//`` operator. In a column context, produces the clause ``a / b``. """ return self.operate(truediv, other) def __rtruediv__(self, other): """Implement the ``//`` operator in reverse. See :meth:`.ColumnOperators.__truediv__`. 
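A brief sketch; with true division in effect (Python 3, or ``from __future__ import division``), a plain number on the left side invokes this method::

    2 / sometable.c.value   # roughly :param_1 / sometable.value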
""" return self.reverse_operate(truediv, other) def from_(): raise NotImplementedError() def as_(): raise NotImplementedError() def exists(): raise NotImplementedError() def is_(a, b): return a.is_(b) def isnot(a, b): return a.isnot(b) def collate(a, b): return a.collate(b) def op(a, opstring, b): return a.op(opstring)(b) def like_op(a, b, escape=None): return a.like(b, escape=escape) def notlike_op(a, b, escape=None): return a.notlike(b, escape=escape) def ilike_op(a, b, escape=None): return a.ilike(b, escape=escape) def notilike_op(a, b, escape=None): return a.notilike(b, escape=escape) def between_op(a, b, c): return a.between(b, c) def in_op(a, b): return a.in_(b) def notin_op(a, b): return a.notin_(b) def distinct_op(a): return a.distinct() def startswith_op(a, b, escape=None): return a.startswith(b, escape=escape) def notstartswith_op(a, b, escape=None): return ~a.startswith(b, escape=escape) def endswith_op(a, b, escape=None): return a.endswith(b, escape=escape) def notendswith_op(a, b, escape=None): return ~a.endswith(b, escape=escape) def contains_op(a, b, escape=None): return a.contains(b, escape=escape) def notcontains_op(a, b, escape=None): return ~a.contains(b, escape=escape) def match_op(a, b): return a.match(b) def comma_op(a, b): raise NotImplementedError() def concat_op(a, b): return a.concat(b) def desc_op(a): return a.desc() def asc_op(a): return a.asc() def nullsfirst_op(a): return a.nullsfirst() def nullslast_op(a): return a.nullslast() _commutative = set([eq, ne, add, mul]) _comparison = set([eq, ne, lt, gt, ge, le, between_op]) def is_comparison(op): return op in _comparison def is_commutative(op): return op in _commutative def is_ordering_modifier(op): return op in (asc_op, desc_op, nullsfirst_op, nullslast_op) _associative = _commutative.union([concat_op, and_, or_]) _natural_self_precedent = _associative.union([getitem]) """Operators where if we have (a op b) op c, we don't want to parenthesize (a op b). """ _smallest = symbol('_smallest', canonical=-100) _largest = symbol('_largest', canonical=100) _PRECEDENCE = { from_: 15, getitem: 15, mul: 8, truediv: 8, # Py2K div: 8, # end Py2K mod: 8, neg: 8, add: 7, sub: 7, concat_op: 6, match_op: 6, ilike_op: 6, notilike_op: 6, like_op: 6, notlike_op: 6, in_op: 6, notin_op: 6, is_: 6, isnot: 6, eq: 5, ne: 5, gt: 5, lt: 5, ge: 5, le: 5, between_op: 5, distinct_op: 5, inv: 5, and_: 3, or_: 2, comma_op: -1, collate: 7, as_: -1, exists: 0, _smallest: _smallest, _largest: _largest } def is_precedent(operator, against): if operator is against and operator in _natural_self_precedent: return False else: return (_PRECEDENCE.get(operator, getattr(operator, 'precedence', _smallest)) <= _PRECEDENCE.get(against, getattr(against, 'precedence', _largest))) SQLAlchemy-0.8.4/lib/sqlalchemy/sql/util.py0000644000076500000240000007442012251150015021266 0ustar classicstaff00000000000000# sql/util.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php from .. import exc, schema, util, sql from ..util import topological from . 
import expression, operators, visitors from itertools import chain from collections import deque """Utility functions that build upon SQL and Schema constructs.""" def sort_tables(tables, skip_fn=None, extra_dependencies=None): """sort a collection of Table objects in order of their foreign-key dependency.""" tables = list(tables) tuples = [] if extra_dependencies is not None: tuples.extend(extra_dependencies) def visit_foreign_key(fkey): if fkey.use_alter: return elif skip_fn and skip_fn(fkey): return parent_table = fkey.column.table if parent_table in tables: child_table = fkey.parent.table if parent_table is not child_table: tuples.append((parent_table, child_table)) for table in tables: visitors.traverse(table, {'schema_visitor': True}, {'foreign_key': visit_foreign_key}) tuples.extend( [parent, table] for parent in table._extra_dependencies ) return list(topological.sort(tuples, tables)) def find_join_source(clauses, join_to): """Given a list of FROM clauses and a selectable, return the first index and element from the list of clauses which can be joined against the selectable. returns None, None if no match is found. e.g.:: clause1 = table1.join(table2) clause2 = table4.join(table5) join_to = table2.join(table3) find_join_source([clause1, clause2], join_to) == clause1 """ selectables = list(expression._from_objects(join_to)) for i, f in enumerate(clauses): for s in selectables: if f.is_derived_from(s): return i, f else: return None, None def visit_binary_product(fn, expr): """Produce a traversal of the given expression, delivering column comparisons to the given function. The function is of the form:: def my_fn(binary, left, right) For each binary expression located which has a comparison operator, the product of "left" and "right" will be delivered to that function, in terms of that binary. Hence an expression like:: and_( (a + b) == q + func.sum(e + f), j == r ) would have the traversal:: a q a e a f b q b e b f j r That is, every combination of "left" and "right" that doesn't further contain a binary comparison is passed as pairs. 
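A rough usage sketch, assuming ``a = table('a', column('x'))`` and ``b = table('b', column('y'))`` constructs::

    def on_pair(binary, left, right):
        print binary.operator, left, right

    visit_binary_product(on_pair, a.c.x == b.c.y)   # one pair: a.x, b.y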
""" stack = [] def visit(element): if isinstance(element, (expression.ScalarSelect)): # we dont want to dig into correlated subqueries, # those are just column elements by themselves yield element elif element.__visit_name__ == 'binary' and \ operators.is_comparison(element.operator): stack.insert(0, element) for l in visit(element.left): for r in visit(element.right): fn(stack[0], l, r) stack.pop(0) for elem in element.get_children(): visit(elem) else: if isinstance(element, expression.ColumnClause): yield element for elem in element.get_children(): for e in visit(elem): yield e list(visit(expr)) def find_tables(clause, check_columns=False, include_aliases=False, include_joins=False, include_selects=False, include_crud=False): """locate Table objects within the given expression.""" tables = [] _visitors = {} if include_selects: _visitors['select'] = _visitors['compound_select'] = tables.append if include_joins: _visitors['join'] = tables.append if include_aliases: _visitors['alias'] = tables.append if include_crud: _visitors['insert'] = _visitors['update'] = \ _visitors['delete'] = lambda ent: tables.append(ent.table) if check_columns: def visit_column(column): tables.append(column.table) _visitors['column'] = visit_column _visitors['table'] = tables.append visitors.traverse(clause, {'column_collections': False}, _visitors) return tables def find_columns(clause): """locate Column objects within the given expression.""" cols = util.column_set() visitors.traverse(clause, {}, {'column': cols.add}) return cols def unwrap_order_by(clause): """Break up an 'order by' expression into individual column-expressions, without DESC/ASC/NULLS FIRST/NULLS LAST""" cols = util.column_set() stack = deque([clause]) while stack: t = stack.popleft() if isinstance(t, expression.ColumnElement) and \ ( not isinstance(t, expression.UnaryExpression) or \ not operators.is_ordering_modifier(t.modifier) ): cols.add(t) else: for c in t.get_children(): stack.append(c) return cols def clause_is_present(clause, search): """Given a target clause and a second to search within, return True if the target is plainly present in the search without any subqueries or aliases involved. Basically descends through Joins. """ stack = [search] while stack: elem = stack.pop() if clause == elem: # use == here so that Annotated's compare return True elif isinstance(elem, expression.Join): stack.extend((elem.left, elem.right)) return False def bind_values(clause): """Return an ordered list of "bound" values in the given clause. E.g.:: >>> expr = and_( ... table.c.foo==5, table.c.foo==7 ... ) >>> bind_values(expr) [5, 7] """ v = [] def visit_bindparam(bind): v.append(bind.effective_value) visitors.traverse(clause, {}, {'bindparam': visit_bindparam}) return v def _quote_ddl_expr(element): if isinstance(element, basestring): element = element.replace("'", "''") return "'%s'" % element else: return repr(element) class _repr_params(object): """A string view of bound parameters, truncating display to the given number of 'multi' parameter sets. """ def __init__(self, params, batches): self.params = params self.batches = batches def __repr__(self): if isinstance(self.params, (list, tuple)) and \ len(self.params) > self.batches and \ isinstance(self.params[0], (list, dict, tuple)): msg = " ... displaying %i of %i total bound parameter sets ... 
" return ' '.join(( repr(self.params[:self.batches - 2])[0:-1], msg % (self.batches, len(self.params)), repr(self.params[-2:])[1:] )) else: return repr(self.params) def expression_as_ddl(clause): """Given a SQL expression, convert for usage in DDL, such as CREATE INDEX and CHECK CONSTRAINT. Converts bind params into quoted literals, column identifiers into detached column constructs so that the parent table identifier is not included. .. deprecated:: this function is removed in 0.9.0. """ def repl(element): if isinstance(element, expression.BindParameter): return expression.literal_column(_quote_ddl_expr(element.value)) elif isinstance(element, expression.ColumnClause) and \ element.table is not None: col = expression.column(element.name) col.quote = element.quote return col else: return None return visitors.replacement_traverse(clause, {}, repl) def adapt_criterion_to_null(crit, nulls): """given criterion containing bind params, convert selected elements to IS NULL. """ def visit_binary(binary): if isinstance(binary.left, expression.BindParameter) \ and binary.left._identifying_key in nulls: # reverse order if the NULL is on the left side binary.left = binary.right binary.right = expression.null() binary.operator = operators.is_ binary.negate = operators.isnot elif isinstance(binary.right, expression.BindParameter) \ and binary.right._identifying_key in nulls: binary.right = expression.null() binary.operator = operators.is_ binary.negate = operators.isnot return visitors.cloned_traverse(crit, {}, {'binary': visit_binary}) def join_condition(a, b, ignore_nonexistent_tables=False, a_subset=None, consider_as_foreign_keys=None): """create a join condition between two tables or selectables. e.g.:: join_condition(tablea, tableb) would produce an expression along the lines of:: tablea.c.id==tableb.c.tablea_id The join is determined based on the foreign key relationships between the two selectables. If there are multiple ways to join, or no way to join, an error is raised. :param ignore_nonexistent_tables: Deprecated - this flag is no longer used. Only resolution errors regarding the two given tables are propagated. :param a_subset: An optional expression that is a sub-component of ``a``. An attempt will be made to join to just this sub-component first before looking at the full ``a`` construct, and if found will be successful even if there are other ways to join to ``a``. This allows the "right side" of a join to be passed thereby providing a "natural join". """ crit = [] constraints = set() for left in (a_subset, a): if left is None: continue for fk in sorted( b.foreign_keys, key=lambda fk: fk.parent._creation_order): if consider_as_foreign_keys is not None and \ fk.parent not in consider_as_foreign_keys: continue try: col = fk.get_referent(left) except exc.NoReferenceError, nrte: if nrte.table_name == left.name: raise else: continue if col is not None: crit.append(col == fk.parent) constraints.add(fk.constraint) if left is not b: for fk in sorted( left.foreign_keys, key=lambda fk: fk.parent._creation_order): if consider_as_foreign_keys is not None and \ fk.parent not in consider_as_foreign_keys: continue try: col = fk.get_referent(b) except exc.NoReferenceError, nrte: if nrte.table_name == b.name: raise else: # this is totally covered. can't get # coverage to mark it. 
continue if col is not None: crit.append(col == fk.parent) constraints.add(fk.constraint) if crit: break if len(crit) == 0: if isinstance(b, expression.FromGrouping): hint = " Perhaps you meant to convert the right side to a "\ "subquery using alias()?" else: hint = "" raise exc.NoForeignKeysError( "Can't find any foreign key relationships " "between '%s' and '%s'.%s" % (a.description, b.description, hint)) elif len(constraints) > 1: raise exc.AmbiguousForeignKeysError( "Can't determine join between '%s' and '%s'; " "tables have more than one foreign key " "constraint relationship between them. " "Please specify the 'onclause' of this " "join explicitly." % (a.description, b.description)) elif len(crit) == 1: return (crit[0]) else: return sql.and_(*crit) class Annotated(object): """clones a ClauseElement and applies an 'annotations' dictionary. Unlike regular clones, this clone also mimics __hash__() and __cmp__() of the original element so that it takes its place in hashed collections. A reference to the original element is maintained, for the important reason of keeping its hash value current. When GC'ed, the hash value may be reused, causing conflicts. """ def __new__(cls, *args): if not args: # clone constructor return object.__new__(cls) else: element, values = args # pull appropriate subclass from registry of annotated # classes try: cls = annotated_classes[element.__class__] except KeyError: cls = annotated_classes[element.__class__] = type.__new__(type, "Annotated%s" % element.__class__.__name__, (cls, element.__class__), {}) return object.__new__(cls) def __init__(self, element, values): # force FromClause to generate their internal # collections into __dict__ if isinstance(element, expression.FromClause): element.c self.__dict__ = element.__dict__.copy() expression.ColumnElement.comparator._reset(self) self.__element = element self._annotations = values def _annotate(self, values): _values = self._annotations.copy() _values.update(values) return self._with_annotations(_values) def _with_annotations(self, values): clone = self.__class__.__new__(self.__class__) clone.__dict__ = self.__dict__.copy() expression.ColumnElement.comparator._reset(clone) clone._annotations = values return clone def _deannotate(self, values=None, clone=True): if values is None: return self.__element else: _values = self._annotations.copy() for v in values: _values.pop(v, None) return self._with_annotations(_values) def _compiler_dispatch(self, visitor, **kw): return self.__element.__class__._compiler_dispatch(self, visitor, **kw) @property def _constructor(self): return self.__element._constructor def _clone(self): clone = self.__element._clone() if clone is self.__element: # detect immutable, don't change anything return self else: # update the clone with any changes that have occurred # to this object's __dict__. 
clone.__dict__.update(self.__dict__) return self.__class__(clone, self._annotations) def __hash__(self): return hash(self.__element) def __eq__(self, other): if isinstance(self.__element, expression.ColumnOperators): return self.__element.__class__.__eq__(self, other) else: return hash(other) == hash(self) class AnnotatedColumnElement(Annotated): def __init__(self, element, values): Annotated.__init__(self, element, values) for attr in ('name', 'key', 'table'): if self.__dict__.get(attr, False) is None: self.__dict__.pop(attr) @util.memoized_property def name(self): """pull 'name' from parent, if not present""" return self._Annotated__element.name @util.memoized_property def table(self): """pull 'table' from parent, if not present""" return self._Annotated__element.table @util.memoized_property def key(self): """pull 'key' from parent, if not present""" return self._Annotated__element.key @util.memoized_property def info(self): return self._Annotated__element.info # hard-generate Annotated subclasses. this technique # is used instead of on-the-fly types (i.e. type.__new__()) # so that the resulting objects are pickleable. annotated_classes = {} for cls in expression.__dict__.values() + [schema.Column, schema.Table]: if isinstance(cls, type) and issubclass(cls, expression.ClauseElement): if issubclass(cls, expression.ColumnElement): annotation_cls = "AnnotatedColumnElement" else: annotation_cls = "Annotated" exec "class Annotated%s(%s, cls):\n" \ " pass" % (cls.__name__, annotation_cls) in locals() exec "annotated_classes[cls] = Annotated%s" % (cls.__name__,) def _deep_annotate(element, annotations, exclude=None): """Deep copy the given ClauseElement, annotating each element with the given annotations dictionary. Elements within the exclude collection will be cloned but not annotated. """ def clone(elem): if exclude and \ hasattr(elem, 'proxy_set') and \ elem.proxy_set.intersection(exclude): newelem = elem._clone() elif annotations != elem._annotations: newelem = elem._annotate(annotations) else: newelem = elem newelem._copy_internals(clone=clone) return newelem if element is not None: element = clone(element) return element def _deep_deannotate(element, values=None): """Deep copy the given element, removing annotations.""" cloned = util.column_dict() def clone(elem): # if a values dict is given, # the elem must be cloned each time it appears, # as there may be different annotations in source # elements that are remaining. if totally # removing all annotations, can assume the same # slate... if values or elem not in cloned: newelem = elem._deannotate(values=values, clone=True) newelem._copy_internals(clone=clone) if not values: cloned[elem] = newelem return newelem else: return cloned[elem] if element is not None: element = clone(element) return element def _shallow_annotate(element, annotations): """Annotate the given ClauseElement and copy its internals so that internal objects refer to the new annotated object. Basically used to apply a "dont traverse" annotation to a selectable, without digging throughout the whole structure wasting time. 
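# --- Illustrative sketch (not part of this module): annotating a bare Column
# via _deep_annotate(); the annotation key used here is purely hypothetical.
# The annotated clone carries the values dict while still hashing like the
# original element, per the Annotated docstring above.
#
#     from sqlalchemy import Column, Integer
#     from sqlalchemy.sql.util import _deep_annotate
#
#     col = Column('x', Integer)
#     annotated = _deep_annotate(col, {'some_tag': True})
#
#     assert annotated is not col
#     assert annotated._annotations == {'some_tag': True}
#     assert hash(annotated) == hash(col)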
""" element = element._annotate(annotations) element._copy_internals() return element def splice_joins(left, right, stop_on=None): if left is None: return right stack = [(right, None)] adapter = ClauseAdapter(left) ret = None while stack: (right, prevright) = stack.pop() if isinstance(right, expression.Join) and right is not stop_on: right = right._clone() right._reset_exported() right.onclause = adapter.traverse(right.onclause) stack.append((right.left, right)) else: right = adapter.traverse(right) if prevright is not None: prevright.left = right if ret is None: ret = right return ret def reduce_columns(columns, *clauses, **kw): """given a list of columns, return a 'reduced' set based on natural equivalents. the set is reduced to the smallest list of columns which have no natural equivalent present in the list. A "natural equivalent" means that two columns will ultimately represent the same value because they are related by a foreign key. \*clauses is an optional list of join clauses which will be traversed to further identify columns that are "equivalent". \**kw may specify 'ignore_nonexistent_tables' to ignore foreign keys whose tables are not yet configured, or columns that aren't yet present. This function is primarily used to determine the most minimal "primary key" from a selectable, by reducing the set of primary key columns present in the the selectable to just those that are not repeated. """ ignore_nonexistent_tables = kw.pop('ignore_nonexistent_tables', False) only_synonyms = kw.pop('only_synonyms', False) columns = util.ordered_column_set(columns) omit = util.column_set() for col in columns: for fk in chain(*[c.foreign_keys for c in col.proxy_set]): for c in columns: if c is col: continue try: fk_col = fk.column except exc.NoReferencedColumnError: # TODO: add specific coverage here # to test/sql/test_selectable ReduceTest if ignore_nonexistent_tables: continue else: raise except exc.NoReferencedTableError: # TODO: add specific coverage here # to test/sql/test_selectable ReduceTest if ignore_nonexistent_tables: continue else: raise if fk_col.shares_lineage(c) and \ (not only_synonyms or \ c.name == col.name): omit.add(col) break if clauses: def visit_binary(binary): if binary.operator == operators.eq: cols = util.column_set(chain(*[c.proxy_set for c in columns.difference(omit)])) if binary.left in cols and binary.right in cols: for c in reversed(columns): if c.shares_lineage(binary.right) and \ (not only_synonyms or \ c.name == binary.left.name): omit.add(c) break for clause in clauses: if clause is not None: visitors.traverse(clause, {}, {'binary': visit_binary}) return expression.ColumnSet(columns.difference(omit)) def criterion_as_pairs(expression, consider_as_foreign_keys=None, consider_as_referenced_keys=None, any_operator=False): """traverse an expression and locate binary criterion pairs.""" if consider_as_foreign_keys and consider_as_referenced_keys: raise exc.ArgumentError("Can only specify one of " "'consider_as_foreign_keys' or " "'consider_as_referenced_keys'") def col_is(a, b): #return a is b return a.compare(b) def visit_binary(binary): if not any_operator and binary.operator is not operators.eq: return if not isinstance(binary.left, sql.ColumnElement) or \ not isinstance(binary.right, sql.ColumnElement): return if consider_as_foreign_keys: if binary.left in consider_as_foreign_keys and \ (col_is(binary.right, binary.left) or binary.right not in consider_as_foreign_keys): pairs.append((binary.right, binary.left)) elif binary.right in consider_as_foreign_keys and \ 
(col_is(binary.left, binary.right) or binary.left not in consider_as_foreign_keys): pairs.append((binary.left, binary.right)) elif consider_as_referenced_keys: if binary.left in consider_as_referenced_keys and \ (col_is(binary.right, binary.left) or binary.right not in consider_as_referenced_keys): pairs.append((binary.left, binary.right)) elif binary.right in consider_as_referenced_keys and \ (col_is(binary.left, binary.right) or binary.left not in consider_as_referenced_keys): pairs.append((binary.right, binary.left)) else: if isinstance(binary.left, schema.Column) and \ isinstance(binary.right, schema.Column): if binary.left.references(binary.right): pairs.append((binary.right, binary.left)) elif binary.right.references(binary.left): pairs.append((binary.left, binary.right)) pairs = [] visitors.traverse(expression, {}, {'binary': visit_binary}) return pairs class AliasedRow(object): """Wrap a RowProxy with a translation map. This object allows a set of keys to be translated to those present in a RowProxy. """ def __init__(self, row, map): # AliasedRow objects don't nest, so un-nest # if another AliasedRow was passed if isinstance(row, AliasedRow): self.row = row.row else: self.row = row self.map = map def __contains__(self, key): return self.map[key] in self.row def has_key(self, key): return key in self def __getitem__(self, key): return self.row[self.map[key]] def keys(self): return self.row.keys() class ClauseAdapter(visitors.ReplacingCloningVisitor): """Clones and modifies clauses based on column correspondence. E.g.:: table1 = Table('sometable', metadata, Column('col1', Integer), Column('col2', Integer) ) table2 = Table('someothertable', metadata, Column('col1', Integer), Column('col2', Integer) ) condition = table1.c.col1 == table2.c.col1 make an alias of table1:: s = table1.alias('foo') calling ``ClauseAdapter(s).traverse(condition)`` converts condition to read:: s.c.col1 == table2.c.col1 """ def __init__(self, selectable, equivalents=None, include=None, exclude=None, include_fn=None, exclude_fn=None, adapt_on_names=False): self.__traverse_options__ = {'stop_on': [selectable]} self.selectable = selectable if include: assert not include_fn self.include_fn = lambda e: e in include else: self.include_fn = include_fn if exclude: assert not exclude_fn self.exclude_fn = lambda e: e in exclude else: self.exclude_fn = exclude_fn self.equivalents = util.column_dict(equivalents or {}) self.adapt_on_names = adapt_on_names def _corresponding_column(self, col, require_embedded, _seen=util.EMPTY_SET): newcol = self.selectable.corresponding_column( col, require_embedded=require_embedded) if newcol is None and col in self.equivalents and col not in _seen: for equiv in self.equivalents[col]: newcol = self._corresponding_column(equiv, require_embedded=require_embedded, _seen=_seen.union([col])) if newcol is not None: return newcol if self.adapt_on_names and newcol is None: newcol = self.selectable.c.get(col.name) return newcol def replace(self, col): if isinstance(col, expression.FromClause) and \ self.selectable.is_derived_from(col): return self.selectable elif not isinstance(col, expression.ColumnElement): return None elif self.include_fn and not self.include_fn(col): return None elif self.exclude_fn and self.exclude_fn(col): return None else: return self._corresponding_column(col, True) class ColumnAdapter(ClauseAdapter): """Extends ClauseAdapter with extra utility functions. 
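# --- Illustrative sketch (not part of this module): the ClauseAdapter example
# from the docstring above, written out as a standalone script.
#
#     from sqlalchemy import MetaData, Table, Column, Integer
#     from sqlalchemy.sql.util import ClauseAdapter
#
#     metadata = MetaData()
#     table1 = Table('sometable', metadata,
#                    Column('col1', Integer), Column('col2', Integer))
#     table2 = Table('someothertable', metadata,
#                    Column('col1', Integer), Column('col2', Integer))
#
#     condition = table1.c.col1 == table2.c.col1
#     s = table1.alias('foo')
#
#     print(condition)
#     # sometable.col1 = someothertable.col1
#     print(ClauseAdapter(s).traverse(condition))
#     # foo.col1 = someothertable.col1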
Provides the ability to "wrap" this ClauseAdapter around another, a columns dictionary which returns adapted elements given an original, and an adapted_row() factory. """ def __init__(self, selectable, equivalents=None, chain_to=None, include=None, exclude=None, adapt_required=False): ClauseAdapter.__init__(self, selectable, equivalents, include, exclude) if chain_to: self.chain(chain_to) self.columns = util.populate_column_dict(self._locate_col) self.adapt_required = adapt_required def wrap(self, adapter): ac = self.__class__.__new__(self.__class__) ac.__dict__ = self.__dict__.copy() ac._locate_col = ac._wrap(ac._locate_col, adapter._locate_col) ac.adapt_clause = ac._wrap(ac.adapt_clause, adapter.adapt_clause) ac.adapt_list = ac._wrap(ac.adapt_list, adapter.adapt_list) ac.columns = util.populate_column_dict(ac._locate_col) return ac adapt_clause = ClauseAdapter.traverse adapt_list = ClauseAdapter.copy_and_process def _wrap(self, local, wrapped): def locate(col): col = local(col) return wrapped(col) return locate def _locate_col(self, col): c = self._corresponding_column(col, True) if c is None: c = self.adapt_clause(col) # anonymize labels in case they have a hardcoded name if isinstance(c, expression.Label): c = c.label(None) # adapt_required indicates that if we got the same column # back which we put in (i.e. it passed through), # it's not correct. this is used by eagerloading which # knows that all columns and expressions need to be adapted # to a result row, and a "passthrough" is definitely targeting # the wrong column. if self.adapt_required and c is col: return None return c def adapted_row(self, row): return AliasedRow(row, self.columns) def __getstate__(self): d = self.__dict__.copy() del d['columns'] return d def __setstate__(self, state): self.__dict__.update(state) self.columns = util.PopulateDict(self._locate_col) SQLAlchemy-0.8.4/lib/sqlalchemy/sql/visitors.py0000644000076500000240000002343512251150015022173 0ustar classicstaff00000000000000# sql/visitors.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Visitor/traversal interface and library functions. SQLAlchemy schema and expression constructs rely on a Python-centric version of the classic "visitor" pattern as the primary way in which they apply functionality. The most common use of this pattern is statement compilation, where individual expression classes match up to rendering methods that produce a string result. Beyond this, the visitor system is also used to inspect expressions for various information and patterns, as well as for usage in some kinds of expression transformation. Other kinds of transformation use a non-visitor traversal system. For many examples of how the visit system is used, see the sqlalchemy.sql.util and the sqlalchemy.sql.compiler modules. For an introduction to clause adaption, see http://techspot.zzzeek.org/2008/01/23/expression-transformations/ """ from collections import deque from .. import util import operator from .. import exc __all__ = ['VisitableType', 'Visitable', 'ClauseVisitor', 'CloningVisitor', 'ReplacingCloningVisitor', 'iterate', 'iterate_depthfirst', 'traverse_using', 'traverse', 'cloned_traverse', 'replacement_traverse'] class VisitableType(type): """Metaclass which assigns a `_compiler_dispatch` method to classes having a `__visit_name__` attribute. 
The _compiler_dispatch attribute becomes an instance method which looks approximately like the following:: def _compiler_dispatch (self, visitor, **kw): '''Look for an attribute named "visit_" + self.__visit_name__ on the visitor, and call it with the same kw params.''' visit_attr = 'visit_%s' % self.__visit_name__ return getattr(visitor, visit_attr)(self, **kw) Classes having no __visit_name__ attribute will remain unaffected. """ def __init__(cls, clsname, bases, clsdict): if cls.__name__ == 'Visitable' or not hasattr(cls, '__visit_name__'): super(VisitableType, cls).__init__(clsname, bases, clsdict) return _generate_dispatch(cls) super(VisitableType, cls).__init__(clsname, bases, clsdict) def _generate_dispatch(cls): """Return an optimized visit dispatch function for the cls for use by the compiler. """ if '__visit_name__' in cls.__dict__: visit_name = cls.__visit_name__ if isinstance(visit_name, str): # There is an optimization opportunity here because the # the string name of the class's __visit_name__ is known at # this early stage (import time) so it can be pre-constructed. getter = operator.attrgetter("visit_%s" % visit_name) def _compiler_dispatch(self, visitor, **kw): try: meth = getter(visitor) except AttributeError: raise exc.UnsupportedCompilationError(visitor, cls) else: return meth(self, **kw) else: # The optimization opportunity is lost for this case because the # __visit_name__ is not yet a string. As a result, the visit # string has to be recalculated with each compilation. def _compiler_dispatch(self, visitor, **kw): visit_attr = 'visit_%s' % self.__visit_name__ try: meth = getattr(visitor, visit_attr) except AttributeError: raise exc.UnsupportedCompilationError(visitor, cls) else: return meth(self, **kw) _compiler_dispatch.__doc__ = \ """Look for an attribute named "visit_" + self.__visit_name__ on the visitor, and call it with the same kw params. """ cls._compiler_dispatch = _compiler_dispatch class Visitable(object): """Base class for visitable objects, applies the ``VisitableType`` metaclass. """ __metaclass__ = VisitableType class ClauseVisitor(object): """Base class for visitor objects which can traverse using the traverse() function. """ __traverse_options__ = {} def traverse_single(self, obj, **kw): for v in self._visitor_iterator: meth = getattr(v, "visit_%s" % obj.__visit_name__, None) if meth: return meth(obj, **kw) def iterate(self, obj): """traverse the given expression structure, returning an iterator of all elements. """ return iterate(obj, self.__traverse_options__) def traverse(self, obj): """traverse and visit the given expression structure.""" return traverse(obj, self.__traverse_options__, self._visitor_dict) @util.memoized_property def _visitor_dict(self): visitors = {} for name in dir(self): if name.startswith('visit_'): visitors[name[6:]] = getattr(self, name) return visitors @property def _visitor_iterator(self): """iterate through this visitor and each 'chained' visitor.""" v = self while v: yield v v = getattr(v, '_next', None) def chain(self, visitor): """'chain' an additional ClauseVisitor onto this ClauseVisitor. the chained visitor will receive all visit events after this one. """ tail = list(self._visitor_iterator)[-1] tail._next = visitor return self class CloningVisitor(ClauseVisitor): """Base class for visitor objects which can traverse using the cloned_traverse() function. """ def copy_and_process(self, list_): """Apply cloned traversal to the given list of elements, and return the new list. 
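# --- Illustrative sketch (not part of this module): how the generated
# _compiler_dispatch described above resolves "visit_" + __visit_name__ on a
# visitor object.  Thing and MyVisitor are hypothetical classes.
#
#     from sqlalchemy.sql.visitors import Visitable
#
#     class Thing(Visitable):
#         __visit_name__ = 'thing'
#
#     class MyVisitor(object):
#         def visit_thing(self, element, **kw):
#             return "visited %r" % element
#
#     print(Thing()._compiler_dispatch(MyVisitor()))
#     # visited <...Thing object at 0x...>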
""" return [self.traverse(x) for x in list_] def traverse(self, obj): """traverse and visit the given expression structure.""" return cloned_traverse( obj, self.__traverse_options__, self._visitor_dict) class ReplacingCloningVisitor(CloningVisitor): """Base class for visitor objects which can traverse using the replacement_traverse() function. """ def replace(self, elem): """receive pre-copied elements during a cloning traversal. If the method returns a new element, the element is used instead of creating a simple copy of the element. Traversal will halt on the newly returned element if it is re-encountered. """ return None def traverse(self, obj): """traverse and visit the given expression structure.""" def replace(elem): for v in self._visitor_iterator: e = v.replace(elem) if e is not None: return e return replacement_traverse(obj, self.__traverse_options__, replace) def iterate(obj, opts): """traverse the given expression structure, returning an iterator. traversal is configured to be breadth-first. """ stack = deque([obj]) while stack: t = stack.popleft() yield t for c in t.get_children(**opts): stack.append(c) def iterate_depthfirst(obj, opts): """traverse the given expression structure, returning an iterator. traversal is configured to be depth-first. """ stack = deque([obj]) traversal = deque() while stack: t = stack.pop() traversal.appendleft(t) for c in t.get_children(**opts): stack.append(c) return iter(traversal) def traverse_using(iterator, obj, visitors): """visit the given expression structure using the given iterator of objects. """ for target in iterator: meth = visitors.get(target.__visit_name__, None) if meth: meth(target) return obj def traverse(obj, opts, visitors): """traverse and visit the given expression structure using the default iterator. """ return traverse_using(iterate(obj, opts), obj, visitors) def traverse_depthfirst(obj, opts, visitors): """traverse and visit the given expression structure using the depth-first iterator. 
""" return traverse_using(iterate_depthfirst(obj, opts), obj, visitors) def cloned_traverse(obj, opts, visitors): """clone the given expression structure, allowing modifications by visitors.""" cloned = util.column_dict() stop_on = util.column_set(opts.get('stop_on', [])) def clone(elem): if elem in stop_on: return elem else: if id(elem) not in cloned: cloned[id(elem)] = newelem = elem._clone() newelem._copy_internals(clone=clone) meth = visitors.get(newelem.__visit_name__, None) if meth: meth(newelem) return cloned[id(elem)] if obj is not None: obj = clone(obj) return obj def replacement_traverse(obj, opts, replace): """clone the given expression structure, allowing element replacement by a given replacement function.""" cloned = util.column_dict() stop_on = util.column_set([id(x) for x in opts.get('stop_on', [])]) def clone(elem, **kw): if id(elem) in stop_on or \ 'no_replacement_traverse' in elem._annotations: return elem else: newelem = replace(elem) if newelem is not None: stop_on.add(id(newelem)) return newelem else: if elem not in cloned: cloned[elem] = newelem = elem._clone() newelem._copy_internals(clone=clone, **kw) return cloned[elem] if obj is not None: obj = clone(obj, **opts) return obj SQLAlchemy-0.8.4/lib/sqlalchemy/testing/0000755000076500000240000000000012251151573020620 5ustar classicstaff00000000000000SQLAlchemy-0.8.4/lib/sqlalchemy/testing/__init__.py0000644000076500000240000000131412251150015022716 0ustar classicstaff00000000000000from __future__ import absolute_import from .warnings import testing_warn, assert_warnings, resetwarnings from . import config from .exclusions import db_spec, _is_excluded, fails_if, skip_if, future,\ fails_on, fails_on_everything_except, skip, only_on, exclude, against,\ _server_version, only_if from .assertions import emits_warning, emits_warning_on, uses_deprecated, \ eq_, ne_, is_, is_not_, startswith_, assert_raises, \ assert_raises_message, AssertsCompiledSQL, ComparesTables, \ AssertsExecutionResults from .util import run_as_contextmanager, rowset, fail, provide_metadata, adict crashes = skip from .config import db, requirements as requires from . import mock SQLAlchemy-0.8.4/lib/sqlalchemy/testing/assertions.py0000644000076500000240000003227612251150015023364 0ustar classicstaff00000000000000from __future__ import absolute_import from . import util as testutil from sqlalchemy import pool, orm, util from sqlalchemy.engine import default, create_engine from sqlalchemy import exc as sa_exc from sqlalchemy.util import decorator from sqlalchemy import types as sqltypes, schema import warnings import re from .warnings import resetwarnings from .exclusions import db_spec, _is_excluded from . import assertsql from . import config import itertools from .util import fail import contextlib def emits_warning(*messages): """Mark a test as emitting a warning. With no arguments, squelches all SAWarning failures. Or pass one or more strings; these will be matched to the root of the warning description by warnings.filterwarnings(). """ # TODO: it would be nice to assert that a named warning was # emitted. should work with some monkeypatching of warnings, # and may work on non-CPython if they keep to the spirit of # warnings.showwarning's docstring. 
# - update: jython looks ok, it uses cpython's module @decorator def decorate(fn, *args, **kw): # todo: should probably be strict about this, too filters = [dict(action='ignore', category=sa_exc.SAPendingDeprecationWarning)] if not messages: filters.append(dict(action='ignore', category=sa_exc.SAWarning)) else: filters.extend(dict(action='ignore', message=message, category=sa_exc.SAWarning) for message in messages) for f in filters: warnings.filterwarnings(**f) try: return fn(*args, **kw) finally: resetwarnings() return decorate def emits_warning_on(db, *warnings): """Mark a test as emitting a warning on a specific dialect. With no arguments, squelches all SAWarning failures. Or pass one or more strings; these will be matched to the root of the warning description by warnings.filterwarnings(). """ spec = db_spec(db) @decorator def decorate(fn, *args, **kw): if isinstance(db, basestring): if not spec(config.db): return fn(*args, **kw) else: wrapped = emits_warning(*warnings)(fn) return wrapped(*args, **kw) else: if not _is_excluded(*db): return fn(*args, **kw) else: wrapped = emits_warning(*warnings)(fn) return wrapped(*args, **kw) return decorate def uses_deprecated(*messages): """Mark a test as immune from fatal deprecation warnings. With no arguments, squelches all SADeprecationWarning failures. Or pass one or more strings; these will be matched to the root of the warning description by warnings.filterwarnings(). As a special case, you may pass a function name prefixed with // and it will be re-written as needed to match the standard warning verbiage emitted by the sqlalchemy.util.deprecated decorator. """ @decorator def decorate(fn, *args, **kw): # todo: should probably be strict about this, too filters = [dict(action='ignore', category=sa_exc.SAPendingDeprecationWarning)] if not messages: filters.append(dict(action='ignore', category=sa_exc.SADeprecationWarning)) else: filters.extend( [dict(action='ignore', message=message, category=sa_exc.SADeprecationWarning) for message in [(m.startswith('//') and ('Call to deprecated function ' + m[2:]) or m) for m in messages]]) for f in filters: warnings.filterwarnings(**f) try: return fn(*args, **kw) finally: resetwarnings() return decorate def global_cleanup_assertions(): """Check things that have to be finalized at the end of a test suite. Hardcoded at the moment, a modular system can be built here to support things like PG prepared transactions, tables all dropped, etc. """ testutil.lazy_gc() assert not pool._refs, str(pool._refs) def eq_(a, b, msg=None): """Assert a == b, with repr messaging on failure.""" assert a == b, msg or "%r != %r" % (a, b) def ne_(a, b, msg=None): """Assert a != b, with repr messaging on failure.""" assert a != b, msg or "%r == %r" % (a, b) def is_(a, b, msg=None): """Assert a is b, with repr messaging on failure.""" assert a is b, msg or "%r is not %r" % (a, b) def is_not_(a, b, msg=None): """Assert a is not b, with repr messaging on failure.""" assert a is not b, msg or "%r is %r" % (a, b) def startswith_(a, fragment, msg=None): """Assert a.startswith(fragment), with repr messaging on failure.""" assert a.startswith(fragment), msg or "%r does not start with %r" % ( a, fragment) def assert_raises(except_cls, callable_, *args, **kw): try: callable_(*args, **kw) success = False except except_cls: success = True # assert outside the block so it works for AssertionError too ! 
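# --- Illustrative sketch (hypothetical test functions, not part of this
# module): the decorators above are applied directly to test callables; the
# '//' form of uses_deprecated() matches the standard deprecation verbiage as
# described in its docstring.  Warning text and names here are made up.
#
#     from sqlalchemy.testing import emits_warning, uses_deprecated
#
#     @emits_warning('Some warning text to squelch')
#     def test_something_noisy():
#         pass
#
#     @uses_deprecated('//some_deprecated_function')
#     def test_legacy_api():
#         pass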
assert success, "Callable did not raise an exception" def assert_raises_message(except_cls, msg, callable_, *args, **kwargs): try: callable_(*args, **kwargs) assert False, "Callable did not raise an exception" except except_cls, e: assert re.search(msg, unicode(e), re.UNICODE), u"%r !~ %s" % (msg, e) print unicode(e).encode('utf-8') class AssertsCompiledSQL(object): def assert_compile(self, clause, result, params=None, checkparams=None, dialect=None, checkpositional=None, use_default_dialect=False, allow_dialect_select=False): if use_default_dialect: dialect = default.DefaultDialect() elif dialect == None and not allow_dialect_select: dialect = getattr(self, '__dialect__', None) if dialect == 'default': dialect = default.DefaultDialect() elif dialect is None: dialect = config.db.dialect elif isinstance(dialect, basestring): dialect = create_engine("%s://" % dialect).dialect kw = {} if params is not None: kw['column_keys'] = params.keys() if isinstance(clause, orm.Query): context = clause._compile_context() context.statement.use_labels = True clause = context.statement c = clause.compile(dialect=dialect, **kw) param_str = repr(getattr(c, 'params', {})) if util.py3k: param_str = param_str.encode('utf-8').decode('ascii', 'ignore') print(("\nSQL String:\n" + util.text_type(c) + param_str).encode('utf-8')) else: print(("\nSQL String:\n" + util.text_type(c).encode('utf-8') + param_str)) cc = re.sub(r'[\n\t]', '', util.text_type(c)) eq_(cc, result, "%r != %r on dialect %r" % (cc, result, dialect)) if checkparams is not None: eq_(c.construct_params(params), checkparams) if checkpositional is not None: p = c.construct_params(params) eq_(tuple([p[x] for x in c.positiontup]), checkpositional) class ComparesTables(object): def assert_tables_equal(self, table, reflected_table, strict_types=False): assert len(table.c) == len(reflected_table.c) for c, reflected_c in zip(table.c, reflected_table.c): eq_(c.name, reflected_c.name) assert reflected_c is reflected_table.c[c.name] eq_(c.primary_key, reflected_c.primary_key) eq_(c.nullable, reflected_c.nullable) if strict_types: msg = "Type '%s' doesn't correspond to type '%s'" assert type(reflected_c.type) is type(c.type), \ msg % (reflected_c.type, c.type) else: self.assert_types_base(reflected_c, c) if isinstance(c.type, sqltypes.String): eq_(c.type.length, reflected_c.type.length) eq_( set([f.column.name for f in c.foreign_keys]), set([f.column.name for f in reflected_c.foreign_keys]) ) if c.server_default: assert isinstance(reflected_c.server_default, schema.FetchedValue) assert len(table.primary_key) == len(reflected_table.primary_key) for c in table.primary_key: assert reflected_table.primary_key.columns[c.name] is not None def assert_types_base(self, c1, c2): assert c1.type._compare_type_affinity(c2.type),\ "On column %r, type '%s' doesn't correspond to type '%s'" % \ (c1.name, c1.type, c2.type) class AssertsExecutionResults(object): def assert_result(self, result, class_, *objects): result = list(result) print repr(result) self.assert_list(result, class_, objects) def assert_list(self, result, class_, list): self.assert_(len(result) == len(list), "result list is not the same size as test list, " + "for class " + class_.__name__) for i in range(0, len(list)): self.assert_row(class_, result[i], list[i]) def assert_row(self, class_, rowobj, desc): self.assert_(rowobj.__class__ is class_, "item class is not " + repr(class_)) for key, value in desc.iteritems(): if isinstance(value, tuple): if isinstance(value[1], list): self.assert_list(getattr(rowobj, 
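# --- Illustrative sketch (hypothetical test class, not part of this module):
# AssertsCompiledSQL.assert_compile(), shown above, compares a clause's
# compiled string against an expected value, here using the 'default' dialect.
#
#     from sqlalchemy import MetaData, Table, Column, Integer, select
#     from sqlalchemy.testing.assertions import AssertsCompiledSQL
#
#     class MyCompileTest(AssertsCompiledSQL):
#         __dialect__ = 'default'
#
#         def test_simple_select(self):
#             m = MetaData()
#             t = Table('t', m, Column('x', Integer))
#             self.assert_compile(select([t.c.x]), "SELECT t.x FROM t")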
key), value[0], value[1]) else: self.assert_row(value[0], getattr(rowobj, key), value[1]) else: self.assert_(getattr(rowobj, key) == value, "attribute %s value %s does not match %s" % ( key, getattr(rowobj, key), value)) def assert_unordered_result(self, result, cls, *expected): """As assert_result, but the order of objects is not considered. The algorithm is very expensive but not a big deal for the small numbers of rows that the test suite manipulates. """ class immutabledict(dict): def __hash__(self): return id(self) found = util.IdentitySet(result) expected = set([immutabledict(e) for e in expected]) for wrong in itertools.ifilterfalse(lambda o: type(o) == cls, found): fail('Unexpected type "%s", expected "%s"' % ( type(wrong).__name__, cls.__name__)) if len(found) != len(expected): fail('Unexpected object count "%s", expected "%s"' % ( len(found), len(expected))) NOVALUE = object() def _compare_item(obj, spec): for key, value in spec.iteritems(): if isinstance(value, tuple): try: self.assert_unordered_result( getattr(obj, key), value[0], *value[1]) except AssertionError: return False else: if getattr(obj, key, NOVALUE) != value: return False return True for expected_item in expected: for found_item in found: if _compare_item(found_item, expected_item): found.remove(found_item) break else: fail( "Expected %s instance with attributes %s not found." % ( cls.__name__, repr(expected_item))) return True def assert_sql_execution(self, db, callable_, *rules): assertsql.asserter.add_rules(rules) try: callable_() assertsql.asserter.statement_complete() finally: assertsql.asserter.clear_rules() def assert_sql(self, db, callable_, list_, with_sequences=None): if with_sequences is not None and config.db.dialect.supports_sequences: rules = with_sequences else: rules = list_ newrules = [] for rule in rules: if isinstance(rule, dict): newrule = assertsql.AllOf(*[ assertsql.ExactSQL(k, v) for k, v in rule.iteritems() ]) else: newrule = assertsql.ExactSQL(*rule) newrules.append(newrule) self.assert_sql_execution(db, callable_, *newrules) def assert_sql_count(self, db, callable_, count): self.assert_sql_execution( db, callable_, assertsql.CountStatements(count)) @contextlib.contextmanager def assert_execution(self, *rules): assertsql.asserter.add_rules(rules) try: yield assertsql.asserter.statement_complete() finally: assertsql.asserter.clear_rules() def assert_statement_count(self, count): return self.assert_execution(assertsql.CountStatements(count)) SQLAlchemy-0.8.4/lib/sqlalchemy/testing/assertsql.py0000644000076500000240000002421412251150015023204 0ustar classicstaff00000000000000 from ..engine.default import DefaultDialect from .. import util import re class AssertRule(object): def process_execute(self, clauseelement, *multiparams, **params): pass def process_cursor_execute(self, statement, parameters, context, executemany): pass def is_consumed(self): """Return True if this rule has been consumed, False if not. Should raise an AssertionError if this rule's condition has definitely failed. """ raise NotImplementedError() def rule_passed(self): """Return True if the last test of this rule passed, False if failed, None if no test was applied.""" raise NotImplementedError() def consume_final(self): """Return True if this rule has been consumed. Should raise an AssertionError if this rule's condition has not been consumed or has failed. 
""" if self._result is None: assert False, 'Rule has not been consumed' return self.is_consumed() class SQLMatchRule(AssertRule): def __init__(self): self._result = None self._errmsg = "" def rule_passed(self): return self._result def is_consumed(self): if self._result is None: return False assert self._result, self._errmsg return True class ExactSQL(SQLMatchRule): def __init__(self, sql, params=None): SQLMatchRule.__init__(self) self.sql = sql self.params = params def process_cursor_execute(self, statement, parameters, context, executemany): if not context: return _received_statement = \ _process_engine_statement(context.unicode_statement, context) _received_parameters = context.compiled_parameters # TODO: remove this step once all unit tests are migrated, as # ExactSQL should really be *exact* SQL sql = _process_assertion_statement(self.sql, context) equivalent = _received_statement == sql if self.params: if util.callable(self.params): params = self.params(context) else: params = self.params if not isinstance(params, list): params = [params] equivalent = equivalent and params \ == context.compiled_parameters else: params = {} self._result = equivalent if not self._result: self._errmsg = \ 'Testing for exact statement %r exact params %r, '\ 'received %r with params %r' % (sql, params, _received_statement, _received_parameters) class RegexSQL(SQLMatchRule): def __init__(self, regex, params=None): SQLMatchRule.__init__(self) self.regex = re.compile(regex) self.orig_regex = regex self.params = params def process_cursor_execute(self, statement, parameters, context, executemany): if not context: return _received_statement = \ _process_engine_statement(context.unicode_statement, context) _received_parameters = context.compiled_parameters equivalent = bool(self.regex.match(_received_statement)) if self.params: if util.callable(self.params): params = self.params(context) else: params = self.params if not isinstance(params, list): params = [params] # do a positive compare only for param, received in zip(params, _received_parameters): for k, v in param.iteritems(): if k not in received or received[k] != v: equivalent = False break else: params = {} self._result = equivalent if not self._result: self._errmsg = \ 'Testing for regex %r partial params %r, received %r '\ 'with params %r' % (self.orig_regex, params, _received_statement, _received_parameters) class CompiledSQL(SQLMatchRule): def __init__(self, statement, params=None): SQLMatchRule.__init__(self) self.statement = statement self.params = params def process_cursor_execute(self, statement, parameters, context, executemany): if not context: return from sqlalchemy.schema import _DDLCompiles _received_parameters = list(context.compiled_parameters) # recompile from the context, using the default dialect if isinstance(context.compiled.statement, _DDLCompiles): compiled = \ context.compiled.statement.compile(dialect=DefaultDialect()) else: compiled = \ context.compiled.statement.compile(dialect=DefaultDialect(), column_keys=context.compiled.column_keys) _received_statement = re.sub(r'[\n\t]', '', str(compiled)) equivalent = self.statement == _received_statement if self.params: if util.callable(self.params): params = self.params(context) else: params = self.params if not isinstance(params, list): params = [params] else: params = list(params) all_params = list(params) all_received = list(_received_parameters) while params: param = dict(params.pop(0)) for k, v in context.compiled.params.iteritems(): param.setdefault(k, v) if param not in 
_received_parameters: equivalent = False break else: _received_parameters.remove(param) if _received_parameters: equivalent = False else: params = {} all_params = {} all_received = [] self._result = equivalent if not self._result: print 'Testing for compiled statement %r partial params '\ '%r, received %r with params %r' % (self.statement, all_params, _received_statement, all_received) self._errmsg = \ 'Testing for compiled statement %r partial params %r, '\ 'received %r with params %r' % (self.statement, all_params, _received_statement, all_received) # print self._errmsg class CountStatements(AssertRule): def __init__(self, count): self.count = count self._statement_count = 0 def process_execute(self, clauseelement, *multiparams, **params): self._statement_count += 1 def process_cursor_execute(self, statement, parameters, context, executemany): pass def is_consumed(self): return False def consume_final(self): assert self.count == self._statement_count, \ 'desired statement count %d does not match %d' \ % (self.count, self._statement_count) return True class AllOf(AssertRule): def __init__(self, *rules): self.rules = set(rules) def process_execute(self, clauseelement, *multiparams, **params): for rule in self.rules: rule.process_execute(clauseelement, *multiparams, **params) def process_cursor_execute(self, statement, parameters, context, executemany): for rule in self.rules: rule.process_cursor_execute(statement, parameters, context, executemany) def is_consumed(self): if not self.rules: return True for rule in list(self.rules): if rule.rule_passed(): # a rule passed, move on self.rules.remove(rule) return len(self.rules) == 0 assert False, 'No assertion rules were satisfied for statement' def consume_final(self): return len(self.rules) == 0 def _process_engine_statement(query, context): if util.jython: # oracle+zxjdbc passes a PyStatement when returning into query = unicode(query) if context.engine.name == 'mssql' \ and query.endswith('; select scope_identity()'): query = query[:-25] query = re.sub(r'\n', '', query) return query def _process_assertion_statement(query, context): paramstyle = context.dialect.paramstyle if paramstyle == 'named': pass elif paramstyle == 'pyformat': query = re.sub(r':([\w_]+)', r"%(\1)s", query) else: # positional params repl = None if paramstyle == 'qmark': repl = "?" 
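# --- Illustrative sketch (hypothetical test method, not part of this module;
# assumes a database configured by the test runner): the rule objects above
# are normally consumed through
# AssertsExecutionResults.assert_sql_execution(), e.g. counting the statements
# emitted by a callable.
#
#     from sqlalchemy.testing import config
#     from sqlalchemy.testing.assertsql import CountStatements
#
#     def test_single_insert(self):
#         self.assert_sql_execution(
#             config.db,
#             lambda: config.db.execute(
#                 self.tables.users.insert(), name='jack'),
#             CountStatements(1))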
elif paramstyle == 'format': repl = r"%s" elif paramstyle == 'numeric': repl = None query = re.sub(r':([\w_]+)', repl, query) return query class SQLAssert(object): rules = None def add_rules(self, rules): self.rules = list(rules) def statement_complete(self): for rule in self.rules: if not rule.consume_final(): assert False, \ 'All statements are complete, but pending '\ 'assertion rules remain' def clear_rules(self): del self.rules def execute(self, conn, clauseelement, multiparams, params, result): if self.rules is not None: if not self.rules: assert False, \ 'All rules have been exhausted, but further '\ 'statements remain' rule = self.rules[0] rule.process_execute(clauseelement, *multiparams, **params) if rule.is_consumed(): self.rules.pop(0) def cursor_execute(self, conn, cursor, statement, parameters, context, executemany): if self.rules: rule = self.rules[0] rule.process_cursor_execute(statement, parameters, context, executemany) asserter = SQLAssert() SQLAlchemy-0.8.4/lib/sqlalchemy/testing/config.py0000644000076500000240000000003612251150015022424 0ustar classicstaff00000000000000requirements = None db = None SQLAlchemy-0.8.4/lib/sqlalchemy/testing/engines.py0000644000076500000240000003164012251150015022614 0ustar classicstaff00000000000000from __future__ import absolute_import import types import weakref from collections import deque from . import config from .util import decorator from .. import event, pool import re import warnings class ConnectionKiller(object): def __init__(self): self.proxy_refs = weakref.WeakKeyDictionary() self.testing_engines = weakref.WeakKeyDictionary() self.conns = set() def add_engine(self, engine): self.testing_engines[engine] = True def connect(self, dbapi_conn, con_record): self.conns.add((dbapi_conn, con_record)) def checkout(self, dbapi_con, con_record, con_proxy): self.proxy_refs[con_proxy] = True def _safe(self, fn): try: fn() except (SystemExit, KeyboardInterrupt): raise except Exception, e: warnings.warn( "testing_reaper couldn't " "rollback/close connection: %s" % e) def rollback_all(self): for rec in self.proxy_refs.keys(): if rec is not None and rec.is_valid: self._safe(rec.rollback) def close_all(self): for rec in self.proxy_refs.keys(): if rec is not None: self._safe(rec._close) def _after_test_ctx(self): pass # this can cause a deadlock with pg8000 - pg8000 acquires # prepared statment lock inside of rollback() - if async gc # is collecting in finalize_fairy, deadlock. # not sure if this should be if pypy/jython only. 
# note that firebird/fdb definitely needs this though for conn, rec in self.conns: self._safe(conn.rollback) def _stop_test_ctx(self): if config.options.low_connections: self._stop_test_ctx_minimal() else: self._stop_test_ctx_aggressive() def _stop_test_ctx_minimal(self): self.close_all() self.conns = set() for rec in self.testing_engines.keys(): if rec is not config.db: rec.dispose() def _stop_test_ctx_aggressive(self): self.close_all() for conn, rec in self.conns: self._safe(conn.close) rec.connection = None self.conns = set() for rec in self.testing_engines.keys(): rec.dispose() def assert_all_closed(self): for rec in self.proxy_refs: if rec.is_valid: assert False testing_reaper = ConnectionKiller() def drop_all_tables(metadata, bind): testing_reaper.close_all() if hasattr(bind, 'close'): bind.close() metadata.drop_all(bind) @decorator def assert_conns_closed(fn, *args, **kw): try: fn(*args, **kw) finally: testing_reaper.assert_all_closed() @decorator def rollback_open_connections(fn, *args, **kw): """Decorator that rolls back all open connections after fn execution.""" try: fn(*args, **kw) finally: testing_reaper.rollback_all() @decorator def close_first(fn, *args, **kw): """Decorator that closes all connections before fn execution.""" testing_reaper.close_all() fn(*args, **kw) @decorator def close_open_connections(fn, *args, **kw): """Decorator that closes all connections after fn execution.""" try: fn(*args, **kw) finally: testing_reaper.close_all() def all_dialects(exclude=None): import sqlalchemy.databases as d for name in d.__all__: # TEMPORARY if exclude and name in exclude: continue mod = getattr(d, name, None) if not mod: mod = getattr(__import__( 'sqlalchemy.databases.%s' % name).databases, name) yield mod.dialect() class ReconnectFixture(object): def __init__(self, dbapi): self.dbapi = dbapi self.connections = [] def __getattr__(self, key): return getattr(self.dbapi, key) def connect(self, *args, **kwargs): conn = self.dbapi.connect(*args, **kwargs) self.connections.append(conn) return conn def _safe(self, fn): try: fn() except (SystemExit, KeyboardInterrupt): raise except Exception, e: warnings.warn( "ReconnectFixture couldn't " "close connection: %s" % e) def shutdown(self): # TODO: this doesn't cover all cases # as nicely as we'd like, namely MySQLdb. # would need to implement R. Brewer's # proxy server idea to get better # coverage. 
for c in list(self.connections): self._safe(c.close) self.connections = [] def reconnecting_engine(url=None, options=None): url = url or config.db_url dbapi = config.db.dialect.dbapi if not options: options = {} options['module'] = ReconnectFixture(dbapi) engine = testing_engine(url, options) _dispose = engine.dispose def dispose(): engine.dialect.dbapi.shutdown() _dispose() engine.test_shutdown = engine.dialect.dbapi.shutdown engine.dispose = dispose return engine def testing_engine(url=None, options=None): """Produce an engine configured by --options with optional overrides.""" from sqlalchemy import create_engine from .assertsql import asserter if not options: use_reaper = True else: use_reaper = options.pop('use_reaper', True) url = url or config.db_url if options is None: options = config.db_opts engine = create_engine(url, **options) if isinstance(engine.pool, pool.QueuePool): engine.pool._timeout = 0 engine.pool._max_overflow = 0 event.listen(engine, 'after_execute', asserter.execute) event.listen(engine, 'after_cursor_execute', asserter.cursor_execute) if use_reaper: event.listen(engine.pool, 'connect', testing_reaper.connect) event.listen(engine.pool, 'checkout', testing_reaper.checkout) testing_reaper.add_engine(engine) return engine def utf8_engine(url=None, options=None): """Hook for dialects or drivers that don't handle utf8 by default.""" from sqlalchemy.engine import url as engine_url if config.db.dialect.name == 'mysql' and \ config.db.driver in ['mysqldb', 'pymysql', 'cymysql']: # note 1.2.1.gamma.6 or greater of MySQLdb # needed here url = url or config.db_url url = engine_url.make_url(url) url.query['charset'] = 'utf8' url.query['use_unicode'] = '0' url = str(url) return testing_engine(url, options) def mock_engine(dialect_name=None): """Provides a mocking engine based on the current testing.db. This is normally used to test DDL generation flow as emitted by an Engine. It should not be used in other cases, as assert_compile() and assert_sql_execution() are much better choices with fewer moving parts. """ from sqlalchemy import create_engine if not dialect_name: dialect_name = config.db.name buffer = [] def executor(sql, *a, **kw): buffer.append(sql) def assert_sql(stmts): recv = [re.sub(r'[\n\t]', '', str(s)) for s in buffer] assert recv == stmts, recv def print_sql(): d = engine.dialect return "\n".join( str(s.compile(dialect=d)) for s in engine.mock ) engine = create_engine(dialect_name + '://', strategy='mock', executor=executor) assert not hasattr(engine, 'mock') engine.mock = buffer engine.assert_sql = assert_sql engine.print_sql = print_sql return engine class DBAPIProxyCursor(object): """Proxy a DBAPI cursor. Tests can provide subclasses of this to intercept DBAPI-level cursor operations. """ def __init__(self, engine, conn): self.engine = engine self.connection = conn self.cursor = conn.cursor() def execute(self, stmt, parameters=None, **kw): if parameters: return self.cursor.execute(stmt, parameters, **kw) else: return self.cursor.execute(stmt, **kw) def executemany(self, stmt, params, **kw): return self.cursor.executemany(stmt, params, **kw) def __getattr__(self, key): return getattr(self.cursor, key) class DBAPIProxyConnection(object): """Proxy a DBAPI connection. Tests can provide subclasses of this to intercept DBAPI-level connection operations. 
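# --- Illustrative sketch (not part of this module): mock_engine(), defined
# above, buffers emitted statements so DDL generation can be asserted without
# a real database.  checkfirst=False is assumed, since a mock engine cannot
# answer "does this table exist" queries.
#
#     from sqlalchemy import MetaData, Table, Column, Integer
#     from sqlalchemy.testing.engines import mock_engine
#
#     m = MetaData()
#     Table('t', m, Column('x', Integer))
#
#     engine = mock_engine('sqlite')
#     m.create_all(engine, checkfirst=False)
#     engine.assert_sql(['CREATE TABLE t (x INTEGER)'])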
""" def __init__(self, engine, cursor_cls): self.conn = self._sqla_unwrap = engine.pool._creator() self.engine = engine self.cursor_cls = cursor_cls def cursor(self): return self.cursor_cls(self.engine, self.conn) def close(self): self.conn.close() def __getattr__(self, key): return getattr(self.conn, key) def proxying_engine(conn_cls=DBAPIProxyConnection, cursor_cls=DBAPIProxyCursor): """Produce an engine that provides proxy hooks for common methods. """ def mock_conn(): return conn_cls(config.db, cursor_cls) return testing_engine(options={'creator': mock_conn}) class ReplayableSession(object): """A simple record/playback tool. This is *not* a mock testing class. It only records a session for later playback and makes no assertions on call consistency whatsoever. It's unlikely to be suitable for anything other than DB-API recording. """ Callable = object() NoAttribute = object() # Py3K #Natives = set([getattr(types, t) # for t in dir(types) if not t.startswith('_')]). \ # union([type(t) if not isinstance(t, type) # else t for t in __builtins__.values()]).\ # difference([getattr(types, t) # for t in ('FunctionType', 'BuiltinFunctionType', # 'MethodType', 'BuiltinMethodType', # 'LambdaType', )]) # Py2K Natives = set([getattr(types, t) for t in dir(types) if not t.startswith('_')]). \ difference([getattr(types, t) for t in ('FunctionType', 'BuiltinFunctionType', 'MethodType', 'BuiltinMethodType', 'LambdaType', 'UnboundMethodType',)]) # end Py2K def __init__(self): self.buffer = deque() def recorder(self, base): return self.Recorder(self.buffer, base) def player(self): return self.Player(self.buffer) class Recorder(object): def __init__(self, buffer, subject): self._buffer = buffer self._subject = subject def __call__(self, *args, **kw): subject, buffer = [object.__getattribute__(self, x) for x in ('_subject', '_buffer')] result = subject(*args, **kw) if type(result) not in ReplayableSession.Natives: buffer.append(ReplayableSession.Callable) return type(self)(buffer, result) else: buffer.append(result) return result @property def _sqla_unwrap(self): return self._subject def __getattribute__(self, key): try: return object.__getattribute__(self, key) except AttributeError: pass subject, buffer = [object.__getattribute__(self, x) for x in ('_subject', '_buffer')] try: result = type(subject).__getattribute__(subject, key) except AttributeError: buffer.append(ReplayableSession.NoAttribute) raise else: if type(result) not in ReplayableSession.Natives: buffer.append(ReplayableSession.Callable) return type(self)(buffer, result) else: buffer.append(result) return result class Player(object): def __init__(self, buffer): self._buffer = buffer def __call__(self, *args, **kw): buffer = object.__getattribute__(self, '_buffer') result = buffer.popleft() if result is ReplayableSession.Callable: return self else: return result @property def _sqla_unwrap(self): return None def __getattribute__(self, key): try: return object.__getattribute__(self, key) except AttributeError: pass buffer = object.__getattribute__(self, '_buffer') result = buffer.popleft() if result is ReplayableSession.Callable: return self elif result is ReplayableSession.NoAttribute: raise AttributeError(key) else: return result SQLAlchemy-0.8.4/lib/sqlalchemy/testing/entities.py0000644000076500000240000000472212251150015023011 0ustar classicstaff00000000000000import sqlalchemy as sa from sqlalchemy import exc as sa_exc _repr_stack = set() class BasicEntity(object): def __init__(self, **kw): for key, value in kw.iteritems(): setattr(self, key, 
value) def __repr__(self): if id(self) in _repr_stack: return object.__repr__(self) _repr_stack.add(id(self)) try: return "%s(%s)" % ( (self.__class__.__name__), ', '.join(["%s=%r" % (key, getattr(self, key)) for key in sorted(self.__dict__.keys()) if not key.startswith('_')])) finally: _repr_stack.remove(id(self)) _recursion_stack = set() class ComparableEntity(BasicEntity): def __hash__(self): return hash(self.__class__) def __ne__(self, other): return not self.__eq__(other) def __eq__(self, other): """'Deep, sparse compare. Deeply compare two entities, following the non-None attributes of the non-persisted object, if possible. """ if other is self: return True elif not self.__class__ == other.__class__: return False if id(self) in _recursion_stack: return True _recursion_stack.add(id(self)) try: # pick the entity thats not SA persisted as the source try: self_key = sa.orm.attributes.instance_state(self).key except sa.orm.exc.NO_STATE: self_key = None if other is None: a = self b = other elif self_key is not None: a = other b = self else: a = self b = other for attr in a.__dict__.keys(): if attr.startswith('_'): continue value = getattr(a, attr) try: # handle lazy loader errors battr = getattr(b, attr) except (AttributeError, sa_exc.UnboundExecutionError): return False if hasattr(value, '__iter__'): if list(value) != list(battr): return False else: if value is not None and value != battr: return False return True finally: _recursion_stack.remove(id(self)) SQLAlchemy-0.8.4/lib/sqlalchemy/testing/exclusions.py0000644000076500000240000002157612251150015023367 0ustar classicstaff00000000000000from __future__ import with_statement import operator from nose import SkipTest from ..util import decorator from . import config from .. import util import contextlib class skip_if(object): def __init__(self, predicate, reason=None): self.predicate = _as_predicate(predicate) self.reason = reason _fails_on = None @property def enabled(self): return not self.predicate() @contextlib.contextmanager def fail_if(self, name='block'): try: yield except Exception, ex: if self.predicate(): print ("%s failed as expected (%s): %s " % ( name, self.predicate, str(ex))) else: raise else: if self.predicate(): raise AssertionError( "Unexpected success for '%s' (%s)" % (name, self.predicate)) def __call__(self, fn): @decorator def decorate(fn, *args, **kw): if self.predicate(): if self.reason: msg = "'%s' : %s" % ( fn.__name__, self.reason ) else: msg = "'%s': %s" % ( fn.__name__, self.predicate ) raise SkipTest(msg) else: if self._fails_on: with self._fails_on.fail_if(name=fn.__name__): return fn(*args, **kw) else: return fn(*args, **kw) return decorate(fn) def fails_on(self, other, reason=None): self._fails_on = skip_if(other, reason) return self class fails_if(skip_if): def __call__(self, fn): @decorator def decorate(fn, *args, **kw): with self.fail_if(name=fn.__name__): return fn(*args, **kw) return decorate(fn) def only_if(predicate, reason=None): predicate = _as_predicate(predicate) return skip_if(NotPredicate(predicate), reason) def succeeds_if(predicate, reason=None): predicate = _as_predicate(predicate) return fails_if(NotPredicate(predicate), reason) class Predicate(object): @classmethod def as_predicate(cls, predicate): if isinstance(predicate, skip_if): return predicate.predicate elif isinstance(predicate, Predicate): return predicate elif isinstance(predicate, list): return OrPredicate([cls.as_predicate(pred) for pred in predicate]) elif isinstance(predicate, tuple): return SpecPredicate(*predicate) elif 
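# --- Illustrative sketch (not part of this module): ComparableEntity's
# "deep, sparse" comparison above only follows the attributes actually set on
# the compared object, shown here with a hypothetical unmapped User class.
#
#     from sqlalchemy.testing.entities import ComparableEntity
#
#     class User(ComparableEntity):
#         pass
#
#     assert User(id=1, name='jack') == User(id=1, name='jack')
#     assert User(name='jack') != User(name='wendy')
#     assert User(id=1) == User(id=1, name='jack')
#     # sparse: only the attributes present on the first object are checked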
isinstance(predicate, basestring): return SpecPredicate(predicate, None, None) elif util.callable(predicate): return LambdaPredicate(predicate) else: assert False, "unknown predicate type: %s" % predicate class BooleanPredicate(Predicate): def __init__(self, value, description=None): self.value = value self.description = description or "boolean %s" % value def __call__(self): return self.value def _as_string(self, negate=False): if negate: return "not " + self.description else: return self.description def __str__(self): return self._as_string() class SpecPredicate(Predicate): def __init__(self, db, op=None, spec=None, description=None): self.db = db self.op = op self.spec = spec self.description = description _ops = { '<': operator.lt, '>': operator.gt, '==': operator.eq, '!=': operator.ne, '<=': operator.le, '>=': operator.ge, 'in': operator.contains, 'between': lambda val, pair: val >= pair[0] and val <= pair[1], } def __call__(self, engine=None): if engine is None: engine = config.db if "+" in self.db: dialect, driver = self.db.split('+') else: dialect, driver = self.db, None if dialect and engine.name != dialect: return False if driver is not None and engine.driver != driver: return False if self.op is not None: assert driver is None, "DBAPI version specs not supported yet" version = _server_version(engine) oper = hasattr(self.op, '__call__') and self.op \ or self._ops[self.op] return oper(version, self.spec) else: return True def _as_string(self, negate=False): if self.description is not None: return self.description elif self.op is None: if negate: return "not %s" % self.db else: return "%s" % self.db else: if negate: return "not %s %s %s" % ( self.db, self.op, self.spec ) else: return "%s %s %s" % ( self.db, self.op, self.spec ) def __str__(self): return self._as_string() class LambdaPredicate(Predicate): def __init__(self, lambda_, description=None, args=None, kw=None): self.lambda_ = lambda_ self.args = args or () self.kw = kw or {} if description: self.description = description elif lambda_.__doc__: self.description = lambda_.__doc__ else: self.description = "custom function" def __call__(self): return self.lambda_(*self.args, **self.kw) def _as_string(self, negate=False): if negate: return "not " + self.description else: return self.description def __str__(self): return self._as_string() class NotPredicate(Predicate): def __init__(self, predicate): self.predicate = predicate def __call__(self, *arg, **kw): return not self.predicate(*arg, **kw) def __str__(self): return self.predicate._as_string(True) class OrPredicate(Predicate): def __init__(self, predicates, description=None): self.predicates = predicates self.description = description def __call__(self, *arg, **kw): for pred in self.predicates: if pred(*arg, **kw): self._str = pred return True return False _str = None def _eval_str(self, negate=False): if self._str is None: if negate: conjunction = " and " else: conjunction = " or " return conjunction.join(p._as_string(negate=negate) for p in self.predicates) else: return self._str._as_string(negate=negate) def _negation_str(self): if self.description is not None: return "Not " + (self.description % {"spec": self._str}) else: return self._eval_str(negate=True) def _as_string(self, negate=False): if negate: return self._negation_str() else: if self.description is not None: return self.description % {"spec": self._str} else: return self._eval_str() def __str__(self): return self._as_string() _as_predicate = Predicate.as_predicate def _is_excluded(db, op, spec): return 
SpecPredicate(db, op, spec)() def _server_version(engine): """Return a server_version_info tuple.""" # force metadata to be retrieved conn = engine.connect() version = getattr(engine.dialect, 'server_version_info', ()) conn.close() return version def db_spec(*dbs): return OrPredicate( Predicate.as_predicate(db) for db in dbs ) def open(): return skip_if(BooleanPredicate(False, "mark as execute")) def closed(): return skip_if(BooleanPredicate(True, "marked as skip")) @decorator def future(fn, *args, **kw): return fails_if(LambdaPredicate(fn, *args, **kw), "Future feature") def fails_on(db, reason=None): return fails_if(SpecPredicate(db), reason) def fails_on_everything_except(*dbs): return succeeds_if( OrPredicate([ SpecPredicate(db) for db in dbs ]) ) def skip(db, reason=None): return skip_if(SpecPredicate(db), reason) def only_on(dbs, reason=None): return only_if( OrPredicate([SpecPredicate(db) for db in util.to_list(dbs)]) ) def exclude(db, op, spec, reason=None): return skip_if(SpecPredicate(db, op, spec), reason) def against(*queries): return OrPredicate([ Predicate.as_predicate(query) for query in queries ])() SQLAlchemy-0.8.4/lib/sqlalchemy/testing/fixtures.py0000644000076500000240000002232112251150015023031 0ustar classicstaff00000000000000from . import config from . import assertions, schema from .util import adict from .engines import drop_all_tables from .entities import BasicEntity, ComparableEntity import sys import sqlalchemy as sa from sqlalchemy.ext.declarative import declarative_base, DeclarativeMeta class TestBase(object): # A sequence of database names to always run, regardless of the # constraints below. __whitelist__ = () # A sequence of requirement names matching testing.requires decorators __requires__ = () # A sequence of dialect names to exclude from the test class. __unsupported_on__ = () # If present, test class is only runnable for the *single* specified # dialect. If you need multiple, use __unsupported_on__ and invert. __only_on__ = None # A sequence of no-arg callables. If any are True, the entire testcase is # skipped. 
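# --- Editorial note: illustrative usage sketch only, not part of the
# --- SQLAlchemy 0.8.4 sources; the test class, method names and reasons
# --- below are hypothetical. ---
# The exclusion helpers defined above (skip_if, fails_on, only_on, exclude)
# are applied as decorators to individual test methods, while class-level
# attributes such as __requires__ and __only_on__ (just defined on TestBase)
# constrain a whole test class.  A plain string spec names a dialect,
# optionally "dialect+driver"; a (db, op, spec) tuple is compared against the
# server version through SpecPredicate:
#
#     from sqlalchemy.testing import exclusions, fixtures
#
#     class WidgetRoundTripTest(fixtures.TestBase):
#         __only_on__ = 'postgresql'           # skip on all other backends
#         __requires__ = ('sequences',)        # name of a requirements property
#
#         @exclusions.fails_on('postgresql+pg8000', 'hypothetical driver quirk')
#         def test_roundtrip(self):
#             ...
#
#         @exclusions.skip_if(('postgresql', '<', (9, 0)), 'needs 9.0 or later')
#         def test_modern_feature(self):
#             ...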
__skip_if__ = None def assert_(self, val, msg=None): assert val, msg class TablesTest(TestBase): # 'once', None run_setup_bind = 'once' # 'once', 'each', None run_define_tables = 'once' # 'once', 'each', None run_create_tables = 'once' # 'once', 'each', None run_inserts = 'each' # 'each', None run_deletes = 'each' # 'once', None run_dispose_bind = None bind = None metadata = None tables = None other = None @classmethod def setup_class(cls): cls._init_class() cls._setup_once_tables() cls._setup_once_inserts() @classmethod def _init_class(cls): if cls.run_define_tables == 'each': if cls.run_create_tables == 'once': cls.run_create_tables = 'each' assert cls.run_inserts in ('each', None) if cls.other is None: cls.other = adict() if cls.tables is None: cls.tables = adict() if cls.bind is None: setattr(cls, 'bind', cls.setup_bind()) if cls.metadata is None: setattr(cls, 'metadata', sa.MetaData()) if cls.metadata.bind is None: cls.metadata.bind = cls.bind @classmethod def _setup_once_inserts(cls): if cls.run_inserts == 'once': cls._load_fixtures() cls.insert_data() @classmethod def _setup_once_tables(cls): if cls.run_define_tables == 'once': cls.define_tables(cls.metadata) if cls.run_create_tables == 'once': cls.metadata.create_all(cls.bind) cls.tables.update(cls.metadata.tables) def _setup_each_tables(self): if self.run_define_tables == 'each': self.tables.clear() if self.run_create_tables == 'each': drop_all_tables(self.metadata, self.bind) self.metadata.clear() self.define_tables(self.metadata) if self.run_create_tables == 'each': self.metadata.create_all(self.bind) self.tables.update(self.metadata.tables) elif self.run_create_tables == 'each': drop_all_tables(self.metadata, self.bind) self.metadata.create_all(self.bind) def _setup_each_inserts(self): if self.run_inserts == 'each': self._load_fixtures() self.insert_data() def _teardown_each_tables(self): # no need to run deletes if tables are recreated on setup if self.run_define_tables != 'each' and self.run_deletes == 'each': for table in reversed(self.metadata.sorted_tables): try: table.delete().execute().close() except sa.exc.DBAPIError, ex: print >> sys.stderr, "Error emptying table %s: %r" % ( table, ex) def setup(self): self._setup_each_tables() self._setup_each_inserts() def teardown(self): self._teardown_each_tables() @classmethod def _teardown_once_metadata_bind(cls): if cls.run_create_tables: drop_all_tables(cls.metadata, cls.bind) if cls.run_dispose_bind == 'once': cls.dispose_bind(cls.bind) cls.metadata.bind = None if cls.run_setup_bind is not None: cls.bind = None @classmethod def teardown_class(cls): cls._teardown_once_metadata_bind() @classmethod def setup_bind(cls): return config.db @classmethod def dispose_bind(cls, bind): if hasattr(bind, 'dispose'): bind.dispose() elif hasattr(bind, 'close'): bind.close() @classmethod def define_tables(cls, metadata): pass @classmethod def fixtures(cls): return {} @classmethod def insert_data(cls): pass def sql_count_(self, count, fn): self.assert_sql_count(self.bind, fn, count) def sql_eq_(self, callable_, statements, with_sequences=None): self.assert_sql(self.bind, callable_, statements, with_sequences) @classmethod def _load_fixtures(cls): """Insert rows as represented by the fixtures() method.""" headers, rows = {}, {} for table, data in cls.fixtures().iteritems(): if len(data) < 2: continue if isinstance(table, basestring): table = cls.tables[table] headers[table] = data[0] rows[table] = data[1:] for table in cls.metadata.sorted_tables: if table not in headers: continue 
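# --- Editorial note: illustrative sketch only, not part of the source above;
# --- the table and column names are hypothetical. ---
# The fixtures() hook consumed here by _load_fixtures() returns a dict mapping
# each Table object (or its string name) to a sequence whose first element is
# a tuple of column names and whose remaining elements are rows:
#
#     @classmethod
#     def fixtures(cls):
#         return dict(
#             users=(
#                 ('user_id', 'name'),
#                 (1, 'ed'),
#                 (2, 'wendy'),
#             ),
#         )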
cls.bind.execute( table.insert(), [dict(zip(headers[table], column_values)) for column_values in rows[table]]) class _ORMTest(object): @classmethod def teardown_class(cls): sa.orm.session.Session.close_all() sa.orm.clear_mappers() class ORMTest(_ORMTest, TestBase): pass class MappedTest(_ORMTest, TablesTest, assertions.AssertsExecutionResults): # 'once', 'each', None run_setup_classes = 'once' # 'once', 'each', None run_setup_mappers = 'each' classes = None @classmethod def setup_class(cls): cls._init_class() if cls.classes is None: cls.classes = adict() cls._setup_once_tables() cls._setup_once_classes() cls._setup_once_mappers() cls._setup_once_inserts() @classmethod def teardown_class(cls): cls._teardown_once_class() cls._teardown_once_metadata_bind() def setup(self): self._setup_each_tables() self._setup_each_mappers() self._setup_each_inserts() def teardown(self): sa.orm.session.Session.close_all() self._teardown_each_mappers() self._teardown_each_tables() @classmethod def _teardown_once_class(cls): cls.classes.clear() _ORMTest.teardown_class() @classmethod def _setup_once_classes(cls): if cls.run_setup_classes == 'once': cls._with_register_classes(cls.setup_classes) @classmethod def _setup_once_mappers(cls): if cls.run_setup_mappers == 'once': cls._with_register_classes(cls.setup_mappers) def _setup_each_mappers(self): if self.run_setup_mappers == 'each': self._with_register_classes(self.setup_mappers) @classmethod def _with_register_classes(cls, fn): """Run a setup method, framing the operation with a Base class that will catch new subclasses to be established within the "classes" registry. """ cls_registry = cls.classes class FindFixture(type): def __init__(cls, classname, bases, dict_): cls_registry[classname] = cls return type.__init__(cls, classname, bases, dict_) class _Base(object): __metaclass__ = FindFixture class Basic(BasicEntity, _Base): pass class Comparable(ComparableEntity, _Base): pass cls.Basic = Basic cls.Comparable = Comparable fn() def _teardown_each_mappers(self): # some tests create mappers in the test bodies # and will define setup_mappers as None - # clear mappers in any case if self.run_setup_mappers != 'once': sa.orm.clear_mappers() @classmethod def setup_classes(cls): pass @classmethod def setup_mappers(cls): pass class DeclarativeMappedTest(MappedTest): run_setup_classes = 'once' run_setup_mappers = 'once' @classmethod def _setup_once_tables(cls): pass @classmethod def _with_register_classes(cls, fn): cls_registry = cls.classes class FindFixtureDeclarative(DeclarativeMeta): def __init__(cls, classname, bases, dict_): cls_registry[classname] = cls return DeclarativeMeta.__init__( cls, classname, bases, dict_) class DeclarativeBasic(object): __table_cls__ = schema.Table _DeclBase = declarative_base(metadata=cls.metadata, metaclass=FindFixtureDeclarative, cls=DeclarativeBasic) cls.DeclarativeBasic = _DeclBase fn() if cls.metadata.tables: cls.metadata.create_all(config.db) SQLAlchemy-0.8.4/lib/sqlalchemy/testing/mock.py0000644000076500000240000000057312251150015022116 0ustar classicstaff00000000000000"""Import stub for mock library. 
""" from __future__ import absolute_import from ..util import py33 if py33: from unittest.mock import MagicMock, Mock, call else: try: from mock import MagicMock, Mock, call except ImportError: raise ImportError( "SQLAlchemy's test suite requires the " "'mock' library as of 0.8.2.") SQLAlchemy-0.8.4/lib/sqlalchemy/testing/pickleable.py0000644000076500000240000000454712251150015023265 0ustar classicstaff00000000000000"""Classes used in pickling tests, need to be at the module level for unpickling. """ from . import fixtures class User(fixtures.ComparableEntity): pass class Order(fixtures.ComparableEntity): pass class Dingaling(fixtures.ComparableEntity): pass class EmailUser(User): pass class Address(fixtures.ComparableEntity): pass # TODO: these are kind of arbitrary.... class Child1(fixtures.ComparableEntity): pass class Child2(fixtures.ComparableEntity): pass class Parent(fixtures.ComparableEntity): pass class Screen(object): def __init__(self, obj, parent=None): self.obj = obj self.parent = parent class Foo(object): def __init__(self, moredata): self.data = 'im data' self.stuff = 'im stuff' self.moredata = moredata __hash__ = object.__hash__ def __eq__(self, other): return other.data == self.data and \ other.stuff == self.stuff and \ other.moredata == self.moredata class Bar(object): def __init__(self, x, y): self.x = x self.y = y __hash__ = object.__hash__ def __eq__(self, other): return other.__class__ is self.__class__ and \ other.x == self.x and \ other.y == self.y def __str__(self): return "Bar(%d, %d)" % (self.x, self.y) class OldSchool: def __init__(self, x, y): self.x = x self.y = y def __eq__(self, other): return other.__class__ is self.__class__ and \ other.x == self.x and \ other.y == self.y class OldSchoolWithoutCompare: def __init__(self, x, y): self.x = x self.y = y class BarWithoutCompare(object): def __init__(self, x, y): self.x = x self.y = y def __str__(self): return "Bar(%d, %d)" % (self.x, self.y) class NotComparable(object): def __init__(self, data): self.data = data def __hash__(self): return id(self) def __eq__(self, other): return NotImplemented def __ne__(self, other): return NotImplemented class BrokenComparable(object): def __init__(self, data): self.data = data def __hash__(self): return id(self) def __eq__(self, other): raise NotImplementedError def __ne__(self, other): raise NotImplementedError SQLAlchemy-0.8.4/lib/sqlalchemy/testing/plugin/0000755000076500000240000000000012251151573022116 5ustar classicstaff00000000000000SQLAlchemy-0.8.4/lib/sqlalchemy/testing/plugin/__init__.py0000644000076500000240000000000012251147171024214 0ustar classicstaff00000000000000SQLAlchemy-0.8.4/lib/sqlalchemy/testing/plugin/noseplugin.py0000644000076500000240000003513212251150015024645 0ustar classicstaff00000000000000"""Enhance nose with extra options and behaviors for running SQLAlchemy tests. When running ./sqla_nose.py, this module is imported relative to the "plugins" package as a top level package by the sqla_nose.py runner, so that the plugin can be loaded with the rest of nose including the coverage plugin before any of SQLAlchemy itself is imported, so that coverage works. When third party libraries use this plugin, it can be imported normally as "from sqlalchemy.testing.plugin import noseplugin". 
""" from __future__ import absolute_import import os import ConfigParser from nose.plugins import Plugin from nose import SkipTest import time import sys import re # late imports fixtures = None engines = None exclusions = None warnings = None profiling = None assertions = None requirements = None config = None util = None file_config = None logging = None db = None db_label = None db_url = None db_opts = {} options = None _existing_engine = None def _log(option, opt_str, value, parser): global logging if not logging: import logging logging.basicConfig() if opt_str.endswith('-info'): logging.getLogger(value).setLevel(logging.INFO) elif opt_str.endswith('-debug'): logging.getLogger(value).setLevel(logging.DEBUG) def _list_dbs(*args): print "Available --db options (use --dburi to override)" for macro in sorted(file_config.options('db')): print "%20s\t%s" % (macro, file_config.get('db', macro)) sys.exit(0) def _server_side_cursors(options, opt_str, value, parser): db_opts['server_side_cursors'] = True def _engine_strategy(options, opt_str, value, parser): if value: db_opts['strategy'] = value pre_configure = [] post_configure = [] def pre(fn): pre_configure.append(fn) return fn def post(fn): post_configure.append(fn) return fn @pre def _setup_options(opt, file_config): global options options = opt @pre def _monkeypatch_cdecimal(options, file_config): if options.cdecimal: import cdecimal sys.modules['decimal'] = cdecimal @post def _engine_uri(options, file_config): global db_label, db_url if options.dburi: db_url = options.dburi db_label = db_url[:db_url.index(':')] elif options.db: db_label = options.db db_url = None if db_url is None: if db_label not in file_config.options('db'): raise RuntimeError( "Unknown URI specifier '%s'. Specify --dbs for known uris." % db_label) db_url = file_config.get('db', db_label) @post def _require(options, file_config): if not(options.require or (file_config.has_section('require') and file_config.items('require'))): return try: import pkg_resources except ImportError: raise RuntimeError("setuptools is required for version requirements") cmdline = [] for requirement in options.require: pkg_resources.require(requirement) cmdline.append(re.split('\s*(=)', requirement, 1)[0]) if file_config.has_section('require'): for label, requirement in file_config.items('require'): if not label == db_label or label.startswith('%s.' % db_label): continue seen = [c for c in cmdline if requirement.startswith(c)] if seen: continue pkg_resources.require(requirement) @post def _engine_pool(options, file_config): if options.mockpool: from sqlalchemy import pool db_opts['poolclass'] = pool.AssertionPool @post def _create_testing_engine(options, file_config): from sqlalchemy.testing import engines, config from sqlalchemy import testing global db config.db = testing.db = db = engines.testing_engine(db_url, db_opts) config.db.connect().close() config.db_opts = db_opts config.db_url = db_url @post def _prep_testing_database(options, file_config): from sqlalchemy.testing import engines from sqlalchemy import schema, inspect # also create alt schemas etc. here? 
if options.dropfirst: e = engines.utf8_engine() inspector = inspect(e) try: view_names = inspector.get_view_names() except NotImplementedError: pass else: for vname in view_names: e.execute(schema._DropView(schema.Table(vname, schema.MetaData()))) try: view_names = inspector.get_view_names(schema="test_schema") except NotImplementedError: pass else: for vname in view_names: e.execute(schema._DropView( schema.Table(vname, schema.MetaData(), schema="test_schema"))) for tname in reversed(inspector.get_table_names(order_by="foreign_key")): e.execute(schema.DropTable(schema.Table(tname, schema.MetaData()))) for tname in reversed(inspector.get_table_names( order_by="foreign_key", schema="test_schema")): e.execute(schema.DropTable( schema.Table(tname, schema.MetaData(), schema="test_schema"))) e.dispose() @post def _set_table_options(options, file_config): from sqlalchemy.testing import schema table_options = schema.table_options for spec in options.tableopts: key, value = spec.split('=') table_options[key] = value if options.mysql_engine: table_options['mysql_engine'] = options.mysql_engine @post def _reverse_topological(options, file_config): if options.reversetop: from sqlalchemy.orm.util import randomize_unitofwork randomize_unitofwork() def _requirements_opt(options, opt_str, value, parser): _setup_requirements(value) @post def _requirements(options, file_config): requirement_cls = file_config.get('sqla_testing', "requirement_cls") _setup_requirements(requirement_cls) def _setup_requirements(argument): from sqlalchemy.testing import config from sqlalchemy import testing if config.requirements is not None: return modname, clsname = argument.split(":") # importlib.import_module() only introduced in 2.7, a little # late mod = __import__(modname) for component in modname.split(".")[1:]: mod = getattr(mod, component) req_cls = getattr(mod, clsname) config.requirements = testing.requires = req_cls(config) @post def _post_setup_options(opt, file_config): from sqlalchemy.testing import config config.options = options config.file_config = file_config @post def _setup_profiling(options, file_config): from sqlalchemy.testing import profiling profiling._profile_stats = profiling.ProfileStatsFile( file_config.get('sqla_testing', 'profile_file')) class NoseSQLAlchemy(Plugin): """ Handles the setup and extra properties required for testing SQLAlchemy """ enabled = True name = 'sqla_testing' score = 100 def options(self, parser, env=os.environ): Plugin.options(self, parser, env) opt = parser.add_option opt("--log-info", action="callback", type="string", callback=_log, help="turn on info logging for (multiple OK)") opt("--log-debug", action="callback", type="string", callback=_log, help="turn on debug logging for (multiple OK)") opt("--require", action="append", dest="require", default=[], help="require a particular driver or module version (multiple OK)") opt("--db", action="store", dest="db", default="default", help="Use prefab database uri") opt('--dbs', action='callback', callback=_list_dbs, help="List available prefab dbs") opt("--dburi", action="store", dest="dburi", help="Database uri (overrides --db)") opt("--dropfirst", action="store_true", dest="dropfirst", help="Drop all tables in the target database first") opt("--mockpool", action="store_true", dest="mockpool", help="Use mock pool (asserts only one connection used)") opt("--low-connections", action="store_true", dest="low_connections", help="Use a low number of distinct connections - i.e. 
for Oracle TNS" ) opt("--enginestrategy", action="callback", type="string", callback=_engine_strategy, help="Engine strategy (plain or threadlocal, defaults to plain)") opt("--reversetop", action="store_true", dest="reversetop", default=False, help="Use a random-ordering set implementation in the ORM (helps " "reveal dependency issues)") opt("--requirements", action="callback", type="string", callback=_requirements_opt, help="requirements class for testing, overrides setup.cfg") opt("--with-cdecimal", action="store_true", dest="cdecimal", default=False, help="Monkeypatch the cdecimal library into Python 'decimal' for all tests") opt("--unhashable", action="store_true", dest="unhashable", default=False, help="Disallow SQLAlchemy from performing a hash() on mapped test objects.") opt("--noncomparable", action="store_true", dest="noncomparable", default=False, help="Disallow SQLAlchemy from performing == on mapped test objects.") opt("--truthless", action="store_true", dest="truthless", default=False, help="Disallow SQLAlchemy from truth-evaluating mapped test objects.") opt("--serverside", action="callback", callback=_server_side_cursors, help="Turn on server side cursors for PG") opt("--mysql-engine", action="store", dest="mysql_engine", default=None, help="Use the specified MySQL storage engine for all tables, default is " "a db-default/InnoDB combo.") opt("--table-option", action="append", dest="tableopts", default=[], help="Add a dialect-specific table option, key=value") opt("--write-profiles", action="store_true", dest="write_profiles", default=False, help="Write/update profiling data.") global file_config file_config = ConfigParser.ConfigParser() file_config.read(['setup.cfg', 'test.cfg']) def configure(self, options, conf): Plugin.configure(self, options, conf) self.options = options for fn in pre_configure: fn(self.options, file_config) def begin(self): # Lazy setup of other options (post coverage) for fn in post_configure: fn(self.options, file_config) # late imports, has to happen after config as well # as nose plugins like coverage global util, fixtures, engines, exclusions, \ assertions, warnings, profiling,\ config from sqlalchemy.testing import fixtures, engines, exclusions, \ assertions, warnings, profiling, config from sqlalchemy import util def describeTest(self, test): return "" def wantFunction(self, fn): if fn.__module__.startswith('sqlalchemy.testing'): return False def wantClass(self, cls): """Return true if you want the main test selector to collect tests from this class, false if you don't, and None if you don't care. 
:Parameters: cls : class The class being examined by the selector """ if not issubclass(cls, fixtures.TestBase): return False elif cls.__name__.startswith('_'): return False else: return True def _do_skips(self, cls): from sqlalchemy.testing import config if hasattr(cls, '__requires__'): def test_suite(): return 'ok' test_suite.__name__ = cls.__name__ for requirement in cls.__requires__: check = getattr(config.requirements, requirement) if not check.enabled: raise SkipTest( check.reason if check.reason else ( "'%s' unsupported on DB implementation '%s'" % ( cls.__name__, config.db.name ) ) ) if cls.__unsupported_on__: spec = exclusions.db_spec(*cls.__unsupported_on__) if spec(config.db): raise SkipTest( "'%s' unsupported on DB implementation '%s'" % ( cls.__name__, config.db.name) ) if getattr(cls, '__only_on__', None): spec = exclusions.db_spec(*util.to_list(cls.__only_on__)) if not spec(config.db): raise SkipTest( "'%s' unsupported on DB implementation '%s'" % ( cls.__name__, config.db.name) ) if getattr(cls, '__skip_if__', False): for c in getattr(cls, '__skip_if__'): if c(): raise SkipTest("'%s' skipped by %s" % ( cls.__name__, c.__name__) ) for db, op, spec in getattr(cls, '__excluded_on__', ()): exclusions.exclude(db, op, spec, "'%s' unsupported on DB %s version %s" % ( cls.__name__, config.db.name, exclusions._server_version(config.db))) def beforeTest(self, test): warnings.resetwarnings() profiling._current_test = test.id() def afterTest(self, test): engines.testing_reaper._after_test_ctx() warnings.resetwarnings() def _setup_engine(self, ctx): if getattr(ctx, '__engine_options__', None): global _existing_engine _existing_engine = config.db config.db = engines.testing_engine(options=ctx.__engine_options__) def _restore_engine(self, ctx): global _existing_engine if _existing_engine is not None: config.db = _existing_engine _existing_engine = None def startContext(self, ctx): if not isinstance(ctx, type) \ or not issubclass(ctx, fixtures.TestBase): return self._do_skips(ctx) self._setup_engine(ctx) def stopContext(self, ctx): if not isinstance(ctx, type) \ or not issubclass(ctx, fixtures.TestBase): return engines.testing_reaper._stop_test_ctx() if not options.low_connections: assertions.global_cleanup_assertions() self._restore_engine(ctx) SQLAlchemy-0.8.4/lib/sqlalchemy/testing/profiling.py0000644000076500000240000002300312251150015023147 0ustar classicstaff00000000000000"""Profiling support for unit and performance tests. These are special purpose profiling methods which operate in a more fine-grained way than nose's profiling plugin. """ import os import sys from .util import gc_collect, decorator from . import config from nose import SkipTest import pstats import time import collections from .. import util try: import cProfile except ImportError: cProfile = None from ..util import jython, pypy, win32, update_wrapper _current_test = None def profiled(target=None, **target_opts): """Function profiling. @profiled() or @profiled(report=True, sort=('calls',), limit=20) Outputs profiling info for a decorated function. 
""" profile_config = {'targets': set(), 'report': True, 'print_callers': False, 'print_callees': False, 'graphic': False, 'sort': ('time', 'calls'), 'limit': None} if target is None: target = 'anonymous_target' filename = "%s.prof" % target @decorator def decorate(fn, *args, **kw): elapsed, load_stats, result = _profile( filename, fn, *args, **kw) graphic = target_opts.get('graphic', profile_config['graphic']) if graphic: os.system("runsnake %s" % filename) else: report = target_opts.get('report', profile_config['report']) if report: sort_ = target_opts.get('sort', profile_config['sort']) limit = target_opts.get('limit', profile_config['limit']) print ("Profile report for target '%s' (%s)" % ( target, filename) ) stats = load_stats() stats.sort_stats(*sort_) if limit: stats.print_stats(limit) else: stats.print_stats() print_callers = target_opts.get( 'print_callers', profile_config['print_callers']) if print_callers: stats.print_callers() print_callees = target_opts.get( 'print_callees', profile_config['print_callees']) if print_callees: stats.print_callees() os.unlink(filename) return result return decorate class ProfileStatsFile(object): """"Store per-platform/fn profiling results in a file. We're still targeting Py2.5, 2.4 on 0.7 with no dependencies, so no json lib :( need to roll something silly """ def __init__(self, filename): self.write = ( config.options is not None and config.options.write_profiles ) self.fname = os.path.abspath(filename) self.short_fname = os.path.split(self.fname)[-1] self.data = collections.defaultdict( lambda: collections.defaultdict(dict)) self._read() if self.write: # rewrite for the case where features changed, # etc. self._write() @util.memoized_property def platform_key(self): dbapi_key = config.db.name + "_" + config.db.driver # keep it at 2.7, 3.1, 3.2, etc. for now. py_version = '.'.join([str(v) for v in sys.version_info[0:2]]) platform_tokens = [py_version] platform_tokens.append(dbapi_key) if jython: platform_tokens.append("jython") if pypy: platform_tokens.append("pypy") if win32: platform_tokens.append("win") _has_cext = config.requirements._has_cextensions() platform_tokens.append(_has_cext and "cextensions" or "nocextensions") return "_".join(platform_tokens) def has_stats(self): test_key = _current_test return ( test_key in self.data and self.platform_key in self.data[test_key] ) def result(self, callcount): test_key = _current_test per_fn = self.data[test_key] per_platform = per_fn[self.platform_key] if 'counts' not in per_platform: per_platform['counts'] = counts = [] else: counts = per_platform['counts'] if 'current_count' not in per_platform: per_platform['current_count'] = current_count = 0 else: current_count = per_platform['current_count'] has_count = len(counts) > current_count if not has_count: counts.append(callcount) if self.write: self._write() result = None else: result = per_platform['lineno'], counts[current_count] per_platform['current_count'] += 1 return result def _header(self): return \ "# %s\n"\ "# This file is written out on a per-environment basis.\n"\ "# For each test in aaa_profiling, the corresponding function and \n"\ "# environment is located within this file. If it doesn't exist,\n"\ "# the test is skipped.\n"\ "# If a callcount does exist, it is compared to what we received. 
\n"\ "# assertions are raised if the counts do not match.\n"\ "# \n"\ "# To add a new callcount test, apply the function_call_count \n"\ "# decorator and re-run the tests using the --write-profiles \n"\ "# option - this file will be rewritten including the new count.\n"\ "# \n"\ "" % (self.fname) def _read(self): try: profile_f = open(self.fname) except IOError: return for lineno, line in enumerate(profile_f): line = line.strip() if not line or line.startswith("#"): continue test_key, platform_key, counts = line.split() per_fn = self.data[test_key] per_platform = per_fn[platform_key] c = [int(count) for count in counts.split(",")] per_platform['counts'] = c per_platform['lineno'] = lineno + 1 per_platform['current_count'] = 0 profile_f.close() def _write(self): print("Writing profile file %s" % self.fname) profile_f = open(self.fname, "w") profile_f.write(self._header()) for test_key in sorted(self.data): per_fn = self.data[test_key] profile_f.write("\n# TEST: %s\n\n" % test_key) for platform_key in sorted(per_fn): per_platform = per_fn[platform_key] c = ",".join(str(count) for count in per_platform['counts']) profile_f.write("%s %s %s\n" % (test_key, platform_key, c)) profile_f.close() def function_call_count(variance=0.05): """Assert a target for a test case's function call count. The main purpose of this assertion is to detect changes in callcounts for various functions - the actual number is not as important. Callcounts are stored in a file keyed to Python version and OS platform information. This file is generated automatically for new tests, and versioned so that unexpected changes in callcounts will be detected. """ def decorate(fn): def wrap(*args, **kw): if cProfile is None: raise SkipTest("cProfile is not installed") if not _profile_stats.has_stats() and not _profile_stats.write: # run the function anyway, to support dependent tests # (not a great idea but we have these in test_zoomark) fn(*args, **kw) raise SkipTest("No profiling stats available on this " "platform for this function. Run tests with " "--write-profiles to add statistics to %s for " "this platform." % _profile_stats.short_fname) gc_collect() timespent, load_stats, fn_result = _profile( fn, *args, **kw ) stats = load_stats() callcount = stats.total_calls expected = _profile_stats.result(callcount) if expected is None: expected_count = None else: line_no, expected_count = expected print("Pstats calls: %d Expected %s" % ( callcount, expected_count ) ) stats.print_stats() #stats.print_callers() if expected_count: deviance = int(callcount * variance) if abs(callcount - expected_count) > deviance: raise AssertionError( "Adjusted function call count %s not within %s%% " "of expected %s. (Delete line %d of file %s to " "regenerate this callcount, when tests are run " "with --write-profiles.)" % ( callcount, (variance * 100), expected_count, line_no, _profile_stats.fname)) return fn_result return update_wrapper(wrap, fn) return decorate def _profile(fn, *args, **kw): filename = "%s.prof" % fn.__name__ def load_stats(): st = pstats.Stats(filename) os.unlink(filename) return st began = time.time() cProfile.runctx('result = fn(*args, **kw)', globals(), locals(), filename=filename) ended = time.time() return ended - began, load_stats, locals()['result'] SQLAlchemy-0.8.4/lib/sqlalchemy/testing/requirements.py0000644000076500000240000003007312251150015023706 0ustar classicstaff00000000000000"""Global database feature support policy. Provides decorators to mark tests requiring specific feature support from the target database. 
External dialect test suites should subclass SuiteRequirements to provide specific inclusion/exlusions. """ from . import exclusions, config class Requirements(object): def __init__(self, config): self.config = config @property def db(self): return config.db class SuiteRequirements(Requirements): @property def create_table(self): """target platform can emit basic CreateTable DDL.""" return exclusions.open() @property def drop_table(self): """target platform can emit basic DropTable DDL.""" return exclusions.open() @property def foreign_keys(self): """Target database must support foreign keys.""" return exclusions.open() @property def on_update_cascade(self): """"target database must support ON UPDATE..CASCADE behavior in foreign keys.""" return exclusions.open() @property def deferrable_fks(self): return exclusions.closed() @property def on_update_or_deferrable_fks(self): # TODO: exclusions should be composable, # somehow only_if([x, y]) isn't working here, negation/conjunctions # getting confused. return exclusions.only_if( lambda: self.on_update_cascade.enabled or self.deferrable_fks.enabled ) @property def self_referential_foreign_keys(self): """Target database must support self-referential foreign keys.""" return exclusions.open() @property def foreign_key_ddl(self): """Target database must support the DDL phrases for FOREIGN KEY.""" return exclusions.open() @property def named_constraints(self): """target database must support names for constraints.""" return exclusions.open() @property def subqueries(self): """Target database must support subqueries.""" return exclusions.open() @property def offset(self): """target database can render OFFSET, or an equivalent, in a SELECT.""" return exclusions.open() @property def boolean_col_expressions(self): """Target database must support boolean expressions as columns""" return exclusions.closed() @property def nullsordering(self): """Target backends that support nulls ordering.""" return exclusions.closed() @property def standalone_binds(self): """target database/driver supports bound parameters as column expressions without being in the context of a typed column. """ return exclusions.closed() @property def intersect(self): """Target database must support INTERSECT or equivalent.""" return exclusions.closed() @property def except_(self): """Target database must support EXCEPT or equivalent (i.e. MINUS).""" return exclusions.closed() @property def window_functions(self): """Target database must support window functions.""" return exclusions.closed() @property def autoincrement_insert(self): """target platform generates new surrogate integer primary key values when insert() is executed, excluding the pk column.""" return exclusions.open() @property def empty_inserts(self): """target platform supports INSERT with no values, i.e. INSERT DEFAULT VALUES or equivalent.""" return exclusions.only_if( lambda: self.config.db.dialect.supports_empty_insert or \ self.config.db.dialect.supports_default_values, "empty inserts not supported" ) @property def insert_from_select(self): """target platform supports INSERT from a SELECT.""" return exclusions.open() @property def returning(self): """target platform supports RETURNING.""" return exclusions.only_if( lambda: self.config.db.dialect.implicit_returning, "'returning' not supported by database" ) @property def denormalized_names(self): """Target database must have 'denormalized', i.e. 
UPPERCASE as case insensitive names.""" return exclusions.skip_if( lambda: not self.db.dialect.requires_name_normalize, "Backend does not require denormalized names." ) @property def multivalues_inserts(self): """target database must support multiple VALUES clauses in an INSERT statement.""" return exclusions.skip_if( lambda: not self.db.dialect.supports_multivalues_insert, "Backend does not support multirow inserts." ) @property def implements_get_lastrowid(self): """"target dialect implements the executioncontext.get_lastrowid() method without reliance on RETURNING. """ return exclusions.open() @property def emulated_lastrowid(self): """"target dialect retrieves cursor.lastrowid, or fetches from a database-side function after an insert() construct executes, within the get_lastrowid() method. Only dialects that "pre-execute", or need RETURNING to get last inserted id, would return closed/fail/skip for this. """ return exclusions.closed() @property def dbapi_lastrowid(self): """"target platform includes a 'lastrowid' accessor on the DBAPI cursor object. """ return exclusions.closed() @property def views(self): """Target database must support VIEWs.""" return exclusions.closed() @property def schemas(self): """Target database must support external schemas, and have one named 'test_schema'.""" return exclusions.closed() @property def sequences(self): """Target database must support SEQUENCEs.""" return exclusions.only_if([ lambda: self.config.db.dialect.supports_sequences ], "no sequence support") @property def sequences_optional(self): """Target database supports sequences, but also optionally as a means of generating new PK values.""" return exclusions.only_if([ lambda: self.config.db.dialect.supports_sequences and \ self.config.db.dialect.sequences_optional ], "no sequence support, or sequences not optional") @property def reflects_pk_names(self): return exclusions.closed() @property def table_reflection(self): return exclusions.open() @property def view_reflection(self): return self.views @property def schema_reflection(self): return self.schemas @property def primary_key_constraint_reflection(self): return exclusions.open() @property def foreign_key_constraint_reflection(self): return exclusions.open() @property def index_reflection(self): return exclusions.open() @property def unique_constraint_reflection(self): """target dialect supports reflection of unique constraints""" return exclusions.open() @property def unbounded_varchar(self): """Target database must support VARCHAR with no length""" return exclusions.open() @property def unicode_data(self): """Target database/dialect must support Python unicode objects with non-ASCII characters represented, delivered as bound parameters as well as in result rows. 
""" return exclusions.open() @property def unicode_ddl(self): """Target driver must support some degree of non-ascii symbol names.""" return exclusions.closed() @property def datetime(self): """target dialect supports representation of Python datetime.datetime() objects.""" return exclusions.open() @property def datetime_microseconds(self): """target dialect supports representation of Python datetime.datetime() with microsecond objects.""" return exclusions.open() @property def datetime_historic(self): """target dialect supports representation of Python datetime.datetime() objects with historic (pre 1970) values.""" return exclusions.closed() @property def date(self): """target dialect supports representation of Python datetime.date() objects.""" return exclusions.open() @property def date_historic(self): """target dialect supports representation of Python datetime.datetime() objects with historic (pre 1970) values.""" return exclusions.closed() @property def time(self): """target dialect supports representation of Python datetime.time() objects.""" return exclusions.open() @property def time_microseconds(self): """target dialect supports representation of Python datetime.time() with microsecond objects.""" return exclusions.open() @property def precision_numerics_general(self): """target backend has general support for moderately high-precision numerics.""" return exclusions.open() @property def precision_numerics_enotation_small(self): """target backend supports Decimal() objects using E notation to represent very small values.""" return exclusions.closed() @property def precision_numerics_enotation_large(self): """target backend supports Decimal() objects using E notation to represent very large values.""" return exclusions.closed() @property def precision_numerics_many_significant_digits(self): """target backend supports values with many digits on both sides, such as 319438950232418390.273596, 87673.594069654243 """ return exclusions.closed() @property def precision_numerics_retains_significant_digits(self): """A precision numeric type will return empty significant digits, i.e. a value such as 10.000 will come back in Decimal form with the .000 maintained.""" return exclusions.closed() @property def text_type(self): """Target database must support an unbounded Text() " "type such as TEXT or CLOB""" return exclusions.open() @property def empty_strings_varchar(self): """target database can persist/return an empty string with a varchar. """ return exclusions.open() @property def empty_strings_text(self): """target database can persist/return an empty string with an unbounded text.""" return exclusions.open() @property def update_from(self): """Target must support UPDATE..FROM syntax""" return exclusions.closed() @property def update_where_target_in_subquery(self): """Target must support UPDATE where the same table is present in a subquery in the WHERE clause. 
This is an ANSI-standard syntax that apparently MySQL can't handle, such as: UPDATE documents SET flag=1 WHERE documents.title IN (SELECT max(documents.title) AS title FROM documents GROUP BY documents.user_id ) """ return exclusions.open() @property def mod_operator_as_percent_sign(self): """target database must use a plain percent '%' as the 'modulus' operator.""" return exclusions.closed() @property def unicode_connections(self): """Target driver must support non-ASCII characters being passed at all.""" return exclusions.open() @property def skip_mysql_on_windows(self): """Catchall for a large variety of MySQL on Windows failures""" return exclusions.open() def _has_mysql_on_windows(self): return False def _has_mysql_fully_case_sensitive(self): return False SQLAlchemy-0.8.4/lib/sqlalchemy/testing/runner.py0000644000076500000240000000255312251150015022476 0ustar classicstaff00000000000000#!/usr/bin/env python """ Nose test runner module. This script is a front-end to "nosetests" which installs SQLAlchemy's testing plugin into the local environment. The script is intended to be used by third-party dialects and extensions that run within SQLAlchemy's testing framework. The runner can be invoked via:: python -m sqlalchemy.testing.runner The script is then essentially the same as the "nosetests" script, including all of the usual Nose options. The test environment requires that a setup.cfg is locally present including various required options. Note that when using this runner, Nose's "coverage" plugin will not be able to provide coverage for SQLAlchemy itself, since SQLAlchemy is imported into sys.modules before coverage is started. The special script sqla_nose.py is provided as a top-level script which loads the plugin in a special (somewhat hacky) way so that coverage against SQLAlchemy itself is possible. """ from sqlalchemy.testing.plugin.noseplugin import NoseSQLAlchemy import nose def main(): nose.main(addplugins=[NoseSQLAlchemy()]) def setup_py_test(): """Runner to use for the 'test_suite' entry of your setup.py. Prevents any name clash shenanigans from the command line argument "test" that the "setup.py test" command sends to nose. """ nose.main(addplugins=[NoseSQLAlchemy()], argv=['runner']) SQLAlchemy-0.8.4/lib/sqlalchemy/testing/schema.py0000644000076500000240000000611412251150015022422 0ustar classicstaff00000000000000 from . import exclusions from .. import schema, event from . import config __all__ = 'Table', 'Column', table_options = {} def Table(*args, **kw): """A schema.Table wrapper/hook for dialect-specific tweaks.""" test_opts = dict([(k, kw.pop(k)) for k in kw.keys() if k.startswith('test_')]) kw.update(table_options) if exclusions.against('mysql'): if 'mysql_engine' not in kw and 'mysql_type' not in kw: if 'test_needs_fk' in test_opts or 'test_needs_acid' in test_opts: kw['mysql_engine'] = 'InnoDB' else: kw['mysql_engine'] = 'MyISAM' # Apply some default cascading rules for self-referential foreign keys. # MySQL InnoDB has some issues around seleting self-refs too. if exclusions.against('firebird'): table_name = args[0] unpack = (config.db.dialect. identifier_preparer.unformat_identifiers) # Only going after ForeignKeys in Columns. May need to # expand to ForeignKeyConstraint too. 
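# --- Editorial note: illustrative sketch only, not part of the source above;
# --- the package metadata is a hypothetical example, and the dotted path is
# --- inferred from the setup_py_test() docstring in runner.py. ---
# runner.setup_py_test(), shown earlier in this listing, is documented as the
# target for the 'test_suite' entry of a third-party dialect's setup.py:
#
#     # setup.py (fragment)
#     #   setup(
#     #       name='sqlalchemy-somedialect',
#     #       ...,
#     #       tests_require=['nose'],
#     #       test_suite='sqlalchemy.testing.runner.setup_py_test',
#     #   )
#
# The same suite can also be invoked directly, per the runner module
# docstring, with:  python -m sqlalchemy.testing.runner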
fks = [fk for col in args if isinstance(col, schema.Column) for fk in col.foreign_keys] for fk in fks: # root around in raw spec ref = fk._colspec if isinstance(ref, schema.Column): name = ref.table.name else: # take just the table name: on FB there cannot be # a schema, so the first element is always the # table name, possibly followed by the field name name = unpack(ref)[0] if name == table_name: if fk.ondelete is None: fk.ondelete = 'CASCADE' if fk.onupdate is None: fk.onupdate = 'CASCADE' return schema.Table(*args, **kw) def Column(*args, **kw): """A schema.Column wrapper/hook for dialect-specific tweaks.""" test_opts = dict([(k, kw.pop(k)) for k in kw.keys() if k.startswith('test_')]) if not config.requirements.foreign_key_ddl.enabled: args = [arg for arg in args if not isinstance(arg, schema.ForeignKey)] col = schema.Column(*args, **kw) if 'test_needs_autoincrement' in test_opts and \ kw.get('primary_key', False): # allow any test suite to pick up on this col.info['test_needs_autoincrement'] = True # hardcoded rule for firebird, oracle; this should # be moved out if exclusions.against('firebird', 'oracle'): def add_seq(c, tbl): c._init_items( schema.Sequence(_truncate_name( config.db.dialect, tbl.name + '_' + c.name + '_seq'), optional=True) ) event.listen(col, 'after_parent_attach', add_seq, propagate=True) return col def _truncate_name(dialect, name): if len(name) > dialect.max_identifier_length: return name[0:max(dialect.max_identifier_length - 6, 0)] + \ "_" + hex(hash(name) % 64)[2:] else: return name SQLAlchemy-0.8.4/lib/sqlalchemy/testing/suite/0000755000076500000240000000000012251151573021751 5ustar classicstaff00000000000000SQLAlchemy-0.8.4/lib/sqlalchemy/testing/suite/__init__.py0000644000076500000240000000056012251150015024051 0ustar classicstaff00000000000000 from sqlalchemy.testing.suite.test_ddl import * from sqlalchemy.testing.suite.test_insert import * from sqlalchemy.testing.suite.test_sequence import * from sqlalchemy.testing.suite.test_results import * from sqlalchemy.testing.suite.test_update_delete import * from sqlalchemy.testing.suite.test_reflection import * from sqlalchemy.testing.suite.test_types import * SQLAlchemy-0.8.4/lib/sqlalchemy/testing/suite/test_ddl.py0000644000076500000240000000240712251150015024116 0ustar classicstaff00000000000000from __future__ import with_statement from .. import fixtures, config, util from ..config import requirements from ..assertions import eq_ from sqlalchemy import Table, Column, Integer, String class TableDDLTest(fixtures.TestBase): def _simple_fixture(self): return Table('test_table', self.metadata, Column('id', Integer, primary_key=True, autoincrement=False), Column('data', String(50)) ) def _simple_roundtrip(self, table): with config.db.begin() as conn: conn.execute(table.insert().values((1, 'some data'))) result = conn.execute(table.select()) eq_( result.first(), (1, 'some data') ) @requirements.create_table @util.provide_metadata def test_create_table(self): table = self._simple_fixture() table.create( config.db, checkfirst=False ) self._simple_roundtrip(table) @requirements.drop_table @util.provide_metadata def test_drop_table(self): table = self._simple_fixture() table.create( config.db, checkfirst=False ) table.drop( config.db, checkfirst=False ) __all__ = ('TableDDLTest', ) SQLAlchemy-0.8.4/lib/sqlalchemy/testing/suite/test_insert.py0000644000076500000240000001363512251150015024664 0ustar classicstaff00000000000000from .. import fixtures, config from ..config import requirements from .. 
import exclusions from ..assertions import eq_ from .. import engines from sqlalchemy import Integer, String, select, util from ..schema import Table, Column class LastrowidTest(fixtures.TablesTest): run_deletes = 'each' __requires__ = 'implements_get_lastrowid', 'autoincrement_insert' __engine_options__ = {"implicit_returning": False} @classmethod def define_tables(cls, metadata): Table('autoinc_pk', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('data', String(50)) ) Table('manual_pk', metadata, Column('id', Integer, primary_key=True, autoincrement=False), Column('data', String(50)) ) def _assert_round_trip(self, table, conn): row = conn.execute(table.select()).first() eq_( row, (config.db.dialect.default_sequence_base, "some data") ) def test_autoincrement_on_insert(self): config.db.execute( self.tables.autoinc_pk.insert(), data="some data" ) self._assert_round_trip(self.tables.autoinc_pk, config.db) def test_last_inserted_id(self): r = config.db.execute( self.tables.autoinc_pk.insert(), data="some data" ) pk = config.db.scalar(select([self.tables.autoinc_pk.c.id])) eq_( r.inserted_primary_key, [pk] ) @exclusions.fails_if(lambda: util.pypy, "lastrowid not maintained after " "connection close") @requirements.dbapi_lastrowid def test_native_lastrowid_autoinc(self): r = config.db.execute( self.tables.autoinc_pk.insert(), data="some data" ) lastrowid = r.lastrowid pk = config.db.scalar(select([self.tables.autoinc_pk.c.id])) eq_( lastrowid, pk ) class InsertBehaviorTest(fixtures.TablesTest): run_deletes = 'each' @classmethod def define_tables(cls, metadata): Table('autoinc_pk', metadata, Column('id', Integer, primary_key=True, \ test_needs_autoincrement=True), Column('data', String(50)) ) def test_autoclose_on_insert(self): if requirements.returning.enabled: engine = engines.testing_engine( options={'implicit_returning': False}) else: engine = config.db r = engine.execute( self.tables.autoinc_pk.insert(), data="some data" ) assert r.closed assert r.is_insert assert not r.returns_rows @requirements.returning def test_autoclose_on_insert_implicit_returning(self): r = config.db.execute( self.tables.autoinc_pk.insert(), data="some data" ) assert r.closed assert r.is_insert assert not r.returns_rows @requirements.empty_inserts def test_empty_insert(self): r = config.db.execute( self.tables.autoinc_pk.insert(), ) assert r.closed r = config.db.execute( self.tables.autoinc_pk.select().\ where(self.tables.autoinc_pk.c.id != None) ) assert len(r.fetchall()) @requirements.insert_from_select def test_insert_from_select(self): table = self.tables.autoinc_pk config.db.execute( table.insert(), [ dict(data="data1"), dict(data="data2"), dict(data="data3"), ] ) config.db.execute( table.insert(inline=True). 
from_select( ("id", "data",), select([table.c.id + 5, table.c.data]).where( table.c.data.in_(["data2", "data3"])) ), ) eq_( config.db.execute( select([table.c.data]).order_by(table.c.data) ).fetchall(), [("data1", ), ("data2", ), ("data2", ), ("data3", ), ("data3", )] ) class ReturningTest(fixtures.TablesTest): run_deletes = 'each' __requires__ = 'returning', 'autoincrement_insert' __engine_options__ = {"implicit_returning": True} def _assert_round_trip(self, table, conn): row = conn.execute(table.select()).first() eq_( row, (config.db.dialect.default_sequence_base, "some data") ) @classmethod def define_tables(cls, metadata): Table('autoinc_pk', metadata, Column('id', Integer, primary_key=True, \ test_needs_autoincrement=True), Column('data', String(50)) ) def test_explicit_returning_pk(self): engine = config.db table = self.tables.autoinc_pk r = engine.execute( table.insert().returning( table.c.id), data="some data" ) pk = r.first()[0] fetched_pk = config.db.scalar(select([table.c.id])) eq_(fetched_pk, pk) def test_autoincrement_on_insert_implcit_returning(self): config.db.execute( self.tables.autoinc_pk.insert(), data="some data" ) self._assert_round_trip(self.tables.autoinc_pk, config.db) def test_last_inserted_id_implicit_returning(self): r = config.db.execute( self.tables.autoinc_pk.insert(), data="some data" ) pk = config.db.scalar(select([self.tables.autoinc_pk.c.id])) eq_( r.inserted_primary_key, [pk] ) __all__ = ('LastrowidTest', 'InsertBehaviorTest', 'ReturningTest') SQLAlchemy-0.8.4/lib/sqlalchemy/testing/suite/test_reflection.py0000644000076500000240000004210012251150015025477 0ustar classicstaff00000000000000from __future__ import with_statement import sqlalchemy as sa from sqlalchemy import exc as sa_exc from sqlalchemy import types as sql_types from sqlalchemy import inspect from sqlalchemy import MetaData, Integer, String from sqlalchemy.engine.reflection import Inspector from sqlalchemy.testing import engines, fixtures from sqlalchemy.testing.schema import Table, Column from sqlalchemy.testing import eq_, assert_raises_message from sqlalchemy import testing from .. import config import operator from sqlalchemy.schema import DDL, Index from sqlalchemy import event metadata, users = None, None class HasTableTest(fixtures.TablesTest): @classmethod def define_tables(cls, metadata): Table('test_table', metadata, Column('id', Integer, primary_key=True), Column('data', String(50)) ) def test_has_table(self): with config.db.begin() as conn: assert config.db.dialect.has_table(conn, "test_table") assert not config.db.dialect.has_table(conn, "nonexistent_table") class ComponentReflectionTest(fixtures.TablesTest): run_inserts = run_deletes = None @classmethod def define_tables(cls, metadata): cls.define_reflected_tables(metadata, None) if testing.requires.schemas.enabled: cls.define_reflected_tables(metadata, "test_schema") @classmethod def define_reflected_tables(cls, metadata, schema): if schema: schema_prefix = schema + "." 
else: schema_prefix = "" if testing.requires.self_referential_foreign_keys.enabled: users = Table('users', metadata, Column('user_id', sa.INT, primary_key=True), Column('test1', sa.CHAR(5), nullable=False), Column('test2', sa.Float(5), nullable=False), Column('parent_user_id', sa.Integer, sa.ForeignKey('%susers.user_id' % schema_prefix)), schema=schema, test_needs_fk=True, ) else: users = Table('users', metadata, Column('user_id', sa.INT, primary_key=True), Column('test1', sa.CHAR(5), nullable=False), Column('test2', sa.Float(5), nullable=False), schema=schema, test_needs_fk=True, ) Table("dingalings", metadata, Column('dingaling_id', sa.Integer, primary_key=True), Column('address_id', sa.Integer, sa.ForeignKey('%semail_addresses.address_id' % schema_prefix)), Column('data', sa.String(30)), schema=schema, test_needs_fk=True, ) Table('email_addresses', metadata, Column('address_id', sa.Integer), Column('remote_user_id', sa.Integer, sa.ForeignKey(users.c.user_id)), Column('email_address', sa.String(20)), sa.PrimaryKeyConstraint('address_id', name='email_ad_pk'), schema=schema, test_needs_fk=True, ) if testing.requires.index_reflection.enabled: cls.define_index(metadata, users) if testing.requires.view_reflection.enabled: cls.define_views(metadata, schema) @classmethod def define_index(cls, metadata, users): Index("users_t_idx", users.c.test1, users.c.test2) Index("users_all_idx", users.c.user_id, users.c.test2, users.c.test1) @classmethod def define_views(cls, metadata, schema): for table_name in ('users', 'email_addresses'): fullname = table_name if schema: fullname = "%s.%s" % (schema, table_name) view_name = fullname + '_v' query = "CREATE VIEW %s AS SELECT * FROM %s" % ( view_name, fullname) event.listen( metadata, "after_create", DDL(query) ) event.listen( metadata, "before_drop", DDL("DROP VIEW %s" % view_name) ) @testing.requires.schema_reflection def test_get_schema_names(self): insp = inspect(testing.db) self.assert_('test_schema' in insp.get_schema_names()) @testing.requires.schema_reflection def test_dialect_initialize(self): engine = engines.testing_engine() assert not hasattr(engine.dialect, 'default_schema_name') inspect(engine) assert hasattr(engine.dialect, 'default_schema_name') @testing.requires.schema_reflection def test_get_default_schema_name(self): insp = inspect(testing.db) eq_(insp.default_schema_name, testing.db.dialect.default_schema_name) @testing.provide_metadata def _test_get_table_names(self, schema=None, table_type='table', order_by=None): meta = self.metadata users, addresses, dingalings = self.tables.users, \ self.tables.email_addresses, self.tables.dingalings insp = inspect(meta.bind) if table_type == 'view': table_names = insp.get_view_names(schema) table_names.sort() answer = ['email_addresses_v', 'users_v'] else: table_names = insp.get_table_names(schema, order_by=order_by) if order_by == 'foreign_key': answer = ['users', 'email_addresses', 'dingalings'] eq_(table_names, answer) else: answer = ['dingalings', 'email_addresses', 'users'] eq_(sorted(table_names), answer) @testing.requires.table_reflection def test_get_table_names(self): self._test_get_table_names() @testing.requires.table_reflection @testing.requires.foreign_key_constraint_reflection def test_get_table_names_fks(self): self._test_get_table_names(order_by='foreign_key') @testing.requires.table_reflection @testing.requires.schemas def test_get_table_names_with_schema(self): self._test_get_table_names('test_schema') @testing.requires.view_reflection def test_get_view_names(self): 
self._test_get_table_names(table_type='view') @testing.requires.view_reflection @testing.requires.schemas def test_get_view_names_with_schema(self): self._test_get_table_names('test_schema', table_type='view') def _test_get_columns(self, schema=None, table_type='table'): meta = MetaData(testing.db) users, addresses, dingalings = self.tables.users, \ self.tables.email_addresses, self.tables.dingalings table_names = ['users', 'email_addresses'] if table_type == 'view': table_names = ['users_v', 'email_addresses_v'] insp = inspect(meta.bind) for table_name, table in zip(table_names, (users, addresses)): schema_name = schema cols = insp.get_columns(table_name, schema=schema_name) self.assert_(len(cols) > 0, len(cols)) # should be in order for i, col in enumerate(table.columns): eq_(col.name, cols[i]['name']) ctype = cols[i]['type'].__class__ ctype_def = col.type if isinstance(ctype_def, sa.types.TypeEngine): ctype_def = ctype_def.__class__ # Oracle returns Date for DateTime. if testing.against('oracle') and ctype_def \ in (sql_types.Date, sql_types.DateTime): ctype_def = sql_types.Date # assert that the desired type and return type share # a base within one of the generic types. self.assert_(len(set(ctype.__mro__). intersection(ctype_def.__mro__).intersection([ sql_types.Integer, sql_types.Numeric, sql_types.DateTime, sql_types.Date, sql_types.Time, sql_types.String, sql_types._Binary, ])) > 0, '%s(%s), %s(%s)' % (col.name, col.type, cols[i]['name'], ctype)) if not col.primary_key: assert cols[i]['default'] is None @testing.requires.table_reflection def test_get_columns(self): self._test_get_columns() @testing.requires.table_reflection @testing.requires.schemas def test_get_columns_with_schema(self): self._test_get_columns(schema='test_schema') @testing.requires.view_reflection def test_get_view_columns(self): self._test_get_columns(table_type='view') @testing.requires.view_reflection @testing.requires.schemas def test_get_view_columns_with_schema(self): self._test_get_columns(schema='test_schema', table_type='view') @testing.provide_metadata def _test_get_pk_constraint(self, schema=None): meta = self.metadata users, addresses = self.tables.users, self.tables.email_addresses insp = inspect(meta.bind) users_cons = insp.get_pk_constraint(users.name, schema=schema) users_pkeys = users_cons['constrained_columns'] eq_(users_pkeys, ['user_id']) addr_cons = insp.get_pk_constraint(addresses.name, schema=schema) addr_pkeys = addr_cons['constrained_columns'] eq_(addr_pkeys, ['address_id']) with testing.requires.reflects_pk_names.fail_if(): eq_(addr_cons['name'], 'email_ad_pk') @testing.requires.primary_key_constraint_reflection def test_get_pk_constraint(self): self._test_get_pk_constraint() @testing.requires.table_reflection @testing.requires.primary_key_constraint_reflection @testing.requires.schemas def test_get_pk_constraint_with_schema(self): self._test_get_pk_constraint(schema='test_schema') @testing.requires.table_reflection @testing.provide_metadata def test_deprecated_get_primary_keys(self): meta = self.metadata users = self.tables.users insp = Inspector(meta.bind) assert_raises_message( sa_exc.SADeprecationWarning, "Call to deprecated method get_primary_keys." 
" Use get_pk_constraint instead.", insp.get_primary_keys, users.name ) @testing.provide_metadata def _test_get_foreign_keys(self, schema=None): meta = self.metadata users, addresses, dingalings = self.tables.users, \ self.tables.email_addresses, self.tables.dingalings insp = inspect(meta.bind) expected_schema = schema # users users_fkeys = insp.get_foreign_keys(users.name, schema=schema) fkey1 = users_fkeys[0] with testing.requires.named_constraints.fail_if(): self.assert_(fkey1['name'] is not None) eq_(fkey1['referred_schema'], expected_schema) eq_(fkey1['referred_table'], users.name) eq_(fkey1['referred_columns'], ['user_id', ]) if testing.requires.self_referential_foreign_keys.enabled: eq_(fkey1['constrained_columns'], ['parent_user_id']) #addresses addr_fkeys = insp.get_foreign_keys(addresses.name, schema=schema) fkey1 = addr_fkeys[0] with testing.requires.named_constraints.fail_if(): self.assert_(fkey1['name'] is not None) eq_(fkey1['referred_schema'], expected_schema) eq_(fkey1['referred_table'], users.name) eq_(fkey1['referred_columns'], ['user_id', ]) eq_(fkey1['constrained_columns'], ['remote_user_id']) @testing.requires.foreign_key_constraint_reflection def test_get_foreign_keys(self): self._test_get_foreign_keys() @testing.requires.foreign_key_constraint_reflection @testing.requires.schemas def test_get_foreign_keys_with_schema(self): self._test_get_foreign_keys(schema='test_schema') @testing.provide_metadata def _test_get_indexes(self, schema=None): meta = self.metadata users, addresses, dingalings = self.tables.users, \ self.tables.email_addresses, self.tables.dingalings # The database may decide to create indexes for foreign keys, etc. # so there may be more indexes than expected. insp = inspect(meta.bind) indexes = insp.get_indexes('users', schema=schema) expected_indexes = [ {'unique': False, 'column_names': ['test1', 'test2'], 'name': 'users_t_idx'}, {'unique': False, 'column_names': ['user_id', 'test2', 'test1'], 'name': 'users_all_idx'} ] index_names = [d['name'] for d in indexes] for e_index in expected_indexes: assert e_index['name'] in index_names index = indexes[index_names.index(e_index['name'])] for key in e_index: eq_(e_index[key], index[key]) @testing.requires.index_reflection def test_get_indexes(self): self._test_get_indexes() @testing.requires.index_reflection @testing.requires.schemas def test_get_indexes_with_schema(self): self._test_get_indexes(schema='test_schema') @testing.requires.unique_constraint_reflection def test_get_unique_constraints(self): self._test_get_unique_constraints() @testing.requires.unique_constraint_reflection @testing.requires.schemas def test_get_unique_constraints_with_schema(self): self._test_get_unique_constraints(schema='test_schema') @testing.provide_metadata def _test_get_unique_constraints(self, schema=None): uniques = sorted( [ {'name': 'unique_a_b_c', 'column_names': ['a', 'b', 'c']}, {'name': 'unique_a_c', 'column_names': ['a', 'c']}, {'name': 'unique_b_c', 'column_names': ['b', 'c']}, ], key=operator.itemgetter('name') ) orig_meta = self.metadata table = Table( 'testtbl', orig_meta, Column('a', sa.String(20)), Column('b', sa.String(30)), Column('c', sa.Integer), schema=schema ) for uc in uniques: table.append_constraint( sa.UniqueConstraint(name=uc['name'], *uc['column_names']) ) orig_meta.create_all() inspector = inspect(orig_meta.bind) reflected = sorted( inspector.get_unique_constraints('testtbl', schema=schema), key=operator.itemgetter('name') ) eq_(uniques, reflected) @testing.provide_metadata def 
_test_get_view_definition(self, schema=None): meta = self.metadata users, addresses, dingalings = self.tables.users, \ self.tables.email_addresses, self.tables.dingalings view_name1 = 'users_v' view_name2 = 'email_addresses_v' insp = inspect(meta.bind) v1 = insp.get_view_definition(view_name1, schema=schema) self.assert_(v1) v2 = insp.get_view_definition(view_name2, schema=schema) self.assert_(v2) @testing.requires.view_reflection def test_get_view_definition(self): self._test_get_view_definition() @testing.requires.view_reflection @testing.requires.schemas def test_get_view_definition_with_schema(self): self._test_get_view_definition(schema='test_schema') @testing.only_on("postgresql", "PG specific feature") @testing.provide_metadata def _test_get_table_oid(self, table_name, schema=None): meta = self.metadata users, addresses, dingalings = self.tables.users, \ self.tables.email_addresses, self.tables.dingalings insp = inspect(meta.bind) oid = insp.get_table_oid(table_name, schema) self.assert_(isinstance(oid, (int, long))) def test_get_table_oid(self): self._test_get_table_oid('users') @testing.requires.schemas def test_get_table_oid_with_schema(self): self._test_get_table_oid('users', schema='test_schema') @testing.provide_metadata def test_autoincrement_col(self): """test that 'autoincrement' is reflected according to sqla's policy. Don't mark this test as unsupported for any backend ! (technically it fails with MySQL InnoDB since "id" comes before "id2") A backend is better off not returning "autoincrement" at all, instead of potentially returning "False" for an auto-incrementing primary key column. """ meta = self.metadata insp = inspect(meta.bind) for tname, cname in [ ('users', 'user_id'), ('email_addresses', 'address_id'), ('dingalings', 'dingaling_id'), ]: cols = insp.get_columns(tname) id_ = dict((c['name'], c) for c in cols)[cname] assert id_.get('autoincrement', True) __all__ = ('ComponentReflectionTest', 'HasTableTest') SQLAlchemy-0.8.4/lib/sqlalchemy/testing/suite/test_results.py0000644000076500000240000000320012251147171025055 0ustar classicstaff00000000000000from .. import fixtures, config from ..config import requirements from .. import exclusions from ..assertions import eq_ from .. import engines from sqlalchemy import Integer, String, select, util from ..schema import Table, Column class RowFetchTest(fixtures.TablesTest): @classmethod def define_tables(cls, metadata): Table('plain_pk', metadata, Column('id', Integer, primary_key=True), Column('data', String(50)) ) @classmethod def insert_data(cls): config.db.execute( cls.tables.plain_pk.insert(), [ {"id":1, "data":"d1"}, {"id":2, "data":"d2"}, {"id":3, "data":"d3"}, ] ) def test_via_string(self): row = config.db.execute( self.tables.plain_pk.select().\ order_by(self.tables.plain_pk.c.id) ).first() eq_( row['id'], 1 ) eq_( row['data'], "d1" ) def test_via_int(self): row = config.db.execute( self.tables.plain_pk.select().\ order_by(self.tables.plain_pk.c.id) ).first() eq_( row[0], 1 ) eq_( row[1], "d1" ) def test_via_col_object(self): row = config.db.execute( self.tables.plain_pk.select().\ order_by(self.tables.plain_pk.c.id) ).first() eq_( row[self.tables.plain_pk.c.id], 1 ) eq_( row[self.tables.plain_pk.c.data], "d1" )SQLAlchemy-0.8.4/lib/sqlalchemy/testing/suite/test_sequence.py0000644000076500000240000000721412251147171025175 0ustar classicstaff00000000000000from .. import fixtures, config from ..config import requirements from ..assertions import eq_ from ... import testing from ... 
import Integer, String, Sequence, schema from ..schema import Table, Column class SequenceTest(fixtures.TablesTest): __requires__ = ('sequences',) run_create_tables = 'each' @classmethod def define_tables(cls, metadata): Table('seq_pk', metadata, Column('id', Integer, Sequence('tab_id_seq'), primary_key=True), Column('data', String(50)) ) Table('seq_opt_pk', metadata, Column('id', Integer, Sequence('tab_id_seq', optional=True), primary_key=True), Column('data', String(50)) ) def test_insert_roundtrip(self): config.db.execute( self.tables.seq_pk.insert(), data="some data" ) self._assert_round_trip(self.tables.seq_pk, config.db) def test_insert_lastrowid(self): r = config.db.execute( self.tables.seq_pk.insert(), data="some data" ) eq_( r.inserted_primary_key, [1] ) def test_nextval_direct(self): r = config.db.execute( self.tables.seq_pk.c.id.default ) eq_( r, 1 ) @requirements.sequences_optional def test_optional_seq(self): r = config.db.execute( self.tables.seq_opt_pk.insert(), data="some data" ) eq_( r.inserted_primary_key, [1] ) def _assert_round_trip(self, table, conn): row = conn.execute(table.select()).first() eq_( row, (1, "some data") ) class HasSequenceTest(fixtures.TestBase): __requires__ = 'sequences', def test_has_sequence(self): s1 = Sequence('user_id_seq') testing.db.execute(schema.CreateSequence(s1)) try: eq_(testing.db.dialect.has_sequence(testing.db, 'user_id_seq'), True) finally: testing.db.execute(schema.DropSequence(s1)) @testing.requires.schemas def test_has_sequence_schema(self): s1 = Sequence('user_id_seq', schema="test_schema") testing.db.execute(schema.CreateSequence(s1)) try: eq_(testing.db.dialect.has_sequence(testing.db, 'user_id_seq', schema="test_schema"), True) finally: testing.db.execute(schema.DropSequence(s1)) def test_has_sequence_neg(self): eq_(testing.db.dialect.has_sequence(testing.db, 'user_id_seq'), False) @testing.requires.schemas def test_has_sequence_schemas_neg(self): eq_(testing.db.dialect.has_sequence(testing.db, 'user_id_seq', schema="test_schema"), False) @testing.requires.schemas def test_has_sequence_default_not_in_remote(self): s1 = Sequence('user_id_seq') testing.db.execute(schema.CreateSequence(s1)) try: eq_(testing.db.dialect.has_sequence(testing.db, 'user_id_seq', schema="test_schema"), False) finally: testing.db.execute(schema.DropSequence(s1)) @testing.requires.schemas def test_has_sequence_remote_not_in_default(self): s1 = Sequence('user_id_seq', schema="test_schema") testing.db.execute(schema.CreateSequence(s1)) try: eq_(testing.db.dialect.has_sequence(testing.db, 'user_id_seq'), False) finally: testing.db.execute(schema.DropSequence(s1)) SQLAlchemy-0.8.4/lib/sqlalchemy/testing/suite/test_types.py0000644000076500000240000002567012251150015024526 0ustar classicstaff00000000000000# coding: utf-8 from .. import fixtures, config from ..assertions import eq_ from ..config import requirements from sqlalchemy import Integer, Unicode, UnicodeText, select from sqlalchemy import Date, DateTime, Time, MetaData, String, \ Text, Numeric, Float from ..schema import Table, Column from ... import testing import decimal import datetime class _UnicodeFixture(object): __requires__ = 'unicode_data', data = u"Alors vous imaginez ma surprise, au lever du jour, "\ u"quand une drôle de petite voix m’a réveillé. Elle "\ u"disait: « S’il vous plaît… dessine-moi un mouton! 
»" @classmethod def define_tables(cls, metadata): Table('unicode_table', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('unicode_data', cls.datatype), ) def test_round_trip(self): unicode_table = self.tables.unicode_table config.db.execute( unicode_table.insert(), { 'unicode_data': self.data, } ) row = config.db.execute( select([ unicode_table.c.unicode_data, ]) ).first() eq_( row, (self.data, ) ) assert isinstance(row[0], unicode) def test_round_trip_executemany(self): unicode_table = self.tables.unicode_table config.db.execute( unicode_table.insert(), [ { 'unicode_data': self.data, } for i in xrange(3) ] ) rows = config.db.execute( select([ unicode_table.c.unicode_data, ]) ).fetchall() eq_( rows, [(self.data, ) for i in xrange(3)] ) for row in rows: assert isinstance(row[0], unicode) def _test_empty_strings(self): unicode_table = self.tables.unicode_table config.db.execute( unicode_table.insert(), {"unicode_data": u''} ) row = config.db.execute( select([unicode_table.c.unicode_data]) ).first() eq_(row, (u'',)) class UnicodeVarcharTest(_UnicodeFixture, fixtures.TablesTest): __requires__ = 'unicode_data', datatype = Unicode(255) @requirements.empty_strings_varchar def test_empty_strings_varchar(self): self._test_empty_strings() class UnicodeTextTest(_UnicodeFixture, fixtures.TablesTest): __requires__ = 'unicode_data', 'text_type' datatype = UnicodeText() @requirements.empty_strings_text def test_empty_strings_text(self): self._test_empty_strings() class TextTest(fixtures.TablesTest): @classmethod def define_tables(cls, metadata): Table('text_table', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('text_data', Text), ) def test_text_roundtrip(self): text_table = self.tables.text_table config.db.execute( text_table.insert(), {"text_data": 'some text'} ) row = config.db.execute( select([text_table.c.text_data]) ).first() eq_(row, ('some text',)) def test_text_empty_strings(self): text_table = self.tables.text_table config.db.execute( text_table.insert(), {"text_data": ''} ) row = config.db.execute( select([text_table.c.text_data]) ).first() eq_(row, ('',)) class StringTest(fixtures.TestBase): @requirements.unbounded_varchar def test_nolength_string(self): metadata = MetaData() foo = Table('foo', metadata, Column('one', String) ) foo.create(config.db) foo.drop(config.db) class _DateFixture(object): compare = None @classmethod def define_tables(cls, metadata): Table('date_table', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('date_data', cls.datatype), ) def test_round_trip(self): date_table = self.tables.date_table config.db.execute( date_table.insert(), {'date_data': self.data} ) row = config.db.execute( select([ date_table.c.date_data, ]) ).first() compare = self.compare or self.data eq_(row, (compare, )) assert isinstance(row[0], type(compare)) def test_null(self): date_table = self.tables.date_table config.db.execute( date_table.insert(), {'date_data': None} ) row = config.db.execute( select([ date_table.c.date_data, ]) ).first() eq_(row, (None,)) class DateTimeTest(_DateFixture, fixtures.TablesTest): __requires__ = 'datetime', datatype = DateTime data = datetime.datetime(2012, 10, 15, 12, 57, 18) class DateTimeMicrosecondsTest(_DateFixture, fixtures.TablesTest): __requires__ = 'datetime_microseconds', datatype = DateTime data = datetime.datetime(2012, 10, 15, 12, 57, 18, 396) class TimeTest(_DateFixture, fixtures.TablesTest): __requires__ = 'time', datatype = Time 
data = datetime.time(12, 57, 18) class TimeMicrosecondsTest(_DateFixture, fixtures.TablesTest): __requires__ = 'time_microseconds', datatype = Time data = datetime.time(12, 57, 18, 396) class DateTest(_DateFixture, fixtures.TablesTest): __requires__ = 'date', datatype = Date data = datetime.date(2012, 10, 15) class DateTimeCoercedToDateTimeTest(_DateFixture, fixtures.TablesTest): __requires__ = 'date', datatype = Date data = datetime.datetime(2012, 10, 15, 12, 57, 18) compare = datetime.date(2012, 10, 15) class DateTimeHistoricTest(_DateFixture, fixtures.TablesTest): __requires__ = 'datetime_historic', datatype = DateTime data = datetime.datetime(1850, 11, 10, 11, 52, 35) class DateHistoricTest(_DateFixture, fixtures.TablesTest): __requires__ = 'date_historic', datatype = Date data = datetime.date(1727, 4, 1) class NumericTest(fixtures.TestBase): @testing.emits_warning(r".*does \*not\* support Decimal objects natively") @testing.provide_metadata def _do_test(self, type_, input_, output, filter_=None, check_scale=False): metadata = self.metadata t = Table('t', metadata, Column('x', type_)) t.create() t.insert().execute([{'x':x} for x in input_]) result = set([row[0] for row in t.select().execute()]) output = set(output) if filter_: result = set(filter_(x) for x in result) output = set(filter_(x) for x in output) eq_(result, output) if check_scale: eq_( [str(x) for x in result], [str(x) for x in output], ) def test_numeric_as_decimal(self): self._do_test( Numeric(precision=8, scale=4), [15.7563, decimal.Decimal("15.7563"), None], [decimal.Decimal("15.7563"), None], ) def test_numeric_as_float(self): self._do_test( Numeric(precision=8, scale=4, asdecimal=False), [15.7563, decimal.Decimal("15.7563"), None], [15.7563, None], ) def test_float_as_decimal(self): self._do_test( Float(precision=8, asdecimal=True), [15.7563, decimal.Decimal("15.7563"), None], [decimal.Decimal("15.7563"), None], ) def test_float_as_float(self): self._do_test( Float(precision=8), [15.7563, decimal.Decimal("15.7563")], [15.7563], filter_=lambda n: n is not None and round(n, 5) or None ) @testing.requires.precision_numerics_general def test_precision_decimal(self): numbers = set([ decimal.Decimal("54.234246451650"), decimal.Decimal("0.004354"), decimal.Decimal("900.0"), ]) self._do_test( Numeric(precision=18, scale=12), numbers, numbers, ) @testing.requires.precision_numerics_enotation_large def test_enotation_decimal(self): """test exceedingly small decimals. Decimal reports values with E notation when the exponent is greater than 6. """ numbers = set([ decimal.Decimal('1E-2'), decimal.Decimal('1E-3'), decimal.Decimal('1E-4'), decimal.Decimal('1E-5'), decimal.Decimal('1E-6'), decimal.Decimal('1E-7'), decimal.Decimal('1E-8'), decimal.Decimal("0.01000005940696"), decimal.Decimal("0.00000005940696"), decimal.Decimal("0.00000000000696"), decimal.Decimal("0.70000000000696"), decimal.Decimal("696E-12"), ]) self._do_test( Numeric(precision=18, scale=14), numbers, numbers ) @testing.requires.precision_numerics_enotation_large def test_enotation_decimal_large(self): """test exceedingly large decimals. 
""" numbers = set([ decimal.Decimal('4E+8'), decimal.Decimal("5748E+15"), decimal.Decimal('1.521E+15'), decimal.Decimal('00000000000000.1E+12'), ]) self._do_test( Numeric(precision=25, scale=2), numbers, numbers ) @testing.requires.precision_numerics_many_significant_digits def test_many_significant_digits(self): numbers = set([ decimal.Decimal("31943874831932418390.01"), decimal.Decimal("319438950232418390.273596"), decimal.Decimal("87673.594069654243"), ]) self._do_test( Numeric(precision=38, scale=12), numbers, numbers ) @testing.requires.precision_numerics_retains_significant_digits def test_numeric_no_decimal(self): numbers = set([ decimal.Decimal("1.000") ]) self._do_test( Numeric(precision=5, scale=3), numbers, numbers, check_scale=True ) __all__ = ('UnicodeVarcharTest', 'UnicodeTextTest', 'DateTest', 'DateTimeTest', 'TextTest', 'NumericTest', 'DateTimeHistoricTest', 'DateTimeCoercedToDateTimeTest', 'TimeMicrosecondsTest', 'TimeTest', 'DateTimeMicrosecondsTest', 'DateHistoricTest', 'StringTest') SQLAlchemy-0.8.4/lib/sqlalchemy/testing/suite/test_update_delete.py0000644000076500000240000000302312251147171026163 0ustar classicstaff00000000000000from .. import fixtures, config from ..assertions import eq_ from sqlalchemy import Integer, String from ..schema import Table, Column class SimpleUpdateDeleteTest(fixtures.TablesTest): run_deletes = 'each' @classmethod def define_tables(cls, metadata): Table('plain_pk', metadata, Column('id', Integer, primary_key=True), Column('data', String(50)) ) @classmethod def insert_data(cls): config.db.execute( cls.tables.plain_pk.insert(), [ {"id":1, "data":"d1"}, {"id":2, "data":"d2"}, {"id":3, "data":"d3"}, ] ) def test_update(self): t = self.tables.plain_pk r = config.db.execute( t.update().where(t.c.id == 2), data="d2_new" ) assert not r.is_insert assert not r.returns_rows eq_( config.db.execute(t.select().order_by(t.c.id)).fetchall(), [ (1, "d1"), (2, "d2_new"), (3, "d3") ] ) def test_delete(self): t = self.tables.plain_pk r = config.db.execute( t.delete().where(t.c.id == 2) ) assert not r.is_insert assert not r.returns_rows eq_( config.db.execute(t.select().order_by(t.c.id)).fetchall(), [ (1, "d1"), (3, "d3") ] ) __all__ = ('SimpleUpdateDeleteTest', ) SQLAlchemy-0.8.4/lib/sqlalchemy/testing/util.py0000644000076500000240000001172312251150015022141 0ustar classicstaff00000000000000from ..util import jython, pypy, defaultdict, decorator import decimal import gc import time import random import sys import types if jython: def jython_gc_collect(*args): """aggressive gc.collect for tests.""" gc.collect() time.sleep(0.1) gc.collect() gc.collect() return 0 # "lazy" gc, for VM's that don't GC on refcount == 0 gc_collect = lazy_gc = jython_gc_collect elif pypy: def pypy_gc_collect(*args): gc.collect() gc.collect() gc_collect = lazy_gc = pypy_gc_collect else: # assume CPython - straight gc.collect, lazy_gc() is a pass gc_collect = gc.collect def lazy_gc(): pass def picklers(): picklers = set() # Py2K try: import cPickle picklers.add(cPickle) except ImportError: pass # end Py2K import pickle picklers.add(pickle) # yes, this thing needs this much testing for pickle_ in picklers: for protocol in -1, 0, 1, 2: yield pickle_.loads, lambda d: pickle_.dumps(d, protocol) def round_decimal(value, prec): if isinstance(value, float): return round(value, prec) # can also use shift() here but that is 2.6 only return (value * decimal.Decimal("1" + "0" * prec) ).to_integral(decimal.ROUND_FLOOR) / \ pow(10, prec) class RandomSet(set): def __iter__(self): l = 
list(set.__iter__(self)) random.shuffle(l) return iter(l) def pop(self): index = random.randint(0, len(self) - 1) item = list(set.__iter__(self))[index] self.remove(item) return item def union(self, other): return RandomSet(set.union(self, other)) def difference(self, other): return RandomSet(set.difference(self, other)) def intersection(self, other): return RandomSet(set.intersection(self, other)) def copy(self): return RandomSet(self) def conforms_partial_ordering(tuples, sorted_elements): """True if the given sorting conforms to the given partial ordering.""" deps = defaultdict(set) for parent, child in tuples: deps[parent].add(child) for i, node in enumerate(sorted_elements): for n in sorted_elements[i:]: if node in deps[n]: return False else: return True def all_partial_orderings(tuples, elements): edges = defaultdict(set) for parent, child in tuples: edges[child].add(parent) def _all_orderings(elements): if len(elements) == 1: yield list(elements) else: for elem in elements: subset = set(elements).difference([elem]) if not subset.intersection(edges[elem]): for sub_ordering in _all_orderings(subset): yield [elem] + sub_ordering return iter(_all_orderings(elements)) def function_named(fn, name): """Return a function with a given __name__. Will assign to __name__ and return the original function if possible on the Python implementation, otherwise a new function will be constructed. This function should be phased out as much as possible in favor of @decorator. Tests that "generate" many named tests should be modernized. """ try: fn.__name__ = name except TypeError: fn = types.FunctionType(fn.func_code, fn.func_globals, name, fn.func_defaults, fn.func_closure) return fn def run_as_contextmanager(ctx, fn, *arg, **kw): """Run the given function under the given contextmanager, simulating the behavior of 'with' to support older Python versions. """ obj = ctx.__enter__() try: result = fn(obj, *arg, **kw) ctx.__exit__(None, None, None) return result except: exc_info = sys.exc_info() raise_ = ctx.__exit__(*exc_info) if raise_ is None: raise else: return raise_ def rowset(results): """Converts the results of sql execution into a plain set of column tuples. Useful for asserting the results of an unordered query. """ return set([tuple(row) for row in results]) def fail(msg): assert False, msg @decorator def provide_metadata(fn, *args, **kw): """Provide bound MetaData for a single test, dropping afterwards.""" from . import config from sqlalchemy import schema metadata = schema.MetaData(config.db) self = args[0] prev_meta = getattr(self, 'metadata', None) self.metadata = metadata try: return fn(*args, **kw) finally: metadata.drop_all() self.metadata = prev_meta class adict(dict): """Dict keys available as attributes. Shadows.""" def __getattribute__(self, key): try: return self[key] except KeyError: return dict.__getattribute__(self, key) def get_all(self, *keys): return tuple([self[key] for key in keys]) SQLAlchemy-0.8.4/lib/sqlalchemy/testing/warnings.py0000644000076500000240000000246212251150015023014 0ustar classicstaff00000000000000from __future__ import absolute_import import warnings from .. import exc as sa_exc from .. 
import util def testing_warn(msg, stacklevel=3): """Replaces sqlalchemy.util.warn during tests.""" filename = "sqlalchemy.testing.warnings" lineno = 1 if isinstance(msg, basestring): warnings.warn_explicit(msg, sa_exc.SAWarning, filename, lineno) else: warnings.warn_explicit(msg, filename, lineno) def resetwarnings(): """Reset warning behavior to testing defaults.""" util.warn = util.langhelpers.warn = testing_warn warnings.filterwarnings('ignore', category=sa_exc.SAPendingDeprecationWarning) warnings.filterwarnings('error', category=sa_exc.SADeprecationWarning) warnings.filterwarnings('error', category=sa_exc.SAWarning) def assert_warnings(fn, warnings): """Assert that each of the given warnings are emitted by fn.""" from .assertions import eq_, emits_warning canary = [] orig_warn = util.warn def capture_warnings(*args, **kw): orig_warn(*args, **kw) popwarn = warnings.pop(0) canary.append(popwarn) eq_(args[0], popwarn) util.warn = util.langhelpers.warn = capture_warnings result = emits_warning()(fn)() assert canary, "No warning was emitted" return result SQLAlchemy-0.8.4/lib/sqlalchemy/types.py0000644000076500000240000024467512251150015020671 0ustar classicstaff00000000000000# sqlalchemy/types.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """defines genericized SQL types, each represented by a subclass of :class:`~sqlalchemy.types.AbstractType`. Dialects define further subclasses of these types. For more information see the SQLAlchemy documentation on types. """ __all__ = ['TypeEngine', 'TypeDecorator', 'AbstractType', 'UserDefinedType', 'INT', 'CHAR', 'VARCHAR', 'NCHAR', 'NVARCHAR', 'TEXT', 'Text', 'FLOAT', 'NUMERIC', 'REAL', 'DECIMAL', 'TIMESTAMP', 'DATETIME', 'CLOB', 'BLOB', 'BINARY', 'VARBINARY', 'BOOLEAN', 'BIGINT', 'SMALLINT', 'INTEGER', 'DATE', 'TIME', 'String', 'Integer', 'SmallInteger', 'BigInteger', 'Numeric', 'Float', 'DateTime', 'Date', 'Time', 'LargeBinary', 'Binary', 'Boolean', 'Unicode', 'Concatenable', 'UnicodeText', 'PickleType', 'Interval', 'Enum'] import datetime as dt import codecs from . import exc, schema, util, processors, events, event from .sql import operators, type_coerce from .sql.expression import _DefaultColumnComparator from .util import pickle from .sql.visitors import Visitable import decimal default = util.importlater("sqlalchemy.engine", "default") NoneType = type(None) if util.jython: import array class AbstractType(Visitable): """Base for all types - not needed except for backwards compatibility.""" class TypeEngine(AbstractType): """Base for built-in types.""" class Comparator(_DefaultColumnComparator): """Base class for custom comparison operations defined at the type level. See :attr:`.TypeEngine.comparator_factory`. The public base class for :class:`.TypeEngine.Comparator` is :class:`.ColumnOperators`. """ def __init__(self, expr): self.expr = expr def __reduce__(self): return _reconstitute_comparator, (self.expr, ) hashable = True """Flag, if False, means values from this type aren't hashable. Used by the ORM when uniquing result lists. """ comparator_factory = Comparator """A :class:`.TypeEngine.Comparator` class which will apply to operations performed by owning :class:`.ColumnElement` objects. The :attr:`.comparator_factory` attribute is a hook consulted by the core expression system when column and SQL expression operations are performed. 
When a :class:`.TypeEngine.Comparator` class is associated with this attribute, it allows custom re-definition of all existing operators, as well as definition of new operators. Existing operators include those provided by Python operator overloading such as :meth:`.operators.ColumnOperators.__add__` and :meth:`.operators.ColumnOperators.__eq__`, those provided as standard attributes of :class:`.operators.ColumnOperators` such as :meth:`.operators.ColumnOperators.like` and :meth:`.operators.ColumnOperators.in_`. Rudimentary usage of this hook is allowed through simple subclassing of existing types, or alternatively by using :class:`.TypeDecorator`. See the documentation section :ref:`types_operators` for examples. .. versionadded:: 0.8 The expression system was enhanced to support customization of operators on a per-type level. """ def copy_value(self, value): return value def bind_processor(self, dialect): """Return a conversion function for processing bind values. Returns a callable which will receive a bind parameter value as the sole positional argument and will return a value to send to the DB-API. If processing is not necessary, the method should return ``None``. :param dialect: Dialect instance in use. """ return None def result_processor(self, dialect, coltype): """Return a conversion function for processing result row values. Returns a callable which will receive a result row column value as the sole positional argument and will return a value to return to the user. If processing is not necessary, the method should return ``None``. :param dialect: Dialect instance in use. :param coltype: DBAPI coltype argument received in cursor.description. """ return None def column_expression(self, colexpr): """Given a SELECT column expression, return a wrapping SQL expression. This is typically a SQL function that wraps a column expression as rendered in the columns clause of a SELECT statement. It is used for special data types that require columns to be wrapped in some special database function in order to coerce the value before being sent back to the application. It is the SQL analogue of the :meth:`.TypeEngine.result_processor` method. The method is evaluated at statement compile time, as opposed to statement construction time. See also: :ref:`types_sql_value_processing` """ return None @util.memoized_property def _has_column_expression(self): """memoized boolean, check if column_expression is implemented. Allows the method to be skipped for the vast majority of expression types that don't use this feature. """ return self.__class__.column_expression.func_code \ is not TypeEngine.column_expression.func_code def bind_expression(self, bindvalue): """"Given a bind value (i.e. a :class:`.BindParameter` instance), return a SQL expression in its place. This is typically a SQL function that wraps the existing bound parameter within the statement. It is used for special data types that require literals being wrapped in some special database function in order to coerce an application-level value into a database-specific format. It is the SQL analogue of the :meth:`.TypeEngine.bind_processor` method. The method is evaluated at statement compile time, as opposed to statement construction time. Note that this method, when implemented, should always return the exact same structure, without any conditional logic, as it may be used in an executemany() call against an arbitrary number of bound parameter sets. 
See also: :ref:`types_sql_value_processing` """ return None @util.memoized_property def _has_bind_expression(self): """memoized boolean, check if bind_expression is implemented. Allows the method to be skipped for the vast majority of expression types that don't use this feature. """ return self.__class__.bind_expression.func_code \ is not TypeEngine.bind_expression.func_code def compare_values(self, x, y): """Compare two values for equality.""" return x == y def get_dbapi_type(self, dbapi): """Return the corresponding type object from the underlying DB-API, if any. This can be useful for calling ``setinputsizes()``, for example. """ return None @property def python_type(self): """Return the Python type object expected to be returned by instances of this type, if known. Basically, for those types which enforce a return type, or are known across the board to do such for all common DBAPIs (like ``int`` for example), will return that type. If a return type is not defined, raises ``NotImplementedError``. Note that any type also accommodates NULL in SQL which means you can also get back ``None`` from any type in practice. """ raise NotImplementedError() def with_variant(self, type_, dialect_name): """Produce a new type object that will utilize the given type when applied to the dialect of the given name. e.g.:: from sqlalchemy.types import String from sqlalchemy.dialects import mysql s = String() s = s.with_variant(mysql.VARCHAR(collation='foo'), 'mysql') The construction of :meth:`.TypeEngine.with_variant` is always from the "fallback" type to that which is dialect specific. The returned type is an instance of :class:`.Variant`, which itself provides a :meth:`~sqlalchemy.types.Variant.with_variant` that can be called repeatedly. :param type_: a :class:`.TypeEngine` that will be selected as a variant from the originating type, when a dialect of the given name is in use. :param dialect_name: base name of the dialect which uses this type. (i.e. ``'postgresql'``, ``'mysql'``, etc.) .. versionadded:: 0.7.2 """ return Variant(self, {dialect_name: type_}) @util.memoized_property def _type_affinity(self): """Return a rudimental 'affinity' value expressing the general class of type.""" typ = None for t in self.__class__.__mro__: if t is TypeEngine or t is UserDefinedType: return typ elif issubclass(t, TypeEngine): typ = t else: return self.__class__ def dialect_impl(self, dialect): """Return a dialect-specific implementation for this :class:`.TypeEngine`. """ try: return dialect._type_memos[self]['impl'] except KeyError: return self._dialect_info(dialect)['impl'] def _cached_bind_processor(self, dialect): """Return a dialect-specific bind processor for this type.""" try: return dialect._type_memos[self]['bind'] except KeyError: d = self._dialect_info(dialect) d['bind'] = bp = d['impl'].bind_processor(dialect) return bp def _cached_result_processor(self, dialect, coltype): """Return a dialect-specific result processor for this type.""" try: return dialect._type_memos[self][coltype] except KeyError: d = self._dialect_info(dialect) # key assumption: DBAPI type codes are # constants. Else this dictionary would # grow unbounded. 
d[coltype] = rp = d['impl'].result_processor(dialect, coltype) return rp def _dialect_info(self, dialect): """Return a dialect-specific registry which caches a dialect-specific implementation, bind processing function, and one or more result processing functions.""" if self in dialect._type_memos: return dialect._type_memos[self] else: impl = self._gen_dialect_impl(dialect) if impl is self: impl = self.adapt(type(self)) # this can't be self, else we create a cycle assert impl is not self dialect._type_memos[self] = d = {'impl': impl} return d def _gen_dialect_impl(self, dialect): return dialect.type_descriptor(self) def adapt(self, cls, **kw): """Produce an "adapted" form of this type, given an "impl" class to work with. This method is used internally to associate generic types with "implementation" types that are specific to a particular dialect. """ return util.constructor_copy(self, cls, **kw) def coerce_compared_value(self, op, value): """Suggest a type for a 'coerced' Python value in an expression. Given an operator and value, gives the type a chance to return a type which the value should be coerced into. The default behavior here is conservative; if the right-hand side is already coerced into a SQL type based on its Python type, it is usually left alone. End-user functionality extension here should generally be via :class:`.TypeDecorator`, which provides more liberal behavior in that it defaults to coercing the other side of the expression into this type, thus applying special Python conversions above and beyond those needed by the DBAPI to both ides. It also provides the public method :meth:`.TypeDecorator.coerce_compared_value` which is intended for end-user customization of this behavior. """ _coerced_type = _type_map.get(type(value), NULLTYPE) if _coerced_type is NULLTYPE or _coerced_type._type_affinity \ is self._type_affinity: return self else: return _coerced_type def _compare_type_affinity(self, other): return self._type_affinity is other._type_affinity def compile(self, dialect=None): """Produce a string-compiled form of this :class:`.TypeEngine`. When called with no arguments, uses a "default" dialect to produce a string result. :param dialect: a :class:`.Dialect` instance. """ # arg, return value is inconsistent with # ClauseElement.compile()....this is a mistake. if not dialect: dialect = self._default_dialect return dialect.type_compiler.process(self) @property def _default_dialect(self): if self.__class__.__module__.startswith("sqlalchemy.dialects"): tokens = self.__class__.__module__.split(".")[0:3] mod = ".".join(tokens) return getattr(__import__(mod).dialects, tokens[-1]).dialect() else: return default.DefaultDialect() def __str__(self): # Py3K #return unicode(self.compile()) # Py2K return unicode(self.compile()).\ encode('ascii', 'backslashreplace') # end Py2K def __init__(self, *args, **kwargs): """Support implementations that were passing arguments""" if args or kwargs: util.warn_deprecated("Passing arguments to type object " "constructor %s is deprecated" % self.__class__) def __repr__(self): return util.generic_repr(self) def _reconstitute_comparator(expression): return expression.comparator class UserDefinedType(TypeEngine): """Base for user defined types. This should be the base of new types. 
Note that for most cases, :class:`.TypeDecorator` is probably more appropriate:: import sqlalchemy.types as types class MyType(types.UserDefinedType): def __init__(self, precision = 8): self.precision = precision def get_col_spec(self): return "MYTYPE(%s)" % self.precision def bind_processor(self, dialect): def process(value): return value return process def result_processor(self, dialect, coltype): def process(value): return value return process Once the type is made, it's immediately usable:: table = Table('foo', meta, Column('id', Integer, primary_key=True), Column('data', MyType(16)) ) """ __visit_name__ = "user_defined" class Comparator(TypeEngine.Comparator): def _adapt_expression(self, op, other_comparator): if hasattr(self.type, 'adapt_operator'): util.warn_deprecated( "UserDefinedType.adapt_operator is deprecated. Create " "a UserDefinedType.Comparator subclass instead which " "generates the desired expression constructs, given a " "particular operator." ) return self.type.adapt_operator(op), self.type else: return op, self.type comparator_factory = Comparator def coerce_compared_value(self, op, value): """Suggest a type for a 'coerced' Python value in an expression. Default behavior for :class:`.UserDefinedType` is the same as that of :class:`.TypeDecorator`; by default it returns ``self``, assuming the compared value should be coerced into the same type as this one. See :meth:`.TypeDecorator.coerce_compared_value` for more detail. .. versionchanged:: 0.8 :meth:`.UserDefinedType.coerce_compared_value` now returns ``self`` by default, rather than falling onto the more fundamental behavior of :meth:`.TypeEngine.coerce_compared_value`. """ return self class TypeDecorator(TypeEngine): """Allows the creation of types which add additional functionality to an existing type. This method is preferred to direct subclassing of SQLAlchemy's built-in types as it ensures that all required functionality of the underlying type is kept in place. Typical usage:: import sqlalchemy.types as types class MyType(types.TypeDecorator): '''Prefixes Unicode values with "PREFIX:" on the way in and strips it off on the way out. ''' impl = types.Unicode def process_bind_param(self, value, dialect): return "PREFIX:" + value def process_result_value(self, value, dialect): return value[7:] def copy(self): return MyType(self.impl.length) The class-level "impl" attribute is required, and can reference any TypeEngine class. Alternatively, the load_dialect_impl() method can be used to provide different type classes based on the dialect given; in this case, the "impl" variable can reference ``TypeEngine`` as a placeholder. Types that receive a Python type that isn't similar to the ultimate type used may want to define the :meth:`TypeDecorator.coerce_compared_value` method. This is used to give the expression system a hint when coercing Python objects into bind parameters within expressions. Consider this expression:: mytable.c.somecol + datetime.date(2009, 5, 15) Above, if "somecol" is an ``Integer`` variant, it makes sense that we're doing date arithmetic, where above is usually interpreted by databases as adding a number of days to the given date. The expression system does the right thing by not attempting to coerce the "date()" value into an integer-oriented bind parameter. However, in the case of ``TypeDecorator``, we are usually changing an incoming Python type to something new - ``TypeDecorator`` by default will "coerce" the non-typed side to be the same type as itself. 
Such as below, we define an "epoch" type that stores a date value as an integer:: class MyEpochType(types.TypeDecorator): impl = types.Integer epoch = datetime.date(1970, 1, 1) def process_bind_param(self, value, dialect): return (value - self.epoch).days def process_result_value(self, value, dialect): return self.epoch + timedelta(days=value) Our expression of ``somecol + date`` with the above type will coerce the "date" on the right side to also be treated as ``MyEpochType``. This behavior can be overridden via the :meth:`~TypeDecorator.coerce_compared_value` method, which returns a type that should be used for the value of the expression. Below we set it such that an integer value will be treated as an ``Integer``, and any other value is assumed to be a date and will be treated as a ``MyEpochType``:: def coerce_compared_value(self, op, value): if isinstance(value, int): return Integer() else: return self """ __visit_name__ = "type_decorator" def __init__(self, *args, **kwargs): """Construct a :class:`.TypeDecorator`. Arguments sent here are passed to the constructor of the class assigned to the ``impl`` class level attribute, assuming the ``impl`` is a callable, and the resulting object is assigned to the ``self.impl`` instance attribute (thus overriding the class attribute of the same name). If the class level ``impl`` is not a callable (the unusual case), it will be assigned to the same instance attribute 'as-is', ignoring those arguments passed to the constructor. Subclasses can override this to customize the generation of ``self.impl`` entirely. """ if not hasattr(self.__class__, 'impl'): raise AssertionError("TypeDecorator implementations " "require a class-level variable " "'impl' which refers to the class of " "type being decorated") self.impl = to_instance(self.__class__.impl, *args, **kwargs) coerce_to_is_types = (util.NoneType, ) """Specify those Python types which should be coerced at the expression level to "IS " when compared using ``==`` (and same for ``IS NOT`` in conjunction with ``!=``. For most SQLAlchemy types, this includes ``NoneType``, as well as ``bool``. :class:`.TypeDecorator` modifies this list to only include ``NoneType``, as typedecorator implementations that deal with boolean types are common. Custom :class:`.TypeDecorator` classes can override this attribute to return an empty tuple, in which case no values will be coerced to constants. ..versionadded:: 0.8.2 Added :attr:`.TypeDecorator.coerce_to_is_types` to allow for easier control of ``__eq__()`` ``__ne__()`` operations. """ class Comparator(TypeEngine.Comparator): def operate(self, op, *other, **kwargs): kwargs['_python_is_types'] = self.expr.type.coerce_to_is_types return super(TypeDecorator.Comparator, self).operate( op, *other, **kwargs) def reverse_operate(self, op, other, **kwargs): kwargs['_python_is_types'] = self.expr.type.coerce_to_is_types return super(TypeDecorator.Comparator, self).reverse_operate( op, other, **kwargs) @property def comparator_factory(self): return type("TDComparator", (TypeDecorator.Comparator, self.impl.comparator_factory), {}) def _gen_dialect_impl(self, dialect): """ #todo """ adapted = dialect.type_descriptor(self) if adapted is not self: return adapted # otherwise adapt the impl type, link # to a copy of this TypeDecorator and return # that. 
typedesc = self.load_dialect_impl(dialect).dialect_impl(dialect) tt = self.copy() if not isinstance(tt, self.__class__): raise AssertionError('Type object %s does not properly ' 'implement the copy() method, it must ' 'return an object of type %s' % (self, self.__class__)) tt.impl = typedesc return tt @property def _type_affinity(self): """ #todo """ return self.impl._type_affinity def type_engine(self, dialect): """Return a dialect-specific :class:`.TypeEngine` instance for this :class:`.TypeDecorator`. In most cases this returns a dialect-adapted form of the :class:`.TypeEngine` type represented by ``self.impl``. Makes usage of :meth:`dialect_impl` but also traverses into wrapped :class:`.TypeDecorator` instances. Behavior can be customized here by overriding :meth:`load_dialect_impl`. """ adapted = dialect.type_descriptor(self) if type(adapted) is not type(self): return adapted elif isinstance(self.impl, TypeDecorator): return self.impl.type_engine(dialect) else: return self.load_dialect_impl(dialect) def load_dialect_impl(self, dialect): """Return a :class:`.TypeEngine` object corresponding to a dialect. This is an end-user override hook that can be used to provide differing types depending on the given dialect. It is used by the :class:`.TypeDecorator` implementation of :meth:`type_engine` to help determine what type should ultimately be returned for a given :class:`.TypeDecorator`. By default returns ``self.impl``. """ return self.impl def __getattr__(self, key): """Proxy all other undefined accessors to the underlying implementation.""" return getattr(self.impl, key) def process_bind_param(self, value, dialect): """Receive a bound parameter value to be converted. Subclasses override this method to return the value that should be passed along to the underlying :class:`.TypeEngine` object, and from there to the DBAPI ``execute()`` method. The operation could be anything desired to perform custom behavior, such as transforming or serializing data. This could also be used as a hook for validating logic. This operation should be designed with the reverse operation in mind, which would be the process_result_value method of this class. :param value: Data to operate upon, of any type expected by this method in the subclass. Can be ``None``. :param dialect: the :class:`.Dialect` in use. """ raise NotImplementedError() def process_result_value(self, value, dialect): """Receive a result-row column value to be converted. Subclasses should implement this method to operate on data fetched from the database. Subclasses override this method to return the value that should be passed back to the application, given a value that is already processed by the underlying :class:`.TypeEngine` object, originally from the DBAPI cursor method ``fetchone()`` or similar. The operation could be anything desired to perform custom behavior, such as transforming or serializing data. This could also be used as a hook for validating logic. :param value: Data to operate upon, of any type expected by this method in the subclass. Can be ``None``. :param dialect: the :class:`.Dialect` in use. This operation should be designed to be reversible by the "process_bind_param" method of this class. """ raise NotImplementedError() @util.memoized_property def _has_bind_processor(self): """memoized boolean, check if process_bind_param is implemented. Allows the base process_bind_param to raise NotImplementedError without needing to test an expensive exception throw. 
""" return self.__class__.process_bind_param.func_code \ is not TypeDecorator.process_bind_param.func_code def bind_processor(self, dialect): """Provide a bound value processing function for the given :class:`.Dialect`. This is the method that fulfills the :class:`.TypeEngine` contract for bound value conversion. :class:`.TypeDecorator` will wrap a user-defined implementation of :meth:`process_bind_param` here. User-defined code can override this method directly, though its likely best to use :meth:`process_bind_param` so that the processing provided by ``self.impl`` is maintained. :param dialect: Dialect instance in use. This method is the reverse counterpart to the :meth:`result_processor` method of this class. """ if self._has_bind_processor: process_param = self.process_bind_param impl_processor = self.impl.bind_processor(dialect) if impl_processor: def process(value): return impl_processor(process_param(value, dialect)) else: def process(value): return process_param(value, dialect) return process else: return self.impl.bind_processor(dialect) @util.memoized_property def _has_result_processor(self): """memoized boolean, check if process_result_value is implemented. Allows the base process_result_value to raise NotImplementedError without needing to test an expensive exception throw. """ return self.__class__.process_result_value.func_code \ is not TypeDecorator.process_result_value.func_code def result_processor(self, dialect, coltype): """Provide a result value processing function for the given :class:`.Dialect`. This is the method that fulfills the :class:`.TypeEngine` contract for result value conversion. :class:`.TypeDecorator` will wrap a user-defined implementation of :meth:`process_result_value` here. User-defined code can override this method directly, though its likely best to use :meth:`process_result_value` so that the processing provided by ``self.impl`` is maintained. :param dialect: Dialect instance in use. :param coltype: An SQLAlchemy data type This method is the reverse counterpart to the :meth:`bind_processor` method of this class. """ if self._has_result_processor: process_value = self.process_result_value impl_processor = self.impl.result_processor(dialect, coltype) if impl_processor: def process(value): return process_value(impl_processor(value), dialect) else: def process(value): return process_value(value, dialect) return process else: return self.impl.result_processor(dialect, coltype) def coerce_compared_value(self, op, value): """Suggest a type for a 'coerced' Python value in an expression. By default, returns self. This method is called by the expression system when an object using this type is on the left or right side of an expression against a plain Python object which does not yet have a SQLAlchemy type assigned:: expr = table.c.somecolumn + 35 Where above, if ``somecolumn`` uses this type, this method will be called with the value ``operator.add`` and ``35``. The return value is whatever SQLAlchemy type should be used for ``35`` for this particular operation. """ return self def copy(self): """Produce a copy of this :class:`.TypeDecorator` instance. This is a shallow copy and is provided to fulfill part of the :class:`.TypeEngine` contract. It usually does not need to be overridden unless the user-defined :class:`.TypeDecorator` has local state that should be deep-copied. 
""" instance = self.__class__.__new__(self.__class__) instance.__dict__.update(self.__dict__) return instance def get_dbapi_type(self, dbapi): """Return the DBAPI type object represented by this :class:`.TypeDecorator`. By default this calls upon :meth:`.TypeEngine.get_dbapi_type` of the underlying "impl". """ return self.impl.get_dbapi_type(dbapi) def compare_values(self, x, y): """Given two values, compare them for equality. By default this calls upon :meth:`.TypeEngine.compare_values` of the underlying "impl", which in turn usually uses the Python equals operator ``==``. This function is used by the ORM to compare an original-loaded value with an intercepted "changed" value, to determine if a net change has occurred. """ return self.impl.compare_values(x, y) def __repr__(self): return util.generic_repr(self, to_inspect=self.impl) class Variant(TypeDecorator): """A wrapping type that selects among a variety of implementations based on dialect in use. The :class:`.Variant` type is typically constructed using the :meth:`.TypeEngine.with_variant` method. .. versionadded:: 0.7.2 """ def __init__(self, base, mapping): """Construct a new :class:`.Variant`. :param base: the base 'fallback' type :param mapping: dictionary of string dialect names to :class:`.TypeEngine` instances. """ self.impl = base self.mapping = mapping def load_dialect_impl(self, dialect): if dialect.name in self.mapping: return self.mapping[dialect.name] else: return self.impl def with_variant(self, type_, dialect_name): """Return a new :class:`.Variant` which adds the given type + dialect name to the mapping, in addition to the mapping present in this :class:`.Variant`. :param type_: a :class:`.TypeEngine` that will be selected as a variant from the originating type, when a dialect of the given name is in use. :param dialect_name: base name of the dialect which uses this type. (i.e. ``'postgresql'``, ``'mysql'``, etc.) """ if dialect_name in self.mapping: raise exc.ArgumentError( "Dialect '%s' is already present in " "the mapping for this Variant" % dialect_name) mapping = self.mapping.copy() mapping[dialect_name] = type_ return Variant(self.impl, mapping) def to_instance(typeobj, *arg, **kw): if typeobj is None: return NULLTYPE if util.callable(typeobj): return typeobj(*arg, **kw) else: return typeobj def adapt_type(typeobj, colspecs): if isinstance(typeobj, type): typeobj = typeobj() for t in typeobj.__class__.__mro__[0:-1]: try: impltype = colspecs[t] break except KeyError: pass else: # couldnt adapt - so just return the type itself # (it may be a user-defined type) return typeobj # if we adapted the given generic type to a database-specific type, # but it turns out the originally given "generic" type # is actually a subclass of our resulting type, then we were already # given a more specific type than that required; so use that. if (issubclass(typeobj.__class__, impltype)): return typeobj return typeobj.adapt(impltype) class NullType(TypeEngine): """An unknown type. NullTypes will stand in if :class:`~sqlalchemy.Table` reflection encounters a column data type unknown to SQLAlchemy. The resulting columns are nearly fully usable: the DB-API adapter will handle all translation to and from the database data type. NullType does not have sufficient information to particpate in a ``CREATE TABLE`` statement and will raise an exception if encountered during a :meth:`~sqlalchemy.Table.create` operation. 
""" __visit_name__ = 'null' class Comparator(TypeEngine.Comparator): def _adapt_expression(self, op, other_comparator): if isinstance(other_comparator, NullType.Comparator) or \ not operators.is_commutative(op): return op, self.expr.type else: return other_comparator._adapt_expression(op, self) comparator_factory = Comparator NullTypeEngine = NullType class Concatenable(object): """A mixin that marks a type as supporting 'concatenation', typically strings.""" class Comparator(TypeEngine.Comparator): def _adapt_expression(self, op, other_comparator): if op is operators.add and isinstance(other_comparator, (Concatenable.Comparator, NullType.Comparator)): return operators.concat_op, self.expr.type else: return op, self.expr.type comparator_factory = Comparator class _DateAffinity(object): """Mixin date/time specific expression adaptations. Rules are implemented within Date,Time,Interval,DateTime, Numeric, Integer. Based on http://www.postgresql.org/docs/current/static /functions-datetime.html. """ @property def _expression_adaptations(self): raise NotImplementedError() class Comparator(TypeEngine.Comparator): _blank_dict = util.immutabledict() def _adapt_expression(self, op, other_comparator): othertype = other_comparator.type._type_affinity return op, \ self.type._expression_adaptations.get(op, self._blank_dict).\ get(othertype, NULLTYPE) comparator_factory = Comparator class String(Concatenable, TypeEngine): """The base for all string and character types. In SQL, corresponds to VARCHAR. Can also take Python unicode objects and encode to the database's encoding in bind params (and the reverse for result sets.) The `length` field is usually required when the `String` type is used within a CREATE TABLE statement, as VARCHAR requires a length on most databases. """ __visit_name__ = 'string' def __init__(self, length=None, collation=None, convert_unicode=False, unicode_error=None, _warn_on_bytestring=False ): """ Create a string-holding type. :param length: optional, a length for the column for use in DDL and CAST expressions. May be safely omitted if no ``CREATE TABLE`` will be issued. Certain databases may require a ``length`` for use in DDL, and will raise an exception when the ``CREATE TABLE`` DDL is issued if a ``VARCHAR`` with no length is included. Whether the value is interpreted as bytes or characters is database specific. :param collation: Optional, a column-level collation for use in DDL and CAST expressions. Renders using the COLLATE keyword supported by SQLite, MySQL, and Postgresql. E.g.:: >>> from sqlalchemy import cast, select, String >>> print select([cast('some string', String(collation='utf8'))]) SELECT CAST(:param_1 AS VARCHAR COLLATE utf8) AS anon_1 .. versionadded:: 0.8 Added support for COLLATE to all string types. :param convert_unicode: When set to ``True``, the :class:`.String` type will assume that input is to be passed as Python ``unicode`` objects, and results returned as Python ``unicode`` objects. If the DBAPI in use does not support Python unicode (which is fewer and fewer these days), SQLAlchemy will encode/decode the value, using the value of the ``encoding`` parameter passed to :func:`.create_engine` as the encoding. When using a DBAPI that natively supports Python unicode objects, this flag generally does not need to be set. 
For columns that are explicitly intended to store non-ASCII data, the :class:`.Unicode` or :class:`UnicodeText` types should be used regardless, which feature the same behavior of ``convert_unicode`` but also indicate an underlying column type that directly supports unicode, such as ``NVARCHAR``. For the extremely rare case that Python ``unicode`` is to be encoded/decoded by SQLAlchemy on a backend that does natively support Python ``unicode``, the value ``force`` can be passed here which will cause SQLAlchemy's encode/decode services to be used unconditionally. :param unicode_error: Optional, a method to use to handle Unicode conversion errors. Behaves like the ``errors`` keyword argument to the standard library's ``string.decode()`` functions. This flag requires that ``convert_unicode`` is set to ``force`` - otherwise, SQLAlchemy is not guaranteed to handle the task of unicode conversion. Note that this flag adds significant performance overhead to row-fetching operations for backends that already return unicode objects natively (which most DBAPIs do). This flag should only be used as a last resort for reading strings from a column with varied or corrupted encodings. """ if unicode_error is not None and convert_unicode != 'force': raise exc.ArgumentError("convert_unicode must be 'force' " "when unicode_error is set.") self.length = length self.collation = collation self.convert_unicode = convert_unicode self.unicode_error = unicode_error self._warn_on_bytestring = _warn_on_bytestring def bind_processor(self, dialect): if self.convert_unicode or dialect.convert_unicode: if dialect.supports_unicode_binds and \ self.convert_unicode != 'force': if self._warn_on_bytestring: def process(value): # Py3K #if isinstance(value, bytes): # Py2K if isinstance(value, str): # end Py2K util.warn("Unicode type received non-unicode bind " "param value.") return value return process else: return None else: encoder = codecs.getencoder(dialect.encoding) warn_on_bytestring = self._warn_on_bytestring def process(value): if isinstance(value, unicode): return encoder(value, self.unicode_error)[0] elif warn_on_bytestring and value is not None: util.warn("Unicode type received non-unicode bind " "param value") return value return process else: return None def result_processor(self, dialect, coltype): wants_unicode = self.convert_unicode or dialect.convert_unicode needs_convert = wants_unicode and \ (dialect.returns_unicode_strings is not True or self.convert_unicode == 'force') if needs_convert: to_unicode = processors.to_unicode_processor_factory( dialect.encoding, self.unicode_error) if dialect.returns_unicode_strings: # we wouldn't be here unless convert_unicode='force' # was specified, or the driver has erratic unicode-returning # habits. since we will be getting back unicode # in most cases, we check for it (decode will fail). def process(value): if isinstance(value, unicode): return value else: return to_unicode(value) return process else: # here, we assume that the object is not unicode, # avoiding expensive isinstance() check. return to_unicode else: return None @property def python_type(self): if self.convert_unicode: return unicode else: return str def get_dbapi_type(self, dbapi): return dbapi.STRING class Text(String): """A variably sized string type. In SQL, usually corresponds to CLOB or TEXT. Can also take Python unicode objects and encode to the database's encoding in bind params (and the reverse for result sets.) 
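# Illustrative sketch (not part of the library source): the String and Text
# types described above in table form.  The length is needed on most backends
# once CREATE TABLE is emitted; convert_unicode is only required for DBAPIs
# that do not handle Python unicode natively.  Names are made up.
from sqlalchemy import Table, Column, MetaData, Integer, String, Text

metadata = MetaData()
documents = Table('documents', metadata,
    Column('id', Integer, primary_key=True),
    Column('title', String(200, convert_unicode=True)),
    Column('body', Text),        # unlengthed CLOB/TEXT column
)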
In general, TEXT objects do not have a length; while some databases will accept a length argument here, it will be rejected by others. """ __visit_name__ = 'text' class Unicode(String): """A variable length Unicode string type. The :class:`.Unicode` type is a :class:`.String` subclass that assumes input and output as Python ``unicode`` data, and in that regard is equivalent to the usage of the ``convert_unicode`` flag with the :class:`.String` type. However, unlike plain :class:`.String`, it also implies an underlying column type that is explicitly supporting of non-ASCII data, such as ``NVARCHAR`` on Oracle and SQL Server. This can impact the output of ``CREATE TABLE`` statements and ``CAST`` functions at the dialect level, and can also affect the handling of bound parameters in some specific DBAPI scenarios. The encoding used by the :class:`.Unicode` type is usually determined by the DBAPI itself; most modern DBAPIs feature support for Python ``unicode`` objects as bound values and result set values, and the encoding should be configured as detailed in the notes for the target DBAPI in the :ref:`dialect_toplevel` section. For those DBAPIs which do not support, or are not configured to accommodate Python ``unicode`` objects directly, SQLAlchemy does the encoding and decoding outside of the DBAPI. The encoding in this scenario is determined by the ``encoding`` flag passed to :func:`.create_engine`. When using the :class:`.Unicode` type, it is only appropriate to pass Python ``unicode`` objects, and not plain ``str``. If a plain ``str`` is passed under Python 2, a warning is emitted. If you notice your application emitting these warnings but you're not sure of the source of them, the Python ``warnings`` filter, documented at http://docs.python.org/library/warnings.html, can be used to turn these warnings into exceptions which will illustrate a stack trace:: import warnings warnings.simplefilter('error') For an application that wishes to pass plain bytestrings and Python ``unicode`` objects to the ``Unicode`` type equally, the bytestrings must first be decoded into unicode. The recipe at :ref:`coerce_to_unicode` illustrates how this is done. See also: :class:`.UnicodeText` - unlengthed textual counterpart to :class:`.Unicode`. """ __visit_name__ = 'unicode' def __init__(self, length=None, **kwargs): """ Create a :class:`.Unicode` object. Parameters are the same as that of :class:`.String`, with the exception that ``convert_unicode`` defaults to ``True``. """ kwargs.setdefault('convert_unicode', True) kwargs.setdefault('_warn_on_bytestring', True) super(Unicode, self).__init__(length=length, **kwargs) class UnicodeText(Text): """An unbounded-length Unicode string type. See :class:`.Unicode` for details on the unicode behavior of this object. Like :class:`.Unicode`, usage the :class:`.UnicodeText` type implies a unicode-capable type being used on the backend, such as ``NCLOB``, ``NTEXT``. """ __visit_name__ = 'unicode_text' def __init__(self, length=None, **kwargs): """ Create a Unicode-converting Text type. Parameters are the same as that of :class:`.Text`, with the exception that ``convert_unicode`` defaults to ``True``. 
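# Illustrative sketch (not part of the library source): Unicode and
# UnicodeText behave like String/Text with convert_unicode enabled, but also
# imply a unicode-capable DDL type (NVARCHAR / NCLOB) on backends that
# distinguish one.  Table and column names are made up.
from sqlalchemy import Table, Column, MetaData, Integer, Unicode, UnicodeText

metadata = MetaData()
articles = Table('articles', metadata,
    Column('id', Integer, primary_key=True),
    Column('title', Unicode(200)),
    Column('body', UnicodeText),
)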
""" kwargs.setdefault('convert_unicode', True) kwargs.setdefault('_warn_on_bytestring', True) super(UnicodeText, self).__init__(length=length, **kwargs) class Integer(_DateAffinity, TypeEngine): """A type for ``int`` integers.""" __visit_name__ = 'integer' def get_dbapi_type(self, dbapi): return dbapi.NUMBER @property def python_type(self): return int @util.memoized_property def _expression_adaptations(self): # TODO: need a dictionary object that will # handle operators generically here, this is incomplete return { operators.add: { Date: Date, Integer: self.__class__, Numeric: Numeric, }, operators.mul: { Interval: Interval, Integer: self.__class__, Numeric: Numeric, }, # Py2K operators.div: { Integer: self.__class__, Numeric: Numeric, }, # end Py2K operators.truediv: { Integer: self.__class__, Numeric: Numeric, }, operators.sub: { Integer: self.__class__, Numeric: Numeric, }, } class SmallInteger(Integer): """A type for smaller ``int`` integers. Typically generates a ``SMALLINT`` in DDL, and otherwise acts like a normal :class:`.Integer` on the Python side. """ __visit_name__ = 'small_integer' class BigInteger(Integer): """A type for bigger ``int`` integers. Typically generates a ``BIGINT`` in DDL, and otherwise acts like a normal :class:`.Integer` on the Python side. """ __visit_name__ = 'big_integer' class Numeric(_DateAffinity, TypeEngine): """A type for fixed precision numbers. Typically generates DECIMAL or NUMERIC. Returns ``decimal.Decimal`` objects by default, applying conversion as needed. .. note:: The `cdecimal `_ library is a high performing alternative to Python's built-in ``decimal.Decimal`` type, which performs very poorly in high volume situations. SQLAlchemy 0.7 is tested against ``cdecimal`` and supports it fully. The type is not necessarily supported by DBAPI implementations however, most of which contain an import for plain ``decimal`` in their source code, even though some such as psycopg2 provide hooks for alternate adapters. SQLAlchemy imports ``decimal`` globally as well. The most straightforward and foolproof way to use "cdecimal" given current DBAPI and Python support is to patch it directly into sys.modules before anything else is imported:: import sys import cdecimal sys.modules["decimal"] = cdecimal While the global patch is a little ugly, it's particularly important to use just one decimal library at a time since Python Decimal and cdecimal Decimal objects are not currently compatible *with each other*:: >>> import cdecimal >>> import decimal >>> decimal.Decimal("10") == cdecimal.Decimal("10") False SQLAlchemy will provide more natural support of cdecimal if and when it becomes a standard part of Python installations and is supported by all DBAPIs. """ __visit_name__ = 'numeric' def __init__(self, precision=None, scale=None, asdecimal=True): """ Construct a Numeric. :param precision: the numeric precision for use in DDL ``CREATE TABLE``. :param scale: the numeric scale for use in DDL ``CREATE TABLE``. :param asdecimal: default True. Return whether or not values should be sent as Python Decimal objects, or as floats. Different DBAPIs send one or the other based on datatypes - the Numeric type will ensure that return values are one or the other across DBAPIs consistently. When using the ``Numeric`` type, care should be taken to ensure that the asdecimal setting is apppropriate for the DBAPI in use - when Numeric applies a conversion from Decimal->float or float-> Decimal, this conversion incurs an additional performance overhead for all result columns received. 
DBAPIs that return Decimal natively (e.g. psycopg2) will have better accuracy and higher performance with a setting of ``True``, as the native translation to Decimal reduces the amount of floating- point issues at play, and the Numeric type itself doesn't need to apply any further conversions. However, another DBAPI which returns floats natively *will* incur an additional conversion overhead, and is still subject to floating point data loss - in which case ``asdecimal=False`` will at least remove the extra conversion overhead. """ self.precision = precision self.scale = scale self.asdecimal = asdecimal def get_dbapi_type(self, dbapi): return dbapi.NUMBER @property def python_type(self): if self.asdecimal: return decimal.Decimal else: return float def bind_processor(self, dialect): if dialect.supports_native_decimal: return None else: return processors.to_float def result_processor(self, dialect, coltype): if self.asdecimal: if dialect.supports_native_decimal: # we're a "numeric", DBAPI will give us Decimal directly return None else: util.warn('Dialect %s+%s does *not* support Decimal ' 'objects natively, and SQLAlchemy must ' 'convert from floating point - rounding ' 'errors and other issues may occur. Please ' 'consider storing Decimal numbers as strings ' 'or integers on this platform for lossless ' 'storage.' % (dialect.name, dialect.driver)) # we're a "numeric", DBAPI returns floats, convert. if self.scale is not None: return processors.to_decimal_processor_factory( decimal.Decimal, self.scale) else: return processors.to_decimal_processor_factory( decimal.Decimal) else: if dialect.supports_native_decimal: return processors.to_float else: return None @util.memoized_property def _expression_adaptations(self): return { operators.mul: { Interval: Interval, Numeric: self.__class__, Integer: self.__class__, }, # Py2K operators.div: { Numeric: self.__class__, Integer: self.__class__, }, # end Py2K operators.truediv: { Numeric: self.__class__, Integer: self.__class__, }, operators.add: { Numeric: self.__class__, Integer: self.__class__, }, operators.sub: { Numeric: self.__class__, Integer: self.__class__, } } class Float(Numeric): """A type for ``float`` numbers. Returns Python ``float`` objects by default, applying conversion as needed. """ __visit_name__ = 'float' scale = None def __init__(self, precision=None, asdecimal=False, **kwargs): """ Construct a Float. :param precision: the numeric precision for use in DDL ``CREATE TABLE``. :param asdecimal: the same flag as that of :class:`.Numeric`, but defaults to ``False``. Note that setting this flag to ``True`` results in floating point conversion. :param \**kwargs: deprecated. Additional arguments here are ignored by the default :class:`.Float` type. For database specific floats that support additional arguments, see that dialect's documentation for details, such as :class:`sqlalchemy.dialects.mysql.FLOAT`. 
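# Illustrative sketch (not part of the library source): Numeric returns
# decimal.Decimal values by default, Float returns plain floats; the
# asdecimal flag flips either behavior at the cost of an extra per-column
# conversion, per the notes above.  Table and column names are made up.
from sqlalchemy import Table, Column, MetaData, Integer, Numeric, Float

metadata = MetaData()
invoices = Table('invoices', metadata,
    Column('id', Integer, primary_key=True),
    Column('total', Numeric(10, 2)),                 # results as decimal.Decimal
    Column('exchange_rate', Float(asdecimal=True)),  # float column, Decimal results
)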
""" self.precision = precision self.asdecimal = asdecimal if kwargs: util.warn_deprecated("Additional keyword arguments " "passed to Float ignored.") def result_processor(self, dialect, coltype): if self.asdecimal: return processors.to_decimal_processor_factory(decimal.Decimal) else: return None @util.memoized_property def _expression_adaptations(self): return { operators.mul: { Interval: Interval, Numeric: self.__class__, }, # Py2K operators.div: { Numeric: self.__class__, }, # end Py2K operators.truediv: { Numeric: self.__class__, }, operators.add: { Numeric: self.__class__, }, operators.sub: { Numeric: self.__class__, } } class DateTime(_DateAffinity, TypeEngine): """A type for ``datetime.datetime()`` objects. Date and time types return objects from the Python ``datetime`` module. Most DBAPIs have built in support for the datetime module, with the noted exception of SQLite. In the case of SQLite, date and time types are stored as strings which are then converted back to datetime objects when rows are returned. """ __visit_name__ = 'datetime' def __init__(self, timezone=False): """Construct a new :class:`.DateTime`. :param timezone: boolean. If True, and supported by the backend, will produce 'TIMESTAMP WITH TIMEZONE'. For backends that don't support timezone aware timestamps, has no effect. """ self.timezone = timezone def get_dbapi_type(self, dbapi): return dbapi.DATETIME @property def python_type(self): return dt.datetime @util.memoized_property def _expression_adaptations(self): return { operators.add: { Interval: self.__class__, }, operators.sub: { Interval: self.__class__, DateTime: Interval, }, } class Date(_DateAffinity, TypeEngine): """A type for ``datetime.date()`` objects.""" __visit_name__ = 'date' def get_dbapi_type(self, dbapi): return dbapi.DATETIME @property def python_type(self): return dt.date @util.memoized_property def _expression_adaptations(self): return { operators.add: { Integer: self.__class__, Interval: DateTime, Time: DateTime, }, operators.sub: { # date - integer = date Integer: self.__class__, # date - date = integer. 
Date: Integer, Interval: DateTime, # date - datetime = interval, # this one is not in the PG docs # but works DateTime: Interval, }, } class Time(_DateAffinity, TypeEngine): """A type for ``datetime.time()`` objects.""" __visit_name__ = 'time' def __init__(self, timezone=False): self.timezone = timezone def get_dbapi_type(self, dbapi): return dbapi.DATETIME @property def python_type(self): return dt.time @util.memoized_property def _expression_adaptations(self): return { operators.add: { Date: DateTime, Interval: self.__class__ }, operators.sub: { Time: Interval, Interval: self.__class__, }, } class _Binary(TypeEngine): """Define base behavior for binary types.""" def __init__(self, length=None): self.length = length @property def python_type(self): # Py3K #return bytes # Py2K return str # end Py2K # Python 3 - sqlite3 doesn't need the `Binary` conversion # here, though pg8000 does to indicate "bytea" def bind_processor(self, dialect): DBAPIBinary = dialect.dbapi.Binary def process(value): x = self if value is not None: return DBAPIBinary(value) else: return None return process # Python 3 has native bytes() type # both sqlite3 and pg8000 seem to return it, # psycopg2 as of 2.5 returns 'memoryview' # Py3K #def result_processor(self, dialect, coltype): # def process(value): # if value is not None: # value = bytes(value) # return value # return process # Py2K def result_processor(self, dialect, coltype): if util.jython: def process(value): if value is not None: if isinstance(value, array.array): return value.tostring() return str(value) else: return None else: process = processors.to_str return process # end Py2K def coerce_compared_value(self, op, value): """See :meth:`.TypeEngine.coerce_compared_value` for a description.""" if isinstance(value, basestring): return self else: return super(_Binary, self).coerce_compared_value(op, value) def get_dbapi_type(self, dbapi): return dbapi.BINARY class LargeBinary(_Binary): """A type for large binary byte data. The Binary type generates BLOB or BYTEA when tables are created, and also converts incoming values using the ``Binary`` callable provided by each DB-API. """ __visit_name__ = 'large_binary' def __init__(self, length=None): """ Construct a LargeBinary type. :param length: optional, a length for the column for use in DDL statements, for those BLOB types that accept a length (i.e. MySQL). It does *not* produce a small BINARY/VARBINARY type - use the BINARY/VARBINARY types specifically for those. May be safely omitted if no ``CREATE TABLE`` will be issued. Certain databases may require a *length* for use in DDL, and will raise an exception when the ``CREATE TABLE`` DDL is issued. """ _Binary.__init__(self, length=length) class Binary(LargeBinary): """Deprecated. Renamed to LargeBinary.""" def __init__(self, *arg, **kw): util.warn_deprecated('The Binary type has been renamed to ' 'LargeBinary.') LargeBinary.__init__(self, *arg, **kw) class SchemaType(events.SchemaEventTarget): """Mark a type as possibly requiring schema-level DDL for usage. Supports types that must be explicitly created/dropped (i.e. PG ENUM type) as well as types that are complimented by table or schema level constraints, triggers, and other rules. :class:`.SchemaType` classes can also be targets for the :meth:`.DDLEvents.before_parent_attach` and :meth:`.DDLEvents.after_parent_attach` events, where the events fire off surrounding the association of the type object with a parent :class:`.Column`. .. 
seealso:: :class:`.Enum` :class:`.Boolean` """ def __init__(self, **kw): self.name = kw.pop('name', None) self.quote = kw.pop('quote', None) self.schema = kw.pop('schema', None) self.metadata = kw.pop('metadata', None) self.inherit_schema = kw.pop('inherit_schema', False) if self.metadata: event.listen( self.metadata, "before_create", util.portable_instancemethod(self._on_metadata_create) ) event.listen( self.metadata, "after_drop", util.portable_instancemethod(self._on_metadata_drop) ) def _set_parent(self, column): column._on_table_attach(util.portable_instancemethod(self._set_table)) def _set_table(self, column, table): if self.inherit_schema: self.schema = table.schema event.listen( table, "before_create", util.portable_instancemethod( self._on_table_create) ) event.listen( table, "after_drop", util.portable_instancemethod(self._on_table_drop) ) if self.metadata is None: # TODO: what's the difference between self.metadata # and table.metadata here ? event.listen( table.metadata, "before_create", util.portable_instancemethod(self._on_metadata_create) ) event.listen( table.metadata, "after_drop", util.portable_instancemethod(self._on_metadata_drop) ) def copy(self, **kw): return self.adapt(self.__class__) def adapt(self, impltype, **kw): schema = kw.pop('schema', self.schema) metadata = kw.pop('metadata', self.metadata) return impltype(name=self.name, quote=self.quote, schema=schema, metadata=metadata, inherit_schema=self.inherit_schema, **kw ) @property def bind(self): return self.metadata and self.metadata.bind or None def create(self, bind=None, checkfirst=False): """Issue CREATE ddl for this type, if applicable.""" if bind is None: bind = schema._bind_or_error(self) t = self.dialect_impl(bind.dialect) if t.__class__ is not self.__class__ and isinstance(t, SchemaType): t.create(bind=bind, checkfirst=checkfirst) def drop(self, bind=None, checkfirst=False): """Issue DROP ddl for this type, if applicable.""" if bind is None: bind = schema._bind_or_error(self) t = self.dialect_impl(bind.dialect) if t.__class__ is not self.__class__ and isinstance(t, SchemaType): t.drop(bind=bind, checkfirst=checkfirst) def _on_table_create(self, target, bind, **kw): t = self.dialect_impl(bind.dialect) if t.__class__ is not self.__class__ and isinstance(t, SchemaType): t._on_table_create(target, bind, **kw) def _on_table_drop(self, target, bind, **kw): t = self.dialect_impl(bind.dialect) if t.__class__ is not self.__class__ and isinstance(t, SchemaType): t._on_table_drop(target, bind, **kw) def _on_metadata_create(self, target, bind, **kw): t = self.dialect_impl(bind.dialect) if t.__class__ is not self.__class__ and isinstance(t, SchemaType): t._on_metadata_create(target, bind, **kw) def _on_metadata_drop(self, target, bind, **kw): t = self.dialect_impl(bind.dialect) if t.__class__ is not self.__class__ and isinstance(t, SchemaType): t._on_metadata_drop(target, bind, **kw) class Enum(String, SchemaType): """Generic Enum Type. The Enum type provides a set of possible string values which the column is constrained towards. By default, uses the backend's native ENUM type if available, else uses VARCHAR + a CHECK constraint. .. seealso:: :class:`~.postgresql.ENUM` - PostgreSQL-specific type, which has additional functionality. """ __visit_name__ = 'enum' def __init__(self, *enums, **kw): """Construct an enum. Keyword arguments which don't apply to a specific backend are ignored by that backend. :param \*enums: string or unicode enumeration labels. 
If unicode labels are present, the `convert_unicode` flag is auto-enabled. :param convert_unicode: Enable unicode-aware bind parameter and result-set processing for this Enum's data. This is set automatically based on the presence of unicode label strings. :param metadata: Associate this type directly with a ``MetaData`` object. For types that exist on the target database as an independent schema construct (Postgresql), this type will be created and dropped within ``create_all()`` and ``drop_all()`` operations. If the type is not associated with any ``MetaData`` object, it will associate itself with each ``Table`` in which it is used, and will be created when any of those individual tables are created, after a check is performed for it's existence. The type is only dropped when ``drop_all()`` is called for that ``Table`` object's metadata, however. :param name: The name of this type. This is required for Postgresql and any future supported database which requires an explicitly named type, or an explicitly named constraint in order to generate the type and/or a table that uses it. :param native_enum: Use the database's native ENUM type when available. Defaults to True. When False, uses VARCHAR + check constraint for all backends. :param schema: Schema name of this type. For types that exist on the target database as an independent schema construct (Postgresql), this parameter specifies the named schema in which the type is present. .. note:: The ``schema`` of the :class:`.Enum` type does not by default make use of the ``schema`` established on the owning :class:`.Table`. If this behavior is desired, set the ``inherit_schema`` flag to ``True``. :param quote: Force quoting to be on or off on the type's name. If left as the default of `None`, the usual schema-level "case sensitive"/"reserved name" rules are used to determine if this type's name should be quoted. :param inherit_schema: When ``True``, the "schema" from the owning :class:`.Table` will be copied to the "schema" attribute of this :class:`.Enum`, replacing whatever value was passed for the ``schema`` attribute. This also takes effect when using the :meth:`.Table.tometadata` operation. .. 
versionadded:: 0.8 """ self.enums = enums self.native_enum = kw.pop('native_enum', True) convert_unicode = kw.pop('convert_unicode', None) if convert_unicode is None: for e in enums: if isinstance(e, unicode): convert_unicode = True break else: convert_unicode = False if self.enums: length = max(len(x) for x in self.enums) else: length = 0 String.__init__(self, length=length, convert_unicode=convert_unicode, ) SchemaType.__init__(self, **kw) def __repr__(self): return util.generic_repr(self, [ ("native_enum", True), ("name", None) ]) def _should_create_constraint(self, compiler): return not self.native_enum or \ not compiler.dialect.supports_native_enum def _set_table(self, column, table): if self.native_enum: SchemaType._set_table(self, column, table) e = schema.CheckConstraint( type_coerce(column, self).in_(self.enums), name=self.name, _create_rule=util.portable_instancemethod( self._should_create_constraint) ) table.append_constraint(e) def adapt(self, impltype, **kw): schema = kw.pop('schema', self.schema) metadata = kw.pop('metadata', self.metadata) if issubclass(impltype, Enum): return impltype(name=self.name, quote=self.quote, schema=schema, metadata=metadata, convert_unicode=self.convert_unicode, native_enum=self.native_enum, inherit_schema=self.inherit_schema, *self.enums, **kw ) else: return super(Enum, self).adapt(impltype, **kw) class PickleType(TypeDecorator): """Holds Python objects, which are serialized using pickle. PickleType builds upon the Binary type to apply Python's ``pickle.dumps()`` to incoming objects, and ``pickle.loads()`` on the way out, allowing any pickleable Python object to be stored as a serialized binary field. To allow ORM change events to propagate for elements associated with :class:`.PickleType`, see :ref:`mutable_toplevel`. """ impl = LargeBinary def __init__(self, protocol=pickle.HIGHEST_PROTOCOL, pickler=None, comparator=None): """ Construct a PickleType. :param protocol: defaults to ``pickle.HIGHEST_PROTOCOL``. :param pickler: defaults to cPickle.pickle or pickle.pickle if cPickle is not available. May be any object with pickle-compatible ``dumps` and ``loads`` methods. :param comparator: a 2-arg callable predicate used to compare values of this type. If left as ``None``, the Python "equals" operator is used to compare values. """ self.protocol = protocol self.pickler = pickler or pickle self.comparator = comparator super(PickleType, self).__init__() def __reduce__(self): return PickleType, (self.protocol, None, self.comparator) def bind_processor(self, dialect): impl_processor = self.impl.bind_processor(dialect) dumps = self.pickler.dumps protocol = self.protocol if impl_processor: def process(value): if value is not None: value = dumps(value, protocol) return impl_processor(value) else: def process(value): if value is not None: value = dumps(value, protocol) return value return process def result_processor(self, dialect, coltype): impl_processor = self.impl.result_processor(dialect, coltype) loads = self.pickler.loads if impl_processor: def process(value): value = impl_processor(value) if value is None: return None return loads(value) else: def process(value): if value is None: return None return loads(value) return process def compare_values(self, x, y): if self.comparator: return self.comparator(x, y) else: return x == y class Boolean(TypeEngine, SchemaType): """A bool datatype. Boolean typically uses BOOLEAN or SMALLINT on the DDL side, and on the Python side deals in ``True`` or ``False``. 
""" __visit_name__ = 'boolean' def __init__(self, create_constraint=True, name=None): """Construct a Boolean. :param create_constraint: defaults to True. If the boolean is generated as an int/smallint, also create a CHECK constraint on the table that ensures 1 or 0 as a value. :param name: if a CHECK constraint is generated, specify the name of the constraint. """ self.create_constraint = create_constraint self.name = name def _should_create_constraint(self, compiler): return not compiler.dialect.supports_native_boolean def _set_table(self, column, table): if not self.create_constraint: return e = schema.CheckConstraint( type_coerce(column, self).in_([0, 1]), name=self.name, _create_rule=util.portable_instancemethod( self._should_create_constraint) ) table.append_constraint(e) @property def python_type(self): return bool def bind_processor(self, dialect): if dialect.supports_native_boolean: return None else: return processors.boolean_to_int def result_processor(self, dialect, coltype): if dialect.supports_native_boolean: return None else: return processors.int_to_boolean class Interval(_DateAffinity, TypeDecorator): """A type for ``datetime.timedelta()`` objects. The Interval type deals with ``datetime.timedelta`` objects. In PostgreSQL, the native ``INTERVAL`` type is used; for others, the value is stored as a date which is relative to the "epoch" (Jan. 1, 1970). Note that the ``Interval`` type does not currently provide date arithmetic operations on platforms which do not support interval types natively. Such operations usually require transformation of both sides of the expression (such as, conversion of both sides into integer epoch values first) which currently is a manual procedure (such as via :attr:`~sqlalchemy.sql.expression.func`). """ impl = DateTime epoch = dt.datetime.utcfromtimestamp(0) def __init__(self, native=True, second_precision=None, day_precision=None): """Construct an Interval object. :param native: when True, use the actual INTERVAL type provided by the database, if supported (currently Postgresql, Oracle). Otherwise, represent the interval data as an epoch value regardless. :param second_precision: For native interval types which support a "fractional seconds precision" parameter, i.e. Oracle and Postgresql :param day_precision: for native interval types which support a "day precision" parameter, i.e. Oracle. 
""" super(Interval, self).__init__() self.native = native self.second_precision = second_precision self.day_precision = day_precision def adapt(self, cls, **kw): if self.native and hasattr(cls, '_adapt_from_generic_interval'): return cls._adapt_from_generic_interval(self, **kw) else: return self.__class__( native=self.native, second_precision=self.second_precision, day_precision=self.day_precision, **kw) @property def python_type(self): return dt.timedelta def bind_processor(self, dialect): impl_processor = self.impl.bind_processor(dialect) epoch = self.epoch if impl_processor: def process(value): if value is not None: value = epoch + value return impl_processor(value) else: def process(value): if value is not None: value = epoch + value return value return process def result_processor(self, dialect, coltype): impl_processor = self.impl.result_processor(dialect, coltype) epoch = self.epoch if impl_processor: def process(value): value = impl_processor(value) if value is None: return None return value - epoch else: def process(value): if value is None: return None return value - epoch return process @util.memoized_property def _expression_adaptations(self): return { operators.add: { Date: DateTime, Interval: self.__class__, DateTime: DateTime, Time: Time, }, operators.sub: { Interval: self.__class__ }, operators.mul: { Numeric: self.__class__ }, operators.truediv: { Numeric: self.__class__ }, # Py2K operators.div: { Numeric: self.__class__ } # end Py2K } @property def _type_affinity(self): return Interval def coerce_compared_value(self, op, value): """See :meth:`.TypeEngine.coerce_compared_value` for a description.""" return self.impl.coerce_compared_value(op, value) class REAL(Float): """The SQL REAL type.""" __visit_name__ = 'REAL' class FLOAT(Float): """The SQL FLOAT type.""" __visit_name__ = 'FLOAT' class NUMERIC(Numeric): """The SQL NUMERIC type.""" __visit_name__ = 'NUMERIC' class DECIMAL(Numeric): """The SQL DECIMAL type.""" __visit_name__ = 'DECIMAL' class INTEGER(Integer): """The SQL INT or INTEGER type.""" __visit_name__ = 'INTEGER' INT = INTEGER class SMALLINT(SmallInteger): """The SQL SMALLINT type.""" __visit_name__ = 'SMALLINT' class BIGINT(BigInteger): """The SQL BIGINT type.""" __visit_name__ = 'BIGINT' class TIMESTAMP(DateTime): """The SQL TIMESTAMP type.""" __visit_name__ = 'TIMESTAMP' def get_dbapi_type(self, dbapi): return dbapi.TIMESTAMP class DATETIME(DateTime): """The SQL DATETIME type.""" __visit_name__ = 'DATETIME' class DATE(Date): """The SQL DATE type.""" __visit_name__ = 'DATE' class TIME(Time): """The SQL TIME type.""" __visit_name__ = 'TIME' class TEXT(Text): """The SQL TEXT type.""" __visit_name__ = 'TEXT' class CLOB(Text): """The CLOB type. This type is found in Oracle and Informix. 
""" __visit_name__ = 'CLOB' class VARCHAR(String): """The SQL VARCHAR type.""" __visit_name__ = 'VARCHAR' class NVARCHAR(Unicode): """The SQL NVARCHAR type.""" __visit_name__ = 'NVARCHAR' class CHAR(String): """The SQL CHAR type.""" __visit_name__ = 'CHAR' class NCHAR(Unicode): """The SQL NCHAR type.""" __visit_name__ = 'NCHAR' class BLOB(LargeBinary): """The SQL BLOB type.""" __visit_name__ = 'BLOB' class BINARY(_Binary): """The SQL BINARY type.""" __visit_name__ = 'BINARY' class VARBINARY(_Binary): """The SQL VARBINARY type.""" __visit_name__ = 'VARBINARY' class BOOLEAN(Boolean): """The SQL BOOLEAN type.""" __visit_name__ = 'BOOLEAN' NULLTYPE = NullType() BOOLEANTYPE = Boolean() STRINGTYPE = String() _type_map = { str: String(), # Py3K #bytes: LargeBinary(), # Py2K unicode: Unicode(), # end Py2K int: Integer(), float: Numeric(), bool: BOOLEANTYPE, decimal.Decimal: Numeric(), dt.date: Date(), dt.datetime: DateTime(), dt.time: Time(), dt.timedelta: Interval(), NoneType: NULLTYPE } SQLAlchemy-0.8.4/lib/sqlalchemy/util/0000755000076500000240000000000012251151573020120 5ustar classicstaff00000000000000SQLAlchemy-0.8.4/lib/sqlalchemy/util/__init__.py0000644000076500000240000000404412251150015022221 0ustar classicstaff00000000000000# util/__init__.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php from .compat import callable, cmp, reduce, \ threading, py3k, py33, py2k, py3k_warning, jython, pypy, cpython, win32, \ set_types, py26, \ pickle, dottedgetter, parse_qsl, namedtuple, next, WeakSet, reraise, \ raise_from_cause, u, b, ue, string_types, text_type, int_types from ._collections import KeyedTuple, ImmutableContainer, immutabledict, \ Properties, OrderedProperties, ImmutableProperties, OrderedDict, \ OrderedSet, IdentitySet, OrderedIdentitySet, column_set, \ column_dict, ordered_column_set, populate_column_dict, unique_list, \ UniqueAppender, PopulateDict, EMPTY_SET, to_list, to_set, \ to_column_set, update_copy, flatten_iterator, \ LRUCache, ScopedRegistry, ThreadLocalRegistry, WeakSequence from .langhelpers import iterate_attributes, class_hierarchy, \ portable_instancemethod, unbound_method_to_callable, \ getargspec_init, format_argspec_init, format_argspec_plus, \ get_func_kwargs, get_cls_kwargs, decorator, as_interface, \ memoized_property, memoized_instancemethod, md5_hex, \ group_expirable_memoized_property, importlater, decode_slice, \ monkeypatch_proxied_specials, asbool, bool_or_str, coerce_kw_type,\ duck_type_collection, assert_arg_type, symbol, dictlike_iteritems,\ classproperty, set_creation_order, warn_exception, warn, NoneType,\ constructor_copy, methods_equivalent, chop_traceback, asint,\ generic_repr, counter, PluginLoader, hybridmethod, safe_reraise,\ only_once from .deprecations import warn_deprecated, warn_pending_deprecation, \ deprecated, pending_deprecation # things that used to be not always available, # but are now as of current support Python versions from collections import defaultdict from functools import partial from functools import update_wrapper from contextlib import contextmanager SQLAlchemy-0.8.4/lib/sqlalchemy/util/_collections.py0000644000076500000240000006034512251150015023145 0ustar classicstaff00000000000000# util/_collections.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: 
http://www.opensource.org/licenses/mit-license.php """Collection classes and helpers.""" import itertools import weakref import operator from .compat import threading EMPTY_SET = frozenset() class KeyedTuple(tuple): """``tuple`` subclass that adds labeled names. E.g.:: >>> k = KeyedTuple([1, 2, 3], labels=["one", "two", "three"]) >>> k.one 1 >>> k.two 2 Result rows returned by :class:`.Query` that contain multiple ORM entities and/or column expressions make use of this class to return rows. The :class:`.KeyedTuple` exhibits similar behavior to the ``collections.namedtuple()`` construct provided in the Python standard library, however is architected very differently. Unlike ``collections.namedtuple()``, :class:`.KeyedTuple` is does not rely on creation of custom subtypes in order to represent a new series of keys, instead each :class:`.KeyedTuple` instance receives its list of keys in place. The subtype approach of ``collections.namedtuple()`` introduces significant complexity and performance overhead, which is not necessary for the :class:`.Query` object's use case. .. versionchanged:: 0.8 Compatibility methods with ``collections.namedtuple()`` have been added including :attr:`.KeyedTuple._fields` and :meth:`.KeyedTuple._asdict`. .. seealso:: :ref:`ormtutorial_querying` """ def __new__(cls, vals, labels=None): t = tuple.__new__(cls, vals) t._labels = [] if labels: t.__dict__.update(zip(labels, vals)) t._labels = labels return t def keys(self): """Return a list of string key names for this :class:`.KeyedTuple`. .. seealso:: :attr:`.KeyedTuple._fields` """ return [l for l in self._labels if l is not None] @property def _fields(self): """Return a tuple of string key names for this :class:`.KeyedTuple`. This method provides compatibility with ``collections.namedtuple()``. .. versionadded:: 0.8 .. seealso:: :meth:`.KeyedTuple.keys` """ return tuple(self.keys()) def _asdict(self): """Return the contents of this :class:`.KeyedTuple` as a dictionary. This method provides compatibility with ``collections.namedtuple()``, with the exception that the dictionary returned is **not** ordered. .. 
versionadded:: 0.8 """ return dict((key, self.__dict__[key]) for key in self.keys()) class ImmutableContainer(object): def _immutable(self, *arg, **kw): raise TypeError("%s object is immutable" % self.__class__.__name__) __delitem__ = __setitem__ = __setattr__ = _immutable class immutabledict(ImmutableContainer, dict): clear = pop = popitem = setdefault = \ update = ImmutableContainer._immutable def __new__(cls, *args): new = dict.__new__(cls) dict.__init__(new, *args) return new def __init__(self, *args): pass def __reduce__(self): return immutabledict, (dict(self), ) def union(self, d): if not self: return immutabledict(d) else: d2 = immutabledict(self) dict.update(d2, d) return d2 def __repr__(self): return "immutabledict(%s)" % dict.__repr__(self) class Properties(object): """Provide a __getattr__/__setattr__ interface over a dict.""" def __init__(self, data): self.__dict__['_data'] = data def __len__(self): return len(self._data) def __iter__(self): return self._data.itervalues() def __add__(self, other): return list(self) + list(other) def __setitem__(self, key, object): self._data[key] = object def __getitem__(self, key): return self._data[key] def __delitem__(self, key): del self._data[key] def __setattr__(self, key, object): self._data[key] = object def __getstate__(self): return {'_data': self.__dict__['_data']} def __setstate__(self, state): self.__dict__['_data'] = state['_data'] def __getattr__(self, key): try: return self._data[key] except KeyError: raise AttributeError(key) def __contains__(self, key): return key in self._data def as_immutable(self): """Return an immutable proxy for this :class:`.Properties`.""" return ImmutableProperties(self._data) def update(self, value): self._data.update(value) def get(self, key, default=None): if key in self: return self[key] else: return default def keys(self): return self._data.keys() def values(self): return self._data.values() def items(self): return self._data.items() def has_key(self, key): return key in self._data def clear(self): self._data.clear() class OrderedProperties(Properties): """Provide a __getattr__/__setattr__ interface with an OrderedDict as backing store.""" def __init__(self): Properties.__init__(self, OrderedDict()) class ImmutableProperties(ImmutableContainer, Properties): """Provide immutable dict/object attribute to an underlying dictionary.""" class OrderedDict(dict): """A dict that returns keys/values/items in the order they were added.""" def __init__(self, ____sequence=None, **kwargs): self._list = [] if ____sequence is None: if kwargs: self.update(**kwargs) else: self.update(____sequence, **kwargs) def clear(self): self._list = [] dict.clear(self) def copy(self): return self.__copy__() def __copy__(self): return OrderedDict(self) def sort(self, *arg, **kw): self._list.sort(*arg, **kw) def update(self, ____sequence=None, **kwargs): if ____sequence is not None: if hasattr(____sequence, 'keys'): for key in ____sequence.keys(): self.__setitem__(key, ____sequence[key]) else: for key, value in ____sequence: self[key] = value if kwargs: self.update(kwargs) def setdefault(self, key, value): if key not in self: self.__setitem__(key, value) return value else: return self.__getitem__(key) def __iter__(self): return iter(self._list) def values(self): return [self[key] for key in self._list] def itervalues(self): return iter([self[key] for key in self._list]) def keys(self): return list(self._list) def iterkeys(self): return iter(self.keys()) def items(self): return [(key, self[key]) for key in self.keys()] def 
iteritems(self): return iter(self.items()) def __setitem__(self, key, object): if key not in self: try: self._list.append(key) except AttributeError: # work around Python pickle loads() with # dict subclass (seems to ignore __setstate__?) self._list = [key] dict.__setitem__(self, key, object) def __delitem__(self, key): dict.__delitem__(self, key) self._list.remove(key) def pop(self, key, *default): present = key in self value = dict.pop(self, key, *default) if present: self._list.remove(key) return value def popitem(self): item = dict.popitem(self) self._list.remove(item[0]) return item class OrderedSet(set): def __init__(self, d=None): set.__init__(self) self._list = [] if d is not None: self.update(d) def add(self, element): if element not in self: self._list.append(element) set.add(self, element) def remove(self, element): set.remove(self, element) self._list.remove(element) def insert(self, pos, element): if element not in self: self._list.insert(pos, element) set.add(self, element) def discard(self, element): if element in self: self._list.remove(element) set.remove(self, element) def clear(self): set.clear(self) self._list = [] def __getitem__(self, key): return self._list[key] def __iter__(self): return iter(self._list) def __add__(self, other): return self.union(other) def __repr__(self): return '%s(%r)' % (self.__class__.__name__, self._list) __str__ = __repr__ def update(self, iterable): for e in iterable: if e not in self: self._list.append(e) set.add(self, e) return self __ior__ = update def union(self, other): result = self.__class__(self) result.update(other) return result __or__ = union def intersection(self, other): other = set(other) return self.__class__(a for a in self if a in other) __and__ = intersection def symmetric_difference(self, other): other = set(other) result = self.__class__(a for a in self if a not in other) result.update(a for a in other if a not in self) return result __xor__ = symmetric_difference def difference(self, other): other = set(other) return self.__class__(a for a in self if a not in other) __sub__ = difference def intersection_update(self, other): other = set(other) set.intersection_update(self, other) self._list = [a for a in self._list if a in other] return self __iand__ = intersection_update def symmetric_difference_update(self, other): set.symmetric_difference_update(self, other) self._list = [a for a in self._list if a in self] self._list += [a for a in other._list if a in self] return self __ixor__ = symmetric_difference_update def difference_update(self, other): set.difference_update(self, other) self._list = [a for a in self._list if a in self] return self __isub__ = difference_update class IdentitySet(object): """A set that considers only object id() for uniqueness. This strategy has edge cases for builtin types- it's possible to have two 'foo' strings in one of these sets, for example. Use sparingly. 
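# Illustrative sketch (not part of the library source): two of the collection
# helpers defined above.  KeyedTuple acts like a namedtuple whose labels are
# supplied per instance; OrderedSet is a set that also remembers insertion
# order.
from sqlalchemy.util import KeyedTuple, OrderedSet

row = KeyedTuple([1, 'ed'], labels=['id', 'name'])
assert row[0] == 1 and row.name == 'ed'
assert row.keys() == ['id', 'name']
assert row._asdict() == {'id': 1, 'name': 'ed'}

seen = OrderedSet(['b', 'a', 'b'])
seen.add('c')
assert list(seen) == ['b', 'a', 'c']   # duplicates dropped, order kept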
""" _working_set = set def __init__(self, iterable=None): self._members = dict() if iterable: for o in iterable: self.add(o) def add(self, value): self._members[id(value)] = value def __contains__(self, value): return id(value) in self._members def remove(self, value): del self._members[id(value)] def discard(self, value): try: self.remove(value) except KeyError: pass def pop(self): try: pair = self._members.popitem() return pair[1] except KeyError: raise KeyError('pop from an empty set') def clear(self): self._members.clear() def __cmp__(self, other): raise TypeError('cannot compare sets using cmp()') def __eq__(self, other): if isinstance(other, IdentitySet): return self._members == other._members else: return False def __ne__(self, other): if isinstance(other, IdentitySet): return self._members != other._members else: return True def issubset(self, iterable): other = type(self)(iterable) if len(self) > len(other): return False for m in itertools.ifilterfalse(other._members.__contains__, self._members.iterkeys()): return False return True def __le__(self, other): if not isinstance(other, IdentitySet): return NotImplemented return self.issubset(other) def __lt__(self, other): if not isinstance(other, IdentitySet): return NotImplemented return len(self) < len(other) and self.issubset(other) def issuperset(self, iterable): other = type(self)(iterable) if len(self) < len(other): return False for m in itertools.ifilterfalse(self._members.__contains__, other._members.iterkeys()): return False return True def __ge__(self, other): if not isinstance(other, IdentitySet): return NotImplemented return self.issuperset(other) def __gt__(self, other): if not isinstance(other, IdentitySet): return NotImplemented return len(self) > len(other) and self.issuperset(other) def union(self, iterable): result = type(self)() # testlib.pragma exempt:__hash__ members = self._member_id_tuples() other = _iter_id(iterable) result._members.update(self._working_set(members).union(other)) return result def __or__(self, other): if not isinstance(other, IdentitySet): return NotImplemented return self.union(other) def update(self, iterable): self._members = self.union(iterable)._members def __ior__(self, other): if not isinstance(other, IdentitySet): return NotImplemented self.update(other) return self def difference(self, iterable): result = type(self)() # testlib.pragma exempt:__hash__ members = self._member_id_tuples() other = _iter_id(iterable) result._members.update(self._working_set(members).difference(other)) return result def __sub__(self, other): if not isinstance(other, IdentitySet): return NotImplemented return self.difference(other) def difference_update(self, iterable): self._members = self.difference(iterable)._members def __isub__(self, other): if not isinstance(other, IdentitySet): return NotImplemented self.difference_update(other) return self def intersection(self, iterable): result = type(self)() # testlib.pragma exempt:__hash__ members = self._member_id_tuples() other = _iter_id(iterable) result._members.update(self._working_set(members).intersection(other)) return result def __and__(self, other): if not isinstance(other, IdentitySet): return NotImplemented return self.intersection(other) def intersection_update(self, iterable): self._members = self.intersection(iterable)._members def __iand__(self, other): if not isinstance(other, IdentitySet): return NotImplemented self.intersection_update(other) return self def symmetric_difference(self, iterable): result = type(self)() # testlib.pragma 
exempt:__hash__ members = self._member_id_tuples() other = _iter_id(iterable) result._members.update( self._working_set(members).symmetric_difference(other)) return result def _member_id_tuples(self): return ((id(v), v) for v in self._members.itervalues()) def __xor__(self, other): if not isinstance(other, IdentitySet): return NotImplemented return self.symmetric_difference(other) def symmetric_difference_update(self, iterable): self._members = self.symmetric_difference(iterable)._members def __ixor__(self, other): if not isinstance(other, IdentitySet): return NotImplemented self.symmetric_difference(other) return self def copy(self): return type(self)(self._members.itervalues()) __copy__ = copy def __len__(self): return len(self._members) def __iter__(self): return self._members.itervalues() def __hash__(self): raise TypeError('set objects are unhashable') def __repr__(self): return '%s(%r)' % (type(self).__name__, self._members.values()) class WeakSequence(object): def __init__(self, __elements=()): self._storage = [ weakref.ref(element, self._remove) for element in __elements ] def append(self, item): self._storage.append(weakref.ref(item, self._remove)) def _remove(self, ref): self._storage.remove(ref) def __len__(self): return len(self._storage) def __iter__(self): return (obj for obj in (ref() for ref in self._storage) if obj is not None) def __getitem__(self, index): try: obj = self._storage[index] except KeyError: raise IndexError("Index %s out of range" % index) else: return obj() class OrderedIdentitySet(IdentitySet): class _working_set(OrderedSet): # a testing pragma: exempt the OIDS working set from the test suite's # "never call the user's __hash__" assertions. this is a big hammer, # but it's safe here: IDS operates on (id, instance) tuples in the # working set. __sa_hash_exempt__ = True def __init__(self, iterable=None): IdentitySet.__init__(self) self._members = OrderedDict() if iterable: for o in iterable: self.add(o) class PopulateDict(dict): """A dict which populates missing values via a creation function. Note the creation function takes a key, unlike collections.defaultdict. """ def __init__(self, creator): self.creator = creator def __missing__(self, key): self[key] = val = self.creator(key) return val # Define collections that are capable of storing # ColumnElement objects as hashable keys/elements. # At this point, these are mostly historical, things # used to be more complicated. column_set = set column_dict = dict ordered_column_set = OrderedSet populate_column_dict = PopulateDict def unique_list(seq, hashfunc=None): seen = {} if not hashfunc: return [x for x in seq if x not in seen and not seen.__setitem__(x, True)] else: return [x for x in seq if hashfunc(x) not in seen and not seen.__setitem__(hashfunc(x), True)] class UniqueAppender(object): """Appends items to a collection ensuring uniqueness. Additional appends() of the same object are ignored. Membership is determined by identity (``is a``) not equality (``==``). 
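# Illustrative sketch (not part of the library source): IdentitySet and
# UniqueAppender both track members by id() rather than equality, as the
# docstrings above describe.
from sqlalchemy.util import IdentitySet, UniqueAppender

a = [1, 2]
b = [1, 2]                      # equal to a, but a distinct object

ids = IdentitySet([a])
ids.add(b)
assert len(ids) == 2            # both kept; membership is by id(), not ==

ua = UniqueAppender([])
ua.append(a)
ua.append(a)                    # second append of the same object is ignored
ua.append(b)
assert ua.data == [a, b]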
""" def __init__(self, data, via=None): self.data = data self._unique = {} if via: self._data_appender = getattr(data, via) elif hasattr(data, 'append'): self._data_appender = data.append elif hasattr(data, 'add'): self._data_appender = data.add def append(self, item): id_ = id(item) if id_ not in self._unique: self._data_appender(item) self._unique[id_] = True def __iter__(self): return iter(self.data) def to_list(x, default=None): if x is None: return default if not isinstance(x, (list, tuple)): return [x] else: return x def to_set(x): if x is None: return set() if not isinstance(x, set): return set(to_list(x)) else: return x def to_column_set(x): if x is None: return column_set() if not isinstance(x, column_set): return column_set(to_list(x)) else: return x def update_copy(d, _new=None, **kw): """Copy the given dict and update with the given values.""" d = d.copy() if _new: d.update(_new) d.update(**kw) return d def flatten_iterator(x): """Given an iterator of which further sub-elements may also be iterators, flatten the sub-elements into a single iterator. """ for elem in x: if not isinstance(elem, basestring) and hasattr(elem, '__iter__'): for y in flatten_iterator(elem): yield y else: yield elem class LRUCache(dict): """Dictionary with 'squishy' removal of least recently used items. """ def __init__(self, capacity=100, threshold=.5): self.capacity = capacity self.threshold = threshold self._counter = 0 def _inc_counter(self): self._counter += 1 return self._counter def __getitem__(self, key): item = dict.__getitem__(self, key) item[2] = self._inc_counter() return item[1] def values(self): return [i[1] for i in dict.values(self)] def setdefault(self, key, value): if key in self: return self[key] else: self[key] = value return value def __setitem__(self, key, value): item = dict.get(self, key) if item is None: item = [key, value, self._inc_counter()] dict.__setitem__(self, key, item) else: item[1] = value self._manage_size() def _manage_size(self): while len(self) > self.capacity + self.capacity * self.threshold: by_counter = sorted(dict.values(self), key=operator.itemgetter(2), reverse=True) for item in by_counter[self.capacity:]: try: del self[item[0]] except KeyError: # if we couldnt find a key, most # likely some other thread broke in # on us. loop around and try again break class ScopedRegistry(object): """A Registry that can store one or multiple instances of a single class on the basis of a "scope" function. The object implements ``__call__`` as the "getter", so by calling ``myregistry()`` the contained object is returned for the current scope. :param createfunc: a callable that returns a new object to be placed in the registry :param scopefunc: a callable that will return a key to store/retrieve an object. """ def __init__(self, createfunc, scopefunc): """Construct a new :class:`.ScopedRegistry`. :param createfunc: A creation function that will generate a new value for the current scope, if none is present. :param scopefunc: A function that returns a hashable token representing the current scope (such as, current thread identifier). 
""" self.createfunc = createfunc self.scopefunc = scopefunc self.registry = {} def __call__(self): key = self.scopefunc() try: return self.registry[key] except KeyError: return self.registry.setdefault(key, self.createfunc()) def has(self): """Return True if an object is present in the current scope.""" return self.scopefunc() in self.registry def set(self, obj): """Set the value forthe current scope.""" self.registry[self.scopefunc()] = obj def clear(self): """Clear the current scope, if any.""" try: del self.registry[self.scopefunc()] except KeyError: pass class ThreadLocalRegistry(ScopedRegistry): """A :class:`.ScopedRegistry` that uses a ``threading.local()`` variable for storage. """ def __init__(self, createfunc): self.createfunc = createfunc self.registry = threading.local() def __call__(self): try: return self.registry.value except AttributeError: val = self.registry.value = self.createfunc() return val def has(self): return hasattr(self.registry, "value") def set(self, obj): self.registry.value = obj def clear(self): try: del self.registry.value except AttributeError: pass def _iter_id(iterable): """Generator: ((id(o), o) for o in iterable).""" for item in iterable: yield id(item), item SQLAlchemy-0.8.4/lib/sqlalchemy/util/compat.py0000644000076500000240000001166012251150015021747 0ustar classicstaff00000000000000# util/compat.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Handle Python version/platform incompatibilities.""" import sys try: import threading except ImportError: import dummy_threading as threading py33 = sys.version_info >= (3, 3) py32 = sys.version_info >= (3, 2) py26 = sys.version_info >= (2, 6) py3k_warning = getattr(sys, 'py3kwarning', False) or sys.version_info >= (3, 0) py3k = sys.version_info >= (3, 0) py2k = sys.version_info < (3, 0) jython = sys.platform.startswith('java') pypy = hasattr(sys, 'pypy_version_info') win32 = sys.platform.startswith('win') cpython = not pypy and not jython # TODO: something better for this ? if py3k_warning: set_types = set elif sys.version_info < (2, 6): import sets set_types = set, sets.Set else: # 2.6 deprecates sets.Set, but we still need to be able to detect them # in user code and as return values from DB-APIs ignore = ('ignore', None, DeprecationWarning, None, 0) import warnings try: warnings.filters.insert(0, ignore) except Exception: import sets else: import sets warnings.filters.remove(ignore) set_types = set, sets.Set if sys.version_info < (2, 6): def next(iter): return iter.next() else: next = next if py3k_warning: import pickle else: try: import cPickle as pickle except ImportError: import pickle if sys.version_info < (2, 6): # emits a nasty deprecation warning # in newer pythons from cgi import parse_qsl else: from urlparse import parse_qsl # Py3K #from inspect import getfullargspec as inspect_getfullargspec # Py2K from inspect import getargspec as inspect_getfullargspec # end Py2K if py3k_warning: # they're bringing it back in 3.2. brilliant ! 
def callable(fn): return hasattr(fn, '__call__') def cmp(a, b): return (a > b) - (a < b) from functools import reduce else: callable = callable cmp = cmp reduce = reduce try: from collections import namedtuple except ImportError: def namedtuple(typename, fieldnames): def __new__(cls, *values): tup = tuple.__new__(cls, values) for i, fname in enumerate(fieldnames): setattr(tup, fname, tup[i]) return tup tuptype = type(typename, (tuple, ), {'__new__': __new__}) return tuptype try: from weakref import WeakSet except: import weakref class WeakSet(object): """Implement the small subset of set() which SQLAlchemy needs here. """ def __init__(self, values=None): self._storage = weakref.WeakKeyDictionary() if values is not None: self._storage.update((value, None) for value in values) def __iter__(self): return iter(self._storage) def union(self, other): return WeakSet(set(self).union(other)) def add(self, other): self._storage[other] = True import time if win32 or jython: time_func = time.clock else: time_func = time.time if sys.version_info >= (2, 6): from operator import attrgetter as dottedgetter else: def dottedgetter(attr): def g(obj): for name in attr.split("."): obj = getattr(obj, name) return obj return g if py3k: string_types = str, binary_type = bytes text_type = str int_types = int, def u(s): return s def ue(s): return s def b(s): return s.encode("latin-1") else: string_types = basestring, binary_type = str text_type = unicode int_types = int, long def b(s): return s def u(s): # this differs from what six does, which doesn't support non-ASCII # strings - we only use u() with # literal source strings, and all our source files with non-ascii # in them (all are tests) are utf-8 encoded. return unicode(s, "utf-8") def ue(s): return unicode(s, "unicode_escape") def b(s): return s if py3k: def reraise(tp, value, tb=None, cause=None): if cause is not None: value.__cause__ = cause if value.__traceback__ is not tb: raise value.with_traceback(tb) raise value def raise_from_cause(exception, exc_info): exc_type, exc_value, exc_tb = exc_info reraise(type(exception), exception, tb=exc_tb, cause=exc_value) else: exec("def reraise(tp, value, tb=None, cause=None):\n" " raise tp, value, tb\n") def raise_from_cause(exception, exc_info): # not as nice as that of Py3K, but at least preserves # the code line where the issue occurred exc_type, exc_value, exc_tb = exc_info reraise(type(exception), exception, tb=exc_tb) SQLAlchemy-0.8.4/lib/sqlalchemy/util/deprecations.py0000644000076500000240000000747712251150015023157 0ustar classicstaff00000000000000# util/deprecations.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Helpers related to deprecation of functions, methods, classes, other functionality.""" from .. import exc import warnings import re from langhelpers import decorator def warn_deprecated(msg, stacklevel=3): warnings.warn(msg, exc.SADeprecationWarning, stacklevel=stacklevel) def warn_pending_deprecation(msg, stacklevel=3): warnings.warn(msg, exc.SAPendingDeprecationWarning, stacklevel=stacklevel) def deprecated(version, message=None, add_deprecation_to_docstring=True): """Decorates a function and issues a deprecation warning on use. :param message: If provided, issue message in the warning. A sensible default is used if not provided. :param add_deprecation_to_docstring: Default True. If False, the wrapped function's __doc__ is left as-is. 
If True, the 'message' is prepended to the docs if provided, or sensible default if message is omitted. """ if add_deprecation_to_docstring: header = ".. deprecated:: %s %s" % \ (version, (message or '')) else: header = None if message is None: message = "Call to deprecated function %(func)s" def decorate(fn): return _decorate_with_warning( fn, exc.SADeprecationWarning, message % dict(func=fn.__name__), header) return decorate def pending_deprecation(version, message=None, add_deprecation_to_docstring=True): """Decorates a function and issues a pending deprecation warning on use. :param version: An approximate future version at which point the pending deprecation will become deprecated. Not used in messaging. :param message: If provided, issue message in the warning. A sensible default is used if not provided. :param add_deprecation_to_docstring: Default True. If False, the wrapped function's __doc__ is left as-is. If True, the 'message' is prepended to the docs if provided, or sensible default if message is omitted. """ if add_deprecation_to_docstring: header = ".. deprecated:: %s (pending) %s" % \ (version, (message or '')) else: header = None if message is None: message = "Call to deprecated function %(func)s" def decorate(fn): return _decorate_with_warning( fn, exc.SAPendingDeprecationWarning, message % dict(func=fn.__name__), header) return decorate def _sanitize_restructured_text(text): def repl(m): type_, name = m.group(1, 2) if type_ in ("func", "meth"): name += "()" return name return re.sub(r'\:(\w+)\:`~?\.?(.+?)`', repl, text) def _decorate_with_warning(func, wtype, message, docstring_header=None): """Wrap a function with a warnings.warn and augmented docstring.""" message = _sanitize_restructured_text(message) @decorator def warned(fn, *args, **kwargs): warnings.warn(wtype(message), stacklevel=3) return fn(*args, **kwargs) doc = func.__doc__ is not None and func.__doc__ or '' if docstring_header is not None: docstring_header %= dict(func=func.__name__) docs = doc and doc.expandtabs().split('\n') or [] indent = '' for line in docs[1:]: text = line.lstrip() if text: indent = line[0:len(line) - len(text)] break point = min(len(docs), 1) docs.insert(point, '\n' + indent + docstring_header.rstrip()) doc = '\n'.join(docs) decorated = warned(func) decorated.__doc__ = doc return decorated SQLAlchemy-0.8.4/lib/sqlalchemy/util/langhelpers.py0000644000076500000240000007644212251150015023001 0ustar classicstaff00000000000000# util/langhelpers.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Routines to help with the creation, loading and introspection of modules, classes, hierarchies, attributes, functions, and methods. """ import itertools import inspect import operator import re import sys import types import warnings from .compat import set_types, threading, \ callable, inspect_getfullargspec from functools import update_wrapper from .. import exc import hashlib from . import compat def md5_hex(x): # Py3K #x = x.encode('utf-8') m = hashlib.md5() m.update(x) return m.hexdigest() class safe_reraise(object): """Reraise an exception after invoking some handler code. Stores the existing exception info before invoking so that it is maintained across a potential coroutine context switch. 
e.g.:: try: sess.commit() except: with safe_reraise(): sess.rollback() """ def __enter__(self): self._exc_info = sys.exc_info() def __exit__(self, type_, value, traceback): # see #2703 for notes if type_ is None: exc_type, exc_value, exc_tb = self._exc_info self._exc_info = None # remove potential circular references compat.reraise(exc_type, exc_value, exc_tb) else: self._exc_info = None # remove potential circular references compat.reraise(type_, value, traceback) def decode_slice(slc): """decode a slice object as sent to __getitem__. takes into account the 2.5 __index__() method, basically. """ ret = [] for x in slc.start, slc.stop, slc.step: if hasattr(x, '__index__'): x = x.__index__() ret.append(x) return tuple(ret) def _unique_symbols(used, *bases): used = set(used) for base in bases: pool = itertools.chain((base,), itertools.imap(lambda i: base + str(i), xrange(1000))) for sym in pool: if sym not in used: used.add(sym) yield sym break else: raise NameError("exhausted namespace for symbol base %s" % base) def decorator(target): """A signature-matching decorator factory.""" def decorate(fn): if not inspect.isfunction(fn): raise Exception("not a decoratable function") spec = inspect_getfullargspec(fn) names = tuple(spec[0]) + spec[1:3] + (fn.func_name,) targ_name, fn_name = _unique_symbols(names, 'target', 'fn') metadata = dict(target=targ_name, fn=fn_name) metadata.update(format_argspec_plus(spec, grouped=False)) code = 'lambda %(args)s: %(target)s(%(fn)s, %(apply_kw)s)' % ( metadata) decorated = eval(code, {targ_name: target, fn_name: fn}) decorated.func_defaults = getattr(fn, 'im_func', fn).func_defaults return update_wrapper(decorated, fn) return update_wrapper(decorate, target) class PluginLoader(object): def __init__(self, group, auto_fn=None): self.group = group self.impls = {} self.auto_fn = auto_fn def load(self, name): if name in self.impls: return self.impls[name]() if self.auto_fn: loader = self.auto_fn(name) if loader: self.impls[name] = loader return loader() try: import pkg_resources except ImportError: pass else: for impl in pkg_resources.iter_entry_points( self.group, name): self.impls[name] = impl.load return impl.load() from sqlalchemy import exc raise exc.ArgumentError( "Can't load plugin: %s:%s" % (self.group, name)) def register(self, name, modulepath, objname): def load(): mod = __import__(modulepath) for token in modulepath.split(".")[1:]: mod = getattr(mod, token) return getattr(mod, objname) self.impls[name] = load def get_cls_kwargs(cls, _set=None): """Return the full set of inherited kwargs for the given `cls`. Probes a class's __init__ method, collecting all named arguments. If the __init__ defines a \**kwargs catch-all, then the constructor is presumed to pass along unrecognized keywords to it's base classes, and the collection process is repeated recursively on each of the bases. Uses a subset of inspect.getargspec() to cut down on method overhead. No anonymous tuple arguments please ! 
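    For example, with an illustrative hierarchy (not taken from the
    library)::

        class Base(object):
            def __init__(self, base_arg=None, **kwargs):
                pass

        class Sub(Base):
            def __init__(self, sub_arg=None, **kwargs):
                Base.__init__(self, **kwargs)

        get_cls_kwargs(Sub)   # -> set of 'base_arg' and 'sub_arg'; 'self' is discarded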
""" toplevel = _set == None if toplevel: _set = set() ctr = cls.__dict__.get('__init__', False) has_init = ctr and isinstance(ctr, types.FunctionType) and \ isinstance(ctr.func_code, types.CodeType) if has_init: names, has_kw = inspect_func_args(ctr) _set.update(names) if not has_kw and not toplevel: return None if not has_init or has_kw: for c in cls.__bases__: if get_cls_kwargs(c, _set) is None: break _set.discard('self') return _set try: from inspect import CO_VARKEYWORDS def inspect_func_args(fn): co = fn.func_code nargs = co.co_argcount names = co.co_varnames args = list(names[:nargs]) has_kw = bool(co.co_flags & CO_VARKEYWORDS) return args, has_kw except ImportError: def inspect_func_args(fn): names, _, has_kw, _ = inspect.getargspec(fn) return names, bool(has_kw) def get_func_kwargs(func): """Return the set of legal kwargs for the given `func`. Uses getargspec so is safe to call for methods, functions, etc. """ return inspect.getargspec(func)[0] def format_argspec_plus(fn, grouped=True): """Returns a dictionary of formatted, introspected function arguments. A enhanced variant of inspect.formatargspec to support code generation. fn An inspectable callable or tuple of inspect getargspec() results. grouped Defaults to True; include (parens, around, argument) lists Returns: args Full inspect.formatargspec for fn self_arg The name of the first positional argument, varargs[0], or None if the function defines no positional arguments. apply_pos args, re-written in calling rather than receiving syntax. Arguments are passed positionally. apply_kw Like apply_pos, except keyword-ish args are passed as keywords. Example:: >>> format_argspec_plus(lambda self, a, b, c=3, **d: 123) {'args': '(self, a, b, c=3, **d)', 'self_arg': 'self', 'apply_kw': '(self, a, b, c=c, **d)', 'apply_pos': '(self, a, b, c, **d)'} """ if callable(fn): spec = inspect_getfullargspec(fn) else: # we accept an existing argspec... 
spec = fn args = inspect.formatargspec(*spec) if spec[0]: self_arg = spec[0][0] elif spec[1]: self_arg = '%s[0]' % spec[1] else: self_arg = None # Py3K #apply_pos = inspect.formatargspec(spec[0], spec[1], # spec[2], None, spec[4]) #num_defaults = 0 #if spec[3]: # num_defaults += len(spec[3]) #if spec[4]: # num_defaults += len(spec[4]) #name_args = spec[0] + spec[4] # Py2K apply_pos = inspect.formatargspec(spec[0], spec[1], spec[2]) num_defaults = 0 if spec[3]: num_defaults += len(spec[3]) name_args = spec[0] # end Py2K if num_defaults: defaulted_vals = name_args[0 - num_defaults:] else: defaulted_vals = () apply_kw = inspect.formatargspec(name_args, spec[1], spec[2], defaulted_vals, formatvalue=lambda x: '=' + x) if grouped: return dict(args=args, self_arg=self_arg, apply_pos=apply_pos, apply_kw=apply_kw) else: return dict(args=args[1:-1], self_arg=self_arg, apply_pos=apply_pos[1:-1], apply_kw=apply_kw[1:-1]) def format_argspec_init(method, grouped=True): """format_argspec_plus with considerations for typical __init__ methods Wraps format_argspec_plus with error handling strategies for typical __init__ cases:: object.__init__ -> (self) other unreflectable (usually C) -> (self, *args, **kwargs) """ try: return format_argspec_plus(method, grouped=grouped) except TypeError: if method is object.__init__: args = grouped and '(self)' or 'self' else: args = (grouped and '(self, *args, **kwargs)' or 'self, *args, **kwargs') return dict(self_arg='self', args=args, apply_pos=args, apply_kw=args) def getargspec_init(method): """inspect.getargspec with considerations for typical __init__ methods Wraps inspect.getargspec with error handling for typical __init__ cases:: object.__init__ -> (self) other unreflectable (usually C) -> (self, *args, **kwargs) """ try: return inspect.getargspec(method) except TypeError: if method is object.__init__: return (['self'], None, None, None) else: return (['self'], 'args', 'kwargs', None) def unbound_method_to_callable(func_or_cls): """Adjust the incoming callable such that a 'self' argument is not required. """ if isinstance(func_or_cls, types.MethodType) and not func_or_cls.im_self: return func_or_cls.im_func else: return func_or_cls def generic_repr(obj, additional_kw=(), to_inspect=None): """Produce a __repr__() based on direct association of the __init__() specification vs. same-named attributes present. """ if to_inspect is None: to_inspect = obj missing = object() def genargs(): try: (args, vargs, vkw, defaults) = \ inspect.getargspec(to_inspect.__init__) except TypeError: return default_len = defaults and len(defaults) or 0 if not default_len: for arg in args[1:]: yield repr(getattr(obj, arg, None)) if vargs is not None and hasattr(obj, vargs): yield ', '.join(repr(val) for val in getattr(obj, vargs)) else: for arg in args[1:-default_len]: yield repr(getattr(obj, arg, None)) for (arg, defval) in zip(args[-default_len:], defaults): try: val = getattr(obj, arg, missing) if val is not missing and val != defval: yield '%s=%r' % (arg, val) except: pass if additional_kw: for arg, defval in additional_kw: try: val = getattr(obj, arg, missing) if val is not missing and val != defval: yield '%s=%r' % (arg, val) except: pass return "%s(%s)" % (obj.__class__.__name__, ", ".join(genargs())) class portable_instancemethod(object): """Turn an instancemethod into a (parent, name) pair to produce a serializable callable. 
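    E.g., a minimal sketch (``SomeClass`` and ``method`` are placeholder
    names)::

        obj = SomeClass()
        pm = portable_instancemethod(obj.method)
        pm(1, 2)          # same as obj.method(1, 2)
        pickle.dumps(pm)  # picklable, provided the parent object is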
""" def __init__(self, meth): self.target = meth.im_self self.name = meth.__name__ def __call__(self, *arg, **kw): return getattr(self.target, self.name)(*arg, **kw) def class_hierarchy(cls): """Return an unordered sequence of all classes related to cls. Traverses diamond hierarchies. Fibs slightly: subclasses of builtin types are not returned. Thus class_hierarchy(class A(object)) returns (A, object), not A plus every class systemwide that derives from object. Old-style classes are discarded and hierarchies rooted on them will not be descended. """ # Py2K if isinstance(cls, types.ClassType): return list() # end Py2K hier = set([cls]) process = list(cls.__mro__) while process: c = process.pop() # Py2K if isinstance(c, types.ClassType): continue for b in (_ for _ in c.__bases__ if _ not in hier and not isinstance(_, types.ClassType)): # end Py2K # Py3K #for b in (_ for _ in c.__bases__ # if _ not in hier): process.append(b) hier.add(b) # Py3K #if c.__module__ == 'builtins' or not hasattr(c, '__subclasses__'): # continue # Py2K if c.__module__ == '__builtin__' or not hasattr(c, '__subclasses__'): continue # end Py2K for s in [_ for _ in c.__subclasses__() if _ not in hier]: process.append(s) hier.add(s) return list(hier) def iterate_attributes(cls): """iterate all the keys and attributes associated with a class, without using getattr(). Does not use getattr() so that class-sensitive descriptors (i.e. property.__get__()) are not called. """ keys = dir(cls) for key in keys: for c in cls.__mro__: if key in c.__dict__: yield (key, c.__dict__[key]) break def monkeypatch_proxied_specials(into_cls, from_cls, skip=None, only=None, name='self.proxy', from_instance=None): """Automates delegation of __specials__ for a proxying type.""" if only: dunders = only else: if skip is None: skip = ('__slots__', '__del__', '__getattribute__', '__metaclass__', '__getstate__', '__setstate__') dunders = [m for m in dir(from_cls) if (m.startswith('__') and m.endswith('__') and not hasattr(into_cls, m) and m not in skip)] for method in dunders: try: fn = getattr(from_cls, method) if not hasattr(fn, '__call__'): continue fn = getattr(fn, 'im_func', fn) except AttributeError: continue try: spec = inspect.getargspec(fn) fn_args = inspect.formatargspec(spec[0]) d_args = inspect.formatargspec(spec[0][1:]) except TypeError: fn_args = '(self, *args, **kw)' d_args = '(*args, **kw)' py = ("def %(method)s%(fn_args)s: " "return %(name)s.%(method)s%(d_args)s" % locals()) env = from_instance is not None and {name: from_instance} or {} exec py in env try: env[method].func_defaults = fn.func_defaults except AttributeError: pass setattr(into_cls, method, env[method]) def methods_equivalent(meth1, meth2): """Return True if the two methods are the same implementation.""" # Py3K #return getattr(meth1, '__func__', meth1) is getattr(meth2, '__func__', meth2) # Py2K return getattr(meth1, 'im_func', meth1) is getattr(meth2, 'im_func', meth2) # end Py2K def as_interface(obj, cls=None, methods=None, required=None): """Ensure basic interface compliance for an instance or dict of callables. Checks that ``obj`` implements public methods of ``cls`` or has members listed in ``methods``. If ``required`` is not supplied, implementing at least one interface method is sufficient. Methods present on ``obj`` that are not in the interface are ignored. If ``obj`` is a dict and ``dict`` does not meet the interface requirements, the keys of the dictionary are inspected. Keys present in ``obj`` that are not in the interface will raise TypeErrors. 
Raises TypeError if ``obj`` does not meet the interface criteria. In all passing cases, an object with callable members is returned. In the simple case, ``obj`` is returned as-is; if dict processing kicks in then an anonymous class is returned. obj A type, instance, or dictionary of callables. cls Optional, a type. All public methods of cls are considered the interface. An ``obj`` instance of cls will always pass, ignoring ``required``.. methods Optional, a sequence of method names to consider as the interface. required Optional, a sequence of mandatory implementations. If omitted, an ``obj`` that provides at least one interface method is considered sufficient. As a convenience, required may be a type, in which case all public methods of the type are required. """ if not cls and not methods: raise TypeError('a class or collection of method names are required') if isinstance(cls, type) and isinstance(obj, cls): return obj interface = set(methods or [m for m in dir(cls) if not m.startswith('_')]) implemented = set(dir(obj)) complies = operator.ge if isinstance(required, type): required = interface elif not required: required = set() complies = operator.gt else: required = set(required) if complies(implemented.intersection(interface), required): return obj # No dict duck typing here. if not type(obj) is dict: qualifier = complies is operator.gt and 'any of' or 'all of' raise TypeError("%r does not implement %s: %s" % ( obj, qualifier, ', '.join(interface))) class AnonymousInterface(object): """A callable-holding shell.""" if cls: AnonymousInterface.__name__ = 'Anonymous' + cls.__name__ found = set() for method, impl in dictlike_iteritems(obj): if method not in interface: raise TypeError("%r: unknown in this interface" % method) if not callable(impl): raise TypeError("%r=%r is not callable" % (method, impl)) setattr(AnonymousInterface, method, staticmethod(impl)) found.add(method) if complies(found, required): return AnonymousInterface raise TypeError("dictionary does not contain required keys %s" % ', '.join(required - found)) class memoized_property(object): """A read-only @property that is only evaluated once.""" def __init__(self, fget, doc=None): self.fget = fget self.__doc__ = doc or fget.__doc__ self.__name__ = fget.__name__ def __get__(self, obj, cls): if obj is None: return self obj.__dict__[self.__name__] = result = self.fget(obj) return result def _reset(self, obj): obj.__dict__.pop(self.__name__, None) class memoized_instancemethod(object): """Decorate a method memoize its return value. Best applied to no-arg methods: memoization is not sensitive to argument values, and will always return the same value even when called with different arguments. 
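    E.g., a minimal sketch (``load_rows`` stands in for some expensive
    call)::

        class Loader(object):
            @memoized_instancemethod
            def rows(self):
                return load_rows()

        loader = Loader()
        loader.rows()   # computes and caches the result
        loader.rows()   # returns the cached value; arguments would be ignored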
""" def __init__(self, fget, doc=None): self.fget = fget self.__doc__ = doc or fget.__doc__ self.__name__ = fget.__name__ def __get__(self, obj, cls): if obj is None: return self def oneshot(*args, **kw): result = self.fget(obj, *args, **kw) memo = lambda *a, **kw: result memo.__name__ = self.__name__ memo.__doc__ = self.__doc__ obj.__dict__[self.__name__] = memo return result oneshot.__name__ = self.__name__ oneshot.__doc__ = self.__doc__ return oneshot class group_expirable_memoized_property(object): """A family of @memoized_properties that can be expired in tandem.""" def __init__(self, attributes=()): self.attributes = [] if attributes: self.attributes.extend(attributes) def expire_instance(self, instance): """Expire all memoized properties for *instance*.""" stash = instance.__dict__ for attribute in self.attributes: stash.pop(attribute, None) def __call__(self, fn): self.attributes.append(fn.__name__) return memoized_property(fn) def method(self, fn): self.attributes.append(fn.__name__) return memoized_instancemethod(fn) class importlater(object): """Deferred import object. e.g.:: somesubmod = importlater("mypackage.somemodule", "somesubmod") is equivalent to:: from mypackage.somemodule import somesubmod except evaluated upon attribute access to "somesubmod". importlater() currently requires that resolve_all() be called, typically at the bottom of a package's __init__.py. This is so that __import__ still called only at module import time, and not potentially within a non-main thread later on. """ _unresolved = set() def __init__(self, path, addtl=None): self._il_path = path self._il_addtl = addtl importlater._unresolved.add(self) @classmethod def resolve_all(cls): for m in list(importlater._unresolved): m._resolve() @property def _full_path(self): if self._il_addtl: return self._il_path + "." + self._il_addtl else: return self._il_path @memoized_property def module(self): if self in importlater._unresolved: raise ImportError( "importlater.resolve_all() hasn't " "been called (this is %s %s)" % (self._il_path, self._il_addtl)) m = self._initial_import if self._il_addtl: m = getattr(m, self._il_addtl) else: for token in self._il_path.split(".")[1:]: m = getattr(m, token) return m def _resolve(self): importlater._unresolved.discard(self) if self._il_addtl: self._initial_import = __import__( self._il_path, globals(), locals(), [self._il_addtl]) else: self._initial_import = __import__(self._il_path) def __getattr__(self, key): if key == 'module': raise ImportError("Could not resolve module %s" % self._full_path) try: attr = getattr(self.module, key) except AttributeError: raise AttributeError( "Module %s has no attribute '%s'" % (self._full_path, key) ) self.__dict__[key] = attr return attr # from paste.deploy.converters def asbool(obj): if isinstance(obj, (str, unicode)): obj = obj.strip().lower() if obj in ['true', 'yes', 'on', 'y', 't', '1']: return True elif obj in ['false', 'no', 'off', 'n', 'f', '0']: return False else: raise ValueError("String is not true/false: %r" % obj) return bool(obj) def bool_or_str(*text): """Return a callable that will evaulate a string as boolean, or one of a set of "alternate" string values. """ def bool_or_value(obj): if obj in text: return obj else: return asbool(obj) return bool_or_value def asint(value): """Coerce to integer.""" if value is None: return value return int(value) def coerce_kw_type(kw, key, type_, flexi_bool=True): """If 'key' is present in dict 'kw', coerce its value to type 'type\_' if necessary. 
If 'flexi_bool' is True, the string '0' is considered false when coercing to boolean. """ if key in kw and type(kw[key]) is not type_ and kw[key] is not None: if type_ is bool and flexi_bool: kw[key] = asbool(kw[key]) else: kw[key] = type_(kw[key]) def constructor_copy(obj, cls, **kw): """Instantiate cls using the __dict__ of obj as constructor arguments. Uses inspect to match the named arguments of ``cls``. """ names = get_cls_kwargs(cls) kw.update((k, obj.__dict__[k]) for k in names if k in obj.__dict__) return cls(**kw) def counter(): """Return a threadsafe counter function.""" lock = threading.Lock() counter = itertools.count(1L) # avoid the 2to3 "next" transformation... def _next(): lock.acquire() try: return counter.next() finally: lock.release() return _next def duck_type_collection(specimen, default=None): """Given an instance or class, guess if it is or is acting as one of the basic collection types: list, set and dict. If the __emulates__ property is present, return that preferentially. """ if hasattr(specimen, '__emulates__'): # canonicalize set vs sets.Set to a standard: the builtin set if (specimen.__emulates__ is not None and issubclass(specimen.__emulates__, set_types)): return set else: return specimen.__emulates__ isa = isinstance(specimen, type) and issubclass or isinstance if isa(specimen, list): return list elif isa(specimen, set_types): return set elif isa(specimen, dict): return dict if hasattr(specimen, 'append'): return list elif hasattr(specimen, 'add'): return set elif hasattr(specimen, 'set'): return dict else: return default def assert_arg_type(arg, argtype, name): if isinstance(arg, argtype): return arg else: if isinstance(argtype, tuple): raise exc.ArgumentError( "Argument '%s' is expected to be one of type %s, got '%s'" % (name, ' or '.join("'%s'" % a for a in argtype), type(arg))) else: raise exc.ArgumentError( "Argument '%s' is expected to be of type '%s', got '%s'" % (name, argtype, type(arg))) def dictlike_iteritems(dictlike): """Return a (key, value) iterator for almost any dict-like object.""" # Py3K #if hasattr(dictlike, 'items'): # return dictlike.items() # Py2K if hasattr(dictlike, 'iteritems'): return dictlike.iteritems() elif hasattr(dictlike, 'items'): return iter(dictlike.items()) # end Py2K getter = getattr(dictlike, '__getitem__', getattr(dictlike, 'get', None)) if getter is None: raise TypeError( "Object '%r' is not dict-like" % dictlike) if hasattr(dictlike, 'iterkeys'): def iterator(): for key in dictlike.iterkeys(): yield key, getter(key) return iterator() elif hasattr(dictlike, 'keys'): return iter((key, getter(key)) for key in dictlike.keys()) else: raise TypeError( "Object '%r' is not dict-like" % dictlike) class classproperty(property): """A decorator that behaves like @property except that operates on classes rather than instances. The decorator is currently special when using the declarative module, but note that the :class:`~.sqlalchemy.ext.declarative.declared_attr` decorator should be used for this purpose with declarative. 
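    E.g., outside of declarative (an illustrative sketch)::

        class Config(object):
            @classproperty
            def identifier(cls):
                return cls.__name__.lower()

        Config.identifier     # "config", no instance required
        Config().identifier   # the same value via an instance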
""" def __init__(self, fget, *arg, **kw): super(classproperty, self).__init__(fget, *arg, **kw) self.__doc__ = fget.__doc__ def __get__(desc, self, cls): return desc.fget(cls) class hybridmethod(object): """Decorate a function as cls- or instance- level.""" def __init__(self, func, expr=None): self.func = func def __get__(self, instance, owner): if instance is None: return self.func.__get__(owner, owner.__class__) else: return self.func.__get__(instance, owner) class _symbol(int): def __new__(self, name, doc=None, canonical=None): """Construct a new named symbol.""" assert isinstance(name, str) if canonical is None: canonical = hash(name) v = int.__new__(_symbol, canonical) v.name = name if doc: v.__doc__ = doc return v def __reduce__(self): return symbol, (self.name, "x", int(self)) def __str__(self): return repr(self) def __repr__(self): return "" % self.name _symbol.__name__ = 'symbol' class symbol(object): """A constant symbol. >>> symbol('foo') is symbol('foo') True >>> symbol('foo') A slight refinement of the MAGICCOOKIE=object() pattern. The primary advantage of symbol() is its repr(). They are also singletons. Repeated calls of symbol('name') will all return the same instance. The optional ``doc`` argument assigns to ``__doc__``. This is strictly so that Sphinx autoattr picks up the docstring we want (it doesn't appear to pick up the in-module docstring if the datamember is in a different module - autoattribute also blows up completely). If Sphinx fixes/improves this then we would no longer need ``doc`` here. """ symbols = {} _lock = threading.Lock() def __new__(cls, name, doc=None, canonical=None): cls._lock.acquire() try: sym = cls.symbols.get(name) if sym is None: cls.symbols[name] = sym = _symbol(name, doc, canonical) return sym finally: symbol._lock.release() _creation_order = 1 def set_creation_order(instance): """Assign a '_creation_order' sequence to the given instance. This allows multiple instances to be sorted in order of creation (typically within a single thread; the counter is not particularly threadsafe). """ global _creation_order instance._creation_order = _creation_order _creation_order += 1 def warn_exception(func, *args, **kwargs): """executes the given function, catches all exceptions and converts to a warning. """ try: return func(*args, **kwargs) except: warn("%s('%s') ignored" % sys.exc_info()[0:2]) def warn(msg, stacklevel=3): """Issue a warning. If msg is a string, :class:`.exc.SAWarning` is used as the category. .. note:: This function is swapped out when the test suite runs, with a compatible version that uses warnings.warn_explicit, so that the warnings registry can be controlled. """ if isinstance(msg, basestring): warnings.warn(msg, exc.SAWarning, stacklevel=stacklevel) else: warnings.warn(msg, stacklevel=stacklevel) def only_once(fn): """Decorate the given function to be a no-op after it is called exactly once.""" once = [fn] def go(*arg, **kw): if once: once_fn = once.pop() return once_fn(*arg, **kw) return update_wrapper(go, fn) _SQLA_RE = re.compile(r'sqlalchemy/([a-z_]+/){0,2}[a-z_]+\.py') _UNITTEST_RE = re.compile(r'unit(?:2|test2?/)') def chop_traceback(tb, exclude_prefix=_UNITTEST_RE, exclude_suffix=_SQLA_RE): """Chop extraneous lines off beginning and end of a traceback. 
:param tb: a list of traceback lines as returned by ``traceback.format_stack()`` :param exclude_prefix: a regular expression object matching lines to skip at beginning of ``tb`` :param exclude_suffix: a regular expression object matching lines to skip at end of ``tb`` """ start = 0 end = len(tb) - 1 while start <= end and exclude_prefix.search(tb[start]): start += 1 while start <= end and exclude_suffix.search(tb[end]): end -= 1 return tb[start:end + 1] NoneType = type(None) SQLAlchemy-0.8.4/lib/sqlalchemy/util/queue.py0000644000076500000240000002000512251150015021601 0ustar classicstaff00000000000000# util/queue.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """An adaptation of Py2.3/2.4's Queue module which supports reentrant behavior, using RLock instead of Lock for its mutex object. The Queue object is used exclusively by the sqlalchemy.pool.QueuePool class. This is to support the connection pool's usage of weakref callbacks to return connections to the underlying Queue, which can in extremely rare cases be invoked within the ``get()`` method of the Queue itself, producing a ``put()`` inside the ``get()`` and therefore a reentrant condition. An additional change includes a special "abort" method which can be used to immediately raise a special exception for threads that are blocking on get(). This is to accommodate a rare race condition that can occur within QueuePool. """ from collections import deque from time import time as _time from .compat import threading import sys if sys.version_info < (2, 6): def notify_all(condition): condition.notify() else: def notify_all(condition): condition.notify_all() __all__ = ['Empty', 'Full', 'Queue', 'SAAbort'] class Empty(Exception): "Exception raised by Queue.get(block=0)/get_nowait()." pass class Full(Exception): "Exception raised by Queue.put(block=0)/put_nowait()." pass class SAAbort(Exception): "Special SQLA exception to abort waiting" def __init__(self, context): self.context = context class Queue: def __init__(self, maxsize=0): """Initialize a queue object with a given maximum size. If `maxsize` is <= 0, the queue size is infinite. """ self._init(maxsize) # mutex must be held whenever the queue is mutating. All methods # that acquire mutex must release it before returning. mutex # is shared between the two conditions, so acquiring and # releasing the conditions also acquires and releases mutex. self.mutex = threading.RLock() # Notify not_empty whenever an item is added to the queue; a # thread waiting to get is notified then. self.not_empty = threading.Condition(self.mutex) # Notify not_full whenever an item is removed from the queue; # a thread waiting to put is notified then. self.not_full = threading.Condition(self.mutex) # when this is set, SAAbort is raised within get(). self._sqla_abort_context = False def qsize(self): """Return the approximate size of the queue (not reliable!).""" self.mutex.acquire() n = self._qsize() self.mutex.release() return n def empty(self): """Return True if the queue is empty, False otherwise (not reliable!).""" self.mutex.acquire() n = self._empty() self.mutex.release() return n def full(self): """Return True if the queue is full, False otherwise (not reliable!).""" self.mutex.acquire() n = self._full() self.mutex.release() return n def put(self, item, block=True, timeout=None): """Put an item into the queue. 
If optional args `block` is True and `timeout` is None (the default), block if necessary until a free slot is available. If `timeout` is a positive number, it blocks at most `timeout` seconds and raises the ``Full`` exception if no free slot was available within that time. Otherwise (`block` is false), put an item on the queue if a free slot is immediately available, else raise the ``Full`` exception (`timeout` is ignored in that case). """ self.not_full.acquire() try: if not block: if self._full(): raise Full elif timeout is None: while self._full(): self.not_full.wait() else: if timeout < 0: raise ValueError("'timeout' must be a positive number") endtime = _time() + timeout while self._full(): remaining = endtime - _time() if remaining <= 0.0: raise Full self.not_full.wait(remaining) self._put(item) self.not_empty.notify() finally: self.not_full.release() def put_nowait(self, item): """Put an item into the queue without blocking. Only enqueue the item if a free slot is immediately available. Otherwise raise the ``Full`` exception. """ return self.put(item, False) def get(self, block=True, timeout=None): """Remove and return an item from the queue. If optional args `block` is True and `timeout` is None (the default), block if necessary until an item is available. If `timeout` is a positive number, it blocks at most `timeout` seconds and raises the ``Empty`` exception if no item was available within that time. Otherwise (`block` is false), return an item if one is immediately available, else raise the ``Empty`` exception (`timeout` is ignored in that case). """ self.not_empty.acquire() try: if not block: if self._empty(): raise Empty elif timeout is None: while self._empty(): # wait for only half a second, then # loop around, so that we can see a change in # _sqla_abort_context in case we missed the notify_all() # called by abort() self.not_empty.wait(.5) if self._sqla_abort_context: raise SAAbort(self._sqla_abort_context) else: if timeout < 0: raise ValueError("'timeout' must be a positive number") endtime = _time() + timeout while self._empty(): remaining = endtime - _time() if remaining <= 0.0: raise Empty self.not_empty.wait(remaining) if self._sqla_abort_context: raise SAAbort(self._sqla_abort_context) item = self._get() self.not_full.notify() return item finally: self.not_empty.release() def abort(self, context): """Issue an 'abort', will force any thread waiting on get() to stop waiting and raise SAAbort. """ self._sqla_abort_context = context if not self.not_full.acquire(False): return try: # note that this is now optional # as the waiters in get() both loop around # to check the _sqla_abort_context flag periodically notify_all(self.not_empty) finally: self.not_full.release() def get_nowait(self): """Remove and return an item from the queue without blocking. Only get an item if one is immediately available. Otherwise raise the ``Empty`` exception. """ return self.get(False) # Override these methods to implement other queue organizations # (e.g. stack or priority queue). 
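    # For illustration (not from the original source): a LIFO/stack variant
    # would override only these hooks, e.g. _init() setting self.queue = [],
    # _put() appending, and _get() returning self.queue.pop() rather than
    # popleft().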
# These will only be called with appropriate locks held # Initialize the queue representation def _init(self, maxsize): self.maxsize = maxsize self.queue = deque() def _qsize(self): return len(self.queue) # Check whether the queue is empty def _empty(self): return not self.queue # Check whether the queue is full def _full(self): return self.maxsize > 0 and len(self.queue) == self.maxsize # Put a new item in the queue def _put(self, item): self.queue.append(item) # Get an item from the queue def _get(self): return self.queue.popleft() SQLAlchemy-0.8.4/lib/sqlalchemy/util/topological.py0000644000076500000240000000514012251147171023005 0ustar classicstaff00000000000000# util/topological.py # Copyright (C) 2005-2013 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Topological sorting algorithms.""" from ..exc import CircularDependencyError from .. import util __all__ = ['sort', 'sort_as_subsets', 'find_cycles'] def sort_as_subsets(tuples, allitems): edges = util.defaultdict(set) for parent, child in tuples: edges[child].add(parent) todo = set(allitems) while todo: output = set() for node in list(todo): if not todo.intersection(edges[node]): output.add(node) if not output: raise CircularDependencyError( "Circular dependency detected.", find_cycles(tuples, allitems), _gen_edges(edges) ) todo.difference_update(output) yield output def sort(tuples, allitems): """sort the given list of items by dependency. 'tuples' is a list of tuples representing a partial ordering. """ for set_ in sort_as_subsets(tuples, allitems): for s in set_: yield s def find_cycles(tuples, allitems): # adapted from: # http://neopythonic.blogspot.com/2009/01/detecting-cycles-in-directed-graph.html edges = util.defaultdict(set) for parent, child in tuples: edges[parent].add(child) nodes_to_test = set(edges) output = set() # we'd like to find all nodes that are # involved in cycles, so we do the full # pass through the whole thing for each # node in the original list. # we can go just through parent edge nodes. # if a node is only a child and never a parent, # by definition it can't be part of a cycle. same # if it's not in the edges at all. for node in nodes_to_test: stack = [node] todo = nodes_to_test.difference(stack) while stack: top = stack[-1] for node in edges[top]: if node in stack: cyc = stack[stack.index(node):] todo.difference_update(cyc) output.update(cyc) if node in todo: stack.append(node) todo.remove(node) break else: node = stack.pop() return output def _gen_edges(edges): return set([ (right, left) for left in edges for right in edges[left] ]) SQLAlchemy-0.8.4/lib/SQLAlchemy.egg-info/0000755000076500000240000000000012251151573020435 5ustar classicstaff00000000000000SQLAlchemy-0.8.4/lib/SQLAlchemy.egg-info/dependency_links.txt0000644000076500000240000000000112251151573024503 0ustar classicstaff00000000000000 SQLAlchemy-0.8.4/lib/SQLAlchemy.egg-info/PKG-INFO0000644000076500000240000001533412251151573021540 0ustar classicstaff00000000000000Metadata-Version: 1.1 Name: SQLAlchemy Version: 0.8.4 Summary: Database Abstraction Library Home-page: http://www.sqlalchemy.org Author: Mike Bayer Author-email: mike_mp@zzzcomputing.com License: MIT License Description: SQLAlchemy ========== The Python SQL Toolkit and Object Relational Mapper Introduction ------------- SQLAlchemy is the Python SQL toolkit and Object Relational Mapper that gives application developers the full power and flexibility of SQL. 
SQLAlchemy provides a full suite of well known enterprise-level persistence patterns, designed for efficient and high-performing database access, adapted into a simple and Pythonic domain language. Major SQLAlchemy features include: * An industrial strength ORM, built from the core on the identity map, unit of work, and data mapper patterns. These patterns allow transparent persistence of objects using a declarative configuration system. Domain models can be constructed and manipulated naturally, and changes are synchronized with the current transaction automatically. * A relationally-oriented query system, exposing the full range of SQL's capabilities explicitly, including joins, subqueries, correlation, and most everything else, in terms of the object model. Writing queries with the ORM uses the same techniques of relational composition you use when writing SQL. While you can drop into literal SQL at any time, it's virtually never needed. * A comprehensive and flexible system of eager loading for related collections and objects. Collections are cached within a session, and can be loaded on individual access, all at once using joins, or by query per collection across the full result set. * A Core SQL construction system and DBAPI interaction layer. The SQLAlchemy Core is separate from the ORM and is a full database abstraction layer in its own right, and includes an extensible Python-based SQL expression language, schema metadata, connection pooling, type coercion, and custom types. * All primary and foreign key constraints are assumed to be composite and natural. Surrogate integer primary keys are of course still the norm, but SQLAlchemy never assumes or hardcodes to this model. * Database introspection and generation. Database schemas can be "reflected" in one step into Python structures representing database metadata; those same structures can then generate CREATE statements right back out - all within the Core, independent of the ORM. SQLAlchemy's philosophy: * SQL databases behave less and less like object collections the more size and performance start to matter; object collections behave less and less like tables and rows the more abstraction starts to matter. SQLAlchemy aims to accommodate both of these principles. * An ORM doesn't need to hide the "R". A relational database provides rich, set-based functionality that should be fully exposed. SQLAlchemy's ORM provides an open-ended set of patterns that allow a developer to construct a custom mediation layer between a domain model and a relational schema, turning the so-called "object relational impedance" issue into a distant memory. * The developer, in all cases, makes all decisions regarding the design, structure, and naming conventions of both the object model as well as the relational schema. SQLAlchemy only provides the means to automate the execution of these decisions. * With SQLAlchemy, there's no such thing as "the ORM generated a bad query" - you retain full control over the structure of queries, including how joins are organized, how subqueries and correlation is used, what columns are requested. Everything SQLAlchemy does is ultimately the result of a developer- initiated decision. * Don't use an ORM if the problem doesn't need one. SQLAlchemy consists of a Core and separate ORM component. The Core offers a full SQL expression language that allows Pythonic construction of SQL constructs that render directly to SQL strings for a target database, returning result sets that are essentially enhanced DBAPI cursors. 
* Transactions should be the norm. With SQLAlchemy's ORM, nothing goes to permanent storage until commit() is called. SQLAlchemy encourages applications to create a consistent means of delineating the start and end of a series of operations. * Never render a literal value in a SQL statement. Bound parameters are used to the greatest degree possible, allowing query optimizers to cache query plans effectively and making SQL injection attacks a non-issue. Documentation ------------- Latest documentation is at: http://www.sqlalchemy.org/docs/ Installation / Requirements --------------------------- Full documentation for installation is at `Installation `_. Getting Help / Development / Bug reporting ------------------------------------------ Please refer to the `SQLAlchemy Community Guide `_. License ------- SQLAlchemy is distributed under the `MIT license `_. Platform: UNKNOWN Classifier: Development Status :: 5 - Production/Stable Classifier: Intended Audience :: Developers Classifier: License :: OSI Approved :: MIT License Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: Implementation :: CPython Classifier: Programming Language :: Python :: Implementation :: Jython Classifier: Programming Language :: Python :: Implementation :: PyPy Classifier: Topic :: Database :: Front-Ends Classifier: Operating System :: OS Independent SQLAlchemy-0.8.4/lib/SQLAlchemy.egg-info/SOURCES.txt0000644000076500000240000005227512251151573022334 0ustar classicstaff00000000000000AUTHORS CHANGES LICENSE MANIFEST.in README.dialects.rst README.py3k README.rst README.unittests.rst distribute_setup.py ez_setup.py sa2to3.py setup.cfg setup.py sqla_nose.py doc/contents.html doc/copyright.html doc/faq.html doc/genindex.html doc/glossary.html doc/index.html doc/intro.html doc/search.html doc/searchindex.js doc/_images/sqla_arch_small.png doc/_images/sqla_engine_arch.png doc/_sources/contents.txt doc/_sources/copyright.txt doc/_sources/faq.txt doc/_sources/glossary.txt doc/_sources/index.txt doc/_sources/intro.txt doc/_sources/changelog/changelog_01.txt doc/_sources/changelog/changelog_02.txt doc/_sources/changelog/changelog_03.txt doc/_sources/changelog/changelog_04.txt doc/_sources/changelog/changelog_05.txt doc/_sources/changelog/changelog_06.txt doc/_sources/changelog/changelog_07.txt doc/_sources/changelog/changelog_08.txt doc/_sources/changelog/index.txt doc/_sources/changelog/migration_04.txt doc/_sources/changelog/migration_05.txt doc/_sources/changelog/migration_06.txt doc/_sources/changelog/migration_07.txt doc/_sources/changelog/migration_08.txt doc/_sources/core/compiler.txt doc/_sources/core/connections.txt doc/_sources/core/constraints.txt doc/_sources/core/ddl.txt doc/_sources/core/defaults.txt doc/_sources/core/dml.txt doc/_sources/core/engines.txt doc/_sources/core/event.txt doc/_sources/core/events.txt doc/_sources/core/exceptions.txt doc/_sources/core/expression_api.txt doc/_sources/core/functions.txt doc/_sources/core/index.txt doc/_sources/core/inspection.txt doc/_sources/core/interfaces.txt doc/_sources/core/internals.txt doc/_sources/core/metadata.txt doc/_sources/core/pooling.txt doc/_sources/core/reflection.txt doc/_sources/core/schema.txt doc/_sources/core/selectable.txt doc/_sources/core/serializer.txt doc/_sources/core/sqlelement.txt doc/_sources/core/tutorial.txt doc/_sources/core/types.txt doc/_sources/dialects/drizzle.txt doc/_sources/dialects/firebird.txt doc/_sources/dialects/index.txt 
doc/_sources/dialects/informix.txt doc/_sources/dialects/mssql.txt doc/_sources/dialects/mysql.txt doc/_sources/dialects/oracle.txt doc/_sources/dialects/postgresql.txt doc/_sources/dialects/sqlite.txt doc/_sources/dialects/sybase.txt doc/_sources/orm/collections.txt doc/_sources/orm/deprecated.txt doc/_sources/orm/events.txt doc/_sources/orm/examples.txt doc/_sources/orm/exceptions.txt doc/_sources/orm/index.txt doc/_sources/orm/inheritance.txt doc/_sources/orm/internals.txt doc/_sources/orm/loading.txt doc/_sources/orm/mapper_config.txt doc/_sources/orm/query.txt doc/_sources/orm/relationships.txt doc/_sources/orm/session.txt doc/_sources/orm/tutorial.txt doc/_sources/orm/extensions/associationproxy.txt doc/_sources/orm/extensions/declarative.txt doc/_sources/orm/extensions/horizontal_shard.txt doc/_sources/orm/extensions/hybrid.txt doc/_sources/orm/extensions/index.txt doc/_sources/orm/extensions/instrumentation.txt doc/_sources/orm/extensions/mutable.txt doc/_sources/orm/extensions/orderinglist.txt doc/_static/basic.css doc/_static/comment-bright.png doc/_static/comment-close.png doc/_static/comment.png doc/_static/default.css doc/_static/docs.css doc/_static/doctools.js doc/_static/down-pressed.png doc/_static/down.png doc/_static/file.png doc/_static/init.js doc/_static/jquery.js doc/_static/minus.png doc/_static/plus.png doc/_static/pygments.css doc/_static/searchtools.js doc/_static/sidebar.js doc/_static/underscore.js doc/_static/up-pressed.png doc/_static/up.png doc/_static/websupport.js doc/build/Makefile doc/build/conf.py doc/build/contents.rst doc/build/copyright.rst doc/build/faq.rst doc/build/glossary.rst doc/build/index.rst doc/build/intro.rst doc/build/requirements.txt doc/build/sqla_arch_small.png doc/build/testdocs.py doc/build/builder/__init__.py doc/build/builder/autodoc_mods.py doc/build/builder/changelog.py doc/build/builder/dialect_info.py doc/build/builder/mako.py doc/build/builder/sqlformatter.py doc/build/builder/util.py doc/build/changelog/changelog_01.rst doc/build/changelog/changelog_02.rst doc/build/changelog/changelog_03.rst doc/build/changelog/changelog_04.rst doc/build/changelog/changelog_05.rst doc/build/changelog/changelog_06.rst doc/build/changelog/changelog_07.rst doc/build/changelog/changelog_08.rst doc/build/changelog/index.rst doc/build/changelog/migration_04.rst doc/build/changelog/migration_05.rst doc/build/changelog/migration_06.rst doc/build/changelog/migration_07.rst doc/build/changelog/migration_08.rst doc/build/core/compiler.rst doc/build/core/connections.rst doc/build/core/constraints.rst doc/build/core/ddl.rst doc/build/core/defaults.rst doc/build/core/dml.rst doc/build/core/engines.rst doc/build/core/event.rst doc/build/core/events.rst doc/build/core/exceptions.rst doc/build/core/expression_api.rst doc/build/core/functions.rst doc/build/core/index.rst doc/build/core/inspection.rst doc/build/core/interfaces.rst doc/build/core/internals.rst doc/build/core/metadata.rst doc/build/core/pooling.rst doc/build/core/reflection.rst doc/build/core/schema.rst doc/build/core/selectable.rst doc/build/core/serializer.rst doc/build/core/sqla_engine_arch.png doc/build/core/sqlelement.rst doc/build/core/tutorial.rst doc/build/core/types.rst doc/build/dialects/drizzle.rst doc/build/dialects/firebird.rst doc/build/dialects/index.rst doc/build/dialects/informix.rst doc/build/dialects/mssql.rst doc/build/dialects/mysql.rst doc/build/dialects/oracle.rst doc/build/dialects/postgresql.rst doc/build/dialects/sqlite.rst doc/build/dialects/sybase.rst 
doc/build/orm/collections.rst doc/build/orm/deprecated.rst doc/build/orm/events.rst doc/build/orm/examples.rst doc/build/orm/exceptions.rst doc/build/orm/index.rst doc/build/orm/inheritance.rst doc/build/orm/internals.rst doc/build/orm/loading.rst doc/build/orm/mapper_config.rst doc/build/orm/query.rst doc/build/orm/relationships.rst doc/build/orm/session.rst doc/build/orm/tutorial.rst doc/build/orm/extensions/associationproxy.rst doc/build/orm/extensions/declarative.rst doc/build/orm/extensions/horizontal_shard.rst doc/build/orm/extensions/hybrid.rst doc/build/orm/extensions/index.rst doc/build/orm/extensions/instrumentation.rst doc/build/orm/extensions/mutable.rst doc/build/orm/extensions/orderinglist.rst doc/build/static/docs.css doc/build/static/init.js doc/build/templates/genindex.mako doc/build/templates/layout.mako doc/build/templates/page.mako doc/build/templates/search.mako doc/build/templates/static_base.mako doc/build/texinputs/Makefile doc/build/texinputs/sphinx.sty doc/changelog/changelog_01.html doc/changelog/changelog_02.html doc/changelog/changelog_03.html doc/changelog/changelog_04.html doc/changelog/changelog_05.html doc/changelog/changelog_06.html doc/changelog/changelog_07.html doc/changelog/changelog_08.html doc/changelog/index.html doc/changelog/migration_04.html doc/changelog/migration_05.html doc/changelog/migration_06.html doc/changelog/migration_07.html doc/changelog/migration_08.html doc/core/compiler.html doc/core/connections.html doc/core/constraints.html doc/core/ddl.html doc/core/defaults.html doc/core/dml.html doc/core/engines.html doc/core/event.html doc/core/events.html doc/core/exceptions.html doc/core/expression_api.html doc/core/functions.html doc/core/index.html doc/core/inspection.html doc/core/interfaces.html doc/core/internals.html doc/core/metadata.html doc/core/pooling.html doc/core/reflection.html doc/core/schema.html doc/core/selectable.html doc/core/serializer.html doc/core/sqlelement.html doc/core/tutorial.html doc/core/types.html doc/dialects/drizzle.html doc/dialects/firebird.html doc/dialects/index.html doc/dialects/informix.html doc/dialects/mssql.html doc/dialects/mysql.html doc/dialects/oracle.html doc/dialects/postgresql.html doc/dialects/sqlite.html doc/dialects/sybase.html doc/orm/collections.html doc/orm/deprecated.html doc/orm/events.html doc/orm/examples.html doc/orm/exceptions.html doc/orm/index.html doc/orm/inheritance.html doc/orm/internals.html doc/orm/loading.html doc/orm/mapper_config.html doc/orm/query.html doc/orm/relationships.html doc/orm/session.html doc/orm/tutorial.html doc/orm/extensions/associationproxy.html doc/orm/extensions/declarative.html doc/orm/extensions/horizontal_shard.html doc/orm/extensions/hybrid.html doc/orm/extensions/index.html doc/orm/extensions/instrumentation.html doc/orm/extensions/mutable.html doc/orm/extensions/orderinglist.html examples/__init__.py examples/adjacency_list/__init__.py examples/adjacency_list/adjacency_list.py examples/association/__init__.py examples/association/basic_association.py examples/association/dict_of_sets_with_default.py examples/association/proxied_association.py examples/custom_attributes/__init__.py examples/custom_attributes/custom_management.py examples/custom_attributes/listen_for_events.py examples/dogpile_caching/__init__.py examples/dogpile_caching/advanced.py examples/dogpile_caching/caching_query.py examples/dogpile_caching/environment.py examples/dogpile_caching/fixture_data.py examples/dogpile_caching/helloworld.py 
examples/dogpile_caching/local_session_caching.py examples/dogpile_caching/model.py examples/dogpile_caching/relation_caching.py examples/dynamic_dict/__init__.py examples/dynamic_dict/dynamic_dict.py examples/elementtree/__init__.py examples/elementtree/adjacency_list.py examples/elementtree/optimized_al.py examples/elementtree/pickle.py examples/elementtree/test.xml examples/elementtree/test2.xml examples/elementtree/test3.xml examples/generic_associations/__init__.py examples/generic_associations/discriminator_on_association.py examples/generic_associations/generic_fk.py examples/generic_associations/table_per_association.py examples/generic_associations/table_per_related.py examples/graphs/__init__.py examples/graphs/directed_graph.py examples/inheritance/__init__.py examples/inheritance/concrete.py examples/inheritance/joined.py examples/inheritance/single.py examples/large_collection/__init__.py examples/large_collection/large_collection.py examples/nested_sets/__init__.py examples/nested_sets/nested_sets.py examples/postgis/__init__.py examples/postgis/postgis.py examples/sharding/__init__.py examples/sharding/attribute_shard.py examples/versioning/__init__.py examples/versioning/_lib.py examples/versioning/history_meta.py examples/versioning/test_versioning.py examples/vertical/__init__.py examples/vertical/dictlike-polymorphic.py examples/vertical/dictlike.py lib/SQLAlchemy.egg-info/PKG-INFO lib/SQLAlchemy.egg-info/SOURCES.txt lib/SQLAlchemy.egg-info/dependency_links.txt lib/SQLAlchemy.egg-info/top_level.txt lib/sqlalchemy/__init__.py lib/sqlalchemy/event.py lib/sqlalchemy/events.py lib/sqlalchemy/exc.py lib/sqlalchemy/inspection.py lib/sqlalchemy/interfaces.py lib/sqlalchemy/log.py lib/sqlalchemy/pool.py lib/sqlalchemy/processors.py lib/sqlalchemy/schema.py lib/sqlalchemy/types.py lib/sqlalchemy/cextension/processors.c lib/sqlalchemy/cextension/resultproxy.c lib/sqlalchemy/cextension/utils.c lib/sqlalchemy/connectors/__init__.py lib/sqlalchemy/connectors/mxodbc.py lib/sqlalchemy/connectors/mysqldb.py lib/sqlalchemy/connectors/pyodbc.py lib/sqlalchemy/connectors/zxJDBC.py lib/sqlalchemy/databases/__init__.py lib/sqlalchemy/dialects/__init__.py lib/sqlalchemy/dialects/postgres.py lib/sqlalchemy/dialects/type_migration_guidelines.txt lib/sqlalchemy/dialects/drizzle/__init__.py lib/sqlalchemy/dialects/drizzle/base.py lib/sqlalchemy/dialects/drizzle/mysqldb.py lib/sqlalchemy/dialects/firebird/__init__.py lib/sqlalchemy/dialects/firebird/base.py lib/sqlalchemy/dialects/firebird/fdb.py lib/sqlalchemy/dialects/firebird/kinterbasdb.py lib/sqlalchemy/dialects/informix/__init__.py lib/sqlalchemy/dialects/informix/base.py lib/sqlalchemy/dialects/informix/informixdb.py lib/sqlalchemy/dialects/mssql/__init__.py lib/sqlalchemy/dialects/mssql/adodbapi.py lib/sqlalchemy/dialects/mssql/base.py lib/sqlalchemy/dialects/mssql/information_schema.py lib/sqlalchemy/dialects/mssql/mxodbc.py lib/sqlalchemy/dialects/mssql/pymssql.py lib/sqlalchemy/dialects/mssql/pyodbc.py lib/sqlalchemy/dialects/mssql/zxjdbc.py lib/sqlalchemy/dialects/mysql/__init__.py lib/sqlalchemy/dialects/mysql/base.py lib/sqlalchemy/dialects/mysql/cymysql.py lib/sqlalchemy/dialects/mysql/gaerdbms.py lib/sqlalchemy/dialects/mysql/mysqlconnector.py lib/sqlalchemy/dialects/mysql/mysqldb.py lib/sqlalchemy/dialects/mysql/oursql.py lib/sqlalchemy/dialects/mysql/pymysql.py lib/sqlalchemy/dialects/mysql/pyodbc.py lib/sqlalchemy/dialects/mysql/zxjdbc.py lib/sqlalchemy/dialects/oracle/__init__.py lib/sqlalchemy/dialects/oracle/base.py 
lib/sqlalchemy/dialects/oracle/cx_oracle.py lib/sqlalchemy/dialects/oracle/zxjdbc.py lib/sqlalchemy/dialects/postgresql/__init__.py lib/sqlalchemy/dialects/postgresql/base.py lib/sqlalchemy/dialects/postgresql/constraints.py lib/sqlalchemy/dialects/postgresql/hstore.py lib/sqlalchemy/dialects/postgresql/pg8000.py lib/sqlalchemy/dialects/postgresql/psycopg2.py lib/sqlalchemy/dialects/postgresql/pypostgresql.py lib/sqlalchemy/dialects/postgresql/ranges.py lib/sqlalchemy/dialects/postgresql/zxjdbc.py lib/sqlalchemy/dialects/sqlite/__init__.py lib/sqlalchemy/dialects/sqlite/base.py lib/sqlalchemy/dialects/sqlite/pysqlite.py lib/sqlalchemy/dialects/sybase/__init__.py lib/sqlalchemy/dialects/sybase/base.py lib/sqlalchemy/dialects/sybase/mxodbc.py lib/sqlalchemy/dialects/sybase/pyodbc.py lib/sqlalchemy/dialects/sybase/pysybase.py lib/sqlalchemy/engine/__init__.py lib/sqlalchemy/engine/base.py lib/sqlalchemy/engine/ddl.py lib/sqlalchemy/engine/default.py lib/sqlalchemy/engine/interfaces.py lib/sqlalchemy/engine/reflection.py lib/sqlalchemy/engine/result.py lib/sqlalchemy/engine/strategies.py lib/sqlalchemy/engine/threadlocal.py lib/sqlalchemy/engine/url.py lib/sqlalchemy/engine/util.py lib/sqlalchemy/ext/__init__.py lib/sqlalchemy/ext/associationproxy.py lib/sqlalchemy/ext/compiler.py lib/sqlalchemy/ext/horizontal_shard.py lib/sqlalchemy/ext/hybrid.py lib/sqlalchemy/ext/instrumentation.py lib/sqlalchemy/ext/mutable.py lib/sqlalchemy/ext/orderinglist.py lib/sqlalchemy/ext/serializer.py lib/sqlalchemy/ext/declarative/__init__.py lib/sqlalchemy/ext/declarative/api.py lib/sqlalchemy/ext/declarative/base.py lib/sqlalchemy/ext/declarative/clsregistry.py lib/sqlalchemy/orm/__init__.py lib/sqlalchemy/orm/attributes.py lib/sqlalchemy/orm/collections.py lib/sqlalchemy/orm/dependency.py lib/sqlalchemy/orm/deprecated_interfaces.py lib/sqlalchemy/orm/descriptor_props.py lib/sqlalchemy/orm/dynamic.py lib/sqlalchemy/orm/evaluator.py lib/sqlalchemy/orm/events.py lib/sqlalchemy/orm/exc.py lib/sqlalchemy/orm/identity.py lib/sqlalchemy/orm/instrumentation.py lib/sqlalchemy/orm/interfaces.py lib/sqlalchemy/orm/loading.py lib/sqlalchemy/orm/mapper.py lib/sqlalchemy/orm/persistence.py lib/sqlalchemy/orm/properties.py lib/sqlalchemy/orm/query.py lib/sqlalchemy/orm/relationships.py lib/sqlalchemy/orm/scoping.py lib/sqlalchemy/orm/session.py lib/sqlalchemy/orm/state.py lib/sqlalchemy/orm/strategies.py lib/sqlalchemy/orm/sync.py lib/sqlalchemy/orm/unitofwork.py lib/sqlalchemy/orm/util.py lib/sqlalchemy/sql/__init__.py lib/sqlalchemy/sql/compiler.py lib/sqlalchemy/sql/expression.py lib/sqlalchemy/sql/functions.py lib/sqlalchemy/sql/operators.py lib/sqlalchemy/sql/util.py lib/sqlalchemy/sql/visitors.py lib/sqlalchemy/testing/__init__.py lib/sqlalchemy/testing/assertions.py lib/sqlalchemy/testing/assertsql.py lib/sqlalchemy/testing/config.py lib/sqlalchemy/testing/engines.py lib/sqlalchemy/testing/entities.py lib/sqlalchemy/testing/exclusions.py lib/sqlalchemy/testing/fixtures.py lib/sqlalchemy/testing/mock.py lib/sqlalchemy/testing/pickleable.py lib/sqlalchemy/testing/profiling.py lib/sqlalchemy/testing/requirements.py lib/sqlalchemy/testing/runner.py lib/sqlalchemy/testing/schema.py lib/sqlalchemy/testing/util.py lib/sqlalchemy/testing/warnings.py lib/sqlalchemy/testing/plugin/__init__.py lib/sqlalchemy/testing/plugin/noseplugin.py lib/sqlalchemy/testing/suite/__init__.py lib/sqlalchemy/testing/suite/test_ddl.py lib/sqlalchemy/testing/suite/test_insert.py lib/sqlalchemy/testing/suite/test_reflection.py 
lib/sqlalchemy/testing/suite/test_results.py lib/sqlalchemy/testing/suite/test_sequence.py lib/sqlalchemy/testing/suite/test_types.py lib/sqlalchemy/testing/suite/test_update_delete.py lib/sqlalchemy/util/__init__.py lib/sqlalchemy/util/_collections.py lib/sqlalchemy/util/compat.py lib/sqlalchemy/util/deprecations.py lib/sqlalchemy/util/langhelpers.py lib/sqlalchemy/util/queue.py lib/sqlalchemy/util/topological.py test/__init__.py test/binary_data_one.dat test/binary_data_two.dat test/requirements.py test/aaa_profiling/__init__.py test/aaa_profiling/test_compiler.py test/aaa_profiling/test_memusage.py test/aaa_profiling/test_orm.py test/aaa_profiling/test_pool.py test/aaa_profiling/test_resultset.py test/aaa_profiling/test_zoomark.py test/aaa_profiling/test_zoomark_orm.py test/base/__init__.py test/base/test_dependency.py test/base/test_events.py test/base/test_except.py test/base/test_inspect.py test/base/test_utils.py test/dialect/__init__.py test/dialect/test_firebird.py test/dialect/test_informix.py test/dialect/test_mxodbc.py test/dialect/test_oracle.py test/dialect/test_pyodbc.py test/dialect/test_sqlite.py test/dialect/test_suite.py test/dialect/test_sybase.py test/dialect/mssql/__init__.py test/dialect/mssql/test_compiler.py test/dialect/mssql/test_engine.py test/dialect/mssql/test_query.py test/dialect/mssql/test_reflection.py test/dialect/mssql/test_types.py test/dialect/mysql/__init__.py test/dialect/mysql/test_compiler.py test/dialect/mysql/test_dialect.py test/dialect/mysql/test_query.py test/dialect/mysql/test_reflection.py test/dialect/mysql/test_types.py test/dialect/postgresql/__init__.py test/dialect/postgresql/test_compiler.py test/dialect/postgresql/test_dialect.py test/dialect/postgresql/test_query.py test/dialect/postgresql/test_reflection.py test/dialect/postgresql/test_types.py test/engine/__init__.py test/engine/test_bind.py test/engine/test_ddlemit.py test/engine/test_ddlevents.py test/engine/test_execute.py test/engine/test_parseconnect.py test/engine/test_pool.py test/engine/test_processors.py test/engine/test_reconnect.py test/engine/test_reflection.py test/engine/test_transaction.py test/ext/__init__.py test/ext/test_associationproxy.py test/ext/test_compiler.py test/ext/test_extendedattr.py test/ext/test_horizontal_shard.py test/ext/test_hybrid.py test/ext/test_mutable.py test/ext/test_orderinglist.py test/ext/test_serializer.py test/ext/declarative/__init__.py test/ext/declarative/test_basic.py test/ext/declarative/test_clsregistry.py test/ext/declarative/test_inheritance.py test/ext/declarative/test_mixin.py test/ext/declarative/test_reflection.py test/orm/__init__.py test/orm/_fixtures.py test/orm/test_association.py test/orm/test_assorted_eager.py test/orm/test_attributes.py test/orm/test_backref_mutations.py test/orm/test_bind.py test/orm/test_cascade.py test/orm/test_collection.py test/orm/test_compile.py test/orm/test_composites.py test/orm/test_cycles.py test/orm/test_default_strategies.py test/orm/test_defaults.py test/orm/test_deprecations.py test/orm/test_descriptor.py test/orm/test_dynamic.py test/orm/test_eager_relations.py test/orm/test_evaluator.py test/orm/test_events.py test/orm/test_expire.py test/orm/test_froms.py test/orm/test_generative.py test/orm/test_hasparent.py test/orm/test_immediate_load.py test/orm/test_inspect.py test/orm/test_instrumentation.py test/orm/test_joins.py test/orm/test_lazy_relations.py test/orm/test_load_on_fks.py test/orm/test_loading.py test/orm/test_lockmode.py test/orm/test_manytomany.py test/orm/test_mapper.py 
test/orm/test_merge.py test/orm/test_naturalpks.py test/orm/test_of_type.py test/orm/test_onetoone.py test/orm/test_pickled.py test/orm/test_query.py test/orm/test_rel_fn.py test/orm/test_relationships.py test/orm/test_scoping.py test/orm/test_selectable.py test/orm/test_session.py test/orm/test_subquery_relations.py test/orm/test_sync.py test/orm/test_transaction.py test/orm/test_unitofwork.py test/orm/test_unitofworkv2.py test/orm/test_update_delete.py test/orm/test_utils.py test/orm/test_versioning.py test/orm/inheritance/__init__.py test/orm/inheritance/_poly_fixtures.py test/orm/inheritance/test_abc_inheritance.py test/orm/inheritance/test_abc_polymorphic.py test/orm/inheritance/test_assorted_poly.py test/orm/inheritance/test_basic.py test/orm/inheritance/test_concrete.py test/orm/inheritance/test_magazine.py test/orm/inheritance/test_manytomany.py test/orm/inheritance/test_poly_linked_list.py test/orm/inheritance/test_poly_persistence.py test/orm/inheritance/test_polymorphic_rel.py test/orm/inheritance/test_productspec.py test/orm/inheritance/test_relationship.py test/orm/inheritance/test_selects.py test/orm/inheritance/test_single.py test/orm/inheritance/test_with_poly.py test/perf/insertspeed.py test/perf/large_flush.py test/perf/objselectspeed.py test/perf/objupdatespeed.py test/perf/orm2010.py test/perf/ormsession.py test/perf/sessions.py test/perf/stress_all.py test/perf/stresstest.py test/perf/threaded_compile.py test/sql/__init__.py test/sql/test_case_statement.py test/sql/test_compiler.py test/sql/test_constraints.py test/sql/test_cte.py test/sql/test_defaults.py test/sql/test_delete.py test/sql/test_functions.py test/sql/test_generative.py test/sql/test_insert.py test/sql/test_inspect.py test/sql/test_labels.py test/sql/test_metadata.py test/sql/test_operators.py test/sql/test_query.py test/sql/test_quote.py test/sql/test_returning.py test/sql/test_rowcount.py test/sql/test_selectable.py test/sql/test_type_expressions.py test/sql/test_types.py test/sql/test_unicode.py test/sql/test_update.pySQLAlchemy-0.8.4/lib/SQLAlchemy.egg-info/top_level.txt0000644000076500000240000000001312251151573023161 0ustar classicstaff00000000000000sqlalchemy SQLAlchemy-0.8.4/LICENSE0000644000076500000240000000231612251147171015241 0ustar classicstaff00000000000000This is the MIT license: http://www.opensource.org/licenses/mit-license.php Copyright (c) 2005-2013 the SQLAlchemy authors and contributors . SQLAlchemy is a trademark of Michael Bayer. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
SQLAlchemy-0.8.4/MANIFEST.in0000644000076500000240000000076112251150043015764 0ustar classicstaff00000000000000# any kind of "*" pulls in __init__.pyc files, # so all extensions are explicit. recursive-include doc *.html *.css *.txt *.js *.jpg *.png *.py Makefile *.rst *.mako *.sty recursive-include examples *.py *.xml recursive-include test *.py *.dat # include the c extensions, which otherwise # don't come in if --with-cextensions isn't specified. recursive-include lib *.c *.txt include README* AUTHORS LICENSE distribute_setup.py sa2to3.py ez_setup.py sqla_nose.py CHANGES* prune doc/build/output SQLAlchemy-0.8.4/PKG-INFO0000644000076500000240000001533412251151573015336 0ustar classicstaff00000000000000Metadata-Version: 1.1 Name: SQLAlchemy Version: 0.8.4 Summary: Database Abstraction Library Home-page: http://www.sqlalchemy.org Author: Mike Bayer Author-email: mike_mp@zzzcomputing.com License: MIT License Description: SQLAlchemy ========== The Python SQL Toolkit and Object Relational Mapper Introduction ------------- SQLAlchemy is the Python SQL toolkit and Object Relational Mapper that gives application developers the full power and flexibility of SQL. SQLAlchemy provides a full suite of well known enterprise-level persistence patterns, designed for efficient and high-performing database access, adapted into a simple and Pythonic domain language. Major SQLAlchemy features include: * An industrial strength ORM, built from the core on the identity map, unit of work, and data mapper patterns. These patterns allow transparent persistence of objects using a declarative configuration system. Domain models can be constructed and manipulated naturally, and changes are synchronized with the current transaction automatically. * A relationally-oriented query system, exposing the full range of SQL's capabilities explicitly, including joins, subqueries, correlation, and most everything else, in terms of the object model. Writing queries with the ORM uses the same techniques of relational composition you use when writing SQL. While you can drop into literal SQL at any time, it's virtually never needed. * A comprehensive and flexible system of eager loading for related collections and objects. Collections are cached within a session, and can be loaded on individual access, all at once using joins, or by query per collection across the full result set. * A Core SQL construction system and DBAPI interaction layer. The SQLAlchemy Core is separate from the ORM and is a full database abstraction layer in its own right, and includes an extensible Python-based SQL expression language, schema metadata, connection pooling, type coercion, and custom types. * All primary and foreign key constraints are assumed to be composite and natural. Surrogate integer primary keys are of course still the norm, but SQLAlchemy never assumes or hardcodes to this model. * Database introspection and generation. Database schemas can be "reflected" in one step into Python structures representing database metadata; those same structures can then generate CREATE statements right back out - all within the Core, independent of the ORM. SQLAlchemy's philosophy: * SQL databases behave less and less like object collections the more size and performance start to matter; object collections behave less and less like tables and rows the more abstraction starts to matter. SQLAlchemy aims to accommodate both of these principles. * An ORM doesn't need to hide the "R". 
A relational database provides rich, set-based functionality that should be fully exposed. SQLAlchemy's ORM provides an open-ended set of patterns that allow a developer to construct a custom mediation layer between a domain model and a relational schema, turning the so-called "object relational impedance" issue into a distant memory. * The developer, in all cases, makes all decisions regarding the design, structure, and naming conventions of both the object model as well as the relational schema. SQLAlchemy only provides the means to automate the execution of these decisions. * With SQLAlchemy, there's no such thing as "the ORM generated a bad query" - you retain full control over the structure of queries, including how joins are organized, how subqueries and correlation is used, what columns are requested. Everything SQLAlchemy does is ultimately the result of a developer- initiated decision. * Don't use an ORM if the problem doesn't need one. SQLAlchemy consists of a Core and separate ORM component. The Core offers a full SQL expression language that allows Pythonic construction of SQL constructs that render directly to SQL strings for a target database, returning result sets that are essentially enhanced DBAPI cursors. * Transactions should be the norm. With SQLAlchemy's ORM, nothing goes to permanent storage until commit() is called. SQLAlchemy encourages applications to create a consistent means of delineating the start and end of a series of operations. * Never render a literal value in a SQL statement. Bound parameters are used to the greatest degree possible, allowing query optimizers to cache query plans effectively and making SQL injection attacks a non-issue. Documentation ------------- Latest documentation is at: http://www.sqlalchemy.org/docs/ Installation / Requirements --------------------------- Full documentation for installation is at `Installation `_. Getting Help / Development / Bug reporting ------------------------------------------ Please refer to the `SQLAlchemy Community Guide `_. License ------- SQLAlchemy is distributed under the `MIT license `_. Platform: UNKNOWN Classifier: Development Status :: 5 - Production/Stable Classifier: Intended Audience :: Developers Classifier: License :: OSI Approved :: MIT License Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: Implementation :: CPython Classifier: Programming Language :: Python :: Implementation :: Jython Classifier: Programming Language :: Python :: Implementation :: PyPy Classifier: Topic :: Database :: Front-Ends Classifier: Operating System :: OS Independent SQLAlchemy-0.8.4/README.dialects.rst0000644000076500000240000002321712251147171017515 0ustar classicstaff00000000000000======================== Developing new Dialects ======================== .. note:: When studying this file, it's probably a good idea to also familiarize with the README.unittests.rst file, which discusses SQLAlchemy's usage and extension of the Nose test runner. While SQLAlchemy includes many dialects within the core distribution, the trend for new dialects should be that they are published as external projects. SQLAlchemy has since version 0.5 featured a "plugin" system which allows external dialects to be integrated into SQLAlchemy using standard setuptools entry points. As of version 0.8, this system has been enhanced, so that a dialect can also be "plugged in" at runtime. 
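As a brief illustration of the runtime form, the sketch below registers a hypothetical external dialect in-process and then refers to it by URL. The module and class names are placeholders borrowed from the Access example used later in this document, and the sketch assumes the dialect package and its DBAPI are importable::

    # hypothetical sketch -- "sqlalchemy_access" / "AccessDialect_pyodbc" are
    # placeholder names for an external dialect, not part of SQLAlchemy itself
    from sqlalchemy.dialects import registry
    from sqlalchemy import create_engine

    # make the "access" / "access.pyodbc" URL schemes resolvable without any
    # setuptools entry point being installed
    registry.register("access", "sqlalchemy_access.pyodbc", "AccessDialect_pyodbc")
    registry.register("access.pyodbc", "sqlalchemy_access.pyodbc", "AccessDialect_pyodbc")

    # the URL now loads the external dialect at engine-creation time
    engine = create_engine("access+pyodbc://user:pw@dsn")

The setuptools entry point form, shown in the ``setup.py`` example below, achieves the same effect declaratively at install time.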
On the testing side, SQLAlchemy as of 0.8 also includes a "dialect compliance suite" that is usable by third party libraries. There is no longer a strong need for a new dialect to run through SQLAlchemy's full testing suite, as a large portion of these tests do not have dialect-sensitive functionality. The "dialect compliance suite" should be viewed as the primary target for new dialects, and as it continues to grow and mature it should become a more thorough and efficient system of testing new dialects. Dialect Layout =============== The file structure of a dialect is typically similar to the following:: sqlalchemy-<dialect>/ setup.py setup.cfg run_tests.py sqlalchemy_<dialect>/ __init__.py base.py <dbapi>.py requirements.py test/ __init__.py test_suite.py test_<dialect_specific_test>.py ... An example of this structure can be seen in the Access dialect at https://bitbucket.org/zzzeek/sqlalchemy-access/. Key aspects of this file layout include: * setup.py - should specify setuptools entrypoints, allowing the dialect to be usable from create_engine(), e.g.:: entry_points={ 'sqlalchemy.dialects': [ 'access = sqlalchemy_access.pyodbc:AccessDialect_pyodbc', 'access.pyodbc = sqlalchemy_access.pyodbc:AccessDialect_pyodbc', ] } Above, the two entrypoints ``access`` and ``access.pyodbc`` allow URLs to be used such as:: create_engine("access://user:pw@dsn") create_engine("access+pyodbc://user:pw@dsn") * setup.cfg - this file contains the traditional contents such as [egg_info] and [nosetests] directives, but also contains new directives that are used by SQLAlchemy's testing framework. E.g. for Access:: [egg_info] tag_build = dev [nosetests] with-sqla_testing = true where = test cover-package = sqlalchemy_access with-coverage = 1 cover-erase = 1 [sqla_testing] requirement_cls=sqlalchemy_access.requirements:Requirements profile_file=.profiles.txt [db] default=access+pyodbc://admin@access_test sqlite=sqlite:///:memory: Above, the ``[sqla_testing]`` section contains configuration used by SQLAlchemy's test plugin. The ``[nosetests]`` section includes the directive ``with-sqla_testing = true``, which indicates to Nose that the SQLAlchemy nose plugin should be used. * run_tests.py - The plugin is provided with SQLAlchemy, however it is not plugged into Nose automatically; instead, a ``run_tests.py`` script should be composed as a front end to Nose, such that SQLAlchemy's plugin will be correctly installed. run_tests.py has two parts. One optional, but probably helpful, step is that it installs your third party dialect into SQLAlchemy without using the setuptools entrypoint system; this allows your dialect to be present without any explicit setup.py step needed. The other step is to import SQLAlchemy's nose runner and invoke it. An example run_tests.py file looks like the following:: from sqlalchemy.dialects import registry registry.register("access", "sqlalchemy_access.pyodbc", "AccessDialect_pyodbc") registry.register("access.pyodbc", "sqlalchemy_access.pyodbc", "AccessDialect_pyodbc") from sqlalchemy.testing import runner # use this in setup.py 'test_suite': # test_suite="run_tests.setup_py_test" def setup_py_test(): runner.setup_py_test() if __name__ == '__main__': runner.main() Where above, the ``registry`` module, introduced in SQLAlchemy 0.8, provides an in-Python means of installing the dialect entrypoints without the use of setuptools, using the ``registry.register()`` function in a way that is similar to the ``entry_points`` directive we placed in our ``setup.py``.
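To show how these pieces fit together, a minimal ``setup.py`` for such a dialect might look like the following - a sketch only, with package names mirroring the hypothetical Access dialect above, and with the ``test_suite`` hook pointing at the ``setup_py_test()`` function defined in ``run_tests.py``::

    # hypothetical sketch of the external dialect's own setup.py
    from setuptools import setup, find_packages

    setup(
        name="sqlalchemy-access",
        version="0.1.0",
        packages=find_packages(),
        install_requires=["SQLAlchemy>=0.8"],
        # entry points as described above, resolved by create_engine()
        entry_points={
            'sqlalchemy.dialects': [
                'access = sqlalchemy_access.pyodbc:AccessDialect_pyodbc',
                'access.pyodbc = sqlalchemy_access.pyodbc:AccessDialect_pyodbc',
            ]
        },
        # allows "python setup.py test" to invoke the nose front end
        test_suite="run_tests.setup_py_test",
    )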
The call to ``runner.main()`` then runs the Nose front end, which installs SQLAlchemy's testing plugins. Invoking our custom runner looks like the following:: $ python run_tests.py -v * requirements.py - The ``requirements.py`` file is where directives regarding database and dialect capabilities are set up. SQLAlchemy's tests are often annotated with decorators that mark tests as "skip" or "fail" for particular backends. Over time, this system has been refined such that specific database and DBAPI names are mentioned less and less, in favor of @requires directives which state a particular capability. The requirement directive is linked to target dialects using a ``Requirements`` subclass. The custom ``Requirements`` subclass is specified in the ``requirements.py`` file and is made available to SQLAlchemy's test runner using the ``requirement_cls`` directive inside the ``[sqla_testing]`` section. For a third-party dialect, the custom ``Requirements`` class can usually specify a simple yes/no answer for a particular system. For example, a requirements file that specifies a database that supports the RETURNING construct but does not support reflection of tables might look like this:: # sqlalchemy_access/requirements.py from sqlalchemy.testing.requirements import SuiteRequirements from sqlalchemy.testing import exclusions class Requirements(SuiteRequirements): @property def table_reflection(self): return exclusions.closed() @property def returning(self): return exclusions.open() The ``SuiteRequirements`` class in ``sqlalchemy.testing.requirements`` contains a large number of requirements rules, which attempt to have reasonable defaults. The tests will report on those requirements found as they are run. The requirements system can also be used when running SQLAlchemy's primary test suite against the external dialect. In this use case, a ``--dburi`` as well as a ``--requirements`` flag are passed to SQLAlchemy's main test runner ``./sqla_nose.py`` so that exclusions specific to the dialect take place:: cd /path/to/sqlalchemy python ./sqla_nose.py -v \ --requirements sqlalchemy_access.requirements:Requirements \ --dburi access+pyodbc://admin@access_test * test_suite.py - Finally, the ``test_suite.py`` module represents a Nose test suite, which pulls in the actual SQLAlchemy test suite. To pull in the suite as a whole, it can be imported in one step:: # test/test_suite.py from sqlalchemy.testing.suite import * That's all that's needed - the ``sqlalchemy.testing.suite`` package contains an ever expanding series of tests, most of which should be annotated with specific requirement decorators so that they can be fully controlled. To specifically modify some of the tests, they can be imported by name and subclassed:: from sqlalchemy.testing.suite import * from sqlalchemy.testing.suite import ComponentReflectionTest as _ComponentReflectionTest class ComponentReflectionTest(_ComponentReflectionTest): @classmethod def define_views(cls, metadata, schema): # bypass the "define_views" section of the # fixture return Going Forward ============== The third-party dialect can be distributed like any other Python module on Pypi. Links to prominent dialects can be featured within SQLAlchemy's own documentation; contact the developers (see AUTHORS) for help with this. While SQLAlchemy includes many dialects built in, it remains to be seen if the project as a whole might move towards "plugin" model for all dialects, including all those currently built in. 
Now that SQLAlchemy's dialect API is mature and the test suite is not far behind, it may be that a better maintenance experience can be delivered by having all dialects separately maintained and released. As new versions of SQLAlchemy are released, the test suite and requirements file will receive new tests and changes. The dialect maintainer would normally keep track of these changes and make adjustments as needed. Continuous Integration ====================== The most ideal scenario for ongoing dialect testing is continuous integration, that is, an automated test runner that runs in response to changes not just in the dialect itself but to new pushes to SQLAlchemy as well. The SQLAlchemy project features a Jenkins installation that runs tests on Amazon EC2 instances. It is possible for third-party dialect developers to provide the SQLAlchemy project either with AMIs or EC2 instance keys which feature test environments appropriate to the dialect - SQLAlchemy's own Jenkins suite can invoke tests on these environments. Contact the developers for further info. SQLAlchemy-0.8.4/README.py3k0000644000076500000240000000264212251150015015772 0ustar classicstaff00000000000000================= PYTHON 3 SUPPORT ================= Current Python 3k support in SQLAlchemy is provided by a customized 2to3 script which wraps Python's 2to3 tool. Installing Distribute --------------------- Distribute should be installed with the Python3 installation. The distribute bootloader is included. Running as a user with permission to modify the Python distribution, install Distribute: python3 distribute_setup.py Installing SQLAlchemy in Python 3 --------------------------------- Once Distribute is installed, SQLAlchemy can be installed directly. The 2to3 process will kick in which takes several minutes: python3 setup.py install Converting Tests, Examples, Source to Python 3 ---------------------------------------------- To convert all files in the source distribution, run SQLAlchemys "sa2to3.py" script, which monkeypatches a preprocessor onto the 2to3 tool: python3 sa2to3.py --no-diffs -w lib test examples The above will rewrite all files in-place in Python 3 format. Running Tests ------------- To run unit tests in Py3k, Nose 1.0 is required, or a development version of Nose that supports Python 3. The tests are run using ./sqla_nose.py as described in README.unittests. Current 3k Issues ----------------- Current bugs and tickets related to Py3k are on the Py3k milestone in trac: http://www.sqlalchemy.org/trac/query?status=new&status=assigned&status=reopened&milestone=py3k SQLAlchemy-0.8.4/README.rst0000644000076500000240000001162312251147171015724 0ustar classicstaff00000000000000SQLAlchemy ========== The Python SQL Toolkit and Object Relational Mapper Introduction ------------- SQLAlchemy is the Python SQL toolkit and Object Relational Mapper that gives application developers the full power and flexibility of SQL. SQLAlchemy provides a full suite of well known enterprise-level persistence patterns, designed for efficient and high-performing database access, adapted into a simple and Pythonic domain language. Major SQLAlchemy features include: * An industrial strength ORM, built from the core on the identity map, unit of work, and data mapper patterns. These patterns allow transparent persistence of objects using a declarative configuration system. Domain models can be constructed and manipulated naturally, and changes are synchronized with the current transaction automatically. 
* A relationally-oriented query system, exposing the full range of SQL's capabilities explicitly, including joins, subqueries, correlation, and most everything else, in terms of the object model. Writing queries with the ORM uses the same techniques of relational composition you use when writing SQL. While you can drop into literal SQL at any time, it's virtually never needed. * A comprehensive and flexible system of eager loading for related collections and objects. Collections are cached within a session, and can be loaded on individual access, all at once using joins, or by query per collection across the full result set. * A Core SQL construction system and DBAPI interaction layer. The SQLAlchemy Core is separate from the ORM and is a full database abstraction layer in its own right, and includes an extensible Python-based SQL expression language, schema metadata, connection pooling, type coercion, and custom types. * All primary and foreign key constraints are assumed to be composite and natural. Surrogate integer primary keys are of course still the norm, but SQLAlchemy never assumes or hardcodes to this model. * Database introspection and generation. Database schemas can be "reflected" in one step into Python structures representing database metadata; those same structures can then generate CREATE statements right back out - all within the Core, independent of the ORM. SQLAlchemy's philosophy: * SQL databases behave less and less like object collections the more size and performance start to matter; object collections behave less and less like tables and rows the more abstraction starts to matter. SQLAlchemy aims to accommodate both of these principles. * An ORM doesn't need to hide the "R". A relational database provides rich, set-based functionality that should be fully exposed. SQLAlchemy's ORM provides an open-ended set of patterns that allow a developer to construct a custom mediation layer between a domain model and a relational schema, turning the so-called "object relational impedance" issue into a distant memory. * The developer, in all cases, makes all decisions regarding the design, structure, and naming conventions of both the object model as well as the relational schema. SQLAlchemy only provides the means to automate the execution of these decisions. * With SQLAlchemy, there's no such thing as "the ORM generated a bad query" - you retain full control over the structure of queries, including how joins are organized, how subqueries and correlation is used, what columns are requested. Everything SQLAlchemy does is ultimately the result of a developer- initiated decision. * Don't use an ORM if the problem doesn't need one. SQLAlchemy consists of a Core and separate ORM component. The Core offers a full SQL expression language that allows Pythonic construction of SQL constructs that render directly to SQL strings for a target database, returning result sets that are essentially enhanced DBAPI cursors. * Transactions should be the norm. With SQLAlchemy's ORM, nothing goes to permanent storage until commit() is called. SQLAlchemy encourages applications to create a consistent means of delineating the start and end of a series of operations. * Never render a literal value in a SQL statement. Bound parameters are used to the greatest degree possible, allowing query optimizers to cache query plans effectively and making SQL injection attacks a non-issue. 
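To make the last few points concrete, here is a minimal sketch using the Core expression language against an in-memory SQLite database; the table and values are invented purely for illustration::

    from sqlalchemy import (MetaData, Table, Column, Integer, String,
                            create_engine, select)

    metadata = MetaData()
    users = Table('users', metadata,
                  Column('id', Integer, primary_key=True),
                  Column('name', String(50)))

    engine = create_engine("sqlite:///:memory:")
    metadata.create_all(engine)

    conn = engine.connect()
    with conn.begin():
        # nothing is permanent until the transaction commits
        conn.execute(users.insert(), {"name": "ed"})

    stmt = select([users]).where(users.c.name == 'ed')
    print(stmt)    # the literal 'ed' renders as a bound parameter, not inline SQL
    print(conn.execute(stmt).fetchall())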
Documentation ------------- Latest documentation is at: http://www.sqlalchemy.org/docs/ Installation / Requirements --------------------------- Full documentation for installation is at `Installation `_. Getting Help / Development / Bug reporting ------------------------------------------ Please refer to the `SQLAlchemy Community Guide `_. License ------- SQLAlchemy is distributed under the `MIT license `_. SQLAlchemy-0.8.4/README.unittests.rst0000644000076500000240000001723012251150015017754 0ustar classicstaff00000000000000===================== SQLALCHEMY UNIT TESTS ===================== SQLAlchemy unit tests by default run using Python's built-in sqlite3 module. If running on Python 2.4, pysqlite must be installed. Unit tests are run using nose. Nose is available at:: https://pypi.python.org/pypi/nose/ SQLAlchemy implements a nose plugin that must be present when tests are run. This plugin is invoked when the test runner script provided with SQLAlchemy is used. The test suite as of version 0.8.2 also requires the mock library. While mock is part of the Python standard library as of 3.3, previous versions will need to have it installed, and is available at:: https://pypi.python.org/pypi/mock **NOTE:** - the nose plugin is no longer installed by setuptools as of version 0.7 ! Use "python setup.py test" or "./sqla_nose.py". RUNNING TESTS VIA SETUP.PY -------------------------- A plain vanilla run of all tests using sqlite can be run via setup.py: $ python setup.py test The -v flag also works here:: $ python setup.py test -v RUNNING ALL TESTS ------------------ To run all tests:: $ ./sqla_nose.py If you're running the tests on Microsoft Windows, then there is an additional argument that must be passed to ./sqla_nose.py:: > ./sqla_nose.py --first-package-wins This is required because nose's importer will normally evict a package from sys.modules if it sees a package with the same name in a different location. Setting this argument disables that behavior. Assuming all tests pass, this is a very unexciting output. To make it more interesting:: $ ./sqla_nose.py -v RUNNING INDIVIDUAL TESTS ------------------------- Any directory of test modules can be run at once by specifying the directory path:: $ ./sqla_nose.py test/dialect Any test module can be run directly by specifying its module name:: $ ./sqla_nose.py test.orm.test_mapper To run a specific test within the module, specify it as module:ClassName.methodname:: $ ./sqla_nose.py test.orm.test_mapper:MapperTest.test_utils COMMAND LINE OPTIONS -------------------- Help is available via --help:: $ ./sqla_nose.py --help The --help screen is a combination of common nose options and options which the SQLAlchemy nose plugin adds. The most commonly SQLAlchemy-specific options used are '--db' and '--dburi'. DATABASE TARGETS ---------------- Tests will target an in-memory SQLite database by default. To test against another database, use the --dburi option with any standard SQLAlchemy URL:: --dburi=postgresql://user:password@localhost/test Use an empty database and a database user with general DBA privileges. The test suite will be creating and dropping many tables and other DDL, and preexisting tables will interfere with the tests. Several tests require alternate usernames or schemas to be present, which are used to test dotted-name access scenarios. On some databases such as Oracle or Sybase, these are usernames, and others such as Postgresql and MySQL they are schemas. The requirement applies to all backends except SQLite and Firebird. 
The names are:: test_schema test_schema_2 (only used on Postgresql) Please refer to your vendor documentation for the proper syntax to create these namespaces - the database user must have permission to create and drop tables within these schemas. It's perfectly fine to run the test suite without these namespaces present; it only means that a handful of tests which expect them to be present will fail. Additional steps specific to individual databases are as follows:: MYSQL: Default storage engine should be "MyISAM". Tests that require "InnoDB" as the engine will specify this explicitly. ORACLE: a user named "test_schema" is created. The primary database user needs to be able to create and drop tables, synonyms, and constraints within the "test_schema" user. For this to work fully, including that the user has the "REFERENCES" role in a remote schema for tables not yet defined (REFERENCES is per-table), it is required that the test user be present in the "DBA" role: grant dba to scott; SYBASE: Similar to Oracle, "test_schema" is created as a user, and the primary test user needs to have the "sa_role". It's also recommended to turn on "trunc log on chkpt" and to use a separate transaction log device - Sybase basically seizes up when the transaction log is full otherwise. A full series of setup assuming sa/master: disk init name="translog", physname="/opt/sybase/data/translog.dat", size="10M" create database sqlalchemy on default log on translog="10M" sp_dboption sqlalchemy, "trunc log on chkpt", true sp_addlogin scott, "tiger7" sp_addlogin test_schema, "tiger7" use sqlalchemy sp_adduser scott sp_adduser test_schema grant all to scott sp_role "grant", sa_role, scott Sybase will still freeze for up to a minute when the log becomes full. To manually dump the log:: dump tran sqlalchemy with truncate_only MSSQL: Tests that involve multiple connections require Snapshot Isolation ability implemented on the test database in order to prevent deadlocks that will occur with record locking isolation. This feature is only available with MSSQL 2005 and greater. You must enable snapshot isolation at the database level and set the default cursor isolation with two SQL commands: ALTER DATABASE MyDatabase SET ALLOW_SNAPSHOT_ISOLATION ON ALTER DATABASE MyDatabase SET READ_COMMITTED_SNAPSHOT ON MSSQL+zxJDBC: Trying to run the unit tests on Windows against SQL Server requires using a test.cfg configuration file as the cmd.exe shell won't properly pass the URL arguments into the nose test runner. If you'll be running the tests frequently, database aliases can save a lot of typing. The --dbs option lists the built-in aliases and their matching URLs:: $ ./sqla_nose.py --dbs Available --db options (use --dburi to override) mysql mysql://scott:tiger@127.0.0.1:3306/test oracle oracle://scott:tiger@127.0.0.1:1521 postgresql postgresql://scott:tiger@127.0.0.1:5432/test [...] To run tests against an aliased database:: $ ./sqla_nose.py --db=postgresql To customize the URLs with your own users or hostnames, create a file called `test.cfg` at the top level of the SQLAlchemy source distribution. This file is in Python config format, and contains a [db] section which lists out additional database configurations:: [db] postgresql=postgresql://myuser:mypass@localhost/mydb Your custom entries will override the defaults and you'll see them reflected in the output of --dbs. CONFIGURING LOGGING ------------------- SQLAlchemy logs its activity and debugging through Python's logging package.
Any log target can be directed to the console with command line options, such as:: $ ./sqla_nose.py test.orm.unitofwork --log-info=sqlalchemy.orm.mapper \ --log-debug=sqlalchemy.pool --log-info=sqlalchemy.engine This would log mapper configuration, connection pool checkouts, and SQL statement execution. BUILT-IN COVERAGE REPORTING ------------------------------ Coverage is tracked using Nose's coverage plugin. See the nose documentation for details. Basic usage is:: $ ./sqla_nose.py test.sql.test_query --with-coverage BIG COVERAGE TIP !!! There is an issue where existing .pyc files may store the incorrect filepaths, which will break the coverage system. If coverage numbers are coming out as low/zero, try deleting all .pyc files. DEVELOPING AND TESTING NEW DIALECTS ----------------------------------- See the new file README.dialects.rst for detail on dialects. SQLAlchemy-0.8.4/sa2to3.py0000644000076500000240000000361212251150015015710 0ustar classicstaff00000000000000"""SQLAlchemy 2to3 tool. This tool monkeypatches a preprocessor onto lib2to3.refactor.RefactoringTool, so that conditional sections can replace non-fixable Python 2 code sections for the appropriate Python 3 version before 2to3 is run. """ from lib2to3 import main, refactor import re py3k_pattern = re.compile(r'\s*# Py3K') comment_pattern = re.compile(r'(\s*)#(?! ?Py2K)(.*)') py2k_pattern = re.compile(r'\s*# Py2K') end_py2k_pattern = re.compile(r'\s*# end Py2K') def preprocess(data): lines = data.split('\n') def consume_normal(): while lines: line = lines.pop(0) if py3k_pattern.match(line): for line in consume_py3k(): yield line elif py2k_pattern.match(line): for line in consume_py2k(): yield line else: yield line def consume_py3k(): yield "# start Py3K" while lines: line = lines.pop(0) m = comment_pattern.match(line) if m: yield "%s%s" % m.group(1, 2) else: # pushback lines.insert(0, line) break yield "# end Py3K" def consume_py2k(): yield "# start Py2K" while lines: line = lines.pop(0) if not end_py2k_pattern.match(line): yield "#%s" % line else: break yield "# end Py2K" return "\n".join(consume_normal()) old_refactor_string = refactor.RefactoringTool.refactor_string def refactor_string(self, data, name): newdata = preprocess(data) tree = old_refactor_string(self, newdata, name) if tree: if newdata != data: tree.was_changed = True return tree if __name__ == '__main__': refactor.RefactoringTool.refactor_string = refactor_string main.main("lib2to3.fixes") SQLAlchemy-0.8.4/setup.cfg0000644000076500000240000000175512251151573016064 0ustar classicstaff00000000000000[egg_info] tag_build = tag_date = 0 tag_svn_revision = 0 [nosetests] with-sqla_testing = true exclude = ^examples first-package-wins = true where = test [upload] sign = 1 identity = C4DAFEE1 [sqla_testing] requirement_cls = test.requirements:DefaultRequirements profile_file = test/profiles.txt oracle_db_link = test_link [db] default = sqlite:///:memory: sqlite = sqlite:///:memory: sqlite_file = sqlite:///querytest.db postgresql = postgresql://scott:tiger@127.0.0.1:5432/test postgres = postgresql://scott:tiger@127.0.0.1:5432/test pg8000 = postgresql+pg8000://scott:tiger@127.0.0.1:5432/test postgresql_jython = postgresql+zxjdbc://scott:tiger@127.0.0.1:5432/test mysql_jython = mysql+zxjdbc://scott:tiger@127.0.0.1:5432/test mysql = mysql://scott:tiger@127.0.0.1:3306/test pymysql = mysql+pymysql://scott:tiger@127.0.0.1:3306/test?use_unicode=0&charset=utf8 oracle = oracle://scott:tiger@127.0.0.1:1521 oracle8 = oracle://scott:tiger@127.0.0.1:1521/?use_ansi=0 maxdb = 
maxdb://MONA:RED@/maxdb1 SQLAlchemy-0.8.4/setup.py0000644000076500000240000001434212251150015015737 0ustar classicstaff00000000000000"""setup.py Please see README for basic installation instructions. """ import os import re import sys from distutils.command.build_ext import build_ext from distutils.errors import (CCompilerError, DistutilsExecError, DistutilsPlatformError) try: from setuptools import setup, Extension, Feature has_setuptools = True except ImportError: has_setuptools = False from distutils.core import setup, Extension Feature = None try: # Python 3 from distutils.command.build_py import build_py_2to3 as build_py except ImportError: # Python 2 from distutils.command.build_py import build_py cmdclass = {} pypy = hasattr(sys, 'pypy_version_info') jython = sys.platform.startswith('java') py3k = False extra = {} if sys.version_info < (2, 4): raise Exception("SQLAlchemy requires Python 2.4 or higher.") elif sys.version_info >= (3, 0): py3k = True if has_setuptools: extra.update( use_2to3=True, ) else: cmdclass['build_py'] = build_py ext_modules = [ Extension('sqlalchemy.cprocessors', sources=['lib/sqlalchemy/cextension/processors.c']), Extension('sqlalchemy.cresultproxy', sources=['lib/sqlalchemy/cextension/resultproxy.c']), Extension('sqlalchemy.cutils', sources=['lib/sqlalchemy/cextension/utils.c']) ] ext_errors = (CCompilerError, DistutilsExecError, DistutilsPlatformError) if sys.platform == 'win32' and sys.version_info > (2, 6): # 2.6's distutils.msvc9compiler can raise an IOError when failing to # find the compiler ext_errors += (IOError,) class BuildFailed(Exception): def __init__(self): self.cause = sys.exc_info()[1] # work around py 2/3 different syntax class ve_build_ext(build_ext): # This class allows C extension building to fail. 
def run(self): try: build_ext.run(self) except DistutilsPlatformError: raise BuildFailed() def build_extension(self, ext): try: build_ext.build_extension(self, ext) except ext_errors: raise BuildFailed() except ValueError: # this can happen on Windows 64 bit, see Python issue 7511 if "'path'" in str(sys.exc_info()[1]): # works with both py 2/3 raise BuildFailed() raise cmdclass['build_ext'] = ve_build_ext def status_msgs(*msgs): print('*' * 75) for msg in msgs: print(msg) print('*' * 75) def find_packages(location): packages = [] for pkg in ['sqlalchemy']: for _dir, subdirectories, files in ( os.walk(os.path.join(location, pkg)) ): if '__init__.py' in files: tokens = _dir.split(os.sep)[len(location.split(os.sep)):] packages.append(".".join(tokens)) return packages v_file = open(os.path.join(os.path.dirname(__file__), 'lib', 'sqlalchemy', '__init__.py')) VERSION = re.compile(r".*__version__ = '(.*?)'", re.S).match(v_file.read()).group(1) v_file.close() r_file = open(os.path.join(os.path.dirname(__file__), 'README.rst')) readme = r_file.read() r_file.close() def run_setup(with_cext): kwargs = extra.copy() if with_cext: if Feature: kwargs['features'] = {'cextensions': Feature( "optional C speed-enhancements", standard=True, ext_modules=ext_modules )} else: kwargs['ext_modules'] = ext_modules setup(name="SQLAlchemy", version=VERSION, description="Database Abstraction Library", author="Mike Bayer", author_email="mike_mp@zzzcomputing.com", url="http://www.sqlalchemy.org", packages=find_packages('lib'), package_dir={'': 'lib'}, license="MIT License", cmdclass=cmdclass, tests_require=['nose >= 0.11', 'mock'], test_suite="sqla_nose", long_description=readme, classifiers=[ "Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: Jython", "Programming Language :: Python :: Implementation :: PyPy", "Topic :: Database :: Front-Ends", "Operating System :: OS Independent", ], **kwargs ) def monkeypatch2to3(): from sa2to3 import refactor_string from lib2to3.refactor import RefactoringTool RefactoringTool.old_refactor_string = RefactoringTool.refactor_string RefactoringTool.refactor_string = refactor_string def unmonkeypatch2to3(): from lib2to3.refactor import RefactoringTool if hasattr(RefactoringTool, 'old_refactor_string'): RefactoringTool.refactor_string = RefactoringTool.old_refactor_string if pypy or jython or py3k: if py3k: # monkeypatch our preprocessor onto the 2to3 tool. monkeypatch2to3() try: run_setup(False) finally: if py3k: # unmonkeypatch to not stomp other setup.py's that are compiled # and exec'd and which also require 2to3 fixing unmonkeypatch2to3() status_msgs( "WARNING: C extensions are not supported on " + "this Python platform, speedups are not enabled.", "Plain-Python build succeeded." ) else: try: run_setup(True) except BuildFailed: exc = sys.exc_info()[1] # work around py 2/3 different syntax status_msgs( exc.cause, "WARNING: The C extension could not be compiled, " + "speedups are not enabled.", "Failure information, if any, is above.", "Retrying the build without the C extension now." ) run_setup(False) status_msgs( "WARNING: The C extension could not be compiled, " + "speedups are not enabled.", "Plain-Python build succeeded." 
) SQLAlchemy-0.8.4/sqla_nose.py0000755000076500000240000000106712251147171016577 0ustar classicstaff00000000000000#!/usr/bin/env python """ nose runner script. This script is a front-end to "nosetests" which installs SQLAlchemy's testing plugin into the local environment. """ import sys import imp import nose from os import path for pth in ['./lib']: sys.path.insert(0, path.join(path.dirname(path.abspath(__file__)), pth)) # installing without importing SQLAlchemy, so that coverage includes # SQLAlchemy itself. path = "lib/sqlalchemy/testing/plugin/noseplugin.py" noseplugin = imp.load_source("noseplugin", path) nose.main(addplugins=[noseplugin.NoseSQLAlchemy()]) SQLAlchemy-0.8.4/test/0000755000076500000240000000000012251151573015212 5ustar classicstaff00000000000000SQLAlchemy-0.8.4/test/__init__.py0000644000076500000240000000000012251147171017310 0ustar classicstaff00000000000000SQLAlchemy-0.8.4/test/aaa_profiling/0000755000076500000240000000000012251151573020005 5ustar classicstaff00000000000000SQLAlchemy-0.8.4/test/aaa_profiling/__init__.py0000644000076500000240000000000012251147171022103 0ustar classicstaff00000000000000SQLAlchemy-0.8.4/test/aaa_profiling/test_compiler.py0000644000076500000240000000432112251150015023216 0ustar classicstaff00000000000000from sqlalchemy import * from sqlalchemy.testing import * from sqlalchemy.engine import default class CompileTest(fixtures.TestBase, AssertsExecutionResults): __requires__ = 'cpython', @classmethod def setup_class(cls): global t1, t2, metadata metadata = MetaData() t1 = Table('t1', metadata, Column('c1', Integer, primary_key=True), Column('c2', String(30))) t2 = Table('t2', metadata, Column('c1', Integer, primary_key=True), Column('c2', String(30))) # do a "compile" ahead of time to load # deferred imports t1.insert().compile() # go through all the TypeEngine # objects in use and pre-load their _type_affinity # entries. 
for t in (t1, t2): for c in t.c: c.type._type_affinity from sqlalchemy import types for t in types._type_map.values(): t._type_affinity cls.dialect = default.DefaultDialect() @profiling.function_call_count() def test_insert(self): t1.insert().compile(dialect=self.dialect) @profiling.function_call_count() def test_update(self): t1.update().compile(dialect=self.dialect) def test_update_whereclause(self): t1.update().where(t1.c.c2 == 12).compile(dialect=self.dialect) @profiling.function_call_count() def go(): t1.update().where(t1.c.c2 == 12).compile(dialect=self.dialect) go() def test_select(self): # give some of the cached type values # a chance to warm up s = select([t1], t1.c.c2 == t2.c.c1) s.compile(dialect=self.dialect) @profiling.function_call_count() def go(): s = select([t1], t1.c.c2 == t2.c.c1) s.compile(dialect=self.dialect) go() def test_select_labels(self): # give some of the cached type values # a chance to warm up s = select([t1], t1.c.c2 == t2.c.c1).apply_labels() s.compile(dialect=self.dialect) @profiling.function_call_count() def go(): s = select([t1], t1.c.c2 == t2.c.c1).apply_labels() s.compile(dialect=self.dialect) go()SQLAlchemy-0.8.4/test/aaa_profiling/test_memusage.py0000644000076500000240000004744612251150015023226 0ustar classicstaff00000000000000from sqlalchemy.testing import eq_ from sqlalchemy.orm import mapper, relationship, create_session, \ clear_mappers, sessionmaker, aliased,\ Session, subqueryload from sqlalchemy.orm.mapper import _mapper_registry from sqlalchemy.orm.session import _sessions from sqlalchemy import testing from sqlalchemy.testing import engines from sqlalchemy import MetaData, Integer, String, ForeignKey, \ Unicode, select import sqlalchemy as sa from sqlalchemy.testing.schema import Table, Column from sqlalchemy.sql import column from sqlalchemy.processors import to_decimal_processor_factory, \ to_unicode_processor_factory from sqlalchemy.testing.util import gc_collect import decimal import gc from sqlalchemy.testing import fixtures import weakref class A(fixtures.ComparableEntity): pass class B(fixtures.ComparableEntity): pass class ASub(A): pass def profile_memory(times=50): def decorate(func): # run the test 50 times. if length of gc.get_objects() # keeps growing, assert false def get_objects_skipping_sqlite_issue(): # pysqlite keeps adding weakref objects which only # get reset after 220 iterations, which is too long # to run lots of these tests, so just filter them # out. 
return [o for o in gc.get_objects() if not isinstance(o, weakref.ref)] def profile(*args): gc_collect() samples = [0 for x in range(0, times)] for x in range(0, times): func(*args) gc_collect() samples[x] = len(get_objects_skipping_sqlite_issue()) print "sample gc sizes:", samples assert len(_sessions) == 0 for x in samples[-4:]: if x != samples[-5]: flatline = False break else: flatline = True # object count is bigger than when it started if not flatline and samples[-1] > samples[0]: for x in samples[1:-2]: # see if a spike bigger than the endpoint exists if x > samples[-1]: break else: assert False, repr(samples) + " " + repr(flatline) return profile return decorate def assert_no_mappers(): clear_mappers() gc_collect() assert len(_mapper_registry) == 0 class EnsureZeroed(fixtures.ORMTest): def setup(self): _sessions.clear() _mapper_registry.clear() class MemUsageTest(EnsureZeroed): __requires__ = 'cpython', # ensure a pure growing test trips the assertion @testing.fails_if(lambda: True) def test_fixture(self): class Foo(object): pass x = [] @profile_memory() def go(): x[-1:] = [Foo(), Foo(), Foo(), Foo(), Foo(), Foo()] go() def test_session(self): metadata = MetaData(testing.db) table1 = Table("mytable", metadata, Column('col1', Integer, primary_key=True, test_needs_autoincrement=True), Column('col2', String(30))) table2 = Table("mytable2", metadata, Column('col1', Integer, primary_key=True, test_needs_autoincrement=True), Column('col2', String(30)), Column('col3', Integer, ForeignKey("mytable.col1"))) metadata.create_all() m1 = mapper(A, table1, properties={ "bs":relationship(B, cascade="all, delete", order_by=table2.c.col1)}, order_by=table1.c.col1) m2 = mapper(B, table2) m3 = mapper(A, table1, non_primary=True) @profile_memory() def go(): sess = create_session() a1 = A(col2="a1") a2 = A(col2="a2") a3 = A(col2="a3") a1.bs.append(B(col2="b1")) a1.bs.append(B(col2="b2")) a3.bs.append(B(col2="b3")) for x in [a1,a2,a3]: sess.add(x) sess.flush() sess.expunge_all() alist = sess.query(A).all() eq_( [ A(col2="a1", bs=[B(col2="b1"), B(col2="b2")]), A(col2="a2", bs=[]), A(col2="a3", bs=[B(col2="b3")]) ], alist) for a in alist: sess.delete(a) sess.flush() go() metadata.drop_all() del m1, m2, m3 assert_no_mappers() def test_sessionmaker(self): @profile_memory() def go(): sessmaker = sessionmaker(bind=testing.db) sess = sessmaker() r = sess.execute(select([1])) r.close() sess.close() del sess del sessmaker go() @testing.crashes('sqlite', ':memory: connection not suitable here') def test_orm_many_engines(self): metadata = MetaData(testing.db) table1 = Table("mytable", metadata, Column('col1', Integer, primary_key=True, test_needs_autoincrement=True), Column('col2', String(30))) table2 = Table("mytable2", metadata, Column('col1', Integer, primary_key=True, test_needs_autoincrement=True), Column('col2', String(30)), Column('col3', Integer, ForeignKey("mytable.col1"))) metadata.create_all() m1 = mapper(A, table1, properties={ "bs":relationship(B, cascade="all, delete", order_by=table2.c.col1)}, order_by=table1.c.col1, _compiled_cache_size=10 ) m2 = mapper(B, table2, _compiled_cache_size=10 ) m3 = mapper(A, table1, non_primary=True) @profile_memory() def go(): engine = engines.testing_engine( options={'logging_name':'FOO', 'pool_logging_name':'BAR', 'use_reaper':False} ) sess = create_session(bind=engine) a1 = A(col2="a1") a2 = A(col2="a2") a3 = A(col2="a3") a1.bs.append(B(col2="b1")) a1.bs.append(B(col2="b2")) a3.bs.append(B(col2="b3")) for x in [a1,a2,a3]: sess.add(x) sess.flush() sess.expunge_all() 
alist = sess.query(A).all() eq_( [ A(col2="a1", bs=[B(col2="b1"), B(col2="b2")]), A(col2="a2", bs=[]), A(col2="a3", bs=[B(col2="b3")]) ], alist) for a in alist: sess.delete(a) sess.flush() sess.close() engine.dispose() go() metadata.drop_all() del m1, m2, m3 assert_no_mappers() def test_ad_hoc_types(self): """test storage of bind processors, result processors in dialect-wide registry.""" from sqlalchemy.dialects import mysql, postgresql, sqlite from sqlalchemy import types eng = engines.testing_engine() for args in ( (types.Integer, ), (types.String, ), (types.PickleType, ), (types.Enum, 'a', 'b', 'c'), (sqlite.DATETIME, ), (postgresql.ENUM, 'a', 'b', 'c'), (types.Interval, ), (postgresql.INTERVAL, ), (mysql.VARCHAR, ), ): @profile_memory() def go(): type_ = args[0](*args[1:]) bp = type_._cached_bind_processor(eng.dialect) rp = type_._cached_result_processor(eng.dialect, 0) go() assert not eng.dialect._type_memos def test_many_updates(self): metadata = MetaData(testing.db) wide_table = Table('t', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), *[Column('col%d' % i, Integer) for i in range(10)] ) class Wide(object): pass mapper(Wide, wide_table, _compiled_cache_size=10) metadata.create_all() session = create_session() w1 = Wide() session.add(w1) session.flush() session.close() del session counter = [1] @profile_memory() def go(): session = create_session() w1 = session.query(Wide).first() x = counter[0] dec = 10 while dec > 0: # trying to count in binary here, # works enough to trip the test case if pow(2, dec) < x: setattr(w1, 'col%d' % dec, counter[0]) x -= pow(2, dec) dec -= 1 session.flush() session.close() counter[0] += 1 try: go() finally: metadata.drop_all() @testing.crashes('mysql+cymysql', 'blocking with cymysql >= 0.6') def test_unicode_warnings(self): metadata = MetaData(testing.db) table1 = Table('mytable', metadata, Column('col1', Integer, primary_key=True, test_needs_autoincrement=True), Column('col2', Unicode(30))) metadata.create_all() i = [1] # the times here is cranked way up so that we can see # pysqlite clearing out it's internal buffer and allow # the test to pass @testing.emits_warning() @profile_memory() def go(): # execute with a non-unicode object. a warning is emitted, # this warning shouldn't clog up memory. 
testing.db.execute(table1.select().where(table1.c.col2 == 'foo%d' % i[0])) i[0] += 1 try: go() finally: metadata.drop_all() def test_mapper_reset(self): metadata = MetaData(testing.db) table1 = Table("mytable", metadata, Column('col1', Integer, primary_key=True, test_needs_autoincrement=True), Column('col2', String(30))) table2 = Table("mytable2", metadata, Column('col1', Integer, primary_key=True, test_needs_autoincrement=True), Column('col2', String(30)), Column('col3', Integer, ForeignKey("mytable.col1"))) @profile_memory() def go(): m1 = mapper(A, table1, properties={ "bs":relationship(B, order_by=table2.c.col1) }) m2 = mapper(B, table2) m3 = mapper(A, table1, non_primary=True) sess = create_session() a1 = A(col2="a1") a2 = A(col2="a2") a3 = A(col2="a3") a1.bs.append(B(col2="b1")) a1.bs.append(B(col2="b2")) a3.bs.append(B(col2="b3")) for x in [a1,a2,a3]: sess.add(x) sess.flush() sess.expunge_all() alist = sess.query(A).order_by(A.col1).all() eq_( [ A(col2="a1", bs=[B(col2="b1"), B(col2="b2")]), A(col2="a2", bs=[]), A(col2="a3", bs=[B(col2="b3")]) ], alist) for a in alist: sess.delete(a) sess.flush() sess.close() clear_mappers() metadata.create_all() try: go() finally: metadata.drop_all() assert_no_mappers() def test_alias_pathing(self): metadata = MetaData(testing.db) a = Table("a", metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('bid', Integer, ForeignKey('b.id')), Column('type', String(30)) ) asub = Table("asub", metadata, Column('id', Integer, ForeignKey('a.id'), primary_key=True), Column('data', String(30))) b = Table("b", metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), ) mapper(A, a, polymorphic_identity='a', polymorphic_on=a.c.type) mapper(ASub, asub, inherits=A,polymorphic_identity='asub') m1 = mapper(B, b, properties={ 'as_':relationship(A) }) metadata.create_all() sess = Session() a1 = ASub(data="a1") a2 = ASub(data="a2") a3 = ASub(data="a3") b1 = B(as_=[a1, a2, a3]) sess.add(b1) sess.commit() del sess # sqlite has a slow enough growth here # that we have to run it more times to see the # "dip" again @profile_memory(times=120) def go(): sess = Session() sess.query(B).options(subqueryload(B.as_.of_type(ASub))).all() sess.close() try: go() finally: metadata.drop_all() clear_mappers() def test_path_registry(self): metadata = MetaData() a = Table("a", metadata, Column('id', Integer, primary_key=True), Column('foo', Integer), Column('bar', Integer) ) m1 = mapper(A, a) @profile_memory() def go(): ma = sa.inspect(aliased(A)) m1._path_registry[m1.attrs.foo][ma][m1.attrs.bar] go() clear_mappers() def test_with_inheritance(self): metadata = MetaData(testing.db) table1 = Table("mytable", metadata, Column('col1', Integer, primary_key=True, test_needs_autoincrement=True), Column('col2', String(30)) ) table2 = Table("mytable2", metadata, Column('col1', Integer, ForeignKey('mytable.col1'), primary_key=True, test_needs_autoincrement=True), Column('col3', String(30)), ) @profile_memory() def go(): class A(fixtures.ComparableEntity): pass class B(A): pass mapper(A, table1, polymorphic_on=table1.c.col2, polymorphic_identity='a') mapper(B, table2, inherits=A, polymorphic_identity='b') sess = create_session() a1 = A() a2 = A() b1 = B(col3='b1') b2 = B(col3='b2') for x in [a1,a2,b1, b2]: sess.add(x) sess.flush() sess.expunge_all() alist = sess.query(A).order_by(A.col1).all() eq_( [ A(), A(), B(col3='b1'), B(col3='b2') ], alist) for a in alist: sess.delete(a) sess.flush() # dont need to clear_mappers() del B del A 
metadata.create_all() try: go() finally: metadata.drop_all() assert_no_mappers() def test_with_manytomany(self): metadata = MetaData(testing.db) table1 = Table("mytable", metadata, Column('col1', Integer, primary_key=True, test_needs_autoincrement=True), Column('col2', String(30)) ) table2 = Table("mytable2", metadata, Column('col1', Integer, primary_key=True, test_needs_autoincrement=True), Column('col2', String(30)), ) table3 = Table('t1tot2', metadata, Column('t1', Integer, ForeignKey('mytable.col1')), Column('t2', Integer, ForeignKey('mytable2.col1')), ) @profile_memory() def go(): class A(fixtures.ComparableEntity): pass class B(fixtures.ComparableEntity): pass mapper(A, table1, properties={ 'bs':relationship(B, secondary=table3, backref='as', order_by=table3.c.t1) }) mapper(B, table2) sess = create_session() a1 = A(col2='a1') a2 = A(col2='a2') b1 = B(col2='b1') b2 = B(col2='b2') a1.bs.append(b1) a2.bs.append(b2) for x in [a1,a2]: sess.add(x) sess.flush() sess.expunge_all() alist = sess.query(A).order_by(A.col1).all() eq_( [ A(bs=[B(col2='b1')]), A(bs=[B(col2='b2')]) ], alist) for a in alist: sess.delete(a) sess.flush() # dont need to clear_mappers() del B del A metadata.create_all() try: go() finally: metadata.drop_all() assert_no_mappers() @testing.provide_metadata def test_key_fallback_result(self): e = testing.db m = self.metadata t = Table('t', m, Column('x', Integer), Column('y', Integer)) m.create_all(e) e.execute(t.insert(), {"x":1, "y":1}) @profile_memory() def go(): r = e.execute(t.alias().select()) for row in r: row[t.c.x] go() # fails on newer versions of pysqlite due to unusual memory behvior # in pysqlite itself. background at: # http://thread.gmane.org/gmane.comp.python.db.pysqlite.user/2290 def test_join_cache(self): metadata = MetaData(testing.db) table1 = Table('table1', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('data', String(30))) table2 = Table('table2', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('data', String(30)), Column('t1id', Integer, ForeignKey('table1.id'))) class Foo(object): pass class Bar(object): pass mapper(Foo, table1, properties={'bars' : relationship(mapper(Bar, table2))}) metadata.create_all() session = sessionmaker() @profile_memory() def go(): s = table2.select() sess = session() sess.query(Foo).join((s, Foo.bars)).all() sess.rollback() try: go() finally: metadata.drop_all() def test_type_compile(self): from sqlalchemy.dialects.sqlite.base import dialect as SQLiteDialect cast = sa.cast(column('x'), sa.Integer) @profile_memory() def go(): dialect = SQLiteDialect() cast.compile(dialect=dialect) go() @testing.requires.cextensions def test_DecimalResultProcessor_init(self): @profile_memory() def go(): to_decimal_processor_factory({}, 10) go() @testing.requires.cextensions def test_DecimalResultProcessor_process(self): @profile_memory() def go(): to_decimal_processor_factory(decimal.Decimal, 10)(1.2) go() @testing.requires.cextensions def test_UnicodeResultProcessor_init(self): @profile_memory() def go(): to_unicode_processor_factory('utf8') go() SQLAlchemy-0.8.4/test/aaa_profiling/test_orm.py0000644000076500000240000002217712251150015022212 0ustar classicstaff00000000000000from sqlalchemy.testing import eq_, assert_raises, \ assert_raises_message from sqlalchemy import exc as sa_exc, util, Integer, String, ForeignKey from sqlalchemy.orm import exc as orm_exc, mapper, relationship, \ sessionmaker, Session, defer from sqlalchemy import testing from 
sqlalchemy.testing import profiling from sqlalchemy.testing import fixtures from sqlalchemy.testing.schema import Table, Column import sys class MergeTest(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): Table('parent', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('data', String(20))) Table('child', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('data', String(20)), Column('parent_id', Integer, ForeignKey('parent.id'), nullable=False)) @classmethod def setup_classes(cls): class Parent(cls.Basic): pass class Child(cls.Basic): pass @classmethod def setup_mappers(cls): Child, Parent, parent, child = (cls.classes.Child, cls.classes.Parent, cls.tables.parent, cls.tables.child) mapper(Parent, parent, properties={'children': relationship(Child, backref='parent')}) mapper(Child, child) @classmethod def insert_data(cls): parent, child = cls.tables.parent, cls.tables.child parent.insert().execute({'id': 1, 'data': 'p1'}) child.insert().execute({'id': 1, 'data': 'p1c1', 'parent_id': 1}) def test_merge_no_load(self): Parent = self.classes.Parent sess = sessionmaker()() sess2 = sessionmaker()() p1 = sess.query(Parent).get(1) p1.children # down from 185 on this this is a small slice of a usually # bigger operation so using a small variance @profiling.function_call_count(variance=0.10) def go1(): return sess2.merge(p1, load=False) p2 = go1() # third call, merge object already present. almost no calls. @profiling.function_call_count(variance=0.10) def go2(): return sess2.merge(p2, load=False) go2() def test_merge_load(self): Parent = self.classes.Parent sess = sessionmaker()() sess2 = sessionmaker()() p1 = sess.query(Parent).get(1) p1.children # preloading of collection took this down from 1728 to 1192 # using sqlite3 the C extension took it back up to approx. 1257 # (py2.6) @profiling.function_call_count() def go(): p2 = sess2.merge(p1) go() # one more time, count the SQL def go2(): p2 = sess2.merge(p1) sess2 = sessionmaker(testing.db)() self.assert_sql_count(testing.db, go2, 2) class LoadManyToOneFromIdentityTest(fixtures.MappedTest): """test overhead associated with many-to-one fetches. Prior to the refactor of LoadLazyAttribute and query._get(), the load from identity map took 2x as many calls (65K calls here instead of around 33K) to load 1000 related objects from the identity map. 
""" @classmethod def define_tables(cls, metadata): Table('parent', metadata, Column('id', Integer, primary_key=True), Column('data', String(20)), Column('child_id', Integer, ForeignKey('child.id')) ) Table('child', metadata, Column('id', Integer, primary_key=True), Column('data', String(20)) ) @classmethod def setup_classes(cls): class Parent(cls.Basic): pass class Child(cls.Basic): pass @classmethod def setup_mappers(cls): Child, Parent, parent, child = (cls.classes.Child, cls.classes.Parent, cls.tables.parent, cls.tables.child) mapper(Parent, parent, properties={ 'child': relationship(Child)}) mapper(Child, child) @classmethod def insert_data(cls): parent, child = cls.tables.parent, cls.tables.child child.insert().execute([ {'id':i, 'data':'c%d' % i} for i in xrange(1, 251) ]) parent.insert().execute([ { 'id':i, 'data':'p%dc%d' % (i, (i % 250) + 1), 'child_id':(i % 250) + 1 } for i in xrange(1, 1000) ]) def test_many_to_one_load_no_identity(self): Parent = self.classes.Parent sess = Session() parents = sess.query(Parent).all() @profiling.function_call_count(variance=.2) def go(): for p in parents: p.child go() def test_many_to_one_load_identity(self): Parent, Child = self.classes.Parent, self.classes.Child sess = Session() parents = sess.query(Parent).all() children = sess.query(Child).all() @profiling.function_call_count() def go(): for p in parents: p.child go() class MergeBackrefsTest(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): Table('a', metadata, Column('id', Integer, primary_key=True), Column('c_id', Integer, ForeignKey('c.id')) ) Table('b', metadata, Column('id', Integer, primary_key=True), Column('a_id', Integer, ForeignKey('a.id')) ) Table('c', metadata, Column('id', Integer, primary_key=True), ) Table('d', metadata, Column('id', Integer, primary_key=True), Column('a_id', Integer, ForeignKey('a.id')) ) @classmethod def setup_classes(cls): class A(cls.Basic): pass class B(cls.Basic): pass class C(cls.Basic): pass class D(cls.Basic): pass @classmethod def setup_mappers(cls): A, B, C, D = cls.classes.A, cls.classes.B, \ cls.classes.C, cls.classes.D a, b, c, d = cls.tables.a, cls.tables.b, \ cls.tables.c, cls.tables.d mapper(A, a, properties={ 'bs': relationship(B, backref='a'), 'c': relationship(C, backref='as'), 'ds': relationship(D, backref='a'), }) mapper(B, b) mapper(C, c) mapper(D, d) @classmethod def insert_data(cls): A, B, C, D = cls.classes.A, cls.classes.B, \ cls.classes.C, cls.classes.D s = Session() s.add_all([ A(id=i, bs=[B(id=(i * 5) + j) for j in xrange(1, 5)], c=C(id=i), ds=[D(id=(i * 5) + j) for j in xrange(1, 5)] ) for i in xrange(1, 5) ]) s.commit() @profiling.function_call_count(variance=.10) def test_merge_pending_with_all_pks(self): A, B, C, D = self.classes.A, self.classes.B, \ self.classes.C, self.classes.D s = Session() for a in [ A(id=i, bs=[B(id=(i * 5) + j) for j in xrange(1, 5)], c=C(id=i), ds=[D(id=(i * 5) + j) for j in xrange(1, 5)] ) for i in xrange(1, 5) ]: s.merge(a) class DeferOptionsTest(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): Table('a', metadata, Column('id', Integer, primary_key=True), Column('x', String(5)), Column('y', String(5)), Column('z', String(5)), Column('q', String(5)), Column('p', String(5)), Column('r', String(5)), ) @classmethod def setup_classes(cls): class A(cls.Basic): pass @classmethod def setup_mappers(cls): A = cls.classes.A a = cls.tables.a mapper(A, a) @classmethod def insert_data(cls): A = cls.classes.A s = Session() s.add_all([ A(id=i, **dict((letter, "%s%d" % (letter, 
i)) for letter in ['x', 'y', 'z', 'p', 'q', 'r']) ) for i in range(1, 1001) ]) s.commit() @profiling.function_call_count(variance=.10) def test_baseline(self): # as of [ticket:2778], this is at 39025 A = self.classes.A s = Session() s.query(A).all() @profiling.function_call_count(variance=.10) def test_defer_many_cols(self): # with [ticket:2778], this goes from 50805 to 32817, # as it should be fewer function calls than the baseline A = self.classes.A s = Session() s.query(A).options( *[defer(letter) for letter in ['x', 'y', 'z', 'p', 'q', 'r']]).\ all() SQLAlchemy-0.8.4/test/aaa_profiling/test_pool.py0000644000076500000240000000314412251147171022370 0ustar classicstaff00000000000000from sqlalchemy import * from sqlalchemy.testing import * from sqlalchemy.pool import QueuePool from sqlalchemy import pool as pool_module class QueuePoolTest(fixtures.TestBase, AssertsExecutionResults): __requires__ = 'cpython', class Connection(object): def rollback(self): pass def close(self): pass def teardown(self): # the tests leave some fake connections # around which dont necessarily # get gc'ed as quickly as we'd like, # on backends like pypy, python3.2 pool_module._refs.clear() def setup(self): # create a throwaway pool which # has the effect of initializing # class-level event listeners on Pool, # if not present already. p1 = QueuePool(creator=self.Connection, pool_size=3, max_overflow=-1, use_threadlocal=True) p1.connect() global pool pool = QueuePool(creator=self.Connection, pool_size=3, max_overflow=-1, use_threadlocal=True) @profiling.function_call_count() def test_first_connect(self): conn = pool.connect() def test_second_connect(self): conn = pool.connect() conn.close() @profiling.function_call_count() def go(): conn2 = pool.connect() return conn2 c2 = go() def test_second_samethread_connect(self): conn = pool.connect() @profiling.function_call_count() def go(): return pool.connect() c2 = go() SQLAlchemy-0.8.4/test/aaa_profiling/test_resultset.py0000644000076500000240000001027012251150015023436 0ustar classicstaff00000000000000from sqlalchemy import * from sqlalchemy.testing import fixtures, AssertsExecutionResults, profiling from sqlalchemy import testing from sqlalchemy.testing import eq_ from sqlalchemy.engine.result import RowProxy import sys NUM_FIELDS = 10 NUM_RECORDS = 1000 class ResultSetTest(fixtures.TestBase, AssertsExecutionResults): @classmethod def setup_class(cls): global t, t2, metadata metadata = MetaData(testing.db) t = Table('table', metadata, *[Column('field%d' % fnum, String(50)) for fnum in range(NUM_FIELDS)]) t2 = Table('table2', metadata, *[Column('field%d' % fnum, Unicode(50)) for fnum in range(NUM_FIELDS)]) def setup(self): metadata.create_all() t.insert().execute([dict(('field%d' % fnum, u'value%d' % fnum) for fnum in range(NUM_FIELDS)) for r_num in range(NUM_RECORDS)]) t2.insert().execute([dict(('field%d' % fnum, u'value%d' % fnum) for fnum in range(NUM_FIELDS)) for r_num in range(NUM_RECORDS)]) # warm up type caches t.select().execute().fetchall() t2.select().execute().fetchall() def teardown(self): metadata.drop_all() @profiling.function_call_count() def test_string(self): [tuple(row) for row in t.select().execute().fetchall()] @profiling.function_call_count() def test_unicode(self): [tuple(row) for row in t2.select().execute().fetchall()] def test_contains_doesnt_compile(self): row = t.select().execute().first() c1 = Column('some column', Integer) + Column("some other column", Integer) @profiling.function_call_count() def go(): c1 in row go() class 
ExecutionTest(fixtures.TestBase): def test_minimal_connection_execute(self): # create an engine without any instrumentation. e = create_engine('sqlite://') c = e.connect() # ensure initial connect activities complete c.execute("select 1") @profiling.function_call_count() def go(): c.execute("select 1") go() def test_minimal_engine_execute(self): # create an engine without any instrumentation. e = create_engine('sqlite://') # ensure initial connect activities complete e.execute("select 1") @profiling.function_call_count() def go(): e.execute("select 1") go() class RowProxyTest(fixtures.TestBase): __requires__ = 'cpython', def _rowproxy_fixture(self, keys, processors, row): class MockMeta(object): def __init__(self): pass metadata = MockMeta() keymap = {} for index, (keyobjs, processor, values) in \ enumerate(zip(keys, processors, row)): for key in keyobjs: keymap[key] = (processor, key, index) keymap[index] = (processor, key, index) return RowProxy(metadata, row, processors, keymap) def _test_getitem_value_refcounts(self, seq_factory): col1, col2 = object(), object() def proc1(value): return value value1, value2 = "x", "y" row = self._rowproxy_fixture( [(col1, "a"), (col2, "b")], [proc1, None], seq_factory([value1, value2]) ) v1_refcount = sys.getrefcount(value1) v2_refcount = sys.getrefcount(value2) for i in range(10): row[col1] row["a"] row[col2] row["b"] row[0] row[1] row[0:2] eq_(sys.getrefcount(value1), v1_refcount) eq_(sys.getrefcount(value2), v2_refcount) def test_value_refcounts_pure_tuple(self): self._test_getitem_value_refcounts(tuple) def test_value_refcounts_custom_seq(self): class CustomSeq(object): def __init__(self, data): self.data = data def __getitem__(self, item): return self.data[item] def __iter__(self): return iter(self.data) self._test_getitem_value_refcounts(CustomSeq) SQLAlchemy-0.8.4/test/aaa_profiling/test_zoomark.py0000644000076500000240000003731112251150015023073 0ustar classicstaff00000000000000"""Benchmark for SQLAlchemy. An adaptation of Robert Brewers' ZooMark speed tests. """ import datetime import sys import time from sqlalchemy import * from sqlalchemy.testing import fixtures, engines, profiling from sqlalchemy import testing ITERATIONS = 1 dbapi_session = engines.ReplayableSession() metadata = None class ZooMarkTest(fixtures.TestBase): """Runs the ZooMark and squawks if method counts vary from the norm. Each test has an associated `call_range`, the total number of accepted function calls made during the test. The count can vary between Python 2.4 and 2.5. Unlike a unit test, this is a ordered collection of steps. Running components individually will fail. 
""" __requires__ = 'cpython', __only_on__ = 'postgresql+psycopg2' __skip_if__ = lambda : sys.version_info < (2, 5), def test_baseline_0_setup(self): global metadata creator = testing.db.pool._creator recorder = lambda : dbapi_session.recorder(creator()) engine = engines.testing_engine(options={'creator': recorder, 'use_reaper':False}) metadata = MetaData(engine) engine.connect() def test_baseline_1_create_tables(self): Zoo = Table( 'Zoo', metadata, Column('ID', Integer, Sequence('zoo_id_seq'), primary_key=True, index=True), Column('Name', Unicode(255)), Column('Founded', Date), Column('Opens', Time), Column('LastEscape', DateTime), Column('Admission', Float), ) Animal = Table( 'Animal', metadata, Column('ID', Integer, Sequence('animal_id_seq'), primary_key=True), Column('ZooID', Integer, ForeignKey('Zoo.ID'), index=True), Column('Name', Unicode(100)), Column('Species', Unicode(100)), Column('Legs', Integer, default=4), Column('LastEscape', DateTime), Column('Lifespan', Float(4)), Column('MotherID', Integer, ForeignKey('Animal.ID')), Column('PreferredFoodID', Integer), Column('AlternateFoodID', Integer), ) metadata.create_all() def test_baseline_1a_populate(self): Zoo = metadata.tables['Zoo'] Animal = metadata.tables['Animal'] engine = metadata.bind wap = engine.execute(Zoo.insert(), Name=u'Wild Animal Park', Founded=datetime.date(2000, 1, 1), Opens=datetime.time(8, 15, 59), LastEscape= datetime.datetime(2004, 7, 29, 5, 6, 7), Admission=4.95).inserted_primary_key[0] sdz = engine.execute(Zoo.insert(), Name=u'San Diego Zoo', Founded=datetime.date(1935, 9, 13), Opens=datetime.time(9, 0, 0), Admission=0).inserted_primary_key[0] engine.execute(Zoo.insert(inline=True), Name=u'Montr\xe9al Biod\xf4me', Founded=datetime.date(1992, 6, 19), Opens=datetime.time(9, 0, 0), Admission=11.75) seaworld = engine.execute(Zoo.insert(), Name=u'Sea_World', Admission=60).inserted_primary_key[0] # Let's add a crazy futuristic Zoo to test large date values. lp = engine.execute(Zoo.insert(), Name=u'Luna Park', Founded=datetime.date(2072, 7, 17), Opens=datetime.time(0, 0, 0), Admission=134.95).inserted_primary_key[0] # Animals leopardid = engine.execute(Animal.insert(), Species=u'Leopard', Lifespan=73.5).inserted_primary_key[0] engine.execute(Animal.update(Animal.c.ID == leopardid), ZooID=wap, LastEscape=datetime.datetime( 2004, 12, 21, 8, 15, 0, 999907,) ) lion = engine.execute(Animal.insert(), Species=u'Lion', ZooID=wap).inserted_primary_key[0] engine.execute(Animal.insert(), Species=u'Slug', Legs=1, Lifespan=.75) tiger = engine.execute(Animal.insert(), Species=u'Tiger', ZooID=sdz).inserted_primary_key[0] # Override Legs.default with itself just to make sure it works. 
engine.execute(Animal.insert(inline=True), Species=u'Bear', Legs=4) engine.execute(Animal.insert(inline=True), Species=u'Ostrich', Legs=2, Lifespan=103.2) engine.execute(Animal.insert(inline=True), Species=u'Centipede', Legs=100) emp = engine.execute(Animal.insert(), Species=u'Emperor Penguin', Legs=2, ZooID=seaworld).inserted_primary_key[0] adelie = engine.execute(Animal.insert(), Species=u'Adelie Penguin', Legs=2, ZooID=seaworld).inserted_primary_key[0] engine.execute(Animal.insert(inline=True), Species=u'Millipede', Legs=1000000, ZooID=sdz) # Add a mother and child to test relationships bai_yun = engine.execute(Animal.insert(), Species=u'Ape', Name=u'Bai Yun', Legs=2).inserted_primary_key[0] engine.execute(Animal.insert(inline=True), Species=u'Ape', Name=u'Hua Mei', Legs=2, MotherID=bai_yun) def test_baseline_2_insert(self): Animal = metadata.tables['Animal'] i = Animal.insert(inline=True) for x in xrange(ITERATIONS): tick = i.execute(Species=u'Tick', Name=u'Tick %d' % x, Legs=8) def test_baseline_3_properties(self): Zoo = metadata.tables['Zoo'] Animal = metadata.tables['Animal'] engine = metadata.bind def fullobject(select): """Iterate over the full result row.""" return list(engine.execute(select).first()) for x in xrange(ITERATIONS): # Zoos WAP = fullobject(Zoo.select(Zoo.c.Name == u'Wild Animal Park')) SDZ = fullobject(Zoo.select(Zoo.c.Founded == datetime.date(1935, 9, 13))) Biodome = fullobject(Zoo.select(Zoo.c.Name == u'Montr\xe9al Biod\xf4me')) seaworld = fullobject(Zoo.select(Zoo.c.Admission == float(60))) # Animals leopard = fullobject(Animal.select(Animal.c.Species == u'Leopard')) ostrich = fullobject(Animal.select(Animal.c.Species == u'Ostrich')) millipede = fullobject(Animal.select(Animal.c.Legs == 1000000)) ticks = fullobject(Animal.select(Animal.c.Species == u'Tick' )) def test_baseline_4_expressions(self): Zoo = metadata.tables['Zoo'] Animal = metadata.tables['Animal'] engine = metadata.bind def fulltable(select): """Iterate over the full result table.""" return [list(row) for row in engine.execute(select).fetchall()] for x in xrange(ITERATIONS): assert len(fulltable(Zoo.select())) == 5 assert len(fulltable(Animal.select())) == ITERATIONS + 12 assert len(fulltable(Animal.select(Animal.c.Legs == 4))) \ == 4 assert len(fulltable(Animal.select(Animal.c.Legs == 2))) \ == 5 assert len(fulltable(Animal.select(and_(Animal.c.Legs >= 2, Animal.c.Legs < 20)))) == ITERATIONS + 9 assert len(fulltable(Animal.select(Animal.c.Legs > 10))) \ == 2 assert len(fulltable(Animal.select(Animal.c.Lifespan > 70))) == 2 assert len(fulltable(Animal.select(Animal.c.Species. startswith(u'L')))) == 2 assert len(fulltable(Animal.select(Animal.c.Species. 
endswith(u'pede')))) == 2 assert len(fulltable(Animal.select(Animal.c.LastEscape != None))) == 1 assert len(fulltable(Animal.select(None == Animal.c.LastEscape))) == ITERATIONS + 11 # In operator (containedby) assert len(fulltable(Animal.select(Animal.c.Species.like(u'%pede%' )))) == 2 assert len(fulltable(Animal.select(Animal.c.Species.in_([u'Lion' , u'Tiger', u'Bear'])))) == 3 # Try In with cell references class thing(object): pass pet, pet2 = thing(), thing() pet.Name, pet2.Name = u'Slug', u'Ostrich' assert len(fulltable(Animal.select(Animal.c.Species.in_([pet.Name, pet2.Name])))) == 2 # logic and other functions assert len(fulltable(Animal.select(Animal.c.Species.like(u'Slug' )))) == 1 assert len(fulltable(Animal.select(Animal.c.Species.like(u'%pede%' )))) == 2 name = u'Lion' assert len(fulltable(Animal.select(func.length(Animal.c.Species) == len(name)))) == ITERATIONS + 3 assert len(fulltable(Animal.select(Animal.c.Species.like(u'%i%' )))) == ITERATIONS + 7 # Test now(), today(), year(), month(), day() assert len(fulltable(Zoo.select(and_(Zoo.c.Founded != None, Zoo.c.Founded < func.current_timestamp(_type=Date))))) == 3 assert len(fulltable(Animal.select(Animal.c.LastEscape == func.current_timestamp(_type=Date)))) == 0 assert len(fulltable(Animal.select(func.date_part('year', Animal.c.LastEscape) == 2004))) == 1 assert len(fulltable(Animal.select(func.date_part('month', Animal.c.LastEscape) == 12))) == 1 assert len(fulltable(Animal.select(func.date_part('day', Animal.c.LastEscape) == 21))) == 1 def test_baseline_5_aggregates(self): Animal = metadata.tables['Animal'] Zoo = metadata.tables['Zoo'] engine = metadata.bind for x in xrange(ITERATIONS): # views view = engine.execute(select([Animal.c.Legs])).fetchall() legs = [x[0] for x in view] legs.sort() expected = { 'Leopard': 73.5, 'Slug': .75, 'Tiger': None, 'Lion': None, 'Bear': None, 'Ostrich': 103.2, 'Centipede': None, 'Emperor Penguin': None, 'Adelie Penguin': None, 'Millipede': None, 'Ape': None, 'Tick': None, } for species, lifespan in engine.execute(select([Animal.c.Species, Animal.c.Lifespan])).fetchall(): assert lifespan == expected[species] expected = [u'Montr\xe9al Biod\xf4me', 'Wild Animal Park'] e = select([Zoo.c.Name], and_(Zoo.c.Founded != None, Zoo.c.Founded <= func.current_timestamp(), Zoo.c.Founded >= datetime.date(1990, 1, 1))) values = [val[0] for val in engine.execute(e).fetchall()] assert set(values) == set(expected) # distinct legs = [x[0] for x in engine.execute(select([Animal.c.Legs], distinct=True)).fetchall()] legs.sort() def test_baseline_6_editing(self): Zoo = metadata.tables['Zoo'] engine = metadata.bind for x in xrange(ITERATIONS): # Edit SDZ = engine.execute(Zoo.select(Zoo.c.Name == u'San Diego Zoo' )).first() engine.execute(Zoo.update(Zoo.c.ID == SDZ['ID' ]), Name=u'The San Diego Zoo', Founded=datetime.date(1900, 1, 1), Opens=datetime.time(7, 30, 0), Admission='35.00') # Test edits SDZ = engine.execute(Zoo.select(Zoo.c.Name == u'The San Diego Zoo' )).first() assert SDZ['Founded'] == datetime.date(1900, 1, 1), \ SDZ['Founded'] # Change it back engine.execute(Zoo.update(Zoo.c.ID == SDZ['ID' ]), Name=u'San Diego Zoo', Founded=datetime.date(1935, 9, 13), Opens=datetime.time(9, 0, 0), Admission='0') # Test re-edits SDZ = engine.execute(Zoo.select(Zoo.c.Name == u'San Diego Zoo' )).first() assert SDZ['Founded'] == datetime.date(1935, 9, 13) def test_baseline_7_multiview(self): Zoo = metadata.tables['Zoo'] Animal = metadata.tables['Animal'] engine = metadata.bind def fulltable(select): """Iterate over the full 
result table.""" return [list(row) for row in engine.execute(select).fetchall()] for x in xrange(ITERATIONS): za = fulltable(select([Zoo.c.ID] + list(Animal.c), Zoo.c.Name == u'San Diego Zoo', from_obj=[join(Zoo, Animal)])) SDZ = Zoo.select(Zoo.c.Name == u'San Diego Zoo') e = fulltable(select([Zoo.c.ID, Animal.c.ID], and_(Zoo.c.Name == u'San Diego Zoo', Animal.c.Species == u'Leopard'), from_obj=[join(Zoo, Animal)])) # Now try the same query with INNER, LEFT, and RIGHT JOINs. e = fulltable(select([Zoo.c.Name, Animal.c.Species], from_obj=[join(Zoo, Animal)])) e = fulltable(select([Zoo.c.Name, Animal.c.Species], from_obj=[outerjoin(Zoo, Animal)])) e = fulltable(select([Zoo.c.Name, Animal.c.Species], from_obj=[outerjoin(Animal, Zoo)])) def test_baseline_8_drop(self): metadata.drop_all() # Now, run all of these tests again with the DB-API driver factored # out: the ReplayableSession playback stands in for the database. # # How awkward is this in a unittest framework? Very. def test_profile_0(self): global metadata player = lambda : dbapi_session.player() engine = create_engine('postgresql:///', creator=player, use_native_hstore=False) metadata = MetaData(engine) engine.connect() def test_profile_1_create_tables(self): self.test_baseline_1_create_tables() @profiling.function_call_count() def test_profile_1a_populate(self): self.test_baseline_1a_populate() @profiling.function_call_count() def test_profile_2_insert(self): self.test_baseline_2_insert() @profiling.function_call_count() def test_profile_3_properties(self): self.test_baseline_3_properties() @profiling.function_call_count() def test_profile_4_expressions(self): self.test_baseline_4_expressions() @profiling.function_call_count() def test_profile_5_aggregates(self): self.test_baseline_5_aggregates() @profiling.function_call_count() def test_profile_6_editing(self): self.test_baseline_6_editing() @profiling.function_call_count() def test_profile_7_multiview(self): self.test_baseline_7_multiview() def test_profile_8_drop(self): self.test_baseline_8_drop() SQLAlchemy-0.8.4/test/aaa_profiling/test_zoomark_orm.py0000644000076500000240000003211512251150015023745 0ustar classicstaff00000000000000"""Benchmark for SQLAlchemy. An adaptation of Robert Brewers' ZooMark speed tests. """ import datetime import sys import time from sqlalchemy import * from sqlalchemy.orm import * from sqlalchemy.testing import fixtures, engines, profiling from sqlalchemy import testing ITERATIONS = 1 dbapi_session = engines.ReplayableSession() metadata = None class ZooMarkTest(fixtures.TestBase): """Runs the ZooMark and squawks if method counts vary from the norm. Each test has an associated `call_range`, the total number of accepted function calls made during the test. The count can vary between Python 2.4 and 2.5. Unlike a unit test, this is a ordered collection of steps. Running components individually will fail. 
""" __requires__ = 'cpython', __only_on__ = 'postgresql+psycopg2' __skip_if__ = lambda : sys.version_info < (2, 5), def test_baseline_0_setup(self): global metadata, session creator = testing.db.pool._creator recorder = lambda : dbapi_session.recorder(creator()) engine = engines.testing_engine(options={'creator': recorder, 'use_reaper':False}) metadata = MetaData(engine) session = sessionmaker(engine)() engine.connect() def test_baseline_1_create_tables(self): zoo = Table( 'Zoo', metadata, Column('ID', Integer, Sequence('zoo_id_seq'), primary_key=True, index=True), Column('Name', Unicode(255)), Column('Founded', Date), Column('Opens', Time), Column('LastEscape', DateTime), Column('Admission', Float), ) animal = Table( 'Animal', metadata, Column('ID', Integer, Sequence('animal_id_seq'), primary_key=True), Column('ZooID', Integer, ForeignKey('Zoo.ID'), index=True), Column('Name', Unicode(100)), Column('Species', Unicode(100)), Column('Legs', Integer, default=4), Column('LastEscape', DateTime), Column('Lifespan', Float(4)), Column('MotherID', Integer, ForeignKey('Animal.ID')), Column('PreferredFoodID', Integer), Column('AlternateFoodID', Integer), ) metadata.create_all() global Zoo, Animal class Zoo(object): def __init__(self, **kwargs): for k, v in kwargs.iteritems(): setattr(self, k, v) class Animal(object): def __init__(self, **kwargs): for k, v in kwargs.iteritems(): setattr(self, k, v) mapper(Zoo, zoo) mapper(Animal, animal) def test_baseline_1a_populate(self): wap = Zoo(Name=u'Wild Animal Park', Founded=datetime.date(2000, 1, 1), Opens=datetime.time(8, 15, 59), LastEscape=datetime.datetime( 2004, 7, 29, 5, 6, 7, ), Admission=4.95) session.add(wap) sdz = Zoo(Name=u'San Diego Zoo', Founded=datetime.date(1835, 9, 13), Opens=datetime.time(9, 0, 0), Admission=0) session.add(sdz) bio = Zoo(Name=u'Montr\xe9al Biod\xf4me', Founded=datetime.date(1992, 6, 19), Opens=datetime.time(9, 0, 0), Admission=11.75) session.add(bio) seaworld = Zoo(Name=u'Sea_World', Admission=60) session.add(seaworld) # Let's add a crazy futuristic Zoo to test large date values. lp = Zoo(Name=u'Luna Park', Founded=datetime.date(2072, 7, 17), Opens=datetime.time(0, 0, 0), Admission=134.95) session.add(lp) session.flush() # Animals leopard = Animal(Species=u'Leopard', Lifespan=73.5) session.add(leopard) leopard.ZooID = wap.ID leopard.LastEscape = \ datetime.datetime(2004, 12, 21, 8, 15, 0, 999907, ) session.add(Animal(Species=u'Lion', ZooID=wap.ID)) session.add(Animal(Species=u'Slug', Legs=1, Lifespan=.75)) session.add(Animal(Species=u'Tiger', ZooID=sdz.ID)) # Override Legs.default with itself just to make sure it works. 
session.add(Animal(Species=u'Bear', Legs=4)) session.add(Animal(Species=u'Ostrich', Legs=2, Lifespan=103.2)) session.add(Animal(Species=u'Centipede', Legs=100)) session.add(Animal(Species=u'Emperor Penguin', Legs=2, ZooID=seaworld.ID)) session.add(Animal(Species=u'Adelie Penguin', Legs=2, ZooID=seaworld.ID)) session.add(Animal(Species=u'Millipede', Legs=1000000, ZooID=sdz.ID)) # Add a mother and child to test relationships bai_yun = Animal(Species=u'Ape', Name=u'Bai Yun', Legs=2) session.add(bai_yun) session.add(Animal(Species=u'Ape', Name=u'Hua Mei', Legs=2, MotherID=bai_yun.ID)) session.flush() session.commit() def test_baseline_2_insert(self): for x in xrange(ITERATIONS): session.add(Animal(Species=u'Tick', Name=u'Tick %d' % x, Legs=8)) session.flush() def test_baseline_3_properties(self): for x in xrange(ITERATIONS): # Zoos WAP = list(session.query(Zoo).filter(Zoo.Name == u'Wild Animal Park')) SDZ = list(session.query(Zoo).filter(Zoo.Founded == datetime.date(1835, 9, 13))) Biodome = list(session.query(Zoo).filter(Zoo.Name == u'Montr\xe9al Biod\xf4me')) seaworld = list(session.query(Zoo).filter(Zoo.Admission == float(60))) # Animals leopard = list(session.query(Animal).filter(Animal.Species == u'Leopard')) ostrich = list(session.query(Animal).filter(Animal.Species == u'Ostrich')) millipede = list(session.query(Animal).filter(Animal.Legs == 1000000)) ticks = list(session.query(Animal).filter(Animal.Species == u'Tick')) def test_baseline_4_expressions(self): for x in xrange(ITERATIONS): assert len(list(session.query(Zoo))) == 5 assert len(list(session.query(Animal))) == ITERATIONS + 12 assert len(list(session.query(Animal).filter(Animal.Legs == 4))) == 4 assert len(list(session.query(Animal).filter(Animal.Legs == 2))) == 5 assert len(list(session.query(Animal).filter(and_(Animal.Legs >= 2, Animal.Legs < 20)))) == ITERATIONS + 9 assert len(list(session.query(Animal).filter(Animal.Legs > 10))) == 2 assert len(list(session.query(Animal).filter(Animal.Lifespan > 70))) == 2 assert len(list(session.query(Animal). filter(Animal.Species.like(u'L%')))) == 2 assert len(list(session.query(Animal). filter(Animal.Species.like(u'%pede')))) == 2 assert len(list(session.query(Animal).filter(Animal.LastEscape != None))) == 1 assert len(list(session.query(Animal).filter(Animal.LastEscape == None))) == ITERATIONS + 11 # In operator (containedby) assert len(list(session.query(Animal).filter( Animal.Species.like(u'%pede%')))) == 2 assert len(list(session.query(Animal). filter(Animal.Species.in_((u'Lion' , u'Tiger', u'Bear'))))) == 3 # Try In with cell references class thing(object): pass pet, pet2 = thing(), thing() pet.Name, pet2.Name = u'Slug', u'Ostrich' assert len(list(session.query(Animal). filter(Animal.Species.in_((pet.Name, pet2.Name))))) == 2 # logic and other functions name = u'Lion' assert len(list(session.query(Animal). filter(func.length(Animal.Species) == len(name)))) == ITERATIONS + 3 assert len(list(session.query(Animal). filter(Animal.Species.like(u'%i%' )))) == ITERATIONS + 7 # Test now(), today(), year(), month(), day() assert len(list(session.query(Zoo).filter(and_(Zoo.Founded != None, Zoo.Founded < func.now())))) == 3 assert len(list(session.query(Animal).filter(Animal.LastEscape == func.now()))) == 0 assert len(list(session.query(Animal).filter(func.date_part('year' , Animal.LastEscape) == 2004))) == 1 assert len(list(session.query(Animal). 
filter(func.date_part('month' , Animal.LastEscape) == 12))) == 1 assert len(list(session.query(Animal).filter(func.date_part('day' , Animal.LastEscape) == 21))) == 1 def test_baseline_5_aggregates(self): Animal = metadata.tables['Animal'] Zoo = metadata.tables['Zoo'] # TODO: convert to ORM engine = metadata.bind for x in xrange(ITERATIONS): # views view = engine.execute(select([Animal.c.Legs])).fetchall() legs = [x[0] for x in view] legs.sort() expected = { 'Leopard': 73.5, 'Slug': .75, 'Tiger': None, 'Lion': None, 'Bear': None, 'Ostrich': 103.2, 'Centipede': None, 'Emperor Penguin': None, 'Adelie Penguin': None, 'Millipede': None, 'Ape': None, 'Tick': None, } for species, lifespan in engine.execute(select([Animal.c.Species, Animal.c.Lifespan])).fetchall(): assert lifespan == expected[species] expected = [u'Montr\xe9al Biod\xf4me', 'Wild Animal Park'] e = select([Zoo.c.Name], and_(Zoo.c.Founded != None, Zoo.c.Founded <= func.current_timestamp(), Zoo.c.Founded >= datetime.date(1990, 1, 1))) values = [val[0] for val in engine.execute(e).fetchall()] assert set(values) == set(expected) # distinct legs = [x[0] for x in engine.execute(select([Animal.c.Legs], distinct=True)).fetchall()] legs.sort() def test_baseline_6_editing(self): for x in xrange(ITERATIONS): # Edit SDZ = session.query(Zoo).filter(Zoo.Name == u'San Diego Zoo' ).one() SDZ.Name = u'The San Diego Zoo' SDZ.Founded = datetime.date(1900, 1, 1) SDZ.Opens = datetime.time(7, 30, 0) SDZ.Admission = 35.00 # Test edits SDZ = session.query(Zoo).filter(Zoo.Name == u'The San Diego Zoo').one() assert SDZ.Founded == datetime.date(1900, 1, 1), SDZ.Founded # Change it back SDZ.Name = u'San Diego Zoo' SDZ.Founded = datetime.date(1835, 9, 13) SDZ.Opens = datetime.time(9, 0, 0) SDZ.Admission = 0 # Test re-edits SDZ = session.query(Zoo).filter(Zoo.Name == u'San Diego Zoo' ).one() assert SDZ.Founded == datetime.date(1835, 9, 13), \ SDZ.Founded def test_baseline_7_drop(self): session.rollback() metadata.drop_all() # Now, run all of these tests again with the DB-API driver factored # out: the ReplayableSession playback stands in for the database. # # How awkward is this in a unittest framework? Very. 
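    # Roughly how the replay works (based on test_baseline_0_setup above and
    # test_profile_0 below): the baseline pass builds its engine with
    # creator=lambda: dbapi_session.recorder(creator()), so every DB-API call
    # made against the real psycopg2 connection is captured by the shared
    # ReplayableSession.  The profiling pass then builds an engine with
    # creator=lambda: dbapi_session.player(), which replays the captured
    # responses instead of hitting the database, so the call counts measured
    # below largely reflect SQLAlchemy overhead rather than driver or network work.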
def test_profile_0(self): global metadata, session player = lambda : dbapi_session.player() engine = create_engine('postgresql:///', creator=player, use_native_hstore=False) metadata = MetaData(engine) session = sessionmaker(engine)() engine.connect() def test_profile_1_create_tables(self): self.test_baseline_1_create_tables() @profiling.function_call_count() def test_profile_1a_populate(self): self.test_baseline_1a_populate() @profiling.function_call_count() def test_profile_2_insert(self): self.test_baseline_2_insert() @profiling.function_call_count() def test_profile_3_properties(self): self.test_baseline_3_properties() @profiling.function_call_count() def test_profile_4_expressions(self): self.test_baseline_4_expressions() @profiling.function_call_count() def test_profile_5_aggregates(self): self.test_baseline_5_aggregates() @profiling.function_call_count() def test_profile_6_editing(self): self.test_baseline_6_editing() def test_profile_7_drop(self): self.test_baseline_7_drop() SQLAlchemy-0.8.4/test/base/0000755000076500000240000000000012251151573016124 5ustar classicstaff00000000000000SQLAlchemy-0.8.4/test/base/__init__.py0000644000076500000240000000000012251147171020222 0ustar classicstaff00000000000000SQLAlchemy-0.8.4/test/base/test_dependency.py0000644000076500000240000002111212251150015021636 0ustar classicstaff00000000000000from sqlalchemy.util import topological from sqlalchemy.testing import assert_raises, eq_ from sqlalchemy.testing.util import conforms_partial_ordering from sqlalchemy import exc from sqlalchemy.testing import fixtures class DependencySortTest(fixtures.TestBase): def assert_sort(self, tuples, allitems=None): if allitems is None: allitems = self._nodes_from_tuples(tuples) else: allitems = self._nodes_from_tuples(tuples).union(allitems) result = list(topological.sort(tuples, allitems)) assert conforms_partial_ordering(tuples, result) def _nodes_from_tuples(self, tups): s = set() for tup in tups: s.update(tup) return s def test_sort_one(self): rootnode = 'root' node2 = 'node2' node3 = 'node3' node4 = 'node4' subnode1 = 'subnode1' subnode2 = 'subnode2' subnode3 = 'subnode3' subnode4 = 'subnode4' subsubnode1 = 'subsubnode1' tuples = [ (subnode3, subsubnode1), (node2, subnode1), (node2, subnode2), (rootnode, node2), (rootnode, node3), (rootnode, node4), (node4, subnode3), (node4, subnode4), ] self.assert_sort(tuples) def test_sort_two(self): node1 = 'node1' node2 = 'node2' node3 = 'node3' node4 = 'node4' node5 = 'node5' node6 = 'node6' node7 = 'node7' tuples = [(node1, node2), (node3, node4), (node4, node5), (node5, node6), (node6, node2)] self.assert_sort(tuples, [node7]) def test_sort_three(self): node1 = 'keywords' node2 = 'itemkeyowrds' node3 = 'items' node4 = 'hoho' tuples = [(node1, node2), (node4, node1), (node1, node3), (node3, node2)] self.assert_sort(tuples) def test_raise_on_cycle_one(self): node1 = 'node1' node2 = 'node2' node3 = 'node3' node4 = 'node4' node5 = 'node5' tuples = [ (node4, node5), (node5, node4), (node1, node2), (node2, node3), (node3, node1), (node4, node1), ] allitems = self._nodes_from_tuples(tuples) try: list(topological.sort(tuples, allitems)) assert False except exc.CircularDependencyError, err: eq_(err.cycles, set(['node1', 'node3', 'node2', 'node5', 'node4'])) eq_(err.edges, set([('node3', 'node1'), ('node4', 'node1'), ('node2', 'node3'), ('node1', 'node2'), ('node4','node5'), ('node5', 'node4')])) def test_raise_on_cycle_two(self): # this condition was arising from ticket:362 and was not treated # properly by topological sort 
node1 = 'node1' node2 = 'node2' node3 = 'node3' node4 = 'node4' tuples = [(node1, node2), (node3, node1), (node2, node4), (node3, node2), (node2, node3)] allitems = self._nodes_from_tuples(tuples) try: list(topological.sort(tuples, allitems)) assert False except exc.CircularDependencyError, err: eq_(err.cycles, set(['node1', 'node3', 'node2'])) eq_(err.edges, set([('node3', 'node1'), ('node2', 'node3'), ('node3', 'node2'), ('node1', 'node2'), ('node2','node4')])) def test_raise_on_cycle_three(self): question, issue, providerservice, answer, provider = \ 'Question', 'Issue', 'ProviderService', 'Answer', 'Provider' tuples = [ (question, issue), (providerservice, issue), (provider, question), (question, provider), (providerservice, question), (provider, providerservice), (question, answer), (issue, question), ] allitems = self._nodes_from_tuples(tuples) assert_raises(exc.CircularDependencyError, list, topological.sort(tuples, allitems)) # TODO: test find_cycles def test_large_sort(self): tuples = [(i, i + 1) for i in range(0, 1500, 2)] self.assert_sort(tuples) def test_ticket_1380(self): # ticket:1380 regression: would raise a KeyError tuples = [(id(i), i) for i in range(3)] self.assert_sort(tuples) def test_find_cycle_one(self): node1 = 'node1' node2 = 'node2' node3 = 'node3' node4 = 'node4' tuples = [(node1, node2), (node3, node1), (node2, node4), (node3, node2), (node2, node3)] eq_(topological.find_cycles(tuples, self._nodes_from_tuples(tuples)), set([node1, node2, node3])) def test_find_multiple_cycles_one(self): node1 = 'node1' node2 = 'node2' node3 = 'node3' node4 = 'node4' node5 = 'node5' node6 = 'node6' node7 = 'node7' node8 = 'node8' node9 = 'node9' tuples = [ # cycle 1 cycle 2 cycle 3 cycle 4, but only if cycle # 1 nodes are present (node1, node2), (node2, node4), (node4, node1), (node9, node9), (node7, node5), (node5, node7), (node1, node6), (node6, node8), (node8, node4), (node3, node1), (node3, node2), ] allnodes = set([ node1, node2, node3, node4, node5, node6, node7, node8, node9, ]) eq_(topological.find_cycles(tuples, allnodes), set([ 'node8', 'node1', 'node2', 'node5', 'node4', 'node7', 'node6', 'node9', ])) def test_find_multiple_cycles_two(self): node1 = 'node1' node2 = 'node2' node3 = 'node3' node4 = 'node4' node5 = 'node5' node6 = 'node6' tuples = [ # cycle 1 cycle 2 (node1, node2), (node2, node4), (node4, node1), (node1, node6), (node6, node2), (node2, node4), (node4, node1), ] allnodes = set([ node1, node2, node3, node4, node5, node6, ]) # node6 only became present here once [ticket:2282] was addressed. 
eq_( topological.find_cycles(tuples, allnodes), set(['node1','node2', 'node4', 'node6']) ) def test_find_multiple_cycles_three(self): node1 = 'node1' node2 = 'node2' node3 = 'node3' node4 = 'node4' node5 = 'node5' node6 = 'node6' tuples = [ # cycle 1 cycle 2 cycle3 cycle4 (node1, node2), (node2, node1), (node2, node3), (node3, node2), (node2, node4), (node4, node2), (node2, node5), (node5, node6), (node6, node2), ] allnodes = set([ node1, node2, node3, node4, node5, node6, ]) eq_(topological.find_cycles(tuples, allnodes), allnodes) def test_find_multiple_cycles_four(self): tuples = [ ('node6', 'node2'), ('node15', 'node19'), ('node19', 'node2'), ('node4', 'node10'), ('node15', 'node13'), ('node17', 'node11'), ('node1', 'node19'), ('node15', 'node8'), ('node6', 'node20'), ('node14', 'node11'), ('node6', 'node14'), ('node11', 'node2'), ('node10', 'node20'), ('node1', 'node11'), ('node20', 'node19'), ('node4', 'node20'), ('node15', 'node20'), ('node9', 'node19'), ('node11', 'node10'), ('node11', 'node19'), ('node13', 'node6'), ('node3', 'node15'), ('node9', 'node11'), ('node4', 'node17'), ('node2', 'node20'), ('node19', 'node10'), ('node8', 'node4'), ('node11', 'node3'), ('node6', 'node1') ] allnodes = ['node%d' % i for i in xrange(1, 21)] eq_( topological.find_cycles(tuples, allnodes), set(['node11', 'node10', 'node13', 'node15', 'node14', 'node17', 'node19', 'node20', 'node8', 'node1', 'node3', 'node2', 'node4', 'node6']) ) SQLAlchemy-0.8.4/test/base/test_events.py0000644000076500000240000004323012251150015021031 0ustar classicstaff00000000000000"""Test event registration and listening.""" from sqlalchemy.testing import eq_, assert_raises, assert_raises_message, \ is_, is_not_ from sqlalchemy import event, exc from sqlalchemy.testing import fixtures from sqlalchemy.testing.util import gc_collect from sqlalchemy.testing.mock import Mock, call class EventsTest(fixtures.TestBase): """Test class- and instance-level event registration.""" def setUp(self): assert 'event_one' not in event._registrars assert 'event_two' not in event._registrars class TargetEvents(event.Events): def event_one(self, x, y): pass def event_two(self, x): pass def event_three(self, x): pass class Target(object): dispatch = event.dispatcher(TargetEvents) self.Target = Target def tearDown(self): event._remove_dispatcher(self.Target.__dict__['dispatch'].events) def test_register_class(self): def listen(x, y): pass event.listen(self.Target, "event_one", listen) eq_(len(self.Target().dispatch.event_one), 1) eq_(len(self.Target().dispatch.event_two), 0) def test_register_instance(self): def listen(x, y): pass t1 = self.Target() event.listen(t1, "event_one", listen) eq_(len(self.Target().dispatch.event_one), 0) eq_(len(t1.dispatch.event_one), 1) eq_(len(self.Target().dispatch.event_two), 0) eq_(len(t1.dispatch.event_two), 0) def test_bool_clslevel(self): def listen_one(x, y): pass event.listen(self.Target, "event_one", listen_one) t = self.Target() assert t.dispatch.event_one def test_register_class_instance(self): def listen_one(x, y): pass def listen_two(x, y): pass event.listen(self.Target, "event_one", listen_one) t1 = self.Target() event.listen(t1, "event_one", listen_two) eq_(len(self.Target().dispatch.event_one), 1) eq_(len(t1.dispatch.event_one), 2) eq_(len(self.Target().dispatch.event_two), 0) eq_(len(t1.dispatch.event_two), 0) def listen_three(x, y): pass event.listen(self.Target, "event_one", listen_three) eq_(len(self.Target().dispatch.event_one), 2) eq_(len(t1.dispatch.event_one), 3) def 
test_append_vs_insert(self): def listen_one(x, y): pass def listen_two(x, y): pass def listen_three(x, y): pass event.listen(self.Target, "event_one", listen_one) event.listen(self.Target, "event_one", listen_two) event.listen(self.Target, "event_one", listen_three, insert=True) eq_( list(self.Target().dispatch.event_one), [listen_three, listen_one, listen_two] ) def test_decorator(self): @event.listens_for(self.Target, "event_one") def listen_one(x, y): pass @event.listens_for(self.Target, "event_two") @event.listens_for(self.Target, "event_three") def listen_two(x, y): pass eq_( list(self.Target().dispatch.event_one), [listen_one] ) eq_( list(self.Target().dispatch.event_two), [listen_two] ) eq_( list(self.Target().dispatch.event_three), [listen_two] ) def test_no_instance_level_collections(self): @event.listens_for(self.Target, "event_one") def listen_one(x, y): pass t1 = self.Target() t2 = self.Target() t1.dispatch.event_one(5, 6) t2.dispatch.event_one(5, 6) is_( t1.dispatch.__dict__['event_one'], self.Target.dispatch.event_one.\ _empty_listeners[self.Target] ) @event.listens_for(t1, "event_one") def listen_two(x, y): pass is_not_( t1.dispatch.__dict__['event_one'], self.Target.dispatch.event_one.\ _empty_listeners[self.Target] ) is_( t2.dispatch.__dict__['event_one'], self.Target.dispatch.event_one.\ _empty_listeners[self.Target] ) def test_immutable_methods(self): t1 = self.Target() for meth in [ t1.dispatch.event_one.exec_once, t1.dispatch.event_one.insert, t1.dispatch.event_one.append, t1.dispatch.event_one.remove, t1.dispatch.event_one.clear, ]: assert_raises_message( NotImplementedError, r"need to call for_modify\(\)", meth ) class ClsLevelListenTest(fixtures.TestBase): def tearDown(self): event._remove_dispatcher(self.TargetOne.__dict__['dispatch'].events) def setUp(self): class TargetEventsOne(event.Events): def event_one(self, x, y): pass class TargetOne(object): dispatch = event.dispatcher(TargetEventsOne) self.TargetOne = TargetOne def tearDown(self): event._remove_dispatcher( self.TargetOne.__dict__['dispatch'].events) def test_lis_subcalss_lis(self): @event.listens_for(self.TargetOne, "event_one") def handler1(x, y): pass class SubTarget(self.TargetOne): pass @event.listens_for(self.TargetOne, "event_one") def handler2(x, y): pass eq_( len(SubTarget().dispatch.event_one), 2 ) def test_lis_multisub_lis(self): @event.listens_for(self.TargetOne, "event_one") def handler1(x, y): pass class SubTarget(self.TargetOne): pass class SubSubTarget(SubTarget): pass @event.listens_for(self.TargetOne, "event_one") def handler2(x, y): pass eq_( len(SubTarget().dispatch.event_one), 2 ) eq_( len(SubSubTarget().dispatch.event_one), 2 ) def test_two_sub_lis(self): class SubTarget1(self.TargetOne): pass class SubTarget2(self.TargetOne): pass @event.listens_for(self.TargetOne, "event_one") def handler1(x, y): pass @event.listens_for(SubTarget1, "event_one") def handler2(x, y): pass s1 = SubTarget1() assert handler1 in s1.dispatch.event_one assert handler2 in s1.dispatch.event_one s2 = SubTarget2() assert handler1 in s2.dispatch.event_one assert handler2 not in s2.dispatch.event_one class AcceptTargetsTest(fixtures.TestBase): """Test default target acceptance.""" def setUp(self): class TargetEventsOne(event.Events): def event_one(self, x, y): pass class TargetEventsTwo(event.Events): def event_one(self, x, y): pass class TargetOne(object): dispatch = event.dispatcher(TargetEventsOne) class TargetTwo(object): dispatch = event.dispatcher(TargetEventsTwo) self.TargetOne = TargetOne self.TargetTwo = 
TargetTwo def tearDown(self): event._remove_dispatcher(self.TargetOne.__dict__['dispatch'].events) event._remove_dispatcher(self.TargetTwo.__dict__['dispatch'].events) def test_target_accept(self): """Test that events of the same name are routed to the correct collection based on the type of target given. """ def listen_one(x, y): pass def listen_two(x, y): pass def listen_three(x, y): pass def listen_four(x, y): pass event.listen(self.TargetOne, "event_one", listen_one) event.listen(self.TargetTwo, "event_one", listen_two) eq_( list(self.TargetOne().dispatch.event_one), [listen_one] ) eq_( list(self.TargetTwo().dispatch.event_one), [listen_two] ) t1 = self.TargetOne() t2 = self.TargetTwo() event.listen(t1, "event_one", listen_three) event.listen(t2, "event_one", listen_four) eq_( list(t1.dispatch.event_one), [listen_one, listen_three] ) eq_( list(t2.dispatch.event_one), [listen_two, listen_four] ) class CustomTargetsTest(fixtures.TestBase): """Test custom target acceptance.""" def setUp(self): class TargetEvents(event.Events): @classmethod def _accept_with(cls, target): if target == 'one': return Target else: return None def event_one(self, x, y): pass class Target(object): dispatch = event.dispatcher(TargetEvents) self.Target = Target def tearDown(self): event._remove_dispatcher(self.Target.__dict__['dispatch'].events) def test_indirect(self): def listen(x, y): pass event.listen("one", "event_one", listen) eq_( list(self.Target().dispatch.event_one), [listen] ) assert_raises( exc.InvalidRequestError, event.listen, listen, "event_one", self.Target ) class SubclassGrowthTest(fixtures.TestBase): """test that ad-hoc subclasses are garbage collected.""" def setUp(self): class TargetEvents(event.Events): def some_event(self, x, y): pass class Target(object): dispatch = event.dispatcher(TargetEvents) self.Target = Target def test_subclass(self): class SubTarget(self.Target): pass st = SubTarget() st.dispatch.some_event(1, 2) del st del SubTarget gc_collect() eq_(self.Target.__subclasses__(), []) class ListenOverrideTest(fixtures.TestBase): """Test custom listen functions which change the listener function signature.""" def setUp(self): class TargetEvents(event.Events): @classmethod def _listen(cls, target, identifier, fn, add=False): if add: def adapt(x, y): fn(x + y) else: adapt = fn event.Events._listen(target, identifier, adapt) def event_one(self, x, y): pass class Target(object): dispatch = event.dispatcher(TargetEvents) self.Target = Target def tearDown(self): event._remove_dispatcher(self.Target.__dict__['dispatch'].events) def test_listen_override(self): listen_one = Mock() listen_two = Mock() event.listen(self.Target, "event_one", listen_one, add=True) event.listen(self.Target, "event_one", listen_two) t1 = self.Target() t1.dispatch.event_one(5, 7) t1.dispatch.event_one(10, 5) eq_( listen_one.mock_calls, [call(12), call(15)] ) eq_( listen_two.mock_calls, [call(5, 7), call(10, 5)] ) class PropagateTest(fixtures.TestBase): def setUp(self): class TargetEvents(event.Events): def event_one(self, arg): pass def event_two(self, arg): pass class Target(object): dispatch = event.dispatcher(TargetEvents) self.Target = Target def test_propagate(self): listen_one = Mock() listen_two = Mock() t1 = self.Target() event.listen(t1, "event_one", listen_one, propagate=True) event.listen(t1, "event_two", listen_two) t2 = self.Target() t2.dispatch._update(t1.dispatch) t2.dispatch.event_one(t2, 1) t2.dispatch.event_two(t2, 2) eq_( listen_one.mock_calls, [call(t2, 1)] ) eq_( listen_two.mock_calls, [] ) class 
JoinTest(fixtures.TestBase): def setUp(self): class TargetEvents(event.Events): def event_one(self, target, arg): pass class BaseTarget(object): dispatch = event.dispatcher(TargetEvents) class TargetFactory(BaseTarget): def create(self): return TargetElement(self) class TargetElement(BaseTarget): def __init__(self, parent): self.dispatch = self.dispatch._join(parent.dispatch) def run_event(self, arg): list(self.dispatch.event_one) self.dispatch.event_one(self, arg) self.BaseTarget = BaseTarget self.TargetFactory = TargetFactory self.TargetElement = TargetElement def tearDown(self): for cls in (self.TargetElement, self.TargetFactory, self.BaseTarget): if 'dispatch' in cls.__dict__: event._remove_dispatcher(cls.__dict__['dispatch'].events) def test_neither(self): element = self.TargetFactory().create() element.run_event(1) element.run_event(2) element.run_event(3) def test_parent_class_only(self): l1 = Mock() event.listen(self.TargetFactory, "event_one", l1) element = self.TargetFactory().create() element.run_event(1) element.run_event(2) element.run_event(3) eq_( l1.mock_calls, [call(element, 1), call(element, 2), call(element, 3)] ) def test_parent_class_child_class(self): l1 = Mock() l2 = Mock() event.listen(self.TargetFactory, "event_one", l1) event.listen(self.TargetElement, "event_one", l2) element = self.TargetFactory().create() element.run_event(1) element.run_event(2) element.run_event(3) eq_( l1.mock_calls, [call(element, 1), call(element, 2), call(element, 3)] ) eq_( l2.mock_calls, [call(element, 1), call(element, 2), call(element, 3)] ) def test_parent_class_child_instance_apply_after(self): l1 = Mock() l2 = Mock() event.listen(self.TargetFactory, "event_one", l1) element = self.TargetFactory().create() element.run_event(1) event.listen(element, "event_one", l2) element.run_event(2) element.run_event(3) eq_( l1.mock_calls, [call(element, 1), call(element, 2), call(element, 3)] ) eq_( l2.mock_calls, [call(element, 2), call(element, 3)] ) def test_parent_class_child_instance_apply_before(self): l1 = Mock() l2 = Mock() event.listen(self.TargetFactory, "event_one", l1) element = self.TargetFactory().create() event.listen(element, "event_one", l2) element.run_event(1) element.run_event(2) element.run_event(3) eq_( l1.mock_calls, [call(element, 1), call(element, 2), call(element, 3)] ) eq_( l2.mock_calls, [call(element, 1), call(element, 2), call(element, 3)] ) def test_parent_instance_child_class_apply_before(self): l1 = Mock() l2 = Mock() event.listen(self.TargetElement, "event_one", l2) factory = self.TargetFactory() event.listen(factory, "event_one", l1) element = factory.create() element.run_event(1) element.run_event(2) element.run_event(3) eq_( l1.mock_calls, [call(element, 1), call(element, 2), call(element, 3)] ) eq_( l2.mock_calls, [call(element, 1), call(element, 2), call(element, 3)] ) def test_parent_instance_child_class_apply_after(self): l1 = Mock() l2 = Mock() event.listen(self.TargetElement, "event_one", l2) factory = self.TargetFactory() element = factory.create() element.run_event(1) event.listen(factory, "event_one", l1) element.run_event(2) element.run_event(3) # c1 gets no events due to _JoinedListener # fixing the "parent" at construction time. # this can be changed to be "live" at the cost # of performance. 
eq_( l1.mock_calls, [] ) eq_( l2.mock_calls, [call(element, 1), call(element, 2), call(element, 3)] ) def test_parent_instance_child_instance_apply_before(self): l1 = Mock() l2 = Mock() factory = self.TargetFactory() event.listen(factory, "event_one", l1) element = factory.create() event.listen(element, "event_one", l2) element.run_event(1) element.run_event(2) element.run_event(3) eq_( l1.mock_calls, [call(element, 1), call(element, 2), call(element, 3)] ) eq_( l2.mock_calls, [call(element, 1), call(element, 2), call(element, 3)] ) def test_parent_events_child_no_events(self): l1 = Mock() factory = self.TargetFactory() event.listen(self.TargetElement, "event_one", l1) element = factory.create() element.run_event(1) element.run_event(2) element.run_event(3) eq_( l1.mock_calls, [call(element, 1), call(element, 2), call(element, 3)] ) SQLAlchemy-0.8.4/test/base/test_except.py0000644000076500000240000001343212251150015021016 0ustar classicstaff00000000000000"""Tests exceptions and DB-API exception wrapping.""" from sqlalchemy import exc as sa_exceptions from sqlalchemy.testing import fixtures from sqlalchemy.testing import eq_ # Py3K #StandardError = BaseException # Py2K from exceptions import StandardError, KeyboardInterrupt, SystemExit # end Py2K class Error(StandardError): """This class will be old-style on <= 2.4 and new-style on >= 2.5.""" class DatabaseError(Error): pass class OperationalError(DatabaseError): pass class ProgrammingError(DatabaseError): def __str__(self): return '<%s>' % self.bogus class OutOfSpec(DatabaseError): pass class WrapTest(fixtures.TestBase): def test_db_error_normal(self): try: raise sa_exceptions.DBAPIError.instance('', [], OperationalError(), DatabaseError) except sa_exceptions.DBAPIError: self.assert_(True) def test_tostring(self): try: raise sa_exceptions.DBAPIError.instance('this is a message' , None, OperationalError(), DatabaseError) except sa_exceptions.DBAPIError, exc: assert str(exc) \ == "(OperationalError) 'this is a message' None" def test_tostring_large_dict(self): try: raise sa_exceptions.DBAPIError.instance('this is a message' , {'a': 1, 'b': 2, 'c': 3, 'd': 4, 'e': 5, 'f': 6, 'g': 7, 'h': 8, 'i': 9, 'j': 10, 'k': 11, }, OperationalError(), DatabaseError) except sa_exceptions.DBAPIError, exc: assert str(exc).startswith("(OperationalError) 'this is a " "message' {") def test_tostring_large_list(self): try: raise sa_exceptions.DBAPIError.instance('this is a message', [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,], OperationalError(), DatabaseError) except sa_exceptions.DBAPIError, exc: assert str(exc).startswith("(OperationalError) 'this is a " "message' [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]") def test_tostring_large_executemany(self): try: raise sa_exceptions.DBAPIError.instance('this is a message', [{1: 1}, {1: 1}, {1: 1}, {1: 1}, {1: 1}, {1: 1}, {1: 1}, {1:1}, {1: 1}, {1: 1},], OperationalError(), DatabaseError) except sa_exceptions.DBAPIError, exc: eq_(str(exc) , "(OperationalError) 'this is a message' [{1: 1}, "\ "{1: 1}, {1: 1}, {1: 1}, {1: 1}, {1: 1}, {1: 1}, {1: "\ "1}, {1: 1}, {1: 1}]") try: raise sa_exceptions.DBAPIError.instance('this is a message', [ {1: 1}, {1: 1}, {1: 1}, {1: 1}, {1: 1}, {1: 1}, {1: 1}, {1:1}, {1: 1}, {1: 1}, {1: 1}, ], OperationalError(), DatabaseError) except sa_exceptions.DBAPIError, exc: eq_(str(exc) , "(OperationalError) 'this is a message' [{1: 1}, " "{1: 1}, {1: 1}, {1: 1}, {1: 1}, {1: 1}, " "{1: 1}, {1: 1} ... displaying 10 of 11 total " "bound parameter sets ... 
{1: 1}, {1: 1}]" ) try: raise sa_exceptions.DBAPIError.instance('this is a message', [ (1, ), (1, ), (1, ), (1, ), (1, ), (1, ), (1, ), (1, ), (1, ), (1, ), ], OperationalError(), DatabaseError) except sa_exceptions.DBAPIError, exc: eq_(str(exc), "(OperationalError) 'this is a message' [(1,), "\ "(1,), (1,), (1,), (1,), (1,), (1,), (1,), (1,), (1,)]") try: raise sa_exceptions.DBAPIError.instance('this is a message', [ (1, ), (1, ), (1, ), (1, ), (1, ), (1, ), (1, ), (1, ), (1, ), (1, ), (1, ), ], OperationalError(), DatabaseError) except sa_exceptions.DBAPIError, exc: eq_(str(exc), "(OperationalError) 'this is a message' [(1,), " "(1,), (1,), (1,), (1,), (1,), (1,), (1,) " "... displaying 10 of 11 total bound " "parameter sets ... (1,), (1,)]" ) def test_db_error_busted_dbapi(self): try: raise sa_exceptions.DBAPIError.instance('', [], ProgrammingError(), DatabaseError) except sa_exceptions.DBAPIError, e: self.assert_(True) self.assert_('Error in str() of DB-API' in e.args[0]) def test_db_error_noncompliant_dbapi(self): try: raise sa_exceptions.DBAPIError.instance('', [], OutOfSpec(), DatabaseError) except sa_exceptions.DBAPIError, e: self.assert_(e.__class__ is sa_exceptions.DBAPIError) except OutOfSpec: self.assert_(False) try: raise sa_exceptions.DBAPIError.instance('', [], sa_exceptions.ArgumentError(), DatabaseError) except sa_exceptions.DBAPIError, e: self.assert_(e.__class__ is sa_exceptions.DBAPIError) except sa_exceptions.ArgumentError: self.assert_(False) def test_db_error_keyboard_interrupt(self): try: raise sa_exceptions.DBAPIError.instance('', [], KeyboardInterrupt(), DatabaseError) except sa_exceptions.DBAPIError: self.assert_(False) except KeyboardInterrupt: self.assert_(True) def test_db_error_system_exit(self): try: raise sa_exceptions.DBAPIError.instance('', [], SystemExit(), DatabaseError) except sa_exceptions.DBAPIError: self.assert_(False) except SystemExit: self.assert_(True) SQLAlchemy-0.8.4/test/base/test_inspect.py0000644000076500000240000000354112251147171021204 0ustar classicstaff00000000000000"""test the inspection registry system.""" from sqlalchemy.testing import eq_, assert_raises_message from sqlalchemy import exc, util from sqlalchemy import inspection, inspect from sqlalchemy.testing import fixtures class TestFixture(object): pass class TestInspection(fixtures.TestBase): def tearDown(self): for type_ in list(inspection._registrars): if issubclass(type_, TestFixture): del inspection._registrars[type_] def test_def_insp(self): class SomeFoo(TestFixture): pass @inspection._inspects(SomeFoo) def insp_somefoo(subject): return {"insp":subject} somefoo = SomeFoo() insp = inspect(somefoo) assert insp["insp"] is somefoo def test_no_inspect(self): class SomeFoo(TestFixture): pass assert_raises_message( exc.NoInspectionAvailable, "No inspection system is available for object of type ", inspect, SomeFoo ) def test_class_insp(self): class SomeFoo(TestFixture): pass class SomeFooInspect(object): def __init__(self, target): self.target = target SomeFooInspect = inspection._inspects(SomeFoo)(SomeFooInspect) somefoo = SomeFoo() insp = inspect(somefoo) assert isinstance(insp, SomeFooInspect) assert insp.target is somefoo def test_hierarchy_insp(self): class SomeFoo(TestFixture): pass class SomeSubFoo(SomeFoo): pass @inspection._inspects(SomeFoo) def insp_somefoo(subject): return 1 @inspection._inspects(SomeSubFoo) def insp_somesubfoo(subject): return 2 somefoo = SomeFoo() eq_(inspect(SomeFoo()), 1) eq_(inspect(SomeSubFoo()), 2) 
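# [Editorial sketch, hypothetical and not part of the original distribution: a
#  minimal stand-alone use of the inspection registry exercised by the tests
#  above.  ``Widget`` and ``_inspect_widget`` are made-up names.]
from sqlalchemy import inspect, inspection

class Widget(object):
    pass

@inspection._inspects(Widget)
def _inspect_widget(subject):
    # inspect(Widget()) dispatches to this registered function and returns
    # whatever it returns, as TestInspection.test_def_insp above asserts.
    return {"target": subject}

w = Widget()
assert inspect(w)["target"] is w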
SQLAlchemy-0.8.4/test/base/test_utils.py0000644000076500000240000013153512251150015020673 0ustar classicstaff00000000000000import copy from sqlalchemy import util, sql, exc, testing from sqlalchemy.testing import assert_raises, assert_raises_message, fixtures from sqlalchemy.testing import eq_, is_, ne_, fails_if from sqlalchemy.testing.util import picklers, gc_collect from sqlalchemy.util import classproperty, WeakSequence class KeyedTupleTest(): def test_empty(self): keyed_tuple = util.KeyedTuple([]) eq_(type(keyed_tuple), util.KeyedTuple) eq_(str(keyed_tuple), '()') eq_(len(keyed_tuple), 0) eq_(keyed_tuple.__dict__, {'_labels': []}) eq_(keyed_tuple.keys(), []) eq_(keyed_tuple._fields, ()) eq_(keyed_tuple._asdict(), {}) def test_values_but_no_labels(self): keyed_tuple = util.KeyedTuple([1, 2]) eq_(type(keyed_tuple), util.KeyedTuple) eq_(str(keyed_tuple), '(1, 2)') eq_(len(keyed_tuple), 2) eq_(keyed_tuple.__dict__, {'_labels': []}) eq_(keyed_tuple.keys(), []) eq_(keyed_tuple._fields, ()) eq_(keyed_tuple._asdict(), {}) eq_(keyed_tuple[0], 1) eq_(keyed_tuple[1], 2) def test_basic_creation(self): keyed_tuple = util.KeyedTuple([1, 2], ['a', 'b']) eq_(str(keyed_tuple), '(1, 2)') eq_(keyed_tuple.keys(), ['a', 'b']) eq_(keyed_tuple._fields, ('a', 'b')) eq_(keyed_tuple._asdict(), {'a': 1, 'b': 2}) def test_basic_index_access(self): keyed_tuple = util.KeyedTuple([1, 2], ['a', 'b']) eq_(keyed_tuple[0], 1) eq_(keyed_tuple[1], 2) def should_raise(): keyed_tuple[2] assert_raises(IndexError, should_raise) def test_basic_attribute_access(self): keyed_tuple = util.KeyedTuple([1, 2], ['a', 'b']) eq_(keyed_tuple.a, 1) eq_(keyed_tuple.b, 2) def should_raise(): keyed_tuple.c assert_raises(AttributeError, should_raise) def test_none_label(self): keyed_tuple = util.KeyedTuple([1, 2, 3], ['a', None, 'b']) eq_(str(keyed_tuple), '(1, 2, 3)') # TODO: consider not allowing None labels expected = {'a': 1, None: 2, 'b': 3, '_labels': ['a', None, 'b']} eq_(keyed_tuple.__dict__, expected) eq_(keyed_tuple.keys(), ['a', 'b']) eq_(keyed_tuple._fields, ('a', 'b')) eq_(keyed_tuple._asdict(), {'a': 1, 'b': 3}) # attribute access: can't get at value 2 eq_(keyed_tuple.a, 1) eq_(keyed_tuple.b, 3) # index access: can get at value 2 eq_(keyed_tuple[0], 1) eq_(keyed_tuple[1], 2) eq_(keyed_tuple[2], 3) def test_duplicate_labels(self): keyed_tuple = util.KeyedTuple([1, 2, 3], ['a', 'b', 'b']) eq_(str(keyed_tuple), '(1, 2, 3)') # TODO: consider not allowing duplicate labels expected = {'a': 1, 'b': 3, '_labels': ['a', 'b', 'b']} eq_(keyed_tuple.__dict__, expected) eq_(keyed_tuple.keys(), ['a', 'b', 'b']) eq_(keyed_tuple._fields, ('a', 'b', 'b')) eq_(keyed_tuple._asdict(), {'a': 1, 'b': 3}) # attribute access: can't get at value 2 eq_(keyed_tuple.a, 1) eq_(keyed_tuple.b, 3) # index access: can get at value 2 eq_(keyed_tuple[0], 1) eq_(keyed_tuple[1], 2) eq_(keyed_tuple[2], 3) def test_immutable(self): keyed_tuple = util.KeyedTuple([1, 2], ['a', 'b']) eq_(str(keyed_tuple), '(1, 2)') # attribute access: mutable eq_(keyed_tuple.a, 1) keyed_tuple.a = 100 eq_(keyed_tuple.a, 100) keyed_tuple.c = 300 eq_(keyed_tuple.c, 300) # index access: immutable def should_raise(): keyed_tuple[0] = 100 assert_raises(TypeError, should_raise) class WeakSequenceTest(fixtures.TestBase): @testing.requires.predictable_gc def test_cleanout_elements(self): class Foo(object): pass f1, f2, f3 = Foo(), Foo(), Foo() w = WeakSequence([f1, f2, f3]) eq_(len(w), 3) eq_(len(w._storage), 3) del f2 gc_collect() eq_(len(w), 2) eq_(len(w._storage), 2) 
@testing.requires.predictable_gc def test_cleanout_appended(self): class Foo(object): pass f1, f2, f3 = Foo(), Foo(), Foo() w = WeakSequence() w.append(f1) w.append(f2) w.append(f3) eq_(len(w), 3) eq_(len(w._storage), 3) del f2 gc_collect() eq_(len(w), 2) eq_(len(w._storage), 2) class OrderedDictTest(fixtures.TestBase): def test_odict(self): o = util.OrderedDict() o['a'] = 1 o['b'] = 2 o['snack'] = 'attack' o['c'] = 3 eq_(o.keys(), ['a', 'b', 'snack', 'c']) eq_(o.values(), [1, 2, 'attack', 3]) o.pop('snack') eq_(o.keys(), ['a', 'b', 'c']) eq_(o.values(), [1, 2, 3]) try: o.pop('eep') assert False except KeyError: pass eq_(o.pop('eep', 'woot'), 'woot') try: o.pop('whiff', 'bang', 'pow') assert False except TypeError: pass eq_(o.keys(), ['a', 'b', 'c']) eq_(o.values(), [1, 2, 3]) o2 = util.OrderedDict(d=4) o2['e'] = 5 eq_(o2.keys(), ['d', 'e']) eq_(o2.values(), [4, 5]) o.update(o2) eq_(o.keys(), ['a', 'b', 'c', 'd', 'e']) eq_(o.values(), [1, 2, 3, 4, 5]) o.setdefault('c', 'zzz') o.setdefault('f', 6) eq_(o.keys(), ['a', 'b', 'c', 'd', 'e', 'f']) eq_(o.values(), [1, 2, 3, 4, 5, 6]) def test_odict_constructor(self): o = util.OrderedDict([('name', 'jbe'), ('fullname', 'jonathan' ), ('password', '')]) eq_(o.keys(), ['name', 'fullname', 'password']) def test_odict_copy(self): o = util.OrderedDict() o["zzz"] = 1 o["aaa"] = 2 eq_(o.keys(), ['zzz', 'aaa']) o2 = o.copy() eq_(o2.keys(), o.keys()) o3 = copy.copy(o) eq_(o3.keys(), o.keys()) class OrderedSetTest(fixtures.TestBase): def test_mutators_against_iter(self): # testing a set modified against an iterator o = util.OrderedSet([3, 2, 4, 5]) eq_(o.difference(iter([3, 4])), util.OrderedSet([2, 5])) eq_(o.intersection(iter([3, 4, 6])), util.OrderedSet([3, 4])) eq_(o.union(iter([3, 4, 6])), util.OrderedSet([2, 3, 4, 5, 6])) class FrozenDictTest(fixtures.TestBase): def test_serialize(self): d = util.immutabledict({1: 2, 3: 4}) for loads, dumps in picklers(): print loads(dumps(d)) class MemoizedAttrTest(fixtures.TestBase): def test_memoized_property(self): val = [20] class Foo(object): @util.memoized_property def bar(self): v = val[0] val[0] += 1 return v ne_(Foo.bar, None) f1 = Foo() assert 'bar' not in f1.__dict__ eq_(f1.bar, 20) eq_(f1.bar, 20) eq_(val[0], 21) eq_(f1.__dict__['bar'], 20) def test_memoized_instancemethod(self): val = [20] class Foo(object): @util.memoized_instancemethod def bar(self): v = val[0] val[0] += 1 return v ne_(Foo.bar, None) f1 = Foo() assert 'bar' not in f1.__dict__ eq_(f1.bar(), 20) eq_(f1.bar(), 20) eq_(val[0], 21) class ColumnCollectionTest(fixtures.TestBase): def test_in(self): cc = sql.ColumnCollection() cc.add(sql.column('col1')) cc.add(sql.column('col2')) cc.add(sql.column('col3')) assert 'col1' in cc assert 'col2' in cc try: cc['col1'] in cc assert False except exc.ArgumentError, e: eq_(str(e), "__contains__ requires a string argument") def test_compare(self): cc1 = sql.ColumnCollection() cc2 = sql.ColumnCollection() cc3 = sql.ColumnCollection() c1 = sql.column('col1') c2 = c1.label('col2') c3 = sql.column('col3') cc1.add(c1) cc2.add(c2) cc3.add(c3) assert (cc1 == cc2).compare(c1 == c2) assert not (cc1 == cc3).compare(c2 == c3) class LRUTest(fixtures.TestBase): def test_lru(self): class item(object): def __init__(self, id): self.id = id def __str__(self): return "item id %d" % self.id l = util.LRUCache(10, threshold=.2) for id in range(1, 20): l[id] = item(id) # first couple of items should be gone assert 1 not in l assert 2 not in l # next batch over the threshold of 10 should be present for id_ in range(11, 20): 
assert id_ in l l[12] l[15] l[23] = item(23) l[24] = item(24) l[25] = item(25) l[26] = item(26) l[27] = item(27) assert 11 not in l assert 13 not in l for id_ in (25, 24, 23, 14, 12, 19, 18, 17, 16, 15): assert id_ in l i1 = l[25] i2 = item(25) l[25] = i2 assert 25 in l assert l[25] is i2 class ImmutableSubclass(str): pass class FlattenIteratorTest(fixtures.TestBase): def test_flatten(self): assert list(util.flatten_iterator([[1, 2, 3], [4, 5, 6], 7, 8])) == [1, 2, 3, 4, 5, 6, 7, 8] def test_str_with_iter(self): """ensure that a str object with an __iter__ method (like in PyPy) is not interpreted as an iterable. """ class IterString(str): def __iter__(self): return iter(self + '') assert list(util.flatten_iterator([IterString('asdf'), [IterString('x'), IterString('y')]])) == ['asdf', 'x', 'y'] class HashOverride(object): def __init__(self, value=None): self.value = value def __hash__(self): return hash(self.value) class EqOverride(object): def __init__(self, value=None): self.value = value __hash__ = object.__hash__ def __eq__(self, other): if isinstance(other, EqOverride): return self.value == other.value else: return False def __ne__(self, other): if isinstance(other, EqOverride): return self.value != other.value else: return True class HashEqOverride(object): def __init__(self, value=None): self.value = value def __hash__(self): return hash(self.value) def __eq__(self, other): if isinstance(other, EqOverride): return self.value == other.value else: return False def __ne__(self, other): if isinstance(other, EqOverride): return self.value != other.value else: return True class IdentitySetTest(fixtures.TestBase): def assert_eq(self, identityset, expected_iterable): expected = sorted([id(o) for o in expected_iterable]) found = sorted([id(o) for o in identityset]) eq_(found, expected) def test_init(self): ids = util.IdentitySet([1, 2, 3, 2, 1]) self.assert_eq(ids, [1, 2, 3]) ids = util.IdentitySet(ids) self.assert_eq(ids, [1, 2, 3]) ids = util.IdentitySet() self.assert_eq(ids, []) ids = util.IdentitySet([]) self.assert_eq(ids, []) ids = util.IdentitySet(ids) self.assert_eq(ids, []) def test_add(self): for type_ in (object, ImmutableSubclass): data = [type_(), type_()] ids = util.IdentitySet() for i in range(2) + range(2): ids.add(data[i]) self.assert_eq(ids, data) for type_ in (EqOverride, HashOverride, HashEqOverride): data = [type_(1), type_(1), type_(2)] ids = util.IdentitySet() for i in range(3) + range(3): ids.add(data[i]) self.assert_eq(ids, data) def test_dunder_sub2(self): IdentitySet = util.IdentitySet o1, o2, o3 = object(), object(), object() ids1 = IdentitySet([o1]) ids2 = IdentitySet([o1, o2, o3]) eq_( ids2 - ids1, IdentitySet([o2, o3]) ) ids2 -= ids1 eq_(ids2, IdentitySet([o2, o3])) def test_dunder_eq(self): _, _, twin1, twin2, unique1, unique2 = self._create_sets() # basic set math eq_(twin1 == twin2, True) eq_(unique1 == unique2, False) # not an IdentitySet not_an_identity_set = object() eq_(unique1 == not_an_identity_set, False) def test_dunder_ne(self): _, _, twin1, twin2, unique1, unique2 = self._create_sets() # basic set math eq_(twin1 != twin2, False) eq_(unique1 != unique2, True) # not an IdentitySet not_an_identity_set = object() eq_(unique1 != not_an_identity_set, True) def test_dunder_le(self): super_, sub_, twin1, twin2, unique1, unique2 = self._create_sets() # basic set math eq_(sub_ <= super_, True) eq_(super_ <= sub_, False) # the same sets eq_(twin1 <= twin2, True) eq_(twin2 <= twin1, True) # totally different sets eq_(unique1 <= unique2, False) eq_(unique2 <= 
unique1, False) # not an IdentitySet def should_raise(): not_an_identity_set = object() return unique1 <= not_an_identity_set self._assert_unorderable_types(should_raise) def test_dunder_lt(self): super_, sub_, twin1, twin2, unique1, unique2 = self._create_sets() # basic set math eq_(sub_ < super_, True) eq_(super_ < sub_, False) # the same sets eq_(twin1 < twin2, False) eq_(twin2 < twin1, False) # totally different sets eq_(unique1 < unique2, False) eq_(unique2 < unique1, False) # not an IdentitySet def should_raise(): not_an_identity_set = object() return unique1 < not_an_identity_set self._assert_unorderable_types(should_raise) def test_dunder_ge(self): super_, sub_, twin1, twin2, unique1, unique2 = self._create_sets() # basic set math eq_(sub_ >= super_, False) eq_(super_ >= sub_, True) # the same sets eq_(twin1 >= twin2, True) eq_(twin2 >= twin1, True) # totally different sets eq_(unique1 >= unique2, False) eq_(unique2 >= unique1, False) # not an IdentitySet def should_raise(): not_an_identity_set = object() return unique1 >= not_an_identity_set self._assert_unorderable_types(should_raise) def test_dunder_gt(self): super_, sub_, twin1, twin2, unique1, unique2 = self._create_sets() # basic set math eq_(sub_ > super_, False) eq_(super_ > sub_, True) # the same sets eq_(twin1 > twin2, False) eq_(twin2 > twin1, False) # totally different sets eq_(unique1 > unique2, False) eq_(unique2 > unique1, False) # not an IdentitySet def should_raise(): not_an_identity_set = object() return unique1 > not_an_identity_set self._assert_unorderable_types(should_raise) def test_issubset(self): super_, sub_, twin1, twin2, unique1, unique2 = self._create_sets() # basic set math eq_(sub_.issubset(super_), True) eq_(super_.issubset(sub_), False) # the same sets eq_(twin1.issubset(twin2), True) eq_(twin2.issubset(twin1), True) # totally different sets eq_(unique1.issubset(unique2), False) eq_(unique2.issubset(unique1), False) # not an IdentitySet not_an_identity_set = object() assert_raises(TypeError, unique1.issubset, not_an_identity_set) def test_issuperset(self): super_, sub_, twin1, twin2, unique1, unique2 = self._create_sets() # basic set math eq_(sub_.issuperset(super_), False) eq_(super_.issuperset(sub_), True) # the same sets eq_(twin1.issuperset(twin2), True) eq_(twin2.issuperset(twin1), True) # totally different sets eq_(unique1.issuperset(unique2), False) eq_(unique2.issuperset(unique1), False) # not an IdentitySet not_an_identity_set = object() assert_raises(TypeError, unique1.issuperset, not_an_identity_set) def test_union(self): super_, sub_, twin1, twin2, _, _ = self._create_sets() # basic set math eq_(sub_.union(super_), super_) eq_(super_.union(sub_), super_) # the same sets eq_(twin1.union(twin2), twin1) eq_(twin2.union(twin1), twin1) # empty sets empty = util.IdentitySet([]) eq_(empty.union(empty), empty) # totally different sets unique1 = util.IdentitySet([1]) unique2 = util.IdentitySet([2]) eq_(unique1.union(unique2), util.IdentitySet([1, 2])) # not an IdentitySet not_an_identity_set = object() assert_raises(TypeError, unique1.union, not_an_identity_set) def test_dunder_or(self): super_, sub_, twin1, twin2, _, _ = self._create_sets() # basic set math eq_(sub_ | super_, super_) eq_(super_ | sub_, super_) # the same sets eq_(twin1 | twin2, twin1) eq_(twin2 | twin1, twin1) # empty sets empty = util.IdentitySet([]) eq_(empty | empty, empty) # totally different sets unique1 = util.IdentitySet([1]) unique2 = util.IdentitySet([2]) eq_(unique1 | unique2, util.IdentitySet([1, 2])) # not an 
IdentitySet def should_raise(): not_an_identity_set = object() return unique1 | not_an_identity_set assert_raises(TypeError, should_raise) def test_update(self): pass # TODO def test_dunder_ior(self): super_, sub_, _, _, _, _ = self._create_sets() # basic set math sub_ |= super_ eq_(sub_, super_) super_ |= sub_ eq_(super_, super_) # totally different sets unique1 = util.IdentitySet([1]) unique2 = util.IdentitySet([2]) unique1 |= unique2 eq_(unique1, util.IdentitySet([1, 2])) eq_(unique2, util.IdentitySet([2])) # not an IdentitySet def should_raise(): unique = util.IdentitySet([1]) not_an_identity_set = object() unique |= not_an_identity_set assert_raises(TypeError, should_raise) def test_difference(self): _, _, twin1, twin2, _, _ = self._create_sets() # basic set math set1 = util.IdentitySet([1, 2, 3]) set2 = util.IdentitySet([2, 3, 4]) eq_(set1.difference(set2), util.IdentitySet([1])) eq_(set2.difference(set1), util.IdentitySet([4])) # empty sets empty = util.IdentitySet([]) eq_(empty.difference(empty), empty) # the same sets eq_(twin1.difference(twin2), empty) eq_(twin2.difference(twin1), empty) # totally different sets unique1 = util.IdentitySet([1]) unique2 = util.IdentitySet([2]) eq_(unique1.difference(unique2), util.IdentitySet([1])) eq_(unique2.difference(unique1), util.IdentitySet([2])) # not an IdentitySet not_an_identity_set = object() assert_raises(TypeError, unique1.difference, not_an_identity_set) def test_dunder_sub(self): _, _, twin1, twin2, _, _ = self._create_sets() # basic set math set1 = util.IdentitySet([1, 2, 3]) set2 = util.IdentitySet([2, 3, 4]) eq_(set1 - set2, util.IdentitySet([1])) eq_(set2 - set1, util.IdentitySet([4])) # empty sets empty = util.IdentitySet([]) eq_(empty - empty, empty) # the same sets eq_(twin1 - twin2, empty) eq_(twin2 - twin1, empty) # totally different sets unique1 = util.IdentitySet([1]) unique2 = util.IdentitySet([2]) eq_(unique1 - unique2, util.IdentitySet([1])) eq_(unique2 - unique1, util.IdentitySet([2])) # not an IdentitySet def should_raise(): not_an_identity_set = object() unique1 - not_an_identity_set assert_raises(TypeError, should_raise) def test_difference_update(self): pass # TODO def test_dunder_isub(self): pass # TODO def test_intersection(self): super_, sub_, twin1, twin2, unique1, unique2 = self._create_sets() # basic set math eq_(sub_.intersection(super_), sub_) eq_(super_.intersection(sub_), sub_) # the same sets eq_(twin1.intersection(twin2), twin1) eq_(twin2.intersection(twin1), twin1) # empty sets empty = util.IdentitySet([]) eq_(empty.intersection(empty), empty) # totally different sets eq_(unique1.intersection(unique2), empty) # not an IdentitySet not_an_identity_set = object() assert_raises(TypeError, unique1.intersection, not_an_identity_set) def test_dunder_and(self): super_, sub_, twin1, twin2, unique1, unique2 = self._create_sets() # basic set math eq_(sub_ & super_, sub_) eq_(super_ & sub_, sub_) # the same sets eq_(twin1 & twin2, twin1) eq_(twin2 & twin1, twin1) # empty sets empty = util.IdentitySet([]) eq_(empty & empty, empty) # totally different sets eq_(unique1 & unique2, empty) # not an IdentitySet def should_raise(): not_an_identity_set = object() return unique1 & not_an_identity_set assert_raises(TypeError, should_raise) def test_intersection_update(self): pass # TODO def test_dunder_iand(self): pass # TODO def test_symmetric_difference(self): _, _, twin1, twin2, _, _ = self._create_sets() # basic set math set1 = util.IdentitySet([1, 2, 3]) set2 = util.IdentitySet([2, 3, 4]) 
eq_(set1.symmetric_difference(set2), util.IdentitySet([1, 4])) eq_(set2.symmetric_difference(set1), util.IdentitySet([1, 4])) # empty sets empty = util.IdentitySet([]) eq_(empty.symmetric_difference(empty), empty) # the same sets eq_(twin1.symmetric_difference(twin2), empty) eq_(twin2.symmetric_difference(twin1), empty) # totally different sets unique1 = util.IdentitySet([1]) unique2 = util.IdentitySet([2]) eq_(unique1.symmetric_difference(unique2), util.IdentitySet([1, 2])) eq_(unique2.symmetric_difference(unique1), util.IdentitySet([1, 2])) # not an IdentitySet not_an_identity_set = object() assert_raises( TypeError, unique1.symmetric_difference, not_an_identity_set) def test_dunder_xor(self): _, _, twin1, twin2, _, _ = self._create_sets() # basic set math set1 = util.IdentitySet([1, 2, 3]) set2 = util.IdentitySet([2, 3, 4]) eq_(set1 ^ set2, util.IdentitySet([1, 4])) eq_(set2 ^ set1, util.IdentitySet([1, 4])) # empty sets empty = util.IdentitySet([]) eq_(empty ^ empty, empty) # the same sets eq_(twin1 ^ twin2, empty) eq_(twin2 ^ twin1, empty) # totally different sets unique1 = util.IdentitySet([1]) unique2 = util.IdentitySet([2]) eq_(unique1 ^ unique2, util.IdentitySet([1, 2])) eq_(unique2 ^ unique1, util.IdentitySet([1, 2])) # not an IdentitySet def should_raise(): not_an_identity_set = object() return unique1 ^ not_an_identity_set assert_raises(TypeError, should_raise) def test_symmetric_difference_update(self): pass # TODO def _create_sets(self): o1, o2, o3, o4, o5 = object(), object(), object(), object(), object() super_ = util.IdentitySet([o1, o2, o3]) sub_ = util.IdentitySet([o2]) twin1 = util.IdentitySet([o3]) twin2 = util.IdentitySet([o3]) unique1 = util.IdentitySet([o4]) unique2 = util.IdentitySet([o5]) return super_, sub_, twin1, twin2, unique1, unique2 def _assert_unorderable_types(self, callable_): # Py3K #assert_raises_message( # TypeError, 'unorderable types', callable_) # Py2K assert_raises_message( TypeError, 'cannot compare sets using cmp()', callable_) # end Py2K def test_basic_sanity(self): IdentitySet = util.IdentitySet o1, o2, o3 = object(), object(), object() ids = IdentitySet([o1]) ids.discard(o1) ids.discard(o1) ids.add(o1) ids.remove(o1) assert_raises(KeyError, ids.remove, o1) eq_(ids.copy(), ids) # explicit __eq__ and __ne__ tests assert ids != None assert not(ids == None) ne_(ids, IdentitySet([o1, o2, o3])) ids.clear() assert o1 not in ids ids.add(o2) assert o2 in ids eq_(ids.pop(), o2) ids.add(o1) eq_(len(ids), 1) isuper = IdentitySet([o1, o2]) assert ids < isuper assert ids.issubset(isuper) assert isuper.issuperset(ids) assert isuper > ids eq_(ids.union(isuper), isuper) eq_(ids | isuper, isuper) eq_(isuper - ids, IdentitySet([o2])) eq_(isuper.difference(ids), IdentitySet([o2])) eq_(ids.intersection(isuper), IdentitySet([o1])) eq_(ids & isuper, IdentitySet([o1])) eq_(ids.symmetric_difference(isuper), IdentitySet([o2])) eq_(ids ^ isuper, IdentitySet([o2])) ids.update(isuper) ids |= isuper ids.difference_update(isuper) ids -= isuper ids.intersection_update(isuper) ids &= isuper ids.symmetric_difference_update(isuper) ids ^= isuper ids.update('foobar') try: ids |= 'foobar' assert False except TypeError: assert True try: s = set([o1, o2]) s |= ids assert False except TypeError: assert True assert_raises(TypeError, util.cmp, ids) assert_raises(TypeError, hash, ids) class OrderedIdentitySetTest(fixtures.TestBase): def assert_eq(self, identityset, expected_iterable): expected = [id(o) for o in expected_iterable] found = [id(o) for o in identityset] eq_(found, 
expected) def test_add(self): elem = object s = util.OrderedIdentitySet() s.add(elem()) s.add(elem()) def test_intersection(self): elem = object eq_ = self.assert_eq a, b, c, d, e, f, g = \ elem(), elem(), elem(), elem(), elem(), elem(), elem() s1 = util.OrderedIdentitySet([a, b, c]) s2 = util.OrderedIdentitySet([d, e, f]) s3 = util.OrderedIdentitySet([a, d, f, g]) eq_(s1.intersection(s2), []) eq_(s1.intersection(s3), [a]) eq_(s1.union(s2).intersection(s3), [a, d, f]) class DictlikeIteritemsTest(fixtures.TestBase): baseline = set([('a', 1), ('b', 2), ('c', 3)]) def _ok(self, instance): iterator = util.dictlike_iteritems(instance) eq_(set(iterator), self.baseline) def _notok(self, instance): assert_raises(TypeError, util.dictlike_iteritems, instance) def test_dict(self): d = dict(a=1, b=2, c=3) self._ok(d) def test_subdict(self): class subdict(dict): pass d = subdict(a=1, b=2, c=3) self._ok(d) # Py2K def test_UserDict(self): import UserDict d = UserDict.UserDict(a=1, b=2, c=3) self._ok(d) # end Py2K def test_object(self): self._notok(object()) # Py2K def test_duck_1(self): class duck1(object): def iteritems(duck): return iter(self.baseline) self._ok(duck1()) # end Py2K def test_duck_2(self): class duck2(object): def items(duck): return list(self.baseline) self._ok(duck2()) # Py2K def test_duck_3(self): class duck3(object): def iterkeys(duck): return iter(['a', 'b', 'c']) def __getitem__(duck, key): return dict(a=1, b=2, c=3).get(key) self._ok(duck3()) # end Py2K def test_duck_4(self): class duck4(object): def iterkeys(duck): return iter(['a', 'b', 'c']) self._notok(duck4()) def test_duck_5(self): class duck5(object): def keys(duck): return ['a', 'b', 'c'] def get(duck, key): return dict(a=1, b=2, c=3).get(key) self._ok(duck5()) def test_duck_6(self): class duck6(object): def keys(duck): return ['a', 'b', 'c'] self._notok(duck6()) class DuckTypeCollectionTest(fixtures.TestBase): def test_sets(self): # Py2K import sets # end Py2K class SetLike(object): def add(self): pass class ForcedSet(list): __emulates__ = set for type_ in (set, # Py2K sets.Set, # end Py2K SetLike, ForcedSet): eq_(util.duck_type_collection(type_), set) instance = type_() eq_(util.duck_type_collection(instance), set) for type_ in (frozenset, # Py2K sets.ImmutableSet # end Py2K ): is_(util.duck_type_collection(type_), None) instance = type_() is_(util.duck_type_collection(instance), None) class ArgInspectionTest(fixtures.TestBase): def test_get_cls_kwargs(self): class A(object): def __init__(self, a): pass class A1(A): def __init__(self, a1): pass class A11(A1): def __init__(self, a11, **kw): pass class B(object): def __init__(self, b, **kw): pass class B1(B): def __init__(self, b1, **kw): pass class B2(B): def __init__(self, b2): pass class AB(A, B): def __init__(self, ab): pass class BA(B, A): def __init__(self, ba, **kwargs): pass class BA1(BA): pass class CAB(A, B): pass class CBA(B, A): pass class CB1A1(B1, A1): pass class CAB1(A, B1): pass class CB1A(B1, A): pass class CB2A(B2, A): pass class D(object): pass class BA2(B, A): pass class A11B1(A11, B1): pass def test(cls, *expected): eq_(set(util.get_cls_kwargs(cls)), set(expected)) test(A, 'a') test(A1, 'a1') test(A11, 'a11', 'a1') test(B, 'b') test(B1, 'b1', 'b') test(AB, 'ab') test(BA, 'ba', 'b', 'a') test(BA1, 'ba', 'b', 'a') test(CAB, 'a') test(CBA, 'b', 'a') test(CAB1, 'a') test(CB1A, 'b1', 'b', 'a') test(CB2A, 'b2') test(CB1A1, "a1", "b1", "b") test(D) test(BA2, "a", "b") test(A11B1, "a1", "a11", "b", "b1") def test_get_func_kwargs(self): def f1(): pass def 
f2(foo): pass def f3(*foo): pass def f4(**foo): pass def test(fn, *expected): eq_(set(util.get_func_kwargs(fn)), set(expected)) test(f1) test(f2, 'foo') test(f3) test(f4) class SymbolTest(fixtures.TestBase): def test_basic(self): sym1 = util.symbol('foo') assert sym1.name == 'foo' sym2 = util.symbol('foo') assert sym1 is sym2 assert sym1 == sym2 sym3 = util.symbol('bar') assert sym1 is not sym3 assert sym1 != sym3 def test_pickle(self): sym1 = util.symbol('foo') sym2 = util.symbol('foo') assert sym1 is sym2 # default s = util.pickle.dumps(sym1) sym3 = util.pickle.loads(s) for protocol in 0, 1, 2: print protocol serial = util.pickle.dumps(sym1) rt = util.pickle.loads(serial) assert rt is sym1 assert rt is sym2 def test_bitflags(self): sym1 = util.symbol('sym1', canonical=1) sym2 = util.symbol('sym2', canonical=2) assert sym1 & sym1 assert not sym1 & sym2 assert not sym1 & sym1 & sym2 def test_composites(self): sym1 = util.symbol('sym1', canonical=1) sym2 = util.symbol('sym2', canonical=2) sym3 = util.symbol('sym3', canonical=4) sym4 = util.symbol('sym4', canonical=8) assert sym1 & (sym2 | sym1 | sym4) assert not sym1 & (sym2 | sym3) assert not (sym1 | sym2) & (sym3 | sym4) assert (sym1 | sym2) & (sym2 | sym4) class TestFormatArgspec(fixtures.TestBase): def test_specs(self): def test(fn, wanted, grouped=None): if grouped is None: parsed = util.format_argspec_plus(fn) else: parsed = util.format_argspec_plus(fn, grouped=grouped) eq_(parsed, wanted) test(lambda: None, {'args': '()', 'self_arg': None, 'apply_kw': '()', 'apply_pos': '()'}) test(lambda: None, {'args': '', 'self_arg': None, 'apply_kw': '', 'apply_pos': ''}, grouped=False) test(lambda self: None, {'args': '(self)', 'self_arg': 'self', 'apply_kw': '(self)', 'apply_pos': '(self)'}) test(lambda self: None, {'args': 'self', 'self_arg': 'self', 'apply_kw': 'self', 'apply_pos': 'self'}, grouped=False) test(lambda *a: None, {'args': '(*a)', 'self_arg': 'a[0]', 'apply_kw': '(*a)', 'apply_pos': '(*a)'}) test(lambda **kw: None, {'args': '(**kw)', 'self_arg': None, 'apply_kw': '(**kw)', 'apply_pos': '(**kw)'}) test(lambda *a, **kw: None, {'args': '(*a, **kw)', 'self_arg': 'a[0]', 'apply_kw': '(*a, **kw)', 'apply_pos': '(*a, **kw)'}) test(lambda a, *b: None, {'args': '(a, *b)', 'self_arg': 'a', 'apply_kw': '(a, *b)', 'apply_pos': '(a, *b)'}) test(lambda a, **b: None, {'args': '(a, **b)', 'self_arg': 'a', 'apply_kw': '(a, **b)', 'apply_pos': '(a, **b)'}) test(lambda a, *b, **c: None, {'args': '(a, *b, **c)', 'self_arg': 'a', 'apply_kw': '(a, *b, **c)', 'apply_pos': '(a, *b, **c)'}) test(lambda a, b=1, **c: None, {'args': '(a, b=1, **c)', 'self_arg': 'a', 'apply_kw': '(a, b=b, **c)', 'apply_pos': '(a, b, **c)'}) test(lambda a=1, b=2: None, {'args': '(a=1, b=2)', 'self_arg': 'a', 'apply_kw': '(a=a, b=b)', 'apply_pos': '(a, b)'}) test(lambda a=1, b=2: None, {'args': 'a=1, b=2', 'self_arg': 'a', 'apply_kw': 'a=a, b=b', 'apply_pos': 'a, b'}, grouped=False) @fails_if(lambda: util.pypy, "object.__init__ is introspectable") def test_init_grouped(self): object_spec = { 'args': '(self)', 'self_arg': 'self', 'apply_pos': '(self)', 'apply_kw': '(self)'} wrapper_spec = { 'args': '(self, *args, **kwargs)', 'self_arg': 'self', 'apply_pos': '(self, *args, **kwargs)', 'apply_kw': '(self, *args, **kwargs)'} custom_spec = { 'args': '(slef, a=123)', 'self_arg': 'slef', # yes, slef 'apply_pos': '(slef, a)', 'apply_kw': '(slef, a=a)'} self._test_init(None, object_spec, wrapper_spec, custom_spec) self._test_init(True, object_spec, wrapper_spec, custom_spec) 
@fails_if(lambda: util.pypy, "object.__init__ can be introspected") def test_init_bare(self): object_spec = { 'args': 'self', 'self_arg': 'self', 'apply_pos': 'self', 'apply_kw': 'self'} wrapper_spec = { 'args': 'self, *args, **kwargs', 'self_arg': 'self', 'apply_pos': 'self, *args, **kwargs', 'apply_kw': 'self, *args, **kwargs'} custom_spec = { 'args': 'slef, a=123', 'self_arg': 'slef', # yes, slef 'apply_pos': 'slef, a', 'apply_kw': 'slef, a=a'} self._test_init(False, object_spec, wrapper_spec, custom_spec) def _test_init(self, grouped, object_spec, wrapper_spec, custom_spec): def test(fn, wanted): if grouped is None: parsed = util.format_argspec_init(fn) else: parsed = util.format_argspec_init(fn, grouped=grouped) eq_(parsed, wanted) class O(object): pass test(O.__init__, object_spec) class O(object): def __init__(self): pass test(O.__init__, object_spec) class O(object): def __init__(slef, a=123): pass test(O.__init__, custom_spec) class O(list): pass test(O.__init__, wrapper_spec) class O(list): def __init__(self, *args, **kwargs): pass test(O.__init__, wrapper_spec) class O(list): def __init__(self): pass test(O.__init__, object_spec) class O(list): def __init__(slef, a=123): pass test(O.__init__, custom_spec) class GenericReprTest(fixtures.TestBase): def test_all_positional(self): class Foo(object): def __init__(self, a, b, c): self.a = a self.b = b self.c = c eq_( util.generic_repr(Foo(1, 2, 3)), "Foo(1, 2, 3)" ) def test_positional_plus_kw(self): class Foo(object): def __init__(self, a, b, c=5, d=4): self.a = a self.b = b self.c = c self.d = d eq_( util.generic_repr(Foo(1, 2, 3, 6)), "Foo(1, 2, c=3, d=6)" ) def test_kw_defaults(self): class Foo(object): def __init__(self, a=1, b=2, c=3, d=4): self.a = a self.b = b self.c = c self.d = d eq_( util.generic_repr(Foo(1, 5, 3, 7)), "Foo(b=5, d=7)" ) def test_discard_vargs(self): class Foo(object): def __init__(self, a, b, *args): self.a = a self.b = b self.c, self.d = args[0:2] eq_( util.generic_repr(Foo(1, 2, 3, 4)), "Foo(1, 2)" ) def test_discard_vargs_kwargs(self): class Foo(object): def __init__(self, a, b, *args, **kw): self.a = a self.b = b self.c, self.d = args[0:2] eq_( util.generic_repr(Foo(1, 2, 3, 4, x=7, y=4)), "Foo(1, 2)" ) def test_significant_vargs(self): class Foo(object): def __init__(self, a, b, *args): self.a = a self.b = b self.args = args eq_( util.generic_repr(Foo(1, 2, 3, 4)), "Foo(1, 2, 3, 4)" ) def test_no_args(self): class Foo(object): def __init__(self): pass eq_( util.generic_repr(Foo()), "Foo()" ) def test_no_init(self): class Foo(object): pass eq_( util.generic_repr(Foo()), "Foo()" ) class AsInterfaceTest(fixtures.TestBase): class Something(object): def _ignoreme(self): pass def foo(self): pass def bar(self): pass class Partial(object): def bar(self): pass class Object(object): pass def test_instance(self): obj = object() assert_raises(TypeError, util.as_interface, obj, cls=self.Something) assert_raises(TypeError, util.as_interface, obj, methods=('foo')) assert_raises(TypeError, util.as_interface, obj, cls=self.Something, required=('foo')) obj = self.Something() eq_(obj, util.as_interface(obj, cls=self.Something)) eq_(obj, util.as_interface(obj, methods=('foo',))) eq_( obj, util.as_interface(obj, cls=self.Something, required=('outofband',))) partial = self.Partial() slotted = self.Object() slotted.bar = lambda self: 123 for obj in partial, slotted: eq_(obj, util.as_interface(obj, cls=self.Something)) assert_raises(TypeError, util.as_interface, obj, methods=('foo')) eq_(obj, util.as_interface(obj, 
methods=('bar',))) eq_(obj, util.as_interface(obj, cls=self.Something, required=('bar',))) assert_raises(TypeError, util.as_interface, obj, cls=self.Something, required=('foo',)) assert_raises(TypeError, util.as_interface, obj, cls=self.Something, required=self.Something) def test_dict(self): obj = {} assert_raises(TypeError, util.as_interface, obj, cls=self.Something) assert_raises(TypeError, util.as_interface, obj, methods='foo') assert_raises(TypeError, util.as_interface, obj, cls=self.Something, required='foo') def assertAdapted(obj, *methods): assert isinstance(obj, type) found = set([m for m in dir(obj) if not m.startswith('_')]) for method in methods: assert method in found found.remove(method) assert not found fn = lambda self: 123 obj = {'foo': fn, 'bar': fn} res = util.as_interface(obj, cls=self.Something) assertAdapted(res, 'foo', 'bar') res = util.as_interface(obj, cls=self.Something, required=self.Something) assertAdapted(res, 'foo', 'bar') res = util.as_interface(obj, cls=self.Something, required=('foo',)) assertAdapted(res, 'foo', 'bar') res = util.as_interface(obj, methods=('foo', 'bar')) assertAdapted(res, 'foo', 'bar') res = util.as_interface(obj, methods=('foo', 'bar', 'baz')) assertAdapted(res, 'foo', 'bar') res = util.as_interface(obj, methods=('foo', 'bar'), required=('foo',)) assertAdapted(res, 'foo', 'bar') assert_raises(TypeError, util.as_interface, obj, methods=('foo',)) assert_raises(TypeError, util.as_interface, obj, methods=('foo', 'bar', 'baz'), required=('baz', )) obj = {'foo': 123} assert_raises(TypeError, util.as_interface, obj, cls=self.Something) class TestClassHierarchy(fixtures.TestBase): def test_object(self): eq_(set(util.class_hierarchy(object)), set((object,))) def test_single(self): class A(object): pass class B(object): pass eq_(set(util.class_hierarchy(A)), set((A, object))) eq_(set(util.class_hierarchy(B)), set((B, object))) class C(A, B): pass eq_(set(util.class_hierarchy(A)), set((A, B, C, object))) eq_(set(util.class_hierarchy(B)), set((A, B, C, object))) # Py2K def test_oldstyle_mixin(self): class A(object): pass class Mixin: pass class B(A, Mixin): pass eq_(set(util.class_hierarchy(B)), set((A, B, object))) eq_(set(util.class_hierarchy(Mixin)), set()) eq_(set(util.class_hierarchy(A)), set((A, B, object))) # end Py2K class TestClassProperty(fixtures.TestBase): def test_simple(self): class A(object): something = {'foo': 1} class B(A): @classproperty def something(cls): d = dict(super(B, cls).something) d.update({'bazz': 2}) return d eq_(B.something, {'foo': 1, 'bazz': 2}) SQLAlchemy-0.8.4/test/binary_data_one.dat0000644000076500000240000001640612251147172021031 0ustar classicstaff00000000000000[binary test fixture data omitted: marshaled Python 2 bytecode, not human-readable]
SQLAlchemy-0.8.4/test/binary_data_two.dat0000644000076500000240000006073712251147172021063 0ustar classicstaff00000000000000[binary test fixture data omitted: appears to be a byte-compiled copy of coverage.py 2.6, not human-readable]
SQLAlchemy-0.8.4/test/dialect/0000755000076500000240000000000012251151573016617 5ustar classicstaff00000000000000SQLAlchemy-0.8.4/test/dialect/__init__.py0000644000076500000240000000000012251147172020716 0ustar classicstaff00000000000000SQLAlchemy-0.8.4/test/dialect/mssql/0000755000076500000240000000000012251151573017756 5ustar classicstaff00000000000000SQLAlchemy-0.8.4/test/dialect/mssql/__init__.py0000644000076500000240000000000012251147172022055 0ustar classicstaff00000000000000SQLAlchemy-0.8.4/test/dialect/mssql/test_compiler.py0000644000076500000240000006105012251147172023203 0ustar classicstaff00000000000000# -*- encoding: utf-8 from sqlalchemy.testing import eq_ from sqlalchemy import * from sqlalchemy import schema from sqlalchemy.sql import table, column from sqlalchemy.databases import mssql from sqlalchemy.dialects.mssql import mxodbc from sqlalchemy.testing import fixtures, AssertsCompiledSQL from sqlalchemy import sql class CompileTest(fixtures.TestBase, AssertsCompiledSQL): __dialect__ = mssql.dialect() def test_true_false(self): self.assert_compile( sql.false(), "0" ) self.assert_compile( sql.true(), "1"
) def test_select(self): t = table('sometable', column('somecolumn')) self.assert_compile(t.select(), 'SELECT sometable.somecolumn FROM sometable') def test_select_with_nolock(self): t = table('sometable', column('somecolumn')) self.assert_compile(t.select().with_hint(t, 'WITH (NOLOCK)'), 'SELECT sometable.somecolumn FROM sometable WITH (NOLOCK)') def test_join_with_hint(self): t1 = table('t1', column('a', Integer), column('b', String), column('c', String), ) t2 = table('t2', column("a", Integer), column("b", Integer), column("c", Integer), ) join = t1.join(t2, t1.c.a==t2.c.a).\ select().with_hint(t1, 'WITH (NOLOCK)') self.assert_compile( join, 'SELECT t1.a, t1.b, t1.c, t2.a, t2.b, t2.c ' 'FROM t1 WITH (NOLOCK) JOIN t2 ON t1.a = t2.a' ) def test_insert(self): t = table('sometable', column('somecolumn')) self.assert_compile(t.insert(), 'INSERT INTO sometable (somecolumn) VALUES ' '(:somecolumn)') def test_update(self): t = table('sometable', column('somecolumn')) self.assert_compile(t.update(t.c.somecolumn == 7), 'UPDATE sometable SET somecolumn=:somecolum' 'n WHERE sometable.somecolumn = ' ':somecolumn_1', dict(somecolumn=10)) def test_insert_hint(self): t = table('sometable', column('somecolumn')) for targ in (None, t): for darg in ("*", "mssql"): self.assert_compile( t.insert(). values(somecolumn="x"). with_hint("WITH (PAGLOCK)", selectable=targ, dialect_name=darg), "INSERT INTO sometable WITH (PAGLOCK) " "(somecolumn) VALUES (:somecolumn)" ) def test_update_hint(self): t = table('sometable', column('somecolumn')) for targ in (None, t): for darg in ("*", "mssql"): self.assert_compile( t.update().where(t.c.somecolumn=="q"). values(somecolumn="x"). with_hint("WITH (PAGLOCK)", selectable=targ, dialect_name=darg), "UPDATE sometable WITH (PAGLOCK) " "SET somecolumn=:somecolumn " "WHERE sometable.somecolumn = :somecolumn_1" ) def test_update_exclude_hint(self): t = table('sometable', column('somecolumn')) self.assert_compile( t.update().where(t.c.somecolumn=="q"). values(somecolumn="x"). with_hint("XYZ", "mysql"), "UPDATE sometable SET somecolumn=:somecolumn " "WHERE sometable.somecolumn = :somecolumn_1" ) def test_delete_hint(self): t = table('sometable', column('somecolumn')) for targ in (None, t): for darg in ("*", "mssql"): self.assert_compile( t.delete().where(t.c.somecolumn=="q"). with_hint("WITH (PAGLOCK)", selectable=targ, dialect_name=darg), "DELETE FROM sometable WITH (PAGLOCK) " "WHERE sometable.somecolumn = :somecolumn_1" ) def test_delete_exclude_hint(self): t = table('sometable', column('somecolumn')) self.assert_compile( t.delete().\ where(t.c.somecolumn=="q").\ with_hint("XYZ", dialect_name="mysql"), "DELETE FROM sometable WHERE " "sometable.somecolumn = :somecolumn_1" ) def test_update_from_hint(self): t = table('sometable', column('somecolumn')) t2 = table('othertable', column('somecolumn')) for darg in ("*", "mssql"): self.assert_compile( t.update().where(t.c.somecolumn==t2.c.somecolumn). values(somecolumn="x"). with_hint("WITH (PAGLOCK)", selectable=t2, dialect_name=darg), "UPDATE sometable SET somecolumn=:somecolumn " "FROM sometable, othertable WITH (PAGLOCK) " "WHERE sometable.somecolumn = othertable.somecolumn" ) # TODO: not supported yet. #def test_delete_from_hint(self): # t = table('sometable', column('somecolumn')) # t2 = table('othertable', column('somecolumn')) # for darg in ("*", "mssql"): # self.assert_compile( # t.delete().where(t.c.somecolumn==t2.c.somecolumn). 
# with_hint("WITH (PAGLOCK)", # selectable=t2, # dialect_name=darg), # "" # ) def test_strict_binds(self): """test the 'strict' compiler binds.""" from sqlalchemy.dialects.mssql.base import MSSQLStrictCompiler mxodbc_dialect = mxodbc.dialect() mxodbc_dialect.statement_compiler = MSSQLStrictCompiler t = table('sometable', column('foo')) for expr, compile in [ ( select([literal("x"), literal("y")]), "SELECT 'x' AS anon_1, 'y' AS anon_2", ), ( select([t]).where(t.c.foo.in_(['x', 'y', 'z'])), "SELECT sometable.foo FROM sometable WHERE sometable.foo " "IN ('x', 'y', 'z')", ), ( t.c.foo.in_([None]), "sometable.foo IN (NULL)" ) ]: self.assert_compile(expr, compile, dialect=mxodbc_dialect) def test_in_with_subqueries(self): """Test removal of legacy behavior that converted "x==subquery" to use IN. """ t = table('sometable', column('somecolumn')) self.assert_compile(t.select().where(t.c.somecolumn == t.select()), 'SELECT sometable.somecolumn FROM ' 'sometable WHERE sometable.somecolumn = ' '(SELECT sometable.somecolumn FROM ' 'sometable)') self.assert_compile(t.select().where(t.c.somecolumn != t.select()), 'SELECT sometable.somecolumn FROM ' 'sometable WHERE sometable.somecolumn != ' '(SELECT sometable.somecolumn FROM ' 'sometable)') def test_count(self): t = table('sometable', column('somecolumn')) self.assert_compile(t.count(), 'SELECT count(sometable.somecolumn) AS ' 'tbl_row_count FROM sometable') def test_noorderby_insubquery(self): """test that the ms-sql dialect removes ORDER BY clauses from subqueries""" table1 = table('mytable', column('myid', Integer), column('name', String), column('description', String), ) q = select([table1.c.myid], order_by=[table1.c.myid]).alias('foo') crit = q.c.myid == table1.c.myid self.assert_compile(select(['*'], crit), "SELECT * FROM (SELECT mytable.myid AS " "myid FROM mytable) AS foo, mytable WHERE " "foo.myid = mytable.myid") def test_delete_schema(self): metadata = MetaData() tbl = Table('test', metadata, Column('id', Integer, primary_key=True), schema='paj') self.assert_compile(tbl.delete(tbl.c.id == 1), 'DELETE FROM paj.test WHERE paj.test.id = ' ':id_1') s = select([tbl.c.id]).where(tbl.c.id == 1) self.assert_compile(tbl.delete().where(tbl.c.id.in_(s)), 'DELETE FROM paj.test WHERE paj.test.id IN ' '(SELECT test_1.id FROM paj.test AS test_1 ' 'WHERE test_1.id = :id_1)') def test_delete_schema_multipart(self): metadata = MetaData() tbl = Table('test', metadata, Column('id', Integer, primary_key=True), schema='banana.paj') self.assert_compile(tbl.delete(tbl.c.id == 1), 'DELETE FROM banana.paj.test WHERE ' 'banana.paj.test.id = :id_1') s = select([tbl.c.id]).where(tbl.c.id == 1) self.assert_compile(tbl.delete().where(tbl.c.id.in_(s)), 'DELETE FROM banana.paj.test WHERE ' 'banana.paj.test.id IN (SELECT test_1.id ' 'FROM banana.paj.test AS test_1 WHERE ' 'test_1.id = :id_1)') def test_delete_schema_multipart_needs_quoting(self): metadata = MetaData() tbl = Table('test', metadata, Column('id', Integer, primary_key=True), schema='banana split.paj') self.assert_compile(tbl.delete(tbl.c.id == 1), 'DELETE FROM [banana split].paj.test WHERE ' '[banana split].paj.test.id = :id_1') s = select([tbl.c.id]).where(tbl.c.id == 1) self.assert_compile(tbl.delete().where(tbl.c.id.in_(s)), 'DELETE FROM [banana split].paj.test WHERE ' '[banana split].paj.test.id IN (SELECT ' 'test_1.id FROM [banana split].paj.test AS ' 'test_1 WHERE test_1.id = :id_1)') def test_delete_schema_multipart_both_need_quoting(self): metadata = MetaData() tbl = Table('test', metadata, Column('id', 
Integer, primary_key=True), schema='banana split.paj with a space') self.assert_compile(tbl.delete(tbl.c.id == 1), 'DELETE FROM [banana split].[paj with a ' 'space].test WHERE [banana split].[paj ' 'with a space].test.id = :id_1') s = select([tbl.c.id]).where(tbl.c.id == 1) self.assert_compile(tbl.delete().where(tbl.c.id.in_(s)), 'DELETE FROM [banana split].[paj with a ' 'space].test WHERE [banana split].[paj ' 'with a space].test.id IN (SELECT ' 'test_1.id FROM [banana split].[paj with a ' 'space].test AS test_1 WHERE test_1.id = ' ':id_1)') def test_union(self): t1 = table('t1', column('col1'), column('col2'), column('col3' ), column('col4')) t2 = table('t2', column('col1'), column('col2'), column('col3' ), column('col4')) s1, s2 = select([t1.c.col3.label('col3'), t1.c.col4.label('col4' )], t1.c.col2.in_(['t1col2r1', 't1col2r2'])), \ select([t2.c.col3.label('col3'), t2.c.col4.label('col4')], t2.c.col2.in_(['t2col2r2', 't2col2r3'])) u = union(s1, s2, order_by=['col3', 'col4']) self.assert_compile(u, 'SELECT t1.col3 AS col3, t1.col4 AS col4 ' 'FROM t1 WHERE t1.col2 IN (:col2_1, ' ':col2_2) UNION SELECT t2.col3 AS col3, ' 't2.col4 AS col4 FROM t2 WHERE t2.col2 IN ' '(:col2_3, :col2_4) ORDER BY col3, col4') self.assert_compile(u.alias('bar').select(), 'SELECT bar.col3, bar.col4 FROM (SELECT ' 't1.col3 AS col3, t1.col4 AS col4 FROM t1 ' 'WHERE t1.col2 IN (:col2_1, :col2_2) UNION ' 'SELECT t2.col3 AS col3, t2.col4 AS col4 ' 'FROM t2 WHERE t2.col2 IN (:col2_3, ' ':col2_4)) AS bar') def test_function(self): self.assert_compile(func.foo(1, 2), 'foo(:foo_1, :foo_2)') self.assert_compile(func.current_time(), 'CURRENT_TIME') self.assert_compile(func.foo(), 'foo()') m = MetaData() t = Table('sometable', m, Column('col1', Integer), Column('col2' , Integer)) self.assert_compile(select([func.max(t.c.col1)]), 'SELECT max(sometable.col1) AS max_1 FROM ' 'sometable') def test_function_overrides(self): self.assert_compile(func.current_date(), "GETDATE()") self.assert_compile(func.length(3), "LEN(:length_1)") def test_extract(self): t = table('t', column('col1')) for field in 'day', 'month', 'year': self.assert_compile( select([extract(field, t.c.col1)]), 'SELECT DATEPART("%s", t.col1) AS anon_1 FROM t' % field) def test_update_returning(self): table1 = table('mytable', column('myid', Integer), column('name' , String(128)), column('description', String(128))) u = update(table1, values=dict(name='foo' )).returning(table1.c.myid, table1.c.name) self.assert_compile(u, 'UPDATE mytable SET name=:name OUTPUT ' 'inserted.myid, inserted.name') u = update(table1, values=dict(name='foo')).returning(table1) self.assert_compile(u, 'UPDATE mytable SET name=:name OUTPUT ' 'inserted.myid, inserted.name, ' 'inserted.description') u = update(table1, values=dict(name='foo' )).returning(table1).where(table1.c.name == 'bar') self.assert_compile(u, 'UPDATE mytable SET name=:name OUTPUT ' 'inserted.myid, inserted.name, ' 'inserted.description WHERE mytable.name = ' ':name_1') u = update(table1, values=dict(name='foo' )).returning(func.length(table1.c.name)) self.assert_compile(u, 'UPDATE mytable SET name=:name OUTPUT ' 'LEN(inserted.name) AS length_1') def test_delete_returning(self): table1 = table('mytable', column('myid', Integer), column('name' , String(128)), column('description', String(128))) d = delete(table1).returning(table1.c.myid, table1.c.name) self.assert_compile(d, 'DELETE FROM mytable OUTPUT deleted.myid, ' 'deleted.name') d = delete(table1).where(table1.c.name == 'bar' ).returning(table1.c.myid, table1.c.name) 
self.assert_compile(d, 'DELETE FROM mytable OUTPUT deleted.myid, ' 'deleted.name WHERE mytable.name = :name_1') def test_insert_returning(self): table1 = table('mytable', column('myid', Integer), column('name' , String(128)), column('description', String(128))) i = insert(table1, values=dict(name='foo' )).returning(table1.c.myid, table1.c.name) self.assert_compile(i, 'INSERT INTO mytable (name) OUTPUT ' 'inserted.myid, inserted.name VALUES ' '(:name)') i = insert(table1, values=dict(name='foo')).returning(table1) self.assert_compile(i, 'INSERT INTO mytable (name) OUTPUT ' 'inserted.myid, inserted.name, ' 'inserted.description VALUES (:name)') i = insert(table1, values=dict(name='foo' )).returning(func.length(table1.c.name)) self.assert_compile(i, 'INSERT INTO mytable (name) OUTPUT ' 'LEN(inserted.name) AS length_1 VALUES ' '(:name)') def test_limit_using_top(self): t = table('t', column('x', Integer), column('y', Integer)) s = select([t]).where(t.c.x==5).order_by(t.c.y).limit(10) self.assert_compile( s, "SELECT TOP 10 t.x, t.y FROM t WHERE t.x = :x_1 ORDER BY t.y", checkparams={'x_1': 5} ) def test_limit_zero_using_top(self): t = table('t', column('x', Integer), column('y', Integer)) s = select([t]).where(t.c.x==5).order_by(t.c.y).limit(0) self.assert_compile( s, "SELECT TOP 0 t.x, t.y FROM t WHERE t.x = :x_1 ORDER BY t.y", checkparams={'x_1': 5} ) def test_offset_using_window(self): t = table('t', column('x', Integer), column('y', Integer)) s = select([t]).where(t.c.x==5).order_by(t.c.y).offset(20) # test that the select is not altered with subsequent compile # calls for i in range(2): self.assert_compile( s, "SELECT anon_1.x, anon_1.y FROM (SELECT t.x AS x, t.y " "AS y, ROW_NUMBER() OVER (ORDER BY t.y) AS " "mssql_rn FROM t WHERE t.x = :x_1) AS " "anon_1 WHERE mssql_rn > :mssql_rn_1", checkparams={'mssql_rn_1': 20, 'x_1': 5} ) def test_limit_offset_using_window(self): t = table('t', column('x', Integer), column('y', Integer)) s = select([t]).where(t.c.x==5).order_by(t.c.y).limit(10).offset(20) self.assert_compile( s, "SELECT anon_1.x, anon_1.y " "FROM (SELECT t.x AS x, t.y AS y, " "ROW_NUMBER() OVER (ORDER BY t.y) AS mssql_rn " "FROM t " "WHERE t.x = :x_1) AS anon_1 " "WHERE mssql_rn > :mssql_rn_1 AND mssql_rn <= :mssql_rn_2", checkparams={'mssql_rn_1': 20, 'mssql_rn_2': 30, 'x_1': 5} ) def test_limit_offset_with_correlated_order_by(self): t1 = table('t1', column('x', Integer), column('y', Integer)) t2 = table('t2', column('x', Integer), column('y', Integer)) order_by = select([t2.c.y]).where(t1.c.x == t2.c.x).as_scalar() s = select([t1]).where(t1.c.x == 5).order_by(order_by) \ .limit(10).offset(20) self.assert_compile( s, "SELECT anon_1.x, anon_1.y " "FROM (SELECT t1.x AS x, t1.y AS y, " "ROW_NUMBER() OVER (ORDER BY " "(SELECT t2.y FROM t2 WHERE t1.x = t2.x)" ") AS mssql_rn " "FROM t1 " "WHERE t1.x = :x_1) AS anon_1 " "WHERE mssql_rn > :mssql_rn_1 AND mssql_rn <= :mssql_rn_2", checkparams={'mssql_rn_1': 20, 'mssql_rn_2': 30, 'x_1': 5} ) def test_limit_zero_offset_using_window(self): t = table('t', column('x', Integer), column('y', Integer)) s = select([t]).where(t.c.x==5).order_by(t.c.y).limit(0).offset(0) # render the LIMIT of zero, but not the OFFSET # of zero, so produces TOP 0 self.assert_compile( s, "SELECT TOP 0 t.x, t.y FROM t " "WHERE t.x = :x_1 ORDER BY t.y", checkparams={'x_1': 5} ) def test_sequence_start_0(self): metadata = MetaData() tbl = Table('test', metadata, Column('id', Integer, Sequence('', 0), primary_key=True)) self.assert_compile(schema.CreateTable(tbl), "CREATE 
TABLE test (id INTEGER NOT NULL IDENTITY(0,1), " "PRIMARY KEY (id))" ) def test_sequence_non_primary_key(self): metadata = MetaData() tbl = Table('test', metadata, Column('id', Integer, Sequence(''), primary_key=False)) self.assert_compile(schema.CreateTable(tbl), "CREATE TABLE test (id INTEGER NOT NULL IDENTITY(1,1))" ) def test_sequence_ignore_nullability(self): metadata = MetaData() tbl = Table('test', metadata, Column('id', Integer, Sequence(''), nullable=True)) self.assert_compile(schema.CreateTable(tbl), "CREATE TABLE test (id INTEGER NOT NULL IDENTITY(1,1))" ) def test_index_clustering(self): metadata = MetaData() tbl = Table('test', metadata, Column('id', Integer)) idx = Index("foo", tbl.c.id, mssql_clustered=True) self.assert_compile(schema.CreateIndex(idx), "CREATE CLUSTERED INDEX foo ON test (id)" ) def test_index_ordering(self): metadata = MetaData() tbl = Table('test', metadata, Column('x', Integer), Column('y', Integer), Column('z', Integer)) idx = Index("foo", tbl.c.x.desc(), "y") self.assert_compile(schema.CreateIndex(idx), "CREATE INDEX foo ON test (x DESC, y)" ) def test_create_index_expr(self): m = MetaData() t1 = Table('foo', m, Column('x', Integer) ) self.assert_compile( schema.CreateIndex(Index("bar", t1.c.x > 5)), "CREATE INDEX bar ON foo (x > 5)" ) def test_drop_index_w_schema(self): m = MetaData() t1 = Table('foo', m, Column('x', Integer), schema='bar' ) self.assert_compile( schema.DropIndex(Index("idx_foo", t1.c.x)), "DROP INDEX idx_foo ON bar.foo" ) def test_index_extra_include_1(self): metadata = MetaData() tbl = Table('test', metadata, Column('x', Integer), Column('y', Integer), Column('z', Integer)) idx = Index("foo", tbl.c.x, mssql_include=['y']) self.assert_compile(schema.CreateIndex(idx), "CREATE INDEX foo ON test (x) INCLUDE (y)" ) def test_index_extra_include_2(self): metadata = MetaData() tbl = Table('test', metadata, Column('x', Integer), Column('y', Integer), Column('z', Integer)) idx = Index("foo", tbl.c.x, mssql_include=[tbl.c.y]) self.assert_compile(schema.CreateIndex(idx), "CREATE INDEX foo ON test (x) INCLUDE (y)" ) class SchemaTest(fixtures.TestBase): def setup(self): t = Table('sometable', MetaData(), Column('pk_column', Integer), Column('test_column', String) ) self.column = t.c.test_column dialect = mssql.dialect() self.ddl_compiler = dialect.ddl_compiler(dialect, schema.CreateTable(t)) def _column_spec(self): return self.ddl_compiler.get_column_specification(self.column) def test_that_mssql_default_nullability_emits_null(self): eq_("test_column VARCHAR(max) NULL", self._column_spec()) def test_that_mssql_none_nullability_does_not_emit_nullability(self): self.column.nullable = None eq_("test_column VARCHAR(max)", self._column_spec()) def test_that_mssql_specified_nullable_emits_null(self): self.column.nullable = True eq_("test_column VARCHAR(max) NULL", self._column_spec()) def test_that_mssql_specified_not_nullable_emits_not_null(self): self.column.nullable = False eq_("test_column VARCHAR(max) NOT NULL", self._column_spec()) SQLAlchemy-0.8.4/test/dialect/mssql/test_engine.py0000644000076500000240000001433312251147172022640 0ustar classicstaff00000000000000# -*- encoding: utf-8 from sqlalchemy.testing import eq_, engines from sqlalchemy import * from sqlalchemy import exc from sqlalchemy.dialects.mssql import pyodbc, pymssql from sqlalchemy.engine import url from sqlalchemy.testing import fixtures from sqlalchemy import testing from sqlalchemy.testing import assert_raises_message class ParseConnectTest(fixtures.TestBase): def 
test_pyodbc_connect_dsn_trusted(self): dialect = pyodbc.dialect() u = url.make_url('mssql://mydsn') connection = dialect.create_connect_args(u) eq_([['dsn=mydsn;Trusted_Connection=Yes'], {}], connection) def test_pyodbc_connect_old_style_dsn_trusted(self): dialect = pyodbc.dialect() u = url.make_url('mssql:///?dsn=mydsn') connection = dialect.create_connect_args(u) eq_([['dsn=mydsn;Trusted_Connection=Yes'], {}], connection) def test_pyodbc_connect_dsn_non_trusted(self): dialect = pyodbc.dialect() u = url.make_url('mssql://username:password@mydsn') connection = dialect.create_connect_args(u) eq_([['dsn=mydsn;UID=username;PWD=password'], {}], connection) def test_pyodbc_connect_dsn_extra(self): dialect = pyodbc.dialect() u = \ url.make_url('mssql://username:password@mydsn/?LANGUAGE=us_' 'english&foo=bar') connection = dialect.create_connect_args(u) dsn_string = connection[0][0] assert ";LANGUAGE=us_english" in dsn_string assert ";foo=bar" in dsn_string def test_pyodbc_connect(self): dialect = pyodbc.dialect() u = url.make_url('mssql://username:password@hostspec/database') connection = dialect.create_connect_args(u) eq_([['DRIVER={SQL Server};Server=hostspec;Database=database;UI' 'D=username;PWD=password'], {}], connection) def test_pyodbc_connect_comma_port(self): dialect = pyodbc.dialect() u = \ url.make_url('mssql://username:password@hostspec:12345/data' 'base') connection = dialect.create_connect_args(u) eq_([['DRIVER={SQL Server};Server=hostspec,12345;Database=datab' 'ase;UID=username;PWD=password'], {}], connection) def test_pyodbc_connect_config_port(self): dialect = pyodbc.dialect() u = \ url.make_url('mssql://username:password@hostspec/database?p' 'ort=12345') connection = dialect.create_connect_args(u) eq_([['DRIVER={SQL Server};Server=hostspec;Database=database;UI' 'D=username;PWD=password;port=12345'], {}], connection) def test_pyodbc_extra_connect(self): dialect = pyodbc.dialect() u = \ url.make_url('mssql://username:password@hostspec/database?L' 'ANGUAGE=us_english&foo=bar') connection = dialect.create_connect_args(u) eq_(connection[1], {}) eq_(connection[0][0] in ('DRIVER={SQL Server};Server=hostspec;Database=database;' 'UID=username;PWD=password;foo=bar;LANGUAGE=us_english', 'DRIVER={SQL Server};Server=hostspec;Database=database;UID=' 'username;PWD=password;LANGUAGE=us_english;foo=bar'), True) def test_pyodbc_odbc_connect(self): dialect = pyodbc.dialect() u = \ url.make_url('mssql:///?odbc_connect=DRIVER%3D%7BSQL+Server' '%7D%3BServer%3Dhostspec%3BDatabase%3Ddatabase' '%3BUID%3Dusername%3BPWD%3Dpassword') connection = dialect.create_connect_args(u) eq_([['DRIVER={SQL Server};Server=hostspec;Database=database;UI' 'D=username;PWD=password'], {}], connection) def test_pyodbc_odbc_connect_with_dsn(self): dialect = pyodbc.dialect() u = \ url.make_url('mssql:///?odbc_connect=dsn%3Dmydsn%3BDatabase' '%3Ddatabase%3BUID%3Dusername%3BPWD%3Dpassword' ) connection = dialect.create_connect_args(u) eq_([['dsn=mydsn;Database=database;UID=username;PWD=password'], {}], connection) def test_pyodbc_odbc_connect_ignores_other_values(self): dialect = pyodbc.dialect() u = \ url.make_url('mssql://userdiff:passdiff@localhost/dbdiff?od' 'bc_connect=DRIVER%3D%7BSQL+Server%7D%3BServer' '%3Dhostspec%3BDatabase%3Ddatabase%3BUID%3Duse' 'rname%3BPWD%3Dpassword') connection = dialect.create_connect_args(u) eq_([['DRIVER={SQL Server};Server=hostspec;Database=database;UI' 'D=username;PWD=password'], {}], connection) def test_pymssql_port_setting(self): dialect = pymssql.dialect() u = \ 
url.make_url('mssql+pymssql://scott:tiger@somehost/test') connection = dialect.create_connect_args(u) eq_( [[], {'host': 'somehost', 'password': 'tiger', 'user': 'scott', 'database': 'test'}], connection ) u = \ url.make_url('mssql+pymssql://scott:tiger@somehost:5000/test') connection = dialect.create_connect_args(u) eq_( [[], {'host': 'somehost:5000', 'password': 'tiger', 'user': 'scott', 'database': 'test'}], connection ) def test_pymssql_disconnect(self): dialect = pymssql.dialect() for error in [ 'Adaptive Server connection timed out', 'message 20003', "Error 10054", "Not connected to any MS SQL server", "Connection is closed" ]: eq_(dialect.is_disconnect(error, None, None), True) eq_(dialect.is_disconnect("not an error", None, None), False) @testing.only_on(['mssql+pyodbc', 'mssql+pymssql'], "FreeTDS specific test") def test_bad_freetds_warning(self): engine = engines.testing_engine() def _bad_version(connection): return 95, 10, 255 engine.dialect._get_server_version_info = _bad_version assert_raises_message(exc.SAWarning, 'Unrecognized server version info', engine.connect) SQLAlchemy-0.8.4/test/dialect/mssql/test_query.py0000644000076500000240000004063612251147172022545 0ustar classicstaff00000000000000# -*- encoding: utf-8 from sqlalchemy.testing import eq_, engines from sqlalchemy import * from sqlalchemy.sql import table, column from sqlalchemy.databases import mssql from sqlalchemy.testing import fixtures, AssertsCompiledSQL from sqlalchemy import testing from sqlalchemy.util import ue from sqlalchemy import util class SchemaAliasingTest(fixtures.TestBase, AssertsCompiledSQL): """SQL server cannot reference schema-qualified tables in a SELECT statement, they must be aliased. """ __dialect__ = mssql.dialect() def setup(self): metadata = MetaData() self.t1 = table('t1', column('a', Integer), column('b', String), column('c', String), ) self.t2 = Table( 't2', metadata, Column("a", Integer), Column("b", Integer), Column("c", Integer), schema = 'schema' ) def test_result_map(self): s = self.t2.select() c = s.compile(dialect=self.__dialect__) assert self.t2.c.a in set(c.result_map['a'][1]) def test_result_map_use_labels(self): s = self.t2.select(use_labels=True) c = s.compile(dialect=self.__dialect__) assert self.t2.c.a in set(c.result_map['schema_t2_a'][1]) def test_straight_select(self): self.assert_compile(self.t2.select(), "SELECT t2_1.a, t2_1.b, t2_1.c FROM [schema].t2 AS t2_1" ) def test_straight_select_use_labels(self): self.assert_compile( self.t2.select(use_labels=True), "SELECT t2_1.a AS schema_t2_a, t2_1.b AS schema_t2_b, " "t2_1.c AS schema_t2_c FROM [schema].t2 AS t2_1" ) def test_join_to_schema(self): t1, t2 = self.t1, self.t2 self.assert_compile( t1.join(t2, t1.c.a==t2.c.a).select(), "SELECT t1.a, t1.b, t1.c, t2_1.a, t2_1.b, t2_1.c FROM t1 " "JOIN [schema].t2 AS t2_1 ON t2_1.a = t1.a" ) def test_union_schema_to_non(self): t1, t2 = self.t1, self.t2 s = select([t2.c.a, t2.c.b]).apply_labels().\ union( select([t1.c.a, t1.c.b]).apply_labels() ).alias().select() self.assert_compile( s, "SELECT anon_1.schema_t2_a, anon_1.schema_t2_b FROM " "(SELECT t2_1.a AS schema_t2_a, t2_1.b AS schema_t2_b " "FROM [schema].t2 AS t2_1 UNION SELECT t1.a AS t1_a, " "t1.b AS t1_b FROM t1) AS anon_1" ) def test_column_subquery_to_alias(self): a1 = self.t2.alias('a1') s = select([self.t2, select([a1.c.a]).as_scalar()]) self.assert_compile( s, "SELECT t2_1.a, t2_1.b, t2_1.c, " "(SELECT a1.a FROM [schema].t2 AS a1) " "AS anon_1 FROM [schema].t2 AS t2_1" ) class IdentityInsertTest(fixtures.TestBase, 
AssertsCompiledSQL): __only_on__ = 'mssql' __dialect__ = mssql.MSDialect() @classmethod def setup_class(cls): global metadata, cattable metadata = MetaData(testing.db) cattable = Table('cattable', metadata, Column('id', Integer), Column('description', String(50)), PrimaryKeyConstraint('id', name='PK_cattable'), ) def setup(self): metadata.create_all() def teardown(self): metadata.drop_all() def test_compiled(self): self.assert_compile(cattable.insert().values(id=9, description='Python'), 'INSERT INTO cattable (id, description) ' 'VALUES (:id, :description)') def test_execute(self): cattable.insert().values(id=9, description='Python').execute() cats = cattable.select().order_by(cattable.c.id).execute() eq_([(9, 'Python')], list(cats)) result = cattable.insert().values(description='PHP').execute() eq_([10], result.inserted_primary_key) lastcat = cattable.select().order_by(desc(cattable.c.id)).execute() eq_((10, 'PHP'), lastcat.first()) def test_executemany(self): cattable.insert().execute([{'id': 89, 'description': 'Python'}, {'id': 8, 'description': 'Ruby'}, {'id': 3, 'description': 'Perl'}, {'id': 1, 'description': 'Java'}]) cats = cattable.select().order_by(cattable.c.id).execute() eq_([(1, 'Java'), (3, 'Perl'), (8, 'Ruby'), (89, 'Python')], list(cats)) cattable.insert().execute([{'description': 'PHP'}, {'description': 'Smalltalk'}]) lastcats = \ cattable.select().order_by(desc(cattable.c.id)).limit(2).execute() eq_([(91, 'Smalltalk'), (90, 'PHP')], list(lastcats)) class QueryUnicodeTest(fixtures.TestBase): __only_on__ = 'mssql' def test_convert_unicode(self): meta = MetaData(testing.db) t1 = Table('unitest_table', meta, Column('id', Integer, primary_key=True), Column('descr', mssql.MSText(convert_unicode=True))) meta.create_all() con = testing.db.connect() # encode in UTF-8 (sting object) because this is the default # dialect encoding con.execute(ue("insert into unitest_table values ('bien u\ umang\xc3\xa9')").encode('UTF-8')) try: r = t1.select().execute().first() assert isinstance(r[1], util.text_type), \ '%s is %s instead of unicode, working on %s' % (r[1], type(r[1]), meta.bind) finally: meta.drop_all() from sqlalchemy.testing.assertsql import ExactSQL class QueryTest(testing.AssertsExecutionResults, fixtures.TestBase): __only_on__ = 'mssql' def test_fetchid_trigger(self): """ Verify identity return value on inserting to a trigger table. MSSQL's OUTPUT INSERTED clause does not work for the case of a table having an identity (autoincrement) primary key column, and which also has a trigger configured to fire upon each insert and subsequently perform an insert into a different table. SQLALchemy's MSSQL dialect by default will attempt to use an OUTPUT_INSERTED clause, which in this case will raise the following error: ProgrammingError: (ProgrammingError) ('42000', 334, "[Microsoft][SQL Server Native Client 10.0][SQL Server]The target table 't1' of the DML statement cannot have any enabled triggers if the statement contains an OUTPUT clause without INTO clause.", 7748) 'INSERT INTO t1 (descr) OUTPUT inserted.id VALUES (?)' ('hello',) This test verifies a workaround, which is to rely on the older SCOPE_IDENTITY() call, which still works for this scenario. To enable the workaround, the Table must be instantiated with the init parameter 'implicit_returning = False'. """ #todo: this same test needs to be tried in a multithreaded context # with multiple threads inserting to the same table. #todo: check whether this error also occurs with clients other # than the SQL Server Native Client. 
Maybe an assert_raises # test should be written. meta = MetaData(testing.db) t1 = Table('t1', meta, Column('id', Integer, Sequence('fred', 100, 1), primary_key=True), Column('descr', String(200)), # the following flag will prevent the # MSSQLCompiler.returning_clause from getting called, # though the ExecutionContext will still have a # _select_lastrowid, so the SELECT SCOPE_IDENTITY() will # hopefully be called instead. implicit_returning = False ) t2 = Table('t2', meta, Column('id', Integer, Sequence('fred', 200, 1), primary_key=True), Column('descr', String(200))) meta.create_all() con = testing.db.connect() con.execute("""create trigger paj on t1 for insert as insert into t2 (descr) select descr from inserted""") try: tr = con.begin() r = con.execute(t2.insert(), descr='hello') self.assert_(r.inserted_primary_key == [200]) r = con.execute(t1.insert(), descr='hello') self.assert_(r.inserted_primary_key == [100]) finally: tr.commit() con.execute("""drop trigger paj""") meta.drop_all() @testing.provide_metadata def test_disable_scope_identity(self): engine = engines.testing_engine(options={"use_scope_identity":False}) metadata = self.metadata metadata.bind = engine t1 = Table('t1', metadata, Column('id', Integer, primary_key=True), implicit_returning=False ) metadata.create_all() self.assert_sql_execution( testing.db, lambda: engine.execute(t1.insert()), ExactSQL("INSERT INTO t1 DEFAULT VALUES"), # we dont have an event for # "SELECT @@IDENTITY" part here. # this will be in 0.8 with #2459 ) assert not engine.dialect.use_scope_identity def test_insertid_schema(self): meta = MetaData(testing.db) con = testing.db.connect() con.execute('create schema paj') tbl = Table('test', meta, Column('id', Integer, primary_key=True), schema='paj') tbl.create() try: tbl.insert().execute({'id':1}) finally: tbl.drop() con.execute('drop schema paj') def test_returning_no_autoinc(self): meta = MetaData(testing.db) table = Table('t1', meta, Column('id', Integer, primary_key=True), Column('data', String(50))) table.create() try: result = table.insert().values(id=1, data=func.lower('SomeString' )).returning(table.c.id, table.c.data).execute() eq_(result.fetchall(), [(1, 'somestring')]) finally: # this will hang if the "SET IDENTITY_INSERT t1 OFF" occurs # before the result is fetched table.drop() def test_delete_schema(self): meta = MetaData(testing.db) con = testing.db.connect() con.execute('create schema paj') tbl = Table('test', meta, Column('id', Integer, primary_key=True), schema='paj') tbl.create() try: tbl.insert().execute({'id': 1}) tbl.delete(tbl.c.id == 1).execute() finally: tbl.drop() con.execute('drop schema paj') def test_insertid_reserved(self): meta = MetaData(testing.db) table = Table( 'select', meta, Column('col', Integer, primary_key=True) ) table.create() meta2 = MetaData(testing.db) try: table.insert().execute(col=7) finally: table.drop() class Foo(object): def __init__(self, **kw): for k in kw: setattr(self, k, kw[k]) def full_text_search_missing(): """Test if full text search is not implemented and return False if it is and True otherwise.""" try: connection = testing.db.connect() try: connection.execute('CREATE FULLTEXT CATALOG Catalog AS ' 'DEFAULT') return False except: return True finally: connection.close() class MatchTest(fixtures.TestBase, AssertsCompiledSQL): __only_on__ = 'mssql' __skip_if__ = full_text_search_missing, @classmethod def setup_class(cls): global metadata, cattable, matchtable metadata = MetaData(testing.db) cattable = Table('cattable', metadata, Column('id', 
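            # The fixtures assembled here attach CREATE FULLTEXT INDEX
            # statements to the tables via DDL(...).execute_at('after-create')
            # and then issue a WAITFOR DELAY, presumably to give the full text
            # catalog time to populate before the match() queries run; match()
            # itself compiles to the T-SQL CONTAINS() predicate (see
            # test_expression below).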
Integer), Column('description', String(50)), PrimaryKeyConstraint('id', name='PK_cattable')) matchtable = Table( 'matchtable', metadata, Column('id', Integer), Column('title', String(200)), Column('category_id', Integer, ForeignKey('cattable.id')), PrimaryKeyConstraint('id', name='PK_matchtable'), ) DDL("""CREATE FULLTEXT INDEX ON cattable (description) KEY INDEX PK_cattable""").execute_at('after-create' , matchtable) DDL("""CREATE FULLTEXT INDEX ON matchtable (title) KEY INDEX PK_matchtable""").execute_at('after-create' , matchtable) metadata.create_all() cattable.insert().execute([{'id': 1, 'description': 'Python'}, {'id': 2, 'description': 'Ruby'}]) matchtable.insert().execute([{'id': 1, 'title' : 'Agile Web Development with Rails' , 'category_id': 2}, {'id': 2, 'title': 'Dive Into Python', 'category_id': 1}, {'id': 3, 'title' : "Programming Matz's Ruby", 'category_id': 2}, {'id': 4, 'title' : 'The Definitive Guide to Django', 'category_id': 1}, {'id': 5, 'title' : 'Python in a Nutshell', 'category_id': 1}]) DDL("WAITFOR DELAY '00:00:05'" ).execute(bind=engines.testing_engine()) @classmethod def teardown_class(cls): metadata.drop_all() connection = testing.db.connect() connection.execute("DROP FULLTEXT CATALOG Catalog") connection.close() def test_expression(self): self.assert_compile(matchtable.c.title.match('somstr'), 'CONTAINS (matchtable.title, ?)') def test_simple_match(self): results = \ matchtable.select().where(matchtable.c.title.match('python' )).order_by(matchtable.c.id).execute().fetchall() eq_([2, 5], [r.id for r in results]) def test_simple_match_with_apostrophe(self): results = \ matchtable.select().where(matchtable.c.title.match("Matz's" )).execute().fetchall() eq_([3], [r.id for r in results]) def test_simple_prefix_match(self): results = \ matchtable.select().where(matchtable.c.title.match('"nut*"' )).execute().fetchall() eq_([5], [r.id for r in results]) def test_simple_inflectional_match(self): results = \ matchtable.select().where( matchtable.c.title.match('FORMSOF(INFLECTIONAL, "dives")' )).execute().fetchall() eq_([2], [r.id for r in results]) def test_or_match(self): results1 = \ matchtable.select().where(or_(matchtable.c.title.match('nutshell' ), matchtable.c.title.match('ruby' ))).order_by(matchtable.c.id).execute().fetchall() eq_([3, 5], [r.id for r in results1]) results2 = \ matchtable.select().where( matchtable.c.title.match('nutshell OR ruby' )).order_by(matchtable.c.id).execute().fetchall() eq_([3, 5], [r.id for r in results2]) def test_and_match(self): results1 = \ matchtable.select().where(and_(matchtable.c.title.match('python' ), matchtable.c.title.match('nutshell' ))).execute().fetchall() eq_([5], [r.id for r in results1]) results2 = \ matchtable.select().where( matchtable.c.title.match('python AND nutshell' )).execute().fetchall() eq_([5], [r.id for r in results2]) def test_match_across_joins(self): results = matchtable.select().where(and_(cattable.c.id == matchtable.c.category_id, or_(cattable.c.description.match('Ruby'), matchtable.c.title.match('nutshell' )))).order_by(matchtable.c.id).execute().fetchall() eq_([1, 3, 5], [r.id for r in results]) SQLAlchemy-0.8.4/test/dialect/mssql/test_reflection.py0000644000076500000240000001762612251147172023535 0ustar classicstaff00000000000000# -*- encoding: utf-8 from sqlalchemy.testing import eq_ from sqlalchemy import * from sqlalchemy import types, schema, event from sqlalchemy.databases import mssql from sqlalchemy.testing import fixtures, AssertsCompiledSQL, \ ComparesTables from sqlalchemy import testing 
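# The reflection tests below drive both Table(..., autoload=True,
# autoload_with=testing.db) and the Inspector API; on MSSQL the schema
# argument may be database-qualified, e.g. (placeholder names):
#
#     insp = inspect(testing.db)
#     insp.get_foreign_keys("bar", schema="<dbname>.<owner>")
#
# as exercised in test_db_qualified_items.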
from sqlalchemy.engine.reflection import Inspector from sqlalchemy import util class ReflectionTest(fixtures.TestBase, ComparesTables): __only_on__ = 'mssql' @testing.provide_metadata def test_basic_reflection(self): meta = self.metadata users = Table( 'engine_users', meta, Column('user_id', types.INT, primary_key=True), Column('user_name', types.VARCHAR(20), nullable=False), Column('test1', types.CHAR(5), nullable=False), Column('test2', types.Float(5), nullable=False), Column('test3', types.Text), Column('test4', types.Numeric, nullable=False), Column('test5', types.DateTime), Column('parent_user_id', types.Integer, ForeignKey('engine_users.user_id')), Column('test6', types.DateTime, nullable=False), Column('test7', types.Text), Column('test8', types.LargeBinary), Column('test_passivedefault2', types.Integer, server_default='5'), Column('test9', types.BINARY(100)), Column('test_numeric', types.Numeric()), ) addresses = Table( 'engine_email_addresses', meta, Column('address_id', types.Integer, primary_key=True), Column('remote_user_id', types.Integer, ForeignKey(users.c.user_id)), Column('email_address', types.String(20)), ) meta.create_all() meta2 = MetaData() reflected_users = Table('engine_users', meta2, autoload=True, autoload_with=testing.db) reflected_addresses = Table('engine_email_addresses', meta2, autoload=True, autoload_with=testing.db) self.assert_tables_equal(users, reflected_users) self.assert_tables_equal(addresses, reflected_addresses) @testing.provide_metadata def test_identity(self): metadata = self.metadata table = Table( 'identity_test', metadata, Column('col1', Integer, Sequence('fred', 2, 3), primary_key=True) ) table.create() meta2 = MetaData(testing.db) table2 = Table('identity_test', meta2, autoload=True) sequence = isinstance(table2.c['col1'].default, schema.Sequence) \ and table2.c['col1'].default assert sequence.start == 2 assert sequence.increment == 3 @testing.emits_warning("Did not recognize") @testing.provide_metadata def test_skip_types(self): metadata = self.metadata testing.db.execute(""" create table foo (id integer primary key, data xml) """) t1 = Table('foo', metadata, autoload=True) assert isinstance(t1.c.id.type, Integer) assert isinstance(t1.c.data.type, types.NullType) @testing.provide_metadata def test_db_qualified_items(self): metadata = self.metadata Table('foo', metadata, Column('id', Integer, primary_key=True)) Table('bar', metadata, Column('id', Integer, primary_key=True), Column('foo_id', Integer, ForeignKey('foo.id', name="fkfoo")) ) metadata.create_all() dbname = testing.db.scalar("select db_name()") owner = testing.db.scalar("SELECT user_name()") inspector = inspect(testing.db) bar_via_db = inspector.get_foreign_keys( "bar", schema="%s.%s" % (dbname, owner)) eq_( bar_via_db, [{ 'referred_table': 'foo', 'referred_columns': ['id'], 'referred_schema': 'test.dbo', 'name': 'fkfoo', 'constrained_columns': ['foo_id']}] ) assert testing.db.has_table("bar", schema="test.dbo") m2 = MetaData() Table('bar', m2, schema="test.dbo", autoload=True, autoload_with=testing.db) eq_(m2.tables["test.dbo.foo"].schema, "test.dbo") @testing.provide_metadata def test_indexes_cols(self): metadata = self.metadata t1 = Table('t', metadata, Column('x', Integer), Column('y', Integer)) Index('foo', t1.c.x, t1.c.y) metadata.create_all() m2 = MetaData() t2 = Table('t', m2, autoload=True, autoload_with=testing.db) eq_( set(list(t2.indexes)[0].columns), set([t2.c['x'], t2.c.y]) ) @testing.provide_metadata def test_indexes_cols_with_commas(self): metadata = self.metadata t1 
= Table('t', metadata, Column('x, col', Integer, key='x'), Column('y', Integer) ) Index('foo', t1.c.x, t1.c.y) metadata.create_all() m2 = MetaData() t2 = Table('t', m2, autoload=True, autoload_with=testing.db) eq_( set(list(t2.indexes)[0].columns), set([t2.c['x, col'], t2.c.y]) ) @testing.provide_metadata def test_indexes_cols_with_spaces(self): metadata = self.metadata t1 = Table('t', metadata, Column('x col', Integer, key='x'), Column('y', Integer)) Index('foo', t1.c.x, t1.c.y) metadata.create_all() m2 = MetaData() t2 = Table('t', m2, autoload=True, autoload_with=testing.db) eq_( set(list(t2.indexes)[0].columns), set([t2.c['x col'], t2.c.y]) ) from sqlalchemy.dialects.mssql.information_schema import CoerceUnicode, tables from sqlalchemy.dialects.mssql import base class InfoCoerceUnicodeTest(fixtures.TestBase, AssertsCompiledSQL): def test_info_unicode_coercion(self): dialect = mssql.dialect() value = CoerceUnicode().bind_processor(dialect)('a string') assert isinstance(value, util.text_type) def test_info_unicode_cast_no_2000(self): dialect = mssql.dialect() dialect.server_version_info = base.MS_2000_VERSION stmt = tables.c.table_name == 'somename' self.assert_compile( stmt, "[TABLES_1].[TABLE_NAME] = :TABLE_NAME_1", dialect=dialect ) def test_info_unicode_cast(self): dialect = mssql.dialect() dialect.server_version_info = base.MS_2005_VERSION stmt = tables.c.table_name == 'somename' self.assert_compile( stmt, "[TABLES_1].[TABLE_NAME] = CAST(:TABLE_NAME_1 AS NVARCHAR(max))", dialect=dialect ) class ReflectHugeViewTest(fixtures.TestBase): __only_on__ = 'mssql' def setup(self): self.col_num = 150 self.metadata = MetaData(testing.db) t = Table('base_table', self.metadata, *[ Column("long_named_column_number_%d" % i, Integer) for i in range(self.col_num) ] ) self.view_str = view_str = \ "CREATE VIEW huge_named_view AS SELECT %s FROM base_table" % ( ",".join("long_named_column_number_%d" % i for i in range(self.col_num)) ) assert len(view_str) > 4000 event.listen(t, 'after_create', DDL(view_str) ) event.listen(t, 'before_drop', DDL("DROP VIEW huge_named_view") ) self.metadata.create_all() def teardown(self): self.metadata.drop_all() def test_inspect_view_definition(self): inspector = Inspector.from_engine(testing.db) view_def = inspector.get_view_definition("huge_named_view") eq_(view_def, self.view_str) SQLAlchemy-0.8.4/test/dialect/mssql/test_types.py0000644000076500000240000006317612251150015022536 0ustar classicstaff00000000000000# -*- encoding: utf-8 from __future__ import with_statement from sqlalchemy.testing import eq_, engines, pickleable import datetime import os from sqlalchemy import * from sqlalchemy import types, schema from sqlalchemy.databases import mssql from sqlalchemy.dialects.mssql.base import TIME from sqlalchemy.testing import fixtures, \ AssertsExecutionResults, ComparesTables from sqlalchemy import testing from sqlalchemy.testing import emits_warning_on import decimal from sqlalchemy.util import b class TimeTypeTest(fixtures.TestBase): def test_result_processor_no_microseconds(self): expected = datetime.time(12, 34, 56) self._assert_result_processor(expected, '12:34:56') def test_result_processor_too_many_microseconds(self): # microsecond must be in 0..999999, should truncate (6 vs 7 digits) expected = datetime.time(12, 34, 56, 123456) self._assert_result_processor(expected, '12:34:56.1234567') def _assert_result_processor(self, expected, value): mssql_time_type = TIME() result_processor = mssql_time_type.result_processor(None, None) eq_(expected, 
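        # SQL Server's TIME type allows up to seven fractional-second digits,
        # while datetime.time stores at most microseconds (six digits), so the
        # result processor truncates the extra digit -- see
        # test_result_processor_too_many_microseconds above.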
result_processor(value)) class TypeDDLTest(fixtures.TestBase): def test_boolean(self): "Exercise type specification for boolean type." columns = [ # column type, args, kwargs, expected ddl (Boolean, [], {}, 'BIT'), ] metadata = MetaData() table_args = ['test_mssql_boolean', metadata] for index, spec in enumerate(columns): type_, args, kw, res = spec table_args.append( Column('c%s' % index, type_(*args, **kw), nullable=None)) boolean_table = Table(*table_args) dialect = mssql.dialect() gen = dialect.ddl_compiler(dialect, schema.CreateTable(boolean_table)) for col in boolean_table.c: index = int(col.name[1:]) testing.eq_(gen.get_column_specification(col), "%s %s" % (col.name, columns[index][3])) self.assert_(repr(col)) def test_numeric(self): "Exercise type specification and options for numeric types." columns = [ # column type, args, kwargs, expected ddl (types.NUMERIC, [], {}, 'NUMERIC'), (types.NUMERIC, [None], {}, 'NUMERIC'), (types.NUMERIC, [12, 4], {}, 'NUMERIC(12, 4)'), (types.Float, [], {}, 'FLOAT'), (types.Float, [None], {}, 'FLOAT'), (types.Float, [12], {}, 'FLOAT(12)'), (mssql.MSReal, [], {}, 'REAL'), (types.Integer, [], {}, 'INTEGER'), (types.BigInteger, [], {}, 'BIGINT'), (mssql.MSTinyInteger, [], {}, 'TINYINT'), (types.SmallInteger, [], {}, 'SMALLINT'), ] metadata = MetaData() table_args = ['test_mssql_numeric', metadata] for index, spec in enumerate(columns): type_, args, kw, res = spec table_args.append( Column('c%s' % index, type_(*args, **kw), nullable=None)) numeric_table = Table(*table_args) dialect = mssql.dialect() gen = dialect.ddl_compiler(dialect, schema.CreateTable(numeric_table)) for col in numeric_table.c: index = int(col.name[1:]) testing.eq_(gen.get_column_specification(col), "%s %s" % (col.name, columns[index][3])) self.assert_(repr(col)) def test_char(self): """Exercise COLLATE-ish options on string types.""" columns = [ (mssql.MSChar, [], {}, 'CHAR'), (mssql.MSChar, [1], {}, 'CHAR(1)'), (mssql.MSChar, [1], {'collation': 'Latin1_General_CI_AS'}, 'CHAR(1) COLLATE Latin1_General_CI_AS'), (mssql.MSNChar, [], {}, 'NCHAR'), (mssql.MSNChar, [1], {}, 'NCHAR(1)'), (mssql.MSNChar, [1], {'collation': 'Latin1_General_CI_AS'}, 'NCHAR(1) COLLATE Latin1_General_CI_AS'), (mssql.MSString, [], {}, 'VARCHAR(max)'), (mssql.MSString, [1], {}, 'VARCHAR(1)'), (mssql.MSString, [1], {'collation': 'Latin1_General_CI_AS'}, 'VARCHAR(1) COLLATE Latin1_General_CI_AS'), (mssql.MSNVarchar, [], {}, 'NVARCHAR(max)'), (mssql.MSNVarchar, [1], {}, 'NVARCHAR(1)'), (mssql.MSNVarchar, [1], {'collation': 'Latin1_General_CI_AS'}, 'NVARCHAR(1) COLLATE Latin1_General_CI_AS'), (mssql.MSText, [], {}, 'TEXT'), (mssql.MSText, [], {'collation': 'Latin1_General_CI_AS'}, 'TEXT COLLATE Latin1_General_CI_AS'), (mssql.MSNText, [], {}, 'NTEXT'), (mssql.MSNText, [], {'collation': 'Latin1_General_CI_AS'}, 'NTEXT COLLATE Latin1_General_CI_AS'), ] metadata = MetaData() table_args = ['test_mssql_charset', metadata] for index, spec in enumerate(columns): type_, args, kw, res = spec table_args.append( Column('c%s' % index, type_(*args, **kw), nullable=None)) charset_table = Table(*table_args) dialect = mssql.dialect() gen = dialect.ddl_compiler(dialect, schema.CreateTable(charset_table)) for col in charset_table.c: index = int(col.name[1:]) testing.eq_(gen.get_column_specification(col), "%s %s" % (col.name, columns[index][3])) self.assert_(repr(col)) def test_timestamp(self): """Exercise TIMESTAMP column.""" dialect = mssql.dialect() metadata = MetaData() spec, expected = (TIMESTAMP, 'TIMESTAMP') t = Table('mssql_ts', 
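        # Same pattern as the other TypeDDLTest cases: build a Table from the
        # spec, run it through the dialect's ddl_compiler, and compare
        # get_column_specification() output with the expected DDL fragment --
        # here a single TIMESTAMP column next to an INTEGER primary key.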
metadata, Column('id', Integer, primary_key=True), Column('t', spec, nullable=None)) gen = dialect.ddl_compiler(dialect, schema.CreateTable(t)) testing.eq_(gen.get_column_specification(t.c.t), "t %s" % expected) self.assert_(repr(t.c.t)) def test_money(self): """Exercise type specification for money types.""" columns = [(mssql.MSMoney, [], {}, 'MONEY'), (mssql.MSSmallMoney, [], {}, 'SMALLMONEY')] metadata = MetaData() table_args = ['test_mssql_money', metadata] for index, spec in enumerate(columns): type_, args, kw, res = spec table_args.append(Column('c%s' % index, type_(*args, **kw), nullable=None)) money_table = Table(*table_args) dialect = mssql.dialect() gen = dialect.ddl_compiler(dialect, schema.CreateTable(money_table)) for col in money_table.c: index = int(col.name[1:]) testing.eq_(gen.get_column_specification(col), '%s %s' % (col.name, columns[index][3])) self.assert_(repr(col)) def test_binary(self): "Exercise type specification for binary types." columns = [ # column type, args, kwargs, expected ddl (mssql.MSBinary, [], {}, 'BINARY'), (mssql.MSBinary, [10], {}, 'BINARY(10)'), (types.BINARY, [], {}, 'BINARY'), (types.BINARY, [10], {}, 'BINARY(10)'), (mssql.MSVarBinary, [], {}, 'VARBINARY(max)'), (mssql.MSVarBinary, [10], {}, 'VARBINARY(10)'), (types.VARBINARY, [10], {}, 'VARBINARY(10)'), (types.VARBINARY, [], {}, 'VARBINARY(max)'), (mssql.MSImage, [], {}, 'IMAGE'), (mssql.IMAGE, [], {}, 'IMAGE'), (types.LargeBinary, [], {}, 'IMAGE'), ] metadata = MetaData() table_args = ['test_mssql_binary', metadata] for index, spec in enumerate(columns): type_, args, kw, res = spec table_args.append(Column('c%s' % index, type_(*args, **kw), nullable=None)) binary_table = Table(*table_args) dialect = mssql.dialect() gen = dialect.ddl_compiler(dialect, schema.CreateTable(binary_table)) for col in binary_table.c: index = int(col.name[1:]) testing.eq_(gen.get_column_specification(col), '%s %s' % (col.name, columns[index][3])) self.assert_(repr(col)) class TypeRoundTripTest(fixtures.TestBase, AssertsExecutionResults, ComparesTables): __only_on__ = 'mssql' @classmethod def setup_class(cls): global metadata metadata = MetaData(testing.db) def teardown(self): metadata.drop_all() @testing.fails_on_everything_except('mssql+pyodbc', 'this is some pyodbc-specific feature') def test_decimal_notation(self): numeric_table = Table('numeric_table', metadata, Column('id', Integer, Sequence('numeric_id_seq', optional=True), primary_key=True), Column('numericcol', Numeric(precision=38, scale=20, asdecimal=True))) metadata.create_all() test_items = [decimal.Decimal(d) for d in ( '1500000.00000000000000000000', '-1500000.00000000000000000000', '1500000', '0.0000000000000000002', '0.2', '-0.0000000000000000002', '-2E-2', '156666.458923543', '-156666.458923543', '1', '-1', '-1234', '1234', '2E-12', '4E8', '3E-6', '3E-7', '4.1', '1E-1', '1E-2', '1E-3', '1E-4', '1E-5', '1E-6', '1E-7', '1E-1', '1E-8', '0.2732E2', '-0.2432E2', '4.35656E2', '-02452E-2', '45125E-2', '1234.58965E-2', '1.521E+15', '-1E-25', '1E-25', '1254E-25', '-1203E-25', '0', '-0.00', '-0', '4585E12', '000000000000000000012', '000000000000.32E12', '00000000000000.1E+12', '000000000000.2E-32', )] for value in test_items: numeric_table.insert().execute(numericcol=value) for value in select([numeric_table.c.numericcol]).execute(): assert value[0] in test_items, "%r not in test_items" % value[0] def test_float(self): float_table = Table('float_table', metadata, Column('id', Integer, Sequence('numeric_id_seq', optional=True), primary_key=True), 
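            # Sequence('numeric_id_seq', optional=True) is not created as a
            # real sequence here: the optional flag marks it as needed only on
            # backends without native autoincrement, and on SQL Server the
            # integer primary key is emitted as an IDENTITY column instead
            # (compare test_sequence_start_0 in the compiler tests).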
Column('floatcol', Float())) metadata.create_all() try: test_items = [float(d) for d in ( '1500000.00000000000000000000', '-1500000.00000000000000000000', '1500000', '0.0000000000000000002', '0.2', '-0.0000000000000000002', '156666.458923543', '-156666.458923543', '1', '-1', '1234', '2E-12', '4E8', '3E-6', '3E-7', '4.1', '1E-1', '1E-2', '1E-3', '1E-4', '1E-5', '1E-6', '1E-7', '1E-8', )] for value in test_items: float_table.insert().execute(floatcol=value) except Exception: raise # todo this should suppress warnings, but it does not @emits_warning_on('mssql+mxodbc', r'.*does not have any indexes.*') def test_dates(self): "Exercise type specification for date types." columns = [ # column type, args, kwargs, expected ddl (mssql.MSDateTime, [], {}, 'DATETIME', []), (types.DATE, [], {}, 'DATE', ['>=', (10,)]), (types.Date, [], {}, 'DATE', ['>=', (10,)]), (types.Date, [], {}, 'DATETIME', ['<', (10,)], mssql.MSDateTime), (mssql.MSDate, [], {}, 'DATE', ['>=', (10,)]), (mssql.MSDate, [], {}, 'DATETIME', ['<', (10,)], mssql.MSDateTime), (types.TIME, [], {}, 'TIME', ['>=', (10,)]), (types.Time, [], {}, 'TIME', ['>=', (10,)]), (mssql.MSTime, [], {}, 'TIME', ['>=', (10,)]), (mssql.MSTime, [1], {}, 'TIME(1)', ['>=', (10,)]), (types.Time, [], {}, 'DATETIME', ['<', (10,)], mssql.MSDateTime), (mssql.MSTime, [], {}, 'TIME', ['>=', (10,)]), (mssql.MSSmallDateTime, [], {}, 'SMALLDATETIME', []), (mssql.MSDateTimeOffset, [], {}, 'DATETIMEOFFSET', ['>=', (10,)]), (mssql.MSDateTimeOffset, [1], {}, 'DATETIMEOFFSET(1)', ['>=', (10,)]), (mssql.MSDateTime2, [], {}, 'DATETIME2', ['>=', (10,)]), (mssql.MSDateTime2, [1], {}, 'DATETIME2(1)', ['>=', (10,)]), ] table_args = ['test_mssql_dates', metadata] for index, spec in enumerate(columns): type_, args, kw, res, requires = spec[0:5] if requires and testing._is_excluded('mssql', *requires) \ or not requires: c = Column('c%s' % index, type_(*args, **kw), nullable=None) testing.db.dialect.type_descriptor(c.type) table_args.append(c) dates_table = Table(*table_args) gen = testing.db.dialect.ddl_compiler(testing.db.dialect, schema.CreateTable(dates_table)) for col in dates_table.c: index = int(col.name[1:]) testing.eq_(gen.get_column_specification(col), '%s %s' % (col.name, columns[index][3])) self.assert_(repr(col)) dates_table.create(checkfirst=True) reflected_dates = Table('test_mssql_dates', MetaData(testing.db), autoload=True) for col in reflected_dates.c: self.assert_types_base(col, dates_table.c[col.key]) def test_date_roundtrip(self): t = Table('test_dates', metadata, Column('id', Integer, Sequence('datetest_id_seq', optional=True), primary_key=True), Column('adate', Date), Column('atime', Time), Column('adatetime', DateTime)) metadata.create_all() d1 = datetime.date(2007, 10, 30) t1 = datetime.time(11, 2, 32) d2 = datetime.datetime(2007, 10, 30, 11, 2, 32) t.insert().execute(adate=d1, adatetime=d2, atime=t1) t.insert().execute(adate=d2, adatetime=d2, atime=d2) x = t.select().execute().fetchall()[0] self.assert_(x.adate.__class__ == datetime.date) self.assert_(x.atime.__class__ == datetime.time) self.assert_(x.adatetime.__class__ == datetime.datetime) t.delete().execute() t.insert().execute(adate=d1, adatetime=d2, atime=t1) eq_(select([t.c.adate, t.c.atime, t.c.adatetime], t.c.adate == d1).execute().fetchall(), [(d1, t1, d2)]) @emits_warning_on('mssql+mxodbc', r'.*does not have any indexes.*') @testing.provide_metadata def test_binary_reflection(self): "Exercise type specification for binary types." 
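        # The expected DDL strings below mirror TypeDDLTest.test_binary:
        # MSVarBinary / VARBINARY with no length render as VARBINARY(max), an
        # explicit length passes through, and the generic LargeBinary maps to
        # IMAGE.  The test then round-trips the table through reflection and
        # checks that each reflected column type is a subclass of (or the same
        # class as) the type originally given, including its length.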
columns = [ # column type, args, kwargs, expected ddl (mssql.MSBinary, [], {}, 'BINARY'), (mssql.MSBinary, [10], {}, 'BINARY(10)'), (types.BINARY, [], {}, 'BINARY'), (types.BINARY, [10], {}, 'BINARY(10)'), (mssql.MSVarBinary, [], {}, 'VARBINARY(max)'), (mssql.MSVarBinary, [10], {}, 'VARBINARY(10)'), (types.VARBINARY, [10], {}, 'VARBINARY(10)'), (types.VARBINARY, [], {}, 'VARBINARY(max)'), (mssql.MSImage, [], {}, 'IMAGE'), (mssql.IMAGE, [], {}, 'IMAGE'), (types.LargeBinary, [], {}, 'IMAGE'), ] metadata = self.metadata table_args = ['test_mssql_binary', metadata] for index, spec in enumerate(columns): type_, args, kw, res = spec table_args.append(Column('c%s' % index, type_(*args, **kw), nullable=None)) binary_table = Table(*table_args) metadata.create_all() reflected_binary = Table('test_mssql_binary', MetaData(testing.db), autoload=True) for col in reflected_binary.c: c1 = testing.db.dialect.type_descriptor(col.type).__class__ c2 = \ testing.db.dialect.type_descriptor( binary_table.c[col.name].type).__class__ assert issubclass(c1, c2), '%r is not a subclass of %r' \ % (c1, c2) if binary_table.c[col.name].type.length: testing.eq_(col.type.length, binary_table.c[col.name].type.length) def test_autoincrement(self): Table('ai_1', metadata, Column('int_y', Integer, primary_key=True), Column('int_n', Integer, DefaultClause('0'), primary_key=True, autoincrement=False)) Table('ai_2', metadata, Column('int_y', Integer, primary_key=True), Column('int_n', Integer, DefaultClause('0'), primary_key=True, autoincrement=False)) Table('ai_3', metadata, Column('int_n', Integer, DefaultClause('0'), primary_key=True, autoincrement=False), Column('int_y', Integer, primary_key=True)) Table('ai_4', metadata, Column('int_n', Integer, DefaultClause('0'), primary_key=True, autoincrement=False), Column('int_n2', Integer, DefaultClause('0'), primary_key=True, autoincrement=False)) Table('ai_5', metadata, Column('int_y', Integer, primary_key=True), Column('int_n', Integer, DefaultClause('0'), primary_key=True, autoincrement=False)) Table('ai_6', metadata, Column('o1', String(1), DefaultClause('x'), primary_key=True), Column('int_y', Integer, primary_key=True)) Table('ai_7', metadata, Column('o1', String(1), DefaultClause('x'), primary_key=True), Column('o2', String(1), DefaultClause('x'), primary_key=True), Column('int_y', Integer, primary_key=True)) Table('ai_8', metadata, Column('o1', String(1), DefaultClause('x'), primary_key=True), Column('o2', String(1), DefaultClause('x'), primary_key=True)) metadata.create_all() table_names = ['ai_1', 'ai_2', 'ai_3', 'ai_4', 'ai_5', 'ai_6', 'ai_7', 'ai_8'] mr = MetaData(testing.db) for name in table_names: tbl = Table(name, mr, autoload=True) tbl = metadata.tables[name] for c in tbl.c: if c.name.startswith('int_y'): assert c.autoincrement, name assert tbl._autoincrement_column is c, name elif c.name.startswith('int_n'): assert not c.autoincrement, name assert tbl._autoincrement_column is not c, name # mxodbc can't handle scope_identity() with DEFAULT VALUES if testing.db.driver == 'mxodbc': eng = \ [engines.testing_engine(options={'implicit_returning' : True})] else: eng = \ [engines.testing_engine(options={'implicit_returning' : False}), engines.testing_engine(options={'implicit_returning' : True})] for counter, engine in enumerate(eng): engine.execute(tbl.insert()) if 'int_y' in tbl.c: assert engine.scalar(select([tbl.c.int_y])) \ == counter + 1 assert list(engine.execute(tbl.select()).first()).\ count(counter + 1) == 1 else: assert 1 \ not in 
list(engine.execute(tbl.select()).first()) engine.execute(tbl.delete()) class MonkeyPatchedBinaryTest(fixtures.TestBase): __only_on__ = 'mssql+pymssql' def test_unicode(self): module = __import__('pymssql') result = module.Binary('foo') eq_(result, 'foo') def test_bytes(self): module = __import__('pymssql') input = b('\x80\x03]q\x00X\x03\x00\x00\x00oneq\x01a.') expected_result = input result = module.Binary(input) eq_(result, expected_result) class BinaryTest(fixtures.TestBase, AssertsExecutionResults): """Test the Binary and VarBinary types""" __only_on__ = 'mssql' @classmethod def setup_class(cls): global binary_table, MyPickleType class MyPickleType(types.TypeDecorator): impl = PickleType def process_bind_param(self, value, dialect): if value: value.stuff = 'this is modified stuff' return value def process_result_value(self, value, dialect): if value: value.stuff = 'this is the right stuff' return value binary_table = Table( 'binary_table', MetaData(testing.db), Column('primary_id', Integer, Sequence('binary_id_seq', optional=True), primary_key=True), Column('data', mssql.MSVarBinary(8000)), Column('data_image', mssql.MSImage), Column('data_slice', types.BINARY(100)), Column('misc', String(30)), Column('pickled', PickleType), Column('mypickle', MyPickleType), ) binary_table.create() def teardown(self): binary_table.delete().execute() @classmethod def teardown_class(cls): binary_table.drop() def test_binary(self): testobj1 = pickleable.Foo('im foo 1') testobj2 = pickleable.Foo('im foo 2') testobj3 = pickleable.Foo('im foo 3') stream1 = self.load_stream('binary_data_one.dat') stream2 = self.load_stream('binary_data_two.dat') binary_table.insert().execute( primary_id=1, misc='binary_data_one.dat', data=stream1, data_image=stream1, data_slice=stream1[0:100], pickled=testobj1, mypickle=testobj3, ) binary_table.insert().execute( primary_id=2, misc='binary_data_two.dat', data=stream2, data_image=stream2, data_slice=stream2[0:99], pickled=testobj2, ) # TODO: pyodbc does not seem to accept "None" for a VARBINARY # column (data=None). error: [Microsoft][ODBC SQL Server # Driver][SQL Server]Implicit conversion from data type varchar # to varbinary is not allowed. Use the CONVERT function to run # this query. 
(257) binary_table.insert().execute(primary_id=3, # misc='binary_data_two.dat', data=None, data_image=None, # data_slice=stream2[0:99], pickled=None) binary_table.insert().execute(primary_id=3, misc='binary_data_two.dat', data_image=None, data_slice=stream2[0:99], pickled=None) for stmt in \ binary_table.select(order_by=binary_table.c.primary_id), \ text('select * from binary_table order by ' 'binary_table.primary_id', typemap=dict(data=mssql.MSVarBinary(8000), data_image=mssql.MSImage, data_slice=types.BINARY(100), pickled=PickleType, mypickle=MyPickleType), bind=testing.db): l = stmt.execute().fetchall() eq_(list(stream1), list(l[0]['data'])) paddedstream = list(stream1[0:100]) paddedstream.extend(['\x00'] * (100 - len(paddedstream))) eq_(paddedstream, list(l[0]['data_slice'])) eq_(list(stream2), list(l[1]['data'])) eq_(list(stream2), list(l[1]['data_image'])) eq_(testobj1, l[0]['pickled']) eq_(testobj2, l[1]['pickled']) eq_(testobj3.moredata, l[0]['mypickle'].moredata) eq_(l[0]['mypickle'].stuff, 'this is the right stuff') def load_stream(self, name, len=3000): fp = open(os.path.join(os.path.dirname(__file__), "..", "..", name), 'rb') stream = fp.read(len) fp.close() return stream SQLAlchemy-0.8.4/test/dialect/mysql/0000755000076500000240000000000012251151573017764 5ustar classicstaff00000000000000SQLAlchemy-0.8.4/test/dialect/mysql/__init__.py0000644000076500000240000000000012251147172022063 0ustar classicstaff00000000000000SQLAlchemy-0.8.4/test/dialect/mysql/test_compiler.py0000644000076500000240000004376412251150015023213 0ustar classicstaff00000000000000# coding: utf-8 from sqlalchemy.testing import eq_, assert_raises_message from sqlalchemy import * from sqlalchemy import sql, exc, schema, types as sqltypes from sqlalchemy.dialects.mysql import base as mysql from sqlalchemy.testing import fixtures, AssertsCompiledSQL from sqlalchemy import testing class CompileTest(fixtures.TestBase, AssertsCompiledSQL): __dialect__ = mysql.dialect() def test_reserved_words(self): table = Table("mysql_table", MetaData(), Column("col1", Integer), Column("master_ssl_verify_server_cert", Integer)) x = select([table.c.col1, table.c.master_ssl_verify_server_cert]) self.assert_compile(x, "SELECT mysql_table.col1, " "mysql_table.`master_ssl_verify_server_cert` FROM mysql_table") def test_create_index_simple(self): m = MetaData() tbl = Table('testtbl', m, Column('data', String(255))) idx = Index('test_idx1', tbl.c.data) self.assert_compile(schema.CreateIndex(idx), 'CREATE INDEX test_idx1 ON testtbl (data)') def test_create_index_with_length(self): m = MetaData() tbl = Table('testtbl', m, Column('data', String(255))) idx1 = Index('test_idx1', tbl.c.data, mysql_length=10) idx2 = Index('test_idx2', tbl.c.data, mysql_length=5) self.assert_compile(schema.CreateIndex(idx1), 'CREATE INDEX test_idx1 ON testtbl (data(10))') self.assert_compile(schema.CreateIndex(idx2), 'CREATE INDEX test_idx2 ON testtbl (data(5))') def test_create_composite_index_with_length(self): m = MetaData() tbl = Table('testtbl', m, Column('a', String(255)), Column('b', String(255))) idx1 = Index('test_idx1', tbl.c.a, tbl.c.b, mysql_length={'a': 10, 'b': 20}) idx2 = Index('test_idx2', tbl.c.a, tbl.c.b, mysql_length={'a': 15}) idx3 = Index('test_idx3', tbl.c.a, tbl.c.b, mysql_length=30) self.assert_compile( schema.CreateIndex(idx1), 'CREATE INDEX test_idx1 ON testtbl (a(10), b(20))' ) self.assert_compile( schema.CreateIndex(idx2), 'CREATE INDEX test_idx2 ON testtbl (a(15), b)' ) self.assert_compile( schema.CreateIndex(idx3), 'CREATE INDEX 
test_idx3 ON testtbl (a(30), b(30))' ) def test_create_index_with_using(self): m = MetaData() tbl = Table('testtbl', m, Column('data', String(255))) idx1 = Index('test_idx1', tbl.c.data, mysql_using='btree') idx2 = Index('test_idx2', tbl.c.data, mysql_using='hash') self.assert_compile(schema.CreateIndex(idx1), 'CREATE INDEX test_idx1 ON testtbl (data) USING btree') self.assert_compile(schema.CreateIndex(idx2), 'CREATE INDEX test_idx2 ON testtbl (data) USING hash') def test_create_pk_plain(self): m = MetaData() tbl = Table('testtbl', m, Column('data', String(255)), PrimaryKeyConstraint('data')) self.assert_compile(schema.CreateTable(tbl), "CREATE TABLE testtbl (data VARCHAR(255), PRIMARY KEY (data))") def test_create_pk_with_using(self): m = MetaData() tbl = Table('testtbl', m, Column('data', String(255)), PrimaryKeyConstraint('data', mysql_using='btree')) self.assert_compile(schema.CreateTable(tbl), "CREATE TABLE testtbl (data VARCHAR(255), " "PRIMARY KEY (data) USING btree)") def test_create_index_expr(self): m = MetaData() t1 = Table('foo', m, Column('x', Integer) ) self.assert_compile( schema.CreateIndex(Index("bar", t1.c.x > 5)), "CREATE INDEX bar ON foo (x > 5)" ) def test_deferrable_emits_warning(self): m = MetaData() t1 = Table('t1', m, Column('id', Integer, primary_key=True)) t2 = Table('t2', m, Column('id', Integer, ForeignKey('t1.id', deferrable=True), primary_key=True)) assert_raises_message( exc.SAWarning, "The 'deferrable' keyword will no longer be ignored by the MySQL " "backend in 0.9 - please adjust so that this keyword is not used in " "conjunction with MySQL.", schema.CreateTable(t2).compile, dialect=mysql.dialect() ) @testing.emits_warning("The 'deferrable' keyword") def go(): self.assert_compile( schema.CreateTable(t2), "CREATE TABLE t2 (id INTEGER NOT NULL, " "PRIMARY KEY (id), FOREIGN KEY(id) REFERENCES t1 (id))") go() def test_initially_emits_warning(self): m = MetaData() t1 = Table('t1', m, Column('id', Integer, primary_key=True)) t2 = Table('t2', m, Column('id', Integer, ForeignKey('t1.id', initially="XYZ"), primary_key=True)) assert_raises_message( exc.SAWarning, "The 'initially' keyword will no longer be ignored by the MySQL " "backend in 0.9 - please adjust so that this keyword is not used " "in conjunction with MySQL.", schema.CreateTable(t2).compile, dialect=mysql.dialect() ) @testing.emits_warning("The 'initially' keyword ") def go(): self.assert_compile( schema.CreateTable(t2), "CREATE TABLE t2 (id INTEGER NOT NULL, " "PRIMARY KEY (id), FOREIGN KEY(id) REFERENCES t1 (id))") go() def test_match_kw_raises(self): m = MetaData() t1 = Table('t1', m, Column('id', Integer, primary_key=True)) t2 = Table('t2', m, Column('id', Integer, ForeignKey('t1.id', match="XYZ"), primary_key=True)) assert_raises_message( exc.SAWarning, "MySQL ignores the 'MATCH' keyword while at the same time causes " "ON UPDATE/ON DELETE clauses to be ignored.", schema.CreateTable(t2).compile, dialect=mysql.dialect() ) class SQLTest(fixtures.TestBase, AssertsCompiledSQL): """Tests MySQL-dialect specific compilation.""" __dialect__ = mysql.dialect() def test_precolumns(self): dialect = self.__dialect__ def gen(distinct=None, prefixes=None): kw = {} if distinct is not None: kw['distinct'] = distinct if prefixes is not None: kw['prefixes'] = prefixes return str(select(['q'], **kw).compile(dialect=dialect)) eq_(gen(None), 'SELECT q') eq_(gen(True), 'SELECT DISTINCT q') eq_(gen(prefixes=['ALL']), 'SELECT ALL q') eq_(gen(prefixes=['DISTINCTROW']), 'SELECT DISTINCTROW q') # Interaction with MySQL 
prefix extensions eq_( gen(None, ['straight_join']), 'SELECT straight_join q') eq_( gen(False, ['HIGH_PRIORITY', 'SQL_SMALL_RESULT', 'ALL']), 'SELECT HIGH_PRIORITY SQL_SMALL_RESULT ALL q') eq_( gen(True, ['high_priority', sql.text('sql_cache')]), 'SELECT high_priority sql_cache DISTINCT q') @testing.uses_deprecated def test_deprecated_distinct(self): dialect = self.__dialect__ self.assert_compile( select(['q'], distinct='ALL'), 'SELECT ALL q', ) self.assert_compile( select(['q'], distinct='distinctROW'), 'SELECT DISTINCTROW q', ) self.assert_compile( select(['q'], distinct='ALL', prefixes=['HIGH_PRIORITY', 'SQL_SMALL_RESULT']), 'SELECT HIGH_PRIORITY SQL_SMALL_RESULT ALL q' ) def test_backslash_escaping(self): self.assert_compile( sql.column('foo').like('bar', escape='\\'), "foo LIKE %s ESCAPE '\\\\'" ) dialect = mysql.dialect() dialect._backslash_escapes=False self.assert_compile( sql.column('foo').like('bar', escape='\\'), "foo LIKE %s ESCAPE '\\'", dialect=dialect ) def test_limit(self): t = sql.table('t', sql.column('col1'), sql.column('col2')) self.assert_compile( select([t]).limit(10).offset(20), "SELECT t.col1, t.col2 FROM t LIMIT %s, %s", {'param_1':20, 'param_2':10} ) self.assert_compile( select([t]).limit(10), "SELECT t.col1, t.col2 FROM t LIMIT %s", {'param_1':10}) self.assert_compile( select([t]).offset(10), "SELECT t.col1, t.col2 FROM t LIMIT %s, 18446744073709551615", {'param_1':10} ) def test_varchar_raise(self): for type_ in ( String, VARCHAR, String(), VARCHAR(), NVARCHAR(), Unicode, Unicode(), ): type_ = sqltypes.to_instance(type_) assert_raises_message( exc.CompileError, "VARCHAR requires a length on dialect mysql", type_.compile, dialect=mysql.dialect() ) t1 = Table('sometable', MetaData(), Column('somecolumn', type_) ) assert_raises_message( exc.CompileError, r"\(in table 'sometable', column 'somecolumn'\)\: " r"(?:N)?VARCHAR requires a length on dialect mysql", schema.CreateTable(t1).compile, dialect=mysql.dialect() ) def test_update_limit(self): t = sql.table('t', sql.column('col1'), sql.column('col2')) self.assert_compile( t.update(values={'col1':123}), "UPDATE t SET col1=%s" ) self.assert_compile( t.update(values={'col1':123}, mysql_limit=5), "UPDATE t SET col1=%s LIMIT 5" ) self.assert_compile( t.update(values={'col1':123}, mysql_limit=None), "UPDATE t SET col1=%s" ) self.assert_compile( t.update(t.c.col2==456, values={'col1':123}, mysql_limit=1), "UPDATE t SET col1=%s WHERE t.col2 = %s LIMIT 1" ) def test_utc_timestamp(self): self.assert_compile(func.utc_timestamp(), "UTC_TIMESTAMP") def test_sysdate(self): self.assert_compile(func.sysdate(), "SYSDATE()") def test_cast(self): t = sql.table('t', sql.column('col')) m = mysql specs = [ (Integer, "CAST(t.col AS SIGNED INTEGER)"), (INT, "CAST(t.col AS SIGNED INTEGER)"), (m.MSInteger, "CAST(t.col AS SIGNED INTEGER)"), (m.MSInteger(unsigned=True), "CAST(t.col AS UNSIGNED INTEGER)"), (SmallInteger, "CAST(t.col AS SIGNED INTEGER)"), (m.MSSmallInteger, "CAST(t.col AS SIGNED INTEGER)"), (m.MSTinyInteger, "CAST(t.col AS SIGNED INTEGER)"), # 'SIGNED INTEGER' is a bigint, so this is ok. (m.MSBigInteger, "CAST(t.col AS SIGNED INTEGER)"), (m.MSBigInteger(unsigned=False), "CAST(t.col AS SIGNED INTEGER)"), (m.MSBigInteger(unsigned=True), "CAST(t.col AS UNSIGNED INTEGER)"), (m.MSBit, "t.col"), # this is kind of sucky. thank you default arguments! 
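            # (the DECIMAL casts below pick up MySQL's default precision and
            # scale, hence the remark above).  MySQL's CAST() only accepts a
            # small set of target types -- SIGNED/UNSIGNED INTEGER, DECIMAL,
            # DATE, TIME, DATETIME, CHAR and BINARY -- so types outside that
            # set (floats, BIT, YEAR, BOOLEAN, ENUM, SET, Interval) are left
            # uncast and compile to the bare column, as the "t.col" entries
            # further down show.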
(NUMERIC, "CAST(t.col AS DECIMAL)"), (DECIMAL, "CAST(t.col AS DECIMAL)"), (Numeric, "CAST(t.col AS DECIMAL)"), (m.MSNumeric, "CAST(t.col AS DECIMAL)"), (m.MSDecimal, "CAST(t.col AS DECIMAL)"), (FLOAT, "t.col"), (Float, "t.col"), (m.MSFloat, "t.col"), (m.MSDouble, "t.col"), (m.MSReal, "t.col"), (TIMESTAMP, "CAST(t.col AS DATETIME)"), (DATETIME, "CAST(t.col AS DATETIME)"), (DATE, "CAST(t.col AS DATE)"), (TIME, "CAST(t.col AS TIME)"), (DateTime, "CAST(t.col AS DATETIME)"), (Date, "CAST(t.col AS DATE)"), (Time, "CAST(t.col AS TIME)"), (DateTime, "CAST(t.col AS DATETIME)"), (Date, "CAST(t.col AS DATE)"), (m.MSTime, "CAST(t.col AS TIME)"), (m.MSTimeStamp, "CAST(t.col AS DATETIME)"), (m.MSYear, "t.col"), (m.MSYear(2), "t.col"), (Interval, "t.col"), (String, "CAST(t.col AS CHAR)"), (Unicode, "CAST(t.col AS CHAR)"), (UnicodeText, "CAST(t.col AS CHAR)"), (VARCHAR, "CAST(t.col AS CHAR)"), (NCHAR, "CAST(t.col AS CHAR)"), (CHAR, "CAST(t.col AS CHAR)"), (CLOB, "CAST(t.col AS CHAR)"), (TEXT, "CAST(t.col AS CHAR)"), (String(32), "CAST(t.col AS CHAR(32))"), (Unicode(32), "CAST(t.col AS CHAR(32))"), (CHAR(32), "CAST(t.col AS CHAR(32))"), (m.MSString, "CAST(t.col AS CHAR)"), (m.MSText, "CAST(t.col AS CHAR)"), (m.MSTinyText, "CAST(t.col AS CHAR)"), (m.MSMediumText, "CAST(t.col AS CHAR)"), (m.MSLongText, "CAST(t.col AS CHAR)"), (m.MSNChar, "CAST(t.col AS CHAR)"), (m.MSNVarChar, "CAST(t.col AS CHAR)"), (LargeBinary, "CAST(t.col AS BINARY)"), (BLOB, "CAST(t.col AS BINARY)"), (m.MSBlob, "CAST(t.col AS BINARY)"), (m.MSBlob(32), "CAST(t.col AS BINARY)"), (m.MSTinyBlob, "CAST(t.col AS BINARY)"), (m.MSMediumBlob, "CAST(t.col AS BINARY)"), (m.MSLongBlob, "CAST(t.col AS BINARY)"), (m.MSBinary, "CAST(t.col AS BINARY)"), (m.MSBinary(32), "CAST(t.col AS BINARY)"), (m.MSVarBinary, "CAST(t.col AS BINARY)"), (m.MSVarBinary(32), "CAST(t.col AS BINARY)"), # maybe this could be changed to something more DWIM, needs # testing (Boolean, "t.col"), (BOOLEAN, "t.col"), (m.MSEnum, "t.col"), (m.MSEnum("1", "2"), "t.col"), (m.MSSet, "t.col"), (m.MSSet("1", "2"), "t.col"), ] for type_, expected in specs: self.assert_compile(cast(t.c.col, type_), expected) def test_no_cast_pre_4(self): self.assert_compile( cast(Column('foo', Integer), String), "CAST(foo AS CHAR)", ) dialect = mysql.dialect() dialect.server_version_info = (3, 2, 3) self.assert_compile( cast(Column('foo', Integer), String), "foo", dialect=dialect ) def test_cast_grouped_expression_non_castable(self): self.assert_compile( cast(sql.column('x') + sql.column('y'), Float), "(x + y)" ) def test_cast_grouped_expression_pre_4(self): dialect = mysql.dialect() dialect.server_version_info = (3, 2, 3) self.assert_compile( cast(sql.column('x') + sql.column('y'), Integer), "(x + y)", dialect=dialect ) def test_extract(self): t = sql.table('t', sql.column('col1')) for field in 'year', 'month', 'day': self.assert_compile( select([extract(field, t.c.col1)]), "SELECT EXTRACT(%s FROM t.col1) AS anon_1 FROM t" % field) # millsecondS to millisecond self.assert_compile( select([extract('milliseconds', t.c.col1)]), "SELECT EXTRACT(millisecond FROM t.col1) AS anon_1 FROM t") def test_too_long_index(self): exp = 'ix_zyrenian_zyme_zyzzogeton_zyzzogeton_zyrenian_zyme_zyz_5cd2' tname = 'zyrenian_zyme_zyzzogeton_zyzzogeton' cname = 'zyrenian_zyme_zyzzogeton_zo' t1 = Table(tname, MetaData(), Column(cname, Integer, index=True), ) ix1 = list(t1.indexes)[0] self.assert_compile( schema.CreateIndex(ix1), "CREATE INDEX %s " "ON %s (%s)" % (exp, tname, cname) ) def test_innodb_autoincrement(self): t1 = 
Table('sometable', MetaData(), Column('assigned_id', Integer(), primary_key=True, autoincrement=False), Column('id', Integer(), primary_key=True, autoincrement=True), mysql_engine='InnoDB') self.assert_compile(schema.CreateTable(t1), 'CREATE TABLE sometable (assigned_id ' 'INTEGER NOT NULL, id INTEGER NOT NULL ' 'AUTO_INCREMENT, PRIMARY KEY (assigned_id, ' 'id), KEY idx_autoinc_id (id))ENGINE=Inn' 'oDB') t1 = Table('sometable', MetaData(), Column('assigned_id', Integer(), primary_key=True, autoincrement=True), Column('id', Integer(), primary_key=True, autoincrement=False), mysql_engine='InnoDB') self.assert_compile(schema.CreateTable(t1), 'CREATE TABLE sometable (assigned_id ' 'INTEGER NOT NULL AUTO_INCREMENT, id ' 'INTEGER NOT NULL, PRIMARY KEY ' '(assigned_id, id))ENGINE=InnoDB') def test_innodb_autoincrement_reserved_word_column_name(self): t1 = Table( 'sometable', MetaData(), Column('id', Integer(), primary_key=True, autoincrement=False), Column('order', Integer(), primary_key=True, autoincrement=True), mysql_engine='InnoDB') self.assert_compile( schema.CreateTable(t1), 'CREATE TABLE sometable (' 'id INTEGER NOT NULL, ' '`order` INTEGER NOT NULL AUTO_INCREMENT, ' 'PRIMARY KEY (id, `order`), ' 'KEY idx_autoinc_order (`order`)' ')ENGINE=InnoDB') SQLAlchemy-0.8.4/test/dialect/mysql/test_dialect.py0000644000076500000240000001133012251147172023000 0ustar classicstaff00000000000000# coding: utf-8 from sqlalchemy.testing import eq_ from sqlalchemy import * from sqlalchemy.engine.url import make_url from sqlalchemy.testing import fixtures from sqlalchemy import testing from sqlalchemy.testing import engines import datetime class DialectTest(fixtures.TestBase): def test_ssl_arguments_mysqldb(self): from sqlalchemy.dialects.mysql import mysqldb dialect = mysqldb.dialect() self._test_ssl_arguments(dialect) def test_ssl_arguments_oursql(self): from sqlalchemy.dialects.mysql import oursql dialect = oursql.dialect() self._test_ssl_arguments(dialect) def _test_ssl_arguments(self, dialect): kwarg = dialect.create_connect_args( make_url("mysql://scott:tiger@localhost:3306/test" "?ssl_ca=/ca.pem&ssl_cert=/cert.pem&ssl_key=/key.pem") )[1] # args that differ among mysqldb and oursql for k in ('use_unicode', 'found_rows', 'client_flag'): kwarg.pop(k, None) eq_( kwarg, { 'passwd': 'tiger', 'db': 'test', 'ssl': {'ca': '/ca.pem', 'cert': '/cert.pem', 'key': '/key.pem'}, 'host': 'localhost', 'user': 'scott', 'port': 3306 } ) def test_mysqlconnector_buffered_arg(self): from sqlalchemy.dialects.mysql import mysqlconnector dialect = mysqlconnector.dialect() kw = dialect.create_connect_args( make_url("mysql+mysqlconnector://u:p@host/db?buffered=true") )[1] eq_(kw['buffered'], True) kw = dialect.create_connect_args( make_url("mysql+mysqlconnector://u:p@host/db?buffered=false") )[1] eq_(kw['buffered'], False) kw = dialect.create_connect_args( make_url("mysql+mysqlconnector://u:p@host/db") )[1] eq_(kw['buffered'], True) def test_mysqlconnector_raise_on_warnings_arg(self): from sqlalchemy.dialects.mysql import mysqlconnector dialect = mysqlconnector.dialect() kw = dialect.create_connect_args( make_url("mysql+mysqlconnector://u:p@host/db?raise_on_warnings=true") )[1] eq_(kw['raise_on_warnings'], True) kw = dialect.create_connect_args( make_url("mysql+mysqlconnector://u:p@host/db?raise_on_warnings=false") )[1] eq_(kw['raise_on_warnings'], False) kw = dialect.create_connect_args( make_url("mysql+mysqlconnector://u:p@host/db") )[1] eq_(kw['raise_on_warnings'], True) @testing.only_on('mysql') def test_random_arg(self): 
dialect = testing.db.dialect kw = dialect.create_connect_args( make_url("mysql://u:p@host/db?foo=true") )[1] eq_(kw['foo'], "true") class SQLModeDetectionTest(fixtures.TestBase): __only_on__ = 'mysql' def _options(self, modes): def connect(con, record): cursor = con.cursor() print("DOING THiS:", "set sql_mode='%s'" % (",".join(modes))) cursor.execute("set sql_mode='%s'" % (",".join(modes))) e = engines.testing_engine(options={ 'pool_events':[ (connect, 'first_connect'), (connect, 'connect') ] }) return e def test_backslash_escapes(self): engine = self._options(['NO_BACKSLASH_ESCAPES']) c = engine.connect() assert not engine.dialect._backslash_escapes c.close() engine.dispose() engine = self._options([]) c = engine.connect() assert engine.dialect._backslash_escapes c.close() engine.dispose() def test_ansi_quotes(self): engine = self._options(['ANSI_QUOTES']) c = engine.connect() assert engine.dialect._server_ansiquotes c.close() engine.dispose() def test_combination(self): engine = self._options(['ANSI_QUOTES,NO_BACKSLASH_ESCAPES']) c = engine.connect() assert engine.dialect._server_ansiquotes assert not engine.dialect._backslash_escapes c.close() engine.dispose() class ExecutionTest(fixtures.TestBase): """Various MySQL execution special cases.""" __only_on__ = 'mysql' def test_charset_caching(self): engine = engines.testing_engine() cx = engine.connect() meta = MetaData() charset = engine.dialect._detect_charset(cx) meta.reflect(cx) eq_(cx.dialect._connection_charset, charset) cx.close() def test_sysdate(self): d = testing.db.scalar(func.sysdate()) assert isinstance(d, datetime.datetime) SQLAlchemy-0.8.4/test/dialect/mysql/test_query.py0000644000076500000240000001110712251147172022542 0ustar classicstaff00000000000000# coding: utf-8 from sqlalchemy.testing import eq_ from sqlalchemy import * from sqlalchemy.testing import fixtures, AssertsCompiledSQL from sqlalchemy import testing class MatchTest(fixtures.TestBase, AssertsCompiledSQL): __only_on__ = 'mysql' @classmethod def setup_class(cls): global metadata, cattable, matchtable metadata = MetaData(testing.db) cattable = Table('cattable', metadata, Column('id', Integer, primary_key=True), Column('description', String(50)), mysql_engine='MyISAM' ) matchtable = Table('matchtable', metadata, Column('id', Integer, primary_key=True), Column('title', String(200)), Column('category_id', Integer, ForeignKey('cattable.id')), mysql_engine='MyISAM' ) metadata.create_all() cattable.insert().execute([ {'id': 1, 'description': 'Python'}, {'id': 2, 'description': 'Ruby'}, ]) matchtable.insert().execute([ {'id': 1, 'title': 'Agile Web Development with Rails', 'category_id': 2}, {'id': 2, 'title': 'Dive Into Python', 'category_id': 1}, {'id': 3, 'title': "Programming Matz's Ruby", 'category_id': 2}, {'id': 4, 'title': 'The Definitive Guide to Django', 'category_id': 1}, {'id': 5, 'title': 'Python in a Nutshell', 'category_id': 1} ]) @classmethod def teardown_class(cls): metadata.drop_all() @testing.fails_on('mysql+mysqlconnector', 'uses pyformat') def test_expression(self): format = testing.db.dialect.paramstyle == 'format' and '%s' or '?' 
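# A minimal, self-contained sketch (not part of the original suite) of the same
# MATCH ... AGAINST rendering checked purely in memory, with no database; the
# table and column names are placeholders, and the default MySQL dialect's
# 'format' paramstyle renders the bind parameter as '%s'.
def _sketch_match_compile():
    from sqlalchemy import String
    from sqlalchemy.sql import table, column
    from sqlalchemy.dialects import mysql

    t = table('matchtable', column('title', String(200)))
    expr = t.c.title.match('somstr')
    # compiles to: MATCH (matchtable.title) AGAINST (%s IN BOOLEAN MODE)
    return str(expr.compile(dialect=mysql.dialect()))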
self.assert_compile( matchtable.c.title.match('somstr'), "MATCH (matchtable.title) AGAINST (%s IN BOOLEAN MODE)" % format) @testing.fails_on('mysql+mysqldb', 'uses format') @testing.fails_on('mysql+pymysql', 'uses format') @testing.fails_on('mysql+cymysql', 'uses format') @testing.fails_on('mysql+oursql', 'uses format') @testing.fails_on('mysql+pyodbc', 'uses format') @testing.fails_on('mysql+zxjdbc', 'uses format') def test_expression(self): format = '%(title_1)s' self.assert_compile( matchtable.c.title.match('somstr'), "MATCH (matchtable.title) AGAINST (%s IN BOOLEAN MODE)" % format) def test_simple_match(self): results = (matchtable.select(). where(matchtable.c.title.match('python')). order_by(matchtable.c.id). execute(). fetchall()) eq_([2, 5], [r.id for r in results]) def test_simple_match_with_apostrophe(self): results = (matchtable.select(). where(matchtable.c.title.match("Matz's")). execute(). fetchall()) eq_([3], [r.id for r in results]) def test_or_match(self): results1 = (matchtable.select(). where(or_(matchtable.c.title.match('nutshell'), matchtable.c.title.match('ruby'))). order_by(matchtable.c.id). execute(). fetchall()) eq_([3, 5], [r.id for r in results1]) results2 = (matchtable.select(). where(matchtable.c.title.match('nutshell ruby')). order_by(matchtable.c.id). execute(). fetchall()) eq_([3, 5], [r.id for r in results2]) def test_and_match(self): results1 = (matchtable.select(). where(and_(matchtable.c.title.match('python'), matchtable.c.title.match('nutshell'))). execute(). fetchall()) eq_([5], [r.id for r in results1]) results2 = (matchtable.select(). where(matchtable.c.title.match('+python +nutshell')). execute(). fetchall()) eq_([5], [r.id for r in results2]) def test_match_across_joins(self): results = (matchtable.select(). where(and_(cattable.c.id==matchtable.c.category_id, or_(cattable.c.description.match('Ruby'), matchtable.c.title.match('nutshell')))). order_by(matchtable.c.id). execute(). 
fetchall()) eq_([1, 3, 5], [r.id for r in results]) SQLAlchemy-0.8.4/test/dialect/mysql/test_reflection.py0000644000076500000240000003124412251150015023521 0ustar classicstaff00000000000000# coding: utf-8 from sqlalchemy.testing import eq_ from sqlalchemy import * from sqlalchemy import sql from sqlalchemy.dialects.mysql import base as mysql from sqlalchemy.testing import fixtures, AssertsExecutionResults from sqlalchemy import testing class ReflectionTest(fixtures.TestBase, AssertsExecutionResults): __only_on__ = 'mysql' def test_default_reflection(self): """Test reflection of column defaults.""" from sqlalchemy.dialects.mysql import VARCHAR def_table = Table( 'mysql_def', MetaData(testing.db), Column('c1', VARCHAR(10, collation='utf8_unicode_ci'), DefaultClause(''), nullable=False), Column('c2', String(10), DefaultClause('0')), Column('c3', String(10), DefaultClause('abc')), Column('c4', TIMESTAMP, DefaultClause('2009-04-05 12:00:00' )), Column('c5', TIMESTAMP), Column('c6', TIMESTAMP, DefaultClause(sql.text("CURRENT_TIMESTAMP " "ON UPDATE CURRENT_TIMESTAMP"))), ) def_table.create() try: reflected = Table('mysql_def', MetaData(testing.db), autoload=True) finally: def_table.drop() assert def_table.c.c1.server_default.arg == '' assert def_table.c.c2.server_default.arg == '0' assert def_table.c.c3.server_default.arg == 'abc' assert def_table.c.c4.server_default.arg \ == '2009-04-05 12:00:00' assert str(reflected.c.c1.server_default.arg) == "''" assert str(reflected.c.c2.server_default.arg) == "'0'" assert str(reflected.c.c3.server_default.arg) == "'abc'" assert str(reflected.c.c4.server_default.arg) \ == "'2009-04-05 12:00:00'" assert reflected.c.c5.default is None assert reflected.c.c5.server_default is None assert reflected.c.c6.default is None eq_( str(reflected.c.c6.server_default.arg).upper(), "CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP" ) reflected.create() try: reflected2 = Table('mysql_def', MetaData(testing.db), autoload=True) finally: reflected.drop() assert str(reflected2.c.c1.server_default.arg) == "''" assert str(reflected2.c.c2.server_default.arg) == "'0'" assert str(reflected2.c.c3.server_default.arg) == "'abc'" assert str(reflected2.c.c4.server_default.arg) \ == "'2009-04-05 12:00:00'" assert reflected.c.c5.default is None assert reflected.c.c5.server_default is None assert reflected.c.c6.default is None eq_( str(reflected.c.c6.server_default.arg).upper(), "CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP" ) def test_reflection_with_table_options(self): comment = r"""Comment types type speedily ' " \ '' Fun!""" def_table = Table('mysql_def', MetaData(testing.db), Column('c1', Integer()), mysql_engine='MEMORY', mysql_comment=comment, mysql_default_charset='utf8', mysql_auto_increment='5', mysql_avg_row_length='3', mysql_password='secret', mysql_connection='fish', ) def_table.create() try: reflected = Table('mysql_def', MetaData(testing.db), autoload=True) finally: def_table.drop() assert def_table.kwargs['mysql_engine'] == 'MEMORY' assert def_table.kwargs['mysql_comment'] == comment assert def_table.kwargs['mysql_default_charset'] == 'utf8' assert def_table.kwargs['mysql_auto_increment'] == '5' assert def_table.kwargs['mysql_avg_row_length'] == '3' assert def_table.kwargs['mysql_password'] == 'secret' assert def_table.kwargs['mysql_connection'] == 'fish' assert reflected.kwargs['mysql_engine'] == 'MEMORY' assert reflected.kwargs['mysql_comment'] == comment assert reflected.kwargs['mysql_default charset'] == 'utf8' assert reflected.kwargs['mysql_avg_row_length'] == '3' assert 
reflected.kwargs['mysql_connection'] == 'fish' # This field doesn't seem to be returned by mysql itself. #assert reflected.kwargs['mysql_password'] == 'secret' # This is explicitly ignored when reflecting schema. #assert reflected.kwargs['mysql_auto_increment'] == '5' def test_reflection_on_include_columns(self): """Test reflection of include_columns to be sure they respect case.""" case_table = Table('mysql_case', MetaData(testing.db), Column('c1', String(10)), Column('C2', String(10)), Column('C3', String(10))) try: case_table.create() reflected = Table('mysql_case', MetaData(testing.db), autoload=True, include_columns=['c1', 'C2']) for t in case_table, reflected: assert 'c1' in t.c.keys() assert 'C2' in t.c.keys() reflected2 = Table('mysql_case', MetaData(testing.db), autoload=True, include_columns=['c1', 'c2']) assert 'c1' in reflected2.c.keys() for c in ['c2', 'C2', 'C3']: assert c not in reflected2.c.keys() finally: case_table.drop() @testing.exclude('mysql', '<', (5, 0, 0), 'early types are squirrely') @testing.uses_deprecated('Using String type with no length') @testing.uses_deprecated('Manually quoting ENUM value literals') def test_type_reflection(self): # (ask_for, roundtripped_as_if_different) specs = [( String(1), mysql.MSString(1), ), ( String(3), mysql.MSString(3), ), ( Text(), mysql.MSText(), ), ( Unicode(1), mysql.MSString(1), ), ( Unicode(3), mysql.MSString(3), ), ( UnicodeText(), mysql.MSText(), ), ( mysql.MSChar(1), ), ( mysql.MSChar(3), ), ( NCHAR(2), mysql.MSChar(2), ), ( mysql.MSNChar(2), mysql.MSChar(2), ), # N is CREATE only ( mysql.MSNVarChar(22), mysql.MSString(22), ), ( SmallInteger(), mysql.MSSmallInteger(), ), ( SmallInteger(), mysql.MSSmallInteger(4), ), ( mysql.MSSmallInteger(), ), ( mysql.MSSmallInteger(4), mysql.MSSmallInteger(4), ), ( mysql.MSMediumInteger(), mysql.MSMediumInteger(), ), ( mysql.MSMediumInteger(8), mysql.MSMediumInteger(8), ), ( LargeBinary(3), mysql.TINYBLOB(), ), ( LargeBinary(), mysql.BLOB() ), ( mysql.MSBinary(3), mysql.MSBinary(3), ), ( mysql.MSVarBinary(3),), ( mysql.MSTinyBlob(),), ( mysql.MSBlob(),), ( mysql.MSBlob(1234), mysql.MSBlob()), ( mysql.MSMediumBlob(),), ( mysql.MSLongBlob(),), ( mysql.ENUM("''","'fleem'"), ), ] columns = [Column('c%i' % (i + 1), t[0]) for i, t in enumerate(specs)] db = testing.db m = MetaData(db) t_table = Table('mysql_types', m, *columns) try: m.create_all() m2 = MetaData(db) rt = Table('mysql_types', m2, autoload=True) try: db.execute('CREATE OR REPLACE VIEW mysql_types_v ' 'AS SELECT * from mysql_types') rv = Table('mysql_types_v', m2, autoload=True) expected = [len(c) > 1 and c[1] or c[0] for c in specs] # Early 5.0 releases seem to report more "general" for columns # in a view, e.g. char -> varchar, tinyblob -> mediumblob # # Not sure exactly which point version has the fix. 
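# In other words: on servers older than 5.0.11 only the base table's reflected
# column types are compared below; on 5.0.11 and later the types reflected from
# the view are expected to round-trip identically as well.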
if db.dialect.server_version_info < (5, 0, 11): tables = rt, else: tables = rt, rv for table in tables: for i, reflected in enumerate(table.c): assert isinstance(reflected.type, type(expected[i])), \ 'element %d: %r not instance of %r' % (i, reflected.type, type(expected[i])) finally: db.execute('DROP VIEW mysql_types_v') finally: m.drop_all() def test_autoincrement(self): meta = MetaData(testing.db) try: Table('ai_1', meta, Column('int_y', Integer, primary_key=True), Column('int_n', Integer, DefaultClause('0'), primary_key=True), mysql_engine='MyISAM') Table('ai_2', meta, Column('int_y', Integer, primary_key=True), Column('int_n', Integer, DefaultClause('0'), primary_key=True), mysql_engine='MyISAM') Table('ai_3', meta, Column('int_n', Integer, DefaultClause('0'), primary_key=True, autoincrement=False), Column('int_y', Integer, primary_key=True), mysql_engine='MyISAM') Table('ai_4', meta, Column('int_n', Integer, DefaultClause('0'), primary_key=True, autoincrement=False), Column('int_n2', Integer, DefaultClause('0'), primary_key=True, autoincrement=False), mysql_engine='MyISAM') Table('ai_5', meta, Column('int_y', Integer, primary_key=True), Column('int_n', Integer, DefaultClause('0'), primary_key=True, autoincrement=False), mysql_engine='MyISAM') Table('ai_6', meta, Column('o1', String(1), DefaultClause('x'), primary_key=True), Column('int_y', Integer, primary_key=True), mysql_engine='MyISAM') Table('ai_7', meta, Column('o1', String(1), DefaultClause('x'), primary_key=True), Column('o2', String(1), DefaultClause('x'), primary_key=True), Column('int_y', Integer, primary_key=True), mysql_engine='MyISAM') Table('ai_8', meta, Column('o1', String(1), DefaultClause('x'), primary_key=True), Column('o2', String(1), DefaultClause('x'), primary_key=True), mysql_engine='MyISAM') meta.create_all() table_names = ['ai_1', 'ai_2', 'ai_3', 'ai_4', 'ai_5', 'ai_6', 'ai_7', 'ai_8'] mr = MetaData(testing.db) mr.reflect(only=table_names) for tbl in [mr.tables[name] for name in table_names]: for c in tbl.c: if c.name.startswith('int_y'): assert c.autoincrement elif c.name.startswith('int_n'): assert not c.autoincrement tbl.insert().execute() if 'int_y' in tbl.c: assert select([tbl.c.int_y]).scalar() == 1 assert list(tbl.select().execute().first()).count(1) == 1 else: assert 1 not in list(tbl.select().execute().first()) finally: meta.drop_all() @testing.exclude('mysql', '<', (5, 0, 0), 'no information_schema support') def test_system_views(self): dialect = testing.db.dialect connection = testing.db.connect() view_names = dialect.get_view_names(connection, "information_schema") self.assert_('TABLES' in view_names) class RawReflectionTest(fixtures.TestBase): def setup(self): dialect = mysql.dialect() self.parser = mysql.MySQLTableDefinitionParser(dialect, dialect.identifier_preparer) def test_key_reflection(self): regex = self.parser._re_key assert regex.match(' PRIMARY KEY (`id`),') assert regex.match(' PRIMARY KEY USING BTREE (`id`),') assert regex.match(' PRIMARY KEY (`id`) USING BTREE,') assert regex.match(' PRIMARY KEY (`id`)') assert regex.match(' PRIMARY KEY USING BTREE (`id`)') assert regex.match(' PRIMARY KEY (`id`) USING BTREE') SQLAlchemy-0.8.4/test/dialect/mysql/test_types.py0000644000076500000240000006415112251150015022536 0ustar classicstaff00000000000000# coding: utf-8 from sqlalchemy.testing import eq_, assert_raises from sqlalchemy import * from sqlalchemy import sql, exc, schema from sqlalchemy.util import u from sqlalchemy.dialects.mysql import base as mysql from sqlalchemy.testing import 
fixtures, AssertsCompiledSQL, AssertsExecutionResults from sqlalchemy import testing from sqlalchemy.testing.engines import utf8_engine import datetime class TypesTest(fixtures.TestBase, AssertsExecutionResults, AssertsCompiledSQL): "Test MySQL column types" __dialect__ = mysql.dialect() def test_numeric(self): "Exercise type specification and options for numeric types." columns = [ # column type, args, kwargs, expected ddl # e.g. Column(Integer(10, unsigned=True)) == # 'INTEGER(10) UNSIGNED' (mysql.MSNumeric, [], {}, 'NUMERIC'), (mysql.MSNumeric, [None], {}, 'NUMERIC'), (mysql.MSNumeric, [12], {}, 'NUMERIC(12)'), (mysql.MSNumeric, [12, 4], {'unsigned':True}, 'NUMERIC(12, 4) UNSIGNED'), (mysql.MSNumeric, [12, 4], {'zerofill':True}, 'NUMERIC(12, 4) ZEROFILL'), (mysql.MSNumeric, [12, 4], {'zerofill':True, 'unsigned':True}, 'NUMERIC(12, 4) UNSIGNED ZEROFILL'), (mysql.MSDecimal, [], {}, 'DECIMAL'), (mysql.MSDecimal, [None], {}, 'DECIMAL'), (mysql.MSDecimal, [12], {}, 'DECIMAL(12)'), (mysql.MSDecimal, [12, None], {}, 'DECIMAL(12)'), (mysql.MSDecimal, [12, 4], {'unsigned':True}, 'DECIMAL(12, 4) UNSIGNED'), (mysql.MSDecimal, [12, 4], {'zerofill':True}, 'DECIMAL(12, 4) ZEROFILL'), (mysql.MSDecimal, [12, 4], {'zerofill':True, 'unsigned':True}, 'DECIMAL(12, 4) UNSIGNED ZEROFILL'), (mysql.MSDouble, [None, None], {}, 'DOUBLE'), (mysql.MSDouble, [12, 4], {'unsigned':True}, 'DOUBLE(12, 4) UNSIGNED'), (mysql.MSDouble, [12, 4], {'zerofill':True}, 'DOUBLE(12, 4) ZEROFILL'), (mysql.MSDouble, [12, 4], {'zerofill':True, 'unsigned':True}, 'DOUBLE(12, 4) UNSIGNED ZEROFILL'), (mysql.MSReal, [None, None], {}, 'REAL'), (mysql.MSReal, [12, 4], {'unsigned':True}, 'REAL(12, 4) UNSIGNED'), (mysql.MSReal, [12, 4], {'zerofill':True}, 'REAL(12, 4) ZEROFILL'), (mysql.MSReal, [12, 4], {'zerofill':True, 'unsigned':True}, 'REAL(12, 4) UNSIGNED ZEROFILL'), (mysql.MSFloat, [], {}, 'FLOAT'), (mysql.MSFloat, [None], {}, 'FLOAT'), (mysql.MSFloat, [12], {}, 'FLOAT(12)'), (mysql.MSFloat, [12, 4], {}, 'FLOAT(12, 4)'), (mysql.MSFloat, [12, 4], {'unsigned':True}, 'FLOAT(12, 4) UNSIGNED'), (mysql.MSFloat, [12, 4], {'zerofill':True}, 'FLOAT(12, 4) ZEROFILL'), (mysql.MSFloat, [12, 4], {'zerofill':True, 'unsigned':True}, 'FLOAT(12, 4) UNSIGNED ZEROFILL'), (mysql.MSInteger, [], {}, 'INTEGER'), (mysql.MSInteger, [4], {}, 'INTEGER(4)'), (mysql.MSInteger, [4], {'unsigned':True}, 'INTEGER(4) UNSIGNED'), (mysql.MSInteger, [4], {'zerofill':True}, 'INTEGER(4) ZEROFILL'), (mysql.MSInteger, [4], {'zerofill':True, 'unsigned':True}, 'INTEGER(4) UNSIGNED ZEROFILL'), (mysql.MSBigInteger, [], {}, 'BIGINT'), (mysql.MSBigInteger, [4], {}, 'BIGINT(4)'), (mysql.MSBigInteger, [4], {'unsigned':True}, 'BIGINT(4) UNSIGNED'), (mysql.MSBigInteger, [4], {'zerofill':True}, 'BIGINT(4) ZEROFILL'), (mysql.MSBigInteger, [4], {'zerofill':True, 'unsigned':True}, 'BIGINT(4) UNSIGNED ZEROFILL'), (mysql.MSMediumInteger, [], {}, 'MEDIUMINT'), (mysql.MSMediumInteger, [4], {}, 'MEDIUMINT(4)'), (mysql.MSMediumInteger, [4], {'unsigned':True}, 'MEDIUMINT(4) UNSIGNED'), (mysql.MSMediumInteger, [4], {'zerofill':True}, 'MEDIUMINT(4) ZEROFILL'), (mysql.MSMediumInteger, [4], {'zerofill':True, 'unsigned':True}, 'MEDIUMINT(4) UNSIGNED ZEROFILL'), (mysql.MSTinyInteger, [], {}, 'TINYINT'), (mysql.MSTinyInteger, [1], {}, 'TINYINT(1)'), (mysql.MSTinyInteger, [1], {'unsigned':True}, 'TINYINT(1) UNSIGNED'), (mysql.MSTinyInteger, [1], {'zerofill':True}, 'TINYINT(1) ZEROFILL'), (mysql.MSTinyInteger, [1], {'zerofill':True, 'unsigned':True}, 'TINYINT(1) UNSIGNED ZEROFILL'), (mysql.MSSmallInteger, 
[], {}, 'SMALLINT'), (mysql.MSSmallInteger, [4], {}, 'SMALLINT(4)'), (mysql.MSSmallInteger, [4], {'unsigned':True}, 'SMALLINT(4) UNSIGNED'), (mysql.MSSmallInteger, [4], {'zerofill':True}, 'SMALLINT(4) ZEROFILL'), (mysql.MSSmallInteger, [4], {'zerofill':True, 'unsigned':True}, 'SMALLINT(4) UNSIGNED ZEROFILL'), ] for type_, args, kw, res in columns: self.assert_compile( type_(*args, **kw), res ) @testing.exclude('mysql', '<', (4, 1, 1), 'no charset support') def test_charset(self): """Exercise CHARACTER SET and COLLATE-ish options on string types.""" columns = [ (mysql.MSChar, [1], {}, 'CHAR(1)'), (mysql.NCHAR, [1], {}, 'NATIONAL CHAR(1)'), (mysql.MSChar, [1], {'binary':True}, 'CHAR(1) BINARY'), (mysql.MSChar, [1], {'ascii':True}, 'CHAR(1) ASCII'), (mysql.MSChar, [1], {'unicode':True}, 'CHAR(1) UNICODE'), (mysql.MSChar, [1], {'ascii':True, 'binary':True}, 'CHAR(1) ASCII BINARY'), (mysql.MSChar, [1], {'unicode':True, 'binary':True}, 'CHAR(1) UNICODE BINARY'), (mysql.MSChar, [1], {'charset':'utf8'}, 'CHAR(1) CHARACTER SET utf8'), (mysql.MSChar, [1], {'charset':'utf8', 'binary':True}, 'CHAR(1) CHARACTER SET utf8 BINARY'), (mysql.MSChar, [1], {'charset':'utf8', 'unicode':True}, 'CHAR(1) CHARACTER SET utf8'), (mysql.MSChar, [1], {'charset':'utf8', 'ascii':True}, 'CHAR(1) CHARACTER SET utf8'), (mysql.MSChar, [1], {'collation': 'utf8_bin'}, 'CHAR(1) COLLATE utf8_bin'), (mysql.MSChar, [1], {'charset': 'utf8', 'collation': 'utf8_bin'}, 'CHAR(1) CHARACTER SET utf8 COLLATE utf8_bin'), (mysql.MSChar, [1], {'charset': 'utf8', 'binary': True}, 'CHAR(1) CHARACTER SET utf8 BINARY'), (mysql.MSChar, [1], {'charset': 'utf8', 'collation': 'utf8_bin', 'binary': True}, 'CHAR(1) CHARACTER SET utf8 COLLATE utf8_bin'), (mysql.MSChar, [1], {'national':True}, 'NATIONAL CHAR(1)'), (mysql.MSChar, [1], {'national':True, 'charset':'utf8'}, 'NATIONAL CHAR(1)'), (mysql.MSChar, [1], {'national':True, 'charset':'utf8', 'binary':True}, 'NATIONAL CHAR(1) BINARY'), (mysql.MSChar, [1], {'national':True, 'binary':True, 'unicode':True}, 'NATIONAL CHAR(1) BINARY'), (mysql.MSChar, [1], {'national':True, 'collation':'utf8_bin'}, 'NATIONAL CHAR(1) COLLATE utf8_bin'), (mysql.MSString, [1], {'charset':'utf8', 'collation':'utf8_bin'}, 'VARCHAR(1) CHARACTER SET utf8 COLLATE utf8_bin'), (mysql.MSString, [1], {'national':True, 'collation':'utf8_bin'}, 'NATIONAL VARCHAR(1) COLLATE utf8_bin'), (mysql.MSTinyText, [], {'charset':'utf8', 'collation':'utf8_bin'}, 'TINYTEXT CHARACTER SET utf8 COLLATE utf8_bin'), (mysql.MSMediumText, [], {'charset':'utf8', 'binary':True}, 'MEDIUMTEXT CHARACTER SET utf8 BINARY'), (mysql.MSLongText, [], {'ascii':True}, 'LONGTEXT ASCII'), (mysql.ENUM, ["foo", "bar"], {'unicode':True}, '''ENUM('foo','bar') UNICODE'''), (String, [20], {"collation":"utf8"}, 'VARCHAR(20) COLLATE utf8') ] for type_, args, kw, res in columns: self.assert_compile( type_(*args, **kw), res ) @testing.only_if('mysql') @testing.exclude('mysql', '<', (5, 0, 5), 'a 5.0+ feature') @testing.provide_metadata def test_charset_collate_table(self): t = Table('foo', self.metadata, Column('id', Integer), mysql_default_charset='utf8', mysql_collate='utf8_unicode_ci' ) t.create() m2 = MetaData(testing.db) t2 = Table('foo', m2, autoload=True) eq_(t2.kwargs['mysql_collate'], 'utf8_unicode_ci') eq_(t2.kwargs['mysql_default charset'], 'utf8') def test_bit_50(self): """Exercise BIT types on 5.0+ (not valid for all engine types)""" for type_, expected in [ (mysql.MSBit(), "BIT"), (mysql.MSBit(1), "BIT(1)"), (mysql.MSBit(63), "BIT(63)"), ]: 
self.assert_compile(type_, expected) @testing.only_if('mysql') @testing.exclude('mysql', '<', (5, 0, 5), 'a 5.0+ feature') @testing.fails_on('mysql+oursql', 'some round trips fail, oursql bug ?') @testing.provide_metadata def test_bit_50_roundtrip(self): bit_table = Table('mysql_bits', self.metadata, Column('b1', mysql.MSBit), Column('b2', mysql.MSBit()), Column('b3', mysql.MSBit(), nullable=False), Column('b4', mysql.MSBit(1)), Column('b5', mysql.MSBit(8)), Column('b6', mysql.MSBit(32)), Column('b7', mysql.MSBit(63)), Column('b8', mysql.MSBit(64))) self.metadata.create_all() meta2 = MetaData(testing.db) reflected = Table('mysql_bits', meta2, autoload=True) for table in bit_table, reflected: def roundtrip(store, expected=None): expected = expected or store table.insert(store).execute() row = table.select().execute().first() try: self.assert_(list(row) == expected) except: print("Storing %s" % store) print("Expected %s" % expected) print("Found %s" % list(row)) raise table.delete().execute().close() roundtrip([0] * 8) roundtrip([None, None, 0, None, None, None, None, None]) roundtrip([1] * 8) roundtrip([sql.text("b'1'")] * 8, [1] * 8) i = 255 roundtrip([0, 0, 0, 0, i, i, i, i]) i = 2 ** 32 - 1 roundtrip([0, 0, 0, 0, 0, i, i, i]) i = 2 ** 63 - 1 roundtrip([0, 0, 0, 0, 0, 0, i, i]) i = 2 ** 64 - 1 roundtrip([0, 0, 0, 0, 0, 0, 0, i]) def test_boolean(self): for type_, expected in [ (BOOLEAN(), "BOOL"), (Boolean(), "BOOL"), (mysql.TINYINT(1), "TINYINT(1)"), (mysql.TINYINT(1, unsigned=True), "TINYINT(1) UNSIGNED") ]: self.assert_compile(type_, expected) @testing.only_if('mysql') @testing.provide_metadata def test_boolean_roundtrip(self): bool_table = Table( 'mysql_bool', self.metadata, Column('b1', BOOLEAN), Column('b2', Boolean), Column('b3', mysql.MSTinyInteger(1)), Column('b4', mysql.MSTinyInteger(1, unsigned=True)), Column('b5', mysql.MSTinyInteger), ) self.metadata.create_all() table = bool_table def roundtrip(store, expected=None): expected = expected or store table.insert(store).execute() row = table.select().execute().first() self.assert_(list(row) == expected) for i, val in enumerate(expected): if isinstance(val, bool): self.assert_(val is row[i]) table.delete().execute() roundtrip([None, None, None, None, None]) roundtrip([True, True, 1, 1, 1]) roundtrip([False, False, 0, 0, 0]) roundtrip([True, True, True, True, True], [True, True, 1, 1, 1]) roundtrip([False, False, 0, 0, 0], [False, False, 0, 0, 0]) meta2 = MetaData(testing.db) table = Table('mysql_bool', meta2, autoload=True) eq_(colspec(table.c.b3), 'b3 TINYINT(1)') eq_(colspec(table.c.b4), 'b4 TINYINT(1) UNSIGNED') meta2 = MetaData(testing.db) table = Table( 'mysql_bool', meta2, Column('b1', BOOLEAN), Column('b2', Boolean), Column('b3', BOOLEAN), Column('b4', BOOLEAN), autoload=True, ) eq_(colspec(table.c.b3), 'b3 BOOL') eq_(colspec(table.c.b4), 'b4 BOOL') roundtrip([None, None, None, None, None]) roundtrip([True, True, 1, 1, 1], [True, True, True, True, 1]) roundtrip([False, False, 0, 0, 0], [False, False, False, False, 0]) roundtrip([True, True, True, True, True], [True, True, True, True, 1]) roundtrip([False, False, 0, 0, 0], [False, False, False, False, 0]) def test_timestamp(self): """Exercise funky TIMESTAMP default syntax.""" columns = [ ([TIMESTAMP], 'TIMESTAMP NULL'), ([mysql.MSTimeStamp], 'TIMESTAMP NULL'), ([mysql.MSTimeStamp, DefaultClause(sql.text('CURRENT_TIMESTAMP'))], "TIMESTAMP DEFAULT CURRENT_TIMESTAMP"), ([mysql.MSTimeStamp, DefaultClause(sql.text("'1999-09-09 09:09:09'"))], "TIMESTAMP DEFAULT '1999-09-09 
09:09:09'"), ([mysql.MSTimeStamp, DefaultClause(sql.text("'1999-09-09 09:09:09' " "ON UPDATE CURRENT_TIMESTAMP"))], "TIMESTAMP DEFAULT '1999-09-09 09:09:09' " "ON UPDATE CURRENT_TIMESTAMP"), ([mysql.MSTimeStamp, DefaultClause(sql.text("CURRENT_TIMESTAMP " "ON UPDATE CURRENT_TIMESTAMP"))], "TIMESTAMP DEFAULT CURRENT_TIMESTAMP " "ON UPDATE CURRENT_TIMESTAMP"), ] for spec, expected in columns: c = Column('t', *spec) Table('t', MetaData(), c) self.assert_compile( schema.CreateColumn(c), "t %s" % expected ) @testing.only_if('mysql') @testing.provide_metadata def test_timestamp_nullable(self): ts_table = Table('mysql_timestamp', self.metadata, Column('t1', TIMESTAMP), Column('t2', TIMESTAMP, nullable=False), ) self.metadata.create_all() now = testing.db.execute("select now()").scalar() # TIMESTAMP without NULL inserts current time when passed # NULL. when not passed, generates 0000-00-00 quite # annoyingly. ts_table.insert().execute({'t1': now, 't2': None}) ts_table.insert().execute({'t1': None, 't2': None}) # normalize dates that are over the second boundary def normalize(dt): if dt is None: return None elif (dt - now).seconds < 5: return now else: return dt eq_( [tuple([normalize(dt) for dt in row]) for row in ts_table.select().execute()], [(now, now), (None, now)] ) def test_time(self): """"Exercise TIME.""" self.assert_compile( mysql.TIME(), "TIME" ) self.assert_compile( mysql.TIME(fsp=5), "TIME(5)" ) eq_( mysql.TIME().result_processor(None, None)( datetime.timedelta(seconds=35, minutes=517, microseconds=450 )), datetime.time(8, 37, 35, 450) ) @testing.only_if('mysql') @testing.provide_metadata def test_year(self): """Exercise YEAR.""" year_table = Table('mysql_year', self.metadata, Column('y1', mysql.MSYear), Column('y2', mysql.MSYear), Column('y3', mysql.MSYear), Column('y5', mysql.MSYear(4))) for col in year_table.c: self.assert_(repr(col)) year_table.create() reflected = Table('mysql_year', MetaData(testing.db), autoload=True) for table in year_table, reflected: table.insert(['1950', '50', None, 1950]).execute() row = table.select().execute().first() eq_(list(row), [1950, 2050, None, 1950]) table.delete().execute() self.assert_(colspec(table.c.y1).startswith('y1 YEAR')) eq_(colspec(table.c.y5), 'y5 YEAR(4)') @testing.only_if('mysql') @testing.provide_metadata def test_set(self): """Exercise the SET type.""" set_table = Table('mysql_set', self.metadata, Column('s1', mysql.MSSet("'dq'", "'sq'")), Column('s2', mysql.MSSet("'a'")), Column('s3', mysql.MSSet("'5'", "'7'", "'9'"))) eq_(colspec(set_table.c.s1), "s1 SET('dq','sq')") eq_(colspec(set_table.c.s2), "s2 SET('a')") eq_(colspec(set_table.c.s3), "s3 SET('5','7','9')") set_table.create() reflected = Table('mysql_set', MetaData(testing.db), autoload=True) for table in set_table, reflected: def roundtrip(store, expected=None): expected = expected or store table.insert(store).execute() row = table.select().execute().first() self.assert_(list(row) == expected) table.delete().execute() roundtrip([None, None, None], [None] * 3) roundtrip(['', '', ''], [set([''])] * 3) roundtrip([set(['dq']), set(['a']), set(['5'])]) roundtrip(['dq', 'a', '5'], [set(['dq']), set(['a']), set(['5'])]) roundtrip([1, 1, 1], [set(['dq']), set(['a']), set(['5' ])]) roundtrip([set(['dq', 'sq']), None, set(['9', '5', '7' ])]) set_table.insert().execute({'s3': set(['5'])}, {'s3': set(['5', '7'])}, {'s3': set(['5', '7', '9'])}, {'s3': set(['7', '9'])}) # NOTE: the string sent to MySQL here is sensitive to ordering. 
# for some reason the set ordering is always "5, 7" when we test on # MySQLdb but in Py3K this is not guaranteed. So basically our # SET type doesn't do ordering correctly (not sure how it can, # as we don't know how the SET was configured in the first place.) rows = select([set_table.c.s3], set_table.c.s3.in_([set(['5']), ['5', '7']]) ).execute().fetchall() found = set([frozenset(row[0]) for row in rows]) eq_(found, set([frozenset(['5']), frozenset(['5', '7'])])) class EnumTest(fixtures.TestBase, AssertsExecutionResults, AssertsCompiledSQL): __only_on__ = 'mysql' __dialect__ = mysql.dialect() @testing.uses_deprecated('Manually quoting ENUM value literals') @testing.provide_metadata def test_enum(self): """Exercise the ENUM type.""" enum_table = Table('mysql_enum', self.metadata, Column('e1', mysql.ENUM("'a'", "'b'")), Column('e2', mysql.ENUM("'a'", "'b'"), nullable=False), Column('e2generic', Enum("a", "b"), nullable=False), Column('e3', mysql.ENUM("'a'", "'b'", strict=True)), Column('e4', mysql.ENUM("'a'", "'b'", strict=True), nullable=False), Column('e5', mysql.ENUM("a", "b")), Column('e5generic', Enum("a", "b")), Column('e6', mysql.ENUM("'a'", "b")), ) eq_(colspec(enum_table.c.e1), "e1 ENUM('a','b')") eq_(colspec(enum_table.c.e2), "e2 ENUM('a','b') NOT NULL") eq_(colspec(enum_table.c.e2generic), "e2generic ENUM('a','b') NOT NULL") eq_(colspec(enum_table.c.e3), "e3 ENUM('a','b')") eq_(colspec(enum_table.c.e4), "e4 ENUM('a','b') NOT NULL") eq_(colspec(enum_table.c.e5), "e5 ENUM('a','b')") eq_(colspec(enum_table.c.e5generic), "e5generic ENUM('a','b')") eq_(colspec(enum_table.c.e6), "e6 ENUM('''a''','b')") enum_table.create() assert_raises(exc.DBAPIError, enum_table.insert().execute, e1=None, e2=None, e3=None, e4=None) assert_raises(exc.StatementError, enum_table.insert().execute, e1='c', e2='c', e2generic='c', e3='c', e4='c', e5='c', e5generic='c', e6='c') enum_table.insert().execute() enum_table.insert().execute(e1='a', e2='a', e2generic='a', e3='a', e4='a', e5='a', e5generic='a', e6="'a'") enum_table.insert().execute(e1='b', e2='b', e2generic='b', e3='b', e4='b', e5='b', e5generic='b', e6='b') res = enum_table.select().execute().fetchall() expected = [(None, 'a', 'a', None, 'a', None, None, None), ('a', 'a', 'a', 'a', 'a', 'a', 'a', "'a'"), ('b', 'b', 'b', 'b', 'b', 'b', 'b', 'b')] eq_(res, expected) def test_unicode_enum(self): unicode_engine = utf8_engine() metadata = MetaData(unicode_engine) t1 = Table('table', metadata, Column('id', Integer, primary_key=True), Column('value', Enum(u('réveillé'), u('drôle'), u('S’il'))), Column('value2', mysql.ENUM(u('réveillé'), u('drôle'), u('S’il'))) ) metadata.create_all() try: t1.insert().execute(value=u('drôle'), value2=u('drôle')) t1.insert().execute(value=u('réveillé'), value2=u('réveillé')) t1.insert().execute(value=u('S’il'), value2=u('S’il')) eq_(t1.select().order_by(t1.c.id).execute().fetchall(), [(1, u('drôle'), u('drôle')), (2, u('réveillé'), u('réveillé')), (3, u('S’il'), u('S’il'))] ) # test reflection of the enum labels m2 = MetaData(testing.db) t2 = Table('table', m2, autoload=True) # TODO: what's wrong with the last element ? is there # latin-1 stuff forcing its way in ? assert t2.c.value.type.enums[0:2] == \ (u('réveillé'), u('drôle')) # u'S’il') # eh ? assert t2.c.value2.type.enums[0:2] == \ (u('réveillé'), u('drôle')) # u'S’il') # eh ? 
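# Only the first two reflected ENUM labels are compared above because the third
# label, u'S’il', comes back mangled on some client/charset combinations, as
# the TODO note earlier in this test points out (possibly a latin-1 round trip
# somewhere in reflection); the exact cause was left undetermined here.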
finally: metadata.drop_all() def test_enum_compile(self): e1 = Enum('x', 'y', 'z', name='somename') t1 = Table('sometable', MetaData(), Column('somecolumn', e1)) self.assert_compile(schema.CreateTable(t1), "CREATE TABLE sometable (somecolumn " "ENUM('x','y','z'))") t1 = Table('sometable', MetaData(), Column('somecolumn', Enum('x', 'y', 'z', native_enum=False))) self.assert_compile(schema.CreateTable(t1), "CREATE TABLE sometable (somecolumn " "VARCHAR(1), CHECK (somecolumn IN ('x', " "'y', 'z')))") @testing.exclude('mysql', '<', (4,), "3.23 can't handle an ENUM of ''") @testing.uses_deprecated('Manually quoting ENUM value literals') def test_enum_parse(self): """More exercises for the ENUM type.""" # MySQL 3.23 can't handle an ENUM of ''.... enum_table = Table('mysql_enum', MetaData(testing.db), Column('e1', mysql.ENUM("'a'")), Column('e2', mysql.ENUM("''")), Column('e3', mysql.ENUM('a')), Column('e4', mysql.ENUM('')), Column('e5', mysql.ENUM("'a'", "''")), Column('e6', mysql.ENUM("''", "'a'")), Column('e7', mysql.ENUM("''", "'''a'''", "'b''b'", "''''"))) for col in enum_table.c: self.assert_(repr(col)) try: enum_table.create() reflected = Table('mysql_enum', MetaData(testing.db), autoload=True) for t in enum_table, reflected: eq_(t.c.e1.type.enums, ("a",)) eq_(t.c.e2.type.enums, ("",)) eq_(t.c.e3.type.enums, ("a",)) eq_(t.c.e4.type.enums, ("",)) eq_(t.c.e5.type.enums, ("a", "")) eq_(t.c.e6.type.enums, ("", "a")) eq_(t.c.e7.type.enums, ("", "'a'", "b'b", "'")) finally: enum_table.drop() def colspec(c): return testing.db.dialect.ddl_compiler( testing.db.dialect, None).get_column_specification(c) SQLAlchemy-0.8.4/test/dialect/postgresql/0000755000076500000240000000000012251151573021022 5ustar classicstaff00000000000000SQLAlchemy-0.8.4/test/dialect/postgresql/__init__.py0000644000076500000240000000000012251147172023121 0ustar classicstaff00000000000000SQLAlchemy-0.8.4/test/dialect/postgresql/test_compiler.py0000644000076500000240000005177212251150015024247 0ustar classicstaff00000000000000# coding: utf-8 from sqlalchemy.testing.assertions import AssertsCompiledSQL, is_, assert_raises from sqlalchemy.testing import engines, fixtures from sqlalchemy import testing import datetime from sqlalchemy import Table, Column, select, MetaData, text, Integer, \ String, Sequence, ForeignKey, join, Numeric, \ PrimaryKeyConstraint, DateTime, tuple_, Float, BigInteger, \ func, literal_column, literal, bindparam, cast, extract, \ SmallInteger, Enum, REAL, update, insert, Index, delete, \ and_, Date, TypeDecorator, Time, Unicode, Interval, or_, Text from sqlalchemy.dialects.postgresql import ExcludeConstraint, array from sqlalchemy import exc, schema from sqlalchemy.dialects.postgresql import base as postgresql from sqlalchemy.dialects.postgresql import TSRANGE from sqlalchemy.orm import mapper, aliased, Session from sqlalchemy.sql import table, column, operators class SequenceTest(fixtures.TestBase, AssertsCompiledSQL): def test_format(self): seq = Sequence('my_seq_no_schema') dialect = postgresql.PGDialect() assert dialect.identifier_preparer.format_sequence(seq) \ == 'my_seq_no_schema' seq = Sequence('my_seq', schema='some_schema') assert dialect.identifier_preparer.format_sequence(seq) \ == 'some_schema.my_seq' seq = Sequence('My_Seq', schema='Some_Schema') assert dialect.identifier_preparer.format_sequence(seq) \ == '"Some_Schema"."My_Seq"' @testing.only_on('postgresql', 'foo') @testing.provide_metadata def test_reverse_eng_name(self): metadata = self.metadata engine = 
engines.testing_engine(options=dict(implicit_returning=False)) for tname, cname in [ ('tb1' * 30, 'abc'), ('tb2', 'abc' * 30), ('tb3' * 30, 'abc' * 30), ('tb4', 'abc'), ]: t = Table(tname[:57], metadata, Column(cname[:57], Integer, primary_key=True) ) t.create(engine) r = engine.execute(t.insert()) assert r.inserted_primary_key == [1] class CompileTest(fixtures.TestBase, AssertsCompiledSQL): __dialect__ = postgresql.dialect() def test_update_returning(self): dialect = postgresql.dialect() table1 = table('mytable', column('myid', Integer), column('name' , String(128)), column('description', String(128))) u = update(table1, values=dict(name='foo' )).returning(table1.c.myid, table1.c.name) self.assert_compile(u, 'UPDATE mytable SET name=%(name)s ' 'RETURNING mytable.myid, mytable.name', dialect=dialect) u = update(table1, values=dict(name='foo')).returning(table1) self.assert_compile(u, 'UPDATE mytable SET name=%(name)s ' 'RETURNING mytable.myid, mytable.name, ' 'mytable.description', dialect=dialect) u = update(table1, values=dict(name='foo' )).returning(func.length(table1.c.name)) self.assert_compile(u, 'UPDATE mytable SET name=%(name)s ' 'RETURNING length(mytable.name) AS length_1' , dialect=dialect) def test_insert_returning(self): dialect = postgresql.dialect() table1 = table('mytable', column('myid', Integer), column('name', String(128)), column('description', String(128)), ) i = insert(table1, values=dict(name='foo' )).returning(table1.c.myid, table1.c.name) self.assert_compile(i, 'INSERT INTO mytable (name) VALUES ' '(%(name)s) RETURNING mytable.myid, ' 'mytable.name', dialect=dialect) i = insert(table1, values=dict(name='foo')).returning(table1) self.assert_compile(i, 'INSERT INTO mytable (name) VALUES ' '(%(name)s) RETURNING mytable.myid, ' 'mytable.name, mytable.description', dialect=dialect) i = insert(table1, values=dict(name='foo' )).returning(func.length(table1.c.name)) self.assert_compile(i, 'INSERT INTO mytable (name) VALUES ' '(%(name)s) RETURNING length(mytable.name) ' 'AS length_1', dialect=dialect) def test_create_partial_index(self): m = MetaData() tbl = Table('testtbl', m, Column('data', Integer)) idx = Index('test_idx1', tbl.c.data, postgresql_where=and_(tbl.c.data > 5, tbl.c.data < 10)) idx = Index('test_idx1', tbl.c.data, postgresql_where=and_(tbl.c.data > 5, tbl.c.data < 10)) # test quoting and all that idx2 = Index('test_idx2', tbl.c.data, postgresql_where=and_(tbl.c.data > 'a', tbl.c.data < "b's")) self.assert_compile(schema.CreateIndex(idx), 'CREATE INDEX test_idx1 ON testtbl (data) ' 'WHERE data > 5 AND data < 10', dialect=postgresql.dialect()) self.assert_compile(schema.CreateIndex(idx2), "CREATE INDEX test_idx2 ON testtbl (data) " "WHERE data > 'a' AND data < 'b''s'", dialect=postgresql.dialect()) def test_create_index_with_ops(self): m = MetaData() tbl = Table('testtbl', m, Column('data', String), Column('data2', Integer, key='d2')) idx = Index('test_idx1', tbl.c.data, postgresql_ops={'data': 'text_pattern_ops'}) idx2 = Index('test_idx2', tbl.c.data, tbl.c.d2, postgresql_ops={'data': 'text_pattern_ops', 'd2': 'int4_ops'}) self.assert_compile(schema.CreateIndex(idx), 'CREATE INDEX test_idx1 ON testtbl ' '(data text_pattern_ops)', dialect=postgresql.dialect()) self.assert_compile(schema.CreateIndex(idx2), 'CREATE INDEX test_idx2 ON testtbl ' '(data text_pattern_ops, data2 int4_ops)', dialect=postgresql.dialect()) def test_create_index_with_using(self): m = MetaData() tbl = Table('testtbl', m, Column('data', String)) idx1 = Index('test_idx1', tbl.c.data) idx2 = 
Index('test_idx2', tbl.c.data, postgresql_using='btree') idx3 = Index('test_idx3', tbl.c.data, postgresql_using='hash') self.assert_compile(schema.CreateIndex(idx1), 'CREATE INDEX test_idx1 ON testtbl ' '(data)', dialect=postgresql.dialect()) self.assert_compile(schema.CreateIndex(idx2), 'CREATE INDEX test_idx2 ON testtbl ' 'USING btree (data)', dialect=postgresql.dialect()) self.assert_compile(schema.CreateIndex(idx3), 'CREATE INDEX test_idx3 ON testtbl ' 'USING hash (data)', dialect=postgresql.dialect()) def test_create_index_expr_gets_parens(self): m = MetaData() tbl = Table('testtbl', m, Column('x', Integer), Column('y', Integer)) idx1 = Index('test_idx1', 5 / (tbl.c.x + tbl.c.y)) self.assert_compile( schema.CreateIndex(idx1), "CREATE INDEX test_idx1 ON testtbl ((5 / (x + y)))" ) def test_create_index_literals(self): m = MetaData() tbl = Table('testtbl', m, Column('data', Integer)) idx1 = Index('test_idx1', tbl.c.data + 5) self.assert_compile( schema.CreateIndex(idx1), "CREATE INDEX test_idx1 ON testtbl ((data + 5))" ) def test_exclude_constraint_min(self): m = MetaData() tbl = Table('testtbl', m, Column('room', Integer, primary_key=True)) cons = ExcludeConstraint(('room', '=')) tbl.append_constraint(cons) self.assert_compile(schema.AddConstraint(cons), 'ALTER TABLE testtbl ADD EXCLUDE USING gist ' '(room WITH =)', dialect=postgresql.dialect()) def test_exclude_constraint_full(self): m = MetaData() room = Column('room', Integer, primary_key=True) tbl = Table('testtbl', m, room, Column('during', TSRANGE)) room = Column('room', Integer, primary_key=True) cons = ExcludeConstraint((room, '='), ('during', '&&'), name='my_name', using='gist', where="room > 100", deferrable=True, initially='immediate') tbl.append_constraint(cons) self.assert_compile(schema.AddConstraint(cons), 'ALTER TABLE testtbl ADD CONSTRAINT my_name ' 'EXCLUDE USING gist ' '(room WITH =, during WITH ''&&) WHERE ' '(room > 100) DEFERRABLE INITIALLY immediate', dialect=postgresql.dialect()) def test_exclude_constraint_copy(self): m = MetaData() cons = ExcludeConstraint(('room', '=')) tbl = Table('testtbl', m, Column('room', Integer, primary_key=True), cons) # apparently you can't copy a ColumnCollectionConstraint until # after it has been bound to a table... 
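# A small, self-contained sketch (not part of the original tests) restating the
# minimal EXCLUDE case from test_exclude_constraint_min in standalone form; the
# names are placeholders and everything compiles in memory against the
# postgresql dialect, with no live database required.
def _sketch_exclude_constraint_ddl():
    from sqlalchemy import MetaData, Table, Column, Integer, schema
    from sqlalchemy.dialects import postgresql
    from sqlalchemy.dialects.postgresql import ExcludeConstraint

    m = MetaData()
    tbl = Table('testtbl', m, Column('room', Integer, primary_key=True))
    cons = ExcludeConstraint(('room', '='))
    tbl.append_constraint(cons)
    # renders: ALTER TABLE testtbl ADD EXCLUDE USING gist (room WITH =)
    return str(schema.AddConstraint(cons).compile(dialect=postgresql.dialect()))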
cons_copy = cons.copy() tbl.append_constraint(cons_copy) self.assert_compile(schema.AddConstraint(cons_copy), 'ALTER TABLE testtbl ADD EXCLUDE USING gist ' '(room WITH =)', dialect=postgresql.dialect()) def test_substring(self): self.assert_compile(func.substring('abc', 1, 2), 'SUBSTRING(%(substring_1)s FROM %(substring_2)s ' 'FOR %(substring_3)s)') self.assert_compile(func.substring('abc', 1), 'SUBSTRING(%(substring_1)s FROM %(substring_2)s)') def test_reserved_words(self): table = Table("pg_table", MetaData(), Column("col1", Integer), Column("variadic", Integer)) x = select([table.c.col1, table.c.variadic]) self.assert_compile(x, '''SELECT pg_table.col1, pg_table."variadic" FROM pg_table''') def test_array(self): c = Column('x', postgresql.ARRAY(Integer)) self.assert_compile( cast(c, postgresql.ARRAY(Integer)), "CAST(x AS INTEGER[])" ) self.assert_compile( c[5], "x[%(x_1)s]", checkparams={'x_1': 5} ) self.assert_compile( c[5:7], "x[%(x_1)s:%(x_2)s]", checkparams={'x_2': 7, 'x_1': 5} ) self.assert_compile( c[5:7][2:3], "x[%(x_1)s:%(x_2)s][%(param_1)s:%(param_2)s]", checkparams={'x_2': 7, 'x_1': 5, 'param_1':2, 'param_2':3} ) self.assert_compile( c[5:7][3], "x[%(x_1)s:%(x_2)s][%(param_1)s]", checkparams={'x_2': 7, 'x_1': 5, 'param_1':3} ) self.assert_compile( c.contains([1]), 'x @> %(x_1)s', checkparams={'x_1': [1]} ) self.assert_compile( c.contained_by([2]), 'x <@ %(x_1)s', checkparams={'x_1': [2]} ) self.assert_compile( c.overlap([3]), 'x && %(x_1)s', checkparams={'x_1': [3]} ) self.assert_compile( postgresql.Any(4, c), '%(param_1)s = ANY (x)', checkparams={'param_1': 4} ) self.assert_compile( c.any(5, operator=operators.ne), '%(param_1)s != ANY (x)', checkparams={'param_1': 5} ) self.assert_compile( postgresql.All(6, c, operator=operators.gt), '%(param_1)s > ALL (x)', checkparams={'param_1': 6} ) self.assert_compile( c.all(7, operator=operators.lt), '%(param_1)s < ALL (x)', checkparams={'param_1': 7} ) def test_array_literal_type(self): is_(postgresql.array([1, 2]).type._type_affinity, postgresql.ARRAY) is_(postgresql.array([1, 2]).type.item_type._type_affinity, Integer) is_(postgresql.array([1, 2], type_=String). 
type.item_type._type_affinity, String) def test_array_literal(self): self.assert_compile( func.array_dims(postgresql.array([1, 2]) + postgresql.array([3, 4, 5])), "array_dims(ARRAY[%(param_1)s, %(param_2)s] || " "ARRAY[%(param_3)s, %(param_4)s, %(param_5)s])", checkparams={'param_5': 5, 'param_4': 4, 'param_1': 1, 'param_3': 3, 'param_2': 2} ) def test_array_literal_insert(self): m = MetaData() t = Table('t', m, Column('data', postgresql.ARRAY(Integer))) self.assert_compile( t.insert().values(data=array([1, 2, 3])), "INSERT INTO t (data) VALUES (ARRAY[%(param_1)s, " "%(param_2)s, %(param_3)s])" ) def test_update_array_element(self): m = MetaData() t = Table('t', m, Column('data', postgresql.ARRAY(Integer))) self.assert_compile( t.update().values({t.c.data[5]: 1}), "UPDATE t SET data[%(data_1)s]=%(param_1)s", checkparams={'data_1': 5, 'param_1': 1} ) def test_update_array_slice(self): m = MetaData() t = Table('t', m, Column('data', postgresql.ARRAY(Integer))) self.assert_compile( t.update().values({t.c.data[2:5]: 2}), "UPDATE t SET data[%(data_1)s:%(data_2)s]=%(param_1)s", checkparams={'param_1': 2, 'data_2': 5, 'data_1': 2} ) def test_from_only(self): m = MetaData() tbl1 = Table('testtbl1', m, Column('id', Integer)) tbl2 = Table('testtbl2', m, Column('id', Integer)) stmt = tbl1.select().with_hint(tbl1, 'ONLY', 'postgresql') expected = 'SELECT testtbl1.id FROM ONLY testtbl1' self.assert_compile(stmt, expected) talias1 = tbl1.alias('foo') stmt = talias1.select().with_hint(talias1, 'ONLY', 'postgresql') expected = 'SELECT foo.id FROM ONLY testtbl1 AS foo' self.assert_compile(stmt, expected) stmt = select([tbl1, tbl2]).with_hint(tbl1, 'ONLY', 'postgresql') expected = ('SELECT testtbl1.id, testtbl2.id FROM ONLY testtbl1, ' 'testtbl2') self.assert_compile(stmt, expected) stmt = select([tbl1, tbl2]).with_hint(tbl2, 'ONLY', 'postgresql') expected = ('SELECT testtbl1.id, testtbl2.id FROM testtbl1, ONLY ' 'testtbl2') self.assert_compile(stmt, expected) stmt = select([tbl1, tbl2]) stmt = stmt.with_hint(tbl1, 'ONLY', 'postgresql') stmt = stmt.with_hint(tbl2, 'ONLY', 'postgresql') expected = ('SELECT testtbl1.id, testtbl2.id FROM ONLY testtbl1, ' 'ONLY testtbl2') self.assert_compile(stmt, expected) stmt = update(tbl1, values=dict(id=1)) stmt = stmt.with_hint('ONLY', dialect_name='postgresql') expected = 'UPDATE ONLY testtbl1 SET id=%(id)s' self.assert_compile(stmt, expected) stmt = delete(tbl1).with_hint('ONLY', selectable=tbl1, dialect_name='postgresql') expected = 'DELETE FROM ONLY testtbl1' self.assert_compile(stmt, expected) tbl3 = Table('testtbl3', m, Column('id', Integer), schema='testschema') stmt = tbl3.select().with_hint(tbl3, 'ONLY', 'postgresql') expected = 'SELECT testschema.testtbl3.id FROM ONLY testschema.testtbl3' self.assert_compile(stmt, expected) assert_raises( exc.CompileError, tbl3.select().with_hint(tbl3, "FAKE", "postgresql").compile, dialect=postgresql.dialect() ) class DistinctOnTest(fixtures.TestBase, AssertsCompiledSQL): """Test 'DISTINCT' with SQL expression language and orm.Query with an emphasis on PG's 'DISTINCT ON' syntax. 
""" __dialect__ = postgresql.dialect() def setup(self): self.table = Table('t', MetaData(), Column('id',Integer, primary_key=True), Column('a', String), Column('b', String), ) def test_plain_generative(self): self.assert_compile( select([self.table]).distinct(), "SELECT DISTINCT t.id, t.a, t.b FROM t" ) def test_on_columns_generative(self): self.assert_compile( select([self.table]).distinct(self.table.c.a), "SELECT DISTINCT ON (t.a) t.id, t.a, t.b FROM t" ) def test_on_columns_generative_multi_call(self): self.assert_compile( select([self.table]).distinct(self.table.c.a). distinct(self.table.c.b), "SELECT DISTINCT ON (t.a, t.b) t.id, t.a, t.b FROM t" ) def test_plain_inline(self): self.assert_compile( select([self.table], distinct=True), "SELECT DISTINCT t.id, t.a, t.b FROM t" ) def test_on_columns_inline_list(self): self.assert_compile( select([self.table], distinct=[self.table.c.a, self.table.c.b]). order_by(self.table.c.a, self.table.c.b), "SELECT DISTINCT ON (t.a, t.b) t.id, " "t.a, t.b FROM t ORDER BY t.a, t.b" ) def test_on_columns_inline_scalar(self): self.assert_compile( select([self.table], distinct=self.table.c.a), "SELECT DISTINCT ON (t.a) t.id, t.a, t.b FROM t" ) def test_query_plain(self): sess = Session() self.assert_compile( sess.query(self.table).distinct(), "SELECT DISTINCT t.id AS t_id, t.a AS t_a, " "t.b AS t_b FROM t" ) def test_query_on_columns(self): sess = Session() self.assert_compile( sess.query(self.table).distinct(self.table.c.a), "SELECT DISTINCT ON (t.a) t.id AS t_id, t.a AS t_a, " "t.b AS t_b FROM t" ) def test_query_on_columns_multi_call(self): sess = Session() self.assert_compile( sess.query(self.table).distinct(self.table.c.a). distinct(self.table.c.b), "SELECT DISTINCT ON (t.a, t.b) t.id AS t_id, t.a AS t_a, " "t.b AS t_b FROM t" ) def test_query_on_columns_subquery(self): sess = Session() class Foo(object): pass mapper(Foo, self.table) sess = Session() self.assert_compile( sess.query(Foo).from_self().distinct(Foo.a, Foo.b), "SELECT DISTINCT ON (anon_1.t_a, anon_1.t_b) anon_1.t_id " "AS anon_1_t_id, anon_1.t_a AS anon_1_t_a, anon_1.t_b " "AS anon_1_t_b FROM (SELECT t.id AS t_id, t.a AS t_a, " "t.b AS t_b FROM t) AS anon_1" ) def test_query_distinct_on_aliased(self): class Foo(object): pass mapper(Foo, self.table) a1 = aliased(Foo) sess = Session() self.assert_compile( sess.query(a1).distinct(a1.a), "SELECT DISTINCT ON (t_1.a) t_1.id AS t_1_id, " "t_1.a AS t_1_a, t_1.b AS t_1_b FROM t AS t_1" ) def test_distinct_on_subquery_anon(self): sq = select([self.table]).alias() q = select([self.table.c.id,sq.c.id]).\ distinct(sq.c.id).\ where(self.table.c.id==sq.c.id) self.assert_compile( q, "SELECT DISTINCT ON (anon_1.id) t.id, anon_1.id " "FROM t, (SELECT t.id AS id, t.a AS a, t.b " "AS b FROM t) AS anon_1 WHERE t.id = anon_1.id" ) def test_distinct_on_subquery_named(self): sq = select([self.table]).alias('sq') q = select([self.table.c.id,sq.c.id]).\ distinct(sq.c.id).\ where(self.table.c.id==sq.c.id) self.assert_compile( q, "SELECT DISTINCT ON (sq.id) t.id, sq.id " "FROM t, (SELECT t.id AS id, t.a AS a, " "t.b AS b FROM t) AS sq WHERE t.id = sq.id" ) SQLAlchemy-0.8.4/test/dialect/postgresql/test_dialect.py0000644000076500000240000002125512251150015024033 0ustar classicstaff00000000000000# coding: utf-8 from sqlalchemy.testing.assertions import eq_, assert_raises, \ assert_raises_message, AssertsExecutionResults, \ AssertsCompiledSQL from sqlalchemy.testing import engines, fixtures from sqlalchemy import testing import datetime from sqlalchemy import Table, 
Column, select, MetaData, text, Integer, \ String, Sequence, ForeignKey, join, Numeric, \ PrimaryKeyConstraint, DateTime, tuple_, Float, BigInteger, \ func, literal_column, literal, bindparam, cast, extract, \ SmallInteger, Enum, REAL, update, insert, Index, delete, \ and_, Date, TypeDecorator, Time, Unicode, Interval, or_, Text from sqlalchemy import exc, schema from sqlalchemy.dialects.postgresql import base as postgresql import logging import logging.handlers from sqlalchemy.testing.mock import Mock class MiscTest(fixtures.TestBase, AssertsExecutionResults, AssertsCompiledSQL): __only_on__ = 'postgresql' @testing.provide_metadata def test_date_reflection(self): metadata = self.metadata t1 = Table('pgdate', metadata, Column('date1', DateTime(timezone=True)), Column('date2', DateTime(timezone=False))) metadata.create_all() m2 = MetaData(testing.db) t2 = Table('pgdate', m2, autoload=True) assert t2.c.date1.type.timezone is True assert t2.c.date2.type.timezone is False @testing.fails_on('+zxjdbc', 'The JDBC driver handles the version parsing') def test_version_parsing(self): def mock_conn(res): return Mock( execute=Mock( return_value=Mock(scalar=Mock(return_value=res)) ) ) for string, version in \ [('PostgreSQL 8.3.8 on i686-redhat-linux-gnu, compiled by ' 'GCC gcc (GCC) 4.1.2 20070925 (Red Hat 4.1.2-33)', (8, 3, 8)), ('PostgreSQL 8.5devel on x86_64-unknown-linux-gnu, ' 'compiled by GCC gcc (GCC) 4.4.2, 64-bit', (8, 5)), ('EnterpriseDB 9.1.2.2 on x86_64-unknown-linux-gnu, ' 'compiled by gcc (GCC) 4.1.2 20080704 (Red Hat 4.1.2-50), ' '64-bit', (9, 1, 2)), ('[PostgreSQL 9.2.4 ] VMware vFabric Postgres 9.2.4.0 ' 'release build 1080137', (9, 2, 4)) ]: eq_(testing.db.dialect._get_server_version_info(mock_conn(string)), version) @testing.only_on('postgresql+psycopg2', 'psycopg2-specific feature') def test_psycopg2_version(self): v = testing.db.dialect.psycopg2_version assert testing.db.dialect.dbapi.__version__.\ startswith(".".join(str(x) for x in v)) @testing.only_on('postgresql+psycopg2', 'psycopg2-specific feature') def test_notice_logging(self): log = logging.getLogger('sqlalchemy.dialects.postgresql') buf = logging.handlers.BufferingHandler(100) lev = log.level log.addHandler(buf) log.setLevel(logging.INFO) try: conn = testing.db.connect() trans = conn.begin() try: conn.execute('create table foo (id serial primary key)') finally: trans.rollback() finally: log.removeHandler(buf) log.setLevel(lev) msgs = ' '.join(b.msg for b in buf.buffer) assert 'will create implicit sequence' in msgs assert 'will create implicit index' in msgs @testing.only_on('postgresql+psycopg2', 'psycopg2-specific feature') @engines.close_open_connections def test_client_encoding(self): c = testing.db.connect() current_encoding = c.connection.connection.encoding c.close() # attempt to use an encoding that's not # already set if current_encoding == 'UTF8': test_encoding = 'LATIN1' else: test_encoding = 'UTF8' e = engines.testing_engine( options={'client_encoding':test_encoding} ) c = e.connect() eq_(c.connection.connection.encoding, test_encoding) @testing.only_on('postgresql+psycopg2', 'psycopg2-specific feature') @engines.close_open_connections def test_autocommit_isolation_level(self): extensions = __import__('psycopg2.extensions').extensions c = testing.db.connect() c = c.execution_options(isolation_level='AUTOCOMMIT') eq_(c.connection.connection.isolation_level, extensions.ISOLATION_LEVEL_AUTOCOMMIT) @testing.fails_on('+zxjdbc', "Can't infer the SQL type to use for an instance " "of 
org.python.core.PyObjectDerived.") @testing.fails_on('+pg8000', "Can't determine correct type.") def test_extract(self): fivedaysago = datetime.datetime.now() \ - datetime.timedelta(days=5) for field, exp in ('year', fivedaysago.year), ('month', fivedaysago.month), ('day', fivedaysago.day): r = testing.db.execute(select([extract(field, func.now() + datetime.timedelta(days=-5))])).scalar() eq_(r, exp) def test_checksfor_sequence(self): meta1 = MetaData(testing.db) seq = Sequence('fooseq') t = Table('mytable', meta1, Column('col1', Integer, seq)) seq.drop() try: testing.db.execute('CREATE SEQUENCE fooseq') t.create(checkfirst=True) finally: t.drop(checkfirst=True) def test_schema_roundtrips(self): meta = MetaData(testing.db) users = Table('users', meta, Column('id', Integer, primary_key=True), Column('name', String(50)), schema='test_schema') users.create() try: users.insert().execute(id=1, name='name1') users.insert().execute(id=2, name='name2') users.insert().execute(id=3, name='name3') users.insert().execute(id=4, name='name4') eq_(users.select().where(users.c.name == 'name2' ).execute().fetchall(), [(2, 'name2')]) eq_(users.select(use_labels=True).where(users.c.name == 'name2').execute().fetchall(), [(2, 'name2')]) users.delete().where(users.c.id == 3).execute() eq_(users.select().where(users.c.name == 'name3' ).execute().fetchall(), []) users.update().where(users.c.name == 'name4' ).execute(name='newname') eq_(users.select(use_labels=True).where(users.c.id == 4).execute().fetchall(), [(4, 'newname')]) finally: users.drop() def test_preexecute_passivedefault(self): """test that when we get a primary key column back from reflecting a table which has a default value on it, we pre- execute that DefaultClause upon insert.""" try: meta = MetaData(testing.db) testing.db.execute(""" CREATE TABLE speedy_users ( speedy_user_id SERIAL PRIMARY KEY, user_name VARCHAR NOT NULL, user_password VARCHAR NOT NULL ); """) t = Table('speedy_users', meta, autoload=True) r = t.insert().execute(user_name='user', user_password='lala') assert r.inserted_primary_key == [1] l = t.select().execute().fetchall() assert l == [(1, 'user', 'lala')] finally: testing.db.execute('drop table speedy_users') @testing.fails_on('+zxjdbc', 'psycopg2/pg8000 specific assertion') @testing.fails_on('pypostgresql', 'psycopg2/pg8000 specific assertion') def test_numeric_raise(self): stmt = text("select cast('hi' as char) as hi", typemap={'hi' : Numeric}) assert_raises(exc.InvalidRequestError, testing.db.execute, stmt) def test_serial_integer(self): for type_, expected in [ (Integer, 'SERIAL'), (BigInteger, 'BIGSERIAL'), (SmallInteger, 'SMALLINT'), (postgresql.INTEGER, 'SERIAL'), (postgresql.BIGINT, 'BIGSERIAL'), ]: m = MetaData() t = Table('t', m, Column('c', type_, primary_key=True)) ddl_compiler = testing.db.dialect.ddl_compiler(testing.db.dialect, schema.CreateTable(t)) eq_( ddl_compiler.get_column_specification(t.c.c), "c %s NOT NULL" % expected ) SQLAlchemy-0.8.4/test/dialect/postgresql/test_query.py0000644000076500000240000010266112251147172023606 0ustar classicstaff00000000000000# coding: utf-8 from sqlalchemy.testing.assertions import eq_, assert_raises, \ assert_raises_message, is_, AssertsExecutionResults, \ AssertsCompiledSQL, ComparesTables from sqlalchemy.testing import engines, fixtures from sqlalchemy import testing from sqlalchemy import Table, Column, select, MetaData, text, Integer, \ String, Sequence, ForeignKey, join, Numeric, \ PrimaryKeyConstraint, DateTime, tuple_, Float, BigInteger, \ func, literal_column, 
literal, bindparam, cast, extract, \ SmallInteger, Enum, REAL, update, insert, Index, delete, \ and_, Date, TypeDecorator, Time, Unicode, Interval, or_, Text from sqlalchemy import exc from sqlalchemy.dialects import postgresql import datetime class InsertTest(fixtures.TestBase, AssertsExecutionResults): __only_on__ = 'postgresql' @classmethod def setup_class(cls): global metadata cls.engine = testing.db metadata = MetaData(testing.db) def teardown(self): metadata.drop_all() metadata.clear() if self.engine is not testing.db: self.engine.dispose() def test_compiled_insert(self): table = Table('testtable', metadata, Column('id', Integer, primary_key=True), Column('data', String(30))) metadata.create_all() ins = table.insert(inline=True, values={'data': bindparam('x' )}).compile() ins.execute({'x': 'five'}, {'x': 'seven'}) assert table.select().execute().fetchall() == [(1, 'five'), (2, 'seven')] def test_foreignkey_missing_insert(self): t1 = Table('t1', metadata, Column('id', Integer, primary_key=True)) t2 = Table('t2', metadata, Column('id', Integer, ForeignKey('t1.id'), primary_key=True)) metadata.create_all() # want to ensure that "null value in column "id" violates not- # null constraint" is raised (IntegrityError on psycoopg2, but # ProgrammingError on pg8000), and not "ProgrammingError: # (ProgrammingError) relationship "t2_id_seq" does not exist". # the latter corresponds to autoincrement behavior, which is not # the case here due to the foreign key. for eng in [engines.testing_engine(options={'implicit_returning' : False}), engines.testing_engine(options={'implicit_returning' : True})]: assert_raises_message(exc.DBAPIError, 'violates not-null constraint', eng.execute, t2.insert()) def test_sequence_insert(self): table = Table('testtable', metadata, Column('id', Integer, Sequence('my_seq'), primary_key=True), Column('data', String(30))) metadata.create_all() self._assert_data_with_sequence(table, 'my_seq') def test_sequence_returning_insert(self): table = Table('testtable', metadata, Column('id', Integer, Sequence('my_seq'), primary_key=True), Column('data', String(30))) metadata.create_all() self._assert_data_with_sequence_returning(table, 'my_seq') def test_opt_sequence_insert(self): table = Table('testtable', metadata, Column('id', Integer, Sequence('my_seq', optional=True), primary_key=True), Column('data', String(30))) metadata.create_all() self._assert_data_autoincrement(table) def test_opt_sequence_returning_insert(self): table = Table('testtable', metadata, Column('id', Integer, Sequence('my_seq', optional=True), primary_key=True), Column('data', String(30))) metadata.create_all() self._assert_data_autoincrement_returning(table) def test_autoincrement_insert(self): table = Table('testtable', metadata, Column('id', Integer, primary_key=True), Column('data', String(30))) metadata.create_all() self._assert_data_autoincrement(table) def test_autoincrement_returning_insert(self): table = Table('testtable', metadata, Column('id', Integer, primary_key=True), Column('data', String(30))) metadata.create_all() self._assert_data_autoincrement_returning(table) def test_noautoincrement_insert(self): table = Table('testtable', metadata, Column('id', Integer, primary_key=True, autoincrement=False), Column('data', String(30))) metadata.create_all() self._assert_data_noautoincrement(table) def _assert_data_autoincrement(self, table): self.engine = \ engines.testing_engine(options={'implicit_returning' : False}) metadata.bind = self.engine def go(): # execute with explicit id r = 
table.insert().execute({'id': 30, 'data': 'd1'}) assert r.inserted_primary_key == [30] # execute with prefetch id r = table.insert().execute({'data': 'd2'}) assert r.inserted_primary_key == [1] # executemany with explicit ids table.insert().execute({'id': 31, 'data': 'd3'}, {'id': 32, 'data': 'd4'}) # executemany, uses SERIAL table.insert().execute({'data': 'd5'}, {'data': 'd6'}) # single execute, explicit id, inline table.insert(inline=True).execute({'id': 33, 'data': 'd7'}) # single execute, inline, uses SERIAL table.insert(inline=True).execute({'data': 'd8'}) # note that the test framework doesnt capture the "preexecute" # of a seqeuence or default. we just see it in the bind params. self.assert_sql(self.engine, go, [], with_sequences=[ ('INSERT INTO testtable (id, data) VALUES (:id, :data)', {'id': 30, 'data': 'd1'}), ('INSERT INTO testtable (id, data) VALUES (:id, :data)', {'id': 1, 'data': 'd2'}), ('INSERT INTO testtable (id, data) VALUES (:id, :data)', [{'id': 31, 'data': 'd3'}, {'id': 32, 'data': 'd4'}]), ('INSERT INTO testtable (data) VALUES (:data)', [{'data' : 'd5'}, {'data': 'd6'}]), ('INSERT INTO testtable (id, data) VALUES (:id, :data)', [{'id': 33, 'data': 'd7'}]), ('INSERT INTO testtable (data) VALUES (:data)', [{'data' : 'd8'}]), ]) assert table.select().execute().fetchall() == [ (30, 'd1'), (1, 'd2'), (31, 'd3'), (32, 'd4'), (2, 'd5'), (3, 'd6'), (33, 'd7'), (4, 'd8'), ] table.delete().execute() # test the same series of events using a reflected version of # the table m2 = MetaData(self.engine) table = Table(table.name, m2, autoload=True) def go(): table.insert().execute({'id': 30, 'data': 'd1'}) r = table.insert().execute({'data': 'd2'}) assert r.inserted_primary_key == [5] table.insert().execute({'id': 31, 'data': 'd3'}, {'id': 32, 'data': 'd4'}) table.insert().execute({'data': 'd5'}, {'data': 'd6'}) table.insert(inline=True).execute({'id': 33, 'data': 'd7'}) table.insert(inline=True).execute({'data': 'd8'}) self.assert_sql(self.engine, go, [], with_sequences=[ ('INSERT INTO testtable (id, data) VALUES (:id, :data)', {'id': 30, 'data': 'd1'}), ('INSERT INTO testtable (id, data) VALUES (:id, :data)', {'id': 5, 'data': 'd2'}), ('INSERT INTO testtable (id, data) VALUES (:id, :data)', [{'id': 31, 'data': 'd3'}, {'id': 32, 'data': 'd4'}]), ('INSERT INTO testtable (data) VALUES (:data)', [{'data' : 'd5'}, {'data': 'd6'}]), ('INSERT INTO testtable (id, data) VALUES (:id, :data)', [{'id': 33, 'data': 'd7'}]), ('INSERT INTO testtable (data) VALUES (:data)', [{'data' : 'd8'}]), ]) assert table.select().execute().fetchall() == [ (30, 'd1'), (5, 'd2'), (31, 'd3'), (32, 'd4'), (6, 'd5'), (7, 'd6'), (33, 'd7'), (8, 'd8'), ] table.delete().execute() def _assert_data_autoincrement_returning(self, table): self.engine = \ engines.testing_engine(options={'implicit_returning': True}) metadata.bind = self.engine def go(): # execute with explicit id r = table.insert().execute({'id': 30, 'data': 'd1'}) assert r.inserted_primary_key == [30] # execute with prefetch id r = table.insert().execute({'data': 'd2'}) assert r.inserted_primary_key == [1] # executemany with explicit ids table.insert().execute({'id': 31, 'data': 'd3'}, {'id': 32, 'data': 'd4'}) # executemany, uses SERIAL table.insert().execute({'data': 'd5'}, {'data': 'd6'}) # single execute, explicit id, inline table.insert(inline=True).execute({'id': 33, 'data': 'd7'}) # single execute, inline, uses SERIAL table.insert(inline=True).execute({'data': 'd8'}) self.assert_sql(self.engine, go, [], with_sequences=[ ('INSERT INTO testtable 
(id, data) VALUES (:id, :data)', {'id': 30, 'data': 'd1'}), ('INSERT INTO testtable (data) VALUES (:data) RETURNING ' 'testtable.id', {'data': 'd2'}), ('INSERT INTO testtable (id, data) VALUES (:id, :data)', [{'id': 31, 'data': 'd3'}, {'id': 32, 'data': 'd4'}]), ('INSERT INTO testtable (data) VALUES (:data)', [{'data' : 'd5'}, {'data': 'd6'}]), ('INSERT INTO testtable (id, data) VALUES (:id, :data)', [{'id': 33, 'data': 'd7'}]), ('INSERT INTO testtable (data) VALUES (:data)', [{'data' : 'd8'}]), ]) assert table.select().execute().fetchall() == [ (30, 'd1'), (1, 'd2'), (31, 'd3'), (32, 'd4'), (2, 'd5'), (3, 'd6'), (33, 'd7'), (4, 'd8'), ] table.delete().execute() # test the same series of events using a reflected version of # the table m2 = MetaData(self.engine) table = Table(table.name, m2, autoload=True) def go(): table.insert().execute({'id': 30, 'data': 'd1'}) r = table.insert().execute({'data': 'd2'}) assert r.inserted_primary_key == [5] table.insert().execute({'id': 31, 'data': 'd3'}, {'id': 32, 'data': 'd4'}) table.insert().execute({'data': 'd5'}, {'data': 'd6'}) table.insert(inline=True).execute({'id': 33, 'data': 'd7'}) table.insert(inline=True).execute({'data': 'd8'}) self.assert_sql(self.engine, go, [], with_sequences=[ ('INSERT INTO testtable (id, data) VALUES (:id, :data)', {'id': 30, 'data': 'd1'}), ('INSERT INTO testtable (data) VALUES (:data) RETURNING ' 'testtable.id', {'data': 'd2'}), ('INSERT INTO testtable (id, data) VALUES (:id, :data)', [{'id': 31, 'data': 'd3'}, {'id': 32, 'data': 'd4'}]), ('INSERT INTO testtable (data) VALUES (:data)', [{'data' : 'd5'}, {'data': 'd6'}]), ('INSERT INTO testtable (id, data) VALUES (:id, :data)', [{'id': 33, 'data': 'd7'}]), ('INSERT INTO testtable (data) VALUES (:data)', [{'data' : 'd8'}]), ]) assert table.select().execute().fetchall() == [ (30, 'd1'), (5, 'd2'), (31, 'd3'), (32, 'd4'), (6, 'd5'), (7, 'd6'), (33, 'd7'), (8, 'd8'), ] table.delete().execute() def _assert_data_with_sequence(self, table, seqname): self.engine = \ engines.testing_engine(options={'implicit_returning' : False}) metadata.bind = self.engine def go(): table.insert().execute({'id': 30, 'data': 'd1'}) table.insert().execute({'data': 'd2'}) table.insert().execute({'id': 31, 'data': 'd3'}, {'id': 32, 'data': 'd4'}) table.insert().execute({'data': 'd5'}, {'data': 'd6'}) table.insert(inline=True).execute({'id': 33, 'data': 'd7'}) table.insert(inline=True).execute({'data': 'd8'}) self.assert_sql(self.engine, go, [], with_sequences=[ ('INSERT INTO testtable (id, data) VALUES (:id, :data)', {'id': 30, 'data': 'd1'}), ('INSERT INTO testtable (id, data) VALUES (:id, :data)', {'id': 1, 'data': 'd2'}), ('INSERT INTO testtable (id, data) VALUES (:id, :data)', [{'id': 31, 'data': 'd3'}, {'id': 32, 'data': 'd4'}]), ("INSERT INTO testtable (id, data) VALUES (nextval('%s'), " ":data)" % seqname, [{'data': 'd5'}, {'data': 'd6'}]), ('INSERT INTO testtable (id, data) VALUES (:id, :data)', [{'id': 33, 'data': 'd7'}]), ("INSERT INTO testtable (id, data) VALUES (nextval('%s'), " ":data)" % seqname, [{'data': 'd8'}]), ]) assert table.select().execute().fetchall() == [ (30, 'd1'), (1, 'd2'), (31, 'd3'), (32, 'd4'), (2, 'd5'), (3, 'd6'), (33, 'd7'), (4, 'd8'), ] # cant test reflection here since the Sequence must be # explicitly specified def _assert_data_with_sequence_returning(self, table, seqname): self.engine = \ engines.testing_engine(options={'implicit_returning': True}) metadata.bind = self.engine def go(): table.insert().execute({'id': 30, 'data': 'd1'}) 
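            # The inserts below leave "id" out; per the SQL asserted further
            # down, implicit_returning=True is expected to render the explicit
            # Sequence inline and hand back the new key in the same round
            # trip, roughly:
            #   INSERT INTO testtable (id, data)
            #       VALUES (nextval('my_seq'), :data) RETURNING testtable.id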
table.insert().execute({'data': 'd2'}) table.insert().execute({'id': 31, 'data': 'd3'}, {'id': 32, 'data': 'd4'}) table.insert().execute({'data': 'd5'}, {'data': 'd6'}) table.insert(inline=True).execute({'id': 33, 'data': 'd7'}) table.insert(inline=True).execute({'data': 'd8'}) self.assert_sql(self.engine, go, [], with_sequences=[ ('INSERT INTO testtable (id, data) VALUES (:id, :data)', {'id': 30, 'data': 'd1'}), ("INSERT INTO testtable (id, data) VALUES " "(nextval('my_seq'), :data) RETURNING testtable.id", {'data': 'd2'}), ('INSERT INTO testtable (id, data) VALUES (:id, :data)', [{'id': 31, 'data': 'd3'}, {'id': 32, 'data': 'd4'}]), ("INSERT INTO testtable (id, data) VALUES (nextval('%s'), " ":data)" % seqname, [{'data': 'd5'}, {'data': 'd6'}]), ('INSERT INTO testtable (id, data) VALUES (:id, :data)', [{'id': 33, 'data': 'd7'}]), ("INSERT INTO testtable (id, data) VALUES (nextval('%s'), " ":data)" % seqname, [{'data': 'd8'}]), ]) assert table.select().execute().fetchall() == [ (30, 'd1'), (1, 'd2'), (31, 'd3'), (32, 'd4'), (2, 'd5'), (3, 'd6'), (33, 'd7'), (4, 'd8'), ] # cant test reflection here since the Sequence must be # explicitly specified def _assert_data_noautoincrement(self, table): self.engine = \ engines.testing_engine(options={'implicit_returning' : False}) metadata.bind = self.engine table.insert().execute({'id': 30, 'data': 'd1'}) if self.engine.driver == 'pg8000': exception_cls = exc.ProgrammingError elif self.engine.driver == 'pypostgresql': exception_cls = Exception else: exception_cls = exc.IntegrityError assert_raises_message(exception_cls, 'violates not-null constraint', table.insert().execute, {'data': 'd2'}) assert_raises_message(exception_cls, 'violates not-null constraint', table.insert().execute, {'data': 'd2'}, {'data': 'd3'}) assert_raises_message(exception_cls, 'violates not-null constraint', table.insert().execute, {'data': 'd2'}) assert_raises_message(exception_cls, 'violates not-null constraint', table.insert().execute, {'data': 'd2'}, {'data': 'd3'}) table.insert().execute({'id': 31, 'data': 'd2'}, {'id': 32, 'data': 'd3'}) table.insert(inline=True).execute({'id': 33, 'data': 'd4'}) assert table.select().execute().fetchall() == [(30, 'd1'), (31, 'd2'), (32, 'd3'), (33, 'd4')] table.delete().execute() # test the same series of events using a reflected version of # the table m2 = MetaData(self.engine) table = Table(table.name, m2, autoload=True) table.insert().execute({'id': 30, 'data': 'd1'}) assert_raises_message(exception_cls, 'violates not-null constraint', table.insert().execute, {'data': 'd2'}) assert_raises_message(exception_cls, 'violates not-null constraint', table.insert().execute, {'data': 'd2'}, {'data': 'd3'}) table.insert().execute({'id': 31, 'data': 'd2'}, {'id': 32, 'data': 'd3'}) table.insert(inline=True).execute({'id': 33, 'data': 'd4'}) assert table.select().execute().fetchall() == [(30, 'd1'), (31, 'd2'), (32, 'd3'), (33, 'd4')] class ServerSideCursorsTest(fixtures.TestBase, AssertsExecutionResults): __only_on__ = 'postgresql+psycopg2' def _fixture(self, server_side_cursors): self.engine = engines.testing_engine( options={'server_side_cursors':server_side_cursors} ) return self.engine def tearDown(self): engines.testing_reaper.close_all() self.engine.dispose() def test_global_string(self): engine = self._fixture(True) result = engine.execute('select 1') assert result.cursor.name def test_global_text(self): engine = self._fixture(True) result = engine.execute(text('select 1')) assert result.cursor.name def test_global_expr(self): engine = 
self._fixture(True) result = engine.execute(select([1])) assert result.cursor.name def test_global_off_explicit(self): engine = self._fixture(False) result = engine.execute(text('select 1')) # It should be off globally ... assert not result.cursor.name def test_stmt_option(self): engine = self._fixture(False) s = select([1]).execution_options(stream_results=True) result = engine.execute(s) # ... but enabled for this one. assert result.cursor.name def test_conn_option(self): engine = self._fixture(False) # and this one result = \ engine.connect().execution_options(stream_results=True).\ execute('select 1' ) assert result.cursor.name def test_stmt_enabled_conn_option_disabled(self): engine = self._fixture(False) s = select([1]).execution_options(stream_results=True) # not this one result = \ engine.connect().execution_options(stream_results=False).\ execute(s) assert not result.cursor.name def test_stmt_option_disabled(self): engine = self._fixture(True) s = select([1]).execution_options(stream_results=False) result = engine.execute(s) assert not result.cursor.name def test_aliases_and_ss(self): engine = self._fixture(False) s1 = select([1]).execution_options(stream_results=True).alias() result = engine.execute(s1) assert result.cursor.name # s1's options shouldn't affect s2 when s2 is used as a # from_obj. s2 = select([1], from_obj=s1) result = engine.execute(s2) assert not result.cursor.name def test_for_update_expr(self): engine = self._fixture(True) s1 = select([1], for_update=True) result = engine.execute(s1) assert result.cursor.name def test_for_update_string(self): engine = self._fixture(True) result = engine.execute('SELECT 1 FOR UPDATE') assert result.cursor.name def test_text_no_ss(self): engine = self._fixture(False) s = text('select 42') result = engine.execute(s) assert not result.cursor.name def test_text_ss_option(self): engine = self._fixture(False) s = text('select 42').execution_options(stream_results=True) result = engine.execute(s) assert result.cursor.name def test_roundtrip(self): engine = self._fixture(True) test_table = Table('test_table', MetaData(engine), Column('id', Integer, primary_key=True), Column('data', String(50))) test_table.create(checkfirst=True) try: test_table.insert().execute(data='data1') nextid = engine.execute(Sequence('test_table_id_seq')) test_table.insert().execute(id=nextid, data='data2') eq_(test_table.select().execute().fetchall(), [(1, 'data1' ), (2, 'data2')]) test_table.update().where(test_table.c.id == 2).values(data=test_table.c.data + ' updated' ).execute() eq_(test_table.select().execute().fetchall(), [(1, 'data1' ), (2, 'data2 updated')]) test_table.delete().execute() eq_(test_table.count().scalar(), 0) finally: test_table.drop(checkfirst=True) class MatchTest(fixtures.TestBase, AssertsCompiledSQL): __only_on__ = 'postgresql' __excluded_on__ = ('postgresql', '<', (8, 3, 0)), @classmethod def setup_class(cls): global metadata, cattable, matchtable metadata = MetaData(testing.db) cattable = Table('cattable', metadata, Column('id', Integer, primary_key=True), Column('description', String(50))) matchtable = Table('matchtable', metadata, Column('id', Integer, primary_key=True), Column('title', String(200)), Column('category_id', Integer, ForeignKey('cattable.id'))) metadata.create_all() cattable.insert().execute([{'id': 1, 'description': 'Python'}, {'id': 2, 'description': 'Ruby'}]) matchtable.insert().execute([{'id': 1, 'title' : 'Agile Web Development with Rails' , 'category_id': 2}, {'id': 2, 'title': 'Dive Into Python', 'category_id': 
1}, {'id': 3, 'title' : "Programming Matz's Ruby", 'category_id': 2}, {'id': 4, 'title' : 'The Definitive Guide to Django', 'category_id': 1}, {'id': 5, 'title' : 'Python in a Nutshell', 'category_id': 1}]) @classmethod def teardown_class(cls): metadata.drop_all() @testing.fails_on('postgresql+pg8000', 'uses positional') @testing.fails_on('postgresql+zxjdbc', 'uses qmark') def test_expression_pyformat(self): self.assert_compile(matchtable.c.title.match('somstr'), 'matchtable.title @@ to_tsquery(%(title_1)s' ')') @testing.fails_on('postgresql+psycopg2', 'uses pyformat') @testing.fails_on('postgresql+pypostgresql', 'uses pyformat') @testing.fails_on('postgresql+zxjdbc', 'uses qmark') def test_expression_positional(self): self.assert_compile(matchtable.c.title.match('somstr'), 'matchtable.title @@ to_tsquery(%s)') def test_simple_match(self): results = \ matchtable.select().where(matchtable.c.title.match('python' )).order_by(matchtable.c.id).execute().fetchall() eq_([2, 5], [r.id for r in results]) def test_simple_match_with_apostrophe(self): results = \ matchtable.select().where(matchtable.c.title.match("Matz's" )).execute().fetchall() eq_([3], [r.id for r in results]) def test_simple_derivative_match(self): results = \ matchtable.select().where(matchtable.c.title.match('nutshells' )).execute().fetchall() eq_([5], [r.id for r in results]) def test_or_match(self): results1 = \ matchtable.select().where(or_(matchtable.c.title.match('nutshells' ), matchtable.c.title.match('rubies' ))).order_by(matchtable.c.id).execute().fetchall() eq_([3, 5], [r.id for r in results1]) results2 = \ matchtable.select().where( matchtable.c.title.match('nutshells | rubies' )).order_by(matchtable.c.id).execute().fetchall() eq_([3, 5], [r.id for r in results2]) def test_and_match(self): results1 = \ matchtable.select().where(and_(matchtable.c.title.match('python' ), matchtable.c.title.match('nutshells' ))).execute().fetchall() eq_([5], [r.id for r in results1]) results2 = \ matchtable.select().where( matchtable.c.title.match('python & nutshells' )).execute().fetchall() eq_([5], [r.id for r in results2]) def test_match_across_joins(self): results = matchtable.select().where(and_(cattable.c.id == matchtable.c.category_id, or_(cattable.c.description.match('Ruby'), matchtable.c.title.match('nutshells' )))).order_by(matchtable.c.id).execute().fetchall() eq_([1, 3, 5], [r.id for r in results]) class TupleTest(fixtures.TestBase): __only_on__ = 'postgresql' def test_tuple_containment(self): for test, exp in [ ([('a', 'b')], True), ([('a', 'c')], False), ([('f', 'q'), ('a', 'b')], True), ([('f', 'q'), ('a', 'c')], False) ]: eq_( testing.db.execute( select([ tuple_( literal_column("'a'"), literal_column("'b'") ).\ in_([ tuple_(*[ literal_column("'%s'" % letter) for letter in elem ]) for elem in test ]) ]) ).scalar(), exp ) class ExtractTest(fixtures.TablesTest): """The rationale behind this test is that for many years we've had a system of embedding type casts into the expressions rendered by visit_extract() on the postgreql platform. The reason for this cast is not clear. So here we try to produce a wide range of cases to ensure that these casts are not needed; see [ticket:2740]. 
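    Put concretely (a sketch of the question under test, not a claim about
    what the dialect currently renders): for an expression like
    ``extract("year", t.c.dtme + t.c.intv)``, a bare
    ``EXTRACT(year FROM t.dtme + t.intv)`` should already give the right
    answer, with no explicit casts wrapped around the operands.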
""" __only_on__ = 'postgresql' run_inserts = 'once' run_deletes = None @classmethod def define_tables(cls, metadata): Table('t', metadata, Column('id', Integer, primary_key=True), Column('dtme', DateTime), Column('dt', Date), Column('tm', Time), Column('intv', postgresql.INTERVAL), Column('dttz', DateTime(timezone=True)) ) @classmethod def insert_data(cls): # TODO: why does setting hours to anything # not affect the TZ in the DB col ? class TZ(datetime.tzinfo): def utcoffset(self, dt): return datetime.timedelta(hours=4) conn = testing.db.connect() # we aren't resetting this at the moment but we don't have # any other tests that are TZ specific conn.execute("SET SESSION TIME ZONE 0") conn.execute( cls.tables.t.insert(), { 'dtme': datetime.datetime(2012, 5, 10, 12, 15, 25), 'dt': datetime.date(2012, 5, 10), 'tm': datetime.time(12, 15, 25), 'intv': datetime.timedelta(seconds=570), 'dttz': datetime.datetime(2012, 5, 10, 12, 15, 25, tzinfo=TZ()) }, ) def _test(self, expr, field="all", overrides=None): t = self.tables.t if field == "all": fields = {"year": 2012, "month": 5, "day": 10, "epoch": 1336652125.0, "hour": 12, "minute": 15} elif field == "time": fields = {"hour": 12, "minute": 15, "second": 25} elif field == 'date': fields = {"year": 2012, "month": 5, "day": 10} elif field == 'all+tz': fields = {"year": 2012, "month": 5, "day": 10, "epoch": 1336637725.0, "hour": 8, "timezone": 0 } else: fields = field if overrides: fields.update(overrides) for field in fields: result = testing.db.scalar( select([extract(field, expr)]).select_from(t)) eq_(result, fields[field]) def test_one(self): t = self.tables.t self._test(t.c.dtme, "all") def test_two(self): t = self.tables.t self._test(t.c.dtme + t.c.intv, overrides={"epoch": 1336652695.0, "minute": 24}) def test_three(self): t = self.tables.t actual_ts = testing.db.scalar(func.current_timestamp()) - \ datetime.timedelta(days=5) self._test(func.current_timestamp() - datetime.timedelta(days=5), {"hour": actual_ts.hour, "year": actual_ts.year, "month": actual_ts.month} ) def test_four(self): t = self.tables.t self._test(datetime.timedelta(days=5) + t.c.dt, overrides={"day": 15, "epoch": 1337040000.0, "hour": 0, "minute": 0} ) def test_five(self): t = self.tables.t self._test(func.coalesce(t.c.dtme, func.current_timestamp()), overrides={"epoch": 1336652125.0}) def test_six(self): t = self.tables.t self._test(t.c.tm + datetime.timedelta(seconds=30), "time", overrides={"second": 55}) def test_seven(self): self._test(literal(datetime.timedelta(seconds=10)) - literal(datetime.timedelta(seconds=10)), "all", overrides={"hour": 0, "minute": 0, "month": 0, "year": 0, "day": 0, "epoch": 0}) def test_eight(self): t = self.tables.t self._test(t.c.tm + datetime.timedelta(seconds=30), {"hour": 12, "minute": 15, "second": 55}) def test_nine(self): self._test(text("t.dt + t.tm")) def test_ten(self): t = self.tables.t self._test(t.c.dt + t.c.tm) def test_eleven(self): self._test(func.current_timestamp() - func.current_timestamp(), {"year": 0, "month": 0, "day": 0, "hour": 0} ) def test_twelve(self): t = self.tables.t actual_ts = testing.db.scalar( func.current_timestamp()).replace(tzinfo=None) - \ datetime.datetime(2012, 5, 10, 12, 15, 25) self._test(func.current_timestamp() - func.coalesce(t.c.dtme, func.current_timestamp()), {"day": actual_ts.days} ) def test_thirteen(self): t = self.tables.t self._test(t.c.dttz, "all+tz") def test_fourteen(self): t = self.tables.t self._test(t.c.tm, "time") def test_fifteen(self): t = self.tables.t 
self._test(datetime.timedelta(days=5) + t.c.dtme, overrides={"day": 15, "epoch": 1337084125.0} ) SQLAlchemy-0.8.4/test/dialect/postgresql/test_reflection.py0000644000076500000240000004411112251150015024554 0ustar classicstaff00000000000000# coding: utf-8 from __future__ import with_statement from sqlalchemy.testing.assertions import eq_, assert_raises, \ assert_raises_message, is_, AssertsExecutionResults, \ AssertsCompiledSQL, ComparesTables from sqlalchemy.testing import engines, fixtures from sqlalchemy import testing from sqlalchemy import Table, Column, select, MetaData, text, Integer, \ String, Sequence, ForeignKey, join, Numeric, \ PrimaryKeyConstraint, DateTime, tuple_, Float, BigInteger, \ func, literal_column, literal, bindparam, cast, extract, \ SmallInteger, Enum, REAL, update, insert, Index, delete, \ and_, Date, TypeDecorator, Time, Unicode, Interval, or_, Text from sqlalchemy import exc from sqlalchemy.dialects.postgresql import base as postgresql import logging class DomainReflectionTest(fixtures.TestBase, AssertsExecutionResults): """Test PostgreSQL domains""" __only_on__ = 'postgresql' @classmethod def setup_class(cls): con = testing.db.connect() for ddl in \ 'CREATE DOMAIN testdomain INTEGER NOT NULL DEFAULT 42', \ 'CREATE DOMAIN test_schema.testdomain INTEGER DEFAULT 0', \ "CREATE TYPE testtype AS ENUM ('test')", \ 'CREATE DOMAIN enumdomain AS testtype'\ : try: con.execute(ddl) except exc.DBAPIError, e: if not 'already exists' in str(e): raise e con.execute('CREATE TABLE testtable (question integer, answer ' 'testdomain)') con.execute('CREATE TABLE test_schema.testtable(question ' 'integer, answer test_schema.testdomain, anything ' 'integer)') con.execute('CREATE TABLE crosschema (question integer, answer ' 'test_schema.testdomain)') con.execute('CREATE TABLE enum_test (id integer, data enumdomain)') @classmethod def teardown_class(cls): con = testing.db.connect() con.execute('DROP TABLE testtable') con.execute('DROP TABLE test_schema.testtable') con.execute('DROP TABLE crosschema') con.execute('DROP DOMAIN testdomain') con.execute('DROP DOMAIN test_schema.testdomain') con.execute("DROP TABLE enum_test") con.execute("DROP DOMAIN enumdomain") con.execute("DROP TYPE testtype") def test_table_is_reflected(self): metadata = MetaData(testing.db) table = Table('testtable', metadata, autoload=True) eq_(set(table.columns.keys()), set(['question', 'answer']), "Columns of reflected table didn't equal expected columns") assert isinstance(table.c.answer.type, Integer) def test_domain_is_reflected(self): metadata = MetaData(testing.db) table = Table('testtable', metadata, autoload=True) eq_(str(table.columns.answer.server_default.arg), '42', "Reflected default value didn't equal expected value") assert not table.columns.answer.nullable, \ 'Expected reflected column to not be nullable.' 
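    # Sketch of the expectation shared by the surrounding tests, given the
    # DDL in setup_class: a column declared as "testdomain" (CREATE DOMAIN
    # testdomain INTEGER NOT NULL DEFAULT 42) reflects as an Integer column
    # whose server_default renders as '42' and which is not nullable, i.e.
    # the domain's underlying type, default and NOT NULL all surface on the
    # reflected Column.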
def test_enum_domain_is_reflected(self): metadata = MetaData(testing.db) table = Table('enum_test', metadata, autoload=True) eq_( table.c.data.type.enums, ('test', ) ) def test_table_is_reflected_test_schema(self): metadata = MetaData(testing.db) table = Table('testtable', metadata, autoload=True, schema='test_schema') eq_(set(table.columns.keys()), set(['question', 'answer', 'anything']), "Columns of reflected table didn't equal expected columns") assert isinstance(table.c.anything.type, Integer) def test_schema_domain_is_reflected(self): metadata = MetaData(testing.db) table = Table('testtable', metadata, autoload=True, schema='test_schema') eq_(str(table.columns.answer.server_default.arg), '0', "Reflected default value didn't equal expected value") assert table.columns.answer.nullable, \ 'Expected reflected column to be nullable.' def test_crosschema_domain_is_reflected(self): metadata = MetaData(testing.db) table = Table('crosschema', metadata, autoload=True) eq_(str(table.columns.answer.server_default.arg), '0', "Reflected default value didn't equal expected value") assert table.columns.answer.nullable, \ 'Expected reflected column to be nullable.' def test_unknown_types(self): from sqlalchemy.databases import postgresql ischema_names = postgresql.PGDialect.ischema_names postgresql.PGDialect.ischema_names = {} try: m2 = MetaData(testing.db) assert_raises(exc.SAWarning, Table, 'testtable', m2, autoload=True) @testing.emits_warning('Did not recognize type') def warns(): m3 = MetaData(testing.db) t3 = Table('testtable', m3, autoload=True) assert t3.c.answer.type.__class__ == sa.types.NullType finally: postgresql.PGDialect.ischema_names = ischema_names class ReflectionTest(fixtures.TestBase): __only_on__ = 'postgresql' @testing.fails_if(('postgresql', '<', (8, 4)), "newer query is bypassed due to unsupported SQL functions") @testing.provide_metadata def test_reflected_primary_key_order(self): meta1 = self.metadata subject = Table('subject', meta1, Column('p1', Integer, primary_key=True), Column('p2', Integer, primary_key=True), PrimaryKeyConstraint('p2', 'p1') ) meta1.create_all() meta2 = MetaData(testing.db) subject = Table('subject', meta2, autoload=True) eq_(subject.primary_key.columns.keys(), ['p2', 'p1']) @testing.provide_metadata def test_pg_weirdchar_reflection(self): meta1 = self.metadata subject = Table('subject', meta1, Column('id$', Integer, primary_key=True)) referer = Table('referer', meta1, Column('id', Integer, primary_key=True), Column('ref', Integer, ForeignKey('subject.id$'))) meta1.create_all() meta2 = MetaData(testing.db) subject = Table('subject', meta2, autoload=True) referer = Table('referer', meta2, autoload=True) self.assert_((subject.c['id$'] == referer.c.ref).compare( subject.join(referer).onclause)) @testing.provide_metadata def test_reflect_default_over_128_chars(self): Table('t', self.metadata, Column('x', String(200), server_default="abcd" * 40) ).create(testing.db) m = MetaData() t = Table('t', m, autoload=True, autoload_with=testing.db) eq_( t.c.x.server_default.arg.text, "'%s'::character varying" % ("abcd" * 40) ) @testing.provide_metadata def test_renamed_sequence_reflection(self): metadata = self.metadata t = Table('t', metadata, Column('id', Integer, primary_key=True)) metadata.create_all() m2 = MetaData(testing.db) t2 = Table('t', m2, autoload=True, implicit_returning=False) eq_(t2.c.id.server_default.arg.text, "nextval('t_id_seq'::regclass)") r = t2.insert().execute() eq_(r.inserted_primary_key, [1]) 
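        # the sequence backing the default is now renamed out from under the
        # table; re-reflection below is expected to report
        # nextval('foobar_id_seq'::regclass) and the next insert to return
        # primary key 2.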
testing.db.connect().execution_options(autocommit=True).\ execute('alter table t_id_seq rename to foobar_id_seq' ) m3 = MetaData(testing.db) t3 = Table('t', m3, autoload=True, implicit_returning=False) eq_(t3.c.id.server_default.arg.text, "nextval('foobar_id_seq'::regclass)") r = t3.insert().execute() eq_(r.inserted_primary_key, [2]) @testing.provide_metadata def test_renamed_pk_reflection(self): metadata = self.metadata t = Table('t', metadata, Column('id', Integer, primary_key=True)) metadata.create_all() testing.db.connect().execution_options(autocommit=True).\ execute('alter table t rename id to t_id') m2 = MetaData(testing.db) t2 = Table('t', m2, autoload=True) eq_([c.name for c in t2.primary_key], ['t_id']) @testing.provide_metadata def test_schema_reflection(self): """note: this test requires that the 'test_schema' schema be separate and accessible by the test user""" meta1 = self.metadata users = Table('users', meta1, Column('user_id', Integer, primary_key=True), Column('user_name', String(30), nullable=False), schema='test_schema') addresses = Table( 'email_addresses', meta1, Column('address_id', Integer, primary_key=True), Column('remote_user_id', Integer, ForeignKey(users.c.user_id)), Column('email_address', String(20)), schema='test_schema', ) meta1.create_all() meta2 = MetaData(testing.db) addresses = Table('email_addresses', meta2, autoload=True, schema='test_schema') users = Table('users', meta2, mustexist=True, schema='test_schema') j = join(users, addresses) self.assert_((users.c.user_id == addresses.c.remote_user_id).compare(j.onclause)) @testing.provide_metadata def test_schema_reflection_2(self): meta1 = self.metadata subject = Table('subject', meta1, Column('id', Integer, primary_key=True)) referer = Table('referer', meta1, Column('id', Integer, primary_key=True), Column('ref', Integer, ForeignKey('subject.id')), schema='test_schema') meta1.create_all() meta2 = MetaData(testing.db) subject = Table('subject', meta2, autoload=True) referer = Table('referer', meta2, schema='test_schema', autoload=True) self.assert_((subject.c.id == referer.c.ref).compare( subject.join(referer).onclause)) @testing.provide_metadata def test_schema_reflection_3(self): meta1 = self.metadata subject = Table('subject', meta1, Column('id', Integer, primary_key=True), schema='test_schema_2') referer = Table('referer', meta1, Column('id', Integer, primary_key=True), Column('ref', Integer, ForeignKey('test_schema_2.subject.id')), schema='test_schema') meta1.create_all() meta2 = MetaData(testing.db) subject = Table('subject', meta2, autoload=True, schema='test_schema_2') referer = Table('referer', meta2, schema='test_schema', autoload=True) self.assert_((subject.c.id == referer.c.ref).compare( subject.join(referer).onclause)) @testing.provide_metadata def test_uppercase_lowercase_table(self): metadata = self.metadata a_table = Table('a', metadata, Column('x', Integer)) A_table = Table('A', metadata, Column('x', Integer)) a_table.create() assert testing.db.has_table("a") assert not testing.db.has_table("A") A_table.create(checkfirst=True) assert testing.db.has_table("A") def test_uppercase_lowercase_sequence(self): a_seq = Sequence('a') A_seq = Sequence('A') a_seq.create(testing.db) assert testing.db.dialect.has_sequence(testing.db, "a") assert not testing.db.dialect.has_sequence(testing.db, "A") A_seq.create(testing.db, checkfirst=True) assert testing.db.dialect.has_sequence(testing.db, "A") a_seq.drop(testing.db) A_seq.drop(testing.db) def test_schema_reflection_multi_search_path(self): """test the 
'set the same schema' rule when multiple schemas/search paths are in effect.""" db = engines.testing_engine() conn = db.connect() trans = conn.begin() try: conn.execute("set search_path to test_schema_2, " "test_schema, public") conn.dialect.default_schema_name = "test_schema_2" conn.execute(""" CREATE TABLE test_schema.some_table ( id SERIAL not null primary key ) """) conn.execute(""" CREATE TABLE test_schema_2.some_other_table ( id SERIAL not null primary key, sid INTEGER REFERENCES test_schema.some_table(id) ) """) m1 = MetaData() t2_schema = Table('some_other_table', m1, schema="test_schema_2", autoload=True, autoload_with=conn) t1_schema = Table('some_table', m1, schema="test_schema", autoload=True, autoload_with=conn) t2_no_schema = Table('some_other_table', m1, autoload=True, autoload_with=conn) t1_no_schema = Table('some_table', m1, autoload=True, autoload_with=conn) # OK, this because, "test_schema" is # in the search path, and might as well be # the default too. why would we assign # a "schema" to the Table ? assert t2_schema.c.sid.references( t1_no_schema.c.id) assert t2_no_schema.c.sid.references( t1_no_schema.c.id) finally: trans.rollback() conn.close() db.dispose() @testing.provide_metadata def test_index_reflection(self): """ Reflecting partial & expression-based indexes should warn """ metadata = self.metadata t1 = Table('party', metadata, Column('id', String(10), nullable=False), Column('name', String(20), index=True), Column('aname', String(20))) metadata.create_all() testing.db.execute(""" create index idx1 on party ((id || name)) """) testing.db.execute(""" create unique index idx2 on party (id) where name = 'test' """) testing.db.execute(""" create index idx3 on party using btree (lower(name::text), lower(aname::text)) """) def go(): m2 = MetaData(testing.db) t2 = Table('party', m2, autoload=True) assert len(t2.indexes) == 2 # Make sure indexes are in the order we expect them in tmp = [(idx.name, idx) for idx in t2.indexes] tmp.sort() r1, r2 = [idx[1] for idx in tmp] assert r1.name == 'idx2' assert r1.unique == True assert r2.unique == False assert [t2.c.id] == r1.columns assert [t2.c.name] == r2.columns testing.assert_warnings(go, [ 'Skipped unsupported reflection of ' 'expression-based index idx1', 'Predicate of partial index idx2 ignored during ' 'reflection', 'Skipped unsupported reflection of ' 'expression-based index idx3' ]) @testing.provide_metadata def test_index_reflection_modified(self): """reflect indexes when a column name has changed - PG 9 does not update the name of the column in the index def. 
[ticket:2141] """ metadata = self.metadata t1 = Table('t', metadata, Column('id', Integer, primary_key=True), Column('x', Integer) ) metadata.create_all() conn = testing.db.connect().execution_options(autocommit=True) conn.execute("CREATE INDEX idx1 ON t (x)") conn.execute("ALTER TABLE t RENAME COLUMN x to y") ind = testing.db.dialect.get_indexes(conn, "t", None) eq_(ind, [{'unique': False, 'column_names': ['y'], 'name': 'idx1'}]) conn.close() class CustomTypeReflectionTest(fixtures.TestBase): class CustomType(object): def __init__(self, arg1=None, arg2=None): self.arg1 = arg1 self.arg2 = arg2 ischema_names = None def setup(self): ischema_names = postgresql.PGDialect.ischema_names postgresql.PGDialect.ischema_names = ischema_names.copy() self.ischema_names = ischema_names def teardown(self): postgresql.PGDialect.ischema_names = self.ischema_names self.ischema_names = None def _assert_reflected(self, dialect): for sch, args in [ ('my_custom_type', (None, None)), ('my_custom_type()', (None, None)), ('my_custom_type(ARG1)', ('ARG1', None)), ('my_custom_type(ARG1, ARG2)', ('ARG1', 'ARG2')), ]: column_info = dialect._get_column_info( 'colname', sch, None, False, {}, {}, 'public') assert isinstance(column_info['type'], self.CustomType) eq_(column_info['type'].arg1, args[0]) eq_(column_info['type'].arg2, args[1]) def test_clslevel(self): postgresql.PGDialect.ischema_names['my_custom_type'] = self.CustomType dialect = postgresql.PGDialect() self._assert_reflected(dialect) def test_instancelevel(self): dialect = postgresql.PGDialect() dialect.ischema_names = dialect.ischema_names.copy() dialect.ischema_names['my_custom_type'] = self.CustomType self._assert_reflected(dialect) SQLAlchemy-0.8.4/test/dialect/postgresql/test_types.py0000644000076500000240000016023712251150015023576 0ustar classicstaff00000000000000# coding: utf-8 from __future__ import with_statement from sqlalchemy.testing.assertions import eq_, assert_raises, \ assert_raises_message, is_, AssertsExecutionResults, \ AssertsCompiledSQL, ComparesTables from sqlalchemy.testing import engines, fixtures from sqlalchemy import testing import datetime from sqlalchemy import Table, Column, select, MetaData, text, Integer, \ String, Sequence, ForeignKey, join, Numeric, \ PrimaryKeyConstraint, DateTime, tuple_, Float, BigInteger, \ func, literal_column, literal, bindparam, cast, extract, \ SmallInteger, Enum, REAL, update, insert, Index, delete, \ and_, Date, TypeDecorator, Time, Unicode, Interval, or_, Text from sqlalchemy.orm import Session, mapper, aliased from sqlalchemy import exc, schema, types from sqlalchemy.dialects.postgresql import base as postgresql from sqlalchemy.dialects.postgresql import HSTORE, hstore, array, \ INT4RANGE, INT8RANGE, NUMRANGE, DATERANGE, TSRANGE, TSTZRANGE import decimal from sqlalchemy import util from sqlalchemy.testing.util import round_decimal from sqlalchemy.sql import table, column, operators import logging import re class FloatCoercionTest(fixtures.TablesTest, AssertsExecutionResults): __only_on__ = 'postgresql' __dialect__ = postgresql.dialect() @classmethod def define_tables(cls, metadata): data_table = Table('data_table', metadata, Column('id', Integer, primary_key=True), Column('data', Integer) ) @classmethod def insert_data(cls): data_table = cls.tables.data_table data_table.insert().execute( {'data':3}, {'data':5}, {'data':7}, {'data':2}, {'data':15}, {'data':12}, {'data':6}, {'data':478}, {'data':52}, {'data':9}, ) @testing.fails_on('postgresql+zxjdbc', 'XXX: postgresql+zxjdbc currently returns a 
Decimal result for Float') def test_float_coercion(self): data_table = self.tables.data_table for type_, result in [ (Numeric, decimal.Decimal('140.381230939')), (Float, 140.381230939), (Float(asdecimal=True), decimal.Decimal('140.381230939')), (Numeric(asdecimal=False), 140.381230939), ]: ret = testing.db.execute( select([ func.stddev_pop(data_table.c.data, type_=type_) ]) ).scalar() eq_(round_decimal(ret, 9), result) ret = testing.db.execute( select([ cast(func.stddev_pop(data_table.c.data), type_) ]) ).scalar() eq_(round_decimal(ret, 9), result) @testing.fails_on('postgresql+zxjdbc', 'zxjdbc has no support for PG arrays') @testing.provide_metadata def test_arrays(self): metadata = self.metadata t1 = Table('t', metadata, Column('x', postgresql.ARRAY(Float)), Column('y', postgresql.ARRAY(REAL)), Column('z', postgresql.ARRAY(postgresql.DOUBLE_PRECISION)), Column('q', postgresql.ARRAY(Numeric)) ) metadata.create_all() t1.insert().execute(x=[5], y=[5], z=[6], q=[decimal.Decimal("6.4")]) row = t1.select().execute().first() eq_( row, ([5], [5], [6], [decimal.Decimal("6.4")]) ) class EnumTest(fixtures.TestBase, AssertsExecutionResults, AssertsCompiledSQL): __only_on__ = 'postgresql' __dialect__ = postgresql.dialect() def test_compile(self): e1 = Enum('x', 'y', 'z', name='somename') e2 = Enum('x', 'y', 'z', name='somename', schema='someschema') self.assert_compile(postgresql.CreateEnumType(e1), "CREATE TYPE somename AS ENUM ('x','y','z')" ) self.assert_compile(postgresql.CreateEnumType(e2), "CREATE TYPE someschema.somename AS ENUM " "('x','y','z')") self.assert_compile(postgresql.DropEnumType(e1), 'DROP TYPE somename') self.assert_compile(postgresql.DropEnumType(e2), 'DROP TYPE someschema.somename') t1 = Table('sometable', MetaData(), Column('somecolumn', e1)) self.assert_compile(schema.CreateTable(t1), 'CREATE TABLE sometable (somecolumn ' 'somename)') t1 = Table('sometable', MetaData(), Column('somecolumn', Enum('x', 'y', 'z', native_enum=False))) self.assert_compile(schema.CreateTable(t1), "CREATE TABLE sometable (somecolumn " "VARCHAR(1), CHECK (somecolumn IN ('x', " "'y', 'z')))") @testing.fails_on('postgresql+zxjdbc', 'zxjdbc fails on ENUM: column "XXX" is of type ' 'XXX but expression is of type character varying') @testing.fails_on('postgresql+pg8000', 'zxjdbc fails on ENUM: column "XXX" is of type ' 'XXX but expression is of type text') def test_create_table(self): metadata = MetaData(testing.db) t1 = Table('table', metadata, Column('id', Integer, primary_key=True), Column('value', Enum('one', 'two' , 'three', name='onetwothreetype'))) t1.create() t1.create(checkfirst=True) # check the create try: t1.insert().execute(value='two') t1.insert().execute(value='three') t1.insert().execute(value='three') eq_(t1.select().order_by(t1.c.id).execute().fetchall(), [(1, 'two'), (2, 'three'), (3, 'three')]) finally: metadata.drop_all() metadata.drop_all() def test_name_required(self): metadata = MetaData(testing.db) etype = Enum('four', 'five', 'six', metadata=metadata) assert_raises(exc.CompileError, etype.create) assert_raises(exc.CompileError, etype.compile, dialect=postgresql.dialect()) @testing.fails_on('postgresql+zxjdbc', 'zxjdbc fails on ENUM: column "XXX" is of type ' 'XXX but expression is of type character varying') @testing.fails_on('postgresql+pg8000', 'zxjdbc fails on ENUM: column "XXX" is of type ' 'XXX but expression is of type text') @testing.provide_metadata def test_unicode_labels(self): metadata = self.metadata t1 = Table('table', metadata, Column('id', Integer, 
primary_key=True), Column('value', Enum(util.u('réveillé'), util.u('drôle'), util.u('S’il'), name='onetwothreetype')) ) metadata.create_all() t1.insert().execute(value=util.u('drôle')) t1.insert().execute(value=util.u('réveillé')) t1.insert().execute(value=util.u('S’il')) eq_(t1.select().order_by(t1.c.id).execute().fetchall(), [(1, util.u('drôle')), (2, util.u('réveillé')), (3, util.u('S’il'))] ) m2 = MetaData(testing.db) t2 = Table('table', m2, autoload=True) eq_( t2.c.value.type.enums, (util.u('réveillé'), util.u('drôle'), util.u('S’il')) ) def test_non_native_type(self): metadata = MetaData() t1 = Table('foo', metadata, Column('bar', Enum('one', 'two', 'three', name='myenum', native_enum=False))) def go(): t1.create(testing.db) try: self.assert_sql(testing.db, go, [], with_sequences=[("CREATE TABLE foo (\tbar " "VARCHAR(5), \tCONSTRAINT myenum CHECK " "(bar IN ('one', 'two', 'three')))", {})]) finally: metadata.drop_all(testing.db) @testing.provide_metadata def test_disable_create(self): metadata = self.metadata e1 = postgresql.ENUM('one', 'two', 'three', name="myenum", create_type=False) t1 = Table('e1', metadata, Column('c1', e1) ) # table can be created separately # without conflict e1.create(bind=testing.db) t1.create(testing.db) t1.drop(testing.db) e1.drop(bind=testing.db) @testing.provide_metadata def test_generate_multiple(self): """Test that the same enum twice only generates once for the create_all() call, without using checkfirst. A 'memo' collection held by the DDL runner now handles this. """ metadata = self.metadata e1 = Enum('one', 'two', 'three', name="myenum") t1 = Table('e1', metadata, Column('c1', e1) ) t2 = Table('e2', metadata, Column('c1', e1) ) metadata.create_all(checkfirst=False) metadata.drop_all(checkfirst=False) def test_non_native_dialect(self): engine = engines.testing_engine() engine.connect() engine.dialect.supports_native_enum = False metadata = MetaData() t1 = Table('foo', metadata, Column('bar', Enum('one', 'two', 'three', name='myenum'))) def go(): t1.create(engine) try: self.assert_sql(engine, go, [], with_sequences=[("CREATE TABLE foo (\tbar " "VARCHAR(5), \tCONSTRAINT myenum CHECK " "(bar IN ('one', 'two', 'three')))", {})]) finally: metadata.drop_all(engine) def test_standalone_enum(self): metadata = MetaData(testing.db) etype = Enum('four', 'five', 'six', name='fourfivesixtype', metadata=metadata) etype.create() try: assert testing.db.dialect.has_type(testing.db, 'fourfivesixtype') finally: etype.drop() assert not testing.db.dialect.has_type(testing.db, 'fourfivesixtype') metadata.create_all() try: assert testing.db.dialect.has_type(testing.db, 'fourfivesixtype') finally: metadata.drop_all() assert not testing.db.dialect.has_type(testing.db, 'fourfivesixtype') def test_no_support(self): def server_version_info(self): return (8, 2) e = engines.testing_engine() dialect = e.dialect dialect._get_server_version_info = server_version_info assert dialect.supports_native_enum e.connect() assert not dialect.supports_native_enum # initialize is called again on new pool e.dispose() e.connect() assert not dialect.supports_native_enum def test_reflection(self): metadata = MetaData(testing.db) etype = Enum('four', 'five', 'six', name='fourfivesixtype', metadata=metadata) t1 = Table('table', metadata, Column('id', Integer, primary_key=True), Column('value', Enum('one', 'two' , 'three', name='onetwothreetype')), Column('value2' , etype)) metadata.create_all() try: m2 = MetaData(testing.db) t2 = Table('table', m2, autoload=True) assert t2.c.value.type.enums == 
('one', 'two', 'three') assert t2.c.value.type.name == 'onetwothreetype' assert t2.c.value2.type.enums == ('four', 'five', 'six') assert t2.c.value2.type.name == 'fourfivesixtype' finally: metadata.drop_all() def test_schema_reflection(self): metadata = MetaData(testing.db) etype = Enum( 'four', 'five', 'six', name='fourfivesixtype', schema='test_schema', metadata=metadata, ) t1 = Table('table', metadata, Column('id', Integer, primary_key=True), Column('value', Enum('one', 'two' , 'three', name='onetwothreetype', schema='test_schema')), Column('value2', etype)) metadata.create_all() try: m2 = MetaData(testing.db) t2 = Table('table', m2, autoload=True) assert t2.c.value.type.enums == ('one', 'two', 'three') assert t2.c.value.type.name == 'onetwothreetype' assert t2.c.value2.type.enums == ('four', 'five', 'six') assert t2.c.value2.type.name == 'fourfivesixtype' assert t2.c.value2.type.schema == 'test_schema' finally: metadata.drop_all() class NumericInterpretationTest(fixtures.TestBase): __only_on__ = 'postgresql' def test_numeric_codes(self): from sqlalchemy.dialects.postgresql import pg8000, psycopg2, base for dialect in (pg8000.dialect(), psycopg2.dialect()): typ = Numeric().dialect_impl(dialect) for code in base._INT_TYPES + base._FLOAT_TYPES + \ base._DECIMAL_TYPES: proc = typ.result_processor(dialect, code) val = 23.7 if proc is not None: val = proc(val) assert val in (23.7, decimal.Decimal("23.7")) @testing.provide_metadata def test_numeric_default(self): metadata = self.metadata # pg8000 appears to fail when the value is 0, # returns an int instead of decimal. t =Table('t', metadata, Column('id', Integer, primary_key=True), Column('nd', Numeric(asdecimal=True), default=1), Column('nf', Numeric(asdecimal=False), default=1), Column('fd', Float(asdecimal=True), default=1), Column('ff', Float(asdecimal=False), default=1), ) metadata.create_all() r = t.insert().execute() row = t.select().execute().first() assert isinstance(row[1], decimal.Decimal) assert isinstance(row[2], float) assert isinstance(row[3], decimal.Decimal) assert isinstance(row[4], float) eq_( row, (1, decimal.Decimal("1"), 1, decimal.Decimal("1"), 1) ) class TimezoneTest(fixtures.TestBase): """Test timezone-aware datetimes. psycopg will return a datetime with a tzinfo attached to it, if postgresql returns it. python then will not let you compare a datetime with a tzinfo to a datetime that doesnt have one. this test illustrates two ways to have datetime types with and without timezone info. 
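    For example (plain Python, no database involved), comparing
    ``datetime.datetime(2005, 10, 20, tzinfo=<some tzinfo>)`` against
    ``datetime.datetime(2005, 10, 20)`` raises roughly
    ``TypeError: can't compare offset-naive and offset-aware datetimes``,
    which is why the two tables below keep aware and naive values separate.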
""" __only_on__ = 'postgresql' @classmethod def setup_class(cls): global tztable, notztable, metadata metadata = MetaData(testing.db) # current_timestamp() in postgresql is assumed to return # TIMESTAMP WITH TIMEZONE tztable = Table('tztable', metadata, Column('id', Integer, primary_key=True), Column('date', DateTime(timezone=True), onupdate=func.current_timestamp()), Column('name', String(20))) notztable = Table('notztable', metadata, Column('id', Integer, primary_key=True), Column('date', DateTime(timezone=False), onupdate=cast(func.current_timestamp(), DateTime(timezone=False))), Column('name', String(20))) metadata.create_all() @classmethod def teardown_class(cls): metadata.drop_all() @testing.fails_on('postgresql+zxjdbc', "XXX: postgresql+zxjdbc doesn't give a tzinfo back") def test_with_timezone(self): # get a date with a tzinfo somedate = \ testing.db.connect().scalar(func.current_timestamp().select()) assert somedate.tzinfo tztable.insert().execute(id=1, name='row1', date=somedate) row = select([tztable.c.date], tztable.c.id == 1).execute().first() eq_(row[0], somedate) eq_(somedate.tzinfo.utcoffset(somedate), row[0].tzinfo.utcoffset(row[0])) result = tztable.update(tztable.c.id == 1).returning(tztable.c.date).\ execute(name='newname' ) row = result.first() assert row[0] >= somedate def test_without_timezone(self): # get a date without a tzinfo somedate = datetime.datetime( 2005, 10, 20, 11, 52, 0, ) assert not somedate.tzinfo notztable.insert().execute(id=1, name='row1', date=somedate) row = select([notztable.c.date], notztable.c.id == 1).execute().first() eq_(row[0], somedate) eq_(row[0].tzinfo, None) result = notztable.update(notztable.c.id == 1).returning(notztable.c.date).\ execute(name='newname' ) row = result.first() assert row[0] >= somedate class TimePrecisionTest(fixtures.TestBase, AssertsCompiledSQL): __dialect__ = postgresql.dialect() def test_compile(self): for type_, expected in [ (postgresql.TIME(), 'TIME WITHOUT TIME ZONE'), (postgresql.TIME(precision=5), 'TIME(5) WITHOUT TIME ZONE' ), (postgresql.TIME(timezone=True, precision=5), 'TIME(5) WITH TIME ZONE'), (postgresql.TIMESTAMP(), 'TIMESTAMP WITHOUT TIME ZONE'), (postgresql.TIMESTAMP(precision=5), 'TIMESTAMP(5) WITHOUT TIME ZONE'), (postgresql.TIMESTAMP(timezone=True, precision=5), 'TIMESTAMP(5) WITH TIME ZONE'), ]: self.assert_compile(type_, expected) @testing.only_on('postgresql', 'DB specific feature') @testing.provide_metadata def test_reflection(self): metadata = self.metadata t1 = Table( 't1', metadata, Column('c1', postgresql.TIME()), Column('c2', postgresql.TIME(precision=5)), Column('c3', postgresql.TIME(timezone=True, precision=5)), Column('c4', postgresql.TIMESTAMP()), Column('c5', postgresql.TIMESTAMP(precision=5)), Column('c6', postgresql.TIMESTAMP(timezone=True, precision=5)), ) t1.create() m2 = MetaData(testing.db) t2 = Table('t1', m2, autoload=True) eq_(t2.c.c1.type.precision, None) eq_(t2.c.c2.type.precision, 5) eq_(t2.c.c3.type.precision, 5) eq_(t2.c.c4.type.precision, None) eq_(t2.c.c5.type.precision, 5) eq_(t2.c.c6.type.precision, 5) eq_(t2.c.c1.type.timezone, False) eq_(t2.c.c2.type.timezone, False) eq_(t2.c.c3.type.timezone, True) eq_(t2.c.c4.type.timezone, False) eq_(t2.c.c5.type.timezone, False) eq_(t2.c.c6.type.timezone, True) class ArrayTest(fixtures.TablesTest, AssertsExecutionResults): __only_on__ = 'postgresql' __unsupported_on__ = 'postgresql+pg8000', 'postgresql+zxjdbc' @classmethod def define_tables(cls, metadata): class ProcValue(TypeDecorator): impl = postgresql.ARRAY(Integer, 
dimensions=2) def process_bind_param(self, value, dialect): if value is None: return None return [ [x + 5 for x in v] for v in value ] def process_result_value(self, value, dialect): if value is None: return None return [ [x - 7 for x in v] for v in value ] Table('arrtable', metadata, Column('id', Integer, primary_key=True), Column('intarr', postgresql.ARRAY(Integer)), Column('strarr', postgresql.ARRAY(Unicode())), Column('dimarr', ProcValue) ) Table('dim_arrtable', metadata, Column('id', Integer, primary_key=True), Column('intarr', postgresql.ARRAY(Integer, dimensions=1)), Column('strarr', postgresql.ARRAY(Unicode(), dimensions=1)), Column('dimarr', ProcValue) ) def _fixture_456(self, table): testing.db.execute( table.insert(), intarr=[4, 5, 6] ) def test_reflect_array_column(self): metadata2 = MetaData(testing.db) tbl = Table('arrtable', metadata2, autoload=True) assert isinstance(tbl.c.intarr.type, postgresql.ARRAY) assert isinstance(tbl.c.strarr.type, postgresql.ARRAY) assert isinstance(tbl.c.intarr.type.item_type, Integer) assert isinstance(tbl.c.strarr.type.item_type, String) def test_insert_array(self): arrtable = self.tables.arrtable arrtable.insert().execute(intarr=[1, 2, 3], strarr=[util.u('abc'), util.u('def')]) results = arrtable.select().execute().fetchall() eq_(len(results), 1) eq_(results[0]['intarr'], [1, 2, 3]) eq_(results[0]['strarr'], [util.u('abc'), util.u('def')]) def test_array_where(self): arrtable = self.tables.arrtable arrtable.insert().execute(intarr=[1, 2, 3], strarr=[util.u('abc'), util.u('def')]) arrtable.insert().execute(intarr=[4, 5, 6], strarr=util.u('ABC')) results = arrtable.select().where(arrtable.c.intarr == [1, 2, 3]).execute().fetchall() eq_(len(results), 1) eq_(results[0]['intarr'], [1, 2, 3]) def test_array_concat(self): arrtable = self.tables.arrtable arrtable.insert().execute(intarr=[1, 2, 3], strarr=[util.u('abc'), util.u('def')]) results = select([arrtable.c.intarr + [4, 5, 6]]).execute().fetchall() eq_(len(results), 1) eq_(results[0][0], [ 1, 2, 3, 4, 5, 6, ]) def test_array_subtype_resultprocessor(self): arrtable = self.tables.arrtable arrtable.insert().execute(intarr=[4, 5, 6], strarr=[[util.ue('m\xe4\xe4')], [ util.ue('m\xf6\xf6')]]) arrtable.insert().execute(intarr=[1, 2, 3], strarr=[ util.ue('m\xe4\xe4'), util.ue('m\xf6\xf6')]) results = \ arrtable.select(order_by=[arrtable.c.intarr]).execute().fetchall() eq_(len(results), 2) eq_(results[0]['strarr'], [util.ue('m\xe4\xe4'), util.ue('m\xf6\xf6')]) eq_(results[1]['strarr'], [[util.ue('m\xe4\xe4')], [util.ue('m\xf6\xf6')]]) def test_array_literal(self): eq_( testing.db.scalar( select([ postgresql.array([1, 2]) + postgresql.array([3, 4, 5]) ]) ), [1,2,3,4,5] ) def test_array_getitem_single_type(self): arrtable = self.tables.arrtable is_(arrtable.c.intarr[1].type._type_affinity, Integer) is_(arrtable.c.strarr[1].type._type_affinity, String) def test_array_getitem_slice_type(self): arrtable = self.tables.arrtable is_(arrtable.c.intarr[1:3].type._type_affinity, postgresql.ARRAY) is_(arrtable.c.strarr[1:3].type._type_affinity, postgresql.ARRAY) def test_array_getitem_single_exec(self): arrtable = self.tables.arrtable self._fixture_456(arrtable) eq_( testing.db.scalar(select([arrtable.c.intarr[2]])), 5 ) testing.db.execute( arrtable.update().values({arrtable.c.intarr[2]: 7}) ) eq_( testing.db.scalar(select([arrtable.c.intarr[2]])), 7 ) def test_undim_array_empty(self): arrtable = self.tables.arrtable self._fixture_456(arrtable) eq_( testing.db.scalar( select([arrtable.c.intarr]). 
where(arrtable.c.intarr.contains([])) ), [4, 5, 6] ) def test_array_getitem_slice_exec(self): arrtable = self.tables.arrtable testing.db.execute( arrtable.insert(), intarr=[4, 5, 6], strarr=[util.u('abc'), util.u('def')] ) eq_( testing.db.scalar(select([arrtable.c.intarr[2:3]])), [5, 6] ) testing.db.execute( arrtable.update().values({arrtable.c.intarr[2:3]: [7, 8]}) ) eq_( testing.db.scalar(select([arrtable.c.intarr[2:3]])), [7, 8] ) def _test_undim_array_contains_typed_exec(self, struct): arrtable = self.tables.arrtable self._fixture_456(arrtable) eq_( testing.db.scalar( select([arrtable.c.intarr]). where(arrtable.c.intarr.contains(struct([4, 5]))) ), [4, 5, 6] ) def test_undim_array_contains_set_exec(self): self._test_undim_array_contains_typed_exec(set) def test_undim_array_contains_list_exec(self): self._test_undim_array_contains_typed_exec(list) def test_undim_array_contains_generator_exec(self): self._test_undim_array_contains_typed_exec( lambda elem: (x for x in elem)) def _test_dim_array_contains_typed_exec(self, struct): dim_arrtable = self.tables.dim_arrtable self._fixture_456(dim_arrtable) eq_( testing.db.scalar( select([dim_arrtable.c.intarr]). where(dim_arrtable.c.intarr.contains(struct([4, 5]))) ), [4, 5, 6] ) def test_dim_array_contains_set_exec(self): self._test_dim_array_contains_typed_exec(set) def test_dim_array_contains_list_exec(self): self._test_dim_array_contains_typed_exec(list) def test_dim_array_contains_generator_exec(self): self._test_dim_array_contains_typed_exec(lambda elem: (x for x in elem)) def test_array_contained_by_exec(self): arrtable = self.tables.arrtable with testing.db.connect() as conn: conn.execute( arrtable.insert(), intarr=[6, 5, 4] ) eq_( conn.scalar( select([arrtable.c.intarr.contained_by([4, 5, 6, 7])]) ), True ) def test_array_overlap_exec(self): arrtable = self.tables.arrtable with testing.db.connect() as conn: conn.execute( arrtable.insert(), intarr=[4, 5, 6] ) eq_( conn.scalar( select([arrtable.c.intarr]). where(arrtable.c.intarr.overlap([7, 6])) ), [4, 5, 6] ) def test_array_any_exec(self): arrtable = self.tables.arrtable with testing.db.connect() as conn: conn.execute( arrtable.insert(), intarr=[4, 5, 6] ) eq_( conn.scalar( select([arrtable.c.intarr]). where(postgresql.Any(5, arrtable.c.intarr)) ), [4, 5, 6] ) def test_array_all_exec(self): arrtable = self.tables.arrtable with testing.db.connect() as conn: conn.execute( arrtable.insert(), intarr=[4, 5, 6] ) eq_( conn.scalar( select([arrtable.c.intarr]). 
where(arrtable.c.intarr.all(4, operator=operators.le)) ), [4, 5, 6] ) @testing.provide_metadata def test_tuple_flag(self): metadata = self.metadata t1 = Table('t1', metadata, Column('id', Integer, primary_key=True), Column('data', postgresql.ARRAY(String(5), as_tuple=True)), Column('data2', postgresql.ARRAY(Numeric(asdecimal=False), as_tuple=True)), ) metadata.create_all() testing.db.execute(t1.insert(), id=1, data=["1","2","3"], data2=[5.4, 5.6]) testing.db.execute(t1.insert(), id=2, data=["4", "5", "6"], data2=[1.0]) testing.db.execute(t1.insert(), id=3, data=[["4", "5"], ["6", "7"]], data2=[[5.4, 5.6], [1.0, 1.1]]) r = testing.db.execute(t1.select().order_by(t1.c.id)).fetchall() eq_( r, [ (1, ('1', '2', '3'), (5.4, 5.6)), (2, ('4', '5', '6'), (1.0,)), (3, (('4', '5'), ('6', '7')), ((5.4, 5.6), (1.0, 1.1))) ] ) # hashable eq_( set(row[1] for row in r), set([('1', '2', '3'), ('4', '5', '6'), (('4', '5'), ('6', '7'))]) ) def test_dimension(self): arrtable = self.tables.arrtable testing.db.execute(arrtable.insert(), dimarr=[[1, 2, 3], [4,5, 6]]) eq_( testing.db.scalar(select([arrtable.c.dimarr])), [[-1, 0, 1], [2, 3, 4]] ) class TimestampTest(fixtures.TestBase, AssertsExecutionResults): __only_on__ = 'postgresql' def test_timestamp(self): engine = testing.db connection = engine.connect() s = select(["timestamp '2007-12-25'"]) result = connection.execute(s).first() eq_(result[0], datetime.datetime(2007, 12, 25, 0, 0)) class SpecialTypesTest(fixtures.TestBase, ComparesTables, AssertsCompiledSQL): """test DDL and reflection of PG-specific types """ __only_on__ = 'postgresql' __excluded_on__ = (('postgresql', '<', (8, 3, 0)),) @classmethod def setup_class(cls): global metadata, table metadata = MetaData(testing.db) # create these types so that we can issue # special SQL92 INTERVAL syntax class y2m(types.UserDefinedType, postgresql.INTERVAL): def get_col_spec(self): return "INTERVAL YEAR TO MONTH" class d2s(types.UserDefinedType, postgresql.INTERVAL): def get_col_spec(self): return "INTERVAL DAY TO SECOND" table = Table('sometable', metadata, Column('id', postgresql.UUID, primary_key=True), Column('flag', postgresql.BIT), Column('bitstring', postgresql.BIT(4)), Column('addr', postgresql.INET), Column('addr2', postgresql.MACADDR), Column('addr3', postgresql.CIDR), Column('doubleprec', postgresql.DOUBLE_PRECISION), Column('plain_interval', postgresql.INTERVAL), Column('year_interval', y2m()), Column('month_interval', d2s()), Column('precision_interval', postgresql.INTERVAL(precision=3)) ) metadata.create_all() # cheat so that the "strict type check" # works table.c.year_interval.type = postgresql.INTERVAL() table.c.month_interval.type = postgresql.INTERVAL() @classmethod def teardown_class(cls): metadata.drop_all() def test_reflection(self): m = MetaData(testing.db) t = Table('sometable', m, autoload=True) self.assert_tables_equal(table, t, strict_types=True) assert t.c.plain_interval.type.precision is None assert t.c.precision_interval.type.precision == 3 assert t.c.bitstring.type.length == 4 def test_bit_compile(self): pairs = [(postgresql.BIT(), 'BIT(1)'), (postgresql.BIT(5), 'BIT(5)'), (postgresql.BIT(varying=True), 'BIT VARYING'), (postgresql.BIT(5, varying=True), 'BIT VARYING(5)'), ] for type_, expected in pairs: self.assert_compile(type_, expected) @testing.provide_metadata def test_bit_reflection(self): metadata = self.metadata t1 = Table('t1', metadata, Column('bit1', postgresql.BIT()), Column('bit5', postgresql.BIT(5)), Column('bitvarying', postgresql.BIT(varying=True)), 
Column('bitvarying5', postgresql.BIT(5, varying=True)), ) t1.create() m2 = MetaData(testing.db) t2 = Table('t1', m2, autoload=True) eq_(t2.c.bit1.type.length, 1) eq_(t2.c.bit1.type.varying, False) eq_(t2.c.bit5.type.length, 5) eq_(t2.c.bit5.type.varying, False) eq_(t2.c.bitvarying.type.length, None) eq_(t2.c.bitvarying.type.varying, True) eq_(t2.c.bitvarying5.type.length, 5) eq_(t2.c.bitvarying5.type.varying, True) class UUIDTest(fixtures.TestBase): """Test the bind/return values of the UUID type.""" __only_on__ = 'postgresql' @testing.requires.python25 @testing.fails_on('postgresql+zxjdbc', 'column "data" is of type uuid but expression is of type character varying') @testing.fails_on('postgresql+pg8000', 'No support for UUID type') def test_uuid_string(self): import uuid self._test_round_trip( Table('utable', MetaData(), Column('data', postgresql.UUID()) ), str(uuid.uuid4()), str(uuid.uuid4()) ) @testing.requires.python25 @testing.fails_on('postgresql+zxjdbc', 'column "data" is of type uuid but expression is of type character varying') @testing.fails_on('postgresql+pg8000', 'No support for UUID type') def test_uuid_uuid(self): import uuid self._test_round_trip( Table('utable', MetaData(), Column('data', postgresql.UUID(as_uuid=True)) ), uuid.uuid4(), uuid.uuid4() ) def test_no_uuid_available(self): from sqlalchemy.dialects.postgresql import base uuid_type = base._python_UUID base._python_UUID = None try: assert_raises( NotImplementedError, postgresql.UUID, as_uuid=True ) finally: base._python_UUID = uuid_type def setup(self): self.conn = testing.db.connect() trans = self.conn.begin() def teardown(self): self.conn.close() def _test_round_trip(self, utable, value1, value2): utable.create(self.conn) self.conn.execute(utable.insert(), {'data':value1}) self.conn.execute(utable.insert(), {'data':value2}) r = self.conn.execute( select([utable.c.data]). 
where(utable.c.data != value1) ) eq_(r.fetchone()[0], value2) eq_(r.fetchone(), None) class HStoreTest(fixtures.TestBase): def _assert_sql(self, construct, expected): dialect = postgresql.dialect() compiled = str(construct.compile(dialect=dialect)) compiled = re.sub(r'\s+', ' ', compiled) expected = re.sub(r'\s+', ' ', expected) eq_(compiled, expected) def setup(self): metadata = MetaData() self.test_table = Table('test_table', metadata, Column('id', Integer, primary_key=True), Column('hash', HSTORE) ) self.hashcol = self.test_table.c.hash def _test_where(self, whereclause, expected): stmt = select([self.test_table]).where(whereclause) self._assert_sql( stmt, "SELECT test_table.id, test_table.hash FROM test_table " "WHERE %s" % expected ) def _test_cols(self, colclause, expected, from_=True): stmt = select([colclause]) self._assert_sql( stmt, ( "SELECT %s" + (" FROM test_table" if from_ else "") ) % expected ) def test_bind_serialize_default(self): from sqlalchemy.engine import default dialect = default.DefaultDialect() proc = self.test_table.c.hash.type._cached_bind_processor(dialect) eq_( proc(util.OrderedDict([("key1", "value1"), ("key2", "value2")])), '"key1"=>"value1", "key2"=>"value2"' ) def test_bind_serialize_with_slashes_and_quotes(self): from sqlalchemy.engine import default dialect = default.DefaultDialect() proc = self.test_table.c.hash.type._cached_bind_processor(dialect) eq_( proc({'\\"a': '\\"1'}), '"\\\\\\"a"=>"\\\\\\"1"' ) def test_parse_error(self): from sqlalchemy.engine import default dialect = default.DefaultDialect() proc = self.test_table.c.hash.type._cached_result_processor( dialect, None) assert_raises_message( ValueError, r'''After u?'\[\.\.\.\], "key1"=>"value1", ', could not parse ''' '''residual at position 36: u?'crapcrapcrap, "key3"\[\.\.\.\]''', proc, '"key2"=>"value2", "key1"=>"value1", ' 'crapcrapcrap, "key3"=>"value3"' ) def test_result_deserialize_default(self): from sqlalchemy.engine import default dialect = default.DefaultDialect() proc = self.test_table.c.hash.type._cached_result_processor( dialect, None) eq_( proc('"key2"=>"value2", "key1"=>"value1"'), {"key1": "value1", "key2": "value2"} ) def test_result_deserialize_with_slashes_and_quotes(self): from sqlalchemy.engine import default dialect = default.DefaultDialect() proc = self.test_table.c.hash.type._cached_result_processor( dialect, None) eq_( proc('"\\\\\\"a"=>"\\\\\\"1"'), {'\\"a': '\\"1'} ) def test_bind_serialize_psycopg2(self): from sqlalchemy.dialects.postgresql import psycopg2 dialect = psycopg2.PGDialect_psycopg2() dialect._has_native_hstore = True proc = self.test_table.c.hash.type._cached_bind_processor(dialect) is_(proc, None) dialect = psycopg2.PGDialect_psycopg2() dialect._has_native_hstore = False proc = self.test_table.c.hash.type._cached_bind_processor(dialect) eq_( proc(util.OrderedDict([("key1", "value1"), ("key2", "value2")])), '"key1"=>"value1", "key2"=>"value2"' ) def test_result_deserialize_psycopg2(self): from sqlalchemy.dialects.postgresql import psycopg2 dialect = psycopg2.PGDialect_psycopg2() dialect._has_native_hstore = True proc = self.test_table.c.hash.type._cached_result_processor( dialect, None) is_(proc, None) dialect = psycopg2.PGDialect_psycopg2() dialect._has_native_hstore = False proc = self.test_table.c.hash.type._cached_result_processor( dialect, None) eq_( proc('"key2"=>"value2", "key1"=>"value1"'), {"key1": "value1", "key2": "value2"} ) def test_where_has_key(self): self._test_where( # hide from 2to3 getattr(self.hashcol, 'has_key')('foo'), 
"test_table.hash ? %(hash_1)s" ) def test_where_has_all(self): self._test_where( self.hashcol.has_all(postgresql.array(['1', '2'])), "test_table.hash ?& ARRAY[%(param_1)s, %(param_2)s]" ) def test_where_has_any(self): self._test_where( self.hashcol.has_any(postgresql.array(['1', '2'])), "test_table.hash ?| ARRAY[%(param_1)s, %(param_2)s]" ) def test_where_defined(self): self._test_where( self.hashcol.defined('foo'), "defined(test_table.hash, %(param_1)s)" ) def test_where_contains(self): self._test_where( self.hashcol.contains({'foo': '1'}), "test_table.hash @> %(hash_1)s" ) def test_where_contained_by(self): self._test_where( self.hashcol.contained_by({'foo': '1', 'bar': None}), "test_table.hash <@ %(hash_1)s" ) def test_where_getitem(self): self._test_where( self.hashcol['bar'] == None, "(test_table.hash -> %(hash_1)s) IS NULL" ) def test_cols_get(self): self._test_cols( self.hashcol['foo'], "test_table.hash -> %(hash_1)s AS anon_1", True ) def test_cols_delete_single_key(self): self._test_cols( self.hashcol.delete('foo'), "delete(test_table.hash, %(param_1)s) AS delete_1", True ) def test_cols_delete_array_of_keys(self): self._test_cols( self.hashcol.delete(postgresql.array(['foo', 'bar'])), ("delete(test_table.hash, ARRAY[%(param_1)s, %(param_2)s]) " "AS delete_1"), True ) def test_cols_delete_matching_pairs(self): self._test_cols( self.hashcol.delete(hstore('1', '2')), ("delete(test_table.hash, hstore(%(param_1)s, %(param_2)s)) " "AS delete_1"), True ) def test_cols_slice(self): self._test_cols( self.hashcol.slice(postgresql.array(['1', '2'])), ("slice(test_table.hash, ARRAY[%(param_1)s, %(param_2)s]) " "AS slice_1"), True ) def test_cols_hstore_pair_text(self): self._test_cols( hstore('foo', '3')['foo'], "hstore(%(param_1)s, %(param_2)s) -> %(hstore_1)s AS anon_1", False ) def test_cols_hstore_pair_array(self): self._test_cols( hstore(postgresql.array(['1', '2']), postgresql.array(['3', None]))['1'], ("hstore(ARRAY[%(param_1)s, %(param_2)s], " "ARRAY[%(param_3)s, NULL]) -> %(hstore_1)s AS anon_1"), False ) def test_cols_hstore_single_array(self): self._test_cols( hstore(postgresql.array(['1', '2', '3', None]))['3'], ("hstore(ARRAY[%(param_1)s, %(param_2)s, %(param_3)s, NULL]) " "-> %(hstore_1)s AS anon_1"), False ) def test_cols_concat(self): self._test_cols( self.hashcol.concat(hstore(cast(self.test_table.c.id, Text), '3')), ("test_table.hash || hstore(CAST(test_table.id AS TEXT), " "%(param_1)s) AS anon_1"), True ) def test_cols_concat_op(self): self._test_cols( hstore('foo', 'bar') + self.hashcol, "hstore(%(param_1)s, %(param_2)s) || test_table.hash AS anon_1", True ) def test_cols_concat_get(self): self._test_cols( (self.hashcol + self.hashcol)['foo'], "test_table.hash || test_table.hash -> %(param_1)s AS anon_1" ) def test_cols_keys(self): self._test_cols( # hide from 2to3 getattr(self.hashcol, 'keys')(), "akeys(test_table.hash) AS akeys_1", True ) def test_cols_vals(self): self._test_cols( self.hashcol.vals(), "avals(test_table.hash) AS avals_1", True ) def test_cols_array(self): self._test_cols( self.hashcol.array(), "hstore_to_array(test_table.hash) AS hstore_to_array_1", True ) def test_cols_matrix(self): self._test_cols( self.hashcol.matrix(), "hstore_to_matrix(test_table.hash) AS hstore_to_matrix_1", True ) class HStoreRoundTripTest(fixtures.TablesTest): __requires__ = 'hstore', __dialect__ = 'postgresql' @classmethod def define_tables(cls, metadata): Table('data_table', metadata, Column('id', Integer, primary_key=True), Column('name', String(30), nullable=False), 
Column('data', HSTORE) ) def _fixture_data(self, engine): data_table = self.tables.data_table engine.execute( data_table.insert(), {'name': 'r1', 'data': {"k1": "r1v1", "k2": "r1v2"}}, {'name': 'r2', 'data': {"k1": "r2v1", "k2": "r2v2"}}, {'name': 'r3', 'data': {"k1": "r3v1", "k2": "r3v2"}}, {'name': 'r4', 'data': {"k1": "r4v1", "k2": "r4v2"}}, {'name': 'r5', 'data': {"k1": "r5v1", "k2": "r5v2"}}, ) def _assert_data(self, compare): data = testing.db.execute( select([self.tables.data_table.c.data]). order_by(self.tables.data_table.c.name) ).fetchall() eq_([d for d, in data], compare) def _test_insert(self, engine): engine.execute( self.tables.data_table.insert(), {'name': 'r1', 'data': {"k1": "r1v1", "k2": "r1v2"}} ) self._assert_data([{"k1": "r1v1", "k2": "r1v2"}]) def _non_native_engine(self): if testing.against("postgresql+psycopg2"): engine = engines.testing_engine(options=dict(use_native_hstore=False)) else: engine = testing.db engine.connect() return engine def test_reflect(self): from sqlalchemy import inspect insp = inspect(testing.db) cols = insp.get_columns('data_table') assert isinstance(cols[2]['type'], HSTORE) @testing.only_on("postgresql+psycopg2") def test_insert_native(self): engine = testing.db self._test_insert(engine) def test_insert_python(self): engine = self._non_native_engine() self._test_insert(engine) @testing.only_on("postgresql+psycopg2") def test_criterion_native(self): engine = testing.db self._fixture_data(engine) self._test_criterion(engine) def test_criterion_python(self): engine = self._non_native_engine() self._fixture_data(engine) self._test_criterion(engine) def _test_criterion(self, engine): data_table = self.tables.data_table result = engine.execute( select([data_table.c.data]).where(data_table.c.data['k1'] == 'r3v1') ).first() eq_(result, ({'k1': 'r3v1', 'k2': 'r3v2'},)) def _test_fixed_round_trip(self, engine): s = select([ hstore( array(['key1', 'key2', 'key3']), array(['value1', 'value2', 'value3']) ) ]) eq_( engine.scalar(s), {"key1": "value1", "key2": "value2", "key3": "value3"} ) def test_fixed_round_trip_python(self): engine = self._non_native_engine() self._test_fixed_round_trip(engine) @testing.only_on("postgresql+psycopg2") def test_fixed_round_trip_native(self): engine = testing.db self._test_fixed_round_trip(engine) def _test_unicode_round_trip(self, engine): s = select([ hstore( array([util.u('réveillé'), util.u('drôle'), util.u('S’il')]), array([util.u('réveillé'), util.u('drôle'), util.u('S’il')]) ) ]) eq_( engine.scalar(s), { util.u('réveillé'): util.u('réveillé'), util.u('drôle'): util.u('drôle'), util.u('S’il'): util.u('S’il') } ) def test_unicode_round_trip_python(self): engine = self._non_native_engine() self._test_unicode_round_trip(engine) @testing.only_on("postgresql+psycopg2") def test_unicode_round_trip_native(self): engine = testing.db self._test_unicode_round_trip(engine) def test_escaped_quotes_round_trip_python(self): engine = self._non_native_engine() self._test_escaped_quotes_round_trip(engine) @testing.only_on("postgresql+psycopg2") def test_escaped_quotes_round_trip_native(self): engine = testing.db self._test_escaped_quotes_round_trip(engine) def _test_escaped_quotes_round_trip(self, engine): engine.execute( self.tables.data_table.insert(), {'name': 'r1', 'data': {r'key \"foo\"': r'value \"bar"\ xyz'}} ) self._assert_data([{r'key \"foo\"': r'value \"bar"\ xyz'}]) class _RangeTypeMixin(object): __requires__ = 'range_types', __dialect__ = 'postgresql+psycopg2' @property def extras(self): # done this way so we don't get 
ImportErrors with # older psycopg2 versions. from psycopg2 import extras return extras @classmethod def define_tables(cls, metadata): # no reason ranges shouldn't be primary keys, # so lets just use them as such table = Table('data_table', metadata, Column('range', cls._col_type, primary_key=True), ) cls.col = table.c.range def test_actual_type(self): eq_(str(self._col_type()), self._col_str) def test_reflect(self): from sqlalchemy import inspect insp = inspect(testing.db) cols = insp.get_columns('data_table') assert isinstance(cols[0]['type'], self._col_type) def _assert_data(self): data = testing.db.execute( select([self.tables.data_table.c.range]) ).fetchall() eq_(data, [(self._data_obj(), )]) def test_insert_obj(self): testing.db.engine.execute( self.tables.data_table.insert(), {'range': self._data_obj()} ) self._assert_data() def test_insert_text(self): testing.db.engine.execute( self.tables.data_table.insert(), {'range': self._data_str} ) self._assert_data() # operator tests def _test_clause(self, colclause, expected): dialect = postgresql.dialect() compiled = str(colclause.compile(dialect=dialect)) eq_(compiled, expected) def test_where_equal(self): self._test_clause( self.col==self._data_str, "data_table.range = %(range_1)s" ) def test_where_not_equal(self): self._test_clause( self.col!=self._data_str, "data_table.range <> %(range_1)s" ) def test_where_less_than(self): self._test_clause( self.col < self._data_str, "data_table.range < %(range_1)s" ) def test_where_greater_than(self): self._test_clause( self.col > self._data_str, "data_table.range > %(range_1)s" ) def test_where_less_than_or_equal(self): self._test_clause( self.col <= self._data_str, "data_table.range <= %(range_1)s" ) def test_where_greater_than_or_equal(self): self._test_clause( self.col >= self._data_str, "data_table.range >= %(range_1)s" ) def test_contains(self): self._test_clause( self.col.contains(self._data_str), "data_table.range @> %(range_1)s" ) def test_contained_by(self): self._test_clause( self.col.contained_by(self._data_str), "data_table.range <@ %(range_1)s" ) def test_overlaps(self): self._test_clause( self.col.overlaps(self._data_str), "data_table.range && %(range_1)s" ) def test_strictly_left_of(self): self._test_clause( self.col << self._data_str, "data_table.range << %(range_1)s" ) self._test_clause( self.col.strictly_left_of(self._data_str), "data_table.range << %(range_1)s" ) def test_strictly_right_of(self): self._test_clause( self.col >> self._data_str, "data_table.range >> %(range_1)s" ) self._test_clause( self.col.strictly_right_of(self._data_str), "data_table.range >> %(range_1)s" ) def test_not_extend_right_of(self): self._test_clause( self.col.not_extend_right_of(self._data_str), "data_table.range &< %(range_1)s" ) def test_not_extend_left_of(self): self._test_clause( self.col.not_extend_left_of(self._data_str), "data_table.range &> %(range_1)s" ) def test_adjacent_to(self): self._test_clause( self.col.adjacent_to(self._data_str), "data_table.range -|- %(range_1)s" ) def test_union(self): self._test_clause( self.col + self.col, "data_table.range + data_table.range" ) def test_union_result(self): # insert testing.db.engine.execute( self.tables.data_table.insert(), {'range': self._data_str} ) # select range = self.tables.data_table.c.range data = testing.db.execute( select([range + range]) ).fetchall() eq_(data, [(self._data_obj(), )]) def test_intersection(self): self._test_clause( self.col * self.col, "data_table.range * data_table.range" ) def test_intersection_result(self): # insert 
testing.db.engine.execute( self.tables.data_table.insert(), {'range': self._data_str} ) # select range = self.tables.data_table.c.range data = testing.db.execute( select([range * range]) ).fetchall() eq_(data, [(self._data_obj(), )]) def test_different(self): self._test_clause( self.col - self.col, "data_table.range - data_table.range" ) def test_difference_result(self): # insert testing.db.engine.execute( self.tables.data_table.insert(), {'range': self._data_str} ) # select range = self.tables.data_table.c.range data = testing.db.execute( select([range - range]) ).fetchall() eq_(data, [(self._data_obj().__class__(empty=True), )]) class Int4RangeTests(_RangeTypeMixin, fixtures.TablesTest): _col_type = INT4RANGE _col_str = 'INT4RANGE' _data_str = '[1,2)' def _data_obj(self): return self.extras.NumericRange(1, 2) class Int8RangeTests(_RangeTypeMixin, fixtures.TablesTest): _col_type = INT8RANGE _col_str = 'INT8RANGE' _data_str = '[9223372036854775806,9223372036854775807)' def _data_obj(self): return self.extras.NumericRange( 9223372036854775806, 9223372036854775807 ) class NumRangeTests(_RangeTypeMixin, fixtures.TablesTest): _col_type = NUMRANGE _col_str = 'NUMRANGE' _data_str = '[1.0,2.0)' def _data_obj(self): return self.extras.NumericRange( decimal.Decimal('1.0'), decimal.Decimal('2.0') ) class DateRangeTests(_RangeTypeMixin, fixtures.TablesTest): _col_type = DATERANGE _col_str = 'DATERANGE' _data_str = '[2013-03-23,2013-03-24)' def _data_obj(self): return self.extras.DateRange( datetime.date(2013, 3, 23), datetime.date(2013, 3, 24) ) class DateTimeRangeTests(_RangeTypeMixin, fixtures.TablesTest): _col_type = TSRANGE _col_str = 'TSRANGE' _data_str = '[2013-03-23 14:30,2013-03-23 23:30)' def _data_obj(self): return self.extras.DateTimeRange( datetime.datetime(2013, 3, 23, 14, 30), datetime.datetime(2013, 3, 23, 23, 30) ) class DateTimeTZRangeTests(_RangeTypeMixin, fixtures.TablesTest): _col_type = TSTZRANGE _col_str = 'TSTZRANGE' # make sure we use one, steady timestamp with timezone pair # for all parts of all these tests _tstzs = None def tstzs(self): if self._tstzs is None: lower = testing.db.connect().scalar( func.current_timestamp().select() ) upper = lower+datetime.timedelta(1) self._tstzs = (lower, upper) return self._tstzs @property def _data_str(self): return '[%s,%s)' % self.tstzs() def _data_obj(self): return self.extras.DateTimeTZRange(*self.tstzs()) SQLAlchemy-0.8.4/test/dialect/test_firebird.py0000644000076500000240000004406112251150015022011 0ustar classicstaff00000000000000from sqlalchemy.testing import eq_, assert_raises_message from sqlalchemy import exc from sqlalchemy.databases import firebird from sqlalchemy.exc import ProgrammingError from sqlalchemy.sql import table, column from sqlalchemy import types as sqltypes from sqlalchemy.testing import fixtures, AssertsExecutionResults, AssertsCompiledSQL from sqlalchemy import testing from sqlalchemy.testing import engines from sqlalchemy import String, VARCHAR, NVARCHAR, Unicode, Integer,\ func, insert, update, MetaData, select, Table, Column, text,\ Sequence, Float from sqlalchemy import schema class DomainReflectionTest(fixtures.TestBase, AssertsExecutionResults): "Test Firebird domains" __only_on__ = 'firebird' @classmethod def setup_class(cls): con = testing.db.connect() try: con.execute('CREATE DOMAIN int_domain AS INTEGER DEFAULT ' '42 NOT NULL') con.execute('CREATE DOMAIN str_domain AS VARCHAR(255)') con.execute('CREATE DOMAIN rem_domain AS BLOB SUB_TYPE TEXT' ) con.execute('CREATE DOMAIN img_domain AS BLOB SUB_TYPE 
' 'BINARY') except ProgrammingError, e: if not 'attempt to store duplicate value' in str(e): raise e con.execute('''CREATE GENERATOR gen_testtable_id''') con.execute('''CREATE TABLE testtable (question int_domain, answer str_domain DEFAULT 'no answer', remark rem_domain DEFAULT '', photo img_domain, d date, t time, dt timestamp, redundant str_domain DEFAULT NULL)''') con.execute("ALTER TABLE testtable " "ADD CONSTRAINT testtable_pk PRIMARY KEY " "(question)") con.execute("CREATE TRIGGER testtable_autoid FOR testtable " " ACTIVE BEFORE INSERT AS" " BEGIN" " IF (NEW.question IS NULL) THEN" " NEW.question = gen_id(gen_testtable_id, 1);" " END") @classmethod def teardown_class(cls): con = testing.db.connect() con.execute('DROP TABLE testtable') con.execute('DROP DOMAIN int_domain') con.execute('DROP DOMAIN str_domain') con.execute('DROP DOMAIN rem_domain') con.execute('DROP DOMAIN img_domain') con.execute('DROP GENERATOR gen_testtable_id') def test_table_is_reflected(self): from sqlalchemy.types import Integer, Text, BLOB, String, Date, \ Time, DateTime metadata = MetaData(testing.db) table = Table('testtable', metadata, autoload=True) eq_(set(table.columns.keys()), set([ 'question', 'answer', 'remark', 'photo', 'd', 't', 'dt', 'redundant', ]), "Columns of reflected table didn't equal expected " "columns") eq_(table.c.question.primary_key, True) # disabled per http://www.sqlalchemy.org/trac/ticket/1660 # eq_(table.c.question.sequence.name, 'gen_testtable_id') assert isinstance(table.c.question.type, Integer) eq_(table.c.question.server_default.arg.text, '42') assert isinstance(table.c.answer.type, String) assert table.c.answer.type.length == 255 eq_(table.c.answer.server_default.arg.text, "'no answer'") assert isinstance(table.c.remark.type, Text) eq_(table.c.remark.server_default.arg.text, "''") assert isinstance(table.c.photo.type, BLOB) assert table.c.redundant.server_default is None # The following assume a Dialect 3 database assert isinstance(table.c.d.type, Date) assert isinstance(table.c.t.type, Time) assert isinstance(table.c.dt.type, DateTime) class BuggyDomainReflectionTest(fixtures.TestBase, AssertsExecutionResults): """Test Firebird domains (and some other reflection bumps), see [ticket:1663] and http://tracker.firebirdsql.org/browse/CORE-356""" __only_on__ = 'firebird' # NB: spacing and newlines are *significant* here! # PS: this test is superfluous on recent FB, where the issue 356 is probably fixed... 
AUTOINC_DM = """\ CREATE DOMAIN AUTOINC_DM AS NUMERIC(18,0) """ MONEY_DM = """\ CREATE DOMAIN MONEY_DM AS NUMERIC(15,2) DEFAULT 0 CHECK (VALUE BETWEEN - 9999999999999.99 AND +9999999999999.99) """ NOSI_DM = """\ CREATE DOMAIN NOSI_DM AS CHAR(1) DEFAULT 'N' NOT NULL CHECK (VALUE IN ('S', 'N')) """ RIT_TESORERIA_CAPITOLO_DM = """\ CREATE DOMAIN RIT_TESORERIA_CAPITOLO_DM AS VARCHAR(6) CHECK ((VALUE IS NULL) OR (VALUE = UPPER(VALUE))) """ DEF_ERROR_TB = """\ CREATE TABLE DEF_ERROR ( RITENUTAMOV_ID AUTOINC_DM NOT NULL, RITENUTA MONEY_DM, INTERESSI MONEY_DM DEFAULT 0, STAMPATO_MODULO NOSI_DM DEFAULT 'S', TESORERIA_CAPITOLO RIT_TESORERIA_CAPITOLO_DM) """ DEF_ERROR_NODOM_TB = """\ CREATE TABLE DEF_ERROR_NODOM ( RITENUTAMOV_ID INTEGER NOT NULL, RITENUTA NUMERIC(15,2) DEFAULT 0, INTERESSI NUMERIC(15,2) DEFAULT 0, STAMPATO_MODULO CHAR(1) DEFAULT 'S', TESORERIA_CAPITOLO CHAR(1)) """ DOM_ID = """ CREATE DOMAIN DOM_ID INTEGER NOT NULL """ TABLE_A = """\ CREATE TABLE A ( ID DOM_ID /* INTEGER NOT NULL */ DEFAULT 0 ) """ # the 'default' keyword is lower case here TABLE_B = """\ CREATE TABLE B ( ID DOM_ID /* INTEGER NOT NULL */ default 0 ) """ @classmethod def setup_class(cls): con = testing.db.connect() con.execute(cls.AUTOINC_DM) con.execute(cls.MONEY_DM) con.execute(cls.NOSI_DM) con.execute(cls.RIT_TESORERIA_CAPITOLO_DM) con.execute(cls.DEF_ERROR_TB) con.execute(cls.DEF_ERROR_NODOM_TB) con.execute(cls.DOM_ID) con.execute(cls.TABLE_A) con.execute(cls.TABLE_B) @classmethod def teardown_class(cls): con = testing.db.connect() con.execute('DROP TABLE a') con.execute("DROP TABLE b") con.execute('DROP DOMAIN dom_id') con.execute('DROP TABLE def_error_nodom') con.execute('DROP TABLE def_error') con.execute('DROP DOMAIN rit_tesoreria_capitolo_dm') con.execute('DROP DOMAIN nosi_dm') con.execute('DROP DOMAIN money_dm') con.execute('DROP DOMAIN autoinc_dm') def test_tables_are_reflected_same_way(self): metadata = MetaData(testing.db) table_dom = Table('def_error', metadata, autoload=True) table_nodom = Table('def_error_nodom', metadata, autoload=True) eq_(table_dom.c.interessi.server_default.arg.text, table_nodom.c.interessi.server_default.arg.text) eq_(table_dom.c.ritenuta.server_default.arg.text, table_nodom.c.ritenuta.server_default.arg.text) eq_(table_dom.c.stampato_modulo.server_default.arg.text, table_nodom.c.stampato_modulo.server_default.arg.text) def test_intermixed_comment(self): metadata = MetaData(testing.db) table_a = Table('a', metadata, autoload=True) eq_(table_a.c.id.server_default.arg.text, "0") def test_lowercase_default_name(self): metadata = MetaData(testing.db) table_b = Table('b', metadata, autoload=True) eq_(table_b.c.id.server_default.arg.text, "0") class CompileTest(fixtures.TestBase, AssertsCompiledSQL): __dialect__ = firebird.FBDialect() def test_alias(self): t = table('sometable', column('col1'), column('col2')) s = select([t.alias()]) self.assert_compile(s, 'SELECT sometable_1.col1, sometable_1.col2 ' 'FROM sometable AS sometable_1') dialect = firebird.FBDialect() dialect._version_two = False self.assert_compile(s, 'SELECT sometable_1.col1, sometable_1.col2 ' 'FROM sometable sometable_1', dialect=dialect) def test_varchar_raise(self): for type_ in ( String, VARCHAR, String(), VARCHAR(), Unicode, Unicode(), ): type_ = sqltypes.to_instance(type_) assert_raises_message( exc.CompileError, "VARCHAR requires a length on dialect firebird", type_.compile, dialect=firebird.dialect()) t1 = Table('sometable', MetaData(), Column('somecolumn', type_) ) assert_raises_message( exc.CompileError, r"\(in 
table 'sometable', column 'somecolumn'\)\: " r"(?:N)?VARCHAR requires a length on dialect firebird", schema.CreateTable(t1).compile, dialect=firebird.dialect() ) def test_function(self): self.assert_compile(func.foo(1, 2), 'foo(:foo_1, :foo_2)') self.assert_compile(func.current_time(), 'CURRENT_TIME') self.assert_compile(func.foo(), 'foo') m = MetaData() t = Table('sometable', m, Column('col1', Integer), Column('col2' , Integer)) self.assert_compile(select([func.max(t.c.col1)]), 'SELECT max(sometable.col1) AS max_1 FROM ' 'sometable') def test_substring(self): self.assert_compile(func.substring('abc', 1, 2), 'SUBSTRING(:substring_1 FROM :substring_2 ' 'FOR :substring_3)') self.assert_compile(func.substring('abc', 1), 'SUBSTRING(:substring_1 FROM :substring_2)') def test_update_returning(self): table1 = table('mytable', column('myid', Integer), column('name' , String(128)), column('description', String(128))) u = update(table1, values=dict(name='foo' )).returning(table1.c.myid, table1.c.name) self.assert_compile(u, 'UPDATE mytable SET name=:name RETURNING ' 'mytable.myid, mytable.name') u = update(table1, values=dict(name='foo')).returning(table1) self.assert_compile(u, 'UPDATE mytable SET name=:name RETURNING ' 'mytable.myid, mytable.name, ' 'mytable.description') u = update(table1, values=dict(name='foo' )).returning(func.length(table1.c.name)) self.assert_compile(u, 'UPDATE mytable SET name=:name RETURNING ' 'char_length(mytable.name) AS length_1') def test_insert_returning(self): table1 = table('mytable', column('myid', Integer), column('name' , String(128)), column('description', String(128))) i = insert(table1, values=dict(name='foo' )).returning(table1.c.myid, table1.c.name) self.assert_compile(i, 'INSERT INTO mytable (name) VALUES (:name) ' 'RETURNING mytable.myid, mytable.name') i = insert(table1, values=dict(name='foo')).returning(table1) self.assert_compile(i, 'INSERT INTO mytable (name) VALUES (:name) ' 'RETURNING mytable.myid, mytable.name, ' 'mytable.description') i = insert(table1, values=dict(name='foo' )).returning(func.length(table1.c.name)) self.assert_compile(i, 'INSERT INTO mytable (name) VALUES (:name) ' 'RETURNING char_length(mytable.name) AS ' 'length_1') def test_charset(self): """Exercise CHARACTER SET options on string types.""" columns = [(firebird.CHAR, [1], {}, 'CHAR(1)'), (firebird.CHAR, [1], {'charset': 'OCTETS'}, 'CHAR(1) CHARACTER SET OCTETS'), (firebird.VARCHAR, [1], {}, 'VARCHAR(1)'), (firebird.VARCHAR, [1], {'charset': 'OCTETS'}, 'VARCHAR(1) CHARACTER SET OCTETS')] for type_, args, kw, res in columns: self.assert_compile(type_(*args, **kw), res) class TypesTest(fixtures.TestBase): __only_on__ = 'firebird' @testing.provide_metadata def test_infinite_float(self): metadata = self.metadata t = Table('t', metadata, Column('data', Float) ) metadata.create_all() t.insert().execute(data=float('inf')) eq_(t.select().execute().fetchall(), [(float('inf'),)] ) class MiscTest(fixtures.TestBase): __only_on__ = 'firebird' @testing.provide_metadata def test_strlen(self): metadata = self.metadata # On FB the length() function is implemented by an external UDF, # strlen(). Various SA tests fail because they pass a parameter # to it, and that does not work (it always results the maximum # string length the UDF was declared to accept). This test # checks that at least it works ok in other cases. 
t = Table('t1', metadata, Column('id', Integer, Sequence('t1idseq'), primary_key=True), Column('name' , String(10))) metadata.create_all() t.insert(values=dict(name='dante')).execute() t.insert(values=dict(name='alighieri')).execute() select([func.count(t.c.id)], func.length(t.c.name) == 5).execute().first()[0] == 1 def test_version_parsing(self): for string, result in [ ("WI-V1.5.0.1234 Firebird 1.5", (1, 5, 1234, 'firebird')), ("UI-V6.3.2.18118 Firebird 2.1", (2, 1, 18118, 'firebird')), ("LI-V6.3.3.12981 Firebird 2.0", (2, 0, 12981, 'firebird')), ("WI-V8.1.1.333", (8, 1, 1, 'interbase')), ("WI-V8.1.1.333 Firebird 1.5", (1, 5, 333, 'firebird')), ]: eq_( testing.db.dialect._parse_version_info(string), result ) @testing.provide_metadata def test_rowcount_flag(self): metadata = self.metadata engine = engines.testing_engine(options={'enable_rowcount' : True}) assert engine.dialect.supports_sane_rowcount metadata.bind = engine t = Table('t1', metadata, Column('data', String(10))) metadata.create_all() r = t.insert().execute({'data': 'd1'}, {'data': 'd2'}, {'data' : 'd3'}) r = t.update().where(t.c.data == 'd2').values(data='d3' ).execute() eq_(r.rowcount, 1) r = t.delete().where(t.c.data == 'd3').execute() eq_(r.rowcount, 2) r = \ t.delete().execution_options(enable_rowcount=False).execute() eq_(r.rowcount, -1) engine = engines.testing_engine(options={'enable_rowcount' : False}) assert not engine.dialect.supports_sane_rowcount metadata.bind = engine r = t.insert().execute({'data': 'd1'}, {'data': 'd2'}, {'data' : 'd3'}) r = t.update().where(t.c.data == 'd2').values(data='d3' ).execute() eq_(r.rowcount, -1) r = t.delete().where(t.c.data == 'd3').execute() eq_(r.rowcount, -1) r = t.delete().execution_options(enable_rowcount=True).execute() eq_(r.rowcount, 1) def test_percents_in_text(self): for expr, result in (text("select '%' from rdb$database"), '%' ), (text("select '%%' from rdb$database"), '%%'), \ (text("select '%%%' from rdb$database"), '%%%'), \ (text("select 'hello % world' from rdb$database"), 'hello % world'): eq_(testing.db.scalar(expr), result) from sqlalchemy.testing.mock import Mock, call class ArgumentTest(fixtures.TestBase): def _dbapi(self): return Mock( paramstyle='qmark', connect=Mock( return_value=Mock( server_version="UI-V6.3.2.18118 Firebird 2.1", cursor=Mock(return_value=Mock()) ) ) ) def _engine(self, type_, **kw): dbapi = self._dbapi() kw.update( dict( module=dbapi, _initialize=False ) ) engine = engines.testing_engine("firebird+%s://" % type_, options=kw) return engine def test_retaining_flag_default_kinterbasdb(self): engine = self._engine("kinterbasdb") self._assert_retaining(engine, True) def test_retaining_flag_true_kinterbasdb(self): engine = self._engine("kinterbasdb", retaining=True) self._assert_retaining(engine, True) def test_retaining_flag_false_kinterbasdb(self): engine = self._engine("kinterbasdb", retaining=False) self._assert_retaining(engine, False) def test_retaining_flag_default_fdb(self): engine = self._engine("fdb") self._assert_retaining(engine, True) def test_retaining_flag_true_fdb(self): engine = self._engine("fdb", retaining=True) self._assert_retaining(engine, True) def test_retaining_flag_false_fdb(self): engine = self._engine("fdb", retaining=False) self._assert_retaining(engine, False) def _assert_retaining(self, engine, flag): conn = engine.connect() trans = conn.begin() trans.commit() eq_( engine.dialect.dbapi.connect.return_value.commit.mock_calls, [call(flag)] ) trans = conn.begin() trans.rollback() eq_( 
engine.dialect.dbapi.connect.return_value.rollback.mock_calls, [call(flag)] ) SQLAlchemy-0.8.4/test/dialect/test_informix.py0000644000076500000240000000205612251150015022054 0ustar classicstaff00000000000000from sqlalchemy import * from sqlalchemy.databases import informix from sqlalchemy.testing import * class CompileTest(fixtures.TestBase, AssertsCompiledSQL): __dialect__ = informix.InformixDialect() def test_statements(self): meta = MetaData() t1 = Table('t1', meta, Column('col1', Integer, primary_key=True), Column('col2', String(50))) t2 = Table('t2', meta, Column('col1', Integer, primary_key=True), Column('col2', String(50)), Column('col3', Integer, ForeignKey('t1.col1'))) self.assert_compile(t1.select(), 'SELECT t1.col1, t1.col2 FROM t1') self.assert_compile(select([t1, t2]).select_from(t1.join(t2)), 'SELECT t1.col1, t1.col2, t2.col1, ' 't2.col2, t2.col3 FROM t1 JOIN t2 ON ' 't1.col1 = t2.col3') self.assert_compile(t1.update().values({t1.c.col1: t1.c.col1 + 1}), 'UPDATE t1 SET col1=(t1.col1 + ?)') SQLAlchemy-0.8.4/test/dialect/test_mxodbc.py0000644000076500000240000000332312251147172021505 0ustar classicstaff00000000000000from sqlalchemy import * from sqlalchemy.testing import eq_ from sqlalchemy.testing import engines from sqlalchemy.testing import fixtures from sqlalchemy.testing.mock import Mock def mock_dbapi(): return Mock(paramstyle='qmark', connect=Mock( return_value=Mock( cursor=Mock( return_value=Mock( description=None, rowcount=None) ) ) ) ) class MxODBCTest(fixtures.TestBase): def test_native_odbc_execute(self): t1 = Table('t1', MetaData(), Column('c1', Integer)) dbapi = mock_dbapi() engine = engines.testing_engine('mssql+mxodbc://localhost', options={'module': dbapi, '_initialize': False}) conn = engine.connect() # crud: uses execute conn.execute(t1.insert().values(c1='foo')) conn.execute(t1.delete().where(t1.c.c1 == 'foo')) conn.execute(t1.update().where(t1.c.c1 == 'foo').values(c1='bar')) # select: uses executedirect conn.execute(t1.select()) # manual flagging conn.execution_options(native_odbc_execute=True).\ execute(t1.select()) conn.execution_options(native_odbc_execute=False).\ execute(t1.insert().values(c1='foo')) eq_( [c[2] for c in dbapi.connect.return_value.cursor.return_value.execute.mock_calls], [{'direct': True}, {'direct': True}, {'direct': True}, {'direct': True}, {'direct': False}, {'direct': True}] ) SQLAlchemy-0.8.4/test/dialect/test_oracle.py0000644000076500000240000020522412251150015021470 0ustar classicstaff00000000000000# coding: utf-8 from __future__ import with_statement from sqlalchemy.testing import eq_ from sqlalchemy import * from sqlalchemy import types as sqltypes, exc, schema from sqlalchemy.sql import table, column from sqlalchemy.testing import fixtures, AssertsExecutionResults, AssertsCompiledSQL from sqlalchemy import testing from sqlalchemy.testing import assert_raises, assert_raises_message from sqlalchemy.testing.engines import testing_engine from sqlalchemy.dialects.oracle import cx_oracle, base as oracle from sqlalchemy.engine import default from sqlalchemy.util import u import decimal from sqlalchemy.testing.schema import Table, Column import datetime import os from sqlalchemy import sql from sqlalchemy.testing.mock import Mock class OutParamTest(fixtures.TestBase, AssertsExecutionResults): __only_on__ = 'oracle+cx_oracle' @classmethod def setup_class(cls): testing.db.execute(""" create or replace procedure foo(x_in IN number, x_out OUT number, y_out OUT number, z_out OUT varchar) IS retval number; begin retval := 6; x_out := 10; 
y_out := x_in * 15; z_out := NULL; end; """) def test_out_params(self): result = testing.db.execute(text('begin foo(:x_in, :x_out, :y_out, ' ':z_out); end;', bindparams=[bindparam('x_in', Float), outparam('x_out', Integer), outparam('y_out', Float), outparam('z_out', String)]), x_in=5) eq_(result.out_parameters, {'x_out': 10, 'y_out': 75, 'z_out': None}) assert isinstance(result.out_parameters['x_out'], int) @classmethod def teardown_class(cls): testing.db.execute("DROP PROCEDURE foo") class CXOracleArgsTest(fixtures.TestBase): __only_on__ = 'oracle+cx_oracle' def test_autosetinputsizes(self): dialect = cx_oracle.dialect() assert dialect.auto_setinputsizes dialect = cx_oracle.dialect(auto_setinputsizes=False) assert not dialect.auto_setinputsizes def test_exclude_inputsizes_none(self): dialect = cx_oracle.dialect(exclude_setinputsizes=None) eq_(dialect.exclude_setinputsizes, set()) def test_exclude_inputsizes_custom(self): import cx_Oracle dialect = cx_oracle.dialect(dbapi=cx_Oracle, exclude_setinputsizes=('NCLOB',)) eq_(dialect.exclude_setinputsizes, set([cx_Oracle.NCLOB])) class QuotedBindRoundTripTest(fixtures.TestBase): __only_on__ = 'oracle' @testing.provide_metadata def test_table_round_trip(self): oracle.RESERVED_WORDS.remove('UNION') metadata = self.metadata table = Table("t1", metadata, Column("option", Integer), Column("plain", Integer, quote=True), # test that quote works for a reserved word # that the dialect isn't aware of when quote # is set Column("union", Integer, quote=True) ) metadata.create_all() table.insert().execute( {"option": 1, "plain": 1, "union": 1} ) eq_( testing.db.execute(table.select()).first(), (1, 1, 1) ) table.update().values(option=2, plain=2, union=2).execute() eq_( testing.db.execute(table.select()).first(), (2, 2, 2) ) class CompileTest(fixtures.TestBase, AssertsCompiledSQL): __dialect__ = oracle.dialect() def test_true_false(self): self.assert_compile( sql.false(), "0" ) self.assert_compile( sql.true(), "1" ) def test_owner(self): meta = MetaData() parent = Table('parent', meta, Column('id', Integer, primary_key=True), Column('name', String(50)), schema='ed') child = Table('child', meta, Column('id', Integer, primary_key=True), Column('parent_id', Integer, ForeignKey('ed.parent.id')), schema='ed') self.assert_compile(parent.join(child), 'ed.parent JOIN ed.child ON ed.parent.id = ' 'ed.child.parent_id') def test_subquery(self): t = table('sometable', column('col1'), column('col2')) s = select([t]) s = select([s.c.col1, s.c.col2]) self.assert_compile(s, "SELECT col1, col2 FROM (SELECT " "sometable.col1 AS col1, sometable.col2 " "AS col2 FROM sometable)") def test_bindparam_quote(self): """test that bound parameters take on quoting for reserved words, column names quote flag enabled.""" # note: this is only in cx_oracle at the moment. 
not sure # what other hypothetical oracle dialects might need self.assert_compile( bindparam("option"), ':"option"' ) self.assert_compile( bindparam("plain"), ':plain' ) t = Table("s", MetaData(), Column('plain', Integer, quote=True)) self.assert_compile( t.insert().values(plain=5), 'INSERT INTO s ("plain") VALUES (:"plain")' ) self.assert_compile( t.update().values(plain=5), 'UPDATE s SET "plain"=:"plain"' ) def test_limit(self): t = table('sometable', column('col1'), column('col2')) s = select([t]) c = s.compile(dialect=oracle.OracleDialect()) assert t.c.col1 in set(c.result_map['col1'][1]) s = select([t]).limit(10).offset(20) self.assert_compile(s, 'SELECT col1, col2 FROM (SELECT col1, ' 'col2, ROWNUM AS ora_rn FROM (SELECT ' 'sometable.col1 AS col1, sometable.col2 AS ' 'col2 FROM sometable) WHERE ROWNUM <= ' ':ROWNUM_1) WHERE ora_rn > :ora_rn_1') c = s.compile(dialect=oracle.OracleDialect()) assert t.c.col1 in set(c.result_map['col1'][1]) s = select([s.c.col1, s.c.col2]) self.assert_compile(s, 'SELECT col1, col2 FROM (SELECT col1, col2 ' 'FROM (SELECT col1, col2, ROWNUM AS ora_rn ' 'FROM (SELECT sometable.col1 AS col1, ' 'sometable.col2 AS col2 FROM sometable) ' 'WHERE ROWNUM <= :ROWNUM_1) WHERE ora_rn > ' ':ora_rn_1)') self.assert_compile(s, 'SELECT col1, col2 FROM (SELECT col1, col2 ' 'FROM (SELECT col1, col2, ROWNUM AS ora_rn ' 'FROM (SELECT sometable.col1 AS col1, ' 'sometable.col2 AS col2 FROM sometable) ' 'WHERE ROWNUM <= :ROWNUM_1) WHERE ora_rn > ' ':ora_rn_1)') s = select([t]).limit(10).offset(20).order_by(t.c.col2) self.assert_compile(s, 'SELECT col1, col2 FROM (SELECT col1, ' 'col2, ROWNUM AS ora_rn FROM (SELECT ' 'sometable.col1 AS col1, sometable.col2 AS ' 'col2 FROM sometable ORDER BY ' 'sometable.col2) WHERE ROWNUM <= ' ':ROWNUM_1) WHERE ora_rn > :ora_rn_1') s = select([t], for_update=True).limit(10).order_by(t.c.col2) self.assert_compile(s, 'SELECT col1, col2 FROM (SELECT ' 'sometable.col1 AS col1, sometable.col2 AS ' 'col2 FROM sometable ORDER BY ' 'sometable.col2) WHERE ROWNUM <= :ROWNUM_1 ' 'FOR UPDATE') s = select([t], for_update=True).limit(10).offset(20).order_by(t.c.col2) self.assert_compile(s, 'SELECT col1, col2 FROM (SELECT col1, ' 'col2, ROWNUM AS ora_rn FROM (SELECT ' 'sometable.col1 AS col1, sometable.col2 AS ' 'col2 FROM sometable ORDER BY ' 'sometable.col2) WHERE ROWNUM <= ' ':ROWNUM_1) WHERE ora_rn > :ora_rn_1 FOR ' 'UPDATE') def test_limit_preserves_typing_information(self): class MyType(TypeDecorator): impl = Integer stmt = select([type_coerce(column('x'), MyType).label('foo')]).limit(1) dialect = oracle.dialect() compiled = stmt.compile(dialect=dialect) assert isinstance(compiled.result_map['foo'][-1], MyType) def test_use_binds_for_limits_disabled(self): t = table('sometable', column('col1'), column('col2')) dialect = oracle.OracleDialect(use_binds_for_limits=False) self.assert_compile(select([t]).limit(10), "SELECT col1, col2 FROM (SELECT sometable.col1 AS col1, " "sometable.col2 AS col2 FROM sometable) WHERE ROWNUM <= 10", dialect=dialect) self.assert_compile(select([t]).offset(10), "SELECT col1, col2 FROM (SELECT col1, col2, ROWNUM AS ora_rn " "FROM (SELECT sometable.col1 AS col1, sometable.col2 AS col2 " "FROM sometable)) WHERE ora_rn > 10", dialect=dialect) self.assert_compile(select([t]).limit(10).offset(10), "SELECT col1, col2 FROM (SELECT col1, col2, ROWNUM AS ora_rn " "FROM (SELECT sometable.col1 AS col1, sometable.col2 AS col2 " "FROM sometable) WHERE ROWNUM <= 20) WHERE ora_rn > 10", dialect=dialect) def 
test_use_binds_for_limits_enabled(self): t = table('sometable', column('col1'), column('col2')) dialect = oracle.OracleDialect(use_binds_for_limits=True) self.assert_compile(select([t]).limit(10), "SELECT col1, col2 FROM (SELECT sometable.col1 AS col1, " "sometable.col2 AS col2 FROM sometable) WHERE ROWNUM " "<= :ROWNUM_1", dialect=dialect) self.assert_compile(select([t]).offset(10), "SELECT col1, col2 FROM (SELECT col1, col2, ROWNUM AS ora_rn " "FROM (SELECT sometable.col1 AS col1, sometable.col2 AS col2 " "FROM sometable)) WHERE ora_rn > :ora_rn_1", dialect=dialect) self.assert_compile(select([t]).limit(10).offset(10), "SELECT col1, col2 FROM (SELECT col1, col2, ROWNUM AS ora_rn " "FROM (SELECT sometable.col1 AS col1, sometable.col2 AS col2 " "FROM sometable) WHERE ROWNUM <= :ROWNUM_1) WHERE ora_rn > " ":ora_rn_1", dialect=dialect) def test_long_labels(self): dialect = default.DefaultDialect() dialect.max_identifier_length = 30 ora_dialect = oracle.dialect() m = MetaData() a_table = Table( 'thirty_characters_table_xxxxxx', m, Column('id', Integer, primary_key=True) ) other_table = Table( 'other_thirty_characters_table_', m, Column('id', Integer, primary_key=True), Column('thirty_characters_table_id', Integer, ForeignKey('thirty_characters_table_xxxxxx.id'), primary_key=True ) ) anon = a_table.alias() self.assert_compile(select([other_table, anon]). select_from( other_table.outerjoin(anon)).apply_labels(), 'SELECT other_thirty_characters_table_.id ' 'AS other_thirty_characters__1, ' 'other_thirty_characters_table_.thirty_char' 'acters_table_id AS other_thirty_characters' '__2, thirty_characters_table__1.id AS ' 'thirty_characters_table__3 FROM ' 'other_thirty_characters_table_ LEFT OUTER ' 'JOIN thirty_characters_table_xxxxxx AS ' 'thirty_characters_table__1 ON ' 'thirty_characters_table__1.id = ' 'other_thirty_characters_table_.thirty_char' 'acters_table_id', dialect=dialect) self.assert_compile(select([other_table, anon]).select_from( other_table.outerjoin(anon)).apply_labels(), 'SELECT other_thirty_characters_table_.id ' 'AS other_thirty_characters__1, ' 'other_thirty_characters_table_.thirty_char' 'acters_table_id AS other_thirty_characters' '__2, thirty_characters_table__1.id AS ' 'thirty_characters_table__3 FROM ' 'other_thirty_characters_table_ LEFT OUTER ' 'JOIN thirty_characters_table_xxxxxx ' 'thirty_characters_table__1 ON ' 'thirty_characters_table__1.id = ' 'other_thirty_characters_table_.thirty_char' 'acters_table_id', dialect=ora_dialect) def test_outer_join(self): table1 = table('mytable', column('myid', Integer), column('name', String), column('description', String), ) table2 = table( 'myothertable', column('otherid', Integer), column('othername', String), ) table3 = table( 'thirdtable', column('userid', Integer), column('otherstuff', String), ) query = select([table1, table2], or_(table1.c.name == 'fred', table1.c.myid == 10, table2.c.othername != 'jack', 'EXISTS (select yay from foo where boo = lar)' ), from_obj=[outerjoin(table1, table2, table1.c.myid == table2.c.otherid)]) self.assert_compile(query, 'SELECT mytable.myid, mytable.name, ' 'mytable.description, myothertable.otherid,' ' myothertable.othername FROM mytable, ' 'myothertable WHERE (mytable.name = ' ':name_1 OR mytable.myid = :myid_1 OR ' 'myothertable.othername != :othername_1 OR ' 'EXISTS (select yay from foo where boo = ' 'lar)) AND mytable.myid = ' 'myothertable.otherid(+)', dialect=oracle.OracleDialect(use_ansi=False)) query = table1.outerjoin(table2, table1.c.myid == table2.c.otherid).outerjoin(table3, 
table3.c.userid == table2.c.otherid) self.assert_compile(query.select(), 'SELECT mytable.myid, mytable.name, ' 'mytable.description, myothertable.otherid,' ' myothertable.othername, ' 'thirdtable.userid, thirdtable.otherstuff ' 'FROM mytable LEFT OUTER JOIN myothertable ' 'ON mytable.myid = myothertable.otherid ' 'LEFT OUTER JOIN thirdtable ON ' 'thirdtable.userid = myothertable.otherid') self.assert_compile(query.select(), 'SELECT mytable.myid, mytable.name, ' 'mytable.description, myothertable.otherid,' ' myothertable.othername, ' 'thirdtable.userid, thirdtable.otherstuff ' 'FROM mytable, myothertable, thirdtable ' 'WHERE thirdtable.userid(+) = ' 'myothertable.otherid AND mytable.myid = ' 'myothertable.otherid(+)', dialect=oracle.dialect(use_ansi=False)) query = table1.join(table2, table1.c.myid == table2.c.otherid).join(table3, table3.c.userid == table2.c.otherid) self.assert_compile(query.select(), 'SELECT mytable.myid, mytable.name, ' 'mytable.description, myothertable.otherid,' ' myothertable.othername, ' 'thirdtable.userid, thirdtable.otherstuff ' 'FROM mytable, myothertable, thirdtable ' 'WHERE thirdtable.userid = ' 'myothertable.otherid AND mytable.myid = ' 'myothertable.otherid', dialect=oracle.dialect(use_ansi=False)) query = table1.join(table2, table1.c.myid == table2.c.otherid).outerjoin(table3, table3.c.userid == table2.c.otherid) self.assert_compile(query.select().order_by(table1.c.name). limit(10).offset(5), 'SELECT myid, name, description, otherid, ' 'othername, userid, otherstuff FROM ' '(SELECT myid, name, description, otherid, ' 'othername, userid, otherstuff, ROWNUM AS ' 'ora_rn FROM (SELECT mytable.myid AS myid, ' 'mytable.name AS name, mytable.description ' 'AS description, myothertable.otherid AS ' 'otherid, myothertable.othername AS ' 'othername, thirdtable.userid AS userid, ' 'thirdtable.otherstuff AS otherstuff FROM ' 'mytable, myothertable, thirdtable WHERE ' 'thirdtable.userid(+) = ' 'myothertable.otherid AND mytable.myid = ' 'myothertable.otherid ORDER BY ' 'mytable.name) WHERE ROWNUM <= :ROWNUM_1) ' 'WHERE ora_rn > :ora_rn_1', dialect=oracle.dialect(use_ansi=False)) subq = select([table1]).select_from(table1.outerjoin(table2, table1.c.myid == table2.c.otherid)).alias() q = select([table3]).select_from(table3.outerjoin(subq, table3.c.userid == subq.c.myid)) self.assert_compile(q, 'SELECT thirdtable.userid, ' 'thirdtable.otherstuff FROM thirdtable ' 'LEFT OUTER JOIN (SELECT mytable.myid AS ' 'myid, mytable.name AS name, ' 'mytable.description AS description FROM ' 'mytable LEFT OUTER JOIN myothertable ON ' 'mytable.myid = myothertable.otherid) ' 'anon_1 ON thirdtable.userid = anon_1.myid', dialect=oracle.dialect(use_ansi=True)) self.assert_compile(q, 'SELECT thirdtable.userid, ' 'thirdtable.otherstuff FROM thirdtable, ' '(SELECT mytable.myid AS myid, ' 'mytable.name AS name, mytable.description ' 'AS description FROM mytable, myothertable ' 'WHERE mytable.myid = myothertable.otherid(' '+)) anon_1 WHERE thirdtable.userid = ' 'anon_1.myid(+)', dialect=oracle.dialect(use_ansi=False)) q = select([table1.c.name]).where(table1.c.name == 'foo') self.assert_compile(q, 'SELECT mytable.name FROM mytable WHERE ' 'mytable.name = :name_1', dialect=oracle.dialect(use_ansi=False)) subq = select([table3.c.otherstuff]).where(table3.c.otherstuff == table1.c.name).label('bar') q = select([table1.c.name, subq]) self.assert_compile(q, 'SELECT mytable.name, (SELECT ' 'thirdtable.otherstuff FROM thirdtable ' 'WHERE thirdtable.otherstuff = ' 'mytable.name) AS bar FROM mytable', 
dialect=oracle.dialect(use_ansi=False)) def test_alias_outer_join(self): address_types = table('address_types', column('id'), column('name')) addresses = table('addresses', column('id'), column('user_id'), column('address_type_id'), column('email_address')) at_alias = address_types.alias() s = select([at_alias, addresses]).select_from(addresses.outerjoin(at_alias, addresses.c.address_type_id == at_alias.c.id)).where(addresses.c.user_id == 7).order_by(addresses.c.id, address_types.c.id) self.assert_compile(s, 'SELECT address_types_1.id, ' 'address_types_1.name, addresses.id, ' 'addresses.user_id, addresses.address_type_' 'id, addresses.email_address FROM ' 'addresses LEFT OUTER JOIN address_types ' 'address_types_1 ON addresses.address_type_' 'id = address_types_1.id WHERE ' 'addresses.user_id = :user_id_1 ORDER BY ' 'addresses.id, address_types.id') def test_returning_insert(self): t1 = table('t1', column('c1'), column('c2'), column('c3')) self.assert_compile( t1.insert().values(c1=1).returning(t1.c.c2, t1.c.c3), "INSERT INTO t1 (c1) VALUES (:c1) RETURNING " "t1.c2, t1.c3 INTO :ret_0, :ret_1" ) def test_returning_insert_functional(self): t1 = table('t1', column('c1'), column('c2', String()), column('c3', String())) fn = func.lower(t1.c.c2, type_=String()) stmt = t1.insert().values(c1=1).returning(fn, t1.c.c3) compiled = stmt.compile(dialect=oracle.dialect()) eq_( compiled.result_map, {'ret_1': ('ret_1', (t1.c.c3, 'c3', 'c3'), t1.c.c3.type), 'ret_0': ('ret_0', (fn, 'lower', None), fn.type)} ) self.assert_compile( stmt, "INSERT INTO t1 (c1) VALUES (:c1) RETURNING " "lower(t1.c2), t1.c3 INTO :ret_0, :ret_1" ) def test_returning_insert_labeled(self): t1 = table('t1', column('c1'), column('c2'), column('c3')) self.assert_compile( t1.insert().values(c1=1).returning( t1.c.c2.label('c2_l'), t1.c.c3.label('c3_l')), "INSERT INTO t1 (c1) VALUES (:c1) RETURNING " "t1.c2, t1.c3 INTO :ret_0, :ret_1" ) def test_compound(self): t1 = table('t1', column('c1'), column('c2'), column('c3')) t2 = table('t2', column('c1'), column('c2'), column('c3')) self.assert_compile(union(t1.select(), t2.select()), 'SELECT t1.c1, t1.c2, t1.c3 FROM t1 UNION ' 'SELECT t2.c1, t2.c2, t2.c3 FROM t2') self.assert_compile(except_(t1.select(), t2.select()), 'SELECT t1.c1, t1.c2, t1.c3 FROM t1 MINUS ' 'SELECT t2.c1, t2.c2, t2.c3 FROM t2') def test_no_paren_fns(self): for fn, expected in [ (func.uid(), "uid"), (func.UID(), "UID"), (func.sysdate(), "sysdate"), (func.row_number(), "row_number()"), (func.rank(), "rank()"), (func.now(), "CURRENT_TIMESTAMP"), (func.current_timestamp(), "CURRENT_TIMESTAMP"), (func.user(), "USER"), ]: self.assert_compile(fn, expected) def test_create_index_alt_schema(self): m = MetaData() t1 = Table('foo', m, Column('x', Integer), schema="alt_schema" ) self.assert_compile( schema.CreateIndex(Index("bar", t1.c.x)), "CREATE INDEX alt_schema.bar ON alt_schema.foo (x)" ) def test_create_index_expr(self): m = MetaData() t1 = Table('foo', m, Column('x', Integer) ) self.assert_compile( schema.CreateIndex(Index("bar", t1.c.x > 5)), "CREATE INDEX bar ON foo (x > 5)" ) class CompatFlagsTest(fixtures.TestBase, AssertsCompiledSQL): def _dialect(self, server_version, **kw): def server_version_info(conn): return server_version dialect = oracle.dialect( dbapi=Mock(version="0.0.0", paramstyle="named"), **kw) dialect._get_server_version_info = server_version_info dialect._check_unicode_returns = Mock() dialect._check_unicode_description = Mock() dialect._get_default_schema_name = Mock() return dialect def 
test_ora8_flags(self): dialect = self._dialect((8, 2, 5)) # before connect, assume modern DB assert dialect._supports_char_length assert dialect._supports_nchar assert dialect.use_ansi dialect.initialize(Mock()) assert not dialect.implicit_returning assert not dialect._supports_char_length assert not dialect._supports_nchar assert not dialect.use_ansi self.assert_compile(String(50), "VARCHAR2(50)", dialect=dialect) self.assert_compile(Unicode(50), "VARCHAR2(50)", dialect=dialect) self.assert_compile(UnicodeText(), "CLOB", dialect=dialect) dialect = self._dialect((8, 2, 5), implicit_returning=True) dialect.initialize(testing.db.connect()) assert dialect.implicit_returning def test_default_flags(self): """test with no initialization or server version info""" dialect = self._dialect(None) assert dialect._supports_char_length assert dialect._supports_nchar assert dialect.use_ansi self.assert_compile(String(50), "VARCHAR2(50 CHAR)", dialect=dialect) self.assert_compile(Unicode(50), "NVARCHAR2(50)", dialect=dialect) self.assert_compile(UnicodeText(), "NCLOB", dialect=dialect) def test_ora10_flags(self): dialect = self._dialect((10, 2, 5)) dialect.initialize(Mock()) assert dialect._supports_char_length assert dialect._supports_nchar assert dialect.use_ansi self.assert_compile(String(50), "VARCHAR2(50 CHAR)", dialect=dialect) self.assert_compile(Unicode(50), "NVARCHAR2(50)", dialect=dialect) self.assert_compile(UnicodeText(), "NCLOB", dialect=dialect) class MultiSchemaTest(fixtures.TestBase, AssertsCompiledSQL): __only_on__ = 'oracle' @classmethod def setup_class(cls): # currently assuming full DBA privs for the user. # don't really know how else to go here unless # we connect as the other user. for stmt in """ create table test_schema.parent( id integer primary key, data varchar2(50) ); create table test_schema.child( id integer primary key, data varchar2(50), parent_id integer references test_schema.parent(id) ); create table local_table( id integer primary key, data varchar2(50) ); create synonym test_schema.ptable for test_schema.parent; create synonym test_schema.ctable for test_schema.child; create synonym test_schema_ptable for test_schema.parent; create synonym test_schema.local_table for local_table; -- can't make a ref from local schema to the -- remote schema's table without this, -- *and* cant give yourself a grant ! -- so we give it to public. ideas welcome. 
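-- (these REFERENCES grants are what let the localtable fixtures in the
-- reflection tests further down declare foreign keys against
-- test_schema.parent / test_schema.ptable from the local schema.)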
grant references on test_schema.parent to public; grant references on test_schema.child to public; """.split(";"): if stmt.strip(): testing.db.execute(stmt) @classmethod def teardown_class(cls): for stmt in """ drop table test_schema.child; drop table test_schema.parent; drop table local_table; drop synonym test_schema.ctable; drop synonym test_schema.ptable; drop synonym test_schema_ptable; drop synonym test_schema.local_table; """.split(";"): if stmt.strip(): testing.db.execute(stmt) @testing.provide_metadata def test_create_same_names_explicit_schema(self): schema = testing.db.dialect.default_schema_name meta = self.metadata parent = Table('parent', meta, Column('pid', Integer, primary_key=True), schema=schema ) child = Table('child', meta, Column('cid', Integer, primary_key=True), Column('pid', Integer, ForeignKey('%s.parent.pid' % schema)), schema=schema ) meta.create_all() parent.insert().execute({'pid': 1}) child.insert().execute({'cid': 1, 'pid': 1}) eq_(child.select().execute().fetchall(), [(1, 1)]) def test_reflect_alt_table_owner_local_synonym(self): meta = MetaData(testing.db) parent = Table('test_schema_ptable', meta, autoload=True, oracle_resolve_synonyms=True) self.assert_compile(parent.select(), "SELECT test_schema_ptable.id, " "test_schema_ptable.data FROM test_schema_ptable") select([parent]).execute().fetchall() def test_reflect_alt_synonym_owner_local_table(self): meta = MetaData(testing.db) parent = Table('local_table', meta, autoload=True, oracle_resolve_synonyms=True, schema="test_schema") self.assert_compile(parent.select(), "SELECT test_schema.local_table.id, " "test_schema.local_table.data FROM test_schema.local_table") select([parent]).execute().fetchall() @testing.provide_metadata def test_create_same_names_implicit_schema(self): meta = self.metadata parent = Table('parent', meta, Column('pid', Integer, primary_key=True), ) child = Table('child', meta, Column('cid', Integer, primary_key=True), Column('pid', Integer, ForeignKey('parent.pid')), ) meta.create_all() parent.insert().execute({'pid': 1}) child.insert().execute({'cid': 1, 'pid': 1}) eq_(child.select().execute().fetchall(), [(1, 1)]) def test_reflect_alt_owner_explicit(self): meta = MetaData(testing.db) parent = Table('parent', meta, autoload=True, schema='test_schema') child = Table('child', meta, autoload=True, schema='test_schema') self.assert_compile(parent.join(child), "test_schema.parent JOIN test_schema.child ON " "test_schema.parent.id = test_schema.child.parent_id") select([parent, child]).\ select_from(parent.join(child)).\ execute().fetchall() def test_reflect_local_to_remote(self): testing.db.execute('CREATE TABLE localtable (id INTEGER ' 'PRIMARY KEY, parent_id INTEGER REFERENCES ' 'test_schema.parent(id))') try: meta = MetaData(testing.db) lcl = Table('localtable', meta, autoload=True) parent = meta.tables['test_schema.parent'] self.assert_compile(parent.join(lcl), 'test_schema.parent JOIN localtable ON ' 'test_schema.parent.id = ' 'localtable.parent_id') select([parent, lcl]).select_from(parent.join(lcl)).execute().fetchall() finally: testing.db.execute('DROP TABLE localtable') def test_reflect_alt_owner_implicit(self): meta = MetaData(testing.db) parent = Table('parent', meta, autoload=True, schema='test_schema') child = Table('child', meta, autoload=True, schema='test_schema' ) self.assert_compile(parent.join(child), 'test_schema.parent JOIN test_schema.child ' 'ON test_schema.parent.id = ' 'test_schema.child.parent_id') select([parent, 
child]).select_from(parent.join(child)).execute().fetchall() def test_reflect_alt_owner_synonyms(self): testing.db.execute('CREATE TABLE localtable (id INTEGER ' 'PRIMARY KEY, parent_id INTEGER REFERENCES ' 'test_schema.ptable(id))') try: meta = MetaData(testing.db) lcl = Table('localtable', meta, autoload=True, oracle_resolve_synonyms=True) parent = meta.tables['test_schema.ptable'] self.assert_compile(parent.join(lcl), 'test_schema.ptable JOIN localtable ON ' 'test_schema.ptable.id = ' 'localtable.parent_id') select([parent, lcl]).select_from(parent.join(lcl)).execute().fetchall() finally: testing.db.execute('DROP TABLE localtable') def test_reflect_remote_synonyms(self): meta = MetaData(testing.db) parent = Table('ptable', meta, autoload=True, schema='test_schema', oracle_resolve_synonyms=True) child = Table('ctable', meta, autoload=True, schema='test_schema', oracle_resolve_synonyms=True) self.assert_compile(parent.join(child), 'test_schema.ptable JOIN ' 'test_schema.ctable ON test_schema.ptable.i' 'd = test_schema.ctable.parent_id') select([parent, child]).select_from(parent.join(child)).execute().fetchall() class ConstraintTest(fixtures.TestBase): __only_on__ = 'oracle' def setup(self): global metadata metadata = MetaData(testing.db) foo = Table('foo', metadata, Column('id', Integer, primary_key=True)) foo.create(checkfirst=True) def teardown(self): metadata.drop_all() def test_oracle_has_no_on_update_cascade(self): bar = Table('bar', metadata, Column('id', Integer, primary_key=True), Column('foo_id', Integer, ForeignKey('foo.id', onupdate='CASCADE'))) assert_raises(exc.SAWarning, bar.create) bat = Table('bat', metadata, Column('id', Integer, primary_key=True), Column('foo_id', Integer), ForeignKeyConstraint(['foo_id'], ['foo.id'], onupdate='CASCADE')) assert_raises(exc.SAWarning, bat.create) class TwoPhaseTest(fixtures.TablesTest): """test cx_oracle two phase, which remains in a semi-broken state so requires a carefully written test.""" __only_on__ = 'oracle+cx_oracle' @classmethod def define_tables(cls, metadata): Table('datatable', metadata, Column('id', Integer, primary_key=True), Column('data', String(50)) ) def _connection(self): conn = testing.db.connect() conn.detach() return conn def _assert_data(self, rows): eq_( testing.db.scalar("select count(*) from datatable"), rows ) def test_twophase_prepare_false(self): conn = self._connection() for i in xrange(2): trans = conn.begin_twophase() conn.execute("select 1 from dual") trans.prepare() trans.commit() conn.close() self._assert_data(0) def test_twophase_prepare_true(self): conn = self._connection() for i in xrange(2): trans = conn.begin_twophase() conn.execute("insert into datatable (id, data) " "values (%s, 'somedata')" % i) trans.prepare() trans.commit() conn.close() self._assert_data(2) def test_twophase_rollback(self): conn = self._connection() trans = conn.begin_twophase() conn.execute("insert into datatable (id, data) " "values (%s, 'somedata')" % 1) trans.rollback() trans = conn.begin_twophase() conn.execute("insert into datatable (id, data) " "values (%s, 'somedata')" % 1) trans.prepare() trans.commit() conn.close() self._assert_data(1) def test_not_prepared(self): conn = self._connection() trans = conn.begin_twophase() conn.execute("insert into datatable (id, data) " "values (%s, 'somedata')" % 1) trans.commit() conn.close() self._assert_data(1) class DialectTypesTest(fixtures.TestBase, AssertsCompiledSQL): __dialect__ = oracle.OracleDialect() def test_no_clobs_for_string_params(self): """test that simple string 
params get a DBAPI type of VARCHAR, not CLOB. This is to prevent setinputsizes from setting up cx_oracle.CLOBs on string-based bind params [ticket:793].""" class FakeDBAPI(object): def __getattr__(self, attr): return attr dialect = oracle.OracleDialect() dbapi = FakeDBAPI() b = bindparam("foo", "hello world!") eq_( b.type.dialect_impl(dialect).get_dbapi_type(dbapi), 'STRING' ) b = bindparam("foo", "hello world!") eq_( b.type.dialect_impl(dialect).get_dbapi_type(dbapi), 'STRING' ) def test_long(self): self.assert_compile(oracle.LONG(), "LONG") def test_type_adapt(self): dialect = cx_oracle.dialect() for start, test in [ (Date(), cx_oracle._OracleDate), (oracle.OracleRaw(), cx_oracle._OracleRaw), (String(), String), (VARCHAR(), cx_oracle._OracleString), (DATE(), DATE), (String(50), cx_oracle._OracleString), (Unicode(), cx_oracle._OracleNVarChar), (Text(), cx_oracle._OracleText), (UnicodeText(), cx_oracle._OracleUnicodeText), (NCHAR(), cx_oracle._OracleNVarChar), (oracle.RAW(50), cx_oracle._OracleRaw), ]: assert isinstance(start.dialect_impl(dialect), test), \ "wanted %r got %r" % (test, start.dialect_impl(dialect)) def test_raw_compile(self): self.assert_compile(oracle.RAW(), "RAW") self.assert_compile(oracle.RAW(35), "RAW(35)") def test_char_length(self): self.assert_compile(VARCHAR(50), "VARCHAR(50 CHAR)") oracle8dialect = oracle.dialect() oracle8dialect.server_version_info = (8, 0) self.assert_compile(VARCHAR(50), "VARCHAR(50)", dialect=oracle8dialect) self.assert_compile(NVARCHAR(50), "NVARCHAR2(50)") self.assert_compile(CHAR(50), "CHAR(50)") def test_varchar_types(self): dialect = oracle.dialect() for typ, exp in [ (String(50), "VARCHAR2(50 CHAR)"), (Unicode(50), "NVARCHAR2(50)"), (NVARCHAR(50), "NVARCHAR2(50)"), (VARCHAR(50), "VARCHAR(50 CHAR)"), (oracle.NVARCHAR2(50), "NVARCHAR2(50)"), (oracle.VARCHAR2(50), "VARCHAR2(50 CHAR)"), (String(), "VARCHAR2"), (Unicode(), "NVARCHAR2"), (NVARCHAR(), "NVARCHAR2"), (VARCHAR(), "VARCHAR"), (oracle.NVARCHAR2(), "NVARCHAR2"), (oracle.VARCHAR2(), "VARCHAR2"), ]: self.assert_compile(typ, exp, dialect=dialect) def test_interval(self): for type_, expected in [(oracle.INTERVAL(), 'INTERVAL DAY TO SECOND'), (oracle.INTERVAL(day_precision=3), 'INTERVAL DAY(3) TO SECOND'), (oracle.INTERVAL(second_precision=5), 'INTERVAL DAY TO SECOND(5)'), (oracle.INTERVAL(day_precision=2, second_precision=5), 'INTERVAL DAY(2) TO SECOND(5)')]: self.assert_compile(type_, expected) class TypesTest(fixtures.TestBase): __only_on__ = 'oracle' __dialect__ = oracle.OracleDialect() @testing.fails_on('+zxjdbc', 'zxjdbc lacks the FIXED_CHAR dbapi type') def test_fixed_char(self): m = MetaData(testing.db) t = Table('t1', m, Column('id', Integer, primary_key=True), Column('data', CHAR(30), nullable=False) ) t.create() try: t.insert().execute( dict(id=1, data="value 1"), dict(id=2, data="value 2"), dict(id=3, data="value 3") ) eq_( t.select().where(t.c.data == 'value 2').execute().fetchall(), [(2, 'value 2 ')] ) m2 = MetaData(testing.db) t2 = Table('t1', m2, autoload=True) assert type(t2.c.data.type) is CHAR eq_( t2.select().where(t2.c.data == 'value 2').execute().fetchall(), [(2, 'value 2 ')] ) finally: t.drop() @testing.requires.returning @testing.provide_metadata def test_int_not_float(self): m = self.metadata t1 = Table('t1', m, Column('foo', Integer)) t1.create() r = t1.insert().values(foo=5).returning(t1.c.foo).execute() x = r.scalar() assert x == 5 assert isinstance(x, int) x = t1.select().scalar() assert x == 5 assert isinstance(x, int) @testing.provide_metadata def 
test_rowid(self): metadata = self.metadata t = Table('t1', metadata, Column('x', Integer) ) t.create() t.insert().execute(x=5) s1 = select([t]) s2 = select([column('rowid')]).select_from(s1) rowid = s2.scalar() # the ROWID type is not really needed here, # as cx_oracle just treats it as a string, # but we want to make sure the ROWID works... rowid_col = column('rowid', oracle.ROWID) s3 = select([t.c.x, rowid_col]).\ where(rowid_col == cast(rowid, oracle.ROWID)) eq_(s3.select().execute().fetchall(), [(5, rowid)] ) @testing.fails_on('+zxjdbc', 'Not yet known how to pass values of the ' 'INTERVAL type') @testing.provide_metadata def test_interval(self): metadata = self.metadata interval_table = Table('intervaltable', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('day_interval', oracle.INTERVAL(day_precision=3))) metadata.create_all() interval_table.insert().\ execute(day_interval=datetime.timedelta(days=35, seconds=5743)) row = interval_table.select().execute().first() eq_(row['day_interval'], datetime.timedelta(days=35, seconds=5743)) @testing.provide_metadata def test_numerics(self): m = self.metadata t1 = Table('t1', m, Column('intcol', Integer), Column('numericcol', Numeric(precision=9, scale=2)), Column('floatcol1', Float()), Column('floatcol2', FLOAT()), Column('doubleprec', oracle.DOUBLE_PRECISION), Column('numbercol1', oracle.NUMBER(9)), Column('numbercol2', oracle.NUMBER(9, 3)), Column('numbercol3', oracle.NUMBER), ) t1.create() t1.insert().execute( intcol=1, numericcol=5.2, floatcol1=6.5, floatcol2=8.5, doubleprec=9.5, numbercol1=12, numbercol2=14.85, numbercol3=15.76 ) m2 = MetaData(testing.db) t2 = Table('t1', m2, autoload=True) for row in ( t1.select().execute().first(), t2.select().execute().first() ): for i, (val, type_) in enumerate(( (1, int), (decimal.Decimal("5.2"), decimal.Decimal), (6.5, float), (8.5, float), (9.5, float), (12, int), (decimal.Decimal("14.85"), decimal.Decimal), (15.76, float), )): eq_(row[i], val) assert isinstance(row[i], type_), '%r is not %r' \ % (row[i], type_) def test_numeric_no_decimal_mode(self): engine = testing_engine(options=dict(coerce_to_decimal=False)) value = engine.scalar("SELECT 5.66 FROM DUAL") assert isinstance(value, float) value = testing.db.scalar("SELECT 5.66 FROM DUAL") assert isinstance(value, decimal.Decimal) @testing.provide_metadata def test_numerics_broken_inspection(self): """Numeric scenarios where Oracle type info is 'broken', returning us precision, scale of the form (0, 0) or (0, -127). We convert to Decimal and let int()/float() processors take over. """ metadata = self.metadata # this test requires cx_oracle 5 foo = Table('foo', metadata, Column('idata', Integer), Column('ndata', Numeric(20, 2)), Column('ndata2', Numeric(20, 2)), Column('nidata', Numeric(5, 0)), Column('fdata', Float()), ) foo.create() foo.insert().execute({ 'idata': 5, 'ndata': decimal.Decimal("45.6"), 'ndata2': decimal.Decimal("45.0"), 'nidata': decimal.Decimal('53'), 'fdata': 45.68392 }) stmt = "SELECT idata, ndata, ndata2, nidata, fdata FROM foo" row = testing.db.execute(stmt).fetchall()[0] eq_( [type(x) for x in row], [int, decimal.Decimal, decimal.Decimal, int, float] ) eq_( row, (5, decimal.Decimal('45.6'), decimal.Decimal('45'), 53, 45.683920000000001) ) # with a nested subquery, # both Numeric values that don't have decimal places, regardless # of their originating type, come back as ints with no useful # typing information beyond "numeric". So native handler # must convert to int. 
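# (concretely: as the docstring above notes, precision/scale comes back as
# (0, 0) or (0, -127) here, so a stored 45.00 arrives as decimal.Decimal('45')
# and only a value-level check -- roughly
# "int(v) if v == v.to_integral_value() else float(v)" -- can pick the
# intended Python type; that expression is a sketch of the idea, not the
# dialect's literal code.)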
# this means our Decimal converters need to run no matter what. # totally sucks. stmt = """ SELECT (SELECT (SELECT idata FROM foo) FROM DUAL) AS idata, (SELECT CAST((SELECT ndata FROM foo) AS NUMERIC(20, 2)) FROM DUAL) AS ndata, (SELECT CAST((SELECT ndata2 FROM foo) AS NUMERIC(20, 2)) FROM DUAL) AS ndata2, (SELECT CAST((SELECT nidata FROM foo) AS NUMERIC(5, 0)) FROM DUAL) AS nidata, (SELECT CAST((SELECT fdata FROM foo) AS FLOAT) FROM DUAL) AS fdata FROM dual """ row = testing.db.execute(stmt).fetchall()[0] eq_( [type(x) for x in row], [int, decimal.Decimal, int, int, decimal.Decimal] ) eq_( row, (5, decimal.Decimal('45.6'), 45, 53, decimal.Decimal('45.68392')) ) row = testing.db.execute(text(stmt, typemap={ 'idata': Integer(), 'ndata': Numeric(20, 2), 'ndata2': Numeric(20, 2), 'nidata': Numeric(5, 0), 'fdata': Float() })).fetchall()[0] eq_( [type(x) for x in row], [int, decimal.Decimal, decimal.Decimal, decimal.Decimal, float] ) eq_( row, (5, decimal.Decimal('45.6'), decimal.Decimal('45'), decimal.Decimal('53'), 45.683920000000001) ) stmt = """ SELECT anon_1.idata AS anon_1_idata, anon_1.ndata AS anon_1_ndata, anon_1.ndata2 AS anon_1_ndata2, anon_1.nidata AS anon_1_nidata, anon_1.fdata AS anon_1_fdata FROM (SELECT idata, ndata, ndata2, nidata, fdata FROM ( SELECT (SELECT (SELECT idata FROM foo) FROM DUAL) AS idata, (SELECT CAST((SELECT ndata FROM foo) AS NUMERIC(20, 2)) FROM DUAL) AS ndata, (SELECT CAST((SELECT ndata2 FROM foo) AS NUMERIC(20, 2)) FROM DUAL) AS ndata2, (SELECT CAST((SELECT nidata FROM foo) AS NUMERIC(5, 0)) FROM DUAL) AS nidata, (SELECT CAST((SELECT fdata FROM foo) AS FLOAT) FROM DUAL) AS fdata FROM dual ) WHERE ROWNUM >= 0) anon_1 """ row = testing.db.execute(stmt).fetchall()[0] eq_( [type(x) for x in row], [int, decimal.Decimal, int, int, decimal.Decimal] ) eq_( row, (5, decimal.Decimal('45.6'), 45, 53, decimal.Decimal('45.68392')) ) row = testing.db.execute(text(stmt, typemap={ 'anon_1_idata': Integer(), 'anon_1_ndata': Numeric(20, 2), 'anon_1_ndata2': Numeric(20, 2), 'anon_1_nidata': Numeric(5, 0), 'anon_1_fdata': Float() })).fetchall()[0] eq_( [type(x) for x in row], [int, decimal.Decimal, decimal.Decimal, decimal.Decimal, float] ) eq_( row, (5, decimal.Decimal('45.6'), decimal.Decimal('45'), decimal.Decimal('53'), 45.683920000000001) ) row = testing.db.execute(text(stmt, typemap={ 'anon_1_idata': Integer(), 'anon_1_ndata': Numeric(20, 2, asdecimal=False), 'anon_1_ndata2': Numeric(20, 2, asdecimal=False), 'anon_1_nidata': Numeric(5, 0, asdecimal=False), 'anon_1_fdata': Float(asdecimal=True) })).fetchall()[0] eq_( [type(x) for x in row], [int, float, float, float, decimal.Decimal] ) eq_( row, (5, 45.6, 45, 53, decimal.Decimal('45.68392')) ) @testing.provide_metadata def test_reflect_dates(self): metadata = self.metadata Table( "date_types", metadata, Column('d1', DATE), Column('d2', TIMESTAMP), Column('d3', TIMESTAMP(timezone=True)), Column('d4', oracle.INTERVAL(second_precision=5)), ) metadata.create_all() m = MetaData(testing.db) t1 = Table( "date_types", m, autoload=True) assert isinstance(t1.c.d1.type, DATE) assert isinstance(t1.c.d2.type, TIMESTAMP) assert not t1.c.d2.type.timezone assert isinstance(t1.c.d3.type, TIMESTAMP) assert t1.c.d3.type.timezone assert isinstance(t1.c.d4.type, oracle.INTERVAL) def test_reflect_all_types_schema(self): types_table = Table('all_types', MetaData(testing.db), Column('owner', String(30), primary_key=True), Column('type_name', String(30), primary_key=True), autoload=True, oracle_resolve_synonyms=True ) for row in 
types_table.select().execute().fetchall(): [row[k] for k in row.keys()] @testing.provide_metadata def test_raw_roundtrip(self): metadata = self.metadata raw_table = Table('raw', metadata, Column('id', Integer, primary_key=True), Column('data', oracle.RAW(35)) ) metadata.create_all() testing.db.execute(raw_table.insert(), id=1, data="ABCDEF") eq_( testing.db.execute(raw_table.select()).first(), (1, "ABCDEF") ) @testing.provide_metadata def test_reflect_nvarchar(self): metadata = self.metadata Table('t', metadata, Column('data', sqltypes.NVARCHAR(255)) ) metadata.create_all() m2 = MetaData(testing.db) t2 = Table('t', m2, autoload=True) assert isinstance(t2.c.data.type, sqltypes.NVARCHAR) if testing.against('oracle+cx_oracle'): # nvarchar returns unicode natively. cx_oracle # _OracleNVarChar type should be at play here. assert isinstance( t2.c.data.type.dialect_impl(testing.db.dialect), cx_oracle._OracleNVarChar) data = u'm’a réveillé.' t2.insert().execute(data=data) res = t2.select().execute().first()['data'] eq_(res, data) assert isinstance(res, unicode) @testing.provide_metadata def test_char_length(self): metadata = self.metadata t1 = Table('t1', metadata, Column("c1", VARCHAR(50)), Column("c2", NVARCHAR(250)), Column("c3", CHAR(200)) ) t1.create() m2 = MetaData(testing.db) t2 = Table('t1', m2, autoload=True) eq_(t2.c.c1.type.length, 50) eq_(t2.c.c2.type.length, 250) eq_(t2.c.c3.type.length, 200) @testing.provide_metadata def test_long_type(self): metadata = self.metadata t = Table('t', metadata, Column('data', oracle.LONG) ) metadata.create_all(testing.db) testing.db.execute(t.insert(), data='xyz') eq_( testing.db.scalar(select([t.c.data])), "xyz" ) def test_longstring(self): metadata = MetaData(testing.db) testing.db.execute(""" CREATE TABLE Z_TEST ( ID NUMERIC(22) PRIMARY KEY, ADD_USER VARCHAR2(20) NOT NULL ) """) try: t = Table("z_test", metadata, autoload=True) t.insert().execute(id=1.0, add_user='foobar') assert t.select().execute().fetchall() == [(1, 'foobar')] finally: testing.db.execute("DROP TABLE Z_TEST") @testing.fails_on('+zxjdbc', 'auto_convert_lobs not applicable') def test_lobs_without_convert(self): engine = testing_engine(options=dict(auto_convert_lobs=False)) metadata = MetaData() t = Table("z_test", metadata, Column('id', Integer, primary_key=True), Column('data', Text), Column('bindata', LargeBinary)) t.create(engine) try: engine.execute(t.insert(), id=1, data='this is text', bindata='this is binary') row = engine.execute(t.select()).first() eq_(row['data'].read(), 'this is text') eq_(row['bindata'].read(), 'this is binary') finally: t.drop(engine) class EuroNumericTest(fixtures.TestBase): """test the numeric output_type_handler when using non-US locale for NLS_LANG.""" __only_on__ = 'oracle+cx_oracle' def setup(self): self.old_nls_lang = os.environ.get('NLS_LANG', False) os.environ['NLS_LANG'] = "GERMAN" self.engine = testing_engine() def teardown(self): if self.old_nls_lang is not False: os.environ['NLS_LANG'] = self.old_nls_lang else: del os.environ['NLS_LANG'] self.engine.dispose() def test_output_type_handler(self): for stmt, exp, kw in [ ("SELECT 0.1 FROM DUAL", decimal.Decimal("0.1"), {}), ("SELECT 15 FROM DUAL", 15, {}), ("SELECT CAST(15 AS NUMERIC(3, 1)) FROM DUAL", decimal.Decimal("15"), {}), ("SELECT CAST(0.1 AS NUMERIC(5, 2)) FROM DUAL", decimal.Decimal("0.1"), {}), ("SELECT :num FROM DUAL", decimal.Decimal("2.5"), {'num': decimal.Decimal("2.5")}) ]: test_exp = self.engine.scalar(stmt, **kw) eq_( test_exp, exp ) assert type(test_exp) is type(exp) class 
DontReflectIOTTest(fixtures.TestBase): """test that index overflow tables aren't included in table_names.""" __only_on__ = 'oracle' def setup(self): testing.db.execute(""" CREATE TABLE admin_docindex( token char(20), doc_id NUMBER, token_frequency NUMBER, token_offsets VARCHAR2(2000), CONSTRAINT pk_admin_docindex PRIMARY KEY (token, doc_id)) ORGANIZATION INDEX TABLESPACE users PCTTHRESHOLD 20 OVERFLOW TABLESPACE users """) def teardown(self): testing.db.execute("drop table admin_docindex") def test_reflect_all(self): m = MetaData(testing.db) m.reflect() eq_( set(t.name for t in m.tables.values()), set(['admin_docindex']) ) class BufferedColumnTest(fixtures.TestBase, AssertsCompiledSQL): __only_on__ = 'oracle' @classmethod def setup_class(cls): global binary_table, stream, meta meta = MetaData(testing.db) binary_table = Table('binary_table', meta, Column('id', Integer, primary_key=True), Column('data', LargeBinary) ) meta.create_all() stream = os.path.join( os.path.dirname(__file__), "..", 'binary_data_one.dat') stream = file(stream).read(12000) for i in range(1, 11): binary_table.insert().execute(id=i, data=stream) @classmethod def teardown_class(cls): meta.drop_all() def test_fetch(self): result = binary_table.select().order_by(binary_table.c.id).\ execute().fetchall() eq_(result, [(i, stream) for i in range(1, 11)]) @testing.fails_on('+zxjdbc', 'FIXME: zxjdbc should support this') def test_fetch_single_arraysize(self): eng = testing_engine(options={'arraysize': 1}) result = eng.execute(binary_table.select(). order_by(binary_table.c.id)).fetchall() eq_(result, [(i, stream) for i in range(1, 11)]) class UnsupportedIndexReflectTest(fixtures.TestBase): __only_on__ = 'oracle' @testing.emits_warning("No column names") @testing.provide_metadata def test_reflect_functional_index(self): metadata = self.metadata Table('test_index_reflect', metadata, Column('data', String(20), primary_key=True) ) metadata.create_all() testing.db.execute('CREATE INDEX DATA_IDX ON ' 'TEST_INDEX_REFLECT (UPPER(DATA))') m2 = MetaData(testing.db) Table('test_index_reflect', m2, autoload=True) class RoundTripIndexTest(fixtures.TestBase): __only_on__ = 'oracle' @testing.provide_metadata def test_basic(self): metadata = self.metadata table = Table("sometable", metadata, Column("id_a", Unicode(255), primary_key=True), Column("id_b", Unicode(255), primary_key=True, unique=True), Column("group", Unicode(255), primary_key=True), Column("col", Unicode(255)), UniqueConstraint('col', 'group'), ) # "group" is a keyword, so lower case normalind = Index('tableind', table.c.id_b, table.c.group) metadata.create_all() mirror = MetaData(testing.db) mirror.reflect() metadata.drop_all() mirror.create_all() inspect = MetaData(testing.db) inspect.reflect() def obj_definition(obj): return obj.__class__, tuple([c.name for c in obj.columns]), getattr(obj, 'unique', None) # find what the primary k constraint name should be primaryconsname = testing.db.execute( text("""SELECT constraint_name FROM all_constraints WHERE table_name = :table_name AND owner = :owner AND constraint_type = 'P' """), table_name=table.name.upper(), owner=testing.db.url.username.upper()).fetchall()[0][0] reflectedtable = inspect.tables[table.name] # make a dictionary of the reflected objects: reflected = dict([(obj_definition(i), i) for i in reflectedtable.indexes | reflectedtable.constraints]) # assert we got primary key constraint and its name, Error # if not in dict assert reflected[(PrimaryKeyConstraint, ('id_a', 'id_b', 'group'), None)].name.upper() \ == 
primaryconsname.upper() # Error if not in dict eq_( reflected[(Index, ('id_b', 'group'), False)].name, normalind.name ) assert (Index, ('id_b', ), True) in reflected assert (Index, ('col', 'group'), True) in reflected eq_(len(reflectedtable.constraints), 1) eq_(len(reflectedtable.indexes), 3) class SequenceTest(fixtures.TestBase, AssertsCompiledSQL): def test_basic(self): seq = Sequence('my_seq_no_schema') dialect = oracle.OracleDialect() assert dialect.identifier_preparer.format_sequence(seq) \ == 'my_seq_no_schema' seq = Sequence('my_seq', schema='some_schema') assert dialect.identifier_preparer.format_sequence(seq) \ == 'some_schema.my_seq' seq = Sequence('My_Seq', schema='Some_Schema') assert dialect.identifier_preparer.format_sequence(seq) \ == '"Some_Schema"."My_Seq"' class ExecuteTest(fixtures.TestBase): __only_on__ = 'oracle' def test_basic(self): eq_(testing.db.execute('/*+ this is a comment */ SELECT 1 FROM ' 'DUAL').fetchall(), [(1, )]) def test_sequences_are_integers(self): seq = Sequence('foo_seq') seq.create(testing.db) try: val = testing.db.execute(seq) eq_(val, 1) assert type(val) is int finally: seq.drop(testing.db) @testing.provide_metadata def test_limit_offset_for_update(self): metadata = self.metadata # oracle can't actually do the ROWNUM thing with FOR UPDATE # very well. t = Table('t1', metadata, Column('id', Integer, primary_key=True), Column('data', Integer) ) metadata.create_all() t.insert().execute( {'id': 1, 'data': 1}, {'id': 2, 'data': 7}, {'id': 3, 'data': 12}, {'id': 4, 'data': 15}, {'id': 5, 'data': 32}, ) # here, we can't use ORDER BY. eq_( t.select(for_update=True).limit(2).execute().fetchall(), [(1, 1), (2, 7)] ) # here, its impossible. But we'd prefer it to raise ORA-02014 # instead of issuing a syntax error. assert_raises_message( exc.DatabaseError, "ORA-02014", t.select(for_update=True).limit(2).offset(3).execute ) class UnicodeSchemaTest(fixtures.TestBase): __only_on__ = 'oracle' @testing.provide_metadata def test_quoted_column_non_unicode(self): metadata = self.metadata table = Table("atable", metadata, Column("_underscorecolumn", Unicode(255), primary_key=True), ) metadata.create_all() table.insert().execute( {'_underscorecolumn': u('’é')}, ) result = testing.db.execute( table.select().where(table.c._underscorecolumn == u('’é')) ).scalar() eq_(result, u('’é')) @testing.provide_metadata def test_quoted_column_unicode(self): metadata = self.metadata table = Table("atable", metadata, Column(u("méil"), Unicode(255), primary_key=True), ) metadata.create_all() table.insert().execute( {u('méil'): u('’é')}, ) result = testing.db.execute( table.select().where(table.c[u('méil')] == u('’é')) ).scalar() eq_(result, u('’é')) class DBLinkReflectionTest(fixtures.TestBase): __requires__ = 'oracle_test_dblink', __only_on__ = 'oracle' @classmethod def setup_class(cls): from sqlalchemy.testing import config cls.dblink = config.file_config.get('sqla_testing', 'oracle_db_link') with testing.db.connect() as conn: conn.execute( "create table test_table " "(id integer primary key, data varchar2(50))") conn.execute("create synonym test_table_syn " "for test_table@%s" % cls.dblink) @classmethod def teardown_class(cls): with testing.db.connect() as conn: conn.execute("drop synonym test_table_syn") conn.execute("drop table test_table") def test_hello_world(self): """test that the synonym/dblink is functional.""" testing.db.execute("insert into test_table_syn (id, data) " "values (1, 'some data')") eq_( testing.db.execute("select * from test_table_syn").first(), (1, 'some 
data') ) def test_reflection(self): """test the resolution of the synonym/dblink. """ m = MetaData() t = Table('test_table_syn', m, autoload=True, autoload_with=testing.db, oracle_resolve_synonyms=True) eq_(t.c.keys(), ['id', 'data']) eq_(list(t.primary_key), [t.c.id]) SQLAlchemy-0.8.4/test/dialect/test_pyodbc.py0000644000076500000240000000104512251147172021510 0ustar classicstaff00000000000000from sqlalchemy.testing import eq_ from sqlalchemy.connectors import pyodbc from sqlalchemy.testing import fixtures class PyODBCTest(fixtures.TestBase): def test_pyodbc_version(self): connector = pyodbc.PyODBCConnector() for vers, expected in [ ('2.1.8', (2, 1, 8)), ("py3-3.0.1-beta4", (3, 0, 1, 'beta4')), ("10.15.17", (10, 15, 17)), ("crap.crap.crap", ()), ]: eq_( connector._parse_dbapi_version(vers), expected )SQLAlchemy-0.8.4/test/dialect/test_sqlite.py0000644000076500000240000011075712251150015021532 0ustar classicstaff00000000000000"""SQLite-specific tests.""" from sqlalchemy.testing import eq_, assert_raises, \ assert_raises_message import datetime from sqlalchemy import Table, String, select, Text, CHAR, bindparam, Column,\ Unicode, Date, MetaData, UnicodeText, Time, Integer, TIMESTAMP, \ Boolean, func, NUMERIC, DateTime, extract, ForeignKey, text, Numeric,\ DefaultClause, and_, DECIMAL, TypeDecorator, create_engine, Float, \ INTEGER, UniqueConstraint, DATETIME, DATE, TIME, BOOLEAN, BIGINT from sqlalchemy import exc, sql, schema, pool, types as sqltypes from sqlalchemy.dialects.sqlite import base as sqlite, \ pysqlite as pysqlite_dialect from sqlalchemy.engine.url import make_url from sqlalchemy.testing import fixtures, AssertsCompiledSQL, \ AssertsExecutionResults, engines from sqlalchemy import testing import os from sqlalchemy.schema import CreateTable class TestTypes(fixtures.TestBase, AssertsExecutionResults): __only_on__ = 'sqlite' def test_boolean(self): """Test that the boolean only treats 1 as True """ meta = MetaData(testing.db) t = Table('bool_table', meta, Column('id', Integer, primary_key=True), Column('boo', Boolean(create_constraint=False))) try: meta.create_all() testing.db.execute("INSERT INTO bool_table (id, boo) " "VALUES (1, 'false');") testing.db.execute("INSERT INTO bool_table (id, boo) " "VALUES (2, 'true');") testing.db.execute("INSERT INTO bool_table (id, boo) " "VALUES (3, '1');") testing.db.execute("INSERT INTO bool_table (id, boo) " "VALUES (4, '0');") testing.db.execute('INSERT INTO bool_table (id, boo) ' 'VALUES (5, 1);') testing.db.execute('INSERT INTO bool_table (id, boo) ' 'VALUES (6, 0);') eq_(t.select(t.c.boo).order_by(t.c.id).execute().fetchall(), [(3, True), (5, True)]) finally: meta.drop_all() def test_string_dates_passed_raise(self): assert_raises(exc.StatementError, testing.db.execute, select([1]).where(bindparam('date', type_=Date)), date=str(datetime.date(2007, 10, 30))) def test_cant_parse_datetime_message(self): for (typ, disp) in [ (Time, "time"), (DateTime, "datetime"), (Date, "date") ]: assert_raises_message( ValueError, "Couldn't parse %s string." 
% disp, lambda: testing.db.execute( text("select 'ASDF' as value", typemap={"value":typ}) ).scalar() ) def test_native_datetime(self): dbapi = testing.db.dialect.dbapi connect_args = {'detect_types': dbapi.PARSE_DECLTYPES \ | dbapi.PARSE_COLNAMES} engine = engines.testing_engine(options={'connect_args' : connect_args, 'native_datetime': True}) t = Table('datetest', MetaData(), Column('id', Integer, primary_key=True), Column('d1', Date), Column('d2', TIMESTAMP)) t.create(engine) try: engine.execute(t.insert(), {'d1': datetime.date(2010, 5, 10), 'd2': datetime.datetime( 2010, 5, 10, 12, 15, 25, )}) row = engine.execute(t.select()).first() eq_(row, (1, datetime.date(2010, 5, 10), datetime.datetime( 2010, 5, 10, 12, 15, 25, ))) r = engine.execute(func.current_date()).scalar() assert isinstance(r, basestring) finally: t.drop(engine) engine.dispose() @testing.provide_metadata def test_custom_datetime(self): sqlite_date = sqlite.DATETIME( # 2004-05-21T00:00:00 storage_format="%(year)04d-%(month)02d-%(day)02d" "T%(hour)02d:%(minute)02d:%(second)02d", regexp=r"(\d+)-(\d+)-(\d+)T(\d+):(\d+):(\d+)", ) t = Table('t', self.metadata, Column('d', sqlite_date)) self.metadata.create_all(testing.db) testing.db.execute(t.insert(). values(d=datetime.datetime(2010, 10, 15, 12, 37, 0))) testing.db.execute("insert into t (d) values ('2004-05-21T00:00:00')") eq_( testing.db.execute("select * from t order by d").fetchall(), [(u'2004-05-21T00:00:00',), (u'2010-10-15T12:37:00',)] ) eq_( testing.db.execute(select([t.c.d]).order_by(t.c.d)).fetchall(), [(datetime.datetime(2004, 5, 21, 0, 0),), (datetime.datetime(2010, 10, 15, 12, 37),)] ) @testing.provide_metadata def test_custom_date(self): sqlite_date = sqlite.DATE( # 2004-05-21T00:00:00 storage_format="%(year)04d|%(month)02d|%(day)02d", regexp=r"(\d+)\|(\d+)\|(\d+)", ) t = Table('t', self.metadata, Column('d', sqlite_date)) self.metadata.create_all(testing.db) testing.db.execute(t.insert(). 
values(d=datetime.date(2010, 10, 15))) testing.db.execute("insert into t (d) values ('2004|05|21')") eq_( testing.db.execute("select * from t order by d").fetchall(), [(u'2004|05|21',), (u'2010|10|15',)] ) eq_( testing.db.execute(select([t.c.d]).order_by(t.c.d)).fetchall(), [(datetime.date(2004, 5, 21),), (datetime.date(2010, 10, 15),)] ) def test_no_convert_unicode(self): """test no utf-8 encoding occurs""" dialect = sqlite.dialect() for t in ( String(convert_unicode=True), CHAR(convert_unicode=True), Unicode(), UnicodeText(), String(convert_unicode=True), CHAR(convert_unicode=True), Unicode(), UnicodeText(), ): bindproc = t.dialect_impl(dialect).bind_processor(dialect) assert not bindproc or isinstance(bindproc(u'some string'), unicode) @testing.provide_metadata def test_type_reflection(self): metadata = self.metadata # (ask_for, roundtripped_as_if_different) specs = [ (String(), String()), (String(1), String(1)), (String(3), String(3)), (Text(), Text()), (Unicode(), String()), (Unicode(1), String(1)), (Unicode(3), String(3)), (UnicodeText(), Text()), (CHAR(1), ), (CHAR(3), CHAR(3)), (NUMERIC, NUMERIC()), (NUMERIC(10, 2), NUMERIC(10, 2)), (Numeric, NUMERIC()), (Numeric(10, 2), NUMERIC(10, 2)), (DECIMAL, DECIMAL()), (DECIMAL(10, 2), DECIMAL(10, 2)), (INTEGER, INTEGER()), (BIGINT, BIGINT()), (Float, Float()), (NUMERIC(), ), (TIMESTAMP, TIMESTAMP()), (DATETIME, DATETIME()), (DateTime, DateTime()), (DateTime(), ), (DATE, DATE()), (Date, Date()), (TIME, TIME()), (Time, Time()), (BOOLEAN, BOOLEAN()), (Boolean, Boolean()), ] columns = [Column('c%i' % (i + 1), t[0]) for (i, t) in enumerate(specs)] db = testing.db t_table = Table('types', metadata, *columns) metadata.create_all() m2 = MetaData(db) rt = Table('types', m2, autoload=True) try: db.execute('CREATE VIEW types_v AS SELECT * from types') rv = Table('types_v', m2, autoload=True) expected = [len(c) > 1 and c[1] or c[0] for c in specs] for table in rt, rv: for i, reflected in enumerate(table.c): assert isinstance(reflected.type, type(expected[i])), '%d: %r' % (i, type(expected[i])) finally: db.execute('DROP VIEW types_v') @testing.emits_warning('Did not recognize') @testing.provide_metadata def test_unknown_reflection(self): metadata = self.metadata t = Table('t', metadata, Column('x', sqltypes.BINARY(16)), Column('y', sqltypes.BINARY()) ) t.create() t2 = Table('t', MetaData(), autoload=True, autoload_with=testing.db) assert isinstance(t2.c.x.type, sqltypes.NullType) assert isinstance(t2.c.y.type, sqltypes.NullType) class DateTimeTest(fixtures.TestBase, AssertsCompiledSQL): def test_time_microseconds(self): dt = datetime.datetime(2008, 6, 27, 12, 0, 0, 125, ) eq_(str(dt), '2008-06-27 12:00:00.000125') sldt = sqlite.DATETIME() bp = sldt.bind_processor(None) eq_(bp(dt), '2008-06-27 12:00:00.000125') rp = sldt.result_processor(None, None) eq_(rp(bp(dt)), dt) def test_truncate_microseconds(self): dt = datetime.datetime(2008, 6, 27, 12, 0, 0, 125) dt_out = datetime.datetime(2008, 6, 27, 12, 0, 0) eq_(str(dt), '2008-06-27 12:00:00.000125') sldt = sqlite.DATETIME(truncate_microseconds=True) bp = sldt.bind_processor(None) eq_(bp(dt), '2008-06-27 12:00:00') rp = sldt.result_processor(None, None) eq_(rp(bp(dt)), dt_out) def test_custom_format_compact(self): dt = datetime.datetime(2008, 6, 27, 12, 0, 0, 125) eq_(str(dt), '2008-06-27 12:00:00.000125') sldt = sqlite.DATETIME( storage_format=( "%(year)04d%(month)02d%(day)02d" "%(hour)02d%(minute)02d%(second)02d%(microsecond)06d" ), regexp="(\d{4})(\d{2})(\d{2})(\d{2})(\d{2})(\d{2})(\d{6})", ) bp = 
sldt.bind_processor(None) eq_(bp(dt), '20080627120000000125') rp = sldt.result_processor(None, None) eq_(rp(bp(dt)), dt) class DateTest(fixtures.TestBase, AssertsCompiledSQL): def test_default(self): dt = datetime.date(2008, 6, 27) eq_(str(dt), '2008-06-27') sldt = sqlite.DATE() bp = sldt.bind_processor(None) eq_(bp(dt), '2008-06-27') rp = sldt.result_processor(None, None) eq_(rp(bp(dt)), dt) def test_custom_format(self): dt = datetime.date(2008, 6, 27) eq_(str(dt), '2008-06-27') sldt = sqlite.DATE( storage_format="%(month)02d/%(day)02d/%(year)04d", regexp="(?P\d+)/(?P\d+)/(?P\d+)", ) bp = sldt.bind_processor(None) eq_(bp(dt), '06/27/2008') rp = sldt.result_processor(None, None) eq_(rp(bp(dt)), dt) class TimeTest(fixtures.TestBase, AssertsCompiledSQL): def test_default(self): dt = datetime.date(2008, 6, 27) eq_(str(dt), '2008-06-27') sldt = sqlite.DATE() bp = sldt.bind_processor(None) eq_(bp(dt), '2008-06-27') rp = sldt.result_processor(None, None) eq_(rp(bp(dt)), dt) def test_truncate_microseconds(self): dt = datetime.time(12, 0, 0, 125) dt_out = datetime.time(12, 0, 0) eq_(str(dt), '12:00:00.000125') sldt = sqlite.TIME(truncate_microseconds=True) bp = sldt.bind_processor(None) eq_(bp(dt), '12:00:00') rp = sldt.result_processor(None, None) eq_(rp(bp(dt)), dt_out) def test_custom_format(self): dt = datetime.date(2008, 6, 27) eq_(str(dt), '2008-06-27') sldt = sqlite.DATE( storage_format="%(year)04d%(month)02d%(day)02d", regexp="(\d{4})(\d{2})(\d{2})", ) bp = sldt.bind_processor(None) eq_(bp(dt), '20080627') rp = sldt.result_processor(None, None) eq_(rp(bp(dt)), dt) class DefaultsTest(fixtures.TestBase, AssertsCompiledSQL): __only_on__ = 'sqlite' @testing.exclude('sqlite', '<', (3, 3, 8), 'sqlite3 changesets 3353 and 3440 modified ' 'behavior of default displayed in pragma ' 'table_info()') def test_default_reflection(self): # (ask_for, roundtripped_as_if_different) specs = [(String(3), '"foo"'), (NUMERIC(10, 2), '100.50'), (Integer, '5'), (Boolean, 'False')] columns = [Column('c%i' % (i + 1), t[0], server_default=text(t[1])) for (i, t) in enumerate(specs)] db = testing.db m = MetaData(db) t_table = Table('t_defaults', m, *columns) try: m.create_all() m2 = MetaData(db) rt = Table('t_defaults', m2, autoload=True) expected = [c[1] for c in specs] for i, reflected in enumerate(rt.c): eq_(str(reflected.server_default.arg), expected[i]) finally: m.drop_all() @testing.exclude('sqlite', '<', (3, 3, 8), 'sqlite3 changesets 3353 and 3440 modified ' 'behavior of default displayed in pragma ' 'table_info()') def test_default_reflection_2(self): db = testing.db m = MetaData(db) expected = ["'my_default'", '0'] table = \ """CREATE TABLE r_defaults ( data VARCHAR(40) DEFAULT 'my_default', val INTEGER NOT NULL DEFAULT 0 )""" try: db.execute(table) rt = Table('r_defaults', m, autoload=True) for i, reflected in enumerate(rt.c): eq_(str(reflected.server_default.arg), expected[i]) finally: db.execute('DROP TABLE r_defaults') def test_default_reflection_3(self): db = testing.db table = \ """CREATE TABLE r_defaults ( data VARCHAR(40) DEFAULT 'my_default', val INTEGER NOT NULL DEFAULT 0 )""" try: db.execute(table) m1 = MetaData(db) t1 = Table('r_defaults', m1, autoload=True) db.execute("DROP TABLE r_defaults") t1.create() m2 = MetaData(db) t2 = Table('r_defaults', m2, autoload=True) self.assert_compile( CreateTable(t2), "CREATE TABLE r_defaults (data VARCHAR(40) " "DEFAULT 'my_default', val INTEGER DEFAULT 0 " "NOT NULL)" ) finally: db.execute("DROP TABLE r_defaults") @testing.provide_metadata def 
test_boolean_default(self): t = Table("t", self.metadata, Column("x", Boolean, server_default=sql.false())) t.create(testing.db) testing.db.execute(t.insert()) testing.db.execute(t.insert().values(x=True)) eq_( testing.db.execute(t.select().order_by(t.c.x)).fetchall(), [(False,), (True,)] ) def test_old_style_default(self): """test non-quoted integer value on older sqlite pragma""" dialect = sqlite.dialect() eq_( dialect._get_column_info("foo", "INTEGER", False, 3, False), {'primary_key': False, 'nullable': False, 'default': '3', 'autoincrement': False, 'type': INTEGER, 'name': 'foo'} ) class DialectTest(fixtures.TestBase, AssertsExecutionResults): __only_on__ = 'sqlite' def test_extra_reserved_words(self): """Tests reserved words in identifiers. 'true', 'false', and 'column' are undocumented reserved words when used as column identifiers (as of 3.5.1). Covering them here to ensure they remain in place if the dialect's reserved_words set is updated in the future. """ meta = MetaData(testing.db) t = Table( 'reserved', meta, Column('safe', Integer), Column('true', Integer), Column('false', Integer), Column('column', Integer), ) try: meta.create_all() t.insert().execute(safe=1) list(t.select().execute()) finally: meta.drop_all() @testing.provide_metadata def test_quoted_identifiers_functional_one(self): """Tests autoload of tables created with quoted column names.""" metadata = self.metadata testing.db.execute("""CREATE TABLE "django_content_type" ( "id" integer NOT NULL PRIMARY KEY, "django_stuff" text NULL ) """) testing.db.execute(""" CREATE TABLE "django_admin_log" ( "id" integer NOT NULL PRIMARY KEY, "action_time" datetime NOT NULL, "content_type_id" integer NULL REFERENCES "django_content_type" ("id"), "object_id" text NULL, "change_message" text NOT NULL ) """) table1 = Table('django_admin_log', metadata, autoload=True) table2 = Table('django_content_type', metadata, autoload=True) j = table1.join(table2) assert j.onclause.compare(table1.c.content_type_id == table2.c.id) @testing.provide_metadata def test_quoted_identifiers_functional_two(self): """"test the edgiest of edge cases, quoted table/col names that start and end with quotes. SQLite claims to have fixed this in http://www.sqlite.org/src/info/600482d161, however it still fails if the FK points to a table name that actually has quotes as part of its name. 
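(The commented-out CREATE TABLE block below, whose foreign key would point at
the quote-embedded "a" table, is exactly that still-failing case and is left
disabled for that reason.)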
""" metadata = self.metadata testing.db.execute(r'''CREATE TABLE """a""" ( """id""" integer NOT NULL PRIMARY KEY ) ''') # unfortunately, still can't do this; sqlite quadruples # up the quotes on the table name here for pragma foreign_key_list #testing.db.execute(r''' #CREATE TABLE """b""" ( # """id""" integer NOT NULL PRIMARY KEY, # """aid""" integer NULL # REFERENCES """a""" ("""id""") #) #''') table1 = Table(r'"a"', metadata, autoload=True) assert '"id"' in table1.c #table2 = Table(r'"b"', metadata, autoload=True) #j = table1.join(table2) #assert j.onclause.compare(table1.c['"id"'] # == table2.c['"aid"']) def test_legacy_quoted_identifiers_unit(self): dialect = sqlite.dialect() dialect._broken_fk_pragma_quotes = True for row in [ (0, 'target', 'tid', 'id'), (0, '"target"', 'tid', 'id'), (0, '[target]', 'tid', 'id'), (0, "'target'", 'tid', 'id'), (0, '`target`', 'tid', 'id'), ]: fks = {} fkeys = [] dialect._parse_fk(fks, fkeys, *row) eq_(fkeys, [{ 'referred_table': 'target', 'referred_columns': ['id'], 'referred_schema': None, 'name': None, 'constrained_columns': ['tid'] }]) def test_attached_as_schema(self): cx = testing.db.connect() try: cx.execute('ATTACH DATABASE ":memory:" AS test_schema') dialect = cx.dialect assert dialect.get_table_names(cx, 'test_schema') == [] meta = MetaData(cx) Table('created', meta, Column('id', Integer), schema='test_schema') alt_master = Table('sqlite_master', meta, autoload=True, schema='test_schema') meta.create_all(cx) eq_(dialect.get_table_names(cx, 'test_schema'), ['created']) assert len(alt_master.c) > 0 meta.clear() reflected = Table('created', meta, autoload=True, schema='test_schema') assert len(reflected.c) == 1 cx.execute(reflected.insert(), dict(id=1)) r = cx.execute(reflected.select()).fetchall() assert list(r) == [(1, )] cx.execute(reflected.update(), dict(id=2)) r = cx.execute(reflected.select()).fetchall() assert list(r) == [(2, )] cx.execute(reflected.delete(reflected.c.id == 2)) r = cx.execute(reflected.select()).fetchall() assert list(r) == [] # note that sqlite_master is cleared, above meta.drop_all() assert dialect.get_table_names(cx, 'test_schema') == [] finally: cx.execute('DETACH DATABASE test_schema') @testing.exclude('sqlite', '<', (2, 6), 'no database support') def test_temp_table_reflection(self): cx = testing.db.connect() try: cx.execute('CREATE TEMPORARY TABLE tempy (id INT)') assert 'tempy' in cx.dialect.get_table_names(cx, None) meta = MetaData(cx) tempy = Table('tempy', meta, autoload=True) assert len(tempy.c) == 1 meta.drop_all() except: try: cx.execute('DROP TABLE tempy') except exc.DBAPIError: pass raise def test_file_path_is_absolute(self): d = pysqlite_dialect.dialect() eq_( d.create_connect_args(make_url('sqlite:///foo.db')), ([os.path.abspath('foo.db')], {}) ) def test_pool_class(self): e = create_engine('sqlite+pysqlite://') assert e.pool.__class__ is pool.SingletonThreadPool e = create_engine('sqlite+pysqlite:///:memory:') assert e.pool.__class__ is pool.SingletonThreadPool e = create_engine('sqlite+pysqlite:///foo.db') assert e.pool.__class__ is pool.NullPool def test_dont_reflect_autoindex(self): meta = MetaData(testing.db) t = Table('foo', meta, Column('bar', String, primary_key=True)) meta.create_all() from sqlalchemy.engine.reflection import Inspector try: inspector = Inspector(testing.db) eq_(inspector.get_indexes('foo'), []) eq_(inspector.get_indexes('foo', include_auto_indexes=True), [{'unique': 1, 'name' : u'sqlite_autoindex_foo_1', 'column_names': [u'bar']}]) finally: meta.drop_all() def 
test_create_index_with_schema(self): """Test creation of index with explicit schema""" meta = MetaData(testing.db) t = Table('foo', meta, Column('bar', String, index=True), schema='main') try: meta.create_all() finally: meta.drop_all() class SQLTest(fixtures.TestBase, AssertsCompiledSQL): """Tests SQLite-dialect specific compilation.""" __dialect__ = sqlite.dialect() def test_extract(self): t = sql.table('t', sql.column('col1')) mapping = { 'month': '%m', 'day': '%d', 'year': '%Y', 'second': '%S', 'hour': '%H', 'doy': '%j', 'minute': '%M', 'epoch': '%s', 'dow': '%w', 'week': '%W', } for field, subst in mapping.items(): self.assert_compile(select([extract(field, t.c.col1)]), "SELECT CAST(STRFTIME('%s', t.col1) AS " "INTEGER) AS anon_1 FROM t" % subst) def test_true_false(self): self.assert_compile( sql.false(), "0" ) self.assert_compile( sql.true(), "1" ) def test_localtime(self): self.assert_compile( func.localtimestamp(), 'DATETIME(CURRENT_TIMESTAMP, "localtime")' ) def test_constraints_with_schemas(self): metadata = MetaData() t1 = Table('t1', metadata, Column('id', Integer, primary_key=True), schema='master') t2 = Table('t2', metadata, Column('id', Integer, primary_key=True), Column('t1_id', Integer, ForeignKey('master.t1.id')), schema='master' ) t3 = Table('t3', metadata, Column('id', Integer, primary_key=True), Column('t1_id', Integer, ForeignKey('master.t1.id')), schema='alternate' ) t4 = Table('t4', metadata, Column('id', Integer, primary_key=True), Column('t1_id', Integer, ForeignKey('master.t1.id')), ) # schema->schema, generate REFERENCES with no schema name self.assert_compile( schema.CreateTable(t2), "CREATE TABLE master.t2 (" "id INTEGER NOT NULL, " "t1_id INTEGER, " "PRIMARY KEY (id), " "FOREIGN KEY(t1_id) REFERENCES t1 (id)" ")" ) # schema->different schema, don't generate REFERENCES self.assert_compile( schema.CreateTable(t3), "CREATE TABLE alternate.t3 (" "id INTEGER NOT NULL, " "t1_id INTEGER, " "PRIMARY KEY (id)" ")" ) # same for local schema self.assert_compile( schema.CreateTable(t4), "CREATE TABLE t4 (" "id INTEGER NOT NULL, " "t1_id INTEGER, " "PRIMARY KEY (id)" ")" ) class InsertTest(fixtures.TestBase, AssertsExecutionResults): """Tests inserts and autoincrement.""" __only_on__ = 'sqlite' # empty insert (i.e. 
INSERT INTO table DEFAULT VALUES) fails on # 3.3.7 and before def _test_empty_insert(self, table, expect=1): try: table.create() for wanted in expect, expect * 2: table.insert().execute() rows = table.select().execute().fetchall() eq_(len(rows), wanted) finally: table.drop() @testing.exclude('sqlite', '<', (3, 3, 8), 'no database support') def test_empty_insert_pk1(self): self._test_empty_insert(Table('a', MetaData(testing.db), Column('id', Integer, primary_key=True))) @testing.exclude('sqlite', '<', (3, 3, 8), 'no database support') def test_empty_insert_pk2(self): assert_raises(exc.DBAPIError, self._test_empty_insert, Table('b' , MetaData(testing.db), Column('x', Integer, primary_key=True), Column('y', Integer, primary_key=True))) @testing.exclude('sqlite', '<', (3, 3, 8), 'no database support') def test_empty_insert_pk3(self): assert_raises(exc.DBAPIError, self._test_empty_insert, Table('c' , MetaData(testing.db), Column('x', Integer, primary_key=True), Column('y', Integer, DefaultClause('123'), primary_key=True))) @testing.exclude('sqlite', '<', (3, 3, 8), 'no database support') def test_empty_insert_pk4(self): self._test_empty_insert(Table('d', MetaData(testing.db), Column('x', Integer, primary_key=True), Column('y', Integer, DefaultClause('123' )))) @testing.exclude('sqlite', '<', (3, 3, 8), 'no database support') def test_empty_insert_nopk1(self): self._test_empty_insert(Table('e', MetaData(testing.db), Column('id', Integer))) @testing.exclude('sqlite', '<', (3, 3, 8), 'no database support') def test_empty_insert_nopk2(self): self._test_empty_insert(Table('f', MetaData(testing.db), Column('x', Integer), Column('y', Integer))) def test_inserts_with_spaces(self): tbl = Table('tbl', MetaData('sqlite:///'), Column('with space', Integer), Column('without', Integer)) tbl.create() try: tbl.insert().execute({'without': 123}) assert list(tbl.select().execute()) == [(None, 123)] tbl.insert().execute({'with space': 456}) assert list(tbl.select().execute()) == [(None, 123), (456, None)] finally: tbl.drop() def full_text_search_missing(): """Test if full text search is not implemented and return False if it is and True otherwise.""" try: testing.db.execute('CREATE VIRTUAL TABLE t using FTS3;') testing.db.execute('DROP TABLE t;') return False except: return True class MatchTest(fixtures.TestBase, AssertsCompiledSQL): __only_on__ = 'sqlite' __skip_if__ = full_text_search_missing, @classmethod def setup_class(cls): global metadata, cattable, matchtable metadata = MetaData(testing.db) testing.db.execute(""" CREATE VIRTUAL TABLE cattable using FTS3 ( id INTEGER NOT NULL, description VARCHAR(50), PRIMARY KEY (id) ) """) cattable = Table('cattable', metadata, autoload=True) testing.db.execute(""" CREATE VIRTUAL TABLE matchtable using FTS3 ( id INTEGER NOT NULL, title VARCHAR(200), category_id INTEGER NOT NULL, PRIMARY KEY (id) ) """) matchtable = Table('matchtable', metadata, autoload=True) metadata.create_all() cattable.insert().execute([{'id': 1, 'description': 'Python'}, {'id': 2, 'description': 'Ruby'}]) matchtable.insert().execute([{'id': 1, 'title' : 'Agile Web Development with Rails' , 'category_id': 2}, {'id': 2, 'title': 'Dive Into Python', 'category_id': 1}, {'id': 3, 'title' : "Programming Matz's Ruby", 'category_id': 2}, {'id': 4, 'title' : 'The Definitive Guide to Django', 'category_id': 1}, {'id': 5, 'title' : 'Python in a Nutshell', 'category_id': 1}]) @classmethod def teardown_class(cls): metadata.drop_all() def test_expression(self): 
self.assert_compile(matchtable.c.title.match('somstr'), 'matchtable.title MATCH ?', dialect=sqlite.dialect()) def test_simple_match(self): results = \ matchtable.select().where(matchtable.c.title.match('python' )).order_by(matchtable.c.id).execute().fetchall() eq_([2, 5], [r.id for r in results]) def test_simple_prefix_match(self): results = \ matchtable.select().where(matchtable.c.title.match('nut*' )).execute().fetchall() eq_([5], [r.id for r in results]) def test_or_match(self): results2 = \ matchtable.select().where( matchtable.c.title.match('nutshell OR ruby' )).order_by(matchtable.c.id).execute().fetchall() eq_([3, 5], [r.id for r in results2]) def test_and_match(self): results2 = \ matchtable.select().where( matchtable.c.title.match('python nutshell' )).execute().fetchall() eq_([5], [r.id for r in results2]) def test_match_across_joins(self): results = matchtable.select().where(and_(cattable.c.id == matchtable.c.category_id, cattable.c.description.match('Ruby' ))).order_by(matchtable.c.id).execute().fetchall() eq_([1, 3], [r.id for r in results]) class AutoIncrementTest(fixtures.TestBase, AssertsCompiledSQL): def test_sqlite_autoincrement(self): table = Table('autoinctable', MetaData(), Column('id', Integer, primary_key=True), Column('x', Integer, default=None), sqlite_autoincrement=True) self.assert_compile(schema.CreateTable(table), 'CREATE TABLE autoinctable (id INTEGER NOT ' 'NULL PRIMARY KEY AUTOINCREMENT, x INTEGER)' , dialect=sqlite.dialect()) def test_sqlite_autoincrement_constraint(self): table = Table( 'autoinctable', MetaData(), Column('id', Integer, primary_key=True), Column('x', Integer, default=None), UniqueConstraint('x'), sqlite_autoincrement=True, ) self.assert_compile(schema.CreateTable(table), 'CREATE TABLE autoinctable (id INTEGER NOT ' 'NULL PRIMARY KEY AUTOINCREMENT, x ' 'INTEGER, UNIQUE (x))', dialect=sqlite.dialect()) def test_sqlite_no_autoincrement(self): table = Table('noautoinctable', MetaData(), Column('id', Integer, primary_key=True), Column('x', Integer, default=None)) self.assert_compile(schema.CreateTable(table), 'CREATE TABLE noautoinctable (id INTEGER ' 'NOT NULL, x INTEGER, PRIMARY KEY (id))', dialect=sqlite.dialect()) def test_sqlite_autoincrement_int_affinity(self): class MyInteger(TypeDecorator): impl = Integer table = Table( 'autoinctable', MetaData(), Column('id', MyInteger, primary_key=True), sqlite_autoincrement=True, ) self.assert_compile(schema.CreateTable(table), 'CREATE TABLE autoinctable (id INTEGER NOT ' 'NULL PRIMARY KEY AUTOINCREMENT)', dialect=sqlite.dialect()) class ReflectHeadlessFKsTest(fixtures.TestBase): __only_on__ = 'sqlite' def setup(self): testing.db.execute("CREATE TABLE a (id INTEGER PRIMARY KEY)") # this syntax actually works on other DBs perhaps we'd want to add # tests to test_reflection testing.db.execute("CREATE TABLE b (id INTEGER PRIMARY KEY REFERENCES a)") def teardown(self): testing.db.execute("drop table b") testing.db.execute("drop table a") def test_reflect_tables_fk_no_colref(self): meta = MetaData() a = Table('a', meta, autoload=True, autoload_with=testing.db) b = Table('b', meta, autoload=True, autoload_with=testing.db) assert b.c.id.references(a.c.id) class ReflectFKConstraintTest(fixtures.TestBase): __only_on__ = 'sqlite' def setup(self): testing.db.execute("CREATE TABLE a1 (id INTEGER PRIMARY KEY)") testing.db.execute("CREATE TABLE a2 (id INTEGER PRIMARY KEY)") testing.db.execute("CREATE TABLE b (id INTEGER PRIMARY KEY, " "FOREIGN KEY(id) REFERENCES a1(id)," "FOREIGN KEY(id) REFERENCES a2(id)" ")") 
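# the "c" table created next names its PRIMARY KEY and FOREIGN KEY
# constraints explicitly, unlike "b" above; the tests that follow verify
# that the reflected constraint names still come back as None either way,
# since SQLite's foreign_key_list pragma does not appear to expose
# constraint names.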
testing.db.execute("CREATE TABLE c (id INTEGER, " "CONSTRAINT bar PRIMARY KEY(id)," "CONSTRAINT foo1 FOREIGN KEY(id) REFERENCES a1(id)," "CONSTRAINT foo2 FOREIGN KEY(id) REFERENCES a2(id)" ")") def teardown(self): testing.db.execute("drop table c") testing.db.execute("drop table b") testing.db.execute("drop table a1") testing.db.execute("drop table a2") def test_name_is_none(self): # and not "0" meta = MetaData() b = Table('b', meta, autoload=True, autoload_with=testing.db) eq_( [con.name for con in b.constraints], [None, None, None] ) def test_name_not_none(self): # we don't have names for PK constraints, # it appears we get back None in the pragma for # FKs also (also it doesn't even appear to be documented on sqlite's docs # at http://www.sqlite.org/pragma.html#pragma_foreign_key_list # how did we ever know that's the "name" field ??) meta = MetaData() c = Table('c', meta, autoload=True, autoload_with=testing.db) eq_( set([con.name for con in c.constraints]), set([None, None]) ) SQLAlchemy-0.8.4/test/dialect/test_suite.py0000644000076500000240000000005012251147172021354 0ustar classicstaff00000000000000from sqlalchemy.testing.suite import * SQLAlchemy-0.8.4/test/dialect/test_sybase.py0000644000076500000240000000134412251150015021506 0ustar classicstaff00000000000000from sqlalchemy import * from sqlalchemy import sql from sqlalchemy.databases import sybase from sqlalchemy.testing import * class CompileTest(fixtures.TestBase, AssertsCompiledSQL): __dialect__ = sybase.dialect() def test_extract(self): t = sql.table('t', sql.column('col1')) mapping = { 'day': 'day', 'doy': 'dayofyear', 'dow': 'weekday', 'milliseconds': 'millisecond', 'millisecond': 'millisecond', 'year': 'year', } for field, subst in mapping.items(): self.assert_compile( select([extract(field, t.c.col1)]), 'SELECT DATEPART("%s", t.col1) AS anon_1 FROM t' % subst) SQLAlchemy-0.8.4/test/engine/0000755000076500000240000000000012251151573016457 5ustar classicstaff00000000000000SQLAlchemy-0.8.4/test/engine/__init__.py0000644000076500000240000000000012251147172020556 0ustar classicstaff00000000000000SQLAlchemy-0.8.4/test/engine/test_bind.py0000644000076500000240000001637712251150015021010 0ustar classicstaff00000000000000"""tests the "bind" attribute/argument across schema and SQL, including the deprecated versions of these arguments""" from __future__ import with_statement from sqlalchemy.testing import eq_, assert_raises from sqlalchemy import engine, exc from sqlalchemy import MetaData, ThreadLocalMetaData from sqlalchemy import Integer, text from sqlalchemy.testing.schema import Table from sqlalchemy.testing.schema import Column import sqlalchemy as sa from sqlalchemy import testing from sqlalchemy.testing import fixtures class BindTest(fixtures.TestBase): def test_bind_close_engine(self): e = testing.db with e.connect() as conn: assert not conn.closed assert conn.closed with e.contextual_connect() as conn: assert not conn.closed assert conn.closed def test_bind_close_conn(self): e = testing.db conn = e.connect() with conn.connect() as c2: assert not c2.closed assert not conn.closed assert c2.closed with conn.contextual_connect() as c2: assert not c2.closed assert not conn.closed assert c2.closed def test_create_drop_explicit(self): metadata = MetaData() table = Table('test_table', metadata, Column('foo', Integer)) for bind in ( testing.db, testing.db.connect() ): for args in [ ([], {'bind':bind}), ([bind], {}) ]: metadata.create_all(*args[0], **args[1]) assert table.exists(*args[0], **args[1]) metadata.drop_all(*args[0], 
**args[1]) table.create(*args[0], **args[1]) table.drop(*args[0], **args[1]) assert not table.exists(*args[0], **args[1]) def test_create_drop_err_metadata(self): metadata = MetaData() table = Table('test_table', metadata, Column('foo', Integer)) for meth in [metadata.create_all, metadata.drop_all]: try: meth() assert False except exc.UnboundExecutionError, e: eq_(str(e), "The MetaData is not bound to an Engine or " "Connection. Execution can not proceed without a " "database to execute against. Either execute with " "an explicit connection or assign the MetaData's " ".bind to enable implicit execution.") def test_create_drop_err_table(self): metadata = MetaData() table = Table('test_table', metadata, Column('foo', Integer)) for meth in [ table.exists, table.create, table.drop, ]: try: meth() assert False except exc.UnboundExecutionError, e: eq_( str(e), "The Table 'test_table' " "is not bound to an Engine or Connection. " "Execution can not proceed without a database to execute " "against. Either execute with an explicit connection or " "assign this Table's .metadata.bind to enable implicit " "execution.") @testing.uses_deprecated() def test_create_drop_bound(self): for meta in (MetaData,ThreadLocalMetaData): for bind in ( testing.db, testing.db.connect() ): metadata = meta() table = Table('test_table', metadata, Column('foo', Integer)) metadata.bind = bind assert metadata.bind is table.bind is bind metadata.create_all() assert table.exists() metadata.drop_all() table.create() table.drop() assert not table.exists() metadata = meta() table = Table('test_table', metadata, Column('foo', Integer)) metadata.bind = bind assert metadata.bind is table.bind is bind metadata.create_all() assert table.exists() metadata.drop_all() table.create() table.drop() assert not table.exists() if isinstance(bind, engine.Connection): bind.close() def test_create_drop_constructor_bound(self): for bind in ( testing.db, testing.db.connect() ): try: for args in ( ([bind], {}), ([], {'bind':bind}), ): metadata = MetaData(*args[0], **args[1]) table = Table('test_table', metadata, Column('foo', Integer)) assert metadata.bind is table.bind is bind metadata.create_all() assert table.exists() metadata.drop_all() table.create() table.drop() assert not table.exists() finally: if isinstance(bind, engine.Connection): bind.close() def test_implicit_execution(self): metadata = MetaData() table = Table('test_table', metadata, Column('foo', Integer), test_needs_acid=True, ) conn = testing.db.connect() metadata.create_all(bind=conn) try: trans = conn.begin() metadata.bind = conn t = table.insert() assert t.bind is conn table.insert().execute(foo=5) table.insert().execute(foo=6) table.insert().execute(foo=7) trans.rollback() metadata.bind = None assert conn.execute('select count(*) from test_table' ).scalar() == 0 finally: metadata.drop_all(bind=conn) def test_clauseelement(self): metadata = MetaData() table = Table('test_table', metadata, Column('foo', Integer)) metadata.create_all(bind=testing.db) try: for elem in [ table.select, lambda **kwargs: sa.func.current_timestamp(**kwargs).select(), # func.current_timestamp().select, lambda **kwargs:text("select * from test_table", **kwargs) ]: for bind in ( testing.db, testing.db.connect() ): try: e = elem(bind=bind) assert e.bind is bind e.execute().close() finally: if isinstance(bind, engine.Connection): bind.close() e = elem() assert e.bind is None assert_raises( exc.UnboundExecutionError, e.execute ) finally: if isinstance(bind, engine.Connection): bind.close() 
metadata.drop_all(bind=testing.db) SQLAlchemy-0.8.4/test/engine/test_ddlemit.py0000644000076500000240000001504312251150015021503 0ustar classicstaff00000000000000from sqlalchemy.testing import fixtures from sqlalchemy.engine.ddl import SchemaGenerator, SchemaDropper from sqlalchemy.engine import default from sqlalchemy import MetaData, Table, Column, Integer, Sequence from sqlalchemy import schema from sqlalchemy.testing.mock import Mock class EmitDDLTest(fixtures.TestBase): def _mock_connection(self, item_exists): def has_item(connection, name, schema): return item_exists(name) return Mock(dialect=Mock( supports_sequences=True, has_table=Mock(side_effect=has_item), has_sequence=Mock(side_effect=has_item) ) ) def _mock_create_fixture(self, checkfirst, tables, item_exists=lambda item: False): connection = self._mock_connection(item_exists) return SchemaGenerator(connection.dialect, connection, checkfirst=checkfirst, tables=tables) def _mock_drop_fixture(self, checkfirst, tables, item_exists=lambda item: True): connection = self._mock_connection(item_exists) return SchemaDropper(connection.dialect, connection, checkfirst=checkfirst, tables=tables) def _table_fixture(self): m = MetaData() return (m, ) + tuple( Table('t%d' % i, m, Column('x', Integer)) for i in xrange(1, 6) ) def _table_seq_fixture(self): m = MetaData() s1 = Sequence('s1') s2 = Sequence('s2') t1 = Table('t1', m, Column("x", Integer, s1, primary_key=True)) t2 = Table('t2', m, Column("x", Integer, s2, primary_key=True)) return m, t1, t2, s1, s2 def test_create_seq_checkfirst(self): m, t1, t2, s1, s2 = self._table_seq_fixture() generator = self._mock_create_fixture(True, [t1, t2], item_exists=lambda t: t not in ("t1", "s1") ) self._assert_create([t1, s1], generator, m) def test_drop_seq_checkfirst(self): m, t1, t2, s1, s2 = self._table_seq_fixture() generator = self._mock_drop_fixture(True, [t1, t2], item_exists=lambda t: t in ("t1", "s1") ) self._assert_drop([t1, s1], generator, m) def test_create_collection_checkfirst(self): m, t1, t2, t3, t4, t5 = self._table_fixture() generator = self._mock_create_fixture(True, [t2, t3, t4], item_exists=lambda t: t not in ("t2", "t4") ) self._assert_create_tables([t2, t4], generator, m) def test_drop_collection_checkfirst(self): m, t1, t2, t3, t4, t5 = self._table_fixture() generator = self._mock_drop_fixture(True, [t2, t3, t4], item_exists=lambda t: t in ("t2", "t4") ) self._assert_drop_tables([t2, t4], generator, m) def test_create_collection_nocheck(self): m, t1, t2, t3, t4, t5 = self._table_fixture() generator = self._mock_create_fixture(False, [t2, t3, t4], item_exists=lambda t: t not in ("t2", "t4") ) self._assert_create_tables([t2, t3, t4], generator, m) def test_create_empty_collection(self): m, t1, t2, t3, t4, t5 = self._table_fixture() generator = self._mock_create_fixture(True, [], item_exists=lambda t: t not in ("t2", "t4") ) self._assert_create_tables([], generator, m) def test_drop_empty_collection(self): m, t1, t2, t3, t4, t5 = self._table_fixture() generator = self._mock_drop_fixture(True, [], item_exists=lambda t: t in ("t2", "t4") ) self._assert_drop_tables([], generator, m) def test_drop_collection_nocheck(self): m, t1, t2, t3, t4, t5 = self._table_fixture() generator = self._mock_drop_fixture(False, [t2, t3, t4], item_exists=lambda t: t in ("t2", "t4") ) self._assert_drop_tables([t2, t3, t4], generator, m) def test_create_metadata_checkfirst(self): m, t1, t2, t3, t4, t5 = self._table_fixture() generator = self._mock_create_fixture(True, None, item_exists=lambda t: t not in 
("t2", "t4") ) self._assert_create_tables([t2, t4], generator, m) def test_drop_metadata_checkfirst(self): m, t1, t2, t3, t4, t5 = self._table_fixture() generator = self._mock_drop_fixture(True, None, item_exists=lambda t: t in ("t2", "t4") ) self._assert_drop_tables([t2, t4], generator, m) def test_create_metadata_nocheck(self): m, t1, t2, t3, t4, t5 = self._table_fixture() generator = self._mock_create_fixture(False, None, item_exists=lambda t: t not in ("t2", "t4") ) self._assert_create_tables([t1, t2, t3, t4, t5], generator, m) def test_drop_metadata_nocheck(self): m, t1, t2, t3, t4, t5 = self._table_fixture() generator = self._mock_drop_fixture(False, None, item_exists=lambda t: t in ("t2", "t4") ) self._assert_drop_tables([t1, t2, t3, t4, t5], generator, m) def _assert_create_tables(self, elements, generator, argument): self._assert_ddl(schema.CreateTable, elements, generator, argument) def _assert_drop_tables(self, elements, generator, argument): self._assert_ddl(schema.DropTable, elements, generator, argument) def _assert_create(self, elements, generator, argument): self._assert_ddl( (schema.CreateTable, schema.CreateSequence), elements, generator, argument) def _assert_drop(self, elements, generator, argument): self._assert_ddl( (schema.DropTable, schema.DropSequence), elements, generator, argument) def _assert_ddl(self, ddl_cls, elements, generator, argument): generator.traverse_single(argument) for call_ in generator.connection.execute.mock_calls: c = call_[1][0] assert isinstance(c, ddl_cls) assert c.element in elements, "element %r was not expected"\ % c.element elements.remove(c.element) assert not elements, "elements remain in list: %r" % elements SQLAlchemy-0.8.4/test/engine/test_ddlevents.py0000644000076500000240000005322512251150015022055 0ustar classicstaff00000000000000from __future__ import with_statement from sqlalchemy.testing import assert_raises, assert_raises_message from sqlalchemy.schema import DDL, CheckConstraint, AddConstraint, \ DropConstraint from sqlalchemy import create_engine from sqlalchemy import MetaData, Integer, String, event, exc, text from sqlalchemy.testing.schema import Table from sqlalchemy.testing.schema import Column import sqlalchemy as tsa from sqlalchemy import testing from sqlalchemy.testing import engines from sqlalchemy.testing import AssertsCompiledSQL, eq_ from nose import SkipTest from sqlalchemy.testing import fixtures class DDLEventTest(fixtures.TestBase): class Canary(object): def __init__(self, schema_item, bind): self.state = None self.schema_item = schema_item self.bind = bind def before_create(self, schema_item, bind, **kw): assert self.state is None assert schema_item is self.schema_item assert bind is self.bind self.state = 'before-create' def after_create(self, schema_item, bind, **kw): assert self.state in ('before-create', 'skipped') assert schema_item is self.schema_item assert bind is self.bind self.state = 'after-create' def before_drop(self, schema_item, bind, **kw): assert self.state is None assert schema_item is self.schema_item assert bind is self.bind self.state = 'before-drop' def after_drop(self, schema_item, bind, **kw): assert self.state in ('before-drop', 'skipped') assert schema_item is self.schema_item assert bind is self.bind self.state = 'after-drop' def setup(self): self.bind = engines.mock_engine() self.metadata = MetaData() self.table = Table('t', self.metadata, Column('id', Integer)) def test_table_create_before(self): table, bind = self.table, self.bind canary = self.Canary(table, bind) 
event.listen(table, 'before_create', canary.before_create) table.create(bind) assert canary.state == 'before-create' table.drop(bind) assert canary.state == 'before-create' def test_table_create_after(self): table, bind = self.table, self.bind canary = self.Canary(table, bind) event.listen(table, 'after_create', canary.after_create) canary.state = 'skipped' table.create(bind) assert canary.state == 'after-create' table.drop(bind) assert canary.state == 'after-create' def test_table_create_both(self): table, bind = self.table, self.bind canary = self.Canary(table, bind) event.listen(table, 'before_create', canary.before_create) event.listen(table, 'after_create', canary.after_create) table.create(bind) assert canary.state == 'after-create' table.drop(bind) assert canary.state == 'after-create' def test_table_drop_before(self): table, bind = self.table, self.bind canary = self.Canary(table, bind) event.listen(table, 'before_drop', canary.before_drop) table.create(bind) assert canary.state is None table.drop(bind) assert canary.state == 'before-drop' def test_table_drop_after(self): table, bind = self.table, self.bind canary = self.Canary(table, bind) event.listen(table, 'after_drop', canary.after_drop) table.create(bind) assert canary.state is None canary.state = 'skipped' table.drop(bind) assert canary.state == 'after-drop' def test_table_drop_both(self): table, bind = self.table, self.bind canary = self.Canary(table, bind) event.listen(table, 'before_drop', canary.before_drop) event.listen(table, 'after_drop', canary.after_drop) table.create(bind) assert canary.state is None table.drop(bind) assert canary.state == 'after-drop' def test_table_all(self): table, bind = self.table, self.bind canary = self.Canary(table, bind) event.listen(table, 'before_create', canary.before_create) event.listen(table, 'after_create', canary.after_create) event.listen(table, 'before_drop', canary.before_drop) event.listen(table, 'after_drop', canary.after_drop) assert canary.state is None table.create(bind) assert canary.state == 'after-create' canary.state = None table.drop(bind) assert canary.state == 'after-drop' def test_metadata_create_before(self): metadata, bind = self.metadata, self.bind canary = self.Canary(metadata, bind) event.listen(metadata, 'before_create', canary.before_create) metadata.create_all(bind) assert canary.state == 'before-create' metadata.drop_all(bind) assert canary.state == 'before-create' def test_metadata_create_after(self): metadata, bind = self.metadata, self.bind canary = self.Canary(metadata, bind) event.listen(metadata, 'after_create', canary.after_create) canary.state = 'skipped' metadata.create_all(bind) assert canary.state == 'after-create' metadata.drop_all(bind) assert canary.state == 'after-create' def test_metadata_create_both(self): metadata, bind = self.metadata, self.bind canary = self.Canary(metadata, bind) event.listen(metadata, 'before_create', canary.before_create) event.listen(metadata, 'after_create', canary.after_create) metadata.create_all(bind) assert canary.state == 'after-create' metadata.drop_all(bind) assert canary.state == 'after-create' def test_metadata_table_isolation(self): metadata, table, bind = self.metadata, self.table, self.bind table_canary = self.Canary(table, bind) event.listen(table, 'before_create', table_canary.before_create) metadata_canary = self.Canary(metadata, bind) event.listen(metadata, 'before_create', metadata_canary.before_create) self.table.create(self.bind) assert metadata_canary.state == None def test_append_listener(self): 
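# the deprecated append_ddl_listener() API accepts only the string event
# names ('before-create', 'after-create', 'before-drop', 'after-drop');
# the asserts below verify that an unrecognized name such as 'blah' raises
# InvalidRequestError rather than being silently ignored.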
metadata, table, bind = self.metadata, self.table, self.bind fn = lambda *a: None table.append_ddl_listener('before-create', fn) assert_raises(exc.InvalidRequestError, table.append_ddl_listener, 'blah', fn) metadata.append_ddl_listener('before-create', fn) assert_raises(exc.InvalidRequestError, metadata.append_ddl_listener, 'blah', fn) class DDLExecutionTest(fixtures.TestBase): def setup(self): self.engine = engines.mock_engine() self.metadata = MetaData(self.engine) self.users = Table('users', self.metadata, Column('user_id', Integer, primary_key=True), Column('user_name', String(40)), ) def test_table_standalone(self): users, engine = self.users, self.engine event.listen(users, 'before_create', DDL('mxyzptlk')) event.listen(users, 'after_create', DDL('klptzyxm')) event.listen(users, 'before_drop', DDL('xyzzy')) event.listen(users, 'after_drop', DDL('fnord')) users.create() strings = [str(x) for x in engine.mock] assert 'mxyzptlk' in strings assert 'klptzyxm' in strings assert 'xyzzy' not in strings assert 'fnord' not in strings del engine.mock[:] users.drop() strings = [str(x) for x in engine.mock] assert 'mxyzptlk' not in strings assert 'klptzyxm' not in strings assert 'xyzzy' in strings assert 'fnord' in strings def test_table_by_metadata(self): metadata, users, engine = self.metadata, self.users, self.engine event.listen(users, 'before_create', DDL('mxyzptlk')) event.listen(users, 'after_create', DDL('klptzyxm')) event.listen(users, 'before_drop', DDL('xyzzy')) event.listen(users, 'after_drop', DDL('fnord')) metadata.create_all() strings = [str(x) for x in engine.mock] assert 'mxyzptlk' in strings assert 'klptzyxm' in strings assert 'xyzzy' not in strings assert 'fnord' not in strings del engine.mock[:] metadata.drop_all() strings = [str(x) for x in engine.mock] assert 'mxyzptlk' not in strings assert 'klptzyxm' not in strings assert 'xyzzy' in strings assert 'fnord' in strings @testing.uses_deprecated(r'See DDLEvents') def test_table_by_metadata_deprecated(self): metadata, users, engine = self.metadata, self.users, self.engine DDL('mxyzptlk').execute_at('before-create', users) DDL('klptzyxm').execute_at('after-create', users) DDL('xyzzy').execute_at('before-drop', users) DDL('fnord').execute_at('after-drop', users) metadata.create_all() strings = [str(x) for x in engine.mock] assert 'mxyzptlk' in strings assert 'klptzyxm' in strings assert 'xyzzy' not in strings assert 'fnord' not in strings del engine.mock[:] metadata.drop_all() strings = [str(x) for x in engine.mock] assert 'mxyzptlk' not in strings assert 'klptzyxm' not in strings assert 'xyzzy' in strings assert 'fnord' in strings def test_deprecated_append_ddl_listener_table(self): metadata, users, engine = self.metadata, self.users, self.engine canary = [] users.append_ddl_listener('before-create', lambda e, t, b:canary.append('mxyzptlk') ) users.append_ddl_listener('after-create', lambda e, t, b:canary.append('klptzyxm') ) users.append_ddl_listener('before-drop', lambda e, t, b:canary.append('xyzzy') ) users.append_ddl_listener('after-drop', lambda e, t, b:canary.append('fnord') ) metadata.create_all() assert 'mxyzptlk' in canary assert 'klptzyxm' in canary assert 'xyzzy' not in canary assert 'fnord' not in canary del engine.mock[:] canary[:] = [] metadata.drop_all() assert 'mxyzptlk' not in canary assert 'klptzyxm' not in canary assert 'xyzzy' in canary assert 'fnord' in canary def test_deprecated_append_ddl_listener_metadata(self): metadata, users, engine = self.metadata, self.users, self.engine canary = [] 
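# unlike the table-level listeners in the previous test, the metadata-level
# listeners registered below are also handed the collection of tables being
# created or dropped, hence the tables=None keyword in each lambda.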
metadata.append_ddl_listener('before-create', lambda e, t, b, tables=None:canary.append('mxyzptlk') ) metadata.append_ddl_listener('after-create', lambda e, t, b, tables=None:canary.append('klptzyxm') ) metadata.append_ddl_listener('before-drop', lambda e, t, b, tables=None:canary.append('xyzzy') ) metadata.append_ddl_listener('after-drop', lambda e, t, b, tables=None:canary.append('fnord') ) metadata.create_all() assert 'mxyzptlk' in canary assert 'klptzyxm' in canary assert 'xyzzy' not in canary assert 'fnord' not in canary del engine.mock[:] canary[:] = [] metadata.drop_all() assert 'mxyzptlk' not in canary assert 'klptzyxm' not in canary assert 'xyzzy' in canary assert 'fnord' in canary def test_metadata(self): metadata, engine = self.metadata, self.engine event.listen(metadata, 'before_create', DDL('mxyzptlk')) event.listen(metadata, 'after_create', DDL('klptzyxm')) event.listen(metadata, 'before_drop', DDL('xyzzy')) event.listen(metadata, 'after_drop', DDL('fnord')) metadata.create_all() strings = [str(x) for x in engine.mock] assert 'mxyzptlk' in strings assert 'klptzyxm' in strings assert 'xyzzy' not in strings assert 'fnord' not in strings del engine.mock[:] metadata.drop_all() strings = [str(x) for x in engine.mock] assert 'mxyzptlk' not in strings assert 'klptzyxm' not in strings assert 'xyzzy' in strings assert 'fnord' in strings @testing.uses_deprecated(r'See DDLEvents') def test_metadata_deprecated(self): metadata, engine = self.metadata, self.engine DDL('mxyzptlk').execute_at('before-create', metadata) DDL('klptzyxm').execute_at('after-create', metadata) DDL('xyzzy').execute_at('before-drop', metadata) DDL('fnord').execute_at('after-drop', metadata) metadata.create_all() strings = [str(x) for x in engine.mock] assert 'mxyzptlk' in strings assert 'klptzyxm' in strings assert 'xyzzy' not in strings assert 'fnord' not in strings del engine.mock[:] metadata.drop_all() strings = [str(x) for x in engine.mock] assert 'mxyzptlk' not in strings assert 'klptzyxm' not in strings assert 'xyzzy' in strings assert 'fnord' in strings def test_conditional_constraint(self): metadata, users, engine = self.metadata, self.users, self.engine nonpg_mock = engines.mock_engine(dialect_name='sqlite') pg_mock = engines.mock_engine(dialect_name='postgresql') constraint = CheckConstraint('a < b', name='my_test_constraint' , table=users) # by placing the constraint in an Add/Drop construct, the # 'inline_ddl' flag is set to False event.listen( users, 'after_create', AddConstraint(constraint).execute_if(dialect='postgresql'), ) event.listen( users, 'before_drop', DropConstraint(constraint).execute_if(dialect='postgresql'), ) metadata.create_all(bind=nonpg_mock) strings = ' '.join(str(x) for x in nonpg_mock.mock) assert 'my_test_constraint' not in strings metadata.drop_all(bind=nonpg_mock) strings = ' '.join(str(x) for x in nonpg_mock.mock) assert 'my_test_constraint' not in strings metadata.create_all(bind=pg_mock) strings = ' '.join(str(x) for x in pg_mock.mock) assert 'my_test_constraint' in strings metadata.drop_all(bind=pg_mock) strings = ' '.join(str(x) for x in pg_mock.mock) assert 'my_test_constraint' in strings @testing.uses_deprecated(r'See DDLEvents') def test_conditional_constraint_deprecated(self): metadata, users, engine = self.metadata, self.users, self.engine nonpg_mock = engines.mock_engine(dialect_name='sqlite') pg_mock = engines.mock_engine(dialect_name='postgresql') constraint = CheckConstraint('a < b', name='my_test_constraint' , table=users) # by placing the constraint in an Add/Drop 
construct, the # 'inline_ddl' flag is set to False AddConstraint(constraint, on='postgresql' ).execute_at('after-create', users) DropConstraint(constraint, on='postgresql' ).execute_at('before-drop', users) metadata.create_all(bind=nonpg_mock) strings = ' '.join(str(x) for x in nonpg_mock.mock) assert 'my_test_constraint' not in strings metadata.drop_all(bind=nonpg_mock) strings = ' '.join(str(x) for x in nonpg_mock.mock) assert 'my_test_constraint' not in strings metadata.create_all(bind=pg_mock) strings = ' '.join(str(x) for x in pg_mock.mock) assert 'my_test_constraint' in strings metadata.drop_all(bind=pg_mock) strings = ' '.join(str(x) for x in pg_mock.mock) assert 'my_test_constraint' in strings def test_ddl_execute(self): try: engine = create_engine('sqlite:///') except ImportError: raise SkipTest('Requires sqlite') cx = engine.connect() table = self.users ddl = DDL('SELECT 1') for py in ('engine.execute(ddl)', 'engine.execute(ddl, table)', 'cx.execute(ddl)', 'cx.execute(ddl, table)', 'ddl.execute(engine)', 'ddl.execute(engine, table)', 'ddl.execute(cx)', 'ddl.execute(cx, table)'): r = eval(py) assert list(r) == [(1,)], py for py in ('ddl.execute()', 'ddl.execute(target=table)'): try: r = eval(py) assert False except tsa.exc.UnboundExecutionError: pass for bind in engine, cx: ddl.bind = bind for py in ('ddl.execute()', 'ddl.execute(target=table)'): r = eval(py) assert list(r) == [(1,)], py @testing.fails_on('postgresql+pg8000', 'pg8000 requires explicit types') def test_platform_escape(self): """test the escaping of % characters in the DDL construct.""" default_from = testing.db.dialect.statement_compiler( testing.db.dialect, None).default_from() # We're abusing the DDL() # construct here by pushing a SELECT through it # so that we can verify the round trip. # the DDL() will trigger autocommit, which prohibits # some DBAPIs from returning results (pyodbc), so we # run in an explicit transaction. with testing.db.begin() as conn: eq_( conn.execute( text("select 'foo%something'" + default_from) ).scalar(), 'foo%something' ) eq_( conn.execute( DDL("select 'foo%%something'" + default_from) ).scalar(), 'foo%something' ) class DDLTest(fixtures.TestBase, AssertsCompiledSQL): def mock_engine(self): executor = lambda *a, **kw: None engine = create_engine(testing.db.name + '://', strategy='mock', executor=executor) engine.dialect.identifier_preparer = \ tsa.sql.compiler.IdentifierPreparer(engine.dialect) return engine def test_tokens(self): m = MetaData() sane_alone = Table('t', m, Column('id', Integer)) sane_schema = Table('t', m, Column('id', Integer), schema='s') insane_alone = Table('t t', m, Column('id', Integer)) insane_schema = Table('t t', m, Column('id', Integer), schema='s s') ddl = DDL('%(schema)s-%(table)s-%(fullname)s') dialect = self.mock_engine().dialect self.assert_compile(ddl.against(sane_alone), '-t-t', dialect=dialect) self.assert_compile(ddl.against(sane_schema), 's-t-s.t', dialect=dialect) self.assert_compile(ddl.against(insane_alone), '-"t t"-"t t"', dialect=dialect) self.assert_compile(ddl.against(insane_schema), '"s s"-"t t"-"s s"."t t"', dialect=dialect) # overrides are used piece-meal and verbatim. 
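# as the assertions above show, %(table)s renders the unqualified table
# name, %(schema)s the (quoted-as-needed) schema name or an empty string,
# and %(fullname)s the schema-qualified name; keys supplied via context=
# replace the computed value for that token verbatim.  as a purely
# hypothetical illustration, DDL('CREATE INDEX i ON %(fullname)s (x)').against(sane_schema)
# should compile to "CREATE INDEX i ON s.t (x)" under the same mock dialect.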
ddl = DDL('%(schema)s-%(table)s-%(fullname)s-%(bonus)s', context={'schema': 'S S', 'table': 'T T', 'bonus': 'b' }) self.assert_compile(ddl.against(sane_alone), 'S S-T T-t-b', dialect=dialect) self.assert_compile(ddl.against(sane_schema), 'S S-T T-s.t-b', dialect=dialect) self.assert_compile(ddl.against(insane_alone), 'S S-T T-"t t"-b' , dialect=dialect) self.assert_compile(ddl.against(insane_schema), 'S S-T T-"s s"."t t"-b', dialect=dialect) def test_filter(self): cx = self.mock_engine() tbl = Table('t', MetaData(), Column('id', Integer)) target = cx.name assert DDL('')._should_execute(tbl, cx) assert DDL('').execute_if(dialect=target)._should_execute(tbl, cx) assert not DDL('').execute_if(dialect='bogus').\ _should_execute(tbl, cx) assert DDL('').execute_if(callable_=lambda d, y,z, **kw: True).\ _should_execute(tbl, cx) assert(DDL('').execute_if( callable_=lambda d, y,z, **kw: z.engine.name != 'bogus'). _should_execute(tbl, cx)) @testing.uses_deprecated(r'See DDLEvents') def test_filter_deprecated(self): cx = self.mock_engine() tbl = Table('t', MetaData(), Column('id', Integer)) target = cx.name assert DDL('')._should_execute_deprecated('x', tbl, cx) assert DDL('', on=target)._should_execute_deprecated('x', tbl, cx) assert not DDL('', on='bogus').\ _should_execute_deprecated('x', tbl, cx) assert DDL('', on=lambda d, x,y,z: True).\ _should_execute_deprecated('x', tbl, cx) assert(DDL('', on=lambda d, x,y,z: z.engine.name != 'bogus'). _should_execute_deprecated('x', tbl, cx)) def test_repr(self): assert repr(DDL('s')) assert repr(DDL('s', on='engine')) assert repr(DDL('s', on=lambda x: 1)) assert repr(DDL('s', context={'a':1})) assert repr(DDL('s', on='engine', context={'a':1})) SQLAlchemy-0.8.4/test/engine/test_execute.py0000644000076500000240000016465212251150015021536 0ustar classicstaff00000000000000# coding: utf-8 from __future__ import with_statement from sqlalchemy.testing import eq_, assert_raises, assert_raises_message, \ config, is_ import re from sqlalchemy.testing.util import picklers from sqlalchemy.interfaces import ConnectionProxy from sqlalchemy import MetaData, Integer, String, INT, VARCHAR, func, \ bindparam, select, event, TypeDecorator, create_engine, Sequence from sqlalchemy.sql import column, literal from sqlalchemy.testing.schema import Table, Column import sqlalchemy as tsa from sqlalchemy import testing from sqlalchemy import util from sqlalchemy.testing import engines from sqlalchemy.testing.engines import testing_engine import logging.handlers from sqlalchemy.dialects.oracle.zxjdbc import ReturningParam from sqlalchemy.engine import result as _result, default from sqlalchemy.engine.base import Connection, Engine from sqlalchemy.testing import fixtures from sqlalchemy.testing.mock import Mock, call import StringIO users, metadata, users_autoinc = None, None, None class ExecuteTest(fixtures.TestBase): @classmethod def setup_class(cls): global users, users_autoinc, metadata metadata = MetaData(testing.db) users = Table('users', metadata, Column('user_id', INT, primary_key = True, autoincrement=False), Column('user_name', VARCHAR(20)), ) users_autoinc = Table('users_autoinc', metadata, Column('user_id', INT, primary_key = True, test_needs_autoincrement=True), Column('user_name', VARCHAR(20)), ) metadata.create_all() @engines.close_first def teardown(self): testing.db.execute(users.delete()) @classmethod def teardown_class(cls): metadata.drop_all() @testing.fails_on("postgresql+pg8000", "pg8000 still doesn't allow single % without params") def 
test_no_params_option(self): stmt = "SELECT '%'" + testing.db.dialect.statement_compiler( testing.db.dialect, None).default_from() conn = testing.db.connect() result = conn.\ execution_options(no_parameters=True).\ scalar(stmt) eq_(result, '%') @testing.fails_on_everything_except('firebird', 'maxdb', 'sqlite', '+pyodbc', '+mxodbc', '+zxjdbc', 'mysql+oursql', 'informix+informixdb') def test_raw_qmark(self): def go(conn): conn.execute('insert into users (user_id, user_name) ' 'values (?, ?)', (1, 'jack')) conn.execute('insert into users (user_id, user_name) ' 'values (?, ?)', [2, 'fred']) conn.execute('insert into users (user_id, user_name) ' 'values (?, ?)', [3, 'ed'], [4, 'horse']) conn.execute('insert into users (user_id, user_name) ' 'values (?, ?)', (5, 'barney'), (6, 'donkey')) conn.execute('insert into users (user_id, user_name) ' 'values (?, ?)', 7, 'sally') res = conn.execute('select * from users order by user_id') assert res.fetchall() == [ (1, 'jack'), (2, 'fred'), (3, 'ed'), (4, 'horse'), (5, 'barney'), (6, 'donkey'), (7, 'sally'), ] for multiparam, param in [ (("jack", "fred"), {}), ((["jack", "fred"],), {}) ]: res = conn.execute( "select * from users where user_name=? or " "user_name=? order by user_id", *multiparam, **param) assert res.fetchall() == [ (1, 'jack'), (2, 'fred') ] res = conn.execute("select * from users where user_name=?", "jack" ) assert res.fetchall() == [(1, 'jack')] conn.execute('delete from users') go(testing.db) conn = testing.db.connect() try: go(conn) finally: conn.close() # some psycopg2 versions bomb this. @testing.fails_on_everything_except('mysql+mysqldb', 'mysql+pymysql', 'mysql+cymysql', 'mysql+mysqlconnector', 'postgresql') @testing.fails_on('postgresql+zxjdbc', 'sprintf not supported') def test_raw_sprintf(self): def go(conn): conn.execute('insert into users (user_id, user_name) ' 'values (%s, %s)', [1, 'jack']) conn.execute('insert into users (user_id, user_name) ' 'values (%s, %s)', [2, 'ed'], [3, 'horse']) conn.execute('insert into users (user_id, user_name) ' 'values (%s, %s)', 4, 'sally') conn.execute('insert into users (user_id) values (%s)', 5) res = conn.execute('select * from users order by user_id') assert res.fetchall() == [(1, 'jack'), (2, 'ed'), (3, 'horse'), (4, 'sally'), (5, None)] for multiparam, param in [ (("jack", "ed"), {}), ((["jack", "ed"],), {}) ]: res = conn.execute( "select * from users where user_name=%s or " "user_name=%s order by user_id", *multiparam, **param) assert res.fetchall() == [ (1, 'jack'), (2, 'ed') ] res = conn.execute("select * from users where user_name=%s", "jack" ) assert res.fetchall() == [(1, 'jack')] conn.execute('delete from users') go(testing.db) conn = testing.db.connect() try: go(conn) finally: conn.close() # pyformat is supported for mysql, but skipping because a few driver # versions have a bug that bombs out on this test. 
(1.2.2b3, # 1.2.2c1, 1.2.2) @testing.skip_if(lambda : testing.against('mysql+mysqldb'), 'db-api flaky') @testing.fails_on_everything_except('postgresql+psycopg2', 'postgresql+pypostgresql', 'mysql+mysqlconnector', 'mysql+pymysql', 'mysql+cymysql') def test_raw_python(self): def go(conn): conn.execute('insert into users (user_id, user_name) ' 'values (%(id)s, %(name)s)', {'id': 1, 'name' : 'jack'}) conn.execute('insert into users (user_id, user_name) ' 'values (%(id)s, %(name)s)', {'id': 2, 'name' : 'ed'}, {'id': 3, 'name': 'horse'}) conn.execute('insert into users (user_id, user_name) ' 'values (%(id)s, %(name)s)', id=4, name='sally' ) res = conn.execute('select * from users order by user_id') assert res.fetchall() == [(1, 'jack'), (2, 'ed'), (3, 'horse'), (4, 'sally')] conn.execute('delete from users') go(testing.db) conn = testing.db.connect() try: go(conn) finally: conn.close() @testing.fails_on_everything_except('sqlite', 'oracle+cx_oracle', 'informix+informixdb') def test_raw_named(self): def go(conn): conn.execute('insert into users (user_id, user_name) ' 'values (:id, :name)', {'id': 1, 'name': 'jack' }) conn.execute('insert into users (user_id, user_name) ' 'values (:id, :name)', {'id': 2, 'name': 'ed' }, {'id': 3, 'name': 'horse'}) conn.execute('insert into users (user_id, user_name) ' 'values (:id, :name)', id=4, name='sally') res = conn.execute('select * from users order by user_id') assert res.fetchall() == [(1, 'jack'), (2, 'ed'), (3, 'horse'), (4, 'sally')] conn.execute('delete from users') go(testing.db) conn= testing.db.connect() try: go(conn) finally: conn.close() @testing.engines.close_open_connections def test_exception_wrapping_dbapi(self): conn = testing.db.connect() for _c in testing.db, conn: assert_raises_message( tsa.exc.DBAPIError, r"not_a_valid_statement", _c.execute, 'not_a_valid_statement' ) @testing.requires.sqlite def test_exception_wrapping_non_dbapi_error(self): e = create_engine('sqlite://') e.dialect.is_disconnect = is_disconnect = Mock() c = e.connect() c.connection.cursor = Mock( return_value=Mock( execute=Mock( side_effect=TypeError("I'm not a DBAPI error") )) ) assert_raises_message( TypeError, "I'm not a DBAPI error", c.execute, "select " ) eq_(is_disconnect.call_count, 0) def test_exception_wrapping_non_dbapi_statement(self): class MyType(TypeDecorator): impl = Integer def process_bind_param(self, value, dialect): raise Exception("nope") def _go(conn): assert_raises_message( tsa.exc.StatementError, r"nope \(original cause: Exception: nope\) u?'SELECT 1 ", conn.execute, select([1]).\ where( column('foo') == literal('bar', MyType()) ) ) _go(testing.db) conn = testing.db.connect() try: _go(conn) finally: conn.close() def test_stmt_exception_non_ascii(self): name = util.u('méil') assert_raises_message( tsa.exc.StatementError, util.u( "A value is required for bind parameter 'uname'" r'.*SELECT users.user_name AS .m\\xe9il.') if util.py2k else util.u( "A value is required for bind parameter 'uname'" '.*SELECT users.user_name AS .méil.'), testing.db.execute, select([users.c.user_name.label(name)]).where( users.c.user_name == bindparam("uname")), {'uname_incorrect': 'foo'} ) def test_stmt_exception_pickleable_no_dbapi(self): self._test_stmt_exception_pickleable(Exception("hello world")) @testing.crashes("postgresql+psycopg2", "Older versions dont support cursor pickling, newer ones do") @testing.fails_on("mysql+oursql", "Exception doesn't come back exactly the same from pickle") @testing.fails_on("oracle+cx_oracle", "cx_oracle exception seems to be having " 
"some issue with pickling") def test_stmt_exception_pickleable_plus_dbapi(self): raw = testing.db.raw_connection() the_orig = None try: try: cursor = raw.cursor() cursor.execute("SELECTINCORRECT") except testing.db.dialect.dbapi.DatabaseError, orig: # py3k has "orig" in local scope... the_orig = orig finally: raw.close() self._test_stmt_exception_pickleable(the_orig) def _test_stmt_exception_pickleable(self, orig): for sa_exc in ( tsa.exc.StatementError("some error", "select * from table", {"foo":"bar"}, orig), tsa.exc.InterfaceError("select * from table", {"foo":"bar"}, orig), tsa.exc.NoReferencedTableError("message", "tname"), tsa.exc.NoReferencedColumnError("message", "tname", "cname"), tsa.exc.CircularDependencyError("some message", [1, 2, 3], [(1, 2), (3, 4)]), ): for loads, dumps in picklers(): repickled = loads(dumps(sa_exc)) eq_(repickled.args[0], sa_exc.args[0]) if isinstance(sa_exc, tsa.exc.StatementError): eq_(repickled.params, {"foo":"bar"}) eq_(repickled.statement, sa_exc.statement) if hasattr(sa_exc, "connection_invalidated"): eq_(repickled.connection_invalidated, sa_exc.connection_invalidated) eq_(repickled.orig.args[0], orig.args[0]) def test_dont_wrap_mixin(self): class MyException(Exception, tsa.exc.DontWrapMixin): pass class MyType(TypeDecorator): impl = Integer def process_bind_param(self, value, dialect): raise MyException("nope") def _go(conn): assert_raises_message( MyException, "nope", conn.execute, select([1]).\ where( column('foo') == literal('bar', MyType()) ) ) _go(testing.db) conn = testing.db.connect() try: _go(conn) finally: conn.close() def test_empty_insert(self): """test that execute() interprets [] as a list with no params""" testing.db.execute(users_autoinc.insert(). values(user_name=bindparam('name', None)), []) eq_(testing.db.execute(users_autoinc.select()).fetchall(), [(1, None)]) @testing.requires.ad_hoc_engines def test_engine_level_options(self): eng = engines.testing_engine(options={'execution_options': {'foo': 'bar'}}) conn = eng.contextual_connect() eq_(conn._execution_options['foo'], 'bar') eq_(conn.execution_options(bat='hoho')._execution_options['foo' ], 'bar') eq_(conn.execution_options(bat='hoho')._execution_options['bat' ], 'hoho') eq_(conn.execution_options(foo='hoho')._execution_options['foo' ], 'hoho') eng.update_execution_options(foo='hoho') conn = eng.contextual_connect() eq_(conn._execution_options['foo'], 'hoho') @testing.requires.ad_hoc_engines def test_generative_engine_execution_options(self): eng = engines.testing_engine(options={'execution_options': {'base': 'x1'}}) eng1 = eng.execution_options(foo="b1") eng2 = eng.execution_options(foo="b2") eng1a = eng1.execution_options(bar="a1") eng2a = eng2.execution_options(foo="b3", bar="a2") eq_(eng._execution_options, {'base': 'x1'}) eq_(eng1._execution_options, {'base': 'x1', 'foo': 'b1'}) eq_(eng2._execution_options, {'base': 'x1', 'foo': 'b2'}) eq_(eng1a._execution_options, {'base': 'x1', 'foo': 'b1', 'bar': 'a1'}) eq_(eng2a._execution_options, {'base': 'x1', 'foo': 'b3', 'bar': 'a2'}) is_(eng1a.pool, eng.pool) # test pool is shared eng2.dispose() is_(eng1a.pool, eng2.pool) is_(eng.pool, eng2.pool) @testing.requires.ad_hoc_engines def test_generative_engine_event_dispatch(self): canary = [] def l1(*arg, **kw): canary.append("l1") def l2(*arg, **kw): canary.append("l2") def l3(*arg, **kw): canary.append("l3") eng = engines.testing_engine(options={'execution_options': {'base': 'x1'}}) event.listen(eng, "before_execute", l1) eng1 = eng.execution_options(foo="b1") event.listen(eng, 
"before_execute", l2) event.listen(eng1, "before_execute", l3) eng.execute(select([1])) eng1.execute(select([1])) eq_(canary, ["l1", "l2", "l3", "l1", "l2"]) @testing.requires.ad_hoc_engines def test_generative_engine_event_dispatch_hasevents(self): def l1(*arg, **kw): pass eng = create_engine(testing.db.url) assert not eng._has_events event.listen(eng, "before_execute", l1) eng2 = eng.execution_options(foo='bar') assert eng2._has_events def test_unicode_test_fails_warning(self): class MockCursor(engines.DBAPIProxyCursor): def execute(self, stmt, params=None, **kw): if "test unicode returns" in stmt: raise self.engine.dialect.dbapi.DatabaseError("boom") else: return super(MockCursor, self).execute(stmt, params, **kw) eng = engines.proxying_engine(cursor_cls=MockCursor) assert_raises_message( tsa.exc.SAWarning, "Exception attempting to detect unicode returns", eng.connect ) assert eng.dialect.returns_unicode_strings in (True, False) eng.dispose() class ConvenienceExecuteTest(fixtures.TablesTest): @classmethod def define_tables(cls, metadata): cls.table = Table('exec_test', metadata, Column('a', Integer), Column('b', Integer), test_needs_acid=True ) def _trans_fn(self, is_transaction=False): def go(conn, x, value=None): if is_transaction: conn = conn.connection conn.execute(self.table.insert().values(a=x, b=value)) return go def _trans_rollback_fn(self, is_transaction=False): def go(conn, x, value=None): if is_transaction: conn = conn.connection conn.execute(self.table.insert().values(a=x, b=value)) raise Exception("breakage") return go def _assert_no_data(self): eq_( testing.db.scalar(self.table.count()), 0 ) def _assert_fn(self, x, value=None): eq_( testing.db.execute(self.table.select()).fetchall(), [(x, value)] ) def test_transaction_engine_ctx_commit(self): fn = self._trans_fn() ctx = testing.db.begin() testing.run_as_contextmanager(ctx, fn, 5, value=8) self._assert_fn(5, value=8) def test_transaction_engine_ctx_begin_fails(self): engine = engines.testing_engine() mock_connection = Mock( return_value=Mock( begin=Mock(side_effect=Exception("boom")) ) ) engine._connection_cls = mock_connection assert_raises( Exception, engine.begin ) eq_( mock_connection.return_value.close.mock_calls, [call()] ) def test_transaction_engine_ctx_rollback(self): fn = self._trans_rollback_fn() ctx = testing.db.begin() assert_raises_message( Exception, "breakage", testing.run_as_contextmanager, ctx, fn, 5, value=8 ) self._assert_no_data() def test_transaction_tlocal_engine_ctx_commit(self): fn = self._trans_fn() engine = engines.testing_engine(options=dict( strategy='threadlocal', pool=testing.db.pool)) ctx = engine.begin() testing.run_as_contextmanager(ctx, fn, 5, value=8) self._assert_fn(5, value=8) def test_transaction_tlocal_engine_ctx_rollback(self): fn = self._trans_rollback_fn() engine = engines.testing_engine(options=dict( strategy='threadlocal', pool=testing.db.pool)) ctx = engine.begin() assert_raises_message( Exception, "breakage", testing.run_as_contextmanager, ctx, fn, 5, value=8 ) self._assert_no_data() def test_transaction_connection_ctx_commit(self): fn = self._trans_fn(True) conn = testing.db.connect() ctx = conn.begin() testing.run_as_contextmanager(ctx, fn, 5, value=8) self._assert_fn(5, value=8) def test_transaction_connection_ctx_rollback(self): fn = self._trans_rollback_fn(True) conn = testing.db.connect() ctx = conn.begin() assert_raises_message( Exception, "breakage", testing.run_as_contextmanager, ctx, fn, 5, value=8 ) self._assert_no_data() def test_connection_as_ctx(self): fn = 
self._trans_fn() ctx = testing.db.connect() testing.run_as_contextmanager(ctx, fn, 5, value=8) # autocommit is on self._assert_fn(5, value=8) @testing.fails_on('mysql+oursql', "oursql bug ? getting wrong rowcount") def test_connect_as_ctx_noautocommit(self): fn = self._trans_fn() self._assert_no_data() ctx = testing.db.connect().execution_options(autocommit=False) testing.run_as_contextmanager(ctx, fn, 5, value=8) # autocommit is off self._assert_no_data() def test_transaction_engine_fn_commit(self): fn = self._trans_fn() testing.db.transaction(fn, 5, value=8) self._assert_fn(5, value=8) def test_transaction_engine_fn_rollback(self): fn = self._trans_rollback_fn() assert_raises_message( Exception, "breakage", testing.db.transaction, fn, 5, value=8 ) self._assert_no_data() def test_transaction_connection_fn_commit(self): fn = self._trans_fn() conn = testing.db.connect() conn.transaction(fn, 5, value=8) self._assert_fn(5, value=8) def test_transaction_connection_fn_rollback(self): fn = self._trans_rollback_fn() conn = testing.db.connect() assert_raises( Exception, conn.transaction, fn, 5, value=8 ) self._assert_no_data() class CompiledCacheTest(fixtures.TestBase): @classmethod def setup_class(cls): global users, metadata metadata = MetaData(testing.db) users = Table('users', metadata, Column('user_id', INT, primary_key=True, test_needs_autoincrement=True), Column('user_name', VARCHAR(20)), ) metadata.create_all() @engines.close_first def teardown(self): testing.db.execute(users.delete()) @classmethod def teardown_class(cls): metadata.drop_all() def test_cache(self): conn = testing.db.connect() cache = {} cached_conn = conn.execution_options(compiled_cache=cache) ins = users.insert() cached_conn.execute(ins, {'user_name':'u1'}) cached_conn.execute(ins, {'user_name':'u2'}) cached_conn.execute(ins, {'user_name':'u3'}) assert len(cache) == 1 eq_(conn.execute("select count(*) from users").scalar(), 3) class LogParamsTest(fixtures.TestBase): __only_on__ = 'sqlite' __requires__ = 'ad_hoc_engines', def setup(self): self.eng = engines.testing_engine(options={'echo':True}) self.eng.execute("create table foo (data string)") self.buf = logging.handlers.BufferingHandler(100) for log in [ logging.getLogger('sqlalchemy.engine'), logging.getLogger('sqlalchemy.pool') ]: log.addHandler(self.buf) def teardown(self): self.eng.execute("drop table foo") for log in [ logging.getLogger('sqlalchemy.engine'), logging.getLogger('sqlalchemy.pool') ]: log.removeHandler(self.buf) def test_log_large_dict(self): self.eng.execute( "INSERT INTO foo (data) values (:data)", [{"data":str(i)} for i in xrange(100)] ) eq_( self.buf.buffer[1].message, "[{'data': '0'}, {'data': '1'}, {'data': '2'}, {'data': '3'}, " "{'data': '4'}, {'data': '5'}, {'data': '6'}, {'data': '7'}" " ... displaying 10 of 100 total bound " "parameter sets ... {'data': '98'}, {'data': '99'}]" ) def test_log_large_list(self): self.eng.execute( "INSERT INTO foo (data) values (?)", [(str(i), ) for i in xrange(100)] ) eq_( self.buf.buffer[1].message, "[('0',), ('1',), ('2',), ('3',), ('4',), ('5',), " "('6',), ('7',) ... displaying 10 of 100 total " "bound parameter sets ... ('98',), ('99',)]" ) def test_error_large_dict(self): assert_raises_message( tsa.exc.DBAPIError, r".*'INSERT INTO nonexistent \(data\) values \(:data\)' " "\[{'data': '0'}, {'data': '1'}, {'data': '2'}, " "{'data': '3'}, {'data': '4'}, {'data': '5'}, " "{'data': '6'}, {'data': '7'} ... displaying 10 of " "100 total bound parameter sets ... 
{'data': '98'}, {'data': '99'}\]", lambda: self.eng.execute( "INSERT INTO nonexistent (data) values (:data)", [{"data":str(i)} for i in xrange(100)] ) ) def test_error_large_list(self): assert_raises_message( tsa.exc.DBAPIError, r".*INSERT INTO nonexistent \(data\) values " "\(\?\)' \[\('0',\), \('1',\), \('2',\), \('3',\), " "\('4',\), \('5',\), \('6',\), \('7',\) ... displaying " "10 of 100 total bound parameter sets ... " "\('98',\), \('99',\)\]", lambda: self.eng.execute( "INSERT INTO nonexistent (data) values (?)", [(str(i), ) for i in xrange(100)] ) ) class LoggingNameTest(fixtures.TestBase): __requires__ = 'ad_hoc_engines', def _assert_names_in_execute(self, eng, eng_name, pool_name): eng.execute(select([1])) assert self.buf.buffer for name in [b.name for b in self.buf.buffer]: assert name in ( 'sqlalchemy.engine.base.Engine.%s' % eng_name, 'sqlalchemy.pool.%s.%s' % (eng.pool.__class__.__name__, pool_name) ) def _assert_no_name_in_execute(self, eng): eng.execute(select([1])) assert self.buf.buffer for name in [b.name for b in self.buf.buffer]: assert name in ( 'sqlalchemy.engine.base.Engine', 'sqlalchemy.pool.%s' % eng.pool.__class__.__name__ ) def _named_engine(self, **kw): options = { 'logging_name':'myenginename', 'pool_logging_name':'mypoolname', 'echo':True } options.update(kw) return engines.testing_engine(options=options) def _unnamed_engine(self, **kw): kw.update({'echo':True}) return engines.testing_engine(options=kw) def setup(self): self.buf = logging.handlers.BufferingHandler(100) for log in [ logging.getLogger('sqlalchemy.engine'), logging.getLogger('sqlalchemy.pool') ]: log.addHandler(self.buf) def teardown(self): for log in [ logging.getLogger('sqlalchemy.engine'), logging.getLogger('sqlalchemy.pool') ]: log.removeHandler(self.buf) def test_named_logger_names(self): eng = self._named_engine() eq_(eng.logging_name, "myenginename") eq_(eng.pool.logging_name, "mypoolname") def test_named_logger_names_after_dispose(self): eng = self._named_engine() eng.execute(select([1])) eng.dispose() eq_(eng.logging_name, "myenginename") eq_(eng.pool.logging_name, "mypoolname") def test_unnamed_logger_names(self): eng = self._unnamed_engine() eq_(eng.logging_name, None) eq_(eng.pool.logging_name, None) def test_named_logger_execute(self): eng = self._named_engine() self._assert_names_in_execute(eng, "myenginename", "mypoolname") def test_named_logger_echoflags_execute(self): eng = self._named_engine(echo='debug', echo_pool='debug') self._assert_names_in_execute(eng, "myenginename", "mypoolname") def test_named_logger_execute_after_dispose(self): eng = self._named_engine() eng.execute(select([1])) eng.dispose() self._assert_names_in_execute(eng, "myenginename", "mypoolname") def test_unnamed_logger_execute(self): eng = self._unnamed_engine() self._assert_no_name_in_execute(eng) def test_unnamed_logger_echoflags_execute(self): eng = self._unnamed_engine(echo='debug', echo_pool='debug') self._assert_no_name_in_execute(eng) class EchoTest(fixtures.TestBase): __requires__ = 'ad_hoc_engines', def setup(self): self.level = logging.getLogger('sqlalchemy.engine').level logging.getLogger('sqlalchemy.engine').setLevel(logging.WARN) self.buf = logging.handlers.BufferingHandler(100) logging.getLogger('sqlalchemy.engine').addHandler(self.buf) def teardown(self): logging.getLogger('sqlalchemy.engine').removeHandler(self.buf) logging.getLogger('sqlalchemy.engine').setLevel(self.level) def testing_engine(self): e = engines.testing_engine() # do an initial execute to clear out 'first connect' # messages 
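# (the very first connection checkout also logs engine/pool setup messages;
# the throwaway execute plus buf.flush() below keeps the later assertions,
# which count and inspect self.buf.buffer entries, deterministic.)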
e.execute(select([10])).close() self.buf.flush() return e def test_levels(self): e1 = engines.testing_engine() eq_(e1._should_log_info(), False) eq_(e1._should_log_debug(), False) eq_(e1.logger.isEnabledFor(logging.INFO), False) eq_(e1.logger.getEffectiveLevel(), logging.WARN) e1.echo = True eq_(e1._should_log_info(), True) eq_(e1._should_log_debug(), False) eq_(e1.logger.isEnabledFor(logging.INFO), True) eq_(e1.logger.getEffectiveLevel(), logging.INFO) e1.echo = 'debug' eq_(e1._should_log_info(), True) eq_(e1._should_log_debug(), True) eq_(e1.logger.isEnabledFor(logging.DEBUG), True) eq_(e1.logger.getEffectiveLevel(), logging.DEBUG) e1.echo = False eq_(e1._should_log_info(), False) eq_(e1._should_log_debug(), False) eq_(e1.logger.isEnabledFor(logging.INFO), False) eq_(e1.logger.getEffectiveLevel(), logging.WARN) def test_echo_flag_independence(self): """test the echo flag's independence to a specific engine.""" e1 = self.testing_engine() e2 = self.testing_engine() e1.echo = True e1.execute(select([1])).close() e2.execute(select([2])).close() e1.echo = False e1.execute(select([3])).close() e2.execute(select([4])).close() e2.echo = True e1.execute(select([5])).close() e2.execute(select([6])).close() assert self.buf.buffer[0].getMessage().startswith("SELECT 1") assert self.buf.buffer[2].getMessage().startswith("SELECT 6") assert len(self.buf.buffer) == 4 class MockStrategyTest(fixtures.TestBase): def _engine_fixture(self): buf = StringIO.StringIO() def dump(sql, *multiparams, **params): buf.write(unicode(sql.compile(dialect=engine.dialect))) engine = create_engine('postgresql://', strategy='mock', executor=dump) return engine, buf def test_sequence_not_duped(self): engine, buf = self._engine_fixture() metadata = MetaData() t = Table('testtable', metadata, Column('pk', Integer, Sequence('testtable_pk_seq'), primary_key=True) ) t.create(engine) t.drop(engine) eq_( re.findall(r'CREATE (\w+)', buf.getvalue()), ["SEQUENCE", "TABLE"] ) eq_( re.findall(r'DROP (\w+)', buf.getvalue()), ["SEQUENCE", "TABLE"] ) class ResultProxyTest(fixtures.TestBase): def test_nontuple_row(self): """ensure the C version of BaseRowProxy handles duck-type-dependent rows.""" from sqlalchemy.engine import RowProxy class MyList(object): def __init__(self, l): self.l = l def __len__(self): return len(self.l) def __getitem__(self, i): return list.__getitem__(self.l, i) proxy = RowProxy(object(), MyList(['value']), [None], {'key' : (None, None, 0), 0: (None, None, 0)}) eq_(list(proxy), ['value']) eq_(proxy[0], 'value') eq_(proxy['key'], 'value') @testing.provide_metadata def test_no_rowcount_on_selects_inserts(self): """assert that rowcount is only called on deletes and updates. This because cursor.rowcount can be expensive on some dialects such as Firebird. 
""" metadata = self.metadata engine = engines.testing_engine() metadata.bind = engine t = Table('t1', metadata, Column('data', String(10)) ) metadata.create_all() class BreakRowcountMixin(object): @property def rowcount(self): assert False execution_ctx_cls = engine.dialect.execution_ctx_cls engine.dialect.execution_ctx_cls = type("FakeCtx", (BreakRowcountMixin, execution_ctx_cls), {}) try: r = t.insert().execute({'data': 'd1'}, {'data': 'd2'}, {'data': 'd3'}) eq_(t.select().execute().fetchall(), [('d1', ), ('d2', ), ('d3', )]) assert_raises(AssertionError, t.update().execute, {'data' : 'd4'}) assert_raises(AssertionError, t.delete().execute) finally: engine.dialect.execution_ctx_cls = execution_ctx_cls @testing.requires.python26 def test_rowproxy_is_sequence(self): import collections from sqlalchemy.engine import RowProxy row = RowProxy(object(), ['value'], [None], {'key' : (None, None, 0), 0: (None, None, 0)}) assert isinstance(row, collections.Sequence) @testing.requires.cextensions def test_row_c_sequence_check(self): import csv import collections from StringIO import StringIO metadata = MetaData() metadata.bind = 'sqlite://' users = Table('users', metadata, Column('id', Integer, primary_key=True), Column('name', String(40)), ) users.create() users.insert().execute(name='Test') row = users.select().execute().fetchone() s = StringIO() writer = csv.writer(s) # csv performs PySequenceCheck call writer.writerow(row) assert s.getvalue().strip() == '1,Test' @testing.requires.selectone def test_empty_accessors(self): statements = [ ( "select 1", [ lambda r: r.last_inserted_params(), lambda r: r.last_updated_params(), lambda r: r.prefetch_cols(), lambda r: r.postfetch_cols(), lambda r : r.inserted_primary_key ], "Statement is not a compiled expression construct." ), ( select([1]), [ lambda r: r.last_inserted_params(), lambda r : r.inserted_primary_key ], r"Statement is not an insert\(\) expression construct." ), ( select([1]), [ lambda r: r.last_updated_params(), ], r"Statement is not an update\(\) expression construct." ), ( select([1]), [ lambda r: r.prefetch_cols(), lambda r : r.postfetch_cols() ], r"Statement is not an insert\(\) " r"or update\(\) expression construct." 
), ] for stmt, meths, msg in statements: r = testing.db.execute(stmt) try: for meth in meths: assert_raises_message( tsa.exc.InvalidRequestError, msg, meth, r ) finally: r.close() class AlternateResultProxyTest(fixtures.TestBase): __requires__ = ('sqlite', ) @classmethod def setup_class(cls): from sqlalchemy.engine import base, default cls.engine = engine = testing_engine('sqlite://') m = MetaData() cls.table = t = Table('test', m, Column('x', Integer, primary_key=True), Column('y', String(50, convert_unicode='force')) ) m.create_all(engine) engine.execute(t.insert(), [ {'x':i, 'y':"t_%d" % i} for i in xrange(1, 12) ]) def _test_proxy(self, cls): class ExcCtx(default.DefaultExecutionContext): def get_result_proxy(self): return cls(self) self.engine.dialect.execution_ctx_cls = ExcCtx rows = [] r = self.engine.execute(select([self.table])) assert isinstance(r, cls) for i in range(5): rows.append(r.fetchone()) eq_(rows, [(i, "t_%d" % i) for i in xrange(1, 6)]) rows = r.fetchmany(3) eq_(rows, [(i, "t_%d" % i) for i in xrange(6, 9)]) rows = r.fetchall() eq_(rows, [(i, "t_%d" % i) for i in xrange(9, 12)]) r = self.engine.execute(select([self.table])) rows = r.fetchmany(None) eq_(rows[0], (1, "t_1")) # number of rows here could be one, or the whole thing assert len(rows) == 1 or len(rows) == 11 r = self.engine.execute(select([self.table]).limit(1)) r.fetchone() eq_(r.fetchone(), None) r = self.engine.execute(select([self.table]).limit(5)) rows = r.fetchmany(6) eq_(rows, [(i, "t_%d" % i) for i in xrange(1, 6)]) def test_plain(self): self._test_proxy(_result.ResultProxy) def test_buffered_row_result_proxy(self): self._test_proxy(_result.BufferedRowResultProxy) def test_fully_buffered_result_proxy(self): self._test_proxy(_result.FullyBufferedResultProxy) def test_buffered_column_result_proxy(self): self._test_proxy(_result.BufferedColumnResultProxy) class EngineEventsTest(fixtures.TestBase): __requires__ = 'ad_hoc_engines', def tearDown(self): Engine.dispatch._clear() Engine._has_events = False def _assert_stmts(self, expected, received): orig = list(received) for stmt, params, posn in expected: if not received: assert False, "Nothing available for stmt: %s" % stmt while received: teststmt, testparams, testmultiparams = \ received.pop(0) teststmt = re.compile(r'[\n\t ]+', re.M).sub(' ', teststmt).strip() if teststmt.startswith(stmt) and (testparams == params or testparams == posn): break def test_per_engine_independence(self): e1 = testing_engine(config.db_url) e2 = testing_engine(config.db_url) canary = [] def before_exec(conn, stmt, *arg): canary.append(stmt) event.listen(e1, "before_execute", before_exec) s1 = select([1]) s2 = select([2]) e1.execute(s1) e2.execute(s2) eq_(canary, [s1]) event.listen(e2, "before_execute", before_exec) e1.execute(s1) e2.execute(s2) eq_(canary, [s1, s1, s2]) def test_per_engine_plus_global(self): canary = [] def be1(conn, stmt, *arg): canary.append('be1') def be2(conn, stmt, *arg): canary.append('be2') def be3(conn, stmt, *arg): canary.append('be3') event.listen(Engine, "before_execute", be1) e1 = testing_engine(config.db_url) e2 = testing_engine(config.db_url) event.listen(e1, "before_execute", be2) event.listen(Engine, "before_execute", be3) e1.connect() e2.connect() canary[:] = [] e1.execute(select([1])) e2.execute(select([1])) eq_(canary, ['be1', 'be3', 'be2', 'be1', 'be3']) def test_per_connection_plus_engine(self): canary = [] def be1(conn, stmt, *arg): canary.append('be1') def be2(conn, stmt, *arg): canary.append('be2') e1 = testing_engine(config.db_url) 
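        # listeners may be attached to both the Engine and an individual Connection;
        # the assertions below show the connection-level listener (be2) firing before
        # the engine-level one (be1)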
event.listen(e1, "before_execute", be1) conn = e1.connect() event.listen(conn, "before_execute", be2) canary[:] = [] conn.execute(select([1])) eq_(canary, ['be2', 'be1']) conn._branch().execute(select([1])) eq_(canary, ['be2', 'be1', 'be2', 'be1']) def test_argument_format_execute(self): def before_execute(conn, clauseelement, multiparams, params): assert isinstance(multiparams, (list, tuple)) assert isinstance(params, dict) def after_execute(conn, clauseelement, multiparams, params, result): assert isinstance(multiparams, (list, tuple)) assert isinstance(params, dict) e1 = testing_engine(config.db_url) event.listen(e1, 'before_execute', before_execute) event.listen(e1, 'after_execute', after_execute) e1.execute(select([1])) e1.execute(select([1]).compile(dialect=e1.dialect).statement) e1.execute(select([1]).compile(dialect=e1.dialect)) e1._execute_compiled(select([1]).compile(dialect=e1.dialect), (), {}) def test_exception_event(self): engine = engines.testing_engine() canary = [] @event.listens_for(engine, 'dbapi_error') def err(conn, cursor, stmt, parameters, context, exception): canary.append((stmt, parameters, exception)) conn = engine.connect() try: conn.execute("SELECT FOO FROM I_DONT_EXIST") assert False except tsa.exc.DBAPIError, e: assert canary[0][2] is e.orig assert canary[0][0] == "SELECT FOO FROM I_DONT_EXIST" @testing.fails_on('firebird', 'Data type unknown') def test_execute_events(self): stmts = [] cursor_stmts = [] def execute(conn, clauseelement, multiparams, params ): stmts.append((str(clauseelement), params, multiparams)) def cursor_execute(conn, cursor, statement, parameters, context, executemany): cursor_stmts.append((str(statement), parameters, None)) for engine in [ engines.testing_engine(options=dict(implicit_returning=False)), engines.testing_engine(options=dict(implicit_returning=False, strategy='threadlocal')), engines.testing_engine(options=dict(implicit_returning=False)).\ connect() ]: event.listen(engine, 'before_execute', execute) event.listen(engine, 'before_cursor_execute', cursor_execute) m = MetaData(engine) t1 = Table('t1', m, Column('c1', Integer, primary_key=True), Column('c2', String(50), default=func.lower('Foo'), primary_key=True) ) m.create_all() try: t1.insert().execute(c1=5, c2='some data') t1.insert().execute(c1=6) eq_(engine.execute('select * from t1').fetchall(), [(5, 'some data'), (6, 'foo')]) finally: m.drop_all() compiled = [('CREATE TABLE t1', {}, None), ('INSERT INTO t1 (c1, c2)', {'c2': 'some data', 'c1': 5}, None), ('INSERT INTO t1 (c1, c2)', {'c1': 6}, None), ('select * from t1', {}, None), ('DROP TABLE t1', {}, None)] # or engine.dialect.preexecute_pk_sequences: if not testing.against('oracle+zxjdbc'): cursor = [ ('CREATE TABLE t1', {}, ()), ('INSERT INTO t1 (c1, c2)', { 'c2': 'some data', 'c1': 5}, (5, 'some data')), ('SELECT lower', {'lower_2': 'Foo'}, ('Foo', )), ('INSERT INTO t1 (c1, c2)', {'c2': 'foo', 'c1': 6}, (6, 'foo')), ('select * from t1', {}, ()), ('DROP TABLE t1', {}, ()), ] else: insert2_params = 6, 'Foo' if testing.against('oracle+zxjdbc'): insert2_params += (ReturningParam(12), ) cursor = [('CREATE TABLE t1', {}, ()), ('INSERT INTO t1 (c1, c2)', {'c2': 'some data', 'c1': 5}, (5, 'some data')), ('INSERT INTO t1 (c1, c2)', {'c1': 6, 'lower_2': 'Foo'}, insert2_params), ('select * from t1', {}, ()), ('DROP TABLE t1', {}, ())] # bind param name 'lower_2' might # be incorrect self._assert_stmts(compiled, stmts) self._assert_stmts(cursor, cursor_stmts) def test_options(self): canary = [] def execute(conn, *args, **kw): 
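            # 'before_execute' listener: just record that the hook fired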
canary.append('execute') def cursor_execute(conn, *args, **kw): canary.append('cursor_execute') engine = engines.testing_engine() event.listen(engine, 'before_execute', execute) event.listen(engine, 'before_cursor_execute', cursor_execute) conn = engine.connect() c2 = conn.execution_options(foo='bar') eq_(c2._execution_options, {'foo':'bar'}) c2.execute(select([1])) c3 = c2.execution_options(bar='bat') eq_(c3._execution_options, {'foo':'bar', 'bar':'bat'}) eq_(canary, ['execute', 'cursor_execute']) def test_retval_flag(self): canary = [] def tracker(name): def go(conn, *args, **kw): canary.append(name) return go def execute(conn, clauseelement, multiparams, params): canary.append('execute') return clauseelement, multiparams, params def cursor_execute(conn, cursor, statement, parameters, context, executemany): canary.append('cursor_execute') return statement, parameters engine = engines.testing_engine() assert_raises( tsa.exc.ArgumentError, event.listen, engine, "begin", tracker("begin"), retval=True ) event.listen(engine, "before_execute", execute, retval=True) event.listen(engine, "before_cursor_execute", cursor_execute, retval=True) engine.execute(select([1])) eq_( canary, ['execute', 'cursor_execute'] ) @testing.requires.sequences @testing.provide_metadata def test_cursor_execute(self): canary = [] def tracker(name): def go(conn, cursor, statement, parameters, context, executemany): canary.append((statement, context)) return go engine = engines.testing_engine() t = Table('t', self.metadata, Column('x', Integer, Sequence('t_id_seq'), primary_key=True), implicit_returning=False ) self.metadata.create_all(engine) with engine.begin() as conn: event.listen(conn, 'before_cursor_execute', tracker('cursor_execute')) conn.execute(t.insert()) # we see the sequence pre-executed in the first call assert "t_id_seq" in canary[0][0] assert "INSERT" in canary[1][0] # same context is_( canary[0][1], canary[1][1] ) def test_transactional(self): canary = [] def tracker(name): def go(conn, *args, **kw): canary.append(name) return go engine = engines.testing_engine() event.listen(engine, 'before_execute', tracker('execute')) event.listen(engine, 'before_cursor_execute', tracker('cursor_execute')) event.listen(engine, 'begin', tracker('begin')) event.listen(engine, 'commit', tracker('commit')) event.listen(engine, 'rollback', tracker('rollback')) conn = engine.connect() trans = conn.begin() conn.execute(select([1])) trans.rollback() trans = conn.begin() conn.execute(select([1])) trans.commit() eq_(canary, [ 'begin', 'execute', 'cursor_execute', 'rollback', 'begin', 'execute', 'cursor_execute', 'commit', ]) @testing.requires.savepoints @testing.requires.two_phase_transactions def test_transactional_advanced(self): canary1 = [] def tracker1(name): def go(*args, **kw): canary1.append(name) return go canary2 = [] def tracker2(name): def go(*args, **kw): canary2.append(name) return go engine = engines.testing_engine() for name in ['begin', 'savepoint', 'rollback_savepoint', 'release_savepoint', 'rollback', 'begin_twophase', 'prepare_twophase', 'commit_twophase']: event.listen(engine, '%s' % name, tracker1(name)) conn = engine.connect() for name in ['begin', 'savepoint', 'rollback_savepoint', 'release_savepoint', 'rollback', 'begin_twophase', 'prepare_twophase', 'commit_twophase']: event.listen(conn, '%s' % name, tracker2(name)) trans = conn.begin() trans2 = conn.begin_nested() conn.execute(select([1])) trans2.rollback() trans2 = conn.begin_nested() conn.execute(select([1])) trans2.commit() trans.rollback() trans 
= conn.begin_twophase() conn.execute(select([1])) trans.prepare() trans.commit() eq_(canary1, ['begin', 'savepoint', 'rollback_savepoint', 'savepoint', 'release_savepoint', 'rollback', 'begin_twophase', 'prepare_twophase', 'commit_twophase'] ) eq_(canary2, ['begin', 'savepoint', 'rollback_savepoint', 'savepoint', 'release_savepoint', 'rollback', 'begin_twophase', 'prepare_twophase', 'commit_twophase'] ) class ProxyConnectionTest(fixtures.TestBase): """These are the same tests as EngineEventsTest, except using the deprecated ConnectionProxy interface. """ __requires__ = 'ad_hoc_engines', @testing.uses_deprecated(r'.*Use event.listen') @testing.fails_on('firebird', 'Data type unknown') def test_proxy(self): stmts = [] cursor_stmts = [] class MyProxy(ConnectionProxy): def execute( self, conn, execute, clauseelement, *multiparams, **params ): stmts.append((str(clauseelement), params, multiparams)) return execute(clauseelement, *multiparams, **params) def cursor_execute( self, execute, cursor, statement, parameters, context, executemany, ): cursor_stmts.append((str(statement), parameters, None)) return execute(cursor, statement, parameters, context) def assert_stmts(expected, received): for stmt, params, posn in expected: if not received: assert False, "Nothing available for stmt: %s" % stmt while received: teststmt, testparams, testmultiparams = \ received.pop(0) teststmt = re.compile(r'[\n\t ]+', re.M).sub(' ', teststmt).strip() if teststmt.startswith(stmt) and (testparams == params or testparams == posn): break for engine in \ engines.testing_engine(options=dict(implicit_returning=False, proxy=MyProxy())), \ engines.testing_engine(options=dict(implicit_returning=False, proxy=MyProxy(), strategy='threadlocal')): m = MetaData(engine) t1 = Table('t1', m, Column('c1', Integer, primary_key=True), Column('c2', String(50), default=func.lower('Foo'), primary_key=True) ) m.create_all() try: t1.insert().execute(c1=5, c2='some data') t1.insert().execute(c1=6) eq_(engine.execute('select * from t1').fetchall(), [(5, 'some data'), (6, 'foo')]) finally: m.drop_all() engine.dispose() compiled = [('CREATE TABLE t1', {}, None), ('INSERT INTO t1 (c1, c2)', {'c2': 'some data', 'c1': 5}, None), ('INSERT INTO t1 (c1, c2)', {'c1': 6}, None), ('select * from t1', {}, None), ('DROP TABLE t1', {}, None)] if not testing.against('oracle+zxjdbc'): # or engine.dialect.pr # eexecute_pk_sequence # s: cursor = [ ('CREATE TABLE t1', {}, ()), ('INSERT INTO t1 (c1, c2)', {'c2': 'some data', 'c1' : 5}, (5, 'some data')), ('SELECT lower', {'lower_2': 'Foo'}, ('Foo', )), ('INSERT INTO t1 (c1, c2)', {'c2': 'foo', 'c1': 6}, (6, 'foo')), ('select * from t1', {}, ()), ('DROP TABLE t1', {}, ()), ] else: insert2_params = 6, 'Foo' if testing.against('oracle+zxjdbc'): insert2_params += (ReturningParam(12), ) cursor = [('CREATE TABLE t1', {}, ()), ('INSERT INTO t1 (c1, c2)', {'c2': 'some data' , 'c1': 5}, (5, 'some data')), ('INSERT INTO t1 (c1, c2)', {'c1': 6, 'lower_2': 'Foo'}, insert2_params), ('select * from t1', {}, ()), ('DROP TABLE t1' , {}, ())] # bind param name 'lower_2' might # be incorrect assert_stmts(compiled, stmts) assert_stmts(cursor, cursor_stmts) @testing.uses_deprecated(r'.*Use event.listen') def test_options(self): canary = [] class TrackProxy(ConnectionProxy): def __getattribute__(self, key): fn = object.__getattribute__(self, key) def go(*arg, **kw): canary.append(fn.__name__) return fn(*arg, **kw) return go engine = engines.testing_engine(options={'proxy':TrackProxy()}) conn = engine.connect() c2 = 
conn.execution_options(foo='bar') eq_(c2._execution_options, {'foo':'bar'}) c2.execute(select([1])) c3 = c2.execution_options(bar='bat') eq_(c3._execution_options, {'foo':'bar', 'bar':'bat'}) eq_(canary, ['execute', 'cursor_execute']) @testing.uses_deprecated(r'.*Use event.listen') def test_transactional(self): canary = [] class TrackProxy(ConnectionProxy): def __getattribute__(self, key): fn = object.__getattribute__(self, key) def go(*arg, **kw): canary.append(fn.__name__) return fn(*arg, **kw) return go engine = engines.testing_engine(options={'proxy':TrackProxy()}) conn = engine.connect() trans = conn.begin() conn.execute(select([1])) trans.rollback() trans = conn.begin() conn.execute(select([1])) trans.commit() eq_(canary, [ 'begin', 'execute', 'cursor_execute', 'rollback', 'begin', 'execute', 'cursor_execute', 'commit', ]) @testing.uses_deprecated(r'.*Use event.listen') @testing.requires.savepoints @testing.requires.two_phase_transactions def test_transactional_advanced(self): canary = [] class TrackProxy(ConnectionProxy): def __getattribute__(self, key): fn = object.__getattribute__(self, key) def go(*arg, **kw): canary.append(fn.__name__) return fn(*arg, **kw) return go engine = engines.testing_engine(options={'proxy':TrackProxy()}) conn = engine.connect() trans = conn.begin() trans2 = conn.begin_nested() conn.execute(select([1])) trans2.rollback() trans2 = conn.begin_nested() conn.execute(select([1])) trans2.commit() trans.rollback() trans = conn.begin_twophase() conn.execute(select([1])) trans.prepare() trans.commit() canary = [t for t in canary if t not in ('cursor_execute', 'execute')] eq_(canary, ['begin', 'savepoint', 'rollback_savepoint', 'savepoint', 'release_savepoint', 'rollback', 'begin_twophase', 'prepare_twophase', 'commit_twophase'] ) SQLAlchemy-0.8.4/test/engine/test_parseconnect.py0000644000076500000240000003432412251150015022550 0ustar classicstaff00000000000000from sqlalchemy.testing import assert_raises, assert_raises_message, eq_ import ConfigParser import StringIO import sqlalchemy.engine.url as url from sqlalchemy import create_engine, engine_from_config, exc, pool from sqlalchemy.engine.util import _coerce_config from sqlalchemy.engine.default import DefaultDialect import sqlalchemy as tsa from sqlalchemy.testing import fixtures from sqlalchemy import testing from sqlalchemy.testing.mock import Mock class ParseConnectTest(fixtures.TestBase): def test_rfc1738(self): for text in ( 'dbtype://username:password@hostspec:110//usr/db_file.db', 'dbtype://username:password@hostspec/database', 'dbtype+apitype://username:password@hostspec/database', 'dbtype://username:password@hostspec', 'dbtype://username:password@/database', 'dbtype://username@hostspec', 'dbtype://username:password@127.0.0.1:1521', 'dbtype://hostspec/database', 'dbtype://hostspec', 'dbtype://hostspec/?arg1=val1&arg2=val2', 'dbtype+apitype:///database', 'dbtype:///:memory:', 'dbtype:///foo/bar/im/a/file', 'dbtype:///E:/work/src/LEM/db/hello.db', 'dbtype:///E:/work/src/LEM/db/hello.db?foo=bar&hoho=lala', 'dbtype://', 'dbtype://username:password@/database', 'dbtype:////usr/local/_xtest@example.com/members.db', 'dbtype://username:apples%2Foranges@hostspec/database', 'dbtype://username:password@[2001:da8:2004:1000:202:116:160:90]/database?foo=bar', 'dbtype://username:password@[2001:da8:2004:1000:202:116:160:90]:80/database?foo=bar' ): u = url.make_url(text) assert u.drivername in ('dbtype', 'dbtype+apitype') assert u.username in ('username', None) assert u.password in ('password', 'apples/oranges', None) 
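            # host and database take several forms here (IPv6 literal, filesystem
            # path, empty string), so compare against the full set of expected values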
assert u.host in ('hostspec', '127.0.0.1', '2001:da8:2004:1000:202:116:160:90', '', None), u.host assert u.database in ('database', '/usr/local/_xtest@example.com/members.db', '/usr/db_file.db', ':memory:', '', 'foo/bar/im/a/file', 'E:/work/src/LEM/db/hello.db', None), u.database eq_(str(u), text) class DialectImportTest(fixtures.TestBase): def test_import_base_dialects(self): # the globals() somehow makes it for the exec() + nose3. for name in ( 'mysql', 'firebird', 'postgresql', 'sqlite', 'oracle', 'mssql', ): exec ('from sqlalchemy.dialects import %s\ndialect = ' '%s.dialect()' % (name, name), globals()) eq_(dialect.name, name) class CreateEngineTest(fixtures.TestBase): """test that create_engine arguments of different types get propagated properly""" def test_connect_query(self): dbapi = MockDBAPI(foober='12', lala='18', fooz='somevalue') e = \ create_engine('postgresql://scott:tiger@somehost/test?foobe' 'r=12&lala=18&fooz=somevalue', module=dbapi, _initialize=False) c = e.connect() def test_kwargs(self): dbapi = MockDBAPI(foober=12, lala=18, hoho={'this': 'dict'}, fooz='somevalue') e = \ create_engine('postgresql://scott:tiger@somehost/test?fooz=' 'somevalue', connect_args={'foober': 12, 'lala': 18, 'hoho': {'this': 'dict'}}, module=dbapi, _initialize=False) c = e.connect() def test_coerce_config(self): raw = r""" [prefixed] sqlalchemy.url=postgresql://scott:tiger@somehost/test?fooz=somevalue sqlalchemy.convert_unicode=0 sqlalchemy.echo=false sqlalchemy.echo_pool=1 sqlalchemy.max_overflow=2 sqlalchemy.pool_recycle=50 sqlalchemy.pool_size=2 sqlalchemy.pool_threadlocal=1 sqlalchemy.pool_timeout=10 [plain] url=postgresql://scott:tiger@somehost/test?fooz=somevalue convert_unicode=0 echo=0 echo_pool=1 max_overflow=2 pool_recycle=50 pool_size=2 pool_threadlocal=1 pool_timeout=10 """ ini = ConfigParser.ConfigParser() ini.readfp(StringIO.StringIO(raw)) expected = { 'url': 'postgresql://scott:tiger@somehost/test?fooz=somevalue', 'convert_unicode': 0, 'echo': False, 'echo_pool': True, 'max_overflow': 2, 'pool_recycle': 50, 'pool_size': 2, 'pool_threadlocal': True, 'pool_timeout': 10, } prefixed = dict(ini.items('prefixed')) self.assert_(_coerce_config(prefixed, 'sqlalchemy.') == expected) plain = dict(ini.items('plain')) self.assert_(_coerce_config(plain, '') == expected) def test_engine_from_config(self): dbapi = mock_dbapi config = \ {'sqlalchemy.url': 'postgresql://scott:tiger@somehost/test'\ '?fooz=somevalue', 'sqlalchemy.pool_recycle': '50', 'sqlalchemy.echo': 'true'} e = engine_from_config(config, module=dbapi, _initialize=False) assert e.pool._recycle == 50 assert e.url \ == url.make_url('postgresql://scott:tiger@somehost/test?foo' 'z=somevalue') assert e.echo is True for param, values in [ ('convert_unicode', ('true', 'false', 'force')), ('echo', ('true', 'false', 'debug')), ('echo_pool', ('true', 'false', 'debug')), ('use_native_unicode', ('true', 'false')), ]: for value in values: config = { 'sqlalchemy.url': 'postgresql://scott:tiger@somehost/test', 'sqlalchemy.%s' % param : value } cfg = _coerce_config(config, 'sqlalchemy.') assert cfg[param] == {'true':True, 'false':False}.get(value, value) def test_custom(self): dbapi = MockDBAPI(foober=12, lala=18, hoho={'this': 'dict'}, fooz='somevalue') def connect(): return dbapi.connect(foober=12, lala=18, fooz='somevalue', hoho={'this': 'dict'}) # start the postgresql dialect, but put our mock DBAPI as the # module instead of psycopg e = create_engine('postgresql://', creator=connect, module=dbapi, _initialize=False) c = e.connect() def 
test_recycle(self): dbapi = MockDBAPI(foober=12, lala=18, hoho={'this': 'dict'}, fooz='somevalue') e = create_engine('postgresql://', pool_recycle=472, module=dbapi, _initialize=False) assert e.pool._recycle == 472 def test_reset_on_return(self): dbapi = MockDBAPI(foober=12, lala=18, hoho={'this': 'dict'}, fooz='somevalue') for (value, expected) in [ ('rollback', pool.reset_rollback), ('commit', pool.reset_commit), (None, pool.reset_none), (True, pool.reset_rollback), (False, pool.reset_none), ]: e = create_engine('postgresql://', pool_reset_on_return=value, module=dbapi, _initialize=False) assert e.pool._reset_on_return is expected assert_raises( exc.ArgumentError, create_engine, "postgresql://", pool_reset_on_return='hi', module=dbapi, _initialize=False ) def test_bad_args(self): assert_raises(exc.ArgumentError, create_engine, 'foobar://', module=mock_dbapi) # bad arg assert_raises(TypeError, create_engine, 'postgresql://', use_ansi=True, module=mock_dbapi) # bad arg assert_raises( TypeError, create_engine, 'oracle://', lala=5, use_ansi=True, module=mock_dbapi, ) assert_raises(TypeError, create_engine, 'postgresql://', lala=5, module=mock_dbapi) assert_raises(TypeError, create_engine, 'sqlite://', lala=5, module=mock_sqlite_dbapi) assert_raises(TypeError, create_engine, 'mysql+mysqldb://', use_unicode=True, module=mock_dbapi) @testing.requires.sqlite def test_wraps_connect_in_dbapi(self): e = create_engine('sqlite://') sqlite3 = e.dialect.dbapi dbapi = MockDBAPI() dbapi.Error = sqlite3.Error, dbapi.ProgrammingError = sqlite3.ProgrammingError dbapi.connect = Mock(side_effect=sqlite3.ProgrammingError("random error")) try: create_engine('sqlite://', module=dbapi).connect() assert False except tsa.exc.DBAPIError, de: assert not de.connection_invalidated @testing.requires.sqlite def test_dont_touch_non_dbapi_exception_on_connect(self): e = create_engine('sqlite://') sqlite3 = e.dialect.dbapi dbapi = MockDBAPI() dbapi.Error = sqlite3.Error, dbapi.ProgrammingError = sqlite3.ProgrammingError dbapi.connect = Mock(side_effect=TypeError("I'm not a DBAPI error")) e = create_engine('sqlite://', module=dbapi) e.dialect.is_disconnect = is_disconnect = Mock() assert_raises_message( TypeError, "I'm not a DBAPI error", e.connect ) eq_(is_disconnect.call_count, 0) def test_ensure_dialect_does_is_disconnect_no_conn(self): """test that is_disconnect() doesn't choke if no connection, cursor given.""" dialect = testing.db.dialect dbapi = dialect.dbapi assert not dialect.is_disconnect(dbapi.OperationalError("test"), None, None) @testing.requires.sqlite def test_invalidate_on_connect(self): """test that is_disconnect() is called during connect. interpretation of connection failures are not supported by every backend. 
""" e = create_engine('sqlite://') sqlite3 = e.dialect.dbapi dbapi = MockDBAPI() dbapi.Error = sqlite3.Error, dbapi.ProgrammingError = sqlite3.ProgrammingError dbapi.connect = Mock(side_effect=sqlite3.ProgrammingError( "Cannot operate on a closed database.")) try: create_engine('sqlite://', module=dbapi).connect() assert False except tsa.exc.DBAPIError, de: assert de.connection_invalidated def test_urlattr(self): """test the url attribute on ``Engine``.""" e = create_engine('mysql://scott:tiger@localhost/test', module=mock_dbapi, _initialize=False) u = url.make_url('mysql://scott:tiger@localhost/test') e2 = create_engine(u, module=mock_dbapi, _initialize=False) assert e.url.drivername == e2.url.drivername == 'mysql' assert e.url.username == e2.url.username == 'scott' assert e2.url is u assert str(u) == 'mysql://scott:tiger@localhost/test' assert repr(u) == 'mysql://scott:***@localhost/test' assert repr(e) == 'Engine(mysql://scott:***@localhost/test)' assert repr(e2) == 'Engine(mysql://scott:***@localhost/test)' def test_poolargs(self): """test that connection pool args make it thru""" e = create_engine( 'postgresql://', creator=None, pool_recycle=50, echo_pool=None, module=mock_dbapi, _initialize=False, ) assert e.pool._recycle == 50 # these args work for QueuePool e = create_engine( 'postgresql://', max_overflow=8, pool_timeout=60, poolclass=tsa.pool.QueuePool, module=mock_dbapi, _initialize=False, ) # but not SingletonThreadPool assert_raises( TypeError, create_engine, 'sqlite://', max_overflow=8, pool_timeout=60, poolclass=tsa.pool.SingletonThreadPool, module=mock_sqlite_dbapi, _initialize=False, ) class TestRegNewDBAPI(fixtures.TestBase): def test_register_base(self): from sqlalchemy.dialects import registry registry.register("mockdialect", __name__, "MockDialect") e = create_engine("mockdialect://") assert isinstance(e.dialect, MockDialect) def test_register_dotted(self): from sqlalchemy.dialects import registry registry.register("mockdialect.foob", __name__, "MockDialect") e = create_engine("mockdialect+foob://") assert isinstance(e.dialect, MockDialect) def test_register_legacy(self): from sqlalchemy.dialects import registry tokens = __name__.split(".") global dialect dialect = MockDialect registry.register("mockdialect.foob", ".".join(tokens[0:-1]), tokens[-1]) e = create_engine("mockdialect+foob://") assert isinstance(e.dialect, MockDialect) def test_register_per_dbapi(self): from sqlalchemy.dialects import registry registry.register("mysql.my_mock_dialect", __name__, "MockDialect") e = create_engine("mysql+my_mock_dialect://") assert isinstance(e.dialect, MockDialect) class MockDialect(DefaultDialect): @classmethod def dbapi(cls, **kw): return MockDBAPI() def MockDBAPI(**assert_kwargs): connection = Mock(get_server_version_info=Mock(return_value='5.0')) def connect(*args, **kwargs): for k in assert_kwargs: assert k in kwargs, 'key %s not present in dictionary' % k eq_( kwargs[k], assert_kwargs[k] ) return connection return Mock( sqlite_version_info=(99, 9, 9,), version_info=(99, 9, 9,), sqlite_version='99.9.9', paramstyle='named', connect=Mock(side_effect=connect) ) mock_dbapi = MockDBAPI() mock_sqlite_dbapi = msd = MockDBAPI() SQLAlchemy-0.8.4/test/engine/test_pool.py0000644000076500000240000012346412251150015021041 0ustar classicstaff00000000000000from __future__ import with_statement import threading import time from sqlalchemy import pool, select, event import sqlalchemy as tsa from sqlalchemy import testing from sqlalchemy.testing.util import gc_collect, lazy_gc from 
sqlalchemy.testing import eq_, assert_raises, is_not_ from sqlalchemy.testing.engines import testing_engine from sqlalchemy.testing import fixtures from sqlalchemy.testing.mock import Mock, call join_timeout = 10 def MockDBAPI(): def cursor(): while True: yield Mock() def connect(): while True: yield Mock(cursor=Mock(side_effect=cursor())) def shutdown(value): if value: db.connect = Mock(side_effect=Exception("connect failed")) else: db.connect = Mock(side_effect=connect()) db = Mock(connect=Mock(side_effect=connect()), shutdown=shutdown, _shutdown=False) return db class PoolTestBase(fixtures.TestBase): def setup(self): pool.clear_managers() @classmethod def teardown_class(cls): pool.clear_managers() def _queuepool_fixture(self, **kw): dbapi, pool = self._queuepool_dbapi_fixture(**kw) return pool def _queuepool_dbapi_fixture(self, **kw): dbapi = MockDBAPI() return dbapi, pool.QueuePool(creator=lambda: dbapi.connect('foo.db'), **kw) class PoolTest(PoolTestBase): def test_manager(self): manager = pool.manage(MockDBAPI(), use_threadlocal=True) c1 = manager.connect('foo.db') c2 = manager.connect('foo.db') c3 = manager.connect('bar.db') c4 = manager.connect("foo.db", bar="bat") c5 = manager.connect("foo.db", bar="hoho") c6 = manager.connect("foo.db", bar="bat") assert c1.cursor() is not None assert c1 is c2 assert c1 is not c3 assert c4 is c6 assert c4 is not c5 def test_manager_with_key(self): dbapi = MockDBAPI() manager = pool.manage(dbapi, use_threadlocal=True) c1 = manager.connect('foo.db', sa_pool_key="a") c2 = manager.connect('foo.db', sa_pool_key="b") c3 = manager.connect('bar.db', sa_pool_key="a") assert c1.cursor() is not None assert c1 is not c2 assert c1 is c3 eq_(dbapi.connect.mock_calls, [ call("foo.db"), call("foo.db"), ] ) def test_bad_args(self): manager = pool.manage(MockDBAPI()) manager.connect(None) def test_non_thread_local_manager(self): manager = pool.manage(MockDBAPI(), use_threadlocal=False) connection = manager.connect('foo.db') connection2 = manager.connect('foo.db') self.assert_(connection.cursor() is not None) self.assert_(connection is not connection2) @testing.fails_on('+pyodbc', "pyodbc cursor doesn't implement tuple __eq__") def test_cursor_iterable(self): conn = testing.db.raw_connection() cursor = conn.cursor() cursor.execute(str(select([1], bind=testing.db))) expected = [(1, )] for row in cursor: eq_(row, expected.pop(0)) def test_no_connect_on_recreate(self): def creator(): raise Exception("no creates allowed") for cls in (pool.SingletonThreadPool, pool.StaticPool, pool.QueuePool, pool.NullPool, pool.AssertionPool): p = cls(creator=creator) p.dispose() p2 = p.recreate() assert p2.__class__ is cls mock_dbapi = MockDBAPI() p = cls(creator=mock_dbapi.connect) conn = p.connect() conn.close() mock_dbapi.connect.side_effect = Exception("error!") p.dispose() p.recreate() def testthreadlocal_del(self): self._do_testthreadlocal(useclose=False) def testthreadlocal_close(self): self._do_testthreadlocal(useclose=True) def _do_testthreadlocal(self, useclose=False): dbapi = MockDBAPI() for p in pool.QueuePool(creator=dbapi.connect, pool_size=3, max_overflow=-1, use_threadlocal=True), \ pool.SingletonThreadPool(creator=dbapi.connect, use_threadlocal=True): c1 = p.connect() c2 = p.connect() self.assert_(c1 is c2) c3 = p.unique_connection() self.assert_(c3 is not c1) if useclose: c2.close() else: c2 = None c2 = p.connect() self.assert_(c1 is c2) self.assert_(c3 is not c1) if useclose: c2.close() else: c2 = None lazy_gc() if useclose: c1 = p.connect() c2 = p.connect() c3 = 
p.connect() c3.close() c2.close() self.assert_(c1.connection is not None) c1.close() c1 = c2 = c3 = None # extra tests with QueuePool to ensure connections get # __del__()ed when dereferenced if isinstance(p, pool.QueuePool): lazy_gc() self.assert_(p.checkedout() == 0) c1 = p.connect() c2 = p.connect() if useclose: c2.close() c1.close() else: c2 = None c1 = None lazy_gc() self.assert_(p.checkedout() == 0) def test_info(self): p = self._queuepool_fixture(pool_size=1, max_overflow=0) c = p.connect() self.assert_(not c.info) self.assert_(c.info is c._connection_record.info) c.info['foo'] = 'bar' c.close() del c c = p.connect() self.assert_('foo' in c.info) c.invalidate() c = p.connect() self.assert_('foo' not in c.info) c.info['foo2'] = 'bar2' c.detach() self.assert_('foo2' in c.info) c2 = p.connect() is_not_(c.connection, c2.connection) assert not c2.info assert 'foo2' in c.info class PoolDialectTest(PoolTestBase): def _dialect(self): canary = [] class PoolDialect(object): def do_rollback(self, dbapi_connection): canary.append('R') dbapi_connection.rollback() def do_commit(self, dbapi_connection): canary.append('C') dbapi_connection.commit() def do_close(self, dbapi_connection): canary.append('CL') dbapi_connection.close() return PoolDialect(), canary def _do_test(self, pool_cls, assertion): mock_dbapi = MockDBAPI() dialect, canary = self._dialect() p = pool_cls(creator=mock_dbapi.connect) p._dialect = dialect conn = p.connect() conn.close() p.dispose() p.recreate() conn = p.connect() conn.close() eq_(canary, assertion) def test_queue_pool(self): self._do_test(pool.QueuePool, ['R', 'CL', 'R']) def test_assertion_pool(self): self._do_test(pool.AssertionPool, ['R', 'CL', 'R']) def test_singleton_pool(self): self._do_test(pool.SingletonThreadPool, ['R', 'CL', 'R']) def test_null_pool(self): self._do_test(pool.NullPool, ['R', 'CL', 'R', 'CL']) def test_static_pool(self): self._do_test(pool.StaticPool, ['R', 'R']) class PoolEventsTest(PoolTestBase): def _first_connect_event_fixture(self): p = self._queuepool_fixture() canary = [] def first_connect(*arg, **kw): canary.append('first_connect') event.listen(p, 'first_connect', first_connect) return p, canary def _connect_event_fixture(self): p = self._queuepool_fixture() canary = [] def connect(*arg, **kw): canary.append('connect') event.listen(p, 'connect', connect) return p, canary def _checkout_event_fixture(self): p = self._queuepool_fixture() canary = [] def checkout(*arg, **kw): canary.append('checkout') event.listen(p, 'checkout', checkout) return p, canary def _checkin_event_fixture(self): p = self._queuepool_fixture() canary = [] def checkin(*arg, **kw): canary.append('checkin') event.listen(p, 'checkin', checkin) return p, canary def _reset_event_fixture(self): p = self._queuepool_fixture() canary = [] def reset(*arg, **kw): canary.append('reset') event.listen(p, 'reset', reset) return p, canary def test_first_connect_event(self): p, canary = self._first_connect_event_fixture() c1 = p.connect() eq_(canary, ['first_connect']) def test_first_connect_event_fires_once(self): p, canary = self._first_connect_event_fixture() c1 = p.connect() c2 = p.connect() eq_(canary, ['first_connect']) def test_first_connect_on_previously_recreated(self): p, canary = self._first_connect_event_fixture() p2 = p.recreate() c1 = p.connect() c2 = p2.connect() eq_(canary, ['first_connect', 'first_connect']) def test_first_connect_on_subsequently_recreated(self): p, canary = self._first_connect_event_fixture() c1 = p.connect() p2 = p.recreate() c2 = p2.connect() 
eq_(canary, ['first_connect', 'first_connect']) def test_connect_event(self): p, canary = self._connect_event_fixture() c1 = p.connect() eq_(canary, ['connect']) def test_connect_event_fires_subsequent(self): p, canary = self._connect_event_fixture() c1 = p.connect() c2 = p.connect() eq_(canary, ['connect', 'connect']) def test_connect_on_previously_recreated(self): p, canary = self._connect_event_fixture() p2 = p.recreate() c1 = p.connect() c2 = p2.connect() eq_(canary, ['connect', 'connect']) def test_connect_on_subsequently_recreated(self): p, canary = self._connect_event_fixture() c1 = p.connect() p2 = p.recreate() c2 = p2.connect() eq_(canary, ['connect', 'connect']) def test_checkout_event(self): p, canary = self._checkout_event_fixture() c1 = p.connect() eq_(canary, ['checkout']) def test_checkout_event_fires_subsequent(self): p, canary = self._checkout_event_fixture() c1 = p.connect() c2 = p.connect() eq_(canary, ['checkout', 'checkout']) def test_checkout_event_on_subsequently_recreated(self): p, canary = self._checkout_event_fixture() c1 = p.connect() p2 = p.recreate() c2 = p2.connect() eq_(canary, ['checkout', 'checkout']) def test_checkin_event(self): p, canary = self._checkin_event_fixture() c1 = p.connect() eq_(canary, []) c1.close() eq_(canary, ['checkin']) def test_reset_event(self): p, canary = self._reset_event_fixture() c1 = p.connect() eq_(canary, []) c1.close() eq_(canary, ['reset']) def test_checkin_event_gc(self): p, canary = self._checkin_event_fixture() c1 = p.connect() eq_(canary, []) del c1 lazy_gc() eq_(canary, ['checkin']) def test_checkin_event_on_subsequently_recreated(self): p, canary = self._checkin_event_fixture() c1 = p.connect() p2 = p.recreate() c2 = p2.connect() eq_(canary, []) c1.close() eq_(canary, ['checkin']) c2.close() eq_(canary, ['checkin', 'checkin']) def test_listen_targets_scope(self): canary = [] def listen_one(*args): canary.append("listen_one") def listen_two(*args): canary.append("listen_two") def listen_three(*args): canary.append("listen_three") def listen_four(*args): canary.append("listen_four") engine = testing_engine(testing.db.url) event.listen(pool.Pool, 'connect', listen_one) event.listen(engine.pool, 'connect', listen_two) event.listen(engine, 'connect', listen_three) event.listen(engine.__class__, 'connect', listen_four) engine.execute(select([1])).close() eq_( canary, ["listen_one", "listen_four", "listen_two", "listen_three"] ) def test_listen_targets_per_subclass(self): """test that listen() called on a subclass remains specific to that subclass.""" canary = [] def listen_one(*args): canary.append("listen_one") def listen_two(*args): canary.append("listen_two") def listen_three(*args): canary.append("listen_three") event.listen(pool.Pool, 'connect', listen_one) event.listen(pool.QueuePool, 'connect', listen_two) event.listen(pool.SingletonThreadPool, 'connect', listen_three) p1 = pool.QueuePool(creator=MockDBAPI().connect) p2 = pool.SingletonThreadPool(creator=MockDBAPI().connect) assert listen_one in p1.dispatch.connect assert listen_two in p1.dispatch.connect assert listen_three not in p1.dispatch.connect assert listen_one in p2.dispatch.connect assert listen_two not in p2.dispatch.connect assert listen_three in p2.dispatch.connect p1.connect() eq_(canary, ["listen_one", "listen_two"]) p2.connect() eq_(canary, ["listen_one", "listen_two", "listen_one", "listen_three"]) def teardown(self): # TODO: need to get remove() functionality # going pool.Pool.dispatch._clear() class DeprecatedPoolListenerTest(PoolTestBase): 
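    # These tests exercise the legacy PoolListener / Pool.add_listener interface,
    # which is deprecated in favor of the event.listen() hooks covered by
    # PoolEventsTest above.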
@testing.requires.predictable_gc @testing.uses_deprecated(r".*Use event.listen") def test_listeners(self): class InstrumentingListener(object): def __init__(self): if hasattr(self, 'connect'): self.connect = self.inst_connect if hasattr(self, 'first_connect'): self.first_connect = self.inst_first_connect if hasattr(self, 'checkout'): self.checkout = self.inst_checkout if hasattr(self, 'checkin'): self.checkin = self.inst_checkin self.clear() def clear(self): self.connected = [] self.first_connected = [] self.checked_out = [] self.checked_in = [] def assert_total(innerself, conn, fconn, cout, cin): eq_(len(innerself.connected), conn) eq_(len(innerself.first_connected), fconn) eq_(len(innerself.checked_out), cout) eq_(len(innerself.checked_in), cin) def assert_in(innerself, item, in_conn, in_fconn, in_cout, in_cin): self.assert_((item in innerself.connected) == in_conn) self.assert_((item in innerself.first_connected) == in_fconn) self.assert_((item in innerself.checked_out) == in_cout) self.assert_((item in innerself.checked_in) == in_cin) def inst_connect(self, con, record): print("connect(%s, %s)" % (con, record)) assert con is not None assert record is not None self.connected.append(con) def inst_first_connect(self, con, record): print("first_connect(%s, %s)" % (con, record)) assert con is not None assert record is not None self.first_connected.append(con) def inst_checkout(self, con, record, proxy): print("checkout(%s, %s, %s)" % (con, record, proxy)) assert con is not None assert record is not None assert proxy is not None self.checked_out.append(con) def inst_checkin(self, con, record): print("checkin(%s, %s)" % (con, record)) # con can be None if invalidated assert record is not None self.checked_in.append(con) class ListenAll(tsa.interfaces.PoolListener, InstrumentingListener): pass class ListenConnect(InstrumentingListener): def connect(self, con, record): pass class ListenFirstConnect(InstrumentingListener): def first_connect(self, con, record): pass class ListenCheckOut(InstrumentingListener): def checkout(self, con, record, proxy, num): pass class ListenCheckIn(InstrumentingListener): def checkin(self, con, record): pass def assert_listeners(p, total, conn, fconn, cout, cin): for instance in (p, p.recreate()): self.assert_(len(instance.dispatch.connect) == conn) self.assert_(len(instance.dispatch.first_connect) == fconn) self.assert_(len(instance.dispatch.checkout) == cout) self.assert_(len(instance.dispatch.checkin) == cin) p = self._queuepool_fixture() assert_listeners(p, 0, 0, 0, 0, 0) p.add_listener(ListenAll()) assert_listeners(p, 1, 1, 1, 1, 1) p.add_listener(ListenConnect()) assert_listeners(p, 2, 2, 1, 1, 1) p.add_listener(ListenFirstConnect()) assert_listeners(p, 3, 2, 2, 1, 1) p.add_listener(ListenCheckOut()) assert_listeners(p, 4, 2, 2, 2, 1) p.add_listener(ListenCheckIn()) assert_listeners(p, 5, 2, 2, 2, 2) del p snoop = ListenAll() p = self._queuepool_fixture(listeners=[snoop]) assert_listeners(p, 1, 1, 1, 1, 1) c = p.connect() snoop.assert_total(1, 1, 1, 0) cc = c.connection snoop.assert_in(cc, True, True, True, False) c.close() snoop.assert_in(cc, True, True, True, True) del c, cc snoop.clear() # this one depends on immediate gc c = p.connect() cc = c.connection snoop.assert_in(cc, False, False, True, False) snoop.assert_total(0, 0, 1, 0) del c, cc lazy_gc() snoop.assert_total(0, 0, 1, 1) p.dispose() snoop.clear() c = p.connect() c.close() c = p.connect() snoop.assert_total(1, 0, 2, 1) c.close() snoop.assert_total(1, 0, 2, 2) # invalidation p.dispose() 
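        # after dispose(), the next connect() must open a brand new DBAPI
        # connection, so the 'connected' count below starts again from 1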
snoop.clear() c = p.connect() snoop.assert_total(1, 0, 1, 0) c.invalidate() snoop.assert_total(1, 0, 1, 1) c.close() snoop.assert_total(1, 0, 1, 1) del c lazy_gc() snoop.assert_total(1, 0, 1, 1) c = p.connect() snoop.assert_total(2, 0, 2, 1) c.close() del c lazy_gc() snoop.assert_total(2, 0, 2, 2) # detached p.dispose() snoop.clear() c = p.connect() snoop.assert_total(1, 0, 1, 0) c.detach() snoop.assert_total(1, 0, 1, 0) c.close() del c snoop.assert_total(1, 0, 1, 0) c = p.connect() snoop.assert_total(2, 0, 2, 0) c.close() del c snoop.assert_total(2, 0, 2, 1) # recreated p = p.recreate() snoop.clear() c = p.connect() snoop.assert_total(1, 1, 1, 0) c.close() snoop.assert_total(1, 1, 1, 1) c = p.connect() snoop.assert_total(1, 1, 2, 1) c.close() snoop.assert_total(1, 1, 2, 2) @testing.uses_deprecated(r".*Use event.listen") def test_listeners_callables(self): def connect(dbapi_con, con_record): counts[0] += 1 def checkout(dbapi_con, con_record, con_proxy): counts[1] += 1 def checkin(dbapi_con, con_record): counts[2] += 1 i_all = dict(connect=connect, checkout=checkout, checkin=checkin) i_connect = dict(connect=connect) i_checkout = dict(checkout=checkout) i_checkin = dict(checkin=checkin) for cls in (pool.QueuePool, pool.StaticPool): counts = [0, 0, 0] def assert_listeners(p, total, conn, cout, cin): for instance in (p, p.recreate()): eq_(len(instance.dispatch.connect), conn) eq_(len(instance.dispatch.checkout), cout) eq_(len(instance.dispatch.checkin), cin) p = self._queuepool_fixture() assert_listeners(p, 0, 0, 0, 0) p.add_listener(i_all) assert_listeners(p, 1, 1, 1, 1) p.add_listener(i_connect) assert_listeners(p, 2, 1, 1, 1) p.add_listener(i_checkout) assert_listeners(p, 3, 1, 1, 1) p.add_listener(i_checkin) assert_listeners(p, 4, 1, 1, 1) del p p = self._queuepool_fixture(listeners=[i_all]) assert_listeners(p, 1, 1, 1, 1) c = p.connect() assert counts == [1, 1, 0] c.close() assert counts == [1, 1, 1] c = p.connect() assert counts == [1, 2, 1] p.add_listener(i_checkin) c.close() assert counts == [1, 2, 2] class QueuePoolTest(PoolTestBase): def testqueuepool_del(self): self._do_testqueuepool(useclose=False) def testqueuepool_close(self): self._do_testqueuepool(useclose=True) def _do_testqueuepool(self, useclose=False): p = self._queuepool_fixture(pool_size=3, max_overflow=-1) def status(pool): tup = pool.size(), pool.checkedin(), pool.overflow(), \ pool.checkedout() print('Pool size: %d Connections in pool: %d Current '\ 'Overflow: %d Current Checked out connections: %d' % tup) return tup c1 = p.connect() self.assert_(status(p) == (3, 0, -2, 1)) c2 = p.connect() self.assert_(status(p) == (3, 0, -1, 2)) c3 = p.connect() self.assert_(status(p) == (3, 0, 0, 3)) c4 = p.connect() self.assert_(status(p) == (3, 0, 1, 4)) c5 = p.connect() self.assert_(status(p) == (3, 0, 2, 5)) c6 = p.connect() self.assert_(status(p) == (3, 0, 3, 6)) if useclose: c4.close() c3.close() c2.close() else: c4 = c3 = c2 = None lazy_gc() self.assert_(status(p) == (3, 3, 3, 3)) if useclose: c1.close() c5.close() c6.close() else: c1 = c5 = c6 = None lazy_gc() self.assert_(status(p) == (3, 3, 0, 0)) c1 = p.connect() c2 = p.connect() self.assert_(status(p) == (3, 1, 0, 2), status(p)) if useclose: c2.close() else: c2 = None lazy_gc() self.assert_(status(p) == (3, 2, 0, 1)) c1.close() lazy_gc() assert not pool._refs def test_timeout(self): p = self._queuepool_fixture(pool_size=3, max_overflow=0, timeout=2) c1 = p.connect() c2 = p.connect() c3 = p.connect() now = time.time() try: c4 = p.connect() assert False except 
tsa.exc.TimeoutError: assert int(time.time() - now) == 2 @testing.requires.threading_with_mock def test_timeout_race(self): # test a race condition where the initial connecting threads all race # to queue.Empty, then block on the mutex. each thread consumes a # connection as they go in. when the limit is reached, the remaining # threads go in, and get TimeoutError; even though they never got to # wait for the timeout on queue.get(). the fix involves checking the # timeout again within the mutex, and if so, unlocking and throwing # them back to the start of do_get() dbapi = MockDBAPI() p = pool.QueuePool( creator=lambda: dbapi.connect(delay=.05), pool_size=2, max_overflow=1, use_threadlocal=False, timeout=3) timeouts = [] def checkout(): for x in range(1): now = time.time() try: c1 = p.connect() except tsa.exc.TimeoutError: timeouts.append(time.time() - now) continue time.sleep(4) c1.close() threads = [] for i in range(10): th = threading.Thread(target=checkout) th.start() threads.append(th) for th in threads: th.join(join_timeout) assert len(timeouts) > 0 for t in timeouts: assert t >= 3, "Not all timeouts were >= 3 seconds %r" % timeouts # normally, the timeout should under 4 seconds, # but on a loaded down buildbot it can go up. assert t < 14, "Not all timeouts were < 14 seconds %r" % timeouts def _test_overflow(self, thread_count, max_overflow): gc_collect() dbapi = MockDBAPI() def creator(): time.sleep(.05) return dbapi.connect() p = pool.QueuePool(creator=creator, pool_size=3, timeout=2, max_overflow=max_overflow) peaks = [] def whammy(): for i in range(10): try: con = p.connect() time.sleep(.005) peaks.append(p.overflow()) con.close() del con except tsa.exc.TimeoutError: pass threads = [] for i in range(thread_count): th = threading.Thread(target=whammy) th.start() threads.append(th) for th in threads: th.join(join_timeout) self.assert_(max(peaks) <= max_overflow) lazy_gc() assert not pool._refs def test_overflow_reset_on_failed_connect(self): dbapi = Mock() def failing_dbapi(): time.sleep(2) raise Exception("connection failed") creator = dbapi.connect def create(): return creator() p = pool.QueuePool(creator=create, pool_size=2, max_overflow=3) c1 = p.connect() c2 = p.connect() c3 = p.connect() eq_(p._overflow, 1) creator = failing_dbapi assert_raises(Exception, p.connect) eq_(p._overflow, 1) @testing.requires.threading_with_mock def test_hanging_connect_within_overflow(self): """test that a single connect() call which is hanging does not block other connections from proceeding.""" dbapi = Mock() mutex = threading.Lock() def hanging_dbapi(): time.sleep(2) with mutex: return dbapi.connect() def fast_dbapi(): with mutex: return dbapi.connect() creator = threading.local() def create(): return creator.mock_connector() def run_test(name, pool, should_hang): if should_hang: creator.mock_connector = hanging_dbapi else: creator.mock_connector = fast_dbapi conn = pool.connect() conn.operation(name) time.sleep(1) conn.close() p = pool.QueuePool(creator=create, pool_size=2, max_overflow=3) threads = [ threading.Thread( target=run_test, args=("success_one", p, False)), threading.Thread( target=run_test, args=("success_two", p, False)), threading.Thread( target=run_test, args=("overflow_one", p, True)), threading.Thread( target=run_test, args=("overflow_two", p, False)), threading.Thread( target=run_test, args=("overflow_three", p, False)) ] for t in threads: t.start() time.sleep(.2) for t in threads: t.join(timeout=join_timeout) eq_( dbapi.connect().operation.mock_calls, [call("success_one"), 
call("success_two"), call("overflow_two"), call("overflow_three"), call("overflow_one")] ) @testing.requires.threading_with_mock def test_waiters_handled(self): """test that threads waiting for connections are handled when the pool is replaced. """ mutex = threading.Lock() dbapi = MockDBAPI() def creator(): mutex.acquire() try: return dbapi.connect() finally: mutex.release() success = [] for timeout in (None, 30): for max_overflow in (0, -1, 3): p = pool.QueuePool(creator=creator, pool_size=2, timeout=timeout, max_overflow=max_overflow) def waiter(p, timeout, max_overflow): success_key = (timeout, max_overflow) conn = p.connect() success.append(success_key) time.sleep(.1) conn.close() c1 = p.connect() c2 = p.connect() threads = set() for i in range(2): t = threading.Thread(target=waiter, args=(p, timeout, max_overflow)) t.daemon = True t.start() threads.add(t) # this sleep makes sure that the # two waiter threads hit upon wait() # inside the queue, before we invalidate the other # two conns time.sleep(.2) p2 = p._replace() for t in threads: t.join(join_timeout) eq_(len(success), 12, "successes: %s" % success) @testing.requires.threading_with_mock @testing.requires.python26 def test_notify_waiters(self): dbapi = MockDBAPI() canary = [] def creator1(): canary.append(1) return dbapi.connect() def creator2(): canary.append(2) return dbapi.connect() p1 = pool.QueuePool(creator=creator1, pool_size=1, timeout=None, max_overflow=0) p2 = pool.NullPool(creator=creator2) def waiter(p): conn = p.connect() time.sleep(.5) conn.close() c1 = p1.connect() for i in range(5): t = threading.Thread(target=waiter, args=(p1, )) t.setDaemon(True) t.start() time.sleep(.5) eq_(canary, [1]) p1._pool.abort(p2) time.sleep(1) eq_(canary, [1, 2, 2, 2, 2, 2]) def test_dispose_closes_pooled(self): dbapi = MockDBAPI() p = pool.QueuePool(creator=dbapi.connect, pool_size=2, timeout=None, max_overflow=0) c1 = p.connect() c2 = p.connect() c1_con = c1.connection c2_con = c2.connection c1.close() eq_(c1_con.close.call_count, 0) eq_(c2_con.close.call_count, 0) p.dispose() eq_(c1_con.close.call_count, 1) eq_(c2_con.close.call_count, 0) # currently, if a ConnectionFairy is closed # after the pool has been disposed, there's no # flag that states it should be invalidated # immediately - it just gets returned to the # pool normally... c2.close() eq_(c1_con.close.call_count, 1) eq_(c2_con.close.call_count, 0) # ...and that's the one we'll get back next. c3 = p.connect() assert c3.connection is c2_con @testing.requires.threading_with_mock def test_no_overflow(self): self._test_overflow(40, 0) @testing.requires.threading_with_mock def test_max_overflow(self): self._test_overflow(40, 5) def test_mixed_close(self): pool._refs.clear() p = self._queuepool_fixture(pool_size=3, max_overflow=-1, use_threadlocal=True) c1 = p.connect() c2 = p.connect() assert c1 is c2 c1.close() c2 = None assert p.checkedout() == 1 c1 = None lazy_gc() assert p.checkedout() == 0 lazy_gc() assert not pool._refs def test_overflow_no_gc_tlocal(self): self._test_overflow_no_gc(True) def test_overflow_no_gc(self): self._test_overflow_no_gc(False) def _test_overflow_no_gc(self, threadlocal): p = self._queuepool_fixture(pool_size=2, max_overflow=2) # disable weakref collection of the # underlying connections strong_refs = set() def _conn(): c = p.connect() strong_refs.add(c.connection) return c for j in range(5): # open 4 conns at a time. each time this # will yield two pooled connections + two # overflow connections. 
conns = [_conn() for i in range(4)] for c in conns: c.close() # doing that for a total of 5 times yields # ten overflow connections closed plus the # two pooled connections unclosed. eq_( set([c.close.call_count for c in strong_refs]), set([1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0]) ) @testing.requires.predictable_gc def test_weakref_kaboom(self): p = self._queuepool_fixture(pool_size=3, max_overflow=-1, use_threadlocal=True) c1 = p.connect() c2 = p.connect() c1.close() c2 = None del c1 del c2 gc_collect() assert p.checkedout() == 0 c3 = p.connect() assert c3 is not None def test_trick_the_counter(self): """this is a "flaw" in the connection pool; since threadlocal uses a single ConnectionFairy per thread with an open/close counter, you can fool the counter into giving you a ConnectionFairy with an ambiguous counter. i.e. its not true reference counting.""" p = self._queuepool_fixture(pool_size=3, max_overflow=-1, use_threadlocal=True) c1 = p.connect() c2 = p.connect() assert c1 is c2 c1.close() c2 = p.connect() c2.close() self.assert_(p.checkedout() != 0) c2.close() self.assert_(p.checkedout() == 0) def test_recycle(self): p = self._queuepool_fixture(pool_size=1, max_overflow=0, recycle=3) c1 = p.connect() c_id = id(c1.connection) c1.close() c2 = p.connect() assert id(c2.connection) == c_id c2.close() time.sleep(4) c3 = p.connect() assert id(c3.connection) != c_id def _assert_cleanup_on_pooled_reconnect(self, dbapi, p): # p is QueuePool with size=1, max_overflow=2, # and one connection in the pool that will need to # reconnect when next used (either due to recycle or invalidate) eq_(p.checkedout(), 0) eq_(p._overflow, 0) dbapi.shutdown(True) assert_raises( Exception, p.connect ) eq_(p._overflow, 0) eq_(p.checkedout(), 0) # and not 1 dbapi.shutdown(False) c1 = p.connect() assert p._pool.empty() # poolsize is one, so we're empty OK c2 = p.connect() eq_(p._overflow, 1) # and not 2 # this hangs if p._overflow is 2 c3 = p.connect() def test_error_on_pooled_reconnect_cleanup_invalidate(self): dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=2) c1 = p.connect() c1.invalidate() c1.close() self._assert_cleanup_on_pooled_reconnect(dbapi, p) def test_error_on_pooled_reconnect_cleanup_recycle(self): dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=2, recycle=1) c1 = p.connect() c1.close() time.sleep(1) self._assert_cleanup_on_pooled_reconnect(dbapi, p) def test_invalidate(self): p = self._queuepool_fixture(pool_size=1, max_overflow=0) c1 = p.connect() c_id = c1.connection.id c1.close() c1 = None c1 = p.connect() assert c1.connection.id == c_id c1.invalidate() c1 = None c1 = p.connect() assert c1.connection.id != c_id def test_recreate(self): p = self._queuepool_fixture(reset_on_return=None, pool_size=1, max_overflow=0) p2 = p.recreate() assert p2.size() == 1 assert p2._reset_on_return is pool.reset_none assert p2._use_threadlocal is False assert p2._max_overflow == 0 def test_reconnect(self): """tests reconnect operations at the pool level. 
SA's engine/dialect includes another layer of reconnect support for 'database was lost' errors.""" dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=0) c1 = p.connect() c_id = c1.connection.id c1.close() c1 = None c1 = p.connect() assert c1.connection.id == c_id dbapi.raise_error = True c1.invalidate() c1 = None c1 = p.connect() assert c1.connection.id != c_id def test_detach(self): dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=0) c1 = p.connect() c1.detach() c2 = p.connect() eq_(dbapi.connect.mock_calls, [call("foo.db"), call("foo.db")]) c1_con = c1.connection assert c1_con is not None eq_(c1_con.close.call_count, 0) c1.close() eq_(c1_con.close.call_count, 1) def test_detach_via_invalidate(self): dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=0) c1 = p.connect() c1_con = c1.connection c1.invalidate() assert c1.connection is None eq_(c1_con.close.call_count, 1) c2 = p.connect() assert c2.connection is not c1_con c2_con = c2.connection c2.close() eq_(c2_con.close.call_count, 0) def test_threadfairy(self): p = self._queuepool_fixture(pool_size=3, max_overflow=-1, use_threadlocal=True) c1 = p.connect() c1.close() c2 = p.connect() assert c2.connection is not None class SingletonThreadPoolTest(PoolTestBase): @testing.requires.threading_with_mock def test_cleanup(self): self._test_cleanup(False) @testing.requires.threading_with_mock def test_cleanup_no_gc(self): self._test_cleanup(True) def _test_cleanup(self, strong_refs): """test that the pool's connections are OK after cleanup() has been called.""" dbapi = MockDBAPI() lock = threading.Lock() def creator(): # the mock iterator isn't threadsafe... with lock: return dbapi.connect() p = pool.SingletonThreadPool(creator=creator, pool_size=3) if strong_refs: sr = set() def _conn(): c = p.connect() sr.add(c.connection) return c else: def _conn(): return p.connect() def checkout(): for x in range(10): c = _conn() assert c c.cursor() c.close() time.sleep(.1) threads = [] for i in range(10): th = threading.Thread(target=checkout) th.start() threads.append(th) for th in threads: th.join(join_timeout) assert len(p._all_conns) == 3 if strong_refs: still_opened = len([c for c in sr if not c.close.call_count]) eq_(still_opened, 3) class AssertionPoolTest(PoolTestBase): def test_connect_error(self): dbapi = MockDBAPI() p = pool.AssertionPool(creator=lambda: dbapi.connect('foo.db')) c1 = p.connect() assert_raises(AssertionError, p.connect) def test_connect_multiple(self): dbapi = MockDBAPI() p = pool.AssertionPool(creator=lambda: dbapi.connect('foo.db')) c1 = p.connect() c1.close() c2 = p.connect() c2.close() c3 = p.connect() assert_raises(AssertionError, p.connect) class NullPoolTest(PoolTestBase): def test_reconnect(self): dbapi = MockDBAPI() p = pool.NullPool(creator=lambda: dbapi.connect('foo.db')) c1 = p.connect() c1.close() c1 = None c1 = p.connect() c1.invalidate() c1 = None c1 = p.connect() dbapi.connect.assert_has_calls([ call('foo.db'), call('foo.db')], any_order=True) class StaticPoolTest(PoolTestBase): def test_recreate(self): dbapi = MockDBAPI() creator = lambda: dbapi.connect('foo.db') p = pool.StaticPool(creator) p2 = p.recreate() assert p._creator is p2._creator SQLAlchemy-0.8.4/test/engine/test_processors.py0000644000076500000240000001114712251150015022264 0ustar classicstaff00000000000000from sqlalchemy.testing import fixtures from sqlalchemy.testing import assert_raises_message, eq_ class _DateProcessorTest(fixtures.TestBase): def test_date_no_string(self): assert_raises_message( 
ValueError, "Couldn't parse date string '2012' - value is not a string", self.module.str_to_date, 2012 ) def test_datetime_no_string(self): assert_raises_message( ValueError, "Couldn't parse datetime string '2012' - value is not a string", self.module.str_to_datetime, 2012 ) def test_time_no_string(self): assert_raises_message( ValueError, "Couldn't parse time string '2012' - value is not a string", self.module.str_to_time, 2012 ) def test_date_invalid_string(self): assert_raises_message( ValueError, "Couldn't parse date string: '5:a'", self.module.str_to_date, "5:a" ) def test_datetime_invalid_string(self): assert_raises_message( ValueError, "Couldn't parse datetime string: '5:a'", self.module.str_to_datetime, "5:a" ) def test_time_invalid_string(self): assert_raises_message( ValueError, "Couldn't parse time string: '5:a'", self.module.str_to_time, "5:a" ) class PyDateProcessorTest(_DateProcessorTest): @classmethod def setup_class(cls): from sqlalchemy import processors cls.module = type("util", (object,), dict( (k, staticmethod(v)) for k, v in processors.py_fallback().items() ) ) class CDateProcessorTest(_DateProcessorTest): __requires__ = ('cextensions',) @classmethod def setup_class(cls): from sqlalchemy import cprocessors cls.module = cprocessors class _DistillArgsTest(fixtures.TestBase): def test_distill_none(self): eq_( self.module._distill_params(None, None), [] ) def test_distill_no_multi_no_param(self): eq_( self.module._distill_params((), {}), [] ) def test_distill_dict_multi_none_param(self): eq_( self.module._distill_params(None, {"foo": "bar"}), [{"foo": "bar"}] ) def test_distill_dict_multi_empty_param(self): eq_( self.module._distill_params((), {"foo": "bar"}), [{"foo": "bar"}] ) def test_distill_single_dict(self): eq_( self.module._distill_params(({"foo": "bar"},), {}), [{"foo": "bar"}] ) def test_distill_single_list_strings(self): eq_( self.module._distill_params((["foo", "bar"],), {}), [["foo", "bar"]] ) def test_distill_single_list_tuples(self): eq_( self.module._distill_params(([("foo", "bar"), ("bat", "hoho")],), {}), [('foo', 'bar'), ('bat', 'hoho')] ) def test_distill_single_list_tuple(self): eq_( self.module._distill_params(([("foo", "bar")],), {}), [('foo', 'bar')] ) def test_distill_multi_list_tuple(self): eq_( self.module._distill_params( ([("foo", "bar")], [("bar", "bat")]), {} ), ([('foo', 'bar')], [('bar', 'bat')]) ) def test_distill_multi_strings(self): eq_( self.module._distill_params(("foo", "bar"), {}), [('foo', 'bar')] ) def test_distill_single_list_dicts(self): eq_( self.module._distill_params(([{"foo": "bar"}, {"foo": "hoho"}],), {}), [{'foo': 'bar'}, {'foo': 'hoho'}] ) def test_distill_single_string(self): eq_( self.module._distill_params(("arg",), {}), [["arg"]] ) def test_distill_multi_string_tuple(self): eq_( self.module._distill_params((("arg", "arg"),), {}), [("arg", "arg")] ) class PyDistillArgsTest(_DistillArgsTest): @classmethod def setup_class(cls): from sqlalchemy.engine import util cls.module = type("util", (object,), dict( (k, staticmethod(v)) for k, v in util.py_fallback().items() ) ) class CDistillArgsTest(_DistillArgsTest): __requires__ = ('cextensions', ) @classmethod def setup_class(cls): from sqlalchemy import cutils as util cls.module = util SQLAlchemy-0.8.4/test/engine/test_reconnect.py0000644000076500000240000004565212251150015022052 0ustar classicstaff00000000000000from __future__ import with_statement from sqlalchemy.testing import eq_, assert_raises, assert_raises_message import time from sqlalchemy import select, MetaData, 
Integer, String, create_engine, pool from sqlalchemy.testing.schema import Table, Column import sqlalchemy as tsa from sqlalchemy import testing from sqlalchemy.testing import engines from sqlalchemy.testing.util import gc_collect from sqlalchemy import exc from sqlalchemy.testing import fixtures from sqlalchemy.testing.engines import testing_engine from sqlalchemy.testing import is_not_ from sqlalchemy.testing.mock import Mock, call from sqlalchemy import util class MockError(Exception): pass class MockDisconnect(MockError): pass def mock_connection(): def mock_cursor(): def execute(*args, **kwargs): if conn.explode == 'execute': raise MockDisconnect("Lost the DB connection on execute") elif conn.explode in ('execute_no_disconnect', ): raise MockError( "something broke on execute but we didn't lose the connection") elif conn.explode in ('rollback', 'rollback_no_disconnect'): raise MockError( "something broke on execute but we didn't lose the connection") elif args and "SELECT" in args[0]: cursor.description = [('foo', None, None, None, None, None)] else: return def close(): cursor.fetchall = cursor.fetchone = \ Mock(side_effect=MockError("cursor closed")) cursor = Mock( execute=Mock(side_effect=execute), close=Mock(side_effect=close) ) return cursor def cursor(): while True: yield mock_cursor() def rollback(): if conn.explode == 'rollback': raise MockDisconnect("Lost the DB connection on rollback") if conn.explode == 'rollback_no_disconnect': raise MockError( "something broke on rollback but we didn't lose the connection") else: return conn = Mock( rollback=Mock(side_effect=rollback), cursor=Mock(side_effect=cursor()) ) return conn def MockDBAPI(): connections = [] def connect(): while True: conn = mock_connection() connections.append(conn) yield conn def shutdown(explode='execute'): for c in connections: c.explode = explode def dispose(): for c in connections: c.explode = None connections[:] = [] return Mock( connect=Mock(side_effect=connect()), shutdown=Mock(side_effect=shutdown), dispose=Mock(side_effect=dispose), paramstyle='named', connections=connections, Error=MockError ) class MockReconnectTest(fixtures.TestBase): def setup(self): self.dbapi = MockDBAPI() self.db = testing_engine( 'postgresql://foo:bar@localhost/test', options=dict(module=self.dbapi, _initialize=False)) self.mock_connect = call(host='localhost', password='bar', user='foo', database='test') # monkeypatch disconnect checker self.db.dialect.is_disconnect = lambda e, conn, cursor: isinstance(e, MockDisconnect) def teardown(self): self.dbapi.dispose() def test_reconnect(self): """test that an 'is_disconnect' condition will invalidate the connection, and additionally dispose the previous connection pool and recreate.""" db_pool = self.db.pool # make a connection conn = self.db.connect() # connection works conn.execute(select([1])) # create a second connection within the pool, which we'll ensure # also goes away conn2 = self.db.connect() conn2.close() # two connections opened total now assert len(self.dbapi.connections) == 2 # set it to fail self.dbapi.shutdown() assert_raises( tsa.exc.DBAPIError, conn.execute, select([1]) ) # assert was invalidated assert not conn.closed assert conn.invalidated # close shouldnt break conn.close() is_not_(self.db.pool, db_pool) # ensure all connections closed (pool was recycled) eq_( [c.close.mock_calls for c in self.dbapi.connections], [[call()], [call()]] ) conn = self.db.connect() conn.execute(select([1])) conn.close() eq_( [c.close.mock_calls for c in self.dbapi.connections], 
[[call()], [call()], []] ) def test_invalidate_trans(self): conn = self.db.connect() trans = conn.begin() self.dbapi.shutdown() assert_raises( tsa.exc.DBAPIError, conn.execute, select([1]) ) eq_( [c.close.mock_calls for c in self.dbapi.connections], [[call()]] ) assert not conn.closed assert conn.invalidated assert trans.is_active assert_raises_message( tsa.exc.StatementError, "Can't reconnect until invalid transaction is rolled back", conn.execute, select([1]) ) assert trans.is_active assert_raises_message( tsa.exc.InvalidRequestError, "Can't reconnect until invalid transaction is " "rolled back", trans.commit ) assert trans.is_active trans.rollback() assert not trans.is_active conn.execute(select([1])) assert not conn.invalidated eq_( [c.close.mock_calls for c in self.dbapi.connections], [[call()], []] ) def test_conn_reusable(self): conn = self.db.connect() conn.execute(select([1])) eq_( self.dbapi.connect.mock_calls, [self.mock_connect] ) self.dbapi.shutdown() assert_raises( tsa.exc.DBAPIError, conn.execute, select([1]) ) assert not conn.closed assert conn.invalidated eq_( [c.close.mock_calls for c in self.dbapi.connections], [[call()]] ) # test reconnects conn.execute(select([1])) assert not conn.invalidated eq_( [c.close.mock_calls for c in self.dbapi.connections], [[call()], []] ) def test_invalidated_close(self): conn = self.db.connect() self.dbapi.shutdown() assert_raises( tsa.exc.DBAPIError, conn.execute, select([1]) ) conn.close() assert conn.closed assert conn.invalidated assert_raises_message( tsa.exc.StatementError, "This Connection is closed", conn.execute, select([1]) ) def test_noreconnect_execute_plus_closewresult(self): conn = self.db.connect(close_with_result=True) self.dbapi.shutdown("execute_no_disconnect") # raises error assert_raises_message( tsa.exc.DBAPIError, "something broke on execute but we didn't lose the connection", conn.execute, select([1]) ) assert conn.closed assert not conn.invalidated def test_noreconnect_rollback_plus_closewresult(self): conn = self.db.connect(close_with_result=True) self.dbapi.shutdown("rollback_no_disconnect") # raises error assert_raises_message( tsa.exc.DBAPIError, "something broke on rollback but we didn't lose the connection", conn.execute, select([1]) ) assert conn.closed assert not conn.invalidated assert_raises_message( tsa.exc.StatementError, "This Connection is closed", conn.execute, select([1]) ) def test_reconnect_on_reentrant(self): conn = self.db.connect() conn.execute(select([1])) assert len(self.dbapi.connections) == 1 self.dbapi.shutdown("rollback") # raises error assert_raises_message( tsa.exc.DBAPIError, "Lost the DB connection on rollback", conn.execute, select([1]) ) assert not conn.closed assert conn.invalidated def test_reconnect_on_reentrant_plus_closewresult(self): conn = self.db.connect(close_with_result=True) self.dbapi.shutdown("rollback") # raises error assert_raises_message( tsa.exc.DBAPIError, "Lost the DB connection on rollback", conn.execute, select([1]) ) assert conn.closed assert conn.invalidated assert_raises_message( tsa.exc.StatementError, "This Connection is closed", conn.execute, select([1]) ) def test_check_disconnect_no_cursor(self): conn = self.db.connect() result = conn.execute(select([1])) result.cursor.close() conn.close() assert_raises_message( tsa.exc.DBAPIError, "cursor closed", list, result ) def test_dialect_initialize_once(self): from sqlalchemy.engine.base import Engine from sqlalchemy.engine.url import URL from sqlalchemy.engine.default import DefaultDialect from sqlalchemy.pool 
import QueuePool dbapi = self.dbapi mock_dialect = Mock() class MyURL(URL): def get_dialect(self): return Dialect class Dialect(DefaultDialect): initialize = Mock() engine = create_engine(MyURL("foo://"), module=dbapi) c1 = engine.connect() engine.dispose() c2 = engine.connect() eq_(Dialect.initialize.call_count, 1) class CursorErrTest(fixtures.TestBase): def setup(self): def MockDBAPI(): def cursor(): while True: yield Mock( description=[], close=Mock(side_effect=Exception("explode"))) def connect(): while True: yield Mock(cursor=Mock(side_effect=cursor())) return Mock(connect=Mock(side_effect=connect())) dbapi = MockDBAPI() self.db = testing_engine( 'postgresql://foo:bar@localhost/test', options=dict(module=dbapi, _initialize=False)) def test_cursor_explode(self): conn = self.db.connect() result = conn.execute("select foo") result.close() conn.close() def teardown(self): self.db.dispose() def _assert_invalidated(fn, *args): try: fn(*args) assert False except tsa.exc.DBAPIError, e: if not e.connection_invalidated: raise class RealReconnectTest(fixtures.TestBase): def setup(self): self.engine = engines.reconnecting_engine() def teardown(self): self.engine.dispose() @testing.fails_on('+informixdb', "Wrong error thrown, fix in informixdb?") def test_reconnect(self): conn = self.engine.connect() eq_(conn.execute(select([1])).scalar(), 1) assert not conn.closed self.engine.test_shutdown() _assert_invalidated(conn.execute, select([1])) assert not conn.closed assert conn.invalidated assert conn.invalidated eq_(conn.execute(select([1])).scalar(), 1) assert not conn.invalidated # one more time self.engine.test_shutdown() _assert_invalidated(conn.execute, select([1])) assert conn.invalidated eq_(conn.execute(select([1])).scalar(), 1) assert not conn.invalidated conn.close() def test_multiple_invalidate(self): c1 = self.engine.connect() c2 = self.engine.connect() eq_(c1.execute(select([1])).scalar(), 1) p1 = self.engine.pool self.engine.test_shutdown() _assert_invalidated(c1.execute, select([1])) p2 = self.engine.pool _assert_invalidated(c2.execute, select([1])) # pool isn't replaced assert self.engine.pool is p2 def test_ensure_is_disconnect_gets_connection(self): def is_disconnect(e, conn, cursor): # connection is still present assert conn.connection is not None # the error usually occurs on connection.cursor(), # though MySQLdb we get a non-working cursor. 
# assert cursor is None self.engine.dialect.is_disconnect = is_disconnect conn = self.engine.connect() self.engine.test_shutdown() assert_raises( tsa.exc.DBAPIError, conn.execute, select([1]) ) def test_rollback_on_invalid_plain(self): conn = self.engine.connect() trans = conn.begin() conn.invalidate() trans.rollback() @testing.requires.two_phase_transactions def test_rollback_on_invalid_twophase(self): conn = self.engine.connect() trans = conn.begin_twophase() conn.invalidate() trans.rollback() @testing.requires.savepoints def test_rollback_on_invalid_savepoint(self): conn = self.engine.connect() trans = conn.begin() trans2 = conn.begin_nested() conn.invalidate() trans2.rollback() def test_invalidate_twice(self): conn = self.engine.connect() conn.invalidate() conn.invalidate() def test_explode_in_initializer(self): engine = engines.testing_engine() def broken_initialize(connection): connection.execute("select fake_stuff from _fake_table") engine.dialect.initialize = broken_initialize # raises a DBAPIError, not an AttributeError assert_raises(exc.DBAPIError, engine.connect) @testing.skip_if( [lambda: util.py3k, "oracle+cx_oracle"], "Crashes on py3k+cx_oracle") def test_explode_in_initializer_disconnect(self): engine = engines.testing_engine() def broken_initialize(connection): connection.execute("select fake_stuff from _fake_table") engine.dialect.initialize = broken_initialize p1 = engine.pool def is_disconnect(e, conn, cursor): return True engine.dialect.is_disconnect = is_disconnect # invalidate() also doesn't screw up assert_raises(exc.DBAPIError, engine.connect) # pool was recreated assert engine.pool is not p1 @testing.fails_on('+informixdb', "Wrong error thrown, fix in informixdb?") def test_null_pool(self): engine = \ engines.reconnecting_engine(options=dict(poolclass=pool.NullPool)) conn = engine.connect() eq_(conn.execute(select([1])).scalar(), 1) assert not conn.closed engine.test_shutdown() _assert_invalidated(conn.execute, select([1])) assert not conn.closed assert conn.invalidated eq_(conn.execute(select([1])).scalar(), 1) assert not conn.invalidated @testing.fails_on('+informixdb', "Wrong error thrown, fix in informixdb?") def test_close(self): conn = self.engine.connect() eq_(conn.execute(select([1])).scalar(), 1) assert not conn.closed self.engine.test_shutdown() _assert_invalidated(conn.execute, select([1])) conn.close() conn = self.engine.connect() eq_(conn.execute(select([1])).scalar(), 1) @testing.fails_on('+informixdb', "Wrong error thrown, fix in informixdb?") def test_with_transaction(self): conn = self.engine.connect() trans = conn.begin() eq_(conn.execute(select([1])).scalar(), 1) assert not conn.closed self.engine.test_shutdown() _assert_invalidated(conn.execute, select([1])) assert not conn.closed assert conn.invalidated assert trans.is_active assert_raises_message( tsa.exc.StatementError, "Can't reconnect until invalid transaction is "\ "rolled back", conn.execute, select([1]) ) assert trans.is_active assert_raises_message( tsa.exc.InvalidRequestError, "Can't reconnect until invalid transaction is rolled back", trans.commit ) assert trans.is_active trans.rollback() assert not trans.is_active assert conn.invalidated eq_(conn.execute(select([1])).scalar(), 1) assert not conn.invalidated class RecycleTest(fixtures.TestBase): def test_basic(self): for threadlocal in False, True: engine = engines.reconnecting_engine( options={'pool_threadlocal': threadlocal}) conn = engine.contextual_connect() eq_(conn.execute(select([1])).scalar(), 1) conn.close() # set the pool 
recycle down to 1. # we aren't doing this inline with the # engine create since cx_oracle takes way # too long to create the 1st connection and don't # want to build a huge delay into this test. engine.pool._recycle = 1 # kill the DB connection engine.test_shutdown() # wait until past the recycle period time.sleep(2) # can connect, no exception conn = engine.contextual_connect() eq_(conn.execute(select([1])).scalar(), 1) conn.close() class InvalidateDuringResultTest(fixtures.TestBase): def setup(self): self.engine = engines.reconnecting_engine() self.meta = MetaData(self.engine) table = Table('sometable', self.meta, Column('id', Integer, primary_key=True), Column('name', String(50))) self.meta.create_all() table.insert().execute( [{'id': i, 'name': 'row %d' % i} for i in range(1, 100)] ) def teardown(self): self.meta.drop_all() self.engine.dispose() @testing.fails_if([ '+mysqlconnector', '+mysqldb', '+cymysql', '+pymysql', '+pg8000' ], "Buffers the result set and doesn't check for " "connection close") @testing.fails_on('+informixdb', "Wrong error thrown, fix in informixdb?") def test_invalidate_on_results(self): conn = self.engine.connect() result = conn.execute('select * from sometable') for x in xrange(20): result.fetchone() self.engine.test_shutdown() _assert_invalidated(result.fetchone) assert conn.invalidated SQLAlchemy-0.8.4/test/engine/test_reflection.py0000644000076500000240000015331312251150015022216 0ustar classicstaff00000000000000import operator import unicodedata import sqlalchemy as sa from sqlalchemy import schema, events, event, inspect from sqlalchemy import MetaData, Integer, String from sqlalchemy.testing import ComparesTables, \ engines, AssertsCompiledSQL, fixtures from sqlalchemy.testing.schema import Table, Column from sqlalchemy.testing import eq_, assert_raises, assert_raises_message from sqlalchemy import testing metadata, users = None, None class ReflectionTest(fixtures.TestBase, ComparesTables): @testing.exclude('mssql', '<', (10, 0, 0), 'Date is only supported on MSSQL 2008+') @testing.exclude('mysql', '<', (4, 1, 1), 'early types are squirrely') @testing.provide_metadata def test_basic_reflection(self): meta = self.metadata users = Table('engine_users', meta, Column('user_id', sa.INT, primary_key=True), Column('user_name', sa.VARCHAR(20), nullable=False), Column('test1', sa.CHAR(5), nullable=False), Column('test2', sa.Float(5), nullable=False), Column('test3', sa.Text), Column('test4', sa.Numeric(10, 2), nullable=False), Column('test5', sa.Date), Column('parent_user_id', sa.Integer, sa.ForeignKey('engine_users.user_id')), Column('test6', sa.Date, nullable=False), Column('test7', sa.Text), Column('test8', sa.LargeBinary), Column('test_passivedefault2', sa.Integer, server_default='5'), Column('test9', sa.LargeBinary(100)), Column('test10', sa.Numeric(10, 2)), test_needs_fk=True, ) addresses = Table( 'engine_email_addresses', meta, Column('address_id', sa.Integer, primary_key=True), Column('remote_user_id', sa.Integer, sa.ForeignKey(users.c.user_id)), Column('email_address', sa.String(20)), test_needs_fk=True, ) meta.create_all() meta2 = MetaData() reflected_users = Table('engine_users', meta2, autoload=True, autoload_with=testing.db) reflected_addresses = Table('engine_email_addresses', meta2, autoload=True, autoload_with=testing.db) self.assert_tables_equal(users, reflected_users) self.assert_tables_equal(addresses, reflected_addresses) @testing.provide_metadata def test_two_foreign_keys(self): meta = self.metadata Table( 't1', meta, Column('id', sa.Integer, 
primary_key=True), Column('t2id', sa.Integer, sa.ForeignKey('t2.id')), Column('t3id', sa.Integer, sa.ForeignKey('t3.id')), test_needs_fk=True, ) Table('t2', meta, Column('id', sa.Integer, primary_key=True), test_needs_fk=True) Table('t3', meta, Column('id', sa.Integer, primary_key=True), test_needs_fk=True) meta.create_all() meta2 = MetaData() t1r, t2r, t3r = [Table(x, meta2, autoload=True, autoload_with=testing.db) for x in ('t1', 't2', 't3')] assert t1r.c.t2id.references(t2r.c.id) assert t1r.c.t3id.references(t3r.c.id) def test_nonexistent(self): meta = MetaData(testing.db) assert_raises(sa.exc.NoSuchTableError, Table, 'nonexistent', meta, autoload=True) @testing.provide_metadata def test_include_columns(self): meta = self.metadata foo = Table('foo', meta, *[Column(n, sa.String(30)) for n in ['a', 'b', 'c', 'd', 'e', 'f']]) meta.create_all() meta2 = MetaData(testing.db) foo = Table('foo', meta2, autoload=True, include_columns=['b', 'f', 'e']) # test that cols come back in original order eq_([c.name for c in foo.c], ['b', 'e', 'f']) for c in ('b', 'f', 'e'): assert c in foo.c for c in ('a', 'c', 'd'): assert c not in foo.c # test against a table which is already reflected meta3 = MetaData(testing.db) foo = Table('foo', meta3, autoload=True) foo = Table('foo', meta3, include_columns=['b', 'f', 'e'], extend_existing=True) eq_([c.name for c in foo.c], ['b', 'e', 'f']) for c in ('b', 'f', 'e'): assert c in foo.c for c in ('a', 'c', 'd'): assert c not in foo.c @testing.provide_metadata def test_extend_existing(self): meta = self.metadata Table('t', meta, Column('id', Integer, primary_key=True), Column('x', Integer), Column('y', Integer), Column('z', Integer, server_default="5"), ) meta.create_all() m2 = MetaData() old_z = Column('z', String, primary_key=True) old_y = Column('y', String) old_q = Column('q', Integer) t2 = Table('t', m2, old_z, old_q) eq_(t2.primary_key.columns, (t2.c.z, )) t2 = Table('t', m2, old_y, extend_existing=True, autoload=True, autoload_with=testing.db) eq_( set(t2.columns.keys()), set(['x', 'y', 'z', 'q', 'id']) ) eq_(t2.primary_key.columns, (t2.c.id, )) assert t2.c.z is not old_z assert t2.c.y is old_y assert t2.c.z.type._type_affinity is Integer assert t2.c.q is old_q m3 = MetaData() t3 = Table('t', m3, Column('z', Integer)) t3 = Table('t', m3, extend_existing=False, autoload=True, autoload_with=testing.db) eq_( set(t3.columns.keys()), set(['z']) ) m4 = MetaData() old_z = Column('z', String, primary_key=True) old_y = Column('y', String) old_q = Column('q', Integer) t4 = Table('t', m4, old_z, old_q) eq_(t4.primary_key.columns, (t4.c.z, )) t4 = Table('t', m4, old_y, extend_existing=True, autoload=True, autoload_replace=False, autoload_with=testing.db) eq_( set(t4.columns.keys()), set(['x', 'y', 'z', 'q', 'id']) ) eq_(t4.primary_key.columns, (t4.c.id, )) assert t4.c.z is old_z assert t4.c.y is old_y assert t4.c.z.type._type_affinity is String assert t4.c.q is old_q @testing.emits_warning(r".*omitted columns") @testing.provide_metadata def test_include_columns_indexes(self): m = self.metadata t1 = Table('t1', m, Column('a', sa.Integer), Column('b', sa.Integer)) sa.Index('foobar', t1.c.a, t1.c.b) sa.Index('bat', t1.c.a) m.create_all() m2 = MetaData(testing.db) t2 = Table('t1', m2, autoload=True) assert len(t2.indexes) == 2 m2 = MetaData(testing.db) t2 = Table('t1', m2, autoload=True, include_columns=['a']) assert len(t2.indexes) == 1 m2 = MetaData(testing.db) t2 = Table('t1', m2, autoload=True, include_columns=['a', 'b']) assert len(t2.indexes) == 2 
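# the next several tests exercise autoload_replace=False together with
# extend_existing=True: columns declared in python ahead of reflection are
# kept as-is, and the reflected versions of those columns (including any
# reflected foreign keys on them) are skipped instead of replacing them.
# a minimal usage sketch, kept as a comment since this is test code;
# "some_engine" is a hypothetical engine, not part of this suite:
#
#     m = MetaData()
#     b = Table('b', m, Column('a_id', Integer, sa.ForeignKey('a.id')))
#     b = Table('b', m, extend_existing=True, autoload=True,
#               autoload_with=some_engine, autoload_replace=False)
#     # b.c.a_id is still the column declared above, with its in-python FK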
@testing.provide_metadata def test_autoload_replace_foreign_key_nonpresent(self): """test autoload_replace=False with col plus FK establishes the FK not present in the DB. """ Table('a', self.metadata, Column('id', Integer, primary_key=True)) Table('b', self.metadata, Column('id', Integer, primary_key=True), Column('a_id', Integer)) self.metadata.create_all() m2 = MetaData() b2 = Table('b', m2, Column('a_id', Integer, sa.ForeignKey('a.id'))) a2 = Table('a', m2, autoload=True, autoload_with=testing.db) b2 = Table('b', m2, extend_existing=True, autoload=True, autoload_with=testing.db, autoload_replace=False) assert b2.c.id is not None assert b2.c.a_id.references(a2.c.id) eq_(len(b2.constraints), 2) @testing.provide_metadata def test_autoload_replace_foreign_key_ispresent(self): """test autoload_replace=False with col plus FK mirroring DB-reflected FK skips the reflected FK and installs the in-python one only. """ Table('a', self.metadata, Column('id', Integer, primary_key=True)) Table('b', self.metadata, Column('id', Integer, primary_key=True), Column('a_id', Integer, sa.ForeignKey('a.id'))) self.metadata.create_all() m2 = MetaData() b2 = Table('b', m2, Column('a_id', Integer, sa.ForeignKey('a.id'))) a2 = Table('a', m2, autoload=True, autoload_with=testing.db) b2 = Table('b', m2, extend_existing=True, autoload=True, autoload_with=testing.db, autoload_replace=False) assert b2.c.id is not None assert b2.c.a_id.references(a2.c.id) eq_(len(b2.constraints), 2) @testing.provide_metadata def test_autoload_replace_foreign_key_removed(self): """test autoload_replace=False with col minus FK that's in the DB means the FK is skipped and doesn't get installed at all. """ Table('a', self.metadata, Column('id', Integer, primary_key=True)) Table('b', self.metadata, Column('id', Integer, primary_key=True), Column('a_id', Integer, sa.ForeignKey('a.id'))) self.metadata.create_all() m2 = MetaData() b2 = Table('b', m2, Column('a_id', Integer)) a2 = Table('a', m2, autoload=True, autoload_with=testing.db) b2 = Table('b', m2, extend_existing=True, autoload=True, autoload_with=testing.db, autoload_replace=False) assert b2.c.id is not None assert not b2.c.a_id.references(a2.c.id) eq_(len(b2.constraints), 1) @testing.provide_metadata def test_autoload_replace_primary_key(self): Table('a', self.metadata, Column('id', Integer)) self.metadata.create_all() m2 = MetaData() a2 = Table('a', m2, Column('id', Integer, primary_key=True)) Table('a', m2, autoload=True, autoload_with=testing.db, autoload_replace=False, extend_existing=True) eq_(list(a2.primary_key), [a2.c.id]) def test_autoload_replace_arg(self): Table('t', MetaData(), autoload_replace=False) @testing.provide_metadata def test_autoincrement_col(self): """test that 'autoincrement' is reflected according to sqla's policy. Don't mark this test as unsupported for any backend ! 
(technically it fails with MySQL InnoDB since "id" comes before "id2") """ meta = self.metadata Table('test', meta, Column('id', sa.Integer, primary_key=True), Column('data', sa.String(50)), mysql_engine='MyISAM' ) Table('test2', meta, Column('id', sa.Integer, sa.ForeignKey('test.id'), primary_key=True), Column('id2', sa.Integer, primary_key=True), Column('data', sa.String(50)), mysql_engine='MyISAM' ) meta.create_all() m2 = MetaData(testing.db) t1a = Table('test', m2, autoload=True) assert t1a._autoincrement_column is t1a.c.id t2a = Table('test2', m2, autoload=True) assert t2a._autoincrement_column is t2a.c.id2 @testing.provide_metadata def test_unknown_types(self): meta = self.metadata t = Table("test", meta, Column('foo', sa.DateTime)) ischema_names = testing.db.dialect.ischema_names t.create() testing.db.dialect.ischema_names = {} try: m2 = MetaData(testing.db) assert_raises(sa.exc.SAWarning, Table, "test", m2, autoload=True) @testing.emits_warning('Did not recognize type') def warns(): m3 = MetaData(testing.db) t3 = Table("test", m3, autoload=True) assert t3.c.foo.type.__class__ == sa.types.NullType finally: testing.db.dialect.ischema_names = ischema_names @testing.provide_metadata def test_basic_override(self): meta = self.metadata table = Table( 'override_test', meta, Column('col1', sa.Integer, primary_key=True), Column('col2', sa.String(20)), Column('col3', sa.Numeric) ) table.create() meta2 = MetaData(testing.db) table = Table( 'override_test', meta2, Column('col2', sa.Unicode()), Column('col4', sa.String(30)), autoload=True) self.assert_(isinstance(table.c.col1.type, sa.Integer)) self.assert_(isinstance(table.c.col2.type, sa.Unicode)) self.assert_(isinstance(table.c.col4.type, sa.String)) @testing.provide_metadata def test_override_pkfk(self): """test that you can override columns which contain foreign keys to other reflected tables, where the foreign key column is also a primary key column""" meta = self.metadata Table('users', meta, Column('id', sa.Integer, primary_key=True), Column('name', sa.String(30))) Table('addresses', meta, Column('id', sa.Integer, primary_key=True), Column('street', sa.String(30))) meta.create_all() meta2 = MetaData(testing.db) a2 = Table('addresses', meta2, Column('id', sa.Integer, sa.ForeignKey('users.id'), primary_key=True), autoload=True) u2 = Table('users', meta2, autoload=True) assert list(a2.primary_key) == [a2.c.id] assert list(u2.primary_key) == [u2.c.id] assert u2.join(a2).onclause.compare(u2.c.id == a2.c.id) meta3 = MetaData(testing.db) u3 = Table('users', meta3, autoload=True) a3 = Table('addresses', meta3, Column('id', sa.Integer, sa.ForeignKey('users.id'), primary_key=True), autoload=True) assert list(a3.primary_key) == [a3.c.id] assert list(u3.primary_key) == [u3.c.id] assert u3.join(a3).onclause.compare(u3.c.id == a3.c.id) @testing.provide_metadata def test_override_nonexistent_fk(self): """test that you can override columns and create new foreign keys to other reflected tables which have no foreign keys. 
this is common with MySQL MyISAM tables.""" meta = self.metadata Table('users', meta, Column('id', sa.Integer, primary_key=True), Column('name', sa.String(30))) Table('addresses', meta, Column('id', sa.Integer, primary_key=True), Column('street', sa.String(30)), Column('user_id', sa.Integer)) meta.create_all() meta2 = MetaData(testing.db) a2 = Table('addresses', meta2, Column('user_id', sa.Integer, sa.ForeignKey('users.id')), autoload=True) u2 = Table('users', meta2, autoload=True) assert len(a2.c.user_id.foreign_keys) == 1 assert len(a2.foreign_keys) == 1 assert [c.parent for c in a2.foreign_keys] == [a2.c.user_id] assert [c.parent for c in a2.c.user_id.foreign_keys] \ == [a2.c.user_id] assert list(a2.c.user_id.foreign_keys)[0].parent \ is a2.c.user_id assert u2.join(a2).onclause.compare(u2.c.id == a2.c.user_id) meta3 = MetaData(testing.db) u3 = Table('users', meta3, autoload=True) a3 = Table('addresses', meta3, Column('user_id', sa.Integer, sa.ForeignKey('users.id')), autoload=True) assert u3.join(a3).onclause.compare(u3.c.id == a3.c.user_id) meta4 = MetaData(testing.db) u4 = Table('users', meta4, Column('id', sa.Integer, key='u_id', primary_key=True), autoload=True) a4 = Table( 'addresses', meta4, Column('id', sa.Integer, key='street', primary_key=True), Column('street', sa.String(30), key='user_id'), Column('user_id', sa.Integer, sa.ForeignKey('users.u_id' ), key='id'), autoload=True, ) assert u4.join(a4).onclause.compare(u4.c.u_id == a4.c.id) assert list(u4.primary_key) == [u4.c.u_id] assert len(u4.columns) == 2 assert len(u4.constraints) == 1 assert len(a4.columns) == 3 assert len(a4.constraints) == 2 @testing.provide_metadata def test_override_composite_fk(self): """Test double-remove of composite foreign key, when replaced.""" metadata = self.metadata Table('a', metadata, Column('x', sa.Integer, primary_key=True), Column('y', sa.Integer, primary_key=True), ) Table('b', metadata, Column('x', sa.Integer, primary_key=True), Column('y', sa.Integer, primary_key=True), sa.ForeignKeyConstraint(['x', 'y'], ['a.x', 'a.y']) ) metadata.create_all() meta2 = MetaData() c1 = Column('x', sa.Integer, primary_key=True) c2 = Column('y', sa.Integer, primary_key=True) f1 = sa.ForeignKeyConstraint(['x', 'y'], ['a.x', 'a.y']) b1 = Table('b', meta2, c1, c2, f1, autoload=True, autoload_with=testing.db ) assert b1.c.x is c1 assert b1.c.y is c2 assert f1 in b1.constraints assert len(b1.constraints) == 2 @testing.provide_metadata def test_override_keys(self): """test that columns can be overridden with a 'key', and that ForeignKey targeting during reflection still works.""" meta = self.metadata Table('a', meta, Column('x', sa.Integer, primary_key=True), Column('z', sa.Integer), test_needs_fk=True ) Table('b', meta, Column('y', sa.Integer, sa.ForeignKey('a.x')), test_needs_fk=True ) meta.create_all() m2 = MetaData(testing.db) a2 = Table('a', m2, Column('x', sa.Integer, primary_key=True, key='x1'), autoload=True) b2 = Table('b', m2, autoload=True) assert a2.join(b2).onclause.compare(a2.c.x1 == b2.c.y) assert b2.c.y.references(a2.c.x1) @testing.provide_metadata def test_nonreflected_fk_raises(self): """test that a NoReferencedColumnError is raised when reflecting a table with an FK to another table which has not included the target column in its reflection. 
""" meta = self.metadata Table('a', meta, Column('x', sa.Integer, primary_key=True), Column('z', sa.Integer), test_needs_fk=True ) Table('b', meta, Column('y', sa.Integer, sa.ForeignKey('a.x')), test_needs_fk=True ) meta.create_all() m2 = MetaData(testing.db) a2 = Table('a', m2, include_columns=['z'], autoload=True) b2 = Table('b', m2, autoload=True) assert_raises(sa.exc.NoReferencedColumnError, a2.join, b2) @testing.exclude('mysql', '<', (4, 1, 1), 'innodb funkiness') @testing.provide_metadata def test_override_existing_fk(self): """test that you can override columns and specify new foreign keys to other reflected tables, on columns which *do* already have that foreign key, and that the FK is not duped. """ meta = self.metadata Table('users', meta, Column('id', sa.Integer, primary_key=True), Column('name', sa.String(30)), test_needs_fk=True) Table('addresses', meta, Column('id', sa.Integer, primary_key=True), Column('user_id', sa.Integer, sa.ForeignKey('users.id')), test_needs_fk=True) meta.create_all() meta2 = MetaData(testing.db) a2 = Table('addresses', meta2, Column('user_id', sa.Integer, sa.ForeignKey('users.id')), autoload=True) u2 = Table('users', meta2, autoload=True) s = sa.select([a2]) assert s.c.user_id is not None assert len(a2.foreign_keys) == 1 assert len(a2.c.user_id.foreign_keys) == 1 assert len(a2.constraints) == 2 assert [c.parent for c in a2.foreign_keys] == [a2.c.user_id] assert [c.parent for c in a2.c.user_id.foreign_keys] \ == [a2.c.user_id] assert list(a2.c.user_id.foreign_keys)[0].parent \ is a2.c.user_id assert u2.join(a2).onclause.compare(u2.c.id == a2.c.user_id) meta2 = MetaData(testing.db) u2 = Table('users', meta2, Column('id', sa.Integer, primary_key=True), autoload=True) a2 = Table('addresses', meta2, Column('id', sa.Integer, primary_key=True), Column('user_id', sa.Integer, sa.ForeignKey('users.id')), autoload=True) s = sa.select([a2]) assert s.c.user_id is not None assert len(a2.foreign_keys) == 1 assert len(a2.c.user_id.foreign_keys) == 1 assert len(a2.constraints) == 2 assert [c.parent for c in a2.foreign_keys] == [a2.c.user_id] assert [c.parent for c in a2.c.user_id.foreign_keys] \ == [a2.c.user_id] assert list(a2.c.user_id.foreign_keys)[0].parent \ is a2.c.user_id assert u2.join(a2).onclause.compare(u2.c.id == a2.c.user_id) def test_pks_not_uniques(self): """test that primary key reflection not tripped up by unique indexes""" testing.db.execute(""" CREATE TABLE book ( id INTEGER NOT NULL, title VARCHAR(100) NOT NULL, series INTEGER, series_id INTEGER, UNIQUE(series, series_id), PRIMARY KEY(id) )""") try: metadata = MetaData(bind=testing.db) book = Table('book', metadata, autoload=True) assert book.primary_key.contains_column(book.c.id) assert not book.primary_key.contains_column(book.c.series) assert len(book.primary_key) == 1 finally: testing.db.execute("drop table book") def test_fk_error(self): metadata = MetaData(testing.db) Table('slots', metadata, Column('slot_id', sa.Integer, primary_key=True), Column('pkg_id', sa.Integer, sa.ForeignKey('pkgs.pkg_id')), Column('slot', sa.String(128)), ) assert_raises_message(sa.exc.InvalidRequestError, "Foreign key associated with column 'slots.pkg_id' " "could not find table 'pkgs' with which to generate " "a foreign key to target column 'pkg_id'", metadata.create_all) def test_composite_pks(self): """test reflection of a composite primary key""" testing.db.execute(""" CREATE TABLE book ( id INTEGER NOT NULL, isbn VARCHAR(50) NOT NULL, title VARCHAR(100) NOT NULL, series INTEGER NOT NULL, series_id INTEGER NOT 
NULL, UNIQUE(series, series_id), PRIMARY KEY(id, isbn) )""") try: metadata = MetaData(bind=testing.db) book = Table('book', metadata, autoload=True) assert book.primary_key.contains_column(book.c.id) assert book.primary_key.contains_column(book.c.isbn) assert not book.primary_key.contains_column(book.c.series) assert len(book.primary_key) == 2 finally: testing.db.execute("drop table book") @testing.exclude('mysql', '<', (4, 1, 1), 'innodb funkiness') @testing.provide_metadata def test_composite_fk(self): """test reflection of composite foreign keys""" meta = self.metadata multi = Table( 'multi', meta, Column('multi_id', sa.Integer, primary_key=True), Column('multi_rev', sa.Integer, primary_key=True), Column('multi_hoho', sa.Integer, primary_key=True), Column('name', sa.String(50), nullable=False), Column('val', sa.String(100)), test_needs_fk=True, ) multi2 = Table('multi2', meta, Column('id', sa.Integer, primary_key=True), Column('foo', sa.Integer), Column('bar', sa.Integer), Column('lala', sa.Integer), Column('data', sa.String(50)), sa.ForeignKeyConstraint(['foo', 'bar', 'lala'], ['multi.multi_id', 'multi.multi_rev', 'multi.multi_hoho' ]), test_needs_fk=True, ) meta.create_all() meta2 = MetaData() table = Table('multi', meta2, autoload=True, autoload_with=testing.db) table2 = Table('multi2', meta2, autoload=True, autoload_with=testing.db) self.assert_tables_equal(multi, table) self.assert_tables_equal(multi2, table2) j = sa.join(table, table2) self.assert_(sa.and_(table.c.multi_id == table2.c.foo, table.c.multi_rev == table2.c.bar, table.c.multi_hoho == table2.c.lala).compare(j.onclause)) @testing.crashes('oracle', 'FIXME: unknown, confirm not fails_on') @testing.fails_on('+informixdb', "FIXME: should be supported via the " "DELIMITED env var but that breaks " "everything else for now") @testing.provide_metadata def test_reserved(self): # check a table that uses an SQL reserved name doesn't cause an # error meta = self.metadata table_a = Table('select', meta, Column('not', sa.Integer, primary_key=True), Column('from', sa.String(12), nullable=False), sa.UniqueConstraint('from', name='when')) sa.Index('where', table_a.c['from']) # There's currently no way to calculate identifier case # normalization in isolation, so... 
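# the expected stored casing of the reserved column name is hard-coded per
# backend below: dialects that store case-insensitive identifiers as upper
# case (firebird, oracle, maxdb) should see 'TRUE', everything else 'true'.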
if testing.against('firebird', 'oracle', 'maxdb'): check_col = 'TRUE' else: check_col = 'true' quoter = meta.bind.dialect.identifier_preparer.quote_identifier Table('false', meta, Column('create', sa.Integer, primary_key=True), Column('true', sa.Integer, sa.ForeignKey('select.not')), sa.CheckConstraint('%s <> 1' % quoter(check_col), name='limit') ) table_c = Table('is', meta, Column('or', sa.Integer, nullable=False, primary_key=True), Column('join', sa.Integer, nullable=False, primary_key=True), sa.PrimaryKeyConstraint('or', 'join', name='to') ) index_c = sa.Index('else', table_c.c.join) meta.create_all() index_c.drop() meta2 = MetaData(testing.db) Table('select', meta2, autoload=True) Table('false', meta2, autoload=True) Table('is', meta2, autoload=True) @testing.provide_metadata def _test_reflect_uses_bind(self, fn): from sqlalchemy.pool import AssertionPool e = engines.testing_engine(options={"poolclass": AssertionPool}) fn(e) @testing.uses_deprecated def test_reflect_uses_bind_constructor_conn(self): self._test_reflect_uses_bind(lambda e: MetaData(e.connect(), reflect=True)) @testing.uses_deprecated def test_reflect_uses_bind_constructor_engine(self): self._test_reflect_uses_bind(lambda e: MetaData(e, reflect=True)) def test_reflect_uses_bind_constructor_conn_reflect(self): self._test_reflect_uses_bind(lambda e: MetaData(e.connect()).reflect()) def test_reflect_uses_bind_constructor_engine_reflect(self): self._test_reflect_uses_bind(lambda e: MetaData(e).reflect()) def test_reflect_uses_bind_conn_reflect(self): self._test_reflect_uses_bind(lambda e: MetaData().reflect(e.connect())) def test_reflect_uses_bind_engine_reflect(self): self._test_reflect_uses_bind(lambda e: MetaData().reflect(e)) @testing.provide_metadata def test_reflect_all(self): existing = testing.db.table_names() names = ['rt_%s' % name for name in ('a', 'b', 'c', 'd', 'e')] nameset = set(names) for name in names: # be sure our starting environment is sane self.assert_(name not in existing) self.assert_('rt_f' not in existing) baseline = self.metadata for name in names: Table(name, baseline, Column('id', sa.Integer, primary_key=True)) baseline.create_all() m1 = MetaData(testing.db) self.assert_(not m1.tables) m1.reflect() self.assert_(nameset.issubset(set(m1.tables.keys()))) m2 = MetaData() m2.reflect(testing.db, only=['rt_a', 'rt_b']) self.assert_(set(m2.tables.keys()) == set(['rt_a', 'rt_b'])) m3 = MetaData() c = testing.db.connect() m3.reflect(bind=c, only=lambda name, meta: name == 'rt_c') self.assert_(set(m3.tables.keys()) == set(['rt_c'])) m4 = MetaData(testing.db) try: m4.reflect(only=['rt_a', 'rt_f']) self.assert_(False) except sa.exc.InvalidRequestError, e: self.assert_(e.args[0].endswith('(rt_f)')) m5 = MetaData(testing.db) m5.reflect(only=[]) self.assert_(not m5.tables) m6 = MetaData(testing.db) m6.reflect(only=lambda n, m: False) self.assert_(not m6.tables) m7 = MetaData(testing.db) m7.reflect() self.assert_(nameset.issubset(set(m7.tables.keys()))) m8 = MetaData() assert_raises( sa.exc.UnboundExecutionError, m8.reflect ) if existing: print "Other tables present in database, skipping some checks." 
else: baseline.drop_all() m9 = MetaData(testing.db) m9.reflect() self.assert_(not m9.tables) def test_reflect_all_conn_closing(self): m1 = MetaData() c = testing.db.connect() m1.reflect(bind=c) assert not c.closed def test_inspector_conn_closing(self): c = testing.db.connect() inspect(c) assert not c.closed @testing.provide_metadata def test_index_reflection(self): m1 = self.metadata t1 = Table('party', m1, Column('id', sa.Integer, nullable=False), Column('name', sa.String(20), index=True) ) sa.Index('idx1', t1.c.id, unique=True) sa.Index('idx2', t1.c.name, t1.c.id, unique=False) m1.create_all() m2 = MetaData(testing.db) t2 = Table('party', m2, autoload=True) assert len(t2.indexes) == 3 # Make sure indexes are in the order we expect them in tmp = [(idx.name, idx) for idx in t2.indexes] tmp.sort() r1, r2, r3 = [idx[1] for idx in tmp] assert r1.name == 'idx1' assert r2.name == 'idx2' assert r1.unique == True assert r2.unique == False assert r3.unique == False assert set([t2.c.id]) == set(r1.columns) assert set([t2.c.name, t2.c.id]) == set(r2.columns) assert set([t2.c.name]) == set(r3.columns) @testing.requires.views @testing.provide_metadata def test_views(self): metadata = self.metadata users, addresses, dingalings = createTables(metadata) try: metadata.create_all() _create_views(metadata.bind, None) m2 = MetaData(testing.db) users_v = Table("users_v", m2, autoload=True) addresses_v = Table("email_addresses_v", m2, autoload=True) for c1, c2 in zip(users.c, users_v.c): eq_(c1.name, c2.name) self.assert_types_base(c1, c2) for c1, c2 in zip(addresses.c, addresses_v.c): eq_(c1.name, c2.name) self.assert_types_base(c1, c2) finally: _drop_views(metadata.bind) @testing.requires.views @testing.provide_metadata def test_reflect_all_with_views(self): metadata = self.metadata users, addresses, dingalings = createTables(metadata, None) try: metadata.create_all() _create_views(metadata.bind, None) m2 = MetaData(testing.db) m2.reflect(views=False) eq_( set(m2.tables), set(['users', 'email_addresses', 'dingalings']) ) m2 = MetaData(testing.db) m2.reflect(views=True) eq_( set(m2.tables), set(['email_addresses_v', 'users_v', 'users', 'dingalings', 'email_addresses']) ) finally: _drop_views(metadata.bind) class CreateDropTest(fixtures.TestBase): @classmethod def setup_class(cls): global metadata, users metadata = MetaData() users = Table('users', metadata, Column('user_id', sa.Integer, sa.Sequence('user_id_seq', optional=True), primary_key=True), Column('user_name', sa.String(40))) Table('email_addresses', metadata, Column('address_id', sa.Integer, sa.Sequence('address_id_seq', optional=True), primary_key=True), Column('user_id', sa.Integer, sa.ForeignKey(users.c.user_id)), Column('email_address', sa.String(40))) Table( 'orders', metadata, Column('order_id', sa.Integer, sa.Sequence('order_id_seq', optional=True), primary_key=True), Column('user_id', sa.Integer, sa.ForeignKey(users.c.user_id)), Column('description', sa.String(50)), Column('isopen', sa.Integer), ) Table('items', metadata, Column('item_id', sa.INT, sa.Sequence('items_id_seq', optional=True), primary_key=True), Column('order_id', sa.INT, sa.ForeignKey('orders')), Column('item_name', sa.VARCHAR(50))) def test_sorter(self): tables = metadata.sorted_tables table_names = [t.name for t in tables] ua = [n for n in table_names if n in ('users', 'email_addresses')] oi = [n for n in table_names if n in ('orders', 'items')] eq_(ua, ['users', 'email_addresses']) eq_(oi, ['orders', 'items']) def testcheckfirst(self): try: assert not users.exists(testing.db) 
users.create(bind=testing.db) assert users.exists(testing.db) users.create(bind=testing.db, checkfirst=True) users.drop(bind=testing.db) users.drop(bind=testing.db, checkfirst=True) assert not users.exists(bind=testing.db) users.create(bind=testing.db, checkfirst=True) users.drop(bind=testing.db) finally: metadata.drop_all(bind=testing.db) def test_createdrop(self): metadata.create_all(bind=testing.db) eq_(testing.db.has_table('items'), True) eq_(testing.db.has_table('email_addresses'), True) metadata.create_all(bind=testing.db) eq_(testing.db.has_table('items'), True) metadata.drop_all(bind=testing.db) eq_(testing.db.has_table('items'), False) eq_(testing.db.has_table('email_addresses'), False) metadata.drop_all(bind=testing.db) eq_(testing.db.has_table('items'), False) def test_tablenames(self): metadata.create_all(bind=testing.db) # we only check to see if all the explicitly created tables are # there, rather than assertEqual -- the test db could have # "extra" tables if there is a misconfigured template. (*cough* # tsearch2 w/ the pg windows installer.) self.assert_(not set(metadata.tables) - set(testing.db.table_names())) metadata.drop_all(bind=testing.db) class SchemaManipulationTest(fixtures.TestBase): def test_append_constraint_unique(self): meta = MetaData() users = Table('users', meta, Column('id', sa.Integer)) addresses = Table('addresses', meta, Column('id', sa.Integer), Column('user_id', sa.Integer)) fk = sa.ForeignKeyConstraint(['user_id'], [users.c.id]) addresses.append_constraint(fk) addresses.append_constraint(fk) assert len(addresses.c.user_id.foreign_keys) == 1 assert addresses.constraints == set([addresses.primary_key, fk]) class UnicodeReflectionTest(fixtures.TestBase): @classmethod def setup_class(cls): # trigger mysql _server_casing check... 
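# (the throwaway connect()/close() below forces dialect initialization, so
# the mysql dialect's server casing detection runs before the unicode
# tables are created)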
testing.db.connect().close() cls.bind = bind = engines.utf8_engine( options={'convert_unicode': True}) cls.metadata = metadata = MetaData() no_multibyte_period = set([ (u'plain', u'col_plain', u'ix_plain') ]) no_has_table = [ (u'no_has_table_1', u'col_Unit\u00e9ble', u'ix_Unit\u00e9ble'), (u'no_has_table_2', u'col_\u6e2c\u8a66', u'ix_\u6e2c\u8a66'), ] no_case_sensitivity = [ (u'\u6e2c\u8a66', u'col_\u6e2c\u8a66', u'ix_\u6e2c\u8a66'), (u'unit\u00e9ble', u'col_unit\u00e9ble', u'ix_unit\u00e9ble'), ] full = [ (u'Unit\u00e9ble', u'col_Unit\u00e9ble', u'ix_Unit\u00e9ble'), (u'\u6e2c\u8a66', u'col_\u6e2c\u8a66', u'ix_\u6e2c\u8a66'), ] # as you can see, our options for this kind of thing # are really limited unless you're on PG or SQLite # forget about it on these backends if not testing.requires.unicode_ddl.enabled: names = no_multibyte_period # mysql can't handle casing usually elif testing.against("mysql") and \ not testing.requires._has_mysql_fully_case_sensitive(): names = no_multibyte_period.union(no_case_sensitivity) # mssql + pyodbc + freetds can't compare multibyte names to # information_schema.tables.table_name elif testing.against("mssql"): names = no_multibyte_period.union(no_has_table) else: names = no_multibyte_period.union(full) for tname, cname, ixname in names: t = Table(tname, metadata, Column('id', sa.Integer, sa.Sequence(cname + '_id_seq'), primary_key=True), Column(cname, Integer) ) schema.Index(ixname, t.c[cname]) metadata.create_all(bind) cls.names = names @classmethod def teardown_class(cls): cls.metadata.drop_all(cls.bind, checkfirst=False) cls.bind.dispose() @testing.requires.unicode_connections def test_has_table(self): for tname, cname, ixname in self.names: assert self.bind.has_table(tname), "Can't detect name %s" % tname @testing.requires.unicode_connections def test_basic(self): # the 'convert_unicode' should not get in the way of the # reflection process. reflecttable for oracle, postgresql # (others?) expect non-unicode strings in result sets/bind # params bind = self.bind names = set([rec[0] for rec in self.names]) reflected = set(bind.table_names()) # Jython 2.5 on Java 5 lacks unicodedata.normalize if not names.issubset(reflected) and hasattr(unicodedata, 'normalize'): # Python source files in the utf-8 coding seem to # normalize literals as NFC (and the above are # explicitly NFC). Maybe this database normalizes NFD # on reflection. nfc = set([unicodedata.normalize('NFC', n) for n in names]) self.assert_(nfc == names) # Yep. But still ensure that bulk reflection and # create/drop work with either normalization. 
r = MetaData(bind) r.reflect() r.drop_all(checkfirst=False) r.create_all(checkfirst=False) @testing.requires.unicode_connections def test_get_names(self): inspector = inspect(self.bind) names = dict( (tname, (cname, ixname)) for tname, cname, ixname in self.names ) for tname in inspector.get_table_names(): assert tname in names eq_( [ (rec['name'], rec['column_names'][0]) for rec in inspector.get_indexes(tname) ], [(names[tname][1], names[tname][0])] ) class SchemaTest(fixtures.TestBase): @testing.requires.schemas @testing.requires.cross_schema_fk_reflection def test_has_schema(self): eq_(testing.db.dialect.has_schema(testing.db, 'test_schema'), True) eq_(testing.db.dialect.has_schema(testing.db, 'sa_fake_schema_123'), False) @testing.requires.schemas @testing.fails_on('sqlite', 'FIXME: unknown') @testing.fails_on('sybase', 'FIXME: unknown') def test_explicit_default_schema(self): engine = testing.db engine.connect().close() if testing.against('sqlite'): # Works for CREATE TABLE main.foo, SELECT FROM main.foo, etc., # but fails on: # FOREIGN KEY(col2) REFERENCES main.table1 (col1) schema = 'main' else: schema = engine.dialect.default_schema_name assert bool(schema) metadata = MetaData(engine) Table('table1', metadata, Column('col1', sa.Integer, primary_key=True), test_needs_fk=True, schema=schema) Table('table2', metadata, Column('col1', sa.Integer, primary_key=True), Column('col2', sa.Integer, sa.ForeignKey('%s.table1.col1' % schema)), test_needs_fk=True, schema=schema) try: metadata.create_all() metadata.create_all(checkfirst=True) assert len(metadata.tables) == 2 metadata.clear() Table('table1', metadata, autoload=True, schema=schema) Table('table2', metadata, autoload=True, schema=schema) assert len(metadata.tables) == 2 finally: metadata.drop_all() @testing.requires.schemas @testing.fails_on('sybase', 'FIXME: unknown') def test_explicit_default_schema_metadata(self): engine = testing.db if testing.against('sqlite'): # Works for CREATE TABLE main.foo, SELECT FROM main.foo, etc., # but fails on: # FOREIGN KEY(col2) REFERENCES main.table1 (col1) schema = 'main' else: schema = engine.dialect.default_schema_name assert bool(schema) metadata = MetaData(engine, schema=schema) Table('table1', metadata, Column('col1', sa.Integer, primary_key=True), test_needs_fk=True) Table('table2', metadata, Column('col1', sa.Integer, primary_key=True), Column('col2', sa.Integer, sa.ForeignKey('table1.col1')), test_needs_fk=True) try: metadata.create_all() metadata.create_all(checkfirst=True) assert len(metadata.tables) == 2 metadata.clear() Table('table1', metadata, autoload=True) Table('table2', metadata, autoload=True) assert len(metadata.tables) == 2 finally: metadata.drop_all() @testing.requires.schemas @testing.provide_metadata def test_metadata_reflect_schema(self): metadata = self.metadata createTables(metadata, "test_schema") metadata.create_all() m2 = MetaData(schema="test_schema", bind=testing.db) m2.reflect() eq_( set(m2.tables), set(['test_schema.dingalings', 'test_schema.users', 'test_schema.email_addresses']) ) @testing.requires.schemas @testing.requires.cross_schema_fk_reflection @testing.provide_metadata def test_reflect_all_schemas_default_overlap(self): t1 = Table('t', self.metadata, Column('id', Integer, primary_key=True)) t2 = Table('t', self.metadata, Column('id1', sa.ForeignKey('t.id')), schema="test_schema" ) self.metadata.create_all() m2 = MetaData() m2.reflect(testing.db, schema="test_schema") m3 = MetaData() m3.reflect(testing.db) m3.reflect(testing.db, schema="test_schema") eq_( 
set((t.name, t.schema) for t in m2.tables.values()), set((t.name, t.schema) for t in m3.tables.values()) ) # Tests related to engine.reflection def createTables(meta, schema=None): if schema: schema_prefix = schema + "." else: schema_prefix = "" users = Table('users', meta, Column('user_id', sa.INT, primary_key=True), Column('user_name', sa.VARCHAR(20), nullable=False), Column('test1', sa.CHAR(5), nullable=False), Column('test2', sa.Float(5), nullable=False), Column('test3', sa.Text), Column('test4', sa.Numeric(10, 2), nullable=False), Column('test5', sa.Date), Column('test5_1', sa.TIMESTAMP), Column('parent_user_id', sa.Integer, sa.ForeignKey('%susers.user_id' % schema_prefix)), Column('test6', sa.Date, nullable=False), Column('test7', sa.Text), Column('test8', sa.LargeBinary), Column('test_passivedefault2', sa.Integer, server_default='5'), Column('test9', sa.LargeBinary(100)), Column('test10', sa.Numeric(10, 2)), schema=schema, test_needs_fk=True, ) dingalings = Table("dingalings", meta, Column('dingaling_id', sa.Integer, primary_key=True), Column('address_id', sa.Integer, sa.ForeignKey('%semail_addresses.address_id' % schema_prefix)), Column('data', sa.String(30)), schema=schema, test_needs_fk=True, ) addresses = Table('email_addresses', meta, Column('address_id', sa.Integer), Column('remote_user_id', sa.Integer, sa.ForeignKey(users.c.user_id)), Column('email_address', sa.String(20)), sa.PrimaryKeyConstraint('address_id', name='email_ad_pk'), schema=schema, test_needs_fk=True, ) return (users, addresses, dingalings) def createIndexes(con, schema=None): fullname = 'users' if schema: fullname = "%s.%s" % (schema, 'users') query = "CREATE INDEX users_t_idx ON %s (test1, test2)" % fullname con.execute(sa.sql.text(query)) @testing.requires.views def _create_views(con, schema=None): for table_name in ('users', 'email_addresses'): fullname = table_name if schema: fullname = "%s.%s" % (schema, table_name) view_name = fullname + '_v' query = "CREATE VIEW %s AS SELECT * FROM %s" % (view_name, fullname) con.execute(sa.sql.text(query)) @testing.requires.views def _drop_views(con, schema=None): for table_name in ('email_addresses', 'users'): fullname = table_name if schema: fullname = "%s.%s" % (schema, table_name) view_name = fullname + '_v' query = "DROP VIEW %s" % view_name con.execute(sa.sql.text(query)) class ReverseCasingReflectTest(fixtures.TestBase, AssertsCompiledSQL): __dialect__ = 'default' @testing.requires.denormalized_names def setup(self): testing.db.execute(""" CREATE TABLE weird_casing( col1 char(20), "Col2" char(20), "col3" char(20) ) """) @testing.requires.denormalized_names def teardown(self): testing.db.execute("drop table weird_casing") @testing.requires.denormalized_names def test_direct_quoting(self): m = MetaData(testing.db) t = Table('weird_casing', m, autoload=True) self.assert_compile(t.select(), 'SELECT weird_casing.col1, ' 'weird_casing."Col2", weird_casing."col3" ' 'FROM weird_casing') class CaseSensitiveTest(fixtures.TablesTest): """Nail down case sensitive behaviors, mostly on MySQL.""" @classmethod def define_tables(cls, metadata): Table('SomeTable', metadata, Column('x', Integer, primary_key=True), test_needs_fk=True ) Table('SomeOtherTable', metadata, Column('x', Integer, primary_key=True), Column('y', Integer, sa.ForeignKey("SomeTable.x")), test_needs_fk=True ) @testing.fails_if(testing.requires._has_mysql_on_windows) def test_table_names(self): x = testing.db.run_callable( testing.db.dialect.get_table_names ) assert set(["SomeTable", 
"SomeOtherTable"]).issubset(x) def test_reflect_exact_name(self): m = MetaData() t1 = Table("SomeTable", m, autoload=True, autoload_with=testing.db) eq_(t1.name, "SomeTable") assert t1.c.x is not None @testing.fails_if(lambda: testing.against(('mysql', '<', (5, 5))) and not testing.requires._has_mysql_fully_case_sensitive() ) def test_reflect_via_fk(self): m = MetaData() t2 = Table("SomeOtherTable", m, autoload=True, autoload_with=testing.db) eq_(t2.name, "SomeOtherTable") assert "SomeTable" in m.tables @testing.fails_if(testing.requires._has_mysql_fully_case_sensitive) @testing.fails_on_everything_except('sqlite', 'mysql', 'mssql') def test_reflect_case_insensitive(self): m = MetaData() t2 = Table("sOmEtAbLe", m, autoload=True, autoload_with=testing.db) eq_(t2.name, "sOmEtAbLe") class ColumnEventsTest(fixtures.TestBase): @classmethod def setup_class(cls): cls.metadata = MetaData() cls.to_reflect = Table( 'to_reflect', cls.metadata, Column('x', sa.Integer, primary_key=True), Column('y', sa.Integer), test_needs_fk=True ) cls.related = Table( 'related', cls.metadata, Column('q', sa.Integer, sa.ForeignKey('to_reflect.x')), test_needs_fk=True ) sa.Index("some_index", cls.to_reflect.c.y) cls.metadata.create_all(testing.db) @classmethod def teardown_class(cls): cls.metadata.drop_all(testing.db) def teardown(self): events.SchemaEventTarget.dispatch._clear() def _do_test(self, col, update, assert_, tablename="to_reflect"): # load the actual Table class, not the test # wrapper from sqlalchemy.schema import Table m = MetaData(testing.db) def column_reflect(insp, table, column_info): if column_info['name'] == col: column_info.update(update) t = Table(tablename, m, autoload=True, listeners=[ ('column_reflect', column_reflect), ]) assert_(t) m = MetaData(testing.db) event.listen(Table, 'column_reflect', column_reflect) t2 = Table(tablename, m, autoload=True) assert_(t2) def test_override_key(self): def assertions(table): eq_(table.c.YXZ.name, "x") eq_(set(table.primary_key), set([table.c.YXZ])) self._do_test( "x", {"key": "YXZ"}, assertions ) def test_override_index(self): def assertions(table): idx = list(table.indexes)[0] eq_(idx.columns, [table.c.YXZ]) self._do_test( "y", {"key": "YXZ"}, assertions ) def test_override_key_fk(self): m = MetaData(testing.db) def column_reflect(insp, table, column_info): if column_info['name'] == 'q': column_info['key'] = 'qyz' elif column_info['name'] == 'x': column_info['key'] = 'xyz' to_reflect = Table("to_reflect", m, autoload=True, listeners=[ ('column_reflect', column_reflect), ]) related = Table("related", m, autoload=True, listeners=[ ('column_reflect', column_reflect), ]) assert related.c.qyz.references(to_reflect.c.xyz) def test_override_type(self): def assert_(table): assert isinstance(table.c.x.type, sa.String) self._do_test( "x", {"type": sa.String}, assert_ ) def test_override_info(self): self._do_test( "x", {"info": {"a": "b"}}, lambda table: eq_(table.c.x.info, {"a": "b"}) ) SQLAlchemy-0.8.4/test/engine/test_transaction.py0000644000076500000240000013727612251150015022423 0ustar classicstaff00000000000000from sqlalchemy.testing import eq_, assert_raises, \ assert_raises_message, ne_ import sys import time import threading from sqlalchemy.testing.engines import testing_engine from sqlalchemy import create_engine, MetaData, INT, VARCHAR, Sequence, \ select, Integer, String, func, text, exc from sqlalchemy.testing.schema import Table from sqlalchemy.testing.schema import Column from sqlalchemy import testing from sqlalchemy.testing import fixtures users, 
metadata = None, None class TransactionTest(fixtures.TestBase): @classmethod def setup_class(cls): global users, metadata metadata = MetaData() users = Table('query_users', metadata, Column('user_id', INT, primary_key = True), Column('user_name', VARCHAR(20)), test_needs_acid=True, ) users.create(testing.db) def teardown(self): testing.db.execute(users.delete()).close() @classmethod @testing.crashes('mysql+cymysql', 'deadlock') def teardown_class(cls): users.drop(testing.db) def test_commits(self): connection = testing.db.connect() transaction = connection.begin() connection.execute(users.insert(), user_id=1, user_name='user1') transaction.commit() transaction = connection.begin() connection.execute(users.insert(), user_id=2, user_name='user2') connection.execute(users.insert(), user_id=3, user_name='user3') transaction.commit() transaction = connection.begin() result = connection.execute("select * from query_users") assert len(result.fetchall()) == 3 transaction.commit() connection.close() def test_rollback(self): """test a basic rollback""" connection = testing.db.connect() transaction = connection.begin() connection.execute(users.insert(), user_id=1, user_name='user1') connection.execute(users.insert(), user_id=2, user_name='user2') connection.execute(users.insert(), user_id=3, user_name='user3') transaction.rollback() result = connection.execute("select * from query_users") assert len(result.fetchall()) == 0 connection.close() def test_raise(self): connection = testing.db.connect() transaction = connection.begin() try: connection.execute(users.insert(), user_id=1, user_name='user1') connection.execute(users.insert(), user_id=2, user_name='user2') connection.execute(users.insert(), user_id=1, user_name='user3') transaction.commit() assert False except Exception , e: print "Exception: ", e transaction.rollback() result = connection.execute("select * from query_users") assert len(result.fetchall()) == 0 connection.close() def test_transaction_container(self): def go(conn, table, data): for d in data: conn.execute(table.insert(), d) testing.db.transaction(go, users, [dict(user_id=1, user_name='user1')]) eq_(testing.db.execute(users.select()).fetchall(), [(1, 'user1' )]) assert_raises(exc.DBAPIError, testing.db.transaction, go, users, [{'user_id': 2, 'user_name': 'user2'}, {'user_id': 1, 'user_name': 'user3'}]) eq_(testing.db.execute(users.select()).fetchall(), [(1, 'user1' )]) def test_nested_rollback(self): connection = testing.db.connect() try: transaction = connection.begin() try: connection.execute(users.insert(), user_id=1, user_name='user1') connection.execute(users.insert(), user_id=2, user_name='user2') connection.execute(users.insert(), user_id=3, user_name='user3') trans2 = connection.begin() try: connection.execute(users.insert(), user_id=4, user_name='user4') connection.execute(users.insert(), user_id=5, user_name='user5') raise Exception('uh oh') trans2.commit() except: trans2.rollback() raise transaction.rollback() except Exception, e: transaction.rollback() raise except Exception, e: try: assert str(e) == 'uh oh' # and not "This transaction is # inactive" finally: connection.close() def test_retains_through_options(self): connection = testing.db.connect() try: transaction = connection.begin() connection.execute(users.insert(), user_id=1, user_name='user1') conn2 = connection.execution_options(dummy=True) conn2.execute(users.insert(), user_id=2, user_name='user2') transaction.rollback() eq_(connection.scalar("select count(*) from query_users"), 0) finally: connection.close() 
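# [Illustrative aside added by the editor; not part of the original test
# suite.] The tests above drive Transaction.begin()/commit()/rollback()
# explicitly.  The same commit-on-success / rollback-on-exception behavior
# is available through the context-manager protocol (exercised directly via
# __exit__ in test_with_interface below); a minimal, hedged sketch against
# the same query_users table:
#
#     conn = testing.db.connect()
#     with conn.begin():
#         conn.execute(users.insert(), user_id=10, user_name='user10')
#     # committed on normal exit; an exception inside the block rolls back
#     conn.close()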
def test_nesting(self): connection = testing.db.connect() transaction = connection.begin() connection.execute(users.insert(), user_id=1, user_name='user1') connection.execute(users.insert(), user_id=2, user_name='user2') connection.execute(users.insert(), user_id=3, user_name='user3') trans2 = connection.begin() connection.execute(users.insert(), user_id=4, user_name='user4') connection.execute(users.insert(), user_id=5, user_name='user5') trans2.commit() transaction.rollback() self.assert_(connection.scalar('select count(*) from ' 'query_users') == 0) result = connection.execute('select * from query_users') assert len(result.fetchall()) == 0 connection.close() def test_with_interface(self): connection = testing.db.connect() trans = connection.begin() connection.execute(users.insert(), user_id=1, user_name='user1') connection.execute(users.insert(), user_id=2, user_name='user2') try: connection.execute(users.insert(), user_id=2, user_name='user2.5') except Exception, e: trans.__exit__(*sys.exc_info()) assert not trans.is_active self.assert_(connection.scalar('select count(*) from ' 'query_users') == 0) trans = connection.begin() connection.execute(users.insert(), user_id=1, user_name='user1') trans.__exit__(None, None, None) assert not trans.is_active self.assert_(connection.scalar('select count(*) from ' 'query_users') == 1) connection.close() def test_close(self): connection = testing.db.connect() transaction = connection.begin() connection.execute(users.insert(), user_id=1, user_name='user1') connection.execute(users.insert(), user_id=2, user_name='user2') connection.execute(users.insert(), user_id=3, user_name='user3') trans2 = connection.begin() connection.execute(users.insert(), user_id=4, user_name='user4') connection.execute(users.insert(), user_id=5, user_name='user5') assert connection.in_transaction() trans2.close() assert connection.in_transaction() transaction.commit() assert not connection.in_transaction() self.assert_(connection.scalar('select count(*) from ' 'query_users') == 5) result = connection.execute('select * from query_users') assert len(result.fetchall()) == 5 connection.close() def test_close2(self): connection = testing.db.connect() transaction = connection.begin() connection.execute(users.insert(), user_id=1, user_name='user1') connection.execute(users.insert(), user_id=2, user_name='user2') connection.execute(users.insert(), user_id=3, user_name='user3') trans2 = connection.begin() connection.execute(users.insert(), user_id=4, user_name='user4') connection.execute(users.insert(), user_id=5, user_name='user5') assert connection.in_transaction() trans2.close() assert connection.in_transaction() transaction.close() assert not connection.in_transaction() self.assert_(connection.scalar('select count(*) from ' 'query_users') == 0) result = connection.execute('select * from query_users') assert len(result.fetchall()) == 0 connection.close() @testing.requires.savepoints def test_nested_subtransaction_rollback(self): connection = testing.db.connect() transaction = connection.begin() connection.execute(users.insert(), user_id=1, user_name='user1') trans2 = connection.begin_nested() connection.execute(users.insert(), user_id=2, user_name='user2') trans2.rollback() connection.execute(users.insert(), user_id=3, user_name='user3') transaction.commit() eq_(connection.execute(select([users.c.user_id]). 
order_by(users.c.user_id)).fetchall(), [(1, ), (3, )]) connection.close() @testing.requires.savepoints @testing.crashes('oracle+zxjdbc', 'Errors out and causes subsequent tests to ' 'deadlock') def test_nested_subtransaction_commit(self): connection = testing.db.connect() transaction = connection.begin() connection.execute(users.insert(), user_id=1, user_name='user1') trans2 = connection.begin_nested() connection.execute(users.insert(), user_id=2, user_name='user2') trans2.commit() connection.execute(users.insert(), user_id=3, user_name='user3') transaction.commit() eq_(connection.execute(select([users.c.user_id]). order_by(users.c.user_id)).fetchall(), [(1, ), (2, ), (3, )]) connection.close() @testing.requires.savepoints def test_rollback_to_subtransaction(self): connection = testing.db.connect() transaction = connection.begin() connection.execute(users.insert(), user_id=1, user_name='user1') trans2 = connection.begin_nested() connection.execute(users.insert(), user_id=2, user_name='user2') trans3 = connection.begin() connection.execute(users.insert(), user_id=3, user_name='user3') trans3.rollback() connection.execute(users.insert(), user_id=4, user_name='user4') transaction.commit() eq_(connection.execute(select([users.c.user_id]). order_by(users.c.user_id)).fetchall(), [(1, ), (4, )]) connection.close() @testing.requires.two_phase_transactions def test_two_phase_transaction(self): connection = testing.db.connect() transaction = connection.begin_twophase() connection.execute(users.insert(), user_id=1, user_name='user1') transaction.prepare() transaction.commit() transaction = connection.begin_twophase() connection.execute(users.insert(), user_id=2, user_name='user2') transaction.commit() transaction.close() transaction = connection.begin_twophase() connection.execute(users.insert(), user_id=3, user_name='user3') transaction.rollback() transaction = connection.begin_twophase() connection.execute(users.insert(), user_id=4, user_name='user4') transaction.prepare() transaction.rollback() transaction.close() eq_(connection.execute(select([users.c.user_id]). order_by(users.c.user_id)).fetchall(), [(1, ), (2, )]) connection.close() # PG emergency shutdown: # select * from pg_prepared_xacts # ROLLBACK PREPARED '' @testing.crashes('mysql', 'Crashing on 5.5, not worth it') @testing.requires.skip_mysql_on_windows @testing.requires.two_phase_transactions @testing.requires.savepoints def test_mixed_two_phase_transaction(self): connection = testing.db.connect() transaction = connection.begin_twophase() connection.execute(users.insert(), user_id=1, user_name='user1') transaction2 = connection.begin() connection.execute(users.insert(), user_id=2, user_name='user2') transaction3 = connection.begin_nested() connection.execute(users.insert(), user_id=3, user_name='user3') transaction4 = connection.begin() connection.execute(users.insert(), user_id=4, user_name='user4') transaction4.commit() transaction3.rollback() connection.execute(users.insert(), user_id=5, user_name='user5') transaction2.commit() transaction.prepare() transaction.commit() eq_(connection.execute(select([users.c.user_id]). 
order_by(users.c.user_id)).fetchall(), [(1, ), (2, ), (5, )]) connection.close() @testing.requires.two_phase_transactions @testing.crashes('mysql+oursql', 'Times out in full test runs only, causing ' 'subsequent tests to fail') @testing.crashes('mysql+zxjdbc', 'Deadlocks, causing subsequent tests to fail') @testing.fails_on('mysql', 'FIXME: unknown') def test_two_phase_recover(self): # MySQL recovery doesn't currently seem to work correctly # Prepared transactions disappear when connections are closed # and even when they aren't it doesn't seem possible to use the # recovery id. connection = testing.db.connect() transaction = connection.begin_twophase() connection.execute(users.insert(), user_id=1, user_name='user1') transaction.prepare() connection.close() connection2 = testing.db.connect() eq_(connection2.execute(select([users.c.user_id]). order_by(users.c.user_id)).fetchall(), []) recoverables = connection2.recover_twophase() assert transaction.xid in recoverables connection2.commit_prepared(transaction.xid, recover=True) eq_(connection2.execute(select([users.c.user_id]). order_by(users.c.user_id)).fetchall(), [(1, )]) connection2.close() @testing.requires.two_phase_transactions def test_multiple_two_phase(self): conn = testing.db.connect() xa = conn.begin_twophase() conn.execute(users.insert(), user_id=1, user_name='user1') xa.prepare() xa.commit() xa = conn.begin_twophase() conn.execute(users.insert(), user_id=2, user_name='user2') xa.prepare() xa.rollback() xa = conn.begin_twophase() conn.execute(users.insert(), user_id=3, user_name='user3') xa.rollback() xa = conn.begin_twophase() conn.execute(users.insert(), user_id=4, user_name='user4') xa.prepare() xa.commit() result = \ conn.execute(select([users.c.user_name]). order_by(users.c.user_id)) eq_(result.fetchall(), [('user1', ), ('user4', )]) conn.close() class AutoRollbackTest(fixtures.TestBase): @classmethod def setup_class(cls): global metadata metadata = MetaData() @classmethod def teardown_class(cls): metadata.drop_all(testing.db) def test_rollback_deadlock(self): """test that returning connections to the pool clears any object locks.""" conn1 = testing.db.connect() conn2 = testing.db.connect() users = Table('deadlock_users', metadata, Column('user_id', INT, primary_key=True), Column('user_name', VARCHAR(20)), test_needs_acid=True) users.create(conn1) conn1.execute('select * from deadlock_users') conn1.close() # without auto-rollback in the connection pool's return() logic, # this deadlocks in PostgreSQL, because conn1 is returned to the # pool but still has a lock on "deadlock_users". comment out the # rollback in pool/ConnectionFairy._close() to see ! users.drop(conn2) conn2.close() class ExplicitAutoCommitTest(fixtures.TestBase): """test the 'autocommit' flag on select() and text() objects. Requires PostgreSQL so that we may define a custom function which modifies the database. 
""" __only_on__ = 'postgresql' @classmethod def setup_class(cls): global metadata, foo metadata = MetaData(testing.db) foo = Table('foo', metadata, Column('id', Integer, primary_key=True), Column('data', String(100))) metadata.create_all() testing.db.execute("create function insert_foo(varchar) " "returns integer as 'insert into foo(data) " "values ($1);select 1;' language sql") def teardown(self): foo.delete().execute().close() @classmethod def teardown_class(cls): testing.db.execute('drop function insert_foo(varchar)') metadata.drop_all() def test_control(self): # test that not using autocommit does not commit conn1 = testing.db.connect() conn2 = testing.db.connect() conn1.execute(select([func.insert_foo('data1')])) assert conn2.execute(select([foo.c.data])).fetchall() == [] conn1.execute(text("select insert_foo('moredata')")) assert conn2.execute(select([foo.c.data])).fetchall() == [] trans = conn1.begin() trans.commit() assert conn2.execute(select([foo.c.data])).fetchall() \ == [('data1', ), ('moredata', )] conn1.close() conn2.close() def test_explicit_compiled(self): conn1 = testing.db.connect() conn2 = testing.db.connect() conn1.execute(select([func.insert_foo('data1' )]).execution_options(autocommit=True)) assert conn2.execute(select([foo.c.data])).fetchall() \ == [('data1', )] conn1.close() conn2.close() def test_explicit_connection(self): conn1 = testing.db.connect() conn2 = testing.db.connect() conn1.execution_options(autocommit=True).\ execute(select([func.insert_foo('data1' )])) eq_(conn2.execute(select([foo.c.data])).fetchall(), [('data1', )]) # connection supersedes statement conn1.execution_options(autocommit=False).\ execute(select([func.insert_foo('data2' )]).execution_options(autocommit=True)) eq_(conn2.execute(select([foo.c.data])).fetchall(), [('data1', )]) # ditto conn1.execution_options(autocommit=True).\ execute(select([func.insert_foo('data3' )]).execution_options(autocommit=False)) eq_(conn2.execute(select([foo.c.data])).fetchall(), [('data1', ), ('data2', ), ('data3', )]) conn1.close() conn2.close() def test_explicit_text(self): conn1 = testing.db.connect() conn2 = testing.db.connect() conn1.execute(text("select insert_foo('moredata')" ).execution_options(autocommit=True)) assert conn2.execute(select([foo.c.data])).fetchall() \ == [('moredata', )] conn1.close() conn2.close() @testing.uses_deprecated(r'autocommit on select\(\) is deprecated', r'autocommit\(\) is deprecated') def test_explicit_compiled_deprecated(self): conn1 = testing.db.connect() conn2 = testing.db.connect() conn1.execute(select([func.insert_foo('data1')], autocommit=True)) assert conn2.execute(select([foo.c.data])).fetchall() \ == [('data1', )] conn1.execute(select([func.insert_foo('data2')]).autocommit()) assert conn2.execute(select([foo.c.data])).fetchall() \ == [('data1', ), ('data2', )] conn1.close() conn2.close() @testing.uses_deprecated(r'autocommit on text\(\) is deprecated') def test_explicit_text_deprecated(self): conn1 = testing.db.connect() conn2 = testing.db.connect() conn1.execute(text("select insert_foo('moredata')", autocommit=True)) assert conn2.execute(select([foo.c.data])).fetchall() \ == [('moredata', )] conn1.close() conn2.close() def test_implicit_text(self): conn1 = testing.db.connect() conn2 = testing.db.connect() conn1.execute(text("insert into foo (data) values " "('implicitdata')")) assert conn2.execute(select([foo.c.data])).fetchall() \ == [('implicitdata', )] conn1.close() conn2.close() tlengine = None class TLTransactionTest(fixtures.TestBase): __requires__ = 
('ad_hoc_engines', ) @classmethod def setup_class(cls): global users, metadata, tlengine tlengine = testing_engine(options=dict(strategy='threadlocal')) metadata = MetaData() users = Table('query_users', metadata, Column('user_id', INT, Sequence('query_users_id_seq', optional=True), primary_key=True), Column('user_name', VARCHAR(20)), test_needs_acid=True) metadata.create_all(tlengine) def teardown(self): tlengine.execute(users.delete()).close() @classmethod def teardown_class(cls): tlengine.close() metadata.drop_all(tlengine) tlengine.dispose() def setup(self): # ensure tests start with engine closed tlengine.close() @testing.crashes('oracle', 'TNS error of unknown origin occurs on the buildbot.') def test_rollback_no_trans(self): tlengine = testing_engine(options=dict(strategy="threadlocal")) # shouldn't fail tlengine.rollback() tlengine.begin() tlengine.rollback() # shouldn't fail tlengine.rollback() def test_commit_no_trans(self): tlengine = testing_engine(options=dict(strategy="threadlocal")) # shouldn't fail tlengine.commit() tlengine.begin() tlengine.rollback() # shouldn't fail tlengine.commit() def test_prepare_no_trans(self): tlengine = testing_engine(options=dict(strategy="threadlocal")) # shouldn't fail tlengine.prepare() tlengine.begin() tlengine.rollback() # shouldn't fail tlengine.prepare() def test_connection_close(self): """test that when connections are closed for real, transactions are rolled back and disposed.""" c = tlengine.contextual_connect() c.begin() assert c.in_transaction() c.close() assert not c.in_transaction() def test_transaction_close(self): c = tlengine.contextual_connect() t = c.begin() tlengine.execute(users.insert(), user_id=1, user_name='user1') tlengine.execute(users.insert(), user_id=2, user_name='user2') t2 = c.begin() tlengine.execute(users.insert(), user_id=3, user_name='user3') tlengine.execute(users.insert(), user_id=4, user_name='user4') t2.close() result = c.execute('select * from query_users') assert len(result.fetchall()) == 4 t.close() external_connection = tlengine.connect() result = external_connection.execute('select * from query_users' ) try: assert len(result.fetchall()) == 0 finally: c.close() external_connection.close() def test_rollback(self): """test a basic rollback""" tlengine.begin() tlengine.execute(users.insert(), user_id=1, user_name='user1') tlengine.execute(users.insert(), user_id=2, user_name='user2') tlengine.execute(users.insert(), user_id=3, user_name='user3') tlengine.rollback() external_connection = tlengine.connect() result = external_connection.execute('select * from query_users' ) try: assert len(result.fetchall()) == 0 finally: external_connection.close() def test_commit(self): """test a basic commit""" tlengine.begin() tlengine.execute(users.insert(), user_id=1, user_name='user1') tlengine.execute(users.insert(), user_id=2, user_name='user2') tlengine.execute(users.insert(), user_id=3, user_name='user3') tlengine.commit() external_connection = tlengine.connect() result = external_connection.execute('select * from query_users' ) try: assert len(result.fetchall()) == 3 finally: external_connection.close() def test_with_interface(self): trans = tlengine.begin() tlengine.execute(users.insert(), user_id=1, user_name='user1') tlengine.execute(users.insert(), user_id=2, user_name='user2') trans.commit() trans = tlengine.begin() tlengine.execute(users.insert(), user_id=3, user_name='user3') trans.__exit__(Exception, "fake", None) trans = tlengine.begin() tlengine.execute(users.insert(), user_id=4, user_name='user4') 
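# [Editor's note, not part of the original test suite.] The hand-rolled
# __exit__ calls mimic leaving a `with` block: the (Exception, "fake", None)
# triple above plays the part of an exception escaping the block and rolls
# that transaction back, while the (None, None, None) call below corresponds
# to a normal exit and commits; hence only user1, user2 and user4 survive in
# the assertion that follows.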
trans.__exit__(None, None, None) eq_( tlengine.execute(users.select().order_by(users.c.user_id)).fetchall(), [ (1, 'user1'), (2, 'user2'), (4, 'user4'), ] ) def test_commits(self): connection = tlengine.connect() assert connection.execute('select count(*) from query_users' ).scalar() == 0 connection.close() connection = tlengine.contextual_connect() transaction = connection.begin() connection.execute(users.insert(), user_id=1, user_name='user1') transaction.commit() transaction = connection.begin() connection.execute(users.insert(), user_id=2, user_name='user2') connection.execute(users.insert(), user_id=3, user_name='user3') transaction.commit() transaction = connection.begin() result = connection.execute('select * from query_users') l = result.fetchall() assert len(l) == 3, 'expected 3 got %d' % len(l) transaction.commit() connection.close() def test_rollback_off_conn(self): # test that a TLTransaction opened off a TLConnection allows # that TLConnection to be aware of the transactional context conn = tlengine.contextual_connect() trans = conn.begin() conn.execute(users.insert(), user_id=1, user_name='user1') conn.execute(users.insert(), user_id=2, user_name='user2') conn.execute(users.insert(), user_id=3, user_name='user3') trans.rollback() external_connection = tlengine.connect() result = external_connection.execute('select * from query_users' ) try: assert len(result.fetchall()) == 0 finally: conn.close() external_connection.close() def test_morerollback_off_conn(self): # test that an existing TLConnection automatically takes place # in a TLTransaction opened on a second TLConnection conn = tlengine.contextual_connect() conn2 = tlengine.contextual_connect() trans = conn2.begin() conn.execute(users.insert(), user_id=1, user_name='user1') conn.execute(users.insert(), user_id=2, user_name='user2') conn.execute(users.insert(), user_id=3, user_name='user3') trans.rollback() external_connection = tlengine.connect() result = external_connection.execute('select * from query_users' ) try: assert len(result.fetchall()) == 0 finally: conn.close() conn2.close() external_connection.close() def test_commit_off_connection(self): conn = tlengine.contextual_connect() trans = conn.begin() conn.execute(users.insert(), user_id=1, user_name='user1') conn.execute(users.insert(), user_id=2, user_name='user2') conn.execute(users.insert(), user_id=3, user_name='user3') trans.commit() external_connection = tlengine.connect() result = external_connection.execute('select * from query_users' ) try: assert len(result.fetchall()) == 3 finally: conn.close() external_connection.close() def test_nesting_rollback(self): """tests nesting of transactions, rollback at the end""" external_connection = tlengine.connect() self.assert_(external_connection.connection is not tlengine.contextual_connect().connection) tlengine.begin() tlengine.execute(users.insert(), user_id=1, user_name='user1') tlengine.execute(users.insert(), user_id=2, user_name='user2') tlengine.execute(users.insert(), user_id=3, user_name='user3') tlengine.begin() tlengine.execute(users.insert(), user_id=4, user_name='user4') tlengine.execute(users.insert(), user_id=5, user_name='user5') tlengine.commit() tlengine.rollback() try: self.assert_(external_connection.scalar( 'select count(*) from query_users' ) == 0) finally: external_connection.close() def test_nesting_commit(self): """tests nesting of transactions, commit at the end.""" external_connection = tlengine.connect() self.assert_(external_connection.connection is not 
tlengine.contextual_connect().connection) tlengine.begin() tlengine.execute(users.insert(), user_id=1, user_name='user1') tlengine.execute(users.insert(), user_id=2, user_name='user2') tlengine.execute(users.insert(), user_id=3, user_name='user3') tlengine.begin() tlengine.execute(users.insert(), user_id=4, user_name='user4') tlengine.execute(users.insert(), user_id=5, user_name='user5') tlengine.commit() tlengine.commit() try: self.assert_(external_connection.scalar( 'select count(*) from query_users' ) == 5) finally: external_connection.close() def test_mixed_nesting(self): """tests nesting of transactions off the TLEngine directly inside of tranasctions off the connection from the TLEngine""" external_connection = tlengine.connect() self.assert_(external_connection.connection is not tlengine.contextual_connect().connection) conn = tlengine.contextual_connect() trans = conn.begin() trans2 = conn.begin() tlengine.execute(users.insert(), user_id=1, user_name='user1') tlengine.execute(users.insert(), user_id=2, user_name='user2') tlengine.execute(users.insert(), user_id=3, user_name='user3') tlengine.begin() tlengine.execute(users.insert(), user_id=4, user_name='user4') tlengine.begin() tlengine.execute(users.insert(), user_id=5, user_name='user5') tlengine.execute(users.insert(), user_id=6, user_name='user6') tlengine.execute(users.insert(), user_id=7, user_name='user7') tlengine.commit() tlengine.execute(users.insert(), user_id=8, user_name='user8') tlengine.commit() trans2.commit() trans.rollback() conn.close() try: self.assert_(external_connection.scalar( 'select count(*) from query_users' ) == 0) finally: external_connection.close() def test_more_mixed_nesting(self): """tests nesting of transactions off the connection from the TLEngine inside of tranasctions off thbe TLEngine directly.""" external_connection = tlengine.connect() self.assert_(external_connection.connection is not tlengine.contextual_connect().connection) tlengine.begin() connection = tlengine.contextual_connect() connection.execute(users.insert(), user_id=1, user_name='user1') tlengine.begin() connection.execute(users.insert(), user_id=2, user_name='user2') connection.execute(users.insert(), user_id=3, user_name='user3') trans = connection.begin() connection.execute(users.insert(), user_id=4, user_name='user4') connection.execute(users.insert(), user_id=5, user_name='user5') trans.commit() tlengine.commit() tlengine.rollback() connection.close() try: self.assert_(external_connection.scalar( 'select count(*) from query_users' ) == 0) finally: external_connection.close() @testing.requires.savepoints def test_nested_subtransaction_rollback(self): tlengine.begin() tlengine.execute(users.insert(), user_id=1, user_name='user1') tlengine.begin_nested() tlengine.execute(users.insert(), user_id=2, user_name='user2') tlengine.rollback() tlengine.execute(users.insert(), user_id=3, user_name='user3') tlengine.commit() tlengine.close() eq_(tlengine.execute(select([users.c.user_id]). 
order_by(users.c.user_id)).fetchall(), [(1, ), (3, )]) tlengine.close() @testing.requires.savepoints @testing.crashes('oracle+zxjdbc', 'Errors out and causes subsequent tests to ' 'deadlock') def test_nested_subtransaction_commit(self): tlengine.begin() tlengine.execute(users.insert(), user_id=1, user_name='user1') tlengine.begin_nested() tlengine.execute(users.insert(), user_id=2, user_name='user2') tlengine.commit() tlengine.execute(users.insert(), user_id=3, user_name='user3') tlengine.commit() tlengine.close() eq_(tlengine.execute(select([users.c.user_id]). order_by(users.c.user_id)).fetchall(), [(1, ), (2, ), (3, )]) tlengine.close() @testing.requires.savepoints def test_rollback_to_subtransaction(self): tlengine.begin() tlengine.execute(users.insert(), user_id=1, user_name='user1') tlengine.begin_nested() tlengine.execute(users.insert(), user_id=2, user_name='user2') tlengine.begin() tlengine.execute(users.insert(), user_id=3, user_name='user3') tlengine.rollback() tlengine.rollback() tlengine.execute(users.insert(), user_id=4, user_name='user4') tlengine.commit() tlengine.close() eq_(tlengine.execute(select([users.c.user_id]). order_by(users.c.user_id)).fetchall(), [(1, ), (4, )]) tlengine.close() def test_connections(self): """tests that contextual_connect is threadlocal""" c1 = tlengine.contextual_connect() c2 = tlengine.contextual_connect() assert c1.connection is c2.connection c2.close() assert not c1.closed assert not tlengine.closed @testing.requires.independent_cursors def test_result_closing(self): """tests that contextual_connect is threadlocal""" r1 = tlengine.execute(select([1])) r2 = tlengine.execute(select([1])) row1 = r1.fetchone() row2 = r2.fetchone() r1.close() assert r2.connection is r1.connection assert not r2.connection.closed assert not tlengine.closed # close again, nothing happens since resultproxy calls close() # only once r1.close() assert r2.connection is r1.connection assert not r2.connection.closed assert not tlengine.closed r2.close() assert r2.connection.closed assert tlengine.closed @testing.crashes('oracle+cx_oracle', 'intermittent failures on the buildbot') def test_dispose(self): eng = testing_engine(options=dict(strategy='threadlocal')) result = eng.execute(select([1])) eng.dispose() eng.execute(select([1])) @testing.requires.two_phase_transactions def test_two_phase_transaction(self): tlengine.begin_twophase() tlengine.execute(users.insert(), user_id=1, user_name='user1') tlengine.prepare() tlengine.commit() tlengine.begin_twophase() tlengine.execute(users.insert(), user_id=2, user_name='user2') tlengine.commit() tlengine.begin_twophase() tlengine.execute(users.insert(), user_id=3, user_name='user3') tlengine.rollback() tlengine.begin_twophase() tlengine.execute(users.insert(), user_id=4, user_name='user4') tlengine.prepare() tlengine.rollback() eq_(tlengine.execute(select([users.c.user_id]). 
order_by(users.c.user_id)).fetchall(), [(1, ), (2, )]) counters = None class ForUpdateTest(fixtures.TestBase): __requires__ = 'ad_hoc_engines', @classmethod def setup_class(cls): global counters, metadata metadata = MetaData() counters = Table('forupdate_counters', metadata, Column('counter_id', INT, primary_key=True), Column('counter_value', INT), test_needs_acid=True) counters.create(testing.db) def teardown(self): testing.db.execute(counters.delete()).close() @classmethod def teardown_class(cls): counters.drop(testing.db) def increment(self, count, errors, update_style=True, delay=0.005): con = testing.db.connect() sel = counters.select(for_update=update_style, whereclause=counters.c.counter_id == 1) for i in xrange(count): trans = con.begin() try: existing = con.execute(sel).first() incr = existing['counter_value'] + 1 time.sleep(delay) con.execute(counters.update(counters.c.counter_id == 1, values={'counter_value': incr})) time.sleep(delay) readback = con.execute(sel).first() if readback['counter_value'] != incr: raise AssertionError('Got %s post-update, expected ' '%s' % (readback['counter_value'], incr)) trans.commit() except Exception, e: trans.rollback() errors.append(e) break con.close() @testing.crashes('mssql', 'FIXME: unknown') @testing.crashes('firebird', 'FIXME: unknown') @testing.crashes('sybase', 'FIXME: unknown') @testing.crashes('access', 'FIXME: unknown') @testing.requires.independent_connections def test_queued_update(self): """Test SELECT FOR UPDATE with concurrent modifications. Runs concurrent modifications on a single row in the users table, with each mutator trying to increment a value stored in user_name. """ db = testing.db db.execute(counters.insert(), counter_id=1, counter_value=0) iterations, thread_count = 10, 5 threads, errors = [], [] for i in xrange(thread_count): thrd = threading.Thread(target=self.increment, args=(iterations, ), kwargs={'errors': errors, 'update_style': True}) thrd.start() threads.append(thrd) for thrd in threads: thrd.join() assert not errors sel = counters.select(whereclause=counters.c.counter_id == 1) final = db.execute(sel).first() eq_(final['counter_value'], iterations * thread_count) def overlap(self, ids, errors, update_style): sel = counters.select(for_update=update_style, whereclause=counters.c.counter_id.in_(ids)) con = testing.db.connect() trans = con.begin() try: rows = con.execute(sel).fetchall() time.sleep(0.50) trans.commit() except Exception, e: trans.rollback() errors.append(e) con.close() def _threaded_overlap(self, thread_count, groups, update_style=True, pool=5): db = testing.db for cid in range(pool - 1): db.execute(counters.insert(), counter_id=cid + 1, counter_value=0) errors, threads = [], [] for i in xrange(thread_count): thrd = threading.Thread(target=self.overlap, args=(groups.pop(0), errors, update_style)) time.sleep(0.20) # give the previous thread a chance to start # to ensure it gets a lock thrd.start() threads.append(thrd) for thrd in threads: thrd.join() return errors @testing.crashes('mssql', 'FIXME: unknown') @testing.crashes('firebird', 'FIXME: unknown') @testing.crashes('sybase', 'FIXME: unknown') @testing.crashes('access', 'FIXME: unknown') @testing.requires.independent_connections def test_queued_select(self): """Simple SELECT FOR UPDATE conflict test""" errors = self._threaded_overlap(2, [(1, 2, 3), (3, 4, 5)]) assert not errors @testing.crashes('mssql', 'FIXME: unknown') @testing.fails_on('mysql', 'No support for NOWAIT') @testing.crashes('firebird', 'FIXME: unknown') @testing.crashes('sybase', 
'FIXME: unknown') @testing.crashes('access', 'FIXME: unknown') @testing.requires.independent_connections def test_nowait_select(self): """Simple SELECT FOR UPDATE NOWAIT conflict test""" errors = self._threaded_overlap(2, [(1, 2, 3), (3, 4, 5)], update_style='nowait') assert errors class IsolationLevelTest(fixtures.TestBase): __requires__ = ('isolation_level', 'ad_hoc_engines') def _default_isolation_level(self): if testing.against('sqlite'): return 'SERIALIZABLE' elif testing.against('postgresql'): return 'READ COMMITTED' elif testing.against('mysql'): return "REPEATABLE READ" else: assert False, "default isolation level not known" def _non_default_isolation_level(self): if testing.against('sqlite'): return 'READ UNCOMMITTED' elif testing.against('postgresql'): return 'SERIALIZABLE' elif testing.against('mysql'): return "SERIALIZABLE" else: assert False, "non default isolation level not known" def test_engine_param_stays(self): eng = testing_engine() isolation_level = eng.dialect.get_isolation_level( eng.connect().connection) level = self._non_default_isolation_level() ne_(isolation_level, level) eng = testing_engine(options=dict(isolation_level=level)) eq_( eng.dialect.get_isolation_level( eng.connect().connection), level ) # check that it stays conn = eng.connect() eq_( eng.dialect.get_isolation_level(conn.connection), level ) conn.close() conn = eng.connect() eq_( eng.dialect.get_isolation_level(conn.connection), level ) conn.close() def test_default_level(self): eng = testing_engine(options=dict()) isolation_level = eng.dialect.get_isolation_level( eng.connect().connection) eq_(isolation_level, self._default_isolation_level()) def test_reset_level(self): eng = testing_engine(options=dict()) conn = eng.connect() eq_( eng.dialect.get_isolation_level(conn.connection), self._default_isolation_level() ) eng.dialect.set_isolation_level( conn.connection, self._non_default_isolation_level() ) eq_( eng.dialect.get_isolation_level(conn.connection), self._non_default_isolation_level() ) eng.dialect.reset_isolation_level(conn.connection) eq_( eng.dialect.get_isolation_level(conn.connection), self._default_isolation_level() ) conn.close() def test_reset_level_with_setting(self): eng = testing_engine(options=dict( isolation_level= self._non_default_isolation_level())) conn = eng.connect() eq_(eng.dialect.get_isolation_level(conn.connection), self._non_default_isolation_level()) eng.dialect.set_isolation_level(conn.connection, self._default_isolation_level()) eq_(eng.dialect.get_isolation_level(conn.connection), self._default_isolation_level()) eng.dialect.reset_isolation_level(conn.connection) eq_(eng.dialect.get_isolation_level(conn.connection), self._non_default_isolation_level()) conn.close() def test_invalid_level(self): eng = testing_engine(options=dict(isolation_level='FOO')) assert_raises_message( exc.ArgumentError, "Invalid value '%s' for isolation_level. 
" "Valid isolation levels for %s are %s" % ("FOO", eng.dialect.name, ", ".join(eng.dialect._isolation_lookup)), eng.connect) def test_per_connection(self): from sqlalchemy.pool import QueuePool eng = testing_engine(options=dict( poolclass=QueuePool, pool_size=2, max_overflow=0)) c1 = eng.connect() c1 = c1.execution_options( isolation_level=self._non_default_isolation_level() ) c2 = eng.connect() eq_( eng.dialect.get_isolation_level(c1.connection), self._non_default_isolation_level() ) eq_( eng.dialect.get_isolation_level(c2.connection), self._default_isolation_level() ) c1.close() c2.close() c3 = eng.connect() eq_( eng.dialect.get_isolation_level(c3.connection), self._default_isolation_level() ) c4 = eng.connect() eq_( eng.dialect.get_isolation_level(c4.connection), self._default_isolation_level() ) c3.close() c4.close() def test_per_statement_bzzt(self): assert_raises_message( exc.ArgumentError, r"'isolation_level' execution option may only be specified " r"on Connection.execution_options\(\), or " r"per-engine using the isolation_level " r"argument to create_engine\(\).", select([1]).execution_options, isolation_level=self._non_default_isolation_level() ) def test_per_engine_bzzt(self): assert_raises_message( exc.ArgumentError, r"'isolation_level' execution option may " r"only be specified on Connection.execution_options\(\). " r"To set engine-wide isolation level, " r"use the isolation_level argument to create_engine\(\).", create_engine, testing.db.url, execution_options={'isolation_level': self._non_default_isolation_level} ) SQLAlchemy-0.8.4/test/ext/0000755000076500000240000000000012251151573016012 5ustar classicstaff00000000000000SQLAlchemy-0.8.4/test/ext/__init__.py0000644000076500000240000000000012251147172020111 0ustar classicstaff00000000000000SQLAlchemy-0.8.4/test/ext/declarative/0000755000076500000240000000000012251151573020275 5ustar classicstaff00000000000000SQLAlchemy-0.8.4/test/ext/declarative/__init__.py0000644000076500000240000000000012251147172022374 0ustar classicstaff00000000000000SQLAlchemy-0.8.4/test/ext/declarative/test_basic.py0000644000076500000240000015161312251150015022764 0ustar classicstaff00000000000000 from sqlalchemy.testing import eq_, assert_raises, \ assert_raises_message, is_ from sqlalchemy.ext import declarative as decl from sqlalchemy import exc import sqlalchemy as sa from sqlalchemy import testing from sqlalchemy import MetaData, Integer, String, ForeignKey, \ ForeignKeyConstraint, Index from sqlalchemy.testing.schema import Table, Column from sqlalchemy.orm import relationship, create_session, class_mapper, \ joinedload, configure_mappers, backref, clear_mappers, \ deferred, column_property, composite,\ Session from sqlalchemy.testing import eq_ from sqlalchemy.util import classproperty from sqlalchemy.ext.declarative import declared_attr, AbstractConcreteBase, \ ConcreteBase, synonym_for from sqlalchemy.testing import fixtures from sqlalchemy.testing.util import gc_collect Base = None class DeclarativeTestBase(fixtures.TestBase, testing.AssertsExecutionResults, testing.AssertsCompiledSQL): __dialect__ = 'default' def setup(self): global Base Base = decl.declarative_base(testing.db) def teardown(self): Session.close_all() clear_mappers() Base.metadata.drop_all() class DeclarativeTest(DeclarativeTestBase): def test_basic(self): class User(Base, fixtures.ComparableEntity): __tablename__ = 'users' id = Column('id', Integer, primary_key=True, test_needs_autoincrement=True) name = Column('name', String(50)) addresses = relationship("Address", 
backref="user") class Address(Base, fixtures.ComparableEntity): __tablename__ = 'addresses' id = Column(Integer, primary_key=True, test_needs_autoincrement=True) email = Column(String(50), key='_email') user_id = Column('user_id', Integer, ForeignKey('users.id'), key='_user_id') Base.metadata.create_all() eq_(Address.__table__.c['id'].name, 'id') eq_(Address.__table__.c['_email'].name, 'email') eq_(Address.__table__.c['_user_id'].name, 'user_id') u1 = User(name='u1', addresses=[ Address(email='one'), Address(email='two'), ]) sess = create_session() sess.add(u1) sess.flush() sess.expunge_all() eq_(sess.query(User).all(), [User(name='u1', addresses=[ Address(email='one'), Address(email='two'), ])]) a1 = sess.query(Address).filter(Address.email == 'two').one() eq_(a1, Address(email='two')) eq_(a1.user, User(name='u1')) def test_no_table(self): def go(): class User(Base): id = Column('id', Integer, primary_key=True) assert_raises_message(sa.exc.InvalidRequestError, 'does not have a __table__', go) def test_table_args_empty_dict(self): class MyModel(Base): __tablename__ = 'test' id = Column(Integer, primary_key=True) __table_args__ = {} def test_table_args_empty_tuple(self): class MyModel(Base): __tablename__ = 'test' id = Column(Integer, primary_key=True) __table_args__ = () def test_cant_add_columns(self): t = Table('t', Base.metadata, Column('id', Integer, primary_key=True), Column('data', String)) def go(): class User(Base): __table__ = t foo = Column(Integer, primary_key=True) # can't specify new columns not already in the table assert_raises_message(sa.exc.ArgumentError, "Can't add additional column 'foo' when " "specifying __table__", go) # regular re-mapping works tho class Bar(Base): __table__ = t some_data = t.c.data assert class_mapper(Bar).get_property('some_data').columns[0] \ is t.c.data def test_difficult_class(self): """test no getattr() errors with a customized class""" # metaclass to mock the way zope.interface breaks getattr() class BrokenMeta(type): def __getattribute__(self, attr): if attr == 'xyzzy': raise AttributeError, 'xyzzy' else: return object.__getattribute__(self,attr) # even though this class has an xyzzy attribute, getattr(cls,"xyzzy") # fails class BrokenParent(object): __metaclass__ = BrokenMeta xyzzy = "magic" # _as_declarative() inspects obj.__class__.__bases__ class User(BrokenParent,fixtures.ComparableEntity): __tablename__ = 'users' id = Column('id', Integer, primary_key=True, test_needs_autoincrement=True) name = Column('name', String(50)) decl.instrument_declarative(User,{},Base.metadata) def test_reserved_identifiers(self): def go1(): class User1(Base): __tablename__ = 'user1' id = Column(Integer, primary_key=True) metadata = Column(Integer) def go2(): class User2(Base): __tablename__ = 'user2' id = Column(Integer, primary_key=True) metadata = relationship("Address") for go in (go1, go2): assert_raises_message( exc.InvalidRequestError, "Attribute name 'metadata' is reserved " "for the MetaData instance when using a " "declarative base class.", go ) def test_undefer_column_name(self): # TODO: not sure if there was an explicit # test for this elsewhere foo = Column(Integer) eq_(str(foo), '(no name)') eq_(foo.key, None) eq_(foo.name, None) decl.base._undefer_column_name('foo', foo) eq_(str(foo), 'foo') eq_(foo.key, 'foo') eq_(foo.name, 'foo') def test_recompile_on_othermapper(self): """declarative version of the same test in mappers.py""" from sqlalchemy.orm import mapperlib class User(Base): __tablename__ = 'users' id = Column('id', Integer, 
primary_key=True) name = Column('name', String(50)) class Address(Base): __tablename__ = 'addresses' id = Column('id', Integer, primary_key=True) email = Column('email', String(50)) user_id = Column('user_id', Integer, ForeignKey('users.id')) user = relationship("User", primaryjoin=user_id == User.id, backref="addresses") assert mapperlib._new_mappers is True u = User() assert User.addresses assert mapperlib._new_mappers is False def test_string_dependency_resolution(self): from sqlalchemy.sql import desc class User(Base, fixtures.ComparableEntity): __tablename__ = 'users' id = Column(Integer, primary_key=True, test_needs_autoincrement=True) name = Column(String(50)) addresses = relationship('Address', order_by='desc(Address.email)', primaryjoin='User.id==Address.user_id', foreign_keys='[Address.user_id]', backref=backref('user', primaryjoin='User.id==Address.user_id', foreign_keys='[Address.user_id]')) class Address(Base, fixtures.ComparableEntity): __tablename__ = 'addresses' id = Column(Integer, primary_key=True, test_needs_autoincrement=True) email = Column(String(50)) user_id = Column(Integer) # note no foreign key Base.metadata.create_all() sess = create_session() u1 = User(name='ed', addresses=[Address(email='abc'), Address(email='def'), Address(email='xyz')]) sess.add(u1) sess.flush() sess.expunge_all() eq_(sess.query(User).filter(User.name == 'ed').one(), User(name='ed', addresses=[Address(email='xyz'), Address(email='def'), Address(email='abc')])) class Foo(Base, fixtures.ComparableEntity): __tablename__ = 'foo' id = Column(Integer, primary_key=True) rel = relationship('User', primaryjoin='User.addresses==Foo.id') assert_raises_message(exc.InvalidRequestError, "'addresses' is not an instance of " "ColumnProperty", configure_mappers) def test_string_dependency_resolution_synonym(self): from sqlalchemy.sql import desc class User(Base, fixtures.ComparableEntity): __tablename__ = 'users' id = Column(Integer, primary_key=True, test_needs_autoincrement=True) name = Column(String(50)) Base.metadata.create_all() sess = create_session() u1 = User(name='ed') sess.add(u1) sess.flush() sess.expunge_all() eq_(sess.query(User).filter(User.name == 'ed').one(), User(name='ed')) class Foo(Base, fixtures.ComparableEntity): __tablename__ = 'foo' id = Column(Integer, primary_key=True) _user_id = Column(Integer) rel = relationship('User', uselist=False, foreign_keys=[User.id], primaryjoin='Foo.user_id==User.id') @synonym_for('_user_id') @property def user_id(self): return self._user_id foo = Foo() foo.rel = u1 assert foo.rel == u1 def test_string_dependency_resolution_orm_descriptor(self): from sqlalchemy.ext.hybrid import hybrid_property class User(Base): __tablename__ = 'user' id = Column(Integer, primary_key=True) firstname = Column(String(50)) lastname = Column(String(50)) game_id = Column(Integer, ForeignKey('game.id')) @hybrid_property def fullname(self): return self.firstname + " " + self.lastname class Game(Base): __tablename__ = 'game' id = Column(Integer, primary_key=True) name = Column(String(50)) users = relationship("User", order_by="User.fullname") s = Session() self.assert_compile( s.query(Game).options(joinedload(Game.users)), "SELECT game.id AS game_id, game.name AS game_name, " "user_1.id AS user_1_id, user_1.firstname AS user_1_firstname, " "user_1.lastname AS user_1_lastname, " "user_1.game_id AS user_1_game_id " "FROM game LEFT OUTER JOIN \"user\" AS user_1 ON game.id = " "user_1.game_id ORDER BY " "user_1.firstname || :firstname_1 || user_1.lastname" ) def 
test_string_dependency_resolution_no_table(self): class User(Base, fixtures.ComparableEntity): __tablename__ = 'users' id = Column(Integer, primary_key=True, test_needs_autoincrement=True) name = Column(String(50)) class Bar(Base, fixtures.ComparableEntity): __tablename__ = 'bar' id = Column(Integer, primary_key=True) rel = relationship('User', primaryjoin='User.id==Bar.__table__.id') assert_raises_message(exc.InvalidRequestError, "does not have a mapped column named " "'__table__'", configure_mappers) def test_string_w_pj_annotations(self): class User(Base, fixtures.ComparableEntity): __tablename__ = 'users' id = Column(Integer, primary_key=True, test_needs_autoincrement=True) name = Column(String(50)) class Address(Base, fixtures.ComparableEntity): __tablename__ = 'addresses' id = Column(Integer, primary_key=True, test_needs_autoincrement=True) email = Column(String(50)) user_id = Column(Integer) user = relationship("User", primaryjoin="remote(User.id)==foreign(Address.user_id)" ) eq_( Address.user.property._join_condition.local_remote_pairs, [(Address.__table__.c.user_id, User.__table__.c.id)] ) def test_string_dependency_resolution_no_magic(self): """test that full tinkery expressions work as written""" class User(Base, fixtures.ComparableEntity): __tablename__ = 'users' id = Column(Integer, primary_key=True) addresses = relationship('Address', primaryjoin='User.id==Address.user_id.prop.columns[' '0]') class Address(Base, fixtures.ComparableEntity): __tablename__ = 'addresses' id = Column(Integer, primary_key=True) user_id = Column(Integer, ForeignKey('users.id')) configure_mappers() eq_(str(User.addresses.prop.primaryjoin), 'users.id = addresses.user_id') def test_string_dependency_resolution_module_qualified(self): class User(Base, fixtures.ComparableEntity): __tablename__ = 'users' id = Column(Integer, primary_key=True) addresses = relationship('%s.Address' % __name__, primaryjoin='%s.User.id==%s.Address.user_id.prop.columns[' '0]' % (__name__, __name__)) class Address(Base, fixtures.ComparableEntity): __tablename__ = 'addresses' id = Column(Integer, primary_key=True) user_id = Column(Integer, ForeignKey('users.id')) configure_mappers() eq_(str(User.addresses.prop.primaryjoin), 'users.id = addresses.user_id') def test_string_dependency_resolution_in_backref(self): class User(Base, fixtures.ComparableEntity): __tablename__ = 'users' id = Column(Integer, primary_key=True) name = Column(String(50)) addresses = relationship('Address', primaryjoin='User.id==Address.user_id', backref='user') class Address(Base, fixtures.ComparableEntity): __tablename__ = 'addresses' id = Column(Integer, primary_key=True) email = Column(String(50)) user_id = Column(Integer, ForeignKey('users.id')) configure_mappers() eq_(str(User.addresses.property.primaryjoin), str(Address.user.property.primaryjoin)) def test_string_dependency_resolution_tables(self): class User(Base, fixtures.ComparableEntity): __tablename__ = 'users' id = Column(Integer, primary_key=True) name = Column(String(50)) props = relationship('Prop', secondary='user_to_prop', primaryjoin='User.id==user_to_prop.c.u' 'ser_id', secondaryjoin='user_to_prop.c.prop_id=' '=Prop.id', backref='users') class Prop(Base, fixtures.ComparableEntity): __tablename__ = 'props' id = Column(Integer, primary_key=True) name = Column(String(50)) user_to_prop = Table('user_to_prop', Base.metadata, Column('user_id', Integer, ForeignKey('users.id')), Column('prop_id', Integer, ForeignKey('props.id'))) configure_mappers() assert 
class_mapper(User).get_property('props').secondary \ is user_to_prop def test_string_dependency_resolution_schemas(self): Base = decl.declarative_base() class User(Base): __tablename__ = 'users' __table_args__ = {'schema':'fooschema'} id = Column(Integer, primary_key=True) name = Column(String(50)) props = relationship('Prop', secondary='fooschema.user_to_prop', primaryjoin='User.id==fooschema.user_to_prop.c.user_id', secondaryjoin='fooschema.user_to_prop.c.prop_id==Prop.id', backref='users') class Prop(Base): __tablename__ = 'props' __table_args__ = {'schema':'fooschema'} id = Column(Integer, primary_key=True) name = Column(String(50)) user_to_prop = Table('user_to_prop', Base.metadata, Column('user_id', Integer, ForeignKey('fooschema.users.id')), Column('prop_id',Integer, ForeignKey('fooschema.props.id')), schema='fooschema') configure_mappers() assert class_mapper(User).get_property('props').secondary \ is user_to_prop def test_string_dependency_resolution_annotations(self): Base = decl.declarative_base() class Parent(Base): __tablename__ = 'parent' id = Column(Integer, primary_key=True) name = Column(String) children = relationship("Child", primaryjoin="Parent.name==remote(foreign(func.lower(Child.name_upper)))" ) class Child(Base): __tablename__ = 'child' id = Column(Integer, primary_key=True) name_upper = Column(String) configure_mappers() eq_( Parent.children.property._calculated_foreign_keys, set([Child.name_upper.property.columns[0]]) ) def test_shared_class_registry(self): reg = {} Base1 = decl.declarative_base(testing.db, class_registry=reg) Base2 = decl.declarative_base(testing.db, class_registry=reg) class A(Base1): __tablename__ = 'a' id = Column(Integer, primary_key=True) class B(Base2): __tablename__ = 'b' id = Column(Integer, primary_key=True) aid = Column(Integer, ForeignKey(A.id)) as_ = relationship("A") assert B.as_.property.mapper.class_ is A def test_uncompiled_attributes_in_relationship(self): class Address(Base, fixtures.ComparableEntity): __tablename__ = 'addresses' id = Column(Integer, primary_key=True, test_needs_autoincrement=True) email = Column(String(50)) user_id = Column(Integer, ForeignKey('users.id')) class User(Base, fixtures.ComparableEntity): __tablename__ = 'users' id = Column(Integer, primary_key=True, test_needs_autoincrement=True) name = Column(String(50)) addresses = relationship('Address', order_by=Address.email, foreign_keys=Address.user_id, remote_side=Address.user_id) # get the mapper for User. User mapper will compile, # "addresses" relationship will call upon Address.user_id for # its clause element. Address.user_id is a _CompileOnAttr, # which then calls class_mapper(Address). But ! We're already # "in compilation", but class_mapper(Address) needs to # initialize regardless, or COA's assertion fails and things # generally go downhill from there. 
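# ---------------------------------------------------------------------------
# Illustrative aside (not part of the original test suite): a minimal,
# standalone sketch of the pattern described in the comment above, where
# relationship() arguments reference column attributes of a class whose own
# mapper has not been configured yet.  All class and table names below are
# hypothetical.
import sqlalchemy as _sa_sketch
from sqlalchemy.orm import configure_mappers as _configure_sketch, \
    relationship as _relationship_sketch
from sqlalchemy.ext.declarative import declarative_base as _base_sketch

_UncompiledBase = _base_sketch()

class _SketchAddress(_UncompiledBase):
    __tablename__ = 'sketch_addresses'
    id = _sa_sketch.Column(_sa_sketch.Integer, primary_key=True)
    email = _sa_sketch.Column(_sa_sketch.String(50))
    user_id = _sa_sketch.Column(_sa_sketch.Integer,
                                _sa_sketch.ForeignKey('sketch_users.id'))

class _SketchUser(_UncompiledBase):
    __tablename__ = 'sketch_users'
    id = _sa_sketch.Column(_sa_sketch.Integer, primary_key=True)
    # referencing _SketchAddress.email / _SketchAddress.user_id here works
    # even though _SketchAddress's mapper is not yet configured; the
    # attributes are resolved when configure_mappers() runs.
    addresses = _relationship_sketch(_SketchAddress,
                                     order_by=_SketchAddress.email,
                                     foreign_keys=_SketchAddress.user_id)

_configure_sketch()
# ---------------------------------------------------------------------------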
class_mapper(User) Base.metadata.create_all() sess = create_session() u1 = User(name='ed', addresses=[Address(email='abc'), Address(email='xyz'), Address(email='def')]) sess.add(u1) sess.flush() sess.expunge_all() eq_(sess.query(User).filter(User.name == 'ed').one(), User(name='ed', addresses=[Address(email='abc'), Address(email='def'), Address(email='xyz')])) def test_nice_dependency_error(self): class User(Base): __tablename__ = 'users' id = Column('id', Integer, primary_key=True) addresses = relationship('Address') class Address(Base): __tablename__ = 'addresses' id = Column(Integer, primary_key=True) foo = sa.orm.column_property(User.id == 5) # this used to raise an error when accessing User.id but that's # no longer the case since we got rid of _CompileOnAttr. assert_raises(sa.exc.ArgumentError, configure_mappers) def test_nice_dependency_error_works_with_hasattr(self): class User(Base): __tablename__ = 'users' id = Column('id', Integer, primary_key=True) addresses = relationship('Address') # hasattr() on a compile-loaded attribute try: hasattr(User.addresses, 'property') except exc.InvalidRequestError: assert sa.util.compat.py32 # the exception is preserved. Remains the # same through repeated calls. for i in range(3): assert_raises_message(sa.exc.InvalidRequestError, "^One or more mappers failed to initialize - " "can't proceed with initialization of other " "mappers. Original exception was: When initializing.*", configure_mappers) def test_custom_base(self): class MyBase(object): def foobar(self): return "foobar" Base = decl.declarative_base(cls=MyBase) assert hasattr(Base, 'metadata') assert Base().foobar() == "foobar" def test_uses_get_on_class_col_fk(self): # test [ticket:1492] class Master(Base): __tablename__ = 'master' id = Column(Integer, primary_key=True, test_needs_autoincrement=True) class Detail(Base): __tablename__ = 'detail' id = Column(Integer, primary_key=True, test_needs_autoincrement=True) master_id = Column(None, ForeignKey(Master.id)) master = relationship(Master) Base.metadata.create_all() configure_mappers() assert class_mapper(Detail).get_property('master' ).strategy.use_get m1 = Master() d1 = Detail(master=m1) sess = create_session() sess.add(d1) sess.flush() sess.expunge_all() d1 = sess.query(Detail).first() m1 = sess.query(Master).first() def go(): assert d1.master self.assert_sql_count(testing.db, go, 0) def test_index_doesnt_compile(self): class User(Base): __tablename__ = 'users' id = Column('id', Integer, primary_key=True) name = Column('name', String(50)) error = relationship("Address") i = Index('my_index', User.name) # compile fails due to the nonexistent Addresses relationship assert_raises(sa.exc.InvalidRequestError, configure_mappers) # index configured assert i in User.__table__.indexes assert User.__table__.c.id not in set(i.columns) assert User.__table__.c.name in set(i.columns) # tables create fine Base.metadata.create_all() def test_add_prop(self): class User(Base, fixtures.ComparableEntity): __tablename__ = 'users' id = Column('id', Integer, primary_key=True, test_needs_autoincrement=True) User.name = Column('name', String(50)) User.addresses = relationship('Address', backref='user') class Address(Base, fixtures.ComparableEntity): __tablename__ = 'addresses' id = Column(Integer, primary_key=True, test_needs_autoincrement=True) Address.email = Column(String(50), key='_email') Address.user_id = Column('user_id', Integer, ForeignKey('users.id'), key='_user_id') Base.metadata.create_all() eq_(Address.__table__.c['id'].name, 'id') 
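# ---------------------------------------------------------------------------
# Illustrative aside (not part of the original test): when a Column is given
# an explicit ``key``, the Table.c collection is addressed by that key while
# ``.name`` stays the database-side column name -- which is what the eq_()
# calls below check for the declaratively-built 'addresses' table.  A tiny
# standalone sketch with a hypothetical table:
import sqlalchemy as _sa_sketch

_sketch_contacts = _sa_sketch.Table(
    'sketch_contacts', _sa_sketch.MetaData(),
    _sa_sketch.Column('id', _sa_sketch.Integer, primary_key=True),
    _sa_sketch.Column('email_address', _sa_sketch.String(50), key='_email'),
)
assert '_email' in _sketch_contacts.c                        # keyed by ``key``
assert _sketch_contacts.c['_email'].name == 'email_address'  # DB name intact
# ---------------------------------------------------------------------------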
eq_(Address.__table__.c['_email'].name, 'email') eq_(Address.__table__.c['_user_id'].name, 'user_id') u1 = User(name='u1', addresses=[Address(email='one'), Address(email='two')]) sess = create_session() sess.add(u1) sess.flush() sess.expunge_all() eq_(sess.query(User).all(), [User(name='u1', addresses=[Address(email='one'), Address(email='two')])]) a1 = sess.query(Address).filter(Address.email == 'two').one() eq_(a1, Address(email='two')) eq_(a1.user, User(name='u1')) def test_eager_order_by(self): class Address(Base, fixtures.ComparableEntity): __tablename__ = 'addresses' id = Column('id', Integer, primary_key=True, test_needs_autoincrement=True) email = Column('email', String(50)) user_id = Column('user_id', Integer, ForeignKey('users.id')) class User(Base, fixtures.ComparableEntity): __tablename__ = 'users' id = Column('id', Integer, primary_key=True, test_needs_autoincrement=True) name = Column('name', String(50)) addresses = relationship('Address', order_by=Address.email) Base.metadata.create_all() u1 = User(name='u1', addresses=[Address(email='two'), Address(email='one')]) sess = create_session() sess.add(u1) sess.flush() sess.expunge_all() eq_(sess.query(User).options(joinedload(User.addresses)).all(), [User(name='u1', addresses=[Address(email='one'), Address(email='two')])]) def test_order_by_multi(self): class Address(Base, fixtures.ComparableEntity): __tablename__ = 'addresses' id = Column('id', Integer, primary_key=True, test_needs_autoincrement=True) email = Column('email', String(50)) user_id = Column('user_id', Integer, ForeignKey('users.id')) class User(Base, fixtures.ComparableEntity): __tablename__ = 'users' id = Column('id', Integer, primary_key=True, test_needs_autoincrement=True) name = Column('name', String(50)) addresses = relationship('Address', order_by=(Address.email, Address.id)) Base.metadata.create_all() u1 = User(name='u1', addresses=[Address(email='two'), Address(email='one')]) sess = create_session() sess.add(u1) sess.flush() sess.expunge_all() u = sess.query(User).filter(User.name == 'u1').one() a = u.addresses def test_as_declarative(self): class User(fixtures.ComparableEntity): __tablename__ = 'users' id = Column('id', Integer, primary_key=True, test_needs_autoincrement=True) name = Column('name', String(50)) addresses = relationship('Address', backref='user') class Address(fixtures.ComparableEntity): __tablename__ = 'addresses' id = Column('id', Integer, primary_key=True, test_needs_autoincrement=True) email = Column('email', String(50)) user_id = Column('user_id', Integer, ForeignKey('users.id')) reg = {} decl.instrument_declarative(User, reg, Base.metadata) decl.instrument_declarative(Address, reg, Base.metadata) Base.metadata.create_all() u1 = User(name='u1', addresses=[Address(email='one'), Address(email='two')]) sess = create_session() sess.add(u1) sess.flush() sess.expunge_all() eq_(sess.query(User).all(), [User(name='u1', addresses=[Address(email='one'), Address(email='two')])]) def test_custom_mapper_attribute(self): def mymapper(cls, tbl, **kwargs): m = sa.orm.mapper(cls, tbl, **kwargs) m.CHECK = True return m base = decl.declarative_base() class Foo(base): __tablename__ = 'foo' __mapper_cls__ = mymapper id = Column(Integer, primary_key=True) eq_(Foo.__mapper__.CHECK, True) def test_custom_mapper_argument(self): def mymapper(cls, tbl, **kwargs): m = sa.orm.mapper(cls, tbl, **kwargs) m.CHECK = True return m base = decl.declarative_base(mapper=mymapper) class Foo(base): __tablename__ = 'foo' id = Column(Integer, primary_key=True) 
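# ---------------------------------------------------------------------------
# Illustrative aside (not part of the original test): test_as_declarative()
# above drives decl.instrument_declarative(), which applies declarative-style
# mapping to a class that does not inherit from a declarative Base.  A rough
# standalone sketch of the same call, using a hypothetical class:
import sqlalchemy as _sa_sketch
from sqlalchemy.ext import declarative as _decl_sketch

class _SketchNote(object):
    __tablename__ = 'sketch_notes'
    id = _sa_sketch.Column(_sa_sketch.Integer, primary_key=True)
    body = _sa_sketch.Column(_sa_sketch.String(200))

_sketch_registry = {}
_sketch_metadata = _sa_sketch.MetaData()
_decl_sketch.instrument_declarative(_SketchNote, _sketch_registry,
                                    _sketch_metadata)
assert 'sketch_notes' in _sketch_metadata.tables
# ---------------------------------------------------------------------------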
eq_(Foo.__mapper__.CHECK, True) @testing.emits_warning('Ignoring declarative-like tuple value of ' 'attribute id') def test_oops(self): def define(): class User(Base, fixtures.ComparableEntity): __tablename__ = 'users' id = Column('id', Integer, primary_key=True), name = Column('name', String(50)) assert False assert_raises_message(sa.exc.ArgumentError, 'Mapper Mapper|User|users could not ' 'assemble any primary key', define) def test_table_args_no_dict(self): class Foo1(Base): __tablename__ = 'foo' __table_args__ = ForeignKeyConstraint(['id'], ['foo.bar']), id = Column('id', Integer, primary_key=True) bar = Column('bar', Integer) assert Foo1.__table__.c.id.references(Foo1.__table__.c.bar) def test_table_args_type(self): def err(): class Foo1(Base): __tablename__ = 'foo' __table_args__ = ForeignKeyConstraint(['id'], ['foo.id' ]) id = Column('id', Integer, primary_key=True) assert_raises_message(sa.exc.ArgumentError, '__table_args__ value must be a tuple, ', err) def test_table_args_none(self): class Foo2(Base): __tablename__ = 'foo' __table_args__ = None id = Column('id', Integer, primary_key=True) assert Foo2.__table__.kwargs == {} def test_table_args_dict_format(self): class Foo2(Base): __tablename__ = 'foo' __table_args__ = {'mysql_engine': 'InnoDB'} id = Column('id', Integer, primary_key=True) assert Foo2.__table__.kwargs['mysql_engine'] == 'InnoDB' def test_table_args_tuple_format(self): class Foo2(Base): __tablename__ = 'foo' __table_args__ = {'mysql_engine': 'InnoDB'} id = Column('id', Integer, primary_key=True) class Bar(Base): __tablename__ = 'bar' __table_args__ = ForeignKeyConstraint(['id'], ['foo.id']), \ {'mysql_engine': 'InnoDB'} id = Column('id', Integer, primary_key=True) assert Bar.__table__.c.id.references(Foo2.__table__.c.id) assert Bar.__table__.kwargs['mysql_engine'] == 'InnoDB' def test_expression(self): class User(Base, fixtures.ComparableEntity): __tablename__ = 'users' id = Column('id', Integer, primary_key=True, test_needs_autoincrement=True) name = Column('name', String(50)) addresses = relationship('Address', backref='user') class Address(Base, fixtures.ComparableEntity): __tablename__ = 'addresses' id = Column('id', Integer, primary_key=True, test_needs_autoincrement=True) email = Column('email', String(50)) user_id = Column('user_id', Integer, ForeignKey('users.id')) User.address_count = \ sa.orm.column_property(sa.select([sa.func.count(Address.id)]). where(Address.user_id == User.id).as_scalar()) Base.metadata.create_all() u1 = User(name='u1', addresses=[Address(email='one'), Address(email='two')]) sess = create_session() sess.add(u1) sess.flush() sess.expunge_all() eq_(sess.query(User).all(), [User(name='u1', address_count=2, addresses=[Address(email='one'), Address(email='two')])]) def test_useless_declared_attr(self): class Address(Base, fixtures.ComparableEntity): __tablename__ = 'addresses' id = Column('id', Integer, primary_key=True, test_needs_autoincrement=True) email = Column('email', String(50)) user_id = Column('user_id', Integer, ForeignKey('users.id')) class User(Base, fixtures.ComparableEntity): __tablename__ = 'users' id = Column('id', Integer, primary_key=True, test_needs_autoincrement=True) name = Column('name', String(50)) addresses = relationship('Address', backref='user') @declared_attr def address_count(cls): # this doesn't really gain us anything. but if # one is used, lets have it function as expected... return sa.orm.column_property(sa.select([sa.func.count(Address.id)]). 
where(Address.user_id == cls.id)) Base.metadata.create_all() u1 = User(name='u1', addresses=[Address(email='one'), Address(email='two')]) sess = create_session() sess.add(u1) sess.flush() sess.expunge_all() eq_(sess.query(User).all(), [User(name='u1', address_count=2, addresses=[Address(email='one'), Address(email='two')])]) def test_declared_on_base_class(self): class MyBase(Base): __tablename__ = 'foo' id = Column(Integer, primary_key=True) @declared_attr def somecol(cls): return Column(Integer) class MyClass(MyBase): __tablename__ = 'bar' id = Column(Integer, ForeignKey('foo.id'), primary_key=True) # previously, the 'somecol' declared_attr would be ignored # by the mapping and would remain unused. now we take # it as part of MyBase. assert 'somecol' in MyBase.__table__.c assert 'somecol' not in MyClass.__table__.c def test_column(self): class User(Base, fixtures.ComparableEntity): __tablename__ = 'users' id = Column('id', Integer, primary_key=True, test_needs_autoincrement=True) name = Column('name', String(50)) User.a = Column('a', String(10)) User.b = Column(String(10)) Base.metadata.create_all() u1 = User(name='u1', a='a', b='b') eq_(u1.a, 'a') eq_(User.a.get_history(u1), (['a'], (), ())) sess = create_session() sess.add(u1) sess.flush() sess.expunge_all() eq_(sess.query(User).all(), [User(name='u1', a='a', b='b')]) def test_column_properties(self): class Address(Base, fixtures.ComparableEntity): __tablename__ = 'addresses' id = Column(Integer, primary_key=True, test_needs_autoincrement=True) email = Column(String(50)) user_id = Column(Integer, ForeignKey('users.id')) class User(Base, fixtures.ComparableEntity): __tablename__ = 'users' id = Column('id', Integer, primary_key=True, test_needs_autoincrement=True) name = Column('name', String(50)) adr_count = \ sa.orm.column_property( sa.select([sa.func.count(Address.id)], Address.user_id == id).as_scalar()) addresses = relationship(Address) Base.metadata.create_all() u1 = User(name='u1', addresses=[Address(email='one'), Address(email='two')]) sess = create_session() sess.add(u1) sess.flush() sess.expunge_all() eq_(sess.query(User).all(), [User(name='u1', adr_count=2, addresses=[Address(email='one'), Address(email='two')])]) def test_column_properties_2(self): class Address(Base, fixtures.ComparableEntity): __tablename__ = 'addresses' id = Column(Integer, primary_key=True) email = Column(String(50)) user_id = Column(Integer, ForeignKey('users.id')) class User(Base, fixtures.ComparableEntity): __tablename__ = 'users' id = Column('id', Integer, primary_key=True) name = Column('name', String(50)) # this is not "valid" but we want to test that Address.id # doesnt get stuck into user's table adr_count = Address.id eq_(set(User.__table__.c.keys()), set(['id', 'name'])) eq_(set(Address.__table__.c.keys()), set(['id', 'email', 'user_id'])) def test_deferred(self): class User(Base, fixtures.ComparableEntity): __tablename__ = 'users' id = Column(Integer, primary_key=True, test_needs_autoincrement=True) name = sa.orm.deferred(Column(String(50))) Base.metadata.create_all() sess = create_session() sess.add(User(name='u1')) sess.flush() sess.expunge_all() u1 = sess.query(User).filter(User.name == 'u1').one() assert 'name' not in u1.__dict__ def go(): eq_(u1.name, 'u1') self.assert_sql_count(testing.db, go, 1) def test_composite_inline(self): class AddressComposite(fixtures.ComparableEntity): def __init__(self, street, state): self.street = street self.state = state def __composite_values__(self): return [self.street, self.state] class User(Base, 
fixtures.ComparableEntity): __tablename__ = 'user' id = Column(Integer, primary_key=True, test_needs_autoincrement=True) address = composite(AddressComposite, Column('street', String(50)), Column('state', String(2)), ) Base.metadata.create_all() sess = Session() sess.add(User( address=AddressComposite('123 anywhere street', 'MD') )) sess.commit() eq_( sess.query(User).all(), [User(address=AddressComposite('123 anywhere street', 'MD'))] ) def test_composite_separate(self): class AddressComposite(fixtures.ComparableEntity): def __init__(self, street, state): self.street = street self.state = state def __composite_values__(self): return [self.street, self.state] class User(Base, fixtures.ComparableEntity): __tablename__ = 'user' id = Column(Integer, primary_key=True, test_needs_autoincrement=True) street = Column(String(50)) state = Column(String(2)) address = composite(AddressComposite, street, state) Base.metadata.create_all() sess = Session() sess.add(User( address=AddressComposite('123 anywhere street', 'MD') )) sess.commit() eq_( sess.query(User).all(), [User(address=AddressComposite('123 anywhere street', 'MD'))] ) def test_mapping_to_join(self): users = Table('users', Base.metadata, Column('id', Integer, primary_key=True) ) addresses = Table('addresses', Base.metadata, Column('id', Integer, primary_key=True), Column('user_id', Integer, ForeignKey('users.id')) ) usersaddresses = sa.join(users, addresses, users.c.id == addresses.c.user_id) class User(Base): __table__ = usersaddresses __table_args__ = {'primary_key':[users.c.id]} # need to use column_property for now user_id = column_property(users.c.id, addresses.c.user_id) address_id = addresses.c.id assert User.__mapper__.get_property('user_id').columns[0] \ is users.c.id assert User.__mapper__.get_property('user_id').columns[1] \ is addresses.c.user_id def test_synonym_inline(self): class User(Base, fixtures.ComparableEntity): __tablename__ = 'users' id = Column('id', Integer, primary_key=True, test_needs_autoincrement=True) _name = Column('name', String(50)) def _set_name(self, name): self._name = 'SOMENAME ' + name def _get_name(self): return self._name name = sa.orm.synonym('_name', descriptor=property(_get_name, _set_name)) Base.metadata.create_all() sess = create_session() u1 = User(name='someuser') eq_(u1.name, 'SOMENAME someuser') sess.add(u1) sess.flush() eq_(sess.query(User).filter(User.name == 'SOMENAME someuser' ).one(), u1) def test_synonym_no_descriptor(self): from sqlalchemy.orm.properties import ColumnProperty class CustomCompare(ColumnProperty.Comparator): __hash__ = None def __eq__(self, other): return self.__clause_element__() == other + ' FOO' class User(Base, fixtures.ComparableEntity): __tablename__ = 'users' id = Column('id', Integer, primary_key=True, test_needs_autoincrement=True) _name = Column('name', String(50)) name = sa.orm.synonym('_name', comparator_factory=CustomCompare) Base.metadata.create_all() sess = create_session() u1 = User(name='someuser FOO') sess.add(u1) sess.flush() eq_(sess.query(User).filter(User.name == 'someuser').one(), u1) def test_synonym_added(self): class User(Base, fixtures.ComparableEntity): __tablename__ = 'users' id = Column('id', Integer, primary_key=True, test_needs_autoincrement=True) _name = Column('name', String(50)) def _set_name(self, name): self._name = 'SOMENAME ' + name def _get_name(self): return self._name name = property(_get_name, _set_name) User.name = sa.orm.synonym('_name', descriptor=User.name) Base.metadata.create_all() sess = create_session() u1 = 
User(name='someuser') eq_(u1.name, 'SOMENAME someuser') sess.add(u1) sess.flush() eq_(sess.query(User).filter(User.name == 'SOMENAME someuser' ).one(), u1) def test_reentrant_compile_via_foreignkey(self): class User(Base, fixtures.ComparableEntity): __tablename__ = 'users' id = Column('id', Integer, primary_key=True, test_needs_autoincrement=True) name = Column('name', String(50)) addresses = relationship('Address', backref='user') class Address(Base, fixtures.ComparableEntity): __tablename__ = 'addresses' id = Column('id', Integer, primary_key=True, test_needs_autoincrement=True) email = Column('email', String(50)) user_id = Column('user_id', Integer, ForeignKey(User.id)) # previous versions would force a re-entrant mapper compile via # the User.id inside the ForeignKey but this is no longer the # case sa.orm.configure_mappers() eq_(str(list(Address.user_id.property.columns[0].foreign_keys)[0]), "ForeignKey('users.id')") Base.metadata.create_all() u1 = User(name='u1', addresses=[Address(email='one'), Address(email='two')]) sess = create_session() sess.add(u1) sess.flush() sess.expunge_all() eq_(sess.query(User).all(), [User(name='u1', addresses=[Address(email='one'), Address(email='two')])]) def test_relationship_reference(self): class Address(Base, fixtures.ComparableEntity): __tablename__ = 'addresses' id = Column('id', Integer, primary_key=True, test_needs_autoincrement=True) email = Column('email', String(50)) user_id = Column('user_id', Integer, ForeignKey('users.id')) class User(Base, fixtures.ComparableEntity): __tablename__ = 'users' id = Column('id', Integer, primary_key=True, test_needs_autoincrement=True) name = Column('name', String(50)) addresses = relationship('Address', backref='user', primaryjoin=id == Address.user_id) User.address_count = \ sa.orm.column_property(sa.select([sa.func.count(Address.id)]). 
where(Address.user_id == User.id).as_scalar()) Base.metadata.create_all() u1 = User(name='u1', addresses=[Address(email='one'), Address(email='two')]) sess = create_session() sess.add(u1) sess.flush() sess.expunge_all() eq_(sess.query(User).all(), [User(name='u1', address_count=2, addresses=[Address(email='one'), Address(email='two')])]) def test_pk_with_fk_init(self): class Bar(Base): __tablename__ = 'bar' id = sa.Column(sa.Integer, sa.ForeignKey('foo.id'), primary_key=True) ex = sa.Column(sa.Integer, primary_key=True) class Foo(Base): __tablename__ = 'foo' id = sa.Column(sa.Integer, primary_key=True) bars = sa.orm.relationship(Bar) assert Bar.__mapper__.primary_key[0] is Bar.__table__.c.id assert Bar.__mapper__.primary_key[1] is Bar.__table__.c.ex def test_with_explicit_autoloaded(self): meta = MetaData(testing.db) t1 = Table('t1', meta, Column('id', String(50), primary_key=True, test_needs_autoincrement=True), Column('data', String(50))) meta.create_all() try: class MyObj(Base): __table__ = Table('t1', Base.metadata, autoload=True) sess = create_session() m = MyObj(id='someid', data='somedata') sess.add(m) sess.flush() eq_(t1.select().execute().fetchall(), [('someid', 'somedata' )]) finally: meta.drop_all() def test_synonym_for(self): class User(Base, fixtures.ComparableEntity): __tablename__ = 'users' id = Column('id', Integer, primary_key=True, test_needs_autoincrement=True) name = Column('name', String(50)) @decl.synonym_for('name') @property def namesyn(self): return self.name Base.metadata.create_all() sess = create_session() u1 = User(name='someuser') eq_(u1.name, 'someuser') eq_(u1.namesyn, 'someuser') sess.add(u1) sess.flush() rt = sess.query(User).filter(User.namesyn == 'someuser').one() eq_(rt, u1) def test_comparable_using(self): class NameComparator(sa.orm.PropComparator): @property def upperself(self): cls = self.prop.parent.class_ col = getattr(cls, 'name') return sa.func.upper(col) def operate( self, op, other, **kw ): return op(self.upperself, other, **kw) class User(Base, fixtures.ComparableEntity): __tablename__ = 'users' id = Column('id', Integer, primary_key=True, test_needs_autoincrement=True) name = Column('name', String(50)) @decl.comparable_using(NameComparator) @property def uc_name(self): return self.name is not None and self.name.upper() \ or None Base.metadata.create_all() sess = create_session() u1 = User(name='someuser') eq_(u1.name, 'someuser', u1.name) eq_(u1.uc_name, 'SOMEUSER', u1.uc_name) sess.add(u1) sess.flush() sess.expunge_all() rt = sess.query(User).filter(User.uc_name == 'SOMEUSER').one() eq_(rt, u1) sess.expunge_all() rt = sess.query(User).filter(User.uc_name.startswith('SOMEUSE' )).one() eq_(rt, u1) def test_duplicate_classes_in_base(self): class Test(Base): __tablename__ = 'a' id = Column(Integer, primary_key=True) assert_raises_message( sa.exc.SAWarning, "This declarative base already contains a class with ", lambda: type(Base)("Test", (Base,), dict( __tablename__='b', id=Column(Integer, primary_key=True) )) ) def _produce_test(inline, stringbased): class ExplicitJoinTest(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): global User, Address Base = decl.declarative_base(metadata=metadata) class User(Base, fixtures.ComparableEntity): __tablename__ = 'users' id = Column(Integer, primary_key=True, test_needs_autoincrement=True) name = Column(String(50)) class Address(Base, fixtures.ComparableEntity): __tablename__ = 'addresses' id = Column(Integer, primary_key=True, test_needs_autoincrement=True) email = Column(String(50)) 
user_id = Column(Integer, ForeignKey('users.id')) if inline: if stringbased: user = relationship('User', primaryjoin='User.id==Address.user_id', backref='addresses') else: user = relationship(User, primaryjoin=User.id == user_id, backref='addresses') if not inline: configure_mappers() if stringbased: Address.user = relationship('User', primaryjoin='User.id==Address.user_id', backref='addresses') else: Address.user = relationship(User, primaryjoin=User.id == Address.user_id, backref='addresses') @classmethod def insert_data(cls): params = [dict(zip(('id', 'name'), column_values)) for column_values in [(7, 'jack'), (8, 'ed'), (9, 'fred'), (10, 'chuck')]] User.__table__.insert().execute(params) Address.__table__.insert().execute([dict(zip(('id', 'user_id', 'email'), column_values)) for column_values in [(1, 7, 'jack@bean.com'), (2, 8, 'ed@wood.com'), (3, 8, 'ed@bettyboop.com'), (4, 8, 'ed@lala.com'), (5, 9, 'fred@fred.com')]]) def test_aliased_join(self): # this query will screw up if the aliasing enabled in # query.join() gets applied to the right half of the join # condition inside the any(). the join condition inside of # any() comes from the "primaryjoin" of the relationship, # and should not be annotated with _orm_adapt. # PropertyLoader.Comparator will annotate the left side with # _orm_adapt, though. sess = create_session() eq_(sess.query(User).join(User.addresses, aliased=True).filter(Address.email == 'ed@wood.com' ).filter(User.addresses.any(Address.email == 'jack@bean.com')).all(), []) ExplicitJoinTest.__name__ = 'ExplicitJoinTest%s%s' % (inline and 'Inline' or 'Separate', stringbased and 'String' or 'Literal') return ExplicitJoinTest for inline in True, False: for stringbased in True, False: testclass = _produce_test(inline, stringbased) exec '%s = testclass' % testclass.__name__ del testclass SQLAlchemy-0.8.4/test/ext/declarative/test_clsregistry.py0000644000076500000240000001704212251150015024252 0ustar classicstaff00000000000000from sqlalchemy.testing import fixtures from sqlalchemy.testing.util import gc_collect from sqlalchemy.testing import assert_raises_message, is_, eq_ from sqlalchemy import exc, MetaData from sqlalchemy.ext.declarative import clsregistry import weakref class MockClass(object): def __init__(self, base, name): self._decl_class_registry = base tokens = name.split(".") self.__module__ = ".".join(tokens[0:-1]) self.name = self.__name__ = tokens[-1] self.metadata = MetaData() class MockProp(object): parent = "some_parent" class ClsRegistryTest(fixtures.TestBase): __requires__ = 'predictable_gc', def test_same_module_same_name(self): base = weakref.WeakValueDictionary() f1 = MockClass(base, "foo.bar.Foo") f2 = MockClass(base, "foo.bar.Foo") clsregistry.add_class("Foo", f1) gc_collect() assert_raises_message( exc.SAWarning, "This declarative base already contains a class with the " "same class name and module name as foo.bar.Foo, and " "will be replaced in the string-lookup table.", clsregistry.add_class, "Foo", f2 ) def test_resolve(self): base = weakref.WeakValueDictionary() f1 = MockClass(base, "foo.bar.Foo") f2 = MockClass(base, "foo.alt.Foo") clsregistry.add_class("Foo", f1) clsregistry.add_class("Foo", f2) resolver = clsregistry._resolver(f1, MockProp()) gc_collect() is_(resolver("foo.bar.Foo")(), f1) is_(resolver("foo.alt.Foo")(), f2) def test_fragment_resolve(self): base = weakref.WeakValueDictionary() f1 = MockClass(base, "foo.bar.Foo") f2 = MockClass(base, "foo.alt.Foo") f3 = MockClass(base, "bat.alt.Hoho") clsregistry.add_class("Foo", f1) 
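# ---------------------------------------------------------------------------
# Illustrative aside (not part of the original test): the resolver returned
# by clsregistry._resolver() accepts a bare class name, a trailing fragment
# of the module path, or the full dotted path.  A rough standalone sketch
# reusing the MockClass/MockProp helpers defined above; the
# "shop.models.Widget" name is hypothetical.
import weakref as _weakref_sketch

_sketch_base = _weakref_sketch.WeakValueDictionary()
_sketch_widget = MockClass(_sketch_base, "shop.models.Widget")
clsregistry.add_class("Widget", _sketch_widget)
_sketch_resolver = clsregistry._resolver(_sketch_widget, MockProp())
assert _sketch_resolver("Widget")() is _sketch_widget               # bare name
assert _sketch_resolver("models.Widget")() is _sketch_widget        # fragment
assert _sketch_resolver("shop.models.Widget")() is _sketch_widget   # full path
# ---------------------------------------------------------------------------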
clsregistry.add_class("Foo", f2) clsregistry.add_class("HoHo", f3) resolver = clsregistry._resolver(f1, MockProp()) gc_collect() is_(resolver("bar.Foo")(), f1) is_(resolver("alt.Foo")(), f2) def test_fragment_ambiguous(self): base = weakref.WeakValueDictionary() f1 = MockClass(base, "foo.bar.Foo") f2 = MockClass(base, "foo.alt.Foo") f3 = MockClass(base, "bat.alt.Foo") clsregistry.add_class("Foo", f1) clsregistry.add_class("Foo", f2) clsregistry.add_class("Foo", f3) resolver = clsregistry._resolver(f1, MockProp()) gc_collect() assert_raises_message( exc.InvalidRequestError, 'Multiple classes found for path "alt.Foo" in the registry ' 'of this declarative base. Please use a fully ' 'module-qualified path.', resolver("alt.Foo") ) def test_resolve_dupe_by_name(self): base = weakref.WeakValueDictionary() f1 = MockClass(base, "foo.bar.Foo") f2 = MockClass(base, "foo.alt.Foo") clsregistry.add_class("Foo", f1) clsregistry.add_class("Foo", f2) gc_collect() resolver = clsregistry._resolver(f1, MockProp()) resolver = resolver("Foo") assert_raises_message( exc.InvalidRequestError, 'Multiple classes found for path "Foo" in the ' 'registry of this declarative base. Please use a ' 'fully module-qualified path.', resolver ) def test_dupe_classes_back_to_one(self): base = weakref.WeakValueDictionary() f1 = MockClass(base, "foo.bar.Foo") f2 = MockClass(base, "foo.alt.Foo") clsregistry.add_class("Foo", f1) clsregistry.add_class("Foo", f2) del f2 gc_collect() # registry restores itself to just the one class resolver = clsregistry._resolver(f1, MockProp()) resolver = resolver("Foo") is_(resolver(), f1) def test_dupe_classes_cleanout(self): # force this to maintain isolation between tests clsregistry._registries.clear() base = weakref.WeakValueDictionary() for i in xrange(3): f1 = MockClass(base, "foo.bar.Foo") f2 = MockClass(base, "foo.alt.Foo") clsregistry.add_class("Foo", f1) clsregistry.add_class("Foo", f2) eq_(len(clsregistry._registries), 11) del f1 del f2 gc_collect() eq_(len(clsregistry._registries), 1) def test_dupe_classes_name_race(self): """test the race condition that the class was garbage " "collected while being resolved from a dupe class.""" base = weakref.WeakValueDictionary() f1 = MockClass(base, "foo.bar.Foo") f2 = MockClass(base, "foo.alt.Foo") clsregistry.add_class("Foo", f1) clsregistry.add_class("Foo", f2) dupe_reg = base['Foo'] dupe_reg.contents = [lambda: None] resolver = clsregistry._resolver(f1, MockProp()) resolver = resolver("Foo") assert_raises_message( exc.InvalidRequestError, "When initializing mapper some_parent, expression " "'Foo' failed to locate a name \('Foo'\).", resolver ) def test_module_reg_cleanout_race(self): """test the race condition that a class was gc'ed as we tried to look it up by module name.""" base = weakref.WeakValueDictionary() f1 = MockClass(base, "foo.bar.Foo") clsregistry.add_class("Foo", f1) reg = base['_sa_module_registry'] mod_entry = reg['foo']['bar'] resolver = clsregistry._resolver(f1, MockProp()) resolver = resolver("foo") del mod_entry.contents["Foo"] assert_raises_message( AttributeError, "Module 'bar' has no mapped classes registered " "under the name 'Foo'", lambda: resolver().bar.Foo ) def test_module_reg_no_class(self): base = weakref.WeakValueDictionary() f1 = MockClass(base, "foo.bar.Foo") clsregistry.add_class("Foo", f1) reg = base['_sa_module_registry'] mod_entry = reg['foo']['bar'] resolver = clsregistry._resolver(f1, MockProp()) resolver = resolver("foo") assert_raises_message( AttributeError, "Module 'bar' has no mapped classes 
registered " "under the name 'Bat'", lambda: resolver().bar.Bat ) def test_module_reg_cleanout_two_sub(self): base = weakref.WeakValueDictionary() f1 = MockClass(base, "foo.bar.Foo") clsregistry.add_class("Foo", f1) reg = base['_sa_module_registry'] f2 = MockClass(base, "foo.alt.Bar") clsregistry.add_class("Bar", f2) assert reg['foo']['bar'] del f1 gc_collect() assert 'bar' not in \ reg['foo'] assert 'alt' in reg['foo'] del f2 gc_collect() assert 'foo' not in reg.contents def test_module_reg_cleanout_sub_to_base(self): base = weakref.WeakValueDictionary() f3 = MockClass(base, "bat.bar.Hoho") clsregistry.add_class("Hoho", f3) reg = base['_sa_module_registry'] assert reg['bat']['bar'] del f3 gc_collect() assert 'bat' not in reg def test_module_reg_cleanout_cls_to_base(self): base = weakref.WeakValueDictionary() f4 = MockClass(base, "single.Blat") clsregistry.add_class("Blat", f4) reg = base['_sa_module_registry'] assert reg['single'] del f4 gc_collect() assert 'single' not in reg SQLAlchemy-0.8.4/test/ext/declarative/test_inheritance.py0000644000076500000240000012644012251150015024174 0ustar classicstaff00000000000000 from sqlalchemy.testing import eq_, assert_raises, \ assert_raises_message, is_ from sqlalchemy.ext import declarative as decl import sqlalchemy as sa from sqlalchemy import testing from sqlalchemy import Integer, String, ForeignKey from sqlalchemy.testing.schema import Table, Column from sqlalchemy.orm import relationship, create_session, class_mapper, \ configure_mappers, clear_mappers, \ polymorphic_union, deferred, Session from sqlalchemy.ext.declarative import declared_attr, AbstractConcreteBase, \ ConcreteBase, has_inherited_table from sqlalchemy.testing import fixtures Base = None class DeclarativeTestBase(fixtures.TestBase, testing.AssertsExecutionResults): def setup(self): global Base Base = decl.declarative_base(testing.db) def teardown(self): Session.close_all() clear_mappers() Base.metadata.drop_all() class DeclarativeInheritanceTest(DeclarativeTestBase): def test_we_must_copy_mapper_args(self): class Person(Base): __tablename__ = 'people' id = Column(Integer, primary_key=True) discriminator = Column('type', String(50)) __mapper_args__ = {'polymorphic_on': discriminator, 'polymorphic_identity': 'person'} class Engineer(Person): primary_language = Column(String(50)) assert 'inherits' not in Person.__mapper_args__ assert class_mapper(Engineer).polymorphic_identity is None assert class_mapper(Engineer).polymorphic_on is Person.__table__.c.type def test_we_must_only_copy_column_mapper_args(self): class Person(Base): __tablename__ = 'people' id = Column(Integer, primary_key=True) a = Column(Integer) b = Column(Integer) c = Column(Integer) d = Column(Integer) discriminator = Column('type', String(50)) __mapper_args__ = {'polymorphic_on': discriminator, 'polymorphic_identity': 'person', 'version_id_col': 'a', 'column_prefix': 'bar', 'include_properties': ['id', 'a', 'b'], } assert class_mapper(Person).version_id_col == 'a' assert class_mapper(Person).include_properties == set(['id', 'a', 'b']) def test_custom_join_condition(self): class Foo(Base): __tablename__ = 'foo' id = Column('id', Integer, primary_key=True) class Bar(Foo): __tablename__ = 'bar' id = Column('id', Integer, primary_key=True) foo_id = Column('foo_id', Integer) __mapper_args__ = {'inherit_condition': foo_id == Foo.id} # compile succeeds because inherit_condition is honored configure_mappers() def test_joined(self): class Company(Base, fixtures.ComparableEntity): __tablename__ = 'companies' id = 
Column('id', Integer, primary_key=True, test_needs_autoincrement=True) name = Column('name', String(50)) employees = relationship('Person') class Person(Base, fixtures.ComparableEntity): __tablename__ = 'people' id = Column('id', Integer, primary_key=True, test_needs_autoincrement=True) company_id = Column('company_id', Integer, ForeignKey('companies.id')) name = Column('name', String(50)) discriminator = Column('type', String(50)) __mapper_args__ = {'polymorphic_on': discriminator} class Engineer(Person): __tablename__ = 'engineers' __mapper_args__ = {'polymorphic_identity': 'engineer'} id = Column('id', Integer, ForeignKey('people.id'), primary_key=True) primary_language = Column('primary_language', String(50)) class Manager(Person): __tablename__ = 'managers' __mapper_args__ = {'polymorphic_identity': 'manager'} id = Column('id', Integer, ForeignKey('people.id'), primary_key=True) golf_swing = Column('golf_swing', String(50)) Base.metadata.create_all() sess = create_session() c1 = Company(name='MegaCorp, Inc.', employees=[Engineer(name='dilbert', primary_language='java'), Engineer(name='wally', primary_language='c++'), Manager(name='dogbert', golf_swing='fore!')]) c2 = Company(name='Elbonia, Inc.', employees=[Engineer(name='vlad', primary_language='cobol')]) sess.add(c1) sess.add(c2) sess.flush() sess.expunge_all() eq_(sess.query(Company).filter(Company.employees.of_type(Engineer). any(Engineer.primary_language == 'cobol')).first(), c2) # ensure that the Manager mapper was compiled with the Manager id # column as higher priority. this ensures that "Manager.id" # is appropriately treated as the "id" column in the "manager" # table (reversed from 0.6's behavior.) eq_( Manager.id.property.columns, [Manager.__table__.c.id, Person.__table__.c.id] ) # assert that the "id" column is available without a second # load. as of 0.7, the ColumnProperty tests all columns # in it's list to see which is present in the row. 
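# ---------------------------------------------------------------------------
# Illustrative aside (not part of the original test): a minimal standalone
# joined-table-inheritance sketch showing the column ordering that the eq_()
# on Manager.id above asserts -- the subclass-local column is listed first
# and the base-table column second on the shared "id" attribute.  Class and
# table names below are hypothetical.
import sqlalchemy as _sa_sketch
from sqlalchemy.ext.declarative import declarative_base as _base_sketch

_InhBase = _base_sketch()

class _SketchPerson(_InhBase):
    __tablename__ = 'sketch_person'
    id = _sa_sketch.Column(_sa_sketch.Integer, primary_key=True)

class _SketchManager(_SketchPerson):
    __tablename__ = 'sketch_manager'
    id = _sa_sketch.Column(_sa_sketch.Integer,
                           _sa_sketch.ForeignKey('sketch_person.id'),
                           primary_key=True)

assert _SketchManager.id.property.columns[0] is _SketchManager.__table__.c.id
assert _SketchManager.id.property.columns[1] is _SketchPerson.__table__.c.id
# ---------------------------------------------------------------------------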
sess.expunge_all() def go(): assert sess.query(Manager).filter(Manager.name == 'dogbert' ).one().id self.assert_sql_count(testing.db, go, 1) sess.expunge_all() def go(): assert sess.query(Person).filter(Manager.name == 'dogbert' ).one().id self.assert_sql_count(testing.db, go, 1) def test_add_subcol_after_the_fact(self): class Person(Base, fixtures.ComparableEntity): __tablename__ = 'people' id = Column('id', Integer, primary_key=True, test_needs_autoincrement=True) name = Column('name', String(50)) discriminator = Column('type', String(50)) __mapper_args__ = {'polymorphic_on': discriminator} class Engineer(Person): __tablename__ = 'engineers' __mapper_args__ = {'polymorphic_identity': 'engineer'} id = Column('id', Integer, ForeignKey('people.id'), primary_key=True) Engineer.primary_language = Column('primary_language', String(50)) Base.metadata.create_all() sess = create_session() e1 = Engineer(primary_language='java', name='dilbert') sess.add(e1) sess.flush() sess.expunge_all() eq_(sess.query(Person).first(), Engineer(primary_language='java', name='dilbert')) def test_add_parentcol_after_the_fact(self): class Person(Base, fixtures.ComparableEntity): __tablename__ = 'people' id = Column('id', Integer, primary_key=True, test_needs_autoincrement=True) discriminator = Column('type', String(50)) __mapper_args__ = {'polymorphic_on': discriminator} class Engineer(Person): __tablename__ = 'engineers' __mapper_args__ = {'polymorphic_identity': 'engineer'} primary_language = Column(String(50)) id = Column('id', Integer, ForeignKey('people.id'), primary_key=True) Person.name = Column('name', String(50)) Base.metadata.create_all() sess = create_session() e1 = Engineer(primary_language='java', name='dilbert') sess.add(e1) sess.flush() sess.expunge_all() eq_(sess.query(Person).first(), Engineer(primary_language='java', name='dilbert')) def test_add_sub_parentcol_after_the_fact(self): class Person(Base, fixtures.ComparableEntity): __tablename__ = 'people' id = Column('id', Integer, primary_key=True, test_needs_autoincrement=True) discriminator = Column('type', String(50)) __mapper_args__ = {'polymorphic_on': discriminator} class Engineer(Person): __tablename__ = 'engineers' __mapper_args__ = {'polymorphic_identity': 'engineer'} primary_language = Column(String(50)) id = Column('id', Integer, ForeignKey('people.id'), primary_key=True) class Admin(Engineer): __tablename__ = 'admins' __mapper_args__ = {'polymorphic_identity': 'admin'} workstation = Column(String(50)) id = Column('id', Integer, ForeignKey('engineers.id'), primary_key=True) Person.name = Column('name', String(50)) Base.metadata.create_all() sess = create_session() e1 = Admin(primary_language='java', name='dilbert', workstation='foo') sess.add(e1) sess.flush() sess.expunge_all() eq_(sess.query(Person).first(), Admin(primary_language='java', name='dilbert', workstation='foo')) def test_subclass_mixin(self): class Person(Base, fixtures.ComparableEntity): __tablename__ = 'people' id = Column('id', Integer, primary_key=True) name = Column('name', String(50)) discriminator = Column('type', String(50)) __mapper_args__ = {'polymorphic_on': discriminator} class MyMixin(object): pass class Engineer(MyMixin, Person): __tablename__ = 'engineers' __mapper_args__ = {'polymorphic_identity': 'engineer'} id = Column('id', Integer, ForeignKey('people.id'), primary_key=True) primary_language = Column('primary_language', String(50)) assert class_mapper(Engineer).inherits is class_mapper(Person) def test_with_undefined_foreignkey(self): class Parent(Base): 
__tablename__ = 'parent' id = Column('id', Integer, primary_key=True) tp = Column('type', String(50)) __mapper_args__ = dict(polymorphic_on=tp) class Child1(Parent): __tablename__ = 'child1' id = Column('id', Integer, ForeignKey('parent.id'), primary_key=True) related_child2 = Column('c2', Integer, ForeignKey('child2.id')) __mapper_args__ = dict(polymorphic_identity='child1') # no exception is raised by the ForeignKey to "child2" even # though child2 doesn't exist yet class Child2(Parent): __tablename__ = 'child2' id = Column('id', Integer, ForeignKey('parent.id'), primary_key=True) related_child1 = Column('c1', Integer) __mapper_args__ = dict(polymorphic_identity='child2') sa.orm.configure_mappers() # no exceptions here def test_foreign_keys_with_col(self): """Test that foreign keys that reference a literal 'id' subclass 'id' attribute behave intuitively. See [ticket:1892]. """ class Booking(Base): __tablename__ = 'booking' id = Column(Integer, primary_key=True) class PlanBooking(Booking): __tablename__ = 'plan_booking' id = Column(Integer, ForeignKey(Booking.id), primary_key=True) # referencing PlanBooking.id gives us the column # on plan_booking, not booking class FeatureBooking(Booking): __tablename__ = 'feature_booking' id = Column(Integer, ForeignKey(Booking.id), primary_key=True) plan_booking_id = Column(Integer, ForeignKey(PlanBooking.id)) plan_booking = relationship(PlanBooking, backref='feature_bookings') assert FeatureBooking.__table__.c.plan_booking_id.\ references(PlanBooking.__table__.c.id) assert FeatureBooking.__table__.c.id.\ references(Booking.__table__.c.id) def test_single_colsonbase(self): """test single inheritance where all the columns are on the base class.""" class Company(Base, fixtures.ComparableEntity): __tablename__ = 'companies' id = Column('id', Integer, primary_key=True, test_needs_autoincrement=True) name = Column('name', String(50)) employees = relationship('Person') class Person(Base, fixtures.ComparableEntity): __tablename__ = 'people' id = Column('id', Integer, primary_key=True, test_needs_autoincrement=True) company_id = Column('company_id', Integer, ForeignKey('companies.id')) name = Column('name', String(50)) discriminator = Column('type', String(50)) primary_language = Column('primary_language', String(50)) golf_swing = Column('golf_swing', String(50)) __mapper_args__ = {'polymorphic_on': discriminator} class Engineer(Person): __mapper_args__ = {'polymorphic_identity': 'engineer'} class Manager(Person): __mapper_args__ = {'polymorphic_identity': 'manager'} Base.metadata.create_all() sess = create_session() c1 = Company(name='MegaCorp, Inc.', employees=[Engineer(name='dilbert', primary_language='java'), Engineer(name='wally', primary_language='c++'), Manager(name='dogbert', golf_swing='fore!')]) c2 = Company(name='Elbonia, Inc.', employees=[Engineer(name='vlad', primary_language='cobol')]) sess.add(c1) sess.add(c2) sess.flush() sess.expunge_all() eq_(sess.query(Person).filter(Engineer.primary_language == 'cobol').first(), Engineer(name='vlad')) eq_(sess.query(Company).filter(Company.employees.of_type(Engineer). any(Engineer.primary_language == 'cobol')).first(), c2) def test_single_colsonsub(self): """test single inheritance where the columns are local to their class. this is a newer usage. 
""" class Company(Base, fixtures.ComparableEntity): __tablename__ = 'companies' id = Column('id', Integer, primary_key=True, test_needs_autoincrement=True) name = Column('name', String(50)) employees = relationship('Person') class Person(Base, fixtures.ComparableEntity): __tablename__ = 'people' id = Column(Integer, primary_key=True, test_needs_autoincrement=True) company_id = Column(Integer, ForeignKey('companies.id')) name = Column(String(50)) discriminator = Column('type', String(50)) __mapper_args__ = {'polymorphic_on': discriminator} class Engineer(Person): __mapper_args__ = {'polymorphic_identity': 'engineer'} primary_language = Column(String(50)) class Manager(Person): __mapper_args__ = {'polymorphic_identity': 'manager'} golf_swing = Column(String(50)) # we have here a situation that is somewhat unique. the Person # class is mapped to the "people" table, but it was mapped when # the table did not include the "primary_language" or # "golf_swing" columns. declarative will also manipulate the # exclude_properties collection so that sibling classes don't # cross-pollinate. assert Person.__table__.c.company_id is not None assert Person.__table__.c.golf_swing is not None assert Person.__table__.c.primary_language is not None assert Engineer.primary_language is not None assert Manager.golf_swing is not None assert not hasattr(Person, 'primary_language') assert not hasattr(Person, 'golf_swing') assert not hasattr(Engineer, 'golf_swing') assert not hasattr(Manager, 'primary_language') Base.metadata.create_all() sess = create_session() e1 = Engineer(name='dilbert', primary_language='java') e2 = Engineer(name='wally', primary_language='c++') m1 = Manager(name='dogbert', golf_swing='fore!') c1 = Company(name='MegaCorp, Inc.', employees=[e1, e2, m1]) e3 = Engineer(name='vlad', primary_language='cobol') c2 = Company(name='Elbonia, Inc.', employees=[e3]) sess.add(c1) sess.add(c2) sess.flush() sess.expunge_all() eq_(sess.query(Person).filter(Engineer.primary_language == 'cobol').first(), Engineer(name='vlad')) eq_(sess.query(Company).filter(Company.employees.of_type(Engineer). any(Engineer.primary_language == 'cobol')).first(), c2) eq_(sess.query(Engineer).filter_by(primary_language='cobol' ).one(), Engineer(name='vlad', primary_language='cobol')) @testing.skip_if(lambda: testing.against('oracle'), "Test has an empty insert in it at the moment") def test_columns_single_inheritance_conflict_resolution(self): """Test that a declared_attr can return the existing column and it will be ignored. this allows conditional columns to be added. See [ticket:2472]. 
""" class Person(Base): __tablename__ = 'person' id = Column(Integer, primary_key=True) class Engineer(Person): """single table inheritance""" @declared_attr def target_id(cls): return cls.__table__.c.get('target_id', Column(Integer, ForeignKey('other.id')) ) @declared_attr def target(cls): return relationship("Other") class Manager(Person): """single table inheritance""" @declared_attr def target_id(cls): return cls.__table__.c.get('target_id', Column(Integer, ForeignKey('other.id')) ) @declared_attr def target(cls): return relationship("Other") class Other(Base): __tablename__ = 'other' id = Column(Integer, primary_key=True) is_( Engineer.target_id.property.columns[0], Person.__table__.c.target_id ) is_( Manager.target_id.property.columns[0], Person.__table__.c.target_id ) # do a brief round trip on this Base.metadata.create_all() session = Session() o1, o2 = Other(), Other() session.add_all([ Engineer(target=o1), Manager(target=o2), Manager(target=o1) ]) session.commit() eq_(session.query(Engineer).first().target, o1) def test_joined_from_single(self): class Company(Base, fixtures.ComparableEntity): __tablename__ = 'companies' id = Column('id', Integer, primary_key=True, test_needs_autoincrement=True) name = Column('name', String(50)) employees = relationship('Person') class Person(Base, fixtures.ComparableEntity): __tablename__ = 'people' id = Column(Integer, primary_key=True, test_needs_autoincrement=True) company_id = Column(Integer, ForeignKey('companies.id')) name = Column(String(50)) discriminator = Column('type', String(50)) __mapper_args__ = {'polymorphic_on': discriminator} class Manager(Person): __mapper_args__ = {'polymorphic_identity': 'manager'} golf_swing = Column(String(50)) class Engineer(Person): __tablename__ = 'engineers' __mapper_args__ = {'polymorphic_identity': 'engineer'} id = Column(Integer, ForeignKey('people.id'), primary_key=True) primary_language = Column(String(50)) assert Person.__table__.c.golf_swing is not None assert not Person.__table__.c.has_key('primary_language') assert Engineer.__table__.c.primary_language is not None assert Engineer.primary_language is not None assert Manager.golf_swing is not None assert not hasattr(Person, 'primary_language') assert not hasattr(Person, 'golf_swing') assert not hasattr(Engineer, 'golf_swing') assert not hasattr(Manager, 'primary_language') Base.metadata.create_all() sess = create_session() e1 = Engineer(name='dilbert', primary_language='java') e2 = Engineer(name='wally', primary_language='c++') m1 = Manager(name='dogbert', golf_swing='fore!') c1 = Company(name='MegaCorp, Inc.', employees=[e1, e2, m1]) e3 = Engineer(name='vlad', primary_language='cobol') c2 = Company(name='Elbonia, Inc.', employees=[e3]) sess.add(c1) sess.add(c2) sess.flush() sess.expunge_all() eq_(sess.query(Person).with_polymorphic(Engineer). filter(Engineer.primary_language == 'cobol').first(), Engineer(name='vlad')) eq_(sess.query(Company).filter(Company.employees.of_type(Engineer). 
any(Engineer.primary_language == 'cobol')).first(), c2) eq_(sess.query(Engineer).filter_by(primary_language='cobol' ).one(), Engineer(name='vlad', primary_language='cobol')) def test_single_from_joined_colsonsub(self): class Person(Base, fixtures.ComparableEntity): __tablename__ = 'people' id = Column(Integer, primary_key=True, test_needs_autoincrement=True) name = Column(String(50)) discriminator = Column('type', String(50)) __mapper_args__ = {'polymorphic_on': discriminator} class Manager(Person): __tablename__ = 'manager' __mapper_args__ = {'polymorphic_identity': 'manager'} id = Column(Integer, ForeignKey('people.id'), primary_key=True) golf_swing = Column(String(50)) class Boss(Manager): boss_name = Column(String(50)) is_( Boss.__mapper__.column_attrs['boss_name'].columns[0], Manager.__table__.c.boss_name ) def test_polymorphic_on_converted_from_inst(self): class A(Base): __tablename__ = 'A' id = Column(Integer, primary_key=True) discriminator = Column(String) @declared_attr def __mapper_args__(cls): return { 'polymorphic_identity': cls.__name__, 'polymorphic_on': cls.discriminator } class B(A): pass is_(B.__mapper__.polymorphic_on, A.__table__.c.discriminator) def test_add_deferred(self): class Person(Base, fixtures.ComparableEntity): __tablename__ = 'people' id = Column('id', Integer, primary_key=True, test_needs_autoincrement=True) Person.name = deferred(Column(String(10))) Base.metadata.create_all() sess = create_session() p = Person(name='ratbert') sess.add(p) sess.flush() sess.expunge_all() eq_(sess.query(Person).all(), [Person(name='ratbert')]) sess.expunge_all() person = sess.query(Person).filter(Person.name == 'ratbert' ).one() assert 'name' not in person.__dict__ def test_single_fksonsub(self): """test single inheritance with a foreign key-holding column on a subclass. 
""" class Person(Base, fixtures.ComparableEntity): __tablename__ = 'people' id = Column(Integer, primary_key=True, test_needs_autoincrement=True) name = Column(String(50)) discriminator = Column('type', String(50)) __mapper_args__ = {'polymorphic_on': discriminator} class Engineer(Person): __mapper_args__ = {'polymorphic_identity': 'engineer'} primary_language_id = Column(Integer, ForeignKey('languages.id')) primary_language = relationship('Language') class Language(Base, fixtures.ComparableEntity): __tablename__ = 'languages' id = Column(Integer, primary_key=True, test_needs_autoincrement=True) name = Column(String(50)) assert not hasattr(Person, 'primary_language_id') Base.metadata.create_all() sess = create_session() java, cpp, cobol = Language(name='java'), Language(name='cpp'), \ Language(name='cobol') e1 = Engineer(name='dilbert', primary_language=java) e2 = Engineer(name='wally', primary_language=cpp) e3 = Engineer(name='vlad', primary_language=cobol) sess.add_all([e1, e2, e3]) sess.flush() sess.expunge_all() eq_(sess.query(Person).filter(Engineer.primary_language.has( Language.name == 'cobol')).first(), Engineer(name='vlad', primary_language=Language(name='cobol'))) eq_(sess.query(Engineer).filter(Engineer.primary_language.has( Language.name == 'cobol')).one(), Engineer(name='vlad', primary_language=Language(name='cobol'))) eq_(sess.query(Person).join(Engineer.primary_language).order_by( Language.name).all(), [Engineer(name='vlad', primary_language=Language(name='cobol')), Engineer(name='wally', primary_language=Language(name='cpp' )), Engineer(name='dilbert', primary_language=Language(name='java'))]) def test_single_three_levels(self): class Person(Base, fixtures.ComparableEntity): __tablename__ = 'people' id = Column(Integer, primary_key=True) name = Column(String(50)) discriminator = Column('type', String(50)) __mapper_args__ = {'polymorphic_on': discriminator} class Engineer(Person): __mapper_args__ = {'polymorphic_identity': 'engineer'} primary_language = Column(String(50)) class JuniorEngineer(Engineer): __mapper_args__ = \ {'polymorphic_identity': 'junior_engineer'} nerf_gun = Column(String(50)) class Manager(Person): __mapper_args__ = {'polymorphic_identity': 'manager'} golf_swing = Column(String(50)) assert JuniorEngineer.nerf_gun assert JuniorEngineer.primary_language assert JuniorEngineer.name assert Manager.golf_swing assert Engineer.primary_language assert not hasattr(Engineer, 'golf_swing') assert not hasattr(Engineer, 'nerf_gun') assert not hasattr(Manager, 'nerf_gun') assert not hasattr(Manager, 'primary_language') def test_single_detects_conflict(self): class Person(Base): __tablename__ = 'people' id = Column(Integer, primary_key=True) name = Column(String(50)) discriminator = Column('type', String(50)) __mapper_args__ = {'polymorphic_on': discriminator} class Engineer(Person): __mapper_args__ = {'polymorphic_identity': 'engineer'} primary_language = Column(String(50)) # test sibling col conflict def go(): class Manager(Person): __mapper_args__ = {'polymorphic_identity': 'manager'} golf_swing = Column(String(50)) primary_language = Column(String(50)) assert_raises(sa.exc.ArgumentError, go) # test parent col conflict def go(): class Salesman(Person): __mapper_args__ = {'polymorphic_identity': 'manager'} name = Column(String(50)) assert_raises(sa.exc.ArgumentError, go) def test_single_no_special_cols(self): class Person(Base, fixtures.ComparableEntity): __tablename__ = 'people' id = Column('id', Integer, primary_key=True) name = Column('name', String(50)) 
discriminator = Column('type', String(50)) __mapper_args__ = {'polymorphic_on': discriminator} def go(): class Engineer(Person): __mapper_args__ = {'polymorphic_identity': 'engineer'} primary_language = Column('primary_language', String(50)) foo_bar = Column(Integer, primary_key=True) assert_raises_message(sa.exc.ArgumentError, 'place primary key', go) def test_single_no_table_args(self): class Person(Base, fixtures.ComparableEntity): __tablename__ = 'people' id = Column('id', Integer, primary_key=True) name = Column('name', String(50)) discriminator = Column('type', String(50)) __mapper_args__ = {'polymorphic_on': discriminator} def go(): class Engineer(Person): __mapper_args__ = {'polymorphic_identity': 'engineer'} primary_language = Column('primary_language', String(50)) # this should be on the Person class, as this is single # table inheritance, which is why we test that this # throws an exception! __table_args__ = {'mysql_engine': 'InnoDB'} assert_raises_message(sa.exc.ArgumentError, 'place __table_args__', go) @testing.emits_warning("This declarative") def test_dupe_name_in_hierarchy(self): class A(Base): __tablename__ = "a" id = Column(Integer, primary_key=True) a_1 = A class A(a_1): __tablename__ = 'b' id = Column(Integer(), ForeignKey(a_1.id), primary_key=True) assert A.__mapper__.inherits is a_1.__mapper__ class OverlapColPrecedenceTest(DeclarativeTestBase): """test #1892 cases when declarative does column precedence.""" def _run_test(self, Engineer, e_id, p_id): p_table = Base.metadata.tables['person'] e_table = Base.metadata.tables['engineer'] assert Engineer.id.property.columns[0] is e_table.c[e_id] assert Engineer.id.property.columns[1] is p_table.c[p_id] def test_basic(self): class Person(Base): __tablename__ = 'person' id = Column(Integer, primary_key=True) class Engineer(Person): __tablename__ = 'engineer' id = Column(Integer, ForeignKey('person.id'), primary_key=True) self._run_test(Engineer, "id", "id") def test_alt_name_base(self): class Person(Base): __tablename__ = 'person' id = Column("pid", Integer, primary_key=True) class Engineer(Person): __tablename__ = 'engineer' id = Column(Integer, ForeignKey('person.pid'), primary_key=True) self._run_test(Engineer, "id", "pid") def test_alt_name_sub(self): class Person(Base): __tablename__ = 'person' id = Column(Integer, primary_key=True) class Engineer(Person): __tablename__ = 'engineer' id = Column("eid", Integer, ForeignKey('person.id'), primary_key=True) self._run_test(Engineer, "eid", "id") def test_alt_name_both(self): class Person(Base): __tablename__ = 'person' id = Column("pid", Integer, primary_key=True) class Engineer(Person): __tablename__ = 'engineer' id = Column("eid", Integer, ForeignKey('person.pid'), primary_key=True) self._run_test(Engineer, "eid", "pid") from test.orm.test_events import _RemoveListeners class ConcreteInhTest(_RemoveListeners, DeclarativeTestBase): def _roundtrip(self, Employee, Manager, Engineer, Boss, polymorphic=True, explicit_type=False): Base.metadata.create_all() sess = create_session() e1 = Engineer(name='dilbert', primary_language='java') e2 = Engineer(name='wally', primary_language='c++') m1 = Manager(name='dogbert', golf_swing='fore!') e3 = Engineer(name='vlad', primary_language='cobol') b1 = Boss(name="pointy haired") if polymorphic: for obj in [e1, e2, m1, e3, b1]: if explicit_type: eq_(obj.type, obj.__mapper__.polymorphic_identity) else: assert_raises_message( AttributeError, "does not implement attribute .?'type' " "at the instance level.", getattr, obj, "type" ) else: assert 
"type" not in Engineer.__dict__ assert "type" not in Manager.__dict__ assert "type" not in Boss.__dict__ sess.add_all([e1, e2, m1, e3, b1]) sess.flush() sess.expunge_all() if polymorphic: eq_(sess.query(Employee).order_by(Employee.name).all(), [Engineer(name='dilbert'), Manager(name='dogbert'), Boss(name='pointy haired'), Engineer(name='vlad'), Engineer(name='wally')]) else: eq_(sess.query(Engineer).order_by(Engineer.name).all(), [Engineer(name='dilbert'), Engineer(name='vlad'), Engineer(name='wally')]) eq_(sess.query(Manager).all(), [Manager(name='dogbert')]) eq_(sess.query(Boss).all(), [Boss(name='pointy haired')]) def test_explicit(self): engineers = Table('engineers', Base.metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', String(50)), Column('primary_language', String(50))) managers = Table('managers', Base.metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', String(50)), Column('golf_swing', String(50)) ) boss = Table('boss', Base.metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', String(50)), Column('golf_swing', String(50)) ) punion = polymorphic_union({ 'engineer': engineers, 'manager': managers, 'boss': boss}, 'type', 'punion') class Employee(Base, fixtures.ComparableEntity): __table__ = punion __mapper_args__ = {'polymorphic_on': punion.c.type} class Engineer(Employee): __table__ = engineers __mapper_args__ = {'polymorphic_identity': 'engineer', 'concrete': True} class Manager(Employee): __table__ = managers __mapper_args__ = {'polymorphic_identity': 'manager', 'concrete': True} class Boss(Manager): __table__ = boss __mapper_args__ = {'polymorphic_identity': 'boss', 'concrete': True} self._roundtrip(Employee, Manager, Engineer, Boss) def test_concrete_inline_non_polymorphic(self): """test the example from the declarative docs.""" class Employee(Base, fixtures.ComparableEntity): __tablename__ = 'people' id = Column(Integer, primary_key=True, test_needs_autoincrement=True) name = Column(String(50)) class Engineer(Employee): __tablename__ = 'engineers' __mapper_args__ = {'concrete': True} id = Column(Integer, primary_key=True, test_needs_autoincrement=True) primary_language = Column(String(50)) name = Column(String(50)) class Manager(Employee): __tablename__ = 'manager' __mapper_args__ = {'concrete': True} id = Column(Integer, primary_key=True, test_needs_autoincrement=True) golf_swing = Column(String(50)) name = Column(String(50)) class Boss(Manager): __tablename__ = 'boss' __mapper_args__ = {'concrete': True} id = Column(Integer, primary_key=True, test_needs_autoincrement=True) golf_swing = Column(String(50)) name = Column(String(50)) self._roundtrip(Employee, Manager, Engineer, Boss, polymorphic=False) def test_abstract_concrete_extension(self): class Employee(AbstractConcreteBase, Base, fixtures.ComparableEntity): pass class Manager(Employee): __tablename__ = 'manager' employee_id = Column(Integer, primary_key=True, test_needs_autoincrement=True) name = Column(String(50)) golf_swing = Column(String(40)) __mapper_args__ = { 'polymorphic_identity': 'manager', 'concrete': True} class Boss(Manager): __tablename__ = 'boss' employee_id = Column(Integer, primary_key=True, test_needs_autoincrement=True) name = Column(String(50)) golf_swing = Column(String(40)) __mapper_args__ = { 'polymorphic_identity': 'boss', 'concrete': True} class Engineer(Employee): __tablename__ = 'engineer' employee_id = Column(Integer, primary_key=True, 
test_needs_autoincrement=True) name = Column(String(50)) primary_language = Column(String(40)) __mapper_args__ = {'polymorphic_identity': 'engineer', 'concrete': True} self._roundtrip(Employee, Manager, Engineer, Boss) def test_concrete_extension(self): class Employee(ConcreteBase, Base, fixtures.ComparableEntity): __tablename__ = 'employee' employee_id = Column(Integer, primary_key=True, test_needs_autoincrement=True) name = Column(String(50)) __mapper_args__ = { 'polymorphic_identity': 'employee', 'concrete': True} class Manager(Employee): __tablename__ = 'manager' employee_id = Column(Integer, primary_key=True, test_needs_autoincrement=True) name = Column(String(50)) golf_swing = Column(String(40)) __mapper_args__ = { 'polymorphic_identity': 'manager', 'concrete': True} class Boss(Manager): __tablename__ = 'boss' employee_id = Column(Integer, primary_key=True, test_needs_autoincrement=True) name = Column(String(50)) golf_swing = Column(String(40)) __mapper_args__ = { 'polymorphic_identity': 'boss', 'concrete': True} class Engineer(Employee): __tablename__ = 'engineer' employee_id = Column(Integer, primary_key=True, test_needs_autoincrement=True) name = Column(String(50)) primary_language = Column(String(40)) __mapper_args__ = {'polymorphic_identity': 'engineer', 'concrete': True} self._roundtrip(Employee, Manager, Engineer, Boss) def test_has_inherited_table_doesnt_consider_base(self): class A(Base): __tablename__ = 'a' id = Column(Integer, primary_key=True) assert not has_inherited_table(A) class B(A): __tablename__ = 'b' id = Column(Integer, ForeignKey('a.id'), primary_key=True) assert has_inherited_table(B) def test_has_inherited_table_in_mapper_args(self): class Test(Base): __tablename__ = 'test' id = Column(Integer, primary_key=True) type = Column(String(20)) @declared_attr def __mapper_args__(cls): if not has_inherited_table(cls): ret = { 'polymorphic_identity': 'default', 'polymorphic_on': cls.type, } else: ret = {'polymorphic_identity': cls.__name__} return ret class PolyTest(Test): __tablename__ = 'poly_test' id = Column(Integer, ForeignKey(Test.id), primary_key=True) configure_mappers() assert Test.__mapper__.polymorphic_on is Test.__table__.c.type assert PolyTest.__mapper__.polymorphic_on is Test.__table__.c.type def test_ok_to_override_type_from_abstract(self): class Employee(AbstractConcreteBase, Base, fixtures.ComparableEntity): pass class Manager(Employee): __tablename__ = 'manager' employee_id = Column(Integer, primary_key=True, test_needs_autoincrement=True) name = Column(String(50)) golf_swing = Column(String(40)) @property def type(self): return "manager" __mapper_args__ = { 'polymorphic_identity': "manager", 'concrete': True} class Boss(Manager): __tablename__ = 'boss' employee_id = Column(Integer, primary_key=True, test_needs_autoincrement=True) name = Column(String(50)) golf_swing = Column(String(40)) @property def type(self): return "boss" __mapper_args__ = { 'polymorphic_identity': "boss", 'concrete': True} class Engineer(Employee): __tablename__ = 'engineer' employee_id = Column(Integer, primary_key=True, test_needs_autoincrement=True) name = Column(String(50)) primary_language = Column(String(40)) @property def type(self): return "engineer" __mapper_args__ = {'polymorphic_identity': "engineer", 'concrete': True} self._roundtrip(Employee, Manager, Engineer, Boss, explicit_type=True) SQLAlchemy-0.8.4/test/ext/declarative/test_mixin.py0000644000076500000240000011433512251150015023027 0ustar classicstaff00000000000000from sqlalchemy.testing import eq_, 
assert_raises, \ assert_raises_message, is_ from sqlalchemy.ext import declarative as decl import sqlalchemy as sa from sqlalchemy import testing from sqlalchemy import Integer, String, ForeignKey from sqlalchemy.testing.schema import Table, Column from sqlalchemy.orm import relationship, create_session, class_mapper, \ configure_mappers, clear_mappers, \ deferred, column_property, \ Session from sqlalchemy.util import classproperty from sqlalchemy.ext.declarative import declared_attr from sqlalchemy.testing import fixtures Base = None class DeclarativeTestBase(fixtures.TestBase, testing.AssertsExecutionResults): def setup(self): global Base Base = decl.declarative_base(testing.db) def teardown(self): Session.close_all() clear_mappers() Base.metadata.drop_all() class DeclarativeMixinTest(DeclarativeTestBase): def test_simple(self): class MyMixin(object): id = Column(Integer, primary_key=True, test_needs_autoincrement=True) def foo(self): return 'bar' + str(self.id) class MyModel(Base, MyMixin): __tablename__ = 'test' name = Column(String(100), nullable=False, index=True) Base.metadata.create_all() session = create_session() session.add(MyModel(name='testing')) session.flush() session.expunge_all() obj = session.query(MyModel).one() eq_(obj.id, 1) eq_(obj.name, 'testing') eq_(obj.foo(), 'bar1') def test_unique_column(self): class MyMixin(object): id = Column(Integer, primary_key=True) value = Column(String, unique=True) class MyModel(Base, MyMixin): __tablename__ = 'test' assert MyModel.__table__.c.value.unique def test_hierarchical_bases(self): class MyMixinParent: id = Column(Integer, primary_key=True, test_needs_autoincrement=True) def foo(self): return 'bar' + str(self.id) class MyMixin(MyMixinParent): baz = Column(String(100), nullable=False, index=True) class MyModel(Base, MyMixin): __tablename__ = 'test' name = Column(String(100), nullable=False, index=True) Base.metadata.create_all() session = create_session() session.add(MyModel(name='testing', baz='fu')) session.flush() session.expunge_all() obj = session.query(MyModel).one() eq_(obj.id, 1) eq_(obj.name, 'testing') eq_(obj.foo(), 'bar1') eq_(obj.baz, 'fu') def test_mixin_overrides(self): """test a mixin that overrides a column on a superclass.""" class MixinA(object): foo = Column(String(50)) class MixinB(MixinA): foo = Column(Integer) class MyModelA(Base, MixinA): __tablename__ = 'testa' id = Column(Integer, primary_key=True) class MyModelB(Base, MixinB): __tablename__ = 'testb' id = Column(Integer, primary_key=True) eq_(MyModelA.__table__.c.foo.type.__class__, String) eq_(MyModelB.__table__.c.foo.type.__class__, Integer) def test_not_allowed(self): class MyMixin: foo = Column(Integer, ForeignKey('bar.id')) def go(): class MyModel(Base, MyMixin): __tablename__ = 'foo' assert_raises(sa.exc.InvalidRequestError, go) class MyRelMixin: foo = relationship('Bar') def go(): class MyModel(Base, MyRelMixin): __tablename__ = 'foo' assert_raises(sa.exc.InvalidRequestError, go) class MyDefMixin: foo = deferred(Column('foo', String)) def go(): class MyModel(Base, MyDefMixin): __tablename__ = 'foo' assert_raises(sa.exc.InvalidRequestError, go) class MyCPropMixin: foo = column_property(Column('foo', String)) def go(): class MyModel(Base, MyCPropMixin): __tablename__ = 'foo' assert_raises(sa.exc.InvalidRequestError, go) def test_table_name_inherited(self): class MyMixin: @declared_attr def __tablename__(cls): return cls.__name__.lower() id = Column(Integer, primary_key=True) class MyModel(Base, MyMixin): pass eq_(MyModel.__table__.name, 
            'mymodel')

    def test_classproperty_still_works(self):
        class MyMixin(object):
            @classproperty
            def __tablename__(cls):
                return cls.__name__.lower()

            id = Column(Integer, primary_key=True)

        class MyModel(Base, MyMixin):
            __tablename__ = 'overridden'

        eq_(MyModel.__table__.name, 'overridden')

    def test_table_name_not_inherited(self):
        class MyMixin:
            @declared_attr
            def __tablename__(cls):
                return cls.__name__.lower()

            id = Column(Integer, primary_key=True)

        class MyModel(Base, MyMixin):
            __tablename__ = 'overridden'

        eq_(MyModel.__table__.name, 'overridden')

    def test_table_name_inheritance_order(self):
        class MyMixin1:
            @declared_attr
            def __tablename__(cls):
                return cls.__name__.lower() + '1'

        class MyMixin2:
            @declared_attr
            def __tablename__(cls):
                return cls.__name__.lower() + '2'

        class MyModel(Base, MyMixin1, MyMixin2):
            id = Column(Integer, primary_key=True)

        eq_(MyModel.__table__.name, 'mymodel1')

    def test_table_name_dependent_on_subclass(self):
        class MyHistoryMixin:
            @declared_attr
            def __tablename__(cls):
                return cls.parent_name + '_changelog'

        class MyModel(Base, MyHistoryMixin):
            parent_name = 'foo'
            id = Column(Integer, primary_key=True)

        eq_(MyModel.__table__.name, 'foo_changelog')

    def test_table_args_inherited(self):
        class MyMixin:
            __table_args__ = {'mysql_engine': 'InnoDB'}

        class MyModel(Base, MyMixin):
            __tablename__ = 'test'
            id = Column(Integer, primary_key=True)

        eq_(MyModel.__table__.kwargs, {'mysql_engine': 'InnoDB'})

    def test_table_args_inherited_descriptor(self):
        class MyMixin:
            @declared_attr
            def __table_args__(cls):
                return {'info': cls.__name__}

        class MyModel(Base, MyMixin):
            __tablename__ = 'test'
            id = Column(Integer, primary_key=True)

        eq_(MyModel.__table__.info, 'MyModel')

    def test_table_args_inherited_single_table_inheritance(self):
        class MyMixin:
            __table_args__ = {'mysql_engine': 'InnoDB'}

        class General(Base, MyMixin):
            __tablename__ = 'test'
            id = Column(Integer, primary_key=True)
            type_ = Column(String(50))
            __mapper_args__ = {'polymorphic_on': type_}

        class Specific(General):
            __mapper_args__ = {'polymorphic_identity': 'specific'}

        assert Specific.__table__ is General.__table__
        eq_(General.__table__.kwargs, {'mysql_engine': 'InnoDB'})

    def test_columns_single_table_inheritance(self):
        """Test a column on a mixin with an alternate attribute name,
        mapped to a superclass and single-table inheritance subclass.
        The superclass table gets the column, the subclass shares
        the MapperProperty.

        """

        class MyMixin(object):
            foo = Column('foo', Integer)
            bar = Column('bar_newname', Integer)

        class General(Base, MyMixin):
            __tablename__ = 'test'
            id = Column(Integer, primary_key=True)
            type_ = Column(String(50))
            __mapper_args__ = {'polymorphic_on': type_}

        class Specific(General):
            __mapper_args__ = {'polymorphic_identity': 'specific'}

        assert General.bar.prop.columns[0] is General.__table__.c.bar_newname
        assert len(General.bar.prop.columns) == 1
        assert Specific.bar.prop is General.bar.prop

    @testing.skip_if(lambda: testing.against('oracle'),
                     "Test has an empty insert in it at the moment")
    def test_columns_single_inheritance_conflict_resolution(self):
        """Test that a declared_attr can return the existing column
        and it will be ignored.  this allows conditional columns
        to be added.

        See [ticket:2472].
""" class Person(Base): __tablename__ = 'person' id = Column(Integer, primary_key=True) class Mixin(object): @declared_attr def target_id(cls): return cls.__table__.c.get('target_id', Column(Integer, ForeignKey('other.id')) ) @declared_attr def target(cls): return relationship("Other") class Engineer(Mixin, Person): """single table inheritance""" class Manager(Mixin, Person): """single table inheritance""" class Other(Base): __tablename__ = 'other' id = Column(Integer, primary_key=True) is_( Engineer.target_id.property.columns[0], Person.__table__.c.target_id ) is_( Manager.target_id.property.columns[0], Person.__table__.c.target_id ) # do a brief round trip on this Base.metadata.create_all() session = Session() o1, o2 = Other(), Other() session.add_all([ Engineer(target=o1), Manager(target=o2), Manager(target=o1) ]) session.commit() eq_(session.query(Engineer).first().target, o1) def test_columns_joined_table_inheritance(self): """Test a column on a mixin with an alternate attribute name, mapped to a superclass and joined-table inheritance subclass. Both tables get the column, in the case of the subclass the two columns are joined under one MapperProperty. """ class MyMixin(object): foo = Column('foo', Integer) bar = Column('bar_newname', Integer) class General(Base, MyMixin): __tablename__ = 'test' id = Column(Integer, primary_key=True) type_ = Column(String(50)) __mapper__args = {'polymorphic_on': type_} class Specific(General): __tablename__ = 'sub' id = Column(Integer, ForeignKey('test.id'), primary_key=True) __mapper_args__ = {'polymorphic_identity': 'specific'} assert General.bar.prop.columns[0] is General.__table__.c.bar_newname assert len(General.bar.prop.columns) == 1 assert Specific.bar.prop is General.bar.prop eq_(len(Specific.bar.prop.columns), 1) assert Specific.bar.prop.columns[0] is General.__table__.c.bar_newname def test_column_join_checks_superclass_type(self): """Test that the logic which joins subclass props to those of the superclass checks that the superclass property is a column. """ class General(Base): __tablename__ = 'test' id = Column(Integer, primary_key=True) general_id = Column(Integer, ForeignKey('test.id')) type_ = relationship("General") class Specific(General): __tablename__ = 'sub' id = Column(Integer, ForeignKey('test.id'), primary_key=True) type_ = Column('foob', String(50)) assert isinstance(General.type_.property, sa.orm.RelationshipProperty) assert Specific.type_.property.columns[0] is Specific.__table__.c.foob def test_column_join_checks_subclass_type(self): """Test that the logic which joins subclass props to those of the superclass checks that the subclass property is a column. 
""" def go(): class General(Base): __tablename__ = 'test' id = Column(Integer, primary_key=True) type_ = Column('foob', Integer) class Specific(General): __tablename__ = 'sub' id = Column(Integer, ForeignKey('test.id'), primary_key=True) specific_id = Column(Integer, ForeignKey('sub.id')) type_ = relationship("Specific") assert_raises_message( sa.exc.ArgumentError, "column 'foob' conflicts with property", go ) def test_table_args_overridden(self): class MyMixin: __table_args__ = {'mysql_engine': 'Foo'} class MyModel(Base, MyMixin): __tablename__ = 'test' __table_args__ = {'mysql_engine': 'InnoDB'} id = Column(Integer, primary_key=True) eq_(MyModel.__table__.kwargs, {'mysql_engine': 'InnoDB'}) def test_mapper_args_declared_attr(self): class ComputedMapperArgs: @declared_attr def __mapper_args__(cls): if cls.__name__ == 'Person': return {'polymorphic_on': cls.discriminator} else: return {'polymorphic_identity': cls.__name__} class Person(Base, ComputedMapperArgs): __tablename__ = 'people' id = Column(Integer, primary_key=True) discriminator = Column('type', String(50)) class Engineer(Person): pass configure_mappers() assert class_mapper(Person).polymorphic_on \ is Person.__table__.c.type eq_(class_mapper(Engineer).polymorphic_identity, 'Engineer') def test_mapper_args_declared_attr_two(self): # same as test_mapper_args_declared_attr, but we repeat # ComputedMapperArgs on both classes for no apparent reason. class ComputedMapperArgs: @declared_attr def __mapper_args__(cls): if cls.__name__ == 'Person': return {'polymorphic_on': cls.discriminator} else: return {'polymorphic_identity': cls.__name__} class Person(Base, ComputedMapperArgs): __tablename__ = 'people' id = Column(Integer, primary_key=True) discriminator = Column('type', String(50)) class Engineer(Person, ComputedMapperArgs): pass configure_mappers() assert class_mapper(Person).polymorphic_on \ is Person.__table__.c.type eq_(class_mapper(Engineer).polymorphic_identity, 'Engineer') def test_table_args_composite(self): class MyMixin1: __table_args__ = {'info': {'baz': 'bob'}} class MyMixin2: __table_args__ = {'info': {'foo': 'bar'}} class MyModel(Base, MyMixin1, MyMixin2): __tablename__ = 'test' @declared_attr def __table_args__(self): info = {} args = dict(info=info) info.update(MyMixin1.__table_args__['info']) info.update(MyMixin2.__table_args__['info']) return args id = Column(Integer, primary_key=True) eq_(MyModel.__table__.info, {'foo': 'bar', 'baz': 'bob'}) def test_mapper_args_inherited(self): class MyMixin: __mapper_args__ = {'always_refresh': True} class MyModel(Base, MyMixin): __tablename__ = 'test' id = Column(Integer, primary_key=True) eq_(MyModel.__mapper__.always_refresh, True) def test_mapper_args_inherited_descriptor(self): class MyMixin: @declared_attr def __mapper_args__(cls): # tenuous, but illustrates the problem! 
if cls.__name__ == 'MyModel': return dict(always_refresh=True) else: return dict(always_refresh=False) class MyModel(Base, MyMixin): __tablename__ = 'test' id = Column(Integer, primary_key=True) eq_(MyModel.__mapper__.always_refresh, True) def test_mapper_args_polymorphic_on_inherited(self): class MyMixin: type_ = Column(String(50)) __mapper_args__ = {'polymorphic_on': type_} class MyModel(Base, MyMixin): __tablename__ = 'test' id = Column(Integer, primary_key=True) col = MyModel.__mapper__.polymorphic_on eq_(col.name, 'type_') assert col.table is not None def test_mapper_args_overridden(self): class MyMixin: __mapper_args__ = dict(always_refresh=True) class MyModel(Base, MyMixin): __tablename__ = 'test' __mapper_args__ = dict(always_refresh=False) id = Column(Integer, primary_key=True) eq_(MyModel.__mapper__.always_refresh, False) def test_mapper_args_composite(self): class MyMixin1: type_ = Column(String(50)) __mapper_args__ = {'polymorphic_on': type_} class MyMixin2: __mapper_args__ = {'always_refresh': True} class MyModel(Base, MyMixin1, MyMixin2): __tablename__ = 'test' @declared_attr def __mapper_args__(cls): args = {} args.update(MyMixin1.__mapper_args__) args.update(MyMixin2.__mapper_args__) if cls.__name__ != 'MyModel': args.pop('polymorphic_on') args['polymorphic_identity'] = cls.__name__ return args id = Column(Integer, primary_key=True) class MySubModel(MyModel): pass eq_( MyModel.__mapper__.polymorphic_on.name, 'type_' ) assert MyModel.__mapper__.polymorphic_on.table is not None eq_(MyModel.__mapper__.always_refresh, True) eq_(MySubModel.__mapper__.always_refresh, True) eq_(MySubModel.__mapper__.polymorphic_identity, 'MySubModel') def test_mapper_args_property(self): class MyModel(Base): @declared_attr def __tablename__(cls): return cls.__name__.lower() @declared_attr def __table_args__(cls): return {'mysql_engine':'InnoDB'} @declared_attr def __mapper_args__(cls): args = {} args['polymorphic_identity'] = cls.__name__ return args id = Column(Integer, primary_key=True) class MySubModel(MyModel): id = Column(Integer, ForeignKey('mymodel.id'), primary_key=True) class MySubModel2(MyModel): __tablename__ = 'sometable' id = Column(Integer, ForeignKey('mymodel.id'), primary_key=True) eq_(MyModel.__mapper__.polymorphic_identity, 'MyModel') eq_(MySubModel.__mapper__.polymorphic_identity, 'MySubModel') eq_(MyModel.__table__.kwargs['mysql_engine'], 'InnoDB') eq_(MySubModel.__table__.kwargs['mysql_engine'], 'InnoDB') eq_(MySubModel2.__table__.kwargs['mysql_engine'], 'InnoDB') eq_(MyModel.__table__.name, 'mymodel') eq_(MySubModel.__table__.name, 'mysubmodel') def test_mapper_args_custom_base(self): """test the @declared_attr approach from a custom base.""" class Base(object): @declared_attr def __tablename__(cls): return cls.__name__.lower() @declared_attr def __table_args__(cls): return {'mysql_engine':'InnoDB'} @declared_attr def id(self): return Column(Integer, primary_key=True) Base = decl.declarative_base(cls=Base) class MyClass(Base): pass class MyOtherClass(Base): pass eq_(MyClass.__table__.kwargs['mysql_engine'], 'InnoDB') eq_(MyClass.__table__.name, 'myclass') eq_(MyOtherClass.__table__.name, 'myotherclass') assert MyClass.__table__.c.id.table is MyClass.__table__ assert MyOtherClass.__table__.c.id.table is MyOtherClass.__table__ def test_single_table_no_propagation(self): class IdColumn: id = Column(Integer, primary_key=True) class Generic(Base, IdColumn): __tablename__ = 'base' discriminator = Column('type', String(50)) __mapper_args__ = dict(polymorphic_on=discriminator) value 
= Column(Integer()) class Specific(Generic): __mapper_args__ = dict(polymorphic_identity='specific') assert Specific.__table__ is Generic.__table__ eq_(Generic.__table__.c.keys(), ['id', 'type', 'value']) assert class_mapper(Specific).polymorphic_on \ is Generic.__table__.c.type eq_(class_mapper(Specific).polymorphic_identity, 'specific') def test_joined_table_propagation(self): class CommonMixin: @declared_attr def __tablename__(cls): return cls.__name__.lower() __table_args__ = {'mysql_engine': 'InnoDB'} timestamp = Column(Integer) id = Column(Integer, primary_key=True) class Generic(Base, CommonMixin): discriminator = Column('python_type', String(50)) __mapper_args__ = dict(polymorphic_on=discriminator) class Specific(Generic): __mapper_args__ = dict(polymorphic_identity='specific') id = Column(Integer, ForeignKey('generic.id'), primary_key=True) eq_(Generic.__table__.name, 'generic') eq_(Specific.__table__.name, 'specific') eq_(Generic.__table__.c.keys(), ['timestamp', 'id', 'python_type']) eq_(Specific.__table__.c.keys(), ['id']) eq_(Generic.__table__.kwargs, {'mysql_engine': 'InnoDB'}) eq_(Specific.__table__.kwargs, {'mysql_engine': 'InnoDB'}) def test_some_propagation(self): class CommonMixin: @declared_attr def __tablename__(cls): return cls.__name__.lower() __table_args__ = {'mysql_engine': 'InnoDB'} timestamp = Column(Integer) class BaseType(Base, CommonMixin): discriminator = Column('type', String(50)) __mapper_args__ = dict(polymorphic_on=discriminator) id = Column(Integer, primary_key=True) value = Column(Integer()) class Single(BaseType): __tablename__ = None __mapper_args__ = dict(polymorphic_identity='type1') class Joined(BaseType): __mapper_args__ = dict(polymorphic_identity='type2') id = Column(Integer, ForeignKey('basetype.id'), primary_key=True) eq_(BaseType.__table__.name, 'basetype') eq_(BaseType.__table__.c.keys(), ['timestamp', 'type', 'id', 'value']) eq_(BaseType.__table__.kwargs, {'mysql_engine': 'InnoDB'}) assert Single.__table__ is BaseType.__table__ eq_(Joined.__table__.name, 'joined') eq_(Joined.__table__.c.keys(), ['id']) eq_(Joined.__table__.kwargs, {'mysql_engine': 'InnoDB'}) def test_col_copy_vs_declared_attr_joined_propagation(self): class Mixin(object): a = Column(Integer) @declared_attr def b(cls): return Column(Integer) class A(Mixin, Base): __tablename__ = 'a' id = Column(Integer, primary_key=True) class B(A): __tablename__ = 'b' id = Column(Integer, ForeignKey('a.id'), primary_key=True) assert 'a' in A.__table__.c assert 'b' in A.__table__.c assert 'a' not in B.__table__.c assert 'b' not in B.__table__.c def test_col_copy_vs_declared_attr_joined_propagation_newname(self): class Mixin(object): a = Column('a1', Integer) @declared_attr def b(cls): return Column('b1', Integer) class A(Mixin, Base): __tablename__ = 'a' id = Column(Integer, primary_key=True) class B(A): __tablename__ = 'b' id = Column(Integer, ForeignKey('a.id'), primary_key=True) assert 'a1' in A.__table__.c assert 'b1' in A.__table__.c assert 'a1' not in B.__table__.c assert 'b1' not in B.__table__.c def test_col_copy_vs_declared_attr_single_propagation(self): class Mixin(object): a = Column(Integer) @declared_attr def b(cls): return Column(Integer) class A(Mixin, Base): __tablename__ = 'a' id = Column(Integer, primary_key=True) class B(A): pass assert 'a' in A.__table__.c assert 'b' in A.__table__.c def test_non_propagating_mixin(self): class NoJoinedTableNameMixin: @declared_attr def __tablename__(cls): if decl.has_inherited_table(cls): return None return cls.__name__.lower() class 
BaseType(Base, NoJoinedTableNameMixin): discriminator = Column('type', String(50)) __mapper_args__ = dict(polymorphic_on=discriminator) id = Column(Integer, primary_key=True) value = Column(Integer()) class Specific(BaseType): __mapper_args__ = dict(polymorphic_identity='specific') eq_(BaseType.__table__.name, 'basetype') eq_(BaseType.__table__.c.keys(), ['type', 'id', 'value']) assert Specific.__table__ is BaseType.__table__ assert class_mapper(Specific).polymorphic_on \ is BaseType.__table__.c.type eq_(class_mapper(Specific).polymorphic_identity, 'specific') def test_non_propagating_mixin_used_for_joined(self): class TableNameMixin: @declared_attr def __tablename__(cls): if decl.has_inherited_table(cls) and TableNameMixin \ not in cls.__bases__: return None return cls.__name__.lower() class BaseType(Base, TableNameMixin): discriminator = Column('type', String(50)) __mapper_args__ = dict(polymorphic_on=discriminator) id = Column(Integer, primary_key=True) value = Column(Integer()) class Specific(BaseType, TableNameMixin): __mapper_args__ = dict(polymorphic_identity='specific') id = Column(Integer, ForeignKey('basetype.id'), primary_key=True) eq_(BaseType.__table__.name, 'basetype') eq_(BaseType.__table__.c.keys(), ['type', 'id', 'value']) eq_(Specific.__table__.name, 'specific') eq_(Specific.__table__.c.keys(), ['id']) def test_single_back_propagate(self): class ColumnMixin: timestamp = Column(Integer) class BaseType(Base): __tablename__ = 'foo' discriminator = Column('type', String(50)) __mapper_args__ = dict(polymorphic_on=discriminator) id = Column(Integer, primary_key=True) class Specific(BaseType, ColumnMixin): __mapper_args__ = dict(polymorphic_identity='specific') eq_(BaseType.__table__.c.keys(), ['type', 'id', 'timestamp']) def test_table_in_model_and_same_column_in_mixin(self): class ColumnMixin: data = Column(Integer) class Model(Base, ColumnMixin): __table__ = Table('foo', Base.metadata, Column('data', Integer), Column('id', Integer, primary_key=True)) model_col = Model.__table__.c.data mixin_col = ColumnMixin.data assert model_col is not mixin_col eq_(model_col.name, 'data') assert model_col.type.__class__ is mixin_col.type.__class__ def test_table_in_model_and_different_named_column_in_mixin(self): class ColumnMixin: tada = Column(Integer) def go(): class Model(Base, ColumnMixin): __table__ = Table('foo', Base.metadata, Column('data',Integer), Column('id', Integer,primary_key=True)) foo = relationship("Dest") assert_raises_message(sa.exc.ArgumentError, "Can't add additional column 'tada' when " "specifying __table__", go) def test_table_in_model_and_different_named_alt_key_column_in_mixin(self): # here, the __table__ has a column 'tada'. We disallow # the add of the 'foobar' column, even though it's # keyed to 'tada'. 
class ColumnMixin: tada = Column('foobar', Integer) def go(): class Model(Base, ColumnMixin): __table__ = Table('foo', Base.metadata, Column('data',Integer), Column('tada', Integer), Column('id', Integer,primary_key=True)) foo = relationship("Dest") assert_raises_message(sa.exc.ArgumentError, "Can't add additional column 'foobar' when " "specifying __table__", go) def test_table_in_model_overrides_different_typed_column_in_mixin(self): class ColumnMixin: data = Column(String) class Model(Base, ColumnMixin): __table__ = Table('foo', Base.metadata, Column('data', Integer), Column('id', Integer, primary_key=True)) model_col = Model.__table__.c.data mixin_col = ColumnMixin.data assert model_col is not mixin_col eq_(model_col.name, 'data') assert model_col.type.__class__ is Integer def test_mixin_column_ordering(self): class Foo(object): col1 = Column(Integer) col3 = Column(Integer) class Bar(object): col2 = Column(Integer) col4 = Column(Integer) class Model(Base, Foo, Bar): id = Column(Integer, primary_key=True) __tablename__ = 'model' eq_(Model.__table__.c.keys(), ['col1', 'col3', 'col2', 'col4', 'id']) def test_honor_class_mro_one(self): class HasXMixin(object): @declared_attr def x(self): return Column(Integer) class Parent(HasXMixin, Base): __tablename__ = 'parent' id = Column(Integer, primary_key=True) class Child(Parent): __tablename__ = 'child' id = Column(Integer, ForeignKey('parent.id'), primary_key=True) assert "x" not in Child.__table__.c def test_honor_class_mro_two(self): class HasXMixin(object): @declared_attr def x(self): return Column(Integer) class Parent(HasXMixin, Base): __tablename__ = 'parent' id = Column(Integer, primary_key=True) def x(self): return "hi" class C(Parent): __tablename__ = 'c' id = Column(Integer, ForeignKey('parent.id'), primary_key=True) assert C().x() == 'hi' def test_arbitrary_attrs_one(self): class HasMixin(object): @declared_attr def some_attr(cls): return cls.__name__ + "SOME ATTR" class Mapped(HasMixin, Base): __tablename__ = 't' id = Column(Integer, primary_key=True) eq_(Mapped.some_attr, "MappedSOME ATTR") eq_(Mapped.__dict__['some_attr'], "MappedSOME ATTR") def test_arbitrary_attrs_two(self): from sqlalchemy.ext.associationproxy import association_proxy class FilterA(Base): __tablename__ = 'filter_a' id = Column(Integer(), primary_key=True) parent_id = Column(Integer(), ForeignKey('type_a.id')) filter = Column(String()) def __init__(self, filter_, **kw): self.filter = filter_ class FilterB(Base): __tablename__ = 'filter_b' id = Column(Integer(), primary_key=True) parent_id = Column(Integer(), ForeignKey('type_b.id')) filter = Column(String()) def __init__(self, filter_, **kw): self.filter = filter_ class FilterMixin(object): @declared_attr def _filters(cls): return relationship(cls.filter_class, cascade='all,delete,delete-orphan') @declared_attr def filters(cls): return association_proxy('_filters', 'filter') class TypeA(Base, FilterMixin): __tablename__ = 'type_a' filter_class = FilterA id = Column(Integer(), primary_key=True) class TypeB(Base, FilterMixin): __tablename__ = 'type_b' filter_class = FilterB id = Column(Integer(), primary_key=True) TypeA(filters=[u'foo']) TypeB(filters=[u'foo']) class DeclarativeMixinPropertyTest(DeclarativeTestBase): def test_column_property(self): class MyMixin(object): @declared_attr def prop_hoho(cls): return column_property(Column('prop', String(50))) class MyModel(Base, MyMixin): __tablename__ = 'test' id = Column(Integer, primary_key=True, test_needs_autoincrement=True) class MyOtherModel(Base, MyMixin): 
__tablename__ = 'othertest' id = Column(Integer, primary_key=True, test_needs_autoincrement=True) assert MyModel.__table__.c.prop is not None assert MyOtherModel.__table__.c.prop is not None assert MyModel.__table__.c.prop \ is not MyOtherModel.__table__.c.prop assert MyModel.prop_hoho.property.columns \ == [MyModel.__table__.c.prop] assert MyOtherModel.prop_hoho.property.columns \ == [MyOtherModel.__table__.c.prop] assert MyModel.prop_hoho.property \ is not MyOtherModel.prop_hoho.property Base.metadata.create_all() sess = create_session() m1, m2 = MyModel(prop_hoho='foo'), MyOtherModel(prop_hoho='bar') sess.add_all([m1, m2]) sess.flush() eq_(sess.query(MyModel).filter(MyModel.prop_hoho == 'foo' ).one(), m1) eq_(sess.query(MyOtherModel).filter(MyOtherModel.prop_hoho == 'bar').one(), m2) def test_doc(self): """test documentation transfer. the documentation situation with @declared_attr is problematic. at least see if mapped subclasses get the doc. """ class MyMixin(object): @declared_attr def type_(cls): """this is a document.""" return Column(String(50)) @declared_attr def t2(cls): """this is another document.""" return column_property(Column(String(50))) class MyModel(Base, MyMixin): __tablename__ = 'test' id = Column(Integer, primary_key=True) configure_mappers() eq_(MyModel.type_.__doc__, """this is a document.""") eq_(MyModel.t2.__doc__, """this is another document.""") def test_column_in_mapper_args(self): class MyMixin(object): @declared_attr def type_(cls): return Column(String(50)) __mapper_args__ = {'polymorphic_on': type_} class MyModel(Base, MyMixin): __tablename__ = 'test' id = Column(Integer, primary_key=True) configure_mappers() col = MyModel.__mapper__.polymorphic_on eq_(col.name, 'type_') assert col.table is not None def test_deferred(self): class MyMixin(object): @declared_attr def data(cls): return deferred(Column('data', String(50))) class MyModel(Base, MyMixin): __tablename__ = 'test' id = Column(Integer, primary_key=True, test_needs_autoincrement=True) Base.metadata.create_all() sess = create_session() sess.add_all([MyModel(data='d1'), MyModel(data='d2')]) sess.flush() sess.expunge_all() d1, d2 = sess.query(MyModel).order_by(MyModel.data) assert 'data' not in d1.__dict__ assert d1.data == 'd1' assert 'data' in d1.__dict__ def _test_relationship(self, usestring): class RefTargetMixin(object): @declared_attr def target_id(cls): return Column('target_id', ForeignKey('target.id')) if usestring: @declared_attr def target(cls): return relationship('Target', primaryjoin='Target.id==%s.target_id' % cls.__name__) else: @declared_attr def target(cls): return relationship('Target') class Foo(Base, RefTargetMixin): __tablename__ = 'foo' id = Column(Integer, primary_key=True, test_needs_autoincrement=True) class Bar(Base, RefTargetMixin): __tablename__ = 'bar' id = Column(Integer, primary_key=True, test_needs_autoincrement=True) class Target(Base): __tablename__ = 'target' id = Column(Integer, primary_key=True, test_needs_autoincrement=True) Base.metadata.create_all() sess = create_session() t1, t2 = Target(), Target() f1, f2, b1 = Foo(target=t1), Foo(target=t2), Bar(target=t1) sess.add_all([f1, f2, b1]) sess.flush() eq_(sess.query(Foo).filter(Foo.target == t2).one(), f2) eq_(sess.query(Bar).filter(Bar.target == t2).first(), None) sess.expire_all() eq_(f1.target, t1) def test_relationship(self): self._test_relationship(False) def test_relationship_primryjoin(self): self._test_relationship(True) 
SQLAlchemy-0.8.4/test/ext/declarative/test_reflection.py0000644000076500000240000003623212251150015024034 0ustar classicstaff00000000000000from sqlalchemy.testing import eq_, assert_raises from sqlalchemy.ext import declarative as decl from sqlalchemy import testing from sqlalchemy import MetaData, Integer, String, ForeignKey from sqlalchemy.testing.schema import Table, Column from sqlalchemy.orm import relationship, create_session, \ clear_mappers, \ Session from sqlalchemy.testing import fixtures class DeclarativeReflectionBase(fixtures.TablesTest): __requires__ = 'reflectable_autoincrement', def setup(self): global Base Base = decl.declarative_base(testing.db) def teardown(self): super(DeclarativeReflectionBase, self).teardown() clear_mappers() class DeclarativeReflectionTest(DeclarativeReflectionBase): @classmethod def define_tables(cls, metadata): Table('users', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', String(50)), test_needs_fk=True) Table( 'addresses', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('email', String(50)), Column('user_id', Integer, ForeignKey('users.id')), test_needs_fk=True, ) Table( 'imhandles', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('user_id', Integer), Column('network', String(50)), Column('handle', String(50)), test_needs_fk=True, ) def test_basic(self): meta = MetaData(testing.db) class User(Base, fixtures.ComparableEntity): __tablename__ = 'users' __autoload__ = True if testing.against('oracle', 'firebird'): id = Column('id', Integer, primary_key=True, test_needs_autoincrement=True) addresses = relationship('Address', backref='user') class Address(Base, fixtures.ComparableEntity): __tablename__ = 'addresses' __autoload__ = True if testing.against('oracle', 'firebird'): id = Column('id', Integer, primary_key=True, test_needs_autoincrement=True) u1 = User(name='u1', addresses=[Address(email='one'), Address(email='two')]) sess = create_session() sess.add(u1) sess.flush() sess.expunge_all() eq_(sess.query(User).all(), [User(name='u1', addresses=[Address(email='one'), Address(email='two')])]) a1 = sess.query(Address).filter(Address.email == 'two').one() eq_(a1, Address(email='two')) eq_(a1.user, User(name='u1')) def test_rekey(self): meta = MetaData(testing.db) class User(Base, fixtures.ComparableEntity): __tablename__ = 'users' __autoload__ = True if testing.against('oracle', 'firebird'): id = Column('id', Integer, primary_key=True, test_needs_autoincrement=True) nom = Column('name', String(50), key='nom') addresses = relationship('Address', backref='user') class Address(Base, fixtures.ComparableEntity): __tablename__ = 'addresses' __autoload__ = True if testing.against('oracle', 'firebird'): id = Column('id', Integer, primary_key=True, test_needs_autoincrement=True) u1 = User(nom='u1', addresses=[Address(email='one'), Address(email='two')]) sess = create_session() sess.add(u1) sess.flush() sess.expunge_all() eq_(sess.query(User).all(), [User(nom='u1', addresses=[Address(email='one'), Address(email='two')])]) a1 = sess.query(Address).filter(Address.email == 'two').one() eq_(a1, Address(email='two')) eq_(a1.user, User(nom='u1')) assert_raises(TypeError, User, name='u3') def test_supplied_fk(self): meta = MetaData(testing.db) class IMHandle(Base, fixtures.ComparableEntity): __tablename__ = 'imhandles' __autoload__ = True if testing.against('oracle', 'firebird'): id = Column('id', Integer, primary_key=True, 
test_needs_autoincrement=True) user_id = Column('user_id', Integer, ForeignKey('users.id')) class User(Base, fixtures.ComparableEntity): __tablename__ = 'users' __autoload__ = True if testing.against('oracle', 'firebird'): id = Column('id', Integer, primary_key=True, test_needs_autoincrement=True) handles = relationship('IMHandle', backref='user') u1 = User(name='u1', handles=[IMHandle(network='blabber', handle='foo'), IMHandle(network='lol', handle='zomg' )]) sess = create_session() sess.add(u1) sess.flush() sess.expunge_all() eq_(sess.query(User).all(), [User(name='u1', handles=[IMHandle(network='blabber', handle='foo'), IMHandle(network='lol', handle='zomg')])]) a1 = sess.query(IMHandle).filter(IMHandle.handle == 'zomg' ).one() eq_(a1, IMHandle(network='lol', handle='zomg')) eq_(a1.user, User(name='u1')) class DeferredReflectBase(DeclarativeReflectionBase): def teardown(self): super(DeferredReflectBase,self).teardown() from sqlalchemy.ext.declarative.base import _MapperConfig _MapperConfig.configs.clear() Base = None class DeferredReflectPKFKTest(DeferredReflectBase): @classmethod def define_tables(cls, metadata): Table("a", metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), ) Table("b", metadata, Column('id', Integer, ForeignKey('a.id'), primary_key=True), Column('x', Integer, primary_key=True) ) def test_pk_fk(self): class B(decl.DeferredReflection, fixtures.ComparableEntity, Base): __tablename__ = 'b' a = relationship("A") class A(decl.DeferredReflection, fixtures.ComparableEntity, Base): __tablename__ = 'a' decl.DeferredReflection.prepare(testing.db) class DeferredReflectionTest(DeferredReflectBase): @classmethod def define_tables(cls, metadata): Table('users', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', String(50)), test_needs_fk=True) Table( 'addresses', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('email', String(50)), Column('user_id', Integer, ForeignKey('users.id')), test_needs_fk=True, ) def _roundtrip(self): User = Base._decl_class_registry['User'] Address = Base._decl_class_registry['Address'] u1 = User(name='u1', addresses=[Address(email='one'), Address(email='two')]) sess = create_session() sess.add(u1) sess.flush() sess.expunge_all() eq_(sess.query(User).all(), [User(name='u1', addresses=[Address(email='one'), Address(email='two')])]) a1 = sess.query(Address).filter(Address.email == 'two').one() eq_(a1, Address(email='two')) eq_(a1.user, User(name='u1')) def test_basic_deferred(self): class User(decl.DeferredReflection, fixtures.ComparableEntity, Base): __tablename__ = 'users' addresses = relationship("Address", backref="user") class Address(decl.DeferredReflection, fixtures.ComparableEntity, Base): __tablename__ = 'addresses' decl.DeferredReflection.prepare(testing.db) self._roundtrip() def test_abstract_base(self): class DefBase(decl.DeferredReflection, Base): __abstract__ = True class OtherDefBase(decl.DeferredReflection, Base): __abstract__ = True class User(fixtures.ComparableEntity, DefBase): __tablename__ = 'users' addresses = relationship("Address", backref="user") class Address(fixtures.ComparableEntity, DefBase): __tablename__ = 'addresses' class Fake(OtherDefBase): __tablename__ = 'nonexistent' DefBase.prepare(testing.db) self._roundtrip() def test_redefine_fk_double(self): class User(decl.DeferredReflection, fixtures.ComparableEntity, Base): __tablename__ = 'users' addresses = relationship("Address", backref="user") class 
Address(decl.DeferredReflection, fixtures.ComparableEntity, Base): __tablename__ = 'addresses' user_id = Column(Integer, ForeignKey('users.id')) decl.DeferredReflection.prepare(testing.db) self._roundtrip() def test_mapper_args_deferred(self): """test that __mapper_args__ is not called until *after* table reflection""" class User(decl.DeferredReflection, fixtures.ComparableEntity, Base): __tablename__ = 'users' @decl.declared_attr def __mapper_args__(cls): return { "order_by":cls.__table__.c.name } decl.DeferredReflection.prepare(testing.db) sess = Session() sess.add_all([ User(name='G'), User(name='Q'), User(name='A'), User(name='C'), ]) sess.commit() eq_( sess.query(User).all(), [ User(name='A'), User(name='C'), User(name='G'), User(name='Q'), ] ) class DeferredInhReflectBase(DeferredReflectBase): def _roundtrip(self): Foo = Base._decl_class_registry['Foo'] Bar = Base._decl_class_registry['Bar'] s = Session(testing.db) s.add_all([ Bar(data='d1', bar_data='b1'), Bar(data='d2', bar_data='b2'), Bar(data='d3', bar_data='b3'), Foo(data='d4') ]) s.commit() eq_( s.query(Foo).order_by(Foo.id).all(), [ Bar(data='d1', bar_data='b1'), Bar(data='d2', bar_data='b2'), Bar(data='d3', bar_data='b3'), Foo(data='d4') ] ) class DeferredSingleInhReflectionTest(DeferredInhReflectBase): @classmethod def define_tables(cls, metadata): Table("foo", metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('type', String(32)), Column('data', String(30)), Column('bar_data', String(30)) ) def test_basic(self): class Foo(decl.DeferredReflection, fixtures.ComparableEntity, Base): __tablename__ = 'foo' __mapper_args__ = {"polymorphic_on":"type", "polymorphic_identity":"foo"} class Bar(Foo): __mapper_args__ = {"polymorphic_identity":"bar"} decl.DeferredReflection.prepare(testing.db) self._roundtrip() def test_add_subclass_column(self): class Foo(decl.DeferredReflection, fixtures.ComparableEntity, Base): __tablename__ = 'foo' __mapper_args__ = {"polymorphic_on":"type", "polymorphic_identity":"foo"} class Bar(Foo): __mapper_args__ = {"polymorphic_identity":"bar"} bar_data = Column(String(30)) decl.DeferredReflection.prepare(testing.db) self._roundtrip() def test_add_pk_column(self): class Foo(decl.DeferredReflection, fixtures.ComparableEntity, Base): __tablename__ = 'foo' __mapper_args__ = {"polymorphic_on":"type", "polymorphic_identity":"foo"} id = Column(Integer, primary_key=True) class Bar(Foo): __mapper_args__ = {"polymorphic_identity":"bar"} decl.DeferredReflection.prepare(testing.db) self._roundtrip() class DeferredJoinedInhReflectionTest(DeferredInhReflectBase): @classmethod def define_tables(cls, metadata): Table("foo", metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('type', String(32)), Column('data', String(30)), test_needs_fk=True, ) Table('bar', metadata, Column('id', Integer, ForeignKey('foo.id'), primary_key=True), Column('bar_data', String(30)), test_needs_fk=True, ) def test_basic(self): class Foo(decl.DeferredReflection, fixtures.ComparableEntity, Base): __tablename__ = 'foo' __mapper_args__ = {"polymorphic_on":"type", "polymorphic_identity":"foo"} class Bar(Foo): __tablename__ = 'bar' __mapper_args__ = {"polymorphic_identity":"bar"} decl.DeferredReflection.prepare(testing.db) self._roundtrip() def test_add_subclass_column(self): class Foo(decl.DeferredReflection, fixtures.ComparableEntity, Base): __tablename__ = 'foo' __mapper_args__ = {"polymorphic_on":"type", "polymorphic_identity":"foo"} class Bar(Foo): __tablename__ = 'bar' 
__mapper_args__ = {"polymorphic_identity":"bar"} bar_data = Column(String(30)) decl.DeferredReflection.prepare(testing.db) self._roundtrip() def test_add_pk_column(self): class Foo(decl.DeferredReflection, fixtures.ComparableEntity, Base): __tablename__ = 'foo' __mapper_args__ = {"polymorphic_on":"type", "polymorphic_identity":"foo"} id = Column(Integer, primary_key=True) class Bar(Foo): __tablename__ = 'bar' __mapper_args__ = {"polymorphic_identity":"bar"} decl.DeferredReflection.prepare(testing.db) self._roundtrip() def test_add_fk_pk_column(self): class Foo(decl.DeferredReflection, fixtures.ComparableEntity, Base): __tablename__ = 'foo' __mapper_args__ = {"polymorphic_on":"type", "polymorphic_identity":"foo"} class Bar(Foo): __tablename__ = 'bar' __mapper_args__ = {"polymorphic_identity":"bar"} id = Column(Integer, ForeignKey('foo.id'), primary_key=True) decl.DeferredReflection.prepare(testing.db) self._roundtrip() SQLAlchemy-0.8.4/test/ext/test_associationproxy.py0000644000076500000240000013125312251150015023034 0ustar classicstaff00000000000000from sqlalchemy.testing import eq_, assert_raises import copy import pickle from sqlalchemy import * from sqlalchemy.orm import * from sqlalchemy.orm.collections import collection, attribute_mapped_collection from sqlalchemy.ext.associationproxy import * from sqlalchemy.ext.associationproxy import _AssociationList from sqlalchemy.testing import assert_raises_message from sqlalchemy.testing.util import gc_collect from sqlalchemy.testing import fixtures, AssertsCompiledSQL from sqlalchemy import testing from sqlalchemy.testing.schema import Table, Column class DictCollection(dict): @collection.appender def append(self, obj): self[obj.foo] = obj @collection.remover def remove(self, obj): del self[obj.foo] class SetCollection(set): pass class ListCollection(list): pass class ObjectCollection(object): def __init__(self): self.values = list() @collection.appender def append(self, obj): self.values.append(obj) @collection.remover def remove(self, obj): self.values.remove(obj) def __iter__(self): return iter(self.values) class _CollectionOperations(fixtures.TestBase): def setup(self): collection_class = self.collection_class metadata = MetaData(testing.db) parents_table = Table('Parent', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', String(128))) children_table = Table('Children', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('parent_id', Integer, ForeignKey('Parent.id')), Column('foo', String(128)), Column('name', String(128))) class Parent(object): children = association_proxy('_children', 'name') def __init__(self, name): self.name = name class Child(object): if collection_class and issubclass(collection_class, dict): def __init__(self, foo, name): self.foo = foo self.name = name else: def __init__(self, name): self.name = name mapper(Parent, parents_table, properties={ '_children': relationship(Child, lazy='joined', collection_class=collection_class)}) mapper(Child, children_table) metadata.create_all() self.metadata = metadata self.session = create_session() self.Parent, self.Child = Parent, Child def teardown(self): self.metadata.drop_all() def roundtrip(self, obj): if obj not in self.session: self.session.add(obj) self.session.flush() id, type_ = obj.id, type(obj) self.session.expunge_all() return self.session.query(type_).get(id) def _test_sequence_ops(self): Parent, Child = self.Parent, self.Child p1 = Parent('P1') self.assert_(not p1._children) 
self.assert_(not p1.children) ch = Child('regular') p1._children.append(ch) self.assert_(ch in p1._children) self.assert_(len(p1._children) == 1) self.assert_(p1.children) self.assert_(len(p1.children) == 1) self.assert_(ch not in p1.children) self.assert_('regular' in p1.children) p1.children.append('proxied') self.assert_('proxied' in p1.children) self.assert_('proxied' not in p1._children) self.assert_(len(p1.children) == 2) self.assert_(len(p1._children) == 2) self.assert_(p1._children[0].name == 'regular') self.assert_(p1._children[1].name == 'proxied') del p1._children[1] self.assert_(len(p1._children) == 1) self.assert_(len(p1.children) == 1) self.assert_(p1._children[0] == ch) del p1.children[0] self.assert_(len(p1._children) == 0) self.assert_(len(p1.children) == 0) p1.children = ['a', 'b', 'c'] self.assert_(len(p1._children) == 3) self.assert_(len(p1.children) == 3) del ch p1 = self.roundtrip(p1) self.assert_(len(p1._children) == 3) self.assert_(len(p1.children) == 3) popped = p1.children.pop() self.assert_(len(p1.children) == 2) self.assert_(popped not in p1.children) p1 = self.roundtrip(p1) self.assert_(len(p1.children) == 2) self.assert_(popped not in p1.children) p1.children[1] = 'changed-in-place' self.assert_(p1.children[1] == 'changed-in-place') inplace_id = p1._children[1].id p1 = self.roundtrip(p1) self.assert_(p1.children[1] == 'changed-in-place') assert p1._children[1].id == inplace_id p1.children.append('changed-in-place') self.assert_(p1.children.count('changed-in-place') == 2) p1.children.remove('changed-in-place') self.assert_(p1.children.count('changed-in-place') == 1) p1 = self.roundtrip(p1) self.assert_(p1.children.count('changed-in-place') == 1) p1._children = [] self.assert_(len(p1.children) == 0) after = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j'] p1.children = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j'] self.assert_(len(p1.children) == 10) self.assert_([c.name for c in p1._children] == after) p1.children[2:6] = ['x'] * 4 after = ['a', 'b', 'x', 'x', 'x', 'x', 'g', 'h', 'i', 'j'] self.assert_(p1.children == after) self.assert_([c.name for c in p1._children] == after) p1.children[2:6] = ['y'] after = ['a', 'b', 'y', 'g', 'h', 'i', 'j'] self.assert_(p1.children == after) self.assert_([c.name for c in p1._children] == after) p1.children[2:3] = ['z'] * 4 after = ['a', 'b', 'z', 'z', 'z', 'z', 'g', 'h', 'i', 'j'] self.assert_(p1.children == after) self.assert_([c.name for c in p1._children] == after) p1.children[2::2] = ['O'] * 4 after = ['a', 'b', 'O', 'z', 'O', 'z', 'O', 'h', 'O', 'j'] self.assert_(p1.children == after) self.assert_([c.name for c in p1._children] == after) assert_raises(TypeError, set, [p1.children]) p1.children *= 0 after = [] self.assert_(p1.children == after) self.assert_([c.name for c in p1._children] == after) p1.children += ['a', 'b'] after = ['a', 'b'] self.assert_(p1.children == after) self.assert_([c.name for c in p1._children] == after) p1.children += ['c'] after = ['a', 'b', 'c'] self.assert_(p1.children == after) self.assert_([c.name for c in p1._children] == after) p1.children *= 1 after = ['a', 'b', 'c'] self.assert_(p1.children == after) self.assert_([c.name for c in p1._children] == after) p1.children *= 2 after = ['a', 'b', 'c', 'a', 'b', 'c'] self.assert_(p1.children == after) self.assert_([c.name for c in p1._children] == after) p1.children = ['a'] after = ['a'] self.assert_(p1.children == after) self.assert_([c.name for c in p1._children] == after) self.assert_((p1.children * 2) == ['a', 'a']) self.assert_((2 * 
p1.children) == ['a', 'a']) self.assert_((p1.children * 0) == []) self.assert_((0 * p1.children) == []) self.assert_((p1.children + ['b']) == ['a', 'b']) self.assert_((['b'] + p1.children) == ['b', 'a']) try: p1.children + 123 assert False except TypeError: assert True class DefaultTest(_CollectionOperations): def __init__(self, *args, **kw): super(DefaultTest, self).__init__(*args, **kw) self.collection_class = None def test_sequence_ops(self): self._test_sequence_ops() class ListTest(_CollectionOperations): def __init__(self, *args, **kw): super(ListTest, self).__init__(*args, **kw) self.collection_class = list def test_sequence_ops(self): self._test_sequence_ops() class CustomListTest(ListTest): def __init__(self, *args, **kw): super(CustomListTest, self).__init__(*args, **kw) self.collection_class = list # No-can-do until ticket #213 class DictTest(_CollectionOperations): pass class CustomDictTest(DictTest): def __init__(self, *args, **kw): super(DictTest, self).__init__(*args, **kw) self.collection_class = DictCollection def test_mapping_ops(self): Parent, Child = self.Parent, self.Child p1 = Parent('P1') self.assert_(not p1._children) self.assert_(not p1.children) ch = Child('a', 'regular') p1._children.append(ch) self.assert_(ch in p1._children.values()) self.assert_(len(p1._children) == 1) self.assert_(p1.children) self.assert_(len(p1.children) == 1) self.assert_(ch not in p1.children) self.assert_('a' in p1.children) self.assert_(p1.children['a'] == 'regular') self.assert_(p1._children['a'] == ch) p1.children['b'] = 'proxied' self.assert_('proxied' in p1.children.values()) self.assert_('b' in p1.children) self.assert_('proxied' not in p1._children) self.assert_(len(p1.children) == 2) self.assert_(len(p1._children) == 2) self.assert_(p1._children['a'].name == 'regular') self.assert_(p1._children['b'].name == 'proxied') del p1._children['b'] self.assert_(len(p1._children) == 1) self.assert_(len(p1.children) == 1) self.assert_(p1._children['a'] == ch) del p1.children['a'] self.assert_(len(p1._children) == 0) self.assert_(len(p1.children) == 0) p1.children = {'d': 'v d', 'e': 'v e', 'f': 'v f'} self.assert_(len(p1._children) == 3) self.assert_(len(p1.children) == 3) self.assert_(set(p1.children) == set(['d', 'e', 'f'])) del ch p1 = self.roundtrip(p1) self.assert_(len(p1._children) == 3) self.assert_(len(p1.children) == 3) p1.children['e'] = 'changed-in-place' self.assert_(p1.children['e'] == 'changed-in-place') inplace_id = p1._children['e'].id p1 = self.roundtrip(p1) self.assert_(p1.children['e'] == 'changed-in-place') self.assert_(p1._children['e'].id == inplace_id) p1._children = {} self.assert_(len(p1.children) == 0) try: p1._children = [] self.assert_(False) except TypeError: self.assert_(True) try: p1._children = None self.assert_(False) except TypeError: self.assert_(True) assert_raises(TypeError, set, [p1.children]) class SetTest(_CollectionOperations): def __init__(self, *args, **kw): super(SetTest, self).__init__(*args, **kw) self.collection_class = set def test_set_operations(self): Parent, Child = self.Parent, self.Child p1 = Parent('P1') self.assert_(not p1._children) self.assert_(not p1.children) ch1 = Child('regular') p1._children.add(ch1) self.assert_(ch1 in p1._children) self.assert_(len(p1._children) == 1) self.assert_(p1.children) self.assert_(len(p1.children) == 1) self.assert_(ch1 not in p1.children) self.assert_('regular' in p1.children) p1.children.add('proxied') self.assert_('proxied' in p1.children) self.assert_('proxied' not in p1._children) 
self.assert_(len(p1.children) == 2) self.assert_(len(p1._children) == 2) self.assert_(set([o.name for o in p1._children]) == set(['regular', 'proxied'])) ch2 = None for o in p1._children: if o.name == 'proxied': ch2 = o break p1._children.remove(ch2) self.assert_(len(p1._children) == 1) self.assert_(len(p1.children) == 1) self.assert_(p1._children == set([ch1])) p1.children.remove('regular') self.assert_(len(p1._children) == 0) self.assert_(len(p1.children) == 0) p1.children = ['a', 'b', 'c'] self.assert_(len(p1._children) == 3) self.assert_(len(p1.children) == 3) del ch1 p1 = self.roundtrip(p1) self.assert_(len(p1._children) == 3) self.assert_(len(p1.children) == 3) self.assert_('a' in p1.children) self.assert_('b' in p1.children) self.assert_('d' not in p1.children) self.assert_(p1.children == set(['a', 'b', 'c'])) assert_raises( KeyError, p1.children.remove, "d" ) self.assert_(len(p1.children) == 3) p1.children.discard('d') self.assert_(len(p1.children) == 3) p1 = self.roundtrip(p1) self.assert_(len(p1.children) == 3) popped = p1.children.pop() self.assert_(len(p1.children) == 2) self.assert_(popped not in p1.children) p1 = self.roundtrip(p1) self.assert_(len(p1.children) == 2) self.assert_(popped not in p1.children) p1.children = ['a', 'b', 'c'] p1 = self.roundtrip(p1) self.assert_(p1.children == set(['a', 'b', 'c'])) p1.children.discard('b') p1 = self.roundtrip(p1) self.assert_(p1.children == set(['a', 'c'])) p1.children.remove('a') p1 = self.roundtrip(p1) self.assert_(p1.children == set(['c'])) p1._children = set() self.assert_(len(p1.children) == 0) try: p1._children = [] self.assert_(False) except TypeError: self.assert_(True) try: p1._children = None self.assert_(False) except TypeError: self.assert_(True) assert_raises(TypeError, set, [p1.children]) def test_set_comparisons(self): Parent, Child = self.Parent, self.Child p1 = Parent('P1') p1.children = ['a', 'b', 'c'] control = set(['a', 'b', 'c']) for other in (set(['a', 'b', 'c']), set(['a', 'b', 'c', 'd']), set(['a']), set(['a', 'b']), set(['c', 'd']), set(['e', 'f', 'g']), set()): eq_(p1.children.union(other), control.union(other)) eq_(p1.children.difference(other), control.difference(other)) eq_((p1.children - other), (control - other)) eq_(p1.children.intersection(other), control.intersection(other)) eq_(p1.children.symmetric_difference(other), control.symmetric_difference(other)) eq_(p1.children.issubset(other), control.issubset(other)) eq_(p1.children.issuperset(other), control.issuperset(other)) self.assert_((p1.children == other) == (control == other)) self.assert_((p1.children != other) == (control != other)) self.assert_((p1.children < other) == (control < other)) self.assert_((p1.children <= other) == (control <= other)) self.assert_((p1.children > other) == (control > other)) self.assert_((p1.children >= other) == (control >= other)) def test_set_mutation(self): Parent, Child = self.Parent, self.Child # mutations for op in ('update', 'intersection_update', 'difference_update', 'symmetric_difference_update'): for base in (['a', 'b', 'c'], []): for other in (set(['a', 'b', 'c']), set(['a', 'b', 'c', 'd']), set(['a']), set(['a', 'b']), set(['c', 'd']), set(['e', 'f', 'g']), set()): p = Parent('p') p.children = base[:] control = set(base[:]) getattr(p.children, op)(other) getattr(control, op)(other) try: self.assert_(p.children == control) except: print 'Test %s.%s(%s):' % (set(base), op, other) print 'want', repr(control) print 'got', repr(p.children) raise p = self.roundtrip(p) try: self.assert_(p.children == control) 
except: print 'Test %s.%s(%s):' % (base, op, other) print 'want', repr(control) print 'got', repr(p.children) raise # in-place mutations for op in ('|=', '-=', '&=', '^='): for base in (['a', 'b', 'c'], []): for other in (set(['a', 'b', 'c']), set(['a', 'b', 'c', 'd']), set(['a']), set(['a', 'b']), set(['c', 'd']), set(['e', 'f', 'g']), frozenset(['e', 'f', 'g']), set()): p = Parent('p') p.children = base[:] control = set(base[:]) exec "p.children %s other" % op exec "control %s other" % op try: self.assert_(p.children == control) except: print 'Test %s %s %s:' % (set(base), op, other) print 'want', repr(control) print 'got', repr(p.children) raise p = self.roundtrip(p) try: self.assert_(p.children == control) except: print 'Test %s %s %s:' % (base, op, other) print 'want', repr(control) print 'got', repr(p.children) raise class CustomSetTest(SetTest): def __init__(self, *args, **kw): super(CustomSetTest, self).__init__(*args, **kw) self.collection_class = SetCollection class CustomObjectTest(_CollectionOperations): def __init__(self, *args, **kw): super(CustomObjectTest, self).__init__(*args, **kw) self.collection_class = ObjectCollection def test_basic(self): Parent, Child = self.Parent, self.Child p = Parent('p1') self.assert_(len(list(p.children)) == 0) p.children.append('child') self.assert_(len(list(p.children)) == 1) p = self.roundtrip(p) self.assert_(len(list(p.children)) == 1) # We didn't provide an alternate _AssociationList implementation # for our ObjectCollection, so indexing will fail. assert_raises( TypeError, p.children.__getitem__, 1 ) class ProxyFactoryTest(ListTest): def setup(self): metadata = MetaData(testing.db) parents_table = Table('Parent', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', String(128))) children_table = Table('Children', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('parent_id', Integer, ForeignKey('Parent.id')), Column('foo', String(128)), Column('name', String(128))) class CustomProxy(_AssociationList): def __init__( self, lazy_collection, creator, value_attr, parent, ): getter, setter = parent._default_getset(lazy_collection) _AssociationList.__init__( self, lazy_collection, creator, getter, setter, parent, ) class Parent(object): children = association_proxy('_children', 'name', proxy_factory=CustomProxy, proxy_bulk_set=CustomProxy.extend ) def __init__(self, name): self.name = name class Child(object): def __init__(self, name): self.name = name mapper(Parent, parents_table, properties={ '_children': relationship(Child, lazy='joined', collection_class=list)}) mapper(Child, children_table) metadata.create_all() self.metadata = metadata self.session = create_session() self.Parent, self.Child = Parent, Child def test_sequence_ops(self): self._test_sequence_ops() class ScalarTest(fixtures.TestBase): def test_scalar_proxy(self): metadata = MetaData(testing.db) parents_table = Table('Parent', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', String(128))) children_table = Table('Children', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('parent_id', Integer, ForeignKey('Parent.id')), Column('foo', String(128)), Column('bar', String(128)), Column('baz', String(128))) class Parent(object): foo = association_proxy('child', 'foo') bar = association_proxy('child', 'bar', creator=lambda v: Child(bar=v)) baz = association_proxy('child', 'baz', creator=lambda v: Child(baz=v)) def 
__init__(self, name): self.name = name class Child(object): def __init__(self, **kw): for attr in kw: setattr(self, attr, kw[attr]) mapper(Parent, parents_table, properties={ 'child': relationship(Child, lazy='joined', backref='parent', uselist=False)}) mapper(Child, children_table) metadata.create_all() session = create_session() def roundtrip(obj): if obj not in session: session.add(obj) session.flush() id, type_ = obj.id, type(obj) session.expunge_all() return session.query(type_).get(id) p = Parent('p') # No child assert_raises( AttributeError, getattr, p, "foo" ) p.child = Child(foo='a', bar='b', baz='c') self.assert_(p.foo == 'a') self.assert_(p.bar == 'b') self.assert_(p.baz == 'c') p.bar = 'x' self.assert_(p.foo == 'a') self.assert_(p.bar == 'x') self.assert_(p.baz == 'c') p = roundtrip(p) self.assert_(p.foo == 'a') self.assert_(p.bar == 'x') self.assert_(p.baz == 'c') p.child = None # No child again assert_raises( AttributeError, getattr, p, "foo" ) # Bogus creator for this scalar type assert_raises( TypeError, setattr, p, "foo", "zzz" ) p.bar = 'yyy' self.assert_(p.foo is None) self.assert_(p.bar == 'yyy') self.assert_(p.baz is None) del p.child p = roundtrip(p) self.assert_(p.child is None) p.baz = 'xxx' self.assert_(p.foo is None) self.assert_(p.bar is None) self.assert_(p.baz == 'xxx') p = roundtrip(p) self.assert_(p.foo is None) self.assert_(p.bar is None) self.assert_(p.baz == 'xxx') # Ensure an immediate __set__ works. p2 = Parent('p2') p2.bar = 'quux' class LazyLoadTest(fixtures.TestBase): def setup(self): metadata = MetaData(testing.db) parents_table = Table('Parent', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', String(128))) children_table = Table('Children', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('parent_id', Integer, ForeignKey('Parent.id')), Column('foo', String(128)), Column('name', String(128))) class Parent(object): children = association_proxy('_children', 'name') def __init__(self, name): self.name = name class Child(object): def __init__(self, name): self.name = name mapper(Child, children_table) metadata.create_all() self.metadata = metadata self.session = create_session() self.Parent, self.Child = Parent, Child self.table = parents_table def teardown(self): self.metadata.drop_all() def roundtrip(self, obj): self.session.add(obj) self.session.flush() id, type_ = obj.id, type(obj) self.session.expunge_all() return self.session.query(type_).get(id) def test_lazy_list(self): Parent, Child = self.Parent, self.Child mapper(Parent, self.table, properties={ '_children': relationship(Child, lazy='select', collection_class=list)}) p = Parent('p') p.children = ['a', 'b', 'c'] p = self.roundtrip(p) # Is there a better way to ensure that the association_proxy # didn't convert a lazy load to an eager load? This does work though. 
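# [editor's note] an alternative spelling of the same check, assuming the
# runtime inspection API available as of 0.8 (sqlalchemy.inspect); shown
# here alongside the original __dict__-based assertions:
from sqlalchemy import inspect
self.assert_('_children' in inspect(p).unloaded)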
self.assert_('_children' not in p.__dict__) self.assert_(len(p._children) == 3) self.assert_('_children' in p.__dict__) def test_eager_list(self): Parent, Child = self.Parent, self.Child mapper(Parent, self.table, properties={ '_children': relationship(Child, lazy='joined', collection_class=list)}) p = Parent('p') p.children = ['a', 'b', 'c'] p = self.roundtrip(p) self.assert_('_children' in p.__dict__) self.assert_(len(p._children) == 3) def test_lazy_scalar(self): Parent, Child = self.Parent, self.Child mapper(Parent, self.table, properties={ '_children': relationship(Child, lazy='select', uselist=False)}) p = Parent('p') p.children = 'value' p = self.roundtrip(p) self.assert_('_children' not in p.__dict__) self.assert_(p._children is not None) def test_eager_scalar(self): Parent, Child = self.Parent, self.Child mapper(Parent, self.table, properties={ '_children': relationship(Child, lazy='joined', uselist=False)}) p = Parent('p') p.children = 'value' p = self.roundtrip(p) self.assert_('_children' in p.__dict__) self.assert_(p._children is not None) class Parent(object): def __init__(self, name): self.name = name class Child(object): def __init__(self, name): self.name = name class KVChild(object): def __init__(self, name, value): self.name = name self.value = value class ReconstitutionTest(fixtures.TestBase): def setup(self): metadata = MetaData(testing.db) parents = Table('parents', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', String(30))) children = Table('children', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('parent_id', Integer, ForeignKey('parents.id')), Column('name', String(30))) metadata.create_all() parents.insert().execute(name='p1') self.metadata = metadata self.parents = parents self.children = children Parent.kids = association_proxy('children', 'name') def teardown(self): self.metadata.drop_all() clear_mappers() def test_weak_identity_map(self): mapper(Parent, self.parents, properties=dict(children=relationship(Child))) mapper(Child, self.children) session = create_session(weak_identity_map=True) def add_child(parent_name, child_name): parent = \ session.query(Parent).filter_by(name=parent_name).one() parent.kids.append(child_name) add_child('p1', 'c1') gc_collect() add_child('p1', 'c2') session.flush() p = session.query(Parent).filter_by(name='p1').one() assert set(p.kids) == set(['c1', 'c2']), p.kids def test_copy(self): mapper(Parent, self.parents, properties=dict(children=relationship(Child))) mapper(Child, self.children) p = Parent('p1') p.kids.extend(['c1', 'c2']) p_copy = copy.copy(p) del p gc_collect() assert set(p_copy.kids) == set(['c1', 'c2']), p.kids def test_pickle_list(self): mapper(Parent, self.parents, properties=dict(children=relationship(Child))) mapper(Child, self.children) p = Parent('p1') p.kids.extend(['c1', 'c2']) r1 = pickle.loads(pickle.dumps(p)) assert r1.kids == ['c1', 'c2'] # can't do this without parent having a cycle #r2 = pickle.loads(pickle.dumps(p.kids)) #assert r2 == ['c1', 'c2'] def test_pickle_set(self): mapper(Parent, self.parents, properties=dict(children=relationship(Child, collection_class=set))) mapper(Child, self.children) p = Parent('p1') p.kids.update(['c1', 'c2']) r1 = pickle.loads(pickle.dumps(p)) assert r1.kids == set(['c1', 'c2']) # can't do this without parent having a cycle #r2 = pickle.loads(pickle.dumps(p.kids)) #assert r2 == set(['c1', 'c2']) def test_pickle_dict(self): mapper(Parent, self.parents, 
properties=dict(children=relationship(KVChild, collection_class= collections.mapped_collection(PickleKeyFunc('name'))))) mapper(KVChild, self.children) p = Parent('p1') p.kids.update({'c1': 'v1', 'c2': 'v2'}) assert p.kids == {'c1': 'c1', 'c2': 'c2'} r1 = pickle.loads(pickle.dumps(p)) assert r1.kids == {'c1': 'c1', 'c2': 'c2'} # can't do this without parent having a cycle #r2 = pickle.loads(pickle.dumps(p.kids)) #assert r2 == {'c1': 'c1', 'c2': 'c2'} class PickleKeyFunc(object): def __init__(self, name): self.name = name def __call__(self, obj): return getattr(obj, self.name) class ComparatorTest(fixtures.MappedTest, AssertsCompiledSQL): __dialect__ = 'default' run_inserts = 'once' run_deletes = None run_setup_mappers = 'once' run_setup_classes = 'once' @classmethod def define_tables(cls, metadata): Table('userkeywords', metadata, Column('keyword_id', Integer, ForeignKey('keywords.id'), primary_key=True), Column('user_id', Integer, ForeignKey('users.id')) ) Table('users', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', String(64)), Column('singular_id', Integer, ForeignKey('singular.id')) ) Table('keywords', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('keyword', String(64)), Column('singular_id', Integer, ForeignKey('singular.id')) ) Table('singular', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), ) @classmethod def setup_classes(cls): class User(cls.Comparable): def __init__(self, name): self.name = name # o2m -> m2o # uselist -> nonuselist keywords = association_proxy('user_keywords', 'keyword', creator=lambda k: UserKeyword(keyword=k)) # m2o -> o2m # nonuselist -> uselist singular_keywords = association_proxy('singular', 'keywords') class Keyword(cls.Comparable): def __init__(self, keyword): self.keyword = keyword # o2o -> m2o # nonuselist -> nonuselist user = association_proxy('user_keyword', 'user') class UserKeyword(cls.Comparable): def __init__(self, user=None, keyword=None): self.user = user self.keyword = keyword class Singular(cls.Comparable): def __init__(self, value=None): self.value = value @classmethod def setup_mappers(cls): users, Keyword, UserKeyword, singular, \ userkeywords, User, keywords, Singular = (cls.tables.users, cls.classes.Keyword, cls.classes.UserKeyword, cls.tables.singular, cls.tables.userkeywords, cls.classes.User, cls.tables.keywords, cls.classes.Singular) mapper(User, users, properties={ 'singular': relationship(Singular) }) mapper(Keyword, keywords, properties={ 'user_keyword': relationship(UserKeyword, uselist=False) }) mapper(UserKeyword, userkeywords, properties={ 'user': relationship(User, backref='user_keywords'), 'keyword': relationship(Keyword) }) mapper(Singular, singular, properties={ 'keywords': relationship(Keyword) }) @classmethod def insert_data(cls): UserKeyword, User, Keyword, Singular = (cls.classes.UserKeyword, cls.classes.User, cls.classes.Keyword, cls.classes.Singular) session = sessionmaker()() words = ( 'quick', 'brown', 'fox', 'jumped', 'over', 'the', 'lazy', ) for ii in range(4): user = User('user%d' % ii) user.singular = Singular() session.add(user) for jj in words[ii:ii + 3]: k = Keyword(jj) user.keywords.append(k) user.singular.keywords.append(k) orphan = Keyword('orphan') orphan.user_keyword = UserKeyword(keyword=orphan, user=None) session.add(orphan) session.commit() cls.u = user cls.kw = user.keywords[0] cls.session = session def _equivalent(self, q_proxy, q_direct): eq_(q_proxy.all(), 
q_direct.all()) def test_filter_any_kwarg_ul_nul(self): UserKeyword, User = self.classes.UserKeyword, self.classes.User self._equivalent(self.session.query(User). filter(User.keywords.any(keyword='jumped' )), self.session.query(User).filter( User.user_keywords.any( UserKeyword.keyword.has(keyword='jumped' )))) def test_filter_has_kwarg_nul_nul(self): UserKeyword, Keyword = self.classes.UserKeyword, self.classes.Keyword self._equivalent(self.session.query(Keyword). filter(Keyword.user.has(name='user2' )), self.session.query(Keyword). filter(Keyword.user_keyword.has( UserKeyword.user.has(name='user2' )))) def test_filter_has_kwarg_nul_ul(self): User, Singular = self.classes.User, self.classes.Singular self._equivalent( self.session.query(User).\ filter(User.singular_keywords.any(keyword='jumped')), self.session.query(User).\ filter( User.singular.has( Singular.keywords.any(keyword='jumped') ) ) ) def test_filter_any_criterion_ul_nul(self): UserKeyword, User, Keyword = (self.classes.UserKeyword, self.classes.User, self.classes.Keyword) self._equivalent(self.session.query(User). filter(User.keywords.any(Keyword.keyword == 'jumped')), self.session.query(User). filter(User.user_keywords.any( UserKeyword.keyword.has(Keyword.keyword == 'jumped')))) def test_filter_has_criterion_nul_nul(self): UserKeyword, User, Keyword = (self.classes.UserKeyword, self.classes.User, self.classes.Keyword) self._equivalent(self.session.query(Keyword). filter(Keyword.user.has(User.name == 'user2')), self.session.query(Keyword). filter(Keyword.user_keyword.has( UserKeyword.user.has(User.name == 'user2')))) def test_filter_any_criterion_nul_ul(self): User, Keyword, Singular = (self.classes.User, self.classes.Keyword, self.classes.Singular) self._equivalent( self.session.query(User).\ filter(User.singular_keywords.any(Keyword.keyword == 'jumped')), self.session.query(User).\ filter( User.singular.has( Singular.keywords.any(Keyword.keyword == 'jumped') ) ) ) def test_filter_contains_ul_nul(self): User = self.classes.User self._equivalent(self.session.query(User). filter(User.keywords.contains(self.kw)), self.session.query(User). filter(User.user_keywords.any(keyword=self.kw))) def test_filter_contains_nul_ul(self): User, Singular = self.classes.User, self.classes.Singular self._equivalent( self.session.query(User).filter( User.singular_keywords.contains(self.kw) ), self.session.query(User).filter( User.singular.has( Singular.keywords.contains(self.kw) ) ), ) def test_filter_eq_nul_nul(self): Keyword = self.classes.Keyword self._equivalent(self.session.query(Keyword).filter(Keyword.user == self.u), self.session.query(Keyword). filter(Keyword.user_keyword.has(user=self.u))) def test_filter_ne_nul_nul(self): Keyword = self.classes.Keyword self._equivalent(self.session.query(Keyword).filter(Keyword.user != self.u), self.session.query(Keyword). filter(not_(Keyword.user_keyword.has(user=self.u)))) def test_filter_eq_null_nul_nul(self): UserKeyword, Keyword = self.classes.UserKeyword, self.classes.Keyword self._equivalent( self.session.query(Keyword).filter(Keyword.user == None), self.session.query(Keyword). 
filter(Keyword.user_keyword.has(UserKeyword.user == None))) def test_filter_scalar_contains_fails_nul_nul(self): Keyword = self.classes.Keyword assert_raises(exc.InvalidRequestError, lambda: Keyword.user.contains(self.u)) def test_filter_scalar_any_fails_nul_nul(self): Keyword = self.classes.Keyword assert_raises(exc.InvalidRequestError, lambda: Keyword.user.any(name='user2')) def test_filter_collection_has_fails_ul_nul(self): User = self.classes.User assert_raises(exc.InvalidRequestError, lambda: User.keywords.has(keyword='quick')) def test_filter_collection_eq_fails_ul_nul(self): User = self.classes.User assert_raises(exc.InvalidRequestError, lambda: User.keywords == self.kw) def test_filter_collection_ne_fails_ul_nul(self): User = self.classes.User assert_raises(exc.InvalidRequestError, lambda: User.keywords != self.kw) def test_join_separate_attr(self): User = self.classes.User self.assert_compile( self.session.query(User).join( User.keywords.local_attr, User.keywords.remote_attr), "SELECT users.id AS users_id, users.name AS users_name, " "users.singular_id AS users_singular_id " "FROM users JOIN userkeywords ON users.id = " "userkeywords.user_id JOIN keywords ON keywords.id = " "userkeywords.keyword_id" ) def test_join_single_attr(self): User = self.classes.User self.assert_compile( self.session.query(User).join( *User.keywords.attr), "SELECT users.id AS users_id, users.name AS users_name, " "users.singular_id AS users_singular_id " "FROM users JOIN userkeywords ON users.id = " "userkeywords.user_id JOIN keywords ON keywords.id = " "userkeywords.keyword_id" ) class DictOfTupleUpdateTest(fixtures.TestBase): def setup(self): class B(object): def __init__(self, key, elem): self.key = key self.elem = elem class A(object): elements = association_proxy("orig", "elem", creator=B) m = MetaData() a = Table('a', m, Column('id', Integer, primary_key=True)) b = Table('b', m, Column('id', Integer, primary_key=True), Column('aid', Integer, ForeignKey('a.id'))) mapper(A, a, properties={ 'orig': relationship(B, collection_class=attribute_mapped_collection('key')) }) mapper(B, b) self.A = A self.B = B def test_update_one_elem_dict(self): a1 = self.A() a1.elements.update({("B", 3): 'elem2'}) eq_(a1.elements, {("B", 3): 'elem2'}) def test_update_multi_elem_dict(self): a1 = self.A() a1.elements.update({("B", 3): 'elem2', ("C", 4): "elem3"}) eq_(a1.elements, {("B", 3): 'elem2', ("C", 4): "elem3"}) def test_update_one_elem_list(self): a1 = self.A() a1.elements.update([(("B", 3), 'elem2')]) eq_(a1.elements, {("B", 3): 'elem2'}) def test_update_multi_elem_list(self): a1 = self.A() a1.elements.update([(("B", 3), 'elem2'), (("C", 4), "elem3")]) eq_(a1.elements, {("B", 3): 'elem2', ("C", 4): "elem3"}) def test_update_one_elem_varg(self): a1 = self.A() assert_raises_message( ValueError, "dictionary update sequence requires " "2-element tuples", a1.elements.update, (("B", 3), 'elem2') ) def test_update_multi_elem_varg(self): a1 = self.A() assert_raises_message( TypeError, "update expected at most 1 arguments, got 2", a1.elements.update, (("B", 3), 'elem2'), (("C", 4), "elem3") ) SQLAlchemy-0.8.4/test/ext/test_compiler.py0000644000076500000240000002637112251147172021246 0ustar classicstaff00000000000000from sqlalchemy import * from sqlalchemy.types import TypeEngine from sqlalchemy.sql.expression import ClauseElement, ColumnClause,\ FunctionElement, Select, \ BindParameter from sqlalchemy.schema import DDLElement, CreateColumn, CreateTable from sqlalchemy.ext.compiler import compiles, deregister from sqlalchemy 
import exc from sqlalchemy.sql import table, column, visitors from sqlalchemy.testing import assert_raises_message from sqlalchemy.testing import fixtures, AssertsCompiledSQL class UserDefinedTest(fixtures.TestBase, AssertsCompiledSQL): __dialect__ = 'default' def test_column(self): class MyThingy(ColumnClause): def __init__(self, arg= None): super(MyThingy, self).__init__(arg or 'MYTHINGY!') @compiles(MyThingy) def visit_thingy(thingy, compiler, **kw): return ">>%s<<" % thingy.name self.assert_compile( select([column('foo'), MyThingy()]), "SELECT foo, >>MYTHINGY!<<" ) self.assert_compile( select([MyThingy('x'), MyThingy('y')]).where(MyThingy() == 5), "SELECT >>x<<, >>y<< WHERE >>MYTHINGY!<< = :MYTHINGY!_1" ) def test_create_column_skip(self): @compiles(CreateColumn) def skip_xmin(element, compiler, **kw): if element.element.name == 'xmin': return None else: return compiler.visit_create_column(element, **kw) t = Table('t', MetaData(), Column('a', Integer), Column('xmin', Integer), Column('c', Integer)) self.assert_compile( CreateTable(t), "CREATE TABLE t (a INTEGER, c INTEGER)" ) def test_types(self): class MyType(TypeEngine): pass @compiles(MyType, 'sqlite') def visit_type(type, compiler, **kw): return "SQLITE_FOO" @compiles(MyType, 'postgresql') def visit_type(type, compiler, **kw): return "POSTGRES_FOO" from sqlalchemy.dialects.sqlite import base as sqlite from sqlalchemy.dialects.postgresql import base as postgresql self.assert_compile( MyType(), "SQLITE_FOO", dialect=sqlite.dialect() ) self.assert_compile( MyType(), "POSTGRES_FOO", dialect=postgresql.dialect() ) def test_stateful(self): class MyThingy(ColumnClause): def __init__(self): super(MyThingy, self).__init__('MYTHINGY!') @compiles(MyThingy) def visit_thingy(thingy, compiler, **kw): if not hasattr(compiler, 'counter'): compiler.counter = 0 compiler.counter += 1 return str(compiler.counter) self.assert_compile( select([column('foo'), MyThingy()]).order_by(desc(MyThingy())), "SELECT foo, 1 ORDER BY 2 DESC" ) self.assert_compile( select([MyThingy(), MyThingy()]).where(MyThingy() == 5), "SELECT 1, 2 WHERE 3 = :MYTHINGY!_1" ) def test_callout_to_compiler(self): class InsertFromSelect(ClauseElement): def __init__(self, table, select): self.table = table self.select = select @compiles(InsertFromSelect) def visit_insert_from_select(element, compiler, **kw): return "INSERT INTO %s (%s)" % ( compiler.process(element.table, asfrom=True), compiler.process(element.select) ) t1 = table("mytable", column('x'), column('y'), column('z')) self.assert_compile( InsertFromSelect( t1, select([t1]).where(t1.c.x>5) ), "INSERT INTO mytable (SELECT mytable.x, mytable.y, mytable.z " "FROM mytable WHERE mytable.x > :x_1)" ) def test_no_default_message(self): class MyThingy(ColumnClause): pass @compiles(MyThingy, "psotgresql") def visit_thingy(thingy, compiler, **kw): return "mythingy" assert_raises_message( exc.CompileError, " " "construct has no default compilation handler.", str, MyThingy('x') ) def test_annotations(self): """test that annotated clause constructs use the decorated class' compiler. 
""" t1 = table('t1', column('c1'), column('c2')) dispatch = Select._compiler_dispatch try: @compiles(Select) def compile(element, compiler, **kw): return "OVERRIDE" s1 = select([t1]) self.assert_compile( s1, "OVERRIDE" ) self.assert_compile( s1._annotate({}), "OVERRIDE" ) finally: Select._compiler_dispatch = dispatch if hasattr(Select, '_compiler_dispatcher'): del Select._compiler_dispatcher def test_dialect_specific(self): class AddThingy(DDLElement): __visit_name__ = 'add_thingy' class DropThingy(DDLElement): __visit_name__ = 'drop_thingy' @compiles(AddThingy, 'sqlite') def visit_add_thingy(thingy, compiler, **kw): return "ADD SPECIAL SL THINGY" @compiles(AddThingy) def visit_add_thingy(thingy, compiler, **kw): return "ADD THINGY" @compiles(DropThingy) def visit_drop_thingy(thingy, compiler, **kw): return "DROP THINGY" self.assert_compile(AddThingy(), "ADD THINGY" ) self.assert_compile(DropThingy(), "DROP THINGY" ) from sqlalchemy.dialects.sqlite import base self.assert_compile(AddThingy(), "ADD SPECIAL SL THINGY", dialect=base.dialect() ) self.assert_compile(DropThingy(), "DROP THINGY", dialect=base.dialect() ) @compiles(DropThingy, 'sqlite') def visit_drop_thingy(thingy, compiler, **kw): return "DROP SPECIAL SL THINGY" self.assert_compile(DropThingy(), "DROP SPECIAL SL THINGY", dialect=base.dialect() ) self.assert_compile(DropThingy(), "DROP THINGY", ) def test_functions(self): from sqlalchemy.dialects import postgresql class MyUtcFunction(FunctionElement): pass @compiles(MyUtcFunction) def visit_myfunc(element, compiler, **kw): return "utcnow()" @compiles(MyUtcFunction, 'postgresql') def visit_myfunc(element, compiler, **kw): return "timezone('utc', current_timestamp)" self.assert_compile( MyUtcFunction(), "utcnow()", use_default_dialect=True ) self.assert_compile( MyUtcFunction(), "timezone('utc', current_timestamp)", dialect=postgresql.dialect() ) def test_function_calls_base(self): from sqlalchemy.dialects import mssql class greatest(FunctionElement): type = Numeric() name = 'greatest' @compiles(greatest) def default_greatest(element, compiler, **kw): return compiler.visit_function(element) @compiles(greatest, 'mssql') def case_greatest(element, compiler, **kw): arg1, arg2 = list(element.clauses) return "CASE WHEN %s > %s THEN %s ELSE %s END" % ( compiler.process(arg1), compiler.process(arg2), compiler.process(arg1), compiler.process(arg2), ) self.assert_compile( greatest('a', 'b'), 'greatest(:greatest_1, :greatest_2)', use_default_dialect=True ) self.assert_compile( greatest('a', 'b'), "CASE WHEN :greatest_1 > :greatest_2 " "THEN :greatest_1 ELSE :greatest_2 END", dialect=mssql.dialect() ) def test_subclasses_one(self): class Base(FunctionElement): name = 'base' class Sub1(Base): name = 'sub1' class Sub2(Base): name = 'sub2' @compiles(Base) def visit_base(element, compiler, **kw): return element.name @compiles(Sub1) def visit_base(element, compiler, **kw): return "FOO" + element.name self.assert_compile( select([Sub1(), Sub2()]), 'SELECT FOOsub1, sub2', use_default_dialect=True ) def test_subclasses_two(self): class Base(FunctionElement): name = 'base' class Sub1(Base): name = 'sub1' @compiles(Base) def visit_base(element, compiler, **kw): return element.name class Sub2(Base): name = 'sub2' class SubSub1(Sub1): name = 'subsub1' self.assert_compile( select([Sub1(), Sub2(), SubSub1()]), 'SELECT sub1, sub2, subsub1', use_default_dialect=True ) @compiles(Sub1) def visit_base(element, compiler, **kw): return "FOO" + element.name self.assert_compile( select([Sub1(), Sub2(), SubSub1()]), 
'SELECT FOOsub1, sub2, FOOsubsub1', use_default_dialect=True ) class DefaultOnExistingTest(fixtures.TestBase, AssertsCompiledSQL): """Test replacement of default compilation on existing constructs.""" __dialect__ = 'default' def teardown(self): for cls in (Select, BindParameter): deregister(cls) def test_select(self): t1 = table('t1', column('c1'), column('c2')) @compiles(Select, 'sqlite') def compile(element, compiler, **kw): return "OVERRIDE" s1 = select([t1]) self.assert_compile( s1, "SELECT t1.c1, t1.c2 FROM t1", ) from sqlalchemy.dialects.sqlite import base as sqlite self.assert_compile( s1, "OVERRIDE", dialect=sqlite.dialect() ) def test_binds_in_select(self): t = table('t', column('a'), column('b'), column('c') ) @compiles(BindParameter) def gen_bind(element, compiler, **kw): return "BIND(%s)" % compiler.visit_bindparam(element, **kw) self.assert_compile( t.select().where(t.c.c == 5), "SELECT t.a, t.b, t.c FROM t WHERE t.c = BIND(:c_1)", use_default_dialect=True ) def test_binds_in_dml(self): t = table('t', column('a'), column('b'), column('c') ) @compiles(BindParameter) def gen_bind(element, compiler, **kw): return "BIND(%s)" % compiler.visit_bindparam(element, **kw) self.assert_compile( t.insert(), "INSERT INTO t (a, b) VALUES (BIND(:a), BIND(:b))", {'a':1, 'b':2}, use_default_dialect=True ) SQLAlchemy-0.8.4/test/ext/test_extendedattr.py0000644000076500000240000004165512251150015022117 0ustar classicstaff00000000000000from sqlalchemy.testing import eq_, assert_raises, assert_raises_message, ne_ from sqlalchemy import util from sqlalchemy.orm import attributes from sqlalchemy.orm.attributes import set_attribute, get_attribute, del_attribute from sqlalchemy.orm.instrumentation import is_instrumented from sqlalchemy.orm import clear_mappers from sqlalchemy import testing from sqlalchemy.testing import fixtures from sqlalchemy.ext import instrumentation from sqlalchemy.orm.instrumentation import register_class from sqlalchemy.testing.util import decorator from sqlalchemy.orm import events from sqlalchemy import event @decorator def modifies_instrumentation_finders(fn, *args, **kw): pristine = instrumentation.instrumentation_finders[:] try: fn(*args, **kw) finally: del instrumentation.instrumentation_finders[:] instrumentation.instrumentation_finders.extend(pristine) def with_lookup_strategy(strategy): @decorator def decorate(fn, *args, **kw): try: ext_instrumentation._install_instrumented_lookups() return fn(*args, **kw) finally: ext_instrumentation._reinstall_default_lookups() return decorate class MyTypesManager(instrumentation.InstrumentationManager): def instrument_attribute(self, class_, key, attr): pass def install_descriptor(self, class_, key, attr): pass def uninstall_descriptor(self, class_, key): pass def instrument_collection_class(self, class_, key, collection_class): return MyListLike def get_instance_dict(self, class_, instance): return instance._goofy_dict def initialize_instance_dict(self, class_, instance): instance.__dict__['_goofy_dict'] = {} def install_state(self, class_, instance, state): instance.__dict__['_my_state'] = state def state_getter(self, class_): return lambda instance: instance.__dict__['_my_state'] class MyListLike(list): # add @appender, @remover decorators as needed _sa_iterator = list.__iter__ def _sa_appender(self, item, _sa_initiator=None): if _sa_initiator is not False: self._sa_adapter.fire_append_event(item, _sa_initiator) list.append(self, item) append = _sa_appender def _sa_remover(self, item, _sa_initiator=None): 
self._sa_adapter.fire_pre_remove_event(_sa_initiator) if _sa_initiator is not False: self._sa_adapter.fire_remove_event(item, _sa_initiator) list.remove(self, item) remove = _sa_remover class MyBaseClass(object): __sa_instrumentation_manager__ = instrumentation.InstrumentationManager class MyClass(object): # This proves that a staticmethod will work here; don't # flatten this back to a class assignment! def __sa_instrumentation_manager__(cls): return MyTypesManager(cls) __sa_instrumentation_manager__ = staticmethod(__sa_instrumentation_manager__) # This proves SA can handle a class with non-string dict keys if not util.pypy and not util.jython: locals()[42] = 99 # Don't remove this line! def __init__(self, **kwargs): for k in kwargs: setattr(self, k, kwargs[k]) def __getattr__(self, key): if is_instrumented(self, key): return get_attribute(self, key) else: try: return self._goofy_dict[key] except KeyError: raise AttributeError(key) def __setattr__(self, key, value): if is_instrumented(self, key): set_attribute(self, key, value) else: self._goofy_dict[key] = value def __hasattr__(self, key): if is_instrumented(self, key): return True else: return key in self._goofy_dict def __delattr__(self, key): if is_instrumented(self, key): del_attribute(self, key) else: del self._goofy_dict[key] class UserDefinedExtensionTest(fixtures.ORMTest): @classmethod def teardown_class(cls): clear_mappers() instrumentation._reinstall_default_lookups() def test_instance_dict(self): class User(MyClass): pass register_class(User) attributes.register_attribute(User, 'user_id', uselist = False, useobject=False) attributes.register_attribute(User, 'user_name', uselist = False, useobject=False) attributes.register_attribute(User, 'email_address', uselist = False, useobject=False) u = User() u.user_id = 7 u.user_name = 'john' u.email_address = 'lala@123.com' self.assert_(u.__dict__ == {'_my_state':u._my_state, '_goofy_dict':{'user_id':7, 'user_name':'john', 'email_address':'lala@123.com'}}, u.__dict__) def test_basic(self): for base in (object, MyBaseClass, MyClass): class User(base): pass register_class(User) attributes.register_attribute(User, 'user_id', uselist = False, useobject=False) attributes.register_attribute(User, 'user_name', uselist = False, useobject=False) attributes.register_attribute(User, 'email_address', uselist = False, useobject=False) u = User() u.user_id = 7 u.user_name = 'john' u.email_address = 'lala@123.com' self.assert_(u.user_id == 7 and u.user_name == 'john' and u.email_address == 'lala@123.com') attributes.instance_state(u)._commit_all(attributes.instance_dict(u)) self.assert_(u.user_id == 7 and u.user_name == 'john' and u.email_address == 'lala@123.com') u.user_name = 'heythere' u.email_address = 'foo@bar.com' self.assert_(u.user_id == 7 and u.user_name == 'heythere' and u.email_address == 'foo@bar.com') def test_deferred(self): for base in (object, MyBaseClass, MyClass): class Foo(base): pass data = {'a':'this is a', 'b':12} def loader(state, keys): for k in keys: state.dict[k] = data[k] return attributes.ATTR_WAS_SET manager = register_class(Foo) manager.deferred_scalar_loader = loader attributes.register_attribute(Foo, 'a', uselist=False, useobject=False) attributes.register_attribute(Foo, 'b', uselist=False, useobject=False) if base is object: assert Foo not in instrumentation._instrumentation_factory._state_finders else: assert Foo in instrumentation._instrumentation_factory._state_finders f = Foo() attributes.instance_state(f)._expire(attributes.instance_dict(f), set()) eq_(f.a, 
"this is a") eq_(f.b, 12) f.a = "this is some new a" attributes.instance_state(f)._expire(attributes.instance_dict(f), set()) eq_(f.a, "this is a") eq_(f.b, 12) attributes.instance_state(f)._expire(attributes.instance_dict(f), set()) f.a = "this is another new a" eq_(f.a, "this is another new a") eq_(f.b, 12) attributes.instance_state(f)._expire(attributes.instance_dict(f), set()) eq_(f.a, "this is a") eq_(f.b, 12) del f.a eq_(f.a, None) eq_(f.b, 12) attributes.instance_state(f)._commit_all(attributes.instance_dict(f)) eq_(f.a, None) eq_(f.b, 12) def test_inheritance(self): """tests that attributes are polymorphic""" for base in (object, MyBaseClass, MyClass): class Foo(base):pass class Bar(Foo):pass register_class(Foo) register_class(Bar) def func1(state, passive): return "this is the foo attr" def func2(state, passive): return "this is the bar attr" def func3(state, passive): return "this is the shared attr" attributes.register_attribute(Foo, 'element', uselist=False, callable_=func1, useobject=True) attributes.register_attribute(Foo, 'element2', uselist=False, callable_=func3, useobject=True) attributes.register_attribute(Bar, 'element', uselist=False, callable_=func2, useobject=True) x = Foo() y = Bar() assert x.element == 'this is the foo attr' assert y.element == 'this is the bar attr', y.element assert x.element2 == 'this is the shared attr' assert y.element2 == 'this is the shared attr' def test_collection_with_backref(self): for base in (object, MyBaseClass, MyClass): class Post(base):pass class Blog(base):pass register_class(Post) register_class(Blog) attributes.register_attribute(Post, 'blog', uselist=False, backref='posts', trackparent=True, useobject=True) attributes.register_attribute(Blog, 'posts', uselist=True, backref='blog', trackparent=True, useobject=True) b = Blog() (p1, p2, p3) = (Post(), Post(), Post()) b.posts.append(p1) b.posts.append(p2) b.posts.append(p3) self.assert_(b.posts == [p1, p2, p3]) self.assert_(p2.blog is b) p3.blog = None self.assert_(b.posts == [p1, p2]) p4 = Post() p4.blog = b self.assert_(b.posts == [p1, p2, p4]) p4.blog = b p4.blog = b self.assert_(b.posts == [p1, p2, p4]) # assert no failure removing None p5 = Post() p5.blog = None del p5.blog def test_history(self): for base in (object, MyBaseClass, MyClass): class Foo(base): pass class Bar(base): pass register_class(Foo) register_class(Bar) attributes.register_attribute(Foo, "name", uselist=False, useobject=False) attributes.register_attribute(Foo, "bars", uselist=True, trackparent=True, useobject=True) attributes.register_attribute(Bar, "name", uselist=False, useobject=False) f1 = Foo() f1.name = 'f1' eq_(attributes.get_state_history(attributes.instance_state(f1), 'name'), (['f1'], (), ())) b1 = Bar() b1.name = 'b1' f1.bars.append(b1) eq_(attributes.get_state_history(attributes.instance_state(f1), 'bars'), ([b1], [], [])) attributes.instance_state(f1)._commit_all(attributes.instance_dict(f1)) attributes.instance_state(b1)._commit_all(attributes.instance_dict(b1)) eq_(attributes.get_state_history(attributes.instance_state(f1), 'name'), ((), ['f1'], ())) eq_(attributes.get_state_history(attributes.instance_state(f1), 'bars'), ((), [b1], ())) f1.name = 'f1mod' b2 = Bar() b2.name = 'b2' f1.bars.append(b2) eq_(attributes.get_state_history(attributes.instance_state(f1), 'name'), (['f1mod'], (), ['f1'])) eq_(attributes.get_state_history(attributes.instance_state(f1), 'bars'), ([b2], [b1], [])) f1.bars.remove(b1) eq_(attributes.get_state_history(attributes.instance_state(f1), 'bars'), ([b2], [], 
[b1])) def test_null_instrumentation(self): class Foo(MyBaseClass): pass register_class(Foo) attributes.register_attribute(Foo, "name", uselist=False, useobject=False) attributes.register_attribute(Foo, "bars", uselist=True, trackparent=True, useobject=True) assert Foo.name == attributes.manager_of_class(Foo)['name'] assert Foo.bars == attributes.manager_of_class(Foo)['bars'] def test_alternate_finders(self): """Ensure the generic finder front-end deals with edge cases.""" class Unknown(object): pass class Known(MyBaseClass): pass register_class(Known) k, u = Known(), Unknown() assert instrumentation.manager_of_class(Unknown) is None assert instrumentation.manager_of_class(Known) is not None assert instrumentation.manager_of_class(None) is None assert attributes.instance_state(k) is not None assert_raises((AttributeError, KeyError), attributes.instance_state, u) assert_raises((AttributeError, KeyError), attributes.instance_state, None) class FinderTest(fixtures.ORMTest): def test_standard(self): class A(object): pass register_class(A) eq_(type(instrumentation.manager_of_class(A)), instrumentation.ClassManager) def test_nativeext_interfaceexact(self): class A(object): __sa_instrumentation_manager__ = instrumentation.InstrumentationManager register_class(A) ne_(type(instrumentation.manager_of_class(A)), instrumentation.ClassManager) def test_nativeext_submanager(self): class Mine(instrumentation.ClassManager): pass class A(object): __sa_instrumentation_manager__ = Mine register_class(A) eq_(type(instrumentation.manager_of_class(A)), Mine) @modifies_instrumentation_finders def test_customfinder_greedy(self): class Mine(instrumentation.ClassManager): pass class A(object): pass def find(cls): return Mine instrumentation.instrumentation_finders.insert(0, find) register_class(A) eq_(type(instrumentation.manager_of_class(A)), Mine) @modifies_instrumentation_finders def test_customfinder_pass(self): class A(object): pass def find(cls): return None instrumentation.instrumentation_finders.insert(0, find) register_class(A) eq_(type(instrumentation.manager_of_class(A)), instrumentation.ClassManager) class InstrumentationCollisionTest(fixtures.ORMTest): def test_none(self): class A(object): pass register_class(A) mgr_factory = lambda cls: instrumentation.ClassManager(cls) class B(object): __sa_instrumentation_manager__ = staticmethod(mgr_factory) register_class(B) class C(object): __sa_instrumentation_manager__ = instrumentation.ClassManager register_class(C) def test_single_down(self): class A(object): pass register_class(A) mgr_factory = lambda cls: instrumentation.ClassManager(cls) class B(A): __sa_instrumentation_manager__ = staticmethod(mgr_factory) assert_raises_message(TypeError, "multiple instrumentation implementations", register_class, B) def test_single_up(self): class A(object): pass # delay registration mgr_factory = lambda cls: instrumentation.ClassManager(cls) class B(A): __sa_instrumentation_manager__ = staticmethod(mgr_factory) register_class(B) assert_raises_message(TypeError, "multiple instrumentation implementations", register_class, A) def test_diamond_b1(self): mgr_factory = lambda cls: instrumentation.ClassManager(cls) class A(object): pass class B1(A): pass class B2(A): __sa_instrumentation_manager__ = staticmethod(mgr_factory) class C(object): pass assert_raises_message(TypeError, "multiple instrumentation implementations", register_class, B1) def test_diamond_b2(self): mgr_factory = lambda cls: instrumentation.ClassManager(cls) class A(object): pass class B1(A): pass class 
B2(A): __sa_instrumentation_manager__ = staticmethod(mgr_factory) class C(object): pass register_class(B2) assert_raises_message(TypeError, "multiple instrumentation implementations", register_class, B1) def test_diamond_c_b(self): mgr_factory = lambda cls: instrumentation.ClassManager(cls) class A(object): pass class B1(A): pass class B2(A): __sa_instrumentation_manager__ = staticmethod(mgr_factory) class C(object): pass register_class(C) assert_raises_message(TypeError, "multiple instrumentation implementations", register_class, B1) class ExtendedEventsTest(fixtures.ORMTest): """Allow custom Events implementations.""" @modifies_instrumentation_finders def test_subclassed(self): class MyEvents(events.InstanceEvents): pass class MyClassManager(instrumentation.ClassManager): dispatch = event.dispatcher(MyEvents) instrumentation.instrumentation_finders.insert(0, lambda cls: MyClassManager) class A(object): pass register_class(A) manager = instrumentation.manager_of_class(A) assert issubclass(manager.dispatch._parent_cls.__dict__['dispatch'].events, MyEvents) SQLAlchemy-0.8.4/test/ext/test_horizontal_shard.py0000644000076500000240000002117212251147172023000 0ustar classicstaff00000000000000import datetime, os from sqlalchemy import * from sqlalchemy import event from sqlalchemy import sql, util from sqlalchemy.orm import * from sqlalchemy.ext.horizontal_shard import ShardedSession from sqlalchemy.sql import operators from sqlalchemy import pool from sqlalchemy.testing import fixtures from sqlalchemy import testing from sqlalchemy.testing.engines import testing_engine from sqlalchemy.testing import eq_ from nose import SkipTest # TODO: ShardTest can be turned into a base for further subclasses class ShardTest(object): __skip_if__ = (lambda: util.win32,) __requires__ = 'sqlite', schema = None def setUp(self): global db1, db2, db3, db4, weather_locations, weather_reports db1, db2, db3, db4 = self._init_dbs() meta = MetaData() ids = Table('ids', meta, Column('nextid', Integer, nullable=False)) def id_generator(ctx): # in reality, might want to use a separate transaction for this. 
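# [editor's note] a sketch of that variant -- illustrative only, reusing the
# "ids" counter table defined above, but running the counter bump in its own
# short-lived transaction rather than the contextual connection used below:
#
#     def id_generator(ctx):
#         conn = db1.connect()
#         try:
#             trans = conn.begin()
#             nextid = conn.execute(ids.select(for_update=True)).scalar()
#             conn.execute(ids.update(values={ids.c.nextid: ids.c.nextid + 1}))
#             trans.commit()
#         finally:
#             conn.close()
#         return nextid
#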
c = db1.contextual_connect() nextid = c.execute(ids.select(for_update=True)).scalar() c.execute(ids.update(values={ids.c.nextid: ids.c.nextid + 1})) return nextid weather_locations = Table("weather_locations", meta, Column('id', Integer, primary_key=True, default=id_generator), Column('continent', String(30), nullable=False), Column('city', String(50), nullable=False), schema=self.schema ) weather_reports = Table( 'weather_reports', meta, Column('id', Integer, primary_key=True), Column('location_id', Integer, ForeignKey(weather_locations.c.id)), Column('temperature', Float), Column('report_time', DateTime, default=datetime.datetime.now), schema=self.schema ) for db in (db1, db2, db3, db4): meta.create_all(db) db1.execute(ids.insert(), nextid=1) self.setup_session() self.setup_mappers() @classmethod def setup_session(cls): global create_session shard_lookup = { 'North America': 'north_america', 'Asia': 'asia', 'Europe': 'europe', 'South America': 'south_america', } def shard_chooser(mapper, instance, clause=None): if isinstance(instance, WeatherLocation): return shard_lookup[instance.continent] else: return shard_chooser(mapper, instance.location) def id_chooser(query, ident): return ['north_america', 'asia', 'europe', 'south_america'] def query_chooser(query): ids = [] class FindContinent(sql.ClauseVisitor): def visit_binary(self, binary): if binary.left.shares_lineage( weather_locations.c.continent): if binary.operator == operators.eq: ids.append(shard_lookup[binary.right.value]) elif binary.operator == operators.in_op: for bind in binary.right.clauses: ids.append(shard_lookup[bind.value]) if query._criterion is not None: FindContinent().traverse(query._criterion) if len(ids) == 0: return ['north_america', 'asia', 'europe', 'south_america'] else: return ids create_session = sessionmaker(class_=ShardedSession, autoflush=True, autocommit=False) create_session.configure(shards={ 'north_america': db1, 'asia': db2, 'europe': db3, 'south_america': db4, }, shard_chooser=shard_chooser, id_chooser=id_chooser, query_chooser=query_chooser) @classmethod def setup_mappers(cls): global WeatherLocation, Report class WeatherLocation(object): def __init__(self, continent, city): self.continent = continent self.city = city class Report(object): def __init__(self, temperature): self.temperature = temperature mapper(WeatherLocation, weather_locations, properties={ 'reports': relationship(Report, backref='location'), 'city': deferred(weather_locations.c.city), }) mapper(Report, weather_reports) def _fixture_data(self): tokyo = WeatherLocation('Asia', 'Tokyo') newyork = WeatherLocation('North America', 'New York') toronto = WeatherLocation('North America', 'Toronto') london = WeatherLocation('Europe', 'London') dublin = WeatherLocation('Europe', 'Dublin') brasilia = WeatherLocation('South America', 'Brasila') quito = WeatherLocation('South America', 'Quito') tokyo.reports.append(Report(80.0)) newyork.reports.append(Report(75)) quito.reports.append(Report(85)) sess = create_session() for c in [ tokyo, newyork, toronto, london, dublin, brasilia, quito, ]: sess.add(c) sess.commit() sess.close() return sess def test_roundtrip(self): sess = self._fixture_data() tokyo = sess.query(WeatherLocation).filter_by(city="Tokyo").one() tokyo.city # reload 'city' attribute on tokyo sess.expunge_all() eq_(db2.execute(weather_locations.select()).fetchall(), [(1, 'Asia', 'Tokyo')]) eq_(db1.execute(weather_locations.select()).fetchall(), [(2, 'North America', 'New York'), (3, 'North America', 'Toronto' )]) 
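# [editor's note] the two eq_() checks above query db2 ("asia") and db1
# ("north_america") directly, confirming that each row landed on the shard
# picked by shard_chooser(); the check below goes through the ShardedSession
# instead, passing shard_id explicitly.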
eq_(sess.execute(weather_locations.select(), shard_id='asia' ).fetchall(), [(1, 'Asia', 'Tokyo')]) t = sess.query(WeatherLocation).get(tokyo.id) eq_(t.city, tokyo.city) eq_(t.reports[0].temperature, 80.0) north_american_cities = \ sess.query(WeatherLocation).filter(WeatherLocation.continent == 'North America') eq_(set([c.city for c in north_american_cities]), set(['New York', 'Toronto'])) asia_and_europe = \ sess.query(WeatherLocation).filter( WeatherLocation.continent.in_(['Europe', 'Asia'])) eq_(set([c.city for c in asia_and_europe]), set(['Tokyo', 'London', 'Dublin'])) def test_shard_id_event(self): canary = [] def load(instance, ctx): canary.append(ctx.attributes["shard_id"]) event.listen(WeatherLocation, "load", load) sess = self._fixture_data() tokyo = sess.query(WeatherLocation).\ filter_by(city="Tokyo").set_shard("asia").one() sess.query(WeatherLocation).all() eq_( canary, ['asia', 'north_america', 'north_america', 'europe', 'europe', 'south_america', 'south_america'] ) class DistinctEngineShardTest(ShardTest, fixtures.TestBase): def _init_dbs(self): db1 = testing_engine('sqlite:///shard1.db', options=dict(pool_threadlocal=True)) db2 = testing_engine('sqlite:///shard2.db') db3 = testing_engine('sqlite:///shard3.db') db4 = testing_engine('sqlite:///shard4.db') return db1, db2, db3, db4 def tearDown(self): clear_mappers() for db in (db1, db2, db3, db4): db.connect().invalidate() for i in range(1, 5): os.remove("shard%d.db" % i) class AttachedFileShardTest(ShardTest, fixtures.TestBase): schema = "changeme" def _init_dbs(self): db1 = testing_engine('sqlite://', options={"execution_options": {"shard_id": "shard1"}}) assert db1._has_events db2 = db1.execution_options(shard_id="shard2") db3 = db1.execution_options(shard_id="shard3") db4 = db1.execution_options(shard_id="shard4") import re @event.listens_for(db1, "before_cursor_execute", retval=True) def _switch_shard(conn, cursor, stmt, params, context, executemany): shard_id = conn._execution_options['shard_id'] # because SQLite can't just give us a "use" statement, we have # to use the schema hack to locate table names if shard_id: stmt = re.sub(r"\"?changeme\"?\.", shard_id + "_", stmt) return stmt, params return db1, db2, db3, db4 SQLAlchemy-0.8.4/test/ext/test_hybrid.py0000644000076500000240000002117112251147172020706 0ustar classicstaff00000000000000from sqlalchemy import func, Integer, String, ForeignKey from sqlalchemy.orm import relationship, Session, aliased from sqlalchemy.testing.schema import Column from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.ext import hybrid from sqlalchemy.testing import eq_, AssertsCompiledSQL, assert_raises_message from sqlalchemy.testing import fixtures class PropertyComparatorTest(fixtures.TestBase, AssertsCompiledSQL): __dialect__ = 'default' def _fixture(self): Base = declarative_base() class UCComparator(hybrid.Comparator): def __eq__(self, other): if other is None: return self.expression == None else: return func.upper(self.expression) == func.upper(other) class A(Base): __tablename__ = 'a' id = Column(Integer, primary_key=True) _value = Column("value", String) @hybrid.hybrid_property def value(self): return self._value - 5 @value.comparator def value(cls): return UCComparator(cls._value) @value.setter def value(self, v): self._value = v + 5 return A def test_set_get(self): A = self._fixture() a1 = A(value=5) eq_(a1._value, 10) eq_(a1.value, 5) def test_value(self): A = self._fixture() eq_(str(A.value==5), "upper(a.value) = upper(:upper_1)") def test_aliased_value(self): 
A = self._fixture() eq_(str(aliased(A).value==5), "upper(a_1.value) = upper(:upper_1)") def test_query(self): A = self._fixture() sess = Session() self.assert_compile( sess.query(A.value), "SELECT a.value AS a_value FROM a" ) def test_aliased_query(self): A = self._fixture() sess = Session() self.assert_compile( sess.query(aliased(A).value), "SELECT a_1.value AS a_1_value FROM a AS a_1" ) def test_aliased_filter(self): A = self._fixture() sess = Session() self.assert_compile( sess.query(aliased(A)).filter_by(value="foo"), "SELECT a_1.value AS a_1_value, a_1.id AS a_1_id " "FROM a AS a_1 WHERE upper(a_1.value) = upper(:upper_1)" ) class PropertyExpressionTest(fixtures.TestBase, AssertsCompiledSQL): __dialect__ = 'default' def _fixture(self): Base = declarative_base() class A(Base): __tablename__ = 'a' id = Column(Integer, primary_key=True) _value = Column("value", String) @hybrid.hybrid_property def value(self): return int(self._value) - 5 @value.expression def value(cls): return func.foo(cls._value) + cls.bar_value @value.setter def value(self, v): self._value = v + 5 @hybrid.hybrid_property def bar_value(cls): return func.bar(cls._value) return A def _relationship_fixture(self): Base = declarative_base() class A(Base): __tablename__ = 'a' id = Column(Integer, primary_key=True) b_id = Column('bid', Integer, ForeignKey('b.id')) _value = Column("value", String) @hybrid.hybrid_property def value(self): return int(self._value) - 5 @value.expression def value(cls): return func.foo(cls._value) + cls.bar_value @value.setter def value(self, v): self._value = v + 5 @hybrid.hybrid_property def bar_value(cls): return func.bar(cls._value) class B(Base): __tablename__ = 'b' id = Column(Integer, primary_key=True) as_ = relationship("A") return A, B def test_set_get(self): A = self._fixture() a1 = A(value=5) eq_(a1._value, 10) eq_(a1.value, 5) def test_expression(self): A = self._fixture() self.assert_compile( A.value, "foo(a.value) + bar(a.value)" ) def test_any(self): A, B = self._relationship_fixture() sess = Session() self.assert_compile( sess.query(B).filter(B.as_.any(value=5)), "SELECT b.id AS b_id FROM b WHERE EXISTS " "(SELECT 1 FROM a WHERE b.id = a.bid " "AND foo(a.value) + bar(a.value) = :param_1)" ) def test_aliased_expression(self): A = self._fixture() self.assert_compile( aliased(A).value, "foo(a_1.value) + bar(a_1.value)" ) def test_query(self): A = self._fixture() sess = Session() self.assert_compile( sess.query(A).filter_by(value="foo"), "SELECT a.value AS a_value, a.id AS a_id " "FROM a WHERE foo(a.value) + bar(a.value) = :param_1" ) def test_aliased_query(self): A = self._fixture() sess = Session() self.assert_compile( sess.query(aliased(A)).filter_by(value="foo"), "SELECT a_1.value AS a_1_value, a_1.id AS a_1_id " "FROM a AS a_1 WHERE foo(a_1.value) + bar(a_1.value) = :param_1" ) class PropertyValueTest(fixtures.TestBase, AssertsCompiledSQL): __dialect__ = 'default' def _fixture(self, assignable): Base = declarative_base() class A(Base): __tablename__ = 'a' id = Column(Integer, primary_key=True) _value = Column("value", String) @hybrid.hybrid_property def value(self): return self._value - 5 if assignable: @value.setter def value(self, v): self._value = v + 5 return A def test_nonassignable(self): A = self._fixture(False) a1 = A(_value=5) assert_raises_message( AttributeError, "can't set attribute", setattr, a1, 'value', 10 ) def test_nondeletable(self): A = self._fixture(False) a1 = A(_value=5) assert_raises_message( AttributeError, "can't delete attribute", delattr, a1, 'value' ) def 
test_set_get(self): A = self._fixture(True) a1 = A(value=5) eq_(a1.value, 5) eq_(a1._value, 10) class MethodExpressionTest(fixtures.TestBase, AssertsCompiledSQL): __dialect__ = 'default' def _fixture(self): Base = declarative_base() class A(Base): __tablename__ = 'a' id = Column(Integer, primary_key=True) _value = Column("value", String) @hybrid.hybrid_method def value(self, x): return int(self._value) + x @value.expression def value(cls, value): return func.foo(cls._value, value) + value return A def test_call(self): A = self._fixture() a1 = A(_value=10) eq_(a1.value(7), 17) def test_expression(self): A = self._fixture() self.assert_compile( A.value(5), "foo(a.value, :foo_1) + :foo_2" ) def test_aliased_expression(self): A = self._fixture() self.assert_compile( aliased(A).value(5), "foo(a_1.value, :foo_1) + :foo_2" ) def test_query(self): A = self._fixture() sess = Session() self.assert_compile( sess.query(A).filter(A.value(5)=="foo"), "SELECT a.value AS a_value, a.id AS a_id " "FROM a WHERE foo(a.value, :foo_1) + :foo_2 = :param_1" ) def test_aliased_query(self): A = self._fixture() sess = Session() a1 = aliased(A) self.assert_compile( sess.query(a1).filter(a1.value(5)=="foo"), "SELECT a_1.value AS a_1_value, a_1.id AS a_1_id " "FROM a AS a_1 WHERE foo(a_1.value, :foo_1) + :foo_2 = :param_1" ) def test_query_col(self): A = self._fixture() sess = Session() self.assert_compile( sess.query(A.value(5)), "SELECT foo(a.value, :foo_1) + :foo_2 AS anon_1 FROM a" ) def test_aliased_query_col(self): A = self._fixture() sess = Session() self.assert_compile( sess.query(aliased(A).value(5)), "SELECT foo(a_1.value, :foo_1) + :foo_2 AS anon_1 FROM a AS a_1" ) SQLAlchemy-0.8.4/test/ext/test_mutable.py0000644000076500000240000003375712251150015021061 0ustar classicstaff00000000000000from sqlalchemy import Integer, ForeignKey, String from sqlalchemy.types import PickleType, TypeDecorator, VARCHAR from sqlalchemy.orm import mapper, Session, composite from sqlalchemy.orm.mapper import Mapper from sqlalchemy.orm.instrumentation import ClassManager from sqlalchemy.testing.schema import Table, Column from sqlalchemy.testing import eq_, assert_raises_message from sqlalchemy.testing.util import picklers from sqlalchemy import testing from sqlalchemy.testing import fixtures import sys import pickle class Foo(fixtures.BasicEntity): pass class SubFoo(Foo): pass class FooWithEq(object): def __init__(self, **kw): for k in kw: setattr(self, k, kw[k]) def __hash__(self): return hash(self.id) def __eq__(self, other): return self.id == other.id from sqlalchemy.ext.mutable import MutableDict class _MutableDictTestBase(object): run_define_tables = 'each' @classmethod def _type_fixture(cls): return MutableDict def setup_mappers(cls): foo = cls.tables.foo mapper(Foo, foo) def teardown(self): # clear out mapper events Mapper.dispatch._clear() ClassManager.dispatch._clear() super(_MutableDictTestBase, self).teardown() def test_coerce_none(self): sess = Session() f1 = Foo(data=None) sess.add(f1) sess.commit() eq_(f1.data, None) def test_coerce_raise(self): assert_raises_message( ValueError, "Attribute 'data' does not accept objects of type", Foo, data=set([1, 2, 3]) ) def test_in_place_mutation(self): sess = Session() f1 = Foo(data={'a': 'b'}) sess.add(f1) sess.commit() f1.data['a'] = 'c' sess.commit() eq_(f1.data, {'a': 'c'}) def test_clear(self): sess = Session() f1 = Foo(data={'a': 'b'}) sess.add(f1) sess.commit() f1.data.clear() sess.commit() eq_(f1.data, {}) def test_replace(self): sess = Session() f1 = Foo(data={'a': 
'b'}) sess.add(f1) sess.flush() f1.data = {'b': 'c'} sess.commit() eq_(f1.data, {'b': 'c'}) def test_pickle_parent(self): sess = Session() f1 = Foo(data={'a': 'b'}) sess.add(f1) sess.commit() f1.data sess.close() for loads, dumps in picklers(): sess = Session() f2 = loads(dumps(f1)) sess.add(f2) f2.data['a'] = 'c' assert f2 in sess.dirty def test_unrelated_flush(self): sess = Session() f1 = Foo(data={"a": "b"}, unrelated_data="unrelated") sess.add(f1) sess.flush() f1.unrelated_data = "unrelated 2" sess.flush() f1.data["a"] = "c" sess.commit() eq_(f1.data["a"], "c") def _test_non_mutable(self): sess = Session() f1 = Foo(non_mutable_data={'a': 'b'}) sess.add(f1) sess.commit() f1.non_mutable_data['a'] = 'c' sess.commit() eq_(f1.non_mutable_data, {'a': 'b'}) class MutableWithScalarPickleTest(_MutableDictTestBase, fixtures.MappedTest): @classmethod def define_tables(cls, metadata): MutableDict = cls._type_fixture() mutable_pickle = MutableDict.as_mutable(PickleType) Table('foo', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('skip', mutable_pickle), Column('data', mutable_pickle), Column('non_mutable_data', PickleType), Column('unrelated_data', String(50)) ) def test_non_mutable(self): self._test_non_mutable() class MutableWithScalarJSONTest(_MutableDictTestBase, fixtures.MappedTest): # json introduced in 2.6 __skip_if__ = lambda: sys.version_info < (2, 6), @classmethod def define_tables(cls, metadata): import json class JSONEncodedDict(TypeDecorator): impl = VARCHAR(50) def process_bind_param(self, value, dialect): if value is not None: value = json.dumps(value) return value def process_result_value(self, value, dialect): if value is not None: value = json.loads(value) return value MutableDict = cls._type_fixture() Table('foo', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('data', MutableDict.as_mutable(JSONEncodedDict)), Column('non_mutable_data', JSONEncodedDict), Column('unrelated_data', String(50)) ) def test_non_mutable(self): self._test_non_mutable() class MutableAssocWithAttrInheritTest(_MutableDictTestBase, fixtures.MappedTest): @classmethod def define_tables(cls, metadata): Table('foo', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('data', PickleType), Column('non_mutable_data', PickleType), Column('unrelated_data', String(50)) ) Table('subfoo', metadata, Column('id', Integer, ForeignKey('foo.id'), primary_key=True), ) def setup_mappers(cls): foo = cls.tables.foo subfoo = cls.tables.subfoo mapper(Foo, foo) mapper(SubFoo, subfoo, inherits=Foo) MutableDict.associate_with_attribute(Foo.data) def test_in_place_mutation(self): sess = Session() f1 = SubFoo(data={'a': 'b'}) sess.add(f1) sess.commit() f1.data['a'] = 'c' sess.commit() eq_(f1.data, {'a': 'c'}) def test_replace(self): sess = Session() f1 = SubFoo(data={'a': 'b'}) sess.add(f1) sess.flush() f1.data = {'b': 'c'} sess.commit() eq_(f1.data, {'b': 'c'}) class MutableAssociationScalarPickleTest(_MutableDictTestBase, fixtures.MappedTest): @classmethod def define_tables(cls, metadata): MutableDict = cls._type_fixture() MutableDict.associate_with(PickleType) Table('foo', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('skip', PickleType), Column('data', PickleType), Column('unrelated_data', String(50)) ) class MutableAssociationScalarJSONTest(_MutableDictTestBase, fixtures.MappedTest): # json introduced in 2.6 __skip_if__ = lambda: sys.version_info < (2, 6), @classmethod 
def define_tables(cls, metadata): import json class JSONEncodedDict(TypeDecorator): impl = VARCHAR(50) def process_bind_param(self, value, dialect): if value is not None: value = json.dumps(value) return value def process_result_value(self, value, dialect): if value is not None: value = json.loads(value) return value MutableDict = cls._type_fixture() MutableDict.associate_with(JSONEncodedDict) Table('foo', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('data', JSONEncodedDict), Column('unrelated_data', String(50)) ) class _CompositeTestBase(object): @classmethod def define_tables(cls, metadata): Table('foo', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('x', Integer), Column('y', Integer), Column('unrelated_data', String(50)) ) def setup(self): from sqlalchemy.ext import mutable mutable._setup_composite_listener() super(_CompositeTestBase, self).setup() def teardown(self): # clear out mapper events Mapper.dispatch._clear() ClassManager.dispatch._clear() super(_CompositeTestBase, self).teardown() @classmethod def _type_fixture(cls): from sqlalchemy.ext.mutable import MutableComposite global Point class Point(MutableComposite): def __init__(self, x, y): self.x = x self.y = y def __setattr__(self, key, value): object.__setattr__(self, key, value) self.changed() def __composite_values__(self): return self.x, self.y def __getstate__(self): return self.x, self.y def __setstate__(self, state): self.x, self.y = state def __eq__(self, other): return isinstance(other, Point) and \ other.x == self.x and \ other.y == self.y return Point class MutableCompositesUnpickleTest(_CompositeTestBase, fixtures.MappedTest): @classmethod def setup_mappers(cls): foo = cls.tables.foo cls.Point = cls._type_fixture() mapper(FooWithEq, foo, properties={ 'data': composite(cls.Point, foo.c.x, foo.c.y) }) def test_unpickle_modified_eq(self): u1 = FooWithEq(data=self.Point(3, 5)) for loads, dumps in picklers(): loads(dumps(u1)) class MutableCompositesTest(_CompositeTestBase, fixtures.MappedTest): @classmethod def setup_mappers(cls): foo = cls.tables.foo Point = cls._type_fixture() mapper(Foo, foo, properties={ 'data': composite(Point, foo.c.x, foo.c.y) }) def test_in_place_mutation(self): sess = Session() d = Point(3, 4) f1 = Foo(data=d) sess.add(f1) sess.commit() f1.data.y = 5 sess.commit() eq_(f1.data, Point(3, 5)) def test_pickle_of_parent(self): sess = Session() d = Point(3, 4) f1 = Foo(data=d) sess.add(f1) sess.commit() f1.data assert 'data' in f1.__dict__ sess.close() for loads, dumps in picklers(): sess = Session() f2 = loads(dumps(f1)) sess.add(f2) f2.data.y = 12 assert f2 in sess.dirty def test_set_none(self): sess = Session() f1 = Foo(data=None) sess.add(f1) sess.commit() eq_(f1.data, Point(None, None)) f1.data.y = 5 sess.commit() eq_(f1.data, Point(None, 5)) def test_set_illegal(self): f1 = Foo() assert_raises_message( ValueError, "Attribute 'data' does not accept objects", setattr, f1, 'data', 'foo' ) def test_unrelated_flush(self): sess = Session() f1 = Foo(data=Point(3, 4), unrelated_data="unrelated") sess.add(f1) sess.flush() f1.unrelated_data = "unrelated 2" sess.flush() f1.data.x = 5 sess.commit() eq_(f1.data.x, 5) class MutableCompositeCustomCoerceTest(_CompositeTestBase, fixtures.MappedTest): @classmethod def _type_fixture(cls): from sqlalchemy.ext.mutable import MutableComposite global Point class Point(MutableComposite): def __init__(self, x, y): self.x = x self.y = y @classmethod def coerce(cls, key, value): if 
isinstance(value, tuple): value = Point(*value) return value def __setattr__(self, key, value): object.__setattr__(self, key, value) self.changed() def __composite_values__(self): return self.x, self.y def __getstate__(self): return self.x, self.y def __setstate__(self, state): self.x, self.y = state def __eq__(self, other): return isinstance(other, Point) and \ other.x == self.x and \ other.y == self.y return Point @classmethod def setup_mappers(cls): foo = cls.tables.foo Point = cls._type_fixture() mapper(Foo, foo, properties={ 'data': composite(Point, foo.c.x, foo.c.y) }) def test_custom_coerce(self): f = Foo() f.data = (3, 4) eq_(f.data, Point(3, 4)) def test_round_trip_ok(self): sess = Session() f = Foo() f.data = (3, 4) sess.add(f) sess.commit() eq_(f.data, Point(3, 4)) class MutableInheritedCompositesTest(_CompositeTestBase, fixtures.MappedTest): @classmethod def define_tables(cls, metadata): Table('foo', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('x', Integer), Column('y', Integer) ) Table('subfoo', metadata, Column('id', Integer, ForeignKey('foo.id'), primary_key=True), ) @classmethod def setup_mappers(cls): foo = cls.tables.foo subfoo = cls.tables.subfoo Point = cls._type_fixture() mapper(Foo, foo, properties={ 'data': composite(Point, foo.c.x, foo.c.y) }) mapper(SubFoo, subfoo, inherits=Foo) def test_in_place_mutation_subclass(self): sess = Session() d = Point(3, 4) f1 = SubFoo(data=d) sess.add(f1) sess.commit() f1.data.y = 5 sess.commit() eq_(f1.data, Point(3, 5)) def test_pickle_of_parent_subclass(self): sess = Session() d = Point(3, 4) f1 = SubFoo(data=d) sess.add(f1) sess.commit() f1.data assert 'data' in f1.__dict__ sess.close() for loads, dumps in picklers(): sess = Session() f2 = loads(dumps(f1)) sess.add(f2) f2.data.y = 12 assert f2 in sess.dirty SQLAlchemy-0.8.4/test/ext/test_orderinglist.py0000644000076500000240000003171612251147172022140 0ustar classicstaff00000000000000from sqlalchemy import Integer, ForeignKey, String, MetaData from sqlalchemy.orm import relationship, mapper, create_session from sqlalchemy.ext.orderinglist import ordering_list from sqlalchemy.testing import eq_ from sqlalchemy.testing import fixtures from sqlalchemy import testing from sqlalchemy.testing.schema import Table, Column from sqlalchemy.testing.util import picklers metadata = None # order in whole steps def step_numbering(step): def f(index, collection): return step * index return f # almost fibonacci- skip the first 2 steps # e.g. 1, 2, 3, 5, 8, ... instead of 0, 1, 1, 2, 3, ... # otherwise ordering of the elements at '1' is undefined... ;) def fibonacci_numbering(order_col): def f(index, collection): if index == 0: return 1 elif index == 1: return 2 else: return (getattr(collection[index - 1], order_col) + getattr(collection[index - 2], order_col)) return f # 0 -> A, 1 -> B, ... 25 -> Z, 26 -> AA, 27 -> AB, ... 
def alpha_ordering(index, collection): s = '' while index > 25: d = index / 26 s += chr((d % 26) + 64) index -= d * 26 s += chr(index + 65) return s class OrderingListTest(fixtures.TestBase): def setup(self): global metadata, slides_table, bullets_table, Slide, Bullet slides_table, bullets_table = None, None Slide, Bullet = None, None if metadata: metadata.clear() def _setup(self, test_collection_class): """Build a relationship situation using the given test_collection_class factory""" global metadata, slides_table, bullets_table, Slide, Bullet metadata = MetaData(testing.db) slides_table = Table('test_Slides', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', String(128))) bullets_table = Table('test_Bullets', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('slide_id', Integer, ForeignKey('test_Slides.id')), Column('position', Integer), Column('text', String(128))) class Slide(object): def __init__(self, name): self.name = name def __repr__(self): return '' % self.name class Bullet(object): def __init__(self, text): self.text = text def __repr__(self): return '' % (self.text, self.position) mapper(Slide, slides_table, properties={ 'bullets': relationship(Bullet, lazy='joined', collection_class=test_collection_class, backref='slide', order_by=[bullets_table.c.position]) }) mapper(Bullet, bullets_table) metadata.create_all() def teardown(self): metadata.drop_all() def test_append_no_reorder(self): self._setup(ordering_list('position', count_from=1, reorder_on_append=False)) s1 = Slide('Slide #1') self.assert_(not s1.bullets) self.assert_(len(s1.bullets) == 0) s1.bullets.append(Bullet('s1/b1')) self.assert_(s1.bullets) self.assert_(len(s1.bullets) == 1) self.assert_(s1.bullets[0].position == 1) s1.bullets.append(Bullet('s1/b2')) self.assert_(len(s1.bullets) == 2) self.assert_(s1.bullets[0].position == 1) self.assert_(s1.bullets[1].position == 2) bul = Bullet('s1/b100') bul.position = 100 s1.bullets.append(bul) self.assert_(s1.bullets[0].position == 1) self.assert_(s1.bullets[1].position == 2) self.assert_(s1.bullets[2].position == 100) s1.bullets.append(Bullet('s1/b4')) self.assert_(s1.bullets[0].position == 1) self.assert_(s1.bullets[1].position == 2) self.assert_(s1.bullets[2].position == 100) self.assert_(s1.bullets[3].position == 4) s1.bullets._reorder() self.assert_(s1.bullets[0].position == 1) self.assert_(s1.bullets[1].position == 2) self.assert_(s1.bullets[2].position == 3) self.assert_(s1.bullets[3].position == 4) session = create_session() session.add(s1) session.flush() id = s1.id session.expunge_all() del s1 srt = session.query(Slide).get(id) self.assert_(srt.bullets) self.assert_(len(srt.bullets) == 4) titles = ['s1/b1','s1/b2','s1/b100','s1/b4'] found = [b.text for b in srt.bullets] self.assert_(titles == found) def test_append_reorder(self): self._setup(ordering_list('position', count_from=1, reorder_on_append=True)) s1 = Slide('Slide #1') self.assert_(not s1.bullets) self.assert_(len(s1.bullets) == 0) s1.bullets.append(Bullet('s1/b1')) self.assert_(s1.bullets) self.assert_(len(s1.bullets) == 1) self.assert_(s1.bullets[0].position == 1) s1.bullets.append(Bullet('s1/b2')) self.assert_(len(s1.bullets) == 2) self.assert_(s1.bullets[0].position == 1) self.assert_(s1.bullets[1].position == 2) bul = Bullet('s1/b100') bul.position = 100 s1.bullets.append(bul) self.assert_(s1.bullets[0].position == 1) self.assert_(s1.bullets[1].position == 2) self.assert_(s1.bullets[2].position == 3) 
s1.bullets.append(Bullet('s1/b4')) self.assert_(s1.bullets[0].position == 1) self.assert_(s1.bullets[1].position == 2) self.assert_(s1.bullets[2].position == 3) self.assert_(s1.bullets[3].position == 4) s1.bullets._reorder() self.assert_(s1.bullets[0].position == 1) self.assert_(s1.bullets[1].position == 2) self.assert_(s1.bullets[2].position == 3) self.assert_(s1.bullets[3].position == 4) s1.bullets._raw_append(Bullet('raw')) self.assert_(s1.bullets[4].position is None) s1.bullets._reorder() self.assert_(s1.bullets[4].position == 5) session = create_session() session.add(s1) session.flush() id = s1.id session.expunge_all() del s1 srt = session.query(Slide).get(id) self.assert_(srt.bullets) self.assert_(len(srt.bullets) == 5) titles = ['s1/b1','s1/b2','s1/b100','s1/b4', 'raw'] found = [b.text for b in srt.bullets] eq_(titles, found) srt.bullets._raw_append(Bullet('raw2')) srt.bullets[-1].position = 6 session.flush() session.expunge_all() srt = session.query(Slide).get(id) titles = ['s1/b1','s1/b2','s1/b100','s1/b4', 'raw', 'raw2'] found = [b.text for b in srt.bullets] eq_(titles, found) def test_insert(self): self._setup(ordering_list('position')) s1 = Slide('Slide #1') s1.bullets.append(Bullet('1')) s1.bullets.append(Bullet('2')) s1.bullets.append(Bullet('3')) s1.bullets.append(Bullet('4')) self.assert_(s1.bullets[0].position == 0) self.assert_(s1.bullets[1].position == 1) self.assert_(s1.bullets[2].position == 2) self.assert_(s1.bullets[3].position == 3) s1.bullets.insert(2, Bullet('insert_at_2')) self.assert_(s1.bullets[0].position == 0) self.assert_(s1.bullets[1].position == 1) self.assert_(s1.bullets[2].position == 2) self.assert_(s1.bullets[3].position == 3) self.assert_(s1.bullets[4].position == 4) self.assert_(s1.bullets[1].text == '2') self.assert_(s1.bullets[2].text == 'insert_at_2') self.assert_(s1.bullets[3].text == '3') s1.bullets.insert(999, Bullet('999')) self.assert_(len(s1.bullets) == 6) self.assert_(s1.bullets[5].position == 5) session = create_session() session.add(s1) session.flush() id = s1.id session.expunge_all() del s1 srt = session.query(Slide).get(id) self.assert_(srt.bullets) self.assert_(len(srt.bullets) == 6) texts = ['1','2','insert_at_2','3','4','999'] found = [b.text for b in srt.bullets] self.assert_(texts == found) def test_slice(self): self._setup(ordering_list('position')) b = [ Bullet('1'), Bullet('2'), Bullet('3'), Bullet('4'), Bullet('5'), Bullet('6') ] s1 = Slide('Slide #1') # 1, 2, 3 s1.bullets[0:3] = b[0:3] for i in 0, 1, 2: self.assert_(s1.bullets[i].position == i) self.assert_(s1.bullets[i] == b[i]) # 1, 4, 5, 6, 3 s1.bullets[1:2] = b[3:6] for li, bi in (0,0), (1,3), (2,4), (3,5), (4,2): self.assert_(s1.bullets[li].position == li) self.assert_(s1.bullets[li] == b[bi]) # 1, 6, 3 del s1.bullets[1:3] for li, bi in (0,0), (1,5), (2,2): self.assert_(s1.bullets[li].position == li) self.assert_(s1.bullets[li] == b[bi]) session = create_session() session.add(s1) session.flush() id = s1.id session.expunge_all() del s1 srt = session.query(Slide).get(id) self.assert_(srt.bullets) self.assert_(len(srt.bullets) == 3) texts = ['1', '6', '3'] for i, text in enumerate(texts): self.assert_(srt.bullets[i].position == i) self.assert_(srt.bullets[i].text == text) def test_replace(self): self._setup(ordering_list('position')) s1 = Slide('Slide #1') s1.bullets = [ Bullet('1'), Bullet('2'), Bullet('3') ] self.assert_(len(s1.bullets) == 3) self.assert_(s1.bullets[2].position == 2) session = create_session() session.add(s1) session.flush() new_bullet = Bullet('new 2') 
self.assert_(new_bullet.position is None) # mark existing bullet as db-deleted before replacement. #session.delete(s1.bullets[1]) s1.bullets[1] = new_bullet self.assert_(new_bullet.position == 1) self.assert_(len(s1.bullets) == 3) id = s1.id session.flush() session.expunge_all() srt = session.query(Slide).get(id) self.assert_(srt.bullets) self.assert_(len(srt.bullets) == 3) self.assert_(srt.bullets[1].text == 'new 2') self.assert_(srt.bullets[2].text == '3') def test_funky_ordering(self): class Pos(object): def __init__(self): self.position = None step_factory = ordering_list('position', ordering_func=step_numbering(2)) stepped = step_factory() stepped.append(Pos()) stepped.append(Pos()) stepped.append(Pos()) stepped.append(Pos()) for li, pos in (0,0), (1,2), (2,4), (3,6): self.assert_(stepped[li].position == pos) fib_factory = ordering_list('position', ordering_func=fibonacci_numbering('position')) fibbed = fib_factory() fibbed.append(Pos()) fibbed.append(Pos()) fibbed.append(Pos()) fibbed.append(Pos()) fibbed.append(Pos()) for li, pos in (0,1), (1,2), (2,3), (3,5), (4,8): self.assert_(fibbed[li].position == pos) fibbed.insert(2, Pos()) fibbed.insert(4, Pos()) fibbed.insert(6, Pos()) for li, pos in ( (0, 1), (1, 2), (2, 3), (3, 5), (4, 8), (5, 13), (6, 21), (7, 34), ): self.assert_(fibbed[li].position == pos) alpha_factory = ordering_list('position', ordering_func=alpha_ordering) alpha = alpha_factory() alpha.append(Pos()) alpha.append(Pos()) alpha.append(Pos()) alpha.insert(1, Pos()) for li, pos in (0,'A'), (1,'B'), (2,'C'), (3,'D'): self.assert_(alpha[li].position == pos) def test_picklability(self): from sqlalchemy.ext.orderinglist import OrderingList olist = OrderingList('order', reorder_on_append=True) olist.append(DummyItem()) for loads, dumps in picklers(): pck = dumps(olist) copy = loads(pck) self.assert_(copy == olist) self.assert_(copy.__dict__ == olist.__dict__) class DummyItem(object): def __init__(self, order=None): self.order = order def __eq__(self, other): return self.order == other.order def __ne__(self, other): return not (self == other) SQLAlchemy-0.8.4/test/ext/test_serializer.py0000644000076500000240000001670712251150015021575 0ustar classicstaff00000000000000# coding: utf-8 from sqlalchemy.ext import serializer from sqlalchemy import testing from sqlalchemy import Integer, String, ForeignKey, select, \ desc, func, util, MetaData from sqlalchemy.testing.schema import Table from sqlalchemy.testing.schema import Column from sqlalchemy.orm import relationship, sessionmaker, scoped_session, \ class_mapper, mapper, joinedload, configure_mappers, aliased from sqlalchemy.testing import eq_, AssertsCompiledSQL from sqlalchemy.util import u, ue from sqlalchemy.engine import default from sqlalchemy.testing import fixtures class User(fixtures.ComparableEntity): pass class Address(fixtures.ComparableEntity): pass users = addresses = Session = None class SerializeTest(AssertsCompiledSQL, fixtures.MappedTest): run_setup_mappers = 'once' run_inserts = 'once' run_deletes = None @classmethod def define_tables(cls, metadata): global users, addresses users = Table('users', metadata, Column('id', Integer, primary_key=True), Column('name', String(50))) addresses = Table('addresses', metadata, Column('id', Integer, primary_key=True), Column('email', String(50)), Column('user_id', Integer, ForeignKey('users.id'))) @classmethod def setup_mappers(cls): global Session Session = scoped_session(sessionmaker()) mapper(User, users, properties={'addresses' : relationship(Address, backref='user', 
order_by=addresses.c.id)}) mapper(Address, addresses) configure_mappers() @classmethod def insert_data(cls): params = [dict(zip(('id', 'name'), column_values)) for column_values in [(7, 'jack'), (8, 'ed'), (9, 'fred'), (10, 'chuck')]] users.insert().execute(params) addresses.insert().execute([dict(zip(('id', 'user_id', 'email' ), column_values)) for column_values in [(1, 7, 'jack@bean.com'), (2, 8, 'ed@wood.com'), (3, 8, 'ed@bettyboop.com'), (4, 8, 'ed@lala.com'), (5, 9, 'fred@fred.com')]]) def test_tables(self): assert serializer.loads(serializer.dumps(users, -1), users.metadata, Session) is users def test_columns(self): assert serializer.loads(serializer.dumps(users.c.name, -1), users.metadata, Session) is users.c.name def test_mapper(self): user_mapper = class_mapper(User) assert serializer.loads(serializer.dumps(user_mapper, -1), None, None) is user_mapper def test_attribute(self): assert serializer.loads(serializer.dumps(User.name, -1), None, None) is User.name @testing.requires.python26 # crashes in 2.5 def test_expression(self): expr = \ select([users]).select_from(users.join(addresses)).limit(5) re_expr = serializer.loads(serializer.dumps(expr, -1), users.metadata, None) eq_(str(expr), str(re_expr)) assert re_expr.bind is testing.db eq_(re_expr.execute().fetchall(), [(7, u'jack'), (8, u'ed'), (8, u'ed'), (8, u'ed'), (9, u'fred')]) def test_query_one(self): q = Session.query(User).\ filter(User.name == 'ed').\ options(joinedload(User.addresses)) q2 = serializer.loads( serializer.dumps(q, -1), users.metadata, Session) def go(): eq_(q2.all(), [ User(name='ed', addresses=[Address(id=2), Address(id=3), Address(id=4)])]) self.assert_sql_count(testing.db, go, 1) eq_(q2.join(User.addresses).filter(Address.email == 'ed@bettyboop.com').value(func.count('*')), 1) u1 = Session.query(User).get(8) q = Session.query(Address).filter(Address.user == u1).order_by(desc(Address.email)) q2 = serializer.loads(serializer.dumps(q, -1), users.metadata, Session) eq_(q2.all(), [Address(email='ed@wood.com'), Address(email='ed@lala.com'), Address(email='ed@bettyboop.com')]) @testing.skip_if(lambda: util.pypy, "problems with pypy pickle reported") def test_query_two(self): q = \ Session.query(User).join(User.addresses).\ filter(Address.email.like('%fred%')) q2 = serializer.loads(serializer.dumps(q, -1), users.metadata, Session) eq_(q2.all(), [User(name='fred')]) eq_(list(q2.values(User.id, User.name)), [(9, 'fred')]) @testing.requires.bulletproof_pickle def test_query_three(self): ua = aliased(User) q = \ Session.query(ua).join(ua.addresses).\ filter(Address.email.like('%fred%')) q2 = serializer.loads(serializer.dumps(q, -1), users.metadata, Session) eq_(q2.all(), [User(name='fred')]) # try to pull out the aliased entity here... 
ua_2 = q2._entities[0].entity_zero.entity eq_(list(q2.values(ua_2.id, ua_2.name)), [(9, u'fred')]) @testing.skip_if(lambda: util.pypy, "problems with pypy pickle reported") def test_orm_join(self): from sqlalchemy.orm.util import join j = join(User, Address, User.addresses) j2 = serializer.loads(serializer.dumps(j, -1), users.metadata) assert j2.left is j.left assert j2.right is j.right assert j2._target_adapter._next @testing.requires.python26 # namedtuple workaround not serializable in 2.5 @testing.exclude('sqlite', '<=', (3, 5, 9), 'id comparison failing on the buildbot') def test_aliases(self): u7, u8, u9, u10 = Session.query(User).order_by(User.id).all() ualias = aliased(User) q = Session.query(User, ualias).join(ualias, User.id < ualias.id).filter(User.id < 9).order_by(User.id, ualias.id) eq_(list(q.all()), [(u7, u8), (u7, u9), (u7, u10), (u8, u9), (u8, u10)]) q2 = serializer.loads(serializer.dumps(q, -1), users.metadata, Session) eq_(list(q2.all()), [(u7, u8), (u7, u9), (u7, u10), (u8, u9), (u8, u10)]) @testing.skip_if(lambda: util.pypy, "pickle sometimes has " "problems here, sometimes not") def test_any(self): r = User.addresses.any(Address.email == 'x') ser = serializer.dumps(r, -1) x = serializer.loads(ser, users.metadata) eq_(str(r), str(x)) def test_unicode(self): m = MetaData() t = Table(ue('\u6e2c\u8a66'), m, Column(ue('\u6e2c\u8a66_id'), Integer)) expr = select([t]).where(t.c[ue('\u6e2c\u8a66_id')] == 5) expr2 = serializer.loads(serializer.dumps(expr, -1), m) self.assert_compile( expr2, ue('SELECT "\u6e2c\u8a66"."\u6e2c\u8a66_id" FROM "\u6e2c\u8a66" ' 'WHERE "\u6e2c\u8a66"."\u6e2c\u8a66_id" = :\u6e2c\u8a66_id_1'), dialect=default.DefaultDialect() ) if __name__ == '__main__': testing.main() SQLAlchemy-0.8.4/test/orm/0000755000076500000240000000000012251151573016007 5ustar classicstaff00000000000000SQLAlchemy-0.8.4/test/orm/__init__.py0000644000076500000240000000000012251147172020106 0ustar classicstaff00000000000000SQLAlchemy-0.8.4/test/orm/_fixtures.py0000644000076500000240000003455112251147172020401 0ustar classicstaff00000000000000from sqlalchemy import MetaData, Integer, String, ForeignKey from sqlalchemy import util from sqlalchemy.testing.schema import Table from sqlalchemy.testing.schema import Column from sqlalchemy.orm import attributes, mapper, relationship, \ backref, configure_mappers from sqlalchemy.testing import fixtures __all__ = () class FixtureTest(fixtures.MappedTest): """A MappedTest pre-configured with a common set of fixtures. 
""" run_define_tables = 'once' run_setup_classes = 'once' run_setup_mappers = 'each' run_inserts = 'each' run_deletes = 'each' @classmethod def setup_classes(cls): class Base(cls.Comparable): pass class User(Base): pass class Order(Base): pass class Item(Base): pass class Keyword(Base): pass class Address(Base): pass class Dingaling(Base): pass class Node(Base): pass class CompositePk(Base): pass @classmethod def _setup_stock_mapping(cls): Node, composite_pk_table, users, Keyword, items, Dingaling, \ order_items, item_keywords, Item, User, dingalings, \ Address, keywords, CompositePk, nodes, Order, orders, \ addresses = cls.classes.Node, \ cls.tables.composite_pk_table, cls.tables.users, \ cls.classes.Keyword, cls.tables.items, \ cls.classes.Dingaling, cls.tables.order_items, \ cls.tables.item_keywords, cls.classes.Item, \ cls.classes.User, cls.tables.dingalings, \ cls.classes.Address, cls.tables.keywords, \ cls.classes.CompositePk, cls.tables.nodes, \ cls.classes.Order, cls.tables.orders, cls.tables.addresses mapper(User, users, properties={ 'addresses':relationship(Address, backref='user', order_by=addresses.c.id), 'orders':relationship(Order, backref='user', order_by=orders.c.id), # o2m, m2o }) mapper(Address, addresses, properties={ 'dingaling':relationship(Dingaling, uselist=False, backref="address") #o2o }) mapper(Dingaling, dingalings) mapper(Order, orders, properties={ 'items':relationship(Item, secondary=order_items, order_by=items.c.id), #m2m 'address':relationship(Address), # m2o }) mapper(Item, items, properties={ 'keywords':relationship(Keyword, secondary=item_keywords) #m2m }) mapper(Keyword, keywords) mapper(Node, nodes, properties={ 'children':relationship(Node, backref=backref('parent', remote_side=[nodes.c.id]) ) }) mapper(CompositePk, composite_pk_table) configure_mappers() @classmethod def define_tables(cls, metadata): Table('users', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', String(30), nullable=False), test_needs_acid=True, test_needs_fk=True ) Table('addresses', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('user_id', None, ForeignKey('users.id')), Column('email_address', String(50), nullable=False), test_needs_acid=True, test_needs_fk=True ) Table('email_bounces', metadata, Column('id', Integer, ForeignKey('addresses.id')), Column('bounces', Integer) ) Table('orders', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('user_id', None, ForeignKey('users.id')), Column('address_id', None, ForeignKey('addresses.id')), Column('description', String(30)), Column('isopen', Integer), test_needs_acid=True, test_needs_fk=True ) Table("dingalings", metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('address_id', None, ForeignKey('addresses.id')), Column('data', String(30)), test_needs_acid=True, test_needs_fk=True ) Table('items', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('description', String(30), nullable=False), test_needs_acid=True, test_needs_fk=True ) Table('order_items', metadata, Column('item_id', None, ForeignKey('items.id')), Column('order_id', None, ForeignKey('orders.id')), test_needs_acid=True, test_needs_fk=True ) Table('keywords', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', String(30), nullable=False), test_needs_acid=True, test_needs_fk=True ) Table('item_keywords', metadata, Column('item_id', 
None, ForeignKey('items.id')), Column('keyword_id', None, ForeignKey('keywords.id')), test_needs_acid=True, test_needs_fk=True ) Table('nodes', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('parent_id', Integer, ForeignKey('nodes.id')), Column('data', String(30)), test_needs_acid=True, test_needs_fk=True ) Table('composite_pk_table', metadata, Column('i', Integer, primary_key=True), Column('j', Integer, primary_key=True), Column('k', Integer, nullable=False), ) @classmethod def setup_mappers(cls): pass @classmethod def fixtures(cls): return dict( users = ( ('id', 'name'), (7, 'jack'), (8, 'ed'), (9, 'fred'), (10, 'chuck') ), addresses = ( ('id', 'user_id', 'email_address'), (1, 7, "jack@bean.com"), (2, 8, "ed@wood.com"), (3, 8, "ed@bettyboop.com"), (4, 8, "ed@lala.com"), (5, 9, "fred@fred.com") ), email_bounces = ( ('id', 'bounces'), (1, 1), (2, 0), (3, 5), (4, 0), (5, 0) ), orders = ( ('id', 'user_id', 'description', 'isopen', 'address_id'), (1, 7, 'order 1', 0, 1), (2, 9, 'order 2', 0, 4), (3, 7, 'order 3', 1, 1), (4, 9, 'order 4', 1, 4), (5, 7, 'order 5', 0, None) ), dingalings = ( ('id', 'address_id', 'data'), (1, 2, 'ding 1/2'), (2, 5, 'ding 2/5') ), items = ( ('id', 'description'), (1, 'item 1'), (2, 'item 2'), (3, 'item 3'), (4, 'item 4'), (5, 'item 5') ), order_items = ( ('item_id', 'order_id'), (1, 1), (2, 1), (3, 1), (1, 2), (2, 2), (3, 2), (3, 3), (4, 3), (5, 3), (1, 4), (5, 4), (5, 5) ), keywords = ( ('id', 'name'), (1, 'blue'), (2, 'red'), (3, 'green'), (4, 'big'), (5, 'small'), (6, 'round'), (7, 'square') ), item_keywords = ( ('keyword_id', 'item_id'), (2, 1), (2, 2), (4, 1), (6, 1), (5, 2), (3, 3), (4, 3), (7, 2), (6, 3) ), nodes = ( ('id', 'parent_id', 'data'), ), composite_pk_table = ( ('i', 'j', 'k'), (1, 2, 3), (2, 1, 4), (1, 1, 5), (2, 2,6) ) ) @util.memoized_property def static(self): return CannedResults(self) class CannedResults(object): """Built on demand, instances use mappers in effect at time of call.""" def __init__(self, test): self.test = test @property def user_result(self): User = self.test.classes.User return [ User(id=7), User(id=8), User(id=9), User(id=10)] @property def user_address_result(self): User, Address = self.test.classes.User, self.test.classes.Address return [ User(id=7, addresses=[ Address(id=1) ]), User(id=8, addresses=[ Address(id=2, email_address='ed@wood.com'), Address(id=3, email_address='ed@bettyboop.com'), Address(id=4, email_address='ed@lala.com'), ]), User(id=9, addresses=[ Address(id=5) ]), User(id=10, addresses=[])] @property def user_all_result(self): User, Address, Order, Item = self.test.classes.User, \ self.test.classes.Address, self.test.classes.Order, \ self.test.classes.Item return [ User(id=7, addresses=[ Address(id=1)], orders=[ Order(description='order 1', items=[ Item(description='item 1'), Item(description='item 2'), Item(description='item 3')]), Order(description='order 3'), Order(description='order 5')]), User(id=8, addresses=[ Address(id=2), Address(id=3), Address(id=4)]), User(id=9, addresses=[ Address(id=5)], orders=[ Order(description='order 2', items=[ Item(description='item 1'), Item(description='item 2'), Item(description='item 3')]), Order(description='order 4', items=[ Item(description='item 1'), Item(description='item 5')])]), User(id=10, addresses=[])] @property def user_order_result(self): User, Order, Item = self.test.classes.User, \ self.test.classes.Order, self.test.classes.Item return [ User(id=7, orders=[ Order(id=1, items=[ Item(id=1), Item(id=2), 
Item(id=3)]), Order(id=3, items=[ Item(id=3), Item(id=4), Item(id=5)]), Order(id=5, items=[ Item(id=5)])]), User(id=8, orders=[]), User(id=9, orders=[ Order(id=2, items=[ Item(id=1), Item(id=2), Item(id=3)]), Order(id=4, items=[ Item(id=1), Item(id=5)])]), User(id=10)] @property def item_keyword_result(self): Item, Keyword = self.test.classes.Item, self.test.classes.Keyword return [ Item(id=1, keywords=[ Keyword(name='red'), Keyword(name='big'), Keyword(name='round')]), Item(id=2, keywords=[ Keyword(name='red'), Keyword(name='small'), Keyword(name='square')]), Item(id=3, keywords=[ Keyword(name='green'), Keyword(name='big'), Keyword(name='round')]), Item(id=4, keywords=[]), Item(id=5, keywords=[])] @property def user_item_keyword_result(self): Item, Keyword = self.test.classes.Item, self.test.classes.Keyword User, Order = self.test.classes.User, self.test.classes.Order item1, item2, item3, item4, item5 = \ Item(id=1, keywords=[ Keyword(name='red'), Keyword(name='big'), Keyword(name='round')]),\ Item(id=2, keywords=[ Keyword(name='red'), Keyword(name='small'), Keyword(name='square')]),\ Item(id=3, keywords=[ Keyword(name='green'), Keyword(name='big'), Keyword(name='round')]),\ Item(id=4, keywords=[]),\ Item(id=5, keywords=[]) user_result = [ User(id=7, orders=[ Order(id=1, items=[item1, item2, item3]), Order(id=3, items=[item3, item4, item5]), Order(id=5, items=[item5])]), User(id=8, orders=[]), User(id=9, orders=[ Order(id=2, items=[item1, item2, item3]), Order(id=4, items=[item1, item5])]), User(id=10, orders=[])] return user_result SQLAlchemy-0.8.4/test/orm/inheritance/0000755000076500000240000000000012251151573020300 5ustar classicstaff00000000000000SQLAlchemy-0.8.4/test/orm/inheritance/__init__.py0000644000076500000240000000000012251147172022377 0ustar classicstaff00000000000000SQLAlchemy-0.8.4/test/orm/inheritance/_poly_fixtures.py0000644000076500000240000002655312251147172023740 0ustar classicstaff00000000000000from sqlalchemy import Integer, String, ForeignKey, func, desc, and_, or_ from sqlalchemy.orm import interfaces, relationship, mapper, \ clear_mappers, create_session, joinedload, joinedload_all, \ subqueryload, subqueryload_all, polymorphic_union, aliased,\ class_mapper from sqlalchemy import exc as sa_exc from sqlalchemy.engine import default from sqlalchemy.testing import AssertsCompiledSQL, fixtures from sqlalchemy import testing from sqlalchemy.testing.schema import Table, Column from sqlalchemy.testing import assert_raises, eq_ class Company(fixtures.ComparableEntity): pass class Person(fixtures.ComparableEntity): pass class Engineer(Person): pass class Manager(Person): pass class Boss(Manager): pass class Machine(fixtures.ComparableEntity): pass class Paperwork(fixtures.ComparableEntity): pass class _PolymorphicFixtureBase(fixtures.MappedTest, AssertsCompiledSQL): run_inserts = 'once' run_setup_mappers = 'once' run_deletes = None @classmethod def define_tables(cls, metadata): global people, engineers, managers, boss global companies, paperwork, machines companies = Table('companies', metadata, Column('company_id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', String(50))) people = Table('people', metadata, Column('person_id', Integer, primary_key=True, test_needs_autoincrement=True), Column('company_id', Integer, ForeignKey('companies.company_id')), Column('name', String(50)), Column('type', String(30))) engineers = Table('engineers', metadata, Column('person_id', Integer, ForeignKey('people.person_id'), primary_key=True), Column('status', 
String(30)), Column('engineer_name', String(50)), Column('primary_language', String(50))) machines = Table('machines', metadata, Column('machine_id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', String(50)), Column('engineer_id', Integer, ForeignKey('engineers.person_id'))) managers = Table('managers', metadata, Column('person_id', Integer, ForeignKey('people.person_id'), primary_key=True), Column('status', String(30)), Column('manager_name', String(50))) boss = Table('boss', metadata, Column('boss_id', Integer, ForeignKey('managers.person_id'), primary_key=True), Column('golf_swing', String(30))) paperwork = Table('paperwork', metadata, Column('paperwork_id', Integer, primary_key=True, test_needs_autoincrement=True), Column('description', String(50)), Column('person_id', Integer, ForeignKey('people.person_id'))) @classmethod def insert_data(cls): cls.e1 = e1 = Engineer( name="dilbert", engineer_name="dilbert", primary_language="java", status="regular engineer", paperwork=[ Paperwork(description="tps report #1"), Paperwork(description="tps report #2")], machines=[ Machine(name='IBM ThinkPad'), Machine(name='IPhone')]) cls.e2 = e2 = Engineer( name="wally", engineer_name="wally", primary_language="c++", status="regular engineer", paperwork=[ Paperwork(description="tps report #3"), Paperwork(description="tps report #4")], machines=[Machine(name="Commodore 64")]) cls.b1 = b1 = Boss( name="pointy haired boss", golf_swing="fore", manager_name="pointy", status="da boss", paperwork=[Paperwork(description="review #1")]) cls.m1 = m1 = Manager( name="dogbert", manager_name="dogbert", status="regular manager", paperwork=[ Paperwork(description="review #2"), Paperwork(description="review #3")]) cls.e3 = e3 = Engineer( name="vlad", engineer_name="vlad", primary_language="cobol", status="elbonian engineer", paperwork=[ Paperwork(description='elbonian missive #3')], machines=[ Machine(name="Commodore 64"), Machine(name="IBM 3270")]) cls.c1 = c1 = Company(name="MegaCorp, Inc.") c1.employees = [e1, e2, b1, m1] cls.c2 = c2 = Company(name="Elbonia, Inc.") c2.employees = [e3] sess = create_session() sess.add(c1) sess.add(c2) sess.flush() sess.expunge_all() cls.all_employees = [e1, e2, b1, m1, e3] cls.c1_employees = [e1, e2, b1, m1] cls.c2_employees = [e3] def _company_with_emps_machines_fixture(self): fixture = self._company_with_emps_fixture() fixture[0].employees[0].machines = [ Machine(name="IBM ThinkPad"), Machine(name="IPhone"), ] fixture[0].employees[1].machines = [ Machine(name="Commodore 64") ] return fixture def _company_with_emps_fixture(self): return [ Company( name="MegaCorp, Inc.", employees=[ Engineer( name="dilbert", engineer_name="dilbert", primary_language="java", status="regular engineer" ), Engineer( name="wally", engineer_name="wally", primary_language="c++", status="regular engineer"), Boss( name="pointy haired boss", golf_swing="fore", manager_name="pointy", status="da boss"), Manager( name="dogbert", manager_name="dogbert", status="regular manager"), ]), Company( name="Elbonia, Inc.", employees=[ Engineer( name="vlad", engineer_name="vlad", primary_language="cobol", status="elbonian engineer") ]) ] def _emps_wo_relationships_fixture(self): return [ Engineer( name="dilbert", engineer_name="dilbert", primary_language="java", status="regular engineer"), Engineer( name="wally", engineer_name="wally", primary_language="c++", status="regular engineer"), Boss( name="pointy haired boss", golf_swing="fore", manager_name="pointy", status="da boss"), Manager( 
name="dogbert", manager_name="dogbert", status="regular manager"), Engineer( name="vlad", engineer_name="vlad", primary_language="cobol", status="elbonian engineer") ] @classmethod def setup_mappers(cls): mapper(Company, companies, properties={ 'employees':relationship( Person, order_by=people.c.person_id)}) mapper(Machine, machines) person_with_polymorphic,\ manager_with_polymorphic = cls._get_polymorphics() mapper(Person, people, with_polymorphic=person_with_polymorphic, polymorphic_on=people.c.type, polymorphic_identity='person', order_by=people.c.person_id, properties={ 'paperwork':relationship( Paperwork, order_by=paperwork.c.paperwork_id)}) mapper(Engineer, engineers, inherits=Person, polymorphic_identity='engineer', properties={ 'machines':relationship( Machine, order_by=machines.c.machine_id)}) mapper(Manager, managers, with_polymorphic=manager_with_polymorphic, inherits=Person, polymorphic_identity='manager') mapper(Boss, boss, inherits=Manager, polymorphic_identity='boss') mapper(Paperwork, paperwork) class _Polymorphic(_PolymorphicFixtureBase): select_type = "" @classmethod def _get_polymorphics(cls): return None, None class _PolymorphicPolymorphic(_PolymorphicFixtureBase): select_type = "Polymorphic" @classmethod def _get_polymorphics(cls): return '*', '*' class _PolymorphicUnions(_PolymorphicFixtureBase): select_type = "Unions" @classmethod def _get_polymorphics(cls): people, engineers, managers, boss = \ cls.tables.people, cls.tables.engineers, \ cls.tables.managers, cls.tables.boss person_join = polymorphic_union({ 'engineer':people.join(engineers), 'manager':people.join(managers)}, None, 'pjoin') manager_join = people.join(managers).outerjoin(boss) person_with_polymorphic = ( [Person, Manager, Engineer], person_join) manager_with_polymorphic = ('*', manager_join) return person_with_polymorphic,\ manager_with_polymorphic class _PolymorphicAliasedJoins(_PolymorphicFixtureBase): select_type = "AliasedJoins" @classmethod def _get_polymorphics(cls): people, engineers, managers, boss = \ cls.tables.people, cls.tables.engineers, \ cls.tables.managers, cls.tables.boss person_join = people \ .outerjoin(engineers) \ .outerjoin(managers) \ .select(use_labels=True) \ .alias('pjoin') manager_join = people \ .join(managers) \ .outerjoin(boss) \ .select(use_labels=True) \ .alias('mjoin') person_with_polymorphic = ( [Person, Manager, Engineer], person_join) manager_with_polymorphic = ('*', manager_join) return person_with_polymorphic,\ manager_with_polymorphic class _PolymorphicJoins(_PolymorphicFixtureBase): select_type = "Joins" @classmethod def _get_polymorphics(cls): people, engineers, managers, boss = \ cls.tables.people, cls.tables.engineers, \ cls.tables.managers, cls.tables.boss person_join = people.outerjoin(engineers).outerjoin(managers) manager_join = people.join(managers).outerjoin(boss) person_with_polymorphic = ( [Person, Manager, Engineer], person_join) manager_with_polymorphic = ('*', manager_join) return person_with_polymorphic,\ manager_with_polymorphic SQLAlchemy-0.8.4/test/orm/inheritance/test_abc_inheritance.py0000644000076500000240000001713712251147172025020 0ustar classicstaff00000000000000from sqlalchemy import * from sqlalchemy.orm import * from sqlalchemy.orm.interfaces import ONETOMANY, MANYTOONE from sqlalchemy import testing from sqlalchemy.testing.schema import Table, Column from sqlalchemy.testing import fixtures def produce_test(parent, child, direction): """produce a testcase for A->B->C inheritance with a self-referential relationship between two of the 
classes, using either one-to-many or many-to-one. the old "no discriminator column" pattern is used. """ class ABCTest(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): global ta, tb, tc ta = ["a", metadata] ta.append(Column('id', Integer, primary_key=True, test_needs_autoincrement=True)), ta.append(Column('a_data', String(30))) if "a"== parent and direction == MANYTOONE: ta.append(Column('child_id', Integer, ForeignKey("%s.id" % child, use_alter=True, name="foo"))) elif "a" == child and direction == ONETOMANY: ta.append(Column('parent_id', Integer, ForeignKey("%s.id" % parent, use_alter=True, name="foo"))) ta = Table(*ta) tb = ["b", metadata] tb.append(Column('id', Integer, ForeignKey("a.id"), primary_key=True, )) tb.append(Column('b_data', String(30))) if "b"== parent and direction == MANYTOONE: tb.append(Column('child_id', Integer, ForeignKey("%s.id" % child, use_alter=True, name="foo"))) elif "b" == child and direction == ONETOMANY: tb.append(Column('parent_id', Integer, ForeignKey("%s.id" % parent, use_alter=True, name="foo"))) tb = Table(*tb) tc = ["c", metadata] tc.append(Column('id', Integer, ForeignKey("b.id"), primary_key=True, )) tc.append(Column('c_data', String(30))) if "c"== parent and direction == MANYTOONE: tc.append(Column('child_id', Integer, ForeignKey("%s.id" % child, use_alter=True, name="foo"))) elif "c" == child and direction == ONETOMANY: tc.append(Column('parent_id', Integer, ForeignKey("%s.id" % parent, use_alter=True, name="foo"))) tc = Table(*tc) def teardown(self): if direction == MANYTOONE: parent_table = {"a":ta, "b":tb, "c": tc}[parent] parent_table.update(values={parent_table.c.child_id:None}).execute() elif direction == ONETOMANY: child_table = {"a":ta, "b":tb, "c": tc}[child] child_table.update(values={child_table.c.parent_id:None}).execute() super(ABCTest, self).teardown() def test_roundtrip(self): parent_table = {"a":ta, "b":tb, "c": tc}[parent] child_table = {"a":ta, "b":tb, "c": tc}[child] remote_side = None if direction == MANYTOONE: foreign_keys = [parent_table.c.child_id] elif direction == ONETOMANY: foreign_keys = [child_table.c.parent_id] atob = ta.c.id==tb.c.id btoc = tc.c.id==tb.c.id if direction == ONETOMANY: relationshipjoin = parent_table.c.id==child_table.c.parent_id elif direction == MANYTOONE: relationshipjoin = parent_table.c.child_id==child_table.c.id if parent is child: remote_side = [child_table.c.id] abcjoin = polymorphic_union( {"a":ta.select(tb.c.id==None, from_obj=[ta.outerjoin(tb, onclause=atob)]), "b":ta.join(tb, onclause=atob).outerjoin(tc, onclause=btoc).select(tc.c.id==None).reduce_columns(), "c":tc.join(tb, onclause=btoc).join(ta, onclause=atob) },"type", "abcjoin" ) bcjoin = polymorphic_union( { "b":ta.join(tb, onclause=atob).outerjoin(tc, onclause=btoc).select(tc.c.id==None).reduce_columns(), "c":tc.join(tb, onclause=btoc).join(ta, onclause=atob) },"type", "bcjoin" ) class A(object): def __init__(self, name): self.a_data = name class B(A):pass class C(B):pass mapper(A, ta, polymorphic_on=abcjoin.c.type, with_polymorphic=('*', abcjoin), polymorphic_identity="a") mapper(B, tb, polymorphic_on=bcjoin.c.type, with_polymorphic=('*', bcjoin), polymorphic_identity="b", inherits=A, inherit_condition=atob) mapper(C, tc, polymorphic_identity="c", inherits=B, inherit_condition=btoc) parent_mapper = class_mapper({ta:A, tb:B, tc:C}[parent_table]) child_mapper = class_mapper({ta:A, tb:B, tc:C}[child_table]) parent_class = parent_mapper.class_ child_class = child_mapper.class_ parent_mapper.add_property("collection", 
relationship(child_mapper, primaryjoin=relationshipjoin, foreign_keys=foreign_keys, remote_side=remote_side, uselist=True)) sess = create_session() parent_obj = parent_class('parent1') child_obj = child_class('child1') somea = A('somea') someb = B('someb') somec = C('somec') #print "APPENDING", parent.__class__.__name__ , "TO", child.__class__.__name__ sess.add(parent_obj) parent_obj.collection.append(child_obj) if direction == ONETOMANY: child2 = child_class('child2') parent_obj.collection.append(child2) sess.add(child2) elif direction == MANYTOONE: parent2 = parent_class('parent2') parent2.collection.append(child_obj) sess.add(parent2) sess.add(somea) sess.add(someb) sess.add(somec) sess.flush() sess.expunge_all() # assert result via direct get() of parent object result = sess.query(parent_class).get(parent_obj.id) assert result.id == parent_obj.id assert result.collection[0].id == child_obj.id if direction == ONETOMANY: assert result.collection[1].id == child2.id elif direction == MANYTOONE: result2 = sess.query(parent_class).get(parent2.id) assert result2.id == parent2.id assert result2.collection[0].id == child_obj.id sess.expunge_all() # assert result via polymorphic load of parent object result = sess.query(A).filter_by(id=parent_obj.id).one() assert result.id == parent_obj.id assert result.collection[0].id == child_obj.id if direction == ONETOMANY: assert result.collection[1].id == child2.id elif direction == MANYTOONE: result2 = sess.query(A).filter_by(id=parent2.id).one() assert result2.id == parent2.id assert result2.collection[0].id == child_obj.id ABCTest.__name__ = "Test%sTo%s%s" % (parent, child, (direction is ONETOMANY and "O2M" or "M2O")) return ABCTest # test all combinations of polymorphic a/b/c related to another of a/b/c for parent in ["a", "b", "c"]: for child in ["a", "b", "c"]: for direction in [ONETOMANY, MANYTOONE]: testclass = produce_test(parent, child, direction) exec("%s = testclass" % testclass.__name__) del testclass del produce_testSQLAlchemy-0.8.4/test/orm/inheritance/test_abc_polymorphic.py0000644000076500000240000000635112251147172025070 0ustar classicstaff00000000000000from sqlalchemy import * from sqlalchemy import util from sqlalchemy.orm import * from sqlalchemy.testing.util import function_named from sqlalchemy.testing import fixtures from test.orm import _fixtures from sqlalchemy.testing import eq_ from sqlalchemy.testing.schema import Table, Column class ABCTest(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): global a, b, c a = Table('a', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('adata', String(30)), Column('type', String(30)), ) b = Table('b', metadata, Column('id', Integer, ForeignKey('a.id'), primary_key=True), Column('bdata', String(30))) c = Table('c', metadata, Column('id', Integer, ForeignKey('b.id'), primary_key=True), Column('cdata', String(30))) def make_test(fetchtype): def test_roundtrip(self): class A(fixtures.ComparableEntity):pass class B(A):pass class C(B):pass if fetchtype == 'union': abc = a.outerjoin(b).outerjoin(c) bc = a.join(b).outerjoin(c) else: abc = bc = None mapper(A, a, with_polymorphic=('*', abc), polymorphic_on=a.c.type, polymorphic_identity='a') mapper(B, b, with_polymorphic=('*', bc), inherits=A, polymorphic_identity='b') mapper(C, c, inherits=B, polymorphic_identity='c') a1 = A(adata='a1') b1 = B(bdata='b1', adata='b1') b2 = B(bdata='b2', adata='b2') b3 = B(bdata='b3', adata='b3') c1 = C(cdata='c1', bdata='c1', adata='c1') c2 = C(cdata='c2', 
bdata='c2', adata='c2') c3 = C(cdata='c2', bdata='c2', adata='c2') sess = create_session() for x in (a1, b1, b2, b3, c1, c2, c3): sess.add(x) sess.flush() sess.expunge_all() #for obj in sess.query(A).all(): # print obj eq_( [ A(adata='a1'), B(bdata='b1', adata='b1'), B(bdata='b2', adata='b2'), B(bdata='b3', adata='b3'), C(cdata='c1', bdata='c1', adata='c1'), C(cdata='c2', bdata='c2', adata='c2'), C(cdata='c2', bdata='c2', adata='c2'), ], sess.query(A).order_by(A.id).all()) eq_([ B(bdata='b1', adata='b1'), B(bdata='b2', adata='b2'), B(bdata='b3', adata='b3'), C(cdata='c1', bdata='c1', adata='c1'), C(cdata='c2', bdata='c2', adata='c2'), C(cdata='c2', bdata='c2', adata='c2'), ], sess.query(B).order_by(A.id).all()) eq_([ C(cdata='c1', bdata='c1', adata='c1'), C(cdata='c2', bdata='c2', adata='c2'), C(cdata='c2', bdata='c2', adata='c2'), ], sess.query(C).order_by(A.id).all()) test_roundtrip = function_named( test_roundtrip, 'test_%s' % fetchtype) return test_roundtrip test_union = make_test('union') test_none = make_test('none') SQLAlchemy-0.8.4/test/orm/inheritance/test_assorted_poly.py0000644000076500000240000016124012251150015024572 0ustar classicstaff00000000000000"""Miscellaneous inheritance-related tests, many very old. These are generally tests derived from specific user issues. """ from sqlalchemy.testing import eq_ from sqlalchemy import * from sqlalchemy import util from sqlalchemy.orm import * from sqlalchemy.orm.interfaces import MANYTOONE from sqlalchemy.testing import AssertsExecutionResults from sqlalchemy import testing from sqlalchemy.testing.util import function_named from sqlalchemy.testing import fixtures from test.orm import _fixtures from sqlalchemy.testing import eq_ from sqlalchemy.testing.schema import Table, Column class AttrSettable(object): def __init__(self, **kwargs): [setattr(self, k, v) for k, v in kwargs.iteritems()] def __repr__(self): return self.__class__.__name__ + "(%s)" % (hex(id(self))) class RelationshipTest1(fixtures.MappedTest): """test self-referential relationships on polymorphic mappers""" @classmethod def define_tables(cls, metadata): global people, managers people = Table('people', metadata, Column('person_id', Integer, Sequence('person_id_seq', optional=True), primary_key=True), Column('manager_id', Integer, ForeignKey('managers.person_id', use_alter=True, name="mpid_fq")), Column('name', String(50)), Column('type', String(30))) managers = Table('managers', metadata, Column('person_id', Integer, ForeignKey('people.person_id'), primary_key=True), Column('status', String(30)), Column('manager_name', String(50)) ) def teardown(self): people.update(values={people.c.manager_id:None}).execute() super(RelationshipTest1, self).teardown() def test_parent_refs_descendant(self): class Person(AttrSettable): pass class Manager(Person): pass mapper(Person, people, properties={ 'manager':relationship(Manager, primaryjoin=( people.c.manager_id == managers.c.person_id), uselist=False, post_update=True) }) mapper(Manager, managers, inherits=Person, inherit_condition=people.c.person_id==managers.c.person_id) eq_(class_mapper(Person).get_property('manager').synchronize_pairs, [(managers.c.person_id,people.c.manager_id)]) session = create_session() p = Person(name='some person') m = Manager(name='some manager') p.manager = m session.add(p) session.flush() session.expunge_all() p = session.query(Person).get(p.person_id) m = session.query(Manager).get(m.person_id) assert p.manager is m def test_descendant_refs_parent(self): class Person(AttrSettable): pass class 
Manager(Person): pass mapper(Person, people) mapper(Manager, managers, inherits=Person, inherit_condition=people.c.person_id== managers.c.person_id, properties={ 'employee':relationship(Person, primaryjoin=( people.c.manager_id == managers.c.person_id), foreign_keys=[people.c.manager_id], uselist=False, post_update=True) }) session = create_session() p = Person(name='some person') m = Manager(name='some manager') m.employee = p session.add(m) session.flush() session.expunge_all() p = session.query(Person).get(p.person_id) m = session.query(Manager).get(m.person_id) assert m.employee is p class RelationshipTest2(fixtures.MappedTest): """test self-referential relationships on polymorphic mappers""" @classmethod def define_tables(cls, metadata): global people, managers, data people = Table('people', metadata, Column('person_id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', String(50)), Column('type', String(30))) managers = Table('managers', metadata, Column('person_id', Integer, ForeignKey('people.person_id'), primary_key=True), Column('manager_id', Integer, ForeignKey('people.person_id')), Column('status', String(30)), ) data = Table('data', metadata, Column('person_id', Integer, ForeignKey('managers.person_id'), primary_key=True), Column('data', String(30)) ) def testrelationshiponsubclass_j1_nodata(self): self.do_test("join1", False) def testrelationshiponsubclass_j2_nodata(self): self.do_test("join2", False) def testrelationshiponsubclass_j1_data(self): self.do_test("join1", True) def testrelationshiponsubclass_j2_data(self): self.do_test("join2", True) def testrelationshiponsubclass_j3_nodata(self): self.do_test("join3", False) def testrelationshiponsubclass_j3_data(self): self.do_test("join3", True) def do_test(self, jointype="join1", usedata=False): class Person(AttrSettable): pass class Manager(Person): pass if jointype == "join1": poly_union = polymorphic_union({ 'person':people.select(people.c.type=='person'), 'manager':join(people, managers, people.c.person_id==managers.c.person_id) }, None) polymorphic_on=poly_union.c.type elif jointype == "join2": poly_union = polymorphic_union({ 'person':people.select(people.c.type=='person'), 'manager':managers.join(people, people.c.person_id==managers.c.person_id) }, None) polymorphic_on=poly_union.c.type elif jointype == "join3": poly_union = None polymorphic_on = people.c.type if usedata: class Data(object): def __init__(self, data): self.data = data mapper(Data, data) mapper(Person, people, with_polymorphic=('*', poly_union), polymorphic_identity='person', polymorphic_on=polymorphic_on) if usedata: mapper(Manager, managers, inherits=Person, inherit_condition=people.c.person_id== managers.c.person_id, polymorphic_identity='manager', properties={ 'colleague':relationship( Person, primaryjoin=managers.c.manager_id== people.c.person_id, lazy='select', uselist=False), 'data':relationship(Data, uselist=False) } ) else: mapper(Manager, managers, inherits=Person, inherit_condition=people.c.person_id== managers.c.person_id, polymorphic_identity='manager', properties={ 'colleague':relationship(Person, primaryjoin=managers.c.manager_id== people.c.person_id, lazy='select', uselist=False) } ) sess = create_session() p = Person(name='person1') m = Manager(name='manager1') m.colleague = p if usedata: m.data = Data('ms data') sess.add(m) sess.flush() sess.expunge_all() p = sess.query(Person).get(p.person_id) m = sess.query(Manager).get(m.person_id) assert m.colleague is p if usedata: assert m.data.data == 'ms data' class 
RelationshipTest3(fixtures.MappedTest): """test self-referential relationships on polymorphic mappers""" @classmethod def define_tables(cls, metadata): global people, managers, data people = Table('people', metadata, Column('person_id', Integer, primary_key=True, test_needs_autoincrement=True), Column('colleague_id', Integer, ForeignKey('people.person_id')), Column('name', String(50)), Column('type', String(30))) managers = Table('managers', metadata, Column('person_id', Integer, ForeignKey('people.person_id'), primary_key=True), Column('status', String(30)), ) data = Table('data', metadata, Column('person_id', Integer, ForeignKey('people.person_id'), primary_key=True), Column('data', String(30)) ) def _generate_test(jointype="join1", usedata=False): def do_test(self): class Person(AttrSettable): pass class Manager(Person): pass if usedata: class Data(object): def __init__(self, data): self.data = data if jointype == "join1": poly_union = polymorphic_union({ 'manager':managers.join(people, people.c.person_id==managers.c.person_id), 'person':people.select(people.c.type=='person') }, None) elif jointype =="join2": poly_union = polymorphic_union({ 'manager':join(people, managers, people.c.person_id==managers.c.person_id), 'person':people.select(people.c.type=='person') }, None) elif jointype == 'join3': poly_union = people.outerjoin(managers) elif jointype == "join4": poly_union=None if usedata: mapper(Data, data) if usedata: mapper(Person, people, with_polymorphic=('*', poly_union), polymorphic_identity='person', polymorphic_on=people.c.type, properties={ 'colleagues':relationship(Person, primaryjoin=people.c.colleague_id== people.c.person_id, remote_side=people.c.colleague_id, uselist=True), 'data':relationship(Data, uselist=False) } ) else: mapper(Person, people, with_polymorphic=('*', poly_union), polymorphic_identity='person', polymorphic_on=people.c.type, properties={ 'colleagues':relationship(Person, primaryjoin=people.c.colleague_id==people.c.person_id, remote_side=people.c.colleague_id, uselist=True) } ) mapper(Manager, managers, inherits=Person, inherit_condition=people.c.person_id== managers.c.person_id, polymorphic_identity='manager') sess = create_session() p = Person(name='person1') p2 = Person(name='person2') p3 = Person(name='person3') m = Manager(name='manager1') p.colleagues.append(p2) m.colleagues.append(p3) if usedata: p.data = Data('ps data') m.data = Data('ms data') sess.add(m) sess.add(p) sess.flush() sess.expunge_all() p = sess.query(Person).get(p.person_id) p2 = sess.query(Person).get(p2.person_id) p3 = sess.query(Person).get(p3.person_id) m = sess.query(Person).get(m.person_id) assert len(p.colleagues) == 1 assert p.colleagues == [p2] assert m.colleagues == [p3] if usedata: assert p.data.data == 'ps data' assert m.data.data == 'ms data' do_test = function_named( do_test, 'test_relationship_on_base_class_%s_%s' % ( jointype, data and "nodata" or "data")) return do_test for jointype in ["join1", "join2", "join3", "join4"]: for data in (True, False): func = _generate_test(jointype, data) setattr(RelationshipTest3, func.__name__, func) del func class RelationshipTest4(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): global people, engineers, managers, cars people = Table('people', metadata, Column('person_id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', String(50))) engineers = Table('engineers', metadata, Column('person_id', Integer, ForeignKey('people.person_id'), primary_key=True), Column('status', String(30))) 
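        # 'managers' below mirrors 'engineers' as a second joined-table
        # subclass of 'people'; 'cars' then references the base table through
        # its 'owner' foreign key, which is what test_many_to_one_polymorphic
        # exercises.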
managers = Table('managers', metadata, Column('person_id', Integer, ForeignKey('people.person_id'), primary_key=True), Column('longer_status', String(70))) cars = Table('cars', metadata, Column('car_id', Integer, primary_key=True, test_needs_autoincrement=True), Column('owner', Integer, ForeignKey('people.person_id'))) def test_many_to_one_polymorphic(self): """in this test, the polymorphic union is between two subclasses, but does not include the base table by itself in the union. however, the primaryjoin condition is going to be against the base table, and its a many-to-one relationship (unlike the test in polymorph.py) so the column in the base table is explicit. Can the ClauseAdapter figure out how to alias the primaryjoin to the polymorphic union ?""" # class definitions class Person(object): def __init__(self, **kwargs): for key, value in kwargs.iteritems(): setattr(self, key, value) def __repr__(self): return "Ordinary person %s" % self.name class Engineer(Person): def __repr__(self): return "Engineer %s, status %s" % \ (self.name, self.status) class Manager(Person): def __repr__(self): return "Manager %s, status %s" % \ (self.name, self.longer_status) class Car(object): def __init__(self, **kwargs): for key, value in kwargs.iteritems(): setattr(self, key, value) def __repr__(self): return "Car number %d" % self.car_id # create a union that represents both types of joins. employee_join = polymorphic_union( { 'engineer':people.join(engineers), 'manager':people.join(managers), }, "type", 'employee_join') person_mapper = mapper(Person, people, with_polymorphic=('*', employee_join), polymorphic_on=employee_join.c.type, polymorphic_identity='person') engineer_mapper = mapper(Engineer, engineers, inherits=person_mapper, polymorphic_identity='engineer') manager_mapper = mapper(Manager, managers, inherits=person_mapper, polymorphic_identity='manager') car_mapper = mapper(Car, cars, properties= {'employee': relationship(person_mapper)}) session = create_session() # creating 5 managers named from M1 to E5 for i in range(1,5): session.add(Manager(name="M%d" % i, longer_status="YYYYYYYYY")) # creating 5 engineers named from E1 to E5 for i in range(1,5): session.add(Engineer(name="E%d" % i,status="X")) session.flush() engineer4 = session.query(Engineer).\ filter(Engineer.name=="E4").first() manager3 = session.query(Manager).\ filter(Manager.name=="M3").first() car1 = Car(employee=engineer4) session.add(car1) car2 = Car(employee=manager3) session.add(car2) session.flush() session.expunge_all() def go(): testcar = session.query(Car).options( joinedload('employee') ).get(car1.car_id) assert str(testcar.employee) == "Engineer E4, status X" self.assert_sql_count(testing.db, go, 1) car1 = session.query(Car).get(car1.car_id) usingGet = session.query(person_mapper).get(car1.owner) usingProperty = car1.employee assert str(engineer4) == "Engineer E4, status X" assert str(usingGet) == "Engineer E4, status X" assert str(usingProperty) == "Engineer E4, status X" session.expunge_all() # and now for the lightning round, eager ! 
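        # repeat the joinedload() query against the now-cleared session; the
        # assert_sql_count() check below verifies that the eager load of
        # 'employee' through the polymorphic union completes in a single
        # statement.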
def go(): testcar = session.query(Car).options( joinedload('employee') ).get(car1.car_id) assert str(testcar.employee) == "Engineer E4, status X" self.assert_sql_count(testing.db, go, 1) session.expunge_all() s = session.query(Car) c = s.join("employee").filter(Person.name=="E4")[0] assert c.car_id==car1.car_id class RelationshipTest5(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): global people, engineers, managers, cars people = Table('people', metadata, Column('person_id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', String(50)), Column('type', String(50))) engineers = Table('engineers', metadata, Column('person_id', Integer, ForeignKey('people.person_id'), primary_key=True), Column('status', String(30))) managers = Table('managers', metadata, Column('person_id', Integer, ForeignKey('people.person_id'), primary_key=True), Column('longer_status', String(70))) cars = Table('cars', metadata, Column('car_id', Integer, primary_key=True, test_needs_autoincrement=True), Column('owner', Integer, ForeignKey('people.person_id'))) def test_eager_empty(self): """test parent object with child relationship to an inheriting mapper, using eager loads, works when there are no child objects present""" class Person(object): def __init__(self, **kwargs): for key, value in kwargs.iteritems(): setattr(self, key, value) def __repr__(self): return "Ordinary person %s" % self.name class Engineer(Person): def __repr__(self): return "Engineer %s, status %s" % \ (self.name, self.status) class Manager(Person): def __repr__(self): return "Manager %s, status %s" % \ (self.name, self.longer_status) class Car(object): def __init__(self, **kwargs): for key, value in kwargs.iteritems(): setattr(self, key, value) def __repr__(self): return "Car number %d" % self.car_id person_mapper = mapper(Person, people, polymorphic_on=people.c.type, polymorphic_identity='person') engineer_mapper = mapper(Engineer, engineers, inherits=person_mapper, polymorphic_identity='engineer') manager_mapper = mapper(Manager, managers, inherits=person_mapper, polymorphic_identity='manager') car_mapper = mapper(Car, cars, properties= { 'manager':relationship( manager_mapper, lazy='joined')}) sess = create_session() car1 = Car() car2 = Car() car2.manager = Manager() sess.add(car1) sess.add(car2) sess.flush() sess.expunge_all() carlist = sess.query(Car).all() assert carlist[0].manager is None assert carlist[1].manager.person_id == car2.manager.person_id class RelationshipTest6(fixtures.MappedTest): """test self-referential relationships on a single joined-table inheritance mapper""" @classmethod def define_tables(cls, metadata): global people, managers, data people = Table('people', metadata, Column('person_id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', String(50)), ) managers = Table('managers', metadata, Column('person_id', Integer, ForeignKey('people.person_id'), primary_key=True), Column('colleague_id', Integer, ForeignKey('managers.person_id')), Column('status', String(30)), ) def test_basic(self): class Person(AttrSettable): pass class Manager(Person): pass mapper(Person, people) mapper(Manager, managers, inherits=Person, inherit_condition=people.c.person_id==\ managers.c.person_id, properties={ 'colleague':relationship(Manager, primaryjoin=managers.c.colleague_id==\ managers.c.person_id, lazy='select', uselist=False) } ) sess = create_session() m = Manager(name='manager1') m2 =Manager(name='manager2') m.colleague = m2 sess.add(m) sess.flush() sess.expunge_all() m 
= sess.query(Manager).get(m.person_id) m2 = sess.query(Manager).get(m2.person_id) assert m.colleague is m2 class RelationshipTest7(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): global people, engineers, managers, cars, offroad_cars cars = Table('cars', metadata, Column('car_id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', String(30))) offroad_cars = Table('offroad_cars', metadata, Column('car_id',Integer, ForeignKey('cars.car_id'), nullable=False,primary_key=True)) people = Table('people', metadata, Column('person_id', Integer, primary_key=True, test_needs_autoincrement=True), Column('car_id', Integer, ForeignKey('cars.car_id'), nullable=False), Column('name', String(50))) engineers = Table('engineers', metadata, Column('person_id', Integer, ForeignKey('people.person_id'), primary_key=True), Column('field', String(30))) managers = Table('managers', metadata, Column('person_id', Integer, ForeignKey('people.person_id'), primary_key=True), Column('category', String(70))) def test_manytoone_lazyload(self): """test that lazy load clause to a polymorphic child mapper generates correctly [ticket:493]""" class PersistentObject(object): def __init__(self, **kwargs): for key, value in kwargs.iteritems(): setattr(self, key, value) class Status(PersistentObject): def __repr__(self): return "Status %s" % self.name class Person(PersistentObject): def __repr__(self): return "Ordinary person %s" % self.name class Engineer(Person): def __repr__(self): return "Engineer %s, field %s" % (self.name, self.field) class Manager(Person): def __repr__(self): return "Manager %s, category %s" % (self.name, self.category) class Car(PersistentObject): def __repr__(self): return "Car number %d, name %s" % \ (self.car_id, self.name) class Offraod_Car(Car): def __repr__(self): return "Offroad Car number %d, name %s" % \ (self.car_id,self.name) employee_join = polymorphic_union( { 'engineer':people.join(engineers), 'manager':people.join(managers), }, "type", 'employee_join') car_join = polymorphic_union( { 'car' : cars.outerjoin(offroad_cars).\ select(offroad_cars.c.car_id == None).reduce_columns(), 'offroad' : cars.join(offroad_cars) }, "type", 'car_join') car_mapper = mapper(Car, cars, with_polymorphic=('*', car_join) ,polymorphic_on=car_join.c.type, polymorphic_identity='car', ) offroad_car_mapper = mapper(Offraod_Car, offroad_cars, inherits=car_mapper, polymorphic_identity='offroad') person_mapper = mapper(Person, people, with_polymorphic=('*', employee_join), polymorphic_on=employee_join.c.type, polymorphic_identity='person', properties={ 'car':relationship(car_mapper) }) engineer_mapper = mapper(Engineer, engineers, inherits=person_mapper, polymorphic_identity='engineer') manager_mapper = mapper(Manager, managers, inherits=person_mapper, polymorphic_identity='manager') session = create_session() basic_car=Car(name="basic") offroad_car=Offraod_Car(name="offroad") for i in range(1,4): if i%2: car=Car() else: car=Offraod_Car() session.add(Manager(name="M%d" % i, category="YYYYYYYYY",car=car)) session.add(Engineer(name="E%d" % i,field="X",car=car)) session.flush() session.expunge_all() r = session.query(Person).all() for p in r: assert p.car_id == p.car.car_id class RelationshipTest8(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): global taggable, users taggable = Table('taggable', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('type', String(30)), Column('owner_id', Integer, ForeignKey('taggable.id')), ) 
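        # 'users' joins to 'taggable' on its primary key, providing the
        # joined-table User subclass that test_selfref_onjoined maps with
        # inherit_condition=users.c.id == taggable.c.id.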
users = Table ('users', metadata, Column('id', Integer, ForeignKey('taggable.id'), primary_key=True), Column('data', String(50)), ) def test_selfref_onjoined(self): class Taggable(fixtures.ComparableEntity): pass class User(Taggable): pass mapper( Taggable, taggable, polymorphic_on=taggable.c.type, polymorphic_identity='taggable', properties = { 'owner' : relationship (User, primaryjoin=taggable.c.owner_id ==taggable.c.id, remote_side=taggable.c.id ), }) mapper(User, users, inherits=Taggable, polymorphic_identity='user', inherit_condition=users.c.id == taggable.c.id, ) u1 = User(data='u1') t1 = Taggable(owner=u1) sess = create_session() sess.add(t1) sess.flush() sess.expunge_all() eq_( sess.query(Taggable).order_by(Taggable.id).all(), [User(data='u1'), Taggable(owner=User(data='u1'))] ) class GenerativeTest(fixtures.TestBase, AssertsExecutionResults): @classmethod def setup_class(cls): # cars---owned by--- people (abstract) --- has a --- status # | ^ ^ | # | | | | # | engineers managers | # | | # +--------------------------------------- has a ------+ global metadata, status, people, engineers, managers, cars metadata = MetaData(testing.db) # table definitions status = Table('status', metadata, Column('status_id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', String(20))) people = Table('people', metadata, Column('person_id', Integer, primary_key=True, test_needs_autoincrement=True), Column('status_id', Integer, ForeignKey('status.status_id'), nullable=False), Column('name', String(50))) engineers = Table('engineers', metadata, Column('person_id', Integer, ForeignKey('people.person_id'), primary_key=True), Column('field', String(30))) managers = Table('managers', metadata, Column('person_id', Integer, ForeignKey('people.person_id'), primary_key=True), Column('category', String(70))) cars = Table('cars', metadata, Column('car_id', Integer, primary_key=True, test_needs_autoincrement=True), Column('status_id', Integer, ForeignKey('status.status_id'), nullable=False), Column('owner', Integer, ForeignKey('people.person_id'), nullable=False)) metadata.create_all() @classmethod def teardown_class(cls): metadata.drop_all() def teardown(self): clear_mappers() for t in reversed(metadata.sorted_tables): t.delete().execute() def testjointo(self): # class definitions class PersistentObject(object): def __init__(self, **kwargs): for key, value in kwargs.iteritems(): setattr(self, key, value) class Status(PersistentObject): def __repr__(self): return "Status %s" % self.name class Person(PersistentObject): def __repr__(self): return "Ordinary person %s" % self.name class Engineer(Person): def __repr__(self): return "Engineer %s, field %s, status %s" % ( self.name, self.field, self.status) class Manager(Person): def __repr__(self): return "Manager %s, category %s, status %s" % ( self.name, self.category, self.status) class Car(PersistentObject): def __repr__(self): return "Car number %d" % self.car_id # create a union that represents both types of joins. 
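        # polymorphic_union() emits a UNION ALL of the two joins and adds a
        # literal 'type' discriminator column, aliased as 'employee_join';
        # the Person mapper below selects from that alias polymorphically.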
employee_join = polymorphic_union( { 'engineer':people.join(engineers), 'manager':people.join(managers), }, "type", 'employee_join') status_mapper = mapper(Status, status) person_mapper = mapper(Person, people, with_polymorphic=('*', employee_join), polymorphic_on=employee_join.c.type, polymorphic_identity='person', properties={'status':relationship(status_mapper)}) engineer_mapper = mapper(Engineer, engineers, inherits=person_mapper, polymorphic_identity='engineer') manager_mapper = mapper(Manager, managers, inherits=person_mapper, polymorphic_identity='manager') car_mapper = mapper(Car, cars, properties= { 'employee':relationship(person_mapper), 'status':relationship(status_mapper)}) session = create_session() active = Status(name="active") dead = Status(name="dead") session.add(active) session.add(dead) session.flush() # TODO: we haven't created assertions for all # the data combinations created here # creating 5 managers named from M1 to M5 # and 5 engineers named from E1 to E5 # M4, M5, E4 and E5 are dead for i in range(1,5): if i<4: st=active else: st=dead session.add(Manager(name="M%d" % i, category="YYYYYYYYY",status=st)) session.add(Engineer(name="E%d" % i,field="X",status=st)) session.flush() # get E4 engineer4 = session.query(engineer_mapper).\ filter_by(name="E4").one() # create 2 cars for E4, one active and one dead car1 = Car(employee=engineer4,status=active) car2 = Car(employee=engineer4,status=dead) session.add(car1) session.add(car2) session.flush() # this particular adapt used to cause a recursion overflow; # added here for testing e = exists([Car.owner], Car.owner==employee_join.c.person_id) Query(Person)._adapt_clause(employee_join, False, False) r = session.query(Person).filter(Person.name.like('%2')).\ join('status').\ filter_by(name="active").\ order_by(Person.person_id) eq_(str(list(r)), "[Manager M2, category YYYYYYYYY, status " "Status active, Engineer E2, field X, " "status Status active]") r = session.query(Engineer).join('status').\ filter(Person.name.in_( ['E2', 'E3', 'E4', 'M4', 'M2', 'M1']) & (status.c.name=="active")).order_by(Person.name) eq_(str(list(r)), "[Engineer E2, field X, status Status " "active, Engineer E3, field X, status " "Status active]") r = session.query(Person).filter(exists([1], Car.owner==Person.person_id)) eq_(str(list(r)), "[Engineer E4, field X, status Status dead]") class MultiLevelTest(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): global table_Employee, table_Engineer, table_Manager table_Employee = Table( 'Employee', metadata, Column( 'name', type_= String(100), ), Column( 'id', primary_key= True, type_= Integer, test_needs_autoincrement=True), Column( 'atype', type_= String(100), ), ) table_Engineer = Table( 'Engineer', metadata, Column( 'machine', type_= String(100), ), Column( 'id', Integer, ForeignKey( 'Employee.id', ), primary_key= True), ) table_Manager = Table( 'Manager', metadata, Column( 'duties', type_= String(100), ), Column( 'id', Integer, ForeignKey( 'Engineer.id', ), primary_key= True, ), ) def test_threelevels(self): class Employee( object): def set( me, **kargs): for k,v in kargs.iteritems(): setattr( me, k, v) return me def __str__(me): return str(me.__class__.__name__)+':'+str(me.name) __repr__ = __str__ class Engineer(Employee): pass class Manager(Engineer): pass pu_Employee = polymorphic_union( { 'Manager': table_Employee.join( table_Engineer).join( table_Manager), 'Engineer': select([table_Employee, table_Engineer.c.machine], table_Employee.c.atype == 'Engineer', from_obj=[ 
table_Employee.join(table_Engineer)]), 'Employee': table_Employee.select( table_Employee.c.atype == 'Employee'), }, None, 'pu_employee', ) mapper_Employee = mapper( Employee, table_Employee, polymorphic_identity= 'Employee', polymorphic_on= pu_Employee.c.atype, with_polymorphic=('*', pu_Employee), ) pu_Engineer = polymorphic_union( { 'Manager': table_Employee.join( table_Engineer). join( table_Manager), 'Engineer': select([table_Employee, table_Engineer.c.machine], table_Employee.c.atype == 'Engineer', from_obj=[ table_Employee.join(table_Engineer) ]), }, None, 'pu_engineer', ) mapper_Engineer = mapper( Engineer, table_Engineer, inherit_condition= table_Engineer.c.id == \ table_Employee.c.id, inherits= mapper_Employee, polymorphic_identity= 'Engineer', polymorphic_on= pu_Engineer.c.atype, with_polymorphic=('*', pu_Engineer), ) mapper_Manager = mapper( Manager, table_Manager, inherit_condition= table_Manager.c.id == \ table_Engineer.c.id, inherits= mapper_Engineer, polymorphic_identity= 'Manager', ) a = Employee().set( name= 'one') b = Engineer().set( egn= 'two', machine= 'any') c = Manager().set( name= 'head', machine= 'fast', duties= 'many') session = create_session() session.add(a) session.add(b) session.add(c) session.flush() assert set(session.query(Employee).all()) == set([a,b,c]) assert set(session.query( Engineer).all()) == set([b,c]) assert session.query( Manager).all() == [c] class ManyToManyPolyTest(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): global base_item_table, item_table, base_item_collection_table, \ collection_table base_item_table = Table( 'base_item', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('child_name', String(255), default=None)) item_table = Table( 'item', metadata, Column('id', Integer, ForeignKey('base_item.id'), primary_key=True), Column('dummy', Integer, default=0)) base_item_collection_table = Table( 'base_item_collection', metadata, Column('item_id', Integer, ForeignKey('base_item.id')), Column('collection_id', Integer, ForeignKey('collection.id'))) collection_table = Table( 'collection', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', Unicode(255))) def test_pjoin_compile(self): """test that remote_side columns in the secondary join table arent attempted to be matched to the target polymorphic selectable""" class BaseItem(object): pass class Item(BaseItem): pass class Collection(object): pass item_join = polymorphic_union( { 'BaseItem':base_item_table.select( base_item_table.c.child_name=='BaseItem'), 'Item':base_item_table.join(item_table), }, None, 'item_join') mapper( BaseItem, base_item_table, with_polymorphic=('*', item_join), polymorphic_on=base_item_table.c.child_name, polymorphic_identity='BaseItem', properties=dict(collections=relationship(Collection, secondary=base_item_collection_table, backref="items"))) mapper( Item, item_table, inherits=BaseItem, polymorphic_identity='Item') mapper(Collection, collection_table) class_mapper(BaseItem) class CustomPKTest(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): global t1, t2 t1 = Table('t1', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('type', String(30), nullable=False), Column('data', String(30))) # note that the primary key column in t2 is named differently t2 = Table('t2', metadata, Column('t2id', Integer, ForeignKey('t1.id'), primary_key=True), Column('t2data', String(30))) def test_custompk(self): """test that the 
primary_key attribute is propagated to the polymorphic mapper""" class T1(object):pass class T2(T1):pass # create a polymorphic union with the select against the base table first. # with the join being second, the alias of the union will # pick up two "primary key" columns. technically the alias should have a # 2-col pk in any case but the leading select has a NULL for the "t2id" column d = util.OrderedDict() d['t1'] = t1.select(t1.c.type=='t1') d['t2'] = t1.join(t2) pjoin = polymorphic_union(d, None, 'pjoin') mapper(T1, t1, polymorphic_on=t1.c.type, polymorphic_identity='t1', with_polymorphic=('*', pjoin), primary_key=[pjoin.c.id]) mapper(T2, t2, inherits=T1, polymorphic_identity='t2') ot1 = T1() ot2 = T2() sess = create_session() sess.add(ot1) sess.add(ot2) sess.flush() sess.expunge_all() # query using get(), using only one value. # this requires the select_table mapper # has the same single-col primary key. assert sess.query(T1).get(ot1.id).id == ot1.id ot1 = sess.query(T1).get(ot1.id) ot1.data = 'hi' sess.flush() def test_pk_collapses(self): """test that a composite primary key attribute formed by a join is "collapsed" into its minimal columns""" class T1(object):pass class T2(T1):pass # create a polymorphic union with the select against the base table first. # with the join being second, the alias of the union will # pick up two "primary key" columns. technically the alias should have a # 2-col pk in any case but the leading select has a NULL for the "t2id" column d = util.OrderedDict() d['t1'] = t1.select(t1.c.type=='t1') d['t2'] = t1.join(t2) pjoin = polymorphic_union(d, None, 'pjoin') mapper(T1, t1, polymorphic_on=t1.c.type, polymorphic_identity='t1', with_polymorphic=('*', pjoin)) mapper(T2, t2, inherits=T1, polymorphic_identity='t2') assert len(class_mapper(T1).primary_key) == 1 ot1 = T1() ot2 = T2() sess = create_session() sess.add(ot1) sess.add(ot2) sess.flush() sess.expunge_all() # query using get(), using only one value. this requires the # select_table mapper # has the same single-col primary key. 
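        # the composite key from the 'pjoin' alias was collapsed to a single
        # column (asserted via len(class_mapper(T1).primary_key) == 1 above),
        # so a scalar identity value suffices for get().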
assert sess.query(T1).get(ot1.id).id == ot1.id ot1 = sess.query(T1).get(ot1.id) ot1.data = 'hi' sess.flush() class InheritingEagerTest(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): global people, employees, tags, peopleTags people = Table('people', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('_type', String(30), nullable=False), ) employees = Table('employees', metadata, Column('id', Integer, ForeignKey('people.id'), primary_key=True), ) tags = Table('tags', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('label', String(50), nullable=False), ) peopleTags = Table('peopleTags', metadata, Column('person_id', Integer, ForeignKey('people.id')), Column('tag_id', Integer, ForeignKey('tags.id')), ) def test_basic(self): """test that Query uses the full set of mapper._eager_loaders when generating SQL""" class Person(fixtures.ComparableEntity): pass class Employee(Person): def __init__(self, name='bob'): self.name = name class Tag(fixtures.ComparableEntity): def __init__(self, label): self.label = label mapper(Person, people, polymorphic_on=people.c._type, polymorphic_identity='person', properties={ 'tags': relationship(Tag, secondary=peopleTags, backref='people', lazy='joined') }) mapper(Employee, employees, inherits=Person, polymorphic_identity='employee') mapper(Tag, tags) session = create_session() bob = Employee() session.add(bob) tag = Tag('crazy') bob.tags.append(tag) tag = Tag('funny') bob.tags.append(tag) session.flush() session.expunge_all() # query from Employee with limit, query needs to apply eager limiting subquery instance = session.query(Employee).\ filter_by(id=1).limit(1).first() assert len(instance.tags) == 2 class MissingPolymorphicOnTest(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): tablea = Table('tablea', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('adata', String(50)), ) tableb = Table('tableb', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('aid', Integer, ForeignKey('tablea.id')), Column('data', String(50)), ) tablec = Table('tablec', metadata, Column('id', Integer, ForeignKey('tablea.id'), primary_key=True), Column('cdata', String(50)), ) tabled = Table('tabled', metadata, Column('id', Integer, ForeignKey('tablec.id'), primary_key=True), Column('ddata', String(50)), ) @classmethod def setup_classes(cls): class A(cls.Comparable): pass class B(cls.Comparable): pass class C(A): pass class D(C): pass def test_polyon_col_setsup(self): tablea, tableb, tablec, tabled = self.tables.tablea, \ self.tables.tableb, self.tables.tablec, self.tables.tabled A, B, C, D = self.classes.A, self.classes.B, self.classes.C, \ self.classes.D poly_select = select( [tablea, tableb.c.data.label('discriminator')], from_obj=tablea.join(tableb)).alias('poly') mapper(B, tableb) mapper(A, tablea, with_polymorphic=('*', poly_select), polymorphic_on=poly_select.c.discriminator, properties={ 'b':relationship(B, uselist=False) }) mapper(C, tablec, inherits=A,polymorphic_identity='c') mapper(D, tabled, inherits=C, polymorphic_identity='d') c = C(cdata='c1', adata='a1', b=B(data='c')) d = D(cdata='c2', adata='a2', ddata='d2', b=B(data='d')) sess = create_session() sess.add(c) sess.add(d) sess.flush() sess.expunge_all() eq_( sess.query(A).all(), [ C(cdata='c1', adata='a1'), D(cdata='c2', adata='a2', ddata='d2') ] ) class JoinedInhAdjacencyTest(fixtures.MappedTest): @classmethod def 
define_tables(cls, metadata): Table('people', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('type', String(30)), ) Table('users', metadata, Column('id', Integer, ForeignKey('people.id'), primary_key=True), Column('supervisor_id', Integer, ForeignKey('people.id')), ) Table('dudes', metadata, Column('id', Integer, ForeignKey('users.id'), primary_key=True), ) @classmethod def setup_classes(cls): class Person(cls.Comparable): pass class User(Person): pass class Dude(User): pass def _roundtrip(self): Person, User = self.classes.Person, self.classes.User sess = Session() u1 = User() u2 = User() u2.supervisor = u1 sess.add_all([u1, u2]) sess.commit() assert u2.supervisor is u1 def _dude_roundtrip(self): Dude, User = self.classes.Dude, self.classes.User sess = Session() u1 = User() d1 = Dude() d1.supervisor = u1 sess.add_all([u1, d1]) sess.commit() assert d1.supervisor is u1 def test_joined_to_base(self): people, users = self.tables.people, self.tables.users Person, User = self.classes.Person, self.classes.User mapper(Person, people, polymorphic_on=people.c.type, polymorphic_identity='person', ) mapper(User, users, inherits=Person, polymorphic_identity='user', inherit_condition=(users.c.id == people.c.id), properties = { 'supervisor': relationship(Person, primaryjoin=users.c.supervisor_id==people.c.id, ), } ) assert User.supervisor.property.direction is MANYTOONE self._roundtrip() def test_joined_to_same_subclass(self): people, users = self.tables.people, self.tables.users Person, User = self.classes.Person, self.classes.User mapper(Person, people, polymorphic_on=people.c.type, polymorphic_identity='person', ) mapper(User, users, inherits=Person, polymorphic_identity='user', inherit_condition=(users.c.id == people.c.id), properties = { 'supervisor': relationship(User, primaryjoin=users.c.supervisor_id==people.c.id, remote_side=people.c.id, foreign_keys=[users.c.supervisor_id] ), } ) assert User.supervisor.property.direction is MANYTOONE self._roundtrip() def test_joined_subclass_to_superclass(self): people, users, dudes = self.tables.people, self.tables.users, \ self.tables.dudes Person, User, Dude = self.classes.Person, self.classes.User, \ self.classes.Dude mapper(Person, people, polymorphic_on=people.c.type, polymorphic_identity='person', ) mapper(User, users, inherits=Person, polymorphic_identity='user', inherit_condition=(users.c.id == people.c.id), ) mapper(Dude, dudes, inherits=User, polymorphic_identity='dude', inherit_condition=(dudes.c.id==users.c.id), properties={ 'supervisor': relationship(User, primaryjoin=users.c.supervisor_id==people.c.id, remote_side=people.c.id, foreign_keys=[users.c.supervisor_id] ), } ) assert Dude.supervisor.property.direction is MANYTOONE self._dude_roundtrip() class Ticket2419Test(fixtures.DeclarativeMappedTest): """Test [ticket:2419]'s test case.""" @classmethod def setup_classes(cls): Base = cls.DeclarativeBasic class A(Base): __tablename__ = "a" id = Column(Integer, primary_key=True, test_needs_autoincrement=True) class B(Base): __tablename__ = "b" id = Column(Integer, primary_key=True, test_needs_autoincrement=True) ds = relationship("D") es = relationship("E") class C(A): __tablename__ = "c" id = Column(Integer, ForeignKey('a.id'), primary_key=True) b_id = Column(Integer, ForeignKey('b.id')) b = relationship("B", primaryjoin=b_id==B.id) class D(Base): __tablename__ = "d" id = Column(Integer, primary_key=True, test_needs_autoincrement=True) b_id = Column(Integer, ForeignKey('b.id')) class E(Base): __tablename__ = 
'e' id = Column(Integer, primary_key=True, test_needs_autoincrement=True) b_id = Column(Integer, ForeignKey('b.id')) @testing.fails_on("oracle", "seems like oracle's query engine can't " "handle this, not clear if there's an " "expression-level bug on our end though") def test_join_w_eager_w_any(self): A, B, C, D, E = self.classes.A, self.classes.B, \ self.classes.C, self.classes.D, \ self.classes.E s = Session(testing.db) b = B(ds=[D()]) s.add_all([ C( b=b ) ]) s.commit() q = s.query(B, B.ds.any(D.id==1)).options(joinedload_all("es")) q = q.join(C, C.b_id==B.id) q = q.limit(5) eq_( q.all(), [(b, True)] ) SQLAlchemy-0.8.4/test/orm/inheritance/test_basic.py0000644000076500000240000024224212251150015022766 0ustar classicstaff00000000000000import warnings from sqlalchemy.testing import eq_, assert_raises, assert_raises_message from sqlalchemy import * from sqlalchemy import exc as sa_exc, util, event from sqlalchemy.orm import * from sqlalchemy.orm.util import instance_str from sqlalchemy.orm import exc as orm_exc, attributes from sqlalchemy.testing.assertsql import AllOf, CompiledSQL from sqlalchemy.sql import table, column from sqlalchemy import testing from sqlalchemy.testing import engines from sqlalchemy.testing import fixtures from test.orm import _fixtures from sqlalchemy.testing.schema import Table, Column from sqlalchemy import inspect from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.testing.util import gc_collect class O2MTest(fixtures.MappedTest): """deals with inheritance and one-to-many relationships""" @classmethod def define_tables(cls, metadata): global foo, bar, blub foo = Table('foo', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('data', String(20))) bar = Table('bar', metadata, Column('id', Integer, ForeignKey('foo.id'), primary_key=True), Column('data', String(20))) blub = Table('blub', metadata, Column('id', Integer, ForeignKey('bar.id'), primary_key=True), Column('foo_id', Integer, ForeignKey('foo.id'), nullable=False), Column('data', String(20))) def test_basic(self): class Foo(object): def __init__(self, data=None): self.data = data def __repr__(self): return "Foo id %d, data %s" % (self.id, self.data) mapper(Foo, foo) class Bar(Foo): def __repr__(self): return "Bar id %d, data %s" % (self.id, self.data) mapper(Bar, bar, inherits=Foo) class Blub(Bar): def __repr__(self): return "Blub id %d, data %s" % (self.id, self.data) mapper(Blub, blub, inherits=Bar, properties={ 'parent_foo':relationship(Foo) }) sess = create_session() b1 = Blub("blub #1") b2 = Blub("blub #2") f = Foo("foo #1") sess.add(b1) sess.add(b2) sess.add(f) b1.parent_foo = f b2.parent_foo = f sess.flush() compare = ','.join([repr(b1), repr(b2), repr(b1.parent_foo), repr(b2.parent_foo)]) sess.expunge_all() l = sess.query(Blub).all() result = ','.join([repr(l[0]), repr(l[1]), repr(l[0].parent_foo), repr(l[1].parent_foo)]) eq_(compare, result) eq_(l[0].parent_foo.data, 'foo #1') eq_(l[1].parent_foo.data, 'foo #1') class PolymorphicResolutionMultiLevel(fixtures.DeclarativeMappedTest, testing.AssertsCompiledSQL): run_setup_mappers = 'once' __dialect__ = 'default' @classmethod def setup_classes(cls): Base = cls.DeclarativeBasic class A(Base): __tablename__ = 'a' id = Column(Integer, primary_key=True) class B(A): __tablename__ = 'b' id = Column(Integer, ForeignKey('a.id'), primary_key=True) class C(A): __tablename__ = 'c' id = Column(Integer, ForeignKey('a.id'), primary_key=True) class D(B): __tablename__ = 'd' id = Column(Integer, 
ForeignKey('b.id'), primary_key=True) def test_ordered_b_d(self): a_mapper = inspect(self.classes.A) eq_( a_mapper._mappers_from_spec( [self.classes.B, self.classes.D], None), [a_mapper, inspect(self.classes.B), inspect(self.classes.D)] ) def test_a(self): a_mapper = inspect(self.classes.A) eq_( a_mapper._mappers_from_spec( [self.classes.A], None), [a_mapper] ) def test_b_d_selectable(self): a_mapper = inspect(self.classes.A) spec = [self.classes.D, self.classes.B] eq_( a_mapper._mappers_from_spec( spec, self.classes.B.__table__.join(self.classes.D.__table__) ), [inspect(self.classes.B), inspect(self.classes.D)] ) def test_d_selectable(self): a_mapper = inspect(self.classes.A) spec = [self.classes.D] eq_( a_mapper._mappers_from_spec( spec, self.classes.B.__table__.join(self.classes.D.__table__) ), [inspect(self.classes.D)] ) def test_reverse_d_b(self): a_mapper = inspect(self.classes.A) spec = [self.classes.D, self.classes.B] eq_( a_mapper._mappers_from_spec( spec, None), [a_mapper, inspect(self.classes.B), inspect(self.classes.D)] ) mappers, selectable = a_mapper._with_polymorphic_args(spec=spec) self.assert_compile(selectable, "a LEFT OUTER JOIN b ON a.id = b.id " "LEFT OUTER JOIN d ON b.id = d.id") def test_d_b_missing(self): a_mapper = inspect(self.classes.A) spec = [self.classes.D] eq_( a_mapper._mappers_from_spec( spec, None), [a_mapper, inspect(self.classes.B), inspect(self.classes.D)] ) mappers, selectable = a_mapper._with_polymorphic_args(spec=spec) self.assert_compile(selectable, "a LEFT OUTER JOIN b ON a.id = b.id " "LEFT OUTER JOIN d ON b.id = d.id") def test_d_c_b(self): a_mapper = inspect(self.classes.A) spec = [self.classes.D, self.classes.C, self.classes.B] ms = a_mapper._mappers_from_spec(spec, None) eq_( ms[-1], inspect(self.classes.D) ) eq_(ms[0], a_mapper) eq_( set(ms[1:3]), set(a_mapper._inheriting_mappers) ) class PolymorphicOnNotLocalTest(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): t1 = Table('t1', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('x', String(10)), Column('q', String(10))) t2 = Table('t2', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('y', String(10)), Column('xid', ForeignKey('t1.id'))) @classmethod def setup_classes(cls): class Parent(cls.Comparable): pass class Child(Parent): pass def test_non_col_polymorphic_on(self): Parent = self.classes.Parent t2 = self.tables.t2 assert_raises_message( sa_exc.ArgumentError, "Can't determine polymorphic_on " "value 'im not a column' - no " "attribute is mapped to this name.", mapper, Parent, t2, polymorphic_on="im not a column" ) def test_polymorphic_on_non_expr_prop(self): t2, t1 = self.tables.t2, self.tables.t1 Parent = self.classes.Parent t1t2_join = select([t1.c.x], from_obj=[t1.join(t2)]).alias() def go(): interface_m = mapper(Parent, t2, polymorphic_on=lambda:"hi", polymorphic_identity=0) assert_raises_message( sa_exc.ArgumentError, "Only direct column-mapped property or " "SQL expression can be passed for polymorphic_on", go ) def test_polymorphic_on_not_present_col(self): t2, t1 = self.tables.t2, self.tables.t1 Parent = self.classes.Parent t1t2_join = select([t1.c.x], from_obj=[t1.join(t2)]).alias() def go(): t1t2_join_2 = select([t1.c.q], from_obj=[t1.join(t2)]).alias() interface_m = mapper(Parent, t2, polymorphic_on=t1t2_join.c.x, with_polymorphic=('*', t1t2_join_2), polymorphic_identity=0) assert_raises_message( sa_exc.InvalidRequestError, "Could not map polymorphic_on column 'x' to the mapped 
table - " "polymorphic loads will not function properly", go ) def test_polymorphic_on_only_in_with_poly(self): t2, t1 = self.tables.t2, self.tables.t1 Parent = self.classes.Parent t1t2_join = select([t1.c.x], from_obj=[t1.join(t2)]).alias() # if its in the with_polymorphic, then its OK mapper(Parent, t2, polymorphic_on=t1t2_join.c.x, with_polymorphic=('*', t1t2_join), polymorphic_identity=0) def test_polymorpic_on_not_in_with_poly(self): t2, t1 = self.tables.t2, self.tables.t1 Parent = self.classes.Parent t1t2_join = select([t1.c.x], from_obj=[t1.join(t2)]).alias() # if with_polymorphic, but its not present, not OK def go(): t1t2_join_2 = select([t1.c.q], from_obj=[t1.join(t2)]).alias() interface_m = mapper(Parent, t2, polymorphic_on=t1t2_join.c.x, with_polymorphic=('*', t1t2_join_2), polymorphic_identity=0) assert_raises_message( sa_exc.InvalidRequestError, "Could not map polymorphic_on column 'x' " "to the mapped table - " "polymorphic loads will not function properly", go ) def test_polymorphic_on_expr_explicit_map(self): t2, t1 = self.tables.t2, self.tables.t1 Parent, Child = self.classes.Parent, self.classes.Child expr = case([ (t1.c.x=="p", "parent"), (t1.c.x=="c", "child"), ]) mapper(Parent, t1, properties={ "discriminator":column_property(expr) }, polymorphic_identity="parent", polymorphic_on=expr) mapper(Child, t2, inherits=Parent, polymorphic_identity="child") self._roundtrip(parent_ident='p', child_ident='c') def test_polymorphic_on_expr_implicit_map_no_label_joined(self): t2, t1 = self.tables.t2, self.tables.t1 Parent, Child = self.classes.Parent, self.classes.Child expr = case([ (t1.c.x=="p", "parent"), (t1.c.x=="c", "child"), ]) mapper(Parent, t1, polymorphic_identity="parent", polymorphic_on=expr) mapper(Child, t2, inherits=Parent, polymorphic_identity="child") self._roundtrip(parent_ident='p', child_ident='c') def test_polymorphic_on_expr_implicit_map_w_label_joined(self): t2, t1 = self.tables.t2, self.tables.t1 Parent, Child = self.classes.Parent, self.classes.Child expr = case([ (t1.c.x=="p", "parent"), (t1.c.x=="c", "child"), ]).label(None) mapper(Parent, t1, polymorphic_identity="parent", polymorphic_on=expr) mapper(Child, t2, inherits=Parent, polymorphic_identity="child") self._roundtrip(parent_ident='p', child_ident='c') def test_polymorphic_on_expr_implicit_map_no_label_single(self): """test that single_table_criterion is propagated with a standalone expr""" t2, t1 = self.tables.t2, self.tables.t1 Parent, Child = self.classes.Parent, self.classes.Child expr = case([ (t1.c.x=="p", "parent"), (t1.c.x=="c", "child"), ]) mapper(Parent, t1, polymorphic_identity="parent", polymorphic_on=expr) mapper(Child, inherits=Parent, polymorphic_identity="child") self._roundtrip(parent_ident='p', child_ident='c') def test_polymorphic_on_expr_implicit_map_w_label_single(self): """test that single_table_criterion is propagated with a standalone expr""" t2, t1 = self.tables.t2, self.tables.t1 Parent, Child = self.classes.Parent, self.classes.Child expr = case([ (t1.c.x=="p", "parent"), (t1.c.x=="c", "child"), ]).label(None) mapper(Parent, t1, polymorphic_identity="parent", polymorphic_on=expr) mapper(Child, inherits=Parent, polymorphic_identity="child") self._roundtrip(parent_ident='p', child_ident='c') def test_polymorphic_on_column_prop(self): t2, t1 = self.tables.t2, self.tables.t1 Parent, Child = self.classes.Parent, self.classes.Child expr = case([ (t1.c.x=="p", "parent"), (t1.c.x=="c", "child"), ]) cprop = column_property(expr) mapper(Parent, t1, properties={ 
"discriminator":cprop }, polymorphic_identity="parent", polymorphic_on=cprop) mapper(Child, t2, inherits=Parent, polymorphic_identity="child") self._roundtrip(parent_ident='p', child_ident='c') def test_polymorphic_on_column_str_prop(self): t2, t1 = self.tables.t2, self.tables.t1 Parent, Child = self.classes.Parent, self.classes.Child expr = case([ (t1.c.x=="p", "parent"), (t1.c.x=="c", "child"), ]) cprop = column_property(expr) mapper(Parent, t1, properties={ "discriminator":cprop }, polymorphic_identity="parent", polymorphic_on="discriminator") mapper(Child, t2, inherits=Parent, polymorphic_identity="child") self._roundtrip(parent_ident='p', child_ident='c') def test_polymorphic_on_synonym(self): t2, t1 = self.tables.t2, self.tables.t1 Parent, Child = self.classes.Parent, self.classes.Child cprop = column_property(t1.c.x) assert_raises_message( sa_exc.ArgumentError, "Only direct column-mapped property or " "SQL expression can be passed for polymorphic_on", mapper, Parent, t1, properties={ "discriminator":cprop, "discrim_syn":synonym(cprop) }, polymorphic_identity="parent", polymorphic_on="discrim_syn") def _roundtrip(self, set_event=True, parent_ident='parent', child_ident='child'): Parent, Child = self.classes.Parent, self.classes.Child if set_event: @event.listens_for(Parent, "init", propagate=True) def set_identity(instance, *arg, **kw): ident = object_mapper(instance).polymorphic_identity if ident == 'parent': instance.x = parent_ident elif ident == 'child': instance.x = child_ident else: assert False, "Got unexpected identity %r" % ident s = Session(testing.db) s.add_all([ Parent(q="p1"), Child(q="c1", y="c1"), Parent(q="p2"), ]) s.commit() s.close() eq_( [type(t) for t in s.query(Parent).order_by(Parent.id)], [Parent, Child, Parent] ) eq_( [type(t) for t in s.query(Child).all()], [Child] ) class SortOnlyOnImportantFKsTest(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): Table('a', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('b_id', Integer, ForeignKey('b.id', use_alter=True, name='b')) ) Table('b', metadata, Column('id', Integer, ForeignKey('a.id'), primary_key=True) ) @classmethod def setup_classes(cls): Base = declarative_base() class A(Base): __tablename__ = "a" id = Column(Integer, primary_key=True, test_needs_autoincrement=True) b_id = Column(Integer, ForeignKey('b.id')) class B(A): __tablename__ = "b" id = Column(Integer, ForeignKey('a.id'), primary_key=True) __mapper_args__ = {'inherit_condition': id == A.id} cls.classes.A = A cls.classes.B = B def test_flush(self): s = Session(testing.db) s.add(self.classes.B()) s.flush() class FalseDiscriminatorTest(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): global t1 t1 = Table('t1', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('type', Boolean, nullable=False)) def test_false_on_sub(self): class Foo(object): pass class Bar(Foo): pass mapper(Foo, t1, polymorphic_on=t1.c.type, polymorphic_identity=True) mapper(Bar, inherits=Foo, polymorphic_identity=False) sess = create_session() b1 = Bar() sess.add(b1) sess.flush() assert b1.type is False sess.expunge_all() assert isinstance(sess.query(Foo).one(), Bar) def test_false_on_base(self): class Ding(object):pass class Bat(Ding):pass mapper(Ding, t1, polymorphic_on=t1.c.type, polymorphic_identity=False) mapper(Bat, inherits=Ding, polymorphic_identity=True) sess = create_session() d1 = Ding() sess.add(d1) sess.flush() assert d1.type is False sess.expunge_all() 
assert sess.query(Ding).one() is not None class PolymorphicSynonymTest(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): global t1, t2 t1 = Table('t1', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('type', String(10), nullable=False), Column('info', String(255))) t2 = Table('t2', metadata, Column('id', Integer, ForeignKey('t1.id'), primary_key=True), Column('data', String(10), nullable=False)) def test_polymorphic_synonym(self): class T1(fixtures.ComparableEntity): def info(self): return "THE INFO IS:" + self._info def _set_info(self, x): self._info = x info = property(info, _set_info) class T2(T1):pass mapper(T1, t1, polymorphic_on=t1.c.type, polymorphic_identity='t1', properties={ 'info':synonym('_info', map_column=True) }) mapper(T2, t2, inherits=T1, polymorphic_identity='t2') sess = create_session() at1 = T1(info='at1') at2 = T2(info='at2', data='t2 data') sess.add(at1) sess.add(at2) sess.flush() sess.expunge_all() eq_(sess.query(T2).filter(T2.info=='at2').one(), at2) eq_(at2.info, "THE INFO IS:at2") class PolymorphicAttributeManagementTest(fixtures.MappedTest): """Test polymorphic_on can be assigned, can be mirrored, etc.""" run_setup_mappers = 'once' @classmethod def define_tables(cls, metadata): Table('table_a', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('class_name', String(50)) ) Table('table_b', metadata, Column('id', Integer, ForeignKey('table_a.id'), primary_key=True), Column('class_name', String(50)), ) Table('table_c', metadata, Column('id', Integer, ForeignKey('table_b.id'), primary_key=True), Column('data', String(10)) ) @classmethod def setup_classes(cls): table_b, table_c, table_a = (cls.tables.table_b, cls.tables.table_c, cls.tables.table_a) class A(cls.Basic): pass class B(A): pass class C(B): pass class D(B): pass mapper(A, table_a, polymorphic_on=table_a.c.class_name, polymorphic_identity='a') mapper(B, table_b, inherits=A, polymorphic_on=table_b.c.class_name, polymorphic_identity='b') mapper(C, table_c, inherits=B, polymorphic_identity='c') mapper(D, inherits=B, polymorphic_identity='d') def test_poly_configured_immediate(self): A, C, B = (self.classes.A, self.classes.C, self.classes.B) a = A() b = B() c = C() eq_(a.class_name, 'a') eq_(b.class_name, 'b') eq_(c.class_name, 'c') def test_base_class(self): A, C, B = (self.classes.A, self.classes.C, self.classes.B) sess = Session() c1 = C() sess.add(c1) sess.commit() assert isinstance(sess.query(B).first(), C) sess.close() assert isinstance(sess.query(A).first(), C) def test_valid_assignment_upwards(self): """test that we can assign 'd' to a B, since B/D both involve the same set of tables. """ D, B = self.classes.D, self.classes.B sess = Session() b1 = B() b1.class_name = 'd' sess.add(b1) sess.commit() sess.close() assert isinstance(sess.query(B).first(), D) def test_invalid_assignment_downwards(self): """test that we warn on assign of 'b' to a C, since this adds a row to the C table we'd never load. """ C = self.classes.C sess = Session() c1 = C() c1.class_name = 'b' sess.add(c1) assert_raises_message( sa_exc.SAWarning, "Flushing object %s with incompatible " "polymorphic identity 'b'; the object may not " "refresh and/or load correctly" % instance_str(c1), sess.flush ) def test_invalid_assignment_upwards(self): """test that we warn on assign of 'c' to a B, since we will have a "C" row that has no joined row, which will cause object deleted errors. 
""" B = self.classes.B sess = Session() b1 = B() b1.class_name = 'c' sess.add(b1) assert_raises_message( sa_exc.SAWarning, "Flushing object %s with incompatible " "polymorphic identity 'c'; the object may not " "refresh and/or load correctly" % instance_str(b1), sess.flush ) def test_entirely_oob_assignment(self): """test warn on an unknown polymorphic identity. """ B = self.classes.B sess = Session() b1 = B() b1.class_name = 'xyz' sess.add(b1) assert_raises_message( sa_exc.SAWarning, "Flushing object %s with incompatible " "polymorphic identity 'xyz'; the object may not " "refresh and/or load correctly" % instance_str(b1), sess.flush ) def test_not_set_on_upate(self): C = self.classes.C sess = Session() c1 = C() sess.add(c1) sess.commit() sess.expire(c1) c1.data = 'foo' sess.flush() def test_validate_on_upate(self): C = self.classes.C sess = Session() c1 = C() sess.add(c1) sess.commit() sess.expire(c1) c1.class_name = 'b' assert_raises_message( sa_exc.SAWarning, "Flushing object %s with incompatible " "polymorphic identity 'b'; the object may not " "refresh and/or load correctly" % instance_str(c1), sess.flush ) class CascadeTest(fixtures.MappedTest): """that cascades on polymorphic relationships continue cascading along the path of the instance's mapper, not the base mapper.""" @classmethod def define_tables(cls, metadata): global t1, t2, t3, t4 t1= Table('t1', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('data', String(30)) ) t2 = Table('t2', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('t1id', Integer, ForeignKey('t1.id')), Column('type', String(30)), Column('data', String(30)) ) t3 = Table('t3', metadata, Column('id', Integer, ForeignKey('t2.id'), primary_key=True), Column('moredata', String(30))) t4 = Table('t4', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('t3id', Integer, ForeignKey('t3.id')), Column('data', String(30))) def test_cascade(self): class T1(fixtures.BasicEntity): pass class T2(fixtures.BasicEntity): pass class T3(T2): pass class T4(fixtures.BasicEntity): pass mapper(T1, t1, properties={ 't2s':relationship(T2, cascade="all") }) mapper(T2, t2, polymorphic_on=t2.c.type, polymorphic_identity='t2') mapper(T3, t3, inherits=T2, polymorphic_identity='t3', properties={ 't4s':relationship(T4, cascade="all") }) mapper(T4, t4) sess = create_session() t1_1 = T1(data='t1') t3_1 = T3(data ='t3', moredata='t3') t2_1 = T2(data='t2') t1_1.t2s.append(t2_1) t1_1.t2s.append(t3_1) t4_1 = T4(data='t4') t3_1.t4s.append(t4_1) sess.add(t1_1) assert t4_1 in sess.new sess.flush() sess.delete(t1_1) assert t4_1 in sess.deleted sess.flush() class M2OUseGetTest(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): Table('base', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('type', String(30)) ) Table('sub', metadata, Column('id', Integer, ForeignKey('base.id'), primary_key=True), ) Table('related', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('sub_id', Integer, ForeignKey('sub.id')), ) def test_use_get(self): base, sub, related = (self.tables.base, self.tables.sub, self.tables.related) # test [ticket:1186] class Base(fixtures.BasicEntity): pass class Sub(Base): pass class Related(Base): pass mapper(Base, base, polymorphic_on=base.c.type, polymorphic_identity='b') mapper(Sub, sub, inherits=Base, polymorphic_identity='s') mapper(Related, related, properties={ # previously, 
this was needed for the comparison to occur: # the 'primaryjoin' looks just like "Sub"'s "get" clause (based on the Base id), # and foreign_keys since that join condition doesn't actually have any fks in it #'sub':relationship(Sub, primaryjoin=base.c.id==related.c.sub_id, foreign_keys=related.c.sub_id) # now we can use this: 'sub':relationship(Sub) }) assert class_mapper(Related).get_property('sub').strategy.use_get sess = create_session() s1 = Sub() r1 = Related(sub=s1) sess.add(r1) sess.flush() sess.expunge_all() r1 = sess.query(Related).first() s1 = sess.query(Sub).first() def go(): assert r1.sub self.assert_sql_count(testing.db, go, 0) class GetTest(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): global foo, bar, blub foo = Table('foo', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('type', String(30)), Column('data', String(20))) bar = Table('bar', metadata, Column('id', Integer, ForeignKey('foo.id'), primary_key=True), Column('data', String(20))) blub = Table('blub', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('foo_id', Integer, ForeignKey('foo.id')), Column('bar_id', Integer, ForeignKey('bar.id')), Column('data', String(20))) @classmethod def setup_classes(cls): class Foo(cls.Basic): pass class Bar(Foo): pass class Blub(Bar): pass def test_get_polymorphic(self): self._do_get_test(True) def test_get_nonpolymorphic(self): self._do_get_test(False) def _do_get_test(self, polymorphic): foo, Bar, Blub, blub, bar, Foo = (self.tables.foo, self.classes.Bar, self.classes.Blub, self.tables.blub, self.tables.bar, self.classes.Foo) if polymorphic: mapper(Foo, foo, polymorphic_on=foo.c.type, polymorphic_identity='foo') mapper(Bar, bar, inherits=Foo, polymorphic_identity='bar') mapper(Blub, blub, inherits=Bar, polymorphic_identity='blub') else: mapper(Foo, foo) mapper(Bar, bar, inherits=Foo) mapper(Blub, blub, inherits=Bar) sess = create_session() f = Foo() b = Bar() bl = Blub() sess.add(f) sess.add(b) sess.add(bl) sess.flush() if polymorphic: def go(): assert sess.query(Foo).get(f.id) is f assert sess.query(Foo).get(b.id) is b assert sess.query(Foo).get(bl.id) is bl assert sess.query(Bar).get(b.id) is b assert sess.query(Bar).get(bl.id) is bl assert sess.query(Blub).get(bl.id) is bl # test class mismatches - item is present # in the identity map but we requested a subclass assert sess.query(Blub).get(f.id) is None assert sess.query(Blub).get(b.id) is None assert sess.query(Bar).get(f.id) is None self.assert_sql_count(testing.db, go, 0) else: # this is testing the 'wrong' behavior of using get() # polymorphically with mappers that are not configured to be # polymorphic. the important part being that get() always # returns an instance of the query's type. 
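# the assertions below therefore compare by primary key value rather than by identity; the three get() calls that cross class boundaries each emit a SELECT, which is the count of 3 asserted at the end.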
def go(): assert sess.query(Foo).get(f.id) is f bb = sess.query(Foo).get(b.id) assert isinstance(b, Foo) and bb.id==b.id bll = sess.query(Foo).get(bl.id) assert isinstance(bll, Foo) and bll.id==bl.id assert sess.query(Bar).get(b.id) is b bll = sess.query(Bar).get(bl.id) assert isinstance(bll, Bar) and bll.id == bl.id assert sess.query(Blub).get(bl.id) is bl self.assert_sql_count(testing.db, go, 3) class EagerLazyTest(fixtures.MappedTest): """tests eager load/lazy load of child items off inheritance mappers, tests that LazyLoader constructs the right query condition.""" @classmethod def define_tables(cls, metadata): global foo, bar, bar_foo foo = Table('foo', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('data', String(30))) bar = Table('bar', metadata, Column('id', Integer, ForeignKey('foo.id'), primary_key=True), Column('data', String(30))) bar_foo = Table('bar_foo', metadata, Column('bar_id', Integer, ForeignKey('bar.id')), Column('foo_id', Integer, ForeignKey('foo.id')) ) @testing.fails_on('maxdb', 'FIXME: unknown') def test_basic(self): class Foo(object): pass class Bar(Foo): pass foos = mapper(Foo, foo) bars = mapper(Bar, bar, inherits=foos) bars.add_property('lazy', relationship(foos, bar_foo, lazy='select')) bars.add_property('eager', relationship(foos, bar_foo, lazy='joined')) foo.insert().execute(data='foo1') bar.insert().execute(id=1, data='bar1') foo.insert().execute(data='foo2') bar.insert().execute(id=2, data='bar2') foo.insert().execute(data='foo3') #3 foo.insert().execute(data='foo4') #4 bar_foo.insert().execute(bar_id=1, foo_id=3) bar_foo.insert().execute(bar_id=2, foo_id=4) sess = create_session() q = sess.query(Bar) self.assert_(len(q.first().lazy) == 1) self.assert_(len(q.first().eager) == 1) class EagerTargetingTest(fixtures.MappedTest): """test a scenario where joined table inheritance might be confused as an eagerly loaded joined table.""" @classmethod def define_tables(cls, metadata): Table('a_table', metadata, Column('id', Integer, primary_key=True), Column('name', String(50)), Column('type', String(30), nullable=False), Column('parent_id', Integer, ForeignKey('a_table.id')) ) Table('b_table', metadata, Column('id', Integer, ForeignKey('a_table.id'), primary_key=True), Column('b_data', String(50)), ) def test_adapt_stringency(self): b_table, a_table = self.tables.b_table, self.tables.a_table class A(fixtures.ComparableEntity): pass class B(A): pass mapper(A, a_table, polymorphic_on=a_table.c.type, polymorphic_identity='A', properties={ 'children': relationship(A, order_by=a_table.c.name) }) mapper(B, b_table, inherits=A, polymorphic_identity='B', properties={ 'b_derived':column_property(b_table.c.b_data + "DATA") }) sess=create_session() b1=B(id=1, name='b1',b_data='i') sess.add(b1) sess.flush() b2=B(id=2, name='b2', b_data='l', parent_id=1) sess.add(b2) sess.flush() bid=b1.id sess.expunge_all() node = sess.query(B).filter(B.id==bid).all()[0] eq_(node, B(id=1, name='b1',b_data='i')) eq_(node.children[0], B(id=2, name='b2',b_data='l')) sess.expunge_all() node = sess.query(B).options(joinedload(B.children)).filter(B.id==bid).all()[0] eq_(node, B(id=1, name='b1',b_data='i')) eq_(node.children[0], B(id=2, name='b2',b_data='l')) class FlushTest(fixtures.MappedTest): """test dependency sorting among inheriting mappers""" @classmethod def define_tables(cls, metadata): Table('users', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('email', String(128)), Column('password', String(16)), ) 
Table('roles', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('description', String(32)) ) Table('user_roles', metadata, Column('user_id', Integer, ForeignKey('users.id'), primary_key=True), Column('role_id', Integer, ForeignKey('roles.id'), primary_key=True) ) Table('admins', metadata, Column('admin_id', Integer, primary_key=True, test_needs_autoincrement=True), Column('user_id', Integer, ForeignKey('users.id')) ) def test_one(self): admins, users, roles, user_roles = (self.tables.admins, self.tables.users, self.tables.roles, self.tables.user_roles) class User(object):pass class Role(object):pass class Admin(User):pass role_mapper = mapper(Role, roles) user_mapper = mapper(User, users, properties = { 'roles' : relationship(Role, secondary=user_roles, lazy='joined') } ) admin_mapper = mapper(Admin, admins, inherits=user_mapper) sess = create_session() adminrole = Role() sess.add(adminrole) sess.flush() # create an Admin, and append a Role. the dependency processors # corresponding to the "roles" attribute for the Admin mapper and the User mapper # have to ensure that two dependency processors dont fire off and insert the # many to many row twice. a = Admin() a.roles.append(adminrole) a.password = 'admin' sess.add(a) sess.flush() assert user_roles.count().scalar() == 1 def test_two(self): admins, users, roles, user_roles = (self.tables.admins, self.tables.users, self.tables.roles, self.tables.user_roles) class User(object): def __init__(self, email=None, password=None): self.email = email self.password = password class Role(object): def __init__(self, description=None): self.description = description class Admin(User):pass role_mapper = mapper(Role, roles) user_mapper = mapper(User, users, properties = { 'roles' : relationship(Role, secondary=user_roles, lazy='joined') } ) admin_mapper = mapper(Admin, admins, inherits=user_mapper) # create roles adminrole = Role('admin') sess = create_session() sess.add(adminrole) sess.flush() # create admin user a = Admin(email='tim', password='admin') a.roles.append(adminrole) sess.add(a) sess.flush() a.password = 'sadmin' sess.flush() assert user_roles.count().scalar() == 1 class JoinedNoFKSortingTest(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): Table("a", metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True) ) Table("b", metadata, Column('id', Integer, primary_key=True) ) Table("c", metadata, Column('id', Integer, primary_key=True) ) @classmethod def setup_classes(cls): class A(cls.Basic): pass class B(A): pass class C(A): pass @classmethod def setup_mappers(cls): A, B, C = cls.classes.A, cls.classes.B, cls.classes.C mapper(A, cls.tables.a) mapper(B, cls.tables.b, inherits=A, inherit_condition=cls.tables.a.c.id == cls.tables.b.c.id) mapper(C, cls.tables.c, inherits=A, inherit_condition=cls.tables.a.c.id == cls.tables.c.c.id) def test_ordering(self): B, C = self.classes.B, self.classes.C sess = Session() sess.add_all([B(), C(), B(), C()]) self.assert_sql_execution( testing.db, sess.flush, CompiledSQL( "INSERT INTO a () VALUES ()", {} ), CompiledSQL( "INSERT INTO a () VALUES ()", {} ), CompiledSQL( "INSERT INTO a () VALUES ()", {} ), CompiledSQL( "INSERT INTO a () VALUES ()", {} ), AllOf( CompiledSQL( "INSERT INTO b (id) VALUES (:id)", [{"id": 1}, {"id": 3}] ), CompiledSQL( "INSERT INTO c (id) VALUES (:id)", [{"id": 2}, {"id": 4}] ) ) ) class VersioningTest(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): Table('base', metadata, Column('id', 
Integer, primary_key=True, test_needs_autoincrement=True), Column('version_id', Integer, nullable=False), Column('value', String(40)), Column('discriminator', Integer, nullable=False) ) Table('subtable', metadata, Column('id', None, ForeignKey('base.id'), primary_key=True), Column('subdata', String(50)) ) Table('stuff', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('parent', Integer, ForeignKey('base.id')) ) @testing.emits_warning(r".*updated rowcount") @engines.close_open_connections def test_save_update(self): subtable, base, stuff = (self.tables.subtable, self.tables.base, self.tables.stuff) class Base(fixtures.BasicEntity): pass class Sub(Base): pass class Stuff(Base): pass mapper(Stuff, stuff) mapper(Base, base, polymorphic_on=base.c.discriminator, version_id_col=base.c.version_id, polymorphic_identity=1, properties={ 'stuff':relationship(Stuff) }) mapper(Sub, subtable, inherits=Base, polymorphic_identity=2) sess = create_session() b1 = Base(value='b1') s1 = Sub(value='sub1', subdata='some subdata') sess.add(b1) sess.add(s1) sess.flush() sess2 = create_session() s2 = sess2.query(Base).get(s1.id) s2.subdata = 'sess2 subdata' s1.subdata = 'sess1 subdata' sess.flush() assert_raises(orm_exc.StaleDataError, sess2.query(Base).with_lockmode('read').get, s1.id) if not testing.db.dialect.supports_sane_rowcount: sess2.flush() else: assert_raises(orm_exc.StaleDataError, sess2.flush) sess2.refresh(s2) if testing.db.dialect.supports_sane_rowcount: assert s2.subdata == 'sess1 subdata' s2.subdata = 'sess2 subdata' sess2.flush() @testing.emits_warning(r".*(update|delete)d rowcount") def test_delete(self): subtable, base = self.tables.subtable, self.tables.base class Base(fixtures.BasicEntity): pass class Sub(Base): pass mapper(Base, base, polymorphic_on=base.c.discriminator, version_id_col=base.c.version_id, polymorphic_identity=1) mapper(Sub, subtable, inherits=Base, polymorphic_identity=2) sess = create_session() b1 = Base(value='b1') s1 = Sub(value='sub1', subdata='some subdata') s2 = Sub(value='sub2', subdata='some other subdata') sess.add(b1) sess.add(s1) sess.add(s2) sess.flush() sess2 = create_session() s3 = sess2.query(Base).get(s1.id) sess2.delete(s3) sess2.flush() s2.subdata = 'some new subdata' sess.flush() s1.subdata = 'some new subdata' if testing.db.dialect.supports_sane_rowcount: assert_raises( orm_exc.StaleDataError, sess.flush ) else: sess.flush() class DistinctPKTest(fixtures.MappedTest): """test the construction of mapper.primary_key when an inheriting relationship joins on a column other than primary key column.""" run_inserts = 'once' run_deletes = None @classmethod def define_tables(cls, metadata): global person_table, employee_table, Person, Employee person_table = Table("persons", metadata, Column("id", Integer, primary_key=True, test_needs_autoincrement=True), Column("name", String(80)), ) employee_table = Table("employees", metadata, Column("id", Integer, primary_key=True, test_needs_autoincrement=True), Column("salary", Integer), Column("person_id", Integer, ForeignKey("persons.id")), ) class Person(object): def __init__(self, name): self.name = name class Employee(Person): pass @classmethod def insert_data(cls): person_insert = person_table.insert() person_insert.execute(id=1, name='alice') person_insert.execute(id=2, name='bob') employee_insert = employee_table.insert() employee_insert.execute(id=2, salary=250, person_id=1) # alice employee_insert.execute(id=3, salary=200, person_id=2) # bob def test_implicit(self): person_mapper 
= mapper(Person, person_table) mapper(Employee, employee_table, inherits=person_mapper) assert list(class_mapper(Employee).primary_key) == [person_table.c.id] def test_explicit_props(self): person_mapper = mapper(Person, person_table) mapper(Employee, employee_table, inherits=person_mapper, properties={'pid':person_table.c.id, 'eid':employee_table.c.id}) self._do_test(False) def test_explicit_composite_pk(self): person_mapper = mapper(Person, person_table) mapper(Employee, employee_table, inherits=person_mapper, primary_key=[person_table.c.id, employee_table.c.id]) assert_raises_message(sa_exc.SAWarning, r"On mapper Mapper\|Employee\|employees, " "primary key column 'persons.id' is being " "combined with distinct primary key column 'employees.id' " "in attribute 'id'. Use explicit properties to give " "each column its own mapped attribute name.", self._do_test, True ) def test_explicit_pk(self): person_mapper = mapper(Person, person_table) mapper(Employee, employee_table, inherits=person_mapper, primary_key=[person_table.c.id]) self._do_test(False) def _do_test(self, composite): session = create_session() query = session.query(Employee) if composite: alice1 = query.get([1,2]) bob = query.get([2,3]) alice2 = query.get([1,2]) else: alice1 = query.get(1) bob = query.get(2) alice2 = query.get(1) assert alice1.name == alice2.name == 'alice' assert bob.name == 'bob' class SyncCompileTest(fixtures.MappedTest): """test that syncrules compile properly on custom inherit conds""" @classmethod def define_tables(cls, metadata): global _a_table, _b_table, _c_table _a_table = Table('a', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('data1', String(128)) ) _b_table = Table('b', metadata, Column('a_id', Integer, ForeignKey('a.id'), primary_key=True), Column('data2', String(128)) ) _c_table = Table('c', metadata, # Column('a_id', Integer, ForeignKey('b.a_id'), primary_key=True), #works Column('b_a_id', Integer, ForeignKey('b.a_id'), primary_key=True), Column('data3', String(128)) ) def test_joins(self): for j1 in (None, _b_table.c.a_id==_a_table.c.id, _a_table.c.id==_b_table.c.a_id): for j2 in (None, _b_table.c.a_id==_c_table.c.b_a_id, _c_table.c.b_a_id==_b_table.c.a_id): self._do_test(j1, j2) for t in reversed(_a_table.metadata.sorted_tables): t.delete().execute().close() def _do_test(self, j1, j2): class A(object): def __init__(self, **kwargs): for key, value in kwargs.items(): setattr(self, key, value) class B(A): pass class C(B): pass mapper(A, _a_table) mapper(B, _b_table, inherits=A, inherit_condition=j1 ) mapper(C, _c_table, inherits=B, inherit_condition=j2 ) session = create_session() a = A(data1='a1') session.add(a) b = B(data1='b1', data2='b2') session.add(b) c = C(data1='c1', data2='c2', data3='c3') session.add(c) session.flush() session.expunge_all() assert len(session.query(A).all()) == 3 assert len(session.query(B).all()) == 2 assert len(session.query(C).all()) == 1 class OverrideColKeyTest(fixtures.MappedTest): """test overriding of column attributes.""" @classmethod def define_tables(cls, metadata): global base, subtable base = Table('base', metadata, Column('base_id', Integer, primary_key=True, test_needs_autoincrement=True), Column('data', String(255)), Column('sqlite_fixer', String(10)) ) subtable = Table('subtable', metadata, Column('base_id', Integer, ForeignKey('base.base_id'), primary_key=True), Column('subdata', String(255)) ) def test_plain(self): # control case class Base(object): pass class Sub(Base): pass mapper(Base, base) mapper(Sub, 
subtable, inherits=Base) # Sub gets a "base_id" property using the "base_id" # column of both tables. eq_( class_mapper(Sub).get_property('base_id').columns, [subtable.c.base_id, base.c.base_id] ) def test_override_explicit(self): # this pattern is what you see when using declarative # in particular, here we do a "manual" version of # what we'd like the mapper to do. class Base(object): pass class Sub(Base): pass mapper(Base, base, properties={ 'id':base.c.base_id }) mapper(Sub, subtable, inherits=Base, properties={ # this is the manual way to do it, is not really # possible in declarative 'id':[base.c.base_id, subtable.c.base_id] }) eq_( class_mapper(Sub).get_property('id').columns, [base.c.base_id, subtable.c.base_id] ) s1 = Sub() s1.id = 10 sess = create_session() sess.add(s1) sess.flush() assert sess.query(Sub).get(10) is s1 def test_override_onlyinparent(self): class Base(object): pass class Sub(Base): pass mapper(Base, base, properties={ 'id':base.c.base_id }) mapper(Sub, subtable, inherits=Base) eq_( class_mapper(Sub).get_property('id').columns, [base.c.base_id] ) eq_( class_mapper(Sub).get_property('base_id').columns, [subtable.c.base_id] ) s1 = Sub() s1.id = 10 s2 = Sub() s2.base_id = 15 sess = create_session() sess.add_all([s1, s2]) sess.flush() # s1 gets '10' assert sess.query(Sub).get(10) is s1 # s2 gets a new id, base_id is overwritten by the ultimate # PK col assert s2.id == s2.base_id != 15 def test_override_implicit(self): # this is originally [ticket:1111]. # the pattern here is now disallowed by [ticket:1892] class Base(object): pass class Sub(Base): pass mapper(Base, base, properties={ 'id':base.c.base_id }) def go(): mapper(Sub, subtable, inherits=Base, properties={ 'id':subtable.c.base_id }) # Sub mapper compilation needs to detect that "base.c.base_id" # is renamed in the inherited mapper as "id", even though # it has its own "id" property. It then generates # an exception in 0.7 due to the implicit conflict. 
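# the supported spelling is shown in test_override_explicit above: assign both columns to the 'id' attribute explicitly on the inheriting mapper.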
assert_raises(sa_exc.InvalidRequestError, go) def test_plain_descriptor(self): """test that descriptors prevent inheritance from propagating properties to subclasses.""" class Base(object): pass class Sub(Base): @property def data(self): return "im the data" mapper(Base, base) mapper(Sub, subtable, inherits=Base) s1 = Sub() sess = create_session() sess.add(s1) sess.flush() assert sess.query(Sub).one().data == "im the data" def test_custom_descriptor(self): """test that descriptors prevent inheritance from propagating properties to subclasses.""" class MyDesc(object): def __get__(self, instance, owner): if instance is None: return self return "im the data" class Base(object): pass class Sub(Base): data = MyDesc() mapper(Base, base) mapper(Sub, subtable, inherits=Base) s1 = Sub() sess = create_session() sess.add(s1) sess.flush() assert sess.query(Sub).one().data == "im the data" def test_sub_columns_over_base_descriptors(self): class Base(object): @property def subdata(self): return "this is base" class Sub(Base): pass mapper(Base, base) mapper(Sub, subtable, inherits=Base) sess = create_session() b1 = Base() assert b1.subdata == "this is base" s1 = Sub() s1.subdata = "this is sub" assert s1.subdata == "this is sub" sess.add_all([s1, b1]) sess.flush() sess.expunge_all() assert sess.query(Base).get(b1.base_id).subdata == "this is base" assert sess.query(Sub).get(s1.base_id).subdata == "this is sub" def test_base_descriptors_over_base_cols(self): class Base(object): @property def data(self): return "this is base" class Sub(Base): pass mapper(Base, base) mapper(Sub, subtable, inherits=Base) sess = create_session() b1 = Base() assert b1.data == "this is base" s1 = Sub() assert s1.data == "this is base" sess.add_all([s1, b1]) sess.flush() sess.expunge_all() assert sess.query(Base).get(b1.base_id).data == "this is base" assert sess.query(Sub).get(s1.base_id).data == "this is base" class OptimizedLoadTest(fixtures.MappedTest): """tests for the "optimized load" routine.""" @classmethod def define_tables(cls, metadata): Table('base', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('data', String(50)), Column('type', String(50)), Column('counter', Integer, server_default="1") ) Table('sub', metadata, Column('id', Integer, ForeignKey('base.id'), primary_key=True), Column('sub', String(50)), Column('counter', Integer, server_default="1"), Column('counter2', Integer, server_default="1") ) Table('subsub', metadata, Column('id', Integer, ForeignKey('sub.id'), primary_key=True), Column('counter2', Integer, server_default="1") ) Table('with_comp', metadata, Column('id', Integer, ForeignKey('base.id'), primary_key=True), Column('a', String(10)), Column('b', String(10)) ) def test_no_optimize_on_map_to_join(self): base, sub = self.tables.base, self.tables.sub class Base(fixtures.ComparableEntity): pass class JoinBase(fixtures.ComparableEntity): pass class SubJoinBase(JoinBase): pass mapper(Base, base) mapper(JoinBase, base.outerjoin(sub), properties=util.OrderedDict( [('id', [base.c.id, sub.c.id]), ('counter', [base.c.counter, sub.c.counter])]) ) mapper(SubJoinBase, inherits=JoinBase) sess = Session() sess.add(Base(data='data')) sess.commit() sjb = sess.query(SubJoinBase).one() sjb_id = sjb.id sess.expire(sjb) # this should not use the optimized load, # which assumes discrete tables def go(): eq_(sjb.data, 'data') self.assert_sql_execution( testing.db, go, CompiledSQL( "SELECT base.id AS base_id, sub.id AS sub_id, " "base.counter AS base_counter, sub.counter AS 
sub_counter, " "base.data AS base_data, " "base.type AS base_type, sub.sub AS sub_sub, " "sub.counter2 AS sub_counter2 FROM base " "LEFT OUTER JOIN sub ON base.id = sub.id " "WHERE base.id = :param_1", {'param_1': sjb_id} ), ) def test_optimized_passes(self): """"test that the 'optimized load' routine doesn't crash when a column in the join condition is not available.""" base, sub = self.tables.base, self.tables.sub class Base(fixtures.ComparableEntity): pass class Sub(Base): pass mapper(Base, base, polymorphic_on=base.c.type, polymorphic_identity='base') # redefine Sub's "id" to favor the "id" col in the subtable. # "id" is also part of the primary join condition mapper(Sub, sub, inherits=Base, polymorphic_identity='sub', properties={'id':[sub.c.id, base.c.id]}) sess = sessionmaker()() s1 = Sub(data='s1data', sub='s1sub') sess.add(s1) sess.commit() sess.expunge_all() # load s1 via Base. s1.id won't populate since it's relative to # the "sub" table. The optimized load kicks in and tries to # generate on the primary join, but cannot since "id" is itself unloaded. # the optimized load needs to return "None" so regular full-row loading proceeds s1 = sess.query(Base).first() assert s1.sub == 's1sub' def test_column_expression(self): base, sub = self.tables.base, self.tables.sub class Base(fixtures.ComparableEntity): pass class Sub(Base): pass mapper(Base, base, polymorphic_on=base.c.type, polymorphic_identity='base') mapper(Sub, sub, inherits=Base, polymorphic_identity='sub', properties={ 'concat': column_property(sub.c.sub + "|" + sub.c.sub) }) sess = sessionmaker()() s1 = Sub(data='s1data', sub='s1sub') sess.add(s1) sess.commit() sess.expunge_all() s1 = sess.query(Base).first() assert s1.concat == 's1sub|s1sub' def test_column_expression_joined(self): base, sub = self.tables.base, self.tables.sub class Base(fixtures.ComparableEntity): pass class Sub(Base): pass mapper(Base, base, polymorphic_on=base.c.type, polymorphic_identity='base') mapper(Sub, sub, inherits=Base, polymorphic_identity='sub', properties={ 'concat': column_property(base.c.data + "|" + sub.c.sub) }) sess = sessionmaker()() s1 = Sub(data='s1data', sub='s1sub') s2 = Sub(data='s2data', sub='s2sub') s3 = Sub(data='s3data', sub='s3sub') sess.add_all([s1, s2, s3]) sess.commit() sess.expunge_all() # query a bunch of rows to ensure there's no cartesian # product against "base" occurring, it is in fact # detecting that "base" needs to be in the join # criterion eq_( sess.query(Base).order_by(Base.id).all(), [ Sub(data='s1data', sub='s1sub', concat='s1data|s1sub'), Sub(data='s2data', sub='s2sub', concat='s2data|s2sub'), Sub(data='s3data', sub='s3sub', concat='s3data|s3sub') ] ) def test_composite_column_joined(self): base, with_comp = self.tables.base, self.tables.with_comp class Base(fixtures.BasicEntity): pass class WithComp(Base): pass class Comp(object): def __init__(self, a, b): self.a = a self.b = b def __composite_values__(self): return self.a, self.b def __eq__(self, other): return (self.a == other.a) and (self.b == other.b) mapper(Base, base, polymorphic_on=base.c.type, polymorphic_identity='base') mapper(WithComp, with_comp, inherits=Base, polymorphic_identity='wc', properties={ 'comp': composite(Comp, with_comp.c.a, with_comp.c.b) }) sess = sessionmaker()() s1 = WithComp(data='s1data', comp=Comp('ham', 'cheese')) s2 = WithComp(data='s2data', comp=Comp('bacon', 'eggs')) sess.add_all([s1, s2]) sess.commit() sess.expunge_all() s1test, s2test = sess.query(Base).order_by(Base.id).all() assert s1test.comp assert s2test.comp 
eq_(s1test.comp, Comp('ham', 'cheese')) eq_(s2test.comp, Comp('bacon', 'eggs')) def test_load_expired_on_pending(self): base, sub = self.tables.base, self.tables.sub class Base(fixtures.BasicEntity): pass class Sub(Base): pass mapper(Base, base, polymorphic_on=base.c.type, polymorphic_identity='base') mapper(Sub, sub, inherits=Base, polymorphic_identity='sub') sess = Session() s1 = Sub(data='s1') sess.add(s1) self.assert_sql_execution( testing.db, sess.flush, CompiledSQL( "INSERT INTO base (data, type) VALUES (:data, :type)", [{'data':'s1','type':'sub'}] ), CompiledSQL( "INSERT INTO sub (id, sub) VALUES (:id, :sub)", lambda ctx:{'id':s1.id, 'sub':None} ), ) def go(): eq_( s1.counter2, 1 ) self.assert_sql_execution( testing.db, go, CompiledSQL( "SELECT sub.counter AS sub_counter, base.counter AS base_counter, " "sub.counter2 AS sub_counter2 FROM base JOIN sub ON " "base.id = sub.id WHERE base.id = :param_1", lambda ctx:{u'param_1': s1.id} ), ) def test_dont_generate_on_none(self): base, sub = self.tables.base, self.tables.sub class Base(fixtures.BasicEntity): pass class Sub(Base): pass mapper(Base, base, polymorphic_on=base.c.type, polymorphic_identity='base') m = mapper(Sub, sub, inherits=Base, polymorphic_identity='sub') s1 = Sub() assert m._optimized_get_statement(attributes.instance_state(s1), ['counter2']) is None # loads s1.id as None eq_(s1.id, None) # this now will come up with a value of None for id - should reject assert m._optimized_get_statement(attributes.instance_state(s1), ['counter2']) is None s1.id = 1 attributes.instance_state(s1)._commit_all(s1.__dict__, None) assert m._optimized_get_statement(attributes.instance_state(s1), ['counter2']) is not None def test_load_expired_on_pending_twolevel(self): base, sub, subsub = (self.tables.base, self.tables.sub, self.tables.subsub) class Base(fixtures.BasicEntity): pass class Sub(Base): pass class SubSub(Sub): pass mapper(Base, base, polymorphic_on=base.c.type, polymorphic_identity='base') mapper(Sub, sub, inherits=Base, polymorphic_identity='sub') mapper(SubSub, subsub, inherits=Sub, polymorphic_identity='subsub') sess = Session() s1 = SubSub(data='s1', counter=1) sess.add(s1) self.assert_sql_execution( testing.db, sess.flush, CompiledSQL( "INSERT INTO base (data, type, counter) VALUES " "(:data, :type, :counter)", [{'data':'s1','type':'subsub','counter':1}] ), CompiledSQL( "INSERT INTO sub (id, sub, counter) VALUES " "(:id, :sub, :counter)", lambda ctx:[{'counter': 1, 'sub': None, 'id': s1.id}] ), CompiledSQL( "INSERT INTO subsub (id) VALUES (:id)", lambda ctx:{'id':s1.id} ), ) def go(): eq_( s1.counter2, 1 ) self.assert_sql_execution( testing.db, go, CompiledSQL( "SELECT subsub.counter2 AS subsub_counter2, " "sub.counter2 AS sub_counter2 FROM subsub, sub " "WHERE :param_1 = sub.id AND sub.id = subsub.id", lambda ctx:{u'param_1': s1.id} ), ) class TransientInheritingGCTest(fixtures.TestBase): __requires__ = ('cpython',) def _fixture(self): Base = declarative_base() class A(Base): __tablename__ = 'a' id = Column(Integer, primary_key=True, test_needs_autoincrement=True) data = Column(String(10)) self.A = A return Base def setUp(self): self.Base = self._fixture() def tearDown(self): self.Base.metadata.drop_all(testing.db) #clear_mappers() self.Base = None def _do_test(self, go): B = go() self.Base.metadata.create_all(testing.db) sess = Session(testing.db) sess.add(B(data='some b')) sess.commit() b1 = sess.query(B).one() assert isinstance(b1, B) sess.close() del sess del b1 del B gc_collect() eq_( len(self.A.__subclasses__()), 0) def 
test_single(self): def go(): class B(self.A): pass return B self._do_test(go) @testing.fails_if(lambda: True, "not supported for joined inh right now.") def test_joined(self): def go(): class B(self.A): __tablename__ = 'b' id = Column(Integer, ForeignKey('a.id'), primary_key=True) return B self._do_test(go) class NoPKOnSubTableWarningTest(fixtures.TestBase): def _fixture(self): metadata = MetaData() parent = Table('parent', metadata, Column('id', Integer, primary_key=True) ) child = Table('child', metadata, Column('id', Integer, ForeignKey('parent.id')) ) return parent, child def tearDown(self): clear_mappers() def test_warning_on_sub(self): parent, child = self._fixture() class P(object): pass class C(P): pass mapper(P, parent) assert_raises_message( sa_exc.SAWarning, "Could not assemble any primary keys for locally mapped " "table 'child' - no rows will be persisted in this Table.", mapper, C, child, inherits=P ) def test_no_warning_with_explicit(self): parent, child = self._fixture() class P(object): pass class C(P): pass mapper(P, parent) mc = mapper(C, child, inherits=P, primary_key=[parent.c.id]) eq_(mc.primary_key, (parent.c.id,)) class InhCondTest(fixtures.TestBase): def test_inh_cond_nonexistent_table_unrelated(self): metadata = MetaData() base_table = Table("base", metadata, Column("id", Integer, primary_key=True) ) derived_table = Table("derived", metadata, Column("id", Integer, ForeignKey("base.id"), primary_key=True), Column("owner_id", Integer, ForeignKey("owner.owner_id")) ) class Base(object): pass class Derived(Base): pass mapper(Base, base_table) # succeeds, despite "owner" table not configured yet m2 = mapper(Derived, derived_table, inherits=Base) assert m2.inherit_condition.compare( base_table.c.id==derived_table.c.id ) def test_inh_cond_nonexistent_col_unrelated(self): m = MetaData() base_table = Table("base", m, Column("id", Integer, primary_key=True) ) derived_table = Table("derived", m, Column("id", Integer, ForeignKey('base.id'), primary_key=True), Column('order_id', Integer, ForeignKey('order.foo')) ) order_table = Table('order', m, Column('id', Integer, primary_key=True)) class Base(object): pass class Derived(Base): pass mapper(Base, base_table) # succeeds, despite "order.foo" doesn't exist m2 = mapper(Derived, derived_table, inherits=Base) assert m2.inherit_condition.compare( base_table.c.id==derived_table.c.id ) def test_inh_cond_no_fk(self): metadata = MetaData() base_table = Table("base", metadata, Column("id", Integer, primary_key=True) ) derived_table = Table("derived", metadata, Column("id", Integer, primary_key=True), ) class Base(object): pass class Derived(Base): pass mapper(Base, base_table) assert_raises_message( sa_exc.ArgumentError, "Can't find any foreign key relationships between " "'base' and 'derived'.", mapper, Derived, derived_table, inherits=Base ) def test_inh_cond_nonexistent_table_related(self): m1 = MetaData() m2 = MetaData() base_table = Table("base", m1, Column("id", Integer, primary_key=True) ) derived_table = Table("derived", m2, Column("id", Integer, ForeignKey('base.id'), primary_key=True), ) class Base(object): pass class Derived(Base): pass mapper(Base, base_table) # the ForeignKey def is correct but there are two # different metadatas. Would like the traditional # "noreferencedtable" error to raise so that the # user is directed towards the FK definition in question. 
assert_raises_message( sa_exc.NoReferencedTableError, "Foreign key associated with column 'derived.id' " "could not find table 'base' with which to generate " "a foreign key to target column 'id'", mapper, Derived, derived_table, inherits=Base ) def test_inh_cond_nonexistent_col_related(self): m = MetaData() base_table = Table("base", m, Column("id", Integer, primary_key=True) ) derived_table = Table("derived", m, Column("id", Integer, ForeignKey('base.q'), primary_key=True), ) class Base(object): pass class Derived(Base): pass mapper(Base, base_table) assert_raises_message( sa_exc.NoReferencedColumnError, "Could not create ForeignKey 'base.q' on table " "'derived': table 'base' has no column named 'q'", mapper, Derived, derived_table, inherits=Base ) class PKDiscriminatorTest(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): parents = Table('parents', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', String(60))) children = Table('children', metadata, Column('id', Integer, ForeignKey('parents.id'), primary_key=True), Column('type', Integer,primary_key=True), Column('name', String(60))) def test_pk_as_discriminator(self): parents, children = self.tables.parents, self.tables.children class Parent(object): def __init__(self, name=None): self.name = name class Child(object): def __init__(self, name=None): self.name = name class A(Child): pass mapper(Parent, parents, properties={ 'children': relationship(Child, backref='parent'), }) mapper(Child, children, polymorphic_on=children.c.type, polymorphic_identity=1) mapper(A, inherits=Child, polymorphic_identity=2) s = create_session() p = Parent('p1') a = A('a1') p.children.append(a) s.add(p) s.flush() assert a.id assert a.type == 2 p.name='p1new' a.name='a1new' s.flush() s.expire_all() assert a.name=='a1new' assert p.name=='p1new' class NoPolyIdentInMiddleTest(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): Table('base', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('type', String(50), nullable=False), ) @classmethod def setup_classes(cls): class A(cls.Comparable): pass class B(A): pass class C(B): pass class D(B): pass class E(A): pass @classmethod def setup_mappers(cls): A, C, B, E, D, base = (cls.classes.A, cls.classes.C, cls.classes.B, cls.classes.E, cls.classes.D, cls.tables.base) mapper(A, base, polymorphic_on=base.c.type) mapper(B, inherits=A, ) mapper(C, inherits=B, polymorphic_identity='c') mapper(D, inherits=B, polymorphic_identity='d') mapper(E, inherits=A, polymorphic_identity='e') def test_load_from_middle(self): C, B = self.classes.C, self.classes.B s = Session() s.add(C()) o = s.query(B).first() eq_(o.type, 'c') assert isinstance(o, C) def test_load_from_base(self): A, C = self.classes.A, self.classes.C s = Session() s.add(C()) o = s.query(A).first() eq_(o.type, 'c') assert isinstance(o, C) def test_discriminator(self): C, B, base = (self.classes.C, self.classes.B, self.tables.base) assert class_mapper(B).polymorphic_on is base.c.type assert class_mapper(C).polymorphic_on is base.c.type def test_load_multiple_from_middle(self): C, B, E, D, base = (self.classes.C, self.classes.B, self.classes.E, self.classes.D, self.tables.base) s = Session() s.add_all([C(), D(), E()]) eq_( s.query(B).order_by(base.c.type).all(), [C(), D()] ) class DeleteOrphanTest(fixtures.MappedTest): """Test the fairly obvious, that an error is raised when attempting to insert an orphan. 
Previous SQLA versions would check this constraint in memory which is the original rationale for this test. """ @classmethod def define_tables(cls, metadata): global single, parent single = Table('single', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('type', String(50), nullable=False), Column('data', String(50)), Column('parent_id', Integer, ForeignKey('parent.id'), nullable=False), ) parent = Table('parent', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('data', String(50)) ) def test_orphan_message(self): class Base(fixtures.BasicEntity): pass class SubClass(Base): pass class Parent(fixtures.BasicEntity): pass mapper(Base, single, polymorphic_on=single.c.type, polymorphic_identity='base') mapper(SubClass, inherits=Base, polymorphic_identity='sub') mapper(Parent, parent, properties={ 'related':relationship(Base, cascade="all, delete-orphan") }) sess = create_session() s1 = SubClass(data='s1') sess.add(s1) assert_raises(sa_exc.DBAPIError, sess.flush) class PolymorphicUnionTest(fixtures.TestBase, testing.AssertsCompiledSQL): __dialect__ = 'default' def _fixture(self): t1 = table('t1', column('c1', Integer), column('c2', Integer), column('c3', Integer)) t2 = table('t2', column('c1', Integer), column('c2', Integer), column('c3', Integer), column('c4', Integer)) t3 = table('t3', column('c1', Integer), column('c3', Integer), column('c5', Integer)) return t1, t2, t3 def test_type_col_present(self): t1, t2, t3 = self._fixture() self.assert_compile( polymorphic_union( util.OrderedDict([("a", t1), ("b", t2), ("c", t3)]), 'q1' ), "SELECT t1.c1, t1.c2, t1.c3, CAST(NULL AS INTEGER) AS c4, " "CAST(NULL AS INTEGER) AS c5, 'a' AS q1 FROM t1 UNION ALL " "SELECT t2.c1, t2.c2, t2.c3, t2.c4, CAST(NULL AS INTEGER) AS c5, " "'b' AS q1 FROM t2 UNION ALL SELECT t3.c1, " "CAST(NULL AS INTEGER) AS c2, t3.c3, CAST(NULL AS INTEGER) AS c4, " "t3.c5, 'c' AS q1 FROM t3" ) def test_type_col_non_present(self): t1, t2, t3 = self._fixture() self.assert_compile( polymorphic_union( util.OrderedDict([("a", t1), ("b", t2), ("c", t3)]), None ), "SELECT t1.c1, t1.c2, t1.c3, CAST(NULL AS INTEGER) AS c4, " "CAST(NULL AS INTEGER) AS c5 FROM t1 UNION ALL SELECT t2.c1, " "t2.c2, t2.c3, t2.c4, CAST(NULL AS INTEGER) AS c5 FROM t2 " "UNION ALL SELECT t3.c1, CAST(NULL AS INTEGER) AS c2, t3.c3, " "CAST(NULL AS INTEGER) AS c4, t3.c5 FROM t3" ) def test_no_cast_null(self): t1, t2, t3 = self._fixture() self.assert_compile( polymorphic_union( util.OrderedDict([("a", t1), ("b", t2), ("c", t3)]), 'q1', cast_nulls=False ), "SELECT t1.c1, t1.c2, t1.c3, NULL AS c4, NULL AS c5, 'a' AS q1 " "FROM t1 UNION ALL SELECT t2.c1, t2.c2, t2.c3, t2.c4, NULL AS c5, " "'b' AS q1 FROM t2 UNION ALL SELECT t3.c1, NULL AS c2, t3.c3, " "NULL AS c4, t3.c5, 'c' AS q1 FROM t3" ) class NameConflictTest(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): content = Table('content', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('type', String(30)) ) foo = Table('foo', metadata, Column('id', Integer, ForeignKey('content.id'), primary_key=True), Column('content_type', String(30)) ) def test_name_conflict(self): class Content(object): pass class Foo(Content): pass mapper(Content, self.tables.content, polymorphic_on=self.tables.content.c.type) mapper(Foo, self.tables.foo, inherits=Content, polymorphic_identity='foo') sess = create_session() f = Foo() f.content_type = u'bar' sess.add(f) sess.flush() f_id = f.id sess.expunge_all() 
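# the u'bar' value assigned to Foo.content_type must survive the round trip when the object is loaded again through a query against the base Content class.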
assert sess.query(Content).get(f_id).content_type == u'bar' SQLAlchemy-0.8.4/test/orm/inheritance/test_concrete.py0000644000076500000240000007336312251150015023515 0ustar classicstaff00000000000000from sqlalchemy.testing import eq_, assert_raises, \ assert_raises_message from sqlalchemy import * from sqlalchemy.orm import * from sqlalchemy.orm import exc as orm_exc from sqlalchemy.testing import * import sqlalchemy as sa from sqlalchemy import testing from sqlalchemy.testing import fixtures from sqlalchemy.orm import attributes from sqlalchemy.testing import eq_ from sqlalchemy.testing.schema import Table, Column class Employee(object): def __init__(self, name): self.name = name def __repr__(self): return self.__class__.__name__ + ' ' + self.name class Manager(Employee): def __init__(self, name, manager_data): self.name = name self.manager_data = manager_data def __repr__(self): return self.__class__.__name__ + ' ' + self.name + ' ' \ + self.manager_data class Engineer(Employee): def __init__(self, name, engineer_info): self.name = name self.engineer_info = engineer_info def __repr__(self): return self.__class__.__name__ + ' ' + self.name + ' ' \ + self.engineer_info class Hacker(Engineer): def __init__( self, name, nickname, engineer_info, ): self.name = name self.nickname = nickname self.engineer_info = engineer_info def __repr__(self): return self.__class__.__name__ + ' ' + self.name + " '" \ + self.nickname + "' " + self.engineer_info class Company(object): pass class ConcreteTest(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): global managers_table, engineers_table, hackers_table, \ companies, employees_table companies = Table('companies', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name' , String(50))) employees_table = Table('employees', metadata, Column('employee_id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', String(50)), Column('company_id', Integer, ForeignKey('companies.id'))) managers_table = Table( 'managers', metadata, Column('employee_id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', String(50)), Column('manager_data', String(50)), Column('company_id', Integer, ForeignKey('companies.id')), ) engineers_table = Table( 'engineers', metadata, Column('employee_id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', String(50)), Column('engineer_info', String(50)), Column('company_id', Integer, ForeignKey('companies.id')), ) hackers_table = Table( 'hackers', metadata, Column('employee_id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', String(50)), Column('engineer_info', String(50)), Column('company_id', Integer, ForeignKey('companies.id')), Column('nickname', String(50)), ) def test_basic(self): pjoin = polymorphic_union({'manager': managers_table, 'engineer' : engineers_table}, 'type', 'pjoin') employee_mapper = mapper(Employee, pjoin, polymorphic_on=pjoin.c.type) manager_mapper = mapper(Manager, managers_table, inherits=employee_mapper, concrete=True, polymorphic_identity='manager') engineer_mapper = mapper(Engineer, engineers_table, inherits=employee_mapper, concrete=True, polymorphic_identity='engineer') session = create_session() session.add(Manager('Tom', 'knows how to manage things')) session.add(Engineer('Kurt', 'knows how to hack')) session.flush() session.expunge_all() assert set([repr(x) for x in session.query(Employee)]) \ == set(['Engineer Kurt knows how to hack', 'Manager Tom knows how to 
manage things']) assert set([repr(x) for x in session.query(Manager)]) \ == set(['Manager Tom knows how to manage things']) assert set([repr(x) for x in session.query(Engineer)]) \ == set(['Engineer Kurt knows how to hack']) manager = session.query(Manager).one() session.expire(manager, ['manager_data']) eq_(manager.manager_data, 'knows how to manage things') def test_multi_level_no_base(self): pjoin = polymorphic_union({'manager': managers_table, 'engineer' : engineers_table, 'hacker' : hackers_table}, 'type', 'pjoin') pjoin2 = polymorphic_union({'engineer': engineers_table, 'hacker': hackers_table}, 'type', 'pjoin2') employee_mapper = mapper(Employee, pjoin, polymorphic_on=pjoin.c.type) manager_mapper = mapper(Manager, managers_table, inherits=employee_mapper, concrete=True, polymorphic_identity='manager') engineer_mapper = mapper( Engineer, engineers_table, with_polymorphic=('*', pjoin2), polymorphic_on=pjoin2.c.type, inherits=employee_mapper, concrete=True, polymorphic_identity='engineer', ) hacker_mapper = mapper(Hacker, hackers_table, inherits=engineer_mapper, concrete=True, polymorphic_identity='hacker') session = create_session() tom = Manager('Tom', 'knows how to manage things') assert_raises_message(AttributeError, "does not implement attribute .?'type' at the instance level.", setattr, tom, "type", "sometype") jerry = Engineer('Jerry', 'knows how to program') hacker = Hacker('Kurt', 'Badass', 'knows how to hack') assert_raises_message(AttributeError, "does not implement attribute .?'type' at the instance level.", setattr, hacker, "type", "sometype") session.add_all((tom, jerry, hacker)) session.flush() # ensure "readonly" on save logic didn't pollute the # expired_attributes collection assert 'nickname' \ not in attributes.instance_state(jerry).expired_attributes assert 'name' \ not in attributes.instance_state(jerry).expired_attributes assert 'name' \ not in attributes.instance_state(hacker).expired_attributes assert 'nickname' \ not in attributes.instance_state(hacker).expired_attributes def go(): eq_(jerry.name, 'Jerry') eq_(hacker.nickname, 'Badass') self.assert_sql_count(testing.db, go, 0) session.expunge_all() assert repr(session.query(Employee).filter(Employee.name == 'Tom').one()) \ == 'Manager Tom knows how to manage things' assert repr(session.query(Manager).filter(Manager.name == 'Tom' ).one()) == 'Manager Tom knows how to manage things' assert set([repr(x) for x in session.query(Employee).all()]) \ == set(['Engineer Jerry knows how to program', 'Manager Tom knows how to manage things', "Hacker Kurt 'Badass' knows how to hack"]) assert set([repr(x) for x in session.query(Manager).all()]) \ == set(['Manager Tom knows how to manage things']) assert set([repr(x) for x in session.query(Engineer).all()]) \ == set(['Engineer Jerry knows how to program', "Hacker Kurt 'Badass' knows how to hack"]) assert set([repr(x) for x in session.query(Hacker).all()]) \ == set(["Hacker Kurt 'Badass' knows how to hack"]) def test_multi_level_with_base(self): pjoin = polymorphic_union({ 'employee': employees_table, 'manager': managers_table, 'engineer': engineers_table, 'hacker': hackers_table, }, 'type', 'pjoin') pjoin2 = polymorphic_union({'engineer': engineers_table, 'hacker': hackers_table}, 'type', 'pjoin2') employee_mapper = mapper(Employee, employees_table, with_polymorphic=('*', pjoin), polymorphic_on=pjoin.c.type) manager_mapper = mapper(Manager, managers_table, inherits=employee_mapper, concrete=True, polymorphic_identity='manager') engineer_mapper = mapper( Engineer, 
engineers_table, with_polymorphic=('*', pjoin2), polymorphic_on=pjoin2.c.type, inherits=employee_mapper, concrete=True, polymorphic_identity='engineer', ) hacker_mapper = mapper(Hacker, hackers_table, inherits=engineer_mapper, concrete=True, polymorphic_identity='hacker') session = create_session() tom = Manager('Tom', 'knows how to manage things') jerry = Engineer('Jerry', 'knows how to program') hacker = Hacker('Kurt', 'Badass', 'knows how to hack') session.add_all((tom, jerry, hacker)) session.flush() def go(): eq_(jerry.name, 'Jerry') eq_(hacker.nickname, 'Badass') self.assert_sql_count(testing.db, go, 0) session.expunge_all() # check that we aren't getting a cartesian product in the raw # SQL. this requires that Engineer's polymorphic discriminator # is not rendered in the statement which is only against # Employee's "pjoin" assert len(testing.db.execute(session.query(Employee).with_labels().statement).fetchall()) \ == 3 assert set([repr(x) for x in session.query(Employee)]) \ == set(['Engineer Jerry knows how to program', 'Manager Tom knows how to manage things', "Hacker Kurt 'Badass' knows how to hack"]) assert set([repr(x) for x in session.query(Manager)]) \ == set(['Manager Tom knows how to manage things']) assert set([repr(x) for x in session.query(Engineer)]) \ == set(['Engineer Jerry knows how to program', "Hacker Kurt 'Badass' knows how to hack"]) assert set([repr(x) for x in session.query(Hacker)]) \ == set(["Hacker Kurt 'Badass' knows how to hack"]) def test_without_default_polymorphic(self): pjoin = polymorphic_union({ 'employee': employees_table, 'manager': managers_table, 'engineer': engineers_table, 'hacker': hackers_table, }, 'type', 'pjoin') pjoin2 = polymorphic_union({'engineer': engineers_table, 'hacker': hackers_table}, 'type', 'pjoin2') employee_mapper = mapper(Employee, employees_table, polymorphic_identity='employee') manager_mapper = mapper(Manager, managers_table, inherits=employee_mapper, concrete=True, polymorphic_identity='manager') engineer_mapper = mapper(Engineer, engineers_table, inherits=employee_mapper, concrete=True, polymorphic_identity='engineer') hacker_mapper = mapper(Hacker, hackers_table, inherits=engineer_mapper, concrete=True, polymorphic_identity='hacker') session = create_session() jdoe = Employee('Jdoe') tom = Manager('Tom', 'knows how to manage things') jerry = Engineer('Jerry', 'knows how to program') hacker = Hacker('Kurt', 'Badass', 'knows how to hack') session.add_all((jdoe, tom, jerry, hacker)) session.flush() eq_(len(testing.db.execute(session.query(Employee).with_polymorphic('*' , pjoin, pjoin.c.type).with_labels().statement).fetchall()), 4) eq_(session.query(Employee).get(jdoe.employee_id), jdoe) eq_(session.query(Engineer).get(jerry.employee_id), jerry) eq_(set([repr(x) for x in session.query(Employee).with_polymorphic('*', pjoin, pjoin.c.type)]), set(['Employee Jdoe', 'Engineer Jerry knows how to program', 'Manager Tom knows how to manage things', "Hacker Kurt 'Badass' knows how to hack"])) eq_(set([repr(x) for x in session.query(Manager)]), set(['Manager Tom knows how to manage things'])) eq_(set([repr(x) for x in session.query(Engineer).with_polymorphic('*', pjoin2, pjoin2.c.type)]), set(['Engineer Jerry knows how to program' , "Hacker Kurt 'Badass' knows how to hack"])) eq_(set([repr(x) for x in session.query(Hacker)]), set(["Hacker Kurt 'Badass' knows how to hack"])) # test adaption of the column by wrapping the query in a # subquery eq_(len(testing.db.execute(session.query(Engineer).with_polymorphic('*' , pjoin2, 
pjoin2.c.type).from_self().statement).fetchall()), 2) eq_(set([repr(x) for x in session.query(Engineer).with_polymorphic('*', pjoin2, pjoin2.c.type).from_self()]), set(['Engineer Jerry knows how to program', "Hacker Kurt 'Badass' knows how to hack"])) def test_relationship(self): pjoin = polymorphic_union({'manager': managers_table, 'engineer' : engineers_table}, 'type', 'pjoin') mapper(Company, companies, properties={'employees' : relationship(Employee)}) employee_mapper = mapper(Employee, pjoin, polymorphic_on=pjoin.c.type) manager_mapper = mapper(Manager, managers_table, inherits=employee_mapper, concrete=True, polymorphic_identity='manager') engineer_mapper = mapper(Engineer, engineers_table, inherits=employee_mapper, concrete=True, polymorphic_identity='engineer') session = create_session() c = Company() c.employees.append(Manager('Tom', 'knows how to manage things')) c.employees.append(Engineer('Kurt', 'knows how to hack')) session.add(c) session.flush() session.expunge_all() def go(): c2 = session.query(Company).get(c.id) assert set([repr(x) for x in c2.employees]) \ == set(['Engineer Kurt knows how to hack', 'Manager Tom knows how to manage things']) self.assert_sql_count(testing.db, go, 2) session.expunge_all() def go(): c2 = \ session.query(Company).options(joinedload(Company.employees)).get(c.id) assert set([repr(x) for x in c2.employees]) \ == set(['Engineer Kurt knows how to hack', 'Manager Tom knows how to manage things']) self.assert_sql_count(testing.db, go, 1) class PropertyInheritanceTest(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): Table('a_table', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('some_dest_id', Integer, ForeignKey('dest_table.id')), Column('aname', String(50))) Table('b_table', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('some_dest_id', Integer, ForeignKey('dest_table.id')), Column('bname', String(50))) Table('c_table', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('some_dest_id', Integer, ForeignKey('dest_table.id')), Column('cname', String(50))) Table('dest_table', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', String(50))) @classmethod def setup_classes(cls): class A(cls.Comparable): pass class B(A): pass class C(A): pass class Dest(cls.Comparable): pass def test_noninherited_warning(self): A, B, b_table, a_table, Dest, dest_table = (self.classes.A, self.classes.B, self.tables.b_table, self.tables.a_table, self.classes.Dest, self.tables.dest_table) mapper(A, a_table, properties={'some_dest': relationship(Dest)}) mapper(B, b_table, inherits=A, concrete=True) mapper(Dest, dest_table) b = B() dest = Dest() assert_raises(AttributeError, setattr, b, 'some_dest', dest) clear_mappers() mapper(A, a_table, properties={'a_id': a_table.c.id}) mapper(B, b_table, inherits=A, concrete=True) mapper(Dest, dest_table) b = B() assert_raises(AttributeError, setattr, b, 'a_id', 3) clear_mappers() mapper(A, a_table, properties={'a_id': a_table.c.id}) mapper(B, b_table, inherits=A, concrete=True) mapper(Dest, dest_table) def test_inheriting(self): A, B, b_table, a_table, Dest, dest_table = (self.classes.A, self.classes.B, self.tables.b_table, self.tables.a_table, self.classes.Dest, self.tables.dest_table) mapper(A, a_table, properties={ 'some_dest': relationship(Dest,back_populates='many_a') }) mapper(B, b_table, inherits=A, concrete=True, properties={ 'some_dest': 
relationship(Dest, back_populates='many_b') }) mapper(Dest, dest_table, properties={ 'many_a': relationship(A,back_populates='some_dest'), 'many_b': relationship(B,back_populates='some_dest') }) sess = sessionmaker()() dest1 = Dest(name='c1') dest2 = Dest(name='c2') a1 = A(some_dest=dest1, aname='a1') a2 = A(some_dest=dest2, aname='a2') b1 = B(some_dest=dest1, bname='b1') b2 = B(some_dest=dest1, bname='b2') assert_raises(AttributeError, setattr, b1, 'aname', 'foo') assert_raises(AttributeError, getattr, A, 'bname') assert dest2.many_a == [a2] assert dest1.many_a == [a1] assert dest1.many_b == [b1, b2] sess.add_all([dest1, dest2]) sess.commit() assert sess.query(Dest).filter(Dest.many_a.contains(a2)).one() is dest2 assert dest2.many_a == [a2] assert dest1.many_a == [a1] assert dest1.many_b == [b1, b2] assert sess.query(B).filter(B.bname == 'b1').one() is b1 def test_polymorphic_backref(self): """test multiple backrefs to the same polymorphically-loading attribute.""" A, C, B, c_table, b_table, a_table, Dest, dest_table = (self.classes.A, self.classes.C, self.classes.B, self.tables.c_table, self.tables.b_table, self.tables.a_table, self.classes.Dest, self.tables.dest_table) ajoin = polymorphic_union({'a': a_table, 'b': b_table, 'c':c_table}, 'type','ajoin') mapper( A, a_table, with_polymorphic=('*', ajoin), polymorphic_on=ajoin.c.type, polymorphic_identity='a', properties={ 'some_dest': relationship(Dest, back_populates='many_a') }, ) mapper( B, b_table, inherits=A, concrete=True, polymorphic_identity='b', properties={ 'some_dest': relationship(Dest, back_populates='many_a')}, ) mapper( C, c_table, inherits=A, concrete=True, polymorphic_identity='c', properties={ 'some_dest': relationship(Dest, back_populates='many_a')}, ) mapper(Dest, dest_table, properties={ 'many_a': relationship(A, back_populates='some_dest', order_by=ajoin.c.id) } ) sess = sessionmaker()() dest1 = Dest(name='c1') dest2 = Dest(name='c2') a1 = A(some_dest=dest1, aname='a1', id=1) a2 = A(some_dest=dest2, aname='a2', id=2) b1 = B(some_dest=dest1, bname='b1', id=3) b2 = B(some_dest=dest1, bname='b2', id=4) c1 = C(some_dest=dest1, cname='c1', id=5) c2 = C(some_dest=dest2, cname='c2', id=6) eq_([a2, c2], dest2.many_a) eq_([a1, b1, b2, c1], dest1.many_a) sess.add_all([dest1, dest2]) sess.commit() assert sess.query(Dest).filter(Dest.many_a.contains(a2)).one() is dest2 assert sess.query(Dest).filter(Dest.many_a.contains(b1)).one() is dest1 assert sess.query(Dest).filter(Dest.many_a.contains(c2)).one() is dest2 eq_(dest2.many_a, [a2, c2]) eq_(dest1.many_a, [a1, b1, b2, c1]) sess.expire_all() def go(): eq_( [ Dest(many_a=[A(aname='a1'), B(bname='b1'), B(bname='b2'), C(cname='c1')]), Dest(many_a=[A(aname='a2'), C(cname='c2')])], sess.query(Dest).options(joinedload(Dest.many_a)).order_by(Dest.id).all()) self.assert_sql_count(testing.db, go, 1) def test_merge_w_relationship(self): A, C, B, c_table, b_table, a_table, Dest, dest_table = (self.classes.A, self.classes.C, self.classes.B, self.tables.c_table, self.tables.b_table, self.tables.a_table, self.classes.Dest, self.tables.dest_table) ajoin = polymorphic_union({'a': a_table, 'b': b_table, 'c':c_table}, 'type','ajoin') mapper( A, a_table, with_polymorphic=('*', ajoin), polymorphic_on=ajoin.c.type, polymorphic_identity='a', properties={ 'some_dest': relationship(Dest, back_populates='many_a') }, ) mapper( B, b_table, inherits=A, concrete=True, polymorphic_identity='b', properties={ 'some_dest': relationship(Dest, back_populates='many_a')}, ) mapper( C, c_table, inherits=A, 
concrete=True, polymorphic_identity='c', properties={ 'some_dest': relationship(Dest, back_populates='many_a')}, ) mapper(Dest, dest_table, properties={ 'many_a': relationship(A, back_populates='some_dest', order_by=ajoin.c.id) } ) assert C.some_dest.property.parent is class_mapper(C) assert B.some_dest.property.parent is class_mapper(B) assert A.some_dest.property.parent is class_mapper(A) sess = sessionmaker()() dest1 = Dest(name='d1') dest2 = Dest(name='d2') a1 = A(some_dest=dest2, aname='a1') b1 = B(some_dest=dest1, bname='b1') c1 = C(some_dest=dest2, cname='c1') sess.add_all([dest1, dest2, c1, a1, b1]) sess.commit() sess2 = sessionmaker()() merged_c1 = sess2.merge(c1) eq_(merged_c1.some_dest.name, 'd2') eq_(merged_c1.some_dest_id, c1.some_dest_id) class ManyToManyTest(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): Table('base', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True)) Table('sub', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True)) Table('base_mtom', metadata, Column('base_id', Integer, ForeignKey('base.id'), primary_key=True), Column('related_id', Integer, ForeignKey('related.id'), primary_key=True)) Table('sub_mtom', metadata, Column('base_id', Integer, ForeignKey('sub.id'), primary_key=True), Column('related_id', Integer, ForeignKey('related.id'), primary_key=True)) Table('related', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True)) @classmethod def setup_classes(cls): class Base(cls.Comparable): pass class Sub(Base): pass class Related(cls.Comparable): pass def test_selective_relationships(self): sub, base_mtom, Related, Base, related, sub_mtom, base, Sub = (self.tables.sub, self.tables.base_mtom, self.classes.Related, self.classes.Base, self.tables.related, self.tables.sub_mtom, self.tables.base, self.classes.Sub) mapper(Base, base, properties={'related': relationship(Related, secondary=base_mtom, backref='bases', order_by=related.c.id)}) mapper(Sub, sub, inherits=Base, concrete=True, properties={'related': relationship(Related, secondary=sub_mtom, backref='subs', order_by=related.c.id)}) mapper(Related, related) sess = sessionmaker()() b1, s1, r1, r2, r3 = Base(), Sub(), Related(), Related(), \ Related() b1.related.append(r1) b1.related.append(r2) s1.related.append(r2) s1.related.append(r3) sess.add_all([b1, s1]) sess.commit() eq_(s1.related, [r2, r3]) eq_(b1.related, [r1, r2]) class ColKeysTest(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): global offices_table, refugees_table refugees_table = Table('refugee', metadata, Column('refugee_fid' , Integer, primary_key=True, test_needs_autoincrement=True), Column('refugee_name', Unicode(30), key='name')) offices_table = Table('office', metadata, Column('office_fid', Integer, primary_key=True, test_needs_autoincrement=True), Column('office_name', Unicode(30), key='name')) @classmethod def insert_data(cls): refugees_table.insert().execute(dict(refugee_fid=1, name=u'refugee1'), dict(refugee_fid=2, name=u'refugee2' )) offices_table.insert().execute(dict(office_fid=1, name=u'office1'), dict(office_fid=2, name=u'office2')) def test_keys(self): pjoin = polymorphic_union({'refugee': refugees_table, 'office' : offices_table}, 'type', 'pjoin') class Location(object): pass class Refugee(Location): pass class Office(Location): pass location_mapper = mapper(Location, pjoin, polymorphic_on=pjoin.c.type, polymorphic_identity='location') office_mapper = mapper(Office, offices_table, 
inherits=location_mapper, concrete=True, polymorphic_identity='office') refugee_mapper = mapper(Refugee, refugees_table, inherits=location_mapper, concrete=True, polymorphic_identity='refugee') sess = create_session() eq_(sess.query(Refugee).get(1).name, 'refugee1') eq_(sess.query(Refugee).get(2).name, 'refugee2') eq_(sess.query(Office).get(1).name, 'office1') eq_(sess.query(Office).get(2).name, 'office2') SQLAlchemy-0.8.4/test/orm/inheritance/test_magazine.py0000644000076500000240000002214512251150015023476 0ustar classicstaff00000000000000from sqlalchemy import * from sqlalchemy.orm import * from sqlalchemy import testing from sqlalchemy.testing.util import function_named from sqlalchemy.testing import fixtures from sqlalchemy.testing.schema import Table, Column class BaseObject(object): def __init__(self, *args, **kwargs): for key, value in kwargs.iteritems(): setattr(self, key, value) class Publication(BaseObject): pass class Issue(BaseObject): pass class Location(BaseObject): def __repr__(self): return "%s(%s, %s)" % (self.__class__.__name__, str(getattr(self, 'issue_id', None)), repr(str(self._name.name))) def _get_name(self): return self._name def _set_name(self, name): session = create_session() s = session.query(LocationName).filter(LocationName.name==name).first() session.expunge_all() if s is not None: self._name = s return found = False for i in session.new: if isinstance(i, LocationName) and i.name == name: self._name = i found = True break if found == False: self._name = LocationName(name=name) name = property(_get_name, _set_name) class LocationName(BaseObject): def __repr__(self): return "%s()" % (self.__class__.__name__) class PageSize(BaseObject): def __repr__(self): return "%s(%sx%s, %s)" % (self.__class__.__name__, self.width, self.height, self.name) class Magazine(BaseObject): def __repr__(self): return "%s(%s, %s)" % (self.__class__.__name__, repr(self.location), repr(self.size)) class Page(BaseObject): def __repr__(self): return "%s(%s)" % (self.__class__.__name__, str(self.page_no)) class MagazinePage(Page): def __repr__(self): return "%s(%s, %s)" % (self.__class__.__name__, str(self.page_no), repr(self.magazine)) class ClassifiedPage(MagazinePage): pass class MagazineTest(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): global publication_table, issue_table, location_table, location_name_table, magazine_table, \ page_table, magazine_page_table, classified_page_table, page_size_table publication_table = Table('publication', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', String(45), default=''), ) issue_table = Table('issue', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('publication_id', Integer, ForeignKey('publication.id')), Column('issue', Integer), ) location_table = Table('location', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('issue_id', Integer, ForeignKey('issue.id')), Column('ref', CHAR(3), default=''), Column('location_name_id', Integer, ForeignKey('location_name.id')), ) location_name_table = Table('location_name', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', String(45), default=''), ) magazine_table = Table('magazine', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('location_id', Integer, ForeignKey('location.id')), Column('page_size_id', Integer, ForeignKey('page_size.id')), ) page_table = Table('page', 
metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('page_no', Integer), Column('type', CHAR(1), default='p'), ) magazine_page_table = Table('magazine_page', metadata, Column('page_id', Integer, ForeignKey('page.id'), primary_key=True), Column('magazine_id', Integer, ForeignKey('magazine.id')), Column('orders', Text, default=''), ) classified_page_table = Table('classified_page', metadata, Column('magazine_page_id', Integer, ForeignKey('magazine_page.page_id'), primary_key=True), Column('titles', String(45), default=''), ) page_size_table = Table('page_size', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('width', Integer), Column('height', Integer), Column('name', String(45), default=''), ) def _generate_round_trip_test(use_unions=False, use_joins=False): def test_roundtrip(self): publication_mapper = mapper(Publication, publication_table) issue_mapper = mapper(Issue, issue_table, properties = { 'publication': relationship(Publication, backref=backref('issues', cascade="all, delete-orphan")), }) location_name_mapper = mapper(LocationName, location_name_table) location_mapper = mapper(Location, location_table, properties = { 'issue': relationship(Issue, backref=backref('locations', lazy='joined', cascade="all, delete-orphan")), '_name': relationship(LocationName), }) page_size_mapper = mapper(PageSize, page_size_table) magazine_mapper = mapper(Magazine, magazine_table, properties = { 'location': relationship(Location, backref=backref('magazine', uselist=False)), 'size': relationship(PageSize), }) if use_unions: page_join = polymorphic_union( { 'm': page_table.join(magazine_page_table), 'c': page_table.join(magazine_page_table).join(classified_page_table), 'p': page_table.select(page_table.c.type=='p'), }, None, 'page_join') page_mapper = mapper(Page, page_table, with_polymorphic=('*', page_join), polymorphic_on=page_join.c.type, polymorphic_identity='p') elif use_joins: page_join = page_table.outerjoin(magazine_page_table).outerjoin(classified_page_table) page_mapper = mapper(Page, page_table, with_polymorphic=('*', page_join), polymorphic_on=page_table.c.type, polymorphic_identity='p') else: page_mapper = mapper(Page, page_table, polymorphic_on=page_table.c.type, polymorphic_identity='p') if use_unions: magazine_join = polymorphic_union( { 'm': page_table.join(magazine_page_table), 'c': page_table.join(magazine_page_table).join(classified_page_table), }, None, 'page_join') magazine_page_mapper = mapper(MagazinePage, magazine_page_table, with_polymorphic=('*', magazine_join), inherits=page_mapper, polymorphic_identity='m', properties={ 'magazine': relationship(Magazine, backref=backref('pages', order_by=magazine_join.c.page_no)) }) elif use_joins: magazine_join = page_table.join(magazine_page_table).outerjoin(classified_page_table) magazine_page_mapper = mapper(MagazinePage, magazine_page_table, with_polymorphic=('*', magazine_join), inherits=page_mapper, polymorphic_identity='m', properties={ 'magazine': relationship(Magazine, backref=backref('pages', order_by=page_table.c.page_no)) }) else: magazine_page_mapper = mapper(MagazinePage, magazine_page_table, inherits=page_mapper, polymorphic_identity='m', properties={ 'magazine': relationship(Magazine, backref=backref('pages', order_by=page_table.c.page_no)) }) classified_page_mapper = mapper(ClassifiedPage, classified_page_table, inherits=magazine_page_mapper, polymorphic_identity='c', primary_key=[page_table.c.id]) session = create_session() pub = 
Publication(name='Test') issue = Issue(issue=46,publication=pub) location = Location(ref='ABC',name='London',issue=issue) page_size = PageSize(name='A4',width=210,height=297) magazine = Magazine(location=location,size=page_size) page = ClassifiedPage(magazine=magazine,page_no=1) page2 = MagazinePage(magazine=magazine,page_no=2) page3 = ClassifiedPage(magazine=magazine,page_no=3) session.add(pub) session.flush() print [x for x in session] session.expunge_all() session.flush() session.expunge_all() p = session.query(Publication).filter(Publication.name=="Test").one() print p.issues[0].locations[0].magazine.pages print [page, page2, page3] assert repr(p.issues[0].locations[0].magazine.pages) == repr([page, page2, page3]), repr(p.issues[0].locations[0].magazine.pages) test_roundtrip = function_named( test_roundtrip, "test_%s" % (not use_union and (use_joins and "joins" or "select") or "unions")) setattr(MagazineTest, test_roundtrip.__name__, test_roundtrip) for (use_union, use_join) in [(True, False), (False, True), (False, False)]: _generate_round_trip_test(use_union, use_join) SQLAlchemy-0.8.4/test/orm/inheritance/test_manytomany.py0000644000076500000240000002027212251150015024076 0ustar classicstaff00000000000000from sqlalchemy.testing import eq_ from sqlalchemy import * from sqlalchemy.orm import * from sqlalchemy import testing from sqlalchemy.testing import fixtures class InheritTest(fixtures.MappedTest): """deals with inheritance and many-to-many relationships""" @classmethod def define_tables(cls, metadata): global principals global users global groups global user_group_map principals = Table('principals', metadata, Column('principal_id', Integer, Sequence('principal_id_seq', optional=False), primary_key=True), Column('name', String(50), nullable=False)) users = Table('prin_users', metadata, Column('principal_id', Integer, ForeignKey('principals.principal_id'), primary_key=True), Column('password', String(50), nullable=False), Column('email', String(50), nullable=False), Column('login_id', String(50), nullable=False)) groups = Table('prin_groups', metadata, Column('principal_id', Integer, ForeignKey('principals.principal_id'), primary_key=True)) user_group_map = Table('prin_user_group_map', metadata, Column('user_id', Integer, ForeignKey( "prin_users.principal_id"), primary_key=True ), Column('group_id', Integer, ForeignKey( "prin_groups.principal_id"), primary_key=True ), ) def testbasic(self): class Principal(object): def __init__(self, **kwargs): for key, value in kwargs.iteritems(): setattr(self, key, value) class User(Principal): pass class Group(Principal): pass mapper(Principal, principals) mapper(User, users, inherits=Principal) mapper(Group, groups, inherits=Principal, properties={ 'users': relationship(User, secondary=user_group_map, lazy='select', backref="groups") }) g = Group(name="group1") g.users.append(User(name="user1", password="pw", email="foo@bar.com", login_id="lg1")) sess = create_session() sess.add(g) sess.flush() # TODO: put an assertion class InheritTest2(fixtures.MappedTest): """deals with inheritance and many-to-many relationships""" @classmethod def define_tables(cls, metadata): global foo, bar, foo_bar foo = Table('foo', metadata, Column('id', Integer, Sequence('foo_id_seq', optional=True), primary_key=True), Column('data', String(20)), ) bar = Table('bar', metadata, Column('bid', Integer, ForeignKey('foo.id'), primary_key=True), #Column('fid', Integer, ForeignKey('foo.id'), ) ) foo_bar = Table('foo_bar', metadata, Column('foo_id', Integer, 
ForeignKey('foo.id')), Column('bar_id', Integer, ForeignKey('bar.bid'))) def testget(self): class Foo(object): def __init__(self, data=None): self.data = data class Bar(Foo):pass mapper(Foo, foo) mapper(Bar, bar, inherits=Foo) print foo.join(bar).primary_key print class_mapper(Bar).primary_key b = Bar('somedata') sess = create_session() sess.add(b) sess.flush() sess.expunge_all() # test that "bar.bid" does not need to be referenced in a get # (ticket 185) assert sess.query(Bar).get(b.id).id == b.id def testbasic(self): class Foo(object): def __init__(self, data=None): self.data = data mapper(Foo, foo) class Bar(Foo): pass mapper(Bar, bar, inherits=Foo, properties={ 'foos': relationship(Foo, secondary=foo_bar, lazy='joined') }) sess = create_session() b = Bar('barfoo') sess.add(b) sess.flush() f1 = Foo('subfoo1') f2 = Foo('subfoo2') b.foos.append(f1) b.foos.append(f2) sess.flush() sess.expunge_all() l = sess.query(Bar).all() print l[0] print l[0].foos self.assert_unordered_result(l, Bar, # {'id':1, 'data':'barfoo', 'bid':1, 'foos':(Foo, [{'id':2,'data':'subfoo1'}, {'id':3,'data':'subfoo2'}])}, {'id':b.id, 'data':'barfoo', 'foos':(Foo, [{'id':f1.id,'data':'subfoo1'}, {'id':f2.id,'data':'subfoo2'}])}, ) class InheritTest3(fixtures.MappedTest): """deals with inheritance and many-to-many relationships""" @classmethod def define_tables(cls, metadata): global foo, bar, blub, bar_foo, blub_bar, blub_foo # the 'data' columns are to appease SQLite which cant handle a blank INSERT foo = Table('foo', metadata, Column('id', Integer, Sequence('foo_seq', optional=True), primary_key=True), Column('data', String(20))) bar = Table('bar', metadata, Column('id', Integer, ForeignKey('foo.id'), primary_key=True), Column('data', String(20))) blub = Table('blub', metadata, Column('id', Integer, ForeignKey('bar.id'), primary_key=True), Column('data', String(20))) bar_foo = Table('bar_foo', metadata, Column('bar_id', Integer, ForeignKey('bar.id')), Column('foo_id', Integer, ForeignKey('foo.id'))) blub_bar = Table('bar_blub', metadata, Column('blub_id', Integer, ForeignKey('blub.id')), Column('bar_id', Integer, ForeignKey('bar.id'))) blub_foo = Table('blub_foo', metadata, Column('blub_id', Integer, ForeignKey('blub.id')), Column('foo_id', Integer, ForeignKey('foo.id'))) def testbasic(self): class Foo(object): def __init__(self, data=None): self.data = data def __repr__(self): return "Foo id %d, data %s" % (self.id, self.data) mapper(Foo, foo) class Bar(Foo): def __repr__(self): return "Bar id %d, data %s" % (self.id, self.data) mapper(Bar, bar, inherits=Foo, properties={ 'foos' :relationship(Foo, secondary=bar_foo, lazy='select') }) sess = create_session() b = Bar('bar #1') sess.add(b) b.foos.append(Foo("foo #1")) b.foos.append(Foo("foo #2")) sess.flush() compare = [repr(b)] + sorted([repr(o) for o in b.foos]) sess.expunge_all() l = sess.query(Bar).all() print repr(l[0]) + repr(l[0].foos) found = [repr(l[0])] + sorted([repr(o) for o in l[0].foos]) eq_(found, compare) @testing.fails_on('maxdb', 'FIXME: unknown') def testadvanced(self): class Foo(object): def __init__(self, data=None): self.data = data def __repr__(self): return "Foo id %d, data %s" % (self.id, self.data) mapper(Foo, foo) class Bar(Foo): def __repr__(self): return "Bar id %d, data %s" % (self.id, self.data) mapper(Bar, bar, inherits=Foo) class Blub(Bar): def __repr__(self): return "Blub id %d, data %s, bars %s, foos %s" % (self.id, self.data, repr([b for b in self.bars]), repr([f for f in self.foos])) mapper(Blub, blub, inherits=Bar, properties={ 
'bars':relationship(Bar, secondary=blub_bar, lazy='joined'), 'foos':relationship(Foo, secondary=blub_foo, lazy='joined'), }) sess = create_session() f1 = Foo("foo #1") b1 = Bar("bar #1") b2 = Bar("bar #2") bl1 = Blub("blub #1") for o in (f1, b1, b2, bl1): sess.add(o) bl1.foos.append(f1) bl1.bars.append(b2) sess.flush() compare = repr(bl1) blubid = bl1.id sess.expunge_all() l = sess.query(Blub).all() print l self.assert_(repr(l[0]) == compare) sess.expunge_all() x = sess.query(Blub).filter_by(id=blubid).one() print x self.assert_(repr(x) == compare) SQLAlchemy-0.8.4/test/orm/inheritance/test_poly_linked_list.py0000644000076500000240000001635712251150015025257 0ustar classicstaff00000000000000from sqlalchemy import * from sqlalchemy.orm import * from sqlalchemy.testing import fixtures from sqlalchemy import testing from sqlalchemy.testing.schema import Table, Column class PolymorphicCircularTest(fixtures.MappedTest): run_setup_mappers = 'once' @classmethod def define_tables(cls, metadata): global Table1, Table1B, Table2, Table3, Data table1 = Table('table1', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('related_id', Integer, ForeignKey('table1.id'), nullable=True), Column('type', String(30)), Column('name', String(30)) ) table2 = Table('table2', metadata, Column('id', Integer, ForeignKey('table1.id'), primary_key=True), ) table3 = Table('table3', metadata, Column('id', Integer, ForeignKey('table1.id'), primary_key=True), ) data = Table('data', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('node_id', Integer, ForeignKey('table1.id')), Column('data', String(30)) ) #join = polymorphic_union( # { # 'table3' : table1.join(table3), # 'table2' : table1.join(table2), # 'table1' : table1.select(table1.c.type.in_(['table1', 'table1b'])), # }, None, 'pjoin') join = table1.outerjoin(table2).outerjoin(table3).alias('pjoin') #join = None class Table1(object): def __init__(self, name, data=None): self.name = name if data is not None: self.data = data def __repr__(self): return "%s(%s, %s, %s)" % (self.__class__.__name__, self.id, repr(str(self.name)), repr(self.data)) class Table1B(Table1): pass class Table2(Table1): pass class Table3(Table1): pass class Data(object): def __init__(self, data): self.data = data def __repr__(self): return "%s(%s, %s)" % (self.__class__.__name__, self.id, repr(str(self.data))) try: # this is how the mapping used to work. ensure that this raises an error now table1_mapper = mapper(Table1, table1, select_table=join, polymorphic_on=table1.c.type, polymorphic_identity='table1', properties={ 'nxt': relationship(Table1, backref=backref('prev', foreignkey=join.c.id, uselist=False), uselist=False, primaryjoin=join.c.id==join.c.related_id), 'data':relationship(mapper(Data, data)) }, order_by=table1.c.id) configure_mappers() assert False except: assert True clear_mappers() # currently, the "eager" relationships degrade to lazy relationships # due to the polymorphic load. # the "nxt" relationship used to have a "lazy='joined'" on it, but the EagerLoader raises the "self-referential" # exception now. since eager loading would never work for that relationship anyway, its better that the user # gets an exception instead of it silently not eager loading. # NOTE: using "nxt" instead of "next" to avoid 2to3 turning it into __next__() for some reason. 
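# The working mapping below sidesteps the failure exercised above: the
# self-referential "nxt"/"prev" pair is wired directly against table1's own
# columns (primaryjoin=table1.c.id==table1.c.related_id, with
# remote_side=table1.c.id on the backref) rather than against the polymorphic
# "pjoin" alias, select_table is left out, and only the plain one-to-many
# "data" collection keeps lazy='joined'.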
table1_mapper = mapper(Table1, table1, #select_table=join, polymorphic_on=table1.c.type, polymorphic_identity='table1', properties={ 'nxt': relationship(Table1, backref=backref('prev', remote_side=table1.c.id, uselist=False), uselist=False, primaryjoin=table1.c.id==table1.c.related_id), 'data':relationship(mapper(Data, data), lazy='joined', order_by=data.c.id) }, order_by=table1.c.id ) table1b_mapper = mapper(Table1B, inherits=table1_mapper, polymorphic_identity='table1b') table2_mapper = mapper(Table2, table2, inherits=table1_mapper, polymorphic_identity='table2') table3_mapper = mapper(Table3, table3, inherits=table1_mapper, polymorphic_identity='table3') configure_mappers() assert table1_mapper.primary_key == (table1.c.id,), table1_mapper.primary_key @testing.fails_on('maxdb', 'FIXME: unknown') def testone(self): self._testlist([Table1, Table2, Table1, Table2]) @testing.fails_on('maxdb', 'FIXME: unknown') def testtwo(self): self._testlist([Table3]) @testing.fails_on('maxdb', 'FIXME: unknown') def testthree(self): self._testlist([Table2, Table1, Table1B, Table3, Table3, Table1B, Table1B, Table2, Table1]) @testing.fails_on('maxdb', 'FIXME: unknown') def testfour(self): self._testlist([ Table2('t2', [Data('data1'), Data('data2')]), Table1('t1', []), Table3('t3', [Data('data3')]), Table1B('t1b', [Data('data4'), Data('data5')]) ]) def _testlist(self, classes): sess = create_session( ) # create objects in a linked list count = 1 obj = None for c in classes: if isinstance(c, type): newobj = c('item %d' % count) count += 1 else: newobj = c if obj is not None: obj.nxt = newobj else: t = newobj obj = newobj # save to DB sess.add(t) sess.flush() # string version of the saved list assertlist = [] node = t while (node): assertlist.append(node) n = node.nxt if n is not None: assert n.prev is node node = n original = repr(assertlist) # clear and query forwards sess.expunge_all() node = sess.query(Table1).filter(Table1.id==t.id).first() assertlist = [] while (node): assertlist.append(node) n = node.nxt if n is not None: assert n.prev is node node = n forwards = repr(assertlist) # clear and query backwards sess.expunge_all() node = sess.query(Table1).filter(Table1.id==obj.id).first() assertlist = [] while (node): assertlist.insert(0, node) n = node.prev if n is not None: assert n.nxt is node node = n backwards = repr(assertlist) # everything should match ! 
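# "original" is the repr() of the list as it was built before the flush;
# "forwards" re-reads it from the head node by following .nxt; "backwards"
# re-reads it from the tail node by following .prev and prepending. All three
# traversals must produce the identical repr() string.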
assert original == forwards == backwards SQLAlchemy-0.8.4/test/orm/inheritance/test_poly_persistence.py0000644000076500000240000003324712251147172025311 0ustar classicstaff00000000000000"""tests basic polymorphic mapper loading/saving, minimal relationships""" from sqlalchemy.testing import eq_, assert_raises, assert_raises_message from sqlalchemy import * from sqlalchemy.orm import * from sqlalchemy.orm import exc as orm_exc from sqlalchemy import exc as sa_exc from sqlalchemy.testing.schema import Column from sqlalchemy import testing from sqlalchemy.testing.util import function_named from test.orm import _fixtures from sqlalchemy.testing import fixtures class Person(fixtures.ComparableEntity): pass class Engineer(Person): pass class Manager(Person): pass class Boss(Manager): pass class Company(fixtures.ComparableEntity): pass class PolymorphTest(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): global companies, people, engineers, managers, boss companies = Table('companies', metadata, Column('company_id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', String(50))) people = Table('people', metadata, Column('person_id', Integer, primary_key=True, test_needs_autoincrement=True), Column('company_id', Integer, ForeignKey('companies.company_id'), nullable=False), Column('name', String(50)), Column('type', String(30))) engineers = Table('engineers', metadata, Column('person_id', Integer, ForeignKey('people.person_id'), primary_key=True), Column('status', String(30)), Column('engineer_name', String(50)), Column('primary_language', String(50)), ) managers = Table('managers', metadata, Column('person_id', Integer, ForeignKey('people.person_id'), primary_key=True), Column('status', String(30)), Column('manager_name', String(50)) ) boss = Table('boss', metadata, Column('boss_id', Integer, ForeignKey('managers.person_id'), primary_key=True), Column('golf_swing', String(30)), ) metadata.create_all() class InsertOrderTest(PolymorphTest): def test_insert_order(self): """test that classes of multiple types mix up mapper inserts so that insert order of individual tables is maintained""" person_join = polymorphic_union( { 'engineer':people.join(engineers), 'manager':people.join(managers), 'person':people.select(people.c.type=='person'), }, None, 'pjoin') person_mapper = mapper(Person, people, with_polymorphic=('*', person_join), polymorphic_on=person_join.c.type, polymorphic_identity='person') mapper(Engineer, engineers, inherits=person_mapper, polymorphic_identity='engineer') mapper(Manager, managers, inherits=person_mapper, polymorphic_identity='manager') mapper(Company, companies, properties={ 'employees': relationship(Person, backref='company', order_by=person_join.c.person_id) }) session = create_session() c = Company(name='company1') c.employees.append(Manager(status='AAB', manager_name='manager1' , name='pointy haired boss')) c.employees.append(Engineer(status='BBA', engineer_name='engineer1', primary_language='java', name='dilbert')) c.employees.append(Person(status='HHH', name='joesmith')) c.employees.append(Engineer(status='CGG', engineer_name='engineer2', primary_language='python', name='wally')) c.employees.append(Manager(status='ABA', manager_name='manager2' , name='jsmith')) session.add(c) session.flush() session.expunge_all() eq_(session.query(Company).get(c.company_id), c) class RoundTripTest(PolymorphTest): pass def _generate_round_trip_test(include_base, lazy_relationship, redefine_colprop, with_polymorphic): """generates a round trip test. 
include_base - whether or not to include the base 'person' type in the union. lazy_relationship - whether or not the Company relationship to People is lazy or eager. redefine_colprop - if we redefine the 'name' column to be 'people_name' on the base Person class use_literal_join - primary join condition is explicitly specified """ def test_roundtrip(self): if with_polymorphic == 'unions': if include_base: person_join = polymorphic_union( { 'engineer':people.join(engineers), 'manager':people.join(managers), 'person':people.select(people.c.type=='person'), }, None, 'pjoin') else: person_join = polymorphic_union( { 'engineer':people.join(engineers), 'manager':people.join(managers), }, None, 'pjoin') manager_join = people.join(managers).outerjoin(boss) person_with_polymorphic = ['*', person_join] manager_with_polymorphic = ['*', manager_join] elif with_polymorphic == 'joins': person_join = people.outerjoin(engineers).outerjoin(managers).\ outerjoin(boss) manager_join = people.join(managers).outerjoin(boss) person_with_polymorphic = ['*', person_join] manager_with_polymorphic = ['*', manager_join] elif with_polymorphic == 'auto': person_with_polymorphic = '*' manager_with_polymorphic = '*' else: person_with_polymorphic = None manager_with_polymorphic = None if redefine_colprop: person_mapper = mapper(Person, people, with_polymorphic=person_with_polymorphic, polymorphic_on=people.c.type, polymorphic_identity='person', properties= {'person_name':people.c.name}) else: person_mapper = mapper(Person, people, with_polymorphic=person_with_polymorphic, polymorphic_on=people.c.type, polymorphic_identity='person') mapper(Engineer, engineers, inherits=person_mapper, polymorphic_identity='engineer') mapper(Manager, managers, inherits=person_mapper, with_polymorphic=manager_with_polymorphic, polymorphic_identity='manager') mapper(Boss, boss, inherits=Manager, polymorphic_identity='boss') mapper(Company, companies, properties={ 'employees': relationship(Person, lazy=lazy_relationship, cascade="all, delete-orphan", backref="company", order_by=people.c.person_id ) }) if redefine_colprop: person_attribute_name = 'person_name' else: person_attribute_name = 'name' employees = [ Manager(status='AAB', manager_name='manager1', **{person_attribute_name:'pointy haired boss'}), Engineer(status='BBA', engineer_name='engineer1', primary_language='java', **{person_attribute_name:'dilbert'}), ] if include_base: employees.append(Person(**{person_attribute_name:'joesmith'})) employees += [ Engineer(status='CGG', engineer_name='engineer2', primary_language='python', **{person_attribute_name:'wally'}), Manager(status='ABA', manager_name='manager2', **{person_attribute_name:'jsmith'}) ] pointy = employees[0] jsmith = employees[-1] dilbert = employees[1] session = create_session() c = Company(name='company1') c.employees = employees session.add(c) session.flush() session.expunge_all() eq_(session.query(Person).get(dilbert.person_id), dilbert) session.expunge_all() eq_(session.query(Person).filter( Person.person_id==dilbert.person_id).one(), dilbert) session.expunge_all() def go(): cc = session.query(Company).get(c.company_id) eq_(cc.employees, employees) if not lazy_relationship: if with_polymorphic != 'none': self.assert_sql_count(testing.db, go, 1) else: self.assert_sql_count(testing.db, go, 5) else: if with_polymorphic != 'none': self.assert_sql_count(testing.db, go, 2) else: self.assert_sql_count(testing.db, go, 6) # test selecting from the query, using the base # mapped table (people) as the selection criterion. 
# in the case of the polymorphic Person query, # the "people" selectable should be adapted to be "person_join" eq_( session.query(Person).filter( getattr(Person, person_attribute_name)=='dilbert' ).first(), dilbert ) assert session.query(Person).filter( getattr(Person, person_attribute_name)=='dilbert' ).first().person_id eq_( session.query(Engineer).filter( getattr(Person, person_attribute_name)=='dilbert' ).first(), dilbert ) # test selecting from the query, joining against # an alias of the base "people" table. test that # the "palias" alias does *not* get sucked up # into the "person_join" conversion. palias = people.alias("palias") dilbert = session.query(Person).get(dilbert.person_id) assert dilbert is session.query(Person).filter( (palias.c.name=='dilbert') & \ (palias.c.person_id==Person.person_id)).first() assert dilbert is session.query(Engineer).filter( (palias.c.name=='dilbert') & \ (palias.c.person_id==Person.person_id)).first() assert dilbert is session.query(Person).filter( (Engineer.engineer_name=="engineer1") & \ (engineers.c.person_id==people.c.person_id) ).first() assert dilbert is session.query(Engineer).\ filter(Engineer.engineer_name=="engineer1")[0] session.flush() session.expunge_all() def go(): session.query(Person).filter(getattr(Person, person_attribute_name)=='dilbert').first() self.assert_sql_count(testing.db, go, 1) session.expunge_all() dilbert = session.query(Person).filter(getattr(Person, person_attribute_name)=='dilbert').first() def go(): # assert that only primary table is queried for # already-present-in-session d = session.query(Person).filter(getattr(Person, person_attribute_name)=='dilbert').first() self.assert_sql_count(testing.db, go, 1) # test standalone orphans daboss = Boss(status='BBB', manager_name='boss', golf_swing='fore', **{person_attribute_name:'daboss'}) session.add(daboss) assert_raises(sa_exc.DBAPIError, session.flush) c = session.query(Company).first() daboss.company = c manager_list = [e for e in c.employees if isinstance(e, Manager)] session.flush() session.expunge_all() eq_(session.query(Manager).order_by(Manager.person_id).all(), manager_list) c = session.query(Company).first() session.delete(c) session.flush() eq_(people.count().scalar(), 0) test_roundtrip = function_named( test_roundtrip, "test_%s%s%s_%s" % ( (lazy_relationship and "lazy" or "eager"), (include_base and "_inclbase" or ""), (redefine_colprop and "_redefcol" or ""), with_polymorphic)) setattr(RoundTripTest, test_roundtrip.__name__, test_roundtrip) for lazy_relationship in [True, False]: for redefine_colprop in [True, False]: for with_polymorphic in ['unions', 'joins', 'auto', 'none']: if with_polymorphic == 'unions': for include_base in [True, False]: _generate_round_trip_test(include_base, lazy_relationship, redefine_colprop, with_polymorphic) else: _generate_round_trip_test(False, lazy_relationship, redefine_colprop, with_polymorphic) SQLAlchemy-0.8.4/test/orm/inheritance/test_polymorphic_rel.py0000644000076500000240000013243612251150015025117 0ustar classicstaff00000000000000from sqlalchemy import func, desc from sqlalchemy.orm import interfaces, create_session, joinedload, joinedload_all, \ subqueryload, subqueryload_all, aliased,\ class_mapper from sqlalchemy import exc as sa_exc from sqlalchemy import testing from sqlalchemy.testing import assert_raises, eq_ from _poly_fixtures import Company, Person, Engineer, Manager, Boss, \ Machine, Paperwork, _Polymorphic,\ _PolymorphicPolymorphic, _PolymorphicUnions, _PolymorphicJoins,\ _PolymorphicAliasedJoins class 
_PolymorphicTestBase(object): @classmethod def setup_mappers(cls): super(_PolymorphicTestBase, cls).setup_mappers() global people, engineers, managers, boss global companies, paperwork, machines people, engineers, managers, boss,\ companies, paperwork, machines = \ cls.tables.people, cls.tables.engineers, \ cls.tables.managers, cls.tables.boss,\ cls.tables.companies, cls.tables.paperwork, cls.tables.machines @classmethod def insert_data(cls): super(_PolymorphicTestBase, cls).insert_data() global all_employees, c1_employees, c2_employees global c1, c2, e1, e2, e3, b1, m1 c1, c2, all_employees, c1_employees, c2_employees = \ cls.c1, cls.c2, cls.all_employees, \ cls.c1_employees, cls.c2_employees e1, e2, e3, b1, m1 = \ cls.e1, cls.e2, cls.e3, cls.b1, cls.m1 def test_loads_at_once(self): """ Test that all objects load from the full query, when with_polymorphic is used. """ sess = create_session() def go(): eq_(sess.query(Person).all(), all_employees) count = {'':14, 'Polymorphic':9}.get(self.select_type, 10) self.assert_sql_count(testing.db, go, count) def test_primary_eager_aliasing_one(self): # For both joinedload() and subqueryload(), if the original q is # not loading the subclass table, the joinedload doesn't happen. sess = create_session() def go(): eq_(sess.query(Person) .options(joinedload(Engineer.machines))[1:3], all_employees[1:3]) count = {'':6, 'Polymorphic':3}.get(self.select_type, 4) self.assert_sql_count(testing.db, go, count) def test_primary_eager_aliasing_two(self): sess = create_session() def go(): eq_(sess.query(Person) .options(subqueryload(Engineer.machines)).all(), all_employees) count = {'':14, 'Polymorphic':7}.get(self.select_type, 8) self.assert_sql_count(testing.db, go, count) def test_primary_eager_aliasing_three(self): # assert the JOINs don't over JOIN sess = create_session() def go(): eq_(sess.query(Person).with_polymorphic('*') .options(joinedload(Engineer.machines))[1:3], all_employees[1:3]) self.assert_sql_count(testing.db, go, 3) eq_(sess.query(Person).with_polymorphic('*') .options(joinedload(Engineer.machines)) .limit(2).offset(1).with_labels() .subquery().count().scalar(), 2) def test_get_one(self): """ For all mappers, ensure the primary key has been calculated as just the "person_id" column. 
""" sess = create_session() eq_(sess.query(Person).get(e1.person_id), Engineer(name="dilbert", primary_language="java")) def test_get_two(self): sess = create_session() eq_(sess.query(Engineer).get(e1.person_id), Engineer(name="dilbert", primary_language="java")) def test_get_three(self): sess = create_session() eq_(sess.query(Manager).get(b1.person_id), Boss(name="pointy haired boss", golf_swing="fore")) def test_multi_join(self): sess = create_session() e = aliased(Person) c = aliased(Company) q = sess.query(Company, Person, c, e)\ .join(Person, Company.employees)\ .join(e, c.employees)\ .filter(Person.name == 'dilbert')\ .filter(e.name == 'wally') eq_(q.count(), 1) eq_(q.all(), [ ( Company(company_id=1, name=u'MegaCorp, Inc.'), Engineer( status=u'regular engineer', engineer_name=u'dilbert', name=u'dilbert', company_id=1, primary_language=u'java', person_id=1, type=u'engineer'), Company(company_id=1, name=u'MegaCorp, Inc.'), Engineer( status=u'regular engineer', engineer_name=u'wally', name=u'wally', company_id=1, primary_language=u'c++', person_id=2, type=u'engineer') ) ]) def test_filter_on_subclass_one(self): sess = create_session() eq_(sess.query(Engineer).all()[0], Engineer(name="dilbert")) def test_filter_on_subclass_two(self): sess = create_session() eq_(sess.query(Engineer).first(), Engineer(name="dilbert")) def test_filter_on_subclass_three(self): sess = create_session() eq_(sess.query(Engineer) .filter(Engineer.person_id == e1.person_id).first(), Engineer(name="dilbert")) def test_filter_on_subclass_four(self): sess = create_session() eq_(sess.query(Manager) .filter(Manager.person_id == m1.person_id).one(), Manager(name="dogbert")) def test_filter_on_subclass_five(self): sess = create_session() eq_(sess.query(Manager) .filter(Manager.person_id == b1.person_id).one(), Boss(name="pointy haired boss")) def test_filter_on_subclass_six(self): sess = create_session() eq_(sess.query(Boss) .filter(Boss.person_id == b1.person_id).one(), Boss(name="pointy haired boss")) def test_join_from_polymorphic_nonaliased_one(self): sess = create_session() eq_(sess.query(Person) .join('paperwork', aliased=False) .filter(Paperwork.description.like('%review%')).all(), [b1, m1]) def test_join_from_polymorphic_nonaliased_two(self): sess = create_session() eq_(sess.query(Person) .join('paperwork', aliased=False) .filter(Paperwork.description.like('%#2%')).all(), [e1, m1]) def test_join_from_polymorphic_nonaliased_three(self): sess = create_session() eq_(sess.query(Engineer) .join('paperwork', aliased=False) .filter(Paperwork.description.like('%#2%')).all(), [e1]) def test_join_from_polymorphic_nonaliased_four(self): sess = create_session() eq_(sess.query(Person) .join('paperwork', aliased=False) .filter(Person.name.like('%dog%')) .filter(Paperwork.description.like('%#2%')).all(), [m1]) def test_join_from_polymorphic_aliased_one(self): sess = create_session() eq_(sess.query(Person) .join('paperwork', aliased=True) .filter(Paperwork.description.like('%review%')).all(), [b1, m1]) def test_join_from_polymorphic_aliased_two(self): sess = create_session() eq_(sess.query(Person) .join('paperwork', aliased=True) .filter(Paperwork.description.like('%#2%')).all(), [e1, m1]) def test_join_from_polymorphic_aliased_three(self): sess = create_session() eq_(sess.query(Engineer) .join('paperwork', aliased=True) .filter(Paperwork.description.like('%#2%')).all(), [e1]) def test_join_from_polymorphic_aliased_four(self): sess = create_session() eq_(sess.query(Person) .join('paperwork', aliased=True) 
.filter(Person.name.like('%dog%')) .filter(Paperwork.description.like('%#2%')).all(), [m1]) def test_join_from_with_polymorphic_nonaliased_one(self): sess = create_session() eq_(sess.query(Person) .with_polymorphic(Manager) .join('paperwork') .filter(Paperwork.description.like('%review%')).all(), [b1, m1]) def test_join_from_with_polymorphic_nonaliased_two(self): sess = create_session() eq_(sess.query(Person) .with_polymorphic([Manager, Engineer]) .join('paperwork') .filter(Paperwork.description.like('%#2%')).all(), [e1, m1]) def test_join_from_with_polymorphic_nonaliased_three(self): sess = create_session() eq_(sess.query(Person) .with_polymorphic([Manager, Engineer]) .join('paperwork') .filter(Person.name.like('%dog%')) .filter(Paperwork.description.like('%#2%')).all(), [m1]) def test_join_from_with_polymorphic_aliased_one(self): sess = create_session() eq_(sess.query(Person) .with_polymorphic(Manager) .join('paperwork', aliased=True) .filter(Paperwork.description.like('%review%')).all(), [b1, m1]) def test_join_from_with_polymorphic_aliased_two(self): sess = create_session() eq_(sess.query(Person) .with_polymorphic([Manager, Engineer]) .join('paperwork', aliased=True) .filter(Paperwork.description.like('%#2%')).all(), [e1, m1]) def test_join_from_with_polymorphic_aliased_three(self): sess = create_session() eq_(sess.query(Person) .with_polymorphic([Manager, Engineer]) .join('paperwork', aliased=True) .filter(Person.name.like('%dog%')) .filter(Paperwork.description.like('%#2%')).all(), [m1]) def test_join_to_polymorphic_nonaliased(self): sess = create_session() eq_(sess.query(Company) .join('employees') .filter(Person.name == 'vlad').one(), c2) def test_join_to_polymorphic_aliased(self): sess = create_session() eq_(sess.query(Company) .join('employees', aliased=True) .filter(Person.name == 'vlad').one(), c2) def test_polymorphic_any_one(self): sess = create_session() any_ = Company.employees.any(Person.name == 'vlad') eq_(sess.query(Company).filter(any_).all(), [c2]) def test_polymorphic_any_two(self): sess = create_session() # test that the aliasing on "Person" does not bleed into the # EXISTS clause generated by any() any_ = Company.employees.any(Person.name == 'wally') eq_(sess.query(Company) .join(Company.employees, aliased=True) .filter(Person.name == 'dilbert') .filter(any_).all(), [c1]) def test_polymorphic_any_three(self): sess = create_session() any_ = Company.employees.any(Person.name == 'vlad') eq_(sess.query(Company) .join(Company.employees, aliased=True) .filter(Person.name == 'dilbert') .filter(any_).all(), []) def test_polymorphic_any_eight(self): sess = create_session() any_ = Engineer.machines.any( Machine.name == "Commodore 64") eq_(sess.query(Person).filter(any_).all(), [e2, e3]) def test_polymorphic_any_nine(self): sess = create_session() any_ = Person.paperwork.any( Paperwork.description == "review #2") eq_(sess.query(Person).filter(any_).all(), [m1]) def test_join_from_columns_or_subclass_one(self): sess = create_session() expected = [ (u'dogbert',), (u'pointy haired boss',)] eq_(sess.query(Manager.name) .order_by(Manager.name).all(), expected) def test_join_from_columns_or_subclass_two(self): sess = create_session() expected = [ (u'dogbert',), (u'dogbert',), (u'pointy haired boss',)] eq_(sess.query(Manager.name) .join(Paperwork, Manager.paperwork) .order_by(Manager.name).all(), expected) def test_join_from_columns_or_subclass_three(self): sess = create_session() expected = [ (u'dilbert',), (u'dilbert',), (u'dogbert',), (u'dogbert',), (u'pointy haired boss',), 
(u'vlad',), (u'wally',), (u'wally',)] eq_(sess.query(Person.name) .join(Paperwork, Person.paperwork) .order_by(Person.name).all(), expected) def test_join_from_columns_or_subclass_four(self): sess = create_session() # Load Person.name, joining from Person -> paperwork, get all # the people. expected = [ (u'dilbert',), (u'dilbert',), (u'dogbert',), (u'dogbert',), (u'pointy haired boss',), (u'vlad',), (u'wally',), (u'wally',)] eq_(sess.query(Person.name) .join(paperwork, Person.person_id == paperwork.c.person_id) .order_by(Person.name).all(), expected) def test_join_from_columns_or_subclass_five(self): sess = create_session() # same, on manager. get only managers. expected = [ (u'dogbert',), (u'dogbert',), (u'pointy haired boss',)] eq_(sess.query(Manager.name) .join(paperwork, Manager.person_id == paperwork.c.person_id) .order_by(Person.name).all(), expected) def test_join_from_columns_or_subclass_six(self): sess = create_session() if self.select_type == '': # this now raises, due to [ticket:1892]. Manager.person_id # is now the "person_id" column on Manager. SQL is incorrect. assert_raises( sa_exc.DBAPIError, sess.query(Person.name) .join(paperwork, Manager.person_id == paperwork.c.person_id) .order_by(Person.name).all) elif self.select_type == 'Unions': # with the union, not something anyone would really be using # here, it joins to the full result set. This is 0.6's # behavior and is more or less wrong. expected = [ (u'dilbert',), (u'dilbert',), (u'dogbert',), (u'dogbert',), (u'pointy haired boss',), (u'vlad',), (u'wally',), (u'wally',)] eq_(sess.query(Person.name) .join(paperwork, Manager.person_id == paperwork.c.person_id) .order_by(Person.name).all(), expected) else: # when a join is present and managers.person_id is available, # you get the managers. 
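# (the remaining select types here are presumably the joins, aliased-joins
# and fully-polymorphic fixtures imported from _poly_fixtures; with the
# subclass table joined in, Manager.person_id maps to managers.person_id,
# so the join naturally restricts the result to manager rows)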
expected = [ (u'dogbert',), (u'dogbert',), (u'pointy haired boss',)] eq_(sess.query(Person.name) .join(paperwork, Manager.person_id == paperwork.c.person_id) .order_by(Person.name).all(), expected) def test_join_from_columns_or_subclass_seven(self): sess = create_session() eq_(sess.query(Manager) .join(Paperwork, Manager.paperwork) .order_by(Manager.name).all(), [m1, b1]) def test_join_from_columns_or_subclass_eight(self): sess = create_session() expected = [ (u'dogbert',), (u'dogbert',), (u'pointy haired boss',)] eq_(sess.query(Manager.name) .join(paperwork, Manager.person_id == paperwork.c.person_id) .order_by(Manager.name).all(), expected) def test_join_from_columns_or_subclass_nine(self): sess = create_session() eq_(sess.query(Manager.person_id) .join(paperwork, Manager.person_id == paperwork.c.person_id) .order_by(Manager.name).all(), [(4,), (4,), (3,)]) def test_join_from_columns_or_subclass_ten(self): sess = create_session() expected = [ (u'pointy haired boss', u'review #1'), (u'dogbert', u'review #2'), (u'dogbert', u'review #3')] eq_(sess.query(Manager.name, Paperwork.description) .join(Paperwork, Manager.person_id == Paperwork.person_id) .order_by(Paperwork.paperwork_id).all(), expected) def test_join_from_columns_or_subclass_eleven(self): sess = create_session() expected = [ (u'pointy haired boss',), (u'dogbert',), (u'dogbert',)] malias = aliased(Manager) eq_(sess.query(malias.name) .join(paperwork, malias.person_id == paperwork.c.person_id) .all(), expected) def test_subclass_option_pathing(self): from sqlalchemy.orm import defer sess = create_session() dilbert = sess.query(Person).\ options(defer(Engineer.machines, Machine.name)).\ filter(Person.name == 'dilbert').first() m = dilbert.machines[0] assert 'name' not in m.__dict__ eq_(m.name, 'IBM ThinkPad') def test_expire(self): """ Test that individual column refresh doesn't get tripped up by the select_table mapper. 
""" sess = create_session() name = 'dogbert' m1 = sess.query(Manager).filter(Manager.name == name).one() sess.expire(m1) assert m1.status == 'regular manager' name = 'pointy haired boss' m2 = sess.query(Manager).filter(Manager.name == name).one() sess.expire(m2, ['manager_name', 'golf_swing']) assert m2.golf_swing == 'fore' def test_with_polymorphic_one(self): sess = create_session() def go(): eq_(sess.query(Person) .with_polymorphic(Engineer) .filter(Engineer.primary_language == 'java').all(), self._emps_wo_relationships_fixture()[0:1]) self.assert_sql_count(testing.db, go, 1) def test_with_polymorphic_two(self): sess = create_session() def go(): eq_(sess.query(Person) .with_polymorphic('*').all(), self._emps_wo_relationships_fixture()) self.assert_sql_count(testing.db, go, 1) def test_with_polymorphic_three(self): sess = create_session() def go(): eq_(sess.query(Person) .with_polymorphic(Engineer).all(), self._emps_wo_relationships_fixture()) self.assert_sql_count(testing.db, go, 3) def test_with_polymorphic_four(self): sess = create_session() def go(): eq_(sess.query(Person) .with_polymorphic( Engineer, people.outerjoin(engineers)) .all(), self._emps_wo_relationships_fixture()) self.assert_sql_count(testing.db, go, 3) def test_with_polymorphic_five(self): sess = create_session() def go(): # limit the polymorphic join down to just "Person", # overriding select_table eq_(sess.query(Person) .with_polymorphic(Person).all(), self._emps_wo_relationships_fixture()) self.assert_sql_count(testing.db, go, 6) def test_with_polymorphic_six(self): sess = create_session() assert_raises(sa_exc.InvalidRequestError, sess.query(Person).with_polymorphic, Paperwork) assert_raises(sa_exc.InvalidRequestError, sess.query(Engineer).with_polymorphic, Boss) assert_raises(sa_exc.InvalidRequestError, sess.query(Engineer).with_polymorphic, Person) def test_with_polymorphic_seven(self): sess = create_session() # compare to entities without related collections to prevent # additional lazy SQL from firing on loaded entities eq_(sess.query(Person).with_polymorphic('*').all(), self._emps_wo_relationships_fixture()) def test_relationship_to_polymorphic_one(self): expected = self._company_with_emps_machines_fixture() sess = create_session() def go(): # test load Companies with lazy load to 'employees' eq_(sess.query(Company).all(), expected) count = {'':10, 'Polymorphic':5}.get(self.select_type, 6) self.assert_sql_count(testing.db, go, count) def test_relationship_to_polymorphic_two(self): expected = self._company_with_emps_machines_fixture() sess = create_session() def go(): # with #2438, of_type() is recognized. This # overrides the with_polymorphic of the mapper # and we get a consistent 3 queries now. 
eq_(sess.query(Company) .options(joinedload_all( Company.employees.of_type(Engineer), Engineer.machines)) .all(), expected) # in the old case, we would get this #count = {'':7, 'Polymorphic':1}.get(self.select_type, 2) # query one is company->Person/Engineer->Machines # query two is managers + boss for row #3 # query three is managers for row #4 count = 3 self.assert_sql_count(testing.db, go, count) def test_relationship_to_polymorphic_three(self): expected = self._company_with_emps_machines_fixture() sess = create_session() sess = create_session() def go(): eq_(sess.query(Company) .options(subqueryload_all( Company.employees.of_type(Engineer), Engineer.machines)) .all(), expected) # the old case where subqueryload_all # didn't work with of_tyoe #count = { '':8, 'Joins':4, 'Unions':4, 'Polymorphic':3, # 'AliasedJoins':4}[self.select_type] # query one is company->Person/Engineer->Machines # query two is Person/Engineer subq # query three is Machines subq # (however this test can't tell if the Q was a # lazyload or subqload ...) # query four is managers + boss for row #3 # query five is managers for row #4 count = 5 self.assert_sql_count(testing.db, go, count) def test_joinedload_on_subclass(self): sess = create_session() expected = [ Engineer( name="dilbert", engineer_name="dilbert", primary_language="java", status="regular engineer", machines=[ Machine(name="IBM ThinkPad"), Machine(name="IPhone")])] def go(): # test load People with joinedload to engineers + machines eq_(sess.query(Person) .with_polymorphic('*') .options(joinedload(Engineer.machines)) .filter(Person.name == 'dilbert').all(), expected) self.assert_sql_count(testing.db, go, 1) sess = create_session() def go(): # test load People with subqueryload to engineers + machines eq_(sess.query(Person) .with_polymorphic('*') .options(subqueryload(Engineer.machines)) .filter(Person.name == 'dilbert').all(), expected) self.assert_sql_count(testing.db, go, 2) def test_query_subclass_join_to_base_relationship(self): sess = create_session() # non-polymorphic eq_(sess.query(Engineer) .join(Person.paperwork).all(), [e1, e2, e3]) def test_join_to_subclass(self): sess = create_session() eq_(sess.query(Company) .join(people.join(engineers), 'employees') .filter(Engineer.primary_language == 'java').all(), [c1]) def test_join_to_subclass_one(self): sess = create_session() eq_(sess.query(Company) .select_from(companies.join(people).join(engineers)) .filter(Engineer.primary_language == 'java').all(), [c1]) def test_join_to_subclass_two(self): sess = create_session() eq_(sess.query(Company) .join(people.join(engineers), 'employees') .filter(Engineer.primary_language == 'java').all(), [c1]) def test_join_to_subclass_three(self): sess = create_session() ealias = aliased(Engineer) eq_(sess.query(Company) .join(ealias, 'employees') .filter(ealias.primary_language == 'java').all(), [c1]) def test_join_to_subclass_six(self): sess = create_session() eq_(sess.query(Company) .join(people.join(engineers), 'employees') .join(Engineer.machines).all(), [c1, c2]) def test_join_to_subclass_seven(self): sess = create_session() eq_(sess.query(Company) .join(people.join(engineers), 'employees') .join(Engineer.machines) .filter(Machine.name.ilike("%thinkpad%")).all(), [c1]) def test_join_to_subclass_eight(self): sess = create_session() eq_(sess.query(Person) .join(Engineer.machines).all(), [e1, e2, e3]) def test_join_to_subclass_nine(self): sess = create_session() eq_(sess.query(Company) .select_from(companies.join(people).join(engineers)) 
.filter(Engineer.primary_language == 'java').all(), [c1]) def test_join_to_subclass_ten(self): sess = create_session() eq_(sess.query(Company) .join('employees') .filter(Engineer.primary_language == 'java').all(), [c1]) def test_join_to_subclass_eleven(self): sess = create_session() eq_(sess.query(Company) .select_from(companies.join(people).join(engineers)) .filter(Engineer.primary_language == 'java').all(), [c1]) def test_join_to_subclass_twelve(self): sess = create_session() eq_(sess.query(Person) .join(Engineer.machines).all(), [e1, e2, e3]) def test_join_to_subclass_thirteen(self): sess = create_session() eq_(sess.query(Person) .join(Engineer.machines) .filter(Machine.name.ilike("%ibm%")).all(), [e1, e3]) def test_join_to_subclass_fourteen(self): sess = create_session() eq_(sess.query(Company) .join('employees', Engineer.machines).all(), [c1, c2]) def test_join_to_subclass_fifteen(self): sess = create_session() eq_(sess.query(Company) .join('employees', Engineer.machines) .filter(Machine.name.ilike("%thinkpad%")).all(), [c1]) def test_join_to_subclass_sixteen(self): sess = create_session() # non-polymorphic eq_(sess.query(Engineer) .join(Engineer.machines).all(), [e1, e2, e3]) def test_join_to_subclass_seventeen(self): sess = create_session() eq_(sess.query(Engineer) .join(Engineer.machines) .filter(Machine.name.ilike("%ibm%")).all(), [e1, e3]) def test_join_through_polymorphic_nonaliased_one(self): sess = create_session() eq_(sess.query(Company) .join('employees', 'paperwork', aliased=False) .filter(Paperwork.description.like('%#2%')).all(), [c1]) def test_join_through_polymorphic_nonaliased_two(self): sess = create_session() eq_(sess.query(Company) .join('employees', 'paperwork', aliased=False) .filter(Paperwork.description.like('%#%')).all(), [c1, c2]) def test_join_through_polymorphic_nonaliased_three(self): sess = create_session() eq_(sess.query(Company) .join('employees', 'paperwork', aliased=False) .filter(Person.name.in_(['dilbert', 'vlad'])) .filter(Paperwork.description.like('%#2%')).all(), [c1]) def test_join_through_polymorphic_nonaliased_four(self): sess = create_session() eq_(sess.query(Company) .join('employees', 'paperwork', aliased=False) .filter(Person.name.in_(['dilbert', 'vlad'])) .filter(Paperwork.description.like('%#%')).all(), [c1, c2]) def test_join_through_polymorphic_nonaliased_five(self): sess = create_session() eq_(sess.query(Company) .join('employees', aliased=aliased) .filter(Person.name.in_(['dilbert', 'vlad'])) .join('paperwork', from_joinpoint=True, aliased=False) .filter(Paperwork.description.like('%#2%')).all(), [c1]) def test_join_through_polymorphic_nonaliased_six(self): sess = create_session() eq_(sess.query(Company) .join('employees', aliased=aliased) .filter(Person.name.in_(['dilbert', 'vlad'])) .join('paperwork', from_joinpoint=True, aliased=False) .filter(Paperwork.description.like('%#%')).all(), [c1, c2]) def test_join_through_polymorphic_aliased_one(self): sess = create_session() eq_(sess.query(Company) .join('employees', 'paperwork', aliased=True) .filter(Paperwork.description.like('%#2%')).all(), [c1]) def test_join_through_polymorphic_aliased_two(self): sess = create_session() eq_(sess.query(Company) .join('employees', 'paperwork', aliased=True) .filter(Paperwork.description.like('%#%')).all(), [c1, c2]) def test_join_through_polymorphic_aliased_three(self): sess = create_session() eq_(sess.query(Company) .join('employees', 'paperwork', aliased=True) .filter(Person.name.in_(['dilbert', 'vlad'])) 
.filter(Paperwork.description.like('%#2%')).all(), [c1]) def test_join_through_polymorphic_aliased_four(self): sess = create_session() eq_(sess.query(Company) .join('employees', 'paperwork', aliased=True) .filter(Person.name.in_(['dilbert', 'vlad'])) .filter(Paperwork.description.like('%#%')).all(), [c1, c2]) def test_join_through_polymorphic_aliased_five(self): sess = create_session() eq_(sess.query(Company) .join('employees', aliased=aliased) .filter(Person.name.in_(['dilbert', 'vlad'])) .join('paperwork', from_joinpoint=True, aliased=True) .filter(Paperwork.description.like('%#2%')).all(), [c1]) def test_join_through_polymorphic_aliased_six(self): sess = create_session() eq_(sess.query(Company) .join('employees', aliased=aliased) .filter(Person.name.in_(['dilbert', 'vlad'])) .join('paperwork', from_joinpoint=True, aliased=True) .filter(Paperwork.description.like('%#%')).all(), [c1, c2]) def test_explicit_polymorphic_join(self): sess = create_session() # join from Company to Engineer; join condition formulated by # ORMJoin using regular table foreign key connections. Engineer # is expressed as "(select * people join engineers) as anon_1" # so the join is contained. eq_(sess.query(Company) .join(Engineer) .filter(Engineer.engineer_name == 'vlad').one(), c2) # same, using explicit join condition. Query.join() must # adapt the on clause here to match the subquery wrapped around # "people join engineers". eq_(sess.query(Company) .join(Engineer, Company.company_id == Engineer.company_id) .filter(Engineer.engineer_name == 'vlad').one(), c2) def test_filter_on_baseclass(self): sess = create_session() eq_(sess.query(Person).all(), all_employees) eq_(sess.query(Person).first(), all_employees[0]) eq_(sess.query(Person) .filter(Person.person_id == e2.person_id).one(), e2) def test_from_alias(self): sess = create_session() palias = aliased(Person) eq_(sess.query(palias) .filter(palias.name.in_(['dilbert', 'wally'])).all(), [e1, e2]) def test_self_referential_one(self): sess = create_session() palias = aliased(Person) expected = [(m1, e1), (m1, e2), (m1, b1)] eq_(sess.query(Person, palias) .filter(Person.company_id == palias.company_id) .filter(Person.name == 'dogbert') .filter(Person.person_id > palias.person_id) .order_by(Person.person_id, palias.person_id).all(), expected) def test_self_referential_two(self): sess = create_session() palias = aliased(Person) expected = [(m1, e1), (m1, e2), (m1, b1)] eq_(sess.query(Person, palias) .filter(Person.company_id == palias.company_id) .filter(Person.name == 'dogbert') .filter(Person.person_id > palias.person_id) .from_self() .order_by(Person.person_id, palias.person_id).all(), expected) def test_nesting_queries(self): # query.statement places a flag "no_adapt" on the returned # statement. This prevents the polymorphic adaptation in the # second "filter" from hitting it, which would pollute the # subquery and usually results in recursion overflow errors # within the adaption. 
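# the fixture below selects engineers.c.person_id directly, turns the Query
# into a scalar SELECT via .statement.as_scalar(), and embeds that subquery
# in an IN clause against Person.person_id on the outer query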
sess = create_session() subq = (sess.query(engineers.c.person_id) .filter(Engineer.primary_language == 'java') .statement.as_scalar()) eq_(sess.query(Person) .filter(Person.person_id.in_(subq)).one(), e1) def test_mixed_entities_one(self): sess = create_session() expected = [ (Engineer( status=u'regular engineer', engineer_name=u'dilbert', name=u'dilbert', company_id=1, primary_language=u'java', person_id=1, type=u'engineer'), u'MegaCorp, Inc.'), (Engineer( status=u'regular engineer', engineer_name=u'wally', name=u'wally', company_id=1, primary_language=u'c++', person_id=2, type=u'engineer'), u'MegaCorp, Inc.'), (Engineer( status=u'elbonian engineer', engineer_name=u'vlad', name=u'vlad', company_id=2, primary_language=u'cobol', person_id=5, type=u'engineer'), u'Elbonia, Inc.')] eq_(sess.query(Engineer, Company.name) .join(Company.employees) .filter(Person.type == 'engineer').all(), expected) def test_mixed_entities_two(self): sess = create_session() expected = [ (u'java', u'MegaCorp, Inc.'), (u'cobol', u'Elbonia, Inc.'), (u'c++', u'MegaCorp, Inc.')] eq_(sess.query(Engineer.primary_language, Company.name) .join(Company.employees) .filter(Person.type == 'engineer') .order_by(desc(Engineer.primary_language)).all(), expected) def test_mixed_entities_three(self): sess = create_session() palias = aliased(Person) expected = [( Engineer( status=u'elbonian engineer', engineer_name=u'vlad', name=u'vlad', primary_language=u'cobol'), u'Elbonia, Inc.', Engineer( status=u'regular engineer', engineer_name=u'dilbert', name=u'dilbert', company_id=1, primary_language=u'java', person_id=1, type=u'engineer'))] eq_(sess.query(Person, Company.name, palias) .join(Company.employees) .filter(Company.name == 'Elbonia, Inc.') .filter(palias.name == 'dilbert').all(), expected) def test_mixed_entities_four(self): sess = create_session() palias = aliased(Person) expected = [( Engineer( status=u'regular engineer', engineer_name=u'dilbert', name=u'dilbert', company_id=1, primary_language=u'java', person_id=1, type=u'engineer'), u'Elbonia, Inc.', Engineer( status=u'elbonian engineer', engineer_name=u'vlad', name=u'vlad', primary_language=u'cobol'),)] eq_(sess.query(palias, Company.name, Person) .join(Company.employees) .filter(Company.name == 'Elbonia, Inc.') .filter(palias.name == 'dilbert').all(), expected) def test_mixed_entities_five(self): sess = create_session() palias = aliased(Person) expected = [(u'vlad', u'Elbonia, Inc.', u'dilbert')] eq_(sess.query(Person.name, Company.name, palias.name) .join(Company.employees) .filter(Company.name == 'Elbonia, Inc.') .filter(palias.name == 'dilbert').all(), expected) def test_mixed_entities_six(self): sess = create_session() palias = aliased(Person) expected = [ (u'manager', u'dogbert', u'engineer', u'dilbert'), (u'manager', u'dogbert', u'engineer', u'wally'), (u'manager', u'dogbert', u'boss', u'pointy haired boss')] eq_(sess.query(Person.type, Person.name, palias.type, palias.name) .filter(Person.company_id == palias.company_id) .filter(Person.name == 'dogbert') .filter(Person.person_id > palias.person_id) .order_by(Person.person_id, palias.person_id).all(), expected) def test_mixed_entities_seven(self): sess = create_session() expected = [ (u'dilbert', u'tps report #1'), (u'dilbert', u'tps report #2'), (u'dogbert', u'review #2'), (u'dogbert', u'review #3'), (u'pointy haired boss', u'review #1'), (u'vlad', u'elbonian missive #3'), (u'wally', u'tps report #3'), (u'wally', u'tps report #4')] eq_(sess.query(Person.name, Paperwork.description) .filter(Person.person_id == 
Paperwork.person_id) .order_by(Person.name, Paperwork.description).all(), expected) def test_mixed_entities_eight(self): sess = create_session() eq_(sess.query(func.count(Person.person_id)) .filter(Engineer.primary_language == 'java').all(), [(1,)]) def test_mixed_entities_nine(self): sess = create_session() expected = [(u'Elbonia, Inc.', 1), (u'MegaCorp, Inc.', 4)] eq_(sess.query(Company.name, func.count(Person.person_id)) .filter(Company.company_id == Person.company_id) .group_by(Company.name) .order_by(Company.name).all(), expected) def test_mixed_entities_ten(self): sess = create_session() expected = [(u'Elbonia, Inc.', 1), (u'MegaCorp, Inc.', 4)] eq_(sess.query(Company.name, func.count(Person.person_id)) .join(Company.employees) .group_by(Company.name) .order_by(Company.name).all(), expected) #def test_mixed_entities(self): # sess = create_session() # TODO: I think raise error on these for now. different # inheritance/loading schemes have different results here, # all incorrect # # eq_( # sess.query(Person.name, Engineer.primary_language).all(), # []) #def test_mixed_entities(self): # sess = create_session() # eq_(sess.query( # Person.name, # Engineer.primary_language, # Manager.manager_name) # .all(), # []) def test_mixed_entities_eleven(self): sess = create_session() expected = [(u'java',), (u'c++',), (u'cobol',)] eq_(sess.query(Engineer.primary_language) .filter(Person.type == 'engineer').all(), expected) def test_mixed_entities_twelve(self): sess = create_session() expected = [(u'vlad', u'Elbonia, Inc.')] eq_(sess.query(Person.name, Company.name) .join(Company.employees) .filter(Company.name == 'Elbonia, Inc.').all(), expected) def test_mixed_entities_thirteen(self): sess = create_session() expected = [(u'pointy haired boss', u'fore')] eq_(sess.query(Boss.name, Boss.golf_swing).all(), expected) def test_mixed_entities_fourteen(self): sess = create_session() expected = [ (u'dilbert', u'java'), (u'wally', u'c++'), (u'vlad', u'cobol')] eq_(sess.query(Engineer.name, Engineer.primary_language).all(), expected) def test_mixed_entities_fifteen(self): sess = create_session() expected = [( u'Elbonia, Inc.', Engineer( status=u'elbonian engineer', engineer_name=u'vlad', name=u'vlad', primary_language=u'cobol'))] eq_(sess.query(Company.name, Person) .join(Company.employees) .filter(Company.name == 'Elbonia, Inc.').all(), expected) def test_mixed_entities_sixteen(self): sess = create_session() expected = [( Engineer( status=u'elbonian engineer', engineer_name=u'vlad', name=u'vlad', primary_language=u'cobol'), u'Elbonia, Inc.')] eq_(sess.query(Person, Company.name) .join(Company.employees) .filter(Company.name == 'Elbonia, Inc.').all(), expected) def test_mixed_entities_seventeen(self): sess = create_session() expected = [('pointy haired boss',), ('dogbert',)] eq_(sess.query(Manager.name).all(), expected) def test_mixed_entities_eighteen(self): sess = create_session() expected = [('pointy haired boss foo',), ('dogbert foo',)] eq_(sess.query(Manager.name + " foo").all(), expected) def test_mixed_entities_nineteen(self): sess = create_session() row = sess.query(Engineer.name, Engineer.primary_language) \ .filter(Engineer.name == 'dilbert').first() assert row.name == 'dilbert' assert row.primary_language == 'java' class PolymorphicTest(_PolymorphicTestBase, _Polymorphic): def test_join_to_subclass_four(self): sess = create_session() eq_(sess.query(Person) .select_from(people.join(engineers)) .join(Engineer.machines).all(), [e1, e2, e3]) def test_join_to_subclass_five(self): sess = create_session() 
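# same select_from() join as test_join_to_subclass_four, narrowed here
# by an ilike() filter on Machine.name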
eq_(sess.query(Person) .select_from(people.join(engineers)) .join(Engineer.machines) .filter(Machine.name.ilike("%ibm%")).all(), [e1, e3]) def test_join_to_subclass_ten(self): pass def test_mixed_entities_one(self): pass def test_mixed_entities_two(self): pass def test_mixed_entities_eight(self): pass def test_polymorphic_any_eight(self): pass class PolymorphicPolymorphicTest(_PolymorphicTestBase, _PolymorphicPolymorphic): pass class PolymorphicUnionsTest(_PolymorphicTestBase, _PolymorphicUnions): pass class PolymorphicAliasedJoinsTest(_PolymorphicTestBase, _PolymorphicAliasedJoins): pass class PolymorphicJoinsTest(_PolymorphicTestBase, _PolymorphicJoins): pass SQLAlchemy-0.8.4/test/orm/inheritance/test_productspec.py0000644000076500000240000003075112251150015024240 0ustar classicstaff00000000000000from datetime import datetime from sqlalchemy import * from sqlalchemy.orm import * from sqlalchemy import testing from sqlalchemy.testing import fixtures from sqlalchemy.testing.schema import Table, Column class InheritTest(fixtures.MappedTest): """tests some various inheritance round trips involving a particular set of polymorphic inheritance relationships""" @classmethod def define_tables(cls, metadata): global products_table, specification_table, documents_table global Product, Detail, Assembly, SpecLine, Document, RasterDocument products_table = Table('products', metadata, Column('product_id', Integer, primary_key=True, test_needs_autoincrement=True), Column('product_type', String(128)), Column('name', String(128)), Column('mark', String(128)), ) specification_table = Table('specification', metadata, Column('spec_line_id', Integer, primary_key=True, test_needs_autoincrement=True), Column('master_id', Integer, ForeignKey("products.product_id"), nullable=True), Column('slave_id', Integer, ForeignKey("products.product_id"), nullable=True), Column('quantity', Float, default=1.), ) documents_table = Table('documents', metadata, Column('document_id', Integer, primary_key=True, test_needs_autoincrement=True), Column('document_type', String(128)), Column('product_id', Integer, ForeignKey('products.product_id')), Column('create_date', DateTime, default=lambda:datetime.now()), Column('last_updated', DateTime, default=lambda:datetime.now(), onupdate=lambda:datetime.now()), Column('name', String(128)), Column('data', LargeBinary), Column('size', Integer, default=0), ) class Product(object): def __init__(self, name, mark=''): self.name = name self.mark = mark def __repr__(self): return '<%s %s>' % (self.__class__.__name__, self.name) class Detail(Product): def __init__(self, name): self.name = name class Assembly(Product): def __repr__(self): return Product.__repr__(self) + " " + " ".join([x + "=" + repr(getattr(self, x, None)) for x in ['specification', 'documents']]) class SpecLine(object): def __init__(self, master=None, slave=None, quantity=1): self.master = master self.slave = slave self.quantity = quantity def __repr__(self): return '<%s %.01f %s>' % ( self.__class__.__name__, self.quantity or 0., repr(self.slave) ) class Document(object): def __init__(self, name, data=None): self.name = name self.data = data def __repr__(self): return '<%s %s>' % (self.__class__.__name__, self.name) class RasterDocument(Document): pass def testone(self): product_mapper = mapper(Product, products_table, polymorphic_on=products_table.c.product_type, polymorphic_identity='product') detail_mapper = mapper(Detail, inherits=product_mapper, polymorphic_identity='detail') assembly_mapper = mapper(Assembly, 
inherits=product_mapper, polymorphic_identity='assembly') specification_mapper = mapper(SpecLine, specification_table, properties=dict( master=relationship(Assembly, foreign_keys=[specification_table.c.master_id], primaryjoin=specification_table.c.master_id==products_table.c.product_id, lazy='select', backref=backref('specification'), uselist=False), slave=relationship(Product, foreign_keys=[specification_table.c.slave_id], primaryjoin=specification_table.c.slave_id==products_table.c.product_id, lazy='select', uselist=False), quantity=specification_table.c.quantity, ) ) session = create_session( ) a1 = Assembly(name='a1') p1 = Product(name='p1') a1.specification.append(SpecLine(slave=p1)) d1 = Detail(name='d1') a1.specification.append(SpecLine(slave=d1)) session.add(a1) orig = repr(a1) session.flush() session.expunge_all() a1 = session.query(Product).filter_by(name='a1').one() new = repr(a1) print orig print new assert orig == new == '<Assembly a1> specification=[<SpecLine 1.0 <Product p1>>, <SpecLine 1.0 <Detail d1>>] documents=None' def testtwo(self): product_mapper = mapper(Product, products_table, polymorphic_on=products_table.c.product_type, polymorphic_identity='product') detail_mapper = mapper(Detail, inherits=product_mapper, polymorphic_identity='detail') specification_mapper = mapper(SpecLine, specification_table, properties=dict( slave=relationship(Product, foreign_keys=[specification_table.c.slave_id], primaryjoin=specification_table.c.slave_id==products_table.c.product_id, lazy='select', uselist=False), ) ) session = create_session( ) s = SpecLine(slave=Product(name='p1')) s2 = SpecLine(slave=Detail(name='d1')) session.add(s) session.add(s2) orig = repr([s, s2]) session.flush() session.expunge_all() new = repr(session.query(SpecLine).all()) print orig print new assert orig == new == '[<SpecLine 1.0 <Product p1>>, <SpecLine 1.0 <Detail d1>>]' def testthree(self): product_mapper = mapper(Product, products_table, polymorphic_on=products_table.c.product_type, polymorphic_identity='product') detail_mapper = mapper(Detail, inherits=product_mapper, polymorphic_identity='detail') assembly_mapper = mapper(Assembly, inherits=product_mapper, polymorphic_identity='assembly') specification_mapper = mapper(SpecLine, specification_table, properties=dict( master=relationship(Assembly, lazy='joined', uselist=False, foreign_keys=[specification_table.c.master_id], primaryjoin=specification_table.c.master_id==products_table.c.product_id, backref=backref('specification', cascade="all, delete-orphan"), ), slave=relationship(Product, lazy='joined', uselist=False, foreign_keys=[specification_table.c.slave_id], primaryjoin=specification_table.c.slave_id==products_table.c.product_id, ), quantity=specification_table.c.quantity, ) ) document_mapper = mapper(Document, documents_table, polymorphic_on=documents_table.c.document_type, polymorphic_identity='document', properties=dict( name=documents_table.c.name, data=deferred(documents_table.c.data), product=relationship(Product, lazy='select', backref=backref('documents', cascade="all, delete-orphan")), ), ) raster_document_mapper = mapper(RasterDocument, inherits=document_mapper, polymorphic_identity='raster_document') session = create_session() a1 = Assembly(name='a1') a1.specification.append(SpecLine(slave=Detail(name='d1'))) a1.documents.append(Document('doc1')) a1.documents.append(RasterDocument('doc2')) session.add(a1) orig = repr(a1) session.flush() session.expunge_all() a1 = session.query(Product).filter_by(name='a1').one() new = repr(a1) print orig print new assert orig == new == '<Assembly a1> specification=[<SpecLine 1.0 <Detail d1>>] documents=[<Document doc1>, <RasterDocument doc2>]' def testfour(self): """this tests the 
RasterDocument being attached to the Assembly, but *not* the Document. this means only a "sub-class" task, i.e. corresponding to an inheriting mapper but not the base mapper, is created. """ product_mapper = mapper(Product, products_table, polymorphic_on=products_table.c.product_type, polymorphic_identity='product') detail_mapper = mapper(Detail, inherits=product_mapper, polymorphic_identity='detail') assembly_mapper = mapper(Assembly, inherits=product_mapper, polymorphic_identity='assembly') document_mapper = mapper(Document, documents_table, polymorphic_on=documents_table.c.document_type, polymorphic_identity='document', properties=dict( name=documents_table.c.name, data=deferred(documents_table.c.data), product=relationship(Product, lazy='select', backref=backref('documents', cascade="all, delete-orphan")), ), ) raster_document_mapper = mapper(RasterDocument, inherits=document_mapper, polymorphic_identity='raster_document') session = create_session( ) a1 = Assembly(name='a1') a1.documents.append(RasterDocument('doc2')) session.add(a1) orig = repr(a1) session.flush() session.expunge_all() a1 = session.query(Product).filter_by(name='a1').one() new = repr(a1) print orig print new assert orig == new == '<Assembly a1> specification=None documents=[<RasterDocument doc2>]' del a1.documents[0] session.flush() session.expunge_all() a1 = session.query(Product).filter_by(name='a1').one() assert len(session.query(Document).all()) == 0 def testfive(self): """tests the late compilation of mappers""" specification_mapper = mapper(SpecLine, specification_table, properties=dict( master=relationship(Assembly, lazy='joined', uselist=False, foreign_keys=[specification_table.c.master_id], primaryjoin=specification_table.c.master_id==products_table.c.product_id, backref=backref('specification'), ), slave=relationship(Product, lazy='joined', uselist=False, foreign_keys=[specification_table.c.slave_id], primaryjoin=specification_table.c.slave_id==products_table.c.product_id, ), quantity=specification_table.c.quantity, ) ) product_mapper = mapper(Product, products_table, polymorphic_on=products_table.c.product_type, polymorphic_identity='product', properties={ 'documents' : relationship(Document, lazy='select', backref='product', cascade='all, delete-orphan'), }) detail_mapper = mapper(Detail, inherits=Product, polymorphic_identity='detail') document_mapper = mapper(Document, documents_table, polymorphic_on=documents_table.c.document_type, polymorphic_identity='document', properties=dict( name=documents_table.c.name, data=deferred(documents_table.c.data), ), ) raster_document_mapper = mapper(RasterDocument, inherits=Document, polymorphic_identity='raster_document') assembly_mapper = mapper(Assembly, inherits=Product, polymorphic_identity='assembly') session = create_session() a1 = Assembly(name='a1') a1.specification.append(SpecLine(slave=Detail(name='d1'))) a1.documents.append(Document('doc1')) a1.documents.append(RasterDocument('doc2')) session.add(a1) orig = repr(a1) session.flush() session.expunge_all() a1 = session.query(Product).filter_by(name='a1').one() new = repr(a1) print orig print new assert orig == new == '<Assembly a1> specification=[<SpecLine 1.0 <Detail d1>>] documents=[<Document doc1>, <RasterDocument doc2>]' SQLAlchemy-0.8.4/test/orm/inheritance/test_relationship.py0000644000076500000240000014124212251150015024404 0ustar classicstaff00000000000000from sqlalchemy.orm import create_session, relationship, mapper, \ contains_eager, joinedload, subqueryload, subqueryload_all,\ Session, aliased, with_polymorphic from sqlalchemy import Integer, String, ForeignKey from sqlalchemy.engine import default from 
sqlalchemy.testing import AssertsCompiledSQL, fixtures from sqlalchemy import testing from sqlalchemy.testing.schema import Table, Column from sqlalchemy.testing import assert_raises, eq_, is_ class Company(fixtures.ComparableEntity): pass class Person(fixtures.ComparableEntity): pass class Engineer(Person): pass class Manager(Person): pass class Boss(Manager): pass class Machine(fixtures.ComparableEntity): pass class Paperwork(fixtures.ComparableEntity): pass class SelfReferentialTestJoinedToBase(fixtures.MappedTest): run_setup_mappers = 'once' @classmethod def define_tables(cls, metadata): Table('people', metadata, Column('person_id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', String(50)), Column('type', String(30))) Table('engineers', metadata, Column('person_id', Integer, ForeignKey('people.person_id'), primary_key=True), Column('primary_language', String(50)), Column('reports_to_id', Integer, ForeignKey('people.person_id'))) @classmethod def setup_mappers(cls): engineers, people = cls.tables.engineers, cls.tables.people mapper(Person, people, polymorphic_on=people.c.type, polymorphic_identity='person') mapper(Engineer, engineers, inherits=Person, inherit_condition=engineers.c.person_id == people.c.person_id, polymorphic_identity='engineer', properties={ 'reports_to':relationship( Person, primaryjoin= people.c.person_id == engineers.c.reports_to_id)}) def test_has(self): p1 = Person(name='dogbert') e1 = Engineer(name='dilbert', primary_language='java', reports_to=p1) sess = create_session() sess.add(p1) sess.add(e1) sess.flush() sess.expunge_all() eq_(sess.query(Engineer) .filter(Engineer.reports_to.has(Person.name == 'dogbert')) .first(), Engineer(name='dilbert')) def test_oftype_aliases_in_exists(self): e1 = Engineer(name='dilbert', primary_language='java') e2 = Engineer(name='wally', primary_language='c++', reports_to=e1) sess = create_session() sess.add_all([e1, e2]) sess.flush() eq_(sess.query(Engineer) .filter(Engineer.reports_to .of_type(Engineer) .has(Engineer.name == 'dilbert')) .first(), e2) def test_join(self): p1 = Person(name='dogbert') e1 = Engineer(name='dilbert', primary_language='java', reports_to=p1) sess = create_session() sess.add(p1) sess.add(e1) sess.flush() sess.expunge_all() eq_(sess.query(Engineer) .join('reports_to', aliased=True) .filter(Person.name == 'dogbert').first(), Engineer(name='dilbert')) class SelfReferentialJ2JTest(fixtures.MappedTest): run_setup_mappers = 'once' @classmethod def define_tables(cls, metadata): people = Table('people', metadata, Column('person_id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', String(50)), Column('type', String(30))) engineers = Table('engineers', metadata, Column('person_id', Integer, ForeignKey('people.person_id'), primary_key=True), Column('primary_language', String(50)), Column('reports_to_id', Integer, ForeignKey('managers.person_id')) ) managers = Table('managers', metadata, Column('person_id', Integer, ForeignKey('people.person_id'), primary_key=True), ) @classmethod def setup_mappers(cls): engineers = cls.tables.engineers managers = cls.tables.managers people = cls.tables.people mapper(Person, people, polymorphic_on=people.c.type, polymorphic_identity='person') mapper(Manager, managers, inherits=Person, polymorphic_identity='manager') mapper(Engineer, engineers, inherits=Person, polymorphic_identity='engineer', properties={ 'reports_to':relationship( Manager, primaryjoin= managers.c.person_id == engineers.c.reports_to_id, backref='engineers')}) def 
test_has(self): m1 = Manager(name='dogbert') e1 = Engineer(name='dilbert', primary_language='java', reports_to=m1) sess = create_session() sess.add(m1) sess.add(e1) sess.flush() sess.expunge_all() eq_(sess.query(Engineer) .filter(Engineer.reports_to.has(Manager.name == 'dogbert')) .first(), Engineer(name='dilbert')) def test_join(self): m1 = Manager(name='dogbert') e1 = Engineer(name='dilbert', primary_language='java', reports_to=m1) sess = create_session() sess.add(m1) sess.add(e1) sess.flush() sess.expunge_all() eq_(sess.query(Engineer) .join('reports_to', aliased=True) .filter(Manager.name == 'dogbert').first(), Engineer(name='dilbert')) def test_filter_aliasing(self): m1 = Manager(name='dogbert') m2 = Manager(name='foo') e1 = Engineer(name='wally', primary_language='java', reports_to=m1) e2 = Engineer(name='dilbert', primary_language='c++', reports_to=m2) e3 = Engineer(name='etc', primary_language='c++') sess = create_session() sess.add_all([m1, m2, e1, e2, e3]) sess.flush() sess.expunge_all() # filter aliasing applied to Engineer doesn't whack Manager eq_(sess.query(Manager) .join(Manager.engineers) .filter(Manager.name == 'dogbert').all(), [m1]) eq_(sess.query(Manager) .join(Manager.engineers) .filter(Engineer.name == 'dilbert').all(), [m2]) eq_(sess.query(Manager, Engineer) .join(Manager.engineers) .order_by(Manager.name.desc()).all(), [(m2, e2), (m1, e1)]) def test_relationship_compare(self): m1 = Manager(name='dogbert') m2 = Manager(name='foo') e1 = Engineer(name='dilbert', primary_language='java', reports_to=m1) e2 = Engineer(name='wally', primary_language='c++', reports_to=m2) e3 = Engineer(name='etc', primary_language='c++') sess = create_session() sess.add(m1) sess.add(m2) sess.add(e1) sess.add(e2) sess.add(e3) sess.flush() sess.expunge_all() eq_(sess.query(Manager) .join(Manager.engineers) .filter(Engineer.reports_to == None).all(), []) eq_(sess.query(Manager) .join(Manager.engineers) .filter(Engineer.reports_to == m1).all(), [m1]) class SelfReferentialJ2JSelfTest(fixtures.MappedTest): run_setup_mappers = 'once' @classmethod def define_tables(cls, metadata): people = Table('people', metadata, Column('person_id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', String(50)), Column('type', String(30))) engineers = Table('engineers', metadata, Column('person_id', Integer, ForeignKey('people.person_id'), primary_key=True), Column('reports_to_id', Integer, ForeignKey('engineers.person_id'))) @classmethod def setup_mappers(cls): engineers = cls.tables.engineers people = cls.tables.people mapper(Person, people, polymorphic_on=people.c.type, polymorphic_identity='person') mapper(Engineer, engineers, inherits=Person, polymorphic_identity='engineer', properties={ 'reports_to':relationship( Engineer, primaryjoin= engineers.c.person_id == engineers.c.reports_to_id, backref='engineers', remote_side=engineers.c.person_id)}) def _two_obj_fixture(self): e1 = Engineer(name='wally') e2 = Engineer(name='dilbert', reports_to=e1) sess = Session() sess.add_all([e1, e2]) sess.commit() return sess def _five_obj_fixture(self): sess = Session() e1, e2, e3, e4, e5 = [ Engineer(name='e%d' % (i + 1)) for i in xrange(5) ] e3.reports_to = e1 e4.reports_to = e2 sess.add_all([e1, e2, e3, e4, e5]) sess.commit() return sess def test_has(self): sess = self._two_obj_fixture() eq_(sess.query(Engineer) .filter(Engineer.reports_to.has(Engineer.name == 'wally')) .first(), Engineer(name='dilbert')) def test_join_explicit_alias(self): sess = self._five_obj_fixture() ea = aliased(Engineer) 
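# join to the self-referential Engineer.engineers backref through an
# explicit aliased() construct, rather than the aliased=True flag used
# by the test_join_aliased_flag_* tests below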
eq_(sess.query(Engineer) .join(ea, Engineer.engineers) .filter(Engineer.name == 'e1').all(), [Engineer(name='e1')]) def test_join_aliased_flag_one(self): sess = self._two_obj_fixture() eq_(sess.query(Engineer) .join('reports_to', aliased=True) .filter(Engineer.name == 'wally').first(), Engineer(name='dilbert')) def test_join_aliased_flag_two(self): sess = self._five_obj_fixture() eq_(sess.query(Engineer) .join(Engineer.engineers, aliased=True) .filter(Engineer.name == 'e4').all(), [Engineer(name='e2')]) def test_relationship_compare(self): sess = self._five_obj_fixture() e1 = sess.query(Engineer).filter_by(name='e1').one() eq_(sess.query(Engineer) .join(Engineer.engineers, aliased=True) .filter(Engineer.reports_to == None).all(), []) eq_(sess.query(Engineer) .join(Engineer.engineers, aliased=True) .filter(Engineer.reports_to == e1).all(), [e1]) class M2MFilterTest(fixtures.MappedTest): run_setup_mappers = 'once' run_inserts = 'once' run_deletes = None @classmethod def define_tables(cls, metadata): organizations = Table('organizations', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', String(50))) engineers_to_org = Table('engineers_to_org', metadata, Column('org_id', Integer, ForeignKey('organizations.id')), Column('engineer_id', Integer, ForeignKey('engineers.person_id'))) people = Table('people', metadata, Column('person_id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', String(50)), Column('type', String(30))) engineers = Table('engineers', metadata, Column('person_id', Integer, ForeignKey('people.person_id'), primary_key=True), Column('primary_language', String(50))) @classmethod def setup_mappers(cls): organizations = cls.tables.organizations people = cls.tables.people engineers = cls.tables.engineers engineers_to_org = cls.tables.engineers_to_org class Organization(cls.Comparable): pass mapper(Organization, organizations, properties={ 'engineers':relationship( Engineer, secondary=engineers_to_org, backref='organizations')}) mapper(Person, people, polymorphic_on=people.c.type, polymorphic_identity='person') mapper(Engineer, engineers, inherits=Person, polymorphic_identity='engineer') @classmethod def insert_data(cls): Organization = cls.classes.Organization e1 = Engineer(name='e1') e2 = Engineer(name='e2') e3 = Engineer(name='e3') e4 = Engineer(name='e4') org1 = Organization(name='org1', engineers=[e1, e2]) org2 = Organization(name='org2', engineers=[e3, e4]) sess = create_session() sess.add(org1) sess.add(org2) sess.flush() def test_not_contains(self): Organization = self.classes.Organization sess = create_session() e1 = sess.query(Person).filter(Engineer.name == 'e1').one() # this works eq_(sess.query(Organization) .filter(~Organization.engineers .of_type(Engineer) .contains(e1)) .all(), [Organization(name='org2')]) # this had a bug eq_(sess.query(Organization) .filter(~Organization.engineers .contains(e1)) .all(), [Organization(name='org2')]) def test_any(self): sess = create_session() Organization = self.classes.Organization eq_(sess.query(Organization) .filter(Organization.engineers .of_type(Engineer) .any(Engineer.name == 'e1')) .all(), [Organization(name='org1')]) eq_(sess.query(Organization) .filter(Organization.engineers .any(Engineer.name == 'e1')) .all(), [Organization(name='org1')]) class SelfReferentialM2MTest(fixtures.MappedTest, AssertsCompiledSQL): @classmethod def define_tables(cls, metadata): Table('secondary', metadata, Column('left_id', Integer, ForeignKey('parent.id'), nullable=False), 
Column('right_id', Integer, ForeignKey('parent.id'), nullable=False)) Table('parent', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('cls', String(50))) Table('child1', metadata, Column('id', Integer, ForeignKey('parent.id'), primary_key=True)) Table('child2', metadata, Column('id', Integer, ForeignKey('parent.id'), primary_key=True)) @classmethod def setup_classes(cls): class Parent(cls.Basic): pass class Child1(Parent): pass class Child2(Parent): pass @classmethod def setup_mappers(cls): child1 = cls.tables.child1 child2 = cls.tables.child2 Parent = cls.classes.Parent parent = cls.tables.parent Child1 = cls.classes.Child1 Child2 = cls.classes.Child2 secondary = cls.tables.secondary mapper(Parent, parent, polymorphic_on=parent.c.cls) mapper(Child1, child1, inherits=Parent, polymorphic_identity='child1', properties={ 'left_child2':relationship( Child2, secondary=secondary, primaryjoin=parent.c.id == secondary.c.right_id, secondaryjoin=parent.c.id == secondary.c.left_id, uselist=False, backref="right_children")}) mapper(Child2, child2, inherits=Parent, polymorphic_identity='child2') def test_query_crit(self): Child1, Child2 = self.classes.Child1, self.classes.Child2 sess = create_session() c11, c12, c13 = Child1(), Child1(), Child1() c21, c22, c23 = Child2(), Child2(), Child2() c11.left_child2 = c22 c12.left_child2 = c22 c13.left_child2 = c23 sess.add_all([c11, c12, c13, c21, c22, c23]) sess.flush() # test that the join to Child2 doesn't alias Child1 in the select eq_(set(sess.query(Child1).join(Child1.left_child2)), set([c11, c12, c13])) eq_(set(sess.query(Child1, Child2).join(Child1.left_child2)), set([(c11, c22), (c12, c22), (c13, c23)])) # test __eq__() on property is annotating correctly eq_(set(sess.query(Child2) .join(Child2.right_children) .filter(Child1.left_child2 == c22)), set([c22])) # test the same again self.assert_compile( sess.query(Child2) .join(Child2.right_children) .filter(Child1.left_child2 == c22) .with_labels().statement, "SELECT child2.id AS child2_id, parent.id AS parent_id, " "parent.cls AS parent_cls FROM secondary AS secondary_1, " "parent JOIN child2 ON parent.id = child2.id JOIN secondary AS " "secondary_2 ON parent.id = secondary_2.left_id JOIN (SELECT " "parent.id AS parent_id, parent.cls AS parent_cls, child1.id AS " "child1_id FROM parent JOIN child1 ON parent.id = child1.id) AS " "anon_1 ON anon_1.parent_id = secondary_2.right_id WHERE " "anon_1.parent_id = secondary_1.right_id AND :param_1 = " "secondary_1.left_id", dialect=default.DefaultDialect() ) def test_eager_join(self): Child1, Child2 = self.classes.Child1, self.classes.Child2 sess = create_session() c1 = Child1() c1.left_child2 = Child2() sess.add(c1) sess.flush() # test that the splicing of the join works here, doesn't break in # the middle of "parent join child1" q = sess.query(Child1).options(joinedload('left_child2')) self.assert_compile(q.limit(1).with_labels().statement, "SELECT anon_1.child1_id AS anon_1_child1_id, anon_1.parent_id " "AS anon_1_parent_id, anon_1.parent_cls AS anon_1_parent_cls, " "anon_2.child2_id AS anon_2_child2_id, anon_2.parent_id AS " "anon_2_parent_id, anon_2.parent_cls AS anon_2_parent_cls FROM " "(SELECT child1.id AS child1_id, parent.id AS parent_id, " "parent.cls AS parent_cls FROM parent JOIN child1 ON parent.id = " "child1.id LIMIT :param_1) AS anon_1 LEFT OUTER JOIN secondary " "AS secondary_1 ON anon_1.parent_id = secondary_1.right_id LEFT " "OUTER JOIN (SELECT parent.id AS parent_id, parent.cls AS " "parent_cls, 
child2.id AS child2_id FROM parent JOIN child2 ON " "parent.id = child2.id) AS anon_2 ON anon_2.parent_id = " "secondary_1.left_id", {'param_1':1}, dialect=default.DefaultDialect()) # another way to check assert q.limit(1).with_labels().subquery().count().scalar() == 1 assert q.first() is c1 def test_subquery_load(self): Child1, Child2 = self.classes.Child1, self.classes.Child2 sess = create_session() c1 = Child1() c1.left_child2 = Child2() sess.add(c1) sess.flush() sess.expunge_all() query_ = sess.query(Child1).options(subqueryload('left_child2')) for row in query_.all(): assert row.left_child2 class EagerToSubclassTest(fixtures.MappedTest): """Test eager loads to subclass mappers""" run_setup_classes = 'once' run_setup_mappers = 'once' run_inserts = 'once' run_deletes = None @classmethod def define_tables(cls, metadata): Table('parent', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('data', String(10))) Table('base', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('type', String(10)), Column('related_id', Integer, ForeignKey('related.id'))) Table('sub', metadata, Column('id', Integer, ForeignKey('base.id'), primary_key=True), Column('data', String(10)), Column('parent_id', Integer, ForeignKey('parent.id'), nullable=False)) Table('related', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('data', String(10))) @classmethod def setup_classes(cls): class Parent(cls.Comparable): pass class Base(cls.Comparable): pass class Sub(Base): pass class Related(cls.Comparable): pass @classmethod def setup_mappers(cls): sub = cls.tables.sub Sub = cls.classes.Sub base = cls.tables.base Base = cls.classes.Base parent = cls.tables.parent Parent = cls.classes.Parent related = cls.tables.related Related = cls.classes.Related mapper(Parent, parent, properties={'children':relationship(Sub, order_by=sub.c.data)}) mapper(Base, base, polymorphic_on=base.c.type, polymorphic_identity='b', properties={'related':relationship(Related)}) mapper(Sub, sub, inherits=Base, polymorphic_identity='s') mapper(Related, related) @classmethod def insert_data(cls): global p1, p2 Parent = cls.classes.Parent Sub = cls.classes.Sub Related = cls.classes.Related sess = Session() r1, r2 = Related(data='r1'), Related(data='r2') s1 = Sub(data='s1', related=r1) s2 = Sub(data='s2', related=r2) s3 = Sub(data='s3') s4 = Sub(data='s4', related=r2) s5 = Sub(data='s5') p1 = Parent(data='p1', children=[s1, s2, s3]) p2 = Parent(data='p2', children=[s4, s5]) sess.add(p1) sess.add(p2) sess.commit() def test_joinedload(self): Parent = self.classes.Parent sess = Session() def go(): eq_(sess.query(Parent) .options(joinedload(Parent.children)).all(), [p1, p2]) self.assert_sql_count(testing.db, go, 1) def test_contains_eager(self): Parent = self.classes.Parent Sub = self.classes.Sub sess = Session() def go(): eq_(sess.query(Parent) .join(Parent.children) .options(contains_eager(Parent.children)) .order_by(Parent.data, Sub.data).all(), [p1, p2]) self.assert_sql_count(testing.db, go, 1) def test_subq_through_related(self): Parent = self.classes.Parent Base = self.classes.Base sess = Session() def go(): eq_(sess.query(Parent) .options(subqueryload_all(Parent.children, Base.related)) .order_by(Parent.data).all(), [p1, p2]) self.assert_sql_count(testing.db, go, 3) def test_subq_through_related_aliased(self): Parent = self.classes.Parent Base = self.classes.Base pa = aliased(Parent) sess = Session() def go(): eq_(sess.query(pa) 
.options(subqueryload_all(pa.children, Base.related)) .order_by(pa.data).all(), [p1, p2]) self.assert_sql_count(testing.db, go, 3) class SubClassEagerToSubClassTest(fixtures.MappedTest): """Test joinedloads from subclass to subclass mappers""" run_setup_classes = 'once' run_setup_mappers = 'once' run_inserts = 'once' run_deletes = None @classmethod def define_tables(cls, metadata): Table('parent', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('type', String(10)), ) Table('subparent', metadata, Column('id', Integer, ForeignKey('parent.id'), primary_key=True), Column('data', String(10)), ) Table('base', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('type', String(10)), ) Table('sub', metadata, Column('id', Integer, ForeignKey('base.id'), primary_key=True), Column('data', String(10)), Column('subparent_id', Integer, ForeignKey('subparent.id'), nullable=False) ) @classmethod def setup_classes(cls): class Parent(cls.Comparable): pass class Subparent(Parent): pass class Base(cls.Comparable): pass class Sub(Base): pass @classmethod def setup_mappers(cls): sub = cls.tables.sub Sub = cls.classes.Sub base = cls.tables.base Base = cls.classes.Base parent = cls.tables.parent Parent = cls.classes.Parent subparent = cls.tables.subparent Subparent = cls.classes.Subparent mapper(Parent, parent, polymorphic_on=parent.c.type, polymorphic_identity='b') mapper(Subparent, subparent, inherits=Parent, polymorphic_identity='s', properties={ 'children':relationship(Sub, order_by=base.c.id)}) mapper(Base, base, polymorphic_on=base.c.type, polymorphic_identity='b') mapper(Sub, sub, inherits=Base, polymorphic_identity='s') @classmethod def insert_data(cls): global p1, p2 Sub, Subparent = cls.classes.Sub, cls.classes.Subparent sess = create_session() p1 = Subparent( data='p1', children=[Sub(data='s1'), Sub(data='s2'), Sub(data='s3')]) p2 = Subparent( data='p2', children=[Sub(data='s4'), Sub(data='s5')]) sess.add(p1) sess.add(p2) sess.flush() def test_joinedload(self): Subparent = self.classes.Subparent sess = create_session() def go(): eq_(sess.query(Subparent) .options(joinedload(Subparent.children)).all(), [p1, p2]) self.assert_sql_count(testing.db, go, 1) sess.expunge_all() def go(): eq_(sess.query(Subparent) .options(joinedload("children")).all(), [p1, p2]) self.assert_sql_count(testing.db, go, 1) def test_contains_eager(self): Subparent = self.classes.Subparent sess = create_session() def go(): eq_(sess.query(Subparent) .join(Subparent.children) .options(contains_eager(Subparent.children)).all(), [p1, p2]) self.assert_sql_count(testing.db, go, 1) sess.expunge_all() def go(): eq_(sess.query(Subparent) .join(Subparent.children) .options(contains_eager("children")).all(), [p1, p2]) self.assert_sql_count(testing.db, go, 1) def test_subqueryload(self): Subparent = self.classes.Subparent sess = create_session() def go(): eq_(sess.query(Subparent) .options(subqueryload(Subparent.children)).all(), [p1, p2]) self.assert_sql_count(testing.db, go, 2) sess.expunge_all() def go(): eq_(sess.query(Subparent) .options(subqueryload("children")).all(), [p1, p2]) self.assert_sql_count(testing.db, go, 2) class SameNamedPropTwoPolymorphicSubClassesTest(fixtures.MappedTest): """test pathing when two subclasses contain a different property for the same name, and polymorphic loading is used. 
#2614 """ run_setup_classes = 'once' run_setup_mappers = 'once' run_inserts = 'once' run_deletes = None @classmethod def define_tables(cls, metadata): Table('a', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('type', String(10)) ) Table('b', metadata, Column('id', Integer, ForeignKey('a.id'), primary_key=True) ) Table('btod', metadata, Column('bid', Integer, ForeignKey('b.id'), nullable=False), Column('did', Integer, ForeignKey('d.id'), nullable=False) ) Table('c', metadata, Column('id', Integer, ForeignKey('a.id'), primary_key=True) ) Table('ctod', metadata, Column('cid', Integer, ForeignKey('c.id'), nullable=False), Column('did', Integer, ForeignKey('d.id'), nullable=False) ) Table('d', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True) ) @classmethod def setup_classes(cls): class A(cls.Comparable): pass class B(A): pass class C(A): pass class D(cls.Comparable): pass @classmethod def setup_mappers(cls): A = cls.classes.A B = cls.classes.B C = cls.classes.C D = cls.classes.D mapper(A, cls.tables.a, polymorphic_on=cls.tables.a.c.type) mapper(B, cls.tables.b, inherits=A, polymorphic_identity='b', properties={ 'related': relationship(D, secondary=cls.tables.btod) }) mapper(C, cls.tables.c, inherits=A, polymorphic_identity='c', properties={ 'related': relationship(D, secondary=cls.tables.ctod) }) mapper(D, cls.tables.d) @classmethod def insert_data(cls): B = cls.classes.B C = cls.classes.C D = cls.classes.D session = Session() d = D() session.add_all([ B(related=[d]), C(related=[d]) ]) session.commit() def test_free_w_poly_subquery(self): A = self.classes.A B = self.classes.B C = self.classes.C D = self.classes.D session = Session() d = session.query(D).one() a_poly = with_polymorphic(A, [B, C]) def go(): for a in session.query(a_poly).\ options( subqueryload(a_poly.B.related), subqueryload(a_poly.C.related)): eq_(a.related, [d]) self.assert_sql_count(testing.db, go, 3) def test_fixed_w_poly_subquery(self): A = self.classes.A B = self.classes.B C = self.classes.C D = self.classes.D session = Session() d = session.query(D).one() def go(): for a in session.query(A).with_polymorphic([B, C]).\ options(subqueryload(B.related), subqueryload(C.related)): eq_(a.related, [d]) self.assert_sql_count(testing.db, go, 3) def test_free_w_poly_joined(self): A = self.classes.A B = self.classes.B C = self.classes.C D = self.classes.D session = Session() d = session.query(D).one() a_poly = with_polymorphic(A, [B, C]) def go(): for a in session.query(a_poly).\ options( joinedload(a_poly.B.related), joinedload(a_poly.C.related)): eq_(a.related, [d]) self.assert_sql_count(testing.db, go, 1) def test_fixed_w_poly_joined(self): A = self.classes.A B = self.classes.B C = self.classes.C D = self.classes.D session = Session() d = session.query(D).one() def go(): for a in session.query(A).with_polymorphic([B, C]).\ options(joinedload(B.related), joinedload(C.related)): eq_(a.related, [d]) self.assert_sql_count(testing.db, go, 1) class SubClassToSubClassFromParentTest(fixtures.MappedTest): """test #2617 """ run_setup_classes = 'once' run_setup_mappers = 'once' run_inserts = 'once' run_deletes = None @classmethod def define_tables(cls, metadata): Table('z', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True) ) Table('a', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('type', String(10)), Column('z_id', Integer, ForeignKey('z.id')) ) Table('b', metadata, Column('id', Integer, 
ForeignKey('a.id'), primary_key=True) ) Table('d', metadata, Column('id', Integer, ForeignKey('a.id'), primary_key=True), Column('b_id', Integer, ForeignKey('b.id')) ) @classmethod def setup_classes(cls): class Z(cls.Comparable): pass class A(cls.Comparable): pass class B(A): pass class D(A): pass @classmethod def setup_mappers(cls): Z = cls.classes.Z A = cls.classes.A B = cls.classes.B D = cls.classes.D mapper(Z, cls.tables.z) mapper(A, cls.tables.a, polymorphic_on=cls.tables.a.c.type, with_polymorphic='*', properties={ 'zs': relationship(Z, lazy="subquery") }) mapper(B, cls.tables.b, inherits=A, polymorphic_identity='b', properties={ 'related': relationship(D, lazy="subquery", primaryjoin=cls.tables.d.c.b_id == cls.tables.b.c.id) }) mapper(D, cls.tables.d, inherits=A, polymorphic_identity='d') @classmethod def insert_data(cls): B = cls.classes.B session = Session() session.add(B()) session.commit() def test_2617(self): A = self.classes.A session = Session() def go(): a1 = session.query(A).first() eq_(a1.related, []) self.assert_sql_count(testing.db, go, 3) class SubClassToSubClassMultiTest(AssertsCompiledSQL, fixtures.MappedTest): """ Two different joined-inh subclasses, led by a parent, with two distinct endpoints: parent -> subcl1 -> subcl2 -> (ep1, ep2) the join to ep2 indicates we need to join from the middle of the joinpoint, skipping ep1 """ run_create_tables = None run_deletes = None __dialect__ = 'default' @classmethod def define_tables(cls, metadata): Table('parent', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('data', String(30)) ) Table('base1', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('data', String(30)) ) Table('sub1', metadata, Column('id', Integer, ForeignKey('base1.id'), primary_key=True), Column('parent_id', ForeignKey('parent.id')), Column('subdata', String(30)) ) Table('base2', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('base1_id', ForeignKey('base1.id')), Column('data', String(30)) ) Table('sub2', metadata, Column('id', Integer, ForeignKey('base2.id'), primary_key=True), Column('subdata', String(30)) ) Table('ep1', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('base2_id', Integer, ForeignKey('base2.id')), Column('data', String(30)) ) Table('ep2', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('base2_id', Integer, ForeignKey('base2.id')), Column('data', String(30)) ) @classmethod def setup_classes(cls): class Parent(cls.Comparable): pass class Base1(cls.Comparable): pass class Sub1(Base1): pass class Base2(cls.Comparable): pass class Sub2(Base2): pass class EP1(cls.Comparable): pass class EP2(cls.Comparable): pass @classmethod def _classes(cls): return cls.classes.Parent, cls.classes.Base1,\ cls.classes.Base2, cls.classes.Sub1,\ cls.classes.Sub2, cls.classes.EP1,\ cls.classes.EP2 @classmethod def setup_mappers(cls): Parent, Base1, Base2, Sub1, Sub2, EP1, EP2 = cls._classes() mapper(Parent, cls.tables.parent, properties={ 'sub1': relationship(Sub1) }) mapper(Base1, cls.tables.base1, properties={ 'sub2': relationship(Sub2) }) mapper(Sub1, cls.tables.sub1, inherits=Base1) mapper(Base2, cls.tables.base2, properties={ 'ep1': relationship(EP1), 'ep2': relationship(EP2) }) mapper(Sub2, cls.tables.sub2, inherits=Base2) mapper(EP1, cls.tables.ep1) mapper(EP2, cls.tables.ep2) def test_one(self): Parent, Base1, Base2, Sub1, Sub2, EP1, EP2 = self._classes() s = 
Session() self.assert_compile( s.query(Parent).join(Parent.sub1, Sub1.sub2). join(Sub2.ep1). join(Sub2.ep2), "SELECT parent.id AS parent_id, parent.data AS parent_data " "FROM parent JOIN (SELECT base1.id AS base1_id, " "base1.data AS base1_data, sub1.id AS sub1_id, " "sub1.parent_id AS sub1_parent_id, sub1.subdata AS sub1_subdata " "FROM base1 JOIN sub1 ON base1.id = sub1.id) AS anon_1 " "ON parent.id = anon_1.sub1_parent_id JOIN " "(SELECT base2.id AS base2_id, base2.base1_id AS base2_base1_id, " "base2.data AS base2_data, sub2.id AS sub2_id, " "sub2.subdata AS sub2_subdata FROM base2 JOIN sub2 " "ON base2.id = sub2.id) AS anon_2 " "ON anon_1.base1_id = anon_2.base2_base1_id " "JOIN ep1 ON anon_2.base2_id = ep1.base2_id " "JOIN ep2 ON anon_2.base2_id = ep2.base2_id" ) def test_two(self): Parent, Base1, Base2, Sub1, Sub2, EP1, EP2 = self._classes() s2a = aliased(Sub2) s = Session() self.assert_compile( s.query(Parent).join(Parent.sub1). join(s2a, Sub1.sub2), "SELECT parent.id AS parent_id, parent.data AS parent_data " "FROM parent JOIN (SELECT base1.id AS base1_id, " "base1.data AS base1_data, sub1.id AS sub1_id, " "sub1.parent_id AS sub1_parent_id, sub1.subdata AS sub1_subdata " "FROM base1 JOIN sub1 ON base1.id = sub1.id) AS anon_1 " "ON parent.id = anon_1.sub1_parent_id JOIN " "(SELECT base2.id AS base2_id, base2.base1_id AS base2_base1_id, " "base2.data AS base2_data, sub2.id AS sub2_id, " "sub2.subdata AS sub2_subdata FROM base2 JOIN sub2 " "ON base2.id = sub2.id) AS anon_2 " "ON anon_1.base1_id = anon_2.base2_base1_id" ) def test_three(self): Parent, Base1, Base2, Sub1, Sub2, EP1, EP2 = self._classes() s = Session() self.assert_compile( s.query(Base1).join(Base1.sub2). join(Sub2.ep1).\ join(Sub2.ep2), "SELECT base1.id AS base1_id, base1.data AS base1_data " "FROM base1 JOIN (SELECT base2.id AS base2_id, base2.base1_id " "AS base2_base1_id, base2.data AS base2_data, sub2.id AS sub2_id, " "sub2.subdata AS sub2_subdata FROM base2 JOIN sub2 " "ON base2.id = sub2.id) AS anon_1 ON base1.id = " "anon_1.base2_base1_id " "JOIN ep1 ON anon_1.base2_id = ep1.base2_id " "JOIN ep2 ON anon_1.base2_id = ep2.base2_id" ) def test_four(self): Parent, Base1, Base2, Sub1, Sub2, EP1, EP2 = self._classes() s = Session() self.assert_compile( s.query(Sub2).join(Base1, Base1.id == Sub2.base1_id). join(Sub2.ep1).\ join(Sub2.ep2), "SELECT sub2.id AS sub2_id, base2.id AS base2_id, " "base2.base1_id AS base2_base1_id, base2.data AS base2_data, " "sub2.subdata AS sub2_subdata " "FROM base2 JOIN sub2 ON base2.id = sub2.id " "JOIN base1 ON base1.id = base2.base1_id " "JOIN ep1 ON base2.id = ep1.base2_id " "JOIN ep2 ON base2.id = ep2.base2_id" ) def test_five(self): Parent, Base1, Base2, Sub1, Sub2, EP1, EP2 = self._classes() s = Session() self.assert_compile( s.query(Sub2).join(Sub1, Sub1.id == Sub2.base1_id). join(Sub2.ep1).\ join(Sub2.ep2), "SELECT sub2.id AS sub2_id, base2.id AS base2_id, " "base2.base1_id AS base2_base1_id, base2.data AS base2_data, " "sub2.subdata AS sub2_subdata " "FROM base2 JOIN sub2 ON base2.id = sub2.id " "JOIN " "(SELECT base1.id AS base1_id, base1.data AS base1_data, " "sub1.id AS sub1_id, sub1.parent_id AS sub1_parent_id, " "sub1.subdata AS sub1_subdata " "FROM base1 JOIN sub1 ON base1.id = sub1.id) AS anon_1 " "ON anon_1.sub1_id = base2.base1_id " "JOIN ep1 ON base2.id = ep1.base2_id " "JOIN ep2 ON base2.id = ep2.base2_id" ) def test_six(self): Parent, Base1, Base2, Sub1, Sub2, EP1, EP2 = self._classes() s = Session() self.assert_compile( s.query(Sub2).from_self().\ join(Sub2.ep1). 
join(Sub2.ep2), "SELECT anon_1.sub2_id AS anon_1_sub2_id, " "anon_1.base2_id AS anon_1_base2_id, " "anon_1.base2_base1_id AS anon_1_base2_base1_id, " "anon_1.base2_data AS anon_1_base2_data, " "anon_1.sub2_subdata AS anon_1_sub2_subdata " "FROM (SELECT sub2.id AS sub2_id, base2.id AS base2_id, " "base2.base1_id AS base2_base1_id, base2.data AS base2_data, " "sub2.subdata AS sub2_subdata " "FROM base2 JOIN sub2 ON base2.id = sub2.id) AS anon_1 " "JOIN ep1 ON anon_1.base2_id = ep1.base2_id " "JOIN ep2 ON anon_1.base2_id = ep2.base2_id" ) def test_seven(self): Parent, Base1, Base2, Sub1, Sub2, EP1, EP2 = self._classes() s = Session() self.assert_compile( # adding Sub2 to the entities list helps it, # otherwise the joins for Sub2.ep1/ep2 don't have columns # to latch onto. Can't really make it better than this s.query(Parent, Sub2).join(Parent.sub1).\ join(Sub1.sub2).from_self().\ join(Sub2.ep1). join(Sub2.ep2), "SELECT anon_1.parent_id AS anon_1_parent_id, " "anon_1.parent_data AS anon_1_parent_data, " "anon_1.anon_2_sub2_id AS anon_1_anon_2_sub2_id, " "anon_1.anon_2_base2_id AS anon_1_anon_2_base2_id, " "anon_1.anon_2_base2_base1_id AS anon_1_anon_2_base2_base1_id, " "anon_1.anon_2_base2_data AS anon_1_anon_2_base2_data, " "anon_1.anon_2_sub2_subdata AS anon_1_anon_2_sub2_subdata " "FROM (SELECT parent.id AS parent_id, parent.data AS parent_data, " "anon_2.sub2_id AS anon_2_sub2_id, " "anon_2.base2_id AS anon_2_base2_id, " "anon_2.base2_base1_id AS anon_2_base2_base1_id, " "anon_2.base2_data AS anon_2_base2_data, " "anon_2.sub2_subdata AS anon_2_sub2_subdata " "FROM parent JOIN (SELECT base1.id AS base1_id, " "base1.data AS base1_data, sub1.id AS sub1_id, " "sub1.parent_id AS sub1_parent_id, sub1.subdata AS sub1_subdata " "FROM base1 JOIN sub1 ON base1.id = sub1.id) AS anon_3 " "ON parent.id = anon_3.sub1_parent_id JOIN " "(SELECT base2.id AS base2_id, base2.base1_id AS base2_base1_id, " "base2.data AS base2_data, sub2.id AS sub2_id, " "sub2.subdata AS sub2_subdata " "FROM base2 JOIN sub2 ON base2.id = sub2.id) AS anon_2 " "ON anon_3.base1_id = anon_2.base2_base1_id) AS anon_1 " "JOIN ep1 ON anon_1.anon_2_base2_id = ep1.base2_id " "JOIN ep2 ON anon_1.anon_2_base2_id = ep2.base2_id" ) class MultipleAdaptUsesEntityOverTableTest(AssertsCompiledSQL, fixtures.MappedTest): __dialect__ = 'default' run_create_tables = None @classmethod def define_tables(cls, metadata): Table('a', metadata, Column('id', Integer, primary_key=True), Column('name', String) ) Table('b', metadata, Column('id', Integer, ForeignKey('a.id'), primary_key=True) ) Table('c', metadata, Column('id', Integer, ForeignKey('a.id'), primary_key=True), Column('bid', Integer, ForeignKey('b.id')) ) Table('d', metadata, Column('id', Integer, ForeignKey('a.id'), primary_key=True), Column('cid', Integer, ForeignKey('c.id')) ) @classmethod def setup_classes(cls): class A(cls.Comparable): pass class B(A): pass class C(A): pass class D(A): pass @classmethod def setup_mappers(cls): A, B, C, D = cls.classes.A, cls.classes.B, cls.classes.C, cls.classes.D a, b, c, d = cls.tables.a, cls.tables.b, cls.tables.c, cls.tables.d mapper(A, a) mapper(B, b, inherits=A) mapper(C, c, inherits=A) mapper(D, d, inherits=A) def _two_join_fixture(self): A, B, C, D = self.classes.A, self.classes.B, self.classes.C, self.classes.D s = Session() return s.query(B.name, C.name, D.name).select_from(B).\ join(C, C.bid == B.id).\ join(D, D.cid == C.id) def test_two_joins_adaption(self): a, b, c, d = self.tables.a, self.tables.b, self.tables.c, self.tables.d q = 
self._two_join_fixture() btoc = q._from_obj[0].left ac_adapted = btoc.right ctod = q._from_obj[0].right bname, cname, dname = q._entities b_name_adapted = bname._resolve_expr_against_query_aliases( q, bname.column, None) c_name_adapted = cname._resolve_expr_against_query_aliases( q, cname.column, None) d_name_adapted = dname._resolve_expr_against_query_aliases( q, dname.column, None) assert bool(b_name_adapted == a.c.name) assert bool(c_name_adapted == ac_adapted.corresponding_column(a.c.name)) assert bool(d_name_adapted == ctod.corresponding_column(a.c.name)) def test_two_joins_sql(self): q = self._two_join_fixture() self.assert_compile(q, "SELECT a.name AS a_name, anon_1.a_name AS anon_1_a_name, " "anon_2.a_name AS anon_2_a_name FROM a JOIN b ON a.id = b.id " "JOIN (SELECT a.id AS a_id, a.name AS a_name, c.id AS c_id, " "c.bid AS c_bid FROM a JOIN c ON a.id = c.id) AS anon_1 ON " "anon_1.c_bid = b.id JOIN (SELECT a.id AS a_id, a.name AS a_name, " "d.id AS d_id, d.cid AS d_cid FROM a JOIN d ON a.id = d.id) " "AS anon_2 ON anon_2.d_cid = anon_1.c_id" ) SQLAlchemy-0.8.4/test/orm/inheritance/test_selects.py0000644000076500000240000000343412251147172023357 0ustar classicstaff00000000000000from sqlalchemy import * from sqlalchemy.orm import * from sqlalchemy import testing from sqlalchemy.testing import fixtures class InheritingSelectablesTest(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): global foo, bar, baz foo = Table('foo', metadata, Column('a', String(30), primary_key=1), Column('b', String(30), nullable=0)) bar = foo.select(foo.c.b == 'bar').alias('bar') baz = foo.select(foo.c.b == 'baz').alias('baz') def test_load(self): # TODO: add persistence test also testing.db.execute(foo.insert(), a='not bar', b='baz') testing.db.execute(foo.insert(), a='also not bar', b='baz') testing.db.execute(foo.insert(), a='i am bar', b='bar') testing.db.execute(foo.insert(), a='also bar', b='bar') class Foo(fixtures.ComparableEntity): pass class Bar(Foo): pass class Baz(Foo): pass mapper(Foo, foo, polymorphic_on=foo.c.b) mapper(Baz, baz, with_polymorphic=('*', foo.join(baz, foo.c.b=='baz').alias('baz')), inherits=Foo, inherit_condition=(foo.c.a==baz.c.a), inherit_foreign_keys=[baz.c.a], polymorphic_identity='baz') mapper(Bar, bar, with_polymorphic=('*', foo.join(bar, foo.c.b=='bar').alias('bar')), inherits=Foo, inherit_condition=(foo.c.a==bar.c.a), inherit_foreign_keys=[bar.c.a], polymorphic_identity='bar') s = sessionmaker(bind=testing.db)() assert [Baz(), Baz(), Bar(), Bar()] == s.query(Foo).order_by(Foo.b.desc()).all() assert [Bar(), Bar()] == s.query(Bar).all() SQLAlchemy-0.8.4/test/orm/inheritance/test_single.py0000644000076500000240000005515212251150015023170 0ustar classicstaff00000000000000from sqlalchemy.testing import eq_ from sqlalchemy import * from sqlalchemy.orm import * from sqlalchemy import testing from test.orm import _fixtures from sqlalchemy.testing import fixtures from sqlalchemy.testing.schema import Table, Column class SingleInheritanceTest(testing.AssertsCompiledSQL, fixtures.MappedTest): @classmethod def define_tables(cls, metadata): Table('employees', metadata, Column('employee_id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', String(50)), Column('manager_data', String(50)), Column('engineer_info', String(50)), Column('type', String(20))) Table('reports', metadata, Column('report_id', Integer, primary_key=True, test_needs_autoincrement=True), Column('employee_id', ForeignKey('employees.employee_id')), Column('name', String(50)), ) 
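# single-table inheritance: the Employee subclasses mapped below share the
# one 'employees' table, with no per-subclass table; rows are discriminated
# by the 'type' column via each mapper's polymorphic_identity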
@classmethod def setup_classes(cls): global Employee, Manager, Engineer, JuniorEngineer class Employee(cls.Comparable): pass class Manager(Employee): pass class Engineer(Employee): pass class JuniorEngineer(Engineer): pass @classmethod def setup_mappers(cls): Employee, Manager, JuniorEngineer, employees, Engineer = (cls.classes.Employee, cls.classes.Manager, cls.classes.JuniorEngineer, cls.tables.employees, cls.classes.Engineer) mapper(Employee, employees, polymorphic_on=employees.c.type) mapper(Manager, inherits=Employee, polymorphic_identity='manager') mapper(Engineer, inherits=Employee, polymorphic_identity='engineer') mapper(JuniorEngineer, inherits=Engineer, polymorphic_identity='juniorengineer') def test_single_inheritance(self): Employee, JuniorEngineer, Manager, Engineer = (self.classes.Employee, self.classes.JuniorEngineer, self.classes.Manager, self.classes.Engineer) session = create_session() m1 = Manager(name='Tom', manager_data='knows how to manage things') e1 = Engineer(name='Kurt', engineer_info='knows how to hack') e2 = JuniorEngineer(name='Ed', engineer_info='oh that ed') session.add_all([m1, e1, e2]) session.flush() assert session.query(Employee).all() == [m1, e1, e2] assert session.query(Engineer).all() == [e1, e2] assert session.query(Manager).all() == [m1] assert session.query(JuniorEngineer).all() == [e2] m1 = session.query(Manager).one() session.expire(m1, ['manager_data']) eq_(m1.manager_data, "knows how to manage things") row = session.query(Engineer.name, Engineer.employee_id).filter(Engineer.name=='Kurt').first() assert row.name == 'Kurt' assert row.employee_id == e1.employee_id def test_multi_qualification(self): JuniorEngineer, Manager, Engineer = (self.classes.JuniorEngineer, self.classes.Manager, self.classes.Engineer) session = create_session() m1 = Manager(name='Tom', manager_data='knows how to manage things') e1 = Engineer(name='Kurt', engineer_info='knows how to hack') e2 = JuniorEngineer(name='Ed', engineer_info='oh that ed') session.add_all([m1, e1, e2]) session.flush() ealias = aliased(Engineer) eq_( session.query(Manager, ealias).all(), [(m1, e1), (m1, e2)] ) eq_( session.query(Manager.name).all(), [("Tom",)] ) eq_( session.query(Manager.name, ealias.name).all(), [("Tom", "Kurt"), ("Tom", "Ed")] ) eq_( session.query(func.upper(Manager.name), func.upper(ealias.name)).all(), [("TOM", "KURT"), ("TOM", "ED")] ) eq_( session.query(Manager).add_entity(ealias).all(), [(m1, e1), (m1, e2)] ) eq_( session.query(Manager.name).add_column(ealias.name).all(), [("Tom", "Kurt"), ("Tom", "Ed")] ) # TODO: I think raise error on this for now # self.assertEquals( # session.query(Employee.name, Manager.manager_data, Engineer.engineer_info).all(), # [] # ) def test_from_self(self): Engineer = self.classes.Engineer sess = create_session() self.assert_compile(sess.query(Engineer).from_self(), 'SELECT anon_1.employees_employee_id AS ' 'anon_1_employees_employee_id, ' 'anon_1.employees_name AS ' 'anon_1_employees_name, ' 'anon_1.employees_manager_data AS ' 'anon_1_employees_manager_data, ' 'anon_1.employees_engineer_info AS ' 'anon_1_employees_engineer_info, ' 'anon_1.employees_type AS ' 'anon_1_employees_type FROM (SELECT ' 'employees.employee_id AS ' 'employees_employee_id, employees.name AS ' 'employees_name, employees.manager_data AS ' 'employees_manager_data, ' 'employees.engineer_info AS ' 'employees_engineer_info, employees.type ' 'AS employees_type FROM employees) AS ' 'anon_1 WHERE anon_1.employees_type IN ' '(:type_1, :type_2)', use_default_dialect=True) def 
test_select_from(self): Manager, JuniorEngineer, employees, Engineer = (self.classes.Manager, self.classes.JuniorEngineer, self.tables.employees, self.classes.Engineer) sess = create_session() m1 = Manager(name='Tom', manager_data='data1') m2 = Manager(name='Tom2', manager_data='data2') e1 = Engineer(name='Kurt', engineer_info='knows how to hack') e2 = JuniorEngineer(name='Ed', engineer_info='oh that ed') sess.add_all([m1, m2, e1, e2]) sess.flush() eq_( sess.query(Manager).select_from(employees.select().limit(10)).all(), [m1, m2] ) def test_count(self): Employee, JuniorEngineer, Manager, Engineer = (self.classes.Employee, self.classes.JuniorEngineer, self.classes.Manager, self.classes.Engineer) sess = create_session() m1 = Manager(name='Tom', manager_data='data1') m2 = Manager(name='Tom2', manager_data='data2') e1 = Engineer(name='Kurt', engineer_info='data3') e2 = JuniorEngineer(name='marvin', engineer_info='data4') sess.add_all([m1, m2, e1, e2]) sess.flush() eq_(sess.query(Manager).count(), 2) eq_(sess.query(Engineer).count(), 2) eq_(sess.query(Employee).count(), 4) eq_(sess.query(Manager).filter(Manager.name.like('%m%')).count(), 2) eq_(sess.query(Employee).filter(Employee.name.like('%m%')).count(), 3) def test_type_filtering(self): Employee, Manager, reports, Engineer = (self.classes.Employee, self.classes.Manager, self.tables.reports, self.classes.Engineer) class Report(fixtures.ComparableEntity): pass mapper(Report, reports, properties={ 'employee': relationship(Employee, backref='reports')}) sess = create_session() m1 = Manager(name='Tom', manager_data='data1') r1 = Report(employee=m1) sess.add_all([m1, r1]) sess.flush() rq = sess.query(Report) assert len(rq.filter(Report.employee.of_type(Manager).has()).all()) == 1 assert len(rq.filter(Report.employee.of_type(Engineer).has()).all()) == 0 def test_type_joins(self): Employee, Manager, reports, Engineer = (self.classes.Employee, self.classes.Manager, self.tables.reports, self.classes.Engineer) class Report(fixtures.ComparableEntity): pass mapper(Report, reports, properties={ 'employee': relationship(Employee, backref='reports')}) sess = create_session() m1 = Manager(name='Tom', manager_data='data1') r1 = Report(employee=m1) sess.add_all([m1, r1]) sess.flush() rq = sess.query(Report) assert len(rq.join(Report.employee.of_type(Manager)).all()) == 1 assert len(rq.join(Report.employee.of_type(Engineer)).all()) == 0 class RelationshipFromSingleTest(testing.AssertsCompiledSQL, fixtures.MappedTest): @classmethod def define_tables(cls, metadata): Table('employee', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', String(50)), Column('type', String(20)), ) Table('employee_stuff', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('employee_id', Integer, ForeignKey('employee.id')), Column('name', String(50)), ) @classmethod def setup_classes(cls): class Employee(cls.Comparable): pass class Manager(Employee): pass class Stuff(cls.Comparable): pass def test_subquery_load(self): employee, employee_stuff, Employee, Stuff, Manager = (self.tables.employee, self.tables.employee_stuff, self.classes.Employee, self.classes.Stuff, self.classes.Manager) mapper(Employee, employee, polymorphic_on=employee.c.type, polymorphic_identity='employee') mapper(Manager, inherits=Employee, polymorphic_identity='manager', properties={ 'stuff':relationship(Stuff) }) mapper(Stuff, employee_stuff) sess = create_session() context = 
sess.query(Manager).options(subqueryload('stuff'))._compile_context() subq = context.attributes[('subquery', (class_mapper(Manager), class_mapper(Manager).attrs.stuff))] self.assert_compile(subq, 'SELECT employee_stuff.id AS ' 'employee_stuff_id, employee_stuff.employee' '_id AS employee_stuff_employee_id, ' 'employee_stuff.name AS ' 'employee_stuff_name, anon_1.employee_id ' 'AS anon_1_employee_id FROM (SELECT ' 'employee.id AS employee_id FROM employee ' 'WHERE employee.type IN (:type_1)) AS anon_1 ' 'JOIN employee_stuff ON anon_1.employee_id ' '= employee_stuff.employee_id ORDER BY ' 'anon_1.employee_id', use_default_dialect=True ) class RelationshipToSingleTest(testing.AssertsCompiledSQL, fixtures.MappedTest): __dialect__ = 'default' @classmethod def define_tables(cls, metadata): Table('employees', metadata, Column('employee_id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', String(50)), Column('manager_data', String(50)), Column('engineer_info', String(50)), Column('type', String(20)), Column('company_id', Integer, ForeignKey('companies.company_id')) ) Table('companies', metadata, Column('company_id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', String(50)), ) @classmethod def setup_classes(cls): class Company(cls.Comparable): pass class Employee(cls.Comparable): pass class Manager(Employee): pass class Engineer(Employee): pass class JuniorEngineer(Engineer): pass def test_of_type(self): JuniorEngineer, Company, companies, Manager,\ Employee, employees, Engineer = (self.classes.JuniorEngineer, self.classes.Company, self.tables.companies, self.classes.Manager, self.classes.Employee, self.tables.employees, self.classes.Engineer) mapper(Company, companies, properties={ 'employees':relationship(Employee, backref='company') }) mapper(Employee, employees, polymorphic_on=employees.c.type) mapper(Manager, inherits=Employee, polymorphic_identity='manager') mapper(Engineer, inherits=Employee, polymorphic_identity='engineer') mapper(JuniorEngineer, inherits=Engineer, polymorphic_identity='juniorengineer') sess = sessionmaker()() c1 = Company(name='c1') c2 = Company(name='c2') m1 = Manager(name='Tom', manager_data='data1', company=c1) m2 = Manager(name='Tom2', manager_data='data2', company=c2) e1 = Engineer(name='Kurt', engineer_info='knows how to hack', company=c2) e2 = JuniorEngineer(name='Ed', engineer_info='oh that ed', company=c1) sess.add_all([c1, c2, m1, m2, e1, e2]) sess.commit() sess.expunge_all() eq_( sess.query(Company).filter(Company.employees.of_type(JuniorEngineer).any()).all(), [ Company(name='c1'), ] ) eq_( sess.query(Company).join(Company.employees.of_type(JuniorEngineer)).all(), [ Company(name='c1'), ] ) def test_outer_join(self): Company, Employee, Engineer = self.classes.Company,\ self.classes.Employee,\ self.classes.Engineer companies, employees = self.tables.companies, self.tables.employees mapper(Company, companies, properties={ 'engineers':relationship(Engineer) }) mapper(Employee, employees, polymorphic_on=employees.c.type) mapper(Engineer, inherits=Employee, polymorphic_identity='engineer') sess = create_session() self.assert_compile( sess.query(Company, Engineer.name).outerjoin("engineers"), "SELECT companies.company_id AS companies_company_id, " "companies.name AS companies_name, employees.name AS employees_name " "FROM companies LEFT OUTER JOIN employees ON companies.company_id " "= employees.company_id AND employees.type IN (:type_1)" ) def test_outer_join_alias(self): Company, Employee, Engineer = 
self.classes.Company,\ self.classes.Employee,\ self.classes.Engineer companies, employees = self.tables.companies, self.tables.employees mapper(Company, companies, properties={ 'engineers':relationship(Engineer) }) mapper(Employee, employees, polymorphic_on=employees.c.type) mapper(Engineer, inherits=Employee, polymorphic_identity='engineer') eng_alias = aliased(Engineer) sess = create_session() self.assert_compile( sess.query(Company, eng_alias.name).outerjoin(eng_alias, Company.engineers), "SELECT companies.company_id AS companies_company_id, " "companies.name AS companies_name, employees_1.name AS " "employees_1_name FROM companies LEFT OUTER " "JOIN employees AS employees_1 ON companies.company_id " "= employees_1.company_id AND employees_1.type IN (:type_1)" ) def test_relationship_to_subclass(self): JuniorEngineer, Company, companies, Manager, Employee, employees, Engineer = (self.classes.JuniorEngineer, self.classes.Company, self.tables.companies, self.classes.Manager, self.classes.Employee, self.tables.employees, self.classes.Engineer) mapper(Company, companies, properties={ 'engineers':relationship(Engineer) }) mapper(Employee, employees, polymorphic_on=employees.c.type, properties={ 'company':relationship(Company) }) mapper(Manager, inherits=Employee, polymorphic_identity='manager') mapper(Engineer, inherits=Employee, polymorphic_identity='engineer') mapper(JuniorEngineer, inherits=Engineer, polymorphic_identity='juniorengineer') sess = sessionmaker()() c1 = Company(name='c1') c2 = Company(name='c2') m1 = Manager(name='Tom', manager_data='data1', company=c1) m2 = Manager(name='Tom2', manager_data='data2', company=c2) e1 = Engineer(name='Kurt', engineer_info='knows how to hack', company=c2) e2 = JuniorEngineer(name='Ed', engineer_info='oh that ed', company=c1) sess.add_all([c1, c2, m1, m2, e1, e2]) sess.commit() eq_(c1.engineers, [e2]) eq_(c2.engineers, [e1]) sess.expunge_all() eq_(sess.query(Company).order_by(Company.name).all(), [ Company(name='c1', engineers=[JuniorEngineer(name='Ed')]), Company(name='c2', engineers=[Engineer(name='Kurt')]) ] ) # eager load join should limit to only "Engineer" sess.expunge_all() eq_(sess.query(Company).options(joinedload('engineers')).order_by(Company.name).all(), [ Company(name='c1', engineers=[JuniorEngineer(name='Ed')]), Company(name='c2', engineers=[Engineer(name='Kurt')]) ] ) # join() to Company.engineers, Employee as the requested entity sess.expunge_all() eq_(sess.query(Company, Employee).join(Company.engineers).order_by(Company.name).all(), [ (Company(name='c1'), JuniorEngineer(name='Ed')), (Company(name='c2'), Engineer(name='Kurt')) ] ) # join() to Company.engineers, Engineer as the requested entity. # this actually applies the IN criterion twice which is less than ideal. sess.expunge_all() eq_(sess.query(Company, Engineer).join(Company.engineers).order_by(Company.name).all(), [ (Company(name='c1'), JuniorEngineer(name='Ed')), (Company(name='c2'), Engineer(name='Kurt')) ] ) # join() to Company.engineers without any Employee/Engineer entity sess.expunge_all() eq_(sess.query(Company).join(Company.engineers).filter(Engineer.name.in_(['Tom', 'Kurt'])).all(), [ Company(name='c2') ] ) # this however fails as it does not limit the subtypes to just "Engineer". # with joins constructed by filter(), we seem to be following a policy where # we don't try to make decisions on how to join to the target class, whereas when using join() we # seem to have a lot more capabilities. # we might want to document "advantages of join() vs. 
straight filtering", or add a large # section to "inheritance" laying out all the various behaviors Query has. @testing.fails_on_everything_except() def go(): sess.expunge_all() eq_(sess.query(Company).\ filter(Company.company_id==Engineer.company_id).filter(Engineer.name.in_(['Tom', 'Kurt'])).all(), [ Company(name='c2') ] ) go() class SingleOnJoinedTest(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): global persons_table, employees_table persons_table = Table('persons', metadata, Column('person_id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', String(50)), Column('type', String(20), nullable=False) ) employees_table = Table('employees', metadata, Column('person_id', Integer, ForeignKey('persons.person_id'),primary_key=True), Column('employee_data', String(50)), Column('manager_data', String(50)), ) def test_single_on_joined(self): class Person(fixtures.ComparableEntity): pass class Employee(Person): pass class Manager(Employee): pass mapper(Person, persons_table, polymorphic_on=persons_table.c.type, polymorphic_identity='person') mapper(Employee, employees_table, inherits=Person,polymorphic_identity='engineer') mapper(Manager, inherits=Employee,polymorphic_identity='manager') sess = create_session() sess.add(Person(name='p1')) sess.add(Employee(name='e1', employee_data='ed1')) sess.add(Manager(name='m1', employee_data='ed2', manager_data='md1')) sess.flush() sess.expunge_all() eq_(sess.query(Person).order_by(Person.person_id).all(), [ Person(name='p1'), Employee(name='e1', employee_data='ed1'), Manager(name='m1', employee_data='ed2', manager_data='md1') ]) sess.expunge_all() eq_(sess.query(Employee).order_by(Person.person_id).all(), [ Employee(name='e1', employee_data='ed1'), Manager(name='m1', employee_data='ed2', manager_data='md1') ]) sess.expunge_all() eq_(sess.query(Manager).order_by(Person.person_id).all(), [ Manager(name='m1', employee_data='ed2', manager_data='md1') ]) sess.expunge_all() def go(): eq_(sess.query(Person).with_polymorphic('*').order_by(Person.person_id).all(), [ Person(name='p1'), Employee(name='e1', employee_data='ed1'), Manager(name='m1', employee_data='ed2', manager_data='md1') ]) self.assert_sql_count(testing.db, go, 1) SQLAlchemy-0.8.4/test/orm/inheritance/test_with_poly.py0000644000076500000240000001052212251150015023715 0ustar classicstaff00000000000000from sqlalchemy import Integer, String, ForeignKey, func, desc, and_, or_ from sqlalchemy.orm import interfaces, relationship, mapper, \ clear_mappers, create_session, joinedload, joinedload_all, \ subqueryload, subqueryload_all, polymorphic_union, aliased,\ class_mapper, with_polymorphic from sqlalchemy import exc as sa_exc from sqlalchemy.engine import default from sqlalchemy.testing import AssertsCompiledSQL, fixtures from sqlalchemy import testing from sqlalchemy.testing.schema import Table, Column from sqlalchemy.testing import assert_raises, eq_ from _poly_fixtures import Company, Person, Engineer, Manager, Boss, \ Machine, Paperwork, _PolymorphicFixtureBase, _Polymorphic,\ _PolymorphicPolymorphic, _PolymorphicUnions, _PolymorphicJoins,\ _PolymorphicAliasedJoins class _WithPolymorphicBase(_PolymorphicFixtureBase): def test_join_base_to_sub(self): sess = create_session() pa = with_polymorphic(Person, [Engineer]) def go(): eq_(sess.query(pa) .filter(pa.Engineer.primary_language == 'java').all(), self._emps_wo_relationships_fixture()[0:1]) self.assert_sql_count(testing.db, go, 1) def test_col_expression_base_plus_two_subs(self): sess = create_session() pa = 
with_polymorphic(Person, [Engineer, Manager]) eq_( sess.query(pa.name, pa.Engineer.primary_language, pa.Manager.manager_name).\ filter(or_(pa.Engineer.primary_language=='java', pa.Manager.manager_name=='dogbert')).\ order_by(pa.Engineer.type).all(), [ (u'dilbert', u'java', None), (u'dogbert', None, u'dogbert'), ] ) def test_join_to_join_entities(self): sess = create_session() pa = with_polymorphic(Person, [Engineer]) pa_alias = with_polymorphic(Person, [Engineer], aliased=True) eq_( [(p1.name, type(p1), p2.name, type(p2)) for (p1, p2) in sess.query( pa, pa_alias ).join(pa_alias, or_( pa.Engineer.primary_language==\ pa_alias.Engineer.primary_language, and_( pa.Engineer.primary_language == None, pa_alias.Engineer.primary_language == None, pa.person_id > pa_alias.person_id ) ) ).order_by(pa.name, pa_alias.name)], [ (u'dilbert', Engineer, u'dilbert', Engineer), (u'dogbert', Manager, u'pointy haired boss', Boss), (u'vlad', Engineer, u'vlad', Engineer), (u'wally', Engineer, u'wally', Engineer) ] ) def test_join_to_join_columns(self): sess = create_session() pa = with_polymorphic(Person, [Engineer]) pa_alias = with_polymorphic(Person, [Engineer], aliased=True) eq_( [row for row in sess.query( pa.name, pa.Engineer.primary_language, pa_alias.name, pa_alias.Engineer.primary_language ).join(pa_alias, or_( pa.Engineer.primary_language==\ pa_alias.Engineer.primary_language, and_( pa.Engineer.primary_language == None, pa_alias.Engineer.primary_language == None, pa.person_id > pa_alias.person_id ) ) ).order_by(pa.name, pa_alias.name)], [ (u'dilbert', u'java', u'dilbert', u'java'), (u'dogbert', None, u'pointy haired boss', None), (u'vlad', u'cobol', u'vlad', u'cobol'), (u'wally', u'c++', u'wally', u'c++') ] ) class PolymorphicTest(_WithPolymorphicBase, _Polymorphic): pass class PolymorphicPolymorphicTest(_WithPolymorphicBase, _PolymorphicPolymorphic): pass class PolymorphicUnionsTest(_WithPolymorphicBase, _PolymorphicUnions): pass class PolymorphicAliasedJoinsTest(_WithPolymorphicBase, _PolymorphicAliasedJoins): pass class PolymorphicJoinsTest(_WithPolymorphicBase, _PolymorphicJoins): pass SQLAlchemy-0.8.4/test/orm/test_association.py0000644000076500000240000001427512251147172021745 0ustar classicstaff00000000000000 from sqlalchemy import testing from sqlalchemy import Integer, String, ForeignKey from sqlalchemy.testing.schema import Table, Column from sqlalchemy.orm import mapper, relationship, create_session from sqlalchemy.testing import fixtures from sqlalchemy.testing import eq_ class AssociationTest(fixtures.MappedTest): run_setup_classes = 'once' run_setup_mappers = 'once' @classmethod def define_tables(cls, metadata): Table('items', metadata, Column('item_id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', String(40))) Table('item_keywords', metadata, Column('item_id', Integer, ForeignKey('items.item_id')), Column('keyword_id', Integer, ForeignKey('keywords.keyword_id')), Column('data', String(40))) Table('keywords', metadata, Column('keyword_id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', String(40))) @classmethod def setup_classes(cls): class Item(cls.Basic): def __init__(self, name): self.name = name def __repr__(self): return "Item id=%d name=%s keywordassoc=%r" % ( self.item_id, self.name, self.keywords) class Keyword(cls.Basic): def __init__(self, name): self.name = name def __repr__(self): return "Keyword id=%d name=%s" % (self.keyword_id, self.name) class KeywordAssociation(cls.Basic): def __init__(self, keyword, data): self.keyword 
= keyword self.data = data def __repr__(self): return "KeywordAssociation itemid=%d keyword=%r data=%s" % ( self.item_id, self.keyword, self.data) @classmethod def setup_mappers(cls): KeywordAssociation, Item, Keyword = (cls.classes.KeywordAssociation, cls.classes.Item, cls.classes.Keyword) items, item_keywords, keywords = cls.tables.get_all( 'items', 'item_keywords', 'keywords') mapper(Keyword, keywords) mapper(KeywordAssociation, item_keywords, properties={ 'keyword':relationship(Keyword, lazy='joined')}, primary_key=[item_keywords.c.item_id, item_keywords.c.keyword_id], order_by=[item_keywords.c.data]) mapper(Item, items, properties={ 'keywords' : relationship(KeywordAssociation, cascade="all, delete-orphan") }) def test_insert(self): KeywordAssociation, Item, Keyword = (self.classes.KeywordAssociation, self.classes.Item, self.classes.Keyword) sess = create_session() item1 = Item('item1') item2 = Item('item2') item1.keywords.append(KeywordAssociation(Keyword('blue'), 'blue_assoc')) item1.keywords.append(KeywordAssociation(Keyword('red'), 'red_assoc')) item2.keywords.append(KeywordAssociation(Keyword('green'), 'green_assoc')) sess.add_all((item1, item2)) sess.flush() saved = repr([item1, item2]) sess.expunge_all() l = sess.query(Item).all() loaded = repr(l) eq_(saved, loaded) def test_replace(self): KeywordAssociation, Item, Keyword = (self.classes.KeywordAssociation, self.classes.Item, self.classes.Keyword) sess = create_session() item1 = Item('item1') item1.keywords.append(KeywordAssociation(Keyword('blue'), 'blue_assoc')) item1.keywords.append(KeywordAssociation(Keyword('red'), 'red_assoc')) sess.add(item1) sess.flush() red_keyword = item1.keywords[1].keyword del item1.keywords[1] item1.keywords.append(KeywordAssociation(red_keyword, 'new_red_assoc')) sess.flush() saved = repr([item1]) sess.expunge_all() l = sess.query(Item).all() loaded = repr(l) eq_(saved, loaded) def test_modify(self): KeywordAssociation, Item, Keyword = (self.classes.KeywordAssociation, self.classes.Item, self.classes.Keyword) sess = create_session() item1 = Item('item1') item2 = Item('item2') item1.keywords.append(KeywordAssociation(Keyword('blue'), 'blue_assoc')) item1.keywords.append(KeywordAssociation(Keyword('red'), 'red_assoc')) item2.keywords.append(KeywordAssociation(Keyword('green'), 'green_assoc')) sess.add_all((item1, item2)) sess.flush() red_keyword = item1.keywords[1].keyword del item1.keywords[0] del item1.keywords[0] purple_keyword = Keyword('purple') item1.keywords.append(KeywordAssociation(red_keyword, 'new_red_assoc')) item2.keywords.append(KeywordAssociation(purple_keyword, 'purple_item2_assoc')) item1.keywords.append(KeywordAssociation(purple_keyword, 'purple_item1_assoc')) item1.keywords.append(KeywordAssociation(Keyword('yellow'), 'yellow_assoc')) sess.flush() saved = repr([item1, item2]) sess.expunge_all() l = sess.query(Item).all() loaded = repr(l) eq_(saved, loaded) def test_delete(self): KeywordAssociation, Item, item_keywords, Keyword = (self.classes.KeywordAssociation, self.classes.Item, self.tables.item_keywords, self.classes.Keyword) sess = create_session() item1 = Item('item1') item2 = Item('item2') item1.keywords.append(KeywordAssociation(Keyword('blue'), 'blue_assoc')) item1.keywords.append(KeywordAssociation(Keyword('red'), 'red_assoc')) item2.keywords.append(KeywordAssociation(Keyword('green'), 'green_assoc')) sess.add_all((item1, item2)) sess.flush() eq_(item_keywords.count().scalar(), 3) sess.delete(item1) sess.delete(item2) sess.flush() eq_(item_keywords.count().scalar(), 0) 
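# A minimal sketch (added commentary, not part of the original suite) of the
# association-object pattern exercised above: KeywordAssociation maps the
# item_keywords link table directly, so the extra 'data' column on each
# item/keyword link stays reachable, rather than hiding the link behind a
# plain secondary-table many-to-many. Using the fixture's own classes:
#
#     item = Item('item1')
#     item.keywords.append(KeywordAssociation(Keyword('blue'), 'blue_assoc'))
#     session.add(item)
#     session.flush()
#     assert item.keywords[0].keyword.name == 'blue'
#     assert item.keywords[0].data == 'blue_assoc'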
SQLAlchemy-0.8.4/test/orm/test_assorted_eager.py0000644000076500000240000010057112251150015022401 0ustar classicstaff00000000000000"""Exercises for eager loading. Derived from mailing list-reported problems and trac tickets. These are generally very old 0.1-era tests and at some point should be cleaned up and modernized. """ import datetime import sqlalchemy as sa from sqlalchemy import testing from sqlalchemy import Integer, String, ForeignKey from sqlalchemy.testing.schema import Table, Column from sqlalchemy.orm import mapper, relationship, backref, create_session from sqlalchemy.testing import eq_ from sqlalchemy.testing import fixtures class EagerTest(fixtures.MappedTest): run_deletes = None run_inserts = "once" run_setup_mappers = "once" @classmethod def define_tables(cls, metadata): if testing.db.dialect.supports_native_boolean: false = 'false' else: false = "0" cls.other['false'] = false Table('owners', metadata , Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('data', String(30))) Table('categories', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', String(20))) Table('tests', metadata , Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('owner_id', Integer, ForeignKey('owners.id'), nullable=False), Column('category_id', Integer, ForeignKey('categories.id'), nullable=False)) Table('options', metadata , Column('test_id', Integer, ForeignKey('tests.id'), primary_key=True), Column('owner_id', Integer, ForeignKey('owners.id'), primary_key=True), Column('someoption', sa.Boolean, server_default=false, nullable=False)) @classmethod def setup_classes(cls): class Owner(cls.Basic): pass class Category(cls.Basic): pass class Thing(cls.Basic): pass class Option(cls.Basic): pass @classmethod def setup_mappers(cls): Category, owners, Option, tests, Thing, Owner, options, categories = (cls.classes.Category, cls.tables.owners, cls.classes.Option, cls.tables.tests, cls.classes.Thing, cls.classes.Owner, cls.tables.options, cls.tables.categories) mapper(Owner, owners) mapper(Category, categories) mapper(Option, options, properties=dict( owner=relationship(Owner), test=relationship(Thing))) mapper(Thing, tests, properties=dict( owner=relationship(Owner, backref='tests'), category=relationship(Category), owner_option=relationship(Option, primaryjoin=sa.and_(tests.c.id == options.c.test_id, tests.c.owner_id == options.c.owner_id), foreign_keys=[options.c.test_id, options.c.owner_id], uselist=False))) @classmethod def insert_data(cls): Owner, Category, Option, Thing = (cls.classes.Owner, cls.classes.Category, cls.classes.Option, cls.classes.Thing) session = create_session() o = Owner() c = Category(name='Some Category') session.add_all(( Thing(owner=o, category=c), Thing(owner=o, category=c, owner_option=Option(someoption=True)), Thing(owner=o, category=c, owner_option=Option()))) session.flush() def test_noorm(self): """test the control case""" tests, options, categories = (self.tables.tests, self.tables.options, self.tables.categories) # I want to display a list of tests owned by owner 1 # if someoption is false or they haven't specified it yet (null) # but not if they set it to true (example someoption is for hiding) # desired output for owner 1 # test_id, cat_name # 1 'Some Category' # 3 " # not orm style correct query print "Obtaining correct results without orm" result = sa.select( [tests.c.id,categories.c.name], sa.and_(tests.c.owner_id == 1, sa.or_(options.c.someoption==None, 
options.c.someoption==False)), order_by=[tests.c.id], from_obj=[tests.join(categories).outerjoin(options, sa.and_( tests.c.id == options.c.test_id, tests.c.owner_id == options.c.owner_id))] ).execute().fetchall() eq_(result, [(1, u'Some Category'), (3, u'Some Category')]) def test_withoutjoinedload(self): Thing, tests, options = (self.classes.Thing, self.tables.tests, self.tables.options) s = create_session() l = (s.query(Thing). select_from(tests.outerjoin(options, sa.and_(tests.c.id == options.c.test_id, tests.c.owner_id == options.c.owner_id))). filter(sa.and_(tests.c.owner_id==1, sa.or_(options.c.someoption==None, options.c.someoption==False)))) result = ["%d %s" % ( t.id,t.category.name ) for t in l] eq_(result, [u'1 Some Category', u'3 Some Category']) def test_withjoinedload(self): """ Test that an joinedload locates the correct "from" clause with which to attach to, when presented with a query that already has a complicated from clause. """ Thing, tests, options = (self.classes.Thing, self.tables.tests, self.tables.options) s = create_session() q=s.query(Thing).options(sa.orm.joinedload('category')) l=(q.select_from(tests.outerjoin(options, sa.and_(tests.c.id == options.c.test_id, tests.c.owner_id == options.c.owner_id))). filter(sa.and_(tests.c.owner_id == 1, sa.or_(options.c.someoption==None, options.c.someoption==False)))) result = ["%d %s" % ( t.id,t.category.name ) for t in l] eq_(result, [u'1 Some Category', u'3 Some Category']) def test_dslish(self): """test the same as withjoinedload except using generative""" Thing, tests, options = (self.classes.Thing, self.tables.tests, self.tables.options) s = create_session() q = s.query(Thing).options(sa.orm.joinedload('category')) l = q.filter ( sa.and_(tests.c.owner_id == 1, sa.or_(options.c.someoption == None, options.c.someoption == False)) ).outerjoin('owner_option') result = ["%d %s" % ( t.id,t.category.name ) for t in l] eq_(result, [u'1 Some Category', u'3 Some Category']) @testing.crashes('sybase', 'FIXME: unknown, verify not fails_on') def test_without_outerjoin_literal(self): Thing, tests, false = (self.classes.Thing, self.tables.tests, self.other.false) s = create_session() q = s.query(Thing).options(sa.orm.joinedload('category')) l = (q.filter( (tests.c.owner_id==1) & ('options.someoption is null or options.someoption=%s' % false)). 
join('owner_option')) result = ["%d %s" % ( t.id,t.category.name ) for t in l] eq_(result, [u'3 Some Category']) def test_withoutouterjoin(self): Thing, tests, options = (self.classes.Thing, self.tables.tests, self.tables.options) s = create_session() q = s.query(Thing).options(sa.orm.joinedload('category')) l = q.filter( (tests.c.owner_id==1) & ((options.c.someoption==None) | (options.c.someoption==False)) ).join('owner_option') result = ["%d %s" % ( t.id,t.category.name ) for t in l] eq_(result, [u'3 Some Category']) class EagerTest2(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): Table('left', metadata, Column('id', Integer, ForeignKey('middle.id'), primary_key=True), Column('data', String(50), primary_key=True)) Table('middle', metadata, Column('id', Integer, primary_key = True, test_needs_autoincrement=True), Column('data', String(50))) Table('right', metadata, Column('id', Integer, ForeignKey('middle.id'), primary_key=True), Column('data', String(50), primary_key=True)) @classmethod def setup_classes(cls): class Left(cls.Basic): def __init__(self, data): self.data = data class Middle(cls.Basic): def __init__(self, data): self.data = data class Right(cls.Basic): def __init__(self, data): self.data = data @classmethod def setup_mappers(cls): Right, Middle, middle, right, left, Left = (cls.classes.Right, cls.classes.Middle, cls.tables.middle, cls.tables.right, cls.tables.left, cls.classes.Left) # set up bi-directional eager loads mapper(Left, left) mapper(Right, right) mapper(Middle, middle, properties=dict( left=relationship(Left, lazy='joined', backref=backref('middle',lazy='joined')), right=relationship(Right, lazy='joined', backref=backref('middle', lazy='joined')))), @testing.fails_on('maxdb', 'FIXME: unknown') def test_eager_terminate(self): """Eager query generation does not include the same mapper's table twice. Or, that bi-directional eager loads dont include each other in eager query generation. 
""" Middle, Right, Left = (self.classes.Middle, self.classes.Right, self.classes.Left) p = Middle('m1') p.left.append(Left('l1')) p.right.append(Right('r1')) session = create_session() session.add(p) session.flush() session.expunge_all() obj = session.query(Left).filter_by(data='l1').one() class EagerTest3(fixtures.MappedTest): """Eager loading combined with nested SELECT statements, functions, and aggregates.""" @classmethod def define_tables(cls, metadata): Table('datas', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('a', Integer, nullable=False)) Table('foo', metadata, Column('data_id', Integer, ForeignKey('datas.id'),primary_key=True), Column('bar', Integer)) Table('stats', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('data_id', Integer, ForeignKey('datas.id')), Column('somedata', Integer, nullable=False )) @classmethod def setup_classes(cls): class Data(cls.Basic): pass class Foo(cls.Basic): pass class Stat(cls.Basic): pass @testing.fails_on('maxdb', 'FIXME: unknown') def test_nesting_with_functions(self): Stat, Foo, stats, foo, Data, datas = (self.classes.Stat, self.classes.Foo, self.tables.stats, self.tables.foo, self.classes.Data, self.tables.datas) mapper(Data, datas) mapper(Foo, foo, properties={ 'data': relationship(Data,backref=backref('foo',uselist=False))}) mapper(Stat, stats, properties={ 'data':relationship(Data)}) session = create_session() data = [Data(a=x) for x in range(5)] session.add_all(data) session.add_all(( Stat(data=data[0], somedata=1), Stat(data=data[1], somedata=2), Stat(data=data[2], somedata=3), Stat(data=data[3], somedata=4), Stat(data=data[4], somedata=5), Stat(data=data[0], somedata=6), Stat(data=data[1], somedata=7), Stat(data=data[2], somedata=8), Stat(data=data[3], somedata=9), Stat(data=data[4], somedata=10))) session.flush() arb_data = sa.select( [stats.c.data_id, sa.func.max(stats.c.somedata).label('max')], stats.c.data_id <= 5, group_by=[stats.c.data_id]) arb_result = arb_data.execute().fetchall() # order the result list descending based on 'max' arb_result.sort(key = lambda a: a['max'], reverse=True) # extract just the "data_id" from it arb_result = [row['data_id'] for row in arb_result] arb_data = arb_data.alias('arb') # now query for Data objects using that above select, adding the # "order by max desc" separately q = (session.query(Data). options(sa.orm.joinedload('foo')). select_from(datas.join(arb_data, arb_data.c.data_id == datas.c.id)). order_by(sa.desc(arb_data.c.max)). 
limit(10)) # extract "data_id" from the list of result objects verify_result = [d.id for d in q] eq_(verify_result, arb_result) class EagerTest4(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): Table('departments', metadata, Column('department_id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', String(50))) Table('employees', metadata, Column('person_id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', String(50)), Column('department_id', Integer, ForeignKey('departments.department_id'))) @classmethod def setup_classes(cls): class Department(cls.Basic): pass class Employee(cls.Basic): pass @testing.fails_on('maxdb', 'FIXME: unknown') def test_basic(self): Department, Employee, employees, departments = (self.classes.Department, self.classes.Employee, self.tables.employees, self.tables.departments) mapper(Employee, employees) mapper(Department, departments, properties=dict( employees=relationship(Employee, lazy='joined', backref='department'))) d1 = Department(name='One') for e in 'Jim', 'Jack', 'John', 'Susan': d1.employees.append(Employee(name=e)) d2 = Department(name='Two') for e in 'Joe', 'Bob', 'Mary', 'Wally': d2.employees.append(Employee(name=e)) sess = create_session() sess.add_all((d1, d2)) sess.flush() q = (sess.query(Department). join('employees'). filter(Employee.name.startswith('J')). distinct(). order_by(sa.desc(Department.name))) eq_(q.count(), 2) assert q[0] is d2 class EagerTest5(fixtures.MappedTest): """Construction of AliasedClauses for the same eager load property but different parent mappers, due to inheritance.""" @classmethod def define_tables(cls, metadata): Table('base', metadata, Column('uid', String(30), primary_key=True), Column('x', String(30))) Table('derived', metadata, Column('uid', String(30), ForeignKey('base.uid'), primary_key=True), Column('y', String(30))) Table('derivedII', metadata, Column('uid', String(30), ForeignKey('base.uid'), primary_key=True), Column('z', String(30))) Table('comments', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('uid', String(30), ForeignKey('base.uid')), Column('comment', String(30))) @classmethod def setup_classes(cls): class Base(cls.Basic): def __init__(self, uid, x): self.uid = uid self.x = x class Derived(Base): def __init__(self, uid, x, y): self.uid = uid self.x = x self.y = y class DerivedII(Base): def __init__(self, uid, x, z): self.uid = uid self.x = x self.z = z class Comment(cls.Basic): def __init__(self, uid, comment): self.uid = uid self.comment = comment def test_basic(self): Comment, Derived, derived, comments, DerivedII, Base, base, derivedII = (self.classes.Comment, self.classes.Derived, self.tables.derived, self.tables.comments, self.classes.DerivedII, self.classes.Base, self.tables.base, self.tables.derivedII) commentMapper = mapper(Comment, comments) baseMapper = mapper(Base, base, properties=dict( comments=relationship(Comment, lazy='joined', cascade='all, delete-orphan'))) mapper(Derived, derived, inherits=baseMapper) mapper(DerivedII, derivedII, inherits=baseMapper) sess = create_session() d = Derived('uid1', 'x', 'y') d.comments = [Comment('uid1', 'comment')] d2 = DerivedII('uid2', 'xx', 'z') d2.comments = [Comment('uid2', 'comment')] sess.add_all((d, d2)) sess.flush() sess.expunge_all() # this eager load sets up an AliasedClauses for the "comment" # relationship, then stores it in clauses_by_lead_mapper[mapper for # Derived] d = sess.query(Derived).get('uid1') sess.expunge_all() assert 
len([c for c in d.comments]) == 1 # this eager load sets up an AliasedClauses for the "comment" # relationship, and should store it in clauses_by_lead_mapper[mapper # for DerivedII]. the bug was that the previous AliasedClause create # prevented this population from occurring. d2 = sess.query(DerivedII).get('uid2') sess.expunge_all() # object is not in the session; therefore the lazy load cant trigger # here, eager load had to succeed assert len([c for c in d2.comments]) == 1 class EagerTest6(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): Table('design_types', metadata, Column('design_type_id', Integer, primary_key=True, test_needs_autoincrement=True)) Table('design', metadata, Column('design_id', Integer, primary_key=True, test_needs_autoincrement=True), Column('design_type_id', Integer, ForeignKey('design_types.design_type_id'))) Table('parts', metadata, Column('part_id', Integer, primary_key=True, test_needs_autoincrement=True), Column('design_id', Integer, ForeignKey('design.design_id')), Column('design_type_id', Integer, ForeignKey('design_types.design_type_id'))) Table('inherited_part', metadata, Column('ip_id', Integer, primary_key=True, test_needs_autoincrement=True), Column('part_id', Integer, ForeignKey('parts.part_id')), Column('design_id', Integer, ForeignKey('design.design_id'))) @classmethod def setup_classes(cls): class Part(cls.Basic): pass class Design(cls.Basic): pass class DesignType(cls.Basic): pass class InheritedPart(cls.Basic): pass def test_one(self): Part, inherited_part, design_types, DesignType, parts, design, Design, InheritedPart = (self.classes.Part, self.tables.inherited_part, self.tables.design_types, self.classes.DesignType, self.tables.parts, self.tables.design, self.classes.Design, self.classes.InheritedPart) p_m = mapper(Part, parts) mapper(InheritedPart, inherited_part, properties=dict( part=relationship(Part, lazy='joined'))) d_m = mapper(Design, design, properties=dict( inheritedParts=relationship(InheritedPart, cascade="all, delete-orphan", backref="design"))) mapper(DesignType, design_types) d_m.add_property( "type", relationship(DesignType, lazy='joined', backref="designs")) p_m.add_property( "design", relationship( Design, lazy='joined', backref=backref("parts", cascade="all, delete-orphan"))) d = Design() sess = create_session() sess.add(d) sess.flush() sess.expunge_all() x = sess.query(Design).get(1) x.inheritedParts class EagerTest7(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): Table('companies', metadata, Column('company_id', Integer, primary_key=True, test_needs_autoincrement=True), Column('company_name', String(40))) Table('addresses', metadata, Column('address_id', Integer, primary_key=True,test_needs_autoincrement=True), Column('company_id', Integer, ForeignKey("companies.company_id")), Column('address', String(40))) Table('phone_numbers', metadata, Column('phone_id', Integer, primary_key=True,test_needs_autoincrement=True), Column('address_id', Integer, ForeignKey('addresses.address_id')), Column('type', String(20)), Column('number', String(10))) Table('invoices', metadata, Column('invoice_id', Integer, primary_key=True, test_needs_autoincrement=True), Column('company_id', Integer, ForeignKey("companies.company_id")), Column('date', sa.DateTime)) @classmethod def setup_classes(cls): class Company(cls.Comparable): pass class Address(cls.Comparable): pass class Phone(cls.Comparable): pass class Invoice(cls.Comparable): pass def test_load_m2o_attached_to_o2(self): """ Tests eager load of a 
many-to-one attached to a one-to-many. this testcase illustrated the bug, which is that when the single Company is loaded, no further processing of the rows occurred in order to load the Company's second Address object. """ addresses, invoices, Company, companies, Invoice, Address = (self.tables.addresses, self.tables.invoices, self.classes.Company, self.tables.companies, self.classes.Invoice, self.classes.Address) mapper(Address, addresses) mapper(Company, companies, properties={ 'addresses' : relationship(Address, lazy='joined')}) mapper(Invoice, invoices, properties={ 'company': relationship(Company, lazy='joined')}) a1 = Address(address='a1 address') a2 = Address(address='a2 address') c1 = Company(company_name='company 1', addresses=[a1, a2]) i1 = Invoice(date=datetime.datetime.now(), company=c1) session = create_session() session.add(i1) session.flush() company_id = c1.company_id invoice_id = i1.invoice_id session.expunge_all() c = session.query(Company).get(company_id) session.expunge_all() i = session.query(Invoice).get(invoice_id) def go(): eq_(c, i.company) eq_(c.addresses, i.company.addresses) self.assert_sql_count(testing.db, go, 0) class EagerTest8(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): Table('prj', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('created', sa.DateTime ), Column('title', sa.Unicode(100))) Table('task', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('status_id', Integer, ForeignKey('task_status.id'), nullable=False), Column('title', sa.Unicode(100)), Column('task_type_id', Integer , ForeignKey('task_type.id'), nullable=False), Column('prj_id', Integer , ForeignKey('prj.id'), nullable=False)) Table('task_status', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True)) Table('task_type', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True)) Table('msg', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('posted', sa.DateTime, index=True,), Column('type_id', Integer, ForeignKey('msg_type.id')), Column('task_id', Integer, ForeignKey('task.id'))) Table('msg_type', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', sa.Unicode(20)), Column('display_name', sa.Unicode(20))) @classmethod def fixtures(cls): return dict( prj=(('id',), (1,)), task_status=(('id',), (1,)), task_type=(('id',), (1,),), task=(('title', 'task_type_id', 'status_id', 'prj_id'), (u'task 1', 1, 1, 1))) @classmethod def setup_classes(cls): class Task_Type(cls.Comparable): pass class Joined(cls.Comparable): pass @testing.fails_on('maxdb', 'FIXME: unknown') def test_nested_joins(self): task, Task_Type, Joined, prj, task_type, msg = (self.tables.task, self.classes.Task_Type, self.classes.Joined, self.tables.prj, self.tables.task_type, self.tables.msg) # this is testing some subtle column resolution stuff, # concerning corresponding_column() being extremely accurate # as well as how mapper sets up its column properties mapper(Task_Type, task_type) tsk_cnt_join = sa.outerjoin(prj, task, task.c.prj_id==prj.c.id) j = sa.outerjoin(task, msg, task.c.id==msg.c.task_id) jj = sa.select([ task.c.id.label('task_id'), sa.func.count(msg.c.id).label('props_cnt')], from_obj=[j], group_by=[task.c.id]).alias('prop_c_s') jjj = sa.join(task, jj, task.c.id == jj.c.task_id) mapper(Joined, jjj, properties=dict( type=relationship(Task_Type, lazy='joined'))) session = 
create_session() eq_(session.query(Joined).limit(10).offset(0).one(), Joined(id=1, title=u'task 1', props_cnt=0)) class EagerTest9(fixtures.MappedTest): """Test the usage of query options to eagerly load specific paths. This relies upon the 'path' construct used by PropertyOption to relate LoaderStrategies to specific paths, as well as the path state maintained throughout the query setup/mapper instances process. """ @classmethod def define_tables(cls, metadata): Table('accounts', metadata, Column('account_id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', String(40))) Table('transactions', metadata, Column('transaction_id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', String(40))) Table('entries', metadata, Column('entry_id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', String(40)), Column('account_id', Integer, ForeignKey('accounts.account_id')), Column('transaction_id', Integer, ForeignKey('transactions.transaction_id'))) @classmethod def setup_classes(cls): class Account(cls.Basic): pass class Transaction(cls.Basic): pass class Entry(cls.Basic): pass @classmethod def setup_mappers(cls): Account, Transaction, transactions, accounts, entries, Entry = (cls.classes.Account, cls.classes.Transaction, cls.tables.transactions, cls.tables.accounts, cls.tables.entries, cls.classes.Entry) mapper(Account, accounts) mapper(Transaction, transactions) mapper(Entry, entries, properties=dict( account=relationship(Account, uselist=False, backref=backref('entries', lazy='select', order_by=entries.c.entry_id)), transaction=relationship(Transaction, uselist=False, backref=backref('entries', lazy='joined', order_by=entries.c.entry_id)))) @testing.fails_on('maxdb', 'FIXME: unknown') def test_joinedload_on_path(self): Entry, Account, Transaction = (self.classes.Entry, self.classes.Account, self.classes.Transaction) session = create_session() tx1 = Transaction(name='tx1') tx2 = Transaction(name='tx2') acc1 = Account(name='acc1') ent11 = Entry(name='ent11', account=acc1, transaction=tx1) ent12 = Entry(name='ent12', account=acc1, transaction=tx2) acc2 = Account(name='acc2') ent21 = Entry(name='ent21', account=acc2, transaction=tx1) ent22 = Entry(name='ent22', account=acc2, transaction=tx2) session.add(acc1) session.flush() session.expunge_all() def go(): # load just the first Account. eager loading will actually load # all objects saved thus far, but will not eagerly load the # "accounts" off the immediate "entries"; only the "accounts" off # the entries->transaction->entries acc = (session.query(Account). options(sa.orm.joinedload_all('entries.transaction.entries.account')). 
order_by(Account.account_id)).first() # no sql occurs eq_(acc.name, 'acc1') eq_(acc.entries[0].transaction.entries[0].account.name, 'acc1') eq_(acc.entries[0].transaction.entries[1].account.name, 'acc2') # lazyload triggers but no sql occurs because many-to-one uses # cached query.get() for e in acc.entries: assert e.account is acc self.assert_sql_count(testing.db, go, 1) SQLAlchemy-0.8.4/test/orm/test_attributes.py0000644000076500000240000025767612251150015021623 0ustar classicstaff00000000000000import pickle from sqlalchemy.orm import attributes, instrumentation, exc as orm_exc from sqlalchemy.orm.collections import collection from sqlalchemy.orm.interfaces import AttributeExtension from sqlalchemy import exc as sa_exc from sqlalchemy.testing import eq_, ne_, assert_raises, \ assert_raises_message from sqlalchemy.testing import fixtures from sqlalchemy.testing.util import gc_collect, all_partial_orderings from sqlalchemy.util import jython from sqlalchemy import event from sqlalchemy import testing from sqlalchemy.testing.mock import Mock, call from sqlalchemy.orm.state import InstanceState # global for pickling tests MyTest = None MyTest2 = None def _set_callable(state, dict_, key, callable_): fn = InstanceState._row_processor(state.manager, callable_, key) fn(state, dict_, None) class AttributeImplAPITest(fixtures.MappedTest): def _scalar_obj_fixture(self): class A(object): pass class B(object): pass instrumentation.register_class(A) instrumentation.register_class(B) attributes.register_attribute(A, "b", uselist=False, useobject=True) return A, B def _collection_obj_fixture(self): class A(object): pass class B(object): pass instrumentation.register_class(A) instrumentation.register_class(B) attributes.register_attribute(A, "b", uselist=True, useobject=True) return A, B def test_scalar_obj_remove_invalid(self): A, B = self._scalar_obj_fixture() a1 = A() b1 = B() b2 = B() A.b.impl.append( attributes.instance_state(a1), attributes.instance_dict(a1), b1, None ) assert a1.b is b1 assert_raises_message( ValueError, "Object not " "associated with on attribute 'b'", A.b.impl.remove, attributes.instance_state(a1), attributes.instance_dict(a1), b2, None ) def test_scalar_obj_pop_invalid(self): A, B = self._scalar_obj_fixture() a1 = A() b1 = B() b2 = B() A.b.impl.append( attributes.instance_state(a1), attributes.instance_dict(a1), b1, None ) assert a1.b is b1 A.b.impl.pop( attributes.instance_state(a1), attributes.instance_dict(a1), b2, None ) assert a1.b is b1 def test_scalar_obj_pop_valid(self): A, B = self._scalar_obj_fixture() a1 = A() b1 = B() A.b.impl.append( attributes.instance_state(a1), attributes.instance_dict(a1), b1, None ) assert a1.b is b1 A.b.impl.pop( attributes.instance_state(a1), attributes.instance_dict(a1), b1, None ) assert a1.b is None def test_collection_obj_remove_invalid(self): A, B = self._collection_obj_fixture() a1 = A() b1 = B() b2 = B() A.b.impl.append( attributes.instance_state(a1), attributes.instance_dict(a1), b1, None ) assert a1.b == [b1] assert_raises_message( ValueError, r"list.remove\(x\): x not in list", A.b.impl.remove, attributes.instance_state(a1), attributes.instance_dict(a1), b2, None ) def test_collection_obj_pop_invalid(self): A, B = self._collection_obj_fixture() a1 = A() b1 = B() b2 = B() A.b.impl.append( attributes.instance_state(a1), attributes.instance_dict(a1), b1, None ) assert a1.b == [b1] A.b.impl.pop( attributes.instance_state(a1), attributes.instance_dict(a1), b2, None ) assert a1.b == [b1] def test_collection_obj_pop_valid(self): A, B = 
self._collection_obj_fixture() a1 = A() b1 = B() A.b.impl.append( attributes.instance_state(a1), attributes.instance_dict(a1), b1, None ) assert a1.b == [b1] A.b.impl.pop( attributes.instance_state(a1), attributes.instance_dict(a1), b1, None ) assert a1.b == [] class AttributesTest(fixtures.ORMTest): def setup(self): global MyTest, MyTest2 class MyTest(object): pass class MyTest2(object): pass def teardown(self): global MyTest, MyTest2 MyTest, MyTest2 = None, None def test_basic(self): class User(object): pass instrumentation.register_class(User) attributes.register_attribute(User, 'user_id', uselist=False, useobject=False) attributes.register_attribute(User, 'user_name', uselist=False, useobject=False) attributes.register_attribute(User, 'email_address', uselist=False, useobject=False) u = User() u.user_id = 7 u.user_name = 'john' u.email_address = 'lala@123.com' self.assert_(u.user_id == 7 and u.user_name == 'john' and u.email_address == 'lala@123.com') attributes.instance_state(u)._commit_all(attributes.instance_dict(u)) self.assert_(u.user_id == 7 and u.user_name == 'john' and u.email_address == 'lala@123.com') u.user_name = 'heythere' u.email_address = 'foo@bar.com' self.assert_(u.user_id == 7 and u.user_name == 'heythere' and u.email_address == 'foo@bar.com') def test_pickleness(self): instrumentation.register_class(MyTest) instrumentation.register_class(MyTest2) attributes.register_attribute(MyTest, 'user_id', uselist=False, useobject=False) attributes.register_attribute(MyTest, 'user_name', uselist=False, useobject=False) attributes.register_attribute(MyTest, 'email_address', uselist=False, useobject=False) attributes.register_attribute(MyTest2, 'a', uselist=False, useobject=False) attributes.register_attribute(MyTest2, 'b', uselist=False, useobject=False) # shouldnt be pickling callables at the class level def somecallable(state, passive): return None attributes.register_attribute(MyTest, 'mt2', uselist=True, trackparent=True, callable_=somecallable, useobject=True) o = MyTest() o.mt2.append(MyTest2()) o.user_id=7 o.mt2[0].a = 'abcde' pk_o = pickle.dumps(o) o2 = pickle.loads(pk_o) pk_o2 = pickle.dumps(o2) # so... pickle is creating a new 'mt2' string after a roundtrip here, # so we'll brute-force set it to be id-equal to the original string if False: o_mt2_str = [ k for k in o.__dict__ if k == 'mt2'][0] o2_mt2_str = [ k for k in o2.__dict__ if k == 'mt2'][0] self.assert_(o_mt2_str == o2_mt2_str) self.assert_(o_mt2_str is not o2_mt2_str) # change the id of o2.__dict__['mt2'] former = o2.__dict__['mt2'] del o2.__dict__['mt2'] o2.__dict__[o_mt2_str] = former # Relies on dict ordering if not jython: self.assert_(pk_o == pk_o2) # the above is kind of distrurbing, so let's do it again a little # differently. the string-id in serialization thing is just an # artifact of pickling that comes up in the first round-trip. # a -> b differs in pickle memoization of 'mt2', but b -> c will # serialize identically. o3 = pickle.loads(pk_o2) pk_o3 = pickle.dumps(o3) o4 = pickle.loads(pk_o3) pk_o4 = pickle.dumps(o4) # Relies on dict ordering if not jython: self.assert_(pk_o3 == pk_o4) # and lastly make sure we still have our data after all that. 
# identical serialzation is great, *if* it's complete :) self.assert_(o4.user_id == 7) self.assert_(o4.user_name is None) self.assert_(o4.email_address is None) self.assert_(len(o4.mt2) == 1) self.assert_(o4.mt2[0].a == 'abcde') self.assert_(o4.mt2[0].b is None) @testing.requires.predictable_gc def test_state_gc(self): """test that InstanceState always has a dict, even after host object gc'ed.""" class Foo(object): pass instrumentation.register_class(Foo) f = Foo() state = attributes.instance_state(f) f.bar = "foo" eq_(state.dict, {'bar': 'foo', state.manager.STATE_ATTR: state}) del f gc_collect() assert state.obj() is None assert state.dict == {} def test_object_dereferenced_error(self): class Foo(object): pass class Bar(object): def __init__(self): gc_collect() instrumentation.register_class(Foo) instrumentation.register_class(Bar) attributes.register_attribute(Foo, 'bars', uselist=True, useobject=True) assert_raises_message( orm_exc.ObjectDereferencedError, "Can't emit change event for attribute " "'Foo.bars' - parent object of type " "has been garbage collected.", lambda: Foo().bars.append(Bar()) ) def test_deferred(self): class Foo(object):pass data = {'a':'this is a', 'b':12} def loader(state, keys): for k in keys: state.dict[k] = data[k] return attributes.ATTR_WAS_SET instrumentation.register_class(Foo) manager = attributes.manager_of_class(Foo) manager.deferred_scalar_loader = loader attributes.register_attribute(Foo, 'a', uselist=False, useobject=False) attributes.register_attribute(Foo, 'b', uselist=False, useobject=False) f = Foo() attributes.instance_state(f)._expire(attributes.instance_dict(f), set()) eq_(f.a, 'this is a') eq_(f.b, 12) f.a = 'this is some new a' attributes.instance_state(f)._expire(attributes.instance_dict(f), set()) eq_(f.a, 'this is a') eq_(f.b, 12) attributes.instance_state(f)._expire(attributes.instance_dict(f), set()) f.a = 'this is another new a' eq_(f.a, 'this is another new a') eq_(f.b, 12) attributes.instance_state(f)._expire(attributes.instance_dict(f), set()) eq_(f.a, 'this is a') eq_(f.b, 12) del f.a eq_(f.a, None) eq_(f.b, 12) attributes.instance_state(f)._commit_all(attributes.instance_dict(f), set()) eq_(f.a, None) eq_(f.b, 12) def test_deferred_pickleable(self): data = {'a':'this is a', 'b':12} def loader(state, keys): for k in keys: state.dict[k] = data[k] return attributes.ATTR_WAS_SET instrumentation.register_class(MyTest) manager = attributes.manager_of_class(MyTest) manager.deferred_scalar_loader=loader attributes.register_attribute(MyTest, 'a', uselist=False, useobject=False) attributes.register_attribute(MyTest, 'b', uselist=False, useobject=False) m = MyTest() attributes.instance_state(m)._expire(attributes.instance_dict(m), set()) assert 'a' not in m.__dict__ m2 = pickle.loads(pickle.dumps(m)) assert 'a' not in m2.__dict__ eq_(m2.a, "this is a") eq_(m2.b, 12) def test_list(self): class User(object):pass class Address(object):pass instrumentation.register_class(User) instrumentation.register_class(Address) attributes.register_attribute(User, 'user_id', uselist=False, useobject=False) attributes.register_attribute(User, 'user_name', uselist=False, useobject=False) attributes.register_attribute(User, 'addresses', uselist=True, useobject=True) attributes.register_attribute(Address, 'address_id', uselist=False, useobject=False) attributes.register_attribute(Address, 'email_address', uselist=False, useobject=False) u = User() u.user_id = 7 u.user_name = 'john' u.addresses = [] a = Address() a.address_id = 10 a.email_address = 'lala@123.com' 
u.addresses.append(a) self.assert_(u.user_id == 7 and u.user_name == 'john' and u.addresses[0].email_address == 'lala@123.com') (u, attributes.instance_state(a)._commit_all(attributes.instance_dict(a))) self.assert_(u.user_id == 7 and u.user_name == 'john' and u.addresses[0].email_address == 'lala@123.com') u.user_name = 'heythere' a = Address() a.address_id = 11 a.email_address = 'foo@bar.com' u.addresses.append(a) eq_(u.user_id, 7) eq_(u.user_name, 'heythere') eq_(u.addresses[0].email_address,'lala@123.com') eq_(u.addresses[1].email_address,'foo@bar.com') def test_extension_commit_attr(self): """test that an extension which commits attribute history maintains the end-result history. This won't work in conjunction with some unitofwork extensions. """ class Foo(fixtures.BasicEntity): pass class Bar(fixtures.BasicEntity): pass class ReceiveEvents(AttributeExtension): def __init__(self, key): self.key = key def append(self, state, child, initiator): if commit: state._commit_all(state.dict) return child def remove(self, state, child, initiator): if commit: state._commit_all(state.dict) return child def set(self, state, child, oldchild, initiator): if commit: state._commit_all(state.dict) return child instrumentation.register_class(Foo) instrumentation.register_class(Bar) b1, b2, b3, b4 = Bar(id='b1'), Bar(id='b2'), Bar(id='b3'), Bar(id='b4') def loadcollection(state, passive): if passive is attributes.PASSIVE_NO_FETCH: return attributes.PASSIVE_NO_RESULT return [b1, b2] def loadscalar(state, passive): if passive is attributes.PASSIVE_NO_FETCH: return attributes.PASSIVE_NO_RESULT return b2 attributes.register_attribute(Foo, 'bars', uselist=True, useobject=True, callable_=loadcollection, extension=[ReceiveEvents('bars')]) attributes.register_attribute(Foo, 'bar', uselist=False, useobject=True, callable_=loadscalar, extension=[ReceiveEvents('bar')]) attributes.register_attribute(Foo, 'scalar', uselist=False, useobject=False, extension=[ReceiveEvents('scalar')]) def create_hist(): def hist(key, shouldmatch, fn, *arg): attributes.instance_state(f1)._commit_all(attributes.instance_dict(f1)) fn(*arg) histories.append((shouldmatch, attributes.get_history(f1, key))) f1 = Foo() hist('bars', True, f1.bars.append, b3) hist('bars', True, f1.bars.append, b4) hist('bars', False, f1.bars.remove, b2) hist('bar', True, setattr, f1, 'bar', b3) hist('bar', True, setattr, f1, 'bar', None) hist('bar', True, setattr, f1, 'bar', b4) hist('scalar', True, setattr, f1, 'scalar', 5) hist('scalar', True, setattr, f1, 'scalar', None) hist('scalar', True, setattr, f1, 'scalar', 4) histories = [] commit = False create_hist() without_commit = list(histories) histories[:] = [] commit = True create_hist() with_commit = histories for without, with_ in zip(without_commit, with_commit): shouldmatch, woc = without shouldmatch, wic = with_ if shouldmatch: eq_(woc, wic) else: ne_(woc, wic) def test_extension_lazyload_assertion(self): class Foo(fixtures.BasicEntity): pass class Bar(fixtures.BasicEntity): pass class ReceiveEvents(AttributeExtension): def append(self, state, child, initiator): state.obj().bars return child def remove(self, state, child, initiator): state.obj().bars return child def set(self, state, child, oldchild, initiator): return child instrumentation.register_class(Foo) instrumentation.register_class(Bar) bar1, bar2, bar3 = [Bar(id=1), Bar(id=2), Bar(id=3)] def func1(state, passive): if passive is attributes.PASSIVE_NO_FETCH: return attributes.PASSIVE_NO_RESULT return [bar1, bar2, bar3] 
attributes.register_attribute(Foo, 'bars', uselist=True, callable_=func1, useobject=True, extension=[ReceiveEvents()]) attributes.register_attribute(Bar, 'foos', uselist=True, useobject=True, backref='bars') x = Foo() assert_raises(AssertionError, Bar(id=4).foos.append, x) x.bars b = Bar(id=4) b.foos.append(x) attributes.instance_state(x)._expire_attributes(attributes.instance_dict(x), ['bars']) assert_raises(AssertionError, b.foos.remove, x) def test_scalar_listener(self): # listeners on ScalarAttributeImpl aren't used normally. test that # they work for the benefit of user extensions class Foo(object): pass results = [] class ReceiveEvents(AttributeExtension): def append(self, state, child, initiator): assert False def remove(self, state, child, initiator): results.append(("remove", state.obj(), child)) def set(self, state, child, oldchild, initiator): results.append(("set", state.obj(), child, oldchild)) return child instrumentation.register_class(Foo) attributes.register_attribute(Foo, 'x', uselist=False, useobject=False, extension=ReceiveEvents()) f = Foo() f.x = 5 f.x = 17 del f.x eq_(results, [ ('set', f, 5, attributes.NEVER_SET), ('set', f, 17, 5), ('remove', f, 17), ]) def test_lazytrackparent(self): """test that the "hasparent" flag works properly when lazy loaders and backrefs are used """ class Post(object): pass class Blog(object): pass instrumentation.register_class(Post) instrumentation.register_class(Blog) # set up instrumented attributes with backrefs attributes.register_attribute(Post, 'blog', uselist=False, backref='posts', trackparent=True, useobject=True) attributes.register_attribute(Blog, 'posts', uselist=True, backref='blog', trackparent=True, useobject=True) # create objects as if they'd been freshly loaded from the database (without history) b = Blog() p1 = Post() _set_callable(attributes.instance_state(b), attributes.instance_dict(b), 'posts', lambda state, passive:[p1]) _set_callable(attributes.instance_state(p1), attributes.instance_dict(p1), 'blog', lambda state, passive:b) p1, attributes.instance_state(b)._commit_all(attributes.instance_dict(b)) # no orphans (called before the lazy loaders fire off) assert attributes.has_parent(Blog, p1, 'posts', optimistic=True) assert attributes.has_parent(Post, b, 'blog', optimistic=True) # assert connections assert p1.blog is b assert p1 in b.posts # manual connections b2 = Blog() p2 = Post() b2.posts.append(p2) assert attributes.has_parent(Blog, p2, 'posts') assert attributes.has_parent(Post, b2, 'blog') def test_illegal_trackparent(self): class Post(object):pass class Blog(object):pass instrumentation.register_class(Post) instrumentation.register_class(Blog) attributes.register_attribute(Post, 'blog', useobject=True) assert_raises_message( AssertionError, "This AttributeImpl is not configured to track parents.", attributes.has_parent, Post, Blog(), 'blog' ) assert_raises_message( AssertionError, "This AttributeImpl is not configured to track parents.", Post.blog.impl.sethasparent, "x", "x", True ) def test_inheritance(self): """tests that attributes are polymorphic""" class Foo(object):pass class Bar(Foo):pass instrumentation.register_class(Foo) instrumentation.register_class(Bar) def func1(state, passive): return "this is the foo attr" def func2(state, passive): return "this is the bar attr" def func3(state, passive): return "this is the shared attr" attributes.register_attribute(Foo, 'element', uselist=False, callable_=func1, useobject=True) attributes.register_attribute(Foo, 'element2', uselist=False, 
callable_=func3, useobject=True) attributes.register_attribute(Bar, 'element', uselist=False, callable_=func2, useobject=True) x = Foo() y = Bar() assert x.element == 'this is the foo attr' assert y.element == 'this is the bar attr' assert x.element2 == 'this is the shared attr' assert y.element2 == 'this is the shared attr' def test_no_double_state(self): states = set() class Foo(object): def __init__(self): states.add(attributes.instance_state(self)) class Bar(Foo): def __init__(self): states.add(attributes.instance_state(self)) Foo.__init__(self) instrumentation.register_class(Foo) instrumentation.register_class(Bar) b = Bar() eq_(len(states), 1) eq_(list(states)[0].obj(), b) def test_inheritance2(self): """test that the attribute manager can properly traverse the managed attributes of an object, if the object is of a descendant class with managed attributes in the parent class""" class Foo(object): pass class Bar(Foo): pass class Element(object): _state = True instrumentation.register_class(Foo) instrumentation.register_class(Bar) attributes.register_attribute(Foo, 'element', uselist=False, useobject=True) el = Element() x = Bar() x.element = el eq_(attributes.get_state_history(attributes.instance_state(x), 'element'), ([el], (), ())) attributes.instance_state(x)._commit_all(attributes.instance_dict(x)) added, unchanged, deleted = \ attributes.get_state_history(attributes.instance_state(x), 'element') assert added == () assert unchanged == [el] def test_lazyhistory(self): """tests that history functions work with lazy-loading attributes""" class Foo(fixtures.BasicEntity): pass class Bar(fixtures.BasicEntity): pass instrumentation.register_class(Foo) instrumentation.register_class(Bar) bar1, bar2, bar3, bar4 = [Bar(id=1), Bar(id=2), Bar(id=3), Bar(id=4)] def func1(state, passive): return 'this is func 1' def func2(state, passive): return [bar1, bar2, bar3] attributes.register_attribute(Foo, 'col1', uselist=False, callable_=func1, useobject=True) attributes.register_attribute(Foo, 'col2', uselist=True, callable_=func2, useobject=True) attributes.register_attribute(Bar, 'id', uselist=False, useobject=True) x = Foo() attributes.instance_state(x)._commit_all(attributes.instance_dict(x)) x.col2.append(bar4) eq_(attributes.get_state_history(attributes.instance_state(x), 'col2'), ([bar4], [bar1, bar2, bar3], [])) def test_parenttrack(self): class Foo(object): pass class Bar(object): pass instrumentation.register_class(Foo) instrumentation.register_class(Bar) attributes.register_attribute(Foo, 'element', uselist=False, trackparent=True, useobject=True) attributes.register_attribute(Bar, 'element', uselist=False, trackparent=True, useobject=True) f1 = Foo() f2 = Foo() b1 = Bar() b2 = Bar() f1.element = b1 b2.element = f2 assert attributes.has_parent(Foo, b1, 'element') assert not attributes.has_parent(Foo, b2, 'element') assert not attributes.has_parent(Foo, f2, 'element') assert attributes.has_parent(Bar, f2, 'element') b2.element = None assert not attributes.has_parent(Bar, f2, 'element') # test that double assignment doesn't accidentally reset the # 'parent' flag. b3 = Bar() f4 = Foo() b3.element = f4 assert attributes.has_parent(Bar, f4, 'element') b3.element = f4 assert attributes.has_parent(Bar, f4, 'element') def test_descriptorattributes(self): """changeset: 1633 broke ability to use ORM to map classes with unusual descriptor attributes (for example, classes that inherit from ones implementing zope.interface.Interface). This is a simple regression test to prevent that defect. 
""" class des(object): def __get__(self, instance, owner): raise AttributeError('fake attribute') class Foo(object): A = des() instrumentation.register_class(Foo) instrumentation.unregister_class(Foo) def test_collectionclasses(self): class Foo(object): pass instrumentation.register_class(Foo) attributes.register_attribute(Foo, 'collection', uselist=True, typecallable=set, useobject=True) assert attributes.manager_of_class(Foo).is_instrumented('collection' ) assert isinstance(Foo().collection, set) attributes.unregister_attribute(Foo, 'collection') assert not attributes.manager_of_class(Foo).is_instrumented('collection' ) try: attributes.register_attribute(Foo, 'collection', uselist=True, typecallable=dict, useobject=True) assert False except sa_exc.ArgumentError, e: assert str(e) \ == 'Type InstrumentedDict must elect an appender '\ 'method to be a collection class' class MyDict(dict): @collection.appender def append(self, item): self[item.foo] = item @collection.remover def remove(self, item): del self[item.foo] attributes.register_attribute(Foo, 'collection', uselist=True, typecallable=MyDict, useobject=True) assert isinstance(Foo().collection, MyDict) attributes.unregister_attribute(Foo, 'collection') class MyColl(object): pass try: attributes.register_attribute(Foo, 'collection', uselist=True, typecallable=MyColl, useobject=True) assert False except sa_exc.ArgumentError, e: assert str(e) \ == 'Type MyColl must elect an appender method to be a '\ 'collection class' class MyColl(object): @collection.iterator def __iter__(self): return iter([]) @collection.appender def append(self, item): pass @collection.remover def remove(self, item): pass attributes.register_attribute(Foo, 'collection', uselist=True, typecallable=MyColl, useobject=True) try: Foo().collection assert True except sa_exc.ArgumentError, e: assert False class GetNoValueTest(fixtures.ORMTest): def _fixture(self, expected): class Foo(object): pass class Bar(object): pass def lazy_callable(state, passive): return expected instrumentation.register_class(Foo) instrumentation.register_class(Bar) if expected is not None: attributes.register_attribute(Foo, "attr", useobject=True, uselist=False, callable_=lazy_callable) else: attributes.register_attribute(Foo, "attr", useobject=True, uselist=False) f1 = Foo() return Foo.attr.impl,\ attributes.instance_state(f1), \ attributes.instance_dict(f1) def test_passive_no_result(self): attr, state, dict_ = self._fixture(attributes.PASSIVE_NO_RESULT) eq_( attr.get(state, dict_, passive=attributes.PASSIVE_NO_INITIALIZE), attributes.PASSIVE_NO_RESULT ) def test_passive_no_result_never_set(self): attr, state, dict_ = self._fixture(attributes.NEVER_SET) eq_( attr.get(state, dict_, passive=attributes.PASSIVE_NO_INITIALIZE), attributes.PASSIVE_NO_RESULT ) assert 'attr' not in dict_ def test_passive_ret_never_set_never_set(self): attr, state, dict_ = self._fixture(attributes.NEVER_SET) eq_( attr.get(state, dict_, passive=attributes.PASSIVE_RETURN_NEVER_SET), attributes.NEVER_SET ) assert 'attr' not in dict_ def test_passive_ret_never_set_empty(self): attr, state, dict_ = self._fixture(None) eq_( attr.get(state, dict_, passive=attributes.PASSIVE_RETURN_NEVER_SET), attributes.NEVER_SET ) assert 'attr' not in dict_ def test_off_empty(self): attr, state, dict_ = self._fixture(None) eq_( attr.get(state, dict_, passive=attributes.PASSIVE_OFF), None ) assert 'attr' in dict_ class UtilTest(fixtures.ORMTest): def test_helpers(self): class Foo(object): pass class Bar(object): pass 
instrumentation.register_class(Foo) instrumentation.register_class(Bar) attributes.register_attribute(Foo, "coll", uselist=True, useobject=True) f1 = Foo() b1 = Bar() b2 = Bar() coll = attributes.init_collection(f1, "coll") assert coll.data is f1.coll assert attributes.get_attribute(f1, "coll") is f1.coll attributes.set_attribute(f1, "coll", [b1]) assert f1.coll == [b1] eq_(attributes.get_history(f1, "coll"), ([b1], [], [])) attributes.set_committed_value(f1, "coll", [b2]) eq_(attributes.get_history(f1, "coll"), ((), [b2], ())) attributes.del_attribute(f1, "coll") assert "coll" not in f1.__dict__ class BackrefTest(fixtures.ORMTest): def test_m2m(self): class Student(object):pass class Course(object):pass instrumentation.register_class(Student) instrumentation.register_class(Course) attributes.register_attribute(Student, 'courses', uselist=True, backref="students", useobject=True) attributes.register_attribute(Course, 'students', uselist=True, backref="courses", useobject=True) s = Student() c = Course() s.courses.append(c) self.assert_(c.students == [s]) s.courses.remove(c) self.assert_(c.students == []) (s1, s2, s3) = (Student(), Student(), Student()) c.students = [s1, s2, s3] self.assert_(s2.courses == [c]) self.assert_(s1.courses == [c]) s1.courses.remove(c) self.assert_(c.students == [s2,s3]) def test_o2m(self): class Post(object):pass class Blog(object):pass instrumentation.register_class(Post) instrumentation.register_class(Blog) attributes.register_attribute(Post, 'blog', uselist=False, backref='posts', trackparent=True, useobject=True) attributes.register_attribute(Blog, 'posts', uselist=True, backref='blog', trackparent=True, useobject=True) b = Blog() (p1, p2, p3) = (Post(), Post(), Post()) b.posts.append(p1) b.posts.append(p2) b.posts.append(p3) self.assert_(b.posts == [p1, p2, p3]) self.assert_(p2.blog is b) p3.blog = None self.assert_(b.posts == [p1, p2]) p4 = Post() p4.blog = b self.assert_(b.posts == [p1, p2, p4]) p4.blog = b p4.blog = b self.assert_(b.posts == [p1, p2, p4]) # assert no failure removing None p5 = Post() p5.blog = None del p5.blog def test_o2o(self): class Port(object):pass class Jack(object):pass instrumentation.register_class(Port) instrumentation.register_class(Jack) attributes.register_attribute(Port, 'jack', uselist=False, useobject=True, backref="port") attributes.register_attribute(Jack, 'port', uselist=False, useobject=True, backref="jack") p = Port() j = Jack() p.jack = j self.assert_(j.port is p) self.assert_(p.jack is not None) j.port = None self.assert_(p.jack is None) def test_symmetric_o2o_inheritance(self): """Test that backref 'initiator' catching goes against a token that is global to all InstrumentedAttribute objects within a particular class, not just the indvidual IA object since we use distinct objects in an inheritance scenario. 
""" class Parent(object): pass class Child(object): pass class SubChild(Child): pass p_token = object() c_token = object() instrumentation.register_class(Parent) instrumentation.register_class(Child) instrumentation.register_class(SubChild) attributes.register_attribute(Parent, 'child', uselist=False, backref="parent", parent_token = p_token, useobject=True) attributes.register_attribute(Child, 'parent', uselist=False, backref="child", parent_token = c_token, useobject=True) attributes.register_attribute(SubChild, 'parent', uselist=False, backref="child", parent_token = c_token, useobject=True) p1 = Parent() c1 = Child() p1.child = c1 c2 = SubChild() c2.parent = p1 def test_symmetric_o2m_inheritance(self): class Parent(object): pass class SubParent(Parent): pass class Child(object): pass p_token = object() c_token = object() instrumentation.register_class(Parent) instrumentation.register_class(SubParent) instrumentation.register_class(Child) attributes.register_attribute(Parent, 'children', uselist=True, backref='parent', parent_token = p_token, useobject=True) attributes.register_attribute(SubParent, 'children', uselist=True, backref='parent', parent_token = p_token, useobject=True) attributes.register_attribute(Child, 'parent', uselist=False, backref='children', parent_token = c_token, useobject=True) p1 = Parent() p2 = SubParent() c1 = Child() p1.children.append(c1) assert c1.parent is p1 assert c1 in p1.children p2.children.append(c1) assert c1.parent is p2 # note its still in p1.children - # the event model currently allows only # one level deep. without the parent_token, # it keeps going until a ValueError is raised # and this condition changes. assert c1 in p1.children class CyclicBackrefAssertionTest(fixtures.TestBase): """test that infinite recursion due to incorrect backref assignments is blocked. 
""" def test_scalar_set_type_assertion(self): A, B, C = self._scalar_fixture() c1 = C() b1 = B() assert_raises_message( ValueError, 'Bidirectional attribute conflict detected: ' 'Passing object to attribute "C.a" ' 'triggers a modify event on attribute "C.b" ' 'via the backref "B.c".', setattr, c1, 'a', b1 ) def test_collection_append_type_assertion(self): A, B, C = self._collection_fixture() c1 = C() b1 = B() assert_raises_message( ValueError, 'Bidirectional attribute conflict detected: ' 'Passing object to attribute "C.a" ' 'triggers a modify event on attribute "C.b" ' 'via the backref "B.c".', c1.a.append, b1 ) def _scalar_fixture(self): class A(object): pass class B(object): pass class C(object): pass instrumentation.register_class(A) instrumentation.register_class(B) instrumentation.register_class(C) attributes.register_attribute(C, 'a', backref='c', useobject=True) attributes.register_attribute(C, 'b', backref='c', useobject=True) attributes.register_attribute(A, 'c', backref='a', useobject=True, uselist=True) attributes.register_attribute(B, 'c', backref='b', useobject=True, uselist=True) return A, B, C def _collection_fixture(self): class A(object): pass class B(object): pass class C(object): pass instrumentation.register_class(A) instrumentation.register_class(B) instrumentation.register_class(C) attributes.register_attribute(C, 'a', backref='c', useobject=True, uselist=True) attributes.register_attribute(C, 'b', backref='c', useobject=True, uselist=True) attributes.register_attribute(A, 'c', backref='a', useobject=True) attributes.register_attribute(B, 'c', backref='b', useobject=True) return A, B, C def _broken_collection_fixture(self): class A(object): pass class B(object): pass instrumentation.register_class(A) instrumentation.register_class(B) attributes.register_attribute(A, 'b', backref='a1', useobject=True) attributes.register_attribute(B, 'a1', backref='b', useobject=True, uselist=True) attributes.register_attribute(B, 'a2', backref='b', useobject=True, uselist=True) return A, B def test_broken_collection_assertion(self): A, B = self._broken_collection_fixture() b1 = B() a1 = A() assert_raises_message( ValueError, 'Bidirectional attribute conflict detected: ' 'Passing object to attribute "B.a2" ' 'triggers a modify event on attribute "B.a1" ' 'via the backref "A.b".', b1.a2.append, a1 ) class PendingBackrefTest(fixtures.ORMTest): def _fixture(self): class Post(object): def __init__(self, name): self.name = name __hash__ = None def __eq__(self, other): return other is not None and other.name == self.name class Blog(object): def __init__(self, name): self.name = name __hash__ = None def __eq__(self, other): return other is not None and other.name == self.name lazy_posts = Mock() instrumentation.register_class(Post) instrumentation.register_class(Blog) attributes.register_attribute(Post, 'blog', uselist=False, backref='posts', trackparent=True, useobject=True) attributes.register_attribute(Blog, 'posts', uselist=True, backref='blog', callable_=lazy_posts, trackparent=True, useobject=True) return Post, Blog, lazy_posts def test_lazy_add(self): Post, Blog, lazy_posts = self._fixture() p1, p2, p3 = Post("post 1"), Post("post 2"), Post("post 3") lazy_posts.return_value = attributes.PASSIVE_NO_RESULT b = Blog("blog 1") b1_state = attributes.instance_state(b) p = Post("post 4") p.blog = b eq_( lazy_posts.mock_calls, [ call(b1_state, attributes.PASSIVE_NO_FETCH) ] ) p = Post("post 5") # setting blog doesnt call 'posts' callable, calls with no fetch p.blog = b eq_( 
lazy_posts.mock_calls, [ call(b1_state, attributes.PASSIVE_NO_FETCH), call(b1_state, attributes.PASSIVE_NO_FETCH) ] ) lazy_posts.return_value = [p1, p2, p3] # calling backref calls the callable, populates extra posts eq_(b.posts, [p1, p2, p3, Post("post 4"), Post("post 5")]) eq_( lazy_posts.mock_calls, [ call(b1_state, attributes.PASSIVE_NO_FETCH), call(b1_state, attributes.PASSIVE_NO_FETCH), call(b1_state, attributes.PASSIVE_OFF) ] ) def test_lazy_history(self): Post, Blog, lazy_posts = self._fixture() p1, p2, p3 = Post("post 1"), Post("post 2"), Post("post 3") lazy_posts.return_value = [p1, p2, p3] b = Blog("blog 1") p = Post("post 4") p.blog = b p4 = Post("post 5") p4.blog = b eq_(lazy_posts.call_count, 1) eq_(attributes.instance_state(b). get_history('posts', attributes.PASSIVE_OFF), ([p, p4], [p1, p2, p3], [])) eq_(lazy_posts.call_count, 1) def test_passive_history_collection_never_set(self): Post, Blog, lazy_posts = self._fixture() lazy_posts.return_value = attributes.PASSIVE_NO_RESULT b = Blog("blog 1") p = Post("post 1") state, dict_ = attributes.instance_state(b), attributes.instance_dict(b) # this sets up NEVER_SET on b.posts p.blog = b eq_(state.committed_state, {"posts": attributes.NEVER_SET}) assert 'posts' not in dict_ # then suppose the object was made transient again, # the lazy loader would return this lazy_posts.return_value = attributes.ATTR_EMPTY p2 = Post('asdf') p2.blog = b eq_(state.committed_state, {"posts": attributes.NEVER_SET}) eq_(dict_['posts'], [p2]) # then this would fail. eq_( Blog.posts.impl.get_history(state, dict_, passive=True), ([p2], (), ()) ) eq_( Blog.posts.impl.get_all_pending(state, dict_), [(attributes.instance_state(p2), p2)] ) def test_state_on_add_remove(self): Post, Blog, lazy_posts = self._fixture() lazy_posts.return_value = attributes.PASSIVE_NO_RESULT b = Blog("blog 1") b1_state = attributes.instance_state(b) p = Post("post 1") p.blog = b eq_(lazy_posts.mock_calls, [call(b1_state, attributes.PASSIVE_NO_FETCH)]) p.blog = None eq_(lazy_posts.mock_calls, [call(b1_state, attributes.PASSIVE_NO_FETCH), call(b1_state, attributes.PASSIVE_NO_FETCH)]) lazy_posts.return_value = [] eq_(b.posts, []) eq_(lazy_posts.mock_calls, [call(b1_state, attributes.PASSIVE_NO_FETCH), call(b1_state, attributes.PASSIVE_NO_FETCH), call(b1_state, attributes.PASSIVE_OFF)]) def test_pending_combines_with_lazy(self): Post, Blog, lazy_posts = self._fixture() lazy_posts.return_value = attributes.PASSIVE_NO_RESULT b = Blog("blog 1") p = Post("post 1") p2 = Post("post 2") p.blog = b eq_(lazy_posts.call_count, 1) lazy_posts.return_value = [p, p2] # lazy loaded + pending get added together. # This isn't seen often with the ORM due # to usual practices surrounding the # load/flush/load cycle. 
eq_(b.posts, [p, p2, p]) eq_(lazy_posts.call_count, 2) def test_normal_load(self): Post, Blog, lazy_posts = self._fixture() lazy_posts.return_value = \ (p1, p2, p3) = [Post("post 1"), Post("post 2"), Post("post 3")] b = Blog("blog 1") # assign without using backref system p2.__dict__['blog'] = b eq_(b.posts, [Post("post 1"), Post("post 2"), Post("post 3")]) eq_(lazy_posts.call_count, 1) p2.blog = None p4 = Post("post 4") p4.blog = b eq_(b.posts, [Post("post 1"), Post("post 3"), Post("post 4")]) b_state = attributes.instance_state(b) eq_(lazy_posts.call_count, 1) eq_(lazy_posts.mock_calls, [call(b_state, attributes.PASSIVE_OFF)]) def test_commit_removes_pending(self): Post, Blog, lazy_posts = self._fixture() p1 = Post("post 1") lazy_posts.return_value = attributes.PASSIVE_NO_RESULT b = Blog("blog 1") p1.blog = b b_state = attributes.instance_state(b) p1_state = attributes.instance_state(p1) b_state._commit_all(attributes.instance_dict(b)) p1_state._commit_all(attributes.instance_dict(p1)) lazy_posts.return_value = [p1] eq_(b.posts, [Post("post 1")]) eq_(lazy_posts.mock_calls, [call(b_state, attributes.PASSIVE_NO_FETCH), call(b_state, attributes.PASSIVE_OFF)]) class HistoryTest(fixtures.TestBase): def _fixture(self, uselist, useobject, active_history, **kw): class Foo(fixtures.BasicEntity): pass instrumentation.register_class(Foo) attributes.register_attribute( Foo, 'someattr', uselist=uselist, useobject=useobject, active_history=active_history, **kw) return Foo def _two_obj_fixture(self, uselist): class Foo(fixtures.BasicEntity): pass class Bar(fixtures.BasicEntity): def __nonzero__(self): assert False instrumentation.register_class(Foo) instrumentation.register_class(Bar) attributes.register_attribute(Foo, 'someattr', uselist=uselist, useobject=True) return Foo, Bar def _someattr_history(self, f, **kw): return attributes.get_state_history( attributes.instance_state(f), 'someattr', **kw) def _commit_someattr(self, f): attributes.instance_state(f)._commit(attributes.instance_dict(f), ['someattr']) def _someattr_committed_state(self, f): Foo = f.__class__ return Foo.someattr.impl.get_committed_value( attributes.instance_state(f), attributes.instance_dict(f)) def test_committed_value_init(self): Foo = self._fixture(uselist=False, useobject=False, active_history=False) f = Foo() eq_(self._someattr_committed_state(f), None) def test_committed_value_set(self): Foo = self._fixture(uselist=False, useobject=False, active_history=False) f = Foo() f.someattr = 3 eq_(self._someattr_committed_state(f), None) def test_committed_value_set_commit(self): Foo = self._fixture(uselist=False, useobject=False, active_history=False) f = Foo() f.someattr = 3 self._commit_someattr(f) eq_(self._someattr_committed_state(f), 3) def test_scalar_init(self): Foo = self._fixture(uselist=False, useobject=False, active_history=False) f = Foo() eq_(self._someattr_history(f), ((), (), ())) def test_object_init(self): Foo = self._fixture(uselist=False, useobject=True, active_history=False) f = Foo() eq_(self._someattr_history(f), ((), (), ())) def test_object_init_active_history(self): Foo = self._fixture(uselist=False, useobject=True, active_history=True) f = Foo() eq_(self._someattr_history(f), ((), (), ())) def test_scalar_no_init_side_effect(self): Foo = self._fixture(uselist=False, useobject=False, active_history=False) f = Foo() self._someattr_history(f) # no side effects assert 'someattr' not in f.__dict__ assert 'someattr' not in attributes.instance_state(f).committed_state def test_scalar_set(self): Foo = 
self._fixture(uselist=False, useobject=False, active_history=False) f = Foo() f.someattr = 'hi' eq_(self._someattr_history(f), (['hi'], (), ())) def test_scalar_set_commit(self): Foo = self._fixture(uselist=False, useobject=False, active_history=False) f = Foo() f.someattr = 'hi' self._commit_someattr(f) eq_(self._someattr_history(f), ((), ['hi'], ())) def test_scalar_set_commit_reset(self): Foo = self._fixture(uselist=False, useobject=False, active_history=False) f = Foo() f.someattr = 'hi' self._commit_someattr(f) f.someattr = 'there' eq_(self._someattr_history(f), (['there'], (), ['hi'])) def test_scalar_set_commit_reset_commit(self): Foo = self._fixture(uselist=False, useobject=False, active_history=False) f = Foo() f.someattr = 'hi' self._commit_someattr(f) f.someattr = 'there' self._commit_someattr(f) eq_(self._someattr_history(f), ((), ['there'], ())) def test_scalar_set_commit_reset_commit_del(self): Foo = self._fixture(uselist=False, useobject=False, active_history=False) f = Foo() f.someattr = 'there' self._commit_someattr(f) del f.someattr eq_(self._someattr_history(f), ((), (), ['there'])) def test_scalar_set_dict(self): Foo = self._fixture(uselist=False, useobject=False, active_history=False) f = Foo() f.__dict__['someattr'] = 'new' eq_(self._someattr_history(f), ((), ['new'], ())) def test_scalar_set_dict_set(self): Foo = self._fixture(uselist=False, useobject=False, active_history=False) f = Foo() f.__dict__['someattr'] = 'new' self._someattr_history(f) f.someattr = 'old' eq_(self._someattr_history(f), (['old'], (), ['new'])) def test_scalar_set_dict_set_commit(self): Foo = self._fixture(uselist=False, useobject=False, active_history=False) f = Foo() f.__dict__['someattr'] = 'new' self._someattr_history(f) f.someattr = 'old' self._commit_someattr(f) eq_(self._someattr_history(f), ((), ['old'], ())) def test_scalar_set_None(self): Foo = self._fixture(uselist=False, useobject=False, active_history=False) f = Foo() f.someattr = None eq_(self._someattr_history(f), ([None], (), ())) def test_scalar_set_None_from_dict_set(self): Foo = self._fixture(uselist=False, useobject=False, active_history=False) f = Foo() f.__dict__['someattr'] = 'new' f.someattr = None eq_(self._someattr_history(f), ([None], (), ['new'])) def test_scalar_set_twice_no_commit(self): Foo = self._fixture(uselist=False, useobject=False, active_history=False) f = Foo() f.someattr = 'one' eq_(self._someattr_history(f), (['one'], (), ())) f.someattr = 'two' eq_(self._someattr_history(f), (['two'], (), ())) def test_scalar_active_init(self): Foo = self._fixture(uselist=False, useobject=False, active_history=True) f = Foo() eq_(self._someattr_history(f), ((), (), ())) def test_scalar_active_no_init_side_effect(self): Foo = self._fixture(uselist=False, useobject=False, active_history=True) f = Foo() self._someattr_history(f) # no side effects assert 'someattr' not in f.__dict__ assert 'someattr' not in attributes.instance_state(f).committed_state def test_collection_never_set(self): Foo = self._fixture(uselist=True, useobject=True, active_history=True) f = Foo() eq_(self._someattr_history(f, passive=True), ((), (), ())) def test_scalar_obj_never_set(self): Foo = self._fixture(uselist=False, useobject=True, active_history=True) f = Foo() eq_(self._someattr_history(f, passive=True), ((), (), ())) def test_scalar_never_set(self): Foo = self._fixture(uselist=False, useobject=False, active_history=True) f = Foo() eq_(self._someattr_history(f, passive=True), ((), (), ())) def test_scalar_active_set(self): Foo = 
self._fixture(uselist=False, useobject=False, active_history=True) f = Foo() f.someattr = 'hi' eq_(self._someattr_history(f), (['hi'], (), ())) def test_scalar_active_set_commit(self): Foo = self._fixture(uselist=False, useobject=False, active_history=True) f = Foo() f.someattr = 'hi' self._commit_someattr(f) eq_(self._someattr_history(f), ((), ['hi'], ())) def test_scalar_active_set_commit_reset(self): Foo = self._fixture(uselist=False, useobject=False, active_history=True) f = Foo() f.someattr = 'hi' self._commit_someattr(f) f.someattr = 'there' eq_(self._someattr_history(f), (['there'], (), ['hi'])) def test_scalar_active_set_commit_reset_commit(self): Foo = self._fixture(uselist=False, useobject=False, active_history=True) f = Foo() f.someattr = 'hi' self._commit_someattr(f) f.someattr = 'there' self._commit_someattr(f) eq_(self._someattr_history(f), ((), ['there'], ())) def test_scalar_active_set_commit_reset_commit_del(self): Foo = self._fixture(uselist=False, useobject=False, active_history=True) f = Foo() f.someattr = 'there' self._commit_someattr(f) del f.someattr eq_(self._someattr_history(f), ((), (), ['there'])) def test_scalar_active_set_dict(self): Foo = self._fixture(uselist=False, useobject=False, active_history=True) f = Foo() f.__dict__['someattr'] = 'new' eq_(self._someattr_history(f), ((), ['new'], ())) def test_scalar_active_set_dict_set(self): Foo = self._fixture(uselist=False, useobject=False, active_history=True) f = Foo() f.__dict__['someattr'] = 'new' self._someattr_history(f) f.someattr = 'old' eq_(self._someattr_history(f), (['old'], (), ['new'])) def test_scalar_active_set_dict_set_commit(self): Foo = self._fixture(uselist=False, useobject=False, active_history=True) f = Foo() f.__dict__['someattr'] = 'new' self._someattr_history(f) f.someattr = 'old' self._commit_someattr(f) eq_(self._someattr_history(f), ((), ['old'], ())) def test_scalar_active_set_None(self): Foo = self._fixture(uselist=False, useobject=False, active_history=True) f = Foo() f.someattr = None eq_(self._someattr_history(f), ([None], (), ())) def test_scalar_active_set_None_from_dict_set(self): Foo = self._fixture(uselist=False, useobject=False, active_history=True) f = Foo() f.__dict__['someattr'] = 'new' f.someattr = None eq_(self._someattr_history(f), ([None], (), ['new'])) def test_scalar_active_set_twice_no_commit(self): Foo = self._fixture(uselist=False, useobject=False, active_history=True) f = Foo() f.someattr = 'one' eq_(self._someattr_history(f), (['one'], (), ())) f.someattr = 'two' eq_(self._someattr_history(f), (['two'], (), ())) def test_scalar_inplace_mutation_set(self): Foo = self._fixture(uselist=False, useobject=False, active_history=False) f = Foo() f.someattr = {'a': 'b'} eq_(self._someattr_history(f), ([{'a': 'b'}], (), ())) def test_scalar_inplace_mutation_set_commit(self): Foo = self._fixture(uselist=False, useobject=False, active_history=False) f = Foo() f.someattr = {'a': 'b'} self._commit_someattr(f) eq_(self._someattr_history(f), ((), [{'a': 'b'}], ())) def test_scalar_inplace_mutation_set_commit_set(self): Foo = self._fixture(uselist=False, useobject=False, active_history=False) f = Foo() f.someattr = {'a': 'b'} self._commit_someattr(f) f.someattr['a'] = 'c' eq_(self._someattr_history(f), ((), [{'a': 'c'}], ())) def test_scalar_inplace_mutation_set_commit_flag_modified(self): Foo = self._fixture(uselist=False, useobject=False, active_history=False) f = Foo() f.someattr = {'a': 'b'} self._commit_someattr(f) attributes.flag_modified(f, 'someattr') 
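        # flag_modified() as exercised here is also the public hook for
        # values that are mutated in place (e.g. a dict stored in a
        # PickleType column), where no set event fires on the attribute.
        # A rough session-level sketch, assuming an illustrative mapped
        # class ``Document`` with a dict-valued ``settings`` attribute
        # (neither is part of this fixture):
        #
        #     from sqlalchemy.orm.attributes import flag_modified
        #
        #     doc = session.query(Document).first()
        #     doc.settings['theme'] = 'dark'     # in-place change, no event
        #     flag_modified(doc, 'settings')     # mark the attribute dirty
        #     session.commit()                   # the UPDATE is now emitted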
eq_(self._someattr_history(f), ([{'a': 'b'}], (), ())) def test_scalar_inplace_mutation_set_commit_set_flag_modified(self): Foo = self._fixture(uselist=False, useobject=False, active_history=False) f = Foo() f.someattr = {'a': 'b'} self._commit_someattr(f) f.someattr['a'] = 'c' attributes.flag_modified(f, 'someattr') eq_(self._someattr_history(f), ([{'a': 'c'}], (), ())) def test_scalar_inplace_mutation_set_commit_flag_modified_set(self): Foo = self._fixture(uselist=False, useobject=False, active_history=False) f = Foo() f.someattr = {'a': 'b'} self._commit_someattr(f) attributes.flag_modified(f, 'someattr') eq_(self._someattr_history(f), ([{'a': 'b'}], (), ())) f.someattr = ['a'] eq_(self._someattr_history(f), ([['a']], (), ())) def test_use_object_init(self): Foo, Bar = self._two_obj_fixture(uselist=False) f = Foo() eq_(self._someattr_history(f), ((), (), ())) def test_use_object_no_init_side_effect(self): Foo, Bar = self._two_obj_fixture(uselist=False) f = Foo() self._someattr_history(f) assert 'someattr' not in f.__dict__ assert 'someattr' not in attributes.instance_state(f).committed_state def test_use_object_set(self): Foo, Bar = self._two_obj_fixture(uselist=False) f = Foo() hi = Bar(name='hi') f.someattr = hi eq_(self._someattr_history(f), ([hi], (), ())) def test_use_object_set_commit(self): Foo, Bar = self._two_obj_fixture(uselist=False) f = Foo() hi = Bar(name='hi') f.someattr = hi self._commit_someattr(f) eq_(self._someattr_history(f), ((), [hi], ())) def test_use_object_set_commit_set(self): Foo, Bar = self._two_obj_fixture(uselist=False) f = Foo() hi = Bar(name='hi') f.someattr = hi self._commit_someattr(f) there = Bar(name='there') f.someattr = there eq_(self._someattr_history(f), ([there], (), [hi])) def test_use_object_set_commit_set_commit(self): Foo, Bar = self._two_obj_fixture(uselist=False) f = Foo() hi = Bar(name='hi') f.someattr = hi self._commit_someattr(f) there = Bar(name='there') f.someattr = there self._commit_someattr(f) eq_(self._someattr_history(f), ((), [there], ())) def test_use_object_set_commit_del(self): Foo, Bar = self._two_obj_fixture(uselist=False) f = Foo() hi = Bar(name='hi') f.someattr = hi self._commit_someattr(f) del f.someattr eq_(self._someattr_history(f), ((), (), [hi])) def test_use_object_set_dict(self): Foo, Bar = self._two_obj_fixture(uselist=False) f = Foo() hi = Bar(name='hi') f.__dict__['someattr'] = hi eq_(self._someattr_history(f), ((), [hi], ())) def test_use_object_set_dict_set(self): Foo, Bar = self._two_obj_fixture(uselist=False) f = Foo() hi = Bar(name='hi') f.__dict__['someattr'] = hi there = Bar(name='there') f.someattr = there eq_(self._someattr_history(f), ([there], (), [hi])) def test_use_object_set_dict_set_commit(self): Foo, Bar = self._two_obj_fixture(uselist=False) f = Foo() hi = Bar(name='hi') f.__dict__['someattr'] = hi there = Bar(name='there') f.someattr = there self._commit_someattr(f) eq_(self._someattr_history(f), ((), [there], ())) def test_use_object_set_None(self): Foo, Bar = self._two_obj_fixture(uselist=False) f = Foo() f.someattr = None eq_(self._someattr_history(f), ((), [None], ())) def test_use_object_set_dict_set_None(self): Foo, Bar = self._two_obj_fixture(uselist=False) f = Foo() hi =Bar(name='hi') f.__dict__['someattr'] = hi f.someattr = None eq_(self._someattr_history(f), ([None], (), [hi])) def test_use_object_set_value_twice(self): Foo, Bar = self._two_obj_fixture(uselist=False) f = Foo() hi = Bar(name='hi') there = Bar(name='there') f.someattr = hi f.someattr = there eq_(self._someattr_history(f), 
([there], (), ())) def test_object_collections_set(self): # TODO: break into individual tests Foo, Bar = self._two_obj_fixture(uselist=True) hi = Bar(name='hi') there = Bar(name='there') old = Bar(name='old') new = Bar(name='new') # case 1. new object f = Foo() eq_(attributes.get_state_history(attributes.instance_state(f), 'someattr'), ((), [], ())) f.someattr = [hi] eq_(attributes.get_state_history(attributes.instance_state(f), 'someattr'), ([hi], [], [])) self._commit_someattr(f) eq_(attributes.get_state_history(attributes.instance_state(f), 'someattr'), ((), [hi], ())) f.someattr = [there] eq_(attributes.get_state_history(attributes.instance_state(f), 'someattr'), ([there], [], [hi])) self._commit_someattr(f) eq_(attributes.get_state_history(attributes.instance_state(f), 'someattr'), ((), [there], ())) f.someattr = [hi] eq_(attributes.get_state_history(attributes.instance_state(f), 'someattr'), ([hi], [], [there])) f.someattr = [old, new] eq_(attributes.get_state_history(attributes.instance_state(f), 'someattr'), ([old, new], [], [there])) # case 2. object with direct settings (similar to a load # operation) f = Foo() collection = attributes.init_collection(f, 'someattr') collection.append_without_event(new) attributes.instance_state(f)._commit_all(attributes.instance_dict(f)) eq_(attributes.get_state_history(attributes.instance_state(f), 'someattr'), ((), [new], ())) f.someattr = [old] eq_(attributes.get_state_history(attributes.instance_state(f), 'someattr'), ([old], [], [new])) self._commit_someattr(f) eq_(attributes.get_state_history(attributes.instance_state(f), 'someattr'), ((), [old], ())) def test_dict_collections(self): # TODO: break into individual tests class Foo(fixtures.BasicEntity): pass class Bar(fixtures.BasicEntity): pass from sqlalchemy.orm.collections import attribute_mapped_collection instrumentation.register_class(Foo) instrumentation.register_class(Bar) attributes.register_attribute(Foo, 'someattr', uselist=True, useobject=True, typecallable=attribute_mapped_collection('name')) hi = Bar(name='hi') there = Bar(name='there') old = Bar(name='old') new = Bar(name='new') f = Foo() eq_(attributes.get_state_history(attributes.instance_state(f), 'someattr'), ((), [], ())) f.someattr['hi'] = hi eq_(attributes.get_state_history(attributes.instance_state(f), 'someattr'), ([hi], [], [])) f.someattr['there'] = there eq_(tuple([set(x) for x in attributes.get_state_history(attributes.instance_state(f), 'someattr')]), (set([hi, there]), set(), set())) self._commit_someattr(f) eq_(tuple([set(x) for x in attributes.get_state_history(attributes.instance_state(f), 'someattr')]), (set(), set([hi, there]), set())) def test_object_collections_mutate(self): # TODO: break into individual tests class Foo(fixtures.BasicEntity): pass class Bar(fixtures.BasicEntity): pass instrumentation.register_class(Foo) attributes.register_attribute(Foo, 'someattr', uselist=True, useobject=True) attributes.register_attribute(Foo, 'id', uselist=False, useobject=False) instrumentation.register_class(Bar) hi = Bar(name='hi') there = Bar(name='there') old = Bar(name='old') new = Bar(name='new') # case 1. 
new object f = Foo(id=1) eq_(attributes.get_state_history(attributes.instance_state(f), 'someattr'), ((), [], ())) f.someattr.append(hi) eq_(attributes.get_state_history(attributes.instance_state(f), 'someattr'), ([hi], [], [])) self._commit_someattr(f) eq_(attributes.get_state_history(attributes.instance_state(f), 'someattr'), ((), [hi], ())) f.someattr.append(there) eq_(attributes.get_state_history(attributes.instance_state(f), 'someattr'), ([there], [hi], [])) self._commit_someattr(f) eq_(attributes.get_state_history(attributes.instance_state(f), 'someattr'), ((), [hi, there], ())) f.someattr.remove(there) eq_(attributes.get_state_history(attributes.instance_state(f), 'someattr'), ([], [hi], [there])) f.someattr.append(old) f.someattr.append(new) eq_(attributes.get_state_history(attributes.instance_state(f), 'someattr'), ([old, new], [hi], [there])) attributes.instance_state(f)._commit(attributes.instance_dict(f), ['someattr']) eq_(attributes.get_state_history(attributes.instance_state(f), 'someattr'), ((), [hi, old, new], ())) f.someattr.pop(0) eq_(attributes.get_state_history(attributes.instance_state(f), 'someattr'), ([], [old, new], [hi])) # case 2. object with direct settings (similar to a load # operation) f = Foo() f.__dict__['id'] = 1 collection = attributes.init_collection(f, 'someattr') collection.append_without_event(new) attributes.instance_state(f)._commit_all(attributes.instance_dict(f)) eq_(attributes.get_state_history(attributes.instance_state(f), 'someattr'), ((), [new], ())) f.someattr.append(old) eq_(attributes.get_state_history(attributes.instance_state(f), 'someattr'), ([old], [new], [])) attributes.instance_state(f)._commit(attributes.instance_dict(f), ['someattr']) eq_(attributes.get_state_history(attributes.instance_state(f), 'someattr'), ((), [new, old], ())) f = Foo() collection = attributes.init_collection(f, 'someattr') collection.append_without_event(new) attributes.instance_state(f)._commit_all(attributes.instance_dict(f)) eq_(attributes.get_state_history(attributes.instance_state(f), 'someattr'), ((), [new], ())) f.id = 1 f.someattr.remove(new) eq_(attributes.get_state_history(attributes.instance_state(f), 'someattr'), ([], [], [new])) # case 3. mixing appends with sets f = Foo() f.someattr.append(hi) eq_(attributes.get_state_history(attributes.instance_state(f), 'someattr'), ([hi], [], [])) f.someattr.append(there) eq_(attributes.get_state_history(attributes.instance_state(f), 'someattr'), ([hi, there], [], [])) f.someattr = [there] eq_(attributes.get_state_history(attributes.instance_state(f), 'someattr'), ([there], [], [])) # case 4. 
ensure duplicates show up, order is maintained f = Foo() f.someattr.append(hi) f.someattr.append(there) f.someattr.append(hi) eq_(attributes.get_state_history(attributes.instance_state(f), 'someattr'), ([hi, there, hi], [], [])) attributes.instance_state(f)._commit_all(attributes.instance_dict(f)) eq_(attributes.get_state_history(attributes.instance_state(f), 'someattr'), ((), [hi, there, hi], ())) f.someattr = [] eq_(attributes.get_state_history(attributes.instance_state(f), 'someattr'), ([], [], [hi, there, hi])) def test_collections_via_backref(self): # TODO: break into individual tests class Foo(fixtures.BasicEntity): pass class Bar(fixtures.BasicEntity): pass instrumentation.register_class(Foo) instrumentation.register_class(Bar) attributes.register_attribute(Foo, 'bars', uselist=True, backref='foo', trackparent=True, useobject=True) attributes.register_attribute(Bar, 'foo', uselist=False, backref='bars', trackparent=True, useobject=True) f1 = Foo() b1 = Bar() eq_(attributes.get_state_history(attributes.instance_state(f1), 'bars'), ((), [], ())) eq_(attributes.get_state_history(attributes.instance_state(b1), 'foo'), ((), (), ())) # b1.foo = f1 f1.bars.append(b1) eq_(attributes.get_state_history(attributes.instance_state(f1), 'bars'), ([b1], [], [])) eq_(attributes.get_state_history(attributes.instance_state(b1), 'foo'), ([f1], (), ())) b2 = Bar() f1.bars.append(b2) eq_(attributes.get_state_history(attributes.instance_state(f1), 'bars'), ([b1, b2], [], [])) eq_(attributes.get_state_history(attributes.instance_state(b1), 'foo'), ([f1], (), ())) eq_(attributes.get_state_history(attributes.instance_state(b2), 'foo'), ([f1], (), ())) def test_deprecated_flags(self): assert_raises_message( sa_exc.SADeprecationWarning, "Passing True for 'passive' is deprecated. " "Use attributes.PASSIVE_NO_INITIALIZE", attributes.get_history, object(), 'foo', True ) assert_raises_message( sa_exc.SADeprecationWarning, "Passing False for 'passive' is deprecated. 
" "Use attributes.PASSIVE_OFF", attributes.get_history, object(), 'foo', False ) class LazyloadHistoryTest(fixtures.TestBase): def test_lazy_backref_collections(self): # TODO: break into individual tests class Foo(fixtures.BasicEntity): pass class Bar(fixtures.BasicEntity): pass lazy_load = [] def lazyload(state, passive): return lazy_load instrumentation.register_class(Foo) instrumentation.register_class(Bar) attributes.register_attribute(Foo, 'bars', uselist=True, backref='foo', trackparent=True, callable_=lazyload, useobject=True) attributes.register_attribute(Bar, 'foo', uselist=False, backref='bars', trackparent=True, useobject=True) bar1, bar2, bar3, bar4 = [Bar(id=1), Bar(id=2), Bar(id=3), Bar(id=4)] lazy_load = [bar1, bar2, bar3] f = Foo() bar4 = Bar() bar4.foo = f eq_(attributes.get_state_history(attributes.instance_state(f), 'bars'), ([bar4], [bar1, bar2, bar3], [])) lazy_load = None f = Foo() bar4 = Bar() bar4.foo = f eq_(attributes.get_state_history(attributes.instance_state(f), 'bars'), ([bar4], [], [])) lazy_load = [bar1, bar2, bar3] attributes.instance_state(f)._expire_attributes(attributes.instance_dict(f), ['bars']) eq_(attributes.get_state_history(attributes.instance_state(f), 'bars'), ((), [bar1, bar2, bar3], ())) def test_collections_via_lazyload(self): # TODO: break into individual tests class Foo(fixtures.BasicEntity): pass class Bar(fixtures.BasicEntity): pass lazy_load = [] def lazyload(state, passive): return lazy_load instrumentation.register_class(Foo) instrumentation.register_class(Bar) attributes.register_attribute(Foo, 'bars', uselist=True, callable_=lazyload, trackparent=True, useobject=True) bar1, bar2, bar3, bar4 = [Bar(id=1), Bar(id=2), Bar(id=3), Bar(id=4)] lazy_load = [bar1, bar2, bar3] f = Foo() f.bars = [] eq_(attributes.get_state_history(attributes.instance_state(f), 'bars'), ([], [], [bar1, bar2, bar3])) f = Foo() f.bars.append(bar4) eq_(attributes.get_state_history(attributes.instance_state(f), 'bars'), ([bar4], [bar1, bar2, bar3], [])) f = Foo() f.bars.remove(bar2) eq_(attributes.get_state_history(attributes.instance_state(f), 'bars'), ([], [bar1, bar3], [bar2])) f.bars.append(bar4) eq_(attributes.get_state_history(attributes.instance_state(f), 'bars'), ([bar4], [bar1, bar3], [bar2])) f = Foo() del f.bars[1] eq_(attributes.get_state_history(attributes.instance_state(f), 'bars'), ([], [bar1, bar3], [bar2])) lazy_load = None f = Foo() f.bars.append(bar2) eq_(attributes.get_state_history(attributes.instance_state(f), 'bars'), ([bar2], [], [])) def test_scalar_via_lazyload(self): # TODO: break into individual tests class Foo(fixtures.BasicEntity): pass lazy_load = None def lazyload(state, passive): return lazy_load instrumentation.register_class(Foo) attributes.register_attribute(Foo, 'bar', uselist=False, callable_=lazyload, useobject=False) lazy_load = 'hi' # with scalar non-object and active_history=False, the lazy # callable is only executed on gets, not history operations f = Foo() eq_(f.bar, 'hi') eq_(attributes.get_state_history(attributes.instance_state(f), 'bar'), ((), ['hi'], ())) f = Foo() f.bar = None eq_(attributes.get_state_history(attributes.instance_state(f), 'bar'), ([None], (), ())) f = Foo() f.bar = 'there' eq_(attributes.get_state_history(attributes.instance_state(f), 'bar'), (['there'], (), ())) f.bar = 'hi' eq_(attributes.get_state_history(attributes.instance_state(f), 'bar'), (['hi'], (), ())) f = Foo() eq_(f.bar, 'hi') del f.bar eq_(attributes.get_state_history(attributes.instance_state(f), 'bar'), ((), (), ['hi'])) assert f.bar 
is None eq_(attributes.get_state_history(attributes.instance_state(f), 'bar'), ([None], (), ['hi'])) def test_scalar_via_lazyload_with_active(self): # TODO: break into individual tests class Foo(fixtures.BasicEntity): pass lazy_load = None def lazyload(state, passive): return lazy_load instrumentation.register_class(Foo) attributes.register_attribute(Foo, 'bar', uselist=False, callable_=lazyload, useobject=False, active_history=True) lazy_load = 'hi' # active_history=True means the lazy callable is executed on set # as well as get, causing the old value to appear in the history f = Foo() eq_(f.bar, 'hi') eq_(attributes.get_state_history(attributes.instance_state(f), 'bar'), ((), ['hi'], ())) f = Foo() f.bar = None eq_(attributes.get_state_history(attributes.instance_state(f), 'bar'), ([None], (), ['hi'])) f = Foo() f.bar = 'there' eq_(attributes.get_state_history(attributes.instance_state(f), 'bar'), (['there'], (), ['hi'])) f.bar = 'hi' eq_(attributes.get_state_history(attributes.instance_state(f), 'bar'), ((), ['hi'], ())) f = Foo() eq_(f.bar, 'hi') del f.bar eq_(attributes.get_state_history(attributes.instance_state(f), 'bar'), ((), (), ['hi'])) assert f.bar is None eq_(attributes.get_state_history(attributes.instance_state(f), 'bar'), ([None], (), ['hi'])) def test_scalar_object_via_lazyload(self): # TODO: break into individual tests class Foo(fixtures.BasicEntity): pass class Bar(fixtures.BasicEntity): pass lazy_load = None def lazyload(state, passive): return lazy_load instrumentation.register_class(Foo) instrumentation.register_class(Bar) attributes.register_attribute(Foo, 'bar', uselist=False, callable_=lazyload, trackparent=True, useobject=True) bar1, bar2 = [Bar(id=1), Bar(id=2)] lazy_load = bar1 # with scalar object, the lazy callable is only executed on gets # and history operations f = Foo() eq_(attributes.get_state_history(attributes.instance_state(f), 'bar'), ((), [bar1], ())) f = Foo() f.bar = None eq_(attributes.get_state_history(attributes.instance_state(f), 'bar'), ([None], (), [bar1])) f = Foo() f.bar = bar2 eq_(attributes.get_state_history(attributes.instance_state(f), 'bar'), ([bar2], (), [bar1])) f.bar = bar1 eq_(attributes.get_state_history(attributes.instance_state(f), 'bar'), ((), [bar1], ())) f = Foo() eq_(f.bar, bar1) del f.bar eq_(attributes.get_state_history(attributes.instance_state(f), 'bar'), ((), (), [bar1])) assert f.bar is None eq_(attributes.get_state_history(attributes.instance_state(f), 'bar'), ([None], (), [bar1])) class ListenerTest(fixtures.ORMTest): def test_receive_changes(self): """test that Listeners can mutate the given value.""" class Foo(object): pass class Bar(object): pass def append(state, child, initiator): b2 = Bar() b2.data = b1.data + " appended" return b2 def on_set(state, value, oldvalue, initiator): return value + " modified" instrumentation.register_class(Foo) instrumentation.register_class(Bar) attributes.register_attribute(Foo, 'data', uselist=False, useobject=False) attributes.register_attribute(Foo, 'barlist', uselist=True, useobject=True) attributes.register_attribute(Foo, 'barset', typecallable=set, uselist=True, useobject=True) attributes.register_attribute(Bar, 'data', uselist=False, useobject=False) event.listen(Foo.data, 'set', on_set, retval=True) event.listen(Foo.barlist, 'append', append, retval=True) event.listen(Foo.barset, 'append', append, retval=True) f1 = Foo() f1.data = 'some data' eq_(f1.data, 'some data modified') b1 = Bar() b1.data = 'some bar' f1.barlist.append(b1) assert b1.data == 'some bar' assert 
f1.barlist[0].data == 'some bar appended' f1.barset.add(b1) assert f1.barset.pop().data == 'some bar appended' def test_none_on_collection_event(self): """test that append/remove of None in collections emits events. This is new behavior as of 0.8. """ class Foo(object): pass class Bar(object): pass instrumentation.register_class(Foo) instrumentation.register_class(Bar) attributes.register_attribute(Foo, 'barlist', uselist=True, useobject=True) canary = [] def append(state, child, initiator): canary.append((state, child)) def remove(state, child, initiator): canary.append((state, child)) event.listen(Foo.barlist, 'append', append) event.listen(Foo.barlist, 'remove', remove) b1, b2 = Bar(), Bar() f1 = Foo() f1.barlist.append(None) eq_(canary, [(f1, None)]) canary[:] = [] f1 = Foo() f1.barlist = [None, b2] eq_(canary, [(f1, None), (f1, b2)]) canary[:] = [] f1 = Foo() f1.barlist = [b1, None, b2] eq_(canary, [(f1, b1), (f1, None), (f1, b2)]) f1.barlist.remove(None) eq_(canary, [(f1, b1), (f1, None), (f1, b2), (f1, None)]) def test_propagate(self): classes = [None, None, None] canary = [] def make_a(): class A(object): pass classes[0] = A def make_b(): class B(classes[0]): pass classes[1] = B def make_c(): class C(classes[1]): pass classes[2] = C def instrument_a(): instrumentation.register_class(classes[0]) def instrument_b(): instrumentation.register_class(classes[1]) def instrument_c(): instrumentation.register_class(classes[2]) def attr_a(): attributes.register_attribute(classes[0], 'attrib', uselist=False, useobject=False) def attr_b(): attributes.register_attribute(classes[1], 'attrib', uselist=False, useobject=False) def attr_c(): attributes.register_attribute(classes[2], 'attrib', uselist=False, useobject=False) def set(state, value, oldvalue, initiator): canary.append(value) def events_a(): event.listen(classes[0].attrib, 'set', set, propagate=True) def teardown(): classes[:] = [None, None, None] canary[:] = [] ordering = [ (instrument_a, instrument_b), (instrument_b, instrument_c), (attr_a, attr_b), (attr_b, attr_c), (make_a, instrument_a), (instrument_a, attr_a), (attr_a, events_a), (make_b, instrument_b), (instrument_b, attr_b), (make_c, instrument_c), (instrument_c, attr_c), (make_a, make_b), (make_b, make_c) ] elements = [make_a, make_b, make_c, instrument_a, instrument_b, instrument_c, attr_a, attr_b, attr_c, events_a] for i, series in enumerate(all_partial_orderings(ordering, elements)): for fn in series: fn() b = classes[1]() b.attrib = "foo" eq_(b.attrib, "foo") eq_(canary, ["foo"]) c = classes[2]() c.attrib = "bar" eq_(c.attrib, "bar") eq_(canary, ["foo", "bar"]) teardown() class TestUnlink(fixtures.TestBase): def setUp(self): class A(object): pass class B(object): pass self.A = A self.B = B instrumentation.register_class(A) instrumentation.register_class(B) attributes.register_attribute(A, 'bs', uselist=True, useobject=True) def test_expired(self): A, B = self.A, self.B a1 = A() coll = a1.bs a1.bs.append(B()) state = attributes.instance_state(a1) state._expire(state.dict, set()) assert_raises( Warning, coll.append, B() ) def test_replaced(self): A, B = self.A, self.B a1 = A() coll = a1.bs a1.bs.append(B()) a1.bs = [] # a bulk replace empties the old collection assert len(coll) == 0 coll.append(B()) assert len(coll) == 1 def test_pop_existing(self): A, B = self.A, self.B a1 = A() coll = a1.bs a1.bs.append(B()) state = attributes.instance_state(a1) state._reset(state.dict, "bs") assert_raises( Warning, coll.append, B() ) def test_ad_hoc_lazy(self): A, B = self.A, self.B a1 = 
A() coll = a1.bs a1.bs.append(B()) state = attributes.instance_state(a1) _set_callable(state, state.dict, "bs", lambda: B()) assert_raises( Warning, coll.append, B() ) SQLAlchemy-0.8.4/test/orm/test_backref_mutations.py0000644000076500000240000005107412251150015023115 0ustar classicstaff00000000000000""" a series of tests which assert the behavior of moving objects between collections and scalar attributes resulting in the expected state w.r.t. backrefs, add/remove events, etc. there's a particular focus on collections that have "uselist=False", since in these cases the re-assignment of an attribute means the previous owner needs an UPDATE in the database. """ from sqlalchemy.testing import assert_raises, assert_raises_message from sqlalchemy import Integer, String, ForeignKey, Sequence, exc as sa_exc from sqlalchemy.testing.schema import Table from sqlalchemy.testing.schema import Column from sqlalchemy.orm import mapper, relationship, create_session, \ class_mapper, backref, sessionmaker, Session from sqlalchemy.orm import attributes, exc as orm_exc from sqlalchemy import testing from sqlalchemy.testing import eq_ from sqlalchemy.testing import fixtures from test.orm import _fixtures class O2MCollectionTest(_fixtures.FixtureTest): run_inserts = None @classmethod def setup_mappers(cls): Address, addresses, users, User = (cls.classes.Address, cls.tables.addresses, cls.tables.users, cls.classes.User) mapper(Address, addresses) mapper(User, users, properties = dict( addresses = relationship(Address, backref="user"), )) def test_collection_move_hitslazy(self): User, Address = self.classes.User, self.classes.Address sess = sessionmaker()() a1 = Address(email_address="address1") a2 = Address(email_address="address2") a3 = Address(email_address="address3") u1= User(name='jack', addresses=[a1, a2, a3]) u2= User(name='ed') sess.add_all([u1, a1, a2, a3]) sess.commit() #u1.addresses def go(): u2.addresses.append(a1) u2.addresses.append(a2) u2.addresses.append(a3) self.assert_sql_count(testing.db, go, 0) def test_collection_move_preloaded(self): User, Address = self.classes.User, self.classes.Address sess = sessionmaker()() a1 = Address(email_address="address1") u1 = User(name='jack', addresses=[a1]) u2 = User(name='ed') sess.add_all([u1, u2]) sess.commit() # everything is expired # load u1.addresses collection u1.addresses u2.addresses.append(a1) # backref fires assert a1.user is u2 # doesn't extend to the previous collection tho, # which was already loaded. # flushing at this point means its anyone's guess. 
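# as an aside, a minimal sketch (assuming the same User/Address fixture and an
# open Session named sess): expiring the already-loaded collection is one way
# to bring it back into agreement with the database on the next access.  the
# helper below is illustrative only and is never invoked here.
def _expire_stale_collection(sess, user):
    # mark user.addresses as expired; the next access emits a fresh SELECT
    sess.expire(user, ['addresses'])
    return user.addresses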
assert a1 in u1.addresses assert a1 in u2.addresses def test_collection_move_notloaded(self): User, Address = self.classes.User, self.classes.Address sess = sessionmaker()() a1 = Address(email_address="address1") u1 = User(name='jack', addresses=[a1]) u2 = User(name='ed') sess.add_all([u1, u2]) sess.commit() # everything is expired u2.addresses.append(a1) # backref fires assert a1.user is u2 # u1.addresses wasn't loaded, # so when it loads its correct assert a1 not in u1.addresses assert a1 in u2.addresses def test_collection_move_commitfirst(self): User, Address = self.classes.User, self.classes.Address sess = sessionmaker()() a1 = Address(email_address="address1") u1 = User(name='jack', addresses=[a1]) u2 = User(name='ed') sess.add_all([u1, u2]) sess.commit() # everything is expired # load u1.addresses collection u1.addresses u2.addresses.append(a1) # backref fires assert a1.user is u2 # everything expires, no changes in # u1.addresses, so all is fine sess.commit() assert a1 not in u1.addresses assert a1 in u2.addresses def test_scalar_move_preloaded(self): User, Address = self.classes.User, self.classes.Address sess = sessionmaker()() u1 = User(name='jack') u2 = User(name='ed') a1 = Address(email_address='a1') a1.user = u1 sess.add_all([u1, u2, a1]) sess.commit() # u1.addresses is loaded u1.addresses # direct set - the "old" is "fetched", # but only from the local session - not the # database, due to the PASSIVE_NO_FETCH flag. # this is a more fine grained behavior introduced # in 0.6 a1.user = u2 assert a1 not in u1.addresses assert a1 in u2.addresses def test_plain_load_passive(self): """test that many-to-one set doesn't load the old value.""" User, Address = self.classes.User, self.classes.Address sess = sessionmaker()() u1 = User(name='jack') u2 = User(name='ed') a1 = Address(email_address='a1') a1.user = u1 sess.add_all([u1, u2, a1]) sess.commit() # in this case, a lazyload would # ordinarily occur except for the # PASSIVE_NO_FETCH flag. 
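# for contrast, a hedged sketch of the opposite configuration (a hypothetical
# alternative mapping using the same users/addresses tables, defined but not
# applied to the already-mapped fixture classes): active_history=True on the
# many-to-one side makes the old value load at assignment time, i.e. it gives
# up the PASSIVE_NO_FETCH optimization that this test asserts.
def _active_history_mapping(User, Address, users, addresses):
    from sqlalchemy.orm import mapper, relationship, backref
    mapper(User, users)
    mapper(Address, addresses, properties={
        'user': relationship(User,
                             backref=backref('addresses'),
                             active_history=True)
    })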
def go(): a1.user = u2 self.assert_sql_count(testing.db, go, 0) assert a1 not in u1.addresses assert a1 in u2.addresses def test_set_none(self): User, Address = self.classes.User, self.classes.Address sess = sessionmaker()() u1 = User(name='jack') a1 = Address(email_address='a1') a1.user = u1 sess.add_all([u1, a1]) sess.commit() # works for None too def go(): a1.user = None self.assert_sql_count(testing.db, go, 0) assert a1 not in u1.addresses def test_scalar_move_notloaded(self): User, Address = self.classes.User, self.classes.Address sess = sessionmaker()() u1 = User(name='jack') u2 = User(name='ed') a1 = Address(email_address='a1') a1.user = u1 sess.add_all([u1, u2, a1]) sess.commit() # direct set - the fetching of the # "old" u1 here allows the backref # to remove it from the addresses collection a1.user = u2 assert a1 not in u1.addresses assert a1 in u2.addresses def test_scalar_move_commitfirst(self): User, Address = self.classes.User, self.classes.Address sess = sessionmaker()() u1 = User(name='jack') u2 = User(name='ed') a1 = Address(email_address='a1') a1.user = u1 sess.add_all([u1, u2, a1]) sess.commit() # u1.addresses is loaded u1.addresses # direct set - the fetching of the # "old" u1 here allows the backref # to remove it from the addresses collection a1.user = u2 sess.commit() assert a1 not in u1.addresses assert a1 in u2.addresses class O2OScalarBackrefMoveTest(_fixtures.FixtureTest): run_inserts = None @classmethod def setup_mappers(cls): Address, addresses, users, User = (cls.classes.Address, cls.tables.addresses, cls.tables.users, cls.classes.User) mapper(Address, addresses) mapper(User, users, properties = { 'address':relationship(Address, backref=backref("user"), uselist=False) }) def test_collection_move_preloaded(self): User, Address = self.classes.User, self.classes.Address sess = sessionmaker()() a1 = Address(email_address="address1") u1 = User(name='jack', address=a1) u2 = User(name='ed') sess.add_all([u1, u2]) sess.commit() # everything is expired # load u1.address u1.address # reassign u2.address = a1 assert u2.address is a1 # backref fires assert a1.user is u2 # doesn't extend to the previous attribute tho. # flushing at this point means its anyone's guess. 
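# a minimal sketch (assuming the same one-to-one User/Address fixture):
# attributes.get_history() exposes the pending change on the scalar side
# before anything is flushed, which is how the "anyone's guess" state can be
# inspected.  defined here for illustration only.
def _peek_scalar_history(user):
    from sqlalchemy.orm import attributes
    added, unchanged, deleted = attributes.get_history(user, 'address')
    # 'added' holds the newly assigned Address; 'deleted' holds the replaced
    # value when it was already present in memory
    return added, deleted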
assert u1.address is a1 assert u2.address is a1 def test_scalar_move_preloaded(self): User, Address = self.classes.User, self.classes.Address sess = sessionmaker()() a1 = Address(email_address="address1") a2 = Address(email_address="address1") u1 = User(name='jack', address=a1) sess.add_all([u1, a1, a2]) sess.commit() # everything is expired # load a1.user a1.user # reassign a2.user = u1 # backref fires assert u1.address is a2 # stays on both sides assert a1.user is u1 assert a2.user is u1 def test_collection_move_notloaded(self): User, Address = self.classes.User, self.classes.Address sess = sessionmaker()() a1 = Address(email_address="address1") u1 = User(name='jack', address=a1) u2 = User(name='ed') sess.add_all([u1, u2]) sess.commit() # everything is expired # reassign u2.address = a1 assert u2.address is a1 # backref fires assert a1.user is u2 # u1.address loads now after a flush assert u1.address is None assert u2.address is a1 def test_scalar_move_notloaded(self): User, Address = self.classes.User, self.classes.Address sess = sessionmaker()() a1 = Address(email_address="address1") a2 = Address(email_address="address1") u1 = User(name='jack', address=a1) sess.add_all([u1, a1, a2]) sess.commit() # everything is expired # reassign a2.user = u1 # backref fires assert u1.address is a2 # stays on both sides assert a1.user is u1 assert a2.user is u1 def test_collection_move_commitfirst(self): User, Address = self.classes.User, self.classes.Address sess = sessionmaker()() a1 = Address(email_address="address1") u1 = User(name='jack', address=a1) u2 = User(name='ed') sess.add_all([u1, u2]) sess.commit() # everything is expired # load u1.address u1.address # reassign u2.address = a1 assert u2.address is a1 # backref fires assert a1.user is u2 # the commit cancels out u1.addresses # being loaded, on next access its fine. sess.commit() assert u1.address is None assert u2.address is a1 def test_scalar_move_commitfirst(self): User, Address = self.classes.User, self.classes.Address sess = sessionmaker()() a1 = Address(email_address="address1") a2 = Address(email_address="address2") u1 = User(name='jack', address=a1) sess.add_all([u1, a1, a2]) sess.commit() # everything is expired # load assert a1.user is u1 # reassign a2.user = u1 # backref fires assert u1.address is a2 # didnt work this way tho assert a1.user is u1 # moves appropriately after commit sess.commit() assert u1.address is a2 assert a1.user is None assert a2.user is u1 class O2OScalarMoveTest(_fixtures.FixtureTest): run_inserts = None @classmethod def setup_mappers(cls): Address, addresses, users, User = (cls.classes.Address, cls.tables.addresses, cls.tables.users, cls.classes.User) mapper(Address, addresses) mapper(User, users, properties = { 'address':relationship(Address, uselist=False) }) def test_collection_move_commitfirst(self): User, Address = self.classes.User, self.classes.Address sess = sessionmaker()() a1 = Address(email_address="address1") u1 = User(name='jack', address=a1) u2 = User(name='ed') sess.add_all([u1, u2]) sess.commit() # everything is expired # load u1.address u1.address # reassign u2.address = a1 assert u2.address is a1 # the commit cancels out u1.addresses # being loaded, on next access its fine. 
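# the behavior relied on throughout these "commitfirst" tests comes from the
# Session's default expire_on_commit=True; a small sketch of the two settings
# (hypothetical factories, independent of the fixtures above):
def _expire_on_commit_variants():
    from sqlalchemy.orm import sessionmaker
    # default: every instance is expired after commit, so the next attribute
    # access re-loads current database state and stale collections resolve
    expiring = sessionmaker(expire_on_commit=True)
    # opt-out: instances keep their in-memory state across commit, so the
    # "anyone's guess" situations above would persist
    caching = sessionmaker(expire_on_commit=False)
    return expiring, caching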
sess.commit() assert u1.address is None assert u2.address is a1 class O2OScalarOrphanTest(_fixtures.FixtureTest): run_inserts = None @classmethod def setup_mappers(cls): Address, addresses, users, User = (cls.classes.Address, cls.tables.addresses, cls.tables.users, cls.classes.User) mapper(Address, addresses) mapper(User, users, properties = { 'address':relationship(Address, uselist=False, backref=backref('user', single_parent=True, cascade="all, delete-orphan")) }) def test_m2o_event(self): User, Address = self.classes.User, self.classes.Address sess = sessionmaker()() a1 = Address(email_address="address1") u1 = User(name='jack', address=a1) sess.add(u1) sess.commit() sess.expunge(u1) u2= User(name='ed') # the _SingleParent extension sets the backref get to "active" ! # u1 gets loaded and deleted u2.address = a1 sess.commit() assert sess.query(User).count() == 1 class M2MCollectionMoveTest(_fixtures.FixtureTest): run_inserts = None @classmethod def setup_mappers(cls): keywords, items, item_keywords, Keyword, Item = (cls.tables.keywords, cls.tables.items, cls.tables.item_keywords, cls.classes.Keyword, cls.classes.Item) mapper(Item, items, properties={ 'keywords':relationship(Keyword, secondary=item_keywords, backref='items') }) mapper(Keyword, keywords) def test_add_remove_pending_backref(self): """test that pending doesn't add an item that's not a net add.""" Item, Keyword = (self.classes.Item, self.classes.Keyword) session = Session(autoflush=False) i1 = Item(description='i1') session.add(i1) session.commit() session.expire(i1, ['keywords']) k1= Keyword(name='k1') k1.items.append(i1) k1.items.remove(i1) eq_(i1.keywords, []) def test_remove_add_pending_backref(self): """test that pending doesn't remove an item that's not a net remove.""" Item, Keyword = (self.classes.Item, self.classes.Keyword) session = Session(autoflush=False) k1= Keyword(name='k1') i1 = Item(description='i1', keywords=[k1]) session.add(i1) session.commit() session.expire(i1, ['keywords']) k1.items.remove(i1) k1.items.append(i1) eq_(i1.keywords, [k1]) def test_pending_combines_with_flushed(self): """test the combination of unflushed pending + lazy loaded from DB.""" Item, Keyword = (self.classes.Item, self.classes.Keyword) session = Session(testing.db, autoflush=False) k1 = Keyword(name='k1') k2 = Keyword(name='k2') i1 = Item(description='i1', keywords=[k1]) session.add(i1) session.add(k2) session.commit() k2.items.append(i1) # the pending # list is still here. eq_( set(attributes.instance_state(i1). 
_pending_mutations['keywords'].added_items), set([k2]) ) # because autoflush is off, k2 is still # coming in from pending eq_(i1.keywords, [k1, k2]) # prove it didn't flush eq_(session.scalar("select count(*) from item_keywords"), 1) # the pending collection was removed assert 'keywords' not in attributes.\ instance_state(i1).\ _pending_mutations def test_duplicate_adds(self): Item, Keyword = (self.classes.Item, self.classes.Keyword) session = Session(testing.db, autoflush=False) k1 = Keyword(name='k1') i1 = Item(description='i1', keywords=[k1]) session.add(i1) session.commit() k1.items.append(i1) eq_(i1.keywords, [k1, k1]) session.expire(i1, ['keywords']) k1.items.append(i1) eq_(i1.keywords, [k1, k1]) session.expire(i1, ['keywords']) k1.items.append(i1) eq_(i1.keywords, [k1, k1]) eq_(k1.items, [i1, i1, i1, i1]) session.commit() eq_(k1.items, [i1]) class M2MScalarMoveTest(_fixtures.FixtureTest): run_inserts = None @classmethod def setup_mappers(cls): keywords, items, item_keywords, Keyword, Item = (cls.tables.keywords, cls.tables.items, cls.tables.item_keywords, cls.classes.Keyword, cls.classes.Item) mapper(Item, items, properties={ 'keyword':relationship(Keyword, secondary=item_keywords, uselist=False, backref=backref("item", uselist=False)) }) mapper(Keyword, keywords) def test_collection_move_preloaded(self): Item, Keyword = self.classes.Item, self.classes.Keyword sess = sessionmaker()() k1 = Keyword(name='k1') i1 = Item(description='i1', keyword=k1) i2 = Item(description='i2') sess.add_all([i1, i2, k1]) sess.commit() # everything is expired # load i1.keyword assert i1.keyword is k1 i2.keyword = k1 assert k1.item is i2 # nothing happens. assert i1.keyword is k1 assert i2.keyword is k1 def test_collection_move_notloaded(self): Item, Keyword = self.classes.Item, self.classes.Keyword sess = sessionmaker()() k1 = Keyword(name='k1') i1 = Item(description='i1', keyword=k1) i2 = Item(description='i2') sess.add_all([i1, i2, k1]) sess.commit() # everything is expired i2.keyword = k1 assert k1.item is i2 assert i1.keyword is None assert i2.keyword is k1 def test_collection_move_commit(self): Item, Keyword = self.classes.Item, self.classes.Keyword sess = sessionmaker()() k1 = Keyword(name='k1') i1 = Item(description='i1', keyword=k1) i2 = Item(description='i2') sess.add_all([i1, i2, k1]) sess.commit() # everything is expired # load i1.keyword assert i1.keyword is k1 i2.keyword = k1 assert k1.item is i2 sess.commit() assert i1.keyword is None assert i2.keyword is k1 class O2MStaleBackrefTest(_fixtures.FixtureTest): run_inserts = None @classmethod def setup_mappers(cls): Address, addresses, users, User = (cls.classes.Address, cls.tables.addresses, cls.tables.users, cls.classes.User) mapper(Address, addresses) mapper(User, users, properties = dict( addresses = relationship(Address, backref="user"), )) def test_backref_pop_m2o(self): User, Address = self.classes.User, self.classes.Address u1 = User() u2 = User() a1 = Address() u1.addresses.append(a1) u2.addresses.append(a1) # events haven't updated # u1.addresses here. 
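# a hedged sketch (assuming the same User mapping with an 'addresses'
# relationship and 'user' backref): the append/remove attribute events are
# what drive these backref moves, and they can be observed directly with
# event.listen.  the helper is illustrative and never invoked here.
def _watch_collection_events(User):
    from sqlalchemy import event
    seen = []
    def on_append(target, value, initiator):
        seen.append(('append', value))
    def on_remove(target, value, initiator):
        seen.append(('remove', value))
    event.listen(User.addresses, 'append', on_append)
    event.listen(User.addresses, 'remove', on_remove)
    return seen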
u1.addresses.remove(a1) assert a1.user is u2 assert a1 in u2.addresses class M2MStaleBackrefTest(_fixtures.FixtureTest): run_inserts = None @classmethod def setup_mappers(cls): keywords, items, item_keywords, Keyword, Item = (cls.tables.keywords, cls.tables.items, cls.tables.item_keywords, cls.classes.Keyword, cls.classes.Item) mapper(Item, items, properties={ 'keywords':relationship(Keyword, secondary=item_keywords, backref='items') }) mapper(Keyword, keywords) def test_backref_pop_m2m(self): Keyword, Item = self.classes.Keyword, self.classes.Item k1 = Keyword() k2 = Keyword() i1 = Item() k1.items.append(i1) k2.items.append(i1) k2.items.append(i1) i1.keywords = [] k2.items.remove(i1) assert len(k2.items) == 0 SQLAlchemy-0.8.4/test/orm/test_bind.py0000644000076500000240000000342712251147172020342 0ustar classicstaff00000000000000from sqlalchemy.testing import assert_raises, assert_raises_message from sqlalchemy import MetaData, Integer from sqlalchemy.testing.schema import Table from sqlalchemy.testing.schema import Column from sqlalchemy.orm import mapper, create_session import sqlalchemy as sa from sqlalchemy import testing from sqlalchemy.testing import fixtures class BindTest(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): Table('test_table', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('data', Integer)) @classmethod def setup_classes(cls): class Foo(cls.Basic): pass @classmethod def setup_mappers(cls): test_table, Foo = cls.tables.test_table, cls.classes.Foo meta = MetaData() test_table.tometadata(meta) assert meta.tables['test_table'].bind is None mapper(Foo, meta.tables['test_table']) def test_session_bind(self): Foo = self.classes.Foo engine = self.metadata.bind for bind in (engine, engine.connect()): try: sess = create_session(bind=bind) assert sess.bind is bind f = Foo() sess.add(f) sess.flush() assert sess.query(Foo).get(f.id) is f finally: if hasattr(bind, 'close'): bind.close() def test_session_unbound(self): Foo = self.classes.Foo sess = create_session() sess.add(Foo()) assert_raises_message( sa.exc.UnboundExecutionError, ('Could not locate a bind configured on Mapper|Foo|test_table ' 'or this Session'), sess.flush) SQLAlchemy-0.8.4/test/orm/test_cascade.py0000644000076500000240000027271512251150015021007 0ustar classicstaff00000000000000 from sqlalchemy.testing import assert_raises, assert_raises_message from sqlalchemy import Integer, String, ForeignKey, Sequence, \ exc as sa_exc from sqlalchemy.testing.schema import Table, Column from sqlalchemy.orm import mapper, relationship, create_session, \ sessionmaker, class_mapper, backref, Session, util as orm_util,\ configure_mappers from sqlalchemy.orm.attributes import instance_state from sqlalchemy.orm import attributes, exc as orm_exc, object_mapper from sqlalchemy import testing from sqlalchemy.testing import eq_ from sqlalchemy.testing import fixtures from test.orm import _fixtures class CascadeArgTest(fixtures.MappedTest): run_inserts = None run_create_tables = None run_deletes = None @classmethod def define_tables(cls, metadata): Table('users', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', String(30), nullable=False), ) Table('addresses', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('user_id', Integer, ForeignKey('users.id')), Column('email_address', String(50), nullable=False), ) @classmethod def setup_classes(cls): class User(cls.Basic): pass class 
Address(cls.Basic): pass def test_delete_with_passive_deletes_all(self): User, Address = self.classes.User, self.classes.Address users, addresses = self.tables.users, self.tables.addresses mapper(User, users, properties={ 'addresses': relationship(Address, passive_deletes="all", cascade="all, delete-orphan") }) mapper(Address, addresses) assert_raises_message( sa_exc.ArgumentError, "On User.addresses, can't set passive_deletes='all' " "in conjunction with 'delete' or 'delete-orphan' cascade", configure_mappers ) def test_delete_orphan_without_delete(self): User, Address = self.classes.User, self.classes.Address users, addresses = self.tables.users, self.tables.addresses assert_raises_message( sa_exc.SAWarning, "The 'delete-orphan' cascade option requires 'delete'.", relationship, Address, cascade="save-update, delete-orphan" ) def test_bad_cascade(self): addresses, Address = self.tables.addresses, self.classes.Address mapper(Address, addresses) assert_raises_message( sa_exc.ArgumentError, r"Invalid cascade option\(s\): 'fake', 'fake2'", relationship, Address, cascade="fake, all, delete-orphan, fake2" ) def test_cascade_repr(self): eq_( repr(orm_util.CascadeOptions("all, delete-orphan")), "CascadeOptions('delete,delete-orphan,expunge," "merge,refresh-expire,save-update')" ) def test_cascade_immutable(self): assert isinstance( orm_util.CascadeOptions("all, delete-orphan"), frozenset) def test_cascade_assignable(self): User, Address = self.classes.User, self.classes.Address users, addresses = self.tables.users, self.tables.addresses rel = relationship(Address) eq_(rel.cascade, set(['save-update', 'merge'])) rel.cascade = "save-update, merge, expunge" eq_(rel.cascade, set(['save-update', 'merge', 'expunge'])) mapper(User, users, properties={ 'addresses': rel }) am = mapper(Address, addresses) configure_mappers() eq_(rel.cascade, set(['save-update', 'merge', 'expunge'])) assert ("addresses", User) not in am._delete_orphans rel.cascade = "all, delete, delete-orphan" assert ("addresses", User) in am._delete_orphans eq_(rel.cascade, set(['delete', 'delete-orphan', 'expunge', 'merge', 'refresh-expire', 'save-update']) ) class O2MCascadeDeleteOrphanTest(fixtures.MappedTest): run_inserts = None @classmethod def define_tables(cls, metadata): Table('users', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', String(30), nullable=False), ) Table('addresses', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('user_id', Integer, ForeignKey('users.id')), Column('email_address', String(50), nullable=False), ) Table('orders', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('user_id', Integer, ForeignKey('users.id'), nullable=False), Column('description', String(30)), ) Table("dingalings", metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('address_id', Integer, ForeignKey('addresses.id')), Column('data', String(30)) ) @classmethod def setup_classes(cls): class User(cls.Comparable): pass class Address(cls.Comparable): pass class Order(cls.Comparable): pass class Dingaling(cls.Comparable): pass @classmethod def setup_mappers(cls): users, Dingaling, Order, User, dingalings, Address, orders, addresses = (cls.tables.users, cls.classes.Dingaling, cls.classes.Order, cls.classes.User, cls.tables.dingalings, cls.classes.Address, cls.tables.orders, cls.tables.addresses) mapper(Address, addresses) mapper(Order, orders) mapper(User, users, properties={ 
'addresses':relationship(Address, cascade='all, delete-orphan', backref='user'), 'orders':relationship(Order, cascade='all, delete-orphan', order_by=orders.c.id) }) mapper(Dingaling, dingalings, properties={ 'address' : relationship(Address) }) def test_list_assignment_new(self): User, Order = self.classes.User, self.classes.Order sess = Session() u = User(name='jack', orders=[ Order(description='order 1'), Order(description='order 2')]) sess.add(u) sess.commit() eq_(u, User(name='jack', orders=[Order(description='order 1'), Order(description='order 2')])) def test_list_assignment_replace(self): User, Order = self.classes.User, self.classes.Order sess = Session() u = User(name='jack', orders=[ Order(description='someorder'), Order(description='someotherorder')]) sess.add(u) u.orders=[Order(description="order 3"), Order(description="order 4")] sess.commit() eq_(u, User(name='jack', orders=[Order(description="order 3"), Order(description="order 4")])) # order 1, order 2 have been deleted eq_(sess.query(Order).order_by(Order.id).all(), [Order(description="order 3"), Order(description="order 4")]) def test_standalone_orphan(self): Order = self.classes.Order sess = Session() o5 = Order(description="order 5") sess.add(o5) assert_raises(sa_exc.DBAPIError, sess.flush) def test_save_update_sends_pending(self): """test that newly added and deleted collection items are cascaded on save-update""" Order, User = self.classes.Order, self.classes.User sess = sessionmaker(expire_on_commit=False)() o1, o2, o3 = Order(description='o1'), Order(description='o2'), \ Order(description='o3') u = User(name='jack', orders=[o1, o2]) sess.add(u) sess.commit() sess.close() u.orders.append(o3) u.orders.remove(o1) sess.add(u) assert o1 in sess assert o2 in sess assert o3 in sess sess.commit() def test_remove_pending_from_collection(self): User, Order = self.classes.User, self.classes.Order sess = Session() u = User(name='jack') sess.add(u) sess.commit() o1 = Order() u.orders.append(o1) assert o1 in sess u.orders.remove(o1) assert o1 not in sess def test_delete(self): User, users, orders, Order = (self.classes.User, self.tables.users, self.tables.orders, self.classes.Order) sess = create_session() u = User(name='jack', orders=[Order(description='someorder'), Order(description='someotherorder')]) sess.add(u) sess.flush() sess.delete(u) sess.flush() assert users.count().scalar() == 0 assert orders.count().scalar() == 0 def test_delete_unloaded_collections(self): """Unloaded collections are still included in a delete-cascade by default.""" User, addresses, users, Address = (self.classes.User, self.tables.addresses, self.tables.users, self.classes.Address) sess = create_session() u = User(name='jack', addresses=[Address(email_address="address1"), Address(email_address="address2")]) sess.add(u) sess.flush() sess.expunge_all() assert addresses.count().scalar() == 2 assert users.count().scalar() == 1 u = sess.query(User).get(u.id) assert 'addresses' not in u.__dict__ sess.delete(u) sess.flush() assert addresses.count().scalar() == 0 assert users.count().scalar() == 0 def test_cascades_onlycollection(self): """Cascade only reaches instances that are still part of the collection, not those that have been removed""" User, Order, users, orders = (self.classes.User, self.classes.Order, self.tables.users, self.tables.orders) sess = create_session() u = User(name='jack', orders=[Order(description='someorder'), Order(description='someotherorder')]) sess.add(u) sess.flush() o = u.orders[0] del u.orders[0] sess.delete(u) assert u in 
sess.deleted assert o not in sess.deleted assert o in sess u2 = User(name='newuser', orders=[o]) sess.add(u2) sess.flush() sess.expunge_all() assert users.count().scalar() == 1 assert orders.count().scalar() == 1 eq_(sess.query(User).all(), [User(name='newuser', orders=[Order(description='someorder')])]) def test_cascade_nosideeffects(self): """test that cascade leaves the state of unloaded scalars/collections unchanged.""" Dingaling, User, Address = (self.classes.Dingaling, self.classes.User, self.classes.Address) sess = create_session() u = User(name='jack') sess.add(u) assert 'orders' not in u.__dict__ sess.flush() assert 'orders' not in u.__dict__ a = Address(email_address='foo@bar.com') sess.add(a) assert 'user' not in a.__dict__ a.user = u sess.flush() d = Dingaling(data='d1') d.address_id = a.id sess.add(d) assert 'address' not in d.__dict__ sess.flush() assert d.address is a def test_cascade_delete_plusorphans(self): User, users, orders, Order = (self.classes.User, self.tables.users, self.tables.orders, self.classes.Order) sess = create_session() u = User(name='jack', orders=[Order(description='someorder'), Order(description='someotherorder')]) sess.add(u) sess.flush() assert users.count().scalar() == 1 assert orders.count().scalar() == 2 del u.orders[0] sess.delete(u) sess.flush() assert users.count().scalar() == 0 assert orders.count().scalar() == 0 def test_collection_orphans(self): User, users, orders, Order = (self.classes.User, self.tables.users, self.tables.orders, self.classes.Order) sess = create_session() u = User(name='jack', orders=[Order(description='someorder'), Order(description='someotherorder')]) sess.add(u) sess.flush() assert users.count().scalar() == 1 assert orders.count().scalar() == 2 u.orders[:] = [] sess.flush() assert users.count().scalar() == 1 assert orders.count().scalar() == 0 class O2MCascadeTest(fixtures.MappedTest): run_inserts = None @classmethod def define_tables(cls, metadata): Table('users', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', String(30), nullable=False), ) Table('addresses', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('user_id', Integer, ForeignKey('users.id')), Column('email_address', String(50), nullable=False), ) @classmethod def setup_classes(cls): class User(cls.Comparable): pass class Address(cls.Comparable): pass @classmethod def setup_mappers(cls): users, User, Address, addresses = ( cls.tables.users, cls.classes.User, cls.classes.Address, cls.tables.addresses) mapper(Address, addresses) mapper(User, users, properties={ 'addresses':relationship(Address, backref="user"), }) def test_none_o2m_collection_assignment(self): User, Address = self.classes.User, self.classes.Address s = Session() u1 = User(name='u', addresses=[None]) s.add(u1) eq_(u1.addresses, [None]) assert_raises_message( orm_exc.FlushError, "Can't flush None value found in collection User.addresses", s.commit ) eq_(u1.addresses, [None]) def test_none_o2m_collection_append(self): User, Address = self.classes.User, self.classes.Address s = Session() u1 = User(name='u') s.add(u1) u1.addresses.append(None) eq_(u1.addresses, [None]) assert_raises_message( orm_exc.FlushError, "Can't flush None value found in collection User.addresses", s.commit ) eq_(u1.addresses, [None]) class O2MCascadeDeleteNoOrphanTest(fixtures.MappedTest): run_inserts = None @classmethod def define_tables(cls, metadata): Table('users', metadata, Column('id', Integer, primary_key=True, 
test_needs_autoincrement=True), Column('name', String(30)) ) Table('orders', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('user_id', Integer, ForeignKey('users.id')), Column('description', String(30)) ) @classmethod def setup_classes(cls): class User(cls.Comparable): pass class Order(cls.Comparable): pass @classmethod def setup_mappers(cls): User, Order, orders, users = (cls.classes.User, cls.classes.Order, cls.tables.orders, cls.tables.users) mapper(User, users, properties = dict( orders = relationship( mapper(Order, orders), cascade="all") )) def test_cascade_delete_noorphans(self): User, Order, orders, users = (self.classes.User, self.classes.Order, self.tables.orders, self.tables.users) sess = create_session() u = User(name='jack', orders=[Order(description='someorder'), Order(description='someotherorder')]) sess.add(u) sess.flush() assert users.count().scalar() == 1 assert orders.count().scalar() == 2 del u.orders[0] sess.delete(u) sess.flush() assert users.count().scalar() == 0 assert orders.count().scalar() == 1 class O2OSingleParentTest(_fixtures.FixtureTest): run_inserts = None @classmethod def setup_mappers(cls): Address, addresses, users, User = (cls.classes.Address, cls.tables.addresses, cls.tables.users, cls.classes.User) mapper(Address, addresses) mapper(User, users, properties={'address' : relationship(Address, backref=backref('user', single_parent=True), uselist=False)}) def test_single_parent_raise(self): User, Address = self.classes.User, self.classes.Address a1 = Address(email_address='some address') u1 = User(name='u1', address=a1) assert_raises(sa_exc.InvalidRequestError, Address, email_address='asd', user=u1) a2 = Address(email_address='asd') u1.address = a2 assert u1.address is not a1 assert a1.user is None class NoSaveCascadeFlushTest(_fixtures.FixtureTest): """Test related item not present in session, commit proceeds.""" run_inserts = None def _one_to_many_fixture(self, o2m_cascade=True, m2o_cascade=True, o2m=False, m2o=False, o2m_cascade_backrefs=True, m2o_cascade_backrefs=True): Address, addresses, users, User = (self.classes.Address, self.tables.addresses, self.tables.users, self.classes.User) if o2m: if m2o: addresses_rel = {'addresses':relationship( Address, cascade_backrefs=o2m_cascade_backrefs, cascade=o2m_cascade and 'save-update' or '', backref=backref('user', cascade=m2o_cascade and 'save-update' or '', cascade_backrefs=m2o_cascade_backrefs ) )} else: addresses_rel = {'addresses':relationship( Address, cascade=o2m_cascade and 'save-update' or '', cascade_backrefs=o2m_cascade_backrefs, )} user_rel = {} elif m2o: user_rel = {'user':relationship(User, cascade=m2o_cascade and 'save-update' or '', cascade_backrefs=m2o_cascade_backrefs )} addresses_rel = {} else: addresses_rel = {} user_rel = {} mapper(User, users, properties=addresses_rel) mapper(Address, addresses, properties=user_rel) def _many_to_many_fixture(self, fwd_cascade=True, bkd_cascade=True, fwd=False, bkd=False, fwd_cascade_backrefs=True, bkd_cascade_backrefs=True): keywords, items, item_keywords, Keyword, Item = (self.tables.keywords, self.tables.items, self.tables.item_keywords, self.classes.Keyword, self.classes.Item) if fwd: if bkd: keywords_rel = {'keywords':relationship( Keyword, secondary=item_keywords, cascade_backrefs=fwd_cascade_backrefs, cascade=fwd_cascade and 'save-update' or '', backref=backref('items', cascade=bkd_cascade and 'save-update' or '', cascade_backrefs=bkd_cascade_backrefs ) )} else: keywords_rel = {'keywords':relationship( 
Keyword, secondary=item_keywords, cascade=fwd_cascade and 'save-update' or '', cascade_backrefs=fwd_cascade_backrefs, )} items_rel = {} elif bkd: items_rel = {'items':relationship(Item, secondary=item_keywords, cascade=bkd_cascade and 'save-update' or '', cascade_backrefs=bkd_cascade_backrefs )} keywords_rel = {} else: keywords_rel = {} items_rel = {} mapper(Item, items, properties=keywords_rel) mapper(Keyword, keywords, properties=items_rel) def test_o2m_only_child_pending(self): User, Address = self.classes.User, self.classes.Address self._one_to_many_fixture(o2m=True, m2o=False) sess = Session() u1 = User(name='u1') a1 = Address(email_address='a1') u1.addresses.append(a1) sess.add(u1) assert u1 in sess assert a1 in sess sess.flush() def test_o2m_only_child_transient(self): User, Address = self.classes.User, self.classes.Address self._one_to_many_fixture(o2m=True, m2o=False, o2m_cascade=False) sess = Session() u1 = User(name='u1') a1 = Address(email_address='a1') u1.addresses.append(a1) sess.add(u1) assert u1 in sess assert a1 not in sess assert_raises_message( sa_exc.SAWarning, "not in session", sess.flush ) def test_o2m_only_child_persistent(self): User, Address = self.classes.User, self.classes.Address self._one_to_many_fixture(o2m=True, m2o=False, o2m_cascade=False) sess = Session() u1 = User(name='u1') a1 = Address(email_address='a1') sess.add(a1) sess.flush() sess.expunge_all() u1.addresses.append(a1) sess.add(u1) assert u1 in sess assert a1 not in sess assert_raises_message( sa_exc.SAWarning, "not in session", sess.flush ) def test_o2m_backref_child_pending(self): User, Address = self.classes.User, self.classes.Address self._one_to_many_fixture(o2m=True, m2o=True) sess = Session() u1 = User(name='u1') a1 = Address(email_address='a1') u1.addresses.append(a1) sess.add(u1) assert u1 in sess assert a1 in sess sess.flush() def test_o2m_backref_child_transient(self): User, Address = self.classes.User, self.classes.Address self._one_to_many_fixture(o2m=True, m2o=True, o2m_cascade=False) sess = Session() u1 = User(name='u1') a1 = Address(email_address='a1') u1.addresses.append(a1) sess.add(u1) assert u1 in sess assert a1 not in sess assert_raises_message( sa_exc.SAWarning, "not in session", sess.flush ) def test_o2m_backref_child_transient_nochange(self): User, Address = self.classes.User, self.classes.Address self._one_to_many_fixture(o2m=True, m2o=True, o2m_cascade=False) sess = Session() u1 = User(name='u1') a1 = Address(email_address='a1') u1.addresses.append(a1) sess.add(u1) assert u1 in sess assert a1 not in sess @testing.emits_warning(r'.*not in session') def go(): sess.commit() go() eq_(u1.addresses, []) def test_o2m_backref_child_expunged(self): User, Address = self.classes.User, self.classes.Address self._one_to_many_fixture(o2m=True, m2o=True, o2m_cascade=False) sess = Session() u1 = User(name='u1') a1 = Address(email_address='a1') sess.add(a1) sess.flush() sess.add(u1) u1.addresses.append(a1) sess.expunge(a1) assert u1 in sess assert a1 not in sess assert_raises_message( sa_exc.SAWarning, "not in session", sess.flush ) def test_o2m_backref_child_expunged_nochange(self): User, Address = self.classes.User, self.classes.Address self._one_to_many_fixture(o2m=True, m2o=True, o2m_cascade=False) sess = Session() u1 = User(name='u1') a1 = Address(email_address='a1') sess.add(a1) sess.flush() sess.add(u1) u1.addresses.append(a1) sess.expunge(a1) assert u1 in sess assert a1 not in sess @testing.emits_warning(r'.*not in session') def go(): sess.commit() go() eq_(u1.addresses, []) def 
test_m2o_only_child_pending(self): User, Address = self.classes.User, self.classes.Address self._one_to_many_fixture(o2m=False, m2o=True) sess = Session() u1 = User(name='u1') a1 = Address(email_address='a1') a1.user = u1 sess.add(a1) assert u1 in sess assert a1 in sess sess.flush() def test_m2o_only_child_transient(self): User, Address = self.classes.User, self.classes.Address self._one_to_many_fixture(o2m=False, m2o=True, m2o_cascade=False) sess = Session() u1 = User(name='u1') a1 = Address(email_address='a1') a1.user = u1 sess.add(a1) assert u1 not in sess assert a1 in sess assert_raises_message( sa_exc.SAWarning, "not in session", sess.flush ) def test_m2o_only_child_expunged(self): User, Address = self.classes.User, self.classes.Address self._one_to_many_fixture(o2m=False, m2o=True, m2o_cascade=False) sess = Session() u1 = User(name='u1') sess.add(u1) sess.flush() a1 = Address(email_address='a1') a1.user = u1 sess.add(a1) sess.expunge(u1) assert u1 not in sess assert a1 in sess assert_raises_message( sa_exc.SAWarning, "not in session", sess.flush ) def test_m2o_backref_child_pending(self): User, Address = self.classes.User, self.classes.Address self._one_to_many_fixture(o2m=True, m2o=True) sess = Session() u1 = User(name='u1') a1 = Address(email_address='a1') a1.user = u1 sess.add(a1) assert u1 in sess assert a1 in sess sess.flush() def test_m2o_backref_child_transient(self): User, Address = self.classes.User, self.classes.Address self._one_to_many_fixture(o2m=True, m2o=True, m2o_cascade=False) sess = Session() u1 = User(name='u1') a1 = Address(email_address='a1') a1.user = u1 sess.add(a1) assert u1 not in sess assert a1 in sess assert_raises_message( sa_exc.SAWarning, "not in session", sess.flush ) def test_m2o_backref_child_expunged(self): User, Address = self.classes.User, self.classes.Address self._one_to_many_fixture(o2m=True, m2o=True, m2o_cascade=False) sess = Session() u1 = User(name='u1') sess.add(u1) sess.flush() a1 = Address(email_address='a1') a1.user = u1 sess.add(a1) sess.expunge(u1) assert u1 not in sess assert a1 in sess assert_raises_message( sa_exc.SAWarning, "not in session", sess.flush ) def test_m2o_backref_child_pending_nochange(self): User, Address = self.classes.User, self.classes.Address self._one_to_many_fixture(o2m=True, m2o=True, m2o_cascade=False) sess = Session() u1 = User(name='u1') a1 = Address(email_address='a1') a1.user = u1 sess.add(a1) assert u1 not in sess assert a1 in sess @testing.emits_warning(r'.*not in session') def go(): sess.commit() go() # didn't get flushed assert a1.user is None def test_m2o_backref_child_expunged_nochange(self): User, Address = self.classes.User, self.classes.Address self._one_to_many_fixture(o2m=True, m2o=True, m2o_cascade=False) sess = Session() u1 = User(name='u1') sess.add(u1) sess.flush() a1 = Address(email_address='a1') a1.user = u1 sess.add(a1) sess.expunge(u1) assert u1 not in sess assert a1 in sess @testing.emits_warning(r'.*not in session') def go(): sess.commit() go() # didn't get flushed assert a1.user is None def test_m2m_only_child_pending(self): Item, Keyword = self.classes.Item, self.classes.Keyword self._many_to_many_fixture(fwd=True, bkd=False) sess = Session() i1 = Item(description='i1') k1 = Keyword(name='k1') i1.keywords.append(k1) sess.add(i1) assert i1 in sess assert k1 in sess sess.flush() def test_m2m_only_child_transient(self): Item, Keyword = self.classes.Item, self.classes.Keyword self._many_to_many_fixture(fwd=True, bkd=False, fwd_cascade=False) sess = Session() i1 = Item(description='i1') k1 
= Keyword(name='k1') i1.keywords.append(k1) sess.add(i1) assert i1 in sess assert k1 not in sess assert_raises_message( sa_exc.SAWarning, "not in session", sess.flush ) def test_m2m_only_child_persistent(self): Item, Keyword = self.classes.Item, self.classes.Keyword self._many_to_many_fixture(fwd=True, bkd=False, fwd_cascade=False) sess = Session() i1 = Item(description='i1') k1 = Keyword(name='k1') sess.add(k1) sess.flush() sess.expunge_all() i1.keywords.append(k1) sess.add(i1) assert i1 in sess assert k1 not in sess assert_raises_message( sa_exc.SAWarning, "not in session", sess.flush ) def test_m2m_backref_child_pending(self): Item, Keyword = self.classes.Item, self.classes.Keyword self._many_to_many_fixture(fwd=True, bkd=True) sess = Session() i1 = Item(description='i1') k1 = Keyword(name='k1') i1.keywords.append(k1) sess.add(i1) assert i1 in sess assert k1 in sess sess.flush() def test_m2m_backref_child_transient(self): Item, Keyword = self.classes.Item, self.classes.Keyword self._many_to_many_fixture(fwd=True, bkd=True, fwd_cascade=False) sess = Session() i1 = Item(description='i1') k1 = Keyword(name='k1') i1.keywords.append(k1) sess.add(i1) assert i1 in sess assert k1 not in sess assert_raises_message( sa_exc.SAWarning, "not in session", sess.flush ) def test_m2m_backref_child_transient_nochange(self): Item, Keyword = self.classes.Item, self.classes.Keyword self._many_to_many_fixture(fwd=True, bkd=True, fwd_cascade=False) sess = Session() i1 = Item(description='i1') k1 = Keyword(name='k1') i1.keywords.append(k1) sess.add(i1) assert i1 in sess assert k1 not in sess @testing.emits_warning(r'.*not in session') def go(): sess.commit() go() eq_(i1.keywords, []) def test_m2m_backref_child_expunged(self): Item, Keyword = self.classes.Item, self.classes.Keyword self._many_to_many_fixture(fwd=True, bkd=True, fwd_cascade=False) sess = Session() i1 = Item(description='i1') k1 = Keyword(name='k1') sess.add(k1) sess.flush() sess.add(i1) i1.keywords.append(k1) sess.expunge(k1) assert i1 in sess assert k1 not in sess assert_raises_message( sa_exc.SAWarning, "not in session", sess.flush ) def test_m2m_backref_child_expunged_nochange(self): Item, Keyword = self.classes.Item, self.classes.Keyword self._many_to_many_fixture(fwd=True, bkd=True, fwd_cascade=False) sess = Session() i1 = Item(description='i1') k1 = Keyword(name='k1') sess.add(k1) sess.flush() sess.add(i1) i1.keywords.append(k1) sess.expunge(k1) assert i1 in sess assert k1 not in sess @testing.emits_warning(r'.*not in session') def go(): sess.commit() go() eq_(i1.keywords, []) class NoSaveCascadeBackrefTest(_fixtures.FixtureTest): """test that backrefs don't force save-update cascades to occur when the cascade initiated from the forwards side.""" def test_unidirectional_cascade_o2m(self): User, Order, users, orders = (self.classes.User, self.classes.Order, self.tables.users, self.tables.orders) mapper(Order, orders) mapper(User, users, properties = dict( orders = relationship( Order, backref=backref("user", cascade=None)) )) sess = create_session() o1 = Order() sess.add(o1) u1 = User(orders=[o1]) assert u1 not in sess assert o1 in sess sess.expunge_all() o1 = Order() u1 = User(orders=[o1]) sess.add(o1) assert u1 not in sess assert o1 in sess def test_unidirectional_cascade_m2o(self): User, Order, users, orders = (self.classes.User, self.classes.Order, self.tables.users, self.tables.orders) mapper(Order, orders, properties={ 'user':relationship(User, backref=backref("orders", cascade=None)) }) mapper(User, users) sess = create_session() u1 
= User() sess.add(u1) o1 = Order() o1.user = u1 assert o1 not in sess assert u1 in sess sess.expunge_all() u1 = User() o1 = Order() o1.user = u1 sess.add(u1) assert o1 not in sess assert u1 in sess def test_unidirectional_cascade_m2m(self): keywords, items, item_keywords, Keyword, Item = (self.tables.keywords, self.tables.items, self.tables.item_keywords, self.classes.Keyword, self.classes.Item) mapper(Item, items, properties={'keywords' : relationship(Keyword, secondary=item_keywords, cascade='none', backref='items')}) mapper(Keyword, keywords) sess = create_session() i1 = Item() k1 = Keyword() sess.add(i1) i1.keywords.append(k1) assert i1 in sess assert k1 not in sess sess.expunge_all() i1 = Item() k1 = Keyword() sess.add(i1) k1.items.append(i1) assert i1 in sess assert k1 not in sess class M2OCascadeDeleteOrphanTestOne(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): Table('extra', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('prefs_id', Integer, ForeignKey('prefs.id'))) Table('prefs', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('data', String(40))) Table( 'users', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', String(40)), Column('pref_id', Integer, ForeignKey('prefs.id')), Column('foo_id', Integer, ForeignKey('foo.id')), ) Table('foo', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('data', String(40))) @classmethod def setup_classes(cls): class User(cls.Comparable): pass class Pref(cls.Comparable): pass class Extra(cls.Comparable): pass class Foo(cls.Comparable): pass @classmethod def setup_mappers(cls): extra, foo, users, Extra, Pref, User, prefs, Foo = (cls.tables.extra, cls.tables.foo, cls.tables.users, cls.classes.Extra, cls.classes.Pref, cls.classes.User, cls.tables.prefs, cls.classes.Foo) mapper(Extra, extra) mapper(Pref, prefs, properties=dict(extra=relationship(Extra, cascade='all, delete'))) mapper(User, users, properties=dict(pref=relationship(Pref, lazy='joined', cascade='all, delete-orphan', single_parent=True), foo=relationship(Foo))) # straight m2o mapper(Foo, foo) @classmethod def insert_data(cls): Pref, User, Extra = (cls.classes.Pref, cls.classes.User, cls.classes.Extra) u1 = User(name='ed', pref=Pref(data="pref 1", extra=[Extra()])) u2 = User(name='jack', pref=Pref(data="pref 2", extra=[Extra()])) u3 = User(name="foo", pref=Pref(data="pref 3", extra=[Extra()])) sess = create_session() sess.add_all((u1, u2, u3)) sess.flush() sess.close() @testing.fails_on('maxdb', 'FIXME: unknown') def test_orphan(self): prefs, User, extra = (self.tables.prefs, self.classes.User, self.tables.extra) sess = create_session() assert prefs.count().scalar() == 3 assert extra.count().scalar() == 3 jack = sess.query(User).filter_by(name="jack").one() jack.pref = None sess.flush() assert prefs.count().scalar() == 2 assert extra.count().scalar() == 2 def test_cascade_on_deleted(self): """test a bug introduced by r6711""" Foo, User = self.classes.Foo, self.classes.User sess = sessionmaker(expire_on_commit=True)() u1 = User(name='jack', foo=Foo(data='f1')) sess.add(u1) sess.commit() u1.foo = None # the error condition relies upon # these things being true assert User.foo.dispatch._active_history is False eq_( attributes.get_history(u1, 'foo'), ([None], (), ()) ) sess.add(u1) assert u1 in sess sess.commit() def test_save_update_sends_pending(self): """test that newly added and deleted scalar items 
are cascaded on save-update""" Pref, User = self.classes.Pref, self.classes.User sess = sessionmaker(expire_on_commit=False)() p1, p2 = Pref(data='p1'), Pref(data='p2') u = User(name='jack', pref=p1) sess.add(u) sess.commit() sess.close() u.pref = p2 sess.add(u) assert p1 in sess assert p2 in sess sess.commit() @testing.fails_on('maxdb', 'FIXME: unknown') def test_orphan_on_update(self): prefs, User, extra = (self.tables.prefs, self.classes.User, self.tables.extra) sess = create_session() jack = sess.query(User).filter_by(name="jack").one() p = jack.pref e = jack.pref.extra[0] sess.expunge_all() jack.pref = None sess.add(jack) sess.add(p) sess.add(e) assert p in sess assert e in sess sess.flush() assert prefs.count().scalar() == 2 assert extra.count().scalar() == 2 def test_pending_expunge(self): Pref, User = self.classes.Pref, self.classes.User sess = create_session() someuser = User(name='someuser') sess.add(someuser) sess.flush() someuser.pref = p1 = Pref(data='somepref') assert p1 in sess someuser.pref = Pref(data='someotherpref') assert p1 not in sess sess.flush() eq_(sess.query(Pref).with_parent(someuser).all(), [Pref(data="someotherpref")]) def test_double_assignment(self): """Double assignment will not accidentally reset the 'parent' flag.""" Pref, User = self.classes.Pref, self.classes.User sess = create_session() jack = sess.query(User).filter_by(name="jack").one() newpref = Pref(data="newpref") jack.pref = newpref jack.pref = newpref sess.flush() eq_(sess.query(Pref).order_by(Pref.id).all(), [Pref(data="pref 1"), Pref(data="pref 3"), Pref(data="newpref")]) class M2OCascadeDeleteOrphanTestTwo(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): Table('t1', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('data', String(50)), Column('t2id', Integer, ForeignKey('t2.id'))) Table('t2', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('data', String(50)), Column('t3id', Integer, ForeignKey('t3.id'))) Table('t3', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('data', String(50))) @classmethod def setup_classes(cls): class T1(cls.Comparable): pass class T2(cls.Comparable): pass class T3(cls.Comparable): pass @classmethod def setup_mappers(cls): t2, T2, T3, t1, t3, T1 = (cls.tables.t2, cls.classes.T2, cls.classes.T3, cls.tables.t1, cls.tables.t3, cls.classes.T1) mapper(T1, t1, properties=dict(t2=relationship(T2, cascade='all, delete-orphan', single_parent=True))) mapper(T2, t2, properties=dict(t3=relationship(T3, cascade='all, delete-orphan', single_parent=True, backref=backref('t2', uselist=False)))) mapper(T3, t3) def test_cascade_delete(self): T2, T3, T1 = (self.classes.T2, self.classes.T3, self.classes.T1) sess = create_session() x = T1(data='t1a', t2=T2(data='t2a', t3=T3(data='t3a'))) sess.add(x) sess.flush() sess.delete(x) sess.flush() eq_(sess.query(T1).all(), []) eq_(sess.query(T2).all(), []) eq_(sess.query(T3).all(), []) def test_deletes_orphans_onelevel(self): T2, T3, T1 = (self.classes.T2, self.classes.T3, self.classes.T1) sess = create_session() x2 = T1(data='t1b', t2=T2(data='t2b', t3=T3(data='t3b'))) sess.add(x2) sess.flush() x2.t2 = None sess.delete(x2) sess.flush() eq_(sess.query(T1).all(), []) eq_(sess.query(T2).all(), []) eq_(sess.query(T3).all(), []) def test_deletes_orphans_twolevel(self): T2, T3, T1 = (self.classes.T2, self.classes.T3, self.classes.T1) sess = create_session() x = T1(data='t1a', t2=T2(data='t2a', t3=T3(data='t3a'))) 
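# a minimal sketch (hypothetical tables, mirroring the T1->T2 mapping above)
# of the many-to-one delete-orphan configuration exercised here: delete-orphan
# on a many-to-one requires single_parent=True, so each child belongs to at
# most one parent and becomes an orphan as soon as it is de-associated.
def _m2o_delete_orphan_mapping(metadata):
    from sqlalchemy import Table, Column, Integer, ForeignKey
    from sqlalchemy.orm import mapper, relationship
    child = Table('child_sketch', metadata,
                  Column('id', Integer, primary_key=True))
    parent = Table('parent_sketch', metadata,
                   Column('id', Integer, primary_key=True),
                   Column('child_id', Integer,
                          ForeignKey('child_sketch.id')))
    class ParentSketch(object):
        pass
    class ChildSketch(object):
        pass
    mapper(ChildSketch, child)
    mapper(ParentSketch, parent, properties={
        'child': relationship(ChildSketch,
                              cascade='all, delete-orphan',
                              single_parent=True)
    })
    return ParentSketch, ChildSketch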
sess.add(x) sess.flush() x.t2.t3 = None sess.delete(x) sess.flush() eq_(sess.query(T1).all(), []) eq_(sess.query(T2).all(), []) eq_(sess.query(T3).all(), []) def test_finds_orphans_twolevel(self): T2, T3, T1 = (self.classes.T2, self.classes.T3, self.classes.T1) sess = create_session() x = T1(data='t1a', t2=T2(data='t2a', t3=T3(data='t3a'))) sess.add(x) sess.flush() x.t2.t3 = None sess.flush() eq_(sess.query(T1).all(), [T1()]) eq_(sess.query(T2).all(), [T2()]) eq_(sess.query(T3).all(), []) def test_single_parent_raise(self): T2, T1 = self.classes.T2, self.classes.T1 sess = create_session() y = T2(data='T2a') x = T1(data='T1a', t2=y) assert_raises(sa_exc.InvalidRequestError, T1, data='T1b', t2=y) def test_single_parent_backref(self): T2, T3 = self.classes.T2, self.classes.T3 sess = create_session() y = T3(data='T3a') x = T2(data='T2a', t3=y) # cant attach the T3 to another T2 assert_raises(sa_exc.InvalidRequestError, T2, data='T2b', t3=y) # set via backref tho is OK, unsets from previous parent # first z = T2(data='T2b') y.t2 = z assert z.t3 is y assert x.t3 is None class M2OCascadeDeleteNoOrphanTest(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): Table('t1', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('data',String(50)), Column('t2id', Integer, ForeignKey('t2.id'))) Table('t2', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('data',String(50)), Column('t3id', Integer, ForeignKey('t3.id'))) Table('t3', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('data', String(50))) @classmethod def setup_classes(cls): class T1(cls.Comparable): pass class T2(cls.Comparable): pass class T3(cls.Comparable): pass @classmethod def setup_mappers(cls): t2, T2, T3, t1, t3, T1 = (cls.tables.t2, cls.classes.T2, cls.classes.T3, cls.tables.t1, cls.tables.t3, cls.classes.T1) mapper(T1, t1, properties={'t2': relationship(T2, cascade="all")}) mapper(T2, t2, properties={'t3': relationship(T3, cascade="all")}) mapper(T3, t3) def test_cascade_delete(self): T2, T3, T1 = (self.classes.T2, self.classes.T3, self.classes.T1) sess = create_session() x = T1(data='t1a', t2=T2(data='t2a', t3=T3(data='t3a'))) sess.add(x) sess.flush() sess.delete(x) sess.flush() eq_(sess.query(T1).all(), []) eq_(sess.query(T2).all(), []) eq_(sess.query(T3).all(), []) def test_cascade_delete_postappend_onelevel(self): T2, T3, T1 = (self.classes.T2, self.classes.T3, self.classes.T1) sess = create_session() x1 = T1(data='t1', ) x2 = T2(data='t2') x3 = T3(data='t3') sess.add_all((x1, x2, x3)) sess.flush() sess.delete(x1) x1.t2 = x2 x2.t3 = x3 sess.flush() eq_(sess.query(T1).all(), []) eq_(sess.query(T2).all(), []) eq_(sess.query(T3).all(), []) def test_cascade_delete_postappend_twolevel(self): T2, T3, T1 = (self.classes.T2, self.classes.T3, self.classes.T1) sess = create_session() x1 = T1(data='t1', t2=T2(data='t2')) x3 = T3(data='t3') sess.add_all((x1, x3)) sess.flush() sess.delete(x1) x1.t2.t3 = x3 sess.flush() eq_(sess.query(T1).all(), []) eq_(sess.query(T2).all(), []) eq_(sess.query(T3).all(), []) def test_preserves_orphans_onelevel(self): T2, T3, T1 = (self.classes.T2, self.classes.T3, self.classes.T1) sess = create_session() x2 = T1(data='t1b', t2=T2(data='t2b', t3=T3(data='t3b'))) sess.add(x2) sess.flush() x2.t2 = None sess.delete(x2) sess.flush() eq_(sess.query(T1).all(), []) eq_(sess.query(T2).all(), [T2()]) eq_(sess.query(T3).all(), [T3()]) @testing.future def 
test_preserves_orphans_onelevel_postremove(self): T2, T3, T1 = (self.classes.T2, self.classes.T3, self.classes.T1) sess = create_session() x2 = T1(data='t1b', t2=T2(data='t2b', t3=T3(data='t3b'))) sess.add(x2) sess.flush() sess.delete(x2) x2.t2 = None sess.flush() eq_(sess.query(T1).all(), []) eq_(sess.query(T2).all(), [T2()]) eq_(sess.query(T3).all(), [T3()]) def test_preserves_orphans_twolevel(self): T2, T3, T1 = (self.classes.T2, self.classes.T3, self.classes.T1) sess = create_session() x = T1(data='t1a', t2=T2(data='t2a', t3=T3(data='t3a'))) sess.add(x) sess.flush() x.t2.t3 = None sess.delete(x) sess.flush() eq_(sess.query(T1).all(), []) eq_(sess.query(T2).all(), []) eq_(sess.query(T3).all(), [T3()]) class M2MCascadeTest(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): Table('a', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('data', String(30)), test_needs_fk=True ) Table('b', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('data', String(30)), test_needs_fk=True ) Table('atob', metadata, Column('aid', Integer, ForeignKey('a.id')), Column('bid', Integer, ForeignKey('b.id')), test_needs_fk=True ) Table('c', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('data', String(30)), Column('bid', Integer, ForeignKey('b.id')), test_needs_fk=True ) @classmethod def setup_classes(cls): class A(cls.Comparable): pass class B(cls.Comparable): pass class C(cls.Comparable): pass def test_delete_orphan(self): a, A, B, b, atob = (self.tables.a, self.classes.A, self.classes.B, self.tables.b, self.tables.atob) # if no backref here, delete-orphan failed until [ticket:427] # was fixed mapper(A, a, properties={'bs': relationship(B, secondary=atob, cascade='all, delete-orphan', single_parent=True)}) mapper(B, b) sess = create_session() b1 = B(data='b1') a1 = A(data='a1', bs=[b1]) sess.add(a1) sess.flush() a1.bs.remove(b1) sess.flush() assert atob.count().scalar() ==0 assert b.count().scalar() == 0 assert a.count().scalar() == 1 def test_delete_orphan_dynamic(self): a, A, B, b, atob = (self.tables.a, self.classes.A, self.classes.B, self.tables.b, self.tables.atob) mapper(A, a, properties={'bs': relationship(B, secondary=atob, cascade='all, delete-orphan', single_parent=True, lazy='dynamic')}) # if no backref here, delete-orphan # failed until [ticket:427] was fixed mapper(B, b) sess = create_session() b1 = B(data='b1') a1 = A(data='a1', bs=[b1]) sess.add(a1) sess.flush() a1.bs.remove(b1) sess.flush() assert atob.count().scalar() ==0 assert b.count().scalar() == 0 assert a.count().scalar() == 1 def test_delete_orphan_cascades(self): a, A, c, b, C, B, atob = (self.tables.a, self.classes.A, self.tables.c, self.tables.b, self.classes.C, self.classes.B, self.tables.atob) mapper(A, a, properties={ # if no backref here, delete-orphan failed until [ticket:427] was # fixed 'bs':relationship(B, secondary=atob, cascade="all, delete-orphan", single_parent=True) }) mapper(B, b, properties={'cs': relationship(C, cascade="all, delete-orphan")}) mapper(C, c) sess = create_session() b1 = B(data='b1', cs=[C(data='c1')]) a1 = A(data='a1', bs=[b1]) sess.add(a1) sess.flush() a1.bs.remove(b1) sess.flush() assert atob.count().scalar() ==0 assert b.count().scalar() == 0 assert a.count().scalar() == 1 assert c.count().scalar() == 0 def test_cascade_delete(self): a, A, B, b, atob = (self.tables.a, self.classes.A, self.classes.B, self.tables.b, self.tables.atob) mapper(A, a, properties={ 
'bs':relationship(B, secondary=atob, cascade="all, delete-orphan", single_parent=True) }) mapper(B, b) sess = create_session() a1 = A(data='a1', bs=[B(data='b1')]) sess.add(a1) sess.flush() sess.delete(a1) sess.flush() assert atob.count().scalar() ==0 assert b.count().scalar() == 0 assert a.count().scalar() == 0 def test_single_parent_error(self): a, A, B, b, atob = (self.tables.a, self.classes.A, self.classes.B, self.tables.b, self.tables.atob) mapper(A, a, properties={ 'bs':relationship(B, secondary=atob, cascade="all, delete-orphan") }) mapper(B, b) assert_raises_message( sa_exc.ArgumentError, "On A.bs, delete-orphan cascade is not supported", configure_mappers ) def test_single_parent_raise(self): a, A, B, b, atob = (self.tables.a, self.classes.A, self.classes.B, self.tables.b, self.tables.atob) mapper(A, a, properties={ 'bs':relationship(B, secondary=atob, cascade="all, delete-orphan", single_parent=True) }) mapper(B, b) sess = create_session() b1 =B(data='b1') a1 = A(data='a1', bs=[b1]) assert_raises(sa_exc.InvalidRequestError, A, data='a2', bs=[b1] ) def test_single_parent_backref(self): """test that setting m2m via a uselist=False backref bypasses the single_parent raise""" a, A, B, b, atob = (self.tables.a, self.classes.A, self.classes.B, self.tables.b, self.tables.atob) mapper(A, a, properties={ 'bs':relationship(B, secondary=atob, cascade="all, delete-orphan", single_parent=True, backref=backref('a', uselist=False)) }) mapper(B, b) sess = create_session() b1 =B(data='b1') a1 = A(data='a1', bs=[b1]) assert_raises( sa_exc.InvalidRequestError, A, data='a2', bs=[b1] ) a2 = A(data='a2') b1.a = a2 assert b1 not in a1.bs assert b1 in a2.bs def test_none_m2m_collection_assignment(self): a, A, B, b, atob = (self.tables.a, self.classes.A, self.classes.B, self.tables.b, self.tables.atob) mapper(A, a, properties={ 'bs': relationship(B, secondary=atob, backref="as") }) mapper(B, b) s = Session() a1 = A(bs=[None]) s.add(a1) eq_(a1.bs, [None]) assert_raises_message( orm_exc.FlushError, "Can't flush None value found in collection A.bs", s.commit ) eq_(a1.bs, [None]) def test_none_m2m_collection_append(self): a, A, B, b, atob = (self.tables.a, self.classes.A, self.classes.B, self.tables.b, self.tables.atob) mapper(A, a, properties={ 'bs': relationship(B, secondary=atob, backref="as") }) mapper(B, b) s = Session() a1 = A() a1.bs.append(None) s.add(a1) eq_(a1.bs, [None]) assert_raises_message( orm_exc.FlushError, "Can't flush None value found in collection A.bs", s.commit ) eq_(a1.bs, [None]) class O2MSelfReferentialDetelOrphanTest(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): Table('node', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('parent_id', Integer, ForeignKey('node.id')) ) @classmethod def setup_classes(cls): class Node(cls.Basic): pass @classmethod def setup_mappers(cls): Node = cls.classes.Node node = cls.tables.node mapper(Node, node, properties={ "children":relationship( Node, cascade="all, delete-orphan", backref=backref( "parent", remote_side=node.c.id ) ) }) def test_self_referential_delete(self): Node = self.classes.Node s = Session() n1, n2, n3, n4 = Node(), Node(), Node(), Node() n1.children = [n2, n3] n3.children = [n4] s.add_all([n1, n2, n3, n4]) s.commit() eq_(s.query(Node).count(), 4) n1.children.remove(n3) s.commit() eq_(s.query(Node).count(), 2) class NoBackrefCascadeTest(_fixtures.FixtureTest): run_inserts = None @classmethod def setup_mappers(cls): addresses, Dingaling, User, dingalings, Address, users = 
(cls.tables.addresses, cls.classes.Dingaling, cls.classes.User, cls.tables.dingalings, cls.classes.Address, cls.tables.users) mapper(Address, addresses) mapper(User, users, properties={ 'addresses':relationship(Address, backref='user', cascade_backrefs=False) }) mapper(Dingaling, dingalings, properties={ 'address' : relationship(Address, backref='dingalings', cascade_backrefs=False) }) def test_o2m_basic(self): User, Address = self.classes.User, self.classes.Address sess = Session() u1 = User(name='u1') sess.add(u1) a1 = Address(email_address='a1') a1.user = u1 assert a1 not in sess def test_o2m_commit_warns(self): User, Address = self.classes.User, self.classes.Address sess = Session() u1 = User(name='u1') sess.add(u1) a1 = Address(email_address='a1') a1.user = u1 assert_raises_message( sa_exc.SAWarning, "not in session", sess.commit ) assert a1 not in sess def test_o2m_flag_on_backref(self): Dingaling, Address = self.classes.Dingaling, self.classes.Address sess = Session() a1 = Address(email_address='a1') sess.add(a1) d1 = Dingaling() d1.address = a1 assert d1 in a1.dingalings assert d1 in sess sess.commit() def test_m2o_basic(self): Dingaling, Address = self.classes.Dingaling, self.classes.Address sess = Session() a1 = Address(email_address='a1') d1 = Dingaling() sess.add(d1) a1.dingalings.append(d1) assert a1 not in sess def test_m2o_flag_on_backref(self): User, Address = self.classes.User, self.classes.Address sess = Session() a1 = Address(email_address='a1') sess.add(a1) u1 = User(name='u1') u1.addresses.append(a1) assert u1 in sess def test_m2o_commit_warns(self): Dingaling, Address = self.classes.Dingaling, self.classes.Address sess = Session() a1 = Address(email_address='a1') d1 = Dingaling() sess.add(d1) a1.dingalings.append(d1) assert a1 not in sess assert_raises_message( sa_exc.SAWarning, "not in session", sess.commit ) class PendingOrphanTestSingleLevel(fixtures.MappedTest): """Pending entities that are orphans""" @classmethod def define_tables(cls, metadata): Table('users', metadata, Column('user_id', Integer,primary_key=True, test_needs_autoincrement=True), Column('name', String(40))) Table('addresses', metadata, Column('address_id', Integer,primary_key=True, test_needs_autoincrement=True), Column('user_id', Integer, ForeignKey('users.user_id')), Column('email_address', String(40))) Table('orders', metadata, Column('order_id', Integer, primary_key=True, test_needs_autoincrement=True), Column('user_id', Integer, ForeignKey('users.user_id'), nullable=False), ) @classmethod def setup_classes(cls): class User(cls.Comparable): pass class Address(cls.Comparable): pass class Order(cls.Comparable): pass def test_pending_standalone_orphan(self): """Standalone 'orphan' objects can now be persisted, if the underlying constraints of the database allow it. This now supports persisting of objects based on foreign key values alone. """ users, orders, User, Address, Order, addresses = (self.tables.users, self.tables.orders, self.classes.User, self.classes.Address, self.classes.Order, self.tables.addresses) mapper(Order, orders) mapper(Address, addresses) mapper(User, users, properties=dict( addresses=relationship(Address, cascade="all,delete-orphan", backref="user"), orders=relationship(Order, cascade='all, delete-orphan') )) s = Session() # the standalone Address goes in, its foreign key # allows NULL a = Address() s.add(a) s.commit() # the standalone Order does not. 
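        # its orders.user_id foreign key is declared nullable=False above, so
        # committing a bare Order violates the NOT NULL constraint and the
        # commit raises DBAPIError.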
o = Order() s.add(o) assert_raises(sa_exc.DBAPIError, s.commit) s.rollback() # can assign o.user_id by foreign key, # flush succeeds u = User() s.add(u) s.flush() o = Order(user_id=u.user_id) s.add(o) s.commit() assert o in s and o not in s.new def test_pending_collection_expunge(self): """Removing a pending item from a collection expunges it from the session.""" users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(Address, addresses) mapper(User, users, properties=dict( addresses=relationship(Address, cascade="all,delete-orphan", backref="user") )) s = create_session() u = User() s.add(u) s.flush() a = Address() u.addresses.append(a) assert a in s u.addresses.remove(a) assert a not in s s.delete(u) s.flush() assert a.address_id is None, "Error: address should not be persistent" def test_nonorphans_ok(self): users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(Address, addresses) mapper(User, users, properties=dict( addresses=relationship(Address, cascade="all,delete", backref="user") )) s = create_session() u = User(name='u1', addresses=[Address(email_address='ad1')]) s.add(u) a1 = u.addresses[0] u.addresses.remove(a1) assert a1 in s s.flush() s.expunge_all() eq_(s.query(Address).all(), [Address(email_address='ad1')]) class PendingOrphanTestTwoLevel(fixtures.MappedTest): """test usages stated at http://article.gmane.org/gmane.comp.python.sqlalchemy.user/3085 http://article.gmane.org/gmane.comp.python.sqlalchemy.user/3119 """ @classmethod def define_tables(cls, metadata): Table('order', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True) ) Table('item', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('order_id', Integer, ForeignKey('order.id'), nullable=False) ) Table('attribute', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('item_id', Integer, ForeignKey('item.id'), nullable=False) ) @classmethod def setup_classes(cls): class Order(cls.Comparable): pass class Item(cls.Comparable): pass class Attribute(cls.Comparable): pass def test_singlelevel_remove(self): item, Order, order, Item = (self.tables.item, self.classes.Order, self.tables.order, self.classes.Item) mapper(Order, order, properties={ 'items':relationship(Item, cascade="all, delete-orphan") }) mapper(Item, item) s = Session() o1 = Order() s.add(o1) i1 = Item() o1.items.append(i1) o1.items.remove(i1) s.commit() assert i1 not in o1.items def test_multilevel_remove(self): Item, Attribute, order, item, attribute, Order = (self.classes.Item, self.classes.Attribute, self.tables.order, self.tables.item, self.tables.attribute, self.classes.Order) mapper(Order, order, properties={ 'items':relationship(Item, cascade="all, delete-orphan") }) mapper(Item, item, properties={ 'attributes':relationship(Attribute, cascade="all, delete-orphan") }) mapper(Attribute, attribute) s = Session() o1 = Order() s.add(o1) i1 = Item() a1 = Attribute() i1.attributes.append(a1) o1.items.append(i1) assert i1 in s assert a1 in s # i1 is an orphan so the operation # removes 'i1'. The operation # cascades down to 'a1'. 
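        # both i1 and a1 are still pending here (nothing has been flushed yet),
        # so the removal below should simply expunge them from the session
        # rather than emit any DELETE.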
o1.items.remove(i1) assert i1 not in s assert a1 not in s s.commit() assert o1 in s assert a1 not in s assert i1 not in s assert a1 not in o1.items class DoubleParentO2MOrphanTest(fixtures.MappedTest): """Test orphan behavior on an entity that requires two parents via many-to-one (one-to-many collection.). """ @classmethod def define_tables(cls, meta): Table('sales_reps', meta, Column('sales_rep_id', Integer,primary_key=True, test_needs_autoincrement=True), Column('name', String(50))) Table('accounts', meta, Column('account_id', Integer,primary_key=True, test_needs_autoincrement=True), Column('balance', Integer)) Table('customers', meta, Column('customer_id', Integer,primary_key=True, test_needs_autoincrement=True), Column('name', String(50)), Column('sales_rep_id', Integer, ForeignKey('sales_reps.sales_rep_id')), Column('account_id', Integer, ForeignKey('accounts.account_id'))) def _fixture(self, legacy_is_orphan, uselist): sales_reps, customers, accounts = (self.tables.sales_reps, self.tables.customers, self.tables.accounts) class Customer(fixtures.ComparableEntity): pass class Account(fixtures.ComparableEntity): pass class SalesRep(fixtures.ComparableEntity): pass mapper(Customer, customers, legacy_is_orphan=legacy_is_orphan) mapper(Account, accounts, properties=dict( customers=relationship(Customer, cascade="all,delete-orphan", backref="account", uselist=uselist))) mapper(SalesRep, sales_reps, properties=dict( customers=relationship(Customer, cascade="all,delete-orphan", backref="sales_rep", uselist=uselist))) s = create_session() a = Account(balance=0) sr = SalesRep(name="John") s.add_all((a, sr)) s.flush() c = Customer(name="Jane") if uselist: a.customers.append(c) sr.customers.append(c) else: a.customers = c sr.customers = c assert c in s return s, c, a, sr def test_double_parent_expunge_o2m_legacy(self): """test the delete-orphan uow event for multiple delete-orphan parent relationships.""" s, c, a, sr = self._fixture(True, True) a.customers.remove(c) assert c in s, "Should not expunge customer yet, still has one parent" sr.customers.remove(c) assert c not in s, \ 'Should expunge customer when both parents are gone' def test_double_parent_expunge_o2m_current(self): """test the delete-orphan uow event for multiple delete-orphan parent relationships.""" s, c, a, sr = self._fixture(False, True) a.customers.remove(c) assert c not in s, "Should expunge customer when either parent is gone" sr.customers.remove(c) assert c not in s, \ 'Should expunge customer when both parents are gone' def test_double_parent_expunge_o2o_legacy(self): """test the delete-orphan uow event for multiple delete-orphan parent relationships.""" s, c, a, sr = self._fixture(True, False) a.customers = None assert c in s, "Should not expunge customer yet, still has one parent" sr.customers = None assert c not in s, \ 'Should expunge customer when both parents are gone' def test_double_parent_expunge_o2o_current(self): """test the delete-orphan uow event for multiple delete-orphan parent relationships.""" s, c, a, sr = self._fixture(False, False) a.customers = None assert c not in s, "Should expunge customer when either parent is gone" sr.customers = None assert c not in s, \ 'Should expunge customer when both parents are gone' class DoubleParentM2OOrphanTest(fixtures.MappedTest): """Test orphan behavior on an entity that requires two parents via one-to-many (many-to-one reference to the orphan). 
""" @classmethod def define_tables(cls, metadata): Table('addresses', metadata, Column('address_id', Integer, primary_key=True, test_needs_autoincrement=True), Column('street', String(30)), ) Table('homes', metadata, Column('home_id', Integer, primary_key=True, key="id", test_needs_autoincrement=True), Column('description', String(30)), Column('address_id', Integer, ForeignKey('addresses.address_id'), nullable=False), ) Table('businesses', metadata, Column('business_id', Integer, primary_key=True, key="id", test_needs_autoincrement=True), Column('description', String(30), key="description"), Column('address_id', Integer, ForeignKey('addresses.address_id'), nullable=False), ) def test_non_orphan(self): """test that an entity can have two parent delete-orphan cascades, and persists normally.""" homes, businesses, addresses = (self.tables.homes, self.tables.businesses, self.tables.addresses) class Address(fixtures.ComparableEntity): pass class Home(fixtures.ComparableEntity): pass class Business(fixtures.ComparableEntity): pass mapper(Address, addresses) mapper(Home, homes, properties={'address' : relationship(Address, cascade='all,delete-orphan', single_parent=True)}) mapper(Business, businesses, properties={'address' : relationship(Address, cascade='all,delete-orphan', single_parent=True)}) session = create_session() h1 = Home(description='home1', address=Address(street='address1')) b1 = Business(description='business1', address=Address(street='address2')) session.add_all((h1,b1)) session.flush() session.expunge_all() eq_(session.query(Home).get(h1.id), Home(description='home1', address=Address(street='address1'))) eq_(session.query(Business).get(b1.id), Business(description='business1', address=Address(street='address2'))) def test_orphan(self): """test that an entity can have two parent delete-orphan cascades, and is detected as an orphan when saved without a parent.""" homes, businesses, addresses = (self.tables.homes, self.tables.businesses, self.tables.addresses) class Address(fixtures.ComparableEntity): pass class Home(fixtures.ComparableEntity): pass class Business(fixtures.ComparableEntity): pass mapper(Address, addresses) mapper(Home, homes, properties={'address' : relationship(Address, cascade='all,delete-orphan', single_parent=True)}) mapper(Business, businesses, properties={'address' : relationship(Address, cascade='all,delete-orphan', single_parent=True)}) session = create_session() a1 = Address() session.add(a1) session.flush() class CollectionAssignmentOrphanTest(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): Table('table_a', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', String(30))) Table('table_b', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', String(30)), Column('a_id', Integer, ForeignKey('table_a.id'))) def test_basic(self): table_b, table_a = self.tables.table_b, self.tables.table_a class A(fixtures.ComparableEntity): pass class B(fixtures.ComparableEntity): pass mapper(A, table_a, properties={ 'bs':relationship(B, cascade="all, delete-orphan") }) mapper(B, table_b) a1 = A(name='a1', bs=[B(name='b1'), B(name='b2'), B(name='b3')]) sess = create_session() sess.add(a1) sess.flush() sess.expunge_all() eq_(sess.query(A).get(a1.id), A(name='a1', bs=[B(name='b1'), B(name='b2'), B(name='b3')])) a1 = sess.query(A).get(a1.id) assert not class_mapper(B)._is_orphan( attributes.instance_state(a1.bs[0])) a1.bs[0].foo='b2modified' a1.bs[1].foo='b3modified' 
sess.flush() sess.expunge_all() eq_(sess.query(A).get(a1.id), A(name='a1', bs=[B(name='b1'), B(name='b2'), B(name='b3')])) class OrphanCriterionTest(fixtures.MappedTest): @classmethod def define_tables(self, metadata): Table("core", metadata, Column("id", Integer, primary_key=True, test_needs_autoincrement=True), Column("related_one_id", Integer, ForeignKey("related_one.id")), Column("related_two_id", Integer, ForeignKey("related_two.id")) ) Table("related_one", metadata, Column("id", Integer, primary_key=True, test_needs_autoincrement=True), ) Table("related_two", metadata, Column("id", Integer, primary_key=True, test_needs_autoincrement=True), ) def _fixture(self, legacy_is_orphan, persistent, r1_present, r2_present, detach_event=True): class Core(object): pass class RelatedOne(object): def __init__(self, cores): self.cores = cores class RelatedTwo(object): def __init__(self, cores): self.cores = cores mapper(Core, self.tables.core, legacy_is_orphan=legacy_is_orphan) mapper(RelatedOne, self.tables.related_one, properties={ 'cores': relationship(Core, cascade="all, delete-orphan", backref="r1") }) mapper(RelatedTwo, self.tables.related_two, properties={ 'cores': relationship(Core, cascade="all, delete-orphan", backref="r2") }) c1 = Core() if detach_event: r1 = RelatedOne(cores=[c1]) r2 = RelatedTwo(cores=[c1]) else: if r1_present: r1 = RelatedOne(cores=[c1]) if r2_present: r2 = RelatedTwo(cores=[c1]) if persistent: s = Session() s.add(c1) s.flush() if detach_event: if not r1_present: c1.r1 = None if not r2_present: c1.r2 = None return c1 def _assert_not_orphan(self, c1): mapper = object_mapper(c1) state = instance_state(c1) assert not mapper._is_orphan(state) def _assert_is_orphan(self, c1): mapper = object_mapper(c1) state = instance_state(c1) assert mapper._is_orphan(state) def test_leg_pers_r1_r2(self): c1 = self._fixture(True, True, True, True) self._assert_not_orphan(c1) def test_current_pers_r1_r2(self): c1 = self._fixture(False, True, True, True) self._assert_not_orphan(c1) def test_leg_pers_r1_notr2(self): c1 = self._fixture(True, True, True, False) self._assert_not_orphan(c1) def test_current_pers_r1_notr2(self): c1 = self._fixture(False, True, True, False) self._assert_is_orphan(c1) def test_leg_pers_notr1_notr2(self): c1 = self._fixture(True, True, False, False) self._assert_is_orphan(c1) def test_current_pers_notr1_notr2(self): c1 = self._fixture(False, True, True, False) self._assert_is_orphan(c1) def test_leg_transient_r1_r2(self): c1 = self._fixture(True, False, True, True) self._assert_not_orphan(c1) def test_current_transient_r1_r2(self): c1 = self._fixture(False, False, True, True) self._assert_not_orphan(c1) def test_leg_transient_r1_notr2(self): c1 = self._fixture(True, False, True, False) self._assert_not_orphan(c1) def test_current_transient_r1_notr2(self): c1 = self._fixture(False, False, True, False) self._assert_is_orphan(c1) def test_leg_transient_notr1_notr2(self): c1 = self._fixture(True, False, False, False) self._assert_is_orphan(c1) def test_current_transient_notr1_notr2(self): c1 = self._fixture(False, False, False, False) self._assert_is_orphan(c1) def test_leg_transient_notr1_notr2_noevent(self): c1 = self._fixture(True, False, False, False, False) self._assert_is_orphan(c1) def test_current_transient_notr1_notr2_noevent(self): c1 = self._fixture(False, False, False, False, False) self._assert_is_orphan(c1) def test_leg_persistent_notr1_notr2_noevent(self): c1 = self._fixture(True, True, False, False, False) self._assert_not_orphan(c1) def 
test_current_persistent_notr1_notr2_noevent(self): c1 = self._fixture(False, True, False, False, False) self._assert_not_orphan(c1) class O2MConflictTest(fixtures.MappedTest): """test that O2M dependency detects a change in parent, does the right thing, and updates the collection/attribute. """ @classmethod def define_tables(cls, metadata): Table("parent", metadata, Column("id", Integer, primary_key=True, test_needs_autoincrement=True) ) Table("child", metadata, Column("id", Integer, primary_key=True, test_needs_autoincrement=True), Column('parent_id', Integer, ForeignKey('parent.id'), nullable=False) ) @classmethod def setup_classes(cls): class Parent(cls.Comparable): pass class Child(cls.Comparable): pass def _do_move_test(self, delete_old): Parent, Child = self.classes.Parent, self.classes.Child sess = create_session() p1, p2, c1 = Parent(), Parent(), Child() if Parent.child.property.uselist: p1.child.append(c1) else: p1.child = c1 sess.add_all([p1, c1]) sess.flush() if delete_old: sess.delete(p1) if Parent.child.property.uselist: p2.child.append(c1) else: p2.child = c1 sess.add(p2) sess.flush() eq_(sess.query(Child).filter(Child.parent_id==p2.id).all(), [c1]) def test_o2o_delete_old(self): Child, Parent, parent, child = (self.classes.Child, self.classes.Parent, self.tables.parent, self.tables.child) mapper(Parent, parent, properties={ 'child':relationship(Child, uselist=False) }) mapper(Child, child) self._do_move_test(True) self._do_move_test(False) def test_o2m_delete_old(self): Child, Parent, parent, child = (self.classes.Child, self.classes.Parent, self.tables.parent, self.tables.child) mapper(Parent, parent, properties={ 'child':relationship(Child, uselist=True) }) mapper(Child, child) self._do_move_test(True) self._do_move_test(False) def test_o2o_backref_delete_old(self): Child, Parent, parent, child = (self.classes.Child, self.classes.Parent, self.tables.parent, self.tables.child) mapper(Parent, parent, properties={ 'child':relationship(Child, uselist=False, backref='parent') }) mapper(Child, child) self._do_move_test(True) self._do_move_test(False) def test_o2o_delcascade_delete_old(self): Child, Parent, parent, child = (self.classes.Child, self.classes.Parent, self.tables.parent, self.tables.child) mapper(Parent, parent, properties={ 'child':relationship(Child, uselist=False, cascade="all, delete") }) mapper(Child, child) self._do_move_test(True) self._do_move_test(False) def test_o2o_delorphan_delete_old(self): Child, Parent, parent, child = (self.classes.Child, self.classes.Parent, self.tables.parent, self.tables.child) mapper(Parent, parent, properties={ 'child':relationship(Child, uselist=False, cascade="all, delete, delete-orphan") }) mapper(Child, child) self._do_move_test(True) self._do_move_test(False) def test_o2o_delorphan_backref_delete_old(self): Child, Parent, parent, child = (self.classes.Child, self.classes.Parent, self.tables.parent, self.tables.child) mapper(Parent, parent, properties={ 'child':relationship(Child, uselist=False, cascade="all, delete, delete-orphan", backref='parent') }) mapper(Child, child) self._do_move_test(True) self._do_move_test(False) def test_o2o_backref_delorphan_delete_old(self): Child, Parent, parent, child = (self.classes.Child, self.classes.Parent, self.tables.parent, self.tables.child) mapper(Parent, parent) mapper(Child, child, properties = { 'parent' : relationship(Parent, uselist=False, single_parent=True, backref=backref('child', uselist=False), cascade="all,delete,delete-orphan") }) self._do_move_test(True) 
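        # and again without deleting the old parent first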
self._do_move_test(False) def test_o2m_backref_delorphan_delete_old(self): Child, Parent, parent, child = (self.classes.Child, self.classes.Parent, self.tables.parent, self.tables.child) mapper(Parent, parent) mapper(Child, child, properties = { 'parent' : relationship(Parent, uselist=False, single_parent=True, backref=backref('child', uselist=True), cascade="all,delete,delete-orphan") }) self._do_move_test(True) self._do_move_test(False) class PartialFlushTest(fixtures.MappedTest): """test cascade behavior as it relates to object lists passed to flush().""" @classmethod def define_tables(cls, metadata): Table("base", metadata, Column("id", Integer, primary_key=True, test_needs_autoincrement=True), Column("descr", String(50)) ) Table("noninh_child", metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('base_id', Integer, ForeignKey('base.id')) ) Table("parent", metadata, Column("id", Integer, ForeignKey("base.id"), primary_key=True) ) Table("inh_child", metadata, Column("id", Integer, ForeignKey("base.id"), primary_key=True), Column("parent_id", Integer, ForeignKey("parent.id")) ) def test_o2m_m2o(self): base, noninh_child = self.tables.base, self.tables.noninh_child class Base(fixtures.ComparableEntity): pass class Child(fixtures.ComparableEntity): pass mapper(Base, base, properties={ 'children':relationship(Child, backref='parent') }) mapper(Child, noninh_child) sess = create_session() c1, c2 = Child(), Child() b1 = Base(descr='b1', children=[c1, c2]) sess.add(b1) assert c1 in sess.new assert c2 in sess.new sess.flush([b1]) # c1, c2 get cascaded into the session on o2m. # not sure if this is how I like this # to work but that's how it works for now. assert c1 in sess and c1 not in sess.new assert c2 in sess and c2 not in sess.new assert b1 in sess and b1 not in sess.new sess = create_session() c1, c2 = Child(), Child() b1 = Base(descr='b1', children=[c1, c2]) sess.add(b1) sess.flush([c1]) # m2o, otoh, doesn't cascade up the other way. assert c1 in sess and c1 not in sess.new assert c2 in sess and c2 in sess.new assert b1 in sess and b1 in sess.new sess = create_session() c1, c2 = Child(), Child() b1 = Base(descr='b1', children=[c1, c2]) sess.add(b1) sess.flush([c1, c2]) # m2o, otoh, doesn't cascade up the other way. 
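        # this time both children are flushed, but the many-to-one backref
        # still does not pull the parent Base into the flush; b1 remains pending.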
assert c1 in sess and c1 not in sess.new assert c2 in sess and c2 not in sess.new assert b1 in sess and b1 in sess.new def test_circular_sort(self): """test ticket 1306""" base, inh_child, parent = (self.tables.base, self.tables.inh_child, self.tables.parent) class Base(fixtures.ComparableEntity): pass class Parent(Base): pass class Child(Base): pass mapper(Base,base) mapper(Child, inh_child, inherits=Base, properties={'parent': relationship( Parent, backref='children', primaryjoin=inh_child.c.parent_id == parent.c.id )} ) mapper(Parent,parent, inherits=Base) sess = create_session() p1 = Parent() c1, c2, c3 = Child(), Child(), Child() p1.children = [c1, c2, c3] sess.add(p1) sess.flush([c1]) assert p1 in sess.new assert c1 not in sess.new assert c2 in sess.new SQLAlchemy-0.8.4/test/orm/test_collection.py0000644000076500000240000020250612251150016021547 0ustar classicstaff00000000000000from sqlalchemy.testing import eq_ import sys from operator import and_ import sqlalchemy.orm.collections as collections from sqlalchemy.orm.collections import collection import sqlalchemy as sa from sqlalchemy import Integer, String, ForeignKey, text from sqlalchemy.testing.schema import Table, Column from sqlalchemy import util, exc as sa_exc from sqlalchemy.orm import create_session, mapper, relationship, \ attributes, instrumentation from sqlalchemy.testing import fixtures from sqlalchemy.testing import assert_raises, assert_raises_message class Canary(sa.orm.interfaces.AttributeExtension): def __init__(self): self.data = set() self.added = set() self.removed = set() def append(self, obj, value, initiator): assert value not in self.added self.data.add(value) self.added.add(value) return value def remove(self, obj, value, initiator): assert value not in self.removed self.data.remove(value) self.removed.add(value) def set(self, obj, value, oldvalue, initiator): if isinstance(value, str): value = CollectionsTest.entity_maker() if oldvalue is not None: self.remove(obj, oldvalue, None) self.append(obj, value, None) return value class CollectionsTest(fixtures.ORMTest): class Entity(object): def __init__(self, a=None, b=None, c=None): self.a = a self.b = b self.c = c def __repr__(self): return str((id(self), self.a, self.b, self.c)) @classmethod def setup_class(cls): instrumentation.register_class(cls.Entity) @classmethod def teardown_class(cls): instrumentation.unregister_class(cls.Entity) super(CollectionsTest, cls).teardown_class() _entity_id = 1 @classmethod def entity_maker(cls): cls._entity_id += 1 return cls.Entity(cls._entity_id) @classmethod def dictable_entity(cls, a=None, b=None, c=None): id = cls._entity_id = (cls._entity_id + 1) return cls.Entity(a or str(id), b or 'value %s' % id, c) def _test_adapter(self, typecallable, creator=None, to_set=None): if creator is None: creator = self.entity_maker class Foo(object): pass canary = Canary() instrumentation.register_class(Foo) attributes.register_attribute(Foo, 'attr', uselist=True, extension=canary, typecallable=typecallable, useobject=True) obj = Foo() adapter = collections.collection_adapter(obj.attr) direct = obj.attr if to_set is None: to_set = lambda col: set(col) def assert_eq(): self.assert_(to_set(direct) == canary.data) self.assert_(set(adapter) == canary.data) assert_ne = lambda: self.assert_(to_set(direct) != canary.data) e1, e2 = creator(), creator() adapter.append_with_event(e1) assert_eq() adapter.append_without_event(e2) assert_ne() canary.data.add(e2) assert_eq() adapter.remove_without_event(e2) assert_ne() canary.data.remove(e2) 
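        # the without-event removal did not notify the canary either; syncing
        # the canary by hand restores agreement for the following check.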
assert_eq() adapter.remove_with_event(e1) assert_eq() def _test_list(self, typecallable, creator=None): if creator is None: creator = self.entity_maker class Foo(object): pass canary = Canary() instrumentation.register_class(Foo) attributes.register_attribute(Foo, 'attr', uselist=True, extension=canary, typecallable=typecallable, useobject=True) obj = Foo() adapter = collections.collection_adapter(obj.attr) direct = obj.attr control = list() def assert_eq(): eq_(set(direct), canary.data) eq_(set(adapter), canary.data) eq_(direct, control) # assume append() is available for list tests e = creator() direct.append(e) control.append(e) assert_eq() if hasattr(direct, 'pop'): direct.pop() control.pop() assert_eq() if hasattr(direct, '__setitem__'): e = creator() direct.append(e) control.append(e) e = creator() direct[0] = e control[0] = e assert_eq() if util.reduce(and_, [hasattr(direct, a) for a in ('__delitem__', 'insert', '__len__')], True): values = [creator(), creator(), creator(), creator()] direct[slice(0, 1)] = values control[slice(0, 1)] = values assert_eq() values = [creator(), creator()] direct[slice(0, -1, 2)] = values control[slice(0, -1, 2)] = values assert_eq() values = [creator()] direct[slice(0, -1)] = values control[slice(0, -1)] = values assert_eq() values = [creator(), creator(), creator()] control[:] = values direct[:] = values def invalid(): direct[slice(0, 6, 2)] = [creator()] assert_raises(ValueError, invalid) if hasattr(direct, '__delitem__'): e = creator() direct.append(e) control.append(e) del direct[-1] del control[-1] assert_eq() if hasattr(direct, '__getslice__'): for e in [creator(), creator(), creator(), creator()]: direct.append(e) control.append(e) del direct[:-3] del control[:-3] assert_eq() del direct[0:1] del control[0:1] assert_eq() del direct[::2] del control[::2] assert_eq() if hasattr(direct, 'remove'): e = creator() direct.append(e) control.append(e) direct.remove(e) control.remove(e) assert_eq() if hasattr(direct, '__setitem__') or hasattr(direct, '__setslice__'): values = [creator(), creator()] direct[:] = values control[:] = values assert_eq() # test slice assignment where # slice size goes over the number of items values = [creator(), creator()] direct[1:3] = values control[1:3] = values assert_eq() values = [creator(), creator()] direct[0:1] = values control[0:1] = values assert_eq() values = [creator()] direct[0:] = values control[0:] = values assert_eq() values = [creator()] direct[:1] = values control[:1] = values assert_eq() values = [creator()] direct[-1::2] = values control[-1::2] = values assert_eq() values = [creator()] * len(direct[1::2]) direct[1::2] = values control[1::2] = values assert_eq() values = [creator(), creator()] direct[-1:-3] = values control[-1:-3] = values assert_eq() values = [creator(), creator()] direct[-2:-1] = values control[-2:-1] = values assert_eq() values = [creator()] direct[0:0] = values control[0:0] = values assert_eq() if hasattr(direct, '__delitem__') or hasattr(direct, '__delslice__'): for i in range(1, 4): e = creator() direct.append(e) control.append(e) del direct[-1:] del control[-1:] assert_eq() del direct[1:2] del control[1:2] assert_eq() del direct[:] del control[:] assert_eq() if hasattr(direct, 'extend'): values = [creator(), creator(), creator()] direct.extend(values) control.extend(values) assert_eq() if hasattr(direct, '__iadd__'): values = [creator(), creator(), creator()] direct += values control += values assert_eq() direct += [] control += [] assert_eq() values = [creator(), creator()] obj.attr 
+= values control += values assert_eq() if hasattr(direct, '__imul__'): direct *= 2 control *= 2 assert_eq() obj.attr *= 2 control *= 2 assert_eq() def _test_list_bulk(self, typecallable, creator=None): if creator is None: creator = self.entity_maker class Foo(object): pass canary = Canary() instrumentation.register_class(Foo) attributes.register_attribute(Foo, 'attr', uselist=True, extension=canary, typecallable=typecallable, useobject=True) obj = Foo() direct = obj.attr e1 = creator() obj.attr.append(e1) like_me = typecallable() e2 = creator() like_me.append(e2) self.assert_(obj.attr is direct) obj.attr = like_me self.assert_(obj.attr is not direct) self.assert_(obj.attr is not like_me) self.assert_(set(obj.attr) == set([e2])) self.assert_(e1 in canary.removed) self.assert_(e2 in canary.added) e3 = creator() real_list = [e3] obj.attr = real_list self.assert_(obj.attr is not real_list) self.assert_(set(obj.attr) == set([e3])) self.assert_(e2 in canary.removed) self.assert_(e3 in canary.added) e4 = creator() try: obj.attr = set([e4]) self.assert_(False) except TypeError: self.assert_(e4 not in canary.data) self.assert_(e3 in canary.data) e5 = creator() e6 = creator() e7 = creator() obj.attr = [e5, e6, e7] self.assert_(e5 in canary.added) self.assert_(e6 in canary.added) self.assert_(e7 in canary.added) obj.attr = [e6, e7] self.assert_(e5 in canary.removed) self.assert_(e6 in canary.added) self.assert_(e7 in canary.added) self.assert_(e6 not in canary.removed) self.assert_(e7 not in canary.removed) def test_list(self): self._test_adapter(list) self._test_list(list) self._test_list_bulk(list) def test_list_setitem_with_slices(self): # this is a "list" that has no __setslice__ # or __delslice__ methods. The __setitem__ # and __delitem__ must therefore accept # slice objects (i.e. 
as in py3k) class ListLike(object): def __init__(self): self.data = list() def append(self, item): self.data.append(item) def remove(self, item): self.data.remove(item) def insert(self, index, item): self.data.insert(index, item) def pop(self, index=-1): return self.data.pop(index) def extend(self): assert False def __len__(self): return len(self.data) def __setitem__(self, key, value): self.data[key] = value def __getitem__(self, key): return self.data[key] def __delitem__(self, key): del self.data[key] def __iter__(self): return iter(self.data) __hash__ = object.__hash__ def __eq__(self, other): return self.data == other def __repr__(self): return 'ListLike(%s)' % repr(self.data) self._test_adapter(ListLike) self._test_list(ListLike) self._test_list_bulk(ListLike) def test_list_subclass(self): class MyList(list): pass self._test_adapter(MyList) self._test_list(MyList) self._test_list_bulk(MyList) self.assert_(getattr(MyList, '_sa_instrumented') == id(MyList)) def test_list_duck(self): class ListLike(object): def __init__(self): self.data = list() def append(self, item): self.data.append(item) def remove(self, item): self.data.remove(item) def insert(self, index, item): self.data.insert(index, item) def pop(self, index=-1): return self.data.pop(index) def extend(self): assert False def __iter__(self): return iter(self.data) __hash__ = object.__hash__ def __eq__(self, other): return self.data == other def __repr__(self): return 'ListLike(%s)' % repr(self.data) self._test_adapter(ListLike) self._test_list(ListLike) self._test_list_bulk(ListLike) self.assert_(getattr(ListLike, '_sa_instrumented') == id(ListLike)) def test_list_emulates(self): class ListIsh(object): __emulates__ = list def __init__(self): self.data = list() def append(self, item): self.data.append(item) def remove(self, item): self.data.remove(item) def insert(self, index, item): self.data.insert(index, item) def pop(self, index=-1): return self.data.pop(index) def extend(self): assert False def __iter__(self): return iter(self.data) __hash__ = object.__hash__ def __eq__(self, other): return self.data == other def __repr__(self): return 'ListIsh(%s)' % repr(self.data) self._test_adapter(ListIsh) self._test_list(ListIsh) self._test_list_bulk(ListIsh) self.assert_(getattr(ListIsh, '_sa_instrumented') == id(ListIsh)) def _test_set(self, typecallable, creator=None): if creator is None: creator = self.entity_maker class Foo(object): pass canary = Canary() instrumentation.register_class(Foo) attributes.register_attribute(Foo, 'attr', uselist=True, extension=canary, typecallable=typecallable, useobject=True) obj = Foo() adapter = collections.collection_adapter(obj.attr) direct = obj.attr control = set() def assert_eq(): self.assert_(set(direct) == canary.data) self.assert_(set(adapter) == canary.data) self.assert_(direct == control) def addall(*values): for item in values: direct.add(item) control.add(item) assert_eq() def zap(): for item in list(direct): direct.remove(item) control.clear() addall(creator()) e = creator() addall(e) addall(e) if hasattr(direct, 'pop'): direct.pop() control.pop() assert_eq() if hasattr(direct, 'remove'): e = creator() addall(e) direct.remove(e) control.remove(e) assert_eq() e = creator() try: direct.remove(e) except KeyError: assert_eq() self.assert_(e not in canary.removed) else: self.assert_(False) if hasattr(direct, 'discard'): e = creator() addall(e) direct.discard(e) control.discard(e) assert_eq() e = creator() direct.discard(e) self.assert_(e not in canary.removed) assert_eq() if hasattr(direct, 
'update'): zap() e = creator() addall(e) values = set([e, creator(), creator()]) direct.update(values) control.update(values) assert_eq() if hasattr(direct, '__ior__'): zap() e = creator() addall(e) values = set([e, creator(), creator()]) direct |= values control |= values assert_eq() # cover self-assignment short-circuit values = set([e, creator(), creator()]) obj.attr |= values control |= values assert_eq() values = frozenset([e, creator()]) obj.attr |= values control |= values assert_eq() try: direct |= [e, creator()] assert False except TypeError: assert True if hasattr(direct, 'clear'): addall(creator(), creator()) direct.clear() control.clear() assert_eq() if hasattr(direct, 'difference_update'): zap() e = creator() addall(creator(), creator()) values = set([creator()]) direct.difference_update(values) control.difference_update(values) assert_eq() values.update(set([e, creator()])) direct.difference_update(values) control.difference_update(values) assert_eq() if hasattr(direct, '__isub__'): zap() e = creator() addall(creator(), creator()) values = set([creator()]) direct -= values control -= values assert_eq() values.update(set([e, creator()])) direct -= values control -= values assert_eq() values = set([creator()]) obj.attr -= values control -= values assert_eq() values = frozenset([creator()]) obj.attr -= values control -= values assert_eq() try: direct -= [e, creator()] assert False except TypeError: assert True if hasattr(direct, 'intersection_update'): zap() e = creator() addall(e, creator(), creator()) values = set(control) direct.intersection_update(values) control.intersection_update(values) assert_eq() values.update(set([e, creator()])) direct.intersection_update(values) control.intersection_update(values) assert_eq() if hasattr(direct, '__iand__'): zap() e = creator() addall(e, creator(), creator()) values = set(control) direct &= values control &= values assert_eq() values.update(set([e, creator()])) direct &= values control &= values assert_eq() values.update(set([creator()])) obj.attr &= values control &= values assert_eq() try: direct &= [e, creator()] assert False except TypeError: assert True if hasattr(direct, 'symmetric_difference_update'): zap() e = creator() addall(e, creator(), creator()) values = set([e, creator()]) direct.symmetric_difference_update(values) control.symmetric_difference_update(values) assert_eq() e = creator() addall(e) values = set([e]) direct.symmetric_difference_update(values) control.symmetric_difference_update(values) assert_eq() values = set() direct.symmetric_difference_update(values) control.symmetric_difference_update(values) assert_eq() if hasattr(direct, '__ixor__'): zap() e = creator() addall(e, creator(), creator()) values = set([e, creator()]) direct ^= values control ^= values assert_eq() e = creator() addall(e) values = set([e]) direct ^= values control ^= values assert_eq() values = set() direct ^= values control ^= values assert_eq() values = set([creator()]) obj.attr ^= values control ^= values assert_eq() try: direct ^= [e, creator()] assert False except TypeError: assert True def _test_set_bulk(self, typecallable, creator=None): if creator is None: creator = self.entity_maker class Foo(object): pass canary = Canary() instrumentation.register_class(Foo) attributes.register_attribute(Foo, 'attr', uselist=True, extension=canary, typecallable=typecallable, useobject=True) obj = Foo() direct = obj.attr e1 = creator() obj.attr.add(e1) like_me = typecallable() e2 = creator() like_me.add(e2) self.assert_(obj.attr is direct) obj.attr 
= like_me self.assert_(obj.attr is not direct) self.assert_(obj.attr is not like_me) self.assert_(obj.attr == set([e2])) self.assert_(e1 in canary.removed) self.assert_(e2 in canary.added) e3 = creator() real_set = set([e3]) obj.attr = real_set self.assert_(obj.attr is not real_set) self.assert_(obj.attr == set([e3])) self.assert_(e2 in canary.removed) self.assert_(e3 in canary.added) e4 = creator() try: obj.attr = [e4] self.assert_(False) except TypeError: self.assert_(e4 not in canary.data) self.assert_(e3 in canary.data) def test_set(self): self._test_adapter(set) self._test_set(set) self._test_set_bulk(set) def test_set_subclass(self): class MySet(set): pass self._test_adapter(MySet) self._test_set(MySet) self._test_set_bulk(MySet) self.assert_(getattr(MySet, '_sa_instrumented') == id(MySet)) def test_set_duck(self): class SetLike(object): def __init__(self): self.data = set() def add(self, item): self.data.add(item) def remove(self, item): self.data.remove(item) def discard(self, item): self.data.discard(item) def pop(self): return self.data.pop() def update(self, other): self.data.update(other) def __iter__(self): return iter(self.data) __hash__ = object.__hash__ def __eq__(self, other): return self.data == other self._test_adapter(SetLike) self._test_set(SetLike) self._test_set_bulk(SetLike) self.assert_(getattr(SetLike, '_sa_instrumented') == id(SetLike)) def test_set_emulates(self): class SetIsh(object): __emulates__ = set def __init__(self): self.data = set() def add(self, item): self.data.add(item) def remove(self, item): self.data.remove(item) def discard(self, item): self.data.discard(item) def pop(self): return self.data.pop() def update(self, other): self.data.update(other) def __iter__(self): return iter(self.data) __hash__ = object.__hash__ def __eq__(self, other): return self.data == other self._test_adapter(SetIsh) self._test_set(SetIsh) self._test_set_bulk(SetIsh) self.assert_(getattr(SetIsh, '_sa_instrumented') == id(SetIsh)) def _test_dict(self, typecallable, creator=None): if creator is None: creator = self.dictable_entity class Foo(object): pass canary = Canary() instrumentation.register_class(Foo) attributes.register_attribute(Foo, 'attr', uselist=True, extension=canary, typecallable=typecallable, useobject=True) obj = Foo() adapter = collections.collection_adapter(obj.attr) direct = obj.attr control = dict() def assert_eq(): self.assert_(set(direct.values()) == canary.data) self.assert_(set(adapter) == canary.data) self.assert_(direct == control) def addall(*values): for item in values: direct.set(item) control[item.a] = item assert_eq() def zap(): for item in list(adapter): direct.remove(item) control.clear() # assume an 'set' method is available for tests addall(creator()) if hasattr(direct, '__setitem__'): e = creator() direct[e.a] = e control[e.a] = e assert_eq() e = creator(e.a, e.b) direct[e.a] = e control[e.a] = e assert_eq() if hasattr(direct, '__delitem__'): e = creator() addall(e) del direct[e.a] del control[e.a] assert_eq() e = creator() try: del direct[e.a] except KeyError: self.assert_(e not in canary.removed) if hasattr(direct, 'clear'): addall(creator(), creator(), creator()) direct.clear() control.clear() assert_eq() direct.clear() control.clear() assert_eq() if hasattr(direct, 'pop'): e = creator() addall(e) direct.pop(e.a) control.pop(e.a) assert_eq() e = creator() try: direct.pop(e.a) except KeyError: self.assert_(e not in canary.removed) if hasattr(direct, 'popitem'): zap() e = creator() addall(e) direct.popitem() control.popitem() assert_eq() 
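        # setdefault should mirror dict.setdefault: return the existing entry
        # when the key is present, otherwise insert the given value and return it.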
if hasattr(direct, 'setdefault'): e = creator() val_a = direct.setdefault(e.a, e) val_b = control.setdefault(e.a, e) assert_eq() self.assert_(val_a is val_b) val_a = direct.setdefault(e.a, e) val_b = control.setdefault(e.a, e) assert_eq() self.assert_(val_a is val_b) if hasattr(direct, 'update'): e = creator() d = dict([(ee.a, ee) for ee in [e, creator(), creator()]]) addall(e, creator()) direct.update(d) control.update(d) assert_eq() if sys.version_info >= (2, 4): kw = dict([(ee.a, ee) for ee in [e, creator()]]) direct.update(**kw) control.update(**kw) assert_eq() def _test_dict_bulk(self, typecallable, creator=None): if creator is None: creator = self.dictable_entity class Foo(object): pass canary = Canary() instrumentation.register_class(Foo) attributes.register_attribute(Foo, 'attr', uselist=True, extension=canary, typecallable=typecallable, useobject=True) obj = Foo() direct = obj.attr e1 = creator() collections.collection_adapter(direct).append_with_event(e1) like_me = typecallable() e2 = creator() like_me.set(e2) self.assert_(obj.attr is direct) obj.attr = like_me self.assert_(obj.attr is not direct) self.assert_(obj.attr is not like_me) self.assert_( set(collections.collection_adapter(obj.attr)) == set([e2])) self.assert_(e1 in canary.removed) self.assert_(e2 in canary.added) # key validity on bulk assignment is a basic feature of # MappedCollection but is not present in basic, @converter-less # dict collections. e3 = creator() if isinstance(obj.attr, collections.MappedCollection): real_dict = dict(badkey=e3) try: obj.attr = real_dict self.assert_(False) except TypeError: pass self.assert_(obj.attr is not real_dict) self.assert_('badkey' not in obj.attr) eq_(set(collections.collection_adapter(obj.attr)), set([e2])) self.assert_(e3 not in canary.added) else: real_dict = dict(keyignored1=e3) obj.attr = real_dict self.assert_(obj.attr is not real_dict) self.assert_('keyignored1' not in obj.attr) eq_(set(collections.collection_adapter(obj.attr)), set([e3])) self.assert_(e2 in canary.removed) self.assert_(e3 in canary.added) obj.attr = typecallable() eq_(list(collections.collection_adapter(obj.attr)), []) e4 = creator() try: obj.attr = [e4] self.assert_(False) except TypeError: self.assert_(e4 not in canary.data) def test_dict(self): assert_raises_message( sa_exc.ArgumentError, 'Type InstrumentedDict must elect an appender ' 'method to be a collection class', self._test_adapter, dict, self.dictable_entity, to_set=lambda c: set(c.values()) ) assert_raises_message( sa_exc.ArgumentError, 'Type InstrumentedDict must elect an appender method ' 'to be a collection class', self._test_dict, dict ) def test_dict_subclass(self): class MyDict(dict): @collection.appender @collection.internally_instrumented def set(self, item, _sa_initiator=None): self.__setitem__(item.a, item, _sa_initiator=_sa_initiator) @collection.remover @collection.internally_instrumented def _remove(self, item, _sa_initiator=None): self.__delitem__(item.a, _sa_initiator=_sa_initiator) self._test_adapter(MyDict, self.dictable_entity, to_set=lambda c: set(c.values())) self._test_dict(MyDict) self._test_dict_bulk(MyDict) self.assert_(getattr(MyDict, '_sa_instrumented') == id(MyDict)) def test_dict_subclass2(self): class MyEasyDict(collections.MappedCollection): def __init__(self): super(MyEasyDict, self).__init__(lambda e: e.a) self._test_adapter(MyEasyDict, self.dictable_entity, to_set=lambda c: set(c.values())) self._test_dict(MyEasyDict) self._test_dict_bulk(MyEasyDict) self.assert_(getattr(MyEasyDict, '_sa_instrumented') == 
id(MyEasyDict)) def test_dict_subclass3(self): class MyOrdered(util.OrderedDict, collections.MappedCollection): def __init__(self): collections.MappedCollection.__init__(self, lambda e: e.a) util.OrderedDict.__init__(self) self._test_adapter(MyOrdered, self.dictable_entity, to_set=lambda c: set(c.values())) self._test_dict(MyOrdered) self._test_dict_bulk(MyOrdered) self.assert_(getattr(MyOrdered, '_sa_instrumented') == id(MyOrdered)) def test_dict_subclass4(self): # tests #2654 class MyDict(collections.MappedCollection): def __init__(self): super(MyDict, self).__init__(lambda value: "k%d" % value) @collection.converter def _convert(self, dictlike): for key, value in dictlike.iteritems(): yield value + 5 class Foo(object): pass canary = Canary() instrumentation.register_class(Foo) attributes.register_attribute(Foo, 'attr', uselist=True, extension=canary, typecallable=MyDict, useobject=True) f = Foo() f.attr = {"k1": 1, "k2": 2} eq_(f.attr, {'k7': 7, 'k6': 6}) def test_dict_duck(self): class DictLike(object): def __init__(self): self.data = dict() @collection.appender @collection.replaces(1) def set(self, item): current = self.data.get(item.a, None) self.data[item.a] = item return current @collection.remover def _remove(self, item): del self.data[item.a] def __setitem__(self, key, value): self.data[key] = value def __getitem__(self, key): return self.data[key] def __delitem__(self, key): del self.data[key] def values(self): return self.data.values() def __contains__(self, key): return key in self.data @collection.iterator def itervalues(self): return self.data.itervalues() __hash__ = object.__hash__ def __eq__(self, other): return self.data == other def __repr__(self): return 'DictLike(%s)' % repr(self.data) self._test_adapter(DictLike, self.dictable_entity, to_set=lambda c: set(c.itervalues())) self._test_dict(DictLike) self._test_dict_bulk(DictLike) self.assert_(getattr(DictLike, '_sa_instrumented') == id(DictLike)) def test_dict_emulates(self): class DictIsh(object): __emulates__ = dict def __init__(self): self.data = dict() @collection.appender @collection.replaces(1) def set(self, item): current = self.data.get(item.a, None) self.data[item.a] = item return current @collection.remover def _remove(self, item): del self.data[item.a] def __setitem__(self, key, value): self.data[key] = value def __getitem__(self, key): return self.data[key] def __delitem__(self, key): del self.data[key] def values(self): return self.data.values() def __contains__(self, key): return key in self.data @collection.iterator def itervalues(self): return self.data.itervalues() __hash__ = object.__hash__ def __eq__(self, other): return self.data == other def __repr__(self): return 'DictIsh(%s)' % repr(self.data) self._test_adapter(DictIsh, self.dictable_entity, to_set=lambda c: set(c.itervalues())) self._test_dict(DictIsh) self._test_dict_bulk(DictIsh) self.assert_(getattr(DictIsh, '_sa_instrumented') == id(DictIsh)) def _test_object(self, typecallable, creator=None): if creator is None: creator = self.entity_maker class Foo(object): pass canary = Canary() instrumentation.register_class(Foo) attributes.register_attribute(Foo, 'attr', uselist=True, extension=canary, typecallable=typecallable, useobject=True) obj = Foo() adapter = collections.collection_adapter(obj.attr) direct = obj.attr control = set() def assert_eq(): self.assert_(set(direct) == canary.data) self.assert_(set(adapter) == canary.data) self.assert_(direct == control) # There is no API for object collections. 
We'll make one up # for the purposes of the test. e = creator() direct.push(e) control.add(e) assert_eq() direct.zark(e) control.remove(e) assert_eq() e = creator() direct.maybe_zark(e) control.discard(e) assert_eq() e = creator() direct.push(e) control.add(e) assert_eq() e = creator() direct.maybe_zark(e) control.discard(e) assert_eq() def test_object_duck(self): class MyCollection(object): def __init__(self): self.data = set() @collection.appender def push(self, item): self.data.add(item) @collection.remover def zark(self, item): self.data.remove(item) @collection.removes_return() def maybe_zark(self, item): if item in self.data: self.data.remove(item) return item @collection.iterator def __iter__(self): return iter(self.data) __hash__ = object.__hash__ def __eq__(self, other): return self.data == other self._test_adapter(MyCollection) self._test_object(MyCollection) self.assert_(getattr(MyCollection, '_sa_instrumented') == id(MyCollection)) def test_object_emulates(self): class MyCollection2(object): __emulates__ = None def __init__(self): self.data = set() # looks like a list def append(self, item): assert False @collection.appender def push(self, item): self.data.add(item) @collection.remover def zark(self, item): self.data.remove(item) @collection.removes_return() def maybe_zark(self, item): if item in self.data: self.data.remove(item) return item @collection.iterator def __iter__(self): return iter(self.data) __hash__ = object.__hash__ def __eq__(self, other): return self.data == other self._test_adapter(MyCollection2) self._test_object(MyCollection2) self.assert_(getattr(MyCollection2, '_sa_instrumented') == id(MyCollection2)) def test_recipes(self): class Custom(object): def __init__(self): self.data = [] @collection.appender @collection.adds('entity') def put(self, entity): self.data.append(entity) @collection.remover @collection.removes(1) def remove(self, entity): self.data.remove(entity) @collection.adds(1) def push(self, *args): self.data.append(args[0]) @collection.removes('entity') def yank(self, entity, arg): self.data.remove(entity) @collection.replaces(2) def replace(self, arg, entity, **kw): self.data.insert(0, entity) return self.data.pop() @collection.removes_return() def pop(self, key): return self.data.pop() @collection.iterator def __iter__(self): return iter(self.data) class Foo(object): pass canary = Canary() instrumentation.register_class(Foo) attributes.register_attribute(Foo, 'attr', uselist=True, extension=canary, typecallable=Custom, useobject=True) obj = Foo() adapter = collections.collection_adapter(obj.attr) direct = obj.attr control = list() def assert_eq(): self.assert_(set(direct) == canary.data) self.assert_(set(adapter) == canary.data) self.assert_(list(direct) == control) creator = self.entity_maker e1 = creator() direct.put(e1) control.append(e1) assert_eq() e2 = creator() direct.put(entity=e2) control.append(e2) assert_eq() direct.remove(e2) control.remove(e2) assert_eq() direct.remove(entity=e1) control.remove(e1) assert_eq() e3 = creator() direct.push(e3) control.append(e3) assert_eq() direct.yank(e3, 'blah') control.remove(e3) assert_eq() e4, e5, e6, e7 = creator(), creator(), creator(), creator() direct.put(e4) direct.put(e5) control.append(e4) control.append(e5) dr1 = direct.replace('foo', e6, bar='baz') control.insert(0, e6) cr1 = control.pop() assert_eq() self.assert_(dr1 is cr1) dr2 = direct.replace(arg=1, entity=e7) control.insert(0, e7) cr2 = control.pop() assert_eq() self.assert_(dr2 is cr2) dr3 = direct.pop('blah') cr3 = control.pop() 
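        # pop() is marked with @collection.removes_return(), which presumably
        # fires the remove event for whatever value the method returns, so the
        # instrumented collection and the plain control list should have popped
        # the same entity.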
assert_eq() self.assert_(dr3 is cr3) def test_lifecycle(self): class Foo(object): pass canary = Canary() creator = self.entity_maker instrumentation.register_class(Foo) attributes.register_attribute(Foo, 'attr', uselist=True, extension=canary, useobject=True) obj = Foo() col1 = obj.attr e1 = creator() obj.attr.append(e1) e2 = creator() bulk1 = [e2] # empty & sever col1 from obj obj.attr = bulk1 self.assert_(len(col1) == 0) self.assert_(len(canary.data) == 1) self.assert_(obj.attr is not col1) self.assert_(obj.attr is not bulk1) self.assert_(obj.attr == bulk1) e3 = creator() col1.append(e3) self.assert_(e3 not in canary.data) self.assert_(collections.collection_adapter(col1) is None) obj.attr[0] = e3 self.assert_(e3 in canary.data) class DictHelpersTest(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): Table('parents', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('label', String(128))) Table('children', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('parent_id', Integer, ForeignKey('parents.id'), nullable=False), Column('a', String(128)), Column('b', String(128)), Column('c', String(128))) @classmethod def setup_classes(cls): class Parent(cls.Basic): def __init__(self, label=None): self.label = label class Child(cls.Basic): def __init__(self, a=None, b=None, c=None): self.a = a self.b = b self.c = c def _test_scalar_mapped(self, collection_class): parents, children, Parent, Child = (self.tables.parents, self.tables.children, self.classes.Parent, self.classes.Child) mapper(Child, children) mapper(Parent, parents, properties={ 'children': relationship(Child, collection_class=collection_class, cascade="all, delete-orphan")}) p = Parent() p.children['foo'] = Child('foo', 'value') p.children['bar'] = Child('bar', 'value') session = create_session() session.add(p) session.flush() pid = p.id session.expunge_all() p = session.query(Parent).get(pid) eq_(set(p.children.keys()), set(['foo', 'bar'])) cid = p.children['foo'].id collections.collection_adapter(p.children).append_with_event( Child('foo', 'newvalue')) session.flush() session.expunge_all() p = session.query(Parent).get(pid) self.assert_(set(p.children.keys()) == set(['foo', 'bar'])) self.assert_(p.children['foo'].id != cid) self.assert_( len(list(collections.collection_adapter(p.children))) == 2) session.flush() session.expunge_all() p = session.query(Parent).get(pid) self.assert_( len(list(collections.collection_adapter(p.children))) == 2) collections.collection_adapter(p.children).remove_with_event( p.children['foo']) self.assert_( len(list(collections.collection_adapter(p.children))) == 1) session.flush() session.expunge_all() p = session.query(Parent).get(pid) self.assert_( len(list(collections.collection_adapter(p.children))) == 1) del p.children['bar'] self.assert_( len(list(collections.collection_adapter(p.children))) == 0) session.flush() session.expunge_all() p = session.query(Parent).get(pid) self.assert_( len(list(collections.collection_adapter(p.children))) == 0) def _test_composite_mapped(self, collection_class): parents, children, Parent, Child = (self.tables.parents, self.tables.children, self.classes.Parent, self.classes.Child) mapper(Child, children) mapper(Parent, parents, properties={ 'children': relationship(Child, collection_class=collection_class, cascade="all, delete-orphan") }) p = Parent() p.children[('foo', '1')] = Child('foo', '1', 'value 1') p.children[('foo', '2')] = Child('foo', '2', 'value 2') session = 
create_session() session.add(p) session.flush() pid = p.id session.expunge_all() p = session.query(Parent).get(pid) self.assert_( set(p.children.keys()) == set([('foo', '1'), ('foo', '2')])) cid = p.children[('foo', '1')].id collections.collection_adapter(p.children).append_with_event( Child('foo', '1', 'newvalue')) session.flush() session.expunge_all() p = session.query(Parent).get(pid) self.assert_( set(p.children.keys()) == set([('foo', '1'), ('foo', '2')])) self.assert_(p.children[('foo', '1')].id != cid) self.assert_( len(list(collections.collection_adapter(p.children))) == 2) def test_mapped_collection(self): collection_class = collections.mapped_collection(lambda c: c.a) self._test_scalar_mapped(collection_class) def test_mapped_collection2(self): collection_class = collections.mapped_collection(lambda c: (c.a, c.b)) self._test_composite_mapped(collection_class) def test_attr_mapped_collection(self): collection_class = collections.attribute_mapped_collection('a') self._test_scalar_mapped(collection_class) def test_declarative_column_mapped(self): """test that uncompiled attribute usage works with column_mapped_collection""" from sqlalchemy.ext.declarative import declarative_base BaseObject = declarative_base() class Foo(BaseObject): __tablename__ = "foo" id = Column(Integer(), primary_key=True) bar_id = Column(Integer, ForeignKey('bar.id')) for spec, obj, expected in ( (Foo.id, Foo(id=3), 3), ((Foo.id, Foo.bar_id), Foo(id=3, bar_id=12), (3, 12)) ): eq_( collections.column_mapped_collection(spec)().keyfunc(obj), expected ) def test_column_mapped_assertions(self): assert_raises_message(sa_exc.ArgumentError, "Column-based expression object expected " "for argument 'mapping_spec'; got: 'a'", collections.column_mapped_collection, 'a') assert_raises_message(sa_exc.ArgumentError, "Column-based expression object expected " "for argument 'mapping_spec'; got: 'a'", collections.column_mapped_collection, text('a')) def test_column_mapped_collection(self): children = self.tables.children collection_class = collections.column_mapped_collection( children.c.a) self._test_scalar_mapped(collection_class) def test_column_mapped_collection2(self): children = self.tables.children collection_class = collections.column_mapped_collection( (children.c.a, children.c.b)) self._test_composite_mapped(collection_class) def test_mixin(self): class Ordered(util.OrderedDict, collections.MappedCollection): def __init__(self): collections.MappedCollection.__init__(self, lambda v: v.a) util.OrderedDict.__init__(self) collection_class = Ordered self._test_scalar_mapped(collection_class) def test_mixin2(self): class Ordered2(util.OrderedDict, collections.MappedCollection): def __init__(self, keyfunc): collections.MappedCollection.__init__(self, keyfunc) util.OrderedDict.__init__(self) collection_class = lambda: Ordered2(lambda v: (v.a, v.b)) self._test_composite_mapped(collection_class) class ColumnMappedWSerialize(fixtures.MappedTest): """test the column_mapped_collection serializer against multi-table and indirect table edge cases, including serialization.""" run_create_tables = run_deletes = None @classmethod def define_tables(cls, metadata): Table('foo', metadata, Column('id', Integer(), primary_key=True), Column('b', String(128)) ) Table('bar', metadata, Column('id', Integer(), primary_key=True), Column('foo_id', Integer, ForeignKey('foo.id')), Column('bat_id', Integer), schema="x" ) @classmethod def setup_classes(cls): class Foo(cls.Basic): pass class Bar(Foo): pass def test_indirect_table_column_mapped(self): Foo 
= self.classes.Foo Bar = self.classes.Bar bar = self.tables["x.bar"] mapper(Foo, self.tables.foo, properties={ "foo_id": self.tables.foo.c.id }) mapper(Bar, bar, inherits=Foo, properties={ "bar_id": bar.c.id, }) bar_spec = Bar(foo_id=1, bar_id=2, bat_id=3) self._run_test([ (Foo.foo_id, bar_spec, 1), ((Bar.bar_id, Bar.bat_id), bar_spec, (2, 3)), (Bar.foo_id, bar_spec, 1), (bar.c.id, bar_spec, 2), ]) def test_selectable_column_mapped(self): from sqlalchemy import select s = select([self.tables.foo]).alias() Foo = self.classes.Foo mapper(Foo, s) self._run_test([ (Foo.b, Foo(b=5), 5), (s.c.b, Foo(b=5), 5) ]) def _run_test(self, specs): from sqlalchemy.testing.util import picklers for spec, obj, expected in specs: coll = collections.column_mapped_collection(spec)() eq_( coll.keyfunc(obj), expected ) # ensure we do the right thing with __reduce__ for loads, dumps in picklers(): c2 = loads(dumps(coll)) eq_(c2.keyfunc(obj), expected) c3 = loads(dumps(c2)) eq_(c3.keyfunc(obj), expected) class CustomCollectionsTest(fixtures.MappedTest): """test the integration of collections with mapped classes.""" @classmethod def define_tables(cls, metadata): Table('sometable', metadata, Column('col1', Integer, primary_key=True, test_needs_autoincrement=True), Column('data', String(30))) Table('someothertable', metadata, Column('col1', Integer, primary_key=True, test_needs_autoincrement=True), Column('scol1', Integer, ForeignKey('sometable.col1')), Column('data', String(20))) def test_basic(self): someothertable, sometable = self.tables.someothertable, \ self.tables.sometable class MyList(list): pass class Foo(object): pass class Bar(object): pass mapper(Foo, sometable, properties={ 'bars': relationship(Bar, collection_class=MyList) }) mapper(Bar, someothertable) f = Foo() assert isinstance(f.bars, MyList) def test_lazyload(self): """test that a 'set' can be used as a collection and can lazyload.""" someothertable, sometable = self.tables.someothertable, \ self.tables.sometable class Foo(object): pass class Bar(object): pass mapper(Foo, sometable, properties={ 'bars': relationship(Bar, collection_class=set) }) mapper(Bar, someothertable) f = Foo() f.bars.add(Bar()) f.bars.add(Bar()) sess = create_session() sess.add(f) sess.flush() sess.expunge_all() f = sess.query(Foo).get(f.col1) assert len(list(f.bars)) == 2 f.bars.clear() def test_dict(self): """test that a 'dict' can be used as a collection and can lazyload.""" someothertable, sometable = self.tables.someothertable, \ self.tables.sometable class Foo(object): pass class Bar(object): pass class AppenderDict(dict): @collection.appender def set(self, item): self[id(item)] = item @collection.remover def remove(self, item): if id(item) in self: del self[id(item)] mapper(Foo, sometable, properties={ 'bars': relationship(Bar, collection_class=AppenderDict) }) mapper(Bar, someothertable) f = Foo() f.bars.set(Bar()) f.bars.set(Bar()) sess = create_session() sess.add(f) sess.flush() sess.expunge_all() f = sess.query(Foo).get(f.col1) assert len(list(f.bars)) == 2 f.bars.clear() def test_dict_wrapper(self): """test that the supplied 'dict' wrapper can be used as a collection and can lazyload.""" someothertable, sometable = self.tables.someothertable, \ self.tables.sometable class Foo(object): pass class Bar(object): def __init__(self, data): self.data = data mapper(Foo, sometable, properties={ 'bars':relationship(Bar, collection_class=collections.column_mapped_collection( someothertable.c.data)) }) mapper(Bar, someothertable) f = Foo() col = 
collections.collection_adapter(f.bars) col.append_with_event(Bar('a')) col.append_with_event(Bar('b')) sess = create_session() sess.add(f) sess.flush() sess.expunge_all() f = sess.query(Foo).get(f.col1) assert len(list(f.bars)) == 2 existing = set([id(b) for b in f.bars.values()]) col = collections.collection_adapter(f.bars) col.append_with_event(Bar('b')) f.bars['a'] = Bar('a') sess.flush() sess.expunge_all() f = sess.query(Foo).get(f.col1) assert len(list(f.bars)) == 2 replaced = set([id(b) for b in f.bars.values()]) self.assert_(existing != replaced) def test_list(self): self._test_list(list) def test_list_no_setslice(self): class ListLike(object): def __init__(self): self.data = list() def append(self, item): self.data.append(item) def remove(self, item): self.data.remove(item) def insert(self, index, item): self.data.insert(index, item) def pop(self, index=-1): return self.data.pop(index) def extend(self): assert False def __len__(self): return len(self.data) def __setitem__(self, key, value): self.data[key] = value def __getitem__(self, key): return self.data[key] def __delitem__(self, key): del self.data[key] def __iter__(self): return iter(self.data) __hash__ = object.__hash__ def __eq__(self, other): return self.data == other def __repr__(self): return 'ListLike(%s)' % repr(self.data) self._test_list(ListLike) def _test_list(self, listcls): someothertable, sometable = self.tables.someothertable, \ self.tables.sometable class Parent(object): pass class Child(object): pass mapper(Parent, sometable, properties={ 'children': relationship(Child, collection_class=listcls) }) mapper(Child, someothertable) control = list() p = Parent() o = Child() control.append(o) p.children.append(o) assert control == p.children assert control == list(p.children) o = [Child(), Child(), Child(), Child()] control.extend(o) p.children.extend(o) assert control == p.children assert control == list(p.children) assert control[0] == p.children[0] assert control[-1] == p.children[-1] assert control[1:3] == p.children[1:3] del control[1] del p.children[1] assert control == p.children assert control == list(p.children) o = [Child()] control[1:3] = o p.children[1:3] = o assert control == p.children assert control == list(p.children) o = [Child(), Child(), Child(), Child()] control[1:3] = o p.children[1:3] = o assert control == p.children assert control == list(p.children) o = [Child(), Child(), Child(), Child()] control[-1:-2] = o p.children[-1:-2] = o assert control == p.children assert control == list(p.children) o = [Child(), Child(), Child(), Child()] control[4:] = o p.children[4:] = o assert control == p.children assert control == list(p.children) o = Child() control.insert(0, o) p.children.insert(0, o) assert control == p.children assert control == list(p.children) o = Child() control.insert(3, o) p.children.insert(3, o) assert control == p.children assert control == list(p.children) o = Child() control.insert(999, o) p.children.insert(999, o) assert control == p.children assert control == list(p.children) del control[0:1] del p.children[0:1] assert control == p.children assert control == list(p.children) del control[1:1] del p.children[1:1] assert control == p.children assert control == list(p.children) del control[1:3] del p.children[1:3] assert control == p.children assert control == list(p.children) del control[7:] del p.children[7:] assert control == p.children assert control == list(p.children) assert control.pop() == p.children.pop() assert control == p.children assert control == list(p.children) 
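# editorial note (illustrative comment, not part of the original test):
# _test_list mirrors every mutation against a plain Python list ("control") so
# that slice assignment, del, insert, extend and pop on the instrumented
# collection can be compared against stock list semantics step by step.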
assert control.pop(0) == p.children.pop(0) assert control == p.children assert control == list(p.children) assert control.pop(2) == p.children.pop(2) assert control == p.children assert control == list(p.children) o = Child() control.insert(2, o) p.children.insert(2, o) assert control == p.children assert control == list(p.children) control.remove(o) p.children.remove(o) assert control == p.children assert control == list(p.children) def test_custom(self): someothertable, sometable = self.tables.someothertable, \ self.tables.sometable class Parent(object): pass class Child(object): pass class MyCollection(object): def __init__(self): self.data = [] @collection.appender def append(self, value): self.data.append(value) @collection.remover def remove(self, value): self.data.remove(value) @collection.iterator def __iter__(self): return iter(self.data) mapper(Parent, sometable, properties={ 'children': relationship(Child, collection_class=MyCollection) }) mapper(Child, someothertable) control = list() p1 = Parent() o = Child() control.append(o) p1.children.append(o) assert control == list(p1.children) o = Child() control.append(o) p1.children.append(o) assert control == list(p1.children) o = Child() control.append(o) p1.children.append(o) assert control == list(p1.children) sess = create_session() sess.add(p1) sess.flush() sess.expunge_all() p2 = sess.query(Parent).get(p1.col1) o = list(p2.children) assert len(o) == 3 class InstrumentationTest(fixtures.ORMTest): def test_uncooperative_descriptor_in_sweep(self): class DoNotTouch(object): def __get__(self, obj, owner): raise AttributeError class Touchy(list): no_touch = DoNotTouch() assert 'no_touch' in Touchy.__dict__ assert not hasattr(Touchy, 'no_touch') assert 'no_touch' in dir(Touchy) collections._instrument_class(Touchy) def test_name_setup(self): class Base(object): @collection.iterator def base_iterate(self, x): return "base_iterate" @collection.appender def base_append(self, x): return "base_append" @collection.converter def base_convert(self, x): return "base_convert" @collection.remover def base_remove(self, x): return "base_remove" from sqlalchemy.orm.collections import _instrument_class _instrument_class(Base) eq_(Base._sa_remover(Base(), 5), "base_remove") eq_(Base._sa_appender(Base(), 5), "base_append") eq_(Base._sa_iterator(Base(), 5), "base_iterate") eq_(Base._sa_converter(Base(), 5), "base_convert") class Sub(Base): @collection.converter def base_convert(self, x): return "sub_convert" @collection.remover def sub_remove(self, x): return "sub_remove" _instrument_class(Sub) eq_(Sub._sa_appender(Sub(), 5), "base_append") eq_(Sub._sa_remover(Sub(), 5), "sub_remove") eq_(Sub._sa_iterator(Sub(), 5), "base_iterate") eq_(Sub._sa_converter(Sub(), 5), "sub_convert") def test_link_event(self): canary = [] class Collection(list): @collection.linker def _on_link(self, obj): canary.append(obj) class Foo(object): pass instrumentation.register_class(Foo) attributes.register_attribute(Foo, 'attr', uselist=True, typecallable=Collection, useobject=True) f1 = Foo() f1.attr.append(3) eq_(canary, [f1.attr._sa_adapter]) adapter_1 = f1.attr._sa_adapter l2 = Collection() f1.attr = l2 eq_(canary, [adapter_1, f1.attr._sa_adapter, None]) SQLAlchemy-0.8.4/test/orm/test_compile.py0000644000076500000240000001526312251147172021057 0ustar classicstaff00000000000000from sqlalchemy import * from sqlalchemy import exc as sa_exc from sqlalchemy.orm import * from sqlalchemy.testing import assert_raises_message from sqlalchemy.testing import fixtures from sqlalchemy 
import testing class CompileTest(fixtures.ORMTest): """test various mapper compilation scenarios""" def teardown(self): clear_mappers() def test_with_polymorphic(self): metadata = MetaData(testing.db) order = Table('orders', metadata, Column('id', Integer, primary_key=True), Column('employee_id', Integer, ForeignKey('employees.id'), nullable=False), Column('type', Unicode(16))) employee = Table('employees', metadata, Column('id', Integer, primary_key=True), Column('name', Unicode(16), unique=True, nullable=False)) product = Table('products', metadata, Column('id', Integer, primary_key=True), ) orderproduct = Table('orderproducts', metadata, Column('id', Integer, primary_key=True), Column('order_id', Integer, ForeignKey("orders.id"), nullable=False), Column('product_id', Integer, ForeignKey("products.id"), nullable=False), ) class Order(object): pass class Employee(object): pass class Product(object): pass class OrderProduct(object): pass order_join = order.select().alias('pjoin') order_mapper = mapper(Order, order, with_polymorphic=('*', order_join), polymorphic_on=order_join.c.type, polymorphic_identity='order', properties={ 'orderproducts': relationship(OrderProduct, lazy='select', backref='order')} ) mapper(Product, product, properties={ 'orderproducts': relationship(OrderProduct, lazy='select', backref='product')} ) mapper(Employee, employee, properties={ 'orders': relationship(Order, lazy='select', backref='employee')}) mapper(OrderProduct, orderproduct) # this requires that the compilation of order_mapper's "surrogate # mapper" occur after the initial setup of MapperProperty objects on # the mapper. configure_mappers() def test_conflicting_backref_one(self): """test that conflicting backrefs raises an exception""" metadata = MetaData(testing.db) order = Table('orders', metadata, Column('id', Integer, primary_key=True), Column('type', Unicode(16))) product = Table('products', metadata, Column('id', Integer, primary_key=True), ) orderproduct = Table('orderproducts', metadata, Column('id', Integer, primary_key=True), Column('order_id', Integer, ForeignKey("orders.id"), nullable=False), Column('product_id', Integer, ForeignKey("products.id"), nullable=False), ) class Order(object): pass class Product(object): pass class OrderProduct(object): pass order_join = order.select().alias('pjoin') order_mapper = mapper(Order, order, with_polymorphic=('*', order_join), polymorphic_on=order_join.c.type, polymorphic_identity='order', properties={ 'orderproducts': relationship(OrderProduct, lazy='select', backref='product')} ) mapper(Product, product, properties={ 'orderproducts': relationship(OrderProduct, lazy='select', backref='product')} ) mapper(OrderProduct, orderproduct) assert_raises_message( sa_exc.ArgumentError, "Error creating backref", configure_mappers ) def test_misc_one(self): metadata = MetaData(testing.db) node_table = Table("node", metadata, Column('node_id', Integer, primary_key=True), Column('name_index', Integer, nullable=True), ) node_name_table = Table("node_name", metadata, Column('node_name_id', Integer, primary_key=True), Column('node_id', Integer, ForeignKey('node.node_id')), Column('host_id', Integer, ForeignKey('host.host_id')), Column('name', String(64), nullable=False), ) host_table = Table("host", metadata, Column('host_id', Integer, primary_key=True), Column('hostname', String(64), nullable=False, unique=True), ) metadata.create_all() try: node_table.insert().execute(node_id=1, node_index=5) class Node(object):pass class NodeName(object):pass class Host(object):pass 
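# editorial note (illustrative comment, not part of the original test): the
# mappers configured below give NodeName a relationship to both Node (with a
# 'names' backref) and Host; the assertion afterwards checks that a freshly
# loaded Node exposes its 'names' backref as an empty collection.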
node_mapper = mapper(Node, node_table) host_mapper = mapper(Host, host_table) node_name_mapper = mapper(NodeName, node_name_table, properties = { 'node' : relationship(Node, backref=backref('names')), 'host' : relationship(Host), } ) sess = create_session() assert sess.query(Node).get(1).names == [] finally: metadata.drop_all() def test_conflicting_backref_two(self): meta = MetaData() a = Table('a', meta, Column('id', Integer, primary_key=True)) b = Table('b', meta, Column('id', Integer, primary_key=True), Column('a_id', Integer, ForeignKey('a.id'))) class A(object): pass class B(object): pass mapper(A, a, properties={ 'b':relationship(B, backref='a') }) mapper(B, b, properties={ 'a':relationship(A, backref='b') }) assert_raises_message( sa_exc.ArgumentError, "Error creating backref", configure_mappers ) def test_conflicting_backref_subclass(self): meta = MetaData() a = Table('a', meta, Column('id', Integer, primary_key=True)) b = Table('b', meta, Column('id', Integer, primary_key=True), Column('a_id', Integer, ForeignKey('a.id'))) class A(object): pass class B(object): pass class C(B): pass mapper(A, a, properties={ 'b': relationship(B, backref='a'), 'c': relationship(C, backref='a') }) mapper(B, b) mapper(C, None, inherits=B) assert_raises_message( sa_exc.ArgumentError, "Error creating backref", configure_mappers ) SQLAlchemy-0.8.4/test/orm/test_composites.py0000644000076500000240000006474712251150016021616 0ustar classicstaff00000000000000from sqlalchemy.testing import assert_raises, assert_raises_message import sqlalchemy as sa from sqlalchemy import testing from sqlalchemy import MetaData, Integer, String, ForeignKey, func, \ util, select from sqlalchemy.testing.schema import Table, Column from sqlalchemy.orm import mapper, relationship, backref, \ class_mapper, CompositeProperty, \ validates, aliased from sqlalchemy.orm import attributes, \ composite, relationship, \ Session from sqlalchemy.testing import eq_ from sqlalchemy.testing import fixtures from test.orm import _fixtures class PointTest(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): Table('graphs', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', String(30))) Table('edges', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('graph_id', Integer, ForeignKey('graphs.id')), Column('x1', Integer), Column('y1', Integer), Column('x2', Integer), Column('y2', Integer), ) @classmethod def setup_mappers(cls): graphs, edges = cls.tables.graphs, cls.tables.edges class Point(cls.Comparable): def __init__(self, x, y): self.x = x self.y = y def __composite_values__(self): return [self.x, self.y] __hash__ = None def __eq__(self, other): return isinstance(other, Point) and \ other.x == self.x and \ other.y == self.y def __ne__(self, other): return not isinstance(other, Point) or \ not self.__eq__(other) class Graph(cls.Comparable): pass class Edge(cls.Comparable): def __init__(self, *args): if args: self.start, self.end = args mapper(Graph, graphs, properties={ 'edges':relationship(Edge) }) mapper(Edge, edges, properties={ 'start':sa.orm.composite(Point, edges.c.x1, edges.c.y1), 'end': sa.orm.composite(Point, edges.c.x2, edges.c.y2) }) def _fixture(self): Graph, Edge, Point = (self.classes.Graph, self.classes.Edge, self.classes.Point) sess = Session() g = Graph(id=1, edges=[ Edge(Point(3, 4), Point(5, 6)), Edge(Point(14, 5), Point(2, 7)) ]) sess.add(g) sess.commit() return sess def test_round_trip(self): Graph, Point = 
self.classes.Graph, self.classes.Point sess = self._fixture() g1 = sess.query(Graph).first() sess.close() g = sess.query(Graph).get(g1.id) eq_( [(e.start, e.end) for e in g.edges], [ (Point(3, 4), Point(5, 6)), (Point(14, 5), Point(2, 7)), ] ) def test_detect_change(self): Graph, Edge, Point = (self.classes.Graph, self.classes.Edge, self.classes.Point) sess = self._fixture() g = sess.query(Graph).first() g.edges[1].end = Point(18, 4) sess.commit() e = sess.query(Edge).get(g.edges[1].id) eq_(e.end, Point(18, 4)) def test_not_none(self): Graph, Edge, Point = (self.classes.Graph, self.classes.Edge, self.classes.Point) # current contract. the composite is None # when hasn't been populated etc. on a # pending/transient object. e1 = Edge() assert e1.end is None sess = Session() sess.add(e1) # however, once it's persistent, the code as of 0.7.3 # would unconditionally populate it, even though it's # all None. I think this usage contract is inconsistent, # and it would be better that the composite is just # created unconditionally in all cases. # but as we are just trying to fix [ticket:2308] and # [ticket:2309] without changing behavior we maintain # that only "persistent" gets the composite with the # Nones sess.flush() assert e1.end is not None def test_eager_load(self): Graph, Point = self.classes.Graph, self.classes.Point sess = self._fixture() g = sess.query(Graph).first() sess.close() def go(): g2 = sess.query(Graph).\ options(sa.orm.joinedload('edges')).\ get(g.id) eq_( [(e.start, e.end) for e in g2.edges], [ (Point(3, 4), Point(5, 6)), (Point(14, 5), Point(2, 7)), ] ) self.assert_sql_count(testing.db, go, 1) def test_comparator(self): Graph, Edge, Point = (self.classes.Graph, self.classes.Edge, self.classes.Point) sess = self._fixture() g = sess.query(Graph).first() assert sess.query(Edge).\ filter(Edge.start == Point(3, 4)).one() is \ g.edges[0] assert sess.query(Edge).\ filter(Edge.start != Point(3, 4)).first() is \ g.edges[1] eq_( sess.query(Edge).filter(Edge.start == None).all(), [] ) def test_comparator_aliased(self): Graph, Edge, Point = (self.classes.Graph, self.classes.Edge, self.classes.Point) sess = self._fixture() g = sess.query(Graph).first() ea = aliased(Edge) assert sess.query(ea).\ filter(ea.start != Point(3, 4)).first() is \ g.edges[1] def test_get_history(self): Edge = self.classes.Edge Point = self.classes.Point from sqlalchemy.orm.attributes import get_history e1 = Edge() e1.start = Point(1,2) eq_( get_history(e1, 'start'), ([Point(x=1, y=2)], (), [Point(x=None, y=None)]) ) eq_( get_history(e1, 'end'), ((), [Point(x=None, y=None)], ()) ) def test_query_cols(self): Edge = self.classes.Edge sess = self._fixture() eq_( sess.query(Edge.start, Edge.end).all(), [(3, 4, 5, 6), (14, 5, 2, 7)] ) def test_delete(self): Graph, Edge = self.classes.Graph, self.classes.Edge sess = self._fixture() g = sess.query(Graph).first() e = g.edges[1] del e.end sess.flush() eq_( sess.query(Edge.start, Edge.end).all(), [(3, 4, 5, 6), (14, 5, None, None)] ) def test_save_null(self): """test saving a null composite value See google groups thread for more context: http://groups.google.com/group/sqlalchemy/browse_thread/thread/0c6580a1761b2c29 """ Graph, Edge = self.classes.Graph, self.classes.Edge sess = Session() g = Graph(id=1) e = Edge(None, None) g.edges.append(e) sess.add(g) sess.commit() g2 = sess.query(Graph).get(1) assert g2.edges[-1].start.x is None assert g2.edges[-1].start.y is None def test_expire(self): Graph, Point = self.classes.Graph, self.classes.Point sess = self._fixture() g = 
sess.query(Graph).first() e = g.edges[0] sess.expire(e) assert 'start' not in e.__dict__ assert e.start == Point(3, 4) def test_default_value(self): Edge = self.classes.Edge e = Edge() eq_(e.start, None) class PrimaryKeyTest(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): Table('graphs', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('version_id', Integer, primary_key=True, nullable=True), Column('name', String(30))) @classmethod def setup_mappers(cls): graphs = cls.tables.graphs class Version(cls.Comparable): def __init__(self, id, version): self.id = id self.version = version def __composite_values__(self): return (self.id, self.version) __hash__ = None def __eq__(self, other): return isinstance(other, Version) and \ other.id == self.id and \ other.version == self.version def __ne__(self, other): return not self.__eq__(other) class Graph(cls.Comparable): def __init__(self, version): self.version = version mapper(Graph, graphs, properties={ 'version':sa.orm.composite(Version, graphs.c.id, graphs.c.version_id)}) def _fixture(self): Graph, Version = self.classes.Graph, self.classes.Version sess = Session() g = Graph(Version(1, 1)) sess.add(g) sess.commit() return sess def test_get_by_col(self): Graph = self.classes.Graph sess = self._fixture() g = sess.query(Graph).first() g2 = sess.query(Graph).get([g.id, g.version_id]) eq_(g.version, g2.version) def test_get_by_composite(self): Graph, Version = self.classes.Graph, self.classes.Version sess = self._fixture() g = sess.query(Graph).first() g2 = sess.query(Graph).get(Version(g.id, g.version_id)) eq_(g.version, g2.version) @testing.fails_on('mssql', 'Cannot update identity columns.') def test_pk_mutation(self): Graph, Version = self.classes.Graph, self.classes.Version sess = self._fixture() g = sess.query(Graph).first() g.version = Version(2, 1) sess.commit() g2 = sess.query(Graph).get(Version(2, 1)) eq_(g.version, g2.version) @testing.fails_on_everything_except("sqlite") def test_null_pk(self): Graph, Version = self.classes.Graph, self.classes.Version sess = Session() # test pk with one column NULL # only sqlite can really handle this g = Graph(Version(2, None)) sess.add(g) sess.commit() g2 = sess.query(Graph).filter_by(version=Version(2, None)).one() eq_(g.version, g2.version) class DefaultsTest(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): Table('foobars', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('x1', Integer, default=2), Column('x2', Integer), Column('x3', Integer, server_default="15"), Column('x4', Integer) ) @classmethod def setup_mappers(cls): foobars = cls.tables.foobars class Foobar(cls.Comparable): pass class FBComposite(cls.Comparable): def __init__(self, x1, x2, x3, x4): self.goofy_x1 = x1 self.x2 = x2 self.x3 = x3 self.x4 = x4 def __composite_values__(self): return self.goofy_x1, self.x2, self.x3, self.x4 __hash__ = None def __eq__(self, other): return other.goofy_x1 == self.goofy_x1 and \ other.x2 == self.x2 and \ other.x3 == self.x3 and \ other.x4 == self.x4 def __ne__(self, other): return not self.__eq__(other) def __repr__(self): return "FBComposite(%r, %r, %r, %r)" % ( self.goofy_x1, self.x2, self.x3, self.x4 ) mapper(Foobar, foobars, properties=dict( foob=sa.orm.composite(FBComposite, foobars.c.x1, foobars.c.x2, foobars.c.x3, foobars.c.x4) )) def test_attributes_with_defaults(self): Foobar, FBComposite = self.classes.Foobar, self.classes.FBComposite sess = Session() f1 = Foobar() f1.foob = 
FBComposite(None, 5, None, None) sess.add(f1) sess.flush() eq_(f1.foob, FBComposite(2, 5, 15, None)) f2 = Foobar() sess.add(f2) sess.flush() eq_(f2.foob, FBComposite(2, None, 15, None)) def test_set_composite_values(self): Foobar, FBComposite = self.classes.Foobar, self.classes.FBComposite sess = Session() f1 = Foobar() f1.foob = FBComposite(None, 5, None, None) sess.add(f1) sess.flush() eq_(f1.foob, FBComposite(2, 5, 15, None)) class MappedSelectTest(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): Table('descriptions', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('d1', String(20)), Column('d2', String(20)), ) Table('values', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('description_id', Integer, ForeignKey('descriptions.id'), nullable=False), Column('v1', String(20)), Column('v2', String(20)), ) @classmethod def setup_mappers(cls): values, descriptions = cls.tables.values, cls.tables.descriptions class Descriptions(cls.Comparable): pass class Values(cls.Comparable): pass class CustomValues(cls.Comparable, list): def __init__(self, *args): self.extend(args) def __composite_values__(self): return self desc_values = select( [values, descriptions.c.d1, descriptions.c.d2], descriptions.c.id == values.c.description_id ).alias('descriptions_values') mapper(Descriptions, descriptions, properties={ 'values': relationship(Values, lazy='dynamic'), 'custom_descriptions': composite( CustomValues, descriptions.c.d1, descriptions.c.d2), }) mapper(Values, desc_values, properties={ 'custom_values': composite(CustomValues, desc_values.c.v1, desc_values.c.v2), }) def test_set_composite_attrs_via_selectable(self): Values, CustomValues, values, Descriptions, descriptions = (self.classes.Values, self.classes.CustomValues, self.tables.values, self.classes.Descriptions, self.tables.descriptions) session = Session() d = Descriptions( custom_descriptions = CustomValues('Color', 'Number'), values =[ Values(custom_values = CustomValues('Red', '5')), Values(custom_values=CustomValues('Blue', '1')) ] ) session.add(d) session.commit() eq_( testing.db.execute(descriptions.select()).fetchall(), [(1, u'Color', u'Number')] ) eq_( testing.db.execute(values.select()).fetchall(), [(1, 1, u'Red', u'5'), (2, 1, u'Blue', u'1')] ) class ManyToOneTest(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): Table('a', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('b1', String(20)), Column('b2_id', Integer, ForeignKey('b.id')) ) Table('b', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('data', String(20)) ) @classmethod def setup_mappers(cls): a, b = cls.tables.a, cls.tables.b class A(cls.Comparable): pass class B(cls.Comparable): pass class C(cls.Comparable): def __init__(self, b1, b2): self.b1, self.b2 = b1, b2 def __composite_values__(self): return self.b1, self.b2 def __eq__(self, other): return isinstance(other, C) and \ other.b1 == self.b1 and \ other.b2 == self.b2 mapper(A, a, properties={ 'b2':relationship(B), 'c':composite(C, 'b1', 'b2') }) mapper(B, b) def test_persist(self): A, C, B = (self.classes.A, self.classes.C, self.classes.B) sess = Session() sess.add(A(c=C('b1', B(data='b2')))) sess.commit() a1 = sess.query(A).one() eq_(a1.c, C('b1', B(data='b2'))) def test_query(self): A, C, B = (self.classes.A, self.classes.C, self.classes.B) sess = Session() b1, b2 = B(data='b1'), B(data='b2') a1 = A(c=C('a1b1', b1)) a2 = 
A(c=C('a2b1', b2)) sess.add_all([a1, a2]) sess.commit() eq_( sess.query(A).filter(A.c == C('a2b1', b2)).one(), a2 ) def test_query_aliased(self): A, C, B = (self.classes.A, self.classes.C, self.classes.B) sess = Session() b1, b2 = B(data='b1'), B(data='b2') a1 = A(c=C('a1b1', b1)) a2 = A(c=C('a2b1', b2)) sess.add_all([a1, a2]) sess.commit() ae = aliased(A) eq_( sess.query(ae).filter(ae.c == C('a2b1', b2)).one(), a2 ) class ConfigurationTest(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): Table('edge', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('x1', Integer), Column('y1', Integer), Column('x2', Integer), Column('y2', Integer), ) @classmethod def setup_mappers(cls): class Point(cls.Comparable): def __init__(self, x, y): self.x = x self.y = y def __composite_values__(self): return [self.x, self.y] def __eq__(self, other): return isinstance(other, Point) and \ other.x == self.x and \ other.y == self.y def __ne__(self, other): return not isinstance(other, Point) or \ not self.__eq__(other) class Edge(cls.Comparable): pass def _test_roundtrip(self): Edge, Point = self.classes.Edge, self.classes.Point e1 = Edge(start=Point(3, 4), end=Point(5, 6)) sess = Session() sess.add(e1) sess.commit() eq_( sess.query(Edge).one(), Edge(start=Point(3, 4), end=Point(5, 6)) ) def test_columns(self): edge, Edge, Point = (self.tables.edge, self.classes.Edge, self.classes.Point) mapper(Edge, edge, properties={ 'start':sa.orm.composite(Point, edge.c.x1, edge.c.y1), 'end': sa.orm.composite(Point, edge.c.x2, edge.c.y2) }) self._test_roundtrip() def test_attributes(self): edge, Edge, Point = (self.tables.edge, self.classes.Edge, self.classes.Point) m = mapper(Edge, edge) m.add_property('start', sa.orm.composite(Point, Edge.x1, Edge.y1)) m.add_property('end', sa.orm.composite(Point, Edge.x2, Edge.y2)) self._test_roundtrip() def test_strings(self): edge, Edge, Point = (self.tables.edge, self.classes.Edge, self.classes.Point) m = mapper(Edge, edge) m.add_property('start', sa.orm.composite(Point, 'x1', 'y1')) m.add_property('end', sa.orm.composite(Point, 'x2', 'y2')) self._test_roundtrip() def test_deferred(self): edge, Edge, Point = (self.tables.edge, self.classes.Edge, self.classes.Point) mapper(Edge, edge, properties={ 'start':sa.orm.composite(Point, edge.c.x1, edge.c.y1, deferred=True, group='s'), 'end': sa.orm.composite(Point, edge.c.x2, edge.c.y2, deferred=True) }) self._test_roundtrip() class ComparatorTest(fixtures.MappedTest, testing.AssertsCompiledSQL): __dialect__ = 'default' @classmethod def define_tables(cls, metadata): Table('edge', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('x1', Integer), Column('y1', Integer), Column('x2', Integer), Column('y2', Integer), ) @classmethod def setup_mappers(cls): class Point(cls.Comparable): def __init__(self, x, y): self.x = x self.y = y def __composite_values__(self): return [self.x, self.y] def __eq__(self, other): return isinstance(other, Point) and \ other.x == self.x and \ other.y == self.y def __ne__(self, other): return not isinstance(other, Point) or \ not self.__eq__(other) class Edge(cls.Comparable): def __init__(self, start, end): self.start = start self.end = end def __eq__(self, other): return isinstance(other, Edge) and \ other.id == self.id def _fixture(self, custom): edge, Edge, Point = (self.tables.edge, self.classes.Edge, self.classes.Point) if custom: class CustomComparator(sa.orm.CompositeProperty.Comparator): def near(self, other, d): clauses 
= self.__clause_element__().clauses diff_x = clauses[0] - other.x diff_y = clauses[1] - other.y return diff_x * diff_x + diff_y * diff_y <= d * d mapper(Edge, edge, properties={ 'start': sa.orm.composite(Point, edge.c.x1, edge.c.y1, comparator_factory=CustomComparator), 'end': sa.orm.composite(Point, edge.c.x2, edge.c.y2) }) else: mapper(Edge, edge, properties={ 'start': sa.orm.composite(Point, edge.c.x1, edge.c.y1), 'end': sa.orm.composite(Point, edge.c.x2, edge.c.y2) }) def test_comparator_behavior_default(self): self._fixture(False) self._test_comparator_behavior() def test_comparator_behavior_custom(self): self._fixture(True) self._test_comparator_behavior() def _test_comparator_behavior(self): Edge, Point = (self.classes.Edge, self.classes.Point) sess = Session() e1 = Edge(Point(3, 4), Point(5, 6)) e2 = Edge(Point(14, 5), Point(2, 7)) sess.add_all([e1, e2]) sess.commit() assert sess.query(Edge).\ filter(Edge.start==Point(3, 4)).one() is \ e1 assert sess.query(Edge).\ filter(Edge.start!=Point(3, 4)).first() is \ e2 eq_( sess.query(Edge).filter(Edge.start==None).all(), [] ) def test_default_comparator_factory(self): self._fixture(False) Edge = self.classes.Edge start_prop = Edge.start.property assert start_prop.comparator_factory is CompositeProperty.Comparator def test_custom_comparator_factory(self): self._fixture(True) Edge, Point = (self.classes.Edge, self.classes.Point) edge_1, edge_2 = Edge(Point(0, 0), Point(3, 5)), \ Edge(Point(0, 1), Point(3, 5)) sess = Session() sess.add_all([edge_1, edge_2]) sess.commit() near_edges = sess.query(Edge).filter( Edge.start.near(Point(1, 1), 1) ).all() assert edge_1 not in near_edges assert edge_2 in near_edges near_edges = sess.query(Edge).filter( Edge.start.near(Point(0, 1), 1) ).all() assert edge_1 in near_edges and edge_2 in near_edges def test_order_by(self): self._fixture(False) Edge = self.classes.Edge s = Session() self.assert_compile( s.query(Edge).order_by(Edge.start, Edge.end), "SELECT edge.id AS edge_id, edge.x1 AS edge_x1, " "edge.y1 AS edge_y1, edge.x2 AS edge_x2, edge.y2 AS edge_y2 " "FROM edge ORDER BY edge.x1, edge.y1, edge.x2, edge.y2" ) def test_order_by_aliased(self): self._fixture(False) Edge = self.classes.Edge s = Session() ea = aliased(Edge) self.assert_compile( s.query(ea).order_by(ea.start, ea.end), "SELECT edge_1.id AS edge_1_id, edge_1.x1 AS edge_1_x1, " "edge_1.y1 AS edge_1_y1, edge_1.x2 AS edge_1_x2, " "edge_1.y2 AS edge_1_y2 " "FROM edge AS edge_1 ORDER BY edge_1.x1, edge_1.y1, " "edge_1.x2, edge_1.y2" ) SQLAlchemy-0.8.4/test/orm/test_cycles.py0000644000076500000240000011657412251147172020720 0ustar classicstaff00000000000000"""Tests cyclical mapper relationships. We might want to try an automated generate of much of this, all combos of T1<->T2, with o2m or m2o between them, and a third T3 with o2m/m2o to one/both T1/T2. 
""" from sqlalchemy import testing from sqlalchemy import Integer, String, ForeignKey from sqlalchemy.testing.schema import Table, Column from sqlalchemy.orm import mapper, relationship, backref, \ create_session, sessionmaker from sqlalchemy.testing import eq_ from sqlalchemy.testing.assertsql import RegexSQL, ExactSQL, CompiledSQL, AllOf from sqlalchemy.testing import fixtures class SelfReferentialTest(fixtures.MappedTest): """A self-referential mapper with an additional list of child objects.""" @classmethod def define_tables(cls, metadata): Table('t1', metadata, Column('c1', Integer, primary_key=True, test_needs_autoincrement=True), Column('parent_c1', Integer, ForeignKey('t1.c1')), Column('data', String(20))) Table('t2', metadata, Column('c1', Integer, primary_key=True, test_needs_autoincrement=True), Column('c1id', Integer, ForeignKey('t1.c1')), Column('data', String(20))) @classmethod def setup_classes(cls): class C1(cls.Basic): def __init__(self, data=None): self.data = data class C2(cls.Basic): def __init__(self, data=None): self.data = data def test_single(self): C1, t1 = self.classes.C1, self.tables.t1 mapper(C1, t1, properties = { 'c1s':relationship(C1, cascade="all"), 'parent':relationship(C1, primaryjoin=t1.c.parent_c1 == t1.c.c1, remote_side=t1.c.c1, lazy='select', uselist=False)}) a = C1('head c1') a.c1s.append(C1('another c1')) sess = create_session( ) sess.add(a) sess.flush() sess.delete(a) sess.flush() def test_many_to_one_only(self): """ test that the circular dependency sort can assemble a many-to-one dependency processor when only the object on the "many" side is actually in the list of modified objects. """ C1, t1 = self.classes.C1, self.tables.t1 mapper(C1, t1, properties={ 'parent':relationship(C1, primaryjoin=t1.c.parent_c1 == t1.c.c1, remote_side=t1.c.c1)}) c1 = C1() sess = create_session() sess.add(c1) sess.flush() sess.expunge_all() c1 = sess.query(C1).get(c1.c1) c2 = C1() c2.parent = c1 sess.add(c2) sess.flush() assert c2.parent_c1==c1.c1 def test_cycle(self): C2, C1, t2, t1 = (self.classes.C2, self.classes.C1, self.tables.t2, self.tables.t1) mapper(C1, t1, properties = { 'c1s' : relationship(C1, cascade="all"), 'c2s' : relationship(mapper(C2, t2), cascade="all, delete-orphan")}) a = C1('head c1') a.c1s.append(C1('child1')) a.c1s.append(C1('child2')) a.c1s[0].c1s.append(C1('subchild1')) a.c1s[0].c1s.append(C1('subchild2')) a.c1s[1].c2s.append(C2('child2 data1')) a.c1s[1].c2s.append(C2('child2 data2')) sess = create_session( ) sess.add(a) sess.flush() sess.delete(a) sess.flush() def test_setnull_ondelete(self): C1, t1 = self.classes.C1, self.tables.t1 mapper(C1, t1, properties={ 'children':relationship(C1) }) sess = create_session() c1 = C1() c2 = C1() c1.children.append(c2) sess.add(c1) sess.flush() assert c2.parent_c1 == c1.c1 sess.delete(c1) sess.flush() assert c2.parent_c1 is None sess.expire_all() assert c2.parent_c1 is None class SelfReferentialNoPKTest(fixtures.MappedTest): """A self-referential relationship that joins on a column other than the primary key column""" @classmethod def define_tables(cls, metadata): Table('item', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('uuid', String(32), unique=True, nullable=False), Column('parent_uuid', String(32), ForeignKey('item.uuid'), nullable=True)) @classmethod def setup_classes(cls): class TT(cls.Basic): def __init__(self): self.uuid = hex(id(self)) @classmethod def setup_mappers(cls): item, TT = cls.tables.item, cls.classes.TT mapper(TT, item, properties={ 
'children': relationship( TT, remote_side=[item.c.parent_uuid], backref=backref('parent', remote_side=[item.c.uuid]))}) def test_basic(self): TT = self.classes.TT t1 = TT() t1.children.append(TT()) t1.children.append(TT()) s = create_session() s.add(t1) s.flush() s.expunge_all() t = s.query(TT).filter_by(id=t1.id).one() eq_(t.children[0].parent_uuid, t1.uuid) def test_lazy_clause(self): TT = self.classes.TT s = create_session() t1 = TT() t2 = TT() t1.children.append(t2) s.add(t1) s.flush() s.expunge_all() t = s.query(TT).filter_by(id=t2.id).one() eq_(t.uuid, t2.uuid) eq_(t.parent.uuid, t1.uuid) class InheritTestOne(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): Table("parent", metadata, Column("id", Integer, primary_key=True, test_needs_autoincrement=True), Column("parent_data", String(50)), Column("type", String(10))) Table("child1", metadata, Column("id", Integer, ForeignKey("parent.id"), primary_key=True), Column("child1_data", String(50))) Table("child2", metadata, Column("id", Integer, ForeignKey("parent.id"), primary_key=True), Column("child1_id", Integer, ForeignKey("child1.id"), nullable=False), Column("child2_data", String(50))) @classmethod def setup_classes(cls): class Parent(cls.Basic): pass class Child1(Parent): pass class Child2(Parent): pass @classmethod def setup_mappers(cls): child1, child2, parent, Parent, Child1, Child2 = (cls.tables.child1, cls.tables.child2, cls.tables.parent, cls.classes.Parent, cls.classes.Child1, cls.classes.Child2) mapper(Parent, parent) mapper(Child1, child1, inherits=Parent) mapper(Child2, child2, inherits=Parent, properties=dict( child1=relationship(Child1, primaryjoin=child2.c.child1_id == child1.c.id))) def test_many_to_one_only(self): """test similar to SelfReferentialTest.testmanytooneonly""" Child1, Child2 = self.classes.Child1, self.classes.Child2 session = create_session() c1 = Child1() c1.child1_data = "qwerty" session.add(c1) session.flush() session.expunge_all() c1 = session.query(Child1).filter_by(child1_data="qwerty").one() c2 = Child2() c2.child1 = c1 c2.child2_data = "asdfgh" session.add(c2) # the flush will fail if the UOW does not set up a many-to-one DP # attached to a task corresponding to c1, since "child1_id" is not # nullable session.flush() class InheritTestTwo(fixtures.MappedTest): """ The fix in BiDirectionalManyToOneTest raised this issue, regarding the 'circular sort' containing UOWTasks that were still polymorphic, which could create duplicate entries in the final sort """ @classmethod def define_tables(cls, metadata): Table('a', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('data', String(30)), Column('cid', Integer, ForeignKey('c.id'))) Table('b', metadata, Column('id', Integer, ForeignKey("a.id"), primary_key=True), Column('data', String(30))) Table('c', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('data', String(30)), Column('aid', Integer, ForeignKey('a.id', use_alter=True, name="foo"))) @classmethod def setup_classes(cls): class A(cls.Basic): pass class B(A): pass class C(cls.Basic): pass def test_flush(self): a, A, c, b, C, B = (self.tables.a, self.classes.A, self.tables.c, self.tables.b, self.classes.C, self.classes.B) mapper(A, a, properties={ 'cs':relationship(C, primaryjoin=a.c.cid==c.c.id)}) mapper(B, b, inherits=A, inherit_condition=b.c.id == a.c.id) mapper(C, c, properties={ 'arel':relationship(A, primaryjoin=a.c.id == c.c.aid)}) sess = create_session() bobj = B() sess.add(bobj) cobj = C() 
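# editorial note (illustrative comment, not part of the original test):
# flushing a B (a polymorphic subclass of A) together with a C exercises the
# circular A <-> C dependency described in the class docstring; the test
# simply verifies that the flush completes without duplicate entries in the
# circular sort.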
sess.add(cobj) sess.flush() class BiDirectionalManyToOneTest(fixtures.MappedTest): run_define_tables = 'each' @classmethod def define_tables(cls, metadata): Table('t1', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('data', String(30)), Column('t2id', Integer, ForeignKey('t2.id'))) Table('t2', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('data', String(30)), Column('t1id', Integer, ForeignKey('t1.id', use_alter=True, name="foo_fk"))) Table('t3', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('data', String(30)), Column('t1id', Integer, ForeignKey('t1.id'), nullable=False), Column('t2id', Integer, ForeignKey('t2.id'), nullable=False)) @classmethod def setup_classes(cls): class T1(cls.Basic): pass class T2(cls.Basic): pass class T3(cls.Basic): pass @classmethod def setup_mappers(cls): t2, T2, T3, t1, t3, T1 = (cls.tables.t2, cls.classes.T2, cls.classes.T3, cls.tables.t1, cls.tables.t3, cls.classes.T1) mapper(T1, t1, properties={ 't2':relationship(T2, primaryjoin=t1.c.t2id == t2.c.id)}) mapper(T2, t2, properties={ 't1':relationship(T1, primaryjoin=t2.c.t1id == t1.c.id)}) mapper(T3, t3, properties={ 't1':relationship(T1), 't2':relationship(T2)}) def test_reflush(self): T2, T3, T1 = (self.classes.T2, self.classes.T3, self.classes.T1) o1 = T1() o1.t2 = T2() sess = create_session() sess.add(o1) sess.flush() # the bug here is that the dependency sort comes up with T1/T2 in a # cycle, but there are no T1/T2 objects to be saved. therefore no # "cyclical subtree" gets generated, and one or the other of T1/T2 # gets lost, and processors on T3 dont fire off. the test will then # fail because the FK's on T3 are not nullable. o3 = T3() o3.t1 = o1 o3.t2 = o1.t2 sess.add(o3) sess.flush() def test_reflush_2(self): """A variant on test_reflush()""" T2, T3, T1 = (self.classes.T2, self.classes.T3, self.classes.T1) o1 = T1() o1.t2 = T2() sess = create_session() sess.add(o1) sess.flush() # in this case, T1, T2, and T3 tasks will all be in the cyclical # tree normally. the dependency processors for T3 are part of the # 'extradeps' collection so they all get assembled into the tree # as well. 
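# editorial note (illustrative comment, not part of the original test): the
# second batch of objects below (o1a, o2a, o3b) is added alongside the
# already-persistent o1 and o1.t2, so that this flush really does place live
# T1, T2 and T3 work into the cyclical dependency tree.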
o1a = T1() o2a = T2() sess.add(o1a) sess.add(o2a) o3b = T3() o3b.t1 = o1a o3b.t2 = o2a sess.add(o3b) o3 = T3() o3.t1 = o1 o3.t2 = o1.t2 sess.add(o3) sess.flush() class BiDirectionalOneToManyTest(fixtures.MappedTest): """tests two mappers with a one-to-many relationship to each other.""" run_define_tables = 'each' @classmethod def define_tables(cls, metadata): Table('t1', metadata, Column('c1', Integer, primary_key=True, test_needs_autoincrement=True), Column('c2', Integer, ForeignKey('t2.c1'))) Table('t2', metadata, Column('c1', Integer, primary_key=True, test_needs_autoincrement=True), Column('c2', Integer, ForeignKey('t1.c1', use_alter=True, name='t1c1_fk'))) @classmethod def setup_classes(cls): class C1(cls.Basic): pass class C2(cls.Basic): pass def test_cycle(self): C2, C1, t2, t1 = (self.classes.C2, self.classes.C1, self.tables.t2, self.tables.t1) mapper(C2, t2, properties={ 'c1s': relationship(C1, primaryjoin=t2.c.c1 == t1.c.c2, uselist=True)}) mapper(C1, t1, properties={ 'c2s': relationship(C2, primaryjoin=t1.c.c1 == t2.c.c2, uselist=True)}) a = C1() b = C2() c = C1() d = C2() e = C2() f = C2() a.c2s.append(b) d.c1s.append(c) b.c1s.append(c) sess = create_session() sess.add_all((a, b, c, d, e, f)) sess.flush() class BiDirectionalOneToManyTest2(fixtures.MappedTest): """Two mappers with a one-to-many relationship to each other, with a second one-to-many on one of the mappers""" run_define_tables = 'each' @classmethod def define_tables(cls, metadata): Table('t1', metadata, Column('c1', Integer, primary_key=True, test_needs_autoincrement=True), Column('c2', Integer, ForeignKey('t2.c1')), test_needs_autoincrement=True) Table('t2', metadata, Column('c1', Integer, primary_key=True, test_needs_autoincrement=True), Column('c2', Integer, ForeignKey('t1.c1', use_alter=True, name='t1c1_fq')), test_needs_autoincrement=True) Table('t1_data', metadata, Column('c1', Integer, primary_key=True, test_needs_autoincrement=True), Column('t1id', Integer, ForeignKey('t1.c1')), Column('data', String(20)), test_needs_autoincrement=True) @classmethod def setup_classes(cls): class C1(cls.Basic): pass class C2(cls.Basic): pass class C1Data(cls.Basic): pass @classmethod def setup_mappers(cls): t2, t1, C1Data, t1_data, C2, C1 = (cls.tables.t2, cls.tables.t1, cls.classes.C1Data, cls.tables.t1_data, cls.classes.C2, cls.classes.C1) mapper(C2, t2, properties={ 'c1s': relationship(C1, primaryjoin=t2.c.c1 == t1.c.c2, uselist=True)}) mapper(C1, t1, properties={ 'c2s': relationship(C2, primaryjoin=t1.c.c1 == t2.c.c2, uselist=True), 'data': relationship(mapper(C1Data, t1_data))}) def test_cycle(self): C2, C1, C1Data = (self.classes.C2, self.classes.C1, self.classes.C1Data) a = C1() b = C2() c = C1() d = C2() e = C2() f = C2() a.c2s.append(b) d.c1s.append(c) b.c1s.append(c) a.data.append(C1Data(data='c1data1')) a.data.append(C1Data(data='c1data2')) c.data.append(C1Data(data='c1data3')) sess = create_session() sess.add_all((a, b, c, d, e, f)) sess.flush() sess.delete(d) sess.delete(c) sess.flush() class OneToManyManyToOneTest(fixtures.MappedTest): """ Tests two mappers, one has a one-to-many on the other mapper, the other has a separate many-to-one relationship to the first. two tests will have a row for each item that is dependent on the other. without the "post_update" flag, such relationships raise an exception when dependencies are sorted. 
""" run_define_tables = 'each' @classmethod def define_tables(cls, metadata): Table('ball', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('person_id', Integer, ForeignKey('person.id', use_alter=True, name='fk_person_id')), Column('data', String(30))) Table('person', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('favorite_ball_id', Integer, ForeignKey('ball.id')), Column('data', String(30))) @classmethod def setup_classes(cls): class Person(cls.Basic): pass class Ball(cls.Basic): pass def test_cycle(self): """ This test has a peculiar aspect in that it doesnt create as many dependent relationships as the other tests, and revealed a small glitch in the circular dependency sorting. """ person, ball, Ball, Person = (self.tables.person, self.tables.ball, self.classes.Ball, self.classes.Person) mapper(Ball, ball) mapper(Person, person, properties=dict( balls=relationship(Ball, primaryjoin=ball.c.person_id == person.c.id, remote_side=ball.c.person_id), favorite=relationship(Ball, primaryjoin=person.c.favorite_ball_id == ball.c.id, remote_side=ball.c.id))) b = Ball() p = Person() p.balls.append(b) sess = create_session() sess.add(p) sess.flush() def test_post_update_m2o(self): """A cycle between two rows, with a post_update on the many-to-one""" person, ball, Ball, Person = (self.tables.person, self.tables.ball, self.classes.Ball, self.classes.Person) mapper(Ball, ball) mapper(Person, person, properties=dict( balls=relationship(Ball, primaryjoin=ball.c.person_id == person.c.id, remote_side=ball.c.person_id, post_update=False, cascade="all, delete-orphan"), favorite=relationship(Ball, primaryjoin=person.c.favorite_ball_id == ball.c.id, remote_side=person.c.favorite_ball_id, post_update=True))) b = Ball(data='some data') p = Person(data='some data') p.balls.append(b) p.balls.append(Ball(data='some data')) p.balls.append(Ball(data='some data')) p.balls.append(Ball(data='some data')) p.favorite = b sess = create_session() sess.add(b) sess.add(p) self.assert_sql_execution( testing.db, sess.flush, RegexSQL("^INSERT INTO person", {'data':'some data'}), RegexSQL("^INSERT INTO ball", lambda c: {'person_id':p.id, 'data':'some data'}), RegexSQL("^INSERT INTO ball", lambda c: {'person_id':p.id, 'data':'some data'}), RegexSQL("^INSERT INTO ball", lambda c: {'person_id':p.id, 'data':'some data'}), RegexSQL("^INSERT INTO ball", lambda c: {'person_id':p.id, 'data':'some data'}), ExactSQL("UPDATE person SET favorite_ball_id=:favorite_ball_id " "WHERE person.id = :person_id", lambda ctx:{'favorite_ball_id':p.favorite.id, 'person_id':p.id} ), ) sess.delete(p) self.assert_sql_execution( testing.db, sess.flush, ExactSQL("UPDATE person SET favorite_ball_id=:favorite_ball_id " "WHERE person.id = :person_id", lambda ctx: {'person_id': p.id, 'favorite_ball_id': None}), ExactSQL("DELETE FROM ball WHERE ball.id = :id", None), # lambda ctx:[{'id': 1L}, {'id': 4L}, {'id': 3L}, {'id': 2L}]) ExactSQL("DELETE FROM person WHERE person.id = :id", lambda ctx:[{'id': p.id}]) ) def test_post_update_backref(self): """test bidirectional post_update.""" person, ball, Ball, Person = (self.tables.person, self.tables.ball, self.classes.Ball, self.classes.Person) mapper(Ball, ball) mapper(Person, person, properties=dict( balls=relationship(Ball, primaryjoin=ball.c.person_id == person.c.id, remote_side=ball.c.person_id, post_update=True, backref=backref('person', post_update=True) ), favorite=relationship(Ball, primaryjoin=person.c.favorite_ball_id == 
ball.c.id, remote_side=person.c.favorite_ball_id) )) sess = sessionmaker()() p1 = Person(data='p1') p2 = Person(data='p2') p3 = Person(data='p3') b1 = Ball(data='b1') b1.person = p1 sess.add_all([p1, p2, p3]) sess.commit() # switch here. the post_update # on ball.person can't get tripped up # by the fact that there's a "reverse" prop. b1.person = p2 sess.commit() eq_( p2, b1.person ) # do it the other way p3.balls.append(b1) sess.commit() eq_( p3, b1.person ) def test_post_update_o2m(self): """A cycle between two rows, with a post_update on the one-to-many""" person, ball, Ball, Person = (self.tables.person, self.tables.ball, self.classes.Ball, self.classes.Person) mapper(Ball, ball) mapper(Person, person, properties=dict( balls=relationship(Ball, primaryjoin=ball.c.person_id == person.c.id, remote_side=ball.c.person_id, cascade="all, delete-orphan", post_update=True, backref='person'), favorite=relationship(Ball, primaryjoin=person.c.favorite_ball_id == ball.c.id, remote_side=person.c.favorite_ball_id))) b = Ball(data='some data') p = Person(data='some data') p.balls.append(b) b2 = Ball(data='some data') p.balls.append(b2) b3 = Ball(data='some data') p.balls.append(b3) b4 = Ball(data='some data') p.balls.append(b4) p.favorite = b sess = create_session() sess.add_all((b,p,b2,b3,b4)) self.assert_sql_execution( testing.db, sess.flush, CompiledSQL("INSERT INTO ball (person_id, data) " "VALUES (:person_id, :data)", {'person_id':None, 'data':'some data'}), CompiledSQL("INSERT INTO ball (person_id, data) " "VALUES (:person_id, :data)", {'person_id':None, 'data':'some data'}), CompiledSQL("INSERT INTO ball (person_id, data) " "VALUES (:person_id, :data)", {'person_id':None, 'data':'some data'}), CompiledSQL("INSERT INTO ball (person_id, data) " "VALUES (:person_id, :data)", {'person_id':None, 'data':'some data'}), CompiledSQL("INSERT INTO person (favorite_ball_id, data) " "VALUES (:favorite_ball_id, :data)", lambda ctx:{'favorite_ball_id':b.id, 'data':'some data'}), CompiledSQL("UPDATE ball SET person_id=:person_id " "WHERE ball.id = :ball_id", lambda ctx:[ {'person_id':p.id,'ball_id':b.id}, {'person_id':p.id,'ball_id':b2.id}, {'person_id':p.id,'ball_id':b3.id}, {'person_id':p.id,'ball_id':b4.id} ] ), ) sess.delete(p) self.assert_sql_execution(testing.db, sess.flush, CompiledSQL("UPDATE ball SET person_id=:person_id " "WHERE ball.id = :ball_id", lambda ctx:[ {'person_id': None, 'ball_id': b.id}, {'person_id': None, 'ball_id': b2.id}, {'person_id': None, 'ball_id': b3.id}, {'person_id': None, 'ball_id': b4.id} ] ), CompiledSQL("DELETE FROM person WHERE person.id = :id", lambda ctx:[{'id':p.id}]), CompiledSQL("DELETE FROM ball WHERE ball.id = :id", lambda ctx:[{'id': b.id}, {'id': b2.id}, {'id': b3.id}, {'id': b4.id}]) ) class SelfReferentialPostUpdateTest(fixtures.MappedTest): """Post_update on a single self-referential mapper. """ @classmethod def define_tables(cls, metadata): Table('node', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('path', String(50), nullable=False), Column('parent_id', Integer, ForeignKey('node.id'), nullable=True), Column('prev_sibling_id', Integer, ForeignKey('node.id'), nullable=True), Column('next_sibling_id', Integer, ForeignKey('node.id'), nullable=True)) @classmethod def setup_classes(cls): class Node(cls.Basic): def __init__(self, path=''): self.path = path def test_one(self): """Post_update only fires off when needed. 
This test case used to produce many superfluous update statements, particularly upon delete """ node, Node = self.tables.node, self.classes.Node mapper(Node, node, properties={ 'children': relationship( Node, primaryjoin=node.c.id==node.c.parent_id, cascade="all", backref=backref("parent", remote_side=node.c.id) ), 'prev_sibling': relationship( Node, primaryjoin=node.c.prev_sibling_id==node.c.id, remote_side=node.c.id, uselist=False), 'next_sibling': relationship( Node, primaryjoin=node.c.next_sibling_id==node.c.id, remote_side=node.c.id, uselist=False, post_update=True)}) session = create_session() def append_child(parent, child): if parent.children: parent.children[-1].next_sibling = child child.prev_sibling = parent.children[-1] parent.children.append(child) def remove_child(parent, child): child.parent = None node = child.next_sibling node.prev_sibling = child.prev_sibling child.prev_sibling.next_sibling = node session.delete(child) root = Node('root') about = Node('about') cats = Node('cats') stories = Node('stories') bruce = Node('bruce') append_child(root, about) assert(about.prev_sibling is None) append_child(root, cats) assert(cats.prev_sibling is about) assert(cats.next_sibling is None) assert(about.next_sibling is cats) assert(about.prev_sibling is None) append_child(root, stories) append_child(root, bruce) session.add(root) session.flush() remove_child(root, cats) # pre-trigger lazy loader on 'cats' to make the test easier cats.children self.assert_sql_execution( testing.db, session.flush, AllOf( CompiledSQL("UPDATE node SET prev_sibling_id=:prev_sibling_id " "WHERE node.id = :node_id", lambda ctx:{'prev_sibling_id':about.id, 'node_id':stories.id}), CompiledSQL("UPDATE node SET next_sibling_id=:next_sibling_id " "WHERE node.id = :node_id", lambda ctx:{'next_sibling_id':stories.id, 'node_id':about.id}), CompiledSQL("UPDATE node SET next_sibling_id=:next_sibling_id " "WHERE node.id = :node_id", lambda ctx:{'next_sibling_id':None, 'node_id':cats.id}), ), CompiledSQL("DELETE FROM node WHERE node.id = :id", lambda ctx:[{'id':cats.id}]) ) session.delete(root) self.assert_sql_execution( testing.db, session.flush, CompiledSQL("UPDATE node SET next_sibling_id=:next_sibling_id " "WHERE node.id = :node_id", lambda ctx: [ {'node_id': about.id, 'next_sibling_id': None}, {'node_id': stories.id, 'next_sibling_id': None} ] ), AllOf( CompiledSQL("DELETE FROM node WHERE node.id = :id", lambda ctx:{'id':about.id} ), CompiledSQL("DELETE FROM node WHERE node.id = :id", lambda ctx:{'id':stories.id} ), CompiledSQL("DELETE FROM node WHERE node.id = :id", lambda ctx:{'id':bruce.id} ), ), CompiledSQL("DELETE FROM node WHERE node.id = :id", lambda ctx:{'id':root.id} ), ) about = Node('about') cats = Node('cats') about.next_sibling = cats cats.prev_sibling = about session.add(about) session.flush() session.delete(about) cats.prev_sibling = None session.flush() class SelfReferentialPostUpdateTest2(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): Table("a_table", metadata, Column("id", Integer(), primary_key=True, test_needs_autoincrement=True), Column("fui", String(128)), Column("b", Integer(), ForeignKey("a_table.id"))) @classmethod def setup_classes(cls): class A(cls.Basic): pass def test_one(self): """ Test that post_update remembers to be involved in update operations as well, since it replaces the normal dependency processing completely [ticket:413] """ A, a_table = self.classes.A, self.tables.a_table mapper(A, a_table, properties={ 'foo': relationship(A, 
remote_side=[a_table.c.id], post_update=True)}) session = create_session() f1 = A(fui="f1") session.add(f1) session.flush() f2 = A(fui="f2", foo=f1) # at this point f1 is already inserted. but we need post_update # to fire off anyway session.add(f2) session.flush() session.expunge_all() f1 = session.query(A).get(f1.id) f2 = session.query(A).get(f2.id) assert f2.foo is f1 class SelfReferentialPostUpdateTest3(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): Table('parent', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', String(50), nullable=False), Column('child_id', Integer, ForeignKey('child.id', use_alter=True, name='c1'), nullable=True)) Table('child', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', String(50), nullable=False), Column('child_id', Integer, ForeignKey('child.id')), Column('parent_id', Integer, ForeignKey('parent.id'), nullable=True)) @classmethod def setup_classes(cls): class Parent(cls.Basic): def __init__(self, name=''): self.name = name class Child(cls.Basic): def __init__(self, name=''): self.name = name def test_one(self): Child, Parent, parent, child = (self.classes.Child, self.classes.Parent, self.tables.parent, self.tables.child) mapper(Parent, parent, properties={ 'children':relationship(Child, primaryjoin=parent.c.id==child.c.parent_id), 'child':relationship(Child, primaryjoin=parent.c.child_id==child.c.id, post_update=True) }) mapper(Child, child, properties={ 'parent':relationship(Child, remote_side=child.c.id) }) session = create_session() p1 = Parent('p1') c1 = Child('c1') c2 = Child('c2') p1.children =[c1, c2] c2.parent = c1 p1.child = c2 session.add_all([p1, c1, c2]) session.flush() p2 = Parent('p2') c3 = Child('c3') p2.children = [c3] p2.child = c3 session.add(p2) session.delete(c2) p1.children.remove(c2) p1.child = None session.flush() p2.child = None session.flush() class PostUpdateBatchingTest(fixtures.MappedTest): """test that lots of post update cols batch together into a single UPDATE.""" @classmethod def define_tables(cls, metadata): Table('parent', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', String(50), nullable=False), Column('c1_id', Integer, ForeignKey('child1.id', use_alter=True, name='c1'), nullable=True), Column('c2_id', Integer, ForeignKey('child2.id', use_alter=True, name='c2'), nullable=True), Column('c3_id', Integer, ForeignKey('child3.id', use_alter=True, name='c3'), nullable=True) ) Table('child1', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', String(50), nullable=False), Column('parent_id', Integer, ForeignKey('parent.id'), nullable=False)) Table('child2', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', String(50), nullable=False), Column('parent_id', Integer, ForeignKey('parent.id'), nullable=False)) Table('child3', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', String(50), nullable=False), Column('parent_id', Integer, ForeignKey('parent.id'), nullable=False)) @classmethod def setup_classes(cls): class Parent(cls.Basic): def __init__(self, name=''): self.name = name class Child1(cls.Basic): def __init__(self, name=''): self.name = name class Child2(cls.Basic): def __init__(self, name=''): self.name = name class Child3(cls.Basic): def __init__(self, name=''): self.name = name def test_one(self): child1, child2, child3, 
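        # PostUpdateBatchingTest exercises several post_update relationships
        # pointing from the same parent row to different child tables; the
        # expectation (asserted below via CompiledSQL) is that the deferred
        # foreign-key columns c1_id, c2_id and c3_id are all batched into a
        # single "UPDATE parent SET c1_id=..., c2_id=..., c3_id=..." per row,
        # rather than one UPDATE per post_update relationship.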
Parent, parent, Child1, Child2, Child3 = (self.tables.child1, self.tables.child2, self.tables.child3, self.classes.Parent, self.tables.parent, self.classes.Child1, self.classes.Child2, self.classes.Child3) mapper(Parent, parent, properties={ 'c1s':relationship(Child1, primaryjoin=child1.c.parent_id==parent.c.id), 'c2s':relationship(Child2, primaryjoin=child2.c.parent_id==parent.c.id), 'c3s':relationship(Child3, primaryjoin=child3.c.parent_id==parent.c.id), 'c1':relationship(Child1, primaryjoin=child1.c.id==parent.c.c1_id, post_update=True), 'c2':relationship(Child2, primaryjoin=child2.c.id==parent.c.c2_id, post_update=True), 'c3':relationship(Child3, primaryjoin=child3.c.id==parent.c.c3_id, post_update=True), }) mapper(Child1, child1) mapper(Child2, child2) mapper(Child3, child3) sess = create_session() p1 = Parent('p1') c11, c12, c13 = Child1('c1'), Child1('c2'), Child1('c3') c21, c22, c23 = Child2('c1'), Child2('c2'), Child2('c3') c31, c32, c33 = Child3('c1'), Child3('c2'), Child3('c3') p1.c1s = [c11, c12, c13] p1.c2s = [c21, c22, c23] p1.c3s = [c31, c32, c33] sess.add(p1) sess.flush() p1.c1 = c12 p1.c2 = c23 p1.c3 = c31 self.assert_sql_execution( testing.db, sess.flush, CompiledSQL( "UPDATE parent SET c1_id=:c1_id, c2_id=:c2_id, " "c3_id=:c3_id WHERE parent.id = :parent_id", lambda ctx: {'c2_id': c23.id, 'parent_id': p1.id, 'c1_id': c12.id, 'c3_id': c31.id} ) ) p1.c1 = p1.c2 = p1.c3 = None self.assert_sql_execution( testing.db, sess.flush, CompiledSQL( "UPDATE parent SET c1_id=:c1_id, c2_id=:c2_id, " "c3_id=:c3_id WHERE parent.id = :parent_id", lambda ctx: {'c2_id': None, 'parent_id': p1.id, 'c1_id': None, 'c3_id': None} ) ) SQLAlchemy-0.8.4/test/orm/test_default_strategies.py0000644000076500000240000003723712251150016023301 0ustar classicstaff00000000000000from test.orm import _fixtures from sqlalchemy import testing from sqlalchemy.orm import mapper, relationship, create_session from sqlalchemy import util import sqlalchemy as sa from sqlalchemy.testing import eq_, assert_raises_message class DefaultStrategyOptionsTest(_fixtures.FixtureTest): def _assert_fully_loaded(self, users): # verify everything loaded, with no additional sql needed def go(): # comparison with no additional sql eq_(users, self.static.user_all_result) # keywords are not part of self.static.user_all_result, so # verify all the item keywords were loaded, with no more sql. # 'any' verifies at least some items have keywords; we build # a list for any([...]) instead of any(...) to prove we've # iterated all the items with no sql. 
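            # DefaultStrategyOptionsTest covers the wildcard loader options:
            # lazyload('*'), noload('*'), joinedload('*') and subqueryload('*')
            # set a blanket loading strategy for every relationship in the
            # query, while a per-attribute option given alongside them (e.g.
            # joinedload(User.addresses)) still takes precedence for that
            # attribute.  test_star_must_be_alone asserts that the '*' token
            # cannot be combined with other paths in the same option call, and
            # test_last_one_wins asserts that when several wildcard options
            # are stacked, the last one applied wins.  _assert_fully_loaded
            # and _assert_addresses_loaded verify the resulting object graphs
            # without emitting further SQL.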
f = util.flatten_iterator assert any([i.keywords for i in f([o.items for o in f([u.orders for u in users])])]) self.assert_sql_count(testing.db, go, 0) def _assert_addresses_loaded(self, users): # verify all the addresses were joined loaded with no more sql def go(): for u, static in zip(users, self.static.user_all_result): eq_(u.addresses, static.addresses) self.assert_sql_count(testing.db, go, 0) def _downgrade_fixture(self): users, Keyword, items, order_items, orders, Item, User, \ Address, keywords, item_keywords, Order, addresses = \ self.tables.users, self.classes.Keyword, self.tables.items, \ self.tables.order_items, self.tables.orders, \ self.classes.Item, self.classes.User, self.classes.Address, \ self.tables.keywords, self.tables.item_keywords, \ self.classes.Order, self.tables.addresses mapper(Address, addresses) mapper(Keyword, keywords) mapper(Item, items, properties=dict( keywords=relationship(Keyword, secondary=item_keywords, lazy='subquery', order_by=item_keywords.c.keyword_id))) mapper(Order, orders, properties=dict( items=relationship(Item, secondary=order_items, lazy='subquery', order_by=order_items.c.item_id))) mapper(User, users, properties=dict( addresses=relationship(Address, lazy='joined', order_by=addresses.c.id), orders=relationship(Order, lazy='joined', order_by=orders.c.id))) return create_session() def _upgrade_fixture(self): users, Keyword, items, order_items, orders, Item, User, \ Address, keywords, item_keywords, Order, addresses = \ self.tables.users, self.classes.Keyword, self.tables.items, \ self.tables.order_items, self.tables.orders, \ self.classes.Item, self.classes.User, self.classes.Address, \ self.tables.keywords, self.tables.item_keywords, \ self.classes.Order, self.tables.addresses mapper(Address, addresses) mapper(Keyword, keywords) mapper(Item, items, properties=dict( keywords=relationship(Keyword, secondary=item_keywords, lazy='select', order_by=item_keywords.c.keyword_id))) mapper(Order, orders, properties=dict( items=relationship(Item, secondary=order_items, lazy=True, order_by=order_items.c.item_id))) mapper(User, users, properties=dict( addresses=relationship(Address, lazy=True, order_by=addresses.c.id), orders=relationship(Order, order_by=orders.c.id))) return create_session() def test_downgrade_baseline(self): """Mapper strategy defaults load as expected (compare to rest of DefaultStrategyOptionsTest downgrade tests).""" sess = self._downgrade_fixture() users = [] # test _downgrade_fixture mapper defaults, 3 queries (2 subquery loads). 
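        # _downgrade_fixture maps addresses/orders as lazy='joined' and
        # items/keywords as lazy='subquery', so the baseline query below costs
        # three statements: the main SELECT plus the two subquery loads.  The
        # "downgrade" tests then use lazyload('*') / noload('*') to dial those
        # eager defaults back down; _upgrade_fixture does the reverse, mapping
        # everything as plain lazy loads so that joinedload('*') /
        # subqueryload('*') can upgrade them.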
def go(): users[:] = sess.query(self.classes.User)\ .order_by(self.classes.User.id)\ .all() self.assert_sql_count(testing.db, go, 3) # all loaded with no additional sql self._assert_fully_loaded(users) def test_disable_eagerloads(self): """Mapper eager load strategy defaults can be shut off with enable_eagerloads(False).""" # While this isn't testing a mapper option, it is included # as baseline reference for how XYZload('*') option # should work, namely, it shouldn't affect later queries # (see other test_select_s) sess = self._downgrade_fixture() users = [] # demonstrate that enable_eagerloads loads with only 1 sql def go(): users[:] = sess.query(self.classes.User)\ .enable_eagerloads(False)\ .order_by(self.classes.User.id)\ .all() self.assert_sql_count(testing.db, go, 1) # demonstrate that users[0].orders must now be loaded with 3 sql # (need to lazyload, and 2 subquery: 3 total) def go(): users[0].orders self.assert_sql_count(testing.db, go, 3) def test_last_one_wins(self): sess = self._downgrade_fixture() users = [] def go(): users[:] = sess.query(self.classes.User)\ .options(sa.orm.subqueryload('*'))\ .options(sa.orm.joinedload(self.classes.User.addresses))\ .options(sa.orm.lazyload('*'))\ .order_by(self.classes.User.id)\ .all() self.assert_sql_count(testing.db, go, 1) # verify all the addresses were joined loaded (no more sql) self._assert_addresses_loaded(users) def test_star_must_be_alone(self): sess = self._downgrade_fixture() User = self.classes.User assert_raises_message( sa.exc.ArgumentError, "Wildcard identifier '\*' must be specified alone.", sa.orm.subqueryload, '*', User.addresses ) def test_select_with_joinedload(self): """Mapper load strategy defaults can be downgraded with lazyload('*') option, while explicit joinedload() option is still honored""" sess = self._downgrade_fixture() users = [] # lazyload('*') shuts off 'orders' subquery: only 1 sql def go(): users[:] = sess.query(self.classes.User)\ .options(sa.orm.lazyload('*'))\ .options(sa.orm.joinedload(self.classes.User.addresses))\ .order_by(self.classes.User.id)\ .all() self.assert_sql_count(testing.db, go, 1) # verify all the addresses were joined loaded (no more sql) self._assert_addresses_loaded(users) # users[0] has orders, which need to lazy load, and 2 subquery: # (same as with test_disable_eagerloads): 3 total sql def go(): users[0].orders self.assert_sql_count(testing.db, go, 3) def test_select_with_subqueryload(self): """Mapper load strategy defaults can be downgraded with lazyload('*') option, while explicit subqueryload() option is still honored""" sess = self._downgrade_fixture() users = [] # now test 'default_strategy' option combined with 'subquery' # shuts off 'addresses' load AND orders.items load: 2 sql expected def go(): users[:] = sess.query(self.classes.User)\ .options(sa.orm.lazyload('*'))\ .options(sa.orm.subqueryload(self.classes.User.orders))\ .order_by(self.classes.User.id)\ .all() self.assert_sql_count(testing.db, go, 2) # Verify orders have already been loaded: 0 sql def go(): for u, static in zip(users, self.static.user_all_result): assert len(u.orders) == len(static.orders) self.assert_sql_count(testing.db, go, 0) # Verify lazyload('*') prevented orders.items load # users[0].orders[0] has 3 items, each with keywords: 2 sql # ('items' and 'items.keywords' subquery) def go(): for i in users[0].orders[0].items: i.keywords self.assert_sql_count(testing.db, go, 2) # lastly, make sure they actually loaded properly eq_(users, self.static.user_all_result) def test_noload_with_joinedload(self): 
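        # noload('*') differs from lazyload('*') in that the relationships are
        # not merely deferred - they are never loaded at all, and accessing
        # them yields an empty collection with no SQL emitted, which is what
        # the assertions below check for User.orders.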
"""Mapper load strategy defaults can be downgraded with noload('*') option, while explicit joinedload() option is still honored""" sess = self._downgrade_fixture() users = [] # test noload('*') shuts off 'orders' subquery, only 1 sql def go(): users[:] = sess.query(self.classes.User)\ .options(sa.orm.noload('*'))\ .options(sa.orm.joinedload(self.classes.User.addresses))\ .order_by(self.classes.User.id)\ .all() self.assert_sql_count(testing.db, go, 1) # verify all the addresses were joined loaded (no more sql) self._assert_addresses_loaded(users) # User.orders should have loaded "noload" (meaning []) def go(): for u in users: assert u.orders == [] self.assert_sql_count(testing.db, go, 0) def test_noload_with_subqueryload(self): """Mapper load strategy defaults can be downgraded with noload('*') option, while explicit subqueryload() option is still honored""" sess = self._downgrade_fixture() users = [] # test noload('*') option combined with subqueryload() # shuts off 'addresses' load AND orders.items load: 2 sql expected def go(): users[:] = sess.query(self.classes.User)\ .options(sa.orm.noload('*'))\ .options(sa.orm.subqueryload(self.classes.User.orders))\ .order_by(self.classes.User.id)\ .all() self.assert_sql_count(testing.db, go, 2) def go(): # Verify orders have already been loaded: 0 sql for u, static in zip(users, self.static.user_all_result): assert len(u.orders) == len(static.orders) # Verify noload('*') prevented orders.items load # and set 'items' to [] for u in users: for o in u.orders: assert o.items == [] self.assert_sql_count(testing.db, go, 0) def test_joined(self): """Mapper load strategy defaults can be upgraded with joinedload('*') option.""" sess = self._upgrade_fixture() users = [] # test upgrade all to joined: 1 sql def go(): users[:] = sess.query(self.classes.User)\ .options(sa.orm.joinedload('*'))\ .order_by(self.classes.User.id)\ .all() self.assert_sql_count(testing.db, go, 1) # verify everything loaded, with no additional sql needed self._assert_fully_loaded(users) def test_joined_with_lazyload(self): """Mapper load strategy defaults can be upgraded with joinedload('*') option, while explicit lazyload() option is still honored""" sess = self._upgrade_fixture() users = [] # test joined all but 'keywords': upgraded to 1 sql def go(): users[:] = sess.query(self.classes.User)\ .options(sa.orm.lazyload('orders.items.keywords'))\ .options(sa.orm.joinedload('*'))\ .order_by(self.classes.User.id)\ .all() self.assert_sql_count(testing.db, go, 1) # everything (but keywords) loaded ok # (note self.static.user_all_result contains no keywords) def go(): eq_(users, self.static.user_all_result) self.assert_sql_count(testing.db, go, 0) # verify the items were loaded, while item.keywords were not def go(): # redundant with last test, but illustrative users[0].orders[0].items[0] self.assert_sql_count(testing.db, go, 0) def go(): users[0].orders[0].items[0].keywords self.assert_sql_count(testing.db, go, 1) def test_joined_with_subqueryload(self): """Mapper load strategy defaults can be upgraded with joinedload('*') option, while explicit subqueryload() option is still honored""" sess = self._upgrade_fixture() users = [] # test upgrade all but 'addresses', which is subquery loaded (2 sql) def go(): users[:] = sess.query(self.classes.User)\ .options(sa.orm.subqueryload(self.classes.User.addresses))\ .options(sa.orm.joinedload('*'))\ .order_by(self.classes.User.id)\ .all() self.assert_sql_count(testing.db, go, 2) # verify everything loaded, with no additional sql needed 
self._assert_fully_loaded(users) def test_subquery(self): """Mapper load strategy defaults can be upgraded with subqueryload('*') option.""" sess = self._upgrade_fixture() users = [] # test upgrade all to subquery: 1 sql + 4 relationships = 5 def go(): users[:] = sess.query(self.classes.User)\ .options(sa.orm.subqueryload('*'))\ .order_by(self.classes.User.id)\ .all() self.assert_sql_count(testing.db, go, 5) # verify everything loaded, with no additional sql needed self._assert_fully_loaded(users) def test_subquery_with_lazyload(self): """Mapper load strategy defaults can be upgraded with subqueryload('*') option, while explicit lazyload() option is still honored""" sess = self._upgrade_fixture() users = [] # test subquery all but 'keywords' (1 sql + 3 relationships = 4) def go(): users[:] = sess.query(self.classes.User)\ .options(sa.orm.lazyload('orders.items.keywords'))\ .options(sa.orm.subqueryload('*'))\ .order_by(self.classes.User.id)\ .all() self.assert_sql_count(testing.db, go, 4) # no more sql # (note self.static.user_all_result contains no keywords) def go(): eq_(users, self.static.user_all_result) self.assert_sql_count(testing.db, go, 0) # verify the item.keywords were not loaded def go(): users[0].orders[0].items[0] self.assert_sql_count(testing.db, go, 0) def go(): users[0].orders[0].items[0].keywords self.assert_sql_count(testing.db, go, 1) def test_subquery_with_joinedload(self): """Mapper load strategy defaults can be upgraded with subqueryload('*') option, while multiple explicit joinedload() options are still honored""" sess = self._upgrade_fixture() users = [] # test upgrade all but 'addresses' & 'orders', which are joinedloaded # (1 sql + items + keywords = 3) def go(): users[:] = sess.query(self.classes.User)\ .options(sa.orm.joinedload(self.classes.User.addresses))\ .options(sa.orm.joinedload(self.classes.User.orders))\ .options(sa.orm.subqueryload('*'))\ .order_by(self.classes.User.id)\ .all() self.assert_sql_count(testing.db, go, 3) # verify everything loaded, with no additional sql needed self._assert_fully_loaded(users) SQLAlchemy-0.8.4/test/orm/test_defaults.py0000644000076500000240000001172312251147172021233 0ustar classicstaff00000000000000import sqlalchemy as sa from sqlalchemy import Integer, String, ForeignKey, event from sqlalchemy import testing from sqlalchemy.testing.schema import Table, Column from sqlalchemy.orm import mapper, relationship, create_session from sqlalchemy.testing import fixtures from sqlalchemy.testing import eq_ class TriggerDefaultsTest(fixtures.MappedTest): __requires__ = ('row_triggers',) @classmethod def define_tables(cls, metadata): dt = Table('dt', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('col1', String(20)), Column('col2', String(20), server_default=sa.schema.FetchedValue()), Column('col3', String(20), sa.schema.FetchedValue(for_update=True)), Column('col4', String(20), sa.schema.FetchedValue(), sa.schema.FetchedValue(for_update=True))) for ins in ( sa.DDL("CREATE TRIGGER dt_ins AFTER INSERT ON dt " "FOR EACH ROW BEGIN " "UPDATE dt SET col2='ins', col4='ins' " "WHERE dt.id = NEW.id; END", on='sqlite'), sa.DDL("CREATE TRIGGER dt_ins ON dt AFTER INSERT AS " "UPDATE dt SET col2='ins', col4='ins' " "WHERE dt.id IN (SELECT id FROM inserted);", on='mssql'), sa.DDL("CREATE TRIGGER dt_ins BEFORE INSERT " "ON dt " "FOR EACH ROW " "BEGIN " ":NEW.col2 := 'ins'; :NEW.col4 := 'ins'; END;", on='oracle'), sa.DDL("CREATE TRIGGER dt_ins BEFORE INSERT ON dt " "FOR EACH ROW BEGIN " "SET 
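            # In the dt table above, sa.schema.FetchedValue() marks col2, col3
            # and col4 as populated on the server side (by the triggers
            # created here), so the ORM omits them from its INSERT/UPDATE
            # statements and expires them afterwards, letting the
            # trigger-generated value load on next access, as test_insert and
            # test_update verify; FetchedValue(for_update=True) marks a value
            # generated on UPDATE rather than on INSERT.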
NEW.col2='ins'; SET NEW.col4='ins'; END", on=lambda ddl, event, target, bind, **kw: bind.engine.name not in ('oracle', 'mssql', 'sqlite') ), ): event.listen(dt, 'after_create', ins) event.listen(dt, 'before_drop', sa.DDL("DROP TRIGGER dt_ins")) for up in ( sa.DDL("CREATE TRIGGER dt_up AFTER UPDATE ON dt " "FOR EACH ROW BEGIN " "UPDATE dt SET col3='up', col4='up' " "WHERE dt.id = OLD.id; END", on='sqlite'), sa.DDL("CREATE TRIGGER dt_up ON dt AFTER UPDATE AS " "UPDATE dt SET col3='up', col4='up' " "WHERE dt.id IN (SELECT id FROM deleted);", on='mssql'), sa.DDL("CREATE TRIGGER dt_up BEFORE UPDATE ON dt " "FOR EACH ROW BEGIN " ":NEW.col3 := 'up'; :NEW.col4 := 'up'; END;", on='oracle'), sa.DDL("CREATE TRIGGER dt_up BEFORE UPDATE ON dt " "FOR EACH ROW BEGIN " "SET NEW.col3='up'; SET NEW.col4='up'; END", on=lambda ddl, event, target, bind, **kw: bind.engine.name not in ('oracle', 'mssql', 'sqlite') ), ): event.listen(dt, 'after_create', up) event.listen(dt, 'before_drop', sa.DDL("DROP TRIGGER dt_up")) @classmethod def setup_classes(cls): class Default(cls.Comparable): pass @classmethod def setup_mappers(cls): Default, dt = cls.classes.Default, cls.tables.dt mapper(Default, dt) def test_insert(self): Default = self.classes.Default d1 = Default(id=1) eq_(d1.col1, None) eq_(d1.col2, None) eq_(d1.col3, None) eq_(d1.col4, None) session = create_session() session.add(d1) session.flush() eq_(d1.col1, None) eq_(d1.col2, 'ins') eq_(d1.col3, None) # don't care which trigger fired assert d1.col4 in ('ins', 'up') def test_update(self): Default = self.classes.Default d1 = Default(id=1) session = create_session() session.add(d1) session.flush() d1.col1 = 'set' session.flush() eq_(d1.col1, 'set') eq_(d1.col2, 'ins') eq_(d1.col3, 'up') eq_(d1.col4, 'up') class ExcludedDefaultsTest(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): dt = Table('dt', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('col1', String(20), default="hello"), ) def test_exclude(self): dt = self.tables.dt class Foo(fixtures.BasicEntity): pass mapper(Foo, dt, exclude_properties=('col1',)) f1 = Foo() sess = create_session() sess.add(f1) sess.flush() eq_(dt.select().execute().fetchall(), [(1, "hello")]) SQLAlchemy-0.8.4/test/orm/test_deprecations.py0000644000076500000240000003476012251150016022101 0ustar classicstaff00000000000000"""The collection of modern alternatives to deprecated & removed functionality. Collects specimens of old ORM code and explicitly covers the recommended modern (i.e. not deprecated) alternative to them. The tests snippets here can be migrated directly to the wiki, docs, etc. """ from sqlalchemy import testing from sqlalchemy import Integer, String, ForeignKey, func from sqlalchemy.testing.schema import Table from sqlalchemy.testing.schema import Column from sqlalchemy.orm import mapper, relationship, relation, create_session, sessionmaker from sqlalchemy.testing import fixtures class QueryAlternativesTest(fixtures.MappedTest): '''Collects modern idioms for Queries The docstring for each test case serves as miniature documentation about the deprecated use case, and the test body illustrates (and covers) the intended replacement code to accomplish the same task. Documenting the "old way" including the argument signature helps these cases remain useful to readers even after the deprecated method has been removed from the modern codebase. 
Format: def test_deprecated_thing(self): """Query.methodname(old, arg, **signature) output = session.query(User).deprecatedmethod(inputs) """ # 0.4+ output = session.query(User).newway(inputs) assert output is correct # 0.5+ output = session.query(User).evennewerway(inputs) assert output is correct ''' run_inserts = 'once' run_deletes = None @classmethod def define_tables(cls, metadata): Table('users_table', metadata, Column('id', Integer, primary_key=True), Column('name', String(64))) Table('addresses_table', metadata, Column('id', Integer, primary_key=True), Column('user_id', Integer, ForeignKey('users_table.id')), Column('email_address', String(128)), Column('purpose', String(16)), Column('bounces', Integer, default=0)) @classmethod def setup_classes(cls): class User(cls.Basic): pass class Address(cls.Basic): pass @classmethod def setup_mappers(cls): addresses_table, User, users_table, Address = (cls.tables.addresses_table, cls.classes.User, cls.tables.users_table, cls.classes.Address) mapper(User, users_table, properties=dict( addresses=relationship(Address, backref='user'), )) mapper(Address, addresses_table) @classmethod def fixtures(cls): return dict( users_table=( ('id', 'name'), (1, 'jack'), (2, 'ed'), (3, 'fred'), (4, 'chuck')), addresses_table=( ('id', 'user_id', 'email_address', 'purpose', 'bounces'), (1, 1, 'jack@jack.home', 'Personal', 0), (2, 1, 'jack@jack.bizz', 'Work', 1), (3, 2, 'ed@foo.bar', 'Personal', 0), (4, 3, 'fred@the.fred', 'Personal', 10))) ###################################################################### def test_override_get(self): """MapperExtension.get() x = session.query.get(5) """ Address = self.classes.Address from sqlalchemy.orm.query import Query cache = {} class MyQuery(Query): def get(self, ident, **kwargs): if ident in cache: return cache[ident] else: x = super(MyQuery, self).get(ident) cache[ident] = x return x session = sessionmaker(query_cls=MyQuery)() ad1 = session.query(Address).get(1) assert ad1 in cache.values() def test_load(self): """x = session.query(Address).load(1) x = session.load(Address, 1) """ Address = self.classes.Address session = create_session() ad1 = session.query(Address).populate_existing().get(1) assert bool(ad1) def test_apply_max(self): """Query.apply_max(col) max = session.query(Address).apply_max(Address.bounces) """ Address = self.classes.Address session = create_session() # 0.5.0 maxes = list(session.query(Address).values(func.max(Address.bounces))) max = maxes[0][0] assert max == 10 max = session.query(func.max(Address.bounces)).one()[0] assert max == 10 def test_apply_min(self): """Query.apply_min(col) min = session.query(Address).apply_min(Address.bounces) """ Address = self.classes.Address session = create_session() # 0.5.0 mins = list(session.query(Address).values(func.min(Address.bounces))) min = mins[0][0] assert min == 0 min = session.query(func.min(Address.bounces)).one()[0] assert min == 0 def test_apply_avg(self): """Query.apply_avg(col) avg = session.query(Address).apply_avg(Address.bounces) """ Address = self.classes.Address session = create_session() avgs = list(session.query(Address).values(func.avg(Address.bounces))) avg = avgs[0][0] assert avg > 0 and avg < 10 avg = session.query(func.avg(Address.bounces)).one()[0] assert avg > 0 and avg < 10 def test_apply_sum(self): """Query.apply_sum(col) avg = session.query(Address).apply_avg(Address.bounces) """ Address = self.classes.Address session = create_session() avgs = list(session.query(Address).values(func.sum(Address.bounces))) avg = avgs[0][0] assert 
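        # The deprecated Query.apply_max() / apply_min() / apply_avg() /
        # apply_sum() aggregate helpers are covered by these four tests; the
        # modern spelling queries the aggregate function directly, e.g.
        # session.query(func.sum(Address.bounces)).one()[0], with
        # Query.values(func.max(...)) shown as the intermediate 0.5-era form.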
avg == 11 avg = session.query(func.sum(Address.bounces)).one()[0] assert avg == 11 def test_count_by(self): """Query.count_by(*args, **params) num = session.query(Address).count_by(purpose='Personal') # old-style implicit *_by join num = session.query(User).count_by(purpose='Personal') """ User, Address = self.classes.User, self.classes.Address session = create_session() num = session.query(Address).filter_by(purpose='Personal').count() assert num == 3, num num = (session.query(User).join('addresses'). filter(Address.purpose=='Personal')).count() assert num == 3, num def test_count_whereclause(self): """Query.count(whereclause=None, params=None, **kwargs) num = session.query(Address).count(address_table.c.bounces > 1) """ Address = self.classes.Address session = create_session() num = session.query(Address).filter(Address.bounces > 1).count() assert num == 1, num def test_execute(self): """Query.execute(clauseelement, params=None, *args, **kwargs) users = session.query(User).execute(users_table.select()) """ User, users_table = self.classes.User, self.tables.users_table session = create_session() users = session.query(User).from_statement(users_table.select()).all() assert len(users) == 4 def test_get_by(self): """Query.get_by(*args, **params) user = session.query(User).get_by(name='ed') # 0.3-style implicit *_by join user = session.query(User).get_by(email_addresss='fred@the.fred') """ User, Address = self.classes.User, self.classes.Address session = create_session() user = session.query(User).filter_by(name='ed').first() assert user.name == 'ed' user = (session.query(User).join('addresses'). filter(Address.email_address=='fred@the.fred')).first() assert user.name == 'fred' user = session.query(User).filter( User.addresses.any(Address.email_address=='fred@the.fred')).first() assert user.name == 'fred' def test_instances_entities(self): """Query.instances(cursor, *mappers_or_columns, **kwargs) sel = users_table.join(addresses_table).select(use_labels=True) res = session.query(User).instances(sel.execute(), Address) """ addresses_table, User, users_table, Address = (self.tables.addresses_table, self.classes.User, self.tables.users_table, self.classes.Address) session = create_session() sel = users_table.join(addresses_table).select(use_labels=True) res = list(session.query(User, Address).instances(sel.execute())) assert len(res) == 4 cola, colb = res[0] assert isinstance(cola, User) and isinstance(colb, Address) def test_join_by(self): """Query.join_by(*args, **params) TODO """ session = create_session() def test_join_to(self): """Query.join_to(key) TODO """ session = create_session() def test_join_via(self): """Query.join_via(keys) TODO """ session = create_session() def test_list(self): """Query.list() users = session.query(User).list() """ User = self.classes.User session = create_session() users = session.query(User).all() assert len(users) == 4 def test_scalar(self): """Query.scalar() user = session.query(User).filter(User.id==1).scalar() """ User = self.classes.User session = create_session() user = session.query(User).filter(User.id==1).first() assert user.id==1 def test_select(self): """Query.select(arg=None, **kwargs) users = session.query(User).select(users_table.c.name != None) """ User = self.classes.User session = create_session() users = session.query(User).filter(User.name != None).all() assert len(users) == 4 def test_select_by(self): """Query.select_by(*args, **params) users = session.query(User).select_by(name='fred') # 0.3 magic join on *_by methods users = 
session.query(User).select_by(email_address='fred@the.fred') """ User, Address = self.classes.User, self.classes.Address session = create_session() users = session.query(User).filter_by(name='fred').all() assert len(users) == 1 users = session.query(User).filter(User.name=='fred').all() assert len(users) == 1 users = (session.query(User).join('addresses'). filter_by(email_address='fred@the.fred')).all() assert len(users) == 1 users = session.query(User).filter(User.addresses.any( Address.email_address == 'fred@the.fred')).all() assert len(users) == 1 def test_selectfirst(self): """Query.selectfirst(arg=None, **kwargs) bounced = session.query(Address).selectfirst( addresses_table.c.bounces > 0) """ Address = self.classes.Address session = create_session() bounced = session.query(Address).filter(Address.bounces > 0).first() assert bounced.bounces > 0 def test_selectfirst_by(self): """Query.selectfirst_by(*args, **params) onebounce = session.query(Address).selectfirst_by(bounces=1) # 0.3 magic join on *_by methods onebounce_user = session.query(User).selectfirst_by(bounces=1) """ User, Address = self.classes.User, self.classes.Address session = create_session() onebounce = session.query(Address).filter_by(bounces=1).first() assert onebounce.bounces == 1 onebounce_user = (session.query(User).join('addresses'). filter_by(bounces=1)).first() assert onebounce_user.name == 'jack' onebounce_user = (session.query(User).join('addresses'). filter(Address.bounces == 1)).first() assert onebounce_user.name == 'jack' onebounce_user = session.query(User).filter(User.addresses.any( Address.bounces == 1)).first() assert onebounce_user.name == 'jack' def test_selectone(self): """Query.selectone(arg=None, **kwargs) ed = session.query(User).selectone(users_table.c.name == 'ed') """ User = self.classes.User session = create_session() ed = session.query(User).filter(User.name == 'jack').one() def test_selectone_by(self): """Query.selectone_by ed = session.query(User).selectone_by(name='ed') # 0.3 magic join on *_by methods ed = session.query(User).selectone_by(email_address='ed@foo.bar') """ User, Address = self.classes.User, self.classes.Address session = create_session() ed = session.query(User).filter_by(name='jack').one() ed = session.query(User).filter(User.name == 'jack').one() ed = session.query(User).join('addresses').filter( Address.email_address == 'ed@foo.bar').one() ed = session.query(User).filter(User.addresses.any( Address.email_address == 'ed@foo.bar')).one() def test_select_statement(self): """Query.select_statement(statement, **params) users = session.query(User).select_statement(users_table.select()) """ User, users_table = self.classes.User, self.tables.users_table session = create_session() users = session.query(User).from_statement(users_table.select()).all() assert len(users) == 4 def test_select_text(self): """Query.select_text(text, **params) users = session.query(User).select_text('SELECT * FROM users_table') """ User = self.classes.User session = create_session() users = (session.query(User). 
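        # Query.select_text() and Query.select_statement() both map to
        # Query.from_statement() in the modern API; a plain string passed to
        # from_statement() is interpreted as a textual SELECT, while
        # select_whereclause() maps to Query.filter(), which in this version
        # likewise accepts a string criterion, as the following test shows.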
from_statement('SELECT * FROM users_table')).all() assert len(users) == 4 def test_select_whereclause(self): """Query.select_whereclause(whereclause=None, params=None, **kwargs) users = session,query(User).select_whereclause(users.c.name=='ed') users = session.query(User).select_whereclause("name='ed'") """ User = self.classes.User session = create_session() users = session.query(User).filter(User.name=='ed').all() assert len(users) == 1 and users[0].name == 'ed' users = session.query(User).filter("name='ed'").all() assert len(users) == 1 and users[0].name == 'ed' SQLAlchemy-0.8.4/test/orm/test_descriptor.py0000644000076500000240000000725612251147172021610 0ustar classicstaff00000000000000from sqlalchemy.orm import descriptor_props, aliased from sqlalchemy.orm.interfaces import PropComparator from sqlalchemy.orm.properties import ColumnProperty from sqlalchemy.sql import column from sqlalchemy import Column, Integer, func, String from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.util import partial from sqlalchemy.testing import fixtures from sqlalchemy.testing import eq_ class TestDescriptor(descriptor_props.DescriptorProperty): def __init__(self, cls, key, descriptor=None, doc=None, comparator_factory = None): self.parent = cls.__mapper__ self.key = key self.doc = doc self.descriptor = descriptor if comparator_factory: self._comparator_factory = partial(comparator_factory, self) else: self._comparator_factory = lambda mapper: None class DescriptorInstrumentationTest(fixtures.ORMTest): def _fixture(self): Base = declarative_base() class Foo(Base): __tablename__ = 'foo' id = Column(Integer, primary_key=True) return Foo def test_fixture(self): Foo = self._fixture() d = TestDescriptor(Foo, 'foo') d.instrument_class(Foo.__mapper__) assert Foo.foo def test_property_wrapped_classlevel(self): Foo = self._fixture() prop = property(lambda self:None) Foo.foo = prop d = TestDescriptor(Foo, 'foo') d.instrument_class(Foo.__mapper__) assert Foo().foo is None assert Foo.foo is not prop def test_property_subclass_wrapped_classlevel(self): Foo = self._fixture() class myprop(property): attr = 'bar' def method1(self): return "method1" prop = myprop(lambda self:None) Foo.foo = prop d = TestDescriptor(Foo, 'foo') d.instrument_class(Foo.__mapper__) assert Foo().foo is None assert Foo.foo is not prop assert Foo.foo.attr == 'bar' assert Foo.foo.method1() == 'method1' def test_comparator(self): class Comparator(PropComparator): __hash__ = None attr = 'bar' def method1(self): return "method1" def method2(self, other): return "method2" def __getitem__(self, key): return 'value' def __eq__(self, other): return column('foo') == func.upper(other) Foo = self._fixture() d = TestDescriptor(Foo, 'foo', comparator_factory=Comparator) d.instrument_class(Foo.__mapper__) eq_(Foo.foo.method1(), "method1") eq_(Foo.foo.method2('x'), "method2") assert Foo.foo.attr == 'bar' assert Foo.foo['bar'] == 'value' eq_( (Foo.foo == 'bar').__str__(), "foo = upper(:upper_1)" ) def test_aliased_comparator(self): class Comparator(ColumnProperty.Comparator): __hash__ = None def __eq__(self, other): return func.foobar(self.__clause_element__()) ==\ func.foobar(other) Foo = self._fixture() Foo._name = Column('name', String) def comparator_factory(self, mapper): prop = mapper._props['_name'] return Comparator(prop, mapper) d = TestDescriptor(Foo, 'foo', comparator_factory=comparator_factory) d.instrument_class(Foo.__mapper__) eq_( str(Foo.foo == 'ed'), "foobar(foo.name) = foobar(:foobar_1)" ) eq_( str(aliased(Foo).foo == 
'ed'), "foobar(foo_1.name) = foobar(:foobar_1)" ) SQLAlchemy-0.8.4/test/orm/test_dynamic.py0000644000076500000240000007262612251150016021050 0ustar classicstaff00000000000000from sqlalchemy.testing import eq_, is_ from sqlalchemy.orm import backref, configure_mappers from sqlalchemy import testing from sqlalchemy import desc, select, func, exc from sqlalchemy.orm import mapper, relationship, create_session, Query, \ attributes, exc as orm_exc, Session from sqlalchemy.orm.dynamic import AppenderMixin from sqlalchemy.testing import AssertsCompiledSQL, \ assert_raises_message, assert_raises from test.orm import _fixtures from sqlalchemy.testing.assertsql import CompiledSQL class _DynamicFixture(object): def _user_address_fixture(self, addresses_args={}): users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(User, users, properties={ 'addresses': relationship(Address, lazy="dynamic", **addresses_args) }) mapper(Address, addresses) return User, Address def _order_item_fixture(self, items_args={}): items, Order, orders, order_items, Item = (self.tables.items, self.classes.Order, self.tables.orders, self.tables.order_items, self.classes.Item) mapper(Order, orders, properties={ 'items': relationship(Item, secondary=order_items, lazy="dynamic", **items_args ) }) mapper(Item, items) return Order, Item class DynamicTest(_DynamicFixture, _fixtures.FixtureTest, AssertsCompiledSQL): def test_basic(self): User, Address = self._user_address_fixture() sess = create_session() q = sess.query(User) eq_([User(id=7, addresses=[Address(id=1, email_address='jack@bean.com')])], q.filter(User.id == 7).all()) eq_(self.static.user_address_result, q.all()) def test_statement(self): """test that the .statement accessor returns the actual statement that would render, without any _clones called.""" User, Address = self._user_address_fixture() sess = create_session() q = sess.query(User) u = q.filter(User.id == 7).first() self.assert_compile( u.addresses.statement, "SELECT addresses.id, addresses.user_id, addresses.email_address " "FROM " "addresses WHERE :param_1 = addresses.user_id", use_default_dialect=True ) def test_detached_raise(self): User, Address = self._user_address_fixture() sess = create_session() u = sess.query(User).get(8) sess.expunge(u) assert_raises( orm_exc.DetachedInstanceError, u.addresses.filter_by, email_address='e' ) def test_no_uselist_false(self): User, Address = self._user_address_fixture( addresses_args={"uselist": False}) assert_raises_message( exc.InvalidRequestError, "On relationship User.addresses, 'dynamic' loaders cannot be " "used with many-to-one/one-to-one relationships and/or " "uselist=False.", configure_mappers ) def test_no_m2o(self): users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(Address, addresses, properties={ 'user': relationship(User, lazy='dynamic') }) mapper(User, users) assert_raises_message( exc.InvalidRequestError, "On relationship Address.user, 'dynamic' loaders cannot be " "used with many-to-one/one-to-one relationships and/or " "uselist=False.", configure_mappers ) def test_order_by(self): User, Address = self._user_address_fixture() sess = create_session() u = sess.query(User).get(8) eq_( list(u.addresses.order_by(desc(Address.email_address))), [ Address(email_address=u'ed@wood.com'), Address(email_address=u'ed@lala.com'), Address(email_address=u'ed@bettyboop.com') ] ) def test_configured_order_by(self): addresses = 
self.tables.addresses User, Address = self._user_address_fixture( addresses_args={ "order_by": addresses.c.email_address.desc()}) sess = create_session() u = sess.query(User).get(8) eq_( list(u.addresses), [ Address(email_address=u'ed@wood.com'), Address(email_address=u'ed@lala.com'), Address(email_address=u'ed@bettyboop.com') ] ) # test cancellation of None, replacement with something else eq_( list(u.addresses.order_by(None).order_by(Address.email_address)), [ Address(email_address=u'ed@bettyboop.com'), Address(email_address=u'ed@lala.com'), Address(email_address=u'ed@wood.com') ] ) # test cancellation of None, replacement with nothing eq_( set(u.addresses.order_by(None)), set([ Address(email_address=u'ed@bettyboop.com'), Address(email_address=u'ed@lala.com'), Address(email_address=u'ed@wood.com') ]) ) def test_count(self): User, Address = self._user_address_fixture() sess = create_session() u = sess.query(User).first() eq_(u.addresses.count(), 1) def test_dynamic_on_backref(self): users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(Address, addresses, properties={ 'user': relationship(User, backref=backref('addresses', lazy='dynamic')) }) mapper(User, users) sess = create_session() ad = sess.query(Address).get(1) def go(): ad.user = None self.assert_sql_count(testing.db, go, 0) sess.flush() u = sess.query(User).get(7) assert ad not in u.addresses def test_no_count(self): User, Address = self._user_address_fixture() sess = create_session() q = sess.query(User) # dynamic collection cannot implement __len__() (at least one that # returns a live database result), else additional count() queries are # issued when evaluating in a list context def go(): eq_( q.filter(User.id == 7).all(), [ User(id=7, addresses=[ Address(id=1, email_address='jack@bean.com') ]) ] ) self.assert_sql_count(testing.db, go, 2) def test_no_populate(self): User, Address = self._user_address_fixture() u1 = User() assert_raises_message( NotImplementedError, "Dynamic attributes don't support collection population.", attributes.set_committed_value, u1, 'addresses', [] ) def test_m2m(self): Order, Item = self._order_item_fixture(items_args={ "backref": backref("orders", lazy="dynamic") }) sess = create_session() o1 = Order(id=15, description="order 10") i1 = Item(id=10, description="item 8") o1.items.append(i1) sess.add(o1) sess.flush() assert o1 in i1.orders.all() assert i1 in o1.items.all() @testing.exclude('mysql', 'between', ((5, 1, 49), (5, 1, 52)), 'https://bugs.launchpad.net/ubuntu/+source/mysql-5.1/+bug/706988') def test_association_nonaliased(self): items, Order, orders, order_items, Item = (self.tables.items, self.classes.Order, self.tables.orders, self.tables.order_items, self.classes.Item) mapper(Order, orders, properties={ 'items': relationship(Item, secondary=order_items, lazy="dynamic", order_by=order_items.c.item_id) }) mapper(Item, items) sess = create_session() o = sess.query(Order).first() self.assert_compile( o.items, "SELECT items.id AS items_id, items.description AS " "items_description FROM items," " order_items WHERE :param_1 = order_items.order_id AND " "items.id = order_items.item_id" " ORDER BY order_items.item_id", use_default_dialect=True ) # filter criterion against the secondary table # works eq_( o.items.filter(order_items.c.item_id == 2).all(), [Item(id=2)] ) def test_transient_count(self): User, Address = self._user_address_fixture() u1 = User() u1.addresses.append(Address()) eq_(u1.addresses.count(), 1) def 
test_transient_access(self): User, Address = self._user_address_fixture() u1 = User() u1.addresses.append(Address()) eq_(u1.addresses[0], Address()) def test_custom_query(self): class MyQuery(Query): pass User, Address = self._user_address_fixture( addresses_args={"query_class": MyQuery}) sess = create_session() u = User() sess.add(u) col = u.addresses assert isinstance(col, Query) assert isinstance(col, MyQuery) assert hasattr(col, 'append') eq_(type(col).__name__, 'AppenderMyQuery') q = col.limit(1) assert isinstance(q, Query) assert isinstance(q, MyQuery) assert not hasattr(q, 'append') eq_(type(q).__name__, 'MyQuery') def test_custom_query_with_custom_mixin(self): class MyAppenderMixin(AppenderMixin): def add(self, items): if isinstance(items, list): for item in items: self.append(item) else: self.append(items) class MyQuery(Query): pass class MyAppenderQuery(MyAppenderMixin, MyQuery): query_class = MyQuery User, Address = self._user_address_fixture( addresses_args={ "query_class": MyAppenderQuery}) sess = create_session() u = User() sess.add(u) col = u.addresses assert isinstance(col, Query) assert isinstance(col, MyQuery) assert hasattr(col, 'append') assert hasattr(col, 'add') eq_(type(col).__name__, 'MyAppenderQuery') q = col.limit(1) assert isinstance(q, Query) assert isinstance(q, MyQuery) assert not hasattr(q, 'append') assert not hasattr(q, 'add') eq_(type(q).__name__, 'MyQuery') class UOWTest(_DynamicFixture, _fixtures.FixtureTest, testing.AssertsExecutionResults): run_inserts = None def test_persistence(self): addresses = self.tables.addresses User, Address = self._user_address_fixture() sess = create_session() u1 = User(name='jack') a1 = Address(email_address='foo') sess.add_all([u1, a1]) sess.flush() eq_( testing.db.scalar( select([func.count(1)]).where(addresses.c.user_id != None) ), 0 ) u1 = sess.query(User).get(u1.id) u1.addresses.append(a1) sess.flush() eq_( testing.db.execute( select([addresses]).where(addresses.c.user_id != None) ).fetchall(), [(a1.id, u1.id, 'foo')] ) u1.addresses.remove(a1) sess.flush() eq_( testing.db.scalar( select([func.count(1)]).where(addresses.c.user_id != None) ), 0 ) u1.addresses.append(a1) sess.flush() eq_( testing.db.execute( select([addresses]).where(addresses.c.user_id != None) ).fetchall(), [(a1.id, u1.id, 'foo')] ) a2 = Address(email_address='bar') u1.addresses.remove(a1) u1.addresses.append(a2) sess.flush() eq_( testing.db.execute( select([addresses]).where(addresses.c.user_id != None) ).fetchall(), [(a2.id, u1.id, 'bar')] ) def test_merge(self): addresses = self.tables.addresses User, Address = self._user_address_fixture( addresses_args={ "order_by": addresses.c.email_address}) sess = create_session() u1 = User(name='jack') a1 = Address(email_address='a1') a2 = Address(email_address='a2') a3 = Address(email_address='a3') u1.addresses.append(a2) u1.addresses.append(a3) sess.add_all([u1, a1]) sess.flush() u1 = User(id=u1.id, name='jack') u1.addresses.append(a1) u1.addresses.append(a3) u1 = sess.merge(u1) eq_(attributes.get_history(u1, 'addresses'), ( [a1], [a3], [a2] )) sess.flush() eq_( list(u1.addresses), [a1, a3] ) def test_hasattr(self): User, Address = self._user_address_fixture() u1 = User(name='jack') assert 'addresses' not in u1.__dict__ u1.addresses = [Address(email_address='test')] assert 'addresses' in u1.__dict__ def test_collection_set(self): addresses = self.tables.addresses User, Address = self._user_address_fixture( addresses_args={ "order_by": addresses.c.email_address}) sess = create_session(autoflush=True, 
autocommit=False) u1 = User(name='jack') a1 = Address(email_address='a1') a2 = Address(email_address='a2') a3 = Address(email_address='a3') a4 = Address(email_address='a4') sess.add(u1) u1.addresses = [a1, a3] eq_(list(u1.addresses), [a1, a3]) u1.addresses = [a1, a2, a4] eq_(list(u1.addresses), [a1, a2, a4]) u1.addresses = [a2, a3] eq_(list(u1.addresses), [a2, a3]) u1.addresses = [] eq_(list(u1.addresses), []) def test_noload_append(self): # test that a load of User.addresses is not emitted # when flushing an append User, Address = self._user_address_fixture() sess = Session() u1 = User(name="jack", addresses=[Address(email_address="a1")]) sess.add(u1) sess.commit() u1_id = u1.id sess.expire_all() u1.addresses.append(Address(email_address='a2')) self.assert_sql_execution( testing.db, sess.flush, CompiledSQL( "SELECT users.id AS users_id, users.name AS users_name " "FROM users WHERE users.id = :param_1", lambda ctx: [{"param_1": u1_id}]), CompiledSQL( "INSERT INTO addresses (user_id, email_address) " "VALUES (:user_id, :email_address)", lambda ctx: [{'email_address': 'a2', 'user_id': u1_id}] ) ) def test_noload_remove(self): # test that a load of User.addresses is not emitted # when flushing a remove User, Address = self._user_address_fixture() sess = Session() u1 = User(name="jack", addresses=[Address(email_address="a1")]) a2 = Address(email_address='a2') u1.addresses.append(a2) sess.add(u1) sess.commit() u1_id = u1.id a2_id = a2.id sess.expire_all() u1.addresses.remove(a2) self.assert_sql_execution( testing.db, sess.flush, CompiledSQL( "SELECT users.id AS users_id, users.name AS users_name " "FROM users WHERE users.id = :param_1", lambda ctx: [{"param_1": u1_id}]), CompiledSQL( "SELECT addresses.id AS addresses_id, addresses.email_address " "AS addresses_email_address FROM addresses " "WHERE addresses.id = :param_1", lambda ctx: [{u'param_1': a2_id}] ), CompiledSQL( "UPDATE addresses SET user_id=:user_id WHERE addresses.id = " ":addresses_id", lambda ctx: [{u'addresses_id': a2_id, 'user_id': None}] ) ) def test_rollback(self): User, Address = self._user_address_fixture() sess = create_session( expire_on_commit=False, autocommit=False, autoflush=True) u1 = User(name='jack') u1.addresses.append(Address(email_address='lala@hoho.com')) sess.add(u1) sess.flush() sess.commit() u1.addresses.append(Address(email_address='foo@bar.com')) eq_( u1.addresses.order_by(Address.id).all(), [ Address(email_address='lala@hoho.com'), Address(email_address='foo@bar.com') ] ) sess.rollback() eq_( u1.addresses.all(), [Address(email_address='lala@hoho.com')] ) def _test_delete_cascade(self, expected): addresses = self.tables.addresses User, Address = self._user_address_fixture(addresses_args={ "order_by": addresses.c.id, "backref": "user", "cascade": "save-update" if expected \ else "all, delete" }) sess = create_session(autoflush=True, autocommit=False) u = User(name='ed') u.addresses.extend( [Address(email_address=letter) for letter in 'abcdef'] ) sess.add(u) sess.commit() eq_(testing.db.scalar( addresses.count(addresses.c.user_id == None)), 0) eq_(testing.db.scalar( addresses.count(addresses.c.user_id != None)), 6) sess.delete(u) sess.commit() if expected: eq_(testing.db.scalar( addresses.count(addresses.c.user_id == None)), 6) eq_(testing.db.scalar( addresses.count(addresses.c.user_id != None)), 0) else: eq_(testing.db.scalar(addresses.count()), 0) def test_delete_nocascade(self): self._test_delete_cascade(True) def test_delete_cascade(self): self._test_delete_cascade(False) def test_remove_orphans(self): 
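        # With cascade="all, delete-orphan" on the dynamic relationship,
        # removing an Address from u.addresses below does not merely null out
        # its foreign key - the orphaned row is deleted outright, which is why
        # only the addresses still present in the collection remain queryable
        # afterwards.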
addresses = self.tables.addresses User, Address = self._user_address_fixture(addresses_args={ "order_by": addresses.c.id, "backref": "user", "cascade": "all, delete-orphan" }) sess = create_session(autoflush=True, autocommit=False) u = User(name='ed') u.addresses.extend( [Address(email_address=letter) for letter in 'abcdef'] ) sess.add(u) for a in u.addresses.filter( Address.email_address.in_(['c', 'e', 'f'])): u.addresses.remove(a) eq_( set(ad for ad, in sess.query(Address.email_address)), set(['a', 'b', 'd']) ) def _backref_test(self, autoflush, saveuser): User, Address = self._user_address_fixture(addresses_args={ "backref": "user", }) sess = create_session(autoflush=autoflush, autocommit=False) u = User(name='buffy') a = Address(email_address='foo@bar.com') a.user = u if saveuser: sess.add(u) else: sess.add(a) if not autoflush: sess.flush() assert u in sess assert a in sess eq_(list(u.addresses), [a]) a.user = None if not autoflush: eq_(list(u.addresses), [a]) if not autoflush: sess.flush() eq_(list(u.addresses), []) def test_backref_autoflush_saveuser(self): self._backref_test(True, True) def test_backref_autoflush_savead(self): self._backref_test(True, False) def test_backref_saveuser(self): self._backref_test(False, True) def test_backref_savead(self): self._backref_test(False, False) def test_backref_events(self): User, Address = self._user_address_fixture(addresses_args={ "backref": "user", }) u1 = User() a1 = Address() u1.addresses.append(a1) is_(a1.user, u1) def test_no_deref(self): User, Address = self._user_address_fixture(addresses_args={ "backref": "user", }) session = create_session() user = User() user.name = 'joe' user.fullname = 'Joe User' user.password = 'Joe\'s secret' address = Address() address.email_address = 'joe@joesdomain.example' address.user = user session.add(user) session.flush() session.expunge_all() def query1(): session = create_session(testing.db) user = session.query(User).first() return user.addresses.all() def query2(): session = create_session(testing.db) return session.query(User).first().addresses.all() def query3(): session = create_session(testing.db) user = session.query(User).first() return session.query(User).first().addresses.all() eq_(query1(), [Address(email_address='joe@joesdomain.example')]) eq_(query2(), [Address(email_address='joe@joesdomain.example')]) eq_(query3(), [Address(email_address='joe@joesdomain.example')]) class HistoryTest(_DynamicFixture, _fixtures.FixtureTest): run_inserts = None def _transient_fixture(self, addresses_args={}): User, Address = self._user_address_fixture( addresses_args=addresses_args) u1 = User() a1 = Address() return u1, a1 def _persistent_fixture(self, autoflush=True, addresses_args={}): User, Address = self._user_address_fixture( addresses_args=addresses_args) u1 = User(name='u1') a1 = Address(email_address='a1') s = Session(autoflush=autoflush) s.add(u1) s.flush() return u1, a1, s def _persistent_m2m_fixture(self, autoflush=True, items_args={}): Order, Item = self._order_item_fixture(items_args=items_args) o1 = Order() i1 = Item(description="i1") s = Session(autoflush=autoflush) s.add(o1) s.flush() return o1, i1, s def _assert_history(self, obj, compare, compare_passive=None): if isinstance(obj, self.classes.User): attrname = "addresses" elif isinstance(obj, self.classes.Order): attrname = "items" eq_( attributes.get_history(obj, attrname), compare ) if compare_passive is None: compare_passive = compare eq_( attributes.get_history(obj, attrname, attributes.LOAD_AGAINST_COMMITTED), compare_passive ) def 
test_append_transient(self): u1, a1 = self._transient_fixture() u1.addresses.append(a1) self._assert_history(u1, ([a1], [], []) ) def test_append_persistent(self): u1, a1, s = self._persistent_fixture() u1.addresses.append(a1) self._assert_history(u1, ([a1], [], []) ) def test_remove_transient(self): u1, a1 = self._transient_fixture() u1.addresses.append(a1) u1.addresses.remove(a1) self._assert_history(u1, ([], [], []) ) def test_backref_pop_transient(self): u1, a1 = self._transient_fixture(addresses_args={"backref": "user"}) u1.addresses.append(a1) self._assert_history(u1, ([a1], [], []), ) a1.user = None # removed from added self._assert_history(u1, ([], [], []), ) def test_remove_persistent(self): u1, a1, s = self._persistent_fixture() u1.addresses.append(a1) s.flush() s.expire_all() u1.addresses.remove(a1) self._assert_history(u1, ([], [], [a1]) ) def test_backref_pop_persistent_autoflush_o2m_active_hist(self): u1, a1, s = self._persistent_fixture( addresses_args={"backref": backref("user", active_history=True)}) u1.addresses.append(a1) s.flush() s.expire_all() a1.user = None self._assert_history(u1, ([], [], [a1]), ) def test_backref_pop_persistent_autoflush_m2m(self): o1, i1, s = self._persistent_m2m_fixture( items_args={"backref": "orders"}) o1.items.append(i1) s.flush() s.expire_all() i1.orders.remove(o1) self._assert_history(o1, ([], [], [i1]), ) def test_backref_pop_persistent_noflush_m2m(self): o1, i1, s = self._persistent_m2m_fixture( items_args={"backref": "orders"}, autoflush=False) o1.items.append(i1) s.flush() s.expire_all() i1.orders.remove(o1) self._assert_history(o1, ([], [], [i1]), ) def test_unchanged_persistent(self): Address = self.classes.Address u1, a1, s = self._persistent_fixture() a2, a3 = Address(email_address='a2'), Address(email_address='a3') u1.addresses.append(a1) u1.addresses.append(a2) s.flush() u1.addresses.append(a3) u1.addresses.remove(a2) self._assert_history(u1, ([a3], [a1], [a2]), compare_passive=([a3], [], [a2]) ) def test_replace_transient(self): Address = self.classes.Address u1, a1 = self._transient_fixture() a2, a3, a4, a5 = Address(email_address='a2'), \ Address(email_address='a3'), \ Address(email_address='a4'), \ Address(email_address='a5') u1.addresses = [a1, a2] u1.addresses = [a2, a3, a4, a5] self._assert_history(u1, ([a2, a3, a4, a5], [], []) ) def test_replace_persistent_noflush(self): Address = self.classes.Address u1, a1, s = self._persistent_fixture(autoflush=False) a2, a3, a4, a5 = Address(email_address='a2'), \ Address(email_address='a3'), \ Address(email_address='a4'), \ Address(email_address='a5') u1.addresses = [a1, a2] u1.addresses = [a2, a3, a4, a5] self._assert_history(u1, ([a2, a3, a4, a5], [], []) ) def test_replace_persistent_autoflush(self): Address = self.classes.Address u1, a1, s = self._persistent_fixture(autoflush=True) a2, a3, a4, a5 = Address(email_address='a2'), \ Address(email_address='a3'), \ Address(email_address='a4'), \ Address(email_address='a5') u1.addresses = [a1, a2] u1.addresses = [a2, a3, a4, a5] self._assert_history(u1, ([a3, a4, a5], [a2], [a1]), compare_passive=([a3, a4, a5], [], [a1]) ) def test_persistent_but_readded_noflush(self): u1, a1, s = self._persistent_fixture(autoflush=False) u1.addresses.append(a1) s.flush() u1.addresses.append(a1) self._assert_history(u1, ([], [a1], []), compare_passive=([a1], [], []) ) def test_persistent_but_readded_autoflush(self): u1, a1, s = self._persistent_fixture(autoflush=True) u1.addresses.append(a1) s.flush() u1.addresses.append(a1) self._assert_history(u1, 
([], [a1], []), compare_passive=([a1], [], []) ) def test_missing_but_removed_noflush(self): u1, a1, s = self._persistent_fixture(autoflush=False) u1.addresses.remove(a1) self._assert_history(u1, ([], [], []), compare_passive=([], [], [a1]) ) SQLAlchemy-0.8.4/test/orm/test_eager_relations.py0000644000076500000240000033342412251150016022563 0ustar classicstaff00000000000000"""tests of joined-eager loaded attributes""" from sqlalchemy.testing import eq_, is_, is_not_ import sqlalchemy as sa from sqlalchemy import testing from sqlalchemy.orm import joinedload, deferred, undefer, \ joinedload_all, backref, eagerload, Session, immediateload from sqlalchemy import Integer, String, Date, ForeignKey, and_, select, \ func from sqlalchemy.testing.schema import Table, Column from sqlalchemy.orm import mapper, relationship, create_session, \ lazyload, aliased, column_property from sqlalchemy.sql import operators from sqlalchemy.testing import eq_, assert_raises, \ assert_raises_message from sqlalchemy.testing.assertsql import CompiledSQL from sqlalchemy.testing import fixtures from test.orm import _fixtures from sqlalchemy.util import OrderedDict as odict import datetime class EagerTest(_fixtures.FixtureTest, testing.AssertsCompiledSQL): run_inserts = 'once' run_deletes = None def test_basic(self): users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(User, users, properties={ 'addresses':relationship(mapper(Address, addresses), lazy='joined', order_by=Address.id) }) sess = create_session() q = sess.query(User) eq_([User(id=7, addresses=[Address(id=1, email_address='jack@bean.com')])], q.filter(User.id==7).all()) eq_(self.static.user_address_result, q.order_by(User.id).all()) def test_late_compile(self): User, Address, addresses, users = (self.classes.User, self.classes.Address, self.tables.addresses, self.tables.users) m = mapper(User, users) sess = create_session() sess.query(User).all() m.add_property("addresses", relationship(mapper(Address, addresses))) sess.expunge_all() def go(): eq_( [User(id=7, addresses=[Address(id=1, email_address='jack@bean.com')])], sess.query(User).options(joinedload('addresses')).filter(User.id==7).all() ) self.assert_sql_count(testing.db, go, 1) def test_no_orphan(self): """An eagerly loaded child object is not marked as an orphan""" users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(User, users, properties={ 'addresses':relationship(Address, cascade="all,delete-orphan", lazy='joined') }) mapper(Address, addresses) sess = create_session() user = sess.query(User).get(7) assert getattr(User, 'addresses').\ hasparent(sa.orm.attributes.instance_state(user.addresses[0]), optimistic=True) assert not sa.orm.class_mapper(Address).\ _is_orphan(sa.orm.attributes.instance_state(user.addresses[0])) def test_orderby(self): users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(User, users, properties = { 'addresses':relationship(mapper(Address, addresses), lazy='joined', order_by=addresses.c.email_address), }) q = create_session().query(User) eq_([ User(id=7, addresses=[ Address(id=1) ]), User(id=8, addresses=[ Address(id=3, email_address='ed@bettyboop.com'), Address(id=4, email_address='ed@lala.com'), Address(id=2, email_address='ed@wood.com') ]), User(id=9, addresses=[ Address(id=5) ]), User(id=10, addresses=[]) ], q.order_by(User.id).all()) def 
test_orderby_multi(self): users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(User, users, properties = { 'addresses':relationship(mapper(Address, addresses), lazy='joined', order_by=[addresses.c.email_address, addresses.c.id]), }) q = create_session().query(User) eq_([ User(id=7, addresses=[ Address(id=1) ]), User(id=8, addresses=[ Address(id=3, email_address='ed@bettyboop.com'), Address(id=4, email_address='ed@lala.com'), Address(id=2, email_address='ed@wood.com') ]), User(id=9, addresses=[ Address(id=5) ]), User(id=10, addresses=[]) ], q.order_by(User.id).all()) def test_orderby_related(self): """A regular mapper select on a single table can order by a relationship to a second table""" Address, addresses, users, User = (self.classes.Address, self.tables.addresses, self.tables.users, self.classes.User) mapper(Address, addresses) mapper(User, users, properties = dict( addresses = relationship(Address, lazy='joined', order_by=addresses.c.id), )) q = create_session().query(User) l = q.filter(User.id==Address.user_id).order_by(Address.email_address).all() eq_([ User(id=8, addresses=[ Address(id=2, email_address='ed@wood.com'), Address(id=3, email_address='ed@bettyboop.com'), Address(id=4, email_address='ed@lala.com'), ]), User(id=9, addresses=[ Address(id=5) ]), User(id=7, addresses=[ Address(id=1) ]), ], l) def test_orderby_desc(self): Address, addresses, users, User = (self.classes.Address, self.tables.addresses, self.tables.users, self.classes.User) mapper(Address, addresses) mapper(User, users, properties = dict( addresses = relationship(Address, lazy='joined', order_by=[sa.desc(addresses.c.email_address)]), )) sess = create_session() eq_([ User(id=7, addresses=[ Address(id=1) ]), User(id=8, addresses=[ Address(id=2, email_address='ed@wood.com'), Address(id=4, email_address='ed@lala.com'), Address(id=3, email_address='ed@bettyboop.com'), ]), User(id=9, addresses=[ Address(id=5) ]), User(id=10, addresses=[]) ], sess.query(User).order_by(User.id).all()) def test_deferred_fk_col(self): users, Dingaling, User, dingalings, Address, addresses = (self.tables.users, self.classes.Dingaling, self.classes.User, self.tables.dingalings, self.classes.Address, self.tables.addresses) mapper(Address, addresses, properties={ 'user_id':deferred(addresses.c.user_id), 'user':relationship(User, lazy='joined') }) mapper(User, users) sess = create_session() for q in [ sess.query(Address).filter(Address.id.in_([1, 4, 5])).order_by(Address.id), sess.query(Address).filter(Address.id.in_([1, 4, 5])).order_by(Address.id).limit(3) ]: sess.expunge_all() eq_(q.all(), [Address(id=1, user=User(id=7)), Address(id=4, user=User(id=8)), Address(id=5, user=User(id=9))] ) sess.expunge_all() a = sess.query(Address).filter(Address.id==1).all()[0] def go(): eq_(a.user_id, 7) # assert that the eager loader added 'user_id' to the row and deferred # loading of that col was disabled self.assert_sql_count(testing.db, go, 0) sess.expunge_all() a = sess.query(Address).filter(Address.id==1).first() def go(): eq_(a.user_id, 7) # assert that the eager loader added 'user_id' to the row and deferred # loading of that col was disabled self.assert_sql_count(testing.db, go, 0) # do the mapping in reverse # (we would have just used an "addresses" backref but the test # fixtures then require the whole backref to be set up, lazy loaders # trigger, etc.) 
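# Clarifying note on the reversed mapping that follows: the joined eager load now
# runs from User toward Address, so the loader has no need to un-defer
# Address.user_id; the assert_sql_count(testing.db, go, 1) further below therefore
# expects exactly one extra SELECT when the deferred column is first accessed.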
sa.orm.clear_mappers() mapper(Address, addresses, properties={ 'user_id':deferred(addresses.c.user_id), }) mapper(User, users, properties={ 'addresses':relationship(Address, lazy='joined')}) for q in [ sess.query(User).filter(User.id==7), sess.query(User).filter(User.id==7).limit(1) ]: sess.expunge_all() eq_(q.all(), [User(id=7, addresses=[Address(id=1)])] ) sess.expunge_all() u = sess.query(User).get(7) def go(): eq_(u.addresses[0].user_id, 7) # assert that the eager loader didn't have to affect 'user_id' here # and that its still deferred self.assert_sql_count(testing.db, go, 1) sa.orm.clear_mappers() mapper(User, users, properties={ 'addresses':relationship(Address, lazy='joined', order_by=addresses.c.id)}) mapper(Address, addresses, properties={ 'user_id':deferred(addresses.c.user_id), 'dingalings':relationship(Dingaling, lazy='joined')}) mapper(Dingaling, dingalings, properties={ 'address_id':deferred(dingalings.c.address_id)}) sess.expunge_all() def go(): u = sess.query(User).get(8) eq_(User(id=8, addresses=[Address(id=2, dingalings=[Dingaling(id=1)]), Address(id=3), Address(id=4)]), u) self.assert_sql_count(testing.db, go, 1) def test_options_pathing(self): users, Keyword, orders, items, order_items, Order, Item, User, keywords, item_keywords = (self.tables.users, self.classes.Keyword, self.tables.orders, self.tables.items, self.tables.order_items, self.classes.Order, self.classes.Item, self.classes.User, self.tables.keywords, self.tables.item_keywords) mapper(User, users, properties={ 'orders':relationship(Order, order_by=orders.c.id), # o2m, m2o }) mapper(Order, orders, properties={ 'items':relationship(Item, secondary=order_items, order_by=items.c.id), #m2m }) mapper(Item, items, properties={ 'keywords':relationship(Keyword, secondary=item_keywords, order_by=keywords.c.id) #m2m }) mapper(Keyword, keywords) for opt, count in [ (( joinedload(User.orders, Order.items), ), 10), ((joinedload("orders.items"), ), 10), (( joinedload(User.orders, ), joinedload(User.orders, Order.items), joinedload(User.orders, Order.items, Item.keywords), ), 1), (( joinedload(User.orders, Order.items, Item.keywords), ), 10), (( joinedload(User.orders, Order.items), joinedload(User.orders, Order.items, Item.keywords), ), 5), ]: sess = create_session() def go(): eq_( sess.query(User).options(*opt).order_by(User.id).all(), self.static.user_item_keyword_result ) self.assert_sql_count(testing.db, go, count) def test_disable_dynamic(self): """test no joined option on a dynamic.""" users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(User, users, properties={ 'addresses':relationship(Address, lazy="dynamic") }) mapper(Address, addresses) sess = create_session() assert_raises_message( sa.exc.InvalidRequestError, "User.addresses' does not support object population - eager loading cannot be applied.", sess.query(User).options(joinedload(User.addresses)).first, ) def test_many_to_many(self): keywords, items, item_keywords, Keyword, Item = (self.tables.keywords, self.tables.items, self.tables.item_keywords, self.classes.Keyword, self.classes.Item) mapper(Keyword, keywords) mapper(Item, items, properties = dict( keywords = relationship(Keyword, secondary=item_keywords, lazy='joined', order_by=keywords.c.id))) q = create_session().query(Item).order_by(Item.id) def go(): eq_(self.static.item_keyword_result, q.all()) self.assert_sql_count(testing.db, go, 1) def go(): eq_(self.static.item_keyword_result[0:2], q.join('keywords').filter(Keyword.name == 
'red').all()) self.assert_sql_count(testing.db, go, 1) def go(): eq_(self.static.item_keyword_result[0:2], (q.join('keywords', aliased=True). filter(Keyword.name == 'red')).all()) self.assert_sql_count(testing.db, go, 1) def test_eager_option(self): keywords, items, item_keywords, Keyword, Item = (self.tables.keywords, self.tables.items, self.tables.item_keywords, self.classes.Keyword, self.classes.Item) mapper(Keyword, keywords) mapper(Item, items, properties = dict( keywords = relationship(Keyword, secondary=item_keywords, lazy='select', order_by=keywords.c.id))) q = create_session().query(Item) def go(): eq_(self.static.item_keyword_result[0:2], (q.options(joinedload('keywords')). join('keywords').filter(keywords.c.name == 'red')).order_by(Item.id).all()) self.assert_sql_count(testing.db, go, 1) def test_cyclical(self): """A circular eager relationship breaks the cycle with a lazy loader""" Address, addresses, users, User = (self.classes.Address, self.tables.addresses, self.tables.users, self.classes.User) mapper(Address, addresses) mapper(User, users, properties = dict( addresses = relationship(Address, lazy='joined', backref=sa.orm.backref('user', lazy='joined'), order_by=Address.id) )) eq_(sa.orm.class_mapper(User).get_property('addresses').lazy, 'joined') eq_(sa.orm.class_mapper(Address).get_property('user').lazy, 'joined') sess = create_session() eq_(self.static.user_address_result, sess.query(User).order_by(User.id).all()) def test_double(self): """Eager loading with two relationships simultaneously, from the same table, using aliases.""" users, orders, User, Address, Order, addresses = (self.tables.users, self.tables.orders, self.classes.User, self.classes.Address, self.classes.Order, self.tables.addresses) openorders = sa.alias(orders, 'openorders') closedorders = sa.alias(orders, 'closedorders') mapper(Address, addresses) mapper(Order, orders) open_mapper = mapper(Order, openorders, non_primary=True) closed_mapper = mapper(Order, closedorders, non_primary=True) mapper(User, users, properties = dict( addresses = relationship(Address, lazy='joined', order_by=addresses.c.id), open_orders = relationship( open_mapper, primaryjoin=sa.and_(openorders.c.isopen == 1, users.c.id==openorders.c.user_id), lazy='joined', order_by=openorders.c.id), closed_orders = relationship( closed_mapper, primaryjoin=sa.and_(closedorders.c.isopen == 0, users.c.id==closedorders.c.user_id), lazy='joined', order_by=closedorders.c.id))) q = create_session().query(User).order_by(User.id) def go(): eq_([ User( id=7, addresses=[Address(id=1)], open_orders = [Order(id=3)], closed_orders = [Order(id=1), Order(id=5)] ), User( id=8, addresses=[Address(id=2), Address(id=3), Address(id=4)], open_orders = [], closed_orders = [] ), User( id=9, addresses=[Address(id=5)], open_orders = [Order(id=4)], closed_orders = [Order(id=2)] ), User(id=10) ], q.all()) self.assert_sql_count(testing.db, go, 1) def test_double_same_mappers(self): """Eager loading with two relationships simulatneously, from the same table, using aliases.""" addresses, items, order_items, orders, Item, User, Address, Order, users = (self.tables.addresses, self.tables.items, self.tables.order_items, self.tables.orders, self.classes.Item, self.classes.User, self.classes.Address, self.classes.Order, self.tables.users) mapper(Address, addresses) mapper(Order, orders, properties={ 'items': relationship(Item, secondary=order_items, lazy='joined', order_by=items.c.id)}) mapper(Item, items) mapper(User, users, properties=dict( addresses=relationship(Address, 
lazy='joined', order_by=addresses.c.id), open_orders=relationship( Order, primaryjoin=sa.and_(orders.c.isopen == 1, users.c.id==orders.c.user_id), lazy='joined', order_by=orders.c.id), closed_orders=relationship( Order, primaryjoin=sa.and_(orders.c.isopen == 0, users.c.id==orders.c.user_id), lazy='joined', order_by=orders.c.id))) q = create_session().query(User).order_by(User.id) def go(): eq_([ User(id=7, addresses=[ Address(id=1)], open_orders=[Order(id=3, items=[ Item(id=3), Item(id=4), Item(id=5)])], closed_orders=[Order(id=1, items=[ Item(id=1), Item(id=2), Item(id=3)]), Order(id=5, items=[ Item(id=5)])]), User(id=8, addresses=[ Address(id=2), Address(id=3), Address(id=4)], open_orders = [], closed_orders = []), User(id=9, addresses=[ Address(id=5)], open_orders=[ Order(id=4, items=[ Item(id=1), Item(id=5)])], closed_orders=[ Order(id=2, items=[ Item(id=1), Item(id=2), Item(id=3)])]), User(id=10) ], q.all()) self.assert_sql_count(testing.db, go, 1) def test_no_false_hits(self): """Eager loaders don't interpret main table columns as part of their eager load.""" addresses, orders, User, Address, Order, users = (self.tables.addresses, self.tables.orders, self.classes.User, self.classes.Address, self.classes.Order, self.tables.users) mapper(User, users, properties={ 'addresses':relationship(Address, lazy='joined'), 'orders':relationship(Order, lazy='joined') }) mapper(Address, addresses) mapper(Order, orders) allusers = create_session().query(User).all() # using a textual select, the columns will be 'id' and 'name'. the # eager loaders have aliases which should not hit on those columns, # they should be required to locate only their aliased/fully table # qualified column name. noeagers = create_session().query(User).\ from_statement("select * from users").all() assert 'orders' not in noeagers[0].__dict__ assert 'addresses' not in noeagers[0].__dict__ @testing.fails_on('maxdb', 'FIXME: unknown') def test_limit(self): """Limit operations combined with lazy-load relationships.""" users, items, order_items, orders, Item, User, Address, Order, addresses = (self.tables.users, self.tables.items, self.tables.order_items, self.tables.orders, self.classes.Item, self.classes.User, self.classes.Address, self.classes.Order, self.tables.addresses) mapper(Item, items) mapper(Order, orders, properties={ 'items':relationship(Item, secondary=order_items, lazy='joined', order_by=items.c.id) }) mapper(User, users, properties={ 'addresses':relationship(mapper(Address, addresses), lazy='joined', order_by=addresses.c.id), 'orders':relationship(Order, lazy='select', order_by=orders.c.id) }) sess = create_session() q = sess.query(User) l = q.order_by(User.id).limit(2).offset(1).all() eq_(self.static.user_all_result[1:3], l) def test_distinct(self): Address, addresses, users, User = (self.classes.Address, self.tables.addresses, self.tables.users, self.classes.User) # this is an involved 3x union of the users table to get a lot of rows. # then see if the "distinct" works its way out. you actually get the same # result with or without the distinct, just via less or more rows. 
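# Illustrative only (not part of the original test): the statement assembled
# below is conceptually
#
#     SELECT u2.id AS u2_id, u2.name AS u2_name FROM users AS u2
#     UNION ALL
#     SELECT u2.id AS u2_id, u2.name AS u2_name FROM users AS u2
#     UNION ALL
#     SELECT u2.id AS u2_id, u2.name AS u2_name FROM users AS u2
#
# so every users row appears three times; filtering User.id against u2_id
# multiplies the parent rows, and .distinct() is what collapses them back down
# to the expected user_address_result.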
u2 = users.alias('u2') s = sa.union_all(u2.select(use_labels=True), u2.select(use_labels=True), u2.select(use_labels=True)).alias('u') mapper(User, users, properties={ 'addresses':relationship(mapper(Address, addresses), lazy='joined', order_by=addresses.c.id), }) sess = create_session() q = sess.query(User) def go(): l = q.filter(s.c.u2_id==User.id).distinct().order_by(User.id).all() eq_(self.static.user_address_result, l) self.assert_sql_count(testing.db, go, 1) @testing.fails_on('maxdb', 'FIXME: unknown') def test_limit_2(self): keywords, items, item_keywords, Keyword, Item = (self.tables.keywords, self.tables.items, self.tables.item_keywords, self.classes.Keyword, self.classes.Item) mapper(Keyword, keywords) mapper(Item, items, properties = dict( keywords = relationship(Keyword, secondary=item_keywords, lazy='joined', order_by=[keywords.c.id]), )) sess = create_session() q = sess.query(Item) l = q.filter((Item.description=='item 2') | (Item.description=='item 5') | (Item.description=='item 3')).\ order_by(Item.id).limit(2).all() eq_(self.static.item_keyword_result[1:3], l) @testing.fails_on('maxdb', 'FIXME: unknown') def test_limit_3(self): """test that the ORDER BY is propagated from the inner select to the outer select, when using the 'wrapped' select statement resulting from the combination of eager loading and limit/offset clauses.""" addresses, items, order_items, orders, Item, User, Address, Order, users = (self.tables.addresses, self.tables.items, self.tables.order_items, self.tables.orders, self.classes.Item, self.classes.User, self.classes.Address, self.classes.Order, self.tables.users) mapper(Item, items) mapper(Order, orders, properties = dict( items = relationship(Item, secondary=order_items, lazy='joined') )) mapper(Address, addresses) mapper(User, users, properties = dict( addresses = relationship(Address, lazy='joined', order_by=addresses.c.id), orders = relationship(Order, lazy='joined', order_by=orders.c.id), )) sess = create_session() q = sess.query(User) if not testing.against('maxdb', 'mssql'): l = q.join('orders').order_by(Order.user_id.desc()).limit(2).offset(1) eq_([ User(id=9, orders=[Order(id=2), Order(id=4)], addresses=[Address(id=5)] ), User(id=7, orders=[Order(id=1), Order(id=3), Order(id=5)], addresses=[Address(id=1)] ) ], l.all()) l = q.join('addresses').order_by(Address.email_address.desc()).limit(1).offset(0) eq_([ User(id=7, orders=[Order(id=1), Order(id=3), Order(id=5)], addresses=[Address(id=1)] ) ], l.all()) def test_limit_4(self): User, Order, addresses, users, orders = (self.classes.User, self.classes.Order, self.tables.addresses, self.tables.users, self.tables.orders) # tests the LIMIT/OFFSET aliasing on a mapper # against a select. 
original issue from ticket #904 sel = sa.select([users, addresses.c.email_address], users.c.id==addresses.c.user_id).alias('useralias') mapper(User, sel, properties={ 'orders':relationship(Order, primaryjoin=sel.c.id==orders.c.user_id, lazy='joined', order_by=orders.c.id) }) mapper(Order, orders) sess = create_session() eq_(sess.query(User).first(), User(name=u'jack',orders=[ Order(address_id=1,description=u'order 1',isopen=0,user_id=7,id=1), Order(address_id=1,description=u'order 3',isopen=1,user_id=7,id=3), Order(address_id=None,description=u'order 5',isopen=0,user_id=7,id=5)], email_address=u'jack@bean.com',id=7) ) def test_useget_cancels_eager(self): """test that a one to many lazyload cancels the unnecessary eager many-to-one join on the other side.""" users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(User, users) mapper(Address, addresses, properties={ 'user':relationship(User, lazy='joined', backref='addresses') }) sess = create_session() u1 = sess.query(User).filter(User.id==8).one() def go(): eq_(u1.addresses[0].user, u1) self.assert_sql_execution(testing.db, go, CompiledSQL( "SELECT addresses.id AS addresses_id, addresses.user_id AS " "addresses_user_id, addresses.email_address AS " "addresses_email_address FROM addresses WHERE :param_1 = " "addresses.user_id", {'param_1': 8}) ) def test_manytoone_limit(self): """test that the subquery wrapping only occurs with limit/offset and m2m or o2m joins present.""" users, items, order_items, Order, Item, User, Address, orders, addresses = (self.tables.users, self.tables.items, self.tables.order_items, self.classes.Order, self.classes.Item, self.classes.User, self.classes.Address, self.tables.orders, self.tables.addresses) mapper(User, users, properties=odict( orders=relationship(Order, backref='user') )) mapper(Order, orders, properties=odict([ ('items', relationship(Item, secondary=order_items, backref='orders')), ('address', relationship(Address)) ])) mapper(Address, addresses) mapper(Item, items) sess = create_session() self.assert_compile( sess.query(User).options(joinedload(User.orders)).limit(10), "SELECT anon_1.users_id AS anon_1_users_id, anon_1.users_name AS anon_1_users_name, " "orders_1.id AS orders_1_id, orders_1.user_id AS orders_1_user_id, orders_1.address_id AS " "orders_1_address_id, orders_1.description AS orders_1_description, orders_1.isopen AS orders_1_isopen " "FROM (SELECT users.id AS users_id, users.name AS users_name " "FROM users " "LIMIT :param_1) AS anon_1 LEFT OUTER JOIN orders AS orders_1 ON anon_1.users_id = orders_1.user_id", {'param_1':10}, use_default_dialect=True ) self.assert_compile( sess.query(Order).options(joinedload(Order.user)).limit(10), "SELECT orders.id AS orders_id, orders.user_id AS orders_user_id, orders.address_id AS " "orders_address_id, orders.description AS orders_description, orders.isopen AS orders_isopen, " "users_1.id AS users_1_id, users_1.name AS users_1_name FROM orders LEFT OUTER JOIN users AS " "users_1 ON users_1.id = orders.user_id LIMIT :param_1", {'param_1':10}, use_default_dialect=True ) self.assert_compile( sess.query(Order).options(joinedload(Order.user, innerjoin=True)).limit(10), "SELECT orders.id AS orders_id, orders.user_id AS orders_user_id, orders.address_id AS " "orders_address_id, orders.description AS orders_description, orders.isopen AS orders_isopen, " "users_1.id AS users_1_id, users_1.name AS users_1_name FROM orders JOIN users AS " "users_1 ON users_1.id = orders.user_id LIMIT 
:param_1", {'param_1':10}, use_default_dialect=True ) self.assert_compile( sess.query(User).options(joinedload_all("orders.address")).limit(10), "SELECT anon_1.users_id AS anon_1_users_id, anon_1.users_name AS anon_1_users_name, " "addresses_1.id AS addresses_1_id, addresses_1.user_id AS addresses_1_user_id, " "addresses_1.email_address AS addresses_1_email_address, orders_1.id AS orders_1_id, " "orders_1.user_id AS orders_1_user_id, orders_1.address_id AS orders_1_address_id, " "orders_1.description AS orders_1_description, orders_1.isopen AS orders_1_isopen FROM " "(SELECT users.id AS users_id, users.name AS users_name FROM users LIMIT :param_1) AS anon_1 " "LEFT OUTER JOIN orders AS orders_1 ON anon_1.users_id = orders_1.user_id LEFT OUTER JOIN " "addresses AS addresses_1 ON addresses_1.id = orders_1.address_id", {'param_1':10}, use_default_dialect=True ) self.assert_compile( sess.query(User).options(joinedload_all("orders.items"), joinedload("orders.address")), "SELECT users.id AS users_id, users.name AS users_name, items_1.id AS items_1_id, " "items_1.description AS items_1_description, addresses_1.id AS addresses_1_id, " "addresses_1.user_id AS addresses_1_user_id, addresses_1.email_address AS " "addresses_1_email_address, orders_1.id AS orders_1_id, orders_1.user_id AS " "orders_1_user_id, orders_1.address_id AS orders_1_address_id, orders_1.description " "AS orders_1_description, orders_1.isopen AS orders_1_isopen FROM users LEFT OUTER JOIN " "orders AS orders_1 ON users.id = orders_1.user_id LEFT OUTER JOIN order_items AS " "order_items_1 ON orders_1.id = order_items_1.order_id LEFT OUTER JOIN items AS " "items_1 ON items_1.id = order_items_1.item_id LEFT OUTER JOIN addresses AS " "addresses_1 ON addresses_1.id = orders_1.address_id" ,use_default_dialect=True ) self.assert_compile( sess.query(User).options(joinedload("orders"), joinedload("orders.address", innerjoin=True)).limit(10), "SELECT anon_1.users_id AS anon_1_users_id, anon_1.users_name AS anon_1_users_name, " "addresses_1.id AS addresses_1_id, addresses_1.user_id AS addresses_1_user_id, " "addresses_1.email_address AS addresses_1_email_address, orders_1.id AS orders_1_id, " "orders_1.user_id AS orders_1_user_id, orders_1.address_id AS orders_1_address_id, " "orders_1.description AS orders_1_description, orders_1.isopen AS orders_1_isopen " "FROM (SELECT users.id AS users_id, users.name AS users_name " "FROM users " "LIMIT :param_1) AS anon_1 LEFT OUTER JOIN orders AS orders_1 ON anon_1.users_id = " "orders_1.user_id LEFT OUTER JOIN addresses AS addresses_1 ON addresses_1.id = orders_1.address_id", {'param_1':10}, use_default_dialect=True ) self.assert_compile( sess.query(User).options(joinedload("orders", innerjoin=True), joinedload("orders.address", innerjoin=True)).limit(10), "SELECT anon_1.users_id AS anon_1_users_id, anon_1.users_name AS anon_1_users_name, " "addresses_1.id AS addresses_1_id, addresses_1.user_id AS addresses_1_user_id, " "addresses_1.email_address AS addresses_1_email_address, orders_1.id AS orders_1_id, " "orders_1.user_id AS orders_1_user_id, orders_1.address_id AS orders_1_address_id, " "orders_1.description AS orders_1_description, orders_1.isopen AS orders_1_isopen " "FROM (SELECT users.id AS users_id, users.name AS users_name " "FROM users " "LIMIT :param_1) AS anon_1 JOIN orders AS orders_1 ON anon_1.users_id = " "orders_1.user_id JOIN addresses AS addresses_1 ON addresses_1.id = orders_1.address_id", {'param_1':10}, use_default_dialect=True ) def test_one_to_many_scalar(self): Address, 
addresses, users, User = (self.classes.Address, self.tables.addresses, self.tables.users, self.classes.User) mapper(User, users, properties = dict( address = relationship(mapper(Address, addresses), lazy='joined', uselist=False) )) q = create_session().query(User) def go(): l = q.filter(users.c.id == 7).all() eq_([User(id=7, address=Address(id=1))], l) self.assert_sql_count(testing.db, go, 1) @testing.fails_on('maxdb', 'FIXME: unknown') def test_many_to_one(self): users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(Address, addresses, properties = dict( user = relationship(mapper(User, users), lazy='joined') )) sess = create_session() q = sess.query(Address) def go(): a = q.filter(addresses.c.id==1).one() is_not_(a.user, None) u1 = sess.query(User).get(7) is_(a.user, u1) self.assert_sql_count(testing.db, go, 1) def test_many_to_one_null(self): """test that a many-to-one eager load which loads None does not later trigger a lazy load. """ Order, Address, addresses, orders = (self.classes.Order, self.classes.Address, self.tables.addresses, self.tables.orders) # use a primaryjoin intended to defeat SA's usage of # query.get() for a many-to-one lazyload mapper(Order, orders, properties = dict( address = relationship(mapper(Address, addresses), primaryjoin=and_( addresses.c.id==orders.c.address_id, addresses.c.email_address != None ), lazy='joined') )) sess = create_session() def go(): o1 = sess.query(Order).options(lazyload('address')).filter(Order.id==5).one() eq_(o1.address, None) self.assert_sql_count(testing.db, go, 2) sess.expunge_all() def go(): o1 = sess.query(Order).filter(Order.id==5).one() eq_(o1.address, None) self.assert_sql_count(testing.db, go, 1) def test_one_and_many(self): """tests eager load for a parent object with a child object that contains a many-to-many relationship to a third object.""" users, items, order_items, orders, Item, User, Order = (self.tables.users, self.tables.items, self.tables.order_items, self.tables.orders, self.classes.Item, self.classes.User, self.classes.Order) mapper(User, users, properties={ 'orders':relationship(Order, lazy='joined', order_by=orders.c.id) }) mapper(Item, items) mapper(Order, orders, properties = dict( items = relationship(Item, secondary=order_items, lazy='joined', order_by=items.c.id) )) q = create_session().query(User) l = q.filter("users.id in (7, 8, 9)").order_by("users.id") def go(): eq_(self.static.user_order_result[0:3], l.all()) self.assert_sql_count(testing.db, go, 1) def test_double_with_aggregate(self): User, users, orders, Order = (self.classes.User, self.tables.users, self.tables.orders, self.classes.Order) max_orders_by_user = sa.select([sa.func.max(orders.c.id).label('order_id')], group_by=[orders.c.user_id] ).alias('max_orders_by_user') max_orders = orders.select(orders.c.id==max_orders_by_user.c.order_id).\ alias('max_orders') mapper(Order, orders) mapper(User, users, properties={ 'orders':relationship(Order, backref='user', lazy='joined', order_by=orders.c.id), 'max_order':relationship( mapper(Order, max_orders, non_primary=True), lazy='joined', uselist=False) }) q = create_session().query(User) def go(): eq_([ User(id=7, orders=[ Order(id=1), Order(id=3), Order(id=5), ], max_order=Order(id=5) ), User(id=8, orders=[]), User(id=9, orders=[Order(id=2),Order(id=4)], max_order=Order(id=4) ), User(id=10), ], q.order_by(User.id).all()) self.assert_sql_count(testing.db, go, 1) def test_uselist_false_warning(self): """test that multiple rows received 
by a uselist=False raises a warning.""" User, users, orders, Order = (self.classes.User, self.tables.users, self.tables.orders, self.classes.Order) mapper(User, users, properties={ 'order':relationship(Order, uselist=False) }) mapper(Order, orders) s = create_session() assert_raises(sa.exc.SAWarning, s.query(User).options(joinedload(User.order)).all) def test_wide(self): users, items, order_items, Order, Item, User, Address, orders, addresses = (self.tables.users, self.tables.items, self.tables.order_items, self.classes.Order, self.classes.Item, self.classes.User, self.classes.Address, self.tables.orders, self.tables.addresses) mapper(Order, orders, properties={'items':relationship(Item, secondary=order_items, lazy='joined', order_by=items.c.id)}) mapper(Item, items) mapper(User, users, properties = dict( addresses = relationship(mapper(Address, addresses), lazy = False, order_by=addresses.c.id), orders = relationship(Order, lazy = False, order_by=orders.c.id), )) q = create_session().query(User) l = q.all() eq_(self.static.user_all_result, q.order_by(User.id).all()) def test_against_select(self): """test eager loading of a mapper which is against a select""" users, items, order_items, orders, Item, User, Order = (self.tables.users, self.tables.items, self.tables.order_items, self.tables.orders, self.classes.Item, self.classes.User, self.classes.Order) s = sa.select([orders], orders.c.isopen==1).alias('openorders') mapper(Order, s, properties={ 'user':relationship(User, lazy='joined') }) mapper(User, users) mapper(Item, items) q = create_session().query(Order) eq_([ Order(id=3, user=User(id=7)), Order(id=4, user=User(id=9)) ], q.all()) q = q.select_from(s.join(order_items).join(items)).filter(~Item.id.in_([1, 2, 5])) eq_([ Order(id=3, user=User(id=7)), ], q.all()) def test_aliasing(self): """test that eager loading uses aliases to insulate the eager load from regular criterion against those tables.""" Address, addresses, users, User = (self.classes.Address, self.tables.addresses, self.tables.users, self.classes.User) mapper(User, users, properties = dict( addresses = relationship(mapper(Address, addresses), lazy='joined', order_by=addresses.c.id) )) q = create_session().query(User) l = q.filter(addresses.c.email_address == 'ed@lala.com').filter( Address.user_id==User.id).order_by(User.id) eq_(self.static.user_address_result[1:2], l.all()) def test_inner_join(self): Address, addresses, users, User = (self.classes.Address, self.tables.addresses, self.tables.users, self.classes.User) mapper(User, users, properties = dict( addresses = relationship(mapper(Address, addresses), lazy='joined', innerjoin=True, order_by=addresses.c.id) )) sess = create_session() eq_( [User(id=7, addresses=[ Address(id=1) ]), User(id=8, addresses=[ Address(id=2, email_address='ed@wood.com'), Address(id=3, email_address='ed@bettyboop.com'), Address(id=4, email_address='ed@lala.com'), ]), User(id=9, addresses=[ Address(id=5) ])] ,sess.query(User).all() ) self.assert_compile(sess.query(User), "SELECT users.id AS users_id, users.name AS users_name, " "addresses_1.id AS addresses_1_id, addresses_1.user_id AS addresses_1_user_id, " "addresses_1.email_address AS addresses_1_email_address FROM users JOIN " "addresses AS addresses_1 ON users.id = addresses_1.user_id ORDER BY addresses_1.id" , use_default_dialect=True) def test_inner_join_chaining_options(self): users, items, order_items, Order, Item, User, orders = (self.tables.users, self.tables.items, self.tables.order_items, self.classes.Order, self.classes.Item, 
self.classes.User, self.tables.orders) mapper(User, users, properties = dict( orders =relationship(Order, innerjoin=True, lazy=False) )) mapper(Order, orders, properties=dict( items=relationship(Item, secondary=order_items, lazy=False, innerjoin=True) )) mapper(Item, items) sess = create_session() self.assert_compile( sess.query(User), "SELECT users.id AS users_id, users.name AS users_name, items_1.id AS " "items_1_id, items_1.description AS items_1_description, orders_1.id AS " "orders_1_id, orders_1.user_id AS orders_1_user_id, orders_1.address_id AS " "orders_1_address_id, orders_1.description AS orders_1_description, " "orders_1.isopen AS orders_1_isopen FROM users JOIN orders AS orders_1 ON " "users.id = orders_1.user_id JOIN order_items AS order_items_1 ON orders_1.id = " "order_items_1.order_id JOIN items AS items_1 ON items_1.id = " "order_items_1.item_id", use_default_dialect=True ) self.assert_compile( sess.query(User).options(joinedload(User.orders, innerjoin=False)), "SELECT users.id AS users_id, users.name AS users_name, items_1.id AS " "items_1_id, items_1.description AS items_1_description, orders_1.id AS " "orders_1_id, orders_1.user_id AS orders_1_user_id, orders_1.address_id AS " "orders_1_address_id, orders_1.description AS orders_1_description, " "orders_1.isopen AS orders_1_isopen FROM users LEFT OUTER JOIN orders AS orders_1 ON " "users.id = orders_1.user_id LEFT OUTER JOIN order_items AS order_items_1 ON orders_1.id = " "order_items_1.order_id LEFT OUTER JOIN items AS items_1 ON items_1.id = " "order_items_1.item_id", use_default_dialect=True ) self.assert_compile( sess.query(User).options(joinedload(User.orders, Order.items, innerjoin=False)), "SELECT users.id AS users_id, users.name AS users_name, items_1.id AS " "items_1_id, items_1.description AS items_1_description, orders_1.id AS " "orders_1_id, orders_1.user_id AS orders_1_user_id, orders_1.address_id AS " "orders_1_address_id, orders_1.description AS orders_1_description, " "orders_1.isopen AS orders_1_isopen FROM users JOIN orders AS orders_1 ON " "users.id = orders_1.user_id LEFT OUTER JOIN order_items AS order_items_1 ON orders_1.id = " "order_items_1.order_id LEFT OUTER JOIN items AS items_1 ON items_1.id = " "order_items_1.item_id", use_default_dialect=True ) def test_inner_join_chaining_fixed(self): users, items, order_items, Order, Item, User, orders = (self.tables.users, self.tables.items, self.tables.order_items, self.classes.Order, self.classes.Item, self.classes.User, self.tables.orders) mapper(User, users, properties = dict( orders =relationship(Order, lazy=False) )) mapper(Order, orders, properties=dict( items=relationship(Item, secondary=order_items, lazy=False, innerjoin=True) )) mapper(Item, items) sess = create_session() # joining from user, its all LEFT OUTER JOINs self.assert_compile( sess.query(User), "SELECT users.id AS users_id, users.name AS users_name, items_1.id AS " "items_1_id, items_1.description AS items_1_description, orders_1.id AS " "orders_1_id, orders_1.user_id AS orders_1_user_id, orders_1.address_id AS " "orders_1_address_id, orders_1.description AS orders_1_description, " "orders_1.isopen AS orders_1_isopen FROM users LEFT OUTER JOIN orders AS orders_1 ON " "users.id = orders_1.user_id LEFT OUTER JOIN order_items AS order_items_1 ON orders_1.id = " "order_items_1.order_id LEFT OUTER JOIN items AS items_1 ON items_1.id = " "order_items_1.item_id", use_default_dialect=True ) # joining just from Order, innerjoin=True can be respected self.assert_compile( sess.query(Order), 
"SELECT orders.id AS orders_id, orders.user_id AS orders_user_id, " "orders.address_id AS orders_address_id, orders.description AS " "orders_description, orders.isopen AS orders_isopen, items_1.id " "AS items_1_id, items_1.description AS items_1_description FROM " "orders JOIN order_items AS order_items_1 ON orders.id = " "order_items_1.order_id JOIN items AS items_1 ON items_1.id = " "order_items_1.item_id", use_default_dialect=True ) def test_inner_join_options(self): users, items, order_items, Order, Item, User, orders = (self.tables.users, self.tables.items, self.tables.order_items, self.classes.Order, self.classes.Item, self.classes.User, self.tables.orders) mapper(User, users, properties = dict( orders =relationship(Order, backref=backref('user', innerjoin=True), order_by=orders.c.id) )) mapper(Order, orders, properties=dict( items=relationship(Item, secondary=order_items, order_by=items.c.id) )) mapper(Item, items) sess = create_session() self.assert_compile(sess.query(User).options(joinedload(User.orders, innerjoin=True)), "SELECT users.id AS users_id, users.name AS users_name, orders_1.id AS orders_1_id, " "orders_1.user_id AS orders_1_user_id, orders_1.address_id AS orders_1_address_id, " "orders_1.description AS orders_1_description, orders_1.isopen AS orders_1_isopen " "FROM users JOIN orders AS orders_1 ON users.id = orders_1.user_id ORDER BY orders_1.id" , use_default_dialect=True) self.assert_compile(sess.query(User).options(joinedload_all(User.orders, Order.items, innerjoin=True)), "SELECT users.id AS users_id, users.name AS users_name, items_1.id AS items_1_id, " "items_1.description AS items_1_description, orders_1.id AS orders_1_id, " "orders_1.user_id AS orders_1_user_id, orders_1.address_id AS orders_1_address_id, " "orders_1.description AS orders_1_description, orders_1.isopen AS orders_1_isopen " "FROM users JOIN orders AS orders_1 ON users.id = orders_1.user_id JOIN order_items AS " "order_items_1 ON orders_1.id = order_items_1.order_id JOIN items AS items_1 ON " "items_1.id = order_items_1.item_id ORDER BY orders_1.id, items_1.id" , use_default_dialect=True) def go(): eq_( sess.query(User).options( joinedload(User.orders, innerjoin=True), joinedload(User.orders, Order.items, innerjoin=True)). 
order_by(User.id).all(), [User(id=7, orders=[ Order(id=1, items=[ Item(id=1), Item(id=2), Item(id=3)]), Order(id=3, items=[ Item(id=3), Item(id=4), Item(id=5)]), Order(id=5, items=[Item(id=5)])]), User(id=9, orders=[ Order(id=2, items=[ Item(id=1), Item(id=2), Item(id=3)]), Order(id=4, items=[ Item(id=1), Item(id=5)])]) ] ) self.assert_sql_count(testing.db, go, 1) # test that default innerjoin setting is used for options self.assert_compile( sess.query(Order).options(joinedload(Order.user)).filter(Order.description == 'foo'), "SELECT orders.id AS orders_id, orders.user_id AS orders_user_id, orders.address_id AS " "orders_address_id, orders.description AS orders_description, orders.isopen AS " "orders_isopen, users_1.id AS users_1_id, users_1.name AS users_1_name " "FROM orders JOIN users AS users_1 ON users_1.id = orders.user_id " "WHERE orders.description = :description_1", use_default_dialect=True ) class SubqueryAliasingTest(fixtures.MappedTest, testing.AssertsCompiledSQL): """test #2188""" __dialect__ = 'default' @classmethod def define_tables(cls, metadata): Table('a', metadata, Column('id', Integer, primary_key=True) ) Table('b', metadata, Column('id', Integer, primary_key=True), Column('a_id', Integer, ForeignKey('a.id')), Column('value', Integer), ) @classmethod def setup_classes(cls): class A(cls.Comparable): pass class B(cls.Comparable): pass def _fixture(self, props): A, B = self.classes.A, self.classes.B b_table, a_table = self.tables.b, self.tables.a mapper(A,a_table, properties=props) mapper(B,b_table,properties = { 'a':relationship(A, backref="bs") }) def test_column_property(self): A, B = self.classes.A, self.classes.B b_table, a_table = self.tables.b, self.tables.a cp = select([func.sum(b_table.c.value)]).\ where(b_table.c.a_id==a_table.c.id) self._fixture({ 'summation':column_property(cp) }) self.assert_compile( create_session().query(A).options(joinedload_all('bs')). order_by(A.summation). limit(50), "SELECT anon_1.anon_2 AS anon_1_anon_2, anon_1.a_id " "AS anon_1_a_id, b_1.id AS b_1_id, b_1.a_id AS " "b_1_a_id, b_1.value AS b_1_value FROM (SELECT " "(SELECT sum(b.value) AS sum_1 FROM b WHERE b.a_id = a.id) " "AS anon_2, a.id AS a_id FROM a ORDER BY (SELECT " "sum(b.value) AS sum_1 FROM b WHERE b.a_id = a.id) " "LIMIT :param_1) AS anon_1 LEFT OUTER JOIN b AS b_1 ON " "anon_1.a_id = b_1.a_id ORDER BY anon_1.anon_2" ) def test_column_property_desc(self): A, B = self.classes.A, self.classes.B b_table, a_table = self.tables.b, self.tables.a cp = select([func.sum(b_table.c.value)]).\ where(b_table.c.a_id==a_table.c.id) self._fixture({ 'summation':column_property(cp) }) self.assert_compile( create_session().query(A).options(joinedload_all('bs')). order_by(A.summation.desc()). limit(50), "SELECT anon_1.anon_2 AS anon_1_anon_2, anon_1.a_id " "AS anon_1_a_id, b_1.id AS b_1_id, b_1.a_id AS " "b_1_a_id, b_1.value AS b_1_value FROM (SELECT " "(SELECT sum(b.value) AS sum_1 FROM b WHERE b.a_id = a.id) " "AS anon_2, a.id AS a_id FROM a ORDER BY (SELECT " "sum(b.value) AS sum_1 FROM b WHERE b.a_id = a.id) DESC " "LIMIT :param_1) AS anon_1 LEFT OUTER JOIN b AS b_1 ON " "anon_1.a_id = b_1.a_id ORDER BY anon_1.anon_2 DESC" ) def test_column_property_correlated(self): A, B = self.classes.A, self.classes.B b_table, a_table = self.tables.b, self.tables.a cp = select([func.sum(b_table.c.value)]).\ where(b_table.c.a_id==a_table.c.id).\ correlate(a_table) self._fixture({ 'summation':column_property(cp) }) self.assert_compile( create_session().query(A).options(joinedload_all('bs')). 
order_by(A.summation). limit(50), "SELECT anon_1.anon_2 AS anon_1_anon_2, anon_1.a_id " "AS anon_1_a_id, b_1.id AS b_1_id, b_1.a_id AS " "b_1_a_id, b_1.value AS b_1_value FROM (SELECT " "(SELECT sum(b.value) AS sum_1 FROM b WHERE b.a_id = a.id) " "AS anon_2, a.id AS a_id FROM a ORDER BY (SELECT " "sum(b.value) AS sum_1 FROM b WHERE b.a_id = a.id) " "LIMIT :param_1) AS anon_1 LEFT OUTER JOIN b AS b_1 ON " "anon_1.a_id = b_1.a_id ORDER BY anon_1.anon_2" ) def test_standalone_subquery_unlabeled(self): A, B = self.classes.A, self.classes.B b_table, a_table = self.tables.b, self.tables.a self._fixture({}) cp = select([func.sum(b_table.c.value)]).\ where(b_table.c.a_id == a_table.c.id).\ correlate(a_table).as_scalar() # up until 0.8, this was ordering by a new subquery. # the removal of a separate _make_proxy() from ScalarSelect # fixed that. self.assert_compile( create_session().query(A).options(joinedload_all('bs')). order_by(cp). limit(50), "SELECT anon_1.a_id AS anon_1_a_id, anon_1.anon_2 " "AS anon_1_anon_2, b_1.id AS b_1_id, b_1.a_id AS " "b_1_a_id, b_1.value AS b_1_value FROM (SELECT a.id " "AS a_id, (SELECT sum(b.value) AS sum_1 FROM b WHERE " "b.a_id = a.id) AS anon_2 FROM a ORDER BY (SELECT " "sum(b.value) AS sum_1 FROM b WHERE b.a_id = a.id) " "LIMIT :param_1) AS anon_1 LEFT OUTER JOIN b AS b_1 " "ON anon_1.a_id = b_1.a_id ORDER BY anon_1.anon_2" ) def test_standalone_subquery_labeled(self): A, B = self.classes.A, self.classes.B b_table, a_table = self.tables.b, self.tables.a self._fixture({}) cp = select([func.sum(b_table.c.value)]).\ where(b_table.c.a_id==a_table.c.id).\ correlate(a_table).as_scalar().label('foo') self.assert_compile( create_session().query(A).options(joinedload_all('bs')). order_by(cp). limit(50), "SELECT anon_1.a_id AS anon_1_a_id, anon_1.foo " "AS anon_1_foo, b_1.id AS b_1_id, b_1.a_id AS " "b_1_a_id, b_1.value AS b_1_value FROM (SELECT a.id " "AS a_id, (SELECT sum(b.value) AS sum_1 FROM b WHERE " "b.a_id = a.id) AS foo FROM a ORDER BY (SELECT " "sum(b.value) AS sum_1 FROM b WHERE b.a_id = a.id) " "LIMIT :param_1) AS anon_1 LEFT OUTER JOIN b AS b_1 " "ON anon_1.a_id = b_1.a_id ORDER BY " "anon_1.foo" ) def test_standalone_negated(self): A, B = self.classes.A, self.classes.B b_table, a_table = self.tables.b, self.tables.a self._fixture({}) cp = select([func.sum(b_table.c.value)]).\ where(b_table.c.a_id==a_table.c.id).\ correlate(a_table).\ as_scalar() # test a different unary operator self.assert_compile( create_session().query(A).options(joinedload_all('bs')). order_by(~cp). 
limit(50), "SELECT anon_1.a_id AS anon_1_a_id, anon_1.anon_2 " "AS anon_1_anon_2, b_1.id AS b_1_id, b_1.a_id AS " "b_1_a_id, b_1.value AS b_1_value FROM (SELECT a.id " "AS a_id, NOT (SELECT sum(b.value) AS sum_1 FROM b " "WHERE b.a_id = a.id) FROM a ORDER BY NOT (SELECT " "sum(b.value) AS sum_1 FROM b WHERE b.a_id = a.id) " "LIMIT :param_1) AS anon_1 LEFT OUTER JOIN b AS b_1 " "ON anon_1.a_id = b_1.a_id ORDER BY anon_1.anon_2" ) class LoadOnExistingTest(_fixtures.FixtureTest): """test that loaders from a base Query fully populate.""" run_inserts = 'once' run_deletes = None def _collection_to_scalar_fixture(self): User, Address, Dingaling = self.classes.User, \ self.classes.Address, self.classes.Dingaling mapper(User, self.tables.users, properties={ 'addresses':relationship(Address), }) mapper(Address, self.tables.addresses, properties={ 'dingaling':relationship(Dingaling) }) mapper(Dingaling, self.tables.dingalings) sess = Session(autoflush=False) return User, Address, Dingaling, sess def _collection_to_collection_fixture(self): User, Order, Item = self.classes.User, \ self.classes.Order, self.classes.Item mapper(User, self.tables.users, properties={ 'orders':relationship(Order), }) mapper(Order, self.tables.orders, properties={ 'items':relationship(Item, secondary=self.tables.order_items), }) mapper(Item, self.tables.items) sess = Session(autoflush=False) return User, Order, Item, sess def _eager_config_fixture(self): User, Address = self.classes.User, self.classes.Address mapper(User, self.tables.users, properties={ 'addresses':relationship(Address, lazy="joined"), }) mapper(Address, self.tables.addresses) sess = Session(autoflush=False) return User, Address, sess def test_no_query_on_refresh(self): User, Address, sess = self._eager_config_fixture() u1 = sess.query(User).get(8) assert 'addresses' in u1.__dict__ sess.expire(u1) def go(): eq_(u1.id, 8) self.assert_sql_count(testing.db, go, 1) assert 'addresses' not in u1.__dict__ def test_loads_second_level_collection_to_scalar(self): User, Address, Dingaling, sess = self._collection_to_scalar_fixture() u1 = sess.query(User).get(8) a1 = Address() u1.addresses.append(a1) a2 = u1.addresses[0] a2.email_address = 'foo' sess.query(User).options(joinedload_all("addresses.dingaling")).\ filter_by(id=8).all() assert u1.addresses[-1] is a1 for a in u1.addresses: if a is not a1: assert 'dingaling' in a.__dict__ else: assert 'dingaling' not in a.__dict__ if a is a2: eq_(a2.email_address, 'foo') def test_loads_second_level_collection_to_collection(self): User, Order, Item, sess = self._collection_to_collection_fixture() u1 = sess.query(User).get(7) u1.orders o1 = Order() u1.orders.append(o1) sess.query(User).options(joinedload_all("orders.items")).\ filter_by(id=7).all() for o in u1.orders: if o is not o1: assert 'items' in o.__dict__ else: assert 'items' not in o.__dict__ def test_load_two_levels_collection_to_scalar(self): User, Address, Dingaling, sess = self._collection_to_scalar_fixture() u1 = sess.query(User).filter_by(id=8).options(joinedload("addresses")).one() sess.query(User).filter_by(id=8).options(joinedload_all("addresses.dingaling")).first() assert 'dingaling' in u1.addresses[0].__dict__ def test_load_two_levels_collection_to_collection(self): User, Order, Item, sess = self._collection_to_collection_fixture() u1 = sess.query(User).filter_by(id=7).options(joinedload("orders")).one() sess.query(User).filter_by(id=7).options(joinedload_all("orders.items")).first() assert 'items' in u1.orders[0].__dict__ class 
AddEntityTest(_fixtures.FixtureTest): run_inserts = 'once' run_deletes = None def _assert_result(self): Item, Address, Order, User = (self.classes.Item, self.classes.Address, self.classes.Order, self.classes.User) return [ ( User(id=7, addresses=[Address(id=1)] ), Order(id=1, items=[Item(id=1), Item(id=2), Item(id=3)] ), ), ( User(id=7, addresses=[Address(id=1)] ), Order(id=3, items=[Item(id=3), Item(id=4), Item(id=5)] ), ), ( User(id=7, addresses=[Address(id=1)] ), Order(id=5, items=[Item(id=5)] ), ), ( User(id=9, addresses=[Address(id=5)] ), Order(id=2, items=[Item(id=1), Item(id=2), Item(id=3)] ), ), ( User(id=9, addresses=[Address(id=5)] ), Order(id=4, items=[Item(id=1), Item(id=5)] ), ) ] def test_mapper_configured(self): users, items, order_items, Order, Item, User, Address, orders, addresses = (self.tables.users, self.tables.items, self.tables.order_items, self.classes.Order, self.classes.Item, self.classes.User, self.classes.Address, self.tables.orders, self.tables.addresses) mapper(User, users, properties={ 'addresses':relationship(Address, lazy='joined'), 'orders':relationship(Order) }) mapper(Address, addresses) mapper(Order, orders, properties={ 'items':relationship(Item, secondary=order_items, lazy='joined', order_by=items.c.id) }) mapper(Item, items) sess = create_session() oalias = sa.orm.aliased(Order) def go(): ret = sess.query(User, oalias).join(oalias, 'orders').\ order_by(User.id,oalias.id).all() eq_(ret, self._assert_result()) self.assert_sql_count(testing.db, go, 1) def test_options(self): users, items, order_items, Order, Item, User, Address, orders, addresses = (self.tables.users, self.tables.items, self.tables.order_items, self.classes.Order, self.classes.Item, self.classes.User, self.classes.Address, self.tables.orders, self.tables.addresses) mapper(User, users, properties={ 'addresses':relationship(Address), 'orders':relationship(Order) }) mapper(Address, addresses) mapper(Order, orders, properties={ 'items':relationship(Item, secondary=order_items, order_by=items.c.id) }) mapper(Item, items) sess = create_session() oalias = sa.orm.aliased(Order) def go(): ret = sess.query(User, oalias).options(joinedload('addresses')).\ join(oalias, 'orders').\ order_by(User.id, oalias.id).all() eq_(ret, self._assert_result()) self.assert_sql_count(testing.db, go, 6) sess.expunge_all() def go(): ret = sess.query(User, oalias).\ options(joinedload('addresses'), joinedload(oalias.items)).\ join(oalias, 'orders').\ order_by(User.id, oalias.id).all() eq_(ret, self._assert_result()) self.assert_sql_count(testing.db, go, 1) class OrderBySecondaryTest(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): Table('m2m', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('aid', Integer, ForeignKey('a.id')), Column('bid', Integer, ForeignKey('b.id'))) Table('a', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('data', String(50))) Table('b', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('data', String(50))) @classmethod def fixtures(cls): return dict( a=(('id', 'data'), (1, 'a1'), (2, 'a2')), b=(('id', 'data'), (1, 'b1'), (2, 'b2'), (3, 'b3'), (4, 'b4')), m2m=(('id', 'aid', 'bid'), (2, 1, 1), (4, 2, 4), (1, 1, 3), (6, 2, 2), (3, 1, 2), (5, 2, 3))) def test_ordering(self): a, m2m, b = (self.tables.a, self.tables.m2m, self.tables.b) class A(fixtures.ComparableEntity):pass class B(fixtures.ComparableEntity):pass mapper(A, a, properties={ 'bs':relationship(B, 
secondary=m2m, lazy='joined', order_by=m2m.c.id) }) mapper(B, b) sess = create_session() eq_(sess.query(A).all(), [ A(data='a1', bs=[B(data='b3'), B(data='b1'), B(data='b2')]), A(bs=[B(data='b4'), B(data='b3'), B(data='b2')]) ]) class SelfReferentialEagerTest(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): Table('nodes', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('parent_id', Integer, ForeignKey('nodes.id')), Column('data', String(30))) @testing.fails_on('maxdb', 'FIXME: unknown') def test_basic(self): nodes = self.tables.nodes class Node(fixtures.ComparableEntity): def append(self, node): self.children.append(node) mapper(Node, nodes, properties={ 'children':relationship(Node, lazy='joined', join_depth=3, order_by=nodes.c.id) }) sess = create_session() n1 = Node(data='n1') n1.append(Node(data='n11')) n1.append(Node(data='n12')) n1.append(Node(data='n13')) n1.children[1].append(Node(data='n121')) n1.children[1].append(Node(data='n122')) n1.children[1].append(Node(data='n123')) sess.add(n1) sess.flush() sess.expunge_all() def go(): d = sess.query(Node).filter_by(data='n1').all()[0] eq_(Node(data='n1', children=[ Node(data='n11'), Node(data='n12', children=[ Node(data='n121'), Node(data='n122'), Node(data='n123') ]), Node(data='n13') ]), d) self.assert_sql_count(testing.db, go, 1) sess.expunge_all() def go(): d = sess.query(Node).filter_by(data='n1').first() eq_(Node(data='n1', children=[ Node(data='n11'), Node(data='n12', children=[ Node(data='n121'), Node(data='n122'), Node(data='n123') ]), Node(data='n13') ]), d) self.assert_sql_count(testing.db, go, 1) def test_lazy_fallback_doesnt_affect_eager(self): nodes = self.tables.nodes class Node(fixtures.ComparableEntity): def append(self, node): self.children.append(node) mapper(Node, nodes, properties={ 'children':relationship(Node, lazy='joined', join_depth=1, order_by=nodes.c.id) }) sess = create_session() n1 = Node(data='n1') n1.append(Node(data='n11')) n1.append(Node(data='n12')) n1.append(Node(data='n13')) n1.children[1].append(Node(data='n121')) n1.children[1].append(Node(data='n122')) n1.children[1].append(Node(data='n123')) sess.add(n1) sess.flush() sess.expunge_all() # eager load with join depth 1. when eager load of 'n1' hits the # children of 'n12', no columns are present, eager loader degrades to # lazy loader; fine. but then, 'n12' is *also* in the first level of # columns since we're loading the whole table. when those rows # arrive, now we *can* eager load its children and an eager collection # should be initialized. essentially the 'n12' instance is present in # not just two different rows but two distinct sets of columns in this # result set. 
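# A rough sketch of the shape of the statement this mapping emits, shown only to
# make the depth limit concrete (alias names are illustrative):
#
#     SELECT nodes.id, nodes.parent_id, nodes.data,
#            nodes_1.id, nodes_1.parent_id, nodes_1.data
#     FROM nodes LEFT OUTER JOIN nodes AS nodes_1
#         ON nodes.id = nodes_1.parent_id
#
# Only one self-join is rendered, so grandchild rows are not carried along; the
# behavior described above relies on 'n12' also appearing as a top-level row of
# the same full-table result.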
def go(): allnodes = sess.query(Node).order_by(Node.data).all() n12 = allnodes[2] eq_(n12.data, 'n12') eq_([ Node(data='n121'), Node(data='n122'), Node(data='n123') ], list(n12.children)) self.assert_sql_count(testing.db, go, 1) def test_with_deferred(self): nodes = self.tables.nodes class Node(fixtures.ComparableEntity): def append(self, node): self.children.append(node) mapper(Node, nodes, properties={ 'children':relationship(Node, lazy='joined', join_depth=3, order_by=nodes.c.id), 'data':deferred(nodes.c.data) }) sess = create_session() n1 = Node(data='n1') n1.append(Node(data='n11')) n1.append(Node(data='n12')) sess.add(n1) sess.flush() sess.expunge_all() def go(): eq_( Node(data='n1', children=[Node(data='n11'), Node(data='n12')]), sess.query(Node).order_by(Node.id).first(), ) self.assert_sql_count(testing.db, go, 4) sess.expunge_all() def go(): eq_(Node(data='n1', children=[Node(data='n11'), Node(data='n12')]), sess.query(Node).options(undefer('data')).order_by(Node.id).first()) self.assert_sql_count(testing.db, go, 3) sess.expunge_all() def go(): eq_(Node(data='n1', children=[Node(data='n11'), Node(data='n12')]), sess.query(Node).options(undefer('data'), undefer('children.data')).first()) self.assert_sql_count(testing.db, go, 1) def test_options(self): nodes = self.tables.nodes class Node(fixtures.ComparableEntity): def append(self, node): self.children.append(node) mapper(Node, nodes, properties={ 'children':relationship(Node, lazy='select', order_by=nodes.c.id) }, order_by=nodes.c.id) sess = create_session() n1 = Node(data='n1') n1.append(Node(data='n11')) n1.append(Node(data='n12')) n1.append(Node(data='n13')) n1.children[1].append(Node(data='n121')) n1.children[1].append(Node(data='n122')) n1.children[1].append(Node(data='n123')) sess.add(n1) sess.flush() sess.expunge_all() def go(): d = sess.query(Node).filter_by(data='n1').\ options(joinedload('children.children')).first() eq_(Node(data='n1', children=[ Node(data='n11'), Node(data='n12', children=[ Node(data='n121'), Node(data='n122'), Node(data='n123') ]), Node(data='n13') ]), d) self.assert_sql_count(testing.db, go, 2) def go(): d = sess.query(Node).filter_by(data='n1').\ options(joinedload('children.children')).first() # test that the query isn't wrapping the initial query for eager loading. 
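        # (illustrative aside, not part of the original test) "wrapping" here refers
        # to the way joined eager loading combined with first()/LIMIT normally nests
        # the row-limited statement in a subquery and LEFT OUTER JOINs the related
        # table to it, roughly (sketch only, exact column labels differ):
        #
        #   SELECT anon_1.nodes_id, ..., nodes_1.id, ...
        #   FROM (SELECT ... FROM nodes WHERE nodes.data = :data_1
        #         ORDER BY nodes.id LIMIT :param_1) AS anon_1
        #   LEFT OUTER JOIN nodes AS nodes_1 ON anon_1.nodes_id = nodes_1.parent_id
        #
        # since the joinedload() option targets 'children.children', which is only
        # reached by the second, lazily-emitted query, the initial statement should
        # remain a plain unwrapped SELECT ... LIMIT, as the assertion below verifies.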
self.assert_sql_execution(testing.db, go, CompiledSQL( "SELECT nodes.id AS nodes_id, nodes.parent_id AS " "nodes_parent_id, nodes.data AS nodes_data FROM nodes " "WHERE nodes.data = :data_1 ORDER BY nodes.id LIMIT :param_1", {'data_1': 'n1'} ) ) @testing.fails_on('maxdb', 'FIXME: unknown') def test_no_depth(self): nodes = self.tables.nodes class Node(fixtures.ComparableEntity): def append(self, node): self.children.append(node) mapper(Node, nodes, properties={ 'children':relationship(Node, lazy='joined') }) sess = create_session() n1 = Node(data='n1') n1.append(Node(data='n11')) n1.append(Node(data='n12')) n1.append(Node(data='n13')) n1.children[1].append(Node(data='n121')) n1.children[1].append(Node(data='n122')) n1.children[1].append(Node(data='n123')) sess.add(n1) sess.flush() sess.expunge_all() def go(): d = sess.query(Node).filter_by(data='n1').first() eq_(Node(data='n1', children=[ Node(data='n11'), Node(data='n12', children=[ Node(data='n121'), Node(data='n122'), Node(data='n123') ]), Node(data='n13') ]), d) self.assert_sql_count(testing.db, go, 3) class MixedSelfReferentialEagerTest(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): Table('a_table', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True) ) Table('b_table', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('parent_b1_id', Integer, ForeignKey('b_table.id')), Column('parent_a_id', Integer, ForeignKey('a_table.id')), Column('parent_b2_id', Integer, ForeignKey('b_table.id'))) @classmethod def setup_mappers(cls): b_table, a_table = cls.tables.b_table, cls.tables.a_table class A(cls.Comparable): pass class B(cls.Comparable): pass mapper(A,a_table) mapper(B,b_table,properties = { 'parent_b1': relationship(B, remote_side = [b_table.c.id], primaryjoin = (b_table.c.parent_b1_id ==b_table.c.id), order_by = b_table.c.id ), 'parent_z': relationship(A,lazy = True), 'parent_b2': relationship(B, remote_side = [b_table.c.id], primaryjoin = (b_table.c.parent_b2_id ==b_table.c.id), order_by = b_table.c.id ) }); @classmethod def insert_data(cls): b_table, a_table = cls.tables.b_table, cls.tables.a_table a_table.insert().execute(dict(id=1), dict(id=2), dict(id=3)) b_table.insert().execute( dict(id=1, parent_a_id=2, parent_b1_id=None, parent_b2_id=None), dict(id=2, parent_a_id=1, parent_b1_id=1, parent_b2_id=None), dict(id=3, parent_a_id=1, parent_b1_id=1, parent_b2_id=2), dict(id=4, parent_a_id=3, parent_b1_id=1, parent_b2_id=None), dict(id=5, parent_a_id=3, parent_b1_id=None, parent_b2_id=2), dict(id=6, parent_a_id=1, parent_b1_id=1, parent_b2_id=3), dict(id=7, parent_a_id=2, parent_b1_id=None, parent_b2_id=3), dict(id=8, parent_a_id=2, parent_b1_id=1, parent_b2_id=2), dict(id=9, parent_a_id=None, parent_b1_id=1, parent_b2_id=None), dict(id=10, parent_a_id=3, parent_b1_id=7, parent_b2_id=2), dict(id=11, parent_a_id=3, parent_b1_id=1, parent_b2_id=8), dict(id=12, parent_a_id=2, parent_b1_id=5, parent_b2_id=2), dict(id=13, parent_a_id=3, parent_b1_id=4, parent_b2_id=4), dict(id=14, parent_a_id=3, parent_b1_id=7, parent_b2_id=2), ) def test_eager_load(self): A, B = self.classes.A, self.classes.B session = create_session() def go(): eq_( session.query(B).\ options( joinedload('parent_b1'), joinedload('parent_b2'), joinedload('parent_z')). 
filter(B.id.in_([2, 8, 11])).order_by(B.id).all(), [ B(id=2, parent_z=A(id=1), parent_b1=B(id=1), parent_b2=None), B(id=8, parent_z=A(id=2), parent_b1=B(id=1), parent_b2=B(id=2)), B(id=11, parent_z=A(id=3), parent_b1=B(id=1), parent_b2=B(id=8)) ] ) self.assert_sql_count(testing.db, go, 1) class SelfReferentialM2MEagerTest(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): Table('widget', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', sa.Unicode(40), nullable=False, unique=True), ) Table('widget_rel', metadata, Column('parent_id', Integer, ForeignKey('widget.id')), Column('child_id', Integer, ForeignKey('widget.id')), sa.UniqueConstraint('parent_id', 'child_id'), ) def test_basic(self): widget, widget_rel = self.tables.widget, self.tables.widget_rel class Widget(fixtures.ComparableEntity): pass mapper(Widget, widget, properties={ 'children': relationship(Widget, secondary=widget_rel, primaryjoin=widget_rel.c.parent_id==widget.c.id, secondaryjoin=widget_rel.c.child_id==widget.c.id, lazy='joined', join_depth=1, ) }) sess = create_session() w1 = Widget(name=u'w1') w2 = Widget(name=u'w2') w1.children.append(w2) sess.add(w1) sess.flush() sess.expunge_all() eq_([Widget(name='w1', children=[Widget(name='w2')])], sess.query(Widget).filter(Widget.name==u'w1').all()) class MixedEntitiesTest(_fixtures.FixtureTest, testing.AssertsCompiledSQL): run_setup_mappers = 'once' run_inserts = 'once' run_deletes = None @classmethod def setup_mappers(cls): users, Keyword, items, order_items, orders, Item, User, Address, keywords, Order, item_keywords, addresses = (cls.tables.users, cls.classes.Keyword, cls.tables.items, cls.tables.order_items, cls.tables.orders, cls.classes.Item, cls.classes.User, cls.classes.Address, cls.tables.keywords, cls.classes.Order, cls.tables.item_keywords, cls.tables.addresses) mapper(User, users, properties={ 'addresses':relationship(Address, backref='user'), 'orders':relationship(Order, backref='user'), # o2m, m2o }) mapper(Address, addresses) mapper(Order, orders, properties={ 'items':relationship(Item, secondary=order_items, order_by=items.c.id), #m2m }) mapper(Item, items, properties={ 'keywords':relationship(Keyword, secondary=item_keywords) #m2m }) mapper(Keyword, keywords) def test_two_entities(self): Item, Order, User, Address = (self.classes.Item, self.classes.Order, self.classes.User, self.classes.Address) sess = create_session() # two FROM clauses def go(): eq_( [ (User(id=9, addresses=[Address(id=5)]), Order(id=2, items=[Item(id=1), Item(id=2), Item(id=3)])), (User(id=9, addresses=[Address(id=5)]), Order(id=4, items=[Item(id=1), Item(id=5)])), ], sess.query(User, Order).filter(User.id==Order.user_id).\ options(joinedload(User.addresses), joinedload(Order.items)).filter(User.id==9).\ order_by(User.id, Order.id).all(), ) self.assert_sql_count(testing.db, go, 1) # one FROM clause def go(): eq_( [ (User(id=9, addresses=[Address(id=5)]), Order(id=2, items=[Item(id=1), Item(id=2), Item(id=3)])), (User(id=9, addresses=[Address(id=5)]), Order(id=4, items=[Item(id=1), Item(id=5)])), ], sess.query(User, Order).join(User.orders).options(joinedload(User.addresses), joinedload(Order.items)).filter(User.id==9).\ order_by(User.id, Order.id).all(), ) self.assert_sql_count(testing.db, go, 1) @testing.exclude('sqlite', '>', (0, ), "sqlite flat out blows it on the multiple JOINs") def test_two_entities_with_joins(self): Item, Order, User, Address = (self.classes.Item, self.classes.Order, self.classes.User, 
self.classes.Address) sess = create_session() # two FROM clauses where there's a join on each one def go(): u1 = aliased(User) o1 = aliased(Order) eq_( [ ( User(addresses=[Address(email_address=u'fred@fred.com')], name=u'fred'), Order(description=u'order 2', isopen=0, items=[Item(description=u'item 1'), Item(description=u'item 2'), Item(description=u'item 3')]), User(addresses=[Address(email_address=u'jack@bean.com')], name=u'jack'), Order(description=u'order 3', isopen=1, items=[Item(description=u'item 3'), Item(description=u'item 4'), Item(description=u'item 5')]) ), ( User(addresses=[Address(email_address=u'fred@fred.com')], name=u'fred'), Order(description=u'order 2', isopen=0, items=[Item(description=u'item 1'), Item(description=u'item 2'), Item(description=u'item 3')]), User(addresses=[Address(email_address=u'jack@bean.com')], name=u'jack'), Order(address_id=None, description=u'order 5', isopen=0, items=[Item(description=u'item 5')]) ), ( User(addresses=[Address(email_address=u'fred@fred.com')], name=u'fred'), Order(description=u'order 4', isopen=1, items=[Item(description=u'item 1'), Item(description=u'item 5')]), User(addresses=[Address(email_address=u'jack@bean.com')], name=u'jack'), Order(address_id=None, description=u'order 5', isopen=0, items=[Item(description=u'item 5')]) ), ], sess.query(User, Order, u1, o1).\ join(Order, User.orders).options(joinedload(User.addresses), joinedload(Order.items)).filter(User.id==9).\ join(o1, u1.orders).options(joinedload(u1.addresses), joinedload(o1.items)).filter(u1.id==7).\ filter(Order.id' has been " "deleted, or its row is otherwise not present.", getattr, u, 'name' ) def test_rollback_undoes_expunge_from_deleted(self): users, User = self.tables.users, self.classes.User mapper(User, users) s = create_session(autocommit=False) u = s.query(User).get(10) s.expire_all() s.execute(users.delete().where(User.id==10)) # do a get()/remove u from session assert s.query(User).get(10) is None assert u not in s s.rollback() assert u in s # but now its back, rollback has occurred, the # _remove_newly_deleted is reverted eq_(u.name, 'chuck') def test_deferred(self): """test that unloaded, deferred attributes aren't included in the expiry list.""" Order, orders = self.classes.Order, self.tables.orders mapper(Order, orders, properties={ 'description':deferred(orders.c.description)}) s = create_session() o1 = s.query(Order).first() assert 'description' not in o1.__dict__ s.expire(o1) assert o1.isopen is not None assert 'description' not in o1.__dict__ assert o1.description def test_deferred_notfound(self): users, User = self.tables.users, self.classes.User mapper(User, users, properties={ 'name':deferred(users.c.name) }) s = create_session(autocommit=False) u = s.query(User).get(10) assert 'name' not in u.__dict__ s.execute(users.delete().where(User.id==10)) assert_raises_message( sa.orm.exc.ObjectDeletedError, "Instance '' has been " "deleted, or its row is otherwise not present.", getattr, u, 'name' ) def test_lazyload_autoflushes(self): users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(User, users, properties={ 'addresses':relationship(Address, order_by=addresses.c.email_address) }) mapper(Address, addresses) s = create_session(autoflush=True, autocommit=False) u = s.query(User).get(8) adlist = u.addresses eq_(adlist, [ Address(email_address='ed@bettyboop.com'), Address(email_address='ed@lala.com'), Address(email_address='ed@wood.com'), ]) a1 = u.addresses[2] a1.email_address = 
'aaaaa' s.expire(u, ['addresses']) eq_(u.addresses, [ Address(email_address='aaaaa'), Address(email_address='ed@bettyboop.com'), Address(email_address='ed@lala.com'), ]) def test_refresh_collection_exception(self): """test graceful failure for currently unsupported immediate refresh of a collection""" users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(User, users, properties={ 'addresses':relationship(Address, order_by=addresses.c.email_address) }) mapper(Address, addresses) s = create_session(autoflush=True, autocommit=False) u = s.query(User).get(8) assert_raises_message(sa_exc.InvalidRequestError, "properties specified for refresh", s.refresh, u, ['addresses']) # in contrast to a regular query with no columns assert_raises_message(sa_exc.InvalidRequestError, "no columns with which to SELECT", s.query().all) def test_refresh_cancels_expire(self): users, User = self.tables.users, self.classes.User mapper(User, users) s = create_session() u = s.query(User).get(7) s.expire(u) s.refresh(u) def go(): u = s.query(User).get(7) eq_(u.name, 'jack') self.assert_sql_count(testing.db, go, 0) def test_expire_doesntload_on_set(self): User, users = self.classes.User, self.tables.users mapper(User, users) sess = create_session() u = sess.query(User).get(7) sess.expire(u, attribute_names=['name']) def go(): u.name = 'somenewname' self.assert_sql_count(testing.db, go, 0) sess.flush() sess.expunge_all() assert sess.query(User).get(7).name == 'somenewname' def test_no_session(self): users, User = self.tables.users, self.classes.User mapper(User, users) sess = create_session() u = sess.query(User).get(7) sess.expire(u, attribute_names=['name']) sess.expunge(u) assert_raises(orm_exc.DetachedInstanceError, getattr, u, 'name') def test_pending_raises(self): users, User = self.tables.users, self.classes.User # this was the opposite in 0.4, but the reasoning there seemed off. # expiring a pending instance makes no sense, so should raise mapper(User, users) sess = create_session() u = User(id=15) sess.add(u) assert_raises(sa_exc.InvalidRequestError, sess.expire, u, ['name']) def test_no_instance_key(self): User, users = self.classes.User, self.tables.users # this tests an artificial condition such that # an instance is pending, but has expired attributes. this # is actually part of a larger behavior when postfetch needs to # occur during a flush() on an instance that was just inserted mapper(User, users) sess = create_session() u = sess.query(User).get(7) sess.expire(u, attribute_names=['name']) sess.expunge(u) attributes.instance_state(u).key = None assert 'name' not in u.__dict__ sess.add(u) assert u.name == 'jack' def test_no_instance_key_no_pk(self): users, User = self.tables.users, self.classes.User # same as test_no_instance_key, but the PK columns # are absent. ensure an error is raised. 
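        # (illustrative note, not part of the original test) with the instance key
        # removed and the primary key attribute expired as well, there is no identity
        # left to SELECT by, so the refresh cannot proceed and attribute access is
        # expected to raise rather than load, as the assertion below does:
        #
        #   assert_raises(sa_exc.InvalidRequestError, getattr, u, 'name')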
mapper(User, users) sess = create_session() u = sess.query(User).get(7) sess.expire(u, attribute_names=['name', 'id']) sess.expunge(u) attributes.instance_state(u).key = None assert 'name' not in u.__dict__ sess.add(u) assert_raises(sa_exc.InvalidRequestError, getattr, u, 'name') def test_expire_preserves_changes(self): """test that the expire load operation doesn't revert post-expire changes""" Order, orders = self.classes.Order, self.tables.orders mapper(Order, orders) sess = create_session() o = sess.query(Order).get(3) sess.expire(o) o.description = "order 3 modified" def go(): assert o.isopen == 1 self.assert_sql_count(testing.db, go, 1) assert o.description == 'order 3 modified' del o.description assert "description" not in o.__dict__ sess.expire(o, ['isopen']) sess.query(Order).all() assert o.isopen == 1 assert "description" not in o.__dict__ assert o.description is None o.isopen=15 sess.expire(o, ['isopen', 'description']) o.description = 'some new description' sess.query(Order).all() assert o.isopen == 1 assert o.description == 'some new description' sess.expire(o, ['isopen', 'description']) sess.query(Order).all() del o.isopen def go(): assert o.isopen is None self.assert_sql_count(testing.db, go, 0) o.isopen=14 sess.expire(o) o.description = 'another new description' sess.query(Order).all() assert o.isopen == 1 assert o.description == 'another new description' def test_expire_committed(self): """test that the committed state of the attribute receives the most recent DB data""" orders, Order = self.tables.orders, self.classes.Order mapper(Order, orders) sess = create_session() o = sess.query(Order).get(3) sess.expire(o) orders.update(id=3).execute(description='order 3 modified') assert o.isopen == 1 assert attributes.instance_state(o).dict['description'] == 'order 3 modified' def go(): sess.flush() self.assert_sql_count(testing.db, go, 0) def test_expire_cascade(self): users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(User, users, properties={ 'addresses':relationship(Address, cascade="all, refresh-expire") }) mapper(Address, addresses) s = create_session() u = s.query(User).get(8) assert u.addresses[0].email_address == 'ed@wood.com' u.addresses[0].email_address = 'someotheraddress' s.expire(u) assert u.addresses[0].email_address == 'ed@wood.com' def test_refresh_cascade(self): users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(User, users, properties={ 'addresses':relationship(Address, cascade="all, refresh-expire") }) mapper(Address, addresses) s = create_session() u = s.query(User).get(8) assert u.addresses[0].email_address == 'ed@wood.com' u.addresses[0].email_address = 'someotheraddress' s.refresh(u) assert u.addresses[0].email_address == 'ed@wood.com' def test_expire_cascade_pending_orphan(self): cascade = 'save-update, refresh-expire, delete, delete-orphan' self._test_cascade_to_pending(cascade, True) def test_refresh_cascade_pending_orphan(self): cascade = 'save-update, refresh-expire, delete, delete-orphan' self._test_cascade_to_pending(cascade, False) def test_expire_cascade_pending(self): cascade = 'save-update, refresh-expire' self._test_cascade_to_pending(cascade, True) def test_refresh_cascade_pending(self): cascade = 'save-update, refresh-expire' self._test_cascade_to_pending(cascade, False) def _test_cascade_to_pending(self, cascade, expire_or_refresh): users, Address, addresses, User = (self.tables.users, 
self.classes.Address, self.tables.addresses, self.classes.User) mapper(User, users, properties={ 'addresses':relationship(Address, cascade=cascade) }) mapper(Address, addresses) s = create_session() u = s.query(User).get(8) a = Address(id=12, email_address='foobar') u.addresses.append(a) if expire_or_refresh: s.expire(u) else: s.refresh(u) if "delete-orphan" in cascade: assert a not in s else: assert a in s assert a not in u.addresses s.flush() def test_expired_lazy(self): users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(User, users, properties={ 'addresses':relationship(Address, backref='user'), }) mapper(Address, addresses) sess = create_session() u = sess.query(User).get(7) sess.expire(u) assert 'name' not in u.__dict__ assert 'addresses' not in u.__dict__ def go(): assert u.addresses[0].email_address == 'jack@bean.com' assert u.name == 'jack' # two loads self.assert_sql_count(testing.db, go, 2) assert 'name' in u.__dict__ assert 'addresses' in u.__dict__ def test_expired_eager(self): users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(User, users, properties={ 'addresses':relationship(Address, backref='user', lazy='joined'), }) mapper(Address, addresses) sess = create_session() u = sess.query(User).get(7) sess.expire(u) assert 'name' not in u.__dict__ assert 'addresses' not in u.__dict__ def go(): assert u.addresses[0].email_address == 'jack@bean.com' assert u.name == 'jack' # two loads, since relationship() + scalar are # separate right now on per-attribute load self.assert_sql_count(testing.db, go, 2) assert 'name' in u.__dict__ assert 'addresses' in u.__dict__ sess.expire(u, ['name', 'addresses']) assert 'name' not in u.__dict__ assert 'addresses' not in u.__dict__ def go(): sess.query(User).filter_by(id=7).one() assert u.addresses[0].email_address == 'jack@bean.com' assert u.name == 'jack' # one load, since relationship() + scalar are # together when eager load used with Query self.assert_sql_count(testing.db, go, 1) def test_relationship_changes_preserved(self): users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(User, users, properties={ 'addresses':relationship(Address, backref='user', lazy='joined'), }) mapper(Address, addresses) sess = create_session() u = sess.query(User).get(8) sess.expire(u, ['name', 'addresses']) u.addresses assert 'name' not in u.__dict__ del u.addresses[1] u.name assert 'name' in u.__dict__ assert len(u.addresses) == 2 def test_joinedload_props_dontload(self): users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) # relationships currently have to load separately from scalar instances. # the use case is: expire "addresses". then access it. lazy load # fires off to load "addresses", but needs foreign key or primary key # attributes in order to lazy load; hits those attributes, such as # below it hits "u.id". "u.id" triggers full unexpire operation, # joinedloads addresses since lazy='joined'. this is all wihtin lazy load # which fires unconditionally; so an unnecessary joinedload (or # lazyload) was issued. would prefer not to complicate lazyloading to # "figure out" that the operation should be aborted right now. 
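        # (illustrative recap, not part of the original test) the sequence exercised
        # below is roughly:
        #
        #   sess.expire(u)               # all attributes unloaded, 'addresses' too
        #   u.id                         # touching a column attribute runs the full
        #                                # unexpire SELECT; per the note above it also
        #                                # joins to addresses since lazy='joined'
        #   'addresses' in u.__dict__    # still False -- the relationship is not
        #                                # populated by the scalar unexpire
        #   u.addresses                  # a separate load finally fills the collection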
mapper(User, users, properties={ 'addresses':relationship(Address, backref='user', lazy='joined'), }) mapper(Address, addresses) sess = create_session() u = sess.query(User).get(8) sess.expire(u) u.id assert 'addresses' not in u.__dict__ u.addresses assert 'addresses' in u.__dict__ def test_expire_synonym(self): User, users = self.classes.User, self.tables.users mapper(User, users, properties={ 'uname': sa.orm.synonym('name') }) sess = create_session() u = sess.query(User).get(7) assert 'name' in u.__dict__ assert u.uname == u.name sess.expire(u) assert 'name' not in u.__dict__ users.update(users.c.id==7).execute(name='jack2') assert u.name == 'jack2' assert u.uname == 'jack2' assert 'name' in u.__dict__ # this wont work unless we add API hooks through the attr. system to # provide "expire" behavior on a synonym # sess.expire(u, ['uname']) # users.update(users.c.id==7).execute(name='jack3') # assert u.uname == 'jack3' def test_partial_expire(self): orders, Order = self.tables.orders, self.classes.Order mapper(Order, orders) sess = create_session() o = sess.query(Order).get(3) sess.expire(o, attribute_names=['description']) assert 'id' in o.__dict__ assert 'description' not in o.__dict__ assert attributes.instance_state(o).dict['isopen'] == 1 orders.update(orders.c.id==3).execute(description='order 3 modified') def go(): assert o.description == 'order 3 modified' self.assert_sql_count(testing.db, go, 1) assert attributes.instance_state(o).dict['description'] == 'order 3 modified' o.isopen = 5 sess.expire(o, attribute_names=['description']) assert 'id' in o.__dict__ assert 'description' not in o.__dict__ assert o.__dict__['isopen'] == 5 assert attributes.instance_state(o).committed_state['isopen'] == 1 def go(): assert o.description == 'order 3 modified' self.assert_sql_count(testing.db, go, 1) assert o.__dict__['isopen'] == 5 assert attributes.instance_state(o).dict['description'] == 'order 3 modified' assert attributes.instance_state(o).committed_state['isopen'] == 1 sess.flush() sess.expire(o, attribute_names=['id', 'isopen', 'description']) assert 'id' not in o.__dict__ assert 'isopen' not in o.__dict__ assert 'description' not in o.__dict__ def go(): assert o.description == 'order 3 modified' assert o.id == 3 assert o.isopen == 5 self.assert_sql_count(testing.db, go, 1) def test_partial_expire_lazy(self): users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(User, users, properties={ 'addresses':relationship(Address, backref='user'), }) mapper(Address, addresses) sess = create_session() u = sess.query(User).get(8) sess.expire(u, ['name', 'addresses']) assert 'name' not in u.__dict__ assert 'addresses' not in u.__dict__ # hit the lazy loader. 
just does the lazy load, # doesnt do the overall refresh def go(): assert u.addresses[0].email_address=='ed@wood.com' self.assert_sql_count(testing.db, go, 1) assert 'name' not in u.__dict__ # check that mods to expired lazy-load attributes # only do the lazy load sess.expire(u, ['name', 'addresses']) def go(): u.addresses = [Address(id=10, email_address='foo@bar.com')] self.assert_sql_count(testing.db, go, 1) sess.flush() # flush has occurred, and addresses was modified, # so the addresses collection got committed and is # longer expired def go(): assert u.addresses[0].email_address=='foo@bar.com' assert len(u.addresses) == 1 self.assert_sql_count(testing.db, go, 0) # but the name attribute was never loaded and so # still loads def go(): assert u.name == 'ed' self.assert_sql_count(testing.db, go, 1) def test_partial_expire_eager(self): users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(User, users, properties={ 'addresses':relationship(Address, backref='user', lazy='joined'), }) mapper(Address, addresses) sess = create_session() u = sess.query(User).get(8) sess.expire(u, ['name', 'addresses']) assert 'name' not in u.__dict__ assert 'addresses' not in u.__dict__ def go(): assert u.addresses[0].email_address=='ed@wood.com' self.assert_sql_count(testing.db, go, 1) # check that mods to expired eager-load attributes # do the refresh sess.expire(u, ['name', 'addresses']) def go(): u.addresses = [Address(id=10, email_address='foo@bar.com')] self.assert_sql_count(testing.db, go, 1) sess.flush() # this should ideally trigger the whole load # but currently it works like the lazy case def go(): assert u.addresses[0].email_address=='foo@bar.com' assert len(u.addresses) == 1 self.assert_sql_count(testing.db, go, 0) def go(): assert u.name == 'ed' # scalar attributes have their own load self.assert_sql_count(testing.db, go, 1) # ideally, this was already loaded, but we arent # doing it that way right now #self.assert_sql_count(testing.db, go, 0) def test_relationships_load_on_query(self): users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(User, users, properties={ 'addresses':relationship(Address, backref='user'), }) mapper(Address, addresses) sess = create_session() u = sess.query(User).get(8) assert 'name' in u.__dict__ u.addresses assert 'addresses' in u.__dict__ sess.expire(u, ['name', 'addresses']) assert 'name' not in u.__dict__ assert 'addresses' not in u.__dict__ (sess.query(User).options(sa.orm.joinedload('addresses')). 
filter_by(id=8).all()) assert 'name' in u.__dict__ assert 'addresses' in u.__dict__ def test_partial_expire_deferred(self): orders, Order = self.tables.orders, self.classes.Order mapper(Order, orders, properties={ 'description': sa.orm.deferred(orders.c.description) }) sess = create_session() o = sess.query(Order).get(3) sess.expire(o, ['description', 'isopen']) assert 'isopen' not in o.__dict__ assert 'description' not in o.__dict__ # test that expired attribute access refreshes # the deferred def go(): assert o.isopen == 1 assert o.description == 'order 3' self.assert_sql_count(testing.db, go, 1) sess.expire(o, ['description', 'isopen']) assert 'isopen' not in o.__dict__ assert 'description' not in o.__dict__ # test that the deferred attribute triggers the full # reload def go(): assert o.description == 'order 3' assert o.isopen == 1 self.assert_sql_count(testing.db, go, 1) sa.orm.clear_mappers() mapper(Order, orders) sess.expunge_all() # same tests, using deferred at the options level o = sess.query(Order).options(sa.orm.defer('description')).get(3) assert 'description' not in o.__dict__ # sanity check def go(): assert o.description == 'order 3' self.assert_sql_count(testing.db, go, 1) assert 'description' in o.__dict__ assert 'isopen' in o.__dict__ sess.expire(o, ['description', 'isopen']) assert 'isopen' not in o.__dict__ assert 'description' not in o.__dict__ # test that expired attribute access refreshes # the deferred def go(): assert o.isopen == 1 assert o.description == 'order 3' self.assert_sql_count(testing.db, go, 1) sess.expire(o, ['description', 'isopen']) assert 'isopen' not in o.__dict__ assert 'description' not in o.__dict__ # test that the deferred attribute triggers the full # reload def go(): assert o.description == 'order 3' assert o.isopen == 1 self.assert_sql_count(testing.db, go, 1) def test_joinedload_query_refreshes(self): users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(User, users, properties={ 'addresses':relationship(Address, backref='user', lazy='joined'), }) mapper(Address, addresses) sess = create_session() u = sess.query(User).get(8) assert len(u.addresses) == 3 sess.expire(u) assert 'addresses' not in u.__dict__ print "-------------------------------------------" sess.query(User).filter_by(id=8).all() assert 'addresses' in u.__dict__ assert len(u.addresses) == 3 def test_expire_all(self): users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(User, users, properties={ 'addresses':relationship(Address, backref='user', lazy='joined', order_by=addresses.c.id), }) mapper(Address, addresses) sess = create_session() userlist = sess.query(User).order_by(User.id).all() assert self.static.user_address_result == userlist assert len(list(sess)) == 9 sess.expire_all() gc_collect() assert len(list(sess)) == 4 # since addresses were gc'ed userlist = sess.query(User).order_by(User.id).all() u = userlist[1] eq_(self.static.user_address_result, userlist) assert len(list(sess)) == 9 def test_state_change_col_to_deferred(self): """Behavioral test to verify the current activity of loader callables.""" users, User = self.tables.users, self.classes.User mapper(User, users) sess = create_session() # deferred attribute option, gets the LoadDeferredColumns # callable u1 = sess.query(User).options(defer(User.name)).first() assert isinstance( attributes.instance_state(u1).callables['name'], strategies.LoadDeferredColumns ) # expire the 
attr, it gets the InstanceState callable sess.expire(u1, ['name']) assert isinstance( attributes.instance_state(u1).callables['name'], state.InstanceState ) # load it, callable is gone u1.name assert 'name' not in attributes.instance_state(u1).callables # same for expire all sess.expunge_all() u1 = sess.query(User).options(defer(User.name)).first() sess.expire(u1) assert isinstance( attributes.instance_state(u1).callables['name'], state.InstanceState ) # load over it. everything normal. sess.query(User).first() assert 'name' not in attributes.instance_state(u1).callables sess.expunge_all() u1 = sess.query(User).first() # for non present, still expires the same way del u1.name sess.expire(u1) assert 'name' in attributes.instance_state(u1).callables def test_state_deferred_to_col(self): """Behavioral test to verify the current activity of loader callables.""" users, User = self.tables.users, self.classes.User mapper(User, users, properties={'name':deferred(users.c.name)}) sess = create_session() u1 = sess.query(User).options(undefer(User.name)).first() assert 'name' not in attributes.instance_state(u1).callables # mass expire, the attribute was loaded, # the attribute gets the callable sess.expire(u1) assert isinstance( attributes.instance_state(u1).callables['name'], state.InstanceState ) # load it, callable is gone u1.name assert 'name' not in attributes.instance_state(u1).callables # mass expire, attribute was loaded but then deleted, # the callable goes away - the state wants to flip # it back to its "deferred" loader. sess.expunge_all() u1 = sess.query(User).options(undefer(User.name)).first() del u1.name sess.expire(u1) assert 'name' not in attributes.instance_state(u1).callables # single attribute expire, the attribute gets the callable sess.expunge_all() u1 = sess.query(User).options(undefer(User.name)).first() sess.expire(u1, ['name']) assert isinstance( attributes.instance_state(u1).callables['name'], state.InstanceState ) def test_state_noload_to_lazy(self): """Behavioral test to verify the current activity of loader callables.""" users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(User, users, properties={'addresses':relationship(Address, lazy='noload')}) mapper(Address, addresses) sess = create_session() u1 = sess.query(User).options(lazyload(User.addresses)).first() assert isinstance( attributes.instance_state(u1).callables['addresses'], strategies.LoadLazyAttribute ) # expire, it stays sess.expire(u1) assert isinstance( attributes.instance_state(u1).callables['addresses'], strategies.LoadLazyAttribute ) # load over it. callable goes away. 
sess.query(User).first() assert 'addresses' not in attributes.instance_state(u1).callables sess.expunge_all() u1 = sess.query(User).options(lazyload(User.addresses)).first() sess.expire(u1, ['addresses']) assert isinstance( attributes.instance_state(u1).callables['addresses'], strategies.LoadLazyAttribute ) # load the attr, goes away u1.addresses assert 'addresses' not in attributes.instance_state(u1).callables class PolymorphicExpireTest(fixtures.MappedTest): run_inserts = 'once' run_deletes = None @classmethod def define_tables(cls, metadata): people = Table('people', metadata, Column('person_id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', String(50)), Column('type', String(30)), ) engineers = Table('engineers', metadata, Column('person_id', Integer, ForeignKey('people.person_id'), primary_key=True), Column('status', String(30)), ) @classmethod def setup_classes(cls): class Person(cls.Basic): pass class Engineer(Person): pass @classmethod def insert_data(cls): people, engineers = cls.tables.people, cls.tables.engineers people.insert().execute( {'person_id':1, 'name':'person1', 'type':'person'}, {'person_id':2, 'name':'engineer1', 'type':'engineer'}, {'person_id':3, 'name':'engineer2', 'type':'engineer'}, ) engineers.insert().execute( {'person_id':2, 'status':'new engineer'}, {'person_id':3, 'status':'old engineer'}, ) @classmethod def setup_mappers(cls): Person, people, engineers, Engineer = (cls.classes.Person, cls.tables.people, cls.tables.engineers, cls.classes.Engineer) mapper(Person, people, polymorphic_on=people.c.type, polymorphic_identity='person') mapper(Engineer, engineers, inherits=Person, polymorphic_identity='engineer') def test_poly_deferred(self): Person, people, Engineer = (self.classes.Person, self.tables.people, self.classes.Engineer) sess = create_session() [p1, e1, e2] = sess.query(Person).order_by(people.c.person_id).all() sess.expire(p1) sess.expire(e1, ['status']) sess.expire(e2) for p in [p1, e2]: assert 'name' not in p.__dict__ assert 'name' in e1.__dict__ assert 'status' not in e2.__dict__ assert 'status' not in e1.__dict__ e1.name = 'new engineer name' def go(): sess.query(Person).all() self.assert_sql_count(testing.db, go, 1) for p in [p1, e1, e2]: assert 'name' in p.__dict__ assert 'status' not in e2.__dict__ assert 'status' not in e1.__dict__ def go(): assert e1.name == 'new engineer name' assert e2.name == 'engineer2' assert e1.status == 'new engineer' assert e2.status == 'old engineer' self.assert_sql_count(testing.db, go, 2) eq_(Engineer.name.get_history(e1), (['new engineer name'],(), ['engineer1'])) def test_no_instance_key(self): Engineer = self.classes.Engineer sess = create_session() e1 = sess.query(Engineer).get(2) sess.expire(e1, attribute_names=['name']) sess.expunge(e1) attributes.instance_state(e1).key = None assert 'name' not in e1.__dict__ sess.add(e1) assert e1.name == 'engineer1' def test_no_instance_key(self): Engineer = self.classes.Engineer # same as test_no_instance_key, but the PK columns # are absent. ensure an error is raised. 
sess = create_session() e1 = sess.query(Engineer).get(2) sess.expire(e1, attribute_names=['name', 'person_id']) sess.expunge(e1) attributes.instance_state(e1).key = None assert 'name' not in e1.__dict__ sess.add(e1) assert_raises(sa_exc.InvalidRequestError, getattr, e1, 'name') class ExpiredPendingTest(_fixtures.FixtureTest): run_define_tables = 'once' run_setup_classes = 'once' run_setup_mappers = None run_inserts = None def test_expired_pending(self): users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(User, users, properties={ 'addresses':relationship(Address, backref='user'), }) mapper(Address, addresses) sess = create_session() a1 = Address(email_address='a1') sess.add(a1) sess.flush() u1 = User(name='u1') a1.user = u1 sess.flush() # expire 'addresses'. backrefs # which attach to u1 will expect to be "pending" sess.expire(u1, ['addresses']) # attach an Address. now its "pending" # in user.addresses a2 = Address(email_address='a2') a2.user = u1 # expire u1.addresses again. this expires # "pending" as well. sess.expire(u1, ['addresses']) # insert a new row sess.execute(addresses.insert(), dict(email_address='a3', user_id=u1.id)) # only two addresses pulled from the DB, no "pending" assert len(u1.addresses) == 2 sess.flush() sess.expire_all() assert len(u1.addresses) == 3 class RefreshTest(_fixtures.FixtureTest): def test_refresh(self): users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(User, users, properties={ 'addresses':relationship(mapper(Address, addresses), backref='user') }) s = create_session() u = s.query(User).get(7) u.name = 'foo' a = Address() assert sa.orm.object_session(a) is None u.addresses.append(a) assert a.email_address is None assert id(a) in [id(x) for x in u.addresses] s.refresh(u) # its refreshed, so not dirty assert u not in s.dirty # username is back to the DB assert u.name == 'jack' assert id(a) not in [id(x) for x in u.addresses] u.name = 'foo' u.addresses.append(a) # now its dirty assert u in s.dirty assert u.name == 'foo' assert id(a) in [id(x) for x in u.addresses] s.expire(u) # get the attribute, it refreshes assert u.name == 'jack' assert id(a) not in [id(x) for x in u.addresses] def test_persistence_check(self): users, User = self.tables.users, self.classes.User mapper(User, users) s = create_session() u = s.query(User).get(7) s.expunge_all() assert_raises_message(sa_exc.InvalidRequestError, r"is not persistent within this Session", lambda: s.refresh(u)) def test_refresh_expired(self): User, users = self.classes.User, self.tables.users mapper(User, users) s = create_session() u = s.query(User).get(7) s.expire(u) assert 'name' not in u.__dict__ s.refresh(u) assert u.name == 'jack' def test_refresh_with_lazy(self): """test that when a lazy loader is set as a trigger on an object's attribute (at the attribute level, not the class level), a refresh() operation doesnt fire the lazy loader or create any problems""" User, Address, addresses, users = (self.classes.User, self.classes.Address, self.tables.addresses, self.tables.users) s = create_session() mapper(User, users, properties={'addresses':relationship(mapper(Address, addresses))}) q = s.query(User).options(sa.orm.lazyload('addresses')) u = q.filter(users.c.id==8).first() def go(): s.refresh(u) self.assert_sql_count(testing.db, go, 1) def test_refresh_with_eager(self): """test that a refresh/expire operation loads rows properly and sends correct "isnew" state 
to eager loaders""" users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(User, users, properties={ 'addresses':relationship(mapper(Address, addresses), lazy='joined') }) s = create_session() u = s.query(User).get(8) assert len(u.addresses) == 3 s.refresh(u) assert len(u.addresses) == 3 s = create_session() u = s.query(User).get(8) assert len(u.addresses) == 3 s.expire(u) assert len(u.addresses) == 3 @testing.fails_on('maxdb', 'FIXME: unknown') def test_refresh2(self): """test a hang condition that was occurring on expire/refresh""" Address, addresses, users, User = (self.classes.Address, self.tables.addresses, self.tables.users, self.classes.User) s = create_session() mapper(Address, addresses) mapper(User, users, properties = dict(addresses=relationship(Address,cascade="all, delete-orphan",lazy='joined')) ) u = User() u.name='Justin' a = Address(id=10, email_address='lala') u.addresses.append(a) s.add(u) s.flush() s.expunge_all() u = s.query(User).filter(User.name=='Justin').one() s.expire(u) assert u.name == 'Justin' s.refresh(u) SQLAlchemy-0.8.4/test/orm/test_froms.py0000644000076500000240000027004712251150016020547 0ustar classicstaff00000000000000from sqlalchemy.testing import eq_, assert_raises, assert_raises_message import operator from sqlalchemy import * from sqlalchemy import exc as sa_exc, util from sqlalchemy.sql import compiler, table, column from sqlalchemy.engine import default from sqlalchemy.orm import * from sqlalchemy.orm import attributes from sqlalchemy.testing import eq_ import sqlalchemy as sa from sqlalchemy import testing from sqlalchemy.testing import AssertsCompiledSQL, engines from sqlalchemy.testing.schema import Column from test.orm import _fixtures from sqlalchemy.testing import fixtures from sqlalchemy.orm.util import join, outerjoin, with_parent class QueryTest(_fixtures.FixtureTest): run_setup_mappers = 'once' run_inserts = 'once' run_deletes = None @classmethod def setup_mappers(cls): Node, composite_pk_table, users, Keyword, items, Dingaling, \ order_items, item_keywords, Item, User, dingalings, \ Address, keywords, CompositePk, nodes, Order, orders, \ addresses = cls.classes.Node, \ cls.tables.composite_pk_table, cls.tables.users, \ cls.classes.Keyword, cls.tables.items, \ cls.classes.Dingaling, cls.tables.order_items, \ cls.tables.item_keywords, cls.classes.Item, \ cls.classes.User, cls.tables.dingalings, \ cls.classes.Address, cls.tables.keywords, \ cls.classes.CompositePk, cls.tables.nodes, \ cls.classes.Order, cls.tables.orders, cls.tables.addresses mapper(User, users, properties={ 'addresses':relationship(Address, backref='user', order_by=addresses.c.id), 'orders':relationship(Order, backref='user', order_by=orders.c.id), # o2m, m2o }) mapper(Address, addresses, properties={ 'dingaling':relationship(Dingaling, uselist=False, backref="address") #o2o }) mapper(Dingaling, dingalings) mapper(Order, orders, properties={ 'items':relationship(Item, secondary=order_items, order_by=items.c.id), #m2m 'address':relationship(Address), # m2o }) mapper(Item, items, properties={ 'keywords':relationship(Keyword, secondary=item_keywords) #m2m }) mapper(Keyword, keywords) mapper(Node, nodes, properties={ 'children':relationship(Node, backref=backref('parent', remote_side=[nodes.c.id]) ) }) mapper(CompositePk, composite_pk_table) configure_mappers() class QueryCorrelatesLikeSelect(QueryTest, AssertsCompiledSQL): query_correlated = "SELECT users.name AS users_name, " \ "(SELECT count(addresses.id) 
AS count_1 FROM addresses " \ "WHERE addresses.user_id = users.id) AS anon_1 FROM users" query_not_correlated = "SELECT users.name AS users_name, " \ "(SELECT count(addresses.id) AS count_1 FROM addresses, users " \ "WHERE addresses.user_id = users.id) AS anon_1 FROM users" def test_as_scalar_select_auto_correlate(self): addresses, users = self.tables.addresses, self.tables.users query = select( [func.count(addresses.c.id)], addresses.c.user_id==users.c.id ).as_scalar() query = select([users.c.name.label('users_name'), query]) self.assert_compile(query, self.query_correlated, dialect=default.DefaultDialect() ) def test_as_scalar_select_explicit_correlate(self): addresses, users = self.tables.addresses, self.tables.users query = select( [func.count(addresses.c.id)], addresses.c.user_id==users.c.id ).correlate(users).as_scalar() query = select([users.c.name.label('users_name'), query]) self.assert_compile(query, self.query_correlated, dialect=default.DefaultDialect() ) def test_as_scalar_select_correlate_off(self): addresses, users = self.tables.addresses, self.tables.users query = select( [func.count(addresses.c.id)], addresses.c.user_id==users.c.id ).correlate(None).as_scalar() query = select([ users.c.name.label('users_name'), query]) self.assert_compile(query, self.query_not_correlated, dialect=default.DefaultDialect() ) def test_as_scalar_query_auto_correlate(self): sess = create_session() Address, User = self.classes.Address, self.classes.User query = sess.query(func.count(Address.id))\ .filter(Address.user_id==User.id)\ .as_scalar() query = sess.query(User.name, query) self.assert_compile(query, self.query_correlated, dialect=default.DefaultDialect() ) def test_as_scalar_query_explicit_correlate(self): sess = create_session() Address, User = self.classes.Address, self.classes.User query = sess.query(func.count(Address.id))\ .filter(Address.user_id==User.id)\ .correlate(self.tables.users)\ .as_scalar() query = sess.query(User.name, query) self.assert_compile(query, self.query_correlated, dialect=default.DefaultDialect() ) def test_as_scalar_query_correlate_off(self): sess = create_session() Address, User = self.classes.Address, self.classes.User query = sess.query(func.count(Address.id))\ .filter(Address.user_id==User.id)\ .correlate(None)\ .as_scalar() query = sess.query(User.name, query) self.assert_compile(query, self.query_not_correlated, dialect=default.DefaultDialect() ) class RawSelectTest(QueryTest, AssertsCompiledSQL): """compare a bunch of select() tests with the equivalent Query using straight table/columns. Results should be the same as Query should act as a select() pass- thru for ClauseElement entities. """ __dialect__ = 'default' def test_select(self): addresses, users = self.tables.addresses, self.tables.users sess = create_session() self.assert_compile(sess.query(users).select_from( users.select()).with_labels().statement, "SELECT users.id AS users_id, users.name AS users_name FROM users, " "(SELECT users.id AS id, users.name AS name FROM users) AS anon_1", ) self.assert_compile(sess.query(users, exists([1], from_obj=addresses) ).with_labels().statement, "SELECT users.id AS users_id, users.name AS users_name, EXISTS " "(SELECT 1 FROM addresses) AS anon_1 FROM users", ) # a little tedious here, adding labels to work around Query's # auto-labelling. 
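        # (illustrative note, not part of the original test) without the explicit
        # .label() calls below, Query would apply its own labels to these plain
        # Column elements, making the inner SELECT's column names awkward to
        # reference in the expected-SQL strings.  labeling pins them down, e.g.:
        #
        #   addresses.c.email_address.label('email')   # referenced as anon_1.email
        #                                               # once the statement is aliased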
s = sess.query(addresses.c.id.label('id'), addresses.c.email_address.label('email')).\ filter(addresses.c.user_id == users.c.id).correlate(users).\ statement.alias() self.assert_compile(sess.query(users, s.c.email).select_from( users.join(s, s.c.id == users.c.id) ).with_labels().statement, "SELECT users.id AS users_id, users.name AS users_name, " "anon_1.email AS anon_1_email " "FROM users JOIN (SELECT addresses.id AS id, " "addresses.email_address AS email FROM addresses, users " "WHERE addresses.user_id = users.id) AS anon_1 " "ON anon_1.id = users.id", ) x = func.lala(users.c.id).label('foo') self.assert_compile(sess.query(x).filter(x == 5).statement, "SELECT lala(users.id) AS foo FROM users WHERE " "lala(users.id) = :param_1") self.assert_compile(sess.query(func.sum(x).label('bar')).statement, "SELECT sum(lala(users.id)) AS bar FROM users") class FromSelfTest(QueryTest, AssertsCompiledSQL): __dialect__ = 'default' def test_filter(self): User = self.classes.User eq_( [User(id=8), User(id=9)], create_session(). query(User). filter(User.id.in_([8,9])). from_self().all() ) eq_( [User(id=8), User(id=9)], create_session().query(User). order_by(User.id).slice(1,3). from_self().all() ) eq_( [User(id=8)], list( create_session(). query(User). filter(User.id.in_([8,9])). from_self().order_by(User.id)[0:1] ) ) def test_join(self): User, Address = self.classes.User, self.classes.Address eq_( [ (User(id=8), Address(id=2)), (User(id=8), Address(id=3)), (User(id=8), Address(id=4)), (User(id=9), Address(id=5)) ], create_session(). query(User). filter(User.id.in_([8,9])). from_self(). join('addresses'). add_entity(Address). order_by(User.id, Address.id).all() ) def test_group_by(self): Address = self.classes.Address eq_( create_session().query(Address.user_id, func.count(Address.id).label('count')).\ group_by(Address.user_id). order_by(Address.user_id).all(), [(7, 1), (8, 3), (9, 1)] ) eq_( create_session().query(Address.user_id, Address.id).\ from_self(Address.user_id, func.count(Address.id)).\ group_by(Address.user_id). order_by(Address.user_id).all(), [(7, 1), (8, 3), (9, 1)] ) def test_having(self): User = self.classes.User s = create_session() self.assert_compile( s.query(User.id).group_by(User.id).having(User.id>5). from_self(), "SELECT anon_1.users_id AS anon_1_users_id FROM " "(SELECT users.id AS users_id FROM users GROUP " "BY users.id HAVING users.id > :id_1) AS anon_1" ) def test_no_joinedload(self): """test that joinedloads are pushed outwards and not rendered in subqueries.""" User = self.classes.User s = create_session() self.assert_compile( s.query(User).options(joinedload(User.addresses)). from_self().statement, "SELECT anon_1.users_id, anon_1.users_name, addresses_1.id, " "addresses_1.user_id, addresses_1.email_address FROM " "(SELECT users.id AS users_id, users.name AS " "users_name FROM users) AS anon_1 LEFT OUTER JOIN " "addresses AS addresses_1 ON anon_1.users_id = " "addresses_1.user_id ORDER BY addresses_1.id" ) def test_aliases(self): """test that aliased objects are accessible externally to a from_self() call.""" User, Address = self.classes.User, self.classes.Address s = create_session() ualias = aliased(User) eq_( s.query(User, ualias).filter(User.id > ualias.id). from_self(User.name, ualias.name). order_by(User.name, ualias.name).all(), [ (u'chuck', u'ed'), (u'chuck', u'fred'), (u'chuck', u'jack'), (u'ed', u'jack'), (u'fred', u'ed'), (u'fred', u'jack') ] ) eq_( s.query(User, ualias). filter(User.id > ualias.id). from_self(User.name, ualias.name). 
filter(ualias.name=='ed')\ .order_by(User.name, ualias.name).all(), [(u'chuck', u'ed'), (u'fred', u'ed')] ) eq_( s.query(User, ualias). filter(User.id > ualias.id). from_self(ualias.name, Address.email_address). join(ualias.addresses). order_by(ualias.name, Address.email_address).all(), [ (u'ed', u'fred@fred.com'), (u'jack', u'ed@bettyboop.com'), (u'jack', u'ed@lala.com'), (u'jack', u'ed@wood.com'), (u'jack', u'fred@fred.com')] ) def test_multiple_entities(self): User, Address = self.classes.User, self.classes.Address sess = create_session() eq_( sess.query(User, Address).\ filter(User.id==Address.user_id).\ filter(Address.id.in_([2, 5])).from_self().all(), [ (User(id=8), Address(id=2)), (User(id=9), Address(id=5)) ] ) eq_( sess.query(User, Address).\ filter(User.id==Address.user_id).\ filter(Address.id.in_([2, 5])).\ from_self().\ options(joinedload('addresses')).first(), (User(id=8, addresses=[Address(), Address(), Address()]), Address(id=2)), ) def test_multiple_with_column_entities(self): User = self.classes.User sess = create_session() eq_( sess.query(User.id).from_self().\ add_column(func.count().label('foo')).\ group_by(User.id).\ order_by(User.id).\ from_self().all(), [ (7,1), (8, 1), (9, 1), (10, 1) ] ) class ColumnAccessTest(QueryTest, AssertsCompiledSQL): """test access of columns after _from_selectable has been applied""" __dialect__ = 'default' def test_from_self(self): User = self.classes.User sess = create_session() q = sess.query(User).from_self() self.assert_compile( q.filter(User.name=='ed'), "SELECT anon_1.users_id AS anon_1_users_id, anon_1.users_name AS " "anon_1_users_name FROM (SELECT users.id AS users_id, users.name " "AS users_name FROM users) AS anon_1 WHERE anon_1.users_name = " ":name_1" ) def test_from_self_twice(self): User = self.classes.User sess = create_session() q = sess.query(User).from_self(User.id, User.name).from_self() self.assert_compile( q.filter(User.name=='ed'), "SELECT anon_1.anon_2_users_id AS anon_1_anon_2_users_id, " "anon_1.anon_2_users_name AS anon_1_anon_2_users_name FROM " "(SELECT anon_2.users_id AS anon_2_users_id, anon_2.users_name " "AS anon_2_users_name FROM (SELECT users.id AS users_id, " "users.name AS users_name FROM users) AS anon_2) AS anon_1 " "WHERE anon_1.anon_2_users_name = :name_1" ) def test_select_entity_from(self): User = self.classes.User sess = create_session() q = sess.query(User) q = sess.query(User).select_entity_from(q.statement) self.assert_compile( q.filter(User.name=='ed'), "SELECT anon_1.id AS anon_1_id, anon_1.name AS anon_1_name " "FROM (SELECT users.id AS id, users.name AS name FROM " "users) AS anon_1 WHERE anon_1.name = :name_1" ) def test_select_entity_from_no_entities(self): User = self.classes.User sess = create_session() q = sess.query(User) assert_raises_message( sa.exc.ArgumentError, r"A selectable \(FromClause\) instance is " "expected when the base alias is being set.", sess.query(User).select_entity_from, User ) def test_select_from(self): User = self.classes.User sess = create_session() q = sess.query(User) q = sess.query(User).select_from(q.statement) self.assert_compile( q.filter(User.name=='ed'), "SELECT anon_1.id AS anon_1_id, anon_1.name AS anon_1_name " "FROM (SELECT users.id AS id, users.name AS name FROM " "users) AS anon_1 WHERE anon_1.name = :name_1" ) def test_anonymous_expression(self): from sqlalchemy.sql import column sess = create_session() c1, c2 = column('c1'), column('c2') q1 = sess.query(c1, c2).filter(c1 == 'dog') q2 = sess.query(c1, c2).filter(c1 == 'cat') q3 = q1.union(q2) 
self.assert_compile( q3.order_by(c1), "SELECT anon_1.c1 AS anon_1_c1, anon_1.c2 " "AS anon_1_c2 FROM (SELECT c1 AS c1, c2 AS c2 WHERE " "c1 = :c1_1 UNION SELECT c1 AS c1, c2 AS c2 " "WHERE c1 = :c1_2) AS anon_1 ORDER BY anon_1.c1" ) def test_anonymous_expression_from_self_twice(self): from sqlalchemy.sql import column sess = create_session() c1, c2 = column('c1'), column('c2') q1 = sess.query(c1, c2).filter(c1 == 'dog') q1 = q1.from_self().from_self() self.assert_compile( q1.order_by(c1), "SELECT anon_1.anon_2_c1 AS anon_1_anon_2_c1, anon_1.anon_2_c2 AS " "anon_1_anon_2_c2 FROM (SELECT anon_2.c1 AS anon_2_c1, anon_2.c2 " "AS anon_2_c2 FROM (SELECT c1 AS c1, c2 AS c2 WHERE c1 = :c1_1) AS " "anon_2) AS anon_1 ORDER BY anon_1.anon_2_c1" ) def test_anonymous_expression_union(self): from sqlalchemy.sql import column sess = create_session() c1, c2 = column('c1'), column('c2') q1 = sess.query(c1, c2).filter(c1 == 'dog') q2 = sess.query(c1, c2).filter(c1 == 'cat') q3 = q1.union(q2) self.assert_compile( q3.order_by(c1), "SELECT anon_1.c1 AS anon_1_c1, anon_1.c2 " "AS anon_1_c2 FROM (SELECT c1 AS c1, c2 AS c2 WHERE " "c1 = :c1_1 UNION SELECT c1 AS c1, c2 AS c2 " "WHERE c1 = :c1_2) AS anon_1 ORDER BY anon_1.c1" ) def test_table_anonymous_expression_from_self_twice(self): from sqlalchemy.sql import column, table sess = create_session() t1 = table('t1', column('c1'), column('c2')) q1 = sess.query(t1.c.c1, t1.c.c2).filter(t1.c.c1 == 'dog') q1 = q1.from_self().from_self() self.assert_compile( q1.order_by(t1.c.c1), "SELECT anon_1.anon_2_t1_c1 AS anon_1_anon_2_t1_c1, anon_1.anon_2_t1_c2 " "AS anon_1_anon_2_t1_c2 FROM (SELECT anon_2.t1_c1 AS anon_2_t1_c1, " "anon_2.t1_c2 AS anon_2_t1_c2 FROM (SELECT t1.c1 AS t1_c1, t1.c2 " "AS t1_c2 FROM t1 WHERE t1.c1 = :c1_1) AS anon_2) AS anon_1 ORDER BY " "anon_1.anon_2_t1_c1" ) def test_anonymous_labeled_expression(self): from sqlalchemy.sql import column sess = create_session() c1, c2 = column('c1'), column('c2') q1 = sess.query(c1.label('foo'), c2.label('bar')).filter(c1 == 'dog') q2 = sess.query(c1.label('foo'), c2.label('bar')).filter(c1 == 'cat') q3 = q1.union(q2) self.assert_compile( q3.order_by(c1), "SELECT anon_1.foo AS anon_1_foo, anon_1.bar AS anon_1_bar FROM " "(SELECT c1 AS foo, c2 AS bar WHERE c1 = :c1_1 UNION SELECT " "c1 AS foo, c2 AS bar WHERE c1 = :c1_2) AS anon_1 ORDER BY anon_1.foo" ) def test_anonymous_expression_plus_aliased_join(self): """test that the 'dont alias non-ORM' rule remains for other kinds of aliasing when _from_selectable() is used.""" User = self.classes.User Address = self.classes.Address addresses = self.tables.addresses sess = create_session() q1 = sess.query(User.id).filter(User.id > 5) q1 = q1.from_self() q1 = q1.join(User.addresses, aliased=True).\ order_by(User.id, Address.id, addresses.c.id) self.assert_compile( q1, "SELECT anon_1.users_id AS anon_1_users_id " "FROM (SELECT users.id AS users_id FROM users " "WHERE users.id > :id_1) AS anon_1 JOIN addresses AS addresses_1 " "ON anon_1.users_id = addresses_1.user_id " "ORDER BY anon_1.users_id, addresses_1.id, addresses.id" ) class AddEntityEquivalenceTest(fixtures.MappedTest, AssertsCompiledSQL): run_setup_mappers = 'once' @classmethod def define_tables(cls, metadata): Table('a', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', String(50)), Column('type', String(20)), Column('bid', Integer, ForeignKey('b.id')) ) Table('b', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', 
String(50)), Column('type', String(20)) ) Table('c', metadata, Column('id', Integer, ForeignKey('b.id'), primary_key=True), Column('age', Integer) ) Table('d', metadata, Column('id', Integer, ForeignKey('a.id'), primary_key=True), Column('dede', Integer) ) @classmethod def setup_classes(cls): a, c, b, d = (cls.tables.a, cls.tables.c, cls.tables.b, cls.tables.d) class A(cls.Comparable): pass class B(cls.Comparable): pass class C(B): pass class D(A): pass mapper(A, a, polymorphic_identity='a', polymorphic_on=a.c.type, with_polymorphic= ('*', None), properties={ 'link':relation( B, uselist=False, backref='back') }) mapper(B, b, polymorphic_identity='b', polymorphic_on=b.c.type, with_polymorphic= ('*', None) ) mapper(C, c, inherits=B, polymorphic_identity='c') mapper(D, d, inherits=A, polymorphic_identity='d') @classmethod def insert_data(cls): A, C, B = (cls.classes.A, cls.classes.C, cls.classes.B) sess = create_session() sess.add_all([ B(name='b1'), A(name='a1', link= C(name='c1',age=3)), C(name='c2',age=6), A(name='a2') ]) sess.flush() def test_add_entity_equivalence(self): A, C, B = (self.classes.A, self.classes.C, self.classes.B) sess = create_session() for q in [ sess.query( A,B).join( A.link), sess.query( A).join( A.link).add_entity(B), ]: eq_( q.all(), [( A(bid=2, id=1, name=u'a1', type=u'a'), C(age=3, id=2, name=u'c1', type=u'c') )] ) for q in [ sess.query( B,A).join( B.back), sess.query( B).join( B.back).add_entity(A), sess.query( B).add_entity(A).join( B.back) ]: eq_( q.all(), [( C(age=3, id=2, name=u'c1', type=u'c'), A(bid=2, id=1, name=u'a1', type=u'a') )] ) class InstancesTest(QueryTest, AssertsCompiledSQL): def test_from_alias(self): User, addresses, users = (self.classes.User, self.tables.addresses, self.tables.users) query = users.select(users.c.id==7).\ union(users.select(users.c.id>7)).\ alias('ulist').\ outerjoin(addresses).\ select(use_labels=True, order_by=['ulist.id', addresses.c.id]) sess =create_session() q = sess.query(User) def go(): l = list(q.options(contains_alias('ulist'), contains_eager('addresses')).\ instances(query.execute())) assert self.static.user_address_result == l self.assert_sql_count(testing.db, go, 1) sess.expunge_all() def go(): l = q.options(contains_alias('ulist'), contains_eager('addresses')).\ from_statement(query).all() assert self.static.user_address_result == l self.assert_sql_count(testing.db, go, 1) # better way. 
use select_from() def go(): l = sess.query(User).select_from(query).\ options(contains_eager('addresses')).all() assert self.static.user_address_result == l self.assert_sql_count(testing.db, go, 1) # same thing, but alias addresses, so that the adapter # generated by select_from() is wrapped within # the adapter created by contains_eager() adalias = addresses.alias() query = users.select(users.c.id==7).\ union(users.select(users.c.id>7)).\ alias('ulist').\ outerjoin(adalias).\ select(use_labels=True, order_by=['ulist.id', adalias.c.id]) def go(): l = sess.query(User).select_from(query).\ options(contains_eager('addresses', alias=adalias)).all() assert self.static.user_address_result == l self.assert_sql_count(testing.db, go, 1) def test_contains_eager(self): users, addresses, User = (self.tables.users, self.tables.addresses, self.classes.User) sess = create_session() # test that contains_eager suppresses the normal outer join rendering q = sess.query(User).outerjoin(User.addresses).\ options(contains_eager(User.addresses)).\ order_by(User.id, addresses.c.id) self.assert_compile(q.with_labels().statement, 'SELECT addresses.id AS addresses_id, ' 'addresses.user_id AS addresses_user_id, ' 'addresses.email_address AS ' 'addresses_email_address, users.id AS ' 'users_id, users.name AS users_name FROM ' 'users LEFT OUTER JOIN addresses ON ' 'users.id = addresses.user_id ORDER BY ' 'users.id, addresses.id', dialect=default.DefaultDialect()) def go(): assert self.static.user_address_result == q.all() self.assert_sql_count(testing.db, go, 1) sess.expunge_all() adalias = addresses.alias() q = sess.query(User).\ select_from(users.outerjoin(adalias)).\ options(contains_eager(User.addresses, alias=adalias)).\ order_by(User.id, adalias.c.id) def go(): eq_(self.static.user_address_result, q.order_by(User.id).all()) self.assert_sql_count(testing.db, go, 1) sess.expunge_all() selectquery = users.\ outerjoin(addresses).\ select(users.c.id<10, use_labels=True, order_by=[users.c.id, addresses.c.id]) q = sess.query(User) def go(): l = list(q.options( contains_eager('addresses') ).instances(selectquery.execute())) assert self.static.user_address_result[0:3] == l self.assert_sql_count(testing.db, go, 1) sess.expunge_all() def go(): l = list(q.options( contains_eager(User.addresses) ).instances(selectquery.execute())) assert self.static.user_address_result[0:3] == l self.assert_sql_count(testing.db, go, 1) sess.expunge_all() def go(): l = q.options( contains_eager('addresses') ).from_statement(selectquery).all() assert self.static.user_address_result[0:3] == l self.assert_sql_count(testing.db, go, 1) def test_contains_eager_string_alias(self): addresses, users, User = (self.tables.addresses, self.tables.users, self.classes.User) sess = create_session() q = sess.query(User) adalias = addresses.alias('adalias') selectquery = users.outerjoin(adalias).\ select(use_labels=True, order_by=[users.c.id, adalias.c.id]) # string alias name def go(): l = list(q.options( contains_eager('addresses', alias="adalias") ).instances(selectquery.execute())) assert self.static.user_address_result == l self.assert_sql_count(testing.db, go, 1) def test_contains_eager_aliased_instances(self): addresses, users, User = (self.tables.addresses, self.tables.users, self.classes.User) sess = create_session() q = sess.query(User) adalias = addresses.alias('adalias') selectquery = users.outerjoin(adalias).\ select(use_labels=True, order_by=[users.c.id, adalias.c.id]) # expression.Alias object def go(): l = list(q.options( 
contains_eager('addresses', alias=adalias) ).instances(selectquery.execute())) assert self.static.user_address_result == l self.assert_sql_count(testing.db, go, 1) def test_contains_eager_aliased(self): User, Address = self.classes.User, self.classes.Address sess = create_session() q = sess.query(User) # Aliased object adalias = aliased(Address) def go(): l = q.options( contains_eager('addresses', alias=adalias) ).\ outerjoin(adalias, User.addresses).\ order_by(User.id, adalias.id) assert self.static.user_address_result == l.all() self.assert_sql_count(testing.db, go, 1) def test_contains_eager_multi_string_alias(self): orders, items, users, order_items, User = (self.tables.orders, self.tables.items, self.tables.users, self.tables.order_items, self.classes.User) sess = create_session() q = sess.query(User) oalias = orders.alias('o1') ialias = items.alias('i1') query = users.outerjoin(oalias).\ outerjoin(order_items).\ outerjoin(ialias).\ select(use_labels=True).\ order_by(users.c.id, oalias.c.id, ialias.c.id) # test using string alias with more than one level deep def go(): l = list(q.options( contains_eager('orders', alias='o1'), contains_eager('orders.items', alias='i1') ).instances(query.execute())) assert self.static.user_order_result == l self.assert_sql_count(testing.db, go, 1) def test_contains_eager_multi_alias(self): orders, items, users, order_items, User = (self.tables.orders, self.tables.items, self.tables.users, self.tables.order_items, self.classes.User) sess = create_session() q = sess.query(User) oalias = orders.alias('o1') ialias = items.alias('i1') query = users.outerjoin(oalias).\ outerjoin(order_items).\ outerjoin(ialias).\ select(use_labels=True).\ order_by(users.c.id, oalias.c.id, ialias.c.id) # test using Alias with more than one level deep def go(): l = list(q.options( contains_eager('orders', alias=oalias), contains_eager('orders.items', alias=ialias) ).instances(query.execute())) assert self.static.user_order_result == l self.assert_sql_count(testing.db, go, 1) def test_contains_eager_multi_aliased(self): Item, User, Order = (self.classes.Item, self.classes.User, self.classes.Order) sess = create_session() q = sess.query(User) # test using Aliased with more than one level deep oalias = aliased(Order) ialias = aliased(Item) def go(): l = q.options( contains_eager(User.orders, alias=oalias), contains_eager(User.orders, Order.items, alias=ialias) ).\ outerjoin(oalias, User.orders).\ outerjoin(ialias, oalias.items).\ order_by(User.id, oalias.id, ialias.id) assert self.static.user_order_result == l.all() self.assert_sql_count(testing.db, go, 1) def test_contains_eager_chaining(self): """test that contains_eager() 'chains' by default.""" Dingaling, User, Address = (self.classes.Dingaling, self.classes.User, self.classes.Address) sess = create_session() q = sess.query(User).\ join(User.addresses).\ join(Address.dingaling).\ options( contains_eager(User.addresses, Address.dingaling), ) def go(): eq_( q.all(), # note we only load the Address records that # have a Dingaling here due to using the inner # join for the eager load [ User(name=u'ed', addresses=[ Address(email_address=u'ed@wood.com', dingaling=Dingaling(data='ding 1/2')), ]), User(name=u'fred', addresses=[ Address(email_address=u'fred@fred.com', dingaling=Dingaling(data='ding 2/5')) ]) ] ) self.assert_sql_count(testing.db, go, 1) def test_contains_eager_chaining_aliased_endpoint(self): """test that contains_eager() 'chains' by default and supports an alias at the end.""" Dingaling, User, Address = 
(self.classes.Dingaling, self.classes.User, self.classes.Address) sess = create_session() da = aliased(Dingaling, name="foob") q = sess.query(User).\ join(User.addresses).\ join(da, Address.dingaling).\ options( contains_eager(User.addresses, Address.dingaling, alias=da), ) def go(): eq_( q.all(), # note we only load the Address records that # have a Dingaling here due to using the inner # join for the eager load [ User(name=u'ed', addresses=[ Address(email_address=u'ed@wood.com', dingaling=Dingaling(data='ding 1/2')), ]), User(name=u'fred', addresses=[ Address(email_address=u'fred@fred.com', dingaling=Dingaling(data='ding 2/5')) ]) ] ) self.assert_sql_count(testing.db, go, 1) def test_mixed_eager_contains_with_limit(self): Order, User, Address = (self.classes.Order, self.classes.User, self.classes.Address) sess = create_session() q = sess.query(User) def go(): # outerjoin to User.orders, offset 1/limit 2 so we get user # 7 + second two orders. then joinedload the addresses. # User + Order columns go into the subquery, address left # outer joins to the subquery, joinedloader for User.orders # applies context.adapter to result rows. This was # [ticket:1180]. l = \ q.outerjoin(User.orders).options(joinedload(User.addresses), contains_eager(User.orders)).order_by(User.id, Order.id).offset(1).limit(2).all() eq_(l, [User(id=7, addresses=[Address(email_address=u'jack@bean.com', user_id=7, id=1)], name=u'jack', orders=[Order(address_id=1, user_id=7, description=u'order 3', isopen=1, id=3), Order(address_id=None, user_id=7, description=u'order 5' , isopen=0, id=5)])]) self.assert_sql_count(testing.db, go, 1) sess.expunge_all() def go(): # same as above, except Order is aliased, so two adapters # are applied by the eager loader oalias = aliased(Order) l = q.outerjoin(oalias, User.orders).\ options(joinedload(User.addresses), contains_eager(User.orders, alias=oalias)).\ order_by(User.id, oalias.id).\ offset(1).limit(2).all() eq_(l, [User(id=7, addresses=[Address(email_address=u'jack@bean.com', user_id=7, id=1)], name=u'jack', orders=[Order(address_id=1, user_id=7, description=u'order 3', isopen=1, id=3), Order(address_id=None, user_id=7, description=u'order 5' , isopen=0, id=5)])]) self.assert_sql_count(testing.db, go, 1) class MixedEntitiesTest(QueryTest, AssertsCompiledSQL): __dialect__ = 'default' def test_values(self): Address, users, User = (self.classes.Address, self.tables.users, self.classes.User) sess = create_session() assert list(sess.query(User).values()) == list() sel = users.select(User.id.in_([7, 8])).alias() q = sess.query(User) q2 = q.select_from(sel).values(User.name) eq_(list(q2), [(u'jack',), (u'ed',)]) q = sess.query(User) q2 = q.order_by(User.id).\ values(User.name, User.name + " " + cast(User.id, String(50))) eq_( list(q2), [(u'jack', u'jack 7'), (u'ed', u'ed 8'), (u'fred', u'fred 9'), (u'chuck', u'chuck 10')] ) q2 = q.join('addresses').\ filter(User.name.like('%e%')).\ order_by(User.id, Address.id).\ values(User.name, Address.email_address) eq_(list(q2), [(u'ed', u'ed@wood.com'), (u'ed', u'ed@bettyboop.com'), (u'ed', u'ed@lala.com'), (u'fred', u'fred@fred.com')]) q2 = q.join('addresses').\ filter(User.name.like('%e%')).\ order_by(desc(Address.email_address)).\ slice(1, 3).values(User.name, Address.email_address) eq_(list(q2), [(u'ed', u'ed@wood.com'), (u'ed', u'ed@lala.com')]) adalias = aliased(Address) q2 = q.join(adalias, 'addresses').\ filter(User.name.like('%e%')).order_by(adalias.email_address).\ values(User.name, adalias.email_address) eq_(list(q2), [(u'ed', 
u'ed@bettyboop.com'), (u'ed', u'ed@lala.com'), (u'ed', u'ed@wood.com'), (u'fred', u'fred@fred.com')]) q2 = q.values(func.count(User.name)) assert q2.next() == (4,) q2 = q.select_from(sel).filter(User.id==8).values(User.name, sel.c.name, User.name) eq_(list(q2), [(u'ed', u'ed', u'ed')]) # using User.xxx is alised against "sel", so this query returns nothing q2 = q.select_from(sel).\ filter(User.id==8).\ filter(User.id>sel.c.id).values(User.name, sel.c.name, User.name) eq_(list(q2), []) # whereas this uses users.c.xxx, is not aliased and creates a new join q2 = q.select_from(sel).\ filter(users.c.id==8).\ filter(users.c.id>sel.c.id).values(users.c.name, sel.c.name, User.name) eq_(list(q2), [(u'ed', u'jack', u'jack')]) def test_alias_naming(self): User = self.classes.User sess = create_session() ua = aliased(User, name="foobar") q= sess.query(ua) self.assert_compile( q, "SELECT foobar.id AS foobar_id, " "foobar.name AS foobar_name FROM users AS foobar" ) @testing.fails_on('mssql', 'FIXME: unknown') def test_values_specific_order_by(self): users, User = self.tables.users, self.classes.User sess = create_session() assert list(sess.query(User).values()) == list() sel = users.select(User.id.in_([7, 8])).alias() q = sess.query(User) u2 = aliased(User) q2 = q.select_from(sel).\ filter(u2.id>1).\ order_by(User.id, sel.c.id, u2.id).\ values(User.name, sel.c.name, u2.name) eq_(list(q2), [(u'jack', u'jack', u'jack'), (u'jack', u'jack', u'ed'), (u'jack', u'jack', u'fred'), (u'jack', u'jack', u'chuck'), (u'ed', u'ed', u'jack'), (u'ed', u'ed', u'ed'), (u'ed', u'ed', u'fred'), (u'ed', u'ed', u'chuck')]) @testing.fails_on('mssql', 'FIXME: unknown') @testing.fails_on('oracle', "Oracle doesn't support boolean expressions as " "columns") @testing.fails_on('postgresql+pg8000', "pg8000 parses the SQL itself before passing on " "to PG, doesn't parse this") @testing.fails_on('postgresql+zxjdbc', "zxjdbc parses the SQL itself before passing on " "to PG, doesn't parse this") @testing.fails_on("firebird", "unknown") def test_values_with_boolean_selects(self): """Tests a values clause that works with select boolean evaluations""" User = self.classes.User sess = create_session() q = sess.query(User) q2 = q.group_by(User.name.like('%j%')).\ order_by(desc(User.name.like('%j%'))).\ values(User.name.like('%j%'), func.count(User.name.like('%j%'))) eq_(list(q2), [(True, 1), (False, 3)]) q2 = q.order_by(desc(User.name.like('%j%'))).values(User.name.like('%j%')) eq_(list(q2), [(True,), (False,), (False,), (False,)]) def test_correlated_subquery(self): """test that a subquery constructed from ORM attributes doesn't leak out those entities to the outermost query. 
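The count subquery is correlated on users, so the addresses table should appear only inside the scalar subquery; the statement intended here is roughly:

    SELECT users.id, users.name,
           (SELECT count(*) FROM addresses
            WHERE users.id = addresses.user_id) AS count
    FROM users

The second half of the test builds the same subquery without the explicit correlate(), which should not be needed to get the same result.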
""" Address, users, User = (self.classes.Address, self.tables.users, self.classes.User) sess = create_session() subq = select([func.count()]).\ where(User.id==Address.user_id).\ correlate(users).\ label('count') # we don't want Address to be outside of the subquery here eq_( list(sess.query(User, subq)[0:3]), [(User(id=7,name=u'jack'), 1), (User(id=8,name=u'ed'), 3), (User(id=9,name=u'fred'), 1)] ) # same thing without the correlate, as it should # not be needed subq = select([func.count()]).\ where(User.id==Address.user_id).\ label('count') # we don't want Address to be outside of the subquery here eq_( list(sess.query(User, subq)[0:3]), [(User(id=7,name=u'jack'), 1), (User(id=8,name=u'ed'), 3), (User(id=9,name=u'fred'), 1)] ) def test_column_queries(self): Address, users, User = (self.classes.Address, self.tables.users, self.classes.User) sess = create_session() eq_(sess.query(User.name).all(), [(u'jack',), (u'ed',), (u'fred',), (u'chuck',)]) sel = users.select(User.id.in_([7, 8])).alias() q = sess.query(User.name) q2 = q.select_from(sel).all() eq_(list(q2), [(u'jack',), (u'ed',)]) eq_(sess.query(User.name, Address.email_address).filter(User.id==Address.user_id).all(), [ (u'jack', u'jack@bean.com'), (u'ed', u'ed@wood.com'), (u'ed', u'ed@bettyboop.com'), (u'ed', u'ed@lala.com'), (u'fred', u'fred@fred.com') ]) eq_(sess.query(User.name, func.count(Address.email_address)).\ outerjoin(User.addresses).group_by(User.id, User.name).\ order_by(User.id).all(), [(u'jack', 1), (u'ed', 3), (u'fred', 1), (u'chuck', 0)] ) eq_(sess.query(User, func.count(Address.email_address)).\ outerjoin(User.addresses).group_by(User).\ order_by(User.id).all(), [(User(name='jack',id=7), 1), (User(name='ed',id=8), 3), (User(name='fred',id=9), 1), (User(name='chuck',id=10), 0)] ) eq_(sess.query(func.count(Address.email_address), User).\ outerjoin(User.addresses).group_by(User).\ order_by(User.id).all(), [(1, User(name='jack',id=7)), (3, User(name='ed',id=8)), (1, User(name='fred',id=9)), (0, User(name='chuck',id=10))] ) adalias = aliased(Address) eq_(sess.query(User, func.count(adalias.email_address)).\ outerjoin(adalias, 'addresses').group_by(User).\ order_by(User.id).all(), [(User(name='jack',id=7), 1), (User(name='ed',id=8), 3), (User(name='fred',id=9), 1), (User(name='chuck',id=10), 0)] ) eq_(sess.query(func.count(adalias.email_address), User).\ outerjoin(adalias, User.addresses).group_by(User).\ order_by(User.id).all(), [(1, User(name=u'jack',id=7)), (3, User(name=u'ed',id=8)), (1, User(name=u'fred',id=9)), (0, User(name=u'chuck',id=10))] ) # select from aliasing + explicit aliasing eq_( sess.query(User, adalias.email_address, adalias.id).\ outerjoin(adalias, User.addresses).\ from_self(User, adalias.email_address).\ order_by(User.id, adalias.id).all(), [ (User(name=u'jack',id=7), u'jack@bean.com'), (User(name=u'ed',id=8), u'ed@wood.com'), (User(name=u'ed',id=8), u'ed@bettyboop.com'), (User(name=u'ed',id=8), u'ed@lala.com'), (User(name=u'fred',id=9), u'fred@fred.com'), (User(name=u'chuck',id=10), None) ] ) # anon + select from aliasing eq_( sess.query(User).join(User.addresses, aliased=True).\ filter(Address.email_address.like('%ed%')).\ from_self().all(), [ User(name=u'ed',id=8), User(name=u'fred',id=9), ] ) # test eager aliasing, with/without select_from aliasing for q in [ sess.query(User, adalias.email_address).\ outerjoin(adalias, User.addresses).\ options(joinedload(User.addresses)).\ order_by(User.id, adalias.id).limit(10), sess.query(User, adalias.email_address, adalias.id).\ outerjoin(adalias, 
User.addresses).\ from_self(User, adalias.email_address).\ options(joinedload(User.addresses)).\ order_by(User.id, adalias.id).limit(10), ]: eq_( q.all(), [(User(addresses=[ Address(user_id=7,email_address=u'jack@bean.com',id=1)], name=u'jack',id=7), u'jack@bean.com'), (User(addresses=[ Address(user_id=8,email_address=u'ed@wood.com',id=2), Address(user_id=8,email_address=u'ed@bettyboop.com',id=3), Address(user_id=8,email_address=u'ed@lala.com',id=4)], name=u'ed',id=8), u'ed@wood.com'), (User(addresses=[ Address(user_id=8,email_address=u'ed@wood.com',id=2), Address(user_id=8,email_address=u'ed@bettyboop.com',id=3), Address(user_id=8,email_address=u'ed@lala.com',id=4)],name=u'ed',id=8), u'ed@bettyboop.com'), (User(addresses=[ Address(user_id=8,email_address=u'ed@wood.com',id=2), Address(user_id=8,email_address=u'ed@bettyboop.com',id=3), Address(user_id=8,email_address=u'ed@lala.com',id=4)],name=u'ed',id=8), u'ed@lala.com'), (User(addresses=[Address(user_id=9,email_address=u'fred@fred.com',id=5)],name=u'fred',id=9), u'fred@fred.com'), (User(addresses=[],name=u'chuck',id=10), None)] ) def test_column_from_limited_joinedload(self): User = self.classes.User sess = create_session() def go(): results = sess.query(User).limit(1).\ options(joinedload('addresses')).\ add_column(User.name).all() eq_(results, [(User(name='jack'), 'jack')]) self.assert_sql_count(testing.db, go, 1) @testing.fails_on("firebird", "unknown") @testing.fails_on('postgresql+pg8000', "'type oid 705 not mapped to py type' (due to literal)") def test_self_referential(self): Order = self.classes.Order sess = create_session() oalias = aliased(Order) for q in [ sess.query(Order, oalias).\ filter(Order.user_id==oalias.user_id).filter(Order.user_id==7).\ filter(Order.id>oalias.id).order_by(Order.id, oalias.id), sess.query(Order, oalias).from_self().filter(Order.user_id==oalias.user_id).\ filter(Order.user_id==7).filter(Order.id>oalias.id).\ order_by(Order.id, oalias.id), # same thing, but reversed. 
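# (with the entity order reversed, the comparison also flips to Order.id < oalias.id, so each result row still puts the higher-id order first)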
sess.query(oalias, Order).from_self().filter(oalias.user_id==Order.user_id).\ filter(oalias.user_id==7).filter(Order.id<oalias.id).\ from_self().order_by(Order.id, oalias.id).\ limit(10).options(joinedload(Order.items)), # gratuitous four layers sess.query(Order, oalias).filter(Order.user_id==oalias.user_id).\ filter(Order.user_id==7).filter(Order.id>oalias.id).from_self().\ from_self().from_self().order_by(Order.id, oalias.id).\ limit(10).options(joinedload(Order.items)), ]: eq_( q.all(), [ (Order(address_id=1,description=u'order 3',isopen=1,user_id=7,id=3), Order(address_id=1,description=u'order 1',isopen=0,user_id=7,id=1)), (Order(address_id=None,description=u'order 5',isopen=0,user_id=7,id=5), Order(address_id=1,description=u'order 1',isopen=0,user_id=7,id=1)), (Order(address_id=None,description=u'order 5',isopen=0,user_id=7,id=5), Order(address_id=1,description=u'order 3',isopen=1,user_id=7,id=3)) ] ) # ensure column expressions are taken from inside the subquery, not restated at the top q = sess.query(Order.id, Order.description, literal_column("'q'").label('foo')).\ filter(Order.description == u'order 3').from_self() self.assert_compile(q, "SELECT anon_1.orders_id AS " "anon_1_orders_id, anon_1.orders_descriptio" "n AS anon_1_orders_description, " "anon_1.foo AS anon_1_foo FROM (SELECT " "orders.id AS orders_id, " "orders.description AS orders_description, " "'q' AS foo FROM orders WHERE " "orders.description = :description_1) AS " "anon_1") eq_( q.all(), [(3, u'order 3', 'q')] ) def test_multi_mappers(self): Address, addresses, users, User = (self.classes.Address, self.tables.addresses, self.tables.users, self.classes.User) test_session = create_session() (user7, user8, user9, user10) = test_session.query(User).all() (address1, address2, address3, address4, address5) = \ test_session.query(Address).all() expected = [(user7, address1), (user8, address2), (user8, address3), (user8, address4), (user9, address5), (user10, None)] sess = create_session() selectquery = users.outerjoin(addresses).select(use_labels=True, order_by=[users.c.id, addresses.c.id]) eq_(list(sess.query(User, Address).instances(selectquery.execute())), expected) sess.expunge_all() for address_entity in (Address, aliased(Address)): q = sess.query(User).add_entity(address_entity).\ outerjoin(address_entity, 'addresses').\ order_by(User.id, address_entity.id) eq_(q.all(), expected) sess.expunge_all() q = sess.query(User).add_entity(address_entity) q = q.join(address_entity, 'addresses') q = q.filter_by(email_address='ed@bettyboop.com') eq_(q.all(), [(user8, address3)]) sess.expunge_all() q = sess.query(User, address_entity).join(address_entity, 'addresses').\ filter_by(email_address='ed@bettyboop.com') eq_(q.all(), [(user8, address3)]) sess.expunge_all() q = sess.query(User, address_entity).join(address_entity, 'addresses').\ options(joinedload('addresses')).\ filter_by(email_address='ed@bettyboop.com') eq_(list(util.OrderedSet(q.all())), [(user8, address3)]) sess.expunge_all() def test_aliased_multi_mappers(self): User, addresses, users, Address = (self.classes.User, self.tables.addresses, self.tables.users, self.classes.Address) sess = create_session() (user7, user8, user9, user10) = sess.query(User).all() (address1, address2, address3, address4, address5) = sess.query(Address).all() expected = [(user7, address1), (user8, address2), (user8, address3), (user8, address4), (user9, address5), (user10, None)] q = sess.query(User) adalias = addresses.alias('adalias') q = q.add_entity(Address,
alias=adalias).select_from(users.outerjoin(adalias)) l = q.order_by(User.id, adalias.c.id).all() assert l == expected sess.expunge_all() q = sess.query(User).add_entity(Address, alias=adalias) l = q.select_from(users.outerjoin(adalias)).filter(adalias.c.email_address=='ed@bettyboop.com').all() assert l == [(user8, address3)] def test_with_entities(self): User, Address = self.classes.User, self.classes.Address sess = create_session() q = sess.query(User).filter(User.id==7).order_by(User.name) self.assert_compile( q.with_entities(User.id,Address).\ filter(Address.user_id == User.id), 'SELECT users.id AS users_id, addresses.id ' 'AS addresses_id, addresses.user_id AS ' 'addresses_user_id, addresses.email_address' ' AS addresses_email_address FROM users, ' 'addresses WHERE users.id = :id_1 AND ' 'addresses.user_id = users.id ORDER BY ' 'users.name') def test_multi_columns(self): users, User = self.tables.users, self.classes.User sess = create_session() expected = [(u, u.name) for u in sess.query(User).all()] for add_col in (User.name, users.c.name): assert sess.query(User).add_column(add_col).all() == expected sess.expunge_all() assert_raises(sa_exc.InvalidRequestError, sess.query(User).add_column, object()) def test_add_multi_columns(self): """test that add_column accepts a FROM clause.""" users, User = self.tables.users, self.classes.User sess = create_session() eq_( sess.query(User.id).add_column(users).all(), [(7, 7, u'jack'), (8, 8, u'ed'), (9, 9, u'fred'), (10, 10, u'chuck')] ) def test_multi_columns_2(self): """test aliased/nonalised joins with the usage of add_column()""" User, Address, addresses, users = (self.classes.User, self.classes.Address, self.tables.addresses, self.tables.users) sess = create_session() (user7, user8, user9, user10) = sess.query(User).all() expected = [(user7, 1), (user8, 3), (user9, 1), (user10, 0) ] q = sess.query(User) q = q.group_by(users).order_by(User.id).outerjoin('addresses').\ add_column(func.count(Address.id).label('count')) eq_(q.all(), expected) sess.expunge_all() adalias = aliased(Address) q = sess.query(User) q = q.group_by(users).order_by(User.id).outerjoin(adalias, 'addresses').\ add_column(func.count(adalias.id).label('count')) eq_(q.all(), expected) sess.expunge_all() # TODO: figure out why group_by(users) doesn't work here s = select([users, func.count(addresses.c.id).label('count')]).\ select_from(users.outerjoin(addresses)).\ group_by(*[c for c in users.c]).order_by(User.id) q = sess.query(User) l = q.add_column("count").from_statement(s).all() assert l == expected def test_raw_columns(self): addresses, users, User = (self.tables.addresses, self.tables.users, self.classes.User) sess = create_session() (user7, user8, user9, user10) = sess.query(User).all() expected = [ (user7, 1, "Name:jack"), (user8, 3, "Name:ed"), (user9, 1, "Name:fred"), (user10, 0, "Name:chuck")] adalias = addresses.alias() q = create_session().query(User).add_column(func.count(adalias.c.id))\ .add_column(("Name:" + users.c.name)).outerjoin(adalias, 'addresses')\ .group_by(users).order_by(users.c.id) assert q.all() == expected # test with a straight statement s = select([users, func.count(addresses.c.id).label('count'), ("Name:" + users.c.name).label('concat')], from_obj=[users.outerjoin(addresses)], group_by=[c for c in users.c], order_by=[users.c.id]) q = create_session().query(User) l = q.add_column("count").add_column("concat").from_statement(s).all() assert l == expected sess.expunge_all() # test with select_from() q = 
create_session().query(User).add_column(func.count(addresses.c.id))\ .add_column(("Name:" + users.c.name)).select_from(users.outerjoin(addresses))\ .group_by(users).order_by(users.c.id) assert q.all() == expected sess.expunge_all() q = create_session().query(User).add_column(func.count(addresses.c.id))\ .add_column(("Name:" + users.c.name)).outerjoin('addresses')\ .group_by(users).order_by(users.c.id) assert q.all() == expected sess.expunge_all() q = create_session().query(User).add_column(func.count(adalias.c.id))\ .add_column(("Name:" + users.c.name)).outerjoin(adalias, 'addresses')\ .group_by(users).order_by(users.c.id) assert q.all() == expected sess.expunge_all() def test_expression_selectable_matches_mzero(self): User, Address = self.classes.User, self.classes.Address ua = aliased(User) aa = aliased(Address) s = create_session() for crit, j, exp in [ (User.id + Address.id, User.addresses, "SELECT users.id + addresses.id AS anon_1 " "FROM users JOIN addresses ON users.id = " "addresses.user_id" ), (User.id + Address.id, Address.user, "SELECT users.id + addresses.id AS anon_1 " "FROM addresses JOIN users ON users.id = " "addresses.user_id" ), (Address.id + User.id, User.addresses, "SELECT addresses.id + users.id AS anon_1 " "FROM users JOIN addresses ON users.id = " "addresses.user_id" ), (User.id + aa.id, (aa, User.addresses), "SELECT users.id + addresses_1.id AS anon_1 " "FROM users JOIN addresses AS addresses_1 " "ON users.id = addresses_1.user_id" ), ]: q = s.query(crit) mzero = q._mapper_zero() assert mzero.mapped_table is q._entity_zero().selectable q = q.join(j) self.assert_compile(q, exp) for crit, j, exp in [ (ua.id + Address.id, ua.addresses, "SELECT users_1.id + addresses.id AS anon_1 " "FROM users AS users_1 JOIN addresses " "ON users_1.id = addresses.user_id"), (ua.id + aa.id, (aa, ua.addresses), "SELECT users_1.id + addresses_1.id AS anon_1 " "FROM users AS users_1 JOIN addresses AS " "addresses_1 ON users_1.id = addresses_1.user_id"), (ua.id + aa.id, (ua, aa.user), "SELECT users_1.id + addresses_1.id AS anon_1 " "FROM addresses AS addresses_1 JOIN " "users AS users_1 " "ON users_1.id = addresses_1.user_id") ]: q = s.query(crit) mzero = q._mapper_zero() assert inspect(mzero).selectable is q._entity_zero().selectable q = q.join(j) self.assert_compile(q, exp) def test_aliased_adapt_on_names(self): User, Address = self.classes.User, self.classes.Address sess = Session() agg_address = sess.query(Address.id, func.sum(func.length(Address.email_address)).label('email_address') ).group_by(Address.user_id) ag1 = aliased(Address, agg_address.subquery()) ag2 = aliased(Address, agg_address.subquery(), adapt_on_names=True) # first, without adapt on names, 'email_address' isn't matched up - we get the raw "address" # element in the SELECT self.assert_compile( sess.query(User, ag1.email_address).join(ag1, User.addresses).filter(ag1.email_address > 5), "SELECT users.id AS users_id, users.name AS users_name, addresses.email_address " "AS addresses_email_address FROM addresses, users JOIN " "(SELECT addresses.id AS id, sum(length(addresses.email_address)) " "AS email_address FROM addresses GROUP BY addresses.user_id) AS " "anon_1 ON users.id = addresses.user_id WHERE addresses.email_address > :email_address_1" ) # second, 'email_address' matches up to the aggreagte, and we get a smooth JOIN # from users->subquery and that's it self.assert_compile( sess.query(User, ag2.email_address).join(ag2, User.addresses).filter(ag2.email_address > 5), "SELECT users.id AS users_id, users.name AS 
users_name, " "anon_1.email_address AS anon_1_email_address FROM users " "JOIN (SELECT addresses.id AS id, sum(length(addresses.email_address)) " "AS email_address FROM addresses GROUP BY addresses.user_id) AS " "anon_1 ON users.id = addresses.user_id WHERE anon_1.email_address > :email_address_1", ) class SelectFromTest(QueryTest, AssertsCompiledSQL): run_setup_mappers = None __dialect__ = 'default' def test_replace_with_select(self): users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(User, users, properties = { 'addresses':relationship(Address) }) mapper(Address, addresses) sel = users.select(users.c.id.in_([7, 8])).alias() sess = create_session() eq_(sess.query(User).select_from(sel).all(), [User(id=7), User(id=8)]) eq_(sess.query(User).select_from(sel).filter(User.id==8).all(), [User(id=8)]) eq_(sess.query(User).select_from(sel).order_by(desc(User.name)).all(), [ User(name='jack',id=7), User(name='ed',id=8) ]) eq_(sess.query(User).select_from(sel).order_by(asc(User.name)).all(), [ User(name='ed',id=8), User(name='jack',id=7) ]) eq_(sess.query(User).select_from(sel).options(joinedload('addresses')).first(), User(name='jack', addresses=[Address(id=1)]) ) def test_join_mapper_order_by(self): """test that mapper-level order_by is adapted to a selectable.""" User, users = self.classes.User, self.tables.users mapper(User, users, order_by=users.c.id) sel = users.select(users.c.id.in_([7, 8])) sess = create_session() eq_(sess.query(User).select_from(sel).all(), [ User(name='jack',id=7), User(name='ed',id=8) ] ) def test_differentiate_self_external(self): """test some different combinations of joining a table to a subquery of itself.""" users, User = self.tables.users, self.classes.User mapper(User, users) sess = create_session() sel = sess.query(User).filter(User.id.in_([7, 8])).subquery() ualias = aliased(User) self.assert_compile( sess.query(User).join(sel, User.id>sel.c.id), "SELECT users.id AS users_id, users.name AS users_name FROM " "users JOIN (SELECT users.id AS id, users.name AS name FROM " "users WHERE users.id IN (:id_1, :id_2)) AS anon_1 ON users.id > anon_1.id", ) self.assert_compile( sess.query(ualias).select_from(sel).filter(ualias.id>sel.c.id), "SELECT users_1.id AS users_1_id, users_1.name AS users_1_name FROM " "users AS users_1, (SELECT users.id AS id, users.name AS name FROM " "users WHERE users.id IN (:id_1, :id_2)) AS anon_1 WHERE users_1.id > anon_1.id", ) self.assert_compile( sess.query(ualias).select_from(sel).join(ualias, ualias.id>sel.c.id), "SELECT users_1.id AS users_1_id, users_1.name AS users_1_name " "FROM (SELECT users.id AS id, users.name AS name " "FROM users WHERE users.id IN (:id_1, :id_2)) AS anon_1 " "JOIN users AS users_1 ON users_1.id > anon_1.id" ) self.assert_compile( sess.query(ualias).select_from(sel).join(ualias, ualias.id>User.id), "SELECT users_1.id AS users_1_id, users_1.name AS users_1_name " "FROM (SELECT users.id AS id, users.name AS name FROM " "users WHERE users.id IN (:id_1, :id_2)) AS anon_1 " "JOIN users AS users_1 ON anon_1.id < users_1.id" ) salias = aliased(User, sel) self.assert_compile( sess.query(salias).join(ualias, ualias.id>salias.id), "SELECT anon_1.id AS anon_1_id, anon_1.name AS anon_1_name FROM " "(SELECT users.id AS id, users.name AS name FROM users WHERE users.id " "IN (:id_1, :id_2)) AS anon_1 JOIN users AS users_1 ON users_1.id > anon_1.id", ) # this one uses an explicit join(left, right, onclause) so works self.assert_compile( 
sess.query(ualias).select_from(join(sel, ualias, ualias.id>sel.c.id)), "SELECT users_1.id AS users_1_id, users_1.name AS users_1_name FROM " "(SELECT users.id AS id, users.name AS name FROM users WHERE users.id " "IN (:id_1, :id_2)) AS anon_1 JOIN users AS users_1 ON users_1.id > anon_1.id", use_default_dialect=True ) def test_aliased_class_vs_nonaliased(self): User, users = self.classes.User, self.tables.users mapper(User, users) ua = aliased(User) sess = create_session() self.assert_compile( sess.query(User).select_from(ua).join(User, ua.name > User.name), "SELECT users.id AS users_id, users.name AS users_name " "FROM users AS users_1 JOIN users ON users.name < users_1.name" ) self.assert_compile( sess.query(User.name).select_from(ua).join(User, ua.name > User.name), "SELECT users.name AS users_name FROM users AS users_1 " "JOIN users ON users.name < users_1.name" ) self.assert_compile( sess.query(ua.name).select_from(ua).join(User, ua.name > User.name), "SELECT users_1.name AS users_1_name FROM users AS users_1 " "JOIN users ON users.name < users_1.name" ) self.assert_compile( sess.query(ua).select_from(User).join(ua, ua.name > User.name), "SELECT users_1.id AS users_1_id, users_1.name AS users_1_name " "FROM users JOIN users AS users_1 ON users.name < users_1.name" ) # this is tested in many other places here, just adding it # here for comparison self.assert_compile( sess.query(User.name).\ select_from(users.select().where(users.c.id > 5)), "SELECT anon_1.name AS anon_1_name FROM (SELECT users.id AS id, " "users.name AS name FROM users WHERE users.id > :id_1) AS anon_1" ) def test_join_no_order_by(self): User, users = self.classes.User, self.tables.users mapper(User, users) sel = users.select(users.c.id.in_([7, 8])) sess = create_session() eq_(sess.query(User).select_from(sel).all(), [ User(name='jack',id=7), User(name='ed',id=8) ] ) def test_join(self): users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(User, users, properties = { 'addresses':relationship(Address) }) mapper(Address, addresses) sel = users.select(users.c.id.in_([7, 8])) sess = create_session() eq_(sess.query(User).select_from(sel).join('addresses'). add_entity(Address).order_by(User.id).order_by(Address.id).all(), [ (User(name='jack',id=7), Address(user_id=7,email_address='jack@bean.com',id=1)), (User(name='ed',id=8), Address(user_id=8,email_address='ed@wood.com',id=2)), (User(name='ed',id=8), Address(user_id=8,email_address='ed@bettyboop.com',id=3)), (User(name='ed',id=8), Address(user_id=8,email_address='ed@lala.com',id=4)) ] ) adalias = aliased(Address) eq_(sess.query(User).select_from(sel).join(adalias, 'addresses'). 
add_entity(adalias).order_by(User.id).order_by(adalias.id).all(), [ (User(name='jack',id=7), Address(user_id=7,email_address='jack@bean.com',id=1)), (User(name='ed',id=8), Address(user_id=8,email_address='ed@wood.com',id=2)), (User(name='ed',id=8), Address(user_id=8,email_address='ed@bettyboop.com',id=3)), (User(name='ed',id=8), Address(user_id=8,email_address='ed@lala.com',id=4)) ] ) def test_more_joins(self): users, Keyword, orders, items, order_items, Order, Item, User, keywords, item_keywords = (self.tables.users, self.classes.Keyword, self.tables.orders, self.tables.items, self.tables.order_items, self.classes.Order, self.classes.Item, self.classes.User, self.tables.keywords, self.tables.item_keywords) mapper(User, users, properties={ 'orders':relationship(Order, backref='user'), # o2m, m2o }) mapper(Order, orders, properties={ 'items':relationship(Item, secondary=order_items, order_by=items.c.id), #m2m }) mapper(Item, items, properties={ 'keywords':relationship(Keyword, secondary=item_keywords, order_by=keywords.c.id) #m2m }) mapper(Keyword, keywords) sess = create_session() sel = users.select(users.c.id.in_([7, 8])) eq_(sess.query(User).select_from(sel).\ join('orders', 'items', 'keywords').\ filter(Keyword.name.in_(['red', 'big', 'round'])).\ all(), [ User(name=u'jack',id=7) ]) eq_(sess.query(User).select_from(sel).\ join('orders', 'items', 'keywords', aliased=True).\ filter(Keyword.name.in_(['red', 'big', 'round'])).\ all(), [ User(name=u'jack',id=7) ]) def go(): eq_( sess.query(User).select_from(sel). options(joinedload_all('orders.items.keywords')). join('orders', 'items', 'keywords', aliased=True). filter(Keyword.name.in_(['red', 'big', 'round'])).\ all(), [ User(name=u'jack',orders=[ Order(description=u'order 1',items=[ Item(description=u'item 1', keywords=[ Keyword(name=u'red'), Keyword(name=u'big'), Keyword(name=u'round') ]), Item(description=u'item 2', keywords=[ Keyword(name=u'red',id=2), Keyword(name=u'small',id=5), Keyword(name=u'square') ]), Item(description=u'item 3', keywords=[ Keyword(name=u'green',id=3), Keyword(name=u'big',id=4), Keyword(name=u'round',id=6)]) ]), Order(description=u'order 3',items=[ Item(description=u'item 3', keywords=[ Keyword(name=u'green',id=3), Keyword(name=u'big',id=4), Keyword(name=u'round',id=6) ]), Item(description=u'item 4',keywords=[],id=4), Item(description=u'item 5',keywords=[],id=5) ]), Order(description=u'order 5', items=[ Item(description=u'item 5',keywords=[])]) ]) ]) self.assert_sql_count(testing.db, go, 1) sess.expunge_all() sel2 = orders.select(orders.c.id.in_([1,2,3])) eq_(sess.query(Order).select_from(sel2).\ join('items', 'keywords').\ filter(Keyword.name == 'red').\ order_by(Order.id).all(), [ Order(description=u'order 1',id=1), Order(description=u'order 2',id=2), ]) eq_(sess.query(Order).select_from(sel2).\ join('items', 'keywords', aliased=True).\ filter(Keyword.name == 'red').\ order_by(Order.id).all(), [ Order(description=u'order 1',id=1), Order(description=u'order 2',id=2), ]) def test_replace_with_eager(self): users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(User, users, properties = { 'addresses':relationship(Address, order_by=addresses.c.id) }) mapper(Address, addresses) sel = users.select(users.c.id.in_([7, 8])) sess = create_session() def go(): eq_(sess.query(User).options(joinedload('addresses')).select_from(sel).order_by(User.id).all(), [ User(id=7, addresses=[Address(id=1)]), User(id=8, addresses=[Address(id=2), Address(id=3), 
Address(id=4)]) ] ) self.assert_sql_count(testing.db, go, 1) sess.expunge_all() def go(): eq_(sess.query(User).options(joinedload('addresses')).select_from(sel).filter(User.id==8).order_by(User.id).all(), [User(id=8, addresses=[Address(id=2), Address(id=3), Address(id=4)])] ) self.assert_sql_count(testing.db, go, 1) sess.expunge_all() def go(): eq_(sess.query(User).options(joinedload('addresses')).select_from(sel).order_by(User.id)[1], User(id=8, addresses=[Address(id=2), Address(id=3), Address(id=4)])) self.assert_sql_count(testing.db, go, 1) class CustomJoinTest(QueryTest): run_setup_mappers = None def test_double_same_mappers(self): """test aliasing of joins with a custom join condition""" addresses, items, order_items, orders, Item, User, Address, Order, users = (self.tables.addresses, self.tables.items, self.tables.order_items, self.tables.orders, self.classes.Item, self.classes.User, self.classes.Address, self.classes.Order, self.tables.users) mapper(Address, addresses) mapper(Order, orders, properties={ 'items':relationship(Item, secondary=order_items, lazy='select', order_by=items.c.id), }) mapper(Item, items) mapper(User, users, properties = dict( addresses = relationship(Address, lazy='select'), open_orders = relationship(Order, primaryjoin = and_(orders.c.isopen == 1, users.c.id==orders.c.user_id), lazy='select'), closed_orders = relationship(Order, primaryjoin = and_(orders.c.isopen == 0, users.c.id==orders.c.user_id), lazy='select') )) q = create_session().query(User) eq_( q.join('open_orders', 'items', aliased=True).filter(Item.id==4).\ join('closed_orders', 'items', aliased=True).filter(Item.id==3).all(), [User(id=7)] ) class ExternalColumnsTest(QueryTest): """test mappers with SQL-expressions added as column properties.""" run_setup_mappers = None def test_external_columns_bad(self): users, User = self.tables.users, self.classes.User assert_raises_message(sa_exc.ArgumentError, "not represented in the mapper's table", mapper, User, users, properties={ 'concat': (users.c.id * 2), }) clear_mappers() def test_external_columns(self): """test querying mappings that reference external columns or selectables.""" users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(User, users, properties={ 'concat': column_property((users.c.id * 2)), 'count': column_property( select([func.count(addresses.c.id)], users.c.id==addresses.c.user_id).\ correlate(users).\ as_scalar()) }) mapper(Address, addresses, properties={ 'user':relationship(User) }) sess = create_session() sess.query(Address).options(joinedload('user')).all() eq_(sess.query(User).all(), [ User(id=7, concat=14, count=1), User(id=8, concat=16, count=3), User(id=9, concat=18, count=1), User(id=10, concat=20, count=0), ] ) address_result = [ Address(id=1, user=User(id=7, concat=14, count=1)), Address(id=2, user=User(id=8, concat=16, count=3)), Address(id=3, user=User(id=8, concat=16, count=3)), Address(id=4, user=User(id=8, concat=16, count=3)), Address(id=5, user=User(id=9, concat=18, count=1)) ] eq_(sess.query(Address).all(), address_result) # run the eager version twice to test caching of aliased clauses for x in range(2): sess.expunge_all() def go(): eq_(sess.query(Address).\ options(joinedload('user')).\ order_by(Address.id).all(), address_result) self.assert_sql_count(testing.db, go, 1) ualias = aliased(User) eq_( sess.query(Address, ualias).join(ualias, 'user').all(), [(address, address.user) for address in address_result] ) eq_( sess.query(Address, 
ualias.count).\ join(ualias, 'user').\ join('user', aliased=True).\ order_by(Address.id).all(), [ (Address(id=1), 1), (Address(id=2), 3), (Address(id=3), 3), (Address(id=4), 3), (Address(id=5), 1) ] ) eq_(sess.query(Address, ualias.concat, ualias.count). join(ualias, 'user'). join('user', aliased=True).order_by(Address.id).all(), [ (Address(id=1), 14, 1), (Address(id=2), 16, 3), (Address(id=3), 16, 3), (Address(id=4), 16, 3), (Address(id=5), 18, 1) ] ) ua = aliased(User) eq_(sess.query(Address, ua.concat, ua.count). select_from(join(Address, ua, 'user')). options(joinedload(Address.user)).order_by(Address.id).all(), [ (Address(id=1, user=User(id=7, concat=14, count=1)), 14, 1), (Address(id=2, user=User(id=8, concat=16, count=3)), 16, 3), (Address(id=3, user=User(id=8, concat=16, count=3)), 16, 3), (Address(id=4, user=User(id=8, concat=16, count=3)), 16, 3), (Address(id=5, user=User(id=9, concat=18, count=1)), 18, 1) ] ) eq_(list(sess.query(Address).join('user').values(Address.id, User.id, User.concat, User.count)), [(1, 7, 14, 1), (2, 8, 16, 3), (3, 8, 16, 3), (4, 8, 16, 3), (5, 9, 18, 1)] ) eq_(list(sess.query(Address, ua).select_from(join(Address,ua, 'user')).values(Address.id, ua.id, ua.concat, ua.count)), [(1, 7, 14, 1), (2, 8, 16, 3), (3, 8, 16, 3), (4, 8, 16, 3), (5, 9, 18, 1)] ) def test_external_columns_joinedload(self): users, orders, User, Address, Order, addresses = (self.tables.users, self.tables.orders, self.classes.User, self.classes.Address, self.classes.Order, self.tables.addresses) # in this test, we have a subquery on User that accesses "addresses", underneath # an joinedload for "addresses". So the "addresses" alias adapter needs to *not* hit # the "addresses" table within the "user" subquery, but "user" still needs to be adapted. # therefore the long standing practice of eager adapters being "chained" has been removed # since its unnecessary and breaks this exact condition. 
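# for example, the 'count' property mapped below is roughly (SELECT count(addresses.id) FROM addresses WHERE users.id = addresses.user_id); when User is reached via joinedload_all('address.user'), the users columns are adapted to the eager-load alias, while the addresses table inside that correlated subquery has to stay un-adapted.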
mapper(User, users, properties={ 'addresses':relationship(Address, backref='user', order_by=addresses.c.id), 'concat': column_property((users.c.id * 2)), 'count': column_property(select([func.count(addresses.c.id)], users.c.id==addresses.c.user_id).correlate(users)) }) mapper(Address, addresses) mapper(Order, orders, properties={ 'address':relationship(Address), # m2o }) sess = create_session() def go(): o1 = sess.query(Order).options(joinedload_all('address.user')).get(1) eq_(o1.address.user.count, 1) self.assert_sql_count(testing.db, go, 1) sess = create_session() def go(): o1 = sess.query(Order).options(joinedload_all('address.user')).first() eq_(o1.address.user.count, 1) self.assert_sql_count(testing.db, go, 1) def test_external_columns_compound(self): # see [ticket:2167] for background users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(User, users, properties={ 'fullname':column_property(users.c.name.label('x')) }) mapper(Address, addresses, properties={ 'username':column_property( select([User.fullname]).\ where(User.id==addresses.c.user_id).label('y')) }) sess = create_session() a1 = sess.query(Address).first() eq_(a1.username, "jack") sess = create_session() a1 = sess.query(Address).from_self().first() eq_(a1.username, "jack") class TestOverlyEagerEquivalentCols(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): base = Table('base', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('data', String(50)) ) sub1 = Table('sub1', metadata, Column('id', Integer, ForeignKey('base.id'), primary_key=True), Column('data', String(50)) ) sub2 = Table('sub2', metadata, Column('id', Integer, ForeignKey('base.id'), ForeignKey('sub1.id'), primary_key=True), Column('data', String(50)) ) def test_equivs(self): base, sub2, sub1 = (self.tables.base, self.tables.sub2, self.tables.sub1) class Base(fixtures.ComparableEntity): pass class Sub1(fixtures.ComparableEntity): pass class Sub2(fixtures.ComparableEntity): pass mapper(Base, base, properties={ 'sub1':relationship(Sub1), 'sub2':relationship(Sub2) }) mapper(Sub1, sub1) mapper(Sub2, sub2) sess = create_session() s11 = Sub1(data='s11') s12 = Sub1(data='s12') s2 = Sub2(data='s2') b1 = Base(data='b1', sub1=[s11], sub2=[]) b2 = Base(data='b1', sub1=[s12], sub2=[]) sess.add(b1) sess.add(b2) sess.flush() # theres an overlapping ForeignKey here, so not much option except # to artifically control the flush order b2.sub2 = [s2] sess.flush() q = sess.query(Base).outerjoin('sub2', aliased=True) assert sub1.c.id not in q._filter_aliases.equivalents eq_( sess.query(Base).join('sub1').outerjoin('sub2', aliased=True).\ filter(Sub1.id==1).one(), b1 ) class LabelCollideTest(fixtures.MappedTest): """Test handling for a label collision. This collision is handled by core, see ticket:2702 as well as test/sql/test_selectable->WithLabelsTest. here we want to make sure the end result is as we expect. 
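Here the collision is that, with labels applied, foo.bar_id and foo_bar.id both produce the label 'foo_bar_id'; the tests below verify that Foo and Bar each still load their own columns from a single row with no additional SQL emitted.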
""" @classmethod def define_tables(cls, metadata): Table('foo', metadata, Column('id', Integer, primary_key=True), Column('bar_id', Integer) ) Table('foo_bar', metadata, Column('id', Integer, primary_key=True), ) @classmethod def setup_classes(cls): class Foo(cls.Basic): pass class Bar(cls.Basic): pass @classmethod def setup_mappers(cls): mapper(cls.classes.Foo, cls.tables.foo) mapper(cls.classes.Bar, cls.tables.foo_bar) @classmethod def insert_data(cls): s = Session() s.add_all([ cls.classes.Foo(id=1, bar_id=2), cls.classes.Bar(id=3) ]) s.commit() def test_overlap_plain(self): s = Session() row = s.query(self.classes.Foo, self.classes.Bar).all()[0] def go(): eq_(row.Foo.id, 1) eq_(row.Foo.bar_id, 2) eq_(row.Bar.id, 3) # all three columns are loaded independently without # overlap, no additional SQL to load all attributes self.assert_sql_count(testing.db, go, 0) def test_overlap_subquery(self): s = Session() row = s.query(self.classes.Foo, self.classes.Bar).from_self().all()[0] def go(): eq_(row.Foo.id, 1) eq_(row.Foo.bar_id, 2) eq_(row.Bar.id, 3) # all three columns are loaded independently without # overlap, no additional SQL to load all attributes self.assert_sql_count(testing.db, go, 0)SQLAlchemy-0.8.4/test/orm/test_generative.py0000644000076500000240000002557312251150016021554 0ustar classicstaff00000000000000from sqlalchemy.testing import eq_ import sqlalchemy as sa from sqlalchemy import testing from sqlalchemy import Integer, String, ForeignKey, MetaData, func from sqlalchemy.testing.schema import Table from sqlalchemy.testing.schema import Column from sqlalchemy.orm import mapper, relationship, create_session from sqlalchemy.testing import eq_ from sqlalchemy.testing import fixtures from test.orm import _fixtures class GenerativeQueryTest(fixtures.MappedTest): run_inserts = 'once' run_deletes = None @classmethod def define_tables(cls, metadata): Table('foo', metadata, Column('id', Integer, sa.Sequence('foo_id_seq'), primary_key=True), Column('bar', Integer), Column('range', Integer)) @classmethod def fixtures(cls): rows = tuple([(i, i % 10) for i in range(100)]) foo_data = (('bar', 'range'),) + rows return dict(foo=foo_data) @classmethod def setup_mappers(cls): foo = cls.tables.foo class Foo(cls.Basic): pass mapper(Foo, foo) def test_selectby(self): Foo = self.classes.Foo res = create_session().query(Foo).filter_by(range=5) assert res.order_by(Foo.bar)[0].bar == 5 assert res.order_by(sa.desc(Foo.bar))[0].bar == 95 @testing.fails_on('maxdb', 'FIXME: unknown') def test_slice(self): Foo = self.classes.Foo sess = create_session() query = sess.query(Foo).order_by(Foo.id) orig = query.all() assert query[1] == orig[1] assert query[-4] == orig[-4] assert query[-1] == orig[-1] assert list(query[10:20]) == orig[10:20] assert list(query[10:]) == orig[10:] assert list(query[:10]) == orig[:10] assert list(query[:10]) == orig[:10] assert list(query[5:5]) == orig[5:5] assert list(query[10:40:3]) == orig[10:40:3] assert list(query[-5:]) == orig[-5:] assert list(query[-2:-5]) == orig[-2:-5] assert list(query[-5:-2]) == orig[-5:-2] assert list(query[:-2]) == orig[:-2] assert query[10:20][5] == orig[10:20][5] @testing.uses_deprecated('Call to deprecated function apply_max') def test_aggregate(self): foo, Foo = self.tables.foo, self.classes.Foo sess = create_session() query = sess.query(Foo) assert query.count() == 100 assert sess.query(func.min(foo.c.bar)).filter(foo.c.bar<30).one() == (0,) assert sess.query(func.max(foo.c.bar)).filter(foo.c.bar<30).one() == (29,) # Py3K #assert 
query.filter(foo.c.bar<30).values(sa.func.max(foo.c.bar)).__next__()[0] == 29 #assert query.filter(foo.c.bar<30).values(sa.func.max(foo.c.bar)).__next__()[0] == 29 # Py2K assert query.filter(foo.c.bar<30).values(sa.func.max(foo.c.bar)).next()[0] == 29 assert query.filter(foo.c.bar<30).values(sa.func.max(foo.c.bar)).next()[0] == 29 # end Py2K @testing.fails_if(lambda:testing.against('mysql+mysqldb') and testing.db.dialect.dbapi.version_info[:4] == (1, 2, 1, 'gamma'), "unknown incompatibility") def test_aggregate_1(self): foo = self.tables.foo query = create_session().query(func.sum(foo.c.bar)) assert query.filter(foo.c.bar<30).one() == (435,) @testing.fails_on('firebird', 'FIXME: unknown') @testing.fails_on('mssql', 'AVG produces an average as the original column type on mssql.') def test_aggregate_2(self): foo = self.tables.foo query = create_session().query(func.avg(foo.c.bar)) avg = query.filter(foo.c.bar < 30).one()[0] eq_(float(round(avg, 1)), 14.5) @testing.fails_on('mssql', 'AVG produces an average as the original column type on mssql.') def test_aggregate_3(self): foo, Foo = self.tables.foo, self.classes.Foo query = create_session().query(Foo) # Py3K #avg_f = query.filter(foo.c.bar<30).values(sa.func.avg(foo.c.bar)).__next__()[0] # Py2K avg_f = query.filter(foo.c.bar<30).values(sa.func.avg(foo.c.bar)).next()[0] # end Py2K assert float(round(avg_f, 1)) == 14.5 # Py3K #avg_o = query.filter(foo.c.bar<30).values(sa.func.avg(foo.c.bar)).__next__()[0] # Py2K avg_o = query.filter(foo.c.bar<30).values(sa.func.avg(foo.c.bar)).next()[0] # end Py2K assert float(round(avg_o, 1)) == 14.5 def test_filter(self): Foo = self.classes.Foo query = create_session().query(Foo) assert query.count() == 100 assert query.filter(Foo.bar < 30).count() == 30 res2 = query.filter(Foo.bar < 30).filter(Foo.bar > 10) assert res2.count() == 19 def test_order_by(self): Foo = self.classes.Foo query = create_session().query(Foo) assert query.order_by(Foo.bar)[0].bar == 0 assert query.order_by(sa.desc(Foo.bar))[0].bar == 99 def test_offset(self): Foo = self.classes.Foo query = create_session().query(Foo) assert list(query.order_by(Foo.bar).offset(10))[0].bar == 10 def test_offset(self): Foo = self.classes.Foo query = create_session().query(Foo) assert len(list(query.limit(10))) == 10 class GenerativeTest2(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): Table('table1', metadata, Column('id', Integer, primary_key=True)) Table('table2', metadata, Column('t1id', Integer, ForeignKey("table1.id"), primary_key=True), Column('num', Integer, primary_key=True)) @classmethod def setup_mappers(cls): table2, table1 = cls.tables.table2, cls.tables.table1 class Obj1(cls.Basic): pass class Obj2(cls.Basic): pass mapper(Obj1, table1) mapper(Obj2, table2) @classmethod def fixtures(cls): return dict( table1=(('id',), (1,), (2,), (3,), (4,)), table2=(('num', 't1id'), (1, 1), (2, 1), (3, 1), (4, 2), (5, 2), (6, 3))) def test_distinct_count(self): table2, Obj1, table1 = (self.tables.table2, self.classes.Obj1, self.tables.table1) query = create_session().query(Obj1) eq_(query.count(), 4) res = query.filter(sa.and_(table1.c.id == table2.c.t1id, table2.c.t1id == 1)) eq_(res.count(), 3) res = query.filter(sa.and_(table1.c.id == table2.c.t1id, table2.c.t1id == 1)).distinct() eq_(res.count(), 1) class RelationshipsTest(_fixtures.FixtureTest): run_setup_mappers = 'once' run_inserts = 'once' run_deletes = None @classmethod def setup_mappers(cls): addresses, Order, User, Address, orders, users = (cls.tables.addresses, 
cls.classes.Order, cls.classes.User, cls.classes.Address, cls.tables.orders, cls.tables.users) mapper(User, users, properties={ 'orders':relationship(mapper(Order, orders, properties={ 'addresses':relationship(mapper(Address, addresses))}))}) def test_join(self): """Query.join""" User, Address = self.classes.User, self.classes.Address session = create_session() q = (session.query(User).join('orders', 'addresses'). filter(Address.id == 1)) eq_([User(id=7)], q.all()) def test_outer_join(self): """Query.outerjoin""" Order, User, Address = (self.classes.Order, self.classes.User, self.classes.Address) session = create_session() q = (session.query(User).outerjoin('orders', 'addresses'). filter(sa.or_(Order.id == None, Address.id == 1))) eq_(set([User(id=7), User(id=8), User(id=10)]), set(q.all())) def test_outer_join_count(self): """test the join and outerjoin functions on Query""" Order, User, Address = (self.classes.Order, self.classes.User, self.classes.Address) session = create_session() q = (session.query(User).outerjoin('orders', 'addresses'). filter(sa.or_(Order.id == None, Address.id == 1))) eq_(q.count(), 4) def test_from(self): users, Order, User, Address, orders, addresses = (self.tables.users, self.classes.Order, self.classes.User, self.classes.Address, self.tables.orders, self.tables.addresses) session = create_session() sel = users.outerjoin(orders).outerjoin( addresses, orders.c.address_id == addresses.c.id) q = (session.query(User).select_from(sel). filter(sa.or_(Order.id == None, Address.id == 1))) eq_(set([User(id=7), User(id=8), User(id=10)]), set(q.all())) class CaseSensitiveTest(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): Table('Table1', metadata, Column('ID', Integer, primary_key=True)) Table('Table2', metadata, Column('T1ID', Integer, ForeignKey("Table1.ID"), primary_key=True), Column('NUM', Integer, primary_key=True)) @classmethod def setup_mappers(cls): Table2, Table1 = cls.tables.Table2, cls.tables.Table1 class Obj1(cls.Basic): pass class Obj2(cls.Basic): pass mapper(Obj1, Table1) mapper(Obj2, Table2) @classmethod def fixtures(cls): return dict( Table1=(('ID',), (1,), (2,), (3,), (4,)), Table2=(('NUM', 'T1ID'), (1, 1), (2, 1), (3, 1), (4, 2), (5, 2), (6, 3))) def test_distinct_count(self): Table2, Obj1, Table1 = (self.tables.Table2, self.classes.Obj1, self.tables.Table1) q = create_session(bind=testing.db).query(Obj1) assert q.count() == 4 res = q.filter(sa.and_(Table1.c.ID==Table2.c.T1ID,Table2.c.T1ID==1)) assert res.count() == 3 res = q.filter(sa.and_(Table1.c.ID==Table2.c.T1ID,Table2.c.T1ID==1)).distinct() eq_(res.count(), 1) SQLAlchemy-0.8.4/test/orm/test_hasparent.py0000644000076500000240000001333512251147172021412 0ustar classicstaff00000000000000"""test the current state of the hasparent() flag.""" from sqlalchemy.testing import assert_raises, assert_raises_message from sqlalchemy import Integer, String, ForeignKey, Sequence, \ exc as sa_exc from sqlalchemy.testing.schema import Table, Column from sqlalchemy.orm import mapper, relationship, create_session, \ sessionmaker, class_mapper, backref, Session from sqlalchemy.orm import attributes, exc as orm_exc from sqlalchemy import testing from sqlalchemy.testing import eq_ from sqlalchemy.testing import fixtures from test.orm import _fixtures from sqlalchemy.testing.util import gc_collect class ParentRemovalTest(fixtures.MappedTest): """Test that the 'hasparent' flag gets flipped to False only if we're sure this object is the real parent. In ambiguous cases a stale data exception is raised. 
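    An illustrative sketch (not part of the original docstring), using the
    User/Address fixture defined in this class: the ambiguous case arises
    roughly like this::

        s, u1, a1 = self._fixture()               # u1.addresses contains a1
        u2 = User(addresses=[a1])                  # a second potential parent
        s.add(u2)
        s.flush()
        s._expunge_state(attributes.instance_state(u2))
        del u2
        gc_collect()                               # u2's state is now gone
        u1.addresses.remove(a1)                    # raises StaleDataError; we
                                                   # can't be sure u1 is the
                                                   # most recent parent of a1
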
""" run_inserts = None @classmethod def define_tables(cls, metadata): if testing.against('oracle'): fk_args = dict(deferrable=True, initially='deferred') elif testing.against('mysql'): fk_args = {} else: fk_args = dict(onupdate='cascade') Table('users', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), ) Table('addresses', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('user_id', Integer, ForeignKey('users.id', **fk_args)), ) @classmethod def setup_classes(cls): class User(cls.Comparable): pass class Address(cls.Comparable): pass @classmethod def setup_mappers(cls): mapper(cls.classes.Address, cls.tables.addresses) mapper(cls.classes.User, cls.tables.users, properties={ 'addresses':relationship(cls.classes.Address, cascade='all, delete-orphan'), }) def _assert_hasparent(self, a1): assert attributes.has_parent( self.classes.User, a1, "addresses") def _assert_not_hasparent(self, a1): assert not attributes.has_parent( self.classes.User, a1, "addresses") def _fixture(self): User, Address = self.classes.User, self.classes.Address s = Session() u1 = User() a1 = Address() u1.addresses.append(a1) s.add(u1) s.flush() return s, u1, a1 def test_stale_state_positive(self): User = self.classes.User s, u1, a1 = self._fixture() s.expunge(u1) u1 = s.query(User).first() u1.addresses.remove(a1) self._assert_not_hasparent(a1) @testing.requires.predictable_gc def test_stale_state_positive_gc(self): User = self.classes.User s, u1, a1 = self._fixture() s.expunge(u1) del u1 gc_collect() u1 = s.query(User).first() u1.addresses.remove(a1) self._assert_not_hasparent(a1) @testing.requires.predictable_gc def test_stale_state_positive_pk_change(self): """Illustrate that we can't easily link a stale state to a fresh one if the fresh one has a PK change (unless we a. tracked all the previous PKs, wasteful, or b. recycled states - time consuming, breaks lots of edge cases, destabilizes the code) """ User = self.classes.User s, u1, a1 = self._fixture() s._expunge_state(attributes.instance_state(u1)) del u1 gc_collect() u1 = s.query(User).first() # primary key change. now we # can't rely on state.key as the # identifier. u1.id = 5 a1.user_id = 5 s.flush() assert_raises_message( orm_exc.StaleDataError, "can't be sure this is the most recent parent.", u1.addresses.remove, a1 ) # unfortunately, u1.addresses was impacted # here assert u1.addresses == [] # expire all and we can continue s.expire_all() u1.addresses.remove(a1) self._assert_not_hasparent(a1) def test_stale_state_negative_child_expired(self): """illustrate the current behavior of expiration on the child. there's some uncertainty here in how this use case should work. """ User = self.classes.User s, u1, a1 = self._fixture() u2 = User(addresses=[a1]) s.expire(a1) u1.addresses.remove(a1) # controversy here. The action is # to expire one object, not the other, and remove; # this is pretty abusive in any case. for now # we are expiring away the 'parents' collection # so the remove will unset the hasparent flag. # this is what has occurred historically in any case. 
self._assert_not_hasparent(a1) #self._assert_hasparent(a1) @testing.requires.predictable_gc def test_stale_state_negative(self): User = self.classes.User s, u1, a1 = self._fixture() u2 = User(addresses=[a1]) s.add(u2) s.flush() s._expunge_state(attributes.instance_state(u2)) del u2 gc_collect() assert_raises_message( orm_exc.StaleDataError, "can't be sure this is the most recent parent.", u1.addresses.remove, a1 ) s.flush() self._assert_hasparent(a1) def test_fresh_state_positive(self): User = self.classes.User s, u1, a1 = self._fixture() self._assert_hasparent(a1) def test_fresh_state_negative(self): User = self.classes.User s, u1, a1 = self._fixture() u1.addresses.remove(a1) self._assert_not_hasparent(a1) SQLAlchemy-0.8.4/test/orm/test_immediate_load.py0000644000076500000240000000323512251147172022360 0ustar classicstaff00000000000000"""basic tests of lazy loaded attributes""" from sqlalchemy import testing from sqlalchemy.orm import mapper, relationship, create_session, immediateload from sqlalchemy.testing import eq_ from test.orm import _fixtures class ImmediateTest(_fixtures.FixtureTest): run_inserts = 'once' run_deletes = None def test_basic_option(self): Address, addresses, users, User = (self.classes.Address, self.tables.addresses, self.tables.users, self.classes.User) mapper(Address, addresses) mapper(User, users, properties={ 'addresses':relationship(Address) }) sess = create_session() l = sess.query(User).options(immediateload(User.addresses)).filter(users.c.id==7).all() eq_(len(sess.identity_map), 2) sess.close() eq_( [User(id=7, addresses=[Address(id=1, email_address='jack@bean.com')])], l ) def test_basic(self): Address, addresses, users, User = (self.classes.Address, self.tables.addresses, self.tables.users, self.classes.User) mapper(Address, addresses) mapper(User, users, properties={ 'addresses':relationship(Address, lazy='immediate') }) sess = create_session() l = sess.query(User).filter(users.c.id==7).all() eq_(len(sess.identity_map), 2) sess.close() eq_( [User(id=7, addresses=[Address(id=1, email_address='jack@bean.com')])], l ) SQLAlchemy-0.8.4/test/orm/test_inspect.py0000644000076500000240000003215612251150016021063 0ustar classicstaff00000000000000"""test the inspection registry system.""" from sqlalchemy.testing import eq_, assert_raises_message, is_ from sqlalchemy import exc, util from sqlalchemy import inspect from test.orm import _fixtures from sqlalchemy.orm import class_mapper, synonym, Session, aliased from sqlalchemy.orm.attributes import instance_state, NO_VALUE from sqlalchemy import testing class TestORMInspection(_fixtures.FixtureTest): @classmethod def setup_mappers(cls): cls._setup_stock_mapping() inspect(cls.classes.User).add_property( "name_syn",synonym("name") ) def test_class_mapper(self): User = self.classes.User assert inspect(User) is class_mapper(User) def test_column_collection_iterate(self): User = self.classes.User user_table = self.tables.users insp = inspect(User) eq_( list(insp.columns), [user_table.c.id, user_table.c.name] ) is_( insp.columns.id, user_table.c.id ) def test_primary_key(self): User = self.classes.User user_table = self.tables.users insp = inspect(User) eq_(insp.primary_key, (user_table.c.id,) ) def test_local_table(self): User = self.classes.User user_table = self.tables.users insp = inspect(User) is_(insp.local_table, user_table) def test_mapped_table(self): User = self.classes.User user_table = self.tables.users insp = inspect(User) is_(insp.mapped_table, user_table) def test_mapper_selectable(self): User = 
self.classes.User user_table = self.tables.users insp = inspect(User) is_(insp.selectable, user_table) assert not insp.is_selectable assert not insp.is_aliased_class def test_mapper_selectable_fixed(self): from sqlalchemy.orm import mapper class Foo(object): pass class Bar(Foo): pass user_table = self.tables.users addresses_table = self.tables.addresses mapper(Foo, user_table, with_polymorphic=(Bar,)) mapper(Bar, addresses_table, inherits=Foo) i1 = inspect(Foo) i2 = inspect(Foo) assert i1.selectable is i2.selectable def test_aliased_class(self): Address = self.classes.Address ualias = aliased(Address) insp = inspect(ualias) is_(insp.mapper, inspect(Address)) is_(insp.selectable, ualias._AliasedClass__adapter.selectable) assert not insp.is_selectable assert insp.is_aliased_class def test_not_mapped_class(self): class Foo(object): pass assert_raises_message( exc.NoInspectionAvailable, "No inspection system is available for object of type", inspect, Foo ) def test_not_mapped_instance(self): class Foo(object): pass assert_raises_message( exc.NoInspectionAvailable, "No inspection system is available for object of type", inspect, Foo() ) def test_property(self): User = self.classes.User insp = inspect(User) is_(insp.attrs.id, class_mapper(User).get_property('id')) def test_with_polymorphic(self): User = self.classes.User insp = inspect(User) eq_(insp.with_polymorphic_mappers, []) def test_col_property(self): User = self.classes.User user_table = self.tables.users insp = inspect(User) id_prop = insp.attrs.id eq_(id_prop.columns, [user_table.c.id]) is_(id_prop.expression, user_table.c.id) assert not hasattr(id_prop, 'mapper') def test_attr_keys(self): User = self.classes.User insp = inspect(User) eq_( set(insp.attrs.keys()), set(['addresses', 'orders', 'id', 'name', 'name_syn']) ) def test_col_filter(self): User = self.classes.User insp = inspect(User) eq_( list(insp.column_attrs), [insp.get_property('id'), insp.get_property('name')] ) eq_( insp.column_attrs.keys(), ['id', 'name'] ) is_( insp.column_attrs.id, User.id.property ) def test_synonym_filter(self): User = self.classes.User syn = inspect(User).synonyms eq_( list(syn.keys()), ['name_syn'] ) is_(syn.name_syn, User.name_syn.original_property) eq_(dict(syn), { "name_syn": User.name_syn.original_property }) def test_relationship_filter(self): User = self.classes.User rel = inspect(User).relationships eq_( rel.addresses, User.addresses.property ) eq_( set(rel.keys()), set(['orders', 'addresses']) ) def test_insp_relationship_prop(self): User = self.classes.User Address = self.classes.Address prop = inspect(User.addresses) is_(prop, User.addresses) is_(prop.parent, class_mapper(User)) is_(prop._parentmapper, class_mapper(User)) is_(prop.mapper, class_mapper(Address)) def test_insp_aliased_relationship_prop(self): User = self.classes.User Address = self.classes.Address ua = aliased(User) prop = inspect(ua.addresses) is_(prop, ua.addresses) is_(prop.property.parent.mapper, class_mapper(User)) is_(prop.property.mapper, class_mapper(Address)) is_(prop.parent.entity, ua) is_(prop.parent.class_, User) is_(prop._parentmapper, class_mapper(User)) is_(prop.mapper, class_mapper(Address)) is_(prop._parententity, inspect(ua)) def test_insp_column_prop(self): User = self.classes.User prop = inspect(User.name) is_(prop, User.name) is_(prop.parent, class_mapper(User)) assert not hasattr(prop, "mapper") def test_insp_aliased_column_prop(self): User = self.classes.User ua = aliased(User) prop = inspect(ua.name) is_(prop, ua.name) is_(prop.property.parent.mapper, 
class_mapper(User)) assert not hasattr(prop.property, "mapper") is_(prop.parent.entity, ua) is_(prop.parent.class_, User) is_(prop._parentmapper, class_mapper(User)) assert not hasattr(prop, "mapper") is_(prop._parententity, inspect(ua)) def test_rel_accessors(self): User = self.classes.User Address = self.classes.Address prop = inspect(User.addresses) is_(prop.property.parent, class_mapper(User)) is_(prop.property.mapper, class_mapper(Address)) is_(prop.parent, class_mapper(User)) is_(prop.mapper, class_mapper(Address)) assert not hasattr(prop, 'columns') assert hasattr(prop, 'expression') def test_extension_types(self): from sqlalchemy.ext.associationproxy import \ association_proxy, ASSOCIATION_PROXY from sqlalchemy.ext.hybrid import hybrid_property, hybrid_method, \ HYBRID_PROPERTY, HYBRID_METHOD from sqlalchemy import Table, MetaData, Integer, Column from sqlalchemy.orm import mapper from sqlalchemy.orm.interfaces import NOT_EXTENSION class SomeClass(self.classes.User): some_assoc = association_proxy('addresses', 'email_address') @hybrid_property def upper_name(self): raise NotImplementedError() @hybrid_method def conv(self, fn): raise NotImplementedError() class SomeSubClass(SomeClass): @hybrid_property def upper_name(self): raise NotImplementedError() @hybrid_property def foo(self): raise NotImplementedError() t = Table('sometable', MetaData(), Column('id', Integer, primary_key=True)) mapper(SomeClass, t) mapper(SomeSubClass, inherits=SomeClass) insp = inspect(SomeSubClass) eq_( dict((k, v.extension_type) for k, v in insp.all_orm_descriptors.items() ), { 'id': NOT_EXTENSION, 'name': NOT_EXTENSION, 'name_syn': NOT_EXTENSION, 'addresses': NOT_EXTENSION, 'orders': NOT_EXTENSION, 'upper_name': HYBRID_PROPERTY, 'foo': HYBRID_PROPERTY, 'conv': HYBRID_METHOD, 'some_assoc': ASSOCIATION_PROXY } ) is_( insp.all_orm_descriptors.upper_name, SomeSubClass.__dict__['upper_name'] ) is_( insp.all_orm_descriptors.some_assoc, SomeClass.some_assoc ) is_( inspect(SomeClass).all_orm_descriptors.upper_name, SomeClass.__dict__['upper_name'] ) def test_instance_state(self): User = self.classes.User u1 = User() insp = inspect(u1) is_(insp, instance_state(u1)) def test_instance_state_attr(self): User = self.classes.User u1 = User(name='ed') insp = inspect(u1) eq_( set(insp.attrs.keys()), set(['id', 'name', 'name_syn', 'addresses', 'orders']) ) eq_( insp.attrs.name.value, 'ed' ) eq_( insp.attrs.name.loaded_value, 'ed' ) def test_instance_state_attr_passive_value_scalar(self): User = self.classes.User u1 = User(name='ed') insp = inspect(u1) # value was not set, NO_VALUE eq_( insp.attrs.id.loaded_value, NO_VALUE ) # regular accessor sets it eq_( insp.attrs.id.value, None ) # now the None is there eq_( insp.attrs.id.loaded_value, None ) def test_instance_state_attr_passive_value_collection(self): User = self.classes.User u1 = User(name='ed') insp = inspect(u1) # value was not set, NO_VALUE eq_( insp.attrs.addresses.loaded_value, NO_VALUE ) # regular accessor sets it eq_( insp.attrs.addresses.value, [] ) # now the None is there eq_( insp.attrs.addresses.loaded_value, [] ) def test_instance_state_attr_hist(self): User = self.classes.User u1 = User(name='ed') insp = inspect(u1) hist = insp.attrs.addresses.history eq_( hist.unchanged, None ) u1.addresses hist = insp.attrs.addresses.history eq_( hist.unchanged, [] ) def test_instance_state_ident_transient(self): User = self.classes.User u1 = User(name='ed') insp = inspect(u1) is_(insp.identity, None) def test_instance_state_ident_persistent(self): User = 
self.classes.User u1 = User(name='ed') s = Session(testing.db) s.add(u1) s.flush() insp = inspect(u1) eq_(insp.identity, (u1.id,)) is_(s.query(User).get(insp.identity), u1) def test_is_instance(self): User = self.classes.User u1 = User(name='ed') insp = inspect(u1) assert insp.is_instance insp = inspect(User) assert not insp.is_instance insp = inspect(aliased(User)) assert not insp.is_instance def test_identity_key(self): User = self.classes.User u1 = User(name='ed') s = Session(testing.db) s.add(u1) s.flush() insp = inspect(u1) eq_( insp.identity_key, (User, (u1.id, )) ) def test_persistence_states(self): User = self.classes.User u1 = User(name='ed') insp = inspect(u1) eq_( (insp.transient, insp.pending, insp.persistent, insp.detached), (True, False, False, False) ) s = Session(testing.db) s.add(u1) eq_( (insp.transient, insp.pending, insp.persistent, insp.detached), (False, True, False, False) ) s.flush() eq_( (insp.transient, insp.pending, insp.persistent, insp.detached), (False, False, True, False) ) s.expunge(u1) eq_( (insp.transient, insp.pending, insp.persistent, insp.detached), (False, False, False, True) ) def test_session_accessor(self): User = self.classes.User u1 = User(name='ed') insp = inspect(u1) is_(insp.session, None) s = Session() s.add(u1) is_(insp.session, s) def test_object_accessor(self): User = self.classes.User u1 = User(name='ed') insp = inspect(u1) is_(insp.object, u1) SQLAlchemy-0.8.4/test/orm/test_instrumentation.py0000644000076500000240000004441412251150016022661 0ustar classicstaff00000000000000 from sqlalchemy.testing import assert_raises, assert_raises_message import sqlalchemy as sa from sqlalchemy import MetaData, Integer, ForeignKey, util, event from sqlalchemy.orm import mapper, relationship, create_session, \ attributes, class_mapper, clear_mappers, instrumentation, events from sqlalchemy.testing.schema import Table from sqlalchemy.testing.schema import Column from sqlalchemy.testing import eq_, ne_ from sqlalchemy.testing import fixtures from sqlalchemy import testing class InitTest(fixtures.ORMTest): def fixture(self): return Table('t', MetaData(), Column('id', Integer, primary_key=True), Column('type', Integer), Column('x', Integer), Column('y', Integer)) def register(self, cls, canary): original_init = cls.__init__ instrumentation.register_class(cls) ne_(cls.__init__, original_init) manager = instrumentation.manager_of_class(cls) def init(state, args, kwargs): canary.append((cls, 'init', state.class_)) event.listen(manager, 'init', init, raw=True) def test_ai(self): inits = [] class A(object): def __init__(self): inits.append((A, '__init__')) obj = A() eq_(inits, [(A, '__init__')]) def test_A(self): inits = [] class A(object): pass self.register(A, inits) obj = A() eq_(inits, [(A, 'init', A)]) def test_Ai(self): inits = [] class A(object): def __init__(self): inits.append((A, '__init__')) self.register(A, inits) obj = A() eq_(inits, [(A, 'init', A), (A, '__init__')]) def test_ai_B(self): inits = [] class A(object): def __init__(self): inits.append((A, '__init__')) class B(A): pass self.register(B, inits) obj = A() eq_(inits, [(A, '__init__')]) del inits[:] obj = B() eq_(inits, [(B, 'init', B), (A, '__init__')]) def test_ai_Bi(self): inits = [] class A(object): def __init__(self): inits.append((A, '__init__')) class B(A): def __init__(self): inits.append((B, '__init__')) super(B, self).__init__() self.register(B, inits) obj = A() eq_(inits, [(A, '__init__')]) del inits[:] obj = B() eq_(inits, [(B, 'init', B), (B, '__init__'), (A, '__init__')]) def 
test_Ai_bi(self): inits = [] class A(object): def __init__(self): inits.append((A, '__init__')) self.register(A, inits) class B(A): def __init__(self): inits.append((B, '__init__')) super(B, self).__init__() obj = A() eq_(inits, [(A, 'init', A), (A, '__init__')]) del inits[:] obj = B() eq_(inits, [(B, '__init__'), (A, 'init', B), (A, '__init__')]) def test_Ai_Bi(self): inits = [] class A(object): def __init__(self): inits.append((A, '__init__')) self.register(A, inits) class B(A): def __init__(self): inits.append((B, '__init__')) super(B, self).__init__() self.register(B, inits) obj = A() eq_(inits, [(A, 'init', A), (A, '__init__')]) del inits[:] obj = B() eq_(inits, [(B, 'init', B), (B, '__init__'), (A, '__init__')]) def test_Ai_B(self): inits = [] class A(object): def __init__(self): inits.append((A, '__init__')) self.register(A, inits) class B(A): pass self.register(B, inits) obj = A() eq_(inits, [(A, 'init', A), (A, '__init__')]) del inits[:] obj = B() eq_(inits, [(B, 'init', B), (A, '__init__')]) def test_Ai_Bi_Ci(self): inits = [] class A(object): def __init__(self): inits.append((A, '__init__')) self.register(A, inits) class B(A): def __init__(self): inits.append((B, '__init__')) super(B, self).__init__() self.register(B, inits) class C(B): def __init__(self): inits.append((C, '__init__')) super(C, self).__init__() self.register(C, inits) obj = A() eq_(inits, [(A, 'init', A), (A, '__init__')]) del inits[:] obj = B() eq_(inits, [(B, 'init', B), (B, '__init__'), (A, '__init__')]) del inits[:] obj = C() eq_(inits, [(C, 'init', C), (C, '__init__'), (B, '__init__'), (A, '__init__')]) def test_Ai_bi_Ci(self): inits = [] class A(object): def __init__(self): inits.append((A, '__init__')) self.register(A, inits) class B(A): def __init__(self): inits.append((B, '__init__')) super(B, self).__init__() class C(B): def __init__(self): inits.append((C, '__init__')) super(C, self).__init__() self.register(C, inits) obj = A() eq_(inits, [(A, 'init', A), (A, '__init__')]) del inits[:] obj = B() eq_(inits, [(B, '__init__'), (A, 'init', B), (A, '__init__')]) del inits[:] obj = C() eq_(inits, [(C, 'init', C), (C, '__init__'), (B, '__init__'), (A, '__init__')]) def test_Ai_b_Ci(self): inits = [] class A(object): def __init__(self): inits.append((A, '__init__')) self.register(A, inits) class B(A): pass class C(B): def __init__(self): inits.append((C, '__init__')) super(C, self).__init__() self.register(C, inits) obj = A() eq_(inits, [(A, 'init', A), (A, '__init__')]) del inits[:] obj = B() eq_(inits, [(A, 'init', B), (A, '__init__')]) del inits[:] obj = C() eq_(inits, [(C, 'init', C), (C, '__init__'), (A, '__init__')]) def test_Ai_B_Ci(self): inits = [] class A(object): def __init__(self): inits.append((A, '__init__')) self.register(A, inits) class B(A): pass self.register(B, inits) class C(B): def __init__(self): inits.append((C, '__init__')) super(C, self).__init__() self.register(C, inits) obj = A() eq_(inits, [(A, 'init', A), (A, '__init__')]) del inits[:] obj = B() eq_(inits, [(B, 'init', B), (A, '__init__')]) del inits[:] obj = C() eq_(inits, [(C, 'init', C), (C, '__init__'), (A, '__init__')]) def test_Ai_B_C(self): inits = [] class A(object): def __init__(self): inits.append((A, '__init__')) self.register(A, inits) class B(A): pass self.register(B, inits) class C(B): pass self.register(C, inits) obj = A() eq_(inits, [(A, 'init', A), (A, '__init__')]) del inits[:] obj = B() eq_(inits, [(B, 'init', B), (A, '__init__')]) del inits[:] obj = C() eq_(inits, [(C, 'init', C), (A, '__init__')]) def 
test_A_Bi_C(self): inits = [] class A(object): pass self.register(A, inits) class B(A): def __init__(self): inits.append((B, '__init__')) self.register(B, inits) class C(B): pass self.register(C, inits) obj = A() eq_(inits, [(A, 'init', A)]) del inits[:] obj = B() eq_(inits, [(B, 'init', B), (B, '__init__')]) del inits[:] obj = C() eq_(inits, [(C, 'init', C), (B, '__init__')]) def test_A_B_Ci(self): inits = [] class A(object): pass self.register(A, inits) class B(A): pass self.register(B, inits) class C(B): def __init__(self): inits.append((C, '__init__')) self.register(C, inits) obj = A() eq_(inits, [(A, 'init', A)]) del inits[:] obj = B() eq_(inits, [(B, 'init', B)]) del inits[:] obj = C() eq_(inits, [(C, 'init', C), (C, '__init__')]) def test_A_B_C(self): inits = [] class A(object): pass self.register(A, inits) class B(A): pass self.register(B, inits) class C(B): pass self.register(C, inits) obj = A() eq_(inits, [(A, 'init', A)]) del inits[:] obj = B() eq_(inits, [(B, 'init', B)]) del inits[:] obj = C() eq_(inits, [(C, 'init', C)]) def test_defaulted_init(self): class X(object): def __init__(self_, a, b=123, c='abc'): self_.a = a self_.b = b self_.c = c instrumentation.register_class(X) o = X('foo') eq_(o.a, 'foo') eq_(o.b, 123) eq_(o.c, 'abc') class Y(object): unique = object() class OutOfScopeForEval(object): def __repr__(self_): # misleading repr return '123' outofscope = OutOfScopeForEval() def __init__(self_, u=unique, o=outofscope): self_.u = u self_.o = o instrumentation.register_class(Y) o = Y() assert o.u is Y.unique assert o.o is Y.outofscope class MapperInitTest(fixtures.ORMTest): def fixture(self): return Table('t', MetaData(), Column('id', Integer, primary_key=True), Column('type', Integer), Column('x', Integer), Column('y', Integer)) def test_partially_mapped_inheritance(self): class A(object): pass class B(A): pass class C(B): def __init__(self, x): pass m = mapper(A, self.fixture()) # B is not mapped in the current implementation assert_raises(sa.orm.exc.UnmappedClassError, class_mapper, B) # C is not mapped in the current implementation assert_raises(sa.orm.exc.UnmappedClassError, class_mapper, C) def test_del_warning(self): class A(object): def __del__(self): pass assert_raises_message( sa.exc.SAWarning, r"__del__\(\) method on class " " will cause " "unreachable cycles and memory leaks, as SQLAlchemy " "instrumentation often creates reference cycles. 
" "Please remove this method.", mapper, A, self.fixture() ) class OnLoadTest(fixtures.ORMTest): """Check that Events.load is not hit in regular attributes operations.""" def test_basic(self): import pickle global A class A(object): pass def canary(instance): assert False try: instrumentation.register_class(A) manager = instrumentation.manager_of_class(A) event.listen(manager, 'load', canary) a = A() p_a = pickle.dumps(a) re_a = pickle.loads(p_a) finally: del A class NativeInstrumentationTest(fixtures.ORMTest): def test_register_reserved_attribute(self): class T(object): pass instrumentation.register_class(T) manager = instrumentation.manager_of_class(T) sa = instrumentation.ClassManager.STATE_ATTR ma = instrumentation.ClassManager.MANAGER_ATTR fails = lambda method, attr: assert_raises( KeyError, getattr(manager, method), attr, property()) fails('install_member', sa) fails('install_member', ma) fails('install_descriptor', sa) fails('install_descriptor', ma) def test_mapped_stateattr(self): t = Table('t', MetaData(), Column('id', Integer, primary_key=True), Column(instrumentation.ClassManager.STATE_ATTR, Integer)) class T(object): pass assert_raises(KeyError, mapper, T, t) def test_mapped_managerattr(self): t = Table('t', MetaData(), Column('id', Integer, primary_key=True), Column(instrumentation.ClassManager.MANAGER_ATTR, Integer)) class T(object): pass assert_raises(KeyError, mapper, T, t) class Py3KFunctionInstTest(fixtures.ORMTest): __requires__ = ("python3", ) # Py3K #def _kw_only_fixture(self): # class A(object): # def __init__(self, a, *, b, c): # self.a = a # self.b = b # self.c = c # return self._instrument(A) # #def _kw_plus_posn_fixture(self): # class A(object): # def __init__(self, a, *args, b, c): # self.a = a # self.b = b # self.c = c # return self._instrument(A) # #def _kw_opt_fixture(self): # class A(object): # def __init__(self, a, *, b, c="c"): # self.a = a # self.b = b # self.c = c # return self._instrument(A) def _instrument(self, cls): manager = instrumentation.register_class(cls) canary = [] def check(target, args, kwargs): canary.append((args, kwargs)) event.listen(manager, "init", check) return cls, canary def test_kw_only_args(self): cls, canary = self._kw_only_fixture() a = cls("a", b="b", c="c") eq_(canary, [(('a', ), {'b': 'b', 'c': 'c'})]) def test_kw_plus_posn_args(self): cls, canary = self._kw_plus_posn_fixture() a = cls("a", 1, 2, 3, b="b", c="c") eq_(canary, [(('a', 1, 2, 3), {'b': 'b', 'c': 'c'})]) def test_kw_only_args_plus_opt(self): cls, canary = self._kw_opt_fixture() a = cls("a", b="b") eq_(canary, [(('a', ), {'b': 'b', 'c': 'c'})]) canary[:] = [] a = cls("a", b="b", c="d") eq_(canary, [(('a', ), {'b': 'b', 'c': 'd'})]) def test_kw_only_sig(self): cls, canary = self._kw_only_fixture() assert_raises( TypeError, cls, "a", "b", "c" ) def test_kw_plus_opt_sig(self): cls, canary = self._kw_only_fixture() assert_raises( TypeError, cls, "a", "b", "c" ) assert_raises( TypeError, cls, "a", "b", c="c" ) class MiscTest(fixtures.ORMTest): """Seems basic, but not directly covered elsewhere!""" def test_compileonattr(self): t = Table('t', MetaData(), Column('id', Integer, primary_key=True), Column('x', Integer)) class A(object): pass mapper(A, t) a = A() assert a.id is None def test_compileonattr_rel(self): m = MetaData() t1 = Table('t1', m, Column('id', Integer, primary_key=True), Column('x', Integer)) t2 = Table('t2', m, Column('id', Integer, primary_key=True), Column('t1_id', Integer, ForeignKey('t1.id'))) class A(object): pass class B(object): pass mapper(A, t1, 
properties=dict(bs=relationship(B))) mapper(B, t2) a = A() assert not a.bs def test_uninstrument(self): class A(object): pass manager = instrumentation.register_class(A) attributes.register_attribute(A, 'x', uselist=False, useobject=False) assert instrumentation.manager_of_class(A) is manager instrumentation.unregister_class(A) assert instrumentation.manager_of_class(A) is None assert not hasattr(A, 'x') # I prefer 'is' here but on pypy # it seems only == works assert A.__init__ == object.__init__ def test_compileonattr_rel_backref_a(self): m = MetaData() t1 = Table('t1', m, Column('id', Integer, primary_key=True), Column('x', Integer)) t2 = Table('t2', m, Column('id', Integer, primary_key=True), Column('t1_id', Integer, ForeignKey('t1.id'))) class Base(object): def __init__(self, *args, **kwargs): pass for base in object, Base: class A(base): pass class B(base): pass mapper(A, t1, properties=dict(bs=relationship(B, backref='a'))) mapper(B, t2) b = B() assert b.a is None a = A() b.a = a session = create_session() session.add(b) assert a in session, "base is %s" % base def test_compileonattr_rel_backref_b(self): m = MetaData() t1 = Table('t1', m, Column('id', Integer, primary_key=True), Column('x', Integer)) t2 = Table('t2', m, Column('id', Integer, primary_key=True), Column('t1_id', Integer, ForeignKey('t1.id'))) class Base(object): def __init__(self): pass class Base_AKW(object): def __init__(self, *args, **kwargs): pass for base in object, Base, Base_AKW: class A(base): pass class B(base): pass mapper(A, t1) mapper(B, t2, properties=dict(a=relationship(A, backref='bs'))) a = A() b = B() b.a = a session = create_session() session.add(a) assert b in session, 'base: %s' % base SQLAlchemy-0.8.4/test/orm/test_joins.py0000644000076500000240000026731212251150016020544 0ustar classicstaff00000000000000from sqlalchemy.testing import eq_, assert_raises, assert_raises_message import operator from sqlalchemy import * from sqlalchemy import exc as sa_exc, util from sqlalchemy.sql import compiler, table, column from sqlalchemy.engine import default from sqlalchemy.orm import * from sqlalchemy.orm import attributes from sqlalchemy.testing import eq_ import sqlalchemy as sa from sqlalchemy import testing from sqlalchemy.testing import AssertsCompiledSQL, engines from sqlalchemy.testing.schema import Column from test.orm import _fixtures from sqlalchemy.testing import fixtures from sqlalchemy.orm.util import join, outerjoin, with_parent class QueryTest(_fixtures.FixtureTest): run_setup_mappers = 'once' run_inserts = 'once' run_deletes = None @classmethod def setup_mappers(cls): Node, composite_pk_table, users, Keyword, items, Dingaling, \ order_items, item_keywords, Item, User, dingalings, \ Address, keywords, CompositePk, nodes, Order, orders, \ addresses = cls.classes.Node, \ cls.tables.composite_pk_table, cls.tables.users, \ cls.classes.Keyword, cls.tables.items, \ cls.classes.Dingaling, cls.tables.order_items, \ cls.tables.item_keywords, cls.classes.Item, \ cls.classes.User, cls.tables.dingalings, \ cls.classes.Address, cls.tables.keywords, \ cls.classes.CompositePk, cls.tables.nodes, \ cls.classes.Order, cls.tables.orders, cls.tables.addresses mapper(User, users, properties={ 'addresses':relationship(Address, backref='user', order_by=addresses.c.id), 'orders':relationship(Order, backref='user', order_by=orders.c.id), # o2m, m2o }) mapper(Address, addresses, properties={ 'dingaling':relationship(Dingaling, uselist=False, backref="address") #o2o }) mapper(Dingaling, dingalings) mapper(Order, orders, 
properties={ 'items':relationship(Item, secondary=order_items, order_by=items.c.id), #m2m 'address':relationship(Address), # m2o }) mapper(Item, items, properties={ 'keywords':relationship(Keyword, secondary=item_keywords) #m2m }) mapper(Keyword, keywords) mapper(Node, nodes, properties={ 'children':relationship(Node, backref=backref('parent', remote_side=[nodes.c.id]) ) }) mapper(CompositePk, composite_pk_table) configure_mappers() class InheritedJoinTest(fixtures.MappedTest, AssertsCompiledSQL): run_setup_mappers = 'once' @classmethod def define_tables(cls, metadata): Table('companies', metadata, Column('company_id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', String(50))) Table('people', metadata, Column('person_id', Integer, primary_key=True, test_needs_autoincrement=True), Column('company_id', Integer, ForeignKey('companies.company_id')), Column('name', String(50)), Column('type', String(30))) Table('engineers', metadata, Column('person_id', Integer, ForeignKey('people.person_id'), primary_key=True), Column('status', String(30)), Column('engineer_name', String(50)), Column('primary_language', String(50)), ) Table('machines', metadata, Column('machine_id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', String(50)), Column('engineer_id', Integer, ForeignKey('engineers.person_id'))) Table('managers', metadata, Column('person_id', Integer, ForeignKey('people.person_id'), primary_key=True), Column('status', String(30)), Column('manager_name', String(50)) ) Table('boss', metadata, Column('boss_id', Integer, ForeignKey('managers.person_id'), primary_key=True), Column('golf_swing', String(30)), ) Table('paperwork', metadata, Column('paperwork_id', Integer, primary_key=True, test_needs_autoincrement=True), Column('description', String(50)), Column('person_id', Integer, ForeignKey('people.person_id'))) @classmethod def setup_classes(cls): paperwork, people, companies, boss, managers, machines, engineers = (cls.tables.paperwork, cls.tables.people, cls.tables.companies, cls.tables.boss, cls.tables.managers, cls.tables.machines, cls.tables.engineers) class Company(cls.Comparable): pass class Person(cls.Comparable): pass class Engineer(Person): pass class Manager(Person): pass class Boss(Manager): pass class Machine(cls.Comparable): pass class Paperwork(cls.Comparable): pass mapper(Company, companies, properties={ 'employees':relationship(Person, order_by=people.c.person_id) }) mapper(Machine, machines) mapper(Person, people, polymorphic_on=people.c.type, polymorphic_identity='person', order_by=people.c.person_id, properties={ 'paperwork':relationship(Paperwork, order_by=paperwork.c.paperwork_id) }) mapper(Engineer, engineers, inherits=Person, polymorphic_identity='engineer', properties={ 'machines':relationship(Machine, order_by=machines.c.machine_id) }) mapper(Manager, managers, inherits=Person, polymorphic_identity='manager') mapper(Boss, boss, inherits=Manager, polymorphic_identity='boss') mapper(Paperwork, paperwork) def test_single_prop(self): Company = self.classes.Company sess = create_session() self.assert_compile( sess.query(Company).join(Company.employees), "SELECT companies.company_id AS companies_company_id, companies.name AS companies_name " "FROM companies JOIN people ON companies.company_id = people.company_id" , use_default_dialect = True ) def test_force_via_select_from(self): Company, Engineer = self.classes.Company, self.classes.Engineer sess = create_session() self.assert_compile( sess.query(Company).\ 
filter(Company.company_id==Engineer.company_id).\ filter(Engineer.primary_language=='java'), "SELECT companies.company_id AS companies_company_id, companies.name AS companies_name " "FROM companies, people, engineers " "WHERE companies.company_id = people.company_id AND engineers.primary_language " "= :primary_language_1", use_default_dialect=True ) self.assert_compile( sess.query(Company).select_from(Company, Engineer).\ filter(Company.company_id==Engineer.company_id).\ filter(Engineer.primary_language=='java'), "SELECT companies.company_id AS companies_company_id, companies.name AS companies_name " "FROM companies, people JOIN engineers ON people.person_id = engineers.person_id " "WHERE companies.company_id = people.company_id AND engineers.primary_language =" " :primary_language_1", use_default_dialect=True ) def test_single_prop_of_type(self): Company, Engineer = self.classes.Company, self.classes.Engineer sess = create_session() self.assert_compile( sess.query(Company).join(Company.employees.of_type(Engineer)), "SELECT companies.company_id AS companies_company_id, companies.name AS companies_name " "FROM companies JOIN (SELECT people.person_id AS people_person_id, " "people.company_id AS people_company_id, people.name AS people_name, " "people.type AS people_type, engineers.person_id AS " "engineers_person_id, engineers.status AS engineers_status, " "engineers.engineer_name AS engineers_engineer_name, " "engineers.primary_language AS engineers_primary_language " "FROM people JOIN engineers ON people.person_id = engineers.person_id) AS " "anon_1 ON companies.company_id = anon_1.people_company_id" , use_default_dialect = True ) def test_prop_with_polymorphic_1(self): Person, Manager, Paperwork = (self.classes.Person, self.classes.Manager, self.classes.Paperwork) sess = create_session() self.assert_compile( sess.query(Person).with_polymorphic(Manager). join('paperwork').filter(Paperwork.description.like('%review%')), "SELECT people.person_id AS people_person_id, people.company_id AS" " people_company_id, " "people.name AS people_name, people.type AS people_type, managers.person_id " "AS managers_person_id, " "managers.status AS managers_status, managers.manager_name AS " "managers_manager_name FROM people " "LEFT OUTER JOIN managers ON people.person_id = managers.person_id JOIN " "paperwork ON people.person_id = " "paperwork.person_id WHERE paperwork.description LIKE :description_1 " "ORDER BY people.person_id" , use_default_dialect=True ) def test_prop_with_polymorphic_2(self): Person, Manager, Paperwork = (self.classes.Person, self.classes.Manager, self.classes.Paperwork) sess = create_session() self.assert_compile( sess.query(Person).with_polymorphic(Manager). join('paperwork', aliased=True). 
filter(Paperwork.description.like('%review%')), "SELECT people.person_id AS people_person_id, people.company_id AS people_company_id, " "people.name AS people_name, people.type AS people_type, managers.person_id " "AS managers_person_id, " "managers.status AS managers_status, managers.manager_name AS managers_manager_name " "FROM people LEFT OUTER JOIN managers ON people.person_id = managers.person_id JOIN " "paperwork AS paperwork_1 ON people.person_id = paperwork_1.person_id " "WHERE paperwork_1.description LIKE :description_1 ORDER BY people.person_id" , use_default_dialect=True ) def test_explicit_polymorphic_join(self): Company, Engineer = self.classes.Company, self.classes.Engineer sess = create_session() self.assert_compile( sess.query(Company).join(Engineer).filter(Engineer.engineer_name=='vlad'), "SELECT companies.company_id AS companies_company_id, companies.name AS " "companies_name " "FROM companies JOIN (SELECT people.person_id AS people_person_id, " "people.company_id AS " "people_company_id, people.name AS people_name, people.type AS people_type," " engineers.person_id AS " "engineers_person_id, engineers.status AS engineers_status, " "engineers.engineer_name AS engineers_engineer_name, " "engineers.primary_language AS engineers_primary_language " "FROM people JOIN engineers ON people.person_id = engineers.person_id) " "AS anon_1 ON " "companies.company_id = anon_1.people_company_id " "WHERE anon_1.engineers_engineer_name = :engineer_name_1" , use_default_dialect=True ) self.assert_compile( sess.query(Company).join(Engineer, Company.company_id==Engineer.company_id). filter(Engineer.engineer_name=='vlad'), "SELECT companies.company_id AS companies_company_id, companies.name " "AS companies_name " "FROM companies JOIN (SELECT people.person_id AS people_person_id, " "people.company_id AS " "people_company_id, people.name AS people_name, people.type AS " "people_type, engineers.person_id AS " "engineers_person_id, engineers.status AS engineers_status, " "engineers.engineer_name AS engineers_engineer_name, " "engineers.primary_language AS engineers_primary_language " "FROM people JOIN engineers ON people.person_id = engineers.person_id) AS " "anon_1 ON " "companies.company_id = anon_1.people_company_id " "WHERE anon_1.engineers_engineer_name = :engineer_name_1" , use_default_dialect=True ) def test_multiple_adaption(self): """test that multiple filter() adapters get chained together " and work correctly within a multiple-entry join().""" people, Company, Machine, engineers, machines, Engineer = (self.tables.people, self.classes.Company, self.classes.Machine, self.tables.engineers, self.tables.machines, self.classes.Engineer) sess = create_session() self.assert_compile( sess.query(Company).join(people.join(engineers), Company.employees). 
filter(Engineer.name=='dilbert'), "SELECT companies.company_id AS companies_company_id, companies.name AS " "companies_name " "FROM companies JOIN (SELECT people.person_id AS people_person_id, " "people.company_id AS " "people_company_id, people.name AS people_name, people.type AS " "people_type, engineers.person_id " "AS engineers_person_id, engineers.status AS engineers_status, " "engineers.engineer_name AS engineers_engineer_name, " "engineers.primary_language AS engineers_primary_language FROM people " "JOIN engineers ON people.person_id = " "engineers.person_id) AS anon_1 ON companies.company_id = " "anon_1.people_company_id WHERE anon_1.people_name = :name_1" , use_default_dialect = True ) mach_alias = machines.select() self.assert_compile( sess.query(Company).join(people.join(engineers), Company.employees). join(mach_alias, Engineer.machines, from_joinpoint=True). filter(Engineer.name=='dilbert').filter(Machine.name=='foo'), "SELECT companies.company_id AS companies_company_id, companies.name AS " "companies_name " "FROM companies JOIN (SELECT people.person_id AS people_person_id, " "people.company_id AS " "people_company_id, people.name AS people_name, people.type AS people_type," " engineers.person_id " "AS engineers_person_id, engineers.status AS engineers_status, " "engineers.engineer_name AS engineers_engineer_name, " "engineers.primary_language AS engineers_primary_language FROM people " "JOIN engineers ON people.person_id = " "engineers.person_id) AS anon_1 ON companies.company_id = " "anon_1.people_company_id JOIN " "(SELECT machines.machine_id AS machine_id, machines.name AS name, " "machines.engineer_id AS engineer_id " "FROM machines) AS anon_2 ON anon_1.engineers_person_id = anon_2.engineer_id " "WHERE anon_1.people_name = :name_1 AND anon_2.name = :name_2" , use_default_dialect = True ) class JoinTest(QueryTest, AssertsCompiledSQL): __dialect__ = 'default' def test_single_name(self): User = self.classes.User sess = create_session() self.assert_compile( sess.query(User).join("orders"), "SELECT users.id AS users_id, users.name AS users_name " "FROM users JOIN orders ON users.id = orders.user_id" ) assert_raises( sa_exc.InvalidRequestError, sess.query(User).join, "user", ) self.assert_compile( sess.query(User).join("orders", "items"), "SELECT users.id AS users_id, users.name AS users_name FROM users " "JOIN orders ON users.id = orders.user_id JOIN order_items AS order_items_1 " "ON orders.id = order_items_1.order_id JOIN items ON items.id = order_items_1.item_id" ) # test overlapping paths. User->orders is used by both joins, but rendered once. 
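        # (added note, not in the original test: the expected SQL below renders
        #  "JOIN orders ON users.id = orders.user_id" only once - the second
        #  join("orders", ...) call sees that the users->orders path is already
        #  present in the query and only adds the JOIN against addresses.)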
self.assert_compile( sess.query(User).join("orders", "items").join("orders", "address"), "SELECT users.id AS users_id, users.name AS users_name FROM users JOIN orders " "ON users.id = orders.user_id JOIN order_items AS order_items_1 ON orders.id = " "order_items_1.order_id JOIN items ON items.id = order_items_1.item_id JOIN addresses " "ON addresses.id = orders.address_id" ) def test_join_on_synonym(self): class User(object): pass class Address(object): pass users, addresses = (self.tables.users, self.tables.addresses) mapper(User, users, properties={ 'addresses':relationship(Address), 'ad_syn':synonym("addresses") }) mapper(Address, addresses) self.assert_compile( Session().query(User).join(User.ad_syn), "SELECT users.id AS users_id, users.name AS users_name " "FROM users JOIN addresses ON users.id = addresses.user_id" ) def test_multi_tuple_form(self): """test the 'tuple' form of join, now superseded by the two-element join() form. Not deprecating this style as of yet. """ Item, Order, User = (self.classes.Item, self.classes.Order, self.classes.User) sess = create_session() #assert_raises( # sa.exc.SADeprecationWarning, # sess.query(User).join, (Order, User.id==Order.user_id) #) self.assert_compile( sess.query(User).join((Order, User.id == Order.user_id)), "SELECT users.id AS users_id, users.name AS users_name " "FROM users JOIN orders ON users.id = orders.user_id", ) self.assert_compile( sess.query(User).join( (Order, User.id == Order.user_id), (Item, Order.items)), "SELECT users.id AS users_id, users.name AS users_name " "FROM users JOIN orders ON users.id = orders.user_id " "JOIN order_items AS order_items_1 ON orders.id = " "order_items_1.order_id JOIN items ON items.id = " "order_items_1.item_id", ) # the old "backwards" form self.assert_compile( sess.query(User).join(("orders", Order)), "SELECT users.id AS users_id, users.name AS users_name " "FROM users JOIN orders ON users.id = orders.user_id", ) def test_single_prop_1(self): Item, Order, User, Address = (self.classes.Item, self.classes.Order, self.classes.User, self.classes.Address) sess = create_session() self.assert_compile( sess.query(User).join(User.orders), "SELECT users.id AS users_id, users.name AS users_name " "FROM users JOIN orders ON users.id = orders.user_id" ) def test_single_prop_2(self): Item, Order, User, Address = (self.classes.Item, self.classes.Order, self.classes.User, self.classes.Address) sess = create_session() self.assert_compile( sess.query(User).join(Order.user), "SELECT users.id AS users_id, users.name AS users_name " "FROM orders JOIN users ON users.id = orders.user_id" ) def test_single_prop_3(self): Item, Order, User, Address = (self.classes.Item, self.classes.Order, self.classes.User, self.classes.Address) sess = create_session() oalias1 = aliased(Order) self.assert_compile( sess.query(User).join(oalias1.user), "SELECT users.id AS users_id, users.name AS users_name " "FROM orders AS orders_1 JOIN users ON users.id = orders_1.user_id" ) def test_single_prop_4(self): Item, Order, User, Address = (self.classes.Item, self.classes.Order, self.classes.User, self.classes.Address) sess = create_session() oalias1 = aliased(Order) oalias2 = aliased(Order) # another nonsensical query. (from [ticket:1537]). 
# in this case, the contract of "left to right" is honored self.assert_compile( sess.query(User).join(oalias1.user).join(oalias2.user), "SELECT users.id AS users_id, users.name AS users_name " "FROM orders AS orders_1 JOIN users ON users.id = orders_1.user_id, " "orders AS orders_2 JOIN users ON users.id = orders_2.user_id" ) def test_single_prop_5(self): Item, Order, User, Address = (self.classes.Item, self.classes.Order, self.classes.User, self.classes.Address) sess = create_session() self.assert_compile( sess.query(User).join(User.orders, Order.items), "SELECT users.id AS users_id, users.name AS users_name FROM users " "JOIN orders ON users.id = orders.user_id JOIN order_items AS order_items_1 " "ON orders.id = order_items_1.order_id JOIN items ON items.id = order_items_1.item_id" ) def test_single_prop_6(self): Item, Order, User, Address = (self.classes.Item, self.classes.Order, self.classes.User, self.classes.Address) sess = create_session() ualias = aliased(User) self.assert_compile( sess.query(ualias).join(ualias.orders), "SELECT users_1.id AS users_1_id, users_1.name AS users_1_name " "FROM users AS users_1 JOIN orders ON users_1.id = orders.user_id" ) def test_single_prop_7(self): Item, Order, User, Address = (self.classes.Item, self.classes.Order, self.classes.User, self.classes.Address) sess = create_session() # this query is somewhat nonsensical. the old system didn't render a correct # query for this. In this case its the most faithful to what was asked - # there's no linkage between User.orders and "oalias", so two FROM elements # are generated. oalias = aliased(Order) self.assert_compile( sess.query(User).join(User.orders, oalias.items), "SELECT users.id AS users_id, users.name AS users_name FROM users " "JOIN orders ON users.id = orders.user_id, " "orders AS orders_1 JOIN order_items AS order_items_1 ON orders_1.id = order_items_1.order_id " "JOIN items ON items.id = order_items_1.item_id" ) def test_single_prop_8(self): Item, Order, User, Address = (self.classes.Item, self.classes.Order, self.classes.User, self.classes.Address) sess = create_session() # same as before using an aliased() for User as well ualias = aliased(User) oalias = aliased(Order) self.assert_compile( sess.query(ualias).join(ualias.orders, oalias.items), "SELECT users_1.id AS users_1_id, users_1.name AS users_1_name FROM users AS users_1 " "JOIN orders ON users_1.id = orders.user_id, " "orders AS orders_1 JOIN order_items AS order_items_1 ON orders_1.id = order_items_1.order_id " "JOIN items ON items.id = order_items_1.item_id" ) def test_single_prop_9(self): Item, Order, User, Address = (self.classes.Item, self.classes.Order, self.classes.User, self.classes.Address) sess = create_session() self.assert_compile( sess.query(User).filter(User.name == 'ed').from_self(). join(User.orders), "SELECT anon_1.users_id AS anon_1_users_id, anon_1.users_name AS anon_1_users_name " "FROM (SELECT users.id AS users_id, users.name AS users_name " "FROM users " "WHERE users.name = :name_1) AS anon_1 JOIN orders ON anon_1.users_id = orders.user_id" ) def test_single_prop_10(self): Item, Order, User, Address = (self.classes.Item, self.classes.Order, self.classes.User, self.classes.Address) sess = create_session() self.assert_compile( sess.query(User).join(User.addresses, aliased=True). 
filter(Address.email_address == 'foo'), "SELECT users.id AS users_id, users.name AS users_name " "FROM users JOIN addresses AS addresses_1 ON users.id = addresses_1.user_id " "WHERE addresses_1.email_address = :email_address_1" ) def test_single_prop_11(self): Item, Order, User, Address = (self.classes.Item, self.classes.Order, self.classes.User, self.classes.Address) sess = create_session() self.assert_compile( sess.query(User).join(User.orders, Order.items, aliased=True). filter(Item.id == 10), "SELECT users.id AS users_id, users.name AS users_name " "FROM users JOIN orders AS orders_1 ON users.id = orders_1.user_id " "JOIN order_items AS order_items_1 ON orders_1.id = order_items_1.order_id " "JOIN items AS items_1 ON items_1.id = order_items_1.item_id " "WHERE items_1.id = :id_1" ) def test_single_prop_12(self): Item, Order, User, Address = (self.classes.Item, self.classes.Order, self.classes.User, self.classes.Address) sess = create_session() oalias1 = aliased(Order) # test #1 for [ticket:1706] ualias = aliased(User) self.assert_compile( sess.query(ualias). join(oalias1, ualias.orders).\ join(Address, ualias.addresses), "SELECT users_1.id AS users_1_id, users_1.name AS " "users_1_name FROM users AS users_1 JOIN orders AS orders_1 " "ON users_1.id = orders_1.user_id JOIN addresses ON users_1.id " "= addresses.user_id" ) def test_single_prop_13(self): Item, Order, User, Address = (self.classes.Item, self.classes.Order, self.classes.User, self.classes.Address) sess = create_session() # test #2 for [ticket:1706] ualias = aliased(User) ualias2 = aliased(User) self.assert_compile( sess.query(ualias). join(Address, ualias.addresses). join(ualias2, Address.user). join(Order, ualias.orders), "SELECT users_1.id AS users_1_id, users_1.name AS users_1_name FROM users " "AS users_1 JOIN addresses ON users_1.id = addresses.user_id JOIN users AS users_2 " "ON users_2.id = addresses.user_id JOIN orders ON users_1.id = orders.user_id" ) def test_overlapping_paths(self): User = self.classes.User for aliased in (True,False): # load a user who has an order that contains item id 3 and address id 1 (order 3, owned by jack) result = create_session().query(User).join('orders', 'items', aliased=aliased).\ filter_by(id=3).join('orders','address', aliased=aliased).filter_by(id=1).all() assert [User(id=7, name='jack')] == result def test_overlapping_paths_multilevel(self): User = self.classes.User s = Session() q = s.query(User).\ join('orders').\ join('addresses').\ join('orders', 'items').\ join('addresses', 'dingaling') self.assert_compile( q, "SELECT users.id AS users_id, users.name AS users_name " "FROM users JOIN orders ON users.id = orders.user_id " "JOIN addresses ON users.id = addresses.user_id " "JOIN order_items AS order_items_1 ON orders.id = " "order_items_1.order_id " "JOIN items ON items.id = order_items_1.item_id " "JOIN dingalings ON addresses.id = dingalings.address_id" ) def test_overlapping_paths_outerjoin(self): User = self.classes.User result = create_session().query(User).outerjoin('orders', 'items').\ filter_by(id=3).outerjoin('orders','address').filter_by(id=1).all() assert [User(id=7, name='jack')] == result def test_from_joinpoint(self): Item, User, Order = (self.classes.Item, self.classes.User, self.classes.Order) sess = create_session() for oalias,ialias in [(True, True), (False, False), (True, False), (False, True)]: eq_( sess.query(User).join('orders', aliased=oalias).\ join('items', from_joinpoint=True, aliased=ialias).\ filter(Item.description == 'item 4').all(), 
[User(name='jack')] ) # use middle criterion eq_( sess.query(User).join('orders', aliased=oalias).\ filter(Order.user_id==9).\ join('items', from_joinpoint=True, aliased=ialias).\ filter(Item.description=='item 4').all(), [] ) orderalias = aliased(Order) itemalias = aliased(Item) eq_( sess.query(User).join(orderalias, 'orders'). join(itemalias, 'items', from_joinpoint=True). filter(itemalias.description == 'item 4').all(), [User(name='jack')] ) eq_( sess.query(User).join(orderalias, 'orders'). join(itemalias, 'items', from_joinpoint=True). filter(orderalias.user_id==9).\ filter(itemalias.description=='item 4').all(), [] ) def test_join_nonmapped_column(self): """test that the search for a 'left' doesn't trip on non-mapped cols""" Order, User = self.classes.Order, self.classes.User sess = create_session() # intentionally join() with a non-existent "left" side self.assert_compile( sess.query(User.id, literal_column('foo')).join(Order.user), "SELECT users.id AS users_id, foo FROM " "orders JOIN users ON users.id = orders.user_id" ) def test_backwards_join(self): User, Address = self.classes.User, self.classes.Address # a more controversial feature. join from # User->Address, but the onclause is Address.user. sess = create_session() eq_( sess.query(User).join(Address.user).\ filter(Address.email_address=='ed@wood.com').all(), [User(id=8,name=u'ed')] ) # its actually not so controversial if you view it in terms # of multiple entities. eq_( sess.query(User, Address).join(Address.user).filter(Address.email_address=='ed@wood.com').all(), [(User(id=8,name=u'ed'), Address(email_address='ed@wood.com'))] ) # this was the controversial part. now, raise an error if the feature is abused. # before the error raise was added, this would silently work..... assert_raises( sa_exc.InvalidRequestError, sess.query(User).join, Address, Address.user, ) # but this one would silently fail adalias = aliased(Address) assert_raises( sa_exc.InvalidRequestError, sess.query(User).join, adalias, Address.user, ) def test_multiple_with_aliases(self): Order, User = self.classes.Order, self.classes.User sess = create_session() ualias = aliased(User) oalias1 = aliased(Order) oalias2 = aliased(Order) self.assert_compile( sess.query(ualias).join(oalias1, ualias.orders). join(oalias2, ualias.orders). 
filter(or_(oalias1.user_id==9, oalias2.user_id==7)), "SELECT users_1.id AS users_1_id, users_1.name AS users_1_name FROM users AS users_1 " "JOIN orders AS orders_1 ON users_1.id = orders_1.user_id JOIN orders AS orders_2 ON " "users_1.id = orders_2.user_id WHERE orders_1.user_id = :user_id_1 OR orders_2.user_id = :user_id_2", use_default_dialect=True ) def test_select_from_orm_joins(self): User, Order = self.classes.User, self.classes.Order sess = create_session() ualias = aliased(User) oalias1 = aliased(Order) oalias2 = aliased(Order) self.assert_compile( join(User, oalias2, User.id==oalias2.user_id), "users JOIN orders AS orders_1 ON users.id = orders_1.user_id", use_default_dialect=True ) self.assert_compile( join(ualias, oalias1, ualias.orders), "users AS users_1 JOIN orders AS orders_1 ON users_1.id = orders_1.user_id", use_default_dialect=True ) self.assert_compile( sess.query(ualias).select_from(join(ualias, oalias1, ualias.orders)), "SELECT users_1.id AS users_1_id, users_1.name AS users_1_name FROM users AS users_1 " "JOIN orders AS orders_1 ON users_1.id = orders_1.user_id", use_default_dialect=True ) self.assert_compile( sess.query(User, ualias).select_from(join(ualias, oalias1, ualias.orders)), "SELECT users.id AS users_id, users.name AS users_name, users_1.id AS users_1_id, " "users_1.name AS users_1_name FROM users, users AS users_1 JOIN orders AS orders_1 ON users_1.id = orders_1.user_id", use_default_dialect=True ) # this fails (and we cant quite fix right now). if False: self.assert_compile( sess.query(User, ualias).\ join(oalias1, ualias.orders).\ join(oalias2, User.id==oalias2.user_id).\ filter(or_(oalias1.user_id==9, oalias2.user_id==7)), "SELECT users.id AS users_id, users.name AS users_name, users_1.id AS users_1_id, users_1.name AS " "users_1_name FROM users JOIN orders AS orders_2 ON users.id = orders_2.user_id, " "users AS users_1 JOIN orders AS orders_1 ON users_1.id = orders_1.user_id " "WHERE orders_1.user_id = :user_id_1 OR orders_2.user_id = :user_id_2", use_default_dialect=True ) # this is the same thing using explicit orm.join() (which now offers multiple again) self.assert_compile( sess.query(User, ualias).\ select_from( join(ualias, oalias1, ualias.orders), join(User, oalias2, User.id==oalias2.user_id), ).\ filter(or_(oalias1.user_id==9, oalias2.user_id==7)), "SELECT users.id AS users_id, users.name AS users_name, users_1.id AS users_1_id, users_1.name AS " "users_1_name FROM users AS users_1 JOIN orders AS orders_1 ON users_1.id = orders_1.user_id, " "users JOIN orders AS orders_2 ON users.id = orders_2.user_id " "WHERE orders_1.user_id = :user_id_1 OR orders_2.user_id = :user_id_2", use_default_dialect=True ) def test_overlapping_backwards_joins(self): User, Order = self.classes.User, self.classes.Order sess = create_session() oalias1 = aliased(Order) oalias2 = aliased(Order) # this is invalid SQL - joins from orders_1/orders_2 to User twice. # but that is what was asked for so they get it ! 
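# --------------------------------------------------------------------------
# Editor's note -- illustrative sketch only, not part of the distribution.
# The tests in this section join to two aliases of the same relationship
# (two aliased(Order) targets against User.orders) and then filter across
# both aliases with or_().  The following standalone script shows the same
# pattern against a hypothetical User/Order mapping on an in-memory SQLite
# database; all names here are invented for illustration.
#
#     from sqlalchemy import (create_engine, Column, Integer, String,
#                             ForeignKey, or_)
#     from sqlalchemy.orm import relationship, sessionmaker, aliased
#     from sqlalchemy.ext.declarative import declarative_base
#
#     Base = declarative_base()
#
#     class User(Base):
#         __tablename__ = 'users'
#         id = Column(Integer, primary_key=True)
#         name = Column(String(30))
#         orders = relationship("Order")
#
#     class Order(Base):
#         __tablename__ = 'orders'
#         id = Column(Integer, primary_key=True)
#         user_id = Column(Integer, ForeignKey('users.id'))
#
#     engine = create_engine('sqlite://')
#     Base.metadata.create_all(engine)
#     session = sessionmaker(bind=engine)()
#
#     o1, o2 = aliased(Order), aliased(Order)
#     q = (session.query(User)
#          .join(o1, User.orders)
#          .join(o2, User.orders)
#          .filter(or_(o1.id == 1, o2.id == 2)))
#     print(q)   # renders two separate JOINs to "orders" (orders_1 / orders_2)
#
# Without two distinct aliases the joins could not be told apart in the
# WHERE clause, which is why the tests use aliased() here.
# --------------------------------------------------------------------------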
self.assert_compile( sess.query(User).join(oalias1.user).join(oalias2.user), "SELECT users.id AS users_id, users.name AS users_name FROM orders AS orders_1 " "JOIN users ON users.id = orders_1.user_id, orders AS orders_2 JOIN users ON users.id = orders_2.user_id", use_default_dialect=True, ) def test_replace_multiple_from_clause(self): """test adding joins onto multiple FROM clauses""" User, Order, Address = (self.classes.User, self.classes.Order, self.classes.Address) sess = create_session() self.assert_compile( sess.query(Address, User).join(Address.dingaling).join(User.orders, Order.items), "SELECT addresses.id AS addresses_id, addresses.user_id AS addresses_user_id, " "addresses.email_address AS addresses_email_address, users.id AS users_id, " "users.name AS users_name FROM addresses JOIN dingalings ON addresses.id = dingalings.address_id, " "users JOIN orders ON users.id = orders.user_id JOIN order_items AS order_items_1 " "ON orders.id = order_items_1.order_id JOIN items ON items.id = order_items_1.item_id", use_default_dialect = True ) def test_multiple_adaption(self): Item, Order, User = (self.classes.Item, self.classes.Order, self.classes.User) sess = create_session() self.assert_compile( sess.query(User).join(User.orders, Order.items, aliased=True).filter(Order.id==7).filter(Item.id==8), "SELECT users.id AS users_id, users.name AS users_name FROM users JOIN orders AS orders_1 " "ON users.id = orders_1.user_id JOIN order_items AS order_items_1 ON orders_1.id = order_items_1.order_id " "JOIN items AS items_1 ON items_1.id = order_items_1.item_id WHERE orders_1.id = :id_1 AND items_1.id = :id_2", use_default_dialect=True ) def test_onclause_conditional_adaption(self): Item, Order, orders, order_items, User = (self.classes.Item, self.classes.Order, self.tables.orders, self.tables.order_items, self.classes.User) sess = create_session() # this is now a very weird test, nobody should really # be using the aliased flag in this way. self.assert_compile( sess.query(User).join(User.orders, aliased=True). join(Item, and_(Order.id==order_items.c.order_id, order_items.c.item_id==Item.id), from_joinpoint=True, aliased=True ), "SELECT users.id AS users_id, users.name AS users_name FROM users JOIN " "orders AS orders_1 ON users.id = orders_1.user_id JOIN items AS items_1 " "ON orders_1.id = order_items.order_id AND order_items.item_id = items_1.id", use_default_dialect=True ) oalias = orders.select() self.assert_compile( sess.query(User).join(oalias, User.orders). 
join(Item, and_(Order.id==order_items.c.order_id, order_items.c.item_id==Item.id), from_joinpoint=True ), "SELECT users.id AS users_id, users.name AS users_name FROM users JOIN " "(SELECT orders.id AS id, orders.user_id AS user_id, orders.address_id AS address_id, orders.description " "AS description, orders.isopen AS isopen FROM orders) AS anon_1 ON users.id = anon_1.user_id JOIN items " "ON anon_1.id = order_items.order_id AND order_items.item_id = items.id", use_default_dialect=True ) # query.join(, aliased=True).join(target, sql_expression) # or: query.join(path_to_some_joined_table_mapper).join(target, sql_expression) def test_pure_expression_error(self): addresses, users = self.tables.addresses, self.tables.users sess = create_session() self.assert_compile( sess.query(users).join(addresses), "SELECT users.id AS users_id, users.name AS users_name " "FROM users JOIN addresses ON users.id = addresses.user_id" ) def test_orderby_arg_bug(self): User, users, Order = (self.classes.User, self.tables.users, self.classes.Order) sess = create_session() # no arg error result = sess.query(User).join('orders', aliased=True).order_by(Order.id).reset_joinpoint().order_by(users.c.id).all() def test_no_onclause(self): Item, User, Order = (self.classes.Item, self.classes.User, self.classes.Order) sess = create_session() eq_( sess.query(User).select_from(join(User, Order).join(Item, Order.items)).filter(Item.description == 'item 4').all(), [User(name='jack')] ) eq_( sess.query(User.name).select_from(join(User, Order).join(Item, Order.items)).filter(Item.description == 'item 4').all(), [('jack',)] ) eq_( sess.query(User).join(Order).join(Item, Order.items) .filter(Item.description == 'item 4').all(), [User(name='jack')] ) def test_clause_onclause(self): Item, Order, users, order_items, User = (self.classes.Item, self.classes.Order, self.tables.users, self.tables.order_items, self.classes.User) sess = create_session() eq_( sess.query(User).join(Order, User.id==Order.user_id). join(order_items, Order.id==order_items.c.order_id). join(Item, order_items.c.item_id==Item.id). filter(Item.description == 'item 4').all(), [User(name='jack')] ) eq_( sess.query(User.name).join(Order, User.id==Order.user_id). join(order_items, Order.id==order_items.c.order_id). join(Item, order_items.c.item_id==Item.id). filter(Item.description == 'item 4').all(), [('jack',)] ) ualias = aliased(User) eq_( sess.query(ualias.name).join(Order, ualias.id==Order.user_id). join(order_items, Order.id==order_items.c.order_id). join(Item, order_items.c.item_id==Item.id). filter(Item.description == 'item 4').all(), [('jack',)] ) # explicit onclause with from_self(), means # the onclause must be aliased against the query's custom # FROM object eq_( sess.query(User).order_by(User.id).offset(2). from_self(). join(Order, User.id==Order.user_id). all(), [User(name='fred')] ) # same with an explicit select_from() eq_( sess.query(User).select_from(select([users]). order_by(User.id).offset(2).alias()). join(Order, User.id==Order.user_id). 
all(), [User(name='fred')] ) def test_aliased_classes(self): User, Address = self.classes.User, self.classes.Address sess = create_session() (user7, user8, user9, user10) = sess.query(User).all() (address1, address2, address3, address4, address5) = sess.query(Address).all() expected = [(user7, address1), (user8, address2), (user8, address3), (user8, address4), (user9, address5), (user10, None)] q = sess.query(User) AdAlias = aliased(Address) q = q.add_entity(AdAlias).select_from(outerjoin(User, AdAlias)) l = q.order_by(User.id, AdAlias.id).all() eq_(l, expected) sess.expunge_all() q = sess.query(User).add_entity(AdAlias) l = q.select_from(outerjoin(User, AdAlias)).filter(AdAlias.email_address=='ed@bettyboop.com').all() eq_(l, [(user8, address3)]) l = q.select_from(outerjoin(User, AdAlias, 'addresses')).filter(AdAlias.email_address=='ed@bettyboop.com').all() eq_(l, [(user8, address3)]) l = q.select_from(outerjoin(User, AdAlias, User.id==AdAlias.user_id)).filter(AdAlias.email_address=='ed@bettyboop.com').all() eq_(l, [(user8, address3)]) # this is the first test where we are joining "backwards" - from AdAlias to User even though # the query is against User q = sess.query(User, AdAlias) l = q.join(AdAlias.user).filter(User.name=='ed').order_by(User.id, AdAlias.id) eq_(l.all(), [(user8, address2),(user8, address3),(user8, address4),]) q = sess.query(User, AdAlias).select_from(join(AdAlias, User, AdAlias.user)).filter(User.name=='ed') eq_(l.all(), [(user8, address2),(user8, address3),(user8, address4),]) def test_expression_onclauses(self): Order, User = self.classes.Order, self.classes.User sess = create_session() subq = sess.query(User).subquery() self.assert_compile( sess.query(User).join(subq, User.name==subq.c.name), "SELECT users.id AS users_id, users.name AS users_name " "FROM users JOIN (SELECT users.id AS id, users.name " "AS name FROM users) AS anon_1 ON users.name = anon_1.name", use_default_dialect=True ) subq = sess.query(Order).subquery() self.assert_compile( sess.query(User).join(subq, User.id==subq.c.user_id), "SELECT users.id AS users_id, users.name AS users_name FROM " "users JOIN (SELECT orders.id AS id, orders.user_id AS user_id, " "orders.address_id AS address_id, orders.description AS " "description, orders.isopen AS isopen FROM orders) AS " "anon_1 ON users.id = anon_1.user_id", use_default_dialect=True ) self.assert_compile( sess.query(User).join(Order, User.id==Order.user_id), "SELECT users.id AS users_id, users.name AS users_name " "FROM users JOIN orders ON users.id = orders.user_id", use_default_dialect=True ) def test_implicit_joins_from_aliases(self): Item, User, Order = (self.classes.Item, self.classes.User, self.classes.Order) sess = create_session() OrderAlias = aliased(Order) eq_( sess.query(OrderAlias).join('items').filter_by(description='item 3').\ order_by(OrderAlias.id).all(), [ Order(address_id=1,description=u'order 1',isopen=0,user_id=7,id=1), Order(address_id=4,description=u'order 2',isopen=0,user_id=9,id=2), Order(address_id=1,description=u'order 3',isopen=1,user_id=7,id=3) ] ) eq_( sess.query(User, OrderAlias, Item.description). join(OrderAlias, 'orders'). join('items', from_joinpoint=True). 
filter_by(description='item 3').\ order_by(User.id, OrderAlias.id).all(), [ (User(name=u'jack',id=7), Order(address_id=1,description=u'order 1',isopen=0,user_id=7,id=1), u'item 3'), (User(name=u'jack',id=7), Order(address_id=1,description=u'order 3',isopen=1,user_id=7,id=3), u'item 3'), (User(name=u'fred',id=9), Order(address_id=4,description=u'order 2',isopen=0,user_id=9,id=2), u'item 3') ] ) def test_aliased_classes_m2m(self): Item, Order = self.classes.Item, self.classes.Order sess = create_session() (order1, order2, order3, order4, order5) = sess.query(Order).all() (item1, item2, item3, item4, item5) = sess.query(Item).all() expected = [ (order1, item1), (order1, item2), (order1, item3), (order2, item1), (order2, item2), (order2, item3), (order3, item3), (order3, item4), (order3, item5), (order4, item1), (order4, item5), (order5, item5), ] q = sess.query(Order) q = q.add_entity(Item).select_from(join(Order, Item, 'items')).order_by(Order.id, Item.id) l = q.all() eq_(l, expected) IAlias = aliased(Item) q = sess.query(Order, IAlias).select_from(join(Order, IAlias, 'items')).filter(IAlias.description=='item 3') l = q.all() eq_(l, [ (order1, item3), (order2, item3), (order3, item3), ] ) def test_joins_from_adapted_entities(self): User = self.classes.User # test for #1853 session = create_session() first = session.query(User) second = session.query(User) unioned = first.union(second) subquery = session.query(User.id).subquery() join = subquery, subquery.c.id == User.id joined = unioned.outerjoin(*join) self.assert_compile(joined, 'SELECT anon_1.users_id AS ' 'anon_1_users_id, anon_1.users_name AS ' 'anon_1_users_name FROM (SELECT users.id ' 'AS users_id, users.name AS users_name ' 'FROM users UNION SELECT users.id AS ' 'users_id, users.name AS users_name FROM ' 'users) AS anon_1 LEFT OUTER JOIN (SELECT ' 'users.id AS id FROM users) AS anon_2 ON ' 'anon_2.id = anon_1.users_id', use_default_dialect=True) first = session.query(User.id) second = session.query(User.id) unioned = first.union(second) subquery = session.query(User.id).subquery() join = subquery, subquery.c.id == User.id joined = unioned.outerjoin(*join) self.assert_compile(joined, 'SELECT anon_1.users_id AS anon_1_users_id ' 'FROM (SELECT users.id AS users_id FROM ' 'users UNION SELECT users.id AS users_id ' 'FROM users) AS anon_1 LEFT OUTER JOIN ' '(SELECT users.id AS id FROM users) AS ' 'anon_2 ON anon_2.id = anon_1.users_id', use_default_dialect=True) def test_reset_joinpoint(self): User = self.classes.User for aliased in (True, False): # load a user who has an order that contains item id 3 and address id 1 (order 3, owned by jack) result = create_session().query(User).join('orders', 'items', aliased=aliased).filter_by(id=3).reset_joinpoint().join('orders','address', aliased=aliased).filter_by(id=1).all() assert [User(id=7, name='jack')] == result result = create_session().query(User).outerjoin('orders', 'items', aliased=aliased).filter_by(id=3).reset_joinpoint().outerjoin('orders','address', aliased=aliased).filter_by(id=1).all() assert [User(id=7, name='jack')] == result def test_overlap_with_aliases(self): orders, User, users = (self.tables.orders, self.classes.User, self.tables.users) oalias = orders.alias('oalias') result = create_session().query(User).select_from(users.join(oalias)).filter(oalias.c.description.in_(["order 1", "order 2", "order 3"])).join('orders', 'items').order_by(User.id).all() assert [User(id=7, name='jack'), User(id=9, name='fred')] == result result = 
create_session().query(User).select_from(users.join(oalias)).filter(oalias.c.description.in_(["order 1", "order 2", "order 3"])).join('orders', 'items').filter_by(id=4).all() assert [User(id=7, name='jack')] == result def test_aliased(self): """test automatic generation of aliased joins.""" Item, Order, User, Address = (self.classes.Item, self.classes.Order, self.classes.User, self.classes.Address) sess = create_session() # test a basic aliasized path q = sess.query(User).join('addresses', aliased=True).filter_by(email_address='jack@bean.com') assert [User(id=7)] == q.all() q = sess.query(User).join('addresses', aliased=True).filter(Address.email_address=='jack@bean.com') assert [User(id=7)] == q.all() q = sess.query(User).join('addresses', aliased=True).filter(or_(Address.email_address=='jack@bean.com', Address.email_address=='fred@fred.com')) assert [User(id=7), User(id=9)] == q.all() # test two aliasized paths, one to 'orders' and the other to 'orders','items'. # one row is returned because user 7 has order 3 and also has order 1 which has item 1 # this tests a o2m join and a m2m join. q = sess.query(User).join('orders', aliased=True).filter(Order.description=="order 3").join('orders', 'items', aliased=True).filter(Item.description=="item 1") assert q.count() == 1 assert [User(id=7)] == q.all() # test the control version - same joins but not aliased. rows are not returned because order 3 does not have item 1 q = sess.query(User).join('orders').filter(Order.description=="order 3").join('orders', 'items').filter(Item.description=="item 1") assert [] == q.all() assert q.count() == 0 # the left half of the join condition of the any() is aliased. q = sess.query(User).join('orders', aliased=True).filter(Order.items.any(Item.description=='item 4')) assert [User(id=7)] == q.all() # test that aliasing gets reset when join() is called q = sess.query(User).join('orders', aliased=True).filter(Order.description=="order 3").join('orders', aliased=True).filter(Order.description=="order 5") assert q.count() == 1 assert [User(id=7)] == q.all() def test_aliased_order_by(self): User = self.classes.User sess = create_session() ualias = aliased(User) eq_( sess.query(User, ualias).filter(User.id > ualias.id).order_by(desc(ualias.id), User.name).all(), [ (User(id=10,name=u'chuck'), User(id=9,name=u'fred')), (User(id=10,name=u'chuck'), User(id=8,name=u'ed')), (User(id=9,name=u'fred'), User(id=8,name=u'ed')), (User(id=10,name=u'chuck'), User(id=7,name=u'jack')), (User(id=8,name=u'ed'), User(id=7,name=u'jack')), (User(id=9,name=u'fred'), User(id=7,name=u'jack')) ] ) def test_plain_table(self): addresses, User = self.tables.addresses, self.classes.User sess = create_session() eq_( sess.query(User.name).join(addresses, User.id==addresses.c.user_id).order_by(User.id).all(), [(u'jack',), (u'ed',), (u'ed',), (u'ed',), (u'fred',)] ) def test_no_joinpoint_expr(self): User, users = self.classes.User, self.tables.users sess = create_session() # these are consistent regardless of # select_from() being present. assert_raises_message( sa_exc.InvalidRequestError, "Could not find a FROM clause to join from. Tried joining " "to .*?, but got: " "Can't find any foreign key relationships " "between 'users' and 'users'.", sess.query(users.c.id).join, User ) assert_raises_message( sa_exc.InvalidRequestError, "Could not find a FROM clause to join from. 
Tried joining " "to .*?, but got: " "Can't find any foreign key relationships " "between 'users' and 'users'.", sess.query(users.c.id).select_from(users).join, User ) def test_select_from(self): """Test that the left edge of the join can be set reliably with select_from().""" Item, Order, User = (self.classes.Item, self.classes.Order, self.classes.User) sess = create_session() self.assert_compile( sess.query(Item.id).select_from(User).join(User.orders).join(Order.items), "SELECT items.id AS items_id FROM users JOIN orders ON " "users.id = orders.user_id JOIN order_items AS order_items_1 " "ON orders.id = order_items_1.order_id JOIN items ON items.id = " "order_items_1.item_id", use_default_dialect=True ) # here, the join really wants to add a second FROM clause # for "Item". but select_from disallows that self.assert_compile( sess.query(Item.id).select_from(User).join(Item, User.id==Item.id), "SELECT items.id AS items_id FROM users JOIN items ON users.id = items.id", use_default_dialect=True ) def test_from_self_resets_joinpaths(self): """test a join from from_self() doesn't confuse joins inside the subquery with the outside. """ Item, Keyword = self.classes.Item, self.classes.Keyword sess = create_session() self.assert_compile( sess.query(Item).join(Item.keywords).from_self(Keyword).join(Item.keywords), "SELECT keywords.id AS keywords_id, keywords.name AS keywords_name FROM " "(SELECT items.id AS items_id, items.description AS items_description " "FROM items JOIN item_keywords AS item_keywords_1 ON items.id = " "item_keywords_1.item_id JOIN keywords ON keywords.id = item_keywords_1.keyword_id) " "AS anon_1 JOIN item_keywords AS item_keywords_2 ON " "anon_1.items_id = item_keywords_2.item_id " "JOIN keywords ON " "keywords.id = item_keywords_2.keyword_id", use_default_dialect=True ) class JoinFromSelectableTest(fixtures.MappedTest, AssertsCompiledSQL): __dialect__ = 'default' run_setup_mappers = 'once' @classmethod def define_tables(cls, metadata): Table('table1', metadata, Column('id', Integer, primary_key=True) ) Table('table2', metadata, Column('id', Integer, primary_key=True), Column('t1_id', Integer) ) @classmethod def setup_classes(cls): table1, table2 = cls.tables.table1, cls.tables.table2 class T1(cls.Comparable): pass class T2(cls.Comparable): pass mapper(T1, table1) mapper(T2, table2) def test_select_mapped_to_mapped_explicit_left(self): T1, T2 = self.classes.T1, self.classes.T2 sess = Session() subq = sess.query(T2.t1_id, func.count(T2.id).label('count')).\ group_by(T2.t1_id).subquery() self.assert_compile( sess.query(subq.c.count, T1.id).select_from(subq).join(T1, subq.c.t1_id==T1.id), "SELECT anon_1.count AS anon_1_count, table1.id AS table1_id " "FROM (SELECT table2.t1_id AS t1_id, " "count(table2.id) AS count FROM table2 " "GROUP BY table2.t1_id) AS anon_1 JOIN table1 ON anon_1.t1_id = table1.id" ) def test_select_mapped_to_mapped_implicit_left(self): T1, T2 = self.classes.T1, self.classes.T2 sess = Session() subq = sess.query(T2.t1_id, func.count(T2.id).label('count')).\ group_by(T2.t1_id).subquery() self.assert_compile( sess.query(subq.c.count, T1.id).join(T1, subq.c.t1_id==T1.id), "SELECT anon_1.count AS anon_1_count, table1.id AS table1_id " "FROM (SELECT table2.t1_id AS t1_id, " "count(table2.id) AS count FROM table2 " "GROUP BY table2.t1_id) AS anon_1 JOIN table1 ON anon_1.t1_id = table1.id" ) def test_select_mapped_to_select_explicit_left(self): T1, T2 = self.classes.T1, self.classes.T2 sess = Session() subq = sess.query(T2.t1_id, func.count(T2.id).label('count')).\ 
group_by(T2.t1_id).subquery() self.assert_compile( sess.query(subq.c.count, T1.id).select_from(T1).join(subq, subq.c.t1_id==T1.id), "SELECT anon_1.count AS anon_1_count, table1.id AS table1_id " "FROM table1 JOIN (SELECT table2.t1_id AS t1_id, " "count(table2.id) AS count FROM table2 GROUP BY table2.t1_id) " "AS anon_1 ON anon_1.t1_id = table1.id" ) def test_select_mapped_to_select_implicit_left(self): T1, T2 = self.classes.T1, self.classes.T2 sess = Session() subq = sess.query(T2.t1_id, func.count(T2.id).label('count')).\ group_by(T2.t1_id).subquery() assert_raises_message( sa_exc.InvalidRequestError, r"Can't construct a join from ", sess.query(subq.c.count, T1.id).join, subq, subq.c.t1_id==T1.id, ) def test_mapped_select_to_mapped_implicit_left(self): T1, T2 = self.classes.T1, self.classes.T2 sess = Session() subq = sess.query(T2.t1_id, func.count(T2.id).label('count')).\ group_by(T2.t1_id).subquery() # this query is wrong, but verifying behavior stays the same # (or improves, like an error message) self.assert_compile( sess.query(T1.id, subq.c.count).join(T1, subq.c.t1_id==T1.id), "SELECT table1.id AS table1_id, anon_1.count AS anon_1_count FROM " "(SELECT table2.t1_id AS t1_id, count(table2.id) AS count FROM " "table2 GROUP BY table2.t1_id) AS anon_1, table1 JOIN table1 " "ON anon_1.t1_id = table1.id" ) def test_mapped_select_to_mapped_explicit_left(self): T1, T2 = self.classes.T1, self.classes.T2 sess = Session() subq = sess.query(T2.t1_id, func.count(T2.id).label('count')).\ group_by(T2.t1_id).subquery() self.assert_compile( sess.query(T1.id, subq.c.count).select_from(subq).join(T1, subq.c.t1_id==T1.id), "SELECT table1.id AS table1_id, anon_1.count AS anon_1_count " "FROM (SELECT table2.t1_id AS t1_id, count(table2.id) AS count " "FROM table2 GROUP BY table2.t1_id) AS anon_1 JOIN table1 " "ON anon_1.t1_id = table1.id" ) def test_mapped_select_to_select_explicit_left(self): T1, T2 = self.classes.T1, self.classes.T2 sess = Session() subq = sess.query(T2.t1_id, func.count(T2.id).label('count')).\ group_by(T2.t1_id).subquery() self.assert_compile( sess.query(T1.id, subq.c.count).select_from(T1).join(subq, subq.c.t1_id==T1.id), "SELECT table1.id AS table1_id, anon_1.count AS anon_1_count " "FROM table1 JOIN (SELECT table2.t1_id AS t1_id, count(table2.id) AS count " "FROM table2 GROUP BY table2.t1_id) AS anon_1 " "ON anon_1.t1_id = table1.id" ) def test_mapped_select_to_select_implicit_left(self): T1, T2 = self.classes.T1, self.classes.T2 sess = Session() subq = sess.query(T2.t1_id, func.count(T2.id).label('count')).\ group_by(T2.t1_id).subquery() self.assert_compile( sess.query(T1.id, subq.c.count).join(subq, subq.c.t1_id==T1.id), "SELECT table1.id AS table1_id, anon_1.count AS anon_1_count " "FROM table1 JOIN (SELECT table2.t1_id AS t1_id, count(table2.id) AS count " "FROM table2 GROUP BY table2.t1_id) AS anon_1 " "ON anon_1.t1_id = table1.id" ) class MultiplePathTest(fixtures.MappedTest, AssertsCompiledSQL): @classmethod def define_tables(cls, metadata): t1 = Table('t1', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('data', String(30)) ) t2 = Table('t2', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('data', String(30)) ) t1t2_1 = Table('t1t2_1', metadata, Column('t1id', Integer, ForeignKey('t1.id')), Column('t2id', Integer, ForeignKey('t2.id')) ) t1t2_2 = Table('t1t2_2', metadata, Column('t1id', Integer, ForeignKey('t1.id')), Column('t2id', Integer, ForeignKey('t2.id')) ) def test_basic(self): t2, 
t1t2_1, t1t2_2, t1 = (self.tables.t2, self.tables.t1t2_1, self.tables.t1t2_2, self.tables.t1) class T1(object):pass class T2(object):pass mapper(T1, t1, properties={ 't2s_1':relationship(T2, secondary=t1t2_1), 't2s_2':relationship(T2, secondary=t1t2_2), }) mapper(T2, t2) q = create_session().query(T1).join('t2s_1').filter(t2.c.id==5).reset_joinpoint().join('t2s_2') self.assert_compile( q, "SELECT t1.id AS t1_id, t1.data AS t1_data FROM t1 JOIN t1t2_1 AS t1t2_1_1 " "ON t1.id = t1t2_1_1.t1id JOIN t2 ON t2.id = t1t2_1_1.t2id JOIN t1t2_2 AS t1t2_2_1 " "ON t1.id = t1t2_2_1.t1id JOIN t2 ON t2.id = t1t2_2_1.t2id WHERE t2.id = :id_1" , use_default_dialect=True ) class SelfRefMixedTest(fixtures.MappedTest, AssertsCompiledSQL): run_setup_mappers = 'once' __dialect__ = default.DefaultDialect() @classmethod def define_tables(cls, metadata): nodes = Table('nodes', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('parent_id', Integer, ForeignKey('nodes.id')) ) sub_table = Table('sub_table', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('node_id', Integer, ForeignKey('nodes.id')), ) assoc_table = Table('assoc_table', metadata, Column('left_id', Integer, ForeignKey('nodes.id')), Column('right_id', Integer, ForeignKey('nodes.id')) ) @classmethod def setup_classes(cls): nodes, assoc_table, sub_table = (cls.tables.nodes, cls.tables.assoc_table, cls.tables.sub_table) class Node(cls.Comparable): pass class Sub(cls.Comparable): pass mapper(Node, nodes, properties={ 'children':relationship(Node, lazy='select', join_depth=3, backref=backref('parent', remote_side=[nodes.c.id]) ), 'subs' : relationship(Sub), 'assoc':relationship(Node, secondary=assoc_table, primaryjoin=nodes.c.id==assoc_table.c.left_id, secondaryjoin=nodes.c.id==assoc_table.c.right_id) }) mapper(Sub, sub_table) def test_o2m_aliased_plus_o2m(self): Node, Sub = self.classes.Node, self.classes.Sub sess = create_session() n1 = aliased(Node) self.assert_compile( sess.query(Node).join(n1, Node.children).join(Sub, n1.subs), "SELECT nodes.id AS nodes_id, nodes.parent_id AS nodes_parent_id " "FROM nodes JOIN nodes AS nodes_1 ON nodes.id = nodes_1.parent_id " "JOIN sub_table ON nodes_1.id = sub_table.node_id" ) self.assert_compile( sess.query(Node).join(n1, Node.children).join(Sub, Node.subs), "SELECT nodes.id AS nodes_id, nodes.parent_id AS nodes_parent_id " "FROM nodes JOIN nodes AS nodes_1 ON nodes.id = nodes_1.parent_id " "JOIN sub_table ON nodes.id = sub_table.node_id" ) def test_m2m_aliased_plus_o2m(self): Node, Sub = self.classes.Node, self.classes.Sub sess = create_session() n1 = aliased(Node) self.assert_compile( sess.query(Node).join(n1, Node.assoc).join(Sub, n1.subs), "SELECT nodes.id AS nodes_id, nodes.parent_id AS nodes_parent_id " "FROM nodes JOIN assoc_table AS assoc_table_1 ON nodes.id = " "assoc_table_1.left_id JOIN nodes AS nodes_1 ON nodes_1.id = " "assoc_table_1.right_id JOIN sub_table ON nodes_1.id = sub_table.node_id", ) self.assert_compile( sess.query(Node).join(n1, Node.assoc).join(Sub, Node.subs), "SELECT nodes.id AS nodes_id, nodes.parent_id AS nodes_parent_id " "FROM nodes JOIN assoc_table AS assoc_table_1 ON nodes.id = " "assoc_table_1.left_id JOIN nodes AS nodes_1 ON nodes_1.id = " "assoc_table_1.right_id JOIN sub_table ON nodes.id = sub_table.node_id", ) class CreateJoinsTest(fixtures.ORMTest, AssertsCompiledSQL): __dialect__ = 'default' def _inherits_fixture(self): m = MetaData() base = Table('base', m, Column('id', Integer, primary_key=True)) a 
= Table('a', m, Column('id', Integer, ForeignKey('base.id'), primary_key=True), Column('b_id', Integer, ForeignKey('b.id'))) b = Table('b', m, Column('id', Integer, ForeignKey('base.id'), primary_key=True), Column('c_id', Integer, ForeignKey('c.id'))) c = Table('c', m, Column('id', Integer, ForeignKey('base.id'), primary_key=True)) class Base(object): pass class A(Base): pass class B(Base): pass class C(Base): pass mapper(Base, base) mapper(A, a, inherits=Base, properties={'b':relationship(B, primaryjoin=a.c.b_id==b.c.id)}) mapper(B, b, inherits=Base, properties={'c':relationship(C, primaryjoin=b.c.c_id==c.c.id)}) mapper(C, c, inherits=Base) return A, B, C, Base def test_double_level_aliased_exists(self): A, B, C, Base = self._inherits_fixture() s = Session() self.assert_compile( s.query(A).filter(A.b.has(B.c.has(C.id==5))), "SELECT a.id AS a_id, base.id AS base_id, a.b_id AS a_b_id " "FROM base JOIN a ON base.id = a.id WHERE " "EXISTS (SELECT 1 FROM (SELECT base.id AS base_id, b.id AS " "b_id, b.c_id AS b_c_id FROM base JOIN b ON base.id = b.id) " "AS anon_1 WHERE a.b_id = anon_1.b_id AND (EXISTS " "(SELECT 1 FROM (SELECT base.id AS base_id, c.id AS c_id " "FROM base JOIN c ON base.id = c.id) AS anon_2 " "WHERE anon_1.b_c_id = anon_2.c_id AND anon_2.c_id = :id_1" ")))" ) class SelfReferentialTest(fixtures.MappedTest, AssertsCompiledSQL): run_setup_mappers = 'once' run_inserts = 'once' run_deletes = None @classmethod def define_tables(cls, metadata): Table('nodes', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('parent_id', Integer, ForeignKey('nodes.id')), Column('data', String(30))) @classmethod def setup_classes(cls): class Node(cls.Comparable): def append(self, node): self.children.append(node) @classmethod def setup_mappers(cls): Node, nodes = cls.classes.Node, cls.tables.nodes mapper(Node, nodes, properties={ 'children':relationship(Node, lazy='select', join_depth=3, backref=backref('parent', remote_side=[nodes.c.id]) ), }) @classmethod def insert_data(cls): Node = cls.classes.Node sess = create_session() n1 = Node(data='n1') n1.append(Node(data='n11')) n1.append(Node(data='n12')) n1.append(Node(data='n13')) n1.children[1].append(Node(data='n121')) n1.children[1].append(Node(data='n122')) n1.children[1].append(Node(data='n123')) sess.add(n1) sess.flush() sess.close() def test_join_1(self): Node = self.classes.Node sess = create_session() node = sess.query(Node).join('children', aliased=True).filter_by(data='n122').first() assert node.data=='n12' def test_join_2(self): Node = self.classes.Node sess = create_session() ret = sess.query(Node.data).join(Node.children, aliased=True).filter_by(data='n122').all() assert ret == [('n12',)] def test_join_3(self): Node = self.classes.Node sess = create_session() node = sess.query(Node).join('children', 'children', aliased=True).filter_by(data='n122').first() assert node.data=='n1' def test_join_4(self): Node = self.classes.Node sess = create_session() node = sess.query(Node).filter_by(data='n122').join('parent', aliased=True).filter_by(data='n12').\ join('parent', aliased=True, from_joinpoint=True).filter_by(data='n1').first() assert node.data == 'n122' def test_string_or_prop_aliased(self): """test that join('foo') behaves the same as join(Cls.foo) in a self referential scenario. 
""" Node = self.classes.Node sess = create_session() nalias = aliased(Node, sess.query(Node).filter_by(data='n1').subquery()) q1 = sess.query(nalias).join(nalias.children, aliased=True).\ join(Node.children, from_joinpoint=True) q2 = sess.query(nalias).join(nalias.children, aliased=True).\ join("children", from_joinpoint=True) for q in (q1, q2): self.assert_compile( q, "SELECT anon_1.id AS anon_1_id, anon_1.parent_id AS " "anon_1_parent_id, anon_1.data AS anon_1_data FROM " "(SELECT nodes.id AS id, nodes.parent_id AS parent_id, " "nodes.data AS data FROM nodes WHERE nodes.data = :data_1) " "AS anon_1 JOIN nodes AS nodes_1 ON anon_1.id = " "nodes_1.parent_id JOIN nodes ON nodes_1.id = nodes.parent_id", use_default_dialect=True ) q1 = sess.query(Node).join(nalias.children, aliased=True).\ join(Node.children, aliased=True, from_joinpoint=True).\ join(Node.children, from_joinpoint=True) q2 = sess.query(Node).join(nalias.children, aliased=True).\ join("children", aliased=True, from_joinpoint=True).\ join("children", from_joinpoint=True) for q in (q1, q2): self.assert_compile( q, "SELECT nodes.id AS nodes_id, nodes.parent_id AS " "nodes_parent_id, nodes.data AS nodes_data FROM (SELECT " "nodes.id AS id, nodes.parent_id AS parent_id, nodes.data " "AS data FROM nodes WHERE nodes.data = :data_1) AS anon_1 " "JOIN nodes AS nodes_1 ON anon_1.id = nodes_1.parent_id " "JOIN nodes AS nodes_2 ON nodes_1.id = nodes_2.parent_id " "JOIN nodes ON nodes_2.id = nodes.parent_id", use_default_dialect=True ) def test_from_self_inside_excludes_outside(self): """test the propagation of aliased() from inside to outside on a from_self().. """ Node = self.classes.Node sess = create_session() n1 = aliased(Node) # n1 is not inside the from_self(), so all cols must be maintained # on the outside self.assert_compile( sess.query(Node).filter(Node.data=='n122').from_self(n1, Node.id), "SELECT nodes_1.id AS nodes_1_id, nodes_1.parent_id AS nodes_1_parent_id, " "nodes_1.data AS nodes_1_data, anon_1.nodes_id AS anon_1_nodes_id " "FROM nodes AS nodes_1, (SELECT nodes.id AS nodes_id, " "nodes.parent_id AS nodes_parent_id, nodes.data AS nodes_data FROM " "nodes WHERE nodes.data = :data_1) AS anon_1", use_default_dialect=True ) parent = aliased(Node) grandparent = aliased(Node) q = sess.query(Node, parent, grandparent).\ join(parent, Node.parent).\ join(grandparent, parent.parent).\ filter(Node.data=='n122').filter(parent.data=='n12').\ filter(grandparent.data=='n1').from_self().limit(1) # parent, grandparent *are* inside the from_self(), so they # should get aliased to the outside. 
self.assert_compile( q, "SELECT anon_1.nodes_id AS anon_1_nodes_id, " "anon_1.nodes_parent_id AS anon_1_nodes_parent_id, " "anon_1.nodes_data AS anon_1_nodes_data, " "anon_1.nodes_1_id AS anon_1_nodes_1_id, " "anon_1.nodes_1_parent_id AS anon_1_nodes_1_parent_id, " "anon_1.nodes_1_data AS anon_1_nodes_1_data, " "anon_1.nodes_2_id AS anon_1_nodes_2_id, " "anon_1.nodes_2_parent_id AS anon_1_nodes_2_parent_id, " "anon_1.nodes_2_data AS anon_1_nodes_2_data " "FROM (SELECT nodes.id AS nodes_id, nodes.parent_id " "AS nodes_parent_id, nodes.data AS nodes_data, " "nodes_1.id AS nodes_1_id, nodes_1.parent_id AS nodes_1_parent_id, " "nodes_1.data AS nodes_1_data, nodes_2.id AS nodes_2_id, " "nodes_2.parent_id AS nodes_2_parent_id, nodes_2.data AS " "nodes_2_data FROM nodes JOIN nodes AS nodes_1 ON " "nodes_1.id = nodes.parent_id JOIN nodes AS nodes_2 " "ON nodes_2.id = nodes_1.parent_id " "WHERE nodes.data = :data_1 AND nodes_1.data = :data_2 AND " "nodes_2.data = :data_3) AS anon_1 LIMIT :param_1", {'param_1':1}, use_default_dialect=True ) def test_explicit_join_1(self): Node = self.classes.Node n1 = aliased(Node) n2 = aliased(Node) self.assert_compile( join(Node, n1, 'children').join(n2, 'children'), "nodes JOIN nodes AS nodes_1 ON nodes.id = nodes_1.parent_id " "JOIN nodes AS nodes_2 ON nodes_1.id = nodes_2.parent_id", use_default_dialect=True ) def test_explicit_join_2(self): Node = self.classes.Node n1 = aliased(Node) n2 = aliased(Node) self.assert_compile( join(Node, n1, Node.children).join(n2, n1.children), "nodes JOIN nodes AS nodes_1 ON nodes.id = nodes_1.parent_id " "JOIN nodes AS nodes_2 ON nodes_1.id = nodes_2.parent_id", use_default_dialect=True ) def test_explicit_join_3(self): Node = self.classes.Node n1 = aliased(Node) n2 = aliased(Node) # the join_to_left=False here is unfortunate. the default on this flag should # be False. 
self.assert_compile( join(Node, n1, Node.children).join(n2, Node.children, join_to_left=False), "nodes JOIN nodes AS nodes_1 ON nodes.id = nodes_1.parent_id " "JOIN nodes AS nodes_2 ON nodes.id = nodes_2.parent_id", use_default_dialect=True ) def test_explicit_join_4(self): Node = self.classes.Node sess = create_session() n1 = aliased(Node) n2 = aliased(Node) self.assert_compile( sess.query(Node).join(n1, Node.children).join(n2, n1.children), "SELECT nodes.id AS nodes_id, nodes.parent_id AS nodes_parent_id, nodes.data AS " "nodes_data FROM nodes JOIN nodes AS nodes_1 ON nodes.id = nodes_1.parent_id " "JOIN nodes AS nodes_2 ON nodes_1.id = nodes_2.parent_id", use_default_dialect=True ) def test_explicit_join_5(self): Node = self.classes.Node sess = create_session() n1 = aliased(Node) n2 = aliased(Node) self.assert_compile( sess.query(Node).join(n1, Node.children).join(n2, Node.children), "SELECT nodes.id AS nodes_id, nodes.parent_id AS nodes_parent_id, nodes.data AS " "nodes_data FROM nodes JOIN nodes AS nodes_1 ON nodes.id = nodes_1.parent_id " "JOIN nodes AS nodes_2 ON nodes.id = nodes_2.parent_id", use_default_dialect=True ) def test_explicit_join_6(self): Node = self.classes.Node sess = create_session() n1 = aliased(Node) node = sess.query(Node).select_from(join(Node, n1, 'children')).\ filter(n1.data == 'n122').first() assert node.data == 'n12' def test_explicit_join_7(self): Node = self.classes.Node sess = create_session() n1 = aliased(Node) n2 = aliased(Node) node = sess.query(Node).select_from( join(Node, n1, 'children').join(n2, 'children')).\ filter(n2.data == 'n122').first() assert node.data == 'n1' def test_explicit_join_8(self): Node = self.classes.Node sess = create_session() n1 = aliased(Node) n2 = aliased(Node) # mix explicit and named onclauses node = sess.query(Node).select_from( join(Node, n1, Node.id == n1.parent_id).join(n2, 'children')).\ filter(n2.data == 'n122').first() assert node.data == 'n1' def test_explicit_join_9(self): Node = self.classes.Node sess = create_session() n1 = aliased(Node) n2 = aliased(Node) node = sess.query(Node).select_from(join(Node, n1, 'parent').join(n2, 'parent')).\ filter(and_(Node.data == 'n122', n1.data == 'n12', n2.data == 'n1')).first() assert node.data == 'n122' def test_explicit_join_10(self): Node = self.classes.Node sess = create_session() n1 = aliased(Node) n2 = aliased(Node) eq_( list(sess.query(Node).select_from(join(Node, n1, 'parent').join(n2, 'parent')).\ filter(and_(Node.data == 'n122', n1.data == 'n12', n2.data == 'n1')).values(Node.data, n1.data, n2.data)), [('n122', 'n12', 'n1')]) def test_join_to_nonaliased(self): Node = self.classes.Node sess = create_session() n1 = aliased(Node) # using 'n1.parent' implicitly joins to unaliased Node eq_( sess.query(n1).join(n1.parent).filter(Node.data=='n1').all(), [Node(parent_id=1,data=u'n11',id=2), Node(parent_id=1,data=u'n12',id=3), Node(parent_id=1,data=u'n13',id=4)] ) # explicit (new syntax) eq_( sess.query(n1).join(Node, n1.parent).filter(Node.data=='n1').all(), [Node(parent_id=1,data=u'n11',id=2), Node(parent_id=1,data=u'n12',id=3), Node(parent_id=1,data=u'n13',id=4)] ) def test_multiple_explicit_entities_one(self): Node = self.classes.Node sess = create_session() parent = aliased(Node) grandparent = aliased(Node) eq_( sess.query(Node, parent, grandparent).\ join(parent, Node.parent).\ join(grandparent, parent.parent).\ filter(Node.data=='n122').filter(parent.data=='n12').\ filter(grandparent.data=='n1').first(), (Node(data='n122'), Node(data='n12'), Node(data='n1')) ) def 
test_multiple_explicit_entities_two(self): Node = self.classes.Node sess = create_session() parent = aliased(Node) grandparent = aliased(Node) eq_( sess.query(Node, parent, grandparent).\ join(parent, Node.parent).\ join(grandparent, parent.parent).\ filter(Node.data == 'n122').filter(parent.data == 'n12').\ filter(grandparent.data == 'n1').from_self().first(), (Node(data='n122'), Node(data='n12'), Node(data='n1')) ) def test_multiple_explicit_entities_three(self): Node = self.classes.Node sess = create_session() parent = aliased(Node) grandparent = aliased(Node) # same, change order around eq_( sess.query(parent, grandparent, Node).\ join(parent, Node.parent).\ join(grandparent, parent.parent).\ filter(Node.data == 'n122').filter(parent.data == 'n12').\ filter(grandparent.data == 'n1').from_self().first(), (Node(data='n12'), Node(data='n1'), Node(data='n122')) ) def test_multiple_explicit_entities_four(self): Node = self.classes.Node sess = create_session() parent = aliased(Node) grandparent = aliased(Node) eq_( sess.query(Node, parent, grandparent).\ join(parent, Node.parent).\ join(grandparent, parent.parent).\ filter(Node.data=='n122').filter(parent.data=='n12').\ filter(grandparent.data=='n1').\ options(joinedload(Node.children)).first(), (Node(data='n122'), Node(data='n12'), Node(data='n1')) ) def test_multiple_explicit_entities_five(self): Node = self.classes.Node sess = create_session() parent = aliased(Node) grandparent = aliased(Node) eq_( sess.query(Node, parent, grandparent).\ join(parent, Node.parent).\ join(grandparent, parent.parent).\ filter(Node.data=='n122').filter(parent.data=='n12').\ filter(grandparent.data=='n1').from_self().\ options(joinedload(Node.children)).first(), (Node(data='n122'), Node(data='n12'), Node(data='n1')) ) def test_any(self): Node = self.classes.Node sess = create_session() eq_(sess.query(Node).filter(Node.children.any(Node.data=='n1')).all(), []) eq_(sess.query(Node).filter(Node.children.any(Node.data=='n12')).all(), [Node(data='n1')]) eq_(sess.query(Node).filter(~Node.children.any()).order_by(Node.id).all(), [Node(data='n11'), Node(data='n13'),Node(data='n121'),Node(data='n122'),Node(data='n123'),]) def test_has(self): Node = self.classes.Node sess = create_session() eq_(sess.query(Node).filter(Node.parent.has(Node.data=='n12')).order_by(Node.id).all(), [Node(data='n121'),Node(data='n122'),Node(data='n123')]) eq_(sess.query(Node).filter(Node.parent.has(Node.data=='n122')).all(), []) eq_(sess.query(Node).filter(~Node.parent.has()).all(), [Node(data='n1')]) def test_contains(self): Node = self.classes.Node sess = create_session() n122 = sess.query(Node).filter(Node.data=='n122').one() eq_(sess.query(Node).filter(Node.children.contains(n122)).all(), [Node(data='n12')]) n13 = sess.query(Node).filter(Node.data=='n13').one() eq_(sess.query(Node).filter(Node.children.contains(n13)).all(), [Node(data='n1')]) def test_eq_ne(self): Node = self.classes.Node sess = create_session() n12 = sess.query(Node).filter(Node.data=='n12').one() eq_(sess.query(Node).filter(Node.parent==n12).all(), [Node(data='n121'),Node(data='n122'),Node(data='n123')]) eq_(sess.query(Node).filter(Node.parent != n12).all(), [Node(data='n1'), Node(data='n11'), Node(data='n12'), Node(data='n13')]) class SelfReferentialM2MTest(fixtures.MappedTest): run_setup_mappers = 'once' run_inserts = 'once' run_deletes = None @classmethod def define_tables(cls, metadata): nodes = Table('nodes', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('data', 
String(30))) node_to_nodes =Table('node_to_nodes', metadata, Column('left_node_id', Integer, ForeignKey('nodes.id'),primary_key=True), Column('right_node_id', Integer, ForeignKey('nodes.id'),primary_key=True), ) @classmethod def setup_classes(cls): class Node(cls.Comparable): pass @classmethod def insert_data(cls): Node, nodes, node_to_nodes = (cls.classes.Node, cls.tables.nodes, cls.tables.node_to_nodes) mapper(Node, nodes, properties={ 'children':relationship(Node, lazy='select', secondary=node_to_nodes, primaryjoin=nodes.c.id==node_to_nodes.c.left_node_id, secondaryjoin=nodes.c.id==node_to_nodes.c.right_node_id, ) }) sess = create_session() n1 = Node(data='n1') n2 = Node(data='n2') n3 = Node(data='n3') n4 = Node(data='n4') n5 = Node(data='n5') n6 = Node(data='n6') n7 = Node(data='n7') n1.children = [n2, n3, n4] n2.children = [n3, n6, n7] n3.children = [n5, n4] sess.add(n1) sess.add(n2) sess.add(n3) sess.add(n4) sess.flush() sess.close() def test_any(self): Node = self.classes.Node sess = create_session() eq_(sess.query(Node).filter(Node.children.any(Node.data == 'n3' )).order_by(Node.data).all(), [Node(data='n1'), Node(data='n2')]) def test_contains(self): Node = self.classes.Node sess = create_session() n4 = sess.query(Node).filter_by(data='n4').one() eq_(sess.query(Node).filter(Node.children.contains(n4)).order_by(Node.data).all(), [Node(data='n1'), Node(data='n3')]) eq_(sess.query(Node).filter(not_(Node.children.contains(n4))).order_by(Node.data).all(), [Node(data='n2'), Node(data='n4'), Node(data='n5'), Node(data='n6'), Node(data='n7')]) def test_explicit_join(self): Node = self.classes.Node sess = create_session() n1 = aliased(Node) eq_( sess.query(Node).select_from(join(Node, n1, 'children' )).filter(n1.data.in_(['n3', 'n7' ])).order_by(Node.id).all(), [Node(data='n1'), Node(data='n2')] ) SQLAlchemy-0.8.4/test/orm/test_lazy_relations.py0000644000076500000240000006150012251150016022450 0ustar classicstaff00000000000000"""basic tests of lazy loaded attributes""" from sqlalchemy.testing import assert_raises, assert_raises_message import datetime from sqlalchemy import exc as sa_exc from sqlalchemy.orm import attributes, exc as orm_exc import sqlalchemy as sa from sqlalchemy import testing from sqlalchemy import Integer, String, ForeignKey, SmallInteger from sqlalchemy.types import TypeDecorator from sqlalchemy.testing.schema import Table from sqlalchemy.testing.schema import Column from sqlalchemy.orm import mapper, relationship, create_session from sqlalchemy.testing import eq_ from sqlalchemy.testing import fixtures from test.orm import _fixtures class LazyTest(_fixtures.FixtureTest): run_inserts = 'once' run_deletes = None def test_basic(self): users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(User, users, properties={ 'addresses':relationship(mapper(Address, addresses), lazy='select') }) sess = create_session() q = sess.query(User) assert [User(id=7, addresses=[Address(id=1, email_address='jack@bean.com')])] == q.filter(users.c.id == 7).all() def test_needs_parent(self): """test the error raised when parent object is not bound.""" users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(User, users, properties={ 'addresses':relationship(mapper(Address, addresses), lazy='select') }) sess = create_session() q = sess.query(User) u = q.filter(users.c.id == 7).first() sess.expunge(u) assert_raises(orm_exc.DetachedInstanceError, getattr, u, 
'addresses') def test_orderby(self): users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(User, users, properties = { 'addresses':relationship(mapper(Address, addresses), lazy='select', order_by=addresses.c.email_address), }) q = create_session().query(User) assert [ User(id=7, addresses=[ Address(id=1) ]), User(id=8, addresses=[ Address(id=3, email_address='ed@bettyboop.com'), Address(id=4, email_address='ed@lala.com'), Address(id=2, email_address='ed@wood.com') ]), User(id=9, addresses=[ Address(id=5) ]), User(id=10, addresses=[]) ] == q.all() def test_orderby_secondary(self): """tests that a regular mapper select on a single table can order by a relationship to a second table""" Address, addresses, users, User = (self.classes.Address, self.tables.addresses, self.tables.users, self.classes.User) mapper(Address, addresses) mapper(User, users, properties = dict( addresses = relationship(Address, lazy='select'), )) q = create_session().query(User) l = q.filter(users.c.id==addresses.c.user_id).order_by(addresses.c.email_address).all() assert [ User(id=8, addresses=[ Address(id=2, email_address='ed@wood.com'), Address(id=3, email_address='ed@bettyboop.com'), Address(id=4, email_address='ed@lala.com'), ]), User(id=9, addresses=[ Address(id=5) ]), User(id=7, addresses=[ Address(id=1) ]), ] == l def test_orderby_desc(self): Address, addresses, users, User = (self.classes.Address, self.tables.addresses, self.tables.users, self.classes.User) mapper(Address, addresses) mapper(User, users, properties = dict( addresses = relationship(Address, lazy='select', order_by=[sa.desc(addresses.c.email_address)]), )) sess = create_session() assert [ User(id=7, addresses=[ Address(id=1) ]), User(id=8, addresses=[ Address(id=2, email_address='ed@wood.com'), Address(id=4, email_address='ed@lala.com'), Address(id=3, email_address='ed@bettyboop.com'), ]), User(id=9, addresses=[ Address(id=5) ]), User(id=10, addresses=[]) ] == sess.query(User).all() def test_no_orphan(self): """test that a lazily loaded child object is not marked as an orphan""" users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(User, users, properties={ 'addresses':relationship(Address, cascade="all,delete-orphan", lazy='select') }) mapper(Address, addresses) sess = create_session() user = sess.query(User).get(7) assert getattr(User, 'addresses').hasparent(attributes.instance_state(user.addresses[0]), optimistic=True) assert not sa.orm.class_mapper(Address)._is_orphan(attributes.instance_state(user.addresses[0])) def test_limit(self): """test limit operations combined with lazy-load relationships.""" users, items, order_items, orders, Item, User, Address, Order, addresses = (self.tables.users, self.tables.items, self.tables.order_items, self.tables.orders, self.classes.Item, self.classes.User, self.classes.Address, self.classes.Order, self.tables.addresses) mapper(Item, items) mapper(Order, orders, properties={ 'items':relationship(Item, secondary=order_items, lazy='select') }) mapper(User, users, properties={ 'addresses':relationship(mapper(Address, addresses), lazy='select'), 'orders':relationship(Order, lazy='select') }) sess = create_session() q = sess.query(User) if testing.against('maxdb', 'mssql'): l = q.limit(2).all() assert self.static.user_all_result[:2] == l else: l = q.limit(2).offset(1).all() assert self.static.user_all_result[1:3] == l def test_distinct(self): users, items, order_items, 
orders, Item, User, Address, Order, addresses = (self.tables.users, self.tables.items, self.tables.order_items, self.tables.orders, self.classes.Item, self.classes.User, self.classes.Address, self.classes.Order, self.tables.addresses) mapper(Item, items) mapper(Order, orders, properties={ 'items':relationship(Item, secondary=order_items, lazy='select') }) mapper(User, users, properties={ 'addresses':relationship(mapper(Address, addresses), lazy='select'), 'orders':relationship(Order, lazy='select') }) sess = create_session() q = sess.query(User) # use a union all to get a lot of rows to join against u2 = users.alias('u2') s = sa.union_all(u2.select(use_labels=True), u2.select(use_labels=True), u2.select(use_labels=True)).alias('u') l = q.filter(s.c.u2_id==User.id).order_by(User.id).distinct().all() eq_(self.static.user_all_result, l) def test_uselist_false_warning(self): """test that multiple rows received by a uselist=False raises a warning.""" User, users, orders, Order = (self.classes.User, self.tables.users, self.tables.orders, self.classes.Order) mapper(User, users, properties={ 'order':relationship(Order, uselist=False) }) mapper(Order, orders) s = create_session() u1 = s.query(User).filter(User.id==7).one() assert_raises(sa.exc.SAWarning, getattr, u1, 'order') def test_one_to_many_scalar(self): Address, addresses, users, User = (self.classes.Address, self.tables.addresses, self.tables.users, self.classes.User) mapper(User, users, properties = dict( address = relationship(mapper(Address, addresses), lazy='select', uselist=False) )) q = create_session().query(User) l = q.filter(users.c.id == 7).all() assert [User(id=7, address=Address(id=1))] == l def test_many_to_one_binds(self): Address, addresses, users, User = (self.classes.Address, self.tables.addresses, self.tables.users, self.classes.User) mapper(Address, addresses, primary_key=[addresses.c.user_id, addresses.c.email_address]) mapper(User, users, properties = dict( address = relationship(Address, uselist=False, primaryjoin=sa.and_(users.c.id==addresses.c.user_id, addresses.c.email_address=='ed@bettyboop.com') ) )) q = create_session().query(User) eq_( [ User(id=7, address=None), User(id=8, address=Address(id=3)), User(id=9, address=None), User(id=10, address=None), ], list(q) ) def test_double(self): """tests lazy loading with two relationships simulatneously, from the same table, using aliases. 
""" users, orders, User, Address, Order, addresses = (self.tables.users, self.tables.orders, self.classes.User, self.classes.Address, self.classes.Order, self.tables.addresses) openorders = sa.alias(orders, 'openorders') closedorders = sa.alias(orders, 'closedorders') mapper(Address, addresses) mapper(Order, orders) open_mapper = mapper(Order, openorders, non_primary=True) closed_mapper = mapper(Order, closedorders, non_primary=True) mapper(User, users, properties = dict( addresses = relationship(Address, lazy = True), open_orders = relationship(open_mapper, primaryjoin = sa.and_(openorders.c.isopen == 1, users.c.id==openorders.c.user_id), lazy='select'), closed_orders = relationship(closed_mapper, primaryjoin = sa.and_(closedorders.c.isopen == 0, users.c.id==closedorders.c.user_id), lazy='select') )) q = create_session().query(User) assert [ User( id=7, addresses=[Address(id=1)], open_orders = [Order(id=3)], closed_orders = [Order(id=1), Order(id=5)] ), User( id=8, addresses=[Address(id=2), Address(id=3), Address(id=4)], open_orders = [], closed_orders = [] ), User( id=9, addresses=[Address(id=5)], open_orders = [Order(id=4)], closed_orders = [Order(id=2)] ), User(id=10) ] == q.all() sess = create_session() user = sess.query(User).get(7) assert [Order(id=1), Order(id=5)] == create_session().query(closed_mapper).with_parent(user, property='closed_orders').all() assert [Order(id=3)] == create_session().query(open_mapper).with_parent(user, property='open_orders').all() def test_many_to_many(self): keywords, items, item_keywords, Keyword, Item = (self.tables.keywords, self.tables.items, self.tables.item_keywords, self.classes.Keyword, self.classes.Item) mapper(Keyword, keywords) mapper(Item, items, properties = dict( keywords = relationship(Keyword, secondary=item_keywords, lazy='select'), )) q = create_session().query(Item) assert self.static.item_keyword_result == q.all() assert self.static.item_keyword_result[0:2] == q.join('keywords').filter(keywords.c.name == 'red').all() def test_uses_get(self): """test that a simple many-to-one lazyload optimizes to use query.get().""" Address, addresses, users, User = (self.classes.Address, self.tables.addresses, self.tables.users, self.classes.User) for pj in ( None, users.c.id==addresses.c.user_id, addresses.c.user_id==users.c.id ): mapper(Address, addresses, properties = dict( user = relationship(mapper(User, users), lazy='select', primaryjoin=pj) )) sess = create_session() # load address a1 = sess.query(Address).filter_by(email_address="ed@wood.com").one() # load user that is attached to the address u1 = sess.query(User).get(8) def go(): # lazy load of a1.user should get it from the session assert a1.user is u1 self.assert_sql_count(testing.db, go, 0) sa.orm.clear_mappers() def test_uses_get_compatible_types(self): """test the use_get optimization with compatible but non-identical types""" User, Address = self.classes.User, self.classes.Address class IntDecorator(TypeDecorator): impl = Integer class SmallintDecorator(TypeDecorator): impl = SmallInteger class SomeDBInteger(sa.Integer): pass for tt in [ Integer, SmallInteger, IntDecorator, SmallintDecorator, SomeDBInteger, ]: m = sa.MetaData() users = Table('users', m, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', String(30), nullable=False), ) addresses = Table('addresses', m, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('user_id', tt, ForeignKey('users.id')), Column('email_address', String(50), nullable=False), ) 
mapper(Address, addresses, properties = dict( user = relationship(mapper(User, users)) )) sess = create_session(bind=testing.db) # load address a1 = sess.query(Address).filter_by(email_address="ed@wood.com").one() # load user that is attached to the address u1 = sess.query(User).get(8) def go(): # lazy load of a1.user should get it from the session assert a1.user is u1 self.assert_sql_count(testing.db, go, 0) sa.orm.clear_mappers() def test_many_to_one(self): users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(Address, addresses, properties = dict( user = relationship(mapper(User, users), lazy='select') )) sess = create_session() q = sess.query(Address) a = q.filter(addresses.c.id==1).one() assert a.user is not None u1 = sess.query(User).get(7) assert a.user is u1 def test_backrefs_dont_lazyload(self): users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(User, users, properties={ 'addresses':relationship(Address, backref='user') }) mapper(Address, addresses) sess = create_session() ad = sess.query(Address).filter_by(id=1).one() assert ad.user.id == 7 def go(): ad.user = None assert ad.user is None self.assert_sql_count(testing.db, go, 0) u1 = sess.query(User).filter_by(id=7).one() def go(): assert ad not in u1.addresses self.assert_sql_count(testing.db, go, 1) sess.expire(u1, ['addresses']) def go(): assert ad in u1.addresses self.assert_sql_count(testing.db, go, 1) sess.expire(u1, ['addresses']) ad2 = Address() def go(): ad2.user = u1 assert ad2.user is u1 self.assert_sql_count(testing.db, go, 0) def go(): assert ad2 in u1.addresses self.assert_sql_count(testing.db, go, 1) class GetterStateTest(_fixtures.FixtureTest): """test lazyloader on non-existent attribute returns expected attribute symbols, maintain expected state""" run_inserts = None def _u_ad_fixture(self, populate_user): users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(User, users, properties={ 'addresses':relationship(Address, backref='user') }) mapper(Address, addresses) sess = create_session() a1 = Address(email_address='a1') sess.add(a1) if populate_user: a1.user = User(name='ed') sess.flush() if populate_user: sess.expire_all() return User, Address, sess, a1 def test_get_empty_passive_return_never_set(self): User, Address, sess, a1 = self._u_ad_fixture(False) eq_( Address.user.impl.get( attributes.instance_state(a1), attributes.instance_dict(a1), passive=attributes.PASSIVE_RETURN_NEVER_SET), attributes.NEVER_SET ) assert 'user_id' not in a1.__dict__ assert 'user' not in a1.__dict__ def test_history_empty_passive_return_never_set(self): User, Address, sess, a1 = self._u_ad_fixture(False) eq_( Address.user.impl.get_history( attributes.instance_state(a1), attributes.instance_dict(a1), passive=attributes.PASSIVE_RETURN_NEVER_SET), ((), (), ()) ) assert 'user_id' not in a1.__dict__ assert 'user' not in a1.__dict__ def test_get_empty_passive_no_initialize(self): User, Address, sess, a1 = self._u_ad_fixture(False) eq_( Address.user.impl.get( attributes.instance_state(a1), attributes.instance_dict(a1), passive=attributes.PASSIVE_NO_INITIALIZE), attributes.PASSIVE_NO_RESULT ) assert 'user_id' not in a1.__dict__ assert 'user' not in a1.__dict__ def test_history_empty_passive_no_initialize(self): User, Address, sess, a1 = self._u_ad_fixture(False) eq_( Address.user.impl.get_history( attributes.instance_state(a1), 
attributes.instance_dict(a1), passive=attributes.PASSIVE_NO_INITIALIZE), attributes.HISTORY_BLANK ) assert 'user_id' not in a1.__dict__ assert 'user' not in a1.__dict__ def test_get_populated_passive_no_initialize(self): User, Address, sess, a1 = self._u_ad_fixture(True) eq_( Address.user.impl.get( attributes.instance_state(a1), attributes.instance_dict(a1), passive=attributes.PASSIVE_NO_INITIALIZE), attributes.PASSIVE_NO_RESULT ) assert 'user_id' not in a1.__dict__ assert 'user' not in a1.__dict__ def test_history_populated_passive_no_initialize(self): User, Address, sess, a1 = self._u_ad_fixture(True) eq_( Address.user.impl.get_history( attributes.instance_state(a1), attributes.instance_dict(a1), passive=attributes.PASSIVE_NO_INITIALIZE), attributes.HISTORY_BLANK ) assert 'user_id' not in a1.__dict__ assert 'user' not in a1.__dict__ def test_get_populated_passive_return_never_set(self): User, Address, sess, a1 = self._u_ad_fixture(True) eq_( Address.user.impl.get( attributes.instance_state(a1), attributes.instance_dict(a1), passive=attributes.PASSIVE_RETURN_NEVER_SET), User(name='ed') ) def test_history_populated_passive_return_never_set(self): User, Address, sess, a1 = self._u_ad_fixture(True) eq_( Address.user.impl.get_history( attributes.instance_state(a1), attributes.instance_dict(a1), passive=attributes.PASSIVE_RETURN_NEVER_SET), ((), [User(name='ed'), ], ()) ) class M2OGetTest(_fixtures.FixtureTest): run_inserts = 'once' run_deletes = None def test_m2o_noload(self): """test that a NULL foreign key doesn't trigger a lazy load""" users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(User, users) mapper(Address, addresses, properties={ 'user':relationship(User) }) sess = create_session() ad1 = Address(email_address='somenewaddress', id=12) sess.add(ad1) sess.flush() sess.expunge_all() ad2 = sess.query(Address).get(1) ad3 = sess.query(Address).get(ad1.id) def go(): # one lazy load assert ad2.user.name == 'jack' # no lazy load assert ad3.user is None self.assert_sql_count(testing.db, go, 1) class CorrelatedTest(fixtures.MappedTest): @classmethod def define_tables(self, meta): Table('user_t', meta, Column('id', Integer, primary_key=True), Column('name', String(50))) Table('stuff', meta, Column('id', Integer, primary_key=True), Column('date', sa.Date), Column('user_id', Integer, ForeignKey('user_t.id'))) @classmethod def insert_data(cls): stuff, user_t = cls.tables.stuff, cls.tables.user_t user_t.insert().execute( {'id':1, 'name':'user1'}, {'id':2, 'name':'user2'}, {'id':3, 'name':'user3'}) stuff.insert().execute( {'id':1, 'user_id':1, 'date':datetime.date(2007, 10, 15)}, {'id':2, 'user_id':1, 'date':datetime.date(2007, 12, 15)}, {'id':3, 'user_id':1, 'date':datetime.date(2007, 11, 15)}, {'id':4, 'user_id':2, 'date':datetime.date(2008, 1, 15)}, {'id':5, 'user_id':3, 'date':datetime.date(2007, 6, 15)}) def test_correlated_lazyload(self): stuff, user_t = self.tables.stuff, self.tables.user_t class User(fixtures.ComparableEntity): pass class Stuff(fixtures.ComparableEntity): pass mapper(Stuff, stuff) stuff_view = sa.select([stuff.c.id]).where(stuff.c.user_id==user_t.c.id).correlate(user_t).order_by(sa.desc(stuff.c.date)).limit(1) mapper(User, user_t, properties={ 'stuff':relationship(Stuff, primaryjoin=sa.and_(user_t.c.id==stuff.c.user_id, stuff.c.id==(stuff_view.as_scalar()))) }) sess = create_session() eq_(sess.query(User).all(), [ User(name='user1', stuff=[Stuff(date=datetime.date(2007, 12, 15), id=2)]), 
User(name='user2', stuff=[Stuff(id=4, date=datetime.date(2008, 1 , 15))]), User(name='user3', stuff=[Stuff(id=5, date=datetime.date(2007, 6, 15))]) ]) SQLAlchemy-0.8.4/test/orm/test_load_on_fks.py0000644000076500000240000002432212251147172021701 0ustar classicstaff00000000000000from sqlalchemy import * from sqlalchemy.orm import * from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.testing import eq_, AssertsExecutionResults, assert_raises from sqlalchemy import testing from sqlalchemy.testing import fixtures from sqlalchemy.orm.attributes import instance_state from sqlalchemy.orm.exc import FlushError from sqlalchemy.testing.schema import Table, Column engine = testing.db class FlushOnPendingTest(AssertsExecutionResults, fixtures.TestBase): def setUp(self): global Parent, Child, Base Base= declarative_base() class Parent(Base): __tablename__ = 'parent' id= Column(Integer, primary_key=True, test_needs_autoincrement=True) name = Column(String(50), nullable=False) children = relationship("Child", load_on_pending=True) class Child(Base): __tablename__ = 'child' id= Column(Integer, primary_key=True, test_needs_autoincrement=True) parent_id = Column(Integer, ForeignKey('parent.id')) Base.metadata.create_all(engine) def tearDown(self): Base.metadata.drop_all(engine) def test_annoying_autoflush_one(self): sess = Session(engine) p1 = Parent() sess.add(p1) p1.children = [] def test_annoying_autoflush_two(self): sess = Session(engine) p1 = Parent() sess.add(p1) assert p1.children == [] def test_dont_load_if_no_keys(self): sess = Session(engine) p1 = Parent() sess.add(p1) def go(): assert p1.children == [] self.assert_sql_count(testing.db, go, 0) class LoadOnFKsTest(AssertsExecutionResults, fixtures.TestBase): def setUp(self): global Parent, Child, Base Base= declarative_base() class Parent(Base): __tablename__ = 'parent' __table_args__ = {'mysql_engine':'InnoDB'} id= Column(Integer, primary_key=True, test_needs_autoincrement=True) class Child(Base): __tablename__ = 'child' __table_args__ = {'mysql_engine':'InnoDB'} id= Column(Integer, primary_key=True, test_needs_autoincrement=True) parent_id = Column(Integer, ForeignKey('parent.id')) parent = relationship(Parent, backref=backref("children")) Base.metadata.create_all(engine) global sess, p1, p2, c1, c2 sess = Session(bind=engine) p1 = Parent() p2 = Parent() c1, c2 = Child(), Child() c1.parent = p1 sess.add_all([p1, p2]) assert c1 in sess sess.commit() def tearDown(self): sess.rollback() Base.metadata.drop_all(engine) def test_load_on_pending_disallows_backref_event(self): Child.parent.property.load_on_pending = True sess.autoflush = False c3 = Child() sess.add(c3) c3.parent_id = p1.id c3.parent = p1 # a side effect of load-on-pending with no autoflush. # a change to the backref event handler to check # collection membership before assuming "old == new so return" # would fix this - but this is wasteful and autoflush # should be turned on. 
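# as a result the backref event never fires for the pending parent here, and the new child does not appear in p1.children: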
assert c3 not in p1.children def test_enable_rel_loading_disallows_backref_event(self): sess.autoflush = False c3 = Child() sess.enable_relationship_loading(c3) c3.parent_id = p1.id c3.parent = p1 # c3.parent is already acting like a "load" here, # so backref events don't work assert c3 not in p1.children def test_load_on_persistent_allows_backref_event(self): Child.parent.property.load_on_pending = True c3 = Child() sess.add(c3) c3.parent_id = p1.id c3.parent = p1 assert c3 in p1.children def test_enable_rel_loading_on_persistent_disallows_backref_event(self): c3 = Child() sess.enable_relationship_loading(c3) c3.parent_id = p1.id c3.parent = p1 # c3.parent is already acting like a "load" here, # so backref events don't work assert c3 not in p1.children def test_no_load_on_pending_allows_backref_event(self): # users who stick with the program and don't use # 'load_on_pending' get expected behavior sess.autoflush = False c3 = Child() sess.add(c3) c3.parent_id = p1.id c3.parent = p1 assert c3 in p1.children def test_autoflush_on_pending(self): c3 = Child() sess.add(c3) c3.parent_id = p1.id # pendings don't autoflush assert c3.parent is None def test_autoflush_on_pending(self): Child.parent.property.load_on_pending = True c3 = Child() sess.add(c3) c3.parent_id = p1.id # ...unless the flag is on assert c3.parent is p1 def test_collection_load_from_pending_populated(self): Parent.children.property.load_on_pending = True p2 = Parent(id=p1.id) sess.add(p2) # load should emit since PK is populated def go(): assert p2.children self.assert_sql_count(testing.db, go, 1) def test_collection_load_from_pending_no_sql(self): Parent.children.property.load_on_pending = True p2 = Parent(id=None) sess.add(p2) # load should not emit since "None" is the bound # param list def go(): assert not p2.children self.assert_sql_count(testing.db, go, 0) def test_load_on_pending_with_set(self): Child.parent.property.load_on_pending = True p1.children c3 = Child() sess.add(c3) c3.parent_id = p1.id def go(): c3.parent = p1 self.assert_sql_count(testing.db, go, 0) def test_backref_doesnt_double(self): Child.parent.property.load_on_pending = True sess.autoflush = False p1.children c3 = Child() sess.add(c3) c3.parent = p1 c3.parent = p1 c3.parent = p1 c3.parent = p1 assert len(p1.children)== 2 def test_m2o_lazy_loader_on_persistent(self): """Compare the behaviors from the lazyloader using the "committed" state in all cases, vs. the lazyloader using the "current" state in all cases except during flush. """ for loadfk in (True, False): for loadrel in (True, False): for autoflush in (True, False): for manualflush in (True, False): for fake_autoexpire in (True, False): sess.autoflush = autoflush if loadfk: c1.parent_id if loadrel: c1.parent c1.parent_id = p2.id if manualflush: sess.flush() # fake_autoexpire refers to the eventual # auto-expire of 'parent' when c1.parent_id # is altered. if fake_autoexpire: sess.expire(c1, ['parent']) # old 0.6 behavior #if manualflush and (not loadrel or fake_autoexpire): # # a flush occurs, we get p2 # assert c1.parent is p2 #elif not loadrel and not loadfk: # # problematically - we get None since committed state # # is empty when c1.parent_id was mutated, since we want # # to save on selects. this is # # why the patch goes in in 0.6 - this is mostly a bug. # assert c1.parent is None #else: # # if things were loaded, autoflush doesn't even # # happen. 
# assert c1.parent is p1 # new behavior if loadrel and not fake_autoexpire: assert c1.parent is p1 else: assert c1.parent is p2 sess.rollback() def test_m2o_lazy_loader_on_pending(self): for loadonpending in (False, True): for autoflush in (False, True): for manualflush in (False, True): Child.parent.property.load_on_pending = loadonpending sess.autoflush = autoflush c2 = Child() sess.add(c2) c2.parent_id = p2.id if manualflush: sess.flush() if loadonpending or manualflush: assert c2.parent is p2 else: assert c2.parent is None sess.rollback() def test_m2o_lazy_loader_on_transient(self): for loadonpending in (False, True): for attach in (False, True): for autoflush in (False, True): for manualflush in (False, True): for enable_relationship_rel in (False, True): Child.parent.property.load_on_pending = loadonpending sess.autoflush = autoflush c2 = Child() if attach: sess._attach(instance_state(c2)) if enable_relationship_rel: sess.enable_relationship_loading(c2) c2.parent_id = p2.id if manualflush: sess.flush() if (loadonpending and attach) or enable_relationship_rel: assert c2.parent is p2 else: assert c2.parent is None sess.rollback() SQLAlchemy-0.8.4/test/orm/test_loading.py0000644000076500000240000000615112251150016021027 0ustar classicstaff00000000000000from . import _fixtures from sqlalchemy.orm import loading, Session, aliased from sqlalchemy.testing.assertions import eq_ from sqlalchemy.util import KeyedTuple # class InstancesTest(_fixtures.FixtureTest): # class GetFromIdentityTest(_fixtures.FixtureTest): # class LoadOnIdentTest(_fixtures.FixtureTest): # class InstanceProcessorTest(_fixture.FixtureTest): class MergeResultTest(_fixtures.FixtureTest): run_setup_mappers = 'once' run_inserts = 'once' run_deletes = None @classmethod def setup_mappers(cls): cls._setup_stock_mapping() def _fixture(self): User = self.classes.User s = Session() u1, u2, u3, u4 = User(id=1, name='u1'), User(id=2, name='u2'), \ User(id=7, name='u3'), User(id=8, name='u4') s.query(User).filter(User.id.in_([7, 8])).all() s.close() return s, [u1, u2, u3, u4] def test_single_entity(self): s, (u1, u2, u3, u4) = self._fixture() User = self.classes.User q = s.query(User) collection = [u1, u2, u3, u4] it = loading.merge_result( q, collection ) eq_( [x.id for x in it], [1, 2, 7, 8] ) def test_single_column(self): User = self.classes.User s = Session() q = s.query(User.id) collection = [(1, ), (2, ), (7, ), (8, )] it = loading.merge_result( q, collection ) eq_( list(it), [(1, ), (2, ), (7, ), (8, )] ) def test_entity_col_mix_plain_tuple(self): s, (u1, u2, u3, u4) = self._fixture() User = self.classes.User q = s.query(User, User.id) collection = [(u1, 1), (u2, 2), (u3, 7), (u4, 8)] it = loading.merge_result( q, collection ) it = list(it) eq_( [(x.id, y) for x, y in it], [(1, 1), (2, 2), (7, 7), (8, 8)] ) eq_(it[0].keys(), ['User', 'id']) def test_entity_col_mix_keyed_tuple(self): s, (u1, u2, u3, u4) = self._fixture() User = self.classes.User q = s.query(User, User.id) kt = lambda *x: KeyedTuple(x, ['User', 'id']) collection = [kt(u1, 1), kt(u2, 2), kt(u3, 7), kt(u4, 8)] it = loading.merge_result( q, collection ) it = list(it) eq_( [(x.id, y) for x, y in it], [(1, 1), (2, 2), (7, 7), (8, 8)] ) eq_(it[0].keys(), ['User', 'id']) def test_none_entity(self): s, (u1, u2, u3, u4) = self._fixture() User = self.classes.User ua = aliased(User) q = s.query(User, ua) kt = lambda *x: KeyedTuple(x, ['User', 'useralias']) collection = [kt(u1, u2), kt(u1, None), kt(u2, u3)] it = loading.merge_result( q, collection ) eq_( [ (x and x.id or 
None, y and y.id or None) for x, y in it ], [(u1.id, u2.id), (u1.id, None), (u2.id, u3.id)] ) SQLAlchemy-0.8.4/test/orm/test_lockmode.py0000644000076500000240000000766012251150016021215 0ustar classicstaff00000000000000from sqlalchemy.engine import default from sqlalchemy.databases import * from sqlalchemy.orm import mapper from sqlalchemy.orm import Session from sqlalchemy.testing import AssertsCompiledSQL from sqlalchemy.testing import assert_raises_message from test.orm import _fixtures class LockModeTest(_fixtures.FixtureTest, AssertsCompiledSQL): run_inserts = None @classmethod def setup_mappers(cls): User, users = cls.classes.User, cls.tables.users mapper(User, users) def test_default_update(self): User = self.classes.User sess = Session() self.assert_compile(sess.query(User.id).with_lockmode('update'), "SELECT users.id AS users_id FROM users FOR UPDATE", dialect=default.DefaultDialect() ) def test_not_supported_by_dialect_should_just_use_update(self): User = self.classes.User sess = Session() self.assert_compile(sess.query(User.id).with_lockmode('read'), "SELECT users.id AS users_id FROM users FOR UPDATE", dialect=default.DefaultDialect() ) def test_none_lock_mode(self): User = self.classes.User sess = Session() self.assert_compile(sess.query(User.id).with_lockmode(None), "SELECT users.id AS users_id FROM users", dialect=default.DefaultDialect() ) def test_unknown_lock_mode(self): User = self.classes.User sess = Session() assert_raises_message( Exception, "Unknown lockmode 'unknown_mode'", self.assert_compile, sess.query(User.id).with_lockmode('unknown_mode'), None, dialect=default.DefaultDialect() ) def test_postgres_read(self): User = self.classes.User sess = Session() self.assert_compile(sess.query(User.id).with_lockmode('read'), "SELECT users.id AS users_id FROM users FOR SHARE", dialect=postgresql.dialect() ) def test_postgres_read_nowait(self): User = self.classes.User sess = Session() self.assert_compile(sess.query(User.id).with_lockmode('read_nowait'), "SELECT users.id AS users_id FROM users FOR SHARE NOWAIT", dialect=postgresql.dialect() ) def test_postgres_update(self): User = self.classes.User sess = Session() self.assert_compile(sess.query(User.id).with_lockmode('update'), "SELECT users.id AS users_id FROM users FOR UPDATE", dialect=postgresql.dialect() ) def test_postgres_update_nowait(self): User = self.classes.User sess = Session() self.assert_compile(sess.query(User.id).with_lockmode('update_nowait'), "SELECT users.id AS users_id FROM users FOR UPDATE NOWAIT", dialect=postgresql.dialect() ) def test_oracle_update(self): User = self.classes.User sess = Session() self.assert_compile(sess.query(User.id).with_lockmode('update'), "SELECT users.id AS users_id FROM users FOR UPDATE", dialect=oracle.dialect() ) def test_oracle_update_nowait(self): User = self.classes.User sess = Session() self.assert_compile(sess.query(User.id).with_lockmode('update_nowait'), "SELECT users.id AS users_id FROM users FOR UPDATE NOWAIT", dialect=oracle.dialect() ) def test_mysql_read(self): User = self.classes.User sess = Session() self.assert_compile(sess.query(User.id).with_lockmode('read'), "SELECT users.id AS users_id FROM users LOCK IN SHARE MODE", dialect=mysql.dialect() ) def test_mysql_update(self): User = self.classes.User sess = Session() self.assert_compile(sess.query(User.id).with_lockmode('update'), "SELECT users.id AS users_id FROM users FOR UPDATE", dialect=mysql.dialect() ) SQLAlchemy-0.8.4/test/orm/test_manytomany.py0000644000076500000240000003334112251147172021620 0ustar 
classicstaff00000000000000from sqlalchemy.testing import assert_raises, \ assert_raises_message, eq_ import sqlalchemy as sa from sqlalchemy import testing from sqlalchemy import Integer, String, ForeignKey from sqlalchemy.testing.schema import Table from sqlalchemy.testing.schema import Column from sqlalchemy.orm import mapper, relationship, Session, \ exc as orm_exc, sessionmaker, backref from sqlalchemy.testing import fixtures class M2MTest(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): Table('place', metadata, Column('place_id', Integer, test_needs_autoincrement=True, primary_key=True), Column('name', String(30), nullable=False), test_needs_acid=True, ) Table('transition', metadata, Column('transition_id', Integer, test_needs_autoincrement=True, primary_key=True), Column('name', String(30), nullable=False), test_needs_acid=True, ) Table('place_thingy', metadata, Column('thingy_id', Integer, test_needs_autoincrement=True, primary_key=True), Column('place_id', Integer, ForeignKey('place.place_id'), nullable=False), Column('name', String(30), nullable=False), test_needs_acid=True, ) # association table #1 Table('place_input', metadata, Column('place_id', Integer, ForeignKey('place.place_id')), Column('transition_id', Integer, ForeignKey('transition.transition_id')), test_needs_acid=True, ) # association table #2 Table('place_output', metadata, Column('place_id', Integer, ForeignKey('place.place_id')), Column('transition_id', Integer, ForeignKey('transition.transition_id')), test_needs_acid=True, ) Table('place_place', metadata, Column('pl1_id', Integer, ForeignKey('place.place_id')), Column('pl2_id', Integer, ForeignKey('place.place_id')), test_needs_acid=True, ) @classmethod def setup_classes(cls): class Place(cls.Basic): def __init__(self, name): self.name = name class PlaceThingy(cls.Basic): def __init__(self, name): self.name = name class Transition(cls.Basic): def __init__(self, name): self.name = name def test_overlapping_attribute_error(self): place, Transition, place_input, Place, transition = (self.tables.place, self.classes.Transition, self.tables.place_input, self.classes.Place, self.tables.transition) mapper(Place, place, properties={ 'transitions': relationship(Transition, secondary=place_input, backref='places') }) mapper(Transition, transition, properties={ 'places': relationship(Place, secondary=place_input, backref='transitions') }) assert_raises_message(sa.exc.ArgumentError, "property of that name exists", sa.orm.configure_mappers) def test_self_referential_roundtrip(self): place, Place, place_place = (self.tables.place, self.classes.Place, self.tables.place_place) mapper(Place, place, properties={ 'places': relationship( Place, secondary=place_place, primaryjoin=place.c.place_id == place_place.c.pl1_id, secondaryjoin=place.c.place_id == place_place.c.pl2_id, order_by=place_place.c.pl2_id ) }) sess = Session() p1 = Place('place1') p2 = Place('place2') p3 = Place('place3') p4 = Place('place4') p5 = Place('place5') p6 = Place('place6') p7 = Place('place7') sess.add_all((p1, p2, p3, p4, p5, p6, p7)) p1.places.append(p2) p1.places.append(p3) p5.places.append(p6) p6.places.append(p1) p7.places.append(p1) p1.places.append(p5) p4.places.append(p3) p3.places.append(p4) sess.commit() eq_(p1.places, [p2, p3, p5]) eq_(p5.places, [p6]) eq_(p7.places, [p1]) eq_(p6.places, [p1]) eq_(p4.places, [p3]) eq_(p3.places, [p4]) eq_(p2.places, []) def test_self_referential_bidirectional_mutation(self): place, Place, place_place = (self.tables.place, self.classes.Place, 
self.tables.place_place) mapper(Place, place, properties={ 'child_places': relationship( Place, secondary=place_place, primaryjoin=place.c.place_id == place_place.c.pl1_id, secondaryjoin=place.c.place_id == place_place.c.pl2_id, order_by=place_place.c.pl2_id, backref='parent_places' ) }) sess = Session() p1 = Place('place1') p2 = Place('place2') p2.parent_places = [p1] sess.add_all([p1, p2]) p1.parent_places.append(p2) sess.commit() assert p1 in p2.parent_places assert p2 in p1.parent_places def test_joinedload_on_double(self): """test that a mapper can have two eager relationships to the same table, via two different association tables. aliases are required.""" place_input, transition, Transition, PlaceThingy, \ place, place_thingy, Place, \ place_output = (self.tables.place_input, self.tables.transition, self.classes.Transition, self.classes.PlaceThingy, self.tables.place, self.tables.place_thingy, self.classes.Place, self.tables.place_output) mapper(PlaceThingy, place_thingy) mapper(Place, place, properties={ 'thingies': relationship(PlaceThingy, lazy='joined') }) mapper(Transition, transition, properties=dict( inputs=relationship(Place, place_output, lazy='joined'), outputs=relationship(Place, place_input, lazy='joined'), ) ) tran = Transition('transition1') tran.inputs.append(Place('place1')) tran.outputs.append(Place('place2')) tran.outputs.append(Place('place3')) sess = Session() sess.add(tran) sess.commit() r = sess.query(Transition).all() self.assert_unordered_result(r, Transition, {'name': 'transition1', 'inputs': (Place, [{'name': 'place1'}]), 'outputs': (Place, [{'name': 'place2'}, {'name': 'place3'}]) }) def test_bidirectional(self): place_input, transition, Transition, Place, place, place_output = ( self.tables.place_input, self.tables.transition, self.classes.Transition, self.classes.Place, self.tables.place, self.tables.place_output) mapper(Place, place) mapper(Transition, transition, properties=dict( inputs=relationship(Place, place_output, backref=backref('inputs', order_by=transition.c.transition_id), order_by=Place.place_id), outputs=relationship(Place, place_input, backref=backref('outputs', order_by=transition.c.transition_id), order_by=Place.place_id), ) ) t1 = Transition('transition1') t2 = Transition('transition2') t3 = Transition('transition3') p1 = Place('place1') p2 = Place('place2') p3 = Place('place3') sess = Session() sess.add_all([p3, p1, t1, t2, p2, t3]) t1.inputs.append(p1) t1.inputs.append(p2) t1.outputs.append(p3) t2.inputs.append(p1) p2.inputs.append(t2) p3.inputs.append(t2) p1.outputs.append(t1) sess.commit() self.assert_result([t1], Transition, {'outputs': (Place, [{'name': 'place3'}, {'name': 'place1'}])}) self.assert_result([p2], Place, {'inputs': (Transition, [{'name': 'transition1'}, {'name': 'transition2'}])}) @testing.requires.sane_multi_rowcount def test_stale_conditions(self): Place, Transition, place_input, place, transition = ( self.classes.Place, self.classes.Transition, self.tables.place_input, self.tables.place, self.tables.transition) mapper(Place, place, properties={ 'transitions': relationship(Transition, secondary=place_input, passive_updates=False) }) mapper(Transition, transition) p1 = Place('place1') t1 = Transition('t1') p1.transitions.append(t1) sess = sessionmaker()() sess.add_all([p1, t1]) sess.commit() p1.place_id p1.transitions sess.execute("delete from place_input", mapper=Place) p1.place_id = 7 assert_raises_message( orm_exc.StaleDataError, r"UPDATE statement on table 'place_input' expected to " r"update 1 row\(s\); Only 0 
were matched.", sess.commit ) sess.rollback() p1.place_id p1.transitions sess.execute("delete from place_input", mapper=Place) p1.transitions.remove(t1) assert_raises_message( orm_exc.StaleDataError, r"DELETE statement on table 'place_input' expected to " r"delete 1 row\(s\); Only 0 were matched.", sess.commit ) class AssortedPersistenceTests(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): Table("left", metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('data', String(30)) ) Table("right", metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('data', String(30)), ) Table('secondary', metadata, Column('left_id', Integer, ForeignKey('left.id'), primary_key=True), Column('right_id', Integer, ForeignKey('right.id'), primary_key=True), ) @classmethod def setup_classes(cls): class A(cls.Comparable): pass class B(cls.Comparable): pass def _standard_bidirectional_fixture(self): left, secondary, right = self.tables.left, \ self.tables.secondary, self.tables.right A, B = self.classes.A, self.classes.B mapper(A, left, properties={ 'bs': relationship(B, secondary=secondary, backref='as', order_by=right.c.id) }) mapper(B, right) def _bidirectional_onescalar_fixture(self): left, secondary, right = self.tables.left, \ self.tables.secondary, self.tables.right A, B = self.classes.A, self.classes.B mapper(A, left, properties={ 'bs': relationship(B, secondary=secondary, backref=backref('a', uselist=False), order_by=right.c.id) }) mapper(B, right) def test_session_delete(self): self._standard_bidirectional_fixture() A, B = self.classes.A, self.classes.B secondary = self.tables.secondary sess = Session() sess.add_all([ A(data='a1', bs=[B(data='b1')]), A(data='a2', bs=[B(data='b2')]) ]) sess.commit() a1 = sess.query(A).filter_by(data='a1').one() sess.delete(a1) sess.flush() eq_(sess.query(secondary).count(), 1) a2 = sess.query(A).filter_by(data='a2').one() sess.delete(a2) sess.flush() eq_(sess.query(secondary).count(), 0) def test_remove_scalar(self): # test setting a uselist=False to None self._bidirectional_onescalar_fixture() A, B = self.classes.A, self.classes.B secondary = self.tables.secondary sess = Session() sess.add_all([ A(data='a1', bs=[B(data='b1'), B(data='b2')]), ]) sess.commit() a1 = sess.query(A).filter_by(data='a1').one() b2 = sess.query(B).filter_by(data='b2').one() assert b2.a is a1 b2.a = None sess.commit() eq_(a1.bs, [B(data='b1')]) eq_(b2.a, None) eq_(sess.query(secondary).count(), 1) SQLAlchemy-0.8.4/test/orm/test_mapper.py0000644000076500000240000034102112251150016020674 0ustar classicstaff00000000000000"""General mapper operations with an emphasis on selecting/loading.""" from sqlalchemy.testing import assert_raises, assert_raises_message import sqlalchemy as sa from sqlalchemy import testing from sqlalchemy import MetaData, Integer, String, ForeignKey, func, util from sqlalchemy.testing.schema import Table, Column from sqlalchemy.engine import default from sqlalchemy.orm import mapper, relationship, backref, \ create_session, class_mapper, configure_mappers, reconstructor, \ validates, aliased, defer, deferred, synonym, attributes, \ column_property, composite, dynamic_loader, \ comparable_property, Session from sqlalchemy.orm.persistence import _sort_states from sqlalchemy.testing import eq_, AssertsCompiledSQL, is_ from sqlalchemy.testing import fixtures from test.orm import _fixtures from sqlalchemy.testing.assertsql import CompiledSQL import logging class MapperTest(_fixtures.FixtureTest, 
AssertsCompiledSQL): __dialect__ = 'default' def test_prop_shadow(self): """A backref name may not shadow an existing property name.""" Address, addresses, users, User = (self.classes.Address, self.tables.addresses, self.tables.users, self.classes.User) mapper(Address, addresses) mapper(User, users, properties={ 'addresses':relationship(Address, backref='email_address') }) assert_raises(sa.exc.ArgumentError, sa.orm.configure_mappers) def test_update_attr_keys(self): """test that update()/insert() use the correct key when given InstrumentedAttributes.""" User, users = self.classes.User, self.tables.users mapper(User, users, properties={ 'foobar':users.c.name }) users.insert().values({User.foobar:'name1'}).execute() eq_(sa.select([User.foobar]).where(User.foobar=='name1').execute().fetchall(), [('name1',)]) users.update().values({User.foobar:User.foobar + 'foo'}).execute() eq_(sa.select([User.foobar]).where(User.foobar=='name1foo').execute().fetchall(), [('name1foo',)]) def test_utils(self): users = self.tables.users addresses = self.tables.addresses Address = self.classes.Address from sqlalchemy.orm.util import _is_mapped_class, _is_aliased_class class Foo(object): x = "something" @property def y(self): return "somethign else" m = mapper(Foo, users, properties={"addresses":relationship(Address)}) mapper(Address, addresses) a1 = aliased(Foo) f = Foo() for fn, arg, ret in [ (_is_mapped_class, Foo.x, False), (_is_mapped_class, Foo.y, False), (_is_mapped_class, Foo.name, False), (_is_mapped_class, Foo.addresses, False), (_is_mapped_class, Foo, True), (_is_mapped_class, f, False), (_is_mapped_class, a1, True), (_is_mapped_class, m, True), (_is_aliased_class, a1, True), (_is_aliased_class, Foo.x, False), (_is_aliased_class, Foo.y, False), (_is_aliased_class, Foo, False), (_is_aliased_class, f, False), (_is_aliased_class, a1, True), (_is_aliased_class, m, False), ]: assert fn(arg) == ret def test_entity_descriptor(self): users = self.tables.users from sqlalchemy.orm.util import _entity_descriptor class Foo(object): x = "something" @property def y(self): return "somethign else" m = mapper(Foo, users) a1 = aliased(Foo) f = Foo() for arg, key, ret in [ (m, "x", Foo.x), (Foo, "x", Foo.x), (a1, "x", a1.x), (users, "name", users.c.name) ]: assert _entity_descriptor(arg, key) is ret def test_friendly_attribute_str_on_uncompiled_boom(self): User, users = self.classes.User, self.tables.users def boom(): raise Exception("it broke") mapper(User, users, properties={ 'addresses':relationship(boom) }) # test that QueryableAttribute.__str__() doesn't # cause a compile. eq_(str(User.addresses), "User.addresses") def test_exceptions_sticky(self): """test preservation of mapper compile errors raised during hasattr(), as well as for redundant mapper compile calls. Test that repeated calls don't stack up error messages. """ Address, addresses, User = (self.classes.Address, self.tables.addresses, self.classes.User) mapper(Address, addresses, properties={ 'user':relationship(User) }) try: hasattr(Address.user, 'property') except sa.orm.exc.UnmappedClassError: assert util.compat.py32 for i in range(3): assert_raises_message(sa.exc.InvalidRequestError, "^One or more mappers failed to " "initialize - can't proceed with " "initialization of other mappers. 
" "Original exception was: Class " "'test.orm._fixtures.User' is not mapped$" , configure_mappers) def test_column_prefix(self): users, User = self.tables.users, self.classes.User mapper(User, users, column_prefix='_', properties={ 'user_name': synonym('_name') }) s = create_session() u = s.query(User).get(7) eq_(u._name, 'jack') eq_(u._id,7) u2 = s.query(User).filter_by(user_name='jack').one() assert u is u2 def test_no_pks_1(self): User, users = self.classes.User, self.tables.users s = sa.select([users.c.name]).alias('foo') assert_raises(sa.exc.ArgumentError, mapper, User, s) def test_no_pks_2(self): User, users = self.classes.User, self.tables.users s = sa.select([users.c.name]).alias() assert_raises(sa.exc.ArgumentError, mapper, User, s) def test_reconfigure_on_other_mapper(self): """A configure trigger on an already-configured mapper still triggers a check against all mappers.""" users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(User, users) sa.orm.configure_mappers() assert sa.orm.mapperlib._new_mappers is False m = mapper(Address, addresses, properties={ 'user': relationship(User, backref="addresses")}) assert m.configured is False assert sa.orm.mapperlib._new_mappers is True u = User() assert User.addresses assert sa.orm.mapperlib._new_mappers is False def test_configure_on_session(self): User, users = self.classes.User, self.tables.users m = mapper(User, users) session = create_session() session.connection(m) def test_incomplete_columns(self): """Loading from a select which does not contain all columns""" addresses, Address = self.tables.addresses, self.classes.Address mapper(Address, addresses) s = create_session() a = s.query(Address).from_statement( sa.select([addresses.c.id, addresses.c.user_id])).first() eq_(a.user_id, 7) eq_(a.id, 1) # email address auto-defers assert 'email_addres' not in a.__dict__ eq_(a.email_address, 'jack@bean.com') def test_column_not_present(self): users, addresses, User = (self.tables.users, self.tables.addresses, self.classes.User) assert_raises_message(sa.exc.ArgumentError, "not represented in the mapper's table", mapper, User, users, properties={'foo' : addresses.c.user_id}) def test_constructor_exc(self): """TypeError is raised for illegal constructor args, whether or not explicit __init__ is present [ticket:908].""" users, addresses = self.tables.users, self.tables.addresses class Foo(object): def __init__(self): pass class Bar(object): pass mapper(Foo, users) mapper(Bar, addresses) assert_raises(TypeError, Foo, x=5) assert_raises(TypeError, Bar, x=5) def test_sort_states_comparisons(self): """test that _sort_states() doesn't compare insert_order to state.key, for set of mixed persistent/pending. In particular Python 3 disallows this. 
""" class Foo(object): def __init__(self, id): self.id = id m = MetaData() foo_t = Table('foo', m, Column('id', String, primary_key=True) ) m = mapper(Foo, foo_t) class DontCompareMeToString(int): # Py3K # pass # Py2K def __lt__(self, other): assert not isinstance(other, basestring) return int(self) < other # end Py2K foos = [Foo(id='f%d' % i) for i in range(5)] states = [attributes.instance_state(f) for f in foos] for s in states[0:3]: s.key = m._identity_key_from_state(s) states[3].insert_order = DontCompareMeToString(5) states[4].insert_order = DontCompareMeToString(1) states[2].insert_order = DontCompareMeToString(3) eq_( _sort_states(states), [states[4], states[3], states[0], states[1], states[2]] ) def test_props(self): users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) m = mapper(User, users, properties = { 'addresses' : relationship(mapper(Address, addresses)) }) assert User.addresses.property is m.get_property('addresses') def test_configure_on_prop_1(self): users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(User, users, properties = { 'addresses' : relationship(mapper(Address, addresses)) }) User.addresses.any(Address.email_address=='foo@bar.com') def test_configure_on_prop_2(self): users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(User, users, properties = { 'addresses' : relationship(mapper(Address, addresses)) }) eq_(str(User.id == 3), str(users.c.id==3)) def test_configure_on_prop_3(self): users, addresses, User = (self.tables.users, self.tables.addresses, self.classes.User) class Foo(User):pass mapper(User, users) mapper(Foo, addresses, inherits=User) assert getattr(Foo().__class__, 'name').impl is not None def test_deferred_subclass_attribute_instrument(self): users, addresses, User = (self.tables.users, self.tables.addresses, self.classes.User) class Foo(User):pass mapper(User, users) configure_mappers() mapper(Foo, addresses, inherits=User) assert getattr(Foo().__class__, 'name').impl is not None def test_check_descriptor_as_method(self): User, users = self.classes.User, self.tables.users m = mapper(User, users) class MyClass(User): def foo(self): pass m._is_userland_descriptor(MyClass.foo) def test_configure_on_get_props_1(self): User, users = self.classes.User, self.tables.users m =mapper(User, users) assert not m.configured assert list(m.iterate_properties) assert m.configured def test_configure_on_get_props_2(self): User, users = self.classes.User, self.tables.users m= mapper(User, users) assert not m.configured assert m.get_property('name') assert m.configured def test_configure_on_get_props_3(self): users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) m= mapper(User, users) assert not m.configured configure_mappers() m2 = mapper(Address, addresses, properties={ 'user':relationship(User, backref='addresses') }) assert m.get_property('addresses') def test_info(self): users = self.tables.users Address = self.classes.Address class MyComposite(object): pass for constructor, args in [ (column_property, (users.c.name,)), (relationship, (Address,)), (composite, (MyComposite, 'id', 'name')) ]: obj = constructor(info={"x": "y"}, *args) eq_(obj.info, {"x": "y"}) obj.info["q"] = "p" eq_(obj.info, {"x": "y", "q": "p"}) obj = constructor(*args) eq_(obj.info, {}) obj.info["q"] = "p" eq_(obj.info, {"q": 
"p"}) def test_info_via_instrumented(self): m = MetaData() # create specific tables here as we don't want # users.c.id.info to be pre-initialized users = Table('u', m, Column('id', Integer, primary_key=True), Column('name', String)) addresses = Table('a', m, Column('id', Integer, primary_key=True), Column('name', String), Column('user_id', Integer, ForeignKey('u.id'))) Address = self.classes.Address User = self.classes.User mapper(User, users, properties={ "name_lower": column_property(func.lower(users.c.name)), "addresses": relationship(Address) }) mapper(Address, addresses) # attr.info goes down to the original Column object # for the dictionary. The annotated element needs to pass # this on. assert 'info' not in users.c.id.__dict__ is_(User.id.info, users.c.id.info) assert 'info' in users.c.id.__dict__ # for SQL expressions, ORM-level .info is_(User.name_lower.info, User.name_lower.property.info) # same for relationships is_(User.addresses.info, User.addresses.property.info) def test_add_property(self): users, addresses, Address = (self.tables.users, self.tables.addresses, self.classes.Address) assert_col = [] class User(fixtures.ComparableEntity): def _get_name(self): assert_col.append(('get', self._name)) return self._name def _set_name(self, name): assert_col.append(('set', name)) self._name = name name = property(_get_name, _set_name) def _uc_name(self): if self._name is None: return None return self._name.upper() uc_name = property(_uc_name) uc_name2 = property(_uc_name) m = mapper(User, users) mapper(Address, addresses) class UCComparator(sa.orm.PropComparator): __hash__ = None def __eq__(self, other): cls = self.prop.parent.class_ col = getattr(cls, 'name') if other is None: return col == None else: return sa.func.upper(col) == sa.func.upper(other) m.add_property('_name', deferred(users.c.name)) m.add_property('name', synonym('_name')) m.add_property('addresses', relationship(Address)) m.add_property('uc_name', sa.orm.comparable_property(UCComparator)) m.add_property('uc_name2', sa.orm.comparable_property( UCComparator, User.uc_name2)) sess = create_session(autocommit=False) assert sess.query(User).get(7) u = sess.query(User).filter_by(name='jack').one() def go(): eq_(len(u.addresses), len(self.static.user_address_result[0].addresses)) eq_(u.name, 'jack') eq_(u.uc_name, 'JACK') eq_(u.uc_name2, 'JACK') eq_(assert_col, [('get', 'jack')], str(assert_col)) self.sql_count_(2, go) u.name = 'ed' u3 = User() u3.name = 'some user' sess.add(u3) sess.flush() sess.rollback() def test_add_prop_via_backref_resets_memoizations_reconfigures(self): users, User = self.tables.users, self.classes.User addresses, Address = self.tables.addresses, self.classes.Address m1 = mapper(User, users) User() m2 = mapper(Address, addresses, properties={ 'user':relationship(User, backref="addresses") }) # configure mappers takes place when User is generated User() assert hasattr(User, 'addresses') assert "addresses" in [p.key for p in m1._polymorphic_properties] def test_replace_col_prop_w_syn(self): users, User = self.tables.users, self.classes.User m = mapper(User, users) m.add_property('_name',users.c.name) m.add_property('name', synonym('_name')) sess = create_session() u = sess.query(User).filter_by(name='jack').one() eq_(u._name, 'jack') eq_(u.name, 'jack') u.name = 'jacko' assert m._columntoproperty[users.c.name] is m.get_property('_name') sa.orm.clear_mappers() m = mapper(User, users) m.add_property('name', synonym('_name', map_column=True)) sess.expunge_all() u = 
sess.query(User).filter_by(name='jack').one() eq_(u._name, 'jack') eq_(u.name, 'jack') u.name = 'jacko' assert m._columntoproperty[users.c.name] is m.get_property('_name') def test_replace_rel_prop_with_rel_warns(self): users, User = self.tables.users, self.classes.User addresses, Address = self.tables.addresses, self.classes.Address m = mapper(User, users, properties={ "addresses": relationship(Address) }) mapper(Address, addresses) assert_raises_message( sa.exc.SAWarning, "Property User.addresses on Mapper|User|users being replaced " "with new property User.addresses; the old property will " "be discarded", m.add_property, "addresses", relationship(Address) ) def test_add_column_prop_deannotate(self): User, users = self.classes.User, self.tables.users Address, addresses = self.classes.Address, self.tables.addresses class SubUser(User): pass m = mapper(User, users) m2 = mapper(SubUser, addresses, inherits=User) m3 = mapper(Address, addresses, properties={ 'foo':relationship(m2) }) # add property using annotated User.name, # needs to be deannotated m.add_property("x", column_property(User.name + "name")) s = create_session() q = s.query(m2).select_from(Address).join(Address.foo) self.assert_compile( q, "SELECT " "anon_1.addresses_id AS anon_1_addresses_id, " "anon_1.users_id AS anon_1_users_id, " "anon_1.users_name AS anon_1_users_name, " "anon_1.addresses_user_id AS anon_1_addresses_user_id, " "anon_1.addresses_email_address AS " "anon_1_addresses_email_address, " "anon_1.users_name || :name_1 AS anon_2 " "FROM addresses JOIN (SELECT users.id AS users_id, " "users.name AS users_name, addresses.id AS addresses_id, " "addresses.user_id AS addresses_user_id, " "addresses.email_address AS addresses_email_address " "FROM users JOIN addresses ON users.id = " "addresses.user_id) AS anon_1 ON " "anon_1.users_id = addresses.user_id" ) def test_column_prop_deannotate(self): """test that column property deannotates, bringing expressions down to the original mapped columns. """ User, users = self.classes.User, self.tables.users m = mapper(User, users) assert User.id.property.columns[0] is users.c.id assert User.name.property.columns[0] is users.c.name expr = User.name + "name" expr2 = sa.select([User.name, users.c.id]) m.add_property("x", column_property(expr)) m.add_property("y", column_property(expr2)) assert User.x.property.columns[0] is not expr assert User.x.property.columns[0].element.left is users.c.name # a deannotate needs to clone the base, in case # the original one referenced annotated elements. 
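# i.e. the cloned expression used by the column_property should not share the annotated right-hand element with the original 'expr':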
assert User.x.property.columns[0].element.right is not expr.right assert User.y.property.columns[0] is not expr2 assert User.y.property.columns[0].element.\ _raw_columns[0] is users.c.name assert User.y.property.columns[0].element.\ _raw_columns[1] is users.c.id def test_synonym_replaces_backref(self): addresses, users, User = (self.tables.addresses, self.tables.users, self.classes.User) assert_calls = [] class Address(object): def _get_user(self): assert_calls.append("get") return self._user def _set_user(self, user): assert_calls.append("set") self._user = user user = property(_get_user, _set_user) # synonym is created against nonexistent prop mapper(Address, addresses, properties={ 'user':synonym('_user') }) sa.orm.configure_mappers() # later, backref sets up the prop mapper(User, users, properties={ 'addresses':relationship(Address, backref='_user') }) sess = create_session() u1 = sess.query(User).get(7) u2 = sess.query(User).get(8) # comparaison ops need to work a1 = sess.query(Address).filter(Address.user==u1).one() eq_(a1.id, 1) a1.user = u2 assert a1.user is u2 eq_(assert_calls, ["set", "get"]) def test_self_ref_synonym(self): t = Table('nodes', MetaData(), Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('parent_id', Integer, ForeignKey('nodes.id'))) class Node(object): pass mapper(Node, t, properties={ '_children':relationship(Node, backref=backref('_parent', remote_side=t.c.id)), 'children':synonym('_children'), 'parent':synonym('_parent') }) n1 = Node() n2 = Node() n1.children.append(n2) assert n2.parent is n2._parent is n1 assert n1.children[0] is n1._children[0] is n2 eq_(str(Node.parent == n2), ":param_1 = nodes.parent_id") def test_non_primary_identity_class(self): User = self.classes.User users, addresses = self.tables.users, self.tables.addresses class AddressUser(User): pass m1 = mapper(User, users, polymorphic_identity='user') m2 = mapper(AddressUser, addresses, inherits=User, polymorphic_identity='address') m3 = mapper(AddressUser, addresses, non_primary=True) assert m3._identity_class is m2._identity_class eq_( m2.identity_key_from_instance(AddressUser()), m3.identity_key_from_instance(AddressUser()) ) def test_illegal_non_primary(self): users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(User, users) mapper(Address, addresses) mapper(User, users, non_primary=True, properties={ 'addresses':relationship(Address) }) assert_raises_message( sa.exc.ArgumentError, "Attempting to assign a new relationship 'addresses' " "to a non-primary mapper on class 'User'", configure_mappers ) def test_illegal_non_primary_2(self): User, users = self.classes.User, self.tables.users assert_raises_message( sa.exc.InvalidRequestError, "Configure a primary mapper first", mapper, User, users, non_primary=True) def test_illegal_non_primary_3(self): users, addresses = self.tables.users, self.tables.addresses class Base(object): pass class Sub(Base): pass mapper(Base, users) assert_raises_message(sa.exc.InvalidRequestError, "Configure a primary mapper first", mapper, Sub, addresses, non_primary=True ) def test_prop_filters(self): t = Table('person', MetaData(), Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('type', String(128)), Column('name', String(128)), Column('employee_number', Integer), Column('boss_id', Integer, ForeignKey('person.id')), Column('vendor_id', Integer)) class Person(object): pass class Vendor(Person): pass class Employee(Person): pass class 
Manager(Employee): pass class Hoho(object): pass class Lala(object): pass class Fub(object):pass class Frob(object):pass class HasDef(object): def name(self): pass class Empty(object):pass empty = mapper(Empty, t, properties={'empty_id' : t.c.id}, include_properties=[]) p_m = mapper(Person, t, polymorphic_on=t.c.type, include_properties=('id', 'type', 'name')) e_m = mapper(Employee, inherits=p_m, polymorphic_identity='employee', properties={'boss' : relationship(Manager, backref=backref('peon'), remote_side=t.c.id)}, exclude_properties=('vendor_id', )) m_m = mapper(Manager, inherits=e_m, polymorphic_identity='manager', include_properties=('id', 'type')) v_m = mapper(Vendor, inherits=p_m, polymorphic_identity='vendor', exclude_properties=('boss_id', 'employee_number')) h_m = mapper(Hoho, t, include_properties=('id', 'type', 'name')) l_m = mapper(Lala, t, exclude_properties=('vendor_id', 'boss_id'), column_prefix="p_") hd_m = mapper(HasDef, t, column_prefix="h_") fb_m = mapper(Fub, t, include_properties=(t.c.id, t.c.type)) frb_m = mapper(Frob, t, column_prefix='f_', exclude_properties=(t.c.boss_id, 'employee_number', t.c.vendor_id)) configure_mappers() def assert_props(cls, want): have = set([n for n in dir(cls) if not n.startswith('_')]) want = set(want) eq_(have, want) def assert_instrumented(cls, want): have = set([p.key for p in class_mapper(cls).iterate_properties]) want = set(want) eq_(have, want) assert_props(HasDef, ['h_boss_id', 'h_employee_number', 'h_id', 'name', 'h_name', 'h_vendor_id', 'h_type']) assert_props(Person, ['id', 'name', 'type']) assert_instrumented(Person, ['id', 'name', 'type']) assert_props(Employee, ['boss', 'boss_id', 'employee_number', 'id', 'name', 'type']) assert_instrumented(Employee,['boss', 'boss_id', 'employee_number', 'id', 'name', 'type']) assert_props(Manager, ['boss', 'boss_id', 'employee_number', 'peon', 'id', 'name', 'type']) # 'peon' and 'type' are both explicitly stated properties assert_instrumented(Manager, ['peon', 'type', 'id']) assert_props(Vendor, ['vendor_id', 'id', 'name', 'type']) assert_props(Hoho, ['id', 'name', 'type']) assert_props(Lala, ['p_employee_number', 'p_id', 'p_name', 'p_type']) assert_props(Fub, ['id', 'type']) assert_props(Frob, ['f_id', 'f_type', 'f_name', ]) # putting the discriminator column in exclude_properties, # very weird. As of 0.7.4 this re-maps it. 
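# the Foo subclass defined next excludes 'type' from its mapping, but as the polymorphic discriminator it is re-mapped onto the class anyway: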
class Foo(Person): pass assert_props(Empty, ['empty_id']) mapper( Foo, inherits=Person, polymorphic_identity='foo', exclude_properties=('type', ), ) assert hasattr(Foo, "type") assert Foo.type.property.columns[0] is t.c.type @testing.provide_metadata def test_prop_filters_defaults(self): metadata = self.metadata t = Table('t', metadata, Column('id', Integer(), primary_key=True, test_needs_autoincrement=True), Column('x', Integer(), nullable=False, server_default='0') ) t.create() class A(object): pass mapper(A, t, include_properties=['id']) s = Session() s.add(A()) s.commit() def test_we_dont_call_bool(self): class NoBoolAllowed(object): def __nonzero__(self): raise Exception("nope") mapper(NoBoolAllowed, self.tables.users) u1 = NoBoolAllowed() u1.name = "some name" s = Session(testing.db) s.add(u1) s.commit() assert s.query(NoBoolAllowed).get(u1.id) is u1 def test_we_dont_call_eq(self): class NoEqAllowed(object): def __eq__(self, other): raise Exception("nope") addresses, users = self.tables.addresses, self.tables.users Address = self.classes.Address mapper(NoEqAllowed, users, properties={ 'addresses':relationship(Address, backref='user') }) mapper(Address, addresses) u1 = NoEqAllowed() u1.name = "some name" u1.addresses = [Address(id=12, email_address='a1')] s = Session(testing.db) s.add(u1) s.commit() a1 = s.query(Address).filter_by(id=12).one() assert a1.user is u1 def test_mapping_to_join_raises(self): """Test implicit merging of two cols raises.""" addresses, users, User = (self.tables.addresses, self.tables.users, self.classes.User) usersaddresses = sa.join(users, addresses, users.c.id == addresses.c.user_id) assert_raises_message( sa.exc.InvalidRequestError, "Implicitly", mapper, User, usersaddresses, primary_key=[users.c.id] ) def test_mapping_to_join_explicit_prop(self): """Mapping to a join""" User, addresses, users = (self.classes.User, self.tables.addresses, self.tables.users) usersaddresses = sa.join(users, addresses, users.c.id == addresses.c.user_id) mapper(User, usersaddresses, primary_key=[users.c.id], properties={'add_id':addresses.c.id} ) l = create_session().query(User).order_by(users.c.id).all() eq_(l, self.static.user_result[:3]) def test_mapping_to_join_exclude_prop(self): """Mapping to a join""" User, addresses, users = (self.classes.User, self.tables.addresses, self.tables.users) usersaddresses = sa.join(users, addresses, users.c.id == addresses.c.user_id) mapper(User, usersaddresses, primary_key=[users.c.id], exclude_properties=[addresses.c.id] ) l = create_session().query(User).order_by(users.c.id).all() eq_(l, self.static.user_result[:3]) def test_mapping_to_join_no_pk(self): email_bounces, addresses, Address = (self.tables.email_bounces, self.tables.addresses, self.classes.Address) m = mapper(Address, addresses.join(email_bounces), properties={'id':[addresses.c.id, email_bounces.c.id]} ) configure_mappers() assert addresses in m._pks_by_table assert email_bounces not in m._pks_by_table sess = create_session() a = Address(id=10, email_address='e1') sess.add(a) sess.flush() eq_(addresses.count().scalar(), 6) eq_(email_bounces.count().scalar(), 5) def test_mapping_to_outerjoin(self): """Mapping to an outer join with a nullable composite primary key.""" users, addresses, User = (self.tables.users, self.tables.addresses, self.classes.User) mapper(User, users.outerjoin(addresses), primary_key=[users.c.id, addresses.c.id], properties=dict( address_id=addresses.c.id)) session = create_session() l = session.query(User).order_by(User.id, User.address_id).all() eq_(l, [ 
User(id=7, address_id=1), User(id=8, address_id=2), User(id=8, address_id=3), User(id=8, address_id=4), User(id=9, address_id=5), User(id=10, address_id=None)]) def test_mapping_to_outerjoin_no_partial_pks(self): """test the allow_partial_pks=False flag.""" users, addresses, User = (self.tables.users, self.tables.addresses, self.classes.User) mapper(User, users.outerjoin(addresses), allow_partial_pks=False, primary_key=[users.c.id, addresses.c.id], properties=dict( address_id=addresses.c.id)) session = create_session() l = session.query(User).order_by(User.id, User.address_id).all() eq_(l, [ User(id=7, address_id=1), User(id=8, address_id=2), User(id=8, address_id=3), User(id=8, address_id=4), User(id=9, address_id=5), None]) def test_scalar_pk_arg(self): users, Keyword, items, Item, User, keywords = (self.tables.users, self.classes.Keyword, self.tables.items, self.classes.Item, self.classes.User, self.tables.keywords) m1 = mapper(Item, items, primary_key=[items.c.id]) m2 = mapper(Keyword, keywords, primary_key=keywords.c.id) m3 = mapper(User, users, primary_key=(users.c.id,)) assert m1.primary_key[0] is items.c.id assert m2.primary_key[0] is keywords.c.id assert m3.primary_key[0] is users.c.id def test_custom_join(self): """select_from totally replace the FROM parameters.""" users, items, order_items, orders, Item, User, Order = (self.tables.users, self.tables.items, self.tables.order_items, self.tables.orders, self.classes.Item, self.classes.User, self.classes.Order) mapper(Item, items) mapper(Order, orders, properties=dict( items=relationship(Item, order_items))) mapper(User, users, properties=dict( orders=relationship(Order))) session = create_session() l = (session.query(User). select_from(users.join(orders). join(order_items). join(items)). filter(items.c.description == 'item 4')).all() eq_(l, [self.static.user_result[0]]) def test_cancel_order_by(self): users, User = self.tables.users, self.classes.User mapper(User, users, order_by=users.c.name.desc()) assert "order by users.name desc" in str(create_session().query(User).statement).lower() assert "order by" not in str(create_session().query(User).order_by(None).statement).lower() assert "order by users.name asc" in str(create_session().query(User).order_by(User.name.asc()).statement).lower() eq_( create_session().query(User).all(), [User(id=7, name=u'jack'), User(id=9, name=u'fred'), User(id=8, name=u'ed'), User(id=10, name=u'chuck')] ) eq_( create_session().query(User).order_by(User.name).all(), [User(id=10, name=u'chuck'), User(id=8, name=u'ed'), User(id=9, name=u'fred'), User(id=7, name=u'jack')] ) # 'Raises a "expression evaluation not supported" error at prepare time @testing.fails_on('firebird', 'FIXME: unknown') def test_function(self): """Mapping to a SELECT statement that has functions in it.""" addresses, users, User = (self.tables.addresses, self.tables.users, self.classes.User) s = sa.select([users, (users.c.id * 2).label('concat'), sa.func.count(addresses.c.id).label('count')], users.c.id == addresses.c.user_id, group_by=[c for c in users.c]).alias('myselect') mapper(User, s, order_by=s.c.id) sess = create_session() l = sess.query(User).all() for idx, total in enumerate((14, 16)): eq_(l[idx].concat, l[idx].id * 2) eq_(l[idx].concat, total) def test_count(self): """The count function on Query.""" User, users = self.classes.User, self.tables.users mapper(User, users) session = create_session() q = session.query(User) eq_(q.count(), 4) eq_(q.filter(User.id.in_([8,9])).count(), 2) 
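# the same membership filter expressed against the Core column produces the identical count: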
eq_(q.filter(users.c.id.in_([8,9])).count(), 2) eq_(session.query(User.id).count(), 4) eq_(session.query(User.id).filter(User.id.in_((8, 9))).count(), 2) def test_many_to_many_count(self): keywords, items, item_keywords, Keyword, Item = (self.tables.keywords, self.tables.items, self.tables.item_keywords, self.classes.Keyword, self.classes.Item) mapper(Keyword, keywords) mapper(Item, items, properties=dict( keywords = relationship(Keyword, item_keywords, lazy='select'))) session = create_session() q = (session.query(Item). join('keywords'). distinct(). filter(Keyword.name == "red")) eq_(q.count(), 2) def test_override_1(self): """Overriding a column raises an error.""" Address, addresses, users, User = (self.classes.Address, self.tables.addresses, self.tables.users, self.classes.User) def go(): mapper(User, users, properties=dict( name=relationship(mapper(Address, addresses)))) assert_raises(sa.exc.ArgumentError, go) def test_override_2(self): """exclude_properties cancels the error.""" Address, addresses, users, User = (self.classes.Address, self.tables.addresses, self.tables.users, self.classes.User) mapper(User, users, exclude_properties=['name'], properties=dict( name=relationship(mapper(Address, addresses)))) assert bool(User.name) def test_override_3(self): """The column being named elsewhere also cancels the error,""" Address, addresses, users, User = (self.classes.Address, self.tables.addresses, self.tables.users, self.classes.User) mapper(User, users, properties=dict( name=relationship(mapper(Address, addresses)), foo=users.c.name)) def test_synonym(self): users, addresses, Address = (self.tables.users, self.tables.addresses, self.classes.Address) assert_col = [] class extendedproperty(property): attribute = 123 class User(object): def _get_name(self): assert_col.append(('get', self.name)) return self.name def _set_name(self, name): assert_col.append(('set', name)) self.name = name uname = extendedproperty(_get_name, _set_name) mapper(User, users, properties=dict( addresses = relationship(mapper(Address, addresses), lazy='select'), uname = synonym('name'), adlist = synonym('addresses'), adname = synonym('addresses') )) # ensure the synonym can get at the proxied comparators without # an explicit compile User.name == 'ed' User.adname.any() assert hasattr(User, 'adlist') # as of 0.4.2, synonyms always create a property assert hasattr(User, 'adname') # test compile assert not isinstance(User.uname == 'jack', bool) assert User.uname.property assert User.adlist.property sess = create_session() # test RowTuple names row = sess.query(User.id, User.uname).first() assert row.uname == row[1] u = sess.query(User).filter(User.uname=='jack').one() fixture = self.static.user_address_result[0].addresses eq_(u.adlist, fixture) addr = sess.query(Address).filter_by(id=fixture[0].id).one() u = sess.query(User).filter(User.adname.contains(addr)).one() u2 = sess.query(User).filter(User.adlist.contains(addr)).one() assert u is u2 assert u not in sess.dirty u.uname = "some user name" assert len(assert_col) > 0 eq_(assert_col, [('set', 'some user name')]) eq_(u.uname, "some user name") eq_(assert_col, [('set', 'some user name'), ('get', 'some user name')]) eq_(u.name, "some user name") assert u in sess.dirty eq_(User.uname.attribute, 123) def test_synonym_of_synonym(self): users, User = (self.tables.users, self.classes.User) mapper(User, users, properties={ 'x':synonym('id'), 'y':synonym('x') }) s = Session() u = s.query(User).filter(User.y==8).one() eq_(u.y, 8) def test_synonym_column_location(self): 
users, User = self.tables.users, self.classes.User def go(): mapper(User, users, properties={ 'not_name':synonym('_name', map_column=True)}) assert_raises_message( sa.exc.ArgumentError, ("Can't compile synonym '_name': no column on table " "'users' named 'not_name'"), go) def test_column_synonyms(self): """Synonyms which automatically instrument properties, set up aliased column, etc.""" addresses, users, Address = (self.tables.addresses, self.tables.users, self.classes.Address) assert_col = [] class User(object): def _get_name(self): assert_col.append(('get', self._name)) return self._name def _set_name(self, name): assert_col.append(('set', name)) self._name = name name = property(_get_name, _set_name) mapper(Address, addresses) mapper(User, users, properties = { 'addresses':relationship(Address, lazy='select'), 'name':synonym('_name', map_column=True) }) # test compile assert not isinstance(User.name == 'jack', bool) assert hasattr(User, 'name') assert hasattr(User, '_name') sess = create_session() u = sess.query(User).filter(User.name == 'jack').one() eq_(u.name, 'jack') u.name = 'foo' eq_(u.name, 'foo') eq_(assert_col, [('get', 'jack'), ('set', 'foo'), ('get', 'foo')]) def test_synonym_map_column_conflict(self): users, User = self.tables.users, self.classes.User assert_raises( sa.exc.ArgumentError, mapper, User, users, properties=util.OrderedDict([ ('_user_id', users.c.id), ('id', synonym('_user_id', map_column=True)), ]) ) assert_raises( sa.exc.ArgumentError, mapper, User, users, properties=util.OrderedDict([ ('id', synonym('_user_id', map_column=True)), ('_user_id', users.c.id), ]) ) def test_comparable(self): users = self.tables.users class extendedproperty(property): attribute = 123 def method1(self): return "method1" from sqlalchemy.orm.properties import ColumnProperty class UCComparator(ColumnProperty.Comparator): __hash__ = None def method1(self): return "uccmethod1" def method2(self, other): return "method2" def __eq__(self, other): cls = self.prop.parent.class_ col = getattr(cls, 'name') if other is None: return col == None else: return sa.func.upper(col) == sa.func.upper(other) def map_(with_explicit_property): class User(object): @extendedproperty def uc_name(self): if self.name is None: return None return self.name.upper() if with_explicit_property: args = (UCComparator, User.uc_name) else: args = (UCComparator,) mapper(User, users, properties=dict( uc_name = sa.orm.comparable_property(*args))) return User for User in (map_(True), map_(False)): sess = create_session() sess.begin() q = sess.query(User) assert hasattr(User, 'name') assert hasattr(User, 'uc_name') eq_(User.uc_name.method1(), "method1") eq_(User.uc_name.method2('x'), "method2") assert_raises_message( AttributeError, "Neither 'extendedproperty' object nor 'UCComparator' " "object associated with User.uc_name has an attribute 'nonexistent'", getattr, User.uc_name, 'nonexistent') # test compile assert not isinstance(User.uc_name == 'jack', bool) u = q.filter(User.uc_name=='JACK').one() assert u.uc_name == "JACK" assert u not in sess.dirty u.name = "some user name" eq_(u.name, "some user name") assert u in sess.dirty eq_(u.uc_name, "SOME USER NAME") sess.flush() sess.expunge_all() q = sess.query(User) u2 = q.filter(User.name == 'some user name').one() u3 = q.filter(User.uc_name == 'SOME USER NAME').one() assert u2 is u3 eq_(User.uc_name.attribute, 123) sess.rollback() def test_comparable_column(self): users, User = self.tables.users, self.classes.User class 
MyComparator(sa.orm.properties.ColumnProperty.Comparator): __hash__ = None def __eq__(self, other): # lower case comparison return func.lower(self.__clause_element__() ) == func.lower(other) def intersects(self, other): # non-standard comparator return self.__clause_element__().op('&=')(other) mapper(User, users, properties={ 'name': sa.orm.column_property(users.c.name, comparator_factory=MyComparator) }) assert_raises_message( AttributeError, "Neither 'InstrumentedAttribute' object nor " "'MyComparator' object associated with User.name has " "an attribute 'nonexistent'", getattr, User.name, "nonexistent") eq_( str((User.name == 'ed').compile( dialect=sa.engine.default.DefaultDialect())), "lower(users.name) = lower(:lower_1)") eq_( str((User.name.intersects('ed')).compile( dialect=sa.engine.default.DefaultDialect())), "users.name &= :name_1") def test_reentrant_compile(self): users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) class MyFakeProperty(sa.orm.properties.ColumnProperty): def post_instrument_class(self, mapper): super(MyFakeProperty, self).post_instrument_class(mapper) configure_mappers() m1 = mapper(User, users, properties={ 'name':MyFakeProperty(users.c.name) }) m2 = mapper(Address, addresses) configure_mappers() sa.orm.clear_mappers() class MyFakeProperty(sa.orm.properties.ColumnProperty): def post_instrument_class(self, mapper): super(MyFakeProperty, self).post_instrument_class(mapper) configure_mappers() m1 = mapper(User, users, properties={ 'name':MyFakeProperty(users.c.name) }) m2 = mapper(Address, addresses) configure_mappers() def test_reconstructor(self): users = self.tables.users recon = [] class User(object): @reconstructor def reconstruct(self): recon.append('go') mapper(User, users) User() eq_(recon, []) create_session().query(User).first() eq_(recon, ['go']) def test_reconstructor_inheritance(self): users = self.tables.users recon = [] class A(object): @reconstructor def reconstruct(self): assert isinstance(self, A) recon.append('A') class B(A): @reconstructor def reconstruct(self): assert isinstance(self, B) recon.append('B') class C(A): @reconstructor def reconstruct(self): assert isinstance(self, C) recon.append('C') mapper(A, users, polymorphic_on=users.c.name, polymorphic_identity='jack') mapper(B, inherits=A, polymorphic_identity='ed') mapper(C, inherits=A, polymorphic_identity='chuck') A() B() C() eq_(recon, []) sess = create_session() sess.query(A).first() sess.query(B).first() sess.query(C).first() eq_(recon, ['A', 'B', 'C']) def test_unmapped_reconstructor_inheritance(self): users = self.tables.users recon = [] class Base(object): @reconstructor def reconstruct(self): recon.append('go') class User(Base): pass mapper(User, users) User() eq_(recon, []) create_session().query(User).first() eq_(recon, ['go']) def test_unmapped_error(self): Address, addresses, users, User = (self.classes.Address, self.tables.addresses, self.tables.users, self.classes.User) mapper(Address, addresses) sa.orm.clear_mappers() mapper(User, users, properties={ 'addresses':relationship(Address) }) assert_raises_message( sa.orm.exc.UnmappedClassError, "Class 'test.orm._fixtures.Address' is not mapped", sa.orm.configure_mappers) def test_unmapped_not_type_error(self): assert_raises_message( sa.exc.ArgumentError, "Class object expected, got '5'.", class_mapper, 5 ) def test_unmapped_subclass_error_postmap(self): users = self.tables.users class Base(object): pass class Sub(Base): pass mapper(Base, users) 
sa.orm.configure_mappers() # we can create new instances, set attributes. s = Sub() s.name = 'foo' eq_(s.name, 'foo') eq_( attributes.get_history(s, 'name'), (['foo'], (), ()) ) # using it with an ORM operation, raises assert_raises(sa.orm.exc.UnmappedClassError, create_session().add, Sub()) def test_unmapped_subclass_error_premap(self): users = self.tables.users class Base(object): pass mapper(Base, users) class Sub(Base): pass sa.orm.configure_mappers() # we can create new instances, set attributes. s = Sub() s.name = 'foo' eq_(s.name, 'foo') eq_( attributes.get_history(s, 'name'), (['foo'], (), ()) ) # using it with an ORM operation, raises assert_raises(sa.orm.exc.UnmappedClassError, create_session().add, Sub()) def test_oldstyle_mixin(self): users = self.tables.users class OldStyle: pass class NewStyle(object): pass class A(NewStyle, OldStyle): pass mapper(A, users) class B(OldStyle, NewStyle): pass mapper(B, users) class DocumentTest(fixtures.TestBase): def test_doc_propagate(self): metadata = MetaData() t1 = Table('t1', metadata, Column('col1', Integer, primary_key=True, doc="primary key column"), Column('col2', String, doc="data col"), Column('col3', String, doc="data col 2"), Column('col4', String, doc="data col 3"), Column('col5', String), ) t2 = Table('t2', metadata, Column('col1', Integer, primary_key=True, doc="primary key column"), Column('col2', String, doc="data col"), Column('col3', Integer, ForeignKey('t1.col1'), doc="foreign key to t1.col1") ) class Foo(object): pass class Bar(object): pass mapper(Foo, t1, properties={ 'bars':relationship(Bar, doc="bar relationship", backref=backref('foo',doc='foo relationship') ), 'foober':column_property(t1.c.col3, doc='alternate data col'), 'hoho':synonym("col4", doc="syn of col4") }) mapper(Bar, t2) configure_mappers() eq_(Foo.col1.__doc__, "primary key column") eq_(Foo.col2.__doc__, "data col") eq_(Foo.col5.__doc__, None) eq_(Foo.foober.__doc__, "alternate data col") eq_(Foo.bars.__doc__, "bar relationship") eq_(Foo.hoho.__doc__, "syn of col4") eq_(Bar.col1.__doc__, "primary key column") eq_(Bar.foo.__doc__, "foo relationship") class ORMLoggingTest(_fixtures.FixtureTest): def setup(self): self.buf = logging.handlers.BufferingHandler(100) for log in [ logging.getLogger('sqlalchemy.orm'), ]: log.addHandler(self.buf) def teardown(self): for log in [ logging.getLogger('sqlalchemy.orm'), ]: log.removeHandler(self.buf) def _current_messages(self): return [b.getMessage() for b in self.buf.buffer] def test_mapper_info_aliased(self): User, users = self.classes.User, self.tables.users tb = users.select().alias() mapper(User, tb) s = Session() s.add(User(name='ed')) s.commit() for msg in self._current_messages(): assert msg.startswith('(User|%%(%d anon)s) ' % id(tb)) class OptionsTest(_fixtures.FixtureTest): @testing.fails_on('maxdb', 'FIXME: unknown') def test_synonym_options(self): Address, addresses, users, User = (self.classes.Address, self.tables.addresses, self.tables.users, self.classes.User) mapper(User, users, properties=dict( addresses = relationship(mapper(Address, addresses), lazy='select', order_by=addresses.c.id), adlist = synonym('addresses'))) def go(): sess = create_session() u = (sess.query(User). order_by(User.id). options(sa.orm.joinedload('adlist')). 
filter_by(name='jack')).one() eq_(u.adlist, [self.static.user_address_result[0].addresses[0]]) self.assert_sql_count(testing.db, go, 1) def test_eager_options(self): """A lazy relationship can be upgraded to an eager relationship.""" Address, addresses, users, User = (self.classes.Address, self.tables.addresses, self.tables.users, self.classes.User) mapper(User, users, properties=dict( addresses = relationship(mapper(Address, addresses), order_by=addresses.c.id))) sess = create_session() l = (sess.query(User). order_by(User.id). options(sa.orm.joinedload('addresses'))).all() def go(): eq_(l, self.static.user_address_result) self.sql_count_(0, go) @testing.fails_on('maxdb', 'FIXME: unknown') def test_eager_options_with_limit(self): Address, addresses, users, User = (self.classes.Address, self.tables.addresses, self.tables.users, self.classes.User) mapper(User, users, properties=dict( addresses=relationship(mapper(Address, addresses), lazy='select'))) sess = create_session() u = (sess.query(User). options(sa.orm.joinedload('addresses')). filter_by(id=8)).one() def go(): eq_(u.id, 8) eq_(len(u.addresses), 3) self.sql_count_(0, go) sess.expunge_all() u = sess.query(User).filter_by(id=8).one() eq_(u.id, 8) eq_(len(u.addresses), 3) @testing.fails_on('maxdb', 'FIXME: unknown') def test_lazy_options_with_limit(self): Address, addresses, users, User = (self.classes.Address, self.tables.addresses, self.tables.users, self.classes.User) mapper(User, users, properties=dict( addresses = relationship(mapper(Address, addresses), lazy='joined'))) sess = create_session() u = (sess.query(User). options(sa.orm.lazyload('addresses')). filter_by(id=8)).one() def go(): eq_(u.id, 8) eq_(len(u.addresses), 3) self.sql_count_(1, go) def test_eager_degrade(self): """An eager relationship automatically degrades to a lazy relationship if eager columns are not available""" Address, addresses, users, User = (self.classes.Address, self.tables.addresses, self.tables.users, self.classes.User) mapper(User, users, properties=dict( addresses = relationship(mapper(Address, addresses), lazy='joined', order_by=addresses.c.id))) sess = create_session() # first test straight eager load, 1 statement def go(): l = sess.query(User).order_by(User.id).all() eq_(l, self.static.user_address_result) self.sql_count_(1, go) sess.expunge_all() # then select just from users. run it into instances. # then assert the data, which will launch 3 more lazy loads # (previous users in session fell out of scope and were removed from # session's identity map) r = users.select().order_by(users.c.id).execute() def go(): l = list(sess.query(User).instances(r)) eq_(l, self.static.user_address_result) self.sql_count_(4, go) def test_eager_degrade_deep(self): users, Keyword, items, order_items, orders, Item, User, Address, keywords, item_keywords, Order, addresses = (self.tables.users, self.classes.Keyword, self.tables.items, self.tables.order_items, self.tables.orders, self.classes.Item, self.classes.User, self.classes.Address, self.tables.keywords, self.tables.item_keywords, self.classes.Order, self.tables.addresses) # test with a deeper set of eager loads. when we first load the three # users, they will have no addresses or orders. the number of lazy # loads when traversing the whole thing will be three for the # addresses and three for the orders. 
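        # lazy='joined' below configures joined eager loading (LEFT OUTER
        # JOIN).  When the same mappings are later fed plain rows from
        # users.select() via Query.instances(), the joined-eager columns
        # are absent, so each collection silently degrades to a lazy load;
        # the SQL counts below measure exactly that.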
mapper(Address, addresses) mapper(Keyword, keywords) mapper(Item, items, properties=dict( keywords=relationship(Keyword, secondary=item_keywords, lazy='joined', order_by=item_keywords.c.keyword_id))) mapper(Order, orders, properties=dict( items=relationship(Item, secondary=order_items, lazy='joined', order_by=order_items.c.item_id))) mapper(User, users, properties=dict( addresses=relationship(Address, lazy='joined', order_by=addresses.c.id), orders=relationship(Order, lazy='joined', order_by=orders.c.id))) sess = create_session() # first test straight eager load, 1 statement def go(): l = sess.query(User).order_by(User.id).all() eq_(l, self.static.user_all_result) self.assert_sql_count(testing.db, go, 1) sess.expunge_all() # then select just from users. run it into instances. # then assert the data, which will launch 6 more lazy loads r = users.select().execute() def go(): l = list(sess.query(User).instances(r)) eq_(l, self.static.user_all_result) self.assert_sql_count(testing.db, go, 6) def test_lazy_options(self): """An eager relationship can be upgraded to a lazy relationship.""" Address, addresses, users, User = (self.classes.Address, self.tables.addresses, self.tables.users, self.classes.User) mapper(User, users, properties=dict( addresses = relationship(mapper(Address, addresses), lazy='joined') )) sess = create_session() l = (sess.query(User). order_by(User.id). options(sa.orm.lazyload('addresses'))).all() def go(): eq_(l, self.static.user_address_result) self.sql_count_(4, go) def test_option_propagate(self): users, items, order_items, Order, Item, User, orders = (self.tables.users, self.tables.items, self.tables.order_items, self.classes.Order, self.classes.Item, self.classes.User, self.tables.orders) mapper(User, users, properties=dict( orders = relationship(Order) )) mapper(Order, orders, properties=dict( items = relationship(Item, secondary=order_items) )) mapper(Item, items) sess = create_session() oalias = aliased(Order) opt1 = sa.orm.joinedload(User.orders, Order.items) opt2a, opt2b = sa.orm.contains_eager(User.orders, Order.items, alias=oalias) u1 = sess.query(User).join(oalias, User.orders).options(opt1, opt2a, opt2b).first() ustate = attributes.instance_state(u1) assert opt1 in ustate.load_options assert opt2a not in ustate.load_options assert opt2b not in ustate.load_options class DeepOptionsTest(_fixtures.FixtureTest): @classmethod def setup_mappers(cls): users, Keyword, items, order_items, Order, Item, User, keywords, item_keywords, orders = (cls.tables.users, cls.classes.Keyword, cls.tables.items, cls.tables.order_items, cls.classes.Order, cls.classes.Item, cls.classes.User, cls.tables.keywords, cls.tables.item_keywords, cls.tables.orders) mapper(Keyword, keywords) mapper(Item, items, properties=dict( keywords=relationship(Keyword, item_keywords, order_by=item_keywords.c.item_id))) mapper(Order, orders, properties=dict( items=relationship(Item, order_items, order_by=items.c.id))) mapper(User, users, order_by=users.c.id, properties=dict( orders=relationship(Order, order_by=orders.c.id))) def test_deep_options_1(self): User = self.classes.User sess = create_session() # joinedload nothing. u = sess.query(User).all() def go(): x = u[0].orders[1].items[0].keywords[1] self.assert_sql_count(testing.db, go, 3) def test_deep_options_2(self): """test (joined|subquery)load_all() options""" User = self.classes.User sess = create_session() l = (sess.query(User). 
options(sa.orm.joinedload_all('orders.items.keywords'))).all() def go(): x = l[0].orders[1].items[0].keywords[1] self.sql_count_(0, go) sess = create_session() l = (sess.query(User). options(sa.orm.subqueryload_all('orders.items.keywords'))).all() def go(): x = l[0].orders[1].items[0].keywords[1] self.sql_count_(0, go) def test_deep_options_3(self): User = self.classes.User sess = create_session() # same thing, with separate options calls q2 = (sess.query(User). options(sa.orm.joinedload('orders')). options(sa.orm.joinedload('orders.items')). options(sa.orm.joinedload('orders.items.keywords'))) u = q2.all() def go(): x = u[0].orders[1].items[0].keywords[1] self.sql_count_(0, go) def test_deep_options_4(self): Item, User, Order = (self.classes.Item, self.classes.User, self.classes.Order) sess = create_session() assert_raises_message( sa.exc.ArgumentError, "Can't find property 'items' on any entity " "specified in this Query.", sess.query(User).options, sa.orm.joinedload(Order.items)) # joinedload "keywords" on items. it will lazy load "orders", then # lazy load the "items" on the order, but on "items" it will eager # load the "keywords" q3 = sess.query(User).options(sa.orm.joinedload('orders.items.keywords')) u = q3.all() def go(): x = u[0].orders[1].items[0].keywords[1] self.sql_count_(2, go) sess = create_session() q3 = sess.query(User).options( sa.orm.joinedload(User.orders, Order.items, Item.keywords)) u = q3.all() def go(): x = u[0].orders[1].items[0].keywords[1] self.sql_count_(2, go) class ValidatorTest(_fixtures.FixtureTest): def test_scalar(self): users = self.tables.users canary = [] class User(fixtures.ComparableEntity): @validates('name') def validate_name(self, key, name): canary.append((key, name)) assert name != 'fred' return name + ' modified' mapper(User, users) sess = create_session() u1 = User(name='ed') eq_(u1.name, 'ed modified') assert_raises(AssertionError, setattr, u1, "name", "fred") eq_(u1.name, 'ed modified') eq_(canary, [('name', 'ed'), ('name', 'fred')]) sess.add(u1) sess.flush() sess.expunge_all() eq_(sess.query(User).filter_by(name='ed modified').one(), User(name='ed')) def test_collection(self): users, addresses, Address = (self.tables.users, self.tables.addresses, self.classes.Address) canary = [] class User(fixtures.ComparableEntity): @validates('addresses') def validate_address(self, key, ad): canary.append((key, ad)) assert '@' in ad.email_address return ad mapper(User, users, properties={'addresses':relationship(Address)}) mapper(Address, addresses) sess = create_session() u1 = User(name='edward') a0 = Address(email_address='noemail') assert_raises(AssertionError, u1.addresses.append, a0) a1 = Address(id=15, email_address='foo@bar.com') u1.addresses.append(a1) eq_(canary, [('addresses', a0), ('addresses', a1)]) sess.add(u1) sess.flush() sess.expunge_all() eq_( sess.query(User).filter_by(name='edward').one(), User(name='edward', addresses=[Address(email_address='foo@bar.com')]) ) def test_validators_dict(self): users, addresses, Address = (self.tables.users, self.tables.addresses, self.classes.Address) class User(fixtures.ComparableEntity): @validates('name') def validate_name(self, key, name): assert name != 'fred' return name + ' modified' @validates('addresses') def validate_address(self, key, ad): assert '@' in ad.email_address return ad def simple_function(self, key, value): return key, value u_m = mapper(User, users, properties={'addresses':relationship(Address)}) mapper(Address, addresses) eq_( dict((k, v[0].__name__) for k, v in 
u_m.validators.items()), {'name':'validate_name', 'addresses':'validate_address'} ) def test_validator_w_removes(self): users, addresses, Address = (self.tables.users, self.tables.addresses, self.classes.Address) canary = [] class User(fixtures.ComparableEntity): @validates('name', include_removes=True) def validate_name(self, key, item, remove): canary.append((key, item, remove)) return item @validates('addresses', include_removes=True) def validate_address(self, key, item, remove): canary.append((key, item, remove)) return item mapper(User, users, properties={'addresses':relationship(Address)}) mapper(Address, addresses) u1 = User() u1.name = "ed" u1.name = "mary" del u1.name a1, a2, a3 = Address(), Address(), Address() u1.addresses.append(a1) u1.addresses.remove(a1) u1.addresses = [a1, a2] u1.addresses = [a2, a3] eq_(canary, [ ('name', 'ed', False), ('name', 'mary', False), ('name', 'mary', True), # append a1 ('addresses', a1, False), # remove a1 ('addresses', a1, True), # set to [a1, a2] - this is two appends ('addresses', a1, False), ('addresses', a2, False), # set to [a2, a3] - this is a remove of a1, # append of a3. the appends are first. ('addresses', a3, False), ('addresses', a1, True), ] ) class ComparatorFactoryTest(_fixtures.FixtureTest, AssertsCompiledSQL): def test_kwarg_accepted(self): users, Address = self.tables.users, self.classes.Address class DummyComposite(object): def __init__(self, x, y): pass from sqlalchemy.orm.interfaces import PropComparator class MyFactory(PropComparator): pass for args in ( (column_property, users.c.name), (deferred, users.c.name), (synonym, 'name'), (composite, DummyComposite, users.c.id, users.c.name), (relationship, Address), (backref, 'address'), (comparable_property, ), (dynamic_loader, Address) ): fn = args[0] args = args[1:] fn(comparator_factory=MyFactory, *args) def test_column(self): User, users = self.classes.User, self.tables.users from sqlalchemy.orm.properties import ColumnProperty class MyFactory(ColumnProperty.Comparator): __hash__ = None def __eq__(self, other): return func.foobar(self.__clause_element__()) == func.foobar(other) mapper(User, users, properties={'name':column_property(users.c.name, comparator_factory=MyFactory)}) self.assert_compile(User.name == 'ed', "foobar(users.name) = foobar(:foobar_1)", dialect=default.DefaultDialect()) self.assert_compile(aliased(User).name == 'ed', "foobar(users_1.name) = foobar(:foobar_1)", dialect=default.DefaultDialect()) def test_synonym(self): users, User = self.tables.users, self.classes.User from sqlalchemy.orm.properties import ColumnProperty class MyFactory(ColumnProperty.Comparator): __hash__ = None def __eq__(self, other): return func.foobar(self.__clause_element__()) ==\ func.foobar(other) mapper(User, users, properties={ 'name':synonym('_name', map_column=True, comparator_factory=MyFactory) }) self.assert_compile( User.name == 'ed', "foobar(users.name) = foobar(:foobar_1)", dialect=default.DefaultDialect()) self.assert_compile( aliased(User).name == 'ed', "foobar(users_1.name) = foobar(:foobar_1)", dialect=default.DefaultDialect()) def test_relationship(self): users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) from sqlalchemy.orm.properties import PropertyLoader # NOTE: this API changed in 0.8, previously __clause_element__() # gave the parent selecatable, now it gives the # primaryjoin/secondaryjoin class MyFactory(PropertyLoader.Comparator): __hash__ = None def __eq__(self, other): return 
func.foobar(self._source_selectable().c.user_id) == \ func.foobar(other.id) class MyFactory2(PropertyLoader.Comparator): __hash__ = None def __eq__(self, other): return func.foobar(self._source_selectable().c.id) == \ func.foobar(other.user_id) mapper(User, users) mapper(Address, addresses, properties={ 'user': relationship(User, comparator_factory=MyFactory, backref=backref("addresses", comparator_factory=MyFactory2) ) } ) # these are kind of nonsensical tests. self.assert_compile(Address.user == User(id=5), "foobar(addresses.user_id) = foobar(:foobar_1)", dialect=default.DefaultDialect()) self.assert_compile(User.addresses == Address(id=5, user_id=7), "foobar(users.id) = foobar(:foobar_1)", dialect=default.DefaultDialect()) self.assert_compile( aliased(Address).user == User(id=5), "foobar(addresses_1.user_id) = foobar(:foobar_1)", dialect=default.DefaultDialect()) self.assert_compile( aliased(User).addresses == Address(id=5, user_id=7), "foobar(users_1.id) = foobar(:foobar_1)", dialect=default.DefaultDialect()) class DeferredTest(_fixtures.FixtureTest): def test_basic(self): """A basic deferred load.""" Order, orders = self.classes.Order, self.tables.orders mapper(Order, orders, order_by=orders.c.id, properties={ 'description': deferred(orders.c.description)}) o = Order() self.assert_(o.description is None) q = create_session().query(Order) def go(): l = q.all() o2 = l[2] x = o2.description self.sql_eq_(go, [ ("SELECT orders.id AS orders_id, " "orders.user_id AS orders_user_id, " "orders.address_id AS orders_address_id, " "orders.isopen AS orders_isopen " "FROM orders ORDER BY orders.id", {}), ("SELECT orders.description AS orders_description " "FROM orders WHERE orders.id = :param_1", {'param_1':3})]) def test_unsaved(self): """Deferred loading does not kick in when just PK cols are set.""" Order, orders = self.classes.Order, self.tables.orders mapper(Order, orders, properties={ 'description': deferred(orders.c.description)}) sess = create_session() o = Order() sess.add(o) o.id = 7 def go(): o.description = "some description" self.sql_count_(0, go) def test_synonym_group_bug(self): orders, Order = self.tables.orders, self.classes.Order mapper(Order, orders, properties={ 'isopen':synonym('_isopen', map_column=True), 'description':deferred(orders.c.description, group='foo') }) sess = create_session() o1 = sess.query(Order).get(1) eq_(o1.description, "order 1") def test_unsaved_2(self): Order, orders = self.classes.Order, self.tables.orders mapper(Order, orders, properties={ 'description': deferred(orders.c.description)}) sess = create_session() o = Order() sess.add(o) def go(): o.description = "some description" self.sql_count_(0, go) def test_unsaved_group(self): """Deferred loading doesnt kick in when just PK cols are set""" orders, Order = self.tables.orders, self.classes.Order mapper(Order, orders, order_by=orders.c.id, properties=dict( description=deferred(orders.c.description, group='primary'), opened=deferred(orders.c.isopen, group='primary'))) sess = create_session() o = Order() sess.add(o) o.id = 7 def go(): o.description = "some description" self.sql_count_(0, go) def test_unsaved_group_2(self): orders, Order = self.tables.orders, self.classes.Order mapper(Order, orders, order_by=orders.c.id, properties=dict( description=deferred(orders.c.description, group='primary'), opened=deferred(orders.c.isopen, group='primary'))) sess = create_session() o = Order() sess.add(o) def go(): o.description = "some description" self.sql_count_(0, go) def test_save(self): Order, orders = 
self.classes.Order, self.tables.orders m = mapper(Order, orders, properties={ 'description': deferred(orders.c.description)}) sess = create_session() o2 = sess.query(Order).get(2) o2.isopen = 1 sess.flush() def test_group(self): """Deferred load with a group""" orders, Order = self.tables.orders, self.classes.Order mapper(Order, orders, properties=util.OrderedDict([ ('userident', deferred(orders.c.user_id, group='primary')), ('addrident', deferred(orders.c.address_id, group='primary')), ('description', deferred(orders.c.description, group='primary')), ('opened', deferred(orders.c.isopen, group='primary')) ])) sess = create_session() q = sess.query(Order).order_by(Order.id) def go(): l = q.all() o2 = l[2] eq_(o2.opened, 1) eq_(o2.userident, 7) eq_(o2.description, 'order 3') self.sql_eq_(go, [ ("SELECT orders.id AS orders_id " "FROM orders ORDER BY orders.id", {}), ("SELECT orders.user_id AS orders_user_id, " "orders.address_id AS orders_address_id, " "orders.description AS orders_description, " "orders.isopen AS orders_isopen " "FROM orders WHERE orders.id = :param_1", {'param_1':3})]) o2 = q.all()[2] eq_(o2.description, 'order 3') assert o2 not in sess.dirty o2.description = 'order 3' def go(): sess.flush() self.sql_count_(0, go) def test_preserve_changes(self): """A deferred load operation doesn't revert modifications on attributes""" orders, Order = self.tables.orders, self.classes.Order mapper(Order, orders, properties = { 'userident': deferred(orders.c.user_id, group='primary'), 'description': deferred(orders.c.description, group='primary'), 'opened': deferred(orders.c.isopen, group='primary') }) sess = create_session() o = sess.query(Order).get(3) assert 'userident' not in o.__dict__ o.description = 'somenewdescription' eq_(o.description, 'somenewdescription') def go(): eq_(o.opened, 1) self.assert_sql_count(testing.db, go, 1) eq_(o.description, 'somenewdescription') assert o in sess.dirty def test_commits_state(self): """ When deferred elements are loaded via a group, they get the proper CommittedState and don't result in changes being committed """ orders, Order = self.tables.orders, self.classes.Order mapper(Order, orders, properties = { 'userident':deferred(orders.c.user_id, group='primary'), 'description':deferred(orders.c.description, group='primary'), 'opened':deferred(orders.c.isopen, group='primary')}) sess = create_session() o2 = sess.query(Order).get(3) # this will load the group of attributes eq_(o2.description, 'order 3') assert o2 not in sess.dirty # this will mark it as 'dirty', but nothing actually changed o2.description = 'order 3' # therefore the flush() shouldnt actually issue any SQL self.assert_sql_count(testing.db, sess.flush, 0) def test_options(self): """Options on a mapper to create deferred and undeferred columns""" orders, Order = self.tables.orders, self.classes.Order mapper(Order, orders) sess = create_session() q = sess.query(Order).order_by(Order.id).options(defer('user_id')) def go(): q.all()[0].user_id self.sql_eq_(go, [ ("SELECT orders.id AS orders_id, " "orders.address_id AS orders_address_id, " "orders.description AS orders_description, " "orders.isopen AS orders_isopen " "FROM orders ORDER BY orders.id", {}), ("SELECT orders.user_id AS orders_user_id " "FROM orders WHERE orders.id = :param_1", {'param_1':1})]) sess.expunge_all() q2 = q.options(sa.orm.undefer('user_id')) self.sql_eq_(q2.all, [ ("SELECT orders.id AS orders_id, " "orders.user_id AS orders_user_id, " "orders.address_id AS orders_address_id, " "orders.description AS orders_description, 
" "orders.isopen AS orders_isopen " "FROM orders ORDER BY orders.id", {})]) def test_undefer_group(self): orders, Order = self.tables.orders, self.classes.Order mapper(Order, orders, properties=util.OrderedDict([ ('userident',deferred(orders.c.user_id, group='primary')), ('description',deferred(orders.c.description, group='primary')), ('opened',deferred(orders.c.isopen, group='primary')) ] )) sess = create_session() q = sess.query(Order).order_by(Order.id) def go(): l = q.options(sa.orm.undefer_group('primary')).all() o2 = l[2] eq_(o2.opened, 1) eq_(o2.userident, 7) eq_(o2.description, 'order 3') self.sql_eq_(go, [ ("SELECT orders.user_id AS orders_user_id, " "orders.description AS orders_description, " "orders.isopen AS orders_isopen, " "orders.id AS orders_id, " "orders.address_id AS orders_address_id " "FROM orders ORDER BY orders.id", {})]) def test_locates_col(self): """Manually adding a column to the result undefers the column.""" orders, Order = self.tables.orders, self.classes.Order mapper(Order, orders, properties={ 'description':deferred(orders.c.description)}) sess = create_session() o1 = sess.query(Order).order_by(Order.id).first() def go(): eq_(o1.description, 'order 1') self.sql_count_(1, go) sess = create_session() o1 = (sess.query(Order). order_by(Order.id). add_column(orders.c.description).first())[0] def go(): eq_(o1.description, 'order 1') self.sql_count_(0, go) def test_map_selectable_wo_deferred(self): """test mapping to a selectable with deferred cols, the selectable doesn't include the deferred col. """ Order, orders = self.classes.Order, self.tables.orders order_select = sa.select([ orders.c.id, orders.c.user_id, orders.c.address_id, orders.c.description, orders.c.isopen]).alias() mapper(Order, order_select, properties={ 'description':deferred(order_select.c.description) }) sess = Session() o1 = sess.query(Order).order_by(Order.id).first() assert 'description' not in o1.__dict__ eq_(o1.description, 'order 1') def test_deep_options(self): users, items, order_items, Order, Item, User, orders = (self.tables.users, self.tables.items, self.tables.order_items, self.classes.Order, self.classes.Item, self.classes.User, self.tables.orders) mapper(Item, items, properties=dict( description=deferred(items.c.description))) mapper(Order, orders, properties=dict( items=relationship(Item, secondary=order_items))) mapper(User, users, properties=dict( orders=relationship(Order, order_by=orders.c.id))) sess = create_session() q = sess.query(User).order_by(User.id) l = q.all() item = l[0].orders[1].items[1] def go(): eq_(item.description, 'item 4') self.sql_count_(1, go) eq_(item.description, 'item 4') sess.expunge_all() l = q.options(sa.orm.undefer('orders.items.description')).all() item = l[0].orders[1].items[1] def go(): eq_(item.description, 'item 4') self.sql_count_(0, go) eq_(item.description, 'item 4') class SecondaryOptionsTest(fixtures.MappedTest): """test that the contains_eager() option doesn't bleed into a secondary load.""" run_inserts = 'once' run_deletes = None @classmethod def define_tables(cls, metadata): Table("base", metadata, Column('id', Integer, primary_key=True), Column('type', String(50), nullable=False) ) Table("child1", metadata, Column('id', Integer, ForeignKey('base.id'), primary_key=True), Column('child2id', Integer, ForeignKey('child2.id'), nullable=False) ) Table("child2", metadata, Column('id', Integer, ForeignKey('base.id'), primary_key=True), ) Table('related', metadata, Column('id', Integer, ForeignKey('base.id'), primary_key=True), ) @classmethod def 
setup_mappers(cls): child1, child2, base, related = (cls.tables.child1, cls.tables.child2, cls.tables.base, cls.tables.related) class Base(cls.Comparable): pass class Child1(Base): pass class Child2(Base): pass class Related(cls.Comparable): pass mapper(Base, base, polymorphic_on=base.c.type, properties={ 'related':relationship(Related, uselist=False) }) mapper(Child1, child1, inherits=Base, polymorphic_identity='child1', properties={ 'child2':relationship(Child2, primaryjoin=child1.c.child2id==base.c.id, foreign_keys=child1.c.child2id) }) mapper(Child2, child2, inherits=Base, polymorphic_identity='child2') mapper(Related, related) @classmethod def insert_data(cls): child1, child2, base, related = (cls.tables.child1, cls.tables.child2, cls.tables.base, cls.tables.related) base.insert().execute([ {'id':1, 'type':'child1'}, {'id':2, 'type':'child1'}, {'id':3, 'type':'child1'}, {'id':4, 'type':'child2'}, {'id':5, 'type':'child2'}, {'id':6, 'type':'child2'}, ]) child2.insert().execute([ {'id':4}, {'id':5}, {'id':6}, ]) child1.insert().execute([ {'id':1, 'child2id':4}, {'id':2, 'child2id':5}, {'id':3, 'child2id':6}, ]) related.insert().execute([ {'id':1}, {'id':2}, {'id':3}, {'id':4}, {'id':5}, {'id':6}, ]) def test_contains_eager(self): Child1, Related = self.classes.Child1, self.classes.Related sess = create_session() child1s = sess.query(Child1).\ join(Child1.related).\ options(sa.orm.contains_eager(Child1.related)).\ order_by(Child1.id) def go(): eq_( child1s.all(), [ Child1(id=1, related=Related(id=1)), Child1(id=2, related=Related(id=2)), Child1(id=3, related=Related(id=3)) ] ) self.assert_sql_count(testing.db, go, 1) c1 = child1s[0] self.assert_sql_execution( testing.db, lambda: c1.child2, CompiledSQL( "SELECT child2.id AS child2_id, base.id AS base_id, base.type AS base_type " "FROM base JOIN child2 ON base.id = child2.id " "WHERE base.id = :param_1", {'param_1':4} ) ) def test_joinedload_on_other(self): Child1, Related = self.classes.Child1, self.classes.Related sess = create_session() child1s = sess.query(Child1).join(Child1.related).options(sa.orm.joinedload(Child1.related)).order_by(Child1.id) def go(): eq_( child1s.all(), [Child1(id=1, related=Related(id=1)), Child1(id=2, related=Related(id=2)), Child1(id=3, related=Related(id=3))] ) self.assert_sql_count(testing.db, go, 1) c1 = child1s[0] self.assert_sql_execution( testing.db, lambda: c1.child2, CompiledSQL( "SELECT child2.id AS child2_id, base.id AS base_id, base.type AS base_type " "FROM base JOIN child2 ON base.id = child2.id WHERE base.id = :param_1", # joinedload- this shouldn't happen # "SELECT base.id AS base_id, child2.id AS child2_id, base.type AS base_type, " # "related_1.id AS related_1_id FROM base JOIN child2 ON base.id = child2.id " # "LEFT OUTER JOIN related AS related_1 ON base.id = related_1.id WHERE base.id = :param_1", {'param_1':4} ) ) def test_joinedload_on_same(self): Child1, Child2, Related = (self.classes.Child1, self.classes.Child2, self.classes.Related) sess = create_session() child1s = sess.query(Child1).join(Child1.related).options(sa.orm.joinedload(Child1.child2, Child2.related)).order_by(Child1.id) def go(): eq_( child1s.all(), [Child1(id=1, related=Related(id=1)), Child1(id=2, related=Related(id=2)), Child1(id=3, related=Related(id=3))] ) self.assert_sql_count(testing.db, go, 4) c1 = child1s[0] # this *does* joinedload self.assert_sql_execution( testing.db, lambda: c1.child2, CompiledSQL( "SELECT child2.id AS child2_id, base.id AS base_id, base.type AS base_type, " "related_1.id AS related_1_id FROM 
base JOIN child2 ON base.id = child2.id " "LEFT OUTER JOIN related AS related_1 ON base.id = related_1.id WHERE base.id = :param_1", {'param_1':4} ) ) class DeferredPopulationTest(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): Table("thing", metadata, Column("id", Integer, primary_key=True, test_needs_autoincrement=True), Column("name", String(20))) Table("human", metadata, Column("id", Integer, primary_key=True, test_needs_autoincrement=True), Column("thing_id", Integer, ForeignKey("thing.id")), Column("name", String(20))) @classmethod def setup_mappers(cls): thing, human = cls.tables.thing, cls.tables.human class Human(cls.Basic): pass class Thing(cls.Basic): pass mapper(Human, human, properties={"thing": relationship(Thing)}) mapper(Thing, thing, properties={"name": deferred(thing.c.name)}) @classmethod def insert_data(cls): thing, human = cls.tables.thing, cls.tables.human thing.insert().execute([ {"id": 1, "name": "Chair"}, ]) human.insert().execute([ {"id": 1, "thing_id": 1, "name": "Clark Kent"}, ]) def _test(self, thing): assert "name" in attributes.instance_state(thing).dict def test_no_previous_query(self): Thing = self.classes.Thing session = create_session() thing = session.query(Thing).options(sa.orm.undefer("name")).first() self._test(thing) def test_query_twice_with_clear(self): Thing = self.classes.Thing session = create_session() result = session.query(Thing).first() session.expunge_all() thing = session.query(Thing).options(sa.orm.undefer("name")).first() self._test(thing) def test_query_twice_no_clear(self): Thing = self.classes.Thing session = create_session() result = session.query(Thing).first() thing = session.query(Thing).options(sa.orm.undefer("name")).first() self._test(thing) def test_joinedload_with_clear(self): Thing, Human = self.classes.Thing, self.classes.Human session = create_session() human = session.query(Human).options(sa.orm.joinedload("thing")).first() session.expunge_all() thing = session.query(Thing).options(sa.orm.undefer("name")).first() self._test(thing) def test_joinedload_no_clear(self): Thing, Human = self.classes.Thing, self.classes.Human session = create_session() human = session.query(Human).options(sa.orm.joinedload("thing")).first() thing = session.query(Thing).options(sa.orm.undefer("name")).first() self._test(thing) def test_join_with_clear(self): Thing, Human = self.classes.Thing, self.classes.Human session = create_session() result = session.query(Human).add_entity(Thing).join("thing").first() session.expunge_all() thing = session.query(Thing).options(sa.orm.undefer("name")).first() self._test(thing) def test_join_no_clear(self): Thing, Human = self.classes.Thing, self.classes.Human session = create_session() result = session.query(Human).add_entity(Thing).join("thing").first() thing = session.query(Thing).options(sa.orm.undefer("name")).first() self._test(thing) class NoLoadTest(_fixtures.FixtureTest): run_inserts = 'once' run_deletes = None def test_basic(self): """A basic one-to-many lazy load""" Address, addresses, users, User = (self.classes.Address, self.tables.addresses, self.tables.users, self.classes.User) m = mapper(User, users, properties=dict( addresses = relationship(mapper(Address, addresses), lazy='noload') )) q = create_session().query(m) l = [None] def go(): x = q.filter(User.id == 7).all() x[0].addresses l[0] = x self.assert_sql_count(testing.db, go, 1) self.assert_result(l[0], User, {'id' : 7, 'addresses' : (Address, [])}, ) def test_options(self): Address, addresses, users, User = 
(self.classes.Address, self.tables.addresses, self.tables.users, self.classes.User) m = mapper(User, users, properties=dict( addresses = relationship(mapper(Address, addresses), lazy='noload') )) q = create_session().query(m).options(sa.orm.lazyload('addresses')) l = [None] def go(): x = q.filter(User.id == 7).all() x[0].addresses l[0] = x self.sql_count_(2, go) self.assert_result(l[0], User, {'id' : 7, 'addresses' : (Address, [{'id' : 1}])}, ) class RequirementsTest(fixtures.MappedTest): """Tests the contract for user classes.""" @classmethod def define_tables(cls, metadata): Table('ht1', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('value', String(10))) Table('ht2', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('ht1_id', Integer, ForeignKey('ht1.id')), Column('value', String(10))) Table('ht3', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('value', String(10))) Table('ht4', metadata, Column('ht1_id', Integer, ForeignKey('ht1.id'), primary_key=True), Column('ht3_id', Integer, ForeignKey('ht3.id'), primary_key=True)) Table('ht5', metadata, Column('ht1_id', Integer, ForeignKey('ht1.id'), primary_key=True)) Table('ht6', metadata, Column('ht1a_id', Integer, ForeignKey('ht1.id'), primary_key=True), Column('ht1b_id', Integer, ForeignKey('ht1.id'), primary_key=True), Column('value', String(10))) # Py2K def test_baseclass(self): ht1 = self.tables.ht1 class OldStyle: pass assert_raises(sa.exc.ArgumentError, mapper, OldStyle, ht1) assert_raises(sa.exc.ArgumentError, mapper, 123) class NoWeakrefSupport(str): pass # TODO: is weakref support detectable without an instance? #self.assertRaises(sa.exc.ArgumentError, mapper, NoWeakrefSupport, t2) # end Py2K class _ValueBase(object): def __init__(self, value='abc', id=None): self.id = id self.value = value def __nonzero__(self): return False def __hash__(self): return hash(self.value) def __eq__(self, other): if isinstance(other, type(self)): return self.value == other.value return False def test_comparison_overrides(self): """Simple tests to ensure users can supply comparison __methods__. The suite-level test --options are better suited to detect problems- they add selected __methods__ across the board on all ORM tests. This test simply shoves a variety of operations through the ORM to catch basic regressions early in a standard test run. 
""" ht6, ht5, ht4, ht3, ht2, ht1 = (self.tables.ht6, self.tables.ht5, self.tables.ht4, self.tables.ht3, self.tables.ht2, self.tables.ht1) class H1(self._ValueBase): pass class H2(self._ValueBase): pass class H3(self._ValueBase): pass class H6(self._ValueBase): pass mapper(H1, ht1, properties={ 'h2s': relationship(H2, backref='h1'), 'h3s': relationship(H3, secondary=ht4, backref='h1s'), 'h1s': relationship(H1, secondary=ht5, backref='parent_h1'), 't6a': relationship(H6, backref='h1a', primaryjoin=ht1.c.id==ht6.c.ht1a_id), 't6b': relationship(H6, backref='h1b', primaryjoin=ht1.c.id==ht6.c.ht1b_id), }) mapper(H2, ht2) mapper(H3, ht3) mapper(H6, ht6) s = create_session() s.add_all([ H1('abc'), H1('def'), ]) h1 = H1('ghi') s.add(h1) h1.h2s.append(H2('abc')) h1.h3s.extend([H3(), H3()]) h1.h1s.append(H1()) s.flush() eq_(ht1.count().scalar(), 4) h6 = H6() h6.h1a = h1 h6.h1b = h1 h6 = H6() h6.h1a = h1 h6.h1b = x = H1() assert x in s h6.h1b.h2s.append(H2('def')) s.flush() h1.h2s.extend([H2('abc'), H2('def')]) s.flush() h1s = s.query(H1).options(sa.orm.joinedload('h2s')).all() eq_(len(h1s), 5) self.assert_unordered_result(h1s, H1, {'h2s': []}, {'h2s': []}, {'h2s': (H2, [{'value': 'abc'}, {'value': 'def'}, {'value': 'abc'}])}, {'h2s': []}, {'h2s': (H2, [{'value': 'def'}])}) h1s = s.query(H1).options(sa.orm.joinedload('h3s')).all() eq_(len(h1s), 5) h1s = s.query(H1).options(sa.orm.joinedload_all('t6a.h1b'), sa.orm.joinedload('h2s'), sa.orm.joinedload_all('h3s.h1s')).all() eq_(len(h1s), 5) def test_composite_results(self): ht2, ht1 = (self.tables.ht2, self.tables.ht1) class H1(self._ValueBase): def __init__(self, value, id, h2s): self.value = value self.id = id self.h2s = h2s class H2(self._ValueBase): def __init__(self, value, id): self.value = value self.id = id mapper(H1, ht1, properties={ 'h2s': relationship(H2, backref='h1'), }) mapper(H2, ht2) s = Session() s.add_all([ H1('abc', 1, h2s=[ H2('abc', id=1), H2('def', id=2), H2('def', id=3), ]), H1('def', 2, h2s=[ H2('abc', id=4), H2('abc', id=5), H2('def', id=6), ]), ]) s.commit() eq_( [(h1.value, h1.id, h2.value, h2.id) for h1, h2 in s.query(H1, H2).join(H1.h2s).order_by(H1.id, H2.id)], [ ('abc', 1, 'abc', 1), ('abc', 1, 'def', 2), ('abc', 1, 'def', 3), ('def', 2, 'abc', 4), ('def', 2, 'abc', 5), ('def', 2, 'def', 6), ] ) def test_nonzero_len_recursion(self): ht1 = self.tables.ht1 class H1(object): def __len__(self): return len(self.get_value()) def get_value(self): self.value = "foobar" return self.value class H2(object): def __nonzero__(self): return bool(self.get_value()) def get_value(self): self.value = "foobar" return self.value mapper(H1, ht1) mapper(H2, ht1) h1 = H1() h1.value = "Asdf" h1.value = "asdf asdf" # ding h2 = H2() h2.value = "Asdf" h2.value = "asdf asdf" # ding class IsUserlandTest(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): Table('foo', metadata, Column('id', Integer, primary_key=True), Column('someprop', Integer) ) def _test(self, value, instancelevel=None): class Foo(object): someprop = value m = mapper(Foo, self.tables.foo) eq_(Foo.someprop, value) f1 = Foo() if instancelevel is not None: eq_(f1.someprop, instancelevel) else: eq_(f1.someprop, value) assert self.tables.foo.c.someprop not in m._columntoproperty def _test_not(self, value): class Foo(object): someprop = value m = mapper(Foo, self.tables.foo) is_(Foo.someprop.property.columns[0], self.tables.foo.c.someprop) assert self.tables.foo.c.someprop in m._columntoproperty def test_string(self): self._test("someprop") def test_unicode(self): 
self._test(u"someprop") def test_int(self): self._test(5) def test_dict(self): self._test({"bar": "bat"}) def test_set(self): self._test(set([6])) def test_column(self): self._test_not(self.tables.foo.c.someprop) def test_relationship(self): self._test_not(relationship("bar")) def test_descriptor(self): def somefunc(self): return "hi" self._test(property(somefunc), "hi") class MagicNamesTest(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): Table('cartographers', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', String(50)), Column('alias', String(50)), Column('quip', String(100))) Table('maps', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('cart_id', Integer, ForeignKey('cartographers.id')), Column('state', String(2)), Column('data', sa.Text)) @classmethod def setup_classes(cls): class Cartographer(cls.Basic): pass class Map(cls.Basic): pass def test_mappish(self): maps, Cartographer, cartographers, Map = (self.tables.maps, self.classes.Cartographer, self.tables.cartographers, self.classes.Map) mapper(Cartographer, cartographers, properties=dict( query=cartographers.c.quip)) mapper(Map, maps, properties=dict( mapper=relationship(Cartographer, backref='maps'))) c = Cartographer(name='Lenny', alias='The Dude', query='Where be dragons?') m = Map(state='AK', mapper=c) sess = create_session() sess.add(c) sess.flush() sess.expunge_all() for C, M in ((Cartographer, Map), (sa.orm.aliased(Cartographer), sa.orm.aliased(Map))): c1 = (sess.query(C). filter(C.alias=='The Dude'). filter(C.query=='Where be dragons?')).one() m1 = sess.query(M).filter(M.mapper==c1).one() def test_direct_stateish(self): for reserved in (sa.orm.instrumentation.ClassManager.STATE_ATTR, sa.orm.instrumentation.ClassManager.MANAGER_ATTR): t = Table('t', sa.MetaData(), Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column(reserved, Integer)) class T(object): pass assert_raises_message( KeyError, ('%r: requested attribute name conflicts with ' 'instrumentation attribute of the same name.' 
% reserved), mapper, T, t) def test_indirect_stateish(self): maps = self.tables.maps for reserved in (sa.orm.instrumentation.ClassManager.STATE_ATTR, sa.orm.instrumentation.ClassManager.MANAGER_ATTR): class M(object): pass assert_raises_message( KeyError, ('requested attribute name conflicts with ' 'instrumentation attribute of the same name'), mapper, M, maps, properties={ reserved: maps.c.state}) SQLAlchemy-0.8.4/test/orm/test_merge.py0000644000076500000240000013122612251150016020513 0ustar classicstaff00000000000000from sqlalchemy.testing import assert_raises, assert_raises_message import sqlalchemy as sa from sqlalchemy import Integer, PickleType, String, ForeignKey import operator from sqlalchemy import testing from sqlalchemy.util import OrderedSet from sqlalchemy.orm import mapper, relationship, create_session, \ PropComparator, synonym, comparable_property, sessionmaker, \ attributes, Session, backref, configure_mappers from sqlalchemy.orm.collections import attribute_mapped_collection from sqlalchemy.orm.interfaces import MapperOption from sqlalchemy.testing import eq_, ne_ from sqlalchemy.testing import fixtures from test.orm import _fixtures from sqlalchemy import event, and_, case from sqlalchemy.testing.schema import Table, Column class MergeTest(_fixtures.FixtureTest): """Session.merge() functionality""" run_inserts = None def load_tracker(self, cls, canary=None): if canary is None: def canary(instance, *args): canary.called += 1 canary.called = 0 event.listen(cls, 'load', canary) return canary def test_transient_to_pending(self): User, users = self.classes.User, self.tables.users mapper(User, users) sess = create_session() load = self.load_tracker(User) u = User(id=7, name='fred') eq_(load.called, 0) u2 = sess.merge(u) eq_(load.called, 1) assert u2 in sess eq_(u2, User(id=7, name='fred')) sess.flush() sess.expunge_all() eq_(sess.query(User).first(), User(id=7, name='fred')) def test_transient_to_pending_no_pk(self): """test that a transient object with no PK attribute doesn't trigger a needless load.""" User, users = self.classes.User, self.tables.users mapper(User, users) sess = create_session() u = User(name='fred') def go(): sess.merge(u) self.assert_sql_count(testing.db, go, 0) def test_transient_to_pending_collection(self): User, Address, addresses, users = (self.classes.User, self.classes.Address, self.tables.addresses, self.tables.users) mapper(User, users, properties={ 'addresses': relationship(Address, backref='user', collection_class=OrderedSet)}) mapper(Address, addresses) load = self.load_tracker(User) self.load_tracker(Address, load) u = User(id=7, name='fred', addresses=OrderedSet([ Address(id=1, email_address='fred1'), Address(id=2, email_address='fred2'), ])) eq_(load.called, 0) sess = create_session() sess.merge(u) eq_(load.called, 3) merged_users = [e for e in sess if isinstance(e, User)] eq_(len(merged_users), 1) assert merged_users[0] is not u sess.flush() sess.expunge_all() eq_(sess.query(User).one(), User(id=7, name='fred', addresses=OrderedSet([ Address(id=1, email_address='fred1'), Address(id=2, email_address='fred2'), ])) ) def test_transient_to_persistent(self): User, users = self.classes.User, self.tables.users mapper(User, users) load = self.load_tracker(User) sess = create_session() u = User(id=7, name='fred') sess.add(u) sess.flush() sess.expunge_all() eq_(load.called, 0) _u2 = u2 = User(id=7, name='fred jones') eq_(load.called, 0) u2 = sess.merge(u2) assert u2 is not _u2 eq_(load.called, 1) sess.flush() sess.expunge_all() 
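        # merge() above received a transient User whose primary key already
        # exists in the database: the persistent row was loaded once
        # (load.called went from 0 to 1), the transient state was copied
        # onto it, and a distinct persistent instance came back
        # (u2 is not _u2).  The re-query below confirms the merged name
        # was flushed.
        #
        # Illustrative usage sketch (not part of the original test):
        #     merged = sess.merge(User(id=7, name='fred jones'))
        #     assert merged in sess and merged.id == 7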
eq_(sess.query(User).first(), User(id=7, name='fred jones')) eq_(load.called, 2) def test_transient_to_persistent_collection(self): User, Address, addresses, users = (self.classes.User, self.classes.Address, self.tables.addresses, self.tables.users) mapper(User, users, properties={ 'addresses':relationship(Address, backref='user', collection_class=OrderedSet, order_by=addresses.c.id, cascade="all, delete-orphan") }) mapper(Address, addresses) load = self.load_tracker(User) self.load_tracker(Address, load) u = User(id=7, name='fred', addresses=OrderedSet([ Address(id=1, email_address='fred1'), Address(id=2, email_address='fred2'), ])) sess = create_session() sess.add(u) sess.flush() sess.expunge_all() eq_(load.called, 0) u = User(id=7, name='fred', addresses=OrderedSet([ Address(id=3, email_address='fred3'), Address(id=4, email_address='fred4'), ])) u = sess.merge(u) # 1. merges User object. updates into session. # 2.,3. merges Address ids 3 & 4, saves into session. # 4.,5. loads pre-existing elements in "addresses" collection, # marks as deleted, Address ids 1 and 2. eq_(load.called, 5) eq_(u, User(id=7, name='fred', addresses=OrderedSet([ Address(id=3, email_address='fred3'), Address(id=4, email_address='fred4'), ])) ) sess.flush() sess.expunge_all() eq_(sess.query(User).one(), User(id=7, name='fred', addresses=OrderedSet([ Address(id=3, email_address='fred3'), Address(id=4, email_address='fred4'), ])) ) def test_detached_to_persistent_collection(self): users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(User, users, properties={ 'addresses':relationship(Address, backref='user', order_by=addresses.c.id, collection_class=OrderedSet)}) mapper(Address, addresses) load = self.load_tracker(User) self.load_tracker(Address, load) a = Address(id=1, email_address='fred1') u = User(id=7, name='fred', addresses=OrderedSet([ a, Address(id=2, email_address='fred2'), ])) sess = create_session() sess.add(u) sess.flush() sess.expunge_all() u.name='fred jones' u.addresses.add(Address(id=3, email_address='fred3')) u.addresses.remove(a) eq_(load.called, 0) u = sess.merge(u) eq_(load.called, 4) sess.flush() sess.expunge_all() eq_(sess.query(User).first(), User(id=7, name='fred jones', addresses=OrderedSet([ Address(id=2, email_address='fred2'), Address(id=3, email_address='fred3')]))) def test_unsaved_cascade(self): """Merge of a transient entity with two child transient entities, with a bidirectional relationship.""" users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(User, users, properties={ 'addresses':relationship(mapper(Address, addresses), cascade="all", backref="user") }) load = self.load_tracker(User) self.load_tracker(Address, load) sess = create_session() u = User(id=7, name='fred') a1 = Address(email_address='foo@bar.com') a2 = Address(email_address='hoho@bar.com') u.addresses.append(a1) u.addresses.append(a2) u2 = sess.merge(u) eq_(load.called, 3) eq_(u, User(id=7, name='fred', addresses=[ Address(email_address='foo@bar.com'), Address(email_address='hoho@bar.com')])) eq_(u2, User(id=7, name='fred', addresses=[ Address(email_address='foo@bar.com'), Address(email_address='hoho@bar.com')])) sess.flush() sess.expunge_all() u2 = sess.query(User).get(7) eq_(u2, User(id=7, name='fred', addresses=[ Address(email_address='foo@bar.com'), Address(email_address='hoho@bar.com')])) eq_(load.called, 6) def test_merge_empty_attributes(self): User, dingalings = 
self.classes.User, self.tables.dingalings mapper(User, dingalings) sess = create_session() # merge empty stuff. goes in as NULL. # not sure what this was originally trying to # test. u1 = sess.merge(User(id=1)) sess.flush() assert u1.data is None # save another user with "data" u2 = User(id=2, data="foo") sess.add(u2) sess.flush() # merge User on u2's pk with # no "data". # value isn't whacked from the destination # dict. u3 = sess.merge(User(id=2)) eq_(u3.__dict__['data'], "foo") # make a change. u3.data = 'bar' # merge another no-"data" user. # attribute maintains modified state. # (usually autoflush would have happened # here anyway). u4 = sess.merge(User(id=2)) eq_(u3.__dict__['data'], "bar") sess.flush() # and after the flush. eq_(u3.data, "bar") # new row. u5 = User(id=3, data="foo") sess.add(u5) sess.flush() # blow it away from u5, but don't # mark as expired. so it would just # be blank. del u5.data # the merge adds expiry to the # attribute so that it loads. # not sure if I like this - it currently is needed # for test_pickled:PickleTest.test_instance_deferred_cols u6 = sess.merge(User(id=3)) assert 'data' not in u6.__dict__ assert u6.data == "foo" # set it to None. this is actually # a change so gets preserved. u6.data = None u7 = sess.merge(User(id=3)) assert u6.__dict__['data'] is None def test_merge_irregular_collection(self): users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(User, users, properties={ 'addresses': relationship( mapper(Address, addresses), backref='user', collection_class= attribute_mapped_collection('email_address')), }) u1 = User(id=7, name='fred') u1.addresses['foo@bar.com'] = Address(email_address='foo@bar.com') sess = create_session() sess.merge(u1) sess.flush() assert u1.addresses.keys() == ['foo@bar.com'] def test_attribute_cascade(self): """Merge of a persistent entity with two child persistent entities.""" users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(User, users, properties={ 'addresses':relationship(mapper(Address, addresses), backref='user') }) load = self.load_tracker(User) self.load_tracker(Address, load) sess = create_session() # set up data and save u = User(id=7, name='fred', addresses=[ Address(email_address='foo@bar.com'), Address(email_address = 'hoho@la.com')]) sess.add(u) sess.flush() # assert data was saved sess2 = create_session() u2 = sess2.query(User).get(7) eq_(u2, User(id=7, name='fred', addresses=[ Address(email_address='foo@bar.com'), Address(email_address='hoho@la.com')])) # make local changes to data u.name = 'fred2' u.addresses[1].email_address = 'hoho@lalala.com' eq_(load.called, 3) # new session, merge modified data into session sess3 = create_session() u3 = sess3.merge(u) eq_(load.called, 6) # ensure local changes are pending eq_(u3, User(id=7, name='fred2', addresses=[ Address(email_address='foo@bar.com'), Address(email_address='hoho@lalala.com')])) # save merged data sess3.flush() # assert modified/merged data was saved sess.expunge_all() u = sess.query(User).get(7) eq_(u, User(id=7, name='fred2', addresses=[ Address(email_address='foo@bar.com'), Address(email_address='hoho@lalala.com')])) eq_(load.called, 9) # merge persistent object into another session sess4 = create_session() u = sess4.merge(u) assert len(u.addresses) for a in u.addresses: assert a.user is u def go(): sess4.flush() # no changes; therefore flush should do nothing self.assert_sql_count(testing.db, go, 0) 
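# --- Illustrative, standalone sketch (not part of the original test module) ---
# test_merge_empty_attributes above exercises this rule: merging an object that
# leaves an attribute unset does not wipe out the value already present on the
# persistent copy.  Names below are placeholders, not the fixture classes.
import sqlalchemy as sa
from sqlalchemy import Table, Column, Integer, String, MetaData
from sqlalchemy.orm import mapper, sessionmaker

_merge_md = MetaData()
_merge_data = Table('sketch_data', _merge_md,
                    Column('id', Integer, primary_key=True),
                    Column('data', String(50)))

class _SketchData(object):
    def __init__(self, **kw):
        for k, v in kw.items():
            setattr(self, k, v)

mapper(_SketchData, _merge_data)

_merge_engine = sa.create_engine('sqlite://')
_merge_md.create_all(_merge_engine)
_merge_sess = sessionmaker(bind=_merge_engine)()

_merge_sess.add(_SketchData(id=2, data="foo"))
_merge_sess.commit()

# the incoming object never sets .data, so the existing value survives the merge
_merged = _merge_sess.merge(_SketchData(id=2))
assert _merged.data == "foo"
# --- end sketch ---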
eq_(load.called, 12) # test with "dontload" merge sess5 = create_session() u = sess5.merge(u, load=False) assert len(u.addresses) for a in u.addresses: assert a.user is u def go(): sess5.flush() # no changes; therefore flush should do nothing # but also, load=False wipes out any difference in committed state, # so no flush at all self.assert_sql_count(testing.db, go, 0) eq_(load.called, 15) sess4 = create_session() u = sess4.merge(u, load=False) # post merge change u.addresses[1].email_address='afafds' def go(): sess4.flush() # afafds change flushes self.assert_sql_count(testing.db, go, 1) eq_(load.called, 18) sess5 = create_session() u2 = sess5.query(User).get(u.id) eq_(u2.name, 'fred2') eq_(u2.addresses[1].email_address, 'afafds') eq_(load.called, 21) def test_no_relationship_cascade(self): """test that merge doesn't interfere with a relationship() target that specifically doesn't include 'merge' cascade. """ Address, addresses, users, User = (self.classes.Address, self.tables.addresses, self.tables.users, self.classes.User) mapper(Address, addresses, properties={ 'user':relationship(User, cascade="save-update") }) mapper(User, users) sess = create_session() u1 = User(name="fred") a1 = Address(email_address="asdf", user=u1) sess.add(a1) sess.flush() a2 = Address(id=a1.id, email_address="bar", user=User(name="hoho")) a2 = sess.merge(a2) sess.flush() # no expire of the attribute assert a2.__dict__['user'] is u1 # merge succeeded eq_( sess.query(Address).all(), [Address(id=a1.id, email_address="bar")] ) # didn't touch user eq_( sess.query(User).all(), [User(name="fred")] ) def test_one_to_many_cascade(self): users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(User, users, properties={ 'addresses':relationship(mapper(Address, addresses))}) load = self.load_tracker(User) self.load_tracker(Address, load) sess = create_session() u = User(name='fred') a1 = Address(email_address='foo@bar') a2 = Address(email_address='foo@quux') u.addresses.extend([a1, a2]) sess.add(u) sess.flush() eq_(load.called, 0) sess2 = create_session() u2 = sess2.query(User).get(u.id) eq_(load.called, 1) u.addresses[1].email_address = 'addr 2 modified' sess2.merge(u) eq_(u2.addresses[1].email_address, 'addr 2 modified') eq_(load.called, 3) sess3 = create_session() u3 = sess3.query(User).get(u.id) eq_(load.called, 4) u.name = 'also fred' sess3.merge(u) eq_(load.called, 6) eq_(u3.name, 'also fred') def test_many_to_one_cascade(self): Address, addresses, users, User = (self.classes.Address, self.tables.addresses, self.tables.users, self.classes.User) mapper(Address, addresses, properties={ 'user':relationship(User) }) mapper(User, users) u1 = User(id=1, name="u1") a1 =Address(id=1, email_address="a1", user=u1) u2 = User(id=2, name="u2") sess = create_session() sess.add_all([a1, u2]) sess.flush() a1.user = u2 sess2 = create_session() a2 = sess2.merge(a1) eq_( attributes.get_history(a2, 'user'), ([u2], (), ()) ) assert a2 in sess2.dirty sess.refresh(a1) sess2 = create_session() a2 = sess2.merge(a1, load=False) eq_( attributes.get_history(a2, 'user'), ((), [u1], ()) ) assert a2 not in sess2.dirty def test_many_to_many_cascade(self): items, Order, orders, order_items, Item = (self.tables.items, self.classes.Order, self.tables.orders, self.tables.order_items, self.classes.Item) mapper(Order, orders, properties={ 'items':relationship(mapper(Item, items), secondary=order_items)}) load = self.load_tracker(Order) self.load_tracker(Item, load) sess = 
create_session() i1 = Item() i1.description='item 1' i2 = Item() i2.description = 'item 2' o = Order() o.description = 'order description' o.items.append(i1) o.items.append(i2) sess.add(o) sess.flush() eq_(load.called, 0) sess2 = create_session() o2 = sess2.query(Order).get(o.id) eq_(load.called, 1) o.items[1].description = 'item 2 modified' sess2.merge(o) eq_(o2.items[1].description, 'item 2 modified') eq_(load.called, 3) sess3 = create_session() o3 = sess3.query(Order).get(o.id) eq_( load.called, 4) o.description = 'desc modified' sess3.merge(o) eq_(load.called, 6) eq_(o3.description, 'desc modified') def test_one_to_one_cascade(self): users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(User, users, properties={ 'address':relationship(mapper(Address, addresses), uselist = False) }) load = self.load_tracker(User) self.load_tracker(Address, load) sess = create_session() u = User() u.id = 7 u.name = "fred" a1 = Address() a1.email_address='foo@bar.com' u.address = a1 sess.add(u) sess.flush() eq_(load.called, 0) sess2 = create_session() u2 = sess2.query(User).get(7) eq_(load.called, 1) u2.name = 'fred2' u2.address.email_address = 'hoho@lalala.com' eq_(load.called, 2) u3 = sess.merge(u2) eq_(load.called, 2) assert u3 is u def test_value_to_none(self): users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(User, users, properties={ 'address':relationship(mapper(Address, addresses), uselist = False, backref='user') }) sess = sessionmaker()() u = User(id=7, name="fred", address=Address(id=1, email_address='foo@bar.com')) sess.add(u) sess.commit() sess.close() u2 = User(id=7, name=None, address=None) u3 = sess.merge(u2) assert u3.name is None assert u3.address is None sess.close() a1 = Address(id=1, user=None) a2 = sess.merge(a1) assert a2.user is None def test_transient_no_load(self): users, User = self.tables.users, self.classes.User mapper(User, users) sess = create_session() u = User() assert_raises_message(sa.exc.InvalidRequestError, "load=False option does not support", sess.merge, u, load=False) def test_no_load_with_backrefs(self): """load=False populates relationships in both directions without requiring a load""" users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(User, users, properties={ 'addresses':relationship(mapper(Address, addresses), backref='user') }) u = User(id=7, name='fred', addresses=[ Address(email_address='ad1'), Address(email_address='ad2')]) sess = create_session() sess.add(u) sess.flush() sess.close() assert 'user' in u.addresses[1].__dict__ sess = create_session() u2 = sess.merge(u, load=False) assert 'user' in u2.addresses[1].__dict__ eq_(u2.addresses[1].user, User(id=7, name='fred')) sess.expire(u2.addresses[1], ['user']) assert 'user' not in u2.addresses[1].__dict__ sess.close() sess = create_session() u = sess.merge(u2, load=False) assert 'user' not in u.addresses[1].__dict__ eq_(u.addresses[1].user, User(id=7, name='fred')) def test_dontload_with_eager(self): """ This test illustrates that with load=False, we can't just copy the committed_state of the merged instance over; since it references collection objects which themselves are to be merged. This committed_state would instead need to be piecemeal 'converted' to represent the correct objects. 
However, at the moment I'd rather not support this use case; if you are merging with load=False, you're typically dealing with caching and the merged objects shouldnt be 'dirty'. """ users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(User, users, properties={ 'addresses':relationship(mapper(Address, addresses)) }) sess = create_session() u = User() u.id = 7 u.name = "fred" a1 = Address() a1.email_address='foo@bar.com' u.addresses.append(a1) sess.add(u) sess.flush() sess2 = create_session() u2 = sess2.query(User).\ options(sa.orm.joinedload('addresses')).get(7) sess3 = create_session() u3 = sess3.merge(u2, load=False) def go(): sess3.flush() self.assert_sql_count(testing.db, go, 0) def test_no_load_disallows_dirty(self): """load=False doesnt support 'dirty' objects right now (see test_no_load_with_eager()). Therefore lets assert it. """ users, User = self.tables.users, self.classes.User mapper(User, users) sess = create_session() u = User() u.id = 7 u.name = "fred" sess.add(u) sess.flush() u.name = 'ed' sess2 = create_session() try: sess2.merge(u, load=False) assert False except sa.exc.InvalidRequestError, e: assert "merge() with load=False option does not support "\ "objects marked as 'dirty'. flush() all changes on "\ "mapped instances before merging with load=False." \ in str(e) u2 = sess2.query(User).get(7) sess3 = create_session() u3 = sess3.merge(u2, load=False) assert not sess3.dirty def go(): sess3.flush() self.assert_sql_count(testing.db, go, 0) def test_no_load_sets_backrefs(self): users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(User, users, properties={ 'addresses':relationship(mapper(Address, addresses), backref='user')}) sess = create_session() u = User() u.id = 7 u.name = "fred" a1 = Address() a1.email_address='foo@bar.com' u.addresses.append(a1) sess.add(u) sess.flush() assert u.addresses[0].user is u sess2 = create_session() u2 = sess2.merge(u, load=False) assert not sess2.dirty def go(): assert u2.addresses[0].user is u2 self.assert_sql_count(testing.db, go, 0) def test_no_load_preserves_parents(self): """Merge with load=False does not trigger a 'delete-orphan' operation. merge with load=False sets attributes without using events. this means the 'hasparent' flag is not propagated to the newly merged instance. in fact this works out OK, because the '_state.parents' collection on the newly merged instance is empty; since the mapper doesn't see an active 'False' setting in this collection when _is_orphan() is called, it does not count as an orphan (i.e. this is the 'optimistic' logic in mapper._is_orphan().) 
""" users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(User, users, properties={ 'addresses':relationship(mapper(Address, addresses), backref='user', cascade="all, delete-orphan")}) sess = create_session() u = User() u.id = 7 u.name = "fred" a1 = Address() a1.email_address='foo@bar.com' u.addresses.append(a1) sess.add(u) sess.flush() assert u.addresses[0].user is u sess2 = create_session() u2 = sess2.merge(u, load=False) assert not sess2.dirty a2 = u2.addresses[0] a2.email_address='somenewaddress' assert not sa.orm.object_mapper(a2)._is_orphan( sa.orm.attributes.instance_state(a2)) sess2.flush() sess2.expunge_all() eq_(sess2.query(User).get(u2.id).addresses[0].email_address, 'somenewaddress') # this use case is not supported; this is with a pending Address # on the pre-merged object, and we currently dont support # 'dirty' objects being merged with load=False. in this case, # the empty '_state.parents' collection would be an issue, since # the optimistic flag is False in _is_orphan() for pending # instances. so if we start supporting 'dirty' with load=False, # this test will need to pass sess = create_session() u = sess.query(User).get(7) u.addresses.append(Address()) sess2 = create_session() try: u2 = sess2.merge(u, load=False) assert False # if load=False is changed to support dirty objects, this code # needs to pass a2 = u2.addresses[0] a2.email_address='somenewaddress' assert not sa.orm.object_mapper(a2)._is_orphan( sa.orm.attributes.instance_state(a2)) sess2.flush() sess2.expunge_all() eq_(sess2.query(User).get(u2.id).addresses[0].email_address, 'somenewaddress') except sa.exc.InvalidRequestError, e: assert "load=False option does not support" in str(e) def test_synonym_comparable(self): users = self.tables.users class User(object): class Comparator(PropComparator): pass def _getValue(self): return self._value def _setValue(self, value): setattr(self, '_value', value) value = property(_getValue, _setValue) mapper(User, users, properties={ 'uid':synonym('id'), 'foobar':comparable_property(User.Comparator,User.value), }) sess = create_session() u = User() u.name = 'ed' sess.add(u) sess.flush() sess.expunge(u) sess.merge(u) def test_cascade_doesnt_blowaway_manytoone(self): """a merge test that was fixed by [ticket:1202]""" User, Address, addresses, users = (self.classes.User, self.classes.Address, self.tables.addresses, self.tables.users) s = create_session(autoflush=True, autocommit=False) mapper(User, users, properties={ 'addresses':relationship(mapper(Address, addresses), backref='user')}) a1 = Address(user=s.merge(User(id=1, name='ed')), email_address='x') before_id = id(a1.user) a2 = Address(user=s.merge(User(id=1, name='jack')), email_address='x') after_id = id(a1.user) other_id = id(a2.user) eq_(before_id, other_id) eq_(after_id, other_id) eq_(before_id, after_id) eq_(a1.user, a2.user) def test_cascades_dont_autoflush(self): User, Address, addresses, users = (self.classes.User, self.classes.Address, self.tables.addresses, self.tables.users) sess = create_session(autoflush=True, autocommit=False) m = mapper(User, users, properties={ 'addresses':relationship(mapper(Address, addresses), backref='user')}) user = User(id=8, name='fred', addresses=[Address(email_address='user')]) merged_user = sess.merge(user) assert merged_user in sess.new sess.flush() assert merged_user not in sess.new def test_cascades_dont_autoflush_2(self): users, Address, addresses, User = (self.tables.users, self.classes.Address, 
self.tables.addresses, self.classes.User) mapper(User, users, properties={ 'addresses':relationship(Address, backref='user', cascade="all, delete-orphan") }) mapper(Address, addresses) u = User(id=7, name='fred', addresses=[ Address(id=1, email_address='fred1'), ]) sess = create_session(autoflush=True, autocommit=False) sess.add(u) sess.commit() sess.expunge_all() u = User(id=7, name='fred', addresses=[ Address(id=1, email_address='fred1'), Address(id=2, email_address='fred2'), ]) sess.merge(u) assert sess.autoflush sess.commit() def test_dont_expire_pending(self): """test that pending instances aren't expired during a merge.""" users, User = self.tables.users, self.classes.User mapper(User, users) u = User(id=7) sess = create_session(autoflush=True, autocommit=False) u = sess.merge(u) assert not bool(attributes.instance_state(u).expired_attributes) def go(): eq_(u.name, None) self.assert_sql_count(testing.db, go, 0) def test_option_state(self): """test that the merged takes on the MapperOption characteristics of that which is merged. """ users, User = self.tables.users, self.classes.User class Option(MapperOption): propagate_to_loaders = True opt1, opt2 = Option(), Option() sess = sessionmaker()() umapper = mapper(User, users) sess.add_all([ User(id=1, name='u1'), User(id=2, name='u2'), ]) sess.commit() sess2 = sessionmaker()() s2_users = sess2.query(User).options(opt2).all() # test 1. no options are replaced by merge options sess = sessionmaker()() s1_users = sess.query(User).all() for u in s1_users: ustate = attributes.instance_state(u) eq_(ustate.load_path, ()) eq_(ustate.load_options, set()) for u in s2_users: sess.merge(u) for u in s1_users: ustate = attributes.instance_state(u) eq_(ustate.load_path.path, (umapper, )) eq_(ustate.load_options, set([opt2])) # test 2. present options are replaced by merge options sess = sessionmaker()() s1_users = sess.query(User).options(opt1).all() for u in s1_users: ustate = attributes.instance_state(u) eq_(ustate.load_path.path, (umapper, )) eq_(ustate.load_options, set([opt1])) for u in s2_users: sess.merge(u) for u in s1_users: ustate = attributes.instance_state(u) eq_(ustate.load_path.path, (umapper, )) eq_(ustate.load_options, set([opt2])) class M2ONoUseGetLoadingTest(fixtures.MappedTest): """Merge a one-to-many. The many-to-one on the other side is set up so that use_get is False. See if skipping the "m2o" merge vs. doing it saves on SQL calls. 
""" @classmethod def define_tables(cls, metadata): Table('user', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', String(50)), ) Table('address', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('user_id', Integer, ForeignKey('user.id')), Column('email', String(50)), ) @classmethod def setup_classes(cls): class User(cls.Comparable): pass class Address(cls.Comparable): pass @classmethod def setup_mappers(cls): User, Address = cls.classes.User, cls.classes.Address user, address = cls.tables.user, cls.tables.address mapper(User, user, properties={ 'addresses':relationship(Address, backref= backref('user', # needlessly complex primaryjoin so that the # use_get flag is False primaryjoin=and_( user.c.id==address.c.user_id, user.c.id==user.c.id ) ) ) }) mapper(Address, address) configure_mappers() assert Address.user.property._use_get is False @classmethod def insert_data(cls): User, Address = cls.classes.User, cls.classes.Address s = Session() s.add_all([ User(id=1, name='u1', addresses=[Address(id=1, email='a1'), Address(id=2, email='a2')]) ]) s.commit() # "persistent" - we get at an Address that was already present. # With the "skip bidirectional" check removed, the "set" emits SQL # for the "previous" version in any case, # address.user_id is 1, you get a load. def test_persistent_access_none(self): User, Address = self.classes.User, self.classes.Address s = Session() def go(): u1 = User(id=1, addresses =[Address(id=1), Address(id=2)] ) u2 = s.merge(u1) self.assert_sql_count(testing.db, go, 2) def test_persistent_access_one(self): User, Address = self.classes.User, self.classes.Address s = Session() def go(): u1 = User(id=1, addresses =[Address(id=1), Address(id=2)] ) u2 = s.merge(u1) a1 = u2.addresses[0] assert a1.user is u2 self.assert_sql_count(testing.db, go, 3) def test_persistent_access_two(self): User, Address = self.classes.User, self.classes.Address s = Session() def go(): u1 = User(id=1, addresses =[Address(id=1), Address(id=2)] ) u2 = s.merge(u1) a1 = u2.addresses[0] assert a1.user is u2 a2 = u2.addresses[1] assert a2.user is u2 self.assert_sql_count(testing.db, go, 4) # "pending" - we get at an Address that is new- user_id should be # None. But in this case the set attribute on the forward side # already sets the backref. commenting out the "skip bidirectional" # check emits SQL again for the other two Address objects already # persistent. 
def test_pending_access_one(self): User, Address = self.classes.User, self.classes.Address s = Session() def go(): u1 = User(id=1, addresses =[Address(id=1), Address(id=2), Address(id=3, email='a3')] ) u2 = s.merge(u1) a3 = u2.addresses[2] assert a3.user is u2 self.assert_sql_count(testing.db, go, 3) def test_pending_access_two(self): User, Address = self.classes.User, self.classes.Address s = Session() def go(): u1 = User(id=1, addresses =[Address(id=1), Address(id=2), Address(id=3, email='a3')] ) u2 = s.merge(u1) a3 = u2.addresses[2] assert a3.user is u2 a2 = u2.addresses[1] assert a2.user is u2 self.assert_sql_count(testing.db, go, 5) class MutableMergeTest(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): Table("data", metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('data', PickleType(comparator=operator.eq)) ) @classmethod def setup_classes(cls): class Data(cls.Basic): pass def test_list(self): Data, data = self.classes.Data, self.tables.data mapper(Data, data) sess = sessionmaker()() d = Data(data=["this", "is", "a", "list"]) sess.add(d) sess.commit() d2 = Data(id=d.id, data=["this", "is", "another", "list"]) d3 = sess.merge(d2) eq_(d3.data, ["this", "is", "another", "list"]) class CompositeNullPksTest(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): Table("data", metadata, Column('pk1', String(10), primary_key=True), Column('pk2', String(10), primary_key=True), ) @classmethod def setup_classes(cls): class Data(cls.Basic): pass def test_merge_allow_partial(self): Data, data = self.classes.Data, self.tables.data mapper(Data, data) sess = sessionmaker()() d1 = Data(pk1="someval", pk2=None) def go(): return sess.merge(d1) self.assert_sql_count(testing.db, go, 1) def test_merge_disallow_partial(self): Data, data = self.classes.Data, self.tables.data mapper(Data, data, allow_partial_pks=False) sess = sessionmaker()() d1 = Data(pk1="someval", pk2=None) def go(): return sess.merge(d1) self.assert_sql_count(testing.db, go, 0) class LoadOnPendingTest(fixtures.MappedTest): """Test interaction of merge() with load_on_pending relationships""" @classmethod def define_tables(cls, metadata): rocks_table = Table("rocks", metadata, Column("id", Integer, primary_key=True), Column("description", String(10)), ) bugs_table = Table("bugs", metadata, Column("id", Integer, primary_key=True), Column("rockid", Integer, ForeignKey('rocks.id')), ) @classmethod def setup_classes(cls): class Rock(cls.Basic, fixtures.ComparableEntity): pass class Bug(cls.Basic, fixtures.ComparableEntity): pass def _setup_delete_orphan_o2o(self): mapper(self.classes.Rock, self.tables.rocks, properties={'bug': relationship(self.classes.Bug, cascade='all,delete-orphan', load_on_pending=True, uselist=False) }) mapper(self.classes.Bug, self.tables.bugs) self.sess = sessionmaker()() def _merge_delete_orphan_o2o_with(self, bug): # create a transient rock with passed bug r = self.classes.Rock(id=0, description='moldy') r.bug = bug m = self.sess.merge(r) # we've already passed ticket #2374 problem since merge() returned, # but for good measure: assert m is not r eq_(m,r) def test_merge_delete_orphan_o2o_none(self): """one to one delete_orphan relationships marked load_on_pending should be able to merge() with attribute None""" self._setup_delete_orphan_o2o() self._merge_delete_orphan_o2o_with(None) def test_merge_delete_orphan_o2o(self): """one to one delete_orphan relationships marked load_on_pending should be able to merge()""" self._setup_delete_orphan_o2o() 
self._merge_delete_orphan_o2o_with(self.classes.Bug(id=1)) class PolymorphicOnTest(fixtures.MappedTest): """Test merge() of polymorphic object when polymorphic_on isn't a Column""" @classmethod def define_tables(cls, metadata): Table('employees', metadata, Column('employee_id', Integer, primary_key=True, test_needs_autoincrement=True), Column('type', String(1), nullable=False), Column('data', String(50)), ) @classmethod def setup_classes(cls): class Employee(cls.Basic, fixtures.ComparableEntity): pass class Manager(Employee): pass class Engineer(Employee): pass def _setup_polymorphic_on_mappers(self): employee_mapper = mapper(self.classes.Employee, self.tables.employees, polymorphic_on=case(value=self.tables.employees.c.type, whens={ 'E': 'employee', 'M': 'manager', 'G': 'engineer', 'R': 'engineer', }), polymorphic_identity='employee') mapper(self.classes.Manager, inherits=employee_mapper, polymorphic_identity='manager') mapper(self.classes.Engineer, inherits=employee_mapper, polymorphic_identity='engineer') self.sess = sessionmaker()() def test_merge_polymorphic_on(self): """merge() should succeed with a polymorphic object even when polymorphic_on is not a Column """ self._setup_polymorphic_on_mappers() m = self.classes.Manager(employee_id=55, type='M', data='original data') self.sess.add(m) self.sess.commit() self.sess.expunge_all() m = self.classes.Manager(employee_id=55, data='updated data') merged = self.sess.merge(m) # we've already passed ticket #2449 problem since # merge() returned, but for good measure: assert m is not merged eq_(m,merged) SQLAlchemy-0.8.4/test/orm/test_naturalpks.py0000644000076500000240000011160412251150016021576 0ustar classicstaff00000000000000""" Primary key changing capabilities and passive/non-passive cascading updates. """ from __future__ import with_statement from sqlalchemy.testing import eq_, ne_, \ assert_raises, assert_raises_message import sqlalchemy as sa from sqlalchemy import testing from sqlalchemy import Integer, String, ForeignKey, Unicode from sqlalchemy.testing.schema import Table, Column from sqlalchemy.orm import mapper, relationship, create_session, backref, Session from sqlalchemy.orm.session import make_transient from sqlalchemy.testing import eq_ from sqlalchemy.testing import fixtures from test.orm import _fixtures def _backend_specific_fk_args(): if testing.requires.deferrable_fks.enabled: fk_args = dict(deferrable=True, initially='deferred') elif not testing.requires.on_update_cascade.enabled: fk_args = dict() else: fk_args = dict(onupdate='cascade') return fk_args class NaturalPKTest(fixtures.MappedTest): # MySQL 5.5 on Windows crashes (the entire server, not the client) # if you screw around with ON UPDATE CASCADE type of stuff. 
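# --- Illustrative, standalone sketch (not part of the original test module) ---
# _backend_specific_fk_args() above picks one of two ForeignKey configurations
# depending on the backend: ON UPDATE CASCADE where supported, otherwise a
# DEFERRABLE / INITIALLY DEFERRED constraint.  The table names below are
# placeholders, not the fixture tables.
from sqlalchemy import Table, Column, String, ForeignKey, MetaData

_fk_md = MetaData()
_fk_parent = Table('sketch_fk_parent', _fk_md,
                   Column('name', String(50), primary_key=True))

# cascading update: the database rewrites the child row when parent.name changes
_fk_child_cascade = Table('sketch_fk_child_cascade', _fk_md,
                          Column('email', String(50), primary_key=True),
                          Column('parent_name', String(50),
                                 ForeignKey('sketch_fk_parent.name',
                                            onupdate='cascade')))

# deferrable alternative: constraint checking is postponed to commit, so parent
# and child UPDATEs may be issued in either order within one transaction
_fk_child_deferred = Table('sketch_fk_child_deferred', _fk_md,
                           Column('email', String(50), primary_key=True),
                           Column('parent_name', String(50),
                                  ForeignKey('sketch_fk_parent.name',
                                             deferrable=True,
                                             initially='deferred')))
# --- end sketch ---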
__requires__ = 'skip_mysql_on_windows', 'on_update_or_deferrable_fks' @classmethod def define_tables(cls, metadata): fk_args = _backend_specific_fk_args() users = Table('users', metadata, Column('username', String(50), primary_key=True), Column('fullname', String(100)), test_needs_fk=True) addresses = Table('addresses', metadata, Column('email', String(50), primary_key=True), Column('username', String(50), ForeignKey('users.username', **fk_args)), test_needs_fk=True) items = Table('items', metadata, Column('itemname', String(50), primary_key=True), Column('description', String(100)), test_needs_fk=True) users_to_items = Table('users_to_items', metadata, Column('username', String(50), ForeignKey('users.username', **fk_args), primary_key=True), Column('itemname', String(50), ForeignKey('items.itemname', **fk_args), primary_key=True), test_needs_fk=True) @classmethod def setup_classes(cls): class User(cls.Comparable): pass class Address(cls.Comparable): pass class Item(cls.Comparable): pass def test_entity(self): users, User = self.tables.users, self.classes.User mapper(User, users) sess = create_session() u1 = User(username='jack', fullname='jack') sess.add(u1) sess.flush() assert sess.query(User).get('jack') is u1 u1.username = 'ed' sess.flush() def go(): assert sess.query(User).get('ed') is u1 self.assert_sql_count(testing.db, go, 0) assert sess.query(User).get('jack') is None sess.expunge_all() u1 = sess.query(User).get('ed') eq_(User(username='ed', fullname='jack'), u1) def test_load_after_expire(self): users, User = self.tables.users, self.classes.User mapper(User, users) sess = create_session() u1 = User(username='jack', fullname='jack') sess.add(u1) sess.flush() assert sess.query(User).get('jack') is u1 users.update(values={User.username:'jack'}).execute(username='ed') # expire/refresh works off of primary key. the PK is gone # in this case so theres no way to look it up. 
criterion- # based session invalidation could solve this [ticket:911] sess.expire(u1) assert_raises(sa.orm.exc.ObjectDeletedError, getattr, u1, 'username') sess.expunge_all() assert sess.query(User).get('jack') is None assert sess.query(User).get('ed').fullname == 'jack' def test_flush_new_pk_after_expire(self): User, users = self.classes.User, self.tables.users mapper(User, users) sess = create_session() u1 = User(username='jack', fullname='jack') sess.add(u1) sess.flush() assert sess.query(User).get('jack') is u1 sess.expire(u1) u1.username = 'ed' sess.flush() sess.expunge_all() assert sess.query(User).get('ed').fullname == 'jack' @testing.requires.on_update_cascade def test_onetomany_passive(self): self._test_onetomany(True) def test_onetomany_nonpassive(self): self._test_onetomany(False) def _test_onetomany(self, passive_updates): users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(User, users, properties={ 'addresses':relationship(Address, passive_updates=passive_updates) }) mapper(Address, addresses) sess = create_session() u1 = User(username='jack', fullname='jack') u1.addresses.append(Address(email='jack1')) u1.addresses.append(Address(email='jack2')) sess.add(u1) sess.flush() assert sess.query(Address).get('jack1') is u1.addresses[0] u1.username = 'ed' sess.flush() assert u1.addresses[0].username == 'ed' sess.expunge_all() eq_([Address(username='ed'), Address(username='ed')], sess.query(Address).all()) u1 = sess.query(User).get('ed') u1.username = 'jack' def go(): sess.flush() if not passive_updates: # test passive_updates=False; #load addresses, update user, update 2 addresses self.assert_sql_count(testing.db, go, 4) else: # test passive_updates=True; update user self.assert_sql_count(testing.db, go, 1) sess.expunge_all() assert User(username='jack', addresses=[ Address(username='jack'), Address(username='jack')]) == \ sess.query(User).get('jack') u1 = sess.query(User).get('jack') u1.addresses = [] u1.username = 'fred' sess.flush() sess.expunge_all() assert sess.query(Address).get('jack1').username is None u1 = sess.query(User).get('fred') eq_(User(username='fred', fullname='jack'), u1) @testing.requires.on_update_cascade def test_manytoone_passive(self): self._test_manytoone(True) def test_manytoone_nonpassive(self): self._test_manytoone(False) def test_manytoone_nonpassive_cold_mapping(self): """test that the mapper-level m2o dependency processor is set up even if the opposite side relationship hasn't yet been part of a flush. 
""" users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) with testing.db.begin() as conn: conn.execute(users.insert(), username='jack', fullname='jack' ) conn.execute(addresses.insert(), email='jack1', username='jack' ) conn.execute(addresses.insert(), email='jack2', username='jack' ) mapper(User, users) mapper(Address, addresses, properties={ 'user': relationship(User, passive_updates=False) }) sess = create_session() u1 = sess.query(User).first() a1, a2 = sess.query(Address).all() u1.username = 'ed' def go(): sess.flush() self.assert_sql_count(testing.db, go, 3) def _test_manytoone(self, passive_updates): users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(User, users) mapper(Address, addresses, properties={ 'user': relationship(User, passive_updates=passive_updates) }) sess = create_session() a1 = Address(email='jack1') a2 = Address(email='jack2') u1 = User(username='jack', fullname='jack') a1.user = u1 a2.user = u1 sess.add(a1) sess.add(a2) sess.flush() u1.username = 'ed' def go(): sess.flush() if passive_updates: self.assert_sql_count(testing.db, go, 1) else: self.assert_sql_count(testing.db, go, 3) def go(): sess.flush() self.assert_sql_count(testing.db, go, 0) assert a1.username == a2.username == 'ed' sess.expunge_all() eq_([Address(username='ed'), Address(username='ed')], sess.query(Address).all()) @testing.requires.on_update_cascade def test_onetoone_passive(self): self._test_onetoone(True) def test_onetoone_nonpassive(self): self._test_onetoone(False) def _test_onetoone(self, passive_updates): users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(User, users, properties={ "address":relationship(Address, passive_updates=passive_updates, uselist=False) }) mapper(Address, addresses) sess = create_session() u1 = User(username='jack', fullname='jack') sess.add(u1) sess.flush() a1 = Address(email='jack1') u1.address = a1 sess.add(a1) sess.flush() u1.username = 'ed' def go(): sess.flush() if passive_updates: sess.expire(u1, ['address']) self.assert_sql_count(testing.db, go, 1) else: self.assert_sql_count(testing.db, go, 2) def go(): sess.flush() self.assert_sql_count(testing.db, go, 0) sess.expunge_all() eq_([Address(username='ed')], sess.query(Address).all()) @testing.requires.on_update_cascade def test_bidirectional_passive(self): self._test_bidirectional(True) def test_bidirectional_nonpassive(self): self._test_bidirectional(False) def _test_bidirectional(self, passive_updates): users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(User, users) mapper(Address, addresses, properties={ 'user': relationship(User, passive_updates=passive_updates, backref='addresses')}) sess = create_session() a1 = Address(email='jack1') a2 = Address(email='jack2') u1 = User(username='jack', fullname='jack') a1.user = u1 a2.user = u1 sess.add(a1) sess.add(a2) sess.flush() u1.username = 'ed' (ad1, ad2) = sess.query(Address).all() eq_([Address(username='jack'), Address(username='jack')], [ad1, ad2]) def go(): sess.flush() if passive_updates: self.assert_sql_count(testing.db, go, 1) else: self.assert_sql_count(testing.db, go, 3) eq_([Address(username='ed'), Address(username='ed')], [ad1, ad2]) sess.expunge_all() eq_([Address(username='ed'), Address(username='ed')], sess.query(Address).all()) u1 = sess.query(User).get('ed') assert 
len(u1.addresses) == 2 # load addresses u1.username = 'fred' def go(): sess.flush() # check that the passive_updates is on on the other side if passive_updates: self.assert_sql_count(testing.db, go, 1) else: self.assert_sql_count(testing.db, go, 3) sess.expunge_all() eq_([Address(username='fred'), Address(username='fred')], sess.query(Address).all()) @testing.requires.on_update_cascade def test_manytomany_passive(self): self._test_manytomany(True) # mysqldb executemany() of the association table fails to # report the correct row count @testing.fails_if(lambda: testing.against('mysql') and not testing.against('+zxjdbc')) def test_manytomany_nonpassive(self): self._test_manytomany(False) def _test_manytomany(self, passive_updates): users, items, Item, User, users_to_items = (self.tables.users, self.tables.items, self.classes.Item, self.classes.User, self.tables.users_to_items) mapper(User, users, properties={ 'items':relationship(Item, secondary=users_to_items, backref='users', passive_updates=passive_updates)}) mapper(Item, items) sess = create_session() u1 = User(username='jack') u2 = User(username='fred') i1 = Item(itemname='item1') i2 = Item(itemname='item2') u1.items.append(i1) u1.items.append(i2) i2.users.append(u2) sess.add(u1) sess.add(u2) sess.flush() r = sess.query(Item).all() # ComparableEntity can't handle a comparison with the backrefs # involved.... eq_(Item(itemname='item1'), r[0]) eq_(['jack'], [u.username for u in r[0].users]) eq_(Item(itemname='item2'), r[1]) eq_(['jack', 'fred'], [u.username for u in r[1].users]) u2.username='ed' def go(): sess.flush() go() def go(): sess.flush() self.assert_sql_count(testing.db, go, 0) sess.expunge_all() r = sess.query(Item).all() eq_(Item(itemname='item1'), r[0]) eq_(['jack'], [u.username for u in r[0].users]) eq_(Item(itemname='item2'), r[1]) eq_(['ed', 'jack'], sorted([u.username for u in r[1].users])) sess.expunge_all() u2 = sess.query(User).get(u2.username) u2.username='wendy' sess.flush() r = sess.query(Item).with_parent(u2).all() eq_(Item(itemname='item2'), r[0]) class TransientExceptionTesst(_fixtures.FixtureTest): run_inserts = None def test_transient_exception(self): """An object that goes from a pk value to transient/pending doesn't count as a "pk" switch. 
""" users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(User, users) mapper(Address, addresses, properties={'user':relationship(User)}) sess = create_session() u1 = User(id=5, name='u1') ad1 = Address(email_address='e1', user=u1) sess.add_all([u1, ad1]) sess.flush() make_transient(u1) u1.id = None u1.username='u2' sess.add(u1) sess.flush() eq_(ad1.user_id, 5) sess.expire_all() eq_(ad1.user_id, 5) ne_(u1.id, 5) ne_(u1.id, None) eq_(sess.query(User).count(), 2) class ReversePKsTest(fixtures.MappedTest): """reverse the primary keys of two entities and ensure bookkeeping succeeds.""" @classmethod def define_tables(cls, metadata): Table( 'user', metadata, Column('code', Integer, primary_key=True), Column('status', Integer, primary_key=True), Column('username', Unicode(50), nullable=False), ) @classmethod def setup_classes(cls): class User(cls.Comparable): def __init__(self, code, status, username): self.code = code self.status = status self.username = username def test_reverse(self): user, User = self.tables.user, self.classes.User PUBLISHED, EDITABLE, ARCHIVED = 1, 2, 3 mapper(User, user) session = sa.orm.sessionmaker()() a_published = User(1, PUBLISHED, u'a') session.add(a_published) session.commit() a_editable = User(1, EDITABLE, u'a') session.add(a_editable) session.commit() # do the switch in both directions - # one or the other should raise the error # based on platform dictionary ordering a_published.status = ARCHIVED a_editable.status = PUBLISHED session.commit() assert session.query(User).get([1, PUBLISHED]) is a_editable assert session.query(User).get([1, ARCHIVED]) is a_published a_published.status = PUBLISHED a_editable.status = EDITABLE session.commit() assert session.query(User).get([1, PUBLISHED]) is a_published assert session.query(User).get([1, EDITABLE]) is a_editable class SelfReferentialTest(fixtures.MappedTest): # mssql, mysql don't allow # ON UPDATE on self-referential keys __unsupported_on__ = ('mssql', 'mysql') __requires__ = 'on_update_or_deferrable_fks', @classmethod def define_tables(cls, metadata): fk_args = _backend_specific_fk_args() Table('nodes', metadata, Column('name', String(50), primary_key=True), Column('parent', String(50), ForeignKey('nodes.name', **fk_args)), test_needs_fk=True ) @classmethod def setup_classes(cls): class Node(cls.Comparable): pass def test_one_to_many_on_m2o(self): Node, nodes = self.classes.Node, self.tables.nodes mapper(Node, nodes, properties={ 'children': relationship(Node, backref=sa.orm.backref('parentnode', remote_side=nodes.c.name, passive_updates=False), )}) sess = Session() n1 = Node(name='n1') sess.add(n1) n2 = Node(name='n11', parentnode=n1) n3 = Node(name='n12', parentnode=n1) n4 = Node(name='n13', parentnode=n1) sess.add_all([n2, n3, n4]) sess.commit() n1.name = 'new n1' sess.commit() eq_(['new n1', 'new n1', 'new n1'], [n.parent for n in sess.query(Node).filter( Node.name.in_(['n11', 'n12', 'n13']))]) def test_one_to_many_on_o2m(self): Node, nodes = self.classes.Node, self.tables.nodes mapper(Node, nodes, properties={ 'children': relationship(Node, backref=sa.orm.backref('parentnode', remote_side=nodes.c.name), passive_updates=False )}) sess = Session() n1 = Node(name='n1') n1.children.append(Node(name='n11')) n1.children.append(Node(name='n12')) n1.children.append(Node(name='n13')) sess.add(n1) sess.commit() n1.name = 'new n1' sess.commit() eq_(n1.children[1].parent, 'new n1') eq_(['new n1', 'new n1', 'new n1'], [n.parent for n in 
sess.query(Node).filter( Node.name.in_(['n11', 'n12', 'n13']))]) @testing.requires.on_update_cascade def test_many_to_one_passive(self): self._test_many_to_one(True) def test_many_to_one_nonpassive(self): self._test_many_to_one(False) def _test_many_to_one(self, passive): Node, nodes = self.classes.Node, self.tables.nodes mapper(Node, nodes, properties={ 'parentnode':relationship(Node, remote_side=nodes.c.name, passive_updates=passive) } ) sess = Session() n1 = Node(name='n1') n11 = Node(name='n11', parentnode=n1) n12 = Node(name='n12', parentnode=n1) n13 = Node(name='n13', parentnode=n1) sess.add_all([n1, n11, n12, n13]) sess.commit() n1.name = 'new n1' sess.commit() eq_(['new n1', 'new n1', 'new n1'], [n.parent for n in sess.query(Node).filter( Node.name.in_(['n11', 'n12', 'n13']))]) class NonPKCascadeTest(fixtures.MappedTest): __requires__ = 'skip_mysql_on_windows', 'on_update_or_deferrable_fks' @classmethod def define_tables(cls, metadata): fk_args = _backend_specific_fk_args() Table('users', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('username', String(50), unique=True), Column('fullname', String(100)), test_needs_fk=True) Table('addresses', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('email', String(50)), Column('username', String(50), ForeignKey('users.username', **fk_args)), test_needs_fk=True ) @classmethod def setup_classes(cls): class User(cls.Comparable): pass class Address(cls.Comparable): pass @testing.requires.on_update_cascade def test_onetomany_passive(self): self._test_onetomany(True) def test_onetomany_nonpassive(self): self._test_onetomany(False) def _test_onetomany(self, passive_updates): User, Address, users, addresses = (self.classes.User, self.classes.Address, self.tables.users, self.tables.addresses) mapper(User, users, properties={ 'addresses':relationship(Address, passive_updates=passive_updates)}) mapper(Address, addresses) sess = create_session() u1 = User(username='jack', fullname='jack') u1.addresses.append(Address(email='jack1')) u1.addresses.append(Address(email='jack2')) sess.add(u1) sess.flush() a1 = u1.addresses[0] eq_(sa.select([addresses.c.username]).execute().fetchall(), [('jack',), ('jack',)]) assert sess.query(Address).get(a1.id) is u1.addresses[0] u1.username = 'ed' sess.flush() assert u1.addresses[0].username == 'ed' eq_(sa.select([addresses.c.username]).execute().fetchall(), [('ed',), ('ed',)]) sess.expunge_all() eq_([Address(username='ed'), Address(username='ed')], sess.query(Address).all()) u1 = sess.query(User).get(u1.id) u1.username = 'jack' def go(): sess.flush() if not passive_updates: # test passive_updates=False; load addresses, # update user, update 2 addresses self.assert_sql_count(testing.db, go, 4) else: # test passive_updates=True; update user self.assert_sql_count(testing.db, go, 1) sess.expunge_all() assert User(username='jack', addresses=[Address(username='jack'), Address(username='jack')]) == \ sess.query(User).get(u1.id) sess.expunge_all() u1 = sess.query(User).get(u1.id) u1.addresses = [] u1.username = 'fred' sess.flush() sess.expunge_all() a1 = sess.query(Address).get(a1.id) eq_(a1.username, None) eq_(sa.select([addresses.c.username]).execute().fetchall(), [(None,), (None,)]) u1 = sess.query(User).get(u1.id) eq_(User(username='fred', fullname='jack'), u1) class CascadeToFKPKTest(fixtures.MappedTest, testing.AssertsCompiledSQL): """A primary key mutation cascades onto a foreign key that is itself a primary key.""" @classmethod def 
define_tables(cls, metadata): fk_args = _backend_specific_fk_args() Table('users', metadata, Column('username', String(50), primary_key=True), test_needs_fk=True) Table('addresses', metadata, Column('username', String(50), ForeignKey('users.username', **fk_args), primary_key=True ), Column('email', String(50), primary_key=True), Column('etc', String(50)), test_needs_fk=True ) @classmethod def setup_classes(cls): class User(cls.Comparable): pass class Address(cls.Comparable): pass @testing.requires.on_update_cascade def test_onetomany_passive(self): self._test_onetomany(True) # PG etc. need passive=True to allow PK->PK cascade @testing.fails_on_everything_except('sqlite', 'oracle', '+zxjdbc') def test_onetomany_nonpassive(self): self._test_onetomany(False) def test_o2m_change_passive(self): self._test_o2m_change(True) def test_o2m_change_nonpassive(self): self._test_o2m_change(False) def _test_o2m_change(self, passive_updates): """Change the PK of a related entity to another. "on update cascade" is not involved here, so the mapper has to do the UPDATE itself. """ User, Address, users, addresses = (self.classes.User, self.classes.Address, self.tables.users, self.tables.addresses) mapper(User, users, properties={ 'addresses':relationship(Address, passive_updates=passive_updates)}) mapper(Address, addresses) sess = create_session() a1 = Address(username='ed', email='ed@host1') u1 = User(username='ed', addresses=[a1]) u2 = User(username='jack') sess.add_all([a1, u1, u2]) sess.flush() a1.username = 'jack' sess.flush() def test_o2m_move_passive(self): self._test_o2m_move(True) def test_o2m_move_nonpassive(self): self._test_o2m_move(False) def _test_o2m_move(self, passive_updates): """Move the related entity to a different collection, changing its PK. """ User, Address, users, addresses = (self.classes.User, self.classes.Address, self.tables.users, self.tables.addresses) mapper(User, users, properties={ 'addresses':relationship(Address, passive_updates=passive_updates)}) mapper(Address, addresses) sess = create_session() a1 = Address(username='ed', email='ed@host1') u1 = User(username='ed', addresses=[a1]) u2 = User(username='jack') sess.add_all([a1, u1, u2]) sess.flush() u1.addresses.remove(a1) u2.addresses.append(a1) sess.flush() @testing.requires.on_update_cascade def test_change_m2o_passive(self): self._test_change_m2o(True) @testing.fails_on_everything_except('sqlite', 'oracle', '+zxjdbc') def test_change_m2o_nonpassive(self): self._test_change_m2o(False) def _test_change_m2o(self, passive_updates): User, Address, users, addresses = (self.classes.User, self.classes.Address, self.tables.users, self.tables.addresses) mapper(User, users) mapper(Address, addresses, properties={ 'user':relationship(User, passive_updates=passive_updates) }) sess = create_session() u1 = User(username='jack') a1 = Address(user=u1, email='foo@bar') sess.add_all([u1, a1]) sess.flush() u1.username='edmodified' sess.flush() eq_(a1.username, 'edmodified') sess.expire_all() eq_(a1.username, 'edmodified') def test_move_m2o_passive(self): self._test_move_m2o(True) def test_move_m2o_nonpassive(self): self._test_move_m2o(False) def _test_move_m2o(self, passive_updates): User, Address, users, addresses = (self.classes.User, self.classes.Address, self.tables.users, self.tables.addresses) # tests [ticket:1856] mapper(User, users) mapper(Address, addresses, properties={ 'user':relationship(User, passive_updates=passive_updates) }) sess = create_session() u1 = User(username='jack') u2 = User(username='ed') a1 = Address(user=u1, 
email='foo@bar') sess.add_all([u1, u2, a1]) sess.flush() a1.user = u2 sess.flush() def test_rowswitch_doesntfire(self): User, Address, users, addresses = (self.classes.User, self.classes.Address, self.tables.users, self.tables.addresses) mapper(User, users) mapper(Address, addresses, properties={ 'user':relationship(User, passive_updates=True) }) sess = create_session() u1 = User(username='ed') a1 = Address(user=u1, email='ed@host1') sess.add(u1) sess.add(a1) sess.flush() sess.delete(u1) sess.delete(a1) u2 = User(username='ed') a2 = Address(user=u2, email='ed@host1', etc='foo') sess.add(u2) sess.add(a2) from sqlalchemy.testing.assertsql import CompiledSQL # test that the primary key columns of addresses are not # being updated as well, since this is a row switch. self.assert_sql_execution(testing.db, sess.flush, CompiledSQL( "UPDATE addresses SET etc=:etc WHERE " "addresses.username = :addresses_username AND" " addresses.email = :addresses_email", {'etc': 'foo', 'addresses_username':'ed', 'addresses_email':'ed@host1'} ), ) def _test_onetomany(self, passive_updates): """Change the PK of a related entity via foreign key cascade. For databases that require "on update cascade", the mapper has to identify the row by the new value, not the old, when it does the update. """ User, Address, users, addresses = (self.classes.User, self.classes.Address, self.tables.users, self.tables.addresses) mapper(User, users, properties={ 'addresses':relationship(Address, passive_updates=passive_updates)}) mapper(Address, addresses) sess = create_session() a1, a2 = Address(username='ed', email='ed@host1'),\ Address(username='ed', email='ed@host2') u1 = User(username='ed', addresses=[a1, a2]) sess.add(u1) sess.flush() eq_(a1.username, 'ed') eq_(a2.username, 'ed') eq_(sa.select([addresses.c.username]).execute().fetchall(), [('ed',), ('ed',)]) u1.username = 'jack' a2.email='ed@host3' sess.flush() eq_(a1.username, 'jack') eq_(a2.username, 'jack') eq_(sa.select([addresses.c.username]).execute().fetchall(), [('jack',), ('jack', )]) class JoinedInheritanceTest(fixtures.MappedTest): """Test cascades of pk->pk/fk on joined table inh.""" # mssql doesn't allow ON UPDATE on self-referential keys __unsupported_on__ = ('mssql',) __requires__ = 'skip_mysql_on_windows', @classmethod def define_tables(cls, metadata): fk_args = _backend_specific_fk_args() Table('person', metadata, Column('name', String(50), primary_key=True), Column('type', String(50), nullable=False), test_needs_fk=True) Table('engineer', metadata, Column('name', String(50), ForeignKey('person.name', **fk_args), primary_key=True), Column('primary_language', String(50)), Column('boss_name', String(50), ForeignKey('manager.name', **fk_args)), test_needs_fk=True ) Table('manager', metadata, Column('name', String(50), ForeignKey('person.name', **fk_args), primary_key=True), Column('paperwork', String(50)), test_needs_fk=True ) @classmethod def setup_classes(cls): class Person(cls.Comparable): pass class Engineer(Person): pass class Manager(Person): pass @testing.requires.on_update_cascade def test_pk_passive(self): self._test_pk(True) # PG etc. need passive=True to allow PK->PK cascade @testing.fails_on_everything_except('sqlite', 'oracle', '+zxjdbc') def test_pk_nonpassive(self): self._test_pk(False) @testing.requires.on_update_cascade def test_fk_passive(self): self._test_fk(True) # PG etc. 
need passive=True to allow PK->PK cascade @testing.fails_on_everything_except('sqlite', 'mysql+zxjdbc', 'oracle', 'postgresql+zxjdbc') def test_fk_nonpassive(self): self._test_fk(False) def _test_pk(self, passive_updates): Person, Manager, person, manager, Engineer, engineer = (self.classes.Person, self.classes.Manager, self.tables.person, self.tables.manager, self.classes.Engineer, self.tables.engineer) mapper(Person, person, polymorphic_on=person.c.type, polymorphic_identity='person', passive_updates=passive_updates) mapper(Engineer, engineer, inherits=Person, polymorphic_identity='engineer', properties={ 'boss':relationship(Manager, primaryjoin=manager.c.name==engineer.c.boss_name, passive_updates=passive_updates ) }) mapper(Manager, manager, inherits=Person, polymorphic_identity='manager') sess = sa.orm.sessionmaker()() e1 = Engineer(name='dilbert', primary_language='java') sess.add(e1) sess.commit() e1.name = 'wally' e1.primary_language = 'c++' sess.commit() def _test_fk(self, passive_updates): Person, Manager, person, manager, Engineer, engineer = (self.classes.Person, self.classes.Manager, self.tables.person, self.tables.manager, self.classes.Engineer, self.tables.engineer) mapper(Person, person, polymorphic_on=person.c.type, polymorphic_identity='person', passive_updates=passive_updates) mapper(Engineer, engineer, inherits=Person, polymorphic_identity='engineer', properties={ 'boss':relationship(Manager, primaryjoin=manager.c.name==engineer.c.boss_name, passive_updates=passive_updates ) }) mapper(Manager, manager, inherits=Person, polymorphic_identity='manager') sess = sa.orm.sessionmaker()() m1 = Manager(name='dogbert', paperwork='lots') e1, e2 = \ Engineer(name='dilbert', primary_language='java', boss=m1),\ Engineer(name='wally', primary_language='c++', boss=m1) sess.add_all([ e1, e2, m1 ]) sess.commit() eq_(e1.boss_name, 'dogbert') eq_(e2.boss_name, 'dogbert') sess.expire_all() m1.name = 'pointy haired' e1.primary_language = 'scala' e2.primary_language = 'cobol' sess.commit() eq_(e1.boss_name, 'pointy haired') eq_(e2.boss_name, 'pointy haired') SQLAlchemy-0.8.4/test/orm/test_of_type.py0000644000076500000240000005317312251150016021065 0ustar classicstaff00000000000000from sqlalchemy.orm import Session, aliased, with_polymorphic, \ contains_eager, joinedload, subqueryload, relationship,\ subqueryload_all, joinedload_all from sqlalchemy import and_ from sqlalchemy import testing from sqlalchemy.testing import fixtures from sqlalchemy.testing import assert_raises, eq_ from sqlalchemy.testing.schema import Column from sqlalchemy.engine import default from sqlalchemy.testing.entities import ComparableEntity from sqlalchemy import Integer, String, ForeignKey from .inheritance._poly_fixtures import Company, Person, Engineer, Manager, Boss, \ Machine, Paperwork, _PolymorphicFixtureBase, _Polymorphic,\ _PolymorphicPolymorphic, _PolymorphicUnions, _PolymorphicJoins,\ _PolymorphicAliasedJoins class _PolymorphicTestBase(object): __dialect__ = 'default' def test_any_one(self): sess = Session() any_ = Company.employees.of_type(Engineer).any( Engineer.primary_language == 'cobol') eq_(sess.query(Company).filter(any_).one(), self.c2) def test_any_two(self): sess = Session() calias = aliased(Company) any_ = calias.employees.of_type(Engineer).any( Engineer.primary_language == 'cobol') eq_(sess.query(calias).filter(any_).one(), self.c2) def test_any_three(self): sess = Session() any_ = Company.employees.of_type(Boss).any( Boss.golf_swing == 'fore') eq_(sess.query(Company).filter(any_).one(), 
self.c1) def test_any_four(self): sess = Session() any_ = Company.employees.of_type(Boss).any( Manager.manager_name == 'pointy') eq_(sess.query(Company).filter(any_).one(), self.c1) def test_any_five(self): sess = Session() any_ = Company.employees.of_type(Engineer).any( and_(Engineer.primary_language == 'cobol')) eq_(sess.query(Company).filter(any_).one(), self.c2) def test_join_to_subclass_one(self): sess = Session() eq_(sess.query(Company) .join(Company.employees.of_type(Engineer)) .filter(Engineer.primary_language == 'java').all(), [self.c1]) def test_join_to_subclass_two(self): sess = Session() eq_(sess.query(Company) .join(Company.employees.of_type(Engineer), 'machines') .filter(Machine.name.ilike("%thinkpad%")).all(), [self.c1]) def test_join_to_subclass_three(self): sess = Session() eq_(sess.query(Company, Engineer) .join(Company.employees.of_type(Engineer)) .filter(Engineer.primary_language == 'java').count(), 1) # test [ticket:2093] eq_(sess.query(Company.company_id, Engineer) .join(Company.employees.of_type(Engineer)) .filter(Engineer.primary_language == 'java').count(), 1) eq_(sess.query(Company) .join(Company.employees.of_type(Engineer)) .filter(Engineer.primary_language == 'java').count(), 1) def test_with_polymorphic_join_compile_one(self): sess = Session() self.assert_compile( sess.query(Company).join( Company.employees.of_type( with_polymorphic(Person, [Engineer, Manager], aliased=True) ) ), "SELECT companies.company_id AS companies_company_id, " "companies.name AS companies_name FROM companies " "JOIN %s" % ( self._polymorphic_join_target([Engineer, Manager]) ) ) def test_with_polymorphic_join_exec_contains_eager_one(self): sess = Session() def go(): wp = with_polymorphic(Person, [Engineer, Manager], aliased=True) eq_( sess.query(Company).join( Company.employees.of_type(wp) ).order_by(Company.company_id, wp.person_id).\ options(contains_eager(Company.employees.of_type(wp))).all(), [self.c1, self.c2] ) self.assert_sql_count(testing.db, go, 1) def test_with_polymorphic_join_exec_contains_eager_two(self): sess = Session() def go(): wp = with_polymorphic(Person, [Engineer, Manager], aliased=True) eq_( sess.query(Company).join( Company.employees.of_type(wp) ).order_by(Company.company_id, wp.person_id).\ options(contains_eager(Company.employees, alias=wp)).all(), [self.c1, self.c2] ) self.assert_sql_count(testing.db, go, 1) def test_with_polymorphic_any(self): sess = Session() wp = with_polymorphic(Person, [Engineer], aliased=True) eq_( sess.query(Company.company_id).\ filter( Company.employees.of_type(wp).any( wp.Engineer.primary_language == 'java') ).all(), [(1, )] ) def test_subqueryload_implicit_withpoly(self): sess = Session() def go(): eq_( sess.query(Company).\ filter_by(company_id=1).\ options(subqueryload(Company.employees.of_type(Engineer))).\ all(), [self._company_with_emps_fixture()[0]] ) self.assert_sql_count(testing.db, go, 4) def test_joinedload_implicit_withpoly(self): sess = Session() def go(): eq_( sess.query(Company).\ filter_by(company_id=1).\ options(joinedload(Company.employees.of_type(Engineer))).\ all(), [self._company_with_emps_fixture()[0]] ) self.assert_sql_count(testing.db, go, 3) def test_subqueryload_explicit_withpoly(self): sess = Session() def go(): target = with_polymorphic(Person, Engineer, aliased=True) eq_( sess.query(Company).\ filter_by(company_id=1).\ options(subqueryload(Company.employees.of_type(target))).\ all(), [self._company_with_emps_fixture()[0]] ) self.assert_sql_count(testing.db, go, 4) def 
test_joinedload_explicit_withpoly(self): sess = Session() def go(): target = with_polymorphic(Person, Engineer, aliased=True) eq_( sess.query(Company).\ filter_by(company_id=1).\ options(joinedload(Company.employees.of_type(target))).\ all(), [self._company_with_emps_fixture()[0]] ) self.assert_sql_count(testing.db, go, 3) class PolymorphicPolymorphicTest(_PolymorphicTestBase, _PolymorphicPolymorphic): def _polymorphic_join_target(self, cls): from sqlalchemy.orm import class_mapper m, sel = class_mapper(Person)._with_polymorphic_args(cls) sel = sel.alias() comp_sel = sel.compile(dialect=default.DefaultDialect()) return \ comp_sel.process(sel, asfrom=True).replace("\n", "") + \ " ON companies.company_id = anon_1.people_company_id" class PolymorphicUnionsTest(_PolymorphicTestBase, _PolymorphicUnions): def _polymorphic_join_target(self, cls): from sqlalchemy.orm import class_mapper sel = class_mapper(Person)._with_polymorphic_selectable.element comp_sel = sel.compile(dialect=default.DefaultDialect()) return \ comp_sel.process(sel, asfrom=True).replace("\n", "") + \ " AS anon_1 ON companies.company_id = anon_1.company_id" class PolymorphicAliasedJoinsTest(_PolymorphicTestBase, _PolymorphicAliasedJoins): def _polymorphic_join_target(self, cls): from sqlalchemy.orm import class_mapper sel = class_mapper(Person)._with_polymorphic_selectable.element comp_sel = sel.compile(dialect=default.DefaultDialect()) return \ comp_sel.process(sel, asfrom=True).replace("\n", "") + \ " AS anon_1 ON companies.company_id = anon_1.people_company_id" class PolymorphicJoinsTest(_PolymorphicTestBase, _PolymorphicJoins): def _polymorphic_join_target(self, cls): from sqlalchemy.orm import class_mapper sel = class_mapper(Person)._with_polymorphic_selectable.alias() comp_sel = sel.compile(dialect=default.DefaultDialect()) return \ comp_sel.process(sel, asfrom=True).replace("\n", "") + \ " ON companies.company_id = anon_1.people_company_id" class SubclassRelationshipTest(testing.AssertsCompiledSQL, fixtures.DeclarativeMappedTest): """There's overlap here vs. 
the ones above.""" run_setup_classes = 'once' run_setup_mappers = 'once' run_inserts = 'once' run_deletes = None __dialect__ = 'default' @classmethod def setup_classes(cls): Base = cls.DeclarativeBasic class Job(ComparableEntity, Base): __tablename__ = "job" id = Column(Integer, primary_key=True, test_needs_autoincrement=True) type = Column(String(10)) container_id = Column(Integer, ForeignKey('data_container.id')) __mapper_args__ = {"polymorphic_on": type} class SubJob(Job): __tablename__ = 'subjob' id = Column(Integer, ForeignKey('job.id'), primary_key=True) attr = Column(String(10)) __mapper_args__ = {"polymorphic_identity": "sub"} class ParentThing(ComparableEntity, Base): __tablename__ = 'parent' id = Column(Integer, primary_key=True, test_needs_autoincrement=True) container_id = Column(Integer, ForeignKey('data_container.id')) container = relationship("DataContainer") class DataContainer(ComparableEntity, Base): __tablename__ = "data_container" id = Column(Integer, primary_key=True, test_needs_autoincrement=True) name = Column(String(10)) jobs = relationship(Job, order_by=Job.id) @classmethod def insert_data(cls): s = Session(testing.db) s.add_all(cls._fixture()) s.commit() @classmethod def _fixture(cls): ParentThing, DataContainer, SubJob = \ cls.classes.ParentThing,\ cls.classes.DataContainer,\ cls.classes.SubJob return [ ParentThing( container=DataContainer(name="d1", jobs=[ SubJob(attr="s1"), SubJob(attr="s2") ]) ), ParentThing( container=DataContainer(name="d2", jobs=[ SubJob(attr="s3"), SubJob(attr="s4") ]) ), ] @classmethod def _dc_fixture(cls): return [p.container for p in cls._fixture()] def test_contains_eager_wpoly(self): ParentThing, DataContainer, Job, SubJob = \ self.classes.ParentThing,\ self.classes.DataContainer,\ self.classes.Job,\ self.classes.SubJob Job_P = with_polymorphic(Job, SubJob, aliased=True) s = Session(testing.db) q = s.query(DataContainer).\ join(DataContainer.jobs.of_type(Job_P)).\ options(contains_eager(DataContainer.jobs.of_type(Job_P))) def go(): eq_( q.all(), self._dc_fixture() ) self.assert_sql_count(testing.db, go, 1) def test_joinedload_wpoly(self): ParentThing, DataContainer, Job, SubJob = \ self.classes.ParentThing,\ self.classes.DataContainer,\ self.classes.Job,\ self.classes.SubJob Job_P = with_polymorphic(Job, SubJob, aliased=True) s = Session(testing.db) q = s.query(DataContainer).\ options(joinedload(DataContainer.jobs.of_type(Job_P))) def go(): eq_( q.all(), self._dc_fixture() ) self.assert_sql_count(testing.db, go, 1) def test_joinedload_wsubclass(self): ParentThing, DataContainer, Job, SubJob = \ self.classes.ParentThing,\ self.classes.DataContainer,\ self.classes.Job,\ self.classes.SubJob s = Session(testing.db) q = s.query(DataContainer).\ options(joinedload(DataContainer.jobs.of_type(SubJob))) def go(): eq_( q.all(), self._dc_fixture() ) self.assert_sql_count(testing.db, go, 1) def test_lazyload(self): DataContainer = self.classes.DataContainer s = Session(testing.db) q = s.query(DataContainer) def go(): eq_( q.all(), self._dc_fixture() ) # SELECT data container # SELECT job * 2 container rows # SELECT subjob * 4 rows self.assert_sql_count(testing.db, go, 7) def test_subquery_wsubclass(self): ParentThing, DataContainer, Job, SubJob = \ self.classes.ParentThing,\ self.classes.DataContainer,\ self.classes.Job,\ self.classes.SubJob s = Session(testing.db) q = s.query(DataContainer).\ options(subqueryload(DataContainer.jobs.of_type(SubJob))) def go(): eq_( q.all(), self._dc_fixture() ) self.assert_sql_count(testing.db, go, 2) def 
test_twolevel_subqueryload_wsubclass(self): ParentThing, DataContainer, Job, SubJob = \ self.classes.ParentThing,\ self.classes.DataContainer,\ self.classes.Job,\ self.classes.SubJob s = Session(testing.db) q = s.query(ParentThing).\ options( subqueryload_all( ParentThing.container, DataContainer.jobs.of_type(SubJob) )) def go(): eq_( q.all(), self._fixture() ) self.assert_sql_count(testing.db, go, 3) def test_twolevel_joinedload_wsubclass(self): ParentThing, DataContainer, Job, SubJob = \ self.classes.ParentThing,\ self.classes.DataContainer,\ self.classes.Job,\ self.classes.SubJob s = Session(testing.db) q = s.query(ParentThing).\ options( joinedload_all( ParentThing.container, DataContainer.jobs.of_type(SubJob) )) def go(): eq_( q.all(), self._fixture() ) self.assert_sql_count(testing.db, go, 1) def test_any_wpoly(self): ParentThing, DataContainer, Job, SubJob = \ self.classes.ParentThing,\ self.classes.DataContainer,\ self.classes.Job,\ self.classes.SubJob Job_P = with_polymorphic(Job, SubJob, aliased=True) s = Session() q = s.query(Job).join(DataContainer.jobs).\ filter( DataContainer.jobs.of_type(Job_P).\ any(Job_P.id < Job.id) ) self.assert_compile(q, "SELECT job.id AS job_id, job.type AS job_type, " "job.container_id " "AS job_container_id " "FROM data_container " "JOIN job ON data_container.id = job.container_id " "WHERE EXISTS (SELECT 1 " "FROM (SELECT job.id AS job_id, job.type AS job_type, " "job.container_id AS job_container_id, " "subjob.id AS subjob_id, subjob.attr AS subjob_attr " "FROM job LEFT OUTER JOIN subjob ON job.id = subjob.id) AS anon_1 " "WHERE data_container.id = anon_1.job_container_id AND job.id > anon_1.job_id)" ) def test_any_walias(self): ParentThing, DataContainer, Job, SubJob = \ self.classes.ParentThing,\ self.classes.DataContainer,\ self.classes.Job,\ self.classes.SubJob Job_A = aliased(Job) s = Session() q = s.query(Job).join(DataContainer.jobs).\ filter( DataContainer.jobs.of_type(Job_A).\ any(and_(Job_A.id < Job.id, Job_A.type=='fred')) ) self.assert_compile(q, "SELECT job.id AS job_id, job.type AS job_type, " "job.container_id AS job_container_id " "FROM data_container JOIN job ON data_container.id = job.container_id " "WHERE EXISTS (SELECT 1 " "FROM job AS job_1 " "WHERE data_container.id = job_1.container_id " "AND job.id > job_1.id AND job_1.type = :type_1)" ) def test_join_wpoly(self): ParentThing, DataContainer, Job, SubJob = \ self.classes.ParentThing,\ self.classes.DataContainer,\ self.classes.Job,\ self.classes.SubJob Job_P = with_polymorphic(Job, SubJob, aliased=True) s = Session() q = s.query(DataContainer).join(DataContainer.jobs.of_type(Job_P)) self.assert_compile(q, "SELECT data_container.id AS data_container_id, " "data_container.name AS data_container_name " "FROM data_container JOIN (SELECT job.id AS job_id, " "job.type AS job_type, job.container_id AS job_container_id, " "subjob.id AS subjob_id, subjob.attr AS subjob_attr " "FROM job LEFT OUTER JOIN subjob ON job.id = subjob.id) " "AS anon_1 ON data_container.id = anon_1.job_container_id") def test_join_wsubclass(self): ParentThing, DataContainer, Job, SubJob = \ self.classes.ParentThing,\ self.classes.DataContainer,\ self.classes.Job,\ self.classes.SubJob s = Session() q = s.query(DataContainer).join(DataContainer.jobs.of_type(SubJob)) # note the of_type() here renders JOIN for the Job->SubJob. # this is because it's using the SubJob mapper directly within # query.join(). 
When we do joinedload() etc., we're instead # doing a with_polymorphic(), and there we need the join to be # outer by default. self.assert_compile(q, "SELECT data_container.id AS data_container_id, " "data_container.name AS data_container_name " "FROM data_container JOIN (SELECT job.id AS job_id, " "job.type AS job_type, job.container_id AS job_container_id, " "subjob.id AS subjob_id, subjob.attr AS subjob_attr " "FROM job JOIN subjob ON job.id = subjob.id) AS anon_1 " "ON data_container.id = anon_1.job_container_id" ) def test_join_wpoly_innerjoin(self): ParentThing, DataContainer, Job, SubJob = \ self.classes.ParentThing,\ self.classes.DataContainer,\ self.classes.Job,\ self.classes.SubJob Job_P = with_polymorphic(Job, SubJob, aliased=True, innerjoin=True) s = Session() q = s.query(DataContainer).join(DataContainer.jobs.of_type(Job_P)) self.assert_compile(q, "SELECT data_container.id AS data_container_id, " "data_container.name AS data_container_name " "FROM data_container JOIN (SELECT job.id AS job_id, " "job.type AS job_type, job.container_id AS job_container_id, " "subjob.id AS subjob_id, subjob.attr AS subjob_attr " "FROM job JOIN subjob ON job.id = subjob.id) " "AS anon_1 ON data_container.id = anon_1.job_container_id") def test_join_walias(self): ParentThing, DataContainer, Job, SubJob = \ self.classes.ParentThing,\ self.classes.DataContainer,\ self.classes.Job,\ self.classes.SubJob Job_A = aliased(Job) s = Session() q = s.query(DataContainer).join(DataContainer.jobs.of_type(Job_A)) self.assert_compile(q, "SELECT data_container.id AS data_container_id, " "data_container.name AS data_container_name " "FROM data_container JOIN job AS job_1 " "ON data_container.id = job_1.container_id") def test_join_explicit_wpoly(self): ParentThing, DataContainer, Job, SubJob = \ self.classes.ParentThing,\ self.classes.DataContainer,\ self.classes.Job,\ self.classes.SubJob Job_P = with_polymorphic(Job, SubJob, aliased=True) s = Session() q = s.query(DataContainer).join(Job_P, DataContainer.jobs) self.assert_compile(q, "SELECT data_container.id AS data_container_id, " "data_container.name AS data_container_name " "FROM data_container JOIN (SELECT job.id AS job_id, " "job.type AS job_type, job.container_id AS job_container_id, " "subjob.id AS subjob_id, subjob.attr AS subjob_attr " "FROM job LEFT OUTER JOIN subjob ON job.id = subjob.id) " "AS anon_1 ON data_container.id = anon_1.job_container_id") SQLAlchemy-0.8.4/test/orm/test_onetoone.py0000644000076500000240000000430412251147172021247 0ustar classicstaff00000000000000import sqlalchemy as sa from sqlalchemy import testing from sqlalchemy import Integer, String, ForeignKey from sqlalchemy.testing.schema import Table, Column from sqlalchemy.orm import mapper, relationship, create_session from sqlalchemy.testing import fixtures class O2OTest(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): Table('jack', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('number', String(50)), Column('status', String(20)), Column('subroom', String(5))) Table('port', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', String(30)), Column('description', String(100)), Column('jack_id', Integer, ForeignKey("jack.id"))) @classmethod def setup_mappers(cls): class Jack(cls.Basic): pass class Port(cls.Basic): pass def test_basic(self): Port, port, jack, Jack = (self.classes.Port, self.tables.port, self.tables.jack, self.classes.Jack) mapper(Port, port) mapper(Jack, jack, 
order_by=[jack.c.number], properties=dict( port=relationship(Port, backref='jack', uselist=False, )), ) session = create_session() j = Jack(number='101') session.add(j) p = Port(name='fa0/1') session.add(p) j.port=p session.flush() jid = j.id pid = p.id j=session.query(Jack).get(jid) p=session.query(Port).get(pid) assert p.jack is not None assert p.jack is j assert j.port is not None p.jack = None assert j.port is None session.expunge_all() j = session.query(Jack).get(jid) p = session.query(Port).get(pid) j.port=None self.assert_(p.jack is None) session.flush() session.delete(j) session.flush() SQLAlchemy-0.8.4/test/orm/test_pickled.py0000644000076500000240000004403712251150016021032 0ustar classicstaff00000000000000from sqlalchemy.testing import eq_ from sqlalchemy.util import pickle import sqlalchemy as sa from sqlalchemy import testing from sqlalchemy.testing.util import picklers from sqlalchemy.testing import assert_raises_message from sqlalchemy import Integer, String, ForeignKey, exc, MetaData from sqlalchemy.testing.schema import Table, Column from sqlalchemy.orm import mapper, relationship, create_session, \ sessionmaker, attributes, interfaces,\ clear_mappers, exc as orm_exc,\ configure_mappers, Session, lazyload_all,\ lazyload, aliased from sqlalchemy.orm.collections import attribute_mapped_collection, \ column_mapped_collection from sqlalchemy.testing import fixtures from test.orm import _fixtures from sqlalchemy.testing.pickleable import User, Address, Dingaling, Order, \ Child1, Child2, Parent, Screen, EmailUser class PickleTest(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): Table('users', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', String(30), nullable=False), test_needs_acid=True, test_needs_fk=True ) Table('addresses', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('user_id', None, ForeignKey('users.id')), Column('email_address', String(50), nullable=False), test_needs_acid=True, test_needs_fk=True ) Table('orders', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('user_id', None, ForeignKey('users.id')), Column('address_id', None, ForeignKey('addresses.id')), Column('description', String(30)), Column('isopen', Integer), test_needs_acid=True, test_needs_fk=True ) Table("dingalings", metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('address_id', None, ForeignKey('addresses.id')), Column('data', String(30)), test_needs_acid=True, test_needs_fk=True ) def test_transient(self): users, addresses = (self.tables.users, self.tables.addresses) mapper(User, users, properties={ 'addresses':relationship(Address, backref="user") }) mapper(Address, addresses) sess = create_session() u1 = User(name='ed') u1.addresses.append(Address(email_address='ed@bar.com')) u2 = pickle.loads(pickle.dumps(u1)) sess.add(u2) sess.flush() sess.expunge_all() eq_(u1, sess.query(User).get(u2.id)) def test_no_mappers(self): users = self.tables.users umapper = mapper(User, users) u1 = User(name='ed') u1_pickled = pickle.dumps(u1, -1) clear_mappers() assert_raises_message( orm_exc.UnmappedInstanceError, "Cannot deserialize object of type - no mapper()", pickle.loads, u1_pickled) def test_no_instrumentation(self): users = self.tables.users umapper = mapper(User, users) u1 = User(name='ed') u1_pickled = pickle.dumps(u1, -1) clear_mappers() umapper = mapper(User, users) u1 = pickle.loads(u1_pickled) # this fails unless 
the InstanceState # compiles the mapper eq_(str(u1), "User(name='ed')") def test_class_deferred_cols(self): addresses, users = (self.tables.addresses, self.tables.users) mapper(User, users, properties={ 'name': sa.orm.deferred(users.c.name), 'addresses': relationship(Address, backref="user") }) mapper(Address, addresses, properties={ 'email_address': sa.orm.deferred(addresses.c.email_address) }) sess = create_session() u1 = User(name='ed') u1.addresses.append(Address(email_address='ed@bar.com')) sess.add(u1) sess.flush() sess.expunge_all() u1 = sess.query(User).get(u1.id) assert 'name' not in u1.__dict__ assert 'addresses' not in u1.__dict__ u2 = pickle.loads(pickle.dumps(u1)) sess2 = create_session() sess2.add(u2) eq_(u2.name, 'ed') eq_(u2, User(name='ed', addresses=[Address(email_address='ed@bar.com')])) u2 = pickle.loads(pickle.dumps(u1)) sess2 = create_session() u2 = sess2.merge(u2, load=False) eq_(u2.name, 'ed') eq_(u2, User(name='ed', addresses=[Address(email_address='ed@bar.com')])) def test_instance_lazy_relation_loaders(self): users, addresses = (self.tables.users, self.tables.addresses) mapper(User, users, properties={ 'addresses': relationship(Address, lazy='noload') }) mapper(Address, addresses) sess = Session() u1 = User(name='ed', addresses=[ Address( email_address='ed@bar.com', ) ]) sess.add(u1) sess.commit() sess.close() u1 = sess.query(User).options( lazyload(User.addresses) ).first() u2 = pickle.loads(pickle.dumps(u1)) sess = Session() sess.add(u2) assert u2.addresses def test_instance_deferred_cols(self): users, addresses = (self.tables.users, self.tables.addresses) mapper(User, users, properties={ 'addresses':relationship(Address, backref="user") }) mapper(Address, addresses) sess = create_session() u1 = User(name='ed') u1.addresses.append(Address(email_address='ed@bar.com')) sess.add(u1) sess.flush() sess.expunge_all() u1 = sess.query(User).\ options(sa.orm.defer('name'), sa.orm.defer('addresses.email_address')).\ get(u1.id) assert 'name' not in u1.__dict__ assert 'addresses' not in u1.__dict__ u2 = pickle.loads(pickle.dumps(u1)) sess2 = create_session() sess2.add(u2) eq_(u2.name, 'ed') assert 'addresses' not in u2.__dict__ ad = u2.addresses[0] assert 'email_address' not in ad.__dict__ eq_(ad.email_address, 'ed@bar.com') eq_(u2, User(name='ed', addresses=[Address(email_address='ed@bar.com')])) u2 = pickle.loads(pickle.dumps(u1)) sess2 = create_session() u2 = sess2.merge(u2, load=False) eq_(u2.name, 'ed') assert 'addresses' not in u2.__dict__ ad = u2.addresses[0] # mapper options now transmit over merge(), # new as of 0.6, so email_address is deferred. 
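# A condensed sketch of the behavior noted above (illustrative only; assumes a
# session factory and the User/Address mappings configured in this test are
# passed in): a query-level defer() option survives a pickle round trip and a
# merge(load=False) into a fresh session, so the column stays deferred until
# first access.
def _sketch_merge_preserves_deferred_options(session_factory, User):
    import pickle
    from sqlalchemy.orm import defer
    sess = session_factory()
    u1 = sess.query(User).options(defer('name')).first()
    u2 = pickle.loads(pickle.dumps(u1))   # the defer option travels with the pickled state
    sess2 = session_factory()
    u2 = sess2.merge(u2, load=False)      # merge without re-querying the database
    assert 'name' not in u2.__dict__      # still deferred after the merge
    return u2.name                        # first access emits the deferred column load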
assert 'email_address' not in ad.__dict__ eq_(ad.email_address, 'ed@bar.com') eq_(u2, User(name='ed', addresses=[Address(email_address='ed@bar.com')])) def test_pickle_protocols(self): users, addresses = (self.tables.users, self.tables.addresses) mapper(User, users, properties={ 'addresses':relationship(Address, backref="user") }) mapper(Address, addresses) sess = sessionmaker()() u1 = User(name='ed') u1.addresses.append(Address(email_address='ed@bar.com')) sess.add(u1) sess.commit() u1 = sess.query(User).first() u1.addresses for loads, dumps in picklers(): u2 = loads(dumps(u1)) eq_(u1, u2) def test_options_with_descriptors(self): users, addresses, dingalings = (self.tables.users, self.tables.addresses, self.tables.dingalings) mapper(User, users, properties={ 'addresses':relationship(Address, backref="user") }) mapper(Address, addresses, properties={ 'dingaling':relationship(Dingaling) }) mapper(Dingaling, dingalings) sess = create_session() u1 = User(name='ed') u1.addresses.append(Address(email_address='ed@bar.com')) sess.add(u1) sess.flush() sess.expunge_all() for opt in [ sa.orm.joinedload(User.addresses), sa.orm.joinedload("addresses"), sa.orm.defer("name"), sa.orm.defer(User.name), sa.orm.joinedload("addresses", Address.dingaling), ]: opt2 = pickle.loads(pickle.dumps(opt)) eq_(opt.key, opt2.key) u1 = sess.query(User).options(opt).first() u2 = pickle.loads(pickle.dumps(u1)) def test_collection_setstate(self): """test a particular cycle that requires CollectionAdapter to not rely upon InstanceState to deserialize.""" m = MetaData() c1 = Table('c1', m, Column('parent_id', String, ForeignKey('p.id'), primary_key=True) ) c2 = Table('c2', m, Column('parent_id', String, ForeignKey('p.id'), primary_key=True) ) p = Table('p', m, Column('id', String, primary_key=True) ) mapper(Parent, p, properties={ 'children1':relationship(Child1), 'children2':relationship(Child2) }) mapper(Child1, c1) mapper(Child2, c2) obj = Parent() screen1 = Screen(obj) screen1.errors = [obj.children1, obj.children2] screen2 = Screen(Child2(), screen1) pickle.loads(pickle.dumps(screen2)) def test_exceptions(self): class Foo(object): pass users = self.tables.users mapper(User, users) for sa_exc in ( orm_exc.UnmappedInstanceError(Foo()), orm_exc.UnmappedClassError(Foo), orm_exc.ObjectDeletedError(attributes.instance_state(User())), ): for loads, dumps in picklers(): repickled = loads(dumps(sa_exc)) eq_(repickled.args[0], sa_exc.args[0]) def test_attribute_mapped_collection(self): users, addresses = self.tables.users, self.tables.addresses mapper(User, users, properties={ 'addresses':relationship( Address, collection_class= attribute_mapped_collection('email_address') ) }) mapper(Address, addresses) u1 = User() u1.addresses = {"email1":Address(email_address="email1")} for loads, dumps in picklers(): repickled = loads(dumps(u1)) eq_(u1.addresses, repickled.addresses) eq_(repickled.addresses['email1'], Address(email_address="email1")) def test_column_mapped_collection(self): users, addresses = self.tables.users, self.tables.addresses mapper(User, users, properties={ 'addresses':relationship( Address, collection_class= column_mapped_collection( addresses.c.email_address) ) }) mapper(Address, addresses) u1 = User() u1.addresses = { "email1":Address(email_address="email1"), "email2":Address(email_address="email2") } for loads, dumps in picklers(): repickled = loads(dumps(u1)) eq_(u1.addresses, repickled.addresses) eq_(repickled.addresses['email1'], Address(email_address="email1")) def 
test_composite_column_mapped_collection(self): users, addresses = self.tables.users, self.tables.addresses mapper(User, users, properties={ 'addresses':relationship( Address, collection_class= column_mapped_collection([ addresses.c.id, addresses.c.email_address]) ) }) mapper(Address, addresses) u1 = User() u1.addresses = { (1, "email1"):Address(id=1, email_address="email1"), (2, "email2"):Address(id=2, email_address="email2") } for loads, dumps in picklers(): repickled = loads(dumps(u1)) eq_(u1.addresses, repickled.addresses) eq_(repickled.addresses[(1, 'email1')], Address(id=1, email_address="email1")) class PolymorphicDeferredTest(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): Table('users', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', String(30)), Column('type', String(30))) Table('email_users', metadata, Column('id', Integer, ForeignKey('users.id'), primary_key=True), Column('email_address', String(30))) def test_polymorphic_deferred(self): email_users, users = (self.tables.email_users, self.tables.users, ) mapper(User, users, polymorphic_identity='user', polymorphic_on=users.c.type) mapper(EmailUser, email_users, inherits=User, polymorphic_identity='emailuser') eu = EmailUser(name="user1", email_address='foo@bar.com') sess = create_session() sess.add(eu) sess.flush() sess.expunge_all() eu = sess.query(User).first() eu2 = pickle.loads(pickle.dumps(eu)) sess2 = create_session() sess2.add(eu2) assert 'email_address' not in eu2.__dict__ eq_(eu2.email_address, 'foo@bar.com') class TupleLabelTest(_fixtures.FixtureTest): @classmethod def setup_classes(cls): pass @classmethod def setup_mappers(cls): users, addresses, orders = cls.tables.users, cls.tables.addresses, cls.tables.orders mapper(User, users, properties={ 'addresses':relationship(Address, backref='user', order_by=addresses.c.id), 'orders':relationship(Order, backref='user', order_by=orders.c.id), # o2m, m2o }) mapper(Address, addresses) mapper(Order, orders, properties={ 'address':relationship(Address), # m2o }) def test_tuple_labeling(self): users = self.tables.users sess = create_session() # test pickle + all the protocols ! 
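# Note on the loop below: "pickled" is False for a pass with no pickling at all,
# -1 for pickle.HIGHEST_PROTOCOL, and 0, 1, 2 for each explicit pickle protocol,
# so named access on result rows is verified both with and without a round trip
# through every protocol.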
for pickled in False, -1, 0, 1, 2: for row in sess.query(User, Address).join(User.addresses).all(): if pickled is not False: row = pickle.loads(pickle.dumps(row, pickled)) eq_(row.keys(), ['User', 'Address']) eq_(row.User, row[0]) eq_(row.Address, row[1]) for row in sess.query(User.name, User.id.label('foobar')): if pickled is not False: row = pickle.loads(pickle.dumps(row, pickled)) eq_(row.keys(), ['name', 'foobar']) eq_(row.name, row[0]) eq_(row.foobar, row[1]) for row in sess.query(User).values(User.name, User.id.label('foobar')): if pickled is not False: row = pickle.loads(pickle.dumps(row, pickled)) eq_(row.keys(), ['name', 'foobar']) eq_(row.name, row[0]) eq_(row.foobar, row[1]) oalias = aliased(Order) for row in sess.query(User, oalias).join(User.orders).all(): if pickled is not False: row = pickle.loads(pickle.dumps(row, pickled)) eq_(row.keys(), ['User']) eq_(row.User, row[0]) oalias = aliased(Order, name='orders') for row in sess.query(User, oalias).join(oalias, User.orders).all(): if pickled is not False: row = pickle.loads(pickle.dumps(row, pickled)) eq_(row.keys(), ['User', 'orders']) eq_(row.User, row[0]) eq_(row.orders, row[1]) # test here that first col is not labeled, only # one name in keys, matches correctly for row in sess.query(User.name + 'hoho', User.name): eq_(row.keys(), ['name']) eq_(row[0], row.name + 'hoho') if pickled is not False: ret = sess.query(User, Address).join(User.addresses).all() pickle.loads(pickle.dumps(ret, pickled)) class CustomSetupTeardownTest(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): Table('users', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', String(30), nullable=False), test_needs_acid=True, test_needs_fk=True ) Table('addresses', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('user_id', None, ForeignKey('users.id')), Column('email_address', String(50), nullable=False), test_needs_acid=True, test_needs_fk=True ) def test_rebuild_state(self): """not much of a 'test', but illustrate how to remove instance-level state before pickling. 
""" users = self.tables.users mapper(User, users) u1 = User() attributes.manager_of_class(User).teardown_instance(u1) assert not u1.__dict__ u2 = pickle.loads(pickle.dumps(u1)) attributes.manager_of_class(User).setup_instance(u2) assert attributes.instance_state(u2) SQLAlchemy-0.8.4/test/orm/test_query.py0000644000076500000240000032314512251150016020564 0ustar classicstaff00000000000000import operator from sqlalchemy import MetaData, null, exists, text, union, literal, \ literal_column, func, between, Unicode, desc, and_, bindparam, \ select, distinct, or_, collate, insert from sqlalchemy import inspect from sqlalchemy import exc as sa_exc, util from sqlalchemy.sql import compiler, table, column from sqlalchemy.sql import expression from sqlalchemy.engine import default from sqlalchemy.orm import attributes, mapper, relationship, backref, \ configure_mappers, create_session, synonym, Session, class_mapper, \ aliased, column_property, joinedload_all, joinedload, Query,\ util as orm_util from sqlalchemy.testing.assertsql import CompiledSQL from sqlalchemy.testing.schema import Table, Column import sqlalchemy as sa from sqlalchemy import testing from sqlalchemy.testing.assertions import eq_, assert_raises, assert_raises_message from sqlalchemy.testing import AssertsCompiledSQL from test.orm import _fixtures from sqlalchemy.testing import fixtures, engines from sqlalchemy.orm.util import join, outerjoin, with_parent class QueryTest(_fixtures.FixtureTest): run_setup_mappers = 'once' run_inserts = 'once' run_deletes = None @classmethod def setup_mappers(cls): cls._setup_stock_mapping() class MiscTest(QueryTest): run_create_tables = None run_inserts = None def test_with_session(self): User = self.classes.User s1 = Session() s2 = Session() q1 = s1.query(User) q2 = q1.with_session(s2) assert q2.session is s2 assert q1.session is s1 class RowTupleTest(QueryTest): run_setup_mappers = None def test_custom_names(self): User, users = self.classes.User, self.tables.users mapper(User, users, properties={ 'uname': users.c.name }) row = create_session().\ query(User.id, User.uname).\ filter(User.id == 7).first() assert row.id == 7 assert row.uname == 'jack' def test_column_metadata(self): users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(User, users) mapper(Address, addresses) sess = create_session() user_alias = aliased(User) address_alias = aliased(Address, name='aalias') fn = func.count(User.id) name_label = User.name.label('uname') for q, asserted in [ ( sess.query(User), [{'name':'User', 'type':User, 'aliased':False, 'expr':User}] ), ( sess.query(User.id, User), [ {'name':'id', 'type':users.c.id.type, 'aliased':False, 'expr':User.id}, {'name':'User', 'type':User, 'aliased':False, 'expr':User} ] ), ( sess.query(User.id, user_alias), [ {'name':'id', 'type':users.c.id.type, 'aliased':False, 'expr':User.id}, {'name':None, 'type':User, 'aliased':True, 'expr':user_alias} ] ), ( sess.query(address_alias), [ {'name':'aalias', 'type':Address, 'aliased':True, 'expr':address_alias} ] ), ( sess.query(name_label, fn), [ {'name':'uname', 'type':users.c.name.type, 'aliased':False, 'expr':name_label}, {'name':None, 'type':fn.type, 'aliased':False, 'expr':fn }, ] ) ]: eq_( q.column_descriptions, asserted ) def test_unhashable_type(self): from sqlalchemy.types import TypeDecorator, Integer from sqlalchemy.sql import type_coerce class MyType(TypeDecorator): impl = Integer hashable = False def process_result_value(self, value, dialect): return 
[value] User, users = self.classes.User, self.tables.users mapper(User, users) s = Session() q = s.\ query(User, type_coerce(users.c.id, MyType).label('foo')).\ filter(User.id == 7) row = q.first() eq_( row, (User(id=7), [7]) ) class RawSelectTest(QueryTest, AssertsCompiledSQL): __dialect__ = 'default' def test_select_from_entity(self): User = self.classes.User self.assert_compile( select(['*']).select_from(User), "SELECT * FROM users" ) def test_where_relationship(self): User = self.classes.User self.assert_compile( select([User]).where(User.addresses), "SELECT users.id, users.name FROM users, addresses " "WHERE users.id = addresses.user_id" ) def test_where_m2m_relationship(self): Item = self.classes.Item self.assert_compile( select([Item]).where(Item.keywords), "SELECT items.id, items.description FROM items, " "item_keywords AS item_keywords_1, keywords " "WHERE items.id = item_keywords_1.item_id " "AND keywords.id = item_keywords_1.keyword_id" ) def test_inline_select_from_entity(self): User = self.classes.User self.assert_compile( select(['*'], from_obj=User), "SELECT * FROM users" ) def test_select_from_aliased_entity(self): User = self.classes.User ua = aliased(User, name="ua") self.assert_compile( select(['*']).select_from(ua), "SELECT * FROM users AS ua" ) def test_correlate_entity(self): User = self.classes.User Address = self.classes.Address self.assert_compile( select([User.name, Address.id, select([func.count(Address.id)]).\ where(User.id == Address.user_id).\ correlate(User).as_scalar() ]), "SELECT users.name, addresses.id, " "(SELECT count(addresses.id) AS count_1 " "FROM addresses WHERE users.id = addresses.user_id) AS anon_1 " "FROM users, addresses" ) def test_correlate_aliased_entity(self): User = self.classes.User Address = self.classes.Address uu = aliased(User, name="uu") self.assert_compile( select([uu.name, Address.id, select([func.count(Address.id)]).\ where(uu.id == Address.user_id).\ correlate(uu).as_scalar() ]), # curious, "address.user_id = uu.id" is reversed here "SELECT uu.name, addresses.id, " "(SELECT count(addresses.id) AS count_1 " "FROM addresses WHERE addresses.user_id = uu.id) AS anon_1 " "FROM users AS uu, addresses" ) def test_columns_clause_entity(self): User = self.classes.User self.assert_compile( select([User]), "SELECT users.id, users.name FROM users" ) def test_columns_clause_columns(self): User = self.classes.User self.assert_compile( select([User.id, User.name]), "SELECT users.id, users.name FROM users" ) def test_columns_clause_aliased_columns(self): User = self.classes.User ua = aliased(User, name='ua') self.assert_compile( select([ua.id, ua.name]), "SELECT ua.id, ua.name FROM users AS ua" ) def test_columns_clause_aliased_entity(self): User = self.classes.User ua = aliased(User, name='ua') self.assert_compile( select([ua]), "SELECT ua.id, ua.name FROM users AS ua" ) def test_core_join(self): User = self.classes.User Address = self.classes.Address from sqlalchemy.sql import join self.assert_compile( select([User]).select_from(join(User, Address)), "SELECT users.id, users.name FROM users " "JOIN addresses ON users.id = addresses.user_id" ) def test_insert_from_query(self): User = self.classes.User Address = self.classes.Address s = Session() q = s.query(User.id, User.name).filter_by(name='ed') self.assert_compile( insert(Address).from_select(('id', 'email_address'), q), "INSERT INTO addresses (id, email_address) " "SELECT users.id AS users_id, users.name AS users_name " "FROM users WHERE users.name = :name_1" ) def 
test_insert_from_query_col_attr(self): User = self.classes.User Address = self.classes.Address s = Session() q = s.query(User.id, User.name).filter_by(name='ed') self.assert_compile( insert(Address).from_select( (Address.id, Address.email_address), q), "INSERT INTO addresses (id, email_address) " "SELECT users.id AS users_id, users.name AS users_name " "FROM users WHERE users.name = :name_1" ) def test_update_from_entity(self): from sqlalchemy.sql import update User = self.classes.User self.assert_compile( update(User), "UPDATE users SET id=:id, name=:name" ) self.assert_compile( update(User).values(name='ed').where(User.id == 5), "UPDATE users SET name=:name WHERE users.id = :id_1", checkparams={"id_1": 5, "name": "ed"} ) def test_delete_from_entity(self): from sqlalchemy.sql import delete User = self.classes.User self.assert_compile( delete(User), "DELETE FROM users" ) self.assert_compile( delete(User).where(User.id == 5), "DELETE FROM users WHERE users.id = :id_1", checkparams={"id_1": 5} ) def test_insert_from_entity(self): from sqlalchemy.sql import insert User = self.classes.User self.assert_compile( insert(User), "INSERT INTO users (id, name) VALUES (:id, :name)" ) self.assert_compile( insert(User).values(name="ed"), "INSERT INTO users (name) VALUES (:name)", checkparams={"name": "ed"} ) class GetTest(QueryTest): def test_get(self): User = self.classes.User s = create_session() assert s.query(User).get(19) is None u = s.query(User).get(7) u2 = s.query(User).get(7) assert u is u2 s.expunge_all() u2 = s.query(User).get(7) assert u is not u2 def test_get_composite_pk_no_result(self): CompositePk = self.classes.CompositePk s = Session() assert s.query(CompositePk).get((100,100)) is None def test_get_composite_pk_result(self): CompositePk = self.classes.CompositePk s = Session() one_two = s.query(CompositePk).get((1,2)) assert one_two.i == 1 assert one_two.j == 2 assert one_two.k == 3 def test_get_too_few_params(self): CompositePk = self.classes.CompositePk s = Session() q = s.query(CompositePk) assert_raises(sa_exc.InvalidRequestError, q.get, 7) def test_get_too_few_params_tuple(self): CompositePk = self.classes.CompositePk s = Session() q = s.query(CompositePk) assert_raises(sa_exc.InvalidRequestError, q.get, (7,)) def test_get_too_many_params(self): CompositePk = self.classes.CompositePk s = Session() q = s.query(CompositePk) assert_raises(sa_exc.InvalidRequestError, q.get, (7, 10, 100)) def test_get_against_col(self): User = self.classes.User s = Session() q = s.query(User.id) assert_raises(sa_exc.InvalidRequestError, q.get, (5, )) def test_get_null_pk(self): """test that a mapping which can have None in a PK (i.e. map to an outerjoin) works with get().""" users, addresses = self.tables.users, self.tables.addresses s = users.outerjoin(addresses) class UserThing(fixtures.ComparableEntity): pass mapper(UserThing, s, properties={ 'id':(users.c.id, addresses.c.user_id), 'address_id':addresses.c.id, }) sess = create_session() u10 = sess.query(UserThing).get((10, None)) eq_(u10, UserThing(id=10) ) def test_no_criterion(self): """test that get()/load() does not use preexisting filter/etc. 
criterion""" User, Address = self.classes.User, self.classes.Address s = create_session() q = s.query(User).join('addresses').filter(Address.user_id==8) assert_raises(sa_exc.InvalidRequestError, q.get, 7) assert_raises(sa_exc.InvalidRequestError, s.query(User).filter(User.id==7).get, 19) # order_by()/get() doesn't raise s.query(User).order_by(User.id).get(8) def test_unique_param_names(self): users = self.tables.users class SomeUser(object): pass s = users.select(users.c.id!=12).alias('users') m = mapper(SomeUser, s) assert s.primary_key == m.primary_key sess = create_session() assert sess.query(SomeUser).get(7).name == 'jack' def test_load(self): User, Address = self.classes.User, self.classes.Address s = create_session() assert s.query(User).populate_existing().get(19) is None u = s.query(User).populate_existing().get(7) u2 = s.query(User).populate_existing().get(7) assert u is u2 s.expunge_all() u2 = s.query(User).populate_existing().get(7) assert u is not u2 u2.name = 'some name' a = Address(email_address='some other name') u2.addresses.append(a) assert u2 in s.dirty assert a in u2.addresses s.query(User).populate_existing().get(7) assert u2 not in s.dirty assert u2.name =='jack' assert a not in u2.addresses @testing.requires.unicode_connections def test_unicode(self): """test that Query.get properly sets up the type for the bind parameter. using unicode would normally fail on postgresql, mysql and oracle unless it is converted to an encoded string""" metadata = MetaData(engines.utf8_engine()) table = Table('unicode_data', metadata, Column('id', Unicode(40), primary_key=True, test_needs_autoincrement=True), Column('data', Unicode(40))) try: metadata.create_all() # Py3K #ustring = b'petit voix m\xe2\x80\x99a'.decode('utf-8') # Py2K ustring = 'petit voix m\xe2\x80\x99a'.decode('utf-8') # end Py2K table.insert().execute(id=ustring, data=ustring) class LocalFoo(self.classes.Base): pass mapper(LocalFoo, table) eq_(create_session().query(LocalFoo).get(ustring), LocalFoo(id=ustring, data=ustring)) finally: metadata.drop_all() def test_populate_existing(self): User, Address = self.classes.User, self.classes.Address s = create_session() userlist = s.query(User).all() u = userlist[0] u.name = 'foo' a = Address(name='ed') u.addresses.append(a) self.assert_(a in u.addresses) s.query(User).populate_existing().all() self.assert_(u not in s.dirty) self.assert_(u.name == 'jack') self.assert_(a not in u.addresses) u.addresses[0].email_address = 'lala' u.orders[1].items[2].description = 'item 12' # test that lazy load doesnt change child items s.query(User).populate_existing().all() assert u.addresses[0].email_address == 'lala' assert u.orders[1].items[2].description == 'item 12' # eager load does s.query(User).options(joinedload('addresses'), joinedload_all('orders.items')).populate_existing().all() assert u.addresses[0].email_address == 'jack@bean.com' assert u.orders[1].items[2].description == 'item 5' class InvalidGenerationsTest(QueryTest, AssertsCompiledSQL): def test_no_limit_offset(self): User = self.classes.User s = create_session() for q in ( s.query(User).limit(2), s.query(User).offset(2), s.query(User).limit(2).offset(2) ): assert_raises(sa_exc.InvalidRequestError, q.join, "addresses") assert_raises(sa_exc.InvalidRequestError, q.filter, User.name=='ed') assert_raises(sa_exc.InvalidRequestError, q.filter_by, name='ed') assert_raises(sa_exc.InvalidRequestError, q.order_by, 'foo') assert_raises(sa_exc.InvalidRequestError, q.group_by, 'foo') assert_raises(sa_exc.InvalidRequestError, q.having, 
'foo') q.enable_assertions(False).join("addresses") q.enable_assertions(False).filter(User.name=='ed') q.enable_assertions(False).order_by('foo') q.enable_assertions(False).group_by('foo') def test_no_from(self): users, User = self.tables.users, self.classes.User s = create_session() q = s.query(User).select_from(users) assert_raises(sa_exc.InvalidRequestError, q.select_from, users) q = s.query(User).join('addresses') assert_raises(sa_exc.InvalidRequestError, q.select_from, users) q = s.query(User).order_by(User.id) assert_raises(sa_exc.InvalidRequestError, q.select_from, users) assert_raises(sa_exc.InvalidRequestError, q.select_from, users) q.enable_assertions(False).select_from(users) # this is fine, however q.from_self() def test_invalid_select_from(self): User = self.classes.User s = create_session() q = s.query(User) assert_raises(sa_exc.ArgumentError, q.select_from, User.id==5) assert_raises(sa_exc.ArgumentError, q.select_from, User.id) def test_invalid_from_statement(self): User, addresses, users = (self.classes.User, self.tables.addresses, self.tables.users) s = create_session() q = s.query(User) assert_raises(sa_exc.ArgumentError, q.from_statement, User.id==5) assert_raises(sa_exc.ArgumentError, q.from_statement, users.join(addresses)) def test_invalid_column(self): User = self.classes.User s = create_session() q = s.query(User) assert_raises(sa_exc.InvalidRequestError, q.add_column, object()) def test_invalid_column_tuple(self): User = self.classes.User s = create_session() q = s.query(User) assert_raises(sa_exc.InvalidRequestError, q.add_column, (1, 1)) def test_distinct(self): """test that a distinct() call is not valid before 'clauseelement' conditions.""" User = self.classes.User s = create_session() q = s.query(User).distinct() assert_raises(sa_exc.InvalidRequestError, q.select_from, User) assert_raises(sa_exc.InvalidRequestError, q.from_statement, text("select * from table")) assert_raises(sa_exc.InvalidRequestError, q.with_polymorphic, User) def test_order_by(self): """test that an order_by() call is not valid before 'clauseelement' conditions.""" User = self.classes.User s = create_session() q = s.query(User).order_by(User.id) assert_raises(sa_exc.InvalidRequestError, q.select_from, User) assert_raises(sa_exc.InvalidRequestError, q.from_statement, text("select * from table")) assert_raises(sa_exc.InvalidRequestError, q.with_polymorphic, User) def test_cancel_order_by(self): User = self.classes.User s = create_session() q = s.query(User).order_by(User.id) self.assert_compile(q, "SELECT users.id AS users_id, users.name AS users_name FROM users ORDER BY users.id", use_default_dialect=True) assert_raises(sa_exc.InvalidRequestError, q._no_select_modifiers, "foo") q = q.order_by(None) self.assert_compile(q, "SELECT users.id AS users_id, users.name AS users_name FROM users", use_default_dialect=True) assert_raises(sa_exc.InvalidRequestError, q._no_select_modifiers, "foo") q = q.order_by(False) self.assert_compile(q, "SELECT users.id AS users_id, users.name AS users_name FROM users", use_default_dialect=True) # after False was set, this should pass q._no_select_modifiers("foo") def test_mapper_zero(self): User, Address = self.classes.User, self.classes.Address s = create_session() q = s.query(User, Address) assert_raises(sa_exc.InvalidRequestError, q.get, 5) def test_from_statement(self): User = self.classes.User s = create_session() for meth, arg, kw in [ (Query.filter, (User.id==5,), {}), (Query.filter_by, (), {'id':5}), (Query.limit, (5, ), {}), (Query.group_by, (User.name,), 
{}), (Query.order_by, (User.name,), {}) ]: q = s.query(User) q = meth(q, *arg, **kw) assert_raises( sa_exc.InvalidRequestError, q.from_statement, "x" ) q = s.query(User) q = q.from_statement("x") assert_raises( sa_exc.InvalidRequestError, meth, q, *arg, **kw ) class OperatorTest(QueryTest, AssertsCompiledSQL): """test sql.Comparator implementation for MapperProperties""" __dialect__ = 'default' def _test(self, clause, expected, entity=None): dialect = default.DefaultDialect() if entity is not None: # specify a lead entity, so that when we are testing # correlation, the correlation actually happens sess = Session() lead = sess.query(entity) context = lead._compile_context() context.statement.use_labels = True lead = context.statement.compile(dialect=dialect) expected = (str(lead) + " WHERE " + expected).replace("\n", "") clause = sess.query(entity).filter(clause) self.assert_compile(clause, expected) def test_arithmetic(self): User = self.classes.User create_session().query(User) for (py_op, sql_op) in ((operator.add, '+'), (operator.mul, '*'), (operator.sub, '-'), # Py3k #(operator.truediv, '/'), # Py2K (operator.div, '/'), # end Py2K ): for (lhs, rhs, res) in ( (5, User.id, ':id_1 %s users.id'), (5, literal(6), ':param_1 %s :param_2'), (User.id, 5, 'users.id %s :id_1'), (User.id, literal('b'), 'users.id %s :param_1'), (User.id, User.id, 'users.id %s users.id'), (literal(5), 'b', ':param_1 %s :param_2'), (literal(5), User.id, ':param_1 %s users.id'), (literal(5), literal(6), ':param_1 %s :param_2'), ): self._test(py_op(lhs, rhs), res % sql_op) def test_comparison(self): User = self.classes.User create_session().query(User) ualias = aliased(User) for (py_op, fwd_op, rev_op) in ((operator.lt, '<', '>'), (operator.gt, '>', '<'), (operator.eq, '=', '='), (operator.ne, '!=', '!='), (operator.le, '<=', '>='), (operator.ge, '>=', '<=')): for (lhs, rhs, l_sql, r_sql) in ( ('a', User.id, ':id_1', 'users.id'), ('a', literal('b'), ':param_2', ':param_1'), # note swap! (User.id, 'b', 'users.id', ':id_1'), (User.id, literal('b'), 'users.id', ':param_1'), (User.id, User.id, 'users.id', 'users.id'), (literal('a'), 'b', ':param_1', ':param_2'), (literal('a'), User.id, ':param_1', 'users.id'), (literal('a'), literal('b'), ':param_1', ':param_2'), (ualias.id, literal('b'), 'users_1.id', ':param_1'), (User.id, ualias.name, 'users.id', 'users_1.name'), (User.name, ualias.name, 'users.name', 'users_1.name'), (ualias.name, User.name, 'users_1.name', 'users.name'), ): # the compiled clause should match either (e.g.): # 'a' < 'b' -or- 'b' > 'a'. 
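# Note: when the left operand is a plain Python value, Python falls back to the
# column's reflected comparison operator, so e.g. 'a' < User.id compiles as
# "users.id > :id_1" rather than ":id_1 < users.id"; both spellings are accepted
# by the assertion below.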
compiled = str(py_op(lhs, rhs).compile(dialect=default.DefaultDialect())) fwd_sql = "%s %s %s" % (l_sql, fwd_op, r_sql) rev_sql = "%s %s %s" % (r_sql, rev_op, l_sql) self.assert_(compiled == fwd_sql or compiled == rev_sql, "\n'" + compiled + "'\n does not match\n'" + fwd_sql + "'\n or\n'" + rev_sql + "'") def test_negated_null(self): User, Address = self.classes.User, self.classes.Address self._test(User.id == None, "users.id IS NULL") self._test(~(User.id==None), "users.id IS NOT NULL") self._test(None == User.id, "users.id IS NULL") self._test(~(None == User.id), "users.id IS NOT NULL") self._test(Address.user == None, "addresses.user_id IS NULL") self._test(~(Address.user==None), "addresses.user_id IS NOT NULL") self._test(None == Address.user, "addresses.user_id IS NULL") self._test(~(None == Address.user), "addresses.user_id IS NOT NULL") def test_relationship_unimplemented(self): User, Address = self.classes.User, self.classes.Address for op in [ User.addresses.like, User.addresses.ilike, User.addresses.__le__, User.addresses.__gt__, ]: assert_raises(NotImplementedError, op, "x") def test_relationship(self): User, Address = self.classes.User, self.classes.Address self._test(User.addresses.any(Address.id==17), "EXISTS (SELECT 1 " "FROM addresses " "WHERE users.id = addresses.user_id AND addresses.id = :id_1)", entity=User ) u7 = User(id=7) attributes.instance_state(u7)._commit_all(attributes.instance_dict(u7)) self._test(Address.user == u7, ":param_1 = addresses.user_id") self._test(Address.user != u7, "addresses.user_id != :user_id_1 OR addresses.user_id IS NULL") self._test(Address.user == None, "addresses.user_id IS NULL") self._test(Address.user != None, "addresses.user_id IS NOT NULL") def test_selfref_relationship(self): Node = self.classes.Node nalias = aliased(Node) # auto self-referential aliasing self._test( Node.children.any(Node.data=='n1'), "EXISTS (SELECT 1 FROM nodes AS nodes_1 WHERE " "nodes.id = nodes_1.parent_id AND nodes_1.data = :data_1)", entity=Node ) # needs autoaliasing self._test( Node.children == None, "NOT (EXISTS (SELECT 1 FROM nodes AS nodes_1 " "WHERE nodes.id = nodes_1.parent_id))", entity=Node ) self._test( Node.parent == None, "nodes.parent_id IS NULL" ) self._test( nalias.parent == None, "nodes_1.parent_id IS NULL" ) self._test( nalias.children == None, "NOT (EXISTS (SELECT 1 FROM nodes WHERE nodes_1.id = nodes.parent_id))", entity=nalias ) self._test( nalias.children.any(Node.data=='some data'), "EXISTS (SELECT 1 FROM nodes WHERE " "nodes_1.id = nodes.parent_id AND nodes.data = :data_1)", entity=nalias) # this fails because self-referential any() is auto-aliasing; # the fact that we use "nalias" here means we get two aliases. 
#self._test( # Node.children.any(nalias.data == 'some data'), # "EXISTS (SELECT 1 FROM nodes AS nodes_1 WHERE " # "nodes.id = nodes_1.parent_id AND nodes_1.data = :data_1)", # entity=Node # ) self._test( nalias.parent.has(Node.data == 'some data'), "EXISTS (SELECT 1 FROM nodes WHERE nodes.id = nodes_1.parent_id " "AND nodes.data = :data_1)", entity=nalias ) self._test( Node.parent.has(Node.data == 'some data'), "EXISTS (SELECT 1 FROM nodes AS nodes_1 WHERE " "nodes_1.id = nodes.parent_id AND nodes_1.data = :data_1)", entity=Node ) self._test( Node.parent == Node(id=7), ":param_1 = nodes.parent_id" ) self._test( nalias.parent == Node(id=7), ":param_1 = nodes_1.parent_id" ) self._test( nalias.parent != Node(id=7), 'nodes_1.parent_id != :parent_id_1 OR nodes_1.parent_id IS NULL' ) self._test( nalias.children.contains(Node(id=7)), "nodes_1.id = :param_1" ) def test_multilevel_any(self): User, Address, Dingaling = \ self.classes.User, self.classes.Address, self.classes.Dingaling sess = Session() q = sess.query(User).filter( User.addresses.any( and_(Address.id == Dingaling.address_id, Dingaling.data == 'x'))) # new since #2746 - correlate_except() now takes context into account # so its usage in any() is not as disrupting. self.assert_compile(q, "SELECT users.id AS users_id, users.name AS users_name " "FROM users " "WHERE EXISTS (SELECT 1 " "FROM addresses, dingalings " "WHERE users.id = addresses.user_id AND " "addresses.id = dingalings.address_id AND " "dingalings.data = :data_1)" ) def test_op(self): User = self.classes.User self._test(User.name.op('ilike')('17'), "users.name ilike :name_1") def test_in(self): User = self.classes.User self._test(User.id.in_(['a', 'b']), "users.id IN (:id_1, :id_2)") def test_in_on_relationship_not_supported(self): User, Address = self.classes.User, self.classes.Address assert_raises(NotImplementedError, Address.user.in_, [User(id=5)]) def test_neg(self): User = self.classes.User self._test(-User.id, "-users.id") self._test(User.id + -User.id, "users.id + -users.id") def test_between(self): User = self.classes.User self._test(User.id.between('a', 'b'), "users.id BETWEEN :id_1 AND :id_2") def test_collate(self): User = self.classes.User self._test(collate(User.id, 'binary'), "users.id COLLATE binary") self._test(User.id.collate('binary'), "users.id COLLATE binary") def test_selfref_between(self): User = self.classes.User ualias = aliased(User) self._test(User.id.between(ualias.id, ualias.id), "users.id BETWEEN users_1.id AND users_1.id") self._test(ualias.id.between(User.id, User.id), "users_1.id BETWEEN users.id AND users.id") def test_clauses(self): User, Address = self.classes.User, self.classes.Address for (expr, compare) in ( (func.max(User.id), "max(users.id)"), (User.id.desc(), "users.id DESC"), (between(5, User.id, Address.id), ":param_1 BETWEEN users.id AND addresses.id"), # this one would require adding compile() to InstrumentedScalarAttribute. do we want this ? 
#(User.id, "users.id") ): c = expr.compile(dialect=default.DefaultDialect()) assert str(c) == compare, "%s != %s" % (str(c), compare) class ExpressionTest(QueryTest, AssertsCompiledSQL): __dialect__ = 'default' def test_deferred_instances(self): User, addresses, Address = (self.classes.User, self.tables.addresses, self.classes.Address) session = create_session() s = session.query(User).filter(and_(addresses.c.email_address == bindparam('emailad'), Address.user_id==User.id)).statement l = list(session.query(User).instances(s.execute(emailad = 'jack@bean.com'))) eq_([User(id=7)], l) def test_aliased_sql_construct(self): User, Address = self.classes.User, self.classes.Address j = join(User, Address) a1 = aliased(j) self.assert_compile( a1.select(), "SELECT anon_1.users_id, anon_1.users_name, anon_1.addresses_id, " "anon_1.addresses_user_id, anon_1.addresses_email_address " "FROM (SELECT users.id AS users_id, users.name AS users_name, " "addresses.id AS addresses_id, addresses.user_id AS " "addresses_user_id, addresses.email_address AS " "addresses_email_address FROM users JOIN addresses " "ON users.id = addresses.user_id) AS anon_1" ) def test_aliased_sql_construct_raises_adapt_on_names(self): User, Address = self.classes.User, self.classes.Address j = join(User, Address) assert_raises_message( sa_exc.ArgumentError, "adapt_on_names only applies to ORM elements", aliased, j, adapt_on_names=True ) def test_scalar_subquery_compile_whereclause(self): User = self.classes.User Address = self.classes.Address session = create_session() q = session.query(User.id).filter(User.id==7) q = session.query(Address).filter(Address.user_id==q) assert isinstance(q._criterion.right, expression.ColumnElement) self.assert_compile( q, "SELECT addresses.id AS addresses_id, addresses.user_id " "AS addresses_user_id, addresses.email_address AS " "addresses_email_address FROM addresses WHERE " "addresses.user_id = (SELECT users.id AS users_id " "FROM users WHERE users.id = :id_1)" ) def test_named_subquery(self): User = self.classes.User session = create_session() a1 = session.query(User.id).filter(User.id==7).subquery('foo1') a2 = session.query(User.id).filter(User.id==7).subquery(name='foo2') a3 = session.query(User.id).filter(User.id==7).subquery() eq_(a1.name, 'foo1') eq_(a2.name, 'foo2') eq_(a3.name, '%%(%d anon)s' % id(a3)) def test_labeled_subquery(self): User = self.classes.User session = create_session() a1 = session.query(User.id).filter(User.id == 7).subquery(with_labels=True) assert a1.c.users_id is not None def test_reduced_subquery(self): User = self.classes.User ua = aliased(User) session = create_session() a1 = session.query(User.id, ua.id, ua.name).\ filter(User.id == ua.id).subquery(reduce_columns=True) self.assert_compile(a1, "SELECT users.id, users_1.name FROM " "users, users AS users_1 WHERE users.id = users_1.id") def test_label(self): User = self.classes.User session = create_session() q = session.query(User.id).filter(User.id==7).label('foo') self.assert_compile( session.query(q), "SELECT (SELECT users.id FROM users WHERE users.id = :id_1) AS foo" ) def test_as_scalar(self): User = self.classes.User session = create_session() q = session.query(User.id).filter(User.id==7).as_scalar() self.assert_compile(session.query(User).filter(User.id.in_(q)), 'SELECT users.id AS users_id, users.name ' 'AS users_name FROM users WHERE users.id ' 'IN (SELECT users.id FROM users WHERE ' 'users.id = :id_1)') def test_param_transfer(self): User = self.classes.User session = create_session() q = 
session.query(User.id).filter(User.id == bindparam('foo')).\ params(foo=7).subquery() q = session.query(User).filter(User.id.in_(q)) eq_(User(id=7), q.one()) def test_in(self): User, Address = self.classes.User, self.classes.Address session = create_session() s = session.query(User.id).join(User.addresses).group_by(User.id).\ having(func.count(Address.id) > 2) eq_( session.query(User).filter(User.id.in_(s)).all(), [User(id=8)] ) def test_union(self): User = self.classes.User s = create_session() q1 = s.query(User).filter(User.name=='ed').with_labels() q2 = s.query(User).filter(User.name=='fred').with_labels() eq_( s.query(User).from_statement(union(q1, q2).order_by('users_name')).all(), [User(name='ed'), User(name='fred')] ) def test_select(self): User = self.classes.User s = create_session() # this is actually not legal on most DBs since the subquery has no alias q1 = s.query(User).filter(User.name=='ed') self.assert_compile( select([q1]), "SELECT users_id, users_name FROM (SELECT users.id AS users_id, " "users.name AS users_name FROM users WHERE users.name = :name_1)" ) def test_join(self): User, Address = self.classes.User, self.classes.Address s = create_session() # TODO: do we want aliased() to detect a query and convert to subquery() # automatically ? q1 = s.query(Address).filter(Address.email_address=='jack@bean.com') adalias = aliased(Address, q1.subquery()) eq_( s.query(User, adalias).join(adalias, User.id==adalias.user_id).all(), [(User(id=7,name=u'jack'), Address(email_address=u'jack@bean.com',user_id=7,id=1))] ) # more slice tests are available in test/orm/generative.py class SliceTest(QueryTest): def test_first(self): User = self.classes.User assert User(id=7) == create_session().query(User).first() assert create_session().query(User).filter(User.id==27).first() is None @testing.only_on('sqlite', 'testing execution but db-specific syntax') def test_limit_offset_applies(self): """Test that the expected LIMIT/OFFSET is applied for slices. The LIMIT/OFFSET syntax differs slightly on all databases, and query[x:y] executes immediately, so we are asserting against SQL strings using sqlite's syntax. 
""" User = self.classes.User sess = create_session() q = sess.query(User) self.assert_sql(testing.db, lambda: q[10:20], [ ("SELECT users.id AS users_id, users.name AS users_name FROM users LIMIT :param_1 OFFSET :param_2", {'param_1':10, 'param_2':10}) ]) self.assert_sql(testing.db, lambda: q[:20], [ ("SELECT users.id AS users_id, users.name AS users_name FROM users LIMIT :param_1 OFFSET :param_2", {'param_1':20, 'param_2':0}) ]) self.assert_sql(testing.db, lambda: q[5:], [ ("SELECT users.id AS users_id, users.name AS users_name FROM users LIMIT :param_1 OFFSET :param_2", {'param_1':-1, 'param_2':5}) ]) self.assert_sql(testing.db, lambda: q[2:2], []) self.assert_sql(testing.db, lambda: q[-2:-5], []) self.assert_sql(testing.db, lambda: q[-5:-2], [ ("SELECT users.id AS users_id, users.name AS users_name FROM users", {}) ]) self.assert_sql(testing.db, lambda: q[-5:], [ ("SELECT users.id AS users_id, users.name AS users_name FROM users", {}) ]) self.assert_sql(testing.db, lambda: q[:], [ ("SELECT users.id AS users_id, users.name AS users_name FROM users", {}) ]) class FilterTest(QueryTest, AssertsCompiledSQL): __dialect__ = 'default' def test_basic(self): User = self.classes.User users = create_session().query(User).all() eq_( [User(id=7), User(id=8), User(id=9),User(id=10)], users ) @testing.requires.offset def test_limit_offset(self): User = self.classes.User sess = create_session() assert [User(id=8), User(id=9)] == sess.query(User).order_by(User.id).limit(2).offset(1).all() assert [User(id=8), User(id=9)] == list(sess.query(User).order_by(User.id)[1:3]) assert User(id=8) == sess.query(User).order_by(User.id)[1] assert [] == sess.query(User).order_by(User.id)[3:3] assert [] == sess.query(User).order_by(User.id)[0:0] @testing.requires.boolean_col_expressions def test_exists(self): User = self.classes.User sess = create_session(testing.db) assert sess.query(exists().where(User.id==9)).scalar() assert not sess.query(exists().where(User.id==29)).scalar() def test_one_filter(self): User = self.classes.User assert [User(id=8), User(id=9)] == create_session().query(User).filter(User.name.endswith('ed')).all() def test_contains(self): """test comparing a collection to an object instance.""" User, Address = self.classes.User, self.classes.Address sess = create_session() address = sess.query(Address).get(3) assert [User(id=8)] == sess.query(User).filter(User.addresses.contains(address)).all() try: sess.query(User).filter(User.addresses == address) assert False except sa_exc.InvalidRequestError: assert True assert [User(id=10)] == sess.query(User).filter(User.addresses==None).all() try: assert [User(id=7), User(id=9), User(id=10)] == sess.query(User).filter(User.addresses!=address).all() assert False except sa_exc.InvalidRequestError: assert True #assert [User(id=7), User(id=9), User(id=10)] == sess.query(User).filter(User.addresses!=address).all() def test_clause_element_ok(self): User = self.classes.User s = Session() self.assert_compile( s.query(User).filter(User.addresses), "SELECT users.id AS users_id, users.name AS users_name " "FROM users, addresses WHERE users.id = addresses.user_id" ) def test_unique_binds_join_cond(self): """test that binds used when the lazyclause is used in criterion are unique""" User, Address = self.classes.User, self.classes.Address sess = Session() a1, a2 = sess.query(Address).order_by(Address.id)[0:2] self.assert_compile( sess.query(User).filter(User.addresses.contains(a1)).union( sess.query(User).filter(User.addresses.contains(a2)) ), "SELECT anon_1.users_id AS 
anon_1_users_id, anon_1.users_name AS " "anon_1_users_name FROM (SELECT users.id AS users_id, " "users.name AS users_name FROM users WHERE users.id = :param_1 " "UNION SELECT users.id AS users_id, users.name AS users_name " "FROM users WHERE users.id = :param_2) AS anon_1", checkparams = {u'param_1': 7, u'param_2': 8} ) def test_any(self): User, Address = self.classes.User, self.classes.Address sess = create_session() assert [User(id=8), User(id=9)] == sess.query(User).filter(User.addresses.any(Address.email_address.like('%ed%'))).all() assert [User(id=8)] == sess.query(User).filter(User.addresses.any(Address.email_address.like('%ed%'), id=4)).all() assert [User(id=8)] == sess.query(User).filter(User.addresses.any(Address.email_address.like('%ed%'))).\ filter(User.addresses.any(id=4)).all() assert [User(id=9)] == sess.query(User).filter(User.addresses.any(email_address='fred@fred.com')).all() # test that any() doesn't overcorrelate assert [User(id=7), User(id=8)] == sess.query(User).join("addresses").filter(~User.addresses.any(Address.email_address=='fred@fred.com')).all() # test that the contents are not adapted by the aliased join assert [User(id=7), User(id=8)] == sess.query(User).join("addresses", aliased=True).filter(~User.addresses.any(Address.email_address=='fred@fred.com')).all() assert [User(id=10)] == sess.query(User).outerjoin("addresses", aliased=True).filter(~User.addresses.any()).all() @testing.crashes('maxdb', 'can dump core') def test_has(self): Dingaling, User, Address = (self.classes.Dingaling, self.classes.User, self.classes.Address) sess = create_session() assert [Address(id=5)] == sess.query(Address).filter(Address.user.has(name='fred')).all() assert [Address(id=2), Address(id=3), Address(id=4), Address(id=5)] == \ sess.query(Address).filter(Address.user.has(User.name.like('%ed%'))).order_by(Address.id).all() assert [Address(id=2), Address(id=3), Address(id=4)] == \ sess.query(Address).filter(Address.user.has(User.name.like('%ed%'), id=8)).order_by(Address.id).all() # test has() doesn't overcorrelate assert [Address(id=2), Address(id=3), Address(id=4)] == \ sess.query(Address).join("user").filter(Address.user.has(User.name.like('%ed%'), id=8)).order_by(Address.id).all() # test has() doesnt' get subquery contents adapted by aliased join assert [Address(id=2), Address(id=3), Address(id=4)] == \ sess.query(Address).join("user", aliased=True).filter(Address.user.has(User.name.like('%ed%'), id=8)).order_by(Address.id).all() dingaling = sess.query(Dingaling).get(2) assert [User(id=9)] == sess.query(User).filter(User.addresses.any(Address.dingaling==dingaling)).all() def test_contains_m2m(self): Item, Order = self.classes.Item, self.classes.Order sess = create_session() item = sess.query(Item).get(3) assert [Order(id=1), Order(id=2), Order(id=3)] == sess.query(Order).filter(Order.items.contains(item)).all() assert [Order(id=4), Order(id=5)] == sess.query(Order).filter(~Order.items.contains(item)).all() item2 = sess.query(Item).get(5) assert [Order(id=3)] == sess.query(Order).filter(Order.items.contains(item)).filter(Order.items.contains(item2)).all() def test_comparison(self): """test scalar comparison to an object instance""" Item, Order, Dingaling, User, Address = (self.classes.Item, self.classes.Order, self.classes.Dingaling, self.classes.User, self.classes.Address) sess = create_session() user = sess.query(User).get(8) assert [Address(id=2), Address(id=3), Address(id=4)] == sess.query(Address).filter(Address.user==user).all() assert [Address(id=1), Address(id=5)] == 
sess.query(Address).filter(Address.user!=user).all() # generates an IS NULL assert [] == sess.query(Address).filter(Address.user == None).all() assert [] == sess.query(Address).filter(Address.user == null()).all() assert [Order(id=5)] == sess.query(Order).filter(Order.address == None).all() # o2o dingaling = sess.query(Dingaling).get(2) assert [Address(id=5)] == sess.query(Address).filter(Address.dingaling==dingaling).all() # m2m eq_(sess.query(Item).filter(Item.keywords==None).order_by(Item.id).all(), [Item(id=4), Item(id=5)]) eq_(sess.query(Item).filter(Item.keywords!=None).order_by(Item.id).all(), [Item(id=1),Item(id=2), Item(id=3)]) def test_filter_by(self): User, Address = self.classes.User, self.classes.Address sess = create_session() user = sess.query(User).get(8) assert [Address(id=2), Address(id=3), Address(id=4)] == sess.query(Address).filter_by(user=user).all() # many to one generates IS NULL assert [] == sess.query(Address).filter_by(user = None).all() assert [] == sess.query(Address).filter_by(user = null()).all() # one to many generates WHERE NOT EXISTS assert [User(name='chuck')] == sess.query(User).filter_by(addresses = None).all() assert [User(name='chuck')] == sess.query(User).filter_by(addresses = null()).all() def test_filter_by_tables(self): users = self.tables.users addresses = self.tables.addresses sess = create_session() self.assert_compile( sess.query(users).\ filter_by(name='ed').\ join(addresses, users.c.id==addresses.c.user_id).\ filter_by(email_address='ed@ed.com'), "SELECT users.id AS users_id, users.name AS users_name " "FROM users JOIN addresses ON users.id = addresses.user_id " "WHERE users.name = :name_1 AND " "addresses.email_address = :email_address_1", checkparams={u'email_address_1': 'ed@ed.com', u'name_1': 'ed'} ) def test_filter_by_no_property(self): addresses = self.tables.addresses sess = create_session() assert_raises_message( sa.exc.InvalidRequestError, "Entity 'addresses' has no property 'name'", sess.query(addresses).\ filter_by, name='ed' ) def test_none_comparison(self): Order, User, Address = (self.classes.Order, self.classes.User, self.classes.Address) sess = create_session() # scalar eq_( [Order(description="order 5")], sess.query(Order).filter(Order.address_id==None).all() ) eq_( [Order(description="order 5")], sess.query(Order).filter(Order.address_id==null()).all() ) # o2o eq_([Address(id=1), Address(id=3), Address(id=4)], sess.query(Address).filter(Address.dingaling==None).order_by(Address.id).all()) eq_([Address(id=1), Address(id=3), Address(id=4)], sess.query(Address).filter(Address.dingaling==null()).order_by(Address.id).all()) eq_([Address(id=2), Address(id=5)], sess.query(Address).filter(Address.dingaling != None).order_by(Address.id).all()) eq_([Address(id=2), Address(id=5)], sess.query(Address).filter(Address.dingaling != null()).order_by(Address.id).all()) # m2o eq_([Order(id=5)], sess.query(Order).filter(Order.address==None).all()) eq_([Order(id=1), Order(id=2), Order(id=3), Order(id=4)], sess.query(Order).order_by(Order.id).filter(Order.address!=None).all()) # o2m eq_([User(id=10)], sess.query(User).filter(User.addresses==None).all()) eq_([User(id=7),User(id=8),User(id=9)], sess.query(User).filter(User.addresses!=None).order_by(User.id).all()) def test_blank_filter_by(self): User = self.classes.User eq_( [(7,), (8,), (9,), (10,)], create_session().query(User.id).filter_by().order_by(User.id).all() ) eq_( [(7,), (8,), (9,), (10,)], create_session().query(User.id).filter_by(**{}).order_by(User.id).all() ) def 
test_text_coerce(self): User = self.classes.User s = create_session() self.assert_compile( s.query(User).filter("name='ed'"), "SELECT users.id AS users_id, users.name " "AS users_name FROM users WHERE name='ed'" ) class SetOpsTest(QueryTest, AssertsCompiledSQL): __dialect__ = 'default' def test_union(self): User = self.classes.User s = create_session() fred = s.query(User).filter(User.name=='fred') ed = s.query(User).filter(User.name=='ed') jack = s.query(User).filter(User.name=='jack') eq_(fred.union(ed).order_by(User.name).all(), [User(name='ed'), User(name='fred')] ) eq_(fred.union(ed, jack).order_by(User.name).all(), [User(name='ed'), User(name='fred'), User(name='jack')] ) def test_statement_labels(self): """test that label conflicts don't occur with joins etc.""" User, Address = self.classes.User, self.classes.Address s = create_session() q1 = s.query(User, Address).join(User.addresses).\ filter(Address.email_address=="ed@wood.com") q2 = s.query(User, Address).join(User.addresses).\ filter(Address.email_address=="jack@bean.com") q3 = q1.union(q2).order_by(User.name) eq_( q3.all(), [ (User(name='ed'), Address(email_address="ed@wood.com")), (User(name='jack'), Address(email_address="jack@bean.com")), ] ) def test_union_literal_expressions_compile(self): """test that column expressions translate during the _from_statement() portion of union(), others""" User = self.classes.User s = Session() q1 = s.query(User, literal("x")) q2 = s.query(User, literal_column("'y'")) q3 = q1.union(q2) self.assert_compile( q3, "SELECT anon_1.users_id AS anon_1_users_id, anon_1.users_name AS anon_1_users_name," " anon_1.param_1 AS anon_1_param_1 FROM (SELECT users.id AS users_id, users.name AS" " users_name, :param_1 AS param_1 FROM users UNION SELECT users.id AS users_id, " "users.name AS users_name, 'y' FROM users) AS anon_1" ) def test_union_literal_expressions_results(self): User = self.classes.User s = Session() q1 = s.query(User, literal("x")) q2 = s.query(User, literal_column("'y'")) q3 = q1.union(q2) q4 = s.query(User, literal_column("'x'").label('foo')) q5 = s.query(User, literal("y")) q6 = q4.union(q5) eq_( [x['name'] for x in q6.column_descriptions], ['User', 'foo'] ) for q in (q3.order_by(User.id, "anon_1_param_1"), q6.order_by(User.id, "foo")): eq_(q.all(), [ (User(id=7, name=u'jack'), u'x'), (User(id=7, name=u'jack'), u'y'), (User(id=8, name=u'ed'), u'x'), (User(id=8, name=u'ed'), u'y'), (User(id=9, name=u'fred'), u'x'), (User(id=9, name=u'fred'), u'y'), (User(id=10, name=u'chuck'), u'x'), (User(id=10, name=u'chuck'), u'y') ] ) def test_union_labeled_anonymous_columns(self): User = self.classes.User s = Session() c1, c2 = column('c1'), column('c2') q1 = s.query(User, c1.label('foo'), c1.label('bar')) q2 = s.query(User, c1.label('foo'), c2.label('bar')) q3 = q1.union(q2) eq_( [x['name'] for x in q3.column_descriptions], ['User', 'foo', 'bar'] ) self.assert_compile( q3, "SELECT anon_1.users_id AS anon_1_users_id, " "anon_1.users_name AS anon_1_users_name, " "anon_1.foo AS anon_1_foo, anon_1.bar AS anon_1_bar " "FROM (SELECT users.id AS users_id, users.name AS users_name, " "c1 AS foo, c1 AS bar FROM users UNION SELECT users.id AS " "users_id, users.name AS users_name, c1 AS foo, c2 AS bar " "FROM users) AS anon_1" ) def test_order_by_anonymous_col(self): User = self.classes.User s = Session() c1, c2 = column('c1'), column('c2') f = c1.label('foo') q1 = s.query(User, f, c2.label('bar')) q2 = s.query(User, c1.label('foo'), c2.label('bar')) q3 = q1.union(q2) self.assert_compile( q3.order_by(c1), 
"SELECT anon_1.users_id AS anon_1_users_id, anon_1.users_name AS " "anon_1_users_name, anon_1.foo AS anon_1_foo, anon_1.bar AS " "anon_1_bar FROM (SELECT users.id AS users_id, users.name AS " "users_name, c1 AS foo, c2 AS bar FROM users UNION SELECT users.id " "AS users_id, users.name AS users_name, c1 AS foo, c2 AS bar " "FROM users) AS anon_1 ORDER BY anon_1.foo" ) self.assert_compile( q3.order_by(f), "SELECT anon_1.users_id AS anon_1_users_id, anon_1.users_name AS " "anon_1_users_name, anon_1.foo AS anon_1_foo, anon_1.bar AS " "anon_1_bar FROM (SELECT users.id AS users_id, users.name AS " "users_name, c1 AS foo, c2 AS bar FROM users UNION SELECT users.id " "AS users_id, users.name AS users_name, c1 AS foo, c2 AS bar " "FROM users) AS anon_1 ORDER BY anon_1.foo" ) def test_union_mapped_colnames_preserved_across_subquery(self): User = self.classes.User s = Session() q1 = s.query(User.name) q2 = s.query(User.name) # the label names in the subquery are the typical anonymized ones self.assert_compile( q1.union(q2), "SELECT anon_1.users_name AS anon_1_users_name " "FROM (SELECT users.name AS users_name FROM users " "UNION SELECT users.name AS users_name FROM users) AS anon_1" ) # but in the returned named tuples, # due to [ticket:1942], this should be 'name', not 'users_name' eq_( [x['name'] for x in q1.union(q2).column_descriptions], ['name'] ) @testing.requires.intersect def test_intersect(self): User = self.classes.User s = create_session() fred = s.query(User).filter(User.name=='fred') ed = s.query(User).filter(User.name=='ed') jack = s.query(User).filter(User.name=='jack') eq_(fred.intersect(ed, jack).all(), [] ) eq_(fred.union(ed).intersect(ed.union(jack)).all(), [User(name='ed')] ) def test_eager_load(self): User, Address = self.classes.User, self.classes.Address s = create_session() fred = s.query(User).filter(User.name=='fred') ed = s.query(User).filter(User.name=='ed') jack = s.query(User).filter(User.name=='jack') def go(): eq_( fred.union(ed).order_by(User.name).options(joinedload(User.addresses)).all(), [ User(name='ed', addresses=[Address(), Address(), Address()]), User(name='fred', addresses=[Address()]) ] ) self.assert_sql_count(testing.db, go, 1) class AggregateTest(QueryTest): def test_sum(self): Order = self.classes.Order sess = create_session() orders = sess.query(Order).filter(Order.id.in_([2, 3, 4])) eq_(orders.values(func.sum(Order.user_id * Order.address_id)).next(), (79,)) eq_(orders.value(func.sum(Order.user_id * Order.address_id)), 79) def test_apply(self): Order = self.classes.Order sess = create_session() assert sess.query(func.sum(Order.user_id * Order.address_id)).filter(Order.id.in_([2, 3, 4])).one() == (79,) def test_having(self): User, Address = self.classes.User, self.classes.Address sess = create_session() assert [User(name=u'ed',id=8)] == sess.query(User).order_by(User.id).group_by(User).join('addresses').having(func.count(Address.id)> 2).all() assert [User(name=u'jack',id=7), User(name=u'fred',id=9)] == sess.query(User).order_by(User.id).group_by(User).join('addresses').having(func.count(Address.id)< 2).all() class ExistsTest(QueryTest, AssertsCompiledSQL): __dialect__ = 'default' def test_exists(self): User = self.classes.User sess = create_session() q1 = sess.query(User) self.assert_compile(sess.query(q1.exists()), 'SELECT EXISTS (' 'SELECT 1 FROM users' ') AS anon_1' ) q2 = sess.query(User).filter(User.name == 'fred') self.assert_compile(sess.query(q2.exists()), 'SELECT EXISTS (' 'SELECT 1 FROM users WHERE users.name = :name_1' ') AS anon_1' ) def 
test_exists_col_warning(self): User = self.classes.User Address = self.classes.Address sess = create_session() q1 = sess.query(User, Address).filter(User.id == Address.user_id) self.assert_compile(sess.query(q1.exists()), 'SELECT EXISTS (' 'SELECT 1 FROM users, addresses ' 'WHERE users.id = addresses.user_id' ') AS anon_1' ) class CountTest(QueryTest): def test_basic(self): users, User = self.tables.users, self.classes.User s = create_session() eq_(s.query(User).count(), 4) eq_(s.query(User).filter(users.c.name.endswith('ed')).count(), 2) def test_count_char(self): User = self.classes.User s = create_session() # '*' is favored here as the most common character, # it is reported that Informix doesn't like count(1), # rumors about Oracle preferring count(1) don't appear # to be well founded. self.assert_sql_execution( testing.db, s.query(User).count, CompiledSQL( "SELECT count(*) AS count_1 FROM " "(SELECT users.id AS users_id, users.name " "AS users_name FROM users) AS anon_1", {} ) ) def test_multiple_entity(self): User, Address = self.classes.User, self.classes.Address s = create_session() q = s.query(User, Address) eq_(q.count(), 20) # cartesian product q = s.query(User, Address).join(User.addresses) eq_(q.count(), 5) def test_nested(self): User, Address = self.classes.User, self.classes.Address s = create_session() q = s.query(User, Address).limit(2) eq_(q.count(), 2) q = s.query(User, Address).limit(100) eq_(q.count(), 20) q = s.query(User, Address).join(User.addresses).limit(100) eq_(q.count(), 5) def test_cols(self): """test that column-based queries always nest.""" User, Address = self.classes.User, self.classes.Address s = create_session() q = s.query(func.count(distinct(User.name))) eq_(q.count(), 1) q = s.query(func.count(distinct(User.name))).distinct() eq_(q.count(), 1) q = s.query(User.name) eq_(q.count(), 4) q = s.query(User.name, Address) eq_(q.count(), 20) q = s.query(Address.user_id) eq_(q.count(), 5) eq_(q.distinct().count(), 3) class DistinctTest(QueryTest): def test_basic(self): User = self.classes.User eq_( [User(id=7), User(id=8), User(id=9),User(id=10)], create_session().query(User).order_by(User.id).distinct().all() ) eq_( [User(id=7), User(id=9), User(id=8),User(id=10)], create_session().query(User).distinct().order_by(desc(User.name)).all() ) def test_joined(self): """test that orderbys from a joined table get placed into the columns clause when DISTINCT is used""" User, Address = self.classes.User, self.classes.Address sess = create_session() q = sess.query(User).join('addresses').distinct().order_by(desc(Address.email_address)) assert [User(id=7), User(id=9), User(id=8)] == q.all() sess.expunge_all() # test that it works on embedded joinedload/LIMIT subquery q = sess.query(User).join('addresses').distinct().options(joinedload('addresses')).order_by(desc(Address.email_address)).limit(2) def go(): assert [ User(id=7, addresses=[ Address(id=1) ]), User(id=9, addresses=[ Address(id=5) ]), ] == q.all() self.assert_sql_count(testing.db, go, 1) class PrefixWithTest(QueryTest, AssertsCompiledSQL): def test_one_prefix(self): User = self.classes.User sess = create_session() query = sess.query(User.name)\ .prefix_with('PREFIX_1') expected = "SELECT PREFIX_1 "\ "users.name AS users_name FROM users" self.assert_compile(query, expected, dialect=default.DefaultDialect() ) def test_many_prefixes(self): User = self.classes.User sess = create_session() query = sess.query(User.name)\ .prefix_with('PREFIX_1', 'PREFIX_2') expected = "SELECT PREFIX_1 PREFIX_2 "\ "users.name AS 
users_name FROM users" self.assert_compile(query, expected, dialect=default.DefaultDialect() ) def test_chained_prefixes(self): User = self.classes.User sess = create_session() query = sess.query(User.name)\ .prefix_with('PREFIX_1')\ .prefix_with('PREFIX_2', 'PREFIX_3') expected = "SELECT PREFIX_1 PREFIX_2 PREFIX_3 "\ "users.name AS users_name FROM users" self.assert_compile(query, expected, dialect=default.DefaultDialect() ) class YieldTest(QueryTest): def test_basic(self): User = self.classes.User sess = create_session() q = iter(sess.query(User).yield_per(1).from_statement("select * from users")) ret = [] eq_(len(sess.identity_map), 0) ret.append(q.next()) ret.append(q.next()) eq_(len(sess.identity_map), 2) ret.append(q.next()) ret.append(q.next()) eq_(len(sess.identity_map), 4) try: q.next() assert False except StopIteration: pass def test_yield_per_and_execution_options(self): User = self.classes.User sess = create_session() q = sess.query(User).yield_per(1) q = q.execution_options(foo='bar') assert q._yield_per eq_(q._execution_options, {"stream_results": True, "foo": "bar"}) class HintsTest(QueryTest, AssertsCompiledSQL): def test_hints(self): User = self.classes.User from sqlalchemy.dialects import mysql dialect = mysql.dialect() sess = create_session() self.assert_compile( sess.query(User).with_hint(User, 'USE INDEX (col1_index,col2_index)'), "SELECT users.id AS users_id, users.name AS users_name " "FROM users USE INDEX (col1_index,col2_index)", dialect=dialect ) self.assert_compile( sess.query(User).with_hint(User, 'WITH INDEX col1_index', 'sybase'), "SELECT users.id AS users_id, users.name AS users_name " "FROM users", dialect=dialect ) ualias = aliased(User) self.assert_compile( sess.query(User, ualias).with_hint(ualias, 'USE INDEX (col1_index,col2_index)'). join(ualias, ualias.id > User.id), "SELECT users.id AS users_id, users.name AS users_name, " "users_1.id AS users_1_id, users_1.name AS users_1_name " "FROM users INNER JOIN users AS users_1 USE INDEX (col1_index,col2_index) " "ON users.id < users_1.id", dialect=dialect ) class TextTest(QueryTest): def test_fulltext(self): User = self.classes.User eq_( create_session().query(User). from_statement("select * from users order by id").all(), [User(id=7), User(id=8), User(id=9), User(id=10)] ) eq_( create_session().query(User). from_statement("select * from users order by id").first(), User(id=7) ) eq_( create_session().query(User). from_statement( "select * from users where name='nonexistent'").first(), None ) def test_fragment(self): User = self.classes.User eq_( create_session().query(User).filter("id in (8, 9)").all(), [User(id=8), User(id=9)] ) eq_( create_session().query(User).filter("name='fred'"). filter("id=9").all(), [User(id=9)] ) eq_( create_session().query(User).filter("name='fred'"). 
filter(User.id == 9).all(), [User(id=9)] ) def test_binds(self): User = self.classes.User eq_( create_session().query(User).filter("id in (:id1, :id2)").\ params(id1=8, id2=9).all(), [User(id=8), User(id=9)] ) def test_as_column(self): User = self.classes.User s = create_session() assert_raises(sa_exc.InvalidRequestError, s.query, User.id, text("users.name")) eq_(s.query(User.id, "name").order_by(User.id).all(), [(7, u'jack'), (8, u'ed'), (9, u'fred'), (10, u'chuck')]) def test_via_select(self): User = self.classes.User s = create_session() eq_( s.query(User).from_statement( select(['id', 'name']).select_from('users').order_by('id'), ).all(), [User(id=7), User(id=8), User(id=9), User(id=10)] ) class ParentTest(QueryTest, AssertsCompiledSQL): __dialect__ = 'default' def test_o2m(self): User, orders, Order = (self.classes.User, self.tables.orders, self.classes.Order) sess = create_session() q = sess.query(User) u1 = q.filter_by(name='jack').one() # test auto-lookup of property o = sess.query(Order).with_parent(u1).all() assert [Order(description="order 1"), Order(description="order 3"), Order(description="order 5")] == o # test with explicit property o = sess.query(Order).with_parent(u1, property='orders').all() assert [Order(description="order 1"), Order(description="order 3"), Order(description="order 5")] == o o = sess.query(Order).with_parent(u1, property=User.orders).all() assert [Order(description="order 1"), Order(description="order 3"), Order(description="order 5")] == o o = sess.query(Order).filter(with_parent(u1, User.orders)).all() assert [Order(description="order 1"), Order(description="order 3"), Order(description="order 5")] == o # test generative criterion o = sess.query(Order).with_parent(u1).filter(orders.c.id>2).all() assert [Order(description="order 3"), Order(description="order 5")] == o # test against None for parent? 
this can't be done with the current API since we don't know # what mapper to use #assert sess.query(Order).with_parent(None, property='addresses').all() == [Order(description="order 5")] def test_noparent(self): Item, User = self.classes.Item, self.classes.User sess = create_session() q = sess.query(User) u1 = q.filter_by(name='jack').one() try: q = sess.query(Item).with_parent(u1) assert False except sa_exc.InvalidRequestError, e: assert str(e) \ == "Could not locate a property which relates "\ "instances of class 'Item' to instances of class 'User'" def test_m2m(self): Item, Keyword = self.classes.Item, self.classes.Keyword sess = create_session() i1 = sess.query(Item).filter_by(id=2).one() k = sess.query(Keyword).with_parent(i1).all() assert [Keyword(name='red'), Keyword(name='small'), Keyword(name='square')] == k def test_with_transient(self): User, Order = self.classes.User, self.classes.Order sess = Session() q = sess.query(User) u1 = q.filter_by(name='jack').one() utrans = User(id=u1.id) o = sess.query(Order).with_parent(utrans, 'orders') eq_( [Order(description="order 1"), Order(description="order 3"), Order(description="order 5")], o.all() ) o = sess.query(Order).filter(with_parent(utrans, 'orders')) eq_( [Order(description="order 1"), Order(description="order 3"), Order(description="order 5")], o.all() ) def test_with_pending_autoflush(self): Order, User = self.classes.Order, self.classes.User sess = Session() o1 = sess.query(Order).first() opending = Order(id=20, user_id=o1.user_id) sess.add(opending) eq_( sess.query(User).with_parent(opending, 'user').one(), User(id=o1.user_id) ) eq_( sess.query(User).filter(with_parent(opending, 'user')).one(), User(id=o1.user_id) ) def test_with_pending_no_autoflush(self): Order, User = self.classes.Order, self.classes.User sess = Session(autoflush=False) o1 = sess.query(Order).first() opending = Order(user_id=o1.user_id) sess.add(opending) eq_( sess.query(User).with_parent(opending, 'user').one(), User(id=o1.user_id) ) def test_unique_binds_union(self): """bindparams used in the 'parent' query are unique""" User, Address = self.classes.User, self.classes.Address sess = Session() u1, u2 = sess.query(User).order_by(User.id)[0:2] q1 = sess.query(Address).with_parent(u1, 'addresses') q2 = sess.query(Address).with_parent(u2, 'addresses') self.assert_compile( q1.union(q2), "SELECT anon_1.addresses_id AS anon_1_addresses_id, " "anon_1.addresses_user_id AS anon_1_addresses_user_id, " "anon_1.addresses_email_address AS " "anon_1_addresses_email_address FROM (SELECT addresses.id AS " "addresses_id, addresses.user_id AS addresses_user_id, " "addresses.email_address AS addresses_email_address FROM " "addresses WHERE :param_1 = addresses.user_id UNION SELECT " "addresses.id AS addresses_id, addresses.user_id AS " "addresses_user_id, addresses.email_address AS addresses_email_address " "FROM addresses WHERE :param_2 = addresses.user_id) AS anon_1", checkparams={u'param_1': 7, u'param_2': 8}, ) def test_unique_binds_or(self): User, Address = self.classes.User, self.classes.Address sess = Session() u1, u2 = sess.query(User).order_by(User.id)[0:2] self.assert_compile( sess.query(Address).filter( or_(with_parent(u1, 'addresses'), with_parent(u2, 'addresses')) ), "SELECT addresses.id AS addresses_id, addresses.user_id AS " "addresses_user_id, addresses.email_address AS " "addresses_email_address FROM addresses WHERE " ":param_1 = addresses.user_id OR :param_2 = addresses.user_id", checkparams={u'param_1': 7, u'param_2': 8}, ) class SynonymTest(QueryTest): 
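# A minimal, self-contained sketch of what synonym() provides, which the
# SynonymTest mappings below set up classically ('name_syn' for the 'name'
# column, 'orders_syn' / 'orders_syn_2' for the 'orders' relationship): the
# synonym name participates in queries exactly like the attribute it proxies.
# The Account class is a hypothetical stand-in, not one of the test fixtures.
from sqlalchemy import Column, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import Session, synonym

SynBase = declarative_base()

class Account(SynBase):
    __tablename__ = 'account'
    id = Column(Integer, primary_key=True)
    name = Column(String)
    login = synonym('name')   # 'login' proxies the 'name' column

# both queries compile to ... WHERE account.name = :name_1
print(Session().query(Account).filter_by(name='jack'))
print(Session().query(Account).filter_by(login='jack'))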
@classmethod def setup_mappers(cls): users, Keyword, items, order_items, orders, Item, User, \ Address, keywords, Order, item_keywords, addresses = \ cls.tables.users, cls.classes.Keyword, cls.tables.items, \ cls.tables.order_items, cls.tables.orders, \ cls.classes.Item, cls.classes.User, cls.classes.Address, \ cls.tables.keywords, cls.classes.Order, \ cls.tables.item_keywords, cls.tables.addresses mapper(User, users, properties={ 'name_syn':synonym('name'), 'addresses':relationship(Address), 'orders':relationship(Order, backref='user', order_by=orders.c.id), # o2m, m2o 'orders_syn':synonym('orders'), 'orders_syn_2':synonym('orders_syn') }) mapper(Address, addresses) mapper(Order, orders, properties={ 'items':relationship(Item, secondary=order_items), #m2m 'address':relationship(Address), # m2o 'items_syn':synonym('items') }) mapper(Item, items, properties={ 'keywords':relationship(Keyword, secondary=item_keywords) #m2m }) mapper(Keyword, keywords) def test_options(self): User, Order = self.classes.User, self.classes.Order s = create_session() def go(): result = s.query(User).filter_by(name='jack').\ options(joinedload(User.orders_syn)).all() eq_(result, [ User(id=7, name='jack', orders=[ Order(description=u'order 1'), Order(description=u'order 3'), Order(description=u'order 5') ]) ]) self.assert_sql_count(testing.db, go, 1) def test_options_syn_of_syn(self): User, Order = self.classes.User, self.classes.Order s = create_session() def go(): result = s.query(User).filter_by(name='jack').\ options(joinedload(User.orders_syn_2)).all() eq_(result, [ User(id=7, name='jack', orders=[ Order(description=u'order 1'), Order(description=u'order 3'), Order(description=u'order 5') ]) ]) self.assert_sql_count(testing.db, go, 1) def test_options_syn_of_syn_string(self): User, Order = self.classes.User, self.classes.Order s = create_session() def go(): result = s.query(User).filter_by(name='jack').\ options(joinedload('orders_syn_2')).all() eq_(result, [ User(id=7, name='jack', orders=[ Order(description=u'order 1'), Order(description=u'order 3'), Order(description=u'order 5') ]) ]) self.assert_sql_count(testing.db, go, 1) def test_joins(self): User, Order = self.classes.User, self.classes.Order for j in ( ['orders', 'items'], ['orders_syn', 'items'], [User.orders_syn, Order.items], ['orders_syn_2', 'items'], [User.orders_syn_2, 'items'], ['orders', 'items_syn'], ['orders_syn', 'items_syn'], ['orders_syn_2', 'items_syn'], ): result = create_session().query(User).join(*j).filter_by(id=3).all() assert [User(id=7, name='jack'), User(id=9, name='fred')] == result def test_with_parent(self): Order, User = self.classes.Order, self.classes.User for nameprop, orderprop in ( ('name', 'orders'), ('name_syn', 'orders'), ('name', 'orders_syn'), ('name', 'orders_syn_2'), ('name_syn', 'orders_syn'), ('name_syn', 'orders_syn_2'), ): sess = create_session() q = sess.query(User) u1 = q.filter_by(**{nameprop:'jack'}).one() o = sess.query(Order).with_parent(u1, property=orderprop).all() assert [Order(description="order 1"), Order(description="order 3"), Order(description="order 5")] == o class ImmediateTest(_fixtures.FixtureTest): run_inserts = 'once' run_deletes = None @classmethod def setup_mappers(cls): Address, addresses, users, User = (cls.classes.Address, cls.tables.addresses, cls.tables.users, cls.classes.User) mapper(Address, addresses) mapper(User, users, properties=dict( addresses=relationship(Address))) def test_one(self): User, Address = self.classes.User, self.classes.Address sess = create_session() 
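# The assertions that follow exercise Query.one(): it requires exactly one
# matching row, raising NoResultFound for zero rows and MultipleResultsFound
# for more than one, while first() simply returns the first row or None.  A
# self-contained sketch against an in-memory SQLite database (the Thing class
# is a hypothetical stand-in, not one of the test fixtures):
from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import Session
from sqlalchemy.orm.exc import MultipleResultsFound, NoResultFound

OneBase = declarative_base()

class Thing(OneBase):
    __tablename__ = 'thing'
    id = Column(Integer, primary_key=True)
    name = Column(String)

engine = create_engine('sqlite://')
OneBase.metadata.create_all(engine)
session = Session(bind=engine)
session.add_all([Thing(id=1, name='a'), Thing(id=2, name='a')])
session.commit()

assert session.query(Thing).filter_by(id=1).one().id == 1
assert session.query(Thing).filter_by(id=99).first() is None

try:
    session.query(Thing).filter_by(id=99).one()      # zero rows -> raises
except NoResultFound:
    pass

try:
    session.query(Thing).filter_by(name='a').one()   # two rows -> raises
except MultipleResultsFound:
    pass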
assert_raises(sa.orm.exc.NoResultFound, sess.query(User).filter(User.id == 99).one) eq_(sess.query(User).filter(User.id == 7).one().id, 7) assert_raises(sa.orm.exc.MultipleResultsFound, sess.query(User).one) assert_raises( sa.orm.exc.NoResultFound, sess.query(User.id, User.name).filter(User.id == 99).one) eq_(sess.query(User.id, User.name).filter(User.id == 7).one(), (7, 'jack')) assert_raises(sa.orm.exc.MultipleResultsFound, sess.query(User.id, User.name).one) assert_raises(sa.orm.exc.NoResultFound, (sess.query(User, Address). join(User.addresses). filter(Address.id == 99)).one) eq_((sess.query(User, Address). join(User.addresses). filter(Address.id == 4)).one(), (User(id=8), Address(id=4))) assert_raises(sa.orm.exc.MultipleResultsFound, sess.query(User, Address).join(User.addresses).one) # this result returns multiple rows, the first # two rows being the same. but uniquing is # not applied for a column based result. assert_raises(sa.orm.exc.MultipleResultsFound, sess.query(User.id). join(User.addresses). filter(User.id.in_([8, 9])). order_by(User.id). one) # test that a join which ultimately returns # multiple identities across many rows still # raises, even though the first two rows are of # the same identity and unique filtering # is applied ([ticket:1688]) assert_raises(sa.orm.exc.MultipleResultsFound, sess.query(User). join(User.addresses). filter(User.id.in_([8, 9])). order_by(User.id). one) @testing.future def test_getslice(self): assert False def test_scalar(self): User = self.classes.User sess = create_session() eq_(sess.query(User.id).filter_by(id=7).scalar(), 7) eq_(sess.query(User.id, User.name).filter_by(id=7).scalar(), 7) eq_(sess.query(User.id).filter_by(id=0).scalar(), None) eq_(sess.query(User).filter_by(id=7).scalar(), sess.query(User).filter_by(id=7).one()) assert_raises(sa.orm.exc.MultipleResultsFound, sess.query(User).scalar) assert_raises(sa.orm.exc.MultipleResultsFound, sess.query(User.id, User.name).scalar) def test_value(self): User = self.classes.User sess = create_session() eq_(sess.query(User).filter_by(id=7).value(User.id), 7) eq_(sess.query(User.id, User.name).filter_by(id=7).value(User.id), 7) eq_(sess.query(User).filter_by(id=0).value(User.id), None) sess.bind = testing.db eq_(sess.query().value(sa.literal_column('1').label('x')), 1) class ExecutionOptionsTest(QueryTest): def test_option_building(self): User = self.classes.User sess = create_session(bind=testing.db, autocommit=False) q1 = sess.query(User) assert q1._execution_options == dict() q2 = q1.execution_options(foo='bar', stream_results=True) # q1's options should be unchanged. assert q1._execution_options == dict() # q2 should have them set. 
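# As the surrounding assertions show, Query methods are generative:
# execution_options() returns a new Query with the new keys merged over any
# existing ones, leaving the original Query untouched.  A self-contained
# sketch of the same pattern (the Widget class is a hypothetical stand-in,
# not one of the test fixtures):
from sqlalchemy import Column, Integer
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import Session

OptBase = declarative_base()

class Widget(OptBase):
    __tablename__ = 'widget'
    id = Column(Integer, primary_key=True)

w1 = Session().query(Widget)
w2 = w1.execution_options(stream_results=True)
w3 = w2.execution_options(foo='bar')

assert w1._execution_options == {}
assert w2._execution_options == {'stream_results': True}
assert w3._execution_options == {'stream_results': True, 'foo': 'bar'}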
assert q2._execution_options == dict(foo='bar', stream_results=True) q3 = q2.execution_options(foo='not bar', answer=42) assert q2._execution_options == dict(foo='bar', stream_results=True) q3_options = dict(foo='not bar', stream_results=True, answer=42) assert q3._execution_options == q3_options def test_options_in_connection(self): User = self.classes.User execution_options = dict(foo='bar', stream_results=True) class TQuery(Query): def instances(self, result, ctx): try: eq_( result.connection._execution_options, execution_options ) finally: result.close() return iter([]) sess = create_session(bind=testing.db, autocommit=False, query_cls=TQuery) q1 = sess.query(User).execution_options(**execution_options) q1.all() class OptionsTest(QueryTest): """Test the _process_paths() method of PropertyOption.""" def _option_fixture(self, *arg): from sqlalchemy.orm import interfaces class Opt(interfaces.PropertyOption): pass return Opt(arg) def _make_path(self, path): r = [] for i, item in enumerate(path): if i % 2 == 0: if isinstance(item, type): item = class_mapper(item) else: if isinstance(item, basestring): item = inspect(r[-1]).mapper.attrs[item] r.append(item) return tuple(r) def _make_path_registry(self, path): return orm_util.PathRegistry.coerce(self._make_path(path)) def _assert_path_result(self, opt, q, paths): q._attributes = q._attributes.copy() assert_paths = opt._process_paths(q, False) eq_( [p.path for p in assert_paths], [self._make_path(p) for p in paths] ) def test_get_path_one_level_string(self): User = self.classes.User sess = Session() q = sess.query(User) opt = self._option_fixture("addresses") self._assert_path_result(opt, q, [(User, 'addresses')]) def test_get_path_one_level_attribute(self): User = self.classes.User sess = Session() q = sess.query(User) opt = self._option_fixture(User.addresses) self._assert_path_result(opt, q, [(User, 'addresses')]) def test_path_on_entity_but_doesnt_match_currentpath(self): User, Address = self.classes.User, self.classes.Address # ensure "current path" is fully consumed before # matching against current entities. 
# see [ticket:2098] sess = Session() q = sess.query(User) opt = self._option_fixture('email_address', 'id') q = sess.query(Address)._with_current_path( orm_util.PathRegistry.coerce([inspect(User), inspect(User).attrs.addresses]) ) self._assert_path_result(opt, q, []) def test_get_path_one_level_with_unrelated(self): Order = self.classes.Order sess = Session() q = sess.query(Order) opt = self._option_fixture("addresses") self._assert_path_result(opt, q, []) def test_path_multilevel_string(self): Item, User, Order = (self.classes.Item, self.classes.User, self.classes.Order) sess = Session() q = sess.query(User) opt = self._option_fixture("orders.items.keywords") self._assert_path_result(opt, q, [ (User, 'orders'), (User, 'orders', Order, 'items'), (User, 'orders', Order, 'items', Item, 'keywords') ]) def test_path_multilevel_attribute(self): Item, User, Order = (self.classes.Item, self.classes.User, self.classes.Order) sess = Session() q = sess.query(User) opt = self._option_fixture(User.orders, Order.items, Item.keywords) self._assert_path_result(opt, q, [ (User, 'orders'), (User, 'orders', Order, 'items'), (User, 'orders', Order, 'items', Item, 'keywords') ]) def test_with_current_matching_string(self): Item, User, Order = (self.classes.Item, self.classes.User, self.classes.Order) sess = Session() q = sess.query(Item)._with_current_path( self._make_path_registry([User, 'orders', Order, 'items']) ) opt = self._option_fixture("orders.items.keywords") self._assert_path_result(opt, q, [ (Item, 'keywords') ]) def test_with_current_matching_attribute(self): Item, User, Order = (self.classes.Item, self.classes.User, self.classes.Order) sess = Session() q = sess.query(Item)._with_current_path( self._make_path_registry([User, 'orders', Order, 'items']) ) opt = self._option_fixture(User.orders, Order.items, Item.keywords) self._assert_path_result(opt, q, [ (Item, 'keywords') ]) def test_with_current_nonmatching_string(self): Item, User, Order = (self.classes.Item, self.classes.User, self.classes.Order) sess = Session() q = sess.query(Item)._with_current_path( self._make_path_registry([User, 'orders', Order, 'items']) ) opt = self._option_fixture("keywords") self._assert_path_result(opt, q, []) opt = self._option_fixture("items.keywords") self._assert_path_result(opt, q, []) def test_with_current_nonmatching_attribute(self): Item, User, Order = (self.classes.Item, self.classes.User, self.classes.Order) sess = Session() q = sess.query(Item)._with_current_path( self._make_path_registry([User, 'orders', Order, 'items']) ) opt = self._option_fixture(Item.keywords) self._assert_path_result(opt, q, []) opt = self._option_fixture(Order.items, Item.keywords) self._assert_path_result(opt, q, []) def test_from_base_to_subclass_attr(self): Dingaling, Address = self.classes.Dingaling, self.classes.Address sess = Session() class SubAddr(Address): pass mapper(SubAddr, inherits=Address, properties={ 'flub': relationship(Dingaling) }) q = sess.query(Address) opt = self._option_fixture(SubAddr.flub) self._assert_path_result(opt, q, [(SubAddr, 'flub')]) def test_from_subclass_to_subclass_attr(self): Dingaling, Address = self.classes.Dingaling, self.classes.Address sess = Session() class SubAddr(Address): pass mapper(SubAddr, inherits=Address, properties={ 'flub': relationship(Dingaling) }) q = sess.query(SubAddr) opt = self._option_fixture(SubAddr.flub) self._assert_path_result(opt, q, [(SubAddr, 'flub')]) def test_from_base_to_base_attr_via_subclass(self): Dingaling, Address = self.classes.Dingaling, 
self.classes.Address sess = Session() class SubAddr(Address): pass mapper(SubAddr, inherits=Address, properties={ 'flub': relationship(Dingaling) }) q = sess.query(Address) opt = self._option_fixture(SubAddr.user) self._assert_path_result(opt, q, [(Address, inspect(Address).attrs.user)]) def test_of_type(self): User, Address = self.classes.User, self.classes.Address sess = Session() class SubAddr(Address): pass mapper(SubAddr, inherits=Address) q = sess.query(User) opt = self._option_fixture(User.addresses.of_type(SubAddr), SubAddr.user) u_mapper = inspect(User) a_mapper = inspect(Address) self._assert_path_result(opt, q, [ (u_mapper, u_mapper.attrs.addresses), (u_mapper, u_mapper.attrs.addresses, a_mapper, a_mapper.attrs.user) ]) def test_of_type_plus_level(self): Dingaling, User, Address = (self.classes.Dingaling, self.classes.User, self.classes.Address) sess = Session() class SubAddr(Address): pass mapper(SubAddr, inherits=Address, properties={ 'flub': relationship(Dingaling) }) q = sess.query(User) opt = self._option_fixture(User.addresses.of_type(SubAddr), SubAddr.flub) u_mapper = inspect(User) sa_mapper = inspect(SubAddr) self._assert_path_result(opt, q, [ (u_mapper, u_mapper.attrs.addresses), (u_mapper, u_mapper.attrs.addresses, sa_mapper, sa_mapper.attrs.flub) ]) def test_aliased_single(self): User = self.classes.User sess = Session() ualias = aliased(User) q = sess.query(ualias) opt = self._option_fixture(ualias.addresses) self._assert_path_result(opt, q, [(inspect(ualias), 'addresses')]) def test_with_current_aliased_single(self): User, Address = self.classes.User, self.classes.Address sess = Session() ualias = aliased(User) q = sess.query(ualias)._with_current_path( self._make_path_registry([Address, 'user']) ) opt = self._option_fixture(Address.user, ualias.addresses) self._assert_path_result(opt, q, [(inspect(ualias), 'addresses')]) def test_with_current_aliased_single_nonmatching_option(self): User, Address = self.classes.User, self.classes.Address sess = Session() ualias = aliased(User) q = sess.query(User)._with_current_path( self._make_path_registry([Address, 'user']) ) opt = self._option_fixture(Address.user, ualias.addresses) self._assert_path_result(opt, q, []) def test_with_current_aliased_single_nonmatching_entity(self): User, Address = self.classes.User, self.classes.Address sess = Session() ualias = aliased(User) q = sess.query(ualias)._with_current_path( self._make_path_registry([Address, 'user']) ) opt = self._option_fixture(Address.user, User.addresses) self._assert_path_result(opt, q, []) def test_multi_entity_opt_on_second(self): Item = self.classes.Item Order = self.classes.Order opt = self._option_fixture(Order.items) sess = Session() q = sess.query(Item, Order) self._assert_path_result(opt, q, [(Order, "items")]) def test_multi_entity_opt_on_string(self): Item = self.classes.Item Order = self.classes.Order opt = self._option_fixture("items") sess = Session() q = sess.query(Item, Order) self._assert_path_result(opt, q, []) def test_multi_entity_no_mapped_entities(self): Item = self.classes.Item Order = self.classes.Order opt = self._option_fixture("items") sess = Session() q = sess.query(Item.id, Order.id) self._assert_path_result(opt, q, []) def test_path_exhausted(self): User = self.classes.User Item = self.classes.Item Order = self.classes.Order opt = self._option_fixture(User.orders) sess = Session() q = sess.query(Item)._with_current_path( self._make_path_registry([User, 'orders', Order, 'items']) ) self._assert_path_result(opt, q, []) class 
OptionsNoPropTest(_fixtures.FixtureTest): """test the error messages emitted when using property options in conjunection with column-only entities, or for not existing options """ run_create_tables = False run_inserts = None run_deletes = None def test_option_with_mapper_basestring(self): Item = self.classes.Item self._assert_option([Item], 'keywords') def test_option_with_mapper_PropCompatator(self): Item = self.classes.Item self._assert_option([Item], Item.keywords) def test_option_with_mapper_then_column_basestring(self): Item = self.classes.Item self._assert_option([Item, Item.id], 'keywords') def test_option_with_mapper_then_column_PropComparator(self): Item = self.classes.Item self._assert_option([Item, Item.id], Item.keywords) def test_option_with_column_then_mapper_basestring(self): Item = self.classes.Item self._assert_option([Item.id, Item], 'keywords') def test_option_with_column_then_mapper_PropComparator(self): Item = self.classes.Item self._assert_option([Item.id, Item], Item.keywords) def test_option_with_column_basestring(self): Item = self.classes.Item message = \ "Query has only expression-based entities - "\ "can't find property named 'keywords'." self._assert_eager_with_just_column_exception(Item.id, 'keywords', message) def test_option_with_column_PropComparator(self): Item = self.classes.Item self._assert_eager_with_just_column_exception(Item.id, Item.keywords, "Query has only expression-based entities " "- can't find property named 'keywords'." ) def test_option_against_nonexistent_PropComparator(self): Item = self.classes.Item Keyword = self.classes.Keyword self._assert_eager_with_entity_exception( [Keyword], (joinedload(Item.keywords), ), r"Can't find property 'keywords' on any entity specified " r"in this Query. Note the full path from root " r"\(Mapper\|Keyword\|keywords\) to target entity must be specified." ) def test_option_against_nonexistent_basestring(self): Item = self.classes.Item self._assert_eager_with_entity_exception( [Item], (joinedload("foo"), ), r"Can't find property named 'foo' on the mapped " r"entity Mapper\|Item\|items in this Query." ) def test_option_against_nonexistent_twolevel_basestring(self): Item = self.classes.Item self._assert_eager_with_entity_exception( [Item], (joinedload("keywords.foo"), ), r"Can't find property named 'foo' on the mapped entity " r"Mapper\|Keyword\|keywords in this Query." ) def test_option_against_nonexistent_twolevel_all(self): Item = self.classes.Item self._assert_eager_with_entity_exception( [Item], (joinedload_all("keywords.foo"), ), r"Can't find property named 'foo' on the mapped entity " r"Mapper\|Keyword\|keywords in this Query." 
) @testing.fails_if(lambda:True, "PropertyOption doesn't yet check for relation/column on end result") def test_option_against_non_relation_basestring(self): Item = self.classes.Item Keyword = self.classes.Keyword self._assert_eager_with_entity_exception( [Keyword, Item], (joinedload_all("keywords"), ), r"Attribute 'keywords' of entity 'Mapper\|Keyword\|keywords' " "does not refer to a mapped entity" ) @testing.fails_if(lambda:True, "PropertyOption doesn't yet check for relation/column on end result") def test_option_against_multi_non_relation_basestring(self): Item = self.classes.Item Keyword = self.classes.Keyword self._assert_eager_with_entity_exception( [Keyword, Item], (joinedload_all("keywords"), ), r"Attribute 'keywords' of entity 'Mapper\|Keyword\|keywords' " "does not refer to a mapped entity" ) def test_option_against_wrong_entity_type_basestring(self): Item = self.classes.Item self._assert_eager_with_entity_exception( [Item], (joinedload_all("id", "keywords"), ), r"Attribute 'id' of entity 'Mapper\|Item\|items' does not " r"refer to a mapped entity" ) def test_option_against_multi_non_relation_twolevel_basestring(self): Item = self.classes.Item Keyword = self.classes.Keyword self._assert_eager_with_entity_exception( [Keyword, Item], (joinedload_all("id", "keywords"), ), r"Attribute 'id' of entity 'Mapper\|Keyword\|keywords' " "does not refer to a mapped entity" ) def test_option_against_multi_nonexistent_basestring(self): Item = self.classes.Item Keyword = self.classes.Keyword self._assert_eager_with_entity_exception( [Keyword, Item], (joinedload_all("description"), ), r"Can't find property named 'description' on the mapped " r"entity Mapper\|Keyword\|keywords in this Query." ) def test_option_against_multi_no_entities_basestring(self): Item = self.classes.Item Keyword = self.classes.Keyword self._assert_eager_with_entity_exception( [Keyword.id, Item.id], (joinedload_all("keywords"), ), r"Query has only expression-based entities - can't find property " "named 'keywords'." ) def test_option_against_wrong_multi_entity_type_attr_one(self): Item = self.classes.Item Keyword = self.classes.Keyword self._assert_eager_with_entity_exception( [Keyword, Item], (joinedload_all(Keyword.id, Item.keywords), ), r"Attribute 'Keyword.id' of entity 'Mapper\|Keyword\|keywords' " "does not refer to a mapped entity" ) def test_option_against_wrong_multi_entity_type_attr_two(self): Item = self.classes.Item Keyword = self.classes.Keyword self._assert_eager_with_entity_exception( [Keyword, Item], (joinedload_all(Keyword.keywords, Item.keywords), ), r"Attribute 'Keyword.keywords' of entity 'Mapper\|Keyword\|keywords' " "does not refer to a mapped entity" ) def test_option_against_wrong_multi_entity_type_attr_three(self): Item = self.classes.Item Keyword = self.classes.Keyword self._assert_eager_with_entity_exception( [Keyword.id, Item.id], (joinedload_all(Keyword.keywords, Item.keywords), ), r"Query has only expression-based entities - " "can't find property named 'keywords'." 
) def test_wrong_type_in_option(self): Item = self.classes.Item Keyword = self.classes.Keyword self._assert_eager_with_entity_exception( [Item], (joinedload_all(Keyword), ), r"mapper option expects string key or list of attributes" ) def test_non_contiguous_all_option(self): User = self.classes.User self._assert_eager_with_entity_exception( [User], (joinedload_all(User.addresses, User.orders), ), r"Attribute 'User.orders' does not link " "from element 'Mapper|Address|addresses'" ) @classmethod def setup_mappers(cls): users, User, addresses, Address, orders, Order = ( cls.tables.users, cls.classes.User, cls.tables.addresses, cls.classes.Address, cls.tables.orders, cls.classes.Order) mapper(User, users, properties={ 'addresses': relationship(Address), 'orders': relationship(Order) }) mapper(Address, addresses) mapper(Order, orders) keywords, items, item_keywords, Keyword, Item = (cls.tables.keywords, cls.tables.items, cls.tables.item_keywords, cls.classes.Keyword, cls.classes.Item) mapper(Keyword, keywords, properties={ "keywords": column_property(keywords.c.name + "some keyword") }) mapper(Item, items, properties=dict(keywords=relationship(Keyword, secondary=item_keywords))) def _assert_option(self, entity_list, option): Item = self.classes.Item q = create_session().query(*entity_list).\ options(joinedload(option)) key = ('loaderstrategy', (inspect(Item), inspect(Item).attrs.keywords)) assert key in q._attributes def _assert_eager_with_entity_exception(self, entity_list, options, message): assert_raises_message(sa.exc.ArgumentError, message, create_session().query(*entity_list).options, *options) def _assert_eager_with_just_column_exception(self, column, eager_option, message): assert_raises_message(sa.exc.ArgumentError, message, create_session().query(column).options, joinedload(eager_option)) SQLAlchemy-0.8.4/test/orm/test_rel_fn.py0000644000076500000240000011116512251147172020672 0ustar classicstaff00000000000000from sqlalchemy.testing import assert_raises_message, eq_, \ AssertsCompiledSQL, is_ from sqlalchemy.testing import fixtures from sqlalchemy.orm import relationships, foreign, remote from sqlalchemy import MetaData, Table, Column, ForeignKey, Integer, \ select, ForeignKeyConstraint, exc, func, and_ from sqlalchemy.orm.interfaces import ONETOMANY, MANYTOONE, MANYTOMANY class _JoinFixtures(object): @classmethod def setup_class(cls): m = MetaData() cls.left = Table('lft', m, Column('id', Integer, primary_key=True), Column('x', Integer), Column('y', Integer), ) cls.right = Table('rgt', m, Column('id', Integer, primary_key=True), Column('lid', Integer, ForeignKey('lft.id')), Column('x', Integer), Column('y', Integer), ) cls.right_multi_fk = Table('rgt_multi_fk', m, Column('id', Integer, primary_key=True), Column('lid1', Integer, ForeignKey('lft.id')), Column('lid2', Integer, ForeignKey('lft.id')), ) cls.selfref = Table('selfref', m, Column('id', Integer, primary_key=True), Column('sid', Integer, ForeignKey('selfref.id')) ) cls.composite_selfref = Table('composite_selfref', m, Column('id', Integer, primary_key=True), Column('group_id', Integer, primary_key=True), Column('parent_id', Integer), ForeignKeyConstraint( ['parent_id', 'group_id'], ['composite_selfref.id', 'composite_selfref.group_id'] ) ) cls.m2mleft = Table('m2mlft', m, Column('id', Integer, primary_key=True), ) cls.m2mright = Table('m2mrgt', m, Column('id', Integer, primary_key=True), ) cls.m2msecondary = Table('m2msecondary', m, Column('lid', Integer, ForeignKey('m2mlft.id'), primary_key=True), Column('rid', Integer, 
ForeignKey('m2mrgt.id'), primary_key=True), ) cls.m2msecondary_no_fks = Table('m2msecondary_no_fks', m, Column('lid', Integer, primary_key=True), Column('rid', Integer, primary_key=True), ) cls.m2msecondary_ambig_fks = Table('m2msecondary_ambig_fks', m, Column('lid1', Integer, ForeignKey('m2mlft.id'), primary_key=True), Column('rid1', Integer, ForeignKey('m2mrgt.id'), primary_key=True), Column('lid2', Integer, ForeignKey('m2mlft.id'), primary_key=True), Column('rid2', Integer, ForeignKey('m2mrgt.id'), primary_key=True), ) cls.base_w_sub_rel = Table('base_w_sub_rel', m, Column('id', Integer, primary_key=True), Column('sub_id', Integer, ForeignKey('rel_sub.id')) ) cls.rel_sub = Table('rel_sub', m, Column('id', Integer, ForeignKey('base_w_sub_rel.id'), primary_key=True) ) cls.base = Table('base', m, Column('id', Integer, primary_key=True), ) cls.sub = Table('sub', m, Column('id', Integer, ForeignKey('base.id'), primary_key=True), ) cls.sub_w_base_rel = Table('sub_w_base_rel', m, Column('id', Integer, ForeignKey('base.id'), primary_key=True), Column('base_id', Integer, ForeignKey('base.id')) ) cls.sub_w_sub_rel = Table('sub_w_sub_rel', m, Column('id', Integer, ForeignKey('base.id'), primary_key=True), Column('sub_id', Integer, ForeignKey('sub.id')) ) cls.right_w_base_rel = Table('right_w_base_rel', m, Column('id', Integer, primary_key=True), Column('base_id', Integer, ForeignKey('base.id')) ) cls.three_tab_a = Table('three_tab_a', m, Column('id', Integer, primary_key=True), ) cls.three_tab_b = Table('three_tab_b', m, Column('id', Integer, primary_key=True), Column('aid', Integer, ForeignKey('three_tab_a.id')) ) cls.three_tab_c = Table('three_tab_c', m, Column('id', Integer, primary_key=True), Column('aid', Integer, ForeignKey('three_tab_a.id')), Column('bid', Integer, ForeignKey('three_tab_b.id')) ) def _join_fixture_overlapping_three_tables(self, **kw): def _can_sync(*cols): for c in cols: if self.three_tab_c.c.contains_column(c): return False else: return True return relationships.JoinCondition( self.three_tab_a, self.three_tab_b, self.three_tab_a, self.three_tab_b, support_sync=False, can_be_synced_fn=_can_sync, primaryjoin=and_( self.three_tab_a.c.id == self.three_tab_b.c.aid, self.three_tab_c.c.bid == self.three_tab_b.c.id, self.three_tab_c.c.aid == self.three_tab_a.c.id ) ) def _join_fixture_m2m(self, **kw): return relationships.JoinCondition( self.m2mleft, self.m2mright, self.m2mleft, self.m2mright, secondary=self.m2msecondary, **kw ) def _join_fixture_m2m_backref(self, **kw): """return JoinCondition in the same way RelationshipProperty calls it for a backref on an m2m. 
""" j1 = self._join_fixture_m2m() return j1, relationships.JoinCondition( self.m2mright, self.m2mleft, self.m2mright, self.m2mleft, secondary=self.m2msecondary, primaryjoin=j1.secondaryjoin_minus_local, secondaryjoin=j1.primaryjoin_minus_local ) def _join_fixture_o2m(self, **kw): return relationships.JoinCondition( self.left, self.right, self.left, self.right, **kw ) def _join_fixture_m2o(self, **kw): return relationships.JoinCondition( self.right, self.left, self.right, self.left, **kw ) def _join_fixture_o2m_selfref(self, **kw): return relationships.JoinCondition( self.selfref, self.selfref, self.selfref, self.selfref, **kw ) def _join_fixture_m2o_selfref(self, **kw): return relationships.JoinCondition( self.selfref, self.selfref, self.selfref, self.selfref, remote_side=set([self.selfref.c.id]), **kw ) def _join_fixture_o2m_composite_selfref(self, **kw): return relationships.JoinCondition( self.composite_selfref, self.composite_selfref, self.composite_selfref, self.composite_selfref, **kw ) def _join_fixture_m2o_composite_selfref(self, **kw): return relationships.JoinCondition( self.composite_selfref, self.composite_selfref, self.composite_selfref, self.composite_selfref, remote_side=set([self.composite_selfref.c.id, self.composite_selfref.c.group_id]), **kw ) def _join_fixture_o2m_composite_selfref_func(self, **kw): return relationships.JoinCondition( self.composite_selfref, self.composite_selfref, self.composite_selfref, self.composite_selfref, primaryjoin=and_( self.composite_selfref.c.group_id == func.foo(self.composite_selfref.c.group_id), self.composite_selfref.c.parent_id == self.composite_selfref.c.id ), **kw ) def _join_fixture_o2m_composite_selfref_func_annotated(self, **kw): return relationships.JoinCondition( self.composite_selfref, self.composite_selfref, self.composite_selfref, self.composite_selfref, primaryjoin=and_( remote(self.composite_selfref.c.group_id) == func.foo(self.composite_selfref.c.group_id), remote(self.composite_selfref.c.parent_id) == self.composite_selfref.c.id ), **kw ) def _join_fixture_compound_expression_1(self, **kw): return relationships.JoinCondition( self.left, self.right, self.left, self.right, primaryjoin=(self.left.c.x + self.left.c.y) == \ relationships.remote(relationships.foreign( self.right.c.x * self.right.c.y )), **kw ) def _join_fixture_compound_expression_2(self, **kw): return relationships.JoinCondition( self.left, self.right, self.left, self.right, primaryjoin=(self.left.c.x + self.left.c.y) == \ relationships.foreign( self.right.c.x * self.right.c.y ), **kw ) def _join_fixture_compound_expression_1_non_annotated(self, **kw): return relationships.JoinCondition( self.left, self.right, self.left, self.right, primaryjoin=(self.left.c.x + self.left.c.y) == \ ( self.right.c.x * self.right.c.y ), **kw ) def _join_fixture_base_to_joined_sub(self, **kw): # see test/orm/inheritance/test_abc_inheritance:TestaTobM2O # and others there right = self.base_w_sub_rel.join(self.rel_sub, self.base_w_sub_rel.c.id == self.rel_sub.c.id ) return relationships.JoinCondition( self.base_w_sub_rel, right, self.base_w_sub_rel, self.rel_sub, primaryjoin=self.base_w_sub_rel.c.sub_id == \ self.rel_sub.c.id, **kw ) def _join_fixture_o2m_joined_sub_to_base(self, **kw): left = self.base.join(self.sub_w_base_rel, self.base.c.id == self.sub_w_base_rel.c.id) return relationships.JoinCondition( left, self.base, self.sub_w_base_rel, self.base, primaryjoin=self.sub_w_base_rel.c.base_id == self.base.c.id ) def _join_fixture_m2o_joined_sub_to_sub_on_base(self, **kw): # this 
is a late add - a variant of the test case # in #2491 where we join on the base cols instead. only # m2o has a problem at the time of this test. left = self.base.join(self.sub, self.base.c.id == self.sub.c.id) right = self.base.join(self.sub_w_base_rel, self.base.c.id == self.sub_w_base_rel.c.id) return relationships.JoinCondition( left, right, self.sub, self.sub_w_base_rel, primaryjoin=self.sub_w_base_rel.c.base_id == self.base.c.id, ) def _join_fixture_o2m_joined_sub_to_sub(self, **kw): left = self.base.join(self.sub, self.base.c.id == self.sub.c.id) right = self.base.join(self.sub_w_sub_rel, self.base.c.id == self.sub_w_sub_rel.c.id) return relationships.JoinCondition( left, right, self.sub, self.sub_w_sub_rel, primaryjoin=self.sub.c.id == self.sub_w_sub_rel.c.sub_id ) def _join_fixture_m2o_sub_to_joined_sub(self, **kw): # see test.orm.test_mapper:MapperTest.test_add_column_prop_deannotate, right = self.base.join(self.right_w_base_rel, self.base.c.id == self.right_w_base_rel.c.id) return relationships.JoinCondition( self.right_w_base_rel, right, self.right_w_base_rel, self.right_w_base_rel, ) def _join_fixture_m2o_sub_to_joined_sub_func(self, **kw): # see test.orm.test_mapper:MapperTest.test_add_column_prop_deannotate, right = self.base.join(self.right_w_base_rel, self.base.c.id == self.right_w_base_rel.c.id) return relationships.JoinCondition( self.right_w_base_rel, right, self.right_w_base_rel, self.right_w_base_rel, primaryjoin=self.right_w_base_rel.c.base_id == \ func.foo(self.base.c.id) ) def _join_fixture_o2o_joined_sub_to_base(self, **kw): left = self.base.join(self.sub, self.base.c.id == self.sub.c.id) # see test_relationships->AmbiguousJoinInterpretedAsSelfRef return relationships.JoinCondition( left, self.sub, left, self.sub, ) def _join_fixture_o2m_to_annotated_func(self, **kw): return relationships.JoinCondition( self.left, self.right, self.left, self.right, primaryjoin=self.left.c.id == foreign(func.foo(self.right.c.lid)), **kw ) def _join_fixture_o2m_to_oldstyle_func(self, **kw): return relationships.JoinCondition( self.left, self.right, self.left, self.right, primaryjoin=self.left.c.id == func.foo(self.right.c.lid), consider_as_foreign_keys=[self.right.c.lid], **kw ) def _assert_non_simple_warning(self, fn): assert_raises_message( exc.SAWarning, "Non-simple column elements in " "primary join condition for property " r"None - consider using remote\(\) " "annotations to mark the remote side.", fn ) def _assert_raises_no_relevant_fks(self, fn, expr, relname, primary, *arg, **kw): assert_raises_message( exc.ArgumentError, r"Could not locate any relevant foreign key columns " r"for %s join condition '%s' on relationship %s. " r"Ensure that referencing columns are associated with " r"a ForeignKey or ForeignKeyConstraint, or are annotated " r"in the join condition with the foreign\(\) annotation." % ( primary, expr, relname ), fn, *arg, **kw ) def _assert_raises_no_equality(self, fn, expr, relname, primary, *arg, **kw): assert_raises_message( exc.ArgumentError, "Could not locate any simple equality expressions " "involving locally mapped foreign key columns for %s join " "condition '%s' on relationship %s. " "Ensure that referencing columns are associated with a " "ForeignKey or ForeignKeyConstraint, or are annotated in " r"the join condition with the foreign\(\) annotation. " "To allow comparison operators other than '==', " "the relationship can be marked as viewonly=True." 
% ( primary, expr, relname ), fn, *arg, **kw ) def _assert_raises_ambig_join(self, fn, relname, secondary_arg, *arg, **kw): if secondary_arg is not None: assert_raises_message( exc.AmbiguousForeignKeysError, "Could not determine join condition between " "parent/child tables on relationship %s - " "there are multiple foreign key paths linking the " "tables via secondary table '%s'. " "Specify the 'foreign_keys' argument, providing a list " "of those columns which should be counted as " "containing a foreign key reference from the " "secondary table to each of the parent and child tables." % (relname, secondary_arg), fn, *arg, **kw) else: assert_raises_message( exc.AmbiguousForeignKeysError, "Could not determine join condition between " "parent/child tables on relationship %s - " "there are no foreign keys linking these tables. " % (relname,), fn, *arg, **kw) def _assert_raises_no_join(self, fn, relname, secondary_arg, *arg, **kw): if secondary_arg is not None: assert_raises_message( exc.NoForeignKeysError, "Could not determine join condition between " "parent/child tables on relationship %s - " "there are no foreign keys linking these tables " "via secondary table '%s'. " "Ensure that referencing columns are associated " "with a ForeignKey " "or ForeignKeyConstraint, or specify 'primaryjoin' and " "'secondaryjoin' expressions" % (relname, secondary_arg), fn, *arg, **kw) else: assert_raises_message( exc.NoForeignKeysError, "Could not determine join condition between " "parent/child tables on relationship %s - " "there are no foreign keys linking these tables. " "Ensure that referencing columns are associated " "with a ForeignKey " "or ForeignKeyConstraint, or specify a 'primaryjoin' " "expression." % (relname,), fn, *arg, **kw) class ColumnCollectionsTest(_JoinFixtures, fixtures.TestBase, AssertsCompiledSQL): def test_determine_local_remote_pairs_o2o_joined_sub_to_base(self): joincond = self._join_fixture_o2o_joined_sub_to_base() eq_( joincond.local_remote_pairs, [(self.base.c.id, self.sub.c.id)] ) def test_determine_synchronize_pairs_o2m_to_annotated_func(self): joincond = self._join_fixture_o2m_to_annotated_func() eq_( joincond.synchronize_pairs, [(self.left.c.id, self.right.c.lid)] ) def test_determine_synchronize_pairs_o2m_to_oldstyle_func(self): joincond = self._join_fixture_o2m_to_oldstyle_func() eq_( joincond.synchronize_pairs, [(self.left.c.id, self.right.c.lid)] ) def test_determinelocal_remote_m2o_joined_sub_to_sub_on_base(self): joincond = self._join_fixture_m2o_joined_sub_to_sub_on_base() eq_( joincond.local_remote_pairs, [(self.base.c.id, self.sub_w_base_rel.c.base_id)] ) def test_determine_local_remote_base_to_joined_sub(self): joincond = self._join_fixture_base_to_joined_sub() eq_( joincond.local_remote_pairs, [ (self.base_w_sub_rel.c.sub_id, self.rel_sub.c.id) ] ) def test_determine_local_remote_o2m_joined_sub_to_base(self): joincond = self._join_fixture_o2m_joined_sub_to_base() eq_( joincond.local_remote_pairs, [ (self.sub_w_base_rel.c.base_id, self.base.c.id) ] ) def test_determine_local_remote_m2o_sub_to_joined_sub(self): joincond = self._join_fixture_m2o_sub_to_joined_sub() eq_( joincond.local_remote_pairs, [ (self.right_w_base_rel.c.base_id, self.base.c.id) ] ) def test_determine_remote_columns_o2m_joined_sub_to_sub(self): joincond = self._join_fixture_o2m_joined_sub_to_sub() eq_( joincond.local_remote_pairs, [ (self.sub.c.id, self.sub_w_sub_rel.c.sub_id) ] ) def test_determine_remote_columns_compound_1(self): joincond = self._join_fixture_compound_expression_1( 
support_sync=False) eq_( joincond.remote_columns, set([self.right.c.x, self.right.c.y]) ) def test_determine_local_remote_compound_1(self): joincond = self._join_fixture_compound_expression_1( support_sync=False) eq_( joincond.local_remote_pairs, [ (self.left.c.x, self.right.c.x), (self.left.c.x, self.right.c.y), (self.left.c.y, self.right.c.x), (self.left.c.y, self.right.c.y) ] ) def test_determine_local_remote_compound_2(self): joincond = self._join_fixture_compound_expression_2( support_sync=False) eq_( joincond.local_remote_pairs, [ (self.left.c.x, self.right.c.x), (self.left.c.x, self.right.c.y), (self.left.c.y, self.right.c.x), (self.left.c.y, self.right.c.y) ] ) def test_determine_local_remote_compound_3(self): joincond = self._join_fixture_compound_expression_1() eq_( joincond.local_remote_pairs, [ (self.left.c.x, self.right.c.x), (self.left.c.x, self.right.c.y), (self.left.c.y, self.right.c.x), (self.left.c.y, self.right.c.y), ] ) def test_err_local_remote_compound_1(self): self._assert_raises_no_relevant_fks( self._join_fixture_compound_expression_1_non_annotated, r'lft.x \+ lft.y = rgt.x \* rgt.y', "None", "primary" ) def test_determine_remote_columns_compound_2(self): joincond = self._join_fixture_compound_expression_2( support_sync=False) eq_( joincond.remote_columns, set([self.right.c.x, self.right.c.y]) ) def test_determine_remote_columns_o2m(self): joincond = self._join_fixture_o2m() eq_( joincond.remote_columns, set([self.right.c.lid]) ) def test_determine_remote_columns_o2m_selfref(self): joincond = self._join_fixture_o2m_selfref() eq_( joincond.remote_columns, set([self.selfref.c.sid]) ) def test_determine_local_remote_pairs_o2m_composite_selfref(self): joincond = self._join_fixture_o2m_composite_selfref() eq_( joincond.local_remote_pairs, [ (self.composite_selfref.c.group_id, self.composite_selfref.c.group_id), (self.composite_selfref.c.id, self.composite_selfref.c.parent_id), ] ) def test_determine_local_remote_pairs_o2m_composite_selfref_func_warning(self): self._assert_non_simple_warning( self._join_fixture_o2m_composite_selfref_func ) def test_determine_local_remote_pairs_o2m_overlap_func_warning(self): self._assert_non_simple_warning( self._join_fixture_m2o_sub_to_joined_sub_func ) def test_determine_local_remote_pairs_o2m_composite_selfref_func_annotated(self): joincond = self._join_fixture_o2m_composite_selfref_func_annotated() eq_( joincond.local_remote_pairs, [ (self.composite_selfref.c.group_id, self.composite_selfref.c.group_id), (self.composite_selfref.c.id, self.composite_selfref.c.parent_id), ] ) def test_determine_remote_columns_m2o_composite_selfref(self): joincond = self._join_fixture_m2o_composite_selfref() eq_( joincond.remote_columns, set([self.composite_selfref.c.id, self.composite_selfref.c.group_id]) ) def test_determine_remote_columns_m2o(self): joincond = self._join_fixture_m2o() eq_( joincond.remote_columns, set([self.left.c.id]) ) def test_determine_local_remote_pairs_o2m(self): joincond = self._join_fixture_o2m() eq_( joincond.local_remote_pairs, [(self.left.c.id, self.right.c.lid)] ) def test_determine_synchronize_pairs_m2m(self): joincond = self._join_fixture_m2m() eq_( joincond.synchronize_pairs, [(self.m2mleft.c.id, self.m2msecondary.c.lid)] ) eq_( joincond.secondary_synchronize_pairs, [(self.m2mright.c.id, self.m2msecondary.c.rid)] ) def test_determine_local_remote_pairs_o2m_backref(self): joincond = self._join_fixture_o2m() joincond2 = self._join_fixture_m2o( primaryjoin=joincond.primaryjoin_reverse_remote, ) eq_( 
joincond2.local_remote_pairs, [(self.right.c.lid, self.left.c.id)] ) def test_determine_local_remote_pairs_m2m(self): joincond = self._join_fixture_m2m() eq_( joincond.local_remote_pairs, [(self.m2mleft.c.id, self.m2msecondary.c.lid), (self.m2mright.c.id, self.m2msecondary.c.rid)] ) def test_determine_local_remote_pairs_m2m_backref(self): j1, j2 = self._join_fixture_m2m_backref() eq_( j1.local_remote_pairs, [(self.m2mleft.c.id, self.m2msecondary.c.lid), (self.m2mright.c.id, self.m2msecondary.c.rid)] ) eq_( j2.local_remote_pairs, [ (self.m2mright.c.id, self.m2msecondary.c.rid), (self.m2mleft.c.id, self.m2msecondary.c.lid), ] ) def test_determine_local_columns_m2m_backref(self): j1, j2 = self._join_fixture_m2m_backref() eq_( j1.local_columns, set([self.m2mleft.c.id]) ) eq_( j2.local_columns, set([self.m2mright.c.id]) ) def test_determine_remote_columns_m2m_backref(self): j1, j2 = self._join_fixture_m2m_backref() eq_( j1.remote_columns, set([self.m2msecondary.c.lid, self.m2msecondary.c.rid]) ) eq_( j2.remote_columns, set([self.m2msecondary.c.lid, self.m2msecondary.c.rid]) ) def test_determine_remote_columns_m2o_selfref(self): joincond = self._join_fixture_m2o_selfref() eq_( joincond.remote_columns, set([self.selfref.c.id]) ) def test_determine_local_remote_cols_three_tab_viewonly(self): joincond = self._join_fixture_overlapping_three_tables() eq_( joincond.local_remote_pairs, [(self.three_tab_a.c.id, self.three_tab_b.c.aid)] ) eq_( joincond.remote_columns, set([self.three_tab_b.c.id, self.three_tab_b.c.aid]) ) class DirectionTest(_JoinFixtures, fixtures.TestBase, AssertsCompiledSQL): def test_determine_direction_compound_2(self): joincond = self._join_fixture_compound_expression_2( support_sync=False) is_( joincond.direction, ONETOMANY ) def test_determine_direction_o2m(self): joincond = self._join_fixture_o2m() is_(joincond.direction, ONETOMANY) def test_determine_direction_o2m_selfref(self): joincond = self._join_fixture_o2m_selfref() is_(joincond.direction, ONETOMANY) def test_determine_direction_m2o_selfref(self): joincond = self._join_fixture_m2o_selfref() is_(joincond.direction, MANYTOONE) def test_determine_direction_o2m_composite_selfref(self): joincond = self._join_fixture_o2m_composite_selfref() is_(joincond.direction, ONETOMANY) def test_determine_direction_m2o_composite_selfref(self): joincond = self._join_fixture_m2o_composite_selfref() is_(joincond.direction, MANYTOONE) def test_determine_direction_m2o(self): joincond = self._join_fixture_m2o() is_(joincond.direction, MANYTOONE) class DetermineJoinTest(_JoinFixtures, fixtures.TestBase, AssertsCompiledSQL): __dialect__ = 'default' def test_determine_join_o2m(self): joincond = self._join_fixture_o2m() self.assert_compile( joincond.primaryjoin, "lft.id = rgt.lid" ) def test_determine_join_o2m_selfref(self): joincond = self._join_fixture_o2m_selfref() self.assert_compile( joincond.primaryjoin, "selfref.id = selfref.sid" ) def test_determine_join_m2o_selfref(self): joincond = self._join_fixture_m2o_selfref() self.assert_compile( joincond.primaryjoin, "selfref.id = selfref.sid" ) def test_determine_join_o2m_composite_selfref(self): joincond = self._join_fixture_o2m_composite_selfref() self.assert_compile( joincond.primaryjoin, "composite_selfref.group_id = composite_selfref.group_id " "AND composite_selfref.id = composite_selfref.parent_id" ) def test_determine_join_m2o_composite_selfref(self): joincond = self._join_fixture_m2o_composite_selfref() self.assert_compile( joincond.primaryjoin, "composite_selfref.group_id = 
composite_selfref.group_id " "AND composite_selfref.id = composite_selfref.parent_id" ) def test_determine_join_m2o(self): joincond = self._join_fixture_m2o() self.assert_compile( joincond.primaryjoin, "lft.id = rgt.lid" ) def test_determine_join_ambiguous_fks_o2m(self): assert_raises_message( exc.AmbiguousForeignKeysError, "Could not determine join condition between " "parent/child tables on relationship None - " "there are multiple foreign key paths linking " "the tables. Specify the 'foreign_keys' argument, " "providing a list of those columns which " "should be counted as containing a foreign " "key reference to the parent table.", relationships.JoinCondition, self.left, self.right_multi_fk, self.left, self.right_multi_fk, ) def test_determine_join_no_fks_o2m(self): self._assert_raises_no_join( relationships.JoinCondition, "None", None, self.left, self.selfref, self.left, self.selfref, ) def test_determine_join_ambiguous_fks_m2m(self): self._assert_raises_ambig_join( relationships.JoinCondition, "None", self.m2msecondary_ambig_fks, self.m2mleft, self.m2mright, self.m2mleft, self.m2mright, secondary=self.m2msecondary_ambig_fks ) def test_determine_join_no_fks_m2m(self): self._assert_raises_no_join( relationships.JoinCondition, "None", self.m2msecondary_no_fks, self.m2mleft, self.m2mright, self.m2mleft, self.m2mright, secondary=self.m2msecondary_no_fks ) def _join_fixture_fks_ambig_m2m(self): return relationships.JoinCondition( self.m2mleft, self.m2mright, self.m2mleft, self.m2mright, secondary=self.m2msecondary_ambig_fks, consider_as_foreign_keys=[ self.m2msecondary_ambig_fks.c.lid1, self.m2msecondary_ambig_fks.c.rid1] ) def test_determine_join_w_fks_ambig_m2m(self): joincond = self._join_fixture_fks_ambig_m2m() self.assert_compile( joincond.primaryjoin, "m2mlft.id = m2msecondary_ambig_fks.lid1" ) self.assert_compile( joincond.secondaryjoin, "m2mrgt.id = m2msecondary_ambig_fks.rid1" ) class AdaptedJoinTest(_JoinFixtures, fixtures.TestBase, AssertsCompiledSQL): __dialect__ = 'default' def test_join_targets_o2m_selfref(self): joincond = self._join_fixture_o2m_selfref() left = select([joincond.parent_selectable]).alias('pj') pj, sj, sec, adapter, ds = joincond.join_targets( left, joincond.child_selectable, True) self.assert_compile( pj, "pj.id = selfref.sid" ) right = select([joincond.child_selectable]).alias('pj') pj, sj, sec, adapter, ds = joincond.join_targets( joincond.parent_selectable, right, True) self.assert_compile( pj, "selfref.id = pj.sid" ) def test_join_targets_o2m_plain(self): joincond = self._join_fixture_o2m() pj, sj, sec, adapter, ds = joincond.join_targets( joincond.parent_selectable, joincond.child_selectable, False) self.assert_compile( pj, "lft.id = rgt.lid" ) def test_join_targets_o2m_left_aliased(self): joincond = self._join_fixture_o2m() left = select([joincond.parent_selectable]).alias('pj') pj, sj, sec, adapter, ds = joincond.join_targets( left, joincond.child_selectable, True) self.assert_compile( pj, "pj.id = rgt.lid" ) def test_join_targets_o2m_right_aliased(self): joincond = self._join_fixture_o2m() right = select([joincond.child_selectable]).alias('pj') pj, sj, sec, adapter, ds = joincond.join_targets( joincond.parent_selectable, right, True) self.assert_compile( pj, "lft.id = pj.lid" ) def test_join_targets_o2m_composite_selfref(self): joincond = self._join_fixture_o2m_composite_selfref() right = select([joincond.child_selectable]).alias('pj') pj, sj, sec, adapter, ds = joincond.join_targets( joincond.parent_selectable, right, True) self.assert_compile( pj, 
"pj.group_id = composite_selfref.group_id " "AND composite_selfref.id = pj.parent_id" ) def test_join_targets_m2o_composite_selfref(self): joincond = self._join_fixture_m2o_composite_selfref() right = select([joincond.child_selectable]).alias('pj') pj, sj, sec, adapter, ds = joincond.join_targets( joincond.parent_selectable, right, True) self.assert_compile( pj, "pj.group_id = composite_selfref.group_id " "AND pj.id = composite_selfref.parent_id" ) class LazyClauseTest(_JoinFixtures, fixtures.TestBase, AssertsCompiledSQL): __dialect__ = 'default' def test_lazy_clause_o2m(self): joincond = self._join_fixture_o2m() lazywhere, bind_to_col, equated_columns = joincond.create_lazy_clause() self.assert_compile( lazywhere, ":param_1 = rgt.lid" ) def test_lazy_clause_o2m_reverse(self): joincond = self._join_fixture_o2m() lazywhere, bind_to_col, equated_columns =\ joincond.create_lazy_clause(reverse_direction=True) self.assert_compile( lazywhere, "lft.id = :param_1" ) SQLAlchemy-0.8.4/test/orm/test_relationships.py0000644000076500000240000032262112251150016022301 0ustar classicstaff00000000000000from sqlalchemy.testing import assert_raises, assert_raises_message import datetime import sqlalchemy as sa from sqlalchemy import testing from sqlalchemy import Integer, String, ForeignKey, MetaData, and_ from sqlalchemy.testing.schema import Table, Column from sqlalchemy.orm import mapper, relationship, relation, \ backref, create_session, configure_mappers, \ clear_mappers, sessionmaker, attributes,\ Session, composite, column_property, foreign,\ remote, synonym from sqlalchemy.orm.interfaces import ONETOMANY, MANYTOONE, MANYTOMANY from sqlalchemy.testing import eq_, startswith_, AssertsCompiledSQL, is_ from sqlalchemy.testing import fixtures from test.orm import _fixtures from sqlalchemy import exc class _RelationshipErrors(object): def _assert_raises_no_relevant_fks(self, fn, expr, relname, primary, *arg, **kw): assert_raises_message( sa.exc.ArgumentError, "Could not locate any relevant foreign key columns " "for %s join condition '%s' on relationship %s. " "Ensure that referencing columns are associated with " "a ForeignKey or ForeignKeyConstraint, or are annotated " r"in the join condition with the foreign\(\) annotation." % ( primary, expr, relname ), fn, *arg, **kw ) def _assert_raises_no_equality(self, fn, expr, relname, primary, *arg, **kw): assert_raises_message( sa.exc.ArgumentError, "Could not locate any simple equality expressions " "involving locally mapped foreign key columns for %s join " "condition '%s' on relationship %s. " "Ensure that referencing columns are associated with a " "ForeignKey or ForeignKeyConstraint, or are annotated in " r"the join condition with the foreign\(\) annotation. " "To allow comparison operators other than '==', " "the relationship can be marked as viewonly=True." % ( primary, expr, relname ), fn, *arg, **kw ) def _assert_raises_ambig_join(self, fn, relname, secondary_arg, *arg, **kw): if secondary_arg is not None: assert_raises_message( exc.ArgumentError, "Could not determine join condition between " "parent/child tables on relationship %s - " "there are multiple foreign key paths linking the " "tables via secondary table '%s'. " "Specify the 'foreign_keys' argument, providing a list " "of those columns which should be counted as " "containing a foreign key reference from the " "secondary table to each of the parent and child tables." 
% (relname, secondary_arg), fn, *arg, **kw) else: assert_raises_message( exc.ArgumentError, "Could not determine join " "condition between parent/child tables on " "relationship %s - there are multiple foreign key " "paths linking the tables. Specify the " "'foreign_keys' argument, providing a list of those " "columns which should be counted as containing a " "foreign key reference to the parent table." % (relname,), fn, *arg, **kw) def _assert_raises_no_join(self, fn, relname, secondary_arg, *arg, **kw): if secondary_arg is not None: assert_raises_message( exc.NoForeignKeysError, "Could not determine join condition between " "parent/child tables on relationship %s - " "there are no foreign keys linking these tables " "via secondary table '%s'. " "Ensure that referencing columns are associated with a ForeignKey " "or ForeignKeyConstraint, or specify 'primaryjoin' and " "'secondaryjoin' expressions" % (relname, secondary_arg), fn, *arg, **kw) else: assert_raises_message( exc.NoForeignKeysError, "Could not determine join condition between " "parent/child tables on relationship %s - " "there are no foreign keys linking these tables. " "Ensure that referencing columns are associated with a ForeignKey " "or ForeignKeyConstraint, or specify a 'primaryjoin' " "expression." % (relname,), fn, *arg, **kw) def _assert_raises_ambiguous_direction(self, fn, relname, *arg, **kw): assert_raises_message( sa.exc.ArgumentError, "Can't determine relationship" " direction for relationship '%s' - foreign " "key columns within the join condition are present " "in both the parent and the child's mapped tables. " "Ensure that only those columns referring to a parent column " r"are marked as foreign, either via the foreign\(\) annotation or " "via the foreign_keys argument." % relname, fn, *arg, **kw ) def _assert_raises_no_local_remote(self, fn, relname, *arg, **kw): assert_raises_message( sa.exc.ArgumentError, "Relationship %s could not determine " "any unambiguous local/remote column " "pairs based on join condition and remote_side arguments. " r"Consider using the remote\(\) annotation to " "accurately mark those elements of the join " "condition that are on the remote side of the relationship." 
% relname, fn, *arg, **kw ) class DependencyTwoParentTest(fixtures.MappedTest): """Test flush() when a mapper is dependent on multiple relationships""" run_setup_mappers = 'once' run_inserts = 'once' run_deletes = None @classmethod def define_tables(cls, metadata): Table("tbl_a", metadata, Column("id", Integer, primary_key=True, test_needs_autoincrement=True), Column("name", String(128))) Table("tbl_b", metadata, Column("id", Integer, primary_key=True, test_needs_autoincrement=True), Column("name", String(128))) Table("tbl_c", metadata, Column("id", Integer, primary_key=True, test_needs_autoincrement=True), Column("tbl_a_id", Integer, ForeignKey("tbl_a.id"), nullable=False), Column("name", String(128))) Table("tbl_d", metadata, Column("id", Integer, primary_key=True, test_needs_autoincrement=True), Column("tbl_c_id", Integer, ForeignKey("tbl_c.id"), nullable=False), Column("tbl_b_id", Integer, ForeignKey("tbl_b.id")), Column("name", String(128))) @classmethod def setup_classes(cls): class A(cls.Basic): pass class B(cls.Basic): pass class C(cls.Basic): pass class D(cls.Basic): pass @classmethod def setup_mappers(cls): A, C, B, D, tbl_b, tbl_c, tbl_a, tbl_d = (cls.classes.A, cls.classes.C, cls.classes.B, cls.classes.D, cls.tables.tbl_b, cls.tables.tbl_c, cls.tables.tbl_a, cls.tables.tbl_d) mapper(A, tbl_a, properties=dict( c_rows=relationship(C, cascade="all, delete-orphan", backref="a_row"))) mapper(B, tbl_b) mapper(C, tbl_c, properties=dict( d_rows=relationship(D, cascade="all, delete-orphan", backref="c_row"))) mapper(D, tbl_d, properties=dict( b_row=relationship(B))) @classmethod def insert_data(cls): A, C, B, D = (cls.classes.A, cls.classes.C, cls.classes.B, cls.classes.D) session = create_session() a = A(name='a1') b = B(name='b1') c = C(name='c1', a_row=a) d1 = D(name='d1', b_row=b, c_row=c) d2 = D(name='d2', b_row=b, c_row=c) d3 = D(name='d3', b_row=b, c_row=c) session.add(a) session.add(b) session.flush() def testDeleteRootTable(self): A = self.classes.A session = create_session() a = session.query(A).filter_by(name='a1').one() session.delete(a) session.flush() def testDeleteMiddleTable(self): C = self.classes.C session = create_session() c = session.query(C).filter_by(name='c1').one() session.delete(c) session.flush() class CompositeSelfRefFKTest(fixtures.MappedTest): """Tests a composite FK where, in the relationship(), one col points to itself in the same table. this is a very unusual case:: company employee ---------- ---------- company_id <--- company_id ------+ name ^ | +------------+ emp_id <---------+ name | reports_to_id ---+ employee joins to its sub-employees both on reports_to_id, *and on company_id to itself*. 
""" @classmethod def define_tables(cls, metadata): Table('company_t', metadata, Column('company_id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', String(30))) Table('employee_t', metadata, Column('company_id', Integer, primary_key=True), Column('emp_id', Integer, primary_key=True), Column('name', String(30)), Column('reports_to_id', Integer), sa.ForeignKeyConstraint( ['company_id'], ['company_t.company_id']), sa.ForeignKeyConstraint( ['company_id', 'reports_to_id'], ['employee_t.company_id', 'employee_t.emp_id'])) @classmethod def setup_classes(cls): class Company(cls.Basic): def __init__(self, name): self.name = name class Employee(cls.Basic): def __init__(self, name, company, emp_id, reports_to=None): self.name = name self.company = company self.emp_id = emp_id self.reports_to = reports_to def test_explicit(self): Employee, Company, employee_t, company_t = (self.classes.Employee, self.classes.Company, self.tables.employee_t, self.tables.company_t) mapper(Company, company_t) mapper(Employee, employee_t, properties= { 'company':relationship(Company, primaryjoin=employee_t.c.company_id== company_t.c.company_id, backref='employees'), 'reports_to':relationship(Employee, primaryjoin= sa.and_( employee_t.c.emp_id==employee_t.c.reports_to_id, employee_t.c.company_id==employee_t.c.company_id ), remote_side=[employee_t.c.emp_id, employee_t.c.company_id], foreign_keys=[employee_t.c.reports_to_id, employee_t.c.company_id], backref=backref('employees', foreign_keys=[employee_t.c.reports_to_id, employee_t.c.company_id])) }) self._test() def test_implicit(self): Employee, Company, employee_t, company_t = (self.classes.Employee, self.classes.Company, self.tables.employee_t, self.tables.company_t) mapper(Company, company_t) mapper(Employee, employee_t, properties= { 'company':relationship(Company, backref='employees'), 'reports_to':relationship(Employee, remote_side=[employee_t.c.emp_id, employee_t.c.company_id], foreign_keys=[employee_t.c.reports_to_id, employee_t.c.company_id], backref=backref('employees', foreign_keys= [employee_t.c.reports_to_id, employee_t.c.company_id]) ) }) self._test() def test_very_implicit(self): Employee, Company, employee_t, company_t = (self.classes.Employee, self.classes.Company, self.tables.employee_t, self.tables.company_t) mapper(Company, company_t) mapper(Employee, employee_t, properties= { 'company':relationship(Company, backref='employees'), 'reports_to':relationship(Employee, remote_side=[employee_t.c.emp_id, employee_t.c.company_id], backref='employees' ) }) self._test() def test_very_explicit(self): Employee, Company, employee_t, company_t = (self.classes.Employee, self.classes.Company, self.tables.employee_t, self.tables.company_t) mapper(Company, company_t) mapper(Employee, employee_t, properties= { 'company':relationship(Company, backref='employees'), 'reports_to':relationship(Employee, _local_remote_pairs = [ (employee_t.c.reports_to_id, employee_t.c.emp_id), (employee_t.c.company_id, employee_t.c.company_id) ], foreign_keys=[employee_t.c.reports_to_id, employee_t.c.company_id], backref=backref('employees', foreign_keys= [employee_t.c.reports_to_id, employee_t.c.company_id]) ) }) self._test() def test_annotated(self): Employee, Company, employee_t, company_t = (self.classes.Employee, self.classes.Company, self.tables.employee_t, self.tables.company_t) mapper(Company, company_t) mapper(Employee, employee_t, properties= { 'company':relationship(Company, backref='employees'), 'reports_to':relationship(Employee, primaryjoin=sa.and_( 
remote(employee_t.c.emp_id)==employee_t.c.reports_to_id, remote(employee_t.c.company_id)==employee_t.c.company_id ), backref=backref('employees') ) }) self._test() def _test(self): self._test_relationships() sess = Session() self._setup_data(sess) self._test_lazy_relations(sess) self._test_join_aliasing(sess) def _test_relationships(self): configure_mappers() Employee = self.classes.Employee employee_t = self.tables.employee_t eq_( set(Employee.employees.property.local_remote_pairs), set([ (employee_t.c.company_id, employee_t.c.company_id), (employee_t.c.emp_id, employee_t.c.reports_to_id), ]) ) eq_( Employee.employees.property.remote_side, set([employee_t.c.company_id, employee_t.c.reports_to_id]) ) eq_( set(Employee.reports_to.property.local_remote_pairs), set([ (employee_t.c.company_id, employee_t.c.company_id), (employee_t.c.reports_to_id, employee_t.c.emp_id), ]) ) def _setup_data(self, sess): Employee, Company = self.classes.Employee, self.classes.Company c1 = Company('c1') c2 = Company('c2') e1 = Employee(u'emp1', c1, 1) e2 = Employee(u'emp2', c1, 2, e1) e3 = Employee(u'emp3', c1, 3, e1) e4 = Employee(u'emp4', c1, 4, e3) e5 = Employee(u'emp5', c2, 1) e6 = Employee(u'emp6', c2, 2, e5) e7 = Employee(u'emp7', c2, 3, e5) sess.add_all((c1, c2)) sess.commit() sess.close() def _test_lazy_relations(self, sess): Employee, Company = self.classes.Employee, self.classes.Company c1 = sess.query(Company).filter_by(name='c1').one() c2 = sess.query(Company).filter_by(name='c2').one() e1 = sess.query(Employee).filter_by(name='emp1').one() e5 = sess.query(Employee).filter_by(name='emp5').one() test_e1 = sess.query(Employee).get([c1.company_id, e1.emp_id]) assert test_e1.name == 'emp1', test_e1.name test_e5 = sess.query(Employee).get([c2.company_id, e5.emp_id]) assert test_e5.name == 'emp5', test_e5.name assert [x.name for x in test_e1.employees] == ['emp2', 'emp3'] assert sess.query(Employee).\ get([c1.company_id, 3]).reports_to.name == 'emp1' assert sess.query(Employee).\ get([c2.company_id, 3]).reports_to.name == 'emp5' def _test_join_aliasing(self, sess): Employee, Company = self.classes.Employee, self.classes.Company eq_( [n for n, in sess.query(Employee.name).\ join(Employee.reports_to, aliased=True).\ filter_by(name='emp5').\ reset_joinpoint().\ order_by(Employee.name)], ['emp6', 'emp7'] ) class CompositeJoinPartialFK(fixtures.MappedTest, AssertsCompiledSQL): __dialect__ = 'default' @classmethod def define_tables(cls, metadata): Table("parent", metadata, Column('x', Integer, primary_key=True), Column('y', Integer, primary_key=True), Column('z', Integer), ) Table("child", metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('x', Integer), Column('y', Integer), Column('z', Integer), # note 'z' is not here sa.ForeignKeyConstraint( ["x", "y"], ["parent.x", "parent.y"] ) ) @classmethod def setup_mappers(cls): parent, child = cls.tables.parent, cls.tables.child class Parent(cls.Comparable): pass class Child(cls.Comparable): pass mapper(Parent, parent, properties={ 'children':relationship(Child, primaryjoin=and_( parent.c.x==child.c.x, parent.c.y==child.c.y, parent.c.z==child.c.z, )) }) mapper(Child, child) def test_joins_fully(self): Parent, Child = self.classes.Parent, self.classes.Child s = Session() self.assert_compile( Parent.children.property.strategy._lazywhere, ":param_1 = child.x AND :param_2 = child.y AND :param_3 = child.z" ) class SynonymsAsFKsTest(fixtures.MappedTest): """Syncrules on foreign keys that are also primary""" @classmethod def 
define_tables(cls, metadata): Table("tableA", metadata, Column("id",Integer,primary_key=True, test_needs_autoincrement=True), Column("foo",Integer,), test_needs_fk=True) Table("tableB",metadata, Column("id",Integer,primary_key=True, test_needs_autoincrement=True), Column("_a_id", Integer, key='a_id', primary_key=True), test_needs_fk=True) @classmethod def setup_classes(cls): class A(cls.Basic): pass class B(cls.Basic): @property def a_id(self): return self._a_id def test_synonym_fk(self): """test that active history is enabled on a one-to-many/one that has use_get==True""" tableB, A, B, tableA = (self.tables.tableB, self.classes.A, self.classes.B, self.tables.tableA) mapper(B, tableB, properties={ 'a_id': synonym('_a_id', map_column=True)}) mapper(A, tableA, properties={ 'b': relationship(B, primaryjoin=(tableA.c.id == foreign(B.a_id)), uselist=False)}) sess = create_session() b = B(id=0) a = A(id=0, b=b) sess.add(a) sess.add(b) sess.flush() sess.expunge_all() assert a.b == b assert a.id == b.a_id assert a.id == b._a_id class FKsAsPksTest(fixtures.MappedTest): """Syncrules on foreign keys that are also primary""" @classmethod def define_tables(cls, metadata): Table("tableA", metadata, Column("id",Integer,primary_key=True, test_needs_autoincrement=True), Column("foo",Integer,), test_needs_fk=True) Table("tableB",metadata, Column("id",Integer,ForeignKey("tableA.id"),primary_key=True), test_needs_fk=True) @classmethod def setup_classes(cls): class A(cls.Basic): pass class B(cls.Basic): pass def test_onetoone_switch(self): """test that active history is enabled on a one-to-many/one that has use_get==True""" tableB, A, B, tableA = (self.tables.tableB, self.classes.A, self.classes.B, self.tables.tableA) mapper(A, tableA, properties={ 'b':relationship(B, cascade="all,delete-orphan", uselist=False)}) mapper(B, tableB) configure_mappers() assert A.b.property.strategy.use_get sess = create_session() a1 = A() sess.add(a1) sess.flush() sess.close() a1 = sess.query(A).first() a1.b = B() sess.flush() def test_no_delete_PK_AtoB(self): """A cant be deleted without B because B would have no PK value.""" tableB, A, B, tableA = (self.tables.tableB, self.classes.A, self.classes.B, self.tables.tableA) mapper(A, tableA, properties={ 'bs':relationship(B, cascade="save-update")}) mapper(B, tableB) a1 = A() a1.bs.append(B()) sess = create_session() sess.add(a1) sess.flush() sess.delete(a1) try: sess.flush() assert False except AssertionError, e: startswith_(str(e), "Dependency rule tried to blank-out " "primary key column 'tableB.id' on instance ") def test_no_delete_PK_BtoA(self): tableB, A, B, tableA = (self.tables.tableB, self.classes.A, self.classes.B, self.tables.tableA) mapper(B, tableB, properties={ 'a':relationship(A, cascade="save-update")}) mapper(A, tableA) b1 = B() a1 = A() b1.a = a1 sess = create_session() sess.add(b1) sess.flush() b1.a = None try: sess.flush() assert False except AssertionError, e: startswith_(str(e), "Dependency rule tried to blank-out " "primary key column 'tableB.id' on instance ") @testing.fails_on_everything_except('sqlite', 'mysql') def test_nullPKsOK_BtoA(self): A, tableA = self.classes.A, self.tables.tableA # postgresql cant handle a nullable PK column...? 
tableC = Table('tablec', tableA.metadata, Column('id', Integer, primary_key=True), Column('a_id', Integer, ForeignKey('tableA.id'), primary_key=True, autoincrement=False, nullable=True)) tableC.create() class C(fixtures.BasicEntity): pass mapper(C, tableC, properties={ 'a':relationship(A, cascade="save-update") }) mapper(A, tableA) c1 = C() c1.id = 5 c1.a = None sess = create_session() sess.add(c1) # test that no error is raised. sess.flush() def test_delete_cascade_BtoA(self): """No 'blank the PK' error when the child is to be deleted as part of a cascade""" tableB, A, B, tableA = (self.tables.tableB, self.classes.A, self.classes.B, self.tables.tableA) for cascade in ("save-update, delete", #"save-update, delete-orphan", "save-update, delete, delete-orphan"): mapper(B, tableB, properties={ 'a':relationship(A, cascade=cascade, single_parent=True) }) mapper(A, tableA) b1 = B() a1 = A() b1.a = a1 sess = create_session() sess.add(b1) sess.flush() sess.delete(b1) sess.flush() assert a1 not in sess assert b1 not in sess sess.expunge_all() sa.orm.clear_mappers() def test_delete_cascade_AtoB(self): """No 'blank the PK' error when the child is to be deleted as part of a cascade""" tableB, A, B, tableA = (self.tables.tableB, self.classes.A, self.classes.B, self.tables.tableA) for cascade in ("save-update, delete", #"save-update, delete-orphan", "save-update, delete, delete-orphan"): mapper(A, tableA, properties={ 'bs':relationship(B, cascade=cascade) }) mapper(B, tableB) a1 = A() b1 = B() a1.bs.append(b1) sess = create_session() sess.add(a1) sess.flush() sess.delete(a1) sess.flush() assert a1 not in sess assert b1 not in sess sess.expunge_all() sa.orm.clear_mappers() def test_delete_manual_AtoB(self): tableB, A, B, tableA = (self.tables.tableB, self.classes.A, self.classes.B, self.tables.tableA) mapper(A, tableA, properties={ 'bs':relationship(B, cascade="none")}) mapper(B, tableB) a1 = A() b1 = B() a1.bs.append(b1) sess = create_session() sess.add(a1) sess.add(b1) sess.flush() sess.delete(a1) sess.delete(b1) sess.flush() assert a1 not in sess assert b1 not in sess sess.expunge_all() def test_delete_manual_BtoA(self): tableB, A, B, tableA = (self.tables.tableB, self.classes.A, self.classes.B, self.tables.tableA) mapper(B, tableB, properties={ 'a':relationship(A, cascade="none")}) mapper(A, tableA) b1 = B() a1 = A() b1.a = a1 sess = create_session() sess.add(b1) sess.add(a1) sess.flush() sess.delete(b1) sess.delete(a1) sess.flush() assert a1 not in sess assert b1 not in sess class UniqueColReferenceSwitchTest(fixtures.MappedTest): """test a relationship based on a primary join against a unique non-pk column""" @classmethod def define_tables(cls, metadata): Table("table_a", metadata, Column("id", Integer, primary_key=True, test_needs_autoincrement=True), Column("ident", String(10), nullable=False, unique=True), ) Table("table_b", metadata, Column("id", Integer, primary_key=True, test_needs_autoincrement=True), Column("a_ident", String(10), ForeignKey('table_a.ident'), nullable=False), ) @classmethod def setup_classes(cls): class A(cls.Comparable): pass class B(cls.Comparable): pass def test_switch_parent(self): A, B, table_b, table_a = (self.classes.A, self.classes.B, self.tables.table_b, self.tables.table_a) mapper(A, table_a) mapper(B, table_b, properties={"a": relationship(A, backref="bs")}) session = create_session() a1, a2 = A(ident="uuid1"), A(ident="uuid2") session.add_all([a1, a2]) a1.bs = [ B(), B() ] session.flush() session.expire_all() a1, a2 = session.query(A).all() for b in list(a1.bs): 
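            # repoint each child at a2 before a1 is deleted, so the flush below has to
            # rewrite table_b.a_ident to the new parent's unique ident rather than
            # blanking it out as part of the delete.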
b.a = a2 session.delete(a1) session.flush() class RelationshipToSelectableTest(fixtures.MappedTest): """Test a map to a select that relates to a map to the table.""" @classmethod def define_tables(cls, metadata): Table('items', metadata, Column('item_policy_num', String(10), primary_key=True, key='policyNum'), Column('item_policy_eff_date', sa.Date, primary_key=True, key='policyEffDate'), Column('item_type', String(20), primary_key=True, key='type'), Column('item_id', Integer, primary_key=True, key='id', autoincrement=False)) def test_basic(self): items = self.tables.items class Container(fixtures.BasicEntity): pass class LineItem(fixtures.BasicEntity): pass container_select = sa.select( [items.c.policyNum, items.c.policyEffDate, items.c.type], distinct=True, ).alias('container_select') mapper(LineItem, items) mapper(Container, container_select, order_by=sa.asc(container_select.c.type), properties=dict( lineItems=relationship(LineItem, lazy='select', cascade='all, delete-orphan', order_by=sa.asc(items.c.id), primaryjoin=sa.and_( container_select.c.policyNum==items.c.policyNum, container_select.c.policyEffDate== items.c.policyEffDate, container_select.c.type==items.c.type), foreign_keys=[ items.c.policyNum, items.c.policyEffDate, items.c.type]))) session = create_session() con = Container() con.policyNum = "99" con.policyEffDate = datetime.date.today() con.type = "TESTER" session.add(con) for i in range(0, 10): li = LineItem() li.id = i con.lineItems.append(li) session.add(li) session.flush() session.expunge_all() newcon = session.query(Container).first() assert con.policyNum == newcon.policyNum assert len(newcon.lineItems) == 10 for old, new in zip(con.lineItems, newcon.lineItems): eq_(old.id, new.id) class FKEquatedToConstantTest(fixtures.MappedTest): """test a relationship with a non-column entity in the primary join, is not viewonly, and also has the non-column's clause mentioned in the foreign keys list. 
""" @classmethod def define_tables(cls, metadata): Table('tags', metadata, Column("id", Integer, primary_key=True, test_needs_autoincrement=True), Column("data", String(50)), ) Table('tag_foo', metadata, Column("id", Integer, primary_key=True, test_needs_autoincrement=True), Column('tagid', Integer), Column("data", String(50)), ) def test_basic(self): tag_foo, tags = self.tables.tag_foo, self.tables.tags class Tag(fixtures.ComparableEntity): pass class TagInstance(fixtures.ComparableEntity): pass mapper(Tag, tags, properties={ 'foo':relationship(TagInstance, primaryjoin=sa.and_(tag_foo.c.data=='iplc_case', tag_foo.c.tagid==tags.c.id), foreign_keys=[tag_foo.c.tagid, tag_foo.c.data], ), }) mapper(TagInstance, tag_foo) sess = create_session() t1 = Tag(data='some tag') t1.foo.append(TagInstance(data='iplc_case')) t1.foo.append(TagInstance(data='not_iplc_case')) sess.add(t1) sess.flush() sess.expunge_all() # relationship works eq_( sess.query(Tag).all(), [Tag(data='some tag', foo=[TagInstance(data='iplc_case')])] ) # both TagInstances were persisted eq_( sess.query(TagInstance).order_by(TagInstance.data).all(), [TagInstance(data='iplc_case'), TagInstance(data='not_iplc_case')] ) class BackrefPropagatesForwardsArgs(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): Table('users', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', String(50)) ) Table('addresses', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('user_id', Integer), Column('email', String(50)) ) @classmethod def setup_classes(cls): class User(cls.Comparable): pass class Address(cls.Comparable): pass def test_backref(self): User, Address, users, addresses = (self.classes.User, self.classes.Address, self.tables.users, self.tables.addresses) mapper(User, users, properties={ 'addresses':relationship(Address, primaryjoin=addresses.c.user_id==users.c.id, foreign_keys=addresses.c.user_id, backref='user') }) mapper(Address, addresses) sess = sessionmaker()() u1 = User(name='u1', addresses=[Address(email='a1')]) sess.add(u1) sess.commit() eq_(sess.query(Address).all(), [ Address(email='a1', user=User(name='u1')) ]) class AmbiguousJoinInterpretedAsSelfRef(fixtures.MappedTest): """test ambiguous joins due to FKs on both sides treated as self-referential. this mapping is very similar to that of test/orm/inheritance/query.py SelfReferentialTestJoinedToBase , except that inheritance is not used here. 
""" @classmethod def define_tables(cls, metadata): subscriber_table = Table('subscriber', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), ) address_table = Table('address', metadata, Column('subscriber_id', Integer, ForeignKey('subscriber.id'), primary_key=True), Column('type', String(1), primary_key=True), ) @classmethod def setup_mappers(cls): subscriber, address = cls.tables.subscriber, cls.tables.address subscriber_and_address = subscriber.join(address, and_(address.c.subscriber_id==subscriber.c.id, address.c.type.in_(['A', 'B', 'C']))) class Address(cls.Comparable): pass class Subscriber(cls.Comparable): pass mapper(Address, address) mapper(Subscriber, subscriber_and_address, properties={ 'id':[subscriber.c.id, address.c.subscriber_id], 'addresses' : relationship(Address, backref=backref("customer")) }) def test_mapping(self): Subscriber, Address = self.classes.Subscriber, self.classes.Address sess = create_session() assert Subscriber.addresses.property.direction is ONETOMANY assert Address.customer.property.direction is MANYTOONE s1 = Subscriber(type='A', addresses = [ Address(type='D'), Address(type='E'), ] ) a1 = Address(type='B', customer=Subscriber(type='C')) assert s1.addresses[0].customer is s1 assert a1.customer.addresses[0] is a1 sess.add_all([s1, a1]) sess.flush() sess.expunge_all() eq_( sess.query(Subscriber).order_by(Subscriber.type).all(), [ Subscriber(id=1, type=u'A'), Subscriber(id=2, type=u'B'), Subscriber(id=2, type=u'C') ] ) class ManualBackrefTest(_fixtures.FixtureTest): """Test explicit relationships that are backrefs to each other.""" run_inserts = None def test_o2m(self): users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(User, users, properties={ 'addresses':relationship(Address, back_populates='user') }) mapper(Address, addresses, properties={ 'user':relationship(User, back_populates='addresses') }) sess = create_session() u1 = User(name='u1') a1 = Address(email_address='foo') u1.addresses.append(a1) assert a1.user is u1 sess.add(u1) sess.flush() sess.expire_all() assert sess.query(Address).one() is a1 assert a1.user is u1 assert a1 in u1.addresses def test_invalid_key(self): users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(User, users, properties={ 'addresses':relationship(Address, back_populates='userr') }) mapper(Address, addresses, properties={ 'user':relationship(User, back_populates='addresses') }) assert_raises(sa.exc.InvalidRequestError, configure_mappers) def test_invalid_target(self): addresses, Dingaling, User, dingalings, Address, users = (self.tables.addresses, self.classes.Dingaling, self.classes.User, self.tables.dingalings, self.classes.Address, self.tables.users) mapper(User, users, properties={ 'addresses':relationship(Address, back_populates='dingaling'), }) mapper(Dingaling, dingalings) mapper(Address, addresses, properties={ 'dingaling':relationship(Dingaling) }) assert_raises_message(sa.exc.ArgumentError, r"reverse_property 'dingaling' on relationship " "User.addresses references " "relationship Address.dingaling, which does not " "reference mapper Mapper\|User\|users", configure_mappers) class JoinConditionErrorTest(fixtures.TestBase): def test_clauseelement_pj(self): from sqlalchemy.ext.declarative import declarative_base Base = declarative_base() class C1(Base): __tablename__ = 'c1' id = Column('id', Integer, primary_key=True) class C2(Base): __tablename__ 
= 'c2' id = Column('id', Integer, primary_key=True) c1id = Column('c1id', Integer, ForeignKey('c1.id')) c2 = relationship(C1, primaryjoin=C1.id) assert_raises(sa.exc.ArgumentError, configure_mappers) def test_clauseelement_pj_false(self): from sqlalchemy.ext.declarative import declarative_base Base = declarative_base() class C1(Base): __tablename__ = 'c1' id = Column('id', Integer, primary_key=True) class C2(Base): __tablename__ = 'c2' id = Column('id', Integer, primary_key=True) c1id = Column('c1id', Integer, ForeignKey('c1.id')) c2 = relationship(C1, primaryjoin="x"=="y") assert_raises(sa.exc.ArgumentError, configure_mappers) def test_only_column_elements(self): m = MetaData() t1 = Table('t1', m, Column('id', Integer, primary_key=True), Column('foo_id', Integer, ForeignKey('t2.id')), ) t2 = Table('t2', m, Column('id', Integer, primary_key=True), ) class C1(object): pass class C2(object): pass mapper(C1, t1, properties={'c2':relationship(C2, primaryjoin=t1.join(t2))}) mapper(C2, t2) assert_raises(sa.exc.ArgumentError, configure_mappers) def test_invalid_string_args(self): from sqlalchemy.ext.declarative import declarative_base from sqlalchemy import util for argname, arg in [ ('remote_side', ['c1.id']), ('remote_side', ['id']), ('foreign_keys', ['c1id']), ('foreign_keys', ['C2.c1id']), ('order_by', ['id']), ]: clear_mappers() kw = {argname:arg} Base = declarative_base() class C1(Base): __tablename__ = 'c1' id = Column('id', Integer, primary_key=True) class C2(Base): __tablename__ = 'c2' id_ = Column('id', Integer, primary_key=True) c1id = Column('c1id', Integer, ForeignKey('c1.id')) c2 = relationship(C1, **kw) assert_raises_message( sa.exc.ArgumentError, "Column-based expression object expected " "for argument '%s'; got: '%s', type %r" % (argname, arg[0], type(arg[0])), configure_mappers) def test_fk_error_not_raised_unrelated(self): m = MetaData() t1 = Table('t1', m, Column('id', Integer, primary_key=True), Column('foo_id', Integer, ForeignKey('t2.nonexistent_id')), ) t2 = Table('t2', m, Column('id', Integer, primary_key=True), ) t3 = Table('t3', m, Column('id', Integer, primary_key=True), Column('t1id', Integer, ForeignKey('t1.id')) ) class C1(object): pass class C2(object): pass mapper(C1, t1, properties={'c2':relationship(C2)}) mapper(C2, t3) assert C1.c2.property.primaryjoin.compare(t1.c.id==t3.c.t1id) def test_join_error_raised(self): m = MetaData() t1 = Table('t1', m, Column('id', Integer, primary_key=True), ) t2 = Table('t2', m, Column('id', Integer, primary_key=True), ) t3 = Table('t3', m, Column('id', Integer, primary_key=True), Column('t1id', Integer) ) class C1(object): pass class C2(object): pass mapper(C1, t1, properties={'c2':relationship(C2)}) mapper(C2, t3) assert_raises(sa.exc.ArgumentError, configure_mappers) def teardown(self): clear_mappers() class TypeMatchTest(fixtures.MappedTest): """test errors raised when trying to add items whose type is not handled by a relationship""" @classmethod def define_tables(cls, metadata): Table("a", metadata, Column('aid', Integer, primary_key=True, test_needs_autoincrement=True), Column('data', String(30))) Table("b", metadata, Column('bid', Integer, primary_key=True, test_needs_autoincrement=True), Column("a_id", Integer, ForeignKey("a.aid")), Column('data', String(30))) Table("c", metadata, Column('cid', Integer, primary_key=True, test_needs_autoincrement=True), Column("b_id", Integer, ForeignKey("b.bid")), Column('data', String(30))) Table("d", metadata, Column('did', Integer, primary_key=True, test_needs_autoincrement=True), 
Column("a_id", Integer, ForeignKey("a.aid")), Column('data', String(30))) def test_o2m_oncascade(self): a, c, b = (self.tables.a, self.tables.c, self.tables.b) class A(fixtures.BasicEntity): pass class B(fixtures.BasicEntity): pass class C(fixtures.BasicEntity): pass mapper(A, a, properties={'bs':relationship(B)}) mapper(B, b) mapper(C, c) a1 = A() b1 = B() c1 = C() a1.bs.append(b1) a1.bs.append(c1) sess = create_session() try: sess.add(a1) assert False except AssertionError, err: eq_(str(err), "Attribute 'bs' on class '%s' doesn't handle " "objects of type '%s'" % (A, C)) def test_o2m_onflush(self): a, c, b = (self.tables.a, self.tables.c, self.tables.b) class A(fixtures.BasicEntity): pass class B(fixtures.BasicEntity): pass class C(fixtures.BasicEntity): pass mapper(A, a, properties={'bs':relationship(B, cascade="none")}) mapper(B, b) mapper(C, c) a1 = A() b1 = B() c1 = C() a1.bs.append(b1) a1.bs.append(c1) sess = create_session() sess.add(a1) sess.add(b1) sess.add(c1) assert_raises_message(sa.orm.exc.FlushError, "Attempting to flush an item", sess.flush) def test_o2m_nopoly_onflush(self): a, c, b = (self.tables.a, self.tables.c, self.tables.b) class A(fixtures.BasicEntity): pass class B(fixtures.BasicEntity): pass class C(B): pass mapper(A, a, properties={'bs':relationship(B, cascade="none")}) mapper(B, b) mapper(C, c, inherits=B) a1 = A() b1 = B() c1 = C() a1.bs.append(b1) a1.bs.append(c1) sess = create_session() sess.add(a1) sess.add(b1) sess.add(c1) assert_raises_message(sa.orm.exc.FlushError, "Attempting to flush an item", sess.flush) def test_m2o_nopoly_onflush(self): a, b, d = (self.tables.a, self.tables.b, self.tables.d) class A(fixtures.BasicEntity): pass class B(A): pass class D(fixtures.BasicEntity): pass mapper(A, a) mapper(B, b, inherits=A) mapper(D, d, properties={"a":relationship(A, cascade="none")}) b1 = B() d1 = D() d1.a = b1 sess = create_session() sess.add(b1) sess.add(d1) assert_raises_message(sa.orm.exc.FlushError, "Attempting to flush an item", sess.flush) def test_m2o_oncascade(self): a, b, d = (self.tables.a, self.tables.b, self.tables.d) class A(fixtures.BasicEntity): pass class B(fixtures.BasicEntity): pass class D(fixtures.BasicEntity): pass mapper(A, a) mapper(B, b) mapper(D, d, properties={"a":relationship(A)}) b1 = B() d1 = D() d1.a = b1 sess = create_session() assert_raises_message(AssertionError, "doesn't handle objects of type", sess.add, d1) class TypedAssociationTable(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): class MySpecialType(sa.types.TypeDecorator): impl = String def process_bind_param(self, value, dialect): return "lala" + value def process_result_value(self, value, dialect): return value[4:] Table('t1', metadata, Column('col1', MySpecialType(30), primary_key=True), Column('col2', String(30))) Table('t2', metadata, Column('col1', MySpecialType(30), primary_key=True), Column('col2', String(30))) Table('t3', metadata, Column('t1c1', MySpecialType(30), ForeignKey('t1.col1')), Column('t2c1', MySpecialType(30), ForeignKey('t2.col1'))) def testm2m(self): """Many-to-many tables with special types for candidate keys.""" t2, t3, t1 = (self.tables.t2, self.tables.t3, self.tables.t1) class T1(fixtures.BasicEntity): pass class T2(fixtures.BasicEntity): pass mapper(T2, t2) mapper(T1, t1, properties={ 't2s':relationship(T2, secondary=t3, backref='t1s')}) a = T1() a.col1 = "aid" b = T2() b.col1 = "bid" c = T2() c.col1 = "cid" a.t2s.append(b) a.t2s.append(c) sess = create_session() sess.add(a) sess.flush() assert t3.count().scalar() == 2 
a.t2s.remove(c) sess.flush() assert t3.count().scalar() == 1 class ViewOnlyM2MBackrefTest(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): Table("t1", metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('data', String(40))) Table("t2", metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('data', String(40)), ) Table("t1t2", metadata, Column('t1id', Integer, ForeignKey('t1.id'), primary_key=True), Column('t2id', Integer, ForeignKey('t2.id'), primary_key=True), ) def test_viewonly(self): t1t2, t2, t1 = (self.tables.t1t2, self.tables.t2, self.tables.t1) class A(fixtures.ComparableEntity):pass class B(fixtures.ComparableEntity):pass mapper(A, t1, properties={ 'bs':relationship(B, secondary=t1t2, backref=backref('as_', viewonly=True)) }) mapper(B, t2) sess = create_session() a1 = A() b1 = B(as_=[a1]) sess.add(a1) sess.flush() eq_( sess.query(A).first(), A(bs=[B(id=b1.id)]) ) eq_( sess.query(B).first(), B(as_=[A(id=a1.id)]) ) class ViewOnlyOverlappingNames(fixtures.MappedTest): """'viewonly' mappings with overlapping PK column names.""" @classmethod def define_tables(cls, metadata): Table("t1", metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('data', String(40))) Table("t2", metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('data', String(40)), Column('t1id', Integer, ForeignKey('t1.id'))) Table("t3", metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('data', String(40)), Column('t2id', Integer, ForeignKey('t2.id'))) def test_three_table_view(self): """A three table join with overlapping PK names. A third table is pulled into the primary join condition using overlapping PK column names and should not produce 'conflicting column' error. """ t2, t3, t1 = (self.tables.t2, self.tables.t3, self.tables.t1) class C1(fixtures.BasicEntity): pass class C2(fixtures.BasicEntity): pass class C3(fixtures.BasicEntity): pass mapper(C1, t1, properties={ 't2s':relationship(C2), 't2_view':relationship(C2, viewonly=True, primaryjoin=sa.and_(t1.c.id==t2.c.t1id, t3.c.t2id==t2.c.id, t3.c.data==t1.c.data))}) mapper(C2, t2) mapper(C3, t3, properties={ 't2':relationship(C2)}) c1 = C1() c1.data = 'c1data' c2a = C2() c1.t2s.append(c2a) c2b = C2() c1.t2s.append(c2b) c3 = C3() c3.data='c1data' c3.t2 = c2b sess = create_session() sess.add(c1) sess.add(c3) sess.flush() sess.expunge_all() c1 = sess.query(C1).get(c1.id) assert set([x.id for x in c1.t2s]) == set([c2a.id, c2b.id]) assert set([x.id for x in c1.t2_view]) == set([c2b.id]) class ViewOnlyUniqueNames(fixtures.MappedTest): """'viewonly' mappings with unique PK column names.""" @classmethod def define_tables(cls, metadata): Table("t1", metadata, Column('t1id', Integer, primary_key=True, test_needs_autoincrement=True), Column('data', String(40))) Table("t2", metadata, Column('t2id', Integer, primary_key=True, test_needs_autoincrement=True), Column('data', String(40)), Column('t1id_ref', Integer, ForeignKey('t1.t1id'))) Table("t3", metadata, Column('t3id', Integer, primary_key=True, test_needs_autoincrement=True), Column('data', String(40)), Column('t2id_ref', Integer, ForeignKey('t2.t2id'))) def test_three_table_view(self): """A three table join with overlapping PK names. A third table is pulled into the primary join condition using unique PK column names and should not produce 'mapper has no columnX' error. 
""" t2, t3, t1 = (self.tables.t2, self.tables.t3, self.tables.t1) class C1(fixtures.BasicEntity): pass class C2(fixtures.BasicEntity): pass class C3(fixtures.BasicEntity): pass mapper(C1, t1, properties={ 't2s':relationship(C2), 't2_view':relationship(C2, viewonly=True, primaryjoin=sa.and_(t1.c.t1id==t2.c.t1id_ref, t3.c.t2id_ref==t2.c.t2id, t3.c.data==t1.c.data))}) mapper(C2, t2) mapper(C3, t3, properties={ 't2':relationship(C2)}) c1 = C1() c1.data = 'c1data' c2a = C2() c1.t2s.append(c2a) c2b = C2() c1.t2s.append(c2b) c3 = C3() c3.data='c1data' c3.t2 = c2b sess = create_session() sess.add_all((c1, c3)) sess.flush() sess.expunge_all() c1 = sess.query(C1).get(c1.t1id) assert set([x.t2id for x in c1.t2s]) == set([c2a.t2id, c2b.t2id]) assert set([x.t2id for x in c1.t2_view]) == set([c2b.t2id]) class ViewOnlyLocalRemoteM2M(fixtures.TestBase): """test that local-remote is correctly determined for m2m""" def test_local_remote(self): meta = MetaData() t1 = Table('t1', meta, Column('id', Integer, primary_key=True), ) t2 = Table('t2', meta, Column('id', Integer, primary_key=True), ) t12 = Table('tab', meta, Column('t1_id', Integer, ForeignKey('t1.id',)), Column('t2_id', Integer, ForeignKey('t2.id',)), ) class A(object): pass class B(object): pass mapper( B, t2, ) m = mapper( A, t1, properties=dict( b_view = relationship( B, secondary=t12, viewonly=True), b_plain= relationship( B, secondary=t12), ) ) configure_mappers() assert m.get_property('b_view').local_remote_pairs == \ m.get_property('b_plain').local_remote_pairs == \ [(t1.c.id, t12.c.t1_id), (t2.c.id, t12.c.t2_id)] class ViewOnlyNonEquijoin(fixtures.MappedTest): """'viewonly' mappings based on non-equijoins.""" @classmethod def define_tables(cls, metadata): Table('foos', metadata, Column('id', Integer, primary_key=True)) Table('bars', metadata, Column('id', Integer, primary_key=True), Column('fid', Integer)) def test_viewonly_join(self): bars, foos = self.tables.bars, self.tables.foos class Foo(fixtures.ComparableEntity): pass class Bar(fixtures.ComparableEntity): pass mapper(Foo, foos, properties={ 'bars':relationship(Bar, primaryjoin=foos.c.id > bars.c.fid, foreign_keys=[bars.c.fid], viewonly=True)}) mapper(Bar, bars) sess = create_session() sess.add_all((Foo(id=4), Foo(id=9), Bar(id=1, fid=2), Bar(id=2, fid=3), Bar(id=3, fid=6), Bar(id=4, fid=7))) sess.flush() sess = create_session() eq_(sess.query(Foo).filter_by(id=4).one(), Foo(id=4, bars=[Bar(fid=2), Bar(fid=3)])) eq_(sess.query(Foo).filter_by(id=9).one(), Foo(id=9, bars=[Bar(fid=2), Bar(fid=3), Bar(fid=6), Bar(fid=7)])) class ViewOnlyRepeatedRemoteColumn(fixtures.MappedTest): """'viewonly' mappings that contain the same 'remote' column twice""" @classmethod def define_tables(cls, metadata): Table('foos', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('bid1', Integer,ForeignKey('bars.id')), Column('bid2', Integer,ForeignKey('bars.id'))) Table('bars', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('data', String(50))) def test_relationship_on_or(self): bars, foos = self.tables.bars, self.tables.foos class Foo(fixtures.ComparableEntity): pass class Bar(fixtures.ComparableEntity): pass mapper(Foo, foos, properties={ 'bars':relationship(Bar, primaryjoin=sa.or_(bars.c.id == foos.c.bid1, bars.c.id == foos.c.bid2), uselist=True, viewonly=True)}) mapper(Bar, bars) sess = create_session() b1 = Bar(id=1, data='b1') b2 = Bar(id=2, data='b2') b3 = Bar(id=3, data='b3') f1 = Foo(bid1=1, bid2=2) f2 = Foo(bid1=3, 
bid2=None) sess.add_all((b1, b2, b3)) sess.flush() sess.add_all((f1, f2)) sess.flush() sess.expunge_all() eq_(sess.query(Foo).filter_by(id=f1.id).one(), Foo(bars=[Bar(data='b1'), Bar(data='b2')])) eq_(sess.query(Foo).filter_by(id=f2.id).one(), Foo(bars=[Bar(data='b3')])) class ViewOnlyRepeatedLocalColumn(fixtures.MappedTest): """'viewonly' mappings that contain the same 'local' column twice""" @classmethod def define_tables(cls, metadata): Table('foos', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('data', String(50))) Table('bars', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('fid1', Integer, ForeignKey('foos.id')), Column('fid2', Integer, ForeignKey('foos.id')), Column('data', String(50))) def test_relationship_on_or(self): bars, foos = self.tables.bars, self.tables.foos class Foo(fixtures.ComparableEntity): pass class Bar(fixtures.ComparableEntity): pass mapper(Foo, foos, properties={ 'bars':relationship(Bar, primaryjoin=sa.or_(bars.c.fid1 == foos.c.id, bars.c.fid2 == foos.c.id), viewonly=True)}) mapper(Bar, bars) sess = create_session() f1 = Foo(id=1, data='f1') f2 = Foo(id=2, data='f2') b1 = Bar(fid1=1, data='b1') b2 = Bar(fid2=1, data='b2') b3 = Bar(fid1=2, data='b3') b4 = Bar(fid1=1, fid2=2, data='b4') sess.add_all((f1, f2)) sess.flush() sess.add_all((b1, b2, b3, b4)) sess.flush() sess.expunge_all() eq_(sess.query(Foo).filter_by(id=f1.id).one(), Foo(bars=[Bar(data='b1'), Bar(data='b2'), Bar(data='b4')])) eq_(sess.query(Foo).filter_by(id=f2.id).one(), Foo(bars=[Bar(data='b3'), Bar(data='b4')])) class ViewOnlyComplexJoin(_RelationshipErrors, fixtures.MappedTest): """'viewonly' mappings with a complex join condition.""" @classmethod def define_tables(cls, metadata): Table('t1', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('data', String(50))) Table('t2', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('data', String(50)), Column('t1id', Integer, ForeignKey('t1.id'))) Table('t3', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('data', String(50))) Table('t2tot3', metadata, Column('t2id', Integer, ForeignKey('t2.id')), Column('t3id', Integer, ForeignKey('t3.id'))) @classmethod def setup_classes(cls): class T1(cls.Comparable): pass class T2(cls.Comparable): pass class T3(cls.Comparable): pass def test_basic(self): T1, t2, T2, T3, t3, t2tot3, t1 = (self.classes.T1, self.tables.t2, self.classes.T2, self.classes.T3, self.tables.t3, self.tables.t2tot3, self.tables.t1) mapper(T1, t1, properties={ 't3s':relationship(T3, primaryjoin=sa.and_( t1.c.id==t2.c.t1id, t2.c.id==t2tot3.c.t2id, t3.c.id==t2tot3.c.t3id), viewonly=True, foreign_keys=t3.c.id, remote_side=t2.c.t1id) }) mapper(T2, t2, properties={ 't1':relationship(T1), 't3s':relationship(T3, secondary=t2tot3) }) mapper(T3, t3) sess = create_session() sess.add(T2(data='t2', t1=T1(data='t1'), t3s=[T3(data='t3')])) sess.flush() sess.expunge_all() a = sess.query(T1).first() eq_(a.t3s, [T3(data='t3')]) def test_remote_side_escalation(self): T1, t2, T2, T3, t3, t2tot3, t1 = (self.classes.T1, self.tables.t2, self.classes.T2, self.classes.T3, self.tables.t3, self.tables.t2tot3, self.tables.t1) mapper(T1, t1, properties={ 't3s':relationship(T3, primaryjoin=sa.and_(t1.c.id==t2.c.t1id, t2.c.id==t2tot3.c.t2id, t3.c.id==t2tot3.c.t3id ), viewonly=True, foreign_keys=t3.c.id)}) mapper(T2, t2, properties={ 't1':relationship(T1), 't3s':relationship(T3, 
secondary=t2tot3)}) mapper(T3, t3) self._assert_raises_no_local_remote(configure_mappers, "T1.t3s") class RemoteForeignBetweenColsTest(fixtures.DeclarativeMappedTest): """test a complex annotation using between(). Using declarative here as an integration test for the local() and remote() annotations in conjunction with already annotated instrumented attributes, etc. """ @classmethod def setup_classes(cls): Base = cls.DeclarativeBasic class Network(fixtures.ComparableEntity, Base): __tablename__ = "network" id = Column(sa.Integer, primary_key=True, test_needs_autoincrement=True) ip_net_addr = Column(Integer) ip_broadcast_addr = Column(Integer) addresses = relationship("Address", primaryjoin="remote(foreign(Address.ip_addr)).between(" "Network.ip_net_addr," "Network.ip_broadcast_addr)", viewonly=True ) class Address(fixtures.ComparableEntity, Base): __tablename__ = "address" ip_addr = Column(Integer, primary_key=True) @classmethod def insert_data(cls): Network, Address = cls.classes.Network, cls.classes.Address s = Session(testing.db) s.add_all([ Network(ip_net_addr=5, ip_broadcast_addr=10), Network(ip_net_addr=15, ip_broadcast_addr=25), Network(ip_net_addr=30, ip_broadcast_addr=35), Address(ip_addr=17), Address(ip_addr=18), Address(ip_addr=9), Address(ip_addr=27) ]) s.commit() def test_col_query(self): Network, Address = self.classes.Network, self.classes.Address session = Session(testing.db) eq_( session.query(Address.ip_addr).\ select_from(Network).\ join(Network.addresses).\ filter(Network.ip_net_addr == 15).\ all(), [(17, ), (18, )] ) def test_lazyload(self): Network, Address = self.classes.Network, self.classes.Address session = Session(testing.db) n3 = session.query(Network).filter(Network.ip_net_addr == 5).one() eq_([a.ip_addr for a in n3.addresses], [9]) class ExplicitLocalRemoteTest(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): Table('t1', metadata, Column('id', String(50), primary_key=True, test_needs_autoincrement=True), Column('data', String(50))) Table('t2', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('data', String(50)), Column('t1id', String(50))) @classmethod def setup_classes(cls): class T1(cls.Comparable): pass class T2(cls.Comparable): pass def test_onetomany_funcfk_oldstyle(self): T2, T1, t2, t1 = (self.classes.T2, self.classes.T1, self.tables.t2, self.tables.t1) # old _local_remote_pairs mapper(T1, t1, properties={ 't2s':relationship(T2, primaryjoin=t1.c.id==sa.func.lower(t2.c.t1id), _local_remote_pairs=[(t1.c.id, t2.c.t1id)], foreign_keys=[t2.c.t1id] ) }) mapper(T2, t2) self._test_onetomany() def test_onetomany_funcfk_annotated(self): T2, T1, t2, t1 = (self.classes.T2, self.classes.T1, self.tables.t2, self.tables.t1) # use annotation mapper(T1, t1, properties={ 't2s':relationship(T2, primaryjoin=t1.c.id== foreign(sa.func.lower(t2.c.t1id)), )}) mapper(T2, t2) self._test_onetomany() def _test_onetomany(self): T2, T1, t2, t1 = (self.classes.T2, self.classes.T1, self.tables.t2, self.tables.t1) is_(T1.t2s.property.direction, ONETOMANY) eq_(T1.t2s.property.local_remote_pairs, [(t1.c.id, t2.c.t1id)]) sess = create_session() a1 = T1(id='number1', data='a1') a2 = T1(id='number2', data='a2') b1 = T2(data='b1', t1id='NuMbEr1') b2 = T2(data='b2', t1id='Number1') b3 = T2(data='b3', t1id='Number2') sess.add_all((a1, a2, b1, b2, b3)) sess.flush() sess.expunge_all() eq_(sess.query(T1).first(), T1(id='number1', data='a1', t2s=[ T2(data='b1', t1id='NuMbEr1'), T2(data='b2', t1id='Number1')])) def 
test_manytoone_funcfk(self): T2, T1, t2, t1 = (self.classes.T2, self.classes.T1, self.tables.t2, self.tables.t1) mapper(T1, t1) mapper(T2, t2, properties={ 't1':relationship(T1, primaryjoin=t1.c.id==sa.func.lower(t2.c.t1id), _local_remote_pairs=[(t2.c.t1id, t1.c.id)], foreign_keys=[t2.c.t1id], uselist=True)}) sess = create_session() a1 = T1(id='number1', data='a1') a2 = T1(id='number2', data='a2') b1 = T2(data='b1', t1id='NuMbEr1') b2 = T2(data='b2', t1id='Number1') b3 = T2(data='b3', t1id='Number2') sess.add_all((a1, a2, b1, b2, b3)) sess.flush() sess.expunge_all() eq_(sess.query(T2).filter(T2.data.in_(['b1', 'b2'])).all(), [T2(data='b1', t1=[T1(id='number1', data='a1')]), T2(data='b2', t1=[T1(id='number1', data='a1')])]) def test_onetomany_func_referent(self): T2, T1, t2, t1 = (self.classes.T2, self.classes.T1, self.tables.t2, self.tables.t1) mapper(T1, t1, properties={ 't2s':relationship(T2, primaryjoin=sa.func.lower(t1.c.id)==t2.c.t1id, _local_remote_pairs=[(t1.c.id, t2.c.t1id)], foreign_keys=[t2.c.t1id])}) mapper(T2, t2) sess = create_session() a1 = T1(id='NuMbeR1', data='a1') a2 = T1(id='NuMbeR2', data='a2') b1 = T2(data='b1', t1id='number1') b2 = T2(data='b2', t1id='number1') b3 = T2(data='b2', t1id='number2') sess.add_all((a1, a2, b1, b2, b3)) sess.flush() sess.expunge_all() eq_(sess.query(T1).first(), T1(id='NuMbeR1', data='a1', t2s=[ T2(data='b1', t1id='number1'), T2(data='b2', t1id='number1')])) def test_manytoone_func_referent(self): T2, T1, t2, t1 = (self.classes.T2, self.classes.T1, self.tables.t2, self.tables.t1) mapper(T1, t1) mapper(T2, t2, properties={ 't1':relationship(T1, primaryjoin=sa.func.lower(t1.c.id)==t2.c.t1id, _local_remote_pairs=[(t2.c.t1id, t1.c.id)], foreign_keys=[t2.c.t1id], uselist=True)}) sess = create_session() a1 = T1(id='NuMbeR1', data='a1') a2 = T1(id='NuMbeR2', data='a2') b1 = T2(data='b1', t1id='number1') b2 = T2(data='b2', t1id='number1') b3 = T2(data='b3', t1id='number2') sess.add_all((a1, a2, b1, b2, b3)) sess.flush() sess.expunge_all() eq_(sess.query(T2).filter(T2.data.in_(['b1', 'b2'])).all(), [T2(data='b1', t1=[T1(id='NuMbeR1', data='a1')]), T2(data='b2', t1=[T1(id='NuMbeR1', data='a1')])]) def test_escalation_1(self): T2, T1, t2, t1 = (self.classes.T2, self.classes.T1, self.tables.t2, self.tables.t1) mapper(T1, t1, properties={ 't2s':relationship(T2, primaryjoin=t1.c.id==sa.func.lower(t2.c.t1id), _local_remote_pairs=[(t1.c.id, t2.c.t1id)], foreign_keys=[t2.c.t1id], remote_side=[t2.c.t1id])}) mapper(T2, t2) assert_raises(sa.exc.ArgumentError, sa.orm.configure_mappers) def test_escalation_2(self): T2, T1, t2, t1 = (self.classes.T2, self.classes.T1, self.tables.t2, self.tables.t1) mapper(T1, t1, properties={ 't2s':relationship(T2, primaryjoin=t1.c.id==sa.func.lower(t2.c.t1id), _local_remote_pairs=[(t1.c.id, t2.c.t1id)])}) mapper(T2, t2) assert_raises(sa.exc.ArgumentError, sa.orm.configure_mappers) class InvalidRemoteSideTest(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): Table('t1', metadata, Column('id', Integer, primary_key=True), Column('data', String(50)), Column('t_id', Integer, ForeignKey('t1.id')) ) @classmethod def setup_classes(cls): class T1(cls.Comparable): pass def test_o2m_backref(self): T1, t1 = self.classes.T1, self.tables.t1 mapper(T1, t1, properties={ 't1s':relationship(T1, backref='parent') }) assert_raises_message(sa.exc.ArgumentError, "T1.t1s and back-reference T1.parent are " "both of the same direction . 
Did you " "mean to set remote_side on the many-to-one side ?", configure_mappers) def test_m2o_backref(self): T1, t1 = self.classes.T1, self.tables.t1 mapper(T1, t1, properties={ 't1s':relationship(T1, backref=backref('parent', remote_side=t1.c.id), remote_side=t1.c.id) }) assert_raises_message(sa.exc.ArgumentError, "T1.t1s and back-reference T1.parent are " "both of the same direction . Did you " "mean to set remote_side on the many-to-one side ?", configure_mappers) def test_o2m_explicit(self): T1, t1 = self.classes.T1, self.tables.t1 mapper(T1, t1, properties={ 't1s':relationship(T1, back_populates='parent'), 'parent':relationship(T1, back_populates='t1s'), }) # can't be sure of ordering here assert_raises_message(sa.exc.ArgumentError, "both of the same direction . Did you " "mean to set remote_side on the many-to-one side ?", configure_mappers) def test_m2o_explicit(self): T1, t1 = self.classes.T1, self.tables.t1 mapper(T1, t1, properties={ 't1s':relationship(T1, back_populates='parent', remote_side=t1.c.id), 'parent':relationship(T1, back_populates='t1s', remote_side=t1.c.id) }) # can't be sure of ordering here assert_raises_message(sa.exc.ArgumentError, "both of the same direction . Did you " "mean to set remote_side on the many-to-one side ?", configure_mappers) class AmbiguousFKResolutionTest(_RelationshipErrors, fixtures.MappedTest): @classmethod def define_tables(cls, metadata): Table("a", metadata, Column('id', Integer, primary_key=True) ) Table("b", metadata, Column('id', Integer, primary_key=True), Column('aid_1', Integer, ForeignKey('a.id')), Column('aid_2', Integer, ForeignKey('a.id')), ) Table("atob", metadata, Column('aid', Integer), Column('bid', Integer), ) Table("atob_ambiguous", metadata, Column('aid1', Integer, ForeignKey('a.id')), Column('bid1', Integer, ForeignKey('b.id')), Column('aid2', Integer, ForeignKey('a.id')), Column('bid2', Integer, ForeignKey('b.id')), ) @classmethod def setup_classes(cls): class A(cls.Basic): pass class B(cls.Basic): pass def test_ambiguous_fks_o2m(self): A, B = self.classes.A, self.classes.B a, b = self.tables.a, self.tables.b mapper(A, a, properties={ 'bs':relationship(B) }) mapper(B, b) self._assert_raises_ambig_join( configure_mappers, "A.bs", None ) def test_with_fks_o2m(self): A, B = self.classes.A, self.classes.B a, b = self.tables.a, self.tables.b mapper(A, a, properties={ 'bs':relationship(B, foreign_keys=b.c.aid_1) }) mapper(B, b) sa.orm.configure_mappers() assert A.bs.property.primaryjoin.compare( a.c.id==b.c.aid_1 ) eq_( A.bs.property._calculated_foreign_keys, set([b.c.aid_1]) ) def test_with_pj_o2m(self): A, B = self.classes.A, self.classes.B a, b = self.tables.a, self.tables.b mapper(A, a, properties={ 'bs':relationship(B, primaryjoin=a.c.id==b.c.aid_1) }) mapper(B, b) sa.orm.configure_mappers() assert A.bs.property.primaryjoin.compare( a.c.id==b.c.aid_1 ) eq_( A.bs.property._calculated_foreign_keys, set([b.c.aid_1]) ) def test_with_annotated_pj_o2m(self): A, B = self.classes.A, self.classes.B a, b = self.tables.a, self.tables.b mapper(A, a, properties={ 'bs':relationship(B, primaryjoin=a.c.id==foreign(b.c.aid_1)) }) mapper(B, b) sa.orm.configure_mappers() assert A.bs.property.primaryjoin.compare( a.c.id==b.c.aid_1 ) eq_( A.bs.property._calculated_foreign_keys, set([b.c.aid_1]) ) def test_no_fks_m2m(self): A, B = self.classes.A, self.classes.B a, b, a_to_b = self.tables.a, self.tables.b, self.tables.atob mapper(A, a, properties={ 'bs':relationship(B, secondary=a_to_b) }) mapper(B, b) self._assert_raises_no_join( 
sa.orm.configure_mappers, "A.bs", a_to_b, ) def test_ambiguous_fks_m2m(self): A, B = self.classes.A, self.classes.B a, b, a_to_b = self.tables.a, self.tables.b, self.tables.atob_ambiguous mapper(A, a, properties={ 'bs':relationship(B, secondary=a_to_b) }) mapper(B, b) self._assert_raises_ambig_join( configure_mappers, "A.bs", "atob_ambiguous" ) def test_with_fks_m2m(self): A, B = self.classes.A, self.classes.B a, b, a_to_b = self.tables.a, self.tables.b, self.tables.atob_ambiguous mapper(A, a, properties={ 'bs':relationship(B, secondary=a_to_b, foreign_keys=[a_to_b.c.aid1, a_to_b.c.bid1]) }) mapper(B, b) sa.orm.configure_mappers() class InvalidRelationshipEscalationTest(_RelationshipErrors, fixtures.MappedTest): @classmethod def define_tables(cls, metadata): Table('foos', metadata, Column('id', Integer, primary_key=True), Column('fid', Integer)) Table('bars', metadata, Column('id', Integer, primary_key=True), Column('fid', Integer)) Table('foos_with_fks', metadata, Column('id', Integer, primary_key=True), Column('fid', Integer, ForeignKey('foos_with_fks.id'))) Table('bars_with_fks', metadata, Column('id', Integer, primary_key=True), Column('fid', Integer, ForeignKey('foos_with_fks.id'))) @classmethod def setup_classes(cls): class Foo(cls.Basic): pass class Bar(cls.Basic): pass def test_no_join(self): bars, Foo, Bar, foos = (self.tables.bars, self.classes.Foo, self.classes.Bar, self.tables.foos) mapper(Foo, foos, properties={ 'bars':relationship(Bar)}) mapper(Bar, bars) self._assert_raises_no_join(sa.orm.configure_mappers, "Foo.bars", None ) def test_no_join_self_ref(self): bars, Foo, Bar, foos = (self.tables.bars, self.classes.Foo, self.classes.Bar, self.tables.foos) mapper(Foo, foos, properties={ 'foos':relationship(Foo)}) mapper(Bar, bars) self._assert_raises_no_join( configure_mappers, "Foo.foos", None ) def test_no_equated(self): bars, Foo, Bar, foos = (self.tables.bars, self.classes.Foo, self.classes.Bar, self.tables.foos) mapper(Foo, foos, properties={ 'bars':relationship(Bar, primaryjoin=foos.c.id>bars.c.fid)}) mapper(Bar, bars) self._assert_raises_no_relevant_fks( configure_mappers, "foos.id > bars.fid", "Foo.bars", "primary" ) def test_no_equated_fks(self): bars, Foo, Bar, foos = (self.tables.bars, self.classes.Foo, self.classes.Bar, self.tables.foos) mapper(Foo, foos, properties={ 'bars':relationship(Bar, primaryjoin=foos.c.id>bars.c.fid, foreign_keys=bars.c.fid)}) mapper(Bar, bars) self._assert_raises_no_equality( sa.orm.configure_mappers, "foos.id > bars.fid", "Foo.bars", "primary" ) def test_no_equated_wo_fks_works_on_relaxed(self): foos_with_fks, Foo, Bar, bars_with_fks, foos = (self.tables.foos_with_fks, self.classes.Foo, self.classes.Bar, self.tables.bars_with_fks, self.tables.foos) # very unique - the join between parent/child # has no fks, but there is an fk join between two other # tables in the join condition, for those users that try creating # these big-long-string-of-joining-many-tables primaryjoins. # in this case we don't get eq_pairs, but we hit the "works if viewonly" # rule. so here we add another clause regarding "try foreign keys". 
mapper(Foo, foos, properties={ 'bars':relationship(Bar, primaryjoin=and_( bars_with_fks.c.fid==foos_with_fks.c.id, foos_with_fks.c.id==foos.c.id, ) )}) mapper(Bar, bars_with_fks) self._assert_raises_no_equality( sa.orm.configure_mappers, "bars_with_fks.fid = foos_with_fks.id " "AND foos_with_fks.id = foos.id", "Foo.bars", "primary" ) def test_ambiguous_fks(self): bars, Foo, Bar, foos = (self.tables.bars, self.classes.Foo, self.classes.Bar, self.tables.foos) mapper(Foo, foos, properties={ 'bars':relationship(Bar, primaryjoin=foos.c.id==bars.c.fid, foreign_keys=[foos.c.id, bars.c.fid])}) mapper(Bar, bars) self._assert_raises_ambiguous_direction( sa.orm.configure_mappers, "Foo.bars" ) def test_ambiguous_remoteside_o2m(self): bars, Foo, Bar, foos = (self.tables.bars, self.classes.Foo, self.classes.Bar, self.tables.foos) mapper(Foo, foos, properties={ 'bars':relationship(Bar, primaryjoin=foos.c.id==bars.c.fid, foreign_keys=[bars.c.fid], remote_side=[foos.c.id, bars.c.fid], viewonly=True )}) mapper(Bar, bars) self._assert_raises_no_local_remote( configure_mappers, "Foo.bars", ) def test_ambiguous_remoteside_m2o(self): bars, Foo, Bar, foos = (self.tables.bars, self.classes.Foo, self.classes.Bar, self.tables.foos) mapper(Foo, foos, properties={ 'bars':relationship(Bar, primaryjoin=foos.c.id==bars.c.fid, foreign_keys=[foos.c.id], remote_side=[foos.c.id, bars.c.fid], viewonly=True )}) mapper(Bar, bars) self._assert_raises_no_local_remote( configure_mappers, "Foo.bars", ) def test_no_equated_self_ref_no_fks(self): bars, Foo, Bar, foos = (self.tables.bars, self.classes.Foo, self.classes.Bar, self.tables.foos) mapper(Foo, foos, properties={ 'foos':relationship(Foo, primaryjoin=foos.c.id>foos.c.fid)}) mapper(Bar, bars) self._assert_raises_no_relevant_fks(configure_mappers, "foos.id > foos.fid", "Foo.foos", "primary" ) def test_no_equated_self_ref_no_equality(self): bars, Foo, Bar, foos = (self.tables.bars, self.classes.Foo, self.classes.Bar, self.tables.foos) mapper(Foo, foos, properties={ 'foos':relationship(Foo, primaryjoin=foos.c.id>foos.c.fid, foreign_keys=[foos.c.fid])}) mapper(Bar, bars) self._assert_raises_no_equality(configure_mappers, "foos.id > foos.fid", "Foo.foos", "primary" ) def test_no_equated_viewonly(self): bars, Bar, bars_with_fks, foos_with_fks, Foo, foos = (self.tables.bars, self.classes.Bar, self.tables.bars_with_fks, self.tables.foos_with_fks, self.classes.Foo, self.tables.foos) mapper(Foo, foos, properties={ 'bars':relationship(Bar, primaryjoin=foos.c.id>bars.c.fid, viewonly=True)}) mapper(Bar, bars) self._assert_raises_no_relevant_fks( sa.orm.configure_mappers, "foos.id > bars.fid", "Foo.bars", "primary" ) sa.orm.clear_mappers() mapper(Foo, foos_with_fks, properties={ 'bars':relationship(Bar, primaryjoin=foos_with_fks.c.id>bars_with_fks.c.fid, viewonly=True)}) mapper(Bar, bars_with_fks) sa.orm.configure_mappers() def test_no_equated_self_ref_viewonly(self): bars, Bar, bars_with_fks, foos_with_fks, Foo, foos = (self.tables.bars, self.classes.Bar, self.tables.bars_with_fks, self.tables.foos_with_fks, self.classes.Foo, self.tables.foos) mapper(Foo, foos, properties={ 'foos':relationship(Foo, primaryjoin=foos.c.id>foos.c.fid, viewonly=True)}) mapper(Bar, bars) self._assert_raises_no_relevant_fks( sa.orm.configure_mappers, "foos.id > foos.fid", "Foo.foos", "primary" ) sa.orm.clear_mappers() mapper(Foo, foos_with_fks, properties={ 'foos':relationship(Foo, primaryjoin=foos_with_fks.c.id>foos_with_fks.c.fid, viewonly=True)}) mapper(Bar, bars_with_fks) sa.orm.configure_mappers() def 
test_no_equated_self_ref_viewonly_fks(self): Foo, foos = self.classes.Foo, self.tables.foos mapper(Foo, foos, properties={ 'foos':relationship(Foo, primaryjoin=foos.c.id>foos.c.fid, viewonly=True, foreign_keys=[foos.c.fid])}) sa.orm.configure_mappers() eq_(Foo.foos.property.local_remote_pairs, [(foos.c.id, foos.c.fid)]) def test_equated(self): bars, Bar, bars_with_fks, foos_with_fks, Foo, foos = (self.tables.bars, self.classes.Bar, self.tables.bars_with_fks, self.tables.foos_with_fks, self.classes.Foo, self.tables.foos) mapper(Foo, foos, properties={ 'bars':relationship(Bar, primaryjoin=foos.c.id==bars.c.fid)}) mapper(Bar, bars) self._assert_raises_no_relevant_fks( configure_mappers, "foos.id = bars.fid", "Foo.bars", "primary" ) sa.orm.clear_mappers() mapper(Foo, foos_with_fks, properties={ 'bars':relationship(Bar, primaryjoin=foos_with_fks.c.id==bars_with_fks.c.fid)}) mapper(Bar, bars_with_fks) sa.orm.configure_mappers() def test_equated_self_ref(self): Foo, foos = self.classes.Foo, self.tables.foos mapper(Foo, foos, properties={ 'foos':relationship(Foo, primaryjoin=foos.c.id==foos.c.fid)}) self._assert_raises_no_relevant_fks( configure_mappers, "foos.id = foos.fid", "Foo.foos", "primary" ) def test_equated_self_ref_wrong_fks(self): bars, Foo, foos = (self.tables.bars, self.classes.Foo, self.tables.foos) mapper(Foo, foos, properties={ 'foos':relationship(Foo, primaryjoin=foos.c.id==foos.c.fid, foreign_keys=[bars.c.id])}) self._assert_raises_no_relevant_fks( configure_mappers, "foos.id = foos.fid", "Foo.foos", "primary" ) class InvalidRelationshipEscalationTestM2M(_RelationshipErrors, fixtures.MappedTest): @classmethod def define_tables(cls, metadata): Table('foos', metadata, Column('id', Integer, primary_key=True)) Table('foobars', metadata, Column('fid', Integer), Column('bid', Integer)) Table('bars', metadata, Column('id', Integer, primary_key=True)) Table('foobars_with_fks', metadata, Column('fid', Integer, ForeignKey('foos.id')), Column('bid', Integer, ForeignKey('bars.id')) ) Table('foobars_with_many_columns', metadata, Column('fid', Integer), Column('bid', Integer), Column('fid1', Integer), Column('bid1', Integer), Column('fid2', Integer), Column('bid2', Integer), ) @classmethod def setup_classes(cls): class Foo(cls.Basic): pass class Bar(cls.Basic): pass def test_no_join(self): foobars, bars, Foo, Bar, foos = (self.tables.foobars, self.tables.bars, self.classes.Foo, self.classes.Bar, self.tables.foos) mapper(Foo, foos, properties={ 'bars': relationship(Bar, secondary=foobars)}) mapper(Bar, bars) self._assert_raises_no_join( configure_mappers, "Foo.bars", "foobars" ) def test_no_secondaryjoin(self): foobars, bars, Foo, Bar, foos = (self.tables.foobars, self.tables.bars, self.classes.Foo, self.classes.Bar, self.tables.foos) mapper(Foo, foos, properties={ 'bars': relationship(Bar, secondary=foobars, primaryjoin=foos.c.id > foobars.c.fid)}) mapper(Bar, bars) self._assert_raises_no_join( configure_mappers, "Foo.bars", "foobars" ) def test_no_fks(self): foobars_with_many_columns, bars, Bar, foobars, Foo, foos = (self.tables.foobars_with_many_columns, self.tables.bars, self.classes.Bar, self.tables.foobars, self.classes.Foo, self.tables.foos) mapper(Foo, foos, properties={ 'bars': relationship(Bar, secondary=foobars, primaryjoin=foos.c.id==foobars.c.fid, secondaryjoin=foobars.c.bid==bars.c.id)}) mapper(Bar, bars) sa.orm.configure_mappers() eq_( Foo.bars.property.synchronize_pairs, [(foos.c.id, foobars.c.fid)] ) eq_( Foo.bars.property.secondary_synchronize_pairs, [(bars.c.id, 
foobars.c.bid)] ) sa.orm.clear_mappers() mapper(Foo, foos, properties={ 'bars': relationship(Bar, secondary=foobars_with_many_columns, primaryjoin=foos.c.id == foobars_with_many_columns.c.fid, secondaryjoin=foobars_with_many_columns.c.bid == bars.c.id)}) mapper(Bar, bars) sa.orm.configure_mappers() eq_( Foo.bars.property.synchronize_pairs, [(foos.c.id, foobars_with_many_columns.c.fid)] ) eq_( Foo.bars.property.secondary_synchronize_pairs, [(bars.c.id, foobars_with_many_columns.c.bid)] ) def test_local_col_setup(self): foobars_with_fks, bars, Bar, Foo, foos = ( self.tables.foobars_with_fks, self.tables.bars, self.classes.Bar, self.classes.Foo, self.tables.foos) # ensure m2m backref is set up with correct annotations # [ticket:2578] mapper(Foo, foos, properties={ 'bars': relationship(Bar, secondary=foobars_with_fks, backref="foos") }) mapper(Bar, bars) sa.orm.configure_mappers() eq_( Foo.bars.property._join_condition.local_columns, set([foos.c.id]) ) eq_( Bar.foos.property._join_condition.local_columns, set([bars.c.id]) ) def test_bad_primaryjoin(self): foobars_with_fks, bars, Bar, foobars, Foo, foos = (self.tables.foobars_with_fks, self.tables.bars, self.classes.Bar, self.tables.foobars, self.classes.Foo, self.tables.foos) mapper(Foo, foos, properties={ 'bars': relationship(Bar, secondary=foobars, primaryjoin=foos.c.id > foobars.c.fid, secondaryjoin=foobars.c.bid<=bars.c.id)}) mapper(Bar, bars) self._assert_raises_no_equality( configure_mappers, 'foos.id > foobars.fid', "Foo.bars", "primary") sa.orm.clear_mappers() mapper(Foo, foos, properties={ 'bars': relationship(Bar, secondary=foobars_with_fks, primaryjoin=foos.c.id > foobars_with_fks.c.fid, secondaryjoin=foobars_with_fks.c.bid<=bars.c.id)}) mapper(Bar, bars) self._assert_raises_no_equality( configure_mappers, 'foos.id > foobars_with_fks.fid', "Foo.bars", "primary") sa.orm.clear_mappers() mapper(Foo, foos, properties={ 'bars': relationship(Bar, secondary=foobars_with_fks, primaryjoin=foos.c.id > foobars_with_fks.c.fid, secondaryjoin=foobars_with_fks.c.bid<=bars.c.id, viewonly=True)}) mapper(Bar, bars) sa.orm.configure_mappers() def test_bad_secondaryjoin(self): foobars, bars, Foo, Bar, foos = (self.tables.foobars, self.tables.bars, self.classes.Foo, self.classes.Bar, self.tables.foos) mapper(Foo, foos, properties={ 'bars':relationship(Bar, secondary=foobars, primaryjoin=foos.c.id == foobars.c.fid, secondaryjoin=foobars.c.bid <= bars.c.id, foreign_keys=[foobars.c.fid])}) mapper(Bar, bars) self._assert_raises_no_relevant_fks( configure_mappers, "foobars.bid <= bars.id", "Foo.bars", "secondary" ) def test_no_equated_secondaryjoin(self): foobars, bars, Foo, Bar, foos = (self.tables.foobars, self.tables.bars, self.classes.Foo, self.classes.Bar, self.tables.foos) mapper(Foo, foos, properties={ 'bars':relationship(Bar, secondary=foobars, primaryjoin=foos.c.id == foobars.c.fid, secondaryjoin=foobars.c.bid <= bars.c.id, foreign_keys=[foobars.c.fid, foobars.c.bid])}) mapper(Bar, bars) self._assert_raises_no_equality( configure_mappers, "foobars.bid <= bars.id", "Foo.bars", "secondary" ) class ActiveHistoryFlagTest(_fixtures.FixtureTest): run_inserts = None run_deletes = None def _test_attribute(self, obj, attrname, newvalue): sess = Session() sess.add(obj) oldvalue = getattr(obj, attrname) sess.commit() # expired assert attrname not in obj.__dict__ setattr(obj, attrname, newvalue) eq_( attributes.get_history(obj, attrname), ([newvalue,], (), [oldvalue,]) ) def test_column_property_flag(self): User, users = self.classes.User, self.tables.users 
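# Editorial note (hedged): active_history=True asks the attribute system to load the
# existing value of ``name`` before it is replaced, so attributes.get_history() in
# _test_attribute() can immediately report ([new], (), [old]) for the expired
# instance instead of deferring the old-value lookup to flush time.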
mapper(User, users, properties={ 'name':column_property(users.c.name, active_history=True) }) u1 = User(name='jack') self._test_attribute(u1, 'name', 'ed') def test_relationship_property_flag(self): Address, addresses, users, User = (self.classes.Address, self.tables.addresses, self.tables.users, self.classes.User) mapper(Address, addresses, properties={ 'user':relationship(User, active_history=True) }) mapper(User, users) u1 = User(name='jack') u2 = User(name='ed') a1 = Address(email_address='a1', user=u1) self._test_attribute(a1, 'user', u2) def test_composite_property_flag(self): Order, orders = self.classes.Order, self.tables.orders class MyComposite(object): def __init__(self, description, isopen): self.description = description self.isopen = isopen def __composite_values__(self): return [self.description, self.isopen] def __eq__(self, other): return isinstance(other, MyComposite) and \ other.description == self.description mapper(Order, orders, properties={ 'composite':composite( MyComposite, orders.c.description, orders.c.isopen, active_history=True) }) o1 = Order(composite=MyComposite('foo', 1)) self._test_attribute(o1, "composite", MyComposite('bar', 1)) class RelationDeprecationTest(fixtures.MappedTest): """test usage of the old 'relation' function.""" run_inserts = 'once' run_deletes = None @classmethod def define_tables(cls, metadata): Table('users_table', metadata, Column('id', Integer, primary_key=True), Column('name', String(64))) Table('addresses_table', metadata, Column('id', Integer, primary_key=True), Column('user_id', Integer, ForeignKey('users_table.id')), Column('email_address', String(128)), Column('purpose', String(16)), Column('bounces', Integer, default=0)) @classmethod def setup_classes(cls): class User(cls.Basic): pass class Address(cls.Basic): pass @classmethod def fixtures(cls): return dict( users_table=( ('id', 'name'), (1, 'jack'), (2, 'ed'), (3, 'fred'), (4, 'chuck')), addresses_table=( ('id', 'user_id', 'email_address', 'purpose', 'bounces'), (1, 1, 'jack@jack.home', 'Personal', 0), (2, 1, 'jack@jack.bizz', 'Work', 1), (3, 2, 'ed@foo.bar', 'Personal', 0), (4, 3, 'fred@the.fred', 'Personal', 10))) def test_relation(self): addresses_table, User, users_table, Address = (self.tables.addresses_table, self.classes.User, self.tables.users_table, self.classes.Address) mapper(User, users_table, properties=dict( addresses=relation(Address, backref='user'), )) mapper(Address, addresses_table) session = create_session() ed = session.query(User).filter(User.addresses.any( Address.email_address == 'ed@foo.bar')).one() SQLAlchemy-0.8.4/test/orm/test_scoping.py0000644000076500000240000000605712251147172021072 0ustar classicstaff00000000000000from sqlalchemy.testing import assert_raises, assert_raises_message import sqlalchemy as sa from sqlalchemy import testing from sqlalchemy.orm import scoped_session from sqlalchemy import Integer, String, ForeignKey from sqlalchemy.testing.schema import Table, Column from sqlalchemy.orm import mapper, relationship, query from sqlalchemy.testing import eq_ from sqlalchemy.testing import fixtures class _ScopedTest(fixtures.MappedTest): """Adds another lookup bucket to emulate Session globals.""" run_setup_mappers = 'once' @classmethod def setup_class(cls): cls.scoping = _base.adict() super(_ScopedTest, cls).setup_class() @classmethod def teardown_class(cls): cls.scoping.clear() super(_ScopedTest, cls).teardown_class() class ScopedSessionTest(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): Table('table1', metadata, 
Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('data', String(30))) Table('table2', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('someid', None, ForeignKey('table1.id'))) def test_basic(self): table2, table1 = self.tables.table2, self.tables.table1 Session = scoped_session(sa.orm.sessionmaker()) class CustomQuery(query.Query): pass class SomeObject(fixtures.ComparableEntity): query = Session.query_property() class SomeOtherObject(fixtures.ComparableEntity): query = Session.query_property() custom_query = Session.query_property(query_cls=CustomQuery) mapper(SomeObject, table1, properties={ 'options':relationship(SomeOtherObject)}) mapper(SomeOtherObject, table2) s = SomeObject(id=1, data="hello") sso = SomeOtherObject() s.options.append(sso) Session.add(s) Session.commit() Session.refresh(sso) Session.remove() eq_(SomeObject(id=1, data="hello", options=[SomeOtherObject(someid=1)]), Session.query(SomeObject).one()) eq_(SomeObject(id=1, data="hello", options=[SomeOtherObject(someid=1)]), SomeObject.query.one()) eq_(SomeOtherObject(someid=1), SomeOtherObject.query.filter( SomeOtherObject.someid == sso.someid).one()) assert isinstance(SomeOtherObject.query, query.Query) assert not isinstance(SomeOtherObject.query, CustomQuery) assert isinstance(SomeOtherObject.custom_query, query.Query) def test_config_errors(self): Session = scoped_session(sa.orm.sessionmaker()) s = Session() assert_raises_message( sa.exc.InvalidRequestError, "Scoped session is already present", Session, bind=testing.db ) assert_raises_message( sa.exc.SAWarning, "At least one scoped session is already present. ", Session.configure, bind=testing.db ) SQLAlchemy-0.8.4/test/orm/test_selectable.py0000644000076500000240000000471312251147172021530 0ustar classicstaff00000000000000"""Generic mapping to Select statements""" from sqlalchemy.testing import assert_raises, assert_raises_message import sqlalchemy as sa from sqlalchemy import testing from sqlalchemy import String, Integer, select from sqlalchemy.testing.schema import Table, Column from sqlalchemy.orm import mapper, Session from sqlalchemy.testing import eq_, AssertsCompiledSQL from sqlalchemy.testing import fixtures # TODO: more tests mapping to selects class SelectableNoFromsTest(fixtures.MappedTest, AssertsCompiledSQL): @classmethod def define_tables(cls, metadata): Table('common', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('data', Integer), Column('extra', String(45))) @classmethod def setup_classes(cls): class Subset(cls.Comparable): pass def test_no_tables(self): Subset = self.classes.Subset selectable = select(["x", "y", "z"]).alias() mapper(Subset, selectable, primary_key=[selectable.c.x]) self.assert_compile( Session().query(Subset), "SELECT anon_1.x, anon_1.y, anon_1.z FROM (SELECT x, y, z) AS anon_1", use_default_dialect=True ) def test_no_table_needs_pl(self): Subset = self.classes.Subset selectable = select(["x", "y", "z"]).alias() assert_raises_message( sa.exc.ArgumentError, "could not assemble any primary key columns", mapper, Subset, selectable ) def test_no_selects(self): Subset, common = self.classes.Subset, self.tables.common subset_select = select([common.c.id, common.c.data]) assert_raises(sa.exc.InvalidRequestError, mapper, Subset, subset_select) def test_basic(self): Subset, common = self.classes.Subset, self.tables.common subset_select = select([common.c.id, common.c.data]).alias() subset_mapper = mapper(Subset, subset_select) sess 
= Session(bind=testing.db) sess.add(Subset(data=1)) sess.flush() sess.expunge_all() eq_(sess.query(Subset).all(), [Subset(data=1)]) eq_(sess.query(Subset).filter(Subset.data==1).one(), Subset(data=1)) eq_(sess.query(Subset).filter(Subset.data!=1).first(), None) subset_select = sa.orm.class_mapper(Subset).mapped_table eq_(sess.query(Subset).filter(subset_select.c.data==1).one(), Subset(data=1)) SQLAlchemy-0.8.4/test/orm/test_session.py0000644000076500000240000014344712251150016021107 0ustar classicstaff00000000000000from sqlalchemy.testing import eq_, assert_raises, \ assert_raises_message from sqlalchemy.testing.util import gc_collect from sqlalchemy.testing import pickleable from sqlalchemy.util import pickle import inspect from sqlalchemy.orm import create_session, sessionmaker, attributes, \ make_transient, Session import sqlalchemy as sa from sqlalchemy.testing import engines, config from sqlalchemy import testing from sqlalchemy import Integer, String, Sequence from sqlalchemy.testing.schema import Table, Column from sqlalchemy.orm import mapper, relationship, backref, joinedload, \ exc as orm_exc, object_session, was_deleted from sqlalchemy.util import pypy from sqlalchemy.testing import fixtures from test.orm import _fixtures from sqlalchemy import event, ForeignKey class BindTest(_fixtures.FixtureTest): run_inserts = None def test_mapped_binds(self): Address, addresses, users, User = (self.classes.Address, self.tables.addresses, self.tables.users, self.classes.User) # ensure tables are unbound m2 = sa.MetaData() users_unbound = users.tometadata(m2) addresses_unbound = addresses.tometadata(m2) mapper(Address, addresses_unbound) mapper(User, users_unbound, properties={ 'addresses': relationship(Address, backref=backref("user", cascade="all"), cascade="all")}) sess = Session(binds={User: self.metadata.bind, Address: self.metadata.bind}) u1 = User(id=1, name='ed') sess.add(u1) eq_(sess.query(User).filter(User.id == 1).all(), [User(id=1, name='ed')]) # test expression binding sess.execute(users_unbound.insert(), params=dict(id=2, name='jack')) eq_(sess.execute(users_unbound.select(users_unbound.c.id == 2)).fetchall(), [(2, 'jack')]) eq_(sess.execute(users_unbound.select(User.id == 2)).fetchall(), [(2, 'jack')]) sess.execute(users_unbound.delete()) eq_(sess.execute(users_unbound.select()).fetchall(), []) sess.close() def test_table_binds(self): Address, addresses, users, User = (self.classes.Address, self.tables.addresses, self.tables.users, self.classes.User) # ensure tables are unbound m2 = sa.MetaData() users_unbound = users.tometadata(m2) addresses_unbound = addresses.tometadata(m2) mapper(Address, addresses_unbound) mapper(User, users_unbound, properties={ 'addresses': relationship(Address, backref=backref("user", cascade="all"), cascade="all")}) Session = sessionmaker(binds={users_unbound: self.metadata.bind, addresses_unbound: self.metadata.bind}) sess = Session() u1 = User(id=1, name='ed') sess.add(u1) eq_(sess.query(User).filter(User.id == 1).all(), [User(id=1, name='ed')]) sess.execute(users_unbound.insert(), params=dict(id=2, name='jack')) eq_(sess.execute(users_unbound.select(users_unbound.c.id == 2)).fetchall(), [(2, 'jack')]) eq_(sess.execute(users_unbound.select(User.id == 2)).fetchall(), [(2, 'jack')]) sess.execute(users_unbound.delete()) eq_(sess.execute(users_unbound.select()).fetchall(), []) sess.close() def test_bind_from_metadata(self): users, User = self.tables.users, self.classes.User mapper(User, users) session = create_session() session.execute(users.insert(), 
dict(name='Johnny')) assert len(session.query(User).filter_by(name='Johnny').all()) == 1 session.execute(users.delete()) assert len(session.query(User).filter_by(name='Johnny').all()) == 0 session.close() def test_bind_arguments(self): users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(User, users) mapper(Address, addresses) e1 = engines.testing_engine() e2 = engines.testing_engine() e3 = engines.testing_engine() sess = Session(e3) sess.bind_mapper(User, e1) sess.bind_mapper(Address, e2) assert sess.connection().engine is e3 assert sess.connection(bind=e1).engine is e1 assert sess.connection(mapper=Address, bind=e1).engine is e1 assert sess.connection(mapper=Address).engine is e2 assert sess.connection(clause=addresses.select()).engine is e2 assert sess.connection(mapper=User, clause=addresses.select()).engine is e1 assert sess.connection(mapper=User, clause=addresses.select(), bind=e2).engine is e2 sess.close() @engines.close_open_connections def test_bound_connection(self): users, User = self.tables.users, self.classes.User mapper(User, users) c = testing.db.connect() sess = create_session(bind=c) sess.begin() transaction = sess.transaction u = User(name='u1') sess.add(u) sess.flush() assert transaction._connection_for_bind(testing.db) \ is transaction._connection_for_bind(c) is c assert_raises_message(sa.exc.InvalidRequestError, 'Session already has a Connection ' 'associated', transaction._connection_for_bind, testing.db.connect()) transaction.rollback() assert len(sess.query(User).all()) == 0 sess.close() def test_bound_connection_transactional(self): User, users = self.classes.User, self.tables.users mapper(User, users) c = testing.db.connect() sess = create_session(bind=c, autocommit=False) u = User(name='u1') sess.add(u) sess.flush() sess.close() assert not c.in_transaction() assert c.scalar("select count(1) from users") == 0 sess = create_session(bind=c, autocommit=False) u = User(name='u2') sess.add(u) sess.flush() sess.commit() assert not c.in_transaction() assert c.scalar("select count(1) from users") == 1 c.execute("delete from users") assert c.scalar("select count(1) from users") == 0 c = testing.db.connect() trans = c.begin() sess = create_session(bind=c, autocommit=True) u = User(name='u3') sess.add(u) sess.flush() assert c.in_transaction() trans.commit() assert not c.in_transaction() assert c.scalar("select count(1) from users") == 1 class ExecutionTest(_fixtures.FixtureTest): run_inserts = None @testing.requires.sequences def test_sequence_execute(self): seq = Sequence("some_sequence") seq.create(testing.db) try: sess = create_session(bind=testing.db) eq_(sess.execute(seq), 1) finally: seq.drop(testing.db) def test_textual_execute(self): """test that Session.execute() converts to text()""" users = self.tables.users sess = create_session(bind=self.metadata.bind) users.insert().execute(id=7, name='jack') # use :bindparam style eq_(sess.execute("select * from users where id=:id", {'id': 7}).fetchall(), [(7, u'jack')]) # use :bindparam style eq_(sess.scalar("select id from users where id=:id", {'id': 7}), 7) def test_parameter_execute(self): users = self.tables.users sess = Session(bind=testing.db) sess.execute(users.insert(), [ {"id": 7, "name": "u7"}, {"id": 8, "name": "u8"} ] ) sess.execute(users.insert(), {"id": 9, "name": "u9"}) eq_( sess.execute(sa.select([users.c.id]).\ order_by(users.c.id)).fetchall(), [(7, ), (8, ), (9, )] ) class TransScopingTest(_fixtures.FixtureTest): run_inserts = None def 
test_no_close_on_flush(self): """Flush() doesn't close a connection the session didn't open""" User, users = self.classes.User, self.tables.users c = testing.db.connect() c.execute("select * from users") mapper(User, users) s = create_session(bind=c) s.add(User(name='first')) s.flush() c.execute("select * from users") def test_close(self): """close() doesn't close a connection the session didn't open""" User, users = self.classes.User, self.tables.users c = testing.db.connect() c.execute("select * from users") mapper(User, users) s = create_session(bind=c) s.add(User(name='first')) s.flush() c.execute("select * from users") s.close() c.execute("select * from users") @testing.requires.independent_connections @engines.close_open_connections def test_transaction(self): User, users = self.classes.User, self.tables.users mapper(User, users) conn1 = testing.db.connect() conn2 = testing.db.connect() sess = create_session(autocommit=False, bind=conn1) u = User(name='x') sess.add(u) sess.flush() assert conn1.execute("select count(1) from users").scalar() == 1 assert conn2.execute("select count(1) from users").scalar() == 0 sess.commit() assert conn1.execute("select count(1) from users").scalar() == 1 assert testing.db.connect().execute('select count(1) from users' ).scalar() == 1 sess.close() class SessionUtilTest(_fixtures.FixtureTest): run_inserts = None def test_object_session_raises(self): User = self.classes.User assert_raises( orm_exc.UnmappedInstanceError, object_session, object() ) assert_raises( orm_exc.UnmappedInstanceError, object_session, User() ) def test_make_transient(self): users, User = self.tables.users, self.classes.User mapper(User, users) sess = create_session() sess.add(User(name='test')) sess.flush() u1 = sess.query(User).first() make_transient(u1) assert u1 not in sess sess.add(u1) assert u1 in sess.new u1 = sess.query(User).first() sess.expunge(u1) make_transient(u1) sess.add(u1) assert u1 in sess.new # test expired attributes # get unexpired u1 = sess.query(User).first() sess.expire(u1) make_transient(u1) assert u1.id is None assert u1.name is None # works twice make_transient(u1) sess.close() u1.name = 'test2' sess.add(u1) sess.flush() assert u1 in sess sess.delete(u1) sess.flush() assert u1 not in sess assert_raises(sa.exc.InvalidRequestError, sess.add, u1) make_transient(u1) sess.add(u1) sess.flush() assert u1 in sess def test_make_transient_plus_rollback(self): # test for [ticket:2182] users, User = self.tables.users, self.classes.User mapper(User, users) sess = Session() u1 = User(name='test') sess.add(u1) sess.commit() sess.delete(u1) sess.flush() make_transient(u1) sess.rollback() class SessionStateTest(_fixtures.FixtureTest): run_inserts = None @testing.requires.independent_connections @engines.close_open_connections def test_autoflush(self): User, users = self.classes.User, self.tables.users bind = self.metadata.bind mapper(User, users) conn1 = bind.connect() conn2 = bind.connect() sess = create_session(bind=conn1, autocommit=False, autoflush=True) u = User() u.name = 'ed' sess.add(u) u2 = sess.query(User).filter_by(name='ed').one() assert u2 is u eq_(conn1.execute("select count(1) from users").scalar(), 1) eq_(conn2.execute("select count(1) from users").scalar(), 0) sess.commit() eq_(conn1.execute("select count(1) from users").scalar(), 1) eq_(bind.connect().execute("select count(1) from users").scalar(), 1) sess.close() @testing.requires.python26 def test_with_no_autoflush(self): User, users = self.classes.User, self.tables.users mapper(User, users) sess = 
Session() u = User() u.name = 'ed' sess.add(u) def go(obj): assert u not in sess.query(User).all() testing.run_as_contextmanager(sess.no_autoflush, go) assert u in sess.new assert u in sess.query(User).all() assert u not in sess.new def test_deleted_flag(self): users, User = self.tables.users, self.classes.User mapper(User, users) sess = sessionmaker()() u1 = User(name='u1') sess.add(u1) sess.commit() sess.delete(u1) sess.flush() assert u1 not in sess assert_raises(sa.exc.InvalidRequestError, sess.add, u1) sess.rollback() assert u1 in sess sess.delete(u1) sess.commit() assert u1 not in sess assert_raises(sa.exc.InvalidRequestError, sess.add, u1) make_transient(u1) sess.add(u1) sess.commit() eq_(sess.query(User).count(), 1) def test_autoflush_expressions(self): """test that an expression which is dependent on object state is evaluated after the session autoflushes. This is the lambda inside of strategies.py lazy_clause. """ users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(User, users, properties={ 'addresses': relationship(Address, backref="user")}) mapper(Address, addresses) sess = create_session(autoflush=True, autocommit=False) u = User(name='ed', addresses=[Address(email_address='foo')]) sess.add(u) eq_(sess.query(Address).filter(Address.user == u).one(), Address(email_address='foo')) # still works after "u" is garbage collected sess.commit() sess.close() u = sess.query(User).get(u.id) q = sess.query(Address).filter(Address.user == u) del u gc_collect() eq_(q.one(), Address(email_address='foo')) @testing.requires.independent_connections @engines.close_open_connections def test_autoflush_unbound(self): User, users = self.classes.User, self.tables.users mapper(User, users) try: sess = create_session(autocommit=False, autoflush=True) u = User() u.name = 'ed' sess.add(u) u2 = sess.query(User).filter_by(name='ed').one() assert u2 is u assert sess.execute('select count(1) from users', mapper=User).scalar() == 1 assert testing.db.connect().execute('select count(1) from ' 'users').scalar() == 0 sess.commit() assert sess.execute('select count(1) from users', mapper=User).scalar() == 1 assert testing.db.connect().execute('select count(1) from ' 'users').scalar() == 1 sess.close() except: sess.rollback() raise @engines.close_open_connections def test_autoflush_2(self): User, users = self.classes.User, self.tables.users mapper(User, users) conn1 = testing.db.connect() sess = create_session(bind=conn1, autocommit=False, autoflush=True) u = User() u.name = 'ed' sess.add(u) sess.commit() assert conn1.execute('select count(1) from users').scalar() == 1 assert testing.db.connect().execute('select count(1) from users' ).scalar() == 1 sess.commit() def test_autocommit_doesnt_raise_on_pending(self): User, users = self.classes.User, self.tables.users mapper(User, users) session = create_session(autocommit=True) session.add(User(name='ed')) session.begin() session.flush() session.commit() def test_active_flag(self): sess = create_session(bind=config.db, autocommit=True) assert not sess.is_active sess.begin() assert sess.is_active sess.rollback() assert not sess.is_active @engines.close_open_connections def test_add_delete(self): User, Address, addresses, users = (self.classes.User, self.classes.Address, self.tables.addresses, self.tables.users) s = create_session() mapper(User, users, properties={ 'addresses': relationship(Address, cascade="all, delete") }) mapper(Address, addresses) user = User(name='u1') 
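# Editorial note (hedged): a brand-new (transient) object has never been persisted,
# so Session.delete() on it is expected to raise InvalidRequestError; the object only
# becomes deletable once it has been add()-ed and flushed a few lines further down.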
assert_raises_message(sa.exc.InvalidRequestError, 'is not persisted', s.delete, user) s.add(user) s.flush() user = s.query(User).one() s.expunge(user) assert user not in s # modify outside of session, assert changes remain/get saved user.name = "fred" s.add(user) assert user in s assert user in s.dirty s.flush() s.expunge_all() assert s.query(User).count() == 1 user = s.query(User).one() assert user.name == 'fred' # ensure its not dirty if no changes occur s.expunge_all() assert user not in s s.add(user) assert user in s assert user not in s.dirty s2 = create_session() assert_raises_message(sa.exc.InvalidRequestError, 'is already attached to session', s2.delete, user) u2 = s2.query(User).get(user.id) assert_raises_message(sa.exc.InvalidRequestError, 'another instance with key', s.delete, u2) s.expire(user) s.expunge(user) assert user not in s s.delete(user) assert user in s s.flush() assert user not in s assert s.query(User).count() == 0 @testing.uses_deprecated() def test_identity_conflict(self): users, User = self.tables.users, self.classes.User mapper(User, users) for s in ( create_session(), create_session(weak_identity_map=False), ): users.delete().execute() u1 = User(name="ed") s.add(u1) s.flush() s.expunge(u1) u2 = s.query(User).first() s.expunge(u2) s.identity_map.add(sa.orm.attributes.instance_state(u1)) assert_raises(AssertionError, s.identity_map.add, sa.orm.attributes.instance_state(u2)) def test_pickled_update(self): users, User = self.tables.users, pickleable.User mapper(User, users) sess1 = create_session() sess2 = create_session() u1 = User(name='u1') sess1.add(u1) assert_raises_message(sa.exc.InvalidRequestError, 'already attached to session', sess2.add, u1) u2 = pickle.loads(pickle.dumps(u1)) sess2.add(u2) def test_duplicate_update(self): users, User = self.tables.users, self.classes.User mapper(User, users) Session = sessionmaker() sess = Session() u1 = User(name='u1') sess.add(u1) sess.flush() assert u1.id is not None sess.expunge(u1) assert u1 not in sess assert Session.object_session(u1) is None u2 = sess.query(User).get(u1.id) assert u2 is not None and u2 is not u1 assert u2 in sess assert_raises(Exception, lambda: sess.add(u1)) sess.expunge(u2) assert u2 not in sess assert Session.object_session(u2) is None u1.name = "John" u2.name = "Doe" sess.add(u1) assert u1 in sess assert Session.object_session(u1) is sess sess.flush() sess.expunge_all() u3 = sess.query(User).get(u1.id) assert u3 is not u1 and u3 is not u2 and u3.name == u1.name def test_no_double_save(self): users = self.tables.users sess = create_session() class Foo(object): def __init__(self): sess.add(self) class Bar(Foo): def __init__(self): sess.add(self) Foo.__init__(self) mapper(Foo, users) mapper(Bar, users) b = Bar() assert b in sess assert len(list(sess)) == 1 def test_identity_map_mutate(self): users, User = self.tables.users, self.classes.User mapper(User, users) sess = Session() sess.add_all([User(name='u1'), User(name='u2'), User(name='u3')]) sess.commit() # TODO: what are we testing here ? that iteritems() can # withstand a change? should this be # more directly attempting to manipulate the identity_map ? 
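# Editorial note (hedged): the loop below iterates the weak-referencing identity map
# while dropping the last strong reference to u3 and forcing a gc pass partway
# through, checking that iteritems() tolerates entries disappearing mid-iteration.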
u1, u2, u3 = sess.query(User).all() for i, (key, value) in enumerate(sess.identity_map.iteritems()): if i == 2: del u3 gc_collect() def _test_extra_dirty_state(self): users, User = self.tables.users, self.classes.User m = mapper(User, users) s = Session() @event.listens_for(m, "after_update") def e(mapper, conn, target): sess = object_session(target) for entry in sess.identity_map.values(): entry.name = "5" a1, a2 = User(name="1"), User(name="2") s.add_all([a1, a2]) s.commit() a1.name = "3" return s, a1, a2 def test_extra_dirty_state_post_flush_warning(self): s, a1, a2 = self._test_extra_dirty_state() assert_raises_message( sa.exc.SAWarning, "Attribute history events accumulated on 1 previously " "clean instances", s.commit ) def test_extra_dirty_state_post_flush_state(self): s, a1, a2 = self._test_extra_dirty_state() canary = [] @event.listens_for(s, "after_flush_postexec") def e(sess, ctx): canary.append(bool(sess.identity_map._modified)) @testing.emits_warning("Attribute") def go(): s.commit() go() eq_(canary, [False]) def test_deleted_expunged(self): users, User = self.tables.users, self.classes.User mapper(User, users) sess = Session() sess.add(User(name='x')) sess.commit() u1 = sess.query(User).first() sess.delete(u1) assert not was_deleted(u1) sess.flush() assert was_deleted(u1) assert u1 not in sess assert object_session(u1) is sess sess.commit() assert object_session(u1) is None class SessionStateWFixtureTest(_fixtures.FixtureTest): def test_autoflush_rollback(self): Address, addresses, users, User = (self.classes.Address, self.tables.addresses, self.tables.users, self.classes.User) mapper(Address, addresses) mapper(User, users, properties={ 'addresses': relationship(Address)}) sess = create_session(autocommit=False, autoflush=True) u = sess.query(User).get(8) newad = Address(email_address='a new address') u.addresses.append(newad) u.name = 'some new name' assert u.name == 'some new name' assert len(u.addresses) == 4 assert newad in u.addresses sess.rollback() assert u.name == 'ed' assert len(u.addresses) == 3 assert newad not in u.addresses # pending objects dont get expired assert newad.email_address == 'a new address' def test_expunge_cascade(self): Address, addresses, users, User = (self.classes.Address, self.tables.addresses, self.tables.users, self.classes.User) mapper(Address, addresses) mapper(User, users, properties={ 'addresses': relationship(Address, backref=backref("user", cascade="all"), cascade="all")}) session = create_session() u = session.query(User).filter_by(id=7).one() # get everything to load in both directions print [a.user for a in u.addresses] # then see if expunge fails session.expunge(u) assert sa.orm.object_session(u) is None assert sa.orm.attributes.instance_state(u).session_id is None for a in u.addresses: assert sa.orm.object_session(a) is None assert sa.orm.attributes.instance_state(a).session_id is None class NoCyclesOnTransientDetachedTest(_fixtures.FixtureTest): """Test the instance_state._strong_obj link that it is present only on persistent/pending objects and never transient/detached. 
""" run_inserts = None def setup(self): mapper(self.classes.User, self.tables.users) def _assert_modified(self, u1): assert sa.orm.attributes.instance_state(u1).modified def _assert_not_modified(self, u1): assert not sa.orm.attributes.instance_state(u1).modified def _assert_cycle(self, u1): assert sa.orm.attributes.instance_state(u1)._strong_obj is not None def _assert_no_cycle(self, u1): assert sa.orm.attributes.instance_state(u1)._strong_obj is None def _persistent_fixture(self): User = self.classes.User u1 = User() u1.name = "ed" sess = Session() sess.add(u1) sess.flush() return sess, u1 def test_transient(self): User = self.classes.User u1 = User() u1.name = 'ed' self._assert_no_cycle(u1) self._assert_modified(u1) def test_transient_to_pending(self): User = self.classes.User u1 = User() u1.name = 'ed' self._assert_modified(u1) self._assert_no_cycle(u1) sess = Session() sess.add(u1) self._assert_cycle(u1) sess.flush() self._assert_no_cycle(u1) self._assert_not_modified(u1) def test_dirty_persistent_to_detached_via_expunge(self): sess, u1 = self._persistent_fixture() u1.name = 'edchanged' self._assert_cycle(u1) sess.expunge(u1) self._assert_no_cycle(u1) def test_dirty_persistent_to_detached_via_close(self): sess, u1 = self._persistent_fixture() u1.name = 'edchanged' self._assert_cycle(u1) sess.close() self._assert_no_cycle(u1) def test_clean_persistent_to_detached_via_close(self): sess, u1 = self._persistent_fixture() self._assert_no_cycle(u1) self._assert_not_modified(u1) sess.close() u1.name = 'edchanged' self._assert_modified(u1) self._assert_no_cycle(u1) def test_detached_to_dirty_deleted(self): sess, u1 = self._persistent_fixture() sess.expunge(u1) u1.name = 'edchanged' self._assert_no_cycle(u1) sess.delete(u1) self._assert_cycle(u1) def test_detached_to_dirty_persistent(self): sess, u1 = self._persistent_fixture() sess.expunge(u1) u1.name = 'edchanged' self._assert_modified(u1) self._assert_no_cycle(u1) sess.add(u1) self._assert_cycle(u1) self._assert_modified(u1) def test_detached_to_clean_persistent(self): sess, u1 = self._persistent_fixture() sess.expunge(u1) self._assert_no_cycle(u1) self._assert_not_modified(u1) sess.add(u1) self._assert_no_cycle(u1) self._assert_not_modified(u1) def test_move_persistent_clean(self): sess, u1 = self._persistent_fixture() sess.close() s2 = Session() s2.add(u1) self._assert_no_cycle(u1) self._assert_not_modified(u1) def test_move_persistent_dirty(self): sess, u1 = self._persistent_fixture() u1.name = 'edchanged' self._assert_cycle(u1) self._assert_modified(u1) sess.close() self._assert_no_cycle(u1) s2 = Session() s2.add(u1) self._assert_cycle(u1) self._assert_modified(u1) @testing.requires.predictable_gc def test_move_gc_session_persistent_dirty(self): sess, u1 = self._persistent_fixture() u1.name = 'edchanged' self._assert_cycle(u1) self._assert_modified(u1) del sess gc_collect() self._assert_cycle(u1) s2 = Session() s2.add(u1) self._assert_cycle(u1) self._assert_modified(u1) def test_persistent_dirty_to_expired(self): sess, u1 = self._persistent_fixture() u1.name = 'edchanged' self._assert_cycle(u1) self._assert_modified(u1) sess.expire(u1) self._assert_no_cycle(u1) self._assert_not_modified(u1) class WeakIdentityMapTest(_fixtures.FixtureTest): run_inserts = None @testing.requires.predictable_gc def test_weakref(self): """test the weak-referencing identity map, which strongly- references modified items.""" users, User = self.tables.users, self.classes.User s = create_session() mapper(User, users) s.add(User(name='ed')) s.flush() assert not 
s.dirty user = s.query(User).one() del user gc_collect() assert len(s.identity_map) == 0 user = s.query(User).one() user.name = 'fred' del user gc_collect() assert len(s.identity_map) == 1 assert len(s.dirty) == 1 assert None not in s.dirty s.flush() gc_collect() assert not s.dirty assert not s.identity_map user = s.query(User).one() assert user.name == 'fred' assert s.identity_map @testing.requires.predictable_gc def test_weakref_pickled(self): users, User = self.tables.users, pickleable.User s = create_session() mapper(User, users) s.add(User(name='ed')) s.flush() assert not s.dirty user = s.query(User).one() user.name = 'fred' s.expunge(user) u2 = pickle.loads(pickle.dumps(user)) del user s.add(u2) del u2 gc_collect() assert len(s.identity_map) == 1 assert len(s.dirty) == 1 assert None not in s.dirty s.flush() gc_collect() assert not s.dirty assert not s.identity_map @testing.requires.predictable_gc def test_weakref_with_cycles_o2m(self): Address, addresses, users, User = (self.classes.Address, self.tables.addresses, self.tables.users, self.classes.User) s = sessionmaker()() mapper(User, users, properties={ "addresses": relationship(Address, backref="user") }) mapper(Address, addresses) s.add(User(name="ed", addresses=[Address(email_address="ed1")])) s.commit() user = s.query(User).options(joinedload(User.addresses)).one() user.addresses[0].user # lazyload eq_(user, User(name="ed", addresses=[Address(email_address="ed1")])) del user gc_collect() assert len(s.identity_map) == 0 user = s.query(User).options(joinedload(User.addresses)).one() user.addresses[0].email_address = 'ed2' user.addresses[0].user # lazyload del user gc_collect() assert len(s.identity_map) == 2 s.commit() user = s.query(User).options(joinedload(User.addresses)).one() eq_(user, User(name="ed", addresses=[Address(email_address="ed2")])) @testing.requires.predictable_gc def test_weakref_with_cycles_o2o(self): Address, addresses, users, User = (self.classes.Address, self.tables.addresses, self.tables.users, self.classes.User) s = sessionmaker()() mapper(User, users, properties={ "address": relationship(Address, backref="user", uselist=False) }) mapper(Address, addresses) s.add(User(name="ed", address=Address(email_address="ed1"))) s.commit() user = s.query(User).options(joinedload(User.address)).one() user.address.user eq_(user, User(name="ed", address=Address(email_address="ed1"))) del user gc_collect() assert len(s.identity_map) == 0 user = s.query(User).options(joinedload(User.address)).one() user.address.email_address = 'ed2' user.address.user # lazyload del user gc_collect() assert len(s.identity_map) == 2 s.commit() user = s.query(User).options(joinedload(User.address)).one() eq_(user, User(name="ed", address=Address(email_address="ed2"))) def test_auto_detach_on_gc_session(self): users, User = self.tables.users, self.classes.User mapper(User, users) sess = Session() u1 = User(name='u1') sess.add(u1) sess.commit() # can't add u1 to Session, # already belongs to u2 s2 = Session() assert_raises_message( sa.exc.InvalidRequestError, r".*is already attached to session", s2.add, u1 ) # garbage collect sess del sess gc_collect() # s2 lets it in now despite u1 having # session_key s2.add(u1) assert u1 in s2 class StrongIdentityMapTest(_fixtures.FixtureTest): run_inserts = None @testing.uses_deprecated() def test_strong_ref(self): users, User = self.tables.users, self.classes.User s = create_session(weak_identity_map=False) mapper(User, users) # save user s.add(User(name='u1')) s.flush() user = s.query(User).one() user = 
None print s.identity_map gc_collect() assert len(s.identity_map) == 1 user = s.query(User).one() assert not s.identity_map._modified user.name = 'u2' assert s.identity_map._modified s.flush() eq_(users.select().execute().fetchall(), [(user.id, 'u2')]) @testing.uses_deprecated() @testing.fails_if(lambda: pypy, "pypy has a real GC") @testing.fails_on('+zxjdbc', 'http://www.sqlalchemy.org/trac/ticket/1473') def test_prune(self): users, User = self.tables.users, self.classes.User s = create_session(weak_identity_map=False) mapper(User, users) for o in [User(name='u%s' % x) for x in xrange(10)]: s.add(o) # o is still live after this loop... self.assert_(len(s.identity_map) == 0) self.assert_(s.prune() == 0) s.flush() gc_collect() self.assert_(s.prune() == 9) self.assert_(len(s.identity_map) == 1) id = o.id del o self.assert_(s.prune() == 1) self.assert_(len(s.identity_map) == 0) u = s.query(User).get(id) self.assert_(s.prune() == 0) self.assert_(len(s.identity_map) == 1) u.name = 'squiznart' del u self.assert_(s.prune() == 0) self.assert_(len(s.identity_map) == 1) s.flush() self.assert_(s.prune() == 1) self.assert_(len(s.identity_map) == 0) s.add(User(name='x')) self.assert_(s.prune() == 0) self.assert_(len(s.identity_map) == 0) s.flush() self.assert_(len(s.identity_map) == 1) self.assert_(s.prune() == 1) self.assert_(len(s.identity_map) == 0) u = s.query(User).get(id) s.delete(u) del u self.assert_(s.prune() == 0) self.assert_(len(s.identity_map) == 1) s.flush() self.assert_(s.prune() == 0) self.assert_(len(s.identity_map) == 0) class IsModifiedTest(_fixtures.FixtureTest): run_inserts = None def _default_mapping_fixture(self): User, Address = self.classes.User, self.classes.Address users, addresses = self.tables.users, self.tables.addresses mapper(User, users, properties={ "addresses": relationship(Address) }) mapper(Address, addresses) return User, Address def test_is_modified(self): User, Address = self._default_mapping_fixture() s = create_session() # save user u = User(name='fred') s.add(u) s.flush() s.expunge_all() user = s.query(User).one() assert user not in s.dirty assert not s.is_modified(user) user.name = 'fred' assert user in s.dirty assert not s.is_modified(user) user.name = 'ed' assert user in s.dirty assert s.is_modified(user) s.flush() assert user not in s.dirty assert not s.is_modified(user) a = Address() user.addresses.append(a) assert user in s.dirty assert s.is_modified(user) assert not s.is_modified(user, include_collections=False) def test_is_modified_passive_off(self): """as of 0.8 no SQL is emitted for is_modified() regardless of the passive flag""" User, Address = self._default_mapping_fixture() s = Session() u = User(name='fred', addresses=[ Address(email_address='foo')]) s.add(u) s.commit() u.id def go(): assert not s.is_modified(u) self.assert_sql_count( testing.db, go, 0 ) s.expire_all() u.name = 'newname' # can't predict result here # deterministically, depending on if # 'name' or 'addresses' is tested first mod = s.is_modified(u) addresses_loaded = 'addresses' in u.__dict__ assert mod is not addresses_loaded def test_is_modified_passive_on(self): User, Address = self._default_mapping_fixture() s = Session() u = User(name='fred', addresses=[Address(email_address='foo')]) s.add(u) s.commit() u.id def go(): assert not s.is_modified(u, passive=True) self.assert_sql_count( testing.db, go, 0 ) u.name = 'newname' def go(): assert s.is_modified(u, passive=True) self.assert_sql_count( testing.db, go, 0 ) def test_is_modified_syn(self): User, users = self.classes.User, 
            self.tables.users
        s = sessionmaker()()
        mapper(User, users, properties={'uname': sa.orm.synonym('name')})
        u = User(uname='fred')
        assert s.is_modified(u)
        s.add(u)
        s.commit()
        assert not s.is_modified(u)


class DisposedStates(fixtures.MappedTest):
    run_setup_mappers = 'once'
    run_inserts = 'once'
    run_deletes = None

    @classmethod
    def define_tables(cls, metadata):
        Table('t1', metadata,
              Column('id', Integer, primary_key=True,
                     test_needs_autoincrement=True),
              Column('data', String(50)))

    @classmethod
    def setup_classes(cls):
        class T(cls.Basic):
            def __init__(self, data):
                self.data = data
        mapper(T, cls.tables.t1)

    def teardown(self):
        from sqlalchemy.orm.session import _sessions
        _sessions.clear()
        super(DisposedStates, self).teardown()

    def _set_imap_in_disposal(self, sess, *objs):
        """remove selected objects from the given session, as though they
        were dereferenced and removed from WeakIdentityMap.

        Hardcodes the identity map's "all_states()" method to return the
        full list of states.  This simulates the all_states() method
        returning results, after which some of the states get garbage
        collected (this normally only happens during asynchronous gc).
        The Session now has one or more InstanceState's which have been
        removed from the identity map and disposed.

        Will the Session not trip over this ???  Stay tuned.

        """
        all_states = sess.identity_map.all_states()
        sess.identity_map.all_states = lambda: all_states
        for obj in objs:
            state = attributes.instance_state(obj)
            sess.identity_map.discard(state)
            state._dispose()

    def _test_session(self, **kwargs):
        T = self.classes.T

        sess = create_session(**kwargs)
        data = o1, o2, o3, o4, o5 = [T('t1'), T('t2'), T('t3'), T('t4'),
                                     T('t5')]
        sess.add_all(data)
        sess.flush()

        o1.data = 't1modified'
        o5.data = 't5modified'

        self._set_imap_in_disposal(sess, o2, o4, o5)
        return sess

    def test_flush(self):
        self._test_session().flush()

    def test_clear(self):
        self._test_session().expunge_all()

    def test_close(self):
        self._test_session().close()

    def test_expunge_all(self):
        self._test_session().expunge_all()

    def test_expire_all(self):
        self._test_session().expire_all()

    def test_rollback(self):
        sess = self._test_session(autocommit=False, expire_on_commit=True)
        sess.commit()
        sess.rollback()


class SessionInterface(fixtures.TestBase):
    """Bogus args to Session methods produce actionable exceptions."""

    # TODO: expand with message body assertions.
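# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original test module): the
# "actionable exception" contract that SessionInterface asserts below --
# handing an unmapped object to a Session method such as add() raises
# UnmappedInstanceError instead of failing deep inside the flush machinery.
# ``NotMapped`` is a throwaway class defined only for illustration.
# ---------------------------------------------------------------------------
from sqlalchemy.orm import Session
from sqlalchemy.orm.exc import UnmappedInstanceError

class NotMapped(object):
    pass

try:
    Session().add(NotMapped())
except UnmappedInstanceError:
    pass   # expected: NotMapped has no mapper configured
else:
    raise AssertionError("expected UnmappedInstanceError")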
_class_methods = set(( 'connection', 'execute', 'get_bind', 'scalar')) def _public_session_methods(self): Session = sa.orm.session.Session blacklist = set(('begin', 'query')) ok = set() for meth in Session.public_methods: if meth in blacklist: continue spec = inspect.getargspec(getattr(Session, meth)) if len(spec[0]) > 1 or spec[1]: ok.add(meth) return ok def _map_it(self, cls): return mapper(cls, Table('t', sa.MetaData(), Column('id', Integer, primary_key=True, test_needs_autoincrement=True))) def _test_instance_guards(self, user_arg): watchdog = set() def x_raises_(obj, method, *args, **kw): watchdog.add(method) callable_ = getattr(obj, method) assert_raises(sa.orm.exc.UnmappedInstanceError, callable_, *args, **kw) def raises_(method, *args, **kw): x_raises_(create_session(), method, *args, **kw) raises_('__contains__', user_arg) raises_('add', user_arg) raises_('add_all', (user_arg,)) raises_('delete', user_arg) raises_('expire', user_arg) raises_('expunge', user_arg) # flush will no-op without something in the unit of work def _(): class OK(object): pass self._map_it(OK) s = create_session() s.add(OK()) x_raises_(s, 'flush', (user_arg,)) _() raises_('is_modified', user_arg) raises_('merge', user_arg) raises_('refresh', user_arg) instance_methods = self._public_session_methods() \ - self._class_methods eq_(watchdog, instance_methods, watchdog.symmetric_difference(instance_methods)) def _test_class_guards(self, user_arg): watchdog = set() def raises_(method, *args, **kw): watchdog.add(method) callable_ = getattr(create_session(), method) assert_raises(sa.orm.exc.UnmappedClassError, callable_, *args, **kw) raises_('connection', mapper=user_arg) raises_('execute', 'SELECT 1', mapper=user_arg) raises_('get_bind', mapper=user_arg) raises_('scalar', 'SELECT 1', mapper=user_arg) eq_(watchdog, self._class_methods, watchdog.symmetric_difference(self._class_methods)) def test_unmapped_instance(self): class Unmapped(object): pass self._test_instance_guards(Unmapped()) self._test_class_guards(Unmapped) def test_unmapped_primitives(self): for prim in ('doh', 123, ('t', 'u', 'p', 'l', 'e')): self._test_instance_guards(prim) self._test_class_guards(prim) def test_unmapped_class_for_instance(self): class Unmapped(object): pass self._test_instance_guards(Unmapped) self._test_class_guards(Unmapped) def test_mapped_class_for_instance(self): class Mapped(object): pass self._map_it(Mapped) self._test_instance_guards(Mapped) # no class guards- it would pass. 
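# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original test module): the class-level
# counterpart checked by _test_class_guards above -- passing an unmapped
# class as the mapper= argument to methods like get_bind() raises
# UnmappedClassError.  ``AlsoNotMapped`` is a throwaway class defined only
# for illustration.
# ---------------------------------------------------------------------------
from sqlalchemy.orm import Session
from sqlalchemy.orm.exc import UnmappedClassError

class AlsoNotMapped(object):
    pass

try:
    Session().get_bind(mapper=AlsoNotMapped)
except UnmappedClassError:
    pass   # expected: AlsoNotMapped has no mapper, so no bind can be located
else:
    raise AssertionError("expected UnmappedClassError")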
def test_missing_state(self): class Mapped(object): pass early = Mapped() self._map_it(Mapped) self._test_instance_guards(early) self._test_class_guards(early) class TLTransactionTest(fixtures.MappedTest): run_dispose_bind = 'once' @classmethod def setup_bind(cls): return engines.testing_engine(options=dict(strategy='threadlocal')) @classmethod def define_tables(cls, metadata): Table('users', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', String(20)), test_needs_acid=True) @classmethod def setup_classes(cls): class User(cls.Basic): pass @classmethod def setup_mappers(cls): users, User = cls.tables.users, cls.classes.User mapper(User, users) @testing.exclude('mysql', '<', (5, 0, 3), 'FIXME: unknown') def test_session_nesting(self): User = self.classes.User sess = create_session(bind=self.bind) self.bind.begin() u = User(name='ed') sess.add(u) sess.flush() self.bind.commit() class FlushWarningsTest(fixtures.MappedTest): run_setup_mappers = 'each' @classmethod def define_tables(cls, metadata): Table('user', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', String(20)) ) Table('address', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('user_id', Integer, ForeignKey('user.id')), Column('email', String(20)) ) @classmethod def setup_classes(cls): class User(cls.Basic): pass class Address(cls.Basic): pass @classmethod def setup_mappers(cls): user, User = cls.tables.user, cls.classes.User address, Address = cls.tables.address, cls.classes.Address mapper(User, user, properties={ 'addresses': relationship(Address, backref="user") }) mapper(Address, address) def test_o2m_cascade_add(self): Address = self.classes.Address def evt(mapper, conn, instance): instance.addresses.append(Address(email='x1')) self._test(evt, "collection append") def test_o2m_cascade_remove(self): def evt(mapper, conn, instance): del instance.addresses[0] self._test(evt, "collection remove") def test_m2o_cascade_add(self): User = self.classes.User def evt(mapper, conn, instance): instance.addresses[0].user = User(name='u2') self._test(evt, "related attribute set") def test_m2o_cascade_remove(self): def evt(mapper, conn, instance): a1 = instance.addresses[0] del a1.user self._test(evt, "related attribute delete") def test_plain_add(self): Address = self.classes.Address def evt(mapper, conn, instance): object_session(instance).add(Address(email='x1')) self._test(evt, "Session.add\(\)") def test_plain_merge(self): Address = self.classes.Address def evt(mapper, conn, instance): object_session(instance).merge(Address(email='x1')) self._test(evt, "Session.merge\(\)") def test_plain_delete(self): Address = self.classes.Address def evt(mapper, conn, instance): object_session(instance).delete(Address(email='x1')) self._test(evt, "Session.delete\(\)") def _test(self, fn, method): User = self.classes.User Address = self.classes.Address s = Session() event.listen(User, "after_insert", fn) u1 = User(name='u1', addresses=[Address(name='a1')]) s.add(u1) assert_raises_message( sa.exc.SAWarning, "Usage of the '%s'" % method, s.commit ) SQLAlchemy-0.8.4/test/orm/test_subquery_relations.py0000644000076500000240000020071312251150016023351 0ustar classicstaff00000000000000from sqlalchemy.testing import eq_, is_, is_not_ from sqlalchemy import testing from sqlalchemy.testing.schema import Table, Column from sqlalchemy import Integer, String, ForeignKey, bindparam, inspect from sqlalchemy.orm import backref, 
subqueryload, subqueryload_all, \ mapper, relationship, clear_mappers, create_session, lazyload, \ aliased, joinedload, deferred, undefer, eagerload_all,\ Session from sqlalchemy.testing import eq_, assert_raises, \ assert_raises_message from sqlalchemy.testing.assertsql import CompiledSQL from sqlalchemy.testing import fixtures from test.orm import _fixtures import sqlalchemy as sa class EagerTest(_fixtures.FixtureTest, testing.AssertsCompiledSQL): run_inserts = 'once' run_deletes = None def test_basic(self): users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(User, users, properties={ 'addresses':relationship( mapper(Address, addresses), order_by=Address.id) }) sess = create_session() q = sess.query(User).options(subqueryload(User.addresses)) def go(): eq_( [User(id=7, addresses=[ Address(id=1, email_address='jack@bean.com')])], q.filter(User.id==7).all() ) self.assert_sql_count(testing.db, go, 2) def go(): eq_( self.static.user_address_result, q.order_by(User.id).all() ) self.assert_sql_count(testing.db, go, 2) def test_from_aliased(self): users, Dingaling, User, dingalings, Address, addresses = (self.tables.users, self.classes.Dingaling, self.classes.User, self.tables.dingalings, self.classes.Address, self.tables.addresses) mapper(Dingaling, dingalings) mapper(Address, addresses, properties={ 'dingalings':relationship(Dingaling, order_by=Dingaling.id) }) mapper(User, users, properties={ 'addresses':relationship( Address, order_by=Address.id) }) sess = create_session() u = aliased(User) q = sess.query(u).options(subqueryload(u.addresses)) def go(): eq_( [User(id=7, addresses=[ Address(id=1, email_address='jack@bean.com')])], q.filter(u.id==7).all() ) self.assert_sql_count(testing.db, go, 2) def go(): eq_( self.static.user_address_result, q.order_by(u.id).all() ) self.assert_sql_count(testing.db, go, 2) q = sess.query(u).\ options(subqueryload_all(u.addresses, Address.dingalings)) def go(): eq_( [ User(id=8, addresses=[ Address(id=2, email_address='ed@wood.com', dingalings=[Dingaling()]), Address(id=3, email_address='ed@bettyboop.com'), Address(id=4, email_address='ed@lala.com'), ]), User(id=9, addresses=[ Address(id=5, dingalings=[Dingaling()]) ]), ], q.filter(u.id.in_([8, 9])).all() ) self.assert_sql_count(testing.db, go, 3) def test_from_get(self): users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(User, users, properties={ 'addresses':relationship( mapper(Address, addresses), order_by=Address.id) }) sess = create_session() q = sess.query(User).options(subqueryload(User.addresses)) def go(): eq_( User(id=7, addresses=[ Address(id=1, email_address='jack@bean.com')]), q.get(7) ) self.assert_sql_count(testing.db, go, 2) def test_from_params(self): users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(User, users, properties={ 'addresses':relationship( mapper(Address, addresses), order_by=Address.id) }) sess = create_session() q = sess.query(User).options(subqueryload(User.addresses)) def go(): eq_( User(id=7, addresses=[ Address(id=1, email_address='jack@bean.com')]), q.filter(User.id==bindparam('foo')).params(foo=7).one() ) self.assert_sql_count(testing.db, go, 2) def test_disable_dynamic(self): """test no subquery option on a dynamic.""" users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(User, 
users, properties={ 'addresses':relationship(Address, lazy="dynamic") }) mapper(Address, addresses) sess = create_session() # previously this would not raise, but would emit # the query needlessly and put the result nowhere. assert_raises_message( sa.exc.InvalidRequestError, "User.addresses' does not support object population - eager loading cannot be applied.", sess.query(User).options(subqueryload(User.addresses)).first, ) def test_many_to_many_plain(self): keywords, items, item_keywords, Keyword, Item = (self.tables.keywords, self.tables.items, self.tables.item_keywords, self.classes.Keyword, self.classes.Item) mapper(Keyword, keywords) mapper(Item, items, properties = dict( keywords = relationship(Keyword, secondary=item_keywords, lazy='subquery', order_by=keywords.c.id))) q = create_session().query(Item).order_by(Item.id) def go(): eq_(self.static.item_keyword_result, q.all()) self.assert_sql_count(testing.db, go, 2) def test_many_to_many_with_join(self): keywords, items, item_keywords, Keyword, Item = (self.tables.keywords, self.tables.items, self.tables.item_keywords, self.classes.Keyword, self.classes.Item) mapper(Keyword, keywords) mapper(Item, items, properties = dict( keywords = relationship(Keyword, secondary=item_keywords, lazy='subquery', order_by=keywords.c.id))) q = create_session().query(Item).order_by(Item.id) def go(): eq_(self.static.item_keyword_result[0:2], q.join('keywords').filter(Keyword.name == 'red').all()) self.assert_sql_count(testing.db, go, 2) def test_many_to_many_with_join_alias(self): keywords, items, item_keywords, Keyword, Item = (self.tables.keywords, self.tables.items, self.tables.item_keywords, self.classes.Keyword, self.classes.Item) mapper(Keyword, keywords) mapper(Item, items, properties = dict( keywords = relationship(Keyword, secondary=item_keywords, lazy='subquery', order_by=keywords.c.id))) q = create_session().query(Item).order_by(Item.id) def go(): eq_(self.static.item_keyword_result[0:2], (q.join('keywords', aliased=True). 
filter(Keyword.name == 'red')).all()) self.assert_sql_count(testing.db, go, 2) def test_orderby(self): users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(User, users, properties = { 'addresses':relationship(mapper(Address, addresses), lazy='subquery', order_by=addresses.c.email_address), }) q = create_session().query(User) eq_([ User(id=7, addresses=[ Address(id=1) ]), User(id=8, addresses=[ Address(id=3, email_address='ed@bettyboop.com'), Address(id=4, email_address='ed@lala.com'), Address(id=2, email_address='ed@wood.com') ]), User(id=9, addresses=[ Address(id=5) ]), User(id=10, addresses=[]) ], q.order_by(User.id).all()) def test_orderby_multi(self): users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(User, users, properties = { 'addresses':relationship(mapper(Address, addresses), lazy='subquery', order_by=[ addresses.c.email_address, addresses.c.id]), }) q = create_session().query(User) eq_([ User(id=7, addresses=[ Address(id=1) ]), User(id=8, addresses=[ Address(id=3, email_address='ed@bettyboop.com'), Address(id=4, email_address='ed@lala.com'), Address(id=2, email_address='ed@wood.com') ]), User(id=9, addresses=[ Address(id=5) ]), User(id=10, addresses=[]) ], q.order_by(User.id).all()) def test_orderby_related(self): """A regular mapper select on a single table can order by a relationship to a second table""" Address, addresses, users, User = (self.classes.Address, self.tables.addresses, self.tables.users, self.classes.User) mapper(Address, addresses) mapper(User, users, properties = dict( addresses = relationship(Address, lazy='subquery', order_by=addresses.c.id), )) q = create_session().query(User) l = q.filter(User.id==Address.user_id).\ order_by(Address.email_address).all() eq_([ User(id=8, addresses=[ Address(id=2, email_address='ed@wood.com'), Address(id=3, email_address='ed@bettyboop.com'), Address(id=4, email_address='ed@lala.com'), ]), User(id=9, addresses=[ Address(id=5) ]), User(id=7, addresses=[ Address(id=1) ]), ], l) def test_orderby_desc(self): Address, addresses, users, User = (self.classes.Address, self.tables.addresses, self.tables.users, self.classes.User) mapper(Address, addresses) mapper(User, users, properties = dict( addresses = relationship(Address, lazy='subquery', order_by=[ sa.desc(addresses.c.email_address) ]), )) sess = create_session() eq_([ User(id=7, addresses=[ Address(id=1) ]), User(id=8, addresses=[ Address(id=2, email_address='ed@wood.com'), Address(id=4, email_address='ed@lala.com'), Address(id=3, email_address='ed@bettyboop.com'), ]), User(id=9, addresses=[ Address(id=5) ]), User(id=10, addresses=[]) ], sess.query(User).order_by(User.id).all()) _pathing_runs = [ ( "lazyload", "lazyload", "lazyload", 15 ), ("subqueryload", "lazyload", "lazyload", 12), ("subqueryload", "subqueryload", "lazyload", 8), ("joinedload", "subqueryload", "lazyload", 7), ("lazyload", "lazyload", "subqueryload", 12), ("subqueryload", "subqueryload", "subqueryload", 4), ("subqueryload", "subqueryload", "joinedload", 3), ] def test_options_pathing(self): self._do_options_test(self._pathing_runs) def test_mapper_pathing(self): self._do_mapper_test(self._pathing_runs) def _do_options_test(self, configs): users, Keyword, orders, items, order_items, Order, Item, User, keywords, item_keywords = (self.tables.users, self.classes.Keyword, self.tables.orders, self.tables.items, self.tables.order_items, self.classes.Order, self.classes.Item, 
self.classes.User, self.tables.keywords, self.tables.item_keywords) mapper(User, users, properties={ 'orders':relationship(Order, order_by=orders.c.id), # o2m, m2o }) mapper(Order, orders, properties={ 'items':relationship(Item, secondary=order_items, order_by=items.c.id), #m2m }) mapper(Item, items, properties={ 'keywords':relationship(Keyword, secondary=item_keywords, order_by=keywords.c.id) #m2m }) mapper(Keyword, keywords) callables = { 'joinedload':joinedload, 'subqueryload':subqueryload } for o, i, k, count in configs: options = [] if o in callables: options.append(callables[o](User.orders)) if i in callables: options.append(callables[i](User.orders, Order.items)) if k in callables: options.append(callables[k](User.orders, Order.items, Item.keywords)) self._do_query_tests(options, count) def _do_mapper_test(self, configs): users, Keyword, orders, items, order_items, Order, Item, User, keywords, item_keywords = (self.tables.users, self.classes.Keyword, self.tables.orders, self.tables.items, self.tables.order_items, self.classes.Order, self.classes.Item, self.classes.User, self.tables.keywords, self.tables.item_keywords) opts = { 'lazyload':'select', 'joinedload':'joined', 'subqueryload':'subquery', } for o, i, k, count in configs: mapper(User, users, properties={ 'orders':relationship(Order, lazy=opts[o], order_by=orders.c.id), }) mapper(Order, orders, properties={ 'items':relationship(Item, secondary=order_items, lazy=opts[i], order_by=items.c.id), }) mapper(Item, items, properties={ 'keywords':relationship(Keyword, lazy=opts[k], secondary=item_keywords, order_by=keywords.c.id) }) mapper(Keyword, keywords) try: self._do_query_tests([], count) finally: clear_mappers() def _do_query_tests(self, opts, count): Order, User = self.classes.Order, self.classes.User sess = create_session() def go(): eq_( sess.query(User).options(*opts).order_by(User.id).all(), self.static.user_item_keyword_result ) self.assert_sql_count(testing.db, go, count) eq_( sess.query(User).options(*opts).filter(User.name=='fred'). order_by(User.id).all(), self.static.user_item_keyword_result[2:3] ) sess = create_session() eq_( sess.query(User).options(*opts).join(User.orders). 
filter(Order.id==3).\ order_by(User.id).all(), self.static.user_item_keyword_result[0:1] ) def test_cyclical(self): """A circular eager relationship breaks the cycle with a lazy loader""" Address, addresses, users, User = (self.classes.Address, self.tables.addresses, self.tables.users, self.classes.User) mapper(Address, addresses) mapper(User, users, properties = dict( addresses = relationship(Address, lazy='subquery', backref=sa.orm.backref('user', lazy='subquery'), order_by=Address.id) )) is_(sa.orm.class_mapper(User).get_property('addresses').lazy, 'subquery') is_(sa.orm.class_mapper(Address).get_property('user').lazy, 'subquery') sess = create_session() eq_(self.static.user_address_result, sess.query(User).order_by(User.id).all()) def test_double(self): """Eager loading with two relationships simultaneously, from the same table, using aliases.""" users, orders, User, Address, Order, addresses = (self.tables.users, self.tables.orders, self.classes.User, self.classes.Address, self.classes.Order, self.tables.addresses) openorders = sa.alias(orders, 'openorders') closedorders = sa.alias(orders, 'closedorders') mapper(Address, addresses) mapper(Order, orders) open_mapper = mapper(Order, openorders, non_primary=True) closed_mapper = mapper(Order, closedorders, non_primary=True) mapper(User, users, properties = dict( addresses = relationship(Address, lazy='subquery', order_by=addresses.c.id), open_orders = relationship( open_mapper, primaryjoin=sa.and_(openorders.c.isopen == 1, users.c.id==openorders.c.user_id), lazy='subquery', order_by=openorders.c.id), closed_orders = relationship( closed_mapper, primaryjoin=sa.and_(closedorders.c.isopen == 0, users.c.id==closedorders.c.user_id), lazy='subquery', order_by=closedorders.c.id))) q = create_session().query(User).order_by(User.id) def go(): eq_([ User( id=7, addresses=[Address(id=1)], open_orders = [Order(id=3)], closed_orders = [Order(id=1), Order(id=5)] ), User( id=8, addresses=[Address(id=2), Address(id=3), Address(id=4)], open_orders = [], closed_orders = [] ), User( id=9, addresses=[Address(id=5)], open_orders = [Order(id=4)], closed_orders = [Order(id=2)] ), User(id=10) ], q.all()) self.assert_sql_count(testing.db, go, 4) def test_double_same_mappers(self): """Eager loading with two relationships simulatneously, from the same table, using aliases.""" addresses, items, order_items, orders, Item, User, Address, Order, users = (self.tables.addresses, self.tables.items, self.tables.order_items, self.tables.orders, self.classes.Item, self.classes.User, self.classes.Address, self.classes.Order, self.tables.users) mapper(Address, addresses) mapper(Order, orders, properties={ 'items': relationship(Item, secondary=order_items, lazy='subquery', order_by=items.c.id)}) mapper(Item, items) mapper(User, users, properties=dict( addresses=relationship(Address, lazy='subquery', order_by=addresses.c.id), open_orders=relationship( Order, primaryjoin=sa.and_(orders.c.isopen == 1, users.c.id==orders.c.user_id), lazy='subquery', order_by=orders.c.id), closed_orders=relationship( Order, primaryjoin=sa.and_(orders.c.isopen == 0, users.c.id==orders.c.user_id), lazy='subquery', order_by=orders.c.id))) q = create_session().query(User).order_by(User.id) def go(): eq_([ User(id=7, addresses=[ Address(id=1)], open_orders=[Order(id=3, items=[ Item(id=3), Item(id=4), Item(id=5)])], closed_orders=[Order(id=1, items=[ Item(id=1), Item(id=2), Item(id=3)]), Order(id=5, items=[ Item(id=5)])]), User(id=8, addresses=[ Address(id=2), Address(id=3), Address(id=4)], open_orders = 
[], closed_orders = []), User(id=9, addresses=[ Address(id=5)], open_orders=[ Order(id=4, items=[ Item(id=1), Item(id=5)])], closed_orders=[ Order(id=2, items=[ Item(id=1), Item(id=2), Item(id=3)])]), User(id=10) ], q.all()) self.assert_sql_count(testing.db, go, 6) @testing.fails_on('maxdb', 'FIXME: unknown') def test_limit(self): """Limit operations combined with lazy-load relationships.""" users, items, order_items, orders, Item, User, Address, Order, addresses = (self.tables.users, self.tables.items, self.tables.order_items, self.tables.orders, self.classes.Item, self.classes.User, self.classes.Address, self.classes.Order, self.tables.addresses) mapper(Item, items) mapper(Order, orders, properties={ 'items':relationship(Item, secondary=order_items, lazy='subquery', order_by=items.c.id) }) mapper(User, users, properties={ 'addresses':relationship(mapper(Address, addresses), lazy='subquery', order_by=addresses.c.id), 'orders':relationship(Order, lazy='select', order_by=orders.c.id) }) sess = create_session() q = sess.query(User) l = q.order_by(User.id).limit(2).offset(1).all() eq_(self.static.user_all_result[1:3], l) sess = create_session() l = q.order_by(sa.desc(User.id)).limit(2).offset(2).all() eq_(list(reversed(self.static.user_all_result[0:2])), l) def test_mapper_order_by(self): users, User, Address, addresses = (self.tables.users, self.classes.User, self.classes.Address, self.tables.addresses) mapper(Address, addresses) mapper(User, users, properties={ 'addresses':relationship(Address, lazy='subquery', order_by=addresses.c.id), },order_by=users.c.id.desc()) sess = create_session() q = sess.query(User) l = q.limit(2).all() eq_(l, list(reversed(self.static.user_address_result[2:4]))) def test_one_to_many_scalar(self): Address, addresses, users, User = (self.classes.Address, self.tables.addresses, self.tables.users, self.classes.User) mapper(User, users, properties = dict( address = relationship(mapper(Address, addresses), lazy='subquery', uselist=False) )) q = create_session().query(User) def go(): l = q.filter(users.c.id == 7).all() eq_([User(id=7, address=Address(id=1))], l) self.assert_sql_count(testing.db, go, 2) @testing.fails_on('maxdb', 'FIXME: unknown') def test_many_to_one(self): users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(Address, addresses, properties = dict( user = relationship(mapper(User, users), lazy='subquery') )) sess = create_session() q = sess.query(Address) def go(): a = q.filter(addresses.c.id==1).one() is_not_(a.user, None) u1 = sess.query(User).get(7) is_(a.user, u1) self.assert_sql_count(testing.db, go, 2) def test_double_with_aggregate(self): User, users, orders, Order = (self.classes.User, self.tables.users, self.tables.orders, self.classes.Order) max_orders_by_user = sa.select([sa.func.max(orders.c.id).label('order_id')], group_by=[orders.c.user_id] ).alias('max_orders_by_user') max_orders = orders.select(orders.c.id==max_orders_by_user.c.order_id).\ alias('max_orders') mapper(Order, orders) mapper(User, users, properties={ 'orders':relationship(Order, backref='user', lazy='subquery', order_by=orders.c.id), 'max_order':relationship( mapper(Order, max_orders, non_primary=True), lazy='subquery', uselist=False) }) q = create_session().query(User) def go(): eq_([ User(id=7, orders=[ Order(id=1), Order(id=3), Order(id=5), ], max_order=Order(id=5) ), User(id=8, orders=[]), User(id=9, orders=[Order(id=2),Order(id=4)], max_order=Order(id=4) ), User(id=10), ], q.order_by(User.id).all()) 
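# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original test module): the basic
# subqueryload() pattern these eager-loading tests exercise.  With the
# option applied, the Parent rows come back from one SELECT and every
# Parent.children collection is populated by a single additional SELECT,
# rather than one lazy load per parent.  The ``Parent``/``Child`` mapping
# and SQLite engine are hypothetical, defined here only for illustration.
# ---------------------------------------------------------------------------
from sqlalchemy import create_engine, Column, Integer, ForeignKey
from sqlalchemy.orm import relationship, sessionmaker, subqueryload
from sqlalchemy.ext.declarative import declarative_base

_SubqBase = declarative_base()

class Parent(_SubqBase):
    __tablename__ = 'parent'
    id = Column(Integer, primary_key=True)
    children = relationship("Child")

class Child(_SubqBase):
    __tablename__ = 'child'
    id = Column(Integer, primary_key=True)
    parent_id = Column(Integer, ForeignKey('parent.id'))

_subq_engine = create_engine('sqlite://')
_SubqBase.metadata.create_all(_subq_engine)
_subq_sess = sessionmaker(bind=_subq_engine)()
_subq_sess.add(Parent(children=[Child(), Child()]))
_subq_sess.commit()

# two statements total: one for the parents, one for all of their children
parents = _subq_sess.query(Parent).options(
    subqueryload(Parent.children)).all()
assert len(parents[0].children) == 2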
self.assert_sql_count(testing.db, go, 3) def test_uselist_false_warning(self): """test that multiple rows received by a uselist=False raises a warning.""" User, users, orders, Order = (self.classes.User, self.tables.users, self.tables.orders, self.classes.Order) mapper(User, users, properties={ 'order':relationship(Order, uselist=False) }) mapper(Order, orders) s = create_session() assert_raises(sa.exc.SAWarning, s.query(User).options(subqueryload(User.order)).all) class LoadOnExistingTest(_fixtures.FixtureTest): """test that loaders from a base Query fully populate.""" run_inserts = 'once' run_deletes = None def _collection_to_scalar_fixture(self): User, Address, Dingaling = self.classes.User, \ self.classes.Address, self.classes.Dingaling mapper(User, self.tables.users, properties={ 'addresses':relationship(Address), }) mapper(Address, self.tables.addresses, properties={ 'dingaling':relationship(Dingaling) }) mapper(Dingaling, self.tables.dingalings) sess = Session(autoflush=False) return User, Address, Dingaling, sess def _collection_to_collection_fixture(self): User, Order, Item = self.classes.User, \ self.classes.Order, self.classes.Item mapper(User, self.tables.users, properties={ 'orders':relationship(Order), }) mapper(Order, self.tables.orders, properties={ 'items':relationship(Item, secondary=self.tables.order_items), }) mapper(Item, self.tables.items) sess = Session(autoflush=False) return User, Order, Item, sess def _eager_config_fixture(self): User, Address = self.classes.User, self.classes.Address mapper(User, self.tables.users, properties={ 'addresses':relationship(Address, lazy="subquery"), }) mapper(Address, self.tables.addresses) sess = Session(autoflush=False) return User, Address, sess def _deferred_config_fixture(self): User, Address = self.classes.User, self.classes.Address mapper(User, self.tables.users, properties={ 'name':deferred(self.tables.users.c.name), 'addresses':relationship(Address, lazy="subquery"), }) mapper(Address, self.tables.addresses) sess = Session(autoflush=False) return User, Address, sess def test_no_query_on_refresh(self): User, Address, sess = self._eager_config_fixture() u1 = sess.query(User).get(8) assert 'addresses' in u1.__dict__ sess.expire(u1) def go(): eq_(u1.id, 8) self.assert_sql_count(testing.db, go, 1) assert 'addresses' not in u1.__dict__ def test_no_query_on_deferred(self): User, Address, sess = self._deferred_config_fixture() u1 = sess.query(User).get(8) assert 'addresses' in u1.__dict__ sess.expire(u1, ['addresses']) def go(): eq_(u1.name, 'ed') self.assert_sql_count(testing.db, go, 1) assert 'addresses' not in u1.__dict__ def test_populate_existing_propagate(self): User, Address, sess = self._eager_config_fixture() u1 = sess.query(User).get(8) u1.addresses[2].email_address = "foofoo" del u1.addresses[1] u1 = sess.query(User).populate_existing().filter_by(id=8).one() # collection is reverted eq_(len(u1.addresses), 3) # attributes on related items reverted eq_(u1.addresses[2].email_address, "ed@lala.com") def test_loads_second_level_collection_to_scalar(self): User, Address, Dingaling, sess = self._collection_to_scalar_fixture() u1 = sess.query(User).get(8) a1 = Address() u1.addresses.append(a1) a2 = u1.addresses[0] a2.email_address = 'foo' sess.query(User).options(subqueryload_all("addresses.dingaling")).\ filter_by(id=8).all() assert u1.addresses[-1] is a1 for a in u1.addresses: if a is not a1: assert 'dingaling' in a.__dict__ else: assert 'dingaling' not in a.__dict__ if a is a2: eq_(a2.email_address, 'foo') def 
test_loads_second_level_collection_to_collection(self): User, Order, Item, sess = self._collection_to_collection_fixture() u1 = sess.query(User).get(7) u1.orders o1 = Order() u1.orders.append(o1) sess.query(User).options(subqueryload_all("orders.items")).\ filter_by(id=7).all() for o in u1.orders: if o is not o1: assert 'items' in o.__dict__ else: assert 'items' not in o.__dict__ def test_load_two_levels_collection_to_scalar(self): User, Address, Dingaling, sess = self._collection_to_scalar_fixture() u1 = sess.query(User).filter_by(id=8).options(subqueryload("addresses")).one() sess.query(User).filter_by(id=8).options(subqueryload_all("addresses.dingaling")).first() assert 'dingaling' in u1.addresses[0].__dict__ def test_load_two_levels_collection_to_collection(self): User, Order, Item, sess = self._collection_to_collection_fixture() u1 = sess.query(User).filter_by(id=7).options(subqueryload("orders")).one() sess.query(User).filter_by(id=7).options(subqueryload_all("orders.items")).first() assert 'items' in u1.orders[0].__dict__ class OrderBySecondaryTest(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): Table('m2m', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('aid', Integer, ForeignKey('a.id')), Column('bid', Integer, ForeignKey('b.id'))) Table('a', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('data', String(50))) Table('b', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('data', String(50))) @classmethod def fixtures(cls): return dict( a=(('id', 'data'), (1, 'a1'), (2, 'a2')), b=(('id', 'data'), (1, 'b1'), (2, 'b2'), (3, 'b3'), (4, 'b4')), m2m=(('id', 'aid', 'bid'), (2, 1, 1), (4, 2, 4), (1, 1, 3), (6, 2, 2), (3, 1, 2), (5, 2, 3))) def test_ordering(self): a, m2m, b = (self.tables.a, self.tables.m2m, self.tables.b) class A(fixtures.ComparableEntity):pass class B(fixtures.ComparableEntity):pass mapper(A, a, properties={ 'bs':relationship(B, secondary=m2m, lazy='subquery', order_by=m2m.c.id) }) mapper(B, b) sess = create_session() def go(): eq_(sess.query(A).all(), [ A(data='a1', bs=[B(data='b3'), B(data='b1'), B(data='b2')]), A(bs=[B(data='b4'), B(data='b3'), B(data='b2')]) ]) self.assert_sql_count(testing.db, go, 2) from .inheritance._poly_fixtures import _Polymorphic, Person, Engineer, Paperwork class BaseRelationFromJoinedSubclassTest(_Polymorphic): @classmethod def define_tables(cls, metadata): people = Table('people', metadata, Column('person_id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', String(50)), Column('type', String(30))) # to test fully, PK of engineers table must be # named differently from that of people engineers = Table('engineers', metadata, Column('engineer_id', Integer, ForeignKey('people.person_id'), primary_key=True), Column('primary_language', String(50))) paperwork = Table('paperwork', metadata, Column('paperwork_id', Integer, primary_key=True, test_needs_autoincrement=True), Column('description', String(50)), Column('person_id', Integer, ForeignKey('people.person_id'))) @classmethod def setup_mappers(cls): people = cls.tables.people engineers = cls.tables.engineers paperwork = cls.tables.paperwork mapper(Person, people, polymorphic_on=people.c.type, polymorphic_identity='person', properties={ 'paperwork': relationship( Paperwork, order_by=paperwork.c.paperwork_id)}) mapper(Engineer, engineers, inherits=Person, polymorphic_identity='engineer') mapper(Paperwork, paperwork) @classmethod 
def insert_data(cls): e1 = Engineer(primary_language="java") e2 = Engineer(primary_language="c++") e1.paperwork = [Paperwork(description="tps report #1"), Paperwork(description="tps report #2")] e2.paperwork = [Paperwork(description="tps report #3")] sess = create_session() sess.add_all([e1, e2]) sess.flush() def test_correct_subquery_nofrom(self): sess = create_session() # use Person.paperwork here just to give the least # amount of context q = sess.query(Engineer).\ filter(Engineer.primary_language == 'java').\ options(subqueryload(Person.paperwork)) def go(): eq_(q.all()[0].paperwork, [Paperwork(description="tps report #1"), Paperwork(description="tps report #2")], ) self.assert_sql_execution( testing.db, go, CompiledSQL( "SELECT people.person_id AS people_person_id, " "people.name AS people_name, people.type AS people_type, " "engineers.engineer_id AS engineers_engineer_id, " "engineers.primary_language AS engineers_primary_language " "FROM people JOIN engineers ON " "people.person_id = engineers.engineer_id " "WHERE engineers.primary_language = :primary_language_1", {"primary_language_1": "java"} ), # ensure we get "people JOIN engineer" here, even though # primary key "people.person_id" is against "Person" # *and* the path comes out as "Person.paperwork", still # want to select from "Engineer" entity CompiledSQL( "SELECT paperwork.paperwork_id AS paperwork_paperwork_id, " "paperwork.description AS paperwork_description, " "paperwork.person_id AS paperwork_person_id, " "anon_1.people_person_id AS anon_1_people_person_id " "FROM (SELECT people.person_id AS people_person_id " "FROM people JOIN engineers " "ON people.person_id = engineers.engineer_id " "WHERE engineers.primary_language = " ":primary_language_1) AS anon_1 " "JOIN paperwork " "ON anon_1.people_person_id = paperwork.person_id " "ORDER BY anon_1.people_person_id, paperwork.paperwork_id", {"primary_language_1": "java"} ) ) def test_correct_subquery_existingfrom(self): sess = create_session() # use Person.paperwork here just to give the least # amount of context q = sess.query(Engineer).\ filter(Engineer.primary_language == 'java').\ join(Engineer.paperwork).\ filter(Paperwork.description == "tps report #2").\ options(subqueryload(Person.paperwork)) def go(): eq_(q.one().paperwork, [Paperwork(description="tps report #1"), Paperwork(description="tps report #2")], ) self.assert_sql_execution( testing.db, go, CompiledSQL( "SELECT people.person_id AS people_person_id, " "people.name AS people_name, people.type AS people_type, " "engineers.engineer_id AS engineers_engineer_id, " "engineers.primary_language AS engineers_primary_language " "FROM people JOIN engineers " "ON people.person_id = engineers.engineer_id " "JOIN paperwork ON people.person_id = paperwork.person_id " "WHERE engineers.primary_language = :primary_language_1 " "AND paperwork.description = :description_1", {"primary_language_1": "java", "description_1": "tps report #2"} ), CompiledSQL( "SELECT paperwork.paperwork_id AS paperwork_paperwork_id, " "paperwork.description AS paperwork_description, " "paperwork.person_id AS paperwork_person_id, " "anon_1.people_person_id AS anon_1_people_person_id " "FROM (SELECT people.person_id AS people_person_id " "FROM people JOIN engineers ON people.person_id = " "engineers.engineer_id JOIN paperwork " "ON people.person_id = paperwork.person_id " "WHERE engineers.primary_language = :primary_language_1 AND " "paperwork.description = :description_1) AS anon_1 " "JOIN paperwork ON anon_1.people_person_id = " "paperwork.person_id " 
"ORDER BY anon_1.people_person_id, paperwork.paperwork_id", {"primary_language_1": "java", "description_1": "tps report #2"} ) ) class SelfReferentialTest(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): Table('nodes', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('parent_id', Integer, ForeignKey('nodes.id')), Column('data', String(30))) @testing.fails_on('maxdb', 'FIXME: unknown') def test_basic(self): nodes = self.tables.nodes class Node(fixtures.ComparableEntity): def append(self, node): self.children.append(node) mapper(Node, nodes, properties={ 'children':relationship(Node, lazy='subquery', join_depth=3, order_by=nodes.c.id) }) sess = create_session() n1 = Node(data='n1') n1.append(Node(data='n11')) n1.append(Node(data='n12')) n1.append(Node(data='n13')) n1.children[1].append(Node(data='n121')) n1.children[1].append(Node(data='n122')) n1.children[1].append(Node(data='n123')) n2 = Node(data='n2') n2.append(Node(data='n21')) n2.children[0].append(Node(data='n211')) n2.children[0].append(Node(data='n212')) sess.add(n1) sess.add(n2) sess.flush() sess.expunge_all() def go(): d = sess.query(Node).filter(Node.data.in_(['n1', 'n2'])).\ order_by(Node.data).all() eq_([Node(data='n1', children=[ Node(data='n11'), Node(data='n12', children=[ Node(data='n121'), Node(data='n122'), Node(data='n123') ]), Node(data='n13') ]), Node(data='n2', children=[ Node(data='n21', children=[ Node(data='n211'), Node(data='n212'), ]) ]) ], d) self.assert_sql_count(testing.db, go, 4) def test_lazy_fallback_doesnt_affect_eager(self): nodes = self.tables.nodes class Node(fixtures.ComparableEntity): def append(self, node): self.children.append(node) mapper(Node, nodes, properties={ 'children':relationship(Node, lazy='subquery', join_depth=1, order_by=nodes.c.id) }) sess = create_session() n1 = Node(data='n1') n1.append(Node(data='n11')) n1.append(Node(data='n12')) n1.append(Node(data='n13')) n1.children[1].append(Node(data='n121')) n1.children[1].append(Node(data='n122')) n1.children[1].append(Node(data='n123')) sess.add(n1) sess.flush() sess.expunge_all() def go(): allnodes = sess.query(Node).order_by(Node.data).all() n12 = allnodes[2] eq_(n12.data, 'n12') eq_([ Node(data='n121'), Node(data='n122'), Node(data='n123') ], list(n12.children)) self.assert_sql_count(testing.db, go, 4) def test_with_deferred(self): nodes = self.tables.nodes class Node(fixtures.ComparableEntity): def append(self, node): self.children.append(node) mapper(Node, nodes, properties={ 'children':relationship(Node, lazy='subquery', join_depth=3, order_by=nodes.c.id), 'data':deferred(nodes.c.data) }) sess = create_session() n1 = Node(data='n1') n1.append(Node(data='n11')) n1.append(Node(data='n12')) sess.add(n1) sess.flush() sess.expunge_all() def go(): eq_( Node(data='n1', children=[Node(data='n11'), Node(data='n12')]), sess.query(Node).order_by(Node.id).first(), ) self.assert_sql_count(testing.db, go, 6) sess.expunge_all() def go(): eq_(Node(data='n1', children=[Node(data='n11'), Node(data='n12')]), sess.query(Node).options(undefer('data')).order_by(Node.id).first()) self.assert_sql_count(testing.db, go, 5) sess.expunge_all() def go(): eq_(Node(data='n1', children=[Node(data='n11'), Node(data='n12')]), sess.query(Node).options(undefer('data'), undefer('children.data')).first()) self.assert_sql_count(testing.db, go, 3) def test_options(self): nodes = self.tables.nodes class Node(fixtures.ComparableEntity): def append(self, node): self.children.append(node) mapper(Node, nodes, 
properties={ 'children':relationship(Node, order_by=nodes.c.id) }, order_by=nodes.c.id) sess = create_session() n1 = Node(data='n1') n1.append(Node(data='n11')) n1.append(Node(data='n12')) n1.append(Node(data='n13')) n1.children[1].append(Node(data='n121')) n1.children[1].append(Node(data='n122')) n1.children[1].append(Node(data='n123')) sess.add(n1) sess.flush() sess.expunge_all() def go(): d = sess.query(Node).filter_by(data='n1').\ options(subqueryload_all('children.children')).first() eq_(Node(data='n1', children=[ Node(data='n11'), Node(data='n12', children=[ Node(data='n121'), Node(data='n122'), Node(data='n123') ]), Node(data='n13') ]), d) self.assert_sql_count(testing.db, go, 3) @testing.fails_on('maxdb', 'FIXME: unknown') def test_no_depth(self): """no join depth is set, so no eager loading occurs.""" nodes = self.tables.nodes class Node(fixtures.ComparableEntity): def append(self, node): self.children.append(node) mapper(Node, nodes, properties={ 'children':relationship(Node, lazy='subquery') }) sess = create_session() n1 = Node(data='n1') n1.append(Node(data='n11')) n1.append(Node(data='n12')) n1.append(Node(data='n13')) n1.children[1].append(Node(data='n121')) n1.children[1].append(Node(data='n122')) n1.children[1].append(Node(data='n123')) n2 = Node(data='n2') n2.append(Node(data='n21')) sess.add(n1) sess.add(n2) sess.flush() sess.expunge_all() def go(): d = sess.query(Node).filter(Node.data.in_(['n1', 'n2'])).order_by(Node.data).all() eq_([ Node(data='n1', children=[ Node(data='n11'), Node(data='n12', children=[ Node(data='n121'), Node(data='n122'), Node(data='n123') ]), Node(data='n13') ]), Node(data='n2', children=[ Node(data='n21') ]) ], d) self.assert_sql_count(testing.db, go, 4) class InheritanceToRelatedTest(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): Table('foo', metadata, Column("id", Integer, primary_key=True), Column("type", String(50)), Column("related_id", Integer, ForeignKey("related.id")) ) Table("bar", metadata, Column("id", Integer, ForeignKey('foo.id'), primary_key=True), ) Table("baz", metadata, Column("id", Integer, ForeignKey('foo.id'), primary_key=True), ) Table("related", metadata, Column("id", Integer, primary_key=True), ) @classmethod def setup_classes(cls): class Foo(cls.Comparable): pass class Bar(Foo): pass class Baz(Foo): pass class Related(cls.Comparable): pass @classmethod def fixtures(cls): return dict( foo=[ ('id', 'type', 'related_id'), (1, 'bar', 1), (2, 'bar', 2), (3, 'baz', 1), (4, 'baz', 2), ], bar=[ ('id', ), (1,), (2,) ], baz=[ ('id', ), (3,), (4,) ], related=[ ('id', ), (1,), (2,) ] ) @classmethod def setup_mappers(cls): mapper(cls.classes.Foo, cls.tables.foo, properties={ 'related': relationship(cls.classes.Related) }, polymorphic_on=cls.tables.foo.c.type) mapper(cls.classes.Bar, cls.tables.bar, polymorphic_identity='bar', inherits=cls.classes.Foo) mapper(cls.classes.Baz, cls.tables.baz, polymorphic_identity='baz', inherits=cls.classes.Foo) mapper(cls.classes.Related, cls.tables.related) def test_caches_query_per_base_subq(self): Foo, Bar, Baz, Related = self.classes.Foo, self.classes.Bar, \ self.classes.Baz, self.classes.Related s = Session(testing.db) def go(): eq_( s.query(Foo).with_polymorphic([Bar, Baz]).\ order_by(Foo.id).\ options(subqueryload(Foo.related)).all(), [ Bar(id=1, related=Related(id=1)), Bar(id=2, related=Related(id=2)), Baz(id=3, related=Related(id=1)), Baz(id=4, related=Related(id=2)) ] ) self.assert_sql_count(testing.db, go, 2) def test_caches_query_per_base_joined(self): # technically 
this should be in test_eager_relations Foo, Bar, Baz, Related = self.classes.Foo, self.classes.Bar, \ self.classes.Baz, self.classes.Related s = Session(testing.db) def go(): eq_( s.query(Foo).with_polymorphic([Bar, Baz]).\ order_by(Foo.id).\ options(joinedload(Foo.related)).all(), [ Bar(id=1, related=Related(id=1)), Bar(id=2, related=Related(id=2)), Baz(id=3, related=Related(id=1)), Baz(id=4, related=Related(id=2)) ] ) self.assert_sql_count(testing.db, go, 1) class CyclicalInheritingEagerTestOne(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): Table('t1', metadata, Column('c1', Integer, primary_key=True, test_needs_autoincrement=True), Column('c2', String(30)), Column('type', String(30)) ) Table('t2', metadata, Column('c1', Integer, primary_key=True, test_needs_autoincrement=True), Column('c2', String(30)), Column('type', String(30)), Column('t1.id', Integer, ForeignKey('t1.c1'))) def test_basic(self): t2, t1 = self.tables.t2, self.tables.t1 class T(object): pass class SubT(T): pass class T2(object): pass class SubT2(T2): pass mapper(T, t1, polymorphic_on=t1.c.type, polymorphic_identity='t1') mapper(SubT, None, inherits=T, polymorphic_identity='subt1', properties={ 't2s': relationship(SubT2, lazy='subquery', backref=sa.orm.backref('subt', lazy='subquery')) }) mapper(T2, t2, polymorphic_on=t2.c.type, polymorphic_identity='t2') mapper(SubT2, None, inherits=T2, polymorphic_identity='subt2') # testing a particular endless loop condition in eager load setup create_session().query(SubT).all() class CyclicalInheritingEagerTestTwo(fixtures.DeclarativeMappedTest, testing.AssertsCompiledSQL): __dialect__ = 'default' @classmethod def setup_classes(cls): Base = cls.DeclarativeBasic class PersistentObject(Base): __tablename__ = 'persistent' id = Column(Integer, primary_key=True, test_needs_autoincrement=True) class Movie(PersistentObject): __tablename__ = 'movie' id = Column(Integer, ForeignKey('persistent.id'), primary_key=True) director_id = Column(Integer, ForeignKey('director.id')) title = Column(String(50)) class Director(PersistentObject): __tablename__ = 'director' id = Column(Integer, ForeignKey('persistent.id'), primary_key=True) movies = relationship("Movie", foreign_keys=Movie.director_id) name = Column(String(50)) def test_from_subclass(self): Director = self.classes.Director s = create_session() ctx = s.query(Director).options(subqueryload('*'))._compile_context() q = ctx.attributes[('subquery', (inspect(Director), inspect(Director).attrs.movies))] self.assert_compile(q, "SELECT anon_1.movie_id AS anon_1_movie_id, " "anon_1.persistent_id AS anon_1_persistent_id, " "anon_1.movie_director_id AS anon_1_movie_director_id, " "anon_1.movie_title AS anon_1_movie_title, " "anon_2.director_id AS anon_2_director_id FROM " "(SELECT director.id AS director_id FROM persistent JOIN director " "ON persistent.id = director.id) AS anon_2 " "JOIN (SELECT persistent.id AS persistent_id, movie.id AS movie_id, " "movie.director_id AS movie_director_id, " "movie.title AS movie_title FROM persistent JOIN movie " "ON persistent.id = movie.id) AS anon_1 " "ON anon_2.director_id = anon_1.movie_director_id " "ORDER BY anon_2.director_id") def test_integrate(self): Director = self.classes.Director Movie = self.classes.Movie session = Session(testing.db) rscott = Director(name=u"Ridley Scott") alien = Movie(title=u"Alien") brunner = Movie(title=u"Blade Runner") rscott.movies.append(brunner) rscott.movies.append(alien) session.add_all([rscott, alien, brunner]) session.commit() session.close_all() d 
= session.query(Director).options(subqueryload('*')).first() assert len(list(session)) == 3 class SubqueryloadDistinctTest(fixtures.DeclarativeMappedTest, testing.AssertsCompiledSQL): __dialect__ = 'default' run_inserts = 'once' run_deletes = None @classmethod def setup_classes(cls): Base = cls.DeclarativeBasic class Director(Base): __tablename__ = 'director' id = Column(Integer, primary_key=True, test_needs_autoincrement=True) name = Column(String(50)) class DirectorPhoto(Base): __tablename__ = 'director_photo' id = Column(Integer, primary_key=True, test_needs_autoincrement=True) path = Column(String(255)) director_id = Column(Integer, ForeignKey('director.id')) director = relationship(Director, backref="photos") class Movie(Base): __tablename__ = 'movie' id = Column(Integer, primary_key=True, test_needs_autoincrement=True) director_id = Column(Integer, ForeignKey('director.id')) director = relationship(Director, backref="movies") title = Column(String(50)) credits = relationship("Credit", backref="movie") class Credit(Base): __tablename__ = 'credit' id = Column(Integer, primary_key=True, test_needs_autoincrement=True) movie_id = Column(Integer, ForeignKey('movie.id')) @classmethod def insert_data(cls): Movie = cls.classes.Movie Director = cls.classes.Director DirectorPhoto = cls.classes.DirectorPhoto Credit = cls.classes.Credit d = Director(name='Woody Allen') d.photos = [DirectorPhoto(path='/1.jpg'), DirectorPhoto(path='/2.jpg')] d.movies = [Movie(title='Manhattan', credits=[Credit(), Credit()]), Movie(title='Sweet and Lowdown', credits=[Credit()])] sess = create_session() sess.add_all([d]) sess.flush() def test_distinct_strategy_opt_m2o(self): self._run_test_m2o(True, None) self._run_test_m2o(False, None) def test_distinct_unrelated_opt_m2o(self): self._run_test_m2o(None, True) self._run_test_m2o(None, False) def _run_test_m2o(self, director_strategy_level, photo_strategy_level): # test where the innermost is m2o, e.g. 
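test_from_subclass and test_integrate above rely on the wildcard loader option subqueryload('*'), which applies the subquery strategy to every relationship reachable from the query's entities for that one query. A brief sketch, reusing the Director name from the fixtures above and assuming an open session:

from sqlalchemy.orm import subqueryload

# every relationship on Director uses the subquery strategy for this
# query only; other queries keep their configured loading behavior
directors = session.query(Director).options(subqueryload('*')).all()

# the targeted equivalent for a single attribute
directors = session.query(Director).options(subqueryload(Director.movies)).all()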
# Movie->director Movie = self.classes.Movie Director = self.classes.Director Movie.director.property.distinct_target_key = director_strategy_level Director.photos.property.distinct_target_key = photo_strategy_level # the DISTINCT is controlled by # only the Movie->director relationship, *not* the # Director.photos expect_distinct = director_strategy_level in (True, None) s = create_session() q = ( s.query(Movie) .options( subqueryload(Movie.director), subqueryload(Movie.director, Director.photos) ) ) ctx = q._compile_context() q2 = ctx.attributes[ ('subquery', (inspect(Movie), inspect(Movie).attrs.director)) ] self.assert_compile( q2, 'SELECT director.id AS director_id, ' 'director.name AS director_name, ' 'anon_1.movie_director_id AS anon_1_movie_director_id ' 'FROM (SELECT%s movie.director_id AS movie_director_id ' 'FROM movie) AS anon_1 ' 'JOIN director ON director.id = anon_1.movie_director_id ' 'ORDER BY anon_1.movie_director_id' % ( " DISTINCT" if expect_distinct else "") ) ctx2 = q2._compile_context() result = s.execute(q2) rows = result.fetchall() if expect_distinct: eq_(rows, [ (1, 'Woody Allen', 1), ]) else: eq_(rows, [ (1, 'Woody Allen', 1), (1, 'Woody Allen', 1), ]) q3 = ctx2.attributes[ ('subquery', (inspect(Director), inspect(Director).attrs.photos)) ] self.assert_compile( q3, 'SELECT director_photo.id AS director_photo_id, ' 'director_photo.path AS director_photo_path, ' 'director_photo.director_id AS director_photo_director_id, ' 'director_1.id AS director_1_id ' 'FROM (SELECT%s movie.director_id AS movie_director_id ' 'FROM movie) AS anon_1 ' 'JOIN director AS director_1 ON director_1.id = anon_1.movie_director_id ' 'JOIN director_photo ON director_1.id = director_photo.director_id ' 'ORDER BY director_1.id' % ( " DISTINCT" if expect_distinct else "") ) result = s.execute(q3) rows = result.fetchall() if expect_distinct: eq_(set(tuple(r) for r in rows), set([ (1, u'/1.jpg', 1, 1), (2, u'/2.jpg', 1, 1), ])) else: eq_(set(tuple(r) for r in rows), set([ (1, u'/1.jpg', 1, 1), (2, u'/2.jpg', 1, 1), (1, u'/1.jpg', 1, 1), (2, u'/2.jpg', 1, 1), ])) movies = q.all() # check number of persistent objects in session eq_(len(list(s)), 5) def test_cant_do_distinct_in_joins(self): """the DISTINCT feature here works when the m2o is in the innermost mapper, but when we are just joining along relationships outside of that, we can still have dupes, and there's no solution to that. 
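The _run_test_m2o helper above flips distinct_target_key on already-configured properties to show when the inner subquery gains a DISTINCT; the same flag can also be given to relationship() at mapping time. A sketch under that assumption, reusing the declarative Movie/Director layout from the fixtures above:

class Movie(Base):
    __tablename__ = 'movie'
    id = Column(Integer, primary_key=True)
    director_id = Column(Integer, ForeignKey('director.id'))
    title = Column(String(50))
    # distinct_target_key=True asks the subquery eager loader to emit
    # SELECT DISTINCT in the innermost query, collapsing repeated
    # director_id values before the join back to director.
    director = relationship('Director', distinct_target_key=True)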
""" Movie = self.classes.Movie Credit = self.classes.Credit Credit.movie.property.distinct_target_key = False Movie.director.property.distinct_target_key = False s = create_session() q = ( s.query(Credit) .options( subqueryload(Credit.movie), subqueryload(Credit.movie, Movie.director) ) ) ctx = q._compile_context() q2 = ctx.attributes[ ('subquery', (inspect(Credit), Credit.movie.property)) ] ctx2 = q2._compile_context() q3 = ctx2.attributes[ ('subquery', (inspect(Movie), Movie.director.property)) ] # three rows due to dupe at Credit.movie level # as well as Movie.director level result = s.execute(q3) eq_( result.fetchall(), [ (1, 'Woody Allen', 1), (1, 'Woody Allen', 1), (1, 'Woody Allen', 1) ] ) SQLAlchemy-0.8.4/test/orm/test_sync.py0000644000076500000240000002121312251147172020373 0ustar classicstaff00000000000000from sqlalchemy.testing import eq_, assert_raises, assert_raises_message from sqlalchemy import testing from sqlalchemy.testing.schema import Table, Column from test.orm import _fixtures from sqlalchemy.testing import fixtures from sqlalchemy import Integer, String, ForeignKey, func from sqlalchemy.orm import mapper, relationship, backref, \ create_session, unitofwork, attributes,\ Session, class_mapper, sync, exc as orm_exc class AssertsUOW(object): def _get_test_uow(self, session): uow = unitofwork.UOWTransaction(session) deleted = set(session._deleted) new = set(session._new) dirty = set(session._dirty_states).difference(deleted) for s in new.union(dirty): uow.register_object(s) for d in deleted: uow.register_object(d, isdelete=True) return uow class SyncTest(fixtures.MappedTest, testing.AssertsExecutionResults, AssertsUOW): @classmethod def define_tables(cls, metadata): Table('t1', metadata, Column('id', Integer, primary_key=True), Column('foo', Integer) ) Table('t2', metadata, Column('id', Integer, ForeignKey('t1.id'), primary_key=True), Column('t1id', Integer, ForeignKey('t1.id')), ) @classmethod def setup_classes(cls): class A(cls.Basic): pass class B(cls.Basic): pass @classmethod def setup_mappers(cls): mapper(cls.classes.A, cls.tables.t1) mapper(cls.classes.B, cls.tables.t2) def _fixture(self): A, B = self.classes.A, self.classes.B session = create_session() uowcommit = self._get_test_uow(session) a_mapper = class_mapper(A) b_mapper= class_mapper(B) self.a1 = a1 = A() self.b1 = b1 = B() uowcommit = self._get_test_uow(session) return uowcommit,\ attributes.instance_state(a1),\ attributes.instance_state(b1),\ a_mapper, b_mapper def test_populate(self): uowcommit, a1, b1, a_mapper, b_mapper = self._fixture() pairs = [(a_mapper.c.id, b_mapper.c.id)] a1.obj().id = 7 assert 'id' not in b1.obj().__dict__ sync.populate(a1, a_mapper, b1, b_mapper, pairs, uowcommit, False) eq_(b1.obj().id, 7) eq_(b1.obj().__dict__['id'], 7) assert ("pk_cascaded", b1, b_mapper.c.id) not in uowcommit.attributes def test_populate_flag_cascaded(self): uowcommit, a1, b1, a_mapper, b_mapper = self._fixture() pairs = [(a_mapper.c.id, b_mapper.c.id)] a1.obj().id = 7 assert 'id' not in b1.obj().__dict__ sync.populate(a1, a_mapper, b1, b_mapper, pairs, uowcommit, True) eq_(b1.obj().id, 7) eq_(b1.obj().__dict__['id'], 7) eq_(uowcommit.attributes[("pk_cascaded", b1, b_mapper.c.id)], True) def test_populate_unmapped_source(self): uowcommit, a1, b1, a_mapper, b_mapper = self._fixture() pairs = [(b_mapper.c.id, b_mapper.c.id)] assert_raises_message( orm_exc.UnmappedColumnError, "Can't execute sync rule for source column 't2.id'; " r"mapper 'Mapper\|A\|t1' does not map this column.", sync.populate, a1, a_mapper, 
b1, b_mapper, pairs, uowcommit, False ) def test_populate_unmapped_dest(self): uowcommit, a1, b1, a_mapper, b_mapper = self._fixture() pairs = [(a_mapper.c.id, a_mapper.c.id,)] assert_raises_message( orm_exc.UnmappedColumnError, "Can't execute sync rule for destination " r"column 't1.id'; mapper 'Mapper\|B\|t2' does not map this column.", sync.populate, a1, a_mapper, b1, b_mapper, pairs, uowcommit, False ) def test_clear(self): uowcommit, a1, b1, a_mapper, b_mapper = self._fixture() pairs = [(a_mapper.c.id, b_mapper.c.t1id,)] b1.obj().t1id = 8 eq_(b1.obj().__dict__['t1id'], 8) sync.clear(b1, b_mapper, pairs) eq_(b1.obj().__dict__['t1id'], None) def test_clear_pk(self): uowcommit, a1, b1, a_mapper, b_mapper = self._fixture() pairs = [(a_mapper.c.id, b_mapper.c.id,)] b1.obj().id = 8 eq_(b1.obj().__dict__['id'], 8) assert_raises_message( AssertionError, "Dependency rule tried to blank-out primary key " "column 't2.id' on instance ' has a NULL " "identity key. If this is an auto-generated value, " "check that the database table allows generation ", s.commit ) def test_dont_complain_if_no_update(self): T1 = self.classes.T1 s = Session() t = T1(col1="1", col2=None) s.add(t) s.commit() t.col1 = "1" s.commit() SQLAlchemy-0.8.4/test/orm/test_unitofworkv2.py0000644000076500000240000014062712251150016022100 0ustar classicstaff00000000000000from sqlalchemy.testing import eq_, assert_raises, assert_raises_message from sqlalchemy import testing from sqlalchemy.testing import engines from sqlalchemy.testing.schema import Table, Column from test.orm import _fixtures from sqlalchemy.testing import fixtures from sqlalchemy import Integer, String, ForeignKey, func from sqlalchemy.orm import mapper, relationship, backref, \ create_session, unitofwork, attributes,\ Session, class_mapper, sync, exc as orm_exc from sqlalchemy.testing.assertsql import AllOf, CompiledSQL class AssertsUOW(object): def _get_test_uow(self, session): uow = unitofwork.UOWTransaction(session) deleted = set(session._deleted) new = set(session._new) dirty = set(session._dirty_states).difference(deleted) for s in new.union(dirty): uow.register_object(s) for d in deleted: uow.register_object(d, isdelete=True) return uow def _assert_uow_size(self, session, expected ): uow = self._get_test_uow(session) postsort_actions = uow._generate_actions() print postsort_actions eq_(len(postsort_actions), expected, postsort_actions) class UOWTest(_fixtures.FixtureTest, testing.AssertsExecutionResults, AssertsUOW): run_inserts = None class RudimentaryFlushTest(UOWTest): def test_one_to_many_save(self): users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(User, users, properties={ 'addresses':relationship(Address), }) mapper(Address, addresses) sess = create_session() a1, a2 = Address(email_address='a1'), Address(email_address='a2') u1 = User(name='u1', addresses=[a1, a2]) sess.add(u1) self.assert_sql_execution( testing.db, sess.flush, CompiledSQL( "INSERT INTO users (name) VALUES (:name)", {'name': 'u1'} ), CompiledSQL( "INSERT INTO addresses (user_id, email_address) " "VALUES (:user_id, :email_address)", lambda ctx: {'email_address': 'a1', 'user_id':u1.id} ), CompiledSQL( "INSERT INTO addresses (user_id, email_address) " "VALUES (:user_id, :email_address)", lambda ctx: {'email_address': 'a2', 'user_id':u1.id} ), ) def test_one_to_many_delete_all(self): users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(User, 
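The flush tests above and below all use assert_sql_execution with CompiledSQL matchers: each matcher names the exact statement text and its parameters, where the parameters may be a dict, a list of dicts for an executemany batch, or a callable receiving the execution context so that generated ids can be read lazily. AllOf groups statements whose relative order is not deterministic. A compressed sketch of the pattern, assuming a test method with the users/addresses fixtures and a pending User u1:

from sqlalchemy.testing.assertsql import CompiledSQL, AllOf

self.assert_sql_execution(
    testing.db,
    sess.flush,
    CompiledSQL(
        "INSERT INTO users (name) VALUES (:name)",
        {'name': 'u1'}
    ),
    # AllOf tolerates either ordering of the two address INSERTs
    AllOf(
        CompiledSQL(
            "INSERT INTO addresses (user_id, email_address) "
            "VALUES (:user_id, :email_address)",
            lambda ctx: {'user_id': u1.id, 'email_address': 'a1'}
        ),
        CompiledSQL(
            "INSERT INTO addresses (user_id, email_address) "
            "VALUES (:user_id, :email_address)",
            lambda ctx: {'user_id': u1.id, 'email_address': 'a2'}
        ),
    ),
)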
users, properties={ 'addresses':relationship(Address), }) mapper(Address, addresses) sess = create_session() a1, a2 = Address(email_address='a1'), Address(email_address='a2') u1 = User(name='u1', addresses=[a1, a2]) sess.add(u1) sess.flush() sess.delete(u1) sess.delete(a1) sess.delete(a2) self.assert_sql_execution( testing.db, sess.flush, CompiledSQL( "DELETE FROM addresses WHERE addresses.id = :id", [{'id':a1.id},{'id':a2.id}] ), CompiledSQL( "DELETE FROM users WHERE users.id = :id", {'id':u1.id} ), ) def test_one_to_many_delete_parent(self): users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(User, users, properties={ 'addresses':relationship(Address), }) mapper(Address, addresses) sess = create_session() a1, a2 = Address(email_address='a1'), Address(email_address='a2') u1 = User(name='u1', addresses=[a1, a2]) sess.add(u1) sess.flush() sess.delete(u1) self.assert_sql_execution( testing.db, sess.flush, CompiledSQL( "UPDATE addresses SET user_id=:user_id WHERE " "addresses.id = :addresses_id", lambda ctx: [{u'addresses_id': a1.id, 'user_id': None}] ), CompiledSQL( "UPDATE addresses SET user_id=:user_id WHERE " "addresses.id = :addresses_id", lambda ctx: [{u'addresses_id': a2.id, 'user_id': None}] ), CompiledSQL( "DELETE FROM users WHERE users.id = :id", {'id':u1.id} ), ) def test_many_to_one_save(self): users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(User, users) mapper(Address, addresses, properties={ 'user':relationship(User) }) sess = create_session() u1 = User(name='u1') a1, a2 = Address(email_address='a1', user=u1), \ Address(email_address='a2', user=u1) sess.add_all([a1, a2]) self.assert_sql_execution( testing.db, sess.flush, CompiledSQL( "INSERT INTO users (name) VALUES (:name)", {'name': 'u1'} ), CompiledSQL( "INSERT INTO addresses (user_id, email_address) " "VALUES (:user_id, :email_address)", lambda ctx: {'email_address': 'a1', 'user_id':u1.id} ), CompiledSQL( "INSERT INTO addresses (user_id, email_address) " "VALUES (:user_id, :email_address)", lambda ctx: {'email_address': 'a2', 'user_id':u1.id} ), ) def test_many_to_one_delete_all(self): users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(User, users) mapper(Address, addresses, properties={ 'user':relationship(User) }) sess = create_session() u1 = User(name='u1') a1, a2 = Address(email_address='a1', user=u1), \ Address(email_address='a2', user=u1) sess.add_all([a1, a2]) sess.flush() sess.delete(u1) sess.delete(a1) sess.delete(a2) self.assert_sql_execution( testing.db, sess.flush, CompiledSQL( "DELETE FROM addresses WHERE addresses.id = :id", [{'id':a1.id},{'id':a2.id}] ), CompiledSQL( "DELETE FROM users WHERE users.id = :id", {'id':u1.id} ), ) def test_many_to_one_delete_target(self): users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(User, users) mapper(Address, addresses, properties={ 'user':relationship(User) }) sess = create_session() u1 = User(name='u1') a1, a2 = Address(email_address='a1', user=u1), \ Address(email_address='a2', user=u1) sess.add_all([a1, a2]) sess.flush() sess.delete(u1) a1.user = a2.user = None self.assert_sql_execution( testing.db, sess.flush, CompiledSQL( "UPDATE addresses SET user_id=:user_id WHERE " "addresses.id = :addresses_id", lambda ctx: [{u'addresses_id': a1.id, 'user_id': None}] ), CompiledSQL( 
"UPDATE addresses SET user_id=:user_id WHERE " "addresses.id = :addresses_id", lambda ctx: [{u'addresses_id': a2.id, 'user_id': None}] ), CompiledSQL( "DELETE FROM users WHERE users.id = :id", {'id':u1.id} ), ) def test_many_to_one_delete_unloaded(self): users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(User, users) mapper(Address, addresses, properties={ 'parent':relationship(User) }) parent = User(name='p1') c1, c2 = Address(email_address='c1', parent=parent), \ Address(email_address='c2', parent=parent) session = Session() session.add_all([c1, c2]) session.add(parent) session.flush() pid = parent.id c1id = c1.id c2id = c2.id session.expire(parent) session.expire(c1) session.expire(c2) session.delete(c1) session.delete(c2) session.delete(parent) # testing that relationships # are loaded even if all ids/references are # expired self.assert_sql_execution( testing.db, session.flush, AllOf( # [ticket:2002] - ensure the m2os are loaded. # the selects here are in fact unexpiring # each row - the m2o comes from the identity map. # the User row might be handled before or the addresses # are loaded so need to use AllOf CompiledSQL( "SELECT addresses.id AS addresses_id, addresses.user_id AS " "addresses_user_id, addresses.email_address AS " "addresses_email_address FROM addresses WHERE addresses.id = " ":param_1", lambda ctx: {'param_1': c1id} ), CompiledSQL( "SELECT addresses.id AS addresses_id, addresses.user_id AS " "addresses_user_id, addresses.email_address AS " "addresses_email_address FROM addresses WHERE addresses.id = " ":param_1", lambda ctx: {'param_1': c2id} ), CompiledSQL( "SELECT users.id AS users_id, users.name AS users_name " "FROM users WHERE users.id = :param_1", lambda ctx: {'param_1': pid} ), CompiledSQL( "DELETE FROM addresses WHERE addresses.id = :id", lambda ctx: [{'id': c1id}, {'id': c2id}] ), CompiledSQL( "DELETE FROM users WHERE users.id = :id", lambda ctx: {'id': pid} ), ), ) def test_many_to_one_delete_childonly_unloaded(self): users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(User, users) mapper(Address, addresses, properties={ 'parent':relationship(User) }) parent = User(name='p1') c1, c2 = Address(email_address='c1', parent=parent), \ Address(email_address='c2', parent=parent) session = Session() session.add_all([c1, c2]) session.add(parent) session.flush() pid = parent.id c1id = c1.id c2id = c2.id session.expire(c1) session.expire(c2) session.delete(c1) session.delete(c2) self.assert_sql_execution( testing.db, session.flush, AllOf( # [ticket:2049] - we aren't deleting User, # relationship is simple m2o, no SELECT should be emitted for it. 
CompiledSQL( "SELECT addresses.id AS addresses_id, addresses.user_id AS " "addresses_user_id, addresses.email_address AS " "addresses_email_address FROM addresses WHERE addresses.id = " ":param_1", lambda ctx: {'param_1': c1id} ), CompiledSQL( "SELECT addresses.id AS addresses_id, addresses.user_id AS " "addresses_user_id, addresses.email_address AS " "addresses_email_address FROM addresses WHERE addresses.id = " ":param_1", lambda ctx: {'param_1': c2id} ), ), CompiledSQL( "DELETE FROM addresses WHERE addresses.id = :id", lambda ctx: [{'id': c1id}, {'id': c2id}] ), ) def test_many_to_one_delete_childonly_unloaded_expired(self): users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(User, users) mapper(Address, addresses, properties={ 'parent':relationship(User) }) parent = User(name='p1') c1, c2 = Address(email_address='c1', parent=parent), \ Address(email_address='c2', parent=parent) session = Session() session.add_all([c1, c2]) session.add(parent) session.flush() pid = parent.id c1id = c1.id c2id = c2.id session.expire(parent) session.expire(c1) session.expire(c2) session.delete(c1) session.delete(c2) self.assert_sql_execution( testing.db, session.flush, AllOf( # the parent User is expired, so it gets loaded here. CompiledSQL( "SELECT addresses.id AS addresses_id, addresses.user_id AS " "addresses_user_id, addresses.email_address AS " "addresses_email_address FROM addresses WHERE addresses.id = " ":param_1", lambda ctx: {'param_1': c1id} ), CompiledSQL( "SELECT addresses.id AS addresses_id, addresses.user_id AS " "addresses_user_id, addresses.email_address AS " "addresses_email_address FROM addresses WHERE addresses.id = " ":param_1", lambda ctx: {'param_1': c2id} ), ), CompiledSQL( "DELETE FROM addresses WHERE addresses.id = :id", lambda ctx: [{'id': c1id}, {'id': c2id}] ), ) def test_natural_ordering(self): """test that unconnected items take relationship() into account regardless.""" users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(User, users) mapper(Address, addresses, properties={ 'parent':relationship(User) }) sess = create_session() u1 = User(id=1, name='u1') a1 = Address(id=1, user_id=1, email_address='a2') sess.add_all([u1, a1]) self.assert_sql_execution( testing.db, sess.flush, CompiledSQL( "INSERT INTO users (id, name) VALUES (:id, :name)", {'id':1, 'name':'u1'}), CompiledSQL( "INSERT INTO addresses (id, user_id, email_address) " "VALUES (:id, :user_id, :email_address)", {'email_address': 'a2', 'user_id': 1, 'id': 1} ) ) sess.delete(u1) sess.delete(a1) self.assert_sql_execution( testing.db, sess.flush, CompiledSQL( "DELETE FROM addresses WHERE addresses.id = :id", [{'id': 1}] ), CompiledSQL( "DELETE FROM users WHERE users.id = :id", [{'id': 1}] ) ) def test_natural_selfref(self): """test that unconnected items take relationship() into account regardless.""" Node, nodes = self.classes.Node, self.tables.nodes mapper(Node, nodes, properties={ 'children':relationship(Node) }) sess = create_session() n1 = Node(id=1) n2 = Node(id=2, parent_id=1) n3 = Node(id=3, parent_id=2) # insert order is determined from add order since they # are the same class sess.add_all([n1, n2, n3]) self.assert_sql_execution( testing.db, sess.flush, CompiledSQL( "INSERT INTO nodes (id, parent_id, data) VALUES " "(:id, :parent_id, :data)", [{'parent_id': None, 'data': None, 'id': 1}, {'parent_id': 1, 'data': None, 'id': 2}, {'parent_id': 2, 'data': None, 'id': 3}] 
), ) def test_many_to_many(self): keywords, items, item_keywords, Keyword, Item = (self.tables.keywords, self.tables.items, self.tables.item_keywords, self.classes.Keyword, self.classes.Item) mapper(Item, items, properties={ 'keywords':relationship(Keyword, secondary=item_keywords) }) mapper(Keyword, keywords) sess = create_session() k1 = Keyword(name='k1') i1 = Item(description='i1', keywords=[k1]) sess.add(i1) self.assert_sql_execution( testing.db, sess.flush, AllOf( CompiledSQL( "INSERT INTO keywords (name) VALUES (:name)", {'name':'k1'} ), CompiledSQL( "INSERT INTO items (description) VALUES (:description)", {'description':'i1'} ), ), CompiledSQL( "INSERT INTO item_keywords (item_id, keyword_id) " "VALUES (:item_id, :keyword_id)", lambda ctx:{'item_id':i1.id, 'keyword_id':k1.id} ) ) # test that keywords collection isn't loaded sess.expire(i1, ['keywords']) i1.description = 'i2' self.assert_sql_execution( testing.db, sess.flush, CompiledSQL("UPDATE items SET description=:description " "WHERE items.id = :items_id", lambda ctx:{'description':'i2', 'items_id':i1.id}) ) def test_m2o_flush_size(self): users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(User, users) mapper(Address, addresses, properties={ 'user':relationship(User, passive_updates=True) }) sess = create_session() u1 = User(name='ed') sess.add(u1) self._assert_uow_size(sess, 2) def test_o2m_flush_size(self): users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(User, users, properties={ 'addresses':relationship(Address), }) mapper(Address, addresses) sess = create_session() u1 = User(name='ed') sess.add(u1) self._assert_uow_size(sess, 2) sess.flush() u1.name='jack' self._assert_uow_size(sess, 2) sess.flush() a1 = Address(email_address='foo') sess.add(a1) sess.flush() u1.addresses.append(a1) self._assert_uow_size(sess, 6) sess.flush() sess = create_session() u1 = sess.query(User).first() u1.name='ed' self._assert_uow_size(sess, 2) u1.addresses self._assert_uow_size(sess, 6) class SingleCycleTest(UOWTest): def teardown(self): engines.testing_reaper.rollback_all() # mysql can't handle delete from nodes # since it doesn't deal with the FKs correctly, # so wipe out the parent_id first testing.db.execute( self.tables.nodes.update().values(parent_id=None) ) super(SingleCycleTest, self).teardown() def test_one_to_many_save(self): Node, nodes = self.classes.Node, self.tables.nodes mapper(Node, nodes, properties={ 'children':relationship(Node) }) sess = create_session() n2, n3 = Node(data='n2'), Node(data='n3') n1 = Node(data='n1', children=[n2, n3]) sess.add(n1) self.assert_sql_execution( testing.db, sess.flush, CompiledSQL( "INSERT INTO nodes (parent_id, data) VALUES " "(:parent_id, :data)", {'parent_id': None, 'data': 'n1'} ), AllOf( CompiledSQL( "INSERT INTO nodes (parent_id, data) VALUES " "(:parent_id, :data)", lambda ctx: {'parent_id': n1.id, 'data': 'n2'} ), CompiledSQL( "INSERT INTO nodes (parent_id, data) VALUES " "(:parent_id, :data)", lambda ctx: {'parent_id': n1.id, 'data': 'n3'} ), ) ) def test_one_to_many_delete_all(self): Node, nodes = self.classes.Node, self.tables.nodes mapper(Node, nodes, properties={ 'children':relationship(Node) }) sess = create_session() n2, n3 = Node(data='n2', children=[]), Node(data='n3', children=[]) n1 = Node(data='n1', children=[n2, n3]) sess.add(n1) sess.flush() sess.delete(n1) sess.delete(n2) sess.delete(n3) self.assert_sql_execution( testing.db, 
sess.flush, CompiledSQL("DELETE FROM nodes WHERE nodes.id = :id", lambda ctx:[{'id':n2.id}, {'id':n3.id}]), CompiledSQL("DELETE FROM nodes WHERE nodes.id = :id", lambda ctx: {'id':n1.id}) ) def test_one_to_many_delete_parent(self): Node, nodes = self.classes.Node, self.tables.nodes mapper(Node, nodes, properties={ 'children':relationship(Node) }) sess = create_session() n2, n3 = Node(data='n2', children=[]), Node(data='n3', children=[]) n1 = Node(data='n1', children=[n2, n3]) sess.add(n1) sess.flush() sess.delete(n1) self.assert_sql_execution( testing.db, sess.flush, AllOf( CompiledSQL("UPDATE nodes SET parent_id=:parent_id " "WHERE nodes.id = :nodes_id", lambda ctx: {'nodes_id':n3.id, 'parent_id':None}), CompiledSQL("UPDATE nodes SET parent_id=:parent_id " "WHERE nodes.id = :nodes_id", lambda ctx: {'nodes_id':n2.id, 'parent_id':None}), ), CompiledSQL("DELETE FROM nodes WHERE nodes.id = :id", lambda ctx:{'id':n1.id}) ) def test_many_to_one_save(self): Node, nodes = self.classes.Node, self.tables.nodes mapper(Node, nodes, properties={ 'parent':relationship(Node, remote_side=nodes.c.id) }) sess = create_session() n1 = Node(data='n1') n2, n3 = Node(data='n2', parent=n1), Node(data='n3', parent=n1) sess.add_all([n2, n3]) self.assert_sql_execution( testing.db, sess.flush, CompiledSQL( "INSERT INTO nodes (parent_id, data) VALUES " "(:parent_id, :data)", {'parent_id': None, 'data': 'n1'} ), AllOf( CompiledSQL( "INSERT INTO nodes (parent_id, data) VALUES " "(:parent_id, :data)", lambda ctx: {'parent_id': n1.id, 'data': 'n2'} ), CompiledSQL( "INSERT INTO nodes (parent_id, data) VALUES " "(:parent_id, :data)", lambda ctx: {'parent_id': n1.id, 'data': 'n3'} ), ) ) def test_many_to_one_delete_all(self): Node, nodes = self.classes.Node, self.tables.nodes mapper(Node, nodes, properties={ 'parent':relationship(Node, remote_side=nodes.c.id) }) sess = create_session() n1 = Node(data='n1') n2, n3 = Node(data='n2', parent=n1), Node(data='n3', parent=n1) sess.add_all([n2, n3]) sess.flush() sess.delete(n1) sess.delete(n2) sess.delete(n3) self.assert_sql_execution( testing.db, sess.flush, CompiledSQL("DELETE FROM nodes WHERE nodes.id = :id", lambda ctx:[{'id':n2.id},{'id':n3.id}]), CompiledSQL("DELETE FROM nodes WHERE nodes.id = :id", lambda ctx: {'id':n1.id}) ) def test_many_to_one_set_null_unloaded(self): Node, nodes = self.classes.Node, self.tables.nodes mapper(Node, nodes, properties={ 'parent':relationship(Node, remote_side=nodes.c.id) }) sess = create_session() n1 = Node(data='n1') n2 = Node(data='n2', parent=n1) sess.add_all([n1, n2]) sess.flush() sess.close() n2 = sess.query(Node).filter_by(data='n2').one() n2.parent = None self.assert_sql_execution( testing.db, sess.flush, CompiledSQL( "UPDATE nodes SET parent_id=:parent_id WHERE " "nodes.id = :nodes_id", lambda ctx: {"parent_id":None, "nodes_id":n2.id} ) ) def test_cycle_rowswitch(self): Node, nodes = self.classes.Node, self.tables.nodes mapper(Node, nodes, properties={ 'children':relationship(Node) }) sess = create_session() n2, n3 = Node(data='n2', children=[]), Node(data='n3', children=[]) n1 = Node(data='n1', children=[n2]) sess.add(n1) sess.flush() sess.delete(n2) n3.id = n2.id n1.children.append(n3) sess.flush() def test_bidirectional_mutations_one(self): Node, nodes = self.classes.Node, self.tables.nodes mapper(Node, nodes, properties={ 'children':relationship(Node, backref=backref('parent', remote_side=nodes.c.id)) }) sess = create_session() n2, n3 = Node(data='n2', children=[]), Node(data='n3', children=[]) n1 = Node(data='n1', children=[n2]) 
sess.add(n1) sess.flush() sess.delete(n2) n1.children.append(n3) sess.flush() sess.delete(n1) sess.delete(n3) sess.flush() def test_bidirectional_multilevel_save(self): Node, nodes = self.classes.Node, self.tables.nodes mapper(Node, nodes, properties={ 'children':relationship(Node, backref=backref('parent', remote_side=nodes.c.id) ) }) sess = create_session() n1 = Node(data='n1') n1.children.append(Node(data='n11')) n12 = Node(data='n12') n1.children.append(n12) n1.children.append(Node(data='n13')) n1.children[1].children.append(Node(data='n121')) n1.children[1].children.append(Node(data='n122')) n1.children[1].children.append(Node(data='n123')) sess.add(n1) self.assert_sql_execution( testing.db, sess.flush, CompiledSQL( "INSERT INTO nodes (parent_id, data) VALUES " "(:parent_id, :data)", lambda ctx:{'parent_id':None, 'data':'n1'} ), CompiledSQL( "INSERT INTO nodes (parent_id, data) VALUES " "(:parent_id, :data)", lambda ctx:{'parent_id':n1.id, 'data':'n11'} ), CompiledSQL( "INSERT INTO nodes (parent_id, data) VALUES " "(:parent_id, :data)", lambda ctx:{'parent_id':n1.id, 'data':'n12'} ), CompiledSQL( "INSERT INTO nodes (parent_id, data) VALUES " "(:parent_id, :data)", lambda ctx:{'parent_id':n1.id, 'data':'n13'} ), CompiledSQL( "INSERT INTO nodes (parent_id, data) VALUES " "(:parent_id, :data)", lambda ctx:{'parent_id':n12.id, 'data':'n121'} ), CompiledSQL( "INSERT INTO nodes (parent_id, data) VALUES " "(:parent_id, :data)", lambda ctx:{'parent_id':n12.id, 'data':'n122'} ), CompiledSQL( "INSERT INTO nodes (parent_id, data) VALUES " "(:parent_id, :data)", lambda ctx:{'parent_id':n12.id, 'data':'n123'} ), ) def test_singlecycle_flush_size(self): Node, nodes = self.classes.Node, self.tables.nodes mapper(Node, nodes, properties={ 'children':relationship(Node) }) sess = create_session() n1 = Node(data='ed') sess.add(n1) self._assert_uow_size(sess, 2) sess.flush() n1.data='jack' self._assert_uow_size(sess, 2) sess.flush() n2 = Node(data='foo') sess.add(n2) sess.flush() n1.children.append(n2) self._assert_uow_size(sess, 3) sess.flush() sess = create_session() n1 = sess.query(Node).first() n1.data='ed' self._assert_uow_size(sess, 2) n1.children self._assert_uow_size(sess, 2) def test_delete_unloaded_m2o(self): Node, nodes = self.classes.Node, self.tables.nodes mapper(Node, nodes, properties={ 'parent':relationship(Node, remote_side=nodes.c.id) }) parent = Node() c1, c2 = Node(parent=parent), Node(parent=parent) session = Session() session.add_all([c1, c2]) session.add(parent) session.flush() pid = parent.id c1id = c1.id c2id = c2.id session.expire(parent) session.expire(c1) session.expire(c2) session.delete(c1) session.delete(c2) session.delete(parent) # testing that relationships # are loaded even if all ids/references are # expired self.assert_sql_execution( testing.db, session.flush, AllOf( # ensure all three m2os are loaded. # the selects here are in fact unexpiring # each row - the m2o comes from the identity map. 
CompiledSQL( "SELECT nodes.id AS nodes_id, nodes.parent_id AS " "nodes_parent_id, " "nodes.data AS nodes_data FROM nodes " "WHERE nodes.id = :param_1", lambda ctx: {'param_1': pid} ), CompiledSQL( "SELECT nodes.id AS nodes_id, nodes.parent_id AS " "nodes_parent_id, " "nodes.data AS nodes_data FROM nodes " "WHERE nodes.id = :param_1", lambda ctx: {'param_1': c1id} ), CompiledSQL( "SELECT nodes.id AS nodes_id, nodes.parent_id AS " "nodes_parent_id, " "nodes.data AS nodes_data FROM nodes " "WHERE nodes.id = :param_1", lambda ctx: {'param_1': c2id} ), CompiledSQL( "DELETE FROM nodes WHERE nodes.id = :id", lambda ctx: [{'id': c1id}, {'id': c2id}] ), CompiledSQL( "DELETE FROM nodes WHERE nodes.id = :id", lambda ctx: {'id': pid} ), ), ) class SingleCyclePlusAttributeTest(fixtures.MappedTest, testing.AssertsExecutionResults, AssertsUOW): @classmethod def define_tables(cls, metadata): Table('nodes', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('parent_id', Integer, ForeignKey('nodes.id')), Column('data', String(30)) ) Table('foobars', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('parent_id', Integer, ForeignKey('nodes.id')), ) def test_flush_size(self): foobars, nodes = self.tables.foobars, self.tables.nodes class Node(fixtures.ComparableEntity): pass class FooBar(fixtures.ComparableEntity): pass mapper(Node, nodes, properties={ 'children':relationship(Node), 'foobars':relationship(FooBar) }) mapper(FooBar, foobars) sess = create_session() n1 = Node(data='n1') n2 = Node(data='n2') n1.children.append(n2) sess.add(n1) # ensure "foobars" doesn't get yanked in here self._assert_uow_size(sess, 3) n1.foobars.append(FooBar()) # saveupdateall/deleteall for FooBar added here, # plus processstate node.foobars # currently the "all" procs stay in pairs self._assert_uow_size(sess, 6) sess.flush() class SingleCycleM2MTest(fixtures.MappedTest, testing.AssertsExecutionResults, AssertsUOW): @classmethod def define_tables(cls, metadata): nodes = Table('nodes', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('data', String(30)), Column('favorite_node_id', Integer, ForeignKey('nodes.id')) ) node_to_nodes =Table('node_to_nodes', metadata, Column('left_node_id', Integer, ForeignKey('nodes.id'),primary_key=True), Column('right_node_id', Integer, ForeignKey('nodes.id'),primary_key=True), ) def test_many_to_many_one(self): nodes, node_to_nodes = self.tables.nodes, self.tables.node_to_nodes class Node(fixtures.ComparableEntity): pass mapper(Node, nodes, properties={ 'children':relationship(Node, secondary=node_to_nodes, primaryjoin=nodes.c.id==node_to_nodes.c.left_node_id, secondaryjoin=nodes.c.id==node_to_nodes.c.right_node_id, backref='parents' ), 'favorite':relationship(Node, remote_side=nodes.c.id) }) sess = create_session() n1 = Node(data='n1') n2 = Node(data='n2') n3 = Node(data='n3') n4 = Node(data='n4') n5 = Node(data='n5') n4.favorite = n3 n1.favorite = n5 n5.favorite = n2 n1.children = [n2, n3, n4] n2.children = [n3, n5] n3.children = [n5, n4] sess.add_all([n1, n2, n3, n4, n5]) # can't really assert the SQL on this easily # since there's too many ways to insert the rows. 
# so check the end result sess.flush() eq_( sess.query(node_to_nodes.c.left_node_id, node_to_nodes.c.right_node_id).\ order_by(node_to_nodes.c.left_node_id, node_to_nodes.c.right_node_id).\ all(), sorted([ (n1.id, n2.id), (n1.id, n3.id), (n1.id, n4.id), (n2.id, n3.id), (n2.id, n5.id), (n3.id, n5.id), (n3.id, n4.id) ]) ) sess.delete(n1) self.assert_sql_execution( testing.db, sess.flush, # this is n1.parents firing off, as it should, since # passive_deletes is False for n1.parents CompiledSQL( "SELECT nodes.id AS nodes_id, nodes.data AS nodes_data, " "nodes.favorite_node_id AS nodes_favorite_node_id FROM " "nodes, node_to_nodes WHERE :param_1 = " "node_to_nodes.right_node_id AND nodes.id = " "node_to_nodes.left_node_id" , lambda ctx:{u'param_1': n1.id}, ), CompiledSQL( "DELETE FROM node_to_nodes WHERE " "node_to_nodes.left_node_id = :left_node_id AND " "node_to_nodes.right_node_id = :right_node_id", lambda ctx:[ {'right_node_id': n2.id, 'left_node_id': n1.id}, {'right_node_id': n3.id, 'left_node_id': n1.id}, {'right_node_id': n4.id, 'left_node_id': n1.id} ] ), CompiledSQL( "DELETE FROM nodes WHERE nodes.id = :id", lambda ctx:{'id': n1.id} ), ) for n in [n2, n3, n4, n5]: sess.delete(n) # load these collections # outside of the flush() below n4.children n5.children self.assert_sql_execution( testing.db, sess.flush, CompiledSQL( "DELETE FROM node_to_nodes WHERE node_to_nodes.left_node_id " "= :left_node_id AND node_to_nodes.right_node_id = " ":right_node_id", lambda ctx:[ {'right_node_id': n5.id, 'left_node_id': n3.id}, {'right_node_id': n4.id, 'left_node_id': n3.id}, {'right_node_id': n3.id, 'left_node_id': n2.id}, {'right_node_id': n5.id, 'left_node_id': n2.id} ] ), CompiledSQL( "DELETE FROM nodes WHERE nodes.id = :id", lambda ctx:[{'id': n4.id}, {'id': n5.id}] ), CompiledSQL( "DELETE FROM nodes WHERE nodes.id = :id", lambda ctx:[{'id': n2.id}, {'id': n3.id}] ), ) class RowswitchAccountingTest(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): Table('parent', metadata, Column('id', Integer, primary_key=True) ) Table('child', metadata, Column('id', Integer, ForeignKey('parent.id'), primary_key=True) ) def test_accounting_for_rowswitch(self): parent, child = self.tables.parent, self.tables.child class Parent(object): def __init__(self, id): self.id = id self.child = Child() class Child(object): pass mapper(Parent, parent, properties={ 'child':relationship(Child, uselist=False, cascade="all, delete-orphan", backref="parent") }) mapper(Child, child) sess = create_session(autocommit=False) p1 = Parent(1) sess.add(p1) sess.commit() sess.close() p2 = Parent(1) p3 = sess.merge(p2) old = attributes.get_history(p3, 'child')[2][0] assert old in sess sess.flush() assert p3.child._sa_instance_state.session_id == sess.hash_key assert p3.child in sess p4 = Parent(1) p5 = sess.merge(p4) old = attributes.get_history(p5, 'child')[2][0] assert old in sess sess.flush() class BatchInsertsTest(fixtures.MappedTest, testing.AssertsExecutionResults): @classmethod def define_tables(cls, metadata): Table('t', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('data', String(50)), Column('def_', String(50), server_default='def1') ) def test_batch_interaction(self): """test batching groups same-structured, primary key present statements together. 
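The flush batches consecutive INSERTs that share an identical column set into a single executemany() call; rows that omit the primary key are executed one at a time so their generated ids can be retrieved, and a change in the supplied columns starts a new batch. A sketch of how the groups form, using the T class and def_ column from the fixture above:

sess.add_all([
    T(data='t1'),                    # no id given: single execute, so the
    T(data='t2'),                    # generated key can be fetched per row
    T(id=3, data='t3'),              # explicit ids, same columns: these
    T(id=4, data='t4'),              # three go out as one executemany()
    T(id=5, data='t5'),
    T(id=6, data='t6', def_='d2'),   # extra column supplied: new batch
])
sess.flush()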
""" t = self.tables.t class T(fixtures.ComparableEntity): pass mapper(T, t) sess = Session() sess.add_all([ T(data='t1'), T(data='t2'), T(id=3, data='t3'), T(id=4, data='t4'), T(id=5, data='t5'), T(id=6, data=func.lower('t6')), T(id=7, data='t7'), T(id=8, data='t8'), T(id=9, data='t9', def_='def2'), T(id=10, data='t10', def_='def3'), T(id=11, data='t11'), ]) self.assert_sql_execution( testing.db, sess.flush, CompiledSQL( "INSERT INTO t (data) VALUES (:data)", {'data': 't1'} ), CompiledSQL( "INSERT INTO t (data) VALUES (:data)", {'data': 't2'} ), CompiledSQL( "INSERT INTO t (id, data) VALUES (:id, :data)", [{'data': 't3', 'id': 3}, {'data': 't4', 'id': 4}, {'data': 't5', 'id': 5}] ), CompiledSQL( "INSERT INTO t (id, data) VALUES (:id, lower(:lower_1))", {'lower_1': 't6', 'id': 6} ), CompiledSQL( "INSERT INTO t (id, data) VALUES (:id, :data)", [{'data': 't7', 'id': 7}, {'data': 't8', 'id': 8}] ), CompiledSQL( "INSERT INTO t (id, data, def_) VALUES (:id, :data, :def_)", [{'data': 't9', 'id': 9, 'def_':'def2'}, {'data': 't10', 'id': 10, 'def_':'def3'}] ), CompiledSQL( "INSERT INTO t (id, data) VALUES (:id, :data)", {'data': 't11', 'id': 11} ), ) class LoadersUsingCommittedTest(UOWTest): """Test that events which occur within a flush() get the same attribute loading behavior as on the outside of the flush, and that the unit of work itself uses the "committed" version of primary/foreign key attributes when loading a collection for historical purposes (this typically has importance for when primary key values change). """ def _mapper_setup(self, passive_updates=True): users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(User, users, properties={ 'addresses': relationship(Address, order_by=addresses.c.email_address, passive_updates=passive_updates, backref='user') }) mapper(Address, addresses) return create_session(autocommit=False) def test_before_update_m2o(self): """Expect normal many to one attribute load behavior (should not get committed value) from within public 'before_update' event""" sess = self._mapper_setup() Address, User = self.classes.Address, self.classes.User def before_update(mapper, connection, target): # if get committed is used to find target.user, then # it will be still be u1 instead of u2 assert target.user.id == target.user_id == u2.id from sqlalchemy import event event.listen(Address, 'before_update', before_update) a1 = Address(email_address='a1') u1 = User(name='u1', addresses=[a1]) sess.add(u1) u2 = User(name='u2') sess.add(u2) sess.commit() sess.expunge_all() # lookup an address and move it to the other user a1 = sess.query(Address).get(a1.id) # move address to another user's fk assert a1.user_id == u1.id a1.user_id = u2.id sess.flush() def test_before_update_o2m_passive(self): """Expect normal one to many attribute load behavior (should not get committed value) from within public 'before_update' event""" self._test_before_update_o2m(True) def test_before_update_o2m_notpassive(self): """Expect normal one to many attribute load behavior (should not get committed value) from within public 'before_update' event with passive_updates=False """ self._test_before_update_o2m(False) def _test_before_update_o2m(self, passive_updates): sess = self._mapper_setup(passive_updates=passive_updates) Address, User = self.classes.Address, self.classes.User class AvoidReferencialError(Exception): """the test here would require ON UPDATE CASCADE on FKs for the flush to fully succeed; this exception is used to cancel 
the flush before we get that far. """ def before_update(mapper, connection, target): if passive_updates: # we shouldn't be using committed value. # so, having switched target's primary key, # we expect no related items in the collection # since we are using passive_updates # this is a behavior change since #2350 assert 'addresses' not in target.__dict__ eq_(target.addresses, []) else: # in contrast with passive_updates=True, # here we expect the orm to have looked up the addresses # with the committed value (it needs to in order to # update the foreign keys). So we expect addresses # collection to move with the user, # (just like they will be after the update) # collection is already loaded assert 'addresses' in target.__dict__ eq_([a.id for a in target.addresses], [a.id for a in [a1, a2]]) raise AvoidReferencialError() from sqlalchemy import event event.listen(User, 'before_update', before_update) a1 = Address(email_address='jack1') a2 = Address(email_address='jack2') u1 = User(id=1, name='jack', addresses=[a1, a2]) sess.add(u1) sess.commit() sess.expunge_all() u1 = sess.query(User).get(u1.id) u1.id = 2 try: sess.flush() except AvoidReferencialError: pass SQLAlchemy-0.8.4/test/orm/test_update_delete.py0000644000076500000240000006233012251150016022217 0ustar classicstaff00000000000000from sqlalchemy.testing import eq_, assert_raises, assert_raises_message from sqlalchemy.testing import fixtures from sqlalchemy import Integer, String, ForeignKey, or_, and_, exc, \ select, func, Boolean, case from sqlalchemy.orm import mapper, relationship, backref, Session, \ joinedload, aliased from sqlalchemy import testing from sqlalchemy.testing.schema import Table, Column class UpdateDeleteTest(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): Table('users', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', String(32)), Column('age', Integer)) @classmethod def setup_classes(cls): class User(cls.Comparable): pass @classmethod def insert_data(cls): users = cls.tables.users users.insert().execute([ dict(id=1, name='john', age=25), dict(id=2, name='jack', age=47), dict(id=3, name='jill', age=29), dict(id=4, name='jane', age=37), ]) @classmethod def setup_mappers(cls): User = cls.classes.User users = cls.tables.users mapper(User, users) def test_illegal_eval(self): User = self.classes.User s = Session() assert_raises_message( exc.ArgumentError, "Valid strategies for session synchronization " "are 'evaluate', 'fetch', False", s.query(User).update, {}, synchronize_session="fake" ) def test_illegal_operations(self): User = self.classes.User s = Session() for q, mname in ( (s.query(User).limit(2), "limit"), (s.query(User).offset(2), "offset"), (s.query(User).limit(2).offset(2), "limit"), (s.query(User).order_by(User.id), "order_by"), (s.query(User).group_by(User.id), "group_by"), (s.query(User).distinct(), "distinct") ): assert_raises_message( exc.InvalidRequestError, r"Can't call Query.update\(\) when %s\(\) has been called" % mname, q.update, {'name':'ed'}) assert_raises_message( exc.InvalidRequestError, r"Can't call Query.delete\(\) when %s\(\) has been called" % mname, q.delete) def test_delete(self): User = self.classes.User sess = Session() john,jack,jill,jane = sess.query(User).order_by(User.id).all() sess.query(User).filter(or_(User.name == 'john', User.name == 'jill')).delete() assert john not in sess and jill not in sess eq_(sess.query(User).order_by(User.id).all(), [jack,jane]) def test_delete_against_metadata(self): User = 
self.classes.User users = self.tables.users sess = Session() sess.query(users).delete(synchronize_session=False) eq_(sess.query(User).count(), 0) def test_delete_with_bindparams(self): User = self.classes.User sess = Session() john,jack,jill,jane = sess.query(User).order_by(User.id).all() sess.query(User).filter('name = :name').params(name='john').delete('fetch') assert john not in sess eq_(sess.query(User).order_by(User.id).all(), [jack,jill,jane]) def test_delete_rollback(self): User = self.classes.User sess = Session() john,jack,jill,jane = sess.query(User).order_by(User.id).all() sess.query(User).filter(or_(User.name == 'john', User.name == 'jill')).\ delete(synchronize_session='evaluate') assert john not in sess and jill not in sess sess.rollback() assert john in sess and jill in sess def test_delete_rollback_with_fetch(self): User = self.classes.User sess = Session() john,jack,jill,jane = sess.query(User).order_by(User.id).all() sess.query(User).filter(or_(User.name == 'john', User.name == 'jill')).\ delete(synchronize_session='fetch') assert john not in sess and jill not in sess sess.rollback() assert john in sess and jill in sess def test_delete_without_session_sync(self): User = self.classes.User sess = Session() john,jack,jill,jane = sess.query(User).order_by(User.id).all() sess.query(User).filter(or_(User.name == 'john', User.name == 'jill')).\ delete(synchronize_session=False) assert john in sess and jill in sess eq_(sess.query(User).order_by(User.id).all(), [jack,jane]) def test_delete_with_fetch_strategy(self): User = self.classes.User sess = Session() john,jack,jill,jane = sess.query(User).order_by(User.id).all() sess.query(User).filter(or_(User.name == 'john', User.name == 'jill')).\ delete(synchronize_session='fetch') assert john not in sess and jill not in sess eq_(sess.query(User).order_by(User.id).all(), [jack,jane]) @testing.fails_on('mysql', 'FIXME: unknown') def test_delete_invalid_evaluation(self): User = self.classes.User sess = Session() john,jack,jill,jane = sess.query(User).order_by(User.id).all() assert_raises(exc.InvalidRequestError, sess.query(User). 
filter(User.name == select([func.max(User.name)])).delete, synchronize_session='evaluate' ) sess.query(User).filter(User.name == select([func.max(User.name)])).\ delete(synchronize_session='fetch') assert john not in sess eq_(sess.query(User).order_by(User.id).all(), [jack,jill,jane]) def test_update(self): User, users = self.classes.User, self.tables.users sess = Session() john,jack,jill,jane = sess.query(User).order_by(User.id).all() sess.query(User).filter(User.age > 29).\ update({'age': User.age - 10}, synchronize_session='evaluate') eq_([john.age, jack.age, jill.age, jane.age], [25,37,29,27]) eq_(sess.query(User.age).order_by(User.id).all(), zip([25,37,29,27])) sess.query(User).filter(User.age > 29).\ update({User.age: User.age - 10}, synchronize_session='evaluate') eq_([john.age, jack.age, jill.age, jane.age], [25,27,29,27]) eq_(sess.query(User.age).order_by(User.id).all(), zip([25,27,29,27])) sess.query(User).filter(User.age > 27).\ update({users.c.age: User.age - 10}, synchronize_session='evaluate') eq_([john.age, jack.age, jill.age, jane.age], [25,27,19,27]) eq_(sess.query(User.age).order_by(User.id).all(), zip([25,27,19,27])) sess.query(User).filter(User.age == 25).\ update({User.age: User.age - 10}, synchronize_session='fetch') eq_([john.age, jack.age, jill.age, jane.age], [15,27,19,27]) eq_(sess.query(User.age).order_by(User.id).all(), zip([15,27,19,27])) def test_update_against_metadata(self): User, users = self.classes.User, self.tables.users sess = Session() sess.query(users).update({users.c.age: 29}, synchronize_session=False) eq_(sess.query(User.age).order_by(User.id).all(), zip([29,29,29,29])) def test_update_with_bindparams(self): User = self.classes.User sess = Session() john,jack,jill,jane = sess.query(User).order_by(User.id).all() sess.query(User).filter('age > :x').params(x=29).\ update({'age': User.age - 10}, synchronize_session='fetch') eq_([john.age, jack.age, jill.age, jane.age], [25,37,29,27]) eq_(sess.query(User.age).order_by(User.id).all(), zip([25,37,29,27])) def test_update_without_load(self): User = self.classes.User sess = Session() sess.query(User).filter(User.id == 3).\ update({'age': 44}, synchronize_session='fetch') eq_(sess.query(User.age).order_by(User.id).all(), zip([25,47,44,37])) def test_update_changes_resets_dirty(self): User = self.classes.User sess = Session(autoflush=False) john,jack,jill,jane = sess.query(User).order_by(User.id).all() john.age = 50 jack.age = 37 # autoflush is false. therefore our '50' and '37' are getting # blown away by this operation. 
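Query.update() and Query.delete() in the tests above accept three session-synchronization strategies, and test_illegal_eval confirms anything else is rejected. A short sketch of the trade-offs, using the User fixture and an open session named sess as in these tests:

# 'evaluate': the WHERE criteria are evaluated in Python against objects
# already in the Session and matching attributes are updated in place;
# criteria the evaluator cannot handle raise an error instead
sess.query(User).filter(User.age > 29).update(
    {'age': User.age - 10}, synchronize_session='evaluate')

# 'fetch': the matching primary keys are SELECTed before the UPDATE and
# the affected attributes are expired, reloading on next access
sess.query(User).filter(User.age > 29).update(
    {'age': User.age - 10}, synchronize_session='fetch')

# False: emit the UPDATE only; objects already in the Session may hold
# stale values until they are expired or the transaction ends
sess.query(User).filter(User.age > 29).update(
    {'age': User.age - 10}, synchronize_session=False)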
sess.query(User).filter(User.age > 29).\ update({'age': User.age - 10}, synchronize_session='evaluate') for x in (john, jack, jill, jane): assert not sess.is_modified(x) eq_([john.age, jack.age, jill.age, jane.age], [25,37,29,27]) john.age = 25 assert john in sess.dirty assert jack in sess.dirty assert jill not in sess.dirty assert not sess.is_modified(john) assert not sess.is_modified(jack) def test_update_changes_with_autoflush(self): User = self.classes.User sess = Session() john,jack,jill,jane = sess.query(User).order_by(User.id).all() john.age = 50 jack.age = 37 sess.query(User).filter(User.age > 29).\ update({'age': User.age - 10}, synchronize_session='evaluate') for x in (john, jack, jill, jane): assert not sess.is_modified(x) eq_([john.age, jack.age, jill.age, jane.age], [40, 27, 29, 27]) john.age = 25 assert john in sess.dirty assert jack not in sess.dirty assert jill not in sess.dirty assert sess.is_modified(john) assert not sess.is_modified(jack) def test_update_with_expire_strategy(self): User = self.classes.User sess = Session() john,jack,jill,jane = sess.query(User).order_by(User.id).all() sess.query(User).filter(User.age > 29).\ update({'age': User.age - 10}, synchronize_session='fetch') eq_([john.age, jack.age, jill.age, jane.age], [25,37,29,27]) eq_(sess.query(User.age).order_by(User.id).all(), zip([25,37,29,27])) @testing.fails_if(lambda: not testing.db.dialect.supports_sane_rowcount) def test_update_returns_rowcount(self): User = self.classes.User sess = Session() rowcount = sess.query(User).filter(User.age > 29).update({'age': User.age + 0}) eq_(rowcount, 2) rowcount = sess.query(User).filter(User.age > 29).update({'age': User.age - 10}) eq_(rowcount, 2) @testing.fails_if(lambda: not testing.db.dialect.supports_sane_rowcount) def test_delete_returns_rowcount(self): User = self.classes.User sess = Session() rowcount = sess.query(User).filter(User.age > 26).\ delete(synchronize_session=False) eq_(rowcount, 3) def test_update_all(self): User = self.classes.User sess = Session() john,jack,jill,jane = sess.query(User).order_by(User.id).all() sess.query(User).update({'age': 42}, synchronize_session='evaluate') eq_([john.age, jack.age, jill.age, jane.age], [42,42,42,42]) eq_(sess.query(User.age).order_by(User.id).all(), zip([42,42,42,42])) def test_delete_all(self): User = self.classes.User sess = Session() john,jack,jill,jane = sess.query(User).order_by(User.id).all() sess.query(User).delete(synchronize_session='evaluate') assert not (john in sess or jack in sess or jill in sess or jane in sess) eq_(sess.query(User).count(), 0) def test_autoflush_before_evaluate_update(self): User = self.classes.User sess = Session() john = sess.query(User).filter_by(name='john').one() john.name = 'j2' sess.query(User).filter_by(name='j2').\ update({'age':42}, synchronize_session='evaluate') eq_(john.age, 42) def test_autoflush_before_fetch_update(self): User = self.classes.User sess = Session() john = sess.query(User).filter_by(name='john').one() john.name = 'j2' sess.query(User).filter_by(name='j2').\ update({'age':42}, synchronize_session='fetch') eq_(john.age, 42) def test_autoflush_before_evaluate_delete(self): User = self.classes.User sess = Session() john = sess.query(User).filter_by(name='john').one() john.name = 'j2' sess.query(User).filter_by(name='j2').\ delete( synchronize_session='evaluate') assert john not in sess def test_autoflush_before_fetch_delete(self): User = self.classes.User sess = Session() john = sess.query(User).filter_by(name='john').one() john.name = 'j2' 
sess.query(User).filter_by(name='j2').\ delete( synchronize_session='fetch') assert john not in sess def test_evaluate_before_update(self): User = self.classes.User sess = Session() john = sess.query(User).filter_by(name='john').one() sess.expire(john, ['age']) # eval must be before the update. otherwise # we eval john, age has been expired and doesn't # match the new value coming in sess.query(User).filter_by(name='john').filter_by(age=25).\ update({'name':'j2', 'age':40}, synchronize_session='evaluate') eq_(john.name, 'j2') eq_(john.age, 40) def test_fetch_before_update(self): User = self.classes.User sess = Session() john = sess.query(User).filter_by(name='john').one() sess.expire(john, ['age']) sess.query(User).filter_by(name='john').filter_by(age=25).\ update({'name':'j2', 'age':40}, synchronize_session='fetch') eq_(john.name, 'j2') eq_(john.age, 40) def test_evaluate_before_delete(self): User = self.classes.User sess = Session() john = sess.query(User).filter_by(name='john').one() sess.expire(john, ['age']) sess.query(User).filter_by(name='john').\ filter_by(age=25).\ delete( synchronize_session='evaluate') assert john not in sess def test_fetch_before_delete(self): User = self.classes.User sess = Session() john = sess.query(User).filter_by(name='john').one() sess.expire(john, ['age']) sess.query(User).filter_by(name='john').\ filter_by(age=25).\ delete( synchronize_session='fetch') assert john not in sess class UpdateDeleteIgnoresLoadersTest(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): Table('users', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', String(32)), Column('age', Integer)) Table('documents', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('user_id', None, ForeignKey('users.id')), Column('title', String(32))) @classmethod def setup_classes(cls): class User(cls.Comparable): pass class Document(cls.Comparable): pass @classmethod def insert_data(cls): users = cls.tables.users users.insert().execute([ dict(id=1, name='john', age=25), dict(id=2, name='jack', age=47), dict(id=3, name='jill', age=29), dict(id=4, name='jane', age=37), ]) documents = cls.tables.documents documents.insert().execute([ dict(id=1, user_id=1, title='foo'), dict(id=2, user_id=1, title='bar'), dict(id=3, user_id=2, title='baz'), ]) @classmethod def setup_mappers(cls): documents, Document, User, users = (cls.tables.documents, cls.classes.Document, cls.classes.User, cls.tables.users) mapper(User, users) mapper(Document, documents, properties={ 'user': relationship(User, lazy='joined', backref=backref('documents', lazy='select')) }) def test_update_with_eager_relationships(self): Document = self.classes.Document sess = Session() foo,bar,baz = sess.query(Document).order_by(Document.id).all() sess.query(Document).filter(Document.user_id == 1).\ update({'title': Document.title+Document.title}, synchronize_session='fetch') eq_([foo.title, bar.title, baz.title], ['foofoo','barbar', 'baz']) eq_(sess.query(Document.title).order_by(Document.id).all(), zip(['foofoo','barbar', 'baz'])) def test_update_with_explicit_joinedload(self): User = self.classes.User sess = Session() john,jack,jill,jane = sess.query(User).order_by(User.id).all() sess.query(User).options(joinedload(User.documents)).filter(User.age > 29).\ update({'age': User.age - 10}, synchronize_session='fetch') eq_([john.age, jack.age, jill.age, jane.age], [25,37,29,27]) eq_(sess.query(User.age).order_by(User.id).all(), zip([25,37,29,27])) def 
test_delete_with_eager_relationships(self): Document = self.classes.Document sess = Session() sess.query(Document).filter(Document.user_id == 1).\ delete(synchronize_session=False) eq_(sess.query(Document.title).all(), zip(['baz'])) class UpdateDeleteFromTest(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): Table('users', metadata, Column('id', Integer, primary_key=True), ) Table('documents', metadata, Column('id', Integer, primary_key=True), Column('user_id', None, ForeignKey('users.id')), Column('title', String(32)), Column('flag', Boolean) ) @classmethod def setup_classes(cls): class User(cls.Comparable): pass class Document(cls.Comparable): pass @classmethod def insert_data(cls): users = cls.tables.users users.insert().execute([ dict(id=1, ), dict(id=2, ), dict(id=3, ), dict(id=4, ), ]) documents = cls.tables.documents documents.insert().execute([ dict(id=1, user_id=1, title='foo'), dict(id=2, user_id=1, title='bar'), dict(id=3, user_id=2, title='baz'), dict(id=4, user_id=2, title='hoho'), dict(id=5, user_id=3, title='lala'), dict(id=6, user_id=3, title='bleh'), ]) @classmethod def setup_mappers(cls): documents, Document, User, users = (cls.tables.documents, cls.classes.Document, cls.classes.User, cls.tables.users) mapper(User, users) mapper(Document, documents, properties={ 'user': relationship(User, backref='documents') }) @testing.requires.update_from def test_update_from_joined_subq_test(self): Document = self.classes.Document s = Session() subq = s.query(func.max(Document.title).label('title')).\ group_by(Document.user_id).subquery() s.query(Document).filter(Document.title == subq.c.title).\ update({'flag': True}, synchronize_session=False) eq_( set(s.query(Document.id, Document.flag)), set([ (1, True), (2, None), (3, None), (4, True), (5, True), (6, None), ]) ) @testing.requires.update_where_target_in_subquery def test_update_using_in(self): Document = self.classes.Document s = Session() subq = s.query(func.max(Document.title).label('title')).\ group_by(Document.user_id).subquery() s.query(Document).filter(Document.title.in_(subq)).\ update({'flag': True}, synchronize_session=False) eq_( set(s.query(Document.id, Document.flag)), set([ (1, True), (2, None), (3, None), (4, True), (5, True), (6, None), ]) ) @testing.requires.update_where_target_in_subquery @testing.requires.standalone_binds def test_update_using_case(self): Document = self.classes.Document s = Session() subq = s.query(func.max(Document.title).label('title')).\ group_by(Document.user_id).subquery() # this would work with Firebird if you do literal_column('1') # instead case_stmt = case([(Document.title.in_(subq), True)], else_=False) s.query(Document).update({'flag': case_stmt}, synchronize_session=False) eq_( set(s.query(Document.id, Document.flag)), set([ (1, True), (2, False), (3, False), (4, True), (5, True), (6, False), ]) ) class ExpressionUpdateTest(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): data = Table('data', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('counter', Integer, nullable=False, default=0) ) @classmethod def setup_classes(cls): class Data(cls.Comparable): pass @classmethod def setup_mappers(cls): data = cls.tables.data mapper(cls.classes.Data, data, properties={'cnt':data.c.counter}) @testing.provide_metadata def test_update_attr_names(self): Data = self.classes.Data d1 = Data() sess = Session() sess.add(d1) sess.commit() eq_(d1.cnt, 0) sess.query(Data).update({Data.cnt:Data.cnt + 1}) sess.flush() eq_(d1.cnt, 
1) sess.query(Data).update({Data.cnt:Data.cnt + 1}, 'fetch') sess.flush() eq_(d1.cnt, 2) sess.close() class InheritTest(fixtures.DeclarativeMappedTest): run_inserts = 'each' run_deletes = 'each' @classmethod def setup_classes(cls): Base = cls.DeclarativeBasic class Person(Base): __tablename__ = 'person' id = Column(Integer, primary_key=True, test_needs_autoincrement=True) type = Column(String(50)) name = Column(String(50)) class Engineer(Person): __tablename__ = 'engineer' id = Column(Integer, ForeignKey('person.id'), primary_key=True) engineer_name = Column(String(50)) class Manager(Person): __tablename__ = 'manager' id = Column(Integer, ForeignKey('person.id'), primary_key=True) manager_name = Column(String(50)) @classmethod def insert_data(cls): Engineer, Person, Manager = cls.classes.Engineer, \ cls.classes.Person, cls.classes.Manager s = Session(testing.db) s.add_all([ Engineer(name='e1', engineer_name='e1'), Manager(name='m1', manager_name='m1'), Engineer(name='e2', engineer_name='e2'), Person(name='p1'), ]) s.commit() def test_illegal_metadata(self): person = self.classes.Person.__table__ engineer = self.classes.Engineer.__table__ sess = Session() assert_raises_message( exc.InvalidRequestError, "This operation requires only one Table or entity be " "specified as the target.", sess.query(person.join(engineer)).update, {} ) def test_update_subtable_only(self): Engineer = self.classes.Engineer s = Session(testing.db) s.query(Engineer).update({'engineer_name': 'e5'}) eq_( s.query(Engineer.engineer_name).all(), [('e5', ), ('e5', )] ) @testing.requires.update_from def test_update_from(self): Engineer = self.classes.Engineer Person = self.classes.Person s = Session(testing.db) s.query(Engineer).filter(Engineer.id == Person.id).\ filter(Person.name == 'e2').update({'engineer_name': 'e5'}) eq_( set(s.query(Person.name, Engineer.engineer_name)), set([('e1', 'e1', ), ('e2', 'e5')]) ) @testing.only_on('mysql', 'Multi table update') def test_update_from_multitable(self): Engineer = self.classes.Engineer Person = self.classes.Person s = Session(testing.db) s.query(Engineer).filter(Engineer.id == Person.id).\ filter(Person.name == 'e2').update({Person.name: 'e22', Engineer.engineer_name: 'e55'}) eq_( set(s.query(Person.name, Engineer.engineer_name)), set([('e1', 'e1', ), ('e22', 'e55')]) ) SQLAlchemy-0.8.4/test/orm/test_utils.py0000644000076500000240000005210712251150016020554 0ustar classicstaff00000000000000from sqlalchemy.testing import assert_raises, assert_raises_message from sqlalchemy.orm import util from sqlalchemy import Column from sqlalchemy import Integer from sqlalchemy import MetaData from sqlalchemy import Table from sqlalchemy.orm import aliased, with_polymorphic from sqlalchemy.orm import mapper, create_session from sqlalchemy.testing import fixtures from test.orm import _fixtures from sqlalchemy.testing import eq_, is_ from sqlalchemy.orm.util import PathRegistry from sqlalchemy import inspect class AliasedClassTest(fixtures.TestBase): def point_map(self, cls): table = Table('point', MetaData(), Column('id', Integer(), primary_key=True), Column('x', Integer), Column('y', Integer)) mapper(cls, table) return table def test_simple(self): class Point(object): pass table = self.point_map(Point) alias = aliased(Point) assert alias.id assert alias.x assert alias.y assert Point.id.__clause_element__().table is table assert alias.id.__clause_element__().table is not table def test_notcallable(self): class Point(object): pass table = self.point_map(Point) alias = aliased(Point) 
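        # aliased() returns an AliasedClass proxy intended for use in query
        # expressions; it is not the mapped class itself, so attempting to
        # call it like a constructor raises TypeError.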
assert_raises(TypeError, alias) def test_instancemethods(self): class Point(object): def zero(self): self.x, self.y = 0, 0 table = self.point_map(Point) alias = aliased(Point) assert Point.zero # Py2K # TODO: what is this testing ?? assert not getattr(alias, 'zero') # end Py2K def test_classmethods(self): class Point(object): @classmethod def max_x(cls): return 100 table = self.point_map(Point) alias = aliased(Point) assert Point.max_x assert alias.max_x assert Point.max_x() == alias.max_x() def test_simpleproperties(self): class Point(object): @property def max_x(self): return 100 table = self.point_map(Point) alias = aliased(Point) assert Point.max_x assert Point.max_x != 100 assert alias.max_x assert Point.max_x is alias.max_x def test_descriptors(self): """Tortured...""" class descriptor(object): def __init__(self, fn): self.fn = fn def __get__(self, obj, owner): if obj is not None: return self.fn(obj, obj) else: return self def method(self): return 'method' class Point(object): center = (0, 0) @descriptor def thing(self, arg): return arg.center table = self.point_map(Point) alias = aliased(Point) assert Point.thing != (0, 0) assert Point().thing == (0, 0) assert Point.thing.method() == 'method' assert alias.thing != (0, 0) assert alias.thing.method() == 'method' def test_hybrid_descriptors(self): from sqlalchemy import Column # override testlib's override import types class MethodDescriptor(object): def __init__(self, func): self.func = func def __get__(self, instance, owner): if instance is None: # Py3K #args = (self.func, owner) # Py2K args = (self.func, owner, owner.__class__) # end Py2K else: # Py3K #args = (self.func, instance) # Py2K args = (self.func, instance, owner) # end Py2K return types.MethodType(*args) class PropertyDescriptor(object): def __init__(self, fget, fset, fdel): self.fget = fget self.fset = fset self.fdel = fdel def __get__(self, instance, owner): if instance is None: return self.fget(owner) else: return self.fget(instance) def __set__(self, instance, value): self.fset(instance, value) def __delete__(self, instance): self.fdel(instance) hybrid = MethodDescriptor def hybrid_property(fget, fset=None, fdel=None): return PropertyDescriptor(fget, fset, fdel) def assert_table(expr, table): for child in expr.get_children(): if isinstance(child, Column): assert child.table is table class Point(object): def __init__(self, x, y): self.x, self.y = x, y @hybrid def left_of(self, other): return self.x < other.x double_x = hybrid_property(lambda self: self.x * 2) table = self.point_map(Point) alias = aliased(Point) alias_table = alias.x.__clause_element__().table assert table is not alias_table p1 = Point(-10, -10) p2 = Point(20, 20) assert p1.left_of(p2) assert p1.double_x == -20 assert_table(Point.double_x, table) assert_table(alias.double_x, alias_table) assert_table(Point.left_of(p2), table) assert_table(alias.left_of(p2), alias_table) class IdentityKeyTest(_fixtures.FixtureTest): run_inserts = None def test_identity_key_1(self): User, users = self.classes.User, self.tables.users mapper(User, users) key = util.identity_key(User, [1]) eq_(key, (User, (1,))) key = util.identity_key(User, ident=[1]) eq_(key, (User, (1,))) def test_identity_key_scalar(self): User, users = self.classes.User, self.tables.users mapper(User, users) key = util.identity_key(User, 1) eq_(key, (User, (1,))) key = util.identity_key(User, ident=1) eq_(key, (User, (1,))) def test_identity_key_2(self): users, User = self.tables.users, self.classes.User mapper(User, users) s = create_session() u = 
User(name='u1') s.add(u) s.flush() key = util.identity_key(instance=u) eq_(key, (User, (u.id,))) def test_identity_key_3(self): User, users = self.classes.User, self.tables.users mapper(User, users) row = {users.c.id: 1, users.c.name: "Frank"} key = util.identity_key(User, row=row) eq_(key, (User, (1,))) class PathRegistryTest(_fixtures.FixtureTest): run_setup_mappers = 'once' run_inserts = None run_deletes = None @classmethod def setup_mappers(cls): cls._setup_stock_mapping() def test_root_registry(self): umapper = inspect(self.classes.User) is_( util.RootRegistry()[umapper], umapper._path_registry ) eq_( util.RootRegistry()[umapper], util.PathRegistry.coerce((umapper,)) ) def test_expand(self): umapper = inspect(self.classes.User) amapper = inspect(self.classes.Address) path = PathRegistry.coerce((umapper,)) eq_( path[umapper.attrs.addresses][amapper] [amapper.attrs.email_address], PathRegistry.coerce((umapper, umapper.attrs.addresses, amapper, amapper.attrs.email_address)) ) def test_entity_boolean(self): umapper = inspect(self.classes.User) path = PathRegistry.coerce((umapper,)) is_(bool(path), True) def test_key_boolean(self): umapper = inspect(self.classes.User) path = PathRegistry.coerce((umapper, umapper.attrs.addresses)) is_(bool(path), True) def test_aliased_class(self): User = self.classes.User ua = aliased(User) ua_insp = inspect(ua) path = PathRegistry.coerce((ua_insp, ua_insp.mapper.attrs.addresses)) assert path.parent.is_aliased_class def test_indexed_entity(self): umapper = inspect(self.classes.User) amapper = inspect(self.classes.Address) path = PathRegistry.coerce((umapper, umapper.attrs.addresses, amapper, amapper.attrs.email_address)) is_(path[0], umapper) is_(path[2], amapper) def test_indexed_key(self): umapper = inspect(self.classes.User) amapper = inspect(self.classes.Address) path = PathRegistry.coerce((umapper, umapper.attrs.addresses, amapper, amapper.attrs.email_address)) eq_(path[1], umapper.attrs.addresses) eq_(path[3], amapper.attrs.email_address) def test_slice(self): umapper = inspect(self.classes.User) amapper = inspect(self.classes.Address) path = PathRegistry.coerce((umapper, umapper.attrs.addresses, amapper, amapper.attrs.email_address)) eq_(path[1:3], (umapper.attrs.addresses, amapper)) def test_addition(self): umapper = inspect(self.classes.User) amapper = inspect(self.classes.Address) p1 = PathRegistry.coerce((umapper, umapper.attrs.addresses)) p2 = PathRegistry.coerce((amapper, amapper.attrs.email_address)) eq_( p1 + p2, PathRegistry.coerce((umapper, umapper.attrs.addresses, amapper, amapper.attrs.email_address)) ) def test_length(self): umapper = inspect(self.classes.User) amapper = inspect(self.classes.Address) pneg1 = PathRegistry.coerce(()) p0 = PathRegistry.coerce((umapper,)) p1 = PathRegistry.coerce((umapper, umapper.attrs.addresses)) p2 = PathRegistry.coerce((umapper, umapper.attrs.addresses, amapper)) p3 = PathRegistry.coerce((umapper, umapper.attrs.addresses, amapper, amapper.attrs.email_address)) eq_(len(pneg1), 0) eq_(len(p0), 1) eq_(len(p1), 2) eq_(len(p2), 3) eq_(len(p3), 4) eq_(pneg1.length, 0) eq_(p0.length, 1) eq_(p1.length, 2) eq_(p2.length, 3) eq_(p3.length, 4) def test_eq(self): umapper = inspect(self.classes.User) amapper = inspect(self.classes.Address) u_alias = inspect(aliased(self.classes.User)) p1 = PathRegistry.coerce((umapper, umapper.attrs.addresses)) p2 = PathRegistry.coerce((umapper, umapper.attrs.addresses)) p3 = PathRegistry.coerce((umapper, umapper.attrs.name)) p4 = PathRegistry.coerce((u_alias, 
umapper.attrs.addresses)) p5 = PathRegistry.coerce((umapper, umapper.attrs.addresses, amapper)) p6 = PathRegistry.coerce((amapper, amapper.attrs.user, umapper, umapper.attrs.addresses)) p7 = PathRegistry.coerce((amapper, amapper.attrs.user, umapper, umapper.attrs.addresses, amapper, amapper.attrs.email_address)) is_(p1 == p2, True) is_(p1 == p3, False) is_(p1 == p4, False) is_(p1 == p5, False) is_(p6 == p7, False) is_(p6 == p7.parent.parent, True) is_(p1 != p2, False) is_(p1 != p3, True) is_(p1 != p4, True) is_(p1 != p5, True) def test_contains_mapper(self): umapper = inspect(self.classes.User) amapper = inspect(self.classes.Address) p1 = PathRegistry.coerce((umapper, umapper.attrs.addresses)) assert p1.contains_mapper(umapper) assert not p1.contains_mapper(amapper) def _registry(self): class Reg(dict): @property def _attributes(self): return self return Reg() def test_path(self): umapper = inspect(self.classes.User) amapper = inspect(self.classes.Address) p1 = PathRegistry.coerce((umapper, umapper.attrs.addresses)) p2 = PathRegistry.coerce((umapper, umapper.attrs.addresses, amapper)) p3 = PathRegistry.coerce((amapper, amapper.attrs.email_address)) eq_( p1.path, (umapper, umapper.attrs.addresses) ) eq_( p2.path, (umapper, umapper.attrs.addresses, amapper) ) eq_( p3.path, (amapper, amapper.attrs.email_address) ) def test_registry_set(self): reg = self._registry() umapper = inspect(self.classes.User) amapper = inspect(self.classes.Address) p1 = PathRegistry.coerce((umapper, umapper.attrs.addresses)) p2 = PathRegistry.coerce((umapper, umapper.attrs.addresses, amapper)) p3 = PathRegistry.coerce((amapper, amapper.attrs.email_address)) p1.set(reg, "p1key", "p1value") p2.set(reg, "p2key", "p2value") p3.set(reg, "p3key", "p3value") eq_( reg, { ('p1key', p1.path): 'p1value', ('p2key', p2.path): 'p2value', ('p3key', p3.path): 'p3value', } ) def test_registry_get(self): reg = self._registry() umapper = inspect(self.classes.User) amapper = inspect(self.classes.Address) p1 = PathRegistry.coerce((umapper, umapper.attrs.addresses)) p2 = PathRegistry.coerce((umapper, umapper.attrs.addresses, amapper)) p3 = PathRegistry.coerce((amapper, amapper.attrs.email_address)) reg.update( { ('p1key', p1.path): 'p1value', ('p2key', p2.path): 'p2value', ('p3key', p3.path): 'p3value', } ) eq_(p1.get(reg, "p1key"), "p1value") eq_(p2.get(reg, "p2key"), "p2value") eq_(p2.get(reg, "p1key"), None) eq_(p3.get(reg, "p3key"), "p3value") eq_(p3.get(reg, "p1key"), None) def test_registry_contains(self): reg = self._registry() umapper = inspect(self.classes.User) amapper = inspect(self.classes.Address) p1 = PathRegistry.coerce((umapper, umapper.attrs.addresses)) p2 = PathRegistry.coerce((umapper, umapper.attrs.addresses, amapper)) p3 = PathRegistry.coerce((amapper, amapper.attrs.email_address)) reg.update( { ('p1key', p1.path): 'p1value', ('p2key', p2.path): 'p2value', ('p3key', p3.path): 'p3value', } ) assert p1.contains(reg, "p1key") assert not p1.contains(reg, "p2key") assert p3.contains(reg, "p3key") assert not p2.contains(reg, "fake") def test_registry_setdefault(self): reg = self._registry() umapper = inspect(self.classes.User) amapper = inspect(self.classes.Address) p1 = PathRegistry.coerce((umapper, umapper.attrs.addresses)) p2 = PathRegistry.coerce((umapper, umapper.attrs.addresses, amapper)) reg.update( { ('p1key', p1.path): 'p1value', } ) p1.setdefault(reg, "p1key", "p1newvalue_a") p1.setdefault(reg, "p1key_new", "p1newvalue_b") p2.setdefault(reg, "p2key", "p2newvalue") eq_( reg, { ('p1key', p1.path): 'p1value', 
('p1key_new', p1.path): 'p1newvalue_b', ('p2key', p2.path): 'p2newvalue', } ) def test_serialize(self): User = self.classes.User Address = self.classes.Address umapper = inspect(self.classes.User) amapper = inspect(self.classes.Address) p1 = PathRegistry.coerce((umapper, umapper.attrs.addresses, amapper, amapper.attrs.email_address)) p2 = PathRegistry.coerce((umapper, umapper.attrs.addresses, amapper)) p3 = PathRegistry.coerce((umapper, umapper.attrs.addresses)) eq_( p1.serialize(), [(User, "addresses"), (Address, "email_address")] ) eq_( p2.serialize(), [(User, "addresses"), (Address, None)] ) eq_( p3.serialize(), [(User, "addresses")] ) def test_deseralize(self): User = self.classes.User Address = self.classes.Address umapper = inspect(self.classes.User) amapper = inspect(self.classes.Address) p1 = PathRegistry.coerce((umapper, umapper.attrs.addresses, amapper, amapper.attrs.email_address)) p2 = PathRegistry.coerce((umapper, umapper.attrs.addresses, amapper)) p3 = PathRegistry.coerce((umapper, umapper.attrs.addresses)) eq_( PathRegistry.deserialize([(User, "addresses"), (Address, "email_address")]), p1 ) eq_( PathRegistry.deserialize([(User, "addresses"), (Address, None)]), p2 ) eq_( PathRegistry.deserialize([(User, "addresses")]), p3 ) from .inheritance import _poly_fixtures class PathRegistryInhTest(_poly_fixtures._Polymorphic): run_setup_mappers = 'once' run_inserts = None run_deletes = None def test_plain(self): Person = _poly_fixtures.Person Engineer = _poly_fixtures.Engineer pmapper = inspect(Person) emapper = inspect(Engineer) p1 = PathRegistry.coerce((pmapper, emapper.attrs.machines)) # given a mapper and an attribute on a subclass, # the path converts what you get to be against that subclass eq_( p1.path, (emapper, emapper.attrs.machines) ) def test_plain_compound(self): Company = _poly_fixtures.Company Person = _poly_fixtures.Person Engineer = _poly_fixtures.Engineer cmapper = inspect(Company) pmapper = inspect(Person) emapper = inspect(Engineer) p1 = PathRegistry.coerce((cmapper, cmapper.attrs.employees, pmapper, emapper.attrs.machines)) # given a mapper and an attribute on a subclass, # the path converts what you get to be against that subclass eq_( p1.path, (cmapper, cmapper.attrs.employees, emapper, emapper.attrs.machines) ) def test_plain_aliased(self): Person = _poly_fixtures.Person Engineer = _poly_fixtures.Engineer emapper = inspect(Engineer) p_alias = aliased(Person) p_alias = inspect(p_alias) p1 = PathRegistry.coerce((p_alias, emapper.attrs.machines)) # plain AliasedClass - the path keeps that AliasedClass directly # as is in the path eq_( p1.path, (p_alias, emapper.attrs.machines) ) def test_plain_aliased_compound(self): Company = _poly_fixtures.Company Person = _poly_fixtures.Person Engineer = _poly_fixtures.Engineer cmapper = inspect(Company) emapper = inspect(Engineer) c_alias = aliased(Company) p_alias = aliased(Person) c_alias = inspect(c_alias) p_alias = inspect(p_alias) p1 = PathRegistry.coerce((c_alias, cmapper.attrs.employees, p_alias, emapper.attrs.machines)) # plain AliasedClass - the path keeps that AliasedClass directly # as is in the path eq_( p1.path, (c_alias, cmapper.attrs.employees, p_alias, emapper.attrs.machines) ) def test_with_poly_sub(self): Person = _poly_fixtures.Person Engineer = _poly_fixtures.Engineer emapper = inspect(Engineer) p_poly = with_polymorphic(Person, [Engineer]) e_poly = inspect(p_poly.Engineer) p_poly = inspect(p_poly) p1 = PathRegistry.coerce((p_poly, emapper.attrs.machines)) # polymorphic AliasedClass - the path uses 
_entity_for_mapper() # to get the most specific sub-entity eq_( p1.path, (e_poly, emapper.attrs.machines) ) def test_with_poly_base(self): Person = _poly_fixtures.Person Engineer = _poly_fixtures.Engineer pmapper = inspect(Person) emapper = inspect(Engineer) p_poly = with_polymorphic(Person, [Engineer]) p_poly = inspect(p_poly) # "name" is actually on Person, not Engineer p1 = PathRegistry.coerce((p_poly, emapper.attrs.name)) # polymorphic AliasedClass - because "name" is on Person, # we get Person, not Engineer eq_( p1.path, (p_poly, pmapper.attrs.name) ) def test_with_poly_use_mapper(self): Person = _poly_fixtures.Person Engineer = _poly_fixtures.Engineer emapper = inspect(Engineer) p_poly = with_polymorphic(Person, [Engineer], _use_mapper_path=True) p_poly = inspect(p_poly) p1 = PathRegistry.coerce((p_poly, emapper.attrs.machines)) # polymorphic AliasedClass with the "use_mapper_path" flag - # the AliasedClass acts just like the base mapper eq_( p1.path, (emapper, emapper.attrs.machines) ) SQLAlchemy-0.8.4/test/orm/test_versioning.py0000644000076500000240000004630112251150016021576 0ustar classicstaff00000000000000import datetime import sqlalchemy as sa from sqlalchemy.testing import engines from sqlalchemy import testing from sqlalchemy import Integer, String, Date, ForeignKey, literal_column, \ orm, exc, select, TypeDecorator from sqlalchemy.testing.schema import Table, Column from sqlalchemy.orm import mapper, relationship, Session, \ create_session, column_property, sessionmaker,\ exc as orm_exc from sqlalchemy.testing import eq_, ne_, assert_raises, assert_raises_message from sqlalchemy.testing import fixtures from test.orm import _fixtures from sqlalchemy.testing import fixtures _uuids = [ '1fc614acbb904742a2990f86af6ded95', '23e253786f4d491b9f9d6189dc33de9b', 'fc44910db37e43fd93e9ec8165b885cf', '0187a1832b4249e6b48911821d86de58', '778af6ea2fb74a009d8d2f5abe5dc29a', '51a6ce031aff47e4b5f2895c4161f120', '7434097cd319401fb9f15fa443ccbbbb', '9bc548a8128e4a85ac18060bc3f4b7d3', '59548715e3c440b7bcb96417d06f7930', 'd7647c7004734de196885ca2bd73adf8', '70cef121d3ff48d39906b6d1ac77f41a', 'ee37a8a6430c466aa322b8a215a0dd70', '782a5f04b4364a53a6fce762f48921c1', 'bef510f2420f4476a7629013ead237f5', ] def make_uuid(): """generate uuids even on Python 2.4 which has no 'uuid'""" return _uuids.pop(0) class VersioningTest(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): Table('version_table', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('version_id', Integer, nullable=False), Column('value', String(40), nullable=False)) @classmethod def setup_classes(cls): class Foo(cls.Basic): pass def _fixture(self): Foo, version_table = self.classes.Foo, self.tables.version_table mapper(Foo, version_table, version_id_col=version_table.c.version_id) s1 = Session() return s1 @engines.close_open_connections def test_notsane_warning(self): Foo = self.classes.Foo save = testing.db.dialect.supports_sane_rowcount testing.db.dialect.supports_sane_rowcount = False try: s1 = self._fixture() f1 = Foo(value='f1') f2 = Foo(value='f2') s1.add_all((f1, f2)) s1.commit() f1.value='f1rev2' assert_raises(sa.exc.SAWarning, s1.commit) finally: testing.db.dialect.supports_sane_rowcount = save @testing.emits_warning_on('+zxjdbc', r'.*does not support (update|delete)d rowcount') def test_basic(self): Foo = self.classes.Foo s1 = self._fixture() f1 = Foo(value='f1') f2 = Foo(value='f2') s1.add_all((f1, f2)) s1.commit() f1.value='f1rev2' s1.commit() s2 = 
create_session(autocommit=False) f1_s = s2.query(Foo).get(f1.id) f1_s.value='f1rev3' s2.commit() f1.value='f1rev3mine' # Only dialects with a sane rowcount can detect the # StaleDataError if testing.db.dialect.supports_sane_rowcount: assert_raises_message(sa.orm.exc.StaleDataError, r"UPDATE statement on table 'version_table' expected " r"to update 1 row\(s\); 0 were matched.", s1.commit), s1.rollback() else: s1.commit() # new in 0.5 ! dont need to close the session f1 = s1.query(Foo).get(f1.id) f2 = s1.query(Foo).get(f2.id) f1_s.value='f1rev4' s2.commit() s1.delete(f1) s1.delete(f2) if testing.db.dialect.supports_sane_rowcount: assert_raises_message( sa.orm.exc.StaleDataError, r"DELETE statement on table 'version_table' expected " r"to delete 2 row\(s\); 1 were matched.", s1.commit) else: s1.commit() @testing.emits_warning_on('+zxjdbc', r'.*does not support (update|delete)d rowcount') def test_bump_version(self): """test that version number can be bumped. Ensures that the UPDATE or DELETE is against the last committed version of version_id_col, not the modified state. """ Foo = self.classes.Foo s1 = self._fixture() f1 = Foo(value='f1') s1.add(f1) s1.commit() eq_(f1.version_id, 1) f1.version_id = 2 s1.commit() eq_(f1.version_id, 2) # skip an id, test that history # is honored f1.version_id = 4 f1.value = "something new" s1.commit() eq_(f1.version_id, 4) f1.version_id = 5 s1.delete(f1) s1.commit() eq_(s1.query(Foo).count(), 0) @testing.emits_warning(r'.*does not support updated rowcount') @engines.close_open_connections def test_versioncheck(self): """query.with_lockmode performs a 'version check' on an already loaded instance""" Foo = self.classes.Foo s1 = self._fixture() f1s1 = Foo(value='f1 value') s1.add(f1s1) s1.commit() s2 = create_session(autocommit=False) f1s2 = s2.query(Foo).get(f1s1.id) f1s2.value='f1 new value' s2.commit() # load, version is wrong assert_raises_message( sa.orm.exc.StaleDataError, r"Instance .* has version id '\d+' which does not " r"match database-loaded version id '\d+'", s1.query(Foo).with_lockmode('read').get, f1s1.id ) # reload it - this expires the old version first s1.refresh(f1s1, lockmode='read') # now assert version OK s1.query(Foo).with_lockmode('read').get(f1s1.id) # assert brand new load is OK too s1.close() s1.query(Foo).with_lockmode('read').get(f1s1.id) @testing.emits_warning(r'.*does not support updated rowcount') @engines.close_open_connections @testing.requires.update_nowait def test_versioncheck_for_update(self): """query.with_lockmode performs a 'version check' on an already loaded instance""" Foo = self.classes.Foo s1 = self._fixture() f1s1 = Foo(value='f1 value') s1.add(f1s1) s1.commit() s2 = create_session(autocommit=False) f1s2 = s2.query(Foo).get(f1s1.id) s2.refresh(f1s2, lockmode='update') f1s2.value='f1 new value' assert_raises( exc.DBAPIError, s1.refresh, f1s1, lockmode='update_nowait' ) s1.rollback() s2.commit() s1.refresh(f1s1, lockmode='update_nowait') assert f1s1.version_id == f1s2.version_id @testing.emits_warning(r'.*does not support updated rowcount') @engines.close_open_connections def test_noversioncheck(self): """test query.with_lockmode works when the mapper has no version id col""" Foo, version_table = self.classes.Foo, self.tables.version_table s1 = create_session(autocommit=False) mapper(Foo, version_table) f1s1 = Foo(value="foo", version_id=0) s1.add(f1s1) s1.commit() s2 = create_session(autocommit=False) f1s2 = s2.query(Foo).with_lockmode('read').get(f1s1.id) assert f1s2.id == f1s1.id assert f1s2.value == f1s1.value 
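    # The merge tests that follow exercise how Session.merge() interacts
    # with the version counter: merging with no version_id supplied adopts
    # the persistent row's version, merging with the correct version
    # proceeds normally, and merging with a stale version raises
    # StaleDataError instead of silently overwriting a newer row.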
@testing.emits_warning_on('+zxjdbc', r'.*does not support updated rowcount') def test_merge_no_version(self): Foo = self.classes.Foo s1 = self._fixture() f1 = Foo(value='f1') s1.add(f1) s1.commit() f1.value = 'f2' s1.commit() f2 = Foo(id=f1.id, value='f3') f3 = s1.merge(f2) assert f3 is f1 s1.commit() eq_(f3.version_id, 3) @testing.emits_warning_on('+zxjdbc', r'.*does not support updated rowcount') def test_merge_correct_version(self): Foo = self.classes.Foo s1 = self._fixture() f1 = Foo(value='f1') s1.add(f1) s1.commit() f1.value = 'f2' s1.commit() f2 = Foo(id=f1.id, value='f3', version_id=2) f3 = s1.merge(f2) assert f3 is f1 s1.commit() eq_(f3.version_id, 3) @testing.emits_warning_on('+zxjdbc', r'.*does not support updated rowcount') def test_merge_incorrect_version(self): Foo = self.classes.Foo s1 = self._fixture() f1 = Foo(value='f1') s1.add(f1) s1.commit() f1.value = 'f2' s1.commit() f2 = Foo(id=f1.id, value='f3', version_id=1) assert_raises_message( orm_exc.StaleDataError, "Version id '1' on merged state " " does not match existing version '2'. " "Leave the version attribute unset when " "merging to update the most recent version.", s1.merge, f2 ) @testing.emits_warning_on('+zxjdbc', r'.*does not support updated rowcount') def test_merge_incorrect_version_not_in_session(self): Foo = self.classes.Foo s1 = self._fixture() f1 = Foo(value='f1') s1.add(f1) s1.commit() f1.value = 'f2' s1.commit() f2 = Foo(id=f1.id, value='f3', version_id=1) s1.close() assert_raises_message( orm_exc.StaleDataError, "Version id '1' on merged state " " does not match existing version '2'. " "Leave the version attribute unset when " "merging to update the most recent version.", s1.merge, f2 ) class ColumnTypeTest(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): class SpecialType(TypeDecorator): impl = Date def process_bind_param(self, value, dialect): assert isinstance(value, datetime.date) return value Table('version_table', metadata, Column('id', SpecialType, primary_key=True), Column('version_id', Integer, nullable=False), Column('value', String(40), nullable=False)) @classmethod def setup_classes(cls): class Foo(cls.Basic): pass def _fixture(self): Foo, version_table = self.classes.Foo, self.tables.version_table mapper(Foo, version_table, version_id_col=version_table.c.version_id) s1 = Session() return s1 @engines.close_open_connections def test_update(self): Foo = self.classes.Foo s1 = self._fixture() f1 = Foo(id=datetime.date.today(), value='f1') s1.add(f1) s1.commit() f1.value='f1rev2' s1.commit() class RowSwitchTest(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): Table('p', metadata, Column('id', String(10), primary_key=True), Column('version_id', Integer, default=1, nullable=False), Column('data', String(50)) ) Table('c', metadata, Column('id', String(10), ForeignKey('p.id'), primary_key=True), Column('version_id', Integer, default=1, nullable=False), Column('data', String(50)) ) @classmethod def setup_classes(cls): class P(cls.Basic): pass class C(cls.Basic): pass @classmethod def setup_mappers(cls): p, c, C, P = (cls.tables.p, cls.tables.c, cls.classes.C, cls.classes.P) mapper(P, p, version_id_col=p.c.version_id, properties={ 'c':relationship(C, uselist=False, cascade='all, delete-orphan') }) mapper(C, c, version_id_col=c.c.version_id) @testing.emits_warning_on('+zxjdbc', r'.*does not support updated rowcount') def test_row_switch(self): P = self.classes.P session = sessionmaker()() session.add(P(id='P1', data='P version 1')) session.commit() session.close() 
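        # deleting the existing row and re-adding a new instance with the
        # same primary key within one flush is what the unit of work calls a
        # "row switch"; the point of this test is that the version counter
        # still participates correctly in that path.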
p = session.query(P).first() session.delete(p) session.add(P(id='P1', data="really a row-switch")) session.commit() @testing.emits_warning_on('+zxjdbc', r'.*does not support updated rowcount') def test_child_row_switch(self): P, C = self.classes.P, self.classes.C assert P.c.property.strategy.use_get session = sessionmaker()() session.add(P(id='P1', data='P version 1')) session.commit() session.close() p = session.query(P).first() p.c = C(data='child version 1') session.commit() p = session.query(P).first() p.c = C(data='child row-switch') session.commit() class AlternateGeneratorTest(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): Table('p', metadata, Column('id', String(10), primary_key=True), Column('version_id', String(32), nullable=False), Column('data', String(50)) ) Table('c', metadata, Column('id', String(10), ForeignKey('p.id'), primary_key=True), Column('version_id', String(32), nullable=False), Column('data', String(50)) ) @classmethod def setup_classes(cls): class P(cls.Basic): pass class C(cls.Basic): pass @classmethod def setup_mappers(cls): p, c, C, P = (cls.tables.p, cls.tables.c, cls.classes.C, cls.classes.P) mapper(P, p, version_id_col=p.c.version_id, version_id_generator=lambda x:make_uuid(), properties={ 'c':relationship(C, uselist=False, cascade='all, delete-orphan') }) mapper(C, c, version_id_col=c.c.version_id, version_id_generator=lambda x:make_uuid(), ) @testing.emits_warning_on('+zxjdbc', r'.*does not support updated rowcount') def test_row_switch(self): P = self.classes.P session = sessionmaker()() session.add(P(id='P1', data='P version 1')) session.commit() session.close() p = session.query(P).first() session.delete(p) session.add(P(id='P1', data="really a row-switch")) session.commit() @testing.emits_warning_on('+zxjdbc', r'.*does not support (update|delete)d rowcount') def test_child_row_switch_one(self): P, C = self.classes.P, self.classes.C assert P.c.property.strategy.use_get session = sessionmaker()() session.add(P(id='P1', data='P version 1')) session.commit() session.close() p = session.query(P).first() p.c = C(data='child version 1') session.commit() p = session.query(P).first() p.c = C(data='child row-switch') session.commit() @testing.emits_warning_on('+zxjdbc', r'.*does not support (update|delete)d rowcount') def test_child_row_switch_two(self): P = self.classes.P Session = sessionmaker() # TODO: not sure this test is # testing exactly what its looking for sess1 = Session() sess1.add(P(id='P1', data='P version 1')) sess1.commit() sess1.close() p1 = sess1.query(P).first() sess2 = Session() p2 = sess2.query(P).first() sess1.delete(p1) sess1.commit() # this can be removed and it still passes sess1.add(P(id='P1', data='P version 2')) sess1.commit() p2.data = 'P overwritten by concurrent tx' if testing.db.dialect.supports_sane_rowcount: assert_raises_message( orm.exc.StaleDataError, r"UPDATE statement on table 'p' expected to update " r"1 row\(s\); 0 were matched.", sess2.commit ) else: sess2.commit class InheritanceTwoVersionIdsTest(fixtures.MappedTest): """Test versioning where both parent/child table have a versioning column. 
""" @classmethod def define_tables(cls, metadata): Table('base', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('version_id', Integer, nullable=True), Column('data', String(50)) ) Table('sub', metadata, Column('id', Integer, ForeignKey('base.id'), primary_key=True), Column('version_id', Integer, nullable=False), Column('sub_data', String(50)) ) @classmethod def setup_classes(cls): class Base(cls.Basic): pass class Sub(Base): pass def test_base_both(self): Base, sub, base, Sub = (self.classes.Base, self.tables.sub, self.tables.base, self.classes.Sub) mapper(Base, base, version_id_col=base.c.version_id) mapper(Sub, sub, inherits=Base) session = Session() b1 = Base(data='b1') session.add(b1) session.commit() eq_(b1.version_id, 1) # base is populated eq_(select([base.c.version_id]).scalar(), 1) def test_sub_both(self): Base, sub, base, Sub = (self.classes.Base, self.tables.sub, self.tables.base, self.classes.Sub) mapper(Base, base, version_id_col=base.c.version_id) mapper(Sub, sub, inherits=Base) session = Session() s1 = Sub(data='s1', sub_data='s1') session.add(s1) session.commit() # table is populated eq_(select([sub.c.version_id]).scalar(), 1) # base is populated eq_(select([base.c.version_id]).scalar(), 1) def test_sub_only(self): Base, sub, base, Sub = (self.classes.Base, self.tables.sub, self.tables.base, self.classes.Sub) mapper(Base, base) mapper(Sub, sub, inherits=Base, version_id_col=sub.c.version_id) session = Session() s1 = Sub(data='s1', sub_data='s1') session.add(s1) session.commit() # table is populated eq_(select([sub.c.version_id]).scalar(), 1) # base is not eq_(select([base.c.version_id]).scalar(), None) def test_mismatch_version_col_warning(self): Base, sub, base, Sub = (self.classes.Base, self.tables.sub, self.tables.base, self.classes.Sub) mapper(Base, base, version_id_col=base.c.version_id) assert_raises_message( exc.SAWarning, "Inheriting version_id_col 'version_id' does not " "match inherited version_id_col 'version_id' and will not " "automatically populate the inherited versioning column. 
" "version_id_col should only be specified on " "the base-most mapper that includes versioning.", mapper, Sub, sub, inherits=Base, version_id_col=sub.c.version_id) SQLAlchemy-0.8.4/test/perf/0000755000076500000240000000000012251151573016146 5ustar classicstaff00000000000000SQLAlchemy-0.8.4/test/perf/insertspeed.py0000644000076500000240000000626712251150016021047 0ustar classicstaff00000000000000import sys, time from sqlalchemy import * from sqlalchemy.orm import * from sqlalchemy.testing import profiling db = create_engine('sqlite://') metadata = MetaData(db) Person_table = Table('Person', metadata, Column('name', String(40)), Column('sex', Integer), Column('age', Integer)) def sa_unprofiled_insertmany(n): i = Person_table.insert() i.execute([{'name':'John Doe','sex':1,'age':35} for j in xrange(n)]) def sqlite_unprofiled_insertmany(n): conn = db.connect().connection c = conn.cursor() persons = [('john doe', 1, 35) for i in xrange(n)] c.executemany("insert into Person(name, sex, age) values (?,?,?)", persons) @profiling.profiled('sa_profiled_insert_many', always=True) def sa_profiled_insert_many(n): i = Person_table.insert() i.execute([{'name':'John Doe','sex':1,'age':35} for j in xrange(n)]) s = Person_table.select() r = s.execute() res = [[value for value in row] for row in r.fetchall()] def sqlite_unprofiled_insert(n): conn = db.connect().connection c = conn.cursor() for j in xrange(n): c.execute("insert into Person(name, sex, age) values (?,?,?)", ('john doe', 1, 35)) def sa_unprofiled_insert(n): # Another option is to build Person_table.insert() outside of the # loop. But it doesn't make much of a difference, so might as well # use the worst-case/naive version here. for j in xrange(n): Person_table.insert().execute({'name':'John Doe','sex':1,'age':35}) @profiling.profiled('sa_profiled_insert', always=True) def sa_profiled_insert(n): i = Person_table.insert() for j in xrange(n): i.execute({'name':'John Doe','sex':1,'age':35}) s = Person_table.select() r = s.execute() res = [[value for value in row] for row in r.fetchall()] def run_timed(fn, label, *args, **kw): metadata.drop_all() metadata.create_all() sys.stdout.write("%s (%s): " % (label, ', '.join([str(a) for a in args]))) sys.stdout.flush() t = time.clock() fn(*args, **kw) t2 = time.clock() sys.stdout.write("%0.2f seconds\n" % (t2 - t)) def run_profiled(fn, label, *args, **kw): metadata.drop_all() metadata.create_all() print "%s (%s)" % (label, ', '.join([str(a) for a in args])) fn(*args, **kw) def all(): try: print "Bulk INSERTS via executemany():\n" run_timed(sqlite_unprofiled_insertmany, 'pysqlite bulk insert', 50000) run_timed(sa_unprofiled_insertmany, 'SQLAlchemy bulk insert', 50000) run_profiled(sa_profiled_insert_many, 'SQLAlchemy bulk insert/select, profiled', 50000) print "\nIndividual INSERTS via execute():\n" run_timed(sqlite_unprofiled_insert, "pysqlite individual insert", 50000) run_timed(sa_unprofiled_insert, "SQLAlchemy individual insert", 50000) run_profiled(sa_profiled_insert, 'SQLAlchemy individual insert/select, profiled', 50000) finally: metadata.drop_all() if __name__ == '__main__': all() SQLAlchemy-0.8.4/test/perf/large_flush.py0000644000076500000240000000502612251150016021005 0ustar classicstaff00000000000000import sqlalchemy as sa from sqlalchemy import create_engine, MetaData, orm from sqlalchemy import Column, ForeignKey from sqlalchemy import Integer, String from sqlalchemy.orm import mapper from sqlalchemy.testing import profiling class Object(object): pass class Q(Object): pass class A(Object): pass 
class C(Object): pass class WC(C): pass engine = create_engine('sqlite:///:memory:', echo=True) sm = orm.sessionmaker(bind=engine) SA_Session = orm.scoped_session(sm) SA_Metadata = MetaData() object_table = sa.Table('Object', SA_Metadata, Column('ObjectID', Integer,primary_key=True), Column('Type', String(1), nullable=False)) q_table = sa.Table('Q', SA_Metadata, Column('QID', Integer, ForeignKey('Object.ObjectID'),primary_key=True)) c_table = sa.Table('C', SA_Metadata, Column('CID', Integer, ForeignKey('Object.ObjectID'),primary_key=True)) wc_table = sa.Table('WC', SA_Metadata, Column('WCID', Integer, ForeignKey('C.CID'), primary_key=True)) a_table = sa.Table('A', SA_Metadata, Column('AID', Integer, ForeignKey('Object.ObjectID'),primary_key=True), Column('QID', Integer, ForeignKey('Q.QID')), Column('CID', Integer, ForeignKey('C.CID'))) mapper(Object, object_table, polymorphic_on=object_table.c.Type, polymorphic_identity='O') mapper(Q, q_table, inherits=Object, polymorphic_identity='Q') mapper(C, c_table, inherits=Object, polymorphic_identity='C') mapper(WC, wc_table, inherits=C, polymorphic_identity='W') mapper(A, a_table, inherits=Object, polymorphic_identity='A', properties = { 'Q' : orm.relation(Q,primaryjoin=a_table.c.QID==q_table.c.QID, backref='As' ), 'C' : orm.relation(C,primaryjoin=a_table.c.CID==c_table.c.CID, backref='A', uselist=False) } ) SA_Metadata.create_all(engine) @profiling.profiled('large_flush', always=True, sort=['file']) def generate_error(): q = Q() for j in range(100): #at 306 the error does not pop out (depending on recursion depth) a = A() a.Q = q a.C = WC() SA_Session.add(q) SA_Session.commit() #here the error pops out generate_error()SQLAlchemy-0.8.4/test/perf/objselectspeed.py0000644000076500000240000001015312251150016021502 0ustar classicstaff00000000000000import time, resource from sqlalchemy import * from sqlalchemy.orm import * from sqlalchemy.testing.util import gc_collect from sqlalchemy.testing import profiling db = create_engine('sqlite://') metadata = MetaData(db) Person_table = Table('Person', metadata, Column('id', Integer, primary_key=True), Column('type', String(10)), Column('name', String(40)), Column('sex', Integer), Column('age', Integer)) Employee_table = Table('Employee', metadata, Column('id', Integer, ForeignKey('Person.id'), primary_key=True), Column('foo', String(40)), Column('bar', Integer), Column('bat', Integer)) class RawPerson(object): pass class Person(object): pass mapper(Person, Person_table) class JoinedPerson(object):pass class Employee(JoinedPerson):pass mapper(JoinedPerson, Person_table, \ polymorphic_on=Person_table.c.type, polymorphic_identity='person') mapper(Employee, Employee_table, \ inherits=JoinedPerson, polymorphic_identity='employee') compile_mappers() def setup(): metadata.create_all() i = Person_table.insert() data = [{'name':'John Doe','sex':1,'age':35, 'type':'employee'}] * 100 for j in xrange(500): i.execute(data) # note we arent fetching from employee_table, # so we can leave it empty even though its "incorrect" #i = Employee_table.insert() #data = [{'foo':'foo', 'bar':'bar':'bat':'bat'}] * 100 #for j in xrange(500): # i.execute(data) print "Inserted 50,000 rows" def sqlite_select(entity_cls): conn = db.connect().connection cr = conn.cursor() cr.execute("SELECT id, name, sex, age FROM Person") people = [] for row in cr.fetchall(): person = entity_cls() person.id = row[0] person.name = row[1] person.sex = row[2] person.age = row[3] people.append(person) cr.close() conn.close() def sql_select(entity_cls): people = 
[] for row in Person_table.select().execute().fetchall(): person = entity_cls() person.id = row['id'] person.name = row['name'] person.sex = row['sex'] person.age = row['age'] people.append(person) #@profiling.profiled(report=True, always=True) def orm_select(): session = create_session() people = session.query(Person).all() #@profiling.profiled(report=True, always=True) def joined_orm_select(): session = create_session() people = session.query(JoinedPerson).all() def all(): setup() try: t, t2 = 0, 0 def usage(label): now = resource.getrusage(resource.RUSAGE_SELF) print "%s: %0.3fs real, %0.3fs user, %0.3fs sys" % ( label, t2 - t, now.ru_utime - usage.last.ru_utime, now.ru_stime - usage.last.ru_stime) usage.snap(now) usage.snap = lambda stats=None: setattr( usage, 'last', stats or resource.getrusage(resource.RUSAGE_SELF)) gc_collect() usage.snap() t = time.clock() sqlite_select(RawPerson) t2 = time.clock() usage('sqlite select/native') gc_collect() usage.snap() t = time.clock() sqlite_select(Person) t2 = time.clock() usage('sqlite select/instrumented') gc_collect() usage.snap() t = time.clock() sql_select(RawPerson) t2 = time.clock() usage('sqlalchemy.sql select/native') gc_collect() usage.snap() t = time.clock() sql_select(Person) t2 = time.clock() usage('sqlalchemy.sql select/instrumented') gc_collect() usage.snap() t = time.clock() orm_select() t2 = time.clock() usage('sqlalchemy.orm fetch') gc_collect() usage.snap() t = time.clock() joined_orm_select() t2 = time.clock() usage('sqlalchemy.orm "joined" fetch') finally: metadata.drop_all() if __name__ == '__main__': all() SQLAlchemy-0.8.4/test/perf/objupdatespeed.py0000644000076500000240000000512712251150016021512 0ustar classicstaff00000000000000import time, resource from sqlalchemy import * from sqlalchemy.orm import * from sqlalchemy.testing import * from sqlalchemy.testing.util import gc_collect NUM = 100 metadata = MetaData(testing.db) Person_table = Table('Person', metadata, Column('id', Integer, primary_key=True), Column('name', String(40)), Column('sex', Integer), Column('age', Integer)) Email_table = Table('Email', metadata, Column('id', Integer, primary_key=True), Column('person_id', Integer, ForeignKey('Person.id')), Column('address', String(300))) class Person(object): pass class Email(object): def __repr__(self): return '' % (getattr(self, 'id', None), getattr(self, 'address', None)) mapper(Person, Person_table, properties={ 'emails': relationship(Email, backref='owner', lazy='joined') }) mapper(Email, Email_table) compile_mappers() def setup(): metadata.create_all() i = Person_table.insert() data = [{'name':'John Doe','sex':1,'age':35}] * NUM i.execute(data) i = Email_table.insert() for j in xrange(1, NUM + 1): i.execute(address='foo@bar', person_id=j) if j % 2: i.execute(address='baz@quux', person_id=j) print "Inserted %d rows." 
% (NUM + NUM + (NUM // 2)) def orm_select(session): return session.query(Person).all() @profiling.profiled('update_and_flush') def update_and_flush(session, people): for p in people: p.name = 'Exene Cervenka' p.sex = 2 p.emails[0].address = 'hoho@lala' session.flush() def all(): setup() try: t, t2 = 0, 0 def usage(label): now = resource.getrusage(resource.RUSAGE_SELF) print "%s: %0.3fs real, %0.3fs user, %0.3fs sys" % ( label, t2 - t, now.ru_utime - usage.last.ru_utime, now.ru_stime - usage.last.ru_stime) usage.snap(now) usage.snap = lambda stats=None: setattr( usage, 'last', stats or resource.getrusage(resource.RUSAGE_SELF)) session = create_session() gc_collect() usage.snap() t = time.clock() people = orm_select(session) t2 = time.clock() usage('load objects') gc_collect() usage.snap() t = time.clock() update_and_flush(session, people) t2 = time.clock() usage('update and flush') finally: metadata.drop_all() if __name__ == '__main__': all() SQLAlchemy-0.8.4/test/perf/orm2010.py0000644000076500000240000001263412251150016017615 0ustar classicstaff00000000000000# monkeypatch the "cdecimal" library in. # this is a drop-in replacement for "decimal". # All SQLA versions support cdecimal except # for the MS-SQL dialect, which is fixed in 0.7 try: import cdecimal import sys sys.modules['decimal'] = cdecimal except ImportError: pass from sqlalchemy import __version__ from sqlalchemy import Column, Integer, create_engine, ForeignKey, \ String, Numeric if __version__ < "0.6": from sqlalchemy.orm.session import Session from sqlalchemy.orm import relation as relationship else: from sqlalchemy.orm import Session, relationship from sqlalchemy.ext.declarative import declarative_base import random import os from decimal import Decimal Base = declarative_base() class Employee(Base): __tablename__ = 'employee' id = Column(Integer, primary_key=True) name = Column(String(100), nullable=False) type = Column(String(50), nullable=False) __mapper_args__ = {'polymorphic_on':type} class Boss(Employee): __tablename__ = 'boss' id = Column(Integer, ForeignKey('employee.id'), primary_key=True) golf_average = Column(Numeric) __mapper_args__ = {'polymorphic_identity':'boss'} class Grunt(Employee): __tablename__ = 'grunt' id = Column(Integer, ForeignKey('employee.id'), primary_key=True) savings = Column(Numeric) employer_id = Column(Integer, ForeignKey('boss.id')) # Configure an 'employer' relationship, where Grunt references # Boss. This is a joined-table subclass to subclass relationship, # which is a less typical case. # In 0.7, "Boss.id" is the "id" column of "boss", as would be expected. if __version__ >= "0.7": employer = relationship("Boss", backref="employees", primaryjoin=Boss.id==employer_id) # Prior to 0.7, "Boss.id" is the "id" column of "employee". # Long story. So we hardwire the relationship against the "id" # column of Boss' table. elif __version__ >= "0.6": employer = relationship("Boss", backref="employees", primaryjoin=Boss.__table__.c.id==employer_id) # In 0.5, the many-to-one loader wouldn't recognize the above as a # simple "identity map" fetch. So to give 0.5 a chance to emit # the same amount of SQL as 0.6, we hardwire the relationship against # "employee.id" to work around the bug. 
else: employer = relationship("Boss", backref="employees", primaryjoin=Employee.__table__.c.id==employer_id, foreign_keys=employer_id) __mapper_args__ = {'polymorphic_identity':'grunt'} if os.path.exists('orm2010.db'): os.remove('orm2010.db') # use a file based database so that cursor.execute() has some # palpable overhead. engine = create_engine('sqlite:///orm2010.db') Base.metadata.create_all(engine) sess = Session(engine) def runit(): # create 1000 Boss objects. bosses = [ Boss( name="Boss %d" % i, golf_average=Decimal(random.randint(40, 150)) ) for i in xrange(1000) ] sess.add_all(bosses) # create 10000 Grunt objects. grunts = [ Grunt( name="Grunt %d" % i, savings=Decimal(random.randint(5000000, 15000000) / 100) ) for i in xrange(10000) ] # Assign each Grunt a Boss. Look them up in the DB # to simulate a little bit of two-way activity with the # DB while we populate. Autoflush occurs on each query. # In 0.7 executemany() is used for all the "boss" and "grunt" # tables since priamry key fetching is not needed. while grunts: boss = sess.query(Boss).\ filter_by(name="Boss %d" % (101 - len(grunts) / 100)).\ first() for grunt in grunts[0:100]: grunt.employer = boss grunts = grunts[100:] sess.commit() report = [] # load all the Grunts, print a report with their name, stats, # and their bosses' stats. for grunt in sess.query(Grunt): # here, the overhead of a many-to-one fetch of # "grunt.employer" directly from the identity map # is less than half of that of 0.6. report.append(( grunt.name, grunt.savings, grunt.employer.name, grunt.employer.golf_average )) import cProfile, os, pstats filename = "orm2010.profile" cProfile.runctx('runit()', globals(), locals(), filename) stats = pstats.Stats(filename) counts_by_methname = dict((key[2], stats.stats[key][0]) for key in stats.stats) print "SQLA Version: %s" % __version__ print "Total calls %d" % stats.total_calls print "Total cpu seconds: %.2f" % stats.total_tt print 'Total execute calls: %d' \ % counts_by_methname[""] print 'Total executemany calls: %d' \ % counts_by_methname.get("", 0) #stats.sort_stats('time', 'calls') #stats.print_stats() os.system("runsnake %s" % filename) # SQLA Version: 0.7b1 # Total calls 4956750 # Total execute calls: 11201 # Total executemany calls: 101 # SQLA Version: 0.6.6 # Total calls 7963214 # Total execute calls: 22201 # Total executemany calls: 0 # SQLA Version: 0.5.8 # Total calls 10556480 # Total execute calls: 22201 # Total executemany calls: 0 SQLAlchemy-0.8.4/test/perf/ormsession.py0000644000076500000240000001664712251150016020726 0ustar classicstaff00000000000000import time from datetime import datetime from sqlalchemy import * from sqlalchemy.orm import * from sqlalchemy.testing import * from sqlalchemy.testing.profiling import profiled class Item(object): def __repr__(self): return 'Item<#%s "%s">' % (self.id, self.name) class SubItem(object): def __repr__(self): return 'SubItem<#%s "%s">' % (self.id, self.name) class Customer(object): def __repr__(self): return 'Customer<#%s "%s">' % (self.id, self.name) class Purchase(object): def __repr__(self): return 'Purchase<#%s "%s">' % (self.id, self.purchase_date) items, subitems, customers, purchases, purchaseitems = \ None, None, None, None, None metadata = MetaData() @profiled('table') def define_tables(): global items, subitems, customers, purchases, purchaseitems items = Table('items', metadata, Column('id', Integer, primary_key=True), Column('name', String(100)), test_needs_acid=True) subitems = Table('subitems', metadata, Column('id', Integer, 
primary_key=True), Column('item_id', Integer, ForeignKey('items.id'), nullable=False), Column('name', String(100), server_default='no name'), test_needs_acid=True) customers = Table('customers', metadata, Column('id', Integer, primary_key=True), Column('name', String(100)), *[Column("col_%s" % chr(i), String(64), default=str(i)) for i in range(97,117)], **dict(test_needs_acid=True)) purchases = Table('purchases', metadata, Column('id', Integer, primary_key=True), Column('customer_id', Integer, ForeignKey('customers.id'), nullable=False), Column('purchase_date', DateTime, default=datetime.now), test_needs_acid=True) purchaseitems = Table('purchaseitems', metadata, Column('purchase_id', Integer, ForeignKey('purchases.id'), nullable=False, primary_key=True), Column('item_id', Integer, ForeignKey('items.id'), nullable=False, primary_key=True), test_needs_acid=True) @profiled('mapper') def setup_mappers(): mapper(Item, items, properties={ 'subitems': relationship(SubItem, backref='item', lazy='select') }) mapper(SubItem, subitems) mapper(Customer, customers, properties={ 'purchases': relationship(Purchase, lazy='select', backref='customer') }) mapper(Purchase, purchases, properties={ 'items': relationship(Item, lazy='select', secondary=purchaseitems) }) @profiled('inserts') def insert_data(): q_items = 1000 q_sub_per_item = 10 q_customers = 1000 con = testing.db.connect() transaction = con.begin() data, subdata = [], [] for item_id in xrange(1, q_items + 1): data.append({'name': "item number %s" % item_id}) for subitem_id in xrange(1, (item_id % q_sub_per_item) + 1): subdata.append({'item_id': item_id, 'name': "subitem number %s" % subitem_id}) if item_id % 100 == 0: items.insert().execute(*data) subitems.insert().execute(*subdata) del data[:] del subdata[:] if data: items.insert().execute(*data) if subdata: subitems.insert().execute(*subdata) transaction.commit() transaction = con.begin() data = [] for customer_id in xrange(1, q_customers): data.append({'name': "customer number %s" % customer_id}) if customer_id % 100 == 0: customers.insert().execute(*data) del data[:] if data: customers.insert().execute(*data) transaction.commit() transaction = con.begin() data, subdata = [], [] order_t = int(time.time()) - (5000 * 5 * 60) current = xrange(1, q_customers) step, purchase_id = 1, 0 while current: next = [] for customer_id in current: order_t += 300 data.append({'customer_id': customer_id, 'purchase_date': datetime.fromtimestamp(order_t)}) purchase_id += 1 for item_id in range(customer_id % 200, customer_id + 1, 200): if item_id != 0: subdata.append({'purchase_id': purchase_id, 'item_id': item_id}) if customer_id % 10 > step: next.append(customer_id) if len(data) >= 100: purchases.insert().execute(*data) if subdata: purchaseitems.insert().execute(*subdata) del data[:] del subdata[:] step, current = step + 1, next if data: purchases.insert().execute(*data) if subdata: purchaseitems.insert().execute(*subdata) transaction.commit() @profiled('queries') def run_queries(): session = create_session() # no explicit transaction here. # build a report of summarizing the last 50 purchases and # the top 20 items from all purchases q = session.query(Purchase). \ order_by(desc(Purchase.purchase_date)). \ limit(50).\ options(joinedload('items'), joinedload('items.subitems'), joinedload('customer')) report = [] # "write" the report. pretend it's going to a web template or something, # the point is to actually pull data through attributes and collections. 
for purchase in q: report.append(purchase.customer.name) report.append(purchase.customer.col_a) report.append(purchase.purchase_date) for item in purchase.items: report.append(item.name) report.extend([s.name for s in item.subitems]) # mix a little low-level with orm # pull a report of the top 20 items of all time _item_id = purchaseitems.c.item_id top_20_q = select([func.distinct(_item_id).label('id')], group_by=[purchaseitems.c.purchase_id, _item_id], order_by=[desc(func.count(_item_id)), _item_id], limit=20) ids = [r.id for r in top_20_q.execute().fetchall()] q2 = session.query(Item).filter(Item.id.in_(ids)) for num, item in enumerate(q2): report.append("number %s: %s" % (num + 1, item.name)) @profiled('creating') def create_purchase(): # commit a purchase customer_id = 100 item_ids = (10,22,34,46,58) session = create_session() session.begin() customer = session.query(Customer).get(customer_id) items = session.query(Item).filter(Item.id.in_(item_ids)) purchase = Purchase() purchase.customer = customer purchase.items.extend(items) session.flush() session.commit() session.expire(customer) def setup_db(): metadata.drop_all() metadata.create_all() def cleanup_db(): metadata.drop_all() @profiled('default') def default(): run_queries() create_purchase() @profiled('all') def main(): metadata.bind = testing.db try: define_tables() setup_mappers() setup_db() insert_data() default() finally: cleanup_db() main() SQLAlchemy-0.8.4/test/perf/sessions.py0000644000076500000240000000541212251150016020357 0ustar classicstaff00000000000000from sqlalchemy import * from sqlalchemy.orm import * from sqlalchemy.testing.compat import gc_collect from sqlalchemy.testing import AssertsExecutionResults, profiling, testing from test.orm import _fixtures # in this test we are specifically looking for time spent in the attributes.InstanceState.__cleanup() method. 
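# (Illustrative sketch added for clarity; not part of the original test
# module.  It spells out the unit of work that the profiled tests below
# repeat ITERATIONS times: load instances into a Session, close it, and
# force a GC pass so that InstanceState cleanup cost shows up in the
# profile.  ``cls_`` stands in for the T1 fixture created in
# setup_class(); create_session and gc_collect come from the imports
# above.)

def _example_clean_iteration(cls_):
    sess = create_session()
    # pull a batch of instances into the session's identity map
    objs = sess.query(cls_).all()
    # closing the session expunges the instances; dropping our own
    # references leaves their InstanceState objects unreferenced
    sess.close()
    del objs
    del sess
    # collect explicitly so the cleanup work is attributed to this spot
    gc_collect()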
ITERATIONS = 100 class SessionTest(fixtures.TestBase, AssertsExecutionResults): @classmethod def setup_class(cls): global t1, t2, metadata,T1, T2 metadata = MetaData(testing.db) t1 = Table('t1', metadata, Column('c1', Integer, primary_key=True), Column('c2', String(30))) t2 = Table('t2', metadata, Column('c1', Integer, primary_key=True), Column('c2', String(30)), Column('t1id', Integer, ForeignKey('t1.c1')) ) metadata.create_all() l = [] for x in range(1,51): l.append({'c2':'this is t1 #%d' % x}) t1.insert().execute(*l) for x in range(1, 51): l = [] for y in range(1, 100): l.append({'c2':'this is t2 #%d' % y, 't1id':x}) t2.insert().execute(*l) class T1(fixtures.ComparableEntity): pass class T2(fixtures.ComparableEntity): pass mapper(T1, t1, properties={ 't2s':relationship(T2, backref='t1') }) mapper(T2, t2) @classmethod def teardown_class(cls): metadata.drop_all() clear_mappers() @profiling.profiled('clean', report=True) def test_session_clean(self): for x in range(0, ITERATIONS): sess = create_session() t1s = sess.query(T1).filter(T1.c1.between(15, 48)).all() for index in [2, 7, 12, 15, 18, 20]: t1s[index].t2s sess.close() del sess gc_collect() @profiling.profiled('dirty', report=True) def test_session_dirty(self): for x in range(0, ITERATIONS): sess = create_session() t1s = sess.query(T1).filter(T1.c1.between(15, 48)).all() for index in [2, 7, 12, 15, 18, 20]: t1s[index].c2 = 'this is some modified text' for t2 in t1s[index].t2s: t2.c2 = 'this is some modified text' del t1s gc_collect() sess.close() del sess gc_collect() @profiling.profiled('noclose', report=True) def test_session_noclose(self): for x in range(0, ITERATIONS): sess = create_session() t1s = sess.query(T1).filter(T1.c1.between(15, 48)).all() for index in [2, 7, 12, 15, 18, 20]: t1s[index].t2s del sess gc_collect() SQLAlchemy-0.8.4/test/perf/stress_all.py0000644000076500000240000001602112251150016020662 0ustar classicstaff00000000000000# -*- encoding: utf8 -*- from datetime import * import decimal #from fastdec import mpd as Decimal from cPickle import dumps, loads #from sqlalchemy.dialects.postgresql.base import ARRAY from stresstest import * # --- test_types = False test_methods = True test_pickle = False test_orm = False # --- verbose = True def values_results(raw_results): return [tuple(r.values()) for r in raw_results] def getitem_str_results(raw_results): return [ (r['id'], r['field0'], r['field1'], r['field2'], r['field3'], r['field4'], r['field5'], r['field6'], r['field7'], r['field8'], r['field9']) for r in raw_results] def getitem_fallback_results(raw_results): return [ (r['ID'], r['FIELD0'], r['FIELD1'], r['FIELD2'], r['FIELD3'], r['FIELD4'], r['FIELD5'], r['FIELD6'], r['FIELD7'], r['FIELD8'], r['FIELD9']) for r in raw_results] def getitem_int_results(raw_results): return [ (r[0], r[1], r[2], r[3], r[4], r[5], r[6], r[7], r[8], r[9], r[10]) for r in raw_results] def getitem_long_results(raw_results): return [ (r[0L], r[1L], r[2L], r[3L], r[4L], r[5L], r[6L], r[7L], r[8L], r[9L], r[10L]) for r in raw_results] def getitem_obj_results(raw_results): c = test_table.c fid, f0, f1, f2, f3, f4, f5, f6, f7, f8, f9 = ( c.id, c.field0, c.field1, c.field2, c.field3, c.field4, c.field5, c.field6, c.field7, c.field8, c.field9) return [ (r[fid], r[f0], r[f1], r[f2], r[f3], r[f4], r[f5], r[f6], r[f7], r[f8], r[f9]) for r in raw_results] def slice_results(raw_results): return [row[0:6] + row[6:11] for row in raw_results] # ---------- # # Test types # # ---------- # # Array #def genarrayvalue(rnum, fnum): # return [fnum, fnum + 1, 
fnum + 2] #arraytest = (ARRAY(Integer), genarrayvalue, # dict(num_fields=100, num_records=1000, # engineurl='postgresql:///test')) # Boolean def genbooleanvalue(rnum, fnum): if rnum % 4: return bool(fnum % 2) else: return None booleantest = (Boolean, genbooleanvalue, dict(num_records=100000)) # Datetime def gendatetimevalue(rnum, fnum): return (rnum % 4) and datetime(2005, 3, 3) or None datetimetest = (DateTime, gendatetimevalue, dict(num_records=10000)) # Decimal def gendecimalvalue(rnum, fnum): if rnum % 4: return Decimal(str(0.25 * fnum)) else: return None decimaltest = (Numeric(10, 2), gendecimalvalue, dict(num_records=10000)) # Interval # no microseconds because Postgres does not seem to support it from_epoch = timedelta(14643, 70235) def genintervalvalue(rnum, fnum): return from_epoch intervaltest = (Interval, genintervalvalue, dict(num_fields=2, num_records=100000)) # PickleType def genpicklevalue(rnum, fnum): return (rnum % 4) and {'str': "value%d" % fnum, 'int': rnum} or None pickletypetest = (PickleType, genpicklevalue, dict(num_fields=1, num_records=100000)) # TypeDecorator class MyIntType(TypeDecorator): impl = Integer def process_bind_param(self, value, dialect): return value * 10 def process_result_value(self, value, dialect): return value / 10 def copy(self): return MyIntType() def genmyintvalue(rnum, fnum): return rnum + fnum typedecoratortest = (MyIntType, genmyintvalue, dict(num_records=100000)) # Unicode def genunicodevalue(rnum, fnum): return (rnum % 4) and (u"value%d" % fnum) or None unicodetest = (Unicode(20, ), genunicodevalue, dict(num_records=100000)) # dict(engineurl='mysql:///test', freshdata=False)) # do the tests if test_types: tests = [booleantest, datetimetest, decimaltest, intervaltest, pickletypetest, typedecoratortest, unicodetest] for engineurl in ('postgresql://scott:tiger@localhost/test', 'sqlite://', 'mysql://scott:tiger@localhost/test'): print "\n%s\n" % engineurl for datatype, genvalue, kwargs in tests: print "%s:" % getattr(datatype, '__name__', datatype.__class__.__name__), profile_and_time_dbfunc(iter_results, datatype, genvalue, profile=False, engineurl=engineurl, verbose=verbose, **kwargs) # ---------------------- # # test row proxy methods # # ---------------------- # if test_methods: methods = [iter_results, values_results, getattr_results, getitem_str_results, getitem_fallback_results, getitem_int_results, getitem_long_results, getitem_obj_results, slice_results] for engineurl in ('postgresql://scott:tiger@localhost/test', 'sqlite://', 'mysql://scott:tiger@localhost/test'): print "\n%s\n" % engineurl test_table = prepare(Unicode(20,), genunicodevalue, num_fields=10, num_records=100000, verbose=verbose, engineurl=engineurl) for method in methods: print "%s:" % method.__name__, time_dbfunc(test_table, method, genunicodevalue, num_fields=10, num_records=100000, profile=False, verbose=verbose) # -------------------------------- # test pickling Rowproxy instances # -------------------------------- def pickletofile_results(raw_results): from cPickle import dump, load for protocol in (0, 1, 2): print "dumping protocol %d..." 
% protocol f = file('noext.pickle%d' % protocol, 'wb') dump(raw_results, f, protocol) f.close() return raw_results def pickle_results(raw_results): return loads(dumps(raw_results, 2)) def pickle_meta(raw_results): pickled = dumps(raw_results[0]._parent, 2) metadata = loads(pickled) return raw_results def pickle_rows(raw_results): return [loads(dumps(row, 2)) for row in raw_results] if test_pickle: test_table = prepare(Unicode, genunicodevalue, num_fields=10, num_records=10000) funcs = [pickle_rows, pickle_results] for func in funcs: print "%s:" % func.__name__, time_dbfunc(test_table, func, genunicodevalue, num_records=10000, profile=False, verbose=verbose) # -------------------------------- # test ORM # -------------------------------- if test_orm: from sqlalchemy.orm import * class Test(object): pass Session = sessionmaker() session = Session() def get_results(): return session.query(Test).all() print "ORM:", for engineurl in ('postgresql:///test', 'sqlite://', 'mysql:///test'): print "\n%s\n" % engineurl profile_and_time_dbfunc(getattr_results, Unicode(20), genunicodevalue, class_=Test, getresults_func=get_results, engineurl=engineurl, #freshdata=False, num_records=10000, verbose=verbose) SQLAlchemy-0.8.4/test/perf/stresstest.py0000644000076500000240000001362512251150016020741 0ustar classicstaff00000000000000import gc import sys import timeit import cProfile from sqlalchemy import MetaData, Table, Column from sqlalchemy.types import * from sqlalchemy.orm import mapper, clear_mappers metadata = MetaData() def gen_table(num_fields, field_type, metadata): return Table('test', metadata, Column('id', Integer, primary_key=True), *[Column("field%d" % fnum, field_type) for fnum in range(num_fields)]) def insert(test_table, num_fields, num_records, genvalue, verbose=True): if verbose: print "building insert values...", sys.stdout.flush() values = [dict(("field%d" % fnum, genvalue(rnum, fnum)) for fnum in range(num_fields)) for rnum in range(num_records)] if verbose: print "inserting...", sys.stdout.flush() def db_insert(): test_table.insert().execute(values) sys.modules['__main__'].db_insert = db_insert timing = timeit.timeit("db_insert()", "from __main__ import db_insert", number=1) if verbose: print "%s" % round(timing, 3) def check_result(results, num_fields, genvalue, verbose=True): if verbose: print "checking...", sys.stdout.flush() for rnum, row in enumerate(results): expected = tuple([rnum + 1] + [genvalue(rnum, fnum) for fnum in range(num_fields)]) assert row == expected, "got: %s\nexpected: %s" % (row, expected) return True def avgdev(values, comparison): return sum(value - comparison for value in values) / len(values) def nicer_res(values, printvalues=False): if printvalues: print values min_time = min(values) return round(min_time, 3), round(avgdev(values, min_time), 2) def profile_func(func_name, verbose=True): if verbose: print "profiling...", sys.stdout.flush() cProfile.run('%s()' % func_name, 'prof') def time_func(func_name, num_tests=1, verbose=True): if verbose: print "timing...", sys.stdout.flush() timings = timeit.repeat('%s()' % func_name, "from __main__ import %s" % func_name, number=num_tests, repeat=5) avg, dev = nicer_res(timings) if verbose: print "%s (%s)" % (avg, dev) else: print avg def profile_and_time(func_name, num_tests=1): profile_func(func_name) time_func(func_name, num_tests) def iter_results(raw_results): return [tuple(row) for row in raw_results] def getattr_results(raw_results): return [ (r.id, r.field0, r.field1, r.field2, r.field3, r.field4, r.field5, 
r.field6, r.field7, r.field8, r.field9) for r in raw_results] def fetchall(test_table): def results(): return test_table.select().order_by(test_table.c.id).execute() \ .fetchall() return results def hashable_set(l): hashables = [] for o in l: try: hash(o) hashables.append(o) except: pass return set(hashables) def prepare(field_type, genvalue, engineurl='sqlite://', num_fields=10, num_records=1000, freshdata=True, verbose=True): global metadata metadata.clear() metadata.bind = engineurl test_table = gen_table(num_fields, field_type, metadata) if freshdata: metadata.drop_all() metadata.create_all() insert(test_table, num_fields, num_records, genvalue, verbose) return test_table def time_dbfunc(test_table, test_func, genvalue, class_=None, getresults_func=None, num_fields=10, num_records=1000, num_tests=1, check_results=check_result, profile=True, check_leaks=True, print_leaks=False, verbose=True): if verbose: print "testing '%s'..." % test_func.__name__, sys.stdout.flush() if class_ is not None: clear_mappers() mapper(class_, test_table) if getresults_func is None: getresults_func = fetchall(test_table) def test(): return test_func(getresults_func()) sys.modules['__main__'].test = test if check_leaks: gc.collect() objects_before = gc.get_objects() num_objects_before = len(objects_before) hashable_objects_before = hashable_set(objects_before) # gc.set_debug(gc.DEBUG_LEAK) if check_results: check_results(test(), num_fields, genvalue, verbose) if check_leaks: gc.collect() objects_after = gc.get_objects() num_objects_after = len(objects_after) num_leaks = num_objects_after - num_objects_before hashable_objects_after = hashable_set(objects_after) diff = hashable_objects_after - hashable_objects_before ldiff = len(diff) if print_leaks and ldiff < num_records: print "\n*** hashable objects leaked (%d) ***" % ldiff print '\n'.join(map(str, diff)) print "***\n" if num_leaks > num_records: print "(leaked: %d !)" % num_leaks, if profile: profile_func('test', verbose) time_func('test', num_tests, verbose) def profile_and_time_dbfunc(test_func, field_type, genvalue, class_=None, getresults_func=None, engineurl='sqlite://', freshdata=True, num_fields=10, num_records=1000, num_tests=1, check_results=check_result, profile=True, check_leaks=True, print_leaks=False, verbose=True): test_table = prepare(field_type, genvalue, engineurl, num_fields, num_records, freshdata, verbose) time_dbfunc(test_table, test_func, genvalue, class_, getresults_func, num_fields, num_records, num_tests, check_results, profile, check_leaks, print_leaks, verbose) SQLAlchemy-0.8.4/test/perf/threaded_compile.py0000644000076500000240000000334112251150016022000 0ustar classicstaff00000000000000"""test that mapper compilation is threadsafe, including when additional mappers are created while the existing collection is being compiled.""" from sqlalchemy import * from sqlalchemy.orm import * import thread, time from sqlalchemy.orm import mapperlib meta = MetaData('sqlite:///foo.db') t1 = Table('t1', meta, Column('c1', Integer, primary_key=True), Column('c2', String(30)) ) t2 = Table('t2', meta, Column('c1', Integer, primary_key=True), Column('c2', String(30)), Column('t1c1', None, ForeignKey('t1.c1')) ) t3 = Table('t3', meta, Column('c1', Integer, primary_key=True), Column('c2', String(30)), ) meta.create_all() class T1(object): pass class T2(object): pass class FakeLock(object): def acquire(self):pass def release(self):pass # uncomment this to disable the mutex in mapper compilation; # should produce thread collisions 
#mapperlib._COMPILE_MUTEX = FakeLock() def run1(): for i in range(50): print "T1", thread.get_ident() class_mapper(T1) time.sleep(.05) def run2(): for i in range(50): print "T2", thread.get_ident() class_mapper(T2) time.sleep(.057) def run3(): for i in range(50): def foo(): print "FOO", thread.get_ident() class Foo(object):pass mapper(Foo, t3) class_mapper(Foo).compile() foo() time.sleep(.05) mapper(T1, t1, properties={'t2':relationship(T2, backref="t1")}) mapper(T2, t2) print "START" for j in range(0, 5): thread.start_new_thread(run1, ()) thread.start_new_thread(run2, ()) thread.start_new_thread(run3, ()) thread.start_new_thread(run3, ()) thread.start_new_thread(run3, ()) print "WAIT" time.sleep(5) SQLAlchemy-0.8.4/test/requirements.py0000644000076500000240000005503712251150016020310 0ustar classicstaff00000000000000"""Requirements specific to SQLAlchemy's own unit tests. """ from sqlalchemy import util import sys from sqlalchemy.testing.requirements import SuiteRequirements from sqlalchemy.testing import exclusions from sqlalchemy.testing.exclusions import \ skip, \ skip_if,\ only_if,\ only_on,\ fails_on_everything_except,\ fails_if,\ succeeds_if,\ SpecPredicate,\ against def no_support(db, reason): return SpecPredicate(db, description=reason) def exclude(db, op, spec, description=None): return SpecPredicate(db, op, spec, description=description) class DefaultRequirements(SuiteRequirements): @property def deferrable_or_no_constraints(self): """Target database must support derferable constraints.""" return skip_if([ no_support('firebird', 'not supported by database'), no_support('mysql', 'not supported by database'), no_support('mssql', 'not supported by database'), ]) @property def named_constraints(self): """target database must support names for constraints.""" return skip_if([ no_support('sqlite', 'not supported by database'), ]) @property def foreign_keys(self): """Target database must support foreign keys.""" return skip_if( no_support('sqlite', 'not supported by database') ) @property def on_update_cascade(self): """target database must support ON UPDATE..CASCADE behavior in foreign keys.""" return skip_if( ['sqlite', 'oracle'], 'target backend does not support ON UPDATE CASCADE' ) @property def deferrable_fks(self): """target database must support deferrable fks""" return only_on(['oracle']) @property def unbounded_varchar(self): """Target database must support VARCHAR with no length""" return skip_if([ "firebird", "oracle", "mysql" ], "not supported by database" ) @property def boolean_col_expressions(self): """Target database must support boolean expressions as columns""" return skip_if([ no_support('firebird', 'not supported by database'), no_support('oracle', 'not supported by database'), no_support('mssql', 'not supported by database'), no_support('sybase', 'not supported by database'), no_support('maxdb', 'FIXME: verify not supported by database'), no_support('informix', 'not supported by database'), ]) @property def standalone_binds(self): """target database/driver supports bound parameters as column expressions without being in the context of a typed column. """ return skip_if(["firebird", "mssql+mxodbc"], "not supported by driver") @property def identity(self): """Target database must support GENERATED AS IDENTITY or a facsimile. Includes GENERATED AS IDENTITY, AUTOINCREMENT, AUTO_INCREMENT, or other column DDL feature that fills in a DB-generated identifier at INSERT-time without requiring pre-execution of a SEQUENCE or other artifact. 
""" return skip_if(["firebird", "oracle", "postgresql", "sybase"], "not supported by database" ) @property def reflectable_autoincrement(self): """Target database must support tables that can automatically generate PKs assuming they were reflected. this is essentially all the DBs in "identity" plus Postgresql, which has SERIAL support. FB and Oracle (and sybase?) require the Sequence to be explicitly added, including if the table was reflected. """ return skip_if(["firebird", "oracle", "sybase"], "not supported by database" ) @property def binary_comparisons(self): """target database/driver can allow BLOB/BINARY fields to be compared against a bound parameter value. """ return skip_if(["oracle", "mssql"], "not supported by database/driver" ) @property def independent_cursors(self): """Target must support simultaneous, independent database cursors on a single connection.""" return skip_if(["mssql+pyodbc", "mssql+mxodbc"], "no driver support") @property def independent_connections(self): """Target must support simultaneous, independent database connections.""" # This is also true of some configurations of UnixODBC and probably win32 # ODBC as well. return skip_if([ no_support("sqlite", "independent connections disabled " "when :memory: connections are used"), exclude("mssql", "<", (9, 0, 0), "SQL Server 2005+ is required for " "independent connections" ) ] ) @property def updateable_autoincrement_pks(self): """Target must support UPDATE on autoincrement/integer primary key.""" return skip_if(["mssql", "sybase"], "IDENTITY columns can't be updated") @property def isolation_level(self): return only_on( ('postgresql', 'sqlite', 'mysql'), "DBAPI has no isolation level support" ).fails_on('postgresql+pypostgresql', 'pypostgresql bombs on multiple isolation level calls') @property def row_triggers(self): """Target must support standard statement-running EACH ROW triggers.""" return skip_if([ # no access to same table no_support('mysql', 'requires SUPER priv'), exclude('mysql', '<', (5, 0, 10), 'not supported by database'), # huh? TODO: implement triggers for PG tests, remove this no_support('postgresql', 'PG triggers need to be implemented for tests'), ]) @property def correlated_outer_joins(self): """Target must support an outer join to a subquery which correlates to the parent.""" return skip_if("oracle", 'Raises "ORA-01799: a column may not be ' 'outer-joined to a subquery"') @property def update_from(self): """Target must support UPDATE..FROM syntax""" return only_on(['postgresql', 'mssql', 'mysql'], "Backend does not support UPDATE..FROM") @property def update_where_target_in_subquery(self): """Target must support UPDATE where the same table is present in a subquery in the WHERE clause. 
This is an ANSI-standard syntax that apparently MySQL can't handle, such as: UPDATE documents SET flag=1 WHERE documents.title IN (SELECT max(documents.title) AS title FROM documents GROUP BY documents.user_id ) """ return fails_if('mysql', 'MySQL error 1093 "Cant specify target table ' 'for update in FROM clause"') @property def savepoints(self): """Target database must support savepoints.""" return skip_if([ "sqlite", "sybase", ("mysql", "<", (5, 0, 3)), ("informix", "<", (11, 55, "xC3")) ], "savepoints not supported") @property def schemas(self): """Target database must support external schemas, and have one named 'test_schema'.""" return skip_if([ "sqlite", "firebird" ], "no schema support") @property def cross_schema_fk_reflection(self): """target system must support reflection of inter-schema foreign keys """ return only_on([ "postgresql" ]) @property def unique_constraint_reflection(self): return fails_on_everything_except( "postgresql", "mysql", "sqlite" ) @property def update_nowait(self): """Target database must support SELECT...FOR UPDATE NOWAIT""" return skip_if(["firebird", "mssql", "mysql", "sqlite", "sybase"], "no FOR UPDATE NOWAIT support" ) @property def subqueries(self): """Target database must support subqueries.""" return skip_if(exclude('mysql', '<', (4, 1, 1)), 'no subquery support') @property def mod_operator_as_percent_sign(self): """target database must use a plain percent '%' as the 'modulus' operator.""" return only_if( ['mysql', 'sqlite', 'postgresql+psycopg2', 'mssql'] ) @property def intersect(self): """Target database must support INTERSECT or equivalent.""" return fails_if([ "firebird", "mysql", "sybase", "informix" ], 'no support for INTERSECT') @property def except_(self): """Target database must support EXCEPT or equivalent (i.e. 
MINUS).""" return fails_if([ "firebird", "mysql", "sybase", "informix" ], 'no support for EXCEPT') @property def offset(self): """Target database must support some method of adding OFFSET or equivalent to a result set.""" return fails_if([ "sybase" ], 'no support for OFFSET or equivalent') @property def window_functions(self): return only_if([ "postgresql", "mssql", "oracle" ], "Backend does not support window functions") @property def two_phase_transactions(self): """Target database must support two-phase transactions.""" return skip_if([ no_support('firebird', 'no SA implementation'), no_support('maxdb', 'two-phase xact not supported by database'), no_support('mssql', 'two-phase xact not supported by drivers'), no_support('oracle', 'two-phase xact not implemented in SQLA/oracle'), no_support('drizzle', 'two-phase xact not supported by database'), no_support('sqlite', 'two-phase xact not supported by database'), no_support('sybase', 'two-phase xact not supported by drivers/SQLA'), no_support('postgresql+zxjdbc', 'FIXME: JDBC driver confuses the transaction state, may ' 'need separate XA implementation'), exclude('mysql', '<', (5, 0, 3), 'two-phase xact not supported by database'), ]) @property def views(self): """Target database must support VIEWs.""" return skip_if("drizzle", "no VIEW support") @property def empty_strings_varchar(self): """target database can persist/return an empty string with a varchar.""" return fails_if(["oracle"], 'oracle converts empty strings to a blank space') @property def empty_strings_text(self): """target database can persist/return an empty string with an unbounded text.""" return exclusions.open() @property def unicode_data(self): """target drive must support unicode data stored in columns.""" return skip_if([ no_support("sybase", "no unicode driver support") ]) @property def unicode_connections(self): """Target driver must support some encoding of Unicode across the wire.""" # TODO: expand to exclude MySQLdb versions w/ broken unicode return skip_if([ exclude('mysql', '<', (4, 1, 1), 'no unicode connection support'), ]) @property def unicode_ddl(self): """Target driver must support some degree of non-ascii symbol names.""" # TODO: expand to exclude MySQLdb versions w/ broken unicode return skip_if([ no_support('maxdb', 'database support flakey'), no_support('oracle', 'FIXME: no support in database?'), no_support('sybase', 'FIXME: guessing, needs confirmation'), no_support('mssql+pymssql', 'no FreeTDS support'), exclude('mysql', '<', (4, 1, 1), 'no unicode connection support'), ]) @property def sane_rowcount(self): return skip_if( lambda: not self.db.dialect.supports_sane_rowcount, "driver doesn't support 'sane' rowcount" ) @property def cextensions(self): return skip_if( lambda: not self._has_cextensions(), "C extensions not installed" ) @property def emulated_lastrowid(self): """"target dialect retrieves cursor.lastrowid or an equivalent after an insert() construct executes. """ return fails_on_everything_except('mysql+mysqldb', 'mysql+oursql', 'sqlite+pysqlite', 'mysql+pymysql', 'mysql+cymysql', 'sybase', 'mssql+pyodbc', 'mssql+mxodbc') @property def implements_get_lastrowid(self): return skip_if([ no_support('sybase', 'not supported by database'), ]) @property def dbapi_lastrowid(self): """"target backend includes a 'lastrowid' accessor on the DBAPI cursor object. 
""" return fails_on_everything_except('mysql+mysqldb', 'mysql+oursql', 'sqlite+pysqlite', 'mysql+pymysql', 'mysql+cymysql') @property def sane_multi_rowcount(self): return skip_if( lambda: not self.db.dialect.supports_sane_multi_rowcount, "driver doesn't support 'sane' multi row count" ) @property def nullsordering(self): """Target backends that support nulls ordering.""" return fails_on_everything_except('postgresql', 'oracle', 'firebird') @property def reflects_pk_names(self): """Target driver reflects the name of primary key constraints.""" return fails_on_everything_except('postgresql', 'oracle', 'mssql', 'sybase') @property def datetime(self): """target dialect supports representation of Python datetime.datetime() objects.""" return exclusions.open() @property def datetime_microseconds(self): """target dialect supports representation of Python datetime.datetime() with microsecond objects.""" return skip_if(['mssql', 'mysql', 'firebird', '+zxjdbc', 'oracle', 'sybase']) @property def datetime_historic(self): """target dialect supports representation of Python datetime.datetime() objects with historic (pre 1900) values.""" return succeeds_if(['sqlite', 'postgresql', 'firebird']) @property def date(self): """target dialect supports representation of Python datetime.date() objects.""" return exclusions.open() @property def date_historic(self): """target dialect supports representation of Python datetime.datetime() objects with historic (pre 1900) values.""" return succeeds_if(['sqlite', 'postgresql', 'firebird']) @property def time(self): """target dialect supports representation of Python datetime.time() objects.""" return skip_if(['oracle']) @property def time_microseconds(self): """target dialect supports representation of Python datetime.time() with microsecond objects.""" return skip_if(['mssql', 'mysql', 'firebird', '+zxjdbc', 'oracle', 'sybase']) @property def precision_numerics_general(self): """target backend has general support for moderately high-precision numerics.""" return fails_if('mssql+pymssql', 'FIXME: improve pymssql dec handling') @property def precision_numerics_enotation_small(self): """target backend supports Decimal() objects using E notation to represent very small values.""" return fails_if('mssql+pymssql', 'FIXME: improve pymssql dec handling') @property def precision_numerics_enotation_large(self): """target backend supports Decimal() objects using E notation to represent very large values.""" return fails_if( ("sybase+pyodbc", None, None, "Don't know how do get these values through FreeTDS + Sybase"), ("firebird", None, None, "Precision must be from 1 to 18"), ) @property def precision_numerics_many_significant_digits(self): """target backend supports values with many digits on both sides, such as 319438950232418390.273596, 87673.594069654243 """ return fails_if( [('sqlite', None, None, 'TODO'), ("firebird", None, None, "Precision must be from 1 to 18"), ("sybase+pysybase", None, None, "TODO"), ('mssql+pymssql', None, None, 'FIXME: improve pymssql dec handling')] ) @property def precision_numerics_retains_significant_digits(self): """A precision numeric type will return empty significant digits, i.e. 
a value such as 10.000 will come back in Decimal form with the .000 maintained.""" return fails_if( [ ('oracle', None, None, "this may be a bug due to the difficulty in handling " "oracle precision numerics"), ('postgresql+pg8000', None, None, "pg-8000 does native decimal but truncates the decimals."), ("firebird", None, None, "database and/or driver truncates decimal places.") ] ) @property def python2(self): return skip_if( lambda: sys.version_info >= (3,), "Python version 2.xx is required." ) @property def python3(self): return skip_if( lambda: sys.version_info < (3,), "Python version 3.xx is required." ) @property def python26(self): return skip_if( lambda: sys.version_info < (2, 6), "Python version 2.6 or greater is required" ) @property def python25(self): return skip_if( lambda: sys.version_info < (2, 5), "Python version 2.5 or greater is required" ) @property def cpython(self): return only_if(lambda: util.cpython, "cPython interpreter needed" ) @property def bulletproof_pickle(self): from sqlalchemy.util import pickle return only_if( lambda: pickle.__name__ == 'cPickle' and \ sys.version_info < (3, 0) and not util.pypy, "Needs Python 2.x cPickle" ) @property def predictable_gc(self): """target platform must remove all cycles unconditionally when gc.collect() is called, as well as clean out unreferenced subclasses. """ return self.cpython @property def hstore(self): def check_hstore(): if not against("postgresql"): return False try: self.db.execute("SELECT 'a=>1,a=>2'::hstore;") return True except: return False return only_if(check_hstore) @property def range_types(self): def check_range_types(): if not against("postgresql+psycopg2"): return False try: self.db.execute("select '[1,2)'::int4range;") # only supported in psycopg 2.5+ from psycopg2.extras import NumericRange return True except: return False return only_if(check_range_types) @property def sqlite(self): return skip_if(lambda: not self._has_sqlite()) @property def oracle_test_dblink(self): return skip_if( lambda: not self.config.file_config.has_option( 'sqla_testing', 'oracle_db_link'), "oracle_db_link option not specified in config" ) @property def ad_hoc_engines(self): """Test environment must allow ad-hoc engine/connection creation. DBs that scale poorly for many connections, even when closed, i.e. Oracle, may use the "--low-connections" option which flags this requirement as not present. 
""" return skip_if(lambda: self.config.options.low_connections) @property def skip_mysql_on_windows(self): """Catchall for a large variety of MySQL on Windows failures""" return skip_if(self._has_mysql_on_windows, "Not supported on MySQL + Windows" ) @property def threading_with_mock(self): """Mark tests that use threading and mock at the same time - stability issues have been observed with coverage + python 3.3 """ return skip_if( lambda: (util.py3k or not util.py26) and self.config.options.enable_plugin_coverage, "Stability issues with coverage + py3k, py2.5" ) @property def english_locale_on_postgresql(self): return skip_if(lambda: against('postgresql') \ and not self.db.scalar('SHOW LC_COLLATE').startswith('en')) @property def selectone(self): """target driver must support the literal statement 'select 1'""" return skip_if(["oracle", "firebird"], "non-standard SELECT scalar syntax") def _has_cextensions(self): try: from sqlalchemy import cresultproxy, cprocessors return True except ImportError: return False def _has_sqlite(self): from sqlalchemy import create_engine try: create_engine('sqlite://') return True except ImportError: return False def _has_mysql_on_windows(self): return against('mysql') and \ self.db.dialect._detect_casing(self.db) == 1 def _has_mysql_fully_case_sensitive(self): return against('mysql') and \ self.db.dialect._detect_casing(self.db) == 0 SQLAlchemy-0.8.4/test/sql/0000755000076500000240000000000012251151573016011 5ustar classicstaff00000000000000SQLAlchemy-0.8.4/test/sql/__init__.py0000644000076500000240000000000012251147172020110 0ustar classicstaff00000000000000SQLAlchemy-0.8.4/test/sql/test_case_statement.py0000644000076500000240000001231512251150016022412 0ustar classicstaff00000000000000from sqlalchemy.testing import assert_raises, assert_raises_message, eq_ import sys from sqlalchemy import * from sqlalchemy.testing import fixtures, AssertsCompiledSQL from sqlalchemy import testing from sqlalchemy import util, exc from sqlalchemy.sql import table, column class CaseTest(fixtures.TestBase, AssertsCompiledSQL): __dialect__ = 'default' @classmethod def setup_class(cls): metadata = MetaData(testing.db) global info_table info_table = Table('infos', metadata, Column('pk', Integer, primary_key=True), Column('info', String(30))) info_table.create() info_table.insert().execute( {'pk':1, 'info':'pk_1_data'}, {'pk':2, 'info':'pk_2_data'}, {'pk':3, 'info':'pk_3_data'}, {'pk':4, 'info':'pk_4_data'}, {'pk':5, 'info':'pk_5_data'}, {'pk':6, 'info':'pk_6_data'}) @classmethod def teardown_class(cls): info_table.drop() @testing.fails_on('firebird', 'FIXME: unknown') @testing.fails_on('maxdb', 'FIXME: unknown') @testing.requires.subqueries def test_case(self): inner = select([case([ [info_table.c.pk < 3, 'lessthan3'], [and_(info_table.c.pk >= 3, info_table.c.pk < 7), 'gt3']]).label('x'), info_table.c.pk, info_table.c.info], from_obj=[info_table]) inner_result = inner.execute().fetchall() # Outputs: # lessthan3 1 pk_1_data # lessthan3 2 pk_2_data # gt3 3 pk_3_data # gt3 4 pk_4_data # gt3 5 pk_5_data # gt3 6 pk_6_data assert inner_result == [ ('lessthan3', 1, 'pk_1_data'), ('lessthan3', 2, 'pk_2_data'), ('gt3', 3, 'pk_3_data'), ('gt3', 4, 'pk_4_data'), ('gt3', 5, 'pk_5_data'), ('gt3', 6, 'pk_6_data') ] outer = select([inner.alias('q_inner')]) outer_result = outer.execute().fetchall() assert outer_result == [ ('lessthan3', 1, 'pk_1_data'), ('lessthan3', 2, 'pk_2_data'), ('gt3', 3, 'pk_3_data'), ('gt3', 4, 'pk_4_data'), ('gt3', 5, 'pk_5_data'), ('gt3', 6, 'pk_6_data') ] w_else = 
select([case([ [info_table.c.pk < 3, 3], [and_(info_table.c.pk >= 3, info_table.c.pk < 6), 6]], else_ = 0).label('x'), info_table.c.pk, info_table.c.info], from_obj=[info_table]) else_result = w_else.execute().fetchall() assert else_result == [ (3, 1, 'pk_1_data'), (3, 2, 'pk_2_data'), (6, 3, 'pk_3_data'), (6, 4, 'pk_4_data'), (6, 5, 'pk_5_data'), (0, 6, 'pk_6_data') ] def test_literal_interpretation(self): t = table('test', column('col1')) assert_raises(exc.ArgumentError, case, [("x", "y")]) self.assert_compile(case([("x", "y")], value=t.c.col1), "CASE test.col1 WHEN :param_1 THEN :param_2 END") self.assert_compile(case([(t.c.col1 == 7, "y")], else_="z"), "CASE WHEN (test.col1 = :col1_1) THEN :param_1 ELSE :param_2 END") def test_text_doesnt_explode(self): for s in [ select([case([(info_table.c.info == 'pk_4_data', text("'yes'"))], else_=text("'no'" ))]).order_by(info_table.c.info), select([case([(info_table.c.info == 'pk_4_data', literal_column("'yes'"))], else_=literal_column("'no'" ))]).order_by(info_table.c.info), ]: if testing.against("firebird"): eq_(s.execute().fetchall(), [ ('no ', ), ('no ', ), ('no ', ), ('yes', ), ('no ', ), ('no ', ), ]) else: eq_(s.execute().fetchall(), [ ('no', ), ('no', ), ('no', ), ('yes', ), ('no', ), ('no', ), ]) @testing.fails_on('firebird', 'FIXME: unknown') @testing.fails_on('maxdb', 'FIXME: unknown') def testcase_with_dict(self): query = select([case({ info_table.c.pk < 3: 'lessthan3', info_table.c.pk >= 3: 'gt3', }, else_='other'), info_table.c.pk, info_table.c.info ], from_obj=[info_table]) assert query.execute().fetchall() == [ ('lessthan3', 1, 'pk_1_data'), ('lessthan3', 2, 'pk_2_data'), ('gt3', 3, 'pk_3_data'), ('gt3', 4, 'pk_4_data'), ('gt3', 5, 'pk_5_data'), ('gt3', 6, 'pk_6_data') ] simple_query = select([case({ 1: 'one', 2: 'two', }, value=info_table.c.pk, else_='other'), info_table.c.pk ], whereclause=info_table.c.pk < 4, from_obj=[info_table]) assert simple_query.execute().fetchall() == [ ('one', 1), ('two', 2), ('other', 3), ] SQLAlchemy-0.8.4/test/sql/test_compiler.py0000644000076500000240000040166012251150016021232 0ustar classicstaff00000000000000#! coding:utf-8 """ compiler tests. These tests are among the very first that were written when SQLAlchemy began in 2005. As a result the testing style here is very dense; it's an ongoing job to break these into much smaller tests with correct pep8 styling and coherent test organization. 
""" from sqlalchemy.testing import eq_, is_, assert_raises, assert_raises_message from sqlalchemy import testing from sqlalchemy.testing import fixtures, AssertsCompiledSQL from sqlalchemy import Integer, String, MetaData, Table, Column, select, \ func, not_, cast, text, tuple_, exists, update, bindparam,\ literal, and_, null, type_coerce, alias, or_, literal_column,\ Float, TIMESTAMP, Numeric, Date, Text, collate, union, except_,\ intersect, union_all, Boolean, distinct, join, outerjoin, asc, desc,\ over, subquery, case import decimal from sqlalchemy import exc, sql, util, types, schema from sqlalchemy.sql import table, column, label from sqlalchemy.sql.expression import ClauseList, _literal_as_text, HasPrefixes from sqlalchemy.engine import default from sqlalchemy.dialects import mysql, mssql, postgresql, oracle, \ sqlite, sybase from sqlalchemy.ext.compiler import compiles table1 = table('mytable', column('myid', Integer), column('name', String), column('description', String), ) table2 = table( 'myothertable', column('otherid', Integer), column('othername', String), ) table3 = table( 'thirdtable', column('userid', Integer), column('otherstuff', String), ) metadata = MetaData() # table with a schema table4 = Table( 'remotetable', metadata, Column('rem_id', Integer, primary_key=True), Column('datatype_id', Integer), Column('value', String(20)), schema='remote_owner' ) # table with a 'multipart' schema table5 = Table( 'remotetable', metadata, Column('rem_id', Integer, primary_key=True), Column('datatype_id', Integer), Column('value', String(20)), schema='dbo.remote_owner' ) users = table('users', column('user_id'), column('user_name'), column('password'), ) addresses = table('addresses', column('address_id'), column('user_id'), column('street'), column('city'), column('state'), column('zip') ) keyed = Table('keyed', metadata, Column('x', Integer, key='colx'), Column('y', Integer, key='coly'), Column('z', Integer), ) class SelectTest(fixtures.TestBase, AssertsCompiledSQL): __dialect__ = 'default' def test_attribute_sanity(self): assert hasattr(table1, 'c') assert hasattr(table1.select(), 'c') assert not hasattr(table1.c.myid.self_group(), 'columns') assert hasattr(table1.select().self_group(), 'columns') assert not hasattr(table1.c.myid, 'columns') assert not hasattr(table1.c.myid, 'c') assert not hasattr(table1.select().c.myid, 'c') assert not hasattr(table1.select().c.myid, 'columns') assert not hasattr(table1.alias().c.myid, 'columns') assert not hasattr(table1.alias().c.myid, 'c') if util.compat.py32: assert_raises_message( exc.InvalidRequestError, 'Scalar Select expression has no ' 'columns; use this object directly within a ' 'column-level expression.', lambda: hasattr( select([table1.c.myid]).as_scalar().self_group(), 'columns')) assert_raises_message( exc.InvalidRequestError, 'Scalar Select expression has no ' 'columns; use this object directly within a ' 'column-level expression.', lambda: hasattr(select([table1.c.myid]).as_scalar(), 'columns')) else: assert not hasattr( select([table1.c.myid]).as_scalar().self_group(), 'columns') assert not hasattr(select([table1.c.myid]).as_scalar(), 'columns') def test_prefix_constructor(self): class Pref(HasPrefixes): def _generate(self): return self assert_raises(exc.ArgumentError, Pref().prefix_with, "some prefix", not_a_dialect=True ) def test_table_select(self): self.assert_compile(table1.select(), "SELECT mytable.myid, mytable.name, " "mytable.description FROM mytable") self.assert_compile(select([table1, table2]), "SELECT mytable.myid, 
mytable.name, mytable.description, " "myothertable.otherid, myothertable.othername FROM mytable, " "myothertable") def test_invalid_col_argument(self): assert_raises(exc.ArgumentError, select, table1) assert_raises(exc.ArgumentError, select, table1.c.myid) def test_int_limit_offset_coercion(self): for given, exp in [ ("5", 5), (5, 5), (5.2, 5), (decimal.Decimal("5"), 5), (None, None), ]: eq_(select().limit(given)._limit, exp) eq_(select().offset(given)._offset, exp) eq_(select(limit=given)._limit, exp) eq_(select(offset=given)._offset, exp) assert_raises(ValueError, select().limit, "foo") assert_raises(ValueError, select().offset, "foo") assert_raises(ValueError, select, offset="foo") assert_raises(ValueError, select, limit="foo") def test_limit_offset(self): for lim, offset, exp, params in [ (5, 10, "LIMIT :param_1 OFFSET :param_2", {'param_1':5, 'param_2':10}), (None, 10, "LIMIT -1 OFFSET :param_1", {'param_1':10}), (5, None, "LIMIT :param_1", {'param_1':5}), (0, 0, "LIMIT :param_1 OFFSET :param_2", {'param_1':0, 'param_2':0}), ]: self.assert_compile( select([1]).limit(lim).offset(offset), "SELECT 1 " + exp, checkparams=params ) def test_from_subquery(self): """tests placing select statements in the column clause of another select, for the purposes of selecting from the exported columns of that select.""" s = select([table1], table1.c.name == 'jack') self.assert_compile( select( [s], s.c.myid == 7 ), "SELECT myid, name, description FROM (SELECT mytable.myid AS myid, " "mytable.name AS name, mytable.description AS description " "FROM mytable " "WHERE mytable.name = :name_1) WHERE myid = :myid_1") sq = select([table1]) self.assert_compile( sq.select(), "SELECT myid, name, description FROM " "(SELECT mytable.myid AS myid, " "mytable.name AS name, mytable.description " "AS description FROM mytable)" ) sq = select( [table1], ).alias('sq') self.assert_compile( sq.select(sq.c.myid == 7), "SELECT sq.myid, sq.name, sq.description FROM " "(SELECT mytable.myid AS myid, mytable.name AS name, " "mytable.description AS description FROM mytable) AS sq " "WHERE sq.myid = :myid_1" ) sq = select( [table1, table2], and_(table1.c.myid == 7, table2.c.otherid == table1.c.myid), use_labels=True ).alias('sq') sqstring = "SELECT mytable.myid AS mytable_myid, mytable.name AS "\ "mytable_name, mytable.description AS mytable_description, "\ "myothertable.otherid AS myothertable_otherid, "\ "myothertable.othername AS myothertable_othername FROM "\ "mytable, myothertable WHERE mytable.myid = :myid_1 AND "\ "myothertable.otherid = mytable.myid" self.assert_compile( sq.select(), "SELECT sq.mytable_myid, sq.mytable_name, " "sq.mytable_description, sq.myothertable_otherid, " "sq.myothertable_othername FROM (%s) AS sq" % sqstring) sq2 = select( [sq], use_labels=True ).alias('sq2') self.assert_compile( sq2.select(), "SELECT sq2.sq_mytable_myid, sq2.sq_mytable_name, " "sq2.sq_mytable_description, sq2.sq_myothertable_otherid, " "sq2.sq_myothertable_othername FROM " "(SELECT sq.mytable_myid AS " "sq_mytable_myid, sq.mytable_name AS sq_mytable_name, " "sq.mytable_description AS sq_mytable_description, " "sq.myothertable_otherid AS sq_myothertable_otherid, " "sq.myothertable_othername AS sq_myothertable_othername " "FROM (%s) AS sq) AS sq2" % sqstring) def test_select_from_clauselist(self): self.assert_compile( select([ClauseList(column('a'), column('b'))] ).select_from('sometable'), 'SELECT a, b FROM sometable' ) def test_use_labels(self): self.assert_compile( select([table1.c.myid == 5], use_labels=True), "SELECT mytable.myid 
= :myid_1 AS anon_1 FROM mytable" ) self.assert_compile( select([func.foo()], use_labels=True), "SELECT foo() AS foo_1" ) self.assert_compile( select([not_(True)], use_labels=True), "SELECT NOT :param_1" ) self.assert_compile( select([cast("data", Integer)], use_labels=True), "SELECT CAST(:param_1 AS INTEGER) AS anon_1" ) self.assert_compile( select([func.sum( func.lala(table1.c.myid).label('foo')).label('bar')]), "SELECT sum(lala(mytable.myid)) AS bar FROM mytable" ) self.assert_compile( select([keyed]), "SELECT keyed.x, keyed.y" ", keyed.z FROM keyed" ) self.assert_compile( select([keyed]).apply_labels(), "SELECT keyed.x AS keyed_x, keyed.y AS " "keyed_y, keyed.z AS keyed_z FROM keyed" ) def test_paramstyles(self): stmt = text("select :foo, :bar, :bat from sometable") self.assert_compile( stmt, "select ?, ?, ? from sometable", dialect=default.DefaultDialect(paramstyle='qmark') ) self.assert_compile( stmt, "select :foo, :bar, :bat from sometable", dialect=default.DefaultDialect(paramstyle='named') ) self.assert_compile( stmt, "select %s, %s, %s from sometable", dialect=default.DefaultDialect(paramstyle='format') ) self.assert_compile( stmt, "select :1, :2, :3 from sometable", dialect=default.DefaultDialect(paramstyle='numeric') ) self.assert_compile( stmt, "select %(foo)s, %(bar)s, %(bat)s from sometable", dialect=default.DefaultDialect(paramstyle='pyformat') ) def test_dupe_columns(self): """test that deduping is performed against clause element identity, not rendered result.""" self.assert_compile( select([column('a'), column('a'), column('a')]), "SELECT a, a, a", dialect=default.DefaultDialect() ) c = column('a') self.assert_compile( select([c, c, c]), "SELECT a", dialect=default.DefaultDialect() ) a, b = column('a'), column('b') self.assert_compile( select([a, b, b, b, a, a]), "SELECT a, b", dialect=default.DefaultDialect() ) # using alternate keys. a, b, c = Column('a', Integer, key='b'), \ Column('b', Integer), \ Column('c', Integer, key='a') self.assert_compile( select([a, b, c, a, b, c]), "SELECT a, b, c", dialect=default.DefaultDialect() ) self.assert_compile( select([bindparam('a'), bindparam('b'), bindparam('c')]), "SELECT :a AS anon_1, :b AS anon_2, :c AS anon_3", dialect=default.DefaultDialect(paramstyle='named') ) self.assert_compile( select([bindparam('a'), bindparam('b'), bindparam('c')]), "SELECT ? AS anon_1, ? AS anon_2, ? AS anon_3", dialect=default.DefaultDialect(paramstyle='qmark'), ) self.assert_compile( select(["a", "a", "a"]), "SELECT a, a, a" ) s = select([bindparam('a'), bindparam('b'), bindparam('c')]) s = s.compile(dialect=default.DefaultDialect(paramstyle='qmark')) eq_(s.positiontup, ['a', 'b', 'c']) def test_nested_label_targeting(self): """test nested anonymous label generation. 
""" s1 = table1.select() s2 = s1.alias() s3 = select([s2], use_labels=True) s4 = s3.alias() s5 = select([s4], use_labels=True) self.assert_compile(s5, 'SELECT anon_1.anon_2_myid AS ' 'anon_1_anon_2_myid, anon_1.anon_2_name AS ' 'anon_1_anon_2_name, anon_1.anon_2_descript' 'ion AS anon_1_anon_2_description FROM ' '(SELECT anon_2.myid AS anon_2_myid, ' 'anon_2.name AS anon_2_name, ' 'anon_2.description AS anon_2_description ' 'FROM (SELECT mytable.myid AS myid, ' 'mytable.name AS name, mytable.description ' 'AS description FROM mytable) AS anon_2) ' 'AS anon_1') def test_nested_label_targeting_keyed(self): s1 = keyed.select() s2 = s1.alias() s3 = select([s2], use_labels=True) self.assert_compile(s3, "SELECT anon_1.x AS anon_1_x, " "anon_1.y AS anon_1_y, " "anon_1.z AS anon_1_z FROM " "(SELECT keyed.x AS x, keyed.y " "AS y, keyed.z AS z FROM keyed) AS anon_1") s4 = s3.alias() s5 = select([s4], use_labels=True) self.assert_compile(s5, "SELECT anon_1.anon_2_x AS anon_1_anon_2_x, " "anon_1.anon_2_y AS anon_1_anon_2_y, " "anon_1.anon_2_z AS anon_1_anon_2_z " "FROM (SELECT anon_2.x AS anon_2_x, " "anon_2.y AS anon_2_y, " "anon_2.z AS anon_2_z FROM " "(SELECT keyed.x AS x, keyed.y AS y, keyed.z " "AS z FROM keyed) AS anon_2) AS anon_1" ) def test_exists(self): s = select([table1.c.myid]).where(table1.c.myid == 5) self.assert_compile(exists(s), "EXISTS (SELECT mytable.myid FROM mytable " "WHERE mytable.myid = :myid_1)" ) self.assert_compile(exists(s.as_scalar()), "EXISTS (SELECT mytable.myid FROM mytable " "WHERE mytable.myid = :myid_1)" ) self.assert_compile(exists([table1.c.myid], table1.c.myid == 5).select(), 'SELECT EXISTS (SELECT mytable.myid FROM ' 'mytable WHERE mytable.myid = :myid_1)', params={'mytable_myid': 5}) self.assert_compile(select([table1, exists([1], from_obj=table2)]), 'SELECT mytable.myid, mytable.name, ' 'mytable.description, EXISTS (SELECT 1 ' 'FROM myothertable) FROM mytable', params={}) self.assert_compile(select([table1, exists([1], from_obj=table2).label('foo')]), 'SELECT mytable.myid, mytable.name, ' 'mytable.description, EXISTS (SELECT 1 ' 'FROM myothertable) AS foo FROM mytable', params={}) self.assert_compile(table1.select(exists().where(table2.c.otherid == table1.c.myid).correlate(table1)), 'SELECT mytable.myid, mytable.name, ' 'mytable.description FROM mytable WHERE ' 'EXISTS (SELECT * FROM myothertable WHERE ' 'myothertable.otherid = mytable.myid)') self.assert_compile(table1.select(exists().where(table2.c.otherid == table1.c.myid).correlate(table1)), 'SELECT mytable.myid, mytable.name, ' 'mytable.description FROM mytable WHERE ' 'EXISTS (SELECT * FROM myothertable WHERE ' 'myothertable.otherid = mytable.myid)') self.assert_compile(table1.select(exists().where(table2.c.otherid == table1.c.myid).correlate(table1) ).replace_selectable(table2, table2.alias()), 'SELECT mytable.myid, mytable.name, ' 'mytable.description FROM mytable WHERE ' 'EXISTS (SELECT * FROM myothertable AS ' 'myothertable_1 WHERE myothertable_1.otheri' 'd = mytable.myid)') self.assert_compile(table1.select(exists().where(table2.c.otherid == table1.c.myid).correlate(table1)).select_from( table1.join(table2, table1.c.myid == table2.c.otherid)).replace_selectable(table2, table2.alias()), 'SELECT mytable.myid, mytable.name, ' 'mytable.description FROM mytable JOIN ' 'myothertable AS myothertable_1 ON ' 'mytable.myid = myothertable_1.otherid ' 'WHERE EXISTS (SELECT * FROM myothertable ' 'AS myothertable_1 WHERE ' 'myothertable_1.otherid = mytable.myid)') self.assert_compile( select([ or_( 
exists().where(table2.c.otherid == 'foo'), exists().where(table2.c.otherid == 'bar') ) ]), "SELECT (EXISTS (SELECT * FROM myothertable " "WHERE myothertable.otherid = :otherid_1)) " "OR (EXISTS (SELECT * FROM myothertable WHERE " "myothertable.otherid = :otherid_2)) AS anon_1" ) def test_where_subquery(self): s = select([addresses.c.street], addresses.c.user_id == users.c.user_id, correlate=True).alias('s') # don't correlate in a FROM list self.assert_compile(select([users, s.c.street], from_obj=s), "SELECT users.user_id, users.user_name, " "users.password, s.street FROM users, " "(SELECT addresses.street AS street FROM " "addresses, users WHERE addresses.user_id = " "users.user_id) AS s") self.assert_compile(table1.select(table1.c.myid == select([table1.c.myid], table1.c.name == 'jack')), 'SELECT mytable.myid, mytable.name, ' 'mytable.description FROM mytable WHERE ' 'mytable.myid = (SELECT mytable.myid FROM ' 'mytable WHERE mytable.name = :name_1)') self.assert_compile(table1.select(table1.c.myid == select([table2.c.otherid], table1.c.name == table2.c.othername)), 'SELECT mytable.myid, mytable.name, ' 'mytable.description FROM mytable WHERE ' 'mytable.myid = (SELECT ' 'myothertable.otherid FROM myothertable ' 'WHERE mytable.name = myothertable.othernam' 'e)') self.assert_compile(table1.select(exists([1], table2.c.otherid == table1.c.myid)), 'SELECT mytable.myid, mytable.name, ' 'mytable.description FROM mytable WHERE ' 'EXISTS (SELECT 1 FROM myothertable WHERE ' 'myothertable.otherid = mytable.myid)') talias = table1.alias('ta') s = subquery('sq2', [talias], exists([1], table2.c.otherid == talias.c.myid)) self.assert_compile(select([s, table1]), 'SELECT sq2.myid, sq2.name, ' 'sq2.description, mytable.myid, ' 'mytable.name, mytable.description FROM ' '(SELECT ta.myid AS myid, ta.name AS name, ' 'ta.description AS description FROM ' 'mytable AS ta WHERE EXISTS (SELECT 1 FROM ' 'myothertable WHERE myothertable.otherid = ' 'ta.myid)) AS sq2, mytable') # test constructing the outer query via append_column(), which # occurs in the ORM's Query object s = select([], exists([1], table2.c.otherid == table1.c.myid), from_obj=table1) s.append_column(table1) self.assert_compile(s, 'SELECT mytable.myid, mytable.name, ' 'mytable.description FROM mytable WHERE ' 'EXISTS (SELECT 1 FROM myothertable WHERE ' 'myothertable.otherid = mytable.myid)') def test_orderby_subquery(self): self.assert_compile(table1.select(order_by=[select([table2.c.otherid], table1.c.myid == table2.c.otherid)]), 'SELECT mytable.myid, mytable.name, ' 'mytable.description FROM mytable ORDER BY ' '(SELECT myothertable.otherid FROM ' 'myothertable WHERE mytable.myid = ' 'myothertable.otherid)') self.assert_compile(table1.select(order_by=[ desc(select([table2.c.otherid], table1.c.myid == table2.c.otherid))]), 'SELECT mytable.myid, mytable.name, ' 'mytable.description FROM mytable ORDER BY ' '(SELECT myothertable.otherid FROM ' 'myothertable WHERE mytable.myid = ' 'myothertable.otherid) DESC') def test_scalar_select(self): assert_raises_message( exc.InvalidRequestError, r"Select objects don't have a type\. 
Call as_scalar\(\) " "on this Select object to return a 'scalar' " "version of this Select\.", func.coalesce, select([table1.c.myid]) ) s = select([table1.c.myid], correlate=False).as_scalar() self.assert_compile(select([table1, s]), 'SELECT mytable.myid, mytable.name, ' 'mytable.description, (SELECT mytable.myid ' 'FROM mytable) AS anon_1 FROM mytable') s = select([table1.c.myid]).as_scalar() self.assert_compile(select([table2, s]), 'SELECT myothertable.otherid, ' 'myothertable.othername, (SELECT ' 'mytable.myid FROM mytable) AS anon_1 FROM ' 'myothertable') s = select([table1.c.myid]).correlate(None).as_scalar() self.assert_compile(select([table1, s]), 'SELECT mytable.myid, mytable.name, ' 'mytable.description, (SELECT mytable.myid ' 'FROM mytable) AS anon_1 FROM mytable') s = select([table1.c.myid]).as_scalar() s2 = s.where(table1.c.myid == 5) self.assert_compile( s2, "(SELECT mytable.myid FROM mytable WHERE mytable.myid = :myid_1)" ) self.assert_compile( s, "(SELECT mytable.myid FROM mytable)" ) # test that aliases use as_scalar() when used in an explicitly # scalar context s = select([table1.c.myid]).alias() self.assert_compile(select([table1.c.myid]).where(table1.c.myid == s), 'SELECT mytable.myid FROM mytable WHERE ' 'mytable.myid = (SELECT mytable.myid FROM ' 'mytable)') self.assert_compile(select([table1.c.myid]).where(s > table1.c.myid), 'SELECT mytable.myid FROM mytable WHERE ' 'mytable.myid < (SELECT mytable.myid FROM ' 'mytable)') s = select([table1.c.myid]).as_scalar() self.assert_compile(select([table2, s]), 'SELECT myothertable.otherid, ' 'myothertable.othername, (SELECT ' 'mytable.myid FROM mytable) AS anon_1 FROM ' 'myothertable') # test expressions against scalar selects self.assert_compile(select([s - literal(8)]), 'SELECT (SELECT mytable.myid FROM mytable) ' '- :param_1 AS anon_1') self.assert_compile(select([select([table1.c.name]).as_scalar() + literal('x')]), 'SELECT (SELECT mytable.name FROM mytable) ' '|| :param_1 AS anon_1') self.assert_compile(select([s > literal(8)]), 'SELECT (SELECT mytable.myid FROM mytable) ' '> :param_1 AS anon_1') self.assert_compile(select([select([table1.c.name]).label('foo' )]), 'SELECT (SELECT mytable.name FROM mytable) ' 'AS foo') # scalar selects should not have any attributes on their 'c' or # 'columns' attribute s = select([table1.c.myid]).as_scalar() try: s.c.foo except exc.InvalidRequestError, err: assert str(err) \ == 'Scalar Select expression has no columns; use this '\ 'object directly within a column-level expression.' try: s.columns.foo except exc.InvalidRequestError, err: assert str(err) \ == 'Scalar Select expression has no columns; use this '\ 'object directly within a column-level expression.' 
zips = table('zips', column('zipcode'), column('latitude'), column('longitude'), ) places = table('places', column('id'), column('nm') ) zip = '12345' qlat = select([zips.c.latitude], zips.c.zipcode == zip).\ correlate(None).as_scalar() qlng = select([zips.c.longitude], zips.c.zipcode == zip).\ correlate(None).as_scalar() q = select([places.c.id, places.c.nm, zips.c.zipcode, func.latlondist(qlat, qlng).label('dist')], zips.c.zipcode == zip, order_by=['dist', places.c.nm] ) self.assert_compile(q, 'SELECT places.id, places.nm, ' 'zips.zipcode, latlondist((SELECT ' 'zips.latitude FROM zips WHERE ' 'zips.zipcode = :zipcode_1), (SELECT ' 'zips.longitude FROM zips WHERE ' 'zips.zipcode = :zipcode_2)) AS dist FROM ' 'places, zips WHERE zips.zipcode = ' ':zipcode_3 ORDER BY dist, places.nm') zalias = zips.alias('main_zip') qlat = select([zips.c.latitude], zips.c.zipcode == zalias.c.zipcode).\ as_scalar() qlng = select([zips.c.longitude], zips.c.zipcode == zalias.c.zipcode).\ as_scalar() q = select([places.c.id, places.c.nm, zalias.c.zipcode, func.latlondist(qlat, qlng).label('dist')], order_by=['dist', places.c.nm]) self.assert_compile(q, 'SELECT places.id, places.nm, ' 'main_zip.zipcode, latlondist((SELECT ' 'zips.latitude FROM zips WHERE ' 'zips.zipcode = main_zip.zipcode), (SELECT ' 'zips.longitude FROM zips WHERE ' 'zips.zipcode = main_zip.zipcode)) AS dist ' 'FROM places, zips AS main_zip ORDER BY ' 'dist, places.nm') a1 = table2.alias('t2alias') s1 = select([a1.c.otherid], table1.c.myid == a1.c.otherid).as_scalar() j1 = table1.join(table2, table1.c.myid == table2.c.otherid) s2 = select([table1, s1], from_obj=j1) self.assert_compile(s2, 'SELECT mytable.myid, mytable.name, ' 'mytable.description, (SELECT ' 't2alias.otherid FROM myothertable AS ' 't2alias WHERE mytable.myid = ' 't2alias.otherid) AS anon_1 FROM mytable ' 'JOIN myothertable ON mytable.myid = ' 'myothertable.otherid') def test_label_comparison(self): x = func.lala(table1.c.myid).label('foo') self.assert_compile(select([x], x == 5), 'SELECT lala(mytable.myid) AS foo FROM ' 'mytable WHERE lala(mytable.myid) = ' ':param_1') self.assert_compile( label('bar', column('foo', type_=String)) + 'foo', 'foo || :param_1') def test_conjunctions(self): a, b, c = 'a', 'b', 'c' x = and_(a, b, c) assert isinstance(x.type, Boolean) assert str(x) == 'a AND b AND c' self.assert_compile( select([x.label('foo')]), 'SELECT a AND b AND c AS foo' ) self.assert_compile( and_(table1.c.myid == 12, table1.c.name == 'asdf', table2.c.othername == 'foo', "sysdate() = today()"), "mytable.myid = :myid_1 AND mytable.name = :name_1 "\ "AND myothertable.othername = " ":othername_1 AND sysdate() = today()" ) self.assert_compile( and_( table1.c.myid == 12, or_(table2.c.othername == 'asdf', table2.c.othername == 'foo', table2.c.otherid == 9), "sysdate() = today()", ), 'mytable.myid = :myid_1 AND (myothertable.othername = ' ':othername_1 OR myothertable.othername = :othername_2 OR ' 'myothertable.otherid = :otherid_1) AND sysdate() = ' 'today()', checkparams={'othername_1': 'asdf', 'othername_2': 'foo', 'otherid_1': 9, 'myid_1': 12} ) def test_nested_conjunctions_short_circuit(self): """test that empty or_(), and_() conjunctions are collapsed by an enclosing conjunction.""" t = table('t', column('x')) self.assert_compile( select([t]).where(and_(t.c.x == 5, or_(and_(or_(t.c.x == 7))))), "SELECT t.x FROM t WHERE t.x = :x_1 AND t.x = :x_2" ) self.assert_compile( select([t]).where(and_(or_(t.c.x == 12, and_(or_(t.c.x == 8))))), "SELECT t.x FROM t WHERE t.x = :x_1 OR t.x = 
:x_2" ) self.assert_compile( select([t]).where(and_(or_(or_(t.c.x == 12), and_(or_(), or_(and_(t.c.x == 8)), and_())))), "SELECT t.x FROM t WHERE t.x = :x_1 OR t.x = :x_2" ) def test_distinct(self): self.assert_compile( select([table1.c.myid.distinct()]), "SELECT DISTINCT mytable.myid FROM mytable" ) self.assert_compile( select([distinct(table1.c.myid)]), "SELECT DISTINCT mytable.myid FROM mytable" ) self.assert_compile( select([table1.c.myid]).distinct(), "SELECT DISTINCT mytable.myid FROM mytable" ) self.assert_compile( select([func.count(table1.c.myid.distinct())]), "SELECT count(DISTINCT mytable.myid) AS count_1 FROM mytable" ) self.assert_compile( select([func.count(distinct(table1.c.myid))]), "SELECT count(DISTINCT mytable.myid) AS count_1 FROM mytable" ) def test_multiple_col_binds(self): self.assert_compile( select(["*"], or_(table1.c.myid == 12, table1.c.myid == 'asdf', table1.c.myid == 'foo')), "SELECT * FROM mytable WHERE mytable.myid = :myid_1 " "OR mytable.myid = :myid_2 OR mytable.myid = :myid_3" ) def test_order_by_nulls(self): self.assert_compile( table2.select(order_by=[table2.c.otherid, table2.c.othername.desc().nullsfirst()]), "SELECT myothertable.otherid, myothertable.othername FROM " "myothertable ORDER BY myothertable.otherid, " "myothertable.othername DESC NULLS FIRST" ) self.assert_compile( table2.select(order_by=[ table2.c.otherid, table2.c.othername.desc().nullslast()]), "SELECT myothertable.otherid, myothertable.othername FROM " "myothertable ORDER BY myothertable.otherid, " "myothertable.othername DESC NULLS LAST" ) self.assert_compile( table2.select(order_by=[ table2.c.otherid.nullslast(), table2.c.othername.desc().nullsfirst()]), "SELECT myothertable.otherid, myothertable.othername FROM " "myothertable ORDER BY myothertable.otherid NULLS LAST, " "myothertable.othername DESC NULLS FIRST" ) self.assert_compile( table2.select(order_by=[table2.c.otherid.nullsfirst(), table2.c.othername.desc()]), "SELECT myothertable.otherid, myothertable.othername FROM " "myothertable ORDER BY myothertable.otherid NULLS FIRST, " "myothertable.othername DESC" ) self.assert_compile( table2.select(order_by=[table2.c.otherid.nullsfirst(), table2.c.othername.desc().nullslast()]), "SELECT myothertable.otherid, myothertable.othername FROM " "myothertable ORDER BY myothertable.otherid NULLS FIRST, " "myothertable.othername DESC NULLS LAST" ) def test_orderby_groupby(self): self.assert_compile( table2.select(order_by=[table2.c.otherid, asc(table2.c.othername)]), "SELECT myothertable.otherid, myothertable.othername FROM " "myothertable ORDER BY myothertable.otherid, " "myothertable.othername ASC" ) self.assert_compile( table2.select(order_by=[table2.c.otherid, table2.c.othername.desc()]), "SELECT myothertable.otherid, myothertable.othername FROM " "myothertable ORDER BY myothertable.otherid, " "myothertable.othername DESC" ) # generative order_by self.assert_compile( table2.select().order_by(table2.c.otherid).\ order_by(table2.c.othername.desc()), "SELECT myothertable.otherid, myothertable.othername FROM " "myothertable ORDER BY myothertable.otherid, " "myothertable.othername DESC" ) self.assert_compile( table2.select().order_by(table2.c.otherid). 
order_by(table2.c.othername.desc() ).order_by(None), "SELECT myothertable.otherid, myothertable.othername " "FROM myothertable" ) self.assert_compile( select( [table2.c.othername, func.count(table2.c.otherid)], group_by=[table2.c.othername]), "SELECT myothertable.othername, " "count(myothertable.otherid) AS count_1 " "FROM myothertable GROUP BY myothertable.othername" ) # generative group by self.assert_compile( select([table2.c.othername, func.count(table2.c.otherid)]). group_by(table2.c.othername), "SELECT myothertable.othername, " "count(myothertable.otherid) AS count_1 " "FROM myothertable GROUP BY myothertable.othername" ) self.assert_compile( select([table2.c.othername, func.count(table2.c.otherid)]). group_by(table2.c.othername).group_by(None), "SELECT myothertable.othername, " "count(myothertable.otherid) AS count_1 " "FROM myothertable" ) self.assert_compile( select([table2.c.othername, func.count(table2.c.otherid)], group_by=[table2.c.othername], order_by=[table2.c.othername]), "SELECT myothertable.othername, " "count(myothertable.otherid) AS count_1 " "FROM myothertable " "GROUP BY myothertable.othername ORDER BY myothertable.othername" ) def test_for_update(self): self.assert_compile( table1.select(table1.c.myid == 7, for_update=True), "SELECT mytable.myid, mytable.name, mytable.description " "FROM mytable WHERE mytable.myid = :myid_1 FOR UPDATE") self.assert_compile( table1.select(table1.c.myid == 7, for_update=False), "SELECT mytable.myid, mytable.name, mytable.description " "FROM mytable WHERE mytable.myid = :myid_1") # not supported by dialect, should just use update self.assert_compile( table1.select(table1.c.myid == 7, for_update='nowait'), "SELECT mytable.myid, mytable.name, mytable.description " "FROM mytable WHERE mytable.myid = :myid_1 FOR UPDATE") # unknown lock mode self.assert_compile( table1.select(table1.c.myid == 7, for_update='unknown_mode'), "SELECT mytable.myid, mytable.name, mytable.description " "FROM mytable WHERE mytable.myid = :myid_1 FOR UPDATE") # ----- mysql self.assert_compile( table1.select(table1.c.myid == 7, for_update=True), "SELECT mytable.myid, mytable.name, mytable.description " "FROM mytable WHERE mytable.myid = %s FOR UPDATE", dialect=mysql.dialect()) self.assert_compile( table1.select(table1.c.myid == 7, for_update="read"), "SELECT mytable.myid, mytable.name, mytable.description " "FROM mytable WHERE mytable.myid = %s LOCK IN SHARE MODE", dialect=mysql.dialect()) # ----- oracle self.assert_compile( table1.select(table1.c.myid == 7, for_update=True), "SELECT mytable.myid, mytable.name, mytable.description " "FROM mytable WHERE mytable.myid = :myid_1 FOR UPDATE", dialect=oracle.dialect()) self.assert_compile( table1.select(table1.c.myid == 7, for_update="nowait"), "SELECT mytable.myid, mytable.name, mytable.description " "FROM mytable WHERE mytable.myid = :myid_1 FOR UPDATE NOWAIT", dialect=oracle.dialect()) # ----- postgresql self.assert_compile( table1.select(table1.c.myid == 7, for_update=True), "SELECT mytable.myid, mytable.name, mytable.description " "FROM mytable WHERE mytable.myid = %(myid_1)s FOR UPDATE", dialect=postgresql.dialect()) self.assert_compile( table1.select(table1.c.myid == 7, for_update="nowait"), "SELECT mytable.myid, mytable.name, mytable.description " "FROM mytable WHERE mytable.myid = %(myid_1)s FOR UPDATE NOWAIT", dialect=postgresql.dialect()) self.assert_compile( table1.select(table1.c.myid == 7, for_update="read"), "SELECT mytable.myid, mytable.name, mytable.description " "FROM mytable WHERE mytable.myid = 
%(myid_1)s FOR SHARE", dialect=postgresql.dialect()) self.assert_compile( table1.select(table1.c.myid == 7, for_update="read_nowait"), "SELECT mytable.myid, mytable.name, mytable.description " "FROM mytable WHERE mytable.myid = %(myid_1)s FOR SHARE NOWAIT", dialect=postgresql.dialect()) def test_alias(self): # test the alias for a table1. column names stay the same, # table name "changes" to "foo". self.assert_compile( select([table1.alias('foo')]), "SELECT foo.myid, foo.name, foo.description FROM mytable AS foo") for dialect in (oracle.dialect(),): self.assert_compile( select([table1.alias('foo')]), "SELECT foo.myid, foo.name, foo.description FROM mytable foo", dialect=dialect) self.assert_compile( select([table1.alias()]), "SELECT mytable_1.myid, mytable_1.name, mytable_1.description " "FROM mytable AS mytable_1") # create a select for a join of two tables. use_labels # means the column names will have labels tablename_columnname, # which become the column keys accessible off the Selectable object. # also, only use one column from the second table and all columns # from the first table1. q = select( [table1, table2.c.otherid], table1.c.myid == table2.c.otherid, use_labels=True ) # make an alias of the "selectable". column names # stay the same (i.e. the labels), table name "changes" to "t2view". a = alias(q, 't2view') # select from that alias, also using labels. two levels of labels # should produce two underscores. # also, reference the column "mytable_myid" off of the t2view alias. self.assert_compile( a.select(a.c.mytable_myid == 9, use_labels=True), "SELECT t2view.mytable_myid AS t2view_mytable_myid, " "t2view.mytable_name " "AS t2view_mytable_name, " "t2view.mytable_description AS t2view_mytable_description, " "t2view.myothertable_otherid AS t2view_myothertable_otherid FROM " "(SELECT mytable.myid AS mytable_myid, " "mytable.name AS mytable_name, " "mytable.description AS mytable_description, " "myothertable.otherid AS " "myothertable_otherid FROM mytable, myothertable " "WHERE mytable.myid = " "myothertable.otherid) AS t2view " "WHERE t2view.mytable_myid = :mytable_myid_1" ) def test_prefix(self): self.assert_compile( table1.select().prefix_with("SQL_CALC_FOUND_ROWS").\ prefix_with("SQL_SOME_WEIRD_MYSQL_THING"), "SELECT SQL_CALC_FOUND_ROWS SQL_SOME_WEIRD_MYSQL_THING " "mytable.myid, mytable.name, mytable.description FROM mytable" ) def test_prefix_dialect_specific(self): self.assert_compile( table1.select().prefix_with("SQL_CALC_FOUND_ROWS", dialect='sqlite').\ prefix_with("SQL_SOME_WEIRD_MYSQL_THING", dialect='mysql'), "SELECT SQL_SOME_WEIRD_MYSQL_THING " "mytable.myid, mytable.name, mytable.description FROM mytable", dialect=mysql.dialect() ) def test_text(self): self.assert_compile( text("select * from foo where lala = bar"), "select * from foo where lala = bar" ) # test bytestring self.assert_compile(select( ["foobar(a)", "pk_foo_bar(syslaal)"], "a = 12", from_obj=["foobar left outer join lala on foobar.foo = lala.foo"] ), "SELECT foobar(a), pk_foo_bar(syslaal) FROM foobar " "left outer join lala on foobar.foo = lala.foo WHERE a = 12" ) # test unicode self.assert_compile(select( [u"foobar(a)", u"pk_foo_bar(syslaal)"], u"a = 12", from_obj=[u"foobar left outer join lala on foobar.foo = lala.foo"] ), "SELECT foobar(a), pk_foo_bar(syslaal) FROM foobar " "left outer join lala on foobar.foo = lala.foo WHERE a = 12" ) # test building a select query programmatically with text s = select() s.append_column("column1") s.append_column("column2") s.append_whereclause("column1=12") 
s.append_whereclause("column2=19") s = s.order_by("column1") s.append_from("table1") self.assert_compile(s, "SELECT column1, column2 FROM table1 WHERE " "column1=12 AND column2=19 ORDER BY column1") self.assert_compile( select(["column1", "column2"], from_obj=table1).alias('somealias').select(), "SELECT somealias.column1, somealias.column2 FROM " "(SELECT column1, column2 FROM mytable) AS somealias" ) # test that use_labels doesnt interfere with literal columns self.assert_compile( select(["column1", "column2", table1.c.myid], from_obj=table1, use_labels=True), "SELECT column1, column2, mytable.myid AS mytable_myid " "FROM mytable" ) # test that use_labels doesnt interfere # with literal columns that have textual labels self.assert_compile( select(["column1 AS foobar", "column2 AS hoho", table1.c.myid], from_obj=table1, use_labels=True), "SELECT column1 AS foobar, column2 AS hoho, " "mytable.myid AS mytable_myid FROM mytable" ) # test that "auto-labeling of subquery columns" # doesnt interfere with literal columns, # exported columns dont get quoted self.assert_compile( select(["column1 AS foobar", "column2 AS hoho", table1.c.myid], from_obj=[table1]).select(), "SELECT column1 AS foobar, column2 AS hoho, myid FROM " "(SELECT column1 AS foobar, column2 AS hoho, " "mytable.myid AS myid FROM mytable)" ) self.assert_compile( select(['col1', 'col2'], from_obj='tablename').alias('myalias'), "SELECT col1, col2 FROM tablename" ) def test_binds_in_text(self): self.assert_compile( text("select * from foo where lala=:bar and hoho=:whee", bindparams=[bindparam('bar', 4), bindparam('whee', 7)]), "select * from foo where lala=:bar and hoho=:whee", checkparams={'bar': 4, 'whee': 7}, ) self.assert_compile( text("select * from foo where clock='05:06:07'"), "select * from foo where clock='05:06:07'", checkparams={}, params={}, ) dialect = postgresql.dialect() self.assert_compile( text("select * from foo where lala=:bar and hoho=:whee", bindparams=[bindparam('bar', 4), bindparam('whee', 7)]), "select * from foo where lala=%(bar)s and hoho=%(whee)s", checkparams={'bar': 4, 'whee': 7}, dialect=dialect ) # test escaping out text() params with a backslash self.assert_compile( text("select * from foo where clock='05:06:07' " "and mork='\:mindy'"), "select * from foo where clock='05:06:07' and mork=':mindy'", checkparams={}, params={}, dialect=dialect ) dialect = sqlite.dialect() self.assert_compile( text("select * from foo where lala=:bar and hoho=:whee", bindparams=[bindparam('bar', 4), bindparam('whee', 7)]), "select * from foo where lala=? 
and hoho=?", checkparams={'bar': 4, 'whee': 7}, dialect=dialect ) self.assert_compile(select( [table1, table2.c.otherid, "sysdate()", "foo, bar, lala"], and_( "foo.id = foofoo(lala)", "datetime(foo) = Today", table1.c.myid == table2.c.otherid, ) ), "SELECT mytable.myid, mytable.name, mytable.description, " "myothertable.otherid, sysdate(), foo, bar, lala " "FROM mytable, myothertable WHERE foo.id = foofoo(lala) AND " "datetime(foo) = Today AND mytable.myid = myothertable.otherid") self.assert_compile(select( [alias(table1, 't'), "foo.f"], "foo.f = t.id", from_obj=["(select f from bar where lala=heyhey) foo"] ), "SELECT t.myid, t.name, t.description, foo.f FROM mytable AS t, " "(select f from bar where lala=heyhey) foo WHERE foo.f = t.id") # test Text embedded within select_from(), using binds generate_series = text( "generate_series(:x, :y, :z) as s(a)", bindparams=[bindparam('x', None), bindparam('y', None), bindparam('z', None)] ) s = select([ (func.current_date() + literal_column("s.a")).label("dates") ]).select_from(generate_series) self.assert_compile( s, "SELECT CURRENT_DATE + s.a AS dates FROM " "generate_series(:x, :y, :z) as s(a)", checkparams={'y': None, 'x': None, 'z': None} ) self.assert_compile( s.params(x=5, y=6, z=7), "SELECT CURRENT_DATE + s.a AS dates FROM " "generate_series(:x, :y, :z) as s(a)", checkparams={'y': 6, 'x': 5, 'z': 7} ) @testing.emits_warning('.*empty sequence.*') def test_render_binds_as_literal(self): """test a compiler that renders binds inline into SQL in the columns clause.""" dialect = default.DefaultDialect() class Compiler(dialect.statement_compiler): ansi_bind_rules = True dialect.statement_compiler = Compiler self.assert_compile( select([literal("someliteral")]), "SELECT 'someliteral' AS anon_1", dialect=dialect ) self.assert_compile( select([table1.c.myid + 3]), "SELECT mytable.myid + 3 AS anon_1 FROM mytable", dialect=dialect ) self.assert_compile( select([table1.c.myid.in_([4, 5, 6])]), "SELECT mytable.myid IN (4, 5, 6) AS anon_1 FROM mytable", dialect=dialect ) self.assert_compile( select([func.mod(table1.c.myid, 5)]), "SELECT mod(mytable.myid, 5) AS mod_1 FROM mytable", dialect=dialect ) self.assert_compile( select([literal("foo").in_([])]), "SELECT 'foo' != 'foo' AS anon_1", dialect=dialect ) assert_raises( exc.CompileError, bindparam("foo").in_([]).compile, dialect=dialect ) def test_literal(self): self.assert_compile(select([literal('foo')]), "SELECT :param_1 AS anon_1") self.assert_compile(select([literal("foo") + literal("bar")], from_obj=[table1]), "SELECT :param_1 || :param_2 AS anon_1 FROM mytable") def test_calculated_columns(self): value_tbl = table('values', column('id', Integer), column('val1', Float), column('val2', Float), ) self.assert_compile( select([value_tbl.c.id, (value_tbl.c.val2 - value_tbl.c.val1) / value_tbl.c.val1]), "SELECT values.id, (values.val2 - values.val1) " "/ values.val1 AS anon_1 FROM values" ) self.assert_compile( select([value_tbl.c.id], (value_tbl.c.val2 - value_tbl.c.val1) / value_tbl.c.val1 > 2.0), "SELECT values.id FROM values WHERE " "(values.val2 - values.val1) / values.val1 > :param_1" ) self.assert_compile( select([value_tbl.c.id], value_tbl.c.val1 / (value_tbl.c.val2 - value_tbl.c.val1) / value_tbl.c.val1 > 2.0), "SELECT values.id FROM values WHERE " "(values.val1 / (values.val2 - values.val1)) " "/ values.val1 > :param_1" ) def test_collate(self): for expr in (select([table1.c.name.collate('latin1_german2_ci')]), select([collate(table1.c.name, 'latin1_german2_ci')])): self.assert_compile( expr, 
"SELECT mytable.name COLLATE latin1_german2_ci " "AS anon_1 FROM mytable") assert table1.c.name.collate('latin1_german2_ci').type is \ table1.c.name.type expr = select([table1.c.name.collate('latin1_german2_ci').\ label('k1')]).order_by('k1') self.assert_compile(expr, "SELECT mytable.name " "COLLATE latin1_german2_ci AS k1 FROM mytable ORDER BY k1") expr = select([collate('foo', 'latin1_german2_ci').label('k1')]) self.assert_compile(expr, "SELECT :param_1 COLLATE latin1_german2_ci AS k1") expr = select([table1.c.name.collate('latin1_german2_ci').like('%x%')]) self.assert_compile(expr, "SELECT mytable.name COLLATE latin1_german2_ci " "LIKE :param_1 AS anon_1 FROM mytable") expr = select([table1.c.name.like(collate('%x%', 'latin1_german2_ci'))]) self.assert_compile(expr, "SELECT mytable.name " "LIKE :param_1 COLLATE latin1_german2_ci AS anon_1 " "FROM mytable") expr = select([table1.c.name.collate('col1').like( collate('%x%', 'col2'))]) self.assert_compile(expr, "SELECT mytable.name COLLATE col1 " "LIKE :param_1 COLLATE col2 AS anon_1 " "FROM mytable") expr = select([func.concat('a', 'b').\ collate('latin1_german2_ci').label('x')]) self.assert_compile(expr, "SELECT concat(:param_1, :param_2) " "COLLATE latin1_german2_ci AS x") expr = select([table1.c.name]).\ order_by(table1.c.name.collate('latin1_german2_ci')) self.assert_compile(expr, "SELECT mytable.name FROM mytable ORDER BY " "mytable.name COLLATE latin1_german2_ci") def test_percent_chars(self): t = table("table%name", column("percent%"), column("%(oneofthese)s"), column("spaces % more spaces"), ) self.assert_compile( t.select(use_labels=True), '''SELECT "table%name"."percent%" AS "table%name_percent%", '''\ '''"table%name"."%(oneofthese)s" AS '''\ '''"table%name_%(oneofthese)s", '''\ '''"table%name"."spaces % more spaces" AS '''\ '''"table%name_spaces % '''\ '''more spaces" FROM "table%name"''' ) def test_joins(self): self.assert_compile( join(table2, table1, table1.c.myid == table2.c.otherid).select(), "SELECT myothertable.otherid, myothertable.othername, " "mytable.myid, mytable.name, mytable.description FROM " "myothertable JOIN mytable ON mytable.myid = myothertable.otherid" ) self.assert_compile( select( [table1], from_obj=[join(table1, table2, table1.c.myid == table2.c.otherid)] ), "SELECT mytable.myid, mytable.name, mytable.description FROM " "mytable JOIN myothertable ON mytable.myid = myothertable.otherid") self.assert_compile( select( [join(join(table1, table2, table1.c.myid == table2.c.otherid), table3, table1.c.myid == table3.c.userid)] ), "SELECT mytable.myid, mytable.name, mytable.description, " "myothertable.otherid, myothertable.othername, " "thirdtable.userid, " "thirdtable.otherstuff FROM mytable JOIN myothertable " "ON mytable.myid =" " myothertable.otherid JOIN thirdtable ON " "mytable.myid = thirdtable.userid" ) self.assert_compile( join(users, addresses, users.c.user_id == addresses.c.user_id).select(), "SELECT users.user_id, users.user_name, users.password, " "addresses.address_id, addresses.user_id, addresses.street, " "addresses.city, addresses.state, addresses.zip " "FROM users JOIN addresses " "ON users.user_id = addresses.user_id" ) self.assert_compile( select([table1, table2, table3], from_obj=[join(table1, table2, table1.c.myid == table2.c.otherid). 
outerjoin(table3, table1.c.myid == table3.c.userid)] ), "SELECT mytable.myid, mytable.name, mytable.description, " "myothertable.otherid, myothertable.othername, " "thirdtable.userid," " thirdtable.otherstuff FROM mytable " "JOIN myothertable ON mytable.myid " "= myothertable.otherid LEFT OUTER JOIN thirdtable " "ON mytable.myid =" " thirdtable.userid" ) self.assert_compile( select([table1, table2, table3], from_obj=[outerjoin(table1, join(table2, table3, table2.c.otherid == table3.c.userid), table1.c.myid == table2.c.otherid)] ), "SELECT mytable.myid, mytable.name, mytable.description, " "myothertable.otherid, myothertable.othername, " "thirdtable.userid," " thirdtable.otherstuff FROM mytable LEFT OUTER JOIN " "(myothertable " "JOIN thirdtable ON myothertable.otherid = " "thirdtable.userid) ON " "mytable.myid = myothertable.otherid" ) query = select( [table1, table2], or_( table1.c.name == 'fred', table1.c.myid == 10, table2.c.othername != 'jack', "EXISTS (select yay from foo where boo = lar)" ), from_obj=[outerjoin(table1, table2, table1.c.myid == table2.c.otherid)] ) self.assert_compile(query, "SELECT mytable.myid, mytable.name, mytable.description, " "myothertable.otherid, myothertable.othername " "FROM mytable LEFT OUTER JOIN myothertable ON mytable.myid = " "myothertable.otherid WHERE mytable.name = :name_1 OR " "mytable.myid = :myid_1 OR myothertable.othername != :othername_1 " "OR EXISTS (select yay from foo where boo = lar)", ) def test_compound_selects(self): assert_raises_message( exc.ArgumentError, "All selectables passed to CompoundSelect " "must have identical numbers of columns; " "select #1 has 2 columns, select #2 has 3", union, table3.select(), table1.select() ) x = union( select([table1], table1.c.myid == 5), select([table1], table1.c.myid == 12), order_by=[table1.c.myid], ) self.assert_compile(x, "SELECT mytable.myid, mytable.name, " "mytable.description " "FROM mytable WHERE " "mytable.myid = :myid_1 UNION " "SELECT mytable.myid, mytable.name, mytable.description " "FROM mytable WHERE mytable.myid = :myid_2 " "ORDER BY mytable.myid") x = union( select([table1]), select([table1]) ) x = union(x, select([table1])) self.assert_compile(x, "(SELECT mytable.myid, mytable.name, mytable.description " "FROM mytable UNION SELECT mytable.myid, mytable.name, " "mytable.description FROM mytable) UNION SELECT mytable.myid," " mytable.name, mytable.description FROM mytable") u1 = union( select([table1.c.myid, table1.c.name]), select([table2]), select([table3]) ) self.assert_compile(u1, "SELECT mytable.myid, mytable.name " "FROM mytable UNION SELECT myothertable.otherid, " "myothertable.othername FROM myothertable " "UNION SELECT thirdtable.userid, thirdtable.otherstuff " "FROM thirdtable") assert u1.corresponding_column(table2.c.otherid) is u1.c.myid self.assert_compile( union( select([table1.c.myid, table1.c.name]), select([table2]), order_by=['myid'], offset=10, limit=5 ), "SELECT mytable.myid, mytable.name " "FROM mytable UNION SELECT myothertable.otherid, " "myothertable.othername " "FROM myothertable ORDER BY myid LIMIT :param_1 OFFSET :param_2", {'param_1': 5, 'param_2': 10} ) self.assert_compile( union( select([table1.c.myid, table1.c.name, func.max(table1.c.description)], table1.c.name == 'name2', group_by=[table1.c.myid, table1.c.name]), table1.select(table1.c.name == 'name1') ), "SELECT mytable.myid, mytable.name, " "max(mytable.description) AS max_1 " "FROM mytable WHERE mytable.name = :name_1 " "GROUP BY mytable.myid, " "mytable.name UNION SELECT mytable.myid, 
mytable.name, " "mytable.description " "FROM mytable WHERE mytable.name = :name_2" ) self.assert_compile( union( select([literal(100).label('value')]), select([literal(200).label('value')]) ), "SELECT :param_1 AS value UNION SELECT :param_2 AS value" ) self.assert_compile( union_all( select([table1.c.myid]), union( select([table2.c.otherid]), select([table3.c.userid]), ) ), "SELECT mytable.myid FROM mytable UNION ALL " "(SELECT myothertable.otherid FROM myothertable UNION " "SELECT thirdtable.userid FROM thirdtable)" ) s = select([column('foo'), column('bar')]) # ORDER BY's even though not supported by # all DB's, are rendered if requested self.assert_compile(union(s.order_by("foo"), s.order_by("bar")), "SELECT foo, bar ORDER BY foo UNION SELECT foo, bar ORDER BY bar" ) # self_group() is honored self.assert_compile( union(s.order_by("foo").self_group(), s.order_by("bar").limit(10).self_group()), "(SELECT foo, bar ORDER BY foo) UNION (SELECT foo, " "bar ORDER BY bar LIMIT :param_1)", {'param_1': 10} ) def test_compound_grouping(self): s = select([column('foo'), column('bar')]).select_from('bat') self.assert_compile( union(union(union(s, s), s), s), "((SELECT foo, bar FROM bat UNION SELECT foo, bar FROM bat) " "UNION SELECT foo, bar FROM bat) UNION SELECT foo, bar FROM bat" ) self.assert_compile( union(s, s, s, s), "SELECT foo, bar FROM bat UNION SELECT foo, bar " "FROM bat UNION SELECT foo, bar FROM bat " "UNION SELECT foo, bar FROM bat" ) self.assert_compile( union(s, union(s, union(s, s))), "SELECT foo, bar FROM bat UNION (SELECT foo, bar FROM bat " "UNION (SELECT foo, bar FROM bat " "UNION SELECT foo, bar FROM bat))" ) self.assert_compile( select([s.alias()]), 'SELECT anon_1.foo, anon_1.bar FROM ' '(SELECT foo, bar FROM bat) AS anon_1' ) self.assert_compile( select([union(s, s).alias()]), 'SELECT anon_1.foo, anon_1.bar FROM ' '(SELECT foo, bar FROM bat UNION ' 'SELECT foo, bar FROM bat) AS anon_1' ) self.assert_compile( select([except_(s, s).alias()]), 'SELECT anon_1.foo, anon_1.bar FROM ' '(SELECT foo, bar FROM bat EXCEPT ' 'SELECT foo, bar FROM bat) AS anon_1' ) # this query sqlite specifically chokes on self.assert_compile( union( except_(s, s), s ), "(SELECT foo, bar FROM bat EXCEPT SELECT foo, bar FROM bat) " "UNION SELECT foo, bar FROM bat" ) self.assert_compile( union( s, except_(s, s), ), "SELECT foo, bar FROM bat " "UNION (SELECT foo, bar FROM bat EXCEPT SELECT foo, bar FROM bat)" ) # this solves it self.assert_compile( union( except_(s, s).alias().select(), s ), "SELECT anon_1.foo, anon_1.bar FROM " "(SELECT foo, bar FROM bat EXCEPT " "SELECT foo, bar FROM bat) AS anon_1 " "UNION SELECT foo, bar FROM bat" ) self.assert_compile( except_( union(s, s), union(s, s) ), "(SELECT foo, bar FROM bat UNION SELECT foo, bar FROM bat) " "EXCEPT (SELECT foo, bar FROM bat UNION SELECT foo, bar FROM bat)" ) s2 = union(s, s) s3 = union(s2, s2) self.assert_compile(s3, "(SELECT foo, bar FROM bat " "UNION SELECT foo, bar FROM bat) " "UNION (SELECT foo, bar FROM bat " "UNION SELECT foo, bar FROM bat)") self.assert_compile( union( intersect(s, s), intersect(s, s) ), "(SELECT foo, bar FROM bat INTERSECT SELECT foo, bar FROM bat) " "UNION (SELECT foo, bar FROM bat INTERSECT " "SELECT foo, bar FROM bat)" ) def test_binds(self): for ( stmt, expected_named_stmt, expected_positional_stmt, expected_default_params_dict, expected_default_params_list, test_param_dict, expected_test_params_dict, expected_test_params_list ) in [ ( select( [table1, table2], and_( table1.c.myid == table2.c.otherid, table1.c.name 
== bindparam('mytablename') )), "SELECT mytable.myid, mytable.name, mytable.description, " "myothertable.otherid, myothertable.othername FROM mytable, " "myothertable WHERE mytable.myid = myothertable.otherid " "AND mytable.name = :mytablename", "SELECT mytable.myid, mytable.name, mytable.description, " "myothertable.otherid, myothertable.othername FROM mytable, " "myothertable WHERE mytable.myid = myothertable.otherid AND " "mytable.name = ?", {'mytablename':None}, [None], {'mytablename':5}, {'mytablename':5}, [5] ), ( select([table1], or_(table1.c.myid == bindparam('myid'), table2.c.otherid == bindparam('myid'))), "SELECT mytable.myid, mytable.name, mytable.description " "FROM mytable, myothertable WHERE mytable.myid = :myid " "OR myothertable.otherid = :myid", "SELECT mytable.myid, mytable.name, mytable.description " "FROM mytable, myothertable WHERE mytable.myid = ? " "OR myothertable.otherid = ?", {'myid': None}, [None, None], {'myid': 5}, {'myid': 5}, [5, 5] ), ( text("SELECT mytable.myid, mytable.name, mytable.description FROM " "mytable, myothertable WHERE mytable.myid = :myid OR " "myothertable.otherid = :myid"), "SELECT mytable.myid, mytable.name, mytable.description FROM " "mytable, myothertable WHERE mytable.myid = :myid OR " "myothertable.otherid = :myid", "SELECT mytable.myid, mytable.name, mytable.description FROM " "mytable, myothertable WHERE mytable.myid = ? OR " "myothertable.otherid = ?", {'myid':None}, [None, None], {'myid': 5}, {'myid': 5}, [5, 5] ), ( select([table1], or_(table1.c.myid == bindparam('myid', unique=True), table2.c.otherid == bindparam('myid', unique=True))), "SELECT mytable.myid, mytable.name, mytable.description FROM " "mytable, myothertable WHERE mytable.myid = " ":myid_1 OR myothertable.otherid = :myid_2", "SELECT mytable.myid, mytable.name, mytable.description FROM " "mytable, myothertable WHERE mytable.myid = ? " "OR myothertable.otherid = ?", {'myid_1':None, 'myid_2':None}, [None, None], {'myid_1': 5, 'myid_2': 6}, {'myid_1': 5, 'myid_2': 6}, [5, 6] ), ( bindparam('test', type_=String, required=False) + text("'hi'"), ":test || 'hi'", "? || 'hi'", {'test':None}, [None], {}, {'test':None}, [None] ), ( # testing select.params() here - bindparam() objects # must get required flag set to False select([table1], or_(table1.c.myid == bindparam('myid'), table2.c.otherid == bindparam('myotherid'))).\ params({'myid':8, 'myotherid':7}), "SELECT mytable.myid, mytable.name, mytable.description FROM " "mytable, myothertable WHERE mytable.myid = " ":myid OR myothertable.otherid = :myotherid", "SELECT mytable.myid, mytable.name, mytable.description FROM " "mytable, myothertable WHERE mytable.myid = " "? OR myothertable.otherid = ?", {'myid': 8, 'myotherid': 7}, [8, 7], {'myid': 5}, {'myid': 5, 'myotherid': 7}, [5, 7] ), ( select([table1], or_(table1.c.myid == bindparam('myid', value=7, unique=True), table2.c.otherid == bindparam('myid', value=8, unique=True))), "SELECT mytable.myid, mytable.name, mytable.description FROM " "mytable, myothertable WHERE mytable.myid = " ":myid_1 OR myothertable.otherid = :myid_2", "SELECT mytable.myid, mytable.name, mytable.description FROM " "mytable, myothertable WHERE mytable.myid = " "? 
OR myothertable.otherid = ?", {'myid_1': 7, 'myid_2': 8}, [7, 8], {'myid_1': 5, 'myid_2': 6}, {'myid_1': 5, 'myid_2': 6}, [5, 6] ), ]: self.assert_compile(stmt, expected_named_stmt, params=expected_default_params_dict) self.assert_compile(stmt, expected_positional_stmt, dialect=sqlite.dialect()) nonpositional = stmt.compile() positional = stmt.compile(dialect=sqlite.dialect()) pp = positional.params eq_([pp[k] for k in positional.positiontup], expected_default_params_list) eq_(nonpositional.construct_params(test_param_dict), expected_test_params_dict) pp = positional.construct_params(test_param_dict) eq_( [pp[k] for k in positional.positiontup], expected_test_params_list ) # check that params() doesnt modify original statement s = select([table1], or_(table1.c.myid == bindparam('myid'), table2.c.otherid == bindparam('myotherid'))) s2 = s.params({'myid': 8, 'myotherid': 7}) s3 = s2.params({'myid': 9}) assert s.compile().params == {'myid': None, 'myotherid': None} assert s2.compile().params == {'myid': 8, 'myotherid': 7} assert s3.compile().params == {'myid': 9, 'myotherid': 7} # test using same 'unique' param object twice in one compile s = select([table1.c.myid]).where(table1.c.myid == 12).as_scalar() s2 = select([table1, s], table1.c.myid == s) self.assert_compile(s2, "SELECT mytable.myid, mytable.name, mytable.description, " "(SELECT mytable.myid FROM mytable WHERE mytable.myid = "\ ":myid_1) AS anon_1 FROM mytable WHERE mytable.myid = " "(SELECT mytable.myid FROM mytable WHERE mytable.myid = :myid_1)") positional = s2.compile(dialect=sqlite.dialect()) pp = positional.params assert [pp[k] for k in positional.positiontup] == [12, 12] # check that conflicts with "unique" params are caught s = select([table1], or_(table1.c.myid == 7, table1.c.myid == bindparam('myid_1'))) assert_raises_message(exc.CompileError, "conflicts with unique bind parameter " "of the same name", str, s) s = select([table1], or_(table1.c.myid == 7, table1.c.myid == 8, table1.c.myid == bindparam('myid_1'))) assert_raises_message(exc.CompileError, "conflicts with unique bind parameter " "of the same name", str, s) def _test_binds_no_hash_collision(self): """test that construct_params doesn't corrupt dict due to hash collisions""" total_params = 100000 in_clause = [':in%d' % i for i in range(total_params)] params = dict(('in%d' % i, i) for i in range(total_params)) t = text('text clause %s' % ', '.join(in_clause)) eq_(len(t.bindparams), total_params) c = t.compile() pp = c.construct_params(params) eq_(len(set(pp)), total_params, '%s %s' % (len(set(pp)), len(pp))) eq_(len(set(pp.values())), total_params) def test_bind_as_col(self): t = table('foo', column('id')) s = select([t, literal('lala').label('hoho')]) self.assert_compile(s, "SELECT foo.id, :param_1 AS hoho FROM foo") assert [str(c) for c in s.c] == ["id", "hoho"] def test_bind_callable(self): expr = column('x') == bindparam("key", callable_=lambda: 12) self.assert_compile( expr, "x = :key", {'x': 12} ) def test_bind_params_missing(self): assert_raises_message(exc.InvalidRequestError, r"A value is required for bind parameter 'x'", select([table1]).where( and_( table1.c.myid == bindparam("x", required=True), table1.c.name == bindparam("y", required=True) ) ).compile().construct_params, params=dict(y=5) ) assert_raises_message(exc.InvalidRequestError, r"A value is required for bind parameter 'x'", select([table1]).where( table1.c.myid == bindparam("x", required=True) ).compile().construct_params ) assert_raises_message(exc.InvalidRequestError, r"A value is required 
for bind parameter 'x', " "in parameter group 2", select([table1]).where( and_( table1.c.myid == bindparam("x", required=True), table1.c.name == bindparam("y", required=True) ) ).compile().construct_params, params=dict(y=5), _group_number=2 ) assert_raises_message(exc.InvalidRequestError, r"A value is required for bind parameter 'x', " "in parameter group 2", select([table1]).where( table1.c.myid == bindparam("x", required=True) ).compile().construct_params, _group_number=2 ) def test_tuple(self): self.assert_compile( tuple_(table1.c.myid, table1.c.name).in_( [(1, 'foo'), (5, 'bar')]), "(mytable.myid, mytable.name) IN " "((:param_1, :param_2), (:param_3, :param_4))" ) self.assert_compile( tuple_(table1.c.myid, table1.c.name).in_( [tuple_(table2.c.otherid, table2.c.othername)] ), "(mytable.myid, mytable.name) IN " "((myothertable.otherid, myothertable.othername))" ) self.assert_compile( tuple_(table1.c.myid, table1.c.name).in_( select([table2.c.otherid, table2.c.othername]) ), "(mytable.myid, mytable.name) IN (SELECT " "myothertable.otherid, myothertable.othername FROM myothertable)" ) def test_cast(self): tbl = table('casttest', column('id', Integer), column('v1', Float), column('v2', Float), column('ts', TIMESTAMP), ) def check_results(dialect, expected_results, literal): eq_(len(expected_results), 5, 'Incorrect number of expected results') eq_(str(cast(tbl.c.v1, Numeric).compile(dialect=dialect)), 'CAST(casttest.v1 AS %s)' % expected_results[0]) eq_(str(cast(tbl.c.v1, Numeric(12, 9)).compile(dialect=dialect)), 'CAST(casttest.v1 AS %s)' % expected_results[1]) eq_(str(cast(tbl.c.ts, Date).compile(dialect=dialect)), 'CAST(casttest.ts AS %s)' % expected_results[2]) eq_(str(cast(1234, Text).compile(dialect=dialect)), 'CAST(%s AS %s)' % (literal, expected_results[3])) eq_(str(cast('test', String(20)).compile(dialect=dialect)), 'CAST(%s AS %s)' % (literal, expected_results[4])) # fixme: shoving all of this dialect-specific stuff in one test # is now officialy completely ridiculous AND non-obviously omits # coverage on other dialects. 
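# [Illustrative sketch, not part of the original test module.]
# Plain usage example of the cast() construct verified by check_results():
# cast(expr, type_) wraps an expression in CAST(expr AS typename), with the
# type name chosen by the active dialect. The table name below is invented.
from sqlalchemy import MetaData, Table, Column, String, Numeric, cast, select
_cast_md = MetaData()
_item = Table('demo_item', _cast_md, Column('price', String(20)))
# on the default dialect renders roughly as:
# SELECT CAST(demo_item.price AS NUMERIC(10, 2)) AS anon_1 FROM demo_item
_cast_stmt = select([cast(_item.c.price, Numeric(10, 2))])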
sel = select([tbl, cast(tbl.c.v1, Numeric)]).compile(dialect=dialect) if isinstance(dialect, type(mysql.dialect())): eq_(str(sel), "SELECT casttest.id, casttest.v1, casttest.v2, casttest.ts, " "CAST(casttest.v1 AS DECIMAL) AS anon_1 \nFROM casttest") else: eq_(str(sel), "SELECT casttest.id, casttest.v1, casttest.v2, " "casttest.ts, CAST(casttest.v1 AS NUMERIC) AS " "anon_1 \nFROM casttest") # first test with PostgreSQL engine check_results(postgresql.dialect(), ['NUMERIC', 'NUMERIC(12, 9)', 'DATE', 'TEXT', 'VARCHAR(20)'], '%(param_1)s') # then the Oracle engine check_results(oracle.dialect(), ['NUMERIC', 'NUMERIC(12, 9)', 'DATE', 'CLOB', 'VARCHAR2(20 CHAR)'], ':param_1') # then the sqlite engine check_results(sqlite.dialect(), ['NUMERIC', 'NUMERIC(12, 9)', 'DATE', 'TEXT', 'VARCHAR(20)'], '?') # then the MySQL engine check_results(mysql.dialect(), ['DECIMAL', 'DECIMAL(12, 9)', 'DATE', 'CHAR', 'CHAR(20)'], '%s') self.assert_compile(cast(text('NULL'), Integer), 'CAST(NULL AS INTEGER)', dialect=sqlite.dialect()) self.assert_compile(cast(null(), Integer), 'CAST(NULL AS INTEGER)', dialect=sqlite.dialect()) self.assert_compile(cast(literal_column('NULL'), Integer), 'CAST(NULL AS INTEGER)', dialect=sqlite.dialect()) def test_over(self): self.assert_compile( func.row_number().over(), "row_number() OVER ()" ) self.assert_compile( func.row_number().over( order_by=[table1.c.name, table1.c.description] ), "row_number() OVER (ORDER BY mytable.name, mytable.description)" ) self.assert_compile( func.row_number().over( partition_by=[table1.c.name, table1.c.description] ), "row_number() OVER (PARTITION BY mytable.name, " "mytable.description)" ) self.assert_compile( func.row_number().over( partition_by=[table1.c.name], order_by=[table1.c.description] ), "row_number() OVER (PARTITION BY mytable.name " "ORDER BY mytable.description)" ) self.assert_compile( func.row_number().over( partition_by=table1.c.name, order_by=table1.c.description ), "row_number() OVER (PARTITION BY mytable.name " "ORDER BY mytable.description)" ) self.assert_compile( func.row_number().over( partition_by=table1.c.name, order_by=[table1.c.name, table1.c.description] ), "row_number() OVER (PARTITION BY mytable.name " "ORDER BY mytable.name, mytable.description)" ) self.assert_compile( func.row_number().over( partition_by=[], order_by=[table1.c.name, table1.c.description] ), "row_number() OVER (ORDER BY mytable.name, mytable.description)" ) self.assert_compile( func.row_number().over( partition_by=[table1.c.name, table1.c.description], order_by=[] ), "row_number() OVER (PARTITION BY mytable.name, " "mytable.description)" ) self.assert_compile( func.row_number().over( partition_by=[], order_by=[] ), "row_number() OVER ()" ) self.assert_compile( select([func.row_number().over( order_by=table1.c.description ).label('foo')]), "SELECT row_number() OVER (ORDER BY mytable.description) " "AS foo FROM mytable" ) # test from_obj generation. 
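# [Illustrative sketch, not part of the original test module.]
# Rough usage example of the over() construct exercised above: a window
# function with PARTITION BY / ORDER BY attached to an aggregate. The table
# below is invented for illustration.
from sqlalchemy import MetaData, Table, Column, Integer, String, select, func
_emp_md = MetaData()
_emp = Table('demo_emp', _emp_md,
             Column('id', Integer, primary_key=True),
             Column('dept', String(30)),
             Column('salary', Integer))
# renders roughly as:
# SELECT demo_emp.id, rank() OVER (PARTITION BY demo_emp.dept
#   ORDER BY demo_emp.salary DESC) AS anon_1 FROM demo_emp
_ranked = select([_emp.c.id,
                  func.rank().over(partition_by=_emp.c.dept,
                                   order_by=_emp.c.salary.desc())])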
# from func: self.assert_compile( select([ func.max(table1.c.name).over( partition_by=['foo'] ) ]), "SELECT max(mytable.name) OVER (PARTITION BY foo) " "AS anon_1 FROM mytable" ) # from partition_by self.assert_compile( select([ func.row_number().over( partition_by=[table1.c.name] ) ]), "SELECT row_number() OVER (PARTITION BY mytable.name) " "AS anon_1 FROM mytable" ) # from order_by self.assert_compile( select([ func.row_number().over( order_by=table1.c.name ) ]), "SELECT row_number() OVER (ORDER BY mytable.name) " "AS anon_1 FROM mytable" ) # this tests that _from_objects # concantenates OK self.assert_compile( select([column("x") + over(func.foo())]), "SELECT x + foo() OVER () AS anon_1" ) def test_date_between(self): import datetime table = Table('dt', metadata, Column('date', Date)) self.assert_compile( table.select(table.c.date.between(datetime.date(2006, 6, 1), datetime.date(2006, 6, 5))), "SELECT dt.date FROM dt WHERE dt.date BETWEEN :date_1 AND :date_2", checkparams={'date_1': datetime.date(2006, 6, 1), 'date_2': datetime.date(2006, 6, 5)}) self.assert_compile( table.select(sql.between(table.c.date, datetime.date(2006, 6, 1), datetime.date(2006, 6, 5))), "SELECT dt.date FROM dt WHERE dt.date BETWEEN :date_1 AND :date_2", checkparams={'date_1': datetime.date(2006, 6, 1), 'date_2': datetime.date(2006, 6, 5)}) def test_delayed_col_naming(self): my_str = Column(String) sel1 = select([my_str]) assert_raises_message( exc.InvalidRequestError, "Cannot initialize a sub-selectable with this Column", lambda: sel1.c ) # calling label or as_scalar doesn't compile # anything. sel2 = select([func.substr(my_str, 2, 3)]).label('my_substr') assert_raises_message( exc.CompileError, "Cannot compile Column object until its 'name' is assigned.", str, sel2 ) sel3 = select([my_str]).as_scalar() assert_raises_message( exc.CompileError, "Cannot compile Column object until its 'name' is assigned.", str, sel3 ) my_str.name = 'foo' self.assert_compile( sel1, "SELECT foo", ) self.assert_compile( sel2, '(SELECT substr(foo, :substr_2, :substr_3) AS substr_1)', ) self.assert_compile( sel3, "(SELECT foo)" ) def test_naming(self): f1 = func.hoho(table1.c.name) s1 = select([table1.c.myid, table1.c.myid.label('foobar'), f1, func.lala(table1.c.name).label('gg')]) eq_( s1.c.keys(), ['myid', 'foobar', str(f1), 'gg'] ) meta = MetaData() t1 = Table('mytable', meta, Column('col1', Integer)) exprs = ( table1.c.myid == 12, func.hoho(table1.c.myid), cast(table1.c.name, Numeric) ) for col, key, expr, label in ( (table1.c.name, 'name', 'mytable.name', None), (exprs[0], str(exprs[0]), 'mytable.myid = :myid_1', 'anon_1'), (exprs[1], str(exprs[1]), 'hoho(mytable.myid)', 'hoho_1'), (exprs[2], str(exprs[2]), 'CAST(mytable.name AS NUMERIC)', 'anon_1'), (t1.c.col1, 'col1', 'mytable.col1', None), (column('some wacky thing'), 'some wacky thing', '"some wacky thing"', '') ): if getattr(col, 'table', None) is not None: t = col.table else: t = table1 s1 = select([col], from_obj=t) assert s1.c.keys() == [key], s1.c.keys() if label: self.assert_compile(s1, "SELECT %s AS %s FROM mytable" % (expr, label)) else: self.assert_compile(s1, "SELECT %s FROM mytable" % (expr,)) s1 = select([s1]) if label: self.assert_compile(s1, "SELECT %s FROM (SELECT %s AS %s FROM mytable)" % (label, expr, label)) elif col.table is not None: # sqlite rule labels subquery columns self.assert_compile(s1, "SELECT %s FROM (SELECT %s AS %s FROM mytable)" % (key, expr, key)) else: self.assert_compile(s1, "SELECT %s FROM (SELECT %s FROM mytable)" % (expr, expr)) def 
test_hints(self): s = select([table1.c.myid]).with_hint(table1, "test hint %(name)s") s2 = select([table1.c.myid]).\ with_hint(table1, "index(%(name)s idx)", 'oracle').\ with_hint(table1, "WITH HINT INDEX idx", 'sybase') a1 = table1.alias() s3 = select([a1.c.myid]).with_hint(a1, "index(%(name)s hint)") subs4 = select([ table1, table2 ]).select_from(table1.join(table2, table1.c.myid == table2.c.otherid)).\ with_hint(table1, 'hint1') s4 = select([table3]).select_from( table3.join( subs4, subs4.c.othername == table3.c.otherstuff ) ).\ with_hint(table3, 'hint3') t1 = table('QuotedName', column('col1')) s6 = select([t1.c.col1]).where(t1.c.col1 > 10).\ with_hint(t1, '%(name)s idx1') a2 = t1.alias('SomeName') s7 = select([a2.c.col1]).where(a2.c.col1 > 10).\ with_hint(a2, '%(name)s idx1') mysql_d, oracle_d, sybase_d = \ mysql.dialect(), \ oracle.dialect(), \ sybase.dialect() for stmt, dialect, expected in [ (s, mysql_d, "SELECT mytable.myid FROM mytable test hint mytable"), (s, oracle_d, "SELECT /*+ test hint mytable */ mytable.myid FROM mytable"), (s, sybase_d, "SELECT mytable.myid FROM mytable test hint mytable"), (s2, mysql_d, "SELECT mytable.myid FROM mytable"), (s2, oracle_d, "SELECT /*+ index(mytable idx) */ mytable.myid FROM mytable"), (s2, sybase_d, "SELECT mytable.myid FROM mytable WITH HINT INDEX idx"), (s3, mysql_d, "SELECT mytable_1.myid FROM mytable AS mytable_1 " "index(mytable_1 hint)"), (s3, oracle_d, "SELECT /*+ index(mytable_1 hint) */ mytable_1.myid FROM " "mytable mytable_1"), (s3, sybase_d, "SELECT mytable_1.myid FROM mytable AS mytable_1 " "index(mytable_1 hint)"), (s4, mysql_d, "SELECT thirdtable.userid, thirdtable.otherstuff FROM thirdtable " "hint3 INNER JOIN (SELECT mytable.myid, mytable.name, " "mytable.description, myothertable.otherid, " "myothertable.othername FROM mytable hint1 INNER " "JOIN myothertable ON mytable.myid = myothertable.otherid) " "ON othername = thirdtable.otherstuff"), (s4, sybase_d, "SELECT thirdtable.userid, thirdtable.otherstuff FROM thirdtable " "hint3 JOIN (SELECT mytable.myid, mytable.name, " "mytable.description, myothertable.otherid, " "myothertable.othername FROM mytable hint1 " "JOIN myothertable ON mytable.myid = myothertable.otherid) " "ON othername = thirdtable.otherstuff"), (s4, oracle_d, "SELECT /*+ hint3 */ thirdtable.userid, thirdtable.otherstuff " "FROM thirdtable JOIN (SELECT /*+ hint1 */ mytable.myid," " mytable.name, mytable.description, myothertable.otherid," " myothertable.othername FROM mytable JOIN myothertable ON" " mytable.myid = myothertable.otherid) ON othername =" " thirdtable.otherstuff"), # TODO: figure out dictionary ordering solution here # (s5, oracle_d, # "SELECT /*+ hint3 */ /*+ hint1 */ thirdtable.userid, " # "thirdtable.otherstuff " # "FROM thirdtable JOIN (SELECT mytable.myid," # " mytable.name, mytable.description, myothertable.otherid," # " myothertable.othername FROM mytable JOIN myothertable ON" # " mytable.myid = myothertable.otherid) ON othername =" # " thirdtable.otherstuff"), (s6, oracle_d, """SELECT /*+ "QuotedName" idx1 */ "QuotedName".col1 """ """FROM "QuotedName" WHERE "QuotedName".col1 > :col1_1"""), (s7, oracle_d, """SELECT /*+ SomeName idx1 */ "SomeName".col1 FROM """ """"QuotedName" "SomeName" WHERE "SomeName".col1 > :col1_1"""), ]: self.assert_compile( stmt, expected, dialect=dialect ) def test_literal_as_text_fromstring(self): self.assert_compile( and_("a", "b"), "a AND b" ) def test_literal_as_text_nonstring_raise(self): assert_raises(exc.ArgumentError, and_, ("a",), ("b",) ) class 
UnsupportedTest(fixtures.TestBase): def test_unsupported_element_str_visit_name(self): from sqlalchemy.sql.expression import ClauseElement class SomeElement(ClauseElement): __visit_name__ = 'some_element' assert_raises_message( exc.UnsupportedCompilationError, r"Compiler ", SomeElement().compile ) def test_unsupported_element_meth_visit_name(self): from sqlalchemy.sql.expression import ClauseElement class SomeElement(ClauseElement): @classmethod def __visit_name__(cls): return "some_element" assert_raises_message( exc.UnsupportedCompilationError, r"Compiler ", SomeElement().compile ) def test_unsupported_operator(self): from sqlalchemy.sql.expression import BinaryExpression def myop(x, y): pass binary = BinaryExpression(column("foo"), column("bar"), myop) assert_raises_message( exc.UnsupportedCompilationError, r"Compiler ", binary.compile ) Table('foo', metadata, Column('id', Integer, primary_key=True), Column('x', Integer), Column('y', Integer), CheckConstraint('x>y')) Table('bar', metadata, Column('id', Integer, primary_key=True), Column('x', Integer, CheckConstraint('x>7')), Column('z', Integer) ) self.assert_sql_execution( testing.db, lambda: metadata.create_all(checkfirst=False), AllOf( CompiledSQL('CREATE TABLE foo (' 'id INTEGER NOT NULL, ' 'x INTEGER, ' 'y INTEGER, ' 'PRIMARY KEY (id), ' 'CHECK (x>y)' ')' ), CompiledSQL('CREATE TABLE bar (' 'id INTEGER NOT NULL, ' 'x INTEGER CHECK (x>7), ' 'z INTEGER, ' 'PRIMARY KEY (id)' ')' ) ) ) @testing.provide_metadata def test_unique_constraint_create(self): metadata = self.metadata Table('foo', metadata, Column('id', Integer, primary_key=True), Column('value', String(30), unique=True)) Table('bar', metadata, Column('id', Integer, primary_key=True), Column('value', String(30)), Column('value2', String(30)), UniqueConstraint('value', 'value2', name='uix1') ) self.assert_sql_execution( testing.db, lambda: metadata.create_all(checkfirst=False), AllOf( CompiledSQL('CREATE TABLE foo (' 'id INTEGER NOT NULL, ' 'value VARCHAR(30), ' 'PRIMARY KEY (id), ' 'UNIQUE (value)' ')'), CompiledSQL('CREATE TABLE bar (' 'id INTEGER NOT NULL, ' 'value VARCHAR(30), ' 'value2 VARCHAR(30), ' 'PRIMARY KEY (id), ' 'CONSTRAINT uix1 UNIQUE (value, value2)' ')') ) ) @testing.provide_metadata def test_index_create(self): metadata = self.metadata employees = Table('employees', metadata, Column('id', Integer, primary_key=True), Column('first_name', String(30)), Column('last_name', String(30)), Column('email_address', String(30))) i = Index('employee_name_index', employees.c.last_name, employees.c.first_name) assert i in employees.indexes i2 = Index('employee_email_index', employees.c.email_address, unique=True) assert i2 in employees.indexes self.assert_sql_execution( testing.db, lambda: metadata.create_all(checkfirst=False), RegexSQL("^CREATE TABLE"), AllOf( CompiledSQL('CREATE INDEX employee_name_index ON ' 'employees (last_name, first_name)', []), CompiledSQL('CREATE UNIQUE INDEX employee_email_index ON ' 'employees (email_address)', []) ) ) @testing.provide_metadata def test_index_create_camelcase(self): """test that mixed-case index identifiers are legal""" metadata = self.metadata employees = Table('companyEmployees', metadata, Column('id', Integer, primary_key=True), Column('firstName', String(30)), Column('lastName', String(30)), Column('emailAddress', String(30))) Index('employeeNameIndex', employees.c.lastName, employees.c.firstName) Index('employeeEmailIndex', employees.c.emailAddress, unique=True) self.assert_sql_execution( testing.db, lambda: metadata.create_all(checkfirst=False), RegexSQL("^CREATE TABLE"), AllOf( CompiledSQL('CREATE INDEX "employeeNameIndex" ON ' '"companyEmployees" ("lastName", 
"firstName")', []), CompiledSQL('CREATE UNIQUE INDEX "employeeEmailIndex" ON ' '"companyEmployees" ("emailAddress")', []) ) ) @testing.provide_metadata def test_index_create_inline(self): # test an index create using index=True, unique=True metadata = self.metadata events = Table('events', metadata, Column('id', Integer, primary_key=True), Column('name', String(30), index=True, unique=True), Column('location', String(30), index=True), Column('sport', String(30)), Column('announcer', String(30)), Column('winner', String(30))) Index('sport_announcer', events.c.sport, events.c.announcer, unique=True) Index('idx_winners', events.c.winner) eq_( set(ix.name for ix in events.indexes), set(['ix_events_name', 'ix_events_location', 'sport_announcer', 'idx_winners']) ) self.assert_sql_execution( testing.db, lambda: events.create(testing.db), RegexSQL("^CREATE TABLE events"), AllOf( ExactSQL('CREATE UNIQUE INDEX ix_events_name ON events ' '(name)'), ExactSQL('CREATE INDEX ix_events_location ON events ' '(location)'), ExactSQL('CREATE UNIQUE INDEX sport_announcer ON events ' '(sport, announcer)'), ExactSQL('CREATE INDEX idx_winners ON events (winner)') ) ) @testing.provide_metadata def test_index_functional_create(self): metadata = self.metadata t = Table('sometable', metadata, Column('id', Integer, primary_key=True), Column('data', String(50)) ) Index('myindex', t.c.data.desc()) self.assert_sql_execution( testing.db, lambda: t.create(testing.db), CompiledSQL('CREATE TABLE sometable (id INTEGER NOT NULL, ' 'data VARCHAR(50), PRIMARY KEY (id))'), ExactSQL('CREATE INDEX myindex ON sometable (data DESC)') ) class ConstraintCompilationTest(fixtures.TestBase, AssertsCompiledSQL): __dialect__ = 'default' def test_create_plain(self): t = Table('t', MetaData(), Column('x', Integer)) i = Index("xyz", t.c.x) self.assert_compile( schema.CreateIndex(i), "CREATE INDEX xyz ON t (x)" ) def test_drop_plain_unattached(self): self.assert_compile( schema.DropIndex(Index(name="xyz")), "DROP INDEX xyz" ) def test_drop_plain(self): self.assert_compile( schema.DropIndex(Index(name="xyz")), "DROP INDEX xyz" ) def test_create_schema(self): t = Table('t', MetaData(), Column('x', Integer), schema="foo") i = Index("xyz", t.c.x) self.assert_compile( schema.CreateIndex(i), "CREATE INDEX xyz ON foo.t (x)" ) def test_drop_schema(self): t = Table('t', MetaData(), Column('x', Integer), schema="foo") i = Index("xyz", t.c.x) self.assert_compile( schema.DropIndex(i), "DROP INDEX foo.xyz" ) def test_too_long_idx_name(self): dialect = testing.db.dialect.__class__() for max_ident, max_index in [(22, None), (256, 22)]: dialect.max_identifier_length = max_ident dialect.max_index_name_length = max_index for tname, cname, exp in [ ('sometable', 'this_name_is_too_long', 'ix_sometable_t_09aa'), ('sometable', 'this_name_alsois_long', 'ix_sometable_t_3cf1'), ]: t1 = Table(tname, MetaData(), Column(cname, Integer, index=True), ) ix1 = list(t1.indexes)[0] self.assert_compile( schema.CreateIndex(ix1), "CREATE INDEX %s " "ON %s (%s)" % (exp, tname, cname), dialect=dialect ) dialect.max_identifier_length = 22 dialect.max_index_name_length = None t1 = Table('t', MetaData(), Column('c', Integer)) assert_raises( exc.IdentifierError, schema.CreateIndex(Index( "this_other_name_is_too_long_for_what_were_doing", t1.c.c)).compile, dialect=dialect ) def test_functional_index(self): metadata = MetaData() x = Table('x', metadata, Column('q', String(50)) ) idx = Index('y', func.lower(x.c.q)) self.assert_compile( schema.CreateIndex(idx), "CREATE INDEX y ON x 
(lower(q))" ) self.assert_compile( schema.CreateIndex(idx), "CREATE INDEX y ON x (lower(q))", dialect=testing.db.dialect ) def test_index_declaration_inline(self): metadata = MetaData() t1 = Table('t1', metadata, Column('x', Integer), Column('y', Integer), Index('foo', 'x', 'y') ) self.assert_compile( schema.CreateIndex(list(t1.indexes)[0]), "CREATE INDEX foo ON t1 (x, y)" ) def _test_deferrable(self, constraint_factory): dialect = default.DefaultDialect() t = Table('tbl', MetaData(), Column('a', Integer), Column('b', Integer), constraint_factory(deferrable=True)) sql = str(schema.CreateTable(t).compile(dialect=dialect)) assert 'DEFERRABLE' in sql, sql assert 'NOT DEFERRABLE' not in sql, sql t = Table('tbl', MetaData(), Column('a', Integer), Column('b', Integer), constraint_factory(deferrable=False)) sql = str(schema.CreateTable(t).compile(dialect=dialect)) assert 'NOT DEFERRABLE' in sql t = Table('tbl', MetaData(), Column('a', Integer), Column('b', Integer), constraint_factory(deferrable=True, initially='IMMEDIATE')) sql = str(schema.CreateTable(t).compile(dialect=dialect)) assert 'NOT DEFERRABLE' not in sql assert 'INITIALLY IMMEDIATE' in sql t = Table('tbl', MetaData(), Column('a', Integer), Column('b', Integer), constraint_factory(deferrable=True, initially='DEFERRED')) sql = str(schema.CreateTable(t).compile(dialect=dialect)) assert 'NOT DEFERRABLE' not in sql assert 'INITIALLY DEFERRED' in sql def test_column_level_ck_name(self): t = Table('tbl', MetaData(), Column('a', Integer, CheckConstraint("a > 5", name="ck_a_greater_five")) ) self.assert_compile( schema.CreateTable(t), "CREATE TABLE tbl (a INTEGER CONSTRAINT " "ck_a_greater_five CHECK (a > 5))" ) def test_deferrable_pk(self): factory = lambda **kw: PrimaryKeyConstraint('a', **kw) self._test_deferrable(factory) def test_deferrable_table_fk(self): factory = lambda **kw: ForeignKeyConstraint(['b'], ['tbl.a'], **kw) self._test_deferrable(factory) def test_deferrable_column_fk(self): t = Table('tbl', MetaData(), Column('a', Integer), Column('b', Integer, ForeignKey('tbl.a', deferrable=True, initially='DEFERRED'))) self.assert_compile( schema.CreateTable(t), "CREATE TABLE tbl (a INTEGER, b INTEGER, " "FOREIGN KEY(b) REFERENCES tbl " "(a) DEFERRABLE INITIALLY DEFERRED)", ) def test_fk_match_clause(self): t = Table('tbl', MetaData(), Column('a', Integer), Column('b', Integer, ForeignKey('tbl.a', match="SIMPLE"))) self.assert_compile( schema.CreateTable(t), "CREATE TABLE tbl (a INTEGER, b INTEGER, " "FOREIGN KEY(b) REFERENCES tbl " "(a) MATCH SIMPLE)", ) self.assert_compile( schema.AddConstraint(list(t.foreign_keys)[0].constraint), "ALTER TABLE tbl ADD FOREIGN KEY(b) " "REFERENCES tbl (a) MATCH SIMPLE" ) def test_deferrable_unique(self): factory = lambda **kw: UniqueConstraint('b', **kw) self._test_deferrable(factory) def test_deferrable_table_check(self): factory = lambda **kw: CheckConstraint('a < b', **kw) self._test_deferrable(factory) def test_multiple(self): m = MetaData() Table("foo", m, Column('id', Integer, primary_key=True), Column('bar', Integer, primary_key=True) ) tb = Table("some_table", m, Column('id', Integer, primary_key=True), Column('foo_id', Integer, ForeignKey('foo.id')), Column('foo_bar', Integer, ForeignKey('foo.bar')), ) self.assert_compile( schema.CreateTable(tb), "CREATE TABLE some_table (" "id INTEGER NOT NULL, " "foo_id INTEGER, " "foo_bar INTEGER, " "PRIMARY KEY (id), " "FOREIGN KEY(foo_id) REFERENCES foo (id), " "FOREIGN KEY(foo_bar) REFERENCES foo (bar))" ) def test_deferrable_column_check(self): t = 
Table('tbl', MetaData(), Column('a', Integer), Column('b', Integer, CheckConstraint('a < b', deferrable=True, initially='DEFERRED'))) self.assert_compile( schema.CreateTable(t), "CREATE TABLE tbl (a INTEGER, b INTEGER CHECK (a < b) " "DEFERRABLE INITIALLY DEFERRED)" ) def test_use_alter(self): m = MetaData() Table('t', m, Column('a', Integer), ) Table('t2', m, Column('a', Integer, ForeignKey('t.a', use_alter=True, name='fk_ta')), Column('b', Integer, ForeignKey('t.a', name='fk_tb')) ) e = engines.mock_engine(dialect_name='postgresql') m.create_all(e) m.drop_all(e) e.assert_sql([ 'CREATE TABLE t (a INTEGER)', 'CREATE TABLE t2 (a INTEGER, b INTEGER, CONSTRAINT fk_tb ' 'FOREIGN KEY(b) REFERENCES t (a))', 'ALTER TABLE t2 ' 'ADD CONSTRAINT fk_ta FOREIGN KEY(a) REFERENCES t (a)', 'ALTER TABLE t2 DROP CONSTRAINT fk_ta', 'DROP TABLE t2', 'DROP TABLE t' ]) def _constraint_create_fixture(self): m = MetaData() t = Table('tbl', m, Column('a', Integer), Column('b', Integer) ) t2 = Table('t2', m, Column('a', Integer), Column('b', Integer) ) return t, t2 def test_render_ck_constraint_inline(self): t, t2 = self._constraint_create_fixture() CheckConstraint('a < b', name="my_test_constraint", deferrable=True, initially='DEFERRED', table=t) # before we create an AddConstraint, # the CONSTRAINT comes out inline self.assert_compile( schema.CreateTable(t), "CREATE TABLE tbl (" "a INTEGER, " "b INTEGER, " "CONSTRAINT my_test_constraint CHECK (a < b) " "DEFERRABLE INITIALLY DEFERRED" ")" ) def test_render_ck_constraint_external(self): t, t2 = self._constraint_create_fixture() constraint = CheckConstraint('a < b', name="my_test_constraint", deferrable=True, initially='DEFERRED', table=t) self.assert_compile( schema.AddConstraint(constraint), "ALTER TABLE tbl ADD CONSTRAINT my_test_constraint " "CHECK (a < b) DEFERRABLE INITIALLY DEFERRED" ) def test_external_ck_constraint_cancels_internal(self): t, t2 = self._constraint_create_fixture() constraint = CheckConstraint('a < b', name="my_test_constraint", deferrable=True, initially='DEFERRED', table=t) schema.AddConstraint(constraint) # once we make an AddConstraint, # inline compilation of the CONSTRAINT # is disabled self.assert_compile( schema.CreateTable(t), "CREATE TABLE tbl (" "a INTEGER, " "b INTEGER" ")" ) def test_render_drop_constraint(self): t, t2 = self._constraint_create_fixture() constraint = CheckConstraint('a < b', name="my_test_constraint", deferrable=True, initially='DEFERRED', table=t) self.assert_compile( schema.DropConstraint(constraint), "ALTER TABLE tbl DROP CONSTRAINT my_test_constraint" ) def test_render_drop_constraint_cascade(self): t, t2 = self._constraint_create_fixture() constraint = CheckConstraint('a < b', name="my_test_constraint", deferrable=True, initially='DEFERRED', table=t) self.assert_compile( schema.DropConstraint(constraint, cascade=True), "ALTER TABLE tbl DROP CONSTRAINT my_test_constraint CASCADE" ) def test_render_add_fk_constraint_stringcol(self): t, t2 = self._constraint_create_fixture() constraint = ForeignKeyConstraint(["b"], ["t2.a"]) t.append_constraint(constraint) self.assert_compile( schema.AddConstraint(constraint), "ALTER TABLE tbl ADD FOREIGN KEY(b) REFERENCES t2 (a)" ) def test_render_add_fk_constraint_realcol(self): t, t2 = self._constraint_create_fixture() constraint = ForeignKeyConstraint([t.c.a], [t2.c.b]) t.append_constraint(constraint) self.assert_compile( schema.AddConstraint(constraint), "ALTER TABLE tbl ADD FOREIGN KEY(a) REFERENCES t2 (b)" ) def test_render_add_uq_constraint_stringcol(self): t, t2 = 
self._constraint_create_fixture() constraint = UniqueConstraint("a", "b", name="uq_cst") t2.append_constraint(constraint) self.assert_compile( schema.AddConstraint(constraint), "ALTER TABLE t2 ADD CONSTRAINT uq_cst UNIQUE (a, b)" ) def test_render_add_uq_constraint_realcol(self): t, t2 = self._constraint_create_fixture() constraint = UniqueConstraint(t2.c.a, t2.c.b, name="uq_cs2") self.assert_compile( schema.AddConstraint(constraint), "ALTER TABLE t2 ADD CONSTRAINT uq_cs2 UNIQUE (a, b)" ) def test_render_add_pk_constraint(self): t, t2 = self._constraint_create_fixture() assert t.c.a.primary_key is False constraint = PrimaryKeyConstraint(t.c.a) assert t.c.a.primary_key is True self.assert_compile( schema.AddConstraint(constraint), "ALTER TABLE tbl ADD PRIMARY KEY (a)" ) def test_render_check_constraint_sql_literal(self): t, t2 = self._constraint_create_fixture() constraint = CheckConstraint(t.c.a > 5) self.assert_compile( schema.AddConstraint(constraint), "ALTER TABLE tbl ADD CHECK (a > 5)" ) def test_render_index_sql_literal(self): t, t2 = self._constraint_create_fixture() constraint = Index('name', t.c.a + 5) self.assert_compile( schema.CreateIndex(constraint), "CREATE INDEX name ON tbl (a + 5)" ) class ConstraintAPITest(fixtures.TestBase): def test_double_fk_usage_raises(self): f = ForeignKey('b.id') Column('x', Integer, f) assert_raises(exc.InvalidRequestError, Column, "y", Integer, f) def test_auto_append_constraint(self): m = MetaData() t = Table('tbl', m, Column('a', Integer), Column('b', Integer) ) t2 = Table('t2', m, Column('a', Integer), Column('b', Integer) ) for c in ( UniqueConstraint(t.c.a), CheckConstraint(t.c.a > 5), ForeignKeyConstraint([t.c.a], [t2.c.a]), PrimaryKeyConstraint(t.c.a) ): assert c in t.constraints t.append_constraint(c) assert c in t.constraints c = Index('foo', t.c.a) assert c in t.indexes def test_auto_append_lowercase_table(self): t = table('t', column('a')) t2 = table('t2', column('a')) for c in ( UniqueConstraint(t.c.a), CheckConstraint(t.c.a > 5), ForeignKeyConstraint([t.c.a], [t2.c.a]), PrimaryKeyConstraint(t.c.a), Index('foo', t.c.a) ): assert True def test_tometadata_ok(self): m = MetaData() t = Table('tbl', m, Column('a', Integer), Column('b', Integer) ) t2 = Table('t2', m, Column('a', Integer), Column('b', Integer) ) UniqueConstraint(t.c.a) CheckConstraint(t.c.a > 5) ForeignKeyConstraint([t.c.a], [t2.c.a]) PrimaryKeyConstraint(t.c.a) m2 = MetaData() t3 = t.tometadata(m2) eq_(len(t3.constraints), 4) for c in t3.constraints: assert c.table is t3 def test_check_constraint_copy(self): m = MetaData() t = Table('tbl', m, Column('a', Integer), Column('b', Integer) ) ck = CheckConstraint(t.c.a > 5) ck2 = ck.copy() assert ck in t.constraints assert ck2 not in t.constraints def test_ambig_check_constraint_auto_append(self): m = MetaData() t = Table('tbl', m, Column('a', Integer), Column('b', Integer) ) t2 = Table('t2', m, Column('a', Integer), Column('b', Integer) ) c = CheckConstraint(t.c.a > t2.c.b) assert c not in t.constraints assert c not in t2.constraints def test_index_asserts_cols_standalone(self): metadata = MetaData() t1 = Table('t1', metadata, Column('x', Integer) ) t2 = Table('t2', metadata, Column('y', Integer) ) assert_raises_message( exc.ArgumentError, "Column 't2.y' is not part of table 't1'.", Index, "bar", t1.c.x, t2.c.y ) def test_index_asserts_cols_inline(self): metadata = MetaData() t1 = Table('t1', metadata, Column('x', Integer) ) assert_raises_message( exc.ArgumentError, "Index 'bar' is against table 't1', and " "cannot be associated 
with table 't2'.", Table, 't2', metadata, Column('y', Integer), Index('bar', t1.c.x) ) def test_raise_index_nonexistent_name(self): m = MetaData() # the KeyError isn't ideal here, a nicer message # perhaps assert_raises( KeyError, Table, 't', m, Column('x', Integer), Index("foo", "q") ) def test_raise_not_a_column(self): assert_raises( exc.ArgumentError, Index, "foo", 5 ) def test_raise_expr_no_column(self): idx = Index('foo', func.lower(5)) assert_raises_message( exc.CompileError, "Index 'foo' is not associated with any table.", schema.CreateIndex(idx).compile, dialect=testing.db.dialect ) assert_raises_message( exc.CompileError, "Index 'foo' is not associated with any table.", schema.CreateIndex(idx).compile ) def test_no_warning_w_no_columns(self): # I think the test here is, there is no warning. # people want to create empty indexes for the purpose of # a drop. idx = Index(name="foo") assert_raises_message( exc.CompileError, "Index 'foo' is not associated with any table.", schema.CreateIndex(idx).compile, dialect=testing.db.dialect ) assert_raises_message( exc.CompileError, "Index 'foo' is not associated with any table.", schema.CreateIndex(idx).compile ) def test_raise_clauseelement_not_a_column(self): m = MetaData() t2 = Table('t2', m, Column('x', Integer)) class SomeClass(object): def __clause_element__(self): return t2 assert_raises( exc.ArgumentError, Index, "foo", SomeClass() ) SQLAlchemy-0.8.4/test/sql/test_cte.py0000644000076500000240000003337212251147172020205 0ustar classicstaff00000000000000from sqlalchemy.testing import fixtures from sqlalchemy.testing import AssertsCompiledSQL, assert_raises_message from sqlalchemy.sql import table, column, select, func, literal from sqlalchemy.dialects import mssql from sqlalchemy.engine import default from sqlalchemy.exc import CompileError class CTETest(fixtures.TestBase, AssertsCompiledSQL): __dialect__ = 'default' def test_nonrecursive(self): orders = table('orders', column('region'), column('amount'), column('product'), column('quantity') ) regional_sales = select([ orders.c.region, func.sum(orders.c.amount).label('total_sales') ]).group_by(orders.c.region).cte("regional_sales") top_regions = select([regional_sales.c.region]).\ where( regional_sales.c.total_sales > select([ func.sum(regional_sales.c.total_sales)/10 ]) ).cte("top_regions") s = select([ orders.c.region, orders.c.product, func.sum(orders.c.quantity).label("product_units"), func.sum(orders.c.amount).label("product_sales") ]).where(orders.c.region.in_( select([top_regions.c.region]) )).group_by(orders.c.region, orders.c.product) # needs to render regional_sales first as top_regions # refers to it self.assert_compile( s, "WITH regional_sales AS (SELECT orders.region AS region, " "sum(orders.amount) AS total_sales FROM orders " "GROUP BY orders.region), " "top_regions AS (SELECT " "regional_sales.region AS region FROM regional_sales " "WHERE regional_sales.total_sales > " "(SELECT sum(regional_sales.total_sales) / :sum_1 AS " "anon_1 FROM regional_sales)) " "SELECT orders.region, orders.product, " "sum(orders.quantity) AS product_units, " "sum(orders.amount) AS product_sales " "FROM orders WHERE orders.region " "IN (SELECT top_regions.region FROM top_regions) " "GROUP BY orders.region, orders.product" ) def test_recursive(self): parts = table('parts', column('part'), column('sub_part'), column('quantity'), ) included_parts = select([ parts.c.sub_part, parts.c.part, parts.c.quantity]).\ where(parts.c.part=='our part').\ cte(recursive=True) incl_alias = included_parts.alias() 
parts_alias = parts.alias() included_parts = included_parts.union( select([ parts_alias.c.part, parts_alias.c.sub_part, parts_alias.c.quantity]).\ where(parts_alias.c.part==incl_alias.c.sub_part) ) s = select([ included_parts.c.sub_part, func.sum(included_parts.c.quantity).label('total_quantity')]).\ select_from(included_parts.join( parts,included_parts.c.part==parts.c.part)).\ group_by(included_parts.c.sub_part) self.assert_compile(s, "WITH RECURSIVE anon_1(sub_part, part, quantity) " "AS (SELECT parts.sub_part AS sub_part, parts.part " "AS part, parts.quantity AS quantity FROM parts " "WHERE parts.part = :part_1 UNION SELECT parts_1.part " "AS part, parts_1.sub_part AS sub_part, parts_1.quantity " "AS quantity FROM parts AS parts_1, anon_1 AS anon_2 " "WHERE parts_1.part = anon_2.sub_part) " "SELECT anon_1.sub_part, " "sum(anon_1.quantity) AS total_quantity FROM anon_1 " "JOIN parts ON anon_1.part = parts.part " "GROUP BY anon_1.sub_part" ) # quick check that the "WITH RECURSIVE" varies per # dialect self.assert_compile(s, "WITH anon_1(sub_part, part, quantity) " "AS (SELECT parts.sub_part AS sub_part, parts.part " "AS part, parts.quantity AS quantity FROM parts " "WHERE parts.part = :part_1 UNION SELECT parts_1.part " "AS part, parts_1.sub_part AS sub_part, parts_1.quantity " "AS quantity FROM parts AS parts_1, anon_1 AS anon_2 " "WHERE parts_1.part = anon_2.sub_part) " "SELECT anon_1.sub_part, " "sum(anon_1.quantity) AS total_quantity FROM anon_1 " "JOIN parts ON anon_1.part = parts.part " "GROUP BY anon_1.sub_part", dialect=mssql.dialect() ) def test_recursive_union_no_alias_one(self): s1 = select([literal(0).label("x")]) cte = s1.cte(name="cte", recursive=True) cte = cte.union_all( select([cte.c.x + 1]).where(cte.c.x < 10) ) s2 = select([cte]) self.assert_compile(s2, "WITH RECURSIVE cte(x) AS " "(SELECT :param_1 AS x UNION ALL " "SELECT cte.x + :x_1 AS anon_1 " "FROM cte WHERE cte.x < :x_2) " "SELECT cte.x FROM cte" ) def test_recursive_union_no_alias_two(self): """ pg's example: WITH RECURSIVE t(n) AS ( VALUES (1) UNION ALL SELECT n+1 FROM t WHERE n < 100 ) SELECT sum(n) FROM t; """ # I know, this is the PG VALUES keyword, # we're cheating here. also yes we need the SELECT, # sorry PG. t = select([func.values(1).label("n")]).cte("t", recursive=True) t = t.union_all(select([t.c.n + 1]).where(t.c.n < 100)) s = select([func.sum(t.c.n)]) self.assert_compile(s, "WITH RECURSIVE t(n) AS " "(SELECT values(:values_1) AS n " "UNION ALL SELECT t.n + :n_1 AS anon_1 " "FROM t " "WHERE t.n < :n_2) " "SELECT sum(t.n) AS sum_1 FROM t" ) def test_recursive_union_no_alias_three(self): # like test one, but let's refer to the CTE # in a sibling CTE. s1 = select([literal(0).label("x")]) cte = s1.cte(name="cte", recursive=True) # can't do it here... #bar = select([cte]).cte('bar') cte = cte.union_all( select([cte.c.x + 1]).where(cte.c.x < 10) ) bar = select([cte]).cte('bar') s2 = select([cte, bar]) self.assert_compile(s2, "WITH RECURSIVE cte(x) AS " "(SELECT :param_1 AS x UNION ALL " "SELECT cte.x + :x_1 AS anon_1 " "FROM cte WHERE cte.x < :x_2), " "bar AS (SELECT cte.x AS x FROM cte) " "SELECT cte.x, bar.x FROM cte, bar" ) def test_recursive_union_no_alias_four(self): # like test one and three, but let's refer # previous version of "cte". here we test # how the compiler resolves multiple instances # of "cte". 
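        # Here "bar" is built against the pre-union version of "cte", so two
        # distinct CTE objects end up sharing the name "cte"; the assertions
        # below check which one the compiler renders depending on what the
        # enclosing SELECT actually references.  Sketch of the two versions
        # involved (variable names are illustrative only):
        #
        #   cte_v1 = select([literal(0).label("x")]).cte(name="cte",
        #                                                recursive=True)
        #   bar = select([cte_v1]).cte('bar')        # captures version 1
        #   cte_v2 = cte_v1.union_all(               # version 2, same name
        #       select([cte_v1.c.x + 1]).where(cte_v1.c.x < 10))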
s1 = select([literal(0).label("x")]) cte = s1.cte(name="cte", recursive=True) bar = select([cte]).cte('bar') cte = cte.union_all( select([cte.c.x + 1]).where(cte.c.x < 10) ) # outer cte rendered first, then bar, which # includes "inner" cte s2 = select([cte, bar]) self.assert_compile(s2, "WITH RECURSIVE cte(x) AS " "(SELECT :param_1 AS x UNION ALL " "SELECT cte.x + :x_1 AS anon_1 " "FROM cte WHERE cte.x < :x_2), " "bar AS (SELECT cte.x AS x FROM cte) " "SELECT cte.x, bar.x FROM cte, bar" ) # bar rendered, only includes "inner" cte, # "outer" cte isn't present s2 = select([bar]) self.assert_compile(s2, "WITH RECURSIVE cte(x) AS " "(SELECT :param_1 AS x), " "bar AS (SELECT cte.x AS x FROM cte) " "SELECT bar.x FROM bar" ) # bar rendered, but then the "outer" # cte is rendered. s2 = select([bar, cte]) self.assert_compile(s2, "WITH RECURSIVE bar AS (SELECT cte.x AS x FROM cte), " "cte(x) AS " "(SELECT :param_1 AS x UNION ALL " "SELECT cte.x + :x_1 AS anon_1 " "FROM cte WHERE cte.x < :x_2) " "SELECT bar.x, cte.x FROM bar, cte" ) def test_conflicting_names(self): """test a flat out name conflict.""" s1 = select([1]) c1= s1.cte(name='cte1', recursive=True) s2 = select([1]) c2 = s2.cte(name='cte1', recursive=True) s = select([c1, c2]) assert_raises_message( CompileError, "Multiple, unrelated CTEs found " "with the same name: 'cte1'", s.compile ) def test_union(self): orders = table('orders', column('region'), column('amount'), ) regional_sales = select([ orders.c.region, orders.c.amount ]).cte("regional_sales") s = select([regional_sales.c.region]).\ where( regional_sales.c.amount > 500 ) self.assert_compile(s, "WITH regional_sales AS " "(SELECT orders.region AS region, " "orders.amount AS amount FROM orders) " "SELECT regional_sales.region " "FROM regional_sales WHERE " "regional_sales.amount > :amount_1") s = s.union_all( select([regional_sales.c.region]).\ where( regional_sales.c.amount < 300 ) ) self.assert_compile(s, "WITH regional_sales AS " "(SELECT orders.region AS region, " "orders.amount AS amount FROM orders) " "SELECT regional_sales.region FROM regional_sales " "WHERE regional_sales.amount > :amount_1 " "UNION ALL SELECT regional_sales.region " "FROM regional_sales WHERE " "regional_sales.amount < :amount_2") def test_reserved_quote(self): orders = table('orders', column('order'), ) s = select([orders.c.order]).cte("regional_sales", recursive=True) s = select([s.c.order]) self.assert_compile(s, 'WITH RECURSIVE regional_sales("order") AS ' '(SELECT orders."order" AS "order" ' "FROM orders)" ' SELECT regional_sales."order" ' "FROM regional_sales" ) def test_multi_subq_quote(self): cte = select([literal(1).label("id")]).cte(name='CTE') s1 = select([cte.c.id]).alias() s2 = select([cte.c.id]).alias() s = select([s1, s2]) self.assert_compile( s, 'WITH "CTE" AS (SELECT :param_1 AS id) ' 'SELECT anon_1.id, anon_2.id FROM ' '(SELECT "CTE".id AS id FROM "CTE") AS anon_1, ' '(SELECT "CTE".id AS id FROM "CTE") AS anon_2' ) def test_positional_binds(self): orders = table('orders', column('order'), ) s = select([orders.c.order, literal("x")]).cte("regional_sales") s = select([s.c.order, literal("y")]) dialect = default.DefaultDialect() dialect.positional = True dialect.paramstyle = 'numeric' self.assert_compile(s, 'WITH regional_sales AS (SELECT orders."order" ' 'AS "order", :1 AS anon_2 FROM orders) SELECT ' 'regional_sales."order", :2 AS anon_1 FROM regional_sales', checkpositional=('x', 'y'), dialect=dialect ) self.assert_compile(s.union(s), 'WITH regional_sales AS (SELECT orders."order" ' 'AS 
"order", :1 AS anon_2 FROM orders) SELECT ' 'regional_sales."order", :2 AS anon_1 FROM regional_sales ' 'UNION SELECT regional_sales."order", :3 AS anon_1 ' 'FROM regional_sales', checkpositional=('x', 'y', 'y'), dialect=dialect ) s = select([orders.c.order]).\ where(orders.c.order=='x').cte("regional_sales") s = select([s.c.order]).where(s.c.order=="y") self.assert_compile(s, 'WITH regional_sales AS (SELECT orders."order" AS ' '"order" FROM orders WHERE orders."order" = :1) ' 'SELECT regional_sales."order" FROM regional_sales ' 'WHERE regional_sales."order" = :2', checkpositional=('x', 'y'), dialect=dialect ) def test_all_aliases(self): orders = table('order', column('order')) s = select([orders.c.order]).cte("regional_sales") r1 = s.alias() r2 = s.alias() s2 = select([r1, r2]).where(r1.c.order > r2.c.order) self.assert_compile( s2, 'WITH regional_sales AS (SELECT "order"."order" ' 'AS "order" FROM "order") ' 'SELECT anon_1."order", anon_2."order" ' 'FROM regional_sales AS anon_1, ' 'regional_sales AS anon_2 WHERE anon_1."order" > anon_2."order"' ) s3 = select([orders]).select_from(orders.join(r1, r1.c.order == orders.c.order)) self.assert_compile( s3, 'WITH regional_sales AS ' '(SELECT "order"."order" AS "order" ' 'FROM "order")' ' SELECT "order"."order" ' 'FROM "order" JOIN regional_sales AS anon_1 ON anon_1."order" = "order"."order"' )SQLAlchemy-0.8.4/test/sql/test_defaults.py0000644000076500000240000011722712251150016021232 0ustar classicstaff00000000000000from sqlalchemy.testing import eq_, assert_raises_message, assert_raises import datetime from sqlalchemy.schema import CreateSequence, DropSequence from sqlalchemy.sql import select, text import sqlalchemy as sa from sqlalchemy import testing from sqlalchemy.testing import engines from sqlalchemy import MetaData, Integer, String, ForeignKey, Boolean, exc,\ Sequence, func, literal, Unicode from sqlalchemy.types import TypeDecorator, TypeEngine from sqlalchemy.testing.schema import Table, Column from sqlalchemy.dialects import sqlite from sqlalchemy.testing import fixtures t = f = f2 = ts = currenttime = metadata = default_generator = None class DefaultTest(fixtures.TestBase): @classmethod def setup_class(cls): global t, f, f2, ts, currenttime, metadata, default_generator db = testing.db metadata = MetaData(db) default_generator = {'x': 50} def mydefault(): default_generator['x'] += 1 return default_generator['x'] def myupdate_with_ctx(ctx): conn = ctx.connection return conn.execute(sa.select([sa.text('13')])).scalar() def mydefault_using_connection(ctx): conn = ctx.connection try: return conn.execute(sa.select([sa.text('12')])).scalar() finally: # ensure a "close()" on this connection does nothing, # since its a "branched" connection conn.close() use_function_defaults = testing.against('postgresql', 'mssql', 'maxdb') is_oracle = testing.against('oracle') # select "count(1)" returns different results on different DBs also # correct for "current_date" compatible as column default, value # differences currenttime = func.current_date(type_=sa.Date, bind=db) if is_oracle: ts = db.scalar(sa.select([func.trunc(func.sysdate(), sa.literal_column("'DAY'"), type_=sa.Date).label('today')])) assert isinstance(ts, datetime.date) and not isinstance(ts, datetime.datetime) f = sa.select([func.length('abcdef')], bind=db).scalar() f2 = sa.select([func.length('abcdefghijk')], bind=db).scalar() # TODO: engine propigation across nested functions not working currenttime = func.trunc(currenttime, sa.literal_column("'DAY'"), bind=db, type_=sa.Date) def1 = 
currenttime def2 = func.trunc(sa.text("sysdate"), sa.literal_column("'DAY'"), type_=sa.Date) deftype = sa.Date elif use_function_defaults: f = sa.select([func.length('abcdef')], bind=db).scalar() f2 = sa.select([func.length('abcdefghijk')], bind=db).scalar() def1 = currenttime deftype = sa.Date if testing.against('maxdb'): def2 = sa.text("curdate") elif testing.against('mssql'): def2 = sa.text("getdate()") else: def2 = sa.text("current_date") ts = db.scalar(func.current_date()) else: f = len('abcdef') f2 = len('abcdefghijk') def1 = def2 = "3" ts = 3 deftype = Integer t = Table('default_test1', metadata, # python function Column('col1', Integer, primary_key=True, default=mydefault), # python literal Column('col2', String(20), default="imthedefault", onupdate="im the update"), # preexecute expression Column('col3', Integer, default=func.length('abcdef'), onupdate=func.length('abcdefghijk')), # SQL-side default from sql expression Column('col4', deftype, server_default=def1), # SQL-side default from literal expression Column('col5', deftype, server_default=def2), # preexecute + update timestamp Column('col6', sa.Date, default=currenttime, onupdate=currenttime), Column('boolcol1', sa.Boolean, default=True), Column('boolcol2', sa.Boolean, default=False), # python function which uses ExecutionContext Column('col7', Integer, default=mydefault_using_connection, onupdate=myupdate_with_ctx), # python builtin Column('col8', sa.Date, default=datetime.date.today, onupdate=datetime.date.today), # combo Column('col9', String(20), default='py', server_default='ddl')) t.create() @classmethod def teardown_class(cls): t.drop() def teardown(self): default_generator['x'] = 50 t.delete().execute() def test_bad_arg_signature(self): ex_msg = \ "ColumnDefault Python function takes zero "\ "or one positional arguments" def fn1(x, y): pass def fn2(x, y, z=3): pass class fn3(object): def __init__(self, x, y): pass class FN4(object): def __call__(self, x, y): pass fn4 = FN4() for fn in fn1, fn2, fn3, fn4: assert_raises_message(sa.exc.ArgumentError, ex_msg, sa.ColumnDefault, fn) def test_arg_signature(self): def fn1(): pass def fn2(): pass def fn3(x=1): eq_(x, 1) def fn4(x=1, y=2, z=3): eq_(x, 1) fn5 = list class fn6a(object): def __init__(self, x): eq_(x, "context") class fn6b(object): def __init__(self, x, y=3): eq_(x, "context") class FN7(object): def __call__(self, x): eq_(x, "context") fn7 = FN7() class FN8(object): def __call__(self, x, y=3): eq_(x, "context") fn8 = FN8() for fn in fn1, fn2, fn3, fn4, fn5, fn6a, fn6b, fn7, fn8: c = sa.ColumnDefault(fn) c.arg("context") @testing.fails_on('firebird', 'Data type unknown') def test_standalone(self): c = testing.db.engine.contextual_connect() x = c.execute(t.c.col1.default) y = t.c.col2.default.execute() z = c.execute(t.c.col3.default) assert 50 <= x <= 57 eq_(y, 'imthedefault') eq_(z, f) eq_(f2, 11) def test_py_vs_server_default_detection(self): def has_(name, *wanted): slots = ['default', 'onupdate', 'server_default', 'server_onupdate'] col = tbl.c[name] for slot in wanted: slots.remove(slot) assert getattr(col, slot) is not None, getattr(col, slot) for slot in slots: assert getattr(col, slot) is None, getattr(col, slot) tbl = t has_('col1', 'default') has_('col2', 'default', 'onupdate') has_('col3', 'default', 'onupdate') has_('col4', 'server_default') has_('col5', 'server_default') has_('col6', 'default', 'onupdate') has_('boolcol1', 'default') has_('boolcol2', 'default') has_('col7', 'default', 'onupdate') has_('col8', 'default', 'onupdate') has_('col9', 
'default', 'server_default') ColumnDefault, DefaultClause = sa.ColumnDefault, sa.DefaultClause t2 = Table('t2', MetaData(), Column('col1', Integer, Sequence('foo')), Column('col2', Integer, default=Sequence('foo'), server_default='y'), Column('col3', Integer, Sequence('foo'), server_default='x'), Column('col4', Integer, ColumnDefault('x'), DefaultClause('y')), Column('col4', Integer, ColumnDefault('x'), DefaultClause('y'), DefaultClause('y', for_update=True)), Column('col5', Integer, ColumnDefault('x'), DefaultClause('y'), onupdate='z'), Column('col6', Integer, ColumnDefault('x'), server_default='y', onupdate='z'), Column('col7', Integer, default='x', server_default='y', onupdate='z'), Column('col8', Integer, server_onupdate='u', default='x', server_default='y', onupdate='z')) tbl = t2 has_('col1', 'default') has_('col2', 'default', 'server_default') has_('col3', 'default', 'server_default') has_('col4', 'default', 'server_default', 'server_onupdate') has_('col5', 'default', 'server_default', 'onupdate') has_('col6', 'default', 'server_default', 'onupdate') has_('col7', 'default', 'server_default', 'onupdate') has_('col8', 'default', 'server_default', 'onupdate', 'server_onupdate') @testing.fails_on('firebird', 'Data type unknown') def test_insert(self): r = t.insert().execute() assert r.lastrow_has_defaults() eq_(set(r.context.postfetch_cols), set([t.c.col3, t.c.col5, t.c.col4, t.c.col6])) r = t.insert(inline=True).execute() assert r.lastrow_has_defaults() eq_(set(r.context.postfetch_cols), set([t.c.col3, t.c.col5, t.c.col4, t.c.col6])) t.insert().execute() ctexec = sa.select([currenttime.label('now')], bind=testing.db).scalar() l = t.select().order_by(t.c.col1).execute() today = datetime.date.today() eq_(l.fetchall(), [ (x, 'imthedefault', f, ts, ts, ctexec, True, False, 12, today, 'py') for x in range(51, 54)]) t.insert().execute(col9=None) assert r.lastrow_has_defaults() eq_(set(r.context.postfetch_cols), set([t.c.col3, t.c.col5, t.c.col4, t.c.col6])) eq_(t.select(t.c.col1 == 54).execute().fetchall(), [(54, 'imthedefault', f, ts, ts, ctexec, True, False, 12, today, None)]) @testing.fails_on('firebird', 'Data type unknown') def test_insertmany(self): # MySQL-Python 1.2.2 breaks functions in execute_many :( if (testing.against('mysql+mysqldb') and testing.db.dialect.dbapi.version_info[:3] == (1, 2, 2)): return t.insert().execute({}, {}, {}) ctexec = currenttime.scalar() l = t.select().execute() today = datetime.date.today() eq_(l.fetchall(), [(51, 'imthedefault', f, ts, ts, ctexec, True, False, 12, today, 'py'), (52, 'imthedefault', f, ts, ts, ctexec, True, False, 12, today, 'py'), (53, 'imthedefault', f, ts, ts, ctexec, True, False, 12, today, 'py')]) def test_no_embed_in_sql(self): """Using a DefaultGenerator, Sequence, DefaultClause in the columns, where clause of a select, or in the values clause of insert, update, raises an informative error""" for const in ( sa.Sequence('y'), sa.ColumnDefault('y'), sa.DefaultClause('y') ): assert_raises_message( sa.exc.ArgumentError, "SQL expression object or string expected.", t.select, [const] ) assert_raises_message( sa.exc.InvalidRequestError, "cannot be used directly as a column expression.", str, t.insert().values(col4=const) ) assert_raises_message( sa.exc.InvalidRequestError, "cannot be used directly as a column expression.", str, t.update().values(col4=const) ) def test_missing_many_param(self): assert_raises_message(exc.StatementError, "A value is required for bind parameter 'col7', in parameter group 1", t.insert().execute, {'col4': 7, 
'col7': 12, 'col8': 19}, {'col4': 7, 'col8': 19}, {'col4': 7, 'col7': 12, 'col8': 19}, ) def test_insert_values(self): t.insert(values={'col3': 50}).execute() l = t.select().execute() eq_(50, l.first()['col3']) @testing.fails_on('firebird', 'Data type unknown') def test_updatemany(self): # MySQL-Python 1.2.2 breaks functions in execute_many :( if (testing.against('mysql+mysqldb') and testing.db.dialect.dbapi.version_info[:3] == (1, 2, 2)): return t.insert().execute({}, {}, {}) t.update(t.c.col1 == sa.bindparam('pkval')).execute( {'pkval': 51, 'col7': None, 'col8': None, 'boolcol1': False}) t.update(t.c.col1 == sa.bindparam('pkval')).execute( {'pkval': 51}, {'pkval': 52}, {'pkval': 53}) l = t.select().execute() ctexec = currenttime.scalar() today = datetime.date.today() eq_(l.fetchall(), [(51, 'im the update', f2, ts, ts, ctexec, False, False, 13, today, 'py'), (52, 'im the update', f2, ts, ts, ctexec, True, False, 13, today, 'py'), (53, 'im the update', f2, ts, ts, ctexec, True, False, 13, today, 'py')]) @testing.fails_on('firebird', 'Data type unknown') def test_update(self): r = t.insert().execute() pk = r.inserted_primary_key[0] t.update(t.c.col1 == pk).execute(col4=None, col5=None) ctexec = currenttime.scalar() l = t.select(t.c.col1 == pk).execute() l = l.first() eq_(l, (pk, 'im the update', f2, None, None, ctexec, True, False, 13, datetime.date.today(), 'py')) eq_(11, f2) @testing.fails_on('firebird', 'Data type unknown') def test_update_values(self): r = t.insert().execute() pk = r.inserted_primary_key[0] t.update(t.c.col1 == pk, values={'col3': 55}).execute() l = t.select(t.c.col1 == pk).execute() l = l.first() eq_(55, l['col3']) class PKDefaultTest(fixtures.TablesTest): __requires__ = ('subqueries',) @classmethod def define_tables(cls, metadata): t2 = Table('t2', metadata, Column('nextid', Integer)) Table('t1', metadata, Column('id', Integer, primary_key=True, default=sa.select([func.max(t2.c.nextid)]).as_scalar()), Column('data', String(30))) @testing.requires.returning def test_with_implicit_returning(self): self._test(True) def test_regular(self): self._test(False) def _test(self, returning): t2, t1 = self.tables.t2, self.tables.t1 if not returning and not testing.db.dialect.implicit_returning: engine = testing.db else: engine = engines.testing_engine( options={'implicit_returning': returning}) engine.execute(t2.insert(), nextid=1) r = engine.execute(t1.insert(), data='hi') eq_([1], r.inserted_primary_key) engine.execute(t2.insert(), nextid=2) r = engine.execute(t1.insert(), data='there') eq_([2], r.inserted_primary_key) class PKIncrementTest(fixtures.TablesTest): run_define_tables = 'each' @classmethod def define_tables(cls, metadata): Table("aitable", metadata, Column('id', Integer, Sequence('ai_id_seq', optional=True), primary_key=True), Column('int1', Integer), Column('str1', String(20))) # TODO: add coverage for increment on a secondary column in a key @testing.fails_on('firebird', 'Data type unknown') def _test_autoincrement(self, bind): aitable = self.tables.aitable ids = set() rs = bind.execute(aitable.insert(), int1=1) last = rs.inserted_primary_key[0] self.assert_(last) self.assert_(last not in ids) ids.add(last) rs = bind.execute(aitable.insert(), str1='row 2') last = rs.inserted_primary_key[0] self.assert_(last) self.assert_(last not in ids) ids.add(last) rs = bind.execute(aitable.insert(), int1=3, str1='row 3') last = rs.inserted_primary_key[0] self.assert_(last) self.assert_(last not in ids) ids.add(last) rs = bind.execute(aitable.insert(values={'int1': 
func.length('four')})) last = rs.inserted_primary_key[0] self.assert_(last) self.assert_(last not in ids) ids.add(last) eq_(ids, set([1, 2, 3, 4])) eq_(list(bind.execute(aitable.select().order_by(aitable.c.id))), [(1, 1, None), (2, None, 'row 2'), (3, 3, 'row 3'), (4, 4, None)]) def test_autoincrement_autocommit(self): self._test_autoincrement(testing.db) def test_autoincrement_transaction(self): con = testing.db.connect() tx = con.begin() try: try: self._test_autoincrement(con) except: try: tx.rollback() except: pass raise else: tx.commit() finally: con.close() class EmptyInsertTest(fixtures.TestBase): @testing.exclude('sqlite', '<', (3, 3, 8), 'no empty insert support') @testing.fails_on('oracle', 'FIXME: unknown') @testing.provide_metadata def test_empty_insert(self): t1 = Table('t1', self.metadata, Column('is_true', Boolean, server_default=('1'))) self.metadata.create_all() t1.insert().execute() eq_(1, select([func.count(text('*'))], from_obj=t1).scalar()) eq_(True, t1.select().scalar()) class AutoIncrementTest(fixtures.TablesTest): __requires__ = ('identity',) run_define_tables = 'each' @classmethod def define_tables(cls, metadata): """Each test manipulates self.metadata individually.""" @testing.exclude('sqlite', '<', (3, 4), 'no database support') def test_autoincrement_single_col(self): single = Table('single', self.metadata, Column('id', Integer, primary_key=True)) single.create() r = single.insert().execute() id_ = r.inserted_primary_key[0] eq_(id_, 1) eq_(1, sa.select([func.count(sa.text('*'))], from_obj=single).scalar()) def test_autoincrement_fk(self): nodes = Table('nodes', self.metadata, Column('id', Integer, primary_key=True), Column('parent_id', Integer, ForeignKey('nodes.id')), Column('data', String(30))) nodes.create() r = nodes.insert().execute(data='foo') id_ = r.inserted_primary_key[0] nodes.insert().execute(data='bar', parent_id=id_) def test_autoinc_detection_no_affinity(self): class MyType(TypeDecorator): impl = TypeEngine assert MyType()._type_affinity is None t = Table('x', MetaData(), Column('id', MyType(), primary_key=True) ) assert t._autoincrement_column is None def test_autoincrement_ignore_fk(self): m = MetaData() Table('y', m, Column('id', Integer(), primary_key=True) ) x = Table('x', m, Column('id', Integer(), ForeignKey('y.id'), autoincrement="ignore_fk", primary_key=True) ) assert x._autoincrement_column is x.c.id def test_autoincrement_fk_disqualifies(self): m = MetaData() Table('y', m, Column('id', Integer(), primary_key=True) ) x = Table('x', m, Column('id', Integer(), ForeignKey('y.id'), primary_key=True) ) assert x._autoincrement_column is None @testing.fails_on('sqlite', 'FIXME: unknown') def test_non_autoincrement(self): # sqlite INT primary keys can be non-unique! 
(only for ints) nonai = Table("nonaitest", self.metadata, Column('id', Integer, autoincrement=False, primary_key=True), Column('data', String(20))) nonai.create() def go(): # postgresql + mysql strict will fail on first row, # mysql in legacy mode fails on second row nonai.insert().execute(data='row 1') nonai.insert().execute(data='row 2') assert_raises( sa.exc.DBAPIError, go ) nonai.insert().execute(id=1, data='row 1') class SequenceDDLTest(fixtures.TestBase, testing.AssertsCompiledSQL): __dialect__ = 'default' def test_create_drop_ddl(self): self.assert_compile( CreateSequence(Sequence('foo_seq')), "CREATE SEQUENCE foo_seq", ) self.assert_compile( CreateSequence(Sequence('foo_seq', start=5)), "CREATE SEQUENCE foo_seq START WITH 5", ) self.assert_compile( CreateSequence(Sequence('foo_seq', increment=2)), "CREATE SEQUENCE foo_seq INCREMENT BY 2", ) self.assert_compile( CreateSequence(Sequence('foo_seq', increment=2, start=5)), "CREATE SEQUENCE foo_seq INCREMENT BY 2 START WITH 5", ) self.assert_compile( DropSequence(Sequence('foo_seq')), "DROP SEQUENCE foo_seq", ) class SequenceExecTest(fixtures.TestBase): __requires__ = ('sequences',) @classmethod def setup_class(cls): cls.seq = Sequence("my_sequence") cls.seq.create(testing.db) @classmethod def teardown_class(cls): cls.seq.drop(testing.db) def _assert_seq_result(self, ret): """asserts return of next_value is an int""" assert isinstance(ret, (int, long)) assert ret > 0 def test_implicit_connectionless(self): s = Sequence("my_sequence", metadata=MetaData(testing.db)) self._assert_seq_result(s.execute()) def test_explicit(self): s = Sequence("my_sequence") self._assert_seq_result(s.execute(testing.db)) def test_explicit_optional(self): """test dialect executes a Sequence, returns nextval, whether or not "optional" is set """ s = Sequence("my_sequence", optional=True) self._assert_seq_result(s.execute(testing.db)) def test_func_implicit_connectionless_execute(self): """test func.next_value().execute()/.scalar() works with connectionless execution. """ s = Sequence("my_sequence", metadata=MetaData(testing.db)) self._assert_seq_result(s.next_value().execute().scalar()) def test_func_explicit(self): s = Sequence("my_sequence") self._assert_seq_result(testing.db.scalar(s.next_value())) def test_func_implicit_connectionless_scalar(self): """test func.next_value().execute()/.scalar() works. 
""" s = Sequence("my_sequence", metadata=MetaData(testing.db)) self._assert_seq_result(s.next_value().scalar()) def test_func_embedded_select(self): """test can use next_value() in select column expr""" s = Sequence("my_sequence") self._assert_seq_result( testing.db.scalar(select([s.next_value()])) ) @testing.fails_on('oracle', "ORA-02287: sequence number not allowed here") @testing.provide_metadata def test_func_embedded_whereclause(self): """test can use next_value() in whereclause""" metadata = self.metadata t1 = Table('t', metadata, Column('x', Integer) ) t1.create(testing.db) testing.db.execute(t1.insert(), [{'x': 1}, {'x': 300}, {'x': 301}]) s = Sequence("my_sequence") eq_( testing.db.execute( t1.select().where(t1.c.x > s.next_value()) ).fetchall(), [(300, ), (301, )] ) @testing.provide_metadata def test_func_embedded_valuesbase(self): """test can use next_value() in values() of _ValuesBase""" metadata = self.metadata t1 = Table('t', metadata, Column('x', Integer) ) t1.create(testing.db) s = Sequence("my_sequence") testing.db.execute( t1.insert().values(x=s.next_value()) ) self._assert_seq_result( testing.db.scalar(t1.select()) ) @testing.provide_metadata def test_inserted_pk_no_returning(self): """test inserted_primary_key contains [None] when pk_col=next_value(), implicit returning is not used.""" metadata = self.metadata e = engines.testing_engine(options={'implicit_returning': False}) s = Sequence("my_sequence") metadata.bind = e t1 = Table('t', metadata, Column('x', Integer, primary_key=True) ) t1.create() r = e.execute( t1.insert().values(x=s.next_value()) ) eq_(r.inserted_primary_key, [None]) @testing.requires.returning @testing.provide_metadata def test_inserted_pk_implicit_returning(self): """test inserted_primary_key contains the result when pk_col=next_value(), when implicit returning is used.""" metadata = self.metadata e = engines.testing_engine(options={'implicit_returning': True}) s = Sequence("my_sequence") metadata.bind = e t1 = Table('t', metadata, Column('x', Integer, primary_key=True) ) t1.create() r = e.execute( t1.insert().values(x=s.next_value()) ) self._assert_seq_result(r.inserted_primary_key[0]) class SequenceTest(fixtures.TestBase, testing.AssertsCompiledSQL): __requires__ = ('sequences',) @testing.fails_on('firebird', 'no FB support for start/increment') def test_start_increment(self): for seq in ( Sequence('foo_seq'), Sequence('foo_seq', start=8), Sequence('foo_seq', increment=5)): seq.create(testing.db) try: values = [ testing.db.execute(seq) for i in range(3) ] start = seq.start or 1 inc = seq.increment or 1 assert values == list(xrange(start, start + inc * 3, inc)) finally: seq.drop(testing.db) def _has_sequence(self, name): return testing.db.dialect.has_sequence(testing.db, name) def test_nextval_render(self): """test dialect renders the "nextval" construct, whether or not "optional" is set """ for s in ( Sequence("my_seq"), Sequence("my_seq", optional=True)): assert str(s.next_value(). 
compile(dialect=testing.db.dialect)) in ( "nextval('my_seq')", "gen_id(my_seq, 1)", "my_seq.nextval", ) def test_nextval_unsupported(self): """test next_value() used on non-sequence platform raises NotImplementedError.""" s = Sequence("my_seq") d = sqlite.dialect() assert_raises_message( NotImplementedError, "Dialect 'sqlite' does not support sequence increments.", s.next_value().compile, dialect=d ) def test_checkfirst_sequence(self): s = Sequence("my_sequence") s.create(testing.db, checkfirst=False) assert self._has_sequence('my_sequence') s.create(testing.db, checkfirst=True) s.drop(testing.db, checkfirst=False) assert not self._has_sequence('my_sequence') s.drop(testing.db, checkfirst=True) def test_checkfirst_metadata(self): m = MetaData() Sequence("my_sequence", metadata=m) m.create_all(testing.db, checkfirst=False) assert self._has_sequence('my_sequence') m.create_all(testing.db, checkfirst=True) m.drop_all(testing.db, checkfirst=False) assert not self._has_sequence('my_sequence') m.drop_all(testing.db, checkfirst=True) def test_checkfirst_table(self): m = MetaData() s = Sequence("my_sequence") t = Table('t', m, Column('c', Integer, s, primary_key=True)) t.create(testing.db, checkfirst=False) assert self._has_sequence('my_sequence') t.create(testing.db, checkfirst=True) t.drop(testing.db, checkfirst=False) assert not self._has_sequence('my_sequence') t.drop(testing.db, checkfirst=True) @testing.provide_metadata def test_table_overrides_metadata_create(self): metadata = self.metadata Sequence("s1", metadata=metadata) s2 = Sequence("s2", metadata=metadata) s3 = Sequence("s3") t = Table('t', metadata, Column('c', Integer, s3, primary_key=True)) assert s3.metadata is metadata t.create(testing.db, checkfirst=True) s3.drop(testing.db) # 't' is created, and 's3' won't be # re-created since it's linked to 't'. # 's1' and 's2' are, however. 
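        # The rule being exercised: a Sequence attached to a Column is
        # created/dropped along with that column's Table, while a Sequence
        # associated only with a MetaData is created/dropped by
        # metadata.create_all()/drop_all().  A minimal sketch of the two
        # styles (names are illustrative, not part of this test):
        #
        #   m = MetaData()
        #   Sequence("standalone_seq", metadata=m)      # owned by m
        #   Table("tbl", m,
        #         Column("id", Integer, Sequence("tbl_seq"),
        #                primary_key=True))             # owned by "tbl"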
metadata.create_all(testing.db) assert self._has_sequence('s1') assert self._has_sequence('s2') assert not self._has_sequence('s3') s2.drop(testing.db) assert self._has_sequence('s1') assert not self._has_sequence('s2') metadata.drop_all(testing.db) assert not self._has_sequence('s1') assert not self._has_sequence('s2') cartitems = sometable = metadata = None class TableBoundSequenceTest(fixtures.TestBase): __requires__ = ('sequences',) @classmethod def setup_class(cls): global cartitems, sometable, metadata metadata = MetaData(testing.db) cartitems = Table("cartitems", metadata, Column("cart_id", Integer, Sequence('cart_id_seq'), primary_key=True), Column("description", String(40)), Column("createdate", sa.DateTime()) ) sometable = Table('Manager', metadata, Column('obj_id', Integer, Sequence('obj_id_seq')), Column('name', String(128)), Column('id', Integer, Sequence('Manager_id_seq', optional=True), primary_key=True), ) metadata.create_all() @classmethod def teardown_class(cls): metadata.drop_all() def test_insert_via_seq(self): cartitems.insert().execute(description='hi') cartitems.insert().execute(description='there') r = cartitems.insert().execute(description='lala') assert r.inserted_primary_key and r.inserted_primary_key[0] is not None id_ = r.inserted_primary_key[0] eq_(1, sa.select([func.count(cartitems.c.cart_id)], sa.and_(cartitems.c.description == 'lala', cartitems.c.cart_id == id_)).scalar()) cartitems.select().execute().fetchall() def test_seq_nonpk(self): """test sequences fire off as defaults on non-pk columns""" engine = engines.testing_engine( options={'implicit_returning': False}) result = engine.execute(sometable.insert(), name="somename") assert set(result.postfetch_cols()) == set([sometable.c.obj_id]) result = engine.execute(sometable.insert(), name="someother") assert set(result.postfetch_cols()) == set([sometable.c.obj_id]) sometable.insert().execute( {'name': 'name3'}, {'name': 'name4'}) eq_(sometable.select().order_by(sometable.c.id).execute().fetchall(), [(1, "somename", 1), (2, "someother", 2), (3, "name3", 3), (4, "name4", 4)]) class SpecialTypePKTest(fixtures.TestBase): """test process_result_value in conjunction with primary key columns. Also tests that "autoincrement" checks are against column.type._type_affinity, rather than the class of "type" itself. """ @classmethod def setup_class(cls): class MyInteger(TypeDecorator): impl = Integer def process_bind_param(self, value, dialect): if value is None: return None return int(value[4:]) def process_result_value(self, value, dialect): if value is None: return None return "INT_%d" % value cls.MyInteger = MyInteger @testing.provide_metadata def _run_test(self, *arg, **kw): metadata = self.metadata implicit_returning = kw.pop('implicit_returning', True) kw['primary_key'] = True if kw.get('autoincrement', True): kw['test_needs_autoincrement'] = True t = Table('x', metadata, Column('y', self.MyInteger, *arg, **kw), Column('data', Integer), implicit_returning=implicit_returning ) t.create() r = t.insert().values(data=5).execute() # we don't pre-fetch 'server_default'. if 'server_default' in kw and (not testing.db.dialect.implicit_returning or not implicit_returning): eq_(r.inserted_primary_key, [None]) else: eq_(r.inserted_primary_key, ['INT_1']) r.close() eq_( t.select().execute().first(), ('INT_1', 5) ) def test_plain(self): # among other things, tests that autoincrement # is enabled. 
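        # The autoincrement check uses column.type._type_affinity rather than
        # the type's class, so the MyInteger TypeDecorator defined in
        # setup_class (impl = Integer) still qualifies, and the fetched
        # primary key round-trips through process_result_value().
        # Illustrative only:
        #
        #   self.MyInteger()._type_affinity   # -> Integer
        #   # inserted_primary_key comes back as 'INT_1', not 1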
self._run_test() def test_literal_default_label(self): self._run_test(default=literal("INT_1", type_=self.MyInteger).label('foo')) def test_literal_default_no_label(self): self._run_test(default=literal("INT_1", type_=self.MyInteger)) def test_sequence(self): self._run_test(Sequence('foo_seq')) def test_server_default(self): self._run_test(server_default='1',) def test_server_default_no_autoincrement(self): self._run_test(server_default='1', autoincrement=False) def test_clause(self): stmt = select([literal("INT_1", type_=self.MyInteger)]).as_scalar() self._run_test(default=stmt) @testing.requires.returning def test_no_implicit_returning(self): self._run_test(implicit_returning=False) @testing.requires.returning def test_server_default_no_implicit_returning(self): self._run_test(server_default='1', autoincrement=False) class ServerDefaultsOnPKTest(fixtures.TestBase): @testing.provide_metadata def test_string_default_none_on_insert(self): """Test that without implicit returning, we return None for a string server default. That is, we don't want to attempt to pre-execute "server_default" generically - the user should use a Python side-default for a case like this. Testing that all backends do the same thing here. """ metadata = self.metadata t = Table('x', metadata, Column('y', String(10), server_default='key_one', primary_key=True), Column('data', String(10)), implicit_returning=False ) metadata.create_all() r = t.insert().execute(data='data') eq_(r.inserted_primary_key, [None]) eq_( t.select().execute().fetchall(), [('key_one', 'data')] ) @testing.requires.returning @testing.provide_metadata def test_string_default_on_insert_with_returning(self): """With implicit_returning, we get a string PK default back no problem.""" metadata = self.metadata t = Table('x', metadata, Column('y', String(10), server_default='key_one', primary_key=True), Column('data', String(10)) ) metadata.create_all() r = t.insert().execute(data='data') eq_(r.inserted_primary_key, ['key_one']) eq_( t.select().execute().fetchall(), [('key_one', 'data')] ) @testing.provide_metadata def test_int_default_none_on_insert(self): metadata = self.metadata t = Table('x', metadata, Column('y', Integer, server_default='5', primary_key=True), Column('data', String(10)), implicit_returning=False ) assert t._autoincrement_column is None metadata.create_all() r = t.insert().execute(data='data') eq_(r.inserted_primary_key, [None]) if testing.against('sqlite'): eq_( t.select().execute().fetchall(), [(1, 'data')] ) else: eq_( t.select().execute().fetchall(), [(5, 'data')] ) @testing.provide_metadata def test_autoincrement_reflected_from_server_default(self): metadata = self.metadata t = Table('x', metadata, Column('y', Integer, server_default='5', primary_key=True), Column('data', String(10)), implicit_returning=False ) assert t._autoincrement_column is None metadata.create_all() m2 = MetaData(metadata.bind) t2 = Table('x', m2, autoload=True, implicit_returning=False) assert t2._autoincrement_column is None @testing.provide_metadata def test_int_default_none_on_insert_reflected(self): metadata = self.metadata Table('x', metadata, Column('y', Integer, server_default='5', primary_key=True), Column('data', String(10)), implicit_returning=False ) metadata.create_all() m2 = MetaData(metadata.bind) t2 = Table('x', m2, autoload=True, implicit_returning=False) r = t2.insert().execute(data='data') eq_(r.inserted_primary_key, [None]) if testing.against('sqlite'): eq_( t2.select().execute().fetchall(), [(1, 'data')] ) else: eq_( 
t2.select().execute().fetchall(), [(5, 'data')] ) @testing.requires.returning @testing.provide_metadata def test_int_default_on_insert_with_returning(self): metadata = self.metadata t = Table('x', metadata, Column('y', Integer, server_default='5', primary_key=True), Column('data', String(10)) ) metadata.create_all() r = t.insert().execute(data='data') eq_(r.inserted_primary_key, [5]) eq_( t.select().execute().fetchall(), [(5, 'data')] ) class UnicodeDefaultsTest(fixtures.TestBase): def test_no_default(self): Column(Unicode(32)) def test_unicode_default(self): # Py3K #default = 'foo' # Py2K default = u'foo' # end Py2K Column(Unicode(32), default=default) def test_nonunicode_default(self): # Py3K #default = b'foo' # Py2K default = 'foo' # end Py2K assert_raises_message( sa.exc.SAWarning, "Unicode column received non-unicode default value.", Column, Unicode(32), default=default ) SQLAlchemy-0.8.4/test/sql/test_delete.py0000644000076500000240000000547712251147172020701 0ustar classicstaff00000000000000#! coding:utf-8 from sqlalchemy import Column, Integer, String, Table, delete, select from sqlalchemy.dialects import mysql from sqlalchemy.testing import AssertsCompiledSQL, fixtures class _DeleteTestBase(object): @classmethod def define_tables(cls, metadata): Table('mytable', metadata, Column('myid', Integer), Column('name', String(30)), Column('description', String(50))) Table('myothertable', metadata, Column('otherid', Integer), Column('othername', String(30))) class DeleteTest(_DeleteTestBase, fixtures.TablesTest, AssertsCompiledSQL): __dialect__ = 'default' def test_delete(self): table1 = self.tables.mytable self.assert_compile( delete(table1, table1.c.myid == 7), 'DELETE FROM mytable WHERE mytable.myid = :myid_1') self.assert_compile( table1.delete().where(table1.c.myid == 7), 'DELETE FROM mytable WHERE mytable.myid = :myid_1') self.assert_compile( table1.delete(). where(table1.c.myid == 7). where(table1.c.name == 'somename'), 'DELETE FROM mytable ' 'WHERE mytable.myid = :myid_1 ' 'AND mytable.name = :name_1') def test_prefix_with(self): table1 = self.tables.mytable stmt = table1.delete().\ prefix_with('A', 'B', dialect='mysql').\ prefix_with('C', 'D') self.assert_compile(stmt, 'DELETE C D FROM mytable') self.assert_compile(stmt, 'DELETE A B C D FROM mytable', dialect=mysql.dialect()) def test_alias(self): table1 = self.tables.mytable talias1 = table1.alias('t1') stmt = delete(talias1).where(talias1.c.myid == 7) self.assert_compile(stmt, 'DELETE FROM mytable AS t1 WHERE t1.myid = :myid_1') def test_correlated(self): table1, table2 = self.tables.mytable, self.tables.myothertable # test a non-correlated WHERE clause s = select([table2.c.othername], table2.c.otherid == 7) self.assert_compile(delete(table1, table1.c.name == s), 'DELETE FROM mytable ' 'WHERE mytable.name = (' 'SELECT myothertable.othername ' 'FROM myothertable ' 'WHERE myothertable.otherid = :otherid_1' ')') # test one that is actually correlated... 
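        # Because the subquery below references table1 (mytable.myid), the
        # compiler auto-correlates it against the enclosing DELETE: mytable is
        # omitted from the subquery's FROM list, and
        # "myothertable.otherid = mytable.myid" refers to the row being
        # deleted, as the expected SQL in the assertion shows.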
s = select([table2.c.othername], table2.c.otherid == table1.c.myid) self.assert_compile(table1.delete(table1.c.name == s), 'DELETE FROM mytable ' 'WHERE mytable.name = (' 'SELECT myothertable.othername ' 'FROM myothertable ' 'WHERE myothertable.otherid = mytable.myid' ')') SQLAlchemy-0.8.4/test/sql/test_functions.py0000644000076500000240000004164512251150016021433 0ustar classicstaff00000000000000from sqlalchemy.testing import eq_ import datetime from sqlalchemy import * from sqlalchemy.sql import table, column from sqlalchemy import sql, util from sqlalchemy.sql.compiler import BIND_TEMPLATES from sqlalchemy.testing.engines import all_dialects from sqlalchemy import types as sqltypes from sqlalchemy.sql import functions from sqlalchemy.sql.functions import GenericFunction import decimal from sqlalchemy import testing from sqlalchemy.testing import fixtures, AssertsCompiledSQL, engines from sqlalchemy.dialects import sqlite, postgresql, mysql, oracle class CompileTest(fixtures.TestBase, AssertsCompiledSQL): __dialect__ = 'default' def tear_down(self): functions._registry.clear() def test_compile(self): for dialect in all_dialects(exclude=('sybase', 'access', 'informix', 'maxdb')): bindtemplate = BIND_TEMPLATES[dialect.paramstyle] self.assert_compile(func.current_timestamp(), "CURRENT_TIMESTAMP", dialect=dialect) self.assert_compile(func.localtime(), "LOCALTIME", dialect=dialect) if dialect.name in ('firebird', 'maxdb'): self.assert_compile(func.nosuchfunction(), "nosuchfunction", dialect=dialect) else: self.assert_compile(func.nosuchfunction(), "nosuchfunction()", dialect=dialect) # test generic function compile class fake_func(GenericFunction): __return_type__ = sqltypes.Integer def __init__(self, arg, **kwargs): GenericFunction.__init__(self, arg, **kwargs) self.assert_compile( fake_func('foo'), "fake_func(%s)" % bindtemplate % {'name': 'param_1', 'position': 1}, dialect=dialect) def test_use_labels(self): self.assert_compile(select([func.foo()], use_labels=True), "SELECT foo() AS foo_1" ) def test_underscores(self): self.assert_compile(func.if_(), "if()") def test_generic_now(self): assert isinstance(func.now().type, sqltypes.DateTime) for ret, dialect in [ ('CURRENT_TIMESTAMP', sqlite.dialect()), ('now()', postgresql.dialect()), ('now()', mysql.dialect()), ('CURRENT_TIMESTAMP', oracle.dialect()) ]: self.assert_compile(func.now(), ret, dialect=dialect) def test_generic_random(self): assert func.random().type == sqltypes.NULLTYPE assert isinstance(func.random(type_=Integer).type, Integer) for ret, dialect in [ ('random()', sqlite.dialect()), ('random()', postgresql.dialect()), ('rand()', mysql.dialect()), ('random()', oracle.dialect()) ]: self.assert_compile(func.random(), ret, dialect=dialect) def test_custom_default_namespace(self): class myfunc(GenericFunction): pass assert isinstance(func.myfunc(), myfunc) def test_custom_type(self): class myfunc(GenericFunction): type = DateTime assert isinstance(func.myfunc().type, DateTime) def test_custom_legacy_type(self): # in case someone was using this system class myfunc(GenericFunction): __return_type__ = DateTime assert isinstance(func.myfunc().type, DateTime) def test_custom_w_custom_name(self): class myfunc(GenericFunction): name = "notmyfunc" assert isinstance(func.notmyfunc(), myfunc) assert not isinstance(func.myfunc(), myfunc) def test_custom_package_namespace(self): def cls1(pk_name): class myfunc(GenericFunction): package = pk_name return myfunc f1 = cls1("mypackage") f2 = cls1("myotherpackage") assert 
isinstance(func.mypackage.myfunc(), f1) assert isinstance(func.myotherpackage.myfunc(), f2) def test_custom_name(self): class MyFunction(GenericFunction): name = 'my_func' def __init__(self, *args): args = args + (3,) super(MyFunction, self).__init__(*args) self.assert_compile( func.my_func(1, 2), "my_func(:param_1, :param_2, :param_3)" ) def test_custom_registered_identifier(self): class GeoBuffer(GenericFunction): type = Integer package = "geo" name = "BufferOne" identifier = "buf1" class GeoBuffer2(GenericFunction): type = Integer name = "BufferTwo" identifier = "buf2" class BufferThree(GenericFunction): type = Integer identifier = "buf3" self.assert_compile( func.geo.buf1(), "BufferOne()" ) self.assert_compile( func.buf2(), "BufferTwo()" ) self.assert_compile( func.buf3(), "BufferThree()" ) def test_custom_args(self): class myfunc(GenericFunction): pass self.assert_compile( myfunc(1, 2, 3), "myfunc(:param_1, :param_2, :param_3)" ) def test_namespacing_conflicts(self): self.assert_compile(func.text('foo'), 'text(:text_1)') def test_generic_count(self): assert isinstance(func.count().type, sqltypes.Integer) self.assert_compile(func.count(), 'count(*)') self.assert_compile(func.count(1), 'count(:param_1)') c = column('abc') self.assert_compile(func.count(c), 'count(abc)') def test_constructor(self): try: func.current_timestamp('somearg') assert False except TypeError: assert True try: func.char_length('a', 'b') assert False except TypeError: assert True try: func.char_length() assert False except TypeError: assert True def test_return_type_detection(self): for fn in [func.coalesce, func.max, func.min, func.sum]: for args, type_ in [ ((datetime.date(2007, 10, 5), datetime.date(2005, 10, 15)), sqltypes.Date), ((3, 5), sqltypes.Integer), ((decimal.Decimal(3), decimal.Decimal(5)), sqltypes.Numeric), (("foo", "bar"), sqltypes.String), ((datetime.datetime(2007, 10, 5, 8, 3, 34), datetime.datetime(2005, 10, 15, 14, 45, 33)), sqltypes.DateTime) ]: assert isinstance(fn(*args).type, type_), \ "%s / %s" % (fn(), type_) assert isinstance(func.concat("foo", "bar").type, sqltypes.String) def test_assorted(self): table1 = table('mytable', column('myid', Integer), ) table2 = table( 'myothertable', column('otherid', Integer), ) # test an expression with a function self.assert_compile(func.lala(3, 4, literal("five"), table1.c.myid) * table2.c.otherid, "lala(:lala_1, :lala_2, :param_1, mytable.myid) * " "myothertable.otherid") # test it in a SELECT self.assert_compile(select([func.count(table1.c.myid)]), "SELECT count(mytable.myid) AS count_1 FROM mytable") # test a "dotted" function name self.assert_compile(select([func.foo.bar.lala(table1.c.myid)]), "SELECT foo.bar.lala(mytable.myid) AS lala_1 FROM mytable") # test the bind parameter name with a "dotted" function name is # only the name (limits the length of the bind param name) self.assert_compile(select([func.foo.bar.lala(12)]), "SELECT foo.bar.lala(:lala_2) AS lala_1") # test a dotted func off the engine itself self.assert_compile(func.lala.hoho(7), "lala.hoho(:hoho_1)") # test None becomes NULL self.assert_compile(func.my_func(1, 2, None, 3), "my_func(:my_func_1, :my_func_2, NULL, :my_func_3)") # test pickling self.assert_compile( util.pickle.loads(util.pickle.dumps( func.my_func(1, 2, None, 3))), "my_func(:my_func_1, :my_func_2, NULL, :my_func_3)") # assert func raises AttributeError for __bases__ attribute, since # its not a class fixes pydoc try: func.__bases__ assert False except AttributeError: assert True def test_functions_with_cols(self): 
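        # Using a SQL function as a FROM object: column() objects name the
        # result columns, and the function itself is passed via from_obj,
        # rendering e.g. "SELECT q, z FROM calculate(:x, :y)".  A minimal
        # sketch of the pattern (the function name "calculate" is only the
        # fixture used below, not a real database function):
        #
        #   calc = select([column('q'), column('z')],
        #                 from_obj=[func.calculate(bindparam('x', None),
        #                                          bindparam('y', None))])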
users = table('users', column('id'), column('name'), column('fullname')) calculate = select([column('q'), column('z'), column('r')], from_obj=[func.calculate(bindparam('x', None), bindparam('y', None))]) self.assert_compile(select([users], users.c.id > calculate.c.z), "SELECT users.id, users.name, users.fullname " "FROM users, (SELECT q, z, r " "FROM calculate(:x, :y)) " "WHERE users.id > z" ) s = select([users], users.c.id.between( calculate.alias('c1').unique_params(x=17, y=45).c.z, calculate.alias('c2').unique_params(x=5, y=12).c.z)) self.assert_compile(s, "SELECT users.id, users.name, users.fullname " "FROM users, (SELECT q, z, r " "FROM calculate(:x_1, :y_1)) AS c1, (SELECT q, z, r " "FROM calculate(:x_2, :y_2)) AS c2 " "WHERE users.id BETWEEN c1.z AND c2.z", checkparams={'y_1': 45, 'x_1': 17, 'y_2': 12, 'x_2': 5}) def test_non_functions(self): expr = func.cast("foo", Integer) self.assert_compile(expr, "CAST(:param_1 AS INTEGER)") expr = func.extract("year", datetime.date(2010, 12, 5)) self.assert_compile(expr, "EXTRACT(year FROM :param_1)") class ExecuteTest(fixtures.TestBase): @engines.close_first def tearDown(self): pass @testing.uses_deprecated def test_standalone_execute(self): x = testing.db.func.current_date().execute().scalar() y = testing.db.func.current_date().select().execute().scalar() z = testing.db.func.current_date().scalar() assert (x == y == z) is True # ansi func x = testing.db.func.current_date() assert isinstance(x.type, Date) assert isinstance(x.execute().scalar(), datetime.date) def test_conn_execute(self): from sqlalchemy.sql.expression import FunctionElement from sqlalchemy.ext.compiler import compiles class myfunc(FunctionElement): type = Date() @compiles(myfunc) def compile(elem, compiler, **kw): return compiler.process(func.current_date()) conn = testing.db.connect() try: x = conn.execute(func.current_date()).scalar() y = conn.execute(func.current_date().select()).scalar() z = conn.scalar(func.current_date()) q = conn.scalar(myfunc()) finally: conn.close() assert (x == y == z == q) is True def test_exec_options(self): f = func.foo() eq_(f._execution_options, {}) f = f.execution_options(foo='bar') eq_(f._execution_options, {'foo': 'bar'}) s = f.select() eq_(s._execution_options, {'foo': 'bar'}) ret = testing.db.execute(func.now().execution_options(foo='bar')) eq_(ret.context.execution_options, {'foo': 'bar'}) ret.close() @engines.close_first def test_update(self): """ Tests sending functions and SQL expressions to the VALUES and SET clauses of INSERT/UPDATE instances, and that column-level defaults get overridden. 
""" meta = MetaData(testing.db) t = Table('t1', meta, Column('id', Integer, Sequence('t1idseq', optional=True), primary_key=True), Column('value', Integer) ) t2 = Table('t2', meta, Column('id', Integer, Sequence('t2idseq', optional=True), primary_key=True), Column('value', Integer, default=7), Column('stuff', String(20), onupdate="thisisstuff") ) meta.create_all() try: t.insert(values=dict(value=func.length("one"))).execute() assert t.select().execute().first()['value'] == 3 t.update(values=dict(value=func.length("asfda"))).execute() assert t.select().execute().first()['value'] == 5 r = t.insert(values=dict(value=func.length("sfsaafsda"))).execute() id = r.inserted_primary_key[0] assert t.select(t.c.id == id).execute().first()['value'] == 9 t.update(values={t.c.value: func.length("asdf")}).execute() assert t.select().execute().first()['value'] == 4 print "--------------------------" t2.insert().execute() t2.insert(values=dict(value=func.length("one"))).execute() t2.insert(values=dict(value=func.length("asfda") + -19)).\ execute(stuff="hi") res = exec_sorted(select([t2.c.value, t2.c.stuff])) eq_(res, [(-14, 'hi'), (3, None), (7, None)]) t2.update(values=dict(value=func.length("asdsafasd"))).\ execute(stuff="some stuff") assert select([t2.c.value, t2.c.stuff]).execute().fetchall() == \ [(9, "some stuff"), (9, "some stuff"), (9, "some stuff")] t2.delete().execute() t2.insert(values=dict(value=func.length("one") + 8)).execute() assert t2.select().execute().first()['value'] == 11 t2.update(values=dict(value=func.length("asfda"))).execute() eq_( select([t2.c.value, t2.c.stuff]).execute().first(), (5, "thisisstuff") ) t2.update(values={t2.c.value: func.length("asfdaasdf"), t2.c.stuff: "foo"}).execute() print "HI", select([t2.c.value, t2.c.stuff]).execute().first() eq_(select([t2.c.value, t2.c.stuff]).execute().first(), (9, "foo") ) finally: meta.drop_all() @testing.fails_on_everything_except('postgresql') def test_as_from(self): # TODO: shouldnt this work on oracle too ? 
        x = func.current_date(bind=testing.db).execute().scalar()
        y = func.current_date(bind=testing.db).select().execute().scalar()
        z = func.current_date(bind=testing.db).scalar()
        w = select(['*'], from_obj=[func.current_date(bind=testing.db)]).\
            scalar()

        # construct a column-based FROM object out of a function,
        # like in [ticket:172]
        s = select([sql.column('date', type_=DateTime)],
                   from_obj=[func.current_date(bind=testing.db)])
        q = s.execute().first()[s.c.date]
        r = s.alias('datequery').select().scalar()

        assert x == y == z == w == q == r

    def test_extract_bind(self):
        """Basic common denominator execution tests for extract()"""

        date = datetime.date(2010, 5, 1)

        def execute(field):
            return testing.db.execute(select([extract(field, date)])).scalar()

        assert execute('year') == 2010
        assert execute('month') == 5
        assert execute('day') == 1

        date = datetime.datetime(2010, 5, 1, 12, 11, 10)

        assert execute('year') == 2010
        assert execute('month') == 5
        assert execute('day') == 1

    def test_extract_expression(self):
        meta = MetaData(testing.db)
        table = Table('test', meta,
                      Column('dt', DateTime),
                      Column('d', Date))
        meta.create_all()
        try:
            table.insert().execute(
                {'dt': datetime.datetime(2010, 5, 1, 12, 11, 10),
                 'd': datetime.date(2010, 5, 1)})
            rs = select([extract('year', table.c.dt),
                         extract('month', table.c.d)]).execute()
            row = rs.first()
            assert row[0] == 2010
            assert row[1] == 5
            rs.close()
        finally:
            meta.drop_all()


def exec_sorted(statement, *args, **kw):
    """Executes a statement and returns a sorted list of plain tuple rows."""

    return sorted([tuple(row)
                   for row in statement.execute(*args, **kw).fetchall()])
SQLAlchemy-0.8.4/test/sql/test_generative.py0000644000076500000240000015770412251150016021560 0ustar classicstaff00000000000000from sqlalchemy import *
from sqlalchemy.sql import table, column, ClauseElement, operators
from sqlalchemy.sql.expression import _clone, _from_objects
from sqlalchemy.testing import fixtures, AssertsExecutionResults, \
    AssertsCompiledSQL
from sqlalchemy import testing
from sqlalchemy.sql.visitors import ClauseVisitor, CloningVisitor, \
    cloned_traverse, ReplacingCloningVisitor
from sqlalchemy import exc
from sqlalchemy.sql import util as sql_util
from sqlalchemy.testing import eq_, is_, assert_raises, assert_raises_message


class TraversalTest(fixtures.TestBase, AssertsExecutionResults):
    """test ClauseVisitor's traversal, particularly its ability to copy
    and modify a ClauseElement in place."""

    @classmethod
    def setup_class(cls):
        global A, B

        # establish two fictitious ClauseElements.
        # define deep equality semantics as well as deep
        # identity semantics.
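# --- Illustrative aside (not part of the original test module) ---
# The TraversalTest docstring above describes what CloningVisitor does:
# traversal produces a copy of the element tree, so visitor mutations do
# not touch the original.  A minimal standalone sketch with made-up names,
# mirroring the pattern used by test_select further below:
from sqlalchemy.sql import table, column
from sqlalchemy.sql.visitors import CloningVisitor

t = table("t", column("x"), column("y"))
stmt = t.select().where(t.c.x == 5)

class AddCriterion(CloningVisitor):
    def visit_select(self, select):
        select.append_whereclause(t.c.y == 10)

copied = AddCriterion().traverse(stmt)
print(str(stmt))    # original SELECT: only the x criterion
print(str(copied))  # cloned SELECT: x and y criteria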
class A(ClauseElement): __visit_name__ = 'a' def __init__(self, expr): self.expr = expr def is_other(self, other): return other is self __hash__ = ClauseElement.__hash__ def __eq__(self, other): return other.expr == self.expr def __ne__(self, other): return other.expr != self.expr def __str__(self): return "A(%s)" % repr(self.expr) class B(ClauseElement): __visit_name__ = 'b' def __init__(self, *items): self.items = items def is_other(self, other): if other is not self: return False for i1, i2 in zip(self.items, other.items): if i1 is not i2: return False return True __hash__ = ClauseElement.__hash__ def __eq__(self, other): for i1, i2 in zip(self.items, other.items): if i1 != i2: return False return True def __ne__(self, other): for i1, i2 in zip(self.items, other.items): if i1 != i2: return True return False def _copy_internals(self, clone=_clone): self.items = [clone(i) for i in self.items] def get_children(self, **kwargs): return self.items def __str__(self): return "B(%s)" % repr([str(i) for i in self.items]) def test_test_classes(self): a1 = A("expr1") struct = B(a1, A("expr2"), B(A("expr1b"), A("expr2b")), A("expr3")) struct2 = B(a1, A("expr2"), B(A("expr1b"), A("expr2b")), A("expr3")) struct3 = B(a1, A("expr2"), B(A("expr1b"), A("expr2bmodified")), A("expr3")) assert a1.is_other(a1) assert struct.is_other(struct) assert struct == struct2 assert struct != struct3 assert not struct.is_other(struct2) assert not struct.is_other(struct3) def test_clone(self): struct = B(A("expr1"), A("expr2"), B(A("expr1b"), A("expr2b")), A("expr3")) class Vis(CloningVisitor): def visit_a(self, a): pass def visit_b(self, b): pass vis = Vis() s2 = vis.traverse(struct) assert struct == s2 assert not struct.is_other(s2) def test_no_clone(self): struct = B(A("expr1"), A("expr2"), B(A("expr1b"), A("expr2b")), A("expr3")) class Vis(ClauseVisitor): def visit_a(self, a): pass def visit_b(self, b): pass vis = Vis() s2 = vis.traverse(struct) assert struct == s2 assert struct.is_other(s2) def test_change_in_place(self): struct = B(A("expr1"), A("expr2"), B(A("expr1b"), A("expr2b")), A("expr3")) struct2 = B(A("expr1"), A("expr2modified"), B(A("expr1b"), A("expr2b")), A("expr3")) struct3 = B(A("expr1"), A("expr2"), B(A("expr1b"), A("expr2bmodified")), A("expr3")) class Vis(CloningVisitor): def visit_a(self, a): if a.expr == "expr2": a.expr = "expr2modified" def visit_b(self, b): pass vis = Vis() s2 = vis.traverse(struct) assert struct != s2 assert not struct.is_other(s2) assert struct2 == s2 class Vis2(CloningVisitor): def visit_a(self, a): if a.expr == "expr2b": a.expr = "expr2bmodified" def visit_b(self, b): pass vis2 = Vis2() s3 = vis2.traverse(struct) assert struct != s3 assert struct3 == s3 def test_visit_name(self): # override fns in testlib/schema.py from sqlalchemy import Column class CustomObj(Column): pass assert CustomObj.__visit_name__ == Column.__visit_name__ == 'column' foo, bar = CustomObj('foo', String), CustomObj('bar', String) bin = foo == bar set(ClauseVisitor().iterate(bin)) assert set(ClauseVisitor().iterate(bin)) == set([foo, bar, bin]) class BinaryEndpointTraversalTest(fixtures.TestBase): """test the special binary product visit""" def _assert_traversal(self, expr, expected): canary = [] def visit(binary, l, r): canary.append((binary.operator, l, r)) print binary.operator, l, r sql_util.visit_binary_product(visit, expr) eq_( canary, expected ) def test_basic(self): a, b = column("a"), column("b") self._assert_traversal( a == b, [ (operators.eq, a, b) ] ) def test_with_tuples(self): a, b, c, d, 
b1, b1a, b1b, e, f = ( column("a"), column("b"), column("c"), column("d"), column("b1"), column("b1a"), column("b1b"), column("e"), column("f") ) expr = tuple_( a, b, b1 == tuple_(b1a, b1b == d), c ) > tuple_( func.go(e + f) ) self._assert_traversal( expr, [ (operators.gt, a, e), (operators.gt, a, f), (operators.gt, b, e), (operators.gt, b, f), (operators.eq, b1, b1a), (operators.eq, b1b, d), (operators.gt, c, e), (operators.gt, c, f) ] ) def test_composed(self): a, b, e, f, q, j, r = ( column("a"), column("b"), column("e"), column("f"), column("q"), column("j"), column("r"), ) expr = and_( (a + b) == q + func.sum(e + f), and_( j == r, f == q ) ) self._assert_traversal( expr, [ (operators.eq, a, q), (operators.eq, a, e), (operators.eq, a, f), (operators.eq, b, q), (operators.eq, b, e), (operators.eq, b, f), (operators.eq, j, r), (operators.eq, f, q), ] ) def test_subquery(self): a, b, c = column("a"), column("b"), column("c") subq = select([c]).where(c == a).as_scalar() expr = and_(a == b, b == subq) self._assert_traversal( expr, [ (operators.eq, a, b), (operators.eq, b, subq), ] ) class ClauseTest(fixtures.TestBase, AssertsCompiledSQL): """test copy-in-place behavior of various ClauseElements.""" __dialect__ = 'default' @classmethod def setup_class(cls): global t1, t2, t3 t1 = table("table1", column("col1"), column("col2"), column("col3"), ) t2 = table("table2", column("col1"), column("col2"), column("col3"), ) t3 = Table('table3', MetaData(), Column('col1', Integer), Column('col2', Integer) ) def test_binary(self): clause = t1.c.col2 == t2.c.col2 eq_(str(clause), str(CloningVisitor().traverse(clause))) def test_binary_anon_label_quirk(self): t = table('t1', column('col1')) f = t.c.col1 * 5 self.assert_compile(select([f]), "SELECT t1.col1 * :col1_1 AS anon_1 FROM t1") f.anon_label a = t.alias() f = sql_util.ClauseAdapter(a).traverse(f) self.assert_compile(select([f]), "SELECT t1_1.col1 * :col1_1 AS anon_1 FROM t1 AS t1_1") def test_join(self): clause = t1.join(t2, t1.c.col2 == t2.c.col2) c1 = str(clause) assert str(clause) == str(CloningVisitor().traverse(clause)) class Vis(CloningVisitor): def visit_binary(self, binary): binary.right = t2.c.col3 clause2 = Vis().traverse(clause) assert c1 == str(clause) assert str(clause2) == str(t1.join(t2, t1.c.col2 == t2.c.col3)) def test_aliased_column_adapt(self): clause = t1.select() aliased = t1.select().alias() aliased2 = t1.alias() adapter = sql_util.ColumnAdapter(aliased) f = select([ adapter.columns[c] for c in aliased2.c ]).select_from(aliased) s = select([aliased2]).select_from(aliased) eq_(str(s), str(f)) f = select([ adapter.columns[func.count(aliased2.c.col1)] ]).select_from(aliased) eq_( str(select([func.count(aliased2.c.col1)]).select_from(aliased)), str(f) ) def test_aliased_cloned_column_adapt_inner(self): clause = select([t1.c.col1, func.foo(t1.c.col2).label('foo')]) aliased1 = select([clause.c.col1, clause.c.foo]) aliased2 = clause aliased2.c.col1, aliased2.c.foo aliased3 = cloned_traverse(aliased2, {}, {}) # fixed by [ticket:2419]. the inside columns # on aliased3 have _is_clone_of pointers to those of # aliased2. corresponding_column checks these # now. 
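# --- Illustrative aside (not part of the original test module) ---
# The adapter tests in this module drive sql_util.ColumnAdapter /
# ClauseAdapter, which rewrite an expression so that references to a
# selectable are replaced by references to an alias of it.  A minimal
# standalone sketch with made-up names:
from sqlalchemy.sql import table, column
from sqlalchemy.sql import util as sql_util

t = table("t", column("x"))
t_alias = t.alias("t_1")

expr = t.c.x == 5
adapted = sql_util.ClauseAdapter(t_alias).traverse(expr)
print(str(expr))     # t.x = :x_1
print(str(adapted))  # t_1.x = :x_1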
adapter = sql_util.ColumnAdapter(aliased1) f1 = select([ adapter.columns[c] for c in aliased2._raw_columns ]) f2 = select([ adapter.columns[c] for c in aliased3._raw_columns ]) eq_( str(f1), str(f2) ) def test_aliased_cloned_column_adapt_exported(self): clause = select([t1.c.col1, func.foo(t1.c.col2).label('foo')]) aliased1 = select([clause.c.col1, clause.c.foo]) aliased2 = clause aliased2.c.col1, aliased2.c.foo aliased3 = cloned_traverse(aliased2, {}, {}) # also fixed by [ticket:2419]. When we look at the # *outside* columns of aliased3, they previously did not # have an _is_clone_of pointer. But we now modified _make_proxy # to assign this. adapter = sql_util.ColumnAdapter(aliased1) f1 = select([ adapter.columns[c] for c in aliased2.c ]) f2 = select([ adapter.columns[c] for c in aliased3.c ]) eq_( str(f1), str(f2) ) def test_aliased_cloned_schema_column_adapt_exported(self): clause = select([t3.c.col1, func.foo(t3.c.col2).label('foo')]) aliased1 = select([clause.c.col1, clause.c.foo]) aliased2 = clause aliased2.c.col1, aliased2.c.foo aliased3 = cloned_traverse(aliased2, {}, {}) # also fixed by [ticket:2419]. When we look at the # *outside* columns of aliased3, they previously did not # have an _is_clone_of pointer. But we now modified _make_proxy # to assign this. adapter = sql_util.ColumnAdapter(aliased1) f1 = select([ adapter.columns[c] for c in aliased2.c ]) f2 = select([ adapter.columns[c] for c in aliased3.c ]) eq_( str(f1), str(f2) ) def test_text(self): clause = text( "select * from table where foo=:bar", bindparams=[bindparam('bar')]) c1 = str(clause) class Vis(CloningVisitor): def visit_textclause(self, text): text.text = text.text + " SOME MODIFIER=:lala" text.bindparams['lala'] = bindparam('lala') clause2 = Vis().traverse(clause) assert c1 == str(clause) assert str(clause2) == c1 + " SOME MODIFIER=:lala" assert clause.bindparams.keys() == ['bar'] assert set(clause2.bindparams.keys()) == set(['bar', 'lala']) def test_select(self): s2 = select([t1]) s2_assert = str(s2) s3_assert = str(select([t1], t1.c.col2 == 7)) class Vis(CloningVisitor): def visit_select(self, select): select.append_whereclause(t1.c.col2 == 7) s3 = Vis().traverse(s2) assert str(s3) == s3_assert assert str(s2) == s2_assert print str(s2) print str(s3) class Vis(ClauseVisitor): def visit_select(self, select): select.append_whereclause(t1.c.col2 == 7) Vis().traverse(s2) assert str(s2) == s3_assert s4_assert = str(select([t1], and_(t1.c.col2 == 7, t1.c.col3 == 9))) class Vis(CloningVisitor): def visit_select(self, select): select.append_whereclause(t1.c.col3 == 9) s4 = Vis().traverse(s3) print str(s3) print str(s4) assert str(s4) == s4_assert assert str(s3) == s3_assert s5_assert = str(select([t1], and_(t1.c.col2 == 7, t1.c.col1 == 9))) class Vis(CloningVisitor): def visit_binary(self, binary): if binary.left is t1.c.col3: binary.left = t1.c.col1 binary.right = bindparam("col1", unique=True) s5 = Vis().traverse(s4) print str(s4) print str(s5) assert str(s5) == s5_assert assert str(s4) == s4_assert def test_union(self): u = union(t1.select(), t2.select()) u2 = CloningVisitor().traverse(u) assert str(u) == str(u2) assert [str(c) for c in u2.c] == [str(c) for c in u.c] u = union(t1.select(), t2.select()) cols = [str(c) for c in u.c] u2 = CloningVisitor().traverse(u) assert str(u) == str(u2) assert [str(c) for c in u2.c] == cols s1 = select([t1], t1.c.col1 == bindparam('id_param')) s2 = select([t2]) u = union(s1, s2) u2 = u.params(id_param=7) u3 = u.params(id_param=10) assert str(u) == str(u2) == str(u3) assert 
u2.compile().params == {'id_param':7} assert u3.compile().params == {'id_param':10} def test_in(self): expr = t1.c.col1.in_(['foo', 'bar']) expr2 = CloningVisitor().traverse(expr) assert str(expr) == str(expr2) def test_over(self): expr = func.row_number().over(order_by=t1.c.col1) expr2 = CloningVisitor().traverse(expr) assert str(expr) == str(expr2) def test_adapt_union(self): u = union( t1.select().where(t1.c.col1 == 4), t1.select().where(t1.c.col1 == 5) ).alias() assert sql_util.ClauseAdapter(u).traverse(t1) is u def test_binds(self): """test that unique bindparams change their name upon clone() to prevent conflicts""" s = select([t1], t1.c.col1 == bindparam(None, unique=True)).alias() s2 = CloningVisitor().traverse(s).alias() s3 = select([s], s.c.col2 == s2.c.col2) self.assert_compile(s3, "SELECT anon_1.col1, anon_1.col2, anon_1.col3 FROM " "(SELECT table1.col1 AS col1, table1.col2 AS col2, " "table1.col3 AS col3 FROM table1 WHERE table1.col1 = :param_1) " "AS anon_1, " "(SELECT table1.col1 AS col1, table1.col2 AS col2, table1.col3 " "AS col3 FROM table1 WHERE table1.col1 = :param_2) AS anon_2 " "WHERE anon_1.col2 = anon_2.col2") s = select([t1], t1.c.col1 == 4).alias() s2 = CloningVisitor().traverse(s).alias() s3 = select([s], s.c.col2 == s2.c.col2) self.assert_compile(s3, "SELECT anon_1.col1, anon_1.col2, anon_1.col3 FROM " "(SELECT table1.col1 AS col1, table1.col2 AS col2, " "table1.col3 AS col3 FROM table1 WHERE table1.col1 = :col1_1) " "AS anon_1, " "(SELECT table1.col1 AS col1, table1.col2 AS col2, table1.col3 " "AS col3 FROM table1 WHERE table1.col1 = :col1_2) AS anon_2 " "WHERE anon_1.col2 = anon_2.col2") def test_extract(self): s = select([extract('foo', t1.c.col1).label('col1')]) self.assert_compile(s, "SELECT EXTRACT(foo FROM table1.col1) AS col1 FROM table1") s2 = CloningVisitor().traverse(s).alias() s3 = select([s2.c.col1]) self.assert_compile(s, "SELECT EXTRACT(foo FROM table1.col1) AS col1 FROM table1") self.assert_compile(s3, "SELECT anon_1.col1 FROM (SELECT EXTRACT(foo FROM " "table1.col1) AS col1 FROM table1) AS anon_1") @testing.emits_warning('.*replaced by another column with the same key') def test_alias(self): subq = t2.select().alias('subq') s = select([t1.c.col1, subq.c.col1], from_obj=[t1, subq, t1.join(subq, t1.c.col1 == subq.c.col2)] ) orig = str(s) s2 = CloningVisitor().traverse(s) assert orig == str(s) == str(s2) s4 = CloningVisitor().traverse(s2) assert orig == str(s) == str(s2) == str(s4) s3 = sql_util.ClauseAdapter(table('foo')).traverse(s) assert orig == str(s) == str(s3) s4 = sql_util.ClauseAdapter(table('foo')).traverse(s3) assert orig == str(s) == str(s3) == str(s4) subq = subq.alias('subq') s = select([t1.c.col1, subq.c.col1], from_obj=[t1, subq, t1.join(subq, t1.c.col1 == subq.c.col2)] ) s5 = CloningVisitor().traverse(s) assert orig == str(s) == str(s5) def test_correlated_select(self): s = select(['*'], t1.c.col1 == t2.c.col1, from_obj=[t1, t2]).correlate(t2) class Vis(CloningVisitor): def visit_select(self, select): select.append_whereclause(t1.c.col2 == 7) self.assert_compile( select([t2]).where(t2.c.col1 == Vis().traverse(s)), "SELECT table2.col1, table2.col2, table2.col3 " "FROM table2 WHERE table2.col1 = " "(SELECT * FROM table1 WHERE table1.col1 = table2.col1 " "AND table1.col2 = :col2_1)" ) def test_this_thing(self): s = select([t1]).where(t1.c.col1 == 'foo').alias() s2 = select([s.c.col1]) self.assert_compile(s2, 'SELECT anon_1.col1 FROM (SELECT ' 'table1.col1 AS col1, table1.col2 AS col2, ' 'table1.col3 AS col3 FROM table1 WHERE ' 
'table1.col1 = :col1_1) AS anon_1') t1a = t1.alias() s2 = sql_util.ClauseAdapter(t1a).traverse(s2) self.assert_compile(s2, 'SELECT anon_1.col1 FROM (SELECT ' 'table1_1.col1 AS col1, table1_1.col2 AS ' 'col2, table1_1.col3 AS col3 FROM table1 ' 'AS table1_1 WHERE table1_1.col1 = ' ':col1_1) AS anon_1') def test_select_fromtwice_one(self): t1a = t1.alias() s = select([1], t1.c.col1 == t1a.c.col1, from_obj=t1a).correlate(t1a) s = select([t1]).where(t1.c.col1 == s) self.assert_compile(s, "SELECT table1.col1, table1.col2, table1.col3 FROM table1 " "WHERE table1.col1 = " "(SELECT 1 FROM table1, table1 AS table1_1 " "WHERE table1.col1 = table1_1.col1)" ) s = CloningVisitor().traverse(s) self.assert_compile(s, "SELECT table1.col1, table1.col2, table1.col3 FROM table1 " "WHERE table1.col1 = " "(SELECT 1 FROM table1, table1 AS table1_1 " "WHERE table1.col1 = table1_1.col1)") def test_select_fromtwice_two(self): s = select([t1]).where(t1.c.col1 == 'foo').alias() s2 = select([1], t1.c.col1 == s.c.col1, from_obj=s).correlate(t1) s3 = select([t1]).where(t1.c.col1 == s2) self.assert_compile(s3, "SELECT table1.col1, table1.col2, table1.col3 " "FROM table1 WHERE table1.col1 = " "(SELECT 1 FROM " "(SELECT table1.col1 AS col1, table1.col2 AS col2, " "table1.col3 AS col3 FROM table1 " "WHERE table1.col1 = :col1_1) " "AS anon_1 WHERE table1.col1 = anon_1.col1)" ) s4 = ReplacingCloningVisitor().traverse(s3) self.assert_compile(s4, "SELECT table1.col1, table1.col2, table1.col3 " "FROM table1 WHERE table1.col1 = " "(SELECT 1 FROM " "(SELECT table1.col1 AS col1, table1.col2 AS col2, " "table1.col3 AS col3 FROM table1 " "WHERE table1.col1 = :col1_1) " "AS anon_1 WHERE table1.col1 = anon_1.col1)" ) class ClauseAdapterTest(fixtures.TestBase, AssertsCompiledSQL): __dialect__ = 'default' @classmethod def setup_class(cls): global t1, t2 t1 = table("table1", column("col1"), column("col2"), column("col3"), ) t2 = table("table2", column("col1"), column("col2"), column("col3"), ) def test_correlation_on_clone(self): t1alias = t1.alias('t1alias') t2alias = t2.alias('t2alias') vis = sql_util.ClauseAdapter(t1alias) s = select(['*'], from_obj=[t1alias, t2alias]).as_scalar() assert t2alias in s._froms assert t1alias in s._froms self.assert_compile(select(['*'], t2alias.c.col1 == s), 'SELECT * FROM table2 AS t2alias WHERE ' 't2alias.col1 = (SELECT * FROM table1 AS ' 't1alias)') s = vis.traverse(s) assert t2alias not in s._froms # not present because it's been # cloned assert t1alias in s._froms # present because the adapter placed # it there # correlate list on "s" needs to take into account the full # _cloned_set for each element in _froms when correlating self.assert_compile(select(['*'], t2alias.c.col1 == s), 'SELECT * FROM table2 AS t2alias WHERE ' 't2alias.col1 = (SELECT * FROM table1 AS ' 't1alias)') s = select(['*'], from_obj=[t1alias, t2alias]).correlate(t2alias).as_scalar() self.assert_compile(select(['*'], t2alias.c.col1 == s), 'SELECT * FROM table2 AS t2alias WHERE ' 't2alias.col1 = (SELECT * FROM table1 AS ' 't1alias)') s = vis.traverse(s) self.assert_compile(select(['*'], t2alias.c.col1 == s), 'SELECT * FROM table2 AS t2alias WHERE ' 't2alias.col1 = (SELECT * FROM table1 AS ' 't1alias)') s = CloningVisitor().traverse(s) self.assert_compile(select(['*'], t2alias.c.col1 == s), 'SELECT * FROM table2 AS t2alias WHERE ' 't2alias.col1 = (SELECT * FROM table1 AS ' 't1alias)') s = select(['*']).where(t1.c.col1 == t2.c.col1).as_scalar() self.assert_compile(select([t1.c.col1, s]), 'SELECT table1.col1, (SELECT * FROM table2 ' 
'WHERE table1.col1 = table2.col1) AS ' 'anon_1 FROM table1') vis = sql_util.ClauseAdapter(t1alias) s = vis.traverse(s) self.assert_compile(select([t1alias.c.col1, s]), 'SELECT t1alias.col1, (SELECT * FROM ' 'table2 WHERE t1alias.col1 = table2.col1) ' 'AS anon_1 FROM table1 AS t1alias') s = CloningVisitor().traverse(s) self.assert_compile(select([t1alias.c.col1, s]), 'SELECT t1alias.col1, (SELECT * FROM ' 'table2 WHERE t1alias.col1 = table2.col1) ' 'AS anon_1 FROM table1 AS t1alias') s = select(['*']).where(t1.c.col1 == t2.c.col1).correlate(t1).as_scalar() self.assert_compile(select([t1.c.col1, s]), 'SELECT table1.col1, (SELECT * FROM table2 ' 'WHERE table1.col1 = table2.col1) AS ' 'anon_1 FROM table1') vis = sql_util.ClauseAdapter(t1alias) s = vis.traverse(s) self.assert_compile(select([t1alias.c.col1, s]), 'SELECT t1alias.col1, (SELECT * FROM ' 'table2 WHERE t1alias.col1 = table2.col1) ' 'AS anon_1 FROM table1 AS t1alias') s = CloningVisitor().traverse(s) self.assert_compile(select([t1alias.c.col1, s]), 'SELECT t1alias.col1, (SELECT * FROM ' 'table2 WHERE t1alias.col1 = table2.col1) ' 'AS anon_1 FROM table1 AS t1alias') @testing.fails_on_everything_except() def test_joins_dont_adapt(self): # adapting to a join, i.e. ClauseAdapter(t1.join(t2)), doesn't # make much sense. ClauseAdapter doesn't make any changes if # it's against a straight join. users = table('users', column('id')) addresses = table('addresses', column('id'), column('user_id')) ualias = users.alias() s = select([func.count(addresses.c.id)], users.c.id == addresses.c.user_id).correlate(users) s = sql_util.ClauseAdapter(ualias).traverse(s) j1 = addresses.join(ualias, addresses.c.user_id == ualias.c.id) self.assert_compile(sql_util.ClauseAdapter(j1).traverse(s), 'SELECT count(addresses.id) AS count_1 ' 'FROM addresses WHERE users_1.id = ' 'addresses.user_id') def test_table_to_alias_1(self): t1alias = t1.alias('t1alias') vis = sql_util.ClauseAdapter(t1alias) ff = vis.traverse(func.count(t1.c.col1).label('foo')) assert list(_from_objects(ff)) == [t1alias] def test_table_to_alias_2(self): t1alias = t1.alias('t1alias') vis = sql_util.ClauseAdapter(t1alias) self.assert_compile(vis.traverse(select(['*'], from_obj=[t1])), 'SELECT * FROM table1 AS t1alias') def test_table_to_alias_3(self): t1alias = t1.alias('t1alias') vis = sql_util.ClauseAdapter(t1alias) self.assert_compile(select(['*'], t1.c.col1 == t2.c.col2), 'SELECT * FROM table1, table2 WHERE ' 'table1.col1 = table2.col2') def test_table_to_alias_4(self): t1alias = t1.alias('t1alias') vis = sql_util.ClauseAdapter(t1alias) self.assert_compile(vis.traverse(select(['*'], t1.c.col1 == t2.c.col2)), 'SELECT * FROM table1 AS t1alias, table2 ' 'WHERE t1alias.col1 = table2.col2') def test_table_to_alias_5(self): t1alias = t1.alias('t1alias') vis = sql_util.ClauseAdapter(t1alias) self.assert_compile(vis.traverse(select(['*'], t1.c.col1 == t2.c.col2, from_obj=[t1, t2])), 'SELECT * FROM table1 AS t1alias, table2 ' 'WHERE t1alias.col1 = table2.col2') def test_table_to_alias_6(self): t1alias = t1.alias('t1alias') vis = sql_util.ClauseAdapter(t1alias) self.assert_compile( select([t1alias, t2]).where(t1alias.c.col1 == vis.traverse(select(['*'], t1.c.col1 == t2.c.col2, from_obj=[t1, t2]).correlate(t1))), "SELECT t1alias.col1, t1alias.col2, t1alias.col3, " "table2.col1, table2.col2, table2.col3 " "FROM table1 AS t1alias, table2 WHERE t1alias.col1 = " "(SELECT * FROM table2 WHERE t1alias.col1 = table2.col2)" ) def test_table_to_alias_7(self): t1alias = t1.alias('t1alias') vis = 
sql_util.ClauseAdapter(t1alias) self.assert_compile( select([t1alias, t2]).where(t1alias.c.col1 == vis.traverse(select(['*'], t1.c.col1 == t2.c.col2, from_obj=[t1, t2]).correlate(t2))), "SELECT t1alias.col1, t1alias.col2, t1alias.col3, " "table2.col1, table2.col2, table2.col3 " "FROM table1 AS t1alias, table2 " "WHERE t1alias.col1 = " "(SELECT * FROM table1 AS t1alias " "WHERE t1alias.col1 = table2.col2)") def test_table_to_alias_8(self): t1alias = t1.alias('t1alias') vis = sql_util.ClauseAdapter(t1alias) self.assert_compile(vis.traverse(case([(t1.c.col1 == 5, t1.c.col2)], else_=t1.c.col1)), 'CASE WHEN (t1alias.col1 = :col1_1) THEN ' 't1alias.col2 ELSE t1alias.col1 END') def test_table_to_alias_9(self): t1alias = t1.alias('t1alias') vis = sql_util.ClauseAdapter(t1alias) self.assert_compile(vis.traverse(case([(5, t1.c.col2)], value=t1.c.col1, else_=t1.c.col1)), 'CASE t1alias.col1 WHEN :param_1 THEN ' 't1alias.col2 ELSE t1alias.col1 END') def test_table_to_alias_10(self): s = select(['*'], from_obj=[t1]).alias('foo') self.assert_compile(s.select(), 'SELECT foo.* FROM (SELECT * FROM table1) ' 'AS foo') def test_table_to_alias_11(self): s = select(['*'], from_obj=[t1]).alias('foo') t1alias = t1.alias('t1alias') vis = sql_util.ClauseAdapter(t1alias) self.assert_compile(vis.traverse(s.select()), 'SELECT foo.* FROM (SELECT * FROM table1 ' 'AS t1alias) AS foo') def test_table_to_alias_12(self): s = select(['*'], from_obj=[t1]).alias('foo') self.assert_compile(s.select(), 'SELECT foo.* FROM (SELECT * FROM table1) ' 'AS foo') def test_table_to_alias_13(self): t1alias = t1.alias('t1alias') vis = sql_util.ClauseAdapter(t1alias) ff = vis.traverse(func.count(t1.c.col1).label('foo')) self.assert_compile(select([ff]), 'SELECT count(t1alias.col1) AS foo FROM ' 'table1 AS t1alias') assert list(_from_objects(ff)) == [t1alias] #def test_table_to_alias_2(self): # TODO: self.assert_compile(vis.traverse(select([func.count(t1.c # .col1).l abel('foo')]), clone=True), "SELECT # count(t1alias.col1) AS foo FROM table1 AS t1alias") def test_table_to_alias_14(self): t1alias = t1.alias('t1alias') vis = sql_util.ClauseAdapter(t1alias) t2alias = t2.alias('t2alias') vis.chain(sql_util.ClauseAdapter(t2alias)) self.assert_compile(vis.traverse(select(['*'], t1.c.col1 == t2.c.col2)), 'SELECT * FROM table1 AS t1alias, table2 ' 'AS t2alias WHERE t1alias.col1 = ' 't2alias.col2') def test_table_to_alias_15(self): t1alias = t1.alias('t1alias') vis = sql_util.ClauseAdapter(t1alias) t2alias = t2.alias('t2alias') vis.chain(sql_util.ClauseAdapter(t2alias)) self.assert_compile(vis.traverse(select(['*'], t1.c.col1 == t2.c.col2, from_obj=[t1, t2])), 'SELECT * FROM table1 AS t1alias, table2 ' 'AS t2alias WHERE t1alias.col1 = ' 't2alias.col2') def test_table_to_alias_16(self): t1alias = t1.alias('t1alias') vis = sql_util.ClauseAdapter(t1alias) t2alias = t2.alias('t2alias') vis.chain(sql_util.ClauseAdapter(t2alias)) self.assert_compile( select([t1alias, t2alias]).where( t1alias.c.col1 == vis.traverse(select(['*'], t1.c.col1 == t2.c.col2, from_obj=[t1, t2]).correlate(t1)) ), "SELECT t1alias.col1, t1alias.col2, t1alias.col3, " "t2alias.col1, t2alias.col2, t2alias.col3 " "FROM table1 AS t1alias, table2 AS t2alias " "WHERE t1alias.col1 = " "(SELECT * FROM table2 AS t2alias " "WHERE t1alias.col1 = t2alias.col2)" ) def test_table_to_alias_17(self): t1alias = t1.alias('t1alias') vis = sql_util.ClauseAdapter(t1alias) t2alias = t2.alias('t2alias') vis.chain(sql_util.ClauseAdapter(t2alias)) self.assert_compile( t2alias.select().where(t2alias.c.col2 == 
vis.traverse(select(['*'], t1.c.col1 == t2.c.col2, from_obj=[t1, t2]).correlate(t2))), 'SELECT t2alias.col1, t2alias.col2, t2alias.col3 ' 'FROM table2 AS t2alias WHERE t2alias.col2 = ' '(SELECT * FROM table1 AS t1alias WHERE ' 't1alias.col1 = t2alias.col2)') def test_include_exclude(self): m = MetaData() a = Table('a', m, Column('id', Integer, primary_key=True), Column('xxx_id', Integer, ForeignKey('a.id', name='adf', use_alter=True) ) ) e = (a.c.id == a.c.xxx_id) assert str(e) == "a.id = a.xxx_id" b = a.alias() e = sql_util.ClauseAdapter( b, include= set([ a.c.id ]), equivalents= { a.c.id: set([ a.c.id]) } ).traverse( e) assert str(e) == "a_1.id = a.xxx_id" def test_recursive_equivalents(self): m = MetaData() a = Table('a', m, Column('x', Integer), Column('y', Integer)) b = Table('b', m, Column('x', Integer), Column('y', Integer)) c = Table('c', m, Column('x', Integer), Column('y', Integer)) # force a recursion overflow, by linking a.c.x<->c.c.x, and # asking for a nonexistent col. corresponding_column should prevent # endless depth. adapt = sql_util.ClauseAdapter(b, equivalents={a.c.x: set([c.c.x]), c.c.x: set([a.c.x])}) assert adapt._corresponding_column(a.c.x, False) is None def test_multilevel_equivalents(self): m = MetaData() a = Table('a', m, Column('x', Integer), Column('y', Integer)) b = Table('b', m, Column('x', Integer), Column('y', Integer)) c = Table('c', m, Column('x', Integer), Column('y', Integer)) alias = select([a]).select_from(a.join(b, a.c.x == b.c.x)).alias() # two levels of indirection from c.x->b.x->a.x, requires recursive # corresponding_column call adapt = sql_util.ClauseAdapter(alias, equivalents={b.c.x: set([a.c.x]), c.c.x: set([b.c.x])}) assert adapt._corresponding_column(a.c.x, False) is alias.c.x assert adapt._corresponding_column(c.c.x, False) is alias.c.x def test_join_to_alias(self): metadata = MetaData() a = Table('a', metadata, Column('id', Integer, primary_key=True)) b = Table('b', metadata, Column('id', Integer, primary_key=True), Column('aid', Integer, ForeignKey('a.id')), ) c = Table('c', metadata, Column('id', Integer, primary_key=True), Column('bid', Integer, ForeignKey('b.id')), ) d = Table('d', metadata, Column('id', Integer, primary_key=True), Column('aid', Integer, ForeignKey('a.id')), ) j1 = a.outerjoin(b) j2 = select([j1], use_labels=True) j3 = c.join(j2, j2.c.b_id == c.c.bid) j4 = j3.outerjoin(d) self.assert_compile(j4, 'c JOIN (SELECT a.id AS a_id, b.id AS ' 'b_id, b.aid AS b_aid FROM a LEFT OUTER ' 'JOIN b ON a.id = b.aid) ON b_id = c.bid ' 'LEFT OUTER JOIN d ON a_id = d.aid') j5 = j3.alias('foo') j6 = sql_util.ClauseAdapter(j5).copy_and_process([j4])[0] # this statement takes c join(a join b), wraps it inside an # aliased "select * from c join(a join b) AS foo". 
the outermost # right side "left outer join d" stays the same, except "d" # joins against foo.a_id instead of plain "a_id" self.assert_compile(j6, '(SELECT c.id AS c_id, c.bid AS c_bid, ' 'a_id AS a_id, b_id AS b_id, b_aid AS ' 'b_aid FROM c JOIN (SELECT a.id AS a_id, ' 'b.id AS b_id, b.aid AS b_aid FROM a LEFT ' 'OUTER JOIN b ON a.id = b.aid) ON b_id = ' 'c.bid) AS foo LEFT OUTER JOIN d ON ' 'foo.a_id = d.aid') def test_derived_from(self): assert select([t1]).is_derived_from(t1) assert not select([t2]).is_derived_from(t1) assert not t1.is_derived_from(select([t1])) assert t1.alias().is_derived_from(t1) s1 = select([t1, t2]).alias('foo') s2 = select([s1]).limit(5).offset(10).alias() assert s2.is_derived_from(s1) s2 = s2._clone() assert s2.is_derived_from(s1) def test_aliasedselect_to_aliasedselect_straight(self): # original issue from ticket #904 s1 = select([t1]).alias('foo') s2 = select([s1]).limit(5).offset(10).alias() self.assert_compile(sql_util.ClauseAdapter(s2).traverse(s1), 'SELECT foo.col1, foo.col2, foo.col3 FROM ' '(SELECT table1.col1 AS col1, table1.col2 ' 'AS col2, table1.col3 AS col3 FROM table1) ' 'AS foo LIMIT :param_1 OFFSET :param_2', {'param_1': 5, 'param_2': 10}) def test_aliasedselect_to_aliasedselect_join(self): s1 = select([t1]).alias('foo') s2 = select([s1]).limit(5).offset(10).alias() j = s1.outerjoin(t2, s1.c.col1 == t2.c.col1) self.assert_compile(sql_util.ClauseAdapter(s2).traverse(j).select(), 'SELECT anon_1.col1, anon_1.col2, ' 'anon_1.col3, table2.col1, table2.col2, ' 'table2.col3 FROM (SELECT foo.col1 AS ' 'col1, foo.col2 AS col2, foo.col3 AS col3 ' 'FROM (SELECT table1.col1 AS col1, ' 'table1.col2 AS col2, table1.col3 AS col3 ' 'FROM table1) AS foo LIMIT :param_1 OFFSET ' ':param_2) AS anon_1 LEFT OUTER JOIN ' 'table2 ON anon_1.col1 = table2.col1', {'param_1': 5, 'param_2': 10}) def test_aliasedselect_to_aliasedselect_join_nested_table(self): s1 = select([t1]).alias('foo') s2 = select([s1]).limit(5).offset(10).alias() talias = t1.alias('bar') assert not s2.is_derived_from(talias) j = s1.outerjoin(talias, s1.c.col1 == talias.c.col1) self.assert_compile(sql_util.ClauseAdapter(s2).traverse(j).select(), 'SELECT anon_1.col1, anon_1.col2, ' 'anon_1.col3, bar.col1, bar.col2, bar.col3 ' 'FROM (SELECT foo.col1 AS col1, foo.col2 ' 'AS col2, foo.col3 AS col3 FROM (SELECT ' 'table1.col1 AS col1, table1.col2 AS col2, ' 'table1.col3 AS col3 FROM table1) AS foo ' 'LIMIT :param_1 OFFSET :param_2) AS anon_1 ' 'LEFT OUTER JOIN table1 AS bar ON ' 'anon_1.col1 = bar.col1', {'param_1': 5, 'param_2': 10}) def test_functions(self): self.assert_compile( sql_util.ClauseAdapter(t1.alias()).\ traverse(func.count(t1.c.col1)), 'count(table1_1.col1)') s = select([func.count(t1.c.col1)]) self.assert_compile(sql_util.ClauseAdapter(t1.alias()).traverse(s), 'SELECT count(table1_1.col1) AS count_1 ' 'FROM table1 AS table1_1') def test_recursive(self): metadata = MetaData() a = Table('a', metadata, Column('id', Integer, primary_key=True)) b = Table('b', metadata, Column('id', Integer, primary_key=True), Column('aid', Integer, ForeignKey('a.id')), ) c = Table('c', metadata, Column('id', Integer, primary_key=True), Column('bid', Integer, ForeignKey('b.id')), ) d = Table('d', metadata, Column('id', Integer, primary_key=True), Column('aid', Integer, ForeignKey('a.id')), ) u = union( a.join(b).select().apply_labels(), a.join(d).select().apply_labels() ).alias() self.assert_compile( sql_util.ClauseAdapter(u).\ traverse(select([c.c.bid]).where(c.c.bid == u.c.b_aid)), "SELECT c.bid "\ "FROM c, (SELECT 
a.id AS a_id, b.id AS b_id, b.aid AS b_aid " "FROM a JOIN b ON a.id = b.aid UNION SELECT a.id AS a_id, d.id " "AS d_id, d.aid AS d_aid " "FROM a JOIN d ON a.id = d.aid) AS anon_1 " "WHERE c.bid = anon_1.b_aid" ) class SpliceJoinsTest(fixtures.TestBase, AssertsCompiledSQL): __dialect__ = 'default' @classmethod def setup_class(cls): global table1, table2, table3, table4 def _table(name): return table(name, column('col1'), column('col2'), column('col3')) table1, table2, table3, table4 = [_table(name) for name in ('table1', 'table2', 'table3', 'table4')] def test_splice(self): t1, t2, t3, t4 = table1, table2, table1.alias(), table2.alias() j = t1.join(t2, t1.c.col1 == t2.c.col1).join(t3, t2.c.col1 == t3.c.col1).join(t4, t4.c.col1 == t1.c.col1) s = select([t1]).where(t1.c.col2 < 5).alias() self.assert_compile(sql_util.splice_joins(s, j), '(SELECT table1.col1 AS col1, table1.col2 ' 'AS col2, table1.col3 AS col3 FROM table1 ' 'WHERE table1.col2 < :col2_1) AS anon_1 ' 'JOIN table2 ON anon_1.col1 = table2.col1 ' 'JOIN table1 AS table1_1 ON table2.col1 = ' 'table1_1.col1 JOIN table2 AS table2_1 ON ' 'table2_1.col1 = anon_1.col1') def test_stop_on(self): t1, t2, t3 = table1, table2, table3 j1 = t1.join(t2, t1.c.col1 == t2.c.col1) j2 = j1.join(t3, t2.c.col1 == t3.c.col1) s = select([t1]).select_from(j1).alias() self.assert_compile(sql_util.splice_joins(s, j2), '(SELECT table1.col1 AS col1, table1.col2 ' 'AS col2, table1.col3 AS col3 FROM table1 ' 'JOIN table2 ON table1.col1 = table2.col1) ' 'AS anon_1 JOIN table2 ON anon_1.col1 = ' 'table2.col1 JOIN table3 ON table2.col1 = ' 'table3.col1') self.assert_compile(sql_util.splice_joins(s, j2, j1), '(SELECT table1.col1 AS col1, table1.col2 ' 'AS col2, table1.col3 AS col3 FROM table1 ' 'JOIN table2 ON table1.col1 = table2.col1) ' 'AS anon_1 JOIN table3 ON table2.col1 = ' 'table3.col1') def test_splice_2(self): t2a = table2.alias() t3a = table3.alias() j1 = table1.join(t2a, table1.c.col1 == t2a.c.col1).join(t3a, t2a.c.col2 == t3a.c.col2) t2b = table4.alias() j2 = table1.join(t2b, table1.c.col3 == t2b.c.col3) self.assert_compile(sql_util.splice_joins(table1, j1), 'table1 JOIN table2 AS table2_1 ON ' 'table1.col1 = table2_1.col1 JOIN table3 ' 'AS table3_1 ON table2_1.col2 = ' 'table3_1.col2') self.assert_compile(sql_util.splice_joins(table1, j2), 'table1 JOIN table4 AS table4_1 ON ' 'table1.col3 = table4_1.col3') self.assert_compile(sql_util.splice_joins(sql_util.splice_joins(table1, j1), j2), 'table1 JOIN table2 AS table2_1 ON ' 'table1.col1 = table2_1.col1 JOIN table3 ' 'AS table3_1 ON table2_1.col2 = ' 'table3_1.col2 JOIN table4 AS table4_1 ON ' 'table1.col3 = table4_1.col3') class SelectTest(fixtures.TestBase, AssertsCompiledSQL): """tests the generative capability of Select""" __dialect__ = 'default' @classmethod def setup_class(cls): global t1, t2 t1 = table("table1", column("col1"), column("col2"), column("col3"), ) t2 = table("table2", column("col1"), column("col2"), column("col3"), ) def test_columns(self): s = t1.select() self.assert_compile(s, 'SELECT table1.col1, table1.col2, ' 'table1.col3 FROM table1') select_copy = s.column('yyy') self.assert_compile(select_copy, 'SELECT table1.col1, table1.col2, ' 'table1.col3, yyy FROM table1') assert s.columns is not select_copy.columns assert s._columns is not select_copy._columns assert s._raw_columns is not select_copy._raw_columns self.assert_compile(s, 'SELECT table1.col1, table1.col2, ' 'table1.col3 FROM table1') def test_froms(self): s = t1.select() self.assert_compile(s, 'SELECT table1.col1, 
table1.col2, ' 'table1.col3 FROM table1') select_copy = s.select_from(t2) self.assert_compile(select_copy, 'SELECT table1.col1, table1.col2, ' 'table1.col3 FROM table1, table2') assert s._froms is not select_copy._froms self.assert_compile(s, 'SELECT table1.col1, table1.col2, ' 'table1.col3 FROM table1') def test_prefixes(self): s = t1.select() self.assert_compile(s, 'SELECT table1.col1, table1.col2, ' 'table1.col3 FROM table1') select_copy = s.prefix_with('FOOBER') self.assert_compile(select_copy, 'SELECT FOOBER table1.col1, table1.col2, ' 'table1.col3 FROM table1') self.assert_compile(s, 'SELECT table1.col1, table1.col2, ' 'table1.col3 FROM table1') def test_execution_options(self): s = select().execution_options(foo='bar') s2 = s.execution_options(bar='baz') s3 = s.execution_options(foo='not bar') # The original select should not be modified. assert s._execution_options == dict(foo='bar') # s2 should have its execution_options based on s, though. assert s2._execution_options == dict(foo='bar', bar='baz') assert s3._execution_options == dict(foo='not bar') def test_invalid_options(self): assert_raises( exc.ArgumentError, select().execution_options, compiled_cache={} ) assert_raises( exc.ArgumentError, select().execution_options, isolation_level='READ_COMMITTED' ) # this feature not available yet def _NOTYET_test_execution_options_in_kwargs(self): s = select(execution_options=dict(foo='bar')) s2 = s.execution_options(bar='baz') # The original select should not be modified. assert s._execution_options == dict(foo='bar') # s2 should have its execution_options based on s, though. assert s2._execution_options == dict(foo='bar', bar='baz') # this feature not available yet def _NOTYET_test_execution_options_in_text(self): s = text('select 42', execution_options=dict(foo='bar')) assert s._execution_options == dict(foo='bar') class ValuesBaseTest(fixtures.TestBase, AssertsCompiledSQL): """Tests the generative capability of Insert, Update""" __dialect__ = 'default' # fixme: consolidate converage from elsewhere here and expand @classmethod def setup_class(cls): global t1, t2 t1 = table("table1", column("col1"), column("col2"), column("col3"), ) t2 = table("table2", column("col1"), column("col2"), column("col3"), ) def test_prefixes(self): i = t1.insert() self.assert_compile(i, "INSERT INTO table1 (col1, col2, col3) " "VALUES (:col1, :col2, :col3)") gen = i.prefix_with("foober") self.assert_compile(gen, "INSERT foober INTO table1 (col1, col2, col3) " "VALUES (:col1, :col2, :col3)") self.assert_compile(i, "INSERT INTO table1 (col1, col2, col3) " "VALUES (:col1, :col2, :col3)") i2 = t1.insert(prefixes=['squiznart']) self.assert_compile(i2, "INSERT squiznart INTO table1 (col1, col2, col3) " "VALUES (:col1, :col2, :col3)") gen2 = i2.prefix_with("quux") self.assert_compile(gen2, "INSERT squiznart quux INTO " "table1 (col1, col2, col3) " "VALUES (:col1, :col2, :col3)") def test_add_kwarg(self): i = t1.insert() eq_(i.parameters, None) i = i.values(col1=5) eq_(i.parameters, {"col1": 5}) i = i.values(col2=7) eq_(i.parameters, {"col1": 5, "col2": 7}) def test_via_tuple_single(self): i = t1.insert() eq_(i.parameters, None) i = i.values((5, 6, 7)) eq_(i.parameters, {"col1": 5, "col2": 6, "col3": 7}) def test_kw_and_dict_simulatenously_single(self): i = t1.insert() i = i.values({"col1": 5}, col2=7) eq_(i.parameters, {"col1": 5, "col2": 7}) def test_via_tuple_multi(self): i = t1.insert() eq_(i.parameters, None) i = i.values([(5, 6, 7), (8, 9, 10)]) eq_(i.parameters, [ {"col1": 5, "col2": 6, "col3": 7}, {"col1": 
8, "col2": 9, "col3": 10}, ] ) def test_inline_values_single(self): i = t1.insert(values={"col1": 5}) eq_(i.parameters, {"col1": 5}) is_(i._has_multi_parameters, False) def test_inline_values_multi(self): i = t1.insert(values=[{"col1": 5}, {"col1": 6}]) eq_(i.parameters, [{"col1": 5}, {"col1": 6}]) is_(i._has_multi_parameters, True) def test_add_dictionary(self): i = t1.insert() eq_(i.parameters, None) i = i.values({"col1": 5}) eq_(i.parameters, {"col1": 5}) is_(i._has_multi_parameters, False) i = i.values({"col1": 6}) # note replaces eq_(i.parameters, {"col1": 6}) is_(i._has_multi_parameters, False) i = i.values({"col2": 7}) eq_(i.parameters, {"col1": 6, "col2": 7}) is_(i._has_multi_parameters, False) def test_add_kwarg_disallowed_multi(self): i = t1.insert() i = i.values([{"col1": 5}, {"col1": 7}]) assert_raises_message( exc.InvalidRequestError, "This construct already has multiple parameter sets.", i.values, col2=7 ) def test_cant_mix_single_multi_formats_dict_to_list(self): i = t1.insert().values(col1=5) assert_raises_message( exc.ArgumentError, "Can't mix single-values and multiple values " "formats in one statement", i.values, [{"col1": 6}] ) def test_cant_mix_single_multi_formats_list_to_dict(self): i = t1.insert().values([{"col1": 6}]) assert_raises_message( exc.ArgumentError, "Can't mix single-values and multiple values " "formats in one statement", i.values, {"col1": 5} ) def test_erroneous_multi_args_dicts(self): i = t1.insert() assert_raises_message( exc.ArgumentError, "Only a single dictionary/tuple or list of " "dictionaries/tuples is accepted positionally.", i.values, {"col1": 5}, {"col1": 7} ) def test_erroneous_multi_args_tuples(self): i = t1.insert() assert_raises_message( exc.ArgumentError, "Only a single dictionary/tuple or list of " "dictionaries/tuples is accepted positionally.", i.values, (5, 6, 7), (8, 9, 10) ) def test_erroneous_multi_args_plus_kw(self): i = t1.insert() assert_raises_message( exc.ArgumentError, "Can't pass kwargs and multiple parameter sets simultaenously", i.values, [{"col1": 5}], col2=7 ) def test_update_no_support_multi_values(self): u = t1.update() assert_raises_message( exc.InvalidRequestError, "This construct does not support multiple parameter sets.", u.values, [{"col1": 5}, {"col1": 7}] ) def test_update_no_support_multi_constructor(self): assert_raises_message( exc.InvalidRequestError, "This construct does not support multiple parameter sets.", t1.update, values=[{"col1": 5}, {"col1": 7}] ) SQLAlchemy-0.8.4/test/sql/test_insert.py0000644000076500000240000003164012251147172020732 0ustar classicstaff00000000000000#! 
coding:utf-8 from sqlalchemy import Column, Integer, MetaData, String, Table,\ bindparam, exc, func, insert, select from sqlalchemy.dialects import mysql, postgresql from sqlalchemy.engine import default from sqlalchemy.testing import AssertsCompiledSQL,\ assert_raises_message, fixtures class _InsertTestBase(object): @classmethod def define_tables(cls, metadata): Table('mytable', metadata, Column('myid', Integer), Column('name', String(30)), Column('description', String(30))) Table('myothertable', metadata, Column('otherid', Integer), Column('othername', String(30))) class InsertTest(_InsertTestBase, fixtures.TablesTest, AssertsCompiledSQL): __dialect__ = 'default' def test_generic_insert_bind_params_all_columns(self): table1 = self.tables.mytable self.assert_compile(insert(table1), 'INSERT INTO mytable (myid, name, description) ' 'VALUES (:myid, :name, :description)') def test_insert_with_values_dict(self): table1 = self.tables.mytable checkparams = { 'myid': 3, 'name': 'jack' } self.assert_compile(insert(table1, dict(myid=3, name='jack')), 'INSERT INTO mytable (myid, name) VALUES (:myid, :name)', checkparams=checkparams) def test_insert_with_values_tuple(self): table1 = self.tables.mytable checkparams = { 'myid': 3, 'name': 'jack', 'description': 'mydescription' } self.assert_compile(insert(table1, (3, 'jack', 'mydescription')), 'INSERT INTO mytable (myid, name, description) ' 'VALUES (:myid, :name, :description)', checkparams=checkparams) def test_insert_with_values_func(self): table1 = self.tables.mytable self.assert_compile(insert(table1, values=dict(myid=func.lala())), 'INSERT INTO mytable (myid) VALUES (lala())') def test_insert_with_user_supplied_bind_params(self): table1 = self.tables.mytable values = { table1.c.myid: bindparam('userid'), table1.c.name: bindparam('username') } self.assert_compile(insert(table1, values), 'INSERT INTO mytable (myid, name) VALUES (:userid, :username)') def test_insert_values(self): table1 = self.tables.mytable values1 = {table1.c.myid: bindparam('userid')} values2 = {table1.c.name: bindparam('username')} self.assert_compile(insert(table1, values=values1).values(values2), 'INSERT INTO mytable (myid, name) VALUES (:userid, :username)') def test_prefix_with(self): table1 = self.tables.mytable stmt = table1.insert().\ prefix_with('A', 'B', dialect='mysql').\ prefix_with('C', 'D') self.assert_compile(stmt, 'INSERT C D INTO mytable (myid, name, description) ' 'VALUES (:myid, :name, :description)') self.assert_compile(stmt, 'INSERT A B C D INTO mytable (myid, name, description) ' 'VALUES (%s, %s, %s)', dialect=mysql.dialect()) def test_inline_default(self): metadata = MetaData() table = Table('sometable', metadata, Column('id', Integer, primary_key=True), Column('foo', Integer, default=func.foobar())) self.assert_compile(table.insert(values={}, inline=True), 'INSERT INTO sometable (foo) VALUES (foobar())') self.assert_compile(table.insert(inline=True), 'INSERT INTO sometable (foo) VALUES (foobar())', params={}) def test_insert_returning_not_in_default(self): table1 = self.tables.mytable stmt = table1.insert().returning(table1.c.myid) assert_raises_message( exc.CompileError, "RETURNING is not supported by this dialect's statement compiler.", stmt.compile, dialect=default.DefaultDialect() ) def test_insert_from_select_select(self): table1 = self.tables.mytable sel = select([table1.c.myid, table1.c.name]).where(table1.c.name == 'foo') ins = self.tables.myothertable.insert().\ from_select(("otherid", "othername"), sel) self.assert_compile( ins, "INSERT INTO 
myothertable (otherid, othername) " "SELECT mytable.myid, mytable.name FROM mytable " "WHERE mytable.name = :name_1", checkparams={"name_1": "foo"} ) def test_insert_mix_select_values_exception(self): table1 = self.tables.mytable sel = select([table1.c.myid, table1.c.name]).where(table1.c.name == 'foo') ins = self.tables.myothertable.insert().\ from_select(("otherid", "othername"), sel) assert_raises_message( exc.InvalidRequestError, "This construct already inserts from a SELECT", ins.values, othername="5" ) def test_insert_mix_values_select_exception(self): table1 = self.tables.mytable sel = select([table1.c.myid, table1.c.name]).where(table1.c.name == 'foo') ins = self.tables.myothertable.insert().values(othername="5") assert_raises_message( exc.InvalidRequestError, "This construct already inserts value expressions", ins.from_select, ("otherid", "othername"), sel ) def test_insert_from_select_table(self): table1 = self.tables.mytable ins = self.tables.myothertable.insert().\ from_select(("otherid", "othername"), table1) # note we aren't checking the number of columns right now self.assert_compile( ins, "INSERT INTO myothertable (otherid, othername) " "SELECT mytable.myid, mytable.name, mytable.description " "FROM mytable", checkparams={} ) def test_insert_from_select_col_values(self): table1 = self.tables.mytable table2 = self.tables.myothertable sel = select([table1.c.myid, table1.c.name]).where(table1.c.name == 'foo') ins = table2.insert().\ from_select((table2.c.otherid, table2.c.othername), sel) self.assert_compile( ins, "INSERT INTO myothertable (otherid, othername) " "SELECT mytable.myid, mytable.name FROM mytable " "WHERE mytable.name = :name_1", checkparams={"name_1": "foo"} ) class EmptyTest(_InsertTestBase, fixtures.TablesTest, AssertsCompiledSQL): __dialect__ = 'default' def test_empty_insert_default(self): table1 = self.tables.mytable stmt = table1.insert().values({}) # hide from 2to3 self.assert_compile(stmt, 'INSERT INTO mytable () VALUES ()') def test_supports_empty_insert_true(self): table1 = self.tables.mytable dialect = default.DefaultDialect() dialect.supports_empty_insert = dialect.supports_default_values = True stmt = table1.insert().values({}) # hide from 2to3 self.assert_compile(stmt, 'INSERT INTO mytable DEFAULT VALUES', dialect=dialect) def test_supports_empty_insert_false(self): table1 = self.tables.mytable dialect = default.DefaultDialect() dialect.supports_empty_insert = dialect.supports_default_values = False stmt = table1.insert().values({}) # hide from 2to3 assert_raises_message(exc.CompileError, "The 'default' dialect with current database version " "settings does not support empty inserts.", stmt.compile, dialect=dialect) class MultirowTest(_InsertTestBase, fixtures.TablesTest, AssertsCompiledSQL): __dialect__ = 'default' def test_not_supported(self): table1 = self.tables.mytable dialect = default.DefaultDialect() stmt = table1.insert().values([{'myid': 1}, {'myid': 2}]) assert_raises_message( exc.CompileError, "The 'default' dialect with current database version settings " "does not support in-place multirow inserts.", stmt.compile, dialect=dialect) def test_named(self): table1 = self.tables.mytable values = [ {'myid': 1, 'name': 'a', 'description': 'b'}, {'myid': 2, 'name': 'c', 'description': 'd'}, {'myid': 3, 'name': 'e', 'description': 'f'} ] checkparams = { 'myid_0': 1, 'myid_1': 2, 'myid_2': 3, 'name_0': 'a', 'name_1': 'c', 'name_2': 'e', 'description_0': 'b', 'description_1': 'd', 'description_2': 'f', } dialect = default.DefaultDialect() 
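# --- Illustrative aside (not part of the original test module) ---
# MultirowTest compiles a single INSERT carrying several parameter sets
# ("multi-values" insert).  The target dialect must advertise support,
# which the tests above switch on via supports_multivalues_insert.  A
# minimal standalone sketch with made-up names:
from sqlalchemy.sql import table, column
from sqlalchemy.engine import default

t = table("t", column("a"), column("b"))
stmt = t.insert().values([{"a": 1, "b": 2}, {"a": 3, "b": 4}])

dialect = default.DefaultDialect()
dialect.supports_multivalues_insert = True
print(str(stmt.compile(dialect=dialect)))
# INSERT INTO t (a, b) VALUES (:a_0, :b_0), (:a_1, :b_1)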
dialect.supports_multivalues_insert = True self.assert_compile(table1.insert().values(values), 'INSERT INTO mytable (myid, name, description) VALUES ' '(:myid_0, :name_0, :description_0), ' '(:myid_1, :name_1, :description_1), ' '(:myid_2, :name_2, :description_2)', checkparams=checkparams, dialect=dialect) def test_positional(self): table1 = self.tables.mytable values = [ {'myid': 1, 'name': 'a', 'description': 'b'}, {'myid': 2, 'name': 'c', 'description': 'd'}, {'myid': 3, 'name': 'e', 'description': 'f'} ] checkpositional = (1, 'a', 'b', 2, 'c', 'd', 3, 'e', 'f') dialect = default.DefaultDialect() dialect.supports_multivalues_insert = True dialect.paramstyle = 'format' dialect.positional = True self.assert_compile(table1.insert().values(values), 'INSERT INTO mytable (myid, name, description) VALUES ' '(%s, %s, %s), (%s, %s, %s), (%s, %s, %s)', checkpositional=checkpositional, dialect=dialect) def test_inline_default(self): metadata = MetaData() table = Table('sometable', metadata, Column('id', Integer, primary_key=True), Column('data', String), Column('foo', Integer, default=func.foobar())) values = [ {'id': 1, 'data': 'data1'}, {'id': 2, 'data': 'data2', 'foo': 'plainfoo'}, {'id': 3, 'data': 'data3'}, ] checkparams = { 'id_0': 1, 'id_1': 2, 'id_2': 3, 'data_0': 'data1', 'data_1': 'data2', 'data_2': 'data3', 'foo_1': 'plainfoo', } self.assert_compile(table.insert().values(values), 'INSERT INTO sometable (id, data, foo) VALUES ' '(%(id_0)s, %(data_0)s, foobar()), ' '(%(id_1)s, %(data_1)s, %(foo_1)s), ' '(%(id_2)s, %(data_2)s, foobar())', checkparams=checkparams, dialect=postgresql.dialect()) def test_server_default(self): metadata = MetaData() table = Table('sometable', metadata, Column('id', Integer, primary_key=True), Column('data', String), Column('foo', Integer, server_default=func.foobar())) values = [ {'id': 1, 'data': 'data1'}, {'id': 2, 'data': 'data2', 'foo': 'plainfoo'}, {'id': 3, 'data': 'data3'}, ] checkparams = { 'id_0': 1, 'id_1': 2, 'id_2': 3, 'data_0': 'data1', 'data_1': 'data2', 'data_2': 'data3', } self.assert_compile(table.insert().values(values), 'INSERT INTO sometable (id, data) VALUES ' '(%(id_0)s, %(data_0)s), ' '(%(id_1)s, %(data_1)s), ' '(%(id_2)s, %(data_2)s)', checkparams=checkparams, dialect=postgresql.dialect()) def test_server_default_absent_value(self): metadata = MetaData() table = Table('sometable', metadata, Column('id', Integer, primary_key=True), Column('data', String), Column('foo', Integer, server_default=func.foobar())) values = [ {'id': 1, 'data': 'data1', 'foo': 'plainfoo'}, {'id': 2, 'data': 'data2'}, {'id': 3, 'data': 'data3', 'foo': 'otherfoo'}, ] checkparams = { 'id_0': 1, 'id_1': 2, 'id_2': 3, 'data_0': 'data1', 'data_1': 'data2', 'data_2': 'data3', 'foo_0': 'plainfoo', 'foo_2': 'otherfoo', } # note the effect here is that the first set of params # takes effect for the rest of them, when one is absent self.assert_compile(table.insert().values(values), 'INSERT INTO sometable (id, data, foo) VALUES ' '(%(id_0)s, %(data_0)s, %(foo_0)s), ' '(%(id_1)s, %(data_1)s, %(foo_0)s), ' '(%(id_2)s, %(data_2)s, %(foo_2)s)', checkparams=checkparams, dialect=postgresql.dialect()) SQLAlchemy-0.8.4/test/sql/test_inspect.py0000644000076500000240000000151112251147172021065 0ustar classicstaff00000000000000"""test the inspection registry system.""" from sqlalchemy import inspect from sqlalchemy import Table, Column, Integer, MetaData from sqlalchemy.testing import fixtures from sqlalchemy.testing import is_ class TestCoreInspection(fixtures.TestBase): def 
test_table(self): t = Table('t', MetaData(), Column('x', Integer) ) is_(inspect(t), t) assert t.is_selectable is_(t.selectable, t) def test_select(self): t = Table('t', MetaData(), Column('x', Integer) ) s = t.select() is_(inspect(s), s) assert s.is_selectable is_(s.selectable, s) def test_column_expr(self): c = Column('x', Integer) is_(inspect(c), c) assert not c.is_selectable assert not hasattr(c, 'selectable')SQLAlchemy-0.8.4/test/sql/test_labels.py0000644000076500000240000004515212251147172020673 0ustar classicstaff00000000000000from sqlalchemy import exc as exceptions, select, MetaData, Integer, or_ from sqlalchemy.engine import default from sqlalchemy.sql import table, column from sqlalchemy.testing import AssertsCompiledSQL, assert_raises, engines,\ fixtures from sqlalchemy.testing.schema import Table, Column IDENT_LENGTH = 29 class MaxIdentTest(fixtures.TestBase, AssertsCompiledSQL): __dialect__ = 'DefaultDialect' table1 = table('some_large_named_table', column('this_is_the_primarykey_column'), column('this_is_the_data_column') ) table2 = table('table_with_exactly_29_characs', column('this_is_the_primarykey_column'), column('this_is_the_data_column') ) def _length_fixture(self, length=IDENT_LENGTH, positional=False): dialect = default.DefaultDialect() dialect.max_identifier_length = length if positional: dialect.paramstyle = 'format' dialect.positional = True return dialect def _engine_fixture(self, length=IDENT_LENGTH): eng = engines.testing_engine() eng.dialect.max_identifier_length = length return eng def test_table_alias_1(self): self.assert_compile( self.table2.alias().select(), 'SELECT ' 'table_with_exactly_29_c_1.' 'this_is_the_primarykey_column, ' 'table_with_exactly_29_c_1.this_is_the_data_column ' 'FROM ' 'table_with_exactly_29_characs ' 'AS table_with_exactly_29_c_1', dialect=self._length_fixture() ) def test_table_alias_2(self): table1 = self.table1 table2 = self.table2 ta = table2.alias() on = table1.c.this_is_the_data_column == ta.c.this_is_the_data_column self.assert_compile( select([table1, ta]).select_from(table1.join(ta, on)). 
where(ta.c.this_is_the_data_column == 'data3'), 'SELECT ' 'some_large_named_table.this_is_the_primarykey_column, ' 'some_large_named_table.this_is_the_data_column, ' 'table_with_exactly_29_c_1.this_is_the_primarykey_column, ' 'table_with_exactly_29_c_1.this_is_the_data_column ' 'FROM ' 'some_large_named_table ' 'JOIN ' 'table_with_exactly_29_characs ' 'AS ' 'table_with_exactly_29_c_1 ' 'ON ' 'some_large_named_table.this_is_the_data_column = ' 'table_with_exactly_29_c_1.this_is_the_data_column ' 'WHERE ' 'table_with_exactly_29_c_1.this_is_the_data_column = ' ':this_is_the_data_column_1', dialect=self._length_fixture() ) def test_too_long_name_disallowed(self): m = MetaData() t = Table('this_name_is_too_long_for_what_were_doing_in_this_test', m, Column('foo', Integer)) eng = self._engine_fixture() methods = (t.create, t.drop, m.create_all, m.drop_all) for meth in methods: assert_raises(exceptions.IdentifierError, meth, eng) def _assert_labeled_table1_select(self, s): table1 = self.table1 compiled = s.compile(dialect=self._length_fixture()) assert set(compiled.result_map['some_large_named_table__2'][1]).\ issuperset( [ 'some_large_named_table_this_is_the_data_column', 'some_large_named_table__2', table1.c.this_is_the_data_column ] ) assert set(compiled.result_map['some_large_named_table__1'][1]).\ issuperset( [ 'some_large_named_table_this_is_the_primarykey_column', 'some_large_named_table__1', table1.c.this_is_the_primarykey_column ] ) def test_result_map_use_labels(self): table1 = self.table1 s = table1.select().apply_labels().\ order_by(table1.c.this_is_the_primarykey_column) self._assert_labeled_table1_select(s) def test_result_map_limit(self): table1 = self.table1 # some dialects such as oracle (and possibly ms-sql in a future # version) generate a subquery for limits/offsets. ensure that the # generated result map corresponds to the selected table, not the # select query s = table1.select(use_labels=True, order_by=[table1.c.this_is_the_primarykey_column]).\ limit(2) self._assert_labeled_table1_select(s) def test_result_map_subquery(self): table1 = self.table1 s = table1.select( table1.c.this_is_the_primarykey_column == 4).\ alias('foo') s2 = select([s]) compiled = s2.compile(dialect=self._length_fixture()) assert \ set(compiled.result_map['this_is_the_data_column'][1]).\ issuperset(['this_is_the_data_column', s.c.this_is_the_data_column]) assert \ set(compiled.result_map['this_is_the_primarykey_column'][1]).\ issuperset(['this_is_the_primarykey_column', s.c.this_is_the_primarykey_column]) def test_result_map_anon_alias(self): table1 = self.table1 dialect = self._length_fixture() q = table1.select(table1.c.this_is_the_primarykey_column == 4).alias() s = select([q]).apply_labels() self.assert_compile(s, 'SELECT ' 'anon_1.this_is_the_primarykey_column ' 'AS anon_1_this_is_the_prim_1, ' 'anon_1.this_is_the_data_column ' 'AS anon_1_this_is_the_data_2 ' 'FROM (' 'SELECT ' 'some_large_named_table.' 
'this_is_the_primarykey_column ' 'AS this_is_the_primarykey_column, ' 'some_large_named_table.this_is_the_data_column ' 'AS this_is_the_data_column ' 'FROM ' 'some_large_named_table ' 'WHERE ' 'some_large_named_table.this_is_the_primarykey_column ' '= :this_is_the_primarykey__1' ') ' 'AS anon_1', dialect=dialect) compiled = s.compile(dialect=dialect) assert set(compiled.result_map['anon_1_this_is_the_data_2'][1]).\ issuperset([ 'anon_1_this_is_the_data_2', q.corresponding_column( table1.c.this_is_the_data_column) ]) assert set(compiled.result_map['anon_1_this_is_the_prim_1'][1]).\ issuperset([ 'anon_1_this_is_the_prim_1', q.corresponding_column( table1.c.this_is_the_primarykey_column) ]) def test_column_bind_labels_1(self): table1 = self.table1 s = table1.select(table1.c.this_is_the_primarykey_column == 4) self.assert_compile( s, "SELECT some_large_named_table.this_is_the_primarykey_column, " "some_large_named_table.this_is_the_data_column " "FROM some_large_named_table WHERE " "some_large_named_table.this_is_the_primarykey_column = " ":this_is_the_primarykey__1", checkparams={'this_is_the_primarykey__1': 4}, dialect=self._length_fixture() ) self.assert_compile( s, "SELECT some_large_named_table.this_is_the_primarykey_column, " "some_large_named_table.this_is_the_data_column " "FROM some_large_named_table WHERE " "some_large_named_table.this_is_the_primarykey_column = " "%s", checkpositional=(4, ), checkparams={'this_is_the_primarykey__1': 4}, dialect=self._length_fixture(positional=True) ) def test_column_bind_labels_2(self): table1 = self.table1 s = table1.select(or_( table1.c.this_is_the_primarykey_column == 4, table1.c.this_is_the_primarykey_column == 2 )) self.assert_compile( s, "SELECT some_large_named_table.this_is_the_primarykey_column, " "some_large_named_table.this_is_the_data_column " "FROM some_large_named_table WHERE " "some_large_named_table.this_is_the_primarykey_column = " ":this_is_the_primarykey__1 OR " "some_large_named_table.this_is_the_primarykey_column = " ":this_is_the_primarykey__2", checkparams={ 'this_is_the_primarykey__1': 4, 'this_is_the_primarykey__2': 2 }, dialect=self._length_fixture() ) self.assert_compile( s, "SELECT some_large_named_table.this_is_the_primarykey_column, " "some_large_named_table.this_is_the_data_column " "FROM some_large_named_table WHERE " "some_large_named_table.this_is_the_primarykey_column = " "%s OR " "some_large_named_table.this_is_the_primarykey_column = " "%s", checkparams={ 'this_is_the_primarykey__1': 4, 'this_is_the_primarykey__2': 2 }, checkpositional=(4, 2), dialect=self._length_fixture(positional=True) ) class LabelLengthTest(fixtures.TestBase, AssertsCompiledSQL): __dialect__ = 'DefaultDialect' table1 = table('some_large_named_table', column('this_is_the_primarykey_column'), column('this_is_the_data_column') ) table2 = table('table_with_exactly_29_characs', column('this_is_the_primarykey_column'), column('this_is_the_data_column') ) def test_adjustable_1(self): table1 = self.table1 q = table1.select( table1.c.this_is_the_primarykey_column == 4).alias('foo') x = select([q]) compile_dialect = default.DefaultDialect(label_length=10) self.assert_compile(x, 'SELECT ' 'foo.this_1, foo.this_2 ' 'FROM (' 'SELECT ' 'some_large_named_table.this_is_the_primarykey_column ' 'AS this_1, ' 'some_large_named_table.this_is_the_data_column ' 'AS this_2 ' 'FROM ' 'some_large_named_table ' 'WHERE ' 'some_large_named_table.this_is_the_primarykey_column ' '= :this_1' ') ' 'AS foo', dialect=compile_dialect) def test_adjustable_2(self): table1 = 
self.table1 q = table1.select( table1.c.this_is_the_primarykey_column == 4).alias('foo') x = select([q]) compile_dialect = default.DefaultDialect(label_length=10) self.assert_compile(x, 'SELECT ' 'foo.this_1, foo.this_2 ' 'FROM (' 'SELECT ' 'some_large_named_table.this_is_the_primarykey_column ' 'AS this_1, ' 'some_large_named_table.this_is_the_data_column ' 'AS this_2 ' 'FROM ' 'some_large_named_table ' 'WHERE ' 'some_large_named_table.this_is_the_primarykey_column ' '= :this_1' ') ' 'AS foo', dialect=compile_dialect) def test_adjustable_3(self): table1 = self.table1 compile_dialect = default.DefaultDialect(label_length=4) q = table1.select( table1.c.this_is_the_primarykey_column == 4).alias('foo') x = select([q]) self.assert_compile(x, 'SELECT ' 'foo._1, foo._2 ' 'FROM (' 'SELECT ' 'some_large_named_table.this_is_the_primarykey_column ' 'AS _1, ' 'some_large_named_table.this_is_the_data_column ' 'AS _2 ' 'FROM ' 'some_large_named_table ' 'WHERE ' 'some_large_named_table.this_is_the_primarykey_column ' '= :_1' ') ' 'AS foo', dialect=compile_dialect) def test_adjustable_4(self): table1 = self.table1 q = table1.select(table1.c.this_is_the_primarykey_column == 4).alias() x = select([q], use_labels=True) compile_dialect = default.DefaultDialect(label_length=10) self.assert_compile(x, 'SELECT ' 'anon_1.this_2 AS anon_1, ' 'anon_1.this_4 AS anon_3 ' 'FROM (' 'SELECT ' 'some_large_named_table.this_is_the_primarykey_column ' 'AS this_2, ' 'some_large_named_table.this_is_the_data_column ' 'AS this_4 ' 'FROM ' 'some_large_named_table ' 'WHERE ' 'some_large_named_table.this_is_the_primarykey_column ' '= :this_1' ') ' 'AS anon_1', dialect=compile_dialect) def test_adjustable_5(self): table1 = self.table1 q = table1.select(table1.c.this_is_the_primarykey_column == 4).alias() x = select([q], use_labels=True) compile_dialect = default.DefaultDialect(label_length=4) self.assert_compile(x, 'SELECT ' '_1._2 AS _1, ' '_1._4 AS _3 ' 'FROM (' 'SELECT ' 'some_large_named_table.this_is_the_primarykey_column ' 'AS _2, ' 'some_large_named_table.this_is_the_data_column ' 'AS _4 ' 'FROM ' 'some_large_named_table ' 'WHERE ' 'some_large_named_table.this_is_the_primarykey_column ' '= :_1' ') ' 'AS _1', dialect=compile_dialect) def test_adjustable_result_schema_column_1(self): table1 = self.table1 q = table1.select( table1.c.this_is_the_primarykey_column == 4).apply_labels().\ alias('foo') dialect = default.DefaultDialect(label_length=10) compiled = q.compile(dialect=dialect) assert set(compiled.result_map['some_2'][1]).issuperset([ table1.c.this_is_the_data_column, 'some_large_named_table_this_is_the_data_column', 'some_2' ]) assert set(compiled.result_map['some_1'][1]).issuperset([ table1.c.this_is_the_primarykey_column, 'some_large_named_table_this_is_the_primarykey_column', 'some_1' ]) def test_adjustable_result_schema_column_2(self): table1 = self.table1 q = table1.select( table1.c.this_is_the_primarykey_column == 4).alias('foo') x = select([q]) dialect = default.DefaultDialect(label_length=10) compiled = x.compile(dialect=dialect) assert set(compiled.result_map['this_2'][1]).issuperset([ q.corresponding_column(table1.c.this_is_the_data_column), 'this_is_the_data_column', 'this_2']) assert set(compiled.result_map['this_1'][1]).issuperset([ q.corresponding_column(table1.c.this_is_the_primarykey_column), 'this_is_the_primarykey_column', 'this_1']) def test_table_plus_column_exceeds_length(self): """test that the truncation only occurs when tablename + colname are concatenated, if they are individually under the 
label length. """ compile_dialect = default.DefaultDialect(label_length=30) a_table = table( 'thirty_characters_table_xxxxxx', column('id') ) other_table = table( 'other_thirty_characters_table_', column('id'), column('thirty_characters_table_id') ) anon = a_table.alias() j1 = other_table.outerjoin(anon, anon.c.id == other_table.c.thirty_characters_table_id) self.assert_compile( select([other_table, anon]). select_from(j1).apply_labels(), 'SELECT ' 'other_thirty_characters_table_.id ' 'AS other_thirty_characters__1, ' 'other_thirty_characters_table_.thirty_characters_table_id ' 'AS other_thirty_characters__2, ' 'thirty_characters_table__1.id ' 'AS thirty_characters_table__3 ' 'FROM ' 'other_thirty_characters_table_ ' 'LEFT OUTER JOIN ' 'thirty_characters_table_xxxxxx AS thirty_characters_table__1 ' 'ON thirty_characters_table__1.id = ' 'other_thirty_characters_table_.thirty_characters_table_id', dialect=compile_dialect) def test_colnames_longer_than_labels_lowercase(self): t1 = table('a', column('abcde')) self._test_colnames_longer_than_labels(t1) def test_colnames_longer_than_labels_uppercase(self): m = MetaData() t1 = Table('a', m, Column('abcde', Integer)) self._test_colnames_longer_than_labels(t1) def _test_colnames_longer_than_labels(self, t1): dialect = default.DefaultDialect(label_length=4) a1 = t1.alias(name='asdf') # 'abcde' is longer than 4, but rendered as itself # needs to have all characters s = select([a1]) self.assert_compile(select([a1]), 'SELECT asdf.abcde FROM a AS asdf', dialect=dialect) compiled = s.compile(dialect=dialect) assert set(compiled.result_map['abcde'][1]).issuperset([ 'abcde', a1.c.abcde, 'abcde']) # column still there, but short label s = select([a1]).apply_labels() self.assert_compile(s, 'SELECT asdf.abcde AS _1 FROM a AS asdf', dialect=dialect) compiled = s.compile(dialect=dialect) assert set(compiled.result_map['_1'][1]).issuperset([ 'asdf_abcde', a1.c.abcde, '_1']) SQLAlchemy-0.8.4/test/sql/test_metadata.py0000644000076500000240000016265512251150016021210 0ustar classicstaff00000000000000from sqlalchemy.testing import assert_raises from sqlalchemy.testing import assert_raises_message from sqlalchemy.testing import emits_warning import pickle from sqlalchemy import Integer, String, UniqueConstraint, \ CheckConstraint, ForeignKey, MetaData, Sequence, \ ForeignKeyConstraint, ColumnDefault, Index, event,\ events, Unicode, types as sqltypes from sqlalchemy.testing.schema import Table, Column from sqlalchemy import schema, exc import sqlalchemy as tsa from sqlalchemy.testing import fixtures from sqlalchemy import testing from sqlalchemy.testing import ComparesTables, AssertsCompiledSQL from sqlalchemy.testing import eq_, is_ class MetaDataTest(fixtures.TestBase, ComparesTables): def test_metadata_connect(self): metadata = MetaData() t1 = Table('table1', metadata, Column('col1', Integer, primary_key=True), Column('col2', String(20))) metadata.bind = testing.db metadata.create_all() try: assert t1.count().scalar() == 0 finally: metadata.drop_all() def test_metadata_contains(self): metadata = MetaData() t1 = Table('t1', metadata, Column('x', Integer)) t2 = Table('t2', metadata, Column('x', Integer), schema='foo') t3 = Table('t2', MetaData(), Column('x', Integer)) t4 = Table('t1', MetaData(), Column('x', Integer), schema='foo') assert "t1" in metadata assert "foo.t2" in metadata assert "t2" not in metadata assert "foo.t1" not in metadata assert t1 in metadata assert t2 in metadata assert t3 not in metadata assert t4 not in metadata def 
test_uninitialized_column_copy(self): for col in [ Column('foo', String(), nullable=False), Column('baz', String(), unique=True), Column(Integer(), primary_key=True), Column('bar', Integer(), Sequence('foo_seq'), primary_key=True, key='bar'), Column(Integer(), ForeignKey('bat.blah'), doc="this is a col"), Column('bar', Integer(), ForeignKey('bat.blah'), primary_key=True, key='bar'), Column('bar', Integer(), info={'foo': 'bar'}), ]: c2 = col.copy() for attr in ('name', 'type', 'nullable', 'primary_key', 'key', 'unique', 'info', 'doc'): eq_(getattr(col, attr), getattr(c2, attr)) eq_(len(col.foreign_keys), len(c2.foreign_keys)) if col.default: eq_(c2.default.name, 'foo_seq') for a1, a2 in zip(col.foreign_keys, c2.foreign_keys): assert a1 is not a2 eq_(a2._colspec, 'bat.blah') def test_col_subclass_copy(self): class MyColumn(schema.Column): def __init__(self, *args, **kw): self.widget = kw.pop('widget', None) super(MyColumn, self).__init__(*args, **kw) def copy(self, *arg, **kw): c = super(MyColumn, self).copy(*arg, **kw) c.widget = self.widget return c c1 = MyColumn('foo', Integer, widget='x') c2 = c1.copy() assert isinstance(c2, MyColumn) eq_(c2.widget, 'x') def test_uninitialized_column_copy_events(self): msgs = [] def write(c, t): msgs.append("attach %s.%s" % (t.name, c.name)) c1 = Column('foo', String()) m = MetaData() for i in xrange(3): cx = c1.copy() # as of 0.7, these events no longer copy. its expected # that listeners will be re-established from the # natural construction of things. cx._on_table_attach(write) Table('foo%d' % i, m, cx) eq_(msgs, ['attach foo0.foo', 'attach foo1.foo', 'attach foo2.foo']) def test_schema_collection_add(self): metadata = MetaData() Table('t1', metadata, Column('x', Integer), schema='foo') Table('t2', metadata, Column('x', Integer), schema='bar') Table('t3', metadata, Column('x', Integer)) eq_(metadata._schemas, set(['foo', 'bar'])) eq_(len(metadata.tables), 3) def test_schema_collection_remove(self): metadata = MetaData() t1 = Table('t1', metadata, Column('x', Integer), schema='foo') Table('t2', metadata, Column('x', Integer), schema='bar') t3 = Table('t3', metadata, Column('x', Integer), schema='bar') metadata.remove(t3) eq_(metadata._schemas, set(['foo', 'bar'])) eq_(len(metadata.tables), 2) metadata.remove(t1) eq_(metadata._schemas, set(['bar'])) eq_(len(metadata.tables), 1) def test_schema_collection_remove_all(self): metadata = MetaData() Table('t1', metadata, Column('x', Integer), schema='foo') Table('t2', metadata, Column('x', Integer), schema='bar') metadata.clear() eq_(metadata._schemas, set()) eq_(len(metadata.tables), 0) def test_metadata_tables_immutable(self): metadata = MetaData() Table('t1', metadata, Column('x', Integer)) assert 't1' in metadata.tables assert_raises( TypeError, lambda: metadata.tables.pop('t1') ) @testing.provide_metadata def test_dupe_tables(self): metadata = self.metadata Table('table1', metadata, Column('col1', Integer, primary_key=True), Column('col2', String(20))) metadata.create_all() Table('table1', metadata, autoload=True) def go(): Table('table1', metadata, Column('col1', Integer, primary_key=True), Column('col2', String(20))) assert_raises_message( tsa.exc.InvalidRequestError, "Table 'table1' is already defined for this " "MetaData instance. 
Specify 'extend_existing=True' " "to redefine options and columns on an existing " "Table object.", go ) def test_fk_copy(self): c1 = Column('foo', Integer) c2 = Column('bar', Integer) m = MetaData() t1 = Table('t', m, c1, c2) kw = dict(onupdate="X", ondelete="Y", use_alter=True, name='f1', deferrable="Z", initially="Q", link_to_name=True) fk1 = ForeignKey(c1, **kw) fk2 = ForeignKeyConstraint((c1,), (c2,), **kw) t1.append_constraint(fk2) fk1c = fk1.copy() fk2c = fk2.copy() for k in kw: eq_(getattr(fk1c, k), kw[k]) eq_(getattr(fk2c, k), kw[k]) def test_check_constraint_copy(self): r = lambda x: x c = CheckConstraint("foo bar", name='name', initially=True, deferrable=True, _create_rule=r) c2 = c.copy() eq_(c2.name, 'name') eq_(str(c2.sqltext), "foo bar") eq_(c2.initially, True) eq_(c2.deferrable, True) assert c2._create_rule is r def test_col_replace_w_constraint(self): m = MetaData() a = Table('a', m, Column('id', Integer, primary_key=True)) aid = Column('a_id', ForeignKey('a.id')) b = Table('b', m, aid) b.append_column(aid) assert b.c.a_id.references(a.c.id) eq_(len(b.constraints), 2) def test_fk_erroneous_schema_arg(self): assert_raises_message( exc.SADeprecationWarning, "'schema' argument on ForeignKey has no effect.", ForeignKey, "foo.bar", schema='myschema' ) def test_fk_construct(self): c1 = Column('foo', Integer) c2 = Column('bar', Integer) m = MetaData() t1 = Table('t', m, c1, c2) fk1 = ForeignKeyConstraint(('foo', ), ('bar', ), table=t1) assert fk1 in t1.constraints def test_fk_no_such_parent_col_error(self): meta = MetaData() a = Table('a', meta, Column('a', Integer)) Table('b', meta, Column('b', Integer)) def go(): a.append_constraint( ForeignKeyConstraint(['x'], ['b.b']) ) assert_raises_message( exc.ArgumentError, "Can't create ForeignKeyConstraint on " "table 'a': no column named 'x' is present.", go ) def test_fk_no_such_target_col_error(self): meta = MetaData() a = Table('a', meta, Column('a', Integer)) Table('b', meta, Column('b', Integer)) a.append_constraint( ForeignKeyConstraint(['a'], ['b.x']) ) def go(): list(a.c.a.foreign_keys)[0].column assert_raises_message( exc.NoReferencedColumnError, "Could not create ForeignKey 'b.x' on " "table 'a': table 'b' has no column named 'x'", go ) @testing.exclude('mysql', '<', (4, 1, 1), 'early types are squirrely') def test_to_metadata(self): meta = MetaData() table = Table('mytable', meta, Column('myid', Integer, Sequence('foo_id_seq'), primary_key=True), Column('name', String(40), nullable=True), Column('foo', String(40), nullable=False, server_default='x', server_onupdate='q'), Column('bar', String(40), nullable=False, default='y', onupdate='z'), Column('description', String(30), CheckConstraint("description='hi'")), UniqueConstraint('name'), test_needs_fk=True, ) table2 = Table('othertable', meta, Column('id', Integer, Sequence('foo_seq'), primary_key=True), Column('myid', Integer, ForeignKey('mytable.myid'), ), test_needs_fk=True, ) def test_to_metadata(): meta2 = MetaData() table_c = table.tometadata(meta2) table2_c = table2.tometadata(meta2) return (table_c, table2_c) def test_pickle(): meta.bind = testing.db meta2 = pickle.loads(pickle.dumps(meta)) assert meta2.bind is None pickle.loads(pickle.dumps(meta2)) return (meta2.tables['mytable'], meta2.tables['othertable']) def test_pickle_via_reflect(): # this is the most common use case, pickling the results of a # database reflection meta2 = MetaData(bind=testing.db) t1 = Table('mytable', meta2, autoload=True) Table('othertable', meta2, autoload=True) meta3 = 
pickle.loads(pickle.dumps(meta2)) assert meta3.bind is None assert meta3.tables['mytable'] is not t1 return (meta3.tables['mytable'], meta3.tables['othertable']) meta.create_all(testing.db) try: for test, has_constraints, reflect in \ (test_to_metadata, True, False), \ (test_pickle, True, False), \ (test_pickle_via_reflect, False, True): table_c, table2_c = test() self.assert_tables_equal(table, table_c) self.assert_tables_equal(table2, table2_c) assert table is not table_c assert table.primary_key is not table_c.primary_key assert list(table2_c.c.myid.foreign_keys)[0].column \ is table_c.c.myid assert list(table2_c.c.myid.foreign_keys)[0].column \ is not table.c.myid assert 'x' in str(table_c.c.foo.server_default.arg) if not reflect: assert isinstance(table_c.c.myid.default, Sequence) assert str(table_c.c.foo.server_onupdate.arg) == 'q' assert str(table_c.c.bar.default.arg) == 'y' assert getattr(table_c.c.bar.onupdate.arg, 'arg', table_c.c.bar.onupdate.arg) == 'z' assert isinstance(table2_c.c.id.default, Sequence) # constraints dont get reflected for any dialect right # now if has_constraints: for c in table_c.c.description.constraints: if isinstance(c, CheckConstraint): break else: assert False assert str(c.sqltext) == "description='hi'" for c in table_c.constraints: if isinstance(c, UniqueConstraint): break else: assert False assert c.columns.contains_column(table_c.c.name) assert not c.columns.contains_column(table.c.name) finally: meta.drop_all(testing.db) def test_col_key_fk_parent_tometadata(self): # test #2643 m1 = MetaData() a = Table('a', m1, Column('x', Integer)) b = Table('b', m1, Column('x', Integer, ForeignKey('a.x'), key='y')) assert b.c.y.references(a.c.x) m2 = MetaData() b2 = b.tometadata(m2) a2 = a.tometadata(m2) assert b2.c.y.references(a2.c.x) def test_pickle_metadata_sequence_restated(self): m1 = MetaData() Table('a', m1, Column('id', Integer, primary_key=True), Column('x', Integer, Sequence("x_seq"))) m2 = pickle.loads(pickle.dumps(m1)) s2 = Sequence("x_seq") t2 = Table('a', m2, Column('id', Integer, primary_key=True), Column('x', Integer, s2), extend_existing=True) assert m2._sequences['x_seq'] is t2.c.x.default assert m2._sequences['x_seq'] is s2 def test_sequence_restated_replaced(self): """Test restatement of Sequence replaces.""" m1 = MetaData() s1 = Sequence("x_seq") t = Table('a', m1, Column('x', Integer, s1) ) assert m1._sequences['x_seq'] is s1 s2 = Sequence('x_seq') Table('a', m1, Column('x', Integer, s2), extend_existing=True ) assert t.c.x.default is s2 assert m1._sequences['x_seq'] is s2 def test_pickle_metadata_sequence_implicit(self): m1 = MetaData() Table('a', m1, Column('id', Integer, primary_key=True), Column('x', Integer, Sequence("x_seq"))) m2 = pickle.loads(pickle.dumps(m1)) t2 = Table('a', m2, extend_existing=True) eq_(m2._sequences, {'x_seq': t2.c.x.default}) def test_pickle_metadata_schema(self): m1 = MetaData() Table('a', m1, Column('id', Integer, primary_key=True), Column('x', Integer, Sequence("x_seq")), schema='y') m2 = pickle.loads(pickle.dumps(m1)) Table('a', m2, schema='y', extend_existing=True) eq_(m2._schemas, m1._schemas) def test_tometadata_with_schema(self): meta = MetaData() table = Table('mytable', meta, Column('myid', Integer, primary_key=True), Column('name', String(40), nullable=True), Column('description', String(30), CheckConstraint("description='hi'")), UniqueConstraint('name'), test_needs_fk=True, ) table2 = Table('othertable', meta, Column('id', Integer, primary_key=True), Column('myid', Integer, 
ForeignKey('mytable.myid')), test_needs_fk=True, ) meta2 = MetaData() table_c = table.tometadata(meta2, schema='someschema') table2_c = table2.tometadata(meta2, schema='someschema') eq_(str(table_c.join(table2_c).onclause), str(table_c.c.myid == table2_c.c.myid)) eq_(str(table_c.join(table2_c).onclause), 'someschema.mytable.myid = someschema.othertable.myid') def test_tometadata_with_default_schema(self): meta = MetaData() table = Table('mytable', meta, Column('myid', Integer, primary_key=True), Column('name', String(40), nullable=True), Column('description', String(30), CheckConstraint("description='hi'")), UniqueConstraint('name'), test_needs_fk=True, schema='myschema', ) table2 = Table('othertable', meta, Column('id', Integer, primary_key=True), Column('myid', Integer, ForeignKey('myschema.mytable.myid')), test_needs_fk=True, schema='myschema', ) meta2 = MetaData() table_c = table.tometadata(meta2) table2_c = table2.tometadata(meta2) eq_(str(table_c.join(table2_c).onclause), str(table_c.c.myid == table2_c.c.myid)) eq_(str(table_c.join(table2_c).onclause), 'myschema.mytable.myid = myschema.othertable.myid') def test_tometadata_kwargs(self): meta = MetaData() table = Table('mytable', meta, Column('myid', Integer, primary_key=True), mysql_engine='InnoDB', ) meta2 = MetaData() table_c = table.tometadata(meta2) eq_(table.kwargs, table_c.kwargs) def test_tometadata_indexes(self): meta = MetaData() table = Table('mytable', meta, Column('id', Integer, primary_key=True), Column('data1', Integer, index=True), Column('data2', Integer), ) Index('multi', table.c.data1, table.c.data2), meta2 = MetaData() table_c = table.tometadata(meta2) def _get_key(i): return [i.name, i.unique] + \ sorted(i.kwargs.items()) + \ i.columns.keys() eq_( sorted([_get_key(i) for i in table.indexes]), sorted([_get_key(i) for i in table_c.indexes]) ) @emits_warning("Table '.+' already exists within the given MetaData") def test_tometadata_already_there(self): meta1 = MetaData() table1 = Table('mytable', meta1, Column('myid', Integer, primary_key=True), ) meta2 = MetaData() table2 = Table('mytable', meta2, Column('yourid', Integer, primary_key=True), ) table_c = table1.tometadata(meta2) table_d = table2.tometadata(meta2) # d'oh! 
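# [editor's note -- illustrative sketch, not part of the original test
#  module]  tometadata() copies a Table (columns, constraints, indexes,
#  dialect kwargs) into another MetaData, re-pointing foreign keys at
#  the copies.  A small standalone version of what the surrounding
#  tests verify, using hypothetical "parent" / "child" tables:

from sqlalchemy import MetaData, Table, Column, Integer, ForeignKey

m1 = MetaData()
parent = Table('parent', m1, Column('id', Integer, primary_key=True))
child = Table('child', m1,
              Column('id', Integer, primary_key=True),
              Column('parent_id', Integer, ForeignKey('parent.id')))

m2 = MetaData()
parent_copy = parent.tometadata(m2)
child_copy = child.tometadata(m2)
# a target schema may also be given, e.g. tometadata(m2, schema='reporting')

# the copied FK resolves against the copied parent, not the original
assert child_copy.c.parent_id.references(parent_copy.c.id)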
assert table_c is table_d def test_metadata_schema_arg(self): m1 = MetaData(schema='sch1') m2 = MetaData(schema='sch1', quote_schema=True) m3 = MetaData(schema='sch1', quote_schema=False) m4 = MetaData() for i, (name, metadata, schema, quote_schema, exp_schema, exp_quote_schema) in enumerate([ ('t1', m1, None, None, 'sch1', None), ('t2', m1, 'sch2', None, 'sch2', None), ('t3', m1, 'sch2', True, 'sch2', True), ('t4', m1, 'sch1', None, 'sch1', None), ('t1', m2, None, None, 'sch1', True), ('t2', m2, 'sch2', None, 'sch2', None), ('t3', m2, 'sch2', True, 'sch2', True), ('t4', m2, 'sch1', None, 'sch1', None), ('t1', m3, None, None, 'sch1', False), ('t2', m3, 'sch2', None, 'sch2', None), ('t3', m3, 'sch2', True, 'sch2', True), ('t4', m3, 'sch1', None, 'sch1', None), ('t1', m4, None, None, None, None), ('t2', m4, 'sch2', None, 'sch2', None), ('t3', m4, 'sch2', True, 'sch2', True), ('t4', m4, 'sch1', None, 'sch1', None), ]): kw = {} if schema is not None: kw['schema'] = schema if quote_schema is not None: kw['quote_schema'] = quote_schema t = Table(name, metadata, **kw) eq_(t.schema, exp_schema, "test %d, table schema" % i) eq_(t.quote_schema, exp_quote_schema, "test %d, table quote_schema" % i) seq = Sequence(name, metadata=metadata, **kw) eq_(seq.schema, exp_schema, "test %d, seq schema" % i) eq_(seq.quote_schema, exp_quote_schema, "test %d, seq quote_schema" % i) def test_manual_dependencies(self): meta = MetaData() a = Table('a', meta, Column('foo', Integer)) b = Table('b', meta, Column('foo', Integer)) c = Table('c', meta, Column('foo', Integer)) d = Table('d', meta, Column('foo', Integer)) e = Table('e', meta, Column('foo', Integer)) e.add_is_dependent_on(c) a.add_is_dependent_on(b) b.add_is_dependent_on(d) e.add_is_dependent_on(b) c.add_is_dependent_on(a) eq_( meta.sorted_tables, [d, b, a, c, e] ) def test_tometadata_default_schema_metadata(self): meta = MetaData(schema='myschema') table = Table('mytable', meta, Column('myid', Integer, primary_key=True), Column('name', String(40), nullable=True), Column('description', String(30), CheckConstraint("description='hi'")), UniqueConstraint('name'), test_needs_fk=True ) table2 = Table('othertable', meta, Column('id', Integer, primary_key=True), Column('myid', Integer, ForeignKey('myschema.mytable.myid')), test_needs_fk=True ) meta2 = MetaData(schema='someschema') table_c = table.tometadata(meta2, schema=None) table2_c = table2.tometadata(meta2, schema=None) eq_(str(table_c.join(table2_c).onclause), str(table_c.c.myid == table2_c.c.myid)) eq_(str(table_c.join(table2_c).onclause), "someschema.mytable.myid = someschema.othertable.myid") def test_tometadata_strip_schema(self): meta = MetaData() table = Table('mytable', meta, Column('myid', Integer, primary_key=True), Column('name', String(40), nullable=True), Column('description', String(30), CheckConstraint("description='hi'")), UniqueConstraint('name'), test_needs_fk=True, ) table2 = Table('othertable', meta, Column('id', Integer, primary_key=True), Column('myid', Integer, ForeignKey('mytable.myid')), test_needs_fk=True, ) meta2 = MetaData() table_c = table.tometadata(meta2, schema=None) table2_c = table2.tometadata(meta2, schema=None) eq_(str(table_c.join(table2_c).onclause), str(table_c.c.myid == table2_c.c.myid)) eq_(str(table_c.join(table2_c).onclause), 'mytable.myid = othertable.myid') def test_nonexistent(self): assert_raises(tsa.exc.NoSuchTableError, Table, 'fake_table', MetaData(testing.db), autoload=True) def test_assorted_repr(self): t1 = Table("foo", MetaData(), Column("x", Integer)) i1 = 
Index("bar", t1.c.x) ck = schema.CheckConstraint("x > y", name="someconstraint") for const, exp in ( (Sequence("my_seq"), "Sequence('my_seq')"), (Sequence("my_seq", start=5), "Sequence('my_seq', start=5)"), (Column("foo", Integer), "Column('foo', Integer(), table=None)"), (Table("bar", MetaData(), Column("x", String)), "Table('bar', MetaData(bind=None), " "Column('x', String(), table=), schema=None)"), (schema.DefaultGenerator(for_update=True), "DefaultGenerator(for_update=True)"), (schema.Index("bar", "c"), "Index('bar')"), (i1, "Index('bar', Column('x', Integer(), table=))"), (schema.FetchedValue(), "FetchedValue()"), (ck, "CheckConstraint(" "%s" ", name='someconstraint')" % repr(ck.sqltext)), ): eq_( repr(const), exp ) class TableTest(fixtures.TestBase, AssertsCompiledSQL): @testing.skip_if('mssql', 'different col format') def test_prefixes(self): from sqlalchemy import Table table1 = Table("temporary_table_1", MetaData(), Column("col1", Integer), prefixes=["TEMPORARY"]) self.assert_compile( schema.CreateTable(table1), "CREATE TEMPORARY TABLE temporary_table_1 (col1 INTEGER)" ) table2 = Table("temporary_table_2", MetaData(), Column("col1", Integer), prefixes=["VIRTUAL"]) self.assert_compile( schema.CreateTable(table2), "CREATE VIRTUAL TABLE temporary_table_2 (col1 INTEGER)" ) def test_table_info(self): metadata = MetaData() t1 = Table('foo', metadata, info={'x': 'y'}) t2 = Table('bar', metadata, info={}) t3 = Table('bat', metadata) assert t1.info == {'x': 'y'} assert t2.info == {} assert t3.info == {} for t in (t1, t2, t3): t.info['bar'] = 'zip' assert t.info['bar'] == 'zip' def test_c_immutable(self): m = MetaData() t1 = Table('t', m, Column('x', Integer), Column('y', Integer)) assert_raises( TypeError, t1.c.extend, [Column('z', Integer)] ) def assign(): t1.c['z'] = Column('z', Integer) assert_raises( TypeError, assign ) def assign2(): t1.c.z = Column('z', Integer) assert_raises( TypeError, assign2 ) def test_autoincrement_replace(self): m = MetaData() t = Table('t', m, Column('id', Integer, primary_key=True) ) is_(t._autoincrement_column, t.c.id) t = Table('t', m, Column('id', Integer, primary_key=True), extend_existing=True ) is_(t._autoincrement_column, t.c.id) class SchemaTypeTest(fixtures.TestBase): class MyType(sqltypes.SchemaType, sqltypes.TypeEngine): column = None table = None evt_targets = () def _set_table(self, column, table): super(SchemaTypeTest.MyType, self)._set_table(column, table) self.column = column self.table = table def _on_table_create(self, target, bind, **kw): self.evt_targets += (target,) def test_independent_schema(self): m = MetaData() type_ = self.MyType(schema="q") t1 = Table('x', m, Column("y", type_), schema="z") eq_(t1.c.y.type.schema, "q") def test_inherit_schema(self): m = MetaData() type_ = self.MyType(schema="q", inherit_schema=True) t1 = Table('x', m, Column("y", type_), schema="z") eq_(t1.c.y.type.schema, "z") def test_independent_schema_enum(self): m = MetaData() type_ = sqltypes.Enum("a", schema="q") t1 = Table('x', m, Column("y", type_), schema="z") eq_(t1.c.y.type.schema, "q") def test_inherit_schema_enum(self): m = MetaData() type_ = sqltypes.Enum("a", "b", "c", schema="q", inherit_schema=True) t1 = Table('x', m, Column("y", type_), schema="z") eq_(t1.c.y.type.schema, "z") def test_tometadata_copy_type(self): m1 = MetaData() type_ = self.MyType() t1 = Table('x', m1, Column("y", type_)) m2 = MetaData() t2 = t1.tometadata(m2) # metadata isn't set is_(t2.c.y.type.metadata, None) # our test type sets table, though is_(t2.c.y.type.table, t2) def 
test_tometadata_independent_schema(self): m1 = MetaData() type_ = self.MyType() t1 = Table('x', m1, Column("y", type_)) m2 = MetaData() t2 = t1.tometadata(m2, schema="bar") eq_(t2.c.y.type.schema, None) def test_tometadata_inherit_schema(self): m1 = MetaData() type_ = self.MyType(inherit_schema=True) t1 = Table('x', m1, Column("y", type_)) m2 = MetaData() t2 = t1.tometadata(m2, schema="bar") eq_(t1.c.y.type.schema, None) eq_(t2.c.y.type.schema, "bar") def test_tometadata_independent_events(self): m1 = MetaData() type_ = self.MyType() t1 = Table('x', m1, Column("y", type_)) m2 = MetaData() t2 = t1.tometadata(m2) t1.dispatch.before_create(t1, testing.db) eq_(t1.c.y.type.evt_targets, (t1,)) eq_(t2.c.y.type.evt_targets, ()) t2.dispatch.before_create(t2, testing.db) t2.dispatch.before_create(t2, testing.db) eq_(t1.c.y.type.evt_targets, (t1,)) eq_(t2.c.y.type.evt_targets, (t2, t2)) class SchemaTest(fixtures.TestBase, AssertsCompiledSQL): def test_default_schema_metadata_fk(self): m = MetaData(schema="foo") t1 = Table('t1', m, Column('x', Integer)) t2 = Table('t2', m, Column('x', Integer, ForeignKey('t1.x'))) assert t2.c.x.references(t1.c.x) def test_ad_hoc_schema_equiv_fk(self): m = MetaData() t1 = Table('t1', m, Column('x', Integer), schema="foo") t2 = Table('t2', m, Column('x', Integer, ForeignKey('t1.x')), schema="foo") assert_raises( exc.NoReferencedTableError, lambda: t2.c.x.references(t1.c.x) ) def test_default_schema_metadata_fk_alt_remote(self): m = MetaData(schema="foo") t1 = Table('t1', m, Column('x', Integer)) t2 = Table('t2', m, Column('x', Integer, ForeignKey('t1.x')), schema="bar") assert t2.c.x.references(t1.c.x) def test_default_schema_metadata_fk_alt_local_raises(self): m = MetaData(schema="foo") t1 = Table('t1', m, Column('x', Integer), schema="bar") t2 = Table('t2', m, Column('x', Integer, ForeignKey('t1.x'))) assert_raises( exc.NoReferencedTableError, lambda: t2.c.x.references(t1.c.x) ) def test_default_schema_metadata_fk_alt_local(self): m = MetaData(schema="foo") t1 = Table('t1', m, Column('x', Integer), schema="bar") t2 = Table('t2', m, Column('x', Integer, ForeignKey('bar.t1.x'))) assert t2.c.x.references(t1.c.x) def test_create_drop_schema(self): self.assert_compile( schema.CreateSchema("sa_schema"), "CREATE SCHEMA sa_schema" ) self.assert_compile( schema.DropSchema("sa_schema"), "DROP SCHEMA sa_schema" ) self.assert_compile( schema.DropSchema("sa_schema", cascade=True), "DROP SCHEMA sa_schema CASCADE" ) def test_iteration(self): metadata = MetaData() table1 = Table('table1', metadata, Column('col1', Integer, primary_key=True), schema='someschema') table2 = Table('table2', metadata, Column('col1', Integer, primary_key=True), Column('col2', Integer, ForeignKey('someschema.table1.col1')), schema='someschema') t1 = str(schema.CreateTable(table1).compile(bind=testing.db)) t2 = str(schema.CreateTable(table2).compile(bind=testing.db)) if testing.db.dialect.preparer(testing.db.dialect).omit_schema: assert t1.index("CREATE TABLE table1") > -1 assert t2.index("CREATE TABLE table2") > -1 else: assert t1.index("CREATE TABLE someschema.table1") > -1 assert t2.index("CREATE TABLE someschema.table2") > -1 class UseExistingTest(fixtures.TablesTest): @classmethod def define_tables(cls, metadata): Table('users', metadata, Column('id', Integer, primary_key=True), Column('name', String(30))) def _useexisting_fixture(self): meta2 = MetaData(testing.db) Table('users', meta2, autoload=True) return meta2 def _notexisting_fixture(self): return MetaData(testing.db) def 
test_exception_no_flags(self): meta2 = self._useexisting_fixture() def go(): Table('users', meta2, Column('name', Unicode), autoload=True) assert_raises_message( exc.InvalidRequestError, "Table 'users' is already defined for this "\ "MetaData instance.", go ) @testing.uses_deprecated def test_deprecated_useexisting(self): meta2 = self._useexisting_fixture() users = Table('users', meta2, Column('name', Unicode), autoload=True, useexisting=True) assert isinstance(users.c.name.type, Unicode) assert not users.quote users = Table('users', meta2, quote=True, autoload=True, useexisting=True) assert users.quote def test_keep_plus_existing_raises(self): meta2 = self._useexisting_fixture() assert_raises( exc.ArgumentError, Table, 'users', meta2, keep_existing=True, extend_existing=True ) @testing.uses_deprecated def test_existing_plus_useexisting_raises(self): meta2 = self._useexisting_fixture() assert_raises( exc.ArgumentError, Table, 'users', meta2, useexisting=True, extend_existing=True ) def test_keep_existing_no_dupe_constraints(self): meta2 = self._notexisting_fixture() users = Table('users', meta2, Column('id', Integer), Column('name', Unicode), UniqueConstraint('name'), keep_existing=True ) assert 'name' in users.c assert 'id' in users.c eq_(len(users.constraints), 2) u2 = Table('users', meta2, Column('id', Integer), Column('name', Unicode), UniqueConstraint('name'), keep_existing=True ) eq_(len(u2.constraints), 2) def test_extend_existing_dupes_constraints(self): meta2 = self._notexisting_fixture() users = Table('users', meta2, Column('id', Integer), Column('name', Unicode), UniqueConstraint('name'), extend_existing=True ) assert 'name' in users.c assert 'id' in users.c eq_(len(users.constraints), 2) u2 = Table('users', meta2, Column('id', Integer), Column('name', Unicode), UniqueConstraint('name'), extend_existing=True ) # constraint got duped eq_(len(u2.constraints), 3) def test_keep_existing_coltype(self): meta2 = self._useexisting_fixture() users = Table('users', meta2, Column('name', Unicode), autoload=True, keep_existing=True) assert not isinstance(users.c.name.type, Unicode) def test_keep_existing_quote(self): meta2 = self._useexisting_fixture() users = Table('users', meta2, quote=True, autoload=True, keep_existing=True) assert not users.quote def test_keep_existing_add_column(self): meta2 = self._useexisting_fixture() users = Table('users', meta2, Column('foo', Integer), autoload=True, keep_existing=True) assert "foo" not in users.c def test_keep_existing_coltype_no_orig(self): meta2 = self._notexisting_fixture() users = Table('users', meta2, Column('name', Unicode), autoload=True, keep_existing=True) assert isinstance(users.c.name.type, Unicode) def test_keep_existing_quote_no_orig(self): meta2 = self._notexisting_fixture() users = Table('users', meta2, quote=True, autoload=True, keep_existing=True) assert users.quote def test_keep_existing_add_column_no_orig(self): meta2 = self._notexisting_fixture() users = Table('users', meta2, Column('foo', Integer), autoload=True, keep_existing=True) assert "foo" in users.c def test_keep_existing_coltype_no_reflection(self): meta2 = self._useexisting_fixture() users = Table('users', meta2, Column('name', Unicode), keep_existing=True) assert not isinstance(users.c.name.type, Unicode) def test_keep_existing_quote_no_reflection(self): meta2 = self._useexisting_fixture() users = Table('users', meta2, quote=True, keep_existing=True) assert not users.quote def test_keep_existing_add_column_no_reflection(self): meta2 = self._useexisting_fixture() 
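# [editor's note -- illustrative sketch, not part of the original test
#  module]  keep_existing=True and extend_existing=True control what
#  happens when a Table name is re-stated against the same MetaData:
#  keep_existing returns the already-registered Table untouched, while
#  extend_existing applies the newly given columns/options to it in
#  place.  Without either flag, re-stating a Table with columns raises
#  InvalidRequestError.  Hypothetical example:

from sqlalchemy import MetaData, Table, Column, Integer, String

m = MetaData()
t1 = Table('users', m, Column('id', Integer, primary_key=True))

t2 = Table('users', m,
           Column('id', Integer, primary_key=True),
           Column('name', String(50)),
           extend_existing=True)

assert t2 is t1          # same Table object, now extended in place
assert 'name' in t1.c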
users = Table('users', meta2, Column('foo', Integer), keep_existing=True) assert "foo" not in users.c def test_extend_existing_coltype(self): meta2 = self._useexisting_fixture() users = Table('users', meta2, Column('name', Unicode), autoload=True, extend_existing=True) assert isinstance(users.c.name.type, Unicode) def test_extend_existing_quote(self): meta2 = self._useexisting_fixture() users = Table('users', meta2, quote=True, autoload=True, extend_existing=True) assert users.quote def test_extend_existing_add_column(self): meta2 = self._useexisting_fixture() users = Table('users', meta2, Column('foo', Integer), autoload=True, extend_existing=True) assert "foo" in users.c def test_extend_existing_coltype_no_orig(self): meta2 = self._notexisting_fixture() users = Table('users', meta2, Column('name', Unicode), autoload=True, extend_existing=True) assert isinstance(users.c.name.type, Unicode) def test_extend_existing_quote_no_orig(self): meta2 = self._notexisting_fixture() users = Table('users', meta2, quote=True, autoload=True, extend_existing=True) assert users.quote def test_extend_existing_add_column_no_orig(self): meta2 = self._notexisting_fixture() users = Table('users', meta2, Column('foo', Integer), autoload=True, extend_existing=True) assert "foo" in users.c def test_extend_existing_coltype_no_reflection(self): meta2 = self._useexisting_fixture() users = Table('users', meta2, Column('name', Unicode), extend_existing=True) assert isinstance(users.c.name.type, Unicode) def test_extend_existing_quote_no_reflection(self): meta2 = self._useexisting_fixture() users = Table('users', meta2, quote=True, extend_existing=True) assert users.quote def test_extend_existing_add_column_no_reflection(self): meta2 = self._useexisting_fixture() users = Table('users', meta2, Column('foo', Integer), extend_existing=True) assert "foo" in users.c class ConstraintTest(fixtures.TestBase): def _single_fixture(self): m = MetaData() t1 = Table('t1', m, Column('a', Integer), Column('b', Integer) ) t2 = Table('t2', m, Column('a', Integer, ForeignKey('t1.a')) ) t3 = Table('t3', m, Column('a', Integer) ) return t1, t2, t3 def test_table_references(self): t1, t2, t3 = self._single_fixture() assert list(t2.c.a.foreign_keys)[0].references(t1) assert not list(t2.c.a.foreign_keys)[0].references(t3) def test_column_references(self): t1, t2, t3 = self._single_fixture() assert t2.c.a.references(t1.c.a) assert not t2.c.a.references(t3.c.a) assert not t2.c.a.references(t1.c.b) def test_column_references_derived(self): t1, t2, t3 = self._single_fixture() s1 = tsa.select([tsa.select([t1]).alias()]) assert t2.c.a.references(s1.c.a) assert not t2.c.a.references(s1.c.b) def test_copy_doesnt_reference(self): t1, t2, t3 = self._single_fixture() a2 = t2.c.a.copy() assert not a2.references(t1.c.a) assert not a2.references(t1.c.b) def test_derived_column_references(self): t1, t2, t3 = self._single_fixture() s1 = tsa.select([tsa.select([t2]).alias()]) assert s1.c.a.references(t1.c.a) assert not s1.c.a.references(t1.c.b) def test_invalid_composite_fk_check(self): m = MetaData() t1 = Table('t1', m, Column('x', Integer), Column('y', Integer), ForeignKeyConstraint(['x', 'y'], ['t2.x', 't3.y']) ) t2 = Table('t2', m, Column('x', Integer)) t3 = Table('t3', m, Column('y', Integer)) assert_raises_message( exc.ArgumentError, r"ForeignKeyConstraint on t1\(x, y\) refers to " "multiple remote tables: t2 and t3", t1.join, t2 ) assert_raises_message( exc.ArgumentError, r"ForeignKeyConstraint on t1\(x, y\) refers to " "multiple remote tables: t2 and 
t3", t1.join, t3 ) assert_raises_message( exc.ArgumentError, r"ForeignKeyConstraint on t1\(x, y\) refers to " "multiple remote tables: t2 and t3", schema.CreateTable(t1).compile ) def test_constraint_copied_to_proxy_ok(self): m = MetaData() t1 = Table('t1', m, Column('id', Integer, primary_key=True)) t2 = Table('t2', m, Column('id', Integer, ForeignKey('t1.id'), primary_key=True)) s = tsa.select([t2]) t2fk = list(t2.c.id.foreign_keys)[0] sfk = list(s.c.id.foreign_keys)[0] # the two FKs share the ForeignKeyConstraint is_( t2fk.constraint, sfk.constraint ) # but the ForeignKeyConstraint isn't # aware of the select's FK eq_( t2fk.constraint.elements, [t2fk] ) class ColumnDefinitionTest(AssertsCompiledSQL, fixtures.TestBase): """Test Column() construction.""" __dialect__ = 'default' def columns(self): return [Column(Integer), Column('b', Integer), Column(Integer), Column('d', Integer), Column(Integer, name='e'), Column(type_=Integer), Column(Integer()), Column('h', Integer()), Column(type_=Integer())] def test_basic(self): c = self.columns() for i, v in ((0, 'a'), (2, 'c'), (5, 'f'), (6, 'g'), (8, 'i')): c[i].name = v c[i].key = v del i, v tbl = Table('table', MetaData(), *c) for i, col in enumerate(tbl.c): assert col.name == c[i].name def test_name_none(self): c = Column(Integer) assert_raises_message( exc.ArgumentError, "Column must be constructed with a non-blank name or assign a " "non-blank .name ", Table, 't', MetaData(), c) def test_name_blank(self): c = Column('', Integer) assert_raises_message( exc.ArgumentError, "Column must be constructed with a non-blank name or assign a " "non-blank .name ", Table, 't', MetaData(), c) def test_dupe_column(self): c = Column('x', Integer) Table('t', MetaData(), c) assert_raises_message( exc.ArgumentError, "Column object already assigned to Table 't'", Table, 'q', MetaData(), c) def test_incomplete_key(self): c = Column(Integer) assert c.name is None assert c.key is None c.name = 'named' Table('t', MetaData(), c) assert c.name == 'named' assert c.name == c.key def test_unique_index_flags_default_to_none(self): c = Column(Integer) eq_(c.unique, None) eq_(c.index, None) c = Column('c', Integer, index=True) eq_(c.unique, None) eq_(c.index, True) t = Table('t', MetaData(), c) eq_(list(t.indexes)[0].unique, False) c = Column(Integer, unique=True) eq_(c.unique, True) eq_(c.index, None) c = Column('c', Integer, index=True, unique=True) eq_(c.unique, True) eq_(c.index, True) t = Table('t', MetaData(), c) eq_(list(t.indexes)[0].unique, True) def test_bogus(self): assert_raises(exc.ArgumentError, Column, 'foo', name='bar') assert_raises(exc.ArgumentError, Column, 'foo', Integer, type_=Integer()) def test_custom_subclass_proxy(self): """test proxy generation of a Column subclass, can be compiled.""" from sqlalchemy.schema import Column from sqlalchemy.ext.compiler import compiles from sqlalchemy.sql import select class MyColumn(Column): def _constructor(self, name, type, **kw): kw['name'] = name return MyColumn(type, **kw) def __init__(self, type, **kw): Column.__init__(self, type, **kw) def my_goofy_thing(self): return "hi" @compiles(MyColumn) def goofy(element, compiler, **kw): s = compiler.visit_column(element, **kw) return s + "-" id = MyColumn(Integer, primary_key=True) id.name = 'id' name = MyColumn(String) name.name = 'name' t1 = Table('foo', MetaData(), id, name ) # goofy thing eq_(t1.c.name.my_goofy_thing(), "hi") # create proxy s = select([t1.select().alias()]) # proxy has goofy thing eq_(s.c.name.my_goofy_thing(), "hi") # compile works 
self.assert_compile( select([t1.select().alias()]), "SELECT anon_1.id-, anon_1.name- FROM " "(SELECT foo.id- AS id, foo.name- AS name " "FROM foo) AS anon_1", ) def test_custom_subclass_proxy_typeerror(self): from sqlalchemy.schema import Column from sqlalchemy.sql import select class MyColumn(Column): def __init__(self, type, **kw): Column.__init__(self, type, **kw) id = MyColumn(Integer, primary_key=True) id.name = 'id' name = MyColumn(String) name.name = 'name' t1 = Table('foo', MetaData(), id, name ) assert_raises_message( TypeError, "Could not create a copy of this " "object. Ensure the class includes a _constructor()", getattr, select([t1.select().alias()]), 'c' ) def test_custom_create(self): from sqlalchemy.ext.compiler import compiles, deregister @compiles(schema.CreateColumn) def compile(element, compiler, **kw): column = element.element if "special" not in column.info: return compiler.visit_create_column(element, **kw) text = "%s SPECIAL DIRECTIVE %s" % ( column.name, compiler.type_compiler.process(column.type) ) default = compiler.get_column_default_string(column) if default is not None: text += " DEFAULT " + default if not column.nullable: text += " NOT NULL" if column.constraints: text += " ".join( compiler.process(const) for const in column.constraints) return text t = Table('mytable', MetaData(), Column('x', Integer, info={"special": True}, primary_key=True), Column('y', String(50)), Column('z', String(20), info={"special": True}) ) self.assert_compile( schema.CreateTable(t), "CREATE TABLE mytable (x SPECIAL DIRECTIVE INTEGER " "NOT NULL, y VARCHAR(50), " "z SPECIAL DIRECTIVE VARCHAR(20), PRIMARY KEY (x))" ) deregister(schema.CreateColumn) class ColumnDefaultsTest(fixtures.TestBase): """test assignment of default fixures to columns""" def _fixture(self, *arg, **kw): return Column('x', Integer, *arg, **kw) def test_server_default_positional(self): target = schema.DefaultClause('y') c = self._fixture(target) assert c.server_default is target assert target.column is c def test_onupdate_default_not_server_default_one(self): target1 = schema.DefaultClause('y') target2 = schema.DefaultClause('z') c = self._fixture(server_default=target1, server_onupdate=target2) eq_(c.server_default.arg, 'y') eq_(c.server_onupdate.arg, 'z') def test_onupdate_default_not_server_default_two(self): target1 = schema.DefaultClause('y', for_update=True) target2 = schema.DefaultClause('z', for_update=True) c = self._fixture(server_default=target1, server_onupdate=target2) eq_(c.server_default.arg, 'y') eq_(c.server_onupdate.arg, 'z') def test_onupdate_default_not_server_default_three(self): target1 = schema.DefaultClause('y', for_update=False) target2 = schema.DefaultClause('z', for_update=True) c = self._fixture(target1, target2) eq_(c.server_default.arg, 'y') eq_(c.server_onupdate.arg, 'z') def test_onupdate_default_not_server_default_four(self): target1 = schema.DefaultClause('y', for_update=False) c = self._fixture(server_onupdate=target1) is_(c.server_default, None) eq_(c.server_onupdate.arg, 'y') def test_server_default_keyword_as_schemaitem(self): target = schema.DefaultClause('y') c = self._fixture(server_default=target) assert c.server_default is target assert target.column is c def test_server_default_keyword_as_clause(self): target = 'y' c = self._fixture(server_default=target) assert c.server_default.arg == target assert c.server_default.column is c def test_server_default_onupdate_positional(self): target = schema.DefaultClause('y', for_update=True) c = self._fixture(target) assert 
c.server_onupdate is target assert target.column is c def test_server_default_onupdate_keyword_as_schemaitem(self): target = schema.DefaultClause('y', for_update=True) c = self._fixture(server_onupdate=target) assert c.server_onupdate is target assert target.column is c def test_server_default_onupdate_keyword_as_clause(self): target = 'y' c = self._fixture(server_onupdate=target) assert c.server_onupdate.arg == target assert c.server_onupdate.column is c def test_column_default_positional(self): target = schema.ColumnDefault('y') c = self._fixture(target) assert c.default is target assert target.column is c def test_column_default_keyword_as_schemaitem(self): target = schema.ColumnDefault('y') c = self._fixture(default=target) assert c.default is target assert target.column is c def test_column_default_keyword_as_clause(self): target = 'y' c = self._fixture(default=target) assert c.default.arg == target assert c.default.column is c def test_column_default_onupdate_positional(self): target = schema.ColumnDefault('y', for_update=True) c = self._fixture(target) assert c.onupdate is target assert target.column is c def test_column_default_onupdate_keyword_as_schemaitem(self): target = schema.ColumnDefault('y', for_update=True) c = self._fixture(onupdate=target) assert c.onupdate is target assert target.column is c def test_column_default_onupdate_keyword_as_clause(self): target = 'y' c = self._fixture(onupdate=target) assert c.onupdate.arg == target assert c.onupdate.column is c class ColumnOptionsTest(fixtures.TestBase): def test_default_generators(self): g1, g2 = Sequence('foo_id_seq'), ColumnDefault('f5') assert Column(String, default=g1).default is g1 assert Column(String, onupdate=g1).onupdate is g1 assert Column(String, default=g2).default is g2 assert Column(String, onupdate=g2).onupdate is g2 def test_type_required(self): assert_raises(exc.ArgumentError, Column) assert_raises(exc.ArgumentError, Column, "foo") assert_raises(exc.ArgumentError, Column, default="foo") assert_raises(exc.ArgumentError, Column, Sequence("a")) assert_raises(exc.ArgumentError, Column, "foo", default="foo") assert_raises(exc.ArgumentError, Column, "foo", Sequence("a")) Column(ForeignKey('bar.id')) Column("foo", ForeignKey('bar.id')) Column(ForeignKey('bar.id'), default="foo") Column(ForeignKey('bar.id'), Sequence("a")) Column("foo", ForeignKey('bar.id'), default="foo") Column("foo", ForeignKey('bar.id'), Sequence("a")) def test_column_info(self): c1 = Column('foo', String, info={'x': 'y'}) c2 = Column('bar', String, info={}) c3 = Column('bat', String) assert c1.info == {'x': 'y'} assert c2.info == {} assert c3.info == {} for c in (c1, c2, c3): c.info['bar'] = 'zip' assert c.info['bar'] == 'zip' class CatchAllEventsTest(fixtures.TestBase): def teardown(self): events.SchemaEventTarget.dispatch._clear() def test_all_events(self): canary = [] def before_attach(obj, parent): canary.append("%s->%s" % (obj.__class__.__name__, parent.__class__.__name__)) def after_attach(obj, parent): canary.append("%s->%s" % (obj.__class__.__name__, parent)) event.listen(schema.SchemaItem, "before_parent_attach", before_attach) event.listen(schema.SchemaItem, "after_parent_attach", after_attach) m = MetaData() Table('t1', m, Column('id', Integer, Sequence('foo_id'), primary_key=True), Column('bar', String, ForeignKey('t2.id')) ) Table('t2', m, Column('id', Integer, primary_key=True), ) eq_( canary, ['Sequence->Column', 'Sequence->id', 'ForeignKey->Column', 'ForeignKey->bar', 'Table->MetaData', 'PrimaryKeyConstraint->Table', 
'PrimaryKeyConstraint->t1', 'Column->Table', 'Column->t1', 'Column->Table', 'Column->t1', 'ForeignKeyConstraint->Table', 'ForeignKeyConstraint->t1', 'Table->MetaData(bind=None)', 'Table->MetaData', 'PrimaryKeyConstraint->Table', 'PrimaryKeyConstraint->t2', 'Column->Table', 'Column->t2', 'Table->MetaData(bind=None)'] ) def test_events_per_constraint(self): canary = [] def evt(target): def before_attach(obj, parent): canary.append("%s->%s" % (target.__name__, parent.__class__.__name__)) def after_attach(obj, parent): canary.append("%s->%s" % (target.__name__, parent)) event.listen(target, "before_parent_attach", before_attach) event.listen(target, "after_parent_attach", after_attach) for target in [ schema.ForeignKeyConstraint, schema.PrimaryKeyConstraint, schema.UniqueConstraint, schema.CheckConstraint ]: evt(target) m = MetaData() Table('t1', m, Column('id', Integer, Sequence('foo_id'), primary_key=True), Column('bar', String, ForeignKey('t2.id')), Column('bat', Integer, unique=True), ) Table('t2', m, Column('id', Integer, primary_key=True), Column('bar', Integer), Column('bat', Integer), CheckConstraint("bar>5"), UniqueConstraint('bar', 'bat') ) eq_( canary, [ 'PrimaryKeyConstraint->Table', 'PrimaryKeyConstraint->t1', 'ForeignKeyConstraint->Table', 'ForeignKeyConstraint->t1', 'UniqueConstraint->Table', 'UniqueConstraint->t1', 'PrimaryKeyConstraint->Table', 'PrimaryKeyConstraint->t2', 'CheckConstraint->Table', 'CheckConstraint->t2', 'UniqueConstraint->Table', 'UniqueConstraint->t2' ] ) SQLAlchemy-0.8.4/test/sql/test_operators.py0000644000076500000240000012735712251150016021446 0ustar classicstaff00000000000000from sqlalchemy.testing import fixtures, eq_, is_ from sqlalchemy import testing from sqlalchemy.testing import assert_raises_message from sqlalchemy.sql import column, desc, asc, literal, collate, null, true, false from sqlalchemy.sql.expression import BinaryExpression, \ ClauseList, Grouping, \ UnaryExpression, select, union, func, tuple_ from sqlalchemy.sql import operators, table import operator from sqlalchemy import String, Integer from sqlalchemy import exc from sqlalchemy.schema import Column, Table, MetaData from sqlalchemy.types import TypeEngine, TypeDecorator, UserDefinedType from sqlalchemy.dialects import mysql, firebird, postgresql, oracle, \ sqlite, mssql from sqlalchemy import util import datetime import collections from sqlalchemy import text, literal_column class LoopOperate(operators.ColumnOperators): def operate(self, op, *other, **kwargs): return op class DefaultColumnComparatorTest(fixtures.TestBase): def _do_scalar_test(self, operator, compare_to): left = column('left') assert left.comparator.operate(operator).compare( compare_to(left) ) self._loop_test(operator) def _do_operate_test(self, operator, right=column('right')): left = column('left') assert left.comparator.operate(operator, right).compare( BinaryExpression(left, right, operator) ) assert operator(left, right).compare( BinaryExpression(left, right, operator) ) self._loop_test(operator, right) def _loop_test(self, operator, *arg): l = LoopOperate() is_( operator(l, *arg), operator ) def test_desc(self): self._do_scalar_test(operators.desc_op, desc) def test_asc(self): self._do_scalar_test(operators.asc_op, asc) def test_plus(self): self._do_operate_test(operators.add) def test_is_null(self): self._do_operate_test(operators.is_, None) def test_isnot_null(self): self._do_operate_test(operators.isnot, None) def test_is_null_const(self): self._do_operate_test(operators.is_, null()) def 
test_is_true_const(self): self._do_operate_test(operators.is_, true()) def test_is_false_const(self): self._do_operate_test(operators.is_, false()) def test_equals_true(self): self._do_operate_test(operators.eq, True) def test_notequals_true(self): self._do_operate_test(operators.ne, True) def test_is_true(self): self._do_operate_test(operators.is_, True) def test_isnot_true(self): self._do_operate_test(operators.isnot, True) def test_is_false(self): self._do_operate_test(operators.is_, False) def test_isnot_false(self): self._do_operate_test(operators.isnot, False) def test_like(self): self._do_operate_test(operators.like_op) def test_notlike(self): self._do_operate_test(operators.notlike_op) def test_ilike(self): self._do_operate_test(operators.ilike_op) def test_notilike(self): self._do_operate_test(operators.notilike_op) def test_is(self): self._do_operate_test(operators.is_) def test_isnot(self): self._do_operate_test(operators.isnot) def test_no_getitem(self): assert_raises_message( NotImplementedError, "Operator 'getitem' is not supported on this expression", self._do_operate_test, operators.getitem ) assert_raises_message( NotImplementedError, "Operator 'getitem' is not supported on this expression", lambda: column('left')[3] ) def test_in(self): left = column('left') assert left.comparator.operate(operators.in_op, [1, 2, 3]).compare( BinaryExpression( left, Grouping(ClauseList( literal(1), literal(2), literal(3) )), operators.in_op ) ) self._loop_test(operators.in_op, [1, 2, 3]) def test_notin(self): left = column('left') assert left.comparator.operate(operators.notin_op, [1, 2, 3]).compare( BinaryExpression( left, Grouping(ClauseList( literal(1), literal(2), literal(3) )), operators.notin_op ) ) self._loop_test(operators.notin_op, [1, 2, 3]) def test_collate(self): left = column('left') right = "some collation" left.comparator.operate(operators.collate, right).compare( collate(left, right) ) def test_concat(self): self._do_operate_test(operators.concat_op) class CustomUnaryOperatorTest(fixtures.TestBase, testing.AssertsCompiledSQL): __dialect__ = 'default' def _factorial_fixture(self): class MyInteger(Integer): class comparator_factory(Integer.Comparator): def factorial(self): return UnaryExpression(self.expr, modifier=operators.custom_op("!"), type_=MyInteger) def factorial_prefix(self): return UnaryExpression(self.expr, operator=operators.custom_op("!!"), type_=MyInteger) return MyInteger def test_factorial(self): col = column('somecol', self._factorial_fixture()) self.assert_compile( col.factorial(), "somecol !" ) def test_double_factorial(self): col = column('somecol', self._factorial_fixture()) self.assert_compile( col.factorial().factorial(), "somecol ! !" ) def test_factorial_prefix(self): col = column('somecol', self._factorial_fixture()) self.assert_compile( col.factorial_prefix(), "!! 
somecol" ) def test_unary_no_ops(self): assert_raises_message( exc.CompileError, "Unary expression has no operator or modifier", UnaryExpression(literal("x")).compile ) def test_unary_both_ops(self): assert_raises_message( exc.CompileError, "Unary expression does not support operator and " "modifier simultaneously", UnaryExpression(literal("x"), operator=operators.custom_op("x"), modifier=operators.custom_op("y")).compile ) class _CustomComparatorTests(object): def test_override_builtin(self): c1 = Column('foo', self._add_override_factory()) self._assert_add_override(c1) def test_column_proxy(self): t = Table('t', MetaData(), Column('foo', self._add_override_factory()) ) proxied = t.select().c.foo self._assert_add_override(proxied) def test_alias_proxy(self): t = Table('t', MetaData(), Column('foo', self._add_override_factory()) ) proxied = t.alias().c.foo self._assert_add_override(proxied) def test_binary_propagate(self): c1 = Column('foo', self._add_override_factory()) self._assert_add_override(c1 - 6) def test_reverse_binary_propagate(self): c1 = Column('foo', self._add_override_factory()) self._assert_add_override(6 - c1) def test_binary_multi_propagate(self): c1 = Column('foo', self._add_override_factory()) self._assert_add_override((c1 - 6) + 5) def test_no_boolean_propagate(self): c1 = Column('foo', self._add_override_factory()) self._assert_not_add_override(c1 == 56) def _assert_add_override(self, expr): assert (expr + 5).compare( expr.op("goofy")(5) ) def _assert_not_add_override(self, expr): assert not (expr + 5).compare( expr.op("goofy")(5) ) class CustomComparatorTest(_CustomComparatorTests, fixtures.TestBase): def _add_override_factory(self): class MyInteger(Integer): class comparator_factory(TypeEngine.Comparator): def __init__(self, expr): self.expr = expr def __add__(self, other): return self.expr.op("goofy")(other) return MyInteger class TypeDecoratorComparatorTest(_CustomComparatorTests, fixtures.TestBase): def _add_override_factory(self): class MyInteger(TypeDecorator): impl = Integer class comparator_factory(TypeEngine.Comparator): def __init__(self, expr): self.expr = expr def __add__(self, other): return self.expr.op("goofy")(other) return MyInteger class CustomEmbeddedinTypeDecoratorTest(_CustomComparatorTests, fixtures.TestBase): def _add_override_factory(self): class MyInteger(Integer): class comparator_factory(TypeEngine.Comparator): def __init__(self, expr): self.expr = expr def __add__(self, other): return self.expr.op("goofy")(other) class MyDecInteger(TypeDecorator): impl = MyInteger return MyDecInteger class NewOperatorTest(_CustomComparatorTests, fixtures.TestBase): def _add_override_factory(self): class MyInteger(Integer): class comparator_factory(TypeEngine.Comparator): def __init__(self, expr): self.expr = expr def foob(self, other): return self.expr.op("foob")(other) return MyInteger def _assert_add_override(self, expr): assert (expr.foob(5)).compare( expr.op("foob")(5) ) def _assert_not_add_override(self, expr): assert not hasattr(expr, "foob") class ExtensionOperatorTest(fixtures.TestBase, testing.AssertsCompiledSQL): __dialect__ = 'default' def test_contains(self): class MyType(UserDefinedType): class comparator_factory(UserDefinedType.Comparator): def contains(self, other, **kw): return self.op("->")(other) self.assert_compile( Column('x', MyType()).contains(5), "x -> :x_1" ) def test_getitem(self): class MyType(UserDefinedType): class comparator_factory(UserDefinedType.Comparator): def __getitem__(self, index): return self.op("->")(index) 
self.assert_compile( Column('x', MyType())[5], "x -> :x_1" ) @testing.requires.python26 def test_op_not_an_iterator(self): # see [ticket:2726] class MyType(UserDefinedType): class comparator_factory(UserDefinedType.Comparator): def __getitem__(self, index): return self.op("->")(index) col = Column('x', MyType()) assert not isinstance(col, collections.Iterable) def test_lshift(self): class MyType(UserDefinedType): class comparator_factory(UserDefinedType.Comparator): def __lshift__(self, other): return self.op("->")(other) self.assert_compile( Column('x', MyType()) << 5, "x -> :x_1" ) def test_rshift(self): class MyType(UserDefinedType): class comparator_factory(UserDefinedType.Comparator): def __rshift__(self, other): return self.op("->")(other) self.assert_compile( Column('x', MyType()) >> 5, "x -> :x_1" ) from sqlalchemy import and_, not_, between class OperatorPrecedenceTest(fixtures.TestBase, testing.AssertsCompiledSQL): __dialect__ = 'default' table1 = table('mytable', column('myid', Integer), column('name', String), column('description', String), ) table2 = table('op', column('field')) def test_operator_precedence_1(self): self.assert_compile( self.table2.select((self.table2.c.field == 5) == None), "SELECT op.field FROM op WHERE (op.field = :field_1) IS NULL") def test_operator_precedence_2(self): self.assert_compile( self.table2.select( (self.table2.c.field + 5) == self.table2.c.field), "SELECT op.field FROM op WHERE op.field + :field_1 = op.field") def test_operator_precedence_3(self): self.assert_compile( self.table2.select((self.table2.c.field + 5) * 6), "SELECT op.field FROM op WHERE (op.field + :field_1) * :param_1") def test_operator_precedence_4(self): self.assert_compile(self.table2.select((self.table2.c.field * 5) + 6), "SELECT op.field FROM op WHERE op.field * :field_1 + :param_1") def test_operator_precedence_5(self): self.assert_compile(self.table2.select( 5 + self.table2.c.field.in_([5, 6])), "SELECT op.field FROM op WHERE :param_1 + " "(op.field IN (:field_1, :field_2))") def test_operator_precedence_6(self): self.assert_compile(self.table2.select( (5 + self.table2.c.field).in_([5, 6])), "SELECT op.field FROM op WHERE :field_1 + op.field " "IN (:param_1, :param_2)") def test_operator_precedence_7(self): self.assert_compile(self.table2.select( not_(and_(self.table2.c.field == 5, self.table2.c.field == 7))), "SELECT op.field FROM op WHERE NOT " "(op.field = :field_1 AND op.field = :field_2)") def test_operator_precedence_8(self): self.assert_compile(self.table2.select(not_(self.table2.c.field == 5)), "SELECT op.field FROM op WHERE op.field != :field_1") def test_operator_precedence_9(self): self.assert_compile(self.table2.select( not_(self.table2.c.field.between(5, 6))), "SELECT op.field FROM op WHERE NOT " "(op.field BETWEEN :field_1 AND :field_2)") def test_operator_precedence_10(self): self.assert_compile(self.table2.select(not_(self.table2.c.field) == 5), "SELECT op.field FROM op WHERE (NOT op.field) = :param_1") def test_operator_precedence_11(self): self.assert_compile(self.table2.select( (self.table2.c.field == self.table2.c.field).\ between(False, True)), "SELECT op.field FROM op WHERE (op.field = op.field) " "BETWEEN :param_1 AND :param_2") def test_operator_precedence_12(self): self.assert_compile(self.table2.select( between((self.table2.c.field == self.table2.c.field), False, True)), "SELECT op.field FROM op WHERE (op.field = op.field) " "BETWEEN :param_1 AND :param_2") def test_operator_precedence_13(self): self.assert_compile(self.table2.select( 
self.table2.c.field.match( self.table2.c.field).is_(None)), "SELECT op.field FROM op WHERE (op.field MATCH op.field) IS NULL") def test_commutative_operators(self): self.assert_compile( literal("a") + literal("b") * literal("c"), ":param_1 || :param_2 * :param_3" ) def test_op_operators(self): self.assert_compile( self.table1.select(self.table1.c.myid.op('hoho')(12) == 14), "SELECT mytable.myid, mytable.name, mytable.description FROM " "mytable WHERE (mytable.myid hoho :myid_1) = :param_1" ) def test_op_operators_comma_precedence(self): self.assert_compile( func.foo(self.table1.c.myid.op('hoho')(12)), "foo(mytable.myid hoho :myid_1)" ) def test_op_operators_comparison_precedence(self): self.assert_compile( self.table1.c.myid.op('hoho')(12) == 5, "(mytable.myid hoho :myid_1) = :param_1" ) def test_op_operators_custom_precedence(self): op1 = self.table1.c.myid.op('hoho', precedence=5) op2 = op1(5).op('lala', precedence=4)(4) op3 = op1(5).op('lala', precedence=6)(4) self.assert_compile(op2, "mytable.myid hoho :myid_1 lala :param_1") self.assert_compile(op3, "(mytable.myid hoho :myid_1) lala :param_1") class OperatorAssociativityTest(fixtures.TestBase, testing.AssertsCompiledSQL): __dialect__ = 'default' def test_associativity_1(self): f = column('f') self.assert_compile(f - f, "f - f") def test_associativity_2(self): f = column('f') self.assert_compile(f - f - f, "(f - f) - f") def test_associativity_3(self): f = column('f') self.assert_compile((f - f) - f, "(f - f) - f") def test_associativity_4(self): f = column('f') self.assert_compile((f - f).label('foo') - f, "(f - f) - f") def test_associativity_5(self): f = column('f') self.assert_compile(f - (f - f), "f - (f - f)") def test_associativity_6(self): f = column('f') self.assert_compile(f - (f - f).label('foo'), "f - (f - f)") def test_associativity_7(self): f = column('f') # because - less precedent than / self.assert_compile(f / (f - f), "f / (f - f)") def test_associativity_8(self): f = column('f') self.assert_compile(f / (f - f).label('foo'), "f / (f - f)") def test_associativity_9(self): f = column('f') self.assert_compile(f / f - f, "f / f - f") def test_associativity_10(self): f = column('f') self.assert_compile((f / f) - f, "f / f - f") def test_associativity_11(self): f = column('f') self.assert_compile((f / f).label('foo') - f, "f / f - f") def test_associativity_12(self): f = column('f') # because / more precedent than - self.assert_compile(f - (f / f), "f - f / f") def test_associativity_13(self): f = column('f') self.assert_compile(f - (f / f).label('foo'), "f - f / f") def test_associativity_14(self): f = column('f') self.assert_compile(f - f / f, "f - f / f") def test_associativity_15(self): f = column('f') self.assert_compile((f - f) / f, "(f - f) / f") def test_associativity_16(self): f = column('f') self.assert_compile(((f - f) / f) - f, "(f - f) / f - f") def test_associativity_17(self): f = column('f') # - lower precedence than / self.assert_compile((f - f) / (f - f), "(f - f) / (f - f)") def test_associativity_18(self): f = column('f') # / higher precedence than - self.assert_compile((f / f) - (f / f), "f / f - f / f") def test_associativity_19(self): f = column('f') self.assert_compile((f / f) - (f - f), "f / f - (f - f)") def test_associativity_20(self): f = column('f') self.assert_compile((f / f) / (f - f), "(f / f) / (f - f)") def test_associativity_21(self): f = column('f') self.assert_compile(f / (f / (f - f)), "f / (f / (f - f))") class InTest(fixtures.TestBase, testing.AssertsCompiledSQL): __dialect__ = 
'default' table1 = table('mytable', column('myid', Integer), ) table2 = table( 'myothertable', column('otherid', Integer), column('othername', String) ) def test_in_1(self): self.assert_compile(self.table1.c.myid.in_(['a']), "mytable.myid IN (:myid_1)") def test_in_2(self): self.assert_compile(~self.table1.c.myid.in_(['a']), "mytable.myid NOT IN (:myid_1)") def test_in_3(self): self.assert_compile(self.table1.c.myid.in_(['a', 'b']), "mytable.myid IN (:myid_1, :myid_2)") def test_in_4(self): self.assert_compile(self.table1.c.myid.in_(iter(['a', 'b'])), "mytable.myid IN (:myid_1, :myid_2)") def test_in_5(self): self.assert_compile(self.table1.c.myid.in_([literal('a')]), "mytable.myid IN (:param_1)") def test_in_6(self): self.assert_compile(self.table1.c.myid.in_([literal('a'), 'b']), "mytable.myid IN (:param_1, :myid_1)") def test_in_7(self): self.assert_compile( self.table1.c.myid.in_([literal('a'), literal('b')]), "mytable.myid IN (:param_1, :param_2)") def test_in_8(self): self.assert_compile(self.table1.c.myid.in_(['a', literal('b')]), "mytable.myid IN (:myid_1, :param_1)") def test_in_9(self): self.assert_compile(self.table1.c.myid.in_([literal(1) + 'a']), "mytable.myid IN (:param_1 + :param_2)") def test_in_10(self): self.assert_compile(self.table1.c.myid.in_([literal('a') + 'a', 'b']), "mytable.myid IN (:param_1 || :param_2, :myid_1)") def test_in_11(self): self.assert_compile(self.table1.c.myid.in_([literal('a') + \ literal('a'), literal('b')]), "mytable.myid IN (:param_1 || :param_2, :param_3)") def test_in_12(self): self.assert_compile(self.table1.c.myid.in_([1, literal(3) + 4]), "mytable.myid IN (:myid_1, :param_1 + :param_2)") def test_in_13(self): self.assert_compile(self.table1.c.myid.in_([literal('a') < 'b']), "mytable.myid IN (:param_1 < :param_2)") def test_in_14(self): self.assert_compile(self.table1.c.myid.in_([self.table1.c.myid]), "mytable.myid IN (mytable.myid)") def test_in_15(self): self.assert_compile(self.table1.c.myid.in_(['a', self.table1.c.myid]), "mytable.myid IN (:myid_1, mytable.myid)") def test_in_16(self): self.assert_compile(self.table1.c.myid.in_([literal('a'), self.table1.c.myid]), "mytable.myid IN (:param_1, mytable.myid)") def test_in_17(self): self.assert_compile(self.table1.c.myid.in_([literal('a'), \ self.table1.c.myid + 'a']), "mytable.myid IN (:param_1, mytable.myid + :myid_1)") def test_in_18(self): self.assert_compile(self.table1.c.myid.in_([literal(1), 'a' + \ self.table1.c.myid]), "mytable.myid IN (:param_1, :myid_1 + mytable.myid)") def test_in_19(self): self.assert_compile(self.table1.c.myid.in_([1, 2, 3]), "mytable.myid IN (:myid_1, :myid_2, :myid_3)") def test_in_20(self): self.assert_compile(self.table1.c.myid.in_( select([self.table2.c.otherid])), "mytable.myid IN (SELECT myothertable.otherid FROM myothertable)") def test_in_21(self): self.assert_compile(~self.table1.c.myid.in_( select([self.table2.c.otherid])), "mytable.myid NOT IN (SELECT myothertable.otherid FROM myothertable)") def test_in_22(self): self.assert_compile( self.table1.c.myid.in_( text("SELECT myothertable.otherid FROM myothertable") ), "mytable.myid IN (SELECT myothertable.otherid " "FROM myothertable)" ) @testing.emits_warning('.*empty sequence.*') def test_in_23(self): self.assert_compile(self.table1.c.myid.in_([]), "mytable.myid != mytable.myid") def test_in_24(self): self.assert_compile( select([self.table1.c.myid.in_(select([self.table2.c.otherid]))]), "SELECT mytable.myid IN (SELECT myothertable.otherid " "FROM myothertable) AS anon_1 FROM mytable" ) def 
test_in_25(self): self.assert_compile( select([self.table1.c.myid.in_( select([self.table2.c.otherid]).as_scalar())]), "SELECT mytable.myid IN (SELECT myothertable.otherid " "FROM myothertable) AS anon_1 FROM mytable" ) def test_in_26(self): self.assert_compile(self.table1.c.myid.in_( union( select([self.table1.c.myid], self.table1.c.myid == 5), select([self.table1.c.myid], self.table1.c.myid == 12), ) ), "mytable.myid IN ("\ "SELECT mytable.myid FROM mytable WHERE mytable.myid = :myid_1 "\ "UNION SELECT mytable.myid FROM mytable WHERE mytable.myid = :myid_2)") def test_in_27(self): # test that putting a select in an IN clause does not # blow away its ORDER BY clause self.assert_compile( select([self.table1, self.table2], self.table2.c.otherid.in_( select([self.table2.c.otherid], order_by=[self.table2.c.othername], limit=10, correlate=False) ), from_obj=[self.table1.join(self.table2, self.table1.c.myid == self.table2.c.otherid)], order_by=[self.table1.c.myid] ), "SELECT mytable.myid, " "myothertable.otherid, myothertable.othername FROM mytable "\ "JOIN myothertable ON mytable.myid = myothertable.otherid " "WHERE myothertable.otherid IN (SELECT myothertable.otherid "\ "FROM myothertable ORDER BY myothertable.othername " "LIMIT :param_1) ORDER BY mytable.myid", {'param_1': 10} ) def test_in_28(self): self.assert_compile( self.table1.c.myid.in_([None]), "mytable.myid IN (NULL)" ) @testing.emits_warning('.*empty sequence.*') def test_in_29(self): self.assert_compile(self.table1.c.myid.notin_([]), "mytable.myid = mytable.myid") @testing.emits_warning('.*empty sequence.*') def test_in_30(self): self.assert_compile(~self.table1.c.myid.in_([]), "mytable.myid = mytable.myid") class MathOperatorTest(fixtures.TestBase, testing.AssertsCompiledSQL): __dialect__ = 'default' table1 = table('mytable', column('myid', Integer), ) def _test_math_op(self, py_op, sql_op): for (lhs, rhs, res) in ( (5, self.table1.c.myid, ':myid_1 %s mytable.myid'), (5, literal(5), ':param_1 %s :param_2'), (self.table1.c.myid, 'b', 'mytable.myid %s :myid_1'), (self.table1.c.myid, literal(2.7), 'mytable.myid %s :param_1'), (self.table1.c.myid, self.table1.c.myid, 'mytable.myid %s mytable.myid'), (literal(5), 8, ':param_1 %s :param_2'), (literal(6), self.table1.c.myid, ':param_1 %s mytable.myid'), (literal(7), literal(5.5), ':param_1 %s :param_2'), ): self.assert_compile(py_op(lhs, rhs), res % sql_op) def test_math_op_add(self): self._test_math_op(operator.add, '+') def test_math_op_mul(self): self._test_math_op(operator.mul, '*') def test_math_op_sub(self): self._test_math_op(operator.sub, '-') def test_math_op_div(self): if util.py3k: self._test_math_op(operator.truediv, '/') else: self._test_math_op(operator.div, '/') class ComparisonOperatorTest(fixtures.TestBase, testing.AssertsCompiledSQL): __dialect__ = 'default' table1 = table('mytable', column('myid', Integer), ) def test_pickle_operators_one(self): clause = (self.table1.c.myid == 12) & \ self.table1.c.myid.between(15, 20) & \ self.table1.c.myid.like('hoho') eq_(str(clause), str(util.pickle.loads(util.pickle.dumps(clause)))) def test_pickle_operators_two(self): clause = tuple_(1, 2, 3) eq_(str(clause), str(util.pickle.loads(util.pickle.dumps(clause)))) def _test_comparison_op(self, py_op, fwd_op, rev_op): dt = datetime.datetime(2012, 5, 10, 15, 27, 18) for (lhs, rhs, l_sql, r_sql) in ( ('a', self.table1.c.myid, ':myid_1', 'mytable.myid'), ('a', literal('b'), ':param_2', ':param_1'), # note swap! 
(self.table1.c.myid, 'b', 'mytable.myid', ':myid_1'), (self.table1.c.myid, literal('b'), 'mytable.myid', ':param_1'), (self.table1.c.myid, self.table1.c.myid, 'mytable.myid', 'mytable.myid'), (literal('a'), 'b', ':param_1', ':param_2'), (literal('a'), self.table1.c.myid, ':param_1', 'mytable.myid'), (literal('a'), literal('b'), ':param_1', ':param_2'), (dt, literal('b'), ':param_2', ':param_1'), (literal('b'), dt, ':param_1', ':param_2'), ): # the compiled clause should match either (e.g.): # 'a' < 'b' -or- 'b' > 'a'. compiled = str(py_op(lhs, rhs)) fwd_sql = "%s %s %s" % (l_sql, fwd_op, r_sql) rev_sql = "%s %s %s" % (r_sql, rev_op, l_sql) self.assert_(compiled == fwd_sql or compiled == rev_sql, "\n'" + compiled + "'\n does not match\n'" + fwd_sql + "'\n or\n'" + rev_sql + "'") def test_comparison_operators_lt(self): self._test_comparison_op(operator.lt, '<', '>'), def test_comparison_operators_gt(self): self._test_comparison_op(operator.gt, '>', '<') def test_comparison_operators_eq(self): self._test_comparison_op(operator.eq, '=', '=') def test_comparison_operators_ne(self): self._test_comparison_op(operator.ne, '!=', '!=') def test_comparison_operators_le(self): self._test_comparison_op(operator.le, '<=', '>=') def test_comparison_operators_ge(self): self._test_comparison_op(operator.ge, '>=', '<=') class NonZeroTest(fixtures.TestBase): def _raises(self, expr): assert_raises_message( TypeError, "Boolean value of this clause is not defined", bool, expr ) def _assert_true(self, expr): is_(bool(expr), True) def _assert_false(self, expr): is_(bool(expr), False) def test_column_identity_eq(self): c1 = column('c1') self._assert_true(c1 == c1) def test_column_identity_gt(self): c1 = column('c1') self._raises(c1 > c1) def test_column_compare_eq(self): c1, c2 = column('c1'), column('c2') self._assert_false(c1 == c2) def test_column_compare_gt(self): c1, c2 = column('c1'), column('c2') self._raises(c1 > c2) def test_binary_identity_eq(self): c1 = column('c1') expr = c1 > 5 self._assert_true(expr == expr) def test_labeled_binary_identity_eq(self): c1 = column('c1') expr = (c1 > 5).label(None) self._assert_true(expr == expr) def test_annotated_binary_identity_eq(self): c1 = column('c1') expr1 = (c1 > 5) expr2 = expr1._annotate({"foo": "bar"}) self._assert_true(expr1 == expr2) def test_labeled_binary_compare_gt(self): c1 = column('c1') expr1 = (c1 > 5).label(None) expr2 = (c1 > 5).label(None) self._assert_false(expr1 == expr2) class NegationTest(fixtures.TestBase, testing.AssertsCompiledSQL): __dialect__ = 'default' table1 = table('mytable', column('myid', Integer), column('name', String), ) def test_negate_operators_1(self): for (py_op, op) in ( (operator.neg, '-'), (operator.inv, 'NOT '), ): for expr, expected in ( (self.table1.c.myid, "mytable.myid"), (literal("foo"), ":param_1"), ): self.assert_compile(py_op(expr), "%s%s" % (op, expected)) def test_negate_operators_2(self): self.assert_compile( self.table1.select((self.table1.c.myid != 12) & ~(self.table1.c.name == 'john')), "SELECT mytable.myid, mytable.name FROM " "mytable WHERE mytable.myid != :myid_1 " "AND mytable.name != :name_1" ) def test_negate_operators_3(self): self.assert_compile( self.table1.select((self.table1.c.myid != 12) & ~(self.table1.c.name.between('jack', 'john'))), "SELECT mytable.myid, mytable.name FROM " "mytable WHERE mytable.myid != :myid_1 AND "\ "NOT (mytable.name BETWEEN :name_1 AND :name_2)" ) def test_negate_operators_4(self): self.assert_compile( self.table1.select((self.table1.c.myid != 12) & 
~and_(self.table1.c.name == 'john', self.table1.c.name == 'ed', self.table1.c.name == 'fred')), "SELECT mytable.myid, mytable.name FROM " "mytable WHERE mytable.myid != :myid_1 AND "\ "NOT (mytable.name = :name_1 AND mytable.name = :name_2 " "AND mytable.name = :name_3)" ) def test_negate_operators_5(self): self.assert_compile( self.table1.select((self.table1.c.myid != 12) & ~self.table1.c.name), "SELECT mytable.myid, mytable.name FROM " "mytable WHERE mytable.myid != :myid_1 AND NOT mytable.name" ) class LikeTest(fixtures.TestBase, testing.AssertsCompiledSQL): __dialect__ = 'default' table1 = table('mytable', column('myid', Integer), column('name', String), ) def test_like_1(self): self.assert_compile( self.table1.c.myid.like('somstr'), "mytable.myid LIKE :myid_1") def test_like_2(self): self.assert_compile( ~self.table1.c.myid.like('somstr'), "mytable.myid NOT LIKE :myid_1") def test_like_3(self): self.assert_compile( self.table1.c.myid.like('somstr', escape='\\'), "mytable.myid LIKE :myid_1 ESCAPE '\\'") def test_like_4(self): self.assert_compile( ~self.table1.c.myid.like('somstr', escape='\\'), "mytable.myid NOT LIKE :myid_1 ESCAPE '\\'") def test_like_5(self): self.assert_compile( self.table1.c.myid.ilike('somstr', escape='\\'), "lower(mytable.myid) LIKE lower(:myid_1) ESCAPE '\\'") def test_like_6(self): self.assert_compile( ~self.table1.c.myid.ilike('somstr', escape='\\'), "lower(mytable.myid) NOT LIKE lower(:myid_1) ESCAPE '\\'") def test_like_7(self): self.assert_compile( self.table1.c.myid.ilike('somstr', escape='\\'), "mytable.myid ILIKE %(myid_1)s ESCAPE '\\\\'", dialect=postgresql.dialect()) def test_like_8(self): self.assert_compile( ~self.table1.c.myid.ilike('somstr', escape='\\'), "mytable.myid NOT ILIKE %(myid_1)s ESCAPE '\\\\'", dialect=postgresql.dialect()) def test_like_9(self): self.assert_compile( self.table1.c.name.ilike('%something%'), "lower(mytable.name) LIKE lower(:name_1)") def test_like_10(self): self.assert_compile( self.table1.c.name.ilike('%something%'), "mytable.name ILIKE %(name_1)s", dialect=postgresql.dialect()) def test_like_11(self): self.assert_compile( ~self.table1.c.name.ilike('%something%'), "lower(mytable.name) NOT LIKE lower(:name_1)") def test_like_12(self): self.assert_compile( ~self.table1.c.name.ilike('%something%'), "mytable.name NOT ILIKE %(name_1)s", dialect=postgresql.dialect()) class MatchTest(fixtures.TestBase, testing.AssertsCompiledSQL): __dialect__ = 'default' table1 = table('mytable', column('myid', Integer), column('name', String), ) def test_match_1(self): self.assert_compile(self.table1.c.myid.match('somstr'), "mytable.myid MATCH ?", dialect=sqlite.dialect()) def test_match_2(self): self.assert_compile(self.table1.c.myid.match('somstr'), "MATCH (mytable.myid) AGAINST (%s IN BOOLEAN MODE)", dialect=mysql.dialect()) def test_match_3(self): self.assert_compile(self.table1.c.myid.match('somstr'), "CONTAINS (mytable.myid, :myid_1)", dialect=mssql.dialect()) def test_match_4(self): self.assert_compile(self.table1.c.myid.match('somstr'), "mytable.myid @@ to_tsquery(%(myid_1)s)", dialect=postgresql.dialect()) def test_match_5(self): self.assert_compile(self.table1.c.myid.match('somstr'), "CONTAINS (mytable.myid, :myid_1)", dialect=oracle.dialect()) class ComposedLikeOperatorsTest(fixtures.TestBase, testing.AssertsCompiledSQL): __dialect__ = 'default' def test_contains(self): self.assert_compile( column('x').contains('y'), "x LIKE '%%' || :x_1 || '%%'", checkparams={'x_1': 'y'} ) def test_contains_escape(self): self.assert_compile( 
column('x').contains('y', escape='\\'), "x LIKE '%%' || :x_1 || '%%' ESCAPE '\\'", checkparams={'x_1': 'y'} ) def test_contains_literal(self): self.assert_compile( column('x').contains(literal_column('y')), "x LIKE '%%' || y || '%%'", checkparams={} ) def test_contains_text(self): self.assert_compile( column('x').contains(text('y')), "x LIKE '%%' || y || '%%'", checkparams={} ) def test_not_contains(self): self.assert_compile( ~column('x').contains('y'), "x NOT LIKE '%%' || :x_1 || '%%'", checkparams={'x_1': 'y'} ) def test_not_contains_escape(self): self.assert_compile( ~column('x').contains('y', escape='\\'), "x NOT LIKE '%%' || :x_1 || '%%' ESCAPE '\\'", checkparams={'x_1': 'y'} ) def test_contains_concat(self): self.assert_compile( column('x').contains('y'), "x LIKE concat(concat('%%', %s), '%%')", checkparams={'x_1': 'y'}, dialect=mysql.dialect() ) def test_not_contains_concat(self): self.assert_compile( ~column('x').contains('y'), "x NOT LIKE concat(concat('%%', %s), '%%')", checkparams={'x_1': 'y'}, dialect=mysql.dialect() ) def test_contains_literal_concat(self): self.assert_compile( column('x').contains(literal_column('y')), "x LIKE concat(concat('%%', y), '%%')", checkparams={}, dialect=mysql.dialect() ) def test_contains_text_concat(self): self.assert_compile( column('x').contains(text('y')), "x LIKE concat(concat('%%', y), '%%')", checkparams={}, dialect=mysql.dialect() ) def test_startswith(self): self.assert_compile( column('x').startswith('y'), "x LIKE :x_1 || '%%'", checkparams={'x_1': 'y'} ) def test_startswith_escape(self): self.assert_compile( column('x').startswith('y', escape='\\'), "x LIKE :x_1 || '%%' ESCAPE '\\'", checkparams={'x_1': 'y'} ) def test_not_startswith(self): self.assert_compile( ~column('x').startswith('y'), "x NOT LIKE :x_1 || '%%'", checkparams={'x_1': 'y'} ) def test_not_startswith_escape(self): self.assert_compile( ~column('x').startswith('y', escape='\\'), "x NOT LIKE :x_1 || '%%' ESCAPE '\\'", checkparams={'x_1': 'y'} ) def test_startswith_literal(self): self.assert_compile( column('x').startswith(literal_column('y')), "x LIKE y || '%%'", checkparams={} ) def test_startswith_text(self): self.assert_compile( column('x').startswith(text('y')), "x LIKE y || '%%'", checkparams={} ) def test_startswith_concat(self): self.assert_compile( column('x').startswith('y'), "x LIKE concat(%s, '%%')", checkparams={'x_1': 'y'}, dialect=mysql.dialect() ) def test_not_startswith_concat(self): self.assert_compile( ~column('x').startswith('y'), "x NOT LIKE concat(%s, '%%')", checkparams={'x_1': 'y'}, dialect=mysql.dialect() ) def test_startswith_firebird(self): self.assert_compile( column('x').startswith('y'), "x STARTING WITH :x_1", checkparams={'x_1': 'y'}, dialect=firebird.dialect() ) def test_not_startswith_firebird(self): self.assert_compile( ~column('x').startswith('y'), "x NOT STARTING WITH :x_1", checkparams={'x_1': 'y'}, dialect=firebird.dialect() ) def test_startswith_literal_mysql(self): self.assert_compile( column('x').startswith(literal_column('y')), "x LIKE concat(y, '%%')", checkparams={}, dialect=mysql.dialect() ) def test_startswith_text_mysql(self): self.assert_compile( column('x').startswith(text('y')), "x LIKE concat(y, '%%')", checkparams={}, dialect=mysql.dialect() ) def test_endswith(self): self.assert_compile( column('x').endswith('y'), "x LIKE '%%' || :x_1", checkparams={'x_1': 'y'} ) def test_endswith_escape(self): self.assert_compile( column('x').endswith('y', escape='\\'), "x LIKE '%%' || :x_1 ESCAPE '\\'", checkparams={'x_1': 'y'} ) def 
test_not_endswith(self): self.assert_compile( ~column('x').endswith('y'), "x NOT LIKE '%%' || :x_1", checkparams={'x_1': 'y'} ) def test_not_endswith_escape(self): self.assert_compile( ~column('x').endswith('y', escape='\\'), "x NOT LIKE '%%' || :x_1 ESCAPE '\\'", checkparams={'x_1': 'y'} ) def test_endswith_literal(self): self.assert_compile( column('x').endswith(literal_column('y')), "x LIKE '%%' || y", checkparams={} ) def test_endswith_text(self): self.assert_compile( column('x').endswith(text('y')), "x LIKE '%%' || y", checkparams={} ) def test_endswith_mysql(self): self.assert_compile( column('x').endswith('y'), "x LIKE concat('%%', %s)", checkparams={'x_1': 'y'}, dialect=mysql.dialect() ) def test_not_endswith_mysql(self): self.assert_compile( ~column('x').endswith('y'), "x NOT LIKE concat('%%', %s)", checkparams={'x_1': 'y'}, dialect=mysql.dialect() ) def test_endswith_literal_mysql(self): self.assert_compile( column('x').endswith(literal_column('y')), "x LIKE concat('%%', y)", checkparams={}, dialect=mysql.dialect() ) def test_endswith_text_mysql(self): self.assert_compile( column('x').endswith(text('y')), "x LIKE concat('%%', y)", checkparams={}, dialect=mysql.dialect() ) SQLAlchemy-0.8.4/test/sql/test_query.py0000644000076500000240000026015212251150016020564 0ustar classicstaff00000000000000from sqlalchemy.testing import eq_, assert_raises_message, assert_raises, is_ from sqlalchemy import testing from sqlalchemy.testing import fixtures, engines from sqlalchemy import util import datetime from sqlalchemy import * from sqlalchemy import exc, sql from sqlalchemy.engine import default, result as _result from sqlalchemy.testing.schema import Table, Column # ongoing - these are old tests. those which are of general use # to test a dialect are being slowly migrated to # sqlalhcemy.testing.suite class QueryTest(fixtures.TestBase): @classmethod def setup_class(cls): global users, users2, addresses, metadata metadata = MetaData(testing.db) users = Table('query_users', metadata, Column('user_id', INT, primary_key=True, test_needs_autoincrement=True), Column('user_name', VARCHAR(20)), test_needs_acid=True ) addresses = Table('query_addresses', metadata, Column('address_id', Integer, primary_key=True, test_needs_autoincrement=True), Column('user_id', Integer, ForeignKey('query_users.user_id')), Column('address', String(30)), test_needs_acid=True ) users2 = Table('u2', metadata, Column('user_id', INT, primary_key = True), Column('user_name', VARCHAR(20)), test_needs_acid=True ) metadata.create_all() @engines.close_first def teardown(self): addresses.delete().execute() users.delete().execute() users2.delete().execute() @classmethod def teardown_class(cls): metadata.drop_all() @testing.requires.multivalues_inserts def test_multivalues_insert(self): users.insert(values=[{'user_id':7, 'user_name':'jack'}, {'user_id':8, 'user_name':'ed'}]).execute() rows = users.select().order_by(users.c.user_id).execute().fetchall() self.assert_(rows[0] == (7, 'jack')) self.assert_(rows[1] == (8, 'ed')) users.insert(values=[(9, 'jack'), (10, 'ed')]).execute() rows = users.select().order_by(users.c.user_id).execute().fetchall() self.assert_(rows[2] == (9, 'jack')) self.assert_(rows[3] == (10, 'ed')) def test_insert_heterogeneous_params(self): """test that executemany parameters are asserted to match the parameter set of the first.""" assert_raises_message(exc.StatementError, r"A value is required for bind parameter 'user_name', in " "parameter group 2 \(original cause: (sqlalchemy.exc.)?InvalidRequestError: A " 
"value is required for bind parameter 'user_name', in " "parameter group 2\) u?'INSERT INTO query_users", users.insert().execute, {'user_id':7, 'user_name':'jack'}, {'user_id':8, 'user_name':'ed'}, {'user_id':9} ) # this succeeds however. We aren't yet doing # a length check on all subsequent parameters. users.insert().execute( {'user_id':7}, {'user_id':8, 'user_name':'ed'}, {'user_id':9} ) def test_lastrow_accessor(self): """Tests the inserted_primary_key and lastrow_has_id() functions.""" def insert_values(engine, table, values): """ Inserts a row into a table, returns the full list of values INSERTed including defaults that fired off on the DB side and detects rows that had defaults and post-fetches. """ # verify implicit_returning is working if engine.dialect.implicit_returning: ins = table.insert() comp = ins.compile(engine, column_keys=list(values)) if not set(values).issuperset(c.key for c in table.primary_key): assert comp.returning result = engine.execute(table.insert(), **values) ret = values.copy() for col, id in zip(table.primary_key, result.inserted_primary_key): ret[col.key] = id if result.lastrow_has_defaults(): criterion = and_(*[col==id for col, id in zip(table.primary_key, result.inserted_primary_key)]) row = engine.execute(table.select(criterion)).first() for c in table.c: ret[c.key] = row[c] return ret if testing.against('firebird', 'postgresql', 'oracle', 'mssql'): assert testing.db.dialect.implicit_returning if testing.db.dialect.implicit_returning: test_engines = [ engines.testing_engine(options={'implicit_returning':False}), engines.testing_engine(options={'implicit_returning':True}), ] else: test_engines = [testing.db] for engine in test_engines: metadata = MetaData() for supported, table, values, assertvalues in [ ( {'unsupported':['sqlite']}, Table("t1", metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('foo', String(30), primary_key=True)), {'foo':'hi'}, {'id':1, 'foo':'hi'} ), ( {'unsupported':['sqlite']}, Table("t2", metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('foo', String(30), primary_key=True), Column('bar', String(30), server_default='hi') ), {'foo':'hi'}, {'id':1, 'foo':'hi', 'bar':'hi'} ), ( {'unsupported':[]}, Table("t3", metadata, Column("id", String(40), primary_key=True), Column('foo', String(30), primary_key=True), Column("bar", String(30)) ), {'id':'hi', 'foo':'thisisfoo', 'bar':"thisisbar"}, {'id':'hi', 'foo':'thisisfoo', 'bar':"thisisbar"} ), ( {'unsupported':[]}, Table("t4", metadata, Column('id', Integer, Sequence('t4_id_seq', optional=True), primary_key=True), Column('foo', String(30), primary_key=True), Column('bar', String(30), server_default='hi') ), {'foo':'hi', 'id':1}, {'id':1, 'foo':'hi', 'bar':'hi'} ), ( {'unsupported':[]}, Table("t5", metadata, Column('id', String(10), primary_key=True), Column('bar', String(30), server_default='hi') ), {'id':'id1'}, {'id':'id1', 'bar':'hi'}, ), ( {'unsupported':['sqlite']}, Table("t6", metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('bar', Integer, primary_key=True) ), {'bar':0}, {'id':1, 'bar':0}, ), ]: if testing.db.name in supported['unsupported']: continue try: table.create(bind=engine, checkfirst=True) i = insert_values(engine, table, values) assert i == assertvalues, "tablename: %s %r %r" % \ (table.name, repr(i), repr(assertvalues)) finally: table.drop(bind=engine) @testing.only_on('sqlite+pysqlite') @testing.provide_metadata def test_lastrowid_zero(self): from 
sqlalchemy.dialects import sqlite eng = engines.testing_engine() class ExcCtx(sqlite.base.SQLiteExecutionContext): def get_lastrowid(self): return 0 eng.dialect.execution_ctx_cls = ExcCtx t = Table('t', MetaData(), Column('x', Integer, primary_key=True), Column('y', Integer)) t.create(eng) r = eng.execute(t.insert().values(y=5)) eq_(r.inserted_primary_key, [0]) @testing.fails_on('sqlite', "sqlite autoincremnt doesn't work with composite pks") def test_misordered_lastrow(self): related = Table('related', metadata, Column('id', Integer, primary_key=True), mysql_engine='MyISAM' ) t6 = Table("t6", metadata, Column('manual_id', Integer, ForeignKey('related.id'), primary_key=True), Column('auto_id', Integer, primary_key=True, test_needs_autoincrement=True), mysql_engine='MyISAM' ) metadata.create_all() r = related.insert().values(id=12).execute() id = r.inserted_primary_key[0] assert id==12 r = t6.insert().values(manual_id=id).execute() eq_(r.inserted_primary_key, [12, 1]) def test_row_iteration(self): users.insert().execute( {'user_id':7, 'user_name':'jack'}, {'user_id':8, 'user_name':'ed'}, {'user_id':9, 'user_name':'fred'}, ) r = users.select().execute() l = [] for row in r: l.append(row) self.assert_(len(l) == 3) @testing.requires.subqueries def test_anonymous_rows(self): users.insert().execute( {'user_id':7, 'user_name':'jack'}, {'user_id':8, 'user_name':'ed'}, {'user_id':9, 'user_name':'fred'}, ) sel = select([users.c.user_id]).where(users.c.user_name=='jack').as_scalar() for row in select([sel + 1, sel + 3], bind=users.bind).execute(): assert row['anon_1'] == 8 assert row['anon_2'] == 10 @testing.fails_on('firebird', "kinterbasdb doesn't send full type information") def test_order_by_label(self): """test that a label within an ORDER BY works on each backend. This test should be modified to support [ticket:1068] when that ticket is implemented. For now, you need to put the actual string in the ORDER BY. 
""" users.insert().execute( {'user_id':7, 'user_name':'jack'}, {'user_id':8, 'user_name':'ed'}, {'user_id':9, 'user_name':'fred'}, ) concat = ("test: " + users.c.user_name).label('thedata') eq_( select([concat]).order_by("thedata").execute().fetchall(), [("test: ed",), ("test: fred",), ("test: jack",)] ) eq_( select([concat]).order_by("thedata").execute().fetchall(), [("test: ed",), ("test: fred",), ("test: jack",)] ) concat = ("test: " + users.c.user_name).label('thedata') eq_( select([concat]).order_by(desc('thedata')).execute().fetchall(), [("test: jack",), ("test: fred",), ("test: ed",)] ) @testing.fails_on('postgresql', 'only simple labels allowed') @testing.fails_on('sybase', 'only simple labels allowed') @testing.fails_on('mssql', 'only simple labels allowed') def go(): concat = ("test: " + users.c.user_name).label('thedata') eq_( select([concat]).order_by(literal_column('thedata') + "x").execute().fetchall(), [("test: ed",), ("test: fred",), ("test: jack",)] ) go() def test_row_comparison(self): users.insert().execute(user_id = 7, user_name = 'jack') rp = users.select().execute().first() self.assert_(rp == rp) self.assert_(not(rp != rp)) equal = (7, 'jack') self.assert_(rp == equal) self.assert_(equal == rp) self.assert_(not (rp != equal)) self.assert_(not (equal != equal)) @testing.provide_metadata def test_column_label_overlap_fallback(self): content = Table('content', self.metadata, Column('type', String(30)), ) bar = Table('bar', self.metadata, Column('content_type', String(30)) ) self.metadata.create_all(testing.db) testing.db.execute(content.insert().values(type="t1")) row = testing.db.execute(content.select(use_labels=True)).first() assert content.c.type in row assert bar.c.content_type not in row assert sql.column('content_type') in row row = testing.db.execute(select([content.c.type.label("content_type")])).first() assert content.c.type in row assert bar.c.content_type not in row assert sql.column('content_type') in row row = testing.db.execute(select([func.now().label("content_type")])).first() assert content.c.type not in row assert bar.c.content_type not in row assert sql.column('content_type') in row def test_pickled_rows(self): users.insert().execute( {'user_id':7, 'user_name':'jack'}, {'user_id':8, 'user_name':'ed'}, {'user_id':9, 'user_name':'fred'}, ) for pickle in False, True: for use_labels in False, True: result = users.select(use_labels=use_labels).order_by(users.c.user_id).execute().fetchall() if pickle: result = util.pickle.loads(util.pickle.dumps(result)) eq_( result, [(7, "jack"), (8, "ed"), (9, "fred")] ) if use_labels: eq_(result[0]['query_users_user_id'], 7) eq_(result[0].keys(), ["query_users_user_id", "query_users_user_name"]) else: eq_(result[0]['user_id'], 7) eq_(result[0].keys(), ["user_id", "user_name"]) eq_(result[0][0], 7) eq_(result[0][users.c.user_id], 7) eq_(result[0][users.c.user_name], 'jack') if not pickle or use_labels: assert_raises(exc.NoSuchColumnError, lambda: result[0][addresses.c.user_id]) else: # test with a different table. name resolution is # causing 'user_id' to match when use_labels wasn't used. 
eq_(result[0][addresses.c.user_id], 7) assert_raises(exc.NoSuchColumnError, lambda: result[0]['fake key']) assert_raises(exc.NoSuchColumnError, lambda: result[0][addresses.c.address_id]) def test_column_error_printing(self): row = testing.db.execute(select([1])).first() class unprintable(object): def __str__(self): raise ValueError("nope") msg = r"Could not locate column in row for column '%s'" for accessor, repl in [ ("x", "x"), (Column("q", Integer), "q"), (Column("q", Integer) + 12, r"q \+ :q_1"), (unprintable(), "unprintable element.*"), ]: assert_raises_message( exc.NoSuchColumnError, msg % repl, lambda: row[accessor] ) @testing.requires.boolean_col_expressions def test_or_and_as_columns(self): true, false = literal(True), literal(False) eq_(testing.db.execute(select([and_(true, false)])).scalar(), False) eq_(testing.db.execute(select([and_(true, true)])).scalar(), True) eq_(testing.db.execute(select([or_(true, false)])).scalar(), True) eq_(testing.db.execute(select([or_(false, false)])).scalar(), False) eq_(testing.db.execute(select([not_(or_(false, false))])).scalar(), True) row = testing.db.execute(select([or_(false, false).label("x"), and_(true, false).label("y")])).first() assert row.x == False assert row.y == False row = testing.db.execute(select([or_(true, false).label("x"), and_(true, false).label("y")])).first() assert row.x == True assert row.y == False def test_fetchmany(self): users.insert().execute(user_id = 7, user_name = 'jack') users.insert().execute(user_id = 8, user_name = 'ed') users.insert().execute(user_id = 9, user_name = 'fred') r = users.select().execute() l = [] for row in r.fetchmany(size=2): l.append(row) self.assert_(len(l) == 2, "fetchmany(size=2) got %s rows" % len(l)) def test_like_ops(self): users.insert().execute( {'user_id':1, 'user_name':'apples'}, {'user_id':2, 'user_name':'oranges'}, {'user_id':3, 'user_name':'bananas'}, {'user_id':4, 'user_name':'legumes'}, {'user_id':5, 'user_name':'hi % there'}, ) for expr, result in ( (select([users.c.user_id]).\ where(users.c.user_name.startswith('apple')), [(1,)]), (select([users.c.user_id]).\ where(users.c.user_name.contains('i % t')), [(5,)]), (select([users.c.user_id]).\ where( users.c.user_name.endswith('anas') ), [(3,)]), (select([users.c.user_id]).\ where( users.c.user_name.contains('i % t', escape='&') ), [(5,)]), ): eq_(expr.execute().fetchall(), result) @testing.requires.mod_operator_as_percent_sign @testing.emits_warning('.*now automatically escapes.*') def test_percents_in_text(self): for expr, result in ( (text("select 6 % 10"), 6), (text("select 17 % 10"), 7), (text("select '%'"), '%'), (text("select '%%'"), '%%'), (text("select '%%%'"), '%%%'), (text("select 'hello % world'"), "hello % world") ): eq_(testing.db.scalar(expr), result) def test_ilike(self): users.insert().execute( {'user_id':1, 'user_name':'one'}, {'user_id':2, 'user_name':'TwO'}, {'user_id':3, 'user_name':'ONE'}, {'user_id':4, 'user_name':'OnE'}, ) eq_(select([users.c.user_id]).where(users.c.user_name.ilike('one')).execute().fetchall(), [(1, ), (3, ), (4, )]) eq_(select([users.c.user_id]).where(users.c.user_name.ilike('TWO')).execute().fetchall(), [(2, )]) if testing.against('postgresql'): eq_(select([users.c.user_id]).where(users.c.user_name.like('one')).execute().fetchall(), [(1, )]) eq_(select([users.c.user_id]).where(users.c.user_name.like('TWO')).execute().fetchall(), []) def test_compiled_execute(self): users.insert().execute(user_id = 7, user_name = 'jack') s = select([users], users.c.user_id==bindparam('id')).compile() c = 
testing.db.connect() assert c.execute(s, id=7).fetchall()[0]['user_id'] == 7 def test_compiled_insert_execute(self): users.insert().compile().execute(user_id = 7, user_name = 'jack') s = select([users], users.c.user_id==bindparam('id')).compile() c = testing.db.connect() assert c.execute(s, id=7).fetchall()[0]['user_id'] == 7 def test_repeated_bindparams(self): """Tests that a BindParam can be used more than once. This should be run for DB-APIs with both positional and named paramstyles. """ users.insert().execute(user_id = 7, user_name = 'jack') users.insert().execute(user_id = 8, user_name = 'fred') u = bindparam('userid') s = users.select(and_(users.c.user_name==u, users.c.user_name==u)) r = s.execute(userid='fred').fetchall() assert len(r) == 1 def test_bindparam_detection(self): dialect = default.DefaultDialect(paramstyle='qmark') prep = lambda q: str(sql.text(q).compile(dialect=dialect)) def a_eq(got, wanted): if got != wanted: print "Wanted %s" % wanted print "Received %s" % got self.assert_(got == wanted, got) a_eq(prep('select foo'), 'select foo') a_eq(prep("time='12:30:00'"), "time='12:30:00'") a_eq(prep(u"time='12:30:00'"), u"time='12:30:00'") a_eq(prep(":this:that"), ":this:that") a_eq(prep(":this :that"), "? ?") a_eq(prep("(:this),(:that :other)"), "(?),(? ?)") a_eq(prep("(:this),(:that:other)"), "(?),(:that:other)") a_eq(prep("(:this),(:that,:other)"), "(?),(?,?)") a_eq(prep("(:that_:other)"), "(:that_:other)") a_eq(prep("(:that_ :other)"), "(? ?)") a_eq(prep("(:that_other)"), "(?)") a_eq(prep("(:that$other)"), "(?)") a_eq(prep("(:that$:other)"), "(:that$:other)") a_eq(prep(".:that$ :other."), ".? ?.") a_eq(prep(r'select \foo'), r'select \foo') a_eq(prep(r"time='12\:30:00'"), r"time='12\:30:00'") a_eq(prep(":this \:that"), "? :that") a_eq(prep(r"(\:that$other)"), "(:that$other)") a_eq(prep(r".\:that$ :other."), ".:that$ ?.") @testing.requires.standalone_binds def test_select_from_bindparam(self): """Test result row processing when selecting from a plain bind param.""" class MyInteger(TypeDecorator): impl = Integer def process_bind_param(self, value, dialect): return int(value[4:]) def process_result_value(self, value, dialect): return "INT_%d" % value eq_( testing.db.scalar(select([literal("INT_5", type_=MyInteger)])), "INT_5" ) eq_( testing.db.scalar(select([literal("INT_5", type_=MyInteger).label('foo')])), "INT_5" ) @testing.exclude('mysql', '<', (5, 0, 37), 'database bug') def test_scalar_select(self): """test that scalar subqueries with labels get their type propagated to the result set.""" # mysql and/or mysqldb has a bug here, type isn't propagated for scalar # subquery. datetable = Table('datetable', metadata, Column('id', Integer, primary_key=True), Column('today', DateTime)) datetable.create() try: datetable.insert().execute(id=1, today=datetime.datetime(2006, 5, 12, 12, 0, 0)) s = select([datetable.alias('x').c.today]).as_scalar() s2 = select([datetable.c.id, s.label('somelabel')]) #print s2.c.somelabel.type assert isinstance(s2.execute().first()['somelabel'], datetime.datetime) finally: datetable.drop() def test_order_by(self): """Exercises ORDER BY clause generation. Tests simple, compound, aliased and DESC clauses. 
""" users.insert().execute(user_id=1, user_name='c') users.insert().execute(user_id=2, user_name='b') users.insert().execute(user_id=3, user_name='a') def a_eq(executable, wanted): got = list(executable.execute()) eq_(got, wanted) for labels in False, True: a_eq(users.select(order_by=[users.c.user_id], use_labels=labels), [(1, 'c'), (2, 'b'), (3, 'a')]) a_eq(users.select(order_by=[users.c.user_name, users.c.user_id], use_labels=labels), [(3, 'a'), (2, 'b'), (1, 'c')]) a_eq(select([users.c.user_id.label('foo')], use_labels=labels, order_by=[users.c.user_id]), [(1,), (2,), (3,)]) a_eq(select([users.c.user_id.label('foo'), users.c.user_name], use_labels=labels, order_by=[users.c.user_name, users.c.user_id]), [(3, 'a'), (2, 'b'), (1, 'c')]) a_eq(users.select(distinct=True, use_labels=labels, order_by=[users.c.user_id]), [(1, 'c'), (2, 'b'), (3, 'a')]) a_eq(select([users.c.user_id.label('foo')], distinct=True, use_labels=labels, order_by=[users.c.user_id]), [(1,), (2,), (3,)]) a_eq(select([users.c.user_id.label('a'), users.c.user_id.label('b'), users.c.user_name], use_labels=labels, order_by=[users.c.user_id]), [(1, 1, 'c'), (2, 2, 'b'), (3, 3, 'a')]) a_eq(users.select(distinct=True, use_labels=labels, order_by=[desc(users.c.user_id)]), [(3, 'a'), (2, 'b'), (1, 'c')]) a_eq(select([users.c.user_id.label('foo')], distinct=True, use_labels=labels, order_by=[users.c.user_id.desc()]), [(3,), (2,), (1,)]) @testing.requires.nullsordering def test_order_by_nulls(self): """Exercises ORDER BY clause generation. Tests simple, compound, aliased and DESC clauses. """ users.insert().execute(user_id=1) users.insert().execute(user_id=2, user_name='b') users.insert().execute(user_id=3, user_name='a') def a_eq(executable, wanted): got = list(executable.execute()) eq_(got, wanted) for labels in False, True: a_eq(users.select(order_by=[users.c.user_name.nullsfirst()], use_labels=labels), [(1, None), (3, 'a'), (2, 'b')]) a_eq(users.select(order_by=[users.c.user_name.nullslast()], use_labels=labels), [(3, 'a'), (2, 'b'), (1, None)]) a_eq(users.select(order_by=[asc(users.c.user_name).nullsfirst()], use_labels=labels), [(1, None), (3, 'a'), (2, 'b')]) a_eq(users.select(order_by=[asc(users.c.user_name).nullslast()], use_labels=labels), [(3, 'a'), (2, 'b'), (1, None)]) a_eq(users.select(order_by=[users.c.user_name.desc().nullsfirst()], use_labels=labels), [(1, None), (2, 'b'), (3, 'a')]) a_eq(users.select(order_by=[users.c.user_name.desc().nullslast()], use_labels=labels), [(2, 'b'), (3, 'a'), (1, None)]) a_eq(users.select(order_by=[desc(users.c.user_name).nullsfirst()], use_labels=labels), [(1, None), (2, 'b'), (3, 'a')]) a_eq(users.select(order_by=[desc(users.c.user_name).nullslast()], use_labels=labels), [(2, 'b'), (3, 'a'), (1, None)]) a_eq(users.select(order_by=[users.c.user_name.nullsfirst(), users.c.user_id], use_labels=labels), [(1, None), (3, 'a'), (2, 'b')]) a_eq(users.select(order_by=[users.c.user_name.nullslast(), users.c.user_id], use_labels=labels), [(3, 'a'), (2, 'b'), (1, None)]) @testing.fails_on('mssql+pyodbc', "pyodbc result row doesn't support slicing") def test_column_slices(self): users.insert().execute(user_id=1, user_name='john') users.insert().execute(user_id=2, user_name='jack') addresses.insert().execute(address_id=1, user_id=2, address='foo@bar.com') r = text("select * from query_addresses", bind=testing.db).execute().first() self.assert_(r[0:1] == (1,)) self.assert_(r[1:] == (2, 'foo@bar.com')) self.assert_(r[:-1] == (1, 2)) def test_column_accessor_basic_compiled(self): 
users.insert().execute( dict(user_id=1, user_name='john'), dict(user_id=2, user_name='jack') ) r = users.select(users.c.user_id==2).execute().first() self.assert_(r.user_id == r['user_id'] == r[users.c.user_id] == 2) self.assert_(r.user_name == r['user_name'] == r[users.c.user_name] == 'jack') def test_column_accessor_basic_text(self): users.insert().execute( dict(user_id=1, user_name='john'), dict(user_id=2, user_name='jack') ) r = testing.db.execute( text("select * from query_users where user_id=2") ).first() self.assert_(r.user_id == r['user_id'] == r[users.c.user_id] == 2) self.assert_(r.user_name == r['user_name'] == r[users.c.user_name] == 'jack') def test_column_accessor_textual_select(self): users.insert().execute( dict(user_id=1, user_name='john'), dict(user_id=2, user_name='jack') ) # this will create column() objects inside # the select(), these need to match on name anyway r = testing.db.execute( select(['user_id', 'user_name']).select_from('query_users'). where('user_id=2') ).first() self.assert_(r.user_id == r['user_id'] == r[users.c.user_id] == 2) self.assert_(r.user_name == r['user_name'] == r[users.c.user_name] == 'jack') def test_column_accessor_dotted_union(self): users.insert().execute( dict(user_id=1, user_name='john'), ) # test a little sqlite weirdness - with the UNION, # cols come back as "query_users.user_id" in cursor.description r = testing.db.execute( text("select query_users.user_id, query_users.user_name from query_users " "UNION select query_users.user_id, query_users.user_name from query_users" ) ).first() eq_(r['user_id'], 1) eq_(r['user_name'], "john") eq_(r.keys(), ["user_id", "user_name"]) @testing.only_on("sqlite", "sqlite specific feature") def test_column_accessor_sqlite_raw(self): users.insert().execute( dict(user_id=1, user_name='john'), ) r = text("select query_users.user_id, query_users.user_name from query_users " "UNION select query_users.user_id, query_users.user_name from query_users", bind=testing.db).execution_options(sqlite_raw_colnames=True).execute().first() assert 'user_id' not in r assert 'user_name' not in r eq_(r['query_users.user_id'], 1) eq_(r['query_users.user_name'], "john") eq_(r.keys(), ["query_users.user_id", "query_users.user_name"]) @testing.only_on("sqlite", "sqlite specific feature") def test_column_accessor_sqlite_translated(self): users.insert().execute( dict(user_id=1, user_name='john'), ) r = text("select query_users.user_id, query_users.user_name from query_users " "UNION select query_users.user_id, query_users.user_name from query_users", bind=testing.db).execute().first() eq_(r['user_id'], 1) eq_(r['user_name'], "john") eq_(r['query_users.user_id'], 1) eq_(r['query_users.user_name'], "john") eq_(r.keys(), ["user_id", "user_name"]) def test_column_accessor_labels_w_dots(self): users.insert().execute( dict(user_id=1, user_name='john'), ) # test using literal tablename.colname r = text('select query_users.user_id AS "query_users.user_id", ' 'query_users.user_name AS "query_users.user_name" from query_users', bind=testing.db).execution_options(sqlite_raw_colnames=True).execute().first() eq_(r['query_users.user_id'], 1) eq_(r['query_users.user_name'], "john") assert "user_name" not in r eq_(r.keys(), ["query_users.user_id", "query_users.user_name"]) def test_column_accessor_unary(self): users.insert().execute( dict(user_id=1, user_name='john'), ) # unary experssions r = select([users.c.user_name.distinct()]).order_by(users.c.user_name).execute().first() eq_(r[users.c.user_name], 'john') eq_(r.user_name, 'john') def 
test_column_accessor_err(self): r = testing.db.execute(select([1])).first() assert_raises_message( AttributeError, "Could not locate column in row for column 'foo'", getattr, r, "foo" ) assert_raises_message( KeyError, "Could not locate column in row for column 'foo'", lambda: r['foo'] ) def test_graceful_fetch_on_non_rows(self): """test that calling fetchone() etc. on a result that doesn't return rows fails gracefully. """ # these proxies don't work with no cursor.description present. # so they don't apply to this test at the moment. # result.FullyBufferedResultProxy, # result.BufferedRowResultProxy, # result.BufferedColumnResultProxy conn = testing.db.connect() for meth in ('fetchone', 'fetchall', 'first', 'scalar', 'fetchmany'): trans = conn.begin() result = conn.execute(users.insert(), user_id=1) assert_raises_message( exc.ResourceClosedError, "This result object does not return rows. " "It has been closed automatically.", getattr(result, meth), ) trans.rollback() @testing.requires.returning def test_no_inserted_pk_on_returning(self): result = testing.db.execute(users.insert().returning(users.c.user_id, users.c.user_name)) assert_raises_message( exc.InvalidRequestError, r"Can't call inserted_primary_key when returning\(\) is used.", getattr, result, 'inserted_primary_key' ) def test_fetchone_til_end(self): result = testing.db.execute("select * from query_users") eq_(result.fetchone(), None) assert_raises_message( exc.ResourceClosedError, "This result object is closed.", result.fetchone ) def test_row_case_sensitive(self): row = testing.db.execute( select([ literal_column("1").label("case_insensitive"), literal_column("2").label("CaseSensitive") ]) ).first() eq_(row.keys(), ["case_insensitive", "CaseSensitive"]) eq_(row["case_insensitive"], 1) eq_(row["CaseSensitive"], 2) assert_raises( KeyError, lambda: row["Case_insensitive"] ) assert_raises( KeyError, lambda: row["casesensitive"] ) def test_row_case_insensitive(self): ins_db = engines.testing_engine(options={"case_sensitive":False}) row = ins_db.execute( select([ literal_column("1").label("case_insensitive"), literal_column("2").label("CaseSensitive") ]) ).first() eq_(row.keys(), ["case_insensitive", "CaseSensitive"]) eq_(row["case_insensitive"], 1) eq_(row["CaseSensitive"], 2) eq_(row["Case_insensitive"],1) eq_(row["casesensitive"],2) def test_row_as_args(self): users.insert().execute(user_id=1, user_name='john') r = users.select(users.c.user_id==1).execute().first() users.delete().execute() users.insert().execute(r) eq_(users.select().execute().fetchall(), [(1, 'john')]) def test_result_as_args(self): users.insert().execute([ dict(user_id=1, user_name='john'), dict(user_id=2, user_name='ed')]) r = users.select().execute() users2.insert().execute(list(r)) eq_( users2.select().order_by(users2.c.user_id).execute().fetchall(), [(1, 'john'), (2, 'ed')] ) users2.delete().execute() r = users.select().execute() users2.insert().execute(*list(r)) eq_( users2.select().order_by(users2.c.user_id).execute().fetchall(), [(1, 'john'), (2, 'ed')] ) def test_ambiguous_column(self): users.insert().execute(user_id=1, user_name='john') result = users.outerjoin(addresses).select().execute() r = result.first() assert_raises_message( exc.InvalidRequestError, "Ambiguous column name", lambda: r['user_id'] ) assert_raises_message( exc.InvalidRequestError, "Ambiguous column name", lambda: r[users.c.user_id] ) assert_raises_message( exc.InvalidRequestError, "Ambiguous column name", lambda: r[addresses.c.user_id] ) # try to trick it - fake_table isn't in the 
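# A standalone sketch of the "non-rows result" behavior asserted by
# test_graceful_fetch_on_non_rows above: fetch methods on the result of an
# INSERT raise ResourceClosedError rather than failing obscurely.  The
# in-memory SQLite engine and table names here are illustrative assumptions.
#
#     from sqlalchemy import (create_engine, MetaData, Table, Column, Integer,
#                             String, exc)
#
#     engine = create_engine('sqlite://')
#     meta = MetaData()
#     users = Table('users', meta,
#                   Column('user_id', Integer, primary_key=True),
#                   Column('user_name', String(20)))
#     meta.create_all(engine)
#
#     result = engine.execute(users.insert(), user_id=1, user_name='jack')
#     try:
#         result.fetchone()
#     except exc.ResourceClosedError as err:
#         print(err)   # This result object does not return rows. ...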
result! # we get the correct error fake_table = Table('fake', MetaData(), Column('user_id', Integer)) assert_raises_message( exc.InvalidRequestError, "Could not locate column in row for column 'fake.user_id'", lambda: r[fake_table.c.user_id] ) r = util.pickle.loads(util.pickle.dumps(r)) assert_raises_message( exc.InvalidRequestError, "Ambiguous column name", lambda: r['user_id'] ) result = users.outerjoin(addresses).select().execute() result = _result.BufferedColumnResultProxy(result.context) r = result.first() assert isinstance(r, _result.BufferedColumnRow) assert_raises_message( exc.InvalidRequestError, "Ambiguous column name", lambda: r['user_id'] ) def test_ambiguous_column_by_col(self): users.insert().execute(user_id=1, user_name='john') ua = users.alias() u2 = users.alias() result = select([users.c.user_id, ua.c.user_id]).execute() row = result.first() assert_raises_message( exc.InvalidRequestError, "Ambiguous column name", lambda: row[users.c.user_id] ) assert_raises_message( exc.InvalidRequestError, "Ambiguous column name", lambda: row[ua.c.user_id] ) # Unfortunately, this fails - # we'd like # "Could not locate column in row" # to be raised here, but the check for # "common column" in _compare_name_for_result() # has other requirements to be more liberal. # Ultimately the # expression system would need a way to determine # if given two columns in a "proxy" relationship, if they # refer to a different parent table assert_raises_message( exc.InvalidRequestError, "Ambiguous column name", lambda: row[u2.c.user_id] ) def test_ambiguous_column_contains(self): # ticket 2702. in 0.7 we'd get True, False. # in 0.8, both columns are present so it's True; # but when they're fetched you'll get the ambiguous error. users.insert().execute(user_id=1, user_name='john') result = select([ users.c.user_id, addresses.c.user_id]).\ select_from(users.outerjoin(addresses)).execute() row = result.first() eq_( set([users.c.user_id in row, addresses.c.user_id in row]), set([True]) ) def test_ambiguous_column_by_col_plus_label(self): users.insert().execute(user_id=1, user_name='john') result = select([users.c.user_id, type_coerce(users.c.user_id, Integer).label('foo')] ).execute() row = result.first() eq_( row[users.c.user_id], 1 ) eq_( row[1], 1 ) @testing.requires.subqueries def test_column_label_targeting(self): users.insert().execute(user_id=7, user_name='ed') for s in ( users.select().alias('foo'), users.select().alias(users.name), ): row = s.select(use_labels=True).execute().first() assert row[s.c.user_id] == 7 assert row[s.c.user_name] == 'ed' def test_keys(self): users.insert().execute(user_id=1, user_name='foo') r = users.select().execute() eq_([x.lower() for x in r.keys()], ['user_id', 'user_name']) r = r.first() eq_([x.lower() for x in r.keys()], ['user_id', 'user_name']) def test_items(self): users.insert().execute(user_id=1, user_name='foo') r = users.select().execute().first() eq_([(x[0].lower(), x[1]) for x in r.items()], [('user_id', 1), ('user_name', 'foo')]) def test_len(self): users.insert().execute(user_id=1, user_name='foo') r = users.select().execute().first() eq_(len(r), 2) r = testing.db.execute('select user_name, user_id from query_users').first() eq_(len(r), 2) r = testing.db.execute('select user_name from query_users').first() eq_(len(r), 1) def test_column_order_with_simple_query(self): # should return values in column definition order users.insert().execute(user_id=1, user_name='foo') r = users.select(users.c.user_id==1).execute().first() eq_(r[0], 1) eq_(r[1], 'foo') 
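# A standalone sketch of the "Ambiguous column name" behavior asserted in the
# tests above: once two selected columns share a name, targeting the row by
# that name (or by either Column object) raises InvalidRequestError, while
# labeling one of them keeps access unambiguous.  Engine and table names here
# are illustrative assumptions, not this module's fixtures.
#
#     from sqlalchemy import (create_engine, MetaData, Table, Column, Integer,
#                             ForeignKey, select, exc)
#
#     engine = create_engine('sqlite://')
#     meta = MetaData()
#     users = Table('users', meta, Column('user_id', Integer, primary_key=True))
#     addresses = Table('addresses', meta,
#                       Column('address_id', Integer, primary_key=True),
#                       Column('user_id', Integer, ForeignKey('users.user_id')))
#     meta.create_all(engine)
#     engine.execute(users.insert(), user_id=1)
#
#     joined = users.outerjoin(addresses)
#     row = engine.execute(
#         select([users.c.user_id, addresses.c.user_id]).select_from(joined)).first()
#     try:
#         row['user_id']
#     except exc.InvalidRequestError as err:
#         print(err)   # Ambiguous column name ...
#
#     row = engine.execute(
#         select([users.c.user_id,
#                 addresses.c.user_id.label('addr_user_id')]).select_from(joined)).first()
#     print(row['addr_user_id'])   # None -- unambiguous via the label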
eq_([x.lower() for x in r.keys()], ['user_id', 'user_name']) eq_(r.values(), [1, 'foo']) def test_column_order_with_text_query(self): # should return values in query order users.insert().execute(user_id=1, user_name='foo') r = testing.db.execute('select user_name, user_id from query_users').first() eq_(r[0], 'foo') eq_(r[1], 1) eq_([x.lower() for x in r.keys()], ['user_name', 'user_id']) eq_(r.values(), ['foo', 1]) @testing.crashes('oracle', 'FIXME: unknown, varify not fails_on()') @testing.crashes('firebird', 'An identifier must begin with a letter') @testing.crashes('maxdb', 'FIXME: unknown, verify not fails_on()') def test_column_accessor_shadow(self): meta = MetaData(testing.db) shadowed = Table('test_shadowed', meta, Column('shadow_id', INT, primary_key = True), Column('shadow_name', VARCHAR(20)), Column('parent', VARCHAR(20)), Column('row', VARCHAR(40)), Column('_parent', VARCHAR(20)), Column('_row', VARCHAR(20)), ) shadowed.create(checkfirst=True) try: shadowed.insert().execute(shadow_id=1, shadow_name='The Shadow', parent='The Light', row='Without light there is no shadow', _parent='Hidden parent', _row='Hidden row') r = shadowed.select(shadowed.c.shadow_id==1).execute().first() self.assert_(r.shadow_id == r['shadow_id'] == r[shadowed.c.shadow_id] == 1) self.assert_(r.shadow_name == r['shadow_name'] == r[shadowed.c.shadow_name] == 'The Shadow') self.assert_(r.parent == r['parent'] == r[shadowed.c.parent] == 'The Light') self.assert_(r.row == r['row'] == r[shadowed.c.row] == 'Without light there is no shadow') self.assert_(r['_parent'] == 'Hidden parent') self.assert_(r['_row'] == 'Hidden row') try: print r._parent, r._row self.fail('Should not allow access to private attributes') except AttributeError: pass # expected finally: shadowed.drop(checkfirst=True) @testing.emits_warning('.*empty sequence.*') def test_in_filtering(self): """test the behavior of the in_() function.""" users.insert().execute(user_id = 7, user_name = 'jack') users.insert().execute(user_id = 8, user_name = 'fred') users.insert().execute(user_id = 9, user_name = None) s = users.select(users.c.user_name.in_([])) r = s.execute().fetchall() # No username is in empty set assert len(r) == 0 s = users.select(not_(users.c.user_name.in_([]))) r = s.execute().fetchall() # All usernames with a value are outside an empty set assert len(r) == 2 s = users.select(users.c.user_name.in_(['jack','fred'])) r = s.execute().fetchall() assert len(r) == 2 s = users.select(not_(users.c.user_name.in_(['jack','fred']))) r = s.execute().fetchall() # Null values are not outside any set assert len(r) == 0 @testing.emits_warning('.*empty sequence.*') @testing.fails_on('firebird', "uses sql-92 rules") @testing.fails_on('sybase', "uses sql-92 rules") @testing.fails_if(lambda: testing.against('mssql+pyodbc') and not testing.db.dialect.freetds, "uses sql-92 rules") def test_bind_in(self): """test calling IN against a bind parameter. this isn't allowed on several platforms since we generate ? = ?. 
""" users.insert().execute(user_id = 7, user_name = 'jack') users.insert().execute(user_id = 8, user_name = 'fred') users.insert().execute(user_id = 9, user_name = None) u = bindparam('search_key') s = users.select(not_(u.in_([]))) r = s.execute(search_key='john').fetchall() assert len(r) == 3 r = s.execute(search_key=None).fetchall() assert len(r) == 0 @testing.emits_warning('.*empty sequence.*') def test_literal_in(self): """similar to test_bind_in but use a bind with a value.""" users.insert().execute(user_id = 7, user_name = 'jack') users.insert().execute(user_id = 8, user_name = 'fred') users.insert().execute(user_id = 9, user_name = None) s = users.select(not_(literal("john").in_([]))) r = s.execute().fetchall() assert len(r) == 3 @testing.emits_warning('.*empty sequence.*') @testing.requires.boolean_col_expressions def test_in_filtering_advanced(self): """test the behavior of the in_() function when comparing against an empty collection, specifically that a proper boolean value is generated. """ users.insert().execute(user_id = 7, user_name = 'jack') users.insert().execute(user_id = 8, user_name = 'fred') users.insert().execute(user_id = 9, user_name = None) s = users.select(users.c.user_name.in_([]) == True) r = s.execute().fetchall() assert len(r) == 0 s = users.select(users.c.user_name.in_([]) == False) r = s.execute().fetchall() assert len(r) == 2 s = users.select(users.c.user_name.in_([]) == None) r = s.execute().fetchall() assert len(r) == 1 class RequiredBindTest(fixtures.TablesTest): run_create_tables = None run_deletes = None @classmethod def define_tables(cls, metadata): Table('foo', metadata, Column('id', Integer, primary_key=True), Column('data', String(50)), Column('x', Integer) ) def _assert_raises(self, stmt, params): assert_raises_message( exc.StatementError, "A value is required for bind parameter 'x'", testing.db.execute, stmt, **params) assert_raises_message( exc.StatementError, "A value is required for bind parameter 'x'", testing.db.execute, stmt, params) def test_insert(self): stmt = self.tables.foo.insert().values(x=bindparam('x'), data=bindparam('data')) self._assert_raises( stmt, {'data': 'data'} ) def test_select_where(self): stmt = select([self.tables.foo]).\ where(self.tables.foo.c.data == bindparam('data')).\ where(self.tables.foo.c.x == bindparam('x')) self._assert_raises( stmt, {'data': 'data'} ) @testing.requires.standalone_binds def test_select_columns(self): stmt = select([bindparam('data'), bindparam('x')]) self._assert_raises( stmt, {'data': 'data'} ) def test_text(self): stmt = text("select * from foo where x=:x and data=:data1") self._assert_raises( stmt, {'data1': 'data'} ) def test_required_flag(self): is_(bindparam('foo').required, True) is_(bindparam('foo', required=False).required, False) is_(bindparam('foo', 'bar').required, False) is_(bindparam('foo', 'bar', required=True).required, True) c = lambda: None is_(bindparam('foo', callable_=c, required=True).required, True) is_(bindparam('foo', callable_=c).required, False) is_(bindparam('foo', callable_=c, required=False).required, False) class TableInsertTest(fixtures.TablesTest): """test for consistent insert behavior across dialects regarding the inline=True flag, lower-case 't' tables. 
""" run_create_tables = 'each' @classmethod def define_tables(cls, metadata): Table('foo', metadata, Column('id', Integer, Sequence('t_id_seq'), primary_key=True), Column('data', String(50)), Column('x', Integer) ) def _fixture(self, types=True): if types: t = sql.table('foo', sql.column('id', Integer), sql.column('data', String), sql.column('x', Integer)) else: t = sql.table('foo', sql.column('id'), sql.column('data'), sql.column('x')) return t def _test(self, stmt, row, returning=None, inserted_primary_key=False): r = testing.db.execute(stmt) if returning: returned = r.first() eq_(returned, returning) elif inserted_primary_key is not False: eq_(r.inserted_primary_key, inserted_primary_key) eq_(testing.db.execute(self.tables.foo.select()).first(), row) def _test_multi(self, stmt, rows, data): testing.db.execute(stmt, rows) eq_( testing.db.execute(self.tables.foo.select(). order_by(self.tables.foo.c.id)).fetchall(), data) @testing.requires.sequences def test_expicit_sequence(self): t = self._fixture() self._test( t.insert().values( id=func.next_value(Sequence('t_id_seq')), data='data', x=5 ), (1, 'data', 5) ) def test_uppercase(self): t = self.tables.foo self._test( t.insert().values( id=1, data='data', x=5 ), (1, 'data', 5), inserted_primary_key=[1] ) def test_uppercase_inline(self): t = self.tables.foo self._test( t.insert(inline=True).values( id=1, data='data', x=5 ), (1, 'data', 5), inserted_primary_key=[1] ) def test_uppercase_inline_implicit(self): t = self.tables.foo self._test( t.insert(inline=True).values( data='data', x=5 ), (1, 'data', 5), inserted_primary_key=[None] ) def test_uppercase_implicit(self): t = self.tables.foo self._test( t.insert().values(data='data', x=5), (1, 'data', 5), inserted_primary_key=[1] ) def test_uppercase_direct_params(self): t = self.tables.foo self._test( t.insert().values(id=1, data='data', x=5), (1, 'data', 5), inserted_primary_key=[1] ) @testing.requires.returning def test_uppercase_direct_params_returning(self): t = self.tables.foo self._test( t.insert().values( id=1, data='data', x=5).returning(t.c.id, t.c.x), (1, 'data', 5), returning=(1, 5) ) @testing.fails_on('mssql', "lowercase table doesn't support identity insert disable") def test_direct_params(self): t = self._fixture() self._test( t.insert().values(id=1, data='data', x=5), (1, 'data', 5), inserted_primary_key=[] ) @testing.fails_on('mssql', "lowercase table doesn't support identity insert disable") @testing.requires.returning def test_direct_params_returning(self): t = self._fixture() self._test( t.insert().values( id=1, data='data', x=5).returning(t.c.id, t.c.x), (1, 'data', 5), returning=(1, 5) ) @testing.requires.emulated_lastrowid def test_implicit_pk(self): t = self._fixture() self._test( t.insert().values( data='data', x=5), (1, 'data', 5), inserted_primary_key=[] ) @testing.requires.emulated_lastrowid def test_implicit_pk_multi_rows(self): t = self._fixture() self._test_multi( t.insert(), [ {'data':'d1', 'x':5}, {'data':'d2', 'x':6}, {'data':'d3', 'x':7}, ], [ (1, 'd1', 5), (2, 'd2', 6), (3, 'd3', 7) ], ) @testing.requires.emulated_lastrowid def test_implicit_pk_inline(self): t = self._fixture() self._test( t.insert(inline=True).values(data='data', x=5), (1, 'data', 5), inserted_primary_key=[] ) class PercentSchemaNamesTest(fixtures.TestBase): """tests using percent signs, spaces in table and column names. Doesn't pass for mysql, postgresql, but this is really a SQLAlchemy bug - we should be escaping out %% signs for this operation the same way we do for text() and column labels. 
""" @classmethod def setup_class(cls): global percent_table, metadata, lightweight_percent_table metadata = MetaData(testing.db) percent_table = Table('percent%table', metadata, Column("percent%", Integer), Column("spaces % more spaces", Integer), ) lightweight_percent_table = sql.table('percent%table', sql.column("percent%"), sql.column("spaces % more spaces"), ) metadata.create_all() def teardown(self): percent_table.delete().execute() @classmethod def teardown_class(cls): metadata.drop_all() @testing.skip_if(lambda: testing.against('postgresql'), "psycopg2 2.4 no longer accepts % in bind placeholders") def test_single_roundtrip(self): percent_table.insert().execute( {'percent%':5, 'spaces % more spaces':12}, ) percent_table.insert().execute( {'percent%':7, 'spaces % more spaces':11}, ) percent_table.insert().execute( {'percent%':9, 'spaces % more spaces':10}, ) percent_table.insert().execute( {'percent%':11, 'spaces % more spaces':9}, ) self._assert_table() @testing.skip_if(lambda: testing.against('postgresql'), "psycopg2 2.4 no longer accepts % in bind placeholders") @testing.crashes('mysql+mysqldb', "MySQLdb handles executemany() " "inconsistently vs. execute()") def test_executemany_roundtrip(self): percent_table.insert().execute( {'percent%':5, 'spaces % more spaces':12}, ) percent_table.insert().execute( {'percent%':7, 'spaces % more spaces':11}, {'percent%':9, 'spaces % more spaces':10}, {'percent%':11, 'spaces % more spaces':9}, ) self._assert_table() def _assert_table(self): for table in ( percent_table, percent_table.alias(), lightweight_percent_table, lightweight_percent_table.alias()): eq_( list( testing.db.execute( table.select().order_by(table.c['percent%']) ) ), [ (5, 12), (7, 11), (9, 10), (11, 9) ] ) eq_( list( testing.db.execute( table.select(). where(table.c['spaces % more spaces'].in_([9, 10])). 
order_by(table.c['percent%']), ) ), [ (9, 10), (11, 9) ] ) row = testing.db.execute(table.select().\ order_by(table.c['percent%'])).first() eq_(row['percent%'], 5) eq_(row['spaces % more spaces'], 12) eq_(row[table.c['percent%']], 5) eq_(row[table.c['spaces % more spaces']], 12) percent_table.update().values( {percent_table.c['spaces % more spaces']:15} ).execute() eq_( list( testing.db.execute( percent_table.\ select().\ order_by(percent_table.c['percent%']) ) ), [ (5, 15), (7, 15), (9, 15), (11, 15) ] ) class KeyTargetingTest(fixtures.TablesTest): run_inserts = 'once' run_deletes = None @classmethod def define_tables(cls, metadata): keyed1 = Table('keyed1', metadata, Column("a", CHAR(2), key="b"), Column("c", CHAR(2), key="q") ) keyed2 = Table('keyed2', metadata, Column("a", CHAR(2)), Column("b", CHAR(2)), ) keyed3 = Table('keyed3', metadata, Column("a", CHAR(2)), Column("d", CHAR(2)), ) keyed4 = Table('keyed4', metadata, Column("b", CHAR(2)), Column("q", CHAR(2)), ) content = Table('content', metadata, Column('t', String(30), key="type"), ) bar = Table('bar', metadata, Column('ctype', String(30), key="content_type") ) if testing.requires.schemas.enabled: wschema = Table('wschema', metadata, Column("a", CHAR(2), key="b"), Column("c", CHAR(2), key="q"), schema="test_schema" ) @classmethod def insert_data(cls): cls.tables.keyed1.insert().execute(dict(b="a1", q="c1")) cls.tables.keyed2.insert().execute(dict(a="a2", b="b2")) cls.tables.keyed3.insert().execute(dict(a="a3", d="d3")) cls.tables.keyed4.insert().execute(dict(b="b4", q="q4")) cls.tables.content.insert().execute(type="t1") if testing.requires.schemas.enabled: cls.tables['test_schema.wschema'].insert().execute(dict(b="a1", q="c1")) @testing.requires.schemas def test_keyed_accessor_wschema(self): keyed1 = self.tables['test_schema.wschema'] row = testing.db.execute(keyed1.select()).first() eq_(row.b, "a1") eq_(row.q, "c1") eq_(row.a, "a1") eq_(row.c, "c1") def test_keyed_accessor_single(self): keyed1 = self.tables.keyed1 row = testing.db.execute(keyed1.select()).first() eq_(row.b, "a1") eq_(row.q, "c1") eq_(row.a, "a1") eq_(row.c, "c1") def test_keyed_accessor_single_labeled(self): keyed1 = self.tables.keyed1 row = testing.db.execute(keyed1.select().apply_labels()).first() eq_(row.keyed1_b, "a1") eq_(row.keyed1_q, "c1") eq_(row.keyed1_a, "a1") eq_(row.keyed1_c, "c1") def test_keyed_accessor_composite_conflict_2(self): keyed1 = self.tables.keyed1 keyed2 = self.tables.keyed2 row = testing.db.execute(select([keyed1, keyed2])).first() # row.b is unambiguous eq_(row.b, "b2") # row.a is ambiguous assert_raises_message( exc.InvalidRequestError, "Ambig", getattr, row, "a" ) def test_keyed_accessor_composite_names_precedent(self): keyed1 = self.tables.keyed1 keyed4 = self.tables.keyed4 row = testing.db.execute(select([keyed1, keyed4])).first() eq_(row.b, "b4") eq_(row.q, "q4") eq_(row.a, "a1") eq_(row.c, "c1") def test_keyed_accessor_composite_keys_precedent(self): keyed1 = self.tables.keyed1 keyed3 = self.tables.keyed3 row = testing.db.execute(select([keyed1, keyed3])).first() eq_(row.q, "c1") assert_raises_message( exc.InvalidRequestError, "Ambiguous column name 'b'", getattr, row, "b" ) assert_raises_message( exc.InvalidRequestError, "Ambiguous column name 'a'", getattr, row, "a" ) eq_(row.d, "d3") def test_keyed_accessor_composite_labeled(self): keyed1 = self.tables.keyed1 keyed2 = self.tables.keyed2 row = testing.db.execute(select([keyed1, keyed2]).apply_labels()).first() eq_(row.keyed1_b, "a1") eq_(row.keyed1_a, "a1") eq_(row.keyed1_q, 
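# A standalone sketch of the Column key= behavior that the keyed-accessor
# tests in KeyTargetingTest rely on: the key decouples the Python-side
# accessor from the SQL name, and a plain row exposes the value under both.
# Engine and data here are illustrative.
#
#     from sqlalchemy import create_engine, MetaData, Table, Column, CHAR
#
#     engine = create_engine('sqlite://')
#     meta = MetaData()
#     keyed = Table('keyed1', meta,
#                   Column('a', CHAR(2), key='b'),
#                   Column('c', CHAR(2), key='q'))
#     meta.create_all(engine)
#
#     engine.execute(keyed.insert(), b='a1', q='c1')   # params use the keys
#     row = engine.execute(keyed.select()).first()
#     print(row.b)   # a1 -- by key
#     print(row.a)   # a1 -- by SQL name as well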
"c1") eq_(row.keyed1_c, "c1") eq_(row.keyed2_a, "a2") eq_(row.keyed2_b, "b2") assert_raises(KeyError, lambda: row['keyed2_c']) assert_raises(KeyError, lambda: row['keyed2_q']) def test_column_label_overlap_fallback(self): content, bar = self.tables.content, self.tables.bar row = testing.db.execute(select([content.c.type.label("content_type")])).first() assert content.c.type not in row assert bar.c.content_type not in row assert sql.column('content_type') in row row = testing.db.execute(select([func.now().label("content_type")])).first() assert content.c.type not in row assert bar.c.content_type not in row assert sql.column('content_type') in row def test_column_label_overlap_fallback_2(self): content, bar = self.tables.content, self.tables.bar row = testing.db.execute(content.select(use_labels=True)).first() assert content.c.type in row assert bar.c.content_type not in row assert sql.column('content_type') not in row class LimitTest(fixtures.TestBase): @classmethod def setup_class(cls): global users, addresses, metadata metadata = MetaData(testing.db) users = Table('query_users', metadata, Column('user_id', INT, primary_key = True), Column('user_name', VARCHAR(20)), ) addresses = Table('query_addresses', metadata, Column('address_id', Integer, primary_key=True), Column('user_id', Integer, ForeignKey('query_users.user_id')), Column('address', String(30))) metadata.create_all() users.insert().execute(user_id=1, user_name='john') addresses.insert().execute(address_id=1, user_id=1, address='addr1') users.insert().execute(user_id=2, user_name='jack') addresses.insert().execute(address_id=2, user_id=2, address='addr1') users.insert().execute(user_id=3, user_name='ed') addresses.insert().execute(address_id=3, user_id=3, address='addr2') users.insert().execute(user_id=4, user_name='wendy') addresses.insert().execute(address_id=4, user_id=4, address='addr3') users.insert().execute(user_id=5, user_name='laura') addresses.insert().execute(address_id=5, user_id=5, address='addr4') users.insert().execute(user_id=6, user_name='ralph') addresses.insert().execute(address_id=6, user_id=6, address='addr5') users.insert().execute(user_id=7, user_name='fido') addresses.insert().execute(address_id=7, user_id=7, address='addr5') @classmethod def teardown_class(cls): metadata.drop_all() def test_select_limit(self): r = users.select(limit=3, order_by=[users.c.user_id]).execute().fetchall() self.assert_(r == [(1, 'john'), (2, 'jack'), (3, 'ed')], repr(r)) @testing.requires.offset def test_select_limit_offset(self): """Test the interaction between limit and offset""" r = users.select(limit=3, offset=2, order_by=[users.c.user_id]).execute().fetchall() self.assert_(r==[(3, 'ed'), (4, 'wendy'), (5, 'laura')]) r = users.select(offset=5, order_by=[users.c.user_id]).execute().fetchall() self.assert_(r==[(6, 'ralph'), (7, 'fido')]) def test_select_distinct_limit(self): """Test the interaction between limit and distinct""" r = sorted([x[0] for x in select([addresses.c.address]).distinct().limit(3).order_by(addresses.c.address).execute().fetchall()]) self.assert_(len(r) == 3, repr(r)) self.assert_(r[0] != r[1] and r[1] != r[2], repr(r)) @testing.requires.offset @testing.fails_on('mssql', 'FIXME: unknown') def test_select_distinct_offset(self): """Test the interaction between distinct and offset""" r = sorted([x[0] for x in select([addresses.c.address]).distinct().offset(1).order_by(addresses.c.address).execute().fetchall()]) self.assert_(len(r) == 4, repr(r)) self.assert_(r[0] != r[1] and r[1] != r[2] and r[2] != [3], 
repr(r)) @testing.requires.offset def test_select_distinct_limit_offset(self): """Test the interaction between limit and limit/offset""" r = select([addresses.c.address]).order_by(addresses.c.address).distinct().offset(2).limit(3).execute().fetchall() self.assert_(len(r) == 3, repr(r)) self.assert_(r[0] != r[1] and r[1] != r[2], repr(r)) class CompoundTest(fixtures.TestBase): """test compound statements like UNION, INTERSECT, particularly their ability to nest on different databases.""" @classmethod def setup_class(cls): global metadata, t1, t2, t3 metadata = MetaData(testing.db) t1 = Table('t1', metadata, Column('col1', Integer, test_needs_autoincrement=True, primary_key=True), Column('col2', String(30)), Column('col3', String(40)), Column('col4', String(30)) ) t2 = Table('t2', metadata, Column('col1', Integer, test_needs_autoincrement=True, primary_key=True), Column('col2', String(30)), Column('col3', String(40)), Column('col4', String(30))) t3 = Table('t3', metadata, Column('col1', Integer, test_needs_autoincrement=True, primary_key=True), Column('col2', String(30)), Column('col3', String(40)), Column('col4', String(30))) metadata.create_all() t1.insert().execute([ dict(col2="t1col2r1", col3="aaa", col4="aaa"), dict(col2="t1col2r2", col3="bbb", col4="bbb"), dict(col2="t1col2r3", col3="ccc", col4="ccc"), ]) t2.insert().execute([ dict(col2="t2col2r1", col3="aaa", col4="bbb"), dict(col2="t2col2r2", col3="bbb", col4="ccc"), dict(col2="t2col2r3", col3="ccc", col4="aaa"), ]) t3.insert().execute([ dict(col2="t3col2r1", col3="aaa", col4="ccc"), dict(col2="t3col2r2", col3="bbb", col4="aaa"), dict(col2="t3col2r3", col3="ccc", col4="bbb"), ]) @engines.close_first def teardown(self): pass @classmethod def teardown_class(cls): metadata.drop_all() def _fetchall_sorted(self, executed): return sorted([tuple(row) for row in executed.fetchall()]) @testing.requires.subqueries def test_union(self): (s1, s2) = ( select([t1.c.col3.label('col3'), t1.c.col4.label('col4')], t1.c.col2.in_(["t1col2r1", "t1col2r2"])), select([t2.c.col3.label('col3'), t2.c.col4.label('col4')], t2.c.col2.in_(["t2col2r2", "t2col2r3"])) ) u = union(s1, s2) wanted = [('aaa', 'aaa'), ('bbb', 'bbb'), ('bbb', 'ccc'), ('ccc', 'aaa')] found1 = self._fetchall_sorted(u.execute()) eq_(found1, wanted) found2 = self._fetchall_sorted(u.alias('bar').select().execute()) eq_(found2, wanted) @testing.fails_on('firebird', "doesn't like ORDER BY with UNIONs") def test_union_ordered(self): (s1, s2) = ( select([t1.c.col3.label('col3'), t1.c.col4.label('col4')], t1.c.col2.in_(["t1col2r1", "t1col2r2"])), select([t2.c.col3.label('col3'), t2.c.col4.label('col4')], t2.c.col2.in_(["t2col2r2", "t2col2r3"])) ) u = union(s1, s2, order_by=['col3', 'col4']) wanted = [('aaa', 'aaa'), ('bbb', 'bbb'), ('bbb', 'ccc'), ('ccc', 'aaa')] eq_(u.execute().fetchall(), wanted) @testing.fails_on('firebird', "doesn't like ORDER BY with UNIONs") @testing.fails_on('maxdb', 'FIXME: unknown') @testing.requires.subqueries def test_union_ordered_alias(self): (s1, s2) = ( select([t1.c.col3.label('col3'), t1.c.col4.label('col4')], t1.c.col2.in_(["t1col2r1", "t1col2r2"])), select([t2.c.col3.label('col3'), t2.c.col4.label('col4')], t2.c.col2.in_(["t2col2r2", "t2col2r3"])) ) u = union(s1, s2, order_by=['col3', 'col4']) wanted = [('aaa', 'aaa'), ('bbb', 'bbb'), ('bbb', 'ccc'), ('ccc', 'aaa')] eq_(u.alias('bar').select().execute().fetchall(), wanted) @testing.crashes('oracle', 'FIXME: unknown, verify not fails_on') @testing.fails_on('firebird', "has trouble extracting anonymous column from 
union subquery") @testing.fails_on('mysql', 'FIXME: unknown') @testing.fails_on('sqlite', 'FIXME: unknown') @testing.fails_on('informix', "FIXME: unknown (maybe the second alias isn't allows)") def test_union_all(self): e = union_all( select([t1.c.col3]), union( select([t1.c.col3]), select([t1.c.col3]), ) ) wanted = [('aaa',),('aaa',),('bbb',), ('bbb',), ('ccc',),('ccc',)] found1 = self._fetchall_sorted(e.execute()) eq_(found1, wanted) found2 = self._fetchall_sorted(e.alias('foo').select().execute()) eq_(found2, wanted) def test_union_all_lightweight(self): """like test_union_all, but breaks the sub-union into a subquery with an explicit column reference on the outside, more palatable to a wider variety of engines. """ u = union( select([t1.c.col3]), select([t1.c.col3]), ).alias() e = union_all( select([t1.c.col3]), select([u.c.col3]) ) wanted = [('aaa',),('aaa',),('bbb',), ('bbb',), ('ccc',),('ccc',)] found1 = self._fetchall_sorted(e.execute()) eq_(found1, wanted) found2 = self._fetchall_sorted(e.alias('foo').select().execute()) eq_(found2, wanted) @testing.requires.intersect def test_intersect(self): i = intersect( select([t2.c.col3, t2.c.col4]), select([t2.c.col3, t2.c.col4], t2.c.col4==t3.c.col3) ) wanted = [('aaa', 'bbb'), ('bbb', 'ccc'), ('ccc', 'aaa')] found1 = self._fetchall_sorted(i.execute()) eq_(found1, wanted) found2 = self._fetchall_sorted(i.alias('bar').select().execute()) eq_(found2, wanted) @testing.requires.except_ @testing.fails_on('sqlite', "Can't handle this style of nesting") def test_except_style1(self): e = except_(union( select([t1.c.col3, t1.c.col4]), select([t2.c.col3, t2.c.col4]), select([t3.c.col3, t3.c.col4]), ), select([t2.c.col3, t2.c.col4])) wanted = [('aaa', 'aaa'), ('aaa', 'ccc'), ('bbb', 'aaa'), ('bbb', 'bbb'), ('ccc', 'bbb'), ('ccc', 'ccc')] found = self._fetchall_sorted(e.alias().select().execute()) eq_(found, wanted) @testing.requires.except_ def test_except_style2(self): # same as style1, but add alias().select() to the except_(). # sqlite can handle it now. 
e = except_(union( select([t1.c.col3, t1.c.col4]), select([t2.c.col3, t2.c.col4]), select([t3.c.col3, t3.c.col4]), ).alias().select(), select([t2.c.col3, t2.c.col4])) wanted = [('aaa', 'aaa'), ('aaa', 'ccc'), ('bbb', 'aaa'), ('bbb', 'bbb'), ('ccc', 'bbb'), ('ccc', 'ccc')] found1 = self._fetchall_sorted(e.execute()) eq_(found1, wanted) found2 = self._fetchall_sorted(e.alias().select().execute()) eq_(found2, wanted) @testing.fails_on('sqlite', "Can't handle this style of nesting") @testing.requires.except_ def test_except_style3(self): # aaa, bbb, ccc - (aaa, bbb, ccc - (ccc)) = ccc e = except_( select([t1.c.col3]), # aaa, bbb, ccc except_( select([t2.c.col3]), # aaa, bbb, ccc select([t3.c.col3], t3.c.col3 == 'ccc'), #ccc ) ) eq_(e.execute().fetchall(), [('ccc',)]) eq_(e.alias('foo').select().execute().fetchall(), [('ccc',)]) @testing.requires.except_ def test_except_style4(self): # aaa, bbb, ccc - (aaa, bbb, ccc - (ccc)) = ccc e = except_( select([t1.c.col3]), # aaa, bbb, ccc except_( select([t2.c.col3]), # aaa, bbb, ccc select([t3.c.col3], t3.c.col3 == 'ccc'), #ccc ).alias().select() ) eq_(e.execute().fetchall(), [('ccc',)]) eq_( e.alias().select().execute().fetchall(), [('ccc',)] ) @testing.requires.intersect @testing.fails_on('sqlite', "sqlite can't handle leading parenthesis") def test_intersect_unions(self): u = intersect( union( select([t1.c.col3, t1.c.col4]), select([t3.c.col3, t3.c.col4]), ), union( select([t2.c.col3, t2.c.col4]), select([t3.c.col3, t3.c.col4]), ).alias().select() ) wanted = [('aaa', 'ccc'), ('bbb', 'aaa'), ('ccc', 'bbb')] found = self._fetchall_sorted(u.execute()) eq_(found, wanted) @testing.requires.intersect def test_intersect_unions_2(self): u = intersect( union( select([t1.c.col3, t1.c.col4]), select([t3.c.col3, t3.c.col4]), ).alias().select(), union( select([t2.c.col3, t2.c.col4]), select([t3.c.col3, t3.c.col4]), ).alias().select() ) wanted = [('aaa', 'ccc'), ('bbb', 'aaa'), ('ccc', 'bbb')] found = self._fetchall_sorted(u.execute()) eq_(found, wanted) @testing.requires.intersect def test_intersect_unions_3(self): u = intersect( select([t2.c.col3, t2.c.col4]), union( select([t1.c.col3, t1.c.col4]), select([t2.c.col3, t2.c.col4]), select([t3.c.col3, t3.c.col4]), ).alias().select() ) wanted = [('aaa', 'bbb'), ('bbb', 'ccc'), ('ccc', 'aaa')] found = self._fetchall_sorted(u.execute()) eq_(found, wanted) @testing.requires.intersect def test_composite_alias(self): ua = intersect( select([t2.c.col3, t2.c.col4]), union( select([t1.c.col3, t1.c.col4]), select([t2.c.col3, t2.c.col4]), select([t3.c.col3, t3.c.col4]), ).alias().select() ).alias() wanted = [('aaa', 'bbb'), ('bbb', 'ccc'), ('ccc', 'aaa')] found = self._fetchall_sorted(ua.select().execute()) eq_(found, wanted) class JoinTest(fixtures.TestBase): """Tests join execution. The compiled SQL emitted by the dialect might be ANSI joins or theta joins ('old oracle style', with (+) for OUTER). This test tries to exercise join syntax and uncover any inconsistencies in `JOIN rhs ON lhs.col=rhs.col` vs `rhs.col=lhs.col`. At least one database seems to be sensitive to this. 
""" @classmethod def setup_class(cls): global metadata global t1, t2, t3 metadata = MetaData(testing.db) t1 = Table('t1', metadata, Column('t1_id', Integer, primary_key=True), Column('name', String(32))) t2 = Table('t2', metadata, Column('t2_id', Integer, primary_key=True), Column('t1_id', Integer, ForeignKey('t1.t1_id')), Column('name', String(32))) t3 = Table('t3', metadata, Column('t3_id', Integer, primary_key=True), Column('t2_id', Integer, ForeignKey('t2.t2_id')), Column('name', String(32))) metadata.drop_all() metadata.create_all() # t1.10 -> t2.20 -> t3.30 # t1.11 -> t2.21 # t1.12 t1.insert().execute({'t1_id': 10, 'name': 't1 #10'}, {'t1_id': 11, 'name': 't1 #11'}, {'t1_id': 12, 'name': 't1 #12'}) t2.insert().execute({'t2_id': 20, 't1_id': 10, 'name': 't2 #20'}, {'t2_id': 21, 't1_id': 11, 'name': 't2 #21'}) t3.insert().execute({'t3_id': 30, 't2_id': 20, 'name': 't3 #30'}) @classmethod def teardown_class(cls): metadata.drop_all() def assertRows(self, statement, expected): """Execute a statement and assert that rows returned equal expected.""" found = sorted([tuple(row) for row in statement.execute().fetchall()]) eq_(found, sorted(expected)) def test_join_x1(self): """Joins t1->t2.""" for criteria in (t1.c.t1_id==t2.c.t1_id, t2.c.t1_id==t1.c.t1_id): expr = select( [t1.c.t1_id, t2.c.t2_id], from_obj=[t1.join(t2, criteria)]) self.assertRows(expr, [(10, 20), (11, 21)]) def test_join_x2(self): """Joins t1->t2->t3.""" for criteria in (t1.c.t1_id==t2.c.t1_id, t2.c.t1_id==t1.c.t1_id): expr = select( [t1.c.t1_id, t2.c.t2_id], from_obj=[t1.join(t2, criteria)]) self.assertRows(expr, [(10, 20), (11, 21)]) def test_outerjoin_x1(self): """Outer joins t1->t2.""" for criteria in (t2.c.t2_id==t3.c.t2_id, t3.c.t2_id==t2.c.t2_id): expr = select( [t1.c.t1_id, t2.c.t2_id], from_obj=[t1.join(t2).join(t3, criteria)]) self.assertRows(expr, [(10, 20)]) def test_outerjoin_x2(self): """Outer joins t1->t2,t3.""" for criteria in (t2.c.t2_id==t3.c.t2_id, t3.c.t2_id==t2.c.t2_id): expr = select( [t1.c.t1_id, t2.c.t2_id, t3.c.t3_id], from_obj=[t1.outerjoin(t2, t1.c.t1_id==t2.c.t1_id). \ outerjoin(t3, criteria)]) self.assertRows(expr, [(10, 20, 30), (11, 21, None), (12, None, None)]) def test_outerjoin_where_x2_t1(self): """Outer joins t1->t2,t3, where on t1.""" for criteria in (t2.c.t2_id==t3.c.t2_id, t3.c.t2_id==t2.c.t2_id): expr = select( [t1.c.t1_id, t2.c.t2_id, t3.c.t3_id], t1.c.name == 't1 #10', from_obj=[(t1.outerjoin(t2, t1.c.t1_id==t2.c.t1_id). outerjoin(t3, criteria))]) self.assertRows(expr, [(10, 20, 30)]) expr = select( [t1.c.t1_id, t2.c.t2_id, t3.c.t3_id], t1.c.t1_id < 12, from_obj=[(t1.outerjoin(t2, t1.c.t1_id==t2.c.t1_id). outerjoin(t3, criteria))]) self.assertRows(expr, [(10, 20, 30), (11, 21, None)]) def test_outerjoin_where_x2_t2(self): """Outer joins t1->t2,t3, where on t2.""" for criteria in (t2.c.t2_id==t3.c.t2_id, t3.c.t2_id==t2.c.t2_id): expr = select( [t1.c.t1_id, t2.c.t2_id, t3.c.t3_id], t2.c.name == 't2 #20', from_obj=[(t1.outerjoin(t2, t1.c.t1_id==t2.c.t1_id). outerjoin(t3, criteria))]) self.assertRows(expr, [(10, 20, 30)]) expr = select( [t1.c.t1_id, t2.c.t2_id, t3.c.t3_id], t2.c.t2_id < 29, from_obj=[(t1.outerjoin(t2, t1.c.t1_id==t2.c.t1_id). 
outerjoin(t3, criteria))]) self.assertRows(expr, [(10, 20, 30), (11, 21, None)]) def test_outerjoin_where_x2_t1t2(self): """Outer joins t1->t2,t3, where on t1 and t2.""" for criteria in (t2.c.t2_id==t3.c.t2_id, t3.c.t2_id==t2.c.t2_id): expr = select( [t1.c.t1_id, t2.c.t2_id, t3.c.t3_id], and_(t1.c.name == 't1 #10', t2.c.name == 't2 #20'), from_obj=[(t1.outerjoin(t2, t1.c.t1_id==t2.c.t1_id). outerjoin(t3, criteria))]) self.assertRows(expr, [(10, 20, 30)]) expr = select( [t1.c.t1_id, t2.c.t2_id, t3.c.t3_id], and_(t1.c.t1_id < 19, 29 > t2.c.t2_id), from_obj=[(t1.outerjoin(t2, t1.c.t1_id==t2.c.t1_id). outerjoin(t3, criteria))]) self.assertRows(expr, [(10, 20, 30), (11, 21, None)]) def test_outerjoin_where_x2_t3(self): """Outer joins t1->t2,t3, where on t3.""" for criteria in (t2.c.t2_id==t3.c.t2_id, t3.c.t2_id==t2.c.t2_id): expr = select( [t1.c.t1_id, t2.c.t2_id, t3.c.t3_id], t3.c.name == 't3 #30', from_obj=[(t1.outerjoin(t2, t1.c.t1_id==t2.c.t1_id). outerjoin(t3, criteria))]) self.assertRows(expr, [(10, 20, 30)]) expr = select( [t1.c.t1_id, t2.c.t2_id, t3.c.t3_id], t3.c.t3_id < 39, from_obj=[(t1.outerjoin(t2, t1.c.t1_id==t2.c.t1_id). outerjoin(t3, criteria))]) self.assertRows(expr, [(10, 20, 30)]) def test_outerjoin_where_x2_t1t3(self): """Outer joins t1->t2,t3, where on t1 and t3.""" for criteria in (t2.c.t2_id==t3.c.t2_id, t3.c.t2_id==t2.c.t2_id): expr = select( [t1.c.t1_id, t2.c.t2_id, t3.c.t3_id], and_(t1.c.name == 't1 #10', t3.c.name == 't3 #30'), from_obj=[(t1.outerjoin(t2, t1.c.t1_id==t2.c.t1_id). outerjoin(t3, criteria))]) self.assertRows(expr, [(10, 20, 30)]) expr = select( [t1.c.t1_id, t2.c.t2_id, t3.c.t3_id], and_(t1.c.t1_id < 19, t3.c.t3_id < 39), from_obj=[(t1.outerjoin(t2, t1.c.t1_id==t2.c.t1_id). outerjoin(t3, criteria))]) self.assertRows(expr, [(10, 20, 30)]) def test_outerjoin_where_x2_t1t2(self): """Outer joins t1->t2,t3, where on t1 and t2.""" for criteria in (t2.c.t2_id==t3.c.t2_id, t3.c.t2_id==t2.c.t2_id): expr = select( [t1.c.t1_id, t2.c.t2_id, t3.c.t3_id], and_(t1.c.name == 't1 #10', t2.c.name == 't2 #20'), from_obj=[(t1.outerjoin(t2, t1.c.t1_id==t2.c.t1_id). outerjoin(t3, criteria))]) self.assertRows(expr, [(10, 20, 30)]) expr = select( [t1.c.t1_id, t2.c.t2_id, t3.c.t3_id], and_(t1.c.t1_id < 12, t2.c.t2_id < 39), from_obj=[(t1.outerjoin(t2, t1.c.t1_id==t2.c.t1_id). outerjoin(t3, criteria))]) self.assertRows(expr, [(10, 20, 30), (11, 21, None)]) def test_outerjoin_where_x2_t1t2t3(self): """Outer joins t1->t2,t3, where on t1, t2 and t3.""" for criteria in (t2.c.t2_id==t3.c.t2_id, t3.c.t2_id==t2.c.t2_id): expr = select( [t1.c.t1_id, t2.c.t2_id, t3.c.t3_id], and_(t1.c.name == 't1 #10', t2.c.name == 't2 #20', t3.c.name == 't3 #30'), from_obj=[(t1.outerjoin(t2, t1.c.t1_id==t2.c.t1_id). outerjoin(t3, criteria))]) self.assertRows(expr, [(10, 20, 30)]) expr = select( [t1.c.t1_id, t2.c.t2_id, t3.c.t3_id], and_(t1.c.t1_id < 19, t2.c.t2_id < 29, t3.c.t3_id < 39), from_obj=[(t1.outerjoin(t2, t1.c.t1_id==t2.c.t1_id). 
outerjoin(t3, criteria))]) self.assertRows(expr, [(10, 20, 30)]) def test_mixed(self): """Joins t1->t2, outer t2->t3.""" for criteria in (t2.c.t2_id==t3.c.t2_id, t3.c.t2_id==t2.c.t2_id): expr = select( [t1.c.t1_id, t2.c.t2_id, t3.c.t3_id], from_obj=[(t1.join(t2).outerjoin(t3, criteria))]) print expr self.assertRows(expr, [(10, 20, 30), (11, 21, None)]) def test_mixed_where(self): """Joins t1->t2, outer t2->t3, plus a where on each table in turn.""" for criteria in (t2.c.t2_id==t3.c.t2_id, t3.c.t2_id==t2.c.t2_id): expr = select( [t1.c.t1_id, t2.c.t2_id, t3.c.t3_id], t1.c.name == 't1 #10', from_obj=[(t1.join(t2).outerjoin(t3, criteria))]) self.assertRows(expr, [(10, 20, 30)]) expr = select( [t1.c.t1_id, t2.c.t2_id, t3.c.t3_id], t2.c.name == 't2 #20', from_obj=[(t1.join(t2).outerjoin(t3, criteria))]) self.assertRows(expr, [(10, 20, 30)]) expr = select( [t1.c.t1_id, t2.c.t2_id, t3.c.t3_id], t3.c.name == 't3 #30', from_obj=[(t1.join(t2).outerjoin(t3, criteria))]) self.assertRows(expr, [(10, 20, 30)]) expr = select( [t1.c.t1_id, t2.c.t2_id, t3.c.t3_id], and_(t1.c.name == 't1 #10', t2.c.name == 't2 #20'), from_obj=[(t1.join(t2).outerjoin(t3, criteria))]) self.assertRows(expr, [(10, 20, 30)]) expr = select( [t1.c.t1_id, t2.c.t2_id, t3.c.t3_id], and_(t2.c.name == 't2 #20', t3.c.name == 't3 #30'), from_obj=[(t1.join(t2).outerjoin(t3, criteria))]) self.assertRows(expr, [(10, 20, 30)]) expr = select( [t1.c.t1_id, t2.c.t2_id, t3.c.t3_id], and_(t1.c.name == 't1 #10', t2.c.name == 't2 #20', t3.c.name == 't3 #30'), from_obj=[(t1.join(t2).outerjoin(t3, criteria))]) self.assertRows(expr, [(10, 20, 30)]) class OperatorTest(fixtures.TestBase): @classmethod def setup_class(cls): global metadata, flds metadata = MetaData(testing.db) flds = Table('flds', metadata, Column('idcol', Integer, primary_key=True, test_needs_autoincrement=True), Column('intcol', Integer), Column('strcol', String(50)), ) metadata.create_all() flds.insert().execute([ dict(intcol=5, strcol='foo'), dict(intcol=13, strcol='bar') ]) @classmethod def teardown_class(cls): metadata.drop_all() # TODO: seems like more tests warranted for this setup. def test_modulo(self): eq_( select([flds.c.intcol % 3], order_by=flds.c.idcol).execute().fetchall(), [(2,), (1,)] ) @testing.requires.window_functions def test_over(self): eq_( select([ flds.c.intcol, func.row_number().over(order_by=flds.c.strcol) ]).execute().fetchall(), [(13, 1L), (5, 2L)] ) SQLAlchemy-0.8.4/test/sql/test_quote.py0000644000076500000240000005200212251150016020545 0ustar classicstaff00000000000000from sqlalchemy import * from sqlalchemy import sql, schema from sqlalchemy.sql import compiler from sqlalchemy.testing import fixtures, AssertsCompiledSQL from sqlalchemy import testing class QuoteTest(fixtures.TestBase, AssertsCompiledSQL): __dialect__ = 'default' @classmethod def setup_class(cls): # TODO: figure out which databases/which identifiers allow special # characters to be used, such as: spaces, quote characters, # punctuation characters, set up tests for those as well. 
global table1, table2 metadata = MetaData(testing.db) table1 = Table('WorstCase1', metadata, Column('lowercase', Integer, primary_key=True), Column('UPPERCASE', Integer), Column('MixedCase', Integer), Column('ASC', Integer, key='a123')) table2 = Table('WorstCase2', metadata, Column('desc', Integer, primary_key=True, key='d123'), Column('Union', Integer, key='u123'), Column('MixedCase', Integer)) table1.create() table2.create() def teardown(self): table1.delete().execute() table2.delete().execute() @classmethod def teardown_class(cls): table1.drop() table2.drop() def test_reflect(self): meta2 = MetaData(testing.db) t2 = Table('WorstCase1', meta2, autoload=True, quote=True) assert 'lowercase' in t2.c # indicates the DB returns unquoted names as # UPPERCASE, which we then assume are unquoted and go to # lower case. So we cannot accurately reflect quoted UPPERCASE # names from a "name normalize" backend, as they cannot be # distinguished from case-insensitive/unquoted names. if testing.db.dialect.requires_name_normalize: assert 'uppercase' in t2.c else: assert 'UPPERCASE' in t2.c # ASC OTOH is a reserved word, which is always quoted, so # with that name we keep the quotes on and it stays uppercase # regardless. Seems a little weird, though. assert 'ASC' in t2.c assert 'MixedCase' in t2.c def test_basic(self): table1.insert().execute( {'lowercase': 1, 'UPPERCASE': 2, 'MixedCase': 3, 'a123': 4}, {'lowercase': 2, 'UPPERCASE': 2, 'MixedCase': 3, 'a123': 4}, {'lowercase': 4, 'UPPERCASE': 3, 'MixedCase': 2, 'a123': 1}) table2.insert().execute( {'d123': 1, 'u123': 2, 'MixedCase': 3}, {'d123': 2, 'u123': 2, 'MixedCase': 3}, {'d123': 4, 'u123': 3, 'MixedCase': 2}) columns = [ table1.c.lowercase, table1.c.UPPERCASE, table1.c.MixedCase, table1.c.a123 ] result = select(columns).execute().fetchall() assert(result == [(1, 2, 3, 4), (2, 2, 3, 4), (4, 3, 2, 1)]) columns = [ table2.c.d123, table2.c.u123, table2.c.MixedCase ] result = select(columns).execute().fetchall() assert(result == [(1, 2, 3), (2, 2, 3), (4, 3, 2)]) def test_use_labels(self): table1.insert().execute( {'lowercase': 1, 'UPPERCASE': 2, 'MixedCase': 3, 'a123': 4}, {'lowercase': 2, 'UPPERCASE': 2, 'MixedCase': 3, 'a123': 4}, {'lowercase': 4, 'UPPERCASE': 3, 'MixedCase': 2, 'a123': 1}) table2.insert().execute( {'d123': 1, 'u123': 2, 'MixedCase': 3}, {'d123': 2, 'u123': 2, 'MixedCase': 3}, {'d123': 4, 'u123': 3, 'MixedCase': 2}) columns = [ table1.c.lowercase, table1.c.UPPERCASE, table1.c.MixedCase, table1.c.a123 ] result = select(columns, use_labels=True).execute().fetchall() assert(result == [(1, 2, 3, 4), (2, 2, 3, 4), (4, 3, 2, 1)]) columns = [ table2.c.d123, table2.c.u123, table2.c.MixedCase ] result = select(columns, use_labels=True).execute().fetchall() assert(result == [(1, 2, 3), (2, 2, 3), (4, 3, 2)]) @testing.crashes('oracle', 'FIXME: unknown, verify not fails_on') @testing.requires.subqueries def test_labels(self): """test the quoting of labels. If labels aren't quoted, a query in postgresql in particular will fail since it produces: SELECT LaLa.lowercase, LaLa."UPPERCASE", LaLa."MixedCase", LaLa."ASC" FROM ( SELECT DISTINCT "WorstCase1".lowercase AS lowercase, "WorstCase1"."UPPERCASE" AS UPPERCASE, "WorstCase1"."MixedCase" AS MixedCase, "WorstCase1"."ASC" AS ASC FROM "WorstCase1" ) AS LaLa where the "UPPERCASE" column of "LaLa" doesn't exist. 
""" x = table1.select(distinct=True).alias('LaLa').select().scalar() self.assert_compile( table1.select(distinct=True).alias('LaLa').select(), 'SELECT ' '"LaLa".lowercase, ' '"LaLa"."UPPERCASE", ' '"LaLa"."MixedCase", ' '"LaLa"."ASC" ' 'FROM (' 'SELECT DISTINCT ' '"WorstCase1".lowercase AS lowercase, ' '"WorstCase1"."UPPERCASE" AS "UPPERCASE", ' '"WorstCase1"."MixedCase" AS "MixedCase", ' '"WorstCase1"."ASC" AS "ASC" ' 'FROM "WorstCase1"' ') AS "LaLa"' ) def test_lower_case_names(self): # Create table with quote defaults metadata = MetaData() t1 = Table('t1', metadata, Column('col1', Integer), schema='foo') # Note that the names are not quoted b/c they are all lower case result = 'CREATE TABLE foo.t1 (col1 INTEGER)' self.assert_compile(schema.CreateTable(t1), result) # Create the same table with quotes set to True now metadata = MetaData() t1 = Table('t1', metadata, Column('col1', Integer, quote=True), schema='foo', quote=True, quote_schema=True) # Note that the names are now quoted result = 'CREATE TABLE "foo"."t1" ("col1" INTEGER)' self.assert_compile(schema.CreateTable(t1), result) def test_upper_case_names(self): # Create table with quote defaults metadata = MetaData() t1 = Table('TABLE1', metadata, Column('COL1', Integer), schema='FOO') # Note that the names are quoted b/c they are not all lower case result = 'CREATE TABLE "FOO"."TABLE1" ("COL1" INTEGER)' self.assert_compile(schema.CreateTable(t1), result) # Create the same table with quotes set to False now metadata = MetaData() t1 = Table('TABLE1', metadata, Column('COL1', Integer, quote=False), schema='FOO', quote=False, quote_schema=False) # Note that the names are now unquoted result = 'CREATE TABLE FOO.TABLE1 (COL1 INTEGER)' self.assert_compile(schema.CreateTable(t1), result) def test_mixed_case_names(self): # Create table with quote defaults metadata = MetaData() t1 = Table('Table1', metadata, Column('Col1', Integer), schema='Foo') # Note that the names are quoted b/c they are not all lower case result = 'CREATE TABLE "Foo"."Table1" ("Col1" INTEGER)' self.assert_compile(schema.CreateTable(t1), result) # Create the same table with quotes set to False now metadata = MetaData() t1 = Table('Table1', metadata, Column('Col1', Integer, quote=False), schema='Foo', quote=False, quote_schema=False) # Note that the names are now unquoted result = 'CREATE TABLE Foo.Table1 (Col1 INTEGER)' self.assert_compile(schema.CreateTable(t1), result) def test_numeric_initial_char(self): # Create table with quote defaults metadata = MetaData() t1 = Table('35table', metadata, Column('25column', Integer), schema='45schema') # Note that the names are quoted b/c the initial # character is in ['$','0', '1' ... '9'] result = 'CREATE TABLE "45schema"."35table" ("25column" INTEGER)' self.assert_compile(schema.CreateTable(t1), result) # Create the same table with quotes set to False now metadata = MetaData() t1 = Table('35table', metadata, Column('25column', Integer, quote=False), schema='45schema', quote=False, quote_schema=False) # Note that the names are now unquoted result = 'CREATE TABLE 45schema.35table (25column INTEGER)' self.assert_compile(schema.CreateTable(t1), result) def test_illegal_initial_char(self): # Create table with quote defaults metadata = MetaData() t1 = Table('$table', metadata, Column('$column', Integer), schema='$schema') # Note that the names are quoted b/c the initial # character is in ['$','0', '1' ... 
'9'] result = 'CREATE TABLE "$schema"."$table" ("$column" INTEGER)' self.assert_compile(schema.CreateTable(t1), result) # Create the same table with quotes set to False now metadata = MetaData() t1 = Table('$table', metadata, Column('$column', Integer, quote=False), schema='$schema', quote=False, quote_schema=False) # Note that the names are now unquoted result = 'CREATE TABLE $schema.$table ($column INTEGER)' self.assert_compile(schema.CreateTable(t1), result) def test_reserved_words(self): # Create table with quote defaults metadata = MetaData() table = Table('foreign', metadata, Column('col1', Integer), Column('from', Integer), Column('order', Integer), schema='create') # Note that the names are quoted b/c they are reserved words x = select([table.c.col1, table.c['from'], table.c.order]) self.assert_compile(x, 'SELECT ' '"create"."foreign".col1, ' '"create"."foreign"."from", ' '"create"."foreign"."order" ' 'FROM "create"."foreign"' ) # Create the same table with quotes set to False now metadata = MetaData() table = Table('foreign', metadata, Column('col1', Integer), Column('from', Integer, quote=False), Column('order', Integer, quote=False), schema='create', quote=False, quote_schema=False) # Note that the names are now unquoted x = select([table.c.col1, table.c['from'], table.c.order]) self.assert_compile(x, 'SELECT ' 'create.foreign.col1, ' 'create.foreign.from, ' 'create.foreign.order ' 'FROM create.foreign' ) def test_subquery(self): # Lower case names, should not quote metadata = MetaData() t1 = Table('t1', metadata, Column('col1', Integer), schema='foo') a = t1.select().alias('anon') b = select([1], a.c.col1 == 2, from_obj=a) self.assert_compile(b, 'SELECT 1 ' 'FROM (' 'SELECT ' 'foo.t1.col1 AS col1 ' 'FROM ' 'foo.t1' ') AS anon ' 'WHERE anon.col1 = :col1_1' ) # Lower case names, quotes on, should quote metadata = MetaData() t1 = Table('t1', metadata, Column('col1', Integer, quote=True), schema='foo', quote=True, quote_schema=True) a = t1.select().alias('anon') b = select([1], a.c.col1 == 2, from_obj=a) self.assert_compile(b, 'SELECT 1 ' 'FROM (' 'SELECT ' '"foo"."t1"."col1" AS "col1" ' 'FROM ' '"foo"."t1"' ') AS anon ' 'WHERE anon."col1" = :col1_1' ) # Not lower case names, should quote metadata = MetaData() t1 = Table('T1', metadata, Column('Col1', Integer), schema='Foo') a = t1.select().alias('Anon') b = select([1], a.c.Col1 == 2, from_obj=a) self.assert_compile(b, 'SELECT 1 ' 'FROM (' 'SELECT ' '"Foo"."T1"."Col1" AS "Col1" ' 'FROM ' '"Foo"."T1"' ') AS "Anon" ' 'WHERE ' '"Anon"."Col1" = :Col1_1' ) # Not lower case names, quotes off, should not quote metadata = MetaData() t1 = Table('T1', metadata, Column('Col1', Integer, quote=False), schema='Foo', quote=False, quote_schema=False) a = t1.select().alias('Anon') b = select([1], a.c.Col1 == 2, from_obj=a) self.assert_compile(b, 'SELECT 1 ' 'FROM (' 'SELECT ' 'Foo.T1.Col1 AS Col1 ' 'FROM ' 'Foo.T1' ') AS "Anon" ' 'WHERE ' '"Anon".Col1 = :Col1_1' ) def test_join(self): # Lower case names, should not quote metadata = MetaData() t1 = Table('t1', metadata, Column('col1', Integer)) t2 = Table('t2', metadata, Column('col1', Integer), Column('t1col1', Integer, ForeignKey('t1.col1'))) self.assert_compile(t2.join(t1).select(), 'SELECT ' 't2.col1, t2.t1col1, t1.col1 ' 'FROM ' 't2 ' 'JOIN ' 't1 ON t1.col1 = t2.t1col1' ) # Lower case names, quotes on, should quote metadata = MetaData() t1 = Table('t1', metadata, Column('col1', Integer, quote=True), quote=True) t2 = Table('t2', metadata, Column('col1', Integer, quote=True), Column('t1col1', 
Integer, ForeignKey('t1.col1'), quote=True), quote=True) self.assert_compile(t2.join(t1).select(), 'SELECT ' '"t2"."col1", "t2"."t1col1", "t1"."col1" ' 'FROM ' '"t2" ' 'JOIN ' '"t1" ON "t1"."col1" = "t2"."t1col1"' ) # Not lower case names, should quote metadata = MetaData() t1 = Table('T1', metadata, Column('Col1', Integer)) t2 = Table('T2', metadata, Column('Col1', Integer), Column('T1Col1', Integer, ForeignKey('T1.Col1'))) self.assert_compile(t2.join(t1).select(), 'SELECT ' '"T2"."Col1", "T2"."T1Col1", "T1"."Col1" ' 'FROM ' '"T2" ' 'JOIN ' '"T1" ON "T1"."Col1" = "T2"."T1Col1"' ) # Not lower case names, quotes off, should not quote metadata = MetaData() t1 = Table('T1', metadata, Column('Col1', Integer, quote=False), quote=False) t2 = Table('T2', metadata, Column('Col1', Integer, quote=False), Column('T1Col1', Integer, ForeignKey('T1.Col1'), quote=False), quote=False) self.assert_compile(t2.join(t1).select(), 'SELECT ' 'T2.Col1, T2.T1Col1, T1.Col1 ' 'FROM ' 'T2 ' 'JOIN ' 'T1 ON T1.Col1 = T2.T1Col1' ) def test_label_and_alias(self): # Lower case names, should not quote metadata = MetaData() table = Table('t1', metadata, Column('col1', Integer)) x = select([table.c.col1.label('label1')]).alias('alias1') self.assert_compile(select([x.c.label1]), 'SELECT ' 'alias1.label1 ' 'FROM (' 'SELECT ' 't1.col1 AS label1 ' 'FROM t1' ') AS alias1' ) # Not lower case names, should quote metadata = MetaData() table = Table('T1', metadata, Column('Col1', Integer)) x = select([table.c.Col1.label('Label1')]).alias('Alias1') self.assert_compile(select([x.c.Label1]), 'SELECT ' '"Alias1"."Label1" ' 'FROM (' 'SELECT ' '"T1"."Col1" AS "Label1" ' 'FROM "T1"' ') AS "Alias1"' ) def test_literal_column_already_with_quotes(self): # Lower case names metadata = MetaData() table = Table('t1', metadata, Column('col1', Integer)) # Note that 'col1' is already quoted (literal_column) columns = [sql.literal_column("'col1'").label('label1')] x = select(columns, from_obj=[table]).alias('alias1') x = x.select() self.assert_compile(x, 'SELECT ' 'alias1.label1 ' 'FROM (' 'SELECT ' '\'col1\' AS label1 ' 'FROM t1' ') AS alias1' ) # Not lower case names metadata = MetaData() table = Table('T1', metadata, Column('Col1', Integer)) # Note that 'Col1' is already quoted (literal_column) columns = [sql.literal_column("'Col1'").label('Label1')] x = select(columns, from_obj=[table]).alias('Alias1') x = x.select() self.assert_compile(x, 'SELECT ' '"Alias1"."Label1" ' 'FROM (' 'SELECT ' '\'Col1\' AS "Label1" ' 'FROM "T1"' ') AS "Alias1"' ) def test_apply_labels(self): # Not lower case names, should quote metadata = MetaData() t1 = Table('T1', metadata, Column('Col1', Integer), schema='Foo') self.assert_compile(t1.select().apply_labels(), 'SELECT ' '"Foo"."T1"."Col1" AS "Foo_T1_Col1" ' 'FROM ' '"Foo"."T1"' ) # Not lower case names, quotes off metadata = MetaData() t1 = Table('T1', metadata, Column('Col1', Integer, quote=False), schema='Foo', quote=False, quote_schema=False) # TODO: is this what we really want here ? # what if table/schema *are* quoted? 
self.assert_compile(t1.select().apply_labels(), 'SELECT ' 'Foo.T1.Col1 AS Foo_T1_Col1 ' 'FROM ' 'Foo.T1' ) def test_quote_flag_propagate_check_constraint(self): m = MetaData() t = Table('t', m, Column('x', Integer, quote=True)) CheckConstraint(t.c.x > 5) self.assert_compile( schema.CreateTable(t), "CREATE TABLE t (" '"x" INTEGER, ' 'CHECK ("x" > 5)' ")" ) def test_quote_flag_propagate_index(self): m = MetaData() t = Table('t', m, Column('x', Integer, quote=True)) idx = Index("foo", t.c.x) self.assert_compile( schema.CreateIndex(idx), 'CREATE INDEX foo ON t ("x")' ) class PreparerTest(fixtures.TestBase): """Test the db-agnostic quoting services of IdentifierPreparer.""" def test_unformat(self): prep = compiler.IdentifierPreparer(None) unformat = prep.unformat_identifiers def a_eq(have, want): if have != want: print "Wanted %s" % want print "Received %s" % have self.assert_(have == want) a_eq(unformat('foo'), ['foo']) a_eq(unformat('"foo"'), ['foo']) a_eq(unformat("'foo'"), ["'foo'"]) a_eq(unformat('foo.bar'), ['foo', 'bar']) a_eq(unformat('"foo"."bar"'), ['foo', 'bar']) a_eq(unformat('foo."bar"'), ['foo', 'bar']) a_eq(unformat('"foo".bar'), ['foo', 'bar']) a_eq(unformat('"foo"."b""a""r"."baz"'), ['foo', 'b"a"r', 'baz']) def test_unformat_custom(self): class Custom(compiler.IdentifierPreparer): def __init__(self, dialect): super(Custom, self).__init__( dialect, initial_quote='`', final_quote='`') def _escape_identifier(self, value): return value.replace('`', '``') def _unescape_identifier(self, value): return value.replace('``', '`') prep = Custom(None) unformat = prep.unformat_identifiers def a_eq(have, want): if have != want: print "Wanted %s" % want print "Received %s" % have self.assert_(have == want) a_eq(unformat('foo'), ['foo']) a_eq(unformat('`foo`'), ['foo']) a_eq(unformat(`'foo'`), ["'foo'"]) a_eq(unformat('foo.bar'), ['foo', 'bar']) a_eq(unformat('`foo`.`bar`'), ['foo', 'bar']) a_eq(unformat('foo.`bar`'), ['foo', 'bar']) a_eq(unformat('`foo`.bar'), ['foo', 'bar']) a_eq(unformat('`foo`.`b``a``r`.`baz`'), ['foo', 'b`a`r', 'baz']) SQLAlchemy-0.8.4/test/sql/test_returning.py0000644000076500000240000001756412251150016021443 0ustar classicstaff00000000000000from sqlalchemy.testing import eq_ from sqlalchemy import * from sqlalchemy import testing from sqlalchemy.testing.schema import Table, Column from sqlalchemy.types import TypeDecorator from sqlalchemy.testing import fixtures, AssertsExecutionResults, engines, \ assert_raises_message from sqlalchemy import exc as sa_exc class ReturningTest(fixtures.TestBase, AssertsExecutionResults): __requires__ = 'returning', def setup(self): meta = MetaData(testing.db) global table, GoofyType class GoofyType(TypeDecorator): impl = String def process_bind_param(self, value, dialect): if value is None: return None return "FOO" + value def process_result_value(self, value, dialect): if value is None: return None return value + "BAR" table = Table('tables', meta, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('persons', Integer), Column('full', Boolean), Column('goofy', GoofyType(50)) ) table.create(checkfirst=True) def teardown(self): table.drop() def test_column_targeting(self): result = table.insert().returning(table.c.id, table.c.full).execute({'persons': 1, 'full': False}) row = result.first() assert row[table.c.id] == row['id'] == 1 assert row[table.c.full] == row['full'] == False result = table.insert().values(persons=5, full=True, goofy="somegoofy").\ returning(table.c.persons, table.c.full, 
table.c.goofy).execute() row = result.first() assert row[table.c.persons] == row['persons'] == 5 assert row[table.c.full] == row['full'] == True eq_(row[table.c.goofy], row['goofy']) eq_(row['goofy'], "FOOsomegoofyBAR") @testing.fails_on('firebird', "fb can't handle returning x AS y") def test_labeling(self): result = table.insert().values(persons=6).\ returning(table.c.persons.label('lala')).execute() row = result.first() assert row['lala'] == 6 @testing.fails_on('firebird', "fb/kintersbasdb can't handle the bind params") @testing.fails_on('oracle+zxjdbc', "JDBC driver bug") def test_anon_expressions(self): result = table.insert().values(goofy="someOTHERgoofy").\ returning(func.lower(table.c.goofy, type_=GoofyType)).execute() row = result.first() eq_(row[0], "foosomeothergoofyBAR") result = table.insert().values(persons=12).\ returning(table.c.persons + 18).execute() row = result.first() eq_(row[0], 30) def test_update_returning(self): table.insert().execute([{'persons': 5, 'full': False}, {'persons': 3, 'full': False}]) result = table.update(table.c.persons > 4, dict(full=True)).returning(table.c.id).execute() eq_(result.fetchall(), [(1,)]) result2 = select([table.c.id, table.c.full]).order_by(table.c.id).execute() eq_(result2.fetchall(), [(1, True), (2, False)]) def test_insert_returning(self): result = table.insert().returning(table.c.id).execute({'persons': 1, 'full': False}) eq_(result.fetchall(), [(1,)]) @testing.requires.multivalues_inserts def test_multirow_returning(self): ins = table.insert().returning(table.c.id, table.c.persons).values( [ {'persons': 1, 'full': False}, {'persons': 2, 'full': True}, {'persons': 3, 'full': False}, ] ) result = testing.db.execute(ins) eq_( result.fetchall(), [(1, 1), (2, 2), (3, 3)] ) def test_no_ipk_on_returning(self): result = testing.db.execute( table.insert().returning(table.c.id), {'persons': 1, 'full': False} ) assert_raises_message( sa_exc.InvalidRequestError, "Can't call inserted_primary_key when returning\(\) is used.", getattr, result, "inserted_primary_key" ) @testing.fails_on_everything_except('postgresql', 'firebird') def test_literal_returning(self): if testing.against("postgresql"): literal_true = "true" else: literal_true = "1" result4 = testing.db.execute('insert into tables (id, persons, "full") ' 'values (5, 10, %s) returning persons' % literal_true) eq_([dict(row) for row in result4], [{'persons': 10}]) def test_delete_returning(self): table.insert().execute([{'persons': 5, 'full': False}, {'persons': 3, 'full': False}]) result = table.delete(table.c.persons > 4).returning(table.c.id).execute() eq_(result.fetchall(), [(1,)]) result2 = select([table.c.id, table.c.full]).order_by(table.c.id).execute() eq_(result2.fetchall(), [(2, False),]) class SequenceReturningTest(fixtures.TestBase): __requires__ = 'returning', 'sequences' def setup(self): meta = MetaData(testing.db) global table, seq seq = Sequence('tid_seq') table = Table('tables', meta, Column('id', Integer, seq, primary_key=True), Column('data', String(50)) ) table.create(checkfirst=True) def teardown(self): table.drop() def test_insert(self): r = table.insert().values(data='hi').returning(table.c.id).execute() assert r.first() == (1, ) assert seq.execute() == 2 class KeyReturningTest(fixtures.TestBase, AssertsExecutionResults): """test returning() works with columns that define 'key'.""" __requires__ = 'returning', def setup(self): meta = MetaData(testing.db) global table table = Table('tables', meta, Column('id', Integer, primary_key=True, key='foo_id', 
test_needs_autoincrement=True), Column('data', String(20)), ) table.create(checkfirst=True) def teardown(self): table.drop() @testing.exclude('firebird', '<', (2, 0), '2.0+ feature') @testing.exclude('postgresql', '<', (8, 2), '8.2+ feature') def test_insert(self): result = table.insert().returning(table.c.foo_id).execute(data='somedata') row = result.first() assert row[table.c.foo_id] == row['id'] == 1 result = table.select().execute().first() assert row[table.c.foo_id] == row['id'] == 1 class ImplicitReturningFlag(fixtures.TestBase): def test_flag_turned_off(self): e = engines.testing_engine(options={'implicit_returning':False}) assert e.dialect.implicit_returning is False c = e.connect() assert e.dialect.implicit_returning is False def test_flag_turned_on(self): e = engines.testing_engine(options={'implicit_returning':True}) assert e.dialect.implicit_returning is True c = e.connect() assert e.dialect.implicit_returning is True def test_flag_turned_default(self): supports = [False] def go(): supports[0] = True testing.requires.returning(go)() e = engines.testing_engine() # starts as False. This is because all of Firebird, # Postgresql, Oracle, SQL Server started supporting RETURNING # as of a certain version, and the flag is not set until # version detection occurs. If some DB comes along that has # RETURNING in all cases, this test can be adjusted. assert e.dialect.implicit_returning is False # version detection on connect sets it c = e.connect() assert e.dialect.implicit_returning is supports[0] SQLAlchemy-0.8.4/test/sql/test_rowcount.py0000644000076500000240000000443512251150016021277 0ustar classicstaff00000000000000from sqlalchemy import * from sqlalchemy.testing import fixtures, AssertsExecutionResults from sqlalchemy import testing class FoundRowsTest(fixtures.TestBase, AssertsExecutionResults): """tests rowcount functionality""" __requires__ = ('sane_rowcount', ) @classmethod def setup_class(cls): global employees_table, metadata metadata = MetaData(testing.db) employees_table = Table('employees', metadata, Column('employee_id', Integer, Sequence('employee_id_seq', optional=True), primary_key=True), Column('name', String(50)), Column('department', String(1)), ) metadata.create_all() def setup(self): global data data = [ ('Angela', 'A'), ('Andrew', 'A'), ('Anand', 'A'), ('Bob', 'B'), ('Bobette', 'B'), ('Buffy', 'B'), ('Charlie', 'C'), ('Cynthia', 'C'), ('Chris', 'C') ] i = employees_table.insert() i.execute(*[{'name':n, 'department':d} for n, d in data]) def teardown(self): employees_table.delete().execute() @classmethod def teardown_class(cls): metadata.drop_all() def testbasic(self): s = employees_table.select() r = s.execute().fetchall() assert len(r) == len(data) def test_update_rowcount1(self): # WHERE matches 3, 3 rows changed department = employees_table.c.department r = employees_table.update(department=='C').execute(department='Z') print "expecting 3, dialect reports %s" % r.rowcount assert r.rowcount == 3 def test_update_rowcount2(self): # WHERE matches 3, 0 rows changed department = employees_table.c.department r = employees_table.update(department=='C').execute(department='C') print "expecting 3, dialect reports %s" % r.rowcount assert r.rowcount == 3 def test_delete_rowcount(self): # WHERE matches 3, 3 rows deleted department = employees_table.c.department r = employees_table.delete(department=='C').execute() print "expecting 3, dialect reports %s" % r.rowcount assert r.rowcount == 3 
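# A minimal, self-contained sketch of the rowcount behaviour exercised by
# FoundRowsTest above, assuming an in-memory SQLite engine.  The table name,
# column names and data here are hypothetical, chosen only for illustration,
# and are not part of the test suite proper.

if __name__ == '__main__':
    from sqlalchemy import create_engine, MetaData, Table, Column, Integer, String

    engine = create_engine('sqlite://')
    meta = MetaData()
    people = Table('people', meta,
                   Column('id', Integer, primary_key=True),
                   Column('department', String(1)))
    meta.create_all(engine)

    # three rows, two of them in department 'A'
    engine.execute(people.insert(), [
        {'department': 'A'},
        {'department': 'A'},
        {'department': 'B'},
    ])

    # an UPDATE whose WHERE clause matches two rows; on drivers that report
    # accurate rowcounts (pysqlite does for single-statement execution),
    # ResultProxy.rowcount gives the number of rows matched
    result = engine.execute(
        people.update().
        where(people.c.department == 'A').
        values(department='Z'))
    assert result.rowcount == 2

    # DELETE reports its count the same way
    result = engine.execute(
        people.delete().where(people.c.department == 'Z'))
    assert result.rowcount == 2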
SQLAlchemy-0.8.4/test/sql/test_selectable.py0000644000076500000240000017666112251150016021535 0ustar classicstaff00000000000000"""Test various algorithmic properties of selectables.""" from sqlalchemy.testing import eq_, assert_raises, \ assert_raises_message, is_ from sqlalchemy import * from sqlalchemy.testing import fixtures, AssertsCompiledSQL, \ AssertsExecutionResults from sqlalchemy import testing from sqlalchemy.sql import util as sql_util, visitors, expression from sqlalchemy import exc from sqlalchemy.sql import table, column, null from sqlalchemy import util from sqlalchemy.schema import Column, Table, MetaData metadata = MetaData() table1 = Table('table1', metadata, Column('col1', Integer, primary_key=True), Column('col2', String(20)), Column('col3', Integer), Column('colx', Integer), ) table2 = Table('table2', metadata, Column('col1', Integer, primary_key=True), Column('col2', Integer, ForeignKey('table1.col1')), Column('col3', String(20)), Column('coly', Integer), ) keyed = Table('keyed', metadata, Column('x', Integer, key='colx'), Column('y', Integer, key='coly'), Column('z', Integer), ) class SelectableTest(fixtures.TestBase, AssertsExecutionResults, AssertsCompiledSQL): __dialect__ = 'default' def test_indirect_correspondence_on_labels(self): # this test depends upon 'distance' to # get the right result # same column three times s = select([table1.c.col1.label('c2'), table1.c.col1, table1.c.col1.label('c1')]) # this tests the same thing as # test_direct_correspondence_on_labels below - # that the presence of label() affects the 'distance' assert s.corresponding_column(table1.c.col1) is s.c.col1 assert s.corresponding_column(s.c.col1) is s.c.col1 assert s.corresponding_column(s.c.c1) is s.c.c1 def test_labeled_subquery_twice(self): scalar_select = select([table1.c.col1]).label('foo') s1 = select([scalar_select]) s2 = select([scalar_select, scalar_select]) eq_( s1.c.foo.proxy_set, set([s1.c.foo, scalar_select, scalar_select.element]) ) eq_( s2.c.foo.proxy_set, set([s2.c.foo, scalar_select, scalar_select.element]) ) assert s1.corresponding_column(scalar_select) is s1.c.foo assert s2.corresponding_column(scalar_select) is s2.c.foo def test_label_grouped_still_corresponds(self): label = select([table1.c.col1]).label('foo') label2 = label.self_group() s1 = select([label]) s2 = select([label2]) assert s1.corresponding_column(label) is s1.c.foo assert s2.corresponding_column(label) is s2.c.foo def test_direct_correspondence_on_labels(self): # this test depends on labels being part # of the proxy set to get the right result l1, l2 = table1.c.col1.label('foo'), table1.c.col1.label('bar') sel = select([l1, l2]) sel2 = sel.alias() assert sel2.corresponding_column(l1) is sel2.c.foo assert sel2.corresponding_column(l2) is sel2.c.bar sel2 = select([table1.c.col1.label('foo'), table1.c.col2.label('bar')]) sel3 = sel.union(sel2).alias() assert sel3.corresponding_column(l1) is sel3.c.foo assert sel3.corresponding_column(l2) is sel3.c.bar def test_keyed_gen(self): s = select([keyed]) eq_(s.c.colx.key, 'colx') eq_(s.c.colx.name, 'x') assert s.corresponding_column(keyed.c.colx) is s.c.colx assert s.corresponding_column(keyed.c.coly) is s.c.coly assert s.corresponding_column(keyed.c.z) is s.c.z sel2 = s.alias() assert sel2.corresponding_column(keyed.c.colx) is sel2.c.colx assert sel2.corresponding_column(keyed.c.coly) is sel2.c.coly assert sel2.corresponding_column(keyed.c.z) is sel2.c.z def test_keyed_label_gen(self): s = select([keyed]).apply_labels() assert 
s.corresponding_column(keyed.c.colx) is s.c.keyed_colx assert s.corresponding_column(keyed.c.coly) is s.c.keyed_coly assert s.corresponding_column(keyed.c.z) is s.c.keyed_z sel2 = s.alias() assert sel2.corresponding_column(keyed.c.colx) is sel2.c.keyed_colx assert sel2.corresponding_column(keyed.c.coly) is sel2.c.keyed_coly assert sel2.corresponding_column(keyed.c.z) is sel2.c.keyed_z def test_keyed_c_collection_upper(self): c = Column('foo', Integer, key='bar') t = Table('t', MetaData(), c) is_(t.c.bar, c) def test_keyed_c_collection_lower(self): c = column('foo') c.key = 'bar' t = table('t', c) is_(t.c.bar, c) def test_clone_c_proxy_key_upper(self): c = Column('foo', Integer, key='bar') t = Table('t', MetaData(), c) s = select([t])._clone() assert c in s.c.bar.proxy_set def test_clone_c_proxy_key_lower(self): c = column('foo') c.key = 'bar' t = table('t', c) s = select([t])._clone() assert c in s.c.bar.proxy_set def test_no_error_on_unsupported_expr_key(self): from sqlalchemy.dialects.postgresql import ARRAY t = table('t', column('x', ARRAY(Integer))) expr = t.c.x[5] s = select([t, expr]) eq_( s.c.keys(), ['x', expr.anon_label] ) def test_cloned_intersection(self): t1 = table('t1', column('x')) t2 = table('t2', column('x')) s1 = t1.select() s2 = t2.select() s3 = t1.select() s1c1 = s1._clone() s1c2 = s1._clone() s2c1 = s2._clone() s3c1 = s3._clone() eq_( expression._cloned_intersection( [s1c1, s3c1], [s2c1, s1c2] ), set([s1c1]) ) def test_cloned_difference(self): t1 = table('t1', column('x')) t2 = table('t2', column('x')) s1 = t1.select() s2 = t2.select() s3 = t1.select() s1c1 = s1._clone() s1c2 = s1._clone() s2c1 = s2._clone() s2c2 = s2._clone() s3c1 = s3._clone() eq_( expression._cloned_difference( [s1c1, s2c1, s3c1], [s2c1, s1c2] ), set([s3c1]) ) def test_distance_on_aliases(self): a1 = table1.alias('a1') for s in (select([a1, table1], use_labels=True), select([table1, a1], use_labels=True)): assert s.corresponding_column(table1.c.col1) \ is s.c.table1_col1 assert s.corresponding_column(a1.c.col1) is s.c.a1_col1 def test_join_against_self(self): jj = select([table1.c.col1.label('bar_col1')]) jjj = join(table1, jj, table1.c.col1 == jj.c.bar_col1) # test column directly agaisnt itself assert jjj.corresponding_column(jjj.c.table1_col1) \ is jjj.c.table1_col1 assert jjj.corresponding_column(jj.c.bar_col1) is jjj.c.bar_col1 # test alias of the join j2 = jjj.alias('foo') assert j2.corresponding_column(table1.c.col1) \ is j2.c.table1_col1 def test_clone_append_column(self): sel = select([literal_column('1').label('a')]) eq_(sel.c.keys(), ['a']) cloned = visitors.ReplacingCloningVisitor().traverse(sel) cloned.append_column(literal_column('2').label('b')) cloned.append_column(func.foo()) eq_(cloned.c.keys(), ['a', 'b', 'foo()']) def test_append_column_after_replace_selectable(self): basesel = select([literal_column('1').label('a')]) tojoin = select([ literal_column('1').label('a'), literal_column('2').label('b') ]) basefrom = basesel.alias('basefrom') joinfrom = tojoin.alias('joinfrom') sel = select([basefrom.c.a]) replaced = sel.replace_selectable( basefrom, basefrom.join(joinfrom, basefrom.c.a == joinfrom.c.a) ) self.assert_compile( replaced, "SELECT basefrom.a FROM (SELECT 1 AS a) AS basefrom " "JOIN (SELECT 1 AS a, 2 AS b) AS joinfrom " "ON basefrom.a = joinfrom.a" ) replaced.append_column(joinfrom.c.b) self.assert_compile( replaced, "SELECT basefrom.a, joinfrom.b FROM (SELECT 1 AS a) AS basefrom " "JOIN (SELECT 1 AS a, 2 AS b) AS joinfrom " "ON basefrom.a = joinfrom.a" ) def 
test_against_cloned_non_table(self): # test that corresponding column digs across # clone boundaries with anonymous labeled elements col = func.count().label('foo') sel = select([col]) sel2 = visitors.ReplacingCloningVisitor().traverse(sel) assert sel2.corresponding_column(col) is sel2.c.foo sel3 = visitors.ReplacingCloningVisitor().traverse(sel2) assert sel3.corresponding_column(col) is sel3.c.foo def test_with_only_generative(self): s1 = table1.select().as_scalar() self.assert_compile( s1.with_only_columns([s1]), "SELECT (SELECT table1.col1, table1.col2, " "table1.col3, table1.colx FROM table1) AS anon_1" ) def test_type_coerce_preserve_subq(self): class MyType(TypeDecorator): impl = Integer stmt = select([type_coerce(column('x'), MyType).label('foo')]) stmt2 = stmt.select() assert isinstance(stmt._raw_columns[0].type, MyType) assert isinstance(stmt.c.foo.type, MyType) assert isinstance(stmt2.c.foo.type, MyType) def test_select_on_table(self): sel = select([table1, table2], use_labels=True) assert sel.corresponding_column(table1.c.col1) \ is sel.c.table1_col1 assert sel.corresponding_column(table1.c.col1, require_embedded=True) is sel.c.table1_col1 assert table1.corresponding_column(sel.c.table1_col1) \ is table1.c.col1 assert table1.corresponding_column(sel.c.table1_col1, require_embedded=True) is None def test_join_against_join(self): j = outerjoin(table1, table2, table1.c.col1 == table2.c.col2) jj = select([table1.c.col1.label('bar_col1')], from_obj=[j]).alias('foo') jjj = join(table1, jj, table1.c.col1 == jj.c.bar_col1) assert jjj.corresponding_column(jjj.c.table1_col1) \ is jjj.c.table1_col1 j2 = jjj.alias('foo') assert j2.corresponding_column(jjj.c.table1_col1) \ is j2.c.table1_col1 assert jjj.corresponding_column(jj.c.bar_col1) is jj.c.bar_col1 def test_table_alias(self): a = table1.alias('a') j = join(a, table2) criterion = a.c.col1 == table2.c.col2 self.assert_(criterion.compare(j.onclause)) def test_union(self): # tests that we can correspond a column in a Select statement # with a certain Table, against a column in a Union where one of # its underlying Selects matches to that same Table u = select([table1.c.col1, table1.c.col2, table1.c.col3, table1.c.colx, null().label('coly' )]).union(select([table2.c.col1, table2.c.col2, table2.c.col3, null().label('colx'), table2.c.coly])) s1 = table1.select(use_labels=True) s2 = table2.select(use_labels=True) assert u.corresponding_column(s1.c.table1_col2) is u.c.col2 assert u.corresponding_column(s2.c.table2_col2) is u.c.col2 def test_union_precedence(self): # conflicting column correspondence should be resolved based on # the order of the select()s in the union s1 = select([table1.c.col1, table1.c.col2]) s2 = select([table1.c.col2, table1.c.col1]) s3 = select([table1.c.col3, table1.c.colx]) s4 = select([table1.c.colx, table1.c.col3]) u1 = union(s1, s2) assert u1.corresponding_column(table1.c.col1) is u1.c.col1 assert u1.corresponding_column(table1.c.col2) is u1.c.col2 u1 = union(s1, s2, s3, s4) assert u1.corresponding_column(table1.c.col1) is u1.c.col1 assert u1.corresponding_column(table1.c.col2) is u1.c.col2 assert u1.corresponding_column(table1.c.colx) is u1.c.col2 assert u1.corresponding_column(table1.c.col3) is u1.c.col1 def test_singular_union(self): u = union(select([table1.c.col1, table1.c.col2, table1.c.col3]), select([table1.c.col1, table1.c.col2, table1.c.col3])) u = union(select([table1.c.col1, table1.c.col2, table1.c.col3])) assert u.c.col1 is not None assert u.c.col2 is not None assert u.c.col3 is not None def 
test_alias_union(self): # same as testunion, except its an alias of the union u = select([table1.c.col1, table1.c.col2, table1.c.col3, table1.c.colx, null().label('coly' )]).union(select([table2.c.col1, table2.c.col2, table2.c.col3, null().label('colx'), table2.c.coly])).alias('analias') s1 = table1.select(use_labels=True) s2 = table2.select(use_labels=True) assert u.corresponding_column(s1.c.table1_col2) is u.c.col2 assert u.corresponding_column(s2.c.table2_col2) is u.c.col2 assert u.corresponding_column(s2.c.table2_coly) is u.c.coly assert s2.corresponding_column(u.c.coly) is s2.c.table2_coly def test_select_union(self): # like testaliasunion, but off a Select off the union. u = select([table1.c.col1, table1.c.col2, table1.c.col3, table1.c.colx, null().label('coly' )]).union(select([table2.c.col1, table2.c.col2, table2.c.col3, null().label('colx'), table2.c.coly])).alias('analias') s = select([u]) s1 = table1.select(use_labels=True) s2 = table2.select(use_labels=True) assert s.corresponding_column(s1.c.table1_col2) is s.c.col2 assert s.corresponding_column(s2.c.table2_col2) is s.c.col2 def test_union_against_join(self): # same as testunion, except its an alias of the union u = select([table1.c.col1, table1.c.col2, table1.c.col3, table1.c.colx, null().label('coly' )]).union(select([table2.c.col1, table2.c.col2, table2.c.col3, null().label('colx'), table2.c.coly])).alias('analias') j1 = table1.join(table2) assert u.corresponding_column(j1.c.table1_colx) is u.c.colx assert j1.corresponding_column(u.c.colx) is j1.c.table1_colx def test_join(self): a = join(table1, table2) print str(a.select(use_labels=True)) b = table2.alias('b') j = join(a, b) print str(j) criterion = a.c.table1_col1 == b.c.col2 self.assert_(criterion.compare(j.onclause)) def test_select_alias(self): a = table1.select().alias('a') j = join(a, table2) criterion = a.c.col1 == table2.c.col2 self.assert_(criterion.compare(j.onclause)) def test_select_labels(self): a = table1.select(use_labels=True) j = join(a, table2) criterion = a.c.table1_col1 == table2.c.col2 self.assert_(criterion.compare(j.onclause)) def test_scalar_cloned_comparator(self): sel = select([table1.c.col1]).as_scalar() expr = sel == table1.c.col1 sel2 = visitors.ReplacingCloningVisitor().traverse(sel) expr2 = sel2 == table1.c.col1 is_(expr2.left, sel2) def test_column_labels(self): a = select([table1.c.col1.label('acol1'), table1.c.col2.label('acol2'), table1.c.col3.label('acol3')]) j = join(a, table2) criterion = a.c.acol1 == table2.c.col2 self.assert_(criterion.compare(j.onclause)) def test_labeled_select_correspoinding(self): l1 = select([func.max(table1.c.col1)]).label('foo') s = select([l1]) eq_(s.corresponding_column(l1), s.c.foo) s = select([table1.c.col1, l1]) eq_(s.corresponding_column(l1), s.c.foo) def test_select_alias_labels(self): a = table2.select(use_labels=True).alias('a') j = join(a, table1) criterion = table1.c.col1 == a.c.table2_col2 self.assert_(criterion.compare(j.onclause)) def test_table_joined_to_select_of_table(self): metadata = MetaData() a = Table('a', metadata, Column('id', Integer, primary_key=True)) j2 = select([a.c.id.label('aid')]).alias('bar') j3 = a.join(j2, j2.c.aid == a.c.id) j4 = select([j3]).alias('foo') assert j4.corresponding_column(j2.c.aid) is j4.c.aid assert j4.corresponding_column(a.c.id) is j4.c.id def test_two_metadata_join_raises(self): m = MetaData() m2 = MetaData() t1 = Table('t1', m, Column('id', Integer), Column('id2', Integer)) t2 = Table('t2', m, Column('id', Integer, ForeignKey('t1.id'))) t3 = Table('t3', 
m2, Column('id', Integer, ForeignKey('t1.id2'))) s = select([t2, t3], use_labels=True) assert_raises(exc.NoReferencedTableError, s.join, t1) def test_multi_label_chain_naming_col(self): # See [ticket:2167] for this one. l1 = table1.c.col1.label('a') l2 = select([l1]).label('b') s = select([l2]) assert s.c.b is not None self.assert_compile( s.select(), "SELECT b FROM (SELECT (SELECT table1.col1 AS a FROM table1) AS b)" ) s2 = select([s.label('c')]) self.assert_compile( s2.select(), "SELECT c FROM (SELECT (SELECT (SELECT table1.col1 AS a FROM table1) AS b) AS c)" ) def test_self_referential_select_raises(self): t = table('t', column('x')) s = select([t]) s.append_whereclause(s.c.x > 5) assert_raises_message( exc.InvalidRequestError, r"select\(\) construct refers to itself as a FROM", s.compile ) def test_unusual_column_elements_text(self): """test that .c excludes text().""" s = select([table1.c.col1, text("foo")]) eq_( list(s.c), [s.c.col1] ) def test_unusual_column_elements_clauselist(self): """Test that raw ClauseList is expanded into .c.""" from sqlalchemy.sql.expression import ClauseList s = select([table1.c.col1, ClauseList(table1.c.col2, table1.c.col3)]) eq_( list(s.c), [s.c.col1, s.c.col2, s.c.col3] ) def test_unusual_column_elements_boolean_clauselist(self): """test that BooleanClauseList is placed as single element in .c.""" c2 = and_(table1.c.col2 == 5, table1.c.col3 == 4) s = select([table1.c.col1, c2]) eq_( list(s.c), [s.c.col1, s.corresponding_column(c2)] ) def test_from_list_deferred_constructor(self): c1 = Column('c1', Integer) c2 = Column('c2', Integer) s = select([c1]) t = Table('t', MetaData(), c1, c2) eq_(c1._from_objects, [t]) eq_(c2._from_objects, [t]) self.assert_compile(select([c1]), "SELECT t.c1 FROM t") self.assert_compile(select([c2]), "SELECT t.c2 FROM t") def test_from_list_deferred_whereclause(self): c1 = Column('c1', Integer) c2 = Column('c2', Integer) s = select([c1]).where(c1 == 5) t = Table('t', MetaData(), c1, c2) eq_(c1._from_objects, [t]) eq_(c2._from_objects, [t]) self.assert_compile(select([c1]), "SELECT t.c1 FROM t") self.assert_compile(select([c2]), "SELECT t.c2 FROM t") def test_from_list_deferred_fromlist(self): m = MetaData() t1 = Table('t1', m, Column('x', Integer)) c1 = Column('c1', Integer) s = select([c1]).where(c1 == 5).select_from(t1) t2 = Table('t2', MetaData(), c1) eq_(c1._from_objects, [t2]) self.assert_compile(select([c1]), "SELECT t2.c1 FROM t2") def test_from_list_deferred_cloning(self): c1 = Column('c1', Integer) c2 = Column('c2', Integer) s = select([c1]) s2 = select([c2]) s3 = sql_util.ClauseAdapter(s).traverse(s2) Table('t', MetaData(), c1, c2) self.assert_compile( s3, "SELECT t.c2 FROM t" ) def test_from_list_with_columns(self): table1 = table('t1', column('a')) table2 = table('t2', column('b')) s1 = select([table1.c.a, table2.c.b]) self.assert_compile(s1, "SELECT t1.a, t2.b FROM t1, t2" ) s2 = s1.with_only_columns([table2.c.b]) self.assert_compile(s2, "SELECT t2.b FROM t2" ) s3 = sql_util.ClauseAdapter(table1).traverse(s1) self.assert_compile(s3, "SELECT t1.a, t2.b FROM t1, t2" ) s4 = s3.with_only_columns([table2.c.b]) self.assert_compile(s4, "SELECT t2.b FROM t2" ) def test_from_list_warning_against_existing(self): c1 = Column('c1', Integer) s = select([c1]) # force a compile. self.assert_compile( s, "SELECT c1" ) Table('t', MetaData(), c1) self.assert_compile( s, "SELECT t.c1 FROM t" ) def test_from_list_recovers_after_warning(self): c1 = Column('c1', Integer) c2 = Column('c2', Integer) s = select([c1]) # force a compile. 
eq_(str(s), "SELECT c1") @testing.emits_warning() def go(): return Table('t', MetaData(), c1, c2) t = go() eq_(c1._from_objects, [t]) eq_(c2._from_objects, [t]) # 's' has been baked. Can't afford # not caching select._froms. # hopefully the warning will clue the user self.assert_compile(s, "SELECT t.c1 FROM t") self.assert_compile(select([c1]), "SELECT t.c1 FROM t") self.assert_compile(select([c2]), "SELECT t.c2 FROM t") def test_label_gen_resets_on_table(self): c1 = Column('c1', Integer) eq_(c1._label, "c1") Table('t1', MetaData(), c1) eq_(c1._label, "t1_c1") class RefreshForNewColTest(fixtures.TestBase): def test_join_uninit(self): a = table('a', column('x')) b = table('b', column('y')) j = a.join(b, a.c.x == b.c.y) q = column('q') b.append_column(q) j._refresh_for_new_column(q) assert j.c.b_q is q def test_join_init(self): a = table('a', column('x')) b = table('b', column('y')) j = a.join(b, a.c.x == b.c.y) j.c q = column('q') b.append_column(q) j._refresh_for_new_column(q) assert j.c.b_q is q def test_join_samename_init(self): a = table('a', column('x')) b = table('b', column('y')) j = a.join(b, a.c.x == b.c.y) j.c q = column('x') b.append_column(q) j._refresh_for_new_column(q) assert j.c.b_x is q def test_select_samename_init(self): a = table('a', column('x')) b = table('b', column('y')) s = select([a, b]).apply_labels() s.c q = column('x') b.append_column(q) s._refresh_for_new_column(q) assert q in s.c.b_x.proxy_set def test_aliased_select_samename_uninit(self): a = table('a', column('x')) b = table('b', column('y')) s = select([a, b]).apply_labels().alias() q = column('x') b.append_column(q) s._refresh_for_new_column(q) assert q in s.c.b_x.proxy_set def test_aliased_select_samename_init(self): a = table('a', column('x')) b = table('b', column('y')) s = select([a, b]).apply_labels().alias() s.c q = column('x') b.append_column(q) s._refresh_for_new_column(q) assert q in s.c.b_x.proxy_set def test_aliased_select_irrelevant(self): a = table('a', column('x')) b = table('b', column('y')) c = table('c', column('z')) s = select([a, b]).apply_labels().alias() s.c q = column('x') c.append_column(q) s._refresh_for_new_column(q) assert 'c_x' not in s.c def test_aliased_select_no_cols_clause(self): a = table('a', column('x')) s = select([a.c.x]).apply_labels().alias() s.c q = column('q') a.append_column(q) s._refresh_for_new_column(q) assert 'a_q' not in s.c def test_union_uninit(self): a = table('a', column('x')) s1 = select([a]) s2 = select([a]) s3 = s1.union(s2) q = column('q') a.append_column(q) s3._refresh_for_new_column(q) assert a.c.q in s3.c.q.proxy_set def test_union_init_raises(self): a = table('a', column('x')) s1 = select([a]) s2 = select([a]) s3 = s1.union(s2) s3.c q = column('q') a.append_column(q) assert_raises_message( NotImplementedError, "CompoundSelect constructs don't support addition of " "columns to underlying selectables", s3._refresh_for_new_column, q ) def test_nested_join_uninit(self): a = table('a', column('x')) b = table('b', column('y')) c = table('c', column('z')) j = a.join(b, a.c.x == b.c.y).join(c, b.c.y == c.c.z) q = column('q') b.append_column(q) j._refresh_for_new_column(q) assert j.c.b_q is q def test_nested_join_init(self): a = table('a', column('x')) b = table('b', column('y')) c = table('c', column('z')) j = a.join(b, a.c.x == b.c.y).join(c, b.c.y == c.c.z) j.c q = column('q') b.append_column(q) j._refresh_for_new_column(q) assert j.c.b_q is q class AnonLabelTest(fixtures.TestBase): """Test behaviors fixed by [ticket:2168].""" def 
test_anon_labels_named_column(self): c1 = column('x') assert c1.label(None) is not c1 eq_(str(select([c1.label(None)])), "SELECT x AS x_1") def test_anon_labels_literal_column(self): c1 = literal_column('x') assert c1.label(None) is not c1 eq_(str(select([c1.label(None)])), "SELECT x AS x_1") def test_anon_labels_func(self): c1 = func.count('*') assert c1.label(None) is not c1 eq_(str(select([c1])), "SELECT count(:param_1) AS count_1") c2 = select([c1]).compile() eq_(str(select([c1.label(None)])), "SELECT count(:param_1) AS count_1") def test_named_labels_named_column(self): c1 = column('x') eq_(str(select([c1.label('y')])), "SELECT x AS y") def test_named_labels_literal_column(self): c1 = literal_column('x') eq_(str(select([c1.label('y')])), "SELECT x AS y") class JoinConditionTest(fixtures.TestBase, AssertsExecutionResults): def test_join_condition(self): m = MetaData() t1 = Table('t1', m, Column('id', Integer)) t2 = Table('t2', m, Column('id', Integer), Column('t1id', ForeignKey('t1.id'))) t3 = Table('t3', m, Column('id', Integer), Column('t1id', ForeignKey('t1.id')), Column('t2id', ForeignKey('t2.id'))) t4 = Table('t4', m, Column('id', Integer), Column('t2id', ForeignKey('t2.id'))) t5 = Table('t5', m, Column('t1id1', ForeignKey('t1.id')), Column('t1id2', ForeignKey('t1.id')), ) t1t2 = t1.join(t2) t2t3 = t2.join(t3) for (left, right, a_subset, expected) in [ (t1, t2, None, t1.c.id == t2.c.t1id), (t1t2, t3, t2, t1t2.c.t2_id == t3.c.t2id), (t2t3, t1, t3, t1.c.id == t3.c.t1id), (t2t3, t4, None, t2t3.c.t2_id == t4.c.t2id), (t2t3, t4, t3, t2t3.c.t2_id == t4.c.t2id), (t2t3.join(t1), t4, None, t2t3.c.t2_id == t4.c.t2id), (t2t3.join(t1), t4, t1, t2t3.c.t2_id == t4.c.t2id), (t1t2, t2t3, t2, t1t2.c.t2_id == t2t3.c.t3_t2id), ]: assert expected.compare(sql_util.join_condition(left, right, a_subset=a_subset)) # these are ambiguous, or have no joins for left, right, a_subset in [ (t1t2, t3, None), (t2t3, t1, None), (t1, t4, None), (t1t2, t2t3, None), (t5, t1, None), (t5.select(use_labels=True), t1, None) ]: assert_raises( exc.ArgumentError, sql_util.join_condition, left, right, a_subset=a_subset ) als = t2t3.alias() # test join's behavior, including natural for left, right, expected in [ (t1, t2, t1.c.id == t2.c.t1id), (t1t2, t3, t1t2.c.t2_id == t3.c.t2id), (t2t3, t1, t1.c.id == t3.c.t1id), (t2t3, t4, t2t3.c.t2_id == t4.c.t2id), (t2t3, t4, t2t3.c.t2_id == t4.c.t2id), (t2t3.join(t1), t4, t2t3.c.t2_id == t4.c.t2id), (t2t3.join(t1), t4, t2t3.c.t2_id == t4.c.t2id), (t1t2, als, t1t2.c.t2_id == als.c.t3_t2id) ]: assert expected.compare( left.join(right).onclause ) # TODO: this raises due to right side being "grouped", and no # longer has FKs. Did we want to make FromGrouping friendlier # ? assert_raises_message(exc.ArgumentError, "Perhaps you meant to convert the right " "side to a subquery using alias\(\)\?", t1t2.join, t2t3) assert_raises_message(exc.ArgumentError, "Perhaps you meant to convert the right " "side to a subquery using alias\(\)\?", t1t2.join, t2t3.select(use_labels=True)) def test_join_cond_no_such_unrelated_table(self): m = MetaData() # bounding the "good" column with two "bad" ones is so to # try to get coverage to get the "continue" statements # in the loop... 
t1 = Table('t1', m, Column('y', Integer, ForeignKey('t22.id')), Column('x', Integer, ForeignKey('t2.id')), Column('q', Integer, ForeignKey('t22.id')), ) t2 = Table('t2', m, Column('id', Integer)) assert sql_util.join_condition(t1, t2).compare(t1.c.x == t2.c.id) assert sql_util.join_condition(t2, t1).compare(t1.c.x == t2.c.id) def test_join_cond_no_such_unrelated_column(self): m = MetaData() t1 = Table('t1', m, Column('x', Integer, ForeignKey('t2.id')), Column('y', Integer, ForeignKey('t3.q'))) t2 = Table('t2', m, Column('id', Integer)) Table('t3', m, Column('id', Integer)) assert sql_util.join_condition(t1, t2).compare(t1.c.x == t2.c.id) assert sql_util.join_condition(t2, t1).compare(t1.c.x == t2.c.id) def test_join_cond_no_such_related_table(self): m1 = MetaData() m2 = MetaData() t1 = Table('t1', m1, Column('x', Integer, ForeignKey('t2.id'))) t2 = Table('t2', m2, Column('id', Integer)) assert_raises_message( exc.NoReferencedTableError, "Foreign key associated with column 't1.x' could not find " "table 't2' with which to generate a foreign key to " "target column 'id'", sql_util.join_condition, t1, t2 ) assert_raises_message( exc.NoReferencedTableError, "Foreign key associated with column 't1.x' could not find " "table 't2' with which to generate a foreign key to " "target column 'id'", sql_util.join_condition, t2, t1 ) def test_join_cond_no_such_related_column(self): m = MetaData() t1 = Table('t1', m, Column('x', Integer, ForeignKey('t2.q'))) t2 = Table('t2', m, Column('id', Integer)) assert_raises_message( exc.NoReferencedColumnError, "Could not create ForeignKey 't2.q' on table 't1': " "table 't2' has no column named 'q'", sql_util.join_condition, t1, t2 ) assert_raises_message( exc.NoReferencedColumnError, "Could not create ForeignKey 't2.q' on table 't1': " "table 't2' has no column named 'q'", sql_util.join_condition, t2, t1 ) class PrimaryKeyTest(fixtures.TestBase, AssertsExecutionResults): def test_join_pk_collapse_implicit(self): """test that redundant columns in a join get 'collapsed' into a minimal primary key, which is the root column along a chain of foreign key relationships.""" meta = MetaData() a = Table('a', meta, Column('id', Integer, primary_key=True)) b = Table('b', meta, Column('id', Integer, ForeignKey('a.id'), primary_key=True)) c = Table('c', meta, Column('id', Integer, ForeignKey('b.id'), primary_key=True)) d = Table('d', meta, Column('id', Integer, ForeignKey('c.id'), primary_key=True)) assert c.c.id.references(b.c.id) assert not d.c.id.references(a.c.id) assert list(a.join(b).primary_key) == [a.c.id] assert list(b.join(c).primary_key) == [b.c.id] assert list(a.join(b).join(c).primary_key) == [a.c.id] assert list(b.join(c).join(d).primary_key) == [b.c.id] assert list(d.join(c).join(b).primary_key) == [b.c.id] assert list(a.join(b).join(c).join(d).primary_key) == [a.c.id] def test_join_pk_collapse_explicit(self): """test that redundant columns in a join get 'collapsed' into a minimal primary key, which is the root column along a chain of explicit join conditions.""" meta = MetaData() a = Table('a', meta, Column('id', Integer, primary_key=True), Column('x', Integer)) b = Table('b', meta, Column('id', Integer, ForeignKey('a.id'), primary_key=True), Column('x', Integer)) c = Table('c', meta, Column('id', Integer, ForeignKey('b.id'), primary_key=True), Column('x', Integer)) d = Table('d', meta, Column('id', Integer, ForeignKey('c.id'), primary_key=True), Column('x', Integer)) print list(a.join(b, a.c.x == b.c.id).primary_key) assert list(a.join(b, a.c.x == 
b.c.id).primary_key) == [a.c.id] assert list(b.join(c, b.c.x == c.c.id).primary_key) == [b.c.id] assert list(a.join(b).join(c, c.c.id == b.c.x).primary_key) \ == [a.c.id] assert list(b.join(c, c.c.x == b.c.id).join(d).primary_key) \ == [b.c.id] assert list(b.join(c, c.c.id == b.c.x).join(d).primary_key) \ == [b.c.id] assert list(d.join(b, d.c.id == b.c.id).join(c, b.c.id == c.c.x).primary_key) == [b.c.id] assert list(a.join(b).join(c, c.c.id == b.c.x).join(d).primary_key) == [a.c.id] assert list(a.join(b, and_(a.c.id == b.c.id, a.c.x == b.c.id)).primary_key) == [a.c.id] def test_init_doesnt_blowitaway(self): meta = MetaData() a = Table('a', meta, Column('id', Integer, primary_key=True), Column('x', Integer)) b = Table('b', meta, Column('id', Integer, ForeignKey('a.id'), primary_key=True), Column('x', Integer)) j = a.join(b) assert list(j.primary_key) == [a.c.id] j.foreign_keys assert list(j.primary_key) == [a.c.id] def test_non_column_clause(self): meta = MetaData() a = Table('a', meta, Column('id', Integer, primary_key=True), Column('x', Integer)) b = Table('b', meta, Column('id', Integer, ForeignKey('a.id'), primary_key=True), Column('x', Integer, primary_key=True)) j = a.join(b, and_(a.c.id == b.c.id, b.c.x == 5)) assert str(j) == "a JOIN b ON a.id = b.id AND b.x = :x_1", str(j) assert list(j.primary_key) == [a.c.id, b.c.x] def test_onclause_direction(self): metadata = MetaData() employee = Table('Employee', metadata, Column('name', String(100)), Column('id', Integer, primary_key=True), ) engineer = Table('Engineer', metadata, Column('id', Integer, ForeignKey('Employee.id'), primary_key=True)) eq_(util.column_set(employee.join(engineer, employee.c.id == engineer.c.id).primary_key), util.column_set([employee.c.id])) eq_(util.column_set(employee.join(engineer, engineer.c.id == employee.c.id).primary_key), util.column_set([employee.c.id])) class ReduceTest(fixtures.TestBase, AssertsExecutionResults): def test_reduce(self): meta = MetaData() t1 = Table('t1', meta, Column('t1id', Integer, primary_key=True), Column('t1data', String(30))) t2 = Table('t2', meta, Column('t2id', Integer, ForeignKey('t1.t1id'), primary_key=True), Column('t2data', String(30))) t3 = Table('t3', meta, Column('t3id', Integer, ForeignKey('t2.t2id'), primary_key=True), Column('t3data', String(30))) eq_(util.column_set(sql_util.reduce_columns([ t1.c.t1id, t1.c.t1data, t2.c.t2id, t2.c.t2data, t3.c.t3id, t3.c.t3data, ])), util.column_set([t1.c.t1id, t1.c.t1data, t2.c.t2data, t3.c.t3data])) def test_reduce_selectable(self): metadata = MetaData() engineers = Table('engineers', metadata, Column('engineer_id', Integer, primary_key=True), Column('engineer_name', String(50))) managers = Table('managers', metadata, Column('manager_id', Integer, primary_key=True), Column('manager_name', String(50))) s = select([engineers, managers]).where(engineers.c.engineer_name == managers.c.manager_name) eq_(util.column_set(sql_util.reduce_columns(list(s.c), s)), util.column_set([s.c.engineer_id, s.c.engineer_name, s.c.manager_id])) def test_reduce_generation(self): m = MetaData() t1 = Table('t1', m, Column('x', Integer, primary_key=True), Column('y', Integer)) t2 = Table('t2', m, Column('z', Integer, ForeignKey('t1.x')), Column('q', Integer)) s1 = select([t1, t2]) s2 = s1.reduce_columns(only_synonyms=False) eq_( set(s2.inner_columns), set([t1.c.x, t1.c.y, t2.c.q]) ) s2 = s1.reduce_columns() eq_( set(s2.inner_columns), set([t1.c.x, t1.c.y, t2.c.z, t2.c.q]) ) def test_reduce_only_synonym_fk(self): m = MetaData() t1 = Table('t1', m, Column('x', 
Integer, primary_key=True), Column('y', Integer)) t2 = Table('t2', m, Column('x', Integer, ForeignKey('t1.x')), Column('q', Integer, ForeignKey('t1.y'))) s1 = select([t1, t2]) s1 = s1.reduce_columns(only_synonyms=True) eq_( set(s1.c), set([s1.c.x, s1.c.y, s1.c.q]) ) def test_reduce_only_synonym_lineage(self): m = MetaData() t1 = Table('t1', m, Column('x', Integer, primary_key=True), Column('y', Integer), Column('z', Integer) ) # test that the first appearance in the columns clause # wins - t1 is first, t1.c.x wins s1 = select([t1]) s2 = select([t1, s1]).where(t1.c.x == s1.c.x).where(s1.c.y == t1.c.z) eq_( set(s2.reduce_columns().inner_columns), set([t1.c.x, t1.c.y, t1.c.z, s1.c.y, s1.c.z]) ) # reverse order, s1.c.x wins s1 = select([t1]) s2 = select([s1, t1]).where(t1.c.x == s1.c.x).where(s1.c.y == t1.c.z) eq_( set(s2.reduce_columns().inner_columns), set([s1.c.x, t1.c.y, t1.c.z, s1.c.y, s1.c.z]) ) def test_reduce_aliased_join(self): metadata = MetaData() people = Table('people', metadata, Column('person_id', Integer, Sequence('person_id_seq', optional=True), primary_key=True), Column('name', String(50)), Column('type', String(30))) engineers = Table( 'engineers', metadata, Column('person_id', Integer, ForeignKey('people.person_id' ), primary_key=True), Column('status', String(30)), Column('engineer_name', String(50)), Column('primary_language', String(50)), ) managers = Table('managers', metadata, Column('person_id', Integer, ForeignKey('people.person_id'), primary_key=True), Column('status', String(30)), Column('manager_name', String(50))) pjoin = \ people.outerjoin(engineers).outerjoin(managers).\ select(use_labels=True).alias('pjoin' ) eq_(util.column_set(sql_util.reduce_columns([pjoin.c.people_person_id, pjoin.c.engineers_person_id, pjoin.c.managers_person_id])), util.column_set([pjoin.c.people_person_id])) def test_reduce_aliased_union(self): metadata = MetaData() item_table = Table('item', metadata, Column('id', Integer, ForeignKey('base_item.id'), primary_key=True), Column('dummy', Integer, default=0)) base_item_table = Table('base_item', metadata, Column('id', Integer, primary_key=True), Column('child_name', String(255), default=None)) from sqlalchemy.orm.util import polymorphic_union item_join = polymorphic_union({ 'BaseItem': base_item_table.select( base_item_table.c.child_name == 'BaseItem'), 'Item': base_item_table.join(item_table)}, None, 'item_join') eq_(util.column_set(sql_util.reduce_columns([item_join.c.id, item_join.c.dummy, item_join.c.child_name])), util.column_set([item_join.c.id, item_join.c.dummy, item_join.c.child_name])) def test_reduce_aliased_union_2(self): metadata = MetaData() page_table = Table('page', metadata, Column('id', Integer, primary_key=True)) magazine_page_table = Table('magazine_page', metadata, Column('page_id', Integer, ForeignKey('page.id'), primary_key=True)) classified_page_table = Table('classified_page', metadata, Column('magazine_page_id', Integer, ForeignKey('magazine_page.page_id'), primary_key=True)) # this is essentially the union formed by the ORM's # polymorphic_union function. we define two versions with # different ordering of selects. # # the first selectable has the "real" column # classified_page.magazine_page_id pjoin = union( select([ page_table.c.id, magazine_page_table.c.page_id, classified_page_table.c.magazine_page_id ]). select_from( page_table.join(magazine_page_table). join(classified_page_table)), select([ page_table.c.id, magazine_page_table.c.page_id, cast(null(), Integer).label('magazine_page_id') ]). 
select_from(page_table.join(magazine_page_table)) ).alias('pjoin') eq_(util.column_set(sql_util.reduce_columns([pjoin.c.id, pjoin.c.page_id, pjoin.c.magazine_page_id])), util.column_set([pjoin.c.id])) # the first selectable has a CAST, which is a placeholder for # classified_page.magazine_page_id in the second selectable. # reduce_columns needs to take into account all foreign keys # derived from pjoin.c.magazine_page_id. the UNION construct # currently makes the external column look like that of the # first selectable only. pjoin = union(select([ page_table.c.id, magazine_page_table.c.page_id, cast(null(), Integer).label('magazine_page_id') ]). select_from(page_table.join(magazine_page_table)), select([ page_table.c.id, magazine_page_table.c.page_id, classified_page_table.c.magazine_page_id ]). select_from(page_table.join(magazine_page_table). join(classified_page_table)) ).alias('pjoin') eq_(util.column_set(sql_util.reduce_columns([pjoin.c.id, pjoin.c.page_id, pjoin.c.magazine_page_id])), util.column_set([pjoin.c.id])) class DerivedTest(fixtures.TestBase, AssertsExecutionResults): def test_table(self): meta = MetaData() t1 = Table('t1', meta, Column('c1', Integer, primary_key=True), Column('c2', String(30))) t2 = Table('t2', meta, Column('c1', Integer, primary_key=True), Column('c2', String(30))) assert t1.is_derived_from(t1) assert not t2.is_derived_from(t1) def test_alias(self): meta = MetaData() t1 = Table('t1', meta, Column('c1', Integer, primary_key=True), Column('c2', String(30))) t2 = Table('t2', meta, Column('c1', Integer, primary_key=True), Column('c2', String(30))) assert t1.alias().is_derived_from(t1) assert not t2.alias().is_derived_from(t1) assert not t1.is_derived_from(t1.alias()) assert not t1.is_derived_from(t2.alias()) def test_select(self): meta = MetaData() t1 = Table('t1', meta, Column('c1', Integer, primary_key=True), Column('c2', String(30))) t2 = Table('t2', meta, Column('c1', Integer, primary_key=True), Column('c2', String(30))) assert t1.select().is_derived_from(t1) assert not t2.select().is_derived_from(t1) assert select([t1, t2]).is_derived_from(t1) assert t1.select().alias('foo').is_derived_from(t1) assert select([t1, t2]).alias('foo').is_derived_from(t1) assert not t2.select().alias('foo').is_derived_from(t1) class AnnotationsTest(fixtures.TestBase): def test_hashing(self): t = table('t', column('x')) a = t.alias() s = t.select() s2 = a.select() for obj in [ t, t.c.x, a, s, s2, t.c.x > 1, (t.c.x > 1).label(None) ]: annot = obj._annotate({}) eq_(set([obj]), set([annot])) def test_compare(self): t = table('t', column('x'), column('y')) x_a = t.c.x._annotate({}) assert t.c.x.compare(x_a) assert x_a.compare(t.c.x) assert not x_a.compare(t.c.y) assert not t.c.y.compare(x_a) assert (t.c.x == 5).compare(x_a == 5) assert not (t.c.y == 5).compare(x_a == 5) s = select([t]) x_p = s.c.x assert not x_a.compare(x_p) assert not t.c.x.compare(x_p) x_p_a = x_p._annotate({}) assert x_p_a.compare(x_p) assert x_p.compare(x_p_a) assert not x_p_a.compare(x_a) def test_late_name_add(self): from sqlalchemy.schema import Column c1 = Column(Integer) c1_a = c1._annotate({"foo": "bar"}) c1.name = 'somename' eq_(c1_a.name, 'somename') def test_late_table_add(self): c1 = Column("foo", Integer) c1_a = c1._annotate({"foo": "bar"}) t = Table('t', MetaData(), c1) is_(c1_a.table, t) def test_custom_constructions(self): from sqlalchemy.schema import Column class MyColumn(Column): def __init__(self): Column.__init__(self, 'foo', Integer) _constructor = Column t1 = Table('t1', MetaData(), 
MyColumn()) s1 = t1.select() assert isinstance(t1.c.foo, MyColumn) assert isinstance(s1.c.foo, Column) annot_1 = t1.c.foo._annotate({}) s2 = select([annot_1]) assert isinstance(s2.c.foo, Column) annot_2 = s1._annotate({}) assert isinstance(annot_2.c.foo, Column) def test_annotated_corresponding_column(self): table1 = table('table1', column("col1")) s1 = select([table1.c.col1]) t1 = s1._annotate({}) t2 = s1 # t1 needs to share the same _make_proxy() columns as t2, even # though it's annotated. otherwise paths will diverge once they # are corresponded against "inner" below. assert t1.c is t2.c assert t1.c.col1 is t2.c.col1 inner = select([s1]) assert inner.corresponding_column(t2.c.col1, require_embedded=False) \ is inner.corresponding_column(t2.c.col1, require_embedded=True) is inner.c.col1 assert inner.corresponding_column(t1.c.col1, require_embedded=False) \ is inner.corresponding_column(t1.c.col1, require_embedded=True) is inner.c.col1 def test_annotated_visit(self): table1 = table('table1', column("col1"), column("col2")) bin = table1.c.col1 == bindparam('foo', value=None) assert str(bin) == "table1.col1 = :foo" def visit_binary(b): b.right = table1.c.col2 b2 = visitors.cloned_traverse(bin, {}, {'binary': visit_binary}) assert str(b2) == "table1.col1 = table1.col2" b3 = visitors.cloned_traverse(bin._annotate({}), {}, {'binary': visit_binary}) assert str(b3) == 'table1.col1 = table1.col2' def visit_binary(b): b.left = bindparam('bar') b4 = visitors.cloned_traverse(b2, {}, {'binary': visit_binary}) assert str(b4) == ":bar = table1.col2" b5 = visitors.cloned_traverse(b3, {}, {'binary': visit_binary}) assert str(b5) == ":bar = table1.col2" def test_annotate_aliased(self): t1 = table('t1', column('c1')) s = select([(t1.c.c1 + 3).label('bat')]) a = s.alias() a = sql_util._deep_annotate(a, {'foo': 'bar'}) eq_(a._annotations['foo'], 'bar') eq_(a.element._annotations['foo'], 'bar') def test_annotate_expressions(self): table1 = table('table1', column('col1'), column('col2')) for expr, expected in [(table1.c.col1, 'table1.col1'), (table1.c.col1 == 5, 'table1.col1 = :col1_1'), (table1.c.col1.in_([2, 3, 4]), 'table1.col1 IN (:col1_1, :col1_2, ' ':col1_3)')]: eq_(str(expr), expected) eq_(str(expr._annotate({})), expected) eq_(str(sql_util._deep_annotate(expr, {})), expected) eq_(str(sql_util._deep_annotate(expr, {}, exclude=[table1.c.col1])), expected) def test_deannotate(self): table1 = table('table1', column("col1"), column("col2")) bin = table1.c.col1 == bindparam('foo', value=None) b2 = sql_util._deep_annotate(bin, {'_orm_adapt': True}) b3 = sql_util._deep_deannotate(b2) b4 = sql_util._deep_deannotate(bin) for elem in (b2._annotations, b2.left._annotations): assert '_orm_adapt' in elem for elem in b3._annotations, b3.left._annotations, \ b4._annotations, b4.left._annotations: assert elem == {} assert b2.left is not bin.left assert b3.left is not b2.left is not bin.left assert b4.left is bin.left # since column is immutable # deannotate copies the element assert bin.right is not b2.right is not b3.right is not b4.right def test_annotate_unique_traversal(self): """test that items are copied only once during annotate, deannotate traversal #2453 - however note this was modified by #1401, and it's likely that re49563072578 is helping us with the str() comparison case now, as deannotate is making clones again in some cases. 
""" table1 = table('table1', column('x')) table2 = table('table2', column('y')) a1 = table1.alias() s = select([a1.c.x]).select_from( a1.join(table2, a1.c.x == table2.c.y) ) for sel in ( sql_util._deep_deannotate(s), visitors.cloned_traverse(s, {}, {}), visitors.replacement_traverse(s, {}, lambda x: None) ): # the columns clause isn't changed at all assert sel._raw_columns[0].table is a1 assert sel._froms[0] is sel._froms[1].left eq_(str(s), str(sel)) # when we are modifying annotations sets only # partially, each element is copied unconditionally # when encountered. for sel in ( sql_util._deep_deannotate(s, {"foo": "bar"}), sql_util._deep_annotate(s, {'foo': 'bar'}), ): assert sel._froms[0] is not sel._froms[1].left # but things still work out due to # re49563072578 eq_(str(s), str(sel)) def test_annotate_varied_annot_same_col(self): """test two instances of the same column with different annotations preserving them when deep_annotate is run on them. """ t1 = table('table1', column("col1"), column("col2")) s = select([t1.c.col1._annotate({"foo":"bar"})]) s2 = select([t1.c.col1._annotate({"bat":"hoho"})]) s3 = s.union(s2) sel = sql_util._deep_annotate(s3, {"new": "thing"}) eq_( sel.selects[0]._raw_columns[0]._annotations, {"foo": "bar", "new": "thing"} ) eq_( sel.selects[1]._raw_columns[0]._annotations, {"bat": "hoho", "new": "thing"} ) def test_deannotate_2(self): table1 = table('table1', column("col1"), column("col2")) j = table1.c.col1._annotate({"remote": True}) == \ table1.c.col2._annotate({"local": True}) j2 = sql_util._deep_deannotate(j) eq_( j.left._annotations, {"remote": True} ) eq_( j2.left._annotations, {} ) def test_deannotate_3(self): table1 = table('table1', column("col1"), column("col2"), column("col3"), column("col4")) j = and_( table1.c.col1._annotate({"remote": True}) == table1.c.col2._annotate({"local": True}), table1.c.col3._annotate({"remote": True}) == table1.c.col4._annotate({"local": True}) ) j2 = sql_util._deep_deannotate(j) eq_( j.clauses[0].left._annotations, {"remote": True} ) eq_( j2.clauses[0].left._annotations, {} ) def test_annotate_fromlist_preservation(self): """test the FROM list in select still works even when multiple annotate runs have created copies of the same selectable #2453, continued """ table1 = table('table1', column('x')) table2 = table('table2', column('y')) a1 = table1.alias() s = select([a1.c.x]).select_from( a1.join(table2, a1.c.x == table2.c.y) ) assert_s = select([select([s])]) for fn in ( sql_util._deep_deannotate, lambda s: sql_util._deep_annotate(s, {'foo': 'bar'}), lambda s: visitors.cloned_traverse(s, {}, {}), lambda s: visitors.replacement_traverse(s, {}, lambda x: None) ): sel = fn(select([fn(select([fn(s)]))])) eq_(str(assert_s), str(sel)) def test_bind_unique_test(self): table('t', column('a'), column('b')) b = bindparam("bind", value="x", unique=True) # the annotation of "b" should render the # same. 
The "unique" test in compiler should # also pass, [ticket:2425] eq_(str(or_(b, b._annotate({"foo": "bar"}))), ":bind_1 OR :bind_1") def test_comparators_cleaned_out_construction(self): c = column('a') comp1 = c.comparator c1 = c._annotate({"foo": "bar"}) comp2 = c1.comparator assert comp1 is not comp2 def test_comparators_cleaned_out_reannotate(self): c = column('a') c1 = c._annotate({"foo": "bar"}) comp1 = c1.comparator c2 = c1._annotate({"bat": "hoho"}) comp2 = c2.comparator assert comp1 is not comp2 def test_comparator_cleanout_integration(self): c = column('a') c1 = c._annotate({"foo": "bar"}) comp1 = c1.comparator c2 = c1._annotate({"bat": "hoho"}) comp2 = c2.comparator assert (c2 == 5).left._annotations == {"foo": "bar", "bat": "hoho"} class WithLabelsTest(fixtures.TestBase): def _assert_labels_warning(self, s): assert_raises_message( exc.SAWarning, r"replaced by Column.*, which has the same key", lambda: s.c ) def _assert_result_keys(self, s, keys): compiled = s.compile() eq_(set(compiled.result_map), set(keys)) def _assert_subq_result_keys(self, s, keys): compiled = s.select().compile() eq_(set(compiled.result_map), set(keys)) def _names_overlap(self): m = MetaData() t1 = Table('t1', m, Column('x', Integer)) t2 = Table('t2', m, Column('x', Integer)) return select([t1, t2]) def test_names_overlap_nolabel(self): sel = self._names_overlap() self._assert_labels_warning(sel) self._assert_result_keys(sel, ['x']) def test_names_overlap_label(self): sel = self._names_overlap().apply_labels() eq_( sel.c.keys(), ['t1_x', 't2_x'] ) self._assert_result_keys(sel, ['t1_x', 't2_x']) def _names_overlap_keys_dont(self): m = MetaData() t1 = Table('t1', m, Column('x', Integer, key='a')) t2 = Table('t2', m, Column('x', Integer, key='b')) return select([t1, t2]) def test_names_overlap_keys_dont_nolabel(self): sel = self._names_overlap_keys_dont() eq_( sel.c.keys(), ['a', 'b'] ) self._assert_result_keys(sel, ['x']) def test_names_overlap_keys_dont_label(self): sel = self._names_overlap_keys_dont().apply_labels() eq_( sel.c.keys(), ['t1_a', 't2_b'] ) self._assert_result_keys(sel, ['t1_x', 't2_x']) def _labels_overlap(self): m = MetaData() t1 = Table('t', m, Column('x_id', Integer)) t2 = Table('t_x', m, Column('id', Integer)) return select([t1, t2]) def test_labels_overlap_nolabel(self): sel = self._labels_overlap() eq_( sel.c.keys(), ['x_id', 'id'] ) self._assert_result_keys(sel, ['x_id', 'id']) def test_labels_overlap_label(self): sel = self._labels_overlap().apply_labels() t2 = sel.froms[1] eq_( sel.c.keys(), ['t_x_id', t2.c.id.anon_label] ) self._assert_result_keys(sel, ['t_x_id', 'id_1']) self._assert_subq_result_keys(sel, ['t_x_id', 'id_1']) def _labels_overlap_keylabels_dont(self): m = MetaData() t1 = Table('t', m, Column('x_id', Integer, key='a')) t2 = Table('t_x', m, Column('id', Integer, key='b')) return select([t1, t2]) def test_labels_overlap_keylabels_dont_nolabel(self): sel = self._labels_overlap_keylabels_dont() eq_(sel.c.keys(), ['a', 'b']) self._assert_result_keys(sel, ['x_id', 'id']) def test_labels_overlap_keylabels_dont_label(self): sel = self._labels_overlap_keylabels_dont().apply_labels() eq_(sel.c.keys(), ['t_a', 't_x_b']) self._assert_result_keys(sel, ['t_x_id', 'id_1']) def _keylabels_overlap_labels_dont(self): m = MetaData() t1 = Table('t', m, Column('a', Integer, key='x_id')) t2 = Table('t_x', m, Column('b', Integer, key='id')) return select([t1, t2]) def test_keylabels_overlap_labels_dont_nolabel(self): sel = self._keylabels_overlap_labels_dont() eq_(sel.c.keys(), ['x_id', 
'id']) self._assert_result_keys(sel, ['a', 'b']) def test_keylabels_overlap_labels_dont_label(self): sel = self._keylabels_overlap_labels_dont().apply_labels() t2 = sel.froms[1] eq_(sel.c.keys(), ['t_x_id', t2.c.id.anon_label]) self._assert_result_keys(sel, ['t_a', 't_x_b']) self._assert_subq_result_keys(sel, ['t_a', 't_x_b']) def _keylabels_overlap_labels_overlap(self): m = MetaData() t1 = Table('t', m, Column('x_id', Integer, key='x_a')) t2 = Table('t_x', m, Column('id', Integer, key='a')) return select([t1, t2]) def test_keylabels_overlap_labels_overlap_nolabel(self): sel = self._keylabels_overlap_labels_overlap() eq_(sel.c.keys(), ['x_a', 'a']) self._assert_result_keys(sel, ['x_id', 'id']) self._assert_subq_result_keys(sel, ['x_id', 'id']) def test_keylabels_overlap_labels_overlap_label(self): sel = self._keylabels_overlap_labels_overlap().apply_labels() t2 = sel.froms[1] eq_(sel.c.keys(), ['t_x_a', t2.c.a.anon_label]) self._assert_result_keys(sel, ['t_x_id', 'id_1']) self._assert_subq_result_keys(sel, ['t_x_id', 'id_1']) def _keys_overlap_names_dont(self): m = MetaData() t1 = Table('t1', m, Column('a', Integer, key='x')) t2 = Table('t2', m, Column('b', Integer, key='x')) return select([t1, t2]) def test_keys_overlap_names_dont_nolabel(self): sel = self._keys_overlap_names_dont() self._assert_labels_warning(sel) self._assert_result_keys(sel, ['a', 'b']) def test_keys_overlap_names_dont_label(self): sel = self._keys_overlap_names_dont().apply_labels() eq_( sel.c.keys(), ['t1_x', 't2_x'] ) self._assert_result_keys(sel, ['t1_a', 't2_b']) SQLAlchemy-0.8.4/test/sql/test_type_expressions.py0000644000076500000240000002014712251147172023051 0ustar classicstaff00000000000000from sqlalchemy import Table, Column, String, func, MetaData, select, TypeDecorator, cast from sqlalchemy.testing import fixtures, AssertsCompiledSQL from sqlalchemy import testing from sqlalchemy.testing import eq_ class _ExprFixture(object): def _fixture(self): class MyString(String): def bind_expression(self, bindvalue): return func.lower(bindvalue) def column_expression(self, col): return func.lower(col) test_table = Table( 'test_table', MetaData(), Column('x', String), Column('y', MyString) ) return test_table class SelectTest(_ExprFixture, fixtures.TestBase, AssertsCompiledSQL): __dialect__ = 'default' def test_select_cols(self): table = self._fixture() self.assert_compile( select([table]), "SELECT test_table.x, lower(test_table.y) AS y FROM test_table" ) def test_anonymous_expr(self): table = self._fixture() self.assert_compile( select([cast(table.c.y, String)]), "SELECT CAST(test_table.y AS VARCHAR) AS anon_1 FROM test_table" ) def test_select_cols_use_labels(self): table = self._fixture() self.assert_compile( select([table]).apply_labels(), "SELECT test_table.x AS test_table_x, " "lower(test_table.y) AS test_table_y FROM test_table" ) def test_select_cols_use_labels_result_map_targeting(self): table = self._fixture() compiled = select([table]).apply_labels().compile() assert table.c.y in compiled.result_map['test_table_y'][1] assert table.c.x in compiled.result_map['test_table_x'][1] # the lower() function goes into the result_map, we don't really # need this but it's fine self.assert_compile( compiled.result_map['test_table_y'][1][2], "lower(test_table.y)" ) # then the original column gets put in there as well. # it's not important that it's the last value. 
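# For orientation, a sketch of what the result_map entry holds at this
# point, assuming the 0.8-era layout of key -> (name, objects, type);
# the local names below are illustrative only:
#
#     name, objects, type_ = compiled.result_map['test_table_y']
#     # 'objects' collects the row-targeting candidates: the label, the
#     # lower(test_table.y) expression asserted above, and the plain
#     # test_table.y Column, which the [-1] access just below points at.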
self.assert_compile( compiled.result_map['test_table_y'][1][-1], "test_table.y" ) def test_insert_binds(self): table = self._fixture() self.assert_compile( table.insert(), "INSERT INTO test_table (x, y) VALUES (:x, lower(:y))" ) self.assert_compile( table.insert().values(y="hi"), "INSERT INTO test_table (y) VALUES (lower(:y))" ) def test_select_binds(self): table = self._fixture() self.assert_compile( select([table]).where(table.c.y == "hi"), "SELECT test_table.x, lower(test_table.y) AS y FROM " "test_table WHERE test_table.y = lower(:y_1)" ) class DerivedTest(_ExprFixture, fixtures.TestBase, AssertsCompiledSQL): __dialect__ = 'default' def test_select_from_select(self): table = self._fixture() self.assert_compile( table.select().select(), "SELECT x, lower(y) AS y FROM (SELECT test_table.x " "AS x, test_table.y AS y FROM test_table)" ) def test_select_from_alias(self): table = self._fixture() self.assert_compile( table.select().alias().select(), "SELECT anon_1.x, lower(anon_1.y) AS y FROM (SELECT " "test_table.x AS x, test_table.y AS y " "FROM test_table) AS anon_1" ) def test_select_from_aliased_join(self): table = self._fixture() s1 = table.select().alias() s2 = table.select().alias() j = s1.join(s2, s1.c.x == s2.c.x) s3 = j.select() self.assert_compile(s3, "SELECT anon_1.x, lower(anon_1.y) AS y, anon_2.x, " "lower(anon_2.y) AS y " "FROM (SELECT test_table.x AS x, test_table.y AS y " "FROM test_table) AS anon_1 JOIN (SELECT " "test_table.x AS x, test_table.y AS y " "FROM test_table) AS anon_2 ON anon_1.x = anon_2.x" ) class RoundTripTestBase(object): def test_round_trip(self): testing.db.execute( self.tables.test_table.insert(), {"x": "X1", "y": "Y1"}, {"x": "X2", "y": "Y2"}, {"x": "X3", "y": "Y3"}, ) # test insert coercion alone eq_( testing.db.execute( "select * from test_table order by y").fetchall(), [ ("X1", "y1"), ("X2", "y2"), ("X3", "y3"), ] ) # conversion back to upper eq_( testing.db.execute( select([self.tables.test_table]).\ order_by(self.tables.test_table.c.y) ).fetchall(), [ ("X1", "Y1"), ("X2", "Y2"), ("X3", "Y3"), ] ) def test_targeting_no_labels(self): testing.db.execute( self.tables.test_table.insert(), {"x": "X1", "y": "Y1"}, ) row = testing.db.execute(select([self.tables.test_table])).first() eq_( row[self.tables.test_table.c.y], "Y1" ) def test_targeting_by_string(self): testing.db.execute( self.tables.test_table.insert(), {"x": "X1", "y": "Y1"}, ) row = testing.db.execute(select([self.tables.test_table])).first() eq_( row["y"], "Y1" ) def test_targeting_apply_labels(self): testing.db.execute( self.tables.test_table.insert(), {"x": "X1", "y": "Y1"}, ) row = testing.db.execute(select([self.tables.test_table]). 
apply_labels()).first() eq_( row[self.tables.test_table.c.y], "Y1" ) def test_targeting_individual_labels(self): testing.db.execute( self.tables.test_table.insert(), {"x": "X1", "y": "Y1"}, ) row = testing.db.execute(select([ self.tables.test_table.c.x.label('xbar'), self.tables.test_table.c.y.label('ybar') ])).first() eq_( row[self.tables.test_table.c.y], "Y1" ) class StringRoundTripTest(fixtures.TablesTest, RoundTripTestBase): @classmethod def define_tables(cls, metadata): class MyString(String): def bind_expression(self, bindvalue): return func.lower(bindvalue) def column_expression(self, col): return func.upper(col) Table( 'test_table', metadata, Column('x', String(50)), Column('y', MyString(50) ) ) class TypeDecRoundTripTest(fixtures.TablesTest, RoundTripTestBase): @classmethod def define_tables(cls, metadata): class MyString(TypeDecorator): impl = String def bind_expression(self, bindvalue): return func.lower(bindvalue) def column_expression(self, col): return func.upper(col) Table( 'test_table', metadata, Column('x', String(50)), Column('y', MyString(50) ) ) class ReturningTest(fixtures.TablesTest): __requires__ = 'returning', @classmethod def define_tables(cls, metadata): class MyString(String): def column_expression(self, col): return func.lower(col) Table( 'test_table', metadata, Column('x', String(50)), Column('y', MyString(50), server_default="YVALUE") ) @testing.provide_metadata def test_insert_returning(self): table = self.tables.test_table result = testing.db.execute( table.insert().returning(table.c.y), {"x": "xvalue"} ) eq_( result.first(), ("yvalue",) ) SQLAlchemy-0.8.4/test/sql/test_types.py0000644000076500000240000016062212251150016020564 0ustar classicstaff00000000000000# coding: utf-8 from sqlalchemy.testing import eq_, assert_raises, assert_raises_message import decimal import datetime import os from sqlalchemy import * from sqlalchemy import exc, types, util, dialects for name in dialects.__all__: __import__("sqlalchemy.dialects.%s" % name) from sqlalchemy.sql import operators, column, table from sqlalchemy.schema import CheckConstraint, AddConstraint from sqlalchemy.engine import default from sqlalchemy.testing.schema import Table, Column from sqlalchemy import testing from sqlalchemy.testing import AssertsCompiledSQL, AssertsExecutionResults, \ engines, pickleable from sqlalchemy.testing.util import picklers from sqlalchemy.testing.util import round_decimal from sqlalchemy.testing import fixtures class AdaptTest(fixtures.TestBase): def _all_dialect_modules(self): return [ getattr(dialects, d) for d in dialects.__all__ if not d.startswith('_') ] def _all_dialects(self): return [d.base.dialect() for d in self._all_dialect_modules()] def _types_for_mod(self, mod): for key in dir(mod): typ = getattr(mod, key) if not isinstance(typ, type) or not issubclass(typ, types.TypeEngine): continue yield typ def _all_types(self): for typ in self._types_for_mod(types): yield typ for dialect in self._all_dialect_modules(): for typ in self._types_for_mod(dialect): yield typ def test_uppercase_importable(self): import sqlalchemy as sa for typ in self._types_for_mod(types): if typ.__name__ == typ.__name__.upper(): assert getattr(sa, typ.__name__) is typ assert typ.__name__ in types.__all__ def test_uppercase_rendering(self): """Test that uppercase types from types.py always render as their type. As of SQLA 0.6, using an uppercase type means you want specifically that type. 
If the database in use doesn't support that DDL, it (the DB backend) should raise an error - it means you should be using a lowercased (genericized) type. """ for dialect in self._all_dialects(): for type_, expected in ( (REAL, "REAL"), (FLOAT, "FLOAT"), (NUMERIC, "NUMERIC"), (DECIMAL, "DECIMAL"), (INTEGER, "INTEGER"), (SMALLINT, "SMALLINT"), (TIMESTAMP, ("TIMESTAMP", "TIMESTAMP WITHOUT TIME ZONE")), (DATETIME, "DATETIME"), (DATE, "DATE"), (TIME, ("TIME", "TIME WITHOUT TIME ZONE")), (CLOB, "CLOB"), (VARCHAR(10), ("VARCHAR(10)", "VARCHAR(10 CHAR)")), (NVARCHAR(10), ("NVARCHAR(10)", "NATIONAL VARCHAR(10)", "NVARCHAR2(10)")), (CHAR, "CHAR"), (NCHAR, ("NCHAR", "NATIONAL CHAR")), (BLOB, ("BLOB", "BLOB SUB_TYPE 0")), (BOOLEAN, ("BOOLEAN", "BOOL", "INTEGER")) ): if isinstance(expected, str): expected = (expected, ) try: compiled = types.to_instance(type_).\ compile(dialect=dialect) except NotImplementedError: continue assert compiled in expected, \ "%r matches none of %r for dialect %s" % \ (compiled, expected, dialect.name) assert str(types.to_instance(type_)) in expected, \ "default str() of type %r not expected, %r" % \ (type_, expected) @testing.uses_deprecated() def test_adapt_method(self): """ensure all types have a working adapt() method, which creates a distinct copy. The distinct copy ensures that when we cache the adapted() form of a type against the original in a weak key dictionary, a cycle is not formed. This test doesn't test type-specific arguments of adapt() beyond their defaults. """ for typ in self._all_types(): if typ in (types.TypeDecorator, types.TypeEngine, types.Variant): continue elif typ is dialects.postgresql.ARRAY: t1 = typ(String) else: t1 = typ() for cls in [typ] + typ.__subclasses__(): if not issubclass(typ, types.Enum) and \ issubclass(cls, types.Enum): continue t2 = t1.adapt(cls) assert t1 is not t2 for k in t1.__dict__: if k == 'impl': continue # assert each value was copied, or that # the adapted type has a more specific # value than the original (i.e. 
SQL Server # applies precision=24 for REAL) assert \ getattr(t2, k) == t1.__dict__[k] or \ t1.__dict__[k] is None def test_python_type(self): eq_(types.Integer().python_type, int) eq_(types.Numeric().python_type, decimal.Decimal) eq_(types.Numeric(asdecimal=False).python_type, float) # Py3K #eq_(types.LargeBinary().python_type, bytes) # Py2K eq_(types.LargeBinary().python_type, str) # end Py2K eq_(types.Float().python_type, float) eq_(types.Interval().python_type, datetime.timedelta) eq_(types.Date().python_type, datetime.date) eq_(types.DateTime().python_type, datetime.datetime) # Py3K #eq_(types.String().python_type, unicode) # Py2K eq_(types.String().python_type, str) # end Py2K eq_(types.Unicode().python_type, unicode) eq_(types.String(convert_unicode=True).python_type, unicode) assert_raises( NotImplementedError, lambda: types.TypeEngine().python_type ) @testing.uses_deprecated() def test_repr(self): for typ in self._all_types(): if typ in (types.TypeDecorator, types.TypeEngine, types.Variant): continue elif typ is dialects.postgresql.ARRAY: t1 = typ(String) else: t1 = typ() repr(t1) def test_plain_init_deprecation_warning(self): for typ in (Integer, Date, SmallInteger): assert_raises_message( exc.SADeprecationWarning, "Passing arguments to type object " "constructor %s is deprecated" % typ, typ, 11 ) class TypeAffinityTest(fixtures.TestBase): def test_type_affinity(self): for type_, affin in [ (String(), String), (VARCHAR(), String), (Date(), Date), (LargeBinary(), types._Binary) ]: eq_(type_._type_affinity, affin) for t1, t2, comp in [ (Integer(), SmallInteger(), True), (Integer(), String(), False), (Integer(), Integer(), True), (Text(), String(), True), (Text(), Unicode(), True), (LargeBinary(), Integer(), False), (LargeBinary(), PickleType(), True), (PickleType(), LargeBinary(), True), (PickleType(), PickleType(), True), ]: eq_(t1._compare_type_affinity(t2), comp, "%s %s" % (t1, t2)) def test_decorator_doesnt_cache(self): from sqlalchemy.dialects import postgresql class MyType(TypeDecorator): impl = CHAR def load_dialect_impl(self, dialect): if dialect.name == 'postgresql': return dialect.type_descriptor(postgresql.UUID()) else: return dialect.type_descriptor(CHAR(32)) t1 = MyType() d = postgresql.dialect() assert t1._type_affinity is String assert t1.dialect_impl(d)._type_affinity is postgresql.UUID class PickleMetadataTest(fixtures.TestBase): def testmeta(self): for loads, dumps in picklers(): column_types = [ Column('Boo', Boolean()), Column('Str', String()), Column('Tex', Text()), Column('Uni', Unicode()), Column('Int', Integer()), Column('Sma', SmallInteger()), Column('Big', BigInteger()), Column('Num', Numeric()), Column('Flo', Float()), Column('Dat', DateTime()), Column('Dat', Date()), Column('Tim', Time()), Column('Lar', LargeBinary()), Column('Pic', PickleType()), Column('Int', Interval()), Column('Enu', Enum('x', 'y', 'z', name="somename")), ] for column_type in column_types: meta = MetaData() Table('foo', meta, column_type) loads(dumps(column_type)) loads(dumps(meta)) class UserDefinedTest(fixtures.TablesTest, AssertsCompiledSQL): """tests user-defined types.""" def test_processing(self): users = self.tables.users users.insert().execute( user_id=2, goofy='jack', goofy2='jack', goofy4=u'jack', goofy7=u'jack', goofy8=12, goofy9=12) users.insert().execute( user_id=3, goofy='lala', goofy2='lala', goofy4=u'lala', goofy7=u'lala', goofy8=15, goofy9=15) users.insert().execute( user_id=4, goofy='fred', goofy2='fred', goofy4=u'fred', goofy7=u'fred', goofy8=9, goofy9=9) l = 
users.select().order_by(users.c.user_id).execute().fetchall() for assertstr, assertint, assertint2, row in zip( ["BIND_INjackBIND_OUT", "BIND_INlalaBIND_OUT", "BIND_INfredBIND_OUT"], [1200, 1500, 900], [1800, 2250, 1350], l ): for col in list(row)[1:5]: eq_(col, assertstr) eq_(row[5], assertint) eq_(row[6], assertint2) for col in row[3], row[4]: assert isinstance(col, unicode) def test_typedecorator_impl(self): for impl_, exp, kw in [ (Float, "FLOAT", {}), (Float, "FLOAT(2)", {'precision': 2}), (Float(2), "FLOAT(2)", {'precision': 4}), (Numeric(19, 2), "NUMERIC(19, 2)", {}), ]: for dialect_ in (dialects.postgresql, dialects.mssql, dialects.mysql): dialect_ = dialect_.dialect() raw_impl = types.to_instance(impl_, **kw) class MyType(types.TypeDecorator): impl = impl_ dec_type = MyType(**kw) eq_(dec_type.impl.__class__, raw_impl.__class__) raw_dialect_impl = raw_impl.dialect_impl(dialect_) dec_dialect_impl = dec_type.dialect_impl(dialect_) eq_(dec_dialect_impl.__class__, MyType) eq_(raw_dialect_impl.__class__, dec_dialect_impl.impl.__class__) self.assert_compile( MyType(**kw), exp, dialect=dialect_ ) def test_user_defined_typedec_impl(self): class MyType(types.TypeDecorator): impl = Float def load_dialect_impl(self, dialect): if dialect.name == 'sqlite': return String(50) else: return super(MyType, self).load_dialect_impl(dialect) sl = dialects.sqlite.dialect() pg = dialects.postgresql.dialect() t = MyType() self.assert_compile(t, "VARCHAR(50)", dialect=sl) self.assert_compile(t, "FLOAT", dialect=pg) eq_( t.dialect_impl(dialect=sl).impl.__class__, String().dialect_impl(dialect=sl).__class__ ) eq_( t.dialect_impl(dialect=pg).impl.__class__, Float().dialect_impl(pg).__class__ ) def test_type_decorator_repr(self): class MyType(TypeDecorator): impl = VARCHAR eq_(repr(MyType(45)), "MyType(length=45)") def test_user_defined_typedec_impl_bind(self): class TypeOne(types.TypeEngine): def bind_processor(self, dialect): def go(value): return value + " ONE" return go class TypeTwo(types.TypeEngine): def bind_processor(self, dialect): def go(value): return value + " TWO" return go class MyType(types.TypeDecorator): impl = TypeOne def load_dialect_impl(self, dialect): if dialect.name == 'sqlite': return TypeOne() else: return TypeTwo() def process_bind_param(self, value, dialect): return "MYTYPE " + value sl = dialects.sqlite.dialect() pg = dialects.postgresql.dialect() t = MyType() eq_( t._cached_bind_processor(sl)('foo'), "MYTYPE foo ONE" ) eq_( t._cached_bind_processor(pg)('foo'), "MYTYPE foo TWO" ) def test_user_defined_dialect_specific_args(self): class MyType(types.UserDefinedType): def __init__(self, foo='foo', **kwargs): super(MyType, self).__init__() self.foo = foo self.dialect_specific_args = kwargs def adapt(self, cls): return cls(foo=self.foo, **self.dialect_specific_args) t = MyType(bar='bar') a = t.dialect_impl(testing.db.dialect) eq_(a.foo, 'foo') eq_(a.dialect_specific_args['bar'], 'bar') @testing.provide_metadata def test_type_coerce(self): """test ad-hoc usage of custom types with type_coerce().""" metadata = self.metadata class MyType(types.TypeDecorator): impl = String def process_bind_param(self, value, dialect): return value[0:-8] def process_result_value(self, value, dialect): return value + "BIND_OUT" t = Table('t', metadata, Column('data', String(50))) metadata.create_all() t.insert().values(data=type_coerce('d1BIND_OUT', MyType)).execute() eq_( select([type_coerce(t.c.data, MyType)]).execute().fetchall(), [('d1BIND_OUT', )] ) eq_( select([t.c.data, type_coerce(t.c.data, 
MyType)]).execute().fetchall(), [('d1', 'd1BIND_OUT')] ) eq_( select([t.c.data, type_coerce(t.c.data, MyType)]). alias().select().execute().fetchall(), [('d1', 'd1BIND_OUT')] ) eq_( select([t.c.data, type_coerce(t.c.data, MyType)]).\ where(type_coerce(t.c.data, MyType) == 'd1BIND_OUT').\ execute().fetchall(), [('d1', 'd1BIND_OUT')] ) eq_( select([t.c.data, type_coerce(t.c.data, MyType)]).\ where(t.c.data == type_coerce('d1BIND_OUT', MyType)).\ execute().fetchall(), [('d1', 'd1BIND_OUT')] ) eq_( select([t.c.data, type_coerce(t.c.data, MyType)]).\ where(t.c.data == type_coerce(None, MyType)).\ execute().fetchall(), [] ) eq_( select([t.c.data, type_coerce(t.c.data, MyType)]).\ where(type_coerce(t.c.data, MyType) == None).\ execute().fetchall(), [] ) eq_( testing.db.scalar( select([type_coerce(literal('d1BIND_OUT'), MyType)]) ), 'd1BIND_OUT' ) class MyFoob(object): def __clause_element__(self): return t.c.data eq_( testing.db.execute( select([t.c.data, type_coerce(MyFoob(), MyType)]) ).fetchall(), [('d1', 'd1BIND_OUT')] ) @classmethod def define_tables(cls, metadata): class MyType(types.UserDefinedType): def get_col_spec(self): return "VARCHAR(100)" def bind_processor(self, dialect): def process(value): return "BIND_IN" + value return process def result_processor(self, dialect, coltype): def process(value): return value + "BIND_OUT" return process def adapt(self, typeobj): return typeobj() class MyDecoratedType(types.TypeDecorator): impl = String def bind_processor(self, dialect): impl_processor = super(MyDecoratedType, self).bind_processor(dialect)\ or (lambda value: value) def process(value): return "BIND_IN" + impl_processor(value) return process def result_processor(self, dialect, coltype): impl_processor = super(MyDecoratedType, self).result_processor(dialect, coltype)\ or (lambda value: value) def process(value): return impl_processor(value) + "BIND_OUT" return process def copy(self): return MyDecoratedType() class MyNewUnicodeType(types.TypeDecorator): impl = Unicode def process_bind_param(self, value, dialect): return "BIND_IN" + value def process_result_value(self, value, dialect): return value + "BIND_OUT" def copy(self): return MyNewUnicodeType(self.impl.length) class MyNewIntType(types.TypeDecorator): impl = Integer def process_bind_param(self, value, dialect): return value * 10 def process_result_value(self, value, dialect): return value * 10 def copy(self): return MyNewIntType() class MyNewIntSubClass(MyNewIntType): def process_result_value(self, value, dialect): return value * 15 def copy(self): return MyNewIntSubClass() class MyUnicodeType(types.TypeDecorator): impl = Unicode def bind_processor(self, dialect): impl_processor = super(MyUnicodeType, self).bind_processor(dialect)\ or (lambda value: value) def process(value): return "BIND_IN" + impl_processor(value) return process def result_processor(self, dialect, coltype): impl_processor = super(MyUnicodeType, self).result_processor(dialect, coltype)\ or (lambda value: value) def process(value): return impl_processor(value) + "BIND_OUT" return process def copy(self): return MyUnicodeType(self.impl.length) Table('users', metadata, Column('user_id', Integer, primary_key=True), # totall custom type Column('goofy', MyType, nullable=False), # decorated type with an argument, so its a String Column('goofy2', MyDecoratedType(50), nullable=False), Column('goofy4', MyUnicodeType(50), nullable=False), Column('goofy7', MyNewUnicodeType(50), nullable=False), Column('goofy8', MyNewIntType, nullable=False), Column('goofy9', MyNewIntSubClass, 
nullable=False), ) class VariantTest(fixtures.TestBase, AssertsCompiledSQL): def setup(self): class UTypeOne(types.UserDefinedType): def get_col_spec(self): return "UTYPEONE" def bind_processor(self, dialect): def process(value): return value + "UONE" return process class UTypeTwo(types.UserDefinedType): def get_col_spec(self): return "UTYPETWO" def bind_processor(self, dialect): def process(value): return value + "UTWO" return process class UTypeThree(types.UserDefinedType): def get_col_spec(self): return "UTYPETHREE" self.UTypeOne = UTypeOne self.UTypeTwo = UTypeTwo self.UTypeThree = UTypeThree self.variant = self.UTypeOne().with_variant( self.UTypeTwo(), 'postgresql') self.composite = self.variant.with_variant( self.UTypeThree(), 'mysql') def test_illegal_dupe(self): v = self.UTypeOne().with_variant( self.UTypeTwo(), 'postgresql' ) assert_raises_message( exc.ArgumentError, "Dialect 'postgresql' is already present " "in the mapping for this Variant", lambda: v.with_variant(self.UTypeThree(), 'postgresql') ) def test_compile(self): self.assert_compile( self.variant, "UTYPEONE", use_default_dialect=True ) self.assert_compile( self.variant, "UTYPEONE", dialect=dialects.mysql.dialect() ) self.assert_compile( self.variant, "UTYPETWO", dialect=dialects.postgresql.dialect() ) def test_compile_composite(self): self.assert_compile( self.composite, "UTYPEONE", use_default_dialect=True ) self.assert_compile( self.composite, "UTYPETHREE", dialect=dialects.mysql.dialect() ) self.assert_compile( self.composite, "UTYPETWO", dialect=dialects.postgresql.dialect() ) def test_bind_process(self): eq_( self.variant._cached_bind_processor( dialects.mysql.dialect())('foo'), 'fooUONE' ) eq_( self.variant._cached_bind_processor( default.DefaultDialect())('foo'), 'fooUONE' ) eq_( self.variant._cached_bind_processor( dialects.postgresql.dialect())('foo'), 'fooUTWO' ) def test_bind_process_composite(self): assert self.composite._cached_bind_processor( dialects.mysql.dialect()) is None eq_( self.composite._cached_bind_processor( default.DefaultDialect())('foo'), 'fooUONE' ) eq_( self.composite._cached_bind_processor( dialects.postgresql.dialect())('foo'), 'fooUTWO' ) class UnicodeTest(fixtures.TestBase): """Exercise the Unicode and related types. Note: unicode round trip tests are now in sqlalchemy/testing/suite/test_types.py. """ def test_native_unicode(self): """assert expected values for 'native unicode' mode""" if (testing.against('mssql+pyodbc') and not testing.db.dialect.freetds) \ or testing.against('mssql+mxodbc'): eq_( testing.db.dialect.returns_unicode_strings, 'conditional' ) elif testing.against('mssql+pymssql'): eq_( testing.db.dialect.returns_unicode_strings, ('charset' in testing.db.url.query) ) elif testing.against('mysql+cymysql', 'mysql+pymssql'): eq_( testing.db.dialect.returns_unicode_strings, True if util.py3k else False ) else: expected = (testing.db.name, testing.db.driver) in \ ( ('postgresql', 'psycopg2'), ('postgresql', 'pypostgresql'), ('postgresql', 'pg8000'), ('postgresql', 'zxjdbc'), ('mysql', 'oursql'), ('mysql', 'zxjdbc'), ('mysql', 'mysqlconnector'), ('sqlite', 'pysqlite'), ('oracle', 'zxjdbc'), ('oracle', 'cx_oracle'), ) eq_( testing.db.dialect.returns_unicode_strings, expected ) data = u"Alors vous imaginez ma surprise, au lever du jour, quand "\ u"une drôle de petite voix m’a réveillé. "\ u"Elle disait: « S’il vous plaît… dessine-moi un mouton! 
»" def test_unicode_warnings_typelevel_native_unicode(self): unicodedata = self.data u = Unicode() dialect = default.DefaultDialect() dialect.supports_unicode_binds = True uni = u.dialect_impl(dialect).bind_processor(dialect) # Py3K #assert_raises(exc.SAWarning, uni, b'x') #assert isinstance(uni(unicodedata), str) # Py2K assert_raises(exc.SAWarning, uni, 'x') assert isinstance(uni(unicodedata), unicode) # end Py2K def test_unicode_warnings_typelevel_sqla_unicode(self): unicodedata = self.data u = Unicode() dialect = default.DefaultDialect() dialect.supports_unicode_binds = False uni = u.dialect_impl(dialect).bind_processor(dialect) # Py3K #assert_raises(exc.SAWarning, uni, b'x') #assert isinstance(uni(unicodedata), bytes) # Py2K assert_raises(exc.SAWarning, uni, 'x') assert isinstance(uni(unicodedata), str) # end Py2K eq_(uni(unicodedata), unicodedata.encode('utf-8')) def test_unicode_warnings_dialectlevel(self): unicodedata = self.data dialect = default.DefaultDialect(convert_unicode=True) dialect.supports_unicode_binds = False s = String() uni = s.dialect_impl(dialect).bind_processor(dialect) # this is not the unicode type - no warning # Py3K #uni(b'x') #assert isinstance(uni(unicodedata), bytes) # Py2K uni('x') assert isinstance(uni(unicodedata), str) # end Py2K eq_(uni(unicodedata), unicodedata.encode('utf-8')) def test_ignoring_unicode_error(self): """checks String(unicode_error='ignore') is passed to underlying codec.""" unicodedata = self.data type_ = String(248, convert_unicode='force', unicode_error='ignore') dialect = default.DefaultDialect(encoding='ascii') proc = type_.result_processor(dialect, 10) utfdata = unicodedata.encode('utf8') eq_( proc(utfdata), unicodedata.encode('ascii', 'ignore').decode() ) class EnumTest(AssertsCompiledSQL, fixtures.TestBase): @classmethod def setup_class(cls): global enum_table, non_native_enum_table, metadata metadata = MetaData(testing.db) enum_table = Table('enum_table', metadata, Column("id", Integer, primary_key=True), Column('someenum', Enum('one', 'two', 'three', name='myenum')) ) non_native_enum_table = Table('non_native_enum_table', metadata, Column("id", Integer, primary_key=True), Column('someenum', Enum('one', 'two', 'three', native_enum=False)), ) metadata.create_all() def teardown(self): enum_table.delete().execute() non_native_enum_table.delete().execute() @classmethod def teardown_class(cls): metadata.drop_all() @testing.fails_on('postgresql+zxjdbc', 'zxjdbc fails on ENUM: column "XXX" is of type XXX ' 'but expression is of type character varying') @testing.fails_on('postgresql+pg8000', 'zxjdbc fails on ENUM: column "XXX" is of type XXX ' 'but expression is of type text') def test_round_trip(self): enum_table.insert().execute([ {'id': 1, 'someenum': 'two'}, {'id': 2, 'someenum': 'two'}, {'id': 3, 'someenum': 'one'}, ]) eq_( enum_table.select().order_by(enum_table.c.id).execute().fetchall(), [ (1, 'two'), (2, 'two'), (3, 'one'), ] ) def test_non_native_round_trip(self): non_native_enum_table.insert().execute([ {'id': 1, 'someenum': 'two'}, {'id': 2, 'someenum': 'two'}, {'id': 3, 'someenum': 'one'}, ]) eq_( non_native_enum_table.select(). 
order_by(non_native_enum_table.c.id).execute().fetchall(), [ (1, 'two'), (2, 'two'), (3, 'one'), ] ) def test_adapt(self): from sqlalchemy.dialects.postgresql import ENUM e1 = Enum('one', 'two', 'three', native_enum=False) eq_(e1.adapt(ENUM).native_enum, False) e1 = Enum('one', 'two', 'three', native_enum=True) eq_(e1.adapt(ENUM).native_enum, True) e1 = Enum('one', 'two', 'three', name='foo', schema='bar') eq_(e1.adapt(ENUM).name, 'foo') eq_(e1.adapt(ENUM).schema, 'bar') @testing.crashes('mysql', 'Inconsistent behavior across various OS/drivers' ) def test_constraint(self): assert_raises(exc.DBAPIError, enum_table.insert().execute, {'id': 4, 'someenum': 'four'} ) def test_non_native_constraint_custom_type(self): class Foob(object): def __init__(self, name): self.name = name class MyEnum(types.SchemaType, TypeDecorator): def __init__(self, values): self.impl = Enum( name="myenum", native_enum=False, *[v.name for v in values] ) def _set_table(self, table, column): self.impl._set_table(table, column) # future method def process_literal_param(self, value, dialect): return value.name def process_bind_param(self, value, dialect): return value.name m = MetaData() t1 = Table('t', m, Column('x', MyEnum([Foob('a'), Foob('b')]))) const = [c for c in t1.constraints if isinstance(c, CheckConstraint)][0] self.assert_compile( AddConstraint(const), "ALTER TABLE t ADD CONSTRAINT myenum CHECK (x IN ('a', 'b'))", dialect=default.DefaultDialect() ) @testing.fails_on('mysql', "the CHECK constraint doesn't raise an exception for unknown reason") def test_non_native_constraint(self): assert_raises(exc.DBAPIError, non_native_enum_table.insert().execute, {'id': 4, 'someenum': 'four'} ) def test_mock_engine_no_prob(self): """ensure no 'checkfirst' queries are run when enums are created with checkfirst=False""" e = engines.mock_engine() t = Table('t1', MetaData(), Column('x', Enum("x", "y", name="pge")) ) t.create(e, checkfirst=False) # basically looking for the start of # the constraint, or the ENUM def itself, # depending on backend. 
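# For reference, the DDL buffered by the mock engine looks roughly like
# one of these two forms (an illustrative sketch; exact type lengths and
# constraint wording vary by backend):
#
#     -- native ENUM backends (e.g. PostgreSQL):
#     CREATE TYPE pge AS ENUM ('x', 'y')
#
#     -- non-native backends: a plain string column plus a CHECK, e.g.
#     x VARCHAR(1), CONSTRAINT pge CHECK (x IN ('x', 'y'))
#
# Either way the substring "('x'," appears in the captured SQL, which is
# what the assertion below relies on, and no "checkfirst" probe query
# should show up in the buffer.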
assert "('x'," in e.print_sql() class BinaryTest(fixtures.TestBase, AssertsExecutionResults): __excluded_on__ = ( ('mysql', '<', (4, 1, 1)), # screwy varbinary types ) @classmethod def setup_class(cls): global binary_table, MyPickleType, metadata class MyPickleType(types.TypeDecorator): impl = PickleType def process_bind_param(self, value, dialect): if value: value.stuff = 'this is modified stuff' return value def process_result_value(self, value, dialect): if value: value.stuff = 'this is the right stuff' return value metadata = MetaData(testing.db) binary_table = Table('binary_table', metadata, Column('primary_id', Integer, primary_key=True, test_needs_autoincrement=True), Column('data', LargeBinary), Column('data_slice', LargeBinary(100)), Column('misc', String(30)), Column('pickled', PickleType), Column('mypickle', MyPickleType) ) metadata.create_all() @engines.close_first def teardown(self): binary_table.delete().execute() @classmethod def teardown_class(cls): metadata.drop_all() def test_round_trip(self): testobj1 = pickleable.Foo('im foo 1') testobj2 = pickleable.Foo('im foo 2') testobj3 = pickleable.Foo('im foo 3') stream1 = self.load_stream('binary_data_one.dat') stream2 = self.load_stream('binary_data_two.dat') binary_table.insert().execute( primary_id=1, misc='binary_data_one.dat', data=stream1, data_slice=stream1[0:100], pickled=testobj1, mypickle=testobj3) binary_table.insert().execute( primary_id=2, misc='binary_data_two.dat', data=stream2, data_slice=stream2[0:99], pickled=testobj2) binary_table.insert().execute( primary_id=3, misc='binary_data_two.dat', data=None, data_slice=stream2[0:99], pickled=None) for stmt in ( binary_table.select(order_by=binary_table.c.primary_id), text( "select * from binary_table order by binary_table.primary_id", typemap={'pickled': PickleType, 'mypickle': MyPickleType, 'data': LargeBinary, 'data_slice': LargeBinary}, bind=testing.db) ): l = stmt.execute().fetchall() eq_(stream1, l[0]['data']) eq_(stream1[0:100], l[0]['data_slice']) eq_(stream2, l[1]['data']) eq_(testobj1, l[0]['pickled']) eq_(testobj2, l[1]['pickled']) eq_(testobj3.moredata, l[0]['mypickle'].moredata) eq_(l[0]['mypickle'].stuff, 'this is the right stuff') @testing.requires.binary_comparisons def test_comparison(self): """test that type coercion occurs on comparison for binary""" expr = binary_table.c.data == 'foo' assert isinstance(expr.right.type, LargeBinary) data = os.urandom(32) binary_table.insert().execute(data=data) eq_(binary_table.select(). where(binary_table.c.data == data).alias(). 
count().scalar(), 1) def load_stream(self, name): f = os.path.join(os.path.dirname(__file__), "..", name) return open(f, mode='rb').read() class ExpressionTest(fixtures.TestBase, AssertsExecutionResults, AssertsCompiledSQL): __dialect__ = 'default' @classmethod def setup_class(cls): global test_table, meta, MyCustomType, MyTypeDec class MyCustomType(types.UserDefinedType): def get_col_spec(self): return "INT" def bind_processor(self, dialect): def process(value): return value * 10 return process def result_processor(self, dialect, coltype): def process(value): return value / 10 return process def adapt_operator(self, op): return {operators.add: operators.sub, operators.sub: operators.add}.get(op, op) class MyTypeDec(types.TypeDecorator): impl = String def process_bind_param(self, value, dialect): return "BIND_IN" + str(value) def process_result_value(self, value, dialect): return value + "BIND_OUT" meta = MetaData(testing.db) test_table = Table('test', meta, Column('id', Integer, primary_key=True), Column('data', String(30)), Column('atimestamp', Date), Column('avalue', MyCustomType), Column('bvalue', MyTypeDec(50)), ) meta.create_all() test_table.insert().execute({ 'id': 1, 'data': 'somedata', 'atimestamp': datetime.date(2007, 10, 15), 'avalue': 25, 'bvalue': 'foo'}) @classmethod def teardown_class(cls): meta.drop_all() def test_control(self): assert testing.db.execute("select avalue from test").scalar() == 250 eq_( test_table.select().execute().fetchall(), [(1, 'somedata', datetime.date(2007, 10, 15), 25, 'BIND_INfooBIND_OUT')] ) def test_bind_adapt(self): # test an untyped bind gets the left side's type expr = test_table.c.atimestamp == bindparam("thedate") eq_(expr.right.type._type_affinity, Date) eq_( testing.db.execute( select([test_table.c.id, test_table.c.data, test_table.c.atimestamp]) .where(expr), {"thedate": datetime.date(2007, 10, 15)}).fetchall(), [(1, 'somedata', datetime.date(2007, 10, 15))] ) expr = test_table.c.avalue == bindparam("somevalue") eq_(expr.right.type._type_affinity, MyCustomType) eq_( testing.db.execute(test_table.select().where(expr), {'somevalue': 25}).fetchall(), [(1, 'somedata', datetime.date(2007, 10, 15), 25, 'BIND_INfooBIND_OUT')] ) expr = test_table.c.bvalue == bindparam("somevalue") eq_(expr.right.type._type_affinity, String) eq_( testing.db.execute(test_table.select().where(expr), {"somevalue": "foo"}).fetchall(), [(1, 'somedata', datetime.date(2007, 10, 15), 25, 'BIND_INfooBIND_OUT')] ) def test_literal_adapt(self): # literals get typed based on the types dictionary, unless # compatible with the left side type expr = column('foo', String) == 5 eq_(expr.right.type._type_affinity, Integer) expr = column('foo', String) == "asdf" eq_(expr.right.type._type_affinity, String) expr = column('foo', CHAR) == 5 eq_(expr.right.type._type_affinity, Integer) expr = column('foo', CHAR) == "asdf" eq_(expr.right.type.__class__, CHAR) @testing.uses_deprecated @testing.fails_on('firebird', 'Data type unknown on the parameter') @testing.fails_on('mssql', 'int is unsigned ? 
not clear') def test_operator_adapt(self): """test type-based overloading of operators""" # test string concatenation expr = test_table.c.data + "somedata" eq_(testing.db.execute(select([expr])).scalar(), "somedatasomedata") expr = test_table.c.id + 15 eq_(testing.db.execute(select([expr])).scalar(), 16) # test custom operator conversion expr = test_table.c.avalue + 40 assert expr.type.__class__ is test_table.c.avalue.type.__class__ # value here is calculated as (250 - 40) / 10 = 21 # because "40" is an integer, not an "avalue" eq_(testing.db.execute(select([expr.label('foo')])).scalar(), 21) expr = test_table.c.avalue + literal(40, type_=MyCustomType) # + operator converted to - # value is calculated as: (250 - (40 * 10)) / 10 == -15 eq_(testing.db.execute(select([expr.label('foo')])).scalar(), -15) # this one relies upon anonymous labeling to assemble result # processing rules on the column. eq_(testing.db.execute(select([expr])).scalar(), -15) def test_typedec_operator_adapt(self): expr = test_table.c.bvalue + "hi" assert expr.type.__class__ is MyTypeDec assert expr.right.type.__class__ is MyTypeDec eq_( testing.db.execute(select([expr.label('foo')])).scalar(), "BIND_INfooBIND_INhiBIND_OUT" ) def test_typedec_is_adapt(self): class CoerceNothing(TypeDecorator): coerce_to_is_types = () impl = Integer class CoerceBool(TypeDecorator): coerce_to_is_types = (bool, ) impl = Boolean class CoerceNone(TypeDecorator): coerce_to_is_types = (type(None),) impl = Integer c1 = column('x', CoerceNothing()) c2 = column('x', CoerceBool()) c3 = column('x', CoerceNone()) self.assert_compile( and_(c1 == None, c2 == None, c3 == None), "x = :x_1 AND x = :x_2 AND x IS NULL" ) self.assert_compile( and_(c1 == True, c2 == True, c3 == True), "x = :x_1 AND x = true AND x = :x_2" ) self.assert_compile( and_(c1 == 3, c2 == 3, c3 == 3), "x = :x_1 AND x = :x_2 AND x = :x_3" ) self.assert_compile( and_(c1.is_(True), c2.is_(True), c3.is_(True)), "x IS :x_1 AND x IS true AND x IS :x_2" ) def test_typedec_righthand_coercion(self): class MyTypeDec(types.TypeDecorator): impl = String def process_bind_param(self, value, dialect): return "BIND_IN" + str(value) def process_result_value(self, value, dialect): return value + "BIND_OUT" tab = table('test', column('bvalue', MyTypeDec)) expr = tab.c.bvalue + 6 self.assert_compile( expr, "test.bvalue || :bvalue_1", use_default_dialect=True ) assert expr.type.__class__ is MyTypeDec eq_( testing.db.execute(select([expr.label('foo')])).scalar(), "BIND_INfooBIND_IN6BIND_OUT" ) def test_bind_typing(self): from sqlalchemy.sql import column class MyFoobarType(types.UserDefinedType): pass class Foo(object): pass # unknown type + integer, right hand bind # coerces to given type expr = column("foo", MyFoobarType) + 5 assert expr.right.type._type_affinity is MyFoobarType # untyped bind - it gets assigned MyFoobarType expr = column("foo", MyFoobarType) + bindparam("foo") assert expr.right.type._type_affinity is MyFoobarType expr = column("foo", MyFoobarType) + bindparam("foo", type_=Integer) assert expr.right.type._type_affinity is types.Integer # unknown type + unknown, right hand bind # coerces to the left expr = column("foo", MyFoobarType) + Foo() assert expr.right.type._type_affinity is MyFoobarType # including for non-commutative ops expr = column("foo", MyFoobarType) - Foo() assert expr.right.type._type_affinity is MyFoobarType expr = column("foo", MyFoobarType) - datetime.date(2010, 8, 25) assert expr.right.type._type_affinity is MyFoobarType def test_date_coercion(self): from 
sqlalchemy.sql import column expr = column('bar', types.NULLTYPE) - column('foo', types.TIMESTAMP) eq_(expr.type._type_affinity, types.NullType) expr = func.sysdate() - column('foo', types.TIMESTAMP) eq_(expr.type._type_affinity, types.Interval) expr = func.current_date() - column('foo', types.TIMESTAMP) eq_(expr.type._type_affinity, types.Interval) def test_numerics_coercion(self): from sqlalchemy.sql import column import operator for op in ( operator.add, operator.mul, operator.truediv, operator.sub ): for other in (Numeric(10, 2), Integer): expr = op( column('bar', types.Numeric(10, 2)), column('foo', other) ) assert isinstance(expr.type, types.Numeric) expr = op( column('foo', other), column('bar', types.Numeric(10, 2)) ) assert isinstance(expr.type, types.Numeric) def test_null_comparison(self): eq_( str(column('a', types.NullType()) + column('b', types.NullType())), "a + b" ) def test_expression_typing(self): expr = column('bar', Integer) - 3 eq_(expr.type._type_affinity, Integer) expr = bindparam('bar') + bindparam('foo') eq_(expr.type, types.NULLTYPE) def test_distinct(self): s = select([distinct(test_table.c.avalue)]) eq_(testing.db.execute(s).scalar(), 25) s = select([test_table.c.avalue.distinct()]) eq_(testing.db.execute(s).scalar(), 25) assert distinct(test_table.c.data).type == test_table.c.data.type assert test_table.c.data.distinct().type == test_table.c.data.type class CompileTest(fixtures.TestBase, AssertsCompiledSQL): __dialect__ = 'default' @testing.requires.unbounded_varchar def test_string_plain(self): self.assert_compile(String(), "VARCHAR") def test_string_length(self): self.assert_compile(String(50), "VARCHAR(50)") def test_string_collation(self): self.assert_compile(String(50, collation="FOO"), 'VARCHAR(50) COLLATE "FOO"') def test_char_plain(self): self.assert_compile(CHAR(), "CHAR") def test_char_length(self): self.assert_compile(CHAR(50), "CHAR(50)") def test_char_collation(self): self.assert_compile(CHAR(50, collation="FOO"), 'CHAR(50) COLLATE "FOO"') def test_text_plain(self): self.assert_compile(Text(), "TEXT") def test_text_length(self): self.assert_compile(Text(50), "TEXT(50)") def test_text_collation(self): self.assert_compile(Text(collation="FOO"), 'TEXT COLLATE "FOO"') def test_default_compile_pg_inet(self): self.assert_compile(dialects.postgresql.INET(), "INET", allow_dialect_select=True) def test_default_compile_pg_float(self): self.assert_compile(dialects.postgresql.FLOAT(), "FLOAT", allow_dialect_select=True) def test_default_compile_mysql_integer(self): self.assert_compile( dialects.mysql.INTEGER(display_width=5), "INTEGER(5)", allow_dialect_select=True) def test_numeric_plain(self): self.assert_compile(types.NUMERIC(), 'NUMERIC') def test_numeric_precision(self): self.assert_compile(types.NUMERIC(2), 'NUMERIC(2)') def test_numeric_scale(self): self.assert_compile(types.NUMERIC(2, 4), 'NUMERIC(2, 4)') def test_decimal_plain(self): self.assert_compile(types.DECIMAL(), 'DECIMAL') def test_decimal_precision(self): self.assert_compile(types.DECIMAL(2), 'DECIMAL(2)') def test_decimal_scale(self): self.assert_compile(types.DECIMAL(2, 4), 'DECIMAL(2, 4)') class NumericRawSQLTest(fixtures.TestBase): """Test what DBAPIs and dialects return without any typing information supplied at the SQLA level. 
""" def _fixture(self, metadata, type, data): t = Table('t', metadata, Column("val", type) ) metadata.create_all() t.insert().execute(val=data) @testing.fails_on('sqlite', "Doesn't provide Decimal results natively") @testing.provide_metadata def test_decimal_fp(self): metadata = self.metadata self._fixture(metadata, Numeric(10, 5), decimal.Decimal("45.5")) val = testing.db.execute("select val from t").scalar() assert isinstance(val, decimal.Decimal) eq_(val, decimal.Decimal("45.5")) @testing.fails_on('sqlite', "Doesn't provide Decimal results natively") @testing.provide_metadata def test_decimal_int(self): metadata = self.metadata self._fixture(metadata, Numeric(10, 5), decimal.Decimal("45")) val = testing.db.execute("select val from t").scalar() assert isinstance(val, decimal.Decimal) eq_(val, decimal.Decimal("45")) @testing.provide_metadata def test_ints(self): metadata = self.metadata self._fixture(metadata, Integer, 45) val = testing.db.execute("select val from t").scalar() assert isinstance(val, (int, long)) eq_(val, 45) @testing.provide_metadata def test_float(self): metadata = self.metadata self._fixture(metadata, Float, 46.583) val = testing.db.execute("select val from t").scalar() assert isinstance(val, float) # some DBAPIs have unusual float handling if testing.against('oracle+cx_oracle', 'mysql+oursql', 'firebird'): eq_(round_decimal(val, 3), 46.583) else: eq_(val, 46.583) class IntervalTest(fixtures.TestBase, AssertsExecutionResults): @classmethod def setup_class(cls): global interval_table, metadata metadata = MetaData(testing.db) interval_table = Table("intervaltable", metadata, Column("id", Integer, primary_key=True, test_needs_autoincrement=True), Column("native_interval", Interval()), Column("native_interval_args", Interval(day_precision=3, second_precision=6)), Column("non_native_interval", Interval(native=False)), ) metadata.create_all() @engines.close_first def teardown(self): interval_table.delete().execute() @classmethod def teardown_class(cls): metadata.drop_all() def test_non_native_adapt(self): interval = Interval(native=False) adapted = interval.dialect_impl(testing.db.dialect) assert type(adapted) is Interval assert adapted.native is False eq_(str(adapted), "DATETIME") @testing.fails_on("+pg8000", "Not yet known how to pass values of the INTERVAL type") @testing.fails_on("postgresql+zxjdbc", "Not yet known how to pass values of the INTERVAL type") @testing.fails_on("oracle+zxjdbc", "Not yet known how to pass values of the INTERVAL type") def test_roundtrip(self): small_delta = datetime.timedelta(days=15, seconds=5874) delta = datetime.timedelta(414) interval_table.insert().execute( native_interval=small_delta, native_interval_args=delta, non_native_interval=delta ) row = interval_table.select().execute().first() eq_(row['native_interval'], small_delta) eq_(row['native_interval_args'], delta) eq_(row['non_native_interval'], delta) @testing.fails_on("oracle+zxjdbc", "Not yet known how to pass values of the INTERVAL type") def test_null(self): interval_table.insert().execute(id=1, native_inverval=None, non_native_interval=None) row = interval_table.select().execute().first() eq_(row['native_interval'], None) eq_(row['native_interval_args'], None) eq_(row['non_native_interval'], None) class BooleanTest(fixtures.TestBase, AssertsExecutionResults, AssertsCompiledSQL): @classmethod def setup_class(cls): global bool_table metadata = MetaData(testing.db) bool_table = Table('booltest', metadata, Column('id', Integer, primary_key=True, autoincrement=False), Column('value', 
Boolean), Column('unconstrained_value', Boolean(create_constraint=False)), ) bool_table.create() @classmethod def teardown_class(cls): bool_table.drop() def teardown(self): bool_table.delete().execute() def test_boolean(self): bool_table.insert().execute(id=1, value=True) bool_table.insert().execute(id=2, value=False) bool_table.insert().execute(id=3, value=True) bool_table.insert().execute(id=4, value=True) bool_table.insert().execute(id=5, value=True) bool_table.insert().execute(id=6, value=None) res = select([bool_table.c.id, bool_table.c.value]).where( bool_table.c.value == True ).order_by(bool_table.c.id).execute().fetchall() eq_(res, [(1, True), (3, True), (4, True), (5, True)]) res2 = select([bool_table.c.id, bool_table.c.value]).where( bool_table.c.value == False).execute().fetchall() eq_(res2, [(2, False)]) res3 = select([bool_table.c.id, bool_table.c.value]).\ order_by(bool_table.c.id).\ execute().fetchall() eq_(res3, [(1, True), (2, False), (3, True), (4, True), (5, True), (6, None)]) # ensure we're getting True/False, not just ints assert res3[0][1] is True assert res3[1][1] is False @testing.fails_on('mysql', "The CHECK clause is parsed but ignored by all storage engines.") @testing.fails_on('mssql', "FIXME: MS-SQL 2005 doesn't honor CHECK ?!?") @testing.skip_if(lambda: testing.db.dialect.supports_native_boolean) def test_constraint(self): assert_raises((exc.IntegrityError, exc.ProgrammingError), testing.db.execute, "insert into booltest (id, value) values(1, 5)") @testing.skip_if(lambda: testing.db.dialect.supports_native_boolean) def test_unconstrained(self): testing.db.execute( "insert into booltest (id, unconstrained_value) values (1, 5)") def test_non_native_constraint_custom_type(self): class Foob(object): def __init__(self, value): self.value = value class MyBool(types.SchemaType, TypeDecorator): impl = Boolean() def _set_table(self, table, column): self.impl._set_table(table, column) # future method def process_literal_param(self, value, dialect): return value.value def process_bind_param(self, value, dialect): return value.value m = MetaData() t1 = Table('t', m, Column('x', MyBool())) const = [c for c in t1.constraints if isinstance(c, CheckConstraint)][0] self.assert_compile( AddConstraint(const), "ALTER TABLE t ADD CHECK (x IN (0, 1))", dialect=dialects.sqlite.dialect() ) class PickleTest(fixtures.TestBase): def test_eq_comparison(self): p1 = PickleType() for obj in ( {'1': '2'}, pickleable.Bar(5, 6), pickleable.OldSchool(10, 11) ): assert p1.compare_values(p1.copy_value(obj), obj) assert_raises(NotImplementedError, p1.compare_values, pickleable.BrokenComparable('foo'), pickleable.BrokenComparable('foo')) def test_nonmutable_comparison(self): p1 = PickleType() for obj in ( {'1': '2'}, pickleable.Bar(5, 6), pickleable.OldSchool(10, 11) ): assert p1.compare_values(p1.copy_value(obj), obj) class CallableTest(fixtures.TestBase): @classmethod def setup_class(cls): global meta meta = MetaData(testing.db) @classmethod def teardown_class(cls): meta.drop_all() def test_callable_as_arg(self): ucode = util.partial(Unicode) thing_table = Table('thing', meta, Column('name', ucode(20)) ) assert isinstance(thing_table.c.name.type, Unicode) thing_table.create() def test_callable_as_kwarg(self): ucode = util.partial(Unicode) thang_table = Table('thang', meta, Column('name', type_=ucode(20), primary_key=True) ) assert isinstance(thang_table.c.name.type, Unicode) thang_table.create() SQLAlchemy-0.8.4/test/sql/test_unicode.py0000644000076500000240000001447212251150016021047 0ustar 
classicstaff00000000000000# coding: utf-8 """verrrrry basic unicode column name testing""" from sqlalchemy import * from sqlalchemy.testing import fixtures, engines, eq_ from sqlalchemy import testing, util from sqlalchemy.testing.engines import utf8_engine from sqlalchemy.sql import column from sqlalchemy.testing.schema import Table, Column class UnicodeSchemaTest(fixtures.TestBase): __requires__ = ('unicode_ddl',) @classmethod def setup_class(cls): global unicode_bind, metadata, t1, t2, t3 unicode_bind = utf8_engine() metadata = MetaData(unicode_bind) t1 = Table('unitable1', metadata, Column(u'méil', Integer, primary_key=True), Column(u'\u6e2c\u8a66', Integer), test_needs_fk=True, ) t2 = Table(u'Unitéble2', metadata, Column(u'méil', Integer, primary_key=True, key="a"), Column(u'\u6e2c\u8a66', Integer, ForeignKey(u'unitable1.méil'), key="b" ), test_needs_fk=True, ) # Few DBs support Unicode foreign keys if testing.against('sqlite'): t3 = Table(u'\u6e2c\u8a66', metadata, Column(u'\u6e2c\u8a66_id', Integer, primary_key=True, autoincrement=False), Column(u'unitable1_\u6e2c\u8a66', Integer, ForeignKey(u'unitable1.\u6e2c\u8a66') ), Column(u'Unitéble2_b', Integer, ForeignKey(u'Unitéble2.b') ), Column(u'\u6e2c\u8a66_self', Integer, ForeignKey(u'\u6e2c\u8a66.\u6e2c\u8a66_id') ), test_needs_fk=True, ) else: t3 = Table(u'\u6e2c\u8a66', metadata, Column(u'\u6e2c\u8a66_id', Integer, primary_key=True, autoincrement=False), Column(u'unitable1_\u6e2c\u8a66', Integer), Column(u'Unitéble2_b', Integer), Column(u'\u6e2c\u8a66_self', Integer), test_needs_fk=True, ) metadata.create_all() @engines.close_first def teardown(self): if metadata.tables: t3.delete().execute() t2.delete().execute() t1.delete().execute() @classmethod def teardown_class(cls): global unicode_bind metadata.drop_all() del unicode_bind @testing.skip_if(lambda: util.pypy, "pypy/sqlite3 reports unicode cursor.description " "incorrectly pre 2.2, workaround applied in 0.9") def test_insert(self): t1.insert().execute({u'méil':1, u'\u6e2c\u8a66':5}) t2.insert().execute({'a':1, 'b':1}) t3.insert().execute({u'\u6e2c\u8a66_id': 1, u'unitable1_\u6e2c\u8a66': 5, u'Unitéble2_b': 1, u'\u6e2c\u8a66_self': 1}) assert t1.select().execute().fetchall() == [(1, 5)] assert t2.select().execute().fetchall() == [(1, 1)] assert t3.select().execute().fetchall() == [(1, 5, 1, 1)] @testing.skip_if(lambda: util.pypy, "pypy/sqlite3 reports unicode cursor.description " "incorrectly pre 2.2, workaround applied in 0.9") def test_reflect(self): t1.insert().execute({u'méil':2, u'\u6e2c\u8a66':7}) t2.insert().execute({'a':2, 'b':2}) t3.insert().execute({u'\u6e2c\u8a66_id': 2, u'unitable1_\u6e2c\u8a66': 7, u'Unitéble2_b': 2, u'\u6e2c\u8a66_self': 2}) meta = MetaData(unicode_bind) tt1 = Table(t1.name, meta, autoload=True) tt2 = Table(t2.name, meta, autoload=True) tt3 = Table(t3.name, meta, autoload=True) tt1.insert().execute({u'méil':1, u'\u6e2c\u8a66':5}) tt2.insert().execute({u'méil':1, u'\u6e2c\u8a66':1}) tt3.insert().execute({u'\u6e2c\u8a66_id': 1, u'unitable1_\u6e2c\u8a66': 5, u'Unitéble2_b': 1, u'\u6e2c\u8a66_self': 1}) self.assert_(tt1.select(order_by=desc(u'méil')).execute().fetchall() == [(2, 7), (1, 5)]) self.assert_(tt2.select(order_by=desc(u'méil')).execute().fetchall() == [(2, 2), (1, 1)]) self.assert_(tt3.select(order_by=desc(u'\u6e2c\u8a66_id')). 
execute().fetchall() == [(2, 7, 2, 2), (1, 5, 1, 1)]) meta.drop_all() metadata.create_all() def test_repr(self): m = MetaData() t = Table(u'\u6e2c\u8a66', m, Column(u'\u6e2c\u8a66_id', Integer)) if util.py2k: eq_( repr(t), ( "Table(u'\\u6e2c\\u8a66', MetaData(bind=None), " "Column(u'\\u6e2c\\u8a66_id', Integer(), table=<\\u6e2c\\u8a66>), " "schema=None)")) class EscapesDefaultsTest(fixtures.TestBase): def test_default_exec(self): metadata = MetaData(testing.db) t1 = Table('t1', metadata, Column(u'special_col', Integer, Sequence('special_col'), primary_key=True), Column('data', String(50)) # to appease SQLite without DEFAULT VALUES ) metadata.create_all() try: engine = metadata.bind # reset the identifier preparer, so that we can force it to cache # a unicode identifier engine.dialect.identifier_preparer = engine.dialect.preparer(engine.dialect) select([column(u'special_col')]).select_from(t1).execute().close() assert isinstance(engine.dialect.identifier_preparer.format_sequence(Sequence('special_col')), unicode) # now execute, run the sequence. it should run in u"Special_col.nextid" or similar as # a unicode object; cx_oracle asserts that this is None or a String (postgresql lets it pass thru). # ensure that executioncontext._exec_default() is encoding. t1.insert().execute(data='foo') finally: metadata.drop_all() SQLAlchemy-0.8.4/test/sql/test_update.py0000644000076500000240000004565212251150016020707 0ustar classicstaff00000000000000from sqlalchemy import * from sqlalchemy import testing from sqlalchemy.dialects import mysql from sqlalchemy.engine import default from sqlalchemy.testing import AssertsCompiledSQL, eq_, fixtures from sqlalchemy.testing.schema import Table, Column class _UpdateFromTestBase(object): @classmethod def define_tables(cls, metadata): Table('mytable', metadata, Column('myid', Integer), Column('name', String(30)), Column('description', String(50))) Table('myothertable', metadata, Column('otherid', Integer), Column('othername', String(30))) Table('users', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', String(30), nullable=False)) Table('addresses', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('user_id', None, ForeignKey('users.id')), Column('name', String(30), nullable=False), Column('email_address', String(50), nullable=False)) Table('dingalings', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('address_id', None, ForeignKey('addresses.id')), Column('data', String(30))) @classmethod def fixtures(cls): return dict( users=( ('id', 'name'), (7, 'jack'), (8, 'ed'), (9, 'fred'), (10, 'chuck') ), addresses = ( ('id', 'user_id', 'name', 'email_address'), (1, 7, 'x', 'jack@bean.com'), (2, 8, 'x', 'ed@wood.com'), (3, 8, 'x', 'ed@bettyboop.com'), (4, 8, 'x', 'ed@lala.com'), (5, 9, 'x', 'fred@fred.com') ), dingalings = ( ('id', 'address_id', 'data'), (1, 2, 'ding 1/2'), (2, 5, 'ding 2/5') ), ) class UpdateTest(_UpdateFromTestBase, fixtures.TablesTest, AssertsCompiledSQL): __dialect__ = 'default' def test_update_1(self): table1 = self.tables.mytable self.assert_compile( update(table1, table1.c.myid == 7), 'UPDATE mytable SET name=:name WHERE mytable.myid = :myid_1', params={table1.c.name: 'fred'}) def test_update_2(self): table1 = self.tables.mytable self.assert_compile( table1.update(). where(table1.c.myid == 7). 
values({table1.c.myid: 5}), 'UPDATE mytable SET myid=:myid WHERE mytable.myid = :myid_1', checkparams={'myid': 5, 'myid_1': 7}) def test_update_3(self): table1 = self.tables.mytable self.assert_compile( update(table1, table1.c.myid == 7), 'UPDATE mytable SET name=:name WHERE mytable.myid = :myid_1', params={'name': 'fred'}) def test_update_4(self): table1 = self.tables.mytable self.assert_compile( update(table1, values={table1.c.name: table1.c.myid}), 'UPDATE mytable SET name=mytable.myid') def test_update_5(self): table1 = self.tables.mytable self.assert_compile( update(table1, whereclause=table1.c.name == bindparam('crit'), values={table1.c.name: 'hi'}), 'UPDATE mytable SET name=:name WHERE mytable.name = :crit', params={'crit': 'notthere'}, checkparams={'crit': 'notthere', 'name': 'hi'}) def test_update_6(self): table1 = self.tables.mytable self.assert_compile( update(table1, table1.c.myid == 12, values={table1.c.name: table1.c.myid}), 'UPDATE mytable ' 'SET name=mytable.myid, description=:description ' 'WHERE mytable.myid = :myid_1', params={'description': 'test'}, checkparams={'description': 'test', 'myid_1': 12}) def test_update_7(self): table1 = self.tables.mytable self.assert_compile( update(table1, table1.c.myid == 12, values={table1.c.myid: 9}), 'UPDATE mytable ' 'SET myid=:myid, description=:description ' 'WHERE mytable.myid = :myid_1', params={'myid_1': 12, 'myid': 9, 'description': 'test'}) def test_update_8(self): table1 = self.tables.mytable self.assert_compile( update(table1, table1.c.myid == 12), 'UPDATE mytable SET myid=:myid WHERE mytable.myid = :myid_1', params={'myid': 18}, checkparams={'myid': 18, 'myid_1': 12}) def test_update_9(self): table1 = self.tables.mytable s = table1.update(table1.c.myid == 12, values={table1.c.name: 'lala'}) c = s.compile(column_keys=['id', 'name']) eq_(str(s), str(c)) def test_update_10(self): table1 = self.tables.mytable v1 = {table1.c.name: table1.c.myid} v2 = {table1.c.name: table1.c.name + 'foo'} self.assert_compile( update(table1, table1.c.myid == 12, values=v1).values(v2), 'UPDATE mytable ' 'SET ' 'name=(mytable.name || :name_1), ' 'description=:description ' 'WHERE mytable.myid = :myid_1', params={'description': 'test'}) def test_update_11(self): table1 = self.tables.mytable values = { table1.c.name: table1.c.name + 'lala', table1.c.myid: func.do_stuff(table1.c.myid, literal('hoho')) } self.assert_compile(update(table1, (table1.c.myid == func.hoho(4)) & (table1.c.name == literal('foo') + table1.c.name + literal('lala')), values=values), 'UPDATE mytable ' 'SET ' 'myid=do_stuff(mytable.myid, :param_1), ' 'name=(mytable.name || :name_1) ' 'WHERE ' 'mytable.myid = hoho(:hoho_1) AND ' 'mytable.name = :param_2 || mytable.name || :param_3') def test_prefix_with(self): table1 = self.tables.mytable stmt = table1.update().\ prefix_with('A', 'B', dialect='mysql').\ prefix_with('C', 'D') self.assert_compile(stmt, 'UPDATE C D mytable SET myid=:myid, name=:name, ' 'description=:description') self.assert_compile(stmt, 'UPDATE A B C D mytable SET myid=%s, name=%s, description=%s', dialect=mysql.dialect()) def test_alias(self): table1 = self.tables.mytable talias1 = table1.alias('t1') self.assert_compile(update(talias1, talias1.c.myid == 7), 'UPDATE mytable AS t1 ' 'SET name=:name ' 'WHERE t1.myid = :myid_1', params={table1.c.name: 'fred'}) self.assert_compile(update(talias1, table1.c.myid == 7), 'UPDATE mytable AS t1 ' 'SET name=:name ' 'FROM mytable ' 'WHERE mytable.myid = :myid_1', params={table1.c.name: 'fred'}) def 
test_update_to_expression(self): """test update from an expression. this logic is triggered currently by a left side that doesn't have a key. The current supported use case is updating the index of a Postgresql ARRAY type. """ table1 = self.tables.mytable expr = func.foo(table1.c.myid) assert not hasattr(expr, 'key') self.assert_compile(table1.update().values({expr: 'bar'}), 'UPDATE mytable SET foo(myid)=:param_1') def test_update_bound_ordering(self): """test that bound parameters between the UPDATE and FROM clauses order correctly in different SQL compilation scenarios. """ table1 = self.tables.mytable table2 = self.tables.myothertable sel = select([table2]).where(table2.c.otherid == 5).alias() upd = table1.update().\ where(table1.c.name == sel.c.othername).\ values(name='foo') dialect = default.DefaultDialect() dialect.positional = True self.assert_compile( upd, "UPDATE mytable SET name=:name FROM (SELECT " "myothertable.otherid AS otherid, " "myothertable.othername AS othername " "FROM myothertable " "WHERE myothertable.otherid = :otherid_1) AS anon_1 " "WHERE mytable.name = anon_1.othername", checkpositional=('foo', 5), dialect=dialect ) self.assert_compile( upd, "UPDATE mytable, (SELECT myothertable.otherid AS otherid, " "myothertable.othername AS othername " "FROM myothertable " "WHERE myothertable.otherid = %s) AS anon_1 SET mytable.name=%s " "WHERE mytable.name = anon_1.othername", checkpositional=(5, 'foo'), dialect=mysql.dialect() ) class UpdateFromCompileTest(_UpdateFromTestBase, fixtures.TablesTest, AssertsCompiledSQL): __dialect__ = 'default' run_create_tables = run_inserts = run_deletes = None def test_render_table(self): users, addresses = self.tables.users, self.tables.addresses self.assert_compile( users.update(). values(name='newname'). where(users.c.id == addresses.c.user_id). where(addresses.c.email_address == 'e1'), 'UPDATE users ' 'SET name=:name FROM addresses ' 'WHERE ' 'users.id = addresses.user_id AND ' 'addresses.email_address = :email_address_1', checkparams={u'email_address_1': 'e1', 'name': 'newname'}) def test_render_multi_table(self): users = self.tables.users addresses = self.tables.addresses dingalings = self.tables.dingalings checkparams = { u'email_address_1': 'e1', u'id_1': 2, 'name': 'newname' } self.assert_compile( users.update(). values(name='newname'). where(users.c.id == addresses.c.user_id). where(addresses.c.email_address == 'e1'). where(addresses.c.id == dingalings.c.address_id). where(dingalings.c.id == 2), 'UPDATE users ' 'SET name=:name ' 'FROM addresses, dingalings ' 'WHERE ' 'users.id = addresses.user_id AND ' 'addresses.email_address = :email_address_1 AND ' 'addresses.id = dingalings.address_id AND ' 'dingalings.id = :id_1', checkparams=checkparams) def test_render_table_mysql(self): users, addresses = self.tables.users, self.tables.addresses self.assert_compile( users.update(). values(name='newname'). where(users.c.id == addresses.c.user_id). where(addresses.c.email_address == 'e1'), 'UPDATE users, addresses ' 'SET users.name=%s ' 'WHERE ' 'users.id = addresses.user_id AND ' 'addresses.email_address = %s', checkparams={u'email_address_1': 'e1', 'name': 'newname'}, dialect=mysql.dialect()) def test_render_subquery(self): users, addresses = self.tables.users, self.tables.addresses checkparams = { u'email_address_1': 'e1', u'id_1': 7, 'name': 'newname' } cols = [ addresses.c.id, addresses.c.user_id, addresses.c.email_address ] subq = select(cols).where(addresses.c.id == 7).alias() self.assert_compile( users.update(). 
values(name='newname'). where(users.c.id == subq.c.user_id). where(subq.c.email_address == 'e1'), 'UPDATE users ' 'SET name=:name FROM (' 'SELECT ' 'addresses.id AS id, ' 'addresses.user_id AS user_id, ' 'addresses.email_address AS email_address ' 'FROM addresses ' 'WHERE addresses.id = :id_1' ') AS anon_1 ' 'WHERE users.id = anon_1.user_id ' 'AND anon_1.email_address = :email_address_1', checkparams=checkparams) class UpdateFromRoundTripTest(_UpdateFromTestBase, fixtures.TablesTest): @testing.requires.update_from def test_exec_two_table(self): users, addresses = self.tables.users, self.tables.addresses testing.db.execute( addresses.update(). values(email_address=users.c.name). where(users.c.id == addresses.c.user_id). where(users.c.name == 'ed')) expected = [ (1, 7, 'x', 'jack@bean.com'), (2, 8, 'x', 'ed'), (3, 8, 'x', 'ed'), (4, 8, 'x', 'ed'), (5, 9, 'x', 'fred@fred.com')] self._assert_addresses(addresses, expected) @testing.requires.update_from def test_exec_two_table_plus_alias(self): users, addresses = self.tables.users, self.tables.addresses a1 = addresses.alias() testing.db.execute( addresses.update(). values(email_address=users.c.name). where(users.c.id == a1.c.user_id). where(users.c.name == 'ed'). where(a1.c.id == addresses.c.id) ) expected = [ (1, 7, 'x', 'jack@bean.com'), (2, 8, 'x', 'ed'), (3, 8, 'x', 'ed'), (4, 8, 'x', 'ed'), (5, 9, 'x', 'fred@fred.com')] self._assert_addresses(addresses, expected) @testing.requires.update_from def test_exec_three_table(self): users = self.tables.users addresses = self.tables.addresses dingalings = self.tables.dingalings testing.db.execute( addresses.update(). values(email_address=users.c.name). where(users.c.id == addresses.c.user_id). where(users.c.name == 'ed'). where(addresses.c.id == dingalings.c.address_id). where(dingalings.c.id == 1)) expected = [ (1, 7, 'x', 'jack@bean.com'), (2, 8, 'x', 'ed'), (3, 8, 'x', 'ed@bettyboop.com'), (4, 8, 'x', 'ed@lala.com'), (5, 9, 'x', 'fred@fred.com')] self._assert_addresses(addresses, expected) @testing.only_on('mysql', 'Multi table update') def test_exec_multitable(self): users, addresses = self.tables.users, self.tables.addresses values = { addresses.c.email_address: users.c.name, users.c.name: 'ed2' } testing.db.execute( addresses.update(). values(values). where(users.c.id == addresses.c.user_id). 
where(users.c.name == 'ed')) expected = [ (1, 7, 'x', 'jack@bean.com'), (2, 8, 'x', 'ed'), (3, 8, 'x', 'ed'), (4, 8, 'x', 'ed'), (5, 9, 'x', 'fred@fred.com')] self._assert_addresses(addresses, expected) expected = [ (7, 'jack'), (8, 'ed2'), (9, 'fred'), (10, 'chuck')] self._assert_users(users, expected) def _assert_addresses(self, addresses, expected): stmt = addresses.select().order_by(addresses.c.id) eq_(testing.db.execute(stmt).fetchall(), expected) def _assert_users(self, users, expected): stmt = users.select().order_by(users.c.id) eq_(testing.db.execute(stmt).fetchall(), expected) class UpdateFromMultiTableUpdateDefaultsTest(_UpdateFromTestBase, fixtures.TablesTest): @classmethod def define_tables(cls, metadata): Table('users', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', String(30), nullable=False), Column('some_update', String(30), onupdate='im the update')) Table('addresses', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('user_id', None, ForeignKey('users.id')), Column('email_address', String(50), nullable=False)) @classmethod def fixtures(cls): return dict( users=( ('id', 'name', 'some_update'), (8, 'ed', 'value'), (9, 'fred', 'value'), ), addresses=( ('id', 'user_id', 'email_address'), (2, 8, 'ed@wood.com'), (3, 8, 'ed@bettyboop.com'), (4, 9, 'fred@fred.com') ), ) @testing.only_on('mysql', 'Multi table update') def test_defaults_second_table(self): users, addresses = self.tables.users, self.tables.addresses values = { addresses.c.email_address: users.c.name, users.c.name: 'ed2' } ret = testing.db.execute( addresses.update(). values(values). where(users.c.id == addresses.c.user_id). where(users.c.name == 'ed')) eq_(set(ret.prefetch_cols()), set([users.c.some_update])) expected = [ (2, 8, 'ed'), (3, 8, 'ed'), (4, 9, 'fred@fred.com')] self._assert_addresses(addresses, expected) expected = [ (8, 'ed2', 'im the update'), (9, 'fred', 'value')] self._assert_users(users, expected) @testing.only_on('mysql', 'Multi table update') def test_no_defaults_second_table(self): users, addresses = self.tables.users, self.tables.addresses ret = testing.db.execute( addresses.update(). values({'email_address': users.c.name}). where(users.c.id == addresses.c.user_id). where(users.c.name == 'ed')) eq_(ret.prefetch_cols(), []) expected = [ (2, 8, 'ed'), (3, 8, 'ed'), (4, 9, 'fred@fred.com')] self._assert_addresses(addresses, expected) # users table not actually updated, so no onupdate expected = [ (8, 'ed', 'value'), (9, 'fred', 'value')] self._assert_users(users, expected) def _assert_addresses(self, addresses, expected): stmt = addresses.select().order_by(addresses.c.id) eq_(testing.db.execute(stmt).fetchall(), expected) def _assert_users(self, users, expected): stmt = users.select().order_by(users.c.id) eq_(testing.db.execute(stmt).fetchall(), expected)
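# ---------------------------------------------------------------------------
# Illustrative sketch -- NOT part of the original SQLAlchemy 0.8.4 test suite.
# It shows, outside the fixture machinery above, how the multi-table UPDATE
# constructs exercised by UpdateFromCompileTest render on different dialects.
# The "users"/"addresses" layout below simply mirrors the fixtures used by
# the tests; the statement is only compiled (never executed), since the
# UPDATE..FROM form is backend-specific (e.g. PostgreSQL) and MySQL instead
# uses the comma-separated multi-table form.  Guarded by __main__ so that
# importing the test module is unaffected.
if __name__ == '__main__':
    from sqlalchemy import MetaData, Table, Column, Integer, String, ForeignKey
    from sqlalchemy.dialects import mysql, postgresql

    meta = MetaData()
    users = Table('users', meta,
                  Column('id', Integer, primary_key=True),
                  Column('name', String(30)))
    addresses = Table('addresses', meta,
                      Column('id', Integer, primary_key=True),
                      Column('user_id', Integer, ForeignKey('users.id')),
                      Column('email_address', String(50)))

    stmt = users.update().\
        values(name='newname').\
        where(users.c.id == addresses.c.user_id).\
        where(addresses.c.email_address == 'e1')

    # PostgreSQL-style rendering: UPDATE users SET name=... FROM addresses WHERE ...
    print(stmt.compile(dialect=postgresql.dialect()))
    # MySQL-style rendering: UPDATE users, addresses SET users.name=... WHERE ...
    print(stmt.compile(dialect=mysql.dialect()))
# ---------------------------------------------------------------------------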